diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/alpha/kernel/entry.S linux.21rc5-ac2/arch/alpha/kernel/entry.S
--- linux.21rc5/arch/alpha/kernel/entry.S	2003-05-28 15:08:49.000000000 +0100
+++ linux.21rc5-ac2/arch/alpha/kernel/entry.S	2003-04-22 16:44:36.000000000 +0100
@@ -690,6 +690,7 @@
 	.end entSys
 
 	.globl	ret_from_fork
+#if CONFIG_SMP
 	.align 3
 	.ent ret_from_fork
 ret_from_fork:
@@ -697,6 +698,9 @@
 	mov	$17,$16
 	jsr	$31,schedule_tail
 	.end ret_from_fork
+#else
+ret_from_fork = ret_from_sys_call
+#endif
 
 	.align 3
 	.ent reschedule
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/alpha/kernel/process.c linux.21rc5-ac2/arch/alpha/kernel/process.c
--- linux.21rc5/arch/alpha/kernel/process.c	2003-05-28 15:08:49.000000000 +0100
+++ linux.21rc5-ac2/arch/alpha/kernel/process.c	2003-04-22 16:44:36.000000000 +0100
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -74,9 +75,6 @@
 cpu_idle(void)
 {
 	/* An endless idle loop with no priority at all. */
-	current->nice = 20;
-	current->counter = -100;
-
 	while (1) {
 		/* FIXME -- EV6 and LCA45 know how to power down the CPU. */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/alpha/kernel/smp.c linux.21rc5-ac2/arch/alpha/kernel/smp.c
--- linux.21rc5/arch/alpha/kernel/smp.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/alpha/kernel/smp.c	2003-04-22 16:44:36.000000000 +0100
@@ -81,6 +81,7 @@
 int smp_num_probed;		/* Internal processor count */
 int smp_num_cpus = 1;		/* Number that came online.  */
 int smp_threads_ready;		/* True once the per process idle is forked. */
+unsigned long cache_decay_ticks;
 
 int __cpu_number_map[NR_CPUS];
 int __cpu_logical_map[NR_CPUS];
@@ -155,11 +156,6 @@
 {
 	int cpuid = hard_smp_processor_id();
 
-	if (current != init_tasks[cpu_number_map(cpuid)]) {
-		printk("BUG: smp_calling: cpu %d current %p init_tasks[cpu_number_map(cpuid)] %p\n",
-		       cpuid, current, init_tasks[cpu_number_map(cpuid)]);
-	}
-
 	DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));
 
 	/* Turn on machine checks.  */
@@ -217,9 +213,6 @@
 	DBGS(("smp_callin: commencing CPU %d current %p\n",
 	      cpuid, current));
 
-	/* Setup the scheduler for this processor.  */
-	init_idle();
-
 	/* ??? This should be in init_idle.  */
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -449,14 +442,11 @@
 	if (idle == &init_task)
 		panic("idle process is init_task for CPU %d", cpuid);
 
-	idle->processor = cpuid;
-	idle->cpus_runnable = 1 << cpuid; /* we schedule the first task manually */
+	init_idle(idle, cpuid);
+	unhash_process(idle);
+
 	__cpu_logical_map[cpunum] = cpuid;
 	__cpu_number_map[cpuid] = cpunum;
-
-	del_from_runqueue(idle);
-	unhash_process(idle);
-	init_tasks[cpunum] = idle;
 
 	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
 	      cpuid, idle->state, idle->flags));
@@ -563,13 +553,10 @@
 	__cpu_number_map[boot_cpuid] = 0;
 	__cpu_logical_map[0] = boot_cpuid;
-	current->processor = boot_cpuid;
 
 	smp_store_cpu_info(boot_cpuid);
 	smp_setup_percpu_timer(boot_cpuid);
 
-	init_idle();
-
 	/* ??? This should be in init_idle.  */
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/alpha/kernel/srmcons.c linux.21rc5-ac2/arch/alpha/kernel/srmcons.c
--- linux.21rc5/arch/alpha/kernel/srmcons.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/alpha/kernel/srmcons.c	2003-04-22 16:44:36.000000000 +0100
@@ -260,7 +260,7 @@
 
 	spin_lock_irqsave(&srmconsp->lock, flags);
 
-	if (tty->count == 1) {
+	if (atomic_read(&tty->count) == 1) {
 		srmconsp->tty = NULL;
 		del_timer(&srmconsp->timer);
 	}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/alpha/mm/fault.c linux.21rc5-ac2/arch/alpha/mm/fault.c
--- linux.21rc5/arch/alpha/mm/fault.c	2003-05-28 15:08:07.000000000 +0100
+++ linux.21rc5-ac2/arch/alpha/mm/fault.c	2003-04-22 16:44:36.000000000 +0100
@@ -122,8 +122,6 @@
 		goto bad_area;
 	if (vma->vm_start <= address)
 		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
 	if (expand_stack(vma, address))
 		goto bad_area;
 /*
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/arm/config.in linux.21rc5-ac2/arch/arm/config.in
--- linux.21rc5/arch/arm/config.in	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/arm/config.in	2003-04-22 16:44:36.000000000 +0100
@@ -646,6 +646,7 @@
 bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
 dep_bool '  Debug memory allocations' CONFIG_DEBUG_SLAB $CONFIG_DEBUG_KERNEL
 dep_bool '  Magic SysRq key' CONFIG_MAGIC_SYSRQ $CONFIG_DEBUG_KERNEL
+dep_bool '  Morse code panics' CONFIG_PANIC_MORSE $CONFIG_DEBUG_KERNEL $CONFIG_PC_KEYB
 dep_bool '  Spinlock debugging' CONFIG_DEBUG_SPINLOCK $CONFIG_DEBUG_KERNEL
 dep_bool '  Wait queue debugging' CONFIG_DEBUG_WAITQ $CONFIG_DEBUG_KERNEL
 dep_bool '  Verbose BUG() reporting (adds 70K)' CONFIG_DEBUG_BUGVERBOSE $CONFIG_DEBUG_KERNEL
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/arm/mm/fault-common.c linux.21rc5-ac2/arch/arm/mm/fault-common.c
--- linux.21rc5/arch/arm/mm/fault-common.c	2003-05-28 15:08:12.000000000 +0100
+++ linux.21rc5-ac2/arch/arm/mm/fault-common.c	2003-04-22 16:44:36.000000000 +0100
@@ -229,7 +229,7 @@
 		goto survive;
 
 check_stack:
-	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+	if (!expand_stack(vma, addr))
 		goto good_area;
 out:
 	return fault;
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/cris/drivers/serial.c linux.21rc5-ac2/arch/cris/drivers/serial.c
--- linux.21rc5/arch/cris/drivers/serial.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/cris/drivers/serial.c	2003-04-22 16:44:36.000000000 +0100
@@ -3293,7 +3293,7 @@
 	printk("[%d] rs_close ttyS%d, count = %d\n", current->pid,
 	       info->line, info->count);
 #endif
-	if ((tty->count == 1) && (info->count != 1)) {
+	if ((atomic_read(&tty->count) == 1) && (info->count != 1)) {
 		/*
 		 * Uh, oh.  tty->count is 1, which means that the tty
 		 * structure will be freed.  Info->count should always
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/boot/setup.S linux.21rc5-ac2/arch/i386/boot/setup.S
--- linux.21rc5/arch/i386/boot/setup.S	2003-05-28 15:08:06.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/boot/setup.S	2003-04-22 16:44:36.000000000 +0100
@@ -45,6 +45,10 @@
  * New A20 code ported from SYSLINUX by H. Peter Anvin. AMD Elan bugfixes
  * by Robert Schwebel, December 2001
  *
+ * BIOS Enhanced Disk Drive support
+ * by Matt Domsch October 2002
+ * conformant to T13 Committee www.t13.org
+ * projects 1572D, 1484D, 1386D, 1226DT
  */
 
 #include
@@ -53,6 +57,7 @@
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 
 /* Signature words to ensure LILO loaded us right */
@@ -543,6 +548,70 @@
 done_apm_bios:
 #endif
 
+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+# Do the BIOS Enhanced Disk Drive calls
+# This consists of two calls:
+#    int 13h ah=41h "Check Extensions Present"
+#    int 13h ah=48h "Get Device Parameters"
+#
+# A buffer of size EDDMAXNR*(EDDEXTSIZE+EDDPARMSIZE) is reserved for our use
+# in the empty_zero_page at EDDBUF.  The first four bytes of which are
+# used to store the device number, interface support map and version
+# results from fn41.  The following 74 bytes are used to store
+# the results from fn48.  Starting from device 80h, fn41, then fn48
+# are called and their results stored in EDDBUF+n*(EDDEXTSIZE+EDDPARMSIZE).
+# Then the pointer is incremented to store the data for the next call.
+# This repeats until either a device doesn't exist, or until EDDMAXNR
+# devices have been stored.
+# The one tricky part is that ds:si always points four bytes into
+# the structure, and the fn41 results are stored at offsets
+# from there.  This removes the need to increment the pointer for
+# every store, and leaves it ready for the fn48 call.
+# A second one-byte buffer, EDDNR, in the empty_zero_page stores
+# the number of BIOS devices which exist, up to EDDMAXNR.
+# In setup.c, copy_edd() stores both empty_zero_page buffers away
+# for later use, as they would get overwritten otherwise.
+# This code is sensitive to the size of the structs in edd.h
+edd_start:
+					# %ds points to the bootsector
+					# result buffer for fn48
+	movw	$EDDBUF+EDDEXTSIZE, %si	# in ds:si, fn41 results
+					# kept just before that
+	movb	$0, (EDDNR)		# zero value at EDDNR
+	movb	$0x80, %dl		# BIOS device 0x80
+
+edd_check_ext:
+	movb	$CHECKEXTENSIONSPRESENT, %ah	# Function 41
+	movw	$EDDMAGIC1, %bx			# magic
+	int	$0x13				# make the call
+	jc	edd_done			# no more BIOS devices
+
+	cmpw	$EDDMAGIC2, %bx			# is magic right?
+	jne	edd_next			# nope, next...
+
+	movb	%dl, %ds:-4(%si)		# store device number
+	movb	%ah, %ds:-3(%si)		# store version
+	movw	%cx, %ds:-2(%si)		# store extensions
+	incb	(EDDNR)				# note that we stored something
+
+edd_get_device_params:
+	movw	$EDDPARMSIZE, %ds:(%si)		# put size
+	movb	$GETDEVICEPARAMETERS, %ah	# Function 48
+	int	$0x13				# make the call
+						# Don't check for fail return
+						# it doesn't matter.
+	movw	%si, %ax			# increment si
+	addw	$EDDPARMSIZE+EDDEXTSIZE, %ax
+	movw	%ax, %si
+
+edd_next:
+	incb	%dl				# increment to next device
+	cmpb	$EDDMAXNR, (EDDNR)		# Out of space?
+	jb	edd_check_ext			# keep looping
+
+edd_done:
+#endif
+
 # Now we want to move to protected mode ...
 	cmpw	$0, %cs:realmode_swtch
 	jz	rmodeswtch_normal
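[Editorial sketch, not part of the patch: the comment above says that setup.c's copy_edd() saves both empty_zero_page buffers before that page is reused. Roughly, and assuming EDD_NR/EDD_BUF are accessor macros over empty_zero_page at offsets EDDNR/EDDBUF and struct edd_info matches one fn41+fn48 record, the copy step amounts to:]

	#include <linux/string.h>

	/* sketch only: storage for the probed BIOS EDD records */
	static struct edd_info edd[EDDMAXNR];
	static unsigned char eddnr;

	static inline void copy_edd(void)
	{
		eddnr = EDD_NR;			/* device count byte at EDDNR */
		memcpy(edd, EDD_BUF, sizeof(edd));	/* fn41+fn48 records at EDDBUF */
	}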
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/config.in linux.21rc5-ac2/arch/i386/config.in
--- linux.21rc5/arch/i386/config.in	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/config.in	2003-04-28 17:59:40.000000000 +0100
@@ -194,6 +194,23 @@
 
 bool 'Machine Check Exception' CONFIG_X86_MCE
 
+mainmenu_option next_comment
+comment 'CPU Frequency scaling'
+dep_bool 'CPU Frequency scaling (EXPERIMENTAL)' CONFIG_CPU_FREQ $CONFIG_EXPERIMENTAL
+if [ "$CONFIG_CPU_FREQ" = "y" ]; then
+   bool '  /proc/sys/cpu/ interface (2.4. / OLD)' CONFIG_CPU_FREQ_24_API
+   tristate '  AMD Mobile K6-2/K6-3 PowerNow!' CONFIG_X86_POWERNOW_K6
+   if [ "$CONFIG_MELAN" = "y" ]; then
+      tristate '  AMD Elan' CONFIG_ELAN_CPUFREQ
+   fi
+   tristate '  VIA Cyrix III Longhaul' CONFIG_X86_LONGHAUL
+   tristate '  Intel Speedstep' CONFIG_X86_SPEEDSTEP
+   tristate '  Intel Pentium 4 clock modulation' CONFIG_X86_P4_CLOCKMOD
+   tristate '  Transmeta LongRun' CONFIG_X86_LONGRUN
+   tristate '  Cyrix MediaGX/NatSemi Geode Suspend Modulation' CONFIG_X86_GX_SUSPMOD
+fi
+endmenu
+
 tristate 'Toshiba Laptop support' CONFIG_TOSHIBA
 tristate 'Dell laptop support' CONFIG_I8K
 
@@ -201,6 +218,10 @@
 tristate '/dev/cpu/*/msr - Model-specific register support' CONFIG_X86_MSR
 tristate '/dev/cpu/*/cpuid - CPU information support' CONFIG_X86_CPUID
 
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+   tristate 'BIOS Enhanced Disk Drive calls determine boot disk (EXPERIMENTAL)' CONFIG_EDD
+fi
+
 choice 'High Memory Support' \
 	"off    CONFIG_NOHIGHMEM \
 	 4GB    CONFIG_HIGHMEM4G \
@@ -231,6 +252,7 @@
 		define_bool CONFIG_X86_IO_APIC y
 	fi
 else
+	bool 'Clustered APIC support' CONFIG_X86_CLUSTERED_APIC
 	bool 'Multi-node NUMA system support' CONFIG_X86_NUMA
 	if [ "$CONFIG_X86_NUMA" = "y" ]; then
 		#Platform Choices
@@ -292,6 +314,8 @@
 	bool 'ISA bus support' CONFIG_ISA
 fi
 
+tristate 'NatSemi SCx200 support' CONFIG_SCx200
+
 source drivers/pci/Config.in
 
 bool 'EISA support' CONFIG_EISA
@@ -324,15 +348,9 @@
 tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
 tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
 
-bool 'Power Management support' CONFIG_PM
+bool 'Kernel .config support' CONFIG_IKCONFIG
 
-if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
-   dep_bool '  ACPI support' CONFIG_ACPI $CONFIG_PM
-
-   if [ "$CONFIG_ACPI" != "n" ]; then
-      source drivers/acpi/Config.in
-   fi
-fi
+bool 'Power Management support' CONFIG_PM
 
 dep_tristate '  Advanced Power Management BIOS support' CONFIG_APM $CONFIG_PM
 if [ "$CONFIG_APM" != "n" ]; then
@@ -345,6 +363,8 @@
    bool '    Use real mode APM BIOS call to power off' CONFIG_APM_REAL_MODE_POWER_OFF
 fi
 
+source drivers/acpi/Config.in
+
 endmenu
 
 source drivers/mtd/Config.in
@@ -474,12 +494,13 @@
 bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
 if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
    bool '  Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW
+   bool '  Compile the kernel with frame pointers' CONFIG_FRAME_POINTER
    bool '  Debug high memory support' CONFIG_DEBUG_HIGHMEM
    bool '  Debug memory allocations' CONFIG_DEBUG_SLAB
    bool '  Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
    bool '  Magic SysRq key' CONFIG_MAGIC_SYSRQ
+   bool '  Morse code panics' CONFIG_PANIC_MORSE
    bool '  Spinlock debugging' CONFIG_DEBUG_SPINLOCK
-   bool '  Compile the kernel with frame pointers' CONFIG_FRAME_POINTER
 fi
 
 endmenu
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/defconfig linux.21rc5-ac2/arch/i386/defconfig
--- linux.21rc5/arch/i386/defconfig	2003-05-28 15:08:07.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/defconfig	2003-05-28 15:31:54.000000000 +0100
@@ -56,6 +56,7 @@
 # CONFIG_MICROCODE is not set
 # CONFIG_X86_MSR is not set
 # CONFIG_X86_CPUID is not set
+# CONFIG_EDD is not set
 CONFIG_NOHIGHMEM=y
 # CONFIG_HIGHMEM4G is not set
 # CONFIG_HIGHMEM64G is not set
@@ -607,6 +608,7 @@
 CONFIG_AGP_SIS=y
 CONFIG_AGP_ALI=y
 # CONFIG_AGP_SWORKS is not set
+CONFIG_AGP_NVIDIA=y
 CONFIG_DRM=y
 # CONFIG_DRM_OLD is not set
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/acpi.c linux.21rc5-ac2/arch/i386/kernel/acpi.c
--- linux.21rc5/arch/i386/kernel/acpi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/acpi.c	2003-04-28 17:57:37.000000000 +0100
@@ -0,0 +1,578 @@
+/*
+ *  acpi.c - Architecture-Specific Low-Level ACPI Support
+ *
+ *  Copyright (C) 2001, 2002 Paul Diefenbaugh
+ *  Copyright (C) 2001 Jun Nakajima
+ *  Copyright (C) 2001 Patrick Mochel
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+#define PREFIX			"ACPI: "
+
+extern int acpi_disabled;
+
+/* --------------------------------------------------------------------------
+                              Boot-time Configuration
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_BOOT
+
+enum acpi_irq_model_id		acpi_irq_model;
+
+
+/*
+ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
+ * to map the target physical address. The problem is that set_fixmap()
+ * provides a single page, and it is possible that the page is not
+ * sufficient.
+ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
+ * i.e. until the next __va_range() call.
+ *
+ * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
+ * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
+ * count idx down while incrementing the phys address.
+ */
+char *__acpi_map_table(unsigned long phys, unsigned long size)
+{
+	unsigned long base, offset, mapped_size;
+	int idx;
+
+	if (phys + size < 8*1024*1024)
+		return __va(phys);
+
+	offset = phys & (PAGE_SIZE - 1);
+	mapped_size = PAGE_SIZE - offset;
+	set_fixmap(FIX_ACPI_END, phys);
+	base = fix_to_virt(FIX_ACPI_END);
+
+	/*
+	 * Most cases can be covered by the below.
+	 */
+	idx = FIX_ACPI_END;
+	while (mapped_size < size) {
+		if (--idx < FIX_ACPI_BEGIN)
+			return 0;	/* cannot handle this */
+		phys += PAGE_SIZE;
+		set_fixmap(idx, phys);
+		mapped_size += PAGE_SIZE;
+	}
+
+	return ((unsigned char *) base + offset);
+}
+
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+int acpi_lapic;
+
+static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+
+
+static int __init
+acpi_parse_madt (
+	unsigned long		phys_addr,
+	unsigned long		size)
+{
+	struct acpi_table_madt	*madt = NULL;
+
+	if (!phys_addr || !size)
+		return -EINVAL;
+
+	madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
+	if (!madt) {
+		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+		return -ENODEV;
+	}
+
+	if (madt->lapic_address)
+		acpi_lapic_addr = (u64) madt->lapic_address;
+
+	printk(KERN_INFO PREFIX "Local APIC address 0x%08x\n",
+		madt->lapic_address);
+
+	return 0;
+}
+
+
+static int __init
+acpi_parse_lapic (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_lapic	*processor = NULL;
+
+	processor = (struct acpi_table_lapic*) header;
+	if (!processor)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	mp_register_lapic (
+		processor->id,					/* APIC ID */
+		processor->flags.enabled);			/* Enabled? */
+
+	return 0;
+}
+
+
+static int __init
+acpi_parse_lapic_addr_ovr (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
+
+	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
+	if (!lapic_addr_ovr)
+		return -EINVAL;
+
+	acpi_lapic_addr = lapic_addr_ovr->address;
+
+	return 0;
+}
+
+#ifndef CONFIG_ACPI_HT_ONLY
+
+static int __init
+acpi_parse_lapic_nmi (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
+
+	lapic_nmi = (struct acpi_table_lapic_nmi*) header;
+	if (!lapic_nmi)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	if (lapic_nmi->lint != 1)
+		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+
+	return 0;
+}
+
+#endif /*CONFIG_ACPI_HT_ONLY*/
+
+#endif /*CONFIG_X86_LOCAL_APIC*/
+
+#ifdef CONFIG_X86_IO_APIC
+
+int acpi_ioapic;
+
+#ifndef CONFIG_ACPI_HT_ONLY
+
+static int __init
+acpi_parse_ioapic (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_ioapic *ioapic = NULL;
+
+	ioapic = (struct acpi_table_ioapic*) header;
+	if (!ioapic)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	mp_register_ioapic (
+		ioapic->id,
+		ioapic->address,
+		ioapic->global_irq_base);
+
+	return 0;
+}
+
+
+static int __init
+acpi_parse_int_src_ovr (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_int_src_ovr *intsrc = NULL;
+
+	intsrc = (struct acpi_table_int_src_ovr*) header;
+	if (!intsrc)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	mp_override_legacy_irq (
+		intsrc->bus_irq,
+		intsrc->flags.polarity,
+		intsrc->flags.trigger,
+		intsrc->global_irq);
+
+	return 0;
+}
+
+
+static int __init
+acpi_parse_nmi_src (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_nmi_src *nmi_src = NULL;
+
+	nmi_src = (struct acpi_table_nmi_src*) header;
+	if (!nmi_src)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	/* TBD: Support nmisrc entries? */
+
+	return 0;
+}
+
+#endif /*!CONFIG_ACPI_HT_ONLY*/
+#endif /*CONFIG_X86_IO_APIC*/
+
+
+static unsigned long __init
+acpi_scan_rsdp (
+	unsigned long		start,
+	unsigned long		length)
+{
+	unsigned long		offset = 0;
+	unsigned long		sig_len = sizeof("RSD PTR ") - 1;
+
+	/*
+	 * Scan all 16-byte boundaries of the physical memory region for the
+	 * RSDP signature.
+	 */
+	for (offset = 0; offset < length; offset += 16) {
+		if (strncmp((char *) (start + offset), "RSD PTR ", sig_len))
+			continue;
+		return (start + offset);
+	}
+
+	return 0;
+}
+
+
+unsigned long __init
+acpi_find_rsdp (void)
+{
+	unsigned long		rsdp_phys = 0;
+
+	/*
+	 * Scan memory looking for the RSDP signature. First search EBDA (low
+	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
+	 */
+	rsdp_phys = acpi_scan_rsdp (0, 0x400);
+	if (!rsdp_phys)
+		rsdp_phys = acpi_scan_rsdp (0xE0000, 0xFFFFF);
+
+	return rsdp_phys;
+}
+
+
+int __init
+acpi_boot_init (void)
+{
+	int			result = 0;
+
+	/*
+	 * The default interrupt routing model is PIC (8259).  This gets
+	 * overridden if IOAPICs are enumerated (below).
+	 */
+	acpi_irq_model = ACPI_IRQ_MODEL_PIC;
+
+	/*
+	 * Initialize the ACPI boot-time table parser.
+	 */
+	result = acpi_table_init();
+	if (result)
+		return result;
+
+	result = acpi_blacklisted();
+	if (result) {
+		acpi_disabled = 1;
+		return result;
+	}
+	else
+		printk(KERN_NOTICE PREFIX "BIOS passes blacklist\n");
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+	/*
+	 * MADT
+	 * ----
+	 * Parse the Multiple APIC Description Table (MADT), if it exists.
+	 * Note that this table provides platform SMP configuration
+	 * information -- the successor to MPS tables.
+	 */
+
+	result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
+	if (!result) {
+		printk(KERN_WARNING PREFIX "MADT not present\n");
+		return 0;
+	}
+	else if (result < 0) {
+		printk(KERN_ERR PREFIX "Error parsing MADT\n");
+		return result;
+	}
+	else if (result > 1)
+		printk(KERN_WARNING PREFIX "Multiple MADT tables exist\n");
+
+	/*
+	 * Local APIC
+	 * ----------
+	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
+	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
+	 */
+
+	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
+	if (result < 0) {
+		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
+		return result;
+	}
+
+	mp_register_lapic_address(acpi_lapic_addr);
+
+	result = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic);
+	if (!result) {
+		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+		/* TBD: Cleanup to allow fallback to MPS */
+		return -ENODEV;
+	}
+	else if (result < 0) {
+		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+		/* TBD: Cleanup to allow fallback to MPS */
+		return result;
+	}
+
+#ifndef CONFIG_ACPI_HT_ONLY
+	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
+	if (result < 0) {
+		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+		/* TBD: Cleanup to allow fallback to MPS */
+		return result;
+	}
+#endif /*!CONFIG_ACPI_HT_ONLY*/
+
+	acpi_lapic = 1;
+
+#endif /*CONFIG_X86_LOCAL_APIC*/
+
+#ifdef CONFIG_X86_IO_APIC
+#ifndef CONFIG_ACPI_HT_ONLY
+
+	/*
+	 * I/O APIC
+	 * --------
+	 */
+
+	result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic);
+	if (!result) {
+		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+		return -ENODEV;
+	}
+	else if (result < 0) {
+		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+		return result;
+	}
+
+	/* Build a default routing table for legacy (ISA) interrupts. */
+	mp_config_acpi_legacy_irqs();
+
+	result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
+	if (result < 0) {
+		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
+		/* TBD: Cleanup to allow fallback to MPS */
+		return result;
+	}
+
+	result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
+	if (result < 0) {
+		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+		/* TBD: Cleanup to allow fallback to MPS */
+		return result;
+	}
+
+	acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
+
+	acpi_ioapic = 1;
+
+#endif /*!CONFIG_ACPI_HT_ONLY*/
+#endif /*CONFIG_X86_IO_APIC*/
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	if (acpi_lapic && acpi_ioapic)
+		smp_found_config = 1;
+#endif
+
+	return 0;
+}
+
+#endif /*CONFIG_ACPI_BOOT*/
+
+
+/* --------------------------------------------------------------------------
+                              Low-Level Sleep Support
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_SLEEP
+
+#define DEBUG
+
+#ifdef DEBUG
+#include
+#endif
+
+/* address in low memory of the wakeup routine. */
+unsigned long acpi_wakeup_address = 0;
+
+/* new page directory that we will be using */
+static pmd_t *pmd;
+
+/* saved page directory */
+static pmd_t saved_pmd;
+
+/* page which we'll use for the new page directory */
+static pte_t *ptep;
+
+extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
+
+/*
+ * acpi_create_identity_pmd
+ *
+ * Create a new, identity mapped pmd.
+ *
+ * Do this by creating new page directory, and marking all the pages as R/W
+ * Then set it as the new Page Middle Directory.
+ * And, of course, flush the TLB so it takes effect.
+ *
+ * We save the address of the old one, for later restoration.
+ */
+static void acpi_create_identity_pmd (void)
+{
+	pgd_t *pgd;
+	int i;
+
+	ptep = (pte_t*)__get_free_page(GFP_KERNEL);
+
+	/* fill page with low mapping */
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));
+
+	pgd = pgd_offset(current->active_mm, 0);
+	pmd = pmd_alloc(current->mm,pgd, 0);
+
+	/* save the old pmd */
+	saved_pmd = *pmd;
+
+	/* set the new one */
+	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(ptep)));
+
+	/* flush the TLB */
+	local_flush_tlb();
+}
+
+/*
+ * acpi_restore_pmd
+ *
+ * Restore the old pmd saved by acpi_create_identity_pmd and
+ * free the page that said function alloc'd
+ */
+static void acpi_restore_pmd (void)
+{
+	set_pmd(pmd, saved_pmd);
+	local_flush_tlb();
+	free_page((unsigned long)ptep);
+}
+
+/**
+ * acpi_save_state_mem - save kernel state
+ *
+ * Create an identity mapped page table and copy the wakeup routine to
+ * low memory.
+ */
+int acpi_save_state_mem (void)
+{
+	acpi_create_identity_pmd();
+	acpi_copy_wakeup_routine(acpi_wakeup_address);
+
+	return 0;
+}
+
+/**
+ * acpi_save_state_disk - save kernel state to disk
+ *
+ */
+int acpi_save_state_disk (void)
+{
+	return 1;
+}
+
+/*
+ * acpi_restore_state
+ */
+void acpi_restore_state_mem (void)
+{
+	acpi_restore_pmd();
+}
+
+/**
+ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
+ *
+ * We allocate a page in low memory for the wakeup
+ * routine for when we come back from a sleep state. The
+ * runtime allocator allows specification of <16M pages, but not
+ * <1M pages.
+ */
+void __init acpi_reserve_bootmem(void)
+{
+	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
+	printk(KERN_DEBUG "ACPI: have wakeup address 0x%8.8lx\n", acpi_wakeup_address);
+}
+
+void do_suspend_lowlevel_s4bios(int resume)
+{
+	if (!resume) {
+		save_processor_context();
+		acpi_save_register_state((unsigned long)&&acpi_sleep_done);
+		acpi_enter_sleep_state_s4bios();
+		return;
+	}
+acpi_sleep_done:
+	restore_processor_context();
+}
+
+
+#endif /*CONFIG_ACPI_SLEEP*/
+
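[Editorial sketch, not part of the patch: __acpi_map_table() above only guarantees one fixmap page up front, so a caller that needs a whole table maps twice -- first the fixed-size header to learn the real length, then the full table. The acpitable.c code removed below uses exactly this idiom with its __va_range() predecessor. Assuming 'phys' holds a table's physical address:]

	acpi_table_header *header;

	/* map just the header, read the true length, then remap in full */
	header = (acpi_table_header *) __acpi_map_table(phys, sizeof(*header));
	if (header)
		header = (acpi_table_header *) __acpi_map_table(phys, header->length);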
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/acpitable.c linux.21rc5-ac2/arch/i386/kernel/acpitable.c
--- linux.21rc5/arch/i386/kernel/acpitable.c	2003-05-28 15:08:06.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/acpitable.c	1970-01-01 01:00:00.000000000 +0100
@@ -1,554 +0,0 @@
-/*
- *  acpitable.c - IA32-specific ACPI boot-time initialization (Revision: 1)
- *
- *  Copyright (C) 1999 Andrew Henroid
- *  Copyright (C) 2001 Richard Schaal
- *  Copyright (C) 2001 Paul Diefenbaugh
- *  Copyright (C) 2001 Jun Nakajima
- *  Copyright (C) 2001 Arjan van de Ven
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * $Id: acpitable.c,v 1.7 2001/11/04 12:21:18 fenrus Exp $
- */
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "acpitable.h"
-
-static acpi_table_handler acpi_boot_ops[ACPI_TABLE_COUNT];
-
-
-static unsigned char __init
-acpi_checksum(void *buffer, int length)
-{
-	int i;
-	unsigned char *bytebuffer;
-	unsigned char sum = 0;
-
-	if (!buffer || length <= 0)
-		return 0;
-
-	bytebuffer = (unsigned char *) buffer;
-
-	for (i = 0; i < length; i++)
-		sum += *(bytebuffer++);
-
-	return sum;
-}
-
-static void __init
-acpi_print_table_header(acpi_table_header * header)
-{
-	if (!header)
-		return;
-
-	printk(KERN_INFO "ACPI table found: %.4s v%d [%.6s %.8s %d.%d]\n",
-	       header->signature, header->revision, header->oem_id,
-	       header->oem_table_id, header->oem_revision >> 16,
-	       header->oem_revision & 0xffff);
-
-	return;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_scan_memory_for_rsdp
- *
- * PARAMETERS:  address       - Starting pointer for search
- *              length        - Maximum length to search
- *
- * RETURN:      Pointer to the RSDP if found and valid, otherwise NULL.
- *
- * DESCRIPTION: Search a block of memory for the RSDP signature
- *
- ******************************************************************************/
-
-static void *__init
-acpi_tb_scan_memory_for_rsdp(void *address, int length)
-{
-	u32 offset;
-
-	if (length <= 0)
-		return NULL;
-
-	/* Search from given start addr for the requested length  */
-
-	offset = 0;
-
-	while (offset < length) {
-		/* The signature must match and the checksum must be correct */
-		if (strncmp(address, RSDP_SIG, sizeof(RSDP_SIG) - 1) == 0 &&
-		    acpi_checksum(address, RSDP_CHECKSUM_LENGTH) == 0) {
-			/* If so, we have found the RSDP */
-			printk(KERN_INFO "ACPI: RSDP located at physical address %p\n",
-			       address);
-			return address;
-		}
-		offset += RSDP_SCAN_STEP;
-		address += RSDP_SCAN_STEP;
-	}
-
-	/* Searched entire block, no RSDP was found */
-	printk(KERN_INFO "ACPI: Searched entire block, no RSDP was found.\n");
-	return NULL;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_find_root_pointer
- *
- * PARAMETERS:  none
- *
- * RETURN:      physical address of the RSDP
- *
- * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
- *              pointer structure.  If it is found, set *RSDP to point to it.
- *
- *              NOTE: The RSDP must be either in the first 1_k of the Extended
- *              BIOS Data Area or between E0000 and FFFFF (ACPI 1.0 section
- *              5.2.2; assertion #421).
- *
- ******************************************************************************/
-
-static struct acpi_table_rsdp * __init
-acpi_find_root_pointer(void)
-{
-	struct acpi_table_rsdp * rsdp;
-
-	/*
-	 * Physical address is given
-	 */
-	/*
-	 * Region 1) Search EBDA (low memory) paragraphs
-	 */
-	rsdp = acpi_tb_scan_memory_for_rsdp(__va(LO_RSDP_WINDOW_BASE),
-					    LO_RSDP_WINDOW_SIZE);
-
-	if (rsdp)
-		return rsdp;
-
-	/*
-	 * Region 2) Search upper memory: 16-byte boundaries in E0000h-F0000h
-	 */
-	rsdp = acpi_tb_scan_memory_for_rsdp(__va(HI_RSDP_WINDOW_BASE),
-					    HI_RSDP_WINDOW_SIZE);
-
-
-
-	if (rsdp)
-		return rsdp;
-
-	printk(KERN_ERR "ACPI: System description tables not found\n");
-	return NULL;
-}
-
-
-/*
- * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
- * to map the target physical address. The problem is that set_fixmap()
- * provides a single page, and it is possible that the page is not
- * sufficient.
- * By using this area, we can map up to MAX_IO_APICS pages temporarily,
- * i.e. until the next __va_range() call.
- *
- * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
- * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
- * count idx down while incrementing the phys address.
- */
-static __init char *
-__va_range(unsigned long phys, unsigned long size)
-{
-	unsigned long base, offset, mapped_size;
-	int idx;
-
-	offset = phys & (PAGE_SIZE - 1);
-	mapped_size = PAGE_SIZE - offset;
-	set_fixmap(FIX_IO_APIC_BASE_END, phys);
-	base = fix_to_virt(FIX_IO_APIC_BASE_END);
-	dprintk("__va_range(0x%lx, 0x%lx): idx=%d mapped at %lx\n", phys, size,
-		FIX_IO_APIC_BASE_END, base);
-
-	/*
-	 * Most cases can be covered by the below.
-	 */
-	idx = FIX_IO_APIC_BASE_END;
-	while (mapped_size < size) {
-		if (--idx < FIX_IO_APIC_BASE_0)
-			return 0;	/* cannot handle this */
-		phys += PAGE_SIZE;
-		set_fixmap(idx, phys);
-		mapped_size += PAGE_SIZE;
-	}
-
-	return ((unsigned char *) base + offset);
-}
-
-static int __init acpi_tables_init(void)
-{
-	int result = -ENODEV;
-	acpi_table_header *header = NULL;
-	struct acpi_table_rsdp *rsdp = NULL;
-	struct acpi_table_rsdt *rsdt = NULL;
-	struct acpi_table_rsdt saved_rsdt;
-	int tables = 0;
-	int type = 0;
-	int i = 0;
-
-
-	rsdp = (struct acpi_table_rsdp *) acpi_find_root_pointer();
-
-	if (!rsdp)
-		return -ENODEV;
-
-	printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
-	       rsdp->oem_id);
-
-	if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) {
-		printk(KERN_WARNING "RSDP table signature incorrect\n");
-		return -EINVAL;
-	}
-
-	rsdt = (struct acpi_table_rsdt *)
-	    __va_range(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt));
-
-	if (!rsdt) {
-		printk(KERN_WARNING "ACPI: Invalid root system description tables (RSDT)\n");
-		return -ENODEV;
-	}
-
-	header = & rsdt->header;
-	acpi_print_table_header(header);
-
-	if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) {
-		printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
-		return -ENODEV;
-	}
-
-	/*
-	 * The number of tables is computed by taking the
-	 * size of all entries (header size minus total
-	 * size of RSDT) divided by the size of each entry
-	 * (4-byte table pointers).
-	 */
-	tables = (header->length - sizeof(acpi_table_header)) / 4;
-
-	memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
-
-	if (saved_rsdt.header.length > sizeof(saved_rsdt)) {
-		printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n", saved_rsdt.header.length);
-		return -ENODEV;
-	}
-
-	for (i = 0; i < tables; i++) {
-		/* Map in header, then map in full table length. */
-		header = (acpi_table_header *)
-		    __va_range(saved_rsdt.entry[i],
-			       sizeof(acpi_table_header));
-		if (!header)
-			break;
-		header = (acpi_table_header *)
-		    __va_range(saved_rsdt.entry[i], header->length);
-		if (!header)
-			break;
-
-		acpi_print_table_header(header);
-
-		if (acpi_checksum(header,header->length)) {
-			printk(KERN_WARNING "ACPI %s has invalid checksum\n",
-			       acpi_table_signatures[i]);
-			continue;
-		}
-
-		for (type = 0; type < ACPI_TABLE_COUNT; type++)
-			if (!strncmp((char *) &header->signature,
-				     acpi_table_signatures[type],strlen(acpi_table_signatures[type])))
-				break;
-
-		if (type >= ACPI_TABLE_COUNT) {
-			printk(KERN_WARNING "ACPI: Unsupported table %.4s\n",
-			       header->signature);
-			continue;
-		}
-
-
-		if (!acpi_boot_ops[type])
-			continue;
-
-		result = acpi_boot_ops[type] (header,
-					      (unsigned long) saved_rsdt.
-					      entry[i]);
-	}
-
-	return result;
-}
-
-static int total_cpus __initdata = 0;
-int have_acpi_tables;
-
-extern void __init MP_processor_info(struct mpc_config_processor *);
-
-static void __init
-acpi_parse_lapic(struct acpi_table_lapic *local_apic)
-{
-	struct mpc_config_processor proc_entry;
-	int ix = 0;
-
-	if (!local_apic)
-		return;
-
-	printk(KERN_INFO "LAPIC (acpi_id[0x%04x] id[0x%x] enabled[%d])\n",
-	       local_apic->acpi_id, local_apic->id, local_apic->flags.enabled);
-
-	printk(KERN_INFO "CPU %d (0x%02x00)", total_cpus, local_apic->id);
-
-	if (local_apic->flags.enabled) {
-		printk(" enabled");
-		ix = local_apic->id;
-		if (ix >= MAX_APICS) {
-			printk(KERN_WARNING
-			       "Processor #%d INVALID - (Max ID: %d).\n", ix,
-			       MAX_APICS);
-			return;
-		}
-		/*
-		 * Fill in the info we want to save.  Not concerned about
-		 * the processor ID.  Processor features aren't present in
-		 * the table.
-		 */
-		proc_entry.mpc_type = MP_PROCESSOR;
-		proc_entry.mpc_apicid = local_apic->id;
-		proc_entry.mpc_cpuflag = CPU_ENABLED;
-		if (proc_entry.mpc_apicid == boot_cpu_physical_apicid) {
-			printk(" (BSP)");
-			proc_entry.mpc_cpuflag |= CPU_BOOTPROCESSOR;
-		}
-		proc_entry.mpc_cpufeature =
-		    (boot_cpu_data.x86 << 8) |
-		    (boot_cpu_data.x86_model << 4) |
-		    boot_cpu_data.x86_mask;
-		proc_entry.mpc_featureflag = boot_cpu_data.x86_capability[0];
-		proc_entry.mpc_reserved[0] = 0;
-		proc_entry.mpc_reserved[1] = 0;
-		proc_entry.mpc_apicver = 0x10;	/* integrated APIC */
-		MP_processor_info(&proc_entry);
-	} else {
-		printk(" disabled");
-	}
-	printk("\n");
-
-	total_cpus++;
-	return;
-}
-
-static void __init
-acpi_parse_ioapic(struct acpi_table_ioapic *ioapic)
-{
-
-	if (!ioapic)
-		return;
-
-	printk(KERN_INFO
-	       "IOAPIC (id[0x%x] address[0x%x] global_irq_base[0x%x])\n",
-	       ioapic->id, ioapic->address, ioapic->global_irq_base);
-
-	if (nr_ioapics >= MAX_IO_APICS) {
-		printk(KERN_WARNING
-		       "Max # of I/O APICs (%d) exceeded (found %d).\n",
-		       MAX_IO_APICS, nr_ioapics);
-/*		panic("Recompile kernel with bigger MAX_IO_APICS!\n"); */
-	}
-}
-
-
-/* Interrupt source overrides inform the machine about exceptions
-   to the normal "PIC" mode interrupt routing */
-
-static void __init
-acpi_parse_int_src_ovr(struct acpi_table_int_src_ovr *intsrc)
-{
-	if (!intsrc)
-		return;
-
-	printk(KERN_INFO
-	       "INT_SRC_OVR (bus[%d] irq[0x%x] global_irq[0x%x] polarity[0x%x] trigger[0x%x])\n",
-	       intsrc->bus, intsrc->bus_irq, intsrc->global_irq,
-	       intsrc->flags.polarity, intsrc->flags.trigger);
-}
-
-/*
- * At this point, we look at the interrupt assignment entries in the MPS
- * table.
- */
-
-static void __init acpi_parse_nmi_src(struct acpi_table_nmi_src *nmisrc)
-{
-	if (!nmisrc)
-		return;
-
-	printk(KERN_INFO
-	       "NMI_SRC (polarity[0x%x] trigger[0x%x] global_irq[0x%x])\n",
-	       nmisrc->flags.polarity, nmisrc->flags.trigger,
-	       nmisrc->global_irq);
-
-}
-static void __init
-acpi_parse_lapic_nmi(struct acpi_table_lapic_nmi *localnmi)
-{
-	if (!localnmi)
-		return;
-
-	printk(KERN_INFO
-	       "LAPIC_NMI (acpi_id[0x%04x] polarity[0x%x] trigger[0x%x] lint[0x%x])\n",
-	       localnmi->acpi_id, localnmi->flags.polarity,
-	       localnmi->flags.trigger, localnmi->lint);
-}
-static void __init
-acpi_parse_lapic_addr_ovr(struct acpi_table_lapic_addr_ovr *lapic_addr_ovr)
-{
-	if (!lapic_addr_ovr)
-		return;
-
-	printk(KERN_INFO "LAPIC_ADDR_OVR (address[0x%lx])\n",
-	       (unsigned long) lapic_addr_ovr->address);
-
-}
-
-static void __init
-acpi_parse_plat_int_src(struct acpi_table_plat_int_src *plintsrc)
-{
-	if (!plintsrc)
-		return;
-
-	printk(KERN_INFO
-	       "PLAT_INT_SRC (polarity[0x%x] trigger[0x%x] type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
-	       plintsrc->flags.polarity, plintsrc->flags.trigger,
-	       plintsrc->type, plintsrc->id, plintsrc->eid,
-	       plintsrc->iosapic_vector, plintsrc->global_irq);
-}
-static int __init
-acpi_parse_madt(acpi_table_header * header, unsigned long phys)
-{
-
-	struct acpi_table_madt *madt;
-	acpi_madt_entry_header *entry_header;
-	int table_size;
-
-	madt = (struct acpi_table_madt *) __va_range(phys, header->length);
-
-	if (!madt)
-		return -EINVAL;
-
-	table_size = (int) (header->length - sizeof(*madt));
-	entry_header =
-	    (acpi_madt_entry_header *) ((void *) madt + sizeof(*madt));
-
-	while (entry_header && (table_size > 0)) {
-		switch (entry_header->type) {
-		case ACPI_MADT_LAPIC:
-			acpi_parse_lapic((struct acpi_table_lapic *)
-					 entry_header);
-			break;
-		case ACPI_MADT_IOAPIC:
-			acpi_parse_ioapic((struct acpi_table_ioapic *)
-					  entry_header);
-			break;
-		case ACPI_MADT_INT_SRC_OVR:
-			acpi_parse_int_src_ovr((struct acpi_table_int_src_ovr *)
-					       entry_header);
-			break;
-		case ACPI_MADT_NMI_SRC:
-			acpi_parse_nmi_src((struct acpi_table_nmi_src *)
-					   entry_header);
-			break;
-		case ACPI_MADT_LAPIC_NMI:
-			acpi_parse_lapic_nmi((struct acpi_table_lapic_nmi *)
-					     entry_header);
-			break;
-		case ACPI_MADT_LAPIC_ADDR_OVR:
-			acpi_parse_lapic_addr_ovr((struct
-						   acpi_table_lapic_addr_ovr *)
-						  entry_header);
-			break;
-		case ACPI_MADT_PLAT_INT_SRC:
-			acpi_parse_plat_int_src((struct acpi_table_plat_int_src
-						 *) entry_header);
-			break;
-		default:
-			printk(KERN_WARNING
-			       "Unsupported MADT entry type 0x%x\n",
-			       entry_header->type);
-			break;
-		}
-		table_size -= entry_header->length;
-		entry_header =
-		    (acpi_madt_entry_header *) ((void *) entry_header +
-						entry_header->length);
-	}
-
-	if (!total_cpus) {
-		printk("ACPI: No Processors found in the APCI table.\n");
-		return -EINVAL;
-	}
-
-	printk(KERN_INFO "%d CPUs total\n", total_cpus);
-
-	if (madt->lapic_address)
-		mp_lapic_addr = madt->lapic_address;
-	else
-		mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-
-	printk(KERN_INFO "Local APIC address %x\n", madt->lapic_address);
-
-	return 0;
-}
-
-extern int enable_acpi_smp_table;
-
-/*
- * Configure the processor info using MADT in the ACPI tables. If we fail to
- * configure that, then we use the MPS tables.
- */
-void __init
-config_acpi_tables(void)
-{
-
-	memset(&acpi_boot_ops, 0, sizeof(acpi_boot_ops));
-	acpi_boot_ops[ACPI_APIC] = acpi_parse_madt;
-
-	/*
-	 * Only do this when requested, either because of CPU/Bios type or from the command line
-	 */
-
-	if (enable_acpi_smp_table && !acpi_tables_init()) {
-		have_acpi_tables = 1;
-		printk("Enabling the CPU's according to the ACPI table\n");
-	}
-}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/acpitable.h linux.21rc5-ac2/arch/i386/kernel/acpitable.h
--- linux.21rc5/arch/i386/kernel/acpitable.h	2003-05-28 15:08:07.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/acpitable.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,260 +0,0 @@
-/*
- *  acpitable.c - IA32-specific ACPI boot-time initialization (Revision: 1)
- *
- *  Copyright (C) 1999 Andrew Henroid
- *  Copyright (C) 2001 Richard Schaal
- *  Copyright (C) 2001 Paul Diefenbaugh
- *  Copyright (C) 2001 Jun Nakajima
- *  Copyright (C) 2001 Arjan van de Ven
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * $Id: acpitable.h,v 1.3 2001/11/03 22:41:34 fenrus Exp $
- */
-
-/*
- * The following codes are cut&pasted from drivers/acpi. Part of the code
- * there can be not updated or delivered yet.
- * To avoid conflicts when CONFIG_ACPI is defined, the following codes are
- * modified so that they are self-contained in this file.
- * -- jun
- */
-
-#ifndef _HEADER_ACPITABLE_H_
-#define _HEADER_ACPITABLE_H_
-
-#define dprintk printk
-typedef unsigned int ACPI_TBLPTR;
-
-typedef struct {		/* ACPI common table header */
-	char signature[4];	/* identifies type of table */
-	u32 length;		/* length of table,
-				   in bytes, * including header */
-	u8 revision;		/* specification minor version # */
-	u8 checksum;		/* to make sum of entire table == 0 */
-	char oem_id[6];		/* OEM identification */
-	char oem_table_id[8];	/* OEM table identification */
-	u32 oem_revision;	/* OEM revision number */
-	char asl_compiler_id[4];	/* ASL compiler vendor ID */
-	u32 asl_compiler_revision;	/* ASL compiler revision number */
-} acpi_table_header __attribute__ ((packed));;
-
-enum {
-	ACPI_APIC = 0,
-	ACPI_BOOT,
-	ACPI_DBGP,
-	ACPI_DSDT,
-	ACPI_ECDT,
-	ACPI_ETDT,
-	ACPI_FACP,
-	ACPI_FACS,
-	ACPI_OEMX,
-	ACPI_PSDT,
-	ACPI_SBST,
-	ACPI_SLIT,
-	ACPI_SPCR,
-	ACPI_SRAT,
-	ACPI_SSDT,
-	ACPI_SPMI,
-	ACPI_XSDT,
-	ACPI_TABLE_COUNT
-};
-
-static char *acpi_table_signatures[ACPI_TABLE_COUNT] = {
-	"APIC",
-	"BOOT",
-	"DBGP",
-	"DSDT",
-	"ECDT",
-	"ETDT",
-	"FACP",
-	"FACS",
-	"OEM",
-	"PSDT",
-	"SBST",
-	"SLIT",
-	"SPCR",
-	"SRAT",
-	"SSDT",
-	"SPMI",
-	"XSDT"
-};
-
-struct acpi_table_madt {
-	acpi_table_header header;
-	u32 lapic_address;
-	struct {
-		u32 pcat_compat:1;
-		u32 reserved:31;
-	} flags __attribute__ ((packed));
-} __attribute__ ((packed));;
-
-enum {
-	ACPI_MADT_LAPIC = 0,
-	ACPI_MADT_IOAPIC,
-	ACPI_MADT_INT_SRC_OVR,
-	ACPI_MADT_NMI_SRC,
-	ACPI_MADT_LAPIC_NMI,
-	ACPI_MADT_LAPIC_ADDR_OVR,
-	ACPI_MADT_IOSAPIC,
-	ACPI_MADT_LSAPIC,
-	ACPI_MADT_PLAT_INT_SRC,
-	ACPI_MADT_ENTRY_COUNT
-};
-
-#define RSDP_SIG "RSD PTR "
-#define RSDT_SIG "RSDT"
-
-#define ACPI_DEBUG_PRINT(pl)
-
-#define ACPI_MEMORY_MODE 0x01
-#define ACPI_LOGICAL_ADDRESSING 0x00
-#define ACPI_PHYSICAL_ADDRESSING 0x01
-
-#define LO_RSDP_WINDOW_BASE 0	/* Physical Address */
-#define HI_RSDP_WINDOW_BASE 0xE0000	/* Physical Address */
-#define LO_RSDP_WINDOW_SIZE 0x400
-#define HI_RSDP_WINDOW_SIZE 0x20000
-#define RSDP_SCAN_STEP 16
-#define RSDP_CHECKSUM_LENGTH 20
-
-typedef int (*acpi_table_handler) (acpi_table_header * header, unsigned long);
-
-struct acpi_table_rsdp {
-	char signature[8];
-	u8 checksum;
-	char oem_id[6];
-	u8 revision;
-	u32 rsdt_address;
-} __attribute__ ((packed));
-
-struct acpi_table_rsdt {
-	acpi_table_header header;
-	u32 entry[ACPI_TABLE_COUNT];
-} __attribute__ ((packed));
-
-typedef struct {
-	u8 type;
-	u8 length;
-} acpi_madt_entry_header __attribute__ ((packed));
-
-typedef struct {
-	u16 polarity:2;
-	u16 trigger:2;
-	u16 reserved:12;
-} acpi_madt_int_flags __attribute__ ((packed));
-
-struct acpi_table_lapic {
-	acpi_madt_entry_header header;
-	u8 acpi_id;
-	u8 id;
-	struct {
-		u32 enabled:1;
-		u32 reserved:31;
-	} flags __attribute__ ((packed));
-} __attribute__ ((packed));
-
-struct acpi_table_ioapic {
-	acpi_madt_entry_header header;
-	u8 id;
-	u8 reserved;
-	u32 address;
-	u32 global_irq_base;
-} __attribute__ ((packed));
-
-struct acpi_table_int_src_ovr {
-	acpi_madt_entry_header header;
-	u8 bus;
-	u8 bus_irq;
-	u32 global_irq;
-	acpi_madt_int_flags flags;
-} __attribute__ ((packed));
-
-struct acpi_table_nmi_src {
-	acpi_madt_entry_header header;
-	acpi_madt_int_flags flags;
-	u32 global_irq;
-} __attribute__ ((packed));
-
-struct acpi_table_lapic_nmi {
-	acpi_madt_entry_header header;
-	u8 acpi_id;
-	acpi_madt_int_flags flags;
-	u8 lint;
-} __attribute__ ((packed));
-
-struct acpi_table_lapic_addr_ovr {
-	acpi_madt_entry_header header;
-	u8 reserved[2];
-	u64 address;
-} __attribute__ ((packed));
-
-struct acpi_table_iosapic {
-	acpi_madt_entry_header header;
-	u8 id;
-	u8 reserved;
-	u32 global_irq_base;
-	u64 address;
-} __attribute__ ((packed));
-
-struct acpi_table_lsapic {
-	acpi_madt_entry_header header;
-	u8 acpi_id;
-	u8 id;
-	u8 eid;
-	u8 reserved[3];
-	struct {
-		u32 enabled:1;
-		u32 reserved:31;
-	} flags;
-} __attribute__ ((packed));
-
-struct acpi_table_plat_int_src {
-	acpi_madt_entry_header header;
-	acpi_madt_int_flags flags;
-	u8 type;
-	u8 id;
-	u8 eid;
-	u8 iosapic_vector;
-	u32 global_irq;
-	u32 reserved;
-} __attribute__ ((packed));
-
-/*
- * ACPI Table Descriptor.  One per ACPI table
- */
-typedef struct acpi_table_desc {
-	struct acpi_table_desc *prev;
-	struct acpi_table_desc *next;
-	struct acpi_table_desc *installed_desc;
-	acpi_table_header *pointer;
-	void *base_pointer;
-	u8 *aml_pointer;
-	u64 physical_address;
-	u32 aml_length;
-	u32 length;
-	u32 count;
-	u16 table_id;
-	u8 type;
-	u8 allocation;
-	u8 loaded_into_namespace;
-
-} acpi_table_desc __attribute__ ((packed));;
-
-#endif
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/acpi_wakeup.S linux.21rc5-ac2/arch/i386/kernel/acpi_wakeup.S
--- linux.21rc5/arch/i386/kernel/acpi_wakeup.S	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/acpi_wakeup.S	2003-04-28 17:57:37.000000000 +0100
@@ -0,0 +1,145 @@
+
+.text
+#include
+#include
+
+
+ALIGN
+wakeup_start:
+wakeup_code:
+	wakeup_code_start = .
+	.code16
+
+	cli
+	cld
+
+	# setup data segment
+	movw	%cs, %ax
+
+	addw	$(wakeup_data - wakeup_code) >> 4, %ax
+	movw	%ax, %ds
+	movw	%ax, %ss
+	# Private stack is needed for ASUS board
+	mov	$(wakeup_stack - wakeup_data), %sp
+
+	# set up page table
+	movl	(real_save_cr3 - wakeup_data), %eax
+	movl	%eax, %cr3
+
+	# make sure %cr4 is set correctly (features, etc)
+	movl	(real_save_cr4 - wakeup_data), %eax
+	movl	%eax, %cr4
+
+	# need a gdt
+	lgdt	real_save_gdt - wakeup_data
+
+	# Flush the prefetch queue
+	jmp	1f
+1:
+
+	movl	%cr0, %eax
+	orl	$0x80000001, %eax
+	movl	%eax, %cr0
+
+	ljmpl	$__KERNEL_CS,$SYMBOL_NAME(wakeup_pmode_return)
+
+	.code32
+	ALIGN
+
+.org	0x100
+wakeup_data:
+		.word 0
+real_save_gdt:	.word 0
+		.long 0
+real_save_cr3:	.long 0
+real_save_cr4:	.long 0
+
+.org	0x300
+wakeup_stack:
+wakeup_end:
+
+wakeup_pmode_return:
+	# restore data segment
+	movl	$__KERNEL_DS, %eax
+	movw	%ax, %ds
+	movw	%ax, %es
+
+	# and restore the stack
+	movw	%ax, %ss
+	movl	saved_esp, %esp
+
+	# restore other segment registers
+	xorl	%eax, %eax
+	movw	%ax, %fs
+	movw	%ax, %gs
+
+	# reload the gdt, as we need the full 32 bit address
+	lgdt	saved_gdt
+	lidt	saved_idt
+	lldt	saved_ldt
+
+	# restore the other general registers
+	movl	saved_ebx, %ebx
+	movl	saved_edi, %edi
+	movl	saved_esi, %esi
+	movl	saved_ebp, %ebp
+
+	# jump to place where we left off
+	movl	saved_eip,%eax
+	jmp	*%eax
+
+##
+# acpi_copy_wakeup_routine
+#
+# Copy the above routine to low memory.
+#
+# Parameters:
+# %eax:	place to copy wakeup routine to
+#
+# Returned address is location of code in low memory (past data and stack)
+#
+ENTRY(acpi_copy_wakeup_routine)
+
+	pushl	%esi
+	pushl	%edi
+
+	sgdt	saved_gdt
+	sidt	saved_idt
+	sldt	saved_ldt
+	str	saved_tss
+
+	movl	%eax, %edi
+	leal	wakeup_start, %esi
+	movl	$(wakeup_end - wakeup_start) >> 2, %ecx
+
+	rep ;	movsl
+
+	movl	%cr3, %edx
+	movl	%edx, real_save_cr3 - wakeup_start (%eax)
+	movl	%cr4, %edx
+	movl	%edx, real_save_cr4 - wakeup_start (%eax)
+	sgdt	real_save_gdt - wakeup_start (%eax)
+
+	# restore the regs we used
+	popl	%edi
+	popl	%esi
+	ret
+
+
+.data
+ALIGN
+# saved registers
+saved_gdt:	.long	0,0
+saved_idt:	.long	0,0
+saved_ldt:	.long	0
+saved_tss:	.long	0
+saved_cr0:	.long	0
+
+ENTRY(saved_ebp)	.long	0
+ENTRY(saved_esi)	.long	0
+ENTRY(saved_edi)	.long	0
+ENTRY(saved_ebx)	.long	0
+
+ENTRY(saved_eip)	.long	0
+ENTRY(saved_esp)	.long	0
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/dmi_scan.c linux.21rc5-ac2/arch/i386/kernel/dmi_scan.c
--- linux.21rc5/arch/i386/kernel/dmi_scan.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/dmi_scan.c	2003-05-01 14:56:41.000000000 +0100
@@ -415,8 +415,10 @@
  */
 
 extern int skip_ioapic_setup;
+#ifdef CONFIG_PCI
 extern int broken_440gx_bios;
 extern unsigned int pci_probe;
+#endif
 static __init int broken_pirq(struct dmi_blacklist *d)
 {
 	printk(KERN_INFO " *** Possibly defective BIOS detected (irqtable)\n");
@@ -483,6 +485,18 @@
 }
 
 /*
+ * Exploding PnPBIOS. Don't yet know if it's the BIOS or us for
+ * some entries
+ */
+
+static __init int exploding_pnp_bios(struct dmi_blacklist *d)
+{
+	printk(KERN_WARNING "%s detected. Disabling PnPBIOS\n", d->ident);
+	dmi_broken |= BROKEN_PNP_BIOS;
+	return 0;
+}
+
+/*
  * Simple "print if true" callback
  */
 
@@ -692,6 +706,13 @@
 			NO_MATCH, NO_MATCH
 			} },
 
+	{ exploding_pnp_bios, "Higraded P14H", {	/* BIOSPnP problem */
+			MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+			MATCH(DMI_BIOS_VERSION, "07.00T"),
+			MATCH(DMI_SYS_VENDOR, "Higraded"),
+			MATCH(DMI_PRODUCT_NAME, "P14H")
+			} },
+
 	/* Machines which have problems handling enabled local APICs */
 
 	{ local_apic_kills_bios, "Dell Inspiron", {
@@ -774,7 +795,6 @@
 			MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
 			MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736")
 			} },
-
 	/*
 	 * Generic per vendor APM settings
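[Editorial sketch, not part of the patch: the edd.c file added below exposes its data through 2.4-style read_proc handlers -- each edd_show_*() routine fills a page buffer and finishes with proc_calc_metrics(). The registration side falls past where this extract is truncated; under the stock 2.4 procfs API it would look roughly like the following, where the "bios" directory name and init path are assumptions and edd_show_version()/edd_devices[] come from the code below:]

	#include <linux/proc_fs.h>

	static int __init edd_proc_sketch(void)
	{
		struct proc_dir_entry *dir, *entry;

		dir = proc_mkdir("bios", NULL);		/* parent directory; name assumed */
		if (!dir)
			return -ENOMEM;
		/* one read-only entry per attribute, handler + device data */
		entry = create_proc_read_entry("version", 0444, dir,
					       edd_show_version,
					       edd_devices[0]->info);
		return entry ? 0 : -ENOMEM;
	}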
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/edd.c linux.21rc5-ac2/arch/i386/kernel/edd.c
--- linux.21rc5/arch/i386/kernel/edd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/edd.c	2003-04-22 16:44:36.000000000 +0100
@@ -0,0 +1,672 @@
+/*
+ * linux/arch/i386/kernel/edd.c
+ *  Copyright (C) 2002 Dell Computer Corporation
+ *  by Matt Domsch
+ *
+ * BIOS Enhanced Disk Drive Services (EDD)
+ * conformant to T13 Committee www.t13.org
+ *   projects 1572D, 1484D, 1386D, 1226DT
+ *
+ * This code takes information provided by BIOS EDD calls
+ * fn41 - Check Extensions Present and
+ * fn48 - Get Device Parameters with EDD extensions
+ * made in setup.S, copied to safe structures in setup.c,
+ * and presents it in /proc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * TODO:
+ * - move edd.[ch] to better locations if/when one is decided
+ * - keep current with 2.5 EDD code changes
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+MODULE_AUTHOR("Matt Domsch ");
+MODULE_DESCRIPTION("proc interface to BIOS EDD information");
+MODULE_LICENSE("GPL");
+
+#define EDD_VERSION "0.09 2003-Jan-22"
+#define EDD_DEVICE_NAME_SIZE 16
+#define REPORT_URL "http://domsch.com/linux/edd30/results.html"
+
+#define left (count - (p - page) - 1)
+
+static struct proc_dir_entry *bios_dir;
+
+struct attr_entry {
+	struct proc_dir_entry *entry;
+	struct list_head node;
+};
+
+struct edd_device {
+	char name[EDD_DEVICE_NAME_SIZE];
+	struct edd_info *info;
+	struct proc_dir_entry *dir;
+	struct list_head attr_list;
+};
+
+static struct edd_device *edd_devices[EDDMAXNR];
+
+struct edd_attribute {
+	char *name;
+	int (*show)(char *page, char **start, off_t off,
+		    int count, int *eof, void *data);
+	int (*test) (struct edd_device * edev);
+};
+
+#define EDD_DEVICE_ATTR(_name,_show,_test) \
+struct edd_attribute edd_attr_##_name = { 	\
+	.name = __stringify(_name),		\
+	.show	= _show,			\
+	.test	= _test,			\
+};
+
+static inline struct edd_info *
+edd_dev_get_info(struct edd_device *edev)
+{
+	return edev->info;
+}
+
+static inline void
+edd_dev_set_info(struct edd_device *edev, struct edd_info *info)
+{
+	edev->info = info;
+}
+
+static int
+proc_calc_metrics(char *page, char **start, off_t off,
+		  int count, int *eof, int len)
+{
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len>count) len = count;
+	if (len<0) len = 0;
+	return len;
+}
+
+static int
+edd_dump_raw_data(char *b, int count, void *data, int length)
+{
+	char *orig_b = b;
+	char hexbuf[80], ascbuf[20], *h, *a, c;
+	unsigned char *p = data;
+	unsigned long column = 0;
+	int length_printed = 0, d;
+	const char maxcolumn = 16;
+	while (length_printed < length && count > 0) {
+		h = hexbuf;
+		a = ascbuf;
+		for (column = 0;
+		     column < maxcolumn && length_printed < length; column++) {
+			h += sprintf(h, "%02x ", (unsigned char) *p);
+			if (!isprint(*p))
+				c = '.';
+			else
+				c = *p;
+			a += sprintf(a, "%c", c);
+			p++;
+			length_printed++;
+		}
+		/* pad out the line */
+		for (; column < maxcolumn; column++) {
+			h += sprintf(h, "   ");
+			a += sprintf(a, " ");
+		}
+		d = snprintf(b, count, "%s\t%s\n", hexbuf, ascbuf);
+		b += d;
+		count -= d;
+	}
+	return (b - orig_b);
+}
+
+static int
+edd_show_host_bus(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	int i;
+
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	for (i = 0; i < 4; i++) {
+		if (isprint(info->params.host_bus_type[i])) {
+			p += snprintf(p, left, "%c", info->params.host_bus_type[i]);
+		} else {
+			p += snprintf(p, left, " ");
+		}
+	}
+
+	if (!strncmp(info->params.host_bus_type, "ISA", 3)) {
+		p += snprintf(p, left, "\tbase_address: %x\n",
+			     info->params.interface_path.isa.base_address);
+	} else if (!strncmp(info->params.host_bus_type, "PCIX", 4) ||
+		   !strncmp(info->params.host_bus_type, "PCI", 3)) {
+		p += snprintf(p, left,
+			     "\t%02x:%02x.%d  channel: %u\n",
+			     info->params.interface_path.pci.bus,
+			     info->params.interface_path.pci.slot,
+			     info->params.interface_path.pci.function,
+			     info->params.interface_path.pci.channel);
+	} else if (!strncmp(info->params.host_bus_type, "IBND", 4) ||
+		   !strncmp(info->params.host_bus_type, "XPRS", 4) ||
+		   !strncmp(info->params.host_bus_type, "HTPT", 4)) {
+		p += snprintf(p, left,
+			     "\tTBD: %llx\n",
+			     info->params.interface_path.ibnd.reserved);
+
+	} else {
+		p += snprintf(p, left, "\tunknown: %llx\n",
+			     info->params.interface_path.unknown.reserved);
+	}
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+static int
+edd_show_interface(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	int i;
+
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	for (i = 0; i < 8; i++) {
+		if (isprint(info->params.interface_type[i])) {
+			p += snprintf(p, left, "%c", info->params.interface_type[i]);
+		} else {
+			p += snprintf(p, left, " ");
+		}
+	}
+	if (!strncmp(info->params.interface_type, "ATAPI", 5)) {
+		p += snprintf(p, left, "\tdevice: %u  lun: %u\n",
+			     info->params.device_path.atapi.device,
+			     info->params.device_path.atapi.lun);
+	} else if (!strncmp(info->params.interface_type, "ATA", 3)) {
+		p += snprintf(p, left, "\tdevice: %u\n",
+			     info->params.device_path.ata.device);
+	} else if (!strncmp(info->params.interface_type, "SCSI", 4)) {
+		p += snprintf(p, left, "\tid: %u  lun: %llu\n",
+			     info->params.device_path.scsi.id,
+			     info->params.device_path.scsi.lun);
+	} else if (!strncmp(info->params.interface_type, "USB", 3)) {
+		p += snprintf(p, left, "\tserial_number: %llx\n",
+			     info->params.device_path.usb.serial_number);
+	} else if (!strncmp(info->params.interface_type, "1394", 4)) {
+		p += snprintf(p, left, "\teui: %llx\n",
+			     info->params.device_path.i1394.eui);
+	} else if (!strncmp(info->params.interface_type, "FIBRE", 5)) {
+		p += snprintf(p, left, "\twwid: %llx lun: %llx\n",
+			     info->params.device_path.fibre.wwid,
+			     info->params.device_path.fibre.lun);
+	} else if (!strncmp(info->params.interface_type, "I2O", 3)) {
+		p += snprintf(p, left, "\tidentity_tag: %llx\n",
+			     info->params.device_path.i2o.identity_tag);
+	} else if (!strncmp(info->params.interface_type, "RAID", 4)) {
+		p += snprintf(p, left, "\tidentity_tag: %x\n",
+			     info->params.device_path.raid.array_number);
+	} else if (!strncmp(info->params.interface_type, "SATA", 4)) {
+		p += snprintf(p, left, "\tdevice: %u\n",
+			     info->params.device_path.sata.device);
+	} else {
+		p += snprintf(p, left, "\tunknown: %llx %llx\n",
+			     info->params.device_path.unknown.reserved1,
+			     info->params.device_path.unknown.reserved2);
+	}
+
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+/**
+ * edd_show_raw_data() - unparses EDD information, returned to user-space
+ *
+ * Returns: number of bytes written, or 0 on failure
+ */
+static int
+edd_show_raw_data(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	int i, warn_padding = 0, nonzero_path = 0,
+		len = sizeof (*info) - 4;
+	uint8_t checksum = 0, c = 0;
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE))
+		len = info->params.length;
+
+	p += snprintf(p, left, "int13 fn48 returned data:\n\n");
+	p += edd_dump_raw_data(p, left, ((char *) info) + 4, len);
+
+	/* Spec violation.  Adaptec AIC7899 returns 0xDDBE
+	   here, when it should be 0xBEDD.
+	 */
+	p += snprintf(p, left, "\n");
+	if (info->params.key == 0xDDBE) {
+		p += snprintf(p, left,
+			     "Warning: Spec violation.  Key should be 0xBEDD, is 0xDDBE\n");
+	}
+
+	if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) {
+		goto out;
+	}
+
+	for (i = 30; i <= 73; i++) {
+		c = *(((uint8_t *) info) + i + 4);
+		if (c)
+			nonzero_path++;
+		checksum += c;
+	}
+
+	if (checksum) {
+		p += snprintf(p, left,
+			     "Warning: Spec violation.  Device Path checksum invalid.\n");
+	}
+
+	if (!nonzero_path) {
+		p += snprintf(p, left, "Error: Spec violation.  Empty device path.\n");
+		goto out;
+	}
+
+	for (i = 0; i < 4; i++) {
+		if (!isprint(info->params.host_bus_type[i])) {
+			warn_padding++;
+		}
+	}
+	for (i = 0; i < 8; i++) {
+		if (!isprint(info->params.interface_type[i])) {
+			warn_padding++;
+		}
+	}
+
+	if (warn_padding) {
+		p += snprintf(p, left,
+			     "Warning: Spec violation.  Padding should be 0x20.\n");
+	}
+
+out:
+	p += snprintf(p, left, "\nPlease check %s\n", REPORT_URL);
+	p += snprintf(p, left, "to see if this device has been reported.  If not,\n");
+	p += snprintf(p, left, "please send the information requested there.\n");
+
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+static int
+edd_show_version(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	p += snprintf(p, left, "0x%02x\n", info->version);
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+static int
+edd_show_extensions(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	if (info->interface_support & EDD_EXT_FIXED_DISK_ACCESS) {
+		p += snprintf(p, left, "Fixed disk access\n");
+	}
+	if (info->interface_support & EDD_EXT_DEVICE_LOCKING_AND_EJECTING) {
+		p += snprintf(p, left, "Device locking and ejecting\n");
+	}
+	if (info->interface_support & EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT) {
+		p += snprintf(p, left, "Enhanced Disk Drive support\n");
+	}
+	if (info->interface_support & EDD_EXT_64BIT_EXTENSIONS) {
+		p += snprintf(p, left, "64-bit extensions\n");
+	}
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+static int
+edd_show_info_flags(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	if (info->params.info_flags & EDD_INFO_DMA_BOUNDRY_ERROR_TRANSPARENT)
+		p += snprintf(p, left, "DMA boundry error transparent\n");
+	if (info->params.info_flags & EDD_INFO_GEOMETRY_VALID)
+		p += snprintf(p, left, "geometry valid\n");
+	if (info->params.info_flags & EDD_INFO_REMOVABLE)
+		p += snprintf(p, left, "removable\n");
+	if (info->params.info_flags & EDD_INFO_WRITE_VERIFY)
+		p += snprintf(p, left, "write verify\n");
+	if (info->params.info_flags & EDD_INFO_MEDIA_CHANGE_NOTIFICATION)
+		p += snprintf(p, left, "media change notification\n");
+	if (info->params.info_flags & EDD_INFO_LOCKABLE)
+		p += snprintf(p, left, "lockable\n");
+	if (info->params.info_flags & EDD_INFO_NO_MEDIA_PRESENT)
+		p += snprintf(p, left, "no media present\n");
+	if (info->params.info_flags & EDD_INFO_USE_INT13_FN50)
+		p += snprintf(p, left, "use int13 fn50\n");
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+static int
+edd_show_default_cylinders(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	p += snprintf(p, left, "0x%x\n", info->params.num_default_cylinders);
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+static int
+edd_show_default_heads(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	p += snprintf(p, left, "0x%x\n", info->params.num_default_heads);
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+static int
+edd_show_default_sectors_per_track(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	p += snprintf(p, left, "0x%x\n", info->params.sectors_per_track);
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+static int
+edd_show_sectors(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct edd_info *info = data;
+	char *p = page;
+	if (!info || !page || off) {
+		return proc_calc_metrics(page, start, off, count, eof, 0);
+	}
+
+	p += snprintf(p, left, "0x%llx\n", info->params.number_of_sectors);
+	return proc_calc_metrics(page, start, off, count, eof, (p - page));
+}
+
+static int
+edd_has_default_cylinders(struct edd_device *edev)
+{
+	struct edd_info *info = edd_dev_get_info(edev);
+	if (!edev || !info)
+		return 0;
+	return info->params.num_default_cylinders > 0;
+}
+
+static int
+edd_has_default_heads(struct edd_device *edev)
+{
+	struct edd_info *info = edd_dev_get_info(edev);
+	if (!edev || !info)
+		return 0;
+	return info->params.num_default_heads > 0;
+}
+
+static int
+edd_has_default_sectors_per_track(struct edd_device *edev)
+{
+	struct edd_info *info = edd_dev_get_info(edev);
+	if (!edev || !info)
+		return 0;
+	return info->params.sectors_per_track > 0;
+}
+
+static int
+edd_has_edd30(struct edd_device *edev)
+{
+	struct edd_info *info = edd_dev_get_info(edev);
+	int i, nonzero_path = 0;
+	char c;
+
+	if (!edev || !info)
+		return 0;
+
+	if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) {
+		return 0;
+	}
+
+	for (i = 30; i <= 73; i++) {
+		c = *(((uint8_t *) info) + i + 4);
+		if (c) {
+			nonzero_path++;
+			break;
+		}
+	}
+	if (!nonzero_path) {
+		return 0;
+	}
+
+	return 1;
+}
+
+static EDD_DEVICE_ATTR(raw_data, edd_show_raw_data, NULL);
+static EDD_DEVICE_ATTR(version, edd_show_version, NULL);
+static EDD_DEVICE_ATTR(extensions, edd_show_extensions, NULL);
+static EDD_DEVICE_ATTR(info_flags, edd_show_info_flags, NULL);
+static EDD_DEVICE_ATTR(sectors, edd_show_sectors, NULL);
+static EDD_DEVICE_ATTR(default_cylinders, edd_show_default_cylinders,
+		       edd_has_default_cylinders);
+static EDD_DEVICE_ATTR(default_heads, edd_show_default_heads,
+		       edd_has_default_heads);
+static EDD_DEVICE_ATTR(default_sectors_per_track,
+		       edd_show_default_sectors_per_track,
edd_has_default_sectors_per_track); +static EDD_DEVICE_ATTR(interface, edd_show_interface,edd_has_edd30); +static EDD_DEVICE_ATTR(host_bus, edd_show_host_bus, edd_has_edd30); + +static struct edd_attribute *def_attrs[] = { + &edd_attr_raw_data, + &edd_attr_version, + &edd_attr_extensions, + &edd_attr_info_flags, + &edd_attr_sectors, + &edd_attr_default_cylinders, + &edd_attr_default_heads, + &edd_attr_default_sectors_per_track, + &edd_attr_interface, + &edd_attr_host_bus, + NULL, +}; + +static inline void +edd_device_unregister(struct edd_device *edev) +{ + struct list_head *pos, *next; + struct attr_entry *ae; + + list_for_each_safe(pos, next, &edev->attr_list) { + ae = list_entry(pos, struct attr_entry, node); + remove_proc_entry(ae->entry->name, edev->dir); + list_del(&ae->node); + kfree(ae); + } + + remove_proc_entry(edev->dir->name, bios_dir); +} + +static int +edd_populate_dir(struct edd_device *edev) +{ + struct edd_attribute *attr; + struct attr_entry *ae; + int i; + int error = 0; + + for (i = 0; (attr=def_attrs[i]); i++) { + if (!attr->test || (attr->test && attr->test(edev))) { + ae = kmalloc(sizeof (*ae), GFP_KERNEL); + if (ae == NULL) { + error = 1; + break; + } + INIT_LIST_HEAD(&ae->node); + ae->entry = + create_proc_read_entry(attr->name, 0444, + edev->dir, attr->show, + edd_dev_get_info(edev)); + if (ae->entry == NULL) { + error = 1; + break; + } + list_add(&ae->node, &edev->attr_list); + } + } + + if (error) + return error; + + return 0; +} + +static int +edd_make_dir(struct edd_device *edev) +{ + int error=1; + + edev->dir = proc_mkdir(edev->name, bios_dir); + if (edev->dir != NULL) { + edev->dir->mode = (S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO); + error = edd_populate_dir(edev); + } + return error; +} + +static int +edd_device_register(struct edd_device *edev, int i) +{ + int error; + + if (!edev) + return 1; + memset(edev, 0, sizeof (*edev)); + INIT_LIST_HEAD(&edev->attr_list); + edd_dev_set_info(edev, &edd[i]); + snprintf(edev->name, EDD_DEVICE_NAME_SIZE, "int13_dev%02x", + edd[i].device); + error = edd_make_dir(edev); + return error; +} + +/** + * edd_init() - creates /proc tree of EDD data + * + * This assumes that eddnr and edd were + * assigned in setup.c already. 
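+ * + * A sketch of the resulting tree (editor's illustration, not from the + * original comment; the int13 device numbers come from edd[i].device, + * 0x80 being the usual first BIOS disk): + * + * /proc/bios/int13_dev80/{raw_data,version,extensions,info_flags,sectors} + * plus host_bus, interface and the default_* entries whenever the + * corresponding edd_attr test functions pass.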
+ */ +static int __init +edd_init(void) +{ + unsigned int i; + int rc = 0; + struct edd_device *edev; + + printk(KERN_INFO "BIOS EDD facility v%s, %d devices found\n", + EDD_VERSION, eddnr); + + if (!eddnr) { + printk(KERN_INFO "EDD information not available.\n"); + return 1; + } + + bios_dir = proc_mkdir("bios", NULL); + if (bios_dir == NULL) + return 1; + + for (i = 0; i < eddnr && i < EDDMAXNR && !rc; i++) { + edev = kmalloc(sizeof (*edev), GFP_KERNEL); + if (!edev) { + rc = 1; + break; + } + + rc = edd_device_register(edev, i); + if (rc) { + break; + } + edd_devices[i] = edev; + } + + if (rc) { + for (i = 0; i < eddnr && i < EDDMAXNR; i++) { + if ((edev = edd_devices[i])) { + edd_device_unregister(edev); + kfree(edev); + } + } + + remove_proc_entry(bios_dir->name, NULL); + } + + return rc; +} + +static void __exit +edd_exit(void) +{ + int i; + struct edd_device *edev; + + for (i = 0; i < eddnr && i < EDDMAXNR; i++) { + if ((edev = edd_devices[i])) { + edd_device_unregister(edev); + kfree(edev); + } + } + + remove_proc_entry(bios_dir->name, NULL); +} + +module_init(edd_init); +module_exit(edd_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/elanfreq.c linux.21rc5-ac2/arch/i386/kernel/elanfreq.c --- linux.21rc5/arch/i386/kernel/elanfreq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/elanfreq.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,297 @@ +/* + * elanfreq: cpufreq driver for the AMD ELAN family + * + * (c) Copyright 2002 Robert Schwebel + * + * Parts of this code are (c) Sven Geggus + * + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel + * + */ + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */ +#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */ + +static struct cpufreq_driver *elanfreq_driver; + +/* Module parameter */ +static int max_freq; + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Robert Schwebel , Sven Geggus "); +MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs"); + +struct s_elan_multiplier { + int clock; /* frequency in kHz */ + int val40h; /* PMU Force Mode register */ + int val80h; /* CPU Clock Speed Register */ +}; + +/* + * It is important that the frequencies + * are listed in ascending order here! + */ +struct s_elan_multiplier elan_multiplier[] = { + {1000, 0x02, 0x18}, + {2000, 0x02, 0x10}, + {4000, 0x02, 0x08}, + {8000, 0x00, 0x00}, + {16000, 0x00, 0x02}, + {33000, 0x00, 0x04}, + {66000, 0x01, 0x04}, + {99000, 0x01, 0x05} +}; + +static struct cpufreq_frequency_table elanfreq_table[] = { + {0, 1000}, + {1, 2000}, + {2, 4000}, + {3, 8000}, + {4, 16000}, + {5, 33000}, + {6, 66000}, + {7, 99000}, + {0, CPUFREQ_TABLE_END}, +}; + + +/** + * elanfreq_get_cpu_frequency: determine current cpu speed + * + * Finds out at which frequency the CPU of the Elan SOC runs + * at the moment. Frequencies from 1 to 33 MHz are generated + * the normal way, 66 and 99 MHz are called "Hyperspeed Mode" + * and have the rest of the chip running with 33 MHz. 
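+ * + * Decoding sketch (editor's arithmetic, mirroring the code below): the + * all-ones pattern 0xE0 in the top three bits returns 0 (unknown), 0xC0 + * selects 66/99 MHz depending on bit 0, 0xA0 is the 33 MHz special case, + * and anything else yields (1 << ((clockspeed_reg & 0xE0) >> 5)) * 1000, + * e.g. 0x60 -> (1 << 3) * 1000 = 8000 kHz.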
+ */ + +static unsigned int elanfreq_get_cpu_frequency(void) +{ + u8 clockspeed_reg; /* Clock Speed Register */ + + local_irq_disable(); + outb_p(0x80,REG_CSCIR); + clockspeed_reg = inb_p(REG_CSCDR); + local_irq_enable(); + + if ((clockspeed_reg & 0xE0) == 0xE0) { return 0; } + + /* Are we in CPU clock multiplied mode (66/99 MHz)? */ + if ((clockspeed_reg & 0xE0) == 0xC0) { + if ((clockspeed_reg & 0x01) == 0) { + return 66000; + } else { + return 99000; + } + } + + /* 33 MHz is not 32 MHz... */ + if ((clockspeed_reg & 0xE0)==0xA0) + return 33000; + + return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000); +} + + +/** + * elanfreq_set_cpu_frequency: Change the CPU core frequency + * @cpu: cpu number + * @freq: frequency in kHz + * + * This function takes a frequency value and changes the CPU frequency + * according to this. Note that the frequency has to be checked by + * elanfreq_validatespeed() for correctness! + * + * There is no return value. + */ + +static void elanfreq_set_cpu_state (unsigned int state) { + + struct cpufreq_freqs freqs; + + if (!elanfreq_driver) { + printk(KERN_ERR "cpufreq: initialization problem or invalid target frequency\n"); + return; + } + + freqs.old = elanfreq_get_cpu_frequency(); + freqs.new = elan_multiplier[state].clock; + freqs.cpu = 0; /* elanfreq.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",elan_multiplier[state].clock); + + + /* + * Access to the Elan's internal registers is indexed via + * 0x22: Chip Setup & Control Register Index Register (CSCI) + * 0x23: Chip Setup & Control Register Data Register (CSCD) + * + */ + + /* + * 0x40 is the Power Management Unit's Force Mode Register. + * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency) + */ + + local_irq_disable(); + outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */ + outb_p(0x00,REG_CSCDR); + local_irq_enable(); /* wait till internal pipelines and */ + udelay(1000); /* buffers have cleaned up */ + + local_irq_disable(); + + /* now, set the CPU clock speed register (0x80) */ + outb_p(0x80,REG_CSCIR); + outb_p(elan_multiplier[state].val80h,REG_CSCDR); + + /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */ + outb_p(0x40,REG_CSCIR); + outb_p(elan_multiplier[state].val40h,REG_CSCDR); + udelay(10000); + local_irq_enable(); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +}; + + +/** + * elanfreq_validatespeed: test if frequency range is valid + * + * This function checks if a given frequency range in kHz is valid + * for the hardware supported by the driver. + */ + +static int elanfreq_verify (struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); +} + +static int elanfreq_setpolicy (struct cpufreq_policy *policy) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_setpolicy(policy, &elanfreq_table[0], &newstate)) + return -EINVAL; + + elanfreq_set_cpu_state(newstate); + + return 0; +} + + +/* + * Module init and exit code + */ + +#ifndef MODULE +/** + * elanfreq_setup - elanfreq command line parameter parsing + * + * elanfreq command line parameter. Use: + * elanfreq=66000 + * to set the maximum CPU frequency to 66 MHz. Note that in + * case you do not give this boot parameter, the maximum + * frequency will fall back to _current_ CPU frequency which + * might be lower. If you build this as a module, use the + * max_freq module parameter instead. 
+ */ +static int __init elanfreq_setup(char *str) +{ + max_freq = simple_strtoul(str, &str, 0); + return 1; +} +__setup("elanfreq=", elanfreq_setup); +#endif + +static int __init elanfreq_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + struct cpufreq_driver *driver; + int ret, i; + + /* Test if we have the right hardware */ + if ((c->x86_vendor != X86_VENDOR_AMD) || + (c->x86 != 4) || (c->x86_model!=10)) + { + printk(KERN_INFO "elanfreq: error: no Elan processor found!\n"); + return -ENODEV; + } + + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + + if (!max_freq) + max_freq = elanfreq_get_cpu_frequency(); + + /* table init */ + for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { + if (elanfreq_table[i].frequency > max_freq) + elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; + } + +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = elanfreq_get_cpu_frequency(); +#endif + + driver->verify = &elanfreq_verify; + driver->setpolicy = &elanfreq_setpolicy; + + driver->policy[0].cpu = 0; + ret = cpufreq_frequency_table_cpuinfo(&driver->policy[0], &elanfreq_table[0]); + if (ret) { + kfree(driver); + return ret; + } + driver->policy[0].policy = CPUFREQ_POLICY_PERFORMANCE; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + elanfreq_driver = driver; + + ret = cpufreq_register(driver); + if (ret) { + elanfreq_driver = NULL; + kfree(driver); + } + + return ret; +} + + +static void __exit elanfreq_exit(void) +{ + if (elanfreq_driver) { + cpufreq_unregister(); + kfree(elanfreq_driver); + } +} + +module_init(elanfreq_init); +module_exit(elanfreq_exit); + +MODULE_PARM (max_freq, "i"); + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/entry.S linux.21rc5-ac2/arch/i386/kernel/entry.S --- linux.21rc5/arch/i386/kernel/entry.S 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/entry.S 2003-04-22 16:44:36.000000000 +0100 @@ -79,7 +79,7 @@ exec_domain = 16 need_resched = 20 tsk_ptrace = 24 -processor = 52 +cpu = 32 ENOSYS = 38 @@ -184,9 +184,11 @@ ENTRY(ret_from_fork) +#if CONFIG_SMP pushl %ebx call SYMBOL_NAME(schedule_tail) addl $4, %esp +#endif GET_CURRENT(%ebx) testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS jne tracesys_exit @@ -645,8 +647,8 @@ .long SYMBOL_NAME(sys_tkill) .long SYMBOL_NAME(sys_sendfile64) .long SYMBOL_NAME(sys_ni_syscall) /* 240 reserved for futex */ - .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_setaffinity */ - .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_getaffinity */ + .long SYMBOL_NAME(sys_sched_setaffinity) + .long SYMBOL_NAME(sys_sched_getaffinity) .long SYMBOL_NAME(sys_ni_syscall) /* sys_set_thread_area */ .long SYMBOL_NAME(sys_ni_syscall) /* sys_get_thread_area */ .long SYMBOL_NAME(sys_ni_syscall) /* 245 sys_io_setup */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/gx-suspmod.c linux.21rc5-ac2/arch/i386/kernel/gx-suspmod.c --- linux.21rc5/arch/i386/kernel/gx-suspmod.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/gx-suspmod.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,514 @@ +/* + * Cyrix MediaGX and NatSemi Geode Suspend Modulation + * (C) 2002 Zwane Mwaikambo + * (C) 2002 Hiroshi Miura + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU 
General Public License + * version 2 as published by the Free Software Foundation + * + * The author(s) of this software shall not be held liable for damages + * of any nature resulting due to the use of this software. This + * software is provided AS-IS with no warranties. + * + * Theoretical note: + * + * (see Geode(tm) CS5530 manual (rev.4.1) page.56) + * + * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0 + * is based on Suspend Modulation. + * + * Suspend Modulation works by asserting and de-asserting the SUSP# pin + * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP# + * the CPU enters an idle state. GX1 stops its core clock when SUSP# is + * asserted, and power consumption is reduced. + * + * Suspend Modulation's OFF/ON durations are configurable + * with 'Suspend Modulation OFF Count Register' + * and 'Suspend Modulation ON Count Register'. + * These registers are 8-bit counters that represent the number of + * 32us intervals which the SUSP# pin is asserted/de-asserted to the + * processor. + * + * These counters define a ratio which is the effective frequency + * of operation of the system. + * + * On Count + * F_eff = Fgx * ---------------------- + * On Count + Off Count + * + * 0 <= On Count, Off Count <= 255 + * + * From these limits, we can get register values + * + * on_duration + off_duration <= MAX_DURATION + * off_duration = on_duration * (stock_freq - freq) / freq + * + * on_duration = (freq * DURATION) / stock_freq + * off_duration = DURATION - on_duration + * + * + *--------------------------------------------------------------------------- + * + * ChangeLog: + * Dec. 11, 2002 Hiroshi Miura + * - rewrite for Cyrix MediaGX Cx5510/5520 and + * NatSemi Geode Cs5530(A). + * + * Jul. ??, 2002 Zwane Mwaikambo + * - cs5530_mod patch for 2.4.19-rc1.
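+ * + * Worked example for the formulas above (editor's arithmetic): with + * stock_freq = 200000 kHz, freq = 150000 kHz and DURATION = 255, + * on_duration = 150000 * 255 / 200000 = 191, off_duration = 255 - 191 = 64, + * giving F_eff = 200000 * 191 / 255 ~= 149804 kHz.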
+ * + *--------------------------------------------------------------------------- + * + * Todo + * Test on machines with 5510, 5530, 5530A + */ + +/************************************************************************ + * Suspend Modulation - Definitions * + ************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* PCI config registers, all at F0 */ +#define PCI_PMER1 0x80 /* power management enable register 1 */ +#define PCI_PMER2 0x81 /* power management enable register 2 */ +#define PCI_PMER3 0x82 /* power management enable register 3 */ +#define PCI_IRQTC 0x8c /* irq speedup timer counter register: typical 2 to 4ms */ +#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */ +#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */ +#define PCI_MODON 0x95 /* suspend modulation ON counter register */ +#define PCI_SUSCFG 0x96 /* suspend configuration register */ + +/* PMER1 bits */ +#define GPM (1<<0) /* global power management */ +#define GIT (1<<1) /* globally enable PM device idle timers */ +#define GTR (1<<2) /* globally enable IO traps */ +#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */ +#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */ + +/* SUSCFG bits */ +#define SUSMOD (1<<0) /* enable/disable suspend modulation */ +/* the following are supported only by the cs5530 (after rev.1.2)/cs5530A */ +#define SMISPDUP (1<<1) /* select how SMI re-enables suspend modulation: */ + /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */ +#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */ +/* the following are supported only by the cs5530A */ +#define PWRSVE_ISA (1<<3) /* stop ISA clock */ +#define PWRSVE (1<<4) /* active idle */ + +struct gxfreq_params { + u8 on_duration; + u8 off_duration; + u8 pci_suscfg; + u8 pci_pmer1; + u8 pci_pmer2; + u8 pci_rev; + struct pci_dev *cs55x0; +}; + +static struct cpufreq_driver *gx_driver; +static struct gxfreq_params *gx_params; +static int stock_freq; + +/* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ +static int pci_busclk = 0; +MODULE_PARM(pci_busclk, "i"); + +/* maximum duration for which the cpu may be suspended + * (32us * MAX_DURATION). If no parameter is given, this defaults + * to 255. + * Note that this leads to a maximum of 8 ms(!) where the CPU clock + * is suspended -- processing power is just 0.39% of what it used to be, + * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ +static int max_duration = 255; +MODULE_PARM(max_duration, "i"); + +/* For the default policy, we want at least some processing power + * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV) + */ +#define POLICY_MIN_DIV 20 + + +/* DEBUG + * Define it if you want verbose debug output + */ + +#define SUSPMOD_DEBUG 1 + +#ifdef SUSPMOD_DEBUG +#define dprintk(msg...) printk(KERN_DEBUG "cpufreq:" msg) +#else +#define dprintk(msg...) 
do { } while(0); +#endif + +/** + * we can detect a core multiplier from dir0_lsb + * from GX1 datasheet p.56, + * MULT[3:0]: + * 0000 = SYSCLK multiplied by 4 (test only) + * 0001 = SYSCLK multiplied by 10 + * 0010 = SYSCLK multiplied by 4 + * 0011 = SYSCLK multiplied by 6 + * 0100 = SYSCLK multiplied by 9 + * 0101 = SYSCLK multiplied by 5 + * 0110 = SYSCLK multiplied by 7 + * 0111 = SYSCLK multiplied by 8 + * of 33.3MHz + **/ +static int gx_freq_mult[16] = { + 4, 10, 4, 6, 9, 5, 7, 8, + 0, 0, 0, 0, 0, 0, 0, 0 +}; + + +/**************************************************************** + * Low Level chipset interface * + ****************************************************************/ +static struct pci_device_id gx_chipset_tbl[] __initdata = { + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID }, + { 0, }, +}; + +/** + * gx_detect_chipset: + * + **/ +static __init struct pci_dev *gx_detect_chipset(void) +{ + struct pci_dev *gx_pci; + + /* check if CPU is a MediaGX or a Geode. */ + if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) && + (current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { + printk(KERN_INFO "gx-suspmod: error: no MediaGX/Geode processor found!\n"); + return NULL; + } + + /* detect which companion chip is used */ + pci_for_each_dev(gx_pci) { + if ((pci_match_device (gx_chipset_tbl, gx_pci)) != NULL) { + return gx_pci; + } + } + + dprintk(KERN_INFO "gx-suspmod: error: no supported chipset found!\n"); + return NULL; +} + +/** + * gx_get_cpuspeed: + * + * Finds out at which effective frequency the Cyrix MediaGX/NatSemi Geode CPU runs. + */ +static int gx_get_cpuspeed(void) +{ + if ((gx_params->pci_suscfg & SUSMOD) == 0) + return stock_freq; + + return (stock_freq * gx_params->on_duration) + / (gx_params->on_duration + gx_params->off_duration); +} + +/** + * gx_validate_speed: + * determine the closest attainable cpu speed + * +**/ + +static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration) +{ + unsigned int i; + u8 tmp_on, tmp_off; + int old_tmp_freq = stock_freq; + int tmp_freq; + + *on_duration=1; + *off_duration=0; + + for (i=max_duration; i>0; i--) { + tmp_on = ((khz * i) / stock_freq) & 0xff; + tmp_off = i - tmp_on; + tmp_freq = (stock_freq * tmp_on) / i; + /* if this relation is closer to khz, use this. If it's equal, + * prefer it, too - lower latency */ + if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) { + *on_duration = tmp_on; + *off_duration = tmp_off; + old_tmp_freq = tmp_freq; + } + } + + return old_tmp_freq; +} + + +/** + * gx_set_cpuspeed: + * set cpu speed in khz. 
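+ * + * (Editor's sketch: the requested khz is first rounded by + * gx_validate_speed() to the closest reachable value; equally good matches + * favour the shortest on/off pair, so asking for exactly stock_freq/2 + * ends up as on_duration = 1, off_duration = 1.)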
+ **/ + +static void gx_set_cpuspeed(unsigned int khz) +{ + u8 suscfg, pmer1; + unsigned int new_khz; + unsigned long flags; + struct cpufreq_freqs freqs; + + + freqs.cpu = 0; + freqs.old = gx_get_cpuspeed(); + + new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration); + + freqs.new = new_khz; + + if (new_khz == stock_freq) { /* if new khz == 100% of CPU speed, it is a special case */ + local_irq_save(flags); + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, (gx_params->pci_suscfg & ~(SUSMOD))); + pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &(gx_params->pci_suscfg)); + local_irq_restore(flags); + dprintk("suspend modulation disabled: cpu runs at 100 percent speed.\n"); + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + return; + } + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + local_irq_save(flags); + switch (gx_params->cs55x0->device) { + case PCI_DEVICE_ID_CYRIX_5530_LEGACY: + pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP; + /* FIXME: need to test other values -- Zwane,Miura */ + pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */ + pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */ + pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1); + + if (gx_params->pci_rev < 0x10) { /* CS5530(rev 1.2, 1.3) */ + suscfg = gx_params->pci_suscfg | SUSMOD; + } else { /* CS5530A,B.. */ + suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE; + } + break; + case PCI_DEVICE_ID_CYRIX_5520: + case PCI_DEVICE_ID_CYRIX_5510: + suscfg = gx_params->pci_suscfg | SUSMOD; + break; + default: + local_irq_restore(flags); + dprintk("fatal: tried to set an unknown chipset.\n"); + return; + } + + pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration); + pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration); + + pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg); + pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg); + + local_irq_restore(flags); + + gx_params->pci_suscfg = suscfg; + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", + gx_params->on_duration * 32, gx_params->off_duration * 32); + dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); +} + +/**************************************************************** + * High level functions * + ****************************************************************/ + +/* + * cpufreq_gx_verify: test if frequency range is valid + * + * This function checks if a given frequency range in kHz is valid + * for the hardware supported by the driver. + */ + +static int cpufreq_gx_verify(struct cpufreq_policy *policy) +{ + unsigned int tmp_freq = 0; + u8 tmp1, tmp2; + + if (!gx_driver || !stock_freq || !policy) + return -EINVAL; + + policy->cpu = 0; + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + + /* it needs to be assured that at least one supported frequency is + * within policy->min and policy->max. If it is not, policy->max + * needs to be increased until one frequency is supported. + * policy->min may not be decreased, though. This way we guarantee a + * specific processing capacity. 
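+ * + * Numeric sketch (editor's assumption: stock_freq = 200000 kHz, + * max_duration = 255): when gx_validate_speed() rounds policy->min down, + * the code below bumps it back up by stock_freq / max_duration ~= 784 kHz, + * so the guaranteed minimum is never undercut.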
+ */ + tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); + if (tmp_freq < policy->min) + tmp_freq += stock_freq / max_duration; + policy->min = tmp_freq; + if (policy->min > policy->max) + policy->max = tmp_freq; + tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2); + if (tmp_freq > policy->max) + tmp_freq -= stock_freq / max_duration; + policy->max = tmp_freq; + if (policy->max < policy->min) + policy->max = policy->min; + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + + return 0; +} + +/* + * cpufreq_gx_setpolicy: + * + */ +static int cpufreq_gx_setpolicy(struct cpufreq_policy *policy) +{ + + if (!gx_driver || !stock_freq || !policy) + return -EINVAL; + + policy->cpu = 0; + + if (policy->policy == CPUFREQ_POLICY_POWERSAVE) { + /* here we need to make sure that we don't set the + * frequency below policy->min (see comment in + * cpufreq_gx_verify() - guarantee of processing + * capacity). + */ + u8 tmp1, tmp2; + unsigned int tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); + while (tmp_freq < policy->min) { + tmp_freq += stock_freq / max_duration; + tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); + } + gx_set_cpuspeed(tmp_freq); + } else { + gx_set_cpuspeed(policy->max); + } + return 0; +} + +/* + * cpufreq_gx_init: + * initialize the MediaGX/Geode GX cpufreq driver + */ + +static int __init cpufreq_gx_init(void) +{ + int maxfreq,ret,curfreq; + struct cpufreq_driver *driver; + struct gxfreq_params *params; + struct pci_dev *gx_pci; + u32 class_rev; + + /* Test if we have the right hardware */ + if ((gx_pci = gx_detect_chipset()) == NULL) + return -ENODEV; + + /* check whether module parameters are sane */ + if (max_duration > 0xff) + max_duration = 0xff; + + dprintk("geode suspend modulation available.\n"); + + driver = kmalloc(sizeof(struct cpufreq_driver) + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (driver == NULL) + return -ENOMEM; + params = kmalloc(sizeof(struct gxfreq_params), GFP_KERNEL); + if (params == NULL) { + kfree(driver); + return -ENOMEM; + } + + driver->policy = (struct cpufreq_policy *)(driver + 1); + params->cs55x0 = gx_pci; + + /* keep cs55x0 configurations */ + pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg)); + pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1)); + pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); + pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); + pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); + pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev); + params->pci_rev = class_rev & 0xff; + + gx_params = params; + + /* determine maximum frequency */ + if (pci_busclk) { + maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; + } else if (cpu_khz) { + maxfreq = cpu_khz; + } else { + maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; + } + stock_freq = maxfreq; + curfreq = gx_get_cpuspeed(); + + dprintk("cpu max frequency is %d.\n", maxfreq); + dprintk("cpu current frequency is %dkHz.\n",curfreq); + + /* setup basic struct for cpufreq API */ +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = curfreq; +#endif + driver->policy[0].cpu = 0; + + if (max_duration < POLICY_MIN_DIV) + driver->policy[0].min = maxfreq / max_duration; + else + driver->policy[0].min = maxfreq / POLICY_MIN_DIV; + driver->policy[0].max = maxfreq; + driver->policy[0].policy = CPUFREQ_POLICY_PERFORMANCE; + driver->policy[0].cpuinfo.min_freq = maxfreq / 
max_duration; + driver->policy[0].cpuinfo.max_freq = maxfreq; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + driver->verify = &cpufreq_gx_verify; + driver->setpolicy = &cpufreq_gx_setpolicy; + + gx_driver = driver; + + if ((ret = cpufreq_register(driver))) { + kfree(driver); + kfree(params); + return ret; /* register error! */ + } + + return 0; +} + +static void __exit cpufreq_gx_exit(void) +{ + if (gx_driver) { + /* disable throttling */ + gx_set_cpuspeed(stock_freq); + cpufreq_unregister(); + kfree(gx_driver); + kfree(gx_params); + } +} + +MODULE_AUTHOR ("Hiroshi Miura "); +MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); +MODULE_LICENSE ("GPL"); + +module_init(cpufreq_gx_init); +module_exit(cpufreq_gx_exit); + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/head.S linux.21rc5-ac2/arch/i386/kernel/head.S --- linux.21rc5/arch/i386/kernel/head.S 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/head.S 2003-04-22 16:44:36.000000000 +0100 @@ -445,4 +445,15 @@ .quad 0x00409a0000000000 /* 0x48 APM CS code */ .quad 0x00009a0000000000 /* 0x50 APM CS 16 code (16 bit) */ .quad 0x0040920000000000 /* 0x58 APM DS data */ + /* Segments used for calling PnP BIOS */ + .quad 0x00c09a0000000000 /* 0x60 32-bit code */ + .quad 0x00809a0000000000 /* 0x68 16-bit code */ + .quad 0x0080920000000000 /* 0x70 16-bit data */ + .quad 0x0080920000000000 /* 0x78 16-bit data */ + .quad 0x0080920000000000 /* 0x80 16-bit data */ + .quad 0x0000000000000000 /* 0x88 not used */ + .quad 0x0000000000000000 /* 0x90 not used */ + .quad 0x0000000000000000 /* 0x98 not used */ + /* Per CPU segments */ .fill NR_CPUS*4,8,0 /* space for TSS's and LDT's */ + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/i386_ksyms.c linux.21rc5-ac2/arch/i386/kernel/i386_ksyms.c --- linux.21rc5/arch/i386/kernel/i386_ksyms.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/i386_ksyms.c 2003-04-22 16:44:36.000000000 +0100 @@ -28,6 +28,7 @@ #include #include #include +#include extern void dump_thread(struct pt_regs *, struct user *); extern spinlock_t rtc_lock; @@ -49,6 +50,7 @@ EXPORT_SYMBOL(drive_info); #endif +extern unsigned long cpu_khz; extern unsigned long get_cmos_time(void); /* platform dependent support */ @@ -71,6 +73,7 @@ EXPORT_SYMBOL(pm_idle); EXPORT_SYMBOL(pm_power_off); EXPORT_SYMBOL(get_cmos_time); +EXPORT_SYMBOL(cpu_khz); EXPORT_SYMBOL(apm_info); EXPORT_SYMBOL(gdt); EXPORT_SYMBOL(empty_zero_page); @@ -130,6 +133,7 @@ EXPORT_SYMBOL(cpu_data); EXPORT_SYMBOL(kernel_flag_cacheline); EXPORT_SYMBOL(smp_num_cpus); +EXPORT_SYMBOL(smp_num_siblings); EXPORT_SYMBOL(cpu_online_map); EXPORT_SYMBOL_NOVERS(__write_lock_failed); EXPORT_SYMBOL_NOVERS(__read_lock_failed); @@ -180,3 +184,8 @@ #ifdef CONFIG_MULTIQUAD EXPORT_SYMBOL(xquad_portio); #endif + +#ifdef CONFIG_EDD_MODULE +EXPORT_SYMBOL(edd); +EXPORT_SYMBOL(eddnr); +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/i387.c linux.21rc5-ac2/arch/i386/kernel/i387.c --- linux.21rc5/arch/i386/kernel/i387.c 2003-05-28 15:08:06.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/i387.c 2003-04-22 16:44:36.000000000 +0100 @@ -248,7 +248,7 @@ * FXSR floating point environment conversions. 
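 * (Each user-visible register in struct _fpstate is 10 bytes - two 32-bit * longs of significand plus a 16-bit exponent - which is exactly what the * per-register __put_user()/__get_user() triples in this hunk copy.)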
*/ -static inline int convert_fxsr_to_user( struct _fpstate *buf, +static int convert_fxsr_to_user( struct _fpstate *buf, struct i387_fxsave_struct *fxsave ) { unsigned long env[7]; @@ -270,13 +270,18 @@ to = &buf->_st[0]; from = (struct _fpxreg *) &fxsave->st_space[0]; for ( i = 0 ; i < 8 ; i++, to++, from++ ) { - if ( __copy_to_user( to, from, sizeof(*to) ) ) + unsigned long *t = (unsigned long *)to; + unsigned long *f = (unsigned long *)from; + + if (__put_user(*f, t) || + __put_user(*(f + 1), t + 1) || + __put_user(from->exponent, &to->exponent)) return 1; } return 0; } -static inline int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave, +static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave, struct _fpstate *buf ) { unsigned long env[7]; @@ -299,7 +304,12 @@ to = (struct _fpxreg *) &fxsave->st_space[0]; from = &buf->_st[0]; for ( i = 0 ; i < 8 ; i++, to++, from++ ) { - if ( __copy_from_user( to, from, sizeof(*from) ) ) + unsigned long *t = (unsigned long *)to; + unsigned long *f = (unsigned long *)from; + + if (__get_user(*t, f) || + __get_user(*(t + 1), f + 1) || + __get_user(to->exponent, &from->exponent)) return 1; } return 0; @@ -321,7 +331,7 @@ return 1; } -static inline int save_i387_fxsave( struct _fpstate *buf ) +static int save_i387_fxsave( struct _fpstate *buf ) { struct task_struct *tsk = current; int err = 0; @@ -371,7 +381,7 @@ sizeof(struct i387_fsave_struct) ); } -static inline int restore_i387_fxsave( struct _fpstate *buf ) +static int restore_i387_fxsave( struct _fpstate *buf ) { struct task_struct *tsk = current; clear_fpu( tsk ); @@ -389,7 +399,7 @@ if ( HAVE_HWFP ) { if ( cpu_has_fxsr ) { - err = restore_i387_fxsave( buf ); + err = restore_i387_fxsave( buf ); } else { err = restore_i387_fsave( buf ); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/io_apic.c linux.21rc5-ac2/arch/i386/kernel/io_apic.c --- linux.21rc5/arch/i386/kernel/io_apic.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/io_apic.c 2003-05-29 01:44:00.000000000 +0100 @@ -17,6 +17,7 @@ * thanks to Eric Gilmore * and Rolf G. Tews * for testing these extensively + * Paul Diefenbaugh : Added full ACPI support */ #include @@ -28,6 +29,7 @@ #include #include #include +#include #include #include @@ -42,7 +44,7 @@ unsigned int int_dest_addr_mode = APIC_DEST_LOGICAL; unsigned char int_delivery_mode = dest_LowestPrio; - +extern unsigned int xapic_support; /* * # of IRQ routing registers @@ -123,7 +125,7 @@ break; \ reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \ reg ACTION; \ - io_apic_modify(entry->apic, reg); \ + io_apic_write(entry->apic, 0x10 + R + pin*2, reg); \ if (!entry->next) \ break; \ entry = irq_2_pin + entry->next; \ @@ -167,6 +169,14 @@ { struct IO_APIC_route_entry entry; unsigned long flags; + + /* Check delivery_mode to be sure we're not clearing an SMI pin */ + *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); + *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin); + if (entry.delivery_mode == dest_SMI){ + printk(KERN_INFO "apic %i pin %i is an SMI pin!\n", apic, pin); + return; + } /* * Disable it in the IO-APIC irq-routing table: @@ -188,6 +198,105 @@ clear_IO_APIC_pin(apic, pin); } +static void set_ioapic_affinity (unsigned int irq, unsigned long mask) +{ + unsigned long flags; + + /* + * Only the first 8 bits are valid. 
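+ * They become bits 24-31 of the I/O APIC redirection entry - the logical + * destination byte - hence the << 24 shift below.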
+ */ + mask = mask << 24; + spin_lock_irqsave(&ioapic_lock, flags); + __DO_ACTION(1, = mask, ) + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +#ifndef CONFIG_SMP + +void send_IPI_self(int vector) +{ + unsigned int cfg; + + /* + * Wait for idle. + */ + apic_wait_icr_idle(); + cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL; + /* + * Send the IPI. The write to APIC_ICR fires this off. + */ + apic_write_around(APIC_ICR, cfg); +} +#endif + + +#if CONFIG_SMP + +typedef struct { + unsigned int cpu; + unsigned long timestamp; +} ____cacheline_aligned irq_balance_t; + +static irq_balance_t irq_balance[NR_IRQS] __cacheline_aligned + = { [ 0 ... NR_IRQS-1 ] = { 0, 0 } }; + +extern unsigned long irq_affinity [NR_IRQS]; + +#endif + +#define IDLE_ENOUGH(cpu,now) \ + (idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1)) + +#define IRQ_ALLOWED(cpu,allowed_mask) \ + ((1 << cpu) & (allowed_mask)) + +static unsigned long move(int curr_cpu, unsigned long allowed_mask, unsigned long now, int direction) +{ + int search_idle = 1; + int cpu = curr_cpu; + + goto inside; + + do { + if (unlikely(cpu == curr_cpu)) + search_idle = 0; +inside: + if (direction == 1) { + cpu++; + if (cpu >= smp_num_cpus) + cpu = 0; + } else { + cpu--; + if (cpu == -1) + cpu = smp_num_cpus-1; + } + } while (!IRQ_ALLOWED(cpu,allowed_mask) || + (search_idle && !IDLE_ENOUGH(cpu,now))); + + return cpu; +} + +static inline void balance_irq(int irq) +{ +#if CONFIG_SMP + irq_balance_t *entry = irq_balance + irq; + unsigned long now = jiffies; + + if (unlikely(entry->timestamp != now)) { + unsigned long allowed_mask; + int random_number; + + rdtscl(random_number); + random_number &= 1; + + allowed_mask = cpu_online_map & irq_affinity[irq]; + entry->timestamp = now; + entry->cpu = move(entry->cpu, allowed_mask, now, random_number); + set_ioapic_affinity(irq, apicid_to_phys_cpu_present(entry->cpu)); + } +#endif +} + /* * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to * specific CPU-side IRQs. @@ -801,6 +910,7 @@ (reg_01.version != 0x10) && /* oldest IO-APICs */ (reg_01.version != 0x11) && /* Pentium/Pro IO-APICs */ (reg_01.version != 0x13) && /* Xeon IO-APICs */ + (reg_01.version != 0x14) && /* SiS */ (reg_01.version != 0x20) /* Intel P64H (82806 AA) */ ) UNEXPECTED_IO_APIC(); @@ -1057,6 +1167,10 @@ unsigned char old_id; unsigned long flags; + if (acpi_ioapic) + /* This gets done during IOAPIC enumeration for ACPI. */ + return; + if (clustered_apic_mode) /* We don't have a good way to do this yet - hack */ phys_id_present_map = (u_long) 0xf; @@ -1072,7 +1186,8 @@ old_id = mp_ioapics[apic].mpc_apicid; - if (mp_ioapics[apic].mpc_apicid >= apic_broadcast_id) { + if (!xapic_support && + (mp_ioapics[apic].mpc_apicid >= apic_broadcast_id)) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", apic, mp_ioapics[apic].mpc_apicid); printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", @@ -1086,7 +1201,8 @@ * 'stuck on smp_invalidate_needed IPI wait' messages. * I/O APIC IDs no longer have any meaning for xAPICs and SAPICs. 
*/ - if ((clustered_apic_mode != CLUSTERED_APIC_XAPIC) && + if (!xapic_support && + (clustered_apic_mode != CLUSTERED_APIC_XAPIC) && (phys_id_present_map & (1 << mp_ioapics[apic].mpc_apicid))) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", apic, mp_ioapics[apic].mpc_apicid); @@ -1221,6 +1337,7 @@ */ static void ack_edge_ioapic_irq(unsigned int irq) { + balance_irq(irq); if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED)) == (IRQ_PENDING | IRQ_DISABLED)) mask_IO_APIC_irq(irq); @@ -1260,6 +1377,7 @@ unsigned long v; int i; + balance_irq(irq); /* * It appears there is an erratum which affects at least version 0x11 * of I/O APIC (that's the 82093AA and cores integrated into various @@ -1316,19 +1434,6 @@ static void mask_and_ack_level_ioapic_irq (unsigned int irq) { /* nothing */ } -static void set_ioapic_affinity (unsigned int irq, unsigned long mask) -{ - unsigned long flags; - /* - * Only the first 8 bits are valid. - */ - mask = mask << 24; - - spin_lock_irqsave(&ioapic_lock, flags); - __DO_ACTION(1, = mask, ) - spin_unlock_irqrestore(&ioapic_lock, flags); -} - /* * Level and edge triggered IO-APIC interrupts need different handling, * so we use two separate IRQ descriptors. Edge triggered IRQs can be @@ -1657,8 +1762,7 @@ printk("ENABLING IO-APIC IRQs\n"); /* - * Set up the IO-APIC IRQ routing table by parsing the MP-BIOS - * mptable: + * Set up IO-APIC IRQ routing. */ setup_ioapic_ids_from_mpc(); sync_Arb_IDs(); @@ -1667,3 +1771,159 @@ check_timer(); print_IO_APIC(); } + + +/* -------------------------------------------------------------------------- + ACPI-based IOAPIC Configuration + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_BOOT + +#define IO_APIC_MAX_ID 15 + +int __init io_apic_get_unique_id (int ioapic, int apic_id) +{ + struct IO_APIC_reg_00 reg_00; + static unsigned long apic_id_map = 0; + unsigned long flags; + int i = 0; + + /* + * The P4 platform supports up to 256 APIC IDs on two separate APIC + * buses (one for LAPICs, one for IOAPICs), where predecessors only + * support up to 16 on one shared APIC bus. + * + * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full + * advantage of new APIC bus architecture. + */ + + if (!apic_id_map) + apic_id_map = phys_cpu_present_map; + + spin_lock_irqsave(&ioapic_lock, flags); + *(int *)&reg_00 = io_apic_read(ioapic, 0); + spin_unlock_irqrestore(&ioapic_lock, flags); + + if (apic_id >= IO_APIC_MAX_ID) { + printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " + "%d\n", ioapic, apic_id, reg_00.ID); + apic_id = reg_00.ID; + } + + /* + * Every APIC in a system must have a unique ID or we get lots of nice + * 'stuck on smp_invalidate_needed IPI wait' messages. 
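+ * (Sketch of the scheme used here: apic_id_map starts as + * phys_cpu_present_map, and on a collision the loop below hands out the + * lowest ID still free below IO_APIC_MAX_ID.)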
+ */ + if (apic_id_map & (1 << apic_id)) { + + for (i = 0; i < IO_APIC_MAX_ID; i++) { + if (!(apic_id_map & (1 << i))) + break; + } + + if (i == IO_APIC_MAX_ID) + panic("Max apic_id exceeded!\n"); + + printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " + "trying %d\n", ioapic, apic_id, i); + + apic_id = i; + } + + apic_id_map |= (1 << apic_id); + + if (reg_00.ID != apic_id) { + reg_00.ID = apic_id; + + spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(ioapic, 0, *(int *)&reg_00); + *(int *)&reg_00 = io_apic_read(ioapic, 0); + spin_unlock_irqrestore(&ioapic_lock, flags); + + /* Sanity check */ + if (reg_00.ID != apic_id) + panic("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); + } + + printk(KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); + + return apic_id; +} + + +int __init io_apic_get_version (int ioapic) +{ + struct IO_APIC_reg_01 reg_01; + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + *(int *)&reg_01 = io_apic_read(ioapic, 1); + spin_unlock_irqrestore(&ioapic_lock, flags); + + return reg_01.version; +} + + +int __init io_apic_get_redir_entries (int ioapic) +{ + struct IO_APIC_reg_01 reg_01; + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + *(int *)&reg_01 = io_apic_read(ioapic, 1); + spin_unlock_irqrestore(&ioapic_lock, flags); + + return reg_01.entries; +} + + +int io_apic_set_pci_routing (int ioapic, int pin, int irq) +{ + struct IO_APIC_route_entry entry; + unsigned long flags; + + if (!IO_APIC_IRQ(irq)) { + printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", + ioapic); + return -EINVAL; + } + + /* + * Generate a PCI IRQ routing entry and program the IOAPIC accordingly. + * Note that we mask (disable) IRQs now -- these get enabled when the + * corresponding device driver registers for this IRQ. + */ + + memset(&entry,0,sizeof(entry)); + + entry.delivery_mode = dest_LowestPrio; + entry.dest_mode = INT_DELIVERY_MODE; + entry.dest.logical.logical_dest = target_cpus(); + entry.mask = 1; /* Disabled (masked) */ + entry.trigger = 1; /* Level sensitive */ + entry.polarity = 1; /* Low active */ + + add_pin_to_irq(irq, ioapic, pin); + + entry.vector = assign_irq_vector(irq); + + printk(KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> " + "IRQ %d)\n", ioapic, + mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq); + + irq_desc[irq].handler = &ioapic_level_irq_type; + + set_intr_gate(entry.vector, interrupt[irq]); + + if (!ioapic && (irq < 16)) + disable_8259A_irq(irq); + + spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1)); + io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0)); + spin_unlock_irqrestore(&ioapic_lock, flags); + + return 0; +} + +#endif /*CONFIG_ACPI_BOOT*/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/irq.c linux.21rc5-ac2/arch/i386/kernel/irq.c --- linux.21rc5/arch/i386/kernel/irq.c 2003-05-28 15:08:06.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/irq.c 2003-04-22 16:44:36.000000000 +0100 @@ -1090,7 +1090,7 @@ static struct proc_dir_entry * smp_affinity_entry [NR_IRQS]; -static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL }; +unsigned long irq_affinity [NR_IRQS] = { [0 ... 
NR_IRQS-1] = ~0UL }; static int irq_affinity_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/ldt.c linux.21rc5-ac2/arch/i386/kernel/ldt.c --- linux.21rc5/arch/i386/kernel/ldt.c 2003-05-28 15:08:06.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/ldt.c 2003-04-22 16:44:36.000000000 +0100 @@ -12,37 +12,139 @@ #include #include #include +#include #include #include #include #include +#ifdef CONFIG_SMP /* avoids "defined but not used" warning */ +static void flush_ldt(void *mm) +{ + if (current->active_mm) + load_LDT(&current->active_mm->context); +} +#endif + +static int alloc_ldt(mm_context_t *pc, int mincount, int reload) +{ + void *oldldt; + void *newldt; + int oldsize; + + if (mincount <= pc->size) + return 0; + oldsize = pc->size; + mincount = (mincount+511)&(~511); + if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE) + newldt = vmalloc(mincount*LDT_ENTRY_SIZE); + else + newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL); + + if (!newldt) + return -ENOMEM; + + if (oldsize) + memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE); + + oldldt = pc->ldt; + memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE); + wmb(); + pc->ldt = newldt; + pc->size = mincount; + if (reload) { + load_LDT(pc); +#ifdef CONFIG_SMP + if (current->mm->cpu_vm_mask != (1 << smp_processor_id())) + smp_call_function(flush_ldt, 0, 1, 1); +#endif + } + if (oldsize) { + if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE) + vfree(oldldt); + else + kfree(oldldt); + } + return 0; +} + +static inline int copy_ldt(mm_context_t *new, mm_context_t *old) +{ + int err = alloc_ldt(new, old->size, 0); + if (err < 0) { + printk(KERN_WARNING "ldt allocation failed\n"); + new->size = 0; + return err; + } + memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE); + return 0; +} + +/* + * we do not have to muck with descriptors here, that is + * done in switch_mm() as needed. + */ +int init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + struct mm_struct * old_mm; + int retval = 0; + + init_MUTEX(&mm->context.sem); + mm->context.size = 0; + old_mm = current->mm; + if (old_mm && old_mm->context.size > 0) { + down(&old_mm->context.sem); + retval = copy_ldt(&mm->context, &old_mm->context); + up(&old_mm->context.sem); + } + return retval; +} + /* - * read_ldt() is not really atomic - this is not a problem since - * synchronization of reads and writes done to the LDT has to be - * assured by user-space anyway. Writes are atomic, to protect - * the security checks done on new descriptors. + * No need to lock the MM as we are the last user. + * Do not touch the ldt register; we are already + * in the next thread. 
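+ * + * (Free with the allocator that matched alloc_ldt(): LDTs larger than + * PAGE_SIZE came from vmalloc(), smaller ones from kmalloc(), hence the + * size check below.)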
*/ +void destroy_context(struct mm_struct *mm) +{ + if (mm->context.size) { + if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE) + vfree(mm->context.ldt); + else + kfree(mm->context.ldt); + mm->context.size = 0; + } +} + static int read_ldt(void * ptr, unsigned long bytecount) { int err; unsigned long size; struct mm_struct * mm = current->mm; - err = 0; - if (!mm->context.segments) - goto out; + if (!mm->context.size) + return 0; + if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) + bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; - size = LDT_ENTRIES*LDT_ENTRY_SIZE; + down(&mm->context.sem); + size = mm->context.size*LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; - err = size; - if (copy_to_user(ptr, mm->context.segments, size)) + err = 0; + if (copy_to_user(ptr, mm->context.ldt, size)) err = -EFAULT; -out: - return err; + up(&mm->context.sem); + if (err < 0) + return err; + if (size != bytecount) { + /* zero-fill the rest */ + clear_user(ptr+size, bytecount-size); + } + return bytecount; } static int read_default_ldt(void * ptr, unsigned long bytecount) @@ -53,7 +155,7 @@ err = 0; address = &default_ldt[0]; - size = sizeof(struct desc_struct); + size = 5*sizeof(struct desc_struct); if (size > bytecount) size = bytecount; @@ -88,24 +190,14 @@ goto out; } - /* - * the GDT index of the LDT is allocated dynamically, and is - * limited by MAX_LDT_DESCRIPTORS. - */ - down_write(&mm->mmap_sem); - if (!mm->context.segments) { - void * segments = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE); - error = -ENOMEM; - if (!segments) + down(&mm->context.sem); + if (ldt_info.entry_number >= mm->context.size) { + error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1); + if (error < 0) goto out_unlock; - memset(segments, 0, LDT_ENTRIES*LDT_ENTRY_SIZE); - wmb(); - mm->context.segments = segments; - mm->context.cpuvalid = 1UL << smp_processor_id(); - load_LDT(mm); } - lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.segments); + lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt); /* Allow LDTs to be cleared by the user. */ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { @@ -143,7 +235,7 @@ error = 0; out_unlock: - up_write(&mm->mmap_sem); + up(&mm->context.sem); out: return error; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/longhaul.c linux.21rc5-ac2/arch/i386/kernel/longhaul.c --- linux.21rc5/arch/i386/kernel/longhaul.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/longhaul.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,813 @@ +/* + * $Id: longhaul.c,v 1.83 2003/01/02 22:16:26 db Exp $ + * + * (C) 2001 Dave Jones. + * (C) 2002 Padraig Brady. + * + * Licensed under the terms of the GNU GPL License version 2. + * Based upon datasheets & sample CPUs kindly provided by VIA. + * + * VIA currently has 3 different versions of Longhaul. + * + * +---------------------+----------+---------------------------------+ + * | Marketing name | Codename | longhaul version / features. | + * +---------------------+----------+---------------------------------+ + * | Samuel/CyrixIII | C5A | v1 : multipliers only | + * | Samuel2/C3 | C3E/C5B | v1 : multipliers only | + * | Ezra | C5C | v2 : multipliers & voltage | + * | Ezra-T | C5M/C5N | v3 : multipliers, voltage & FSB | + * +---------------------+----------+---------------------------------+ + * + * BIG FAT DISCLAIMER: Work in progress code. 
Possibly *dangerous* + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DEBUG + +#ifdef DEBUG +#define dprintk(msg...) printk(msg) +#else +#define dprintk(msg...) do { } while(0); +#endif + +static int numscales=16, numvscales; +static int minvid, maxvid; +static int can_scale_voltage; +static int can_scale_fsb; +static int vrmrev; + + +/* Module parameters */ +static int dont_scale_voltage; +static int dont_scale_fsb; +static int current_fsb; + +#define __hlt() __asm__ __volatile__("hlt": : :"memory") + +/* + * Clock ratio tables. + * The eblcr ones specify the ratio read from the CPU. + * The clock_ratio ones specify what to write to the CPU. + */ + +/* VIA C3 Samuel 1 & Samuel 2 (stepping 0)*/ +static int __initdata longhaul1_clock_ratio[16] = { + -1, /* 0000 -> RESERVED */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + -1, /* 0011 -> RESERVED */ + -1, /* 0100 -> RESERVED */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + -1, /* 1110 -> RESERVED */ + -1, /* 1111 -> RESERVED */ +}; + +static int __initdata samuel1_eblcr[16] = { + 50, /* 0000 -> RESERVED */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + -1, /* 0011 -> RESERVED */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + -1, /* 0111 -> RESERVED */ + -1, /* 1000 -> RESERVED */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + -1, /* 1100 -> RESERVED */ + 75, /* 1101 -> 7.5x */ + -1, /* 1110 -> RESERVED */ + 65, /* 1111 -> 6.5x */ +}; + +/* VIA C3 Samuel2 Stepping 1->15 & VIA C3 Ezra */ +static int __initdata longhaul2_clock_ratio[16] = { + 100, /* 0000 -> 10.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 90, /* 0011 -> 9.0x */ + 95, /* 0100 -> 9.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 120, /* 1111 -> 12.0x */ +}; + +static int __initdata samuel2_eblcr[16] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 110, /* 0111 -> 11.0x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 130, /* 1110 -> 13.0x */ + 65, /* 1111 -> 6.5x */ +}; + +static int __initdata ezra_eblcr[16] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 95, /* 0111 -> 9.5x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 65, /* 1111 -> 6.5x */ +}; + +/* VIA C5M. 
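Ratios here are again stored multiplied by 10 (100 = 10.0x); as a sketch, at a 133 MHz FSB longhaul_setstate() computes 100 * 133 * 100 = 1330000 kHz. 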
*/ +static int __initdata longhaul3_clock_ratio[32] = { + 100, /* 0000 -> 10.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 90, /* 0011 -> 9.0x */ + 95, /* 0100 -> 9.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 120, /* 1111 -> 12.0x */ + + -1, /* 0000 -> RESERVED (10.0x) */ + 110, /* 0001 -> 11.0x */ + 120, /* 0010 -> 12.0x */ + -1, /* 0011 -> RESERVED (9.0x)*/ + 105, /* 0100 -> 10.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 135, /* 0111 -> 13.5x */ + 140, /* 1000 -> 14.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 1010 -> 16.0x */ + 130, /* 1011 -> 13.0x */ + 145, /* 1100 -> 14.5x */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + -1, /* 1111 -> RESERVED (12.0x) */ +}; + +static int __initdata c5m_eblcr[32] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 95, /* 0111 -> 9.5x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 65, /* 1111 -> 6.5x */ + + -1, /* 0000 -> RESERVED (9.0x) */ + 110, /* 0001 -> 11.0x */ + 120, /* 0010 -> 12.0x */ + -1, /* 0011 -> RESERVED (10.0x)*/ + 135, /* 0100 -> 13.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 105, /* 0111 -> 10.5x */ + 130, /* 1000 -> 13.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 1010 -> 16.0x */ + 140, /* 1011 -> 14.0x */ + -1, /* 1100 -> RESERVED (12.0x) */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + 145, /* 1111 -> 14.5x */ +}; + +/* fsb values as defined in CPU */ +static unsigned int eblcr_fsb_table[] = { 66, 133, 100, -1 }; +/* fsb values to favour low fsb speed (lower power) */ +static unsigned int power_fsb_table[] = { 66, 100, 133, -1 }; +/* fsb values to favour high fsb speed (for e.g. if lowering CPU + freq because of heat, but want to maintain highest performance possible) */ +static unsigned int perf_fsb_table[] = { 133, 100, 66, -1 }; +static unsigned int *fsb_search_table; + +/* Voltage scales. Div by 1000 to get actual voltage. */ +static int __initdata vrm85scales[32] = { + 1250, 1200, 1150, 1100, 1050, 1800, 1750, 1700, + 1650, 1600, 1550, 1500, 1450, 1400, 1350, 1300, + 1275, 1225, 1175, 1125, 1075, 1825, 1775, 1725, + 1675, 1625, 1575, 1525, 1475, 1425, 1375, 1325, +}; + +static int __initdata mobilevrmscales[32] = { + 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, + 1600, 1550, 1500, 1450, 1500, 1350, 1300, -1, + 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, + 1075, 1050, 1025, 1000, 975, 950, 925, -1, +}; + +/* Clock ratios multiplied by 10 */ +static int clock_ratio[32]; +static int eblcr_table[32]; +static int voltage_table[32]; +static int highest_speed, lowest_speed; /* kHz */ +static int longhaul; /* version. 
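+ * 1 = Samuel/early Samuel2 (multiplier only, via MSR_VIA_BCR2),
+ * 2 = later Samuel2/Ezra (adds voltage, via MSR_VIA_LONGHAUL),
+ * 3 = Ezra-T (adds FSB scaling).  Whatever the version, the
+ * transition in longhaul_setstate() below has the same
+ * arm/halt/disarm shape; condensed to a sketch (not the literal
+ * code):
+ *
+ *     rdmsr(msr, lo, hi);
+ *     revkey = (lo & 0xf) << 4;         rev key must be written back
+ *     wrmsr(msr, lo | soft_enable | (index << shift) | revkey, hi);
+ *     __hlt();                          change takes effect in halt
+ *     rdmsr(msr, lo, hi);
+ *     wrmsr(msr, (lo & ~soft_enable) | revkey, hi);
+ *
+ * with msr/soft_enable/shift being BCR2, bit 19, 23 for version 1
+ * and MSR_VIA_LONGHAUL, bit 8, 16 for versions 2 and 3.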
*/ +static struct cpufreq_driver *longhaul_driver; + + +static int longhaul_get_cpu_fsb (void) +{ + unsigned long invalue=0,lo, hi; + + if (current_fsb == 0) { + rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); + invalue = (lo & (1<<18|1<<19)) >>18; + return eblcr_fsb_table[invalue]; + } else { + return current_fsb; + } +} + + +static int longhaul_get_cpu_mult (void) +{ + unsigned long invalue=0,lo, hi; + + rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); + invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22; + if (longhaul==3) { + if (lo & (1<<27)) + invalue+=16; + } + return eblcr_table[invalue]; +} + + +/** + * longhaul_set_cpu_frequency() + * @clock_ratio_index : index of clock_ratio[] for new frequency + * @newfsb: the new FSB + * + * Sets a new clock ratio, and -if applicable- a new Front Side Bus + */ + +static void longhaul_setstate (unsigned int clock_ratio_index, unsigned int newfsb) +{ + unsigned long lo, hi; + unsigned int bits; + int revkey; + int vidindex, i; + struct cpufreq_freqs freqs; + + if (!newfsb || (clock_ratio[clock_ratio_index] == -1)) + return; + + if ((!can_scale_fsb) && (newfsb != current_fsb)) + return; + + if (((clock_ratio[clock_ratio_index] * newfsb * 100) > highest_speed) || + ((clock_ratio[clock_ratio_index] * newfsb * 100) < lowest_speed)) + return; + + freqs.old = longhaul_get_cpu_mult() * longhaul_get_cpu_fsb() * 100; + freqs.new = clock_ratio[clock_ratio_index] * newfsb * 100; + freqs.cpu = 0; /* longhaul.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + dprintk (KERN_INFO "longhaul: New FSB:%d Mult(x10):%d\n", + newfsb, clock_ratio[clock_ratio_index]); + + bits = clock_ratio_index; + /* "bits" contains the bitpattern of the new multiplier. + we now need to transform it to the desired format. */ + + switch (longhaul) { + case 1: + rdmsr (MSR_VIA_BCR2, lo, hi); + revkey = (lo & 0xf)<<4; /* Rev key. */ + lo &= ~(1<<23|1<<24|1<<25|1<<26); + lo |= (1<<19); /* Enable software clock multiplier */ + lo |= (bits<<23); /* desired multiplier */ + lo |= revkey; + wrmsr (MSR_VIA_BCR2, lo, hi); + + __hlt(); + + /* Disable software clock multiplier */ + rdmsr (MSR_VIA_BCR2, lo, hi); + lo &= ~(1<<19); + lo |= revkey; + wrmsr (MSR_VIA_BCR2, lo, hi); + break; + + case 2: + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + revkey = (lo & 0xf)<<4; /* Rev key. */ + lo &= 0xfff0bf0f; /* reset [19:16,14](bus ratio) and [7:4](rev key) to 0 */ + lo |= (bits<<16); + lo |= (1<<8); /* EnableSoftBusRatio */ + lo |= revkey; + + if (can_scale_voltage) { + if (can_scale_fsb==1) { + dprintk (KERN_INFO "longhaul: Voltage scaling + FSB scaling not done yet.\n"); + goto bad_voltage; + } else { + /* PB: TODO fix this up */ + vidindex = (((highest_speed-lowest_speed) / (newfsb/2)) - + ((highest_speed-((clock_ratio[clock_ratio_index] * newfsb * 100)/1000)) / (newfsb/2))); + } + for (i=0;i<32;i++) { + dprintk (KERN_INFO "VID hunting. 
Looking for %d, found %d\n", + minvid+(vidindex*25), voltage_table[i]); + if (voltage_table[i]==(minvid + (vidindex * 25))) + break; + } + if (i==32) + goto bad_voltage; + + dprintk (KERN_INFO "longhaul: Desired vid index=%d\n", i); +#if 0 + lo &= 0xfe0fffff;/* reset [24:20](voltage) to 0 */ + lo |= (i<<20); /* set voltage */ + lo |= (1<<9); /* EnableSoftVID */ +#endif + } + +bad_voltage: + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + __hlt(); + + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + lo &= ~(1<<8); + if (can_scale_voltage) + lo &= ~(1<<9); + lo |= revkey; + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + break; + + case 3: + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + revkey = (lo & 0xf)<<4; /* Rev key. */ + lo &= 0xfff0bf0f; /* reset longhaul[19:16,14] to 0 */ + lo |= (bits<<16); + lo |= (1<<8); /* EnableSoftBusRatio */ + lo |= revkey; + + /* Set FSB */ + if (can_scale_fsb==1) { + lo &= ~(1<<28|1<<29); + switch (newfsb) { + case 66: lo |= (1<<28|1<<29); /* 11 */ + break; + case 100: lo |= 1<<28; /* 01 */ + break; + case 133: break; /* 00*/ + } + } + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + __hlt(); + + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + lo &= ~(1<<8); + lo |= revkey; + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + break; + } + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +} + + +static void __init longhaul_get_ranges (void) +{ + unsigned long lo, hi, invalue; + unsigned int minmult=0, maxmult=0, minfsb=0, maxfsb=0; + unsigned int multipliers[32]= { + 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65, + -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 }; + unsigned int fsb_table[4] = { 133, 100, -1, 66 }; + + switch (longhaul) { + case 1: + /* Ugh, Longhaul v1 didn't have the min/max MSRs. + Assume min=3.0x & max = whatever we booted at. */ + minmult = 30; + maxmult = longhaul_get_cpu_mult(); + minfsb = maxfsb = current_fsb; + break; + + case 2 ... 3: + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + + invalue = (hi & (1<<0|1<<1|1<<2|1<<3)); + if (hi & (1<<11)) + invalue += 16; + maxmult=multipliers[invalue]; + +#if 0 /* This is MaxMhz @ Min Voltage. 
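+	   (These fields encode the maximum multiplier the part
+	   sustains at its minimum voltage, not the lowest multiplier
+	   it can run at, so they make a poor lower bound for scaling.)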
Ignore for now */ + invalue = (hi & (1<<16|1<<17|1<<18|1<<19)) >> 16; + if (hi & (1<<27)) + invalue += 16; + minmult = multipliers[invalue]; +#else + minmult = 30; /* as per spec */ +#endif + + if (can_scale_fsb==1) { + invalue = (hi & (1<<9|1<<10)) >> 9; + maxfsb = fsb_table[invalue]; + + invalue = (hi & (1<<25|1<<26)) >> 25; + minfsb = fsb_table[invalue]; + + dprintk (KERN_INFO "longhaul: Min FSB=%d Max FSB=%d\n", + minfsb, maxfsb); + } else { + minfsb = maxfsb = current_fsb; + } + break; + } + + highest_speed = maxmult * maxfsb * 100; + lowest_speed = minmult * minfsb * 100; + + dprintk (KERN_INFO "longhaul: MinMult(x10)=%d MaxMult(x10)=%d\n", + minmult, maxmult); + dprintk (KERN_INFO "longhaul: Lowestspeed=%d Highestspeed=%d\n", + lowest_speed, highest_speed); +} + + +static void __init longhaul_setup_voltagescaling (unsigned long lo, unsigned long hi) +{ + int revkey; + + can_scale_voltage = 1; + + minvid = (hi & (1<<20|1<<21|1<<22|1<<23|1<<24)) >> 20; /* 56:52 */ + maxvid = (hi & (1<<4|1<<5|1<<6|1<<7|1<<8)) >> 4; /* 40:36 */ + vrmrev = (lo & (1<<15))>>15; + + if (vrmrev==0) { + dprintk (KERN_INFO "longhaul: VRM 8.5 : "); + memcpy (voltage_table, vrm85scales, sizeof(voltage_table)); + numvscales = (voltage_table[maxvid]-voltage_table[minvid])/25; + } else { + dprintk (KERN_INFO "longhaul: Mobile VRM : "); + memcpy (voltage_table, mobilevrmscales, sizeof(voltage_table)); + numvscales = (voltage_table[maxvid]-voltage_table[minvid])/5; + } + + /* Current voltage isn't readable at first, so we need to + set it to a known value. The spec says to use maxvid */ + revkey = (lo & 0xf)<<4; /* Rev key. */ + lo &= 0xfe0fff0f; /* Mask unneeded bits */ + lo |= (1<<9); /* EnableSoftVID */ + lo |= revkey; /* Reinsert key */ + lo |= maxvid << 20; + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + minvid = voltage_table[minvid]; + maxvid = voltage_table[maxvid]; + dprintk ("Min VID=%d.%03d Max VID=%d.%03d, %d possible voltage scales\n", + maxvid/1000, maxvid%1000, minvid/1000, minvid%1000, numvscales); +} + + +static inline unsigned int longhaul_statecount_fsb(struct cpufreq_policy *policy, unsigned int fsb) { + unsigned int i, count = 0; + + for(i=0; imax) && + ((clock_ratio[i] * fsb * 100) >= policy->min)) + count++; + } + + return count; +} + + +static int longhaul_verify(struct cpufreq_policy *policy) +{ + unsigned int number_states = 0; + unsigned int i; + unsigned int fsb_index = 0; + unsigned int tmpfreq = 0; + unsigned int newmax = -1; + + if (!policy || !longhaul_driver) + return -EINVAL; + + policy->cpu = 0; + cpufreq_verify_within_limits(policy, lowest_speed, highest_speed); + + if (can_scale_fsb==1) { + for (fsb_index=0; fsb_search_table[fsb_index]!=-1; fsb_index++) + number_states += longhaul_statecount_fsb(policy, fsb_search_table[fsb_index]); + } else + number_states = longhaul_statecount_fsb(policy, current_fsb); + + if (number_states) + return 0; + + /* get frequency closest above current policy->max */ + if (can_scale_fsb==1) { + for (fsb_index=0; fsb_search_table[fsb_index] != -1; fsb_index++) + for(i=0; i policy->max) && + (tmpfreq < newmax)) + newmax = tmpfreq; + } + } else { + for(i=0; i policy->max) && + (tmpfreq < newmax)) + newmax = tmpfreq; + } + } + + policy->max = newmax; + + cpufreq_verify_within_limits(policy, lowest_speed, highest_speed); + + return 0; +} + + +static int longhaul_get_best_freq_for_fsb(struct cpufreq_policy *policy, + unsigned int min_mult, + unsigned int max_mult, + unsigned int fsb, + unsigned int *new_mult) +{ + unsigned int optimal = 0; + unsigned int found_optimal 
= 0; + unsigned int i; + + switch(policy->policy) { + case CPUFREQ_POLICY_POWERSAVE: + optimal = max_mult; + break; + case CPUFREQ_POLICY_PERFORMANCE: + optimal = min_mult; + } + + for(i=0; i policy->max) || + (freq < policy->min)) + continue; + switch(policy->policy) { + case CPUFREQ_POLICY_POWERSAVE: + if (clock_ratio[i] < clock_ratio[optimal]) { + found_optimal = 1; + optimal = i; + } + break; + case CPUFREQ_POLICY_PERFORMANCE: + if (clock_ratio[i] > clock_ratio[optimal]) { + found_optimal = 1; + optimal = i; + } + break; + } + } + + if (found_optimal) { + *new_mult = optimal; + return 1; + } + return 0; +} + + +static int longhaul_setpolicy (struct cpufreq_policy *policy) +{ + unsigned int i; + unsigned int fsb_index = 0; + unsigned int new_fsb = 0; + unsigned int new_clock_ratio = 0; + unsigned int min_mult = 0; + unsigned int max_mult = 0; + + + if (!longhaul_driver) + return -EINVAL; + + if (policy->policy==CPUFREQ_POLICY_PERFORMANCE) + fsb_search_table = perf_fsb_table; + else + fsb_search_table = power_fsb_table; + + for(i=0;i clock_ratio[i]) + min_mult = i; + } + + if (can_scale_fsb==1) { + unsigned int found = 0; + for (fsb_index=0; fsb_search_table[fsb_index]!=-1; fsb_index++) + { + if (longhaul_get_best_freq_for_fsb(policy, + min_mult, max_mult, + fsb_search_table[fsb_index], + &new_clock_ratio)) { + new_fsb = fsb_search_table[fsb_index]; + break; + } + } + if (!found) + return -EINVAL; + } else { + new_fsb = current_fsb; + if (!longhaul_get_best_freq_for_fsb(policy, min_mult, + max_mult, new_fsb, &new_clock_ratio)) + return -EINVAL; + } + + longhaul_setstate(new_clock_ratio, new_fsb); + + return 0; +} + + +static int __init longhaul_init (void) +{ + struct cpuinfo_x86 *c = cpu_data; + unsigned int currentspeed; + static int currentmult; + unsigned long lo, hi; + int ret; + struct cpufreq_driver *driver; + + if ((c->x86_vendor != X86_VENDOR_CENTAUR) || (c->x86 !=6) ) + return -ENODEV; + + switch (c->x86_model) { + case 6: /* VIA C3 Samuel C5A */ + longhaul=1; + memcpy (clock_ratio, longhaul1_clock_ratio, sizeof(longhaul1_clock_ratio)); + memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr)); + break; + + case 7: /* C5B / C5C */ + switch (c->x86_mask) { + case 0: + longhaul=1; + memcpy (clock_ratio, longhaul1_clock_ratio, sizeof(longhaul1_clock_ratio)); + memcpy (eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr)); + break; + case 1 ... 15: + longhaul=2; + memcpy (clock_ratio, longhaul2_clock_ratio, sizeof(longhaul2_clock_ratio)); + memcpy (eblcr_table, ezra_eblcr, sizeof(ezra_eblcr)); + break; + } + break; + + case 8: /* C5M/C5N */ + return -ENODEV; // Waiting on updated docs from VIA before this is usable + longhaul=3; + numscales=32; + memcpy (clock_ratio, longhaul3_clock_ratio, sizeof(longhaul3_clock_ratio)); + memcpy (eblcr_table, c5m_eblcr, sizeof(c5m_eblcr)); + break; + + default: + printk (KERN_INFO "longhaul: Unknown VIA CPU. Contact davej@suse.de\n"); + return -ENODEV; + } + + printk (KERN_INFO "longhaul: VIA CPU detected. 
Longhaul version %d supported\n", longhaul); + + current_fsb = longhaul_get_cpu_fsb(); + currentmult = longhaul_get_cpu_mult(); + currentspeed = currentmult * current_fsb * 100; + + dprintk (KERN_INFO "longhaul: CPU currently at %dMHz (%d x %d.%d)\n", + (currentspeed/1000), current_fsb, currentmult/10, currentmult%10); + + if (longhaul==2 || longhaul==3) { + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + if ((lo & (1<<0)) && (dont_scale_voltage==0)) + longhaul_setup_voltagescaling (lo, hi); + + if ((lo & (1<<1)) && (dont_scale_fsb==0) && (current_fsb==0)) + can_scale_fsb = 1; + } + + longhaul_get_ranges(); + + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = currentspeed; +#endif + + driver->verify = &longhaul_verify; + driver->setpolicy = &longhaul_setpolicy; + + driver->policy[0].cpu = 0; + driver->policy[0].min = (unsigned int) lowest_speed; + driver->policy[0].max = (unsigned int) highest_speed; + driver->policy[0].policy = CPUFREQ_POLICY_PERFORMANCE; + driver->policy[0].cpuinfo.min_freq = (unsigned int) lowest_speed; + driver->policy[0].cpuinfo.max_freq = (unsigned int) highest_speed; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + longhaul_driver = driver; + + ret = cpufreq_register(driver); + if (ret) { + longhaul_driver = NULL; + kfree(driver); + } + + return ret; +} + + +static void __exit longhaul_exit (void) +{ + if (longhaul_driver) { + cpufreq_unregister(); + kfree(longhaul_driver); + } +} + +MODULE_PARM (dont_scale_fsb, "i"); +MODULE_PARM (dont_scale_voltage, "i"); +MODULE_PARM (current_fsb, "i"); + +MODULE_AUTHOR ("Dave Jones "); +MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); +MODULE_LICENSE ("GPL"); + +module_init(longhaul_init); +module_exit(longhaul_exit); + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/longrun.c linux.21rc5-ac2/arch/i386/kernel/longrun.c --- linux.21rc5/arch/i386/kernel/longrun.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/longrun.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,292 @@ +/* + * $Id: longrun.c,v 1.18 2003/01/02 22:16:26 db Exp $ + * + * (C) 2002 Dominik Brodowski + * + * Licensed under the terms of the GNU GPL License version 2. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +static struct cpufreq_driver *longrun_driver; + +/** + * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz + * values into per cent values. 
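+ * (LongRun itself thinks in per cent of the attainable range while
+ * the cpufreq core deals in kHz, so these conversions run both ways.
+ * Worked example with illustrative numbers: for low_freq = 300000 and
+ * high_freq = 600000, a policy minimum of 450000 kHz becomes
+ *
+ *     pctg_lo = (450000 - 300000) / ((600000 - 300000) / 100) = 50
+ *
+ * exactly as longrun_set_policy() computes it.)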
In TMTA microcode, the following is valid: + * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) + */ +static unsigned int longrun_low_freq, longrun_high_freq; + + +/** + * longrun_get_policy - get the current LongRun policy + * @policy: struct cpufreq_policy where current policy is written into + * + * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS + * and MSR_TMTA_LONGRUN_CTRL + */ +static void longrun_get_policy(struct cpufreq_policy *policy) +{ + u32 msr_lo, msr_hi; + + if (!longrun_driver) + return; + + rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); + if (msr_lo & 0x01) + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + else + policy->policy = CPUFREQ_POLICY_POWERSAVE; + + rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + msr_lo &= 0x0000007F; + msr_hi &= 0x0000007F; + + policy->min = longrun_low_freq + msr_lo * + ((longrun_high_freq - longrun_low_freq) / 100); + policy->min = longrun_low_freq + msr_hi * + ((longrun_high_freq - longrun_low_freq) / 100); + policy->cpu = 0; +} + + +/** + * longrun_set_policy - sets a new CPUFreq policy + * @policy - new policy + * + * Sets a new CPUFreq policy on LongRun-capable processors. This function + * has to be called with cpufreq_driver locked. + */ +static int longrun_set_policy(struct cpufreq_policy *policy) +{ + u32 msr_lo, msr_hi; + u32 pctg_lo, pctg_hi; + + if (!longrun_driver || !policy) + return -EINVAL; + + pctg_lo = (policy->min - longrun_low_freq) / + ((longrun_high_freq - longrun_low_freq) / 100); + pctg_hi = (policy->max - longrun_low_freq) / + ((longrun_high_freq - longrun_low_freq) / 100); + + if (pctg_hi > 100) + pctg_hi = 100; + if (pctg_lo > pctg_hi) + pctg_lo = pctg_hi; + + /* performance or economy mode */ + rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); + msr_lo &= 0xFFFFFFFE; + switch (policy->policy) { + case CPUFREQ_POLICY_PERFORMANCE: + msr_lo |= 0x00000001; + break; + case CPUFREQ_POLICY_POWERSAVE: + break; + } + wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); + + /* lower and upper boundary */ + rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + msr_lo &= 0xFFFFFF80; + msr_hi &= 0xFFFFFF80; + msr_lo |= pctg_lo; + msr_hi |= pctg_hi; + wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + + return 0; +} + + +/** + * longrun_verify_poliy - verifies a new CPUFreq policy + * + * Validates a new CPUFreq policy. This function has to be called with + * cpufreq_driver locked. + */ +static int longrun_verify_policy(struct cpufreq_policy *policy) +{ + if (!policy || !longrun_driver) + return -EINVAL; + + policy->cpu = 0; + cpufreq_verify_within_limits(policy, + longrun_driver->policy[0].cpuinfo.min_freq, + longrun_driver->policy[0].cpuinfo.max_freq); + + return 0; +} + + +/** + * longrun_determine_freqs - determines the lowest and highest possible core frequency + * + * Determines the lowest and highest possible core frequencies on this CPU. 
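+ * (The TMTA relation quoted below has one unknown, low_freq; solved
+ * for it, with the percentage as an integer 0..100:
+ *
+ *     low_freq = (cur_freq - high_freq * pctg / 100) * 100 / (100 - pctg)
+ *
+ * e.g. 450 MHz at pctg 50 under a 600 MHz ceiling gives
+ * (450 - 300) * 100 / 50 = 300 MHz, which is what the arithmetic on
+ * the CPUID 0x80860007 results in the function body implements.)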
+ * This is neccessary to calculate the performance percentage according to + * TMTA rules: + * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq) + */ +static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, + unsigned int *high_freq) +{ + u32 msr_lo, msr_hi; + u32 save_lo, save_hi; + u32 eax, ebx, ecx, edx; + struct cpuinfo_x86 *c = cpu_data; + + if (!low_freq || !high_freq) + return -EINVAL; + + if (cpu_has(c, X86_FEATURE_LRTI)) { + /* if the LongRun Table Interface is present, the + * detection is a bit easier: + * For minimum frequency, read out the maximum + * level (msr_hi), write that into "currently + * selected level", and read out the frequency. + * For maximum frequency, read out level zero. + */ + /* minimum */ + rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi); + wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi); + rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); + *low_freq = msr_lo * 1000; /* to kHz */ + + /* maximum */ + wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi); + rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); + *high_freq = msr_lo * 1000; /* to kHz */ + + if (*low_freq > *high_freq) + *low_freq = *high_freq; + return 0; + } + + /* set the upper border to the value determined during TSC init */ + *high_freq = (cpu_khz / 1000); + *high_freq = *high_freq * 1000; + + /* get current borders */ + rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + save_lo = msr_lo & 0x0000007F; + save_hi = msr_hi & 0x0000007F; + + /* if current perf_pctg is larger than 90%, we need to decrease the + * upper limit to make the calculation more accurate. + */ + cpuid(0x80860007, &eax, &ebx, &ecx, &edx); + if (ecx > 90) { + /* set to 0 to 80 perf_pctg */ + msr_lo &= 0xFFFFFF80; + msr_hi &= 0xFFFFFF80; + msr_lo |= 0; + msr_hi |= 80; + wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + + /* read out current core MHz and current perf_pctg */ + cpuid(0x80860007, &eax, &ebx, &ecx, &edx); + + /* restore values */ + wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); + } + + /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) + * eqals + * low_freq * ( 1 - perf_pctg) = (cur_freq - high_freq * perf_pctg) + * + * high_freq * perf_pctg is stored tempoarily into "ebx". + */ + ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */ + + if ((ecx > 95) || (ecx == 0) || (eax < ebx)) + return -EIO; + + edx = (eax - ebx) / (100 - ecx); + *low_freq = edx * 1000; /* back to kHz */ + + if (*low_freq > *high_freq) + *low_freq = *high_freq; + + return 0; +} + + +/** + * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver + * + * Initializes the LongRun support. 
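+ * The driver struct and its per-CPU policy table are carved from a
+ * single kmalloc() below so that one kfree() tears both down; the
+ * policy array simply starts right behind the driver.  The shape of
+ * the idiom (sketch only):
+ *
+ *     d = kmalloc(sizeof(*d) +
+ *                 NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL);
+ *     if (!d)
+ *             return -ENOMEM;
+ *     d->policy = (struct cpufreq_policy *)(d + 1);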
+ */ +static int __init longrun_init(void) +{ + int result; + struct cpufreq_driver *driver; + struct cpuinfo_x86 *c = cpu_data; + + if (c->x86_vendor != X86_VENDOR_TRANSMETA || + !cpu_has(c, X86_FEATURE_LONGRUN)) + return 0; + + /* initialization of main "cpufreq" code*/ + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + + if (longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq)) { + kfree(driver); + return -EIO; + } + driver->policy[0].cpuinfo.min_freq = longrun_low_freq; + driver->policy[0].cpuinfo.max_freq = longrun_high_freq; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + longrun_get_policy(&driver->policy[0]); + +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = longrun_high_freq; /* dummy value */ +#endif + + driver->verify = &longrun_verify_policy; + driver->setpolicy = &longrun_set_policy; + + longrun_driver = driver; + + result = cpufreq_register(driver); + if (result) { + longrun_driver = NULL; + kfree(driver); + } + + return result; +} + + +/** + * longrun_exit - unregisters LongRun support + */ +static void __exit longrun_exit(void) +{ + if (longrun_driver) { + cpufreq_unregister(); + kfree(longrun_driver); + } +} + + +MODULE_AUTHOR ("Dominik Brodowski "); +MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe processors."); +MODULE_LICENSE ("GPL"); +module_init(longrun_init); +module_exit(longrun_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/Makefile linux.21rc5-ac2/arch/i386/kernel/Makefile --- linux.21rc5/arch/i386/kernel/Makefile 2003-05-28 15:08:06.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/Makefile 2003-04-28 17:57:37.000000000 +0100 @@ -36,9 +36,20 @@ obj-$(CONFIG_X86_CPUID) += cpuid.o obj-$(CONFIG_MICROCODE) += microcode.o obj-$(CONFIG_APM) += apm.o +obj-$(CONFIG_ACPI) += acpi.o +obj-$(CONFIG_ACPI_SLEEP) += acpi_wakeup.o obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o obj-$(CONFIG_X86_LOCAL_APIC) += mpparse.o apic.o nmi.o -obj-$(CONFIG_X86_IO_APIC) += io_apic.o acpitable.o +obj-$(CONFIG_X86_IO_APIC) += io_apic.o obj-$(CONFIG_X86_VISWS_APIC) += visws_apic.o +obj-$(CONFIG_EDD) += edd.o +obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o +obj-$(CONFIG_X86_LONGHAUL) += longhaul.o +obj-$(CONFIG_X86_SPEEDSTEP) += speedstep.o +obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o +obj-$(CONFIG_X86_LONGRUN) += longrun.o +obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o +obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o + include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/mpparse.c linux.21rc5-ac2/arch/i386/kernel/mpparse.c --- linux.21rc5/arch/i386/kernel/mpparse.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/mpparse.c 2003-05-03 19:05:29.000000000 +0100 @@ -9,12 +9,14 @@ * Erich Boleyn : MP v1.4 and additional changes. * Alan Cox : Added EBDA scanning * Ingo Molnar : various cleanups and rewrites - * Maciej W. Rozycki : Bits for default MP configurations + * Maciej W. 
Rozycki: Bits for default MP configurations + * Paul Diefenbaugh: Added full ACPI support */ #include #include #include +#include #include #include #include @@ -23,10 +25,12 @@ #include #include +#include #include #include #include #include +#include /* Have we found an MP table */ int smp_found_config; @@ -74,6 +78,7 @@ unsigned char clustered_apic_mode = CLUSTERED_APIC_NONE; unsigned int apic_broadcast_id = APIC_BROADCAST_ID_APIC; #endif +unsigned int xapic_support = 0; unsigned char raw_phys_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; /* @@ -137,12 +142,6 @@ return n; } -#ifdef CONFIG_X86_IO_APIC -extern int have_acpi_tables; /* set by acpitable.c */ -#else -#define have_acpi_tables (0) -#endif - /* * Have to match translation table entries to main table entries by counter * hence the mpc_record variable .... can't see a less disgusting way of @@ -240,6 +239,8 @@ return; } ver = m->mpc_apicver; + if (APIC_XAPIC_SUPPORT(ver)) + xapic_support = 1; logical_cpu_present_map |= 1 << (num_processors-1); phys_cpu_present_map |= apicid_to_phys_cpu_present(m->mpc_apicid); @@ -447,10 +448,11 @@ printk("APIC at: 0x%lX\n",mpc->mpc_lapic); - /* save the local APIC address, it might be non-default, - * but only if we're not using the ACPI tables + /* + * Save the local APIC address (it might be non-default) -- but only + * if we're not using ACPI. */ - if (!have_acpi_tables) + if (!acpi_lapic) mp_lapic_addr = mpc->mpc_lapic; if ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) && mpc->mpc_oemptr) { @@ -530,9 +532,8 @@ { struct mpc_config_processor *m= (struct mpc_config_processor *)mpt; - - /* ACPI may already have provided this one for us */ - if (!have_acpi_tables) + /* ACPI may have already provided this data */ + if (!acpi_lapic) MP_processor_info(m); mpt += sizeof(*m); count += sizeof(*m); @@ -589,15 +590,6 @@ } - printk("Enabling APIC mode: "); - if(clustered_apic_mode == CLUSTERED_APIC_NUMAQ) - printk("Clustered Logical. "); - else if(clustered_apic_mode == CLUSTERED_APIC_XAPIC) - printk("Physical. "); - else - printk("Flat. "); - printk("Using %d I/O APICs\n",nr_ioapics); - if (!num_processors) printk(KERN_ERR "SMP mptable: no processors registered!\n"); return num_processors; @@ -761,7 +753,6 @@ } static struct intel_mp_floating *mpf_found; -extern void config_acpi_tables(void); /* * Scan the memory blocks for an SMP configuration block. @@ -770,17 +761,19 @@ { struct intel_mp_floating *mpf = mpf_found; -#ifdef CONFIG_X86_IO_APIC /* - * Check if the ACPI tables are provided. Use them only to get - * the processor information, mainly because it provides - * the info on the logical processor(s), rather than the physical - * processor(s) that are provided by the MPS. We attempt to - * check only if the user provided a commandline override + * ACPI may be used to obtain the entire SMP configuration or just to + * enumerate/configure processors (CONFIG_ACPI_HT_ONLY). Note that + * ACPI supports both logical (e.g. Hyper-Threading) and physical + * processors, where MPS only supports physical. 
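+ * acpi_lapic set on its own means ACPI enumerated the processors but
+ * the I/O APICs still come from the MPS table; acpi_lapic together
+ * with acpi_ioapic means the MPS table can be skipped outright, which
+ * is exactly the distinction the checks below make.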
*/ - config_acpi_tables(); -#endif - + if (acpi_lapic && acpi_ioapic) { + printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); + return; + } + else if (acpi_lapic) + printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); + printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); if (mpf->mpf_feature2 & (1<<7)) { printk(" IMCR and PIC compatibility mode.\n"); @@ -832,6 +825,34 @@ BUG(); printk("Processors: %d\n", num_processors); + printk("xAPIC support %s present\n", (xapic_support?"is":"is not")); + +#ifdef CONFIG_X86_CLUSTERED_APIC + /* + * Switch to Physical destination mode in case of generic + * more than 8 CPU system, which has xAPIC support + */ +#define FLAT_APIC_CPU_MAX 8 + if ((clustered_apic_mode == CLUSTERED_APIC_NONE) && + (xapic_support) && + (num_processors > FLAT_APIC_CPU_MAX)) { + clustered_apic_mode = CLUSTERED_APIC_XAPIC; + apic_broadcast_id = APIC_BROADCAST_ID_XAPIC; + int_dest_addr_mode = APIC_DEST_PHYSICAL; + int_delivery_mode = dest_Fixed; + esr_disable = 1; + } +#endif + + printk("Enabling APIC mode: "); + if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) + printk("Clustered Logical. "); + else if (clustered_apic_mode == CLUSTERED_APIC_XAPIC) + printk("Physical. "); + else + printk("Flat. "); + printk("Using %d I/O APICs\n",nr_ioapics); + /* * Only use the first configuration found. */ @@ -939,3 +960,393 @@ #endif } + +/* -------------------------------------------------------------------------- + ACPI-based MP Configuration + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_BOOT + +void __init mp_register_lapic_address ( + u64 address) +{ + mp_lapic_addr = (unsigned long) address; + + set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); + + if (boot_cpu_physical_apicid == -1U) + boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + + Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); +} + + +void __init mp_register_lapic ( + u8 id, + u8 enabled) +{ + struct mpc_config_processor processor; + int boot_cpu = 0; + + if (id >= MAX_APICS) { + printk(KERN_WARNING "Processor #%d invalid (max %d)\n", + id, MAX_APICS); + return; + } + + if (id == boot_cpu_physical_apicid) + boot_cpu = 1; + + processor.mpc_type = MP_PROCESSOR; + processor.mpc_apicid = id; + processor.mpc_apicver = 0x10; /* TBD: lapic version */ + processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); + processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); + processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; + processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; + processor.mpc_reserved[0] = 0; + processor.mpc_reserved[1] = 0; + + MP_processor_info(&processor); +} + +#ifdef CONFIG_X86_IO_APIC + +#define MP_ISA_BUS 0 +#define MP_MAX_IOAPIC_PIN 127 + +struct mp_ioapic_routing { + int apic_id; + int irq_start; + int irq_end; + u32 pin_programmed[4]; +} mp_ioapic_routing[MAX_IO_APICS]; + + +static int __init mp_find_ioapic ( + int irq) +{ + int i = 0; + + /* Find the IOAPIC that manages this IRQ. 
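+ * Each mp_ioapic_routing[] entry spans the global IRQs irq_start to
+ * irq_end inclusive.  Worked example with illustrative numbers: two
+ * 24-pin IOAPICs covering 0-23 and 24-47 resolve global IRQ 30 to the
+ * second IOAPIC, where the pin is recovered as
+ *
+ *     pin = 30 - mp_ioapic_routing[1].irq_start;    i.e. pin 6
+ *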
*/ + for (i = 0; i < nr_ioapics; i++) { + if ((irq >= mp_ioapic_routing[i].irq_start) + && (irq <= mp_ioapic_routing[i].irq_end)) + return i; + } + + printk(KERN_ERR "ERROR: Unable to locate IOAPIC for IRQ %d/n", irq); + + return -1; +} + + +void __init mp_register_ioapic ( + u8 id, + u32 address, + u32 irq_base) +{ + int idx = 0; + + if (nr_ioapics >= MAX_IO_APICS) { + printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " + "(found %d)\n", MAX_IO_APICS, nr_ioapics); + panic("Recompile kernel with bigger MAX_IO_APICS!\n"); + } + if (!address) { + printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" + " found in MADT table, skipping!\n"); + return; + } + + idx = nr_ioapics++; + + mp_ioapics[idx].mpc_type = MP_IOAPIC; + mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; + mp_ioapics[idx].mpc_apicaddr = address; + + set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); + mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id); + mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); + + /* + * Build basic IRQ lookup table to facilitate irq->io_apic lookups + * and to prevent reprogramming of IOAPIC pins (PCI IRQs). + */ + mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; + mp_ioapic_routing[idx].irq_start = irq_base; + mp_ioapic_routing[idx].irq_end = irq_base + + io_apic_get_redir_entries(idx); + + printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, " + "IRQ %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, + mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, + mp_ioapic_routing[idx].irq_start, + mp_ioapic_routing[idx].irq_end); + + return; +} + + +void __init mp_override_legacy_irq ( + u8 bus_irq, + u8 polarity, + u8 trigger, + u32 global_irq) +{ + struct mpc_config_intsrc intsrc; + int i = 0; + int found = 0; + int ioapic = -1; + int pin = -1; + + /* + * Convert 'global_irq' to 'ioapic.pin'. + */ + ioapic = mp_find_ioapic(global_irq); + if (ioapic < 0) + return; + pin = global_irq - mp_ioapic_routing[ioapic].irq_start; + + /* + * TBD: This check is for faulty timer entries, where the override + * erroneously sets the trigger to level, resulting in a HUGE + * increase of timer interrupts! + */ + if ((bus_irq == 0) && (global_irq == 2) && (trigger == 3)) + trigger = 1; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqtype = mp_INT; + intsrc.mpc_irqflag = (trigger << 2) | polarity; + intsrc.mpc_srcbus = MP_ISA_BUS; + intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ + intsrc.mpc_dstirq = pin; /* INTIN# */ + + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", + intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); + + /* + * If an existing [IOAPIC.PIN -> IRQ] routing entry exists we override it. + * Otherwise create a new entry (e.g. global_irq == 2). + */ + for (i = 0; i < mp_irq_entries; i++) { + if ((mp_irqs[i].mpc_dstapic == intsrc.mpc_dstapic) + && (mp_irqs[i].mpc_dstirq == intsrc.mpc_dstirq)) { + mp_irqs[i] = intsrc; + found = 1; + break; + } + } + if (!found) { + mp_irqs[mp_irq_entries] = intsrc; + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!\n"); + } + + return; +} + + +void __init mp_config_acpi_legacy_irqs (void) +{ + int i = 0; + int ioapic = -1; + + /* + * Initialize mp_irqs for IRQ configuration. 
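+ * (The flag word built by mp_override_legacy_irq() above packs the
+ * MPS trigger mode into bits 3:2 and the polarity into bits 1:0, so
+ * the usual ACPI SCI case of level-triggered, active-low encodes as
+ * (3 << 2) | 3 == 0xf, while 0 means "conforms to the bus" - the
+ * default used for the identity-mapped ISA IRQs fabricated below.)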
+ */ + unsigned char *bus_data; + int count; + + count = (MAX_MP_BUSSES * sizeof(int)) * 4; + count += (MAX_IRQ_SOURCES * sizeof(int)) * 4; + bus_data = alloc_bootmem(count); + if (!bus_data) { + panic("Fatal: can't allocate bus memory for ACPI legacy IRQ!"); + } + mp_bus_id_to_type = (int *)&bus_data[0]; + mp_bus_id_to_node = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int))]; + mp_bus_id_to_local = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 2]; + mp_bus_id_to_pci_bus = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 3]; + mp_irqs = (struct mpc_config_intsrc *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 4]; + memset(mp_bus_id_to_pci_bus, -1, MAX_MP_BUSSES); + + /* + * Fabricate the legacy ISA bus (bus #31). + */ + mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; + Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); + + /* + * Locate the IOAPIC that manages the ISA IRQs (0-15). + */ + ioapic = mp_find_ioapic(0); + if (ioapic < 0) + return; + + /* + * Use the default configuration for the IRQs 0-15. These may be + * overriden by (MADT) interrupt source override entries. + */ + for (i = 0; i < 16; i++) { + + if (i == 2) continue; /* Don't connect IRQ2 */ + + mp_irqs[mp_irq_entries].mpc_type = MP_INTSRC; + mp_irqs[mp_irq_entries].mpc_irqflag = 0; /* Conforming */ + mp_irqs[mp_irq_entries].mpc_srcbus = MP_ISA_BUS; + mp_irqs[mp_irq_entries].mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; + mp_irqs[mp_irq_entries].mpc_irqtype = i ? mp_INT : mp_ExtINT; /* 8259A to #0 */ + mp_irqs[mp_irq_entries].mpc_srcbusirq = i; /* Identity mapped */ + mp_irqs[mp_irq_entries].mpc_dstirq = i; + + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " + "%d-%d\n", + mp_irqs[mp_irq_entries].mpc_irqtype, + mp_irqs[mp_irq_entries].mpc_irqflag & 3, + (mp_irqs[mp_irq_entries].mpc_irqflag >> 2) & 3, + mp_irqs[mp_irq_entries].mpc_srcbus, + mp_irqs[mp_irq_entries].mpc_srcbusirq, + mp_irqs[mp_irq_entries].mpc_dstapic, + mp_irqs[mp_irq_entries].mpc_dstirq); + + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!\n"); + } +} + +#ifndef CONFIG_ACPI_HT_ONLY + +/* Ensure the ACPI SCI interrupt level is active low, edge-triggered */ + +extern FADT_DESCRIPTOR acpi_fadt; + +void __init mp_config_ioapic_for_sci(int irq) +{ + int ioapic; + int ioapic_pin; + struct acpi_table_madt* madt; + struct acpi_table_int_src_ovr *entry = NULL; + void *madt_end; + acpi_status status; + + /* + * Ensure that if there is an interrupt source override entry + * for the ACPI SCI, we leave it as is. Unfortunately this involves + * walking the MADT again. + */ + status = acpi_get_firmware_table("APIC", 1, ACPI_LOGICAL_ADDRESSING, + (struct acpi_table_header **) &madt); + if (ACPI_SUCCESS(status)) { + madt_end = (void *) (unsigned long)madt + madt->header.length; + + entry = (struct acpi_table_int_src_ovr *) + ((unsigned long) madt + sizeof(struct acpi_table_madt)); + + while ((void *) entry < madt_end) { + if (entry->header.type == ACPI_MADT_INT_SRC_OVR && + acpi_fadt.sci_int == entry->bus_irq) { + /* + * See the note at the end of ACPI 2.0b section + * 5.2.10.8 for what this is about. 
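+				 * In short: when an override remaps the
+				 * SCI away from its ISA IRQ (say
+				 * sci_int == 9 routed to global IRQ 20),
+				 * the FADT value is stale, so both
+				 * acpi_fadt.sci_int and the pin we
+				 * program must follow global_irq.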
+ */ + if (entry->bus_irq != entry->global_irq) { + acpi_fadt.sci_int = entry->global_irq; + irq = entry->global_irq; + break; + } + else + return; + } + + entry = (struct acpi_table_int_src_ovr *) + ((unsigned long) entry + entry->header.length); + } + } + + ioapic = mp_find_ioapic(irq); + + ioapic_pin = irq - mp_ioapic_routing[ioapic].irq_start; + + io_apic_set_pci_routing(ioapic, ioapic_pin, irq); +} + +#endif /*CONFIG_ACPI_HT_ONLY*/ + +#ifdef CONFIG_ACPI_PCI + +void __init mp_parse_prt (void) +{ + struct list_head *node = NULL; + struct acpi_prt_entry *entry = NULL; + int ioapic = -1; + int ioapic_pin = 0; + int irq = 0; + int idx, bit = 0; + + /* + * Parsing through the PCI Interrupt Routing Table (PRT) and program + * routing for all static (IOAPIC-direct) entries. + */ + list_for_each(node, &acpi_prt.entries) { + entry = list_entry(node, struct acpi_prt_entry, node); + + /* Need to get irq for dynamic entry */ + if (entry->link.handle) { + irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index); + if (!irq) + continue; + } + else + irq = entry->link.index; + + ioapic = mp_find_ioapic(irq); + if (ioapic < 0) + continue; + ioapic_pin = irq - mp_ioapic_routing[ioapic].irq_start; + + /* + * Avoid pin reprogramming. PRTs typically include entries + * with redundant pin->irq mappings (but unique PCI devices); + * we only only program the IOAPIC on the first. + */ + bit = ioapic_pin % 32; + idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); + if (idx > 3) { + printk(KERN_ERR "Invalid reference to IOAPIC pin " + "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, + ioapic_pin); + continue; + } + if ((1<irq = irq; + continue; + } + + mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<irq = irq; + + printk(KERN_DEBUG "%02x:%02x:%02x[%c] -> %d-%d -> IRQ %d\n", + entry->id.segment, entry->id.bus, + entry->id.device, ('A' + entry->pin), + mp_ioapic_routing[ioapic].apic_id, ioapic_pin, + entry->irq); + } + + return; +} + +#endif /*CONFIG_ACPI_PCI*/ + +#endif /*CONFIG_X86_IO_APIC*/ + +#endif /*CONFIG_ACPI_BOOT*/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/p4-clockmod.c linux.21rc5-ac2/arch/i386/kernel/p4-clockmod.c --- linux.21rc5/arch/i386/kernel/p4-clockmod.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/p4-clockmod.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,287 @@ +/* + * Pentium 4/Xeon CPU on demand clock modulation/speed scaling + * (C) 2002 Zwane Mwaikambo + * (C) 2002 Arjan van de Ven + * (C) 2002 Tora T. Engstad + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * The author(s) of this software shall not be held liable for damages + * of any nature resulting due to the use of this software. This + * software is provided AS-IS with no warranties. 
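+ *
+ * The mechanism: bit 4 of IA32_THERM_CONTROL arms on-demand clock
+ * modulation and bits 3:1 select the duty cycle in 12.5% steps, so a
+ * state n (1..7) yields an effective frequency of stock_freq * n / 8.
+ * Condensed programming step (sketch, not the literal code):
+ *
+ *     rdmsr(MSR_IA32_THERM_CONTROL, l, h);
+ *     l = (l & ~14) | (1 << 4) | ((state & 0x7) << 1);
+ *     wrmsr(MSR_IA32_THERM_CONTROL, l, h);
+ *
+ * e.g. DC_50PT (state 4) throttles a 2400000 kHz part to 1200000 kHz.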
+ * + * Date Errata Description + * 20020525 N44, O17 12.5% or 25% DC causes lockup + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define PFX "cpufreq: " + +/* + * Duty Cycle (3bits), note DC_DISABLE is not specified in + * intel docs i just use it to mean disable + */ +enum { + DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT, + DC_64PT, DC_75PT, DC_88PT, DC_DISABLE +}; + +#define DC_ENTRIES 8 + + +static int has_N44_O17_errata; +static int stock_freq; +MODULE_PARM(stock_freq, "i"); + +static struct cpufreq_driver *cpufreq_p4_driver; + + +static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) +{ + u32 l, h; + unsigned long cpus_allowed; + struct cpufreq_freqs freqs; + int hyperthreading = 0; + int affected_cpu_map = 0; + int sibling = 0; + + if (!cpu_online(cpu) || (newstate > DC_DISABLE) || + (newstate == DC_RESV)) + return -EINVAL; + + /* switch to physical CPU where state is to be changed*/ + cpus_allowed = current->cpus_allowed; + + /* only run on CPU to be set, or on its sibling */ + affected_cpu_map = 1 << cpu; +#ifdef CONFIG_X86_HT + hyperthreading = ((cpu_has_ht) && (smp_num_siblings == 2)); + if (hyperthreading) { + sibling = cpu_sibling_map[cpu]; + affected_cpu_map |= (1 << sibling); + } +#endif + set_cpus_allowed(current, affected_cpu_map); + BUG_ON(!(affected_cpu_map & (1 << smp_processor_id()))); + + /* get current state */ + rdmsr(MSR_IA32_THERM_CONTROL, l, h); + if (l & 0x10) { + l = l >> 1; + l &= 0x7; + } else + l = DC_DISABLE; + + if (l == newstate) { + set_cpus_allowed(current, cpus_allowed); + return 0; + } else if (l == DC_RESV) { + printk(KERN_ERR PFX "BIG FAT WARNING: currently in invalid setting\n"); + } + + /* notifiers */ + freqs.old = stock_freq * l / 8; + freqs.new = stock_freq * newstate / 8; + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + if (hyperthreading) { + freqs.cpu = sibling; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + + rdmsr(MSR_IA32_THERM_STATUS, l, h); + if (l & 0x01) + printk(KERN_DEBUG PFX "CPU#%d currently thermal throttled\n", cpu); + + if (has_N44_O17_errata && (newstate == DC_25PT || newstate == DC_DFLT)) + newstate = DC_38PT; + + rdmsr(MSR_IA32_THERM_CONTROL, l, h); + if (newstate == DC_DISABLE) { + printk(KERN_INFO PFX "CPU#%d disabling modulation\n", cpu); + wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); + } else { + printk(KERN_INFO PFX "CPU#%d setting duty cycle to %d%%\n", cpu, ((125 * newstate) / 10)); + /* bits 63 - 5 : reserved + * bit 4 : enable/disable + * bits 3-1 : duty cycle + * bit 0 : reserved + */ + l = (l & ~14); + l = l | (1<<4) | ((newstate & 0x7)<<1); + wrmsr(MSR_IA32_THERM_CONTROL, l, h); + } + + set_cpus_allowed(current, cpus_allowed); + + /* notifiers */ + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + if (hyperthreading) { + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + + return 0; +} + + +static struct cpufreq_frequency_table p4clockmod_table[] = { + {DC_RESV, CPUFREQ_ENTRY_INVALID}, + {DC_DFLT, 0}, + {DC_25PT, 0}, + {DC_38PT, 0}, + {DC_50PT, 0}, + {DC_64PT, 0}, + {DC_75PT, 0}, + {DC_88PT, 0}, + {DC_DISABLE, 0}, + {DC_RESV, CPUFREQ_TABLE_END}, +}; + + +static int cpufreq_p4_setpolicy(struct cpufreq_policy *policy) +{ + unsigned int newstate = DC_RESV; + + if (cpufreq_frequency_table_setpolicy(policy, &p4clockmod_table[0], &newstate)) + return -EINVAL; + + cpufreq_p4_setdc(policy->cpu, newstate); + + return 0; +} + + +static int 
cpufreq_p4_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); +} + + +static int __init cpufreq_p4_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + int cpuid; + int ret; + struct cpufreq_driver *driver; + unsigned int i; + + /* + * THERM_CONTROL is architectural for IA32 now, so + * we can rely on the capability checks + */ + if (c->x86_vendor != X86_VENDOR_INTEL) + return -ENODEV; + + if (!test_bit(X86_FEATURE_ACPI, c->x86_capability) || + !test_bit(X86_FEATURE_ACC, c->x86_capability)) + return -ENODEV; + + /* Errata workarounds */ + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; + switch (cpuid) { + case 0x0f07: + case 0x0f0a: + case 0x0f11: + case 0x0f12: + has_N44_O17_errata = 1; + default: + break; + } + + printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n"); + + if (!stock_freq) { + if (cpu_khz) + stock_freq = cpu_khz; + else { + printk(KERN_INFO PFX "unknown core frequency - please use module parameter 'stock_freq'\n"); + return -EINVAL; + } + } + + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + + /* table init */ + for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { + if ((i<2) && (has_N44_O17_errata)) + p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; + else + p4clockmod_table[i].frequency = (stock_freq * i)/8; + } + + +#ifdef CONFIG_CPU_FREQ_24_API + for (i=0;icpu_cur_freq[i] = stock_freq; + } +#endif + + driver->verify = &cpufreq_p4_verify; + driver->setpolicy = &cpufreq_p4_setpolicy; + + for (i=0;ipolicy[i].cpu = i; + ret = cpufreq_frequency_table_cpuinfo(&driver->policy[i], &p4clockmod_table[0]); + if (ret) { + kfree(driver); + return ret; + } + driver->policy[i].policy = CPUFREQ_POLICY_PERFORMANCE; + driver->policy[i].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + } + + cpufreq_p4_driver = driver; + + ret = cpufreq_register(driver); + if (ret) { + cpufreq_p4_driver = NULL; + kfree(driver); + } + + return ret; +} + + +static void __exit cpufreq_p4_exit(void) +{ + unsigned int i; + + if (cpufreq_p4_driver) { + for (i=0; i"); +MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); +MODULE_LICENSE ("GPL"); + +module_init(cpufreq_p4_init); +module_exit(cpufreq_p4_exit); + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/pci-dma.c linux.21rc5-ac2/arch/i386/kernel/pci-dma.c --- linux.21rc5/arch/i386/kernel/pci-dma.c 2003-05-28 15:08:06.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/pci-dma.c 2003-04-22 16:44:36.000000000 +0100 @@ -19,7 +19,7 @@ void *ret; int gfp = GFP_ATOMIC; - if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff)) + if (hwdev == NULL || hwdev->dma_mask < 0xffffffff) gfp |= GFP_DMA; ret = (void *)__get_free_pages(gfp, get_order(size)); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/pci-irq.c linux.21rc5-ac2/arch/i386/kernel/pci-irq.c --- linux.21rc5/arch/i386/kernel/pci-irq.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/pci-irq.c 2003-04-28 17:57:37.000000000 +0100 @@ -116,7 +116,7 @@ * Code for querying and setting of IRQ routes on various interrupt routers. 
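+ * (eisa_set_level_irq() below is made non-static by this patch so
+ * routing code elsewhere can reuse it.  The ELCR it programs sits at
+ * ports 0x4d0/0x4d1 with one bit per ISA IRQ, 1 meaning level
+ * triggered: IRQ 11, say, lands at port 0x4d0 + (11 >> 3) == 0x4d1,
+ * bit 11 & 7 == 3.)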
*/ -static void eisa_set_level_irq(unsigned int irq) +void eisa_set_level_irq(unsigned int irq) { unsigned char mask = 1 << (irq & 7); unsigned int port = 0x4d0 + (irq >> 3); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/pci-pc.c linux.21rc5-ac2/arch/i386/kernel/pci-pc.c --- linux.21rc5/arch/i386/kernel/pci-pc.c 2003-05-28 15:08:06.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/pci-pc.c 2003-04-28 17:57:37.000000000 +0100 @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -28,6 +29,8 @@ int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value) = NULL; int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value) = NULL; +static int pci_using_acpi_prt = 0; + #ifdef CONFIG_MULTIQUAD #define BUS2QUAD(global) (mp_bus_id_to_node[global]) #define BUS2LOCAL(global) (mp_bus_id_to_local[global]) @@ -1353,6 +1356,23 @@ pci_read_bridge_bases(b); } +struct pci_bus * __devinit pcibios_scan_root(int busnum) +{ + struct list_head *list; + struct pci_bus *bus; + + list_for_each(list, &pci_root_buses) { + bus = pci_bus_b(list); + if (bus->number == busnum) { + /* Already scanned */ + return bus; + } + } + + printk("PCI: Probing PCI hardware (bus %02x)\n", busnum); + + return pci_scan_bus(busnum, pci_root_ops, NULL); +} void __devinit pcibios_config_init(void) { @@ -1394,6 +1414,8 @@ return; } +int use_acpi_pci __initdata = 1; + void __init pcibios_init(void) { int quad; @@ -1406,7 +1428,19 @@ } printk(KERN_INFO "PCI: Probing PCI hardware\n"); - pci_root_bus = pci_scan_bus(0, pci_root_ops, NULL); +#ifdef CONFIG_ACPI_PCI + if (use_acpi_pci && !acpi_pci_irq_init()) { + pci_using_acpi_prt = 1; + printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n"); + printk(KERN_INFO "PCI: if you experience problems, try using option 'pci=noacpi' or even 'acpi=off'\n"); + } +#endif + if (!pci_using_acpi_prt) { + pci_root_bus = pcibios_scan_root(0); + pcibios_irq_init(); + pcibios_fixup_peer_bridges(); + pcibios_fixup_irqs(); + } if (clustered_apic_mode && (numnodes > 1)) { for (quad = 1; quad < numnodes; ++quad) { printk("Scanning PCI bus %d for quad %d\n", @@ -1416,9 +1450,6 @@ } } - pcibios_irq_init(); - pcibios_fixup_peer_bridges(); - pcibios_fixup_irqs(); pcibios_resource_survey(); #ifdef CONFIG_PCI_BIOS @@ -1470,6 +1501,9 @@ } else if (!strncmp(str, "lastbus=", 8)) { pcibios_last_bus = simple_strtol(str+8, NULL, 0); return NULL; + } else if (!strncmp(str, "noacpi", 6)) { + use_acpi_pci = 0; + return NULL; } return str; } @@ -1485,6 +1519,15 @@ if ((err = pcibios_enable_resources(dev, mask)) < 0) return err; + +#ifdef CONFIG_ACPI_PCI + if (pci_using_acpi_prt) { + acpi_pci_irq_enable(dev); + return 0; + } +#endif + pcibios_enable_irq(dev); + return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/powernow-k6.c linux.21rc5-ac2/arch/i386/kernel/powernow-k6.c --- linux.21rc5/arch/i386/kernel/powernow-k6.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/powernow-k6.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,237 @@ +/* + * $Id: powernow-k6.c,v 1.42 2003/01/02 22:41:08 db Exp $ + * This file was part of Powertweak Linux (http://powertweak.sf.net) + * and is shared with the Linux Kernel module. + * + * (C) 2000-2002 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski. + * + * Licensed under the terms of the GNU GPL License version 2. + * + * BIG FAT DISCLAIMER: Work in progress code. 
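+ *
+ * A note on units: busfreq is held in 10 kHz steps so that
+ * multiplying by a ten-times ratio lands directly in kHz.  Worked
+ * example with illustrative numbers, for cpu_khz == 550000 booted
+ * at 5.5x:
+ *
+ *     busfreq = 550000 / 55 = 10000        a 100 MHz front side bus
+ *     freq at 4.0x = 10000 * 40 = 400000 kHz
+ *
+ * In short: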
Possibly *dangerous* + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long + as it is unused */ + +static struct cpufreq_driver *powernow_driver; +static unsigned int busfreq; /* FSB, in 10 kHz */ +static unsigned int max_multiplier; + + +/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ +static struct cpufreq_frequency_table clock_ratio[] = { + {45, /* 000 -> 4.5x */ 0}, + {50, /* 001 -> 5.0x */ 0}, + {40, /* 010 -> 4.0x */ 0}, + {55, /* 011 -> 5.5x */ 0}, + {20, /* 100 -> 2.0x */ 0}, + {30, /* 101 -> 3.0x */ 0}, + {60, /* 110 -> 6.0x */ 0}, + {35, /* 111 -> 3.5x */ 0}, + {0, CPUFREQ_TABLE_END} +}; + + +/** + * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier + * + * Returns the current setting of the frequency multiplier. Core clock + * speed is frequency of the Front-Side Bus multiplied with this value. + */ +static int powernow_k6_get_cpu_multiplier(void) +{ + u64 invalue = 0; + u32 msrval; + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue=inl(POWERNOW_IOPORT + 0x8); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + return clock_ratio[(invalue >> 5)&7].index; +} + + +/** + * powernow_k6_set_state - set the PowerNow! multiplier + * @best_i: clock_ratio[best_i] is the target multiplier + * + * Tries to change the PowerNow! multiplier + */ +static void powernow_k6_set_state (unsigned int best_i) +{ + unsigned long outvalue=0, invalue=0; + unsigned long msrval; + struct cpufreq_freqs freqs; + + if (!powernow_driver) { + printk(KERN_ERR "cpufreq: initialization problem or invalid target frequency\n"); + return; + } + + freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); + freqs.new = busfreq * clock_ratio[best_i].index; + freqs.cpu = 0; /* powernow-k6.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* we now need to transform best_i to the BVC format, see AMD#23446 */ + + outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5); + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue=inl(POWERNOW_IOPORT + 0x8); + invalue = invalue & 0xf; + outvalue = outvalue | invalue; + outl(outvalue ,(POWERNOW_IOPORT + 0x8)); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return; +} + + +/** + * powernow_k6_verify - verifies a new CPUfreq policy + * @policy: new policy + * + * Policy must be within lowest and highest possible CPU Frequency, + * and at least one possible state must be within min and max. + */ +static int powernow_k6_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); +} + + +/** + * powernow_k6_setpolicy - sets a new CPUFreq policy + * @policy - new policy + * + * sets a new CPUFreq policy + */ +static int powernow_k6_setpolicy (struct cpufreq_policy *policy) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_setpolicy(policy, &clock_ratio[0], &newstate)) + return -EINVAL; + + powernow_k6_set_state(newstate); + + return 0; +} + + +/** + * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver + * + * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported + * devices, -EINVAL or -ENOMEM on problems during initiatization, and zero + * on success. 
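+ * The transition itself (powernow_k6_set_state() above) goes through
+ * the EPMR-mapped BVC port: writing POWERNOW_IOPORT + 1 to
+ * MSR_K6_EPMR opens the window and POWERNOW_IOPORT + 0 closes it
+ * again, with the new multiplier or-ed into bits 7:5 at offset +8
+ * while the low nibble is preserved.  Condensed (sketch, not the
+ * literal code):
+ *
+ *     wrmsr(MSR_K6_EPMR, POWERNOW_IOPORT + 1, 0);
+ *     v = inl(POWERNOW_IOPORT + 8) & 0xf;
+ *     outl((1 << 12) | (1 << 10) | (1 << 9) | (best_i << 5) | v,
+ *          POWERNOW_IOPORT + 8);
+ *     wrmsr(MSR_K6_EPMR, POWERNOW_IOPORT + 0, 0);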
+ */ +static int __init powernow_k6_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + struct cpufreq_driver *driver; + unsigned int result; + unsigned int i; + + if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) || + ((c->x86_model != 12) && (c->x86_model != 13))) + return -ENODEV; + + max_multiplier = powernow_k6_get_cpu_multiplier(); + busfreq = cpu_khz / max_multiplier; + + if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) { + printk("cpufreq: PowerNow IOPORT region already used.\n"); + return -EIO; + } + + /* initialization of main "cpufreq" code*/ + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) { + release_region (POWERNOW_IOPORT, 16); + return -ENOMEM; + } + driver->policy = (struct cpufreq_policy *) (driver + 1); + + /* table init */ + for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + if (clock_ratio[i].index > max_multiplier) + clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; + else + clock_ratio[i].frequency = busfreq * clock_ratio[i].index; + } + + driver->verify = &powernow_k6_verify; + driver->setpolicy = &powernow_k6_setpolicy; + + /* cpuinfo and default policy values */ + driver->policy[0].cpu = 0; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + driver->policy[0].policy = CPUFREQ_POLICY_PERFORMANCE; +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = busfreq * max_multiplier; +#endif + result = cpufreq_frequency_table_cpuinfo(&driver->policy[0], &clock_ratio[0]); + if (result) { + kfree(driver); + return result; + } + + powernow_driver = driver; + + result = cpufreq_register(driver); + if (result) { + release_region (POWERNOW_IOPORT, 16); + powernow_driver = NULL; + kfree(driver); + } + + return result; +} + + +/** + * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support + * + * Unregisters AMD K6-2+ / K6-3+ PowerNow! support. + */ +static void __exit powernow_k6_exit(void) +{ + unsigned int i; + + if (powernow_driver) { + for (i=0;i<8;i++) + if (clock_ratio[i].index == max_multiplier) + powernow_k6_set_state(i); + cpufreq_unregister(); + kfree(powernow_driver); + } +} + + +MODULE_AUTHOR ("Arjan van de Ven , Dave Jones , Dominik Brodowski "); +MODULE_DESCRIPTION ("PowerNow! 
driver for AMD K6-2+ / K6-3+ processors."); +MODULE_LICENSE ("GPL"); +module_init(powernow_k6_init); +module_exit(powernow_k6_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/process.c linux.21rc5-ac2/arch/i386/kernel/process.c --- linux.21rc5/arch/i386/kernel/process.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/process.c 2003-05-27 14:22:53.000000000 +0100 @@ -124,15 +124,12 @@ void cpu_idle (void) { /* endless idle loop with no priority at all */ - init_idle(); - current->nice = 20; - current->counter = -100; while (1) { void (*idle)(void) = pm_idle; if (!idle) idle = default_idle; - while (!current->need_resched) + if (!current->need_resched) idle(); schedule(); check_pgt_cache(); @@ -187,7 +184,7 @@ } /* we will leave sorting out the final value when we are ready to reboot, since we might not - have set up boot_cpu_id or smp_num_cpu */ + have set up boot_cpu_physical_apicid or smp_num_cpu */ break; #endif } @@ -253,7 +250,7 @@ 0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */ 0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */ 0x74, 0x02, /* jz f */ - 0x0f, 0x08, /* invd */ + 0x0f, 0x09, /* wbinvd */ 0x24, 0x10, /* f: andb $0x10,al */ 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */ }; @@ -466,23 +463,6 @@ } /* - * No need to lock the MM as we are the last user - */ -void release_segments(struct mm_struct *mm) -{ - void * ldt = mm->context.segments; - - /* - * free the LDT - */ - if (ldt) { - mm->context.segments = NULL; - clear_LDT(); - vfree(ldt); - } -} - -/* * Create a kernel thread */ int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) @@ -535,45 +515,19 @@ void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { - void * ldt = dead_task->mm->context.segments; - // temporary debugging check - if (ldt) { - printk("WARNING: dead process %8s still has LDT? <%p>\n", - dead_task->comm, ldt); + if (dead_task->mm->context.size) { + printk("WARNING: dead process %8s still has LDT? <%p/%d>\n", + dead_task->comm, + dead_task->mm->context.ldt, + dead_task->mm->context.size); BUG(); } } - release_x86_irqs(dead_task); } /* - * we do not have to muck with descriptors here, that is - * done in switch_mm() as needed. - */ -void copy_segments(struct task_struct *p, struct mm_struct *new_mm) -{ - struct mm_struct * old_mm; - void *old_ldt, *ldt; - - ldt = NULL; - old_mm = current->mm; - if (old_mm && (old_ldt = old_mm->context.segments) != NULL) { - /* - * Completely new LDT, we initialize it from the parent: - */ - ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE); - if (!ldt) - printk(KERN_WARNING "ldt allocation failed\n"); - else - memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE); - } - new_mm->context.segments = ldt; - new_mm->context.cpuvalid = ~0UL; /* valid on all CPU's - they can't have stale data */ -} - -/* * Save a segment. */ #define savesegment(seg,value) \ @@ -698,15 +652,17 @@ asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs)); /* - * Restore %fs and %gs. + * Restore %fs and %gs if needed. 
*/ - loadsegment(fs, next->fs); - loadsegment(gs, next->gs); + if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) { + loadsegment(fs, next->fs); + loadsegment(gs, next->gs); + } /* * Now maybe reload the debug registers */ - if (next->debugreg[7]){ + if (unlikely(next->debugreg[7])) { loaddebug(next, 0); loaddebug(next, 1); loaddebug(next, 2); @@ -716,7 +672,7 @@ loaddebug(next, 7); } - if (prev->ioperm || next->ioperm) { + if (unlikely(prev->ioperm || next->ioperm)) { if (next->ioperm) { /* * 4 cachelines copy ... not good, but not that diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/setup.c linux.21rc5-ac2/arch/i386/kernel/setup.c --- linux.21rc5/arch/i386/kernel/setup.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/setup.c 2003-04-28 17:57:37.000000000 +0100 @@ -95,6 +95,7 @@ #include #include #include +#include #include #ifdef CONFIG_BLK_DEV_RAM #include @@ -118,6 +119,7 @@ #include #include #include +#include /* * Machine setup.. */ @@ -172,9 +174,10 @@ static int disable_x86_serial_nr __initdata = 1; static int disable_x86_ht __initdata = 0; static u32 disabled_x86_caps[NCAPINTS] __initdata = { 0 }; -extern int blk_nohighio; -int enable_acpi_smp_table; +int acpi_disabled __initdata = 0; + +extern int blk_nohighio; /* * This is set up by the setup-routine at boot-time @@ -196,6 +199,8 @@ #define KERNEL_START (*(unsigned long *) (PARAM+0x214)) #define INITRD_START (*(unsigned long *) (PARAM+0x218)) #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c)) +#define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) +#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) #define COMMAND_LINE ((char *) (PARAM+2048)) #define COMMAND_LINE_SIZE 256 @@ -203,6 +208,7 @@ #define RAMDISK_PROMPT_FLAG 0x8000 #define RAMDISK_LOAD_FLAG 0x4000 + #ifdef CONFIG_VISWS char visws_board_type = -1; char visws_board_rev = -1; @@ -700,6 +706,23 @@ return 0; } +#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) +unsigned char eddnr; +struct edd_info edd[EDDMAXNR]; +/** + * copy_edd() - Copy the BIOS EDD information + * from empty_zero_page into a safe place. + * + */ +static inline void copy_edd(void) +{ + eddnr = EDD_NR; + memcpy(edd, EDD_BUF, sizeof(edd)); +} +#else +#define copy_edd() do {} while (0) +#endif + /* * Do NOT EVER look at the BIOS memory size location. * It does not work on many machines. @@ -795,9 +818,9 @@ set_bit(X86_FEATURE_HT, disabled_x86_caps); } - /* "acpismp=force" forces parsing and use of the ACPI SMP table */ - else if (!memcmp(from, "acpismp=force", 13)) - enable_acpi_smp_table = 1; + /* "acpi=off" disables both ACPI table parsing and interpreter init */ + else if (!memcmp(from, "acpi=off", 8)) + acpi_disabled = 1; /* * highmem=size forces highmem to be exactly 'size' bytes. @@ -1007,10 +1030,15 @@ */ reserve_bootmem(PAGE_SIZE, PAGE_SIZE); #endif - +#ifdef CONFIG_ACPI_SLEEP + /* + * Reserve low memory region for sleep support. + */ + acpi_reserve_bootmem(); +#endif #ifdef CONFIG_X86_LOCAL_APIC /* - * Find and reserve possible boot-time SMP configuration: + * Find and reserve possible boot-time SMP configuration. 
*/ find_smp_config(); #endif @@ -1043,7 +1071,6 @@ { unsigned long low_mem_size; int i; - probe_roms(); for (i = 0; i < e820.nr_map; i++) { struct resource *res; @@ -1112,6 +1139,7 @@ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); #endif setup_memory_region(); + copy_edd(); if (!MOUNT_ROOT_RDONLY) root_mountflags &= ~MS_RDONLY; @@ -1129,21 +1157,10 @@ max_low_pfn = setup_memory(); - /* - * If enable_acpi_smp_table and HT feature present, acpitable.c - * will find all logical cpus despite disable_x86_ht: so if both - * "noht" and "acpismp=force" are specified, let "noht" override - * "acpismp=force" cleanly. Why retain "acpismp=force"? because - * parsing ACPI SMP table might prove useful on some non-HT cpu. - */ if (disable_x86_ht) { clear_bit(X86_FEATURE_HT, &boot_cpu_data.x86_capability[0]); set_bit(X86_FEATURE_HT, disabled_x86_caps); - enable_acpi_smp_table = 0; } - if (test_bit(X86_FEATURE_HT, &boot_cpu_data.x86_capability[0])) - enable_acpi_smp_table = 1; - /* * NOTE: before this point _nobody_ is allowed to allocate @@ -1154,6 +1171,13 @@ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/ #endif paging_init(); +#ifdef CONFIG_ACPI_BOOT + /* + * Parse the ACPI tables for possible boot-time SMP configuration. + */ + if (!acpi_disabled) + acpi_boot_init(); +#endif #ifdef CONFIG_X86_LOCAL_APIC /* * get boot-time SMP configuration: @@ -2937,6 +2961,7 @@ * applications want to get the raw CPUID data, they should access * /dev/cpu//cpuid instead. */ + extern int phys_proc_id[NR_CPUS]; static char *x86_cap_flags[] = { /* Intel-defined */ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", @@ -2994,6 +3019,11 @@ /* Cache size */ if (c->x86_cache_size >= 0) seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); + +#ifdef CONFIG_SMP + seq_printf(m, "physical id\t: %d\n",phys_proc_id[n]); + seq_printf(m, "siblings\t: %d\n",smp_num_siblings); +#endif /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */ fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu); @@ -3096,11 +3126,12 @@ set_tss_desc(nr,t); gdt_table[__TSS(nr)].b &= 0xfffffdff; load_TR(nr); - load_LDT(&init_mm); + load_LDT(&init_mm.context); - /* - * Clear all 6 debug registers: - */ + /* Clear %fs and %gs. */ + asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs"); + + /* Clear all 6 debug registers: */ #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) ); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/smpboot.c linux.21rc5-ac2/arch/i386/kernel/smpboot.c --- linux.21rc5/arch/i386/kernel/smpboot.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/i386/kernel/smpboot.c 2003-04-22 16:44:36.000000000 +0100 @@ -58,7 +58,7 @@ /* Number of siblings per CPU package */ int smp_num_siblings = 1; -int __initdata phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */ +int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */ /* Bitmask of currently online CPUs */ unsigned long cpu_online_map; @@ -365,7 +365,7 @@ * (This works even if the APIC is not enabled.) */ phys_id = GET_APIC_ID(apic_read(APIC_ID)); - cpuid = current->processor; + cpuid = cpu(); if (test_and_set_bit(cpuid, &cpu_online_map)) { printk("huh, phys CPU#%d, CPU#%d already present??\n", phys_id, cpuid); @@ -435,6 +435,7 @@ */ smp_store_cpu_info(cpuid); + disable_APIC_timer(); /* * Allow the master to continue. 
 */
@@ -443,7 +444,7 @@
 	/*
 	 * Synchronize the TSC with the BP
 	 */
-	if (cpu_has_tsc)
+	if (cpu_has_tsc > 1)
 		synchronize_tsc_ap();
 }
@@ -465,6 +466,7 @@
 	smp_callin();
 	while (!atomic_read(&smp_commenced))
 		rep_nop();
+	enable_APIC_timer();
 	/*
 	 * low-memory mappings have been cleared, flush them from
 	 * the local TLBs too.
@@ -803,16 +805,13 @@
 	if (!idle)
 		panic("No idle process for CPU %d", cpu);
 
-	idle->processor = cpu;
-	idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */
+	init_idle(idle, cpu);
 
 	map_cpu_to_boot_apicid(cpu, apicid);
 
 	idle->thread.eip = (unsigned long) start_secondary;
 
-	del_from_runqueue(idle);
 	unhash_process(idle);
-	init_tasks[cpu] = idle;
 
 	/* start_eip had better be page-aligned! */
 	start_eip = setup_trampoline();
@@ -925,6 +924,7 @@
 }
 
 cycles_t cacheflush_time;
+unsigned long cache_decay_ticks;
 
 static void smp_tune_scheduling (void)
 {
@@ -958,9 +958,13 @@
 		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
 	}
 
+	cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
+
 	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
 		(long)cacheflush_time/(cpu_khz/1000),
 		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
+	printk("task migration cache decay timeout: %ld msecs.\n",
+		(cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 /*
@@ -1026,8 +1030,7 @@
 	map_cpu_to_boot_apicid(0, boot_cpu_apicid);
 
 	global_irq_holder = 0;
-	current->processor = 0;
-	init_idle();
+	current->cpu = 0;
 	smp_tune_scheduling();
 
 	/*
@@ -1219,7 +1222,7 @@
 	/*
 	 * Synchronize the TSC with the AP
 	 */
-	if (cpu_has_tsc && cpucount)
+	if (cpu_has_tsc > 1 && cpucount)
 		synchronize_tsc_bp();
 
 smp_done:
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/smp.c linux.21rc5-ac2/arch/i386/kernel/smp.c
--- linux.21rc5/arch/i386/kernel/smp.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/smp.c	2003-04-22 16:44:36.000000000 +0100
@@ -496,13 +496,23 @@
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-
 void smp_send_reschedule(int cpu)
{
	send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}

 /*
+ * This function sends a reschedule IPI to all (other) CPUs.
+ * This should only be used if some 'global' task became runnable,
+ * such as a RT task, that must be handled now. The first CPU
+ * that manages to grab the task will run it.
+ */
+void smp_send_reschedule_all(void)
+{
+	send_IPI_allbutself(RESCHEDULE_VECTOR);
+}
+
+/*
  * Structure and data for smp_call_function(). This is designed to minimise
  * static memory requirements. It also looks cleaner.
  */
@@ -553,7 +563,7 @@
 
 	spin_lock(&call_lock);
 	call_data = &data;
-	wmb();
+	mb();
 	/* Send a message to all other CPUs and wait for them to respond */
 	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/speedstep.c linux.21rc5-ac2/arch/i386/kernel/speedstep.c
--- linux.21rc5/arch/i386/kernel/speedstep.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/speedstep.c	2003-04-22 16:44:36.000000000 +0100
@@ -0,0 +1,731 @@
+/*
+ * $Id: speedstep.c,v 1.64 2003/01/02 22:16:26 db Exp $
+ *
+ * (C) 2001 Dave Jones, Arjan van de Ven.
+ * (C) 2002 Dominik Brodowski
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ * Based upon reverse engineered information, and on Intel documentation
+ * for chipsets ICH2-M and ICH3-M.
+ *
+ * Many thanks to Ducrot Bruno for finding and fixing the last
+ * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler
+ * for extensive testing.
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
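The wmb()-to-mb() change in smp_call_function() above is the usual publish-then-notify ordering requirement: the store that publishes the work item must be visible before the IPI that makes other CPUs read it. A minimal sketch of the pattern, with illustrative names (only __sync_synchronize() is a real primitive, standing in for the kernel's mb()):

/* Sketch: publish, fence, then notify. */
struct call_info {
	void (*func)(void *);
	void *arg;
};

static struct call_info *call_data;	/* read by IPI handlers on other CPUs */

static void publish_and_notify(struct call_info *info)
{
	call_data = info;		/* 1. publish the work item */
	__sync_synchronize();		/* 2. full barrier (kernel: mb()) */
	/* 3. here the kernel sends CALL_FUNCTION_VECTOR to all other CPUs */
}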
+
+
+/*********************************************************************
+ *                    SPEEDSTEP - DEFINITIONS                        *
+ *********************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <asm/msr.h>
+
+
+static struct cpufreq_driver *speedstep_driver;
+
+/* speedstep_chipset:
+ *   It is necessary to know which chipset is used. As accesses to
+ * this device occur at various places in this module, we need a
+ * static struct pci_dev * pointing to that device.
+ */
+static unsigned int speedstep_chipset;
+static struct pci_dev *speedstep_chipset_dev;
+
+#define SPEEDSTEP_CHIPSET_ICH2M 0x00000002
+#define SPEEDSTEP_CHIPSET_ICH3M 0x00000003
+
+
+/* speedstep_processor
+ */
+static unsigned int speedstep_processor = 0;
+static int speedstep_coppermine = 0;
+
+#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000001  /* Coppermine core */
+#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000002  /* Tualatin core */
+#define SPEEDSTEP_PROCESSOR_P4M    0x00000003  /* P4-M with 100 MHz FSB */
+
+
+/* speedstep_[low,high]_freq
+ *   There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+#define SPEEDSTEP_HIGH 0x00000000
+#define SPEEDSTEP_LOW  0x00000001
+
+static struct cpufreq_frequency_table speedstep_freqs[] = {
+	{SPEEDSTEP_HIGH, 0},
+	{SPEEDSTEP_LOW, 0},
+	{0, CPUFREQ_TABLE_END},
+};
+
+#define speedstep_low_freq speedstep_freqs[SPEEDSTEP_LOW].frequency
+#define speedstep_high_freq speedstep_freqs[SPEEDSTEP_HIGH].frequency
+
+
+/* DEBUG
+ *   Define it if you want verbose debug output, e.g. for bug reporting
+ */
+//#define SPEEDSTEP_DEBUG
+
+#ifdef SPEEDSTEP_DEBUG
+#define dprintk(msg...) printk(msg)
+#else
+#define dprintk(msg...) do { } while(0)
+#endif
+
+
+
+/*********************************************************************
+ *                  LOW LEVEL CHIPSET INTERFACE                      *
+ *********************************************************************/
+
+/**
+ * speedstep_get_state - read the current SpeedStep state
+ * @state: Speedstep state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ *
+ * Tries to read the SpeedStep state. Returns -EIO when there has been
+ * trouble reading the status or writing to the control register, -EINVAL
+ * on an unsupported chipset, and zero on success.
+ */
+static int speedstep_get_state (unsigned int *state)
+{
+	unsigned long flags;
+	u32 pmbase;
+	u8 value;
+
+	if (!speedstep_chipset_dev || !state)
+		return -EINVAL;
+
+	switch (speedstep_chipset) {
+	case SPEEDSTEP_CHIPSET_ICH2M:
+	case SPEEDSTEP_CHIPSET_ICH3M:
+		/* get PMBASE */
+		pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
+		if (!(pmbase & 0x01))
+			return -EIO;
+
+		pmbase &= 0xFFFFFFFE;
+		if (!pmbase)
+			return -EIO;
+
+		/* read state */
+		local_irq_save(flags);
+		value = inb(pmbase + 0x50);
+		local_irq_restore(flags);
+
+		dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
+
+		*state = value & 0x01;
+		return 0;
+
+	}
+
+	printk (KERN_ERR "cpufreq: setting CPU frequency on this chipset unsupported.\n");
+	return -EINVAL;
+}
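Condensed for reference, the PM-base dance that speedstep_get_state() above performs. Config register 0x40 and the 0x50 offset are taken from the code above; the helper itself is an illustration, not a drop-in function.

/* Sketch: locate the ACPI PM I/O block via the LPC bridge's config
 * space, then read the SpeedStep state bit. */
static int read_speedstep_bit(struct pci_dev *lpc)
{
	u32 pmbase;

	pci_read_config_dword(lpc, 0x40, &pmbase);	/* PM base address */
	if (!(pmbase & 0x01))				/* bit 0: decode enabled */
		return -EIO;
	pmbase &= 0xFFFFFFFE;				/* strip the enable bit */

	return inb(pmbase + 0x50) & 0x01;		/* 0 = high, 1 = low */
}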
+/**
+ * speedstep_set_state - set the SpeedStep state
+ * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ * @notify: whether to call the cpufreq transition notifiers
+ *
+ * Tries to change the SpeedStep state.
+ */
+static void speedstep_set_state (unsigned int state, int notify)
+{
+	u32 pmbase;
+	u8 pm2_blk;
+	u8 value;
+	unsigned long flags;
+	unsigned int oldstate;
+	struct cpufreq_freqs freqs;
+
+	if (!speedstep_chipset_dev || (state > 0x1))
+		return;
+
+	if (speedstep_get_state(&oldstate))
+		return;
+
+	if (oldstate == state)
+		return;
+
+	freqs.old = (oldstate == SPEEDSTEP_HIGH) ? speedstep_high_freq : speedstep_low_freq;
+	freqs.new = (state == SPEEDSTEP_HIGH) ? speedstep_high_freq : speedstep_low_freq;
+	freqs.cpu = 0; /* speedstep.c is UP only driver */
+
+	if (notify)
+		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	switch (speedstep_chipset) {
+	case SPEEDSTEP_CHIPSET_ICH2M:
+	case SPEEDSTEP_CHIPSET_ICH3M:
+		/* get PMBASE */
+		pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
+		if (!(pmbase & 0x01))
+		{
+			printk(KERN_ERR "cpufreq: could not find speedstep register\n");
+			return;
+		}
+
+		pmbase &= 0xFFFFFFFE;
+		if (!pmbase) {
+			printk(KERN_ERR "cpufreq: could not find speedstep register\n");
+			return;
+		}
+
+		/* Disable IRQs */
+		local_irq_save(flags);
+
+		/* read state */
+		value = inb(pmbase + 0x50);
+
+		dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
+
+		/* write new state */
+		value &= 0xFE;
+		value |= state;
+
+		dprintk(KERN_DEBUG "cpufreq: writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase);
+
+		/* Disable bus master arbitration */
+		pm2_blk = inb(pmbase + 0x20);
+		pm2_blk |= 0x01;
+		outb(pm2_blk, (pmbase + 0x20));
+
+		/* Actual transition */
+		outb(value, (pmbase + 0x50));
+
+		/* Restore bus master arbitration */
+		pm2_blk &= 0xfe;
+		outb(pm2_blk, (pmbase + 0x20));
+
+		/* check if transition was successful */
+		value = inb(pmbase + 0x50);
+
+		/* Enable IRQs */
+		local_irq_restore(flags);
+
+		dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
+
+		if (state == (value & 0x1)) {
+			dprintk (KERN_INFO "cpufreq: change to %u MHz succeeded\n", (freqs.new / 1000));
+		} else {
+			printk (KERN_ERR "cpufreq: change failed - I/O error\n");
+		}
+		break;
+	default:
+		printk (KERN_ERR "cpufreq: setting CPU frequency on this chipset unsupported.\n");
+	}
+
+	if (notify)
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	return;
+}
+
+
+/**
+ * speedstep_activate - activate SpeedStep control in the chipset
+ *
+ * Tries to activate the SpeedStep status and control registers.
+ * Returns -EINVAL on an unsupported chipset, and zero on success.
+ */
+static int speedstep_activate (void)
+{
+	if (!speedstep_chipset_dev)
+		return -EINVAL;
+
+	switch (speedstep_chipset) {
+	case SPEEDSTEP_CHIPSET_ICH2M:
+	case SPEEDSTEP_CHIPSET_ICH3M:
+	{
+		u16 value = 0;
+
+		pci_read_config_word(speedstep_chipset_dev,
+				0x00A0, &value);
+		if (!(value & 0x08)) {
+			value |= 0x08;
+			dprintk(KERN_DEBUG "cpufreq: activating SpeedStep (TM) registers\n");
+			pci_write_config_word(speedstep_chipset_dev,
+					0x00A0, value);
+		}
+
+		return 0;
+	}
+	}
+
+	printk (KERN_ERR "cpufreq: SpeedStep (TM) on this chipset unsupported.\n");
+	return -EINVAL;
+}
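The transition sequence speedstep_set_state() above performs on ICH2-M/ICH3-M, reduced to an ordered outline. Offsets 0x50 (control byte) and 0x20 (PM2 block) are from the code above; this is an outline under those assumptions, not a drop-in function.

/* Sketch: the five-step SpeedStep transition, in order. */
static void transition_outline(unsigned long pmbase, unsigned int state)
{
	u8 ctl, pm2;

	ctl = inb(pmbase + 0x50);          /* 1. read current control byte   */
	ctl = (ctl & 0xFE) | (state & 1);  /* 2. splice in the new state bit */

	pm2 = inb(pmbase + 0x20);
	outb(pm2 | 0x01, pmbase + 0x20);   /* 3. block bus-master arbitration */

	outb(ctl, pmbase + 0x50);          /* 4. trigger the transition       */

	outb(pm2 & 0xFE, pmbase + 0x20);   /* 5. re-enable arbitration (the
					      driver clears the bit outright,
					      as mirrored here) */
}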
+/**
+ * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic
+ *
+ * Detects ICH2-M and ICH3-M so far. The pci_dev points to
+ * the LPC bridge / PM module which contains all power-management
+ * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected
+ * chipset, or zero on failure.
+ */
+static unsigned int speedstep_detect_chipset (void)
+{
+	speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
+			      PCI_DEVICE_ID_INTEL_82801CA_12,
+			      PCI_ANY_ID,
+			      PCI_ANY_ID,
+			      NULL);
+	if (speedstep_chipset_dev)
+		return SPEEDSTEP_CHIPSET_ICH3M;
+
+
+	speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
+			      PCI_DEVICE_ID_INTEL_82801BA_10,
+			      PCI_ANY_ID,
+			      PCI_ANY_ID,
+			      NULL);
+	if (speedstep_chipset_dev) {
+		/* speedstep.c causes lockups on Dell Inspirons 8000 and
+		 * 8100 which use a pretty old revision of the 82815
+		 * host bridge. Abort on these systems.
+		 */
+		static struct pci_dev *hostbridge;
+		u8 rev = 0;
+
+		hostbridge = pci_find_subsys(PCI_VENDOR_ID_INTEL,
+			      PCI_DEVICE_ID_INTEL_82815_MC,
+			      PCI_ANY_ID,
+			      PCI_ANY_ID,
+			      NULL);
+
+		if (!hostbridge)
+			return SPEEDSTEP_CHIPSET_ICH2M;
+
+		pci_read_config_byte(hostbridge, PCI_REVISION_ID, &rev);
+		if (rev < 5) {
+			dprintk(KERN_INFO "cpufreq: hostbridge does not support speedstep\n");
+			speedstep_chipset_dev = NULL;
+			return 0;
+		}
+
+		return SPEEDSTEP_CHIPSET_ICH2M;
+	}
+
+	return 0;
+}
+
+
+
+/*********************************************************************
+ *                 LOW LEVEL PROCESSOR INTERFACE                     *
+ *********************************************************************/
+
+
+/**
+ * pentium3_get_frequency - get the core frequencies for PIIIs
+ *
+ * Returns the core frequency of a Pentium III processor (in kHz)
+ */
+static unsigned int pentium3_get_frequency (void)
+{
+	/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
+	struct {
+		unsigned int ratio;	/* Frequency Multiplier (x10) */
+		u8 bitmap;		/* power on configuration bits
+					   [27, 25:22] (in MSR 0x2a) */
+	} msr_decode_mult [] = {
+		{ 30, 0x01 },
+		{ 35, 0x05 },
+		{ 40, 0x02 },
+		{ 45, 0x06 },
+		{ 50, 0x00 },
+		{ 55, 0x04 },
+		{ 60, 0x0b },
+		{ 65, 0x0f },
+		{ 70, 0x09 },
+		{ 75, 0x0d },
+		{ 80, 0x0a },
+		{ 85, 0x26 },
+		{ 90, 0x20 },
+		{ 100, 0x2b },
+		{ 0, 0xff }	/* error or unknown value */
+	};
+	/* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
+	struct {
+		unsigned int value;	/* Front Side Bus speed in MHz */
+		u8 bitmap;		/* power on configuration bits [18: 19]
+					   (in MSR 0x2a) */
+	} msr_decode_fsb [] = {
+		{  66, 0x0 },
+		{ 100, 0x2 },
+		{ 133, 0x1 },
+		{   0, 0xff}
+	};
+	u32 msr_lo, msr_tmp;
+	int i = 0, j = 0;
+	struct cpuinfo_x86 *c = cpu_data;
+
+	/* read MSR 0x2a - we only need the low 32 bits */
+	rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
+	dprintk(KERN_DEBUG "cpufreq: P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
+	msr_tmp = msr_lo;
+
+	/* decode the FSB */
+	msr_tmp &= 0x00c0000;
+	msr_tmp >>= 18;
+	while (msr_tmp != msr_decode_fsb[i].bitmap) {
+		if (msr_decode_fsb[i].bitmap == 0xff)
+			return -EINVAL;
+		i++;
+	}
+
+	/* decode the multiplier */
+	if ((c->x86_model == 0x08) && (c->x86_mask == 0x01))
+		/* different on early Coppermine PIII */
+		msr_lo &= 0x03c00000;
+	else
+		msr_lo &= 0x0bc00000;
+	msr_lo >>= 22;
+	while (msr_lo != msr_decode_mult[j].bitmap) {
+		if (msr_decode_mult[j].bitmap == 0xff)
+			return -EINVAL;
+		j++;
+	}
+
+	return (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100);
+}
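A stand-alone demo of the MSR 0x2a decode above, run on a hypothetical register value. The bit positions and the kHz formula mirror pentium3_get_frequency(); the sample value and the reduced lookup are illustrative.

/* Plain C demo (not kernel code): decode a sample MSR 0x2a image. The
 * sample is constructed to mean 100 MHz FSB and a 7.0x multiplier per
 * the tables above. */
#include <stdio.h>

int main(void)
{
	unsigned int msr_lo = (0x2u << 18) | (0x09u << 22);  /* assumed sample */
	unsigned int fsb_bits  = (msr_lo & 0x00c0000) >> 18;
	unsigned int mult_bits = (msr_lo & 0x0bc00000) >> 22;
	unsigned int fsb  = (fsb_bits == 0x2) ? 100 : (fsb_bits == 0x1) ? 133 : 66;
	unsigned int mult = (mult_bits == 0x09) ? 70 : 0;    /* 7.0x, per table */

	/* core kHz = multiplier(x10) * FSB(MHz) * 100, as in the driver */
	printf("core frequency: %u kHz\n", mult * fsb * 100);
	return 0;
}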
Next 12 bits always seem to be 0x300. If this is not true + * on this CPU, complain. Last 8 bits are frequency (in 100MHz). + */ + if (msr_hi || ((msr_lo & 0x00FFF000) != 0x300000)) { + printk(KERN_DEBUG "cpufreq: P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); + printk(KERN_INFO "cpufreq: problem in initialization. Please contact Dominik Brodowski\n"); + printk(KERN_INFO "cpufreq: and attach this dmesg. Thanks in advance\n"); + return 0; + } + + msr_lo >>= 24; + return (msr_lo * 100000); +} + + +/** + * speedstep_detect_processor - detect Intel SpeedStep-capable processors. + * + * Returns the SPEEDSTEP_PROCESSOR_-number for the detected processor, + * or zero on failure. + */ +static unsigned int speedstep_detect_processor (void) +{ + struct cpuinfo_x86 *c = cpu_data; + u32 ebx; + + if ((c->x86_vendor != X86_VENDOR_INTEL) || + ((c->x86 != 6) && (c->x86 != 0xF))) + return 0; + + if (c->x86 == 0xF) { + /* Intel Pentium 4 Mobile P4-M */ + if (c->x86_model != 2) + return 0; + + if ((c->x86_mask != 4) && (c->x86_mask != 7)) + return 0; + + ebx = cpuid_ebx(0x00000001); + ebx &= 0x000000FF; + if ((ebx != 0x0e) && (ebx != 0x0f)) + return 0; + + return SPEEDSTEP_PROCESSOR_P4M; + } + + switch (c->x86_model) { + case 0x0B: /* Intel PIII [Tualatin] */ + /* cpuid_ebx(1) is 0x04 for desktop PIII, + 0x06 for mobile PIII-M */ + ebx = cpuid_ebx(0x00000001); + + ebx &= 0x000000FF; + if (ebx != 0x06) + return 0; + + /* So far all PIII-M processors support SpeedStep. See + * Intel's 24540633.pdf of August 2002 + */ + + return SPEEDSTEP_PROCESSOR_PIII_T; + + case 0x08: /* Intel PIII [Coppermine] */ + { + u32 msr_lo, msr_hi; + + /* all mobile PIII Coppermines have FSB 100 MHz + * ==> sort out a few desktop PIIIs. */ + rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); + dprintk(KERN_DEBUG "cpufreq: Coppermine: MSR_IA32_EBL_Cr_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); + msr_lo &= 0x00c0000; + if (msr_lo != 0x0080000) + return 0; + + if (speedstep_coppermine) + return SPEEDSTEP_PROCESSOR_PIII_C; + + printk(KERN_INFO "cpufreq: in case this is a SpeedStep-capable Intel Pentium III Coppermine\n"); + printk(KERN_INFO "cpufreq: processor, please pass the boot option or module parameter\n"); + printk(KERN_INFO "cpufreq: `speedstep_coppermine=1` to the kernel. Thanks!\n"); + return 0; + } + + default: + return 0; + } +} + + + +/********************************************************************* + * HIGH LEVEL FUNCTIONS * + *********************************************************************/ + +/** + * speedstep_detect_speeds - detects low and high CPU frequencies. + * + * Detects the low and high CPU frequencies in kHz. Returns 0 on + * success or -EINVAL / -EIO on problems. 
+
+/**
+ * speedstep_detect_speeds - detects low and high CPU frequencies.
+ *
+ * Detects the low and high CPU frequencies in kHz. Returns 0 on
+ * success or -EINVAL / -EIO on problems.
+ */
+static int speedstep_detect_speeds (void)
+{
+	unsigned long flags;
+	unsigned int state;
+	int i, result;
+
+	/* Disable irqs for entire detection process */
+	local_irq_save(flags);
+
+	for (i=0; i<2; i++) {
+		/* read the current state */
+		result = speedstep_get_state(&state);
+		if (result) {
+			local_irq_restore(flags);
+			return result;
+		}
+
+		/* save the correct value, and switch to other */
+		if (state == SPEEDSTEP_LOW) {
+			switch (speedstep_processor) {
+			case SPEEDSTEP_PROCESSOR_PIII_C:
+			case SPEEDSTEP_PROCESSOR_PIII_T:
+				speedstep_low_freq = pentium3_get_frequency();
+				break;
+			case SPEEDSTEP_PROCESSOR_P4M:
+				speedstep_low_freq = pentium4_get_frequency();
+			}
+			speedstep_set_state(SPEEDSTEP_HIGH, 0);
+		} else {
+			switch (speedstep_processor) {
+			case SPEEDSTEP_PROCESSOR_PIII_C:
+			case SPEEDSTEP_PROCESSOR_PIII_T:
+				speedstep_high_freq = pentium3_get_frequency();
+				break;
+			case SPEEDSTEP_PROCESSOR_P4M:
+				speedstep_high_freq = pentium4_get_frequency();
+			}
+			speedstep_set_state(SPEEDSTEP_LOW, 0);
+		}
+	}
+
+	local_irq_restore(flags);
+
+	if (!speedstep_low_freq || !speedstep_high_freq ||
+	    (speedstep_low_freq == speedstep_high_freq))
+		return -EIO;
+
+	return 0;
+}
+
+
+/**
+ * speedstep_setpolicy - set a new CPUFreq policy
+ * @policy: new policy
+ *
+ * Sets a new CPUFreq policy.
+ */
+static int speedstep_setpolicy (struct cpufreq_policy *policy)
+{
+	unsigned int newstate = 0;
+
+	if (cpufreq_frequency_table_setpolicy(policy, &speedstep_freqs[0], &newstate))
+		return -EINVAL;
+
+	speedstep_set_state(newstate, 1);
+
+	return 0;
+}
+
+
+/**
+ * speedstep_verify - verifies a new CPUFreq policy
+ * @policy: new policy
+ *
+ * Limit must be within speedstep_low_freq and speedstep_high_freq, with
+ * at least one border included.
+ */
+static int speedstep_verify (struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
+}
+
+
+#ifndef MODULE
+/**
+ * speedstep_setup - speedstep command line parameter parsing
+ *
+ * speedstep command line parameter. Use:
+ *   speedstep_coppermine=1
+ * if the CPU in your notebook is a SpeedStep-capable Intel
+ * Pentium III Coppermine. These processors cannot be detected
+ * automatically, as Intel continues to consider the detection
+ * algorithm as proprietary material.
+ */
+static int __init speedstep_setup(char *str)
+{
+	speedstep_coppermine = simple_strtoul(str, &str, 0);
+	return 1;
+}
+__setup("speedstep_coppermine=", speedstep_setup);
+#endif
+
+/**
+ * speedstep_init - initializes the SpeedStep CPUFreq driver
+ *
+ * Initializes the SpeedStep support. Returns -ENODEV on unsupported
+ * devices, -EINVAL on problems during initialization, and zero on
+ * success.
+ */
+static int __init speedstep_init(void)
+{
+	int result;
+	unsigned int speed;
+	struct cpufreq_driver *driver;
+
+
+	/* detect chipset */
+	speedstep_chipset = speedstep_detect_chipset();
+
+	/* detect processor */
+	if (speedstep_chipset)
+		speedstep_processor = speedstep_detect_processor();
+
+	if ((!speedstep_chipset) || (!speedstep_processor)) {
+		printk(KERN_INFO "cpufreq: Intel(R) SpeedStep(TM) for this %s not (yet) available.\n",
+		       speedstep_chipset ? "processor" : "chipset");
"processor" : "chipset"); + return -ENODEV; + } + + dprintk(KERN_INFO "cpufreq: Intel(R) SpeedStep(TM) support $Revision: 1.64 $\n"); + dprintk(KERN_DEBUG "cpufreq: chipset 0x%x - processor 0x%x\n", + speedstep_chipset, speedstep_processor); + + /* activate speedstep support */ + result = speedstep_activate(); + if (result) + return result; + + /* detect low and high frequency */ + result = speedstep_detect_speeds(); + if (result) + return result; + + /* get current speed setting */ + result = speedstep_get_state(&speed); + if (result) + return result; + + speed = (speed == SPEEDSTEP_LOW) ? speedstep_low_freq : speedstep_high_freq; + + dprintk(KERN_INFO "cpufreq: currently at %s speed setting - %i MHz\n", + (speed == speedstep_low_freq) ? "low" : "high", + (speed / 1000)); + + /* initialization of main "cpufreq" code*/ + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + + driver->policy[0].cpu = 0; + result = cpufreq_frequency_table_cpuinfo(&driver->policy[0], &speedstep_freqs[0]); + if (result) { + kfree(driver); + return result; + } + +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = speed; +#endif + + driver->verify = &speedstep_verify; + driver->setpolicy = &speedstep_setpolicy; + + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + driver->policy[0].policy = (speed == speedstep_low_freq) ? + CPUFREQ_POLICY_POWERSAVE : CPUFREQ_POLICY_PERFORMANCE; + + speedstep_driver = driver; + + result = cpufreq_register(driver); + if (result) { + speedstep_driver = NULL; + kfree(driver); + } + + return result; +} + + +/** + * speedstep_exit - unregisters SpeedStep support + * + * Unregisters SpeedStep support. 
+/**
+ * speedstep_exit - unregisters SpeedStep support
+ *
+ * Unregisters SpeedStep support.
+ */
+static void __exit speedstep_exit(void)
+{
+	if (speedstep_driver) {
+		cpufreq_unregister();
+		kfree(speedstep_driver);
+	}
+}
+
+
+MODULE_AUTHOR ("Dave Jones , Dominik Brodowski ");
+MODULE_DESCRIPTION ("SpeedStep driver for Intel mobile processors.");
+MODULE_LICENSE ("GPL");
+module_init(speedstep_init);
+module_exit(speedstep_exit);
+
+MODULE_PARM (speedstep_coppermine, "i");
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/sys_i386.c linux.21rc5-ac2/arch/i386/kernel/sys_i386.c
--- linux.21rc5/arch/i386/kernel/sys_i386.c	2003-05-28 15:08:06.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/sys_i386.c	2003-04-22 16:44:36.000000000 +0100
@@ -139,7 +139,10 @@
 
 	switch (call) {
 	case SEMOP:
-		return sys_semop (first, (struct sembuf *)ptr, second);
+		return sys_semtimedop (first, (struct sembuf *)ptr, second, NULL);
+	case SEMTIMEDOP:
+		return sys_semtimedop (first, (struct sembuf *)ptr, second,
+				       (const struct timespec *)fifth);
 	case SEMGET:
 		return sys_semget (first, second, third);
 	case SEMCTL: {
@@ -200,7 +203,7 @@
 		return sys_shmctl (first, second, (struct shmid_ds *) ptr);
 	default:
-		return -EINVAL;
+		return -ENOSYS;
 	}
 }
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/time.c linux.21rc5-ac2/arch/i386/kernel/time.c
--- linux.21rc5/arch/i386/kernel/time.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/time.c	2003-04-22 16:44:36.000000000 +0100
@@ -42,6 +42,8 @@
 #include
 #include
 #include
+#include <linux/cpufreq.h>
+#include <linux/notifier.h>
 
 #include
 #include
@@ -833,6 +835,50 @@
 	return 0;
 }
 
+#ifdef CONFIG_CPU_FREQ
+static unsigned int ref_freq = 0;
+static unsigned long loops_per_jiffy_ref = 0;
+
+#ifndef CONFIG_SMP
+static unsigned long fast_gettimeoffset_ref = 0;
+static unsigned long cpu_khz_ref = 0;
+#endif
+
+static int
+time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+		       void *data)
+{
+	struct cpufreq_freqs *freq = data;
+
+	if (!ref_freq) {
+		ref_freq = freq->old;
+		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+#ifndef CONFIG_SMP
+		fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
+		cpu_khz_ref = cpu_khz;
+#endif
+	}
+
+	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+		cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+#ifndef CONFIG_SMP
+		if (use_tsc) {
+			fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
+			cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
+		}
+#endif
+	}
+
+	return 0;
+}
+
+static struct notifier_block time_cpufreq_notifier_block = {
+	.notifier_call	= time_cpufreq_notifier
+};
+#endif
+
+
 void __init time_init(void)
 {
 	extern int x86_udelay_tsc;
@@ -904,6 +950,9 @@
 		}
 	}
 
+#ifdef CONFIG_CPU_FREQ
+	cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
 
 #ifdef CONFIG_VISWS
 	printk("Starting Cobalt Timer system clock\n");
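The time.c notifier above rescales the delay-loop calibration linearly with CPU frequency. A stand-alone sketch of the arithmetic, with hypothetical sample numbers; the helper mimics the kernel's cpufreq_scale() semantics (old * mult / div) but without its overflow-avoiding fixed-point splitting:

/* Plain C demo (not kernel code) of the loops_per_jiffy rescaling. */
#include <stdio.h>

static unsigned long scale(unsigned long old, unsigned int div,
			   unsigned int mult)
{
	unsigned long long tmp = (unsigned long long)old * mult;
	return (unsigned long)(tmp / div);
}

int main(void)
{
	unsigned long loops_per_jiffy = 2998272; /* assumed: calibrated at 600 MHz */
	unsigned int ref_khz = 600000, new_khz = 300000;

	/* halving the clock halves the delay-loop count */
	printf("loops_per_jiffy at %u kHz: %lu\n", new_khz,
	       scale(loops_per_jiffy, ref_khz, new_khz));
	return 0;
}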
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/traps.c linux.21rc5-ac2/arch/i386/kernel/traps.c
--- linux.21rc5/arch/i386/kernel/traps.c	2003-05-28 15:08:06.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/traps.c	2003-04-22 16:44:36.000000000 +0100
@@ -284,6 +284,20 @@
 
 void die(const char * str, struct pt_regs * regs, long err)
 {
+#ifdef CONFIG_PNPBIOS
+	if (regs->xcs == 0x60 || regs->xcs == 0x68)
+	{
+		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+		extern u32 pnp_bios_is_utter_crap;
+		pnp_bios_is_utter_crap = 1;
+		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
+		__asm__ volatile(
+			"movl %0, %%esp\n\t"
+			"jmp *%1\n\t"
+			: : "a" (pnp_bios_fault_esp), "b" (pnp_bios_fault_eip));
+		panic("do_trap: can't hit this");
+	}
+#endif
 	console_verbose();
 	spin_lock_irq(&die_lock);
 	bust_spinlocks(1);
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/kernel/vm86.c linux.21rc5-ac2/arch/i386/kernel/vm86.c
--- linux.21rc5/arch/i386/kernel/vm86.c	2003-05-28 15:08:06.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/kernel/vm86.c	2003-05-28 15:28:05.000000000 +0100
@@ -362,6 +362,9 @@
 
 	if (VEFLAGS & VIF_MASK)
 		flags |= IF_MASK;
+	else
+		flags &= ~IF_MASK;
+
 	flags |= IOPL_MASK;
 	return flags | (VEFLAGS & current->thread.v86mask);
 }
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/math-emu/fpu_system.h linux.21rc5-ac2/arch/i386/math-emu/fpu_system.h
--- linux.21rc5/arch/i386/math-emu/fpu_system.h	2003-05-28 15:08:06.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/math-emu/fpu_system.h	2003-04-22 16:44:36.000000000 +0100
@@ -20,7 +20,7 @@
    of the stack frame of math_emulate() */
 #define SETUP_DATA_AREA(arg)	FPU_info = (struct info *) &arg
 
-#define LDT_DESCRIPTOR(s)	(((struct desc_struct *)current->mm->context.segments)[(s) >> 3])
+#define LDT_DESCRIPTOR(s)	(((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
 #define SEG_D_SIZE(x)		((x).b & (3 << 21))
 #define SEG_G_BIT(x)		((x).b & (1 << 23))
 #define SEG_GRANULARITY(x)	(((x).b & (1 << 23)) ? 4096 : 1)
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/mm/fault.c linux.21rc5-ac2/arch/i386/mm/fault.c
--- linux.21rc5/arch/i386/mm/fault.c	2003-05-28 15:08:06.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/mm/fault.c	2003-04-22 18:25:56.000000000 +0100
@@ -76,9 +76,7 @@
 		return 1;
 
 check_stack:
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, start) == 0)
+	if (!expand_stack(vma, start))
 		goto good_area;
 
 bad_area:
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/i386/mm/init.c linux.21rc5-ac2/arch/i386/mm/init.c
--- linux.21rc5/arch/i386/mm/init.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/i386/mm/init.c	2003-04-22 16:44:36.000000000 +0100
@@ -510,7 +510,15 @@
 
 	if (!mem_map)
 		BUG();
-
+#ifdef CONFIG_HIGHMEM
+	/* check that fixmap and pkmap do not overlap */
+	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
+		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+			PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
+		BUG();
+	}
+#endif
 	set_max_mapnr_init();
 
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ia64/ia32/sys_ia32.c linux.21rc5-ac2/arch/ia64/ia32/sys_ia32.c
--- linux.21rc5/arch/ia64/ia32/sys_ia32.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/ia64/ia32/sys_ia32.c	2003-04-22 16:44:36.000000000 +0100
@@ -2126,6 +2126,7 @@
 #define SEMOP		 1
 #define SEMGET		 2
 #define SEMCTL		 3
+#define SEMTIMEDOP	 4
 #define MSGSND		11
 #define MSGRCV		12
 #define MSGGET		13
@@ -2553,6 +2554,17 @@
 	return err;
 }
 
+static long
+semtimedop32(int semid, struct sembuf *tsems, int nsems,
+	     const struct timespec32 *timeout32)
+{
+	struct timespec t;
+	if (get_user (t.tv_sec, &timeout32->tv_sec) ||
+	    get_user (t.tv_nsec, &timeout32->tv_nsec))
+		return -EFAULT;
+	return
sys_semtimedop(semid, tsems, nsems, &t); +} + asmlinkage long sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) { @@ -2564,7 +2576,11 @@ switch (call) { case SEMOP: /* struct sembuf is the same on 32 and 64bit :)) */ - return sys_semop(first, (struct sembuf *)AA(ptr), second); + return sys_semtimedop(first, (struct sembuf *)AA(ptr), second, + NULL); + case SEMTIMEDOP: + return semtimedop32(first, (struct sembuf *)AA(ptr), second, + (const struct timespec32 *)AA(fifth)); case SEMGET: return sys_semget(first, second, third); case SEMCTL: @@ -3783,7 +3799,11 @@ return result; } -struct dqblk32 { +extern asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr); + +#ifdef CONFIG_QIFACE_COMPAT +#ifdef CONFIG_QIFACE_V1 +struct user_dqblk32 { __u32 dqb_bhardlimit; __u32 dqb_bsoftlimit; __u32 dqb_curblocks; @@ -3793,49 +3813,82 @@ __kernel_time_t32 dqb_btime; __kernel_time_t32 dqb_itime; }; +typedef struct v1c_mem_dqblk comp_dqblk_t; -asmlinkage long -sys32_quotactl (int cmd, unsigned int special, int id, struct dqblk32 *addr) +#define Q_COMP_GETQUOTA Q_V1_GETQUOTA +#define Q_COMP_SETQUOTA Q_V1_SETQUOTA +#define Q_COMP_SETQLIM Q_V1_SETQLIM +#define Q_COMP_SETUSE Q_V1_SETUSE +#else +struct user_dqblk32 { + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u64 dqb_curspace; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v2c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V2_GETQUOTA +#define Q_COMP_SETQUOTA Q_V2_SETQUOTA +#define Q_COMP_SETQLIM Q_V2_SETQLIM +#define Q_COMP_SETUSE Q_V2_SETUSE +#endif + +asmlinkage long sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) { - extern asmlinkage long sys_quotactl (int, const char *, int, caddr_t); int cmds = cmd >> SUBCMDSHIFT; + long err; + comp_dqblk_t d; mm_segment_t old_fs; - struct dqblk d; char *spec; - long err; - + switch (cmds) { - case Q_GETQUOTA: - break; - case Q_SETQUOTA: - case Q_SETUSE: - case Q_SETQLIM: - if (copy_from_user (&d, addr, sizeof(struct dqblk32))) - return -EFAULT; - d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime; - d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime; - break; - default: - return sys_quotactl(cmd, (void *) A(special), id, (caddr_t) addr); + case Q_COMP_GETQUOTA: + break; + case Q_COMP_SETQUOTA: + case Q_COMP_SETUSE: + case Q_COMP_SETQLIM: + if (copy_from_user(&d, (struct user_dqblk32 *)addr, + sizeof (struct user_dqblk32))) + return -EFAULT; + d.dqb_itime = ((struct user_dqblk32 *)&d)->dqb_itime; + d.dqb_btime = ((struct user_dqblk32 *)&d)->dqb_btime; + break; + default: + return sys_quotactl(cmd, special, id, (__kernel_caddr_t)addr); } - spec = getname32((void *) A(special)); + spec = getname (special); err = PTR_ERR(spec); - if (IS_ERR(spec)) + if (IS_ERR(spec)) return err; + old_fs = get_fs(); + set_fs (KERNEL_DS); + err = sys_quotactl(cmd, (const char *)spec, id, (__kernel_caddr_t)&d); + set_fs (old_fs); + putname (spec); + if (err) return err; - old_fs = get_fs (); - set_fs(KERNEL_DS); - err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d); - set_fs(old_fs); - putname(spec); - if (cmds == Q_GETQUOTA) { + if (cmds == Q_COMP_GETQUOTA) { __kernel_time_t b = d.dqb_btime, i = d.dqb_itime; - ((struct dqblk32 *)&d)->dqb_itime = i; - ((struct dqblk32 *)&d)->dqb_btime = b; - if (copy_to_user(addr, &d, sizeof(struct dqblk32))) + ((struct user_dqblk32 *)&d)->dqb_itime = i; + ((struct user_dqblk32 *)&d)->dqb_btime = b; + if (copy_to_user 
((struct user_dqblk32 *)addr, &d, + sizeof (struct user_dqblk32))) return -EFAULT; } - return err; + return 0; +} + +#else +/* No conversion needed for new interface */ +asmlinkage long sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) +{ + return sys_quotactl(cmd, special, id, addr); } +#endif asmlinkage long sys32_sched_rr_get_interval (pid_t pid, struct timespec32 *interval) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ia64/kernel/entry.S linux.21rc5-ac2/arch/ia64/kernel/entry.S --- linux.21rc5/arch/ia64/kernel/entry.S 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/ia64/kernel/entry.S 2003-04-22 16:44:36.000000000 +0100 @@ -1200,7 +1200,7 @@ data8 ia64_ni_syscall data8 ia64_ni_syscall // 1245 data8 ia64_ni_syscall - data8 ia64_ni_syscall + data8 sys_semtimedop data8 ia64_ni_syscall data8 ia64_ni_syscall data8 ia64_ni_syscall // 1250 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ia64/mm/fault.c linux.21rc5-ac2/arch/ia64/mm/fault.c --- linux.21rc5/arch/ia64/mm/fault.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/ia64/mm/fault.c 2003-04-22 16:44:36.000000000 +0100 @@ -127,8 +127,6 @@ check_expansion: if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) { - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (rgn_index(address) != rgn_index(vma->vm_start) || rgn_offset(address) >= RGN_MAP_LIMIT) goto bad_area; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/mips/au1000/common/serial.c linux.21rc5-ac2/arch/mips/au1000/common/serial.c --- linux.21rc5/arch/mips/au1000/common/serial.c 2003-05-28 15:08:08.000000000 +0100 +++ linux.21rc5-ac2/arch/mips/au1000/common/serial.c 2003-04-22 16:44:36.000000000 +0100 @@ -195,7 +195,7 @@ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ - kdevname(tty->device), (info->flags), serial_refcount,info->count,tty->count,s) + kdevname(tty->device), (info->flags), serial_refcount,info->count,atomic_read(&tty->count),s) #else #define DBG_CNT(s) #endif @@ -1909,7 +1909,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/mips/baget/vacserial.c linux.21rc5-ac2/arch/mips/baget/vacserial.c --- linux.21rc5/arch/mips/baget/vacserial.c 2003-05-28 15:08:07.000000000 +0100 +++ linux.21rc5-ac2/arch/mips/baget/vacserial.c 2003-04-22 16:44:36.000000000 +0100 @@ -29,7 +29,7 @@ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) baget_printk("(%s):[%x] refc=%d, serc=%d, ttyc=%d-> %s\n", \ - kdevname(tty->device),(info->flags),serial_refcount,info->count,tty->count,s) + kdevname(tty->device),(info->flags),serial_refcount,info->count,atomic_read(&tty->count),s) #else #define DBG_CNT(s) #endif @@ -1658,7 +1658,7 @@ baget_printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
state->count should always
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/mips/kernel/sysmips.c linux.21rc5-ac2/arch/mips/kernel/sysmips.c
--- linux.21rc5/arch/mips/kernel/sysmips.c	2003-05-28 15:08:07.000000000 +0100
+++ linux.21rc5-ac2/arch/mips/kernel/sysmips.c	2003-05-28 15:18:46.000000000 +0100
@@ -65,7 +65,7 @@
 		return -EFAULT;
 
 	down_write(&uts_sem);
-	strncpy(system_utsname.nodename, name, len);
+	strncpy(system_utsname.nodename, nodename, len);
 	system_utsname.nodename[len] = '\0';
 	up_write(&uts_sem);
 	return 0;
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/mips/mm/fault.c linux.21rc5-ac2/arch/mips/mm/fault.c
--- linux.21rc5/arch/mips/mm/fault.c	2003-05-28 15:08:07.000000000 +0100
+++ linux.21rc5-ac2/arch/mips/mm/fault.c	2003-04-22 16:44:36.000000000 +0100
@@ -111,8 +111,6 @@
 		goto bad_area;
 	if (vma->vm_start <= address)
 		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
 	if (expand_stack(vma, address))
 		goto bad_area;
 /*
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/mips64/kernel/syscall.c linux.21rc5-ac2/arch/mips64/kernel/syscall.c
--- linux.21rc5/arch/mips64/kernel/syscall.c	2003-05-28 15:08:15.000000000 +0100
+++ linux.21rc5-ac2/arch/mips64/kernel/syscall.c	2003-05-28 15:18:46.000000000 +0100
@@ -207,7 +207,7 @@
 		return -EFAULT;
 
 	down_write(&uts_sem);
-	strncpy(system_utsname.nodename, name, len);
+	strncpy(system_utsname.nodename, nodename, len);
 	system_utsname.nodename[len] = '\0';
 	up_write(&uts_sem);
 	return 0;
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/mips64/mm/fault.c linux.21rc5-ac2/arch/mips64/mm/fault.c
--- linux.21rc5/arch/mips64/mm/fault.c	2003-05-28 15:08:16.000000000 +0100
+++ linux.21rc5-ac2/arch/mips64/mm/fault.c	2003-04-22 16:44:36.000000000 +0100
@@ -135,8 +135,6 @@
 		goto bad_area;
 	if (vma->vm_start <= address)
 		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
 	if (expand_stack(vma, address))
 		goto bad_area;
 /*
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/config.in linux.21rc5-ac2/arch/parisc/config.in
--- linux.21rc5/arch/parisc/config.in	2003-05-28 15:08:17.000000000 +0100
+++ linux.21rc5-ac2/arch/parisc/config.in	2003-04-28 16:05:55.000000000 +0100
@@ -47,12 +47,9 @@
 bool 'Symmetric multi-processing support' CONFIG_SMP
 bool 'Chassis LCD and LED support' CONFIG_CHASSIS_LCD_LED
 
-bool 'Kernel Debugger support' CONFIG_KWDB
-# define_bool CONFIG_KWDB n
-
 bool 'U2/Uturn I/O MMU' CONFIG_IOMMU_CCIO
 bool 'VSC/GSC/HSC bus support' CONFIG_GSC
-dep_bool '  Lasi I/O support' CONFIG_GSC_LASI $CONFIG_GSC
+dep_bool '  Asp/Lasi I/O support' CONFIG_GSC_LASI $CONFIG_GSC
 dep_bool '  Wax I/O support' CONFIG_GSC_WAX $CONFIG_GSC
 
 dep_bool 'EISA support' CONFIG_EISA $CONFIG_GSC
@@ -194,6 +191,7 @@
 
 #bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC
 bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
+bool 'Debug spinlocks' CONFIG_DEBUG_SPINLOCK
 endmenu
 
 source lib/Config.in
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/defpalo.conf linux.21rc5-ac2/arch/parisc/defpalo.conf
--- linux.21rc5/arch/parisc/defpalo.conf	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/arch/parisc/defpalo.conf	2003-04-28 16:02:18.000000000 +0100
@@ -0,0 +1,21 @@
+# This is a generic Palo configuration file. For more information about how
+# it works try 'palo -?'.
+#
+# Most people using 'make palo' want a bootable file, usable for
+# network or tape booting for example.
+--init-tape=lifimage +--recoverykernel=vmlinux + +########## Pick your ROOT here! ########## +# You need at least one 'root='! +# +# If you want a root ramdisk, use the next 2 lines +# (Edit the ramdisk image name!!!!) +--ramdisk=ram-disk-image-file +--commandline=0/vmlinux HOME=/ root=/dev/ram initrd=0/ramdisk + +# If you want NFS root, use the following command line (Edit the HOSTNAME!!!) +#--commandline=0/vmlinux HOME=/ root=/dev/nfs nfsroot=HOSTNAME ip=bootp + +# If you have root on a disk partition, use this (Edit the partition name!!!) +#--commandline=0/vmlinux HOME=/ root=/dev/sda1 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/cache.c linux.21rc5-ac2/arch/parisc/kernel/cache.c --- linux.21rc5/arch/parisc/kernel/cache.c 2003-05-28 15:08:16.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/cache.c 2003-04-28 16:02:18.000000000 +0100 @@ -194,31 +194,69 @@ void disable_sr_hashing(void) { - int srhash_type; + int srhash_type; - if (boot_cpu_data.cpu_type == pcxl2) - return; /* pcxl2 doesn't support space register hashing */ + switch (boot_cpu_data.cpu_type) { + case pcx: /* We shouldn't get this far. setup.c should prevent it. */ + BUG(); + return; + + case pcxs: + case pcxt: + case pcxt_: + srhash_type = SRHASH_PCXST; + break; + + case pcxl: + srhash_type = SRHASH_PCXL; + break; + + case pcxl2: /* pcxl2 doesn't support space register hashing */ + return; + + default: /* Currently all PA2.0 machines use the same ins. sequence */ + srhash_type = SRHASH_PA20; + break; + } - switch (boot_cpu_data.cpu_type) { - - case pcx: - BUG(); /* We shouldn't get here. code in setup.c should prevent it */ - return; + disable_sr_hashing_asm(srhash_type); +} - case pcxs: - case pcxt: - case pcxt_: - srhash_type = SRHASH_PCXST; - break; +void __flush_dcache_page(struct page *page) +{ + struct mm_struct *mm = current->active_mm; + struct vm_area_struct *mpnt; - case pcxl: - srhash_type = SRHASH_PCXL; - break; + flush_kernel_dcache_page(page_address(page)); - default: /* Currently all PA2.0 machines use the same ins. sequence */ - srhash_type = SRHASH_PA20; - break; - } + if (!page->mapping) + return; - disable_sr_hashing_asm(srhash_type); + for (mpnt = page->mapping->i_mmap_shared; + mpnt != NULL; + mpnt = mpnt->vm_next_share) + { + unsigned long off; + + /* + * If this VMA is not in our MM, we can ignore it. 
+		 */
+		if (mpnt->vm_mm != mm)
+			continue;
+
+		if (page->index < mpnt->vm_pgoff)
+			continue;
+
+		off = page->index - mpnt->vm_pgoff;
+		if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
+			continue;
+
+		flush_cache_page(mpnt, mpnt->vm_start + (off << PAGE_SHIFT));
+
+		/* All user shared mappings should be equivalently mapped,
+		 * so once we've flushed one we should be ok
+		 */
+		break;
+	}
+}
+
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/ccio-dma.c linux.21rc5-ac2/arch/parisc/kernel/ccio-dma.c
--- linux.21rc5/arch/parisc/kernel/ccio-dma.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/parisc/kernel/ccio-dma.c	2003-04-28 16:02:18.000000000 +0100
@@ -805,7 +805,7 @@
 	DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n",
 		   nents,
 		   (unsigned long)sg_dma_address(startsg), cnt,
-		   startsg->address, startsg->length
+		   sg_virt_addr(startsg), startsg->length
 	);
 
 	/*
@@ -825,7 +825,7 @@
 		** Look for a VCONTIG chunk
 		*/
 		if (cnt) {
-			unsigned long vaddr = (unsigned long)startsg->address;
+			unsigned long vaddr = (unsigned long) sg_virt_addr(startsg);
 			ASSERT(pdirp);
 
 			/* Since multiple Vcontig blocks could make up
@@ -878,8 +878,8 @@
 		*/
 		dma_sg = vcontig_sg = startsg;
 		dma_len = vcontig_len = vcontig_end = startsg->length;
-		vcontig_end += (unsigned long) startsg->address;
-		dma_offset = (unsigned long) startsg->address & ~IOVP_MASK;
+		vcontig_end += (unsigned long) sg_virt_addr(startsg);
+		dma_offset = (unsigned long) sg_virt_addr(startsg) & ~IOVP_MASK;
 
 		/* PARANOID: clear entries */
 		sg_dma_address(startsg) = 0;
@@ -893,7 +893,7 @@
 			unsigned long startsg_end;
 
 			startsg++;
-			startsg_end = (unsigned long)startsg->address +
+			startsg_end = (unsigned long) sg_virt_addr(startsg) +
 				startsg->length;
 
 			/* PARANOID: clear entries */
@@ -912,7 +912,7 @@
 			/*
 			** Append the next transaction?
 			*/
-			if(vcontig_end == (unsigned long) startsg->address) {
+			if(vcontig_end == (unsigned long) sg_virt_addr(startsg)) {
 				vcontig_len += startsg->length;
 				vcontig_end += startsg->length;
 				dma_len    += startsg->length;
@@ -981,7 +981,8 @@
 
 	/* Fast path single entry scatterlists. */
 	if(nents == 1) {
-		sg_dma_address(sglist)= ccio_map_single(dev, sglist->address,
+		sg_dma_address(sglist)= ccio_map_single(dev,
+						sg_virt_addr(sglist),
 						sglist->length, direction);
 		sg_dma_len(sglist)= sglist->length;
@@ -1043,7 +1044,7 @@
 	ioc = GET_IOC(dev);
 
 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
-		__FUNCTION__, nents, sglist->address, sglist->length);
+		__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
 
 #ifdef CONFIG_PROC_FS
 	ioc->usg_calls++;
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/ccio-rm-dma.c linux.21rc5-ac2/arch/parisc/kernel/ccio-rm-dma.c
--- linux.21rc5/arch/parisc/kernel/ccio-rm-dma.c	2003-05-28 15:08:50.000000000 +0100
+++ linux.21rc5-ac2/arch/parisc/kernel/ccio-rm-dma.c	2003-04-28 16:02:18.000000000 +0100
@@ -118,7 +118,7 @@
 
 	/* KISS: map each buffer separately.
*/ while (nents) { - sg_dma_address(sglist) = ccio_map_single(dev, sglist->address, sglist->length, direction); + sg_dma_address(sglist) = ccio_map_single(dev, sg_virt_addr(sglist), sglist->length, direction); sg_dma_len(sglist) = sglist->length; nents--; sglist++; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/drivers.c linux.21rc5-ac2/arch/parisc/kernel/drivers.c --- linux.21rc5/arch/parisc/kernel/drivers.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/drivers.c 2003-04-28 16:02:18.000000000 +0100 @@ -9,6 +9,7 @@ * Copyright (c) 1999 The Puffin Group * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard * Copyright (c) 2001 Helge Deller + * Copyright (c) 2001,2002 Ryan Bradetich * * The file handles registering devices and drivers, then matching them. * It's the closest we get to a dating agency. @@ -23,7 +24,6 @@ #include #include #include -#include /* See comments in include/asm-parisc/pci.h */ struct pci_dma_ops *hppa_dma_ops; @@ -416,6 +416,7 @@ dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) | (iodc_data[5] << 8) | iodc_data[6]; dev->hpa = hpa; + name = parisc_hardware_description(&dev->id); if (name) { strncpy(dev->name, name, sizeof(dev->name)-1); @@ -461,30 +462,26 @@ #define BC_PORT_MASK 0x8 #define BC_LOWER_PORT 0x8 +#define IO_STATUS offsetof(struct bc_module, io_status) + #define BUS_CONVERTER(dev) \ ((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT)) #define IS_LOWER_PORT(dev) \ - ((gsc_readl(&((struct bc_module *)dev->hpa)->io_status) \ - & BC_PORT_MASK) == BC_LOWER_PORT) - -#define READ_IO_IO_LOW(dev) \ - (dev->id.hw_type == HPHW_IOA ? \ - __raw_readl((unsigned long)&((struct bc_module *)dev->hpa)->io_io_low) << 16 : \ - __raw_readl((unsigned long)&((struct bc_module *)dev->hpa)->io_io_low)) - -#define READ_IO_IO_HIGH(dev) \ - (dev->id.hw_type == HPHW_IOA ? 
\ - __raw_readl((unsigned long)&((struct bc_module *)dev->hpa)->io_io_high) << 16 : \ - __raw_readl((unsigned long)&((struct bc_module *)dev->hpa)->io_io_high)) + ((__raw_readl(dev->hpa + IO_STATUS) & BC_PORT_MASK) == BC_LOWER_PORT) +#define MAX_NATIVE_DEVICES 64 +#define NATIVE_DEVICE_OFFSET 0x1000 -static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, - struct parisc_device *parent); - -#define FLEX_MASK (unsigned long)0xfffffffffffc0000 +#define FLEX_MASK (unsigned long)0xfffffffffffc0000 +#define IO_IO_LOW offsetof(struct bc_module, io_io_low) +#define IO_IO_HIGH offsetof(struct bc_module, io_io_high) +#define READ_IO_IO_LOW(dev) (unsigned long)(signed int)__raw_readl(dev->hpa + IO_IO_LOW) +#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)__raw_readl(dev->hpa + IO_IO_HIGH) +static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, + struct parisc_device *parent); void walk_lower_bus(struct parisc_device *dev) { @@ -493,15 +490,17 @@ if(!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev)) return; - io_io_low = ((unsigned long)(signed int)READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK; - io_io_high = ((unsigned long)(signed int)READ_IO_IO_HIGH(dev) + ~FLEX_MASK) & FLEX_MASK; + if(dev->id.hw_type == HPHW_IOA) { + io_io_low = (unsigned long)(signed int)(READ_IO_IO_LOW(dev) << 16); + io_io_high = io_io_low + MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET; + } else { + io_io_low = (READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK; + io_io_high = (READ_IO_IO_HIGH(dev)+ ~FLEX_MASK) & FLEX_MASK; + } walk_native_bus(io_io_low, io_io_high, dev); } -#define MAX_NATIVE_DEVICES 64 -#define NATIVE_DEVICE_OFFSET 0x1000 - /** * walk_native_bus -- Probe a bus for devices * @io_io_low: Base address of this bus. @@ -515,7 +514,7 @@ * keyboard ports). This problem is not yet solved. */ static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, - struct parisc_device *parent) + struct parisc_device *parent) { int i, devices_found = 0; unsigned long hpa = io_io_low; @@ -523,7 +522,7 @@ get_node_path(parent, &path); do { - for (i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) { + for(i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) { struct parisc_device *dev; /* Was the device already added by Firmware? 
*/ @@ -539,7 +538,7 @@ } walk_lower_bus(dev); } - } while (!devices_found && hpa < io_io_high); + } while(!devices_found && hpa < io_io_high); } #define CENTRAL_BUS_ADDR (unsigned long) 0xfffffffffff80000 @@ -552,7 +551,7 @@ */ void walk_central_bus(void) { - walk_native_bus(CENTRAL_BUS_ADDR, + walk_native_bus(CENTRAL_BUS_ADDR, CENTRAL_BUS_ADDR + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET), &root); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/entry.S linux.21rc5-ac2/arch/parisc/kernel/entry.S --- linux.21rc5/arch/parisc/kernel/entry.S 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/entry.S 2003-04-28 16:02:18.000000000 +0100 @@ -927,11 +927,11 @@ ldo -16(%r30),%r29 /* Reference param save area */ #endif - ldil L%intr_restore, %r2 + ldil L%intr_check_sig, %r2 copy %r25, %r16 /* save pt_regs */ b handle_interruption - ldo R%intr_restore(%r2), %r2 + ldo R%intr_check_sig(%r2), %r2 /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/hardware.c linux.21rc5-ac2/arch/parisc/kernel/hardware.c --- linux.21rc5/arch/parisc/kernel/hardware.c 2003-05-28 15:08:16.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/hardware.c 2003-04-28 16:02:18.000000000 +0100 @@ -99,8 +99,8 @@ {HPHW_NPROC,0x481,0x4,0x81,"Wilbur (E25)"}, {HPHW_NPROC,0x482,0x4,0x81,"WB-80 (E35)"}, {HPHW_NPROC,0x483,0x4,0x81,"WB-96 (E45)"}, - {HPHW_NPROC,0x48,0x4,0x81,"UL Proc L-100 (811/D210,D310)"}, - {HPHW_NPROC,0x48,0x4,0x81,"UL Proc L-75 (801/D200)"}, + {HPHW_NPROC,0x484,0x4,0x81,"UL Proc L-100 (811/D210,D310)"}, + {HPHW_NPROC,0x485,0x4,0x81,"UL Proc L-75 (801/D200)"}, {HPHW_NPROC,0x501,0x4,0x81,"Merlin L2 132 (9000/778/B132L)"}, {HPHW_NPROC,0x502,0x4,0x81,"Merlin L2 160 (9000/778/B160L)"}, {HPHW_NPROC,0x503,0x4,0x81,"Merlin L2+ 132 (9000/778/B132L)"}, @@ -403,7 +403,7 @@ {HPHW_BA, 0x01B, 0x00078, 0x0, "Anole 100 VME BA"}, {HPHW_BA, 0x024, 0x00078, 0x0, "Fast Pace VME BA"}, {HPHW_BA, 0x034, 0x00078, 0x0, "Anole T VME BA"}, - {HPHW_BA, 0x04A, 0x00078, 0x0, "Anole L2 132 BME BA"}, + {HPHW_BA, 0x04A, 0x00078, 0x0, "Anole L2 132 VME BA"}, {HPHW_BA, 0x04C, 0x00078, 0x0, "Anole L2 165 VME BA"}, {HPHW_BA, 0x011, 0x00081, 0x0, "WB-96 Core BA"}, {HPHW_BA, 0x012, 0x00081, 0x0, "Orville UX Core BA"}, @@ -805,8 +805,8 @@ {HPHW_FIO, 0x04E, 0x0007B, 0x0, "Kiji L2 132 Core Audio"}, {HPHW_FIO, 0x050, 0x0007B, 0x0, "Merlin Jr 132 Core Audio"}, {HPHW_FIO, 0x051, 0x0007B, 0x0, "Firehawk Audio"}, - {HPHW_FIO, 0x056, 0x0007B, 0x0, "Raven+ w SE FWSCSU Core Audio"}, - {HPHW_FIO, 0x057, 0x0007B, 0x0, "Raven+ w Diff FWSCSU Core Audio"}, + {HPHW_FIO, 0x056, 0x0007B, 0x0, "Raven+ w SE FWSCSI Core Audio"}, + {HPHW_FIO, 0x057, 0x0007B, 0x0, "Raven+ w Diff FWSCSI Core Audio"}, {HPHW_FIO, 0x058, 0x0007B, 0x0, "FireHawk 200 Audio"}, {HPHW_FIO, 0x05C, 0x0007B, 0x0, "SummitHawk 230 Core Audio"}, {HPHW_FIO, 0x800, 0x0007B, 0x0, "Hitachi Tiny 64 Audio"}, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/head.S linux.21rc5-ac2/arch/parisc/kernel/head.S --- linux.21rc5/arch/parisc/kernel/head.S 2003-05-28 15:08:16.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/head.S 2003-04-28 16:02:18.000000000 +0100 @@ -74,7 +74,7 @@ ldo R%PA(_end)(%r4),%r4 $bss_loop: cmpb,<<,n %r3,%r4,$bss_loop - stb,ma %r0,1(%r3) + stw,ma %r0,4(%r3) /* Save away the arguments the boot loader passed in (32 bit args) */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/ioctl32.c 
linux.21rc5-ac2/arch/parisc/kernel/ioctl32.c --- linux.21rc5/arch/parisc/kernel/ioctl32.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/ioctl32.c 2003-04-28 16:02:18.000000000 +0100 @@ -3123,6 +3123,11 @@ COMPATIBLE_IOCTL(SIOCDRARP) COMPATIBLE_IOCTL(SIOCADDDLCI) COMPATIBLE_IOCTL(SIOCDELDLCI) +COMPATIBLE_IOCTL(SIOCGMIIPHY) +COMPATIBLE_IOCTL(SIOCGMIIREG) +COMPATIBLE_IOCTL(SIOCSMIIREG) +COMPATIBLE_IOCTL(SIOCGIFVLAN) +COMPATIBLE_IOCTL(SIOCSIFVLAN) /* SG stuff */ COMPATIBLE_IOCTL(SG_SET_TIMEOUT) COMPATIBLE_IOCTL(SG_GET_TIMEOUT) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/irq.c linux.21rc5-ac2/arch/parisc/kernel/irq.c --- linux.21rc5/arch/parisc/kernel/irq.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/irq.c 2003-04-28 16:02:18.000000000 +0100 @@ -57,7 +57,7 @@ /* Bits in EIEM correlate with cpu_irq_action[]. ** Numbered *Big Endian*! (ie bit 0 is MSB) */ -static unsigned long cpu_eiem = 0; +static volatile unsigned long cpu_eiem = 0; static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED; /* protect IRQ regions */ @@ -462,7 +462,7 @@ for (irq = 0; eirr_val && bit; bit>>=1, irq++) { - if (!(bit&eirr_val)) + if (!(bit&eirr_val&cpu_eiem)) continue; /* clear bit in mask - can exit loop sooner */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/lasimap.map linux.21rc5-ac2/arch/parisc/kernel/lasimap.map --- linux.21rc5/arch/parisc/kernel/lasimap.map 2003-05-28 15:08:16.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/lasimap.map 1970-01-01 01:00:00.000000000 +0100 @@ -1,322 +0,0 @@ -# HP 712 kernel keymap. This uses 7 modifier combinations. - -keymaps 0-2,4-5,8,12 -# ie, plain, Shift, AltGr, Control, Control+Shift, Alt and Control+Alt - - -# Change the above line into -# keymaps 0-2,4-6,8,12 -# in case you want the entries -# altgr control keycode 83 = Boot -# altgr control keycode 111 = Boot -# below. 
-# -# In fact AltGr is used very little, and one more keymap can -# be saved by mapping AltGr to Alt (and adapting a few entries): -# keycode 100 = Alt -# -keycode 1 = F9 F19 Console_21 - control keycode 1 = F9 - alt keycode 1 = Console_9 - control alt keycode 1 = Console_9 -keycode 2 = -keycode 3 = F5 F15 Console_17 - control keycode 3 = F5 - alt keycode 3 = Console_5 - control alt keycode 3 = Console_5 -keycode 4 = F3 F13 Console_15 - control keycode 4 = F3 - alt keycode 4 = Console_3 - control alt keycode 4 = Console_3 -keycode 5 = F1 F11 Console_13 - control keycode 5 = F1 - alt keycode 5 = Console_1 - control alt keycode 5 = Console_1 -keycode 6 = F2 F12 Console_14 - control keycode 6 = F2 - alt keycode 6 = Console_2 - control alt keycode 6 = Console_2 -keycode 7 = F12 F12 Console_24 - control keycode 7 = F12 - alt keycode 7 = Console_12 - control alt keycode 7 = Console_12 -keycode 8 = -keycode 9 = F10 F20 Console_22 - control keycode 9 = F10 - alt keycode 9 = Console_10 - control alt keycode 9 = Console_10 -keycode 10 = F8 F18 Console_20 - control keycode 10 = F8 - alt keycode 10 = Console_8 - control alt keycode 10 = Console_8 -keycode 11 = F6 F16 Console_18 - control keycode 11 = F6 - alt keycode 11 = Console_6 - control alt keycode 11 = Console_6 -keycode 12 = F4 F14 Console_16 - control keycode 12 = F4 - alt keycode 12 = Console_4 - control alt keycode 12 = Console_4 -keycode 13 = Tab Tab - alt keycode 13 = Meta_Tab -keycode 14 = grave asciitilde - control keycode 14 = nul - alt keycode 14 = Meta_grave -keycode 15 = -keycode 16 = -keycode 17 = Alt -keycode 18 = Shift -keycode 19 = -keycode 20 = Control -keycode 21 = q -keycode 22 = one exclam exclam -keycode 23 = -keycode 24 = -keycode 25 = -keycode 26 = z -keycode 27 = s -keycode 28 = a - altgr keycode 28 = Hex_A -keycode 29 = w -keycode 30 = two at at -keycode 31 = -keycode 32 = -keycode 33 = c - altgr keycode 46 = Hex_C -keycode 34 = x -keycode 35 = d - altgr keycode 35 = Hex_D -keycode 36 = e - altgr keycode 36 = Hex_E -keycode 37 = four dollar -keycode 38 = three numbersign -keycode 39 = -keycode 40 = -keycode 41 = -keycode 42 = v -keycode 43 = f - altgr keycode 43 = Hex_F -keycode 44 = t -keycode 45 = r -keycode 46 = five percent -keycode 47 = -keycode 48 = -keycode 49 = n -keycode 50 = b - altgr keycode 50 = Hex_B -keycode 51 = h -keycode 52 = g -keycode 53 = y -keycode 54 = six asciicircum -keycode 55 = -keycode 56 = -keycode 57 = -keycode 58 = m -keycode 59 = j -keycode 60 = u -keycode 61 = seven ampersand -keycode 62 = eight asterisk asterisk -keycode 63 = -keycode 64 = -keycode 65 = comma less - alt keycode 65 = Meta_comma -keycode 66 = k -keycode 67 = i -keycode 68 = o -keycode 69 = zero parenright bracketright -keycode 70 = nine parenleft bracketleft -keycode 71 = -keycode 72 = -keycode 73 = period greater - control keycode 73 = Compose - alt keycode 73 = Meta_period -keycode 74 = slash question - control keycode 74 = Delete - alt keycode 53 = Meta_slash -keycode 75 = l -keycode 76 = semicolon colon - alt keycode 39 = Meta_semicolon -keycode 77 = p -keycode 78 = minus underscore -keycode 79 = -keycode 80 = -keycode 81 = -keycode 82 = apostrophe quotedbl - control keycode 82 = Control_g - alt keycode 40 = Meta_apostrophe -keycode 83 = -keycode 84 = bracketleft braceleft - control keycode 84 = Escape - alt keycode 26 = Meta_bracketleft -keycode 85 = equal plus -keycode 86 = -keycode 87 = -keycode 88 = Caps_Lock -keycode 88 = -keycode 89 = -keycode 89 = -keycode 89 = -keycode 90 = Return - alt keycode 90 = 
Meta_Control_m -keycode 91 = bracketright braceright asciitilde - control keycode 91 = Control_bracketright - alt keycode 91 = Meta_bracketright -keycode 92 = -keycode 93 = backslash bar - control keycode 43 = Control_backslash - alt keycode 43 = Meta_backslash -keycode 94 = -keycode 95 = -keycode 96 = -keycode 97 = -keycode 98 = -keycode 99 = -keycode 100 = -keycode 101 = -keycode 102 = BackSpace -keycode 103 = -keycode 104 = -keycode 105 = KP_1 - alt keycode 105 = Ascii_1 - altgr keycode 105 = Hex_1 -keycode 106 = -keycode 107 = KP_4 - alt keycode 107 = Ascii_4 - altgr keycode 107 = Hex_4 -keycode 108 = KP_7 - alt keycode 108 = Ascii_7 - altgr keycode 108 = Hex_7 -keycode 109 = -keycode 110 = -keycode 111 = -keycode 112 = KP_0 - alt keycode 82 = Ascii_0 - altgr keycode 82 = Hex_0 -keycode 113 = KP_Period -keycode 114 = KP_2 - alt keycode 114 = Ascii_2 - altgr keycode 114 = Hex_2 -keycode 115 = KP_5 - alt keycode 115 = Ascii_5 - altgr keycode 115 = Hex_5 -keycode 116 = KP_6 - alt keycode 116 = Ascii_6 - altgr keycode 116 = Hex_6 -keycode 117 = KP_8 - alt keycode 117 = Ascii_8 - altgr keycode 117 = Hex_8 -keycode 118 = Escape -keycode 119 = -keycode 120 = F11 -keycode 121 = KP_Add -keycode 122 = KP_3 - alt keycode 122 = Ascii_3 - altgr keycode 122 = Hex_3 -keycode 123 = KP_Subtract -keycode 124 = KP_Multiply -keycode 125 = KP_9 - alt keycode 125 = Ascii_9 - altgr keycode 125 = Hex_9 -keycode 126 = -# 131!! -keycode 127 = F7 F17 Console_19 - control keycode 127 = F7 - alt keycode 127 = Console_7 - control alt keycode 127 = Console_7 - -string F1 = "\033[[A" -string F2 = "\033[[B" -string F3 = "\033[[C" -string F4 = "\033[[D" -string F5 = "\033[[E" -string F6 = "\033[17~" -string F7 = "\033[18~" -string F8 = "\033[19~" -string F9 = "\033[20~" -string F10 = "\033[21~" -string F11 = "\033[23~" -string F12 = "\033[24~" -string F13 = "\033[25~" -string F14 = "\033[26~" -string F15 = "\033[28~" -string F16 = "\033[29~" -string F17 = "\033[31~" -string F18 = "\033[32~" -string F19 = "\033[33~" -string F20 = "\033[34~" -string Find = "\033[1~" -string Insert = "\033[2~" -string Remove = "\033[3~" -string Select = "\033[4~" -string Prior = "\033[5~" -string Next = "\033[6~" -string Macro = "\033[M" -string Pause = "\033[P" -compose '`' 'A' to 'À' -compose '`' 'a' to 'à' -compose '\'' 'A' to 'Á' -compose '\'' 'a' to 'á' -compose '^' 'A' to 'Â' -compose '^' 'a' to 'â' -compose '~' 'A' to 'Ã' -compose '~' 'a' to 'ã' -compose '"' 'A' to 'Ä' -compose '"' 'a' to 'ä' -compose 'O' 'A' to 'Å' -compose 'o' 'a' to 'å' -compose '0' 'A' to 'Å' -compose '0' 'a' to 'å' -compose 'A' 'A' to 'Å' -compose 'a' 'a' to 'å' -compose 'A' 'E' to 'Æ' -compose 'a' 'e' to 'æ' -compose ',' 'C' to 'Ç' -compose ',' 'c' to 'ç' -compose '`' 'E' to 'È' -compose '`' 'e' to 'è' -compose '\'' 'E' to 'É' -compose '\'' 'e' to 'é' -compose '^' 'E' to 'Ê' -compose '^' 'e' to 'ê' -compose '"' 'E' to 'Ë' -compose '"' 'e' to 'ë' -compose '`' 'I' to 'Ì' -compose '`' 'i' to 'ì' -compose '\'' 'I' to 'Í' -compose '\'' 'i' to 'í' -compose '^' 'I' to 'Î' -compose '^' 'i' to 'î' -compose '"' 'I' to 'Ï' -compose '"' 'i' to 'ï' -compose '-' 'D' to 'Ð' -compose '-' 'd' to 'ð' -compose '~' 'N' to 'Ñ' -compose '~' 'n' to 'ñ' -compose '`' 'O' to 'Ò' -compose '`' 'o' to 'ò' -compose '\'' 'O' to 'Ó' -compose '\'' 'o' to 'ó' -compose '^' 'O' to 'Ô' -compose '^' 'o' to 'ô' -compose '~' 'O' to 'Õ' -compose '~' 'o' to 'õ' -compose '"' 'O' to 'Ö' -compose '"' 'o' to 'ö' -compose '/' 'O' to 'Ø' -compose '/' 'o' to 'ø' -compose '`' 'U' to 'Ù' -compose '`' 'u' to 
'ù' -compose '\'' 'U' to 'Ú' -compose '\'' 'u' to 'ú' -compose '^' 'U' to 'Û' -compose '^' 'u' to 'û' -compose '"' 'U' to 'Ü' -compose '"' 'u' to 'ü' -compose '\'' 'Y' to 'Ý' -compose '\'' 'y' to 'ý' -compose 'T' 'H' to 'Þ' -compose 't' 'h' to 'þ' -compose 's' 's' to 'ß' -compose '"' 'y' to 'ÿ' -compose 's' 'z' to 'ß' -compose 'i' 'j' to 'ÿ' diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/parisc_ksyms.c linux.21rc5-ac2/arch/parisc/kernel/parisc_ksyms.c --- linux.21rc5/arch/parisc/kernel/parisc_ksyms.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/parisc_ksyms.c 2003-04-28 16:02:18.000000000 +0100 @@ -67,6 +67,7 @@ #include EXPORT_SYMBOL(smp_num_cpus); +EXPORT_SYMBOL(smp_call_function); #endif /* CONFIG_SMP */ #include @@ -127,7 +128,13 @@ #include EXPORT_SYMBOL(flush_kernel_dcache_range_asm); EXPORT_SYMBOL(flush_kernel_dcache_page); -EXPORT_SYMBOL(flush_all_caches); + +/* asm/pgalloc.h doesn't include all its dependencies */ +extern void __flush_dcache_page(struct page *page); +EXPORT_SYMBOL(__flush_dcache_page); + +extern void flush_cache_all_local(void); +EXPORT_SYMBOL(flush_cache_all_local); #include extern long sys_open(const char *, int, int); @@ -150,6 +157,8 @@ #include EXPORT_SYMBOL(pdc_add_valid); +EXPORT_SYMBOL(pdc_tod_read); +EXPORT_SYMBOL(pdc_tod_set); EXPORT_SYMBOL(pdc_lan_station_id); EXPORT_SYMBOL(pdc_get_initiator); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/pci-dma.c linux.21rc5-ac2/arch/parisc/kernel/pci-dma.c --- linux.21rc5/arch/parisc/kernel/pci-dma.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/pci-dma.c 2003-04-28 16:02:18.000000000 +0100 @@ -428,9 +428,9 @@ BUG(); for (i = 0; i < nents; i++, sglist++ ) { - sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(sglist->address); + sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(sg_virt_addr(sglist)); sg_dma_len(sglist) = sglist->length; - flush_kernel_dcache_range((unsigned long)sglist->address, + flush_kernel_dcache_range((unsigned long)sg_virt_addr(sglist), sglist->length); } return nents; @@ -449,7 +449,7 @@ /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */ for (i = 0; i < nents; i++, sglist++ ) - flush_kernel_dcache_range((unsigned long) sglist->address, sglist->length); + flush_kernel_dcache_range((unsigned long) sg_virt_addr(sglist), sglist->length); return; } @@ -468,7 +468,7 @@ /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */ for (i = 0; i < nents; i++, sglist++ ) - flush_kernel_dcache_range((unsigned long) sglist->address, sglist->length); + flush_kernel_dcache_range((unsigned long) sg_virt_addr(sglist), sglist->length); } struct pci_dma_ops pcxl_dma_ops = { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/perf.c linux.21rc5-ac2/arch/parisc/kernel/perf.c --- linux.21rc5/arch/parisc/kernel/perf.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/perf.c 2003-04-28 16:02:18.000000000 +0100 @@ -499,6 +499,8 @@ */ static int __init perf_init(void) { + int ret; + /* Determine correct processor interface to use */ bitmask_array = perf_bitmasks; @@ -517,11 +519,17 @@ return -ENODEV; } + ret = misc_register(&perf_dev); + if (ret) { + printk(KERN_ERR "Performance monitoring counters: " + "cannot register misc device.\n"); + return ret; + } + /* Patch the images to match the system */ perf_patch_images(); spin_lock_init(&perf_lock);
- misc_register(&perf_dev); /* TODO: this only lets us access the first cpu.. what to do for SMP? */ cpu_device = cpu_data[0].dev; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/processor.c linux.21rc5-ac2/arch/parisc/kernel/processor.c --- linux.21rc5/arch/parisc/kernel/processor.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/processor.c 2003-04-28 16:02:18.000000000 +0100 @@ -77,8 +77,6 @@ unsigned long txn_addr; unsigned long cpuid; struct cpuinfo_parisc *p; - extern struct irq_region_ops cpu_irq_ops; /* arch/parisc...irq.c */ - extern struct irqaction cpu_irq_actions[]; /* arch/parisc...irq.c */ #ifndef CONFIG_SMP if (boot_cpu_data.cpu_count > 0) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/sba_iommu.c linux.21rc5-ac2/arch/parisc/kernel/sba_iommu.c --- linux.21rc5/arch/parisc/kernel/sba_iommu.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/sba_iommu.c 2003-04-28 16:02:18.000000000 +0100 @@ -1056,7 +1056,7 @@ printk(KERN_DEBUG " %2d : %08lx/%05x %p/%05x\n", nents, (unsigned long) sg_dma_address(startsg), cnt, - sg_virt_address(startsg), startsg->length + sg_virt_addr(startsg), startsg->length ); #else DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/smp.c linux.21rc5-ac2/arch/parisc/kernel/smp.c --- linux.21rc5/arch/parisc/kernel/smp.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/smp.c 2003-04-28 16:02:18.000000000 +0100 @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -434,25 +435,21 @@ extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ /* Set modes and Enable floating point coprocessor */ - (void) init_per_cpu(cpunum); + init_per_cpu(cpunum); disable_sr_hashing(); - mb(); /* Well, support 2.4 linux scheme as well. */ - if (test_and_set_bit(cpunum, (unsigned long *) (&cpu_online_map))) - { - extern void machine_halt(void); /* arch/parisc.../process.c */ - + if (test_and_set_bit(cpunum, (unsigned long *) (&cpu_online_map))) { printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); machine_halt(); - } + } /* Initialise the idle task for this CPU */ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; - if(current->mm) + if (current->mm) BUG(); enter_lazy_tlb(&init_mm, current, cpunum); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/traps.c linux.21rc5-ac2/arch/parisc/kernel/traps.c --- linux.21rc5/arch/parisc/kernel/traps.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/traps.c 2003-04-28 16:02:18.000000000 +0100 @@ -455,13 +455,6 @@ unsigned long fault_space = 0; struct siginfo si; - /* HACK! jsm is going to fix this. - * entry.S will manage I-bit - only enable I-bit if it was - * enabled before we took the "trap". 
- */ - if (code != 1) - local_irq_enable(); - switch(code) { case 1: @@ -727,6 +720,7 @@ } } + local_irq_enable(); do_page_fault(regs, code, fault_address); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/kernel/unaligned.c linux.21rc5-ac2/arch/parisc/kernel/unaligned.c --- linux.21rc5/arch/parisc/kernel/unaligned.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/kernel/unaligned.c 2003-04-28 16:02:18.000000000 +0100 @@ -3,6 +3,7 @@ * Unaligned memory access handler * * Copyright (C) 2001 Randolph Chung + * Significantly tweaked by LaMont Jones * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -66,28 +67,28 @@ #define OPCODE3_MASK OPCODE3(0x3f,1) #define OPCODE4_MASK OPCODE4(0x3f) -/* skip LDB (index) */ +/* skip LDB - never unaligned (index) */ #define OPCODE_LDH_I OPCODE1(0x03,0,0x1) #define OPCODE_LDW_I OPCODE1(0x03,0,0x2) #define OPCODE_LDD_I OPCODE1(0x03,0,0x3) #define OPCODE_LDDA_I OPCODE1(0x03,0,0x4) -/* skip LDCD (index) */ +#define OPCODE_LDCD_I OPCODE1(0x03,0,0x5) #define OPCODE_LDWA_I OPCODE1(0x03,0,0x6) -/* skip LDCW (index) */ -/* skip LDB (short) */ +#define OPCODE_LDCW_I OPCODE1(0x03,0,0x7) +/* skip LDB - never unaligned (short) */ #define OPCODE_LDH_S OPCODE1(0x03,1,0x1) #define OPCODE_LDW_S OPCODE1(0x03,1,0x2) #define OPCODE_LDD_S OPCODE1(0x03,1,0x3) #define OPCODE_LDDA_S OPCODE1(0x03,1,0x4) -/* skip LDCD (short) */ +#define OPCODE_LDCD_S OPCODE1(0x03,1,0x5) #define OPCODE_LDWA_S OPCODE1(0x03,1,0x6) -/* skip LDCW (short) */ -/* skip STB */ +#define OPCODE_LDCW_S OPCODE1(0x03,1,0x7) +/* skip STB - never unaligned */ #define OPCODE_STH OPCODE1(0x03,1,0x9) #define OPCODE_STW OPCODE1(0x03,1,0xa) #define OPCODE_STD OPCODE1(0x03,1,0xb) -/* skip STBY */ -/* skip STDBY */ +/* skip STBY - never unaligned */ +/* skip STDBY - never unaligned */ #define OPCODE_STWA OPCODE1(0x03,1,0xe) #define OPCODE_STDA OPCODE1(0x03,1,0xf) @@ -103,129 +104,200 @@ #define OPCODE_LDH_L OPCODE4(0x11) #define OPCODE_LDW_L OPCODE4(0x12) -#define OPCODE_LDW_L2 OPCODE4(0x13) +#define OPCODE_LDWM OPCODE4(0x13) #define OPCODE_STH_L OPCODE4(0x19) #define OPCODE_STW_L OPCODE4(0x1A) -#define OPCODE_STW_L2 OPCODE4(0x1B) +#define OPCODE_STWM OPCODE4(0x1B) + +#define MAJOR_OP(i) (((i)>>26)&0x3f) +#define R1(i) (((i)>>21)&0x1f) +#define R2(i) (((i)>>16)&0x1f) +#define R3(i) ((i)&0x1f) +#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0)) +#define IM5_2(i) IM((i)>>16,5) +#define IM5_3(i) IM((i),5) +#define IM14(i) IM((i),14) int unaligned_enabled = 1; void die_if_kernel (char *str, struct pt_regs *regs, long err); -static int emulate_load(struct pt_regs *regs, int len, int toreg) +static int emulate_ldh(struct pt_regs *regs, int toreg) { unsigned long saddr = regs->ior; unsigned long val = 0; - int ret = 0; - if (regs->isr != regs->sr[7]) - { - printk(KERN_CRIT "isr verification failed (isr: " RFMT ", sr7: " RFMT "\n", - regs->isr, regs->sr[7]); - return 1; - } + DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n", + regs->isr, regs->ior, toreg); + + __asm__ __volatile__ ( +" mtsp %3, %%sr1\n" +" ldbs 0(%%sr1,%2), %%r20\n" +" ldbs 1(%%sr1,%2), %0\n" + "depw %%r20, 23, 24, %0\n" + : "=r" (val) + : "0" (val), "r" (saddr), "r" (regs->isr) + : "r20" ); + + DPRINTF("val = 0x" RFMT "\n", val); + + if (toreg) + regs->gr[toreg] = val; + + return 0; +} +static int emulate_ldw(struct pt_regs *regs, int toreg) +{ + unsigned long saddr = regs->ior; + 
unsigned long val = 0; - DPRINTF("load " RFMT ":" RFMT " to r%d for %d bytes\n", - regs->isr, regs->ior, toreg, len); + DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n", + regs->isr, regs->ior, toreg); __asm__ __volatile__ ( -" mfsp %%sr1, %%r20\n" -" mtsp %6, %%sr1\n" -" copy %%r0, %0\n" -"0: ldbs,ma 1(%%sr1,%4), %%r19\n" -" addi -1, %5, %5\n" -" cmpib,>= 0, %5, 2f\n" -" or %%r19, %0, %0\n" -" b 0b\n" - -#ifdef __LP64__ - "depd,z %0, 55, 56, %0\n" -#else - "depw,z %0, 23, 24, %0\n" -#endif - -"1: ldi 10, %1\n" -"2: mtsp %%r20, %%sr1\n" -" .section __ex_table,\"a\"\n" +" zdep %2,28,2,%%r19\n" /* r19=(ofs&3)*8 */ +" mtsp %3, %%sr1\n" +" depw %%r0,31,2,%2\n" +" ldw 0(%%sr1,%2),%0\n" +" ldw 4(%%sr1,%2),%%r20\n" +" subi 32,%%r19,%%r19\n" +" mtctl %%r19,11\n" +" vshd %0,%%r20,%0\n" + : "=r" (val) + : "0" (val), "r" (saddr), "r" (regs->isr) + : "r19", "r20" ); + + DPRINTF("val = 0x" RFMT "\n", val); + + if (toreg) + regs->gr[toreg] = val; + + return 0; +} #ifdef __LP64__ - ".dword 0b, (1b-0b)\n" -#else - ".word 0b, (1b-0b)\n" -#endif - ".previous\n" - : "=r" (val), "=r" (ret) - : "0" (val), "1" (ret), "r" (saddr), "r" (len), "r" (regs->isr) +static int emulate_ldd(struct pt_regs *regs, int toreg) +{ + unsigned long saddr = regs->ior; + unsigned long val = 0; + + DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n", + regs->isr, regs->ior, toreg); + + __asm__ __volatile__ ( +" depd,z %2,60,3,%%r19\n" /* r19=(ofs&7)*8 */ +" mtsp %3, %%sr1\n" +" depd %%r0,63,3,%2\n" +" ldd 0(%%sr1,%2),%0\n" +" ldd 8(%%sr1,%2),%%r20\n" +" subi 64,%%r19,%%r19\n" +" mtsar %%r19\n" +" shrpd %0,%%r20,%%sar,%0\n" + : "=r" (val) + : "0" (val), "r" (saddr), "r" (regs->isr) : "r19", "r20" ); DPRINTF("val = 0x" RFMT "\n", val); - regs->gr[toreg] = val; + if (toreg) + regs->gr[toreg] = val; - return ret; + return 0; } +#endif -static int emulate_store(struct pt_regs *regs, int len, int frreg) +static int emulate_sth(struct pt_regs *regs, int frreg) { - int ret = 0; -#ifdef __LP64__ - unsigned long val = regs->gr[frreg] << (64 - (len << 3)); -#else - unsigned long val = regs->gr[frreg] << (32 - (len << 3)); -#endif + unsigned long val = regs->gr[frreg]; + if (!frreg) + val = 0; - if (regs->isr != regs->sr[7]) - { - printk(KERN_CRIT "isr verification failed (isr: " RFMT ", sr7: " RFMT "\n", - regs->isr, regs->sr[7]); - return 1; - } + DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg, + regs->gr[frreg], regs->isr, regs->ior); + + __asm__ __volatile__ ( +" mtsp %2, %%sr1\n" +" extrw,u %0, 23, 8, %%r19\n" +" stb %0, 1(%%sr1, %1)\n" +" stb %%r19, 0(%%sr1, %1)\n" + : + : "r" (val), "r" (regs->ior), "r" (regs->isr) + : "r19" ); + + return 0; +} +static int emulate_stw(struct pt_regs *regs, int frreg) +{ + unsigned long val = regs->gr[frreg]; + if (!frreg) + val = 0; - DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for %d bytes\n", frreg, - regs->gr[frreg], regs->isr, regs->ior, len); + DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg, + regs->gr[frreg], regs->isr, regs->ior); __asm__ __volatile__ ( -" mfsp %%sr1, %%r20\n" /* save sr1 */ -" mtsp %5, %%sr1\n" -#ifdef __LP64__ -"0: extrd,u %2, 7, 8, %%r19\n" -#else -"0: extrw,u %2, 7, 8, %%r19\n" -#endif -"1: stb,ma %%r19, 1(%%sr1, %3)\n" -" addi -1, %4, %4\n" -" cmpib,>= 0, %4, 3f\n" - +" mtsp %2, %%sr1\n" +" zdep %1, 28, 2, %%r19\n" +" dep %%r0, 31, 2, %1\n" +" mtsar %%r19\n" +" depwi,z -2, %%sar, 32, %%r19\n" +" ldw 0(%%sr1,%1),%%r20\n" +" ldw 4(%%sr1,%1),%%r21\n" +" vshd %%r0, %0, %%r22\n" +" vshd %0, %%r0, %%r1\n" +" 
and %%r20, %%r19, %%r20\n" +" andcm %%r21, %%r19, %%r21\n" +" or %%r22, %%r20, %%r20\n" +" or %%r1, %%r21, %%r21\n" +" stw %%r20,0(%%sr1,%1)\n" +" stw %%r21,4(%%sr1,%1)\n" + : + : "r" (val), "r" (regs->ior), "r" (regs->isr) + : "r19", "r20", "r21", "r22", "r1" ); + + return 0; +} #ifdef __LP64__ - "depd,z %2, 55, 56, %2\n" -#else - "depw,z %2, 23, 24, %2\n" -#endif +static int emulate_std(struct pt_regs *regs, int frreg) +{ + unsigned long val = regs->gr[frreg]; + if (!frreg) + val = 0; -" b 0b\n" -" nop\n" + DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 8 bytes\n", frreg, + regs->gr[frreg], regs->isr, regs->ior); -"2: ldi 11, %0\n" -"3: mtsp %%r20, %%sr1\n" -" .section __ex_table,\"a\"\n" -#ifdef __LP64__ - ".dword 1b, (2b-1b)\n" -#else - ".word 1b, (2b-1b)\n" -#endif - ".previous\n" - : "=r" (ret) - : "0" (ret), "r" (val), "r" (regs->ior), "r" (len), "r" (regs->isr) - : "r19", "r20" ); - return ret; -} + __asm__ __volatile__ ( +" mtsp %2, %%sr1\n" +" depd,z %1, 60, 3, %%r19\n" +" depd %%r0, 63, 3, %1\n" +" mtsar %%r19\n" +" depdi,z -2, %%sar, 64, %%r19\n" +" ldd 0(%%sr1,%1),%%r20\n" +" ldd 8(%%sr1,%1),%%r21\n" +" shrpd %%r0, %0, %%sar, %%r22\n" +" shrpd %0, %%r0, %%sar, %%r1\n" +" and %%r20, %%r19, %%r20\n" +" andcm %%r21, %%r19, %%r21\n" +" or %%r22, %%r20, %%r20\n" +" or %%r1, %%r21, %%r21\n" +" std %%r20,0(%%sr1,%1)\n" +" std %%r21,8(%%sr1,%1)\n" + : + : "r" (val), "r" (regs->ior), "r" (regs->isr) + : "r19", "r20", "r21", "r22", "r1" ); + return 0; +} +#endif void handle_unaligned(struct pt_regs *regs) { unsigned long unaligned_count = 0; unsigned long last_time = 0; + unsigned long newbase = regs->gr[R1(regs->iir)]; + int modify = 0; int ret = -1; struct siginfo si; @@ -284,83 +356,169 @@ if (!unaligned_enabled) goto force_sigbus; + /* handle modification - OK, it's ugly, see the instruction manual */ + switch (MAJOR_OP(regs->iir)) + { + case 0x03: + case 0x09: + case 0x0b: + if (regs->iir&0x20) + { + modify = 1; + if (regs->iir&0x1000) /* short loads */ + if (regs->iir&0x200) + newbase += IM5_3(regs->iir); + else + newbase += IM5_2(regs->iir); + else if (regs->iir&0x2000) /* scaled indexed */ + { + int shift=0; + switch (regs->iir & OPCODE1_MASK) + { + case OPCODE_LDH_I: + shift= 1; break; + case OPCODE_LDW_I: + shift= 2; break; + case OPCODE_LDD_I: + case OPCODE_LDDA_I: + shift= 3; break; + } + newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift; + } else /* simple indexed */ + newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0); + } + break; + case 0x13: + case 0x1b: + modify = 1; + newbase += IM14(regs->iir); + break; + case 0x14: + case 0x1c: + if (regs->iir&8) + { + modify = 1; + newbase += IM14(regs->iir&~0xe); + } + break; + case 0x16: + case 0x1e: + modify = 1; + newbase += IM14(regs->iir&6); + break; + case 0x17: + case 0x1f: + if (regs->iir&4) + { + modify = 1; + newbase += IM14(regs->iir&~4); + } + break; + } + + if (regs->isr != regs->sr[7]) + { + printk(KERN_CRIT "isr verification failed (isr: " RFMT ", sr7: " RFMT "\n", + regs->isr, regs->sr[7]); + + /* don't kill him though, since he has appropriate access to the page, or we + * would never have gotten here. + */ + } + /* TODO: make this cleaner...
*/ switch (regs->iir & OPCODE1_MASK) { case OPCODE_LDH_I: case OPCODE_LDH_S: - ret = emulate_load(regs, 2, regs->iir & 0x1f); + ret = emulate_ldh(regs, R3(regs->iir)); break; case OPCODE_LDW_I: case OPCODE_LDWA_I: case OPCODE_LDW_S: case OPCODE_LDWA_S: - ret = emulate_load(regs, 4, regs->iir&0x1f); - break; - - case OPCODE_LDD_I: - case OPCODE_LDDA_I: - case OPCODE_LDD_S: - case OPCODE_LDDA_S: - ret = emulate_load(regs, 8, regs->iir&0x1f); + ret = emulate_ldw(regs, R3(regs->iir)); break; case OPCODE_STH: - ret = emulate_store(regs, 2, (regs->iir>>16)&0x1f); + ret = emulate_sth(regs, R2(regs->iir)); break; case OPCODE_STW: case OPCODE_STWA: - ret = emulate_store(regs, 4, (regs->iir>>16)&0x1f); + ret = emulate_stw(regs, R2(regs->iir)); + break; + +#ifdef __LP64__ + case OPCODE_LDD_I: + case OPCODE_LDDA_I: + case OPCODE_LDD_S: + case OPCODE_LDDA_S: + ret = emulate_ldd(regs, R3(regs->iir)); break; case OPCODE_STD: case OPCODE_STDA: - ret = emulate_store(regs, 8, (regs->iir>>16)&0x1f); + ret = emulate_std(regs, R2(regs->iir)); + break; +#endif + + case OPCODE_LDCD_I: + case OPCODE_LDCW_I: + case OPCODE_LDCD_S: + case OPCODE_LDCW_S: + ret = -1; /* "undefined", but lets kill them. */ break; } +#ifdef __LP64__ switch (regs->iir & OPCODE2_MASK) { case OPCODE_LDD_L: case OPCODE_FLDD_L: - ret = emulate_load(regs, 8, (regs->iir>>16)&0x1f); + ret = emulate_ldd(regs, R2(regs->iir)); break; case OPCODE_STD_L: case OPCODE_FSTD_L: - ret = emulate_store(regs, 8, (regs->iir>>16)&0x1f); + ret = emulate_std(regs, R2(regs->iir)); break; } +#endif switch (regs->iir & OPCODE3_MASK) { case OPCODE_LDW_M: case OPCODE_FLDW_L: - ret = emulate_load(regs, 4, (regs->iir>>16)&0x1f); + ret = emulate_ldw(regs, R2(regs->iir)); break; case OPCODE_FSTW_L: case OPCODE_STW_M: - ret = emulate_store(regs, 4, (regs->iir>>16)&0x1f); + ret = emulate_stw(regs, R2(regs->iir)); break; } switch (regs->iir & OPCODE4_MASK) { case OPCODE_LDH_L: - ret = emulate_load(regs, 2, (regs->iir>>16)&0x1f); + ret = emulate_ldh(regs, R2(regs->iir)); break; case OPCODE_LDW_L: - case OPCODE_LDW_L2: - ret = emulate_load(regs, 4, (regs->iir>>16)&0x1f); + case OPCODE_LDWM: + ret = emulate_ldw(regs, R2(regs->iir)); break; case OPCODE_STH_L: - ret = emulate_store(regs, 2, (regs->iir>>16)&0x1f); + ret = emulate_sth(regs, R2(regs->iir)); break; case OPCODE_STW_L: - case OPCODE_STW_L2: - ret = emulate_store(regs, 4, (regs->iir>>16)&0x1f); + case OPCODE_STWM: + ret = emulate_stw(regs, R2(regs->iir)); break; } + /* XXX LJ - need to handle float load/store */ + + if (modify && R1(regs->iir)) + regs->gr[R1(regs->iir)] = newbase; + if (ret < 0) printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir); @@ -424,9 +582,9 @@ align_mask = 1UL; break; case OPCODE_LDW_L: - case OPCODE_LDW_L2: + case OPCODE_LDWM: case OPCODE_STW_L: - case OPCODE_STW_L2: + case OPCODE_STWM: align_mask = 3UL; break; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/dfadd.c linux.21rc5-ac2/arch/parisc/math-emu/dfadd.c --- linux.21rc5/arch/parisc/math-emu/dfadd.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/dfadd.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/dfadd.c $Revision: 1.1 $ + * @(#) pa/spmath/dfadd.c $Revision: $ * * Purpose: * Double_add: add two double precision values. 
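The arch/parisc/kernel/unaligned.c rewrite a few hunks above replaces the old byte-at-a-time emulate_load/emulate_store loops with width-specific helpers that fetch the two aligned words around the faulting address and merge them with the PA-RISC funnel shifts (vshd/shrpd), and it introduces the IM()/IM5_*/IM14 macros to decode the low-sign-extended immediates needed when a base-modifying instruction must also update its base register. Below is a minimal standalone C sketch of both ideas, not the kernel code: im_decode and merge_ldw are invented names, a plain shift-and-or stands in for vshd, and the two words are treated as big-endian register images as on PA-RISC.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a low-sign-extended immediate the way the patch's IM() macro
 * does: the sign bit is stored in bit 0, the magnitude in the n-1 bits
 * above it. */
static long im_decode(unsigned long field, int n)
{
	long magnitude = (field >> 1) & ((1UL << (n - 1)) - 1);
	return (field & 1) ? magnitude - (1L << (n - 1)) : magnitude;
}

/* Shift-and-merge for an unaligned 4-byte load: w0 and w1 are the two
 * aligned words straddling the address, ofs the byte offset within w0.
 * This is what vshd computes with the shift amount held in %sar. */
static uint32_t merge_ldw(uint32_t w0, uint32_t w1, unsigned ofs)
{
	if (ofs == 0)		/* a 32-bit shift by 32 would be undefined in C */
		return w0;
	return (w0 << (ofs * 8)) | (w1 >> (32 - ofs * 8));
}

int main(void)
{
	/* an im14 field of 0x2001 decodes to -4096 under this convention */
	assert(im_decode(0x2001, 14) == -4096);
	/* bytes 11 22 33 44 | 55 66 77 88: a word load at offset 3 */
	assert(merge_ldw(0x11223344, 0x55667788, 3) == 0x44556677);
	printf("ok\n");
	return 0;
}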
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/dfcmp.c linux.21rc5-ac2/arch/parisc/math-emu/dfcmp.c --- linux.21rc5/arch/parisc/math-emu/dfcmp.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/dfcmp.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/dfcmp.c $Revision: 1.1 $ + * @(#) pa/spmath/dfcmp.c $Revision: $ * * Purpose: * dbl_cmp: compare two values diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/dfdiv.c linux.21rc5-ac2/arch/parisc/math-emu/dfdiv.c --- linux.21rc5/arch/parisc/math-emu/dfdiv.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/dfdiv.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/dfdiv.c $Revision: 1.1 $ + * @(#) pa/spmath/dfdiv.c $Revision: $ * * Purpose: * Double Precision Floating-point Divide diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/dfmpy.c linux.21rc5-ac2/arch/parisc/math-emu/dfmpy.c --- linux.21rc5/arch/parisc/math-emu/dfmpy.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/dfmpy.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/dfmpy.c $Revision: 1.1 $ + * @(#) pa/spmath/dfmpy.c $Revision: $ * * Purpose: * Double Precision Floating-point Multiply diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/dfrem.c linux.21rc5-ac2/arch/parisc/math-emu/dfrem.c --- linux.21rc5/arch/parisc/math-emu/dfrem.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/dfrem.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/dfrem.c $Revision: 1.1 $ + * @(#) pa/spmath/dfrem.c $Revision: $ * * Purpose: * Double Precision Floating-point Remainder diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/dfsqrt.c linux.21rc5-ac2/arch/parisc/math-emu/dfsqrt.c --- linux.21rc5/arch/parisc/math-emu/dfsqrt.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/dfsqrt.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/dfsqrt.c $Revision: 1.1 $ + * @(#) pa/spmath/dfsqrt.c $Revision: $ * * Purpose: * Double Floating-point Square Root diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/dfsub.c linux.21rc5-ac2/arch/parisc/math-emu/dfsub.c --- linux.21rc5/arch/parisc/math-emu/dfsub.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/dfsub.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/dfsub.c $Revision: 1.1 $ + * @(#) pa/spmath/dfsub.c $Revision: $ * * Purpose: * Double_subtract: subtract two double precision values. 
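Back in the arch/parisc/kernel/perf.c hunk, perf_init() now calls misc_register() up front and aborts initialisation when it fails, instead of invoking it unchecked as the last step; the images are patched and the lock initialised only once the device is known to be registered, so the error path has nothing to unwind. A rough sketch of that error-handling shape (hypothetical foo_dev/foo_init names, not the patch's code; misc_register() returns 0 on success or a negative errno):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static struct miscdevice foo_dev;	/* minor, name and fops left out of this sketch */

static int __init foo_init(void)
{
	int ret;

	ret = misc_register(&foo_dev);
	if (ret) {
		printk(KERN_ERR "foo: cannot register misc device\n");
		return ret;	/* fail the whole init, leaving no state behind */
	}

	/* do the remaining setup only after registration succeeds,
	 * so the failure branch above has nothing to undo */
	return 0;
}

module_init(foo_init);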
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/driver.c linux.21rc5-ac2/arch/parisc/math-emu/driver.c --- linux.21rc5/arch/parisc/math-emu/driver.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/driver.c 2003-04-28 16:02:18.000000000 +0100 @@ -80,14 +80,19 @@ int handle_fpe(struct pt_regs *regs) { - extern void printbinary(unsigned long x, int nbits); + extern int printbinary(char *buf, unsigned long x, int nbits); struct siginfo si; unsigned int orig_sw, sw; int signalcode; + char buf[128]; /* need an intermediate copy of float regs because FPU emulation * code expects an artificial last entry which contains zero + * + * also, the passed in fr registers contain one word that defines + * the fpu type. the fpu type information is constructed + * inside the emulation code */ - __u64 frcopy[33]; + __u64 frcopy[36]; memcpy(frcopy, regs->fr, sizeof regs->fr); frcopy[32] = 0; @@ -96,8 +101,8 @@ if (FPUDEBUG) { printk(KERN_DEBUG "FP VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI ->\n "); - printbinary(orig_sw, 32); - printk(KERN_DEBUG "\n"); + printbinary(buf, orig_sw, 32); + printk(KERN_DEBUG "%s\n", buf); } signalcode = decode_fpu(frcopy, 0x666); @@ -107,8 +112,8 @@ if (FPUDEBUG) { printk(KERN_DEBUG "VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI decode_fpu returns %d|0x%x\n", signalcode >> 24, signalcode & 0xffffff); - printbinary(sw, 32); - printk(KERN_DEBUG "\n"); + printbinary(buf, sw, 32); + printk(KERN_DEBUG "%s\n", buf); } memcpy(regs->fr, frcopy, sizeof regs->fr); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fcnvff.c linux.21rc5-ac2/arch/parisc/math-emu/fcnvff.c --- linux.21rc5/arch/parisc/math-emu/fcnvff.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fcnvff.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/fcnvff.c $Revision: 1.1 $ + * @(#) pa/spmath/fcnvff.c $Revision: $ * * Purpose: * Single Floating-point to Double Floating-point diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fcnvfu.c linux.21rc5-ac2/arch/parisc/math-emu/fcnvfu.c --- linux.21rc5/arch/parisc/math-emu/fcnvfu.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fcnvfu.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/fcnvfu.c $Revision: 1.1 $ + * @(#) pa/spmath/fcnvfu.c $Revision: $ * * Purpose: * Floating-point to Unsigned Fixed-point Converts diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fcnvfut.c linux.21rc5-ac2/arch/parisc/math-emu/fcnvfut.c --- linux.21rc5/arch/parisc/math-emu/fcnvfut.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fcnvfut.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/fcnvfut.c $Revision: 1.1 $ + * @(#) pa/spmath/fcnvfut.c $Revision: $ * * Purpose: * Floating-point to Unsigned Fixed-point Converts with Truncation diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fcnvfx.c linux.21rc5-ac2/arch/parisc/math-emu/fcnvfx.c --- linux.21rc5/arch/parisc/math-emu/fcnvfx.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fcnvfx.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/fcnvfx.c $Revision: 1.1 $ + * @(#) pa/spmath/fcnvfx.c $Revision: $ * * Purpose: * 
Single Floating-point to Single Fixed-point diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fcnvfxt.c linux.21rc5-ac2/arch/parisc/math-emu/fcnvfxt.c --- linux.21rc5/arch/parisc/math-emu/fcnvfxt.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fcnvfxt.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/fcnvfxt.c $Revision: 1.1 $ + * @(#) pa/spmath/fcnvfxt.c $Revision: $ * * Purpose: * Single Floating-point to Single Fixed-point /w truncated result diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fcnvuf.c linux.21rc5-ac2/arch/parisc/math-emu/fcnvuf.c --- linux.21rc5/arch/parisc/math-emu/fcnvuf.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fcnvuf.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/fcnvuf.c $Revision: 1.1 $ + * @(#) pa/spmath/fcnvuf.c $Revision: $ * * Purpose: * Fixed point to Floating-point Converts diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fcnvxf.c linux.21rc5-ac2/arch/parisc/math-emu/fcnvxf.c --- linux.21rc5/arch/parisc/math-emu/fcnvxf.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fcnvxf.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/fcnvxf.c $Revision: 1.1 $ + * @(#) pa/spmath/fcnvxf.c $Revision: $ * * Purpose: * Single Fixed-point to Single Floating-point diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/float.h linux.21rc5-ac2/arch/parisc/math-emu/float.h --- linux.21rc5/arch/parisc/math-emu/float.h 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/float.h 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/float.h $Revision: 1.2 $ + * @(#) pa/spmath/float.h $Revision: 1.1 $ * * Purpose: * <> diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fmpyfadd.c linux.21rc5-ac2/arch/parisc/math-emu/fmpyfadd.c --- linux.21rc5/arch/parisc/math-emu/fmpyfadd.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fmpyfadd.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/fmpyfadd.c $Revision: 1.1 $ + * @(#) pa/spmath/fmpyfadd.c $Revision: $ * * Purpose: * Double Floating-point Multiply Fused Add diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fpudispatch.c linux.21rc5-ac2/arch/parisc/math-emu/fpudispatch.c --- linux.21rc5/arch/parisc/math-emu/fpudispatch.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fpudispatch.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/fp/fpudispatch.c $Revision: 1.1 $ + * @(#) pa/fp/fpudispatch.c $Revision: $ * * Purpose: * <> @@ -51,6 +51,7 @@ #include "float.h" #include "types.h" +#include /* #include */ /* #include */ @@ -166,6 +167,20 @@ #define VASSERT(x) +static void parisc_linux_get_fpu_type(u_int fpregs[]) +{ + /* on pa-linux the fpu type is not filled in by the + * caller; it is constructed here + */ + if (boot_cpu_data.cpu_type == pcxs) + fpregs[FPU_TYPE_FLAG_POS] = TIMEX_EXTEN_FLAG; + else if (boot_cpu_data.cpu_type == pcxt || + boot_cpu_data.cpu_type == pcxt_) + fpregs[FPU_TYPE_FLAG_POS] = ROLEX_EXTEN_FLAG; + else if 
(boot_cpu_data.cpu_type >= pcxu) + fpregs[FPU_TYPE_FLAG_POS] = PA2_0_FPU_FLAG; +} + /* * this routine will decode the excepting floating point instruction and * call the approiate emulation routine. @@ -184,6 +199,8 @@ /* All FP emulation code assumes that ints are 4-bytes in length */ VASSERT(sizeof(int) == 4); + parisc_linux_get_fpu_type(fpregs); + fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ class = get_class(ir); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/fpu.h linux.21rc5-ac2/arch/parisc/math-emu/fpu.h --- linux.21rc5/arch/parisc/math-emu/fpu.h 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/fpu.h 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/fp/fpu.h $Revision: 1.2 $ + * @(#) pa/fp/fpu.h $Revision: $ * * Purpose: * <> diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/sfadd.c linux.21rc5-ac2/arch/parisc/math-emu/sfadd.c --- linux.21rc5/arch/parisc/math-emu/sfadd.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/sfadd.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/sfadd.c $Revision: 1.1 $ + * @(#) pa/spmath/sfadd.c $Revision: $ * * Purpose: * Single_add: add two single precision values. diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/sfcmp.c linux.21rc5-ac2/arch/parisc/math-emu/sfcmp.c --- linux.21rc5/arch/parisc/math-emu/sfcmp.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/sfcmp.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/sfcmp.c $Revision: 1.1 $ + * @(#) pa/spmath/sfcmp.c $Revision: $ * * Purpose: * sgl_cmp: compare two values diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/sfdiv.c linux.21rc5-ac2/arch/parisc/math-emu/sfdiv.c --- linux.21rc5/arch/parisc/math-emu/sfdiv.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/sfdiv.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/sfdiv.c $Revision: 1.1 $ + * @(#) pa/spmath/sfdiv.c $Revision: $ * * Purpose: * Single Precision Floating-point Divide diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/sfmpy.c linux.21rc5-ac2/arch/parisc/math-emu/sfmpy.c --- linux.21rc5/arch/parisc/math-emu/sfmpy.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/sfmpy.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/sfmpy.c $Revision: 1.1 $ + * @(#) pa/spmath/sfmpy.c $Revision: $ * * Purpose: * Single Precision Floating-point Multiply diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/sfrem.c linux.21rc5-ac2/arch/parisc/math-emu/sfrem.c --- linux.21rc5/arch/parisc/math-emu/sfrem.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/sfrem.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/sfrem.c $Revision: 1.1 $ + * @(#) pa/spmath/sfrem.c $Revision: $ * * Purpose: * Single Precision Floating-point Remainder diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/sfsqrt.c linux.21rc5-ac2/arch/parisc/math-emu/sfsqrt.c --- linux.21rc5/arch/parisc/math-emu/sfsqrt.c 
2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/sfsqrt.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/sfsqrt.c $Revision: 1.1 $ + * @(#) pa/spmath/sfsqrt.c $Revision: $ * * Purpose: * Single Floating-point Square Root diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/parisc/math-emu/sfsub.c linux.21rc5-ac2/arch/parisc/math-emu/sfsub.c --- linux.21rc5/arch/parisc/math-emu/sfsub.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/parisc/math-emu/sfsub.c 2003-04-28 16:02:18.000000000 +0100 @@ -22,7 +22,7 @@ * BEGIN_DESC * * File: - * @(#) pa/spmath/sfsub.c $Revision: 1.1 $ + * @(#) pa/spmath/sfsub.c $Revision: $ * * Purpose: * Single_subtract: subtract two single precision values. diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ppc/8xx_io/uart.c linux.21rc5-ac2/arch/ppc/8xx_io/uart.c --- linux.21rc5/arch/ppc/8xx_io/uart.c 2003-05-28 15:08:50.000000000 +0100 +++ linux.21rc5-ac2/arch/ppc/8xx_io/uart.c 2003-04-22 16:44:36.000000000 +0100 @@ -1679,7 +1679,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ppc/kernel/entry.S linux.21rc5-ac2/arch/ppc/kernel/entry.S --- linux.21rc5/arch/ppc/kernel/entry.S 2003-05-28 15:08:51.000000000 +0100 +++ linux.21rc5-ac2/arch/ppc/kernel/entry.S 2003-04-22 16:44:36.000000000 +0100 @@ -260,7 +260,9 @@ .globl ret_from_fork ret_from_fork: +#ifdef CONFIG_SMP bl schedule_tail +#endif lwz r0,TASK_PTRACE(r2) andi. 
r0,r0,PT_TRACESYS bnel- syscall_trace diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ppc/kernel/idle.c linux.21rc5-ac2/arch/ppc/kernel/idle.c --- linux.21rc5/arch/ppc/kernel/idle.c 2003-05-28 15:08:51.000000000 +0100 +++ linux.21rc5-ac2/arch/ppc/kernel/idle.c 2003-04-22 16:44:36.000000000 +0100 @@ -48,9 +48,6 @@ do_power_save = 1; /* endless loop with no priority at all */ - current->nice = 20; - current->counter = -100; - init_idle(); for (;;) { #ifdef CONFIG_SMP if (!do_power_save) { @@ -70,11 +67,8 @@ if (do_power_save && !current->need_resched) power_save_6xx(); #endif /* CONFIG_6xx */ - - if (current->need_resched) { - schedule(); - check_pgt_cache(); - } + schedule(); + check_pgt_cache(); } return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ppc/kernel/mk_defs.c linux.21rc5-ac2/arch/ppc/kernel/mk_defs.c --- linux.21rc5/arch/ppc/kernel/mk_defs.c 2003-05-28 15:08:51.000000000 +0100 +++ linux.21rc5-ac2/arch/ppc/kernel/mk_defs.c 2003-04-22 16:44:36.000000000 +0100 @@ -34,8 +34,7 @@ /*DEFINE(KERNELBASE, KERNELBASE);*/ DEFINE(STATE, offsetof(struct task_struct, state)); DEFINE(NEXT_TASK, offsetof(struct task_struct, next_task)); - DEFINE(COUNTER, offsetof(struct task_struct, counter)); - DEFINE(PROCESSOR, offsetof(struct task_struct, processor)); + DEFINE(PROCESSOR, offsetof(struct task_struct, cpu)); DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending)); DEFINE(THREAD, offsetof(struct task_struct, thread)); DEFINE(MM, offsetof(struct task_struct, mm)); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ppc/kernel/ppc_defs.h linux.21rc5-ac2/arch/ppc/kernel/ppc_defs.h --- linux.21rc5/arch/ppc/kernel/ppc_defs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/arch/ppc/kernel/ppc_defs.h 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,81 @@ +/* + * WARNING! This file is automatically generated - DO NOT EDIT! 
+ */ +#define STATE 0 +#define NEXT_TASK 76 +#define PROCESSOR 32 +#define SIGPENDING 8 +#define THREAD 608 +#define MM 84 +#define ACTIVE_MM 88 +#define TASK_STRUCT_SIZE 1520 +#define KSP 0 +#define PGDIR 16 +#define LAST_SYSCALL 20 +#define PT_REGS 8 +#define PT_TRACESYS 2 +#define TASK_FLAGS 4 +#define TASK_PTRACE 24 +#define NEED_RESCHED 20 +#define THREAD_FPR0 24 +#define THREAD_FPSCR 284 +#define THREAD_VR0 288 +#define THREAD_VRSAVE 816 +#define THREAD_VSCR 800 +#define TASK_UNION_SIZE 8192 +#define STACK_FRAME_OVERHEAD 16 +#define INT_FRAME_SIZE 192 +#define GPR0 16 +#define GPR1 20 +#define GPR2 24 +#define GPR3 28 +#define GPR4 32 +#define GPR5 36 +#define GPR6 40 +#define GPR7 44 +#define GPR8 48 +#define GPR9 52 +#define GPR10 56 +#define GPR11 60 +#define GPR12 64 +#define GPR13 68 +#define GPR14 72 +#define GPR15 76 +#define GPR16 80 +#define GPR17 84 +#define GPR18 88 +#define GPR19 92 +#define GPR20 96 +#define GPR21 100 +#define GPR22 104 +#define GPR23 108 +#define GPR24 112 +#define GPR25 116 +#define GPR26 120 +#define GPR27 124 +#define GPR28 128 +#define GPR29 132 +#define GPR30 136 +#define GPR31 140 +#define _NIP 144 +#define _MSR 148 +#define _CTR 156 +#define _LINK 160 +#define _CCR 168 +#define _MQ 172 +#define _XER 164 +#define _DAR 180 +#define _DSISR 184 +#define _DEAR 180 +#define _ESR 184 +#define ORIG_GPR3 152 +#define RESULT 188 +#define TRAP 176 +#define CLONE_VM 256 +#define MM_PGD 12 +#define CPU_SPEC_ENTRY_SIZE 32 +#define CPU_SPEC_PVR_MASK 0 +#define CPU_SPEC_PVR_VALUE 4 +#define CPU_SPEC_FEATURES 12 +#define CPU_SPEC_SETUP 28 +#define NUM_USER_SEGMENTS 8 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ppc/kernel/smp.c linux.21rc5-ac2/arch/ppc/kernel/smp.c --- linux.21rc5/arch/ppc/kernel/smp.c 2003-05-28 15:08:51.000000000 +0100 +++ linux.21rc5-ac2/arch/ppc/kernel/smp.c 2003-04-22 17:01:10.000000000 +0100 @@ -294,8 +294,6 @@ cpu_callin_map[0] = 1; current->processor = 0; - init_idle(); - for (i = 0; i < NR_CPUS; i++) { prof_counter[i] = 1; prof_multiplier[i] = 1; @@ -351,7 +349,8 @@ p = init_task.prev_task; if (!p) panic("No idle task for CPU %d", i); - del_from_runqueue(p); + init_idle(p, i); + unhash_process(p); init_tasks[i] = p; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/ppc/mm/fault.c linux.21rc5-ac2/arch/ppc/mm/fault.c --- linux.21rc5/arch/ppc/mm/fault.c 2003-05-28 15:08:51.000000000 +0100 +++ linux.21rc5-ac2/arch/ppc/mm/fault.c 2003-04-22 16:44:36.000000000 +0100 @@ -141,42 +141,40 @@ goto bad_area; if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (!is_write) - goto bad_area; - - /* - * N.B. The rs6000/xcoff ABI allows programs to access up to - * a few hundred bytes below the stack pointer. - * The kernel signal delivery code writes up to about 1.5kB - * below the stack pointer (r1) before decrementing it. - * The exec code can write slightly over 640kB to the stack - * before setting the user r1. Thus we allow the stack to - * expand to 1MB without further checks. - */ - if (address + 0x100000 < vma->vm_end) { - /* get user regs even if this fault is in kernel mode */ - struct pt_regs *uregs = current->thread.regs; - if (uregs == NULL) - goto bad_area; - - /* - * A user-mode access to an address a long way below - * the stack pointer is only valid if the instruction - * is one which would update the stack pointer to the - * address accessed if the instruction completed, - * i.e. 
either stwu rs,n(r1) or stwux rs,r1,rb - * (or the byte, halfword, float or double forms). - * - * If we don't check this then any write to the area - * between the last mapped region and the stack will - * expand the stack rather than segfaulting. - */ - if (address + 2048 < uregs->gpr[1] - && (!user_mode(regs) || !store_updates_sp(regs))) - goto bad_area; - } + if (!is_write) + goto bad_area; + + /* + * N.B. The rs6000/xcoff ABI allows programs to access up to + * a few hundred bytes below the stack pointer. + * The kernel signal delivery code writes up to about 1.5kB + * below the stack pointer (r1) before decrementing it. + * The exec code can write slightly over 640kB to the stack + * before setting the user r1. Thus we allow the stack to + * expand to 1MB without further checks. + */ + if (address + 0x100000 < vma->vm_end) { + /* get user regs even if this fault is in kernel mode */ + struct pt_regs *uregs = current->thread.regs; + if (uregs == NULL) + goto bad_area; + + /* + * A user-mode access to an address a long way below + * the stack pointer is only valid if the instruction + * is one which would update the stack pointer to the + * address accessed if the instruction completed, + * i.e. either stwu rs,n(r1) or stwux rs,r1,rb + * (or the byte, halfword, float or double forms). + * + * If we don't check this then any write to the area + * between the last mapped region and the stack will + * expand the stack rather than segfaulting. + */ + if (address + 2048 < uregs->gpr[1] + && (!user_mode(regs) || !store_updates_sp(regs))) + goto bad_area; + } if (expand_stack(vma, address)) goto bad_area; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/defconfig linux.21rc5-ac2/arch/s390/defconfig --- linux.21rc5/arch/s390/defconfig 2003-05-28 15:08:16.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/defconfig 2003-04-25 13:53:58.000000000 +0100 @@ -91,22 +91,23 @@ CONFIG_TN3270_CONSOLE=y CONFIG_TN3215=y CONFIG_TN3215_CONSOLE=y -CONFIG_HWC=y -CONFIG_HWC_CONSOLE=y -CONFIG_HWC_CPI=m +CONFIG_SCLP=y +CONFIG_SCLP_TTY=y +CONFIG_SCLP_CONSOLE=y +CONFIG_SCLP_VT220_TTY=y +CONFIG_SCLP_VT220_CONSOLE=y +CONFIG_SCLP_CPI=m CONFIG_S390_TAPE=m # # S/390 tape interface support # -CONFIG_S390_TAPE_CHAR=y CONFIG_S390_TAPE_BLOCK=y # # S/390 tape hardware support # -CONFIG_S390_TAPE_3490=m -CONFIG_S390_TAPE_3480=m +CONFIG_S390_TAPE_34XX=m # # Network device drivers @@ -118,6 +119,7 @@ # CONFIG_TUN is not set CONFIG_NET_ETHERNET=y CONFIG_TR=y +CONFIG_C7000=m # CONFIG_FDDI is not set # @@ -151,13 +153,18 @@ CONFIG_SHARED_IPV6_CARDS=y # CONFIG_KHTTPD is not set # CONFIG_ATM is not set -CONFIG_VLAN_8021Q=m +# CONFIG_VLAN_8021Q is not set # # # # CONFIG_IPX is not set # CONFIG_ATALK is not set + +# +# Appletalk devices +# +# CONFIG_DEV_APPLETALK is not set # CONFIG_DECNET is not set # CONFIG_BRIDGE is not set # CONFIG_X25 is not set @@ -175,6 +182,11 @@ # CONFIG_NET_SCHED is not set # +# Network testing +# +# CONFIG_NET_PKTGEN is not set + +# # File systems # # CONFIG_QUOTA is not set @@ -187,6 +199,8 @@ # CONFIG_ADFS_FS_RW is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BEFS_DEBUG is not set # CONFIG_BFS_FS is not set # CONFIG_EXT3_FS is not set # CONFIG_JBD is not set @@ -200,23 +214,27 @@ # CONFIG_JFFS2_FS is not set # CONFIG_CRAMFS is not set # CONFIG_TMPFS is not set -# CONFIG_RAMFS is not set +CONFIG_RAMFS=y # CONFIG_ISO9660_FS is not set # CONFIG_JOLIET is not set # CONFIG_ZISOFS is not set +# CONFIG_JFS_FS is not set +# 
CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set # CONFIG_MINIX_FS is not set # CONFIG_VXFS_FS is not set # CONFIG_NTFS_FS is not set # CONFIG_NTFS_RW is not set # CONFIG_HPFS_FS is not set CONFIG_PROC_FS=y -CONFIG_DEVFS_FS=y -CONFIG_DEVFS_MOUNT=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVFS_MOUNT is not set # CONFIG_DEVFS_DEBUG is not set # CONFIG_DEVPTS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX4FS_RW is not set # CONFIG_ROMFS_FS is not set +# CONFIG_XIP2FS is not set CONFIG_EXT2_FS=y # CONFIG_SYSV_FS is not set # CONFIG_UDF_FS is not set @@ -234,6 +252,7 @@ # CONFIG_ROOT_NFS is not set CONFIG_NFSD=y # CONFIG_NFSD_V3 is not set +# CONFIG_NFSD_TCP is not set CONFIG_SUNRPC=y CONFIG_LOCKD=y # CONFIG_SMB_FS is not set @@ -247,7 +266,6 @@ # CONFIG_NCPFS_NLS is not set # CONFIG_NCPFS_EXTRAS is not set # CONFIG_ZISOFS_FS is not set -# CONFIG_ZLIB_FS_INFLATE is not set # # Partition Types @@ -264,6 +282,7 @@ # CONFIG_SGI_PARTITION is not set # CONFIG_ULTRIX_PARTITION is not set # CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set # CONFIG_SMB_NLS is not set # CONFIG_NLS is not set @@ -271,3 +290,9 @@ # Kernel hacking # CONFIG_MAGIC_SYSRQ=y + +# +# Library routines +# +# CONFIG_ZLIB_INFLATE is not set +# CONFIG_ZLIB_DEFLATE is not set diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/asm-offsets.c linux.21rc5-ac2/arch/s390/kernel/asm-offsets.c --- linux.21rc5/arch/s390/kernel/asm-offsets.c 2003-05-28 15:08:16.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/asm-offsets.c 2003-04-25 13:47:51.000000000 +0100 @@ -26,7 +26,7 @@ DEFINE(__TASK_need_resched, offsetof(struct task_struct, need_resched),); DEFINE(__TASK_ptrace, offsetof(struct task_struct, ptrace),); - DEFINE(__TASK_processor, offsetof(struct task_struct, processor),); + DEFINE(__TASK_processor, offsetof(struct task_struct, cpu),); return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/bitmap.S linux.21rc5-ac2/arch/s390/kernel/bitmap.S --- linux.21rc5/arch/s390/kernel/bitmap.S 2003-05-28 15:08:16.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/bitmap.S 2003-04-25 13:47:51.000000000 +0100 @@ -35,3 +35,21 @@ .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 + .globl _sb_findmap +_sb_findmap: + .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/debug.c linux.21rc5-ac2/arch/s390/kernel/debug.c --- linux.21rc5/arch/s390/kernel/debug.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/debug.c 2003-05-28 15:13:38.000000000 +0100 @@ -949,9 +949,21 @@ int i; unsigned long flags; mode_t mode = S_IFREG; + struct proc_dir_entry *pde; + if (!id) goto out; + pde = debug_create_proc_dir_entry(id->proc_root_entry, + view->name, mode, + &debug_inode_ops, + &debug_file_ops); + if(!pde){ + 
printk(KERN_WARNING "debug: create_proc_entry() failed! Cannot register view %s/%s\n", id->name,view->name); + rc = -1; + goto out; + } + spin_lock_irqsave(&id->lock, flags); for (i = 0; i < DEBUG_MAX_VIEWS; i++) { if (id->views[i] == NULL) @@ -962,6 +974,8 @@ id->name,view->name); printk(KERN_WARNING "debug: maximum number of views reached (%i)!\n", i); + debug_delete_proc_dir_entry(id->proc_root_entry, pde); + rc = -1; } else { @@ -970,11 +984,7 @@ mode |= S_IRUSR; if (view->input_proc) mode |= S_IWUSR; - id->proc_entries[i] = - debug_create_proc_dir_entry(id->proc_root_entry, - view->name, mode, - &debug_inode_ops, - &debug_file_ops); + id->proc_entries[i] = pde; rc = 0; } spin_unlock_irqrestore(&id->lock, flags); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/entry.S linux.21rc5-ac2/arch/s390/kernel/entry.S --- linux.21rc5/arch/s390/kernel/entry.S 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/entry.S 2003-04-22 16:44:36.000000000 +0100 @@ -254,13 +254,14 @@ ret_from_fork: basr %r13,0 l %r13,.Lentry_base-.(%r13) # setup base pointer to &entry_base + # not saving R14 here because we go to sysc_return ultimately + l %r1,BASED(.Lschedtail) + basr %r14,%r1 # call schedule_tail (unlock stuff) GET_CURRENT # load pointer to task_struct to R9 stosm 24(%r15),0x03 # reenable interrupts sr %r0,%r0 # child returns 0 st %r0,SP_R2(%r15) # store return value (change R2 on stack) - l %r1,BASED(.Lschedtail) - la %r14,BASED(sysc_return) - br %r1 # call schedule_tail, return to sysc_return + b BASED(sysc_return) # # clone, fork, vfork, exec and sigreturn need glue, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/head.S linux.21rc5-ac2/arch/s390/kernel/head.S --- linux.21rc5/arch/s390/kernel/head.S 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/head.S 2003-05-28 15:13:38.000000000 +0100 @@ -587,7 +587,7 @@ .long .Lduct # cr2: dispatchable unit control table .long 0 # cr3: instruction authorization .long 0 # cr4: instruction authorization - .long 0 # cr5: various things + .long 0xffffffff # cr5: primary-aste origin .long 0 # cr6: I/O interrupts .long 0 # cr7: secondary space segment table .long 0 # cr8: access registers translation diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/process.c linux.21rc5-ac2/arch/s390/kernel/process.c --- linux.21rc5/arch/s390/kernel/process.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/process.c 2003-04-25 13:47:51.000000000 +0100 @@ -50,15 +50,11 @@ * The idle loop on a S390... 
*/ -int cpu_idle(void *unused) +int cpu_idle(void) { psw_t wait_psw; unsigned long reg; - /* endless idle loop with no priority at all */ - init_idle(); - current->nice = 20; - current->counter = -100; while (1) { if (current->need_resched) { schedule(); @@ -94,7 +90,7 @@ { struct task_struct *tsk = current; - printk("CPU: %d %s\n", tsk->processor, print_tainted()); + printk("CPU: %d %s\n", tsk->cpu, print_tainted()); printk("Process %s (pid: %d, task: %08lx, ksp: %08x)\n", current->comm, current->pid, (unsigned long) tsk, tsk->thread.ksp); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/s390_ksyms.c linux.21rc5-ac2/arch/s390/kernel/s390_ksyms.c --- linux.21rc5/arch/s390/kernel/s390_ksyms.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/s390_ksyms.c 2003-05-28 15:40:29.000000000 +0100 @@ -10,6 +10,7 @@ #include #include #include +#include #if CONFIG_IP_MULTICAST #include #endif @@ -60,5 +61,9 @@ EXPORT_SYMBOL(csum_fold); EXPORT_SYMBOL(console_mode); EXPORT_SYMBOL(console_device); -EXPORT_SYMBOL(pfix_table); +EXPORT_SYMBOL(pfix_get_addr); +EXPORT_SYMBOL(pfix_get_page_addr); +EXPORT_SYMBOL(get_storage_key); EXPORT_SYMBOL_NOVERS(do_call_softirq); +EXPORT_SYMBOL(ctrlchar_init); +EXPORT_SYMBOL(ctrlchar_handle); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/setup.c linux.21rc5-ac2/arch/s390/kernel/setup.c --- linux.21rc5/arch/s390/kernel/setup.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/setup.c 2003-05-28 15:13:38.000000000 +0100 @@ -56,7 +56,117 @@ unsigned long cpu_initialized = 0; volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ -void* *pfix_table=NULL; +unsigned long *pfix_table; +unsigned int __global_storage_key; + +unsigned int get_storage_key(void) { return __global_storage_key; } + +unsigned long pfix_get_page_addr(void *addr) +{ + if (!MACHINE_HAS_PFIX) + return (unsigned long) addr; + + return pfix_table[(unsigned long)virt_to_phys(addr)>>PAGE_SHIFT]; +} + +unsigned long pfix_get_addr(void *addr) +{ + if (!MACHINE_HAS_PFIX) + return (unsigned long) addr; + + return pfix_get_page_addr(addr) + ((unsigned long)addr&(PAGE_SIZE-1)); +} + +/* + * These functions allow to lock and unlock pages in VM. They need + * the absolute address of the page to be locked or unlocked. This will + * only work if the diag98 option in the user directory is enabled for + * the guest. + */ + +/* if result is 0, addr will become the host absolute address */ +static inline int pfix_lock_page(void *addr) +{ + int ret; + + __asm__ __volatile__( + " sske %1,%2\n" /* set storage key */ + " l 0,0(%1)\n" /* addr into gpr 0 */ + " lhi 2,0\n" /* function code is 0 */ + " diag 2,0,0x98\n" /* result in gpr 1 */ + " jnz 0f\n" + " lhi %0,0\n" /* if cc=0: return 0 */ + " st 1,0(%1)\n" /* gpr1 contains host absol. 
addr */ + " j 1f\n" + "0: lr %0,1\n" /* gpr1 contains error code */ + " sske %1,2\n" /* reset storage key */ + "1:\n" + : "=d" (ret), "+d" ((unsigned long)addr) : "d" (__global_storage_key) + : "0", "1", "2", "cc" ); + return ret; +} + +static inline void pfix_unlock_page(unsigned long addr) +{ + __asm__ __volatile__( + " lr 0,%0\n" /* parameter in gpr 0 */ + " lhi 2,4\n" /* function code is 4 */ + " diag 2,0,0x98\n" /* result in gpr 1 */ + " sske %0,%1\n" + : : "a" (addr), "d" (0) + : "0", "1", "2", "cc" ); +} + +static void unpfix_all_pages(void) +{ + unsigned long i; + + if (!pfix_table) + return; + + for (i=0; i<memory_size/PAGE_SIZE; i++) + if (pfix_table[i]) + pfix_unlock_page(pfix_table[i]); +} + +static void __init pfix_all_pages(unsigned long start_pfn, unsigned long max_pfn) +{ + unsigned long i, size; + int r; + + size = (memory_size >> PAGE_SHIFT) * sizeof(long); + pfix_table = alloc_bootmem(size); + if (!pfix_table) + return; + + __global_storage_key = 6 << 4; + for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) { + unsigned long start; + + for(start = memory_chunk[i].addr; + (start < memory_chunk[i].addr + memory_chunk[i].size && + start < max_pfn); start += PAGE_SIZE) { + unsigned long index; + + index = start >> PAGE_SHIFT; + pfix_table[index] = start; + r = pfix_lock_page(&pfix_table[index]); + if (r) { + pfix_table[index] = 0; + unpfix_all_pages(); + free_bootmem((unsigned long)pfix_table, size); + machine_flags &= ~128; /* MACHINE_HAS_PFIX=0 */ + __global_storage_key = 0; + printk(KERN_WARNING "Error enabling PFIX at " + "address 0x%08lx.\n", start); + return; + } + } + } + printk (KERN_INFO "PFIX enabled.\n"); +} /* * Setup options @@ -166,9 +276,9 @@ static int __init conmode_setup(char *str) { -#if defined(CONFIG_HWC_CONSOLE) - if (strncmp(str, "hwc", 4) == 0) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) + SET_CONSOLE_SCLP; #endif #if defined(CONFIG_TN3215_CONSOLE) if (strncmp(str, "3215", 5) == 0) @@ -200,8 +310,8 @@ */ cpcmd("TERM CONMODE 3215", NULL, 0); if (ptr == NULL) { -#if defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif return; } @@ -210,16 +320,16 @@ SET_CONSOLE_3270; #elif defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; -#elif defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#elif defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } else if (strncmp(ptr + 8, "3215", 4) == 0) { #if defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; #elif defined(CONFIG_TN3270_CONSOLE) SET_CONSOLE_3270; -#elif defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#elif defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } } else if (MACHINE_IS_P390) { @@ -229,8 +339,8 @@ SET_CONSOLE_3270; #endif } else { -#if defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } } @@ -273,21 +383,25 @@ /* * Reboot, halt and power_off stubs. They just call _machine_restart, - * _machine_halt or _machine_power_off. + * _machine_halt or _machine_power_off after making sure that all pending + * printks reached their destination.
*/ void machine_restart(char *command) { + console_unblank(); _machine_restart(command); } void machine_halt(void) { + console_unblank(); _machine_halt(); } void machine_power_off(void) { + console_unblank(); _machine_power_off(); } @@ -303,11 +417,12 @@ unsigned long memory_start, memory_end; char c = ' ', cn, *to = command_line, *from = COMMAND_LINE; struct resource *res; - unsigned long start_pfn, end_pfn; + unsigned long start_pfn, end_pfn, max_pfn=0; static unsigned int smptrap=0; unsigned long delay = 0; struct _lowcore *lowcore; int i; + static int use_pfix; if (smptrap) return; @@ -380,6 +495,14 @@ /* now wait for the requested amount of time */ udelay(delay); } + /* + * "use_pfix" specifies whether we want to fix all pages, + * if possible. + */ + if (c == ' ' && strncmp(from, "use_pfix", 8) == 0) { + use_pfix = 1; + from += 8; + } cn = *(from++); if (!cn) break; @@ -394,6 +517,10 @@ break; *(to++) = c; } + + if (!use_pfix) + machine_flags &= ~128; /* MACHINE_HAS_PFIX = 0 */ + if (c == ' ' && to > command_line) to--; *to = '\0'; *cmdline_p = command_line; @@ -429,6 +556,8 @@ if (start_chunk < end_chunk) free_bootmem(start_chunk << PAGE_SHIFT, (end_chunk - start_chunk) << PAGE_SHIFT); + if (start_chunk + memory_chunk[i].size > max_pfn) + max_pfn = start_chunk + memory_chunk[i].size; } /* @@ -487,6 +616,10 @@ */ paging_init(); + if (MACHINE_HAS_PFIX) + /* max_pfn may be smaller than memory_end. */ + pfix_all_pages(start_pfn, max_pfn); + res = alloc_bootmem_low(sizeof(struct resource)); res->start = 0; res->end = memory_end; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/smp.c linux.21rc5-ac2/arch/s390/kernel/smp.c --- linux.21rc5/arch/s390/kernel/smp.c 2003-05-28 15:08:16.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/smp.c 2003-04-25 13:47:51.000000000 +0100 @@ -38,7 +38,7 @@ #include /* prototypes */ -extern int cpu_idle(void * unused); +extern int cpu_idle(void); extern __u16 boot_cpu_addr; extern volatile int __cpu_logical_map[]; @@ -56,6 +56,7 @@ spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; unsigned long cpu_online_map; +unsigned long cache_decay_ticks; /* * Setup routine for controlling SMP activation @@ -468,7 +469,7 @@ { int curr_cpu; - current->processor = 0; + current->cpu = 0; smp_num_cpus = 1; cpu_online_map = 1; for (curr_cpu = 0; @@ -509,7 +510,7 @@ pfault_init(); #endif /* cpu_idle will call schedule for us */ - return cpu_idle(NULL); + return cpu_idle(); } /* @@ -547,12 +548,9 @@ idle = init_task.prev_task; if (!idle) panic("No idle process for CPU %d",cpu); - idle->processor = cpu; - idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */ + init_idle(idle, cpu); - del_from_runqueue(idle); unhash_process(idle); - init_tasks[cpu] = idle; cpu_lowcore = get_cpu_lowcore(cpu); cpu_lowcore->save_area[15] = idle->thread.ksp; @@ -604,6 +602,8 @@ panic("Couldn't request external interrupt 0x1202"); smp_count_cpus(); memset(lowcore_ptr,0,sizeof(lowcore_ptr)); + + cache_decay_ticks = (200 * HZ) / 1000; /* Is 200ms ok? Robus? XXX */ /* * Initialize the logical to physical CPU number mapping diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/kernel/traps.c linux.21rc5-ac2/arch/s390/kernel/traps.c --- linux.21rc5/arch/s390/kernel/traps.c 2003-05-28 15:08:16.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/kernel/traps.c 2003-04-25 13:47:51.000000000 +0100 @@ -142,7 +142,7 @@ * We can't print the backtrace of a running process. 
It is * unreliable at best and can cause kernel oopses. */ - if (task_has_cpu(tsk)) + if (tsk->state == TASK_RUNNING) return; show_trace((unsigned long *) tsk->thread.ksp); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390/mm/init.c linux.21rc5-ac2/arch/s390/mm/init.c --- linux.21rc5/arch/s390/mm/init.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390/mm/init.c 2003-05-28 15:13:38.000000000 +0100 @@ -154,11 +154,6 @@ if (address >= end_mem) pte_clear(&pte); set_pte(pg_table, pte); - if (address < memory_size) /* do not set storage - key outside the storage - or die */ - set_access_key(pte); /* switch to our - access key */ address += PAGE_SIZE; } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/defconfig linux.21rc5-ac2/arch/s390x/defconfig --- linux.21rc5/arch/s390x/defconfig 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/defconfig 2003-04-25 13:53:58.000000000 +0100 @@ -91,22 +91,23 @@ CONFIG_TN3270_CONSOLE=y CONFIG_TN3215=y CONFIG_TN3215_CONSOLE=y -CONFIG_HWC=y -CONFIG_HWC_CONSOLE=y -CONFIG_HWC_CPI=m +CONFIG_SCLP=y +CONFIG_SCLP_TTY=y +CONFIG_SCLP_CONSOLE=y +CONFIG_SCLP_VT220_TTY=y +CONFIG_SCLP_VT220_CONSOLE=y +CONFIG_SCLP_CPI=m CONFIG_S390_TAPE=m # # S/390 tape interface support # -CONFIG_S390_TAPE_CHAR=y CONFIG_S390_TAPE_BLOCK=y # # S/390 tape hardware support # -CONFIG_S390_TAPE_3490=m -CONFIG_S390_TAPE_3480=m +CONFIG_S390_TAPE_34XX=m # # Network device drivers @@ -151,13 +152,18 @@ CONFIG_SHARED_IPV6_CARDS=y # CONFIG_KHTTPD is not set # CONFIG_ATM is not set -CONFIG_VLAN_8021Q=m +# CONFIG_VLAN_8021Q is not set # # # # CONFIG_IPX is not set # CONFIG_ATALK is not set + +# +# Appletalk devices +# +# CONFIG_DEV_APPLETALK is not set # CONFIG_DECNET is not set # CONFIG_BRIDGE is not set # CONFIG_X25 is not set @@ -175,6 +181,11 @@ # CONFIG_NET_SCHED is not set # +# Network testing +# +# CONFIG_NET_PKTGEN is not set + +# # File systems # # CONFIG_QUOTA is not set @@ -187,6 +198,8 @@ # CONFIG_ADFS_FS_RW is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BEFS_DEBUG is not set # CONFIG_BFS_FS is not set # CONFIG_EXT3_FS is not set # CONFIG_JBD is not set @@ -200,23 +213,27 @@ # CONFIG_JFFS2_FS is not set # CONFIG_CRAMFS is not set # CONFIG_TMPFS is not set -# CONFIG_RAMFS is not set +CONFIG_RAMFS=y # CONFIG_ISO9660_FS is not set # CONFIG_JOLIET is not set # CONFIG_ZISOFS is not set +# CONFIG_JFS_FS is not set +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set # CONFIG_MINIX_FS is not set # CONFIG_VXFS_FS is not set # CONFIG_NTFS_FS is not set # CONFIG_NTFS_RW is not set # CONFIG_HPFS_FS is not set CONFIG_PROC_FS=y -CONFIG_DEVFS_FS=y -CONFIG_DEVFS_MOUNT=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVFS_MOUNT is not set # CONFIG_DEVFS_DEBUG is not set # CONFIG_DEVPTS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX4FS_RW is not set # CONFIG_ROMFS_FS is not set +# CONFIG_XIP2FS is not set CONFIG_EXT2_FS=y # CONFIG_SYSV_FS is not set # CONFIG_UDF_FS is not set @@ -234,6 +251,7 @@ # CONFIG_ROOT_NFS is not set # CONFIG_NFSD is not set # CONFIG_NFSD_V3 is not set +# CONFIG_NFSD_TCP is not set CONFIG_SUNRPC=y CONFIG_LOCKD=y # CONFIG_SMB_FS is not set @@ -247,7 +265,6 @@ # CONFIG_NCPFS_NLS is not set # CONFIG_NCPFS_EXTRAS is not set # CONFIG_ZISOFS_FS is not set -# CONFIG_ZLIB_FS_INFLATE is not set # # Partition Types @@ -264,6 +281,7 @@ # CONFIG_SGI_PARTITION is not set # CONFIG_ULTRIX_PARTITION is not set # 
CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set # CONFIG_SMB_NLS is not set # CONFIG_NLS is not set @@ -271,3 +289,9 @@ # Kernel hacking # CONFIG_MAGIC_SYSRQ=y + +# +# Library routines +# +# CONFIG_ZLIB_INFLATE is not set +# CONFIG_ZLIB_DEFLATE is not set diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/asm-offsets.c linux.21rc5-ac2/arch/s390x/kernel/asm-offsets.c --- linux.21rc5/arch/s390x/kernel/asm-offsets.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/asm-offsets.c 2003-04-25 13:47:51.000000000 +0100 @@ -26,7 +26,7 @@ DEFINE(__TASK_need_resched, offsetof(struct task_struct, need_resched),); DEFINE(__TASK_ptrace, offsetof(struct task_struct, ptrace),); - DEFINE(__TASK_processor, offsetof(struct task_struct, processor),); + DEFINE(__TASK_processor, offsetof(struct task_struct, cpu),); return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/binfmt_elf32.c linux.21rc5-ac2/arch/s390x/kernel/binfmt_elf32.c --- linux.21rc5/arch/s390x/kernel/binfmt_elf32.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/binfmt_elf32.c 2003-04-25 13:50:03.000000000 +0100 @@ -87,7 +87,13 @@ #define ELF_PLATFORM (NULL) #ifdef __KERNEL__ -#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) +#define SET_PERSONALITY(ex, ibcs2) \ +do { \ + if (ibcs2) \ + set_personality(PER_SVR4); \ + else if (current->personality != PER_LINUX32) \ + set_personality(PER_LINUX); \ +} while (0) #endif #include "linux32.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/bitmap.S linux.21rc5-ac2/arch/s390x/kernel/bitmap.S --- linux.21rc5/arch/s390x/kernel/bitmap.S 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/bitmap.S 2003-04-25 13:47:51.000000000 +0100 @@ -35,3 +35,22 @@ .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 + .globl _sb_findmap +_sb_findmap: + .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/debug.c linux.21rc5-ac2/arch/s390x/kernel/debug.c --- linux.21rc5/arch/s390x/kernel/debug.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/debug.c 2003-05-28 15:13:38.000000000 +0100 @@ -949,9 +949,21 @@ int i; unsigned long flags; mode_t mode = S_IFREG; + struct proc_dir_entry *pde; + if (!id) goto out; + pde = debug_create_proc_dir_entry(id->proc_root_entry, + view->name, mode, + &debug_inode_ops, + &debug_file_ops); + if(!pde){ + printk(KERN_WARNING "debug: create_proc_entry() failed! 
Cannot register view %s/%s\n", id->name,view->name); + rc = -1; + goto out; + } + spin_lock_irqsave(&id->lock, flags); for (i = 0; i < DEBUG_MAX_VIEWS; i++) { if (id->views[i] == NULL) @@ -962,6 +974,8 @@ id->name,view->name); printk(KERN_WARNING "debug: maximum number of views reached (%i)!\n", i); + debug_delete_proc_dir_entry(id->proc_root_entry, pde); + rc = -1; } else { @@ -970,11 +984,7 @@ mode |= S_IRUSR; if (view->input_proc) mode |= S_IWUSR; - id->proc_entries[i] = - debug_create_proc_dir_entry(id->proc_root_entry, - view->name, mode, - &debug_inode_ops, - &debug_file_ops); + id->proc_entries[i] = pde; rc = 0; } spin_unlock_irqrestore(&id->lock, flags); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/entry.S linux.21rc5-ac2/arch/s390x/kernel/entry.S --- linux.21rc5/arch/s390x/kernel/entry.S 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/entry.S 2003-04-25 13:50:03.000000000 +0100 @@ -240,11 +240,11 @@ # .globl ret_from_fork ret_from_fork: + brasl %r14,schedule_tail GET_CURRENT # load pointer to task_struct to R9 stosm 48(%r15),0x03 # reenable interrupts xc SP_R2(8,%r15),SP_R2(%r15) # child returns 0 - larl %r14,sysc_return - jg schedule_tail # return to sysc_return + j sysc_return # # clone, fork, vfork, exec and sigreturn need glue, @@ -392,7 +392,7 @@ .long SYSCALL(sys_oldumount,sys32_oldumount_wrapper) .long SYSCALL(sys_ni_syscall,sys32_setuid16_wrapper) /* old setuid16 syscall*/ .long SYSCALL(sys_ni_syscall,sys32_getuid16) /* old getuid16 syscall*/ - .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 25 old stime syscall */ + .long SYSCALL(sys_ni_syscall,sys32_stime_wrapper) /* 25 old stime syscall */ .long SYSCALL(sys_ptrace,sys32_ptrace_wrapper) .long SYSCALL(sys_alarm,sys32_alarm_wrapper) .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* old fstat syscall */ @@ -489,7 +489,7 @@ .long SYSCALL(sys_sigreturn_glue,sys32_sigreturn_glue) .long SYSCALL(sys_clone_glue,sys_clone_glue) /* 120 */ .long SYSCALL(sys_setdomainname,sys32_setdomainname_wrapper) - .long SYSCALL(sys_newuname,sys32_newuname_wrapper) + .long SYSCALL(s390x_newuname,sys32_newuname_wrapper) .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* modify_ldt for i386 */ .long SYSCALL(sys_adjtimex,sys32_adjtimex_wrapper) .long SYSCALL(sys_mprotect,sys32_mprotect_wrapper) /* 125 */ @@ -503,7 +503,7 @@ .long SYSCALL(sys_fchdir,sys32_fchdir_wrapper) .long SYSCALL(sys_bdflush,sys32_bdflush_wrapper) .long SYSCALL(sys_sysfs,sys32_sysfs_wrapper) /* 135 */ - .long SYSCALL(sys_personality,sys32_personality_wrapper) + .long SYSCALL(s390x_personality,sys32_personality_wrapper) .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* for afs_syscall */ .long SYSCALL(sys_ni_syscall,sys32_setfsuid16_wrapper) /* old setfsuid16 syscall */ .long SYSCALL(sys_ni_syscall,sys32_setfsgid16_wrapper) /* old setfsgid16 syscall */ @@ -516,7 +516,7 @@ .long SYSCALL(sys_writev,sys32_writev_wrapper) .long SYSCALL(sys_getsid,sys32_getsid_wrapper) .long SYSCALL(sys_fdatasync,sys32_fdatasync_wrapper) - .long SYSCALL(sys_sysctl,sys_ni_syscall) + .long SYSCALL(sys_sysctl,sys32_sysctl_wrapper) .long SYSCALL(sys_mlock,sys32_mlock_wrapper) /* 150 */ .long SYSCALL(sys_munlock,sys32_munlock_wrapper) .long SYSCALL(sys_mlockall,sys32_mlockall_wrapper) @@ -558,7 +558,7 @@ .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* streams1 */ .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* streams2 */ .long SYSCALL(sys_vfork_glue,sys_vfork_glue) /* 190 */ - .long SYSCALL(sys_getrlimit,sys32_old_getrlimit_wrapper) + 
.long SYSCALL(sys_getrlimit,sys32_getrlimit_wrapper) .long SYSCALL(sys_mmap2,sys32_mmap2_wrapper) .long SYSCALL(sys_ni_syscall,sys32_truncate64_wrapper) .long SYSCALL(sys_ni_syscall,sys32_ftruncate64_wrapper) @@ -589,7 +589,7 @@ .long SYSCALL(sys_madvise,sys32_madvise_wrapper) .long SYSCALL(sys_getdents64,sys32_getdents64_wrapper)/* 220 */ .long SYSCALL(sys_ni_syscall,sys32_fcntl64_wrapper) - .long SYSCALL(sys_readahead,sys_ni_syscall) + .long SYSCALL(sys_readahead,sys32_readahead) .long SYSCALL(sys_ni_syscall,sys_ni_syscall) .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 224 - reserved for setxattr */ .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 225 - reserved for lsetxattr */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/exec_domain32.c linux.21rc5-ac2/arch/s390x/kernel/exec_domain32.c --- linux.21rc5/arch/s390x/kernel/exec_domain32.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/exec_domain32.c 2003-04-25 13:50:03.000000000 +0100 @@ -0,0 +1,30 @@ +/* + * Support for 32-bit Linux for S390 personality. + * + * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Gerhard Tonn (ton@de.ibm.com) + * + * + */ + +#include +#include +#include +#include + +struct exec_domain s390_exec_domain; + +static int __init +s390_init (void) +{ + s390_exec_domain.name = "Linux/s390"; + s390_exec_domain.handler = NULL; + s390_exec_domain.pers_low = PER_LINUX32; + s390_exec_domain.pers_high = PER_LINUX32; + s390_exec_domain.signal_map = default_exec_domain.signal_map; + s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap; + register_exec_domain(&s390_exec_domain); + return 0; +} + +__initcall(s390_init); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/head.S linux.21rc5-ac2/arch/s390x/kernel/head.S --- linux.21rc5/arch/s390x/kernel/head.S 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/head.S 2003-05-28 15:13:38.000000000 +0100 @@ -548,26 +548,11 @@ tm __LC_CPUID,0xff # running under VM ? bno 0f-.LPG1(%r13) oi 7(%r12),1 # set VM flag -# -# we are running under VM, find out if we have PFIX -# - la %r1,0f-.LPG1(%r13) # set program check address - stg %r1,__LC_PGM_NEW_PSW+8 - lghi %r0,0x4000 - lghi %r2,0 - .long 0x83200098 # diag 2,0,x'0098' - lock page - lghi %r2,4 - .long 0x83200098 # diag 2,0,x'0098' - unlock page - oi 7(%r12),128 # set PFIX flag 0: lh %r0,__LC_CPUID+4 # get cpu version chi %r0,0x7490 # running on a P/390 ? bne 1f-.LPG1(%r13) oi 7(%r12),4 # set P/390 flag 1: - chi %r0,0x2084 # new stidp format? 
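The new exec_domain32.c above registers an execution domain that claims exactly PER_LINUX32, so a 31-bit task keeps that personality across exec; the reworked SET_PERSONALITY() in binfmt_elf32.c earlier in this patch cooperates by forcing PER_LINUX only when the task is not already PER_LINUX32. A compressed sketch of the single-personality registration, with the field values taken from the file above (returning the register_exec_domain() result is an assumption; the original discards it):

	static struct exec_domain s390_exec_domain;

	static int __init s390_init(void)
	{
		s390_exec_domain.name          = "Linux/s390";
		s390_exec_domain.handler       = NULL;		/* no signal remapping */
		s390_exec_domain.pers_low      = PER_LINUX32;	/* one personality only */
		s390_exec_domain.pers_high     = PER_LINUX32;
		s390_exec_domain.signal_map    = default_exec_domain.signal_map;
		s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
		return register_exec_domain(&s390_exec_domain);
	}
	__initcall(s390_init);

The s390x_newuname and s390x_personality entries that replace sys_newuname and sys_personality in the s390x syscall table serve the same end: a PER_LINUX32 task presumably keeps seeing 31-bit identification from uname.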
- bne 2f-.LPG1(%r13) - oi 7(%r12),64 # set new stidp flag -2: # # find out if we have the MVPG instruction @@ -601,7 +586,7 @@ .quad .Lduct # cr2: dispatchable unit control table .quad 0 # cr3: instruction authorization .quad 0 # cr4: instruction authorization - .quad 0 # cr5: various things + .quad 0xffffffffffffffff # cr5: primary-aste origin .quad 0 # cr6: I/O interrupts .quad 0 # cr7: secondary space segment table .quad 0 # cr8: access registers translation diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/ioctl32.c linux.21rc5-ac2/arch/s390x/kernel/ioctl32.c --- linux.21rc5/arch/s390x/kernel/ioctl32.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/ioctl32.c 2003-04-25 13:53:58.000000000 +0100 @@ -25,9 +25,15 @@ #include #include #include +#include +#include +#include +#include +#include #include #include #include +#include #include #include "linux32.h" @@ -373,6 +379,107 @@ return sys_ioctl(fd, cmd, arg); } +struct loop_info32 { + int lo_number; /* ioctl r/o */ + __kernel_dev_t32 lo_device; /* ioctl r/o */ + unsigned int lo_inode; /* ioctl r/o */ + __kernel_dev_t32 lo_rdevice; /* ioctl r/o */ + int lo_offset; + int lo_encrypt_type; + int lo_encrypt_key_size; /* ioctl w/o */ + int lo_flags; /* ioctl r/o */ + char lo_name[LO_NAME_SIZE]; + unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ + unsigned int lo_init[2]; + char reserved[4]; +}; + +static int loop_status(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + mm_segment_t old_fs = get_fs(); + struct loop_info l; + int err = -EINVAL; + + switch(cmd) { + case LOOP_SET_STATUS: + err = get_user(l.lo_number, &((struct loop_info32 *)arg)->lo_number); + err |= __get_user(l.lo_device, &((struct loop_info32 *)arg)->lo_device); + err |= __get_user(l.lo_inode, &((struct loop_info32 *)arg)->lo_inode); + err |= __get_user(l.lo_rdevice, &((struct loop_info32 *)arg)->lo_rdevice); + err |= __copy_from_user((char *)&l.lo_offset, (char *)&((struct loop_info32 *)arg)->lo_offset, + 8 + (unsigned long)l.lo_init - (unsigned long)&l.lo_offset); + if (err) { + err = -EFAULT; + } else { + set_fs (KERNEL_DS); + err = sys_ioctl (fd, cmd, (unsigned long)&l); + set_fs (old_fs); + } + break; + case LOOP_GET_STATUS: + set_fs (KERNEL_DS); + err = sys_ioctl (fd, cmd, (unsigned long)&l); + set_fs (old_fs); + if (!err) { + err = put_user(l.lo_number, &((struct loop_info32 *)arg)->lo_number); + err |= __put_user(l.lo_device, &((struct loop_info32 *)arg)->lo_device); + err |= __put_user(l.lo_inode, &((struct loop_info32 *)arg)->lo_inode); + err |= __put_user(l.lo_rdevice, &((struct loop_info32 *)arg)->lo_rdevice); + err |= __copy_to_user((char *)&((struct loop_info32 *)arg)->lo_offset, + (char *)&l.lo_offset, (unsigned long)l.lo_init - (unsigned long)&l.lo_offset); + if (err) + err = -EFAULT; + } + break; + default: { + static int count = 0; + if (++count <= 20) + printk("%s: Unknown loop ioctl cmd, fd(%d) " + "cmd(%08x) arg(%08lx)\n", + __FUNCTION__, fd, cmd, arg); + } + } + return err; +} + + +struct blkpg_ioctl_arg32 { + int op; + int flags; + int datalen; + u32 data; +}; + +static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, struct blkpg_ioctl_arg32 *arg) +{ + struct blkpg_ioctl_arg a; + struct blkpg_partition p; + int err; + mm_segment_t old_fs = get_fs(); + + err = get_user(a.op, &arg->op); + err |= __get_user(a.flags, &arg->flags); + err |= __get_user(a.datalen, &arg->datalen); + err |= __get_user((long)a.data, &arg->data); + if (err) return err; + switch (a.op) { 
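loop_status() above and the blkpg translator beside it follow the standard 2.4 compat-ioctl shape: copy the 31-bit user structure in while widening each field, point the real ioctl at the kernel-space copy under set_fs(KERNEL_DS), then narrow the result back out. A stripped-down sketch of that shape; struct arg32, struct arg_native, widen_from_user() and narrow_to_user() are hypothetical stand-ins:

	extern asmlinkage int sys_ioctl(unsigned int, unsigned int, unsigned long);

	static int thunk32(unsigned int fd, unsigned int cmd, unsigned long arg)
	{
		mm_segment_t old_fs = get_fs();
		struct arg_native n;
		int err;

		if (widen_from_user(&n, (struct arg32 *) arg))	/* field-by-field widening */
			return -EFAULT;
		set_fs(KERNEL_DS);		/* let sys_ioctl accept a kernel pointer */
		err = sys_ioctl(fd, cmd, (unsigned long) &n);
		set_fs(old_fs);
		if (!err && narrow_to_user((struct arg32 *) arg, &n))
			err = -EFAULT;
		return err;
	}

The set_fs() dance is what makes the single kernel-resident copy acceptable to a syscall that normally insists on a user pointer.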
+ case BLKPG_ADD_PARTITION: + case BLKPG_DEL_PARTITION: + if (a.datalen < sizeof(struct blkpg_partition)) + return -EINVAL; + if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition))) + return -EFAULT; + a.data = &p; + set_fs (KERNEL_DS); + err = sys_ioctl(fd, cmd, (unsigned long)&a); + set_fs (old_fs); + default: + return -EINVAL; + } + return err; +} + + static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg) { mm_segment_t old_fs = get_fs(); @@ -413,7 +520,21 @@ IOCTL32_DEFAULT(BIODASDINFO), IOCTL32_DEFAULT(BIODASDFMT), + IOCTL32_DEFAULT(TAPE390_DISPLAY), + + IOCTL32_DEFAULT(BLKROSET), + IOCTL32_DEFAULT(BLKROGET), IOCTL32_DEFAULT(BLKRRPART), + IOCTL32_DEFAULT(BLKFLSBUF), + IOCTL32_DEFAULT(BLKRASET), + IOCTL32_DEFAULT(BLKFRASET), + IOCTL32_DEFAULT(BLKSECTSET), + IOCTL32_DEFAULT(BLKSSZGET), + IOCTL32_DEFAULT(BLKBSZGET), + IOCTL32_DEFAULT(BLKGETSIZE64), + + IOCTL32_DEFAULT(BLKELVGET), + IOCTL32_DEFAULT(BLKELVSET), IOCTL32_HANDLER(HDIO_GETGEO, hd_geometry_ioctl), @@ -422,6 +543,7 @@ IOCTL32_DEFAULT(TCSETAW), IOCTL32_DEFAULT(TCSETAF), IOCTL32_DEFAULT(TCSBRK), + IOCTL32_DEFAULT(TCSBRKP), IOCTL32_DEFAULT(TCXONC), IOCTL32_DEFAULT(TCFLSH), IOCTL32_DEFAULT(TCGETS), @@ -508,6 +630,10 @@ IOCTL32_DEFAULT(SIOCGSTAMP), + IOCTL32_DEFAULT(LOOP_SET_FD), + IOCTL32_DEFAULT(LOOP_CLR_FD), + + IOCTL32_DEFAULT(SIOCATMARK), IOCTL32_HANDLER(SIOCGIFNAME, dev_ifname32), IOCTL32_HANDLER(SIOCGIFCONF, dev_ifconf), IOCTL32_HANDLER(SIOCGIFFLAGS, dev_ifsioc), @@ -552,7 +678,18 @@ IOCTL32_HANDLER(EXT2_IOC32_GETVERSION, do_ext2_ioctl), IOCTL32_HANDLER(EXT2_IOC32_SETVERSION, do_ext2_ioctl), - IOCTL32_HANDLER(BLKGETSIZE, w_long) + IOCTL32_HANDLER(LOOP_SET_STATUS, loop_status), + IOCTL32_HANDLER(LOOP_GET_STATUS, loop_status), + +/* Raw devices */ + IOCTL32_DEFAULT(RAW_SETBIND), + IOCTL32_DEFAULT(RAW_GETBIND), + + IOCTL32_HANDLER(BLKRAGET, w_long), + IOCTL32_HANDLER(BLKGETSIZE, w_long), + IOCTL32_HANDLER(BLKFRAGET, w_long), + IOCTL32_HANDLER(BLKSECTGET, w_long), + IOCTL32_HANDLER(BLKPG, blkpg_ioctl_trans) }; @@ -604,6 +741,8 @@ static void ioctl32_insert(struct ioctl32_list *entry) { int hash = ioctl32_hash(entry->handler.cmd); + + entry->next = 0; if (!ioctl32_hash_table[hash]) ioctl32_hash_table[hash] = entry; else { @@ -612,10 +751,51 @@ while (l->next) l = l->next; l->next = entry; - entry->next = 0; } } +int register_ioctl32_conversion(unsigned int cmd, + int (*handler)(unsigned int, unsigned int, + unsigned long, struct file *)) +{ + struct ioctl32_list *l, *new; + int hash; + + hash = ioctl32_hash(cmd); + for (l = ioctl32_hash_table[hash]; l != NULL; l = l->next) + if (l->handler.cmd == cmd) + return -EBUSY; + new = kmalloc(sizeof(struct ioctl32_list), GFP_KERNEL); + if (new == NULL) + return -ENOMEM; + new->handler.cmd = cmd; + new->handler.function = (void *) handler; + ioctl32_insert(new); + return 0; +} + +int unregister_ioctl32_conversion(unsigned int cmd) +{ + struct ioctl32_list *p, *l; + int hash; + + hash = ioctl32_hash(cmd); + p = NULL; + for (l = ioctl32_hash_table[hash]; l != NULL; l = l->next) { + if (l->handler.cmd == cmd) + break; + p = l; + } + if (l == NULL) + return -ENOENT; + if (p == NULL) + ioctl32_hash_table[hash] = l->next; + else + p->next = l->next; + kfree(l); + return 0; +} + static int __init init_ioctl32(void) { int i; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/linux32.c linux.21rc5-ac2/arch/s390x/kernel/linux32.c --- linux.21rc5/arch/s390x/kernel/linux32.c 2003-05-28 15:08:17.000000000 +0100 +++ 
linux.21rc5-ac2/arch/s390x/kernel/linux32.c 2003-04-25 13:50:03.000000000 +0100 @@ -63,6 +63,7 @@ #include #include +#include #include "linux32.h" @@ -384,18 +385,18 @@ struct shmid64_ds32 { struct ipc64_perm_ds32 shm_perm; - unsigned int __pad1; + __kernel_size_t32 shm_segsz; __kernel_time_t32 shm_atime; - unsigned int __pad2; + unsigned int __unused1; __kernel_time_t32 shm_dtime; - unsigned int __pad3; + unsigned int __unused2; __kernel_time_t32 shm_ctime; - __kernel_size_t32 shm_segsz; + unsigned int __unused3; __kernel_pid_t32 shm_cpid; __kernel_pid_t32 shm_lpid; unsigned int shm_nattch; - unsigned int __unused1; - unsigned int __unused2; + unsigned int __unused4; + unsigned int __unused5; }; @@ -504,16 +505,23 @@ static int do_sys32_msgsnd (int first, int second, int third, void *uptr) { - struct msgbuf *p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER); + struct msgbuf *p = kmalloc (second + sizeof (struct msgbuf), GFP_USER); struct msgbuf32 *up = (struct msgbuf32 *)uptr; mm_segment_t old_fs; int err; if (!p) return -ENOMEM; - err = get_user (p->mtype, &up->mtype); - err |= __copy_from_user (p->mtext, &up->mtext, second); - if (err) + + err = -EINVAL; + if (second > MSGMAX || first < 0 || second < 0) + goto out; + + err = -EFAULT; + if (!uptr) + goto out; + if (get_user (p->mtype, &up->mtype) || + __copy_from_user (p->mtext, &up->mtext, second)) goto out; old_fs = get_fs (); set_fs (KERNEL_DS); @@ -532,6 +540,9 @@ mm_segment_t old_fs; int err; + if (first < 0 || second < 0) + return -EINVAL; + if (!version) { struct ipc_kludge_32 *uipck = (struct ipc_kludge_32 *)uptr; struct ipc_kludge_32 ipck; @@ -546,12 +557,12 @@ msgtyp = ipck.msgtyp; } err = -ENOMEM; - p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER); + p = kmalloc (second + sizeof (struct msgbuf), GFP_USER); if (!p) goto out; old_fs = get_fs (); set_fs (KERNEL_DS); - err = sys_msgrcv (first, p, second + 4, msgtyp, third); + err = sys_msgrcv (first, p, second, msgtyp, third); set_fs (old_fs); if (err < 0) goto free_then_out; @@ -879,25 +890,37 @@ { switch (cmd) { case F_GETLK: - case F_SETLK: - case F_SETLKW: { struct flock f; mm_segment_t old_fs; long ret; - if(get_flock(&f, (struct flock32 *)arg)) + if(get_flock(&f, (struct flock32 *)A(arg))) return -EFAULT; old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_fcntl(fd, cmd, (unsigned long)&f); set_fs (old_fs); if (ret) return ret; if (f.l_start >= 0x7fffffffUL || - f.l_len >= 0x7fffffffUL || f.l_start + f.l_len >= 0x7fffffffUL) return -EOVERFLOW; - if(put_flock(&f, (struct flock32 *)arg)) + if(put_flock(&f, (struct flock32 *)A(arg))) + return -EFAULT; + return 0; + } + case F_SETLK: + case F_SETLKW: + { + struct flock f; + mm_segment_t old_fs; + long ret; + + if(get_flock(&f, (struct flock32 *)A(arg))) return -EFAULT; + old_fs = get_fs(); set_fs (KERNEL_DS); + ret = sys_fcntl(fd, cmd, (unsigned long)&f); + set_fs (old_fs); + if (ret) return ret; return 0; } default: @@ -912,64 +935,97 @@ return sys32_fcntl(fd, cmd, arg); } -struct dqblk32 { - __u32 dqb_bhardlimit; - __u32 dqb_bsoftlimit; - __u32 dqb_curblocks; - __u32 dqb_ihardlimit; - __u32 dqb_isoftlimit; - __u32 dqb_curinodes; - __kernel_time_t32 dqb_btime; - __kernel_time_t32 dqb_itime; -}; - extern asmlinkage int sys_quotactl(int cmd, const char *special, int id, caddr_t addr); -asmlinkage int sys32_quotactl(int cmd, const char *special, int id, unsigned long addr) +#ifdef CONFIG_QIFACE_COMPAT +#ifdef CONFIG_QIFACE_V1 +struct user_dqblk32 { + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u32 
dqb_curblocks; + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v1c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V1_GETQUOTA +#define Q_COMP_SETQUOTA Q_V1_SETQUOTA +#define Q_COMP_SETQLIM Q_V1_SETQLIM +#define Q_COMP_SETUSE Q_V1_SETUSE +#else +struct user_dqblk32 { + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u64 dqb_curspace; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v2c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V2_GETQUOTA +#define Q_COMP_SETQUOTA Q_V2_SETQUOTA +#define Q_COMP_SETQLIM Q_V2_SETQLIM +#define Q_COMP_SETUSE Q_V2_SETUSE +#endif + +asmlinkage int sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) { int cmds = cmd >> SUBCMDSHIFT; int err; - struct dqblk d; + comp_dqblk_t d; mm_segment_t old_fs; char *spec; switch (cmds) { - case Q_GETQUOTA: - break; - case Q_SETQUOTA: - case Q_SETUSE: - case Q_SETQLIM: - if (copy_from_user (&d, (struct dqblk32 *)addr, - sizeof (struct dqblk32))) - return -EFAULT; - d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime; - d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime; - break; + case Q_COMP_GETQUOTA: + break; + case Q_COMP_SETQUOTA: + case Q_COMP_SETUSE: + case Q_COMP_SETQLIM: + if (copy_from_user(&d, (struct user_dqblk32 *)addr, + sizeof (struct user_dqblk32))) + return -EFAULT; + d.dqb_itime = ((struct user_dqblk32 *)&d)->dqb_itime; + d.dqb_btime = ((struct user_dqblk32 *)&d)->dqb_btime; + break; default: - return sys_quotactl(cmd, special, - id, (caddr_t)addr); + return sys_quotactl(cmd, special, id, (__kernel_caddr_t)addr); } spec = getname (special); err = PTR_ERR(spec); if (IS_ERR(spec)) return err; - old_fs = get_fs (); + old_fs = get_fs(); set_fs (KERNEL_DS); - err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d); + err = sys_quotactl(cmd, (const char *)spec, id, (__kernel_caddr_t)&d); set_fs (old_fs); putname (spec); if (err) return err; - if (cmds == Q_GETQUOTA) { + if (cmds == Q_COMP_GETQUOTA) { __kernel_time_t b = d.dqb_btime, i = d.dqb_itime; - ((struct dqblk32 *)&d)->dqb_itime = i; - ((struct dqblk32 *)&d)->dqb_btime = b; - if (copy_to_user ((struct dqblk32 *)addr, &d, - sizeof (struct dqblk32))) + ((struct user_dqblk32 *)&d)->dqb_itime = i; + ((struct user_dqblk32 *)&d)->dqb_btime = b; + if (copy_to_user ((struct user_dqblk32 *)addr, &d, + sizeof (struct user_dqblk32))) return -EFAULT; } return 0; } +#else +/* No conversion needed for new interface */ +asmlinkage int sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) +{ + return sys_quotactl(cmd, special, id, addr); +} +#endif + static inline int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf) { int err; @@ -2151,15 +2207,17 @@ return ret; } -#define RLIM_INFINITY32 0x7fffffff -#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) +#define RLIM_OLD_INFINITY32 0x7fffffff +#define RLIM_INFINITY32 0xffffffff +#define RESOURCE32_OLD(x) ((x > RLIM_OLD_INFINITY32) ? RLIM_OLD_INFINITY32 : x) +#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? 
RLIM_INFINITY32 : x) struct rlimit32 { u32 rlim_cur; u32 rlim_max; }; -extern asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit *rlim); +extern asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit *rlim); asmlinkage int sys32_old_getrlimit(unsigned int resource, struct rlimit32 *rlim) { @@ -2168,7 +2226,23 @@ mm_segment_t old_fs = get_fs (); set_fs (KERNEL_DS); - ret = sys_old_getrlimit(resource, &r); + ret = sys_getrlimit(resource, &r); + set_fs (old_fs); + if (!ret) { + ret = put_user (RESOURCE32_OLD(r.rlim_cur), &rlim->rlim_cur); + ret |= __put_user (RESOURCE32_OLD(r.rlim_max), &rlim->rlim_max); + } + return ret; +} + +asmlinkage int sys32_getrlimit(unsigned int resource, struct rlimit32 *rlim) +{ + struct rlimit r; + int ret; + mm_segment_t old_fs = get_fs (); + + set_fs (KERNEL_DS); + ret = sys_getrlimit(resource, &r); set_fs (old_fs); if (!ret) { ret = put_user (RESOURCE32(r.rlim_cur), &rlim->rlim_cur); @@ -2588,8 +2662,23 @@ CMSG32_ALIGN(sizeof(struct cmsghdr32))); kcmsg32->cmsg_len = clen32; + switch (kcmsg32->cmsg_type) { + /* + * The timestamp type's data needs to be converted + * from 64-bit time values to 32-bit time values + */ + case SO_TIMESTAMP: { + __kernel_time_t32* ptr_time32 = CMSG32_DATA(kcmsg32); + __kernel_time_t* ptr_time = CMSG_DATA(ucmsg); + get_user(*ptr_time32, ptr_time); + get_user(*(ptr_time32+1), ptr_time+1); + kcmsg32->cmsg_len -= 2*(sizeof(__kernel_time_t) - + sizeof(__kernel_time_t32)); + } + default:; + } ucmsg = (struct cmsghdr *) (((char *)ucmsg) + CMSG_ALIGN(clen64)); - wp = (((char *)kcmsg32) + CMSG32_ALIGN(clen32)); + wp = (((char *)kcmsg32) + CMSG32_ALIGN(kcmsg32->cmsg_len)); } /* Copy back fixed up data, and adjust pointers. */ @@ -2612,6 +2701,7 @@ kmsg->msg_control = (void *) orig_cmsg_uptr; } +#if 0 asmlinkage int sys32_sendmsg(int fd, struct msghdr32 *user_msg, unsigned user_flags) { struct socket *sock; @@ -2721,7 +2811,7 @@ sockfd_put(sock); } - if(uaddr != NULL && err >= 0) + if(uaddr != NULL && err >= 0 && kern_msg.msg_namelen) err = move_addr_to_user(addr, kern_msg.msg_namelen, uaddr, uaddr_len); if(cmsg_ptr != 0 && err >= 0) { unsigned long ucmsg_ptr = ((unsigned long)kern_msg.msg_control); @@ -2737,6 +2827,222 @@ return err; return len; } +#endif + +/* + * BSD sendmsg interface + */ + +int sys32_sendmsg(int fd, struct msghdr32 *msg, unsigned flags) +{ + struct socket *sock; + char address[MAX_SOCK_ADDR]; + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; + unsigned char ctl[sizeof(struct cmsghdr) + 20]; /* 20 is size of ipv6_pktinfo */ + unsigned char *ctl_buf = ctl; + struct msghdr msg_sys; + int err, ctl_len, iov_size, total_len; + + err = -EFAULT; + if (msghdr_from_user32_to_kern(&msg_sys, msg)) + goto out; + + sock = sockfd_lookup(fd, &err); + if (!sock) + goto out; + + /* do not move before msg_sys is valid */ + err = -EINVAL; + if (msg_sys.msg_iovlen > UIO_MAXIOV) + goto out_put; + + /* Check whether to allocate the iovec area*/ + err = -ENOMEM; + iov_size = msg_sys.msg_iovlen * sizeof(struct iovec32); + if (msg_sys.msg_iovlen > UIO_FASTIOV) { + iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); + if (!iov) + goto out_put; + } + + /* This will also move the address data into kernel space */ + err = verify_iovec32(&msg_sys, iov, address, VERIFY_READ); + if (err < 0) + goto out_freeiov; + total_len = err; + + err = -ENOBUFS; + + if (msg_sys.msg_controllen > INT_MAX) + goto out_freeiov; + ctl_len = msg_sys.msg_controllen; + if (ctl_len) + { + if (ctl_len > sizeof(ctl)) + { + ctl_buf = 
sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); + if (ctl_buf == NULL) + goto out_freeiov; + } + else if (ctl_len < sizeof(struct cmsghdr)) + { + /* to get same error message as on 31 bit native */ + err = EOPNOTSUPP; + goto out_freeiov; + } + err = -EFAULT; + if (cmsghdr_from_user32_to_kern(&msg_sys, ctl_buf, ctl_len)) + goto out_freectl; +// msg_sys.msg_control = ctl_buf; + } + msg_sys.msg_flags = flags; + + if (sock->file->f_flags & O_NONBLOCK) + msg_sys.msg_flags |= MSG_DONTWAIT; + err = sock_sendmsg(sock, &msg_sys, total_len); + +out_freectl: + if (ctl_buf != ctl) + sock_kfree_s(sock->sk, ctl_buf, ctl_len); +out_freeiov: + if (iov != iovstack) + sock_kfree_s(sock->sk, iov, iov_size); +out_put: + sockfd_put(sock); +out: + return err; +} + +static __inline__ void +scm_recv32(struct socket *sock, struct msghdr *msg, + struct scm_cookie *scm, int flags, unsigned long cmsg_ptr) +{ + if(!msg->msg_control) + { + if(sock->passcred || scm->fp) + msg->msg_flags |= MSG_CTRUNC; + scm_destroy(scm); + return; + } + /* If recvmsg processing itself placed some + * control messages into user space, it's is + * using 64-bit CMSG processing, so we need + * to fix it up before we tack on more stuff. + */ + if((unsigned long) msg->msg_control != cmsg_ptr) + cmsg32_recvmsg_fixup(msg, cmsg_ptr); + /* Wheee... */ + if(sock->passcred) + put_cmsg32(msg, + SOL_SOCKET, SCM_CREDENTIALS, + sizeof(scm->creds), &scm->creds); + if(!scm->fp) + return; + + scm_detach_fds32(msg, scm); +} + +static int +sock_recvmsg32(struct socket *sock, struct msghdr *msg, int size, int flags, + unsigned long cmsg_ptr) +{ + struct scm_cookie scm; + + memset(&scm, 0, sizeof(scm)); + size = sock->ops->recvmsg(sock, msg, size, flags, &scm); + if (size >= 0) + scm_recv32(sock, msg, &scm, flags, cmsg_ptr); + + return size; +} + +/* + * BSD recvmsg interface + */ + +int +sys32_recvmsg (int fd, struct msghdr32 *msg, unsigned int flags) +{ + struct socket *sock; + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov=iovstack; + struct msghdr msg_sys; + unsigned long cmsg_ptr; + int err, iov_size, total_len, len; + + /* kernel mode address */ + char addr[MAX_SOCK_ADDR]; + + /* user mode address pointers */ + struct sockaddr *uaddr; + int *uaddr_len; + + err=-EFAULT; + if (msghdr_from_user32_to_kern(&msg_sys, msg)) + goto out; + + sock = sockfd_lookup(fd, &err); + if (!sock) + goto out; + + err = -EINVAL; + if (msg_sys.msg_iovlen > UIO_MAXIOV) + goto out_put; + + /* Check whether to allocate the iovec area*/ + err = -ENOMEM; + iov_size = msg_sys.msg_iovlen * sizeof(struct iovec); + if (msg_sys.msg_iovlen > UIO_FASTIOV) { + iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); + if (!iov) + goto out_put; + } + + /* + * Save the user-mode address (verify_iovec will change the + * kernel msghdr to use the kernel address space) + */ + + uaddr = msg_sys.msg_name; + uaddr_len = &msg->msg_namelen; + err = verify_iovec32(&msg_sys, iov, addr, VERIFY_WRITE); + if (err < 0) + goto out_freeiov; + total_len=err; + + cmsg_ptr = (unsigned long)msg_sys.msg_control; + msg_sys.msg_flags = 0; + + if (sock->file->f_flags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + err = sock_recvmsg32(sock, &msg_sys, total_len, flags, cmsg_ptr); + if (err < 0) + goto out_freeiov; + len = err; + + if (uaddr != NULL && + /* in order to get same error message as on native 31 bit */ + msg_sys.msg_namelen > 0) { + err = move_addr_to_user(addr, msg_sys.msg_namelen, uaddr, uaddr_len); + if (err < 0) + goto out_freeiov; + } + err = __put_user(msg_sys.msg_flags, &msg->msg_flags); + if (err) + 
goto out_freeiov; + err = __put_user((__kernel_size_t32) ((unsigned long)msg_sys.msg_control - cmsg_ptr), &msg->msg_controllen); + if (err) + goto out_freeiov; + err = len; + +out_freeiov: + if (iov != iovstack) + sock_kfree_s(sock->sk, iov, iov_size); +out_put: + sockfd_put(sock); +out: + return err; +} extern asmlinkage int sys_setsockopt(int fd, int level, int optname, char *optval, int optlen); @@ -2820,6 +3126,20 @@ if (level == SOL_ICMPV6 && optname == ICMPV6_FILTER) return do_set_icmpv6_filter(fd, level, optname, optval, optlen); + if (level == SOL_SOCKET && + (optname == SO_SNDTIMEO || optname == SO_RCVTIMEO)) { + long ret; + struct timeval tmp; + mm_segment_t old_fs; + + if (get_tv32(&tmp, (struct timeval32 *)optval )) + return -EFAULT; + old_fs = get_fs(); + set_fs(KERNEL_DS); + ret = sys_setsockopt(fd, level, optname, (char *) &tmp, sizeof(struct timeval)); + set_fs(old_fs); + return ret; + } return sys_setsockopt(fd, level, optname, optval, optlen); } @@ -3669,7 +3989,7 @@ */ static int nfs_getfh32_res_trans(union nfsctl_res *kres, union nfsctl_res32 *res32) { - return copy_to_user(res32, kres, sizeof(*res32)); + return copy_to_user(res32, kres, sizeof(*res32)) ? -EFAULT : 0; } /* @@ -3891,15 +4211,25 @@ asmlinkage ssize_t32 sys32_pread(unsigned int fd, char *ubuf, __kernel_size_t32 count, u32 poshi, u32 poslo) { + if ((ssize_t32) count < 0) + return -EINVAL; return sys_pread(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); } asmlinkage ssize_t32 sys32_pwrite(unsigned int fd, char *ubuf, __kernel_size_t32 count, u32 poshi, u32 poslo) { + if ((ssize_t32) count < 0) + return -EINVAL; return sys_pwrite(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); } +extern asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count); + +asmlinkage ssize_t32 sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count) +{ + return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count); +} extern asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count); @@ -4203,7 +4533,7 @@ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) { /* Result is out of bounds. 
*/ - do_munmap(current->mm, addr, len); + do_munmap(current->mm, addr, len, 1); error = -ENOMEM; } up_write(&current->mm->mmap_sem); @@ -4344,3 +4674,22 @@ return ret; } +asmlinkage ssize_t sys_read(unsigned int fd, char * buf, size_t count); + +asmlinkage ssize_t32 sys32_read(unsigned int fd, char * buf, size_t count) +{ + if ((ssize_t32) count < 0) + return -EINVAL; + + return sys_read(fd, buf, count); +} + +asmlinkage ssize_t sys_write(unsigned int fd, const char * buf, size_t count); + +asmlinkage ssize_t32 sys32_write(unsigned int fd, char * buf, size_t count) +{ + if ((ssize_t32) count < 0) + return -EINVAL; + + return sys_write(fd, buf, count); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/Makefile linux.21rc5-ac2/arch/s390x/kernel/Makefile --- linux.21rc5/arch/s390x/kernel/Makefile 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/Makefile 2003-04-25 13:50:03.000000000 +0100 @@ -29,7 +29,7 @@ # obj-$(CONFIG_REMOTE_DEBUG) += gdb-stub.o #gdb-low.o -obj-$(CONFIG_S390_SUPPORT) += linux32.o signal32.o ioctl32.o wrapper32.o exec32.o +obj-$(CONFIG_S390_SUPPORT) += linux32.o signal32.o ioctl32.o wrapper32.o exec32.o exec_domain32.o obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o .PHONY: asm-offsets.h diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/process.c linux.21rc5-ac2/arch/s390x/kernel/process.c --- linux.21rc5/arch/s390x/kernel/process.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/process.c 2003-04-25 13:47:51.000000000 +0100 @@ -55,10 +55,6 @@ psw_t wait_psw; unsigned long reg; - /* endless idle loop with no priority at all */ - init_idle(); - current->nice = 20; - current->counter = -100; while (1) { if (current->need_resched) { schedule(); @@ -91,7 +87,7 @@ { struct task_struct *tsk = current; - printk("CPU: %d %s\n", tsk->processor, print_tainted()); + printk("CPU: %d %s\n", tsk->cpu, print_tainted()); printk("Process %s (pid: %d, task: %016lx, ksp: %016lx)\n", current->comm, current->pid, (unsigned long) tsk, tsk->thread.ksp); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/s390_ksyms.c linux.21rc5-ac2/arch/s390x/kernel/s390_ksyms.c --- linux.21rc5/arch/s390x/kernel/s390_ksyms.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/s390_ksyms.c 2003-05-28 15:40:29.000000000 +0100 @@ -13,6 +13,7 @@ #include #include #include +#include #if CONFIG_IP_MULTICAST #include #endif @@ -62,6 +63,20 @@ EXPORT_SYMBOL(overflowuid); EXPORT_SYMBOL(overflowgid); +#ifdef CONFIG_S390_SUPPORT +/* + * Dynamically add/remove 31 bit ioctl conversion functions. + */ +extern int register_ioctl32_conversion(unsigned int cmd, + int (*handler)(unsigned int, + unsigned int, + unsigned long, + struct file *)); +int unregister_ioctl32_conversion(unsigned int cmd); +EXPORT_SYMBOL(register_ioctl32_conversion); +EXPORT_SYMBOL(unregister_ioctl32_conversion); +#endif + /* * misc. 
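The register_ioctl32_conversion()/unregister_ioctl32_conversion() pair exported just above lets modules hook their own 31-bit ioctl translations at runtime instead of patching the static table in ioctl32.c. A hypothetical module-side use; MY_CMD and the pass-through handler are illustrative only and assume the command's argument layout is already 31/64-bit clean:

	extern asmlinkage int sys_ioctl(unsigned int fd, unsigned int cmd,
					unsigned long arg);

	static int my_cmd_trans(unsigned int fd, unsigned int cmd,
				unsigned long arg, struct file *file)
	{
		return sys_ioctl(fd, cmd, arg);		/* no repacking needed */
	}

	static int __init my_init(void)
	{
		return register_ioctl32_conversion(MY_CMD, my_cmd_trans);
	}

	static void __exit my_exit(void)
	{
		unregister_ioctl32_conversion(MY_CMD);	/* undo before unload */
	}

	module_init(my_init);
	module_exit(my_exit);

register_ioctl32_conversion() returns -EBUSY when a conversion for the command is already present, so a module can detect collisions with the built-in table.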
*/ @@ -70,5 +85,6 @@ EXPORT_SYMBOL(kernel_thread); EXPORT_SYMBOL(console_mode); EXPORT_SYMBOL(console_device); -EXPORT_SYMBOL(pfix_table); EXPORT_SYMBOL_NOVERS(do_call_softirq); +EXPORT_SYMBOL(ctrlchar_init); +EXPORT_SYMBOL(ctrlchar_handle); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/setup.c linux.21rc5-ac2/arch/s390x/kernel/setup.c --- linux.21rc5/arch/s390x/kernel/setup.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/setup.c 2003-05-28 15:13:38.000000000 +0100 @@ -56,8 +56,6 @@ unsigned long cpu_initialized = 0; volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ -void* *pfix_table=NULL; - /* * Setup options */ @@ -166,9 +164,9 @@ static int __init conmode_setup(char *str) { -#if defined(CONFIG_HWC_CONSOLE) - if (strncmp(str, "hwc", 4) == 0) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) + SET_CONSOLE_SCLP; #endif #if defined(CONFIG_TN3215_CONSOLE) if (strncmp(str, "3215", 5) == 0) @@ -200,8 +198,8 @@ */ cpcmd("TERM CONMODE 3215", NULL, 0); if (ptr == NULL) { -#if defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif return; } @@ -210,16 +208,16 @@ SET_CONSOLE_3270; #elif defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; -#elif defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#elif defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } else if (strncmp(ptr + 8, "3215", 4) == 0) { #if defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; #elif defined(CONFIG_TN3270_CONSOLE) SET_CONSOLE_3270; -#elif defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#elif defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } } else if (MACHINE_IS_P390) { @@ -229,8 +227,8 @@ SET_CONSOLE_3270; #endif } else { -#if defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } } @@ -273,21 +271,25 @@ /* * Reboot, halt and power_off stubs. They just call _machine_restart, - * _machine_halt or _machine_power_off. + * _machine_halt or _machine_power_off after making sure that all pending + * printks reached their destination. */ void machine_restart(char *command) { + console_unblank(); _machine_restart(command); } void machine_halt(void) { + console_unblank(); _machine_halt(); } void machine_power_off(void) { + console_unblank(); _machine_power_off(); } @@ -319,10 +321,6 @@ printk((MACHINE_IS_VM) ? "We are running under VM (64 bit mode)\n" : "We are running native (64 bit mode)\n"); - if (MACHINE_IS_VM) - printk((MACHINE_HAS_PFIX) ? 
- "This machine has PFIX support\n" : - "This machine has no PFIX support\n"); ROOT_DEV = to_kdev_t(0x0100); memory_start = (unsigned long) &_end; /* fixit if use $CODELO etc*/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/signal32.c linux.21rc5-ac2/arch/s390x/kernel/signal32.c --- linux.21rc5/arch/s390x/kernel/signal32.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/signal32.c 2003-04-25 13:50:03.000000000 +0100 @@ -302,7 +302,7 @@ save_fp_regs(&fpregs); __put_user(fpregs.fpc, &sregs->fpregs.fpc); for(i=0; ifpregs.fprs[i].d); + err |= __put_user(fpregs.fprs[i].ui, &sregs->fpregs.fprs[i].d); } return(err); @@ -331,7 +331,7 @@ (regs->psw.addr&PSW_ADDR_DEBUGCHANGE); __get_user(fpregs.fpc, &sregs->fpregs.fpc); for(i=0; ifpregs.fprs[i].d); + err |= __get_user(fpregs.fprs[i].ui, &sregs->fpregs.fprs[i].d); if(!err) restore_fp_regs(&fpregs); } @@ -474,6 +474,10 @@ goto give_sigsegv; } + /* Set up backchain. */ + if (__put_user((unsigned int) regs->gprs[15], (unsigned int *) frame)) + goto give_sigsegv; + /* Set up registers for signal handler */ regs->gprs[15] = (addr_t)frame; regs->psw.addr = FIX_PSW(ka->sa.sa_handler); @@ -527,6 +531,10 @@ (u16 *)(frame->retcode)); } + /* Set up backchain. */ + if (__put_user((unsigned int) regs->gprs[15], (unsigned int *) frame)) + goto give_sigsegv; + /* Set up registers for signal handler */ regs->gprs[15] = (addr_t)frame; regs->psw.addr = FIX_PSW(ka->sa.sa_handler); @@ -675,7 +683,7 @@ continue; switch (signr) { - case SIGCONT: case SIGCHLD: case SIGWINCH: + case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG: continue; case SIGTSTP: case SIGTTIN: case SIGTTOU: diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/signal.c linux.21rc5-ac2/arch/s390x/kernel/signal.c --- linux.21rc5/arch/s390x/kernel/signal.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/signal.c 2003-04-25 13:47:51.000000000 +0100 @@ -575,10 +575,7 @@ /* FALLTHRU */ default: - sigaddset(¤t->pending.signal, signr); - recalc_sigpending(current); - current->flags |= PF_SIGNALED; - do_exit(exit_code); + sig_exit(signr, exit_code, &info); /* NOTREACHED */ } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/smp.c linux.21rc5-ac2/arch/s390x/kernel/smp.c --- linux.21rc5/arch/s390x/kernel/smp.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/smp.c 2003-04-25 13:47:51.000000000 +0100 @@ -38,7 +38,7 @@ #include /* prototypes */ -extern int cpu_idle(void * unused); +extern int cpu_idle(void); extern __u16 boot_cpu_addr; extern volatile int __cpu_logical_map[]; @@ -56,6 +56,7 @@ spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; unsigned long cpu_online_map; +unsigned long cache_decay_ticks; /* * Setup routine for controlling SMP activation @@ -451,7 +452,7 @@ { int curr_cpu; - current->processor = 0; + current->cpu = 0; smp_num_cpus = 1; cpu_online_map = 1; for (curr_cpu = 0; @@ -491,7 +492,7 @@ pfault_init(); #endif /* cpu_idle will call schedule for us */ - return cpu_idle(NULL); + return cpu_idle(); } /* @@ -529,12 +530,9 @@ idle = init_task.prev_task; if (!idle) panic("No idle process for CPU %d",cpu); - idle->processor = cpu; - idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */ + init_idle(idle, cpu); - del_from_runqueue(idle); unhash_process(idle); - init_tasks[cpu] = idle; cpu_lowcore = get_cpu_lowcore(cpu); cpu_lowcore->save_area[15] = 
idle->thread.ksp; @@ -588,6 +586,8 @@ smp_count_cpus(); memset(lowcore_ptr,0,sizeof(lowcore_ptr)); + cache_decay_ticks = (200 * HZ) / 1000; /* Is 200ms ok? Robus? XXX */ + /* * Initialize the logical to physical CPU number mapping */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/traps.c linux.21rc5-ac2/arch/s390x/kernel/traps.c --- linux.21rc5/arch/s390x/kernel/traps.c 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/traps.c 2003-04-25 13:47:51.000000000 +0100 @@ -144,7 +144,7 @@ * We can't print the backtrace of a running process. It is * unreliable at best and can cause kernel oopses. */ - if (task_has_cpu(tsk)) + if (tsk->state == TASK_RUNNING) return; show_trace((unsigned long *) tsk->thread.ksp); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/kernel/wrapper32.S linux.21rc5-ac2/arch/s390x/kernel/wrapper32.S --- linux.21rc5/arch/s390x/kernel/wrapper32.S 2003-05-28 15:08:17.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/kernel/wrapper32.S 2003-04-25 13:50:03.000000000 +0100 @@ -17,14 +17,14 @@ llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t - jg sys_read # branch to sys_read + jg sys32_read # branch to sys_read .globl sys32_write_wrapper sys32_write_wrapper: llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # size_t - jg sys_write # branch to system call + jg sys32_write # branch to system call .globl sys32_open_wrapper sys32_open_wrapper: @@ -311,6 +311,12 @@ llgtr %r3,%r3 # struct rlimit_emu31 * jg sys32_old_getrlimit # branch to system call + .globl sys32_getrlimit_wrapper +sys32_getrlimit_wrapper: + llgfr %r2,%r2 # unsigned int + llgtr %r3,%r3 # struct rlimit_emu31 * + jg sys32_getrlimit # branch to system call + .globl sys32_mmap2_wrapper sys32_mmap2_wrapper: llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * @@ -537,7 +543,7 @@ .globl sys32_newuname_wrapper sys32_newuname_wrapper: llgtr %r2,%r2 # struct new_utsname * - jg sys_newuname # branch to system call + jg s390x_newuname # branch to system call .globl sys32_adjtimex_wrapper sys32_adjtimex_wrapper: @@ -614,7 +620,7 @@ .globl sys32_personality_wrapper sys32_personality_wrapper: llgfr %r2,%r2 # unsigned long - jg sys_personality # branch to system call + jg s390x_personality # branch to system call .globl sys32_setfsuid16_wrapper sys32_setfsuid16_wrapper: @@ -1091,3 +1097,13 @@ llgtr %r3,%r3 # struct stat64 * llgfr %r4,%r4 # long jg sys32_fstat64 # branch to system call + + .globl sys32_stime_wrapper +sys32_stime_wrapper: + llgtr %r2,%r2 # int * + jg sys_stime # branch to system call + + .globl sys32_sysctl_wrapper +sys32_sysctl_wrapper: + llgtr %r2,%r2 # struct __sysctl_args32 * + jg sys32_sysctl diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/s390x/mm/init.c linux.21rc5-ac2/arch/s390x/mm/init.c --- linux.21rc5/arch/s390x/mm/init.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/s390x/mm/init.c 2003-05-28 15:13:38.000000000 +0100 @@ -234,11 +234,6 @@ continue; } set_pte(pt_dir, pte); - if (address < memory_size ) /* do not set storage - key outside the storage - or die */ - set_access_key(pte); /* switch to our - access key */ address += PAGE_SIZE; } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sh/mm/fault.c linux.21rc5-ac2/arch/sh/mm/fault.c --- linux.21rc5/arch/sh/mm/fault.c 2003-05-28 15:08:12.000000000 +0100 +++ linux.21rc5-ac2/arch/sh/mm/fault.c 2003-04-22 16:44:36.000000000 +0100 @@ 
-72,8 +72,6 @@ return 1; check_stack: - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (expand_stack(vma, start) == 0) goto good_area; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sparc/kernel/signal.c linux.21rc5-ac2/arch/sparc/kernel/signal.c --- linux.21rc5/arch/sparc/kernel/signal.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/sparc/kernel/signal.c 2003-04-22 18:21:00.000000000 +0100 @@ -1131,6 +1131,8 @@ line = d_path(map->vm_file->f_dentry, map->vm_file->f_vfsmnt, buffer, PAGE_SIZE); + if (IS_ERR(line)) + break; } printk(MAPS_LINE_FORMAT, map->vm_start, map->vm_end, str, map->vm_pgoff << PAGE_SHIFT, kdevname(dev), ino); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sparc/kernel/sunos_ioctl.c linux.21rc5-ac2/arch/sparc/kernel/sunos_ioctl.c --- linux.21rc5/arch/sparc/kernel/sunos_ioctl.c 2003-05-28 15:08:07.000000000 +0100 +++ linux.21rc5-ac2/arch/sparc/kernel/sunos_ioctl.c 2003-04-22 16:44:36.000000000 +0100 @@ -39,8 +39,12 @@ { int ret = -EBADF; - if (fd >= SUNOS_NR_OPEN || !fcheck(fd)) + read_lock(¤t->files->file_lock); + if (fd >= SUNOS_NR_OPEN || !fcheck(fd)) { + read_unlock(¤t->files->file_lock); goto out; + } + read_unlock(¤t->files->file_lock); /* First handle an easy compat. case for tty ldisc. */ if(cmd == TIOCSETD) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sparc/mm/fault.c linux.21rc5-ac2/arch/sparc/mm/fault.c --- linux.21rc5/arch/sparc/mm/fault.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/sparc/mm/fault.c 2003-04-22 16:44:36.000000000 +0100 @@ -249,8 +249,6 @@ goto bad_area; if(vma->vm_start <= address) goto good_area; - if(!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if(expand_stack(vma, address)) goto bad_area; /* @@ -496,8 +494,6 @@ goto bad_area; if(vma->vm_start <= address) goto good_area; - if(!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if(expand_stack(vma, address)) goto bad_area; good_area: diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sparc64/kernel/signal32.c linux.21rc5-ac2/arch/sparc64/kernel/signal32.c --- linux.21rc5/arch/sparc64/kernel/signal32.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/sparc64/kernel/signal32.c 2003-04-22 18:21:00.000000000 +0100 @@ -1348,6 +1348,8 @@ line = d_path(map->vm_file->f_dentry, map->vm_file->f_vfsmnt, buffer, PAGE_SIZE); + if (IS_ERR(line)) + break; } printk(MAPS_LINE_FORMAT, map->vm_start, map->vm_end, str, map->vm_pgoff << PAGE_SHIFT, kdevname(dev), ino); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sparc64/kernel/signal.c linux.21rc5-ac2/arch/sparc64/kernel/signal.c --- linux.21rc5/arch/sparc64/kernel/signal.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/sparc64/kernel/signal.c 2003-04-22 18:21:00.000000000 +0100 @@ -679,6 +679,8 @@ line = d_path(map->vm_file->f_dentry, map->vm_file->f_vfsmnt, buffer, PAGE_SIZE); + if (IS_ERR(line)) + break; } printk(MAPS_LINE_FORMAT, map->vm_start, map->vm_end, str, map->vm_pgoff << PAGE_SHIFT, kdevname(dev), ino); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sparc64/kernel/sunos_ioctl32.c linux.21rc5-ac2/arch/sparc64/kernel/sunos_ioctl32.c --- linux.21rc5/arch/sparc64/kernel/sunos_ioctl32.c 2003-05-28 15:08:11.000000000 +0100 +++ linux.21rc5-ac2/arch/sparc64/kernel/sunos_ioctl32.c 2003-04-22 16:44:36.000000000 +0100 @@ -100,8 +100,12 @@ if(fd >= SUNOS_NR_OPEN) goto out; - if(!fcheck(fd)) + 
read_lock(¤t->files->file_lock); + if(!fcheck(fd)) { + read_unlock(¤t->files->file_lock); goto out; + } + read_unlock(¤t->files->file_lock); if(cmd == TIOCSETD) { mm_segment_t old_fs = get_fs(); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sparc64/kernel/sys_sparc32.c linux.21rc5-ac2/arch/sparc64/kernel/sys_sparc32.c --- linux.21rc5/arch/sparc64/kernel/sys_sparc32.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/sparc64/kernel/sys_sparc32.c 2003-04-22 16:44:36.000000000 +0100 @@ -889,62 +889,97 @@ return sys32_fcntl(fd, cmd, arg); } -struct dqblk32 { - __u32 dqb_bhardlimit; - __u32 dqb_bsoftlimit; - __u32 dqb_curblocks; - __u32 dqb_ihardlimit; - __u32 dqb_isoftlimit; - __u32 dqb_curinodes; - __kernel_time_t32 dqb_btime; - __kernel_time_t32 dqb_itime; -}; - extern asmlinkage int sys_quotactl(int cmd, const char *special, int id, caddr_t addr); -asmlinkage int sys32_quotactl(int cmd, const char *special, int id, unsigned long addr) +#ifdef CONFIG_QIFACE_COMPAT +#ifdef CONFIG_QIFACE_V1 +struct user_dqblk32 { + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u32 dqb_curblocks; + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v1c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V1_GETQUOTA +#define Q_COMP_SETQUOTA Q_V1_SETQUOTA +#define Q_COMP_SETQLIM Q_V1_SETQLIM +#define Q_COMP_SETUSE Q_V1_SETUSE +#else +struct user_dqblk32 { + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u64 dqb_curspace; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v2c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V2_GETQUOTA +#define Q_COMP_SETQUOTA Q_V2_SETQUOTA +#define Q_COMP_SETQLIM Q_V2_SETQLIM +#define Q_COMP_SETUSE Q_V2_SETUSE +#endif + +asmlinkage int sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) { int cmds = cmd >> SUBCMDSHIFT; int err; - struct dqblk d; + comp_dqblk_t d; mm_segment_t old_fs; char *spec; switch (cmds) { - case Q_GETQUOTA: - break; - case Q_SETQUOTA: - case Q_SETUSE: - case Q_SETQLIM: - if (copy_from_user (&d, (struct dqblk32 *)addr, - sizeof (struct dqblk32))) - return -EFAULT; - d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime; - d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime; - break; + case Q_COMP_GETQUOTA: + break; + case Q_COMP_SETQUOTA: + case Q_COMP_SETUSE: + case Q_COMP_SETQLIM: + if (copy_from_user(&d, (struct user_dqblk32 *)addr, + sizeof (struct user_dqblk32))) + return -EFAULT; + d.dqb_itime = ((struct user_dqblk32 *)&d)->dqb_itime; + d.dqb_btime = ((struct user_dqblk32 *)&d)->dqb_btime; + break; default: - return sys_quotactl(cmd, special, - id, (caddr_t)addr); + return sys_quotactl(cmd, special, id, (__kernel_caddr_t)addr); } spec = getname (special); err = PTR_ERR(spec); if (IS_ERR(spec)) return err; - old_fs = get_fs (); + old_fs = get_fs(); set_fs (KERNEL_DS); - err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d); + err = sys_quotactl(cmd, (const char *)spec, id, (__kernel_caddr_t)&d); set_fs (old_fs); putname (spec); - if (cmds == Q_GETQUOTA) { + if (err) + return err; + if (cmds == Q_COMP_GETQUOTA) { __kernel_time_t b = d.dqb_btime, i = d.dqb_itime; - ((struct dqblk32 *)&d)->dqb_itime = i; - ((struct dqblk32 *)&d)->dqb_btime = b; - if (copy_to_user ((struct dqblk32 *)addr, &d, - sizeof (struct dqblk32))) + ((struct user_dqblk32 *)&d)->dqb_itime = i; + ((struct user_dqblk32 
*)&d)->dqb_btime = b; + if (copy_to_user ((struct user_dqblk32 *)addr, &d, + sizeof (struct user_dqblk32))) return -EFAULT; } - return err; + return 0; } +#else +/* No conversion needed for new interface */ +asmlinkage int sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) +{ + return sys_quotactl(cmd, special, id, addr); +} +#endif + static inline int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf) { int err; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sparc64/mm/fault.c linux.21rc5-ac2/arch/sparc64/mm/fault.c --- linux.21rc5/arch/sparc64/mm/fault.c 2003-05-28 15:08:11.000000000 +0100 +++ linux.21rc5-ac2/arch/sparc64/mm/fault.c 2003-04-22 16:44:36.000000000 +0100 @@ -373,8 +373,6 @@ if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (!(fault_code & FAULT_CODE_WRITE)) { /* Non-faulting loads shouldn't expand stack. */ insn = get_fault_insn(regs, insn); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/sparc64/solaris/timod.c linux.21rc5-ac2/arch/sparc64/solaris/timod.c --- linux.21rc5/arch/sparc64/solaris/timod.c 2003-05-28 15:08:11.000000000 +0100 +++ linux.21rc5-ac2/arch/sparc64/solaris/timod.c 2003-04-22 16:44:36.000000000 +0100 @@ -149,7 +149,9 @@ struct socket *sock; SOLD("wakeing socket"); + read_lock(¤t->files->file_lock); sock = ¤t->files->fd[fd]->f_dentry->d_inode->u.socket_i; + read_unlock(¤t->files->file_lock); wake_up_interruptible(&sock->wait); read_lock(&sock->sk->callback_lock); if (sock->fasync_list && !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) @@ -163,7 +165,9 @@ struct sol_socket_struct *sock; SOLD("queuing primsg"); + read_lock(¤t->files->file_lock); sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data; + read_unlock(¤t->files->file_lock); it->next = sock->pfirst; sock->pfirst = it; if (!sock->plast) @@ -177,7 +181,9 @@ struct sol_socket_struct *sock; SOLD("queuing primsg at end"); + read_lock(¤t->files->file_lock); sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data; + read_unlock(¤t->files->file_lock); it->next = NULL; if (sock->plast) sock->plast->next = it; @@ -355,7 +361,11 @@ (int (*)(int, unsigned long *))SYS(socketcall); int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int) = (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int))SYS(sendto); - filp = current->files->fd[fd]; + read_lock(¤t->files->file_lock); + filp = fcheck(fd); + read_unlock(¤t->files->file_lock); + if (!filp) + return -EBADF; ino = filp->f_dentry->d_inode; sock = (struct sol_socket_struct *)filp->private_data; SOLD("entry"); @@ -636,7 +646,11 @@ SOLD("entry"); SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p)); - filp = current->files->fd[fd]; + read_lock(¤t->files->file_lock); + filp = fcheck(fd); + read_unlock(¤t->files->file_lock); + if (!filp) + return -EBADF; ino = filp->f_dentry->d_inode; sock = (struct sol_socket_struct *)filp->private_data; SOLDD(("%p %p\n", sock->pfirst, sock->pfirst ? 
sock->pfirst->next : NULL)); @@ -847,7 +861,9 @@ lock_kernel(); if(fd >= NR_OPEN) goto out; - filp = current->files->fd[fd]; + read_lock(¤t->files->file_lock); + filp = fcheck(fd); + read_unlock(¤t->files->file_lock); if(!filp) goto out; ino = filp->f_dentry->d_inode; @@ -914,7 +930,9 @@ lock_kernel(); if(fd >= NR_OPEN) goto out; - filp = current->files->fd[fd]; + read_lock(¤t->files->file_lock); + filp = fcheck(fd); + read_unlock(¤t->files->file_lock); if(!filp) goto out; ino = filp->f_dentry->d_inode; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/arch/x86_64/ia32/ia32_ioctl.c linux.21rc5-ac2/arch/x86_64/ia32/ia32_ioctl.c --- linux.21rc5/arch/x86_64/ia32/ia32_ioctl.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/arch/x86_64/ia32/ia32_ioctl.c 2003-05-28 15:12:29.000000000 +0100 @@ -4555,7 +4555,7 @@ "cmd(%08x){%s} arg(%08x) on %s\n", current->comm, current->pid, (int)fd, (unsigned int)cmd, buf, (unsigned int)arg, - fn); + IS_ERR(fn) ? "???" : fn); if (path) free_page((unsigned long)path); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/CREDITS linux.21rc5-ac2/CREDITS --- linux.21rc5/CREDITS 2003-05-28 15:08:49.000000000 +0100 +++ linux.21rc5-ac2/CREDITS 2003-05-09 18:09:58.000000000 +0100 @@ -2580,6 +2580,12 @@ S: 7000 Stuttgart 50 S: Germany +N: Andrew Rodland +E: arodland@linuxguru.net +D: That crazy morse code thing. +P: D2B1 5215 B1B9 18E0 B6AD 6ADD 4373 165F 1770 BD5C +S: Pennsylvania, USA + N: Christoph Rohland E: hans-christoph.rohland@sap.com E: ch.rohland@gmx.net diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/00-INDEX linux.21rc5-ac2/Documentation/00-INDEX --- linux.21rc5/Documentation/00-INDEX 2003-05-28 15:08:19.000000000 +0100 +++ linux.21rc5-ac2/Documentation/00-INDEX 2003-04-22 16:44:36.000000000 +0100 @@ -52,6 +52,8 @@ - directory with information on the CD-ROM drivers that Linux has. computone.txt - info on Computone Intelliport II/Plus Multiport Serial Driver +cpufreq + - describes the CPU frequency and voltage scaling support cpqarray.txt - info on using Compaq's SMART2 Intelligent Disk Array Controllers. devices.txt diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/cciss.txt linux.21rc5-ac2/Documentation/cciss.txt --- linux.21rc5/Documentation/cciss.txt 2003-05-28 15:08:49.000000000 +0100 +++ linux.21rc5-ac2/Documentation/cciss.txt 2003-04-25 13:31:16.000000000 +0100 @@ -11,7 +11,8 @@ * SA 5312 * SA 641 * SA 642 - * SA 6400 + * SA 6402 + * SA 6404/256 If nodes are not already created in the /dev/cciss directory diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/Changes linux.21rc5-ac2/Documentation/Changes --- linux.21rc5/Documentation/Changes 2003-05-28 15:08:18.000000000 +0100 +++ linux.21rc5-ac2/Documentation/Changes 2003-04-28 17:52:08.000000000 +0100 @@ -56,6 +56,7 @@ o e2fsprogs 1.25 # tune2fs o jfsutils 1.0.12 # fsck.jfs -V o reiserfsprogs 3.6.3 # reiserfsck -V 2>&1|grep reiserfsprogs +o xfsprogs 2.1.0 # xfs_db -V o pcmcia-cs 3.1.21 # cardmgr -V o PPP 2.4.0 # pppd --version o isdn4k-utils 3.1pre1 # isdnctrl 2>&1|grep version @@ -190,6 +191,17 @@ versions of mkreiserfs, resize_reiserfs, debugreiserfs and reiserfsck. These utils work on both i386 and alpha platforms. +Xfsprogs +-------- + +The latest version of xfsprogs contains mkfs.xfs, xfs_db, and the +xfs_repair utilities, among others, for the XFS filesystem. 
It is +architecture independent and any version from 2.0.0 onward should +work correctly with this version of the XFS kernel code. For the new +(v2) log format that has better support for stripe-size aligning on +LVM and MD devices at least xfsprogs 2.1.0 is needed. + + Pcmcia-cs --------- @@ -327,6 +339,10 @@ ------------- o +Xfsprogs +-------- +o + LVM toolset ----------- o diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/Configure.help linux.21rc5-ac2/Documentation/Configure.help --- linux.21rc5/Documentation/Configure.help 2003-05-28 15:08:49.000000000 +0100 +++ linux.21rc5-ac2/Documentation/Configure.help 2003-05-28 15:31:54.000000000 +0100 @@ -262,6 +262,12 @@ If you don't have this computer, you may safely say N. +Clustered APIC support +CONFIG_X86_CLUSTERED_APIC + This option is required to support systems with more than 8 logical CPUs. + + If you don't have such a computer, you may safely say N. + IO-APIC support on uniprocessors CONFIG_X86_UP_IOAPIC An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an @@ -464,8 +470,14 @@ The initial RAM disk is a RAM disk that is loaded by the boot loader (loadlin or lilo) and that is mounted as root before the normal boot procedure. It is typically used to load modules needed to mount the - "real" root file system, etc. See - for details. + "real" root file system, etc. + + Due to a problem elsewhere in the kernel, initial RAM disks _must_ + have the file system on them created with a 1024 byte block size. + If any other value is used, the kernel will be unable to mount the + RAM disk at boot time, causing a kernel panic. + + See for details. Loopback device support CONFIG_BLK_DEV_LOOP @@ -1839,6 +1851,20 @@ want), say M here and read . The module will be called lvm-mod.o. +Device-mapper support +CONFIG_BLK_DEV_DM + Device-mapper is a low level volume manager. It works by allowing + people to specify mappings for ranges of logical sectors. Various + mapping types are available, in addition people may write their own + modules containing custom mappings if they wish. + + Higher level volume managers such as LVM2 use this driver. + + If you want to compile this as a module, say M here and read + . The module will be called dm-mod.o. + + If unsure, say N. + Multiple devices driver support (RAID and LVM) CONFIG_MD Support multiple physical spindles through a single logical device. @@ -3592,11 +3618,10 @@ Generic SiS support CONFIG_AGP_SIS - This option gives you AGP support for the GLX component of the "soon - to be released" XFree86 4.x on Silicon Integrated Systems [SiS] - chipsets. + This option gives you AGP support for the GLX component of + XFree86 4.x on Silicon Integrated Systems [SiS] chipsets. - Note that 5591/5592 AGP chipsets are NOT supported. + The 5591/5592 AGP bridge is only generically supported. You should say Y here if you use XFree86 3.3.6 or 4.x and want to use GLX or DRI. If unsure, say N. @@ -3606,6 +3631,14 @@ Say Y here to support the Serverworks AGP card. See for product descriptions and images. +NVIDIA chipset support +CONFIG_AGP_NVIDIA + This option gives you AGP support for the GLX component of the + XFree86 4.x on NVIDIA nForce/nForce2 chipsets. + + You should say Y here if you use XFree86 3.3.6 or 4.x and want to + use GLX or DRI. If unsure, say N. + ALI chipset support CONFIG_AGP_ALI This option gives you AGP support for the GLX component of the @@ -4760,12 +4793,11 @@ messages. Most people will want to say N here. If unsure, you will also want to say N. 
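The initial RAM disk entry above warns that the file system on an initrd must be created with a 1024 byte block size. As a minimal sketch of building a compliant image from user space (the file name and 4 MB size are arbitrary examples, not part of this patch):

   Example: dd if=/dev/zero of=/tmp/initrd.img bs=1k count=4096
            mke2fs -F -b 1024 /tmp/initrd.img

The -b 1024 switch forces the required block size; with any other value the kernel will be unable to mount the RAM disk at boot and will panic, as described above.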
-Matrox unified accelerated driver CONFIG_FB_MATROX - Say Y here if you have a Matrox Millennium, Millennium II, Mystique, - Mystique 220, Productiva G100, Mystique G200, Millennium G200, - Matrox G400, G450 or G550 card in your box. At this time, support for - the G-series digital output is almost non-existant. + Say Y here if you have a Matrox Millennium, Matrox Millennium II, + Matrox Mystique, Matrox Mystique 220, Matrox Productiva G100, Matrox + Mystique G200, Matrox Millennium G200, Matrox Marvel G200 video, + Matrox G400, G450 or G550 card in your box. This driver is also available as a module ( = code which can be inserted and removed from the running kernel whenever you want). @@ -4776,7 +4808,6 @@ module load time. The parameters look like "video=matrox:XXX", and are described in . -Matrox Millennium I/II support CONFIG_FB_MATROX_MILLENIUM Say Y here if you have a Matrox Millennium or Matrox Millennium II video card. If you select "Advanced lowlevel driver options" below, @@ -4784,7 +4815,6 @@ packed pixel, 24 bpp packed pixel and 32 bpp packed pixel. You can also use font widths different from 8. -Matrox Mystique support CONFIG_FB_MATROX_MYSTIQUE Say Y here if you have a Matrox Mystique or Matrox Mystique 220 video card. If you select "Advanced lowlevel driver options" below, @@ -4792,27 +4822,46 @@ packed pixel and 32 bpp packed pixel. You can also use font widths different from 8. -Matrox G100/G200/G400/G450/G550 support -CONFIG_FB_MATROX_G100 - Say Y here if you have a Matrox G100, G200, G400, G450, or G550 - based video card. If you select "Advanced lowlevel driver options", - you should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp - packed pixel and 32 bpp packed pixel. You can also use font widths +CONFIG_FB_MATROX_G450 + Say Y here if you have a Matrox G100, G200, G400, G450 or G550 based + video card. If you select "Advanced lowlevel driver options", you + should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp packed + pixel and 32 bpp packed pixel. You can also use font widths different from 8. If you need support for G400 secondary head, you must first say Y to "I2C support" and "I2C bit-banging support" in the character devices section, and then to "Matrox I2C support" and "G400 second head - support" here in the framebuffer section. + support" here in the framebuffer section. G450/G550 secondary head + and digital output are supported without additional modules. - If you have G550, you must also compile support for G450/G550 secondary - head into kernel, otherwise picture will be shown only on the output you - are probably not using... + The driver starts in monitor mode. You must use the matroxset tool + (available at ) to + swap primary and secondary head outputs, or to change output mode. + Secondary head driver always starts in 640x480 resolution and you + must use fbset to change it. - If you need support for G450 or G550 secondary head, say Y to - "Matrox G450/G550 second head support" below. + Do not forget that second head supports only 16 and 32 bpp + packed pixels, so it is a good idea to compile them into the kernel + too. You can use only some font widths, as the driver uses generic + painting procedures (the secondary head does not use acceleration + engine). + + G450/G550 hardware can display TV picture only from secondary CRTC, + and it performs no scaling, so picture must have 525 or 625 lines. + +CONFIG_FB_MATROX_G100A + Say Y here if you have a Matrox G100, G200 or G400 based + video card.
If you select "Advanced lowlevel driver options", you + should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp packed + pixel and 32 bpp packed pixel. You can also use font widths + different from 8. + + If you need support for G400 secondary head, you must first say Y to + "I2C support" and "I2C bit-banging support" in the character devices + section, and then to "Matrox I2C support" and "G400 second head + support" here in the framebuffer section. -Matrox I2C support CONFIG_FB_MATROX_I2C This driver creates I2C buses which are needed for accessing the DDC (I2C) bus present on all Matroxes, an I2C bus which @@ -4826,7 +4875,6 @@ If you compile it as module, it will create a module named i2c-matroxfb.o. -Matrox G400 second head support CONFIG_FB_MATROX_MAVEN WARNING !!! This support does not work with G450 !!! @@ -4855,32 +4903,14 @@ painting procedures (the secondary head does not use acceleration engine). -Matrox G450 second head support -CONFIG_FB_MATROX_G450 - Say Y or M here if you want to use a secondary head (meaning two - monitors in parallel) on G450, or if you are using analog output - of G550. - - If you compile it as module, two modules are created, - matroxfb_crtc2.o and matroxfb_g450.o. Both modules are needed if you - want two independent display devices. - - The driver starts in monitor mode and currently does not support - output in TV modes. You must use the matroxset tool (available - at ) to swap - primary and secondary head outputs. Secondary head driver always - start in 640x480 resolution and you must use fbset to change it. - - Note on most G550 cards the analog output is the secondary head, - so you will need to say Y here to use it. - - Also do not forget that second head supports only 16 and 32 bpp - packed pixels, so it is a good idea to compile them into the kernel - too. You can use only some font widths, as the driver uses generic - painting procedures (the secondary head does not use acceleration - engine). - -Matrox unified driver multihead support +CONFIG_FB_MATROX_PROC + Say Y or M here if you want to access some information about driver + state through the /proc interface. + + You should download the matrox_pins tool (available at + ) to get human + readable output. + CONFIG_FB_MATROX_MULTIHEAD Say Y here if you have more than one (supported) Matrox device in your computer and you want to use all of them for different monitors @@ -4982,19 +5012,36 @@ SIS acceleration CONFIG_FB_SIS - This is the frame buffer device driver for the SiS 630 and 640 Super - Socket 7 UMA cards. Specs available at . + This is the frame buffer device driver for SiS VGA controllers. + + This driver is required for SiS DRM. + + See for detailed + documentation. + + Hardware specs available at . SIS 630/540/730 support CONFIG_FB_SIS_300 - This is the frame buffer device driver for the SiS 630 and related - Super Socket 7 UMA cards. Specs available at - . + This is the frame buffer device driver for SiS 300/305/630/730 VGA + controllers. + + This driver is required for SiS DRM. + + See for detailed + documentation. + + Hardware specs available at . SIS 315H/315 support CONFIG_FB_SIS_315 - This is the frame buffer device driver for the SiS 315 graphics - card. Specs available at . + This is the frame buffer device driver for SiS 315 (including 315H + and 315PRO), 650, M650, 651, 740 and Xabre VGA controllers. + + See for detailed + documentation. + + Hardware specs available at . IMS Twin Turbo display support CONFIG_FB_IMSTT @@ -5326,6 +5373,19 @@ replacement for kerneld.)
Say Y here and read about configuring it in . +Kernel .config file saved in kernel image +CONFIG_IKCONFIG + This option enables the complete Linux kernel ".config" file contents + to be saved in the kernel (zipped) image file. It provides + documentation of which kernel options are used in a running kernel or + in an on-disk kernel. It can be extracted from the kernel image file + with a script and used as input to rebuild the current kernel or to + build another kernel. Since the kernel image is zipped, using this + option adds approximately 8 KB to a kernel image file. + This option is not available as a module. If you want a separate + file to save the kernel's .config contents, use 'installkernel' or 'cp' + or a similar tool, or just save it in '/lib/modules/'. + ARP daemon support CONFIG_ARPD Normally, the kernel maintains an internal cache which maps IP @@ -7290,47 +7350,40 @@ Adaptec AIC7xxx support CONFIG_SCSI_AIC7XXX - This driver supports all of Adaptec's PCI based SCSI controllers - (not the hardware RAID controllers though) as well as the aic7770 - based EISA and VLB SCSI controllers (the 274x and 284x series). - This is an Adaptec sponsored driver written by Justin Gibbs. It is - intended to replace the previous aic7xxx driver maintained by Doug - Ledford since Doug is no longer maintaining that driver. + This driver supports all of Adaptec's Fast through Ultra 160 PCI + based SCSI controllers as well as the aic7770 based EISA and VLB + SCSI controllers (the 274x and 284x series). For AAA and ARO based + configurations, only SCSI functionality is provided. If you want to compile the driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . The module will be called aic7xxx.o. -Adaptec I2O RAID support -CONFIG_SCSI_DPT_I2O - This driver supports all of Adaptec's I2O based RAID controllers as - well as the DPT SmartRaid V cards. This is an Adaptec maintained - driver by Deanna Bonds. See . - - This driver is also available as a module ( = code which can be - inserted in and removed from the running kernel whenever you want). - If you want to compile it as a module, say M here and read - . The module will be called - dpt_i2o.o. - -Default number of TCQ commands per device +Maximum number of TCQ commands per device CONFIG_AIC7XXX_CMDS_PER_DEVICE Specify the number of commands you would like to allocate per SCSI device when Tagged Command Queueing (TCQ) is enabled on that device. This is an upper bound value for the number of tagged transactions to be used for any device. The aic7xxx driver will automatically - vary this number based on device behaviour. For devices with a + vary this number based on device behavior. For devices with a fixed maximum, the driver will eventually lock to this maximum and display a console message indicating this value. - Note: Unless you experience some type of device failure, the default - value, no enforced limit, should work for you. + Due to resource allocation issues in the Linux SCSI mid-layer, using + a high number of commands per device may result in memory allocation + failures when many devices are attached to the system. For this reason, + the default is set to 32. Higher values may result in higher performance + on some devices. The upper bound is 253. 0 disables tagged queueing. - Default: 253 + Per device tag depth can be controlled via the kernel command line + "tag_info" option. See drivers/scsi/aic7xxx/README.aic7xxx + for details.
-Delay in seconds after SCSI bus reset + Default: 32 + +Initial bus reset delay in milliseconds CONFIG_AIC7XXX_RESET_DELAY_MS The number of milliseconds to delay after an initial bus reset. The bus settle delay following all error recovery actions is @@ -7338,16 +7391,43 @@ Default: 15000 (15 seconds) -Build Adapter Firmware with Kernel Build +Probe for EISA and VL AIC7XXX Adapters +CONFIG_AIC7XXX_PROBE_EISA_VL + Probe for EISA and VLB Aic7xxx controllers. In many newer systems, + the invasive probes necessary to detect these controllers can cause + other devices to fail. For this reason, the non-PCI probe code is + disabled by default. The current value of this option can be "toggled" + via the no_probe kernel command line option. + CONFIG_AIC7XXX_BUILD_FIRMWARE This option should only be enabled if you are modifying the firmware source to the aic7xxx driver and wish to have the generated firmware include files updated during a normal kernel build. The assembler for the firmware requires lex and yacc or their equivalents, as well as the db v1 library. You may have to install additional packages - or modify the assembler make file or the files it includes if your + or modify the assembler Makefile or the files it includes if your build environment is different than that of the author. +Compile in Debugging Code +CONFIG_AIC7XXX_DEBUG_ENABLE + Compile in aic7xxx debugging code that can be useful in diagnosing + driver errors. + +Debug code enable mask (2048 for all debugging) +CONFIG_AIC7XXX_DEBUG_MASK + Bit mask of debug options that is only valid if the + CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask + are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the + variable ahc_debug in that file to find them. + + Default: 0 + +Decode registers during diagnostics +CONFIG_AIC7XXX_REG_PRETTY_PRINT + Compile in register value tables for the output of expanded register + contents in diagnostics. This makes it much easier to understand debug + output without having to refer to a data book and/or the aic7xxx.reg file. + Old Adaptec AIC7xxx support CONFIG_SCSI_AIC7XXX_OLD WARNING This driver is an older aic7xxx driver and is no longer @@ -7456,6 +7536,83 @@ If unsure, say N. +CONFIG_SCSI_AIC79XX + This driver supports all of Adaptec's Ultra 320 PCI-X based SCSI controllers. + +CONFIG_AIC79XX_CMDS_PER_DEVICE 32 + Specify the number of commands you would like to allocate per SCSI + device when Tagged Command Queueing (TCQ) is enabled on that device. + + This is an upper bound value for the number of tagged transactions + to be used for any device. The aic7xxx driver will automatically + vary this number based on device behavior. For devices with a + fixed maximum, the driver will eventually lock to this maximum + and display a console message indicating this value. + + Due to resource allocation issues in the Linux SCSI mid-layer, using + a high number of commands per device may result in memory allocation + failures when many devices are attached to the system. For this reason, + the default is set to 32. Higher values may result in higher performance + on some devices. The upper bound is 253. + + Per device tag depth can be controlled via the kernel command line + "tag_info" option. See drivers/scsi/aic7xxx/README.aic79xx + for details. + + Default: 32 + +CONFIG_AIC79XX_RESET_DELAY_MS 15000 + The number of milliseconds to delay after an initial bus reset.
+ The bus settle delay following all error recovery actions is + dictated by the SCSI layer and is not affected by this value. + + Default: 15000 (15 seconds) + +CONFIG_AIC79XX_BUILD_FIRMWARE + This option should only be enabled if you are modifying the firmware + source to the aic7xxx driver and wish to have the generated firmware + include files updated during a normal kernel build. The assembler + for the firmware requires lex and yacc or their equivalents, as well + as the db v1 library. You may have to install additional packages + or modify the assembler Makefile or the files it includes if your + build environment is different than that of the author. + +CONFIG_AIC79XX_ENABLE_RD_STRM + Read Streaming is a U320 protocol option that should enhance performance. + Early U320 drive firmware actually performs slower with read streaming + enabled so it is disabled by default. Read Streaming can be configured + in much the same way as tagged queueing using the "rd_strm" command line + option. See drivers/scsi/aic7xxx/README.aic79xx for details. + +CONFIG_AIC79XX_DEBUG_ENABLE + Compile in aic79xx debugging code that can be useful in diagnosing + driver errors. + +CONFIG_AIC79XX_DEBUG_MASK + Bit mask of debug options that is only valid if the + CONFIG_AIC79XX_DEBUG_ENABLE option is enabled. The bits in this mask + are defined in the drivers/scsi/aic7xxx/aic79xx.h - search for the + variable ahd_debug in that file to find them. + + Default: 0 + +CONFIG_AIC79XX_REG_PRETTY_PRINT + Compile in register value tables for the output of expanded register + contents in diagnostics. This makes it much easier to understand debug + output without having to refer to a data book and/or the aic7xxx.reg file. + +Adaptec I2O RAID support +CONFIG_SCSI_DPT_I2O + This driver supports all of Adaptec's I2O based RAID controllers as + well as the DPT SmartRaid V cards. This is an Adaptec maintained + driver by Deanna Bonds. See . + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + If you want to compile it as a module, say M here and read + . The module will be called + dpt_i2o.o. + IBM ServeRAID support CONFIG_SCSI_IPS This is support for the IBM ServeRAID hardware RAID controllers. @@ -8399,16 +8556,28 @@ say M here and read . The module will be called AM53C974.o. -AMI MegaRAID support +AMI MegaRAID support (old driver) CONFIG_SCSI_MEGARAID This driver supports the AMI MegaRAID 418, 428, 438, 466, 762, 490 - and 467 SCSI host adapters. + and 467 SCSI host adapters. This is the old and very heavily tested + driver but lacks features like clustering. If you want to compile this driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . The module will be called megaraid.o. +AMI MegaRAID support (new driver) +CONFIG_SCSI_MEGARAID + This driver supports the AMI MegaRAID 418, 428, 438, 466, 762, 490 + and 467 SCSI host adapters. This is the newer, less tested, but more + featureful driver. + + If you want to compile this driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read . The module + will be called megaraid2.o. + Intel/ICP (former GDT SCSI Disk Array) RAID Controller support CONFIG_SCSI_GDTH Formerly called GDT SCSI Disk Array Controller Support.
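Both the CONFIG_AIC7XXX_CMDS_PER_DEVICE and CONFIG_AIC79XX_CMDS_PER_DEVICE entries above defer to a "tag_info" kernel command line option for per-device tag depth. The authoritative grammar is in drivers/scsi/aic7xxx/README.aic7xxx and README.aic79xx; as an illustrative sketch only (the depth values here are arbitrary examples), a boot parameter of the form

   Example: aic7xxx=tag_info:{{16,16,16,16,16,16,16,16}}

would cap each of the eight targets on the first controller at 16 tagged commands, within the compiled-in upper bound.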
@@ -9149,7 +9318,7 @@ Aironet 4500/4800 I365 broken support CONFIG_AIRONET4500_I365 If you have a PCMCIA Aironet 4500/4800 card which you want to use - without the standard PCMCIA cardservices provided by the pcmcia-cs + without the standard PCMCIA card services provided by the pcmcia-cs package, say Y here. This is not recommended, so say N. Aironet 4500/4800 PCMCIA support @@ -10556,6 +10725,15 @@ If unsure, say N here. +Raw HDLC Ethernet device support +CONFIG_HDLC_RAW_ETH + Say Y to this option if you want generic HDLC driver to support + raw HDLC Ethernet device emulation over WAN (Wide Area Network) + connections. + You will need it for Ethernet over HDLC bridges. + + If unsure, say N here. + Cisco HDLC support CONFIG_HDLC_CISCO Say Y to this option if you want generic HDLC driver to support @@ -10570,13 +10748,6 @@ If unsure, say N here. -Frame-Relay bridging support -CONFIG_HDLC_FR_BRIDGE - Say Y to this option if you want generic HDLC driver to support - bridging LAN frames over Frame-Relay links. - - If unsure, say N here. - Synchronous Point-to-Point Protocol (PPP) support CONFIG_HDLC_PPP Say Y to this option if you want generic HDLC driver to support @@ -12767,12 +12938,44 @@ Quota support CONFIG_QUOTA If you say Y here, you will be able to set per user limits for disk - usage (also called disk quotas). Currently, it works only for the - ext2 file system. You need additional software in order to use quota - support; for details, read the Quota mini-HOWTO, available from + usage (also called disk quotas). Currently, it works for the + ext2, ext3, and reiserfs file systems. You need additional software + in order to use quota support (you can download sources from + ). For further details, read + the Quota mini-HOWTO, available from . Probably the quota support is only useful for multi user systems. If unsure, say N. +Old quota format support +CONFIG_QFMT_V1 + This quota format was (is) used by kernels earlier than 2.4.??. If + you have quota working and you don't want to convert to new quota + format say Y here. + +VFS v0 quota format support +CONFIG_QFMT_V2 + This quota format allows using quotas with 32-bit UIDs/GIDs. If you + need this functionality say Y here. Note that you will need the latest + quota utilities for the new quota format with this kernel. + +Compatible quota interfaces +CONFIG_QIFACE_COMPAT + This option will enable the old quota interface in the kernel. + If you have old quota tools (version <= 3.04) and you don't want to + upgrade them say Y here. + +Original quota interface +CONFIG_QIFACE_V1 + This is the oldest quota interface. It was used for old quota format. + If you have old quota tools and you use old quota format choose this + interface (if unsure, this interface is the best one to choose). + +VFS v0 quota interface +CONFIG_QIFACE_V2 + This quota interface was used by VFS v0 quota format. If you need + support for VFS v0 quota format (e.g. you're using quota on ReiserFS) + and you don't want to upgrade quota tools, choose this interface. + Memory Technology Device (MTD) support CONFIG_MTD Memory Technology Devices are flash, RAM and similar chips, often @@ -15380,7 +15583,7 @@ debugging output from the driver. This is unlike previous versions of the driver, where enabling this option would turn on debugging output automatically. - + Example: mount -t befs /dev/hda2 /mnt -o debug @@ -15783,6 +15986,30 @@ If unsure, say N.
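The quota interface entries above describe the same split that the sys32_quotactl() compatibility code earlier in this patch has to cope with: the command word packs a sub-command in its high bits, and a format-specific struct dqblk travels through the final pointer argument. A minimal user space sketch against the old (v1) interface, assuming CONFIG_QIFACE_V1 and a libc that still ships the v1 struct dqblk; the device path is an example:

   #include <sys/types.h>
   #include <sys/quota.h>
   #include <stdio.h>
   #include <unistd.h>

   int main(void)
   {
           struct dqblk q;

           /* QCMD() packs the sub-command and quota type into the
            * high and low bits of the command word. */
           if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/hda1",
                        getuid(), (caddr_t) &q) != 0) {
                   perror("quotactl");
                   return 1;
           }
           printf("blocks used: %u soft limit: %u hard limit: %u\n",
                  q.dqb_curblocks, q.dqb_bsoftlimit, q.dqb_bhardlimit);
           return 0;
   }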
+Allow direct I/O on files in NFS +CONFIG_NFS_DIRECTIO + There are important applications whose performance or correctness + depends on uncached access to file data. Database clusters (multiple + copies of the same instance running on separate hosts) implement their + own cache coherency protocol that subsumes the NFS cache protocols. + Applications that process datasets considerably larger than the client's + memory do not always benefit from a local cache. A streaming video + server, for instance, has no need to cache the contents of a file. + + This option enables applications to perform direct I/O on files in NFS + file systems using the O_DIRECT open() flag. When O_DIRECT is set for + files, their data is not cached in the system's page cache. Direct + read and write operations are aligned to block boundaries. Data is + moved to and from user-level application buffers directly. + + Unless your program is designed to use O_DIRECT properly, you are much + better off allowing the NFS client to manage caching for you. Misusing + O_DIRECT can cause poor server performance or network storms. This + kernel build option defaults OFF to avoid exposing system administrators + unwittingly to a potentially hazardous feature. + + If unsure, say N. + Root file system on NFS CONFIG_ROOT_NFS If you want your Linux box to mount its whole root file system (the @@ -16130,6 +16357,78 @@ Say Y here if you want to try writing to UFS partitions. This is experimental, so you should back up your UFS partitions beforehand. +XFS filesystem support +CONFIG_XFS_FS + XFS is a high performance journaling filesystem which originated + on the SGI IRIX platform. It is completely multi-threaded, can + support large files and large filesystems, extended attributes, + variable block sizes, is extent based, and makes extensive use of + Btrees (directories, extents, free space) to aid both performance + and scalability. + + Refer to the documentation at + for complete details. This implementation is on-disk compatible + with the IRIX version of XFS. + + If you want to compile this file system as a module ( = code which + can be inserted in and removed from the running kernel whenever you + want), say M here and read . The + module will be called xfs.o. Be aware, however, that if the file + system of your root partition is compiled as a module, you'll need + to use an initial ramdisk (initrd) to boot. + +Quota support +CONFIG_XFS_QUOTA + If you say Y here, you will be able to set limits for disk usage on + a per user and/or per group basis under XFS. XFS considers quota + information as filesystem metadata and uses journaling to provide a + higher level guarantee of consistency. The on-disk data format for + quota is also compatible with the IRIX version of XFS, allowing a + filesystem to be migrated between Linux and IRIX without any need + for conversion. + + If unsure, say N. More comprehensive documentation can be found in + README.quota in the xfsprogs package. XFS quota can be used either + with or without the generic quota support enabled (CONFIG_QUOTA) - + they are completely independent subsystems. + +Realtime support (EXPERIMENTAL) +CONFIG_XFS_RT + If you say Y here you will be able to mount and use XFS filesystems + which contain a realtime subvolume. The realtime subvolume is a + separate area of disk space where only file data is stored. The + realtime subvolume is designed to provide very deterministic + data rates suitable for media streaming applications. 
+ + See the xfs man page in section 5 for a bit more information. + + This feature is unsupported at this time, is not yet fully + functional, and may cause serious problems. + + If unsure, say N. + +Debugging support (EXPERIMENTAL) +CONFIG_XFS_DEBUG + Say Y here to get an XFS build with many debugging features, + including ASSERT checks, function wrappers around macros, + and extra sanity-checking functions in various code paths. + + Note that the resulting code will be HUGE and SLOW, and probably + not useful unless you are debugging a particular problem. + + Say N unless you are an XFS developer, or play one on TV. + +Pagebuf debugging support (EXPERIMENTAL) +CONFIG_PAGEBUF_DEBUG + Say Y here to get an XFS build which may help you debug pagebuf + problems. Enabling this option will attach tracing information + to pagebufs, which can be read with the kdb kernel debugger. + + Note that you will also have to enable the sysctl in + /proc/sys/vm/pagebuf/debug for this to work. + + Say N unless you're interested in debugging pagebuf. + Advanced partition selection CONFIG_PARTITION_ADVANCED Say Y here if you would like to use hard disks under Linux which @@ -16179,7 +16478,7 @@ Say Y here if you would like to use hard disks under Linux which were partitioned on a Macintosh. -Windows Logical Disk Manager (Dynamic Disk) support (EXPERIMENTAL) +Windows Logical Disk Manager (Dynamic Disk) support CONFIG_LDM_PARTITION Say Y here if you would like to use hard disks under Linux which were partitioned using Windows 2000's or XP's Logical Disk Manager. @@ -16194,8 +16493,7 @@ Normal partitions are now called Basic Disks under Windows 2000 and XP. - Technical documentation to accompany this driver is available from: - . + For a fuller description read . If unsure, say N. @@ -16268,8 +16566,9 @@ Intel EFI GUID partition support CONFIG_EFI_PARTITION Say Y here if you would like to use hard disks under Linux which - were partitioned using EFI GPT. Presently only useful on the - IA-64 platform. + were partitioned using EFI GPT. This is the default partition + scheme on IA64, and can be used on other platforms when + large block device (64-bit block address) support is desired. Ultrix partition table support CONFIG_ULTRIX_PARTITION @@ -17111,17 +17410,36 @@ HIL keyboard support CONFIG_HIL The "Human Interface Loop" is a older, 8-channel USB-like controller - used in Hewlett Packard PA-RISC based machines. There are a few - cases where it is seen on PC/MAC architectures as well, usually also - manufactured by HP. This driver is based off MACH and BSD drivers, - and implements support for a keyboard attached to the HIL port. + used in several Hewlett Packard models. This driver is based off + MACH and BSD drivers, and implements support for a keyboard attached + to the HIL port, but not for any other types of HIL input devices + like mice or tablets. However, it has been thoroughly tested and is + stable. + Full support for the USB-like functions and non-keyboard channels of - the HIL is not provided for in this driver. There are vestiges of - mouse support in the driver, but it is probably not working. The - necessary hardware documentation to fully support the HIL controller - and interface it to the linux-input API is lacking. + the HIL is currently being added to the PA-RISC port and will + be backported to work on the m68k port as well. + + Enable this option if you intend to use a HIL keyboard as your + primary keyboard and/or do not wish to test the new HIL driver. 
- Enable this option if you intend to use a HIL keyboard. +HP System Device Controller support +CONFIG_HP_SDC + This option enables support for the "System Device Controller", + an i8042 carrying microcode to manage a few miscellaneous devices + on some Hewlett Packard systems. The SDC itself contains a 10ms + resolution timer/clock capable of delivering interrupts on a periodic + and one-shot basis. The SDC may also be connected to a battery-backed + real-time clock, a basic audio waveform generator, and an HP-HIL + Master Link Controller serving up to seven input devices. + + By itself this option is rather useless, but enabling it will + enable selection of drivers for the abovementioned devices. + It is, however, incompatible with the old, reliable HIL keyboard + driver, and the new HIL driver is experimental, so if you plan to + use a HIL keyboard as your primary keyboard, you may wish to + keep using that driver until the new HIL drivers have had more + testing. Include IOP (IIfx/Quadra 9x0) ADB driver CONFIG_ADB_IOP @@ -17312,6 +17630,27 @@ If you want to compile this driver as a module, say M here and read . The module will be called pcxx.o. +Cyclades-PC300 support +CONFIG_PC300 + This is a driver for the Cyclades-PC300 synchronous communication + boards. These boards provide synchronous serial interfaces to your + Linux box (interfaces currently available are RS-232/V.35, X.21 and + T1/E1). If you wish to support Multilink PPP, please select the + option below this one and read the file README.mlppp provided by the + PC300 package. + + If you want to compile this as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read Documentation/modules.txt. The module will be + called pc300.o. + + If you haven't heard about it, it's safe to say N. + +Cyclades-PC300 Sync TTY (to MLPPP) support +CONFIG_PC300_MLPPP + Say 'Y' to this option if you are planning to use Multilink PPP over the + PC300 synchronous communication boards. + SDL RISCom/8 card support CONFIG_RISCOM8 This is a driver for the SDL Communications RISCom/8 multiport card, @@ -17431,6 +17770,42 @@ read . The module will be called istallion.o. +PDC software console support +CONFIG_PDC_CONSOLE + Saying Y here will enable the software based PDC console to be + used as the system console. This is useful for machines in + which the hardware based console has not been written yet. The + following steps must be completed to use the PDC console: + + 1. create the device entry (mknod /dev/ttyB0 c 60 0) + 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0 + 3. Add device ttyB0 to /etc/securetty (if you want to log on as + root on this console.) + 4. Change the kernel command console parameter to: console=ttyB0 + +Serial MUX support +CONFIG_SERIAL_MUX + Saying Y here will enable the hardware MUX serial driver for + the Nova and K Class systems. Due to limitations in the 2.4 + serial console driver, the Serial MUX shares the same device + as the PDC software console (Instructions for creating the + /dev/ttyB0 device are listed in the PDC software console + support help). Hopefully the Serial MUX code will share the + /dev/ttyS0 code in new serial console code for 2.6. + +PDC software console support +CONFIG_PDC_CONSOLE + Saying Y here will enable the software based PDC console to be + used as the system console. This is useful for machines in + which the hardware based console has not been written yet. The + following steps must be completed to use the PDC console: + + 1. create the device entry (mknod /dev/ttyB0 c 11 0) + 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0 + 3. Add device ttyB0 to /etc/securetty (if you want to log on as + root on this console.) + 4. Change the kernel command console parameter to: console=ttyB0 + Microgate SyncLink adapter support CONFIG_SYNCLINK Provides support for the SyncLink ISA and PCI multiprotocol serial @@ -17580,6 +17955,14 @@ doing that; to actually get it to happen you need to pass the option "console=lp0" to the kernel at boot time. + Note that kernel messages can get lost if the printer is out of + paper (or off, or unplugged, or too busy..), but this behaviour + can be changed. See drivers/char/lp.c (do this at your own risk). + + If the printer is out of paper (or off, or unplugged, or too busy..) the kernel will stall until the printer is ready again. By defining CONSOLE_LP_STRICT to 0 (at your own risk) you @@ -18598,6 +18981,14 @@ of verbosity. Saying Y enables these statements. This will increase your kernel size by around 50K. +ACPI Relaxed AML Checking +CONFIG_ACPI_RELAXED_AML + If you say `Y' here, the ACPI interpreter will relax its checking + for valid AML and will ignore some AML mistakes, such as off-by-one + errors in region sizes. Some laptops may require this option. In + particular, many Toshiba laptops require this for correct operation + of the AC module. + ACPI Bus Manager CONFIG_ACPI_BUSMGR The ACPI Bus Manager enumerates devices in the ACPI namespace, and @@ -18645,6 +19036,30 @@ This driver handles overheating conditions on laptops. It is HIGHLY recommended, as your laptop CPU may be damaged without it. +ACPI Toshiba Laptop Extras +CONFIG_ACPI_TOSHIBA + This driver adds support for access to certain system settings + on "legacy free" Toshiba laptops. These laptops can be recognized by + their lack of a BIOS setup menu and APM support. + + On these machines, all system configuration is handled through the + ACPI. This driver is required for access to controls not covered + by the general ACPI drivers, such as LCD brightness, video output, + etc. + + This driver differs from the non-ACPI Toshiba laptop driver (located + under "Processor type and features") in several aspects. + Configuration is accessed by reading and writing text files in the + /proc tree instead of by program interface to /dev. Furthermore, no + power management functions are exposed, as those are handled by the + general ACPI drivers. + + More information about this driver is available at + . + + If you have a legacy free Toshiba laptop (such as the Libretto L1 + series), say Y. + Advanced Power Management BIOS support CONFIG_APM APM is a BIOS specification for saving power using several different @@ -19085,6 +19500,15 @@ . The module will be called cpuid.o +x86 BIOS Enhanced Disk Drive support +CONFIG_EDD + Say Y or M here if you want to enable BIOS Enhanced Disk Drive + Services real mode BIOS calls to determine which disk the + BIOS tries to boot from. This information is then exported via /proc. + + This option is experimental, but believed to be safe, + and most disk controller BIOS vendors do not yet implement this feature.
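The cpuid driver mentioned just above (module cpuid.o) exposes CPUID through a character device, conventionally /dev/cpu/N/cpuid: the file offset selects the CPUID level and each read returns the four 32-bit result registers. A minimal sketch, assuming the device node has been created; verify the node path and the 16-byte read convention against your kernel:

   #include <fcntl.h>
   #include <stdio.h>
   #include <unistd.h>

   int main(void)
   {
           unsigned int r[4];      /* eax, ebx, ecx, edx */
           int fd = open("/dev/cpu/0/cpuid", O_RDONLY);

           if (fd < 0) {
                   perror("open");
                   return 1;
           }
           lseek(fd, 0, SEEK_SET);         /* file offset = CPUID level 0 */
           if (read(fd, r, 16) == 16)      /* one read returns eax..edx */
                   printf("max level %u, vendor %.4s%.4s%.4s\n",
                          r[0], (char *) &r[1], (char *) &r[3], (char *) &r[2]);
           close(fd);
           return 0;
   }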
+ SBC-60XX Watchdog Timer CONFIG_60XX_WDT This driver can be used with the watchdog timer found on some @@ -19179,6 +19603,62 @@ Provides an emulation for RTC_UIE which is required by some programs and may improve precision of the generic RTC support in some cases. +Generic Real Time Clock Support +CONFIG_GEN_RTC + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock (or hardware clock) built + into your computer. + + In 2.4 and later kernels this is the only way to set and get rtc + time on m68k systems, so it is highly recommended. + + It reports status information via the file /proc/driver/rtc and its + behaviour is set by various ioctls on /dev/rtc. If you enable the + "extended RTC operation" below it will also provide an emulation + for RTC_UIE which is required by some programs and may improve + precision in some cases. + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module is called genrtc.o. If you want to compile it as a module, + say M here and read Documentation/modules.txt. To load the + module automatically add 'alias char-major-10-135 genrtc' to your + /etc/modules.conf. + +Extended RTC operation +CONFIG_GEN_RTC_X + Provides an emulation for RTC_UIE which is required by some programs + and may improve precision of the generic RTC support in some cases. + Tadpole ANA H8 Support CONFIG_H8 The Hitachi H8/337 is a microcontroller used to deal with the power @@ -23678,24 +24158,28 @@ system console. Available only if 3270 support is compiled in statically. -Support for HWC line mode terminal -CONFIG_HWC - Include support for IBM HWC line-mode terminals. - -Console on HWC line mode terminal -CONFIG_HWC_CONSOLE - Include support for using an IBM HWC line-mode terminal as the Linux +Support for SCLP +CONFIG_SCLP + Include support for the IBM SCLP interface to the service element. + +Support for SCLP line mode terminal +CONFIG_SCLP_TTY + Include support for IBM SCLP line-mode terminals. + +Support for console on SCLP line mode terminal +CONFIG_SCLP_CONSOLE + Include support for using an IBM SCLP line-mode terminal as a Linux system console.
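The CONFIG_GEN_RTC entry earlier in this file says the driver's behaviour is
set by ioctls on /dev/rtc. As a hedged illustration, a userspace program would
read the clock roughly like this (RTC_RD_TIME and struct rtc_time are the
standard definitions from <linux/rtc.h>; error handling is minimal on purpose):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rtc.h>		/* RTC_RD_TIME, struct rtc_time */

	int main(void)
	{
		struct rtc_time tm;
		/* /dev/rtc is the c 10 135 node described above */
		int fd = open("/dev/rtc", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/rtc");
			return 1;
		}
		if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {	/* read hardware clock */
			perror("RTC_RD_TIME");
			close(fd);
			return 1;
		}
		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);
		close(fd);
		return 0;
	}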
-Control Program Identification -CONFIG_HWC_CPI - Allows for Control Program Identification via the HWC interface, - i.e. provides a mean to pass an OS instance name (system name) - to the machine. - - This option should only be selected as a module since the - system name has to be passed as module parameter. The module - will be called hwc_cpi.o. +Control-Program Identification +CONFIG_SCLP_CPI + This option enables the hardware console interface for system + identification. This is commonly used for workload management and + gives you a nice name for the system on the service element. + Please select this option as a module since built-in operation is + completely untested. + You should only select this option if you know what you are doing, + need this feature and intend to run your kernel in LPAR. S/390 tape device support CONFIG_S390_TAPE @@ -23791,6 +24275,20 @@ enabled, you'll be able to toggle chpids logically offline and online. Even if you don't understand what this means, you should say "Y". +Process warning machine checks +CONFIG_MACHCHK_WARNING + Select this option if you want the machine check handler on IBM S/390 or + zSeries to process warning machine checks (e.g. on power failures). + If unsure, say "Y". + +Use chscs for Common I/O +CONFIG_CHSC + Select this option if you want the s390 common I/O layer to use information + obtained by channel subsystem calls. This will enable Linux to process link + failures and resource accessibility events. Moreover, if you have procfs + enabled, you'll be able to toggle chpids logically offline and online. Even + if you don't understand what this means, you should say "Y". + Kernel support for 31 bit ELF binaries CONFIG_S390_SUPPORT Select this option if you want to enable your system kernel to @@ -25942,6 +26440,14 @@ of the BUG call as well as the EIP and oops trace. This aids debugging but costs about 70-100K of memory. +Morse code panics +CONFIG_PANIC_MORSE + Say Y here to receive panic messages in Morse code on your keyboard LEDs and, + optionally, the PC speaker, if available. + The kernel parameter "panicblink" controls this feature; set it to 0 to + disable it, 1 for LEDs only, 2 for the PC speaker, or 3 for both. If you + disable this option, you will receive a steady blink on the LEDs instead. + Include kgdb kernel debugger CONFIG_KGDB Include in-kernel hooks for kgdb, the Linux kernel source level @@ -25987,9 +26493,11 @@ U2/Uturn I/O MMU CONFIG_IOMMU_CCIO - Say Y here to enable DMA management routines for the first - generation of PA-RISC cache-coherent machines. Programs the - U2/Uturn chip in "Virtual Mode" and use the I/O MMU. + The U2/UTurn is a bus converter with an I/O MMU present in the Cxxx, D, + J, K, and R class machines. Compiling this driver into the kernel will + not hurt anything; removing it will reduce your kernel by about 14k. + + If unsure, say Y. LBA/Elroy PCI support CONFIG_PCI_LBA @@ -26362,10 +26870,103 @@ written) to implement the policy. If you don't understand what this is all about, it's safe to say 'N'. + For more information, take a look at linux/Documentation/cpufreq or + at + + If in doubt, say N. + +CONFIG_CPU_FREQ_24_API + This enables the /proc/sys/cpu/ sysctl interface for controlling + CPUFreq, as known from the 2.4 kernel patches for CPUFreq. 2.5 + uses /proc/cpufreq instead. Please note that some drivers do not + work with the 2.4 /proc/sys/cpu sysctl interface, so if in doubt, + say N here. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N.
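To make the CONFIG_CPU_FREQ_24_API text above concrete: the 2.4 interface is
plain file I/O on /proc/sys/cpu/. The sketch below pins CPU 0 to its minimum
speed; the file names speed and speed-min are the ones documented in
Documentation/cpufreq further down in this patch.

	#include <stdio.h>

	/* C equivalent of "cat /proc/sys/cpu/0/speed-min >
	 * /proc/sys/cpu/0/speed" from Documentation/cpufreq. */
	int main(void)
	{
		char khz[64];
		FILE *in = fopen("/proc/sys/cpu/0/speed-min", "r");
		FILE *out = fopen("/proc/sys/cpu/0/speed", "w");

		if (!in || !out || !fgets(khz, sizeof(khz), in)) {
			perror("cpufreq sysctl files");
			return 1;
		}
		fputs(khz, out);	/* the value is expressed in kHz */
		fclose(in);
		fclose(out);
		return 0;
	}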
+ +CONFIG_X86_POWERNOW_K6 + This adds the CPUFreq driver for mobile AMD K6-2+ and mobile + AMD K6-3+ processors. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_X86_P4_CLOCKMOD + This adds the CPUFreq driver for Intel Pentium 4 / XEON + processors. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_ELAN_CPUFREQ + This adds the CPUFreq driver for AMD Elan SC400 and SC410 + processors. + + You need to specify the processor maximum speed as a boot + parameter (or as a module parameter): + elanfreq=maxspeed + with the argument "maxspeed" given in kHz. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_X86_LONGHAUL + This adds the CPUFreq driver for VIA Samuel/CyrixIII, + VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T + processors. + + If you do not want to scale the Front Side Bus or voltage, + pass the module parameter "dont_scale_fsb=1" or + "dont_scale_voltage=1". Additionally, it is advised that + you pass the current Front Side Bus speed (in MHz) to + this module as the module parameter "current_fsb", e.g. + "current_fsb=133" for a Front Side Bus speed of 133 MHz. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_X86_SPEEDSTEP + This adds the CPUFreq driver for certain mobile Intel Pentium III + (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all + mobile Intel Pentium 4 P4-Ms. + + If you use a Coppermine Pentium III which is capable of + SpeedStep, you need to pass the boot or module parameter + "speedstep_coppermine=1" to the kernel. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_X86_LONGRUN + This adds the CPUFreq driver for Transmeta Crusoe processors which + support LongRun. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_X86_GX_SUSPMOD + This adds the CPUFreq driver for NatSemi Geode processors which + support suspend modulation. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + SiS CONFIG_DRM_SIS - Choose this option if you have a SIS graphics card. AGP support is - required for this driver to work. + Choose this option if you have a SIS 300 series graphics card or + VGA controller (300, 305, 540, 630, 730). + + Both AGP support and the SiS framebuffer driver are required + for this driver to work. Etrax Ethernet slave support (over lp0/1) CONFIG_ETRAX_ETHERNET_LPSLAVE @@ -26375,7 +26976,7 @@ Slave has its own LEDs CONFIG_ETRAX_ETHERNET_LPSLAVE_HAS_LEDS - Enable if the slave has it's own LEDs. + Enable if the slave has its own LEDs. ATA/IDE support CONFIG_ETRAX_IDE @@ -26572,7 +27173,7 @@ If compiled as a module, it will be called scx200_docflash.o. NatSemi SCx200 I2C using GPIO pins -CONFIG_SCx200_GPIO +CONFIG_SCx200_I2C Enable the use of two GPIO pins of a SCx200 processor as an I2C bus. If you don't know what to do here, say N. @@ -26629,6 +27230,13 @@ CONFIG_IPMI_WATCHDOG This enables the IPMI watchdog timer. +CRC32 functions +CONFIG_CRC32 + This option is provided for the case where no in-kernel-tree + modules require CRC32 functions, but a module built outside the + kernel tree does. If such a module uses the library CRC32 + functions, say M here.
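A hedged sketch of what "library CRC32 functions" means for the CONFIG_CRC32
entry above. It assumes the <linux/crc32.h> interface shipped alongside this
option, where crc32_le() computes the standard little-endian CRC-32; the
module name and printout are purely illustrative.

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/crc32.h>	/* built when CONFIG_CRC32 is enabled */

	static int __init crcdemo_init(void)
	{
		static const unsigned char buf[] = "123456789";
		u32 crc;

		/* ~0 is the customary initial seed; the final inversion
		 * yields the standard CRC-32 value (0xcbf43926 here). */
		crc = crc32_le(~0, buf, sizeof(buf) - 1);
		printk(KERN_INFO "crcdemo: crc32 = 0x%08x\n", crc ^ ~0U);
		return 0;
	}

	static void __exit crcdemo_exit(void)
	{
	}

	module_init(crcdemo_init);
	module_exit(crcdemo_exit);
	MODULE_LICENSE("GPL");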
+ # # A couple of things I keep forgetting: # capitalize: AppleTalk, Ethernet, DOS, DMA, FAT, FTP, Internet, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/cpufreq linux.21rc5-ac2/Documentation/cpufreq --- linux.21rc5/Documentation/cpufreq 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/Documentation/cpufreq 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,363 @@ + CPU frequency and voltage scaling code in the Linux(TM) kernel + + + L i n u x C P U F r e q + + + + + Dominik Brodowski + David Kimdon + + + + Clock scaling allows you to change the clock speed of the CPUs on the + fly. This is a nice method to save battery power, because the lower + the clock speed, the less power the CPU consumes. + + + +Contents: +--------- +1. Supported architectures +2. User interface +2.1 /proc/cpufreq interface [2.6] +2.2 /proc/sys/cpu/ interface [2.4] +3. CPUFreq core and interfaces +3.1 General information +3.2 CPUFreq notifiers +3.3 CPUFreq architecture drivers +4. Mailing list and Links + + + +1. Supported architectures +========================== + +ARM: + ARM Integrator, SA 1100, SA1110 +-------------------------------- + No known issues. + + +AMD Elan: + SC400, SC410 +-------------------------------- + You need to specify the highest allowed CPU frequency as + a module parameter ("max_freq") or as a boot parameter + ("elanfreq="). Otherwise the available speed range will be + limited to the speed at which the CPU runs while this + module is loaded. + + +VIA Cyrix Longhaul: + VIA Samuel/CyrixIII, VIA Cyrix Samuel/C3, + VIA Cyrix Ezra, VIA Cyrix Ezra-T +-------------------------------- + If you do not want to scale the Front Side Bus or voltage, + pass the module parameter "dont_scale_fsb=1" or + "dont_scale_voltage=1". Additionally, it is advised that + you pass the current Front Side Bus speed (in MHz) to + this module as the module parameter "current_fsb", e.g. + "current_fsb=133" for a Front Side Bus speed of 133 MHz. + + +Intel SpeedStep: + certain mobile Intel Pentium III (Coppermine), and all mobile + Intel Pentium III-M (Tualatin) and mobile Intel Pentium 4 P4-Ms. +-------------------------------- + Unfortunately, only modern Intel ICH2-M and ICH3-M chipsets are + supported so far. + + +P4 CPU Clock Modulation: + Intel Pentium 4 Xeon processors +--------------------------------- + Note that you can only switch the speed of two logical CPUs at + once - but each physical CPU may have different throttling levels. + + +PowerNow! K6: + mobile AMD K6-2+ / mobile K6-3+: +-------------------------------- + No known issues. + + +Transmeta Crusoe Longrun: + Transmeta Crusoe processors: +-------------------------------- + It is recommended to use the 2.6 /proc/cpufreq interface when + using this driver. + + + +2. User Interface +================= + +2.1 /proc/cpufreq interface [2.6] +*********************************** + +Starting in the patches for kernel 2.5.33, CPUFreq uses a "policy" +interface /proc/cpufreq. + +When you "cat" this file, you'll find something like: + +-- + minimum CPU frequency - maximum CPU frequency - policy +CPU 0 1200000 ( 75%) - 1600000 (100%) - performance +-- + +This means the current policy allows this CPU to be run anywhere +between 1.2 GHz (the value is in kHz) and 1.6 GHz with an eye towards +performance. + +To change the policy, "echo" the desired new policy into +/proc/cpufreq.
Use one of the following formats: + +cpu_nr:min_freq:max_freq:policy +cpu_nr%min_freq%max_freq%policy +min_freq:max_freq:policy +min_freq%max_freq%policy + +with cpu_nr being the CPU which shall be affected, min_freq and +max_freq the lower and upper limit of the CPU core frequency in kHz, +and policy either "performance" or "powersave". +A few examples: + +root@notebook:#echo -n "0:0:0:powersave" > /proc/cpufreq + sets the CPU #0 to the lowest supported frequency. + +root@notebook:#echo -n "1%100%100%performance" > /proc/cpufreq + sets the CPU #1 to the highest supported frequency. + +root@notebook:#echo -n "1000000:2000000:performance" > /proc/cpufreq + sets the frequency of all CPUs to between 1 GHz and 2 GHz and the + policy to "performance". + +Please note that the values you "echo" into /proc/cpufreq are +validated first, and may be limited by hardware or thermal +considerations. Because of this, a read from /proc/cpufreq might +differ from what was written into it. + + +When you read /proc/cpufreq for the first time after a CPUFreq driver +has been initialized, you'll see the "default policy" for this +driver. If this does not suit your needs, you can pass a boot +parameter to the cpufreq core. Use the following syntax for this: + "cpufreq=min_freq:max_freq:policy", i.e. you may not choose a +specific CPU and you need to specify the limits in kHz and not in +percent. + + +2.2 /proc/sys/cpu/ interface [2.4] +*********************************** + +Previously (and still available as a config option), CPUFreq used +a "sysctl" interface which is located in + /proc/sys/cpu/0/ + /proc/sys/cpu/1/ ... (SMP only) + +In these directories, you will find three files of importance for +CPUFreq: speed-max, speed-min and speed: + +speed shows the current CPU frequency in kHz, +speed-min the minimum supported CPU frequency, and +speed-max the maximum supported CPU frequency. + + +To change the CPU frequency, "echo" the desired CPU frequency (in kHz) +to speed. For example, to set the CPU speed to the lowest/highest +allowed frequency do: + +root@notebook:# cat /proc/sys/cpu/0/speed-min > /proc/sys/cpu/0/speed +root@notebook:# cat /proc/sys/cpu/0/speed-max > /proc/sys/cpu/0/speed + + + +3. CPUFreq core and interfaces +=============================== + +3.1 General information +************************* + +The CPUFreq core code is located in linux/kernel/cpufreq.c. This +cpufreq code offers a standardized interface for the CPUFreq +architecture drivers (those pieces of code that do actual +frequency transitions), as well as to "notifiers". These are device +drivers or other parts of the kernel that need to be informed of +policy changes (like thermal modules such as ACPI) or of all +frequency changes (like timing code) or even need to force certain +speed limits (like LCD drivers on ARM architecture). Additionally, the +kernel "constant" loops_per_jiffy is updated on frequency changes +here. + + +3.2 CPUFreq notifiers +*********************** + +CPUFreq notifiers conform to the standard kernel notifier interface. +See linux/include/linux/notifier.h for details on notifiers. + +There are two different CPUFreq notifiers - policy notifiers and +transition notifiers. + + +3.2.1 CPUFreq policy notifiers +****************************** + +These are notified when a new policy is intended to be set. Each +CPUFreq policy notifier is called three times for a policy transition: + +1.)
During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if + they see a need for this - be it thermal considerations or + hardware limitations. + +2.) During CPUFREQ_INCOMPATIBLE only those changes may be done that + are needed to avoid hardware failure. + +3.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy + - if two hardware drivers failed to agree on a new policy before this + stage, the incompatible hardware shall be shut down, and the user + informed of this. + +The phase is specified in the second argument to the notifier. + +The third argument, a void *pointer, points to a struct cpufreq_policy +consisting of five values: cpu, min, max, policy and max_cpu_freq. Min +and max are the lower and upper frequencies (in kHz) of the new +policy, policy the new policy, cpu the number of the affected CPU or +CPUFREQ_ALL_CPUS for all CPUs; and max_cpu_freq the maximum supported +CPU frequency. This value is given for informational purposes only. + + +3.2.2 CPUFreq transition notifiers +********************************** + +These are notified twice when the CPUFreq driver switches the CPU core +frequency and this change has any external implications. + +The second argument specifies the phase - CPUFREQ_PRECHANGE or +CPUFREQ_POSTCHANGE. + +The third argument is a struct cpufreq_freqs with the following +values: +cpu - number of the affected CPU or CPUFREQ_ALL_CPUS +old - old frequency +new - new frequency + + +3.3 CPUFreq architecture drivers +********************************** + +CPUFreq architecture drivers are the pieces of kernel code that +actually perform CPU frequency transitions. These need to be +initialized separately (separate initcalls), and may be +modularized. They interact with the CPUFreq core in the following way: + +cpufreq_register() +------------------ +cpufreq_register registers an arch driver with the CPUFreq core. Please +note that only one arch driver may be registered at any time. -EBUSY +is returned when an arch driver is already registered. The argument to +cpufreq_register, struct cpufreq_driver *driver, is described later. + +cpufreq_unregister() +-------------------- +cpufreq_unregister unregisters an arch driver, e.g. on module +unloading. Please note that there is no check done that this is called +from the driver which actually registered itself with the core, so +please only call this function when you are sure the arch driver got +registered correctly before. + +cpufreq_notify_transition() +--------------------------- +On "dumb" hardware where only fixed frequencies can be set, the driver +must call cpufreq_notify_transition() once before, and once after the +actual transition. + +struct cpufreq_driver +--------------------- +On initialization, the arch driver is supposed to pass a pointer +to a struct cpufreq_driver *cpufreq_driver consisting of the following +entries: + +cpufreq_verify_t verify: This is a pointer to a function with the + following definition: + int verify_function (struct cpufreq_policy *policy). + This function must verify the new policy is within the limits + supported by the CPU, and at least one supported CPU is within + this range. It may be useful to use cpufreq.h / + cpufreq_verify_within_limits for this. If this is called with + CPUFREQ_ALL_CPUS, and there is no common subset of frequencies + for all CPUs, exit with an error. + +cpufreq_setpolicy_t setpolicy: This is a pointer to a function with + the following definition: + int setpolicy_function (struct cpufreq_policy *policy).
+ This function must set the CPU to the new policy. If it is a + "dumb" CPU which only allows fixed frequencies to be set, it + shall set it to the lowest within the limit for + CPUFREQ_POLICY_POWERSAVE, and to the highest for + CPUFREQ_POLICY_PERFORMANCE. Once CONFIG_CPU_FREQ_DYNAMIC is + implemented, it can use a dynamic method to adjust the speed + between the lower and upper limit. + +struct cpufreq_policy *policy: This is an array of NR_CPUS struct + cpufreq_policies, containing the current policies set for these + CPUs. Note that policy[cpu].max_cpu_freq must contain the + absolute maximum CPU frequency supported by the specified cpu. + +In case the driver is expected to run with the 2.4-style API +(/proc/sys/cpu/.../), two more values must be passed: +#ifdef CONFIG_CPU_FREQ_24_API + unsigned int cpu_min_freq[NR_CPUS]; + unsigned int cpu_cur_freq[NR_CPUS]; +#endif + with cpu_min_freq[cpu] being the minimum CPU frequency + supported by the CPU; and the entries in cpu_cur_freq + reflecting the current speed of the appropriate CPU. + +Some Requirements for CPUFreq architecture drivers +------------------------------------------------- +* Only call cpufreq_register() when the ability to switch CPU + frequencies is _verified_ or guaranteed to be present. Also, all + other initialization must be done before this call, as + cpufreq_register calls the driver's verify and setpolicy code for + each CPU. +* cpufreq_unregister() may only be called if cpufreq_register() has + been successfully(!) called before. +* kfree() the struct cpufreq_driver only after the call to + cpufreq_unregister(), unless cpufreq_register() failed. + + + +4. Mailing list and Links +************************* + + +Mailing List +------------ +There is a CPU frequency changing CVS-commit and general mailing list +where you can report bugs, problems or submit patches. To post a message, +send an email to cpufreq@www.linux.org.uk; to subscribe, go to +http://www.linux.org.uk/mailman/listinfo/cpufreq. Previous posts to the +mailing list are available to subscribers at +http://www.linux.org.uk/mailman/private/cpufreq/. + + +Links +----- +the FTP archives: +* ftp://ftp.linux.org.uk/pub/linux/cpufreq/ + +how to access the CVS repository: +* http://cvs.arm.linux.org.uk/ + +the CPUFreq Mailing list: +* http://www.linux.org.uk/mailman/listinfo/cpufreq + +Clock and voltage scaling for the SA-1100: +* http://www.lart.tudelft.nl/projects/scaling + +CPUFreq project homepage +* http://www.brodo.de/cpufreq/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/devices.txt linux.21rc5-ac2/Documentation/devices.txt --- linux.21rc5/Documentation/devices.txt 2003-05-28 15:08:18.000000000 +0100 +++ linux.21rc5-ac2/Documentation/devices.txt 2003-05-28 15:13:38.000000000 +0100 @@ -2531,17 +2531,17 @@ 1 = /dev/dri/card1 Second graphics card ... -227 char IBM 3270 terminal block-mode access +227 char IBM 3270 terminal Unix tty access + 1 = /dev/3270/tty1 First 3270 terminal + 2 = /dev/3270/tty2 Second 3270 terminal + ... + +228 char IBM 3270 terminal block-mode access 0 = /dev/3270/tub Controlling interface 1 = /dev/3270/tub1 First 3270 terminal 2 = /dev/3270/tub2 Second 3270 terminal ... -228 char IBM 3270 terminal Unix tty access - 1 = /dev/3270/tty1 First 3270 terminal - 2 = /dev/3270/tty2 Seconds 3270 terminal - ...
- 229 char IBM iSeries virtual console 0 = /dev/iseries/vtty0 First console port 1 = /dev/iseries/vtty1 Second console port diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/DriverFixers linux.21rc5-ac2/Documentation/DriverFixers --- linux.21rc5/Documentation/DriverFixers 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/Documentation/DriverFixers 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,75 @@ +People who fix drivers as a business - ie for money. (No recommendation, +business association or other relationship implied. This for the benefit of +American lawyers is just a list of people who have asked to be listed - nothing +more). + +Companies Who Will Do Small Contract Work +----------------------------------------- + +Company: BitWizard +Contact: Rogier Wolff +E-Mail: R.E.Wolff@BitWizard.nl + +Company: Caederus +Contact: Justin Mitchell +E-Mail: info@caederus.com +Location: Swansea, Wales, UK +URL: http://www.caederus.com/ + +Company: Calsoft Inc +Contact: Anupam Bhide +E-Mail: anupam@calsoftinc.com +URL: http://www.calsoftinc.com +Location: Pune, India + +Company: Hansen Partnership Inc +Contact: James Bottomley +E-Mail: James.Bottomley@HansenPartnership.com +Location: 1, Partridge Square, Oswego, Illinois 60543, USA + +Company: Linking +Contact: Elmer Joandi +E-Mail: elmer@linkingsoft.com + +Company: Penguru Consulting, LLC +Contact: Komron Takmil +E-Mail: komron@penguru.net +Location: Salt Lake City, UT USA + +Company: 7Chips +Contact: Vadim Lebedev +E-Mail: vadim@7chips.com +Location: Paris, France +Notes: Experienced in Linux and uClinux on x86/ARM/Motorola + +Company: Weinigel Ingenjörsbyrå AB +Contact: Christer Weinigel +E-Mail: christer@weinigel.se +Location: Stockholm, Sweden + +Company: WildOpenSource +Contact: Martin Hicks +E-Mail: info@wildopensource.com + + +Companies Only Interested In Larger ($10000+) Jobs +-------------------------------------------------- + + + +Companies Only Interested In Very Large ($100000+) Jobs +------------------------------------------------------- + + +To be added to the list: email giving the +following information + +Company: CompanyName [Required] +Contact: ContactName [Required] +E-Mail: An email address [Required] +URL: Web site [Optional] +Location: Area/Country [Optional] +Telephone: Contact phone number [Optional] +Speciality: Any specific speciality [Optional] +Notes: Any other notes (eg certifications, specialities) + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/fb/vesafb.txt linux.21rc5-ac2/Documentation/fb/vesafb.txt --- linux.21rc5/Documentation/fb/vesafb.txt 2003-05-28 15:08:20.000000000 +0100 +++ linux.21rc5-ac2/Documentation/fb/vesafb.txt 2003-05-07 16:00:53.000000000 +0100 @@ -139,6 +139,11 @@ redraw scroll by redrawing the affected part of the screen, this is the safe (and slow) default. +vram:n remap 'n' MiB of video RAM. If 0 or not specified, remap memory + according to video mode. (2.5.66 patch/idea by Antonino Daplas + reversed to give override possibility (allocate more fb memory + than the kernel would) to 2.4 by tmb@iki.fi) + vgapal Use the standard vga registers for palette changes. This is the default. @@ -146,6 +151,11 @@ mtrr setup memory type range registers for the vesafb framebuffer. +vram:n remap 'n' MiB of video RAM. If 0 or not specified, remap memory + according to video mode. 
(2.5.66 patch/idea by Antonino Daplas + reversed to give override possibility (allocate more fb memory + than the kernel would) to 2.4 by tmb@iki.fi) + Have fun! diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/filesystems/00-INDEX linux.21rc5-ac2/Documentation/filesystems/00-INDEX --- linux.21rc5/Documentation/filesystems/00-INDEX 2003-05-28 15:08:18.000000000 +0100 +++ linux.21rc5-ac2/Documentation/filesystems/00-INDEX 2003-04-28 17:52:08.000000000 +0100 @@ -48,3 +48,5 @@ - info on using the VFAT filesystem used in Windows NT and Windows 95 vfs.txt - Overview of the Virtual File System +xfs.txt + - info and mount options for the XFS filesystem. diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/filesystems/xfs.txt linux.21rc5-ac2/Documentation/filesystems/xfs.txt --- linux.21rc5/Documentation/filesystems/xfs.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/Documentation/filesystems/xfs.txt 2003-04-28 17:52:08.000000000 +0100 @@ -0,0 +1,124 @@ + +The SGI XFS Filesystem +====================== + +XFS is a high performance journaling filesystem which originated +on the SGI IRIX platform. It is completely multi-threaded, can +support large files and large filesystems, extended attributes, +variable block sizes, is extent based, and makes extensive use of +Btrees (directories, extents, free space) to aid both performance +and scalability. + +Refer to the documentation at http://oss.sgi.com/projects/xfs/ +for further details. This implementation is on-disk compatible +with the IRIX version of XFS. + + +Options +======= + +When mounting an XFS filesystem, the following options are accepted. + + biosize=size + Sets the preferred buffered I/O size (default size is 64K). + "size" must be expressed as the logarithm (base2) of the + desired I/O size. + Valid values for this option are 14 through 16, inclusive + (i.e. 16K, 32K, and 64K bytes). On machines with a 4K + pagesize, 13 (8K bytes) is also a valid size. + The preferred buffered I/O size can also be altered on an + individual file basis using the ioctl(2) system call. + + dmapi + Enable the DMAPI (Data Management API) event callouts. + Use with the "mtpt" option. + + irixsgid + Do not inherit the ISGID bit on subdirectories of ISGID + directories, if the process creating the subdirectory + is not a member of the parent directory group ID. + This matches IRIX behavior. + + logbufs=value + Set the number of in-memory log buffers. Valid numbers range + from 2-8 inclusive. + The default value is 8 buffers for filesystems with a + blocksize of 64K, 4 buffers for filesystems with a blocksize + of 32K, 3 buffers for filesystems with a blocksize of 16K + and 2 buffers for all other configurations. Increasing the + number of buffers may increase performance on some workloads + at the cost of the memory used for the additional log buffers + and their associated control structures. + + logbsize=value + Set the size of each in-memory log buffer. + Size may be specified in bytes, or in kilobytes with a "k" suffix. + Valid sizes for version 1 and version 2 logs are 16384 (16k) and + 32768 (32k). Valid sizes for version 2 logs also include + 65536 (64k), 131072 (128k) and 262144 (256k). + The default value for machines with more than 32MB of memory + is 32768, machines with less memory use 16384 by default. + + logdev=device and rtdev=device + Use an external log (metadata journal) and/or real-time device. 
+ An XFS filesystem has up to three parts: a data section, a log + section, and a real-time section. The real-time section is + optional, and the log section can be separate from the data + section or contained within it. + + mtpt=mountpoint + Use with the "dmapi" option. The value specified here will be + included in the DMAPI mount event, and should be the path of + the actual mountpoint that is used. + + noalign + Data allocations will not be aligned at stripe unit boundaries. + + noatime + Access timestamps are not updated when a file is read. + + norecovery + The filesystem will be mounted without running log recovery. + If the filesystem was not cleanly unmounted, it is likely to + be inconsistent when mounted in "norecovery" mode. + Some files or directories may not be accessible because of this. + Filesystems mounted "norecovery" must be mounted read-only or + the mount will fail. + + osyncisosync + Make O_SYNC writes implement true O_SYNC. WITHOUT this option, + Linux XFS behaves as if an "osyncisdsync" option is used, + which will make writes to files opened with the O_SYNC flag set + behave as if the O_DSYNC flag had been used instead. + This can result in better performance without compromising + data safety. + However if this option is not in effect, timestamp updates from + O_SYNC writes can be lost if the system crashes. + If timestamp updates are critical, use the osyncisosync option. + + quota/usrquota/uqnoenforce + User disk quota accounting enabled, and limits (optionally) + enforced. + + grpquota/gqnoenforce + Group disk quota accounting enabled and limits (optionally) + enforced. + + sunit=value and swidth=value + Used to specify the stripe unit and width for a RAID device or + a stripe volume. "value" must be specified in 512-byte block + units. + If this option is not specified and the filesystem was made on + a stripe volume or the stripe width or unit were specified for + the RAID device at mkfs time, then the mount system call will + restore the value from the superblock. For filesystems that + are made directly on RAID devices, these options can be used + to override the information in the superblock if the underlying + disk layout changes after the filesystem has been created. + The "swidth" option is required if the "sunit" option has been + specified, and must be a multiple of the "sunit" value. + + nouuid + Don't check for double mounted file systems using the file system uuid. + This is useful to mount LVM snapshot volumes. + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/i386/zero-page.txt linux.21rc5-ac2/Documentation/i386/zero-page.txt --- linux.21rc5/Documentation/i386/zero-page.txt 2003-05-28 15:08:20.000000000 +0100 +++ linux.21rc5-ac2/Documentation/i386/zero-page.txt 2003-04-22 16:44:36.000000000 +0100 @@ -31,6 +31,7 @@ 0x1e0 unsigned long ALT_MEM_K, alternative mem check, in Kb 0x1e8 char number of entries in E820MAP (below) +0x1e9 unsigned char number of entries in EDDBUF (below) 0x1f1 char size of setup.S, number of sectors 0x1f2 unsigned short MOUNT_ROOT_RDONLY (if !=0) 0x1f4 unsigned short size of compressed kernel-part in the @@ -66,6 +67,7 @@ 0x220 4 bytes (setup.S) 0x224 unsigned short setup.S heap end pointer 0x2d0 - 0x600 E820MAP +0x600 - 0x7D4 EDDBUF (setup.S) 0x800 string, 2K max COMMAND_LINE, the kernel commandline as copied using CL_OFFSET. 
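As an illustration of the zero-page layout just listed, the offsets can be
mirrored by a packed C struct. This is a sketch only: the field names are
invented for readability, and E820MAP/EDDBUF are kept as raw byte arrays
because their per-entry record formats are not given in this table.

	/* Illustrative only: offsets follow the zero-page table above
	 * (0x1e8/0x1e9 entry counts, 0x2d0 E820MAP, 0x600 EDDBUF). */
	struct zero_page_sketch {
		unsigned char pad0[0x1e0];
		unsigned long alt_mem_k;		/* 0x1e0: ALT_MEM_K */
		unsigned char pad1[4];			/* 0x1e4, not listed */
		unsigned char e820_entries;		/* 0x1e8 */
		unsigned char edd_entries;		/* 0x1e9: EDDBUF count */
		unsigned char pad2[0x2d0 - 0x1ea];
		unsigned char e820_map[0x600 - 0x2d0];	/* 0x2d0: E820MAP */
		unsigned char edd_buf[0x7d4 - 0x600];	/* 0x600: EDDBUF */
		unsigned char pad3[0x800 - 0x7d4];
		char command_line[2048];		/* 0x800: 2K max */
	} __attribute__ ((packed));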
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/networking/generic-hdlc.txt linux.21rc5-ac2/Documentation/networking/generic-hdlc.txt --- linux.21rc5/Documentation/networking/generic-hdlc.txt 2003-05-28 15:08:49.000000000 +0100 +++ linux.21rc5-ac2/Documentation/networking/generic-hdlc.txt 2003-05-20 20:19:14.000000000 +0100 @@ -1,11 +1,13 @@ -Generic HDLC layer for Linux kernel 2.4/2.5 +Generic HDLC layer Krzysztof Halasa -May, 2001 +January, 2003 Generic HDLC layer currently supports: -- Frame Relay (ANSI, CCITT and no LMI), with ARP support (no InARP), -- raw HDLC (IPv4 only), +- Frame Relay (ANSI, CCITT and no LMI), with ARP support (no InARP). + Normal (routed) and Ethernet-bridged (Ethernet device emulation) + interfaces can share a single PVC. +- raw HDLC - either IP (IPv4) interface or Ethernet device emulation. - Cisco HDLC, - PPP (uses syncppp.c), - X.25 (uses X.25 routines). @@ -15,6 +17,10 @@ - RISCom/N2 by SDL Communications Inc. - and others, some not in the official kernel. +Ethernet device emulation (using HDLC or Frame-Relay PVC) is compatible +with IEEE 802.1Q (VLANs) and 802.1D (Ethernet bridging). + + Make sure the hdlc.o and the hardware driver are loaded. It should create a number of "hdlc" (hdlc0 etc) network devices, one for each WAN port. You'll need the "sethdlc" utility; get it from: @@ -32,8 +38,10 @@ sethdlc hdlc0 cisco interval 10 timeout 25 or sethdlc hdlc0 rs232 clock ext - sethdlc fr lmi ansi - sethdlc create 99 + sethdlc hdlc0 fr lmi ansi + sethdlc hdlc0 create 99 + ifconfig hdlc0 up + ifconfig pvc0 localIP pointopoint remoteIP In Frame Relay mode, bring the master hdlc device up with ifconfig (without assigning any IP address to it) before using pvc devices. @@ -58,6 +66,9 @@ no-parity / crc16 / crc16-pr0 (CRC16 with preset zeros) / crc32-itu crc16-itu (CRC16 with ITU-T polynomial) / crc16-itu-pr0 - sets parity +* hdlc-eth - Ethernet device emulation using HDLC. Parity and encoding + as above. + * cisco - sets Cisco HDLC mode (IP, IPv6 and IPX supported) interval - time in seconds between keepalive packets timeout - time in seconds after last received keepalive packet before @@ -77,7 +88,12 @@ n392 - error threshold - both user and network n393 - monitored events count - both user and network -* create | delete n - FR only - adds / deletes PVC interface with DLCI #n. +Frame-Relay only: +* create n | delete n - adds / deletes PVC interface with DLCI #n. + Newly created interfaces will be named pvc0, pvc1 etc. + +* create ether n | delete ether n - adds a device for Ethernet-bridged + frames. The device will be named pvceth0, pvceth1 etc. @@ -104,11 +120,11 @@ If you have a problem with an N2 or C101 card, you can issue the "private" -command to see port's packet descriptor rings: +command to see the port's packet descriptor rings (in kernel logs): sethdlc hdlc0 private -The hardware driver have to be build with CONFIG_HDLC_DEBUG_RINGS. +The hardware driver has to be built with CONFIG_HDLC_DEBUG_RINGS. Attaching this info to bug reports would be helpful. Anyway, let me know if you have problems using this.
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/s390/c7000.txt linux.21rc5-ac2/Documentation/s390/c7000.txt --- linux.21rc5/Documentation/s390/c7000.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/Documentation/s390/c7000.txt 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,92 @@ +Cisco 7000 (CLAW) support + +The c7000 module provides support for a channel attached Cisco 7xxx +family router on Linux/390. The parameters for the module are as follows: + + base0=0xYYYY This parameter defines the base unit address of the + channel attached router. + + lhost0=s1 This parameter defines the local host name and + must match the claw directive "host-name" field + (first string). The default value is "UTS". + + uhost0=s2 This parameter defines the unit's name and must + match the claw directive "device-name" field + (second string). The default value is "C7011". + + lappl0=s3 This parameter defines the local application name + and must match the claw directive "host-app" field + (third string). The default value is "TCPIP". + + uappl0=s4 This parameter defines the unit application name and + must match the claw directive "device-app" field + (fourth string). The default value is "TCPIP". + + dbg=x This parameter defines the message level. Higher + numbers will result in additional diagnostic messages. + The default value is 0. + + noauto=z This parameter controls the automatic detection of + the unit base address (base0). When set to a non + zero value, automatic detection of unit base addresses + is not done. The default value is 0. + +Note that the values coded in strings s1 - s4 are case sensitive. + +For example, assume that the following claw directive has been coded in +the Cisco router: + +claw 0100 6C 129.212.61.101 UTS C7011 TCPIP TCPIP + +The module can be loaded using the following command: + +insmod c7000 base0=0x336c lhost0="UTS" uhost0="C7011" lappl0="TCPIP" \ +uappl0="TCPIP" dbg=0 noauto=1 + +Additional interfaces can be defined via parameters base1 - base3, +lhost1 - lhost3, lappl1 - lappl3, uhost1 - uhost3, uappl1 - uappl3. +The interfaces are named "ci0" - "ci3". After loading the module, the +ifconfig command is used to configure the interface. For example: + +ifconfig ci0 129.212.61.101 +ifconfig ci0 netmask 255.255.255.0 broadcast 129.212.61.0 +ifconfig ci0 + +The route command is used to specify the router as the default route: + +route add default gw 129.212.61.200 + +The interface can be automatically activated at boot time by following this +procedure: + +1) Add the following two lines to file "/etc/conf.modules": + +alias ci0 c7000 +options c7000 base0=0xYYYY lhost0=s1 uhost0=s2 lappl0=s3 uappl0=s4 + +2) Edit file "/etc/sysconfig/network" as follows: + +NETWORKING=yes +FORWARD_IPV4=no +HOSTNAME=your-hostname +GATEWAYDEV=ci0 +GATEWAY=your-gateway-ip-address + +Substitute your own host name and gateway IP address. + +3) Create a file in directory "/etc/sysconfig/network-scripts" called +"ifcfg-ci0". The contents are as follows: + +DEVICE=ci0 +USERCTL=no +ONBOOT=yes +BOOTPROTO=none +BROADCAST=your-broadcast-ip-address +NETWORK=your-network-address +NETMASK=your-netmask +IPADDR=your-ip-address + +Substitute your IP address, broadcast IP address, network address and +network mask. 
+ +4) chmod +x ifcfg-ci0 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/s390/cds.txt linux.21rc5-ac2/Documentation/s390/cds.txt --- linux.21rc5/Documentation/s390/cds.txt 2003-05-28 15:08:20.000000000 +0100 +++ linux.21rc5-ac2/Documentation/s390/cds.txt 2003-04-25 13:50:29.000000000 +0100 @@ -5,12 +5,14 @@ Author : Ingo Adlung -Copyright, IBM Corp. 1999 +Copyright, IBM Corp. 1999-2002 + +ChangeLog: 02/01/2002 Cornelia Huck brought up-to-date Introduction This document describes the common device support routines for Linux/390. -Different than other hardware architectures, ESA/390 hasdefined a unified +Different than other hardware architectures, ESA/390 has defined a unified I/O access method. This gives relief to the device drivers as they don't have to deal with different bus types, polling versus interrupt processing, shared versus non-shared interrupt processing, DMA versus port @@ -26,14 +28,11 @@ In order to build common device support for ESA/390 I/O interfaces, a functional layer was introduced that provides generic I/O access methods to -the hardware. The following figure shows the usage of the common device support -of Linux/390 using a TbCP/IP driven device access an example. Similar figures -could be drawn for other access methods, e.g. file system access to disk -devices. - -The common device support layer shown above comprises the I/O support routines -defined below. Some of them implement common Linux device driver interfaces, -while some of them are ESA/390 platform specific. +the hardware. + +The common device support layer comprises the I/O support routines defined +below. Some of them implement common Linux device driver interfaces, while +some of them are ESA/390 platform specific. get_dev_info_by_irq() / get_dev_info_by_devno() allow a device driver to determine the devices attached (visible) to the @@ -44,10 +43,17 @@ read_dev_chars() read device characteristics + +read_conf_data() + read configuration data. request_irq() obtain ownership for a specific device. +s390_request_irq_special() + obtain ownership for a specific device. Similar to request_irq(), but + allows for device not operational notification too. + free_irq() release ownership for a specific device. @@ -60,6 +66,9 @@ do_IO() initiate an I/O request. +resume_IO() + resume channel program execution. + halt_IO() terminate the current I/O request processed on the device. @@ -70,12 +79,14 @@ interrupt handler according to the rules (flags) defined during I/O request initiation with do_IO(). -The next chapters describe the functions, other than do_IRQ() in more details. +The next chapters describe the functions other than do_IRQ() in more details. The do_IRQ() interface is not described, as it is called from the Linux/390 first level interrupt handler only and does not comprise a device driver callable interface. Instead, the functional description of do_IO() also describes the input to the device specific interrupt handler. +Note: All explanations apply also to the 64 bit architecture s390x. + Common Device Support (CDS) for Linux/390 Device Drivers @@ -90,7 +101,7 @@ of them can be found on other Linux platforms implementations too. Miscellaneous function prototypes, data declarations, and macro definitions can be found in the architecture specific C header file -linux/arch/s390/kernel/irq.h. +linux/include/asm-s390/irq.h. 
Overview of CDS interface concepts @@ -106,7 +117,7 @@ single device is uniquely identified to the system by a so called subchannel, where the ESA/390 architecture allows for 64k devices to be attached. -Linux, however was first built on the Intel PC architecture, with its two +Linux, however, was first built on the Intel PC architecture, with its two cascaded 8259 programmable interrupt controllers (PICs), that allow for a maximum of 15 different interrupt lines. All devices attached to such a system share those 15 interrupt levels. Devices attached to the ISA bus system must @@ -117,7 +128,7 @@ has to call every single device driver registered on this IRQ in order to determine the device driver owning the device that raised the interrupt. -In order to not introduce a new I/O concept to the common Linux code, +In order not to introduce a new I/O concept to the common Linux code, Linux/390 preserves the IRQ concept and semantically maps the ESA/390 subchannels to Linux as IRQs. This allows Linux/390 to support up to 64k different IRQs, uniquely representing a single device each. @@ -126,7 +137,7 @@ of those devices is uniquely defined by a so called subchannel by the ESA/390 channel subsystem. While the subchannel numbers are system generated, each subchannel also takes a user defined attribute, the so called device number. -Both, subchannel number and device number can not exceed 65535. The +Both subchannel number and device number cannot exceed 65535. The init_IRQ() routine gathers the information about control unit type and device types that imply specific I/O commands (channel command words - CCWs) in order to operate the device. Device drivers can retrieve this set of hardware @@ -141,7 +152,7 @@ When a device driver has recognized a device it wants to claim ownership for, it calls request_irq() with the device's subchannel id serving as pseudo irq line. One of the required parameters it has to specify is dev_id, defining a -device status block, the CDS layer will use to notify the device driver's +device status block which the CDS layer will use to notify the device driver's interrupt handler about interrupt information observed. It depends on the device driver to properly handle those interrupts. @@ -169,6 +180,41 @@ +get_irq_first() / get_irq_next() - Retrieve Information about available IRQs + +A device driver can use those interface routines to retrieve information for +only those IRQs that have valid device information available. As +Linux for S/390 supports a maximum of 65535 subchannels (devices), it might +be a waste of CPU time to scan for the maximum number of devices while only +a fraction is available/usable. get_irq_first() will retrieve the first +usable IRQ. Using this as input, get_irq_next() will retrieve the next IRQ +available, etc. + +int get_irq_first( void ); +int get_irq_next( int irq ); + +irq - defines the subchannel to start scanning with. This must be + a valid subchannel or an error is returned.
+ +The get_irq_first() / get_irq_next() functions return: + +non-negative value - next available IRQ + -ENODEV - no more IRQs available + +Example : + + irq = get_irq_first(); + while ( irq != -ENODEV) + { + get_dev_info_by_irq( irq, &dinfo); + if ( dinfo.devno == devno_to_look_for + || dinfo.sid_data.cu_type == cu_type_to_look_for ) + { + do_some_action( irq, &dinfo ); + } /* endif */ + + irq = get_irq_next(irq); + } + get_dev_info_by_irq() / get_dev_info_by_devno() - Retrieve Device Information During system startup - init_IRQ() processing - the generic I/O device support @@ -176,60 +222,69 @@ SenseID information. For those devices supporting the command it also obtains extended SenseID information. -int get_dev_info_by_irq( int irq, - dev_info_t *devinfo); +int get_dev_info_by_irq( int irq, + s390_dev_info_t *pdi); -int get_dev_info_by_devno( unsigned int irq, - dev_info_t *devinfo); +int get_dev_info_by_devno( __u16 devno, + s390_dev_info_t *pdi); -irq - defines the subchannel, status information is to be +irq - defines the subchannel status information is to be returned for. devno - device number. -devinfo - pointer to a user buffer of type dev_info_t that should +pdi - pointer to a user buffer of type s390_dev_info_t that should be filled with device specific information. typedef struct { - unsigned int devno; /* device number */ - unsigned int status; /* device status */ - senseid_t sid_data; /* senseID data */ -} dev_info_t; + int irq; /* irq, aka. subchannel */ + __u16 devno; /* device number */ + unsigned int status; /* device status */ + senseid_t sid_data; /* senseID data */ +} s390_dev_info_t; +irq - subchannel. devno - device number as configured in the IOCDS. status - device status sid_data - data obtained by a SenseID call Possible status values are : -DEVSTAT_NOT_OPER - device was found not-operational. In this case - the caller should disregard the sid_data - buffer content. +DEVSTAT_NOT_OPER - device was found not-operational. In this case + the caller should disregard the sid_data + buffer content. +DEVSTAT_UNFRIENDLY_DEV - device is locked by someone else. The sid_data buffer + doesn't contain valid data. +DEVSTAT_UNKNOWN_DEV - The device is unknown, and the sid_data buffer doesn't + contain valid data. +DEVSTAT_DEVICE_OWNED - An interrupt handler is registered. // -// SenseID response buffer layout +// sense-id response buffer layout // typedef struct { /* common part */ - unsigned char reserved; /* always 0x'FF' */ - unsigned short cu_type; /* control unit type */ - unsigned char cu_model; /* control unit model */ - unsigned short dev_type; /* device type */ - unsigned char dev_model; /* device model */ - unsigned char unused; /* padding byte */ + __u8 reserved; /* always 0x'FF' */ + __u16 cu_type; /* control unit type */ + __u8 cu_model; /* control unit model */ + __u16 dev_type; /* device type */ + __u8 dev_model; /* device model */ + __u8 unused; /* padding byte */ /* extended part */ - ciw_t ciw[62]; /* variable # of CIWs */ -} senseid_t; + ciw_t ciw[MAX_CIWS]; /* variable # of CIWs */ +} __attribute__ ((packed,aligned(4))) senseid_t; + +MAX_CIWS is currently defined as 8. The ESA/390 I/O architecture defines certain device specific I/O functions. 
The device returns the device specific command code together with the SenseID data in so called Command Information Words (CIW) : typedef struct _ciw { - unsigned int et : 2; // entry type - unsigned int reserved : 2; // reserved - unsigned int ct : 4; // command type - unsigned int cmd : 8; // command - unsigned int count : 16; // count -} ciw_t; + __u32 et : 2; // entry type + __u32 reserved : 2; // reserved + __u32 ct : 4; // command type + __u32 cmd : 8; // command + __u32 count : 16; // count +} __attribute__ ((packed)) ciw_t; Possible CIW entry types are : @@ -245,6 +300,7 @@ -ENODEV - irq or devno don't specify a known subchannel or device number. -EINVAL - invalid devinfo value. +-EUSERS - device is locked by someone else. Usage Notes : @@ -252,7 +308,7 @@ calling get_dev_info() until it returns -ENODEV as there aren't any more available devices. -If a device driver wants to request ownership for a specific device it must +If a device driver wants to request ownership for a specific device, it must call request_irq() prior to being able to issue any I/O request for it, including above mentioned device dependent commands. @@ -269,7 +325,7 @@ numbers configured in the IOCDS. The following routines serve the purpose to convert irq values into device numbers and vice versa. -int get_irq_by_devno( unsigned int devno ); +int get_irq_by_devno( __u16 devno ); unsigned int get_devno_by_irq( int irq ); @@ -336,6 +392,41 @@ he specifies, or the buffer he wants to be allocated. +read_conf_data() - Read Configuration Data + +Retrieve the device dependent configuration data. Please have a look at your +device dependent I/O commands for the device specific layout of the node +descriptor elements. + +The function is meant to be called without an irq handler being in place. +However, the irq for the requested device must not be locked or this will +cause a deadlock situation! + +The function may be called enabled or disabled. + +int read_conf_data( int irq, void **buffer, int *length, __u8 lpm); + +irq - Specifies the subchannel the configuration data is to be + retrieved for. +buffer - Pointer to a buffer pointer. The read_conf_data() routine + will allocate a buffer and initialize the buffer pointer + accordingly. It's the device driver's responsibility to + release the kernel memory if no longer needed. +length - Length of the buffer allocated and retrieved. +lpm - Logical path mask to be used for retrieving the data. If + zero, the data is retrieved on the next path available. + +The read_conf_data() function returns : + 0 - Successful completion +-ENODEV - irq doesn't specify a valid subchannel number +-EINVAL - An invalid parameter was detected +-EIO - An irrecoverable I/O error occurred or the device is + not operational. +-ENOMEM - The read_conf_data() routine couldn't obtain storage. +-EOPNOTSUPP - The device doesn't support the read configuration + data command. + + request_irq() - Request Device Ownership As previously discussed, a device driver will scan for the devices it supports @@ -343,6 +434,9 @@ request_irq() to request ownership for it. This call causes the subchannel to be enabled for interrupts if it was found operational. +Note: This function is obsolete and provided for compatibility purposes only. +Device drivers should use s390_request_irq_special() instead.
+ int request_irq( unsigned int irq, int (*handler)( int, void *, @@ -354,27 +448,28 @@ irq : specifies the subchannel the ownership is requested for handler : specifies the device driver's interrupt handler to be called for interrupt processing -irqflags : IRQ flags, must be 0 (zero) or SA_SAMPLE_RANDOM +irqflags : IRQ flags, currently ignored devname : device name dev_id : required pointer to a device specific buffer of type devstat_t typedef struct { - unsigned int devno; /* device number from irb */ - unsigned int intparm; /* interrupt parameter */ - unsigned char cstat; /* channel status - accumulated */ - unsigned char dstat; /* device status - accumulated */ - unsigned char lpum; /* last path used mask from irb */ - unsigned char unused; /* not used - reserved */ - unsigned int flag; /* flag : see below */ - unsigned long cpa; /* CCW addr from irb at prim. status */ - unsigned int rescnt; /* count from irb at primary status */ - unsigned int scnt; /* sense count, if available */ + __u16 devno; /* device number, aka. "cuu" from irb */ + unsigned long intparm; /* interrupt parameter */ + __u8 cstat; /* channel status - accumulated */ + __u8 dstat; /* device status - accumulated */ + __u8 lpum; /* last path used mask from irb */ + __u8 unused; /* not used - reserved */ + unsigned int flag; /* flag : see below */ + __u32 cpa; /* CCW address from irb at primary status */ + __u32 rescnt; /* res. count from irb at primary status */ + __u32 scnt; /* sense count, if DEVSTAT_FLAG_SENSE_AVAIL */ union { - irb_t irb; /* interruption response block */ - sense_t sense; /* sense information */ - } ii; /* interrupt information */ - } devstat_t; + irb_t irb; /* interruption response block */ + sense_t sense; /* sense information */ + } ii; /* interrupt information */ +} devstat_t; + During request_irq() processing, the devstat_t layout does not matter as it won't be used during request_irq() processing. See do_IO() for a functional @@ -396,9 +491,9 @@ layer, and the device specific driver. The value passed to request_irq() must therefore point to a valid devstat_t type buffer area the device driver must preserve for later usage. I.e. it must not be released prior to a call -to free_irq() +to free_irq(). -The only value parameter irqflags supports is SA_SAMPLE_RANDOM if appropriate. +Irqflags are currently ignored by the cds layer. The Linux/390 kernel does neither know about "fast" interrupt handlers, nor does it allow for interrupt sharing. Remember, the term interrupt level (irq), device, and subchannel are used interchangeably in Linux/390. @@ -416,6 +511,126 @@ therefore not rely on this parameter on function entry. +s390_request_irq_special() - Request Device Ownership + +As previously discussed a device driver will scan for the devices its supports +by calling get_dev_info(). Once it has found a device it will call +request_irq() to request ownership. + +Note: This function replaces request_irq() described previously. 
+ +int s390_request_irq_special( + int irq, + io_handler_func_t io_handler, + not_oper_handler_func_t not_oper_handler, + unsigned long irqflags, + const char *devname, + void *dev_id); + +irq : specifies the subchannel the ownership is + requested for +io_handler : specifies the device driver's interrupt handler + to be called for interrupt processing +not_oper_handler : specifies a device driver "not operational" handler +irqflags : IRQ flags, currently ignored +devname : device name +dev_id : required pointer to a device specific buffer of + type devstat_t + + +typedef struct { + __u16 devno; /* device number, aka. "cuu" from irb */ + unsigned long intparm; /* interrupt parameter */ + __u8 cstat; /* channel status - accumulated */ + __u8 dstat; /* device status - accumulated */ + __u8 lpum; /* last path used mask from irb */ + __u8 unused; /* not used - reserved */ + unsigned int flag; /* flag : see below */ + __u32 cpa; /* CCW address from irb at primary status */ + __u32 rescnt; /* res. count from irb at primary status */ + __u32 scnt; /* sense count, if DEVSTAT_FLAG_SENSE_AVAIL */ + union { + irb_t irb; /* interruption response block */ + sense_t sense; /* sense information */ + } ii; /* interrupt information */ +} devstat_t; + +During s390_request_irq_special() processing, the devstat_t layout does not +matter, as it won't be used at that time. See do_IO() for a functional +description of its usage. + +typedef void (* io_handler_func_t) ( int irq, + void *devstat, + struct pt_regs *rgs); + +irq : IRQ the interrupt handler is called for +devstat : device status block +rgs : obsolete + +typedef void (* not_oper_handler_func_t)( int irq, + int status ); + +irq : IRQ the not operational status has been encountered for +status : device status + DEVSTAT_NOT_OPER - device is not operational + DEVSTAT_REVALIDATE - revalidate device number + DEVSTAT_DEVICE_GONE - no such device (irq) + +Note: Revalidate indicates that running under VM the device number has been +modified by means of a DEFINE xxxx [as] yyyy command. Therewith device number +xxxx was altered to yyyy. It's the device driver's responsibility to decide +whether device ownership can be retained. + +Gone indicates that the device was detached under VM, or the device number +became invalid (native, LPAR). In order to prevent further I/O the IRQ was +implicitly freed on behalf of the device driver. The driver must not call +free_irq itself. + +Not Oper indicates the device became not operational. It's the device driver's +responsibility to decide whether it wants to maintain ownership of the IRQ. + +The s390_request_irq_special() function returns : + 0 - successful completion +-EINVAL - an invalid parameter was detected +-EBUSY - device (subchannel) already owned +-ENODEV - the device is not-operational +-ENOMEM - not enough kernel memory to process request + +Usage Notes : + +While Linux for Intel defines dev_id as a unique identifier for shared +interrupt lines, it has a totally different purpose on Linux for S/390. Here +it serves as a shared interrupt status area between the generic device support +layer and the device specific driver. The value passed to +s390_request_irq_special() must therefore point to a valid devstat_t type +buffer area the device driver must preserve for later usage. I.e. it must not +be released prior to a call to free_irq(). + +Currently, the value of irqflags is ignored. The Linux for S/390 kernel does +neither know about "fast" interrupt handlers, nor does it allow for interrupt +sharing.
+Remember, the terms interrupt level (irq), device, and subchannel are
+used interchangeably in Linux for S/390.
+
+Unlike request_irq(), this function allows a not operational handler to
+be defined. This handler is called when a device either became not
+operational, the last path to a device became not operational, or the
+device was detached from the system. A detach could be a "detach" under
+VM, or the device becoming unassigned by the Support Element (SE) or
+Hardware Management Console (HMC).
+
+If s390_request_irq_special() was called in enabled state, or if multiple
+CPUs are present, the device may present an interrupt to the specified
+handler even before s390_request_irq_special() has returned to the caller !
+This includes the possibility of unsolicited interrupts or a pending
+interrupt status from an earlier solicited I/O request. The device driver
+must be able to handle this situation properly, or the device may become
+inoperable !
+
+Although the interrupt handler is defined to be called with a pointer to a
+struct pt_regs buffer area, this is not implemented by the Linux for S/390
+platform specific common I/O support layer. The device driver's interrupt
+handler must therefore not rely on this parameter on function entry.
+
+
free_irq() - Release Device Ownership

A device driver may call free_irq() to release ownership of a previously
@@ -523,19 +738,19 @@

int do_IO( int            irq,
           ccw1_t        *cpa,
-          unsigned long  intparm,
+          unsigned long  user_intparm,
           unsigned int   lpm,
           unsigned long  flag);

-irq : irq (subchannel) the I/O request is destined for
-cpa : logical start address of channel program
-intparm : user specific interrupt information; will be presented
-          back to the device driver's interrupt handler. Allows a
-          device driver to associate the interrupt with a
-          particular I/O request.
-lpm : defines the channel path to be used for a specific I/O
-      request. Valid with flag value DOIO_VALID_LPM only.
-flag : defines the action to e parformed for I/O processing
+irq          : irq (subchannel) the I/O request is destined for
+cpa          : logical start address of channel program
+user_intparm : user specific interrupt information; will be presented
+               back to the device driver's interrupt handler. Allows a
+               device driver to associate the interrupt with a
+               particular I/O request.
+lpm          : defines the channel path to be used for a specific I/O
+               request. Valid with flag value DOIO_VALID_LPM only.
+flag         : defines the action to be performed for I/O processing

Possible flag values are :
@@ -543,16 +758,25 @@
DOIO_VALID_LPM          - LPM input parameter is valid (see usage notes
                          below for details)
DOIO_WAIT_FOR_INTERRUPT - wait synchronously for final status
+DOIO_TIMEOUT            - perform a loop while waiting for final status
+                          and fail after a timeout
DOIO_REPORT_ALL         - report all interrupt conditions
+DOIO_ALLOW_SUSPEND      - channel program may become suspended
+DOIO_DENY_PREFETCH      - don't allow for CCW prefetch; usually
+                          this implies the channel program might
+                          become modified
+DOIO_CANCEL_ON_TIMEOUT  - do a cancel_IO if there is a timeout waiting
+                          for the channel program to finish (see usage
+                          notes below for details)

The cpa parameter points to the first format 1 CCW of a channel program :

typedef struct {
-     char            cmd_code;  /* command code */
-     char            flags;     /* flags, like IDA addressing, etc. */
-     unsigned short  count;     /* byte count */
-     void           *cda;       /* data address */
-} ccw1_t __attribute__ ((aligned(8)));
+     __u8  cmd_code; /* command code */
+     __u8  flags;    /* flags, like IDA addressing, etc. 
*/
+     __u16 count;    /* byte count */
+     __u32 cda;      /* data address */
+} __attribute__ ((packed,aligned(8))) ccw1_t;

with the following CCW flag values defined :
@@ -605,6 +829,9 @@
                         successfully be started previously.
DEVSTAT_FINAL_STATUS   - This is a final interrupt status for the I/O
                         request identified by intparm.
+DEVSTAT_PCI            - A PCI was received.
+DEVSTAT_SUSPENDED      - A "suspended" intermediate status was
+                         received.

If device status DEVSTAT_FLAG_SENSE_AVAIL is indicated in field
dev_id->flag, field dev_id->scnt describes the number of device specific
sense bytes
@@ -612,9 +839,9 @@
driver itself is required.

typedef struct {
-     unsigned char res[32];  /* reserved */
-     unsigned char data[32]; /* sense data */
-} sense_t;
+     __u8 res[32];           /* reserved */
+     __u8 data[32];          /* sense data */
+ } __attribute__ ((packed)) sense_t;

The device interrupt handler can use the following definitions to
investigate the primary unit check source coded in sense byte 0 :
@@ -625,6 +852,7 @@
SNS0_EQUIPMENT_CHECK 0x10
SNS0_DATA_CHECK      0x08
SNS0_OVERRUN         0x04
+SNS0_INCOMPL_DOMAIN  0x01

Depending on the device status, multiple of those values may be set
together. Please refer to the device specific documentation for details.
@@ -661,12 +889,10 @@

Usage Notes :

-Prior to call do_IO() the device driver must
-
-assure disabled state, i.e. the I/O mask value in the PSW must be disabled.
-This can be accomplished by calling __save_flags( flags). The current PSW
-flags are preserved and can be restored by __restore_flags( flags) at a
-later time.
+Prior to calling do_IO() the device driver must assure disabled state,
+i.e. the I/O mask value in the PSW must be disabled. This can be
+accomplished by calling __save_flags( flags). The current PSW flags are
+preserved and can be restored by __restore_flags( flags) at a later time.

If the device driver violates this rule while running in a uni-processor
environment an interrupt might be presented prior to the do_IO() routine
@@ -674,7 +900,7 @@
deadlock situation as the interrupt handler will try to obtain the irq
lock the device driver still owns (see below) !

-the driver must assure to hold the device specific lock. This can be
+The driver must ensure it holds the device specific lock. This can be
accomplished by

(i) s390irq_spin_lock( irq), or
@@ -705,6 +931,10 @@
is therewith primarily meant to be used during device driver
initialization for ease of device setup.

+If the device driver is using the DOIO_TIMEOUT parameter, it is a good idea
+also to specify DOIO_CANCEL_ON_TIMEOUT. Otherwise, the failing channel
+program may prevent the execution of any other channel program at the
+subchannel.
+
The lpm input parameter might be used for multipath devices shared among
multiple systems as the Linux/390 CDS isn't grouping channel paths.
Therefore its use might be required if multiple access paths to a device
are available
@@ -757,6 +987,34 @@
and dev_id->dstat that represent the accumulated subchannel and device
status information gathered since do_IO() request initiation.

+Channel programs that intend to set the suspend flag on a channel command
+word (CCW) must start the I/O operation with the DOIO_ALLOW_SUSPEND option
+or the suspend flag will cause a channel program check. At the time the
+channel program becomes suspended, an intermediate interrupt will be
+generated by the channel subsystem.
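+To tie the pieces together, a hedged sketch of starting a suspendable
+channel program follows. The channel program content and the my_* names
+are illustrative only, and CCW_FLAG_SUSPEND is assumed to be the suspend
+bit among the CCW flag definitions above :
+
+  static devstat_t my_devstat;
+  static ccw1_t    my_cp[1] __attribute__ ((aligned(8)));
+  int              rc;
+
+  /* cmd_code, count and cda of the CCW are assumed to be set up
+     already; the suspend flag makes execution suspend at this CCW */
+  my_cp[0].flags |= CCW_FLAG_SUSPEND;
+
+  /* the caller runs disabled and holds the subchannel lock, as
+     required by the usage notes above */
+  rc = do_IO( irq, my_cp, (unsigned long) &my_devstat,
+              0, DOIO_ALLOW_SUSPEND);
+
+Once the intermediate interrupt indicating the suspension
+(DEVSTAT_SUSPENDED) has been processed, channel program execution can be
+continued by means of the resume_IO() routine described next.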
+
+resume_IO() - Resume Channel Program Execution
+
+If a device driver chooses to suspend the current channel program execution
+by setting the CCW suspend flag on a particular CCW, the channel program
+execution is suspended. In order to resume channel program execution the
+CIO layer provides the resume_IO() routine.
+
+int resume_IO( int irq);
+
+irq : irq (subchannel) the resume operation is requested for
+
+The resume_IO() function returns:
+
+        0 - suspended channel program is resumed
+ -EBUSY   - status pending
+ -ENODEV  - invalid or not-operational subchannel
+ -EINVAL  - resume function not applicable
+ -ENOTCONN - there is no I/O request pending for completion
+
+Usage Notes:
+Please have a look at the do_IO() usage notes for more details on
+suspended channel programs.

halt_IO() - Halt I/O Request Processing
@@ -765,9 +1023,9 @@
a halt subchannel (HSCH) I/O command. For those purposes the halt_IO()
command is provided.

-int halt_IO( int irq,            /* subchannel number */
-             int intparm,        /* dummy intparm */
-             unsigned int flag); /* operation mode */
+int halt_IO( int           irq,     /* subchannel number */
+             unsigned long intparm, /* dummy intparm */
+             unsigned long flag);   /* operation mode */

irq : irq (subchannel) the halt operation is requested for
intparm : interruption parameter; value is only used if no I/O
@@ -867,7 +1125,7 @@
reset_cons_dev - Reset Console Device

This routine allows for resetting the console device specification. See
-set_cons_dev() for details.
+wait_cons_dev() for details.

int reset_cons_dev( int irq);
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/s390/CommonIO linux.21rc5-ac2/Documentation/s390/CommonIO
--- linux.21rc5/Documentation/s390/CommonIO	2003-05-28 15:08:20.000000000 +0100
+++ linux.21rc5-ac2/Documentation/s390/CommonIO	2003-04-25 13:50:29.000000000 +0100
@@ -21,7 +21,8 @@

Default is on.

-* cio_ignore = <device number>, <device number>, ...
+* cio_ignore = <device number> | <device number>-<device number>,
+               <device number> | <device number>-<device number>, ...

The given device numbers will be ignored by the common I/O-layer; no
detection and device sensing will be done on any of those devices. The
subchannel to
@@ -55,14 +56,22 @@

* /proc/subchannels

-  Shows for each subchannel
-  - device number
-  - device type/model and if applicable control unit type/model
-  - whether the device is in use
-  - path installed mask, path available mask, path operational mask and last
-    path used mask
-  - the channel path IDs (chpids)
+  This entry shows information on a per-subchannel basis.
+  The data is ordered in the following way:
+
+  - device number
+  - subchannel number
+  - device type/model (if applicable; if not, this is empty) and control unit
+    type/model
+  - whether the device is in use (i.e. a device driver has requested ownership
+    and registered an interrupt handler)
+  - path installed mask (PIM), as reflected by last store subchannel
+  - path available mask (PAM), as reflected by last store subchannel
+  - path operational mask (POM), as reflected by last store subchannel
+  - the channel path IDs (CHPIDs)
+
+  All fields are separated by spaces; the CHPIDs are grouped in blocks of
+  four.

* /proc/deviceinfo/<device number>
@@ -137,3 +146,26 @@

This entry counts how many times s390_process_IRQ has been called for
each CPU. This info is in /proc/interrupts on other architectures.
+
+* /proc/chpids
+
+  This entry will only show up if you specified CONFIG_CHSC=y during kernel
+  config.
+
+  This entry serves a dual purpose:
+
+  - show which chpids are currently known to Linux and their status (online,
+    logically offline),
+
+  - toggling known chpids logically online/offline.
+
+  To toggle a known chpid logically offline, do an
+    echo off <chpid> > /proc/chpids
+  <chpid> is interpreted as hex, even if you omit the '0x'.
+  The chpid will be treated by Linux as if it were not online, which can mean
+  some devices will become unavailable.
+
+  You can toggle a logically offline chpid online again by
+    echo on <chpid> > /proc/chpids
+  If devices became unavailable by toggling the chpid logically offline, they
+  will become available again after you toggle the chpid online again.
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/s390/Debugging390.txt linux.21rc5-ac2/Documentation/s390/Debugging390.txt
--- linux.21rc5/Documentation/s390/Debugging390.txt	2003-05-28 15:08:20.000000000 +0100
+++ linux.21rc5-ac2/Documentation/s390/Debugging390.txt	2003-04-25 13:50:29.000000000 +0100
@@ -238,7 +238,7 @@

On 390 our limitations & strengths make us slightly different.
For backward compatibility ( because of the psw address hi bit which
-indicates whether we are in 31 or 64 bit mode ) we are only allowed
+indicates whether we are in 31 or 24 bit mode ) we are only allowed
use 31 bits (2GB) of our 32 bit addresses. However, we use entirely
separate address spaces for the user & kernel.
@@ -1475,6 +1475,12 @@

D 00014CB4.20
V00014CB4  2F646576 2F636F6E 736F6C65 00001BF5
V00014CC4  FC00014C B4001001 E0001000 B8070707
+
+Alternatively you can do the more elegant
+D 0.20;BASE2
+with BASE2 telling VM to use GPR2 as the base register.
+
+
Now copy the text till the first 00 hex ( which is the end of the string
to an xterm & do hex2ascii on it.
hex2ascii 2F646576 2F636F6E 736F6C65 00
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/s390/TAPE linux.21rc5-ac2/Documentation/s390/TAPE
--- linux.21rc5/Documentation/s390/TAPE	2003-05-28 15:08:20.000000000 +0100
+++ linux.21rc5-ac2/Documentation/s390/TAPE	1970-01-01 01:00:00.000000000 +0100
@@ -1,122 +0,0 @@
-Channel attached Tape device driver
-
------------------------------WARNING-----------------------------------------
-This driver is considered to be EXPERIMENTAL. Do NOT use it in
-production environments. Feel free to test it and report problems back to us.
-------------------------------------------------------------------------------
-
-The LINUX for zSeries tape device driver manages channel attached tape drives
-which are compatible to IBM 3480 or IBM 3490 magnetic tape subsystems. This
-includes various models of these devices (for example the 3490E).
-
-
-Tape driver features
-
-The device driver supports a maximum of 128 tape devices.
-No official LINUX device major number is assigned to the zSeries tape device
-driver. It allocates major numbers dynamically and reports them on system
-startup.
-Typically it will get major number 254 for both the character device front-end
-and the block device front-end.
-
-The tape device driver needs no kernel parameters. All supported devices
-present are detected on driver initialization at system startup or module load.
-The devices detected are ordered by their subchannel numbers. The device with
-the lowest subchannel number becomes device 0, the next one will be device 1
-and so on.
-
-
-Tape character device front-end
-
-The usual way to read or write to the tape device is through the character
-device front-end.
The zSeries tape device driver provides two character devices -for each physical device -- the first of these will rewind automatically when -it is closed, the second will not rewind automatically. - -The character device nodes are named /dev/rtibm0 (rewinding) and /dev/ntibm0 -(non-rewinding) for the first device, /dev/rtibm1 and /dev/ntibm1 for the -second, and so on. - -The character device front-end can be used as any other LINUX tape device. You -can write to it and read from it using LINUX facilities such as GNU tar. The -tool mt can be used to perform control operations, such as rewinding the tape -or skipping a file. - -Most LINUX tape software should work with either tape character device. - - -Tape block device front-end - -The tape device may also be accessed as a block device in read-only mode. -This could be used for software installation in the same way as it is used with -other operation systems on the zSeries platform (and most LINUX -distributions are shipped on compact disk using ISO9660 filesystems). - -One block device node is provided for each physical device. These are named -/dev/btibm0 for the first device, /dev/btibm1 for the second and so on. -You should only use the ISO9660 filesystem on LINUX for zSeries tapes because -the physical tape devices cannot perform fast seeks and the ISO9660 system is -optimized for this situation. - - -Tape block device example - -In this example a tape with an ISO9660 filesystem is created using the first -tape device. ISO9660 filesystem support must be built into your system kernel -for this. -The mt command is used to issue tape commands and the mkisofs command to -create an ISO9660 filesystem: - -- create a LINUX directory (somedir) with the contents of the filesystem - mkdir somedir - cp contents somedir - -- insert a tape - -- ensure the tape is at the beginning - mt -f /dev/ntibm0 rewind - -- set the blocksize of the character driver. The blocksize 2048 bytes - is commonly used on ISO9660 CD-Roms - mt -f /dev/ntibm0 setblk 2048 - -- write the filesystem to the character device driver - mkisofs -o /dev/ntibm0 somedir - -- rewind the tape again - mt -f /dev/ntibm0 rewind - -- Now you can mount your new filesystem as a block device: - mount -t iso9660 -o ro,block=2048 /dev/btibm0 /mnt - -TODO List - - - Driver has to be stabelized still - -BUGS - -This driver is considered BETA, which means some weaknesses may still -be in it. -If an error occurs which cannot be handled by the code you will get a -sense-data dump.In that case please do the following: - -1. set the tape driver debug level to maximum: - echo 6 >/proc/s390dbf/tape/level - -2. re-perform the actions which produced the bug. (Hopefully the bug will - reappear.) - -3. get a snapshot from the debug-feature: - cat /proc/s390dbf/tape/hex_ascii >somefile - -4. Now put the snapshot together with a detailed description of the situation - that led to the bug: - - Which tool did you use? - - Which hardware do you have? - - Was your tape unit online? - - Is it a shared tape unit? - -5. 
Send an email with your bug report to:
-	mailto:Linux390@de.ibm.com
-
-
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/sched-coding.txt linux.21rc5-ac2/Documentation/sched-coding.txt
--- linux.21rc5/Documentation/sched-coding.txt	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/Documentation/sched-coding.txt	2003-04-22 16:44:36.000000000 +0100
@@ -0,0 +1,129 @@
+     Reference for various scheduler-related methods in the O(1) scheduler
+		Robert Love, MontaVista Software
+
+
+Note most of these methods are local to kernel/sched.c - this is by design.
+The scheduler is meant to be self-contained and abstracted away.  This document
+is primarily for understanding the scheduler, not interfacing to it.  Some of
+the discussed interfaces, however, are general process/scheduling methods.
+They are typically defined in include/linux/sched.h.
+
+
+Main Scheduling Methods
+-----------------------
+
+void load_balance(runqueue_t *this_rq, int idle)
+	Attempts to pull tasks from one cpu to another to balance cpu usage,
+	if needed.  This method is called explicitly if the runqueues are
+	imbalanced or periodically by the timer tick.  Prior to calling,
+	the current runqueue must be locked and interrupts disabled.
+
+void schedule()
+	The main scheduling function.  Upon return, the highest priority
+	process will be active.
+
+
+Locking
+-------
+
+Each runqueue has its own lock, rq->lock.  When multiple runqueues need
+to be locked, lock acquires must be ordered by ascending &runqueue value.
+
+A specific runqueue is locked via
+
+	task_rq_lock(task_t pid, unsigned long *flags)
+
+which disables preemption, disables interrupts, and locks the runqueue pid is
+running on.  Likewise,
+
+	task_rq_unlock(task_t pid, unsigned long *flags)
+
+unlocks the runqueue pid is running on, restores interrupts to their previous
+state, and reenables preemption.
+
+The routines
+
+	double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
+
+and
+
+	double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
+
+safely lock and unlock, respectively, the two specified runqueues.  They do
+not, however, disable and restore interrupts.  Users are required to do so
+manually before and after calls.
+
+
+Values
+------
+
+MAX_PRIO
+	The maximum priority of the system, stored in the task as task->prio.
+	Lower priorities are higher.  Normal (non-RT) priorities range from
+	MAX_RT_PRIO to (MAX_PRIO - 1).
+MAX_RT_PRIO
+	The maximum real-time priority of the system.  Valid RT priorities
+	range from 0 to (MAX_RT_PRIO - 1).
+MAX_USER_RT_PRIO
+	The maximum real-time priority that is exported to user-space.  Should
+	always be equal to or less than MAX_RT_PRIO.  Setting it less allows
+	kernel threads to have higher priorities than any user-space task.
+MIN_TIMESLICE
+MAX_TIMESLICE
+	Respectively, the minimum and maximum timeslices (quanta) of a process.
+
+Data
+----
+
+struct runqueue
+	The main per-CPU runqueue data structure.
+struct task_struct
+	The main per-process data structure.
+
+
+General Methods
+---------------
+
+cpu_rq(cpu)
+	Returns the runqueue of the specified cpu.
+this_rq()
+	Returns the runqueue of the current cpu.
+task_rq(task)
+	Returns the runqueue which holds the specified task.
+cpu_curr(cpu)
+	Returns the task currently running on the given cpu.
+rt_task(task)
+	Returns true if task is real-time, false if not.
+task_cpu(task)
+	Returns the cpu the given task is running on (task->cpu on SMP,
+	always 0 on UP).
+
+
+Process Control Methods
+-----------------------
+
+void set_user_nice(task_t *p, long nice)
+	Sets the "nice" value of task p to the given value.
+int setscheduler(pid_t pid, int policy, struct sched_param *param)
+	Sets the scheduling policy and parameters for the given pid.
+void set_cpus_allowed(task_t *p, unsigned long new_mask)
+	Sets a given task's CPU affinity and migrates it to a proper cpu.
+	Callers must have a valid reference to the task and ensure the
+	task does not exit prematurely.  No locks can be held during the call.
+set_task_state(tsk, state_value)
+	Sets the given task's state to the given value.
+set_current_state(state_value)
+	Sets the current task's state to the given value.
+void set_tsk_need_resched(struct task_struct *tsk)
+	Sets need_resched in the given task.
+void clear_tsk_need_resched(struct task_struct *tsk)
+	Clears need_resched in the given task.
+void set_need_resched()
+	Sets need_resched in the current task.
+void set_task_cpu(task, cpu)
+	Sets task->cpu to cpu on SMP.  Noop on UP.
+void clear_need_resched()
+	Clears need_resched in the current task.
+int need_resched()
+	Returns true if need_resched is set in the current task, false
+	otherwise.
+yield()
+	Places the current process at the end of the runqueue and calls
+	schedule().
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/sched-design.txt linux.21rc5-ac2/Documentation/sched-design.txt
--- linux.21rc5/Documentation/sched-design.txt	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/Documentation/sched-design.txt	2003-04-22 16:44:36.000000000 +0100
@@ -0,0 +1,165 @@
+		   Goals, Design and Implementation of the
+		      new ultra-scalable O(1) scheduler
+
+
+  This is an edited version of an email Ingo Molnar sent to
+  lkml on 4 Jan 2002.  It describes the goals, design, and
+  implementation of Ingo's new ultra-scalable O(1) scheduler.
+  Last Updated: 18 April 2002.
+
+
+Goal
+====
+
+The main goal of the new scheduler is to keep all the good things we know
+and love about the current Linux scheduler:
+
+ - good interactive performance even during high load: if the user
+   types or clicks then the system must react instantly and must execute
+   the user tasks smoothly, even during considerable background load.
+
+ - good scheduling/wakeup performance with 1-2 runnable processes.
+
+ - fairness: no process should stay without any timeslice for any
+   unreasonable amount of time.  No process should get an unjustly high
+   amount of CPU time.
+
+ - priorities: less important tasks can be started with lower priority,
+   more important tasks with higher priority.
+
+ - SMP efficiency: no CPU should stay idle if there is work to do.
+
+ - SMP affinity: processes which run on one CPU should stay affine to
+   that CPU.  Processes should not bounce between CPUs too frequently.
+
+ - plus additional scheduler features: RT scheduling, CPU binding.
+
+and the goal is also to add a few new things:
+
+ - fully O(1) scheduling.  Are you tired of the recalculation loop
+   blowing the L1 cache away every now and then?  Do you think the goodness
+   loop is taking a bit too long to finish if there are lots of runnable
+   processes?  This new scheduler takes no prisoners: wakeup(), schedule(),
+   the timer interrupt are all O(1) algorithms.  There is no recalculation
+   loop.  There is no goodness loop either.
+
+ - 'perfect' SMP scalability.  With the new scheduler there is no 'big'
+   runqueue_lock anymore - it's all per-CPU runqueues and locks - two
+   tasks on two separate CPUs can wake up, schedule and context-switch
+   completely in parallel, without any interlocking.  All
+   scheduling-relevant data is structured for maximum scalability.
+
+ - better SMP affinity.  The old scheduler has a particular weakness that
+   causes the random bouncing of tasks between CPUs if/when there are
+   higher priority or interactive tasks; this was observed and reported by
+   many people.  The reason is that the timeslice recalculation loop first
+   needs every currently running task to consume its timeslice.  But when
+   this happens on e.g. an 8-way system, then this property starves an
+   increasing number of CPUs from executing any process.  Once the last
+   task that has a timeslice left has finished using up that timeslice,
+   the recalculation loop is triggered and other CPUs can start executing
+   tasks again - after having idled around for a number of timer ticks.
+   The more CPUs, the worse this effect.
+
+   Furthermore, this same effect causes the bouncing effect as well:
+   whenever there is such a 'timeslice squeeze' of the global runqueue,
+   idle processors start executing tasks which are not affine to that CPU.
+   (because the affine tasks have finished off their timeslices already.)
+
+   The new scheduler solves this problem by distributing timeslices on a
+   per-CPU basis, without having any global synchronization or
+   recalculation.
+
+ - batch scheduling.  A significant proportion of computing-intensive tasks
+   benefit from batch-scheduling, where timeslices are long and processes
+   are roundrobin scheduled.  The new scheduler does such batch-scheduling
+   of the lowest priority tasks - so nice +19 jobs will get
+   'batch-scheduled' automatically.  With this scheduler, nice +19 jobs are
+   in essence SCHED_IDLE, from an interactiveness point of view.
+
+ - handle extreme loads more smoothly, without breakdown and scheduling
+   storms.
+
+ - O(1) RT scheduling.  For those RT folks who are paranoid about the
+   O(nr_running) property of the goodness loop and the recalculation loop.
+
+ - run fork()ed children before the parent.  Andrea has pointed out the
+   advantages of this a few months ago, but patches for this feature
+   do not work with the old scheduler as well as they should,
+   because idle processes often steal the new child before the fork()ing
+   CPU gets to execute it.
+
+
+Design
+======
+
+the core of the new scheduler consists of the following mechanisms:
+
+ - *two*, priority-ordered 'priority arrays' per CPU.  There is an 'active'
+   array and an 'expired' array.  The active array contains all tasks that
+   are affine to this CPU and have timeslices left.  The expired array
+   contains all tasks which have used up their timeslices - but this array
+   is kept sorted as well.  The active and expired arrays are not accessed
+   directly; they are accessed through two pointers in the per-CPU runqueue
+   structure.  If all active tasks are used up then we 'switch' the two
+   pointers and from now on the ready-to-go (former-) expired array is the
+   active array - and the empty active array serves as the new collector
+   for expired tasks.
+
+ - there is a 64-bit bitmap cache for array indices.  Finding the highest
+   priority task is thus a matter of two x86 BSFL bit-search instructions.
+
+the split-array solution enables us to have an arbitrary number of active
+and expired tasks, and the recalculation of timeslices can be done
+immediately when the timeslice expires.  Because the arrays are always
+accessed through the pointers in the runqueue, switching the two arrays can
+be done very quickly.
+
+this is a hybrid priority-list approach coupled with roundrobin
+scheduling and the array-switch method of distributing timeslices.
+
+ - there is a per-task 'load estimator'.
+
+one of the toughest things to get right is good interactive feel during
+heavy system load.  While playing with various scheduler variants i found
+that the best interactive feel is achieved not by 'boosting' interactive
+tasks, but by 'punishing' tasks that want to use more CPU time than there
+is available.  This method is also much easier to do in an O(1) fashion.
+
+to establish the actual 'load' the task contributes to the system, a
+complex-looking but pretty accurate method is used: there is a 4-entry
+'history' ringbuffer of the task's activities during the last 4 seconds.
+This ringbuffer is operated without much overhead.  The entries tell the
+scheduler a pretty accurate load-history of the task: has it used up more
+CPU time or less during the past N seconds.  [the size '4' and the interval
+of 4x 1 seconds was found by lots of experimentation - this part is
+flexible and can be changed in both directions.]
+
+the penalty a task gets for generating more load than the CPU can handle
+is a priority decrease - there is a maximum amount to this penalty
+relative to their static priority, so even fully CPU-bound tasks will
+observe each other's priorities, and will share the CPU accordingly.
+
+the SMP load-balancer can be extended/switched with additional parallel
+computing and cache hierarchy concepts: NUMA scheduling, multi-core CPUs
+can be supported easily by changing the load-balancer.  Right now it's
+tuned for my SMP systems.
+
+i skipped the prev->mm == next->mm advantage - no workload i know of shows
+any sensitivity to this.  It can be added back by sacrificing O(1)
+schedule() [the current and one-lower priority list can be searched for a
+that->mm == current->mm condition], but costs a fair number of cycles
+during a number of important workloads, so i wanted to avoid this as much
+as possible.
+
+- the SMP idle-task startup code was still racy and the new scheduler
+triggered this.  So i streamlined the idle-setup code a bit.  We do not call
+into schedule() before all processors have started up fully and all idle
+threads are in place.
+
+- the patch also cleans up a number of aspects of sched.c - moves code
+into other areas of the kernel where it's appropriate, and simplifies
+certain code paths and data constructs.  As a result, the new scheduler's
+code is smaller than the old one.
+
+	Ingo
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Documentation/vm/overcommit-accounting linux.21rc5-ac2/Documentation/vm/overcommit-accounting
--- linux.21rc5/Documentation/vm/overcommit-accounting	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/Documentation/vm/overcommit-accounting	2003-04-22 16:44:36.000000000 +0100
@@ -0,0 +1,70 @@
+* This describes the overcommit management facility in the latest kernel
+  tree (FIXME: actually it also describes the stuff that isn't yet done)
+
+The Linux kernel supports four overcommit handling modes
+
+0	-	Heuristic overcommit handling.  Obvious overcommits of
+		address space are refused.  Used for a typical system.  It
+		ensures a seriously wild allocation fails while allowing
+		overcommit to reduce swap usage
+
+1	-	No overcommit handling.  Appropriate for some scientific
+		applications
+
+2	-	(NEW) strict overcommit.  The total address space commit
+		for the system is not permitted to exceed swap + half ram.
+		In almost all situations this means a process will not be
+		killed while accessing pages but only by malloc failures
+		that are reported back by the kernel mmap/brk code.
+
+3	-	(NEW) paranoid overcommit.  The total address space commit
+		for the system is not permitted to exceed swap.  The machine
+		will never kill a process accessing pages it has mapped
+		except due to a bug (i.e. report it!)
+
+Gotchas
+-------
+
+The C language stack growth does an implicit mremap.  If you want absolute
+guarantees and run close to the edge you MUST mmap your stack for the
+largest size you think you will need.  For typical stack usage this does
+not matter much, but it's a corner case if you really really care
+
+In modes 2 and 3 the MAP_NORESERVE flag is ignored.
+
+
+How It Works
+------------
+
+The overcommit is based on the following rules
+
+For a file backed map
+	SHARED or READ-only	-	0 cost (the file is the map not swap)
+	PRIVATE WRITABLE	-	size of mapping per instance
+
+For an anonymous or /dev/zero map
+	SHARED			-	size of mapping
+	PRIVATE READ-only	-	0 cost (but of little use)
+	PRIVATE WRITABLE	-	size of mapping per instance
+
+Additional accounting
+	Pages made writable copies by mmap
+	shmfs memory drawn from the same pool
+
+Status
+------
+
+o	We account mmap memory mappings
+o	We account mprotect changes in commit
+o	We account mremap changes in size
+o	We account brk
+o	We account munmap
+o	We report the commit status in /proc
+o	Account and check on fork
+o	Review stack handling/building on exec
+o	SHMfs accounting
+o	Implement actual limit enforcement
+
+To Do
+-----
+o	Account ptrace pages (this is hard)
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ac.c linux.21rc5-ac2/drivers/acpi/ac.c
--- linux.21rc5/drivers/acpi/ac.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ac.c	2003-04-28 17:58:27.000000000 +0100
@@ -0,0 +1,346 @@
+/*
+ *  acpi_ac.c - ACPI AC Adapter Driver ($Revision: 26 $)
+ *
+ *  Copyright (C) 2001, 2002 Andy Grover
+ *  Copyright (C) 2001, 2002 Paul Diefenbaugh
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_AC_COMPONENT +ACPI_MODULE_NAME ("acpi_ac") + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION(ACPI_AC_DRIVER_NAME); +MODULE_LICENSE("GPL"); + +#define PREFIX "ACPI: " + + +int acpi_ac_add (struct acpi_device *device); +int acpi_ac_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_ac_driver = { + .name = ACPI_AC_DRIVER_NAME, + .class = ACPI_AC_CLASS, + .ids = ACPI_AC_HID, + .ops = { + .add = acpi_ac_add, + .remove = acpi_ac_remove, + }, +}; + +struct acpi_ac { + acpi_handle handle; + unsigned long state; +}; + + +/* -------------------------------------------------------------------------- + AC Adapter Management + -------------------------------------------------------------------------- */ + +static int +acpi_ac_get_state ( + struct acpi_ac *ac) +{ + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE("acpi_ac_get_state"); + + if (!ac) + return_VALUE(-EINVAL); + + status = acpi_evaluate_integer(ac->handle, "_PSR", NULL, &ac->state); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error reading AC Adapter state\n")); + ac->state = ACPI_AC_STATUS_UNKNOWN; + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +struct proc_dir_entry *acpi_ac_dir = NULL; + +static int +acpi_ac_read_state ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_ac *ac = (struct acpi_ac *) data; + char *p = page; + int len = 0; + + ACPI_FUNCTION_TRACE("acpi_ac_read_state"); + + if (!ac || (off != 0)) + goto end; + + if (acpi_ac_get_state(ac)) { + p += sprintf(p, "ERROR: Unable to read AC Adapter state\n"); + goto end; + } + + p += sprintf(p, "state: "); + switch (ac->state) { + case ACPI_AC_STATUS_OFFLINE: + p += sprintf(p, "off-line\n"); + break; + case ACPI_AC_STATUS_ONLINE: + p += sprintf(p, "on-line\n"); + break; + default: + p += sprintf(p, "unknown\n"); + break; + } + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_ac_add_fs ( + struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_ac_add_fs"); + + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_ac_dir); + if (!acpi_device_dir(device)) + return_VALUE(-ENODEV); + } + + /* 'state' [R] */ + entry = create_proc_entry(ACPI_AC_FILE_STATE, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_AC_FILE_STATE)); + else { + entry->read_proc = acpi_ac_read_state; + entry->data = acpi_driver_data(device); + } + + return_VALUE(0); +} + + +static int +acpi_ac_remove_fs ( + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_ac_remove_fs"); + + if (acpi_device_dir(device)) { + remove_proc_entry(acpi_device_bid(device), acpi_ac_dir); + acpi_device_dir(device) = NULL; + } + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Driver Model + -------------------------------------------------------------------------- 
*/ + +void +acpi_ac_notify ( + acpi_handle handle, + u32 event, + void *data) +{ + struct acpi_ac *ac = (struct acpi_ac *) data; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_ac_notify"); + + if (!ac) + return; + + if (acpi_bus_get_device(ac->handle, &device)) + return_VOID; + + switch (event) { + case ACPI_AC_NOTIFY_STATUS: + acpi_ac_get_state(ac); + acpi_bus_generate_event(device, event, (u32) ac->state); + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Unsupported event [0x%x]\n", event)); + break; + } + + return_VOID; +} + + +int +acpi_ac_add ( + struct acpi_device *device) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_ac *ac = NULL; + + ACPI_FUNCTION_TRACE("acpi_ac_add"); + + if (!device) + return_VALUE(-EINVAL); + + ac = kmalloc(sizeof(struct acpi_ac), GFP_KERNEL); + if (!ac) + return_VALUE(-ENOMEM); + memset(ac, 0, sizeof(struct acpi_ac)); + + ac->handle = device->handle; + sprintf(acpi_device_name(device), "%s", ACPI_AC_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_AC_CLASS); + acpi_driver_data(device) = ac; + + result = acpi_ac_get_state(ac); + if (result) + goto end; + + result = acpi_ac_add_fs(device); + if (result) + goto end; + + status = acpi_install_notify_handler(ac->handle, + ACPI_DEVICE_NOTIFY, acpi_ac_notify, ac); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error installing notify handler\n")); + result = -ENODEV; + goto end; + } + + printk(KERN_INFO PREFIX "%s [%s] (%s)\n", + acpi_device_name(device), acpi_device_bid(device), + ac->state?"on-line":"off-line"); + +end: + if (result) { + acpi_ac_remove_fs(device); + kfree(ac); + } + + return_VALUE(result); +} + + +int +acpi_ac_remove ( + struct acpi_device *device, + int type) +{ + acpi_status status = AE_OK; + struct acpi_ac *ac = NULL; + + ACPI_FUNCTION_TRACE("acpi_ac_remove"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + ac = (struct acpi_ac *) acpi_driver_data(device); + + status = acpi_remove_notify_handler(ac->handle, + ACPI_DEVICE_NOTIFY, acpi_ac_notify); + if (ACPI_FAILURE(status)) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error removing notify handler\n")); + + acpi_ac_remove_fs(device); + + kfree(ac); + + return_VALUE(0); +} + + +int __init +acpi_ac_init (void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_ac_init"); + + acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); + if (!acpi_ac_dir) + return_VALUE(-ENODEV); + + result = acpi_bus_register_driver(&acpi_ac_driver); + if (result < 0) { + remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + + +void __exit +acpi_ac_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_ac_exit"); + + acpi_bus_unregister_driver(&acpi_ac_driver); + + remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); + + return_VOID; +} + + +module_init(acpi_ac_init); +module_exit(acpi_ac_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/acpi_ksyms.c linux.21rc5-ac2/drivers/acpi/acpi_ksyms.c --- linux.21rc5/drivers/acpi/acpi_ksyms.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/acpi_ksyms.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,64 +1,56 @@ /* - * ksyms.c - ACPI exported symbols + * acpi_ksyms.c - ACPI Kernel Symbols ($Revision: 15 $) * - * Copyright (C) 2000 Andrew Grover + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free 
software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include #include -#include -#include -#include #include -#include "acpi.h" -#include "acdebug.h" -extern int acpi_in_debugger; -extern FADT_DESCRIPTOR acpi_fadt; +#ifdef CONFIG_ACPI_INTERPRETER -#define _COMPONENT OS_DEPENDENT - MODULE_NAME ("symbols") +/* ACPI Debugger */ #ifdef ENABLE_DEBUGGER + +extern int acpi_in_debugger; + EXPORT_SYMBOL(acpi_in_debugger); EXPORT_SYMBOL(acpi_db_user_commands); -#endif -#ifdef ACPI_DEBUG +#endif /* ENABLE_DEBUGGER */ + +/* ACPI Core Subsystem */ + +#ifdef ACPI_DEBUG_OUTPUT +EXPORT_SYMBOL(acpi_dbg_layer); +EXPORT_SYMBOL(acpi_dbg_level); EXPORT_SYMBOL(acpi_ut_debug_print_raw); EXPORT_SYMBOL(acpi_ut_debug_print); EXPORT_SYMBOL(acpi_ut_status_exit); +EXPORT_SYMBOL(acpi_ut_value_exit); EXPORT_SYMBOL(acpi_ut_exit); EXPORT_SYMBOL(acpi_ut_trace); -#endif - -EXPORT_SYMBOL(acpi_gbl_FADT); - -EXPORT_SYMBOL(acpi_os_free); -EXPORT_SYMBOL(acpi_os_printf); -EXPORT_SYMBOL(acpi_os_callocate); -EXPORT_SYMBOL(acpi_os_sleep); -EXPORT_SYMBOL(acpi_os_stall); -EXPORT_SYMBOL(acpi_os_queue_for_execution); - -EXPORT_SYMBOL(acpi_dbg_layer); -EXPORT_SYMBOL(acpi_dbg_level); - -EXPORT_SYMBOL(acpi_format_exception); +#endif /*ACPI_DEBUG_OUTPUT*/ EXPORT_SYMBOL(acpi_get_handle); EXPORT_SYMBOL(acpi_get_parent); @@ -69,7 +61,6 @@ EXPORT_SYMBOL(acpi_evaluate_object); EXPORT_SYMBOL(acpi_get_table); EXPORT_SYMBOL(acpi_get_firmware_table); - EXPORT_SYMBOL(acpi_install_notify_handler); EXPORT_SYMBOL(acpi_remove_notify_handler); EXPORT_SYMBOL(acpi_install_gpe_handler); @@ -78,40 +69,87 @@ EXPORT_SYMBOL(acpi_remove_address_space_handler); EXPORT_SYMBOL(acpi_install_fixed_event_handler); EXPORT_SYMBOL(acpi_remove_fixed_event_handler); - EXPORT_SYMBOL(acpi_acquire_global_lock); EXPORT_SYMBOL(acpi_release_global_lock); - +EXPORT_SYMBOL(acpi_install_gpe_block); +EXPORT_SYMBOL(acpi_remove_gpe_block); EXPORT_SYMBOL(acpi_get_current_resources); EXPORT_SYMBOL(acpi_get_possible_resources); +EXPORT_SYMBOL(acpi_walk_resources); EXPORT_SYMBOL(acpi_set_current_resources); - EXPORT_SYMBOL(acpi_enable_event); EXPORT_SYMBOL(acpi_disable_event); EXPORT_SYMBOL(acpi_clear_event); - EXPORT_SYMBOL(acpi_get_timer_duration); EXPORT_SYMBOL(acpi_get_timer); 
+EXPORT_SYMBOL(acpi_get_sleep_type_data); +EXPORT_SYMBOL(acpi_get_register); +EXPORT_SYMBOL(acpi_set_register); +EXPORT_SYMBOL(acpi_enter_sleep_state); +EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios); +EXPORT_SYMBOL(acpi_get_system_info); +EXPORT_SYMBOL(acpi_get_devices); + +/* ACPI OS Services Layer (acpi_osl.c) */ +EXPORT_SYMBOL(acpi_os_free); +EXPORT_SYMBOL(acpi_os_printf); +EXPORT_SYMBOL(acpi_os_sleep); +EXPORT_SYMBOL(acpi_os_stall); +EXPORT_SYMBOL(acpi_os_signal); +EXPORT_SYMBOL(acpi_os_queue_for_execution); EXPORT_SYMBOL(acpi_os_signal_semaphore); EXPORT_SYMBOL(acpi_os_create_semaphore); EXPORT_SYMBOL(acpi_os_delete_semaphore); EXPORT_SYMBOL(acpi_os_wait_semaphore); -EXPORT_SYMBOL(acpi_os_read_port); -EXPORT_SYMBOL(acpi_os_write_port); +EXPORT_SYMBOL(acpi_os_read_pci_configuration); + +/* ACPI Utilities (acpi_utils.c) */ + +EXPORT_SYMBOL(acpi_extract_package); +EXPORT_SYMBOL(acpi_evaluate_integer); +EXPORT_SYMBOL(acpi_evaluate_reference); + +#endif /*CONFIG_ACPI_INTERPRETER*/ + + +/* ACPI Bus Driver (acpi_bus.c) */ + +#ifdef CONFIG_ACPI_BUS EXPORT_SYMBOL(acpi_fadt); -EXPORT_SYMBOL(acpi_hw_register_bit_access); -EXPORT_SYMBOL(acpi_hw_obtain_sleep_type_register_data); -EXPORT_SYMBOL(acpi_enter_sleep_state); -EXPORT_SYMBOL(acpi_get_system_info); -EXPORT_SYMBOL(acpi_leave_sleep_state); EXPORT_SYMBOL(acpi_walk_namespace); -/*EXPORT_SYMBOL(acpi_save_state_mem);*/ -/*EXPORT_SYMBOL(acpi_save_state_disk);*/ -EXPORT_SYMBOL(acpi_hw_register_read); -EXPORT_SYMBOL(acpi_set_firmware_waking_vector); -EXPORT_SYMBOL(acpi_subsystem_status); +EXPORT_SYMBOL(acpi_root_dir); +EXPORT_SYMBOL(acpi_bus_get_device); +EXPORT_SYMBOL(acpi_bus_get_status); +EXPORT_SYMBOL(acpi_bus_get_power); +EXPORT_SYMBOL(acpi_bus_set_power); +EXPORT_SYMBOL(acpi_bus_generate_event); +EXPORT_SYMBOL(acpi_bus_receive_event); +EXPORT_SYMBOL(acpi_bus_register_driver); +EXPORT_SYMBOL(acpi_bus_unregister_driver); +EXPORT_SYMBOL(acpi_bus_scan); +EXPORT_SYMBOL(acpi_init); + +#endif /*CONFIG_ACPI_BUS*/ + + +/* ACPI PCI Driver (pci_irq.c) */ + +#ifdef CONFIG_ACPI_PCI + +#include +extern int acpi_pci_irq_enable(struct pci_dev *dev); +EXPORT_SYMBOL(acpi_pci_irq_enable); +extern int acpi_pci_irq_lookup (int segment, int bus, int device, int pin); +EXPORT_SYMBOL(acpi_pci_irq_lookup); +#endif /*CONFIG_ACPI_PCI */ + +#ifdef CONFIG_ACPI_EC +/* ACPI EC driver (ec.c) */ + +EXPORT_SYMBOL(ec_read); +EXPORT_SYMBOL(ec_write); +#endif -EXPORT_SYMBOL(acpi_os_signal); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/battery.c linux.21rc5-ac2/drivers/acpi/battery.c --- linux.21rc5/drivers/acpi/battery.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/battery.c 2003-04-28 17:58:27.000000000 +0100 @@ -0,0 +1,825 @@ +/* + * acpi_battery.c - ACPI Battery Driver ($Revision: 36 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_BATTERY_COMPONENT +ACPI_MODULE_NAME ("acpi_battery") + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION(ACPI_BATTERY_DRIVER_NAME); +MODULE_LICENSE("GPL"); + +#define PREFIX "ACPI: " + + +#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF + +#define ACPI_BATTERY_FORMAT_BIF "NNNNNNNNNSSSS" +#define ACPI_BATTERY_FORMAT_BST "NNNN" + +static int acpi_battery_add (struct acpi_device *device); +static int acpi_battery_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_battery_driver = { + .name = ACPI_BATTERY_DRIVER_NAME, + .class = ACPI_BATTERY_CLASS, + .ids = ACPI_BATTERY_HID, + .ops = { + .add = acpi_battery_add, + .remove = acpi_battery_remove, + }, +}; + +struct acpi_battery_status { + acpi_integer state; + acpi_integer present_rate; + acpi_integer remaining_capacity; + acpi_integer present_voltage; +}; + +struct acpi_battery_info { + acpi_integer power_unit; + acpi_integer design_capacity; + acpi_integer last_full_capacity; + acpi_integer battery_technology; + acpi_integer design_voltage; + acpi_integer design_capacity_warning; + acpi_integer design_capacity_low; + acpi_integer battery_capacity_granularity_1; + acpi_integer battery_capacity_granularity_2; + acpi_string model_number; + acpi_string serial_number; + acpi_string battery_type; + acpi_string oem_info; +}; + +struct acpi_battery_flags { + u8 present:1; /* Bay occupied? */ + u8 power_unit:1; /* 0=watts, 1=apms */ + u8 alarm:1; /* _BTP present? 
*/ + u8 reserved:5; +}; + +struct acpi_battery_trips { + unsigned long warning; + unsigned long low; +}; + +struct acpi_battery { + acpi_handle handle; + struct acpi_battery_flags flags; + struct acpi_battery_trips trips; + unsigned long alarm; + struct acpi_battery_info *info; +}; + + +/* -------------------------------------------------------------------------- + Battery Management + -------------------------------------------------------------------------- */ + +static int +acpi_battery_get_info ( + struct acpi_battery *battery, + struct acpi_battery_info **bif) +{ + int result = 0; + acpi_status status = 0; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + struct acpi_buffer format = {sizeof(ACPI_BATTERY_FORMAT_BIF), + ACPI_BATTERY_FORMAT_BIF}; + struct acpi_buffer data = {0, NULL}; + union acpi_object *package = NULL; + + ACPI_FUNCTION_TRACE("acpi_battery_get_info"); + + if (!battery || !bif) + return_VALUE(-EINVAL); + + /* Evalute _BIF */ + + status = acpi_evaluate_object(battery->handle, "_BIF", NULL, &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _BIF\n")); + return_VALUE(-ENODEV); + } + + package = (union acpi_object *) buffer.pointer; + + /* Extract Package Data */ + + status = acpi_extract_package(package, &format, &data); + if (status != AE_BUFFER_OVERFLOW) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error extracting _BIF\n")); + result = -ENODEV; + goto end; + } + + data.pointer = kmalloc(data.length, GFP_KERNEL); + if (!data.pointer) { + result = -ENOMEM; + goto end; + } + memset(data.pointer, 0, data.length); + + status = acpi_extract_package(package, &format, &data); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error extracting _BIF\n")); + kfree(data.pointer); + result = -ENODEV; + goto end; + } + +end: + acpi_os_free(buffer.pointer); + + if (!result) + (*bif) = (struct acpi_battery_info *) data.pointer; + + return_VALUE(result); +} + +static int +acpi_battery_get_status ( + struct acpi_battery *battery, + struct acpi_battery_status **bst) +{ + int result = 0; + acpi_status status = 0; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + struct acpi_buffer format = {sizeof(ACPI_BATTERY_FORMAT_BST), + ACPI_BATTERY_FORMAT_BST}; + struct acpi_buffer data = {0, NULL}; + union acpi_object *package = NULL; + + ACPI_FUNCTION_TRACE("acpi_battery_get_status"); + + if (!battery || !bst) + return_VALUE(-EINVAL); + + /* Evalute _BST */ + + status = acpi_evaluate_object(battery->handle, "_BST", NULL, &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _BST\n")); + return_VALUE(-ENODEV); + } + + package = (union acpi_object *) buffer.pointer; + + /* Extract Package Data */ + + status = acpi_extract_package(package, &format, &data); + if (status != AE_BUFFER_OVERFLOW) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error extracting _BST\n")); + result = -ENODEV; + goto end; + } + + data.pointer = kmalloc(data.length, GFP_KERNEL); + if (!data.pointer) { + result = -ENOMEM; + goto end; + } + memset(data.pointer, 0, data.length); + + status = acpi_extract_package(package, &format, &data); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error extracting _BST\n")); + kfree(data.pointer); + result = -ENODEV; + goto end; + } + +end: + acpi_os_free(buffer.pointer); + + if (!result) + (*bst) = (struct acpi_battery_status *) data.pointer; + + return_VALUE(result); +} + + +static int +acpi_battery_set_alarm ( + struct acpi_battery *battery, + unsigned long alarm) +{ + 
acpi_status status = 0; + union acpi_object arg0 = {ACPI_TYPE_INTEGER}; + struct acpi_object_list arg_list = {1, &arg0}; + + ACPI_FUNCTION_TRACE("acpi_battery_set_alarm"); + + if (!battery) + return_VALUE(-EINVAL); + + if (!battery->flags.alarm) + return_VALUE(-ENODEV); + + arg0.integer.value = alarm; + + status = acpi_evaluate_object(battery->handle, "_BTP", &arg_list, NULL); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Alarm set to %d\n", (u32) alarm)); + + battery->alarm = alarm; + + return_VALUE(0); +} + + +static int +acpi_battery_check ( + struct acpi_battery *battery) +{ + int result = 0; + acpi_status status = AE_OK; + acpi_handle handle = NULL; + struct acpi_device *device = NULL; + struct acpi_battery_info *bif = NULL; + + ACPI_FUNCTION_TRACE("acpi_battery_check"); + + if (!battery) + return_VALUE(-EINVAL); + + result = acpi_bus_get_device(battery->handle, &device); + if (result) + return_VALUE(result); + + result = acpi_bus_get_status(device); + if (result) + return_VALUE(result); + + /* Insertion? */ + + if (!battery->flags.present && device->status.battery_present) { + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Battery inserted\n")); + + /* Evalute _BIF to get certain static information */ + + result = acpi_battery_get_info(battery, &bif); + if (result) + return_VALUE(result); + + battery->flags.power_unit = bif->power_unit; + battery->trips.warning = bif->design_capacity_warning; + battery->trips.low = bif->design_capacity_low; + kfree(bif); + + /* See if alarms are supported, and if so, set default */ + + status = acpi_get_handle(battery->handle, "_BTP", &handle); + if (ACPI_SUCCESS(status)) { + battery->flags.alarm = 1; + acpi_battery_set_alarm(battery, battery->trips.warning); + } + } + + /* Removal? */ + + else if (battery->flags.present && !device->status.battery_present) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Battery removed\n")); + } + + battery->flags.present = device->status.battery_present; + + return_VALUE(result); +} + + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +struct proc_dir_entry *acpi_battery_dir = NULL; + +static int +acpi_battery_read_info ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + int result = 0; + struct acpi_battery *battery = (struct acpi_battery *) data; + struct acpi_battery_info *bif = NULL; + char *units = "?"; + char *p = page; + int len = 0; + + ACPI_FUNCTION_TRACE("acpi_battery_read_info"); + + if (!battery) + goto end; + + if (battery->flags.present) + p += sprintf(p, "present: yes\n"); + else { + p += sprintf(p, "present: no\n"); + goto end; + } + + /* Battery Info (_BIF) */ + + result = acpi_battery_get_info(battery, &bif); + if (result || !bif) { + p += sprintf(p, "ERROR: Unable to read battery information\n"); + goto end; + } + + units = bif->power_unit ? 
ACPI_BATTERY_UNITS_AMPS : ACPI_BATTERY_UNITS_WATTS; + + if (bif->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + p += sprintf(p, "design capacity: unknown\n"); + else + p += sprintf(p, "design capacity: %d %sh\n", + (u32) bif->design_capacity, units); + + if (bif->last_full_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + p += sprintf(p, "last full capacity: unknown\n"); + else + p += sprintf(p, "last full capacity: %d %sh\n", + (u32) bif->last_full_capacity, units); + + switch ((u32) bif->battery_technology) { + case 0: + p += sprintf(p, "battery technology: non-rechargeable\n"); + break; + case 1: + p += sprintf(p, "battery technology: rechargeable\n"); + break; + default: + p += sprintf(p, "battery technology: unknown\n"); + break; + } + + if (bif->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN) + p += sprintf(p, "design voltage: unknown\n"); + else + p += sprintf(p, "design voltage: %d mV\n", + (u32) bif->design_voltage); + + p += sprintf(p, "design capacity warning: %d %sh\n", + (u32) bif->design_capacity_warning, units); + p += sprintf(p, "design capacity low: %d %sh\n", + (u32) bif->design_capacity_low, units); + p += sprintf(p, "capacity granularity 1: %d %sh\n", + (u32) bif->battery_capacity_granularity_1, units); + p += sprintf(p, "capacity granularity 2: %d %sh\n", + (u32) bif->battery_capacity_granularity_2, units); + p += sprintf(p, "model number: %s\n", + bif->model_number); + p += sprintf(p, "serial number: %s\n", + bif->serial_number); + p += sprintf(p, "battery type: %s\n", + bif->battery_type); + p += sprintf(p, "OEM info: %s\n", + bif->oem_info); + +end: + kfree(bif); + + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_battery_read_state ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + int result = 0; + struct acpi_battery *battery = (struct acpi_battery *) data; + struct acpi_battery_status *bst = NULL; + char *units = "?"; + char *p = page; + int len = 0; + + ACPI_FUNCTION_TRACE("acpi_battery_read_state"); + + if (!battery) + goto end; + + if (battery->flags.present) + p += sprintf(p, "present: yes\n"); + else { + p += sprintf(p, "present: no\n"); + goto end; + } + + /* Battery Units */ + + units = battery->flags.power_unit ? 
ACPI_BATTERY_UNITS_AMPS : ACPI_BATTERY_UNITS_WATTS; + + /* Battery Status (_BST) */ + + result = acpi_battery_get_status(battery, &bst); + if (result || !bst) { + p += sprintf(p, "ERROR: Unable to read battery status\n"); + goto end; + } + + if (!(bst->state & 0x04)) + p += sprintf(p, "capacity state: ok\n"); + else + p += sprintf(p, "capacity state: critical\n"); + + if ((bst->state & 0x01) && (bst->state & 0x02)) + p += sprintf(p, "charging state: charging/discharging\n"); + else if (bst->state & 0x01) + p += sprintf(p, "charging state: discharging\n"); + else if (bst->state & 0x02) + p += sprintf(p, "charging state: charging\n"); + else + p += sprintf(p, "charging state: unknown\n"); + + if (bst->present_rate == ACPI_BATTERY_VALUE_UNKNOWN) + p += sprintf(p, "present rate: unknown\n"); + else + p += sprintf(p, "present rate: %d %s\n", + (u32) bst->present_rate, units); + + if (bst->remaining_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + p += sprintf(p, "remaining capacity: unknown\n"); + else + p += sprintf(p, "remaining capacity: %d %sh\n", + (u32) bst->remaining_capacity, units); + + if (bst->present_voltage == ACPI_BATTERY_VALUE_UNKNOWN) + p += sprintf(p, "present voltage: unknown\n"); + else + p += sprintf(p, "present voltage: %d mV\n", + (u32) bst->present_voltage); + +end: + kfree(bst); + + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_battery_read_alarm ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_battery *battery = (struct acpi_battery *) data; + char *units = "?"; + char *p = page; + int len = 0; + + ACPI_FUNCTION_TRACE("acpi_battery_read_alarm"); + + if (!battery) + goto end; + + if (!battery->flags.present) { + p += sprintf(p, "present: no\n"); + goto end; + } + + /* Battery Units */ + + units = battery->flags.power_unit ? 
ACPI_BATTERY_UNITS_AMPS : ACPI_BATTERY_UNITS_WATTS; + + /* Battery Alarm */ + + p += sprintf(p, "alarm: "); + if (!battery->alarm) + p += sprintf(p, "unsupported\n"); + else + p += sprintf(p, "%d %sh\n", (u32) battery->alarm, units); + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_battery_write_alarm ( + struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + int result = 0; + struct acpi_battery *battery = (struct acpi_battery *) data; + char alarm_string[12] = {'\0'}; + + ACPI_FUNCTION_TRACE("acpi_battery_write_alarm"); + + if (!battery || (count > sizeof(alarm_string) - 1)) + return_VALUE(-EINVAL); + + if (!battery->flags.present) + return_VALUE(-ENODEV); + + if (copy_from_user(alarm_string, buffer, count)) + return_VALUE(-EFAULT); + + alarm_string[count] = '\0'; + + result = acpi_battery_set_alarm(battery, + simple_strtoul(alarm_string, NULL, 0)); + if (result) + return_VALUE(result); + + return_VALUE(count); +} + + +static int +acpi_battery_add_fs ( + struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_battery_add_fs"); + + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_battery_dir); + if (!acpi_device_dir(device)) + return_VALUE(-ENODEV); + } + + /* 'info' [R] */ + entry = create_proc_entry(ACPI_BATTERY_FILE_INFO, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_BATTERY_FILE_INFO)); + else { + entry->read_proc = acpi_battery_read_info; + entry->data = acpi_driver_data(device); + } + + /* 'status' [R] */ + entry = create_proc_entry(ACPI_BATTERY_FILE_STATUS, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_BATTERY_FILE_STATUS)); + else { + entry->read_proc = acpi_battery_read_state; + entry->data = acpi_driver_data(device); + } + + /* 'alarm' [R/W] */ + entry = create_proc_entry(ACPI_BATTERY_FILE_ALARM, + S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_BATTERY_FILE_ALARM)); + else { + entry->read_proc = acpi_battery_read_alarm; + entry->write_proc = acpi_battery_write_alarm; + entry->data = acpi_driver_data(device); + } + + return_VALUE(0); +} + + +static int +acpi_battery_remove_fs ( + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_battery_remove_fs"); + + if (acpi_device_dir(device)) { + remove_proc_entry(acpi_device_bid(device), acpi_battery_dir); + acpi_device_dir(device) = NULL; + } + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ + +static void +acpi_battery_notify ( + acpi_handle handle, + u32 event, + void *data) +{ + struct acpi_battery *battery = (struct acpi_battery *) data; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_battery_notify"); + + if (!battery) + return_VOID; + + if (acpi_bus_get_device(handle, &device)) + return_VOID; + + switch (event) { + case ACPI_BATTERY_NOTIFY_STATUS: + case ACPI_BATTERY_NOTIFY_INFO: + acpi_battery_check(battery); + acpi_bus_generate_event(device, event, battery->flags.present); + break; + default: + 
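/* All other notify values (e.g. OEM-specific events) are simply logged. */ +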
ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Unsupported event [0x%x]\n", event)); + break; + } + + return_VOID; +} + + +static int +acpi_battery_add ( + struct acpi_device *device) +{ + int result = 0; + acpi_status status = 0; + struct acpi_battery *battery = NULL; + + ACPI_FUNCTION_TRACE("acpi_battery_add"); + + if (!device) + return_VALUE(-EINVAL); + + battery = kmalloc(sizeof(struct acpi_battery), GFP_KERNEL); + if (!battery) + return_VALUE(-ENOMEM); + memset(battery, 0, sizeof(struct acpi_battery)); + + battery->handle = device->handle; + sprintf(acpi_device_name(device), "%s", ACPI_BATTERY_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_BATTERY_CLASS); + acpi_driver_data(device) = battery; + + result = acpi_battery_check(battery); + if (result) + goto end; + + result = acpi_battery_add_fs(device); + if (result) + goto end; + + status = acpi_install_notify_handler(battery->handle, + ACPI_DEVICE_NOTIFY, acpi_battery_notify, battery); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error installing notify handler\n")); + result = -ENODEV; + goto end; + } + + printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", + ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), + device->status.battery_present?"present":"absent"); + +end: + if (result) { + acpi_battery_remove_fs(device); + kfree(battery); + } + + return_VALUE(result); +} + + +static int +acpi_battery_remove ( + struct acpi_device *device, + int type) +{ + acpi_status status = 0; + struct acpi_battery *battery = NULL; + + ACPI_FUNCTION_TRACE("acpi_battery_remove"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + battery = (struct acpi_battery *) acpi_driver_data(device); + + status = acpi_remove_notify_handler(battery->handle, + ACPI_DEVICE_NOTIFY, acpi_battery_notify); + if (ACPI_FAILURE(status)) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error removing notify handler\n")); + + acpi_battery_remove_fs(device); + + kfree(battery); + + return_VALUE(0); +} + + +static int __init +acpi_battery_init (void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_battery_init"); + + acpi_battery_dir = proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); + if (!acpi_battery_dir) + return_VALUE(-ENODEV); + + result = acpi_bus_register_driver(&acpi_battery_driver); + if (result < 0) { + remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + + +static void __exit +acpi_battery_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_battery_exit"); + + acpi_bus_unregister_driver(&acpi_battery_driver); + + remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); + + return_VOID; +} + + +module_init(acpi_battery_init); +module_exit(acpi_battery_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/blacklist.c linux.21rc5-ac2/drivers/acpi/blacklist.c --- linux.21rc5/drivers/acpi/blacklist.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/blacklist.c 2003-04-28 17:58:27.000000000 +0100 @@ -0,0 +1,141 @@ +/* + * blacklist.c + * + * Check to see if the given machine has a known bad ACPI BIOS + * + * Copyright (C) 2002 Andy Grover + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + + +#include +#include +#include +#include +#include + +#define PREFIX "ACPI: " + +enum acpi_blacklist_predicates +{ + all_versions, + less_than_or_equal, + equal, + greater_than_or_equal, +}; + +struct acpi_blacklist_item +{ + char oem_id[7]; + char oem_table_id[9]; + u32 oem_revision; + acpi_table_type table; + enum acpi_blacklist_predicates oem_revision_predicate; + char *reason; + u32 is_critical_error; +}; + +/* + * POLICY: If *anything* doesn't work, put it on the blacklist. + * If they are critical errors, mark it critical, and abort driver load. + */ +static struct acpi_blacklist_item acpi_blacklist[] __initdata = +{ + /* Portege 7020, BIOS 8.10 */ + {"TOSHIB", "7020CT ", 0x19991112, ACPI_DSDT, all_versions, "Implicit Return", 0}, + /* Portege 4030 */ + {"TOSHIB", "4030 ", 0x19991112, ACPI_DSDT, all_versions, "Implicit Return", 0}, + /* Portege 310/320, BIOS 7.1 */ + {"TOSHIB", "310 ", 0x19990511, ACPI_DSDT, all_versions, "Implicit Return", 0}, + /* Seattle 2, old bios rev. */ + {"INTEL ", "440BX ", 0x00001000, ACPI_DSDT, less_than_or_equal, "Field beyond end of region", 0}, + /* ASUS K7M */ + {"ASUS ", "K7M ", 0x00001000, ACPI_DSDT, less_than_or_equal, "Field beyond end of region", 0}, + /* Intel 810 Motherboard? */ + {"MNTRAL", "MO81010A", 0x00000012, ACPI_DSDT, less_than_or_equal, "Field beyond end of region", 0}, + /* Compaq Presario 711FR */ + {"COMAPQ", "EAGLES", 0x06040000, ACPI_DSDT, less_than_or_equal, "SCI issues (C2 disabled)", 0}, + /* Compaq Presario 1700 */ + {"PTLTD ", " DSDT ", 0x06040000, ACPI_DSDT, less_than_or_equal, "Multiple problems", 1}, + /* Sony FX120, FX140, FX150? 
*/ + {"SONY ", "U0 ", 0x20010313, ACPI_DSDT, less_than_or_equal, "ACPI driver problem", 1}, + /* Compaq Presario 800, Insyde BIOS */ + {"INT440", "SYSFexxx", 0x00001001, ACPI_DSDT, less_than_or_equal, "Does not use _REG to protect EC OpRegions", 1}, + /* IBM 600E - _ADR should return 7, but it returns 1 */ + {"IBM ", "TP600E ", 0x00000105, ACPI_DSDT, less_than_or_equal, "Incorrect _ADR", 1}, + {"ASUS\0\0", "P2B-S ", 0, ACPI_DSDT, all_versions, "Bogus PCI routing", 1}, + + {""} +}; + + +int __init +acpi_blacklisted(void) +{ + int i = 0; + int blacklisted = 0; + struct acpi_table_header *table_header; + + while (acpi_blacklist[i].oem_id[0] != '\0') + { + if (acpi_get_table_header_early(acpi_blacklist[i].table, &table_header)) { + i++; + continue; + } + + if (strncmp(acpi_blacklist[i].oem_id, table_header->oem_id, 6)) { + i++; + continue; + } + + if (strncmp(acpi_blacklist[i].oem_table_id, table_header->oem_table_id, 8)) { + i++; + continue; + } + + if ((acpi_blacklist[i].oem_revision_predicate == all_versions) + || (acpi_blacklist[i].oem_revision_predicate == less_than_or_equal + && table_header->oem_revision <= acpi_blacklist[i].oem_revision) + || (acpi_blacklist[i].oem_revision_predicate == greater_than_or_equal + && table_header->oem_revision >= acpi_blacklist[i].oem_revision) + || (acpi_blacklist[i].oem_revision_predicate == equal + && table_header->oem_revision == acpi_blacklist[i].oem_revision)) { + + printk(KERN_ERR PREFIX "Vendor \"%6.6s\" System \"%8.8s\" " + "Revision 0x%x has a known ACPI BIOS problem.\n", + acpi_blacklist[i].oem_id, + acpi_blacklist[i].oem_table_id, + acpi_blacklist[i].oem_revision); + + printk(KERN_ERR PREFIX "Reason: %s. This is a %s error\n", + acpi_blacklist[i].reason, + (acpi_blacklist[i].is_critical_error ? "non-recoverable" : "recoverable")); + + blacklisted = acpi_blacklist[i].is_critical_error; + break; + } + else { + i++; + } + } + + return blacklisted; +} + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/bus.c linux.21rc5-ac2/drivers/acpi/bus.c --- linux.21rc5/drivers/acpi/bus.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/bus.c 2003-05-20 20:16:07.000000000 +0100 @@ -0,0 +1,2101 @@ +/* + * acpi_bus.c - ACPI Bus Driver ($Revision: 77 $) + * + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_X86 +#include +#endif +#include +#include +#include /* for acpi_ex_eisa_id_to_string() */ + + +#define _COMPONENT ACPI_BUS_COMPONENT +ACPI_MODULE_NAME ("acpi_bus") + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION(ACPI_BUS_DRIVER_NAME); +MODULE_LICENSE("GPL"); + +#define PREFIX "ACPI: " + +extern void eisa_set_level_irq(unsigned int irq); + +extern int acpi_disabled; + +FADT_DESCRIPTOR acpi_fadt; +struct acpi_device *acpi_root; +struct proc_dir_entry *acpi_root_dir; + +#define STRUCT_TO_INT(s) (*((int*)&s)) + + +/* -------------------------------------------------------------------------- + Linux Driver Model (LDM) Support + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_LDM + +static int acpi_device_probe(struct device *dev); +static int acpi_device_remove(struct device *dev); +static int acpi_device_suspend(struct device *dev, u32 state, u32 stage); +static int acpi_device_resume(struct device *dev, u32 stage); + +static struct device_driver acpi_bus_driver = { + .probe = acpi_device_probe, + .remove = acpi_device_remove, + .suspend = acpi_device_suspend, + .resume = acpi_device_resume, +}; + + +static int +acpi_device_probe ( + struct device *dev) +{ + ACPI_FUNCTION_TRACE("acpi_device_probe"); + + if (!dev) + return_VALUE(-EINVAL); + + /* TBD */ + + return_VALUE(0); +} + + +static int +acpi_device_remove ( + struct device *dev) +{ + ACPI_FUNCTION_TRACE("acpi_device_remove"); + + if (!dev) + return_VALUE(-EINVAL); + + /* TBD */ + + return_VALUE(0); +} + + +static int +acpi_device_suspend ( + struct device *dev, + u32 state, + u32 stage) +{ + ACPI_FUNCTION_TRACE("acpi_device_suspend"); + + if (!dev) + return_VALUE(-EINVAL); + + /* TBD */ + + return_VALUE(0); +} + + +static int +acpi_device_resume ( + struct device *dev, + u32 stage) +{ + ACPI_FUNCTION_TRACE("acpi_device_resume"); + + if (!dev) + return_VALUE(-EINVAL); + + /* TBD */ + + return_VALUE(0); +} + +#if 0 /* not used ATM */ +static int +acpi_platform_add ( + struct device *dev) +{ + ACPI_FUNCTION_TRACE("acpi_platform_add"); + + if (!dev) + return -EINVAL; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device %s (%s) added\n", + dev->name, dev->bus_id)); + + /* TBD */ + + return_VALUE(0); +} + + +static int +acpi_platform_remove ( + struct device *dev) +{ + ACPI_FUNCTION_TRACE("acpi_platform_remove"); + + if (!dev) + return -EINVAL; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device %s (%s) removed\n", + dev->name, dev->bus_id)); + + /* TBD */ + + return_VALUE(0); +} +#endif /* unused */ + + +#endif /*CONFIG_LDM*/ + + +static int +acpi_device_register ( + struct acpi_device *device, + struct acpi_device *parent) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_device_register"); + + if (!device) + return_VALUE(-EINVAL); + +#ifdef CONFIG_LDM + sprintf(device->dev.name, "ACPI device %s:%s", + device->pnp.hardware_id, device->pnp.unique_id); + strncpy(device->dev.bus_id, device->pnp.bus_id, sizeof(acpi_bus_id)); + if (parent) + device->dev.parent = &parent->dev; + device->dev.driver = &acpi_bus_driver; + + result = device_register(&device->dev); +#endif /*CONFIG_LDM*/ + + return_VALUE(result); +} + + +static int +acpi_device_unregister ( + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_device_unregister"); + + if (!device) + return_VALUE(-EINVAL); + +#ifdef CONFIG_LDM +
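/* Drop the reference that device_register() took in acpi_device_register(). */ +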
put_device(&device->dev); +#endif /*CONFIG_LDM*/ + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Device Management + -------------------------------------------------------------------------- */ + +static void +acpi_bus_data_handler ( + acpi_handle handle, + u32 function, + void *context) +{ + ACPI_FUNCTION_TRACE("acpi_bus_data_handler"); + + /* TBD */ + + return_VOID; +} + + +int +acpi_bus_get_device ( + acpi_handle handle, + struct acpi_device **device) +{ + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE("acpi_bus_get_device"); + + if (!device) + return_VALUE(-EINVAL); + + /* TBD: Support fixed-feature devices */ + + status = acpi_get_data(handle, acpi_bus_data_handler, (void**) device); + if (ACPI_FAILURE(status) || !*device) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error getting context for object [%p]\n", + handle)); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + +int +acpi_bus_get_status ( + struct acpi_device *device) +{ + acpi_status status = AE_OK; + unsigned long sta = 0; + + ACPI_FUNCTION_TRACE("acpi_bus_get_status"); + + if (!device) + return_VALUE(-EINVAL); + + /* + * Evaluate _STA if present. + */ + if (device->flags.dynamic_status) { + status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + STRUCT_TO_INT(device->status) = (int) sta; + } + + /* + * Otherwise we assume the status of our parent (unless we don't + * have one, in which case status is implied). + */ + else if (device->parent) + device->status = device->parent->status; + else + STRUCT_TO_INT(device->status) = 0x0F; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n", + device->pnp.bus_id, (u32) STRUCT_TO_INT(device->status))); + + return_VALUE(0); +} + + +/* +static int +acpi_bus_create_device_fs (struct device *device) +{ + ACPI_FUNCTION_TRACE("acpi_bus_create_device_fs"); + + if (!device) + return_VALUE(-EINVAL); + + if (device->dir.entry) + return_VALUE(-EEXIST); + + if (!device->parent) + device->dir.entry = proc_mkdir(device->pnp.bus_id, NULL); + else + device->dir.entry = proc_mkdir(device->pnp.bus_id, + device->parent->fs.entry); + + if (!device->dir.entry) { + printk(KERN_ERR PREFIX "Unable to create fs entry '%s'\n", + device->pnp.bus_id); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + + +static int +acpi_bus_remove_device_fs (struct device *device) +{ + ACPI_FUNCTION_TRACE("acpi_bus_create_device_fs"); + + if (!device) + return_VALUE(-EINVAL); + + if (!device->dir.entry) + return_VALUE(-ENODEV); + + if (!device->parent) + remove_proc_entry(device->pnp_bus_id, NULL); + else + remove_proc_entry(device->pnp.bus_id, device->parent->fs.entry); + + device->dir.entry = NULL; + + return_VALUE(0); +} +*/ + + +/* -------------------------------------------------------------------------- + Power Management + -------------------------------------------------------------------------- */ + +int +acpi_bus_get_power ( + acpi_handle handle, + int *state) +{ + int result = 0; + acpi_status status = 0; + struct acpi_device *device = NULL; + unsigned long psc = 0; + + ACPI_FUNCTION_TRACE("acpi_bus_get_power"); + + result = acpi_bus_get_device(handle, &device); + if (result) + return_VALUE(result); + + *state = ACPI_STATE_UNKNOWN; + + if (!device->flags.power_manageable) { + /* TBD: Non-recursive algorithm for walking up hierarchy */ + if (device->parent) + *state = device->parent->power.state; + else + *state = ACPI_STATE_D0; + } + else { + /* + * Get the device's 
power state either directly (via _PSC) or + * indirectly (via power resources). + */ + if (device->power.flags.explicit_get) { + status = acpi_evaluate_integer(device->handle, "_PSC", + NULL, &psc); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + device->power.state = (int) psc; + } + else if (device->power.flags.power_resources) { + result = acpi_power_get_inferred_state(device); + if (result) + return_VALUE(result); + } + + *state = device->power.state; + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", + device->pnp.bus_id, device->power.state)); + + return_VALUE(0); +} + + +int +acpi_bus_set_power ( + acpi_handle handle, + int state) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_device *device = NULL; + char object_name[5] = {'_','P','S','0'+state,'\0'}; + + ACPI_FUNCTION_TRACE("acpi_bus_set_power"); + + result = acpi_bus_get_device(handle, &device); + if (result) + return_VALUE(result); + + if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) + return_VALUE(-EINVAL); + + /* Make sure this is a valid target state */ + + if (!device->flags.power_manageable) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Device is not power manageable\n")); + return_VALUE(-ENODEV); + } + if (state == device->power.state) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", state)); + return_VALUE(0); + } + if (!device->power.states[state].flags.valid) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Device does not support D%d\n", state)); + return_VALUE(-ENODEV); + } + if (device->parent && (state < device->parent->power.state)) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Cannot set device to a higher-powered state than parent\n")); + return_VALUE(-ENODEV); + } + + /* + * Transition Power + * ---------------- + * On transitions to a high-powered state we first apply power (via + * power resources) then evaluate _PSx. Conversely for transitions to + * a lower-powered state.
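+ * + * For example: waking a device from D3 to D0 turns on its _PR0 power + * resources first and then evaluates _PS0, while a D0->D3 transition + * evaluates _PS3 before releasing the power resources.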
+ */ + if (state < device->power.state) { + if (device->power.flags.power_resources) { + result = acpi_power_transition(device, state); + if (result) + goto end; + } + if (device->power.states[state].flags.explicit_set) { + status = acpi_evaluate_object(device->handle, + object_name, NULL, NULL); + if (ACPI_FAILURE(status)) { + result = -ENODEV; + goto end; + } + } + } + else { + if (device->power.states[state].flags.explicit_set) { + status = acpi_evaluate_object(device->handle, + object_name, NULL, NULL); + if (ACPI_FAILURE(status)) { + result = -ENODEV; + goto end; + } + } + if (device->power.flags.power_resources) { + result = acpi_power_transition(device, state); + if (result) + goto end; + } + } + +end: + if (result) + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error transitioning device [%s] to D%d\n", + device->pnp.bus_id, state)); + else + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] transitioned to D%d\n", + device->pnp.bus_id, state)); + + return_VALUE(result); +} + + +static int +acpi_bus_get_power_flags ( + struct acpi_device *device) +{ + acpi_status status = 0; + acpi_handle handle = 0; + u32 i = 0; + + ACPI_FUNCTION_TRACE("acpi_bus_get_power_flags"); + + if (!device) + return -ENODEV; + + /* + * Power Management Flags + */ + status = acpi_get_handle(device->handle, "_PSC", &handle); + if (ACPI_SUCCESS(status)) + device->power.flags.explicit_get = 1; + status = acpi_get_handle(device->handle, "_IRC", &handle); + if (ACPI_SUCCESS(status)) + device->power.flags.inrush_current = 1; + status = acpi_get_handle(device->handle, "_PRW", &handle); + if (ACPI_SUCCESS(status)) + device->power.flags.wake_capable = 1; + + /* + * Enumerate supported power management states + */ + for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) { + struct acpi_device_power_state *ps = &device->power.states[i]; + char object_name[5] = {'_','P','R','0'+i,'\0'}; + + /* Evaluate "_PRx" to see if power resources are referenced */ + acpi_evaluate_reference(device->handle, object_name, NULL, + &ps->resources); + if (ps->resources.count) { + device->power.flags.power_resources = 1; + ps->flags.valid = 1; + } + + /* Evaluate "_PSx" to see if we can do explicit sets */ + object_name[2] = 'S'; + status = acpi_get_handle(device->handle, object_name, &handle); + if (ACPI_SUCCESS(status)) { + ps->flags.explicit_set = 1; + ps->flags.valid = 1; + } + + /* State is valid if we have some power control */ + if (ps->resources.count || ps->flags.explicit_set) + ps->flags.valid = 1; + + ps->power = -1; /* Unknown - driver assigned */ + ps->latency = -1; /* Unknown - driver assigned */ + } + + /* Set defaults for D0 and D3 states (always valid) */ + device->power.states[ACPI_STATE_D0].flags.valid = 1; + device->power.states[ACPI_STATE_D0].power = 100; + device->power.states[ACPI_STATE_D3].flags.valid = 1; + device->power.states[ACPI_STATE_D3].power = 0; + + /* + * System Power States + * ------------------- + */ + /* TBD: S1-S4 power state support and resource requirements. */ + /* + for (i=ACPI_STATE_S1; ihandle, name, NULL, + &state); + if (ACPI_FAILURE(status)) + continue; + } + */ + + /* TBD: System wake support and resource requirements.
*/ + + device->power.state = ACPI_STATE_UNKNOWN; + + return 0; +} + + +/* -------------------------------------------------------------------------- + Performance Management + -------------------------------------------------------------------------- */ + +static int +acpi_bus_get_perf_flags ( + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_bus_get_perf_flags"); + + if (!device) + return -ENODEV; + + device->performance.state = ACPI_STATE_UNKNOWN; + + return 0; +} + + +/* -------------------------------------------------------------------------- + Event Management + -------------------------------------------------------------------------- */ + +static spinlock_t acpi_bus_event_lock = SPIN_LOCK_UNLOCKED; + +LIST_HEAD(acpi_bus_event_list); +DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue); + +extern int event_is_open; + +int +acpi_bus_generate_event ( + struct acpi_device *device, + u8 type, + int data) +{ + struct acpi_bus_event *event = NULL; + u32 flags = 0; + + ACPI_FUNCTION_TRACE("acpi_bus_generate_event"); + + if (!device) + return_VALUE(-EINVAL); + + /* drop event on the floor if no one's listening */ + if (!event_is_open) + return_VALUE(0); + + event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC); + if (!event) + return_VALUE(-ENOMEM); + + sprintf(event->device_class, "%s", device->pnp.device_class); + sprintf(event->bus_id, "%s", device->pnp.bus_id); + event->type = type; + event->data = data; + + spin_lock_irqsave(&acpi_bus_event_lock, flags); + list_add_tail(&event->node, &acpi_bus_event_list); + spin_unlock_irqrestore(&acpi_bus_event_lock, flags); + + wake_up_interruptible(&acpi_bus_event_queue); + + return_VALUE(0); +} + +int +acpi_bus_receive_event ( + struct acpi_bus_event *event) +{ + u32 flags = 0; + struct acpi_bus_event *entry = NULL; + + DECLARE_WAITQUEUE(wait, current); + + ACPI_FUNCTION_TRACE("acpi_bus_receive_event"); + + if (!event) + return -EINVAL; + + if (list_empty(&acpi_bus_event_list)) { + + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&acpi_bus_event_queue, &wait); + + if (list_empty(&acpi_bus_event_list)) + schedule(); + + remove_wait_queue(&acpi_bus_event_queue, &wait); + set_current_state(TASK_RUNNING); + + if (signal_pending(current)) + return_VALUE(-ERESTARTSYS); + } + + spin_lock_irqsave(&acpi_bus_event_lock, flags); + entry = list_entry(acpi_bus_event_list.next, struct acpi_bus_event, node); + if (entry) + list_del(&entry->node); + spin_unlock_irqrestore(&acpi_bus_event_lock, flags); + + if (!entry) + return_VALUE(-ENODEV); + + memcpy(event, entry, sizeof(struct acpi_bus_event)); + + kfree(entry); + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Namespace Management + -------------------------------------------------------------------------- */ + +#define WALK_UP 0 +#define WALK_DOWN 1 + +typedef int (*acpi_bus_walk_callback)(struct acpi_device*, int, void*); + +#define HAS_CHILDREN(d) ((d)->children.next != &((d)->children)) +#define HAS_SIBLINGS(d) (((d)->parent) && ((d)->node.next != &(d)->parent->children)) +#define NODE_TO_DEVICE(n) (list_entry(n, struct acpi_device, node)) + + +/** + * acpi_bus_walk + * ------------- + * Used to walk the ACPI Bus's device namespace. Can walk down (depth-first) + * or up. Able to parse starting at any node in the namespace. Note that a + * callback return value of -ELOOP will terminate the walk. 
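+ * + * A hypothetical use (not from this patch): acpi_bus_walk(acpi_root, + * my_callback, WALK_DOWN, NULL) invokes my_callback on each device before + * any of its children, while WALK_UP visits children before their parents.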
+ * + * @start: starting point + * callback: function to call for every device encountered while parsing + * direction: direction to parse (up or down) + * @data: context for this search operation + */ +static int +acpi_bus_walk ( + struct acpi_device *start, + acpi_bus_walk_callback callback, + int direction, + void *data) +{ + int result = 0; + int level = 0; + struct acpi_device *device = NULL; + + if (!start || !callback) + return -EINVAL; + + device = start; + + /* + * Parse Namespace + * --------------- + * Parse a given subtree (specified by start) in the given direction. + * Walking 'up' simply means that we execute the callback on leaf + * devices prior to their parents (useful for things like removing + * or powering down a subtree). + */ + + while (device) { + + if (direction == WALK_DOWN) + if (-ELOOP == callback(device, level, data)) + break; + + /* Depth First */ + + if (HAS_CHILDREN(device)) { + device = NODE_TO_DEVICE(device->children.next); + ++level; + continue; + } + + if (direction == WALK_UP) + if (-ELOOP == callback(device, level, data)) + break; + + /* Now Breadth */ + + if (HAS_SIBLINGS(device)) { + device = NODE_TO_DEVICE(device->node.next); + continue; + } + + /* Scope Exhausted - Find Next */ + + while ((device = device->parent)) { + --level; + if (HAS_SIBLINGS(device)) { + device = NODE_TO_DEVICE(device->node.next); + break; + } + } + } + + if ((direction == WALK_UP) && (result == 0)) + callback(start, level, data); + + return result; +} + + +/* -------------------------------------------------------------------------- + Notification Handling + -------------------------------------------------------------------------- */ + +static int +acpi_bus_check_device ( + struct acpi_device *device, + int *status_changed) +{ + acpi_status status = 0; + struct acpi_device_status old_status; + + ACPI_FUNCTION_TRACE("acpi_bus_check_device"); + + if (!device) + return_VALUE(-EINVAL); + + if (status_changed) + *status_changed = 0; + + old_status = device->status; + + /* + * Make sure this device's parent is present before we go about + * messing with the device. + */ + if (device->parent && !device->parent->status.present) { + device->status = device->parent->status; + if (STRUCT_TO_INT(old_status) != STRUCT_TO_INT(device->status)) { + if (status_changed) + *status_changed = 1; + } + return_VALUE(0); + } + + status = acpi_bus_get_status(device); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status)) + return_VALUE(0); + + if (status_changed) + *status_changed = 1; + + /* + * Device Insertion/Removal + */ + if ((device->status.present) && !(old_status.present)) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device insertion detected\n")); + /* TBD: Handle device insertion */ + } + else if (!(device->status.present) && (old_status.present)) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n")); + /* TBD: Handle device removal */ + } + + return_VALUE(0); +} + + +static int +acpi_bus_check_scope ( + struct acpi_device *device) +{ + int result = 0; + int status_changed = 0; + + ACPI_FUNCTION_TRACE("acpi_bus_check_scope"); + + if (!device) + return_VALUE(-EINVAL); + + /* Status Change? */ + result = acpi_bus_check_device(device, &status_changed); + if (result) + return_VALUE(result); + + if (!status_changed) + return_VALUE(0); + + /* + * TBD: Enumerate child devices within this device's scope and + * run acpi_bus_check_device()'s on them. 
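+ * A minimal sketch of that TBD (assuming a struct list_head *node + * iterator, and the NODE_TO_DEVICE() helper defined above): + * list_for_each(node, &device->children) + * acpi_bus_check_device(NODE_TO_DEVICE(node), NULL);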
+ */ + + return_VALUE(0); +} + + +/** + * acpi_bus_notify + * --------------- + * Callback for all 'system-level' device notifications (values 0x00-0x7F). + */ +static void +acpi_bus_notify ( + acpi_handle handle, + u32 type, + void *data) +{ + int result = 0; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_bus_notify"); + + if (acpi_bus_get_device(handle, &device)) + return_VOID; + + switch (type) { + + case ACPI_NOTIFY_BUS_CHECK: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Received BUS CHECK notification for device [%s]\n", + device->pnp.bus_id)); + result = acpi_bus_check_scope(device); + /* + * TBD: We'll need to outsource certain events to non-ACPI + * drivers via the device manager (device.c). + */ + break; + + case ACPI_NOTIFY_DEVICE_CHECK: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Received DEVICE CHECK notification for device [%s]\n", + device->pnp.bus_id)); + result = acpi_bus_check_device(device, NULL); + /* + * TBD: We'll need to outsource certain events to non-ACPI + * drivers via the device manager (device.c). + */ + break; + + case ACPI_NOTIFY_DEVICE_WAKE: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Received DEVICE WAKE notification for device [%s]\n", + device->pnp.bus_id)); + /* TBD */ + break; + + case ACPI_NOTIFY_EJECT_REQUEST: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Received EJECT REQUEST notification for device [%s]\n", + device->pnp.bus_id)); + /* TBD */ + break; + + case ACPI_NOTIFY_DEVICE_CHECK_LIGHT: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Received DEVICE CHECK LIGHT notification for device [%s]\n", + device->pnp.bus_id)); + /* TBD: Exactly what does 'light' mean? */ + break; + + case ACPI_NOTIFY_FREQUENCY_MISMATCH: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Received FREQUENCY MISMATCH notification for device [%s]\n", + device->pnp.bus_id)); + /* TBD */ + break; + + case ACPI_NOTIFY_BUS_MODE_MISMATCH: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Received BUS MODE MISMATCH notification for device [%s]\n", + device->pnp.bus_id)); + /* TBD */ + break; + + case ACPI_NOTIFY_POWER_FAULT: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Received POWER FAULT notification for device [%s]\n", + device->pnp.bus_id)); + /* TBD */ + break; + + default: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Received unknown/unsupported notification [%08x]\n", + type)); + break; + } + + return_VOID; +} + + +/* -------------------------------------------------------------------------- + Driver Management + -------------------------------------------------------------------------- */ + +static LIST_HEAD(acpi_bus_drivers); +static DECLARE_MUTEX(acpi_bus_drivers_lock); + + +/** + * acpi_bus_match + * -------------- + * Checks the device's hardware (_HID) or compatible (_CID) ids to see if it + * matches the specified driver's criteria. 
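+ * For example, a driver declaring .ids = "PNP0C0A" would match a battery + * device whose _HID evaluates to "PNP0C0A"; the comparison below is a + * simple strstr() against the driver's comma-separated id list.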
+ */ +static int +acpi_bus_match ( + struct acpi_device *device, + struct acpi_driver *driver) +{ + + if (!device || !driver) + return -EINVAL; + + if (device->flags.hardware_id) { + if (strstr(driver->ids, device->pnp.hardware_id)) + return 0; + } + + if (device->flags.compatible_ids) { + acpi_status status = AE_OK; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *object = NULL; + char cid[256]; + + memset(cid, 0, sizeof(cid)); + + status = acpi_evaluate_object(device->handle, "_CID", NULL, + &buffer); + if (ACPI_FAILURE(status) || !buffer.pointer) + return -ENOENT; + + object = (union acpi_object *) buffer.pointer; + + switch (object->type) { + case ACPI_TYPE_INTEGER: + acpi_ex_eisa_id_to_string((u32) object->integer.value, + cid); + break; + case ACPI_TYPE_STRING: + strncpy(cid, object->string.pointer, sizeof(cid) - 1); + break; + case ACPI_TYPE_PACKAGE: + /* TBD: Support CID packages */ + break; + } + + if (!cid[0]) { + acpi_os_free(buffer.pointer); + return -ENOENT; + } + + if (strstr(driver->ids, cid)) { + acpi_os_free(buffer.pointer); + return 0; + } + + acpi_os_free(buffer.pointer); + } + + return -ENOENT; +} + + +/** + * acpi_bus_driver_init + * -------------------- + * Used to initialize a device via its device driver. Called whenever a + * driver is bound to a device. Invokes the driver's add() and start() ops. + */ +static int +acpi_bus_driver_init ( + struct acpi_device *device, + struct acpi_driver *driver) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_bus_driver_init"); + + if (!device || !driver) + return_VALUE(-EINVAL); + + if (!driver->ops.add) + return_VALUE(-ENOSYS); + + result = driver->ops.add(device); + if (result) { + device->driver = NULL; + acpi_driver_data(device) = NULL; + return_VALUE(result); + } + + device->driver = driver; + + /* + * TBD - Configuration Management: Assign resources to device based + * upon possible configuration and currently allocated resources. + */ + + if (driver->ops.start) { + result = driver->ops.start(device); + if (result && driver->ops.remove) + driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL); + return_VALUE(result); + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Driver successfully bound to device\n")); + +#ifdef CONFIG_LDM + /* + * Update the device information (in the global device hierarchy) now + * that there's a driver bound to it. + */ + strncpy(device->dev.name, device->pnp.device_name, + sizeof(device->dev.name)); +#endif + + if (driver->ops.scan) { + driver->ops.scan(device); + } + + return_VALUE(0); +} + + +/** + * acpi_bus_attach + * ------------- + * Callback for acpi_bus_walk() used to find devices that match a specific + * driver's criteria and then attach the driver. 
+ */ +static int +acpi_bus_attach ( + struct acpi_device *device, + int level, + void *data) +{ + int result = 0; + struct acpi_driver *driver = NULL; + + ACPI_FUNCTION_TRACE("acpi_bus_attach"); + + if (!device || !data) + return_VALUE(-EINVAL); + + driver = (struct acpi_driver *) data; + + if (device->driver) + return_VALUE(-EEXIST); + + if (!device->status.present) + return_VALUE(-ENODEV); + + result = acpi_bus_match(device, driver); + if (result) + return_VALUE(result); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n", + driver->name, device->pnp.bus_id)); + + result = acpi_bus_driver_init(device, driver); + if (result) + return_VALUE(result); + + down(&acpi_bus_drivers_lock); + ++driver->references; + up(&acpi_bus_drivers_lock); + + return_VALUE(0); +} + + +/** + * acpi_bus_unattach + * ----------------- + * Callback for acpi_bus_walk() used to find devices that match a specific + * driver's criteria and unattach the driver. + */ +static int +acpi_bus_unattach ( + struct acpi_device *device, + int level, + void *data) +{ + int result = 0; + struct acpi_driver *driver = (struct acpi_driver *) data; + + ACPI_FUNCTION_TRACE("acpi_bus_unattach"); + + if (!device || !driver) + return_VALUE(-EINVAL); + + if (device->driver != driver) + return_VALUE(-ENOENT); + + if (!driver->ops.remove) + return_VALUE(-ENOSYS); + + result = driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL); + if (result) + return_VALUE(result); + + device->driver = NULL; + acpi_driver_data(device) = NULL; + + down(&acpi_bus_drivers_lock); + driver->references--; + up(&acpi_bus_drivers_lock); + + return_VALUE(0); +} + + +/** + * acpi_bus_find_driver + * -------------------- + * Parses the list of registered drivers looking for a driver applicable for + * the specified device. + */ +static int +acpi_bus_find_driver ( + struct acpi_device *device) +{ + int result = -ENODEV; + struct list_head *entry = NULL; + struct acpi_driver *driver = NULL; + + ACPI_FUNCTION_TRACE("acpi_bus_find_driver"); + + if (!device || device->driver) + return_VALUE(-EINVAL); + + down(&acpi_bus_drivers_lock); + + list_for_each(entry, &acpi_bus_drivers) { + + driver = list_entry(entry, struct acpi_driver, node); + + if (acpi_bus_match(device, driver)) + continue; + + result = acpi_bus_driver_init(device, driver); + if (!result) + ++driver->references; + + break; + } + + up(&acpi_bus_drivers_lock); + + return_VALUE(result); +} + + +/** + * acpi_bus_register_driver + * ------------------------ + * Registers a driver with the ACPI bus. Searches the namespace for all + * devices that match the driver's criteria and binds. + */ +int +acpi_bus_register_driver ( + struct acpi_driver *driver) +{ + ACPI_FUNCTION_TRACE("acpi_bus_register_driver"); + + if (!driver) + return_VALUE(-EINVAL); + + down(&acpi_bus_drivers_lock); + list_add_tail(&driver->node, &acpi_bus_drivers); + up(&acpi_bus_drivers_lock); + + acpi_bus_walk(acpi_root, acpi_bus_attach, + WALK_DOWN, driver); + + return_VALUE(driver->references); +} + + +/** + * acpi_bus_unregister_driver + * -------------------------- + * Unregisters a driver with the ACPI bus. Searches the namespace for all + * devices that match the driver's criteria and unbinds. 
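+ * If any device remains bound after the walk (references != 0), the driver + * is left registered and the remaining reference count is returned.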
+ */ +int +acpi_bus_unregister_driver ( + struct acpi_driver *driver) +{ + ACPI_FUNCTION_TRACE("acpi_bus_unregister_driver"); + + if (!driver) + return_VALUE(-EINVAL); + + acpi_bus_walk(acpi_root, acpi_bus_unattach, WALK_UP, driver); + + if (driver->references) + return_VALUE(driver->references); + + down(&acpi_bus_drivers_lock); + list_del(&driver->node); + up(&acpi_bus_drivers_lock); + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Device Enumeration + -------------------------------------------------------------------------- */ + +static int +acpi_bus_get_flags ( + struct acpi_device *device) +{ + acpi_status status = AE_OK; + acpi_handle temp = NULL; + + ACPI_FUNCTION_TRACE("acpi_bus_get_flags"); + + /* Presence of _STA indicates 'dynamic_status' */ + status = acpi_get_handle(device->handle, "_STA", &temp); + if (ACPI_SUCCESS(status)) + device->flags.dynamic_status = 1; + + /* Presence of _CID indicates 'compatible_ids' */ + status = acpi_get_handle(device->handle, "_CID", &temp); + if (ACPI_SUCCESS(status)) + device->flags.compatible_ids = 1; + + /* Presence of _RMV indicates 'removable' */ + status = acpi_get_handle(device->handle, "_RMV", &temp); + if (ACPI_SUCCESS(status)) + device->flags.removable = 1; + + /* Presence of _EJD|_EJ0 indicates 'ejectable' */ + status = acpi_get_handle(device->handle, "_EJD", &temp); + if (ACPI_SUCCESS(status)) + device->flags.ejectable = 1; + else { + status = acpi_get_handle(device->handle, "_EJ0", &temp); + if (ACPI_SUCCESS(status)) + device->flags.ejectable = 1; + } + + /* Presence of _LCK indicates 'lockable' */ + status = acpi_get_handle(device->handle, "_LCK", &temp); + if (ACPI_SUCCESS(status)) + device->flags.lockable = 1; + + /* Presence of _PS0|_PR0 indicates 'power manageable' */ + status = acpi_get_handle(device->handle, "_PS0", &temp); + if (ACPI_FAILURE(status)) + status = acpi_get_handle(device->handle, "_PR0", &temp); + if (ACPI_SUCCESS(status)) + device->flags.power_manageable = 1; + + /* TBD: Performance management */ + + return_VALUE(0); +} + + +static int +acpi_bus_add ( + struct acpi_device **child, + struct acpi_device *parent, + acpi_handle handle, + int type) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_device *device = NULL; + char bus_id[5] = {'?',0}; + struct acpi_buffer buffer = {sizeof(bus_id), bus_id}; + struct acpi_device_info info; + char *hid = NULL; + char *uid = NULL; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_bus_add"); + + if (!child) + return_VALUE(-EINVAL); + + device = kmalloc(sizeof(struct acpi_device), GFP_KERNEL); + if (!device) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Memory allocation error\n")); + return_VALUE(-ENOMEM); + } + memset(device, 0, sizeof(struct acpi_device)); + + device->handle = handle; + device->parent = parent; + + memset(&info, 0, sizeof(struct acpi_device_info)); + + /* + * Bus ID + * ------ + * The device's Bus ID is simply the object name. + * TBD: Shouldn't this value be unique (within the ACPI namespace)?
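+ * For instance, the namespace object \_SB_.PCI0 yields bus_id "PCI0", and + * the default case below strips trailing underscores so that e.g. "FDC_" + * becomes "FDC".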
+ */ + switch (type) { + case ACPI_BUS_TYPE_SYSTEM: + sprintf(device->pnp.bus_id, "%s", "ACPI"); + break; + case ACPI_BUS_TYPE_POWER_BUTTON: + sprintf(device->pnp.bus_id, "%s", "PWRF"); + break; + case ACPI_BUS_TYPE_SLEEP_BUTTON: + sprintf(device->pnp.bus_id, "%s", "SLPF"); + break; + default: + acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); + /* Clean up trailing underscores (if any) */ + for (i = 3; i > 1; i--) { + if (bus_id[i] == '_') + bus_id[i] = '\0'; + else + break; + } + sprintf(device->pnp.bus_id, "%s", bus_id); + break; + } + + /* + * Flags + * ----- + * Get prior to calling acpi_bus_get_status() so we know whether + * or not _STA is present. Note that we only look for object + * handles -- cannot evaluate objects until we know the device is + * present and properly initialized. + */ + result = acpi_bus_get_flags(device); + if (result) + goto end; + + /* + * Status + * ------ + * See if the device is present. We always assume that non-Device() + * objects (e.g. thermal zones, power resources, processors, etc.) are + * present, functioning, etc. (at least when parent object is present). + * Note that _STA has a different meaning for some objects (e.g. + * power resources) so we need to be careful how we use it. + */ + switch (type) { + case ACPI_BUS_TYPE_DEVICE: + result = acpi_bus_get_status(device); + if (result) + goto end; + break; + default: + STRUCT_TO_INT(device->status) = 0x0F; + break; + } + if (!device->status.present) { + result = -ENOENT; + goto end; + } + + /* + * Initialize Device + * ----------------- + * TBD: Synch with Core's enumeration/initialization process. + */ + + /* + * Hardware ID, Unique ID, & Bus Address + * ------------------------------------- + */ + switch (type) { + case ACPI_BUS_TYPE_DEVICE: + status = acpi_get_object_info(handle, &info); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error reading device info\n")); + result = -ENODEV; + goto end; + } + /* Clean up info strings (not NULL terminated) */ + info.hardware_id[sizeof(info.hardware_id)-1] = '\0'; + info.unique_id[sizeof(info.unique_id)-1] = '\0'; + if (info.valid & ACPI_VALID_HID) + hid = info.hardware_id; + if (info.valid & ACPI_VALID_UID) + uid = info.unique_id; + if (info.valid & ACPI_VALID_ADR) { + device->pnp.bus_address = info.address; + device->flags.bus_address = 1; + } + break; + case ACPI_BUS_TYPE_POWER: + hid = ACPI_POWER_HID; + break; + case ACPI_BUS_TYPE_PROCESSOR: + hid = ACPI_PROCESSOR_HID; + break; + case ACPI_BUS_TYPE_SYSTEM: + hid = ACPI_SYSTEM_HID; + break; + case ACPI_BUS_TYPE_THERMAL: + hid = ACPI_THERMAL_HID; + break; + case ACPI_BUS_TYPE_POWER_BUTTON: + hid = ACPI_BUTTON_HID_POWERF; + break; + case ACPI_BUS_TYPE_SLEEP_BUTTON: + hid = ACPI_BUTTON_HID_SLEEPF; + break; + } + + /* + * \_SB + * ---- + * Fix for the system root bus device -- the only root-level device. + */ + if ((parent == ACPI_ROOT_OBJECT) && (type == ACPI_BUS_TYPE_DEVICE)) { + hid = ACPI_BUS_HID; + sprintf(device->pnp.device_name, "%s", ACPI_BUS_DEVICE_NAME); + sprintf(device->pnp.device_class, "%s", ACPI_BUS_CLASS); + } + + /* Compaq erroneously puts * in front of their HIDs on old BIOSes */ + if (hid && *hid == '*') { + printk(KERN_DEBUG PREFIX "Ignoring * in _HID. 
Is this a Compaq?\n"); + hid++; + } + + if (hid) { + sprintf(device->pnp.hardware_id, "%s", hid); + device->flags.hardware_id = 1; + } + if (uid) { + sprintf(device->pnp.unique_id, "%s", uid); + device->flags.unique_id = 1; + } + + /* + * Power Management + * ---------------- + */ + if (device->flags.power_manageable) { + result = acpi_bus_get_power_flags(device); + if (result) + goto end; + } + + /* + * Performance Management + * ---------------------- + */ + if (device->flags.performance_manageable) { + result = acpi_bus_get_perf_flags(device); + if (result) + goto end; + } + + /* + * Context + * ------- + * Attach this 'struct acpi_device' to the ACPI object. This makes + * resolutions from handle->device very efficient. Note that we need + * to be careful with fixed-feature devices as they all attach to the + * root object. + */ + switch (type) { + case ACPI_BUS_TYPE_POWER_BUTTON: + case ACPI_BUS_TYPE_SLEEP_BUTTON: + break; + default: + status = acpi_attach_data(device->handle, + acpi_bus_data_handler, device); + break; + } + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error attaching device data\n")); + result = -ENODEV; + goto end; + } + + /* + * Linkage + * ------- + * Link this device to its parent and siblings. + */ + INIT_LIST_HEAD(&device->children); + if (!device->parent) + INIT_LIST_HEAD(&device->node); + else + list_add_tail(&device->node, &device->parent->children); + +#ifdef CONFIG_ACPI_DEBUG + { + char *type_string = NULL; + char name[80] = {'?','\0'}; + struct acpi_buffer buffer = {sizeof(name), name}; + + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + + switch (type) { + case ACPI_BUS_TYPE_DEVICE: + type_string = "Device"; + break; + case ACPI_BUS_TYPE_POWER: + type_string = "Power Resource"; + break; + case ACPI_BUS_TYPE_PROCESSOR: + type_string = "Processor"; + break; + case ACPI_BUS_TYPE_SYSTEM: + type_string = "System"; + break; + case ACPI_BUS_TYPE_THERMAL: + type_string = "Thermal Zone"; + break; + case ACPI_BUS_TYPE_POWER_BUTTON: + type_string = "Power Button"; + sprintf(name, "PWRB"); + break; + case ACPI_BUS_TYPE_SLEEP_BUTTON: + type_string = "Sleep Button"; + sprintf(name, "SLPB"); + break; + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s %s [%p]\n", + type_string, name, handle)); + } +#endif /*CONFIG_ACPI_DEBUG*/ + + /* + * Global Device Hierarchy: + * ------------------------ + * Register this device with the global device hierarchy. + */ + acpi_device_register(device, parent); + + /* + * Bind _ADR-Based Devices + * ----------------------- + * If there's a bus address (_ADR) then we utilize the parent's + * 'bind' function (if it exists) to bind the ACPI- and natively- + * enumerated device representations. + */ + if (device->flags.bus_address) { + if (device->parent && device->parent->ops.bind) + device->parent->ops.bind(device); + } + + /* + * Locate & Attach Driver + * ---------------------- + * If there's a hardware id (_HID) or compatible ids (_CID) we check + * to see if there's a driver installed for this kind of device. Note + * that drivers can install before or after a device is enumerated. + * + * TBD: Assumes LDM provides driver hot-plug capability.
+ */ + if (device->flags.hardware_id || device->flags.compatible_ids) + acpi_bus_find_driver(device); + +end: + if (result) { + kfree(device); + return_VALUE(result); + } + + *child = device; + + return_VALUE(0); +} + + +static int +acpi_bus_remove ( + struct acpi_device *device, + int type) +{ + ACPI_FUNCTION_TRACE("acpi_bus_remove"); + + if (!device) + return_VALUE(-ENODEV); + + acpi_device_unregister(device); + + kfree(device); + + return_VALUE(0); +} + + +int +acpi_bus_scan ( + struct acpi_device *start) +{ + acpi_status status = AE_OK; + struct acpi_device *parent = NULL; + struct acpi_device *child = NULL; + acpi_handle phandle = 0; + acpi_handle chandle = 0; + acpi_object_type type = 0; + u32 level = 1; + + ACPI_FUNCTION_TRACE("acpi_bus_scan"); + + if (!start) + return_VALUE(-EINVAL); + + parent = start; + phandle = start->handle; + + /* + * Parse through the ACPI namespace, identify all 'devices', and + * create a new 'struct acpi_device' for each. + */ + while ((level > 0) && parent) { + + status = acpi_get_next_object(ACPI_TYPE_ANY, phandle, + chandle, &chandle); + + /* + * If this scope is exhausted then move our way back up. + */ + if (ACPI_FAILURE(status)) { + level--; + chandle = phandle; + acpi_get_parent(phandle, &phandle); + if (parent->parent) + parent = parent->parent; + continue; + } + + status = acpi_get_type(chandle, &type); + if (ACPI_FAILURE(status)) + continue; + + /* + * If this is a scope object then parse it (depth-first). + */ + if (type == ACPI_TYPE_LOCAL_SCOPE) { + level++; + phandle = chandle; + chandle = 0; + continue; + } + + /* + * We're only interested in objects that we consider 'devices'. + */ + switch (type) { + case ACPI_TYPE_DEVICE: + type = ACPI_BUS_TYPE_DEVICE; + break; + case ACPI_TYPE_PROCESSOR: + type = ACPI_BUS_TYPE_PROCESSOR; + break; + case ACPI_TYPE_THERMAL: + type = ACPI_BUS_TYPE_THERMAL; + break; + case ACPI_TYPE_POWER: + type = ACPI_BUS_TYPE_POWER; + break; + default: + continue; + } + + status = acpi_bus_add(&child, parent, chandle, type); + if (ACPI_FAILURE(status)) + continue; + + /* + * If the device is present, enabled, and functioning then + * parse its scope (depth-first). Note that we need to + * represent absent devices to facilitate PnP notifications + * -- but only the subtree head (not all of its children, + * which will be enumerated when the parent is inserted). + * + * TBD: Need notifications and other detection mechanisms + * in place before we can fully implement this. + */ + if (child->status.present) { + status = acpi_get_next_object(ACPI_TYPE_ANY, chandle, + 0, NULL); + if (ACPI_SUCCESS(status)) { + level++; + phandle = chandle; + chandle = 0; + parent = child; + } + } + } + + return_VALUE(0); +} + + +static int +acpi_bus_scan_fixed ( + struct acpi_device *root) +{ + int result = 0; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_bus_scan_fixed"); + + if (!root) + return_VALUE(-ENODEV); + + /* + * Enumerate all fixed-feature devices. 
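+ * (A zero pwr_button/sleep_button flag in the FADT means the button is + * implemented in fixed hardware rather than as a control-method device, + * hence the '== 0' tests below.)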
+ */ + if (acpi_fadt.pwr_button == 0) + result = acpi_bus_add(&device, acpi_root, + NULL, ACPI_BUS_TYPE_POWER_BUTTON); + + if (acpi_fadt.sleep_button == 0) + result = acpi_bus_add(&device, acpi_root, + NULL, ACPI_BUS_TYPE_SLEEP_BUTTON); + + return_VALUE(result); +} + + +/* -------------------------------------------------------------------------- + Initialization/Cleanup + -------------------------------------------------------------------------- */ + +static int __init +acpi_bus_init_irq (void) +{ + acpi_status status = AE_OK; + union acpi_object arg = {ACPI_TYPE_INTEGER}; + struct acpi_object_list arg_list = {1, &arg}; + char *message = NULL; + + ACPI_FUNCTION_TRACE("acpi_bus_init_irq"); + + /* + * Let the system know what interrupt model we are using by + * evaluating the \_PIC object, if it exists. + */ + + switch (acpi_irq_model) { + case ACPI_IRQ_MODEL_PIC: + message = "PIC"; + break; + case ACPI_IRQ_MODEL_IOAPIC: + message = "IOAPIC"; + break; + case ACPI_IRQ_MODEL_IOSAPIC: + message = "IOSAPIC"; + break; + default: + printk(KERN_WARNING PREFIX "Unknown interrupt routing model\n"); + return_VALUE(-ENODEV); + } + + printk(KERN_INFO PREFIX "Using %s for interrupt routing\n", message); + + arg.integer.value = acpi_irq_model; + + status = acpi_evaluate_object(NULL, "\\_PIC", &arg_list, NULL); + if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PIC\n")); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + + +static int __init +acpi_bus_init (void) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_buffer buffer = {sizeof(acpi_fadt), &acpi_fadt}; + + ACPI_FUNCTION_TRACE("acpi_bus_init"); + + status = acpi_initialize_subsystem(); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PREFIX "Unable to initialize the ACPI Interpreter\n"); + goto error0; + } + + status = acpi_load_tables(); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PREFIX "Unable to load the System Description Tables\n"); + goto error0; + } + + /* + * Get a separate copy of the FADT for use by other drivers. + */ + status = acpi_get_table(ACPI_TABLE_FADT, 1, &buffer); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PREFIX "Unable to get the FADT\n"); + goto error1; + } + +#ifdef CONFIG_X86 + /* Ensure the SCI is set to level-triggered, active-low */ + if (acpi_ioapic) + mp_config_ioapic_for_sci(acpi_fadt.sci_int); + else + eisa_set_level_irq(acpi_fadt.sci_int); +#endif + + status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PREFIX "Unable to start the ACPI Interpreter\n"); + goto error1; + } + +#ifdef CONFIG_ACPI_EC + /* + * ACPI 2.0 requires the EC driver to be loaded and work before + * the EC device is found in the namespace. This is accomplished + * by looking for the ECDT table, and getting the EC parameters out + * of that. + */ + result = acpi_ec_ecdt_probe(); + if (result) { + goto error1; + } +#endif + + status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n"); + goto error1; + } + + printk(KERN_INFO PREFIX "Interpreter enabled\n"); + + /* + * Get the system interrupt model and evaluate \_PIC. + */ + result = acpi_bus_init_irq(); + if (result) + goto error1; + + /* + * Register for all standard device notifications.
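+ * (ACPI_SYSTEM_NOTIFY covers the system-level 0x00-0x7F notify values + * dispatched by acpi_bus_notify() above.)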
+ */ + status = acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY, &acpi_bus_notify, NULL); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PREFIX "Unable to register for device notifications\n"); + result = -ENODEV; + goto error1; + } + + /* + * Create the root device in the bus's device tree + */ + result = acpi_bus_add(&acpi_root, NULL, ACPI_ROOT_OBJECT, + ACPI_BUS_TYPE_SYSTEM); + if (result) + goto error2; + + /* + * Create the top ACPI proc directory + */ + acpi_device_dir(acpi_root) = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL); + if (!acpi_device_dir(acpi_root)) { + result = -ENODEV; + goto error3; + } + acpi_root_dir = acpi_device_dir(acpi_root); + + /* + * Install drivers required for proper enumeration of the + * ACPI namespace. + */ + acpi_system_init(); /* ACPI System */ + acpi_power_init(); /* ACPI Bus Power Management */ +#ifdef CONFIG_ACPI_EC + acpi_ec_init(); /* ACPI Embedded Controller */ +#endif +#ifdef CONFIG_ACPI_PCI + acpi_pci_link_init(); /* ACPI PCI Interrupt Link */ + acpi_pci_root_init(); /* ACPI PCI Root Bridge */ +#endif + /* + * Enumerate devices in the ACPI namespace. + */ + result = acpi_bus_scan_fixed(acpi_root); + if (result) + goto error4; + result = acpi_bus_scan(acpi_root); + if (result) + goto error4; + + return_VALUE(0); + + /* Mimic structured exception handling */ +error4: + remove_proc_entry(ACPI_BUS_FILE_ROOT, NULL); +error3: + acpi_bus_remove(acpi_root, ACPI_BUS_REMOVAL_NORMAL); +error2: + acpi_remove_notify_handler(ACPI_ROOT_OBJECT, + ACPI_SYSTEM_NOTIFY, &acpi_bus_notify); +error1: + acpi_terminate(); +error0: + return_VALUE(-ENODEV); +} + + +static void __exit +acpi_bus_exit (void) +{ + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE("acpi_bus_exit"); + + status = acpi_remove_notify_handler(ACPI_ROOT_OBJECT, + ACPI_SYSTEM_NOTIFY, acpi_bus_notify); + if (ACPI_FAILURE(status)) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error removing notify handler\n")); + +#ifdef CONFIG_ACPI_PCI + acpi_pci_root_exit(); + acpi_pci_link_exit(); +#endif +#ifdef CONFIG_ACPI_EC + acpi_ec_exit(); +#endif + acpi_power_exit(); + acpi_system_exit(); + + acpi_bus_remove(acpi_root, ACPI_BUS_REMOVAL_NORMAL); + + remove_proc_entry(ACPI_BUS_FILE_ROOT, NULL); + + status = acpi_terminate(); + if (ACPI_FAILURE(status)) + printk(KERN_ERR PREFIX "Unable to terminate the ACPI Interpreter\n"); + else + printk(KERN_INFO PREFIX "Interpreter disabled\n"); + + return_VOID; +} + + +int __init +acpi_init (void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_init"); + + printk(KERN_INFO PREFIX "Subsystem revision %08x\n", + ACPI_CA_VERSION); + + /* Initial core debug level excludes drivers, so include them now */ + acpi_set_debug(ACPI_DEBUG_LOW); + + if (acpi_disabled) { + printk(KERN_INFO PREFIX "Disabled via command line (acpi=off)\n"); + return -ENODEV; + } + +#ifdef CONFIG_PM + if (PM_IS_ACTIVE()) { + printk(KERN_INFO PREFIX "APM is already active, exiting\n"); + return -ENODEV; + } +#endif + + result = acpi_bus_init(); + if (result) + return_VALUE(result); + +#ifdef CONFIG_PM + pm_active = 1; +#endif + + return_VALUE(0); +} + + +void __exit +acpi_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_exit"); + +#ifdef CONFIG_PM + pm_active = 0; +#endif + + acpi_bus_exit(); + + return_VOID; +} + + +int __init +acpi_setup(char *str) +{ + while (str && *str) { + if (strncmp(str, "off", 3) == 0) + acpi_disabled = 1; + str = strchr(str, ','); + if (str) + str += strspn(str, ", \t"); + } + return 1; +} + + +__setup("acpi=", acpi_setup); diff --exclude-from /usr/src/exclude -u --new-file --recursive
linux.21rc5/drivers/acpi/button.c linux.21rc5-ac2/drivers/acpi/button.c --- linux.21rc5/drivers/acpi/button.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/button.c 2003-04-28 17:58:27.000000000 +0100 @@ -0,0 +1,498 @@ +/* + * acpi_button.c - ACPI Button Driver ($Revision: 29 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_BUTTON_COMPONENT +ACPI_MODULE_NAME ("acpi_button") + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION(ACPI_BUTTON_DRIVER_NAME); +MODULE_LICENSE("GPL"); + +#define PREFIX "ACPI: " + + +static int acpi_button_add (struct acpi_device *device); +static int acpi_button_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_button_driver = { + .name = ACPI_BUTTON_DRIVER_NAME, + .class = ACPI_BUTTON_CLASS, + .ids = "ACPI_FPB,ACPI_FSB,PNP0C0D,PNP0C0C,PNP0C0E", + .ops = { + .add = acpi_button_add, + .remove = acpi_button_remove, + }, +}; + +struct acpi_button { + acpi_handle handle; + struct acpi_device *device; /* Fixed button kludge */ + u8 type; + unsigned long pushed; +}; + + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +static struct proc_dir_entry *acpi_button_dir = NULL; + +static int +acpi_button_read_info ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_button *button = (struct acpi_button *) data; + char *p = page; + int len = 0; + + ACPI_FUNCTION_TRACE("acpi_button_read_info"); + + if (!button || !button->device) + goto end; + + p += sprintf(p, "type: %s\n", + acpi_device_name(button->device)); + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + +static int +acpi_button_lid_read_state( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_button *button = (struct acpi_button *) data; + char *p = page; + int len = 0; + acpi_status status=AE_OK; + unsigned long state; + + ACPI_FUNCTION_TRACE("acpi_button_lid_read_state"); + + if (!button || !button->device) + goto end; + + status=acpi_evaluate_integer(button->handle,"_LID",NULL,&state); + if (ACPI_FAILURE(status)){ + p += sprintf(p, "state: unsupported\n"); + } + else{ + p += sprintf(p, "state: %s\n", (state ? 
"open" : "closed")); + } + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + +static int +acpi_button_add_fs ( + struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + struct acpi_button *button = NULL; + + ACPI_FUNCTION_TRACE("acpi_button_add_fs"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + button = acpi_driver_data(device); + + switch (button->type) { + case ACPI_BUTTON_TYPE_POWER: + case ACPI_BUTTON_TYPE_POWERF: + entry = proc_mkdir(ACPI_BUTTON_SUBCLASS_POWER, + acpi_button_dir); + break; + case ACPI_BUTTON_TYPE_SLEEP: + case ACPI_BUTTON_TYPE_SLEEPF: + entry = proc_mkdir(ACPI_BUTTON_SUBCLASS_SLEEP, + acpi_button_dir); + break; + case ACPI_BUTTON_TYPE_LID: + entry = proc_mkdir(ACPI_BUTTON_SUBCLASS_LID, + acpi_button_dir); + break; + } + + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), entry); + if (!acpi_device_dir(device)) + return_VALUE(-ENODEV); + + /* 'info' [R] */ + entry = create_proc_entry(ACPI_BUTTON_FILE_INFO, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_BUTTON_FILE_INFO)); + else { + entry->read_proc = acpi_button_read_info; + entry->data = acpi_driver_data(device); + } + + if (button->type==ACPI_BUTTON_TYPE_LID){ + /* 'state' [R] */ + entry = create_proc_entry(ACPI_BUTTON_FILE_STATE, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_BUTTON_FILE_STATE)); + else { + entry->read_proc = acpi_button_lid_read_state; + entry->data = acpi_driver_data(device); + } + } + + return_VALUE(0); +} + + +static int +acpi_button_remove_fs ( + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_button_remove_fs"); + + if (acpi_device_dir(device)) { + remove_proc_entry(acpi_device_bid(device), acpi_button_dir); + acpi_device_dir(device) = NULL; + } + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ + +void +acpi_button_notify ( + acpi_handle handle, + u32 event, + void *data) +{ + struct acpi_button *button = (struct acpi_button *) data; + + ACPI_FUNCTION_TRACE("acpi_button_notify"); + + if (!button || !button->device) + return_VOID; + + switch (event) { + case ACPI_BUTTON_NOTIFY_STATUS: + acpi_bus_generate_event(button->device, event, ++button->pushed); + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Unsupported event [0x%x]\n", event)); + break; + } + + return_VOID; +} + + +acpi_status +acpi_button_notify_fixed ( + void *data) +{ + struct acpi_button *button = (struct acpi_button *) data; + + ACPI_FUNCTION_TRACE("acpi_button_notify_fixed"); + + if (!button) + return_ACPI_STATUS(AE_BAD_PARAMETER); + + acpi_button_notify(button->handle, ACPI_BUTTON_NOTIFY_STATUS, button); + + return_ACPI_STATUS(AE_OK); +} + + +static int +acpi_button_add ( + struct acpi_device *device) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_button *button = NULL; + + static struct acpi_device *power_button; + static struct acpi_device *sleep_button; + static struct acpi_device *lid_button; + + ACPI_FUNCTION_TRACE("acpi_button_add"); + + if (!device) + return_VALUE(-EINVAL); + + button = kmalloc(sizeof(struct acpi_button), GFP_KERNEL); + if (!button) + return_VALUE(-ENOMEM); + 
memset(button, 0, sizeof(struct acpi_button)); + + button->device = device; + button->handle = device->handle; + acpi_driver_data(device) = button; + + /* + * Determine the button type (via hid), as fixed-feature buttons + * need to be handled a bit differently than generic-space ones. + */ + if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_POWER)) { + button->type = ACPI_BUTTON_TYPE_POWER; + sprintf(acpi_device_name(device), "%s", + ACPI_BUTTON_DEVICE_NAME_POWER); + sprintf(acpi_device_class(device), "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER); + } + else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_POWERF)) { + button->type = ACPI_BUTTON_TYPE_POWERF; + sprintf(acpi_device_name(device), "%s", + ACPI_BUTTON_DEVICE_NAME_POWERF); + sprintf(acpi_device_class(device), "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER); + } + else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_SLEEP)) { + button->type = ACPI_BUTTON_TYPE_SLEEP; + sprintf(acpi_device_name(device), "%s", + ACPI_BUTTON_DEVICE_NAME_SLEEP); + sprintf(acpi_device_class(device), "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP); + } + else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_SLEEPF)) { + button->type = ACPI_BUTTON_TYPE_SLEEPF; + sprintf(acpi_device_name(device), "%s", + ACPI_BUTTON_DEVICE_NAME_SLEEPF); + sprintf(acpi_device_class(device), "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP); + } + else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_LID)) { + button->type = ACPI_BUTTON_TYPE_LID; + sprintf(acpi_device_name(device), "%s", + ACPI_BUTTON_DEVICE_NAME_LID); + sprintf(acpi_device_class(device), "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID); + } + else { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unsupported hid [%s]\n", + acpi_device_hid(device))); + result = -ENODEV; + goto end; + } + + /* + * Ensure only one button of each type is used.
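+ * The function-static power_button/sleep_button/lid_button pointers + * above act as per-type singletons: a second button of a given type + * is rejected with -ENODEV and its private data freed.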
+ */ + switch (button->type) { + case ACPI_BUTTON_TYPE_POWER: + case ACPI_BUTTON_TYPE_POWERF: + if (!power_button) + power_button = device; + else { + kfree(button); + return_VALUE(-ENODEV); + } + break; + case ACPI_BUTTON_TYPE_SLEEP: + case ACPI_BUTTON_TYPE_SLEEPF: + if (!sleep_button) + sleep_button = device; + else { + kfree(button); + return_VALUE(-ENODEV); + } + break; + case ACPI_BUTTON_TYPE_LID: + if (!lid_button) + lid_button = device; + else { + kfree(button); + return_VALUE(-ENODEV); + } + break; + } + + result = acpi_button_add_fs(device); + if (result) + goto end; + + switch (button->type) { + case ACPI_BUTTON_TYPE_POWERF: + status = acpi_install_fixed_event_handler ( + ACPI_EVENT_POWER_BUTTON, + acpi_button_notify_fixed, + button); + break; + case ACPI_BUTTON_TYPE_SLEEPF: + status = acpi_install_fixed_event_handler ( + ACPI_EVENT_SLEEP_BUTTON, + acpi_button_notify_fixed, + button); + break; + default: + status = acpi_install_notify_handler ( + button->handle, + ACPI_DEVICE_NOTIFY, + acpi_button_notify, + button); + break; + } + + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error installing notify handler\n")); + result = -ENODEV; + goto end; + } + + printk(KERN_INFO PREFIX "%s [%s]\n", + acpi_device_name(device), acpi_device_bid(device)); + +end: + if (result) { + acpi_button_remove_fs(device); + kfree(button); + } + + return_VALUE(result); +} + + +static int +acpi_button_remove (struct acpi_device *device, int type) +{ + acpi_status status = 0; + struct acpi_button *button = NULL; + + ACPI_FUNCTION_TRACE("acpi_button_remove"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + button = acpi_driver_data(device); + + /* Unregister for device notifications. */ + switch (button->type) { + case ACPI_BUTTON_TYPE_POWERF: + status = acpi_remove_fixed_event_handler( + ACPI_EVENT_POWER_BUTTON, acpi_button_notify_fixed); + break; + case ACPI_BUTTON_TYPE_SLEEPF: + status = acpi_remove_fixed_event_handler( + ACPI_EVENT_SLEEP_BUTTON, acpi_button_notify_fixed); + break; + default: + status = acpi_remove_notify_handler(button->handle, + ACPI_DEVICE_NOTIFY, acpi_button_notify); + break; + } + + if (ACPI_FAILURE(status)) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error removing notify handler\n")); + + acpi_button_remove_fs(device); + + kfree(button); + + return_VALUE(0); +} + + +static int __init +acpi_button_init (void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_button_init"); + + acpi_button_dir = proc_mkdir(ACPI_BUTTON_CLASS, acpi_root_dir); + if (!acpi_button_dir) + return_VALUE(-ENODEV); + + result = acpi_bus_register_driver(&acpi_button_driver); + if (result < 0) { + remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + + +static void __exit +acpi_button_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_button_exit"); + + acpi_bus_unregister_driver(&acpi_button_driver); + + remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir); + + return_VOID; +} + + +module_init(acpi_button_init); +module_exit(acpi_button_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/Config.in linux.21rc5-ac2/drivers/acpi/Config.in --- linux.21rc5/drivers/acpi/Config.in 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/Config.in 2003-05-03 19:08:44.000000000 +0100 @@ -1,17 +1,93 @@ # -# ACPI configuration +# ACPI Configuration # -#mainmenu_option next_comment -#comment 'ACPI Configuration' -dep_bool ' ACPI Debug Statements' CONFIG_ACPI_DEBUG $CONFIG_ACPI 
-dep_tristate ' ACPI Bus Manager' CONFIG_ACPI_BUSMGR $CONFIG_ACPI +if [ "$CONFIG_X86" = "y" ]; then -dep_tristate ' System' CONFIG_ACPI_SYS $CONFIG_ACPI_BUSMGR $CONFIG_ACPI -dep_tristate ' Processor' CONFIG_ACPI_CPU $CONFIG_ACPI_BUSMGR $CONFIG_ACPI -dep_tristate ' Button' CONFIG_ACPI_BUTTON $CONFIG_ACPI_BUSMGR $CONFIG_ACPI -dep_tristate ' AC Adapter' CONFIG_ACPI_AC $CONFIG_ACPI_BUSMGR $CONFIG_ACPI -dep_tristate ' Embedded Controller' CONFIG_ACPI_EC $CONFIG_ACPI_BUSMGR $CONFIG_ACPI -dep_tristate ' Control Method Battery' CONFIG_ACPI_CMBATT $CONFIG_ACPI_BUSMGR $CONFIG_ACPI $CONFIG_ACPI_EC -dep_tristate ' Thermal' CONFIG_ACPI_THERMAL $CONFIG_ACPI_BUSMGR $CONFIG_ACPI $CONFIG_ACPI_EC -#endmenu + mainmenu_option next_comment + comment 'ACPI Support' + bool 'ACPI Support' CONFIG_ACPI + if [ "$CONFIG_ACPI" = "y" ]; then + if [ "$CONFIG_X86_LOCAL_APIC" = "y" ]; then + bool 'CPU Enumeration Only' CONFIG_ACPI_HT_ONLY + fi + + if [ "$CONFIG_ACPI_HT_ONLY" = "y" ]; then + define_bool CONFIG_ACPI_BOOT y + else + define_bool CONFIG_ACPI_BOOT y + define_bool CONFIG_ACPI_BUS y + define_bool CONFIG_ACPI_INTERPRETER y + define_bool CONFIG_ACPI_EC y + define_bool CONFIG_ACPI_POWER y + if [ "$CONFIG_PCI" = "y" ]; then + define_bool CONFIG_ACPI_PCI y + fi + define_bool CONFIG_ACPI_SLEEP y + define_bool CONFIG_ACPI_SYSTEM y + tristate ' AC Adapter' CONFIG_ACPI_AC + tristate ' Battery' CONFIG_ACPI_BATTERY + tristate ' Button' CONFIG_ACPI_BUTTON + tristate ' Fan' CONFIG_ACPI_FAN + tristate ' Processor' CONFIG_ACPI_PROCESSOR + dep_tristate ' Thermal Zone' CONFIG_ACPI_THERMAL $CONFIG_ACPI_PROCESSOR + if [ "$CONFIG_NUMA" = "y" ]; then + dep_bool ' NUMA support' CONFIG_ACPI_NUMA $CONFIG_NUMA + fi + tristate ' Toshiba Laptop Extras' CONFIG_ACPI_TOSHIBA + bool ' Debug Statements' CONFIG_ACPI_DEBUG + bool ' Relaxed AML Checking' CONFIG_ACPI_RELAXED_AML + fi + fi + + endmenu + +fi + + +if [ "$CONFIG_IA64" = "y" ]; then + + if [ "$CONFIG_IA64_SGI_SN" = "y" ]; then + mainmenu_option next_comment + comment 'ACPI Support' + define_bool CONFIG_ACPI y + define_bool CONFIG_ACPI_EFI y + define_bool CONFIG_ACPI_BOOT y + define_bool CONFIG_ACPI_BUS n + define_bool CONFIG_ACPI_INTERPRETER n + define_bool CONFIG_ACPI_PCI n + define_bool CONFIG_ACPI_POWER n + define_bool CONFIG_ACPI_SYSTEM n + define_bool CONFIG_ACPI_BUTTON n + define_bool CONFIG_ACPI_FAN n + define_bool CONFIG_ACPI_PROCESSOR n + define_bool CONFIG_ACPI_THERMAL n + define_bool CONFIG_ACPI_NUMA y + endmenu + fi + + if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then + mainmenu_option next_comment + comment 'ACPI Support' + if [ "$CONFIG_PCI" = "y" ]; then + define_bool CONFIG_ACPI_PCI y + fi + define_bool CONFIG_ACPI y + define_bool CONFIG_ACPI_EFI y + define_bool CONFIG_ACPI_BOOT y + define_bool CONFIG_ACPI_BUS y + define_bool CONFIG_ACPI_INTERPRETER y + define_bool CONFIG_ACPI_POWER y + define_bool CONFIG_ACPI_SYSTEM y + tristate ' Button' CONFIG_ACPI_BUTTON + tristate ' Fan' CONFIG_ACPI_FAN + tristate ' Processor' CONFIG_ACPI_PROCESSOR + dep_tristate ' Thermal Zone' CONFIG_ACPI_THERMAL $CONFIG_ACPI_PROCESSOR + if [ "$CONFIG_NUMA" = "y" ]; then + dep_bool ' NUMA support' CONFIG_ACPI_NUMA $CONFIG_NUMA + fi + bool ' Debug Statements' CONFIG_ACPI_DEBUG + endmenu + fi + +fi diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbcmds.c linux.21rc5-ac2/drivers/acpi/debugger/dbcmds.c --- linux.21rc5/drivers/acpi/debugger/dbcmds.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbcmds.c 1970-01-01 
01:00:00.000000000 +0100 @@ -1,996 +0,0 @@ -/******************************************************************************* - * - * Module Name: dbcmds - debug commands and output routines - * $Revision: 66 $ - * - ******************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acparser.h" -#include "acevents.h" -#include "acinterp.h" -#include "acdebug.h" -#include "actables.h" -#include "acresrc.h" - -#ifdef ENABLE_DEBUGGER - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbcmds") - - -/* - * Arguments for the Objects command - * These object types map directly to the ACPI_TYPES - */ - -ARGUMENT_INFO acpi_db_object_types [] = -{ {"ANY"}, - {"NUMBERS"}, - {"STRINGS"}, - {"BUFFERS"}, - {"PACKAGES"}, - {"FIELDS"}, - {"DEVICES"}, - {"EVENTS"}, - {"METHODS"}, - {"MUTEXES"}, - {"REGIONS"}, - {"POWERRESOURCES"}, - {"PROCESSORS"}, - {"THERMALZONES"}, - {"BUFFERFIELDS"}, - {"DDBHANDLES"}, - {NULL} /* Must be null terminated */ -}; - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_walk_for_references - * - * PARAMETERS: Callback from Walk_namespace - * - * RETURN: Status - * - * DESCRIPTION: Check if this namespace object refers to the target object - * that is passed in as the context value. 
- * - ******************************************************************************/ - -acpi_status -acpi_db_walk_for_references ( - acpi_handle obj_handle, - u32 nesting_level, - void *context, - void **return_value) -{ - acpi_operand_object *obj_desc = (acpi_operand_object *) context; - acpi_namespace_node *node = (acpi_namespace_node *) obj_handle; - - - /* Check for match against the namespace node itself */ - - if (node == (void *) obj_desc) { - acpi_os_printf ("Object is a Node [%4.4s]\n", &node->name); - } - - /* Check for match against the object attached to the node */ - - if (node->object == obj_desc) { - acpi_os_printf ("Reference at Node->Object %p [%4.4s]\n", node, &node->name); - } - - /* Check first child for a match */ - /* TBD: [Investigate] probably now obsolete with new datastructure */ - - if (node->child == (void *) obj_desc) { - acpi_os_printf ("Reference at Node->Child %p [%4.4s]\n", node, &node->name); - } - - return (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_find_references - * - * PARAMETERS: Object_arg - String with hex value of the object - * - * RETURN: None - * - * DESCRIPTION: Search namespace for all references to the input object - * - ******************************************************************************/ - -void -acpi_db_find_references ( - NATIVE_CHAR *object_arg) -{ - acpi_operand_object *obj_desc; - - - /* Convert string to object pointer */ - - obj_desc = (acpi_operand_object *) STRTOUL (object_arg, NULL, 16); - - /* Search all nodes in namespace */ - - acpi_walk_namespace (ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, - acpi_db_walk_for_references, (void *) obj_desc, NULL); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_locks - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Display information about internal mutexes. - * - ******************************************************************************/ - -void -acpi_db_display_locks (void) -{ - u32 i; - - - for (i = 0; i < MAX_MTX; i++) { - acpi_os_printf ("%26s : %s\n", acpi_ut_get_mutex_name (i), - acpi_gbl_acpi_mutex_info[i].owner_id == ACPI_MUTEX_NOT_ACQUIRED - ? "Locked" : "Unlocked"); - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_table_info - * - * PARAMETERS: Table_arg - String with name of table to be displayed - * - * RETURN: None - * - * DESCRIPTION: Display information about loaded tables. Current - * implementation displays all loaded tables. - * - ******************************************************************************/ - -void -acpi_db_display_table_info ( - NATIVE_CHAR *table_arg) -{ - u32 i; - - - for (i = 0; i < NUM_ACPI_TABLES; i++) { - if (acpi_gbl_acpi_tables[i].pointer) { - acpi_os_printf ("%s at %p length %X\n", acpi_gbl_acpi_table_data[i].name, - acpi_gbl_acpi_tables[i].pointer, acpi_gbl_acpi_tables[i].length); - } - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_unload_acpi_table - * - * PARAMETERS: Table_arg - Name of the table to be unloaded - * Instance_arg - Which instance of the table to unload (if - * there are multiple tables of the same type) - * - * RETURN: Nonde - * - * DESCRIPTION: Unload an ACPI table. 
- * Instance is not implemented - * - ******************************************************************************/ - -void -acpi_db_unload_acpi_table ( - NATIVE_CHAR *table_arg, - NATIVE_CHAR *instance_arg) -{ - u32 i; - acpi_status status; - - - /* Search all tables for the target type */ - - for (i = 0; i < NUM_ACPI_TABLES; i++) { - if (!STRNCMP (table_arg, acpi_gbl_acpi_table_data[i].signature, - acpi_gbl_acpi_table_data[i].sig_length)) { - /* Found the table, unload it */ - - status = acpi_unload_table (i); - if (ACPI_SUCCESS (status)) { - acpi_os_printf ("[%s] unloaded and uninstalled\n", table_arg); - } - else { - acpi_os_printf ("%s, while unloading [%s]\n", - acpi_format_exception (status), table_arg); - } - - return; - } - } - - acpi_os_printf ("Unknown table type [%s]\n", table_arg); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_set_method_breakpoint - * - * PARAMETERS: Location - AML offset of breakpoint - * Walk_state - Current walk info - * Op - Current Op (from parse walk) - * - * RETURN: None - * - * DESCRIPTION: Set a breakpoint in a control method at the specified - * AML offset - * - ******************************************************************************/ - -void -acpi_db_set_method_breakpoint ( - NATIVE_CHAR *location, - acpi_walk_state *walk_state, - acpi_parse_object *op) -{ - u32 address; - - - if (!op) { - acpi_os_printf ("There is no method currently executing\n"); - return; - } - - /* Get and verify the breakpoint address */ - - address = STRTOUL (location, NULL, 16); - if (address <= op->aml_offset) { - acpi_os_printf ("Breakpoint %X is beyond current address %X\n", address, op->aml_offset); - } - - /* Save breakpoint in current walk */ - - walk_state->method_breakpoint = address; - acpi_os_printf ("Breakpoint set at AML offset %X\n", address); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_set_method_call_breakpoint - * - * PARAMETERS: Op - Current Op (from parse walk) - * - * RETURN: None - * - * DESCRIPTION: Set a breakpoint in a control method at the specified - * AML offset - * - ******************************************************************************/ - -void -acpi_db_set_method_call_breakpoint ( - acpi_parse_object *op) -{ - - - if (!op) { - acpi_os_printf ("There is no method currently executing\n"); - return; - } - - - acpi_gbl_step_to_next_call = TRUE; -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_disassemble_aml - * - * PARAMETERS: Statements - Number of statements to disassemble - * Op - Current Op (from parse walk) - * - * RETURN: None - * - * DESCRIPTION: Display disassembled AML (ASL) starting from Op for the number - * of statements specified. 
- * - ******************************************************************************/ - -void -acpi_db_disassemble_aml ( - NATIVE_CHAR *statements, - acpi_parse_object *op) -{ - u32 num_statements = 8; - - - if (!op) { - acpi_os_printf ("There is no method currently executing\n"); - return; - } - - if (statements) { - num_statements = STRTOUL (statements, NULL, 0); - } - - - acpi_db_display_op (NULL, op, num_statements); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_dump_namespace - * - * PARAMETERS: Start_arg - Node to begin namespace dump - * Depth_arg - Maximum tree depth to be dumped - * - * RETURN: None - * - * DESCRIPTION: Dump entire namespace or a subtree. Each node is displayed - * with type and other information. - * - ******************************************************************************/ - -void -acpi_db_dump_namespace ( - NATIVE_CHAR *start_arg, - NATIVE_CHAR *depth_arg) -{ - acpi_handle subtree_entry = acpi_gbl_root_node; - u32 max_depth = ACPI_UINT32_MAX; - - - /* No argument given, just start at the root and dump entire namespace */ - - if (start_arg) { - /* Check if numeric argument, must be a Node */ - - if ((start_arg[0] >= 0x30) && (start_arg[0] <= 0x39)) { - subtree_entry = (acpi_handle) STRTOUL (start_arg, NULL, 16); - if (!acpi_os_readable (subtree_entry, sizeof (acpi_namespace_node))) { - acpi_os_printf ("Address %p is invalid in this address space\n", subtree_entry); - return; - } - - if (!VALID_DESCRIPTOR_TYPE ((subtree_entry), ACPI_DESC_TYPE_NAMED)) { - acpi_os_printf ("Address %p is not a valid Named object\n", subtree_entry); - return; - } - } - - /* Alpha argument */ - - else { - /* The parameter is a name string that must be resolved to a Named obj*/ - - subtree_entry = acpi_db_local_ns_lookup (start_arg); - if (!subtree_entry) { - subtree_entry = acpi_gbl_root_node; - } - } - - /* Now we can check for the depth argument */ - - if (depth_arg) { - max_depth = STRTOUL (depth_arg, NULL, 0); - } - } - - - acpi_db_set_output_destination (DB_DUPLICATE_OUTPUT); - acpi_os_printf ("ACPI Namespace (from %p subtree):\n", subtree_entry); - - /* Display the subtree */ - - acpi_db_set_output_destination (DB_REDIRECTABLE_OUTPUT); - acpi_ns_dump_objects (ACPI_TYPE_ANY, ACPI_DISPLAY_SUMMARY, max_depth, ACPI_UINT32_MAX, subtree_entry); - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_dump_namespace_by_owner - * - * PARAMETERS: Owner_arg - Owner ID whose nodes will be displayed - * Depth_arg - Maximum tree depth to be dumped - * - * RETURN: None - * - * DESCRIPTION: Dump elements of the namespace that are owned by the Owner_id. 
- * - ******************************************************************************/ - -void -acpi_db_dump_namespace_by_owner ( - NATIVE_CHAR *owner_arg, - NATIVE_CHAR *depth_arg) -{ - acpi_handle subtree_entry = acpi_gbl_root_node; - u32 max_depth = ACPI_UINT32_MAX; - u16 owner_id; - - - owner_id = (u16) STRTOUL (owner_arg, NULL, 0); - - - /* Now we can check for the depth argument */ - - if (depth_arg) { - max_depth = STRTOUL (depth_arg, NULL, 0); - } - - - acpi_db_set_output_destination (DB_DUPLICATE_OUTPUT); - acpi_os_printf ("ACPI Namespace by owner %X:\n", owner_id); - - /* Display the subtree */ - - acpi_db_set_output_destination (DB_REDIRECTABLE_OUTPUT); - acpi_ns_dump_objects (ACPI_TYPE_ANY, ACPI_DISPLAY_SUMMARY, max_depth, owner_id, subtree_entry); - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_send_notify - * - * PARAMETERS: Name - Name of ACPI object to send the notify to - * Value - Value of the notify to send. - * - * RETURN: None - * - * DESCRIPTION: Send an ACPI notification. The value specified is sent to the - * named object as an ACPI notify. - * - ******************************************************************************/ - -void -acpi_db_send_notify ( - NATIVE_CHAR *name, - u32 value) -{ - acpi_namespace_node *node; - - - /* Translate name to an Named object */ - - node = acpi_db_local_ns_lookup (name); - if (!node) { - return; - } - - /* Decode Named object type */ - - switch (node->type) { - case ACPI_TYPE_DEVICE: - case ACPI_TYPE_THERMAL: - - /* Send the notify */ - - acpi_ev_queue_notify_request (node, value); - break; - - default: - acpi_os_printf ("Named object is not a device or a thermal object\n"); - break; - } - -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_set_method_data - * - * PARAMETERS: Type_arg - L for local, A for argument - * Index_arg - which one - * Value_arg - Value to set. - * - * RETURN: None - * - * DESCRIPTION: Set a local or argument for the running control method. - * NOTE: only object supported is Number. 
- * - ******************************************************************************/ - -void -acpi_db_set_method_data ( - NATIVE_CHAR *type_arg, - NATIVE_CHAR *index_arg, - NATIVE_CHAR *value_arg) -{ - NATIVE_CHAR type; - u32 index; - u32 value; - acpi_walk_state *walk_state; - acpi_operand_object *obj_desc; - - - /* Validate Type_arg */ - - STRUPR (type_arg); - type = type_arg[0]; - if ((type != 'L') && - (type != 'A')) { - acpi_os_printf ("Invalid SET operand: %s\n", type_arg); - return; - } - - /* Get the index and value */ - - index = STRTOUL (index_arg, NULL, 16); - value = STRTOUL (value_arg, NULL, 16); - - walk_state = acpi_ds_get_current_walk_state (acpi_gbl_current_walk_list); - if (!walk_state) { - acpi_os_printf ("There is no method currently executing\n"); - return; - } - - - /* Create and initialize the new object */ - - obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); - if (!obj_desc) { - acpi_os_printf ("Could not create an internal object\n"); - return; - } - - obj_desc->integer.value = value; - - - /* Store the new object into the target */ - - switch (type) { - case 'A': - - /* Set a method argument */ - - if (index > MTH_NUM_ARGS) { - acpi_os_printf ("Arg%d - Invalid argument name\n", index); - return; - } - - acpi_ds_store_object_to_local (AML_ARG_OP, index, obj_desc, walk_state); - obj_desc = walk_state->arguments[index].object; - - acpi_os_printf ("Arg%d: ", index); - acpi_db_display_internal_object (obj_desc, walk_state); - break; - - case 'L': - - /* Set a method local */ - - if (index > MTH_NUM_LOCALS) { - acpi_os_printf ("Local%d - Invalid local variable name\n", index); - return; - } - - acpi_ds_store_object_to_local (AML_LOCAL_OP, index, obj_desc, walk_state); - obj_desc = walk_state->local_variables[index].object; - - acpi_os_printf ("Local%d: ", index); - acpi_db_display_internal_object (obj_desc, walk_state); - break; - - default: - break; - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_walk_for_specific_objects - * - * PARAMETERS: Callback from Walk_namespace - * - * RETURN: Status - * - * DESCRIPTION: Display short info about objects in the namespace - * - ******************************************************************************/ - -acpi_status -acpi_db_walk_for_specific_objects ( - acpi_handle obj_handle, - u32 nesting_level, - void *context, - void **return_value) -{ - acpi_operand_object *obj_desc; - acpi_status status; - u32 buf_size; - NATIVE_CHAR buffer[64]; - - - obj_desc = ((acpi_namespace_node *)obj_handle)->object; - buf_size = sizeof (buffer) / sizeof (*buffer); - - /* Get and display the full pathname to this object */ - - status = acpi_ns_handle_to_pathname (obj_handle, &buf_size, buffer); - - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Could Not get pathname for object %p\n", obj_handle); - return (AE_OK); - } - - acpi_os_printf ("%32s", buffer); - - - /* Display short information about the object */ - - if (obj_desc) { - switch (obj_desc->common.type) { - case ACPI_TYPE_METHOD: - acpi_os_printf (" #Args %d Concurrency %X", obj_desc->method.param_count, obj_desc->method.concurrency); - break; - - case ACPI_TYPE_INTEGER: - acpi_os_printf (" Value %X", obj_desc->integer.value); - break; - - case ACPI_TYPE_STRING: - acpi_os_printf (" \"%s\"", obj_desc->string.pointer); - break; - - case ACPI_TYPE_REGION: - acpi_os_printf (" Space_id %X Address %X Length %X", obj_desc->region.space_id, obj_desc->region.address, obj_desc->region.length); - break; - - case 
ACPI_TYPE_PACKAGE: - acpi_os_printf (" #Elements %X", obj_desc->package.count); - break; - - case ACPI_TYPE_BUFFER: - acpi_os_printf (" Length %X", obj_desc->buffer.length); - break; - } - } - - acpi_os_printf ("\n"); - return (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_objects - * - * PARAMETERS: Obj_type_arg - Type of object to display - * Display_count_arg - Max depth to display - * - * RETURN: None - * - * DESCRIPTION: Display objects in the namespace of the requested type - * - ******************************************************************************/ - -acpi_status -acpi_db_display_objects ( - NATIVE_CHAR *obj_type_arg, - NATIVE_CHAR *display_count_arg) -{ - acpi_object_type8 type; - - - /* Get the object type */ - - type = acpi_db_match_argument (obj_type_arg, acpi_db_object_types); - if (type == ACPI_TYPE_NOT_FOUND) { - acpi_os_printf ("Invalid or unsupported argument\n"); - return (AE_OK); - } - - acpi_db_set_output_destination (DB_DUPLICATE_OUTPUT); - acpi_os_printf ("Objects of type [%s] defined in the current ACPI Namespace: \n", acpi_ut_get_type_name (type)); - - acpi_db_set_output_destination (DB_REDIRECTABLE_OUTPUT); - - /* Walk the namespace from the root */ - - acpi_walk_namespace (type, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, - acpi_db_walk_for_specific_objects, (void *) &type, NULL); - - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); - return (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_walk_and_match_name - * - * PARAMETERS: Callback from Walk_namespace - * - * RETURN: Status - * - * DESCRIPTION: Find a particular name/names within the namespace. Wildcards - * are supported -- '?' matches any character. - * - ******************************************************************************/ - -acpi_status -acpi_db_walk_and_match_name ( - acpi_handle obj_handle, - u32 nesting_level, - void *context, - void **return_value) -{ - acpi_status status; - NATIVE_CHAR *requested_name = (NATIVE_CHAR *) context; - u32 i; - u32 buf_size; - NATIVE_CHAR buffer[96]; - - - /* Check for a name match */ - - for (i = 0; i < 4; i++) { - /* Wildcard support */ - - if ((requested_name[i] != '?') && - (requested_name[i] != ((NATIVE_CHAR *) (&((acpi_namespace_node *) obj_handle)->name))[i])) { - /* No match, just exit */ - - return (AE_OK); - } - } - - - /* Get the full pathname to this object */ - - buf_size = sizeof (buffer) / sizeof (*buffer); - - status = acpi_ns_handle_to_pathname (obj_handle, &buf_size, buffer); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Could Not get pathname for object %p\n", obj_handle); - } - - else { - acpi_os_printf ("%32s (%p) - %s\n", buffer, obj_handle, - acpi_ut_get_type_name (((acpi_namespace_node *) obj_handle)->type)); - } - - return (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_find_name_in_namespace - * - * PARAMETERS: Name_arg - The 4-character ACPI name to find. - * wildcards are supported. 
- * - * RETURN: None - * - * DESCRIPTION: Search the namespace for a given name (with wildcards) - * - ******************************************************************************/ - -acpi_status -acpi_db_find_name_in_namespace ( - NATIVE_CHAR *name_arg) -{ - - if (STRLEN (name_arg) > 4) { - acpi_os_printf ("Name must be no longer than 4 characters\n"); - return (AE_OK); - } - - /* Walk the namespace from the root */ - - acpi_walk_namespace (ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, - acpi_db_walk_and_match_name, name_arg, NULL); - - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); - return (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_set_scope - * - * PARAMETERS: Name - New scope path - * - * RETURN: Status - * - * DESCRIPTION: Set the "current scope" as maintained by this utility. - * The scope is used as a prefix to ACPI paths. - * - ******************************************************************************/ - -void -acpi_db_set_scope ( - NATIVE_CHAR *name) -{ - - if (!name || name[0] == 0) { - acpi_os_printf ("Current scope: %s\n", acpi_gbl_db_scope_buf); - return; - } - - acpi_db_prep_namestring (name); - - /* TBD: [Future] Validate scope here */ - - if (name[0] == '\\') { - STRCPY (acpi_gbl_db_scope_buf, name); - STRCAT (acpi_gbl_db_scope_buf, "\\"); - } - - else { - STRCAT (acpi_gbl_db_scope_buf, name); - STRCAT (acpi_gbl_db_scope_buf, "\\"); - } - - acpi_os_printf ("New scope: %s\n", acpi_gbl_db_scope_buf); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_resources - * - * PARAMETERS: Object_arg - String with hex value of the object - * - * RETURN: None - * - * DESCRIPTION: - * - ******************************************************************************/ - -void -acpi_db_display_resources ( - NATIVE_CHAR *object_arg) -{ -#ifndef _IA16 - acpi_operand_object *obj_desc; - acpi_status status; - acpi_buffer return_obj; - - - acpi_db_set_output_destination (DB_REDIRECTABLE_OUTPUT); - - /* Convert string to object pointer */ - - obj_desc = (acpi_operand_object *) STRTOUL (object_arg, NULL, 16); - - /* Prepare for a return object of arbitrary size */ - - return_obj.pointer = acpi_gbl_db_buffer; - return_obj.length = ACPI_DEBUG_BUFFER_SIZE; - - - /* _PRT */ - - acpi_os_printf ("Evaluating _PRT\n"); - - status = acpi_evaluate_object (obj_desc, "_PRT", NULL, &return_obj); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Could not obtain _PRT: %s\n", acpi_format_exception (status)); - goto get_crs; - } - - return_obj.pointer = acpi_gbl_db_buffer; - return_obj.length = ACPI_DEBUG_BUFFER_SIZE; - - status = acpi_get_irq_routing_table (obj_desc, &return_obj); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Get_irq_routing_table failed: %s\n", acpi_format_exception (status)); - } - - else { - acpi_rs_dump_irq_list ((u8 *) acpi_gbl_db_buffer); - } - - - /* _CRS */ - -get_crs: - acpi_os_printf ("Evaluating _CRS\n"); - - return_obj.pointer = acpi_gbl_db_buffer; - return_obj.length = ACPI_DEBUG_BUFFER_SIZE; - - status = acpi_evaluate_object (obj_desc, "_CRS", NULL, &return_obj); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Could not obtain _CRS: %s\n", acpi_format_exception (status)); - goto get_prs; - } - - return_obj.pointer = acpi_gbl_db_buffer; - return_obj.length = ACPI_DEBUG_BUFFER_SIZE; - - status = acpi_get_current_resources (obj_desc, &return_obj); - if (ACPI_FAILURE (status)) { - acpi_os_printf 
("Acpi_get_current_resources failed: %s\n", acpi_format_exception (status)); - } - - else { - acpi_rs_dump_resource_list ((acpi_resource *) acpi_gbl_db_buffer); - } - - - /* _PRS */ - -get_prs: - acpi_os_printf ("Evaluating _PRS\n"); - - return_obj.pointer = acpi_gbl_db_buffer; - return_obj.length = ACPI_DEBUG_BUFFER_SIZE; - - status = acpi_evaluate_object (obj_desc, "_PRS", NULL, &return_obj); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Could not obtain _PRS: %s\n", acpi_format_exception (status)); - goto cleanup; - } - - return_obj.pointer = acpi_gbl_db_buffer; - return_obj.length = ACPI_DEBUG_BUFFER_SIZE; - - status = acpi_get_possible_resources (obj_desc, &return_obj); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Acpi_get_possible_resources failed: %s\n", acpi_format_exception (status)); - } - - else { - acpi_rs_dump_resource_list ((acpi_resource *) acpi_gbl_db_buffer); - } - - -cleanup: - - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); - return; -#endif - -} - - -#endif /* ENABLE_DEBUGGER */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbdisasm.c linux.21rc5-ac2/drivers/acpi/debugger/dbdisasm.c --- linux.21rc5/drivers/acpi/debugger/dbdisasm.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbdisasm.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,715 +0,0 @@ -/******************************************************************************* - * - * Module Name: dbdisasm - parser op tree display routines - * $Revision: 50 $ - * - ******************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acdebug.h" - - -#ifdef ENABLE_DEBUGGER - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbdisasm") - - -#define MAX_SHOW_ENTRY 128 -#define BLOCK_PAREN 1 -#define BLOCK_BRACE 2 -#define DB_NO_OP_INFO " [%2.2d] " -#define DB_FULL_OP_INFO "%5.5X #%4.4X [%2.2d] " - - -NATIVE_CHAR *acpi_gbl_db_disasm_indent = "...."; - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_block_type - * - * PARAMETERS: Op - Object to be examined - * - * RETURN: Status - * - * DESCRIPTION: Type of block for this op (parens or braces) - * - ******************************************************************************/ - -u32 -acpi_db_block_type ( - acpi_parse_object *op) -{ - - switch (op->opcode) { - case AML_METHOD_OP: - return (BLOCK_BRACE); - break; - - default: - break; - } - - return (BLOCK_PAREN); - -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ps_display_object_pathname - * - * PARAMETERS: Op - Object whose pathname is to be obtained - * - * RETURN: Status - * - * DESCRIPTION: Diplay the pathname associated with a named object. Two - * versions. One searches the parse tree (for parser-only - * applications suchas Acpi_dump), and the other searches the - * ACPI namespace (the parse tree is probably deleted) - * - ******************************************************************************/ - -#ifdef PARSER_ONLY - -acpi_status -acpi_ps_display_object_pathname ( - acpi_walk_state *walk_state, - acpi_parse_object *op) -{ - acpi_parse_object *target_op; - - - /* Search parent tree up to the root if necessary */ - - target_op = acpi_ps_find (op, op->value.name, 0, 0); - if (!target_op) { - /* - * Didn't find the name in the parse tree. This may be - * a problem, or it may simply be one of the predefined names - * (such as _OS_). Rather than worry about looking up all - * the predefined names, just display the name as given - */ - acpi_os_printf (" **** Path not found in parse tree"); - } - - else { - /* The target was found, print the name and complete path */ - - acpi_os_printf (" (Path "); - acpi_db_display_path (target_op); - acpi_os_printf (")"); - } - - return (AE_OK); -} - -#else - -acpi_status -acpi_ps_display_object_pathname ( - acpi_walk_state *walk_state, - acpi_parse_object *op) -{ - acpi_status status; - acpi_namespace_node *node; - NATIVE_CHAR buffer[MAX_SHOW_ENTRY]; - u32 buffer_size = MAX_SHOW_ENTRY; - u32 debug_level; - - - /* Save current debug level so we don't get extraneous debug output */ - - debug_level = acpi_dbg_level; - acpi_dbg_level = 0; - - /* Just get the Node out of the Op object */ - - node = op->node; - if (!node) { - /* Node not defined in this scope, look it up */ - - status = acpi_ns_lookup (walk_state->scope_info, op->value.string, ACPI_TYPE_ANY, - IMODE_EXECUTE, NS_SEARCH_PARENT, walk_state, &(node)); - - if (ACPI_FAILURE (status)) { - /* - * We can't get the pathname since the object - * is not in the namespace. This can happen during single - * stepping where a dynamic named object is *about* to be created. - */ - acpi_os_printf (" [Path not found]"); - goto exit; - } - - /* Save it for next time. 
*/ - - op->node = node; - } - - /* Convert Named_desc/handle to a full pathname */ - - status = acpi_ns_handle_to_pathname (node, &buffer_size, buffer); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("****Could not get pathname****)"); - goto exit; - } - - acpi_os_printf (" (Path %s)", buffer); - - -exit: - /* Restore the debug level */ - - acpi_dbg_level = debug_level; - return (status); -} - -#endif - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_op - * - * PARAMETERS: Origin - Starting object - * Num_opcodes - Max number of opcodes to be displayed - * - * RETURN: None - * - * DESCRIPTION: Display parser object and its children - * - ******************************************************************************/ - -void -acpi_db_display_op ( - acpi_walk_state *walk_state, - acpi_parse_object *origin, - u32 num_opcodes) -{ - acpi_parse_object *op = origin; - acpi_parse_object *arg; - acpi_parse_object *depth; - u32 depth_count = 0; - u32 last_depth = 0; - u32 i; - u32 j; - - - if (op) { - while (op) { - /* indentation */ - - depth_count = 0; - if (!acpi_gbl_db_opt_verbose) { - depth_count++; - } - - /* Determine the nesting depth of this argument */ - - for (depth = op->parent; depth; depth = depth->parent) { - arg = acpi_ps_get_arg (depth, 0); - while (arg && arg != origin) { - arg = arg->next; - } - - if (arg) { - break; - } - - depth_count++; - } - - - /* Open a new block if we are nested further than last time */ - - if (depth_count > last_depth) { - VERBOSE_PRINT ((DB_NO_OP_INFO, last_depth)); - for (i = 0; i < last_depth; i++) { - acpi_os_printf ("%s", acpi_gbl_db_disasm_indent); - } - - if (acpi_db_block_type (op) == BLOCK_PAREN) { - acpi_os_printf ("(\n"); - } - else { - acpi_os_printf ("{\n"); - } - } - - /* Close a block if we are nested less than last time */ - - else if (depth_count < last_depth) { - for (j = 0; j < (last_depth - depth_count); j++) { - VERBOSE_PRINT ((DB_NO_OP_INFO, last_depth - j)); - for (i = 0; i < (last_depth - j - 1); i++) { - acpi_os_printf ("%s", acpi_gbl_db_disasm_indent); - } - - if (acpi_db_block_type (op) == BLOCK_PAREN) { - acpi_os_printf (")\n"); - } - else { - acpi_os_printf ("}\n"); - } - } - } - - /* In verbose mode, print the AML offset, opcode and depth count */ - - VERBOSE_PRINT ((DB_FULL_OP_INFO, (unsigned) op->aml_offset, op->opcode, depth_count)); - - - /* Indent the output according to the depth count */ - - for (i = 0; i < depth_count; i++) { - acpi_os_printf ("%s", acpi_gbl_db_disasm_indent); - } - - - /* Now print the opcode */ - - acpi_db_display_opcode (walk_state, op); - - /* Resolve a name reference */ - - if ((op->opcode == AML_INT_NAMEPATH_OP && op->value.name) && - (op->parent) && - (acpi_gbl_db_opt_verbose)) { - acpi_ps_display_object_pathname (walk_state, op); - } - - acpi_os_printf ("\n"); - - /* Get the next node in the tree */ - - op = acpi_ps_get_depth_next (origin, op); - last_depth = depth_count; - - num_opcodes--; - if (!num_opcodes) { - op = NULL; - } - } - - /* Close the last block(s) */ - - depth_count = last_depth -1; - for (i = 0; i < last_depth; i++) { - VERBOSE_PRINT ((DB_NO_OP_INFO, last_depth - i)); - for (j = 0; j < depth_count; j++) { - acpi_os_printf ("%s", acpi_gbl_db_disasm_indent); - } - acpi_os_printf ("}\n"); - depth_count--; - } - - } - - else { - acpi_db_display_opcode (walk_state, op); - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_namestring - * - 
* PARAMETERS: Name - ACPI Name string to store - * - * RETURN: None - * - * DESCRIPTION: Display namestring. Handles prefix characters - * - ******************************************************************************/ - -void -acpi_db_display_namestring ( - NATIVE_CHAR *name) -{ - u32 seg_count; - u8 do_dot = FALSE; - - - if (!name) { - acpi_os_printf (""); - return; - } - - if (acpi_ps_is_prefix_char (GET8 (name))) { - /* append prefix character */ - - acpi_os_printf ("%1c", GET8 (name)); - name++; - } - - switch (GET8 (name)) { - case AML_DUAL_NAME_PREFIX: - seg_count = 2; - name++; - break; - - case AML_MULTI_NAME_PREFIX_OP: - seg_count = (u32) GET8 (name + 1); - name += 2; - break; - - default: - seg_count = 1; - break; - } - - while (seg_count--) { - /* append Name segment */ - - if (do_dot) { - /* append dot */ - - acpi_os_printf ("."); - } - - acpi_os_printf ("%4.4s", name); - do_dot = TRUE; - - name += 4; - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_path - * - * PARAMETERS: Op - Named Op whose path is to be constructed - * - * RETURN: None - * - * DESCRIPTION: Walk backwards from current scope and display the name - * of each previous level of scope up to the root scope - * (like "pwd" does with file systems) - * - ******************************************************************************/ - -void -acpi_db_display_path ( - acpi_parse_object *op) -{ - acpi_parse_object *prev; - acpi_parse_object *search; - u32 name; - u8 do_dot = FALSE; - acpi_parse_object *name_path; - const acpi_opcode_info *op_info; - - - /* We are only interested in named objects */ - - op_info = acpi_ps_get_opcode_info (op->opcode); - if (!(op_info->flags & AML_NSNODE)) { - return; - } - - - if (op_info->flags & AML_CREATE) { - /* Field creation - check for a fully qualified namepath */ - - if (op->opcode == AML_CREATE_FIELD_OP) { - name_path = acpi_ps_get_arg (op, 3); - } - else { - name_path = acpi_ps_get_arg (op, 2); - } - - if ((name_path) && - (name_path->value.string) && - (name_path->value.string[0] == '\\')) { - acpi_db_display_namestring (name_path->value.string); - return; - } - } - - prev = NULL; /* Start with Root Node */ - - while (prev != op) { - /* Search upwards in the tree to find scope with "prev" as its parent */ - - search = op; - for (; ;) { - if (search->parent == prev) { - break; - } - - /* Go up one level */ - - search = search->parent; - } - - if (prev) { - op_info = acpi_ps_get_opcode_info (search->opcode); - if (!(op_info->flags & AML_FIELD)) { - /* below root scope, append scope name */ - - if (do_dot) { - /* append dot */ - - acpi_os_printf ("."); - } - - if (op_info->flags & AML_CREATE) { - if (op->opcode == AML_CREATE_FIELD_OP) { - name_path = acpi_ps_get_arg (op, 3); - } - else { - name_path = acpi_ps_get_arg (op, 2); - } - - if ((name_path) && - (name_path->value.string)) { - acpi_os_printf ("%4.4s", name_path->value.string); - } - } - - else { - name = acpi_ps_get_name (search); - acpi_os_printf ("%4.4s", &name); - } - - do_dot = TRUE; - } - } - - prev = search; - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_opcode - * - * PARAMETERS: Op - Op that is to be printed - * - * RETURN: Status - * - * DESCRIPTION: Store printed op in a Buffer and return its length - * (or -1 if out of space) - * - * NOTE: Terse mode prints out ASL-like code. Verbose mode adds more info. 
- * - ******************************************************************************/ - -void -acpi_db_display_opcode ( - acpi_walk_state *walk_state, - acpi_parse_object *op) -{ - u8 *byte_data; - u32 byte_count; - u32 i; - const acpi_opcode_info *op_info = NULL; - u32 name; - - - if (!op) { - acpi_os_printf (""); - } - - - /* op and arguments */ - - switch (op->opcode) { - - case AML_BYTE_OP: - - if (acpi_gbl_db_opt_verbose) { - acpi_os_printf ("(u8) 0x%2.2X", op->value.integer8); - } - - else { - acpi_os_printf ("0x%2.2X", op->value.integer8); - } - - break; - - - case AML_WORD_OP: - - if (acpi_gbl_db_opt_verbose) { - acpi_os_printf ("(u16) 0x%4.4X", op->value.integer16); - } - - else { - acpi_os_printf ("0x%4.4X", op->value.integer16); - } - - break; - - - case AML_DWORD_OP: - - if (acpi_gbl_db_opt_verbose) { - acpi_os_printf ("(u32) 0x%8.8X", op->value.integer32); - } - - else { - acpi_os_printf ("0x%8.8X", op->value.integer32); - } - - break; - - - case AML_QWORD_OP: - - if (acpi_gbl_db_opt_verbose) { - acpi_os_printf ("(u64) 0x%8.8X%8.8X", op->value.integer64.hi, - op->value.integer64.lo); - } - - else { - acpi_os_printf ("0x%8.8X%8.8X", op->value.integer64.hi, - op->value.integer64.lo); - } - - break; - - - case AML_STRING_OP: - - if (op->value.string) { - acpi_os_printf ("\"%s\"", op->value.string); - } - - else { - acpi_os_printf ("<\"NULL STRING PTR\">"); - } - - break; - - - case AML_INT_STATICSTRING_OP: - - if (op->value.string) { - acpi_os_printf ("\"%s\"", op->value.string); - } - - else { - acpi_os_printf ("\"\""); - } - - break; - - - case AML_INT_NAMEPATH_OP: - - acpi_db_display_namestring (op->value.name); - break; - - - case AML_INT_NAMEDFIELD_OP: - - acpi_os_printf ("Named_field (Length 0x%8.8X) ", op->value.integer32); - break; - - - case AML_INT_RESERVEDFIELD_OP: - - acpi_os_printf ("Reserved_field (Length 0x%8.8X) ", op->value.integer32); - break; - - - case AML_INT_ACCESSFIELD_OP: - - acpi_os_printf ("Access_field (Length 0x%8.8X) ", op->value.integer32); - break; - - - case AML_INT_BYTELIST_OP: - - if (acpi_gbl_db_opt_verbose) { - acpi_os_printf ("Byte_list (Length 0x%8.8X) ", op->value.integer32); - } - - else { - acpi_os_printf ("0x%2.2X", op->value.integer32); - - byte_count = op->value.integer32; - byte_data = ((acpi_parse2_object *) op)->data; - - for (i = 0; i < byte_count; i++) { - acpi_os_printf (", 0x%2.2X", byte_data[i]); - } - } - - break; - - - default: - - /* Just get the opcode name and print it */ - - op_info = acpi_ps_get_opcode_info (op->opcode); - acpi_os_printf ("%s", op_info->name); - - -#ifndef PARSER_ONLY - if ((op->opcode == AML_INT_RETURN_VALUE_OP) && - (walk_state->results) && - (walk_state->results->results.num_results)) { - acpi_db_decode_internal_object (walk_state->results->results.obj_desc [walk_state->results->results.num_results-1]); - } -#endif - - break; - } - - if (!op_info) { - /* If there is another element in the list, add a comma */ - - if (op->next) { - acpi_os_printf (","); - } - } - - /* - * If this is a named opcode, print the associated name value - */ - op_info = acpi_ps_get_opcode_info (op->opcode); - if (op && (op_info->flags & AML_NAMED)) { - name = acpi_ps_get_name (op); - acpi_os_printf (" %4.4s", &name); - - if (acpi_gbl_db_opt_verbose) { - acpi_os_printf (" (Path \\"); - acpi_db_display_path (op); - acpi_os_printf (")"); - } - } -} - - -#endif /* ENABLE_DEBUGGER */ - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbdisply.c 
linux.21rc5-ac2/drivers/acpi/debugger/dbdisply.c --- linux.21rc5/drivers/acpi/debugger/dbdisply.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbdisply.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,794 +0,0 @@ -/******************************************************************************* - * - * Module Name: dbdisply - debug display commands - * $Revision: 57 $ - * - ******************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acdispat.h" -#include "acnamesp.h" -#include "acparser.h" -#include "acevents.h" -#include "acinterp.h" -#include "acdebug.h" - - -#ifdef ENABLE_DEBUGGER - - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbdisply") - - -/****************************************************************************** - * - * FUNCTION: Acpi_db_get_pointer - * - * PARAMETERS: Target - Pointer to string to be converted - * - * RETURN: Converted pointer - * - * DESCRIPTION: Convert an ascii pointer value to a real value - * - *****************************************************************************/ - -void * -acpi_db_get_pointer ( - void *target) -{ - void *obj_ptr; - - -#ifdef _IA16 -#include - - /* Have to handle 16-bit pointers of the form segment:offset */ - - if (!sscanf (target, "%p", &obj_ptr)) { - acpi_os_printf ("Invalid pointer: %s\n", target); - return (NULL); - } - -#else - - /* Simple flat pointer */ - - obj_ptr = (void *) STRTOUL (target, NULL, 16); - -#endif - - return (obj_ptr); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_dump_parser_descriptor - * - * PARAMETERS: Op - A parser Op descriptor - * - * RETURN: None - * - * DESCRIPTION: Display a formatted parser object - * - ******************************************************************************/ - -void -acpi_db_dump_parser_descriptor ( - acpi_parse_object *op) -{ - const acpi_opcode_info *info; - - - info = acpi_ps_get_opcode_info (op->opcode); - - acpi_os_printf ("Parser Op Descriptor:\n"); - acpi_os_printf ("%20.20s : %4.4X\n", "Opcode", op->opcode); - - DEBUG_ONLY_MEMBERS (acpi_os_printf ("%20.20s : %s\n", "Opcode Name", info->name)); - - acpi_os_printf ("%20.20s : %p\n", "Value/Arg_list", op->value); - acpi_os_printf ("%20.20s : %p\n", "Parent", op->parent); - acpi_os_printf ("%20.20s : %p\n", "Next_op", op->next); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_decode_and_display_object - * - * PARAMETERS: Target - String with object to be displayed. Names - * and hex pointers are supported. 
- * Output_type - Byte, Word, Dword, or Qword (B|W|D|Q) - * - * RETURN: None - * - * DESCRIPTION: Display a formatted ACPI object - * - ******************************************************************************/ - -void -acpi_db_decode_and_display_object ( - NATIVE_CHAR *target, - NATIVE_CHAR *output_type) -{ - void *obj_ptr; - acpi_namespace_node *node; - u32 display = DB_BYTE_DISPLAY; - NATIVE_CHAR buffer[80]; - acpi_buffer ret_buf; - acpi_status status; - u32 size; - - - if (!target) { - return; - } - - /* Decode the output type */ - - if (output_type) { - STRUPR (output_type); - if (output_type[0] == 'W') { - display = DB_WORD_DISPLAY; - } - else if (output_type[0] == 'D') { - display = DB_DWORD_DISPLAY; - } - else if (output_type[0] == 'Q') { - display = DB_QWORD_DISPLAY; - } - } - - - ret_buf.length = sizeof (buffer); - ret_buf.pointer = buffer; - - /* Differentiate between a number and a name */ - - if ((target[0] >= 0x30) && (target[0] <= 0x39)) { - obj_ptr = acpi_db_get_pointer (target); - if (!acpi_os_readable (obj_ptr, 16)) { - acpi_os_printf ("Address %p is invalid in this address space\n", obj_ptr); - return; - } - - /* Decode the object type */ - - if (VALID_DESCRIPTOR_TYPE ((obj_ptr), ACPI_DESC_TYPE_NAMED)) { - /* This is a Node */ - - if (!acpi_os_readable (obj_ptr, sizeof (acpi_namespace_node))) { - acpi_os_printf ("Cannot read entire Named object at address %p\n", obj_ptr); - return; - } - - node = obj_ptr; - goto dump_nte; - } - - else if (VALID_DESCRIPTOR_TYPE ((obj_ptr), ACPI_DESC_TYPE_INTERNAL)) { - /* This is an ACPI OBJECT */ - - if (!acpi_os_readable (obj_ptr, sizeof (acpi_operand_object))) { - acpi_os_printf ("Cannot read entire ACPI object at address %p\n", obj_ptr); - return; - } - - acpi_ut_dump_buffer (obj_ptr, sizeof (acpi_operand_object), display, ACPI_UINT32_MAX); - acpi_ex_dump_object_descriptor (obj_ptr, 1); - } - - else if (VALID_DESCRIPTOR_TYPE ((obj_ptr), ACPI_DESC_TYPE_PARSER)) { - /* This is an Parser Op object */ - - if (!acpi_os_readable (obj_ptr, sizeof (acpi_parse_object))) { - acpi_os_printf ("Cannot read entire Parser object at address %p\n", obj_ptr); - return; - } - - - acpi_ut_dump_buffer (obj_ptr, sizeof (acpi_parse_object), display, ACPI_UINT32_MAX); - acpi_db_dump_parser_descriptor ((acpi_parse_object *) obj_ptr); - } - - else { - size = 16; - if (acpi_os_readable (obj_ptr, 64)) { - size = 64; - } - - /* Just dump some memory */ - - acpi_ut_dump_buffer (obj_ptr, size, display, ACPI_UINT32_MAX); - } - - return; - } - - - /* The parameter is a name string that must be resolved to a Named obj */ - - node = acpi_db_local_ns_lookup (target); - if (!node) { - return; - } - - -dump_nte: - /* Now dump the Named obj */ - - status = acpi_get_name (node, ACPI_FULL_PATHNAME, &ret_buf); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Could not convert name to pathname\n"); - } - - else { - acpi_os_printf ("Object (%p) Pathname: %s\n", node, ret_buf.pointer); - } - - if (!acpi_os_readable (node, sizeof (acpi_namespace_node))) { - acpi_os_printf ("Invalid Named object at address %p\n", node); - return; - } - - acpi_ut_dump_buffer ((void *) node, sizeof (acpi_namespace_node), display, ACPI_UINT32_MAX); - acpi_ex_dump_node (node, 1); - - if (node->object) { - acpi_os_printf ("\n_attached Object (%p):\n", node->object); - if (!acpi_os_readable (node->object, sizeof (acpi_operand_object))) { - acpi_os_printf ("Invalid internal ACPI Object at address %p\n", node->object); - return; - } - - acpi_ut_dump_buffer ((void *) node->object, sizeof 
(acpi_operand_object), display, ACPI_UINT32_MAX); - acpi_ex_dump_object_descriptor (node->object, 1); - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_decode_internal_object - * - * PARAMETERS: Obj_desc - Object to be displayed - * - * RETURN: None - * - * DESCRIPTION: Short display of an internal object. Numbers and Strings. - * - ******************************************************************************/ - -void -acpi_db_decode_internal_object ( - acpi_operand_object *obj_desc) -{ - u32 i; - - - if (!obj_desc) { - return; - } - - acpi_os_printf (" %s", acpi_ut_get_type_name (obj_desc->common.type)); - - switch (obj_desc->common.type) { - case ACPI_TYPE_INTEGER: - - acpi_os_printf (" %.8X%.8X", HIDWORD (obj_desc->integer.value), - LODWORD (obj_desc->integer.value)); - break; - - - case ACPI_TYPE_STRING: - - acpi_os_printf ("(%d) \"%.24s", - obj_desc->string.length, obj_desc->string.pointer); - - if (obj_desc->string.length > 24) - { - acpi_os_printf ("..."); - } - else - { - acpi_os_printf ("\""); - } - break; - - - case ACPI_TYPE_BUFFER: - - acpi_os_printf ("(%d)", obj_desc->buffer.length); - for (i = 0; (i < 8) && (i < obj_desc->buffer.length); i++) { - acpi_os_printf (" %2.2X", obj_desc->buffer.pointer[i]); - } - break; - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_internal_object - * - * PARAMETERS: Obj_desc - Object to be displayed - * Walk_state - Current walk state - * - * RETURN: None - * - * DESCRIPTION: Short display of an internal object - * - ******************************************************************************/ - -void -acpi_db_display_internal_object ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state) -{ - u8 type; - - - acpi_os_printf ("%p ", obj_desc); - - if (!obj_desc) { - acpi_os_printf ("\n"); - return; - } - - - /* Decode the object type */ - - else if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_PARSER)) { - acpi_os_printf (" "); - } - - else if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_NAMED)) { - acpi_os_printf (" Name %4.4s Type-%s", - &((acpi_namespace_node *)obj_desc)->name, - acpi_ut_get_type_name (((acpi_namespace_node *) obj_desc)->type)); - if (((acpi_namespace_node *) obj_desc)->flags & ANOBJ_METHOD_ARG) { - acpi_os_printf (" [Method Arg]"); - } - if (((acpi_namespace_node *) obj_desc)->flags & ANOBJ_METHOD_LOCAL) { - acpi_os_printf (" [Method Local]"); - } - } - - else if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_INTERNAL)) { - type = obj_desc->common.type; - if (type > INTERNAL_TYPE_MAX) { - acpi_os_printf (" Type %x [Invalid Type]", type); - return; - } - - /* Decode the ACPI object type */ - - switch (obj_desc->common.type) { - case INTERNAL_TYPE_REFERENCE: - switch (obj_desc->reference.opcode) { - case AML_ZERO_OP: - acpi_os_printf ("[Const] Zero (0) [Null Target]", 0); - break; - - case AML_ONES_OP: - acpi_os_printf ("[Const] Ones (0xFFFFFFFFFFFFFFFF) [No Limit]"); - break; - - case AML_ONE_OP: - acpi_os_printf ("[Const] One (1)"); - break; - - case AML_REVISION_OP: - acpi_os_printf ("[Const] Revision (%X)", ACPI_CA_SUPPORT_LEVEL); - break; - - case AML_LOCAL_OP: - acpi_os_printf ("[Local%d]", obj_desc->reference.offset); - if (walk_state) { - obj_desc = walk_state->local_variables[obj_desc->reference.offset].object; - acpi_os_printf (" %p", obj_desc); - acpi_db_decode_internal_object (obj_desc); - } - break; - - case AML_ARG_OP: - acpi_os_printf ("[Arg%d] ", 
obj_desc->reference.offset); - if (walk_state) { - obj_desc = walk_state->arguments[obj_desc->reference.offset].object; - acpi_os_printf (" %p", obj_desc); - acpi_db_decode_internal_object (obj_desc); - } - break; - - case AML_DEBUG_OP: - acpi_os_printf ("[Debug] "); - break; - - case AML_INDEX_OP: - acpi_os_printf ("[Index] "); - acpi_db_decode_internal_object (obj_desc->reference.object); - break; - - default: - break; - - } - break; - - default: - acpi_os_printf (" "); - acpi_os_printf (" "); - acpi_db_decode_internal_object (obj_desc); - break; - } - } - - else { - acpi_os_printf (" "); - } - - acpi_os_printf ("\n"); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_method_info - * - * PARAMETERS: Start_op - Root of the control method parse tree - * - * RETURN: None - * - * DESCRIPTION: Display information about the current method - * - ******************************************************************************/ - -void -acpi_db_display_method_info ( - acpi_parse_object *start_op) -{ - acpi_walk_state *walk_state; - acpi_operand_object *obj_desc; - acpi_namespace_node *node; - acpi_parse_object *root_op; - acpi_parse_object *op; - const acpi_opcode_info *op_info; - u32 num_ops = 0; - u32 num_operands = 0; - u32 num_operators = 0; - u32 num_remaining_ops = 0; - u32 num_remaining_operands = 0; - u32 num_remaining_operators = 0; - u32 num_args; - u32 concurrency; - u8 count_remaining = FALSE; - - - walk_state = acpi_ds_get_current_walk_state (acpi_gbl_current_walk_list); - if (!walk_state) { - acpi_os_printf ("There is no method currently executing\n"); - return; - } - - obj_desc = walk_state->method_desc; - node = walk_state->method_node; - - num_args = obj_desc->method.param_count; - concurrency = obj_desc->method.concurrency; - - acpi_os_printf ("Currently executing control method is [%4.4s]\n", &node->name); - acpi_os_printf ("%X arguments, max concurrency = %X\n", num_args, concurrency); - - - root_op = start_op; - while (root_op->parent) { - root_op = root_op->parent; - } - - op = root_op; - - while (op) { - if (op == start_op) { - count_remaining = TRUE; - } - - num_ops++; - if (count_remaining) { - num_remaining_ops++; - } - - /* Decode the opcode */ - - op_info = acpi_ps_get_opcode_info (op->opcode); - switch (op_info->class) { - case AML_CLASS_ARGUMENT: - if (count_remaining) { - num_remaining_operands++; - } - - num_operands++; - break; - - case AML_CLASS_UNKNOWN: - /* Bad opcode or ASCII character */ - - continue; - - default: - if (count_remaining) { - num_remaining_operators++; - } - - num_operators++; - break; - } - - - op = acpi_ps_get_depth_next (start_op, op); - } - - acpi_os_printf ("Method contains: %X AML Opcodes - %X Operators, %X Operands\n", - num_ops, num_operators, num_operands); - - acpi_os_printf ("Remaining to execute: %X AML Opcodes - %X Operators, %X Operands\n", - num_remaining_ops, num_remaining_operators, num_remaining_operands); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_locals - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Display all locals for the currently running control method - * - ******************************************************************************/ - -void -acpi_db_display_locals (void) -{ - u32 i; - acpi_walk_state *walk_state; - acpi_operand_object *obj_desc; - acpi_namespace_node *node; - - - walk_state = acpi_ds_get_current_walk_state 
(acpi_gbl_current_walk_list); - if (!walk_state) { - acpi_os_printf ("There is no method currently executing\n"); - return; - } - - obj_desc = walk_state->method_desc; - node = walk_state->method_node; - - - acpi_os_printf ("Local Variables for method [%4.4s]:\n", &node->name); - - for (i = 0; i < MTH_NUM_LOCALS; i++) { - obj_desc = walk_state->local_variables[i].object; - acpi_os_printf ("Local%d: ", i); - acpi_db_display_internal_object (obj_desc, walk_state); - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_arguments - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Display all arguments for the currently running control method - * - ******************************************************************************/ - -void -acpi_db_display_arguments (void) -{ - u32 i; - acpi_walk_state *walk_state; - acpi_operand_object *obj_desc; - u32 num_args; - u32 concurrency; - acpi_namespace_node *node; - - - walk_state = acpi_ds_get_current_walk_state (acpi_gbl_current_walk_list); - if (!walk_state) { - acpi_os_printf ("There is no method currently executing\n"); - return; - } - - obj_desc = walk_state->method_desc; - node = walk_state->method_node; - - num_args = obj_desc->method.param_count; - concurrency = obj_desc->method.concurrency; - - acpi_os_printf ("Method [%4.4s] has %X arguments, max concurrency = %X\n", &node->name, num_args, concurrency); - - for (i = 0; i < num_args; i++) { - obj_desc = walk_state->arguments[i].object; - acpi_os_printf ("Arg%d: ", i); - acpi_db_display_internal_object (obj_desc, walk_state); - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_results - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Display current contents of a method result stack - * - ******************************************************************************/ - -void -acpi_db_display_results (void) -{ - u32 i; - acpi_walk_state *walk_state; - acpi_operand_object *obj_desc; - u32 num_results = 0; - acpi_namespace_node *node; - - - walk_state = acpi_ds_get_current_walk_state (acpi_gbl_current_walk_list); - if (!walk_state) { - acpi_os_printf ("There is no method currently executing\n"); - return; - } - - obj_desc = walk_state->method_desc; - node = walk_state->method_node; - - if (walk_state->results) { - num_results = walk_state->results->results.num_results; - } - - acpi_os_printf ("Method [%4.4s] has %X stacked result objects\n", &node->name, num_results); - - for (i = 0; i < num_results; i++) { - obj_desc = walk_state->results->results.obj_desc[i]; - acpi_os_printf ("Result%d: ", i); - acpi_db_display_internal_object (obj_desc, walk_state); - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_calling_tree - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Display current calling tree of nested control methods - * - ******************************************************************************/ - -void -acpi_db_display_calling_tree (void) -{ - u32 i; - acpi_walk_state *walk_state; - acpi_namespace_node *node; - - - walk_state = acpi_ds_get_current_walk_state (acpi_gbl_current_walk_list); - if (!walk_state) { - acpi_os_printf ("There is no method currently executing\n"); - return; - } - - node = walk_state->method_node; - - acpi_os_printf ("Current Control Method Call Tree\n"); - - for (i = 0; walk_state; i++) { - node = 
walk_state->method_node; - - acpi_os_printf (" [%4.4s]\n", &node->name); - - walk_state = walk_state->next; - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_result_object - * - * PARAMETERS: Obj_desc - Object to be displayed - * Walk_state - Current walk state - * - * RETURN: None - * - * DESCRIPTION: Display the result of an AML opcode - * - ******************************************************************************/ - -void -acpi_db_display_result_object ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state) -{ - - /* TBD: [Future] We don't always want to display the result. - * For now, only display if single stepping - * however, this output is very useful in other contexts also - */ - if (!acpi_gbl_cm_single_step) { - return; - } - - acpi_os_printf ("Result_obj: "); - acpi_db_display_internal_object (obj_desc, walk_state); - acpi_os_printf ("\n"); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_argument_object - * - * PARAMETERS: Obj_desc - Object to be displayed - * Walk_state - Current walk state - * - * RETURN: None - * - * DESCRIPTION: Display the result of an AML opcode - * - ******************************************************************************/ - -void -acpi_db_display_argument_object ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state) -{ - - - if (!acpi_gbl_cm_single_step) { - return; - } - - acpi_os_printf ("Arg_obj: "); - acpi_db_display_internal_object (obj_desc, walk_state); -} - -#endif /* ENABLE_DEBUGGER */ - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbexec.c linux.21rc5-ac2/drivers/acpi/debugger/dbexec.c --- linux.21rc5/drivers/acpi/debugger/dbexec.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbexec.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,404 +0,0 @@ -/******************************************************************************* - * - * Module Name: dbexec - debugger control method execution - * $Revision: 34 $ - * - ******************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acparser.h" -#include "acevents.h" -#include "acinterp.h" -#include "acdebug.h" -#include "actables.h" - -#ifdef ENABLE_DEBUGGER - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbexec") - - -db_method_info acpi_gbl_db_method_info; - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_execute_method - * - * PARAMETERS: Info - Valid info segment - * Return_obj - Where to put return object - * - * RETURN: Status - * - * DESCRIPTION: Execute a control method. - * - ******************************************************************************/ - -acpi_status -acpi_db_execute_method ( - db_method_info *info, - acpi_buffer *return_obj) -{ - acpi_status status; - acpi_object_list param_objects; - acpi_object params[MTH_NUM_ARGS]; - u32 i; - - - if (acpi_gbl_db_output_to_file && !acpi_dbg_level) { - acpi_os_printf ("Warning: debug output is not enabled!\n"); - } - - /* Are there arguments to the method? */ - - if (info->args && info->args[0]) { - for (i = 0; info->args[i] && i < MTH_NUM_ARGS; i++) { - params[i].type = ACPI_TYPE_INTEGER; - params[i].integer.value = STRTOUL (info->args[i], NULL, 16); - } - - param_objects.pointer = params; - param_objects.count = i; - } - - else { - /* Setup default parameters */ - - params[0].type = ACPI_TYPE_INTEGER; - params[0].integer.value = 0x01020304; - - params[1].type = ACPI_TYPE_STRING; - params[1].string.length = 12; - params[1].string.pointer = "AML Debugger"; - - param_objects.pointer = params; - param_objects.count = 2; - } - - /* Prepare for a return object of arbitrary size */ - - return_obj->pointer = acpi_gbl_db_buffer; - return_obj->length = ACPI_DEBUG_BUFFER_SIZE; - - - /* Do the actual method execution */ - - status = acpi_evaluate_object (NULL, info->pathname, ¶m_objects, return_obj); - - acpi_gbl_cm_single_step = FALSE; - acpi_gbl_method_executing = FALSE; - - return (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_execute_setup - * - * PARAMETERS: Info - Valid method info - * - * RETURN: Status - * - * DESCRIPTION: Setup info segment prior to method execution - * - ******************************************************************************/ - -void -acpi_db_execute_setup ( - db_method_info *info) -{ - - /* Catenate the current scope to the supplied name */ - - info->pathname[0] = 0; - if ((info->name[0] != '\\') && - (info->name[0] != '/')) { - STRCAT (info->pathname, acpi_gbl_db_scope_buf); - } - - STRCAT (info->pathname, info->name); - acpi_db_prep_namestring (info->pathname); - - acpi_db_set_output_destination (DB_DUPLICATE_OUTPUT); - acpi_os_printf ("Executing %s\n", info->pathname); - - if (info->flags & EX_SINGLE_STEP) { - acpi_gbl_cm_single_step = TRUE; - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); - } - - else { - /* No single step, allow redirection to a file */ - - acpi_db_set_output_destination (DB_REDIRECTABLE_OUTPUT); - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_get_outstanding_allocations - * - * PARAMETERS: None - * - * RETURN: Current global allocation count minus cache 
entries - * - * DESCRIPTION: Determine the current number of "outstanding" allocations -- - * those allocations that have not been freed and also are not - * in one of the various object caches. - * - ******************************************************************************/ - -u32 -acpi_db_get_outstanding_allocations (void) -{ - u32 i; - u32 outstanding = 0; - - -#ifdef ACPI_DBG_TRACK_ALLOCATIONS - - for (i = ACPI_MEM_LIST_FIRST_CACHE_LIST; i < ACPI_NUM_MEM_LISTS; i++) { - outstanding += (acpi_gbl_memory_lists[i].total_allocated - - acpi_gbl_memory_lists[i].total_freed - - acpi_gbl_memory_lists[i].cache_depth); - } -#endif - - return (outstanding); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_execute - * - * PARAMETERS: Name - Name of method to execute - * Args - Parameters to the method - * Flags - single step/no single step - * - * RETURN: Status - * - * DESCRIPTION: Execute a control method. Name is relative to the current - * scope. - * - ******************************************************************************/ - -void -acpi_db_execute ( - NATIVE_CHAR *name, - NATIVE_CHAR **args, - u32 flags) -{ - acpi_status status; - acpi_buffer return_obj; - - -#ifdef ACPI_DEBUG - u32 previous_allocations; - u32 allocations; - - - /* Memory allocation tracking */ - - previous_allocations = acpi_db_get_outstanding_allocations (); -#endif - - acpi_gbl_db_method_info.name = name; - acpi_gbl_db_method_info.args = args; - acpi_gbl_db_method_info.flags = flags; - - acpi_db_execute_setup (&acpi_gbl_db_method_info); - status = acpi_db_execute_method (&acpi_gbl_db_method_info, &return_obj); - - /* - * Allow any handlers in separate threads to complete. - * (Such as Notify handlers invoked from AML executed above). - */ - acpi_os_sleep (0, 10); - - -#ifdef ACPI_DEBUG - - /* Memory allocation tracking */ - - allocations = acpi_db_get_outstanding_allocations () - previous_allocations; - - acpi_db_set_output_destination (DB_DUPLICATE_OUTPUT); - - if (allocations > 0) { - acpi_os_printf ("Outstanding: %ld allocations after execution\n", - allocations); - } -#endif - - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Execution of %s failed with status %s\n", - acpi_gbl_db_method_info.pathname, acpi_format_exception (status)); - } - - else { - /* Display a return object, if any */ - - if (return_obj.length) { - acpi_os_printf ("Execution of %s returned object %p Buflen %X\n", - acpi_gbl_db_method_info.pathname, return_obj.pointer, return_obj.length); - acpi_db_dump_object (return_obj.pointer, 1); - } - } - - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_method_thread - * - * PARAMETERS: Context - Execution info segment - * - * RETURN: None - * - * DESCRIPTION: Debugger execute thread. Waits for a command line, then - * simply dispatches it. 
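The "outstanding allocations" arithmetic above reduces to a per-list ledger: anything allocated but neither freed nor parked in an object cache is outstanding. A minimal sketch, with memory_list as an illustrative stand-in rather than the ACPICA list type:

#include <stdio.h>

struct memory_list {
    unsigned int total_allocated;
    unsigned int total_freed;
    unsigned int cache_depth;           /* held for reuse, not leaked */
};

static unsigned int outstanding_allocations(const struct memory_list *lists,
                                            int num_lists)
{
    unsigned int count = 0;
    int i;

    for (i = 0; i < num_lists; i++)
        count += lists[i].total_allocated
               - lists[i].total_freed
               - lists[i].cache_depth;

    return count;
}

int main(void)
{
    struct memory_list lists[2] = {
        { 100, 90, 6 },                 /* 4 outstanding */
        {  50, 47, 3 },                 /* 0 outstanding */
    };

    printf("%u\n", outstanding_allocations(lists, 2));  /* prints 4 */
    return 0;
}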
- * - ******************************************************************************/ - -void -acpi_db_method_thread ( - void *context) -{ - acpi_status status; - db_method_info *info = context; - u32 i; - acpi_buffer return_obj; - - - for (i = 0; i < info->num_loops; i++) { - status = acpi_db_execute_method (info, &return_obj); - if (ACPI_SUCCESS (status)) { - if (return_obj.length) { - acpi_os_printf ("Execution of %s returned object %p Buflen %X\n", - info->pathname, return_obj.pointer, return_obj.length); - acpi_db_dump_object (return_obj.pointer, 1); - } - } - } - - /* Signal our completion */ - - acpi_os_signal_semaphore (info->thread_gate, 1); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_create_execution_threads - * - * PARAMETERS: Num_threads_arg - Number of threads to create - * Num_loops_arg - Loop count for the thread(s) - * Method_name_arg - Control method to execute - * - * RETURN: None - * - * DESCRIPTION: Create threads to execute method(s) - * - ******************************************************************************/ - -void -acpi_db_create_execution_threads ( - NATIVE_CHAR *num_threads_arg, - NATIVE_CHAR *num_loops_arg, - NATIVE_CHAR *method_name_arg) -{ - acpi_status status; - u32 num_threads; - u32 num_loops; - u32 i; - acpi_handle thread_gate; - - - /* Get the arguments */ - - num_threads = STRTOUL (num_threads_arg, NULL, 0); - num_loops = STRTOUL (num_loops_arg, NULL, 0); - - if (!num_threads || !num_loops) { - acpi_os_printf ("Bad argument: Threads %X, Loops %X\n", num_threads, num_loops); - return; - } - - - /* Create the synchronization semaphore */ - - status = acpi_os_create_semaphore (1, 0, &thread_gate); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Could not create semaphore, %s\n", acpi_format_exception (status)); - return; - } - - /* Setup the context to be passed to each thread */ - - acpi_gbl_db_method_info.name = method_name_arg; - acpi_gbl_db_method_info.args = NULL; - acpi_gbl_db_method_info.flags = 0; - acpi_gbl_db_method_info.num_loops = num_loops; - acpi_gbl_db_method_info.thread_gate = thread_gate; - - acpi_db_execute_setup (&acpi_gbl_db_method_info); - - - /* Create the threads */ - - acpi_os_printf ("Creating %X threads to execute %X times each\n", num_threads, num_loops); - - for (i = 0; i < (num_threads); i++) { - acpi_os_queue_for_execution (OSD_PRIORITY_MED, acpi_db_method_thread, &acpi_gbl_db_method_info); - } - - - /* Wait for all threads to complete */ - - i = num_threads; - while (i) /* Brain damage for OSD implementations that only support wait of 1 unit */ { - status = acpi_os_wait_semaphore (thread_gate, 1, WAIT_FOREVER); - i--; - } - - /* Cleanup and exit */ - - acpi_os_delete_semaphore (thread_gate); - - acpi_db_set_output_destination (DB_DUPLICATE_OUTPUT); - acpi_os_printf ("All threads (%X) have completed\n", num_threads); - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); -} - - -#endif /* ENABLE_DEBUGGER */ - - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbfileio.c linux.21rc5-ac2/drivers/acpi/debugger/dbfileio.c --- linux.21rc5/drivers/acpi/debugger/dbfileio.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbfileio.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,381 +0,0 @@ -/******************************************************************************* - * - * Module Name: dbfileio - Debugger file I/O commands. 
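The thread-gate pattern used by acpi_db_create_execution_threads above translates directly to POSIX primitives: each worker posts a counting semaphore once on completion, and the creator waits one unit per worker (the same "wait of 1 unit" loop as above). A minimal sketch, assuming pthreads and POSIX semaphores in place of the acpi_os_* wrappers:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define NUM_THREADS 4

static sem_t thread_gate;

static void *worker(void *arg)
{
    printf("worker %ld done\n", (long) arg);
    sem_post(&thread_gate);             /* signal our completion */
    return NULL;
}

int main(void)
{
    pthread_t tid[NUM_THREADS];
    long i;

    sem_init(&thread_gate, 0, 0);       /* gate starts closed */

    for (i = 0; i < NUM_THREADS; i++)
        pthread_create(&tid[i], NULL, worker, (void *) i);

    /* Wait for all workers, one unit at a time */
    for (i = 0; i < NUM_THREADS; i++)
        sem_wait(&thread_gate);

    for (i = 0; i < NUM_THREADS; i++)
        pthread_join(&tid[i], NULL);

    sem_destroy(&thread_gate);
    return 0;
}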
These can't usually - * be used when running the debugger in Ring 0 (Kernel mode) - * $Revision: 53 $ - * - ******************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include "acpi.h" -#include "acdebug.h" -#include "acnamesp.h" -#include "acparser.h" -#include "acevents.h" -#include "actables.h" - -#ifdef ENABLE_DEBUGGER - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbfileio") - - -/* - * NOTE: this is here for lack of a better place. It is used in all - * flavors of the debugger, need LCD file - */ -#ifdef ACPI_APPLICATION -#include -FILE *acpi_gbl_debug_file = NULL; -#endif - - -acpi_table_header *acpi_gbl_db_table_ptr = NULL; - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_match_argument - * - * PARAMETERS: User_argument - User command line - * Arguments - Array of commands to match against - * - * RETURN: Index into command array or ACPI_TYPE_NOT_FOUND if not found - * - * DESCRIPTION: Search command array for a command match - * - ******************************************************************************/ - -acpi_object_type8 -acpi_db_match_argument ( - NATIVE_CHAR *user_argument, - ARGUMENT_INFO *arguments) -{ - u32 i; - - - if (!user_argument || user_argument[0] == 0) { - return (ACPI_TYPE_NOT_FOUND); - } - - for (i = 0; arguments[i].name; i++) { - if (STRSTR (arguments[i].name, user_argument) == arguments[i].name) { - return ((acpi_object_type8) i); - } - } - - /* Argument not recognized */ - - return (ACPI_TYPE_NOT_FOUND); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_close_debug_file - * - * PARAMETERS: None - * - * RETURN: Status - * - * DESCRIPTION: If open, close the current debug output file - * - ******************************************************************************/ - -void -acpi_db_close_debug_file ( - void) -{ - -#ifdef ACPI_APPLICATION - - if (acpi_gbl_debug_file) { - fclose (acpi_gbl_debug_file); - acpi_gbl_debug_file = NULL; - acpi_gbl_db_output_to_file = FALSE; - acpi_os_printf ("Debug output file %s closed\n", acpi_gbl_db_debug_filename); - } -#endif - -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_open_debug_file - * - * PARAMETERS: Name - Filename to open - * - * RETURN: Status - * - * DESCRIPTION: Open a file where debug output will be directed. 
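The prefix matching used by acpi_db_match_argument above (and by acpi_db_match_command in dbinput.c further on) relies on the identity that strstr(name, arg) == name holds exactly when name starts with arg. A standalone sketch with an illustrative command table:

#include <stdio.h>
#include <string.h>

static const char *commands[] = { "CLOSE", "DUMP", "EXECUTE", "HELP", NULL };

static int match_command(const char *arg)
{
    int i;

    if (!arg || !arg[0])
        return -1;

    for (i = 0; commands[i]; i++)
        if (strstr(commands[i], arg) == commands[i])
            return i;                   /* first command starting with arg */

    return -1;                          /* not recognized */
}

int main(void)
{
    printf("%d\n", match_command("EXE"));   /* 2 */
    printf("%d\n", match_command("XYZ"));   /* -1 */
    return 0;
}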
- * - ******************************************************************************/ - -void -acpi_db_open_debug_file ( - NATIVE_CHAR *name) -{ - -#ifdef ACPI_APPLICATION - - acpi_db_close_debug_file (); - acpi_gbl_debug_file = fopen (name, "w+"); - if (acpi_gbl_debug_file) { - acpi_os_printf ("Debug output file %s opened\n", name); - STRCPY (acpi_gbl_db_debug_filename, name); - acpi_gbl_db_output_to_file = TRUE; - } - else { - acpi_os_printf ("Could not open debug file %s\n", name); - } - -#endif -} - - -#ifdef ACPI_APPLICATION -/******************************************************************************* - * - * FUNCTION: Acpi_db_load_table - * - * PARAMETERS: fp - File that contains table - * Table_ptr - Return value, buffer with table - * Table_lenght - Return value, length of table - * - * RETURN: Status - * - * DESCRIPTION: Load the DSDT from the file pointer - * - ******************************************************************************/ - -acpi_status -acpi_db_load_table( - FILE *fp, - acpi_table_header **table_ptr, - u32 *table_length) -{ - acpi_table_header table_header; - u8 *aml_start; - u32 aml_length; - u32 actual; - acpi_status status; - - - /* Read the table header */ - - if (fread (&table_header, 1, sizeof (table_header), fp) != sizeof (acpi_table_header)) { - acpi_os_printf ("Couldn't read the table header\n"); - return (AE_BAD_SIGNATURE); - } - - - /* Validate the table header/length */ - - status = acpi_tb_validate_table_header (&table_header); - if ((ACPI_FAILURE (status)) || - (table_header.length > 524288)) /* 1/2 Mbyte should be enough */ { - acpi_os_printf ("Table header is invalid!\n"); - return (AE_ERROR); - } - - - /* We only support a limited number of table types */ - - if (STRNCMP ((char *) table_header.signature, DSDT_SIG, 4) && - STRNCMP ((char *) table_header.signature, PSDT_SIG, 4) && - STRNCMP ((char *) table_header.signature, SSDT_SIG, 4)) { - acpi_os_printf ("Table signature is invalid\n"); - DUMP_BUFFER (&table_header, sizeof (acpi_table_header)); - return (AE_ERROR); - } - - /* Allocate a buffer for the table */ - - *table_length = table_header.length; - *table_ptr = acpi_os_allocate ((size_t) *table_length); - if (!*table_ptr) { - acpi_os_printf ("Could not allocate memory for ACPI table %4.4s (size=%X)\n", - table_header.signature, table_header.length); - return (AE_NO_MEMORY); - } - - - aml_start = (u8 *) *table_ptr + sizeof (table_header); - aml_length = *table_length - sizeof (table_header); - - /* Copy the header to the buffer */ - - MEMCPY (*table_ptr, &table_header, sizeof (table_header)); - - /* Get the rest of the table */ - - actual = fread (aml_start, 1, (size_t) aml_length, fp); - if (actual == aml_length) { - return (AE_OK); - } - - if (actual > 0) { - acpi_os_printf ("Warning - reading table, asked for %X got %X\n", aml_length, actual); - return (AE_OK); - } - - - acpi_os_printf ("Error - could not read the table file\n"); - acpi_os_free (*table_ptr); - *table_ptr = NULL; - *table_length = 0; - - return (AE_ERROR); -} -#endif - - -/******************************************************************************* - * - * FUNCTION: Ae_local_load_table - * - * PARAMETERS: Table_ptr - pointer to a buffer containing the entire - * table to be loaded - * - * RETURN: Status - * - * DESCRIPTION: This function is called to load a table from the caller's - * buffer. The buffer must contain an entire ACPI Table including - * a valid header. 
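acpi_db_load_table above is a two-phase read: fetch the fixed-size header, take the full table length from it, then allocate a buffer and read the remainder. A minimal sketch, with table_header reduced to the two fields the logic actually uses (the real ACPI header carries more) and the same half-megabyte sanity cap:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct table_header {
    char     signature[4];
    uint32_t length;                    /* length of the entire table */
};

static void *load_table(FILE *fp, uint32_t *out_len)
{
    struct table_header hdr;
    uint8_t *table;

    if (fread(&hdr, 1, sizeof(hdr), fp) != sizeof(hdr))
        return NULL;                    /* couldn't read the header */

    if (hdr.length < sizeof(hdr) || hdr.length > 512 * 1024)
        return NULL;                    /* implausible length, reject */

    table = malloc(hdr.length);
    if (!table)
        return NULL;

    memcpy(table, &hdr, sizeof(hdr));   /* header was already consumed */
    if (fread(table + sizeof(hdr), 1, hdr.length - sizeof(hdr), fp)
            != hdr.length - sizeof(hdr)) {
        free(table);
        return NULL;
    }

    *out_len = hdr.length;
    return table;
}

int main(int argc, char **argv)
{
    FILE *fp;
    uint32_t len;
    void *table;

    if (argc < 2 || !(fp = fopen(argv[1], "rb")))
        return 1;
    table = load_table(fp, &len);
    fclose(fp);
    if (!table)
        return 1;
    printf("loaded %u bytes\n", len);
    free(table);
    return 0;
}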
The header fields will be verified, and if it - * is determined that the table is invalid, the call will fail. - * - * If the call fails an appropriate status will be returned. - * - ******************************************************************************/ - -acpi_status -ae_local_load_table ( - acpi_table_header *table_ptr) -{ - acpi_status status; - acpi_table_desc table_info; - - - FUNCTION_TRACE ("Ae_local_load_table"); - - if (!table_ptr) { - return_ACPI_STATUS (AE_BAD_PARAMETER); - } - - /* Install the new table into the local data structures */ - - table_info.pointer = table_ptr; - - status = acpi_tb_install_table (NULL, &table_info); - if (ACPI_FAILURE (status)) { - /* Free table allocated by Acpi_tb_get_table */ - - acpi_tb_delete_single_table (&table_info); - return_ACPI_STATUS (status); - } - - -#ifndef PARSER_ONLY - status = acpi_ns_load_table (table_info.installed_desc, acpi_gbl_root_node); - if (ACPI_FAILURE (status)) { - /* Uninstall table and free the buffer */ - - acpi_tb_delete_acpi_table (ACPI_TABLE_DSDT); - return_ACPI_STATUS (status); - } -#endif - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_load_acpi_table - * - * PARAMETERS: Filname - File where table is located - * - * RETURN: Status - * - * DESCRIPTION: Load an ACPI table from a file - * - ******************************************************************************/ - -acpi_status -acpi_db_load_acpi_table ( - NATIVE_CHAR *filename) -{ -#ifdef ACPI_APPLICATION - FILE *fp; - acpi_status status; - u32 table_length; - - - /* Open the file */ - - fp = fopen (filename, "rb"); - if (!fp) { - acpi_os_printf ("Could not open file %s\n", filename); - return (AE_ERROR); - } - - - /* Get the entire file */ - - acpi_os_printf ("Loading Acpi table from file %s\n", filename); - status = acpi_db_load_table (fp, &acpi_gbl_db_table_ptr, &table_length); - fclose(fp); - - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Couldn't get table from the file\n"); - return (status); - } - - /* Attempt to recognize and install the table */ - - status = ae_local_load_table (acpi_gbl_db_table_ptr); - if (ACPI_FAILURE (status)) { - if (status == AE_EXIST) { - acpi_os_printf ("Table %4.4s is already installed\n", - &acpi_gbl_db_table_ptr->signature); - } - else { - acpi_os_printf ("Could not install table, %s\n", - acpi_format_exception (status)); - } - - acpi_os_free (acpi_gbl_db_table_ptr); - return (status); - } - - acpi_os_printf ("%4.4s at %p successfully installed and loaded\n", - &acpi_gbl_db_table_ptr->signature, acpi_gbl_db_table_ptr); - - acpi_gbl_acpi_hardware_present = FALSE; - -#endif /* ACPI_APPLICATION */ - return (AE_OK); -} - - -#endif /* ENABLE_DEBUGGER */ - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbhistry.c linux.21rc5-ac2/drivers/acpi/debugger/dbhistry.c --- linux.21rc5/drivers/acpi/debugger/dbhistry.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbhistry.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ -/****************************************************************************** - * - * Module Name: dbhistry - debugger HISTORY command - * $Revision: 19 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. 
Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acparser.h" -#include "acevents.h" -#include "acinterp.h" -#include "acdebug.h" -#include "actables.h" - -#ifdef ENABLE_DEBUGGER - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbhistry") - - -#define HI_NO_HISTORY 0 -#define HI_RECORD_HISTORY 1 -#define HISTORY_SIZE 20 - - -typedef struct history_info -{ - NATIVE_CHAR command[80]; - u32 cmd_num; - -} HISTORY_INFO; - - -HISTORY_INFO acpi_gbl_history_buffer[HISTORY_SIZE]; -u16 acpi_gbl_lo_history = 0; -u16 acpi_gbl_num_history = 0; -u16 acpi_gbl_next_history_index = 0; -u32 acpi_gbl_next_cmd_num = 1; - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_add_to_history - * - * PARAMETERS: Command_line - Command to add - * - * RETURN: None - * - * DESCRIPTION: Add a command line to the history buffer. - * - ******************************************************************************/ - -void -acpi_db_add_to_history ( - NATIVE_CHAR *command_line) -{ - - - /* Put command into the next available slot */ - - STRCPY (acpi_gbl_history_buffer[acpi_gbl_next_history_index].command, command_line); - - acpi_gbl_history_buffer[acpi_gbl_next_history_index].cmd_num = acpi_gbl_next_cmd_num; - - /* Adjust indexes */ - - if ((acpi_gbl_num_history == HISTORY_SIZE) && - (acpi_gbl_next_history_index == acpi_gbl_lo_history)) { - acpi_gbl_lo_history++; - if (acpi_gbl_lo_history >= HISTORY_SIZE) { - acpi_gbl_lo_history = 0; - } - } - - acpi_gbl_next_history_index++; - if (acpi_gbl_next_history_index >= HISTORY_SIZE) { - acpi_gbl_next_history_index = 0; - } - - - acpi_gbl_next_cmd_num++; - if (acpi_gbl_num_history < HISTORY_SIZE) { - acpi_gbl_num_history++; - } - -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_history - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Display the contents of the history buffer - * - ******************************************************************************/ - -void -acpi_db_display_history (void) -{ - NATIVE_UINT i; - u16 history_index; - - - history_index = acpi_gbl_lo_history; - - /* Dump entire history buffer */ - - for (i = 0; i < acpi_gbl_num_history; i++) { - acpi_os_printf ("%ld %s\n", acpi_gbl_history_buffer[history_index].cmd_num, - acpi_gbl_history_buffer[history_index].command); - - history_index++; - if (history_index >= HISTORY_SIZE) { - history_index = 0; - } - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_get_from_history - * - * PARAMETERS: Command_num_arg - String containing the number of the - * command to be retrieved - * - * RETURN: 
None - * - * DESCRIPTION: Get a command from the history buffer - * - ******************************************************************************/ - -NATIVE_CHAR * -acpi_db_get_from_history ( - NATIVE_CHAR *command_num_arg) -{ - NATIVE_UINT i; - u16 history_index; - u32 cmd_num; - - - if (command_num_arg == NULL) { - cmd_num = acpi_gbl_next_cmd_num - 1; - } - - else { - cmd_num = STRTOUL (command_num_arg, NULL, 0); - } - - - /* Search history buffer */ - - history_index = acpi_gbl_lo_history; - for (i = 0; i < acpi_gbl_num_history; i++) { - if (acpi_gbl_history_buffer[history_index].cmd_num == cmd_num) { - /* Found the commnad, return it */ - - return (acpi_gbl_history_buffer[history_index].command); - } - - - history_index++; - if (history_index >= HISTORY_SIZE) { - history_index = 0; - } - } - - acpi_os_printf ("Invalid history number: %d\n", history_index); - return (NULL); -} - - -#endif /* ENABLE_DEBUGGER */ - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbinput.c linux.21rc5-ac2/drivers/acpi/debugger/dbinput.c --- linux.21rc5/drivers/acpi/debugger/dbinput.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbinput.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,924 +0,0 @@ -/******************************************************************************* - * - * Module Name: dbinput - user front-end to the AML debugger - * $Revision: 72 $ - * - ******************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
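The history buffer in dbhistry.c above is a fixed-size ring: next_history_index names the slot to fill, lo_history the oldest surviving entry, and both wrap modulo the buffer size, so once the buffer is full each new command overwrites the oldest. A minimal sketch with a deliberately small buffer so the wraparound is visible:

#include <stdio.h>
#include <string.h>

#define HISTORY_SIZE 4                  /* small, to make wraparound visible */

static char history[HISTORY_SIZE][80];
static unsigned int lo;                 /* index of the oldest entry */
static unsigned int next;               /* slot the next command fills */
static unsigned int count;              /* entries currently stored */

static void add_to_history(const char *cmd)
{
    strncpy(history[next], cmd, sizeof(history[next]) - 1);

    /* A full buffer means the slot just written held the oldest
     * entry, so the "low" index advances with us. */
    if (count == HISTORY_SIZE && next == lo)
        lo = (lo + 1) % HISTORY_SIZE;

    next = (next + 1) % HISTORY_SIZE;
    if (count < HISTORY_SIZE)
        count++;
}

static void display_history(void)
{
    unsigned int i, idx = lo;

    for (i = 0; i < count; i++) {
        printf("%u: %s\n", i, history[idx]);
        idx = (idx + 1) % HISTORY_SIZE;
    }
}

int main(void)
{
    const char *cmds[] = { "HELP", "TABLES", "FIND", "DUMP", "GO", "QUIT" };
    unsigned int i;

    for (i = 0; i < 6; i++)
        add_to_history(cmds[i]);

    display_history();  /* FIND, DUMP, GO, QUIT: oldest two dropped */
    return 0;
}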
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include "acpi.h" -#include "acparser.h" -#include "actables.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "acdebug.h" - - -#ifdef ENABLE_DEBUGGER - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbinput") - - -/* - * Globals that are specific to the debugger - */ - -NATIVE_CHAR acpi_gbl_db_line_buf[80]; -NATIVE_CHAR acpi_gbl_db_parsed_buf[80]; -NATIVE_CHAR acpi_gbl_db_scope_buf[40]; -NATIVE_CHAR acpi_gbl_db_debug_filename[40]; -NATIVE_CHAR *acpi_gbl_db_args[DB_MAX_ARGS]; -NATIVE_CHAR *acpi_gbl_db_buffer = NULL; -NATIVE_CHAR *acpi_gbl_db_filename = NULL; -u8 acpi_gbl_db_output_to_file = FALSE; - -u32 acpi_gbl_db_debug_level = ACPI_LV_VERBOSITY2; -u32 acpi_gbl_db_console_debug_level = NORMAL_DEFAULT | ACPI_LV_TABLES; -u8 acpi_gbl_db_output_flags = DB_CONSOLE_OUTPUT; - - -u8 acpi_gbl_db_opt_tables = FALSE; -u8 acpi_gbl_db_opt_disasm = FALSE; -u8 acpi_gbl_db_opt_stats = FALSE; -u8 acpi_gbl_db_opt_parse_jit = FALSE; -u8 acpi_gbl_db_opt_verbose = TRUE; -u8 acpi_gbl_db_opt_ini_methods = TRUE; - -/* - * Statistic globals - */ -u16 acpi_gbl_obj_type_count[INTERNAL_TYPE_NODE_MAX+1]; -u16 acpi_gbl_node_type_count[INTERNAL_TYPE_NODE_MAX+1]; -u16 acpi_gbl_obj_type_count_misc; -u16 acpi_gbl_node_type_count_misc; -u32 acpi_gbl_num_nodes; -u32 acpi_gbl_num_objects; - - -u32 acpi_gbl_size_of_parse_tree; -u32 acpi_gbl_size_of_method_trees; -u32 acpi_gbl_size_of_node_entries; -u32 acpi_gbl_size_of_acpi_objects; - -/* - * Top-level debugger commands. - * - * This list of commands must match the string table below it - */ - -enum acpi_ex_debugger_commands -{ - CMD_NOT_FOUND = 0, - CMD_NULL, - CMD_ALLOCATIONS, - CMD_ARGS, - CMD_ARGUMENTS, - CMD_BREAKPOINT, - CMD_CALL, - CMD_CLOSE, - CMD_DEBUG, - CMD_DUMP, - CMD_ENABLEACPI, - CMD_EVENT, - CMD_EXECUTE, - CMD_EXIT, - CMD_FIND, - CMD_GO, - CMD_HELP, - CMD_HELP2, - CMD_HISTORY, - CMD_HISTORY_EXE, - CMD_HISTORY_LAST, - CMD_INFORMATION, - CMD_INTO, - CMD_LEVEL, - CMD_LIST, - CMD_LOAD, - CMD_LOCALS, - CMD_LOCKS, - CMD_METHODS, - CMD_NAMESPACE, - CMD_NOTIFY, - CMD_OBJECT, - CMD_OPEN, - CMD_OWNER, - CMD_PREFIX, - CMD_QUIT, - CMD_REFERENCES, - CMD_RESOURCES, - CMD_RESULTS, - CMD_SET, - CMD_STATS, - CMD_STOP, - CMD_TABLES, - CMD_TERMINATE, - CMD_THREADS, - CMD_TREE, - CMD_UNLOAD -}; - -#define CMD_FIRST_VALID 2 - - -const COMMAND_INFO acpi_gbl_db_commands[] = -{ {"", 0}, - {"", 0}, - {"ALLOCATIONS", 0}, - {"ARGS", 0}, - {"ARGUMENTS", 0}, - {"BREAKPOINT", 1}, - {"CALL", 0}, - {"CLOSE", 0}, - {"DEBUG", 1}, - {"DUMP", 1}, - {"ENABLEACPI", 0}, - {"EVENT", 1}, - {"EXECUTE", 1}, - {"EXIT", 0}, - {"FIND", 1}, - {"GO", 0}, - {"HELP", 0}, - {"?", 0}, - {"HISTORY", 0}, - {"!", 1}, - {"!!", 0}, - {"INFORMATION", 0}, - {"INTO", 0}, - {"LEVEL", 0}, - {"LIST", 0}, - {"LOAD", 1}, - {"LOCALS", 0}, - {"LOCKS", 0}, - {"METHODS", 0}, - {"NAMESPACE", 0}, - {"NOTIFY", 2}, - {"OBJECT", 1}, - {"OPEN", 1}, - {"OWNER", 1}, - {"PREFIX", 0}, - {"QUIT", 0}, - {"REFERENCES", 1}, - {"RESOURCES", 1}, - {"RESULTS", 0}, - {"SET", 3}, - {"STATS", 0}, - {"STOP", 0}, - {"TABLES", 0}, - {"TERMINATE", 0}, - {"THREADS", 3}, - {"TREE", 0}, - {"UNLOAD", 1}, - {NULL, 0} -}; - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_help - * - * PARAMETERS: Help_type - Subcommand (optional) - * - * RETURN: None - * - * 
DESCRIPTION: Print a usage message. - * - ******************************************************************************/ - -void -acpi_db_display_help ( - NATIVE_CHAR *help_type) -{ - - - /* No parameter, just give the overview */ - - if (!help_type) - { - acpi_os_printf ("ACPI CA Debugger Commands\n\n"); - acpi_os_printf ("The following classes of commands are available. Help is available for\n"); - acpi_os_printf ("each class by entering \"Help \"\n\n"); - acpi_os_printf (" [GENERAL] General-Purpose Commands\n"); - acpi_os_printf (" [NAMESPACE] Namespace Access Commands\n"); - acpi_os_printf (" [METHOD] Control Method Execution Commands\n"); - acpi_os_printf (" [FILE] File I/O Commands\n"); - return; - - } - - - /* - * Parameter is the command class - * - * The idea here is to keep each class of commands smaller than a screenful - */ - - switch (help_type[0]) - { - case 'G': - acpi_os_printf ("\n_general-Purpose Commands\n\n"); - acpi_os_printf ("Allocations Display list of current memory allocations\n"); - acpi_os_printf ("Dump
|\n"); - acpi_os_printf (" [Byte|Word|Dword|Qword] Display ACPI objects or memory\n"); - acpi_os_printf ("Enable_acpi Enable ACPI (hardware) mode\n"); - acpi_os_printf ("Help This help screen\n"); - acpi_os_printf ("History Display command history buffer\n"); - acpi_os_printf ("Level [] [console] Get/Set debug level for file or console\n"); - acpi_os_printf ("Locks Current status of internal mutexes\n"); - acpi_os_printf ("Quit or Exit Exit this command\n"); - acpi_os_printf ("Stats [Allocations|Memory|Misc\n"); - acpi_os_printf (" |Objects|Tables] Display namespace and memory statistics\n"); - acpi_os_printf ("Tables Display info about loaded ACPI tables\n"); - acpi_os_printf ("Unload [Instance] Unload an ACPI table\n"); - acpi_os_printf ("! Execute command from history buffer\n"); - acpi_os_printf ("!! Execute last command again\n"); - return; - - case 'N': - acpi_os_printf ("\n_namespace Access Commands\n\n"); - acpi_os_printf ("Debug [Arguments] Single Step a control method\n"); - acpi_os_printf ("Event Generate Acpi_event (Fixed/GPE)\n"); - acpi_os_printf ("Execute [Arguments] Execute control method\n"); - acpi_os_printf ("Find (? is wildcard) Find ACPI name(s) with wildcards\n"); - acpi_os_printf ("Method Display list of loaded control methods\n"); - acpi_os_printf ("Namespace [|] [Depth] Display loaded namespace tree/subtree\n"); - acpi_os_printf ("Notify Send a notification\n"); - acpi_os_printf ("Objects Display all objects of the given type\n"); - acpi_os_printf ("Owner [Depth] Display loaded namespace by object owner\n"); - acpi_os_printf ("Prefix [] Set or Get current execution prefix\n"); - acpi_os_printf ("References Find all references to object at addr\n"); - acpi_os_printf ("Resources xxx Get and display resources\n"); - acpi_os_printf ("Terminate Delete namespace and all internal objects\n"); - acpi_os_printf ("Thread Spawn threads to execute method(s)\n"); - return; - - case 'M': - acpi_os_printf ("\n_control Method Execution Commands\n\n"); - acpi_os_printf ("Arguments (or Args) Display method arguments\n"); - acpi_os_printf ("Breakpoint Set an AML execution breakpoint\n"); - acpi_os_printf ("Call Run to next control method invocation\n"); - acpi_os_printf ("Go Allow method to run to completion\n"); - acpi_os_printf ("Information Display info about the current method\n"); - acpi_os_printf ("Into Step into (not over) a method call\n"); - acpi_os_printf ("List [# of Aml Opcodes] Display method ASL statements\n"); - acpi_os_printf ("Locals Display method local variables\n"); - acpi_os_printf ("Results Display method result stack\n"); - acpi_os_printf ("Set <#> Set method data (Arguments/Locals)\n"); - acpi_os_printf ("Stop Terminate control method\n"); - acpi_os_printf ("Tree Display control method calling tree\n"); - acpi_os_printf (" Single step next AML opcode (over calls)\n"); - return; - - case 'F': - acpi_os_printf ("\n_file I/O Commands\n\n"); - acpi_os_printf ("Close Close debug output file\n"); - acpi_os_printf ("Open Open a file for debug output\n"); - acpi_os_printf ("Load Load ACPI table from a file\n"); - return; - - default: - acpi_os_printf ("Unrecognized Command Class: %x\n", help_type); - return; - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_get_next_token - * - * PARAMETERS: String - Command buffer - * Next - Return value, end of next token - * - * RETURN: Pointer to the start of the next token. - * - * DESCRIPTION: Command line parsing. 
Get the next token on the command line - * - ******************************************************************************/ - -NATIVE_CHAR * -acpi_db_get_next_token ( - NATIVE_CHAR *string, - NATIVE_CHAR **next) -{ - NATIVE_CHAR *start; - - /* At end of buffer? */ - - if (!string || !(*string)) - { - return (NULL); - } - - - /* Get rid of any spaces at the beginning */ - - if (*string == ' ') - { - while (*string && (*string == ' ')) - { - string++; - } - - if (!(*string)) - { - return (NULL); - } - } - - start = string; - - /* Find end of token */ - - while (*string && (*string != ' ')) - { - string++; - } - - - if (!(*string)) - { - *next = NULL; - } - - else - { - *string = 0; - *next = string + 1; - } - - return (start); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_get_line - * - * PARAMETERS: Input_buffer - Command line buffer - * - * RETURN: None - * - * DESCRIPTION: Get the next command line from the user. Gets entire line - * up to the next newline - * - ******************************************************************************/ - -u32 -acpi_db_get_line ( - NATIVE_CHAR *input_buffer) -{ - u32 i; - u32 count; - NATIVE_CHAR *next; - NATIVE_CHAR *this; - - - STRCPY (acpi_gbl_db_parsed_buf, input_buffer); - STRUPR (acpi_gbl_db_parsed_buf); - - this = acpi_gbl_db_parsed_buf; - for (i = 0; i < DB_MAX_ARGS; i++) - { - acpi_gbl_db_args[i] = acpi_db_get_next_token (this, &next); - if (!acpi_gbl_db_args[i]) - { - break; - } - - this = next; - } - - - /* Uppercase the actual command */ - - if (acpi_gbl_db_args[0]) - { - STRUPR (acpi_gbl_db_args[0]); - } - - count = i; - if (count) - { - count--; /* Number of args only */ - } - - return (count); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_match_command - * - * PARAMETERS: User_command - User command line - * - * RETURN: Index into command array, -1 if not found - * - * DESCRIPTION: Search command array for a command match - * - ******************************************************************************/ - -u32 -acpi_db_match_command ( - NATIVE_CHAR *user_command) -{ - u32 i; - - - if (!user_command || user_command[0] == 0) - { - return (CMD_NULL); - } - - for (i = CMD_FIRST_VALID; acpi_gbl_db_commands[i].name; i++) - { - if (STRSTR (acpi_gbl_db_commands[i].name, user_command) == acpi_gbl_db_commands[i].name) - { - return (i); - } - } - - /* Command not recognized */ - - return (CMD_NOT_FOUND); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_command_dispatch - * - * PARAMETERS: Input_buffer - Command line buffer - * Walk_state - Current walk - * Op - Current (executing) parse op - * - * RETURN: Status - * - * DESCRIPTION: Command dispatcher. 
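acpi_db_get_next_token above tokenizes the command line in place: skip leading spaces, terminate the token with a NUL, and hand back both the token and the position to resume from. A standalone sketch of that loop:

#include <stdio.h>

static char *next_token(char *s, char **next)
{
    char *start;

    if (!s)
        return NULL;

    while (*s == ' ')                   /* skip leading spaces */
        s++;
    if (!*s)
        return NULL;                    /* nothing but spaces left */

    start = s;
    while (*s && *s != ' ')             /* find the end of the token */
        s++;

    if (*s) {
        *s = '\0';                      /* terminate token in place */
        *next = s + 1;
    } else {
        *next = NULL;                   /* hit end of buffer */
    }

    return start;
}

int main(void)
{
    char line[] = "EXECUTE \\_SB_.PCI0._STA 17";
    char *next = line, *tok;

    while ((tok = next_token(next, &next)) != NULL)
        printf("[%s]\n", tok);  /* [EXECUTE] [\_SB_.PCI0._STA] [17] */

    return 0;
}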
Called from two places: - * - ******************************************************************************/ - -acpi_status -acpi_db_command_dispatch ( - NATIVE_CHAR *input_buffer, - acpi_walk_state *walk_state, - acpi_parse_object *op) -{ - u32 temp; - u32 command_index; - u32 param_count; - NATIVE_CHAR *command_line; - acpi_status status = AE_CTRL_TRUE; - - - /* If Acpi_terminate has been called, terminate this thread */ - - if (acpi_gbl_db_terminate_threads) - { - return (AE_CTRL_TERMINATE); - } - - param_count = acpi_db_get_line (input_buffer); - command_index = acpi_db_match_command (acpi_gbl_db_args[0]); - temp = 0; - - /* Verify that we have the minimum number of params */ - - if (param_count < acpi_gbl_db_commands[command_index].min_args) - { - acpi_os_printf ("%d parameters entered, [%s] requires %d parameters\n", - param_count, acpi_gbl_db_commands[command_index].name, acpi_gbl_db_commands[command_index].min_args); - return (AE_CTRL_TRUE); - } - - /* Decode and dispatch the command */ - - switch (command_index) - { - case CMD_NULL: - if (op) - { - return (AE_OK); - } - break; - - case CMD_ALLOCATIONS: - -#ifdef ACPI_DBG_TRACK_ALLOCATIONS - acpi_ut_dump_allocations ((u32) -1, NULL); -#endif - break; - - case CMD_ARGS: - case CMD_ARGUMENTS: - acpi_db_display_arguments (); - break; - - case CMD_BREAKPOINT: - acpi_db_set_method_breakpoint (acpi_gbl_db_args[1], walk_state, op); - break; - - case CMD_CALL: - acpi_db_set_method_call_breakpoint (op); - status = AE_OK; - break; - - case CMD_CLOSE: - acpi_db_close_debug_file (); - break; - - case CMD_DEBUG: - acpi_db_execute (acpi_gbl_db_args[1], &acpi_gbl_db_args[2], EX_SINGLE_STEP); - break; - - case CMD_DUMP: - acpi_db_decode_and_display_object (acpi_gbl_db_args[1], acpi_gbl_db_args[2]); - break; - - case CMD_ENABLEACPI: - status = acpi_enable(); - if (ACPI_FAILURE(status)) - { - acpi_os_printf("Acpi_enable failed (Status=%X)\n", status); - return (status); - } - break; - - case CMD_EVENT: - acpi_os_printf ("Event command not implemented\n"); - break; - - case CMD_EXECUTE: - acpi_db_execute (acpi_gbl_db_args[1], &acpi_gbl_db_args[2], EX_NO_SINGLE_STEP); - break; - - case CMD_FIND: - acpi_db_find_name_in_namespace (acpi_gbl_db_args[1]); - break; - - case CMD_GO: - acpi_gbl_cm_single_step = FALSE; - return (AE_OK); - - case CMD_HELP: - case CMD_HELP2: - acpi_db_display_help (acpi_gbl_db_args[1]); - break; - - case CMD_HISTORY: - acpi_db_display_history (); - break; - - case CMD_HISTORY_EXE: - command_line = acpi_db_get_from_history (acpi_gbl_db_args[1]); - if (!command_line) - { - return (AE_CTRL_TRUE); - } - - status = acpi_db_command_dispatch (command_line, walk_state, op); - if (ACPI_SUCCESS (status)) - { - status = AE_CTRL_TRUE; - } - return (status); - break; - - case CMD_HISTORY_LAST: - command_line = acpi_db_get_from_history (NULL); - if (!command_line) - { - return (AE_CTRL_TRUE); - } - - status = acpi_db_command_dispatch (command_line, walk_state, op); - if (ACPI_SUCCESS (status)) - { - status = AE_CTRL_TRUE; - } - return (status); - - case CMD_INFORMATION: - acpi_db_display_method_info (op); - break; - - case CMD_INTO: - if (op) - { - acpi_gbl_cm_single_step = TRUE; - -/* TBD: Must get current walk state */ - /* Acpi_gbl_Method_breakpoint = 0; */ - return (AE_OK); - } - break; - - case CMD_LEVEL: - if (param_count == 0) - { - acpi_os_printf ("Current debug level for file output is: %8.8lX\n", acpi_gbl_db_debug_level); - acpi_os_printf ("Current debug level for console output is: %8.8lX\n", acpi_gbl_db_console_debug_level); - } 
- else if (param_count == 2) - { - temp = acpi_gbl_db_console_debug_level; - acpi_gbl_db_console_debug_level = STRTOUL (acpi_gbl_db_args[1], NULL, 16); - acpi_os_printf ("Debug Level for console output was %8.8lX, now %8.8lX\n", temp, acpi_gbl_db_console_debug_level); - } - else - { - temp = acpi_gbl_db_debug_level; - acpi_gbl_db_debug_level = STRTOUL (acpi_gbl_db_args[1], NULL, 16); - acpi_os_printf ("Debug Level for file output was %8.8lX, now %8.8lX\n", temp, acpi_gbl_db_debug_level); - } - break; - - case CMD_LIST: - acpi_db_disassemble_aml (acpi_gbl_db_args[1], op); - break; - - case CMD_LOAD: - status = acpi_db_load_acpi_table (acpi_gbl_db_args[1]); - if (ACPI_FAILURE (status)) - { - return (status); - } - break; - - case CMD_LOCKS: - acpi_db_display_locks (); - break; - - case CMD_LOCALS: - acpi_db_display_locals (); - break; - - case CMD_METHODS: - acpi_db_display_objects ("METHOD", acpi_gbl_db_args[1]); - break; - - case CMD_NAMESPACE: - acpi_db_dump_namespace (acpi_gbl_db_args[1], acpi_gbl_db_args[2]); - break; - - case CMD_NOTIFY: - temp = STRTOUL (acpi_gbl_db_args[2], NULL, 0); - acpi_db_send_notify (acpi_gbl_db_args[1], temp); - break; - - case CMD_OBJECT: - acpi_db_display_objects (STRUPR (acpi_gbl_db_args[1]), acpi_gbl_db_args[2]); - break; - - case CMD_OPEN: - acpi_db_open_debug_file (acpi_gbl_db_args[1]); - break; - - case CMD_OWNER: - acpi_db_dump_namespace_by_owner (acpi_gbl_db_args[1], acpi_gbl_db_args[2]); - break; - - case CMD_PREFIX: - acpi_db_set_scope (acpi_gbl_db_args[1]); - break; - - case CMD_REFERENCES: - acpi_db_find_references (acpi_gbl_db_args[1]); - break; - - case CMD_RESOURCES: - acpi_db_display_resources (acpi_gbl_db_args[1]); - break; - - case CMD_RESULTS: - acpi_db_display_results (); - break; - - case CMD_SET: - acpi_db_set_method_data (acpi_gbl_db_args[1], acpi_gbl_db_args[2], acpi_gbl_db_args[3]); - break; - - case CMD_STATS: - acpi_db_display_statistics (acpi_gbl_db_args[1]); - break; - - case CMD_STOP: - return (AE_AML_ERROR); - break; - - case CMD_TABLES: - acpi_db_display_table_info (acpi_gbl_db_args[1]); - break; - - case CMD_TERMINATE: - acpi_db_set_output_destination (DB_REDIRECTABLE_OUTPUT); - acpi_ut_subsystem_shutdown (); - - /* TBD: [Restructure] Need some way to re-initialize without re-creating the semaphores! */ - - /* Acpi_initialize (NULL); */ - break; - - case CMD_THREADS: - acpi_db_create_execution_threads (acpi_gbl_db_args[1], acpi_gbl_db_args[2], acpi_gbl_db_args[3]); - break; - - case CMD_TREE: - acpi_db_display_calling_tree (); - break; - - case CMD_UNLOAD: - acpi_db_unload_acpi_table (acpi_gbl_db_args[1], acpi_gbl_db_args[2]); - break; - - case CMD_EXIT: - case CMD_QUIT: - if (op) - { - acpi_os_printf ("Method execution terminated\n"); - return (AE_CTRL_TERMINATE); - } - - if (!acpi_gbl_db_output_to_file) - { - acpi_dbg_level = DEBUG_DEFAULT; - } - - /* Shutdown */ - - /* Acpi_ut_subsystem_shutdown (); */ - acpi_db_close_debug_file (); - - acpi_gbl_db_terminate_threads = TRUE; - - return (AE_CTRL_TERMINATE); - - case CMD_NOT_FOUND: - acpi_os_printf ("Unknown Command\n"); - return (AE_CTRL_TRUE); - } - - - /* Add all commands that come here to the history buffer */ - - acpi_db_add_to_history (input_buffer); - return (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_execute_thread - * - * PARAMETERS: Context - Not used - * - * RETURN: None - * - * DESCRIPTION: Debugger execute thread. Waits for a command line, then - * simply dispatches it. 
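The dispatcher is table-driven: the matched index selects a case, but only after the argument count is checked against the command's min_args entry; a shortfall prints a diagnostic and returns AE_CTRL_TRUE so the main loop simply reprompts. A compact sketch of that validate-then-dispatch pattern, with hypothetical handlers and table:

#include <stdio.h>

struct command {
    const char *name;
    unsigned    min_args;
    void      (*handler)(char **argv);
};

static void cmd_quit(char **argv) { (void) argv; puts("bye"); }
static void cmd_open(char **argv) { printf("opening %s\n", argv[1]); }

/* Hypothetical table in the spirit of acpi_gbl_db_commands */
static const struct command table[] = {
    { "QUIT", 0, cmd_quit },
    { "OPEN", 1, cmd_open },
};

static int dispatch(unsigned idx, unsigned argc, char **argv)
{
    if (argc < table[idx].min_args) {
        printf("%u parameters entered, [%s] requires %u parameters\n",
               argc, table[idx].name, table[idx].min_args);
        return -1;  /* like AE_CTRL_TRUE: complain and reprompt */
    }
    table[idx].handler(argv);
    return 0;
}

int main(void)
{
    char *argv[] = { "OPEN", "acpi.log", NULL };

    dispatch(1, 0, argv);  /* rejected: OPEN needs one argument */
    dispatch(1, 1, argv);  /* opening acpi.log */
    return 0;
}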
- * - ******************************************************************************/ - -void -acpi_db_execute_thread ( - void *context) -{ - acpi_status status = AE_OK; - - - while (status != AE_CTRL_TERMINATE) - { - acpi_gbl_method_executing = FALSE; - acpi_gbl_step_to_next_call = FALSE; - - acpi_ut_acquire_mutex (ACPI_MTX_DEBUG_CMD_READY); - status = acpi_db_command_dispatch (acpi_gbl_db_line_buf, NULL, NULL); - acpi_ut_release_mutex (ACPI_MTX_DEBUG_CMD_COMPLETE); - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_single_thread - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Debugger execute thread. Waits for a command line, then - * simply dispatches it. - * - ******************************************************************************/ - -void -acpi_db_single_thread ( - void) -{ - acpi_status status = AE_OK; - - - acpi_gbl_method_executing = FALSE; - acpi_gbl_step_to_next_call = FALSE; - - status = acpi_db_command_dispatch (acpi_gbl_db_line_buf, NULL, NULL); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_user_commands - * - * PARAMETERS: Prompt - User prompt (depends on mode) - * Op - Current executing parse op - * - * RETURN: None - * - * DESCRIPTION: Command line execution for the AML debugger. Commands are - * matched and dispatched here. - * - ******************************************************************************/ - -acpi_status -acpi_db_user_commands ( - NATIVE_CHAR prompt, - acpi_parse_object *op) -{ - acpi_status status = AE_OK; - - - /* TBD: [Restructure] Need a separate command line buffer for step mode */ - - while (!acpi_gbl_db_terminate_threads) - { - /* Force output to console until a command is entered */ - - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); - - /* Different prompt if method is executing */ - - if (!acpi_gbl_method_executing) - { - acpi_os_printf ("%1c ", DB_COMMAND_PROMPT); - } - else - { - acpi_os_printf ("%1c ", DB_EXECUTE_PROMPT); - } - - /* Get the user input line */ - - acpi_os_get_line (acpi_gbl_db_line_buf); - - - /* Check for single or multithreaded debug */ - - if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) - { - /* - * Signal the debug thread that we have a command to execute, - * and wait for the command to complete. - */ - acpi_ut_release_mutex (ACPI_MTX_DEBUG_CMD_READY); - acpi_ut_acquire_mutex (ACPI_MTX_DEBUG_CMD_COMPLETE); - } - - else - { - /* Just call to the command line interpreter */ - - acpi_db_single_thread (); - } - } - - - /* - * Only this thread (the original thread) should actually terminate the subsystem, - * because all the semaphores are deleted during termination - */ - acpi_terminate (); - return (status); -} - - -#endif /* ENABLE_DEBUGGER */ - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbstats.c linux.21rc5-ac2/drivers/acpi/debugger/dbstats.c --- linux.21rc5/drivers/acpi/debugger/dbstats.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbstats.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,456 +0,0 @@ -/******************************************************************************* - * - * Module Name: dbstats - Generation and display of ACPI table statistics - * $Revision: 47 $ - * - ******************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. 
Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include -#include -#include -#include - -#ifdef ENABLE_DEBUGGER - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbstats") - -/* - * Statistics subcommands - */ -ARGUMENT_INFO acpi_db_stat_types [] = -{ {"ALLOCATIONS"}, - {"OBJECTS"}, - {"MEMORY"}, - {"MISC"}, - {"TABLES"}, - {"SIZES"}, - {"STACK"}, - {NULL} /* Must be null terminated */ -}; - -#define CMD_ALLOCATIONS 0 -#define CMD_OBJECTS 1 -#define CMD_MEMORY 2 -#define CMD_MISC 3 -#define CMD_TABLES 4 -#define CMD_SIZES 5 -#define CMD_STACK 6 - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_enumerate_object - * - * PARAMETERS: Obj_desc - Object to be counted - * - * RETURN: None - * - * DESCRIPTION: Add this object to the global counts, by object type. - * Recursively handles subobjects and packages. - * - * [TBD] Restructure - remove recursion. - * - ******************************************************************************/ - -void -acpi_db_enumerate_object ( - acpi_operand_object *obj_desc) -{ - u32 type; - u32 i; - - - if (!obj_desc) - { - return; - } - - - /* Enumerate this object first */ - - acpi_gbl_num_objects++; - - type = obj_desc->common.type; - if (type > INTERNAL_TYPE_NODE_MAX) - { - acpi_gbl_obj_type_count_misc++; - } - else - { - acpi_gbl_obj_type_count [type]++; - } - - /* Count the sub-objects */ - - switch (type) - { - case ACPI_TYPE_PACKAGE: - for (i = 0; i< obj_desc->package.count; i++) - { - acpi_db_enumerate_object (obj_desc->package.elements[i]); - } - break; - - case ACPI_TYPE_DEVICE: - acpi_db_enumerate_object (obj_desc->device.sys_handler); - acpi_db_enumerate_object (obj_desc->device.drv_handler); - acpi_db_enumerate_object (obj_desc->device.addr_handler); - break; - - case ACPI_TYPE_REGION: - acpi_db_enumerate_object (obj_desc->region.addr_handler); - break; - - case ACPI_TYPE_POWER: - acpi_db_enumerate_object (obj_desc->power_resource.sys_handler); - acpi_db_enumerate_object (obj_desc->power_resource.drv_handler); - break; - - case ACPI_TYPE_PROCESSOR: - acpi_db_enumerate_object (obj_desc->processor.sys_handler); - acpi_db_enumerate_object (obj_desc->processor.drv_handler); - acpi_db_enumerate_object (obj_desc->processor.addr_handler); - break; - - case ACPI_TYPE_THERMAL: - acpi_db_enumerate_object (obj_desc->thermal_zone.sys_handler); - acpi_db_enumerate_object (obj_desc->thermal_zone.drv_handler); - acpi_db_enumerate_object (obj_desc->thermal_zone.addr_handler); - break; - } -} - - -#ifndef PARSER_ONLY - -/******************************************************************************* - * - * FUNCTION: Acpi_db_classify_one_object - * - * PARAMETERS: Callback for Walk_namespace - * - * RETURN: Status - * - * DESCRIPTION: Enumerate both the object descriptor (including subobjects) and - * the 
parent namespace node. - * - ******************************************************************************/ - -acpi_status -acpi_db_classify_one_object ( - acpi_handle obj_handle, - u32 nesting_level, - void *context, - void **return_value) -{ - acpi_namespace_node *node; - acpi_operand_object *obj_desc; - u32 type; - - - acpi_gbl_num_nodes++; - - node = (acpi_namespace_node *) obj_handle; - obj_desc = ((acpi_namespace_node *) obj_handle)->object; - - acpi_db_enumerate_object (obj_desc); - - type = node->type; - if (type > INTERNAL_TYPE_NODE_MAX) - { - acpi_gbl_node_type_count_misc++; - } - - else - { - acpi_gbl_node_type_count [type]++; - } - - return AE_OK; - - - /* TBD: These need to be counted during the initial parsing phase */ - /* - if (Acpi_ps_is_named_op (Op->Opcode)) - { - Num_nodes++; - } - - if (Is_method) - { - Num_method_elements++; - } - - Num_grammar_elements++; - Op = Acpi_ps_get_depth_next (Root, Op); - - Size_of_parse_tree = (Num_grammar_elements - Num_method_elements) * (u32) sizeof (acpi_parse_object); - Size_of_method_trees = Num_method_elements * (u32) sizeof (acpi_parse_object); - Size_of_node_entries = Num_nodes * (u32) sizeof (acpi_namespace_node); - Size_of_acpi_objects = Num_nodes * (u32) sizeof (acpi_operand_object); - - */ -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_count_namespace_objects - * - * PARAMETERS: None - * - * RETURN: Status - * - * DESCRIPTION: Count and classify the entire namespace, including all - * namespace nodes and attached objects. - * - ******************************************************************************/ - -acpi_status -acpi_db_count_namespace_objects ( - void) -{ - u32 i; - - - acpi_gbl_num_nodes = 0; - acpi_gbl_num_objects = 0; - - acpi_gbl_obj_type_count_misc = 0; - for (i = 0; i < (INTERNAL_TYPE_NODE_MAX -1); i++) - { - acpi_gbl_obj_type_count [i] = 0; - acpi_gbl_node_type_count [i] = 0; - } - - acpi_ns_walk_namespace (ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, - FALSE, acpi_db_classify_one_object, NULL, NULL); - - return (AE_OK); -} - -#endif - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_display_statistics - * - * PARAMETERS: Type_arg - Subcommand - * - * RETURN: Status - * - * DESCRIPTION: Display various statistics - * - ******************************************************************************/ - -acpi_status -acpi_db_display_statistics ( - NATIVE_CHAR *type_arg) -{ - u32 i; - u32 type; - u32 outstanding; - u32 size; - - - if (!acpi_gbl_DSDT) - { - acpi_os_printf ("*** Warning: There is no DSDT loaded\n"); - } - - if (!type_arg) - { - acpi_os_printf ("The following subcommands are available:\n ALLOCATIONS, OBJECTS, MEMORY, MISC, SIZES, TABLES\n"); - return (AE_OK); - } - - STRUPR (type_arg); - type = acpi_db_match_argument (type_arg, acpi_db_stat_types); - if (type == (u32) -1) - { - acpi_os_printf ("Invalid or unsupported argument\n"); - return (AE_OK); - } - - - switch (type) - { -#ifndef PARSER_ONLY - case CMD_ALLOCATIONS: -#ifdef ACPI_DBG_TRACK_ALLOCATIONS - acpi_ut_dump_allocation_info (); -#endif - break; -#endif - - case CMD_TABLES: - - acpi_os_printf ("ACPI Table Information:\n\n"); - if (acpi_gbl_DSDT) - { - acpi_os_printf ("DSDT Length:................% 7ld (%X)\n", acpi_gbl_DSDT->length, acpi_gbl_DSDT->length); - } - break; - - case CMD_OBJECTS: - -#ifndef PARSER_ONLY - - acpi_db_count_namespace_objects (); - - acpi_os_printf ("\n_objects defined in the current 
namespace:\n\n"); - - acpi_os_printf ("%16.16s % 10.10s % 10.10s\n", "ACPI_TYPE", "NODES", "OBJECTS"); - - for (i = 0; i < INTERNAL_TYPE_NODE_MAX; i++) - { - acpi_os_printf ("%16.16s % 10ld% 10ld\n", acpi_ut_get_type_name (i), - acpi_gbl_node_type_count [i], acpi_gbl_obj_type_count [i]); - } - acpi_os_printf ("%16.16s % 10ld% 10ld\n", "Misc/Unknown", - acpi_gbl_node_type_count_misc, acpi_gbl_obj_type_count_misc); - - acpi_os_printf ("%16.16s % 10ld% 10ld\n", "TOTALS:", - acpi_gbl_num_nodes, acpi_gbl_num_objects); - -#endif - break; - - case CMD_MEMORY: - -#ifdef ACPI_DBG_TRACK_ALLOCATIONS - acpi_os_printf ("\n----Object and Cache Statistics---------------------------------------------\n"); - - for (i = 0; i < ACPI_NUM_MEM_LISTS; i++) - { - acpi_os_printf ("\n%s\n", acpi_gbl_memory_lists[i].list_name); - - if (acpi_gbl_memory_lists[i].max_cache_depth > 0) - { - acpi_os_printf (" Cache: [Depth Max Avail Size] % 7d % 7d % 7d % 7d B\n", - acpi_gbl_memory_lists[i].cache_depth, - acpi_gbl_memory_lists[i].max_cache_depth, - acpi_gbl_memory_lists[i].max_cache_depth - acpi_gbl_memory_lists[i].cache_depth, - (acpi_gbl_memory_lists[i].cache_depth * acpi_gbl_memory_lists[i].object_size)); - - acpi_os_printf (" Cache: [Requests Hits Misses Obj_size] % 7d % 7d % 7d % 7d B\n", - acpi_gbl_memory_lists[i].cache_requests, - acpi_gbl_memory_lists[i].cache_hits, - acpi_gbl_memory_lists[i].cache_requests - acpi_gbl_memory_lists[i].cache_hits, - acpi_gbl_memory_lists[i].object_size); - } - - outstanding = acpi_gbl_memory_lists[i].total_allocated - - acpi_gbl_memory_lists[i].total_freed - - acpi_gbl_memory_lists[i].cache_depth; - - if (acpi_gbl_memory_lists[i].object_size) - { - size = ROUND_UP_TO_1K (outstanding * acpi_gbl_memory_lists[i].object_size); - } - else - { - size = ROUND_UP_TO_1K (acpi_gbl_memory_lists[i].current_total_size); - } - - acpi_os_printf (" Mem: [Alloc Free Outstanding Size] % 7d % 7d % 7d % 7d Kb\n", - acpi_gbl_memory_lists[i].total_allocated, - acpi_gbl_memory_lists[i].total_freed, - outstanding, size); - } -#endif - - break; - - case CMD_MISC: - - acpi_os_printf ("\n_miscellaneous Statistics:\n\n"); - acpi_os_printf ("Calls to Acpi_ps_find:.. 
........% 7ld\n", acpi_gbl_ps_find_count); - acpi_os_printf ("Calls to Acpi_ns_lookup:..........% 7ld\n", acpi_gbl_ns_lookup_count); - - acpi_os_printf ("\n"); - - acpi_os_printf ("Mutex usage:\n\n"); - for (i = 0; i < NUM_MTX; i++) - { - acpi_os_printf ("%-28s: % 7ld\n", acpi_ut_get_mutex_name (i), acpi_gbl_acpi_mutex_info[i].use_count); - } - break; - - - case CMD_SIZES: - - acpi_os_printf ("\n_internal object sizes:\n\n"); - - acpi_os_printf ("Common %3d\n", sizeof (ACPI_OBJECT_COMMON)); - acpi_os_printf ("Number %3d\n", sizeof (ACPI_OBJECT_INTEGER)); - acpi_os_printf ("String %3d\n", sizeof (ACPI_OBJECT_STRING)); - acpi_os_printf ("Buffer %3d\n", sizeof (ACPI_OBJECT_BUFFER)); - acpi_os_printf ("Package %3d\n", sizeof (ACPI_OBJECT_PACKAGE)); - acpi_os_printf ("Buffer_field %3d\n", sizeof (ACPI_OBJECT_BUFFER_FIELD)); - acpi_os_printf ("Device %3d\n", sizeof (ACPI_OBJECT_DEVICE)); - acpi_os_printf ("Event %3d\n", sizeof (ACPI_OBJECT_EVENT)); - acpi_os_printf ("Method %3d\n", sizeof (ACPI_OBJECT_METHOD)); - acpi_os_printf ("Mutex %3d\n", sizeof (ACPI_OBJECT_MUTEX)); - acpi_os_printf ("Region %3d\n", sizeof (ACPI_OBJECT_REGION)); - acpi_os_printf ("Power_resource %3d\n", sizeof (ACPI_OBJECT_POWER_RESOURCE)); - acpi_os_printf ("Processor %3d\n", sizeof (ACPI_OBJECT_PROCESSOR)); - acpi_os_printf ("Thermal_zone %3d\n", sizeof (ACPI_OBJECT_THERMAL_ZONE)); - acpi_os_printf ("Region_field %3d\n", sizeof (ACPI_OBJECT_REGION_FIELD)); - acpi_os_printf ("Bank_field %3d\n", sizeof (ACPI_OBJECT_BANK_FIELD)); - acpi_os_printf ("Index_field %3d\n", sizeof (ACPI_OBJECT_INDEX_FIELD)); - acpi_os_printf ("Reference %3d\n", sizeof (ACPI_OBJECT_REFERENCE)); - acpi_os_printf ("Notify_handler %3d\n", sizeof (ACPI_OBJECT_NOTIFY_HANDLER)); - acpi_os_printf ("Addr_handler %3d\n", sizeof (ACPI_OBJECT_ADDR_HANDLER)); - acpi_os_printf ("Extra %3d\n", sizeof (ACPI_OBJECT_EXTRA)); - - acpi_os_printf ("\n"); - - acpi_os_printf ("Parse_object %3d\n", sizeof (acpi_parse_object)); - acpi_os_printf ("Parse2_object %3d\n", sizeof (acpi_parse2_object)); - acpi_os_printf ("Operand_object %3d\n", sizeof (acpi_operand_object)); - acpi_os_printf ("Namespace_node %3d\n", sizeof (acpi_namespace_node)); - - break; - - - case CMD_STACK: - - size = acpi_gbl_entry_stack_pointer - acpi_gbl_lowest_stack_pointer; - - acpi_os_printf ("\n_subsystem Stack Usage:\n\n"); - acpi_os_printf ("Entry Stack Pointer %X\n", acpi_gbl_entry_stack_pointer); - acpi_os_printf ("Lowest Stack Pointer %X\n", acpi_gbl_lowest_stack_pointer); - acpi_os_printf ("Stack Use %X (%d)\n", size, size); - acpi_os_printf ("Deepest Procedure Nesting %d\n", acpi_gbl_deepest_nesting); - break; - } - - acpi_os_printf ("\n"); - return (AE_OK); -} - - -#endif /* ENABLE_DEBUGGER */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbutils.c linux.21rc5-ac2/drivers/acpi/debugger/dbutils.c --- linux.21rc5/drivers/acpi/debugger/dbutils.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbutils.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,384 +0,0 @@ -/******************************************************************************* - * - * Module Name: dbutils - AML debugger utilities - * $Revision: 45 $ - * - ******************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. 
Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acparser.h" -#include "acevents.h" -#include "acinterp.h" -#include "acdebug.h" -#include "acdispat.h" - - -#ifdef ENABLE_DEBUGGER - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbutils") - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_set_output_destination - * - * PARAMETERS: Output_flags - Current flags word - * - * RETURN: None - * - * DESCRIPTION: Set the current destination for debugger output. Also sets - * the debug output level accordingly. - * - ******************************************************************************/ - -void -acpi_db_set_output_destination ( - u32 output_flags) -{ - - acpi_gbl_db_output_flags = (u8) output_flags; - - if (output_flags & DB_REDIRECTABLE_OUTPUT) { - if (acpi_gbl_db_output_to_file) { - acpi_dbg_level = acpi_gbl_db_debug_level; - } - } - else { - acpi_dbg_level = acpi_gbl_db_console_debug_level; - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_dump_buffer - * - * PARAMETERS: Address - Pointer to the buffer - * - * RETURN: None - * - * DESCRIPTION: Print a portion of a buffer - * - ******************************************************************************/ - -void -acpi_db_dump_buffer ( - u32 address) -{ - - acpi_os_printf ("\n_location %X:\n", address); - - acpi_dbg_level |= ACPI_LV_TABLES; - acpi_ut_dump_buffer ((u8 *) address, 64, DB_BYTE_DISPLAY, ACPI_UINT32_MAX); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_dump_object - * - * PARAMETERS: Obj_desc - External ACPI object to dump - * Level - Nesting level.
- * - * RETURN: None - * - * DESCRIPTION: Dump the contents of an ACPI external object - * - ******************************************************************************/ - -void -acpi_db_dump_object ( - acpi_object *obj_desc, - u32 level) -{ - u32 i; - - - if (!obj_desc) { - acpi_os_printf ("[Null Object]\n"); - return; - } - - for (i = 0; i < level; i++) { - acpi_os_printf (" "); - } - - switch (obj_desc->type) { - case ACPI_TYPE_ANY: - - acpi_os_printf ("[Object Reference] = %p\n", obj_desc->reference.handle); - break; - - - case ACPI_TYPE_INTEGER: - - acpi_os_printf ("[Integer] = %8.8X%8.8X\n", HIDWORD (obj_desc->integer.value), - LODWORD (obj_desc->integer.value)); - break; - - - case ACPI_TYPE_STRING: - - acpi_os_printf ("[String] Value: "); - for (i = 0; i < obj_desc->string.length; i++) { - acpi_os_printf ("%c", obj_desc->string.pointer[i]); - } - acpi_os_printf ("\n"); - break; - - - case ACPI_TYPE_BUFFER: - - acpi_os_printf ("[Buffer] = "); - acpi_ut_dump_buffer ((u8 *) obj_desc->buffer.pointer, obj_desc->buffer.length, DB_DWORD_DISPLAY, _COMPONENT); - break; - - - case ACPI_TYPE_PACKAGE: - - acpi_os_printf ("[Package] Contains %d Elements: \n", obj_desc->package.count); - - for (i = 0; i < obj_desc->package.count; i++) { - acpi_db_dump_object (&obj_desc->package.elements[i], level+1); - } - break; - - - case INTERNAL_TYPE_REFERENCE: - - acpi_os_printf ("[Object Reference] = %p\n", obj_desc->reference.handle); - break; - - - case ACPI_TYPE_PROCESSOR: - - acpi_os_printf ("[Processor]\n"); - break; - - - case ACPI_TYPE_POWER: - - acpi_os_printf ("[Power Resource]\n"); - break; - - - default: - - acpi_os_printf ("[Unknown Type] %X \n", obj_desc->type); - break; - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_prep_namestring - * - * PARAMETERS: Name - String to prepare - * - * RETURN: None - * - * DESCRIPTION: Translate all forward slashes and dots to backslashes. - * - ******************************************************************************/ - -void -acpi_db_prep_namestring ( - NATIVE_CHAR *name) -{ - - - if (!name) { - return; - } - - STRUPR (name); - - /* Convert a leading forward slash to a backslash */ - - if (*name == '/') { - *name = '\\'; - } - - /* Ignore a leading backslash, this is the root prefix */ - - if (*name == '\\') { - name++; - } - - /* Convert all slash path separators to dots */ - - while (*name) { - if ((*name == '/') || - (*name == '\\')) { - *name = '.'; - } - - name++; - } -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_second_pass_parse - * - * PARAMETERS: Root - Root of the parse tree - * - * RETURN: Status - * - * DESCRIPTION: Second pass parse of the ACPI tables. 
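Acpi_db_prep_namestring canonicalizes a user-typed path before lookup: uppercase it, turn a leading forward slash into the root prefix, skip the root prefix itself, and rewrite the remaining slash separators as dots. A self-contained sketch of that normalization using only the C library:

#include <ctype.h>
#include <stdio.h>

/* Normalize a debugger path the way Acpi_db_prep_namestring does:
 * uppercase, leading '/' becomes the '\' root prefix, and any
 * remaining '/' or '\' separators become '.' */
static void prep_namestring(char *name)
{
    char *p;

    if (!name)
        return;
    for (p = name; *p; p++)
        *p = (char) toupper((unsigned char) *p);
    if (*name == '/')
        *name = '\\';
    p = (*name == '\\') ? name + 1 : name;  /* skip the root prefix */
    for (; *p; p++)
        if (*p == '/' || *p == '\\')
            *p = '.';
}

int main(void)
{
    char path[] = "/_sb/pci0/isa";
    prep_namestring(path);
    printf("%s\n", path);   /* prints \_SB.PCI0.ISA */
    return 0;
}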
We need to wait until - * second pass to parse the control methods - * - ******************************************************************************/ - -acpi_status -acpi_db_second_pass_parse ( - acpi_parse_object *root) -{ - acpi_parse_object *op = root; - acpi_parse2_object *method; - acpi_parse_object *search_op; - acpi_parse_object *start_op; - acpi_status status = AE_OK; - u32 base_aml_offset; - acpi_walk_state *walk_state; - - - FUNCTION_ENTRY (); - - - acpi_os_printf ("Pass two parse ....\n"); - - - while (op) { - if (op->opcode == AML_METHOD_OP) { - method = (acpi_parse2_object *) op; - - walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, - NULL, NULL, NULL); - if (!walk_state) { - return (AE_NO_MEMORY); - } - - - walk_state->parser_state.aml = - walk_state->parser_state.aml_start = method->data; - walk_state->parser_state.aml_end = - walk_state->parser_state.pkg_end = method->data + method->length; - walk_state->parser_state.start_scope = op; - - walk_state->descending_callback = acpi_ds_load1_begin_op; - walk_state->ascending_callback = acpi_ds_load1_end_op; - - - status = acpi_ps_parse_aml (walk_state); - - - base_aml_offset = (method->value.arg)->aml_offset + 1; - start_op = (method->value.arg)->next; - search_op = start_op; - - while (search_op) { - search_op->aml_offset += base_aml_offset; - search_op = acpi_ps_get_depth_next (start_op, search_op); - } - - } - - if (op->opcode == AML_REGION_OP) { - /* TBD: [Investigate] this isn't quite the right thing to do! */ - /* - * - * Method = (ACPI_DEFERRED_OP *) Op; - * Status = Acpi_ps_parse_aml (Op, Method->Body, Method->Body_length); - */ - } - - if (ACPI_FAILURE (status)) { - break; - } - - op = acpi_ps_get_depth_next (root, op); - } - - return (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_local_ns_lookup - * - * PARAMETERS: Name - Name to lookup - * - * RETURN: Pointer to a namespace node - * - * DESCRIPTION: Lookup a name in the ACPI namespace - * - ******************************************************************************/ - -acpi_namespace_node * -acpi_db_local_ns_lookup ( - NATIVE_CHAR *name) -{ - NATIVE_CHAR *internal_path; - acpi_status status; - acpi_namespace_node *node = NULL; - - - acpi_db_prep_namestring (name); - - /* Build an internal namestring */ - - status = acpi_ns_internalize_name (name, &internal_path); - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Invalid namestring: %s\n", name); - return (NULL); - } - - /* Lookup the name */ - - /* TBD: [Investigate] what scope do we use? 
*/ - /* Use the root scope for the start of the search */ - - status = acpi_ns_lookup (NULL, internal_path, ACPI_TYPE_ANY, IMODE_EXECUTE, - NS_NO_UPSEARCH | NS_DONT_OPEN_SCOPE, NULL, &node); - - if (ACPI_FAILURE (status)) { - acpi_os_printf ("Could not locate name: %s %s\n", name, acpi_format_exception (status)); - } - - - ACPI_MEM_FREE (internal_path); - - return (node); -} - - -#endif /* ENABLE_DEBUGGER */ - - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/dbxface.c linux.21rc5-ac2/drivers/acpi/debugger/dbxface.c --- linux.21rc5/drivers/acpi/debugger/dbxface.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/dbxface.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,321 +0,0 @@ -/******************************************************************************* - * - * Module Name: dbxface - AML Debugger external interfaces - * $Revision: 45 $ - * - ******************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acparser.h" -#include "acevents.h" -#include "acinterp.h" -#include "acdebug.h" - - -#ifdef ENABLE_DEBUGGER - -#define _COMPONENT ACPI_DEBUGGER - MODULE_NAME ("dbxface") - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_single_step - * - * PARAMETERS: Walk_state - Current walk - * Op - Current executing op - * Opcode_class - Class of the current AML Opcode - * - * RETURN: Status - * - * DESCRIPTION: Called just before execution of an AML opcode. - * - ******************************************************************************/ - -acpi_status -acpi_db_single_step ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - u32 opcode_class) -{ - acpi_parse_object *next; - acpi_status status = AE_OK; - u32 original_debug_level; - acpi_parse_object *display_op; - - - FUNCTION_ENTRY (); - - /* Is there a breakpoint set? */ - - if (walk_state->method_breakpoint) { - /* Check if the breakpoint has been reached or passed */ - - if (walk_state->method_breakpoint <= op->aml_offset) { - /* Hit the breakpoint, resume single step, reset breakpoint */ - - acpi_os_printf ("***Break*** at AML offset %X\n", op->aml_offset); - acpi_gbl_cm_single_step = TRUE; - acpi_gbl_step_to_next_call = FALSE; - walk_state->method_breakpoint = 0; - } - } - - /* - * Check if this is an opcode that we are interested in -- - * namely, opcodes that have arguments - */ - if (op->opcode == AML_INT_NAMEDFIELD_OP) { - return (AE_OK); - } - - switch (opcode_class) { - case AML_CLASS_UNKNOWN: - case AML_CLASS_ARGUMENT: /* constants, literals, etc. 
do nothing */ - return (AE_OK); - break; - } - - /* - * Under certain debug conditions, display this opcode and its operands - */ - if ((acpi_gbl_db_output_to_file) || - (acpi_gbl_cm_single_step) || - (acpi_dbg_level & ACPI_LV_PARSE)) { - if ((acpi_gbl_db_output_to_file) || - (acpi_dbg_level & ACPI_LV_PARSE)) { - acpi_os_printf ("\n[Aml_debug] Next AML Opcode to execute:\n"); - } - - /* - * Display this op (and only this op - zero out the NEXT field temporarily, - * and disable parser trace output for the duration of the display because - * we don't want the extraneous debug output) - */ - original_debug_level = acpi_dbg_level; - acpi_dbg_level &= ~(ACPI_LV_PARSE | ACPI_LV_FUNCTIONS); - next = op->next; - op->next = NULL; - - - display_op = op; - if (op->parent) { - if ((op->parent->opcode == AML_IF_OP) || - (op->parent->opcode == AML_WHILE_OP)) { - display_op = op->parent; - } - } - - /* Now we can display it */ - - acpi_db_display_op (walk_state, display_op, ACPI_UINT32_MAX); - - if ((op->opcode == AML_IF_OP) || - (op->opcode == AML_WHILE_OP)) { - if (walk_state->control_state->common.value) { - acpi_os_printf ("Predicate was TRUE, executed block\n"); - } - else { - acpi_os_printf ("Predicate is FALSE, skipping block\n"); - } - } - - else if (op->opcode == AML_ELSE_OP) { - /* TBD */ - } - - /* Restore everything */ - - op->next = next; - acpi_os_printf ("\n"); - acpi_dbg_level = original_debug_level; - } - - /* If we are not single stepping, just continue executing the method */ - - if (!acpi_gbl_cm_single_step) { - return (AE_OK); - } - - - /* - * If we are executing a step-to-call command, - * Check if this is a method call. - */ - if (acpi_gbl_step_to_next_call) { - if (op->opcode != AML_INT_METHODCALL_OP) { - /* Not a method call, just keep executing */ - - return (AE_OK); - } - - /* Found a method call, stop executing */ - - acpi_gbl_step_to_next_call = FALSE; - } - - - /* - * If the next opcode is a method call, we will "step over" it - * by default. - */ - if (op->opcode == AML_INT_METHODCALL_OP) { - acpi_gbl_cm_single_step = FALSE; /* No more single step while executing called method */ - - /* Set the breakpoint on the call, it will stop execution as soon as we return */ - - /* TBD: [Future] don't kill the user breakpoint! */ - - walk_state->method_breakpoint = /* Op->Aml_offset + */ 1; /* Must be non-zero! 
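The step-over logic deserves a closer look: on a method call the debugger clears single stepping so the callee free-runs, and plants a breakpoint value of 1 in the caller's walk state; because the check at the top of Acpi_db_single_step fires whenever a non-zero stored offset is <= the current AML offset, the very first opcode after the call returns drops back into the debugger. A small simulation of the trick, with a hypothetical struct in place of acpi_walk_state:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-walk debug state. In the real code the breakpoint
 * lives in the caller's walk state, so the callee's opcodes (which run
 * under their own walk state) never test it. */
struct dbg_state {
    unsigned breakpoint;    /* 0 means "no breakpoint armed" */
    bool     single_step;
};

/* Called before each opcode, like Acpi_db_single_step */
static void on_opcode(struct dbg_state *s, unsigned aml_offset, bool is_call)
{
    if (s->breakpoint && s->breakpoint <= aml_offset) {
        printf("***Break*** at AML offset %X\n", aml_offset);
        s->single_step = true;
        s->breakpoint = 0;
    }
    if (is_call && s->single_step) {
        /* "Step over": stop stepping inside the callee, and arm a
         * breakpoint that any later offset will satisfy on return. */
        s->single_step = false;
        s->breakpoint = 1;   /* must be non-zero, exactly as above */
    }
}

int main(void)
{
    struct dbg_state s = { 0, true };
    on_opcode(&s, 0x10, true);   /* call: stepping suspended   */
    on_opcode(&s, 0x25, false);  /* first opcode after return: breaks */
    return 0;
}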
*/ - } - - - /* TBD: [Investigate] what are the namespace locking issues here */ - - /* Acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); */ - - /* Go into the command loop and await next user command */ - - acpi_gbl_method_executing = TRUE; - status = AE_CTRL_TRUE; - while (status == AE_CTRL_TRUE) { - if (acpi_gbl_debugger_configuration == DEBUGGER_MULTI_THREADED) { - /* Handshake with the front-end that gets user command lines */ - - acpi_ut_release_mutex (ACPI_MTX_DEBUG_CMD_COMPLETE); - acpi_ut_acquire_mutex (ACPI_MTX_DEBUG_CMD_READY); - } - - else { - /* Single threaded, we must get a command line ourselves */ - - /* Force output to console until a command is entered */ - - acpi_db_set_output_destination (DB_CONSOLE_OUTPUT); - - /* Different prompt if method is executing */ - - if (!acpi_gbl_method_executing) { - acpi_os_printf ("%1c ", DB_COMMAND_PROMPT); - } - else { - acpi_os_printf ("%1c ", DB_EXECUTE_PROMPT); - } - - /* Get the user input line */ - - acpi_os_get_line (acpi_gbl_db_line_buf); - } - - status = acpi_db_command_dispatch (acpi_gbl_db_line_buf, walk_state, op); - } - - /* Acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); */ - - /* User commands complete, continue execution of the interrupted method */ - - return (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_initialize - * - * PARAMETERS: None - * - * RETURN: Status - * - * DESCRIPTION: Init and start debugger - * - ******************************************************************************/ - -int -acpi_db_initialize (void) -{ - - - /* Init globals */ - - acpi_gbl_db_buffer = acpi_os_callocate (ACPI_DEBUG_BUFFER_SIZE); - - /* Initial scope is the root */ - - acpi_gbl_db_scope_buf [0] = '\\'; - acpi_gbl_db_scope_buf [1] = 0; - - - /* - * If configured for multi-thread support, the debug executor runs in - * a separate thread so that the front end can be in another address - * space, environment, or even another machine. 
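The DEBUG_CMD_READY/DEBUG_CMD_COMPLETE pair is a two-semaphore rendezvous: the front end fills the shared line buffer, posts READY, and blocks on COMPLETE, while the executor thread blocks on READY, dispatches, and posts COMPLETE. A user-space analogue with POSIX semaphores, both initialized to zero to match the create-with-one-unit-then-acquire dance in Acpi_db_initialize:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t cmd_ready, cmd_complete;
static char line_buf[80];

static void *executor(void *arg)
{
    (void) arg;
    for (;;) {
        sem_wait(&cmd_ready);              /* wait for a command line   */
        printf("dispatch: %s", line_buf);  /* stand-in for the dispatcher */
        sem_post(&cmd_complete);           /* tell front end we're done */
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;

    sem_init(&cmd_ready, 0, 0);
    sem_init(&cmd_complete, 0, 0);
    pthread_create(&tid, NULL, executor, NULL);

    snprintf(line_buf, sizeof line_buf, "NAMESPACE\n");
    sem_post(&cmd_ready);       /* hand the line to the executor */
    sem_wait(&cmd_complete);    /* block until it has been run   */
    return 0;
}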
- */ - if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) { - /* These were created with one unit, grab it */ - - acpi_ut_acquire_mutex (ACPI_MTX_DEBUG_CMD_COMPLETE); - acpi_ut_acquire_mutex (ACPI_MTX_DEBUG_CMD_READY); - - /* Create the debug execution thread to execute commands */ - - acpi_os_queue_for_execution (0, acpi_db_execute_thread, NULL); - } - - if (!acpi_gbl_db_opt_verbose) { - acpi_gbl_db_disasm_indent = " "; - acpi_gbl_db_opt_disasm = TRUE; - acpi_gbl_db_opt_stats = FALSE; - } - - return (0); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_db_terminate - * - * PARAMETERS: None - * - * RETURN: Status - * - * DESCRIPTION: Stop debugger - * - ******************************************************************************/ - -void -acpi_db_terminate (void) -{ - - if (acpi_gbl_db_table_ptr) { - acpi_os_free (acpi_gbl_db_table_ptr); - } - if (acpi_gbl_db_buffer) { - acpi_os_free (acpi_gbl_db_buffer); - } -} - - -#endif /* ENABLE_DEBUGGER */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/debugger/Makefile linux.21rc5-ac2/drivers/acpi/debugger/Makefile --- linux.21rc5/drivers/acpi/debugger/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/debugger/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -# -# Makefile for all Linux ACPI interpreter subdirectories -# EXCEPT for the ospm directory -# - -O_TARGET := $(notdir $(CURDIR)).o - -obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c)) - -EXTRA_CFLAGS += $(ACPI_CFLAGS) - -include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dsfield.c linux.21rc5-ac2/drivers/acpi/dispatcher/dsfield.c --- linux.21rc5/drivers/acpi/dispatcher/dsfield.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dsfield.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,78 +1,98 @@ /****************************************************************************** * * Module Name: dsfield - Dispatcher field routines - * $Revision: 46 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "amlcode.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "acparser.h" +#include +#include +#include +#include +#include +#include #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dsfield") + ACPI_MODULE_NAME ("dsfield") /******************************************************************************* * - * FUNCTION: Acpi_ds_create_buffer_field + * FUNCTION: acpi_ds_create_buffer_field * * PARAMETERS: Opcode - The opcode to be executed * Operands - List of operands for the opcode - * Walk_state - Current state + * walk_state - Current state * * RETURN: Status * - * DESCRIPTION: Execute the Create_field operators: - * Create_bit_field_op, - * Create_byte_field_op, - * Create_word_field_op, - * Create_dWord_field_op, - * Create_qWord_field_op, - * Create_field_op (all of which define fields in buffers) + * DESCRIPTION: Execute the create_field operators: + * create_bit_field_op, + * create_byte_field_op, + * create_word_field_op, + * create_dword_field_op, + * create_qword_field_op, + * create_field_op (all of which define fields in buffers) * ******************************************************************************/ acpi_status acpi_ds_create_buffer_field ( - acpi_parse_object *op, - acpi_walk_state *walk_state) + union acpi_parse_object *op, + struct acpi_walk_state *walk_state) { - acpi_parse_object *arg; - acpi_namespace_node *node; - acpi_status status; - acpi_operand_object *obj_desc; + union acpi_parse_object *arg; + struct acpi_namespace_node *node; + acpi_status status; + union acpi_operand_object *obj_desc; + union acpi_operand_object *second_desc = NULL; + u32 flags; - FUNCTION_TRACE ("Ds_create_buffer_field"); + ACPI_FUNCTION_TRACE ("ds_create_buffer_field"); - /* Get the Name_string argument */ + /* Get the name_string argument */ - if (op->opcode == AML_CREATE_FIELD_OP) { + if (op->common.aml_opcode == AML_CREATE_FIELD_OP) { arg = acpi_ps_get_arg (op, 3); } else { @@ -86,13 +106,25 @@ } /* - * Enter the Name_string into the namespace + * During the 
load phase, we want to enter the name of the field into + * the namespace. During the execute phase (when we evaluate the size + * operand), we want to lookup the name */ - status = acpi_ns_lookup (walk_state->scope_info, arg->value.string, - INTERNAL_TYPE_DEF_ANY, IMODE_LOAD_PASS1, - NS_NO_UPSEARCH | NS_DONT_OPEN_SCOPE, - walk_state, &(node)); + if (walk_state->parse_flags & ACPI_PARSE_EXECUTE) { + flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE; + } + else { + flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE | ACPI_NS_ERROR_IF_FOUND; + } + + /* + * Enter the name_string into the namespace + */ + status = acpi_ns_lookup (walk_state->scope_info, arg->common.value.string, + ACPI_TYPE_ANY, ACPI_IMODE_LOAD_PASS1, + flags, walk_state, &(node)); if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (arg->common.value.string, status); return_ACPI_STATUS (status); } @@ -100,14 +132,15 @@ * for now, we will put it in the "op" object that the parser uses, so we * can get it again at the end of this scope */ - op->node = node; + op->common.node = node; /* * If there is no object attached to the node, this node was just created and * we need to create the field object. Otherwise, this was a lookup of an * existing node and we don't want to create the field object again. */ - if (node->object) { + obj_desc = acpi_ns_get_attached_object (node); + if (obj_desc) { return_ACPI_STATUS (AE_OK); } @@ -125,27 +158,21 @@ } /* - * Allocate a method object for this field unit - */ - obj_desc->buffer_field.extra = acpi_ut_create_internal_object ( - INTERNAL_TYPE_EXTRA); - if (!obj_desc->buffer_field.extra) { - status = AE_NO_MEMORY; - goto cleanup; - } - - /* * Remember location in AML stream of the field unit * opcode and operands -- since the buffer and index * operands must be evaluated. 
*/ - obj_desc->buffer_field.extra->extra.aml_start = ((acpi_parse2_object *) op)->data; - obj_desc->buffer_field.extra->extra.aml_length = ((acpi_parse2_object *) op)->length; + second_desc = obj_desc->common.next_object; + second_desc->extra.aml_start = op->named.data; + second_desc->extra.aml_length = op->named.length; obj_desc->buffer_field.node = node; - /* Attach constructed field descriptor to parent node */ + /* Attach constructed field descriptors to parent node */ status = acpi_ns_attach_object (node, obj_desc, ACPI_TYPE_BUFFER_FIELD); + if (ACPI_FAILURE (status)) { + goto cleanup; + } cleanup: @@ -159,10 +186,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_get_field_names + * FUNCTION: acpi_ds_get_field_names * - * PARAMETERS: Info - Create_field info structure - * ` Walk_state - Current method state + * PARAMETERS: Info - create_field info structure + * ` walk_state - Current method state * Arg - First parser arg for the field name list * * RETURN: Status @@ -174,14 +201,15 @@ acpi_status acpi_ds_get_field_names ( - ACPI_CREATE_FIELD_INFO *info, - acpi_walk_state *walk_state, - acpi_parse_object *arg) + struct acpi_create_field_info *info, + struct acpi_walk_state *walk_state, + union acpi_parse_object *arg) { - acpi_status status; + acpi_status status; + acpi_integer position; - PROC_NAME("acpi_ds_get_field_names"); + ACPI_FUNCTION_TRACE_PTR ("ds_get_field_names", info); /* First field starts at bit zero */ @@ -194,51 +222,78 @@ /* * Three types of field elements are handled: * 1) Offset - specifies a bit offset - * 2) Access_as - changes the access mode + * 2) access_as - changes the access mode * 3) Name - Enters a new named field into the namespace */ - switch (arg->opcode) { + switch (arg->common.aml_opcode) { case AML_INT_RESERVEDFIELD_OP: - info->field_bit_position += arg->value.size; + position = (acpi_integer) info->field_bit_position + + (acpi_integer) arg->common.value.size; + + if (position > ACPI_UINT32_MAX) { + ACPI_REPORT_ERROR (("Bit offset within field too large (> 0xFFFFFFFF)\n")); + return_ACPI_STATUS (AE_SUPPORT); + } + + info->field_bit_position = (u32) position; break; case AML_INT_ACCESSFIELD_OP: /* - * Get a new Access_type and Access_attribute for all - * entries (until end or another Access_as keyword) + * Get a new access_type and access_attribute -- to be used for all + * field units that follow, until field end or another access_as keyword. 
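The widened arithmetic added here is the usual guard for a 32-bit bit-position counter: both operands are promoted to the wider acpi_integer before the add, so a sum that no longer fits in 32 bits is detected exactly and reported as AE_SUPPORT instead of wrapping silently. The same idiom in isolation, using stdint types:

#include <stdint.h>
#include <stdio.h>

/* Accumulate two u32 bit offsets in a 64-bit temporary so the
 * overflow check against UINT32_MAX is exact. */
static int advance_bit_position(uint32_t *pos, uint32_t field_bits)
{
    uint64_t next = (uint64_t) *pos + field_bits;

    if (next > UINT32_MAX)
        return -1;              /* AE_SUPPORT in the ACPI code */
    *pos = (uint32_t) next;
    return 0;
}

int main(void)
{
    uint32_t pos = 0xFFFFFFF0u;

    printf("%d\n", advance_bit_position(&pos, 8));     /* 0: still fits  */
    printf("%d\n", advance_bit_position(&pos, 0x20));  /* -1: would wrap */
    return 0;
}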
+ * + * In field_flags, preserve the flag bits other than the ACCESS_TYPE bits */ - info->field_flags = (u8) ((info->field_flags & FIELD_ACCESS_TYPE_MASK) || - ((u8) (arg->value.integer >> 8))); + info->field_flags = (u8) ((info->field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) | + ((u8) ((u32) arg->common.value.integer >> 8))); + + info->attribute = (u8) (arg->common.value.integer); break; case AML_INT_NAMEDFIELD_OP: - /* Enter a new field name into the namespace */ + /* Lookup the name */ status = acpi_ns_lookup (walk_state->scope_info, - (NATIVE_CHAR *) &((acpi_parse2_object *)arg)->name, - info->field_type, IMODE_LOAD_PASS1, - NS_NO_UPSEARCH | NS_DONT_OPEN_SCOPE, - NULL, &info->field_node); + (char *) &arg->named.name, + info->field_type, ACPI_IMODE_EXECUTE, ACPI_NS_DONT_OPEN_SCOPE, + walk_state, &info->field_node); if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + ACPI_REPORT_NSERROR ((char *) &arg->named.name, status); + if (status != AE_ALREADY_EXISTS) { + return_ACPI_STATUS (status); + } + + /* Already exists, ignore error */ + } + else { + arg->common.node = info->field_node; + info->field_bit_length = arg->common.value.size; + + /* Create and initialize an object for the new Field Node */ + + status = acpi_ex_prep_field_value (info); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } } - /* Create and initialize an object for the new Field Node */ + /* Keep track of bit position for the next field */ - info->field_bit_length = arg->value.size; + position = (acpi_integer) info->field_bit_position + + (acpi_integer) arg->common.value.size; - status = acpi_ex_prep_field_value (info); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + if (position > ACPI_UINT32_MAX) { + ACPI_REPORT_ERROR (("Field [%4.4s] bit offset too large (> 0xFFFFFFFF)\n", + (char *) &info->field_node->name)); + return_ACPI_STATUS (AE_SUPPORT); } - /* Keep track of bit position for the next field */ - info->field_bit_position += info->field_bit_length; break; @@ -246,12 +301,11 @@ default: ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid opcode in field list: %X\n", - arg->opcode)); - return_ACPI_STATUS (AE_AML_ERROR); - break; + arg->common.aml_opcode)); + return_ACPI_STATUS (AE_AML_BAD_OPCODE); } - arg = arg->next; + arg = arg->common.next; } return_ACPI_STATUS (AE_OK); @@ -260,11 +314,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_create_field + * FUNCTION: acpi_ds_create_field * * PARAMETERS: Op - Op containing the Field definition and args - * Region_node - Object for the containing Operation Region - * ` Walk_state - Current method state + * region_node - Object for the containing Operation Region + * ` walk_state - Current method state * * RETURN: Status * @@ -274,41 +328,43 @@ acpi_status acpi_ds_create_field ( - acpi_parse_object *op, - acpi_namespace_node *region_node, - acpi_walk_state *walk_state) + union acpi_parse_object *op, + struct acpi_namespace_node *region_node, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_AML_ERROR; - acpi_parse_object *arg; - ACPI_CREATE_FIELD_INFO info; + acpi_status status; + union acpi_parse_object *arg; + struct acpi_create_field_info info; - FUNCTION_TRACE_PTR ("Ds_create_field", op); + ACPI_FUNCTION_TRACE_PTR ("ds_create_field", op); - /* First arg is the name of the parent Op_region (must already exist) */ + /* First arg is the name of the parent op_region (must already exist) */ - arg = op->value.arg; + arg = op->common.value.arg; if (!region_node) { - 
status = acpi_ns_lookup (walk_state->scope_info, arg->value.name, - ACPI_TYPE_REGION, IMODE_EXECUTE, - NS_SEARCH_PARENT, walk_state, ®ion_node); + status = acpi_ns_lookup (walk_state->scope_info, arg->common.value.name, + ACPI_TYPE_REGION, ACPI_IMODE_EXECUTE, + ACPI_NS_SEARCH_PARENT, walk_state, ®ion_node); if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (arg->common.value.name, status); return_ACPI_STATUS (status); } } /* Second arg is the field flags */ - arg = arg->next; - info.field_flags = arg->value.integer8; + arg = arg->common.next; + info.field_flags = (u8) arg->common.value.integer; + info.attribute = 0; /* Each remaining arg is a Named Field */ - info.field_type = INTERNAL_TYPE_REGION_FIELD; + info.field_type = ACPI_TYPE_LOCAL_REGION_FIELD; info.region_node = region_node; - status = acpi_ds_get_field_names (&info, walk_state, arg->next); + status = acpi_ds_get_field_names (&info, walk_state, arg->common.next); return_ACPI_STATUS (status); } @@ -316,11 +372,95 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_create_bank_field + * FUNCTION: acpi_ds_init_field_objects * * PARAMETERS: Op - Op containing the Field definition and args - * Region_node - Object for the containing Operation Region - * ` Walk_state - Current method state + * ` walk_state - Current method state + * + * RETURN: Status + * + * DESCRIPTION: For each "Field Unit" name in the argument list that is + * part of the field declaration, enter the name into the + * namespace. + * + ******************************************************************************/ + +acpi_status +acpi_ds_init_field_objects ( + union acpi_parse_object *op, + struct acpi_walk_state *walk_state) +{ + acpi_status status; + union acpi_parse_object *arg = NULL; + struct acpi_namespace_node *node; + u8 type = 0; + + + ACPI_FUNCTION_TRACE_PTR ("ds_init_field_objects", op); + + + switch (walk_state->opcode) { + case AML_FIELD_OP: + arg = acpi_ps_get_arg (op, 2); + type = ACPI_TYPE_LOCAL_REGION_FIELD; + break; + + case AML_BANK_FIELD_OP: + arg = acpi_ps_get_arg (op, 4); + type = ACPI_TYPE_LOCAL_BANK_FIELD; + break; + + case AML_INDEX_FIELD_OP: + arg = acpi_ps_get_arg (op, 3); + type = ACPI_TYPE_LOCAL_INDEX_FIELD; + break; + + default: + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* + * Walk the list of entries in the field_list + */ + while (arg) { + /* Ignore OFFSET and ACCESSAS terms here */ + + if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) { + status = acpi_ns_lookup (walk_state->scope_info, + (char *) &arg->named.name, + type, ACPI_IMODE_LOAD_PASS1, + ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE | ACPI_NS_ERROR_IF_FOUND, + walk_state, &node); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR ((char *) &arg->named.name, status); + if (status != AE_ALREADY_EXISTS) { + return_ACPI_STATUS (status); + } + + /* Name already exists, just ignore this error */ + + status = AE_OK; + } + + arg->common.node = node; + } + + /* Move to next field in the list */ + + arg = arg->common.next; + } + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ds_create_bank_field + * + * PARAMETERS: Op - Op containing the Field definition and args + * region_node - Object for the containing Operation Region + * ` walk_state - Current method state * * RETURN: Status * @@ -330,56 +470,58 @@ acpi_status acpi_ds_create_bank_field ( - acpi_parse_object *op, - acpi_namespace_node *region_node, - acpi_walk_state 
*walk_state) + union acpi_parse_object *op, + struct acpi_namespace_node *region_node, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_AML_ERROR; - acpi_parse_object *arg; - ACPI_CREATE_FIELD_INFO info; + acpi_status status; + union acpi_parse_object *arg; + struct acpi_create_field_info info; - FUNCTION_TRACE_PTR ("Ds_create_bank_field", op); + ACPI_FUNCTION_TRACE_PTR ("ds_create_bank_field", op); - /* First arg is the name of the parent Op_region (must already exist) */ + /* First arg is the name of the parent op_region (must already exist) */ - arg = op->value.arg; + arg = op->common.value.arg; if (!region_node) { - status = acpi_ns_lookup (walk_state->scope_info, arg->value.name, - ACPI_TYPE_REGION, IMODE_EXECUTE, - NS_SEARCH_PARENT, walk_state, &region_node); + status = acpi_ns_lookup (walk_state->scope_info, arg->common.value.name, + ACPI_TYPE_REGION, ACPI_IMODE_EXECUTE, + ACPI_NS_SEARCH_PARENT, walk_state, &region_node); if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (arg->common.value.name, status); return_ACPI_STATUS (status); } }
- /* Second arg is the Bank Register (must already exist) */ + /* Second arg is the Bank Register (Field) (must already exist) */ - arg = arg->next; - status = acpi_ns_lookup (walk_state->scope_info, arg->value.string, - INTERNAL_TYPE_BANK_FIELD_DEFN, IMODE_EXECUTE, - NS_SEARCH_PARENT, walk_state, &info.register_node); + arg = arg->common.next; + status = acpi_ns_lookup (walk_state->scope_info, arg->common.value.string, + ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, + ACPI_NS_SEARCH_PARENT, walk_state, &info.register_node); if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (arg->common.value.string, status); return_ACPI_STATUS (status); }
- /* Third arg is the Bank_value */ + /* Third arg is the bank_value */ - arg = arg->next; - info.bank_value = arg->value.integer32; + arg = arg->common.next; + info.bank_value = (u32) arg->common.value.integer; /* Fourth arg is the field flags */ - arg = arg->next; - info.field_flags = arg->value.integer8; + arg = arg->common.next; + info.field_flags = (u8) arg->common.value.integer; /* Each remaining arg is a Named Field */ - info.field_type = INTERNAL_TYPE_BANK_FIELD; + info.field_type = ACPI_TYPE_LOCAL_BANK_FIELD; info.region_node = region_node; - status = acpi_ds_get_field_names (&info, walk_state, arg->next); + status = acpi_ds_get_field_names (&info, walk_state, arg->common.next); return_ACPI_STATUS (status); }
@@ -387,11 +529,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_create_index_field + * FUNCTION: acpi_ds_create_index_field * * PARAMETERS: Op - Op containing the Field definition and args - * Region_node - Object for the containing Operation Region - * ` Walk_state - Current method state + * region_node - Object for the containing Operation Region + * ` walk_state - Current method state * * RETURN: Status * @@ -401,50 +543,51 @@ acpi_status acpi_ds_create_index_field ( - acpi_parse_object *op, - acpi_namespace_node *region_node, - acpi_walk_state *walk_state) + union acpi_parse_object *op, + struct acpi_namespace_node *region_node, + struct acpi_walk_state *walk_state) { - acpi_status status; - acpi_parse_object *arg; - ACPI_CREATE_FIELD_INFO info; + acpi_status status; + union acpi_parse_object *arg; + struct acpi_create_field_info info; - FUNCTION_TRACE_PTR ("Ds_create_index_field", op); + ACPI_FUNCTION_TRACE_PTR ("ds_create_index_field", op); /* First arg is the name of the Index register (must already exist) */ - arg =
op->value.arg; - status = acpi_ns_lookup (walk_state->scope_info, arg->value.string, - ACPI_TYPE_ANY, IMODE_EXECUTE, - NS_SEARCH_PARENT, walk_state, &info.register_node); + arg = op->common.value.arg; + status = acpi_ns_lookup (walk_state->scope_info, arg->common.value.string, + ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, + ACPI_NS_SEARCH_PARENT, walk_state, &info.register_node); if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (arg->common.value.string, status); return_ACPI_STATUS (status); } /* Second arg is the data register (must already exist) */ - arg = arg->next; - status = acpi_ns_lookup (walk_state->scope_info, arg->value.string, - INTERNAL_TYPE_INDEX_FIELD_DEFN, IMODE_EXECUTE, - NS_SEARCH_PARENT, walk_state, &info.data_register_node); + arg = arg->common.next; + status = acpi_ns_lookup (walk_state->scope_info, arg->common.value.string, + ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, + ACPI_NS_SEARCH_PARENT, walk_state, &info.data_register_node); if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (arg->common.value.string, status); return_ACPI_STATUS (status); } /* Next arg is the field flags */ - arg = arg->next; - info.field_flags = arg->value.integer8; - + arg = arg->common.next; + info.field_flags = (u8) arg->common.value.integer; /* Each remaining arg is a Named Field */ - info.field_type = INTERNAL_TYPE_INDEX_FIELD; + info.field_type = ACPI_TYPE_LOCAL_INDEX_FIELD; info.region_node = region_node; - status = acpi_ds_get_field_names (&info, walk_state, arg->next); + status = acpi_ds_get_field_names (&info, walk_state, arg->common.next); return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dsinit.c linux.21rc5-ac2/drivers/acpi/dispatcher/dsinit.c --- linux.21rc5/drivers/acpi/dispatcher/dsinit.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dsinit.c 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,235 @@ +/****************************************************************************** + * + * Module Name: dsinit - Object initialization namespace walk + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + + +#include +#include +#include + +#define _COMPONENT ACPI_DISPATCHER + ACPI_MODULE_NAME ("dsinit") + + +/******************************************************************************* + * + * FUNCTION: acpi_ds_init_one_object + * + * PARAMETERS: obj_handle - Node + * Level - Current nesting level + * Context - Points to a init info struct + * return_value - Not used + * + * RETURN: Status + * + * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every object + * within the namespace. + * + * Currently, the only objects that require initialization are: + * 1) Methods + * 2) Operation Regions + * + ******************************************************************************/ + +acpi_status +acpi_ds_init_one_object ( + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value) +{ + acpi_object_type type; + acpi_status status; + struct acpi_init_walk_info *info = (struct acpi_init_walk_info *) context; + + + ACPI_FUNCTION_NAME ("ds_init_one_object"); + + + /* + * We are only interested in objects owned by the table that + * was just loaded + */ + if (((struct acpi_namespace_node *) obj_handle)->owner_id != + info->table_desc->table_id) { + return (AE_OK); + } + + info->object_count++; + + /* And even then, we are only interested in a few object types */ + + type = acpi_ns_get_type (obj_handle); + + switch (type) { + case ACPI_TYPE_REGION: + + status = acpi_ds_initialize_region (obj_handle); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region %p [%4.4s] - Init failure, %s\n", + obj_handle, ((struct acpi_namespace_node *) obj_handle)->name.ascii, + acpi_format_exception (status))); + } + + info->op_region_count++; + break; + + + case ACPI_TYPE_METHOD: + + info->method_count++; + + /* Print a dot for each method unless we are going to print the entire pathname */ + + if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, ".")); + } + + /* + * Set the execution data width (32 or 64) based upon the + * revision number of the parent ACPI table. + * TBD: This is really for possible future support of integer width + * on a per-table basis. Currently, we just use a global for the width. + */ + if (info->table_desc->pointer->revision == 1) { + ((struct acpi_namespace_node *) obj_handle)->flags |= ANOBJ_DATA_WIDTH_32; + } + + /* + * Always parse methods to detect errors, we may delete + * the parse tree below + */ + status = acpi_ds_parse_method (obj_handle); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Method %p [%4.4s] - parse failure, %s\n", + obj_handle, ((struct acpi_namespace_node *) obj_handle)->name.ascii, + acpi_format_exception (status))); + + /* This parse failed, but we will continue parsing more methods */ + + break; + } + + /* + * Delete the parse tree. 
We simple re-parse the method + * for every execution since there isn't much overhead + */ + acpi_ns_delete_namespace_subtree (obj_handle); + acpi_ns_delete_namespace_by_owner (((struct acpi_namespace_node *) obj_handle)->object->method.owning_id); + break; + + + case ACPI_TYPE_DEVICE: + + info->device_count++; + break; + + + default: + break; + } + + /* + * We ignore errors from above, and always return OK, since + * we don't want to abort the walk on a single error. + */ + return (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ds_initialize_objects + * + * PARAMETERS: table_desc - Descriptor for parent ACPI table + * start_node - Root of subtree to be initialized. + * + * RETURN: Status + * + * DESCRIPTION: Walk the namespace starting at "start_node" and perform any + * necessary initialization on the objects found therein + * + ******************************************************************************/ + +acpi_status +acpi_ds_initialize_objects ( + struct acpi_table_desc *table_desc, + struct acpi_namespace_node *start_node) +{ + acpi_status status; + struct acpi_init_walk_info info; + + + ACPI_FUNCTION_TRACE ("ds_initialize_objects"); + + + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, + "**** Starting initialization of namespace objects ****\n")); + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "Parsing all Control Methods:")); + + info.method_count = 0; + info.op_region_count = 0; + info.object_count = 0; + info.device_count = 0; + info.table_desc = table_desc; + + /* Walk entire namespace from the supplied root */ + + status = acpi_walk_namespace (ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX, + acpi_ds_init_one_object, &info, NULL); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "walk_namespace failed, %s\n", + acpi_format_exception (status))); + } + + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, + "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n", + table_desc->pointer->signature, table_desc->table_id, info.object_count, + info.device_count, info.method_count, info.op_region_count)); + + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, + "%hd Methods, %hd Regions\n", info.method_count, info.op_region_count)); + + return_ACPI_STATUS (AE_OK); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dsmethod.c linux.21rc5-ac2/drivers/acpi/dispatcher/dsmethod.c --- linux.21rc5/drivers/acpi/dispatcher/dsmethod.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dsmethod.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,51 +1,67 @@ /****************************************************************************** * * Module Name: dsmethod - Parser/Interpreter interface - control method parsing - * $Revision: 69 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "actables.h" -#include "acdebug.h" +#include +#include +#include +#include +#include +#include #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dsmethod") + ACPI_MODULE_NAME ("dsmethod") /******************************************************************************* * - * FUNCTION: Acpi_ds_parse_method + * FUNCTION: acpi_ds_parse_method * - * PARAMETERS: Obj_handle - Node of the method + * PARAMETERS: obj_handle - Node of the method * Level - Current nesting level * Context - Points to a method counter - * Return_value - Not used + * return_value - Not used * * RETURN: Status * @@ -58,17 +74,17 @@ acpi_status acpi_ds_parse_method ( - acpi_handle obj_handle) + acpi_handle obj_handle) { - acpi_status status; - acpi_operand_object *obj_desc; - acpi_parse_object *op; - acpi_namespace_node *node; - acpi_owner_id owner_id; - acpi_walk_state *walk_state; + acpi_status status; + union acpi_operand_object *obj_desc; + union acpi_parse_object *op; + struct acpi_namespace_node *node; + acpi_owner_id owner_id; + struct acpi_walk_state *walk_state; - FUNCTION_TRACE_PTR ("Ds_parse_method", obj_handle); + ACPI_FUNCTION_TRACE_PTR ("ds_parse_method", obj_handle); /* Parameter Validation */ @@ -77,14 +93,13 @@ return_ACPI_STATUS (AE_NULL_ENTRY); } - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "**** Parsing [%4.4s] **** Named_obj=%p\n", - (char*)&((acpi_namespace_node *)obj_handle)->name, obj_handle)); - + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "**** Parsing [%4.4s] **** named_obj=%p\n", + ((struct acpi_namespace_node *) obj_handle)->name.ascii, obj_handle)); /* Extract the method object from the method Node */ - node = (acpi_namespace_node *) obj_handle; - obj_desc = node->object; + node = (struct acpi_namespace_node *) obj_handle; + obj_desc = acpi_ns_get_attached_object (node); if (!obj_desc) { return_ACPI_STATUS (AE_NULL_OBJECT); } @@ -112,13 +127,20 @@ /* Init new op with the method name and pointer back to the Node */ - acpi_ps_set_name (op, node->name); - op->node = node; + acpi_ps_set_name (op, node->name.integer); + op->common.node = node; + + /* + * Get a new owner_id for objects created by this method. Namespace + * objects (such as Operation Regions) can be created during the + * first pass parse. 
+ */ + owner_id = acpi_ut_allocate_owner_id (ACPI_OWNER_TYPE_METHOD); + obj_desc->method.owning_id = owner_id; /* Create and initialize a new walk state */ - walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, - NULL, NULL, NULL); + walk_state = acpi_ds_create_walk_state (owner_id, NULL, NULL, NULL); if (!walk_state) { return_ACPI_STATUS (AE_NO_MEMORY); } @@ -126,7 +148,7 @@ status = acpi_ds_init_aml_walk (walk_state, op, node, obj_desc->method.aml_start, obj_desc->method.aml_length, NULL, NULL, 1); if (ACPI_FAILURE (status)) { - /* TBD: delete walk state */ + acpi_ds_delete_walk_state (walk_state); return_ACPI_STATUS (status); } @@ -145,27 +167,22 @@ return_ACPI_STATUS (status); } - /* Get a new Owner_id for objects created by this method */ - - owner_id = acpi_ut_allocate_owner_id (OWNER_TYPE_METHOD); - obj_desc->method.owning_id = owner_id; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "**** [%4.4s] Parsed **** Named_obj=%p Op=%p\n", - (char*)&((acpi_namespace_node *)obj_handle)->name, obj_handle, op)); + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, + "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n", + ((struct acpi_namespace_node *) obj_handle)->name.ascii, obj_handle, op)); acpi_ps_delete_parse_tree (op); - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ds_begin_method_execution + * FUNCTION: acpi_ds_begin_method_execution * - * PARAMETERS: Method_node - Node of the method - * Obj_desc - The method object - * Calling_method_node - Caller of this method (if non-null) + * PARAMETERS: method_node - Node of the method + * obj_desc - The method object + * calling_method_node - Caller of this method (if non-null) * * RETURN: Status * @@ -179,21 +196,20 @@ acpi_status acpi_ds_begin_method_execution ( - acpi_namespace_node *method_node, - acpi_operand_object *obj_desc, - acpi_namespace_node *calling_method_node) + struct acpi_namespace_node *method_node, + union acpi_operand_object *obj_desc, + struct acpi_namespace_node *calling_method_node) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE_PTR ("Ds_begin_method_execution", method_node); + ACPI_FUNCTION_TRACE_PTR ("ds_begin_method_execution", method_node); if (!method_node) { return_ACPI_STATUS (AE_NULL_ENTRY); } - /* * If there is a concurrency limit on this method, we need to * obtain a unit from the method semaphore. @@ -201,7 +217,7 @@ if (obj_desc->method.semaphore) { /* * Allow recursive method calls, up to the reentrancy/concurrency - * limit imposed by the SERIALIZED rule and the Sync_level method + * limit imposed by the SERIALIZED rule and the sync_level method * parameter. 
* * The point of this code is to avoid permanently blocking a @@ -218,25 +234,23 @@ * interpreter if we block */ status = acpi_ex_system_wait_semaphore (obj_desc->method.semaphore, - WAIT_FOREVER); + ACPI_WAIT_FOREVER); } - /* * Increment the method parse tree thread count since it has been * reentered one more time (even if it is the same thread) */ obj_desc->method.thread_count++; - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ds_call_control_method + * FUNCTION: acpi_ds_call_control_method * - * PARAMETERS: Walk_state - Current state of the walk + * PARAMETERS: walk_state - Current state of the walk * Op - Current Op to be walked * * RETURN: Status @@ -247,18 +261,18 @@ acpi_status acpi_ds_call_control_method ( - acpi_walk_list *walk_list, - acpi_walk_state *this_walk_state, - acpi_parse_object *op) /* TBD: This operand is obsolete */ + struct acpi_thread_state *thread, + struct acpi_walk_state *this_walk_state, + union acpi_parse_object *op) { - acpi_status status; - acpi_namespace_node *method_node; - acpi_operand_object *obj_desc; - acpi_walk_state *next_walk_state; - u32 i; + acpi_status status; + struct acpi_namespace_node *method_node; + union acpi_operand_object *obj_desc; + struct acpi_walk_state *next_walk_state; + u32 i; - FUNCTION_TRACE_PTR ("Ds_call_control_method", this_walk_state); + ACPI_FUNCTION_TRACE_PTR ("ds_call_control_method", this_walk_state); ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Execute method %p, currentstate=%p\n", this_walk_state->prev_op, this_walk_state)); @@ -284,19 +298,17 @@ return_ACPI_STATUS (status); } - /* 1) Parse: Create a new walk state for the preempting walk */ next_walk_state = acpi_ds_create_walk_state (obj_desc->method.owning_id, op, obj_desc, NULL); if (!next_walk_state) { return_ACPI_STATUS (AE_NO_MEMORY); - goto cleanup; } /* Create and init a Root Node */ - op = acpi_ps_alloc_op (AML_SCOPE_OP); + op = acpi_ps_create_scope_op (); if (!op) { status = AE_NO_MEMORY; goto cleanup; @@ -306,7 +318,7 @@ obj_desc->method.aml_start, obj_desc->method.aml_length, NULL, NULL, 1); if (ACPI_FAILURE (status)) { - /* TBD: delete walk state */ + acpi_ds_delete_walk_state (next_walk_state); goto cleanup; } @@ -315,11 +327,10 @@ status = acpi_ps_parse_aml (next_walk_state); acpi_ps_delete_parse_tree (op); - /* 2) Execute: Create a new state for the preempting walk */ next_walk_state = acpi_ds_create_walk_state (obj_desc->method.owning_id, - NULL, obj_desc, walk_list); + NULL, obj_desc, thread); if (!next_walk_state) { status = AE_NO_MEMORY; goto cleanup; @@ -353,8 +364,8 @@ this_walk_state->num_operands = 0; - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Starting nested execution, newstate=%p\n", - next_walk_state)); + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, + "Starting nested execution, newstate=%p\n", next_walk_state)); return_ACPI_STATUS (AE_OK); @@ -362,7 +373,7 @@ /* On error, we must delete the new walk state */ cleanup: - acpi_ds_terminate_control_method (next_walk_state); + (void) acpi_ds_terminate_control_method (next_walk_state); acpi_ds_delete_walk_state (next_walk_state); return_ACPI_STATUS (status); @@ -371,9 +382,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_restart_control_method + * FUNCTION: acpi_ds_restart_control_method * - * PARAMETERS: Walk_state - State of the method when it was preempted + * PARAMETERS: walk_state - State of the method when it was preempted * Op - Pointer to new current op * * 
RETURN: Status @@ -384,13 +395,13 @@ acpi_status acpi_ds_restart_control_method ( - acpi_walk_state *walk_state, - acpi_operand_object *return_desc) + struct acpi_walk_state *walk_state, + union acpi_operand_object *return_desc) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE_PTR ("Ds_restart_control_method", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ds_restart_control_method", walk_state); if (return_desc) { @@ -405,7 +416,6 @@ return_ACPI_STATUS (status); } } - else { /* * Delete the return value if it will not be used by the @@ -413,24 +423,22 @@ */ acpi_ut_remove_reference (return_desc); } - } ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "Method=%p Return=%p Return_used?=%X Res_stack=%p State=%p\n", + "Method=%p Return=%p return_used?=%X res_stack=%p State=%p\n", walk_state->method_call_op, return_desc, walk_state->return_used, walk_state->results, walk_state)); - return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ds_terminate_control_method + * FUNCTION: acpi_ds_terminate_control_method * - * PARAMETERS: Walk_state - State of the method + * PARAMETERS: walk_state - State of the method * * RETURN: Status * @@ -442,16 +450,21 @@ acpi_status acpi_ds_terminate_control_method ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object *obj_desc; - acpi_namespace_node *method_node; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *method_node; + acpi_status status; - FUNCTION_TRACE_PTR ("Ds_terminate_control_method", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ds_terminate_control_method", walk_state); - /* The method object should be stored in the walk state */ + if (!walk_state) { + return (AE_BAD_PARAMETER); + } + + /* The current method object was saved in the walk state */ obj_desc = walk_state->method_desc; if (!obj_desc) { @@ -467,14 +480,22 @@ * If this is the last thread executing the method, * we have additional cleanup to perform */ - acpi_ut_acquire_mutex (ACPI_MTX_PARSER); - + status = acpi_ut_acquire_mutex (ACPI_MTX_PARSER); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* Signal completion of the execution of this method if necessary */ if (walk_state->method_desc->method.semaphore) { - acpi_os_signal_semaphore ( - walk_state->method_desc->method.semaphore, 1); + status = acpi_os_signal_semaphore ( + walk_state->method_desc->method.semaphore, 1); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not signal method semaphore\n")); + status = AE_OK; + + /* Ignore error and continue cleanup */ + } } /* Decrement the thread count on the method parse tree */ @@ -493,7 +514,11 @@ * Delete any namespace entries created immediately underneath * the method */ - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + if (method_node->child) { acpi_ns_delete_namespace_subtree (method_node); } @@ -503,11 +528,14 @@ * the namespace */ acpi_ns_delete_namespace_by_owner (walk_state->method_desc->method.owning_id); - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } } - acpi_ut_release_mutex (ACPI_MTX_PARSER); - return_ACPI_STATUS (AE_OK); + status = acpi_ut_release_mutex (ACPI_MTX_PARSER); + return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive 
linux.21rc5/drivers/acpi/dispatcher/dsmthdat.c linux.21rc5-ac2/drivers/acpi/dispatcher/dsmthdat.c --- linux.21rc5/drivers/acpi/dispatcher/dsmthdat.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dsmthdat.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,193 +1,196 @@ /******************************************************************************* * * Module Name: dsmthdat - control method arguments and local variables - * $Revision: 49 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" +#include +#include +#include +#include +#include #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dsmthdat") + ACPI_MODULE_NAME ("dsmthdat") /******************************************************************************* * - * FUNCTION: Acpi_ds_method_data_init + * FUNCTION: acpi_ds_method_data_init * - * PARAMETERS: Walk_state - Current walk state object + * PARAMETERS: walk_state - Current walk state object * * RETURN: Status * * DESCRIPTION: Initialize the data structures that hold the method's arguments * and locals. The data struct is an array of NTEs for each. - * This allows Ref_of and De_ref_of to work properly for these + * This allows ref_of and de_ref_of to work properly for these * special data types. * + * NOTES: walk_state fields are initialized to zero by the + * ACPI_MEM_CALLOCATE(). + * + * A pseudo-Namespace Node is assigned to each argument and local + * so that ref_of() can return a pointer to the Node. + * ******************************************************************************/ -acpi_status +void acpi_ds_method_data_init ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - u32 i; + u32 i; - FUNCTION_TRACE ("Ds_method_data_init"); + ACPI_FUNCTION_TRACE ("ds_method_data_init"); - /* - * Walk_state fields are initialized to zero by the - * ACPI_MEM_CALLOCATE(). - * - * An Node is assigned to each argument and local so - * that Ref_of() can return a pointer to the Node. - */ /* Init the method arguments */ - for (i = 0; i < MTH_NUM_ARGS; i++) { - MOVE_UNALIGNED32_TO_32 (&walk_state->arguments[i].name, - NAMEOF_ARG_NTE); - walk_state->arguments[i].name |= (i << 24); - walk_state->arguments[i].data_type = ACPI_DESC_TYPE_NAMED; - walk_state->arguments[i].type = ACPI_TYPE_ANY; - walk_state->arguments[i].flags = ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG; + for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) { + ACPI_MOVE_32_TO_32 (&walk_state->arguments[i].name, + NAMEOF_ARG_NTE); + walk_state->arguments[i].name.integer |= (i << 24); + walk_state->arguments[i].descriptor = ACPI_DESC_TYPE_NAMED; + walk_state->arguments[i].type = ACPI_TYPE_ANY; + walk_state->arguments[i].flags = ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG; } /* Init the method locals */ - for (i = 0; i < MTH_NUM_LOCALS; i++) { - MOVE_UNALIGNED32_TO_32 (&walk_state->local_variables[i].name, - NAMEOF_LOCAL_NTE); - - walk_state->local_variables[i].name |= (i << 24); - walk_state->local_variables[i].data_type = ACPI_DESC_TYPE_NAMED; - walk_state->local_variables[i].type = ACPI_TYPE_ANY; - walk_state->local_variables[i].flags = ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL; + for (i = 0; i < ACPI_METHOD_NUM_LOCALS; i++) { + ACPI_MOVE_32_TO_32 (&walk_state->local_variables[i].name, + NAMEOF_LOCAL_NTE); + + walk_state->local_variables[i].name.integer |= (i << 24); + walk_state->local_variables[i].descriptor = ACPI_DESC_TYPE_NAMED; + walk_state->local_variables[i].type = ACPI_TYPE_ANY; + walk_state->local_variables[i].flags = ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL; } - return_ACPI_STATUS (AE_OK); + return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ds_method_data_delete_all + * FUNCTION: acpi_ds_method_data_delete_all * - * PARAMETERS: Walk_state - Current walk state object + * PARAMETERS: walk_state - Current walk state object * - * RETURN: Status + * RETURN: None * * DESCRIPTION: Delete method 
locals and arguments. Arguments are only * deleted if this method was called from another method. * ******************************************************************************/ -acpi_status +void acpi_ds_method_data_delete_all ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - u32 index; - acpi_operand_object *object; + u32 index; - FUNCTION_TRACE ("Ds_method_data_delete_all"); + ACPI_FUNCTION_TRACE ("ds_method_data_delete_all"); - /* Delete the locals */ + /* Detach the locals */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Deleting local variables in %p\n", walk_state)); + for (index = 0; index < ACPI_METHOD_NUM_LOCALS; index++) { + if (walk_state->local_variables[index].object) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Deleting Local%d=%p\n", + index, walk_state->local_variables[index].object)); - for (index = 0; index < MTH_NUM_LOCALS; index++) { - object = walk_state->local_variables[index].object; - if (object) { - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Deleting Local%d=%p\n", index, object)); + /* Detach object (if present) and remove a reference */ - /* Remove first */ - - walk_state->local_variables[index].object = NULL; - - /* Was given a ref when stored */ - - acpi_ut_remove_reference (object); - } + acpi_ns_detach_object (&walk_state->local_variables[index]); + } } + /* Detach the arguments */ - /* Delete the arguments */ - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Deleting arguments in %p\n", walk_state)); - - for (index = 0; index < MTH_NUM_ARGS; index++) { - object = walk_state->arguments[index].object; - if (object) { - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Deleting Arg%d=%p\n", index, object)); - - /* Remove first */ - - walk_state->arguments[index].object = NULL; + for (index = 0; index < ACPI_METHOD_NUM_ARGS; index++) { + if (walk_state->arguments[index].object) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Deleting Arg%d=%p\n", + index, walk_state->arguments[index].object)); - /* Was given a ref when stored */ + /* Detach object (if present) and remove a reference */ - acpi_ut_remove_reference (object); + acpi_ns_detach_object (&walk_state->arguments[index]); } } - return_ACPI_STATUS (AE_OK); + return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ds_method_data_init_args + * FUNCTION: acpi_ds_method_data_init_args * * PARAMETERS: *Params - Pointer to a parameter list for the method - * Max_param_count - The arg count for this method - * Walk_state - Current walk state object + * max_param_count - The arg count for this method + * walk_state - Current walk state object * * RETURN: Status * - * DESCRIPTION: Initialize arguments for a method + * DESCRIPTION: Initialize arguments for a method. The parameter list is a list + * of ACPI operand objects, either null terminated or whose length + * is defined by max_param_count. 
* ******************************************************************************/ acpi_status acpi_ds_method_data_init_args ( - acpi_operand_object **params, - u32 max_param_count, - acpi_walk_state *walk_state) + union acpi_operand_object **params, + u32 max_param_count, + struct acpi_walk_state *walk_state) { - acpi_status status; - u32 mindex; - u32 pindex; + acpi_status status; + u32 index = 0; - FUNCTION_TRACE_PTR ("Ds_method_data_init_args", params); + ACPI_FUNCTION_TRACE_PTR ("ds_method_data_init_args", params); if (!params) { @@ -197,293 +200,237 @@ /* Copy passed parameters into the new method stack frame */ - for (pindex = mindex = 0; - (mindex < MTH_NUM_ARGS) && (pindex < max_param_count); - mindex++) { - if (params[pindex]) { - /* - * A valid parameter. - * Set the current method argument to the - * Params[Pindex++] argument object descriptor - */ - status = acpi_ds_store_object_to_local (AML_ARG_OP, mindex, - params[pindex], walk_state); - if (ACPI_FAILURE (status)) { - break; - } - - pindex++; + while ((index < ACPI_METHOD_NUM_ARGS) && (index < max_param_count) && params[index]) { + /* + * A valid parameter. + * Store the argument in the method/walk descriptor + */ + status = acpi_ds_store_object_to_local (AML_ARG_OP, index, params[index], + walk_state); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - else { - break; - } + index++; } - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%d args passed to method\n", pindex)); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%d args passed to method\n", index)); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ds_method_data_get_entry + * FUNCTION: acpi_ds_method_data_get_node * * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP - * Index - Which local_var or argument to get - * Entry - Pointer to where a pointer to the stack - * entry is returned. - * Walk_state - Current walk state object - * - * RETURN: Status + * Index - which local_var or argument whose type + * to get + * walk_state - Current walk state object * - * DESCRIPTION: Get the address of the object entry given by Opcode:Index + * RETURN: Get the Node associated with a local or arg. * ******************************************************************************/ acpi_status -acpi_ds_method_data_get_entry ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state, - acpi_operand_object ***entry) +acpi_ds_method_data_get_node ( + u16 opcode, + u32 index, + struct acpi_walk_state *walk_state, + struct acpi_namespace_node **node) { - - FUNCTION_TRACE_U32 ("Ds_method_data_get_entry", index); + ACPI_FUNCTION_TRACE ("ds_method_data_get_node"); /* - * Get the requested object. 
- * The stack "Opcode" is either a Local_variable or an Argument + * Method Locals and Arguments are supported */ switch (opcode) { - case AML_LOCAL_OP: - if (index > MTH_MAX_LOCAL) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Local_var index %d is invalid (max %d)\n", - index, MTH_MAX_LOCAL)); - return_ACPI_STATUS (AE_BAD_PARAMETER); + if (index > ACPI_METHOD_MAX_LOCAL) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Local index %d is invalid (max %d)\n", + index, ACPI_METHOD_MAX_LOCAL)); + return_ACPI_STATUS (AE_AML_INVALID_INDEX); } - *entry = (acpi_operand_object **) - &walk_state->local_variables[index].object; - break; + /* Return a pointer to the pseudo-node */ + *node = &walk_state->local_variables[index]; + break; case AML_ARG_OP: - if (index > MTH_MAX_ARG) { + if (index > ACPI_METHOD_MAX_ARG) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Arg index %d is invalid (max %d)\n", - index, MTH_MAX_ARG)); - return_ACPI_STATUS (AE_BAD_PARAMETER); + index, ACPI_METHOD_MAX_ARG)); + return_ACPI_STATUS (AE_AML_INVALID_INDEX); } - *entry = (acpi_operand_object **) - &walk_state->arguments[index].object; - break; + /* Return a pointer to the pseudo-node */ + *node = &walk_state->arguments[index]; + break; default: ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Opcode %d is invalid\n", opcode)); - return_ACPI_STATUS (AE_BAD_PARAMETER); + return_ACPI_STATUS (AE_AML_BAD_OPCODE); } - return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ds_method_data_set_entry + * FUNCTION: acpi_ds_method_data_set_value * * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP - * Index - Which local_var or argument to get + * Index - which local_var or argument to get * Object - Object to be inserted into the stack entry - * Walk_state - Current walk state object + * walk_state - Current walk state object * * RETURN: Status * * DESCRIPTION: Insert an object onto the method stack at entry Opcode:Index. + * Note: There is no "implicit conversion" for locals. * ******************************************************************************/ acpi_status -acpi_ds_method_data_set_entry ( - u16 opcode, - u32 index, - acpi_operand_object *object, - acpi_walk_state *walk_state) +acpi_ds_method_data_set_value ( + u16 opcode, + u32 index, + union acpi_operand_object *object, + struct acpi_walk_state *walk_state) { - acpi_status status; - acpi_operand_object **entry; + acpi_status status; + struct acpi_namespace_node *node; + union acpi_operand_object *new_desc = object; - FUNCTION_TRACE ("Ds_method_data_set_entry"); + ACPI_FUNCTION_TRACE ("ds_method_data_set_value"); - /* Get a pointer to the stack entry to set */ + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, + "obj %p op %X, ref count = %d [%s]\n", object, + opcode, object->common.reference_count, + acpi_ut_get_type_name (object->common.type))); - status = acpi_ds_method_data_get_entry (opcode, index, walk_state, &entry); + /* Get the namespace node for the arg/local */ + + status = acpi_ds_method_data_get_node (opcode, index, walk_state, &node); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - /* Increment ref count so object can't be deleted while installed */ + /* + * If the object has just been created and is not attached to anything, + * (the reference count is 1), then we can just store it directly into + * the arg/local. Otherwise, we must copy it. 
+ */ + if (object->common.reference_count > 1) { + status = acpi_ut_copy_iobject_to_iobject (object, &new_desc, walk_state); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - acpi_ut_add_reference (object); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Object Copied %p, new %p\n", + object, new_desc)); + } + else { + /* Increment ref count so object can't be deleted while installed */ - /* Install the object into the stack entry */ + acpi_ut_add_reference (new_desc); + } - *entry = object; + /* Install the object */ - return_ACPI_STATUS (AE_OK); + node->object = new_desc; + return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ds_method_data_get_type + * FUNCTION: acpi_ds_method_data_get_type * * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP - * Index - Which local_var or argument whose type + * Index - which local_var or argument whose type * to get - * Walk_state - Current walk state object + * walk_state - Current walk state object * - * RETURN: Data type of selected Arg or Local - * Used only in Exec_monadic2()/Type_op. + * RETURN: Data type of current value of the selected Arg or Local * ******************************************************************************/ -acpi_object_type8 +acpi_object_type acpi_ds_method_data_get_type ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state) + u16 opcode, + u32 index, + struct acpi_walk_state *walk_state) { - acpi_status status; - acpi_operand_object **entry; - acpi_operand_object *object; + acpi_status status; + struct acpi_namespace_node *node; + union acpi_operand_object *object; - FUNCTION_TRACE ("Ds_method_data_get_type"); + ACPI_FUNCTION_TRACE ("ds_method_data_get_type"); - /* Get a pointer to the requested stack entry */ + /* Get the namespace node for the arg/local */ - status = acpi_ds_method_data_get_entry (opcode, index, walk_state, &entry); + status = acpi_ds_method_data_get_node (opcode, index, walk_state, &node); if (ACPI_FAILURE (status)) { return_VALUE ((ACPI_TYPE_NOT_FOUND)); } - /* Get the object from the method stack */ - - object = *entry; - - /* Get the object type */ + /* Get the object */ + object = acpi_ns_get_attached_object (node); if (!object) { - /* Any == 0 => "uninitialized" -- see spec 15.2.3.5.2.28 */ - return_VALUE (ACPI_TYPE_ANY); - } - - return_VALUE (object->common.type); -} - + /* Uninitialized local/arg, return TYPE_ANY */ -/******************************************************************************* - * - * FUNCTION: Acpi_ds_method_data_get_node - * - * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP - * Index - Which local_var or argument whose type - * to get - * Walk_state - Current walk state object - * - * RETURN: Get the Node associated with a local or arg. 
- * - ******************************************************************************/ - -acpi_namespace_node * -acpi_ds_method_data_get_node ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state) -{ - acpi_namespace_node *node = NULL; - - - FUNCTION_TRACE ("Ds_method_data_get_node"); - - - switch (opcode) { - - case AML_LOCAL_OP: - - if (index > MTH_MAX_LOCAL) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Local index %d is invalid (max %d)\n", - index, MTH_MAX_LOCAL)); - return_PTR (node); - } - - node = &walk_state->local_variables[index]; - break; - - - case AML_ARG_OP: - - if (index > MTH_MAX_ARG) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Arg index %d is invalid (max %d)\n", - index, MTH_MAX_ARG)); - return_PTR (node); - } - - node = &walk_state->arguments[index]; - break; - - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Opcode %d is invalid\n", opcode)); - break; + return_VALUE (ACPI_TYPE_ANY); } + /* Get the object type */ - return_PTR (node); + return_VALUE (ACPI_GET_OBJECT_TYPE (object)); } /******************************************************************************* * - * FUNCTION: Acpi_ds_method_data_get_value + * FUNCTION: acpi_ds_method_data_get_value * * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP - * Index - Which local_var or argument to get - * Walk_state - Current walk state object - * *Dest_desc - Ptr to Descriptor into which selected Arg + * Index - which local_var or argument to get + * walk_state - Current walk state object + * *dest_desc - Ptr to Descriptor into which selected Arg * or Local value should be copied * * RETURN: Status * * DESCRIPTION: Retrieve value of selected Arg or Local from the method frame * at the current top of the method stack. - * Used only in Acpi_ex_resolve_to_value(). + * Used only in acpi_ex_resolve_to_value(). * ******************************************************************************/ acpi_status acpi_ds_method_data_get_value ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state, - acpi_operand_object **dest_desc) + u16 opcode, + u32 index, + struct acpi_walk_state *walk_state, + union acpi_operand_object **dest_desc) { - acpi_status status; - acpi_operand_object **entry; - acpi_operand_object *object; + acpi_status status; + struct acpi_namespace_node *node; + union acpi_operand_object *object; - FUNCTION_TRACE ("Ds_method_data_get_value"); + ACPI_FUNCTION_TRACE ("ds_method_data_get_value"); /* Validate the object descriptor */ @@ -493,24 +440,22 @@ return_ACPI_STATUS (AE_BAD_PARAMETER); } + /* Get the namespace node for the arg/local */ - /* Get a pointer to the requested method stack entry */ - - status = acpi_ds_method_data_get_entry (opcode, index, walk_state, &entry); + status = acpi_ds_method_data_get_node (opcode, index, walk_state, &node); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - /* Get the object from the method stack */ - - object = *entry; + /* Get the object from the node */ + object = node->object; /* Examine the returned object, it must be valid. */ if (!object) { /* - * Index points to uninitialized object stack value. + * Index points to uninitialized object. 
* This means that either 1) The expected argument was * not passed to the method, or 2) A local variable * was referenced by the method (via the ASL) @@ -519,25 +464,25 @@ switch (opcode) { case AML_ARG_OP: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Uninitialized Arg[%d] at entry %p\n", - index, entry)); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Uninitialized Arg[%d] at node %p\n", + index, node)); return_ACPI_STATUS (AE_AML_UNINITIALIZED_ARG); - break; case AML_LOCAL_OP: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Uninitialized Local[%d] at entry %p\n", - index, entry)); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Uninitialized Local[%d] at node %p\n", + index, node)); return_ACPI_STATUS (AE_AML_UNINITIALIZED_LOCAL); - break; + + default: + return_ACPI_STATUS (AE_AML_INTERNAL); } } - /* - * Index points to initialized and valid object stack value. + * The Index points to an initialized and valid object. * Return an additional reference to the object */ *dest_desc = object; @@ -549,125 +494,126 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_method_data_delete_value + * FUNCTION: acpi_ds_method_data_delete_value * * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP - * Index - Which local_var or argument to delete - * Walk_state - Current walk state object + * Index - which local_var or argument to delete + * walk_state - Current walk state object * - * RETURN: Status + * RETURN: None * * DESCRIPTION: Delete the entry at Opcode:Index on the method stack. Inserts * a null into the stack slot after the object is deleted. * ******************************************************************************/ -acpi_status +void acpi_ds_method_data_delete_value ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state) + u16 opcode, + u32 index, + struct acpi_walk_state *walk_state) { - acpi_status status; - acpi_operand_object **entry; - acpi_operand_object *object; + acpi_status status; + struct acpi_namespace_node *node; + union acpi_operand_object *object; - FUNCTION_TRACE ("Ds_method_data_delete_value"); + ACPI_FUNCTION_TRACE ("ds_method_data_delete_value"); - /* Get a pointer to the requested entry */ + /* Get the namespace node for the arg/local */ - status = acpi_ds_method_data_get_entry (opcode, index, walk_state, &entry); + status = acpi_ds_method_data_get_node (opcode, index, walk_state, &node); if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + return_VOID; } - /* Get the current entry in this slot k */ + /* Get the associated object */ - object = *entry; + object = acpi_ns_get_attached_object (node); /* * Undefine the Arg or Local by setting its descriptor * pointer to NULL. Locals/Args can contain both * ACPI_OPERAND_OBJECTS and ACPI_NAMESPACE_NODEs */ - *entry = NULL; + node->object = NULL; if ((object) && - (VALID_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_INTERNAL))) { + (ACPI_GET_DESCRIPTOR_TYPE (object) == ACPI_DESC_TYPE_OPERAND)) { /* - * There is a valid object in this slot + * There is a valid object. * Decrement the reference count by one to balance the - * increment when the object was stored in the slot. + * increment when the object was stored. 
*/ acpi_ut_remove_reference (object); } - return_ACPI_STATUS (AE_OK); + return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ds_store_object_to_local + * FUNCTION: acpi_ds_store_object_to_local * * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP - * Index - Which local_var or argument to set - * Src_desc - Value to be stored - * Walk_state - Current walk state + * Index - which local_var or argument to set + * obj_desc - Value to be stored + * walk_state - Current walk state * * RETURN: Status * - * DESCRIPTION: Store a value in an Arg or Local. The Src_desc is installed + * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed * as the new value for the Arg or Local and the reference count - * for Src_desc is incremented. + * for obj_desc is incremented. * ******************************************************************************/ acpi_status acpi_ds_store_object_to_local ( - u16 opcode, - u32 index, - acpi_operand_object *src_desc, - acpi_walk_state *walk_state) + u16 opcode, + u32 index, + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state) { - acpi_status status; - acpi_operand_object **entry; + acpi_status status; + struct acpi_namespace_node *node; + union acpi_operand_object *current_obj_desc; - FUNCTION_TRACE ("Ds_method_data_set_value"); + ACPI_FUNCTION_TRACE ("ds_store_object_to_local"); ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Opcode=%d Idx=%d Obj=%p\n", - opcode, index, src_desc)); + opcode, index, obj_desc)); /* Parameter validation */ - if (!src_desc) { + if (!obj_desc) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + /* Get the namespace node for the arg/local */ - /* Get a pointer to the requested method stack entry */ - - status = acpi_ds_method_data_get_entry (opcode, index, walk_state, &entry); + status = acpi_ds_method_data_get_node (opcode, index, walk_state, &node); if (ACPI_FAILURE (status)) { - goto cleanup; + return_ACPI_STATUS (status); } - if (*entry == src_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p already installed!\n", src_desc)); - goto cleanup; + current_obj_desc = acpi_ns_get_attached_object (node); + if (current_obj_desc == obj_desc) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p already installed!\n", + obj_desc)); + return_ACPI_STATUS (status); } - /* * If there is an object already in this slot, we either * have to delete it, or if this is an argument and there * is an object reference stored there, we have to do * an indirect store! */ - if (*entry) { + if (current_obj_desc) { /* * Check for an indirect store if an argument * contains an object reference (stored as an Node). @@ -675,45 +621,46 @@ * locals, since a store to a local should overwrite * anything there, including an object reference. * - * If both Arg0 and Local0 contain Ref_of (Local4): + * If both Arg0 and Local0 contain ref_of (Local4): * * Store (1, Arg0) - Causes indirect store to local4 * Store (1, Local0) - Stores 1 in local0, overwriting * the reference to local4 - * Store (1, De_refof (Local0)) - Causes indirect store to local4 + * Store (1, de_refof (Local0)) - Causes indirect store to local4 * * Weird, but true. 
*/ - if ((opcode == AML_ARG_OP) && - (VALID_DESCRIPTOR_TYPE (*entry, ACPI_DESC_TYPE_NAMED))) { - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, - "Arg (%p) is an Obj_ref(Node), storing in %p\n", - src_desc, *entry)); - - /* Detach an existing object from the Node */ - - acpi_ns_detach_object ((acpi_namespace_node *) *entry); - + if (opcode == AML_ARG_OP) { /* - * Store this object into the Node - * (do the indirect store) + * Make sure that the object is the correct type. This may be overkill, but + * it is here because references were NS nodes in the past. Now they are + * operand objects of type Reference. */ - status = acpi_ns_attach_object ((acpi_namespace_node *) *entry, src_desc, - src_desc->common.type); - return_ACPI_STATUS (status); - } - + if (ACPI_GET_DESCRIPTOR_TYPE (current_obj_desc) != ACPI_DESC_TYPE_OPERAND) { + ACPI_REPORT_ERROR (("Invalid descriptor type while storing to method arg: %X\n", + current_obj_desc->common.type)); + return_ACPI_STATUS (AE_AML_INTERNAL); + } -#ifdef ACPI_ENABLE_IMPLICIT_CONVERSION - /* - * Perform "Implicit conversion" of the new object to the type of the - * existing object - */ - status = acpi_ex_convert_to_target_type ((*entry)->common.type, &src_desc, walk_state); - if (ACPI_FAILURE (status)) { - goto cleanup; + /* + * If we have a valid reference object that came from ref_of(), do the + * indirect store + */ + if ((current_obj_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) && + (current_obj_desc->reference.opcode == AML_REF_OF_OP)) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, + "Arg (%p) is an obj_ref(Node), storing in node %p\n", + obj_desc, current_obj_desc)); + + /* + * Store this object to the Node + * (perform the indirect store) + */ + status = acpi_ex_store_object_to_node (obj_desc, + current_obj_desc->reference.object, walk_state); + return_ACPI_STATUS (status); + } } -#endif /* * Delete the existing object @@ -722,27 +669,14 @@ acpi_ds_method_data_delete_value (opcode, index, walk_state); } - /* - * Install the Obj_stack descriptor (*Src_desc) into + * Install the obj_stack descriptor (*obj_desc) into * the descriptor for the Arg or Local. * Install the new object in the stack entry * (increments the object reference count by one) */ - status = acpi_ds_method_data_set_entry (opcode, index, src_desc, walk_state); - if (ACPI_FAILURE (status)) { - goto cleanup; - } - - /* Normal exit */ - - return_ACPI_STATUS (AE_OK); - - - /* Error exit */ - -cleanup: - + status = acpi_ds_method_data_set_value (opcode, index, obj_desc, walk_state); return_ACPI_STATUS (status); } + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dsobject.c linux.21rc5-ac2/drivers/acpi/dispatcher/dsobject.c --- linux.21rc5/drivers/acpi/dispatcher/dsobject.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dsobject.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,692 +1,611 @@ /****************************************************************************** * * Module Name: dsobject - Dispatcher object management routines - * $Revision: 81 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" +#include +#include +#include +#include +#include +#include #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dsobject") + ACPI_MODULE_NAME ("dsobject") -/******************************************************************************* +#ifndef ACPI_NO_METHOD_EXECUTION +/***************************************************************************** * - * FUNCTION: Acpi_ds_init_one_object + * FUNCTION: acpi_ds_build_internal_object * - * PARAMETERS: Obj_handle - Node - * Level - Current nesting level - * Context - Points to a init info struct - * Return_value - Not used + * PARAMETERS: walk_state - Current walk state + * Op - Parser object to be translated + * obj_desc_ptr - Where the ACPI internal object is returned * * RETURN: Status * - * DESCRIPTION: Callback from Acpi_walk_namespace. Invoked for every object - * within the namespace. - * - * Currently, the only objects that require initialization are: - * 1) Methods - * 2) Op Regions + * DESCRIPTION: Translate a parser Op object to the equivalent namespace object + * Simple objects are any objects other than a package object! 
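+ *
+ * (For example, an AML_INT_NAMEPATH_OP is resolved to its namespace
+ * node below, while other simple opcodes get a fresh internal object
+ * initialized from the Op via acpi_ds_init_object_from_op.)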
 *
- ******************************************************************************/
+ ****************************************************************************/

 acpi_status
-acpi_ds_init_one_object (
-	acpi_handle             obj_handle,
-	u32                     level,
-	void                    *context,
-	void                    **return_value)
+acpi_ds_build_internal_object (
+	struct acpi_walk_state          *walk_state,
+	union acpi_parse_object         *op,
+	union acpi_operand_object       **obj_desc_ptr)
 {
-	acpi_object_type8       type;
-	acpi_status             status;
-	acpi_init_walk_info     *info = (acpi_init_walk_info *) context;
-	u8                      table_revision;
-
-
-	PROC_NAME ("Ds_init_one_object");
-
-
-	info->object_count++;
-	table_revision = info->table_desc->pointer->revision;
-
-	/*
-	 * We are only interested in objects owned by the table that
-	 * was just loaded
-	 */
-	if (((acpi_namespace_node *) obj_handle)->owner_id !=
-		info->table_desc->table_id) {
-		return (AE_OK);
-	}
-
-
-	/* And even then, we are only interested in a few object types */
-
-	type = acpi_ns_get_type (obj_handle);
-
-	switch (type) {
+	union acpi_operand_object       *obj_desc;
+	acpi_status                     status;

-	case ACPI_TYPE_REGION:
-		acpi_ds_initialize_region (obj_handle);
+	ACPI_FUNCTION_TRACE ("ds_build_internal_object");

-		info->op_region_count++;
-		break;
-
-
-	case ACPI_TYPE_METHOD:
-
-		info->method_count++;
-
-		if (!(acpi_dbg_level & ACPI_LV_INIT)) {
-			ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, "."));
-		}
-
-		/*
-		 * Set the execution data width (32 or 64) based upon the
-		 * revision number of the parent ACPI table.
-		 */
-		if (table_revision == 1) {
-			((acpi_namespace_node *)obj_handle)->flags |= ANOBJ_DATA_WIDTH_32;
-		}
+	*obj_desc_ptr = NULL;
+	if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
 		/*
-		 * Always parse methods to detect errors, we may delete
-		 * the parse tree below
+		 * This is a named object reference. If this name was
+		 * previously looked up in the namespace, it was stored in this op.
+		 * Otherwise, go ahead and look it up now
 		 */
-		status = acpi_ds_parse_method (obj_handle);
-		if (ACPI_FAILURE (status)) {
-			ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Method %p [%4.4s] - parse failure, %s\n",
-				obj_handle, (char*)&((acpi_namespace_node *)obj_handle)->name,
-				acpi_format_exception (status)));
+		if (!op->common.node) {
+			status = acpi_ns_lookup (walk_state->scope_info, op->common.value.string,
+					  ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+					  ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, NULL,
+					  (struct acpi_namespace_node **) &(op->common.node));

-			/* This parse failed, but we will continue parsing more methods */
-
-			break;
+			if (ACPI_FAILURE (status)) {
+				ACPI_REPORT_NSERROR (op->common.value.string, status);
+				return_ACPI_STATUS (status);
+			}
 		}
-
-		/*
-		 * Delete the parse tree. We simple re-parse the method
-		 * for every execution since there isn't much overhead
-		 */
-		acpi_ns_delete_namespace_subtree (obj_handle);
-		break;
-
-	default:
-		break;
 	}

-	/*
-	 * We ignore errors from above, and always return OK, since
-	 * we don't want to abort the walk on a single error.
- */ - return (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ds_initialize_objects - * - * PARAMETERS: None - * - * RETURN: Status - * - * DESCRIPTION: Walk the entire namespace and perform any necessary - * initialization on the objects found therein - * - ******************************************************************************/ - -acpi_status -acpi_ds_initialize_objects ( - acpi_table_desc *table_desc, - acpi_namespace_node *start_node) -{ - acpi_status status; - acpi_init_walk_info info; - - - FUNCTION_TRACE ("Ds_initialize_objects"); - - - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "**** Starting initialization of namespace objects ****\n")); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, "Parsing Methods:")); - - - info.method_count = 0; - info.op_region_count = 0; - info.object_count = 0; - info.table_desc = table_desc; - + /* Create and init the internal ACPI object */ - /* Walk entire namespace from the supplied root */ + obj_desc = acpi_ut_create_internal_object ((acpi_ps_get_opcode_info (op->common.aml_opcode))->object_type); + if (!obj_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } - status = acpi_walk_namespace (ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX, - acpi_ds_init_one_object, &info, NULL); + status = acpi_ds_init_object_from_op (walk_state, op, op->common.aml_opcode, &obj_desc); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Walk_namespace failed! %x\n", status)); + acpi_ut_remove_reference (obj_desc); + return_ACPI_STATUS (status); } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - "\n%d Control Methods found and parsed (%d nodes total)\n", - info.method_count, info.object_count)); - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "%d Control Methods found\n", info.method_count)); - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "%d Op Regions found\n", info.op_region_count)); - + *obj_desc_ptr = obj_desc; return_ACPI_STATUS (AE_OK); } /***************************************************************************** * - * FUNCTION: Acpi_ds_init_object_from_op + * FUNCTION: acpi_ds_build_internal_buffer_obj * - * PARAMETERS: Op - Parser op used to init the internal object - * Opcode - AML opcode associated with the object - * Obj_desc - Namespace object to be initialized + * PARAMETERS: walk_state - Current walk state + * Op - Parser object to be translated + * buffer_length - Length of the buffer + * obj_desc_ptr - Where the ACPI internal object is returned * * RETURN: Status * - * DESCRIPTION: Initialize a namespace object from a parser Op and its - * associated arguments. The namespace object is a more compact - * representation of the Op and its arguments. 
+ * DESCRIPTION: Translate a parser Op package object to the equivalent + * namespace object * ****************************************************************************/ acpi_status -acpi_ds_init_object_from_op ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - u16 opcode, - acpi_operand_object **ret_obj_desc) +acpi_ds_build_internal_buffer_obj ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + u32 buffer_length, + union acpi_operand_object **obj_desc_ptr) { - acpi_status status; - acpi_parse_object *arg; - acpi_parse2_object *byte_list; - acpi_operand_object *arg_desc; - const acpi_opcode_info *op_info; - acpi_operand_object *obj_desc; - + union acpi_parse_object *arg; + union acpi_operand_object *obj_desc; + union acpi_parse_object *byte_list; + u32 byte_list_length = 0; - PROC_NAME ("Ds_init_object_from_op"); + ACPI_FUNCTION_TRACE ("ds_build_internal_buffer_obj"); - obj_desc = *ret_obj_desc; - op_info = acpi_ps_get_opcode_info (opcode); - if (op_info->class == AML_CLASS_UNKNOWN) { - /* Unknown opcode */ - return (AE_TYPE); + obj_desc = *obj_desc_ptr; + if (obj_desc) { + /* + * We are evaluating a Named buffer object "Name (xxxx, Buffer)". + * The buffer object already exists (from the NS node) + */ } + else { + /* Create a new buffer object */ - - /* Get and prepare the first argument */ - - switch (obj_desc->common.type) { - case ACPI_TYPE_BUFFER: - - /* First arg is a number */ - - acpi_ds_create_operand (walk_state, op->value.arg, 0); - arg_desc = walk_state->operands [walk_state->num_operands - 1]; - acpi_ds_obj_stack_pop (1, walk_state); - - /* Resolve the object (could be an arg or local) */ - - status = acpi_ex_resolve_to_value (&arg_desc, walk_state); - if (ACPI_FAILURE (status)) { - acpi_ut_remove_reference (arg_desc); - return (status); + obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_BUFFER); + *obj_desc_ptr = obj_desc; + if (!obj_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); } + } - /* We are expecting a number */ + /* + * Second arg is the buffer data (optional) byte_list can be either + * individual bytes or a string initializer. In either case, a + * byte_list appears in the AML. 
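+	 *
+	 * (Hypothetical ASL example: Name (BUF0, Buffer (8) {0x01, 0x02})
+	 * declares a length of 8 but initializes only two bytes; the larger
+	 * of the two sizes is used below, and ACPI_MEM_CALLOCATE zero-fills
+	 * the uninitialized remainder.)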
+ */ + arg = op->common.value.arg; /* skip first arg */ - if (arg_desc->common.type != ACPI_TYPE_INTEGER) { + byte_list = arg->named.next; + if (byte_list) { + if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Expecting number, got obj: %p type %X\n", - arg_desc, arg_desc->common.type)); - acpi_ut_remove_reference (arg_desc); + "Expecting bytelist, got AML opcode %X in op %p\n", + byte_list->common.aml_opcode, byte_list)); + + acpi_ut_remove_reference (obj_desc); return (AE_TYPE); } - /* Get the value, delete the internal object */ + byte_list_length = (u32) byte_list->common.value.integer; + } - obj_desc->buffer.length = (u32) arg_desc->integer.value; - acpi_ut_remove_reference (arg_desc); + /* + * The buffer length (number of bytes) will be the larger of: + * 1) The specified buffer length and + * 2) The length of the initializer byte list + */ + obj_desc->buffer.length = buffer_length; + if (byte_list_length > buffer_length) { + obj_desc->buffer.length = byte_list_length; + } - /* Allocate the buffer */ + /* Allocate the buffer */ - if (obj_desc->buffer.length == 0) { - obj_desc->buffer.pointer = NULL; - REPORT_WARNING (("Buffer created with zero length in AML\n")); - break; + if (obj_desc->buffer.length == 0) { + obj_desc->buffer.pointer = NULL; + ACPI_REPORT_WARNING (("Buffer created with zero length in AML\n")); + } + else { + obj_desc->buffer.pointer = ACPI_MEM_CALLOCATE ( + obj_desc->buffer.length); + if (!obj_desc->buffer.pointer) { + acpi_ut_delete_object_desc (obj_desc); + return_ACPI_STATUS (AE_NO_MEMORY); } - else { - obj_desc->buffer.pointer = ACPI_MEM_CALLOCATE ( - obj_desc->buffer.length); + /* Initialize buffer from the byte_list (if present) */ - if (!obj_desc->buffer.pointer) { - return (AE_NO_MEMORY); - } + if (byte_list) { + ACPI_MEMCPY (obj_desc->buffer.pointer, byte_list->named.data, + byte_list_length); } + } - /* - * Second arg is the buffer data (optional) Byte_list can be either - * individual bytes or a string initializer. 
- */ - arg = op->value.arg; /* skip first arg */ - - byte_list = (acpi_parse2_object *) arg->next; - if (byte_list) { - if (byte_list->opcode != AML_INT_BYTELIST_OP) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Expecting bytelist, got: %p\n", - byte_list)); - return (AE_TYPE); - } + obj_desc->buffer.flags |= AOPOBJ_DATA_VALID; + op->common.node = (struct acpi_namespace_node *) obj_desc; + return_ACPI_STATUS (AE_OK); +} - MEMCPY (obj_desc->buffer.pointer, byte_list->data, - obj_desc->buffer.length); - } - break; +/***************************************************************************** + * + * FUNCTION: acpi_ds_build_internal_package_obj + * + * PARAMETERS: walk_state - Current walk state + * Op - Parser object to be translated + * package_length - Number of elements in the package + * obj_desc_ptr - Where the ACPI internal object is returned + * + * RETURN: Status + * + * DESCRIPTION: Translate a parser Op package object to the equivalent + * namespace object + * + ****************************************************************************/ +acpi_status +acpi_ds_build_internal_package_obj ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + u32 package_length, + union acpi_operand_object **obj_desc_ptr) +{ + union acpi_parse_object *arg; + union acpi_parse_object *parent; + union acpi_operand_object *obj_desc = NULL; + u32 package_list_length; + acpi_status status = AE_OK; + u32 i; - case ACPI_TYPE_PACKAGE: - /* - * When called, an internal package object has already been built and - * is pointed to by Obj_desc. Acpi_ds_build_internal_object builds another - * internal package object, so remove reference to the original so - * that it is deleted. Error checking is done within the remove - * reference function. - */ - acpi_ut_remove_reference (obj_desc); - status = acpi_ds_build_internal_object (walk_state, op, ret_obj_desc); - break; + ACPI_FUNCTION_TRACE ("ds_build_internal_package_obj"); - case ACPI_TYPE_INTEGER: - obj_desc->integer.value = op->value.integer; - break; + /* Find the parent of a possibly nested package */ - case ACPI_TYPE_STRING: - obj_desc->string.pointer = op->value.string; - obj_desc->string.length = STRLEN (op->value.string); + parent = op->common.parent; + while ((parent->common.aml_opcode == AML_PACKAGE_OP) || + (parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) { + parent = parent->common.parent; + } + obj_desc = *obj_desc_ptr; + if (obj_desc) { /* - * The string is contained in the ACPI table, don't ever try - * to delete it + * We are evaluating a Named package object "Name (xxxx, Package)". 
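+	 * (e.g., hypothetically, Name (PKG0, Package (4) {1, 2})).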
+ * Get the existing package object from the NS node */ - obj_desc->common.flags |= AOPOBJ_STATIC_POINTER; - break; - - - case ACPI_TYPE_METHOD: - break; - - - case INTERNAL_TYPE_REFERENCE: - - switch (op_info->type) { - case AML_TYPE_LOCAL_VARIABLE: - - /* Split the opcode into a base opcode + offset */ - - obj_desc->reference.opcode = AML_LOCAL_OP; - obj_desc->reference.offset = opcode - AML_LOCAL_OP; - break; + } + else { + obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_PACKAGE); + *obj_desc_ptr = obj_desc; + if (!obj_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + obj_desc->package.node = parent->common.node; + } - case AML_TYPE_METHOD_ARGUMENT: + obj_desc->package.count = package_length; - /* Split the opcode into a base opcode + offset */ + /* Count the number of items in the package list */ - obj_desc->reference.opcode = AML_ARG_OP; - obj_desc->reference.offset = opcode - AML_ARG_OP; - break; + package_list_length = 0; + arg = op->common.value.arg; + arg = arg->common.next; + while (arg) { + package_list_length++; + arg = arg->common.next; + } + /* + * The package length (number of elements) will be the greater + * of the specified length and the length of the initializer list + */ + if (package_list_length > package_length) { + obj_desc->package.count = package_list_length; + } - default: /* Constants, Literals, etc.. */ + /* + * Allocate the pointer array (array of pointers to the + * individual objects). Add an extra pointer slot so + * that the list is always null terminated. + */ + obj_desc->package.elements = ACPI_MEM_CALLOCATE ( + ((acpi_size) obj_desc->package.count + 1) * sizeof (void *)); - if (op->opcode == AML_INT_NAMEPATH_OP) { - /* Node was saved in Op */ + if (!obj_desc->package.elements) { + acpi_ut_delete_object_desc (obj_desc); + return_ACPI_STATUS (AE_NO_MEMORY); + } - obj_desc->reference.node = op->node; - } + /* + * Now init the elements of the package + */ + i = 0; + arg = op->common.value.arg; + arg = arg->common.next; + while (arg) { + if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { + /* Object (package or buffer) is already built */ - obj_desc->reference.opcode = opcode; - break; + obj_desc->package.elements[i] = ACPI_CAST_PTR (union acpi_operand_object, arg->common.node); + } + else { + status = acpi_ds_build_internal_object (walk_state, arg, + &obj_desc->package.elements[i]); } - break; - - - default: - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unimplemented data type: %x\n", - obj_desc->common.type)); - - break; + i++; + arg = arg->common.next; } - return (AE_OK); + obj_desc->package.flags |= AOPOBJ_DATA_VALID; + op->common.node = (struct acpi_namespace_node *) obj_desc; + return_ACPI_STATUS (status); } /***************************************************************************** * - * FUNCTION: Acpi_ds_build_internal_simple_obj + * FUNCTION: acpi_ds_create_node * - * PARAMETERS: Op - Parser object to be translated - * Obj_desc_ptr - Where the ACPI internal object is returned + * PARAMETERS: walk_state - Current walk state + * Node - NS Node to be initialized + * Op - Parser object to be translated * * RETURN: Status * - * DESCRIPTION: Translate a parser Op object to the equivalent namespace object - * Simple objects are any objects other than a package object! 
+ * DESCRIPTION: Create the object to be associated with a namespace node * ****************************************************************************/ -static acpi_status -acpi_ds_build_internal_simple_obj ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_operand_object **obj_desc_ptr) +acpi_status +acpi_ds_create_node ( + struct acpi_walk_state *walk_state, + struct acpi_namespace_node *node, + union acpi_parse_object *op) { - acpi_operand_object *obj_desc; - acpi_object_type8 type; - acpi_status status; - u32 length; - char *name; + acpi_status status; + union acpi_operand_object *obj_desc; - FUNCTION_TRACE ("Ds_build_internal_simple_obj"); - - - if (op->opcode == AML_INT_NAMEPATH_OP) { - /* - * This is an object reference. If The name was - * previously looked up in the NS, it is stored in this op. - * Otherwise, go ahead and look it up now - */ - if (!op->node) { - status = acpi_ns_lookup (walk_state->scope_info, - op->value.string, ACPI_TYPE_ANY, - IMODE_EXECUTE, - NS_SEARCH_PARENT | NS_DONT_OPEN_SCOPE, - NULL, - (acpi_namespace_node **)&(op->node)); + ACPI_FUNCTION_TRACE_PTR ("ds_create_node", op); - if (ACPI_FAILURE (status)) { - if (status == AE_NOT_FOUND) { - name = NULL; - acpi_ns_externalize_name (ACPI_UINT32_MAX, op->value.string, &length, &name); - - if (name) { - REPORT_WARNING (("Reference %s at AML %X not found\n", - name, op->aml_offset)); - ACPI_MEM_FREE (name); - } - - else { - REPORT_WARNING (("Reference %s at AML %X not found\n", - op->value.string, op->aml_offset)); - } - - *obj_desc_ptr = NULL; - } - - else { - return_ACPI_STATUS (status); - } - } - } - /* - * The reference will be a Reference - * TBD: [Restructure] unless we really need a separate - * type of INTERNAL_TYPE_REFERENCE change - * Acpi_ds_map_opcode_to_data_type to handle this case - */ - type = INTERNAL_TYPE_REFERENCE; - } - else { - type = acpi_ds_map_opcode_to_data_type (op->opcode, NULL); + /* + * Because of the execution pass through the non-control-method + * parts of the table, we can arrive here twice. 
Only init + * the named object node the first time through + */ + if (acpi_ns_get_attached_object (node)) { + return_ACPI_STATUS (AE_OK); } + if (!op->common.value.arg) { + /* No arguments, there is nothing to do */ - /* Create and init the internal ACPI object */ - - obj_desc = acpi_ut_create_internal_object (type); - if (!obj_desc) { - return_ACPI_STATUS (AE_NO_MEMORY); + return_ACPI_STATUS (AE_OK); } - status = acpi_ds_init_object_from_op (walk_state, op, op->opcode, &obj_desc); + /* Build an internal object for the argument(s) */ + + status = acpi_ds_build_internal_object (walk_state, op->common.value.arg, &obj_desc); if (ACPI_FAILURE (status)) { - acpi_ut_remove_reference (obj_desc); return_ACPI_STATUS (status); } - *obj_desc_ptr = obj_desc; + /* Re-type the object according to its argument */ - return_ACPI_STATUS (AE_OK); + node->type = ACPI_GET_OBJECT_TYPE (obj_desc); + + /* Attach obj to node */ + + status = acpi_ns_attach_object (node, obj_desc, node->type); + + /* Remove local reference to the object */ + + acpi_ut_remove_reference (obj_desc); + return_ACPI_STATUS (status); } +#endif /* ACPI_NO_METHOD_EXECUTION */ + /***************************************************************************** * - * FUNCTION: Acpi_ds_build_internal_package_obj + * FUNCTION: acpi_ds_init_object_from_op * - * PARAMETERS: Op - Parser object to be translated - * Obj_desc_ptr - Where the ACPI internal object is returned + * PARAMETERS: walk_state - Current walk state + * Op - Parser op used to init the internal object + * Opcode - AML opcode associated with the object + * ret_obj_desc - Namespace object to be initialized * * RETURN: Status * - * DESCRIPTION: Translate a parser Op package object to the equivalent - * namespace object + * DESCRIPTION: Initialize a namespace object from a parser Op and its + * associated arguments. The namespace object is a more compact + * representation of the Op and its arguments. 
* ****************************************************************************/ acpi_status -acpi_ds_build_internal_package_obj ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_operand_object **obj_desc_ptr) +acpi_ds_init_object_from_op ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + u16 opcode, + union acpi_operand_object **ret_obj_desc) { - acpi_parse_object *arg; - acpi_operand_object *obj_desc; - acpi_status status = AE_OK; + const struct acpi_opcode_info *op_info; + union acpi_operand_object *obj_desc; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ds_build_internal_package_obj"); + ACPI_FUNCTION_TRACE ("ds_init_object_from_op"); - obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_PACKAGE); - *obj_desc_ptr = obj_desc; - if (!obj_desc) { - return_ACPI_STATUS (AE_NO_MEMORY); + obj_desc = *ret_obj_desc; + op_info = acpi_ps_get_opcode_info (opcode); + if (op_info->class == AML_CLASS_UNKNOWN) { + /* Unknown opcode */ + + return_ACPI_STATUS (AE_TYPE); } - if (op->opcode == AML_VAR_PACKAGE_OP) { + /* Perform per-object initialization */ + + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_BUFFER: + /* - * Variable length package parameters are evaluated JIT + * Defer evaluation of Buffer term_arg operand */ - return_ACPI_STATUS (AE_OK); - } + obj_desc->buffer.node = (struct acpi_namespace_node *) walk_state->operands[0]; + obj_desc->buffer.aml_start = op->named.data; + obj_desc->buffer.aml_length = op->named.length; + break; - /* The first argument must be the package length */ - arg = op->value.arg; - obj_desc->package.count = arg->value.integer32; + case ACPI_TYPE_PACKAGE: - /* - * Allocate the array of pointers (ptrs to the - * individual objects) Add an extra pointer slot so - * that the list is always null terminated. - */ - obj_desc->package.elements = ACPI_MEM_CALLOCATE ( - (obj_desc->package.count + 1) * sizeof (void *)); + /* + * Defer evaluation of Package term_arg operand + */ + obj_desc->package.node = (struct acpi_namespace_node *) walk_state->operands[0]; + obj_desc->package.aml_start = op->named.data; + obj_desc->package.aml_length = op->named.length; + break; - if (!obj_desc->package.elements) { - acpi_ut_delete_object_desc (obj_desc); - return_ACPI_STATUS (AE_NO_MEMORY); - } - obj_desc->package.next_element = obj_desc->package.elements; + case ACPI_TYPE_INTEGER: - /* - * Now init the elements of the package - */ - arg = arg->next; - while (arg) { - if (arg->opcode == AML_PACKAGE_OP) { - status = acpi_ds_build_internal_package_obj (walk_state, arg, - obj_desc->package.next_element); - } + switch (op_info->type) { + case AML_TYPE_CONSTANT: + /* + * Resolve AML Constants here - AND ONLY HERE! + * All constants are integers. + * We mark the integer with a flag that indicates that it started life + * as a constant -- so that stores to constants will perform as expected (noop). + * (zero_op is used as a placeholder for optional target operands.) 
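+		 *
+		 * (Illustration: an ASL operator whose optional Target is
+		 * omitted, such as Add (Local0, 1), carries the zero_op
+		 * placeholder in the target position; the AOPOBJ_AML_CONSTANT
+		 * flag set below makes the implicit store to that placeholder
+		 * a silent no-op.)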
+ */ + obj_desc->common.flags = AOPOBJ_AML_CONSTANT; - else { - status = acpi_ds_build_internal_simple_obj (walk_state, arg, - obj_desc->package.next_element); - } + switch (opcode) { + case AML_ZERO_OP: - obj_desc->package.next_element++; - arg = arg->next; - } + obj_desc->integer.value = 0; + break; - obj_desc->package.flags |= AOPOBJ_DATA_VALID; - return_ACPI_STATUS (status); -} + case AML_ONE_OP: + obj_desc->integer.value = 1; + break; -/***************************************************************************** - * - * FUNCTION: Acpi_ds_build_internal_object - * - * PARAMETERS: Op - Parser object to be translated - * Obj_desc_ptr - Where the ACPI internal object is returned - * - * RETURN: Status - * - * DESCRIPTION: Translate a parser Op object to the equivalent namespace - * object - * - ****************************************************************************/ + case AML_ONES_OP: + + obj_desc->integer.value = ACPI_INTEGER_MAX; + + /* Truncate value if we are executing from a 32-bit ACPI table */ + +#ifndef ACPI_NO_METHOD_EXECUTION + acpi_ex_truncate_for32bit_table (obj_desc); +#endif + break; + + case AML_REVISION_OP: + + obj_desc->integer.value = ACPI_CA_SUPPORT_LEVEL; + break; + + default: + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown constant opcode %X\n", opcode)); + status = AE_AML_OPERAND_TYPE; + break; + } + break; -acpi_status -acpi_ds_build_internal_object ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_operand_object **obj_desc_ptr) -{ - acpi_status status; + case AML_TYPE_LITERAL: + + obj_desc->integer.value = op->common.value.integer; + break; - switch (op->opcode) { - case AML_PACKAGE_OP: - case AML_VAR_PACKAGE_OP: - status = acpi_ds_build_internal_package_obj (walk_state, op, obj_desc_ptr); + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown Integer type %X\n", op_info->type)); + status = AE_AML_OPERAND_TYPE; + break; + } break; - default: + case ACPI_TYPE_STRING: - status = acpi_ds_build_internal_simple_obj (walk_state, op, obj_desc_ptr); + obj_desc->string.pointer = op->common.value.string; + obj_desc->string.length = (u32) ACPI_STRLEN (op->common.value.string); + + /* + * The string is contained in the ACPI table, don't ever try + * to delete it + */ + obj_desc->common.flags |= AOPOBJ_STATIC_POINTER; break; - } - return (status); -} + case ACPI_TYPE_METHOD: + break; -/***************************************************************************** - * - * FUNCTION: Acpi_ds_create_node - * - * PARAMETERS: Op - Parser object to be translated - * Obj_desc_ptr - Where the ACPI internal object is returned - * - * RETURN: Status - * - * DESCRIPTION: - * - ****************************************************************************/ -acpi_status -acpi_ds_create_node ( - acpi_walk_state *walk_state, - acpi_namespace_node *node, - acpi_parse_object *op) -{ - acpi_status status; - acpi_operand_object *obj_desc; + case ACPI_TYPE_LOCAL_REFERENCE: + switch (op_info->type) { + case AML_TYPE_LOCAL_VARIABLE: - FUNCTION_TRACE_PTR ("Ds_create_node", op); + /* Split the opcode into a base opcode + offset */ + obj_desc->reference.opcode = AML_LOCAL_OP; + obj_desc->reference.offset = opcode - AML_LOCAL_OP; - /* - * Because of the execution pass through the non-control-method - * parts of the table, we can arrive here twice. 
Only init - * the named object node the first time through - */ - if (node->object) { - return_ACPI_STATUS (AE_OK); - } +#ifndef ACPI_NO_METHOD_EXECUTION + status = acpi_ds_method_data_get_node (AML_LOCAL_OP, obj_desc->reference.offset, + walk_state, (struct acpi_namespace_node **) &obj_desc->reference.object); +#endif + break; - if (!op->value.arg) { - /* No arguments, there is nothing to do */ - return_ACPI_STATUS (AE_OK); - } + case AML_TYPE_METHOD_ARGUMENT: - /* Build an internal object for the argument(s) */ + /* Split the opcode into a base opcode + offset */ - status = acpi_ds_build_internal_object (walk_state, op->value.arg, &obj_desc); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } + obj_desc->reference.opcode = AML_ARG_OP; + obj_desc->reference.offset = opcode - AML_ARG_OP; + break; - /* Re-type the object according to it's argument */ + default: /* Other literals, etc.. */ - node->type = obj_desc->common.type; + if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) { + /* Node was saved in Op */ - /* Init obj */ + obj_desc->reference.node = op->common.node; + } - status = acpi_ns_attach_object (node, obj_desc, (u8) node->type); + obj_desc->reference.opcode = opcode; + break; + } + break; - /* Remove local reference to the object */ - acpi_ut_remove_reference (obj_desc); + default: + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unimplemented data type: %X\n", + ACPI_GET_OBJECT_TYPE (obj_desc))); + + status = AE_AML_OPERAND_TYPE; + break; + } + return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dsopcode.c linux.21rc5-ac2/drivers/acpi/dispatcher/dsopcode.c --- linux.21rc5/drivers/acpi/dispatcher/dsopcode.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dsopcode.c 2003-05-29 01:02:16.000000000 +0100 @@ -2,126 +2,118 @@ * * Module Name: dsopcode - Dispatcher Op Region support and handling of * "control" opcodes - * $Revision: 56 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "acevents.h" -#include "actables.h" +#include +#include +#include +#include +#include +#include +#include #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dsopcode") + ACPI_MODULE_NAME ("dsopcode") /***************************************************************************** * - * FUNCTION: Acpi_ds_get_buffer_field_arguments + * FUNCTION: acpi_ds_execute_arguments * - * PARAMETERS: Obj_desc - A valid Buffer_field object + * PARAMETERS: Node - Parent NS node + * aml_length - Length of executable AML + * aml_start - Pointer to the AML * * RETURN: Status. * - * DESCRIPTION: Get Buffer_field Buffer and Index. This implements the late - * evaluation of these field attributes. 
+ * DESCRIPTION: Late execution of region or field arguments * ****************************************************************************/ acpi_status -acpi_ds_get_buffer_field_arguments ( - acpi_operand_object *obj_desc) +acpi_ds_execute_arguments ( + struct acpi_namespace_node *node, + struct acpi_namespace_node *scope_node, + u32 aml_length, + u8 *aml_start) { - acpi_operand_object *extra_desc; - acpi_namespace_node *node; - acpi_parse_object *op; - acpi_parse_object *field_op; - acpi_status status; - acpi_table_desc *table_desc; - acpi_walk_state *walk_state; - + acpi_status status; + union acpi_parse_object *op; + struct acpi_walk_state *walk_state; + union acpi_parse_object *arg; - FUNCTION_TRACE_PTR ("Ds_get_buffer_field_arguments", obj_desc); - - if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { - return_ACPI_STATUS (AE_OK); - } - - - /* Get the AML pointer (method object) and Buffer_field node */ - - extra_desc = obj_desc->buffer_field.extra; - node = obj_desc->buffer_field.node; - - DEBUG_EXEC(acpi_ut_display_init_pathname (node, " [Field]")); - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[%4.4s] Buffer_field JIT Init\n", - (char*)&node->name)); + ACPI_FUNCTION_TRACE ("ds_execute_arguments"); /* - * Allocate a new parser op to be the root of the parsed - * Op_region tree + * Allocate a new parser op to be the root of the parsed tree */ - op = acpi_ps_alloc_op (AML_SCOPE_OP); + op = acpi_ps_alloc_op (AML_INT_EVAL_SUBTREE_OP); if (!op) { - return (AE_NO_MEMORY); + return_ACPI_STATUS (AE_NO_MEMORY); } - /* Save the Node for use in Acpi_ps_parse_aml */ - - op->node = acpi_ns_get_parent_object (node); + /* Save the Node for use in acpi_ps_parse_aml */ - /* Get a handle to the parent ACPI table */ - - status = acpi_tb_handle_to_object (node->owner_id, &table_desc); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } + op->common.node = scope_node; /* Create and initialize a new parser state */ - walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, - NULL, NULL, NULL); + walk_state = acpi_ds_create_walk_state (0, NULL, NULL, NULL); if (!walk_state) { return_ACPI_STATUS (AE_NO_MEMORY); } - status = acpi_ds_init_aml_walk (walk_state, op, NULL, extra_desc->extra.aml_start, - extra_desc->extra.aml_length, NULL, NULL, 1); + status = acpi_ds_init_aml_walk (walk_state, op, NULL, aml_start, + aml_length, NULL, NULL, 1); if (ACPI_FAILURE (status)) { - /* TBD: delete walk state */ + acpi_ds_delete_walk_state (walk_state); return_ACPI_STATUS (status); } - /* TBD: No Walk flags?? 
*/ - - walk_state->parse_flags = 0; + walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP; - /* Pass1: Parse the entire Buffer_field declaration */ + /* Pass1: Parse the entire declaration */ status = acpi_ps_parse_aml (walk_state); if (ACPI_FAILURE (status)) { @@ -129,204 +121,250 @@ return_ACPI_STATUS (status); } - /* Get and init the actual Field_unit Op created above */ - - field_op = op->value.arg; - op->node = node; - + /* Get and init the Op created above */ - field_op = op->value.arg; - field_op->node = node; + arg = op->common.value.arg; + op->common.node = node; + arg->common.node = node; acpi_ps_delete_parse_tree (op); - /* Evaluate the address and length arguments for the Op_region */ + /* Evaluate the address and length arguments for the Buffer Field */ - op = acpi_ps_alloc_op (AML_SCOPE_OP); + op = acpi_ps_alloc_op (AML_INT_EVAL_SUBTREE_OP); if (!op) { - return (AE_NO_MEMORY); + return_ACPI_STATUS (AE_NO_MEMORY); } - op->node = acpi_ns_get_parent_object (node); + op->common.node = scope_node; /* Create and initialize a new parser state */ - walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, - NULL, NULL, NULL); + walk_state = acpi_ds_create_walk_state (0, NULL, NULL, NULL); if (!walk_state) { return_ACPI_STATUS (AE_NO_MEMORY); } - status = acpi_ds_init_aml_walk (walk_state, op, NULL, extra_desc->extra.aml_start, - extra_desc->extra.aml_length, NULL, NULL, 3); + status = acpi_ds_init_aml_walk (walk_state, op, NULL, aml_start, + aml_length, NULL, NULL, 3); if (ACPI_FAILURE (status)) { - /* TBD: delete walk state */ + acpi_ds_delete_walk_state (walk_state); return_ACPI_STATUS (status); } status = acpi_ps_parse_aml (walk_state); acpi_ps_delete_parse_tree (op); - - /* - * The pseudo-method object is no longer needed since the region is - * now initialized - */ - acpi_ut_remove_reference (obj_desc->buffer_field.extra); - obj_desc->buffer_field.extra = NULL; - return_ACPI_STATUS (status); } /***************************************************************************** * - * FUNCTION: Acpi_ds_get_region_arguments + * FUNCTION: acpi_ds_get_buffer_field_arguments * - * PARAMETERS: Obj_desc - A valid region object + * PARAMETERS: obj_desc - A valid buffer_field object * * RETURN: Status. * - * DESCRIPTION: Get region address and length. This implements the late - * evaluation of these region attributes. + * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late + * evaluation of these field attributes. 
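+ *
+ * (For example, a hypothetical CreateDWordField (BUF0, IDX0, DWF0)
+ * leaves its Buffer and Index term_args unevaluated at load time;
+ * they are executed here, on first use, via acpi_ds_execute_arguments.)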
* ****************************************************************************/ acpi_status -acpi_ds_get_region_arguments ( - acpi_operand_object *obj_desc) +acpi_ds_get_buffer_field_arguments ( + union acpi_operand_object *obj_desc) { - acpi_operand_object *extra_desc = NULL; - acpi_namespace_node *node; - acpi_parse_object *op; - acpi_parse_object *region_op; - acpi_status status; - acpi_table_desc *table_desc; - acpi_walk_state *walk_state; + union acpi_operand_object *extra_desc; + struct acpi_namespace_node *node; + acpi_status status; - FUNCTION_TRACE_PTR ("Ds_get_region_arguments", obj_desc); + ACPI_FUNCTION_TRACE_PTR ("ds_get_buffer_field_arguments", obj_desc); - if (obj_desc->region.flags & AOPOBJ_DATA_VALID) { + if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { return_ACPI_STATUS (AE_OK); } + /* Get the AML pointer (method object) and buffer_field node */ - /* Get the AML pointer (method object) and region node */ + extra_desc = acpi_ns_get_secondary_object (obj_desc); + node = obj_desc->buffer_field.node; - extra_desc = obj_desc->region.extra; - node = obj_desc->region.node; + ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_BUFFER_FIELD, node, NULL)); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[%4.4s] buffer_field JIT Init\n", + node->name.ascii)); - DEBUG_EXEC(acpi_ut_display_init_pathname (node, " [Operation Region]")); + /* Execute the AML code for the term_arg arguments */ - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[%4.4s] Op_region Init at AML %p\n", - (char*)&node->name, extra_desc->extra.aml_start)); + status = acpi_ds_execute_arguments (node, acpi_ns_get_parent_node (node), + extra_desc->extra.aml_length, extra_desc->extra.aml_start); + return_ACPI_STATUS (status); +} - /* - * Allocate a new parser op to be the root of the parsed - * Op_region tree - */ - op = acpi_ps_alloc_op (AML_SCOPE_OP); - if (!op) { - return (AE_NO_MEMORY); - } - /* Save the Node for use in Acpi_ps_parse_aml */ +/***************************************************************************** + * + * FUNCTION: acpi_ds_get_buffer_arguments + * + * PARAMETERS: obj_desc - A valid Bufferobject + * + * RETURN: Status. + * + * DESCRIPTION: Get Buffer length and initializer byte list. This implements + * the late evaluation of these attributes. + * + ****************************************************************************/ - op->node = acpi_ns_get_parent_object (node); +acpi_status +acpi_ds_get_buffer_arguments ( + union acpi_operand_object *obj_desc) +{ + struct acpi_namespace_node *node; + acpi_status status; - /* Get a handle to the parent ACPI table */ - status = acpi_tb_handle_to_object (node->owner_id, &table_desc); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } + ACPI_FUNCTION_TRACE_PTR ("ds_get_buffer_arguments", obj_desc); - /* Create and initialize a new parser state */ - walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, - op, NULL, NULL); - if (!walk_state) { - return_ACPI_STATUS (AE_NO_MEMORY); + if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { + return_ACPI_STATUS (AE_OK); } - status = acpi_ds_init_aml_walk (walk_state, op, NULL, extra_desc->extra.aml_start, - extra_desc->extra.aml_length, NULL, NULL, 1); - if (ACPI_FAILURE (status)) { - /* TBD: delete walk state */ - return_ACPI_STATUS (status); + /* Get the Buffer node */ + + node = obj_desc->buffer.node; + if (!node) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "No pointer back to NS node in buffer %p\n", obj_desc)); + return_ACPI_STATUS (AE_AML_INTERNAL); } - /* TBD: No Walk flags?? 
 */
+	ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Buffer JIT Init\n"));

-	walk_state->parse_flags = 0;
+	/* Execute the AML code for the term_arg arguments */

-	/* Parse the entire Op_region declaration, creating a parse tree */
+	status = acpi_ds_execute_arguments (node, node,
+			 obj_desc->buffer.aml_length, obj_desc->buffer.aml_start);
+	return_ACPI_STATUS (status);
+}

-	status = acpi_ps_parse_aml (walk_state);
-	if (ACPI_FAILURE (status)) {
-		acpi_ps_delete_parse_tree (op);
-		return_ACPI_STATUS (status);
-	}

-	/* Get and init the actual Region_op created above */
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_package_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid Package object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get Package length and initializer byte list. This implements
+ *              the late evaluation of these attributes.
+ *
+ ****************************************************************************/

-	region_op = op->value.arg;
-	op->node = node;
+acpi_status
+acpi_ds_get_package_arguments (
+	union acpi_operand_object       *obj_desc)
+{
+	struct acpi_namespace_node      *node;
+	acpi_status                     status;

-	region_op = op->value.arg;
-	region_op->node = node;
-	acpi_ps_delete_parse_tree (op);
+	ACPI_FUNCTION_TRACE_PTR ("ds_get_package_arguments", obj_desc);

-	/* Evaluate the address and length arguments for the Op_region */

-	op = acpi_ps_alloc_op (AML_SCOPE_OP);
-	if (!op) {
-		return (AE_NO_MEMORY);
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS (AE_OK);
 	}

-	op->node = acpi_ns_get_parent_object (node);
+	/* Get the Package node */

-	/* Create and initialize a new parser state */
+	node = obj_desc->package.node;
+	if (!node) {
+		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
+			"No pointer back to NS node in package %p\n", obj_desc));
+		return_ACPI_STATUS (AE_AML_INTERNAL);
+	}

-	walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT,
-			   op, NULL, NULL);
-	if (!walk_state) {
-		return_ACPI_STATUS (AE_NO_MEMORY);
+	ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Package JIT Init\n"));
+
+	/* Execute the AML code for the term_arg arguments */
+
+	status = acpi_ds_execute_arguments (node, node,
+			 obj_desc->package.aml_length, obj_desc->package.aml_start);
+	return_ACPI_STATUS (status);
+}
+
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_region_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid region object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get region address and length. This implements the late
+ *              evaluation of these region attributes.
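+ *
+ * (For example, a hypothetical OperationRegion (OPR0, SystemMemory,
+ * ADDR, 0x100) does not evaluate its address and length term_args at
+ * load time; they are executed here via acpi_ds_execute_arguments on
+ * first access to the region.)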
+ * + ****************************************************************************/ + +acpi_status +acpi_ds_get_region_arguments ( + union acpi_operand_object *obj_desc) +{ + struct acpi_namespace_node *node; + acpi_status status; + union acpi_operand_object *extra_desc; + + + ACPI_FUNCTION_TRACE_PTR ("ds_get_region_arguments", obj_desc); + + + if (obj_desc->region.flags & AOPOBJ_DATA_VALID) { + return_ACPI_STATUS (AE_OK); } - status = acpi_ds_init_aml_walk (walk_state, op, NULL, extra_desc->extra.aml_start, - extra_desc->extra.aml_length, NULL, NULL, 3); - if (ACPI_FAILURE (status)) { - /* TBD: delete walk state */ - return_ACPI_STATUS (status); + extra_desc = acpi_ns_get_secondary_object (obj_desc); + if (!extra_desc) { + return_ACPI_STATUS (AE_NOT_EXIST); } - status = acpi_ps_parse_aml (walk_state); - acpi_ps_delete_parse_tree (op); + /* Get the Region node */ + + node = obj_desc->region.node; + ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_REGION, node, NULL)); + + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[%4.4s] op_region Init at AML %p\n", + node->name.ascii, extra_desc->extra.aml_start)); + + + status = acpi_ds_execute_arguments (node, acpi_ns_get_parent_node (node), + extra_desc->extra.aml_length, extra_desc->extra.aml_start); return_ACPI_STATUS (status); } /***************************************************************************** * - * FUNCTION: Acpi_ds_initialize_region + * FUNCTION: acpi_ds_initialize_region * * PARAMETERS: Op - A valid region Op object * * RETURN: Status * - * DESCRIPTION: + * DESCRIPTION: Front end to ev_initialize_region * ****************************************************************************/ acpi_status acpi_ds_initialize_region ( - acpi_handle obj_handle) + acpi_handle obj_handle) { - acpi_operand_object *obj_desc; - acpi_status status; + union acpi_operand_object *obj_desc; + acpi_status status; obj_desc = acpi_ns_get_attached_object (obj_handle); @@ -334,301 +372,276 @@ /* Namespace is NOT locked */ status = acpi_ev_initialize_region (obj_desc, FALSE); - return (status); } /***************************************************************************** * - * FUNCTION: Acpi_ds_eval_buffer_field_operands + * FUNCTION: acpi_ds_init_buffer_field * - * PARAMETERS: Op - A valid Buffer_field Op object + * PARAMETERS: aml_opcode - create_xxx_field + * obj_desc - buffer_field object + * buffer_desc - Host Buffer + * offset_desc - Offset into buffer + * Length - Length of field (CREATE_FIELD_OP only) + * Result - Where to store the result * * RETURN: Status * - * DESCRIPTION: Get Buffer_field Buffer and Index - * Called from Acpi_ds_exec_end_op during Buffer_field parse tree walk - * - * ACPI SPECIFICATION REFERENCES: - * Each of the Buffer Field opcodes is defined as specified in in-line - * comments below. For each one, use the following definitions. 
- * - * Def_bit_field := Bit_field_op Src_buf Bit_idx Destination - * Def_byte_field := Byte_field_op Src_buf Byte_idx Destination - * Def_create_field := Create_field_op Src_buf Bit_idx Num_bits Name_string - * Def_dWord_field := DWord_field_op Src_buf Byte_idx Destination - * Def_word_field := Word_field_op Src_buf Byte_idx Destination - * Bit_index := Term_arg=>Integer - * Byte_index := Term_arg=>Integer - * Destination := Name_string - * Num_bits := Term_arg=>Integer - * Source_buf := Term_arg=>Buffer + * DESCRIPTION: Perform actual initialization of a buffer field * ****************************************************************************/ acpi_status -acpi_ds_eval_buffer_field_operands ( - acpi_walk_state *walk_state, - acpi_parse_object *op) +acpi_ds_init_buffer_field ( + u16 aml_opcode, + union acpi_operand_object *obj_desc, + union acpi_operand_object *buffer_desc, + union acpi_operand_object *offset_desc, + union acpi_operand_object *length_desc, + union acpi_operand_object *result_desc) { - acpi_status status; - acpi_operand_object *obj_desc; - acpi_namespace_node *node; - acpi_parse_object *next_op; - u32 offset; - u32 bit_offset; - u32 bit_count; - u8 field_flags; - acpi_operand_object *res_desc = NULL; - acpi_operand_object *cnt_desc = NULL; - acpi_operand_object *off_desc = NULL; - acpi_operand_object *src_desc = NULL; - - - FUNCTION_TRACE_PTR ("Ds_eval_buffer_field_operands", op); - - - /* - * This is where we evaluate the address and length fields of the - * Create_xxx_field declaration - */ - node = op->node; - - /* Next_op points to the op that holds the Buffer */ - - next_op = op->value.arg; - - /* Acpi_evaluate/create the address and length operands */ - - status = acpi_ds_create_operands (walk_state, next_op); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } + u32 offset; + u32 bit_offset; + u32 bit_count; + u8 field_flags; + acpi_status status; - obj_desc = acpi_ns_get_attached_object (node); - if (!obj_desc) { - return_ACPI_STATUS (AE_NOT_EXIST); - } + ACPI_FUNCTION_TRACE_PTR ("ds_init_buffer_field", obj_desc); - /* Resolve the operands */ - status = acpi_ex_resolve_operands (op->opcode, WALK_OPERANDS, walk_state); - DUMP_OPERANDS (WALK_OPERANDS, IMODE_EXECUTE, acpi_ps_get_opcode_name (op->opcode), - walk_state->num_operands, "after Acpi_ex_resolve_operands"); + /* Host object must be a Buffer */ - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "(%s) bad operand(s) (%X)\n", - acpi_ps_get_opcode_name (op->opcode), status)); + if (ACPI_GET_OBJECT_TYPE (buffer_desc) != ACPI_TYPE_BUFFER) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Target of Create Field is not a Buffer object - %s\n", + acpi_ut_get_object_type_name (buffer_desc))); + status = AE_AML_OPERAND_TYPE; goto cleanup; } - /* Get the operands */ - - if (AML_CREATE_FIELD_OP == op->opcode) { - res_desc = walk_state->operands[3]; - cnt_desc = walk_state->operands[2]; - } - else { - res_desc = walk_state->operands[2]; - } - - off_desc = walk_state->operands[1]; - src_desc = walk_state->operands[0]; - - - offset = (u32) off_desc->integer.value; - /* - * If Res_desc is a Name, it will be a direct name pointer after - * Acpi_ex_resolve_operands() + * The last parameter to all of these opcodes (result_desc) started + * out as a name_string, and should therefore now be a NS node + * after resolution in acpi_ex_resolve_operands(). 
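+	 *
+	 * (For instance, in a hypothetical CreateDWordField (BUF0, 4, DWF0),
+	 * DWF0 is the result operand and resolves to the namespace node
+	 * that will receive the new buffer_field object.)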
*/ - if (!VALID_DESCRIPTOR_TYPE (res_desc, ACPI_DESC_TYPE_NAMED)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "(%s) destination must be a Node\n", - acpi_ps_get_opcode_name (op->opcode))); + if (ACPI_GET_DESCRIPTOR_TYPE (result_desc) != ACPI_DESC_TYPE_NAMED) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "(%s) destination must be a NS Node\n", + acpi_ps_get_opcode_name (aml_opcode))); status = AE_AML_OPERAND_TYPE; goto cleanup; } + offset = (u32) offset_desc->integer.value; + /* * Setup the Bit offsets and counts, according to the opcode */ - switch (op->opcode) { - - /* Def_create_field */ - + switch (aml_opcode) { case AML_CREATE_FIELD_OP: /* Offset is in bits, count is in bits */ - bit_offset = offset; - bit_count = (u32) cnt_desc->integer.value; - field_flags = ACCESS_BYTE_ACC; + bit_offset = offset; + bit_count = (u32) length_desc->integer.value; + field_flags = AML_FIELD_ACCESS_BYTE; break; - - /* Def_create_bit_field */ - case AML_CREATE_BIT_FIELD_OP: /* Offset is in bits, Field is one bit */ - bit_offset = offset; - bit_count = 1; - field_flags = ACCESS_BYTE_ACC; + bit_offset = offset; + bit_count = 1; + field_flags = AML_FIELD_ACCESS_BYTE; break; - - /* Def_create_byte_field */ - case AML_CREATE_BYTE_FIELD_OP: /* Offset is in bytes, field is one byte */ - bit_offset = 8 * offset; - bit_count = 8; - field_flags = ACCESS_BYTE_ACC; + bit_offset = 8 * offset; + bit_count = 8; + field_flags = AML_FIELD_ACCESS_BYTE; break; - - /* Def_create_word_field */ - case AML_CREATE_WORD_FIELD_OP: /* Offset is in bytes, field is one word */ - bit_offset = 8 * offset; - bit_count = 16; - field_flags = ACCESS_WORD_ACC; + bit_offset = 8 * offset; + bit_count = 16; + field_flags = AML_FIELD_ACCESS_WORD; break; - - /* Def_create_dWord_field */ - case AML_CREATE_DWORD_FIELD_OP: /* Offset is in bytes, field is one dword */ - bit_offset = 8 * offset; - bit_count = 32; - field_flags = ACCESS_DWORD_ACC; + bit_offset = 8 * offset; + bit_count = 32; + field_flags = AML_FIELD_ACCESS_DWORD; break; - - /* Def_create_qWord_field */ - case AML_CREATE_QWORD_FIELD_OP: /* Offset is in bytes, field is one qword */ - bit_offset = 8 * offset; - bit_count = 64; - field_flags = ACCESS_QWORD_ACC; + bit_offset = 8 * offset; + bit_count = 64; + field_flags = AML_FIELD_ACCESS_QWORD; break; - default: ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Internal error - unknown field creation opcode %02x\n", - op->opcode)); + "Unknown field creation opcode %02x\n", + aml_opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; } + /* Entire field must fit within the current length of the buffer */ + + if ((bit_offset + bit_count) > + (8 * (u32) buffer_desc->buffer.length)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Field size %d exceeds Buffer size %d (bits)\n", + bit_offset + bit_count, 8 * (u32) buffer_desc->buffer.length)); + status = AE_AML_BUFFER_LIMIT; + goto cleanup; + } + /* - * Setup field according to the object type + * Initialize areas of the field object that are common to all fields + * For field_flags, use LOCK_RULE = 0 (NO_LOCK), UPDATE_RULE = 0 (UPDATE_PRESERVE) */ - switch (src_desc->common.type) { + status = acpi_ex_prep_common_field_object (obj_desc, field_flags, 0, + bit_offset, bit_count); + if (ACPI_FAILURE (status)) { + goto cleanup; + } - /* Source_buff := Term_arg=>Buffer */ + obj_desc->buffer_field.buffer_obj = buffer_desc; - case ACPI_TYPE_BUFFER: + /* Reference count for buffer_desc inherits obj_desc count */ - if ((bit_offset + bit_count) > - (8 * (u32) src_desc->buffer.length)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Field size %d 
exceeds Buffer size %d (bits)\n", - bit_offset + bit_count, 8 * (u32) src_desc->buffer.length)); - status = AE_AML_BUFFER_LIMIT; - goto cleanup; - } + buffer_desc->common.reference_count = (u16) (buffer_desc->common.reference_count + + obj_desc->common.reference_count); - /* - * Initialize areas of the field object that are common to all fields - * For Field_flags, use LOCK_RULE = 0 (NO_LOCK), UPDATE_RULE = 0 (UPDATE_PRESERVE) - */ - status = acpi_ex_prep_common_field_object (obj_desc, field_flags, - bit_offset, bit_count); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } +cleanup: - obj_desc->buffer_field.buffer_obj = src_desc; + /* Always delete the operands */ - /* Reference count for Src_desc inherits Obj_desc count */ + acpi_ut_remove_reference (offset_desc); + acpi_ut_remove_reference (buffer_desc); - src_desc->common.reference_count = (u16) (src_desc->common.reference_count + - obj_desc->common.reference_count); + if (aml_opcode == AML_CREATE_FIELD_OP) { + acpi_ut_remove_reference (length_desc); + } - break; + /* On failure, delete the result descriptor */ + if (ACPI_FAILURE (status)) { + acpi_ut_remove_reference (result_desc); /* Result descriptor */ + } + else { + /* Now the address and length are valid for this buffer_field */ - /* Improper object type */ + obj_desc->buffer_field.flags |= AOPOBJ_DATA_VALID; + } - default: + return_ACPI_STATUS (status); +} - if ((src_desc->common.type > (u8) INTERNAL_TYPE_REFERENCE) || !acpi_ut_valid_object_type (src_desc->common.type)) /* TBD: This line MUST be a single line until Acpi_src can handle it (block deletion) */ { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Tried to create field in invalid object type %X\n", - src_desc->common.type)); - } - else { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Tried to create field in improper object type - %s\n", - acpi_ut_get_type_name (src_desc->common.type))); - } +/***************************************************************************** + * + * FUNCTION: acpi_ds_eval_buffer_field_operands + * + * PARAMETERS: walk_state - Current walk + * Op - A valid buffer_field Op object + * + * RETURN: Status + * + * DESCRIPTION: Get buffer_field Buffer and Index + * Called from acpi_ds_exec_end_op during buffer_field parse tree walk + * + ****************************************************************************/ - status = AE_AML_OPERAND_TYPE; - goto cleanup; - } +acpi_status +acpi_ds_eval_buffer_field_operands ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op) +{ + acpi_status status; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + union acpi_parse_object *next_op; - if (AML_CREATE_FIELD_OP == op->opcode) { - /* Delete object descriptor unique to Create_field */ + ACPI_FUNCTION_TRACE_PTR ("ds_eval_buffer_field_operands", op); - acpi_ut_remove_reference (cnt_desc); - cnt_desc = NULL; - } + /* + * This is where we evaluate the address and length fields of the + * create_xxx_field declaration + */ + node = op->common.node; + + /* next_op points to the op that holds the Buffer */ -cleanup: + next_op = op->common.value.arg; - /* Always delete the operands */ + /* Evaluate/create the address and length operands */ - acpi_ut_remove_reference (off_desc); - acpi_ut_remove_reference (src_desc); + status = acpi_ds_create_operands (walk_state, next_op); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - if (AML_CREATE_FIELD_OP == op->opcode) { - acpi_ut_remove_reference (cnt_desc); + obj_desc = acpi_ns_get_attached_object (node); + if 
(!obj_desc) { + return_ACPI_STATUS (AE_NOT_EXIST); } - /* On failure, delete the result descriptor */ + /* Resolve the operands */ + + status = acpi_ex_resolve_operands (op->common.aml_opcode, + ACPI_WALK_OPERANDS, walk_state); + + ACPI_DUMP_OPERANDS (ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, + acpi_ps_get_opcode_name (op->common.aml_opcode), + walk_state->num_operands, "after acpi_ex_resolve_operands"); if (ACPI_FAILURE (status)) { - acpi_ut_remove_reference (res_desc); /* Result descriptor */ + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "(%s) bad operand(s) (%X)\n", + acpi_ps_get_opcode_name (op->common.aml_opcode), status)); + + return_ACPI_STATUS (status); } + /* Initialize the Buffer Field */ + + if (op->common.aml_opcode == AML_CREATE_FIELD_OP) { + /* NOTE: Slightly different operands for this opcode */ + + status = acpi_ds_init_buffer_field (op->common.aml_opcode, obj_desc, + walk_state->operands[0], walk_state->operands[1], + walk_state->operands[2], walk_state->operands[3]); + } else { - /* Now the address and length are valid for this Buffer_field */ + /* All other, create_xxx_field opcodes */ - obj_desc->buffer_field.flags |= AOPOBJ_DATA_VALID; + status = acpi_ds_init_buffer_field (op->common.aml_opcode, obj_desc, + walk_state->operands[0], walk_state->operands[1], + NULL, walk_state->operands[2]); } return_ACPI_STATUS (status); @@ -637,46 +650,47 @@ /***************************************************************************** * - * FUNCTION: Acpi_ds_eval_region_operands + * FUNCTION: acpi_ds_eval_region_operands * - * PARAMETERS: Op - A valid region Op object + * PARAMETERS: walk_state - Current walk + * Op - A valid region Op object * * RETURN: Status * * DESCRIPTION: Get region address and length - * Called from Acpi_ds_exec_end_op during Op_region parse tree walk + * Called from acpi_ds_exec_end_op during op_region parse tree walk * ****************************************************************************/ acpi_status acpi_ds_eval_region_operands ( - acpi_walk_state *walk_state, - acpi_parse_object *op) + struct acpi_walk_state *walk_state, + union acpi_parse_object *op) { - acpi_status status; - acpi_operand_object *obj_desc; - acpi_operand_object *operand_desc; - acpi_namespace_node *node; - acpi_parse_object *next_op; + acpi_status status; + union acpi_operand_object *obj_desc; + union acpi_operand_object *operand_desc; + struct acpi_namespace_node *node; + union acpi_parse_object *next_op; - FUNCTION_TRACE_PTR ("Ds_eval_region_operands", op); + ACPI_FUNCTION_TRACE_PTR ("ds_eval_region_operands", op); /* - * This is where we evaluate the address and length fields of the Op_region declaration + * This is where we evaluate the address and length fields of the op_region declaration */ - node = op->node; + node = op->common.node; - /* Next_op points to the op that holds the Space_iD */ + /* next_op points to the op that holds the space_iD */ - next_op = op->value.arg; + next_op = op->common.value.arg; - /* Next_op points to address op */ + /* next_op points to address op */ - next_op = next_op->next; + next_op = next_op->common.next; - /* Acpi_evaluate/create the address and length operands */ + /* Evaluate/create the address and length operands */ status = acpi_ds_create_operands (walk_state, next_op); if (ACPI_FAILURE (status)) { @@ -685,15 +699,14 @@ /* Resolve the length and address operands to numbers */ - status = acpi_ex_resolve_operands (op->opcode, WALK_OPERANDS, walk_state); + status = acpi_ex_resolve_operands (op->common.aml_opcode, ACPI_WALK_OPERANDS, walk_state); if 
(ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - DUMP_OPERANDS (WALK_OPERANDS, IMODE_EXECUTE, - acpi_ps_get_opcode_name (op->opcode), - 1, "after Acpi_ex_resolve_operands"); - + ACPI_DUMP_OPERANDS (ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, + acpi_ps_get_opcode_name (op->common.aml_opcode), + 1, "after acpi_ex_resolve_operands"); obj_desc = acpi_ns_get_attached_object (node); if (!obj_desc) { @@ -715,12 +728,12 @@ */ operand_desc = walk_state->operands[walk_state->num_operands - 2]; - obj_desc->region.address = (ACPI_PHYSICAL_ADDRESS) operand_desc->integer.value; + obj_desc->region.address = (acpi_physical_address) operand_desc->integer.value; acpi_ut_remove_reference (operand_desc); - - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Rgn_obj %p Addr %8.8X%8.8X Len %X\n", - obj_desc, HIDWORD(obj_desc->region.address), LODWORD(obj_desc->region.address), + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "rgn_obj %p Addr %8.8X%8.8X Len %X\n", + obj_desc, + ACPI_HIDWORD (obj_desc->region.address), ACPI_LODWORD (obj_desc->region.address), obj_desc->region.length)); /* Now the address and length are valid for this opregion */ @@ -731,11 +744,106 @@ } +/***************************************************************************** + * + * FUNCTION: acpi_ds_eval_data_object_operands + * + * PARAMETERS: walk_state - Current walk + * Op - A valid data_object Op object + * obj_desc - data_object + * + * RETURN: Status + * + * DESCRIPTION: Get the operands and complete the following data object types: + * Buffer + * Package + * + ****************************************************************************/ + +acpi_status +acpi_ds_eval_data_object_operands ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + union acpi_operand_object *obj_desc) +{ + acpi_status status; + union acpi_operand_object *arg_desc; + u32 length; + + + ACPI_FUNCTION_TRACE ("ds_eval_data_object_operands"); + + + /* The first operand (for all of these data objects) is the length */ + + status = acpi_ds_create_operand (walk_state, op->common.value.arg, 1); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + status = acpi_ex_resolve_operands (walk_state->opcode, + &(walk_state->operands [walk_state->num_operands -1]), + walk_state); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Extract length operand */ + + arg_desc = walk_state->operands [walk_state->num_operands - 1]; + length = (u32) arg_desc->integer.value; + + /* Cleanup for length operand */ + + status = acpi_ds_obj_stack_pop (1, walk_state); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + acpi_ut_remove_reference (arg_desc); + + /* + * Create the actual data object + */ + switch (op->common.aml_opcode) { + case AML_BUFFER_OP: + + status = acpi_ds_build_internal_buffer_obj (walk_state, op, length, &obj_desc); + break; + + case AML_PACKAGE_OP: + case AML_VAR_PACKAGE_OP: + + status = acpi_ds_build_internal_package_obj (walk_state, op, length, &obj_desc); + break; + + default: + return_ACPI_STATUS (AE_AML_BAD_OPCODE); + } + + if (ACPI_SUCCESS (status)) { + /* + * Return the object in the walk_state, unless the parent is a package -- + * in this case, the return object will be stored in the parse tree + * for the package. 
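
The condition that closes this comment is easiest to read as a question: will anything above this Op consume the new object? A minimal paraphrase as a C helper, kept only as an illustration; the helper name is hypothetical and appears nowhere in the patch:

static u8
result_goes_to_walk_state (
	union acpi_parse_object         *op)
{
	union acpi_parse_object         *parent = op->common.parent;

	/* No parent at all: nothing above us can consume the object */

	if (!parent) {
		return (TRUE);
	}

	/* Package, var_package and Name keep the object in the parse tree */

	switch (parent->common.aml_opcode) {
	case AML_PACKAGE_OP:
	case AML_VAR_PACKAGE_OP:
	case AML_NAME_OP:
		return (FALSE);

	default:
		return (TRUE);
	}
}
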
+ */ + if ((!op->common.parent) || + ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) && + (op->common.parent->common.aml_opcode != AML_VAR_PACKAGE_OP) && + (op->common.parent->common.aml_opcode != AML_NAME_OP))) { + walk_state->result_obj = obj_desc; + } + } + + return_ACPI_STATUS (status); +} + + /******************************************************************************* * - * FUNCTION: Acpi_ds_exec_begin_control_op + * FUNCTION: acpi_ds_exec_begin_control_op * - * PARAMETERS: Walk_list - The list that owns the walk stack + * PARAMETERS: walk_list - The list that owns the walk stack * Op - The control Op * * RETURN: Status @@ -747,20 +855,20 @@ acpi_status acpi_ds_exec_begin_control_op ( - acpi_walk_state *walk_state, - acpi_parse_object *op) + struct acpi_walk_state *walk_state, + union acpi_parse_object *op) { - acpi_status status = AE_OK; - acpi_generic_state *control_state; + acpi_status status = AE_OK; + union acpi_generic_state *control_state; - PROC_NAME ("Ds_exec_begin_control_op"); + ACPI_FUNCTION_NAME ("ds_exec_begin_control_op"); ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op, - op->opcode, walk_state)); + op->common.aml_opcode, walk_state)); - switch (op->opcode) { + switch (op->common.aml_opcode) { case AML_IF_OP: case AML_WHILE_OP: @@ -774,19 +882,19 @@ status = AE_NO_MEMORY; break; } - - acpi_ut_push_generic_state (&walk_state->control_state, control_state); - /* * Save a pointer to the predicate for multiple executions * of a loop */ - walk_state->control_state->control.aml_predicate_start = - walk_state->parser_state.aml - 1; - /* TBD: can this be removed? */ - /*Acpi_ps_pkg_length_encoding_size (GET8 (Walk_state->Parser_state->Aml));*/ - break; + control_state->control.aml_predicate_start = walk_state->parser_state.aml - 1; + control_state->control.package_end = walk_state->parser_state.pkg_end; + control_state->control.opcode = op->common.aml_opcode; + + /* Push the control state on this walk's control stack */ + + acpi_ut_push_generic_state (&walk_state->control_state, control_state); + break; case AML_ELSE_OP: @@ -799,12 +907,10 @@ break; - case AML_RETURN_OP: break; - default: break; } @@ -815,9 +921,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_exec_end_control_op + * FUNCTION: acpi_ds_exec_end_control_op * - * PARAMETERS: Walk_list - The list that owns the walk stack + * PARAMETERS: walk_list - The list that owns the walk stack * Op - The control Op * * RETURN: Status @@ -825,22 +931,21 @@ * DESCRIPTION: Handles all control ops encountered during control method * execution. 
* - * ******************************************************************************/ acpi_status acpi_ds_exec_end_control_op ( - acpi_walk_state *walk_state, - acpi_parse_object *op) + struct acpi_walk_state *walk_state, + union acpi_parse_object *op) { - acpi_status status = AE_OK; - acpi_generic_state *control_state; + acpi_status status = AE_OK; + union acpi_generic_state *control_state; - PROC_NAME ("Ds_exec_end_control_op"); + ACPI_FUNCTION_NAME ("ds_exec_end_control_op"); - switch (op->opcode) { + switch (op->common.aml_opcode) { case AML_IF_OP: ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op)); @@ -890,17 +995,17 @@ case AML_RETURN_OP: ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "[RETURN_OP] Op=%p Arg=%p\n",op, op->value.arg)); + "[RETURN_OP] Op=%p Arg=%p\n",op, op->common.value.arg)); /* * One optional operand -- the return value * It can be either an immediate operand or a result that * has been bubbled up the tree */ - if (op->value.arg) { + if (op->common.value.arg) { /* Return statement has an immediate operand */ - status = acpi_ds_create_operands (walk_state, op->value.arg); + status = acpi_ds_create_operands (walk_state, op->common.value.arg); if (ACPI_FAILURE (status)) { return (status); } @@ -917,12 +1022,11 @@ /* * Get the return value and save as the last result - * value. This is the only place where Walk_state->Return_desc + * value. This is the only place where walk_state->return_desc * is set to anything other than zero! */ walk_state->return_desc = walk_state->operands[0]; } - else if ((walk_state->results) && (walk_state->results->results.num_results > 0)) { /* @@ -934,18 +1038,17 @@ * * Allow references created by the Index operator to return unchanged. */ - if (VALID_DESCRIPTOR_TYPE (walk_state->results->results.obj_desc [0], ACPI_DESC_TYPE_INTERNAL) && - ((walk_state->results->results.obj_desc [0])->common.type == INTERNAL_TYPE_REFERENCE) && + if ((ACPI_GET_DESCRIPTOR_TYPE (walk_state->results->results.obj_desc[0]) == ACPI_DESC_TYPE_OPERAND) && + (ACPI_GET_OBJECT_TYPE (walk_state->results->results.obj_desc [0]) == ACPI_TYPE_LOCAL_REFERENCE) && ((walk_state->results->results.obj_desc [0])->reference.opcode != AML_INDEX_OP)) { - status = acpi_ex_resolve_to_value (&walk_state->results->results.obj_desc [0], walk_state); - if (ACPI_FAILURE (status)) { - return (status); - } + status = acpi_ex_resolve_to_value (&walk_state->results->results.obj_desc [0], walk_state); + if (ACPI_FAILURE (status)) { + return (status); + } } walk_state->return_desc = walk_state->results->results.obj_desc [0]; } - else { /* No return operand */ @@ -960,7 +1063,7 @@ ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "Completed RETURN_OP State=%p, Ret_val=%p\n", + "Completed RETURN_OP State=%p, ret_val=%p\n", walk_state, walk_state->return_desc)); /* End the control method execution right now */ @@ -979,7 +1082,7 @@ /* Call up to the OS service layer to handle this */ - acpi_os_signal (ACPI_SIGNAL_BREAKPOINT, "Executed AML Breakpoint opcode"); + status = acpi_os_signal (ACPI_SIGNAL_BREAKPOINT, "Executed AML Breakpoint opcode"); /* If and when it returns, all done. 
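
The hunk just ahead replaces the old "Break terminates the current package" behavior with the ACPI 2.0 semantics: pop and delete control states until the innermost While is found. A self-contained toy in plain C, sketching only the stack discipline (all toy_* names are hypothetical, not patch code):

#include <stdio.h>

/* Toy stand-in for the dispatcher's control-state stack */
struct toy_state {
	int opcode;                     /* TOY_IF or TOY_WHILE */
	struct toy_state *next;
};

enum { TOY_IF = 1, TOY_WHILE = 2 };

static struct toy_state *toy_pop (struct toy_state **top)
{
	struct toy_state *s = *top;

	if (s) {
		*top = s->next;
	}
	return (s);
}

int main (void)
{
	/* Stack built by "While { If { If { Break } } }" */
	struct toy_state w  = { TOY_WHILE, NULL };
	struct toy_state i1 = { TOY_IF, &w };
	struct toy_state i2 = { TOY_IF, &i1 };
	struct toy_state *top = &i2;
	int popped = 0;

	/* Mirror of the new Break/Continue loop: discard non-While states */

	while (top && top->opcode != TOY_WHILE) {
		(void) toy_pop (&top);  /* kernel: acpi_ut_delete_generic_state */
		popped++;
	}

	printf ("popped %d state(s); %s\n", popped,
		top ? "resume at While's package_end" : "AE_AML_NO_WHILE");
	return (0);
}
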
*/ @@ -987,43 +1090,47 @@ case AML_BREAK_OP: + case AML_CONTINUE_OP: /* ACPI 2.0 */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Break to end of current package, Op=%p\n", op)); - /* TBD: update behavior for ACPI 2.0 */ + /* Pop and delete control states until we find a while */ - /* - * As per the ACPI specification: - * "The break operation causes the current package - * execution to complete" - * "Break -- Stop executing the current code package - * at this point" - * - * Returning AE_FALSE here will cause termination of - * the current package, and execution will continue one - * level up, starting with the completion of the parent Op. - */ - status = AE_CTRL_FALSE; - break; + while (walk_state->control_state && + (walk_state->control_state->control.opcode != AML_WHILE_OP)) { + control_state = acpi_ut_pop_generic_state (&walk_state->control_state); + acpi_ut_delete_generic_state (control_state); + } + /* No while found? */ - case AML_CONTINUE_OP: /* ACPI 2.0 */ + if (!walk_state->control_state) { + return (AE_AML_NO_WHILE); + } + + /* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */ + + walk_state->aml_last_while = walk_state->control_state->control.package_end; - status = AE_NOT_IMPLEMENTED; + /* Return status depending on opcode */ + + if (op->common.aml_opcode == AML_BREAK_OP) { + status = AE_CTRL_BREAK; + } + else { + status = AE_CTRL_CONTINUE; + } break; default: ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown control opcode=%X Op=%p\n", - op->opcode, op)); + op->common.aml_opcode, op)); status = AE_AML_BAD_OPCODE; break; } - return (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dsutils.c linux.21rc5-ac2/drivers/acpi/dispatcher/dsutils.c --- linux.21rc5/drivers/acpi/dispatcher/dsutils.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dsutils.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,48 +1,67 @@ /******************************************************************************* * * Module Name: dsutils - Dispatcher utilities - * $Revision: 80 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "acdebug.h" +#include <acpi/acpi.h> +#include <acpi/acparser.h> +#include <acpi/amlcode.h> +#include <acpi/acdispat.h> +#include <acpi/acinterp.h> +#include <acpi/acnamesp.h> +#include <acpi/acdebug.h> #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dsutils") + ACPI_MODULE_NAME ("dsutils") +#ifndef ACPI_NO_METHOD_EXECUTION /******************************************************************************* * - * FUNCTION: Acpi_ds_is_result_used + * FUNCTION: acpi_ds_is_result_used * * PARAMETERS: Op - * Result_obj - * Walk_state + * result_obj + * walk_state * * RETURN: Status * @@ -52,13 +71,13 @@ u8 acpi_ds_is_result_used ( - acpi_parse_object *op, - acpi_walk_state *walk_state) + union acpi_parse_object *op, + struct acpi_walk_state *walk_state) { - const acpi_opcode_info *parent_info; + const struct acpi_opcode_info *parent_info; - FUNCTION_TRACE_PTR ("Ds_is_result_used", op); + ACPI_FUNCTION_TRACE_PTR ("ds_is_result_used", op); /* Must have both an Op and a Result Object */ @@ -68,29 +87,25 @@ return_VALUE (TRUE); } - /* * If there is no parent, the result can't possibly be used! * (An executing method typically has no parent, since each * method is parsed separately) However, a method that is * invoked from another method has a parent. */ - if (!op->parent) { + if (!op->common.parent) { return_VALUE (FALSE); } - /* * Get info on the parent. The root Op is AML_SCOPE */ - - parent_info = acpi_ps_get_opcode_info (op->parent->opcode); + parent_info = acpi_ps_get_opcode_info (op->common.parent->common.aml_opcode); if (parent_info->class == AML_CLASS_UNKNOWN) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown parent opcode. Op=%p\n", op)); return_VALUE (FALSE); } - /* * Decide what to do with the result based on the parent. If * the parent opcode will not use the result, delete the object. @@ -98,88 +113,100 @@ * as an operand later. 
*/ switch (parent_info->class) { - /* - * In these cases, the parent will never use the return object - */ - case AML_CLASS_CONTROL: /* IF, ELSE, WHILE only */ + case AML_CLASS_CONTROL: - switch (op->parent->opcode) { + switch (op->common.parent->common.aml_opcode) { case AML_RETURN_OP: /* Never delete the return value associated with a return opcode */ - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "Result used, [RETURN] opcode=%X Op=%p\n", op->opcode, op)); - return_VALUE (TRUE); - break; + goto result_used; case AML_IF_OP: case AML_WHILE_OP: /* * If we are executing the predicate AND this is the predicate op, - * we will use the return value! + * we will use the return value */ - if ((walk_state->control_state->common.state == CONTROL_PREDICATE_EXECUTING) && + if ((walk_state->control_state->common.state == ACPI_CONTROL_PREDICATE_EXECUTING) && (walk_state->control_state->control.predicate_op == op)) { - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "Result used as a predicate, [IF/WHILE] opcode=%X Op=%p\n", - op->opcode, op)); - return_VALUE (TRUE); + goto result_used; } + break; + default: + /* Ignore other control opcodes */ break; } + /* The general control opcode returns no result */ - /* Fall through to not used case below */ + goto result_not_used; - case AML_CLASS_NAMED_OBJECT: /* Scope, method, etc. */ case AML_CLASS_CREATE: /* - * These opcodes allow Term_arg(s) as operands and therefore - * method calls. The result is used. + * These opcodes allow term_arg(s) as operands and therefore + * the operands can be method calls. The result is used. */ - if ((op->parent->opcode == AML_REGION_OP) || - (op->parent->opcode == AML_CREATE_FIELD_OP) || - (op->parent->opcode == AML_CREATE_BIT_FIELD_OP) || - (op->parent->opcode == AML_CREATE_BYTE_FIELD_OP) || - (op->parent->opcode == AML_CREATE_WORD_FIELD_OP) || - (op->parent->opcode == AML_CREATE_DWORD_FIELD_OP) || - (op->parent->opcode == AML_CREATE_QWORD_FIELD_OP)) { - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "Result used, [Region or Create_field] opcode=%X Op=%p\n", - op->opcode, op)); - return_VALUE (TRUE); + goto result_used; + + + case AML_CLASS_NAMED_OBJECT: + + if ((op->common.parent->common.aml_opcode == AML_REGION_OP) || + (op->common.parent->common.aml_opcode == AML_DATA_REGION_OP) || + (op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || + (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) || + (op->common.parent->common.aml_opcode == AML_BUFFER_OP) || + (op->common.parent->common.aml_opcode == AML_INT_EVAL_SUBTREE_OP)) { + /* + * These opcodes allow term_arg(s) as operands and therefore + * the operands can be method calls. The result is used. + */ + goto result_used; } - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "Result not used, Parent opcode=%X Op=%p\n", op->opcode, op)); + goto result_not_used; - return_VALUE (FALSE); - break; - /* - * In all other cases. the parent will actually use the return - * object, so keep it. - */ default: - break; + + /* + * In all other cases. the parent will actually use the return + * object, so keep it. 
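
Condensed, the classification above answers one question per parent opcode: can anything above this Op consume the value? A few hypothetical ASL cases, given as a C comment since C is this file's language (examples mine, not from the patch):

/*
 * Store (Add (1, 2), Local0)  // parent (Store) consumes the sum: used
 * If (LEqual (X1, One)) { }   // predicate result: used while it executes
 * Add (1, 2)                  // bare statement, no consumer: the object
 *                             // is released via ds_delete_result_if_not_used
 */
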
+ */ + goto result_used; } + +result_used: + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Result of [%s] used by Parent [%s] Op=%p\n", + acpi_ps_get_opcode_name (op->common.aml_opcode), + acpi_ps_get_opcode_name (op->common.parent->common.aml_opcode), op)); + return_VALUE (TRUE); + + +result_not_used: + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Result of [%s] not used by Parent [%s] Op=%p\n", + acpi_ps_get_opcode_name (op->common.aml_opcode), + acpi_ps_get_opcode_name (op->common.parent->common.aml_opcode), op)); + + return_VALUE (FALSE); + } /******************************************************************************* * - * FUNCTION: Acpi_ds_delete_result_if_not_used + * FUNCTION: acpi_ds_delete_result_if_not_used * * PARAMETERS: Op - * Result_obj - * Walk_state + * result_obj + * walk_state * * RETURN: Status * @@ -192,15 +219,15 @@ void acpi_ds_delete_result_if_not_used ( - acpi_parse_object *op, - acpi_operand_object *result_obj, - acpi_walk_state *walk_state) + union acpi_parse_object *op, + union acpi_operand_object *result_obj, + struct acpi_walk_state *walk_state) { - acpi_operand_object *obj_desc; - acpi_status status; + union acpi_operand_object *obj_desc; + acpi_status status; - FUNCTION_TRACE_PTR ("Ds_delete_result_if_not_used", result_obj); + ACPI_FUNCTION_TRACE_PTR ("ds_delete_result_if_not_used", result_obj); if (!op) { @@ -215,7 +242,7 @@ if (!acpi_ds_is_result_used (op, walk_state)) { /* - * Must pop the result stack (Obj_desc should be equal to Result_obj) + * Must pop the result stack (obj_desc should be equal to result_obj) */ status = acpi_ds_result_pop (&obj_desc, walk_state); if (ACPI_SUCCESS (status)) { @@ -229,9 +256,89 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_create_operand + * FUNCTION: acpi_ds_resolve_operands + * + * PARAMETERS: walk_state - Current walk state with operands on stack + * + * RETURN: Status + * + * DESCRIPTION: Resolve all operands to their values. Used to prepare + * arguments to a control method invocation (a call from one + * method to another.) + * + ******************************************************************************/ + +acpi_status +acpi_ds_resolve_operands ( + struct acpi_walk_state *walk_state) +{ + u32 i; + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE_PTR ("ds_resolve_operands", walk_state); + + + /* + * Attempt to resolve each of the valid operands + * Method arguments are passed by value, not by reference + */ + for (i = 0; i < walk_state->num_operands; i++) { + status = acpi_ex_resolve_to_value (&walk_state->operands[i], walk_state); + if (ACPI_FAILURE (status)) { + break; + } + } + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ds_clear_operands + * + * PARAMETERS: walk_state - Current walk state with operands on stack + * + * RETURN: None + * + * DESCRIPTION: Clear all operands on the current walk state operand stack. + * + ******************************************************************************/ + +void +acpi_ds_clear_operands ( + struct acpi_walk_state *walk_state) +{ + u32 i; + + + ACPI_FUNCTION_TRACE_PTR ("acpi_ds_clear_operands", walk_state); + + + /* + * Remove a reference on each operand on the stack + */ + for (i = 0; i < walk_state->num_operands; i++) { + /* + * Remove a reference to all operands, including both + * "Arguments" and "Targets". 
+ */ + acpi_ut_remove_reference (walk_state->operands[i]); + walk_state->operands[i] = NULL; + } + + walk_state->num_operands = 0; + return_VOID; +} +#endif + + +/******************************************************************************* + * + * FUNCTION: acpi_ds_create_operand * - * PARAMETERS: Walk_state + * PARAMETERS: walk_state * Arg * * RETURN: Status @@ -245,34 +352,32 @@ acpi_status acpi_ds_create_operand ( - acpi_walk_state *walk_state, - acpi_parse_object *arg, - u32 arg_index) + struct acpi_walk_state *walk_state, + union acpi_parse_object *arg, + u32 arg_index) { - acpi_status status = AE_OK; - NATIVE_CHAR *name_string; - u32 name_length; - acpi_object_type8 data_type; - acpi_operand_object *obj_desc; - acpi_parse_object *parent_op; - u16 opcode; - u32 flags; - operating_mode interpreter_mode; - const acpi_opcode_info *op_info; + acpi_status status = AE_OK; + char *name_string; + u32 name_length; + union acpi_operand_object *obj_desc; + union acpi_parse_object *parent_op; + u16 opcode; + acpi_interpreter_mode interpreter_mode; + const struct acpi_opcode_info *op_info; - FUNCTION_TRACE_PTR ("Ds_create_operand", arg); + ACPI_FUNCTION_TRACE_PTR ("ds_create_operand", arg); /* A valid name must be looked up in the namespace */ - if ((arg->opcode == AML_INT_NAMEPATH_OP) && - (arg->value.string)) { + if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) && + (arg->common.value.string)) { ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Getting a name: Arg=%p\n", arg)); /* Get the entire name string from the AML stream */ - status = acpi_ex_get_name_string (ACPI_TYPE_ANY, arg->value.buffer, + status = acpi_ex_get_name_string (ACPI_TYPE_ANY, arg->common.value.buffer, &name_string, &name_length); if (ACPI_FAILURE (status)) { @@ -281,7 +386,7 @@ /* * All prefixes have been handled, and the name is - * in Name_string + * in name_string */ /* @@ -290,46 +395,41 @@ * IMODE_EXECUTE) in order to support the creation of * namespace objects during the execution of control methods. */ - parent_op = arg->parent; - op_info = acpi_ps_get_opcode_info (parent_op->opcode); + parent_op = arg->common.parent; + op_info = acpi_ps_get_opcode_info (parent_op->common.aml_opcode); if ((op_info->flags & AML_NSNODE) && - (parent_op->opcode != AML_INT_METHODCALL_OP) && - (parent_op->opcode != AML_REGION_OP) && - (parent_op->opcode != AML_INT_NAMEPATH_OP)) { + (parent_op->common.aml_opcode != AML_INT_METHODCALL_OP) && + (parent_op->common.aml_opcode != AML_REGION_OP) && + (parent_op->common.aml_opcode != AML_INT_NAMEPATH_OP)) { /* Enter name into namespace if not found */ - interpreter_mode = IMODE_LOAD_PASS2; + interpreter_mode = ACPI_IMODE_LOAD_PASS2; } else { /* Return a failure if name not found */ - interpreter_mode = IMODE_EXECUTE; + interpreter_mode = ACPI_IMODE_EXECUTE; } status = acpi_ns_lookup (walk_state->scope_info, name_string, ACPI_TYPE_ANY, interpreter_mode, - NS_SEARCH_PARENT | NS_DONT_OPEN_SCOPE, + ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, walk_state, - (acpi_namespace_node **) &obj_desc); - - /* Free the namestring created above */ - - ACPI_MEM_FREE (name_string); - + ACPI_CAST_INDIRECT_PTR (struct acpi_namespace_node, &obj_desc)); /* * The only case where we pass through (ignore) a NOT_FOUND - * error is for the Cond_ref_of opcode. + * error is for the cond_ref_of opcode. 
*/ if (status == AE_NOT_FOUND) { - if (parent_op->opcode == AML_COND_REF_OF_OP) { + if (parent_op->common.aml_opcode == AML_COND_REF_OF_OP) { /* * For the Conditional Reference op, it's OK if * the name is not found; We just need a way to * indicate this to the interpreter, set the * object to the root */ - obj_desc = (acpi_operand_object *) acpi_gbl_root_node; + obj_desc = ACPI_CAST_PTR (union acpi_operand_object, acpi_gbl_root_node); status = AE_OK; } @@ -339,14 +439,17 @@ * very serious error at this point */ status = AE_AML_NAME_NOT_FOUND; - - /* TBD: Externalize Name_string and print */ - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Object name was not found in namespace\n")); } } + if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (name_string, status); + } + + /* Free the namestring created above */ + + ACPI_MEM_FREE (name_string); + /* Check status from the lookup */ if (ACPI_FAILURE (status)) { @@ -359,47 +462,42 @@ if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - DEBUGGER_EXEC (acpi_db_display_argument_object (obj_desc, walk_state)); + ACPI_DEBUGGER_EXEC (acpi_db_display_argument_object (obj_desc, walk_state)); } else { /* Check for null name case */ - if (arg->opcode == AML_INT_NAMEPATH_OP) { + if (arg->common.aml_opcode == AML_INT_NAMEPATH_OP) { /* * If the name is null, this means that this is an * optional result parameter that was not specified - * in the original ASL. Create an Reference for a - * placeholder + * in the original ASL. Create a Zero Constant for a + * placeholder. (Store to a constant is a Noop.) */ opcode = AML_ZERO_OP; /* Has no arguments! */ ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Null namepath: Arg=%p\n", arg)); - - /* - * TBD: [Investigate] anything else needed for the - * zero op lvalue? - */ } else { - opcode = arg->opcode; + opcode = arg->common.aml_opcode; } + /* Get the object type of the argument */ - /* Get the data type of the argument */ - - data_type = acpi_ds_map_opcode_to_data_type (opcode, &flags); - if (data_type == INTERNAL_TYPE_INVALID) { + op_info = acpi_ps_get_opcode_info (opcode); + if (op_info->object_type == ACPI_TYPE_INVALID) { return_ACPI_STATUS (AE_NOT_IMPLEMENTED); } - if (flags & OP_HAS_RETURN_VALUE) { + if (op_info->flags & AML_HAS_RETVAL) { ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Argument previously created, already stacked \n")); - DEBUGGER_EXEC (acpi_db_display_argument_object (walk_state->operands [walk_state->num_operands - 1], walk_state)); + ACPI_DEBUGGER_EXEC (acpi_db_display_argument_object ( + walk_state->operands [walk_state->num_operands - 1], walk_state)); /* * Use value that was already previously returned @@ -415,13 +513,11 @@ acpi_format_exception (status))); return_ACPI_STATUS (status); } - } - else { /* Create an ACPI_INTERNAL_OBJECT for the argument */ - obj_desc = acpi_ut_create_internal_object (data_type); + obj_desc = acpi_ut_create_internal_object (op_info->object_type); if (!obj_desc) { return_ACPI_STATUS (AE_NO_MEMORY); } @@ -434,7 +530,7 @@ acpi_ut_delete_object_desc (obj_desc); return_ACPI_STATUS (status); } - } + } /* Put the operand object on the object stack */ @@ -443,7 +539,7 @@ return_ACPI_STATUS (status); } - DEBUGGER_EXEC (acpi_db_display_argument_object (obj_desc, walk_state)); + ACPI_DEBUGGER_EXEC (acpi_db_display_argument_object (obj_desc, walk_state)); } return_ACPI_STATUS (AE_OK); @@ -452,9 +548,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_create_operands + * FUNCTION: acpi_ds_create_operands * - * PARAMETERS: First_arg 
- First argument of a parser argument tree + * PARAMETERS: first_arg - First argument of a parser argument tree * * RETURN: Status * @@ -466,15 +562,15 @@ acpi_status acpi_ds_create_operands ( - acpi_walk_state *walk_state, - acpi_parse_object *first_arg) + struct acpi_walk_state *walk_state, + union acpi_parse_object *first_arg) { - acpi_status status = AE_OK; - acpi_parse_object *arg; - u32 arg_count = 0; + acpi_status status = AE_OK; + union acpi_parse_object *arg; + u32 arg_count = 0; - FUNCTION_TRACE_PTR ("Ds_create_operands", first_arg); + ACPI_FUNCTION_TRACE_PTR ("ds_create_operands", first_arg); /* For all arguments in the list... */ @@ -491,7 +587,7 @@ /* Move on to next argument, if any */ - arg = arg->next; + arg = arg->common.next; arg_count++; } @@ -504,7 +600,7 @@ * pop everything off of the operand stack and delete those * objects */ - acpi_ds_obj_stack_pop_and_delete (arg_count, walk_state); + (void) acpi_ds_obj_stack_pop_and_delete (arg_count, walk_state); ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "While creating Arg %d - %s\n", (arg_count + 1), acpi_format_exception (status))); @@ -512,307 +608,3 @@ } -/******************************************************************************* - * - * FUNCTION: Acpi_ds_resolve_operands - * - * PARAMETERS: Walk_state - Current walk state with operands on stack - * - * RETURN: Status - * - * DESCRIPTION: Resolve all operands to their values. Used to prepare - * arguments to a control method invocation (a call from one - * method to another.) - * - ******************************************************************************/ - -acpi_status -acpi_ds_resolve_operands ( - acpi_walk_state *walk_state) -{ - u32 i; - acpi_status status = AE_OK; - - - FUNCTION_TRACE_PTR ("Ds_resolve_operands", walk_state); - - - /* - * Attempt to resolve each of the valid operands - * Method arguments are passed by value, not by reference - */ - - /* - * TBD: [Investigate] Note from previous parser: - * Ref_of problem with Acpi_ex_resolve_to_value() conversion. - */ - for (i = 0; i < walk_state->num_operands; i++) { - status = acpi_ex_resolve_to_value (&walk_state->operands[i], walk_state); - if (ACPI_FAILURE (status)) { - break; - } - } - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ds_map_opcode_to_data_type - * - * PARAMETERS: Opcode - AML opcode to map - * Out_flags - Additional info about the opcode - * - * RETURN: The ACPI type associated with the opcode - * - * DESCRIPTION: Convert a raw AML opcode to the associated ACPI data type, - * if any. If the opcode returns a value as part of the - * intepreter execution, a flag is returned in Out_flags. 
- * - ******************************************************************************/ - -acpi_object_type8 -acpi_ds_map_opcode_to_data_type ( - u16 opcode, - u32 *out_flags) -{ - acpi_object_type8 data_type = INTERNAL_TYPE_INVALID; - const acpi_opcode_info *op_info; - u32 flags = 0; - - - PROC_NAME ("Ds_map_opcode_to_data_type"); - - - op_info = acpi_ps_get_opcode_info (opcode); - if (op_info->class == AML_CLASS_UNKNOWN) { - /* Unknown opcode */ - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown AML opcode: %x\n", opcode)); - return (data_type); - } - - -/* - * TBD: Use op class - */ - - switch (op_info->type) { - - case AML_TYPE_LITERAL: - - switch (opcode) { - case AML_BYTE_OP: - case AML_WORD_OP: - case AML_DWORD_OP: - case AML_QWORD_OP: - - data_type = ACPI_TYPE_INTEGER; - break; - - - case AML_STRING_OP: - - data_type = ACPI_TYPE_STRING; - break; - - case AML_INT_NAMEPATH_OP: - data_type = INTERNAL_TYPE_REFERENCE; - break; - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Unknown (type LITERAL) AML opcode: %x\n", opcode)); - break; - } - break; - - - case AML_TYPE_DATA_TERM: - - switch (opcode) { - case AML_BUFFER_OP: - - data_type = ACPI_TYPE_BUFFER; - break; - - case AML_PACKAGE_OP: - case AML_VAR_PACKAGE_OP: - - data_type = ACPI_TYPE_PACKAGE; - break; - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Unknown (type DATA_TERM) AML opcode: %x\n", opcode)); - break; - } - break; - - - case AML_TYPE_CONSTANT: - case AML_TYPE_METHOD_ARGUMENT: - case AML_TYPE_LOCAL_VARIABLE: - - data_type = INTERNAL_TYPE_REFERENCE; - break; - - - case AML_TYPE_EXEC_1A_0T_1R: - case AML_TYPE_EXEC_1A_1T_1R: - case AML_TYPE_EXEC_2A_0T_1R: - case AML_TYPE_EXEC_2A_1T_1R: - case AML_TYPE_EXEC_2A_2T_1R: - case AML_TYPE_EXEC_3A_1T_1R: - case AML_TYPE_EXEC_6A_0T_1R: - case AML_TYPE_RETURN: - - flags = OP_HAS_RETURN_VALUE; - data_type = ACPI_TYPE_ANY; - break; - - - case AML_TYPE_METHOD_CALL: - - flags = OP_HAS_RETURN_VALUE; - data_type = ACPI_TYPE_METHOD; - break; - - - case AML_TYPE_NAMED_FIELD: - case AML_TYPE_NAMED_SIMPLE: - case AML_TYPE_NAMED_COMPLEX: - case AML_TYPE_NAMED_NO_OBJ: - - data_type = acpi_ds_map_named_opcode_to_data_type (opcode); - break; - - - case AML_TYPE_EXEC_1A_0T_0R: - case AML_TYPE_EXEC_2A_0T_0R: - case AML_TYPE_EXEC_3A_0T_0R: - case AML_TYPE_EXEC_1A_1T_0R: - case AML_TYPE_CONTROL: - - /* No mapping needed at this time */ - - break; - - - default: - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Unimplemented data type opcode: %x\n", opcode)); - break; - } - - /* Return flags to caller if requested */ - - if (out_flags) { - *out_flags = flags; - } - - return (data_type); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ds_map_named_opcode_to_data_type - * - * PARAMETERS: Opcode - The Named AML opcode to map - * - * RETURN: The ACPI type associated with the named opcode - * - * DESCRIPTION: Convert a raw Named AML opcode to the associated data type. - * Named opcodes are a subsystem of the AML opcodes. 
- * - ******************************************************************************/ - -acpi_object_type8 -acpi_ds_map_named_opcode_to_data_type ( - u16 opcode) -{ - acpi_object_type8 data_type; - - - FUNCTION_ENTRY (); - - - /* Decode Opcode */ - - switch (opcode) { - case AML_SCOPE_OP: - data_type = INTERNAL_TYPE_SCOPE; - break; - - case AML_DEVICE_OP: - data_type = ACPI_TYPE_DEVICE; - break; - - case AML_THERMAL_ZONE_OP: - data_type = ACPI_TYPE_THERMAL; - break; - - case AML_METHOD_OP: - data_type = ACPI_TYPE_METHOD; - break; - - case AML_POWER_RES_OP: - data_type = ACPI_TYPE_POWER; - break; - - case AML_PROCESSOR_OP: - data_type = ACPI_TYPE_PROCESSOR; - break; - - case AML_FIELD_OP: /* Field_op */ - data_type = INTERNAL_TYPE_FIELD_DEFN; - break; - - case AML_INDEX_FIELD_OP: /* Index_field_op */ - data_type = INTERNAL_TYPE_INDEX_FIELD_DEFN; - break; - - case AML_BANK_FIELD_OP: /* Bank_field_op */ - data_type = INTERNAL_TYPE_BANK_FIELD_DEFN; - break; - - case AML_INT_NAMEDFIELD_OP: /* NO CASE IN ORIGINAL */ - data_type = ACPI_TYPE_ANY; - break; - - case AML_NAME_OP: /* Name_op - special code in original */ - case AML_INT_NAMEPATH_OP: - data_type = ACPI_TYPE_ANY; - break; - - case AML_ALIAS_OP: - data_type = INTERNAL_TYPE_ALIAS; - break; - - case AML_MUTEX_OP: - data_type = ACPI_TYPE_MUTEX; - break; - - case AML_EVENT_OP: - data_type = ACPI_TYPE_EVENT; - break; - - case AML_DATA_REGION_OP: - case AML_REGION_OP: - data_type = ACPI_TYPE_REGION; - break; - - - default: - data_type = ACPI_TYPE_ANY; - break; - - } - - return (data_type); -} - - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dswexec.c linux.21rc5-ac2/drivers/acpi/dispatcher/dswexec.c --- linux.21rc5/drivers/acpi/dispatcher/dswexec.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dswexec.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,45 +2,63 @@ * * Module Name: dswexec - Dispatcher method execution callbacks; * dispatch to interpreter. - * $Revision: 79 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "acdebug.h" +#include <acpi/acpi.h> +#include <acpi/acparser.h> +#include <acpi/amlcode.h> +#include <acpi/acdispat.h> +#include <acpi/acinterp.h> +#include <acpi/acnamesp.h> +#include <acpi/acdebug.h> #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dswexec") + ACPI_MODULE_NAME ("dswexec") /* - * Dispatch tables for opcode classes + * Dispatch table for opcode classes */ -ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch [] = { +static ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch [] = { acpi_ex_opcode_1A_0T_0R, acpi_ex_opcode_1A_0T_1R, acpi_ex_opcode_1A_1T_0R, @@ -55,9 +73,9 @@ /***************************************************************************** * - * FUNCTION: Acpi_ds_get_predicate_value + * FUNCTION: acpi_ds_get_predicate_value * - * PARAMETERS: Walk_state - Current state of the parse tree walk + * PARAMETERS: walk_state - Current state of the parse tree walk * * RETURN: Status * @@ -67,18 +85,18 @@ acpi_status acpi_ds_get_predicate_value ( - acpi_walk_state *walk_state, - u32 has_result_obj) { - acpi_status status = AE_OK; - acpi_operand_object *obj_desc; + struct acpi_walk_state *walk_state, + union acpi_operand_object *result_obj) { + acpi_status status = AE_OK; + union acpi_operand_object *obj_desc; - FUNCTION_TRACE_PTR ("Ds_get_predicate_value", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ds_get_predicate_value", walk_state); walk_state->control_state->common.state = 0; - if (has_result_obj) { + if (result_obj) { status = acpi_ds_result_pop (&obj_desc, walk_state); if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, @@ -88,7 +106,6 @@ return_ACPI_STATUS (status); } } - else { status = acpi_ds_create_operand (walk_state, walk_state->op, 0); if (ACPI_FAILURE (status)) { @@ -104,30 +121,28 @@ } if (!obj_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No predicate Obj_desc=%p State=%p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No predicate obj_desc=%p State=%p\n", obj_desc, walk_state)); return_ACPI_STATUS (AE_AML_NO_OPERAND); } - /* * Result of predicate evaluation currently must * be a number */ - 
if (obj_desc->common.type != ACPI_TYPE_INTEGER) { + if (ACPI_GET_OBJECT_TYPE (obj_desc) != ACPI_TYPE_INTEGER) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Bad predicate (not a number) Obj_desc=%p State=%p Type=%X\n", - obj_desc, walk_state, obj_desc->common.type)); + "Bad predicate (not a number) obj_desc=%p State=%p Type=%X\n", + obj_desc, walk_state, ACPI_GET_OBJECT_TYPE (obj_desc))); status = AE_AML_OPERAND_TYPE; goto cleanup; } - /* Truncate the predicate to 32-bits if necessary */ - acpi_ex_truncate_for32bit_table (obj_desc, walk_state); + acpi_ex_truncate_for32bit_table (obj_desc); /* * Save the result of the predicate evaluation on @@ -136,7 +151,6 @@ if (obj_desc->integer.value) { walk_state->control_state->common.value = TRUE; } - else { /* * Predicate is FALSE, we will just toss the @@ -149,12 +163,12 @@ cleanup: - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%pn", + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n", walk_state->control_state->common.value, walk_state->op)); /* Break to debugger to display result */ - DEBUGGER_EXEC (acpi_db_display_result_object (obj_desc, walk_state)); + ACPI_DEBUGGER_EXEC (acpi_db_display_result_object (obj_desc, walk_state)); /* * Delete the predicate result object (we know that @@ -162,17 +176,17 @@ */ acpi_ut_remove_reference (obj_desc); - walk_state->control_state->common.state = CONTROL_NORMAL; + walk_state->control_state->common.state = ACPI_CONTROL_NORMAL; return_ACPI_STATUS (status); } /***************************************************************************** * - * FUNCTION: Acpi_ds_exec_begin_op + * FUNCTION: acpi_ds_exec_begin_op * - * PARAMETERS: Walk_state - Current state of the parse tree walk - * Out_op - Return op if a new one is created + * PARAMETERS: walk_state - Current state of the parse tree walk + * out_op - Return op if a new one is created * * RETURN: Status * @@ -184,15 +198,15 @@ acpi_status acpi_ds_exec_begin_op ( - acpi_walk_state *walk_state, - acpi_parse_object **out_op) + struct acpi_walk_state *walk_state, + union acpi_parse_object **out_op) { - acpi_parse_object *op; - acpi_status status = AE_OK; - u32 opcode_class; + union acpi_parse_object *op; + acpi_status status = AE_OK; + u32 opcode_class; - FUNCTION_TRACE_PTR ("Ds_exec_begin_op", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ds_exec_begin_op", walk_state); op = walk_state->op; @@ -204,8 +218,18 @@ op = *out_op; walk_state->op = op; - walk_state->op_info = acpi_ps_get_opcode_info (op->opcode); - walk_state->opcode = op->opcode; + walk_state->opcode = op->common.aml_opcode; + walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode); + + if (acpi_ns_opens_scope (walk_state->op_info->object_type)) { + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "(%s) Popping scope for Op %p\n", + acpi_ut_get_type_name (walk_state->op_info->object_type), op)); + + status = acpi_ds_scope_stack_pop (walk_state); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } } if (op == walk_state->origin) { @@ -223,11 +247,11 @@ */ if ((walk_state->control_state) && (walk_state->control_state->common.state == - CONTROL_CONDITIONAL_EXECUTING)) { + ACPI_CONTROL_CONDITIONAL_EXECUTING)) { ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Exec predicate Op=%p State=%p\n", op, walk_state)); - walk_state->control_state->common.state = CONTROL_PREDICATE_EXECUTING; + walk_state->control_state->common.state = ACPI_CONTROL_PREDICATE_EXECUTING; /* Save start of predicate */ @@ -239,7 +263,7 @@ /* We want to send namepaths to the load code */ - if 
(op->opcode == AML_INT_NAMEPATH_OP) { + if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) { opcode_class = AML_CLASS_NAMED_OBJECT; } @@ -260,7 +284,7 @@ case AML_CLASS_NAMED_OBJECT: - if (walk_state->walk_type == WALK_METHOD) { + if (walk_state->walk_type == ACPI_WALK_METHOD) { /* * Found a named object declaration during method * execution; we must enter this object into the @@ -271,19 +295,16 @@ status = acpi_ds_load2_begin_op (walk_state, NULL); } - - if (op->opcode == AML_REGION_OP) { + if (op->common.aml_opcode == AML_REGION_OP) { status = acpi_ds_result_stack_push (walk_state); } - break; - /* most operators with arguments */ - case AML_CLASS_EXECUTE: case AML_CLASS_CREATE: + /* most operators with arguments */ /* Start a new result/operand state */ status = acpi_ds_result_stack_push (walk_state); @@ -302,9 +323,9 @@ /***************************************************************************** * - * FUNCTION: Acpi_ds_exec_end_op + * FUNCTION: acpi_ds_exec_end_op * - * PARAMETERS: Walk_state - Current state of the parse tree walk + * PARAMETERS: walk_state - Current state of the parse tree walk * Op - Op that has been just been completed in the * walk; Arguments have now been evaluated. * @@ -318,18 +339,17 @@ acpi_status acpi_ds_exec_end_op ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_parse_object *op; - acpi_status status = AE_OK; - u32 op_type; - u32 op_class; - acpi_parse_object *next_op; - acpi_parse_object *first_arg; - u32 i; + union acpi_parse_object *op; + acpi_status status = AE_OK; + u32 op_type; + u32 op_class; + union acpi_parse_object *next_op; + union acpi_parse_object *first_arg; - FUNCTION_TRACE_PTR ("Ds_exec_end_op", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ds_exec_end_op", walk_state); op = walk_state->op; @@ -337,11 +357,11 @@ op_class = walk_state->op_info->class; if (op_class == AML_CLASS_UNKNOWN) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown opcode %X\n", op->opcode)); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown opcode %X\n", op->common.aml_opcode)); return_ACPI_STATUS (AE_NOT_IMPLEMENTED); } - first_arg = op->value.arg; + first_arg = op->common.value.arg; /* Init the walk state */ @@ -349,22 +369,19 @@ walk_state->return_desc = NULL; walk_state->result_obj = NULL; - /* Call debugger for single step support (DEBUG build only) */ - DEBUGGER_EXEC (status = acpi_db_single_step (walk_state, op, op_class)); - DEBUGGER_EXEC (if (ACPI_FAILURE (status)) {return_ACPI_STATUS (status);}); - + ACPI_DEBUGGER_EXEC (status = acpi_db_single_step (walk_state, op, op_class)); + ACPI_DEBUGGER_EXEC (if (ACPI_FAILURE (status)) {return_ACPI_STATUS (status);}); - switch (op_class) { /* Decode the Opcode Class */ - case AML_CLASS_ARGUMENT: /* constants, literals, etc. do nothing */ + switch (op_class) { + case AML_CLASS_ARGUMENT: /* constants, literals, etc. 
-- do nothing */ break; - /* most operators with arguments */ - case AML_CLASS_EXECUTE: + case AML_CLASS_EXECUTE: /* most operators with arguments */ /* Build resolved operand stack */ @@ -385,48 +402,28 @@ status = acpi_ex_resolve_operands (walk_state->opcode, &(walk_state->operands [walk_state->num_operands -1]), walk_state); - if (ACPI_FAILURE (status)) { - /* TBD: must pop and delete operands */ - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "[%s]: Could not resolve operands, %s\n", - acpi_ps_get_opcode_name (walk_state->opcode), acpi_format_exception (status))); + if (ACPI_SUCCESS (status)) { + ACPI_DUMP_OPERANDS (ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, + acpi_ps_get_opcode_name (walk_state->opcode), + walk_state->num_operands, "after ex_resolve_operands"); /* - * On error, we must delete all the operands and clear the - * operand stack + * Dispatch the request to the appropriate interpreter handler + * routine. There is one routine per opcode "type" based upon the + * number of opcode arguments and return type. */ - for (i = 0; i < walk_state->num_operands; i++) { - acpi_ut_remove_reference (walk_state->operands[i]); - walk_state->operands[i] = NULL; - } - - walk_state->num_operands = 0; - goto cleanup; + status = acpi_gbl_op_type_dispatch [op_type] (walk_state); } - - DUMP_OPERANDS (WALK_OPERANDS, IMODE_EXECUTE, acpi_ps_get_opcode_name (walk_state->opcode), - walk_state->num_operands, "after Ex_resolve_operands"); - - /* - * Dispatch the request to the appropriate interpreter handler - * routine. There is one routine per opcode "type" based upon the - * number of opcode arguments and return type. - */ - status = acpi_gbl_op_type_dispatch [op_type] (walk_state); - - - /* Delete argument objects and clear the operand stack */ - - for (i = 0; i < walk_state->num_operands; i++) { - /* - * Remove a reference to all operands, including both - * "Arguments" and "Targets". - */ - acpi_ut_remove_reference (walk_state->operands[i]); - walk_state->operands[i] = NULL; + else { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "[%s]: Could not resolve operands, %s\n", + acpi_ps_get_opcode_name (walk_state->opcode), + acpi_format_exception (status))); } - walk_state->num_operands = 0; + /* Always delete the argument objects and clear the operand stack */ + + acpi_ds_clear_operands (walk_state); /* * If a result object was returned from above, push it on the @@ -445,11 +442,14 @@ switch (op_type) { case AML_TYPE_CONTROL: /* Type 1 opcode, IF/ELSE/WHILE/NOOP */ - /* 1 Operand, 0 External_result, 0 Internal_result */ + /* 1 Operand, 0 external_result, 0 internal_result */ status = acpi_ds_exec_end_control_op (walk_state, op); + if (ACPI_FAILURE (status)) { + break; + } - acpi_ds_result_stack_pop (walk_state); + status = acpi_ds_result_stack_pop (walk_state); break; @@ -461,13 +461,13 @@ * (AML_METHODCALL) Op->Value->Arg->Node contains * the method Node pointer */ - /* Next_op points to the op that holds the method name */ + /* next_op points to the op that holds the method name */ next_op = first_arg; - /* Next_op points to first argument op */ + /* next_op points to first argument op */ - next_op = next_op->next; + next_op = next_op->common.next; /* * Get the method's arguments and put them on the operand stack @@ -478,13 +478,15 @@ } /* - * Since the operands will be passed to another - * control method, we must resolve all local - * references here (Local variables, arguments - * to *this* method, etc.) 
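
As background for the resolve step just below: control-method arguments are passed by value (per the ds_resolve_operands description earlier in this patch), so a Local or Arg reference must be collapsed to its current value before control transfers, much like a scalar argument in a C call. A trivial, self-contained analogue with hypothetical names, only a sketch:

#include <stdio.h>

static int
callee (
	int                             arg0)
{
	arg0 = 42;                      /* mutates only the callee's copy */
	return (arg0);
}

int
main (void)
{
	int                             local0 = 7;

	/* The value 7 crosses the call; local0 itself cannot be changed */

	printf ("callee saw %d, local0 still %d\n", callee (local0), local0);
	return (0);
}
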
+ * Since the operands will be passed to another control method, + * we must resolve all local references here (Local variables, + * arguments to *this* method, etc.) */ status = acpi_ds_resolve_operands (walk_state); if (ACPI_FAILURE (status)) { + /* On error, clear all resolved operands */ + + acpi_ds_clear_operands (walk_state); break; } @@ -499,13 +501,12 @@ * especially the operand count! */ return_ACPI_STATUS (status); - break; case AML_TYPE_CREATE_FIELD: ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, - "Executing Create_field Buffer/Index Op=%p\n", op)); + "Executing create_field Buffer/Index Op=%p\n", op)); status = acpi_ds_load2_end_op (walk_state); if (ACPI_FAILURE (status)) { @@ -516,18 +517,65 @@ break; + case AML_TYPE_CREATE_OBJECT: + + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, + "Executing create_object (Buffer/Package) Op=%p\n", op)); + + switch (op->common.parent->common.aml_opcode) { + case AML_NAME_OP: + + /* + * Put the Node on the object stack (Contains the ACPI Name of + * this object) + */ + walk_state->operands[0] = (void *) op->common.parent->common.node; + walk_state->num_operands = 1; + + status = acpi_ds_create_node (walk_state, op->common.parent->common.node, op->common.parent); + if (ACPI_FAILURE (status)) { + break; + } + + /* Fall through */ + /*lint -fallthrough */ + + case AML_INT_EVAL_SUBTREE_OP: + + status = acpi_ds_eval_data_object_operands (walk_state, op, + acpi_ns_get_attached_object (op->common.parent->common.node)); + break; + + default: + + status = acpi_ds_eval_data_object_operands (walk_state, op, NULL); + break; + } + + /* + * If a result object was returned from above, push it on the + * current result stack + */ + if (ACPI_SUCCESS (status) && + walk_state->result_obj) { + status = acpi_ds_result_push (walk_state->result_obj, walk_state); + } + break; + + case AML_TYPE_NAMED_FIELD: case AML_TYPE_NAMED_COMPLEX: case AML_TYPE_NAMED_SIMPLE: + case AML_TYPE_NAMED_NO_OBJ: status = acpi_ds_load2_end_op (walk_state); if (ACPI_FAILURE (status)) { break; } - if (op->opcode == AML_REGION_OP) { + if (op->common.aml_opcode == AML_REGION_OP) { ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, - "Executing Op_region Address/Length Op=%p\n", op)); + "Executing op_region Address/Length Op=%p\n", op)); status = acpi_ds_eval_region_operands (walk_state, op); if (ACPI_FAILURE (status)) { @@ -539,23 +587,26 @@ break; + case AML_TYPE_UNDEFINED: ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Undefined opcode type Op=%p\n", op)); return_ACPI_STATUS (AE_NOT_IMPLEMENTED); - break; case AML_TYPE_BOGUS: - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Internal opcode=%X type Op=%p\n", + + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, + "Internal opcode=%X type Op=%p\n", walk_state->opcode, op)); break; + default: ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p\n", - op_class, op_type, op->opcode, op)); + op_class, op_type, op->common.aml_opcode, op)); status = AE_NOT_IMPLEMENTED; break; @@ -563,10 +614,10 @@ } /* - * ACPI 2.0 support for 64-bit integers: - * Truncate numeric result value if we are executing from a 32-bit ACPI table + * ACPI 2.0 support for 64-bit integers: Truncate numeric + * result value if we are executing from a 32-bit ACPI table */ - acpi_ex_truncate_for32bit_table (walk_state->result_obj, walk_state); + acpi_ex_truncate_for32bit_table (walk_state->result_obj); /* * Check if we just completed the evaluation of a @@ -575,9 +626,9 @@ if ((walk_state->control_state) && (walk_state->control_state->common.state == - CONTROL_PREDICATE_EXECUTING) && + 
ACPI_CONTROL_PREDICATE_EXECUTING) && (walk_state->control_state->control.predicate_op == op)) { - status = acpi_ds_get_predicate_value (walk_state, !! walk_state->result_obj); + status = acpi_ds_get_predicate_value (walk_state, walk_state->result_obj); walk_state->result_obj = NULL; } @@ -586,7 +637,7 @@ if (walk_state->result_obj) { /* Break to debugger to display result */ - DEBUGGER_EXEC (acpi_db_display_result_object (walk_state->result_obj, walk_state)); + ACPI_DEBUGGER_EXEC (acpi_db_display_result_object (walk_state->result_obj, walk_state)); /* * Delete the result op if and only if: @@ -596,12 +647,16 @@ acpi_ds_delete_result_if_not_used (op, walk_state->result_obj, walk_state); } +#ifdef _UNDER_DEVELOPMENT + + if (walk_state->parser_state.aml == walk_state->parser_state.aml_end) { + acpi_db_method_end (walk_state); + } +#endif + /* Always clear the object stack */ - /* TBD: [Investigate] Clear stack of return value, - but don't delete it */ walk_state->num_operands = 0; - return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dswload.c linux.21rc5-ac2/drivers/acpi/dispatcher/dswload.c --- linux.21rc5/drivers/acpi/dispatcher/dswload.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dswload.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,48 +1,66 @@ /****************************************************************************** * * Module Name: dswload - Dispatcher namespace load callbacks - * $Revision: 50 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
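The dswexec.c changes that end above consolidate the interpreter's operand cleanup: every exit path now calls a single acpi_ds_clear_operands() instead of open-coding the dereference loop. A minimal sketch of that pattern, with illustrative types (not the kernel's; the real helper works on the walk state's fixed operand array):

#include <stdio.h>

#define OBJ_NUM_OPERANDS 8          /* fixed size, like ACPI_OBJ_NUM_OPERANDS */

struct operand { int refcount; };

struct walk_state {
    struct operand *operands[OBJ_NUM_OPERANDS];
    unsigned num_operands;
};

static void remove_reference(struct operand *obj)
{
    if (obj && --obj->refcount == 0)
        printf("operand %p freed\n", (void *) obj);
}

/* Drop one reference on every live operand and reset the stack to empty. */
static void clear_operands(struct walk_state *ws)
{
    unsigned i;

    for (i = 0; i < ws->num_operands; i++) {
        remove_reference(ws->operands[i]);
        ws->operands[i] = NULL;
    }
    ws->num_operands = 0;
}

int main(void)
{
    struct operand a = { 1 }, b = { 2 };
    struct walk_state ws = { { &a, &b }, 2 };

    clear_operands(&ws);        /* frees a, leaves b with one reference */
    printf("b.refcount=%d num_operands=%u\n", b.refcount, ws.num_operands);
    return 0;
}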
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "acevents.h" +#include <acpi/acpi.h> +#include <acpi/acparser.h> +#include <acpi/amlcode.h> +#include <acpi/acdispat.h> +#include <acpi/acinterp.h> +#include <acpi/acnamesp.h> +#include <acpi/acevents.h> #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dswload") + ACPI_MODULE_NAME ("dswload") /******************************************************************************* * - * FUNCTION: Acpi_ds_init_callbacks + * FUNCTION: acpi_ds_init_callbacks * - * PARAMETERS: Walk_state - Current state of the parse tree walk - * Pass_number - 1, 2, or 3 + * PARAMETERS: walk_state - Current state of the parse tree walk + * pass_number - 1, 2, or 3 * * RETURN: Status * @@ -52,8 +70,8 @@ acpi_status acpi_ds_init_callbacks ( - acpi_walk_state *walk_state, - u32 pass_number) + struct acpi_walk_state *walk_state, + u32 pass_number) { switch (pass_number) { @@ -70,14 +88,15 @@ break; case 3: +#ifndef ACPI_NO_METHOD_EXECUTION walk_state->parse_flags |= ACPI_PARSE_EXECUTE | ACPI_PARSE_DELETE_TREE; walk_state->descending_callback = acpi_ds_exec_begin_op; walk_state->ascending_callback = acpi_ds_exec_end_op; +#endif break; default: return (AE_BAD_PARAMETER); - break; } return (AE_OK); @@ -86,9 +105,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_load1_begin_op + * FUNCTION: acpi_ds_load1_begin_op * - * PARAMETERS: Walk_state - Current state of the parse tree walk + * PARAMETERS: walk_state - Current state of the parse tree walk * Op - Op that has been just been reached in the * walk; Arguments have not been evaluated yet.
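acpi_ds_init_callbacks(), just above, is where each pass of the table walk gets its begin/end handler pair, with the pass-3 execute pair compiled out under ACPI_NO_METHOD_EXECUTION. A standalone sketch of the dispatch pattern, names illustrative and pass 2 left out for brevity:

#include <stdio.h>

struct walk_state;
typedef int (*walk_cb)(struct walk_state *ws);

struct walk_state {
    walk_cb descending;      /* runs when an op is first reached */
    walk_cb ascending;       /* runs when an op is completed */
};

static int load1_begin(struct walk_state *ws) { (void) ws; puts("load1 begin"); return 0; }
static int load1_end(struct walk_state *ws)   { (void) ws; puts("load1 end");   return 0; }
static int exec_begin(struct walk_state *ws)  { (void) ws; puts("exec begin");  return 0; }
static int exec_end(struct walk_state *ws)    { (void) ws; puts("exec end");    return 0; }

/* Select the callback pair for a given pass, as acpi_ds_init_callbacks() does. */
static int init_callbacks(struct walk_state *ws, unsigned pass)
{
    switch (pass) {
    case 1:                           /* namespace load, pass 1 */
        ws->descending = load1_begin;
        ws->ascending  = load1_end;
        return 0;
    case 3:                           /* method execution; the real code drops */
        ws->descending = exec_begin;  /* this case under ACPI_NO_METHOD_EXECUTION */
        ws->ascending  = exec_end;
        return 0;
    default:                          /* pass 2 omitted in this sketch */
        return -1;                    /* AE_BAD_PARAMETER */
    }
}

int main(void)
{
    struct walk_state ws = { 0 };
    if (init_callbacks(&ws, 3) == 0) {
        ws.descending(&ws);
        ws.ascending(&ws);
    }
    return 0;
}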
* @@ -100,33 +119,42 @@ acpi_status acpi_ds_load1_begin_op ( - acpi_walk_state *walk_state, - acpi_parse_object **out_op) + struct acpi_walk_state *walk_state, + union acpi_parse_object **out_op) { - acpi_parse_object *op; - acpi_namespace_node *node; - acpi_status status; - acpi_object_type8 data_type; - NATIVE_CHAR *path; + union acpi_parse_object *op; + struct acpi_namespace_node *node; + acpi_status status; + acpi_object_type object_type; + char *path; + u32 flags; + + ACPI_FUNCTION_NAME ("ds_load1_begin_op"); - PROC_NAME ("Ds_load1_begin_op"); op = walk_state->op; ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state)); - /* We are only interested in opcodes that have an associated name */ - if (walk_state->op) { - if (!(walk_state->op_info->flags & AML_NAMED)) { + if (op) { + if (!(walk_state->op_info->flags & AML_NAMED)) { +#if 0 + if ((walk_state->op_info->class == AML_CLASS_EXECUTE) || + (walk_state->op_info->class == AML_CLASS_CONTROL)) { + acpi_os_printf ("\n\n***EXECUTABLE OPCODE %s***\n\n", walk_state->op_info->name); + *out_op = op; + return (AE_CTRL_SKIP); + } +#endif *out_op = op; return (AE_OK); } /* Check if this object has already been installed in the namespace */ - if (op->node) { + if (op->common.node) { *out_op = op; return (AE_OK); } @@ -136,30 +164,117 @@ /* Map the raw opcode into an internal object type */ - data_type = acpi_ds_map_named_opcode_to_data_type (walk_state->opcode); - + object_type = walk_state->op_info->object_type; ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "State=%p Op=%p Type=%x\n", walk_state, op, data_type)); + "State=%p Op=%p [%s] ", walk_state, op, acpi_ut_get_type_name (object_type))); + switch (walk_state->opcode) { + case AML_SCOPE_OP: - if (walk_state->opcode == AML_SCOPE_OP) { - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "State=%p Op=%p Type=%x\n", walk_state, op, data_type)); - } + /* + * The target name of the Scope() operator must exist at this point so + * that we can actually open the scope to enter new names underneath it. + * Allow search-to-root for single namesegs. + */ + status = acpi_ns_lookup (walk_state->scope_info, path, object_type, + ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT, walk_state, &(node)); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (path, status); + return (status); + } - /* - * Enter the named type into the internal namespace. We enter the name - * as we go downward in the parse tree. Any necessary subobjects that involve - * arguments to the opcode must be created as we go back up the parse tree later. - */ - status = acpi_ns_lookup (walk_state->scope_info, path, data_type, - IMODE_LOAD_PASS1, NS_NO_UPSEARCH, walk_state, &(node)); + /* + * Check to make sure that the target is + * one of the opcodes that actually opens a scope + */ + switch (node->type) { + case ACPI_TYPE_LOCAL_SCOPE: /* Scope */ + case ACPI_TYPE_DEVICE: + case ACPI_TYPE_POWER: + case ACPI_TYPE_PROCESSOR: + case ACPI_TYPE_THERMAL: + + /* These are acceptable types */ + break; - if (ACPI_FAILURE (status)) { - return (status); + case ACPI_TYPE_INTEGER: + case ACPI_TYPE_STRING: + case ACPI_TYPE_BUFFER: + + /* + * These types we will allow, but we will change the type. This + * enables some existing code of the form: + * + * Name (DEB, 0) + * Scope (DEB) { ... } + * + * Note: silently change the type here. 
On the second pass, we will report a warning + */ + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n", + path, acpi_ut_get_type_name (node->type))); + + node->type = ACPI_TYPE_ANY; + walk_state->scope_info->common.value = ACPI_TYPE_ANY; + break; + + default: + + /* All other types are an error */ + + ACPI_REPORT_ERROR (("Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)\n", + acpi_ut_get_type_name (node->type), path)); + + return (AE_AML_OPERAND_TYPE); + } + break; + + + default: + + /* + * For all other named opcodes, we will enter the name into the namespace. + * + * Setup the search flags. + * Since we are entering a name into the namespace, we do not want to + * enable the search-to-root upsearch. + * + * There are only two conditions where it is acceptable that the name + * already exists: + * 1) the Scope() operator can reopen a scoping object that was + * previously defined (Scope, Method, Device, etc.) + * 2) Whenever we are parsing a deferred opcode (op_region, Buffer, + * buffer_field, or Package), the name of the object is already + * in the namespace. + */ + flags = ACPI_NS_NO_UPSEARCH; + if ((walk_state->opcode != AML_SCOPE_OP) && + (!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) { + flags |= ACPI_NS_ERROR_IF_FOUND; + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DISPATCH, "Cannot already exist\n")); + } + else { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DISPATCH, "Both Find or Create allowed\n")); + } + + /* + * Enter the named type into the internal namespace. We enter the name + * as we go downward in the parse tree. Any necessary subobjects that involve + * arguments to the opcode must be created as we go back up the parse tree later. + */ + status = acpi_ns_lookup (walk_state->scope_info, path, object_type, + ACPI_IMODE_LOAD_PASS1, flags, walk_state, &(node)); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (path, status); + return (status); + } + break; } + + /* Common exit */ + if (!op) { /* Create a new op */ @@ -171,13 +286,18 @@ /* Initialize */ - ((acpi_parse2_object *)op)->name = node->name; + op->named.name = node->name.integer; + +#if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)) + op->named.path = (u8 *) path; +#endif + /* * Put the Node in the "op" object that the parser uses, so we * can get it again quickly when this scope is closed */ - op->node = node; + op->common.node = node; acpi_ps_append_arg (acpi_ps_get_parent_scope (&walk_state->parser_state), op); *out_op = op; @@ -187,9 +307,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_load1_end_op + * FUNCTION: acpi_ds_load1_end_op * - * PARAMETERS: Walk_state - Current state of the parse tree walk + * PARAMETERS: walk_state - Current state of the parse tree walk * Op - Op that has been just been completed in the * walk; Arguments have now been evaluated. 
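The switch above encodes the Scope() compatibility rule: only Scope, Device, Power, Processor and Thermal nodes really open a scope, but Integer, String and Buffer targets are tolerated (silently retyped in pass 1, with a warning in pass 2) for BIOSes that write Name (DEB, 0) followed by Scope (DEB). A compact sketch of the check, with illustrative type names:

#include <stdio.h>

enum obj_type {
    TYPE_ANY, TYPE_INTEGER, TYPE_STRING, TYPE_BUFFER,
    TYPE_DEVICE, TYPE_POWER, TYPE_PROCESSOR, TYPE_THERMAL, TYPE_LOCAL_SCOPE
};

struct node { enum obj_type type; };

/* Returns 0 on success, -1 for an operand-type error (AE_AML_OPERAND_TYPE). */
static int check_scope_target(struct node *node)
{
    switch (node->type) {
    case TYPE_LOCAL_SCOPE:
    case TYPE_DEVICE:
    case TYPE_POWER:
    case TYPE_PROCESSOR:
    case TYPE_THERMAL:
        return 0;               /* these legitimately open a scope */

    case TYPE_INTEGER:
    case TYPE_STRING:
    case TYPE_BUFFER:
        /*
         * Tolerated for existing BIOS code: coerce the node to ANY so
         * the scope can be opened anyway.
         */
        node->type = TYPE_ANY;
        return 0;

    default:
        return -1;              /* anything else cannot be overridden */
    }
}

int main(void)
{
    struct node n = { TYPE_INTEGER };
    printf("status=%d new_type=%d\n", check_scope_target(&n), n.type);
    return 0;
}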
* @@ -202,57 +322,107 @@ acpi_status acpi_ds_load1_end_op ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_parse_object *op; - acpi_object_type8 data_type; + union acpi_parse_object *op; + acpi_object_type object_type; + acpi_status status = AE_OK; + + ACPI_FUNCTION_NAME ("ds_load1_end_op"); - PROC_NAME ("Ds_load1_end_op"); op = walk_state->op; ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state)); - /* We are only interested in opcodes that have an associated name */ - if (!(walk_state->op_info->flags & AML_NAMED)) { + if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) { return (AE_OK); } - /* Get the type to determine if we should pop the scope */ + /* Get the object type to determine if we should pop the scope */ - data_type = acpi_ds_map_named_opcode_to_data_type (op->opcode); + object_type = walk_state->op_info->object_type; - if (op->opcode == AML_NAME_OP) { - /* For Name opcode, check the argument */ +#ifndef ACPI_NO_METHOD_EXECUTION + if (walk_state->op_info->flags & AML_FIELD) { + if (walk_state->opcode == AML_FIELD_OP || + walk_state->opcode == AML_BANK_FIELD_OP || + walk_state->opcode == AML_INDEX_FIELD_OP) { + status = acpi_ds_init_field_objects (op, walk_state); + } + return (status); + } - if (op->value.arg) { - data_type = acpi_ds_map_opcode_to_data_type ( - (op->value.arg)->opcode, NULL); - ((acpi_namespace_node *)op->node)->type = - (u8) data_type; + + if (op->common.aml_opcode == AML_REGION_OP) { + status = acpi_ex_create_region (op->named.data, op->named.length, + (acpi_adr_space_type) ((op->common.value.arg)->common.value.integer), walk_state); + if (ACPI_FAILURE (status)) { + return (status); + } + } +#endif + + if (op->common.aml_opcode == AML_NAME_OP) { + /* For Name opcode, get the object type from the argument */ + + if (op->common.value.arg) { + object_type = (acpi_ps_get_opcode_info ((op->common.value.arg)->common.aml_opcode))->object_type; + op->common.node->type = (u8) object_type; + } + } + + if (op->common.aml_opcode == AML_METHOD_OP) { + /* + * method_op pkg_length name_string method_flags term_list + * + * Note: We must create the method node/object pair as soon as we + * see the method declaration. This allows later pass1 parsing + * of invocations of the method (need to know the number of + * arguments.) 
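The comment above is the key pass-1 change: a Method's node and object are created as soon as the declaration is seen, because parsing a later invocation requires the argument count to already be known. In AML that count travels in the method flags byte (bits 0-2 argument count, bit 3 serialized, bits 4-7 sync level, per the ACPI specification); a sketch of the decoding, names illustrative:

#include <stdio.h>
#include <stdint.h>

struct method_info {
    unsigned arg_count;
    unsigned serialized;
    unsigned sync_level;
};

/* Unpack the AML MethodFlags byte that follows the method's name string. */
static struct method_info decode_method_flags(uint8_t flags)
{
    struct method_info m;

    m.arg_count  = flags & 0x07;        /* bits 0-2: 0..7 arguments */
    m.serialized = (flags >> 3) & 0x01; /* bit 3: serialized method */
    m.sync_level = (flags >> 4) & 0x0F; /* bits 4-7: sync level */
    return m;
}

int main(void)
{
    /* 0x2B: three arguments, serialized, sync level 2 */
    struct method_info m = decode_method_flags(0x2B);

    printf("args=%u serialized=%u sync=%u\n",
           m.arg_count, m.serialized, m.sync_level);
    return 0;
}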
+ */ + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, + "LOADING-Method: State=%p Op=%p named_obj=%p\n", + walk_state, op, op->named.node)); + + if (!acpi_ns_get_attached_object (op->named.node)) { + walk_state->operands[0] = (void *) op->named.node; + walk_state->num_operands = 1; + + status = acpi_ds_create_operands (walk_state, op->common.value.arg); + if (ACPI_SUCCESS (status)) { + status = acpi_ex_create_method (op->named.data, + op->named.length, walk_state); + } + walk_state->operands[0] = NULL; + walk_state->num_operands = 0; + + if (ACPI_FAILURE (status)) { + return (status); + } } } /* Pop the scope stack */ - if (acpi_ns_opens_scope (data_type)) { + if (acpi_ns_opens_scope (object_type)) { ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "(%s): Popping scope for Op %p\n", - acpi_ut_get_type_name (data_type), op)); + acpi_ut_get_type_name (object_type), op)); - acpi_ds_scope_stack_pop (walk_state); + status = acpi_ds_scope_stack_pop (walk_state); } - return (AE_OK); + return (status); } /******************************************************************************* * - * FUNCTION: Acpi_ds_load2_begin_op + * FUNCTION: acpi_ds_load2_begin_op * - * PARAMETERS: Walk_state - Current state of the parse tree walk + * PARAMETERS: walk_state - Current state of the parse tree walk * Op - Op that has been just been reached in the * walk; Arguments have not been evaluated yet. * @@ -264,35 +434,28 @@ acpi_status acpi_ds_load2_begin_op ( - acpi_walk_state *walk_state, - acpi_parse_object **out_op) + struct acpi_walk_state *walk_state, + union acpi_parse_object **out_op) { - acpi_parse_object *op; - acpi_namespace_node *node; - acpi_status status; - acpi_object_type8 data_type; - NATIVE_CHAR *buffer_ptr; - void *original = NULL; + union acpi_parse_object *op; + struct acpi_namespace_node *node; + acpi_status status; + acpi_object_type object_type; + char *buffer_ptr; - PROC_NAME ("Ds_load2_begin_op"); + ACPI_FUNCTION_TRACE ("ds_load2_begin_op"); + op = walk_state->op; ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state)); - if (op) { /* We only care about Namespace opcodes here */ - if (!(walk_state->op_info->flags & AML_NSOPCODE) && - walk_state->opcode != AML_INT_NAMEPATH_OP) { - return (AE_OK); - } - - /* TBD: [Restructure] Temp! 
same code as in psparse */ - - if (!(walk_state->op_info->flags & AML_NAMED)) { - return (AE_OK); + if ((!(walk_state->op_info->flags & AML_NSOPCODE) && (walk_state->opcode != AML_INT_NAMEPATH_OP)) || + (!(walk_state->op_info->flags & AML_NAMED))) { + return_ACPI_STATUS (AE_OK); } /* @@ -301,61 +464,125 @@ if (walk_state->opcode == AML_INT_NAMEPATH_OP) { /* For Namepath op, get the path string */ - buffer_ptr = op->value.string; + buffer_ptr = op->common.value.string; if (!buffer_ptr) { /* No name, just exit */ - return (AE_OK); + return_ACPI_STATUS (AE_OK); } } else { /* Get name from the op */ - buffer_ptr = (NATIVE_CHAR *) &((acpi_parse2_object *)op)->name; + buffer_ptr = (char *) &op->named.name; } } else { + /* Get the namestring from the raw AML */ + buffer_ptr = acpi_ps_get_next_namestring (&walk_state->parser_state); } + /* Map the opcode into an internal object type */ - /* Map the raw opcode into an internal object type */ - - data_type = acpi_ds_map_named_opcode_to_data_type (walk_state->opcode); + object_type = walk_state->op_info->object_type; ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "State=%p Op=%p Type=%x\n", walk_state, op, data_type)); + "State=%p Op=%p Type=%X\n", walk_state, op, object_type)); + + switch (walk_state->opcode) { + case AML_FIELD_OP: + case AML_BANK_FIELD_OP: + case AML_INDEX_FIELD_OP: - if (walk_state->opcode == AML_FIELD_OP || - walk_state->opcode == AML_BANK_FIELD_OP || - walk_state->opcode == AML_INDEX_FIELD_OP) { node = NULL; status = AE_OK; - } + break; + + case AML_INT_NAMEPATH_OP: - else if (walk_state->opcode == AML_INT_NAMEPATH_OP) { /* - * The Name_path is an object reference to an existing object. Don't enter the + * The name_path is an object reference to an existing object. Don't enter the * name into the namespace, but look it up for use later */ - status = acpi_ns_lookup (walk_state->scope_info, buffer_ptr, data_type, - IMODE_EXECUTE, NS_SEARCH_PARENT, walk_state, &(node)); - } + status = acpi_ns_lookup (walk_state->scope_info, buffer_ptr, object_type, + ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT, walk_state, &(node)); + break; - else { - if (op && op->node) { - original = op->node; - node = op->node; + case AML_SCOPE_OP: - if (acpi_ns_opens_scope (data_type)) { - status = acpi_ds_scope_stack_push (node, data_type, walk_state); + /* + * The Path is an object reference to an existing object. Don't enter the + * name into the namespace, but look it up for use later + */ + status = acpi_ns_lookup (walk_state->scope_info, buffer_ptr, object_type, + ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT, walk_state, &(node)); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (buffer_ptr, status); + return_ACPI_STATUS (status); + } + /* + * We must check to make sure that the target is + * one of the opcodes that actually opens a scope + */ + switch (node->type) { + case ACPI_TYPE_LOCAL_SCOPE: /* Scope */ + case ACPI_TYPE_DEVICE: + case ACPI_TYPE_POWER: + case ACPI_TYPE_PROCESSOR: + case ACPI_TYPE_THERMAL: + + /* These are acceptable types */ + break; + + case ACPI_TYPE_INTEGER: + case ACPI_TYPE_STRING: + case ACPI_TYPE_BUFFER: + + /* + * These types we will allow, but we will change the type. This + * enables some existing code of the form: + * + * Name (DEB, 0) + * Scope (DEB) { ... 
} + */ + + ACPI_REPORT_WARNING (("Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n", + buffer_ptr, acpi_ut_get_type_name (node->type))); + + node->type = ACPI_TYPE_ANY; + walk_state->scope_info->common.value = ACPI_TYPE_ANY; + break; + + default: + + /* All other types are an error */ + + ACPI_REPORT_ERROR (("Invalid type (%s) for target of Scope operator [%4.4s]\n", + acpi_ut_get_type_name (node->type), buffer_ptr)); + + return (AE_AML_OPERAND_TYPE); + } + break; + + default: + + /* All other opcodes */ + + if (op && op->common.node) { + /* This op/node was previously entered into the namespace */ + + node = op->common.node; + + if (acpi_ns_opens_scope (object_type)) { + status = acpi_ds_scope_stack_push (node, object_type, walk_state); if (ACPI_FAILURE (status)) { - return (status); + return_ACPI_STATUS (status); } } - return (AE_OK); + return_ACPI_STATUS (AE_OK); } /* @@ -363,50 +590,50 @@ * as we go downward in the parse tree. Any necessary subobjects that involve * arguments to the opcode must be created as we go back up the parse tree later. */ - status = acpi_ns_lookup (walk_state->scope_info, buffer_ptr, data_type, - IMODE_EXECUTE, NS_NO_UPSEARCH, walk_state, &(node)); + status = acpi_ns_lookup (walk_state->scope_info, buffer_ptr, object_type, + ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, walk_state, &(node)); + break; } - if (ACPI_SUCCESS (status)) { - if (!op) { - /* Create a new op */ + if (ACPI_FAILURE (status)) { + ACPI_REPORT_NSERROR (buffer_ptr, status); + return_ACPI_STATUS (status); + } - op = acpi_ps_alloc_op (walk_state->opcode); - if (!op) { - return (AE_NO_MEMORY); - } - /* Initialize */ + if (!op) { + /* Create a new op */ - ((acpi_parse2_object *)op)->name = node->name; - *out_op = op; + op = acpi_ps_alloc_op (walk_state->opcode); + if (!op) { + return_ACPI_STATUS (AE_NO_MEMORY); } - /* - * Put the Node in the "op" object that the parser uses, so we - * can get it again quickly when this scope is closed - */ - op->node = node; - - if (original) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "old %p new %p\n", original, node)); + /* Initialize the new op */ - if (original != node) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Lookup match error: old %p new %p\n", original, node)); - } + if (node) { + op->named.name = node->name.integer; + } + if (out_op) { + *out_op = op; } } - return (status); + /* + * Put the Node in the "op" object that the parser uses, so we + * can get it again quickly when this scope is closed + */ + op->common.node = node; + + return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ds_load2_end_op + * FUNCTION: acpi_ds_load2_end_op * - * PARAMETERS: Walk_state - Current state of the parse tree walk + * PARAMETERS: walk_state - Current state of the parse tree walk * Op - Op that has been just been completed in the * walk; Arguments have now been evaluated. 
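In both load passes above, a named object whose type opens a scope is pushed on the walk state's scope stack, and the matching end-op handler pops it; the dswscope.c hunks below add depth counting to the same stack. A minimal sketch of that stack as a singly linked list, with illustrative types:

#include <stdio.h>
#include <stdlib.h>

/* The scope stack: a singly linked list whose head is the current scope. */
struct scope_state {
    const char *name;               /* 4-char ACPI name of the scope node */
    struct scope_state *next;
};

struct walk_state {
    struct scope_state *scope_info;
    unsigned scope_depth;
};

static int scope_stack_push(struct walk_state *ws, const char *name)
{
    struct scope_state *s = malloc(sizeof(*s));

    if (!s)
        return -1;                  /* AE_NO_MEMORY in the real code */
    s->name = name;
    s->next = ws->scope_info;       /* link on top of the old scope */
    ws->scope_info = s;
    ws->scope_depth++;
    return 0;
}

static int scope_stack_pop(struct walk_state *ws)
{
    struct scope_state *s = ws->scope_info;

    if (!s)
        return -1;                  /* AE_STACK_UNDERFLOW */
    ws->scope_info = s->next;
    ws->scope_depth--;
    free(s);
    return 0;
}

int main(void)
{
    struct walk_state ws = { NULL, 0 };

    scope_stack_push(&ws, "_SB_");
    scope_stack_push(&ws, "PCI0");
    printf("depth=%u top=%s\n", ws.scope_depth, ws.scope_info->name);
    scope_stack_pop(&ws);
    scope_stack_pop(&ws);
    return 0;
}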
* @@ -419,48 +646,44 @@ acpi_status acpi_ds_load2_end_op ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_parse_object *op; - acpi_status status = AE_OK; - acpi_object_type8 data_type; - acpi_namespace_node *node; - acpi_parse_object *arg; - acpi_namespace_node *new_node; - u32 i; + union acpi_parse_object *op; + acpi_status status = AE_OK; + acpi_object_type object_type; + struct acpi_namespace_node *node; + union acpi_parse_object *arg; + struct acpi_namespace_node *new_node; +#ifndef ACPI_NO_METHOD_EXECUTION + u32 i; +#endif - PROC_NAME ("Ds_load2_end_op"); + ACPI_FUNCTION_TRACE ("ds_load2_end_op"); op = walk_state->op; - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state)); - + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n", + walk_state->op_info->name, op, walk_state)); /* Only interested in opcodes that have namespace objects */ if (!(walk_state->op_info->flags & AML_NSOBJECT)) { - return (AE_OK); + return_ACPI_STATUS (AE_OK); } - if (op->opcode == AML_SCOPE_OP) { + if (op->common.aml_opcode == AML_SCOPE_OP) { ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "Ending scope Op=%p State=%p\n", op, walk_state)); - - if (((acpi_parse2_object *)op)->name == -1) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unnamed scope! Op=%p State=%p\n", - op, walk_state)); - return (AE_OK); - } } - data_type = acpi_ds_map_named_opcode_to_data_type (op->opcode); + object_type = walk_state->op_info->object_type; /* * Get the Node/name from the earlier lookup * (It was saved in the *op structure) */ - node = op->node; + node = op->common.node; /* * Put the Node on the object stack (Contains the ACPI Name of @@ -471,11 +694,14 @@ /* Pop the scope stack */ - if (acpi_ns_opens_scope (data_type)) { - + if (acpi_ns_opens_scope (object_type) && (op->common.aml_opcode != AML_INT_METHODCALL_OP)) { ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "(%s) Popping scope for Op %p\n", - acpi_ut_get_type_name (data_type), op)); - acpi_ds_scope_stack_pop (walk_state); + acpi_ut_get_type_name (object_type), op)); + + status = acpi_ds_scope_stack_pop (walk_state); + if (ACPI_FAILURE (status)) { + goto cleanup; + } } /* @@ -507,14 +733,16 @@ */ ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "Create-Load [%s] State=%p Op=%p Named_obj=%p\n", - acpi_ps_get_opcode_name (op->opcode), walk_state, op, node)); + "Create-Load [%s] State=%p Op=%p named_obj=%p\n", + acpi_ps_get_opcode_name (op->common.aml_opcode), walk_state, op, node)); /* Decode the opcode */ - arg = op->value.arg; + arg = op->common.value.arg; switch (walk_state->op_info->type) { +#ifndef ACPI_NO_METHOD_EXECUTION + case AML_TYPE_CREATE_FIELD: /* @@ -527,24 +755,25 @@ case AML_TYPE_NAMED_FIELD: - arg = op->value.arg; - switch (op->opcode) { + switch (op->common.aml_opcode) { case AML_INDEX_FIELD_OP: - status = acpi_ds_create_index_field (op, (acpi_handle) arg->node, + status = acpi_ds_create_index_field (op, (acpi_handle) arg->common.node, walk_state); break; - case AML_BANK_FIELD_OP: - status = acpi_ds_create_bank_field (op, arg->node, walk_state); + status = acpi_ds_create_bank_field (op, arg->common.node, walk_state); break; - case AML_FIELD_OP: - status = acpi_ds_create_field (op, arg->node, walk_state); + status = acpi_ds_create_field (op, arg->common.node, walk_state); + break; + + default: + /* All NAMED_FIELD opcodes must be handled above */ break; } break; @@ -557,31 +786,27 @@ goto cleanup; } - switch (op->opcode) { + switch (op->common.aml_opcode) { case AML_PROCESSOR_OP: status = acpi_ex_create_processor (walk_state); 
break; - case AML_POWER_RES_OP: status = acpi_ex_create_power_resource (walk_state); break; - case AML_MUTEX_OP: status = acpi_ex_create_mutex (walk_state); break; - case AML_EVENT_OP: status = acpi_ex_create_event (walk_state); break; - case AML_DATA_REGION_OP: status = acpi_ex_create_table_region (walk_state); @@ -597,7 +822,6 @@ status = AE_OK; goto cleanup; - break; } /* Delete operands */ @@ -608,40 +832,32 @@ } break; - +#endif /* ACPI_NO_METHOD_EXECUTION */ case AML_TYPE_NAMED_COMPLEX: - switch (op->opcode) { - case AML_METHOD_OP: - /* - * Method_op Pkg_length Names_string Method_flags Term_list - */ - ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "LOADING-Method: State=%p Op=%p Named_obj=%p\n", - walk_state, op, node)); - - if (!node->object) { - status = acpi_ds_create_operands (walk_state, arg); - if (ACPI_FAILURE (status)) { - goto cleanup; - } - - status = acpi_ex_create_method (((acpi_parse2_object *) op)->data, - ((acpi_parse2_object *) op)->length, - walk_state); - } - break; - - + switch (op->common.aml_opcode) { +#ifndef ACPI_NO_METHOD_EXECUTION case AML_REGION_OP: /* - * The Op_region is not fully parsed at this time. Only valid argument is the Space_id. + * The op_region is not fully parsed at this time. Only valid argument is the space_id. * (We must save the address of the AML of the address and length operands) */ - status = acpi_ex_create_region (((acpi_parse2_object *) op)->data, - ((acpi_parse2_object *) op)->length, - (ACPI_ADR_SPACE_TYPE) arg->value.integer, walk_state); + /* + * If we have a valid region, initialize it + * Namespace is NOT locked at this point. + */ + status = acpi_ev_initialize_region (acpi_ns_get_attached_object (node), FALSE); + if (ACPI_FAILURE (status)) { + /* + * If AE_NOT_EXIST is returned, it is not fatal + * because many regions get created before a handler + * is installed for said region. + */ + if (AE_NOT_EXIST == status) { + status = AE_OK; + } + } break; @@ -649,6 +865,13 @@ status = acpi_ds_create_node (walk_state, node, op); break; +#endif /* ACPI_NO_METHOD_EXECUTION */ + + + default: + /* All NAMED_COMPLEX opcodes must be handled above */ + /* Note: Method objects were already created in Pass 1 */ + break; } break; @@ -662,29 +885,34 @@ case AML_CLASS_METHOD_CALL: ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "RESOLVING-Method_call: State=%p Op=%p Named_obj=%p\n", + "RESOLVING-method_call: State=%p Op=%p named_obj=%p\n", walk_state, op, node)); /* * Lookup the method name and save the Node */ - status = acpi_ns_lookup (walk_state->scope_info, arg->value.string, - ACPI_TYPE_ANY, IMODE_LOAD_PASS2, - NS_SEARCH_PARENT | NS_DONT_OPEN_SCOPE, + status = acpi_ns_lookup (walk_state->scope_info, arg->common.value.string, + ACPI_TYPE_ANY, ACPI_IMODE_LOAD_PASS2, + ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, walk_state, &(new_node)); if (ACPI_SUCCESS (status)) { - /* TBD: has name already been resolved by here ??*/ - - /* TBD: [Restructure] Make sure that what we found is indeed a method! */ - /* We didn't search for a method on purpose, to see if the name would resolve! 
*/ + /* + * Make sure that what we found is indeed a method + * We didn't search for a method on purpose, to see if the name would resolve + */ + if (new_node->type != ACPI_TYPE_METHOD) { + status = AE_AML_OPERAND_TYPE; + } /* We could put the returned object (Node) on the object stack for later, but * for now, we will put it in the "op" object that the parser uses, so we * can get it again at the end of this scope */ - op->node = new_node; + op->common.node = new_node; + } + else { + ACPI_REPORT_NSERROR (arg->common.value.string, status); } - break; @@ -692,14 +920,13 @@ break; } - cleanup: /* Remove the Node pushed at the very beginning */ walk_state->operands[0] = NULL; walk_state->num_operands = 0; - return (status); + return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dswscope.c linux.21rc5-ac2/drivers/acpi/dispatcher/dswscope.c --- linux.21rc5/drivers/acpi/dispatcher/dswscope.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dswscope.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,36 +1,53 @@ /****************************************************************************** * * Module Name: dswscope - Scope stack manipulation - * $Revision: 49 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acdispat.h" +#include <acpi/acpi.h> +#include <acpi/acdispat.h> #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dswscope") + ACPI_MODULE_NAME ("dswscope") #define STACK_POP(head) head @@ -38,7 +55,7 @@ /**************************************************************************** * - * FUNCTION: Acpi_ds_scope_stack_clear + * FUNCTION: acpi_ds_scope_stack_clear * * PARAMETERS: None * @@ -49,11 +66,11 @@ void acpi_ds_scope_stack_clear ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_generic_state *scope_info; + union acpi_generic_state *scope_info; - PROC_NAME ("Ds_scope_stack_clear"); + ACPI_FUNCTION_NAME ("ds_scope_stack_clear"); while (walk_state->scope_info) { @@ -63,7 +80,7 @@ walk_state->scope_info = scope_info->scope.next; ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, - "Popped object type %X\n", scope_info->common.value)); + "Popped object type (%s)\n", acpi_ut_get_type_name (scope_info->common.value))); acpi_ut_delete_generic_state (scope_info); } } @@ -71,7 +88,7 @@ /**************************************************************************** * - * FUNCTION: Acpi_ds_scope_stack_push + * FUNCTION: acpi_ds_scope_stack_push * * PARAMETERS: *Node, - Name to be made current * Type, - Type of frame being pushed * @@ -83,27 +100,28 @@ acpi_status acpi_ds_scope_stack_push ( - acpi_namespace_node *node, - acpi_object_type8 type, - acpi_walk_state *walk_state) + struct acpi_namespace_node *node, + acpi_object_type type, + struct acpi_walk_state *walk_state) { - acpi_generic_state *scope_info; + union acpi_generic_state *scope_info; + union acpi_generic_state *old_scope_info; - FUNCTION_TRACE ("Ds_scope_stack_push"); + ACPI_FUNCTION_TRACE ("ds_scope_stack_push"); if (!node) { /* Invalid scope */ - REPORT_ERROR (("Ds_scope_stack_push: null scope passed\n")); + ACPI_REPORT_ERROR (("ds_scope_stack_push: null scope passed\n")); return_ACPI_STATUS (AE_BAD_PARAMETER); } /* Make sure object type is valid */ - if (!acpi_ex_validate_object_type (type)) { - REPORT_WARNING (("Ds_scope_stack_push: type code out of range\n")); + if (!acpi_ut_valid_object_type (type)) { + ACPI_REPORT_WARNING (("ds_scope_stack_push: type code out of range\n")); } @@ -120,6 +138,28 @@ scope_info->scope.node = node; scope_info->common.value = (u16) type; + walk_state->scope_depth++; + + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, + "[%.2d] Pushed scope ", (u32) walk_state->scope_depth)); + + old_scope_info = walk_state->scope_info; + if (old_scope_info) { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, + "[%4.4s] (%10s)", + old_scope_info->scope.node->name.ascii, + acpi_ut_get_type_name (old_scope_info->common.value))); + } + else { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, + "[\\___] (%10s)", "ROOT")); + } + + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, + ", New scope -> [%4.4s] (%s)\n", + scope_info->scope.node->name.ascii, + acpi_ut_get_type_name (scope_info->common.value))); + /* Push new scope object onto stack */ acpi_ut_push_generic_state (&walk_state->scope_info, scope_info); @@ -130,7 +170,7 @@
/**************************************************************************** * - * FUNCTION: Acpi_ds_scope_stack_pop + * FUNCTION: acpi_ds_scope_stack_pop * * PARAMETERS: Type - The type of frame to be found * @@ -147,12 +187,13 @@ acpi_status acpi_ds_scope_stack_pop ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_generic_state *scope_info; + union acpi_generic_state *scope_info; + union acpi_generic_state *new_scope_info; - FUNCTION_TRACE ("Ds_scope_stack_pop"); + ACPI_FUNCTION_TRACE ("ds_scope_stack_pop"); /* @@ -163,8 +204,25 @@ return_ACPI_STATUS (AE_STACK_UNDERFLOW); } + walk_state->scope_depth--; + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, - "Popped object type %X\n", scope_info->common.value)); + "[%.2d] Popped scope [%4.4s] (%10s), New scope -> ", + (u32) walk_state->scope_depth, + scope_info->scope.node->name.ascii, + acpi_ut_get_type_name (scope_info->common.value))); + + new_scope_info = walk_state->scope_info; + if (new_scope_info) { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, + "[%4.4s] (%s)\n", + new_scope_info->scope.node->name.ascii, + acpi_ut_get_type_name (new_scope_info->common.value))); + } + else { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, + "[\\___] (ROOT)\n")); + } acpi_ut_delete_generic_state (scope_info); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/dispatcher/dswstate.c linux.21rc5-ac2/drivers/acpi/dispatcher/dswstate.c --- linux.21rc5/drivers/acpi/dispatcher/dswstate.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/dswstate.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,46 +1,62 @@ /****************************************************************************** * * Module Name: dswstate - Dispatcher parse tree walk management routines - * $Revision: 54 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "amlcode.h" -#include "acparser.h" -#include "acdispat.h" -#include "acnamesp.h" -#include "acinterp.h" +#include <acpi/acpi.h> +#include <acpi/acparser.h> +#include <acpi/acdispat.h> +#include <acpi/acnamesp.h> #define _COMPONENT ACPI_DISPATCHER - MODULE_NAME ("dswstate") + ACPI_MODULE_NAME ("dswstate") /******************************************************************************* * - * FUNCTION: Acpi_ds_result_insert + * FUNCTION: acpi_ds_result_insert * * PARAMETERS: Object - Object to push - * Walk_state - Current Walk state + * walk_state - Current Walk state * * RETURN: Status * @@ -50,14 +66,14 @@ acpi_status acpi_ds_result_insert ( - void *object, - u32 index, - acpi_walk_state *walk_state) + void *object, + u32 index, + struct acpi_walk_state *walk_state) { - acpi_generic_state *state; + union acpi_generic_state *state; - PROC_NAME ("Ds_result_insert"); + ACPI_FUNCTION_NAME ("ds_result_insert"); state = walk_state->results; @@ -67,7 +83,7 @@ return (AE_NOT_EXIST); } - if (index >= OBJ_NUM_OPERANDS) { + if (index >= ACPI_OBJ_NUM_OPERANDS) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Index out of range: %X Obj=%p State=%p Num=%X\n", index, object, walk_state, state->results.num_results)); @@ -86,7 +102,7 @@ ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p [%s] State=%p Num=%X Cur=%X\n", - object, object ? acpi_ut_get_type_name (((acpi_operand_object *) object)->common.type) : "NULL", + object, object ?
acpi_ut_get_object_type_name ((union acpi_operand_object *) object) : "NULL", walk_state, state->results.num_results, walk_state->current_result)); return (AE_OK); @@ -95,10 +111,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_result_remove + * FUNCTION: acpi_ds_result_remove * * PARAMETERS: Object - Where to return the popped object - * Walk_state - Current Walk state + * walk_state - Current Walk state * * RETURN: Status * @@ -109,14 +125,14 @@ acpi_status acpi_ds_result_remove ( - acpi_operand_object **object, - u32 index, - acpi_walk_state *walk_state) + union acpi_operand_object **object, + u32 index, + struct acpi_walk_state *walk_state) { - acpi_generic_state *state; + union acpi_generic_state *state; - PROC_NAME ("Ds_result_remove"); + ACPI_FUNCTION_NAME ("ds_result_remove"); state = walk_state->results; @@ -126,13 +142,12 @@ return (AE_NOT_EXIST); } - if (index >= OBJ_NUM_OPERANDS) { + if (index >= ACPI_OBJ_MAX_OPERAND) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Index out of range: %X State=%p Num=%X\n", index, walk_state, state->results.num_results)); } - /* Check for a valid result object */ if (!state->results.obj_desc [index]) { @@ -151,7 +166,7 @@ ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p [%s] Index=%X State=%p Num=%X\n", - *object, (*object) ? acpi_ut_get_type_name ((*object)->common.type) : "NULL", + *object, (*object) ? acpi_ut_get_object_type_name (*object) : "NULL", index, walk_state, state->results.num_results)); return (AE_OK); @@ -160,10 +175,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_result_pop + * FUNCTION: acpi_ds_result_pop * * PARAMETERS: Object - Where to return the popped object - * Walk_state - Current Walk state + * walk_state - Current Walk state * * RETURN: Status * @@ -174,14 +189,14 @@ acpi_status acpi_ds_result_pop ( - acpi_operand_object **object, - acpi_walk_state *walk_state) + union acpi_operand_object **object, + struct acpi_walk_state *walk_state) { - u32 index; - acpi_generic_state *state; + acpi_native_uint index; + union acpi_generic_state *state; - PROC_NAME ("Ds_result_pop"); + ACPI_FUNCTION_NAME ("ds_result_pop"); state = walk_state->results; @@ -199,7 +214,7 @@ state->results.num_results--; - for (index = OBJ_NUM_OPERANDS; index; index--) { + for (index = ACPI_OBJ_NUM_OPERANDS; index; index--) { /* Check for a valid result object */ if (state->results.obj_desc [index -1]) { @@ -207,8 +222,8 @@ state->results.obj_desc [index -1] = NULL; ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p [%s] Index=%X State=%p Num=%X\n", - *object, (*object) ? acpi_ut_get_type_name ((*object)->common.type) : "NULL", - index -1, walk_state, state->results.num_results)); + *object, (*object) ? 
acpi_ut_get_object_type_name (*object) : "NULL", + (u32) index -1, walk_state, state->results.num_results)); return (AE_OK); } @@ -220,10 +235,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_result_pop_from_bottom + * FUNCTION: acpi_ds_result_pop_from_bottom * * PARAMETERS: Object - Where to return the popped object - * Walk_state - Current Walk state + * walk_state - Current Walk state * * RETURN: Status * @@ -234,14 +249,14 @@ acpi_status acpi_ds_result_pop_from_bottom ( - acpi_operand_object **object, - acpi_walk_state *walk_state) + union acpi_operand_object **object, + struct acpi_walk_state *walk_state) { - u32 index; - acpi_generic_state *state; + acpi_native_uint index; + union acpi_generic_state *state; - PROC_NAME ("Ds_result_pop_from_bottom"); + ACPI_FUNCTION_NAME ("ds_result_pop_from_bottom"); state = walk_state->results; @@ -251,7 +266,6 @@ return (AE_NOT_EXIST); } - if (!state->results.num_results) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No result objects! State=%p\n", walk_state)); return (AE_AML_NO_RETURN_VALUE); @@ -273,12 +287,12 @@ if (!*object) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Null operand! State=%p #Ops=%X, Index=%X\n", - walk_state, state->results.num_results, index)); + walk_state, state->results.num_results, (u32) index)); return (AE_AML_NO_RETURN_VALUE); } ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p [%s], Results=%p State=%p\n", - *object, (*object) ? acpi_ut_get_type_name ((*object)->common.type) : "NULL", + *object, (*object) ? acpi_ut_get_object_type_name (*object) : "NULL", state, walk_state)); @@ -288,10 +302,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_result_push + * FUNCTION: acpi_ds_result_push * * PARAMETERS: Object - Where to return the popped object - * Walk_state - Current Walk state + * walk_state - Current Walk state * * RETURN: Status * @@ -301,13 +315,13 @@ acpi_status acpi_ds_result_push ( - acpi_operand_object *object, - acpi_walk_state *walk_state) + union acpi_operand_object *object, + struct acpi_walk_state *walk_state) { - acpi_generic_state *state; + union acpi_generic_state *state; - PROC_NAME ("Ds_result_push"); + ACPI_FUNCTION_NAME ("ds_result_push"); state = walk_state->results; @@ -316,7 +330,7 @@ return (AE_AML_INTERNAL); } - if (state->results.num_results == OBJ_NUM_OPERANDS) { + if (state->results.num_results == ACPI_OBJ_NUM_OPERANDS) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Result stack overflow: Obj=%p State=%p Num=%X\n", object, walk_state, state->results.num_results)); @@ -329,12 +343,11 @@ return (AE_BAD_PARAMETER); } - state->results.obj_desc [state->results.num_results] = object; state->results.num_results++; ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p [%s] State=%p Num=%X Cur=%X\n", - object, object ? acpi_ut_get_type_name (((acpi_operand_object *) object)->common.type) : "NULL", + object, object ? acpi_ut_get_object_type_name ((union acpi_operand_object *) object) : "NULL", walk_state, state->results.num_results, walk_state->current_result)); return (AE_OK); @@ -343,24 +356,24 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_result_stack_push + * FUNCTION: acpi_ds_result_stack_push * * PARAMETERS: Object - Object to push - * Walk_state - Current Walk state + * walk_state - Current Walk state * * RETURN: Status * - * DESCRIPTION: + * DESCRIPTION: Push an object onto the walk_state result stack. 
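acpi_ds_result_push() above rejects null objects and checks for overflow against the fixed ACPI_OBJ_NUM_OPERANDS array, while the pop side scans down for the highest occupied slot. A self-contained sketch of one result frame with those checks, status codes reduced to plain ints and types illustrative:

#include <stdio.h>

#define OBJ_NUM_OPERANDS 8          /* fixed, like ACPI_OBJ_NUM_OPERANDS */

/* One frame of results; the interpreter chains these per nested scope. */
struct result_frame {
    void    *obj[OBJ_NUM_OPERANDS];
    unsigned num;
};

static int result_push(struct result_frame *f, void *obj)
{
    if (!obj)
        return -1;                  /* AE_BAD_PARAMETER: null objects rejected */
    if (f->num == OBJ_NUM_OPERANDS)
        return -2;                  /* result stack overflow */
    f->obj[f->num++] = obj;
    return 0;
}

static int result_pop(struct result_frame *f, void **obj)
{
    unsigned i;

    if (!f->num)
        return -1;                  /* AE_AML_NO_RETURN_VALUE */
    f->num--;

    /* Scan down for the highest valid slot, as acpi_ds_result_pop does. */
    for (i = OBJ_NUM_OPERANDS; i; i--) {
        if (f->obj[i - 1]) {
            *obj = f->obj[i - 1];
            f->obj[i - 1] = NULL;
            return 0;
        }
    }
    return -1;
}

int main(void)
{
    struct result_frame f = { { 0 }, 0 };
    int x = 42;
    void *out = NULL;

    result_push(&f, &x);
    if (result_pop(&f, &out) == 0)
        printf("popped %d\n", *(int *) out);
    return 0;
}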
* ******************************************************************************/ acpi_status acpi_ds_result_stack_push ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_generic_state *state; + union acpi_generic_state *state; - PROC_NAME ("Ds_result_stack_push"); + ACPI_FUNCTION_NAME ("ds_result_stack_push"); state = acpi_ut_create_generic_state (); @@ -380,23 +393,23 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_result_stack_pop + * FUNCTION: acpi_ds_result_stack_pop * - * PARAMETERS: Walk_state - Current Walk state + * PARAMETERS: walk_state - Current Walk state * * RETURN: Status * - * DESCRIPTION: + * DESCRIPTION: Pop an object off of the walk_state result stack. * ******************************************************************************/ acpi_status acpi_ds_result_stack_pop ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_generic_state *state; + union acpi_generic_state *state; - PROC_NAME ("Ds_result_stack_pop"); + ACPI_FUNCTION_NAME ("ds_result_stack_pop"); /* Check for stack underflow */ @@ -411,7 +424,7 @@ state = acpi_ut_pop_generic_state (&walk_state->results); ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, - "Result=%p Remaining_results=%X State=%p\n", + "Result=%p remaining_results=%X State=%p\n", state, state->results.num_results, walk_state)); acpi_ut_delete_generic_state (state); @@ -422,9 +435,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_obj_stack_delete_all + * FUNCTION: acpi_ds_obj_stack_delete_all * - * PARAMETERS: Walk_state - Current Walk state + * PARAMETERS: walk_state - Current Walk state * * RETURN: Status * @@ -435,17 +448,17 @@ acpi_status acpi_ds_obj_stack_delete_all ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - u32 i; + u32 i; - FUNCTION_TRACE_PTR ("Ds_obj_stack_delete_all", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ds_obj_stack_delete_all", walk_state); /* The stack size is configurable, but fixed */ - for (i = 0; i < OBJ_NUM_OPERANDS; i++) { + for (i = 0; i < ACPI_OBJ_NUM_OPERANDS; i++) { if (walk_state->operands[i]) { acpi_ut_remove_reference (walk_state->operands[i]); walk_state->operands[i] = NULL; @@ -458,10 +471,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_obj_stack_push + * FUNCTION: acpi_ds_obj_stack_push * * PARAMETERS: Object - Object to push - * Walk_state - Current Walk state + * walk_state - Current Walk state * * RETURN: Status * @@ -471,15 +484,15 @@ acpi_status acpi_ds_obj_stack_push ( - void *object, - acpi_walk_state *walk_state) + void *object, + struct acpi_walk_state *walk_state) { - PROC_NAME ("Ds_obj_stack_push"); + ACPI_FUNCTION_NAME ("ds_obj_stack_push"); /* Check for stack overflow */ - if (walk_state->num_operands >= OBJ_NUM_OPERANDS) { + if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "overflow! 
Obj=%p State=%p #Ops=%X\n", object, walk_state, walk_state->num_operands)); @@ -492,7 +505,7 @@ walk_state->num_operands++; ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n", - object, acpi_ut_get_type_name (((acpi_operand_object *) object)->common.type), + object, acpi_ut_get_object_type_name ((union acpi_operand_object *) object), walk_state, walk_state->num_operands)); return (AE_OK); @@ -502,10 +515,10 @@ #if 0 /******************************************************************************* * - * FUNCTION: Acpi_ds_obj_stack_pop_object + * FUNCTION: acpi_ds_obj_stack_pop_object * - * PARAMETERS: Pop_count - Number of objects/entries to pop - * Walk_state - Current Walk state + * PARAMETERS: pop_count - Number of objects/entries to pop + * walk_state - Current Walk state * * RETURN: Status * @@ -516,10 +529,10 @@ acpi_status acpi_ds_obj_stack_pop_object ( - acpi_operand_object **object, - acpi_walk_state *walk_state) + union acpi_operand_object **object, + struct acpi_walk_state *walk_state) { - PROC_NAME ("Ds_obj_stack_pop_object"); + ACPI_FUNCTION_NAME ("ds_obj_stack_pop_object"); /* Check for stack underflow */ @@ -552,7 +565,7 @@ walk_state->operands [walk_state->num_operands] = NULL; ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n", - *object, acpi_ut_get_type_name ((*object)->common.type), + *object, acpi_ut_get_object_type_name (*object), walk_state, walk_state->num_operands)); return (AE_OK); @@ -561,10 +574,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_obj_stack_pop + * FUNCTION: acpi_ds_obj_stack_pop * - * PARAMETERS: Pop_count - Number of objects/entries to pop - * Walk_state - Current Walk state + * PARAMETERS: pop_count - Number of objects/entries to pop + * walk_state - Current Walk state * * RETURN: Status * @@ -575,12 +588,12 @@ acpi_status acpi_ds_obj_stack_pop ( - u32 pop_count, - acpi_walk_state *walk_state) + u32 pop_count, + struct acpi_walk_state *walk_state) { - u32 i; + u32 i; - PROC_NAME ("Ds_obj_stack_pop"); + ACPI_FUNCTION_NAME ("ds_obj_stack_pop"); for (i = 0; i < pop_count; i++) { @@ -608,10 +621,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_obj_stack_pop_and_delete + * FUNCTION: acpi_ds_obj_stack_pop_and_delete * - * PARAMETERS: Pop_count - Number of objects/entries to pop - * Walk_state - Current Walk state + * PARAMETERS: pop_count - Number of objects/entries to pop + * walk_state - Current Walk state * * RETURN: Status * @@ -622,13 +635,13 @@ acpi_status acpi_ds_obj_stack_pop_and_delete ( - u32 pop_count, - acpi_walk_state *walk_state) + u32 pop_count, + struct acpi_walk_state *walk_state) { - u32 i; - acpi_operand_object *obj_desc; + u32 i; + union acpi_operand_object *obj_desc; - PROC_NAME ("Ds_obj_stack_pop_and_delete"); + ACPI_FUNCTION_NAME ("ds_obj_stack_pop_and_delete"); for (i = 0; i < pop_count; i++) { @@ -660,11 +673,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_obj_stack_get_value + * FUNCTION: acpi_ds_obj_stack_get_value * * PARAMETERS: Index - Stack index whose value is desired. 
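The indexed peek described in this header, acpi_ds_obj_stack_get_value(), reads operand stack entries relative to the top without popping them. A short sketch of the same arithmetic (index 0 == top), with illustrative types:

#include <stdio.h>

/* Peek at an operand counted down from the top of the stack. */
static void *obj_stack_get_value(void **operands, unsigned num_operands,
                                 unsigned index)
{
    if (!num_operands)              /* stack is empty */
        return NULL;
    if (index > num_operands - 1)   /* index reaches below the bottom */
        return NULL;
    return operands[num_operands - 1 - index];
}

int main(void)
{
    int a = 1, b = 2, c = 3;
    void *ops[8] = { &a, &b, &c };  /* three operands pushed, &c on top */

    printf("top=%d bottom=%d\n",
           *(int *) obj_stack_get_value(ops, 3, 0),
           *(int *) obj_stack_get_value(ops, 3, 2));
    return 0;
}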
Based * on the top of the stack (index=0 == top) - * Walk_state - Current Walk state + * walk_state - Current Walk state * * RETURN: Status * @@ -675,11 +688,11 @@ void * acpi_ds_obj_stack_get_value ( - u32 index, - acpi_walk_state *walk_state) + u32 index, + struct acpi_walk_state *walk_state) { - FUNCTION_TRACE_PTR ("Ds_obj_stack_get_value", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ds_obj_stack_get_value", walk_state); /* Can't do it if the stack is empty */ @@ -694,67 +707,66 @@ return_PTR (NULL); } - - return_PTR (walk_state->operands[(NATIVE_UINT)(walk_state->num_operands - 1) - + return_PTR (walk_state->operands[(acpi_native_uint)(walk_state->num_operands - 1) - index]); } /******************************************************************************* * - * FUNCTION: Acpi_ds_get_current_walk_state + * FUNCTION: acpi_ds_get_current_walk_state * - * PARAMETERS: Walk_list - Get current active state for this walk list + * PARAMETERS: Thread - Get current active state for this Thread * * RETURN: Pointer to the current walk state * * DESCRIPTION: Get the walk state that is at the head of the list (the "current" - * walk state. + * walk state.) * ******************************************************************************/ -acpi_walk_state * +struct acpi_walk_state * acpi_ds_get_current_walk_state ( - acpi_walk_list *walk_list) + struct acpi_thread_state *thread) { - PROC_NAME ("Ds_get_current_walk_state"); - + ACPI_FUNCTION_NAME ("ds_get_current_walk_state"); - ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Ds_get_current_walk_state, =%p\n", - walk_list->walk_state)); - if (!walk_list) { + if (!thread) { return (NULL); } - return (walk_list->walk_state); + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Current walk_state %p\n", + thread->walk_state_list)); + + return (thread->walk_state_list); } /******************************************************************************* * - * FUNCTION: Acpi_ds_push_walk_state + * FUNCTION: acpi_ds_push_walk_state * - * PARAMETERS: Walk_state - State to push - * Walk_list - The list that owns the walk stack + * PARAMETERS: walk_state - State to push + * walk_list - The list that owns the walk stack * * RETURN: None * - * DESCRIPTION: Place the Walk_state at the head of the state list. + * DESCRIPTION: Place the walk_state at the head of the state list. * ******************************************************************************/ void acpi_ds_push_walk_state ( - acpi_walk_state *walk_state, - acpi_walk_list *walk_list) + struct acpi_walk_state *walk_state, + struct acpi_thread_state *thread) { - FUNCTION_TRACE ("Ds_push_walk_state"); + ACPI_FUNCTION_TRACE ("ds_push_walk_state"); - walk_state->next = walk_list->walk_state; - walk_list->walk_state = walk_state; + walk_state->next = thread->walk_state_list; + thread->walk_state_list = walk_state; return_VOID; } @@ -762,11 +774,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_pop_walk_state + * FUNCTION: acpi_ds_pop_walk_state * - * PARAMETERS: Walk_list - The list that owns the walk stack + * PARAMETERS: walk_list - The list that owns the walk stack * - * RETURN: A Walk_state object popped from the stack + * RETURN: A walk_state object popped from the stack * * DESCRIPTION: Remove and return the walkstate object that is at the head of * the walk stack for the given walk list. 
NULL indicates that @@ -774,27 +786,27 @@ * ******************************************************************************/ -acpi_walk_state * +struct acpi_walk_state * acpi_ds_pop_walk_state ( - acpi_walk_list *walk_list) + struct acpi_thread_state *thread) { - acpi_walk_state *walk_state; + struct acpi_walk_state *walk_state; - FUNCTION_TRACE ("Ds_pop_walk_state"); + ACPI_FUNCTION_TRACE ("ds_pop_walk_state"); - walk_state = walk_list->walk_state; + walk_state = thread->walk_state_list; if (walk_state) { /* Next walk state becomes the current walk state */ - walk_list->walk_state = walk_state->next; + thread->walk_state_list = walk_state->next; /* * Don't clear the NEXT field, this serves as an indicator * that there is a parent WALK STATE - * Walk_state->Next = NULL; + * NO: walk_state->Next = NULL; */ } @@ -804,30 +816,30 @@ /******************************************************************************* * - * FUNCTION: Acpi_ds_create_walk_state + * FUNCTION: acpi_ds_create_walk_state * * PARAMETERS: Origin - Starting point for this walk - * Walk_list - Owning walk list + * Thread - Current thread state * * RETURN: Pointer to the new walk state. * - * DESCRIPTION: Allocate and initialize a new walk state. The current walk state - * is set to this new state. + * DESCRIPTION: Allocate and initialize a new walk state. The current walk + * state is set to this new state. * ******************************************************************************/ -acpi_walk_state * +struct acpi_walk_state * acpi_ds_create_walk_state ( - acpi_owner_id owner_id, - acpi_parse_object *origin, - acpi_operand_object *mth_desc, - acpi_walk_list *walk_list) + acpi_owner_id owner_id, + union acpi_parse_object *origin, + union acpi_operand_object *mth_desc, + struct acpi_thread_state *thread) { - acpi_walk_state *walk_state; - acpi_status status; + struct acpi_walk_state *walk_state; + acpi_status status; - FUNCTION_TRACE ("Ds_create_walk_state"); + ACPI_FUNCTION_TRACE ("ds_create_walk_state"); walk_state = acpi_ut_acquire_from_cache (ACPI_MEM_LIST_WALK); @@ -839,11 +851,13 @@ walk_state->owner_id = owner_id; walk_state->origin = origin; walk_state->method_desc = mth_desc; - walk_state->walk_list = walk_list; + walk_state->thread = thread; + + walk_state->parser_state.start_op = origin; /* Init the method args/local */ -#ifndef _ACPI_ASL_COMPILER +#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY)) acpi_ds_method_data_init (walk_state); #endif @@ -856,20 +870,19 @@ /* Put the new state at the head of the walk list */ - if (walk_list) { - acpi_ds_push_walk_state (walk_state, walk_list); + if (thread) { + acpi_ds_push_walk_state (walk_state, thread); } return_PTR (walk_state); } -#ifndef _ACPI_ASL_COMPILER /******************************************************************************* * - * FUNCTION: Acpi_ds_init_aml_walk + * FUNCTION: acpi_ds_init_aml_walk * - * PARAMETERS: Walk_state - New state to be initialized + * PARAMETERS: walk_state - New state to be initialized * * RETURN: None * @@ -879,20 +892,21 @@ acpi_status acpi_ds_init_aml_walk ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_namespace_node *method_node, - u8 *aml_start, - u32 aml_length, - acpi_operand_object **params, - acpi_operand_object **return_obj_desc, - u32 pass_number) + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + struct acpi_namespace_node *method_node, + u8 *aml_start, + u32 aml_length, + union acpi_operand_object **params, + union acpi_operand_object 
**return_obj_desc, + u32 pass_number) { - acpi_status status; - acpi_parse_state *parser_state = &walk_state->parser_state; + acpi_status status; + struct acpi_parse_state *parser_state = &walk_state->parser_state; + union acpi_parse_object *extra_op; - FUNCTION_TRACE ("Ds_init_aml_walk"); + ACPI_FUNCTION_TRACE ("ds_init_aml_walk"); walk_state->parser_state.aml = @@ -900,8 +914,7 @@ walk_state->parser_state.aml_end = walk_state->parser_state.pkg_end = aml_start + aml_length; - /* The Next_op of the Next_walk will be the beginning of the method */ - /* TBD: [Restructure] -- obsolete? */ + /* The next_op of the next_walk will be the beginning of the method */ walk_state->next_op = NULL; walk_state->params = params; @@ -914,11 +927,10 @@ if (method_node) { walk_state->parser_state.start_node = method_node; - walk_state->walk_type = WALK_METHOD; + walk_state->walk_type = ACPI_WALK_METHOD; walk_state->method_node = method_node; walk_state->method_desc = acpi_ns_get_attached_object (method_node); - /* Push start scope on scope stack and make it current */ status = acpi_ds_scope_stack_push (method_node, ACPI_TYPE_METHOD, walk_state); @@ -928,13 +940,29 @@ /* Init the method arguments */ - acpi_ds_method_data_init_args (params, MTH_NUM_ARGS, walk_state); + status = acpi_ds_method_data_init_args (params, ACPI_METHOD_NUM_ARGS, walk_state); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } } - else { - /* Setup the current scope */ + /* + * Setup the current scope. + * Find a Named Op that has a namespace node associated with it. + * search upwards from this Op. Current scope is the first + * Op with a namespace node. + */ + extra_op = parser_state->start_op; + while (extra_op && !extra_op->common.node) { + extra_op = extra_op->common.parent; + } + if (!extra_op) { + parser_state->start_node = NULL; + } + else { + parser_state->start_node = extra_op->common.node; + } - parser_state->start_node = parser_state->start_op->node; if (parser_state->start_node) { /* Push start scope on scope stack and make it current */ @@ -946,18 +974,16 @@ } } - acpi_ds_init_callbacks (walk_state, pass_number); - - return_ACPI_STATUS (AE_OK); + status = acpi_ds_init_callbacks (walk_state, pass_number); + return_ACPI_STATUS (status); } -#endif /******************************************************************************* * - * FUNCTION: Acpi_ds_delete_walk_state + * FUNCTION: acpi_ds_delete_walk_state * - * PARAMETERS: Walk_state - State to delete + * PARAMETERS: walk_state - State to delete * * RETURN: Status * @@ -967,12 +993,12 @@ void acpi_ds_delete_walk_state ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_generic_state *state; + union acpi_generic_state *state; - FUNCTION_TRACE_PTR ("Ds_delete_walk_state", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ds_delete_walk_state", walk_state); if (!walk_state) { @@ -984,7 +1010,6 @@ return; } - if (walk_state->parser_state.scope) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%p walk still has a scope list\n", walk_state)); } @@ -1023,7 +1048,7 @@ /****************************************************************************** * - * FUNCTION: Acpi_ds_delete_walk_state_cache + * FUNCTION: acpi_ds_delete_walk_state_cache * * PARAMETERS: None * @@ -1038,7 +1063,7 @@ acpi_ds_delete_walk_state_cache ( void) { - FUNCTION_TRACE ("Ds_delete_walk_state_cache"); + ACPI_FUNCTION_TRACE ("ds_delete_walk_state_cache"); acpi_ut_delete_generic_cache (ACPI_MEM_LIST_WALK); diff --exclude-from /usr/src/exclude -u --new-file --recursive 
linux.21rc5/drivers/acpi/dispatcher/Makefile linux.21rc5-ac2/drivers/acpi/dispatcher/Makefile --- linux.21rc5/drivers/acpi/dispatcher/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/dispatcher/Makefile 2003-04-28 17:58:27.000000000 +0100 @@ -1,11 +1,10 @@ # # Makefile for all Linux ACPI interpreter subdirectories -# EXCEPT for the ospm directory # O_TARGET := $(notdir $(CURDIR)).o -obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c)) +obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c)) EXTRA_CFLAGS += $(ACPI_CFLAGS) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/driver.c linux.21rc5-ac2/drivers/acpi/driver.c --- linux.21rc5/drivers/acpi/driver.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/driver.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,217 +0,0 @@ -/* - * driver.c - ACPI driver - * - * Copyright (C) 2000 Andrew Henroid - * Copyright (C) 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -/* - * Changes - * David Woodhouse 2000-12-6 - * - Fix interruptible_sleep_on() races - * Andrew Grover 2001-2-28 - * - Major revamping - * Peter Breuer 2001-5-20 - * - parse boot time params. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "acpi.h" - - -#define _COMPONENT OS_DEPENDENT - MODULE_NAME ("driver") - -FADT_DESCRIPTOR acpi_fadt; - -static int acpi_disabled = 0; - -enum acpi_blacklist_predicates -{ - all_versions, - less_than_or_equal, - equal, - greater_than_or_equal, -}; - -struct acpi_blacklist_item -{ - char oem_id[7]; - char oem_table_id[9]; - u32 oem_revision; - enum acpi_blacklist_predicates oem_revision_predicate; -}; - -/* - * Currently, this blacklists based on items in the FADT. We may want to - * expand this to using other ACPI tables in the future, too. 
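Worked example for the table below (removed along with the rest of driver.c): the lone entry {"TOSHIB", "750 ", 0x970814, less_than_or_equal} matches a FADT whose OEM ID is "TOSHIB", whose OEM table ID is "750 ", and whose oem_revision is <= 0x970814, i.e. a Portege 7020 running BIOS 8.10 or older. The revision predicate reduces to:

	/* Illustrative restatement of the predicate checks in acpi_blacklisted(). */
	static int revision_matches (u32 fadt_rev, u32 entry_rev,
	                             enum acpi_blacklist_predicates pred)
	{
		switch (pred) {
		case all_versions:          return 1;
		case less_than_or_equal:    return fadt_rev <= entry_rev;
		case equal:                 return fadt_rev == entry_rev;
		case greater_than_or_equal: return fadt_rev >= entry_rev;
		}
		return 0;
	}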
- */ -static struct acpi_blacklist_item acpi_blacklist[] __initdata = -{ - {"TOSHIB", "750 ", 0x970814, less_than_or_equal}, /* Portege 7020, BIOS 8.10 */ - {""} -}; - -int -acpi_blacklisted(FADT_DESCRIPTOR *fadt) -{ - int i = 0; - - while (acpi_blacklist[i].oem_id[0] != '\0') - { - if (strncmp(acpi_blacklist[i].oem_id, fadt->header.oem_id, 6)) { - i++; - continue; - } - - if (strncmp(acpi_blacklist[i].oem_table_id, fadt->header.oem_table_id, 8)) { - i++; - continue; - } - - if (acpi_blacklist[i].oem_revision_predicate == all_versions) - return TRUE; - - if (acpi_blacklist[i].oem_revision_predicate == less_than_or_equal - && fadt->header.oem_revision <= acpi_blacklist[i].oem_revision) - return TRUE; - - if (acpi_blacklist[i].oem_revision_predicate == greater_than_or_equal - && fadt->header.oem_revision >= acpi_blacklist[i].oem_revision) - return TRUE; - - if (acpi_blacklist[i].oem_revision_predicate == equal - && fadt->header.oem_revision == acpi_blacklist[i].oem_revision) - return TRUE; - - i++; - } - - return FALSE; -} - -/* - * Start the interpreter - */ -int -acpi_init(void) -{ - acpi_buffer buffer; - acpi_system_info sys_info; - - if (PM_IS_ACTIVE()) { - printk(KERN_NOTICE "ACPI: APM is already active, exiting\n"); - return -ENODEV; - } - - if (acpi_disabled) { - printk(KERN_NOTICE "ACPI: disabled by cmdline, exiting\n"); - return -ENODEV; - } - - if (!ACPI_SUCCESS(acpi_initialize_subsystem())) { - printk(KERN_ERR "ACPI: Driver initialization failed\n"); - return -ENODEV; - } - - /* from this point on, on error we must call acpi_terminate() */ - if (!ACPI_SUCCESS(acpi_load_tables())) { - printk(KERN_ERR "ACPI: System description table load failed\n"); - acpi_terminate(); - return -ENODEV; - } - - /* get a separate copy of the FADT for use by other drivers */ - memset(&acpi_fadt, 0, sizeof(acpi_fadt)); - buffer.pointer = &acpi_fadt; - buffer.length = sizeof(acpi_fadt); - - if (!ACPI_SUCCESS(acpi_get_table(ACPI_TABLE_FADT, 1, &buffer))) { - printk(KERN_ERR "ACPI: Could not get FADT\n"); - acpi_terminate(); - return -ENODEV; - } - - if (acpi_blacklisted(&acpi_fadt)) { - printk(KERN_ERR "ACPI: On blacklist -- BIOS not fully ACPI compliant\n"); - acpi_terminate(); - return -ENODEV; - } - - buffer.length = sizeof(sys_info); - buffer.pointer = &sys_info; - - if (!ACPI_SUCCESS (acpi_get_system_info(&buffer))) { - printk(KERN_ERR "ACPI: Could not get system info\n"); - acpi_terminate(); - return -ENODEV; - } - - printk(KERN_INFO "ACPI: Core Subsystem version [%x]\n", sys_info.acpi_ca_version); - - if (!ACPI_SUCCESS(acpi_enable_subsystem(ACPI_FULL_INITIALIZATION))) { - printk(KERN_ERR "ACPI: Subsystem enable failed\n"); - acpi_terminate(); - return -ENODEV; - } - - printk(KERN_INFO "ACPI: Subsystem enabled\n"); - - pm_active = 1; - - return 0; -} - -/* - * Terminate the interpreter - */ -void -acpi_exit(void) -{ - acpi_terminate(); - - pm_active = 0; - - printk(KERN_ERR "ACPI: Subsystem disabled\n"); -} - -module_init(acpi_init); -module_exit(acpi_exit); - -#ifndef MODULE -static int __init acpi_setup(char *str) { - while (str && *str) { - if (strncmp(str, "off", 3) == 0) - acpi_disabled = 1; - str = strchr(str, ','); - if (str) - str += strspn(str, ", \t"); - } - return 1; -} - -__setup("acpi=", acpi_setup); -#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ec.c linux.21rc5-ac2/drivers/acpi/ec.c --- linux.21rc5/drivers/acpi/ec.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ec.c 2003-04-28 17:58:27.000000000 +0100 @@ -0,0 
+1,859 @@ +/* + * acpi_ec.c - ACPI Embedded Controller Driver ($Revision: 36 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_EC_COMPONENT +ACPI_MODULE_NAME ("acpi_ec") + +#define PREFIX "ACPI: " + + +#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */ +#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */ +#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */ + +#define ACPI_EC_EVENT_OBF 0x01 /* Output buffer full */ +#define ACPI_EC_EVENT_IBE 0x02 /* Input buffer empty */ + +#define ACPI_EC_UDELAY 100 /* Poll @ 100us increments */ +#define ACPI_EC_UDELAY_COUNT 1000 /* Wait 10ms max. during EC ops */ +#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ + +#define ACPI_EC_COMMAND_READ 0x80 +#define ACPI_EC_COMMAND_WRITE 0x81 +#define ACPI_EC_COMMAND_QUERY 0x84 + +static int acpi_ec_add (struct acpi_device *device); +static int acpi_ec_remove (struct acpi_device *device, int type); +static int acpi_ec_start (struct acpi_device *device); +static int acpi_ec_stop (struct acpi_device *device, int type); + +static struct acpi_driver acpi_ec_driver = { + .name = ACPI_EC_DRIVER_NAME, + .class = ACPI_EC_CLASS, + .ids = ACPI_EC_HID, + .ops = { + .add = acpi_ec_add, + .remove = acpi_ec_remove, + .start = acpi_ec_start, + .stop = acpi_ec_stop, + }, +}; + +struct acpi_ec { + acpi_handle handle; + unsigned long uid; + unsigned long gpe_bit; + struct acpi_generic_address status_addr; + struct acpi_generic_address command_addr; + struct acpi_generic_address data_addr; + unsigned long global_lock; + spinlock_t lock; +}; + +/* If we find an EC via the ECDT, we need to keep a ptr to its context */ +static struct acpi_ec *ec_ecdt; + +/* External interfaces use first EC only, so remember */ +static struct acpi_device *first_ec; + +/* -------------------------------------------------------------------------- + Transaction Management + -------------------------------------------------------------------------- */ + +static int +acpi_ec_wait ( + struct acpi_ec *ec, + u8 event) +{ + u32 acpi_ec_status = 0; + u32 i = ACPI_EC_UDELAY_COUNT; + + if (!ec) + return -EINVAL; + + /* Poll the EC status register waiting for the event to occur. 
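A note on the handshake being polled here, following the ACPI EC protocol: the host may write a command or data byte only while IBF (input buffer full) is clear, and may read the data port only once OBF (output buffer full) is set. As predicates over the status byte, using the flag masks defined above (sketch only):

	/* Illustrative helpers, not part of the patch. */
	static inline int ec_host_may_write (u32 status)
	{
		return !(status & ACPI_EC_FLAG_IBF);     /* EC consumed the last byte */
	}

	static inline int ec_host_may_read (u32 status)
	{
		return (status & ACPI_EC_FLAG_OBF) != 0; /* EC produced a byte */
	}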
*/ + switch (event) { + case ACPI_EC_EVENT_OBF: + do { + acpi_hw_low_level_read(8, &acpi_ec_status, &ec->status_addr); + if (acpi_ec_status & ACPI_EC_FLAG_OBF) + return 0; + udelay(ACPI_EC_UDELAY); + } while (--i>0); + break; + case ACPI_EC_EVENT_IBE: + do { + acpi_hw_low_level_read(8, &acpi_ec_status, &ec->status_addr); + if (!(acpi_ec_status & ACPI_EC_FLAG_IBF)) + return 0; + udelay(ACPI_EC_UDELAY); + } while (--i>0); + break; + default: + return -EINVAL; + } + + return -ETIME; +} + + +static int +acpi_ec_read ( + struct acpi_ec *ec, + u8 address, + u32 *data) +{ + acpi_status status = AE_OK; + int result = 0; + unsigned long flags = 0; + u32 glk = 0; + + ACPI_FUNCTION_TRACE("acpi_ec_read"); + + if (!ec || !data) + return_VALUE(-EINVAL); + + *data = 0; + + if (ec->global_lock) { + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + } + + spin_lock_irqsave(&ec->lock, flags); + + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ, &ec->command_addr); + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); + if (result) + goto end; + + acpi_hw_low_level_write(8, address, &ec->data_addr); + result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); + if (result) + goto end; + + + acpi_hw_low_level_read(8, data, &ec->data_addr); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Read [%02x] from address [%02x]\n", + *data, address)); + +end: + spin_unlock_irqrestore(&ec->lock, flags); + + if (ec->global_lock) + acpi_release_global_lock(glk); + + return_VALUE(result); +} + + +static int +acpi_ec_write ( + struct acpi_ec *ec, + u8 address, + u8 data) +{ + int result = 0; + acpi_status status = AE_OK; + unsigned long flags = 0; + u32 glk = 0; + + ACPI_FUNCTION_TRACE("acpi_ec_write"); + + if (!ec) + return_VALUE(-EINVAL); + + if (ec->global_lock) { + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + } + + spin_lock_irqsave(&ec->lock, flags); + + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE, &ec->command_addr); + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); + if (result) + goto end; + + acpi_hw_low_level_write(8, address, &ec->data_addr); + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); + if (result) + goto end; + + acpi_hw_low_level_write(8, data, &ec->data_addr); + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); + if (result) + goto end; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Wrote [%02x] to address [%02x]\n", + data, address)); + +end: + spin_unlock_irqrestore(&ec->lock, flags); + + if (ec->global_lock) + acpi_release_global_lock(glk); + + return_VALUE(result); +} + +/* + * Externally callable EC access functions. 
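The two helpers defined next, ec_read() and ec_write(), are the entire interface other drivers get to the EC address space. A hypothetical caller (the 0x2F offset is invented for illustration; real offsets are machine-specific):

	/* Hypothetical consumer of the exported helpers. */
	u8 level;

	if (ec_read(0x2F, &level) == 0)
		printk(KERN_DEBUG "EC byte at 0x2F: 0x%02x\n", level);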
For now, assume 1 EC only + */ +int +ec_read(u8 addr, u8 *val) +{ + struct acpi_ec *ec; + int err; + u32 temp_data; + + if (!first_ec) + return -ENODEV; + + ec = acpi_driver_data(first_ec); + + err = acpi_ec_read(ec, addr, &temp_data); + + if (!err) { + *val = temp_data; + return 0; + } + else + return err; +} + +int +ec_write(u8 addr, u8 val) +{ + struct acpi_ec *ec; + int err; + + if (!first_ec) + return -ENODEV; + + ec = acpi_driver_data(first_ec); + + err = acpi_ec_write(ec, addr, val); + + return err; +} + + +static int +acpi_ec_query ( + struct acpi_ec *ec, + u32 *data) +{ + int result = 0; + acpi_status status = AE_OK; + unsigned long flags = 0; + u32 glk = 0; + + ACPI_FUNCTION_TRACE("acpi_ec_query"); + + if (!ec || !data) + return_VALUE(-EINVAL); + + *data = 0; + + if (ec->global_lock) { + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + } + + /* + * Query the EC to find out which _Qxx method we need to evaluate. + * Note that successful completion of the query causes the ACPI_EC_SCI + * bit to be cleared (and thus clearing the interrupt source). + */ + spin_lock_irqsave(&ec->lock, flags); + + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY, &ec->command_addr); + result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); + if (result) + goto end; + + acpi_hw_low_level_read(8, data, &ec->data_addr); + if (!*data) + result = -ENODATA; + +end: + spin_unlock_irqrestore(&ec->lock, flags); + + if (ec->global_lock) + acpi_release_global_lock(glk); + + return_VALUE(result); +} + + +/* -------------------------------------------------------------------------- + Event Management + -------------------------------------------------------------------------- */ + +struct acpi_ec_query_data { + acpi_handle handle; + u8 data; +}; + +static void +acpi_ec_gpe_query ( + void *ec_cxt) +{ + struct acpi_ec *ec = (struct acpi_ec *) ec_cxt; + u32 value = 0; + unsigned long flags = 0; + static char object_name[5] = {'_','Q','0','0','\0'}; + const char hex[] = {'0','1','2','3','4','5','6','7', + '8','9','A','B','C','D','E','F'}; + + ACPI_FUNCTION_TRACE("acpi_ec_gpe_query"); + + if (!ec_cxt) + goto end; + + spin_lock_irqsave(&ec->lock, flags); + acpi_hw_low_level_read(8, &value, &ec->command_addr); + spin_unlock_irqrestore(&ec->lock, flags); + + /* TBD: Implement asynch events! + * NOTE: All we care about are EC-SCI's. Other EC events are + * handled via polling (yuck!). This is because some systems + * treat EC-SCIs as level (versus EDGE!) triggered, preventing + * a purely interrupt-driven approach (grumble, grumble). 
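The nibble table below builds the query method name "_QXX" from the byte returned by the query command: a query value of 0xA3, for example, selects the AML method _QA3. An equivalent formulation, assuming the value has already been fetched and fits in one byte:

	/* Illustrative alternative to the hex-nibble lookup. */
	char object_name[5];

	snprintf(object_name, sizeof(object_name), "_Q%02X", value & 0xFF);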
+ */ + if (!(value & ACPI_EC_FLAG_SCI)) + goto end; + + if (acpi_ec_query(ec, &value)) + goto end; + + object_name[2] = hex[((value >> 4) & 0x0F)]; + object_name[3] = hex[(value & 0x0F)]; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s\n", object_name)); + + acpi_evaluate_object(ec->handle, object_name, NULL, NULL); + +end: + acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); +} + +static void +acpi_ec_gpe_handler ( + void *data) +{ + acpi_status status = AE_OK; + struct acpi_ec *ec = (struct acpi_ec *) data; + + if (!ec) + return; + + acpi_disable_gpe(NULL, ec->gpe_bit, ACPI_ISR); + + status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, + acpi_ec_gpe_query, ec); +} + +/* -------------------------------------------------------------------------- + Address Space Management + -------------------------------------------------------------------------- */ + +static acpi_status +acpi_ec_space_setup ( + acpi_handle region_handle, + u32 function, + void *handler_context, + void **return_context) +{ + /* + * The EC object is in the handler context and is needed + * when calling the acpi_ec_space_handler. + */ + *return_context = handler_context; + + return AE_OK; +} + + +static acpi_status +acpi_ec_space_handler ( + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context) +{ + int result = 0; + struct acpi_ec *ec = NULL; + u32 temp = 0; + + ACPI_FUNCTION_TRACE("acpi_ec_space_handler"); + + if ((address > 0xFF) || (bit_width != 8) || !value || !handler_context) + return_VALUE(AE_BAD_PARAMETER); + + ec = (struct acpi_ec *) handler_context; + + switch (function) { + case ACPI_READ: + result = acpi_ec_read(ec, (u8) address, &temp); + *value = (acpi_integer) temp; + break; + case ACPI_WRITE: + result = acpi_ec_write(ec, (u8) address, (u8) *value); + break; + default: + result = -EINVAL; + break; + } + + switch (result) { + case -EINVAL: + return_VALUE(AE_BAD_PARAMETER); + break; + case -ENODEV: + return_VALUE(AE_NOT_FOUND); + break; + case -ETIME: + return_VALUE(AE_TIME); + break; + default: + return_VALUE(AE_OK); + } + +} + + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +struct proc_dir_entry *acpi_ec_dir = NULL; + + +static int +acpi_ec_read_info ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_ec *ec = (struct acpi_ec *) data; + char *p = page; + int len = 0; + + ACPI_FUNCTION_TRACE("acpi_ec_read_info"); + + if (!ec || (off != 0)) + goto end; + + p += sprintf(p, "gpe bit: 0x%02x\n", + (u32) ec->gpe_bit); + p += sprintf(p, "ports: 0x%02x, 0x%02x\n", + (u32) ec->status_addr.address, (u32) ec->data_addr.address); + p += sprintf(p, "use global lock: %s\n", + ec->global_lock?"yes":"no"); + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_ec_add_fs ( + struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_ec_add_fs"); + + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_ec_dir); + if (!acpi_device_dir(device)) + return_VALUE(-ENODEV); + } + + entry = create_proc_read_entry(ACPI_EC_FILE_INFO, S_IRUGO, + acpi_device_dir(device), acpi_ec_read_info, + acpi_driver_data(device)); + if (!entry) + 
ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Unable to create '%s' fs entry\n", + ACPI_EC_FILE_INFO)); + + return_VALUE(0); +} + + +static int +acpi_ec_remove_fs ( + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_ec_remove_fs"); + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ + +static int +acpi_ec_add ( + struct acpi_device *device) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_ec *ec = NULL; + unsigned long uid; + + ACPI_FUNCTION_TRACE("acpi_ec_add"); + + if (!device) + return_VALUE(-EINVAL); + + ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); + if (!ec) + return_VALUE(-ENOMEM); + memset(ec, 0, sizeof(struct acpi_ec)); + + ec->handle = device->handle; + ec->uid = -1; + ec->lock = SPIN_LOCK_UNLOCKED; + sprintf(acpi_device_name(device), "%s", ACPI_EC_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_EC_CLASS); + acpi_driver_data(device) = ec; + + /* Use the global lock for all EC transactions? */ + acpi_evaluate_integer(ec->handle, "_GLK", NULL, &ec->global_lock); + + /* If our UID matches the UID for the ECDT-enumerated EC, + we now have the *real* EC info, so kill the makeshift one.*/ + acpi_evaluate_integer(ec->handle, "_UID", NULL, &uid); + if (ec_ecdt && ec_ecdt->uid == uid) { + acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, + ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); + + acpi_remove_gpe_handler(NULL, ec_ecdt->gpe_bit, &acpi_ec_gpe_handler); + + kfree(ec_ecdt); + } + + /* Get GPE bit assignment (EC events). */ + /* TODO: Add support for _GPE returning a package */ + status = acpi_evaluate_integer(ec->handle, "_GPE", NULL, &ec->gpe_bit); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error obtaining GPE bit assignment\n")); + result = -ENODEV; + goto end; + } + + result = acpi_ec_add_fs(device); + if (result) + goto end; + + printk(KERN_INFO PREFIX "%s [%s] (gpe %d)\n", + acpi_device_name(device), acpi_device_bid(device), + (u32) ec->gpe_bit); + + if (!first_ec) + first_ec = device; + +end: + if (result) + kfree(ec); + + return_VALUE(result); +} + + +static int +acpi_ec_remove ( + struct acpi_device *device, + int type) +{ + struct acpi_ec *ec = NULL; + + ACPI_FUNCTION_TRACE("acpi_ec_remove"); + + if (!device) + return_VALUE(-EINVAL); + + ec = acpi_driver_data(device); + + acpi_ec_remove_fs(device); + + kfree(ec); + + return_VALUE(0); +} + + +static acpi_status +acpi_ec_io_ports ( + struct acpi_resource *resource, + void *context) +{ + struct acpi_ec *ec = (struct acpi_ec *) context; + struct acpi_generic_address *addr; + + if (resource->id != ACPI_RSTYPE_IO) { + return AE_OK; + } + + /* + * The first address region returned is the data port, and + * the second address region returned is the status/command + * port. 
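In the common case the two _CRS I/O resources are the canonical EC ports, data at 0x62 and command/status at 0x66, so the walk below typically leaves the GAS structures looking like this (addresses shown for illustration only):

	/* Typical outcome of the _CRS walk on a standard EC. */
	ec->data_addr.address    = 0x62;             /* first IO resource  */
	ec->command_addr.address = 0x66;             /* second IO resource */
	ec->status_addr          = ec->command_addr; /* status shares the port */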
+ */ + if (ec->data_addr.register_bit_width == 0) { + addr = &ec->data_addr; + } else if (ec->command_addr.register_bit_width == 0) { + addr = &ec->command_addr; + } else { + return AE_CTRL_TERMINATE; + } + + addr->address_space_id = ACPI_ADR_SPACE_SYSTEM_IO; + addr->register_bit_width = 8; + addr->register_bit_offset = 0; + addr->address = resource->data.io.min_base_address; + + return AE_OK; +} + + +static int +acpi_ec_start ( + struct acpi_device *device) +{ + acpi_status status = AE_OK; + struct acpi_ec *ec = NULL; + + ACPI_FUNCTION_TRACE("acpi_ec_start"); + + if (!device) + return_VALUE(-EINVAL); + + ec = acpi_driver_data(device); + + if (!ec) + return_VALUE(-EINVAL); + + /* + * Get I/O port addresses. Convert to GAS format. + */ + status = acpi_walk_resources(ec->handle, METHOD_NAME__CRS, + acpi_ec_io_ports, ec); + if (ACPI_FAILURE(status) || ec->command_addr.register_bit_width == 0) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error getting I/O port addresses")); + return_VALUE(-ENODEV); + } + + ec->status_addr = ec->command_addr; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "gpe=0x%02x, ports=0x%2x,0x%2x\n", + (u32) ec->gpe_bit, (u32) ec->command_addr.address, + (u32) ec->data_addr.address)); + + /* + * Install GPE handler + */ + status = acpi_install_gpe_handler(NULL, ec->gpe_bit, + ACPI_EVENT_EDGE_TRIGGERED, &acpi_ec_gpe_handler, ec); + if (ACPI_FAILURE(status)) { + return_VALUE(-ENODEV); + } + + status = acpi_install_address_space_handler (ec->handle, + ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, + &acpi_ec_space_setup, ec); + if (ACPI_FAILURE(status)) { + acpi_remove_gpe_handler(NULL, ec->gpe_bit, &acpi_ec_gpe_handler); + return_VALUE(-ENODEV); + } + + return_VALUE(AE_OK); +} + + +static int +acpi_ec_stop ( + struct acpi_device *device, + int type) +{ + acpi_status status = AE_OK; + struct acpi_ec *ec = NULL; + + ACPI_FUNCTION_TRACE("acpi_ec_stop"); + + if (!device) + return_VALUE(-EINVAL); + + ec = acpi_driver_data(device); + + status = acpi_remove_address_space_handler(ec->handle, + ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + status = acpi_remove_gpe_handler(NULL, ec->gpe_bit, &acpi_ec_gpe_handler); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + return_VALUE(0); +} + + +int __init +acpi_ec_ecdt_probe (void) +{ + acpi_status status; + struct acpi_table_ecdt *ecdt_ptr; + + status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING, + (struct acpi_table_header **) &ecdt_ptr); + if (ACPI_FAILURE(status)) + return 0; + + printk(KERN_INFO PREFIX "Found ECDT\n"); + + /* + * Generate a temporary ec context to use until the namespace is scanned + */ + ec_ecdt = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); + if (!ec_ecdt) + return -ENOMEM; + memset(ec_ecdt, 0, sizeof(struct acpi_ec)); + + ec_ecdt->command_addr = ecdt_ptr->ec_control; + ec_ecdt->status_addr = ecdt_ptr->ec_control; + ec_ecdt->data_addr = ecdt_ptr->ec_data; + ec_ecdt->gpe_bit = ecdt_ptr->gpe_bit; + ec_ecdt->lock = SPIN_LOCK_UNLOCKED; + /* use the GL just to be safe */ + ec_ecdt->global_lock = TRUE; + ec_ecdt->uid = ecdt_ptr->uid; + + status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle); + if (ACPI_FAILURE(status)) { + goto error; + } + + /* + * Install GPE handler + */ + status = acpi_install_gpe_handler(NULL, ec_ecdt->gpe_bit, + ACPI_EVENT_EDGE_TRIGGERED, &acpi_ec_gpe_handler, + ec_ecdt); + if (ACPI_FAILURE(status)) { + goto error; + } + + status = acpi_install_address_space_handler (ACPI_ROOT_OBJECT, + ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, + 
&acpi_ec_space_setup, ec_ecdt); + if (ACPI_FAILURE(status)) { + acpi_remove_gpe_handler(NULL, ec_ecdt->gpe_bit, + &acpi_ec_gpe_handler); + goto error; + } + + return 0; + +error: + printk(KERN_ERR PREFIX "Could not use ECDT\n"); + kfree(ec_ecdt); + ec_ecdt = NULL; + + return -ENODEV; +} + + +int __init +acpi_ec_init (void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_ec_init"); + + acpi_ec_dir = proc_mkdir(ACPI_EC_CLASS, acpi_root_dir); + if (!acpi_ec_dir) + return_VALUE(-ENODEV); + + result = acpi_bus_register_driver(&acpi_ec_driver); + if (result < 0) { + remove_proc_entry(ACPI_EC_CLASS, acpi_root_dir); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + +/* EC can't be unloaded atm, so don't compile these */ +#if 0 +void __exit +acpi_ec_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_ec_exit"); + + acpi_bus_unregister_driver(&acpi_ec_driver); + + remove_proc_entry(ACPI_EC_CLASS, acpi_root_dir); + + return_VOID; +} +#endif /* 0 */ + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evevent.c linux.21rc5-ac2/drivers/acpi/events/evevent.c --- linux.21rc5/drivers/acpi/events/evevent.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evevent.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,49 +1,62 @@ /****************************************************************************** * - * Module Name: evevent - Fixed and General Purpose Acpi_event - * handling and dispatch - * $Revision: 51 $ + * Module Name: evevent - Fixed Event handling and dispatch * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "achware.h" -#include "acevents.h" -#include "acnamesp.h" +#include +#include #define _COMPONENT ACPI_EVENTS - MODULE_NAME ("evevent") + ACPI_MODULE_NAME ("evevent") /******************************************************************************* * - * FUNCTION: Acpi_ev_initialize + * FUNCTION: acpi_ev_initialize * * PARAMETERS: None * * RETURN: Status * - * DESCRIPTION: Ensures that the system control interrupt (SCI) is properly - * configured, disables SCI event sources, installs the SCI - * handler + * DESCRIPTION: Initialize global data structures for events. * ******************************************************************************/ @@ -51,10 +64,10 @@ acpi_ev_initialize ( void) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Ev_initialize"); + ACPI_FUNCTION_TRACE ("ev_initialize"); /* Make sure we have ACPI tables */ @@ -64,48 +77,60 @@ return_ACPI_STATUS (AE_NO_ACPI_TABLES); } - - /* Make sure the BIOS supports ACPI mode */ - - if (SYS_MODE_LEGACY == acpi_hw_get_mode_capabilities()) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "ACPI Mode is not supported!\n")); - return_ACPI_STATUS (AE_ERROR); - } - - - acpi_gbl_original_mode = acpi_hw_get_mode(); - /* - * Initialize the Fixed and General Purpose Acpi_events prior. This is - * done prior to enabling SCIs to prevent interrupts from occuring - * before handers are installed. + * Initialize the Fixed and General Purpose Events. This is + * done prior to enabling SCIs to prevent interrupts from + * occurring before handlers are installed.
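This reordering is the point of the split introduced by this hunk: acpi_ev_initialize() now only prepares the fixed-event and GPE data, while the SCI and Global Lock hookup moves to the new acpi_ev_handler_initialize() below. A sketch of the call order the split implies (the caller sits outside this hunk, so the ordering shown is an assumption):

	/* Assumed caller-side ordering, for illustration only. */
	status = acpi_ev_initialize ();             /* event data, sources masked */
	if (ACPI_SUCCESS (status))
		status = acpi_ev_handler_initialize (); /* then SCI + Global Lock hooks */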
*/ status = acpi_ev_fixed_event_initialize (); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_FATAL, "Unable to initialize fixed events.\n")); + ACPI_REPORT_ERROR (( + "Unable to initialize fixed events, %s\n", + acpi_format_exception (status))); return_ACPI_STATUS (status); } status = acpi_ev_gpe_initialize (); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_FATAL, "Unable to initialize general purpose events.\n")); + ACPI_REPORT_ERROR (( + "Unable to initialize general purpose events, %s\n", + acpi_format_exception (status))); return_ACPI_STATUS (status); } - /* Install the SCI handler */ + return_ACPI_STATUS (status); +} - status = acpi_ev_install_sci_handler (); - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_FATAL, "Unable to install System Control Interrupt Handler\n")); - return_ACPI_STATUS (status); - } +/******************************************************************************* + * + * FUNCTION: acpi_ev_handler_initialize + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Install interrupt handlers for the SCI and Global Lock + * + ******************************************************************************/ + +acpi_status +acpi_ev_handler_initialize ( + void) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_handler_initialize"); - /* Install handlers for control method GPE handlers (_Lxx, _Exx) */ - status = acpi_ev_init_gpe_control_methods (); + /* Install the SCI handler */ + + status = acpi_ev_install_sci_handler (); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_FATAL, "Unable to initialize Gpe control methods\n")); + ACPI_REPORT_ERROR (( + "Unable to install System Control Interrupt Handler, %s\n", + acpi_format_exception (status))); return_ACPI_STATUS (status); } @@ -113,44 +138,55 @@ status = acpi_ev_init_global_lock_handler (); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_FATAL, "Unable to initialize Global Lock handler\n")); + ACPI_REPORT_ERROR (( + "Unable to initialize Global Lock handler, %s\n", + acpi_format_exception (status))); return_ACPI_STATUS (status); } - + acpi_gbl_events_initialized = TRUE; return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ev_fixed_event_initialize + * FUNCTION: acpi_ev_fixed_event_initialize * * PARAMETERS: None * * RETURN: Status * - * DESCRIPTION: Initialize the Fixed Acpi_event data structures + * DESCRIPTION: Install the fixed event handlers and enable the fixed events. * ******************************************************************************/ acpi_status -acpi_ev_fixed_event_initialize(void) +acpi_ev_fixed_event_initialize ( + void) { - int i = 0; + acpi_native_uint i; + acpi_status status; - /* Initialize the structure that keeps track of fixed event handlers */ + /* + * Initialize the structure that keeps track of fixed event handlers + * and enable the fixed events. 
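Two details worth spelling out in the loop that follows: an enable_register_id of 0xFF marks a fixed event that has no enable bit at all, and the value written for the others is 0, so every fixed event starts out masked and is only unmasked later, when a handler is actually installed. Condensed (register-id type assumed):

	/* Sketch of the guard below, error handling elided. */
	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
		if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF)
			(void) acpi_set_register (
				acpi_gbl_fixed_event_info[i].enable_register_id,
				0, ACPI_MTX_LOCK);   /* 0 == event disabled */
	}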
+ */ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { acpi_gbl_fixed_event_handlers[i].handler = NULL; acpi_gbl_fixed_event_handlers[i].context = NULL; - } - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, TMR_EN, 0); - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, GBL_EN, 0); - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, PWRBTN_EN, 0); - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, SLPBTN_EN, 0); - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, RTC_EN, 0); + /* Enable the fixed event */ + + if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) { + status = acpi_set_register (acpi_gbl_fixed_event_info[i].enable_register_id, + 0, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return (status); + } + } + } return (AE_OK); } @@ -158,7 +194,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_fixed_event_detect + * FUNCTION: acpi_ev_fixed_event_detect * * PARAMETERS: None * @@ -169,54 +205,41 @@ ******************************************************************************/ u32 -acpi_ev_fixed_event_detect (void) +acpi_ev_fixed_event_detect ( + void) { - u32 int_status = INTERRUPT_NOT_HANDLED; - u32 status_register; - u32 enable_register; + u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; + u32 fixed_status; + u32 fixed_enable; + acpi_native_uint i; - PROC_NAME ("Ev_fixed_event_detect"); + ACPI_FUNCTION_NAME ("ev_fixed_event_detect"); /* * Read the fixed feature status and enable registers, as all the cases - * depend on their values. + * depend on their values. Ignore errors here. */ - status_register = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, PM1_STS); - enable_register = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, PM1_EN); + (void) acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_STATUS, &fixed_status); + (void) acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_ENABLE, &fixed_enable); ACPI_DEBUG_PRINT ((ACPI_DB_INTERRUPTS, - "Fixed Acpi_event Block: Enable %08X Status %08X\n", - enable_register, status_register)); - + "Fixed Event Block: Enable %08X Status %08X\n", + fixed_enable, fixed_status)); - /* power management timer roll over */ - - if ((status_register & ACPI_STATUS_PMTIMER) && - (enable_register & ACPI_ENABLE_PMTIMER)) { - int_status |= acpi_ev_fixed_event_dispatch (ACPI_EVENT_PMTIMER); - } - - /* global event (BIOS wants the global lock) */ - - if ((status_register & ACPI_STATUS_GLOBAL) && - (enable_register & ACPI_ENABLE_GLOBAL)) { - int_status |= acpi_ev_fixed_event_dispatch (ACPI_EVENT_GLOBAL); - } - - /* power button event */ - - if ((status_register & ACPI_STATUS_POWER_BUTTON) && - (enable_register & ACPI_ENABLE_POWER_BUTTON)) { - int_status |= acpi_ev_fixed_event_dispatch (ACPI_EVENT_POWER_BUTTON); - } + /* + * Check for all possible Fixed Events and dispatch those that are active + */ + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { + /* Both the status and enable bits must be on for this event */ - /* sleep button event */ + if ((fixed_status & acpi_gbl_fixed_event_info[i].status_bit_mask) && + (fixed_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) { + /* Found an active (signalled) event */ - if ((status_register & ACPI_STATUS_SLEEP_BUTTON) && - (enable_register & ACPI_ENABLE_SLEEP_BUTTON)) { - int_status |= acpi_ev_fixed_event_dispatch (ACPI_EVENT_SLEEP_BUTTON); + int_status |= acpi_ev_fixed_event_dispatch ((u32) i); + } } return (int_status); @@ -225,7 +248,7 @@ 
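The detect loop in the hunk above is the heart of this rewrite: the old hand-written if-chain, one block per event, becomes a single walk over acpi_gbl_fixed_event_info[], dispatching any event whose status bit and enable bit are both set. The same idea in miniature, with a simplified table type invented for illustration:

	/* Minimal table-driven event detect; types are stand-ins, not ACPICA's. */
	struct event_masks {
		u32 status_bit_mask;
		u32 enable_bit_mask;
	};

	static u32 detect_events (const struct event_masks *table, u32 count,
	                          u32 status, u32 enable, u32 (*dispatch)(u32))
	{
		u32 handled = 0;
		u32 i;

		for (i = 0; i < count; i++) {
			/* An event is live only when enabled AND signalled. */
			if ((status & table[i].status_bit_mask) &&
			    (enable & table[i].enable_bit_mask))
				handled |= dispatch (i);
		}
		return handled;
	}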
/******************************************************************************* * - * FUNCTION: Acpi_ev_fixed_event_dispatch + * FUNCTION: acpi_ev_fixed_event_dispatch * * PARAMETERS: Event - Event type * @@ -238,590 +261,37 @@ u32 acpi_ev_fixed_event_dispatch ( - u32 event) + u32 event) { - u32 register_id; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* Clear the status bit */ - switch (event) { - case ACPI_EVENT_PMTIMER: - register_id = TMR_STS; - break; - - case ACPI_EVENT_GLOBAL: - register_id = GBL_STS; - break; - - case ACPI_EVENT_POWER_BUTTON: - register_id = PWRBTN_STS; - break; - - case ACPI_EVENT_SLEEP_BUTTON: - register_id = SLPBTN_STS; - break; - - case ACPI_EVENT_RTC: - register_id = RTC_STS; - break; - - default: - return 0; - break; - } - - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_DO_NOT_LOCK, register_id, 1); + (void) acpi_set_register (acpi_gbl_fixed_event_info[event].status_register_id, + 1, ACPI_MTX_DO_NOT_LOCK); /* * Make sure we've got a handler. If not, report an error. * The event is disabled to prevent further interrupts. */ if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { - register_id = (PM1_EN | REGISTER_BIT_ID(register_id)); + (void) acpi_set_register (acpi_gbl_fixed_event_info[event].enable_register_id, + 0, ACPI_MTX_DO_NOT_LOCK); - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_DO_NOT_LOCK, - register_id, 0); - - REPORT_ERROR ( - ("Ev_gpe_dispatch: No installed handler for fixed event [%08X]\n", + ACPI_REPORT_ERROR ( + ("No installed handler for fixed event [%08X]\n", event)); - return (INTERRUPT_NOT_HANDLED); + return (ACPI_INTERRUPT_NOT_HANDLED); } - /* Invoke the handler */ + /* Invoke the Fixed Event handler */ return ((acpi_gbl_fixed_event_handlers[event].handler)( acpi_gbl_fixed_event_handlers[event].context)); } -/******************************************************************************* - * - * FUNCTION: Acpi_ev_gpe_initialize - * - * PARAMETERS: None - * - * RETURN: Status - * - * DESCRIPTION: Initialize the GPE data structures - * - ******************************************************************************/ - -acpi_status -acpi_ev_gpe_initialize (void) -{ - u32 i; - u32 j; - u32 register_index; - u32 gpe_number; - u16 gpe0register_count; - u16 gpe1_register_count; - - - FUNCTION_TRACE ("Ev_gpe_initialize"); - - /* - * Set up various GPE counts - * - * You may ask,why are the GPE register block lengths divided by 2? - * From the ACPI 2.0 Spec, section, 4.7.1.6 General-Purpose Event - * Registers, we have, - * - * "Each register block contains two registers of equal length - * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the - * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN - * The length of the GPE1_STS and GPE1_EN registers is equal to - * half the GPE1_LEN. If a generic register block is not supported - * then its respective block pointer and block length values in the - * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need - * to be the same size." 
- */ - gpe0register_count = (u16) DIV_2 (acpi_gbl_FADT->gpe0blk_len); - gpe1_register_count = (u16) DIV_2 (acpi_gbl_FADT->gpe1_blk_len); - acpi_gbl_gpe_register_count = gpe0register_count + gpe1_register_count; - - if (!acpi_gbl_gpe_register_count) { - REPORT_WARNING (("Zero GPEs are defined in the FADT\n")); - return_ACPI_STATUS (AE_OK); - } - - /* - * Allocate the Gpe information block - */ - acpi_gbl_gpe_registers = ACPI_MEM_CALLOCATE (acpi_gbl_gpe_register_count * - sizeof (acpi_gpe_registers)); - if (!acpi_gbl_gpe_registers) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Could not allocate the Gpe_registers block\n")); - return_ACPI_STATUS (AE_NO_MEMORY); - } - - /* - * Allocate the Gpe dispatch handler block - * There are eight distinct GP events per register. - * Initialization to zeros is sufficient - */ - acpi_gbl_gpe_info = ACPI_MEM_CALLOCATE (MUL_8 (acpi_gbl_gpe_register_count) * - sizeof (acpi_gpe_level_info)); - if (!acpi_gbl_gpe_info) { - ACPI_MEM_FREE (acpi_gbl_gpe_registers); - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not allocate the Gpe_info block\n")); - return_ACPI_STATUS (AE_NO_MEMORY); - } - - /* Set the Gpe validation table to GPE_INVALID */ - - MEMSET (acpi_gbl_gpe_valid, (int) ACPI_GPE_INVALID, ACPI_NUM_GPE); - - /* - * Initialize the Gpe information and validation blocks. A goal of these - * blocks is to hide the fact that there are two separate GPE register sets - * In a given block, the status registers occupy the first half, and - * the enable registers occupy the second half. - */ - - /* GPE Block 0 */ - - register_index = 0; - - for (i = 0; i < gpe0register_count; i++) { - acpi_gbl_gpe_registers[register_index].status_addr = - (u16) (ACPI_GET_ADDRESS (acpi_gbl_FADT->Xgpe0blk.address) + i); - - acpi_gbl_gpe_registers[register_index].enable_addr = - (u16) (ACPI_GET_ADDRESS (acpi_gbl_FADT->Xgpe0blk.address) + i + gpe0register_count); - - acpi_gbl_gpe_registers[register_index].gpe_base = (u8) MUL_8 (i); - - for (j = 0; j < 8; j++) { - gpe_number = acpi_gbl_gpe_registers[register_index].gpe_base + j; - acpi_gbl_gpe_valid[gpe_number] = (u8) register_index; - } - - /* - * Clear the status/enable registers. Note that status registers - * are cleared by writing a '1', while enable registers are cleared - * by writing a '0'. - */ - acpi_os_write_port (acpi_gbl_gpe_registers[register_index].enable_addr, 0x00, 8); - acpi_os_write_port (acpi_gbl_gpe_registers[register_index].status_addr, 0xFF, 8); - - register_index++; - } - - /* GPE Block 1 */ - - for (i = 0; i < gpe1_register_count; i++) { - acpi_gbl_gpe_registers[register_index].status_addr = - (u16) (ACPI_GET_ADDRESS (acpi_gbl_FADT->Xgpe1_blk.address) + i); - - acpi_gbl_gpe_registers[register_index].enable_addr = - (u16) (ACPI_GET_ADDRESS (acpi_gbl_FADT->Xgpe1_blk.address) + i + gpe1_register_count); - - acpi_gbl_gpe_registers[register_index].gpe_base = - (u8) (acpi_gbl_FADT->gpe1_base + MUL_8 (i)); - - for (j = 0; j < 8; j++) { - gpe_number = acpi_gbl_gpe_registers[register_index].gpe_base + j; - acpi_gbl_gpe_valid[gpe_number] = (u8) register_index; - } - - /* - * Clear the status/enable registers. Note that status registers - * are cleared by writing a '1', while enable registers are cleared - * by writing a '0'. 
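The arithmetic above encodes the register layout quoted from the spec: status bytes fill the first half of a GPE block, enable bytes the second. Worked through for a typical FADT reporting GPE0_BLK_LEN = 8: gpe0register_count = 8 / 2 = 4 byte-wide STS/EN pairs, the status bytes sit at XGPE0_BLK + 0..3, the enable bytes at XGPE0_BLK + 4..7, and with 8 GPEs per byte register the block covers GPEs 0x00 through 0x1F.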
- */ - acpi_os_write_port (acpi_gbl_gpe_registers[register_index].enable_addr, 0x00, 8); - acpi_os_write_port (acpi_gbl_gpe_registers[register_index].status_addr, 0xFF, 8); - - register_index++; - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "GPE registers: %X@%8.8X%8.8X (Blk0) %X@%8.8X%8.8X (Blk1)\n", - gpe0register_count, HIDWORD(acpi_gbl_FADT->Xgpe0blk.address), LODWORD(acpi_gbl_FADT->Xgpe0blk.address), - gpe1_register_count, HIDWORD(acpi_gbl_FADT->Xgpe1_blk.address), LODWORD(acpi_gbl_FADT->Xgpe1_blk.address))); - - return_ACPI_STATUS (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ev_save_method_info - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Called from Acpi_walk_namespace. Expects each object to be a - * control method under the _GPE portion of the namespace. - * Extract the name and GPE type from the object, saving this - * information for quick lookup during GPE dispatch - * - * The name of each GPE control method is of the form: - * "_Lnn" or "_Enn" - * Where: - * L - means that the GPE is level triggered - * E - means that the GPE is edge triggered - * nn - is the GPE number - * - ******************************************************************************/ - -static acpi_status -acpi_ev_save_method_info ( - acpi_handle obj_handle, - u32 level, - void *obj_desc, - void **return_value) -{ - u32 gpe_number; - NATIVE_CHAR name[ACPI_NAME_SIZE + 1]; - u8 type; - - - PROC_NAME ("Ev_save_method_info"); - - - /* Extract the name from the object and convert to a string */ - - MOVE_UNALIGNED32_TO_32 (name, &((acpi_namespace_node *) obj_handle)->name); - name[ACPI_NAME_SIZE] = 0; - - /* - * Edge/Level determination is based on the 2nd s8 of the method name - */ - if (name[1] == 'L') { - type = ACPI_EVENT_LEVEL_TRIGGERED; - } - else if (name[1] == 'E') { - type = ACPI_EVENT_EDGE_TRIGGERED; - } - else { - /* Unknown method type, just ignore it! */ - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Unknown GPE method type: %s (name not of form _Lnn or _Enn)\n", - name)); - return (AE_OK); - } - - /* Convert the last two characters of the name to the Gpe Number */ - - gpe_number = STRTOUL (&name[2], NULL, 16); - if (gpe_number == ACPI_UINT32_MAX) { - /* Conversion failed; invalid method, just ignore it */ - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Could not extract GPE number from name: %s (name not of form _Lnn or _Enn)\n", - name)); - return (AE_OK); - } - - /* Ensure that we have a valid GPE number */ - - if (acpi_gbl_gpe_valid[gpe_number] == ACPI_GPE_INVALID) { - /* Not valid, all we can do here is ignore it */ - - return (AE_OK); - } - - /* - * Now we can add this information to the Gpe_info block - * for use during dispatch of this GPE. - */ - acpi_gbl_gpe_info [gpe_number].type = type; - acpi_gbl_gpe_info [gpe_number].method_handle = obj_handle; - - - /* - * Enable the GPE (SCIs should be disabled at this point) - */ - acpi_hw_enable_gpe (gpe_number); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Registered GPE method %s as GPE number %X\n", - name, gpe_number)); - return (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ev_init_gpe_control_methods - * - * PARAMETERS: None - * - * RETURN: None - * - * DESCRIPTION: Obtain the control methods associated with the GPEs. - * - * NOTE: Must be called AFTER namespace initialization! 
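The convention described above, in concrete terms: _L2C is the level-triggered control method for GPE 0x2C, _E05 the edge-triggered method for GPE 0x05, and anything else under \_GPE is ignored. The parse in the removed function boils down to:

	/* Condensed restatement of the name parse (sketch only). */
	u8 type;
	u32 gpe_number;

	if (name[1] == 'L')
		type = ACPI_EVENT_LEVEL_TRIGGERED;
	else if (name[1] == 'E')
		type = ACPI_EVENT_EDGE_TRIGGERED;
	else
		return (AE_OK);                       /* not a GPE method */

	gpe_number = STRTOUL (&name[2], NULL, 16);    /* "2C" -> 0x2C */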
- * - ******************************************************************************/ - -acpi_status -acpi_ev_init_gpe_control_methods (void) -{ - acpi_status status; - - - FUNCTION_TRACE ("Ev_init_gpe_control_methods"); - - - /* Get a permanent handle to the _GPE object */ - - status = acpi_get_handle (NULL, "\\_GPE", &acpi_gbl_gpe_obj_handle); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - /* Traverse the namespace under \_GPE to find all methods there */ - - status = acpi_walk_namespace (ACPI_TYPE_METHOD, acpi_gbl_gpe_obj_handle, - ACPI_UINT32_MAX, acpi_ev_save_method_info, - NULL, NULL); - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ev_gpe_detect - * - * PARAMETERS: None - * - * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED - * - * DESCRIPTION: Detect if any GP events have occurred - * - ******************************************************************************/ - -u32 -acpi_ev_gpe_detect (void) -{ - u32 int_status = INTERRUPT_NOT_HANDLED; - u32 i; - u32 j; - u8 enabled_status_byte; - u8 bit_mask; - - - PROC_NAME ("Ev_gpe_detect"); - - - /* - * Read all of the 8-bit GPE status and enable registers - * in both of the register blocks, saving all of it. - * Find all currently active GP events. - */ - for (i = 0; i < acpi_gbl_gpe_register_count; i++) { - acpi_os_read_port (acpi_gbl_gpe_registers[i].status_addr, - &acpi_gbl_gpe_registers[i].status, 8); - - acpi_os_read_port (acpi_gbl_gpe_registers[i].enable_addr, - &acpi_gbl_gpe_registers[i].enable, 8); - - ACPI_DEBUG_PRINT ((ACPI_DB_INTERRUPTS, - "GPE block at %X - Enable %08X Status %08X\n", - acpi_gbl_gpe_registers[i].enable_addr, - acpi_gbl_gpe_registers[i].status, - acpi_gbl_gpe_registers[i].enable)); - - /* First check if there is anything active at all in this register */ - - enabled_status_byte = (u8) (acpi_gbl_gpe_registers[i].status & - acpi_gbl_gpe_registers[i].enable); - - if (!enabled_status_byte) { - /* No active GPEs in this register, move on */ - - continue; - } - - /* Now look at the individual GPEs in this byte register */ - - for (j = 0, bit_mask = 1; j < 8; j++, bit_mask <<= 1) { - /* Examine one GPE bit */ - - if (enabled_status_byte & bit_mask) { - /* - * Found an active GPE. Dispatch the event to a handler - * or method. - */ - int_status |= acpi_ev_gpe_dispatch ( - acpi_gbl_gpe_registers[i].gpe_base + j); - } - } - } - - return (int_status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ev_asynch_execute_gpe_method - * - * PARAMETERS: Gpe_number - The 0-based Gpe number - * - * RETURN: None - * - * DESCRIPTION: Perform the actual execution of a GPE control method. This - * function is called from an invocation of Acpi_os_queue_for_execution - * (and therefore does NOT execute at interrupt level) so that - * the control method itself is not executed in the context of - * the SCI interrupt handler. 
- * - ******************************************************************************/ - -static void -acpi_ev_asynch_execute_gpe_method ( - void *context) -{ - unsigned long gpe_number = (unsigned long) context; - acpi_gpe_level_info gpe_info; - - - FUNCTION_TRACE ("Ev_asynch_execute_gpe_method"); - - /* - * Take a snapshot of the GPE info for this level - */ - acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); - gpe_info = acpi_gbl_gpe_info [gpe_number]; - acpi_ut_release_mutex (ACPI_MTX_EVENTS); - - /* - * Method Handler (_Lxx, _Exx): - * ---------------------------- - * Evaluate the _Lxx/_Exx control method that corresponds to this GPE. - */ - if (gpe_info.method_handle) { - acpi_ns_evaluate_by_handle (gpe_info.method_handle, NULL, NULL); - } - - /* - * Level-Triggered? - * ---------------- - * If level-triggered we clear the GPE status bit after handling the event. - */ - if (gpe_info.type & ACPI_EVENT_LEVEL_TRIGGERED) { - acpi_hw_clear_gpe (gpe_number); - } - - /* - * Enable the GPE. - */ - acpi_hw_enable_gpe (gpe_number); - - return_VOID; -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ev_gpe_dispatch - * - * PARAMETERS: Gpe_number - The 0-based Gpe number - * - * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED - * - * DESCRIPTION: Handle and dispatch a General Purpose Acpi_event. - * Clears the status bit for the requested event. - * - * TBD: [Investigate] is this still valid or necessary: - * The Gpe handler differs from the fixed events in that it clears the enable - * bit rather than the status bit to clear the interrupt. This allows - * software outside of interrupt context to determine what caused the SCI and - * dispatch the correct AML. - * - ******************************************************************************/ - -u32 -acpi_ev_gpe_dispatch ( - u32 gpe_number) -{ - acpi_gpe_level_info gpe_info; - - - FUNCTION_TRACE ("Ev_gpe_dispatch"); - - - /* - * Valid GPE number? - */ - if (acpi_gbl_gpe_valid[gpe_number] == ACPI_GPE_INVALID) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid GPE bit [%X].\n", gpe_number)); - return_VALUE (INTERRUPT_NOT_HANDLED); - } - - /* - * Disable the GPE. - */ - acpi_hw_disable_gpe (gpe_number); - - gpe_info = acpi_gbl_gpe_info [gpe_number]; - - /* - * Edge-Triggered? - * --------------- - * If edge-triggered, clear the GPE status bit now. Note that - * level-triggered events are cleared after the GPE is serviced. - */ - if (gpe_info.type & ACPI_EVENT_EDGE_TRIGGERED) { - acpi_hw_clear_gpe (gpe_number); - } - /* - * Function Handler (e.g. EC)? - */ - if (gpe_info.handler) { - /* Invoke function handler (at interrupt level). */ - - gpe_info.handler (gpe_info.context); - - /* Level-Triggered? */ - - if (gpe_info.type & ACPI_EVENT_LEVEL_TRIGGERED) { - acpi_hw_clear_gpe (gpe_number); - } - - /* Enable GPE */ - - acpi_hw_enable_gpe (gpe_number); - } - - /* - * Method Handler (e.g. _Exx/_Lxx)? - */ - else if (gpe_info.method_handle) { - if (ACPI_FAILURE(acpi_os_queue_for_execution (OSD_PRIORITY_GPE, - acpi_ev_asynch_execute_gpe_method, (void*) (u64)gpe_number))) { - /* - * Shoudn't occur, but if it does report an error. Note that - * the GPE will remain disabled until the ACPI Core Subsystem - * is restarted, or the handler is removed/reinstalled. - */ - REPORT_ERROR (("Acpi_ev_gpe_dispatch: Unable to queue handler for GPE bit [%X]\n", gpe_number)); - } - } - - /* - * No Handler? Report an error and leave the GPE disabled. 
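The acknowledgment rule this dispatcher enforces is worth stating plainly: an edge-triggered GPE is cleared before it is serviced (the edge has already been latched, so clearing loses nothing), while a level-triggered GPE is cleared only after its handler or method has quiesced the source; clearing it earlier would let the still-asserted line re-latch the status bit immediately. In outline:

	/* Ordering sketch only; service_gpe() is a hypothetical placeholder. */
	if (gpe_info.type & ACPI_EVENT_EDGE_TRIGGERED)
		acpi_hw_clear_gpe (gpe_number);   /* ack up front */

	service_gpe (gpe_number);                 /* handler or queued _Lxx/_Exx */

	if (gpe_info.type & ACPI_EVENT_LEVEL_TRIGGERED)
		acpi_hw_clear_gpe (gpe_number);   /* ack only after servicing */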
- */ - else { - REPORT_ERROR (("Acpi_ev_gpe_dispatch: No installed handler for GPE [%X]\n", gpe_number)); - - /* Level-Triggered? */ - - if (gpe_info.type & ACPI_EVENT_LEVEL_TRIGGERED) { - acpi_hw_clear_gpe (gpe_number); - } - } - - return_VALUE (INTERRUPT_HANDLED); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evgpeblk.c linux.21rc5-ac2/drivers/acpi/events/evgpeblk.c --- linux.21rc5/drivers/acpi/events/evgpeblk.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evgpeblk.c 2003-04-28 17:58:27.000000000 +0100 @@ -0,0 +1,900 @@ +/****************************************************************************** + * + * Module Name: evgpeblk - GPE block creation and initialization. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include +#include +#include + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evgpeblk") + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_valid_gpe_event + * + * PARAMETERS: gpe_event_info - Info for this GPE + * + * RETURN: TRUE if the gpe_event is valid + * + * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL. + * Should be called only when the GPE lists are semaphore locked + * and not subject to change. 
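+ *
+ *              Illustrative sketch (an equivalent form of the test
+ *              performed below): the list walk reduces to a pointer
+ *              containment check against each block's event_info array:
+ *
+ *                  first = &gpe_block->event_info[0];
+ *                  last  = &gpe_block->event_info[gpe_block->register_count * 8];
+ *                  if (first <= gpe_event_info && gpe_event_info < last)
+ *                      return (TRUE);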
+ * + ******************************************************************************/ + +u8 +acpi_ev_valid_gpe_event ( + struct acpi_gpe_event_info *gpe_event_info) +{ + struct acpi_gpe_xrupt_info *gpe_xrupt_block; + struct acpi_gpe_block_info *gpe_block; + + + ACPI_FUNCTION_ENTRY (); + + + /* No need for spin lock since we are not changing any list elements */ + + gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head; + while (gpe_xrupt_block) { + gpe_block = gpe_xrupt_block->gpe_block_list_head; + while (gpe_block) { + if ((&gpe_block->event_info[0] <= gpe_event_info) && + (&gpe_block->event_info[((acpi_size) gpe_block->register_count) * 8] > gpe_event_info)) { + return (TRUE); + } + + gpe_block = gpe_block->next; + } + + gpe_xrupt_block = gpe_xrupt_block->next; + } + + return (FALSE); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_walk_gpe_list + * + * PARAMETERS: gpe_walk_callback - Routine called for each GPE block + * + * RETURN: Status + * + * DESCRIPTION: Walk the GPE lists. + * FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED + * + ******************************************************************************/ + +acpi_status +acpi_ev_walk_gpe_list ( + ACPI_GPE_CALLBACK gpe_walk_callback) +{ + struct acpi_gpe_block_info *gpe_block; + struct acpi_gpe_xrupt_info *gpe_xrupt_info; + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("ev_walk_gpe_list"); + + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_ISR); + + /* Walk the interrupt level descriptor list */ + + gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; + while (gpe_xrupt_info) { + /* Walk all Gpe Blocks attached to this interrupt level */ + + gpe_block = gpe_xrupt_info->gpe_block_list_head; + while (gpe_block) { + /* One callback per GPE block */ + + status = gpe_walk_callback (gpe_xrupt_info, gpe_block); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + gpe_block = gpe_block->next; + } + + gpe_xrupt_info = gpe_xrupt_info->next; + } + +unlock_and_exit: + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_ISR); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_save_method_info + * + * PARAMETERS: Callback from walk_namespace + * + * RETURN: None + * + * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a + * control method under the _GPE portion of the namespace. 
+ * Extract the name and GPE type from the object, saving this + * information for quick lookup during GPE dispatch + * + * The name of each GPE control method is of the form: + * "_Lnn" or "_Enn" + * Where: + * L - means that the GPE is level triggered + * E - means that the GPE is edge triggered + * nn - is the GPE number [in HEX] + * + ******************************************************************************/ + +static acpi_status +acpi_ev_save_method_info ( + acpi_handle obj_handle, + u32 level, + void *obj_desc, + void **return_value) +{ + struct acpi_gpe_block_info *gpe_block = (void *) obj_desc; + struct acpi_gpe_event_info *gpe_event_info; + u32 gpe_number; + char name[ACPI_NAME_SIZE + 1]; + u8 type; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_save_method_info"); + + + /* Extract the name from the object and convert to a string */ + + ACPI_MOVE_32_TO_32 (name, + &((struct acpi_namespace_node *) obj_handle)->name.integer); + name[ACPI_NAME_SIZE] = 0; + + /* + * Edge/Level determination is based on the 2nd character of the method name + */ + switch (name[1]) { + case 'L': + type = ACPI_EVENT_LEVEL_TRIGGERED; + break; + + case 'E': + type = ACPI_EVENT_EDGE_TRIGGERED; + break; + + default: + /* Unknown method type, just ignore it! */ + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Unknown GPE method type: %s (name not of form _Lnn or _Enn)\n", + name)); + return_ACPI_STATUS (AE_OK); + } + + /* Convert the last two characters of the name to the GPE Number */ + + gpe_number = ACPI_STRTOUL (&name[2], NULL, 16); + if (gpe_number == ACPI_UINT32_MAX) { + /* Conversion failed; invalid method, just ignore it */ + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not extract GPE number from name: %s (name is not of form _Lnn or _Enn)\n", + name)); + return_ACPI_STATUS (AE_OK); + } + + /* Ensure that we have a valid GPE number for this GPE block */ + + if ((gpe_number < gpe_block->block_base_number) || + (gpe_number >= (gpe_block->block_base_number + (gpe_block->register_count * 8)))) { + /* + * Not valid for this GPE block, just ignore it + * However, it may be valid for a different GPE block, since GPE0 and GPE1 + * methods both appear under \_GPE. + */ + return_ACPI_STATUS (AE_OK); + } + + /* + * Now we can add this information to the gpe_event_info block + * for use during dispatch of this GPE. + */ + gpe_event_info = &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; + + gpe_event_info->flags = type; + gpe_event_info->method_node = (struct acpi_namespace_node *) obj_handle; + + /* + * Enable the GPE (SCIs should be disabled at this point) + */ + status = acpi_hw_enable_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Registered GPE method %s as GPE number 0x%.2X\n", + name, gpe_number)); + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_get_gpe_xrupt_block + * + * PARAMETERS: interrupt_level - Interrupt for a GPE block + * + * RETURN: A GPE interrupt block + * + * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt + * block per unique interrupt level used for GPEs. + * Should be called only when the GPE lists are semaphore locked + * and not subject to change. 
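+ *
+ *              Illustrative sketch (simplified; allocate_and_link() is a
+ *              hypothetical stand-in for the lock-protected tail append
+ *              performed below):
+ *
+ *                  for (x = acpi_gbl_gpe_xrupt_list_head; x; x = x->next) {
+ *                      if (x->interrupt_level == interrupt_level) {
+ *                          return (x);
+ *                      }
+ *                  }
+ *                  x = allocate_and_link (interrupt_level);
+ *                  if (interrupt_level != acpi_gbl_FADT->sci_int) {
+ *                      acpi_os_install_interrupt_handler (interrupt_level,
+ *                          acpi_ev_gpe_xrupt_handler, x);
+ *                  }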
+ * + ******************************************************************************/ + +static struct acpi_gpe_xrupt_info * +acpi_ev_get_gpe_xrupt_block ( + u32 interrupt_level) +{ + struct acpi_gpe_xrupt_info *next_gpe_xrupt; + struct acpi_gpe_xrupt_info *gpe_xrupt; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_get_gpe_xrupt_block"); + + + /* No need for spin lock since we are not changing any list elements here */ + + next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head; + while (next_gpe_xrupt) { + if (next_gpe_xrupt->interrupt_level == interrupt_level) { + return_PTR (next_gpe_xrupt); + } + + next_gpe_xrupt = next_gpe_xrupt->next; + } + + /* Not found, must allocate a new xrupt descriptor */ + + gpe_xrupt = ACPI_MEM_CALLOCATE (sizeof (struct acpi_gpe_xrupt_info)); + if (!gpe_xrupt) { + return_PTR (NULL); + } + + gpe_xrupt->interrupt_level = interrupt_level; + + /* Install new interrupt descriptor with spin lock */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + if (acpi_gbl_gpe_xrupt_list_head) { + next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head; + while (next_gpe_xrupt->next) { + next_gpe_xrupt = next_gpe_xrupt->next; + } + + next_gpe_xrupt->next = gpe_xrupt; + gpe_xrupt->previous = next_gpe_xrupt; + } + else { + acpi_gbl_gpe_xrupt_list_head = gpe_xrupt; + } + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + + /* Install new interrupt handler if not SCI_INT */ + + if (interrupt_level != acpi_gbl_FADT->sci_int) { + status = acpi_os_install_interrupt_handler (interrupt_level, + acpi_ev_gpe_xrupt_handler, gpe_xrupt); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not install GPE interrupt handler at level 0x%X\n", + interrupt_level)); + return_PTR (NULL); + } + } + + return_PTR (gpe_xrupt); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_delete_gpe_xrupt + * + * PARAMETERS: gpe_xrupt - A GPE interrupt info block + * + * RETURN: Status + * + * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated + * interrupt handler if not the SCI interrupt. 
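+ *
+ *              The SCI descriptor itself is never torn down: the SCI
+ *              interrupt also delivers the fixed events and the FADT GPE
+ *              blocks, so for SCI_INT only the block list head is cleared
+ *              and AE_OK is returned.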
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_delete_gpe_xrupt (
+	struct acpi_gpe_xrupt_info      *gpe_xrupt)
+{
+	acpi_status                     status;
+
+
+	ACPI_FUNCTION_TRACE ("ev_delete_gpe_xrupt");
+
+
+	/* We never want to remove the SCI interrupt handler */
+
+	if (gpe_xrupt->interrupt_level == acpi_gbl_FADT->sci_int) {
+		gpe_xrupt->gpe_block_list_head = NULL;
+		return_ACPI_STATUS (AE_OK);
+	}
+
+	/* Disable this interrupt */
+
+	status = acpi_os_remove_interrupt_handler (gpe_xrupt->interrupt_level,
+			 acpi_ev_gpe_xrupt_handler);
+	if (ACPI_FAILURE (status)) {
+		return_ACPI_STATUS (status);
+	}
+
+	/* Unlink the interrupt block with lock */
+
+	acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
+	if (gpe_xrupt->previous) {
+		gpe_xrupt->previous->next = gpe_xrupt->next;
+	}
+
+	if (gpe_xrupt->next) {
+		gpe_xrupt->next->previous = gpe_xrupt->previous;
+	}
+	acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
+
+	/* Free the block */
+
+	ACPI_MEM_FREE (gpe_xrupt);
+	return_ACPI_STATUS (AE_OK);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_install_gpe_block
+ *
+ * PARAMETERS:  gpe_block           - New GPE block
+ *              interrupt_level     - Level to be associated with this GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install new GPE block with mutex support
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_install_gpe_block (
+	struct acpi_gpe_block_info      *gpe_block,
+	u32                             interrupt_level)
+{
+	struct acpi_gpe_block_info      *next_gpe_block;
+	struct acpi_gpe_xrupt_info      *gpe_xrupt_block;
+	acpi_status                     status;
+
+
+	ACPI_FUNCTION_TRACE ("ev_install_gpe_block");
+
+
+	status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE (status)) {
+		return_ACPI_STATUS (status);
+	}
+
+	gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block (interrupt_level);
+	if (!gpe_xrupt_block) {
+		status = AE_NO_MEMORY;
+		goto unlock_and_exit;
+	}
+
+	/* Install the new block at the end of the list for this interrupt with lock */
+
+	acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
+	if (gpe_xrupt_block->gpe_block_list_head) {
+		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
+		while (next_gpe_block->next) {
+			next_gpe_block = next_gpe_block->next;
+		}
+
+		next_gpe_block->next = gpe_block;
+		gpe_block->previous = next_gpe_block;
+	}
+	else {
+		gpe_xrupt_block->gpe_block_list_head = gpe_block;
+	}
+
+	gpe_block->xrupt_block = gpe_xrupt_block;
+	acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
+
+unlock_and_exit:
+	status = acpi_ut_release_mutex (ACPI_MTX_EVENTS);
+	return_ACPI_STATUS (status);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_delete_gpe_block
+ *
+ * PARAMETERS:  gpe_block           - Existing GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove an existing GPE block with mutex support
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_delete_gpe_block (
+	struct acpi_gpe_block_info      *gpe_block)
+{
+	acpi_status                     status;
+
+
+	ACPI_FUNCTION_TRACE ("ev_delete_gpe_block");
+
+
+	status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE (status)) {
+		return_ACPI_STATUS (status);
+	}
+
+	/* Disable all GPEs in this block */
+
+	status = acpi_hw_disable_gpe_block (gpe_block->xrupt_block, gpe_block);
+
+	if (!gpe_block->previous && !gpe_block->next) {
+		/* This is the last
gpe_block on this interrupt */ + + status = acpi_ev_delete_gpe_xrupt (gpe_block->xrupt_block); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + } + else { + /* Remove the block on this interrupt with lock */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + if (gpe_block->previous) { + gpe_block->previous->next = gpe_block->next; + } + else { + gpe_block->xrupt_block->gpe_block_list_head = gpe_block->next; + } + + if (gpe_block->next) { + gpe_block->next->previous = gpe_block->previous; + } + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + } + + /* Free the gpe_block */ + + ACPI_MEM_FREE (gpe_block->register_info); + ACPI_MEM_FREE (gpe_block->event_info); + ACPI_MEM_FREE (gpe_block); + +unlock_and_exit: + status = acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_create_gpe_info_blocks + * + * PARAMETERS: gpe_block - New GPE block + * + * RETURN: Status + * + * DESCRIPTION: Create the register_info and event_info blocks for this GPE block + * + ******************************************************************************/ + +static acpi_status +acpi_ev_create_gpe_info_blocks ( + struct acpi_gpe_block_info *gpe_block) +{ + struct acpi_gpe_register_info *gpe_register_info = NULL; + struct acpi_gpe_event_info *gpe_event_info = NULL; + struct acpi_gpe_event_info *this_event; + struct acpi_gpe_register_info *this_register; + acpi_native_uint i; + acpi_native_uint j; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_create_gpe_info_blocks"); + + + /* Allocate the GPE register information block */ + + gpe_register_info = ACPI_MEM_CALLOCATE ( + (acpi_size) gpe_block->register_count * + sizeof (struct acpi_gpe_register_info)); + if (!gpe_register_info) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not allocate the gpe_register_info table\n")); + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* + * Allocate the GPE event_info block. There are eight distinct GPEs + * per register. Initialization to zeros is sufficient. + */ + gpe_event_info = ACPI_MEM_CALLOCATE ( + ((acpi_size) gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH) * + sizeof (struct acpi_gpe_event_info)); + if (!gpe_event_info) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not allocate the gpe_event_info table\n")); + status = AE_NO_MEMORY; + goto error_exit; + } + + /* Save the new Info arrays in the GPE block */ + + gpe_block->register_info = gpe_register_info; + gpe_block->event_info = gpe_event_info; + + /* + * Initialize the GPE Register and Event structures. A goal of these + * tables is to hide the fact that there are two separate GPE register sets + * in a given gpe hardware block, the status registers occupy the first half, + * and the enable registers occupy the second half. 
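+ *
+ * Illustration (hypothetical block with register_count == 2 at address B):
+ *
+ *     B + 0 : status byte 0   (GPEs base + 0x00 .. base + 0x07)
+ *     B + 1 : status byte 1   (GPEs base + 0x08 .. base + 0x0F)
+ *     B + 2 : enable byte 0
+ *     B + 3 : enable byte 1
+ *
+ * matching the address arithmetic in the loop below
+ * (status = B + i, enable = B + i + register_count).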
+ */ + this_register = gpe_register_info; + this_event = gpe_event_info; + + for (i = 0; i < gpe_block->register_count; i++) { + /* Init the register_info for this GPE register (8 GPEs) */ + + this_register->base_gpe_number = (u8) (gpe_block->block_base_number + + (i * ACPI_GPE_REGISTER_WIDTH)); + + ACPI_STORE_ADDRESS (this_register->status_address.address, + (gpe_block->block_address.address + + i)); + + ACPI_STORE_ADDRESS (this_register->enable_address.address, + (gpe_block->block_address.address + + i + + gpe_block->register_count)); + + this_register->status_address.address_space_id = gpe_block->block_address.address_space_id; + this_register->enable_address.address_space_id = gpe_block->block_address.address_space_id; + this_register->status_address.register_bit_width = ACPI_GPE_REGISTER_WIDTH; + this_register->enable_address.register_bit_width = ACPI_GPE_REGISTER_WIDTH; + this_register->status_address.register_bit_offset = ACPI_GPE_REGISTER_WIDTH; + this_register->enable_address.register_bit_offset = ACPI_GPE_REGISTER_WIDTH; + + /* Init the event_info for each GPE within this register */ + + for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { + this_event->bit_mask = acpi_gbl_decode_to8bit[j]; + this_event->register_info = this_register; + this_event++; + } + + /* + * Clear the status/enable registers. Note that status registers + * are cleared by writing a '1', while enable registers are cleared + * by writing a '0'. + */ + status = acpi_hw_low_level_write (ACPI_GPE_REGISTER_WIDTH, 0x00, + &this_register->enable_address); + if (ACPI_FAILURE (status)) { + goto error_exit; + } + + status = acpi_hw_low_level_write (ACPI_GPE_REGISTER_WIDTH, 0xFF, + &this_register->status_address); + if (ACPI_FAILURE (status)) { + goto error_exit; + } + + this_register++; + } + + return_ACPI_STATUS (AE_OK); + + +error_exit: + if (gpe_register_info) { + ACPI_MEM_FREE (gpe_register_info); + } + if (gpe_event_info) { + ACPI_MEM_FREE (gpe_event_info); + } + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_create_gpe_block + * + * PARAMETERS: gpe_device - Handle to the parent GPE block + * gpe_block_address - Address and space_iD + * register_count - Number of GPE register pairs in the block + * gpe_block_base_number - Starting GPE number for the block + * interrupt_level - H/W interrupt for the block + * return_gpe_block - Where the new block descriptor is returned + * + * RETURN: Status + * + * DESCRIPTION: Create and Install a block of GPE registers + * + ******************************************************************************/ + +acpi_status +acpi_ev_create_gpe_block ( + struct acpi_namespace_node *gpe_device, + struct acpi_generic_address *gpe_block_address, + u32 register_count, + u8 gpe_block_base_number, + u32 interrupt_level, + struct acpi_gpe_block_info **return_gpe_block) +{ + struct acpi_gpe_block_info *gpe_block; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_create_gpe_block"); + + + if (!register_count) { + return_ACPI_STATUS (AE_OK); + } + + /* Allocate a new GPE block */ + + gpe_block = ACPI_MEM_CALLOCATE (sizeof (struct acpi_gpe_block_info)); + if (!gpe_block) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* Initialize the new GPE block */ + + gpe_block->register_count = register_count; + gpe_block->block_base_number = gpe_block_base_number; + + ACPI_MEMCPY (&gpe_block->block_address, gpe_block_address, sizeof (struct acpi_generic_address)); + + /* Create the register_info and event_info 
sub-structures */ + + status = acpi_ev_create_gpe_info_blocks (gpe_block); + if (ACPI_FAILURE (status)) { + ACPI_MEM_FREE (gpe_block); + return_ACPI_STATUS (status); + } + + /* Install the new block in the global list(s) */ + + status = acpi_ev_install_gpe_block (gpe_block, interrupt_level); + if (ACPI_FAILURE (status)) { + ACPI_MEM_FREE (gpe_block); + return_ACPI_STATUS (status); + } + + /* Dump info about this GPE block */ + + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "GPE Block: [%4.4s] %X registers at %8.8X%8.8X on interrupt %d\n", + gpe_device->name.ascii, + gpe_block->register_count, + ACPI_HIDWORD (gpe_block->block_address.address), + ACPI_LODWORD (gpe_block->block_address.address), + interrupt_level)); + + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "GPE Block defined as GPE 0x%.2X to GPE 0x%.2X\n", + gpe_block->block_base_number, + (u32) (gpe_block->block_base_number + + ((gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH) -1)))); + + /* Find all GPE methods (_Lxx, _Exx) for this block */ + + status = acpi_ns_walk_namespace (ACPI_TYPE_METHOD, gpe_device, + ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, acpi_ev_save_method_info, + gpe_block, NULL); + + /* Return the new block */ + + if (return_gpe_block) { + (*return_gpe_block) = gpe_block; + } + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_gpe_initialize + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Initialize the GPE data structures + * + ******************************************************************************/ + +acpi_status +acpi_ev_gpe_initialize (void) +{ + u32 register_count0 = 0; + u32 register_count1 = 0; + u32 gpe_number_max = 0; + acpi_handle gpe_device; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_gpe_initialize"); + + + /* Get a handle to the predefined _GPE object */ + + status = acpi_get_handle (NULL, "\\_GPE", &gpe_device); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* + * Initialize the GPE Blocks defined in the FADT + * + * Why the GPE register block lengths are divided by 2: From the ACPI Spec, + * section "General-Purpose Event Registers", we have: + * + * "Each register block contains two registers of equal length + * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the + * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN + * The length of the GPE1_STS and GPE1_EN registers is equal to + * half the GPE1_LEN. If a generic register block is not supported + * then its respective block pointer and block length values in the + * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need + * to be the same size." + */ + + /* + * Determine the maximum GPE number for this machine. + * + * Note: both GPE0 and GPE1 are optional, and either can exist without + * the other. + * + * If EITHER the register length OR the block address are zero, then that + * particular block is not supported. 
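+ *
+ * Worked example (hypothetical FADT values): gpe0_blk_len == 8 gives
+ * register_count0 = 8 / 2 = 4 status/enable register pairs, i.e.
+ * 4 * ACPI_GPE_REGISTER_WIDTH = 32 events numbered GPE 0x00 through
+ * 0x1F, so gpe_number_max = 31.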
+ */ + if (acpi_gbl_FADT->gpe0_blk_len && + acpi_gbl_FADT->xgpe0_blk.address) { + /* GPE block 0 exists (has both length and address > 0) */ + + register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2); + + gpe_number_max = (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; + + /* Install GPE Block 0 */ + + status = acpi_ev_create_gpe_block (gpe_device, &acpi_gbl_FADT->xgpe0_blk, + register_count0, 0, acpi_gbl_FADT->sci_int, &acpi_gbl_gpe_fadt_blocks[0]); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "Could not create GPE Block 0, %s\n", + acpi_format_exception (status))); + } + } + + if (acpi_gbl_FADT->gpe1_blk_len && + acpi_gbl_FADT->xgpe1_blk.address) { + /* GPE block 1 exists (has both length and address > 0) */ + + register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2); + + /* Check for GPE0/GPE1 overlap (if both banks exist) */ + + if ((register_count0) && + (gpe_number_max >= acpi_gbl_FADT->gpe1_base)) { + ACPI_REPORT_ERROR (( + "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1\n", + gpe_number_max, acpi_gbl_FADT->gpe1_base, + acpi_gbl_FADT->gpe1_base + + ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1))); + + /* Ignore GPE1 block by setting the register count to zero */ + + register_count1 = 0; + } + else { + /* Install GPE Block 1 */ + + status = acpi_ev_create_gpe_block (gpe_device, &acpi_gbl_FADT->xgpe1_blk, + register_count1, acpi_gbl_FADT->gpe1_base, + acpi_gbl_FADT->sci_int, &acpi_gbl_gpe_fadt_blocks[1]); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "Could not create GPE Block 1, %s\n", + acpi_format_exception (status))); + } + + /* + * GPE0 and GPE1 do not have to be contiguous in the GPE number space, + * But, GPE0 always starts at zero. + */ + gpe_number_max = acpi_gbl_FADT->gpe1_base + + ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); + } + } + + /* Exit if there are no GPE registers */ + + if ((register_count0 + register_count1) == 0) { + /* GPEs are not required by ACPI, this is OK */ + + ACPI_REPORT_INFO (("There are no GPE blocks defined in the FADT\n")); + return_ACPI_STATUS (AE_OK); + } + + /* Check for Max GPE number out-of-range */ + + if (gpe_number_max > ACPI_GPE_MAX) { + ACPI_REPORT_ERROR (("Maximum GPE number from FADT is too large: 0x%X\n", + gpe_number_max)); + return_ACPI_STATUS (AE_BAD_VALUE); + } + + return_ACPI_STATUS (AE_OK); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evgpe.c linux.21rc5-ac2/drivers/acpi/events/evgpe.c --- linux.21rc5/drivers/acpi/events/evgpe.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evgpe.c 2003-04-28 17:58:27.000000000 +0100 @@ -0,0 +1,423 @@ +/****************************************************************************** + * + * Module Name: evgpe - General Purpose Event handling and dispatch + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include +#include +#include + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evgpe") + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_get_gpe_event_info + * + * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 + * gpe_number - Raw GPE number + * + * RETURN: A GPE event_info struct. NULL if not a valid GPE + * + * DESCRIPTION: Returns the event_info struct associated with this GPE. + * Validates the gpe_block and the gpe_number + * + * Should be called only when the GPE lists are semaphore locked + * and not subject to change. + * + ******************************************************************************/ + +struct acpi_gpe_event_info * +acpi_ev_get_gpe_event_info ( + acpi_handle gpe_device, + u32 gpe_number) +{ + union acpi_operand_object *obj_desc; + struct acpi_gpe_block_info *gpe_block; + acpi_native_uint i; + + + ACPI_FUNCTION_ENTRY (); + + + /* A NULL gpe_block means use the FADT-defined GPE block(s) */ + + if (!gpe_device) { + /* Examine GPE Block 0 and 1 (These blocks are permanent) */ + + for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { + gpe_block = acpi_gbl_gpe_fadt_blocks[i]; + if (gpe_block) { + if ((gpe_number >= gpe_block->block_base_number) && + (gpe_number < gpe_block->block_base_number + (gpe_block->register_count * 8))) { + return (&gpe_block->event_info[gpe_number - gpe_block->block_base_number]); + } + } + } + + /* The gpe_number was not in the range of either FADT GPE block */ + + return (NULL); + } + + /* + * A Non-null gpe_device means this is a GPE Block Device. 
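+ *
+ * Either path validates the number with the same range test before
+ * indexing the array (sketch, equivalent to the checks below):
+ *
+ *     base = gpe_block->block_base_number;
+ *     if ((gpe_number >= base) &&
+ *         (gpe_number <  base + (gpe_block->register_count * 8))) {
+ *         return (&gpe_block->event_info[gpe_number - base]);
+ *     }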
+ */ + obj_desc = acpi_ns_get_attached_object ((struct acpi_namespace_node *) gpe_device); + if (!obj_desc || + !obj_desc->device.gpe_block) { + return (NULL); + } + + gpe_block = obj_desc->device.gpe_block; + + if ((gpe_number >= gpe_block->block_base_number) && + (gpe_number < gpe_block->block_base_number + (gpe_block->register_count * 8))) { + return (&gpe_block->event_info[gpe_number - gpe_block->block_base_number]); + } + + return (NULL); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_gpe_detect + * + * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt. + * Can have multiple GPE blocks attached. + * + * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED + * + * DESCRIPTION: Detect if any GP events have occurred. This function is + * executed at interrupt level. + * + ******************************************************************************/ + +u32 +acpi_ev_gpe_detect ( + struct acpi_gpe_xrupt_info *gpe_xrupt_list) +{ + u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; + u8 enabled_status_byte; + u8 bit_mask; + struct acpi_gpe_register_info *gpe_register_info; + u32 in_value; + acpi_status status; + struct acpi_gpe_block_info *gpe_block; + u32 gpe_number; + u32 i; + u32 j; + + + ACPI_FUNCTION_NAME ("ev_gpe_detect"); + + + /* Examine all GPE blocks attached to this interrupt level */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_ISR); + gpe_block = gpe_xrupt_list->gpe_block_list_head; + while (gpe_block) { + /* + * Read all of the 8-bit GPE status and enable registers + * in this GPE block, saving all of them. + * Find all currently active GP events. + */ + for (i = 0; i < gpe_block->register_count; i++) { + /* Get the next status/enable pair */ + + gpe_register_info = &gpe_block->register_info[i]; + + /* Read the Status Register */ + + status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &in_value, + &gpe_register_info->status_address); + gpe_register_info->status = (u8) in_value; + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + /* Read the Enable Register */ + + status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &in_value, + &gpe_register_info->enable_address); + gpe_register_info->enable = (u8) in_value; + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + ACPI_DEBUG_PRINT ((ACPI_DB_INTERRUPTS, + "GPE block at %8.8X%8.8X - Values: Enable %02X Status %02X\n", + ACPI_HIDWORD (gpe_register_info->enable_address.address), + ACPI_LODWORD (gpe_register_info->enable_address.address), + gpe_register_info->enable, + gpe_register_info->status)); + + /* First check if there is anything active at all in this register */ + + enabled_status_byte = (u8) (gpe_register_info->status & + gpe_register_info->enable); + if (!enabled_status_byte) { + /* No active GPEs in this register, move on */ + + continue; + } + + /* Now look at the individual GPEs in this byte register */ + + for (j = 0, bit_mask = 1; j < ACPI_GPE_REGISTER_WIDTH; j++, bit_mask <<= 1) { + /* Examine one GPE bit */ + + if (enabled_status_byte & bit_mask) { + /* + * Found an active GPE. Dispatch the event to a handler + * or method. 
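+ *
+ * Example (hypothetical register values): status 0x28 masked with
+ * enable 0x0C leaves 0x08, so of the bits involved only bit 3,
+ * i.e. GPE (base + 3), is dispatched on this pass.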
+ */ + gpe_number = (i * ACPI_GPE_REGISTER_WIDTH) + j; + + int_status |= acpi_ev_gpe_dispatch ( + &gpe_block->event_info[gpe_number], + gpe_number + gpe_block->register_info[gpe_number].base_gpe_number); + } + } + } + + gpe_block = gpe_block->next; + } + +unlock_and_exit: + + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_ISR); + return (int_status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_asynch_execute_gpe_method + * + * PARAMETERS: Context (gpe_event_info) - Info for this GPE + * + * RETURN: None + * + * DESCRIPTION: Perform the actual execution of a GPE control method. This + * function is called from an invocation of acpi_os_queue_for_execution + * (and therefore does NOT execute at interrupt level) so that + * the control method itself is not executed in the context of + * an interrupt handler. + * + ******************************************************************************/ + +static void ACPI_SYSTEM_XFACE +acpi_ev_asynch_execute_gpe_method ( + void *context) +{ + struct acpi_gpe_event_info *gpe_event_info = (void *) context; + u32 gpe_number = 0; + acpi_status status; + struct acpi_gpe_event_info local_gpe_event_info; + + + ACPI_FUNCTION_TRACE ("ev_asynch_execute_gpe_method"); + + + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_VOID; + } + + /* Must revalidate the gpe_number/gpe_block */ + + if (!acpi_ev_valid_gpe_event (gpe_event_info)) { + status = acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_VOID; + } + + /* + * Take a snapshot of the GPE info for this level - we copy the + * info to prevent a race condition with remove_handler/remove_block. + */ + ACPI_MEMCPY (&local_gpe_event_info, gpe_event_info, sizeof (struct acpi_gpe_event_info)); + + status = acpi_ut_release_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_VOID; + } + + if (local_gpe_event_info.method_node) { + /* + * Invoke the GPE Method (_Lxx, _Exx): + * (Evaluate the _Lxx/_Exx control method that corresponds to this GPE.) + */ + status = acpi_ns_evaluate_by_handle (local_gpe_event_info.method_node, NULL, NULL); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("%s while evaluating method [%4.4s] for GPE[%2X]\n", + acpi_format_exception (status), + local_gpe_event_info.method_node->name.ascii, gpe_number)); + } + } + + if (local_gpe_event_info.flags & ACPI_EVENT_LEVEL_TRIGGERED) { + /* + * GPE is level-triggered, we clear the GPE status bit after handling + * the event. + */ + status = acpi_hw_clear_gpe (&local_gpe_event_info); + if (ACPI_FAILURE (status)) { + return_VOID; + } + } + + /* Enable this GPE */ + + (void) acpi_hw_enable_gpe (&local_gpe_event_info); + return_VOID; +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_gpe_dispatch + * + * PARAMETERS: gpe_event_info - info for this GPE + * gpe_number - Number relative to the parent GPE block + * + * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED + * + * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC) + * or method (e.g. _Lxx/_Exx) handler. + * + * This function executes at interrupt level. + * + ******************************************************************************/ + +u32 +acpi_ev_gpe_dispatch ( + struct acpi_gpe_event_info *gpe_event_info, + u32 gpe_number) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_gpe_dispatch"); + + + /* + * If edge-triggered, clear the GPE status bit now. 
Note that + * level-triggered events are cleared after the GPE is serviced. + */ + if (gpe_event_info->flags & ACPI_EVENT_EDGE_TRIGGERED) { + status = acpi_hw_clear_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: Unable to clear GPE[%2X]\n", + gpe_number)); + return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); + } + } + + /* + * Dispatch the GPE to either an installed handler, or the control + * method associated with this GPE (_Lxx or _Exx). + * If a handler exists, we invoke it and do not attempt to run the method. + * If there is neither a handler nor a method, we disable the level to + * prevent further events from coming in here. + */ + if (gpe_event_info->handler) { + /* Invoke the installed handler (at interrupt level) */ + + gpe_event_info->handler (gpe_event_info->context); + } + else if (gpe_event_info->method_node) { + /* + * Disable GPE, so it doesn't keep firing before the method has a + * chance to run. + */ + status = acpi_hw_disable_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n", + gpe_number)); + return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); + } + + /* Execute the method associated with the GPE. */ + + if (ACPI_FAILURE (acpi_os_queue_for_execution (OSD_PRIORITY_GPE, + acpi_ev_asynch_execute_gpe_method, + gpe_event_info))) { + ACPI_REPORT_ERROR (( + "acpi_ev_gpe_dispatch: Unable to queue handler for GPE[%2X], event is disabled\n", + gpe_number)); + } + } + else { + /* No handler or method to run! */ + + ACPI_REPORT_ERROR (( + "acpi_ev_gpe_dispatch: No handler or method for GPE[%2X], disabling event\n", + gpe_number)); + + /* + * Disable the GPE. The GPE will remain disabled until the ACPI + * Core Subsystem is restarted, or the handler is reinstalled. + */ + status = acpi_hw_disable_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n", + gpe_number)); + return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); + } + } + + /* It is now safe to clear level-triggered events. */ + + if (gpe_event_info->flags & ACPI_EVENT_LEVEL_TRIGGERED) { + status = acpi_hw_clear_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: Unable to clear GPE[%2X]\n", + gpe_number)); + return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); + } + } + + return_VALUE (ACPI_INTERRUPT_HANDLED); +} + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evmisc.c linux.21rc5-ac2/drivers/acpi/events/evmisc.c --- linux.21rc5/drivers/acpi/events/evmisc.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evmisc.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,42 +1,92 @@ /****************************************************************************** * - * Module Name: evmisc - ACPI device notification handler dispatch - * and ACPI Global Lock support - * $Revision: 35 $ + * Module Name: evmisc - Miscellaneous event manager support functions * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acevents.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "achware.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EVENTS - MODULE_NAME ("evmisc") + ACPI_MODULE_NAME ("evmisc") /******************************************************************************* * - * FUNCTION: Acpi_ev_queue_notify_request + * FUNCTION: acpi_ev_is_notify_object + * + * PARAMETERS: Node - Node to check + * + * RETURN: TRUE if notifies allowed on this object + * + * DESCRIPTION: Check type of node for a object that supports notifies. + * + * TBD: This could be replaced by a flag bit in the node. 
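+ *
+ * In ASL terms, only a Notify whose target names a Device,
+ * Processor, ThermalZone, or PowerResource passes this check,
+ * e.g. (hypothetical AML):
+ *
+ *     Notify (\_SB.PCI0.DOCK, 0x01)  // DOCK is a Device
+ *
+ * A Notify aimed at any other object type is rejected by the
+ * queueing code below with AE_TYPE.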
+ * + ******************************************************************************/ + +u8 +acpi_ev_is_notify_object ( + struct acpi_namespace_node *node) +{ + switch (node->type) { + case ACPI_TYPE_DEVICE: + case ACPI_TYPE_PROCESSOR: + case ACPI_TYPE_POWER: + case ACPI_TYPE_THERMAL: + /* + * These are the ONLY objects that can receive ACPI notifications + */ + return (TRUE); + + default: + return (FALSE); + } +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_queue_notify_request * * PARAMETERS: * @@ -49,16 +99,16 @@ acpi_status acpi_ev_queue_notify_request ( - acpi_namespace_node *node, - u32 notify_value) + struct acpi_namespace_node *node, + u32 notify_value) { - acpi_operand_object *obj_desc; - acpi_operand_object *handler_obj = NULL; - acpi_generic_state *notify_info; - acpi_status status = AE_OK; + union acpi_operand_object *obj_desc; + union acpi_operand_object *handler_obj = NULL; + union acpi_generic_state *notify_info; + acpi_status status = AE_OK; - PROC_NAME ("Ev_queue_notify_request"); + ACPI_FUNCTION_NAME ("ev_queue_notify_request"); /* @@ -92,43 +142,38 @@ break; } - /* - * Get the notify object attached to the device Node + * Get the notify object attached to the NS Node */ obj_desc = acpi_ns_get_attached_object (node); if (obj_desc) { - /* We have the notify object, Get the right handler */ switch (node->type) { case ACPI_TYPE_DEVICE: - if (notify_value <= MAX_SYS_NOTIFY) { - handler_obj = obj_desc->device.sys_handler; - } - else { - handler_obj = obj_desc->device.drv_handler; - } - break; - case ACPI_TYPE_THERMAL: - if (notify_value <= MAX_SYS_NOTIFY) { - handler_obj = obj_desc->thermal_zone.sys_handler; + case ACPI_TYPE_PROCESSOR: + case ACPI_TYPE_POWER: + + if (notify_value <= ACPI_MAX_SYS_NOTIFY) { + handler_obj = obj_desc->common_notify.system_notify; } else { - handler_obj = obj_desc->thermal_zone.drv_handler; + handler_obj = obj_desc->common_notify.device_notify; } break; + + default: + /* All other types are not supported */ + return (AE_TYPE); } } - /* If there is any handler to run, schedule the dispatcher */ - if ((acpi_gbl_sys_notify.handler && (notify_value <= MAX_SYS_NOTIFY)) || - (acpi_gbl_drv_notify.handler && (notify_value > MAX_SYS_NOTIFY)) || + if ((acpi_gbl_system_notify.handler && (notify_value <= ACPI_MAX_SYS_NOTIFY)) || + (acpi_gbl_device_notify.handler && (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) { - notify_info = acpi_ut_create_generic_state (); if (!notify_info) { return (AE_NO_MEMORY); @@ -149,7 +194,8 @@ if (!handler_obj) { /* There is no per-device notify handler for this device */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "No notify handler for node %p \n", node)); + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "No notify handler for [%4.4s] node %p\n", node->name.ascii, node)); } return (status); @@ -158,7 +204,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_notify_dispatch + * FUNCTION: acpi_ev_notify_dispatch * * PARAMETERS: * @@ -169,42 +215,40 @@ * ******************************************************************************/ -void +void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch ( - void *context) + void *context) { - acpi_generic_state *notify_info = (acpi_generic_state *) context; - acpi_notify_handler global_handler = NULL; - void *global_context = NULL; - acpi_operand_object *handler_obj; + union acpi_generic_state *notify_info = (union acpi_generic_state *) context; + acpi_notify_handler global_handler = NULL; + 
void *global_context = NULL; + union acpi_operand_object *handler_obj; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* * We will invoke a global notify handler if installed. * This is done _before_ we invoke the per-device handler attached to the device. */ - if (notify_info->notify.value <= MAX_SYS_NOTIFY) { + if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) { /* Global system notification handler */ - if (acpi_gbl_sys_notify.handler) { - global_handler = acpi_gbl_sys_notify.handler; - global_context = acpi_gbl_sys_notify.context; + if (acpi_gbl_system_notify.handler) { + global_handler = acpi_gbl_system_notify.handler; + global_context = acpi_gbl_system_notify.context; } } - else { /* Global driver notification handler */ - if (acpi_gbl_drv_notify.handler) { - global_handler = acpi_gbl_drv_notify.handler; - global_context = acpi_gbl_drv_notify.context; + if (acpi_gbl_device_notify.handler) { + global_handler = acpi_gbl_device_notify.handler; + global_context = acpi_gbl_device_notify.context; } } - /* Invoke the system handler first, if present */ if (global_handler) { @@ -215,8 +259,8 @@ handler_obj = notify_info->notify.handler_obj; if (handler_obj) { - handler_obj->notify_handler.handler (notify_info->notify.node, notify_info->notify.value, - handler_obj->notify_handler.context); + handler_obj->notify.handler (notify_info->notify.node, notify_info->notify.value, + handler_obj->notify.context); } /* All done with the info object */ @@ -227,7 +271,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_global_lock_thread + * FUNCTION: acpi_ev_global_lock_thread * * RETURN: None * @@ -237,25 +281,30 @@ * ******************************************************************************/ -static void +static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread ( - void *context) + void *context) { + acpi_status status; + /* Signal threads that are waiting for the lock */ if (acpi_gbl_global_lock_thread_count) { /* Send sufficient units to the semaphore */ - acpi_os_signal_semaphore (acpi_gbl_global_lock_semaphore, + status = acpi_os_signal_semaphore (acpi_gbl_global_lock_semaphore, acpi_gbl_global_lock_thread_count); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not signal Global Lock semaphore\n")); + } } } /******************************************************************************* * - * FUNCTION: Acpi_ev_global_lock_handler + * FUNCTION: acpi_ev_global_lock_handler * * RETURN: Status * @@ -267,10 +316,10 @@ static u32 acpi_ev_global_lock_handler ( - void *context) + void *context) { - u8 acquired = FALSE; - void *global_lock; + u8 acquired = FALSE; + acpi_status status; /* @@ -278,8 +327,7 @@ * If we don't get it now, it will be marked pending and we will * take another interrupt when it becomes free. 
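+ *
+ * Sketch of the protocol (simplified from the ACPI spec; not the
+ * actual ACPI_ACQUIRE_GLOBAL_LOCK macro): the lock dword carries a
+ * Pending bit (bit 0) and an Owned bit (bit 1), updated atomically:
+ *
+ *     do {
+ *         old = lock;
+ *         new = (old & ~0x1) | 0x2;      set Owned, clear Pending
+ *         if (old & 0x2)
+ *             new |= 0x1;                owned elsewhere: set Pending
+ *     } while (!compare_exchange (&lock, old, new));
+ *     acquired = !(old & 0x2);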
*/ - global_lock = acpi_gbl_FACS->global_lock; - ACPI_ACQUIRE_GLOBAL_LOCK (global_lock, acquired); + ACPI_ACQUIRE_GLOBAL_LOCK (acpi_gbl_common_fACS.global_lock, acquired); if (acquired) { /* Got the lock, now wake all threads waiting for it */ @@ -287,17 +335,23 @@ /* Run the Global Lock thread which will signal all waiting threads */ - acpi_os_queue_for_execution (OSD_PRIORITY_HIGH, acpi_ev_global_lock_thread, - context); + status = acpi_os_queue_for_execution (OSD_PRIORITY_HIGH, + acpi_ev_global_lock_thread, context); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not queue Global Lock thread, %s\n", + acpi_format_exception (status))); + + return (ACPI_INTERRUPT_NOT_HANDLED); + } } - return (INTERRUPT_HANDLED); + return (ACPI_INTERRUPT_HANDLED); } /******************************************************************************* * - * FUNCTION: Acpi_ev_init_global_lock_handler + * FUNCTION: acpi_ev_init_global_lock_handler * * RETURN: Status * @@ -308,10 +362,10 @@ acpi_status acpi_ev_init_global_lock_handler (void) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Ev_init_global_lock_handler"); + ACPI_FUNCTION_TRACE ("ev_init_global_lock_handler"); acpi_gbl_global_lock_present = TRUE; @@ -320,7 +374,7 @@ /* * If the global lock does not exist on this platform, the attempt - * to enable GBL_STS will fail (the GBL_EN bit will not stick) + * to enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick) * Map to AE_OK, but mark global lock as not present. * Any attempt to actually use the global lock will be flagged * with an error. @@ -336,7 +390,7 @@ /****************************************************************************** * - * FUNCTION: Acpi_ev_acquire_global_lock + * FUNCTION: acpi_ev_acquire_global_lock * * RETURN: Status * @@ -345,116 +399,170 @@ *****************************************************************************/ acpi_status -acpi_ev_acquire_global_lock(void) +acpi_ev_acquire_global_lock ( + u16 timeout) { - acpi_status status = AE_OK; - u8 acquired = FALSE; - void *global_lock; + acpi_status status = AE_OK; + u8 acquired = FALSE; - FUNCTION_TRACE ("Ev_acquire_global_lock"); + ACPI_FUNCTION_TRACE ("ev_acquire_global_lock"); + +#ifndef ACPI_APPLICATION /* Make sure that we actually have a global lock */ if (!acpi_gbl_global_lock_present) { return_ACPI_STATUS (AE_NO_GLOBAL_LOCK); } +#endif /* One more thread wants the global lock */ acpi_gbl_global_lock_thread_count++; - - /* If we (OS side) have the hardware lock already, we are done */ + /* If we (OS side vs. BIOS side) have the hardware lock already, we are done */ if (acpi_gbl_global_lock_acquired) { return_ACPI_STATUS (AE_OK); } - /* Only if the FACS is valid */ - - if (!acpi_gbl_FACS) { - return_ACPI_STATUS (AE_OK); - } - - /* We must acquire the actual hardware lock */ - global_lock = acpi_gbl_FACS->global_lock; - ACPI_ACQUIRE_GLOBAL_LOCK (global_lock, acquired); + ACPI_ACQUIRE_GLOBAL_LOCK (acpi_gbl_common_fACS.global_lock, acquired); if (acquired) { /* We got the lock */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Acquired the Global Lock\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Acquired the HW Global Lock\n")); acpi_gbl_global_lock_acquired = TRUE; return_ACPI_STATUS (AE_OK); } - /* * Did not get the lock. The pending bit was set above, and we must now * wait until we get the global lock released interrupt. 
*/ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Waiting for the HW Global Lock\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Waiting for the HW Global Lock\n")); - /* - * Acquire the global lock semaphore first. - * Since this wait will block, we must release the interpreter - */ + /* + * Acquire the global lock semaphore first. + * Since this wait will block, we must release the interpreter + */ status = acpi_ex_system_wait_semaphore (acpi_gbl_global_lock_semaphore, - ACPI_UINT32_MAX); + timeout); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ev_release_global_lock + * FUNCTION: acpi_ev_release_global_lock * * DESCRIPTION: Releases ownership of the Global Lock. * ******************************************************************************/ -void +acpi_status acpi_ev_release_global_lock (void) { - u8 pending = FALSE; - void *global_lock; + u8 pending = FALSE; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ev_release_global_lock"); + ACPI_FUNCTION_TRACE ("ev_release_global_lock"); if (!acpi_gbl_global_lock_thread_count) { - REPORT_WARNING(("Global Lock has not be acquired, cannot release\n")); - return_VOID; + ACPI_REPORT_WARNING(("Cannot release HW Global Lock, it has not been acquired\n")); + return_ACPI_STATUS (AE_NOT_ACQUIRED); } - /* One fewer thread has the global lock */ + /* One fewer thread has the global lock */ acpi_gbl_global_lock_thread_count--; + if (acpi_gbl_global_lock_thread_count) { + /* There are still some threads holding the lock, cannot release */ - /* Have all threads released the lock? */ + return_ACPI_STATUS (AE_OK); + } - if (!acpi_gbl_global_lock_thread_count) { - /* - * No more threads holding lock, we can do the actual hardware - * release - */ - global_lock = acpi_gbl_FACS->global_lock; - ACPI_RELEASE_GLOBAL_LOCK (global_lock, pending); - acpi_gbl_global_lock_acquired = FALSE; + /* + * No more threads holding lock, we can do the actual hardware + * release + */ + ACPI_RELEASE_GLOBAL_LOCK (acpi_gbl_common_fACS.global_lock, pending); + acpi_gbl_global_lock_acquired = FALSE; + /* + * If the pending bit was set, we must write GBL_RLS to the control + * register + */ + if (pending) { + status = acpi_set_register (ACPI_BITREG_GLOBAL_LOCK_RELEASE, 1, ACPI_MTX_LOCK); + } + + return_ACPI_STATUS (status); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_ev_terminate + * + * PARAMETERS: none + * + * RETURN: none + * + * DESCRIPTION: Disable events and free memory allocated for table storage. + * + ******************************************************************************/ + +void +acpi_ev_terminate (void) +{ + acpi_native_uint i; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_terminate"); + + + if (acpi_gbl_events_initialized) { /* - * If the pending bit was set, we must write GBL_RLS to the control - * register + * Disable all event-related functionality. + * In all cases, on error, print a message but obviously we don't abort. 
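+ *
+ * The order below matters: event sources are silenced before the
+ * SCI handler goes away, so no SCI can arrive with no handler
+ * installed:
+ *
+ *     1. acpi_disable_event()   for each fixed event
+ *     2. acpi_ev_walk_gpe_list (acpi_hw_disable_gpe_block)
+ *     3. acpi_ev_remove_sci_handler()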
*/ - if (pending) { - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, - GBL_RLS, 1); + + /* Disable all fixed events */ + + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { + status = acpi_disable_event ((u32) i, 0); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not disable fixed event %d\n", (u32) i)); + } + } + + /* Disable all GPEs in all GPE blocks */ + + status = acpi_ev_walk_gpe_list (acpi_hw_disable_gpe_block); + + /* Remove SCI handler */ + + status = acpi_ev_remove_sci_handler (); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not remove SCI handler\n")); } } + /* Return to original mode if necessary */ + + if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) { + status = acpi_disable (); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "acpi_disable failed\n")); + } + } return_VOID; } + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evregion.c linux.21rc5-ac2/drivers/acpi/events/evregion.c --- linux.21rc5/drivers/acpi/events/evregion.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evregion.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,42 +1,59 @@ /****************************************************************************** * - * Module Name: evregion - ACPI Address_space (Op_region) handler dispatch - * $Revision: 113 $ + * Module Name: evregion - ACPI address_space (op_region) handler dispatch * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acevents.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "amlcode.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EVENTS - MODULE_NAME ("evregion") + ACPI_MODULE_NAME ("evregion") /******************************************************************************* * - * FUNCTION: Acpi_ev_install_default_address_space_handlers + * FUNCTION: acpi_ev_init_address_spaces * * PARAMETERS: * @@ -47,13 +64,13 @@ ******************************************************************************/ acpi_status -acpi_ev_install_default_address_space_handlers ( +acpi_ev_init_address_spaces ( void) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Ev_install_default_address_space_handlers"); + ACPI_FUNCTION_TRACE ("ev_init_address_spaces"); /* @@ -68,45 +85,51 @@ * space must be always available -- even though we are nowhere * near ready to find the PCI root buses at this point. * - * NOTE: We ignore AE_EXIST because this means that a handler has - * already been installed (via Acpi_install_address_space_handler) + * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler + * has already been installed (via acpi_install_address_space_handler) */ - status = acpi_install_address_space_handler (acpi_gbl_root_node, + + status = acpi_install_address_space_handler ((acpi_handle) acpi_gbl_root_node, ACPI_ADR_SPACE_SYSTEM_MEMORY, ACPI_DEFAULT_HANDLER, NULL, NULL); if ((ACPI_FAILURE (status)) && - (status != AE_EXIST)) { + (status != AE_ALREADY_EXISTS)) { return_ACPI_STATUS (status); } - status = acpi_install_address_space_handler (acpi_gbl_root_node, + status = acpi_install_address_space_handler ((acpi_handle) acpi_gbl_root_node, ACPI_ADR_SPACE_SYSTEM_IO, ACPI_DEFAULT_HANDLER, NULL, NULL); if ((ACPI_FAILURE (status)) && - (status != AE_EXIST)) { + (status != AE_ALREADY_EXISTS)) { return_ACPI_STATUS (status); } - status = acpi_install_address_space_handler (acpi_gbl_root_node, + status = acpi_install_address_space_handler ((acpi_handle) acpi_gbl_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); if ((ACPI_FAILURE (status)) && - (status != AE_EXIST)) { + (status != AE_ALREADY_EXISTS)) { return_ACPI_STATUS (status); } + status = acpi_install_address_space_handler ((acpi_handle) acpi_gbl_root_node, + ACPI_ADR_SPACE_DATA_TABLE, + ACPI_DEFAULT_HANDLER, NULL, NULL); + if ((ACPI_FAILURE (status)) && + (status != AE_ALREADY_EXISTS)) { + return_ACPI_STATUS (status); + } return_ACPI_STATUS (AE_OK); } -/* TBD: [Restructure] Move elsewhere */ - /******************************************************************************* * - * FUNCTION: Acpi_ev_execute_reg_method + * FUNCTION: acpi_ev_execute_reg_method * - * PARAMETERS: Region_obj - Object 
structure + * PARAMETERS: region_obj - Object structure * Function - On (1) or Off (0) * * RETURN: Status @@ -117,25 +140,31 @@ static acpi_status acpi_ev_execute_reg_method ( - acpi_operand_object *region_obj, - u32 function) + union acpi_operand_object *region_obj, + u32 function) { - acpi_operand_object *params[3]; - acpi_status status; + union acpi_operand_object *params[3]; + union acpi_operand_object *region_obj2; + acpi_status status; + + ACPI_FUNCTION_TRACE ("ev_execute_reg_method"); - FUNCTION_TRACE ("Ev_execute_reg_method"); + region_obj2 = acpi_ns_get_secondary_object (region_obj); + if (!region_obj2) { + return_ACPI_STATUS (AE_NOT_EXIST); + } - if (region_obj->region.extra->extra.method_REG == NULL) { + if (region_obj2->extra.method_REG == NULL) { return_ACPI_STATUS (AE_OK); } /* - * _REG method has two arguments - * Arg0: Integer: Operation region space ID - * Same value as Region_obj->Region.Space_id - * Arg1: Integer: connection status + * _REG method has two arguments + * Arg0: Integer: Operation region space ID + * Same value as region_obj->Region.space_id + * Arg1: Integer: connection status * 1 for connecting the handler, * 0 for disconnecting the handler * Passed as a parameter @@ -151,18 +180,16 @@ goto cleanup; } - /* - * Set up the parameter objects - */ - params[0]->integer.value = region_obj->region.space_id; + /* Set up the parameter objects */ + + params[0]->integer.value = region_obj->region.space_id; params[1]->integer.value = function; params[2] = NULL; - /* - * Execute the method, no return value - */ - DEBUG_EXEC(acpi_ut_display_init_pathname (region_obj->region.extra->extra.method_REG, " [Method]")); - status = acpi_ns_evaluate_by_handle (region_obj->region.extra->extra.method_REG, params, NULL); + /* Execute the method, no return value */ + + ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, region_obj2->extra.method_REG, NULL)); + status = acpi_ns_evaluate_by_handle (region_obj2->extra.method_REG, params, NULL); acpi_ut_remove_reference (params[1]); @@ -175,13 +202,13 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_address_space_dispatch + * FUNCTION: acpi_ev_address_space_dispatch * - * PARAMETERS: Region_obj - internal region object - * Space_id - ID of the address space (0-255) + * PARAMETERS: region_obj - internal region object + * space_id - ID of the address space (0-255) * Function - Read or Write operation * Address - Where in the space to read or write - * Bit_width - Field width in bits (8, 16, or 32) + * bit_width - Field width in bits (8, 16, 32, or 64) * Value - Pointer to in or out value * * RETURN: Status @@ -193,121 +220,142 @@ acpi_status acpi_ev_address_space_dispatch ( - acpi_operand_object *region_obj, - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value) + union acpi_operand_object *region_obj, + u32 function, + acpi_physical_address address, + u32 bit_width, + void *value) { - acpi_status status; - acpi_adr_space_handler handler; - acpi_adr_space_setup region_setup; - acpi_operand_object *handler_desc; - void *region_context = NULL; + acpi_status status; + acpi_status status2; + acpi_adr_space_handler handler; + acpi_adr_space_setup region_setup; + union acpi_operand_object *handler_desc; + union acpi_operand_object *region_obj2; + void *region_context = NULL; - FUNCTION_TRACE ("Ev_address_space_dispatch"); + ACPI_FUNCTION_TRACE ("ev_address_space_dispatch"); - /* - * Ensure that there is a handler associated with this region - */ - 
handler_desc = region_obj->region.addr_handler; + region_obj2 = acpi_ns_get_secondary_object (region_obj); + if (!region_obj2) { + return_ACPI_STATUS (AE_NOT_EXIST); + } + + /* Ensure that there is a handler associated with this region */ + + handler_desc = region_obj->region.address_space; if (!handler_desc) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "no handler for region(%p) [%s]\n", region_obj, acpi_ut_get_region_name (region_obj->region.space_id))); - return_ACPI_STATUS(AE_NOT_EXIST); + return_ACPI_STATUS (AE_NOT_EXIST); } /* * It may be the case that the region has never been initialized * Some types of regions require special init code */ - if (!(region_obj->region.flags & AOPOBJ_INITIALIZED)) { + if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { /* * This region has not been initialized yet, do it */ - region_setup = handler_desc->addr_handler.setup; + region_setup = handler_desc->address_space.setup; if (!region_setup) { - /* - * Bad news, no init routine and not init'd - */ + /* No initialization routine, exit with error */ + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No init routine for region(%p) [%s]\n", region_obj, acpi_ut_get_region_name (region_obj->region.space_id))); - return_ACPI_STATUS (AE_UNKNOWN_STATUS); + return_ACPI_STATUS (AE_NOT_EXIST); } /* * We must exit the interpreter because the region setup will potentially - * execute control methods + * execute control methods (e.g., _REG method for this region) */ acpi_ex_exit_interpreter (); status = region_setup (region_obj, ACPI_REGION_ACTIVATE, - handler_desc->addr_handler.context, ®ion_context); + handler_desc->address_space.context, ®ion_context); /* Re-enter the interpreter */ - acpi_ex_enter_interpreter (); + status2 = acpi_ex_enter_interpreter (); + if (ACPI_FAILURE (status2)) { + return_ACPI_STATUS (status2); + } + + /* Check for failure of the Region Setup */ - /* - * Init routine may fail - */ if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region Init: %s [%s]\n", acpi_format_exception (status), acpi_ut_get_region_name (region_obj->region.space_id))); - return_ACPI_STATUS(status); + return_ACPI_STATUS (status); } - region_obj->region.flags |= AOPOBJ_INITIALIZED; - /* - * Save the returned context for use in all accesses to - * this particular region. + * Region initialization may have been completed by region_setup */ - region_obj->region.extra->extra.region_context = region_context; + if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { + region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; + + if (region_obj2->extra.region_context) { + /* The handler for this region was already installed */ + + ACPI_MEM_FREE (region_context); + } + else { + /* + * Save the returned context for use in all accesses to + * this particular region + */ + region_obj2->extra.region_context = region_context; + } + } } - /* - * We have everything we need, begin the process - */ - handler = handler_desc->addr_handler.handler; + /* We have everything we need, we can invoke the address space handler */ - ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, - "Addrhandler %p (%p), Address %8.8X%8.8X\n", - ®ion_obj->region.addr_handler->addr_handler, handler, HIDWORD(address), - LODWORD(address))); + handler = handler_desc->address_space.handler; - if (!(handler_desc->addr_handler.flags & ADDR_HANDLER_DEFAULT_INSTALLED)) { - /* - * For handlers other than the default (supplied) handlers, we must - * exit the interpreter because the handler *might* block -- we don't - * know what it will do, so we can't hold the lock on the intepreter. 
+ ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", + ®ion_obj->region.address_space->address_space, handler, + ACPI_HIDWORD (address), ACPI_LODWORD (address), + acpi_ut_get_region_name (region_obj->region.space_id))); + + if (!(handler_desc->address_space.flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { + /* + * For handlers other than the default (supplied) handlers, we must + * exit the interpreter because the handler *might* block -- we don't + * know what it will do, so we can't hold the lock on the intepreter. */ acpi_ex_exit_interpreter(); } - /* - * Invoke the handler. - */ + /* Call the handler */ + status = handler (function, address, bit_width, value, - handler_desc->addr_handler.context, - region_obj->region.extra->extra.region_context); + handler_desc->address_space.context, + region_obj2->extra.region_context); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region handler: %s [%s]\n", - acpi_format_exception (status), - acpi_ut_get_region_name (region_obj->region.space_id))); + ACPI_REPORT_ERROR (("Handler for [%s] returned %s\n", + acpi_ut_get_region_name (region_obj->region.space_id), + acpi_format_exception (status))); } - if (!(handler_desc->addr_handler.flags & ADDR_HANDLER_DEFAULT_INSTALLED)) { + if (!(handler_desc->address_space.flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { /* * We just returned from a non-default handler, we must re-enter the * interpreter */ - acpi_ex_enter_interpreter (); + status2 = acpi_ex_enter_interpreter (); + if (ACPI_FAILURE (status2)) { + return_ACPI_STATUS (status2); + } } return_ACPI_STATUS (status); @@ -315,10 +363,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_disassociate_region_from_handler + * FUNCTION: acpi_ev_detach_region * - * PARAMETERS: Region_obj - Region Object - * Acpi_ns_is_locked - Namespace Region Already Locked? + * PARAMETERS: region_obj - Region Object + * acpi_ns_is_locked - Namespace Region Already Locked? 
* * RETURN: None * @@ -328,112 +376,118 @@ ******************************************************************************/ void -acpi_ev_disassociate_region_from_handler( - acpi_operand_object *region_obj, - u8 acpi_ns_is_locked) +acpi_ev_detach_region( + union acpi_operand_object *region_obj, + u8 acpi_ns_is_locked) { - acpi_operand_object *handler_obj; - acpi_operand_object *obj_desc; - acpi_operand_object **last_obj_ptr; - acpi_adr_space_setup region_setup; - void *region_context; - acpi_status status; + union acpi_operand_object *handler_obj; + union acpi_operand_object *obj_desc; + union acpi_operand_object **last_obj_ptr; + acpi_adr_space_setup region_setup; + void *region_context; + union acpi_operand_object *region_obj2; + acpi_status status; - FUNCTION_TRACE ("Ev_disassociate_region_from_handler"); + ACPI_FUNCTION_TRACE ("ev_detach_region"); - region_context = region_obj->region.extra->extra.region_context; + region_obj2 = acpi_ns_get_secondary_object (region_obj); + if (!region_obj2) { + return_VOID; + } + region_context = region_obj2->extra.region_context; - /* - * Get the address handler from the region object - */ - handler_obj = region_obj->region.addr_handler; + /* Get the address handler from the region object */ + + handler_obj = region_obj->region.address_space; if (!handler_obj) { - /* - * This region has no handler, all done - */ + /* This region has no handler, all done */ + return_VOID; } + /* Find this region in the handler's list */ - /* - * Find this region in the handler's list - */ - obj_desc = handler_obj->addr_handler.region_list; - last_obj_ptr = &handler_obj->addr_handler.region_list; + obj_desc = handler_obj->address_space.region_list; + last_obj_ptr = &handler_obj->address_space.region_list; while (obj_desc) { - /* - * See if this is the one - */ + /* Is this the correct Region? 
*/ + if (obj_desc == region_obj) { ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, "Removing Region %p from address handler %p\n", region_obj, handler_obj)); - /* - * This is it, remove it from the handler's list - */ + + /* This is it, remove it from the handler's list */ + *last_obj_ptr = obj_desc->region.next; obj_desc->region.next = NULL; /* Must clear field */ if (acpi_ns_is_locked) { - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_VOID; + } } - /* - * Now stop region accesses by executing the _REG method - */ - acpi_ev_execute_reg_method (region_obj, 0); + /* Now stop region accesses by executing the _REG method */ + + status = acpi_ev_execute_reg_method (region_obj, 0); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s from region _REG, [%s]\n", + acpi_format_exception (status), + acpi_ut_get_region_name (region_obj->region.space_id))); + } if (acpi_ns_is_locked) { - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_VOID; + } } - /* - * Call the setup handler with the deactivate notification - */ - region_setup = handler_obj->addr_handler.setup; + /* Call the setup handler with the deactivate notification */ + + region_setup = handler_obj->address_space.setup; status = region_setup (region_obj, ACPI_REGION_DEACTIVATE, - handler_obj->addr_handler.context, ®ion_context); + handler_obj->address_space.context, ®ion_context); + + /* Init routine may fail, Just ignore errors */ - /* - * Init routine may fail, Just ignore errors - */ if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s from region init, [%s]\n", acpi_format_exception (status), acpi_ut_get_region_name (region_obj->region.space_id))); } - region_obj->region.flags &= ~(AOPOBJ_INITIALIZED); + region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); /* - * Remove handler reference in the region + * Remove handler reference in the region * - * NOTE: this doesn't mean that the region goes away - * The region is just inaccessible as indicated to - * the _REG method + * NOTE: this doesn't mean that the region goes away + * The region is just inaccessible as indicated to + * the _REG method * - * If the region is on the handler's list - * this better be the region's handler + * If the region is on the handler's list + * this better be the region's handler */ - region_obj->region.addr_handler = NULL; + region_obj->region.address_space = NULL; + acpi_ut_remove_reference (handler_obj); return_VOID; + } - } /* found the right handler */ + /* Walk the linked list of handlers */ - /* - * Move through the linked list of handlers - */ last_obj_ptr = &obj_desc->region.next; obj_desc = obj_desc->region.next; } - /* - * If we get here, the region was not in the handler's region list - */ + /* If we get here, the region was not in the handler's region list */ + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, "Cannot remove region %p from address handler %p\n", region_obj, handler_obj)); @@ -444,11 +498,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_associate_region_and_handler + * FUNCTION: acpi_ev_attach_region * - * PARAMETERS: Handler_obj - Handler Object - * Region_obj - Region Object - * Acpi_ns_is_locked - Namespace Region Already Locked? + * PARAMETERS: handler_obj - Handler Object + * region_obj - Region Object + * acpi_ns_is_locked - Namespace Region Already Locked? 
* * RETURN: None * @@ -458,15 +512,16 @@ ******************************************************************************/ acpi_status -acpi_ev_associate_region_and_handler ( - acpi_operand_object *handler_obj, - acpi_operand_object *region_obj, - u8 acpi_ns_is_locked) +acpi_ev_attach_region ( + union acpi_operand_object *handler_obj, + union acpi_operand_object *region_obj, + u8 acpi_ns_is_locked) { - acpi_status status; + acpi_status status; + acpi_status status2; - FUNCTION_TRACE ("Ev_associate_region_and_handler"); + ACPI_FUNCTION_TRACE ("ev_attach_region"); ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, @@ -474,28 +529,38 @@ region_obj, handler_obj, acpi_ut_get_region_name (region_obj->region.space_id))); - /* - * Link this region to the front of the handler's list - */ - region_obj->region.next = handler_obj->addr_handler.region_list; - handler_obj->addr_handler.region_list = region_obj; + /* Link this region to the front of the handler's list */ - /* - * set the region's handler - */ - region_obj->region.addr_handler = handler_obj; + region_obj->region.next = handler_obj->address_space.region_list; + handler_obj->address_space.region_list = region_obj; + + /* Install the region's handler */ + + if (region_obj->region.address_space) { + return_ACPI_STATUS (AE_ALREADY_EXISTS); + } + + region_obj->region.address_space = handler_obj; + acpi_ut_add_reference (handler_obj); /* - * Last thing, tell all users that this region is usable + * Tell all users that this region is usable by running the _REG + * method */ if (acpi_ns_is_locked) { - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + status2 = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status2)) { + return_ACPI_STATUS (status2); + } } status = acpi_ev_execute_reg_method (region_obj, 1); if (acpi_ns_is_locked) { - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status2 = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status2)) { + return_ACPI_STATUS (status2); + } } return_ACPI_STATUS (status); @@ -504,14 +569,14 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_addr_handler_helper + * FUNCTION: acpi_ev_install_handler * * PARAMETERS: Handle - Node to be dumped * Level - Nesting level of the handle - * Context - Passed into Acpi_ns_walk_namespace + * Context - Passed into acpi_ns_walk_namespace * - * DESCRIPTION: This routine checks to see if the object is a Region if it - * is then the address handler is installed in it. + * DESCRIPTION: This routine installs an address handler into objects that are + * of type Region or Device. * * If the Object is a Device, and the device has a handler of * the same type then the search is terminated in that branch. 
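
As context for the hunk above: the renamed acpi_ev_install_handler() enforces the handler scoping rule during the namespace walk. A device that already owns a handler for the same address space terminates descent into its branch (the AE_CTRL_DEPTH return), while matching regions are detached from any previous handler and attached to the new one. Below is a minimal, self-contained sketch of just that pruning logic; the types and names are hypothetical simplifications, not the real ACPICA structures.

	/*
	 * Illustrative sketch only, not part of the patch.  Models the branch
	 * pruning done by acpi_ev_install_handler(): a device that already has
	 * a handler for the requested address space ends the descent into its
	 * branch, preserving the scoping rule; matching regions would get
	 * re-attached to the new handler.
	 */
	enum node_type { NT_DEVICE, NT_REGION, NT_OTHER };

	struct node {
		enum node_type	 type;
		int		 space_id;	/* address space served or used     */
		int		 has_handler;	/* device: handler already present? */
		struct node	*child;
		struct node	*sibling;
	};

	static void install_handler(struct node *n, int space_id)
	{
		for ( ; n != 0; n = n->sibling) {
			if (n->type == NT_DEVICE && n->has_handler &&
			    n->space_id == space_id) {
				continue;	/* prune branch, like AE_CTRL_DEPTH */
			}
			if (n->type == NT_REGION && n->space_id == space_id) {
				/* detach any old handler, attach the new one */
			}
			install_handler(n->child, space_id);
		}
	}

	int main(void)
	{
		struct node region = { NT_REGION, 2, 0, 0, 0 };
		struct node dev    = { NT_DEVICE, 2, 1, &region, 0 };

		install_handler(&dev, 2);	/* region skipped: device prunes */
		return 0;
	}
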
@@ -522,23 +587,23 @@ ******************************************************************************/ acpi_status -acpi_ev_addr_handler_helper ( - acpi_handle obj_handle, - u32 level, - void *context, - void **return_value) +acpi_ev_install_handler ( + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value) { - acpi_operand_object *handler_obj; - acpi_operand_object *tmp_obj; - acpi_operand_object *obj_desc; - acpi_namespace_node *node; - acpi_status status; + union acpi_operand_object *handler_obj; + union acpi_operand_object *next_handler_obj; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + acpi_status status; - PROC_NAME ("Ev_addr_handler_helper"); + ACPI_FUNCTION_NAME ("ev_install_handler"); - handler_obj = (acpi_operand_object *) context; + handler_obj = (union acpi_operand_object *) context; /* Parameter validation */ @@ -554,8 +619,8 @@ } /* - * We only care about regions.and objects - * that can have address handlers + * We only care about regions.and objects + * that are allowed to have address space handlers */ if ((node->type != ACPI_TYPE_DEVICE) && (node->type != ACPI_TYPE_REGION) && @@ -567,81 +632,70 @@ obj_desc = acpi_ns_get_attached_object (node); if (!obj_desc) { - /* - * The object DNE, we don't care about it - */ + /* No object, just exit */ + return (AE_OK); } - /* - * Devices are handled different than regions - */ - if (IS_THIS_OBJECT_TYPE (obj_desc, ACPI_TYPE_DEVICE)) { - /* - * See if this guy has any handlers - */ - tmp_obj = obj_desc->device.addr_handler; - while (tmp_obj) { - /* - * Now let's see if it's for the same address space. - */ - if (tmp_obj->addr_handler.space_id == handler_obj->addr_handler.space_id) { - /* - * It's for the same address space - */ + /* Devices are handled different than regions */ + + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_DEVICE) { + /* Check if this Device already has a handler for this address space */ + + next_handler_obj = obj_desc->device.address_space; + while (next_handler_obj) { + /* Found a handler, is it for the same address space? */ + + if (next_handler_obj->address_space.space_id == handler_obj->address_space.space_id) { ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, "Found handler for region [%s] in device %p(%p) handler %p\n", - acpi_ut_get_region_name (handler_obj->addr_handler.space_id), - obj_desc, tmp_obj, handler_obj)); + acpi_ut_get_region_name (handler_obj->address_space.space_id), + obj_desc, next_handler_obj, handler_obj)); /* - * Since the object we found it on was a device, then it - * means that someone has already installed a handler for - * the branch of the namespace from this device on. Just - * bail out telling the walk routine to not traverse this - * branch. This preserves the scoping rule for handlers. + * Since the object we found it on was a device, then it + * means that someone has already installed a handler for + * the branch of the namespace from this device on. Just + * bail out telling the walk routine to not traverse this + * branch. This preserves the scoping rule for handlers. */ return (AE_CTRL_DEPTH); } - /* - * Move through the linked list of handlers - */ - tmp_obj = tmp_obj->addr_handler.next; + /* Walk the linked list of handlers attached to this device */ + + next_handler_obj = next_handler_obj->address_space.next; } /* - * As long as the device didn't have a handler for this - * space we don't care about it. We just ignore it and - * proceed. + * As long as the device didn't have a handler for this + * space we don't care about it. 
We just ignore it and + * proceed. */ return (AE_OK); } - /* - * Only here if it was a region - */ - if (obj_desc->region.space_id != handler_obj->addr_handler.space_id) { + /* Object is a Region */ + + if (obj_desc->region.space_id != handler_obj->address_space.space_id) { /* - * This region is for a different address space - * ignore it + * This region is for a different address space + * -- just ignore it */ return (AE_OK); } /* - * Now we have a region and it is for the handler's address - * space type. + * Now we have a region and it is for the handler's address + * space type. * - * First disconnect region for any previous handler (if any) + * First disconnect region for any previous handler (if any) */ - acpi_ev_disassociate_region_from_handler (obj_desc, FALSE); + acpi_ev_detach_region (obj_desc, FALSE); - /* - * Then connect the region to the new handler - */ - status = acpi_ev_associate_region_and_handler (handler_obj, obj_desc, FALSE); + /* Connect the region to the new handler */ + status = acpi_ev_attach_region (handler_obj, obj_desc, FALSE); return (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evrgnini.c linux.21rc5-ac2/drivers/acpi/events/evrgnini.c --- linux.21rc5/drivers/acpi/events/evrgnini.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evrgnini.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,47 +1,63 @@ /****************************************************************************** * - * Module Name: evrgnini- ACPI Address_space (Op_region) init - * $Revision: 48 $ + * Module Name: evrgnini- ACPI address_space (op_region) init * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acevents.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "amlcode.h" +#include +#include +#include #define _COMPONENT ACPI_EVENTS - MODULE_NAME ("evrgnini") + ACPI_MODULE_NAME ("evrgnini") /******************************************************************************* * - * FUNCTION: Acpi_ev_system_memory_region_setup + * FUNCTION: acpi_ev_system_memory_region_setup * - * PARAMETERS: Region_obj - region we are interested in - * Function - start or stop - * Handler_context - Address space handler context - * Region_context - Region specific context + * PARAMETERS: region_obj - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context * * RETURN: Status * @@ -51,12 +67,16 @@ acpi_status acpi_ev_system_memory_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context) + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) { - FUNCTION_TRACE ("Ev_system_memory_region_setup"); + union acpi_operand_object *region_desc = (union acpi_operand_object *) handle; + struct acpi_mem_space_context *local_region_context; + + + ACPI_FUNCTION_TRACE ("ev_system_memory_region_setup"); if (function == ACPI_REGION_DEACTIVATE) { @@ -67,26 +87,31 @@ return_ACPI_STATUS (AE_OK); } + /* Create a new context */ - /* Activate. 
Create a new context */ - - *region_context = ACPI_MEM_CALLOCATE (sizeof (acpi_mem_space_context)); - if (!(*region_context)) { + local_region_context = ACPI_MEM_CALLOCATE (sizeof (struct acpi_mem_space_context)); + if (!(local_region_context)) { return_ACPI_STATUS (AE_NO_MEMORY); } + /* Save the region length and address for use in the handler */ + + local_region_context->length = region_desc->region.length; + local_region_context->address = region_desc->region.address; + + *region_context = local_region_context; return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ev_io_space_region_setup + * FUNCTION: acpi_ev_io_space_region_setup * - * PARAMETERS: Region_obj - region we are interested in - * Function - start or stop - * Handler_context - Address space handler context - * Region_context - Region specific context + * PARAMETERS: region_obj - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context * * RETURN: Status * @@ -96,12 +121,12 @@ acpi_status acpi_ev_io_space_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context) + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) { - FUNCTION_TRACE ("Ev_io_space_region_setup"); + ACPI_FUNCTION_TRACE ("ev_io_space_region_setup"); if (function == ACPI_REGION_DEACTIVATE) { @@ -117,12 +142,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_pci_config_region_setup + * FUNCTION: acpi_ev_pci_config_region_setup * - * PARAMETERS: Region_obj - region we are interested in - * Function - start or stop - * Handler_context - Address space handler context - * Region_context - Region specific context + * PARAMETERS: region_obj - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context * * RETURN: Status * @@ -134,128 +159,155 @@ acpi_status acpi_ev_pci_config_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context) + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) { - acpi_status status = AE_OK; - acpi_integer temp; - acpi_pci_id *pci_id = *region_context; - acpi_operand_object *handler_obj; - acpi_namespace_node *node; - acpi_operand_object *region_obj = (acpi_operand_object *) handle; - acpi_device_id object_hID; + acpi_status status = AE_OK; + acpi_integer pci_value; + struct acpi_pci_id *pci_id = *region_context; + union acpi_operand_object *handler_obj; + struct acpi_namespace_node *parent_node; + struct acpi_namespace_node *pci_root_node; + union acpi_operand_object *region_obj = (union acpi_operand_object *) handle; + struct acpi_device_id object_hID; - FUNCTION_TRACE ("Ev_pci_config_region_setup"); + ACPI_FUNCTION_TRACE ("ev_pci_config_region_setup"); - handler_obj = region_obj->region.addr_handler; + handler_obj = region_obj->region.address_space; if (!handler_obj) { /* - * No installed handler. This shouldn't happen because the dispatch - * routine checks before we get here, but we check again just in case. + * No installed handler. This shouldn't happen because the dispatch + * routine checks before we get here, but we check again just in case. 
*/ ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, "Attempting to init a region %p, with no handler\n", region_obj)); return_ACPI_STATUS (AE_NOT_EXIST); } + *region_context = NULL; if (function == ACPI_REGION_DEACTIVATE) { if (pci_id) { ACPI_MEM_FREE (pci_id); - *region_context = NULL; } - return_ACPI_STATUS (status); } - - /* Create a new context */ - - pci_id = ACPI_MEM_CALLOCATE (sizeof (acpi_pci_id)); - if (!pci_id) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - /* - * For PCI Config space access, we have to pass the segment, bus, - * device and function numbers. This routine must acquire those. - */ + parent_node = acpi_ns_get_parent_node (region_obj->region.node); /* - * First get device and function numbers from the _ADR object - * in the parent's scope. - */ - node = acpi_ns_get_parent_object (region_obj->region.node); - - - /* Acpi_evaluate the _ADR object */ - - status = acpi_ut_evaluate_numeric_object (METHOD_NAME__ADR, node, &temp); - - /* - * The default is zero, since the allocation above zeroed the data, just - * do nothing on failures. - */ - if (ACPI_SUCCESS (status)) { - pci_id->device = HIWORD (temp); - pci_id->function = LOWORD (temp); - } - - /* - * Get the _SEG and _BBN values from the device upon which the handler - * is installed. + * Get the _SEG and _BBN values from the device upon which the handler + * is installed. * - * We need to get the _SEG and _BBN objects relative to the PCI BUS device. - * This is the device the handler has been registered to handle. + * We need to get the _SEG and _BBN objects relative to the PCI BUS device. + * This is the device the handler has been registered to handle. */ /* - * If the Addr_handler.Node is still pointing to the root, we need - * to scan upward for a PCI Root bridge and re-associate the Op_region - * handlers with that device. + * If the address_space.Node is still pointing to the root, we need + * to scan upward for a PCI Root bridge and re-associate the op_region + * handlers with that device. */ - if (handler_obj->addr_handler.node == acpi_gbl_root_node) { - /* - * Node is currently the parent object - */ - while (node != acpi_gbl_root_node) { - status = acpi_ut_execute_HID (node, &object_hID); + if (handler_obj->address_space.node == acpi_gbl_root_node) { + /* Start search from the parent object */ + + pci_root_node = parent_node; + while (pci_root_node != acpi_gbl_root_node) { + status = acpi_ut_execute_HID (pci_root_node, &object_hID); if (ACPI_SUCCESS (status)) { - if (!(STRNCMP (object_hID.buffer, PCI_ROOT_HID_STRING, + /* Got a valid _HID, check if this is a PCI root */ + + if (!(ACPI_STRNCMP (object_hID.buffer, PCI_ROOT_HID_STRING, sizeof (PCI_ROOT_HID_STRING)))) { - acpi_install_address_space_handler (node, + /* Install a handler for this PCI root bridge */ + + status = acpi_install_address_space_handler ((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); + if (ACPI_FAILURE (status)) { + if (status == AE_SAME_HANDLER) { + /* + * It is OK if the handler is already installed on the root + * bridge. Still need to return a context object for the + * new PCI_Config operation region, however. 
+ */ + status = AE_OK; + } + else { + ACPI_REPORT_ERROR (( + "Could not install pci_config handler for Root Bridge %4.4s, %s\n", + pci_root_node->name.ascii, acpi_format_exception (status))); + } + } break; } } - node = acpi_ns_get_parent_object (node); + pci_root_node = acpi_ns_get_parent_node (pci_root_node); } + + /* PCI root bridge not found, use namespace root node */ } else { - node = handler_obj->addr_handler.node; + pci_root_node = handler_obj->address_space.node; } /* - * The PCI segment number comes from the _SEG method + * If this region is now initialized, we are done. + * (install_address_space_handler could have initialized it) */ - status = acpi_ut_evaluate_numeric_object (METHOD_NAME__SEG, node, &temp); - if (ACPI_SUCCESS (status)) { - pci_id->segment = LOWORD (temp); + if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) { + return_ACPI_STATUS (AE_OK); + } + + /* Region is still not initialized. Create a new context */ + + pci_id = ACPI_MEM_CALLOCATE (sizeof (struct acpi_pci_id)); + if (!pci_id) { + return_ACPI_STATUS (AE_NO_MEMORY); } /* - * The PCI bus number comes from the _BBN method + * For PCI_Config space access, we need the segment, bus, + * device and function numbers. Acquire them here. + */ + + /* + * Get the PCI device and function numbers from the _ADR object + * contained in the parent's scope. + */ + status = acpi_ut_evaluate_numeric_object (METHOD_NAME__ADR, parent_node, &pci_value); + + /* + * The default is zero, and since the allocation above zeroed + * the data, just do nothing on failure. */ - status = acpi_ut_evaluate_numeric_object (METHOD_NAME__BBN, node, &temp); if (ACPI_SUCCESS (status)) { - pci_id->bus = LOWORD (temp); + pci_id->device = ACPI_HIWORD (ACPI_LODWORD (pci_value)); + pci_id->function = ACPI_LOWORD (ACPI_LODWORD (pci_value)); } + /* The PCI segment number comes from the _SEG method */ + + status = acpi_ut_evaluate_numeric_object (METHOD_NAME__SEG, pci_root_node, &pci_value); + if (ACPI_SUCCESS (status)) { + pci_id->segment = ACPI_LOWORD (pci_value); + } + + /* The PCI bus number comes from the _BBN method */ + + status = acpi_ut_evaluate_numeric_object (METHOD_NAME__BBN, pci_root_node, &pci_value); + if (ACPI_SUCCESS (status)) { + pci_id->bus = ACPI_LOWORD (pci_value); + } + + /* Complete this device's pci_id */ + + acpi_os_derive_pci_id (pci_root_node, region_obj->region.node, &pci_id); + *region_context = pci_id; return_ACPI_STATUS (AE_OK); } @@ -263,12 +315,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_pci_bar_region_setup + * FUNCTION: acpi_ev_pci_bar_region_setup * - * PARAMETERS: Region_obj - region we are interested in - * Function - start or stop - * Handler_context - Address space handler context - * Region_context - Region specific context + * PARAMETERS: region_obj - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context * * RETURN: Status * @@ -280,13 +332,12 @@ acpi_status acpi_ev_pci_bar_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context) + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) { - - FUNCTION_TRACE ("Ev_pci_bar_region_setup"); + ACPI_FUNCTION_TRACE ("ev_pci_bar_region_setup"); return_ACPI_STATUS (AE_OK); @@ -295,12 +346,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_cmos_region_setup + * FUNCTION: 
acpi_ev_cmos_region_setup * - * PARAMETERS: Region_obj - region we are interested in - * Function - start or stop - * Handler_context - Address space handler context - * Region_context - Region specific context + * PARAMETERS: region_obj - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context * * RETURN: Status * @@ -312,13 +363,12 @@ acpi_status acpi_ev_cmos_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context) + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) { - - FUNCTION_TRACE ("Ev_cmos_region_setup"); + ACPI_FUNCTION_TRACE ("ev_cmos_region_setup"); return_ACPI_STATUS (AE_OK); @@ -327,12 +377,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_default_region_setup + * FUNCTION: acpi_ev_default_region_setup * - * PARAMETERS: Region_obj - region we are interested in - * Function - start or stop - * Handler_context - Address space handler context - * Region_context - Region specific context + * PARAMETERS: region_obj - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context * * RETURN: Status * @@ -342,12 +392,12 @@ acpi_status acpi_ev_default_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context) + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) { - FUNCTION_TRACE ("Ev_default_region_setup"); + ACPI_FUNCTION_TRACE ("ev_default_region_setup"); if (function == ACPI_REGION_DEACTIVATE) { @@ -363,9 +413,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ev_initialize_region + * FUNCTION: acpi_ev_initialize_region * - * PARAMETERS: Region_obj - Region we are initializing + * PARAMETERS: region_obj - Region we are initializing + * acpi_ns_locked - Is namespace locked? * * RETURN: Status * @@ -375,7 +426,7 @@ * Get the appropriate address space handler for a newly * created region. * - * This also performs address space specific intialization. For + * This also performs address space specific initialization. For * example, PCI regions must have an _ADR object that contains * a PCI address in the scope of the definition. This address is * required to perform an access to PCI config space. 
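
As context for the PCI_Config setup above: _ADR packs the PCI device number into the high word and the function number into the low word of its low dword, while _SEG and _BBN supply the segment and bus (defaulting to zero when those methods are absent, since the pci_id allocation is zeroed); the patch then lets the OS layer (acpi_os_derive_pci_id) finalize the ID. A runnable sketch of just the decoding follows, using local stand-in macros rather than the real ACPI_HIWORD/ACPI_LOWORD/ACPI_LODWORD definitions and a simplified acpi_pci_id.

	/*
	 * Illustrative sketch only, not part of the patch.  Decodes a PCI ID
	 * the way the new acpi_ev_pci_config_region_setup() does: device and
	 * function from _ADR, segment from _SEG, bus from _BBN.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define LODWORD(v)	((uint32_t)((v) & 0xFFFFFFFFu))
	#define HIWORD(v)	((uint16_t)(((v) >> 16) & 0xFFFFu))
	#define LOWORD(v)	((uint16_t)((v) & 0xFFFFu))

	struct pci_id {
		uint16_t segment;	/* from _SEG, 0 if method is absent */
		uint16_t bus;		/* from _BBN, 0 if method is absent */
		uint16_t device;	/* high word of _ADR's low dword    */
		uint16_t function;	/* low word of _ADR's low dword     */
	};

	int main(void)
	{
		/* _ADR for PCI encodes (device << 16) | function, e.g. 1f.3 */
		uint64_t adr = ((uint64_t)0x1f << 16) | 0x3;
		struct pci_id id = { 0, 0, 0, 0 };	/* zeroed, like the CALLOCATE */

		id.device   = HIWORD(LODWORD(adr));
		id.function = LOWORD(LODWORD(adr));

		printf("%04x:%02x:%02x.%x\n",
			id.segment, id.bus, id.device, id.function);
		return 0;
	}
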
@@ -384,113 +435,124 @@ acpi_status acpi_ev_initialize_region ( - acpi_operand_object *region_obj, - u8 acpi_ns_locked) + union acpi_operand_object *region_obj, + u8 acpi_ns_locked) { - acpi_operand_object *handler_obj; - acpi_operand_object *obj_desc; - ACPI_ADR_SPACE_TYPE space_id; - acpi_namespace_node *node; - acpi_status status; - acpi_namespace_node *method_node; - acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG; + union acpi_operand_object *handler_obj; + union acpi_operand_object *obj_desc; + acpi_adr_space_type space_id; + struct acpi_namespace_node *node; + acpi_status status; + struct acpi_namespace_node *method_node; + acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG; + union acpi_operand_object *region_obj2; - FUNCTION_TRACE_U32 ("Ev_initialize_region", acpi_ns_locked); + ACPI_FUNCTION_TRACE_U32 ("ev_initialize_region", acpi_ns_locked); if (!region_obj) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - node = acpi_ns_get_parent_object (region_obj->region.node); + if (region_obj->common.flags & AOPOBJ_OBJECT_INITIALIZED) { + return_ACPI_STATUS (AE_OK); + } + + region_obj2 = acpi_ns_get_secondary_object (region_obj); + if (!region_obj2) { + return_ACPI_STATUS (AE_NOT_EXIST); + } + + node = acpi_ns_get_parent_node (region_obj->region.node); space_id = region_obj->region.space_id; - region_obj->region.addr_handler = NULL; - region_obj->region.extra->extra.method_REG = NULL; - region_obj->region.flags &= ~(AOPOBJ_INITIALIZED); + /* Setup defaults */ + + region_obj->region.address_space = NULL; + region_obj2->extra.method_REG = NULL; + region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE); + region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED; + + /* Find any "_REG" method associated with this region definition */ - /* - * Find any "_REG" associated with this region definition - */ status = acpi_ns_search_node (*reg_name_ptr, node, ACPI_TYPE_METHOD, &method_node); if (ACPI_SUCCESS (status)) { /* - * The _REG method is optional and there can be only one per region - * definition. This will be executed when the handler is attached - * or removed + * The _REG method is optional and there can be only one per region + * definition. 
This will be executed when the handler is attached + * or removed */ - region_obj->region.extra->extra.method_REG = method_node; + region_obj2->extra.method_REG = method_node; } /* - * The following loop depends upon the root Node having no parent - * ie: Acpi_gbl_Root_node->Parent_entry being set to NULL + * The following loop depends upon the root Node having no parent + * ie: acpi_gbl_root_node->parent_entry being set to NULL */ while (node) { - /* - * Check to see if a handler exists - */ + /* Check to see if a handler exists */ + handler_obj = NULL; obj_desc = acpi_ns_get_attached_object (node); if (obj_desc) { - /* - * can only be a handler if the object exists - */ + /* Can only be a handler if the object exists */ + switch (node->type) { case ACPI_TYPE_DEVICE: - handler_obj = obj_desc->device.addr_handler; + handler_obj = obj_desc->device.address_space; break; case ACPI_TYPE_PROCESSOR: - handler_obj = obj_desc->processor.addr_handler; + handler_obj = obj_desc->processor.address_space; break; case ACPI_TYPE_THERMAL: - handler_obj = obj_desc->thermal_zone.addr_handler; + handler_obj = obj_desc->thermal_zone.address_space; + break; + + default: + /* Ignore other objects */ break; } while (handler_obj) { - /* - * This guy has at least one address handler - * see if it has the type we want - */ - if (handler_obj->addr_handler.space_id == space_id) { + /* Is this handler of the correct type? */ + + if (handler_obj->address_space.space_id == space_id) { + /* Found correct handler */ + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, "Found handler %p for region %p in obj %p\n", handler_obj, region_obj, obj_desc)); - /* - * Found it! Now update the region and the handler - */ - acpi_ev_associate_region_and_handler (handler_obj, region_obj, - acpi_ns_locked); + status = acpi_ev_attach_region (handler_obj, region_obj, + acpi_ns_locked); + return_ACPI_STATUS (AE_OK); } - handler_obj = handler_obj->addr_handler.next; + /* Try next handler in the list */ - } /* while handlerobj */ + handler_obj = handler_obj->address_space.next; + } } /* - * This one does not have the handler we need - * Pop up one level + * This node does not have the handler we need; + * Pop up one level */ - node = acpi_ns_get_parent_object (node); + node = acpi_ns_get_parent_node (node); + } - } /* while Node != ROOT */ + /* If we get here, there is no handler for this region */ - /* - * If we get here, there is no handler for this region - */ ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, - "No handler for Region_type %s(%X) (Region_obj %p)\n", + "No handler for region_type %s(%X) (region_obj %p)\n", acpi_ut_get_region_name (space_id), space_id, region_obj)); return_ACPI_STATUS (AE_NOT_EXIST); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evsci.c linux.21rc5-ac2/drivers/acpi/events/evsci.c --- linux.21rc5/drivers/acpi/events/evsci.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evsci.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,281 +2,198 @@ * * Module Name: evsci - System Control Interrupt configuration and * legacy to ACPI mode state transition functions - * $Revision: 74 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. 
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acnamesp.h" -#include "achware.h" -#include "acevents.h" +#include +#include #define _COMPONENT ACPI_EVENTS - MODULE_NAME ("evsci") - - -/* - * Elements correspond to counts for TMR, NOT_USED, GBL, PWR_BTN, SLP_BTN, RTC, - * and GENERAL respectively. These counts are modified by the ACPI interrupt - * handler. - * - * TBD: [Investigate] Note that GENERAL should probably be split out into - * one element for each bit in the GPE registers - */ + ACPI_MODULE_NAME ("evsci") /******************************************************************************* * - * FUNCTION: Acpi_ev_sci_handler + * FUNCTION: acpi_ev_sci_xrupt_handler * * PARAMETERS: Context - Calling Context * * RETURN: Status code indicates whether interrupt was handled. * * DESCRIPTION: Interrupt handler that will figure out what function or - * control method to call to deal with a SCI. Installed - * using BU interrupt support. + * control method to call to deal with a SCI. 
* ******************************************************************************/ -static u32 -acpi_ev_sci_handler (void *context) +static u32 ACPI_SYSTEM_XFACE +acpi_ev_sci_xrupt_handler ( + void *context) { - u32 interrupt_handled = INTERRUPT_NOT_HANDLED; + struct acpi_gpe_xrupt_info *gpe_xrupt_list = context; + u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED; - FUNCTION_TRACE("Ev_sci_handler"); + ACPI_FUNCTION_TRACE("ev_sci_xrupt_handler"); /* - * Make sure that ACPI is enabled by checking SCI_EN. Note that we are - * required to treat the SCI interrupt as sharable, level, active low. + * We are guaranteed by the ACPI CA initialization/shutdown code that + * if this interrupt handler is installed, ACPI is enabled. */ - if (!acpi_hw_register_bit_access (ACPI_READ, ACPI_MTX_DO_NOT_LOCK, SCI_EN)) { - /* ACPI is not enabled; this interrupt cannot be for us */ - - return_VALUE (INTERRUPT_NOT_HANDLED); - } /* - * Fixed Acpi_events: - * ------------- - * Check for and dispatch any Fixed Acpi_events that have occurred + * Fixed Events: + * Check for and dispatch any Fixed Events that have occurred */ interrupt_handled |= acpi_ev_fixed_event_detect (); /* - * GPEs: - * ----- + * General Purpose Events: * Check for and dispatch any GPEs that have occurred */ - interrupt_handled |= acpi_ev_gpe_detect (); + interrupt_handled |= acpi_ev_gpe_detect (gpe_xrupt_list); return_VALUE (interrupt_handled); } -/****************************************************************************** - * - * FUNCTION: Acpi_ev_install_sci_handler - * - * PARAMETERS: none - * - * RETURN: Status - * - * DESCRIPTION: Installs SCI handler. - * - ******************************************************************************/ - -u32 -acpi_ev_install_sci_handler (void) -{ - u32 status = AE_OK; - - - FUNCTION_TRACE ("Ev_install_sci_handler"); - - - status = acpi_os_install_interrupt_handler ((u32) acpi_gbl_FADT->sci_int, - acpi_ev_sci_handler, NULL); - return_ACPI_STATUS (status); -} - - -/****************************************************************************** - +/******************************************************************************* * - * FUNCTION: Acpi_ev_remove_sci_handler + * FUNCTION: acpi_ev_gpe_xrupt_handler * - * PARAMETERS: none + * PARAMETERS: Context - Calling Context * - * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not - * installed to begin with + * RETURN: Status code indicates whether interrupt was handled. * - * DESCRIPTION: Restores original status of all fixed event enable bits and - * removes SCI handler. + * DESCRIPTION: Handler for GPE Block Device interrupts * ******************************************************************************/ -acpi_status -acpi_ev_remove_sci_handler (void) +u32 ACPI_SYSTEM_XFACE +acpi_ev_gpe_xrupt_handler ( + void *context) { - FUNCTION_TRACE ("Ev_remove_sci_handler"); - + struct acpi_gpe_xrupt_info *gpe_xrupt_list = context; + u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED; -#if 0 - /* TBD:[Investigate] Figure this out!! Disable all events first ??? 
*/ - if (original_fixed_enable_bit_status ^ 1 << acpi_event_index (TMR_FIXED_EVENT)) { - acpi_event_disable_event (TMR_FIXED_EVENT); - } + ACPI_FUNCTION_TRACE("ev_gpe_xrupt_handler"); - if (original_fixed_enable_bit_status ^ 1 << acpi_event_index (GBL_FIXED_EVENT)) { - acpi_event_disable_event (GBL_FIXED_EVENT); - } - if (original_fixed_enable_bit_status ^ 1 << acpi_event_index (PWR_BTN_FIXED_EVENT)) { - acpi_event_disable_event (PWR_BTN_FIXED_EVENT); - } - - if (original_fixed_enable_bit_status ^ 1 << acpi_event_index (SLP_BTN_FIXED_EVENT)) { - acpi_event_disable_event (SLP_BTN_FIXED_EVENT); - } - - if (original_fixed_enable_bit_status ^ 1 << acpi_event_index (RTC_FIXED_EVENT)) { - acpi_event_disable_event (RTC_FIXED_EVENT); - } - - original_fixed_enable_bit_status = 0; - -#endif + /* + * We are guaranteed by the ACPI CA initialization/shutdown code that + * if this interrupt handler is installed, ACPI is enabled. + */ - acpi_os_remove_interrupt_handler ((u32) acpi_gbl_FADT->sci_int, - acpi_ev_sci_handler); + /* + * GPEs: + * Check for and dispatch any GPEs that have occurred + */ + interrupt_handled |= acpi_ev_gpe_detect (gpe_xrupt_list); - return_ACPI_STATUS (AE_OK); + return_VALUE (interrupt_handled); } -/******************************************************************************* +/****************************************************************************** * - * FUNCTION: Acpi_ev_restore_acpi_state + * FUNCTION: acpi_ev_install_sci_handler * * PARAMETERS: none * - * RETURN: none + * RETURN: Status * - * DESCRIPTION: Restore the original ACPI state of the machine + * DESCRIPTION: Installs SCI handler. * ******************************************************************************/ -void -acpi_ev_restore_acpi_state (void) +u32 +acpi_ev_install_sci_handler (void) { - u32 index; - - - FUNCTION_TRACE ("Ev_restore_acpi_state"); - - - /* Restore the state of the chipset enable bits. */ + u32 status = AE_OK; - if (acpi_gbl_restore_acpi_chipset == TRUE) { - /* Restore the fixed events */ - if (acpi_hw_register_read (ACPI_MTX_LOCK, PM1_EN) != - acpi_gbl_pm1_enable_register_save) { - acpi_hw_register_write (ACPI_MTX_LOCK, PM1_EN, - acpi_gbl_pm1_enable_register_save); - } + ACPI_FUNCTION_TRACE ("ev_install_sci_handler"); - /* Ensure that all status bits are clear */ - - acpi_hw_clear_acpi_status (); - - - /* Now restore the GPEs */ - - for (index = 0; index < DIV_2 (acpi_gbl_FADT->gpe0blk_len); index++) { - if (acpi_hw_register_read (ACPI_MTX_LOCK, GPE0_EN_BLOCK | index) != - acpi_gbl_gpe0enable_register_save[index]) { - acpi_hw_register_write (ACPI_MTX_LOCK, GPE0_EN_BLOCK | index, - acpi_gbl_gpe0enable_register_save[index]); - } - } - - /* GPE 1 present? 
*/ - - if (acpi_gbl_FADT->gpe1_blk_len) { - for (index = 0; index < DIV_2 (acpi_gbl_FADT->gpe1_blk_len); index++) { - if (acpi_hw_register_read (ACPI_MTX_LOCK, GPE1_EN_BLOCK | index) != - acpi_gbl_gpe1_enable_register_save[index]) { - acpi_hw_register_write (ACPI_MTX_LOCK, GPE1_EN_BLOCK | index, - acpi_gbl_gpe1_enable_register_save[index]); - } - } - } - - if (acpi_hw_get_mode() != acpi_gbl_original_mode) { - acpi_hw_set_mode (acpi_gbl_original_mode); - } - } - - return_VOID; + status = acpi_os_install_interrupt_handler ((u32) acpi_gbl_FADT->sci_int, + acpi_ev_sci_xrupt_handler, acpi_gbl_gpe_xrupt_list_head); + return_ACPI_STATUS (status); } /****************************************************************************** * - * FUNCTION: Acpi_ev_terminate + * FUNCTION: acpi_ev_remove_sci_handler * * PARAMETERS: none * - * RETURN: none + * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not + * installed to begin with + * + * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be + * taken. * - * DESCRIPTION: free memory allocated for table storage. + * Note: It doesn't seem important to disable all events or set the event + * enable registers to their original values. The OS should disable + * the SCI interrupt level when the handler is removed, so no more + * events will come in. * ******************************************************************************/ -void -acpi_ev_terminate (void) +acpi_status +acpi_ev_remove_sci_handler (void) { + acpi_status status; - FUNCTION_TRACE ("Ev_terminate"); + ACPI_FUNCTION_TRACE ("ev_remove_sci_handler"); - /* - * Free global tables, etc. - */ - if (acpi_gbl_gpe_registers) { - ACPI_MEM_FREE (acpi_gbl_gpe_registers); - } - - if (acpi_gbl_gpe_info) { - ACPI_MEM_FREE (acpi_gbl_gpe_info); - } - return_VOID; + /* Just let the OS remove the handler and disable the level */ + + status = acpi_os_remove_interrupt_handler ((u32) acpi_gbl_FADT->sci_int, + acpi_ev_sci_xrupt_handler); + + return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evxface.c linux.21rc5-ac2/drivers/acpi/events/evxface.c --- linux.21rc5/drivers/acpi/events/evxface.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evxface.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,43 +1,59 @@ /****************************************************************************** * * Module Name: evxface - External interfaces for ACPI events - * $Revision: 116 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
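The evsci.c rework above leans on the OS services layer at two points: acpi_ev_install_sci_handler() now hands acpi_gbl_gpe_xrupt_list_head to acpi_os_install_interrupt_handler() as the handler context, and acpi_ev_remove_sci_handler() merely unhooks and trusts the OS to disable the interrupt level. A minimal sketch of the Linux 2.4 glue such an acpi_os_install_interrupt_handler() implies; the acpi_osd_handler typedef is assumed from this ACPICA drop, and the static bookkeeping names are illustrative, not taken from the patch:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <acpi/acpi.h>

static acpi_osd_handler acpi_irq_handler;	/* illustrative statics */
static void *acpi_irq_context;

/* Thin ISR that bounces the SCI into the ACPI CA dispatcher */
static void
acpi_irq (int irq, void *dev_id, struct pt_regs *regs)
{
	(void) (*acpi_irq_handler) (acpi_irq_context);
}

acpi_status
acpi_os_install_interrupt_handler (
	u32 irq,
	acpi_osd_handler handler,
	void *context)
{
	acpi_irq_handler = handler;
	acpi_irq_context = context;

	/* The SCI must be treated as shareable, level, active low */
	if (request_irq (irq, acpi_irq, SA_SHIRQ, "acpi", acpi_irq)) {
		printk (KERN_ERR "ACPI: SCI (IRQ%u) allocation failed\n", irq);
		return AE_ERROR;
	}
	return AE_OK;
}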
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "achware.h" -#include "acnamesp.h" -#include "acevents.h" -#include "amlcode.h" -#include "acinterp.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EVENTS - MODULE_NAME ("evxface") + ACPI_MODULE_NAME ("evxface") /******************************************************************************* * - * FUNCTION: Acpi_install_fixed_event_handler + * FUNCTION: acpi_install_fixed_event_handler * * PARAMETERS: Event - Event type to enable. * Handler - Pointer to the handler function for the @@ -53,14 +69,14 @@ acpi_status acpi_install_fixed_event_handler ( - u32 event, - acpi_event_handler handler, - void *context) + u32 event, + acpi_event_handler handler, + void *context) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Acpi_install_fixed_event_handler"); + ACPI_FUNCTION_TRACE ("acpi_install_fixed_event_handler"); /* Parameter validation */ @@ -69,23 +85,25 @@ return_ACPI_STATUS (AE_BAD_PARAMETER); } - acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* Don't allow two handlers. 
*/ if (NULL != acpi_gbl_fixed_event_handlers[event].handler) { - status = AE_EXIST; + status = AE_ALREADY_EXISTS; goto cleanup; } - /* Install the handler before enabling the event */ acpi_gbl_fixed_event_handlers[event].handler = handler; acpi_gbl_fixed_event_handlers[event].context = context; - status = acpi_enable_event (event, ACPI_EVENT_FIXED, 0); - if (!ACPI_SUCCESS (status)) { + status = acpi_enable_event (event, 0); + if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Could not enable fixed event.\n")); /* Remove the handler */ @@ -93,7 +111,6 @@ acpi_gbl_fixed_event_handlers[event].handler = NULL; acpi_gbl_fixed_event_handlers[event].context = NULL; } - else { ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Enabled fixed event %X, Handler=%p\n", event, handler)); @@ -101,14 +118,14 @@ cleanup: - acpi_ut_release_mutex (ACPI_MTX_EVENTS); + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_remove_fixed_event_handler + * FUNCTION: acpi_remove_fixed_event_handler * * PARAMETERS: Event - Event type to disable. * Handler - Address of the handler @@ -121,13 +138,13 @@ acpi_status acpi_remove_fixed_event_handler ( - u32 event, - acpi_event_handler handler) + u32 event, + acpi_event_handler handler) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Acpi_remove_fixed_event_handler"); + ACPI_FUNCTION_TRACE ("acpi_remove_fixed_event_handler"); /* Parameter validation */ @@ -136,18 +153,21 @@ return_ACPI_STATUS (AE_BAD_PARAMETER); } - acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* Disable the event before removing the handler */ - status = acpi_disable_event(event, ACPI_EVENT_FIXED, 0); + status = acpi_disable_event (event, 0); /* Always Remove the handler */ acpi_gbl_fixed_event_handlers[event].handler = NULL; acpi_gbl_fixed_event_handlers[event].context = NULL; - if (!ACPI_SUCCESS (status)) { + if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Could not write to fixed event enable register.\n")); } @@ -155,19 +175,19 @@ ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Disabled fixed event %X.\n", event)); } - acpi_ut_release_mutex (ACPI_MTX_EVENTS); + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_install_notify_handler + * FUNCTION: acpi_install_notify_handler * * PARAMETERS: Device - The device for which notifies will be handled - * Handler_type - The type of handler: - * ACPI_SYSTEM_NOTIFY: System_handler (00-7f) - * ACPI_DEVICE_NOTIFY: Driver_handler (80-ff) + * handler_type - The type of handler: + * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) + * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) * Handler - Address of the handler * Context - Value passed to the handler on each GPE * @@ -179,33 +199,37 @@ acpi_status acpi_install_notify_handler ( - acpi_handle device, - u32 handler_type, - acpi_notify_handler handler, - void *context) + acpi_handle device, + u32 handler_type, + acpi_notify_handler handler, + void *context) { - acpi_operand_object *obj_desc; - acpi_operand_object *notify_obj; - acpi_namespace_node *device_node; - acpi_status status = AE_OK; + union acpi_operand_object *obj_desc; + union acpi_operand_object *notify_obj; + struct acpi_namespace_node *node; + acpi_status status; - 
FUNCTION_TRACE ("Acpi_install_notify_handler"); + ACPI_FUNCTION_TRACE ("acpi_install_notify_handler"); /* Parameter validation */ - if ((!handler) || + if ((!device) || + (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* Convert and validate the device handle */ - device_node = acpi_ns_map_handle_to_node (device); - if (!device_node) { + node = acpi_ns_map_handle_to_node (device); + if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } @@ -220,22 +244,22 @@ /* Make sure the handler is not already installed */ if (((handler_type == ACPI_SYSTEM_NOTIFY) && - acpi_gbl_sys_notify.handler) || + acpi_gbl_system_notify.handler) || ((handler_type == ACPI_DEVICE_NOTIFY) && - acpi_gbl_drv_notify.handler)) { - status = AE_EXIST; + acpi_gbl_device_notify.handler)) { + status = AE_ALREADY_EXISTS; goto unlock_and_exit; } if (handler_type == ACPI_SYSTEM_NOTIFY) { - acpi_gbl_sys_notify.node = device_node; - acpi_gbl_sys_notify.handler = handler; - acpi_gbl_sys_notify.context = context; + acpi_gbl_system_notify.node = node; + acpi_gbl_system_notify.handler = handler; + acpi_gbl_system_notify.context = context; } else /* ACPI_DEVICE_NOTIFY */ { - acpi_gbl_drv_notify.node = device_node; - acpi_gbl_drv_notify.handler = handler; - acpi_gbl_drv_notify.context = context; + acpi_gbl_device_notify.node = node; + acpi_gbl_device_notify.handler = handler; + acpi_gbl_device_notify.context = context; } /* Global notify handler installed */ @@ -247,37 +271,31 @@ * Note that only certain object types can receive notifications. */ else { - /* - * These are the ONLY objects that can receive ACPI notifications - */ - if ((device_node->type != ACPI_TYPE_DEVICE) && - (device_node->type != ACPI_TYPE_PROCESSOR) && - (device_node->type != ACPI_TYPE_POWER) && - (device_node->type != ACPI_TYPE_THERMAL)) { - status = AE_BAD_PARAMETER; + /* Notifies allowed on this object? 
*/ + + if (!acpi_ev_is_notify_object (node)) { + status = AE_TYPE; goto unlock_and_exit; } /* Check for an existing internal object */ - obj_desc = acpi_ns_get_attached_object (device_node); + obj_desc = acpi_ns_get_attached_object (node); if (obj_desc) { - /* Object exists - make sure there's no handler */ if (((handler_type == ACPI_SYSTEM_NOTIFY) && - obj_desc->device.sys_handler) || + obj_desc->common_notify.system_notify) || ((handler_type == ACPI_DEVICE_NOTIFY) && - obj_desc->device.drv_handler)) { - status = AE_EXIST; + obj_desc->common_notify.device_notify)) { + status = AE_ALREADY_EXISTS; goto unlock_and_exit; } } - else { /* Create a new object */ - obj_desc = acpi_ut_create_internal_object (device_node->type); + obj_desc = acpi_ut_create_internal_object (node->type); if (!obj_desc) { status = AE_NO_MEMORY; goto unlock_and_exit; @@ -285,7 +303,12 @@ /* Attach new object to the Node */ - status = acpi_ns_attach_object (device, obj_desc, (u8) device_node->type); + status = acpi_ns_attach_object (device, obj_desc, node->type); + + /* Remove local reference to the object */ + + acpi_ut_remove_reference (obj_desc); + if (ACPI_FAILURE (status)) { goto unlock_and_exit; } @@ -293,41 +316,39 @@ /* Install the handler */ - notify_obj = acpi_ut_create_internal_object (INTERNAL_TYPE_NOTIFY); + notify_obj = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_NOTIFY); if (!notify_obj) { status = AE_NO_MEMORY; goto unlock_and_exit; } - notify_obj->notify_handler.node = device_node; - notify_obj->notify_handler.handler = handler; - notify_obj->notify_handler.context = context; - + notify_obj->notify.node = node; + notify_obj->notify.handler = handler; + notify_obj->notify.context = context; if (handler_type == ACPI_SYSTEM_NOTIFY) { - obj_desc->device.sys_handler = notify_obj; + obj_desc->common_notify.system_notify = notify_obj; } - else /* ACPI_DEVICE_NOTIFY */ { - obj_desc->device.drv_handler = notify_obj; + obj_desc->common_notify.device_notify = notify_obj; } } unlock_and_exit: - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_remove_notify_handler + * FUNCTION: acpi_remove_notify_handler * * PARAMETERS: Device - The device for which notifies will be handled - * Handler_type - The type of handler: - * ACPI_SYSTEM_NOTIFY: System_handler (00-7f) - * ACPI_DEVICE_NOTIFY: Driver_handler (80-ff) + * handler_type - The type of handler: + * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) + * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) * Handler - Address of the handler * RETURN: Status * @@ -337,32 +358,36 @@ acpi_status acpi_remove_notify_handler ( - acpi_handle device, - u32 handler_type, - acpi_notify_handler handler) + acpi_handle device, + u32 handler_type, + acpi_notify_handler handler) { - acpi_operand_object *notify_obj; - acpi_operand_object *obj_desc; - acpi_namespace_node *device_node; - acpi_status status = AE_OK; + union acpi_operand_object *notify_obj; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + acpi_status status; - FUNCTION_TRACE ("Acpi_remove_notify_handler"); + ACPI_FUNCTION_TRACE ("acpi_remove_notify_handler"); /* Parameter validation */ - if ((!handler) || + if ((!device) || + (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + 
if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* Convert and validate the device handle */ - device_node = acpi_ns_map_handle_to_node (device); - if (!device_node) { + node = acpi_ns_map_handle_to_node (device); + if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } @@ -374,22 +399,22 @@ ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing notify handler for ROOT object.\n")); if (((handler_type == ACPI_SYSTEM_NOTIFY) && - !acpi_gbl_sys_notify.handler) || + !acpi_gbl_system_notify.handler) || ((handler_type == ACPI_DEVICE_NOTIFY) && - !acpi_gbl_drv_notify.handler)) { + !acpi_gbl_device_notify.handler)) { status = AE_NOT_EXIST; goto unlock_and_exit; } if (handler_type == ACPI_SYSTEM_NOTIFY) { - acpi_gbl_sys_notify.node = NULL; - acpi_gbl_sys_notify.handler = NULL; - acpi_gbl_sys_notify.context = NULL; + acpi_gbl_system_notify.node = NULL; + acpi_gbl_system_notify.handler = NULL; + acpi_gbl_system_notify.context = NULL; } else { - acpi_gbl_drv_notify.node = NULL; - acpi_gbl_drv_notify.handler = NULL; - acpi_gbl_drv_notify.context = NULL; + acpi_gbl_device_notify.node = NULL; + acpi_gbl_device_notify.handler = NULL; + acpi_gbl_device_notify.context = NULL; } } @@ -397,20 +422,16 @@ * All Other Objects */ else { - /* - * These are the ONLY objects that can receive ACPI notifications - */ - if ((device_node->type != ACPI_TYPE_DEVICE) && - (device_node->type != ACPI_TYPE_PROCESSOR) && - (device_node->type != ACPI_TYPE_POWER) && - (device_node->type != ACPI_TYPE_THERMAL)) { - status = AE_BAD_PARAMETER; + /* Notifies allowed on this object? */ + + if (!acpi_ev_is_notify_object (node)) { + status = AE_TYPE; goto unlock_and_exit; } /* Check for an existing internal object */ - obj_desc = acpi_ns_get_attached_object (device_node); + obj_desc = acpi_ns_get_attached_object (node); if (!obj_desc) { status = AE_NOT_EXIST; goto unlock_and_exit; @@ -419,14 +440,14 @@ /* Object exists - make sure there's an existing handler */ if (handler_type == ACPI_SYSTEM_NOTIFY) { - notify_obj = obj_desc->device.sys_handler; + notify_obj = obj_desc->common_notify.system_notify; } else { - notify_obj = obj_desc->device.drv_handler; + notify_obj = obj_desc->common_notify.device_notify; } if ((!notify_obj) || - (notify_obj->notify_handler.handler != handler)) { + (notify_obj->notify.handler != handler)) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } @@ -434,10 +455,10 @@ /* Remove the handler */ if (handler_type == ACPI_SYSTEM_NOTIFY) { - obj_desc->device.sys_handler = NULL; + obj_desc->common_notify.system_notify = NULL; } else { - obj_desc->device.drv_handler = NULL; + obj_desc->common_notify.device_notify = NULL; } acpi_ut_remove_reference (notify_obj); @@ -445,17 +466,17 @@ unlock_and_exit: - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_install_gpe_handler + * FUNCTION: acpi_install_gpe_handler * - * PARAMETERS: Gpe_number - The GPE number. The numbering scheme is - * bank 0 first, then bank 1. + * PARAMETERS: gpe_number - The GPE number within the GPE block + * gpe_block - GPE block (NULL == FADT GPEs) * Type - Whether this GPE should be treated as an * edge- or level-triggered interrupt. 
* Handler - Address of the handler @@ -469,124 +490,150 @@ acpi_status acpi_install_gpe_handler ( - u32 gpe_number, - u32 type, - acpi_gpe_handler handler, - void *context) + acpi_handle gpe_device, + u32 gpe_number, + u32 type, + acpi_gpe_handler handler, + void *context) { - acpi_status status = AE_OK; + acpi_status status; + struct acpi_gpe_event_info *gpe_event_info; - FUNCTION_TRACE ("Acpi_install_gpe_handler"); + ACPI_FUNCTION_TRACE ("acpi_install_gpe_handler"); /* Parameter validation */ - if (!handler || (gpe_number > ACPI_GPE_MAX)) { + if (!handler) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + /* Ensure that we have a valid GPE number */ - if (acpi_gbl_gpe_valid[gpe_number] == ACPI_GPE_INVALID) { - return_ACPI_STATUS (AE_BAD_PARAMETER); + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; } - acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); - /* Make sure that there isn't a handler there already */ - if (acpi_gbl_gpe_info[gpe_number].handler) { - status = AE_EXIST; - goto cleanup; + if (gpe_event_info->handler) { + status = AE_ALREADY_EXISTS; + goto unlock_and_exit; } /* Install the handler */ - acpi_gbl_gpe_info[gpe_number].handler = handler; - acpi_gbl_gpe_info[gpe_number].context = context; - acpi_gbl_gpe_info[gpe_number].type = (u8) type; + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + gpe_event_info->handler = handler; + gpe_event_info->context = context; + gpe_event_info->flags = (u8) type; + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); /* Clear the GPE (of stale events), then enable it */ - acpi_hw_clear_gpe (gpe_number); - acpi_hw_enable_gpe (gpe_number); + status = acpi_hw_clear_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + status = acpi_hw_enable_gpe (gpe_event_info); -cleanup: - acpi_ut_release_mutex (ACPI_MTX_EVENTS); +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_remove_gpe_handler + * FUNCTION: acpi_remove_gpe_handler * - * PARAMETERS: Gpe_number - The event to remove a handler + * PARAMETERS: gpe_number - The event to remove a handler from + * gpe_block - GPE block (NULL == FADT GPEs) * Handler - Address of the handler * * RETURN: Status * - * DESCRIPTION: Remove a handler for a General Purpose Acpi_event. + * DESCRIPTION: Remove a handler for a General Purpose acpi_event.
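A hedged usage sketch of the widened install/remove pair before the removal routine's body: gpe_device selects the GPE block (NULL means the FADT-defined GPE0/GPE1 blocks), and the handler runs from the SCI path. The acpi_gpe_handler typedef (void return, context pointer only) and the ACPI_EVENT_LEVEL_TRIGGERED constant are assumptions about this tree; the my_* names are illustrative:

#include <acpi/acpi.h>

#define MY_GPE_NUMBER	0x13		/* illustrative GPE number */

struct my_dev {
	volatile int gpe_pending;
};

static void ACPI_SYSTEM_XFACE
my_gpe_handler (void *context)
{
	struct my_dev *dev = context;

	/* Called from the SCI path: stay ISR-safe, defer real work */
	dev->gpe_pending = 1;
}

static acpi_status
my_dev_attach_gpe (struct my_dev *dev)
{
	/* NULL gpe_device == the FADT GPE0/GPE1 blocks */
	return acpi_install_gpe_handler (NULL, MY_GPE_NUMBER,
		ACPI_EVENT_LEVEL_TRIGGERED, my_gpe_handler, dev);
}

static acpi_status
my_dev_detach_gpe (void)
{
	/* The core disables the GPE before unlinking the handler */
	return acpi_remove_gpe_handler (NULL, MY_GPE_NUMBER, my_gpe_handler);
}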
* ******************************************************************************/ acpi_status acpi_remove_gpe_handler ( - u32 gpe_number, - acpi_gpe_handler handler) + acpi_handle gpe_device, + u32 gpe_number, + acpi_gpe_handler handler) { - acpi_status status = AE_OK; + acpi_status status; + struct acpi_gpe_event_info *gpe_event_info; - FUNCTION_TRACE ("Acpi_remove_gpe_handler"); + ACPI_FUNCTION_TRACE ("acpi_remove_gpe_handler"); /* Parameter validation */ - if (!handler || (gpe_number > ACPI_GPE_MAX)) { + if (!handler) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + /* Ensure that we have a valid GPE number */ - if (acpi_gbl_gpe_valid[gpe_number] == ACPI_GPE_INVALID) { - return_ACPI_STATUS (AE_BAD_PARAMETER); + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; } /* Disable the GPE before removing the handler */ - acpi_hw_disable_gpe (gpe_number); - - acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + status = acpi_hw_disable_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } /* Make sure that the installed handler is the same */ - if (acpi_gbl_gpe_info[gpe_number].handler != handler) { - acpi_hw_enable_gpe (gpe_number); + if (gpe_event_info->handler != handler) { + (void) acpi_hw_enable_gpe (gpe_event_info); status = AE_BAD_PARAMETER; - goto cleanup; + goto unlock_and_exit; } /* Remove the handler */ - acpi_gbl_gpe_info[gpe_number].handler = NULL; - acpi_gbl_gpe_info[gpe_number].context = NULL; + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + gpe_event_info->handler = NULL; + gpe_event_info->context = NULL; + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); -cleanup: - acpi_ut_release_mutex (ACPI_MTX_EVENTS); +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_acquire_global_lock + * FUNCTION: acpi_acquire_global_lock * * PARAMETERS: Timeout - How long the caller is willing to wait - * Out_handle - A handle to the lock if acquired + * out_handle - A handle to the lock if acquired * * RETURN: Status * @@ -596,32 +643,38 @@ acpi_status acpi_acquire_global_lock ( - void) + u16 timeout, + u32 *handle) { - acpi_status status; + acpi_status status; + + if (!handle) { + return (AE_BAD_PARAMETER); + } status = acpi_ex_enter_interpreter (); if (ACPI_FAILURE (status)) { return (status); } - /* - * TBD: [Restructure] add timeout param to internal interface, and - * perhaps INTERPRETER_LOCKED - */ - status = acpi_ev_acquire_global_lock (); + status = acpi_ev_acquire_global_lock (timeout); acpi_ex_exit_interpreter (); + if (ACPI_SUCCESS (status)) { + acpi_gbl_global_lock_handle++; + *handle = acpi_gbl_global_lock_handle; + } + return (status); } /******************************************************************************* * - * FUNCTION: Acpi_release_global_lock + * FUNCTION: acpi_release_global_lock * - * PARAMETERS: Handle - Returned from Acpi_acquire_global_lock + * PARAMETERS: Handle - Returned from acpi_acquire_global_lock * * RETURN: Status * @@ -631,11 +684,17 @@ acpi_status acpi_release_global_lock ( - void) + u32 handle) { + acpi_status status; + - acpi_ev_release_global_lock (); - return (AE_OK); + if (handle != acpi_gbl_global_lock_handle) { + return (AE_NOT_ACQUIRED); + } + + status = 
acpi_ev_release_global_lock (); + return (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evxfevnt.c linux.21rc5-ac2/drivers/acpi/events/evxfevnt.c --- linux.21rc5/drivers/acpi/events/evxfevnt.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evxfevnt.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,43 +1,58 @@ /****************************************************************************** * * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable - * $Revision: 38 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
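Stepping back to the global-lock change at the top of this hunk: acquisition now takes an explicit timeout and returns a handle, and release validates that handle, failing a stale one with AE_NOT_ACQUIRED. A minimal caller sketch, assuming ACPI_WAIT_FOREVER is this tree's usual wait-forever timeout constant:

#include <acpi/acpi.h>

static acpi_status
my_locked_transaction (void)
{
	u32 glk;
	acpi_status status;

	status = acpi_acquire_global_lock (ACPI_WAIT_FOREVER, &glk);
	if (ACPI_FAILURE (status)) {
		return status;
	}

	/* ... touch hardware shared with SMM firmware (EC, SMBus, ...) ... */

	/* Release validates the handle; a stale one gets AE_NOT_ACQUIRED */
	return acpi_release_global_lock (glk);
}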
*/ -#include "acpi.h" -#include "achware.h" -#include "acnamesp.h" -#include "acevents.h" -#include "amlcode.h" -#include "acinterp.h" +#include +#include +#include #define _COMPONENT ACPI_EVENTS - MODULE_NAME ("evxfevnt") + ACPI_MODULE_NAME ("evxfevnt") /******************************************************************************* * - * FUNCTION: Acpi_enable + * FUNCTION: acpi_enable * * PARAMETERS: None * @@ -50,74 +65,79 @@ acpi_status acpi_enable (void) { - acpi_status status; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Acpi_enable"); + ACPI_FUNCTION_TRACE ("acpi_enable"); - /* Make sure we've got ACPI tables */ + /* Make sure we have the FADT*/ - if (!acpi_gbl_DSDT) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "No ACPI tables present!\n")); + if (!acpi_gbl_FADT) { + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "No FADT information present!\n")); return_ACPI_STATUS (AE_NO_ACPI_TABLES); } - /* Make sure the BIOS supports ACPI mode */ - - if (SYS_MODE_LEGACY == acpi_hw_get_mode_capabilities()) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Only legacy mode supported!\n")); - return_ACPI_STATUS (AE_ERROR); + if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "System is already in ACPI mode\n")); } + else { + /* Transition to ACPI mode */ - /* Transition to ACPI mode */ + status = acpi_hw_set_mode (ACPI_SYS_MODE_ACPI); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not transition to ACPI mode.\n")); + return_ACPI_STATUS (status); + } - status = acpi_hw_set_mode (SYS_MODE_ACPI); - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_FATAL, "Could not transition to ACPI mode.\n")); - return_ACPI_STATUS (status); + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "Transition to ACPI mode successful\n")); } - ACPI_DEBUG_PRINT ((ACPI_DB_OK, "Transition to ACPI mode successful\n")); - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_disable + * FUNCTION: acpi_disable * * PARAMETERS: None * * RETURN: Status * - * DESCRIPTION: Returns the system to original ACPI/legacy mode, and - * uninstalls the SCI interrupt handler. + * DESCRIPTION: Transfers the system into LEGACY mode. 
* ******************************************************************************/ acpi_status acpi_disable (void) { - acpi_status status; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Acpi_disable"); + ACPI_FUNCTION_TRACE ("acpi_disable"); - /* Restore original mode */ + if (!acpi_gbl_FADT) { + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "No FADT information present!\n")); + return_ACPI_STATUS (AE_NO_ACPI_TABLES); + } - status = acpi_hw_set_mode (acpi_gbl_original_mode); - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unable to transition to original mode")); - return_ACPI_STATUS (status); + if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "System is already in legacy (non-ACPI) mode\n")); } + else { + /* Transition to LEGACY mode */ + + status = acpi_hw_set_mode (ACPI_SYS_MODE_LEGACY); - /* Unload the SCI interrupt handler */ + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not exit ACPI mode to legacy mode")); + return_ACPI_STATUS (status); + } - acpi_ev_remove_sci_handler (); - acpi_ev_restore_acpi_state (); + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "ACPI mode disabled\n")); + } return_ACPI_STATUS (status); } @@ -125,107 +145,179 @@ /******************************************************************************* * - * FUNCTION: Acpi_enable_event + * FUNCTION: acpi_enable_event * - * PARAMETERS: Event - The fixed event or GPE to be enabled - * Type - The type of event - * Flags - Just enable, or also wake enable? + * PARAMETERS: Event - The fixed event to be enabled + * Flags - Reserved * * RETURN: Status * - * DESCRIPTION: Enable an ACPI event (fixed and general purpose) + * DESCRIPTION: Enable an ACPI event (fixed) * ******************************************************************************/ acpi_status acpi_enable_event ( - u32 event, - u32 type, - u32 flags) + u32 event, + u32 flags) { - acpi_status status = AE_OK; - u32 register_id; + acpi_status status = AE_OK; + u32 value; - FUNCTION_TRACE ("Acpi_enable_event"); + ACPI_FUNCTION_TRACE ("acpi_enable_event"); - /* The Type must be either Fixed Acpi_event or GPE */ + /* Decode the Fixed Event */ - switch (type) { + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } - case ACPI_EVENT_FIXED: + /* + * Enable the requested fixed event (by writing a one to the + * enable register bit) + */ + status = acpi_set_register (acpi_gbl_fixed_event_info[event].enable_register_id, + 1, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - /* Decode the Fixed Acpi_event */ + /* Make sure that the hardware responded */ - switch (event) { - case ACPI_EVENT_PMTIMER: - register_id = TMR_EN; - break; + status = acpi_get_register (acpi_gbl_fixed_event_info[event].enable_register_id, + &value, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - case ACPI_EVENT_GLOBAL: - register_id = GBL_EN; - break; + if (value != 1) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not enable %s event\n", acpi_ut_get_event_name (event))); + return_ACPI_STATUS (AE_NO_HARDWARE_RESPONSE); + } - case ACPI_EVENT_POWER_BUTTON: - register_id = PWRBTN_EN; - break; + return_ACPI_STATUS (status); +} - case ACPI_EVENT_SLEEP_BUTTON: - register_id = SLPBTN_EN; - break; - case ACPI_EVENT_RTC: - register_id = RTC_EN; - break; +/******************************************************************************* + * + * FUNCTION: acpi_enable_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device + * gpe_number - GPE level within the GPE block
+ * Flags - Just enable, or also wake enable? + * Called from ISR or not + * + * RETURN: Status + * + * DESCRIPTION: Enable an ACPI event (general purpose) + * + ******************************************************************************/ + +acpi_status +acpi_enable_gpe ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; - default: - return_ACPI_STATUS (AE_BAD_PARAMETER); - break; - } - /* - * Enable the requested fixed event (by writing a one to the - * enable register bit) - */ - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, register_id, 1); - - if (1 != acpi_hw_register_bit_access(ACPI_READ, ACPI_MTX_LOCK, register_id)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Fixed event bit clear when it should be set\n")); - return_ACPI_STATUS (AE_NO_HARDWARE_RESPONSE); + ACPI_FUNCTION_TRACE ("acpi_enable_gpe"); + + + /* Use semaphore lock if not executing at interrupt level */ + + if (flags & ACPI_NOT_ISR) { + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } + } - break; + /* Ensure that we have a valid GPE number */ + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } - case ACPI_EVENT_GPE: + /* Enable the requested GPE number */ - /* Ensure that we have a valid GPE number */ + status = acpi_hw_enable_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - if ((event > ACPI_GPE_MAX) || - (acpi_gbl_gpe_valid[event] == ACPI_GPE_INVALID)) { - return_ACPI_STATUS (AE_BAD_PARAMETER); - } + if (flags & ACPI_EVENT_WAKE_ENABLE) { + acpi_hw_enable_gpe_for_wakeup (gpe_event_info); + } +unlock_and_exit: + if (flags & ACPI_NOT_ISR) { + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + } + return_ACPI_STATUS (status); +} - /* Enable the requested GPE number */ - if (flags & ACPI_EVENT_ENABLE) { - acpi_hw_enable_gpe (event); - } - if (flags & ACPI_EVENT_WAKE_ENABLE) { - acpi_hw_enable_gpe_for_wakeup (event); - } +/******************************************************************************* + * + * FUNCTION: acpi_disable_event + * + * PARAMETERS: Event - The fixed event to be disabled + * Flags - Reserved + * + * RETURN: Status + * + * DESCRIPTION: Disable an ACPI event (fixed) + * + ******************************************************************************/ + +acpi_status +acpi_disable_event ( + u32 event, + u32 flags) +{ + acpi_status status = AE_OK; + u32 value; - break; + ACPI_FUNCTION_TRACE ("acpi_disable_event"); - default: - status = AE_BAD_PARAMETER; + /* Decode the Fixed Event */ + + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* + * Disable the requested fixed event (by writing a zero to the + * enable register bit) + */ + status = acpi_set_register (acpi_gbl_fixed_event_info[event].enable_register_id, + 0, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + status = acpi_get_register (acpi_gbl_fixed_event_info[event].enable_register_id, + &value, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } + if (value != 0) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not disable %s events\n", acpi_ut_get_event_name (event))); + return_ACPI_STATUS (AE_NO_HARDWARE_RESPONSE); + } return_ACPI_STATUS (status); } @@ -233,293 +325,412 @@ /******************************************************************************* * - *
FUNCTION: Acpi_disable_event + * FUNCTION: acpi_disable_gpe * - * PARAMETERS: Event - The fixed event or GPE to be enabled - * Type - The type of event, fixed or general purpose - * Flags - Wake disable vs. non-wake disable + * PARAMETERS: gpe_device - Parent GPE Device + * gpe_number - GPE level within the GPE block + * Flags - Just enable, or also wake enable? + * Called from ISR or not * * RETURN: Status * - * DESCRIPTION: Disable an ACPI event (fixed and general purpose) + * DESCRIPTION: Disable an ACPI event (general purpose) * ******************************************************************************/ acpi_status -acpi_disable_event ( - u32 event, - u32 type, - u32 flags) +acpi_disable_gpe ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags) { - acpi_status status = AE_OK; - u32 register_id; + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; + + + ACPI_FUNCTION_TRACE ("acpi_disable_gpe"); + + + /* Use semaphore lock if not executing at interrupt level */ + + if (flags & ACPI_NOT_ISR) { + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + /* Ensure that we have a valid GPE number */ - FUNCTION_TRACE ("Acpi_disable_event"); + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* + * Only disable the requested GPE number for wake if specified. + * Otherwise, turn it totally off + */ + if (flags & ACPI_EVENT_WAKE_DISABLE) { + acpi_hw_disable_gpe_for_wakeup (gpe_event_info); + } + else { + status = acpi_hw_disable_gpe (gpe_event_info); + } +unlock_and_exit: + if (flags & ACPI_NOT_ISR) { + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + } + return_ACPI_STATUS (status); +} - /* The Type must be either Fixed Acpi_event or GPE */ - switch (type) { +/******************************************************************************* + * + * FUNCTION: acpi_clear_event + * + * PARAMETERS: Event - The fixed event to be cleared + * + * RETURN: Status + * + * DESCRIPTION: Clear an ACPI event (fixed) + * + ******************************************************************************/ - case ACPI_EVENT_FIXED: +acpi_status +acpi_clear_event ( + u32 event) +{ + acpi_status status = AE_OK; - /* Decode the Fixed Acpi_event */ - switch (event) { - case ACPI_EVENT_PMTIMER: - register_id = TMR_EN; - break; + ACPI_FUNCTION_TRACE ("acpi_clear_event"); - case ACPI_EVENT_GLOBAL: - register_id = GBL_EN; - break; - case ACPI_EVENT_POWER_BUTTON: - register_id = PWRBTN_EN; - break; + /* Decode the Fixed Event */ - case ACPI_EVENT_SLEEP_BUTTON: - register_id = SLPBTN_EN; - break; + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } - case ACPI_EVENT_RTC: - register_id = RTC_EN; - break; + /* + * Clear the requested fixed event (By writing a one to the + * status register bit) + */ + status = acpi_set_register (acpi_gbl_fixed_event_info[event].status_register_id, + 1, ACPI_MTX_LOCK); - default: - return_ACPI_STATUS (AE_BAD_PARAMETER); - break; - } + return_ACPI_STATUS (status); +} - /* - * Disable the requested fixed event (by writing a zero to the - * enable register bit) - */ - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, register_id, 0); - - if (0 != acpi_hw_register_bit_access(ACPI_READ, ACPI_MTX_LOCK, register_id)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Fixed event bit set when it should be clear,\n")); - return_ACPI_STATUS (AE_NO_HARDWARE_RESPONSE); - } - break; 
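Pulling the fixed-event half of the split API together: handlers are installed with acpi_install_fixed_event_handler(), which, per the evxface.c hunk above, enables the event itself, and the enable/disable/clear calls now take only the event number plus, where applicable, a reserved flags word. A hedged sketch; the u32 (*)(void *) handler typedef, the ACPI_INTERRUPT_HANDLED return constant, and the my_* names are assumptions:

#include <acpi/acpi.h>

static int button_pressed;		/* illustrative state */

static u32 ACPI_SYSTEM_XFACE
my_power_button_handler (void *context)
{
	/* Interrupt context: only note the press, process it later */
	*(int *) context = 1;
	return ACPI_INTERRUPT_HANDLED;
}

static acpi_status
my_button_init (void)
{
	acpi_status status;

	/* Discard a press latched before we were ready */
	status = acpi_clear_event (ACPI_EVENT_POWER_BUTTON);
	if (ACPI_FAILURE (status)) {
		return status;
	}

	/* Install enables the event itself; there is no type argument now */
	return acpi_install_fixed_event_handler (ACPI_EVENT_POWER_BUTTON,
		my_power_button_handler, &button_pressed);
}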
+/******************************************************************************* + * + * FUNCTION: acpi_clear_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device + * gpe_number - GPE level within the GPE block + * Flags - Called from an ISR or not + * + * RETURN: Status + * + * DESCRIPTION: Clear an ACPI event (general purpose) + * + ******************************************************************************/ +acpi_status +acpi_clear_gpe ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; - case ACPI_EVENT_GPE: - /* Ensure that we have a valid GPE number */ + ACPI_FUNCTION_TRACE ("acpi_clear_gpe"); - if ((event > ACPI_GPE_MAX) || - (acpi_gbl_gpe_valid[event] == ACPI_GPE_INVALID)) { - return_ACPI_STATUS (AE_BAD_PARAMETER); - } - /* Disable the requested GPE number */ + /* Use semaphore lock if not executing at interrupt level */ - if (flags & ACPI_EVENT_DISABLE) { - acpi_hw_disable_gpe (event); - } - if (flags & ACPI_EVENT_WAKE_DISABLE) { - acpi_hw_disable_gpe_for_wakeup (event); + if (flags & ACPI_NOT_ISR) { + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } + } - break; - + /* Ensure that we have a valid GPE number */ - default: + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { status = AE_BAD_PARAMETER; + goto unlock_and_exit; } + status = acpi_hw_clear_gpe (gpe_event_info); + +unlock_and_exit: + if (flags & ACPI_NOT_ISR) { + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + } return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_clear_event + * FUNCTION: acpi_get_event_status * - * PARAMETERS: Event - The fixed event or GPE to be cleared - * Type - The type of event + * PARAMETERS: Event - The fixed event + * Event Status - Where the current status of the event will + * be returned * * RETURN: Status * - * DESCRIPTION: Clear an ACPI event (fixed and general purpose) + * DESCRIPTION: Obtains and returns the current status of the event * ******************************************************************************/ acpi_status -acpi_clear_event ( - u32 event, - u32 type) +acpi_get_event_status ( + u32 event, + acpi_event_status *event_status) { - acpi_status status = AE_OK; - u32 register_id; - + acpi_status status = AE_OK; - FUNCTION_TRACE ("Acpi_clear_event"); + ACPI_FUNCTION_TRACE ("acpi_get_event_status"); - /* The Type must be either Fixed Acpi_event or GPE */ - switch (type) { + if (!event_status) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } - case ACPI_EVENT_FIXED: + /* Decode the Fixed Event */ - /* Decode the Fixed Acpi_event */ + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } - switch (event) { - case ACPI_EVENT_PMTIMER: - register_id = TMR_STS; - break; + /* Get the status of the requested fixed event */ - case ACPI_EVENT_GLOBAL: - register_id = GBL_STS; - break; + status = acpi_get_register (acpi_gbl_fixed_event_info[event].status_register_id, + event_status, ACPI_MTX_LOCK); - case ACPI_EVENT_POWER_BUTTON: - register_id = PWRBTN_STS; - break; + return_ACPI_STATUS (status); +} - case ACPI_EVENT_SLEEP_BUTTON: - register_id = SLPBTN_STS; - break; - case ACPI_EVENT_RTC: - register_id = RTC_STS; - break; +/******************************************************************************* + * + * FUNCTION: acpi_get_gpe_status + * + * PARAMETERS: gpe_device - Parent GPE Device 
+ * gpe_number - GPE level within the GPE block + * Flags - Called from an ISR or not + * Event Status - Where the current status of the event will + * be returned + * + * RETURN: Status + * + * DESCRIPTION: Get status of an event (general purpose) + * + ******************************************************************************/ - default: - return_ACPI_STATUS (AE_BAD_PARAMETER); - break; - } +acpi_status +acpi_get_gpe_status ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags, + acpi_event_status *event_status) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; - /* - * Clear the requested fixed event (By writing a one to the - * status register bit) - */ - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, register_id, 1); - break; + ACPI_FUNCTION_TRACE ("acpi_get_gpe_status"); - case ACPI_EVENT_GPE: - /* Ensure that we have a valid GPE number */ + /* Use semaphore lock if not executing at interrupt level */ - if ((event > ACPI_GPE_MAX) || - (acpi_gbl_gpe_valid[event] == ACPI_GPE_INVALID)) { - return_ACPI_STATUS (AE_BAD_PARAMETER); + if (flags & ACPI_NOT_ISR) { + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } + } + /* Ensure that we have a valid GPE number */ - acpi_hw_clear_gpe (event); - break; + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + /* Obtain status on the requested GPE number */ - default: + status = acpi_hw_get_gpe_status (gpe_event_info, event_status); - status = AE_BAD_PARAMETER; +unlock_and_exit: + if (flags & ACPI_NOT_ISR) { + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); } - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_get_event_status + * FUNCTION: acpi_install_gpe_block * - * PARAMETERS: Event - The fixed event or GPE - * Type - The type of event - * Status - Where the current status of the event will - * be returned + * PARAMETERS: gpe_device - Handle to the parent GPE Block Device + * gpe_block_address - Address and space_iD + * register_count - Number of GPE register pairs in the block + * interrupt_level - H/W interrupt for the block * * RETURN: Status * - * DESCRIPTION: Obtains and returns the current status of the event + * DESCRIPTION: Create and Install a block of GPE registers * ******************************************************************************/ - acpi_status -acpi_get_event_status ( - u32 event, - u32 type, - acpi_event_status *event_status) +acpi_install_gpe_block ( + acpi_handle gpe_device, + struct acpi_generic_address *gpe_block_address, + u32 register_count, + u32 interrupt_level) { - acpi_status status = AE_OK; - u32 register_id; + acpi_status status; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + struct acpi_gpe_block_info *gpe_block; - FUNCTION_TRACE ("Acpi_get_event_status"); + ACPI_FUNCTION_TRACE ("acpi_install_gpe_block"); - if (!event_status) { + if ((!gpe_device) || + (!gpe_block_address) || + (!register_count)) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } - /* The Type must be either Fixed Acpi_event or GPE */ - - switch (type) { + node = acpi_ns_map_handle_to_node (gpe_device); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } - case ACPI_EVENT_FIXED: + /* + * For 
user-installed GPE Block Devices, the gpe_block_base_number + * is always zero + */ + status = acpi_ev_create_gpe_block (node, gpe_block_address, register_count, + 0, interrupt_level, &gpe_block); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - /* Decode the Fixed Acpi_event */ + /* Get the device_object attached to the node */ - switch (event) { - case ACPI_EVENT_PMTIMER: - register_id = TMR_STS; - break; + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc) { + /* No object, create a new one */ - case ACPI_EVENT_GLOBAL: - register_id = GBL_STS; - break; + obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_DEVICE); + if (!obj_desc) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } - case ACPI_EVENT_POWER_BUTTON: - register_id = PWRBTN_STS; - break; + status = acpi_ns_attach_object (node, obj_desc, ACPI_TYPE_DEVICE); - case ACPI_EVENT_SLEEP_BUTTON: - register_id = SLPBTN_STS; - break; + /* Remove local reference to the object */ - case ACPI_EVENT_RTC: - register_id = RTC_STS; - break; + acpi_ut_remove_reference (obj_desc); - default: - return_ACPI_STATUS (AE_BAD_PARAMETER); - break; + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; } + } + + /* Install the GPE block in the device_object */ - /* Get the status of the requested fixed event */ + obj_desc->device.gpe_block = gpe_block; - *event_status = acpi_hw_register_bit_access (ACPI_READ, ACPI_MTX_LOCK, register_id); - break; +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} - case ACPI_EVENT_GPE: - /* Ensure that we have a valid GPE number */ +/******************************************************************************* + * + * FUNCTION: acpi_remove_gpe_block + * + * PARAMETERS: gpe_device - Handle to the parent GPE Block Device + * + * RETURN: Status + * + * DESCRIPTION: Remove a previously installed block of GPE registers + * + ******************************************************************************/ + +acpi_status +acpi_remove_gpe_block ( + acpi_handle gpe_device) +{ + union acpi_operand_object *obj_desc; + acpi_status status; + struct acpi_namespace_node *node; - if ((event > ACPI_GPE_MAX) || - (acpi_gbl_gpe_valid[event] == ACPI_GPE_INVALID)) { - return_ACPI_STATUS (AE_BAD_PARAMETER); - } + ACPI_FUNCTION_TRACE ("acpi_remove_gpe_block"); - /* Obtain status on the requested GPE number */ - acpi_hw_get_gpe_status (event, event_status); - break; + if (!gpe_device) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } - default: + node = acpi_ns_map_handle_to_node (gpe_device); + if (!node) { status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Get the device_object attached to the node */ + + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc || + !obj_desc->device.gpe_block) { + return_ACPI_STATUS (AE_NULL_OBJECT); + } + + /* Delete the GPE block (but not the device_object) */ + + status = acpi_ev_delete_gpe_block (obj_desc->device.gpe_block); + if (ACPI_SUCCESS (status)) { + obj_desc->device.gpe_block = NULL; } +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/evxfregn.c linux.21rc5-ac2/drivers/acpi/events/evxfregn.c --- linux.21rc5/drivers/acpi/events/evxfregn.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/evxfregn.c 2003-05-29 
01:02:16.000000000 +0100 @@ -2,84 +2,101 @@ * * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and * Address Spaces. - * $Revision: 40 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
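Back on the new GPE-block externals in evxfevnt.c: acpi_install_gpe_block() wires a block device's GPE registers into the core (user-installed blocks are always numbered from zero) and acpi_remove_gpe_block() deletes the block while leaving the device object in place. A hedged registration sketch; the struct acpi_generic_address field names, the \_SB.GPEB path, and all numbers below are illustrative assumptions:

#include <acpi/acpi.h>

static acpi_status
my_register_gpe_block (void)
{
	struct acpi_generic_address gpe_regs;
	acpi_handle dev;
	acpi_status status;

	status = acpi_get_handle (NULL, "\\_SB.GPEB", &dev);	/* made-up path */
	if (ACPI_FAILURE (status)) {
		return status;
	}

	gpe_regs.address_space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	gpe_regs.register_bit_width = 8;
	gpe_regs.register_bit_offset = 0;
	gpe_regs.address = 0x1020;		/* illustrative I/O port */

	/* Two status/enable register pairs, routed to interrupt level 5 */
	return acpi_install_gpe_block (dev, &gpe_regs, 2, 5);
}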
*/ -#include "acpi.h" -#include "achware.h" -#include "acnamesp.h" -#include "acevents.h" -#include "amlcode.h" -#include "acinterp.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EVENTS - MODULE_NAME ("evxfregn") + ACPI_MODULE_NAME ("evxfregn") /******************************************************************************* * - * FUNCTION: Acpi_install_address_space_handler + * FUNCTION: acpi_install_address_space_handler * * PARAMETERS: Device - Handle for the device - * Space_id - The address space ID + * space_id - The address space ID * Handler - Address of the handler * Setup - Address of the setup function * Context - Value passed to the handler on each access * * RETURN: Status * - * DESCRIPTION: Install a handler for all Op_regions of a given Space_id. + * DESCRIPTION: Install a handler for all op_regions of a given space_id. * ******************************************************************************/ acpi_status acpi_install_address_space_handler ( - acpi_handle device, - ACPI_ADR_SPACE_TYPE space_id, - acpi_adr_space_handler handler, - acpi_adr_space_setup setup, - void *context) + acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, + void *context) { - acpi_operand_object *obj_desc; - acpi_operand_object *handler_obj; - acpi_namespace_node *node; - acpi_status status = AE_OK; - acpi_object_type8 type; - u16 flags = 0; + union acpi_operand_object *obj_desc; + union acpi_operand_object *handler_obj; + struct acpi_namespace_node *node; + acpi_status status; + acpi_object_type type; + u16 flags = 0; - FUNCTION_TRACE ("Acpi_install_address_space_handler"); + ACPI_FUNCTION_TRACE ("acpi_install_address_space_handler"); /* Parameter validation */ - if ((!device) || - ((!handler) && (handler != ACPI_DEFAULT_HANDLER)) || - (space_id > ACPI_MAX_ADDRESS_SPACE)) { + if (!device) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* Convert and validate the device handle */ @@ -103,7 +120,7 @@ } if (handler == ACPI_DEFAULT_HANDLER) { - flags = ADDR_HANDLER_DEFAULT_INSTALLED; + flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED; switch (space_id) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: @@ -131,55 +148,65 @@ setup = acpi_ev_pci_bar_region_setup; break; + case ACPI_ADR_SPACE_DATA_TABLE: + handler = acpi_ex_data_table_space_handler; + setup = NULL; + break; + default: - status = AE_NOT_EXIST; + status = AE_BAD_PARAMETER; goto unlock_and_exit; - break; } } - /* - * If the caller hasn't specified a setup routine, use the default - */ + /* If the caller hasn't specified a setup routine, use the default */ + if (!setup) { setup = acpi_ev_default_region_setup; } - /* - * Check for an existing internal object - */ + /* Check for an existing internal object */ + obj_desc = acpi_ns_get_attached_object (node); if (obj_desc) { /* - * The object exists. + * The attached device object already exists. * Make sure the handler is not already installed. */ + handler_obj = obj_desc->device.address_space; - /* check the address handler the user requested */ + /* Walk the handler list for this device */ - handler_obj = obj_desc->device.addr_handler; while (handler_obj) { - /* - * We have an Address handler, see if user requested this - * address space. 
- */ - if(handler_obj->addr_handler.space_id == space_id) { - status = AE_EXIST; + /* Same space_id indicates a handler already installed */ + + if(handler_obj->address_space.space_id == space_id) { + if (handler_obj->address_space.handler == handler) { + /* + * It is (relatively) OK to attempt to install the SAME + * handler twice. This can easily happen with PCI_Config space. + */ + status = AE_SAME_HANDLER; + goto unlock_and_exit; + } + else { + /* A handler is already installed */ + + status = AE_ALREADY_EXISTS; + } goto unlock_and_exit; } - /* - * Move through the linked list of handlers - */ - handler_obj = handler_obj->addr_handler.next; + /* Walk the linked list of handlers */ + + handler_obj = handler_obj->address_space.next; } } - else { ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, "Creating object on Device %p while installing handler\n", node)); - /* Obj_desc does not exist, create one */ + /* obj_desc does not exist, create one */ if (node->type == ACPI_TYPE_ANY) { type = ACPI_TYPE_DEVICE; @@ -200,110 +227,117 @@ /* Attach the new object to the Node */ - status = acpi_ns_attach_object (node, obj_desc, (u8) type); + status = acpi_ns_attach_object (node, obj_desc, type); + + /* Remove local reference to the object */ + + acpi_ut_remove_reference (obj_desc); + if (ACPI_FAILURE (status)) { - acpi_ut_remove_reference (obj_desc); goto unlock_and_exit; } } ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, - "Installing address handler for region %s(%X) on Device %p(%p)\n", - acpi_ut_get_region_name (space_id), space_id, node, obj_desc)); + "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n", + acpi_ut_get_region_name (space_id), space_id, node->name.ascii, node, obj_desc)); /* - * Now we can install the handler + * Install the handler * - * At this point we know that there is no existing handler. - * So, we just allocate the object for the handler and link it + * At this point there is no existing handler. + * Just allocate the object for the handler and link it * into the list. */ - handler_obj = acpi_ut_create_internal_object (INTERNAL_TYPE_ADDRESS_HANDLER); + handler_obj = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_ADDRESS_HANDLER); if (!handler_obj) { status = AE_NO_MEMORY; goto unlock_and_exit; } - handler_obj->addr_handler.space_id = (u8) space_id; - handler_obj->addr_handler.hflags = flags; - handler_obj->addr_handler.next = obj_desc->device.addr_handler; - handler_obj->addr_handler.region_list = NULL; - handler_obj->addr_handler.node = node; - handler_obj->addr_handler.handler = handler; - handler_obj->addr_handler.context = context; - handler_obj->addr_handler.setup = setup; + /* Init handler obj */ + + handler_obj->address_space.space_id = (u8) space_id; + handler_obj->address_space.hflags = flags; + handler_obj->address_space.region_list = NULL; + handler_obj->address_space.node = node; + handler_obj->address_space.handler = handler; + handler_obj->address_space.context = context; + handler_obj->address_space.setup = setup; + + /* Install at head of Device.address_space list */ + + handler_obj->address_space.next = obj_desc->device.address_space; + + /* + * The Device object is the first reference on the handler_obj. + * Each region that uses the handler adds a reference. + */ + obj_desc->device.address_space = handler_obj; /* - * Now walk the namespace finding all of the regions this + * Walk the namespace finding all of the regions this * handler will manage. 
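
The duplicate-handler check above is the behavioral change in this hunk: re-installing the very same handler for a space_id (easy to do with PCI_Config space) now returns the benign AE_SAME_HANDLER, while a different handler for an occupied space_id fails with AE_ALREADY_EXISTS instead of the old AE_EXIST. A minimal standalone sketch of that list walk, using hypothetical types rather than the ACPICA ones:

typedef int (*space_handler_fn)(void);

struct handler_entry {
    unsigned char space_id;
    space_handler_fn handler;
    struct handler_entry *next;
};

enum check_result { SPACE_FREE, SAME_HANDLER, ALREADY_EXISTS };

static enum check_result
check_handler_list (struct handler_entry *list,
    unsigned char space_id, space_handler_fn handler)
{
    for ( ; list; list = list->next) {
        if (list->space_id != space_id) {
            continue;   /* different space, keep walking */
        }

        /* Occupied space_id: benign only if it is the very same handler */

        return (list->handler == handler) ? SAME_HANDLER : ALREADY_EXISTS;
    }

    return SPACE_FREE;  /* nothing installed for this space_id */
}
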
* - * We start at the device and search the branch toward + * Start at the device and search the branch toward * the leaf nodes until either the leaf is encountered or * a device is detected that has an address handler of the * same type. * - * In either case we back up and search down the remainder + * In either case, back up and search down the remainder * of the branch */ - status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, device, - ACPI_UINT32_MAX, NS_WALK_UNLOCK, - acpi_ev_addr_handler_helper, - handler_obj, NULL); - - /* - * Place this handler 1st on the list - */ - handler_obj->common.reference_count = - (u16) (handler_obj->common.reference_count + - obj_desc->common.reference_count - 1); - obj_desc->device.addr_handler = handler_obj; - + status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, device, ACPI_UINT32_MAX, + ACPI_NS_WALK_UNLOCK, acpi_ev_install_handler, + handler_obj, NULL); unlock_and_exit: - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_remove_address_space_handler + * FUNCTION: acpi_remove_address_space_handler * - * PARAMETERS: Space_id - The address space ID + * PARAMETERS: Device - Handle for the device + * space_id - The address space ID * Handler - Address of the handler * * RETURN: Status * - * DESCRIPTION: Install a handler for accesses on an Operation Region + * DESCRIPTION: Remove a previously installed handler. * ******************************************************************************/ acpi_status acpi_remove_address_space_handler ( - acpi_handle device, - ACPI_ADR_SPACE_TYPE space_id, - acpi_adr_space_handler handler) + acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler) { - acpi_operand_object *obj_desc; - acpi_operand_object *handler_obj; - acpi_operand_object *region_obj; - acpi_operand_object **last_obj_ptr; - acpi_namespace_node *node; - acpi_status status = AE_OK; + union acpi_operand_object *obj_desc; + union acpi_operand_object *handler_obj; + union acpi_operand_object *region_obj; + union acpi_operand_object **last_obj_ptr; + struct acpi_namespace_node *node; + acpi_status status; - FUNCTION_TRACE ("Acpi_remove_address_space_handler"); + ACPI_FUNCTION_TRACE ("acpi_remove_address_space_handler"); /* Parameter validation */ - if ((!device) || - ((!handler) && (handler != ACPI_DEFAULT_HANDLER)) || - (space_id > ACPI_MAX_ADDRESS_SPACE)) { + if (!device) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* Convert and validate the device handle */ @@ -313,7 +347,6 @@ goto unlock_and_exit; } - /* Make sure the internal object exists */ obj_desc = acpi_ns_get_attached_object (node); @@ -322,25 +355,22 @@ goto unlock_and_exit; } - /* - * find the address handler the user requested - */ - handler_obj = obj_desc->device.addr_handler; - last_obj_ptr = &obj_desc->device.addr_handler; + /* Find the address handler the user requested */ + + handler_obj = obj_desc->device.address_space; + last_obj_ptr = &obj_desc->device.address_space; while (handler_obj) { - /* - * We have a handler, see if user requested this one - */ - if (handler_obj->addr_handler.space_id == space_id) { - /* - * Got it, first dereference this in the Regions - */ + /* We have a handler, see if user requested this one */ + + 
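
The removal path that begins here keeps last_obj_ptr aimed at whichever link field currently points to the handler under inspection, so a match can be spliced out with one assignment and no special case for the list head. A sketch of that pointer-to-pointer idiom (illustrative names, not the ACPICA types):

struct list_node {
    int space_id;
    struct list_node *next;
};

static struct list_node *
unlink_handler (struct list_node **head, int space_id)
{
    struct list_node **last_ptr = head; /* the link that points to 'cur' */
    struct list_node *cur;

    for (cur = *head; cur; cur = cur->next) {
        if (cur->space_id == space_id) {
            *last_ptr = cur->next;  /* splice out, head or middle alike */
            return cur;             /* caller drops the reference(s) */
        }
        last_ptr = &cur->next;
    }

    return 0;   /* no handler for this space_id: the AE_NOT_EXIST case */
}
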
if (handler_obj->address_space.space_id == space_id) { + /* Matched space_id, first dereference this in the Regions */ + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, "Removing address handler %p(%p) for region %s on Device %p(%p)\n", handler_obj, handler, acpi_ut_get_region_name (space_id), node, obj_desc)); - region_obj = handler_obj->addr_handler.region_list; + region_obj = handler_obj->address_space.region_list; /* Walk the handler's region list */ @@ -352,51 +382,42 @@ * The region is just inaccessible as indicated to * the _REG method */ - acpi_ev_disassociate_region_from_handler(region_obj, TRUE); + acpi_ev_detach_region (region_obj, TRUE); /* - * Walk the list, since we took the first region and it - * was removed from the list by the dissassociate call - * we just get the first item on the list again + * Walk the list: Just grab the head because the + * detach_region removed the previous head. */ - region_obj = handler_obj->addr_handler.region_list; + region_obj = handler_obj->address_space.region_list; } - /* - * Remove this Handler object from the list - */ - *last_obj_ptr = handler_obj->addr_handler.next; - - /* - * Now we can delete the handler object - */ - acpi_ut_remove_reference (handler_obj); - acpi_ut_remove_reference (handler_obj); + /* Remove this Handler object from the list */ + *last_obj_ptr = handler_obj->address_space.next; + + /* Now we can delete the handler object */ + + acpi_ut_remove_reference (handler_obj); goto unlock_and_exit; } - /* - * Move through the linked list of handlers - */ - last_obj_ptr = &handler_obj->addr_handler.next; - handler_obj = handler_obj->addr_handler.next; + /* Walk the linked list of handlers */ + + last_obj_ptr = &handler_obj->address_space.next; + handler_obj = handler_obj->address_space.next; } + /* The handler does not exist */ - /* - * The handler does not exist - */ ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, - "Unable to remove address handler %p for %s(%X), Dev_node %p, obj %p\n", + "Unable to remove address handler %p for %s(%X), dev_node %p, obj %p\n", handler, acpi_ut_get_region_name (space_id), space_id, node, obj_desc)); status = AE_NOT_EXIST; - unlock_and_exit: - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/events/Makefile linux.21rc5-ac2/drivers/acpi/events/Makefile --- linux.21rc5/drivers/acpi/events/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/events/Makefile 2003-04-28 17:58:27.000000000 +0100 @@ -1,11 +1,10 @@ # # Makefile for all Linux ACPI interpreter subdirectories -# EXCEPT for the ospm directory # O_TARGET := $(notdir $(CURDIR)).o -obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c)) +obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c)) EXTRA_CFLAGS += $(ACPI_CFLAGS) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exconfig.c linux.21rc5-ac2/drivers/acpi/executer/exconfig.c --- linux.21rc5/drivers/acpi/executer/exconfig.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exconfig.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,242 +1,459 @@ /****************************************************************************** * * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes) - * $Revision: 44 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. 
Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acevents.h" -#include "actables.h" -#include "acdispat.h" +#include +#include +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exconfig") + ACPI_MODULE_NAME ("exconfig") -/***************************************************************************** +/******************************************************************************* * - * FUNCTION: Acpi_ex_load_table_op + * FUNCTION: acpi_ex_add_table * - * PARAMETERS: Rgn_desc - Op region where the table will be obtained - * Ddb_handle - Where a handle to the table will be returned + * PARAMETERS: Table - Pointer to raw table + * parent_node - Where to load the table (scope) + * ddb_handle - Where to return the table handle. 
+ * + * RETURN: Status + * + * DESCRIPTION: Common function to Install and Load an ACPI table with a + * returned table handle. + * + ******************************************************************************/ + +acpi_status +acpi_ex_add_table ( + struct acpi_table_header *table, + struct acpi_namespace_node *parent_node, + union acpi_operand_object **ddb_handle) +{ + acpi_status status; + struct acpi_table_desc table_info; + union acpi_operand_object *obj_desc; + + + ACPI_FUNCTION_TRACE ("ex_add_table"); + + + /* Create an object to be the table handle */ + + obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_REFERENCE); + if (!obj_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* Install the new table into the local data structures */ + + table_info.pointer = table; + table_info.length = (acpi_size) table->length; + table_info.allocation = ACPI_MEM_ALLOCATED; + + status = acpi_tb_install_table (&table_info); + if (ACPI_FAILURE (status)) { + goto cleanup; + } + + /* Add the table to the namespace */ + + status = acpi_ns_load_table (table_info.installed_desc, parent_node); + if (ACPI_FAILURE (status)) { + /* Uninstall table on error */ + + (void) acpi_tb_uninstall_table (table_info.installed_desc); + goto cleanup; + } + + /* Init the table handle */ + + obj_desc->reference.opcode = AML_LOAD_OP; + obj_desc->reference.object = table_info.installed_desc; + *ddb_handle = obj_desc; + return_ACPI_STATUS (AE_OK); + + +cleanup: + acpi_ut_remove_reference (obj_desc); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ex_load_table_op + * + * PARAMETERS: walk_state - Current state with operands + * return_desc - Where to store the return object * * RETURN: Status * * DESCRIPTION: Load an ACPI table * - ****************************************************************************/ + ******************************************************************************/ acpi_status -acpi_ex_load_op ( - acpi_operand_object *rgn_desc, - acpi_operand_object *ddb_handle) +acpi_ex_load_table_op ( + struct acpi_walk_state *walk_state, + union acpi_operand_object **return_desc) { - acpi_status status; - acpi_operand_object *table_desc = NULL; - u8 *table_ptr; - u8 *table_data_ptr; - acpi_table_header table_header; - acpi_table_desc table_info; - u32 i; + acpi_status status; + union acpi_operand_object **operand = &walk_state->operands[0]; + struct acpi_table_header *table; + struct acpi_namespace_node *parent_node; + struct acpi_namespace_node *start_node; + struct acpi_namespace_node *parameter_node = NULL; + union acpi_operand_object *ddb_handle; + + ACPI_FUNCTION_TRACE ("ex_load_table_op"); - FUNCTION_TRACE ("Ex_load_op"); - /* TBD: [Unhandled] Object can be either a field or an opregion */ +#if 0 + /* + * Make sure that the signature does not match one of the tables that + * is already loaded. 
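
The new acpi_ex_add_table above follows a strict unwind discipline: the reference object is created first, and each later step that fails undoes exactly the steps that preceded it, uninstalling the table and then dropping the handle object. A compilable sketch of the same goto-cleanup pattern, with stub functions standing in for the ACPICA calls:

static int create_handle (void)   { return 0; }  /* stubs: 0 means success */
static int install_table (void)   { return 0; }
static int load_namespace (void)  { return 0; }
static void uninstall_table (void) { }
static void drop_handle (void)     { }

static int
add_table_sketch (void)
{
    int status;

    status = create_handle ();
    if (status) {
        return status;          /* nothing to undo yet */
    }

    status = install_table ();
    if (status) {
        goto cleanup;           /* undo the handle only */
    }

    status = load_namespace ();
    if (status) {
        uninstall_table ();     /* undo step 2 ... */
        goto cleanup;           /* ... then step 1 */
    }

    return 0;                   /* the handle now owns the table */

cleanup:
    drop_handle ();
    return status;
}
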
+ */ + status = acpi_tb_match_signature (operand[0]->string.pointer, NULL); + if (status == AE_OK) { + /* Signature matched -- don't allow override */ + return_ACPI_STATUS (AE_ALREADY_EXISTS); + } +#endif - /* Get the table header */ + /* Find the ACPI table */ - table_header.length = 0; - for (i = 0; i < sizeof (acpi_table_header); i++) { - status = acpi_ev_address_space_dispatch (rgn_desc, ACPI_READ_ADR_SPACE, - (ACPI_PHYSICAL_ADDRESS) i, 8, - (u32 *) ((u8 *) &table_header + i)); + status = acpi_tb_find_table (operand[0]->string.pointer, + operand[1]->string.pointer, + operand[2]->string.pointer, &table); + if (ACPI_FAILURE (status)) { + if (status != AE_NOT_FOUND) { + return_ACPI_STATUS (status); + } + + /* Not found, return an Integer=0 and AE_OK */ + + ddb_handle = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); + if (!ddb_handle) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + ddb_handle->integer.value = 0; + *return_desc = ddb_handle; + + return_ACPI_STATUS (AE_OK); + } + + /* Default nodes */ + + start_node = walk_state->scope_info->scope.node; + parent_node = acpi_gbl_root_node; + + /* root_path (optional parameter) */ + + if (operand[3]->string.length > 0) { + /* + * Find the node referenced by the root_path_string. This is the + * location within the namespace where the table will be loaded. + */ + status = acpi_ns_get_node_by_path (operand[3]->string.pointer, start_node, + ACPI_NS_SEARCH_PARENT, &parent_node); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } } - /* Allocate a buffer for the entire table */ + /* parameter_path (optional parameter) */ - table_ptr = ACPI_MEM_ALLOCATE (table_header.length); - if (!table_ptr) { - return_ACPI_STATUS (AE_NO_MEMORY); + if (operand[4]->string.length > 0) { + if ((operand[4]->string.pointer[0] != '\\') && + (operand[4]->string.pointer[0] != '^')) { + /* + * Path is not absolute, so it will be relative to the node + * referenced by the root_path_string (or the NS root if omitted) + */ + start_node = parent_node; + } + + /* + * Find the node referenced by the parameter_path_string + */ + status = acpi_ns_get_node_by_path (operand[4]->string.pointer, start_node, + ACPI_NS_SEARCH_PARENT, &parameter_node); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } } - /* Copy the header to the buffer */ + /* Load the table into the namespace */ - MEMCPY (table_ptr, &table_header, sizeof (acpi_table_header)); - table_data_ptr = table_ptr + sizeof (acpi_table_header); + status = acpi_ex_add_table (table, parent_node, &ddb_handle); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + /* Parameter Data (optional) */ - /* Get the table from the op region */ + if (parameter_node) { + /* Store the parameter data into the optional parameter object */ - for (i = 0; i < table_header.length; i++) { - status = acpi_ev_address_space_dispatch (rgn_desc, ACPI_READ_ADR_SPACE, - (ACPI_PHYSICAL_ADDRESS) i, 8, - (u32 *) (table_data_ptr + i)); + status = acpi_ex_store (operand[5], ACPI_CAST_PTR (union acpi_operand_object, parameter_node), + walk_state); if (ACPI_FAILURE (status)) { - goto cleanup; + (void) acpi_ex_unload_table (ddb_handle); } } + return_ACPI_STATUS (status); +} - /* Table must be either an SSDT or a PSDT */ - if ((!STRNCMP (table_header.signature, - acpi_gbl_acpi_table_data[ACPI_TABLE_PSDT].signature, - acpi_gbl_acpi_table_data[ACPI_TABLE_PSDT].sig_length)) && - (!STRNCMP (table_header.signature, - acpi_gbl_acpi_table_data[ACPI_TABLE_SSDT].signature, - acpi_gbl_acpi_table_data[ACPI_TABLE_SSDT].sig_length))) { -
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Table has invalid signature [%4.4s], must be SSDT or PSDT\n", - (char*)table_header.signature)); - status = AE_BAD_SIGNATURE; - goto cleanup; - } +/******************************************************************************* + * + * FUNCTION: acpi_ex_load_op + * + * PARAMETERS: obj_desc - Region or Field where the table will be + * obtained + * Target - Where a handle to the table will be stored + * walk_state - Current state + * + * RETURN: Status + * + * DESCRIPTION: Load an ACPI table from a field or operation region + * + ******************************************************************************/ - /* Create an object to be the table handle */ +acpi_status +acpi_ex_load_op ( + union acpi_operand_object *obj_desc, + union acpi_operand_object *target, + struct acpi_walk_state *walk_state) +{ + acpi_status status; + union acpi_operand_object *ddb_handle; + union acpi_operand_object *buffer_desc = NULL; + struct acpi_table_header *table_ptr = NULL; + u8 *table_data_ptr; + struct acpi_table_header table_header; + u32 i; + + ACPI_FUNCTION_TRACE ("ex_load_op"); + + + /* Object can be either an op_region or a Field */ + + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_REGION: + + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Load from Region %p %s\n", + obj_desc, acpi_ut_get_object_type_name (obj_desc))); + + /* Get the table header */ + + table_header.length = 0; + for (i = 0; i < sizeof (struct acpi_table_header); i++) { + status = acpi_ev_address_space_dispatch (obj_desc, ACPI_READ, + (acpi_physical_address) i, 8, + ((u8 *) &table_header) + i); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } - table_desc = acpi_ut_create_internal_object (INTERNAL_TYPE_REFERENCE); - if (!table_desc) { - status = AE_NO_MEMORY; - goto cleanup; - } + /* Allocate a buffer for the entire table */ + table_ptr = ACPI_MEM_ALLOCATE (table_header.length); + if (!table_ptr) { + return_ACPI_STATUS (AE_NO_MEMORY); + } - /* Install the new table into the local data structures */ + /* Copy the header to the buffer */ - table_info.pointer = (acpi_table_header *) table_ptr; - table_info.length = table_header.length; - table_info.allocation = ACPI_MEM_ALLOCATED; - table_info.base_pointer = table_ptr; + ACPI_MEMCPY (table_ptr, &table_header, sizeof (struct acpi_table_header)); + table_data_ptr = ACPI_PTR_ADD (u8, table_ptr, sizeof (struct acpi_table_header)); - status = acpi_tb_install_table (NULL, &table_info); - if (ACPI_FAILURE (status)) { - goto cleanup; - } + /* Get the table from the op region */ - /* Add the table to the namespace */ + for (i = 0; i < table_header.length; i++) { + status = acpi_ev_address_space_dispatch (obj_desc, ACPI_READ, + (acpi_physical_address) i, 8, + ((u8 *) table_data_ptr + i)); + if (ACPI_FAILURE (status)) { + goto cleanup; + } + } + break; - /* TBD: [Restructure] - change to whatever new interface is appropriate */ -/* - Status = Acpi_load_namespace (); - if (ACPI_FAILURE (Status)) - { -*/ - /* TBD: [Errors] Unload the table on failure ? */ -/* - goto Cleanup; + + case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: + + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Load from Field %p %s\n", + obj_desc, acpi_ut_get_object_type_name (obj_desc))); + + /* + * The length of the field must be at least as large as the table. + * Read the entire field and thus the entire table. Buffer is + * allocated during the read. 
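
The region branch above reads the table twice through the address-space dispatcher: first just the header, to learn the total length, then the whole body one byte at a time into a freshly allocated buffer. A self-contained sketch of that two-pass read against a fake region; it assumes a little-endian host with no struct padding, and every name is hypothetical:

#include <stdlib.h>
#include <string.h>

struct hdr {
    char signature[4];
    unsigned int length;    /* total table length, header included */
};

/* Fake region contents: "SSDT", length 12, then four payload bytes */
static const unsigned char fake_region[] = {
    'S', 'S', 'D', 'T', 12, 0, 0, 0, 1, 2, 3, 4
};

/* Stand-in for acpi_ev_address_space_dispatch: one 8-bit read per call */
static int
region_read_byte (unsigned int offset, unsigned char *out)
{
    if (offset >= sizeof (fake_region)) {
        return 1;
    }
    *out = fake_region[offset];
    return 0;
}

static unsigned char *
read_table (void)
{
    struct hdr header;
    unsigned char *table;
    unsigned int i;

    /* Pass 1: the header only, to discover the table length */

    for (i = 0; i < sizeof (header); i++) {
        if (region_read_byte (i, (unsigned char *) &header + i)) {
            return NULL;
        }
    }
    if (header.length < sizeof (header)) {
        return NULL;    /* corrupt length field */
    }

    table = malloc (header.length);
    if (!table) {
        return NULL;
    }
    memcpy (table, &header, sizeof (header));

    /* Pass 2: the remainder of the table, still one byte at a time */

    for (i = sizeof (header); i < header.length; i++) {
        if (region_read_byte (i, table + i)) {
            free (table);
            return NULL;
        }
    }
    return table;
}
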
+ */ + status = acpi_ex_read_data_from_field (walk_state, obj_desc, &buffer_desc); + if (ACPI_FAILURE (status)) { + goto cleanup; + } + + table_ptr = ACPI_CAST_PTR (struct acpi_table_header, buffer_desc->buffer.pointer); + break; + + + default: + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } -*/ + /* The table must be either an SSDT or a PSDT */ - /* TBD: [Investigate] we need a pointer to the table desc */ + if ((!ACPI_STRNCMP (table_ptr->signature, + acpi_gbl_table_data[ACPI_TABLE_PSDT].signature, + acpi_gbl_table_data[ACPI_TABLE_PSDT].sig_length)) && + (!ACPI_STRNCMP (table_ptr->signature, + acpi_gbl_table_data[ACPI_TABLE_SSDT].signature, + acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Table has invalid signature [%4.4s], must be SSDT or PSDT\n", + table_ptr->signature)); + status = AE_BAD_SIGNATURE; + goto cleanup; + } - /* Init the table handle */ + /* Install the new table into the local data structures */ - table_desc->reference.opcode = AML_LOAD_OP; - table_desc->reference.object = table_info.installed_desc; + status = acpi_ex_add_table (table_ptr, acpi_gbl_root_node, &ddb_handle); + if (ACPI_FAILURE (status)) { + goto cleanup; + } - /* TBD: store the tabledesc into the Ddb_handle target */ - /* Ddb_handle = Table_desc; */ + /* Store the ddb_handle into the Target operand */ + + status = acpi_ex_store (ddb_handle, target, walk_state); + if (ACPI_FAILURE (status)) { + (void) acpi_ex_unload_table (ddb_handle); + } return_ACPI_STATUS (status); cleanup: - ACPI_MEM_FREE (table_desc); - ACPI_MEM_FREE (table_ptr); + if (buffer_desc) { + acpi_ut_remove_reference (buffer_desc); + } + else { + ACPI_MEM_FREE (table_ptr); + } return_ACPI_STATUS (status); } -/***************************************************************************** +/******************************************************************************* * - * FUNCTION: Acpi_ex_unload_table + * FUNCTION: acpi_ex_unload_table * - * PARAMETERS: Ddb_handle - Handle to a previously loaded table + * PARAMETERS: ddb_handle - Handle to a previously loaded table * * RETURN: Status * * DESCRIPTION: Unload an ACPI table * - ****************************************************************************/ + ******************************************************************************/ acpi_status acpi_ex_unload_table ( - acpi_operand_object *ddb_handle) + union acpi_operand_object *ddb_handle) { - acpi_status status = AE_NOT_IMPLEMENTED; - acpi_operand_object *table_desc = ddb_handle; - acpi_table_desc *table_info; + acpi_status status = AE_NOT_IMPLEMENTED; + union acpi_operand_object *table_desc = ddb_handle; + struct acpi_table_desc *table_info; - FUNCTION_TRACE ("Ex_unload_table"); + ACPI_FUNCTION_TRACE ("ex_unload_table"); /* * Validate the handle - * Although the handle is partially validated in Acpi_ex_reconfiguration(), - * when it calls Acpi_ex_resolve_operands(), the handle is more completely + * Although the handle is partially validated in acpi_ex_reconfiguration(), + * when it calls acpi_ex_resolve_operands(), the handle is more completely * validated here. 
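
The unload path trusts nothing about the incoming ddb_handle until it has checked three things: non-NULL, an operand-class descriptor, and specifically a local-reference object. A tiny sketch of that validation; the field and constant names are invented for illustration:

enum desc_class { DESC_NAMED, DESC_OPERAND };
enum obj_type   { OBJ_INTEGER, OBJ_LOCAL_REFERENCE };

struct handle_obj {
    enum desc_class descriptor;
    enum obj_type type;
};

static int
handle_is_valid (struct handle_obj *ddb_handle)
{
    return ddb_handle != 0
        && ddb_handle->descriptor == DESC_OPERAND
        && ddb_handle->type == OBJ_LOCAL_REFERENCE;
}
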
*/ if ((!ddb_handle) || - (!VALID_DESCRIPTOR_TYPE (ddb_handle, ACPI_DESC_TYPE_INTERNAL)) || - (((acpi_operand_object *)ddb_handle)->common.type != - INTERNAL_TYPE_REFERENCE)) { + (ACPI_GET_DESCRIPTOR_TYPE (ddb_handle) != ACPI_DESC_TYPE_OPERAND) || + (ACPI_GET_OBJECT_TYPE (ddb_handle) != ACPI_TYPE_LOCAL_REFERENCE)) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* Get the actual table descriptor from the Ddb_handle */ + /* Get the actual table descriptor from the ddb_handle */ - table_info = (acpi_table_desc *) table_desc->reference.object; + table_info = (struct acpi_table_desc *) table_desc->reference.object; /* * Delete the entire namespace under this table Node - * (Offset contains the Table_id) + * (Offset contains the table_id) */ - status = acpi_ns_delete_namespace_by_owner (table_info->table_id); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } + acpi_ns_delete_namespace_by_owner (table_info->table_id); /* Delete the table itself */ - acpi_tb_uninstall_table (table_info->installed_desc); + (void) acpi_tb_uninstall_table (table_info->installed_desc); - /* Delete the table descriptor (Ddb_handle) */ + /* Delete the table descriptor (ddb_handle) */ acpi_ut_remove_reference (table_desc); - return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exconvrt.c linux.21rc5-ac2/drivers/acpi/executer/exconvrt.c --- linux.21rc5/drivers/acpi/executer/exconvrt.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exconvrt.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,49 +1,63 @@ /****************************************************************************** * * Module Name: exconvrt - Object conversion routines - * $Revision: 24 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "acevents.h" -#include "amlcode.h" -#include "acdispat.h" +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exconvrt") + ACPI_MODULE_NAME ("exconvrt") /******************************************************************************* * - * FUNCTION: Acpi_ex_convert_to_integer + * FUNCTION: acpi_ex_convert_to_integer * - * PARAMETERS: *Obj_desc - Object to be converted. Must be an + * PARAMETERS: *obj_desc - Object to be converted. Must be an * Integer, Buffer, or String - * Walk_state - Current method state + * walk_state - Current method state * * RETURN: Status * @@ -53,60 +67,40 @@ acpi_status acpi_ex_convert_to_integer ( - acpi_operand_object *obj_desc, - acpi_operand_object **result_desc, - acpi_walk_state *walk_state) + union acpi_operand_object *obj_desc, + union acpi_operand_object **result_desc, + struct acpi_walk_state *walk_state) { - u32 i; - acpi_operand_object *ret_desc; - u32 count; - char *pointer; - acpi_integer result; - u32 integer_size = sizeof (acpi_integer); + u32 i; + union acpi_operand_object *ret_desc; + u32 count; + u8 *pointer; + acpi_integer result; + acpi_status status; - FUNCTION_ENTRY (); + ACPI_FUNCTION_TRACE_PTR ("ex_convert_to_integer", obj_desc); - switch (obj_desc->common.type) { + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { case ACPI_TYPE_INTEGER: *result_desc = obj_desc; - return (AE_OK); + return_ACPI_STATUS (AE_OK); case ACPI_TYPE_STRING: - pointer = obj_desc->string.pointer; + pointer = (u8 *) obj_desc->string.pointer; count = obj_desc->string.length; break; case ACPI_TYPE_BUFFER: - pointer = (char *) obj_desc->buffer.pointer; + pointer = obj_desc->buffer.pointer; count = obj_desc->buffer.length; break; default: - return (AE_TYPE); - } - - /* - * Create a new integer - */ - ret_desc = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); - if (!ret_desc) { - return (AE_NO_MEMORY); - } - - - /* Handle both ACPI 1.0 and ACPI 2.0 Integer widths */ - - if (walk_state->method_node->flags & ANOBJ_DATA_WIDTH_32) { - /* - * We are running a method that exists in a 32-bit ACPI table. - * Truncate the value to 32 bits by zeroing out the upper 32-bit field - */ - integer_size = sizeof (u32); + return_ACPI_STATUS (AE_TYPE); } - /* * Convert the buffer/string to an integer. 
Note that both buffers and * strings are treated as raw data - we don't convert ascii to hex for @@ -120,23 +114,24 @@ /* Transfer no more than an integer's worth of data */ - if (count > integer_size) { - count = integer_size; + if (count > acpi_gbl_integer_byte_width) { + count = acpi_gbl_integer_byte_width; } /* * String conversion is different than Buffer conversion */ - switch (obj_desc->common.type) { + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { case ACPI_TYPE_STRING: - /* TBD: Need to use 64-bit STRTOUL */ - /* * Convert string to an integer * String must be hexadecimal as per the ACPI specification */ - result = STRTOUL (pointer, NULL, 16); + status = acpi_ut_strtoul64 ((char *) pointer, 16, &result); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } break; @@ -154,14 +149,31 @@ */ result |= (((acpi_integer) pointer[i]) << (i * 8)); } + break; + + default: + /* No other types can get here */ break; } - /* Save the Result, delete original descriptor, store new descriptor */ + /* + * Create a new integer + */ + ret_desc = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); + if (!ret_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* Save the Result */ ret_desc->integer.value = result; + /* + * If we are about to overwrite the original object on the operand stack, + * we must remove a reference on the original object because we are + * essentially removing it from the stack. + */ if (*result_desc == obj_desc) { if (walk_state->opcode != AML_STORE_OP) { acpi_ut_remove_reference (obj_desc); @@ -169,139 +181,152 @@ } *result_desc = ret_desc; - return (AE_OK); + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ex_convert_to_buffer + * FUNCTION: acpi_ex_convert_to_buffer * - * PARAMETERS: *Obj_desc - Object to be converted. Must be an + * PARAMETERS: *obj_desc - Object to be converted. Must be an * Integer, Buffer, or String - * Walk_state - Current method state + * walk_state - Current method state * * RETURN: Status * - * DESCRIPTION: Convert an ACPI Object to an Buffer + * DESCRIPTION: Convert an ACPI Object to a Buffer * ******************************************************************************/ acpi_status acpi_ex_convert_to_buffer ( - acpi_operand_object *obj_desc, - acpi_operand_object **result_desc, - acpi_walk_state *walk_state) + union acpi_operand_object *obj_desc, + union acpi_operand_object **result_desc, + struct acpi_walk_state *walk_state) { - acpi_operand_object *ret_desc; - u32 i; - u32 integer_size = sizeof (acpi_integer); - u8 *new_buf; + union acpi_operand_object *ret_desc; + u32 i; + u8 *new_buf; - FUNCTION_ENTRY (); + ACPI_FUNCTION_TRACE_PTR ("ex_convert_to_buffer", obj_desc); - switch (obj_desc->common.type) { - case ACPI_TYPE_INTEGER: + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_BUFFER: - /* - * Create a new Buffer - */ - ret_desc = acpi_ut_create_internal_object (ACPI_TYPE_BUFFER); - if (!ret_desc) { - return (AE_NO_MEMORY); - } + /* No conversion necessary */ - /* Handle both ACPI 1.0 and ACPI 2.0 Integer widths */ + *result_desc = obj_desc; + return_ACPI_STATUS (AE_OK); - if (walk_state->method_node->flags & ANOBJ_DATA_WIDTH_32) { - /* - * We are running a method that exists in a 32-bit ACPI table. 
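
Two conversion rules are at work in the hunk above: strings must be hexadecimal, with the 32-bit STRTOUL replaced by the 64-bit-safe acpi_ut_strtoul64, and buffer bytes are assembled low byte first into a little-endian integer, capped at one integer's width. A standalone sketch, using strtoull where the patch uses the ACPICA helper:

#include <stdint.h>
#include <stdlib.h>

/* Buffer bytes become a little-endian integer, low byte first */
static uint64_t
buffer_to_integer (const unsigned char *data, unsigned int count)
{
    uint64_t result = 0;
    unsigned int i;

    if (count > sizeof (result)) {
        count = sizeof (result);    /* at most one integer's worth */
    }
    for (i = 0; i < count; i++) {
        result |= (uint64_t) data[i] << (i * 8);
    }
    return result;
}

/* Strings are parsed as hex, per the ACPI spec rule cited above */
static uint64_t
string_to_integer (const char *s)
{
    return strtoull (s, NULL, 16);
}
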
- * Truncate the value to 32 bits by zeroing out the upper - * 32-bit field - */ - integer_size = sizeof (u32); - } - /* Need enough space for one integers */ + case ACPI_TYPE_INTEGER: - ret_desc->buffer.length = integer_size; - new_buf = ACPI_MEM_CALLOCATE (integer_size); - if (!new_buf) { - REPORT_ERROR - (("Ex_convert_to_buffer: Buffer allocation failure\n")); - acpi_ut_remove_reference (ret_desc); - return (AE_NO_MEMORY); + /* + * Create a new Buffer object. + * Need enough space for one integer + */ + ret_desc = acpi_ut_create_buffer_object (acpi_gbl_integer_byte_width); + if (!ret_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); } /* Copy the integer to the buffer */ - for (i = 0; i < integer_size; i++) { + new_buf = ret_desc->buffer.pointer; + for (i = 0; i < acpi_gbl_integer_byte_width; i++) { new_buf[i] = (u8) (obj_desc->integer.value >> (i * 8)); } - ret_desc->buffer.pointer = new_buf; - - /* Return the new buffer descriptor */ - - if (*result_desc == obj_desc) { - if (walk_state->opcode != AML_STORE_OP) { - acpi_ut_remove_reference (obj_desc); - } - } - - *result_desc = ret_desc; break; case ACPI_TYPE_STRING: - *result_desc = obj_desc; - break; + /* + * Create a new Buffer object + * Size will be the string length + */ + ret_desc = acpi_ut_create_buffer_object ((acpi_size) obj_desc->string.length); + if (!ret_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } - case ACPI_TYPE_BUFFER: - *result_desc = obj_desc; + /* Copy the string to the buffer */ + + new_buf = ret_desc->buffer.pointer; + ACPI_STRNCPY ((char *) new_buf, (char *) obj_desc->string.pointer, + obj_desc->string.length); break; default: - return (AE_TYPE); - break; - } + return_ACPI_STATUS (AE_TYPE); + } + + /* Mark buffer initialized */ + + ret_desc->common.flags |= AOPOBJ_DATA_VALID; + + /* + * If we are about to overwrite the original object on the operand stack, + * we must remove a reference on the original object because we are + * essentially removing it from the stack. 
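
This reference-count rule recurs in every conversion routine in the file: when the freshly created result is about to replace the source object in its operand-stack slot, the source loses its stack reference, except under a Store opcode, which still needs the source object afterwards. A sketch of the rule in isolation, with hypothetical types:

struct ref_obj {
    int refcount;
};

static void
unref (struct ref_obj *o)
{
    if (--o->refcount == 0) {
        /* the object would be freed here */
    }
}

static void
install_result (struct ref_obj **slot, struct ref_obj *source,
    struct ref_obj *result, int opcode_is_store)
{
    /* Overwriting 'source' in its slot removes it from the stack */

    if (*slot == source && !opcode_is_store) {
        unref (source);
    }
    *slot = result;
}
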
+ */ + if (*result_desc == obj_desc) { + if (walk_state->opcode != AML_STORE_OP) { + acpi_ut_remove_reference (obj_desc); + } + } - return (AE_OK); + *result_desc = ret_desc; + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ex_convert_ascii + * FUNCTION: acpi_ex_convert_ascii * - * PARAMETERS: Integer + * PARAMETERS: Integer - Value to be converted + * Base - 10 or 16 + * String - Where the string is returned + * data_width - Size of data item to be converted * * RETURN: Actual string length * - * DESCRIPTION: Convert an ACPI Integer to a hex string + * DESCRIPTION: Convert an ACPI Integer to a hex or decimal string * ******************************************************************************/ u32 acpi_ex_convert_to_ascii ( - acpi_integer integer, - u32 base, - u8 *string) + acpi_integer integer, + u32 base, + u8 *string, + u8 data_width) { - u32 i; - u32 j; - u32 k = 0; - u8 hex_digit; - acpi_integer digit; - u32 remainder; - u32 length = sizeof (acpi_integer); - u8 leading_zero = TRUE; + u32 i; + u32 j; + u32 k = 0; + char hex_digit; + acpi_integer digit; + u32 remainder; + u32 length; + u8 leading_zero; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); + + if (data_width < sizeof (acpi_integer)) { + leading_zero = FALSE; + length = data_width; + } + else { + leading_zero = TRUE; + length = sizeof (acpi_integer); + } switch (base) { @@ -313,7 +338,7 @@ digit = integer; for (j = 1; j < i; j++) { - acpi_ut_short_divide (&digit, 10, &digit, &remainder); + (void) acpi_ut_short_divide (&digit, 10, &digit, &remainder); } /* Create the decimal digit */ @@ -323,7 +348,7 @@ } if (!leading_zero) { - string[k] = (u8) (ASCII_ZERO + remainder); + string[k] = (u8) (ACPI_ASCII_ZERO + remainder); k++; } } @@ -336,12 +361,12 @@ for (i = 0, j = ((length * 2) -1); i < (length * 2); i++, j--) { hex_digit = acpi_ut_hex_to_ascii_char (integer, (j * 4)); - if (hex_digit != ASCII_ZERO) { + if (hex_digit != ACPI_ASCII_ZERO) { leading_zero = FALSE; } if (!leading_zero) { - string[k] = hex_digit; + string[k] = (u8) hex_digit; k++; } } @@ -358,22 +383,22 @@ * Finally, null terminate the string and return the length */ if (!k) { - string [0] = ASCII_ZERO; + string [0] = ACPI_ASCII_ZERO; k = 1; } - string [k] = 0; + string [k] = 0; return (k); } /******************************************************************************* * - * FUNCTION: Acpi_ex_convert_to_string + * FUNCTION: acpi_ex_convert_to_string * - * PARAMETERS: *Obj_desc - Object to be converted. Must be an + * PARAMETERS: *obj_desc - Object to be converted. 
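
acpi_ex_convert_to_ascii walks the value nibble by nibble (digit by digit in base 10), suppressing leading zeros and falling back to a single '0' when nothing was emitted, and returns the resulting length. A compilable hex-only sketch of that logic; it does not reproduce the patch's special handling of the new data_width parameter for narrow values:

/* 'out' must hold width_bytes * 2 + 1 characters */
static unsigned int
u64_to_hex_ascii (unsigned long long value, char *out,
    unsigned int width_bytes)
{
    static const char digits[] = "0123456789ABCDEF";
    unsigned int nibbles = width_bytes * 2;
    unsigned int i;
    unsigned int k = 0;
    int leading_zero = 1;

    for (i = 0; i < nibbles; i++) {
        /* Highest nibble first */
        char c = digits[(value >> ((nibbles - 1 - i) * 4)) & 0xf];

        if (c != '0') {
            leading_zero = 0;
        }
        if (!leading_zero) {
            out[k++] = c;
        }
    }

    if (k == 0) {
        out[k++] = '0';     /* the value was zero: emit one '0' */
    }
    out[k] = '\0';
    return k;
}
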
Must be an * Integer, Buffer, or String - * Walk_state - Current method state + * walk_state - Current method state * * RETURN: Status * @@ -383,39 +408,39 @@ acpi_status acpi_ex_convert_to_string ( - acpi_operand_object *obj_desc, - acpi_operand_object **result_desc, - u32 base, - u32 max_length, - acpi_walk_state *walk_state) + union acpi_operand_object *obj_desc, + union acpi_operand_object **result_desc, + u32 base, + u32 max_length, + struct acpi_walk_state *walk_state) { - acpi_operand_object *ret_desc; - u32 i; - u32 index; - u32 string_length; - u32 integer_size = sizeof (acpi_integer); - u8 *new_buf; - u8 *pointer; + union acpi_operand_object *ret_desc; + u32 i; + u32 string_length; + u8 *new_buf; + u8 *pointer; - FUNCTION_ENTRY (); + ACPI_FUNCTION_TRACE_PTR ("ex_convert_to_string", obj_desc); - switch (obj_desc->common.type) { - case ACPI_TYPE_INTEGER: + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_STRING: - /* Handle both ACPI 1.0 and ACPI 2.0 Integer widths */ + if (max_length >= obj_desc->string.length) { + *result_desc = obj_desc; + return_ACPI_STATUS (AE_OK); + } + else { + /* Must copy the string first and then truncate it */ - if (walk_state->method_node->flags & ANOBJ_DATA_WIDTH_32) { - /* - * We are running a method that exists in a 32-bit ACPI table. - * Truncate the value to 32 bits by zeroing out the upper - * 32-bit field - */ - integer_size = sizeof (u32); + return_ACPI_STATUS (AE_NOT_IMPLEMENTED); } - string_length = integer_size * 2; + + case ACPI_TYPE_INTEGER: + + string_length = acpi_gbl_integer_byte_width * 2; if (base == 10) { string_length = ACPI_MAX_DECIMAL_DIGITS; } @@ -425,23 +450,22 @@ */ ret_desc = acpi_ut_create_internal_object (ACPI_TYPE_STRING); if (!ret_desc) { - return (AE_NO_MEMORY); + return_ACPI_STATUS (AE_NO_MEMORY); } /* Need enough space for one ASCII integer plus null terminator */ - new_buf = ACPI_MEM_CALLOCATE (string_length + 1); + new_buf = ACPI_MEM_CALLOCATE ((acpi_size) string_length + 1); if (!new_buf) { - REPORT_ERROR - (("Ex_convert_to_string: Buffer allocation failure\n")); + ACPI_REPORT_ERROR + (("ex_convert_to_string: Buffer allocation failure\n")); acpi_ut_remove_reference (ret_desc); - return (AE_NO_MEMORY); + return_ACPI_STATUS (AE_NO_MEMORY); } - /* Convert */ - i = acpi_ex_convert_to_ascii (obj_desc->integer.value, base, new_buf); + i = acpi_ex_convert_to_ascii (obj_desc->integer.value, base, new_buf, sizeof (acpi_integer)); /* Null terminate at the correct place */ @@ -455,29 +479,25 @@ } ret_desc->buffer.pointer = new_buf; - - /* Return the new buffer descriptor */ - - if (*result_desc == obj_desc) { - if (walk_state->opcode != AML_STORE_OP) { - acpi_ut_remove_reference (obj_desc); - } - } - - *result_desc = ret_desc; break; case ACPI_TYPE_BUFFER: - string_length = obj_desc->buffer.length * 3; - if (base == 10) { - string_length = obj_desc->buffer.length * 4; + /* Find the string length */ + + pointer = obj_desc->buffer.pointer; + for (string_length = 0; string_length < obj_desc->buffer.length; string_length++) { + /* Exit on null terminator */ + + if (!pointer[string_length]) { + break; + } } if (max_length > ACPI_MAX_STRING_CONVERSION) { if (string_length > ACPI_MAX_STRING_CONVERSION) { - return (AE_AML_STRING_LIMIT); + return_ACPI_STATUS (AE_AML_STRING_LIMIT); } } @@ -486,7 +506,7 @@ */ ret_desc = acpi_ut_create_internal_object (ACPI_TYPE_STRING); if (!ret_desc) { - return (AE_NO_MEMORY); + return_ACPI_STATUS (AE_NO_MEMORY); } /* String length is the lesser of the Max or the actual length */ @@ -495,92 
+515,77 @@ string_length = max_length; } - new_buf = ACPI_MEM_CALLOCATE (string_length + 1); + new_buf = ACPI_MEM_CALLOCATE ((acpi_size) string_length + 1); if (!new_buf) { - REPORT_ERROR - (("Ex_convert_to_string: Buffer allocation failure\n")); + ACPI_REPORT_ERROR + (("ex_convert_to_string: Buffer allocation failure\n")); acpi_ut_remove_reference (ret_desc); - return (AE_NO_MEMORY); + return_ACPI_STATUS (AE_NO_MEMORY); } - /* - * Convert each byte of the buffer to two ASCII characters plus a space. - */ - pointer = obj_desc->buffer.pointer; - index = 0; - for (i = 0, index = 0; i < obj_desc->buffer.length; i++) { - index = acpi_ex_convert_to_ascii (pointer[i], base, &new_buf[index]); + /* Copy the appropriate number of buffer characters */ - new_buf[index] = ' '; - index++; - } + ACPI_MEMCPY (new_buf, pointer, string_length); /* Null terminate */ - new_buf [index-1] = 0; + new_buf [string_length] = 0; ret_desc->buffer.pointer = new_buf; - ret_desc->string.length = STRLEN ((char *) new_buf); - - - /* Return the new buffer descriptor */ - - if (*result_desc == obj_desc) { - if (walk_state->opcode != AML_STORE_OP) { - acpi_ut_remove_reference (obj_desc); - } - } - - *result_desc = ret_desc; + ret_desc->string.length = string_length; break; - case ACPI_TYPE_STRING: - - if (max_length >= obj_desc->string.length) { - *result_desc = obj_desc; - } + default: + return_ACPI_STATUS (AE_TYPE); + } - else { - /* Must copy the string first and then truncate it */ - return (AE_NOT_IMPLEMENTED); + /* + * If we are about to overwrite the original object on the operand stack, + * we must remove a reference on the original object because we are + * essentially removing it from the stack. + */ + if (*result_desc == obj_desc) { + if (walk_state->opcode != AML_STORE_OP) { + acpi_ut_remove_reference (obj_desc); } - break; - - - default: - return (AE_TYPE); - break; - } + } - return (AE_OK); + *result_desc = ret_desc; + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ex_convert_to_target_type + * FUNCTION: acpi_ex_convert_to_target_type * - * PARAMETERS: *Obj_desc - Object to be converted. - * Walk_state - Current method state + * PARAMETERS: destination_type - Current type of the destination + * source_desc - Source object to be converted. + * walk_state - Current method state * * RETURN: Status * - * DESCRIPTION: + * DESCRIPTION: Implements "implicit conversion" rules for storing an object. 
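
The "implicit conversion" described above dispatches on the type of the destination, not the source: storing into an Integer-like target coerces the source to Integer, a String target to String, a Buffer target to Buffer, and a region field accepts any source (conversion happens at field-write time instead). A skeletal sketch with stub converters, all names illustrative:

enum val_type { VT_INTEGER, VT_STRING, VT_BUFFER, VT_REGION_FIELD };

struct val {
    enum val_type type;
};

/* Stub converters; the real ones allocate and fill a new object */
static int to_integer (struct val *s, struct val **r) { *r = s; return 0; }
static int to_string  (struct val *s, struct val **r) { *r = s; return 0; }
static int to_buffer  (struct val *s, struct val **r) { *r = s; return 0; }

static int
convert_for_target (enum val_type dest_type, struct val *source,
    struct val **result)
{
    *result = source;   /* default: store the source unchanged */

    switch (dest_type) {
    case VT_INTEGER:
        return to_integer (source, result);
    case VT_STRING:
        return to_string (source, result);
    case VT_BUFFER:
        return to_buffer (source, result);
    case VT_REGION_FIELD:
        return 0;       /* named fields convert on write instead */
    }
    return -1;          /* unknown target: internal error */
}
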
* ******************************************************************************/ acpi_status acpi_ex_convert_to_target_type ( - acpi_object_type8 destination_type, - acpi_operand_object **obj_desc, - acpi_walk_state *walk_state) + acpi_object_type destination_type, + union acpi_operand_object *source_desc, + union acpi_operand_object **result_desc, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE ("ex_convert_to_target_type"); - FUNCTION_TRACE ("Ex_convert_to_target_type"); + /* Default behavior */ + + *result_desc = source_desc; /* * If required by the target, @@ -592,7 +597,7 @@ case ARGI_INTEGER_REF: /* Handles Increment, Decrement cases */ switch (destination_type) { - case INTERNAL_TYPE_REGION_FIELD: + case ACPI_TYPE_LOCAL_REGION_FIELD: /* * Named field can always handle conversions */ @@ -601,10 +606,10 @@ default: /* No conversion allowed for these types */ - if (destination_type != (*obj_desc)->common.type) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Target does not allow conversion of type %s to %s\n", - acpi_ut_get_type_name ((*obj_desc)->common.type), + if (destination_type != ACPI_GET_OBJECT_TYPE (source_desc)) { + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "Explicit operator, will store (%s) over existing type (%s)\n", + acpi_ut_get_object_type_name (source_desc), acpi_ut_get_type_name (destination_type))); status = AE_TYPE; } @@ -617,13 +622,13 @@ switch (destination_type) { case ACPI_TYPE_INTEGER: case ACPI_TYPE_BUFFER_FIELD: - case INTERNAL_TYPE_BANK_FIELD: - case INTERNAL_TYPE_INDEX_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: /* * These types require an Integer operand. We can convert * a Buffer or a String to an Integer if necessary. */ - status = acpi_ex_convert_to_integer (*obj_desc, obj_desc, walk_state); + status = acpi_ex_convert_to_integer (source_desc, result_desc, walk_state); break; @@ -633,17 +638,22 @@ * The operand must be a String. We can convert an * Integer or Buffer if necessary */ - status = acpi_ex_convert_to_string (*obj_desc, obj_desc, 16, ACPI_UINT32_MAX, walk_state); + status = acpi_ex_convert_to_string (source_desc, result_desc, 16, ACPI_UINT32_MAX, walk_state); break; case ACPI_TYPE_BUFFER: /* - * The operand must be a String. We can convert an - * Integer or Buffer if necessary + * The operand must be a Buffer. 
We can convert an + * Integer or String if necessary */ - status = acpi_ex_convert_to_buffer (*obj_desc, obj_desc, walk_state); + status = acpi_ex_convert_to_buffer (source_desc, result_desc, walk_state); + break; + + + default: + status = AE_AML_INTERNAL; break; } break; @@ -651,21 +661,20 @@ case ARGI_REFERENCE: /* - * Create_xxxx_field cases - we are storing the field object into the name + * create_xxxx_field cases - we are storing the field object into the name */ break; default: ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Unknown Target type ID 0x%X Op %s Dest_type %s\n", + "Unknown Target type ID 0x%X Op %s dest_type %s\n", GET_CURRENT_ARG_TYPE (walk_state->op_info->runtime_args), walk_state->op_info->name, acpi_ut_get_type_name (destination_type))); status = AE_AML_INTERNAL; } - /* * Source-to-Target conversion semantics: * diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/excreate.c linux.21rc5-ac2/drivers/acpi/executer/excreate.c --- linux.21rc5/drivers/acpi/executer/excreate.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/excreate.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,48 +1,65 @@ /****************************************************************************** * * Module Name: excreate - Named object creation - * $Revision: 71 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acevents.h" -#include "acdispat.h" +#include +#include +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("excreate") + ACPI_MODULE_NAME ("excreate") +#ifndef ACPI_NO_METHOD_EXECUTION /***************************************************************************** * - * FUNCTION: Acpi_ex_create_alias + * FUNCTION: acpi_ex_create_alias * - * PARAMETERS: Walk_state - Current state, contains List of - * operands for the opcode + * PARAMETERS: walk_state - Current state, contains operands * * RETURN: Status * @@ -52,32 +69,69 @@ acpi_status acpi_ex_create_alias ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_namespace_node *source_node; - acpi_status status; + struct acpi_namespace_node *target_node; + struct acpi_namespace_node *alias_node; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ex_create_alias"); + ACPI_FUNCTION_TRACE ("ex_create_alias"); /* Get the source/alias operands (both namespace nodes) */ - source_node = (acpi_namespace_node *) walk_state->operands[1]; + alias_node = (struct acpi_namespace_node *) walk_state->operands[0]; + target_node = (struct acpi_namespace_node *) walk_state->operands[1]; - - /* Attach the original source object to the new Alias Node */ - - status = acpi_ns_attach_object ((acpi_namespace_node *) walk_state->operands[0], - source_node->object, - source_node->type); + if (target_node->type == ACPI_TYPE_LOCAL_ALIAS) { + /* + * Dereference an existing alias so that we don't create a chain + * of aliases. With this code, we guarantee that an alias is + * always exactly one level of indirection away from the + * actual aliased name. + */ + target_node = (struct acpi_namespace_node *) target_node->object; + } /* - * The new alias assumes the type of the source, but it points - * to the same object. The reference count of the object has an - * additional reference to prevent deletion out from under either the - * source or the alias Node + * For objects that can never change (i.e., the NS node will + * permanently point to the same object), we can simply attach + * the object to the new NS node. For other objects (such as + * Integers, buffers, etc.), we have to point the Alias node + * to the original Node. */ + switch (target_node->type) { + case ACPI_TYPE_INTEGER: + case ACPI_TYPE_STRING: + case ACPI_TYPE_BUFFER: + case ACPI_TYPE_PACKAGE: + case ACPI_TYPE_BUFFER_FIELD: + + /* + * The new alias has the type ALIAS and points to the original + * NS node, not the object itself. This is because for these + * types, the object can change dynamically via a Store. 
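
The alias creation around this point splits on mutability: for types whose attached object can be replaced by a later Store (Integer, String, Buffer, Package, buffer field), the alias must point at the target node itself so it always sees the current object, while for everything else the attached object can simply be shared; the dereference of an existing alias keeps every alias exactly one hop from the real node. A toy version with invented type names:

enum node_type { NT_DEVICE, NT_INTEGER, NT_ALIAS };

struct ns_node {
    enum node_type type;
    void *object;       /* attached object, or the aliased node */
};

static void
create_alias (struct ns_node *alias, struct ns_node *target)
{
    if (target->type == NT_ALIAS) {
        /* Never build alias chains: hop to the real node first */
        target = (struct ns_node *) target->object;
    }

    if (target->type == NT_INTEGER) {
        /* Mutable object: alias the NODE so later Stores stay visible */
        alias->type = NT_ALIAS;
        alias->object = target;
    }
    else {
        /* Stable binding: both nodes can share the object directly */
        alias->type = target->type;
        alias->object = target->object;
    }
}
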
+ */ + alias_node->type = ACPI_TYPE_LOCAL_ALIAS; + alias_node->object = ACPI_CAST_PTR (union acpi_operand_object, target_node); + break; + + default: + + /* Attach the original source object to the new Alias Node */ + + /* + * The new alias assumes the type of the target, and it points + * to the same object. The reference count of the object has an + * additional reference to prevent deletion out from under either the + * target node or the alias Node + */ + status = acpi_ns_attach_object (alias_node, + acpi_ns_get_attached_object (target_node), + target_node->type); + break; + } /* Since both operands are Nodes, we don't need to delete them */ @@ -87,9 +141,9 @@ /***************************************************************************** * - * FUNCTION: Acpi_ex_create_event + * FUNCTION: acpi_ex_create_event * - * PARAMETERS: Walk_state - Current state + * PARAMETERS: walk_state - Current state * * RETURN: Status * @@ -99,13 +153,13 @@ acpi_status acpi_ex_create_event ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_status status; - acpi_operand_object *obj_desc; + acpi_status status; + union acpi_operand_object *obj_desc; - FUNCTION_TRACE ("Ex_create_event"); + ACPI_FUNCTION_TRACE ("ex_create_event"); obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_EVENT); @@ -114,11 +168,11 @@ goto cleanup; } - /* Create the actual OS semaphore */ - - /* TBD: [Investigate] should be created with 0 or 1 units? */ - - status = acpi_os_create_semaphore (ACPI_NO_UNIT_LIMIT, 1, + /* + * Create the actual OS semaphore, with zero initial units -- meaning + * that the event is created in an unsignalled state + */ + status = acpi_os_create_semaphore (ACPI_NO_UNIT_LIMIT, 0, &obj_desc->event.semaphore); if (ACPI_FAILURE (status)) { goto cleanup; @@ -126,8 +180,8 @@ /* Attach object to the Node */ - status = acpi_ns_attach_object ((acpi_namespace_node *) walk_state->operands[0], - obj_desc, (u8) ACPI_TYPE_EVENT); + status = acpi_ns_attach_object ((struct acpi_namespace_node *) walk_state->operands[0], + obj_desc, ACPI_TYPE_EVENT); cleanup: /* @@ -141,27 +195,27 @@ /***************************************************************************** * - * FUNCTION: Acpi_ex_create_mutex + * FUNCTION: acpi_ex_create_mutex * - * PARAMETERS: Walk_state - Current state + * PARAMETERS: walk_state - Current state * * RETURN: Status * * DESCRIPTION: Create a new mutex object * - * Mutex (Name[0], Sync_level[1]) + * Mutex (Name[0], sync_level[1]) * ****************************************************************************/ acpi_status acpi_ex_create_mutex ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; - acpi_operand_object *obj_desc; + acpi_status status = AE_OK; + union acpi_operand_object *obj_desc; - FUNCTION_TRACE_PTR ("Ex_create_mutex", WALK_OPERANDS); + ACPI_FUNCTION_TRACE_PTR ("ex_create_mutex", ACPI_WALK_OPERANDS); /* Create the new mutex object */ @@ -172,8 +226,11 @@ goto cleanup; } - /* Create the actual OS semaphore */ - + /* + * Create the actual OS semaphore. + * One unit max to make it a mutex, with one initial unit to allow + * the mutex to be acquired. 
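Both object types above are built on the same OS primitive, a counting semaphore; only the unit counts differ. The event is created with zero initial units (unsignalled, so a Wait blocks until a Signal arrives), while the mutex gets one unit maximum and one initial unit (created free, so the first Acquire succeeds and the second blocks). A sketch using POSIX semaphores — note that sem_init() takes no maximum-units parameter, unlike acpi_os_create_semaphore(), so the one-unit cap is only implied here:

	#include <semaphore.h>

	/* Event: created unsignalled; sem_wait() blocks until some
	 * other thread does sem_post(). */
	static int create_event(sem_t *sem)
	{
		return sem_init(sem, 0, 0);
	}

	/* Mutex: created with a single unit, so it can be acquired
	 * exactly once before the next sem_wait() blocks. */
	static int create_mutex(sem_t *sem)
	{
		return sem_init(sem, 0, 1);
	}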
+ */ status = acpi_os_create_semaphore (1, 1, &obj_desc->mutex.semaphore); if (ACPI_FAILURE (status)) { goto cleanup; @@ -182,9 +239,10 @@ /* Init object and attach to NS node */ obj_desc->mutex.sync_level = (u8) walk_state->operands[1]->integer.value; + obj_desc->mutex.node = (struct acpi_namespace_node *) walk_state->operands[0]; - status = acpi_ns_attach_object ((acpi_namespace_node *) walk_state->operands[0], - obj_desc, (u8) ACPI_TYPE_MUTEX); + status = acpi_ns_attach_object (obj_desc->mutex.node, + obj_desc, ACPI_TYPE_MUTEX); cleanup: @@ -199,12 +257,12 @@ /***************************************************************************** * - * FUNCTION: Acpi_ex_create_region + * FUNCTION: acpi_ex_create_region * - * PARAMETERS: Aml_start - Pointer to the region declaration AML - * Aml_length - Max length of the declaration AML + * PARAMETERS: aml_start - Pointer to the region declaration AML + * aml_length - Max length of the declaration AML * Operands - List of operands for the opcode - * Walk_state - Current state + * walk_state - Current state * * RETURN: Status * @@ -214,28 +272,29 @@ acpi_status acpi_ex_create_region ( - u8 *aml_start, - u32 aml_length, - u8 region_space, - acpi_walk_state *walk_state) + u8 *aml_start, + u32 aml_length, + u8 region_space, + struct acpi_walk_state *walk_state) { - acpi_status status; - acpi_operand_object *obj_desc; - acpi_namespace_node *node; + acpi_status status; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + union acpi_operand_object *region_obj2; - FUNCTION_TRACE ("Ex_create_region"); + ACPI_FUNCTION_TRACE ("ex_create_region"); /* Get the Node from the object stack */ - node = (acpi_namespace_node *) walk_state->operands[0]; + node = walk_state->op->common.node; /* * If the region object is already attached to this node, * just return */ - if (node->object) { + if (acpi_ns_get_attached_object (node)) { return_ACPI_STATUS (AE_OK); } @@ -243,9 +302,9 @@ * Space ID must be one of the predefined IDs, or in the user-defined * range */ - if ((region_space >= NUM_REGION_TYPES) && - (region_space < USER_REGION_BEGIN)) { - REPORT_ERROR (("Invalid Address_space type %X\n", region_space)); + if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) && + (region_space < ACPI_USER_REGION_BEGIN)) { + ACPI_REPORT_ERROR (("Invalid address_space type %X\n", region_space)); return_ACPI_STATUS (AE_AML_INVALID_SPACE_ID); } @@ -261,21 +320,13 @@ goto cleanup; } - /* Allocate a method object for this region */ - - obj_desc->region.extra = acpi_ut_create_internal_object ( - INTERNAL_TYPE_EXTRA); - if (!obj_desc->region.extra) { - status = AE_NO_MEMORY; - goto cleanup; - } - /* * Remember location in AML stream of address & length * operands since they need to be evaluated at run time. */ - obj_desc->region.extra->extra.aml_start = aml_start; - obj_desc->region.extra->extra.aml_length = aml_length; + region_obj2 = obj_desc->common.next_object; + region_obj2->extra.aml_start = aml_start; + region_obj2->extra.aml_length = aml_length; /* Init the region from the operands */ @@ -286,102 +337,137 @@ /* Install the new region object in the parent Node */ - status = acpi_ns_attach_object (node, obj_desc, - (u8) ACPI_TYPE_REGION); - if (ACPI_FAILURE (status)) { - goto cleanup; - } + status = acpi_ns_attach_object (node, obj_desc, ACPI_TYPE_REGION); - /* - * If we have a valid region, initialize it - * Namespace is NOT locked at this point. 
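The space-ID validation above is deliberately not a simple bounds check: predefined IDs occupy the low end of the range and OEM-defined IDs the high end, and only the gap between the two bands is invalid. A standalone sketch; the two constants are illustrative values, the real ones live in the ACPICA headers:

	#include <stdbool.h>

	#define NUM_PREDEFINED_REGIONS	8	/* 0..7: SystemMemory, SystemIO, ... */
	#define USER_REGION_BEGIN	0x80	/* 0x80..0xFF reserved for OEMs */

	static bool region_space_is_valid(unsigned char space_id)
	{
		/* valid if predefined OR user-defined; only the middle
		 * band, NUM_PREDEFINED_REGIONS..USER_REGION_BEGIN-1,
		 * is rejected */
		return space_id < NUM_PREDEFINED_REGIONS ||
		       space_id >= USER_REGION_BEGIN;
	}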
- */ - status = acpi_ev_initialize_region (obj_desc, FALSE); - if (ACPI_FAILURE (status)) { - /* - * If AE_NOT_EXIST is returned, it is not fatal - * because many regions get created before a handler - * is installed for said region. - */ - if (AE_NOT_EXIST == status) { - status = AE_OK; - } - } cleanup: /* Remove local reference to the object */ acpi_ut_remove_reference (obj_desc); - return_ACPI_STATUS (status); } /***************************************************************************** * - * FUNCTION: Acpi_ex_create_table_region + * FUNCTION: acpi_ex_create_table_region * - * PARAMETERS: Walk_state - Current state + * PARAMETERS: walk_state - Current state * * RETURN: Status * - * DESCRIPTION: Create a new Data_table_region object + * DESCRIPTION: Create a new data_table_region object * ****************************************************************************/ acpi_status acpi_ex_create_table_region ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; + acpi_status status; + union acpi_operand_object **operand = &walk_state->operands[0]; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + struct acpi_table_header *table; + union acpi_operand_object *region_obj2; - FUNCTION_TRACE ("Ex_create_table_region"); + ACPI_FUNCTION_TRACE ("ex_create_table_region"); -/* - acpi_operand_object *Obj_desc; - Obj_desc = Acpi_ut_create_internal_object (ACPI_TYPE_REGION); - if (!Obj_desc) - { - Status = AE_NO_MEMORY; - goto Cleanup; + /* Get the Node from the object stack */ + + node = walk_state->op->common.node; + + /* + * If the region object is already attached to this node, + * just return + */ + if (acpi_ns_get_attached_object (node)) { + return_ACPI_STATUS (AE_OK); } + /* Find the ACPI table */ -Cleanup: -*/ + status = acpi_tb_find_table (operand[1]->string.pointer, + operand[2]->string.pointer, + operand[3]->string.pointer, &table); + + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Create the region descriptor */ + + obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_REGION); + if (!obj_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + region_obj2 = obj_desc->common.next_object; + region_obj2->extra.region_context = NULL; + + /* Init the region from the operands */ + + obj_desc->region.space_id = REGION_DATA_TABLE; + obj_desc->region.address = (acpi_physical_address) ACPI_TO_INTEGER (table); + obj_desc->region.length = table->length; + obj_desc->region.node = node; + obj_desc->region.flags = AOPOBJ_DATA_VALID; + /* Install the new region object in the parent Node */ + + status = acpi_ns_attach_object (node, obj_desc, ACPI_TYPE_REGION); + if (ACPI_FAILURE (status)) { + goto cleanup; + } + + status = acpi_ev_initialize_region (obj_desc, FALSE); + if (ACPI_FAILURE (status)) { + if (status == AE_NOT_EXIST) { + status = AE_OK; + } + else { + goto cleanup; + } + } + + obj_desc->region.flags |= AOPOBJ_SETUP_COMPLETE; + + +cleanup: + + /* Remove local reference to the object */ + + acpi_ut_remove_reference (obj_desc); return_ACPI_STATUS (status); } /***************************************************************************** * - * FUNCTION: Acpi_ex_create_processor + * FUNCTION: acpi_ex_create_processor * - * PARAMETERS: Op - Op containing the Processor definition and - * args - * Processor_node - Parent Node for the processor object + * PARAMETERS: walk_state - Current state * * RETURN: Status * * DESCRIPTION: Create a new processor object and populate the fields * - * Processor (Name[0], 
Cpu_iD[1], Pblock_addr[2], Pblock_length[3]) + * Processor (Name[0], cpu_iD[1], pblock_addr[2], pblock_length[3]) * ****************************************************************************/ acpi_status acpi_ex_create_processor ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_operand_object *obj_desc; - acpi_status status; + union acpi_operand_object **operand = &walk_state->operands[0]; + union acpi_operand_object *obj_desc; + acpi_status status; - FUNCTION_TRACE_PTR ("Ex_create_processor", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ex_create_processor", walk_state); /* Create the processor object */ @@ -395,13 +481,13 @@ * Initialize the processor object from the operands */ obj_desc->processor.proc_id = (u8) operand[1]->integer.value; - obj_desc->processor.address = (ACPI_IO_ADDRESS) operand[2]->integer.value; + obj_desc->processor.address = (acpi_io_address) operand[2]->integer.value; obj_desc->processor.length = (u8) operand[3]->integer.value; /* Install the processor object in the parent Node */ - status = acpi_ns_attach_object ((acpi_namespace_node *) operand[0], - obj_desc, (u8) ACPI_TYPE_PROCESSOR); + status = acpi_ns_attach_object ((struct acpi_namespace_node *) operand[0], + obj_desc, ACPI_TYPE_PROCESSOR); /* Remove local reference to the object */ @@ -413,30 +499,28 @@ /***************************************************************************** * - * FUNCTION: Acpi_ex_create_power_resource + * FUNCTION: acpi_ex_create_power_resource * - * PARAMETERS: Op - Op containing the Power_resource definition - * and args - * Power_node - Parent Node for the power object + * PARAMETERS: walk_state - Current state * * RETURN: Status * - * DESCRIPTION: Create a new Power_resource object and populate the fields + * DESCRIPTION: Create a new power_resource object and populate the fields * - * Power_resource (Name[0], System_level[1], Resource_order[2]) + * power_resource (Name[0], system_level[1], resource_order[2]) * ****************************************************************************/ acpi_status acpi_ex_create_power_resource ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_status status; - acpi_operand_object *obj_desc; + union acpi_operand_object **operand = &walk_state->operands[0]; + acpi_status status; + union acpi_operand_object *obj_desc; - FUNCTION_TRACE_PTR ("Ex_create_power_resource", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ex_create_power_resource", walk_state); /* Create the power resource object */ @@ -453,8 +537,8 @@ /* Install the power resource object in the parent Node */ - status = acpi_ns_attach_object ((acpi_namespace_node *) operand[0], - obj_desc, (u8) ACPI_TYPE_POWER); + status = acpi_ns_attach_object ((struct acpi_namespace_node *) operand[0], + obj_desc, ACPI_TYPE_POWER); /* Remove local reference to the object */ @@ -463,15 +547,15 @@ return_ACPI_STATUS (status); } +#endif /***************************************************************************** * - * FUNCTION: Acpi_ex_create_method + * FUNCTION: acpi_ex_create_method * - * PARAMETERS: Aml_start - First byte of the method's AML - * Aml_length - AML byte count for this method - * Method_flags - AML method flag byte - * Method - Method Node + * PARAMETERS: aml_start - First byte of the method's AML + * aml_length - AML byte count for this method + * walk_state - Current state * * RETURN: Status * @@ -481,17 +565,17 @@ 
acpi_status acpi_ex_create_method ( - u8 *aml_start, - u32 aml_length, - acpi_walk_state *walk_state) + u8 *aml_start, + u32 aml_length, + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_operand_object *obj_desc; - acpi_status status; - u8 method_flags; + union acpi_operand_object **operand = &walk_state->operands[0]; + union acpi_operand_object *obj_desc; + acpi_status status; + u8 method_flags; - FUNCTION_TRACE_PTR ("Ex_create_method", walk_state); + ACPI_FUNCTION_TRACE_PTR ("ex_create_method", walk_state); /* Create a new method object */ @@ -520,7 +604,7 @@ if (method_flags & METHOD_FLAGS_SERIALIZED) { /* * ACPI 1.0: Concurrency = 1 - * ACPI 2.0: Concurrency = (Sync_level (in method declaration) + 1) + * ACPI 2.0: Concurrency = (sync_level (in method declaration) + 1) */ obj_desc->method.concurrency = (u8) (((method_flags & METHOD_FLAGS_SYNCH_LEVEL) >> 4) + 1); @@ -532,8 +616,8 @@ /* Attach the new object to the method Node */ - status = acpi_ns_attach_object ((acpi_namespace_node *) operand[0], - obj_desc, (u8) ACPI_TYPE_METHOD); + status = acpi_ns_attach_object ((struct acpi_namespace_node *) operand[0], + obj_desc, ACPI_TYPE_METHOD); /* Remove local reference to the object */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exdump.c linux.21rc5-ac2/drivers/acpi/executer/exdump.c --- linux.21rc5/drivers/acpi/executer/exdump.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exdump.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,271 +1,198 @@ /****************************************************************************** * * Module Name: exdump - Interpreter debug output routines - * $Revision: 126 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
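The concurrency computation in acpi_ex_create_method above packs everything into the single AML method-flags byte: bits 0-2 carry the argument count, bit 3 the serialized flag, and bits 4-7 the sync level. A sketch of the same arithmetic (the mask names are stand-ins for the METHOD_FLAGS_* constants):

	#define M_ARG_COUNT_MASK	0x07
	#define M_SERIALIZED		0x08
	#define M_SYNC_LEVEL_MASK	0xF0

	/* Serialized methods allow sync_level + 1 concurrent threads,
	 * so an ACPI 1.0 method (sync level 0) degenerates to a
	 * concurrency of 1 -- an implicit mutex. Non-serialized
	 * methods are left unlimited; ~0u is this sketch's stand-in
	 * for "no limit". */
	static unsigned int method_concurrency(unsigned char method_flags)
	{
		if (!(method_flags & M_SERIALIZED))
			return ~0u;

		return ((method_flags & M_SYNC_LEVEL_MASK) >> 4) + 1;
	}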
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "actables.h" -#include "acparser.h" +#include +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exdump") + ACPI_MODULE_NAME ("exdump") /* * The following routines are used for debug output only */ -#if defined(ACPI_DEBUG) || defined(ENABLE_DEBUGGER) +#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) /***************************************************************************** * - * FUNCTION: Acpi_ex_show_hex_value + * FUNCTION: acpi_ex_dump_operand * - * PARAMETERS: Byte_count - Number of bytes to print (1, 2, or 4) - * *Aml_start - Address in AML stream of bytes to print - * Interpreter_mode - Current running mode (load1/Load2/Exec) - * Lead_space - # of spaces to print ahead of value - * 0 => none ahead but one behind - * - * DESCRIPTION: Print Byte_count byte(s) starting at Aml_start as a single - * value, in hex. If Byte_count > 1 or the value printed is > 9, also - * print in decimal. - * - ****************************************************************************/ - -void -acpi_ex_show_hex_value ( - u32 byte_count, - u8 *aml_start, - u32 lead_space) -{ - u32 value; /* Value retrieved from AML stream */ - u32 show_decimal_value; - u32 length; /* Length of printed field */ - u8 *current_aml_ptr = NULL; /* Pointer to current byte of AML value */ - - - FUNCTION_TRACE ("Ex_show_hex_value"); - - - if (!aml_start) { - REPORT_ERROR (("Ex_show_hex_value: null pointer\n")); - } - - /* - * AML numbers are always stored little-endian, - * even if the processor is big-endian. 
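The comment above — and the loop being deleted just below it — records why the interpreter never pointer-casts into the AML stream: AML integers are little-endian regardless of host byte order, so they must be assembled one byte at a time. The removed loop, restated as a self-contained helper:

	#include <stddef.h>
	#include <stdint.h>

	static uint32_t aml_read_le(const uint8_t *aml, size_t nbytes)
	{
		const uint8_t *p = aml + nbytes;
		uint32_t value = 0;

		/* walk backwards from the last byte, folding in the
		 * most significant byte first -- portable on any host
		 * endianness */
		while (p > aml)
			value = (value << 8) + *--p;

		return value;
	}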
- */ - for (current_aml_ptr = aml_start + byte_count, - value = 0; - current_aml_ptr > aml_start; ) { - value = (value << 8) + (u32)* --current_aml_ptr; - } - - length = lead_space * byte_count + 2; - if (byte_count > 1) { - length += (byte_count - 1); - } - - show_decimal_value = (byte_count > 1 || value > 9); - if (show_decimal_value) { - length += 3 + acpi_ex_digits_needed (value, 10); - } - - for (length = lead_space; length; --length ) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_LOAD, " ")); - } - - while (byte_count--) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_LOAD, "%02x", *aml_start++)); - - if (byte_count) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_LOAD, " ")); - } - } - - if (show_decimal_value) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_LOAD, " [%d]", value)); - } - - if (0 == lead_space) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_LOAD, " ")); - } - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_LOAD, "\n")); - return_VOID; -} - - -/***************************************************************************** - * - * FUNCTION: Acpi_ex_dump_operand - * - * PARAMETERS: *Entry_desc - Pointer to entry to be dumped + * PARAMETERS: *obj_desc - Pointer to entry to be dumped * * RETURN: Status * - * DESCRIPTION: Dump a stack entry + * DESCRIPTION: Dump an operand object * ****************************************************************************/ -acpi_status +void acpi_ex_dump_operand ( - acpi_operand_object *entry_desc) + union acpi_operand_object *obj_desc) { - u8 *buf = NULL; - u32 length; - u32 i; + u8 *buf = NULL; + u32 length; + union acpi_operand_object **element; + u16 element_index; - PROC_NAME ("Ex_dump_operand") + ACPI_FUNCTION_NAME ("ex_dump_operand") - if (!entry_desc) { + if (!((ACPI_LV_EXEC & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) { + return; + } + + if (!obj_desc) { /* * This usually indicates that something serious is wrong -- * since most (if not all) * code that dumps the stack expects something to be there! 
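The early-return test added above is the standard ACPICA debug gate: one global bitmask selects verbosity levels, another selects components (layers), and output is produced only when the caller's bit is set in both. A minimal model of that gating (dbg_print() and its globals are hypothetical):

	#include <stdarg.h>
	#include <stdio.h>

	static unsigned int dbg_level;	/* level bits, e.g. "exec" */
	static unsigned int dbg_layer;	/* component bits, e.g. "executer" */

	static void dbg_print(unsigned int level, unsigned int component,
			      const char *fmt, ...)
	{
		va_list ap;

		/* both the level bit and the component bit must be
		 * enabled, exactly as in the check above */
		if (!((level & dbg_level) && (component & dbg_layer)))
			return;

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}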
*/ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Null stack entry ptr\n")); - return (AE_OK); + acpi_os_printf ("Null stack entry ptr\n"); + return; } - if (VALID_DESCRIPTOR_TYPE (entry_desc, ACPI_DESC_TYPE_NAMED)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%p NS Node: ", entry_desc)); - DUMP_ENTRY (entry_desc, ACPI_LV_INFO); - return (AE_OK); + if (ACPI_GET_DESCRIPTOR_TYPE (obj_desc) == ACPI_DESC_TYPE_NAMED) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%p NS Node: ", obj_desc)); + ACPI_DUMP_ENTRY (obj_desc, ACPI_LV_EXEC); + return; } - if (!VALID_DESCRIPTOR_TYPE (entry_desc, ACPI_DESC_TYPE_INTERNAL)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%p Is not a local object \n", entry_desc)); - DUMP_BUFFER (entry_desc, sizeof (acpi_operand_object)); - return (AE_OK); + if (ACPI_GET_DESCRIPTOR_TYPE (obj_desc) != ACPI_DESC_TYPE_OPERAND) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%p is not a local object\n", obj_desc)); + ACPI_DUMP_BUFFER (obj_desc, sizeof (union acpi_operand_object)); + return; } - /* Entry_desc is a valid object */ - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%p ", entry_desc)); - - switch (entry_desc->common.type) { - case INTERNAL_TYPE_REFERENCE: - - switch (entry_desc->reference.opcode) { - case AML_ZERO_OP: - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Reference: Zero\n")); - break; - + /* obj_desc is a valid object */ - case AML_ONE_OP: - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Reference: One\n")); - break; - - - case AML_ONES_OP: - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Reference: Ones\n")); - break; - - - case AML_REVISION_OP: - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Reference: Revision\n")); - break; + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%p ", obj_desc)); + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_LOCAL_REFERENCE: + switch (obj_desc->reference.opcode) { case AML_DEBUG_OP: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Reference: Debug\n")); + acpi_os_printf ("Reference: Debug\n"); break; case AML_NAME_OP: - DUMP_PATHNAME (entry_desc->reference.object, "Reference: Name: ", + ACPI_DUMP_PATHNAME (obj_desc->reference.object, "Reference: Name: ", ACPI_LV_INFO, _COMPONENT); - DUMP_ENTRY (entry_desc->reference.object, ACPI_LV_INFO); + ACPI_DUMP_ENTRY (obj_desc->reference.object, ACPI_LV_INFO); break; case AML_INDEX_OP: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Reference: Index %p\n", - entry_desc->reference.object)); + acpi_os_printf ("Reference: Index %p\n", + obj_desc->reference.object); + break; + + + case AML_REF_OF_OP: + + acpi_os_printf ("Reference: (ref_of) %p\n", + obj_desc->reference.object); break; case AML_ARG_OP: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Reference: Arg%d", - entry_desc->reference.offset)); + acpi_os_printf ("Reference: Arg%d", + obj_desc->reference.offset); - if (ACPI_TYPE_INTEGER == entry_desc->common.type) { + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_INTEGER) { /* Value is a Number */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, " value is [%8.8X%8.8x]", - HIDWORD(entry_desc->integer.value), - LODWORD(entry_desc->integer.value))); + acpi_os_printf (" value is [%8.8X%8.8x]", + ACPI_HIDWORD(obj_desc->integer.value), + ACPI_LODWORD(obj_desc->integer.value)); } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "\n")); + acpi_os_printf ("\n"); break; case AML_LOCAL_OP: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Reference: Local%d", - entry_desc->reference.offset)); + acpi_os_printf ("Reference: Local%d", + obj_desc->reference.offset); - if (ACPI_TYPE_INTEGER == entry_desc->common.type) { + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_INTEGER) { /* Value is a Number */ - ACPI_DEBUG_PRINT_RAW 
((ACPI_DB_INFO, " value is [%8.8X%8.8x]", - HIDWORD(entry_desc->integer.value), - LODWORD(entry_desc->integer.value))); + acpi_os_printf (" value is [%8.8X%8.8x]", + ACPI_HIDWORD(obj_desc->integer.value), + ACPI_LODWORD(obj_desc->integer.value)); } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "\n")); + acpi_os_printf ("\n"); break; case AML_INT_NAMEPATH_OP: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Reference.Node->Name %X\n", - entry_desc->reference.node->name)); + + acpi_os_printf ("Reference.Node->Name %X\n", + obj_desc->reference.node->name.integer); break; + default: /* unknown opcode */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Unknown opcode=%X\n", - entry_desc->reference.opcode)); + acpi_os_printf ("Unknown Reference opcode=%X\n", + obj_desc->reference.opcode); break; } @@ -275,11 +202,11 @@ case ACPI_TYPE_BUFFER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Buffer len %X @ %p \n", - entry_desc->buffer.length, - entry_desc->buffer.pointer)); + acpi_os_printf ("Buffer len %X @ %p \n", + obj_desc->buffer.length, + obj_desc->buffer.pointer); - length = entry_desc->buffer.length; + length = obj_desc->buffer.length; if (length > 64) { length = 64; @@ -287,13 +214,13 @@ /* Debug only -- dump the buffer contents */ - if (entry_desc->buffer.pointer) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Buffer Contents: ")); + if (obj_desc->buffer.pointer) { + acpi_os_printf ("Buffer Contents: "); - for (buf = entry_desc->buffer.pointer; length--; ++buf) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, " %02x", *buf)); + for (buf = obj_desc->buffer.pointer; length--; ++buf) { + acpi_os_printf (" %02x", *buf); } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO,"\n")); + acpi_os_printf ("\n"); } break; @@ -301,135 +228,104 @@ case ACPI_TYPE_INTEGER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Integer %8.8X%8.8X\n", - HIDWORD (entry_desc->integer.value), - LODWORD (entry_desc->integer.value))); - break; - - - case INTERNAL_TYPE_IF: - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "If [Integer] %8.8X%8.8X\n", - HIDWORD (entry_desc->integer.value), - LODWORD (entry_desc->integer.value))); - break; - - - case INTERNAL_TYPE_WHILE: - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "While [Integer] %8.8X%8.8X\n", - HIDWORD (entry_desc->integer.value), - LODWORD (entry_desc->integer.value))); + acpi_os_printf ("Integer %8.8X%8.8X\n", + ACPI_HIDWORD (obj_desc->integer.value), + ACPI_LODWORD (obj_desc->integer.value)); break; case ACPI_TYPE_PACKAGE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Package count %X @ %p\n", - entry_desc->package.count, entry_desc->package.elements)); + acpi_os_printf ("Package count %X @ %p\n", + obj_desc->package.count, obj_desc->package.elements); /* * If elements exist, package vector pointer is valid, * and debug_level exceeds 1, dump package's elements. 
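The package case that follows dumps recursively — one acpi_ex_dump_operand() call per element — guarded so that a package whose element vector has not yet been built is reported but not dereferenced (see the loop just below). The shape of that traversal, with a simplified operand type standing in for union acpi_operand_object:

	#include <stdio.h>

	enum obj_type { OBJ_INTEGER, OBJ_PACKAGE };

	struct operand {
		enum obj_type	  type;
		unsigned int	  count;	/* packages only */
		struct operand	**elements;	/* packages only; may be NULL */
	};

	static void dump_operand(const struct operand *obj)
	{
		unsigned int i;

		if (!obj) {
			printf("Null stack entry ptr\n");
			return;
		}

		if (obj->type == OBJ_PACKAGE) {
			printf("Package count %X @ %p\n", obj->count,
			       (void *) obj->elements);

			/* recurse only when the vector actually exists */
			if (obj->count && obj->elements)
				for (i = 0; i < obj->count; i++)
					dump_operand(obj->elements[i]);
		}
	}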
*/ - if (entry_desc->package.count && - entry_desc->package.elements && + if (obj_desc->package.count && + obj_desc->package.elements && acpi_dbg_level > 1) { - acpi_operand_object**element; - u16 element_index; - - for (element_index = 0, element = entry_desc->package.elements; - element_index < entry_desc->package.count; + for (element_index = 0, element = obj_desc->package.elements; + element_index < obj_desc->package.count; ++element_index, ++element) { acpi_ex_dump_operand (*element); } } - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "\n")); - + acpi_os_printf ("\n"); break; case ACPI_TYPE_REGION: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Region %s (%X)", - acpi_ut_get_region_name (entry_desc->region.space_id), - entry_desc->region.space_id)); + acpi_os_printf ("Region %s (%X)", + acpi_ut_get_region_name (obj_desc->region.space_id), + obj_desc->region.space_id); /* * If the address and length have not been evaluated, * don't print them. */ - if (!(entry_desc->region.flags & AOPOBJ_DATA_VALID)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "\n")); + if (!(obj_desc->region.flags & AOPOBJ_DATA_VALID)) { + acpi_os_printf ("\n"); } else { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, " base %8.8X%8.8X Length %X\n", - HIDWORD(entry_desc->region.address), - LODWORD(entry_desc->region.address), - entry_desc->region.length)); + acpi_os_printf (" base %8.8X%8.8X Length %X\n", + ACPI_HIDWORD (obj_desc->region.address), + ACPI_LODWORD (obj_desc->region.address), + obj_desc->region.length); } break; case ACPI_TYPE_STRING: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "String length %X @ %p \"", - entry_desc->string.length, entry_desc->string.pointer)); - - for (i = 0; i < entry_desc->string.length; i++) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "%c", - entry_desc->string.pointer[i])); - } - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "\"\n")); + acpi_os_printf ("String length %X @ %p ", + obj_desc->string.length, obj_desc->string.pointer); + acpi_ut_print_string (obj_desc->string.pointer, ACPI_UINT8_MAX); + acpi_os_printf ("\n"); break; - case INTERNAL_TYPE_BANK_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Bank_field\n")); + acpi_os_printf ("bank_field\n"); break; - case INTERNAL_TYPE_REGION_FIELD: + case ACPI_TYPE_LOCAL_REGION_FIELD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, - "Region_field: bits=%X bitaccwidth=%X lock=%X update=%X at byte=%X bit=%X of below:\n", - entry_desc->field.bit_length, entry_desc->field.access_bit_width, - entry_desc->field.lock_rule, entry_desc->field.update_rule, - entry_desc->field.base_byte_offset, entry_desc->field.start_field_bit_offset)); - DUMP_STACK_ENTRY (entry_desc->field.region_obj); + acpi_os_printf ( + "region_field: Bits=%X acc_width=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n", + obj_desc->field.bit_length, obj_desc->field.access_byte_width, + obj_desc->field.field_flags & AML_FIELD_LOCK_RULE_MASK, + obj_desc->field.field_flags & AML_FIELD_UPDATE_RULE_MASK, + obj_desc->field.base_byte_offset, obj_desc->field.start_field_bit_offset); + ACPI_DUMP_STACK_ENTRY (obj_desc->field.region_obj); break; - case INTERNAL_TYPE_INDEX_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Index_field\n")); + acpi_os_printf ("index_field\n"); break; case ACPI_TYPE_BUFFER_FIELD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, - "Buffer_field: %X bits at byte %X bit %X of \n", - entry_desc->buffer_field.bit_length, entry_desc->buffer_field.base_byte_offset, - entry_desc->buffer_field.start_field_bit_offset)); + acpi_os_printf ( + "buffer_field: 
%X bits at byte %X bit %X of \n", + obj_desc->buffer_field.bit_length, obj_desc->buffer_field.base_byte_offset, + obj_desc->buffer_field.start_field_bit_offset); - if (!entry_desc->buffer_field.buffer_obj) - { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "*NULL* \n")); + if (!obj_desc->buffer_field.buffer_obj) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "*NULL* \n")); } - - else if (ACPI_TYPE_BUFFER != - entry_desc->buffer_field.buffer_obj->common.type) - { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "*not a Buffer* \n")); + else if (ACPI_GET_OBJECT_TYPE (obj_desc->buffer_field.buffer_obj) != ACPI_TYPE_BUFFER) { + acpi_os_printf ("*not a Buffer* \n"); } - - else - { - DUMP_STACK_ENTRY (entry_desc->buffer_field.buffer_obj); + else { + ACPI_DUMP_STACK_ENTRY (obj_desc->buffer_field.buffer_obj); } break; @@ -437,81 +333,67 @@ case ACPI_TYPE_EVENT: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Event\n")); + acpi_os_printf ("Event\n"); break; case ACPI_TYPE_METHOD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, + acpi_os_printf ( "Method(%X) @ %p:%X\n", - entry_desc->method.param_count, - entry_desc->method.aml_start, entry_desc->method.aml_length)); + obj_desc->method.param_count, + obj_desc->method.aml_start, obj_desc->method.aml_length); break; case ACPI_TYPE_MUTEX: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Mutex\n")); + acpi_os_printf ("Mutex\n"); break; case ACPI_TYPE_DEVICE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Device\n")); + acpi_os_printf ("Device\n"); break; case ACPI_TYPE_POWER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Power\n")); + acpi_os_printf ("Power\n"); break; case ACPI_TYPE_PROCESSOR: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Processor\n")); + acpi_os_printf ("Processor\n"); break; case ACPI_TYPE_THERMAL: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Thermal\n")); + acpi_os_printf ("Thermal\n"); break; default: - /* unknown Entry_desc->Common.Type value */ - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "Unknown Type %X\n", - entry_desc->common.type)); - - /* Back up to previous entry */ - - entry_desc--; - + /* Unknown Type */ - /* TBD: [Restructure] Change to use dump object routine !! */ - /* What is all of this?? 
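Throughout these dump routines, 64-bit values are printed as two 32-bit halves via ACPI_HIDWORD/ACPI_LODWORD and a "%8.8X%8.8X" format, presumably because a portable 64-bit printf specifier could not be assumed across every architecture and compiler ACPICA targeted. A standalone sketch of what those macros do:

	#include <stdint.h>
	#include <stdio.h>

	#define HIDWORD(v)	((uint32_t) (((uint64_t) (v)) >> 32))
	#define LODWORD(v)	((uint32_t) ((uint64_t) (v)))

	int main(void)
	{
		uint64_t value = 0x123456789ABCDEF0ULL;

		/* prints "Integer 123456789ABCDEF0" */
		printf("Integer %8.8X%8.8X\n",
		       HIDWORD(value), LODWORD(value));
		return 0;
	}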
*/ - - DUMP_BUFFER (entry_desc, sizeof (acpi_operand_object)); - DUMP_BUFFER (++entry_desc, sizeof (acpi_operand_object)); - DUMP_BUFFER (++entry_desc, sizeof (acpi_operand_object)); + acpi_os_printf ("Unknown Type %X\n", ACPI_GET_OBJECT_TYPE (obj_desc)); break; - } - return (AE_OK); + return; } /***************************************************************************** * - * FUNCTION: Acpi_ex_dump_operands + * FUNCTION: acpi_ex_dump_operands * - * PARAMETERS: Interpreter_mode - Load or Exec + * PARAMETERS: interpreter_mode - Load or Exec * *Ident - Identification - * Num_levels - # of stack entries to dump above line + * num_levels - # of stack entries to dump above line * *Note - Output notation * * DESCRIPTION: Dump the object stack @@ -520,54 +402,45 @@ void acpi_ex_dump_operands ( - acpi_operand_object **operands, - operating_mode interpreter_mode, - NATIVE_CHAR *ident, - u32 num_levels, - NATIVE_CHAR *note, - NATIVE_CHAR *module_name, - u32 line_number) + union acpi_operand_object **operands, + acpi_interpreter_mode interpreter_mode, + char *ident, + u32 num_levels, + char *note, + char *module_name, + u32 line_number) { - NATIVE_UINT i; - acpi_operand_object **entry_desc; + acpi_native_uint i; + union acpi_operand_object **obj_desc; - PROC_NAME ("Ex_dump_operands"); + ACPI_FUNCTION_NAME ("ex_dump_operands"); - if (!ident) - { + if (!ident) { ident = "?"; } - if (!note) - { + if (!note) { note = "?"; } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "************* Operand Stack Contents (Opcode [%s], %d Operands)\n", ident, num_levels)); - if (num_levels == 0) - { + if (num_levels == 0) { num_levels = 1; } - /* Dump the stack starting at the top, working down */ - - for (i = 0; num_levels > 0; i--, num_levels--) - { - entry_desc = &operands[i]; + /* Dump the operand stack starting at the top */ - if (ACPI_FAILURE (acpi_ex_dump_operand (*entry_desc))) - { - break; - } + for (i = 0; num_levels > 0; i--, num_levels--) { + obj_desc = &operands[i]; + acpi_ex_dump_operand (*obj_desc); } - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "************* Stack dump from %s(%d), %s\n", module_name, line_number, note)); return; @@ -576,7 +449,59 @@ /***************************************************************************** * - * FUNCTION: Acpi_ex_dump_node + * FUNCTION: acpi_ex_out* + * + * PARAMETERS: Title - Descriptive text + * Value - Value to be displayed + * + * DESCRIPTION: Object dump output formatting functions. These functions + * reduce the number of format strings required and keeps them + * all in one place for easy modification. 
+ * + ****************************************************************************/ + +void +acpi_ex_out_string ( + char *title, + char *value) +{ + acpi_os_printf ("%20s : %s\n", title, value); +} + +void +acpi_ex_out_pointer ( + char *title, + void *value) +{ + acpi_os_printf ("%20s : %p\n", title, value); +} + +void +acpi_ex_out_integer ( + char *title, + u32 value) +{ + acpi_os_printf ("%20s : %X\n", title, value); +} + +void +acpi_ex_out_address ( + char *title, + acpi_physical_address value) +{ + +#if ACPI_MACHINE_WIDTH == 16 + acpi_os_printf ("%20s : %p\n", title, value); +#else + acpi_os_printf ("%20s : %8.8X%8.8X\n", title, + ACPI_HIDWORD (value), ACPI_LODWORD (value)); +#endif +} + + +/***************************************************************************** + * + * FUNCTION: acpi_ex_dump_node * * PARAMETERS: *Node - Descriptor to dump * Flags - Force display @@ -587,37 +512,34 @@ void acpi_ex_dump_node ( - acpi_namespace_node *node, - u32 flags) + struct acpi_namespace_node *node, + u32 flags) { - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); - if (!flags) - { - if (!((ACPI_LV_OBJECTS & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) - { + if (!flags) { + if (!((ACPI_LV_OBJECTS & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) { return; } } - - acpi_os_printf ("%20s : %4.4s\n", "Name", (char*)&node->name); - acpi_os_printf ("%20s : %s\n", "Type", acpi_ut_get_type_name (node->type)); - acpi_os_printf ("%20s : %X\n", "Flags", node->flags); - acpi_os_printf ("%20s : %X\n", "Owner Id", node->owner_id); - acpi_os_printf ("%20s : %X\n", "Reference Count", node->reference_count); - acpi_os_printf ("%20s : %p\n", "Attached Object", node->object); - acpi_os_printf ("%20s : %p\n", "Child_list", node->child); - acpi_os_printf ("%20s : %p\n", "Next_peer", node->peer); - acpi_os_printf ("%20s : %p\n", "Parent", acpi_ns_get_parent_object (node)); + acpi_os_printf ("%20s : %4.4s\n", "Name", node->name.ascii); + acpi_ex_out_string ("Type", acpi_ut_get_type_name (node->type)); + acpi_ex_out_integer ("Flags", node->flags); + acpi_ex_out_integer ("Owner Id", node->owner_id); + acpi_ex_out_integer ("Reference Count", node->reference_count); + acpi_ex_out_pointer ("Attached Object", acpi_ns_get_attached_object (node)); + acpi_ex_out_pointer ("child_list", node->child); + acpi_ex_out_pointer ("next_peer", node->peer); + acpi_ex_out_pointer ("Parent", acpi_ns_get_parent_node (node)); } /***************************************************************************** * - * FUNCTION: Acpi_ex_dump_object_descriptor + * FUNCTION: acpi_ex_dump_object_descriptor * * PARAMETERS: *Object - Descriptor to dump * Flags - Force display @@ -628,253 +550,240 @@ void acpi_ex_dump_object_descriptor ( - acpi_operand_object *obj_desc, - u32 flags) + union acpi_operand_object *obj_desc, + u32 flags) { - const acpi_opcode_info *op_info; + u32 i; - FUNCTION_TRACE ("Ex_dump_object_descriptor"); + ACPI_FUNCTION_TRACE ("ex_dump_object_descriptor"); - if (!flags) - { - if (!((ACPI_LV_OBJECTS & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) - { - return; + if (!flags) { + if (!((ACPI_LV_OBJECTS & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) { + return_VOID; } } - if (!(VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_INTERNAL))) - { - acpi_os_printf ("%p is not a valid ACPI object\n", obj_desc); + if (ACPI_GET_DESCRIPTOR_TYPE (obj_desc) == ACPI_DESC_TYPE_NAMED) { + acpi_ex_dump_node ((struct acpi_namespace_node *) obj_desc, flags); + acpi_os_printf ("\nAttached Object (%p):\n", ((struct acpi_namespace_node *) 
obj_desc)->object); + acpi_ex_dump_object_descriptor (((struct acpi_namespace_node *) obj_desc)->object, flags); return; } + if (ACPI_GET_DESCRIPTOR_TYPE (obj_desc) != ACPI_DESC_TYPE_OPERAND) { + acpi_os_printf ("ex_dump_object_descriptor: %p is not a valid ACPI object\n", obj_desc); + return_VOID; + } + /* Common Fields */ - acpi_os_printf ("%20s : %X\n", "Reference Count", obj_desc->common.reference_count); - acpi_os_printf ("%20s : %X\n", "Flags", obj_desc->common.flags); + acpi_ex_out_string ("Type", acpi_ut_get_object_type_name (obj_desc)); + acpi_ex_out_integer ("Reference Count", obj_desc->common.reference_count); + acpi_ex_out_integer ("Flags", obj_desc->common.flags); /* Object-specific Fields */ - switch (obj_desc->common.type) - { + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { case ACPI_TYPE_INTEGER: - acpi_os_printf ("%20s : %s\n", "Type", "Integer"); - acpi_os_printf ("%20s : %X%8.8X\n", "Value", HIDWORD (obj_desc->integer.value), - LODWORD (obj_desc->integer.value)); + acpi_os_printf ("%20s : %8.8X%8.8X\n", "Value", + ACPI_HIDWORD (obj_desc->integer.value), + ACPI_LODWORD (obj_desc->integer.value)); break; case ACPI_TYPE_STRING: - acpi_os_printf ("%20s : %s\n", "Type", "String"); - acpi_os_printf ("%20s : %X\n", "Length", obj_desc->string.length); - acpi_os_printf ("%20s : %p\n", "Pointer", obj_desc->string.pointer); + acpi_ex_out_integer ("Length", obj_desc->string.length); + + acpi_os_printf ("%20s : %p ", "Pointer", obj_desc->string.pointer); + acpi_ut_print_string (obj_desc->string.pointer, ACPI_UINT8_MAX); + acpi_os_printf ("\n"); break; case ACPI_TYPE_BUFFER: - acpi_os_printf ("%20s : %s\n", "Type", "Buffer"); - acpi_os_printf ("%20s : %X\n", "Length", obj_desc->buffer.length); - acpi_os_printf ("%20s : %p\n", "Pointer", obj_desc->buffer.pointer); + acpi_ex_out_integer ("Length", obj_desc->buffer.length); + acpi_ex_out_pointer ("Pointer", obj_desc->buffer.pointer); + ACPI_DUMP_BUFFER (obj_desc->buffer.pointer, obj_desc->buffer.length); break; case ACPI_TYPE_PACKAGE: - acpi_os_printf ("%20s : %s\n", "Type", "Package"); - acpi_os_printf ("%20s : %X\n", "Flags", obj_desc->package.flags); - acpi_os_printf ("%20s : %X\n", "Count", obj_desc->package.count); - acpi_os_printf ("%20s : %p\n", "Elements", obj_desc->package.elements); - acpi_os_printf ("%20s : %p\n", "Next_element", obj_desc->package.next_element); - break; - - - case ACPI_TYPE_BUFFER_FIELD: - - acpi_os_printf ("%20s : %s\n", "Type", "Buffer_field"); - acpi_os_printf ("%20s : %X\n", "Bit_length", obj_desc->buffer_field.bit_length); - acpi_os_printf ("%20s : %X\n", "Bit_offset", obj_desc->buffer_field.start_field_bit_offset); - acpi_os_printf ("%20s : %X\n", "Base_byte_offset",obj_desc->buffer_field.base_byte_offset); - acpi_os_printf ("%20s : %p\n", "Buffer_obj", obj_desc->buffer_field.buffer_obj); + acpi_ex_out_integer ("Flags", obj_desc->package.flags); + acpi_ex_out_integer ("Count", obj_desc->package.count); + acpi_ex_out_pointer ("Elements", obj_desc->package.elements); + + /* Dump the package contents */ + + if (obj_desc->package.count > 0) { + acpi_os_printf ("\nPackage Contents:\n"); + for (i = 0; i < obj_desc->package.count; i++) { + acpi_os_printf ("[%.3d] %p", i, obj_desc->package.elements[i]); + if (obj_desc->package.elements[i]) { + acpi_os_printf (" %s", acpi_ut_get_object_type_name (obj_desc->package.elements[i])); + } + acpi_os_printf ("\n"); + } + } break; case ACPI_TYPE_DEVICE: - acpi_os_printf ("%20s : %s\n", "Type", "Device"); - acpi_os_printf ("%20s : %p\n", "Addr_handler", 
obj_desc->device.addr_handler); - acpi_os_printf ("%20s : %p\n", "Sys_handler", obj_desc->device.sys_handler); - acpi_os_printf ("%20s : %p\n", "Drv_handler", obj_desc->device.drv_handler); + acpi_ex_out_pointer ("address_space", obj_desc->device.address_space); + acpi_ex_out_pointer ("system_notify", obj_desc->device.system_notify); + acpi_ex_out_pointer ("device_notify", obj_desc->device.device_notify); break; + case ACPI_TYPE_EVENT: - acpi_os_printf ("%20s : %s\n", "Type", "Event"); - acpi_os_printf ("%20s : %X\n", "Semaphore", obj_desc->event.semaphore); + acpi_ex_out_pointer ("Semaphore", obj_desc->event.semaphore); break; case ACPI_TYPE_METHOD: - acpi_os_printf ("%20s : %s\n", "Type", "Method"); - acpi_os_printf ("%20s : %X\n", "Param_count", obj_desc->method.param_count); - acpi_os_printf ("%20s : %X\n", "Concurrency", obj_desc->method.concurrency); - acpi_os_printf ("%20s : %p\n", "Semaphore", obj_desc->method.semaphore); - acpi_os_printf ("%20s : %X\n", "Aml_length", obj_desc->method.aml_length); - acpi_os_printf ("%20s : %X\n", "Aml_start", obj_desc->method.aml_start); + acpi_ex_out_integer ("param_count", obj_desc->method.param_count); + acpi_ex_out_integer ("Concurrency", obj_desc->method.concurrency); + acpi_ex_out_pointer ("Semaphore", obj_desc->method.semaphore); + acpi_ex_out_integer ("owning_id", obj_desc->method.owning_id); + acpi_ex_out_integer ("aml_length", obj_desc->method.aml_length); + acpi_ex_out_pointer ("aml_start", obj_desc->method.aml_start); break; case ACPI_TYPE_MUTEX: - acpi_os_printf ("%20s : %s\n", "Type", "Mutex"); - acpi_os_printf ("%20s : %X\n", "Sync_level", obj_desc->mutex.sync_level); - acpi_os_printf ("%20s : %p\n", "Owner", obj_desc->mutex.owner); - acpi_os_printf ("%20s : %X\n", "Acquisition_depth", obj_desc->mutex.acquisition_depth); - acpi_os_printf ("%20s : %p\n", "Semaphore", obj_desc->mutex.semaphore); + acpi_ex_out_integer ("sync_level", obj_desc->mutex.sync_level); + acpi_ex_out_pointer ("owner_thread", obj_desc->mutex.owner_thread); + acpi_ex_out_integer ("acquisition_depth",obj_desc->mutex.acquisition_depth); + acpi_ex_out_pointer ("Semaphore", obj_desc->mutex.semaphore); break; case ACPI_TYPE_REGION: - acpi_os_printf ("%20s : %s\n", "Type", "Region"); - acpi_os_printf ("%20s : %X\n", "Space_id", obj_desc->region.space_id); - acpi_os_printf ("%20s : %X\n", "Flags", obj_desc->region.flags); - acpi_os_printf ("%20s : %X\n", "Address", obj_desc->region.address); - acpi_os_printf ("%20s : %X\n", "Length", obj_desc->region.length); - acpi_os_printf ("%20s : %p\n", "Addr_handler", obj_desc->region.addr_handler); - acpi_os_printf ("%20s : %p\n", "Next", obj_desc->region.next); + acpi_ex_out_integer ("space_id", obj_desc->region.space_id); + acpi_ex_out_integer ("Flags", obj_desc->region.flags); + acpi_ex_out_address ("Address", obj_desc->region.address); + acpi_ex_out_integer ("Length", obj_desc->region.length); + acpi_ex_out_pointer ("address_space", obj_desc->region.address_space); + acpi_ex_out_pointer ("Next", obj_desc->region.next); break; case ACPI_TYPE_POWER: - acpi_os_printf ("%20s : %s\n", "Type", "Power_resource"); - acpi_os_printf ("%20s : %X\n", "System_level", obj_desc->power_resource.system_level); - acpi_os_printf ("%20s : %X\n", "Resource_order", obj_desc->power_resource.resource_order); - acpi_os_printf ("%20s : %p\n", "Sys_handler", obj_desc->power_resource.sys_handler); - acpi_os_printf ("%20s : %p\n", "Drv_handler", obj_desc->power_resource.drv_handler); + acpi_ex_out_integer ("system_level", 
obj_desc->power_resource.system_level); + acpi_ex_out_integer ("resource_order", obj_desc->power_resource.resource_order); + acpi_ex_out_pointer ("system_notify", obj_desc->power_resource.system_notify); + acpi_ex_out_pointer ("device_notify", obj_desc->power_resource.device_notify); break; case ACPI_TYPE_PROCESSOR: - acpi_os_printf ("%20s : %s\n", "Type", "Processor"); - acpi_os_printf ("%20s : %X\n", "Processor ID", obj_desc->processor.proc_id); - acpi_os_printf ("%20s : %X\n", "Length", obj_desc->processor.length); - acpi_os_printf ("%20s : %X\n", "Address", obj_desc->processor.address); - acpi_os_printf ("%20s : %p\n", "Sys_handler", obj_desc->processor.sys_handler); - acpi_os_printf ("%20s : %p\n", "Drv_handler", obj_desc->processor.drv_handler); - acpi_os_printf ("%20s : %p\n", "Addr_handler", obj_desc->processor.addr_handler); + acpi_ex_out_integer ("Processor ID", obj_desc->processor.proc_id); + acpi_ex_out_integer ("Length", obj_desc->processor.length); + acpi_ex_out_address ("Address", (acpi_physical_address) obj_desc->processor.address); + acpi_ex_out_pointer ("system_notify", obj_desc->processor.system_notify); + acpi_ex_out_pointer ("device_notify", obj_desc->processor.device_notify); + acpi_ex_out_pointer ("address_space", obj_desc->processor.address_space); break; case ACPI_TYPE_THERMAL: - acpi_os_printf ("%20s : %s\n", "Type", "Thermal_zone"); - acpi_os_printf ("%20s : %p\n", "Sys_handler", obj_desc->thermal_zone.sys_handler); - acpi_os_printf ("%20s : %p\n", "Drv_handler", obj_desc->thermal_zone.drv_handler); - acpi_os_printf ("%20s : %p\n", "Addr_handler", obj_desc->thermal_zone.addr_handler); + acpi_ex_out_pointer ("system_notify", obj_desc->thermal_zone.system_notify); + acpi_ex_out_pointer ("device_notify", obj_desc->thermal_zone.device_notify); + acpi_ex_out_pointer ("address_space", obj_desc->thermal_zone.address_space); break; - case INTERNAL_TYPE_REGION_FIELD: - - acpi_os_printf ("%20s : %p\n", "Access_bit_width", obj_desc->field.access_bit_width); - acpi_os_printf ("%20s : %p\n", "Bit_length", obj_desc->field.bit_length); - acpi_os_printf ("%20s : %p\n", "Base_byte_offset",obj_desc->field.base_byte_offset); - acpi_os_printf ("%20s : %p\n", "Bit_offset", obj_desc->field.start_field_bit_offset); - acpi_os_printf ("%20s : %p\n", "Region_obj", obj_desc->field.region_obj); - break; - - - case INTERNAL_TYPE_BANK_FIELD: - - acpi_os_printf ("%20s : %s\n", "Type", "Bank_field"); - acpi_os_printf ("%20s : %X\n", "Access_bit_width", obj_desc->bank_field.access_bit_width); - acpi_os_printf ("%20s : %X\n", "Lock_rule", obj_desc->bank_field.lock_rule); - acpi_os_printf ("%20s : %X\n", "Update_rule", obj_desc->bank_field.update_rule); - acpi_os_printf ("%20s : %X\n", "Bit_length", obj_desc->bank_field.bit_length); - acpi_os_printf ("%20s : %X\n", "Bit_offset", obj_desc->bank_field.start_field_bit_offset); - acpi_os_printf ("%20s : %X\n", "Base_byte_offset", obj_desc->bank_field.base_byte_offset); - acpi_os_printf ("%20s : %X\n", "Value", obj_desc->bank_field.value); - acpi_os_printf ("%20s : %p\n", "Region_obj", obj_desc->bank_field.region_obj); - acpi_os_printf ("%20s : %X\n", "Bank_register_obj", obj_desc->bank_field.bank_register_obj); - break; - - - case INTERNAL_TYPE_INDEX_FIELD: - - acpi_os_printf ("%20s : %s\n", "Type", "Index_field"); - acpi_os_printf ("%20s : %X\n", "Access_bit_width", obj_desc->index_field.access_bit_width); - acpi_os_printf ("%20s : %X\n", "Lock_rule", obj_desc->index_field.lock_rule); - acpi_os_printf ("%20s : %X\n", "Update_rule", 
obj_desc->index_field.update_rule); - acpi_os_printf ("%20s : %X\n", "Bit_length", obj_desc->index_field.bit_length); - acpi_os_printf ("%20s : %X\n", "Bit_offset", obj_desc->index_field.start_field_bit_offset); - acpi_os_printf ("%20s : %X\n", "Value", obj_desc->index_field.value); - acpi_os_printf ("%20s : %X\n", "Index", obj_desc->index_field.index_obj); - acpi_os_printf ("%20s : %X\n", "Data", obj_desc->index_field.data_obj); - break; - - - case INTERNAL_TYPE_REFERENCE: - - op_info = acpi_ps_get_opcode_info (obj_desc->reference.opcode); + case ACPI_TYPE_BUFFER_FIELD: + case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: + + acpi_ex_out_integer ("field_flags", obj_desc->common_field.field_flags); + acpi_ex_out_integer ("access_byte_width", obj_desc->common_field.access_byte_width); + acpi_ex_out_integer ("bit_length", obj_desc->common_field.bit_length); + acpi_ex_out_integer ("fld_bit_offset", obj_desc->common_field.start_field_bit_offset); + acpi_ex_out_integer ("base_byte_offset", obj_desc->common_field.base_byte_offset); + acpi_ex_out_integer ("datum_valid_bits", obj_desc->common_field.datum_valid_bits); + acpi_ex_out_integer ("end_fld_valid_bits", obj_desc->common_field.end_field_valid_bits); + acpi_ex_out_integer ("end_buf_valid_bits", obj_desc->common_field.end_buffer_valid_bits); + acpi_ex_out_pointer ("parent_node", obj_desc->common_field.node); + + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_BUFFER_FIELD: + acpi_ex_out_pointer ("buffer_obj", obj_desc->buffer_field.buffer_obj); + break; + + case ACPI_TYPE_LOCAL_REGION_FIELD: + acpi_ex_out_pointer ("region_obj", obj_desc->field.region_obj); + break; + + case ACPI_TYPE_LOCAL_BANK_FIELD: + acpi_ex_out_integer ("Value", obj_desc->bank_field.value); + acpi_ex_out_pointer ("region_obj", obj_desc->bank_field.region_obj); + acpi_ex_out_pointer ("bank_obj", obj_desc->bank_field.bank_obj); + break; + + case ACPI_TYPE_LOCAL_INDEX_FIELD: + acpi_ex_out_integer ("Value", obj_desc->index_field.value); + acpi_ex_out_pointer ("Index", obj_desc->index_field.index_obj); + acpi_ex_out_pointer ("Data", obj_desc->index_field.data_obj); + break; - acpi_os_printf ("%20s : %s\n", "Type", "Reference"); - acpi_os_printf ("%20s : %X\n", "Target_type", obj_desc->reference.target_type); - acpi_os_printf ("%20s : %s\n", "Opcode", op_info->name); - acpi_os_printf ("%20s : %X\n", "Offset", obj_desc->reference.offset); - acpi_os_printf ("%20s : %p\n", "Obj_desc", obj_desc->reference.object); - acpi_os_printf ("%20s : %p\n", "Node", obj_desc->reference.node); - acpi_os_printf ("%20s : %p\n", "Where", obj_desc->reference.where); + default: + /* All object types covered above */ + break; + } break; - case INTERNAL_TYPE_ADDRESS_HANDLER: + case ACPI_TYPE_LOCAL_REFERENCE: - acpi_os_printf ("%20s : %s\n", "Type", "Address Handler"); - acpi_os_printf ("%20s : %X\n", "Space_id", obj_desc->addr_handler.space_id); - acpi_os_printf ("%20s : %p\n", "Next", obj_desc->addr_handler.next); - acpi_os_printf ("%20s : %p\n", "Region_list", obj_desc->addr_handler.region_list); - acpi_os_printf ("%20s : %p\n", "Node", obj_desc->addr_handler.node); - acpi_os_printf ("%20s : %p\n", "Handler", obj_desc->addr_handler.handler); - acpi_os_printf ("%20s : %p\n", "Context", obj_desc->addr_handler.context); + acpi_ex_out_integer ("target_type", obj_desc->reference.target_type); + acpi_ex_out_string ("Opcode", (acpi_ps_get_opcode_info (obj_desc->reference.opcode))->name); + acpi_ex_out_integer ("Offset", 
obj_desc->reference.offset); + acpi_ex_out_pointer ("obj_desc", obj_desc->reference.object); + acpi_ex_out_pointer ("Node", obj_desc->reference.node); + acpi_ex_out_pointer ("Where", obj_desc->reference.where); break; - case INTERNAL_TYPE_NOTIFY: + case ACPI_TYPE_LOCAL_ADDRESS_HANDLER: - acpi_os_printf ("%20s : %s\n", "Type", "Notify Handler"); - acpi_os_printf ("%20s : %p\n", "Node", obj_desc->notify_handler.node); - acpi_os_printf ("%20s : %p\n", "Handler", obj_desc->notify_handler.handler); - acpi_os_printf ("%20s : %p\n", "Context", obj_desc->notify_handler.context); + acpi_ex_out_integer ("space_id", obj_desc->address_space.space_id); + acpi_ex_out_pointer ("Next", obj_desc->address_space.next); + acpi_ex_out_pointer ("region_list", obj_desc->address_space.region_list); + acpi_ex_out_pointer ("Node", obj_desc->address_space.node); + acpi_ex_out_pointer ("Context", obj_desc->address_space.context); break; - case INTERNAL_TYPE_ALIAS: - case INTERNAL_TYPE_FIELD_DEFN: - case INTERNAL_TYPE_BANK_FIELD_DEFN: - case INTERNAL_TYPE_INDEX_FIELD_DEFN: - case INTERNAL_TYPE_IF: - case INTERNAL_TYPE_ELSE: - case INTERNAL_TYPE_WHILE: - case INTERNAL_TYPE_SCOPE: - case INTERNAL_TYPE_DEF_ANY: + case ACPI_TYPE_LOCAL_NOTIFY: - acpi_os_printf ("*** Structure display not implemented for type %X! ***\n", - obj_desc->common.type); + acpi_ex_out_pointer ("Node", obj_desc->notify.node); + acpi_ex_out_pointer ("Context", obj_desc->notify.context); break; + case ACPI_TYPE_LOCAL_ALIAS: + case ACPI_TYPE_LOCAL_EXTRA: + case ACPI_TYPE_LOCAL_DATA: default: - acpi_os_printf ("*** Cannot display unknown type %X! ***\n", obj_desc->common.type); + acpi_os_printf ("ex_dump_object_descriptor: Display not implemented for object type %s\n", + acpi_ut_get_object_type_name (obj_desc)); break; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exfield.c linux.21rc5-ac2/drivers/acpi/executer/exfield.c --- linux.21rc5/drivers/acpi/executer/exfield.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exfield.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,69 +1,85 @@ /****************************************************************************** * * Module Name: exfield - ACPI AML (p-code) execution - field manipulation - * $Revision: 95 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. 
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acdispat.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "achware.h" -#include "acevents.h" +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exfield") + ACPI_MODULE_NAME ("exfield") /******************************************************************************* * - * FUNCTION: Acpi_ex_read_data_from_field + * FUNCTION: acpi_ex_read_data_from_field * - * PARAMETERS: Mode - ACPI_READ or ACPI_WRITE - * *Field_node - Parent node for field to be accessed - * *Buffer - Value(s) to be read or written - * Buffer_length - Number of bytes to transfer + * PARAMETERS: walk_state - Current execution state + * obj_desc - The named field + * ret_buffer_desc - Where the return data object is stored * - * RETURN: Status3 + * RETURN: Status * - * DESCRIPTION: Read or write a named field + * DESCRIPTION: Read from a named field. Returns either an Integer or a + * Buffer, depending on the size of the field. * ******************************************************************************/ acpi_status acpi_ex_read_data_from_field ( - acpi_operand_object *obj_desc, - acpi_operand_object **ret_buffer_desc) + struct acpi_walk_state *walk_state, + union acpi_operand_object *obj_desc, + union acpi_operand_object **ret_buffer_desc) { - acpi_status status; - acpi_operand_object *buffer_desc; - u32 length; - void *buffer; + acpi_status status; + union acpi_operand_object *buffer_desc; + acpi_size length; + void *buffer; + u8 locked; - FUNCTION_TRACE_PTR ("Ex_read_data_from_field", obj_desc); + ACPI_FUNCTION_TRACE_PTR ("ex_read_data_from_field", obj_desc); /* Parameter validation */ @@ -72,6 +88,44 @@ return_ACPI_STATUS (AE_AML_NO_OPERAND); } + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_BUFFER_FIELD) { + /* + * If the buffer_field arguments have not been previously evaluated, + * evaluate them now and save the results. 
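The AOPOBJ_DATA_VALID check above is a lazy-evaluation pattern: a buffer field's AML operands are evaluated on first use and the result is cached behind a flag. A minimal stand-alone C sketch of the same pattern; the struct and helper names are illustrative, not the ACPICA API:

#include <stdio.h>

#define AOPOBJ_DATA_VALID 0x01           /* "operands already evaluated" */

struct field_obj {
    unsigned flags;
    int bit_offset;                      /* filled in on first access */
};

/* Hypothetical stand-in for acpi_ds_get_buffer_field_arguments() */
static int evaluate_args(struct field_obj *f)
{
    f->bit_offset = 8;                   /* pretend we parsed the operands */
    f->flags |= AOPOBJ_DATA_VALID;
    printf("operands evaluated once\n");
    return 0;
}

static int access_field(struct field_obj *f)
{
    /* Evaluate the field's operands only on the first access */
    if (!(f->flags & AOPOBJ_DATA_VALID)) {
        int status = evaluate_args(f);
        if (status)
            return status;
    }
    printf("using bit_offset=%d\n", f->bit_offset);
    return 0;
}

int main(void)
{
    struct field_obj f = { 0, 0 };
    access_field(&f);                    /* triggers evaluation */
    access_field(&f);                    /* uses the cached result */
    return 0;
}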
+ */ + if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { + status = acpi_ds_get_buffer_field_arguments (obj_desc); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + } + else if ((ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_LOCAL_REGION_FIELD) && + (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_SMBUS)) { + /* + * This is an SMBus read. We must create a buffer to hold the data + * and directly access the region handler. + */ + buffer_desc = acpi_ut_create_buffer_object (ACPI_SMBUS_BUFFER_SIZE); + if (!buffer_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* Lock entire transaction if requested */ + + locked = acpi_ex_acquire_global_lock (obj_desc->common_field.field_flags); + + /* + * Perform the read. + * Note: Smbus protocol value is passed in upper 16-bits of Function + */ + status = acpi_ex_access_region (obj_desc, 0, + ACPI_CAST_PTR (acpi_integer, buffer_desc->buffer.pointer), + ACPI_READ | (obj_desc->field.attribute << 16)); + acpi_ex_release_global_lock (locked); + goto exit; + } + /* * Allocate a buffer for the contents of the field. * @@ -82,28 +136,16 @@ * * Note: Field.length is in bits. */ - length = ROUND_BITS_UP_TO_BYTES (obj_desc->field.bit_length); - - if (length > sizeof (acpi_integer)) { + length = (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES (obj_desc->field.bit_length); + if (length > acpi_gbl_integer_byte_width) { /* Field is too large for an Integer, create a Buffer instead */ - buffer_desc = acpi_ut_create_internal_object (ACPI_TYPE_BUFFER); + buffer_desc = acpi_ut_create_buffer_object (length); if (!buffer_desc) { return_ACPI_STATUS (AE_NO_MEMORY); } - - /* Create the actual read buffer */ - - buffer_desc->buffer.pointer = ACPI_MEM_CALLOCATE (length); - if (!buffer_desc->buffer.pointer) { - acpi_ut_remove_reference (buffer_desc); - return_ACPI_STATUS (AE_NO_MEMORY); - } - - buffer_desc->buffer.length = length; buffer = buffer_desc->buffer.pointer; } - else { /* Field will fit within an Integer (normal case) */ @@ -112,39 +154,34 @@ return_ACPI_STATUS (AE_NO_MEMORY); } - length = sizeof (buffer_desc->integer.value); + length = acpi_gbl_integer_byte_width; + buffer_desc->integer.value = 0; buffer = &buffer_desc->integer.value; } + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "Obj=%p Type=%X Buf=%p Len=%X\n", + obj_desc, ACPI_GET_OBJECT_TYPE (obj_desc), buffer, (u32) length)); + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "field_write: bit_len=%X bit_off=%X byte_off=%X\n", + obj_desc->common_field.bit_length, + obj_desc->common_field.start_field_bit_offset, + obj_desc->common_field.base_byte_offset)); - /* Read from the appropriate field */ - - switch (obj_desc->common.type) { - case ACPI_TYPE_BUFFER_FIELD: - status = acpi_ex_access_buffer_field (ACPI_READ, obj_desc, buffer, length); - break; - - case INTERNAL_TYPE_REGION_FIELD: - status = acpi_ex_access_region_field (ACPI_READ, obj_desc, buffer, length); - break; + /* Lock entire transaction if requested */ - case INTERNAL_TYPE_BANK_FIELD: - status = acpi_ex_access_bank_field (ACPI_READ, obj_desc, buffer, length); - break; + locked = acpi_ex_acquire_global_lock (obj_desc->common_field.field_flags); - case INTERNAL_TYPE_INDEX_FIELD: - status = acpi_ex_access_index_field (ACPI_READ, obj_desc, buffer, length); - break; + /* Read from the field */ - default: - status = AE_AML_INTERNAL; - } + status = acpi_ex_extract_from_field (obj_desc, buffer, (u32) length); + acpi_ex_release_global_lock (locked); +exit: if (ACPI_FAILURE (status)) { acpi_ut_remove_reference (buffer_desc); } - else if 
(ret_buffer_desc) { *ret_buffer_desc = buffer_desc; } @@ -155,31 +192,33 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_write_data_to_field + * FUNCTION: acpi_ex_write_data_to_field * - * PARAMETERS: Mode - ACPI_READ or ACPI_WRITE - * *Field_node - Parent node for field to be accessed - * *Buffer - Value(s) to be read or written - * Buffer_length - Number of bytes to transfer + * PARAMETERS: source_desc - Contains data to write + * obj_desc - The named field * * RETURN: Status * - * DESCRIPTION: Read or write a named field + * DESCRIPTION: Write to a named field * ******************************************************************************/ - acpi_status acpi_ex_write_data_to_field ( - acpi_operand_object *source_desc, - acpi_operand_object *obj_desc) + union acpi_operand_object *source_desc, + union acpi_operand_object *obj_desc, + union acpi_operand_object **result_desc) { - acpi_status status; - u32 length; - void *buffer; + acpi_status status; + u32 length; + u32 required_length; + void *buffer; + void *new_buffer; + u8 locked; + union acpi_operand_object *buffer_desc; - FUNCTION_TRACE_PTR ("Ex_write_data_to_field", obj_desc); + ACPI_FUNCTION_TRACE_PTR ("ex_write_data_to_field", obj_desc); /* Parameter validation */ @@ -188,11 +227,67 @@ return_ACPI_STATUS (AE_AML_NO_OPERAND); } + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_BUFFER_FIELD) { + /* + * If the buffer_field arguments have not been previously evaluated, + * evaluate them now and save the results. + */ + if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { + status = acpi_ds_get_buffer_field_arguments (obj_desc); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + } + else if ((ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_LOCAL_REGION_FIELD) && + (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_SMBUS)) { + /* + * This is an SMBus write. We will bypass the entire field mechanism + * and handoff the buffer directly to the handler. + * + * Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE). + */ + if (ACPI_GET_OBJECT_TYPE (source_desc) != ACPI_TYPE_BUFFER) { + ACPI_REPORT_ERROR (("SMBus write requires Buffer, found type %s\n", + acpi_ut_get_object_type_name (source_desc))); + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); + } + + if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) { + ACPI_REPORT_ERROR (("SMBus write requires Buffer of length %X, found length %X\n", + ACPI_SMBUS_BUFFER_SIZE, source_desc->buffer.length)); + return_ACPI_STATUS (AE_AML_BUFFER_LIMIT); + } + + buffer_desc = acpi_ut_create_buffer_object (ACPI_SMBUS_BUFFER_SIZE); + if (!buffer_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + buffer = buffer_desc->buffer.pointer; + ACPI_MEMCPY (buffer, source_desc->buffer.pointer, ACPI_SMBUS_BUFFER_SIZE); + + /* Lock entire transaction if requested */ + + locked = acpi_ex_acquire_global_lock (obj_desc->common_field.field_flags); + + /* + * Perform the write (returns status and perhaps data in the same buffer) + * Note: SMBus protocol type is passed in upper 16-bits of Function. 
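Both SMBus paths above hand the region handler a single Function word: the low bits select read or write and the field's Attribute (the SMBus protocol) is shifted into the upper 16 bits. A hedged stand-alone model of that packing; the constant values match common ACPICA definitions but are assumptions here:

#include <stdio.h>
#include <stdint.h>

#define ACPI_READ    0u
#define ACPI_WRITE   1u
#define ACPI_IO_MASK 1u

/* Pack direction and SMBus protocol into one function word */
static uint32_t pack_function(uint32_t direction, uint32_t protocol)
{
    return direction | (protocol << 16);
}

int main(void)
{
    uint32_t fn = pack_function(ACPI_WRITE, 0x0A);   /* 0x0A: sample protocol */

    /* The handler unpacks the two halves again */
    printf("direction=%s protocol=0x%02X\n",
           (fn & ACPI_IO_MASK) == ACPI_WRITE ? "write" : "read",
           (unsigned) (fn >> 16));
    return 0;
}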
+ */ + status = acpi_ex_access_region (obj_desc, 0, + (acpi_integer *) buffer, + ACPI_WRITE | (obj_desc->field.attribute << 16)); + acpi_ex_release_global_lock (locked); + + *result_desc = buffer_desc; + return_ACPI_STATUS (status); + } /* * Get a pointer to the data to be written */ - switch (source_desc->common.type) { + switch (ACPI_GET_OBJECT_TYPE (source_desc)) { case ACPI_TYPE_INTEGER: buffer = &source_desc->integer.value; length = sizeof (source_desc->integer.value); @@ -212,315 +307,58 @@ return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } - /* - * Decode the type of field to be written + * We must have a buffer that is at least as long as the field + * we are writing to. This is because individual fields are + * indivisible and partial writes are not supported -- as per + * the ACPI specification. */ - switch (obj_desc->common.type) { - case ACPI_TYPE_BUFFER_FIELD: - status = acpi_ex_access_buffer_field (ACPI_WRITE, obj_desc, buffer, length); - break; - - case INTERNAL_TYPE_REGION_FIELD: - status = acpi_ex_access_region_field (ACPI_WRITE, obj_desc, buffer, length); - break; - - case INTERNAL_TYPE_BANK_FIELD: - status = acpi_ex_access_bank_field (ACPI_WRITE, obj_desc, buffer, length); - break; - - case INTERNAL_TYPE_INDEX_FIELD: - status = acpi_ex_access_index_field (ACPI_WRITE, obj_desc, buffer, length); - break; - - default: - return_ACPI_STATUS (AE_AML_INTERNAL); - } - - - return_ACPI_STATUS (status); -} + new_buffer = NULL; + required_length = ACPI_ROUND_BITS_UP_TO_BYTES (obj_desc->common_field.bit_length); + if (length < required_length) { + /* We need to create a new buffer */ -/******************************************************************************* - * - * FUNCTION: Acpi_ex_access_buffer_field - * - * PARAMETERS: Mode - ACPI_READ or ACPI_WRITE - * *Field_node - Parent node for field to be accessed - * *Buffer - Value(s) to be read or written - * Buffer_length - Number of bytes to transfer - * - * RETURN: Status - * - * DESCRIPTION: Read or write a named field - * - ******************************************************************************/ - -acpi_status -acpi_ex_access_buffer_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length) -{ - acpi_status status; - - - FUNCTION_TRACE_PTR ("Ex_access_buffer_field", obj_desc); - - - /* - * If the Buffer_field arguments have not been previously evaluated, - * evaluate them now and save the results. 
- */ - if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { - status = acpi_ds_get_buffer_field_arguments (obj_desc); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + new_buffer = ACPI_MEM_CALLOCATE (required_length); + if (!new_buffer) { + return_ACPI_STATUS (AE_NO_MEMORY); } - } - - - status = acpi_ex_common_access_field (mode, obj_desc, buffer, buffer_length); - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ex_access_region_field - * - * PARAMETERS: Mode - ACPI_READ or ACPI_WRITE - * *Field_node - Parent node for field to be accessed - * *Buffer - Value(s) to be read or written - * Buffer_length - Number of bytes to transfer - * - * RETURN: Status - * - * DESCRIPTION: Read or write a named field - * - ******************************************************************************/ - -acpi_status -acpi_ex_access_region_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length) -{ - acpi_status status; - u8 locked; - - - FUNCTION_TRACE_PTR ("Ex_access_region_field", obj_desc); - - - /* - * Get the global lock if needed - */ - locked = acpi_ex_acquire_global_lock (obj_desc->field.lock_rule); - - status = acpi_ex_common_access_field (mode, obj_desc, buffer, buffer_length); - - /* - * Release global lock if we acquired it earlier - */ - acpi_ex_release_global_lock (locked); - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ex_access_bank_field - * - * PARAMETERS: Mode - ACPI_READ or ACPI_WRITE - * *Field_node - Parent node for field to be accessed - * *Buffer - Value(s) to be read or written - * Buffer_length - Number of bytes to transfer - * - * RETURN: Status - * - * DESCRIPTION: Read or write a Bank Field - * - ******************************************************************************/ - -acpi_status -acpi_ex_access_bank_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length) -{ - acpi_status status; - u8 locked; - - - FUNCTION_TRACE_PTR ("Ex_access_bank_field", obj_desc); - - - /* - * Get the global lock if needed - */ - locked = acpi_ex_acquire_global_lock (obj_desc->bank_field.lock_rule); - - - /* - * Write the Bank_value to the Bank_register to select the bank. - * The Bank_value for this Bank_field is specified in the - * Bank_field ASL declaration. The Bank_register is always a Field in - * an operation region. - */ - status = acpi_ex_common_access_field (ACPI_WRITE, - obj_desc->bank_field.bank_register_obj, - &obj_desc->bank_field.value, - sizeof (obj_desc->bank_field.value)); - if (ACPI_FAILURE (status)) { - goto cleanup; - } - - /* - * The bank was successfully selected, now read or write the actual - * data. 
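The removed Acpi_ex_access_bank_field above captures the two-step bank protocol: store the BankValue into the bank register first, then perform the real data access. The same sequencing modeled with plain C stand-ins:

#include <stdio.h>
#include <stdint.h>

static uint8_t bank_register;             /* selects which bank is visible */
static uint8_t banks[2][4];               /* two banks of four registers */

/* Step 1: select the bank; step 2: the actual data access */
static uint8_t bank_field_read(uint8_t bank_value, unsigned reg)
{
    bank_register = bank_value;
    return banks[bank_register][reg];
}

int main(void)
{
    banks[1][2] = 0x5A;
    printf("bank 1, reg 2 = 0x%02X\n", bank_field_read(1, 2));
    return 0;
}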
- */ - status = acpi_ex_common_access_field (mode, obj_desc, buffer, buffer_length); - - -cleanup: - /* - * Release global lock if we acquired it earlier - */ - acpi_ex_release_global_lock (locked); - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ex_access_index_field - * - * PARAMETERS: Mode - ACPI_READ or ACPI_WRITE - * *Field_node - Parent node for field to be accessed - * *Buffer - Value(s) to be read or written - * Buffer_length - Number of bytes to transfer - * - * RETURN: Status - * - * DESCRIPTION: Read or write a Index Field - * - ******************************************************************************/ - -acpi_status -acpi_ex_access_index_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length) -{ - acpi_status status; - u8 locked; - - - FUNCTION_TRACE_PTR ("Ex_access_index_field", obj_desc); - - - /* - * Get the global lock if needed - */ - locked = acpi_ex_acquire_global_lock (obj_desc->index_field.lock_rule); - - - /* - * Set Index value to select proper Data register - */ - status = acpi_ex_common_access_field (ACPI_WRITE, - obj_desc->index_field.index_obj, - &obj_desc->index_field.value, - sizeof (obj_desc->index_field.value)); - if (ACPI_FAILURE (status)) { - goto cleanup; - } - - /* Now read/write the data register */ - - status = acpi_ex_common_access_field (mode, obj_desc->index_field.data_obj, - buffer, buffer_length); - -cleanup: - /* - * Release global lock if we acquired it earlier - */ - acpi_ex_release_global_lock (locked); - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ex_common_access_field - * - * PARAMETERS: Mode - ACPI_READ or ACPI_WRITE - * *Field_node - Parent node for field to be accessed - * *Buffer - Value(s) to be read or written - * Buffer_length - Size of buffer, in bytes. Must be large - * enough for all bits of the field. - * - * RETURN: Status - * - * DESCRIPTION: Read or write a named field - * - ******************************************************************************/ - -acpi_status -acpi_ex_common_access_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length) -{ - acpi_status status; - - - FUNCTION_TRACE_PTR ("Ex_common_access_field", obj_desc); - - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Obj=%p Type=%X Buf=%p Len=%X\n", - obj_desc, obj_desc->common.type, buffer, buffer_length)); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Mode=%d Bit_len=%X Bit_off=%X Byte_off=%X\n", - mode, obj_desc->common_field.bit_length, + /* + * Copy the original data to the new buffer, starting + * at Byte zero. All unused (upper) bytes of the + * buffer will be 0. 
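Because fields are indivisible and partial writes are rejected, a source shorter than the field is first copied into a zero-filled buffer of the field's full byte length, as the comment above describes. Equivalent logic in stand-alone C (calloc already zeroes the unused upper bytes):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow a short source to the field's required length, zero-padded above */
static unsigned char *pad_to_field(const void *src, size_t src_len,
                                   size_t field_len)
{
    unsigned char *buf;

    if (src_len >= field_len)
        return NULL;                      /* no padding needed */

    buf = calloc(1, field_len);           /* upper bytes stay 0 */
    if (!buf)
        return NULL;
    memcpy(buf, src, src_len);            /* original data at byte zero */
    return buf;
}

int main(void)
{
    unsigned char src[2] = { 0xAB, 0xCD };
    unsigned char *buf = pad_to_field(src, sizeof (src), 8);

    if (buf) {
        for (int i = 0; i < 8; i++)
            printf("%02X ", buf[i]);      /* AB CD 00 00 00 00 00 00 */
        printf("\n");
        free(buf);
    }
    return 0;
}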
+ */ + ACPI_MEMCPY ((char *) new_buffer, (char *) buffer, length); + buffer = new_buffer; + length = required_length; + } + + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "Obj=%p Type=%X Buf=%p Len=%X\n", + obj_desc, ACPI_GET_OBJECT_TYPE (obj_desc), buffer, length)); + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "field_read: bit_len=%X bit_off=%X byte_off=%X\n", + obj_desc->common_field.bit_length, obj_desc->common_field.start_field_bit_offset, obj_desc->common_field.base_byte_offset)); + /* Lock entire transaction if requested */ - /* Perform the actual read or write of the field */ + locked = acpi_ex_acquire_global_lock (obj_desc->common_field.field_flags); - switch (mode) { - case ACPI_READ: - - status = acpi_ex_extract_from_field (obj_desc, buffer, buffer_length); - break; - - - case ACPI_WRITE: - - status = acpi_ex_insert_into_field (obj_desc, buffer, buffer_length); - break; + /* Write to the field */ + status = acpi_ex_insert_into_field (obj_desc, buffer, length); + acpi_ex_release_global_lock (locked); - default: + /* Free temporary buffer if we used one */ - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown I/O Mode: %X\n", mode)); - status = AE_BAD_PARAMETER; - break; + if (new_buffer) { + ACPI_MEM_FREE (new_buffer); } - return_ACPI_STATUS (status); } + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exfldio.c linux.21rc5-ac2/drivers/acpi/executer/exfldio.c --- linux.21rc5/drivers/acpi/executer/exfldio.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exfldio.c 2003-05-03 19:08:44.000000000 +0100 @@ -1,73 +1,94 @@ /****************************************************************************** * * Module Name: exfldio - Aml Field I/O - * $Revision: 66 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "achware.h" -#include "acevents.h" -#include "acdispat.h" +#include +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exfldio") + ACPI_MODULE_NAME ("exfldio") /******************************************************************************* * - * FUNCTION: Acpi_ex_setup_field + * FUNCTION: acpi_ex_setup_region * - * PARAMETERS: *Obj_desc - Field to be read or written - * Field_datum_byte_offset - Current offset into the field + * PARAMETERS: *obj_desc - Field to be read or written + * field_datum_byte_offset - Byte offset of this datum within the + * parent field * * RETURN: Status * - * DESCRIPTION: Common processing for Acpi_ex_extract_from_field and - * Acpi_ex_insert_into_field + * DESCRIPTION: Common processing for acpi_ex_extract_from_field and + * acpi_ex_insert_into_field. Initialize the * ******************************************************************************/ acpi_status -acpi_ex_setup_field ( - acpi_operand_object *obj_desc, - u32 field_datum_byte_offset) +acpi_ex_setup_region ( + union acpi_operand_object *obj_desc, + u32 field_datum_byte_offset) { - acpi_status status = AE_OK; - acpi_operand_object *rgn_desc; + acpi_status status = AE_OK; + union acpi_operand_object *rgn_desc; - FUNCTION_TRACE_U32 ("Ex_setup_field", field_datum_byte_offset); + ACPI_FUNCTION_TRACE_U32 ("ex_setup_region", field_datum_byte_offset); rgn_desc = obj_desc->common_field.region_obj; - if (ACPI_TYPE_REGION != rgn_desc->common.type) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Needed Region, found type %x %s\n", - rgn_desc->common.type, acpi_ut_get_type_name (rgn_desc->common.type))); + /* We must have a valid region */ + + if (ACPI_GET_OBJECT_TYPE (rgn_desc) != ACPI_TYPE_REGION) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Needed Region, found type %X (%s)\n", + ACPI_GET_OBJECT_TYPE (rgn_desc), + acpi_ut_get_object_type_name (rgn_desc))); + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } @@ -76,30 +97,36 @@ * evaluate them now and save the results. */ if (!(rgn_desc->region.flags & AOPOBJ_DATA_VALID)) { - status = acpi_ds_get_region_arguments (rgn_desc); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } } + if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS) { + /* SMBus has a non-linear address space */ + + return_ACPI_STATUS (AE_OK); + } + /* * Validate the request. The entire request from the byte offset for a * length of one field datum (access width) must fit within the region. 
* (Region length is specified in bytes) */ - if (rgn_desc->region.length < (obj_desc->common_field.base_byte_offset + - field_datum_byte_offset + - obj_desc->common_field.access_byte_width)) { + if (rgn_desc->region.length < (obj_desc->common_field.base_byte_offset + + field_datum_byte_offset + + obj_desc->common_field.access_byte_width)) { if (rgn_desc->region.length < obj_desc->common_field.access_byte_width) { /* - * This is the case where the Access_type (Acc_word, etc.) is wider + * This is the case where the access_type (acc_word, etc.) is wider * than the region itself. For example, a region of length one * byte, and a field with Dword access specified. */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Field access width (%d bytes) too large for region size (%X)\n", - obj_desc->common_field.access_byte_width, rgn_desc->region.length)); + "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)\n", + obj_desc->common_field.node->name.ascii, obj_desc->common_field.access_byte_width, + rgn_desc->region.node->name.ascii, rgn_desc->region.length)); } /* @@ -107,12 +134,46 @@ * exceeds region length, indicate an error */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Field base+offset+width %X+%X+%X exceeds region size (%X bytes) field=%p region=%p\n", - obj_desc->common_field.base_byte_offset, field_datum_byte_offset, - obj_desc->common_field.access_byte_width, - rgn_desc->region.length, obj_desc, rgn_desc)); + "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)\n", + obj_desc->common_field.node->name.ascii, obj_desc->common_field.base_byte_offset, + field_datum_byte_offset, obj_desc->common_field.access_byte_width, + rgn_desc->region.node->name.ascii, rgn_desc->region.length)); + + #ifdef CONFIG_ACPI_RELAXED_AML + { + /* + * Allow access to the field if it is within the region size + * rounded up to a multiple of the access byte width. This + * overcomes "off-by-one" programming errors in the AML often + * found in Toshiba laptops. These errors were allowed by + * the Microsoft ASL compiler. + */ + u32 rounded_length = ACPI_ROUND_UP(rgn_desc->region.length, + obj_desc->common_field.access_byte_width); - return_ACPI_STATUS (AE_AML_REGION_LIMIT); + if (rounded_length < (obj_desc->common_field.base_byte_offset + + field_datum_byte_offset + + obj_desc->common_field.access_byte_width)) { + return_ACPI_STATUS (AE_AML_REGION_LIMIT); + } else { + static int warn_once = 1; + if (warn_once) { + // Could also associate a flag with each field, and + // warn once for each field. 
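The CONFIG_ACPI_RELAXED_AML fallback above tolerates off-by-one AML by re-testing the access against the region length rounded up to the access width. The arithmetic in isolation, with illustrative names:

#include <stdio.h>
#include <stdint.h>

/* Round v up to the next multiple of b */
static uint32_t round_up(uint32_t v, uint32_t b)
{
    return ((v + b - 1) / b) * b;
}

/* 1 = access allowed, 0 = beyond even the rounded region limit */
static int relaxed_region_check(uint32_t region_len, uint32_t base_off,
                                uint32_t datum_off, uint32_t width)
{
    uint32_t end = base_off + datum_off + width;

    if (end <= region_len)
        return 1;                         /* strictly in bounds */

    /* Off-by-one tolerance: compare against the rounded-up length */
    return end <= round_up(region_len, width);
}

int main(void)
{
    /* A 1-byte region accessed with dword (4-byte) width fails the
     * strict test but passes the relaxed one */
    printf("relaxed=%d\n", relaxed_region_check(1, 0, 0, 4));
    return 0;
}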
+ ACPI_REPORT_WARNING(( + "The ACPI AML in your computer contains errors, " + "please nag the manufacturer to correct it.\n")); + ACPI_REPORT_WARNING(( + "Allowing relaxed access to fields; " + "turn on CONFIG_ACPI_DEBUG for details.\n")); + warn_once = 0; + } + return_ACPI_STATUS (AE_OK); + } + } + #else + return_ACPI_STATUS (AE_AML_REGION_LIMIT); + #endif } return_ACPI_STATUS (AE_OK); @@ -121,116 +182,415 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_read_field_datum + * FUNCTION: acpi_ex_access_region * - * PARAMETERS: *Obj_desc - Field to be read - * *Value - Where to store value (must be 32 bits) + * PARAMETERS: *obj_desc - Field to be read + * field_datum_byte_offset - Byte offset of this datum within the + * parent field + * *Value - Where to store value (must at least + * the size of acpi_integer) + * Function - Read or Write flag plus other region- + * dependent flags * * RETURN: Status * - * DESCRIPTION: Retrieve the value of the given field + * DESCRIPTION: Read or Write a single field datum to an Operation Region. * ******************************************************************************/ acpi_status -acpi_ex_read_field_datum ( - acpi_operand_object *obj_desc, - u32 field_datum_byte_offset, - u32 *value) +acpi_ex_access_region ( + union acpi_operand_object *obj_desc, + u32 field_datum_byte_offset, + acpi_integer *value, + u32 function) { - acpi_status status; - acpi_operand_object *rgn_desc; - ACPI_PHYSICAL_ADDRESS address; - u32 local_value; + acpi_status status; + union acpi_operand_object *rgn_desc; + acpi_physical_address address; + + + ACPI_FUNCTION_TRACE ("ex_access_region"); + + + /* + * Ensure that the region operands are fully evaluated and verify + * the validity of the request + */ + status = acpi_ex_setup_region (obj_desc, field_datum_byte_offset); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + /* + * The physical address of this field datum is: + * + * 1) The base of the region, plus + * 2) The base offset of the field, plus + * 3) The current offset into the field + */ + rgn_desc = obj_desc->common_field.region_obj; + address = rgn_desc->region.address + + obj_desc->common_field.base_byte_offset + + field_datum_byte_offset; + + if ((function & ACPI_IO_MASK) == ACPI_READ) { + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "[READ]")); + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "[WRITE]")); + } + + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_BFIELD, + " Region[%s-%X] Access %X Base:Off %X:%X at %8.8X%8.8X\n", + acpi_ut_get_region_name (rgn_desc->region.space_id), + rgn_desc->region.space_id, + obj_desc->common_field.access_byte_width, + obj_desc->common_field.base_byte_offset, + field_datum_byte_offset, + ACPI_HIDWORD (address), ACPI_LODWORD (address))); - FUNCTION_TRACE_U32 ("Ex_read_field_datum", field_datum_byte_offset); + /* Invoke the appropriate address_space/op_region handler */ + status = acpi_ev_address_space_dispatch (rgn_desc, function, + address, ACPI_MUL_8 (obj_desc->common_field.access_byte_width), value); - if (!value) { - local_value = 0; - value = &local_value; /* support reads without saving value */ + if (ACPI_FAILURE (status)) { + if (status == AE_NOT_IMPLEMENTED) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Region %s(%X) not implemented\n", + acpi_ut_get_region_name (rgn_desc->region.space_id), + rgn_desc->region.space_id)); + } + else if (status == AE_NOT_EXIST) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Region %s(%X) has no handler\n", + acpi_ut_get_region_name 
(rgn_desc->region.space_id), + rgn_desc->region.space_id)); + } } - /* Clear the entire return buffer first, [Very Important!] */ + return_ACPI_STATUS (status); +} - *value = 0; + +/******************************************************************************* + * + * FUNCTION: acpi_ex_register_overflow + * + * PARAMETERS: *obj_desc - Register(Field) to be written + * Value - Value to be stored + * + * RETURN: TRUE if value overflows the field, FALSE otherwise + * + * DESCRIPTION: Check if a value is out of range of the field being written. + * Used to check if the values written to Index and Bank registers + * are out of range. Normally, the value is simply truncated + * to fit the field, but this case is most likely a serious + * coding error in the ASL. + * + ******************************************************************************/ + +u8 +acpi_ex_register_overflow ( + union acpi_operand_object *obj_desc, + acpi_integer value) +{ + + if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) { + /* + * The field is large enough to hold the maximum integer, so we can + * never overflow it. + */ + return (FALSE); + } + + if (value >= ((acpi_integer) 1 << obj_desc->common_field.bit_length)) { + /* + * The Value is larger than the maximum value that can fit into + * the register. + */ + return (TRUE); + } + + /* The Value will fit into the field with no truncation */ + + return (FALSE); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ex_field_datum_io + * + * PARAMETERS: *obj_desc - Field to be read + * field_datum_byte_offset - Byte offset of this datum within the + * parent field + * *Value - Where to store value (must be 64 bits) + * read_write - Read or Write flag + * + * RETURN: Status + * + * DESCRIPTION: Read or Write a single datum of a field. The field_type is + * demultiplexed here to handle the different types of fields + * (buffer_field, region_field, index_field, bank_field) + * + ******************************************************************************/ + +acpi_status +acpi_ex_field_datum_io ( + union acpi_operand_object *obj_desc, + u32 field_datum_byte_offset, + acpi_integer *value, + u32 read_write) +{ + acpi_status status; + acpi_integer local_value; + + + ACPI_FUNCTION_TRACE_U32 ("ex_field_datum_io", field_datum_byte_offset); + + + if (read_write == ACPI_READ) { + if (!value) { + local_value = 0; + value = &local_value; /* To support reads without saving return value */ + } + + /* Clear the entire return buffer first, [Very Important!] */ + + *value = 0; + } /* - * Buffer_fields - Read from a Buffer - * Other Fields - Read from a Operation Region. + * The four types of fields are: + * + * buffer_fields - Read/write from/to a Buffer + * region_fields - Read/write from/to a Operation Region. + * bank_fields - Write to a Bank Register, then read/write from/to an op_region + * index_fields - Write to an Index Register, then read/write from/to a Data Register */ - switch (obj_desc->common.type) { + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { case ACPI_TYPE_BUFFER_FIELD: - /* - * For Buffer_fields, we only need to copy the data from the - * source buffer. Length is the field width in bytes. + * If the buffer_field arguments have not been previously evaluated, + * evaluate them now and save the results. 
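acpi_ex_register_overflow above protects Index and Bank register writes: a value that needs more bits than the register has is rejected rather than silently truncated. The same test in isolation:

#include <stdio.h>
#include <stdint.h>

/* 1 if value cannot be represented in a bit_length-bit register */
static int register_overflow(uint64_t value, unsigned bit_length)
{
    if (bit_length >= 64)
        return 0;                 /* register can hold any 64-bit value */

    return value >= ((uint64_t) 1 << bit_length);
}

int main(void)
{
    printf("255 in 8 bits: overflow=%d\n", register_overflow(255, 8)); /* 0 */
    printf("256 in 8 bits: overflow=%d\n", register_overflow(256, 8)); /* 1 */
    return 0;
}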
*/ - MEMCPY (value, (obj_desc->buffer_field.buffer_obj)->buffer.pointer - + obj_desc->buffer_field.base_byte_offset + field_datum_byte_offset, - obj_desc->common_field.access_byte_width); + if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { + status = acpi_ds_get_buffer_field_arguments (obj_desc); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + + if (read_write == ACPI_READ) { + /* + * Copy the data from the source buffer. + * Length is the field width in bytes. + */ + ACPI_MEMCPY (value, (obj_desc->buffer_field.buffer_obj)->buffer.pointer + + obj_desc->buffer_field.base_byte_offset + + field_datum_byte_offset, + obj_desc->common_field.access_byte_width); + } + else { + /* + * Copy the data to the target buffer. + * Length is the field width in bytes. + */ + ACPI_MEMCPY ((obj_desc->buffer_field.buffer_obj)->buffer.pointer + + obj_desc->buffer_field.base_byte_offset + + field_datum_byte_offset, + value, obj_desc->common_field.access_byte_width); + } + status = AE_OK; break; - case INTERNAL_TYPE_REGION_FIELD: - case INTERNAL_TYPE_BANK_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + + /* Ensure that the bank_value is not beyond the capacity of the register */ + + if (acpi_ex_register_overflow (obj_desc->bank_field.bank_obj, + (acpi_integer) obj_desc->bank_field.value)) { + return_ACPI_STATUS (AE_AML_REGISTER_LIMIT); + } /* - * For other fields, we need to go through an Operation Region - * (Only types that will get here are Region_fields and Bank_fields) + * For bank_fields, we must write the bank_value to the bank_register + * (itself a region_field) before we can access the data. */ - status = acpi_ex_setup_field (obj_desc, field_datum_byte_offset); + status = acpi_ex_insert_into_field (obj_desc->bank_field.bank_obj, + &obj_desc->bank_field.value, + sizeof (obj_desc->bank_field.value)); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } /* - * The physical address of this field datum is: - * - * 1) The base of the region, plus - * 2) The base offset of the field, plus - * 3) The current offset into the field - */ - rgn_desc = obj_desc->common_field.region_obj; - address = rgn_desc->region.address + obj_desc->common_field.base_byte_offset + - field_datum_byte_offset; - - ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "Region %s(%X) width %X base:off %X:%X at %8.8X%8.8X\n", - acpi_ut_get_region_name (rgn_desc->region.space_id), - rgn_desc->region.space_id, obj_desc->common_field.access_bit_width, - obj_desc->common_field.base_byte_offset, field_datum_byte_offset, - HIDWORD(address), LODWORD(address))); + * Now that the Bank has been selected, fall through to the + * region_field case and write the datum to the Operation Region + */ - /* Invoke the appropriate Address_space/Op_region handler */ + /*lint -fallthrough */ - status = acpi_ev_address_space_dispatch (rgn_desc, ACPI_READ_ADR_SPACE, - address, obj_desc->common_field.access_bit_width, value); - if (status == AE_NOT_IMPLEMENTED) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region %s(%X) not implemented\n", - acpi_ut_get_region_name (rgn_desc->region.space_id), - rgn_desc->region.space_id)); + + case ACPI_TYPE_LOCAL_REGION_FIELD: + /* + * For simple region_fields, we just directly access the owning + * Operation Region. 
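For buffer_fields the datum transfer above is a plain memory copy at base_byte_offset plus the current datum offset, access_byte_width bytes at a time. Modeled stand-alone (the result is host-endian, shown here for a little-endian machine):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Copy one field datum out of the backing buffer (read direction) */
static uint64_t buffer_field_read(const uint8_t *backing,
                                  size_t base_byte_offset,
                                  size_t datum_byte_offset,
                                  size_t access_byte_width)
{
    uint64_t value = 0;    /* cleared first, like the real read path */

    memcpy(&value, backing + base_byte_offset + datum_byte_offset,
           access_byte_width);
    return value;
}

int main(void)
{
    uint8_t backing[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

    /* 2-byte datum at offset 2 + 1: bytes 0x44, 0x55 -> 0x5544 on LE */
    printf("0x%llX\n",
           (unsigned long long) buffer_field_read(backing, 2, 1, 2));
    return 0;
}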
+ */ + status = acpi_ex_access_region (obj_desc, field_datum_byte_offset, value, + read_write); + break; + + + case ACPI_TYPE_LOCAL_INDEX_FIELD: + + + /* Ensure that the index_value is not beyond the capacity of the register */ + + if (acpi_ex_register_overflow (obj_desc->index_field.index_obj, + (acpi_integer) obj_desc->index_field.value)) { + return_ACPI_STATUS (AE_AML_REGISTER_LIMIT); } - else if (status == AE_NOT_EXIST) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region %s(%X) has no handler\n", - acpi_ut_get_region_name (rgn_desc->region.space_id), - rgn_desc->region.space_id)); + /* Write the index value to the index_register (itself a region_field) */ + + status = acpi_ex_insert_into_field (obj_desc->index_field.index_obj, + &obj_desc->index_field.value, + sizeof (obj_desc->index_field.value)); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + if (read_write == ACPI_READ) { + /* Read the datum from the data_register */ + + status = acpi_ex_extract_from_field (obj_desc->index_field.data_obj, + value, obj_desc->common_field.access_byte_width); + } + else { + /* Write the datum to the Data register */ + + status = acpi_ex_insert_into_field (obj_desc->index_field.data_obj, + value, obj_desc->common_field.access_byte_width); } break; default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%p, wrong source type - %s\n", - obj_desc, acpi_ut_get_type_name (obj_desc->common.type))); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%p, Wrong object type - %s\n", + obj_desc, acpi_ut_get_object_type_name (obj_desc))); status = AE_AML_INTERNAL; break; } + if (ACPI_SUCCESS (status)) { + if (read_write == ACPI_READ) { + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "Value Read=%8.8X%8.8X\n", + ACPI_HIDWORD (*value), ACPI_LODWORD (*value))); + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "Value Written=%8.8X%8.8X\n", + ACPI_HIDWORD (*value), ACPI_LODWORD (*value))); + } + } + + return_ACPI_STATUS (status); +} - ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "Returned value=%08X \n", *value)); + +/******************************************************************************* + * + * FUNCTION: acpi_ex_write_with_update_rule + * + * PARAMETERS: *obj_desc - Field to be set + * Value - Value to store + * + * RETURN: Status + * + * DESCRIPTION: Apply the field update rule to a field write + * + ******************************************************************************/ + +acpi_status +acpi_ex_write_with_update_rule ( + union acpi_operand_object *obj_desc, + acpi_integer mask, + acpi_integer field_value, + u32 field_datum_byte_offset) +{ + acpi_status status = AE_OK; + acpi_integer merged_value; + acpi_integer current_value; + + + ACPI_FUNCTION_TRACE_U32 ("ex_write_with_update_rule", mask); + + + /* Start with the new bits */ + + merged_value = field_value; + + /* If the mask is all ones, we don't need to worry about the update rule */ + + if (mask != ACPI_INTEGER_MAX) { + /* Decode the update rule */ + + switch (obj_desc->common_field.field_flags & AML_FIELD_UPDATE_RULE_MASK) { + case AML_FIELD_UPDATE_PRESERVE: + /* + * Check if update rule needs to be applied (not if mask is all + * ones) The left shift drops the bits we want to ignore. + */ + if ((~mask << (ACPI_MUL_8 (sizeof (mask)) - + ACPI_MUL_8 (obj_desc->common_field.access_byte_width))) != 0) { + /* + * Read the current contents of the byte/word/dword containing + * the field, and merge with the new field value. 
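The update rule decides what happens to datum bits outside the field: Preserve triggers the read-merge described in the comment above, while WriteAsOnes and WriteAsZeros simply fill from the mask. A compact model (the pre-masking of the field bits is folded in here for clarity):

#include <stdio.h>
#include <stdint.h>

enum update_rule { PRESERVE, WRITE_AS_ONES, WRITE_AS_ZEROS };

/* Merge new field bits into a datum under the given update rule.
 * mask has 1-bits where the field lives; current is the datum's
 * present contents (only consulted for PRESERVE). */
static uint64_t apply_update_rule(enum update_rule rule, uint64_t mask,
                                  uint64_t field_bits, uint64_t current)
{
    uint64_t merged = field_bits & mask;

    switch (rule) {
    case PRESERVE:       return merged | (current & ~mask);
    case WRITE_AS_ONES:  return merged | ~mask;
    case WRITE_AS_ZEROS: return merged;   /* outside bits stay 0 */
    }
    return merged;
}

int main(void)
{
    /* 4-bit field at bits 4..7, new value 0xA0, datum currently 0xFF:
     * Preserve keeps the low nibble -> 0xAF */
    printf("preserve: 0x%llX\n", (unsigned long long)
           apply_update_rule(PRESERVE, 0xF0, 0xA0, 0xFF));
    return 0;
}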
+ */ + status = acpi_ex_field_datum_io (obj_desc, field_datum_byte_offset, + &current_value, ACPI_READ); + merged_value |= (current_value & ~mask); + } + break; + + case AML_FIELD_UPDATE_WRITE_AS_ONES: + + /* Set positions outside the field to all ones */ + + merged_value |= ~mask; + break; + + case AML_FIELD_UPDATE_WRITE_AS_ZEROS: + + /* Set positions outside the field to all zeros */ + + merged_value &= mask; + break; + + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "write_with_update_rule: Unknown update_rule setting: %X\n", + (obj_desc->common_field.field_flags & AML_FIELD_UPDATE_RULE_MASK))); + return_ACPI_STATUS (AE_AML_OPERAND_VALUE); + } + } + + /* Write the merged value */ + + status = acpi_ex_field_datum_io (obj_desc, field_datum_byte_offset, + &merged_value, ACPI_WRITE); + + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "Mask %8.8X%8.8X datum_offset %X Value %8.8X%8.8X, merged_value %8.8X%8.8X\n", + ACPI_HIDWORD (mask), ACPI_LODWORD (mask), + field_datum_byte_offset, + ACPI_HIDWORD (field_value), ACPI_LODWORD (field_value), + ACPI_HIDWORD (merged_value),ACPI_LODWORD (merged_value))); return_ACPI_STATUS (status); } @@ -238,43 +598,65 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_get_buffer_datum + * FUNCTION: acpi_ex_get_buffer_datum * - * PARAMETERS: Merged_datum - Value to store - * Buffer - Receiving buffer - * Byte_granularity - 1/2/4 Granularity of the field + * PARAMETERS: Datum - Where the Datum is returned + * Buffer - Raw field buffer + * buffer_length - Entire length (used for big-endian only) + * byte_granularity - 1/2/4/8 Granularity of the field * (aka Datum Size) - * Offset - Datum offset into the buffer + * buffer_offset - Datum offset into the buffer * * RETURN: none * - * DESCRIPTION: Store the merged datum to the buffer according to the + * DESCRIPTION: Get a datum from the buffer according to the buffer field * byte granularity * ******************************************************************************/ -static void -acpi_ex_get_buffer_datum( - u32 *datum, - void *buffer, - u32 byte_granularity, - u32 offset) +void +acpi_ex_get_buffer_datum ( + acpi_integer *datum, + void *buffer, + u32 buffer_length, + u32 byte_granularity, + u32 buffer_offset) { + u32 index; + + + ACPI_FUNCTION_ENTRY (); - FUNCTION_ENTRY (); + /* Get proper index into buffer (handles big/little endian) */ + + index = ACPI_BUFFER_INDEX (buffer_length, buffer_offset, byte_granularity); + + /* Move the requested number of bytes */ switch (byte_granularity) { case ACPI_FIELD_BYTE_GRANULARITY: - *datum = ((u8 *) buffer) [offset]; + + *datum = ((u8 *) buffer) [index]; break; case ACPI_FIELD_WORD_GRANULARITY: - MOVE_UNALIGNED16_TO_32 (datum, &(((u16 *) buffer) [offset])); + + ACPI_MOVE_16_TO_64 (datum, &(((u16 *) buffer) [index])); break; case ACPI_FIELD_DWORD_GRANULARITY: - MOVE_UNALIGNED32_TO_32 (datum, &(((u32 *) buffer) [offset])); + + ACPI_MOVE_32_TO_64 (datum, &(((u32 *) buffer) [index])); + break; + + case ACPI_FIELD_QWORD_GRANULARITY: + + ACPI_MOVE_64_TO_64 (datum, &(((u64 *) buffer) [index])); + break; + + default: + /* Should not get here */ break; } } @@ -282,13 +664,14 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_set_buffer_datum + * FUNCTION: acpi_ex_set_buffer_datum * - * PARAMETERS: Merged_datum - Value to store + * PARAMETERS: merged_datum - Value to store * Buffer - Receiving buffer - * Byte_granularity - 1/2/4 Granularity of the field + * buffer_length - Entire
length (used for big-endian only) + * byte_granularity - 1/2/4/8 Granularity of the field * (aka Datum Size) - * Offset - Datum offset into the buffer + * buffer_offset - Datum offset into the buffer * * RETURN: none * @@ -297,28 +680,48 @@ * ******************************************************************************/ -static void +void acpi_ex_set_buffer_datum ( - u32 merged_datum, - void *buffer, - u32 byte_granularity, - u32 offset) + acpi_integer merged_datum, + void *buffer, + u32 buffer_length, + u32 byte_granularity, + u32 buffer_offset) { + u32 index; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); + /* Get proper index into buffer (handles big/little endian) */ + + index = ACPI_BUFFER_INDEX (buffer_length, buffer_offset, byte_granularity); + + /* Move the requested number of bytes */ + switch (byte_granularity) { case ACPI_FIELD_BYTE_GRANULARITY: - ((u8 *) buffer) [offset] = (u8) merged_datum; + + ((u8 *) buffer) [index] = (u8) merged_datum; break; case ACPI_FIELD_WORD_GRANULARITY: - MOVE_UNALIGNED16_TO_16 (&(((u16 *) buffer)[offset]), &merged_datum); + + ACPI_MOVE_64_TO_16 (&(((u16 *) buffer)[index]), &merged_datum); break; case ACPI_FIELD_DWORD_GRANULARITY: - MOVE_UNALIGNED32_TO_32 (&(((u32 *) buffer)[offset]), &merged_datum); + + ACPI_MOVE_64_TO_32 (&(((u32 *) buffer)[index]), &merged_datum); + break; + + case ACPI_FIELD_QWORD_GRANULARITY: + + ACPI_MOVE_64_TO_64 (&(((u64 *) buffer)[index]), &merged_datum); + break; + + default: + /* Should not get here */ break; } } @@ -326,9 +729,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_extract_from_field + * FUNCTION: acpi_ex_extract_from_field * - * PARAMETERS: *Obj_desc - Field to be read + * PARAMETERS: *obj_desc - Field to be read * *Value - Where to store value * * RETURN: Status @@ -339,29 +742,30 @@ acpi_status acpi_ex_extract_from_field ( - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length) + union acpi_operand_object *obj_desc, + void *buffer, + u32 buffer_length) { - acpi_status status; - u32 field_datum_byte_offset; - u32 datum_offset; - u32 previous_raw_datum; - u32 this_raw_datum = 0; - u32 merged_datum = 0; - u32 byte_field_length; - u32 datum_count; + acpi_status status; + u32 field_datum_byte_offset; + u32 datum_offset; + acpi_integer previous_raw_datum; + acpi_integer this_raw_datum = 0; + acpi_integer merged_datum = 0; + u32 byte_field_length; + u32 datum_count; - FUNCTION_TRACE ("Ex_extract_from_field"); + ACPI_FUNCTION_TRACE ("ex_extract_from_field"); /* * The field must fit within the caller's buffer */ - byte_field_length = ROUND_BITS_UP_TO_BYTES (obj_desc->common_field.bit_length); + byte_field_length = ACPI_ROUND_BITS_UP_TO_BYTES (obj_desc->common_field.bit_length); if (byte_field_length > buffer_length) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Field size %X (bytes) too large for buffer (%X)\n", + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "Field size %X (bytes) too large for buffer (%X)\n", byte_field_length, buffer_length)); return_ACPI_STATUS (AE_BUFFER_OVERFLOW); @@ -369,26 +773,27 @@ /* Convert field byte count to datum count, round up if necessary */ - datum_count = ROUND_UP_TO (byte_field_length, obj_desc->common_field.access_byte_width); + datum_count = ACPI_ROUND_UP_TO (byte_field_length, + obj_desc->common_field.access_byte_width); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Byte_len=%x, Datum_len=%x, Bit_gran=%x, Byte_gran=%x\n", - byte_field_length, datum_count, obj_desc->common_field.access_bit_width, - 
obj_desc->common_field.access_byte_width)); + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "byte_len=%X, datum_len=%X, byte_gran=%X\n", + byte_field_length, datum_count,obj_desc->common_field.access_byte_width)); /* * Clear the caller's buffer (the whole buffer length as given) * This is very important, especially in the cases where a byte is read, * but the buffer is really a u32 (4 bytes). */ - MEMSET (buffer, 0, buffer_length); + ACPI_MEMSET (buffer, 0, buffer_length); /* Read the first raw datum to prime the loop */ field_datum_byte_offset = 0; datum_offset= 0; - status = acpi_ex_read_field_datum (obj_desc, field_datum_byte_offset, &previous_raw_datum); + status = acpi_ex_field_datum_io (obj_desc, field_datum_byte_offset, + &previous_raw_datum, ACPI_READ); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } @@ -397,7 +802,7 @@ /* We might actually be done if the request fits in one datum */ if ((datum_count == 1) && - (obj_desc->common_field.access_flags & AFIELD_SINGLE_DATUM)) { + (obj_desc->common_field.flags & AOPOBJ_SINGLE_DATUM)) { /* 1) Shift the valid data bits down to start at bit 0 */ merged_datum = (previous_raw_datum >> obj_desc->common_field.start_field_bit_offset); @@ -405,13 +810,13 @@ /* 2) Mask off any upper unused bits (bits not part of the field) */ if (obj_desc->common_field.end_buffer_valid_bits) { - merged_datum &= MASK_BITS_ABOVE (obj_desc->common_field.end_buffer_valid_bits); + merged_datum &= ACPI_MASK_BITS_ABOVE (obj_desc->common_field.end_buffer_valid_bits); } /* Store the datum to the caller buffer */ - acpi_ex_set_buffer_datum (merged_datum, buffer, obj_desc->common_field.access_byte_width, - datum_offset); + acpi_ex_set_buffer_datum (merged_datum, buffer, buffer_length, + obj_desc->common_field.access_byte_width, datum_offset); return_ACPI_STATUS (AE_OK); } @@ -427,17 +832,18 @@ * to perform a final read, since this would potentially read * past the end of the region. * - * TBD: [Investigate] It may make more sense to just split the aligned - * and non-aligned cases since the aligned case is so very simple, + * We could just split the aligned and non-aligned cases since the + * aligned case is so very simple, but this would require more code. */ - if ((obj_desc->common_field.start_field_bit_offset != 0) || + if ((obj_desc->common_field.start_field_bit_offset != 0) || ((obj_desc->common_field.start_field_bit_offset == 0) && (datum_offset < (datum_count -1)))) { /* * Get the next raw datum, it contains some or all bits * of the current field datum */ - status = acpi_ex_read_field_datum (obj_desc, field_datum_byte_offset, &this_raw_datum); + status = acpi_ex_field_datum_io (obj_desc, field_datum_byte_offset, + &this_raw_datum, ACPI_READ); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } @@ -451,7 +857,6 @@ merged_datum = previous_raw_datum; } - else { /* * Put together the appropriate bits of the two raw data to make a @@ -474,7 +879,7 @@ */ if (obj_desc->common_field.end_buffer_valid_bits) { merged_datum &= - MASK_BITS_ABOVE (obj_desc->common_field.end_buffer_valid_bits); + ACPI_MASK_BITS_ABOVE (obj_desc->common_field.end_buffer_valid_bits); } } } @@ -483,8 +888,8 @@ * Store the merged field datum in the caller's buffer, according to * the granularity of the field (size of each datum). 
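When the field does not start on a datum boundary, each output datum is stitched from two adjacent raw datums: the tail of the previous datum shifted down, plus the head of the next one shifted up by datum_valid_bits (datum width minus the start bit offset). The shift arithmetic in isolation, using 8-bit datums for clarity:

#include <stdio.h>
#include <stdint.h>

/* Stitch one output datum from two adjacent raw datums of a field
 * that starts start_bits into its first datum */
static uint8_t merge_datums(uint8_t prev_raw, uint8_t this_raw,
                            unsigned start_bits)
{
    unsigned valid_bits = 8 - start_bits;   /* bits taken from prev_raw */

    return (uint8_t) ((prev_raw >> start_bits) | (this_raw << valid_bits));
}

int main(void)
{
    /* Field starts at bit 4: high nibble of prev_raw becomes the low
     * nibble of the output, low nibble of this_raw the high nibble */
    printf("0x%02X\n", merge_datums(0xA0, 0x0B, 4));   /* prints 0xBA */
    return 0;
}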
*/ - acpi_ex_set_buffer_datum (merged_datum, buffer, obj_desc->common_field.access_byte_width, - datum_offset); + acpi_ex_set_buffer_datum (merged_datum, buffer, buffer_length, + obj_desc->common_field.access_byte_width, datum_offset); /* * Save the raw datum that was just acquired since it may contain bits @@ -500,214 +905,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_write_field_datum + * FUNCTION: acpi_ex_insert_into_field * - * PARAMETERS: *Obj_desc - Field to be set - * Value - Value to store - * - * RETURN: Status - * - * DESCRIPTION: Store the value into the given field - * - ******************************************************************************/ - -static acpi_status -acpi_ex_write_field_datum ( - acpi_operand_object *obj_desc, - u32 field_datum_byte_offset, - u32 value) -{ - acpi_status status = AE_OK; - acpi_operand_object *rgn_desc = NULL; - ACPI_PHYSICAL_ADDRESS address; - - - FUNCTION_TRACE_U32 ("Ex_write_field_datum", field_datum_byte_offset); - - - /* - * Buffer_fields - Read from a Buffer - * Other Fields - Read from a Operation Region. - */ - switch (obj_desc->common.type) { - case ACPI_TYPE_BUFFER_FIELD: - - /* - * For Buffer_fields, we only need to copy the data to the - * target buffer. Length is the field width in bytes. - */ - MEMCPY ((obj_desc->buffer_field.buffer_obj)->buffer.pointer - + obj_desc->buffer_field.base_byte_offset + field_datum_byte_offset, - &value, obj_desc->common_field.access_byte_width); - status = AE_OK; - break; - - - case INTERNAL_TYPE_REGION_FIELD: - case INTERNAL_TYPE_BANK_FIELD: - - /* - * For other fields, we need to go through an Operation Region - * (Only types that will get here are Region_fields and Bank_fields) - */ - status = acpi_ex_setup_field (obj_desc, field_datum_byte_offset); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - /* - * The physical address of this field datum is: - * - * 1) The base of the region, plus - * 2) The base offset of the field, plus - * 3) The current offset into the field - */ - rgn_desc = obj_desc->common_field.region_obj; - address = rgn_desc->region.address + - obj_desc->common_field.base_byte_offset + - field_datum_byte_offset; - - ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, - "Store %X in Region %s(%X) at %8.8X%8.8X width %X\n", - value, acpi_ut_get_region_name (rgn_desc->region.space_id), - rgn_desc->region.space_id, HIDWORD(address), LODWORD(address), - obj_desc->common_field.access_bit_width)); - - /* Invoke the appropriate Address_space/Op_region handler */ - - status = acpi_ev_address_space_dispatch (rgn_desc, ACPI_WRITE_ADR_SPACE, - address, obj_desc->common_field.access_bit_width, &value); - - if (status == AE_NOT_IMPLEMENTED) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "**** Region type %s(%X) not implemented\n", - acpi_ut_get_region_name (rgn_desc->region.space_id), - rgn_desc->region.space_id)); - } - - else if (status == AE_NOT_EXIST) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "**** Region type %s(%X) does not have a handler\n", - acpi_ut_get_region_name (rgn_desc->region.space_id), - rgn_desc->region.space_id)); - } - - break; - - - default: - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%p, wrong source type - %s\n", - obj_desc, acpi_ut_get_type_name (obj_desc->common.type))); - status = AE_AML_INTERNAL; - break; - } - - - ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "Value written=%08X \n", value)); - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * 
FUNCTION: Acpi_ex_write_field_datum_with_update_rule - * - * PARAMETERS: *Obj_desc - Field to be set - * Value - Value to store - * - * RETURN: Status - * - * DESCRIPTION: Apply the field update rule to a field write - * - ******************************************************************************/ - -static acpi_status -acpi_ex_write_field_datum_with_update_rule ( - acpi_operand_object *obj_desc, - u32 mask, - u32 field_value, - u32 field_datum_byte_offset) -{ - acpi_status status = AE_OK; - u32 merged_value; - u32 current_value; - - - FUNCTION_TRACE ("Ex_write_field_datum_with_update_rule"); - - - /* Start with the new bits */ - - merged_value = field_value; - - /* If the mask is all ones, we don't need to worry about the update rule */ - - if (mask != ACPI_UINT32_MAX) { - /* Decode the update rule */ - - switch (obj_desc->common_field.update_rule) { - case UPDATE_PRESERVE: - /* - * Check if update rule needs to be applied (not if mask is all - * ones) The left shift drops the bits we want to ignore. - */ - if ((~mask << (sizeof (mask) * 8 - - obj_desc->common_field.access_bit_width)) != 0) { - /* - * Read the current contents of the byte/word/dword containing - * the field, and merge with the new field value. - */ - status = acpi_ex_read_field_datum (obj_desc, field_datum_byte_offset, - &current_value); - merged_value |= (current_value & ~mask); - } - break; - - - case UPDATE_WRITE_AS_ONES: - - /* Set positions outside the field to all ones */ - - merged_value |= ~mask; - break; - - - case UPDATE_WRITE_AS_ZEROS: - - /* Set positions outside the field to all zeros */ - - merged_value &= mask; - break; - - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Write_with_update_rule: Unknown Update_rule setting: %x\n", - obj_desc->common_field.update_rule)); - return_ACPI_STATUS (AE_AML_OPERAND_VALUE); - break; - } - } - - - /* Write the merged value */ - - status = acpi_ex_write_field_datum (obj_desc, field_datum_byte_offset, - merged_value); - - ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "Mask %X Datum_offset %X Value %X, Merged_value %X\n", - mask, field_datum_byte_offset, field_value, merged_value)); - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ex_insert_into_field - * - * PARAMETERS: *Obj_desc - Field to be set + * PARAMETERS: *obj_desc - Field to be set * Buffer - Value to store * * RETURN: Status @@ -718,22 +918,22 @@ acpi_status acpi_ex_insert_into_field ( - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length) + union acpi_operand_object *obj_desc, + void *buffer, + u32 buffer_length) { - acpi_status status; - u32 field_datum_byte_offset; - u32 datum_offset; - u32 mask; - u32 merged_datum; - u32 previous_raw_datum; - u32 this_raw_datum; - u32 byte_field_length; - u32 datum_count; + acpi_status status; + u32 field_datum_byte_offset; + u32 datum_offset; + acpi_integer mask; + acpi_integer merged_datum; + acpi_integer previous_raw_datum; + acpi_integer this_raw_datum; + u32 byte_field_length; + u32 datum_count; - FUNCTION_TRACE ("Ex_insert_into_field"); + ACPI_FUNCTION_TRACE ("ex_insert_into_field"); /* @@ -742,24 +942,21 @@ * larger than the field, this typically happens when an integer is * written to a field that is actually smaller than an integer.
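Both transfer directions start from the same size arithmetic: bit_length rounded up to whole bytes, then that byte count rounded up to whole datums of access_byte_width. Stand-alone versions of the two helpers (hedged reconstructions of what ACPI_ROUND_BITS_UP_TO_BYTES and ACPI_ROUND_UP_TO compute):

#include <stdio.h>
#include <stdint.h>

static uint32_t round_bits_up_to_bytes(uint32_t bits)
{
    return (bits + 7) / 8;
}

/* How many access-width datums are needed to cover len bytes */
static uint32_t round_up_to(uint32_t len, uint32_t access_byte_width)
{
    return (len + access_byte_width - 1) / access_byte_width;
}

int main(void)
{
    uint32_t byte_len = round_bits_up_to_bytes(19);   /* 19 bits -> 3 bytes */
    uint32_t datums   = round_up_to(byte_len, 2);     /* word access -> 2 */

    printf("byte_len=%u datum_count=%u\n",
           (unsigned) byte_len, (unsigned) datums);
    return 0;
}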
*/ - byte_field_length = ROUND_BITS_UP_TO_BYTES (obj_desc->common_field.bit_length); + byte_field_length = ACPI_ROUND_BITS_UP_TO_BYTES (obj_desc->common_field.bit_length); if (buffer_length < byte_field_length) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Buffer length %X too small for field %X\n", + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "Buffer length %X too small for field %X\n", buffer_length, byte_field_length)); - /* TBD: Need a better error code */ - return_ACPI_STATUS (AE_BUFFER_OVERFLOW); } /* Convert byte count to datum count, round up if necessary */ - datum_count = ROUND_UP_TO (byte_field_length, obj_desc->common_field.access_byte_width); + datum_count = ACPI_ROUND_UP_TO (byte_field_length, obj_desc->common_field.access_byte_width); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Byte_len=%x, Datum_len=%x, Bit_gran=%x, Byte_gran=%x\n", - byte_field_length, datum_count, obj_desc->common_field.access_bit_width, - obj_desc->common_field.access_byte_width)); + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "byte_len=%X, datum_len=%X, byte_gran=%X\n", + byte_field_length, datum_count, obj_desc->common_field.access_byte_width)); /* * Break the request into up to three parts (similar to an I/O request): @@ -772,7 +969,7 @@ /* Get a single datum from the caller's buffer */ - acpi_ex_get_buffer_datum (&previous_raw_datum, buffer, + acpi_ex_get_buffer_datum (&previous_raw_datum, buffer, buffer_length, obj_desc->common_field.access_byte_width, datum_offset); /* @@ -781,19 +978,19 @@ * Note: The code in this section also handles the aligned case * * Construct Mask with 1 bits where the field is, 0 bits elsewhere - * (Only the bottom 5 bits of Bit_length are valid for a shift operation) + * (Only the bottom 5 bits of bit_length are valid for a shift operation) * * Mask off bits that are "below" the field (if any) */ - mask = MASK_BITS_BELOW (obj_desc->common_field.start_field_bit_offset); + mask = ACPI_MASK_BITS_BELOW (obj_desc->common_field.start_field_bit_offset); /* If the field fits in one datum, may need to mask upper bits */ - if ((obj_desc->common_field.access_flags & AFIELD_SINGLE_DATUM) && + if ((obj_desc->common_field.flags & AOPOBJ_SINGLE_DATUM) && obj_desc->common_field.end_field_valid_bits) { /* There are bits above the field, mask them off also */ - mask &= MASK_BITS_ABOVE (obj_desc->common_field.end_field_valid_bits); + mask &= ACPI_MASK_BITS_ABOVE (obj_desc->common_field.end_field_valid_bits); } /* Shift and mask the value into the field position */ @@ -803,7 +1000,7 @@ /* Apply the update rule (if necessary) and write the datum to the field */ - status = acpi_ex_write_field_datum_with_update_rule (obj_desc, mask, merged_datum, + status = acpi_ex_write_with_update_rule (obj_desc, mask, merged_datum, field_datum_byte_offset); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); @@ -812,7 +1009,7 @@ /* If the entire field fits within one datum, we are done. */ if ((datum_count == 1) && - (obj_desc->common_field.access_flags & AFIELD_SINGLE_DATUM)) { + (obj_desc->common_field.flags & AOPOBJ_SINGLE_DATUM)) { return_ACPI_STATUS (AE_OK); } @@ -835,7 +1032,7 @@ * Get the next raw buffer datum. 
It may contain bits of the previous * field datum */ - acpi_ex_get_buffer_datum (&this_raw_datum, buffer, + acpi_ex_get_buffer_datum (&this_raw_datum, buffer, buffer_length, obj_desc->common_field.access_byte_width, datum_offset); /* Create the field datum based on the field alignment */ @@ -849,7 +1046,6 @@ (previous_raw_datum >> obj_desc->common_field.datum_valid_bits) | (this_raw_datum << obj_desc->common_field.start_field_bit_offset); } - else { /* Field began aligned on datum boundary */ @@ -874,24 +1070,23 @@ * * Mask off the unused bits above (after) the end-of-field */ - mask = MASK_BITS_ABOVE (obj_desc->common_field.end_field_valid_bits); + mask = ACPI_MASK_BITS_ABOVE (obj_desc->common_field.end_field_valid_bits); merged_datum &= mask; /* Write the last datum with the update rule */ - status = acpi_ex_write_field_datum_with_update_rule (obj_desc, mask, - merged_datum, field_datum_byte_offset); + status = acpi_ex_write_with_update_rule (obj_desc, mask, merged_datum, + field_datum_byte_offset); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } } } - else { /* Normal case -- write the completed datum */ - status = acpi_ex_write_field_datum (obj_desc, - field_datum_byte_offset, merged_datum); + status = acpi_ex_field_datum_io (obj_desc, field_datum_byte_offset, + &merged_datum, ACPI_WRITE); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exmisc.c linux.21rc5-ac2/drivers/acpi/executer/exmisc.c --- linux.21rc5/drivers/acpi/executer/exmisc.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exmisc.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,122 +2,229 @@ /****************************************************************************** * * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes - * $Revision: 92 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acdispat.h" +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exmisc") + ACPI_MODULE_NAME ("exmisc") /******************************************************************************* * - * FUNCTION: Acpi_ex_get_object_reference + * FUNCTION: acpi_ex_get_object_reference * - * PARAMETERS: Obj_desc - Create a reference to this object - * Return_desc - Where to store the reference + * PARAMETERS: obj_desc - Create a reference to this object + * return_desc - Where to store the reference + * walk_state - Current state * * RETURN: Status * * DESCRIPTION: Obtain and return a "reference" to the target object - * Common code for the Ref_of_op and the Cond_ref_of_op. + * Common code for the ref_of_op and the cond_ref_of_op. 
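 *
 * [Editor's sketch, not part of the patch: on success the rewritten
 * function below always hands back a freshly created reference object,
 *
 *     union acpi_operand_object *ref;
 *     acpi_status status;
 *
 *     status = acpi_ex_get_object_reference (obj_desc, &ref, walk_state);
 *     if (ACPI_SUCCESS (status)) {
 *         use ref: ref->reference.opcode is AML_REF_OF_OP, and
 *         ref->reference.object points at the referenced node or object
 *     }
 *
 * while on any failure *return_desc is left NULL, as set on entry.]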
* ******************************************************************************/ acpi_status acpi_ex_get_object_reference ( - acpi_operand_object *obj_desc, - acpi_operand_object **return_desc, - acpi_walk_state *walk_state) + union acpi_operand_object *obj_desc, + union acpi_operand_object **return_desc, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; + union acpi_operand_object *reference_obj; + union acpi_operand_object *referenced_obj; - FUNCTION_TRACE_PTR ("Ex_get_object_reference", obj_desc); + ACPI_FUNCTION_TRACE_PTR ("ex_get_object_reference", obj_desc); - if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_INTERNAL)) { - if (obj_desc->common.type != INTERNAL_TYPE_REFERENCE) { - *return_desc = NULL; - status = AE_TYPE; - goto cleanup; + *return_desc = NULL; + + switch (ACPI_GET_DESCRIPTOR_TYPE (obj_desc)) { + case ACPI_DESC_TYPE_OPERAND: + + if (ACPI_GET_OBJECT_TYPE (obj_desc) != ACPI_TYPE_LOCAL_REFERENCE) { + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } /* - * Not a Name -- an indirect name pointer would have - * been converted to a direct name pointer in Acpi_ex_resolve_operands + * Must be a reference to a Local or Arg */ switch (obj_desc->reference.opcode) { case AML_LOCAL_OP: case AML_ARG_OP: - *return_desc = (void *) acpi_ds_method_data_get_node (obj_desc->reference.opcode, - obj_desc->reference.offset, walk_state); + /* The referenced object is the pseudo-node for the local/arg */ + + referenced_obj = obj_desc->reference.object; break; default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "(Internal) Unknown Ref subtype %02x\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown Reference subtype %X\n", obj_desc->reference.opcode)); - *return_desc = NULL; - status = AE_AML_INTERNAL; - goto cleanup; + return_ACPI_STATUS (AE_AML_INTERNAL); } + break; - } - else if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_NAMED)) { - /* Must be a named object; Just return the Node */ + case ACPI_DESC_TYPE_NAMED: - *return_desc = obj_desc; + /* + * A named reference that has already been resolved to a Node + */ + referenced_obj = obj_desc; + break; + + + default: + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid descriptor type %X in %p\n", + ACPI_GET_DESCRIPTOR_TYPE (obj_desc), obj_desc)); + return_ACPI_STATUS (AE_TYPE); } - else { - *return_desc = NULL; - status = AE_TYPE; + + /* Create a new reference object */ + + reference_obj = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_REFERENCE); + if (!reference_obj) { + return_ACPI_STATUS (AE_NO_MEMORY); } + reference_obj->reference.opcode = AML_REF_OF_OP; + reference_obj->reference.object = referenced_obj; + *return_desc = reference_obj; -cleanup: + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Object %p Type [%s], returning Reference %p\n", + obj_desc, acpi_ut_get_object_type_name (obj_desc), *return_desc)); - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p Ref=%p\n", obj_desc, *return_desc)); - return_ACPI_STATUS (status); + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ex_do_concatenate + * FUNCTION: acpi_ex_concat_template * - * PARAMETERS: *Obj_desc - Object to be converted. Must be an + * PARAMETERS: *obj_desc - Object to be converted. 
Must be an * Integer, Buffer, or String - * Walk_state - Current walk state + * walk_state - Current walk state + * + * RETURN: Status + * + * DESCRIPTION: Concatenate two resource templates + * + ******************************************************************************/ + +acpi_status +acpi_ex_concat_template ( + union acpi_operand_object *obj_desc1, + union acpi_operand_object *obj_desc2, + union acpi_operand_object **actual_return_desc, + struct acpi_walk_state *walk_state) +{ + union acpi_operand_object *return_desc; + u8 *new_buf; + u8 *end_tag1; + u8 *end_tag2; + acpi_size length1; + acpi_size length2; + + + ACPI_FUNCTION_TRACE ("ex_concat_template"); + + + /* Find the end_tags in each resource template */ + + end_tag1 = acpi_ut_get_resource_end_tag (obj_desc1); + end_tag2 = acpi_ut_get_resource_end_tag (obj_desc2); + if (!end_tag1 || !end_tag2) { + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); + } + + /* Compute the length of each part */ + + length1 = ACPI_PTR_DIFF (end_tag1, obj_desc1->buffer.pointer); + length2 = ACPI_PTR_DIFF (end_tag2, obj_desc2->buffer.pointer) + + 2; /* Size of END_TAG */ + + /* Create a new buffer object for the result */ + + return_desc = acpi_ut_create_buffer_object (length1 + length2); + if (!return_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* Copy the templates to the new descriptor */ + + new_buf = return_desc->buffer.pointer; + ACPI_MEMCPY (new_buf, obj_desc1->buffer.pointer, length1); + ACPI_MEMCPY (new_buf + length1, obj_desc2->buffer.pointer, length2); + + /* Compute the new checksum */ + + new_buf[return_desc->buffer.length - 1] = + acpi_ut_generate_checksum (return_desc->buffer.pointer, + (return_desc->buffer.length - 1)); + + /* Return the completed template descriptor */ + + *actual_return_desc = return_desc; + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ex_do_concatenate + * + * PARAMETERS: obj_desc1 - First source object + * obj_desc2 - Second source object + * actual_return_desc - Where to place the return object + * walk_state - Current walk state * * RETURN: Status * @@ -127,75 +234,54 @@ acpi_status acpi_ex_do_concatenate ( - acpi_operand_object *obj_desc, - acpi_operand_object *obj_desc2, - acpi_operand_object **actual_return_desc, - acpi_walk_state *walk_state) + union acpi_operand_object *obj_desc1, + union acpi_operand_object *obj_desc2, + union acpi_operand_object **actual_return_desc, + struct acpi_walk_state *walk_state) { - acpi_status status; - u32 i; - acpi_integer this_integer; - acpi_operand_object *return_desc; - NATIVE_CHAR *new_buf; - u32 integer_size = sizeof (acpi_integer); + acpi_status status; + u32 i; + acpi_integer this_integer; + union acpi_operand_object *return_desc; + char *new_buf; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* * There are three cases to handle: - * 1) Two Integers concatenated to produce a buffer - * 2) Two Strings concatenated to produce a string - * 3) Two Buffers concatenated to produce a buffer + * + * 1) Two Integers concatenated to produce a new Buffer + * 2) Two Strings concatenated to produce a new String + * 3) Two Buffers concatenated to produce a new Buffer */ - switch (obj_desc->common.type) { + switch (ACPI_GET_OBJECT_TYPE (obj_desc1)) { case ACPI_TYPE_INTEGER: - /* Handle both ACPI 1.0 and ACPI 2.0 Integer widths */ - - if (walk_state->method_node->flags & ANOBJ_DATA_WIDTH_32) { - /* - * We are running a method that exists in a 32-bit ACPI table. 
- * Truncate the value to 32 bits by zeroing out the upper - * 32-bit field - */ - integer_size = sizeof (u32); - } - - /* Result of two integers is a buffer */ + /* Result of two Integers is a Buffer */ + /* Need enough buffer space for two integers */ - return_desc = acpi_ut_create_internal_object (ACPI_TYPE_BUFFER); + return_desc = acpi_ut_create_buffer_object (acpi_gbl_integer_byte_width * 2); if (!return_desc) { return (AE_NO_MEMORY); } - /* Need enough space for two integers */ - - return_desc->buffer.length = integer_size * 2; - new_buf = ACPI_MEM_CALLOCATE (return_desc->buffer.length); - if (!new_buf) { - REPORT_ERROR - (("Ex_do_concatenate: Buffer allocation failure\n")); - status = AE_NO_MEMORY; - goto cleanup; - } - - return_desc->buffer.pointer = (u8 *) new_buf; + new_buf = (char *) return_desc->buffer.pointer; /* Convert the first integer */ - this_integer = obj_desc->integer.value; - for (i = 0; i < integer_size; i++) { - new_buf[i] = (u8) this_integer; + this_integer = obj_desc1->integer.value; + for (i = 0; i < acpi_gbl_integer_byte_width; i++) { + new_buf[i] = (char) this_integer; this_integer >>= 8; } /* Convert the second integer */ this_integer = obj_desc2->integer.value; - for (; i < (integer_size * 2); i++) { - new_buf[i] = (u8) this_integer; + for (; i < (ACPI_MUL_2 (acpi_gbl_integer_byte_width)); i++) { + new_buf[i] = (char) this_integer; this_integer >>= 8; } @@ -204,6 +290,8 @@ case ACPI_TYPE_STRING: + /* Result of two Strings is a String */ + return_desc = acpi_ut_create_internal_object (ACPI_TYPE_STRING); if (!return_desc) { return (AE_NO_MEMORY); @@ -211,66 +299,60 @@ /* Operand0 is string */ - new_buf = ACPI_MEM_ALLOCATE (obj_desc->string.length + - obj_desc2->string.length + 1); + new_buf = ACPI_MEM_CALLOCATE ((acpi_size) obj_desc1->string.length + + (acpi_size) obj_desc2->string.length + 1); if (!new_buf) { - REPORT_ERROR - (("Ex_do_concatenate: String allocation failure\n")); + ACPI_REPORT_ERROR + (("ex_do_concatenate: String allocation failure\n")); status = AE_NO_MEMORY; goto cleanup; } - STRCPY (new_buf, obj_desc->string.pointer); - STRCPY (new_buf + obj_desc->string.length, + /* Concatenate the strings */ + + ACPI_STRCPY (new_buf, obj_desc1->string.pointer); + ACPI_STRCPY (new_buf + obj_desc1->string.length, obj_desc2->string.pointer); - /* Point the return object to the new string */ + /* Complete the String object initialization */ return_desc->string.pointer = new_buf; - return_desc->string.length = obj_desc->string.length += - obj_desc2->string.length; + return_desc->string.length = obj_desc1->string.length + + obj_desc2->string.length; break; case ACPI_TYPE_BUFFER: - /* Operand0 is a buffer */ + /* Result of two Buffers is a Buffer */ - return_desc = acpi_ut_create_internal_object (ACPI_TYPE_BUFFER); + return_desc = acpi_ut_create_buffer_object ( + (acpi_size) obj_desc1->buffer.length + + (acpi_size) obj_desc2->buffer.length); if (!return_desc) { return (AE_NO_MEMORY); } - new_buf = ACPI_MEM_ALLOCATE (obj_desc->buffer.length + - obj_desc2->buffer.length); - if (!new_buf) { - REPORT_ERROR - (("Ex_do_concatenate: Buffer allocation failure\n")); - status = AE_NO_MEMORY; - goto cleanup; - } + new_buf = (char *) return_desc->buffer.pointer; - MEMCPY (new_buf, obj_desc->buffer.pointer, - obj_desc->buffer.length); - MEMCPY (new_buf + obj_desc->buffer.length, obj_desc2->buffer.pointer, - obj_desc2->buffer.length); + /* Concatenate the buffers */ - /* - * Point the return object to the new buffer - */ + ACPI_MEMCPY (new_buf, obj_desc1->buffer.pointer, + 
obj_desc1->buffer.length); + ACPI_MEMCPY (new_buf + obj_desc1->buffer.length, obj_desc2->buffer.pointer, + obj_desc2->buffer.length); - return_desc->buffer.pointer = (u8 *) new_buf; - return_desc->buffer.length = obj_desc->buffer.length + - obj_desc2->buffer.length; break; default: + + /* Invalid object type, should not happen here */ + status = AE_AML_INTERNAL; return_desc = NULL; } - *actual_return_desc = return_desc; return (AE_OK); @@ -284,11 +366,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_do_math_op + * FUNCTION: acpi_ex_do_math_op * * PARAMETERS: Opcode - AML opcode * Operand0 - Integer operand #0 - * Operand0 - Integer operand #1 + * Operand1 - Integer operand #1 * * RETURN: Integer result of the operation * @@ -300,9 +382,9 @@ acpi_integer acpi_ex_do_math_op ( - u16 opcode, - acpi_integer operand0, - acpi_integer operand1) + u16 opcode, + acpi_integer operand0, + acpi_integer operand1) { @@ -342,12 +424,12 @@ return (operand0 * operand1); - case AML_SHIFT_LEFT_OP: /* Shift_left (Operand, Shift_count, Result) */ + case AML_SHIFT_LEFT_OP: /* shift_left (Operand, shift_count, Result) */ return (operand0 << operand1); - case AML_SHIFT_RIGHT_OP: /* Shift_right (Operand, Shift_count, Result) */ + case AML_SHIFT_RIGHT_OP: /* shift_right (Operand, shift_count, Result) */ return (operand0 >> operand1); @@ -365,11 +447,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_do_logical_op + * FUNCTION: acpi_ex_do_logical_op * * PARAMETERS: Opcode - AML opcode * Operand0 - Integer operand #0 - * Operand0 - Integer operand #1 + * Operand1 - Integer operand #1 * * RETURN: TRUE/FALSE result of the operation * @@ -386,9 +468,9 @@ u8 acpi_ex_do_logical_op ( - u16 opcode, - acpi_integer operand0, - acpi_integer operand1) + u16 opcode, + acpi_integer operand0, + acpi_integer operand1) { @@ -432,6 +514,9 @@ return (TRUE); } break; + + default: + break; } return (FALSE); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exmutex.c linux.21rc5-ac2/drivers/acpi/executer/exmutex.c --- linux.21rc5/drivers/acpi/executer/exmutex.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exmutex.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,107 +2,136 @@ /****************************************************************************** * * Module Name: exmutex - ASL Mutex Acquire/Release functions - * $Revision: 7 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
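 *
 * [Editor's note on the hunks below, not part of the patch: mutex
 * bookkeeping moves from the walk state onto a per-thread
 * struct acpi_thread_state. The acquire-side deadlock check, sketched
 * with the fields this patch introduces:
 *
 *     if (walk_state->thread->current_sync_level >
 *             obj_desc->mutex.sync_level) {
 *         return (AE_AML_MUTEX_ORDER);    refuse out-of-order acquire
 *     }
 *
 * so a thread may only take mutexes whose sync level is at or above its
 * current level, which rules out lock-order inversions between methods.]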
*/ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "achware.h" -#include "acevents.h" +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exmutex") + ACPI_MODULE_NAME ("exmutex") /******************************************************************************* * - * FUNCTION: Acpi_ex_unlink_mutex + * FUNCTION: acpi_ex_unlink_mutex * - * PARAMETERS: *Obj_desc - The mutex to be unlinked + * PARAMETERS: *obj_desc - The mutex to be unlinked * * RETURN: Status * - * DESCRIPTION: Remove a mutex from the "Acquired_mutex" list + * DESCRIPTION: Remove a mutex from the "acquired_mutex" list * ******************************************************************************/ void acpi_ex_unlink_mutex ( - acpi_operand_object *obj_desc) + union acpi_operand_object *obj_desc) { + struct acpi_thread_state *thread = obj_desc->mutex.owner_thread; + + + if (!thread) { + return; + } if (obj_desc->mutex.next) { (obj_desc->mutex.next)->mutex.prev = obj_desc->mutex.prev; } + if (obj_desc->mutex.prev) { (obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next; } + else { + thread->acquired_mutex_list = obj_desc->mutex.next; + } } /******************************************************************************* * - * FUNCTION: Acpi_ex_link_mutex + * FUNCTION: acpi_ex_link_mutex * - * PARAMETERS: *Obj_desc - The mutex to be linked - * *List_head - head of the "Acquired_mutex" list + * PARAMETERS: *obj_desc - The mutex to be linked + * *list_head - head of the "acquired_mutex" list * * RETURN: Status * - * DESCRIPTION: Add a mutex to the "Acquired_mutex" list for this walk + * DESCRIPTION: Add a mutex to the "acquired_mutex" list for this walk * ******************************************************************************/ void acpi_ex_link_mutex ( - acpi_operand_object *obj_desc, - acpi_operand_object *list_head) + union acpi_operand_object *obj_desc, + struct acpi_thread_state *thread) { + union acpi_operand_object *list_head; + + + list_head = thread->acquired_mutex_list; /* This object will be the first object in the list */ - obj_desc->mutex.prev = list_head; - obj_desc->mutex.next = list_head->mutex.next; + obj_desc->mutex.prev = NULL; + obj_desc->mutex.next = list_head; /* Update old first object to point back to this object */ - if (list_head->mutex.next) { - (list_head->mutex.next)->mutex.prev = obj_desc; + if (list_head) { + list_head->mutex.prev = obj_desc; } /* Update list head */ - list_head->mutex.next = obj_desc; + thread->acquired_mutex_list = obj_desc; } /******************************************************************************* * - * FUNCTION: Acpi_ex_acquire_mutex + * FUNCTION: acpi_ex_acquire_mutex * - * PARAMETERS: *Time_desc - The 'time to delay' object descriptor - * *Obj_desc - The object descriptor for this op + * PARAMETERS: *time_desc - The 'time to delay' object descriptor + * *obj_desc - The object descriptor for this op * * RETURN: Status * @@ -112,32 +141,48 @@ acpi_status acpi_ex_acquire_mutex ( - acpi_operand_object *time_desc, - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state) + union acpi_operand_object *time_desc, + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state) { - acpi_status status; + acpi_status status; + + ACPI_FUNCTION_TRACE_PTR ("ex_acquire_mutex", obj_desc); - FUNCTION_TRACE_PTR ("Ex_acquire_mutex", obj_desc); if (!obj_desc) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + /* Sanity check -- we must have a valid thread ID */ + + if (!walk_state->thread) { + ACPI_REPORT_ERROR (("Cannot 
acquire Mutex [%4.4s], null thread info\n", + obj_desc->mutex.node->name.ascii)); + return_ACPI_STATUS (AE_AML_INTERNAL); + } + /* * Current Sync must be less than or equal to the sync level of the * mutex. This mechanism provides some deadlock prevention */ - if (walk_state->current_sync_level > obj_desc->mutex.sync_level) { + if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { + ACPI_REPORT_ERROR (("Cannot acquire Mutex [%4.4s], incorrect sync_level\n", + obj_desc->mutex.node->name.ascii)); return_ACPI_STATUS (AE_AML_MUTEX_ORDER); } /* - * If the mutex is already owned by this thread, - * just increment the acquisition depth + * Support for multiple acquires by the owning thread */ - if (obj_desc->mutex.owner == walk_state) { + + if ((obj_desc->mutex.owner_thread) && + (obj_desc->mutex.owner_thread->thread_id == walk_state->thread->thread_id)) { + /* + * The mutex is already owned by this thread, + * just increment the acquisition depth + */ obj_desc->mutex.acquisition_depth++; return_ACPI_STATUS (AE_OK); } @@ -146,21 +191,21 @@ status = acpi_ex_system_acquire_mutex (time_desc, obj_desc); if (ACPI_FAILURE (status)) { - /* Includes failure from a timeout on Time_desc */ + /* Includes failure from a timeout on time_desc */ return_ACPI_STATUS (status); } /* Have the mutex, update mutex and walk info */ - obj_desc->mutex.owner = walk_state; + obj_desc->mutex.owner_thread = walk_state->thread; obj_desc->mutex.acquisition_depth = 1; - walk_state->current_sync_level = obj_desc->mutex.sync_level; - /* Link the mutex to the walk state for force-unlock at method exit */ + walk_state->thread->current_sync_level = obj_desc->mutex.sync_level; - acpi_ex_link_mutex (obj_desc, (acpi_operand_object *) - &(walk_state->walk_list->acquired_mutex_list)); + /* Link the mutex to the current thread for force-unlock at method exit */ + + acpi_ex_link_mutex (obj_desc, walk_state->thread); return_ACPI_STATUS (AE_OK); } @@ -168,9 +213,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_release_mutex + * FUNCTION: acpi_ex_release_mutex * - * PARAMETERS: *Obj_desc - The object descriptor for this op + * PARAMETERS: *obj_desc - The object descriptor for this op * * RETURN: Status * @@ -180,13 +225,13 @@ acpi_status acpi_ex_release_mutex ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state) + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Ex_release_mutex"); + ACPI_FUNCTION_TRACE ("ex_release_mutex"); if (!obj_desc) { @@ -195,13 +240,28 @@ /* The mutex must have been previously acquired in order to release it */ - if (!obj_desc->mutex.owner) { + if (!obj_desc->mutex.owner_thread) { + ACPI_REPORT_ERROR (("Cannot release Mutex [%4.4s], not acquired\n", + obj_desc->mutex.node->name.ascii)); return_ACPI_STATUS (AE_AML_MUTEX_NOT_ACQUIRED); } + /* Sanity check -- we must have a valid thread ID */ + + if (!walk_state->thread) { + ACPI_REPORT_ERROR (("Cannot release Mutex [%4.4s], null thread info\n", + obj_desc->mutex.node->name.ascii)); + return_ACPI_STATUS (AE_AML_INTERNAL); + } + /* The Mutex is owned, but this thread must be the owner */ - if (obj_desc->mutex.owner != walk_state) { + if (obj_desc->mutex.owner_thread->thread_id != walk_state->thread->thread_id) { + ACPI_REPORT_ERROR (( + "Thread %X cannot release Mutex [%4.4s] acquired by thread %X\n", + walk_state->thread->thread_id, + obj_desc->mutex.node->name.ascii, + 
obj_desc->mutex.owner_thread->thread_id)); return_ACPI_STATUS (AE_AML_NOT_OWNER); } @@ -209,7 +269,9 @@ * The sync level of the mutex must be less than or * equal to the current sync level */ - if (obj_desc->mutex.sync_level > walk_state->current_sync_level) { + if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) { + ACPI_REPORT_ERROR (("Cannot release Mutex [%4.4s], incorrect sync_level\n", + obj_desc->mutex.node->name.ascii)); return_ACPI_STATUS (AE_AML_MUTEX_ORDER); } @@ -223,6 +285,9 @@ return_ACPI_STATUS (AE_OK); } + /* Unlink the mutex from the owner's list */ + + acpi_ex_unlink_mutex (obj_desc); /* Release the mutex */ @@ -230,12 +295,8 @@ /* Update the mutex and walk state */ - obj_desc->mutex.owner = NULL; - walk_state->current_sync_level = obj_desc->mutex.sync_level; - - /* Unlink the mutex from the owner's list */ - - acpi_ex_unlink_mutex (obj_desc); + obj_desc->mutex.owner_thread = NULL; + walk_state->thread->current_sync_level = obj_desc->mutex.sync_level; return_ACPI_STATUS (status); } @@ -243,9 +304,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_release_all_mutexes + * FUNCTION: acpi_ex_release_all_mutexes * - * PARAMETERS: *Mutex_list - Head of the mutex list + * PARAMETERS: *mutex_list - Head of the mutex list * * RETURN: Status * @@ -253,15 +314,16 @@ * ******************************************************************************/ -acpi_status +void acpi_ex_release_all_mutexes ( - acpi_operand_object *list_head) + struct acpi_thread_state *thread) { - acpi_operand_object *next = list_head->mutex.next; - acpi_operand_object *this; + union acpi_operand_object *next = thread->acquired_mutex_list; + union acpi_operand_object *this; + acpi_status status; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* @@ -271,19 +333,21 @@ this = next; next = this->mutex.next; - /* Mark mutex un-owned */ - - this->mutex.owner = NULL; - this->mutex.prev = NULL; - this->mutex.next = NULL; - this->mutex.acquisition_depth = 0; + this->mutex.acquisition_depth = 1; + this->mutex.prev = NULL; + this->mutex.next = NULL; /* Release the mutex */ - acpi_ex_system_release_mutex (this); - } + status = acpi_ex_system_release_mutex (this); + if (ACPI_FAILURE (status)) { + continue; + } + + /* Mark mutex unowned */ - return (AE_OK); + this->mutex.owner_thread = NULL; + } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exnames.c linux.21rc5-ac2/drivers/acpi/executer/exnames.c --- linux.21rc5/drivers/acpi/executer/exnames.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exnames.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,36 +2,53 @@ /****************************************************************************** * * Module Name: exnames - interpreter/scanner name load/execute - * $Revision: 83 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exnames") + ACPI_MODULE_NAME ("exnames") /* AML Package Length encodings */ @@ -44,11 +61,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_allocate_name_string + * FUNCTION: acpi_ex_allocate_name_string * - * PARAMETERS: Prefix_count - Count of parent levels. Special cases: + * PARAMETERS: prefix_count - Count of parent levels. Special cases: * (-1) = root, 0 = none - * Num_name_segs - count of 4-character name segments + * num_name_segs - count of 4-character name segments * * RETURN: A pointer to the allocated string segment. This segment must * be deleted by the caller. @@ -58,24 +75,24 @@ * ******************************************************************************/ -NATIVE_CHAR * +char * acpi_ex_allocate_name_string ( - u32 prefix_count, - u32 num_name_segs) + u32 prefix_count, + u32 num_name_segs) { - NATIVE_CHAR *temp_ptr; - NATIVE_CHAR *name_string; - u32 size_needed; + char *temp_ptr; + char *name_string; + u32 size_needed; - FUNCTION_TRACE ("Ex_allocate_name_string"); + ACPI_FUNCTION_TRACE ("ex_allocate_name_string"); /* - * Allow room for all \ and ^ prefixes, all segments, and a Multi_name_prefix. + * Allow room for all \ and ^ prefixes, all segments, and a multi_name_prefix. 
* Also, one byte for the null terminator. * This may actually be somewhat longer than needed. */ - if (prefix_count == (u32) -1) { + if (prefix_count == ACPI_UINT32_MAX) { /* Special case for root */ size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1; @@ -90,7 +107,7 @@ */ name_string = ACPI_MEM_ALLOCATE (size_needed); if (!name_string) { - REPORT_ERROR (("Ex_allocate_name_string: Could not allocate size %d\n", size_needed)); + ACPI_REPORT_ERROR (("ex_allocate_name_string: Could not allocate size %d\n", size_needed)); return_PTR (NULL); } @@ -98,10 +115,9 @@ /* Set up Root or Parent prefixes if needed */ - if (prefix_count == (u32) -1) { + if (prefix_count == ACPI_UINT32_MAX) { *temp_ptr++ = AML_ROOT_PREFIX; } - else { while (prefix_count--) { *temp_ptr++ = AML_PARENT_PREFIX; @@ -117,7 +133,6 @@ *temp_ptr++ = AML_MULTI_NAME_PREFIX_OP; *temp_ptr++ = (char) num_name_segs; } - else if (2 == num_name_segs) { /* Set up dual prefixes */ @@ -125,7 +140,7 @@ } /* - * Terminate string following prefixes. Acpi_ex_name_segment() will + * Terminate string following prefixes. acpi_ex_name_segment() will * append the segment(s) */ *temp_ptr = 0; @@ -135,9 +150,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_name_segment + * FUNCTION: acpi_ex_name_segment * - * PARAMETERS: Interpreter_mode - Current running mode (load1/Load2/Exec) + * PARAMETERS: interpreter_mode - Current running mode (load1/Load2/Exec) * * RETURN: Status * @@ -147,16 +162,16 @@ acpi_status acpi_ex_name_segment ( - u8 **in_aml_address, - NATIVE_CHAR *name_string) + u8 **in_aml_address, + char *name_string) { - u8 *aml_address = *in_aml_address; - acpi_status status = AE_OK; - u32 index; - NATIVE_CHAR char_buf[5]; + char *aml_address = (void *) *in_aml_address; + acpi_status status = AE_OK; + u32 index; + char char_buf[5]; - FUNCTION_TRACE ("Ex_name_segment"); + ACPI_FUNCTION_TRACE ("ex_name_segment"); /* @@ -172,34 +187,32 @@ ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "Bytes from stream:\n")); - for (index = 4; - (index > 0) && (acpi_ut_valid_acpi_character (*aml_address)); - --index) { - char_buf[4 - index] = *aml_address++; - ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "%c\n", char_buf[4 - index])); + for (index = 0; + (index < ACPI_NAME_SIZE) && (acpi_ut_valid_acpi_character (*aml_address)); + index++) { + char_buf[index] = *aml_address++; + ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "%c\n", char_buf[index])); } /* Valid name segment */ - if (0 == index) { + if (index == 4) { /* Found 4 valid characters */ char_buf[4] = '\0'; if (name_string) { - STRCAT (name_string, char_buf); + ACPI_STRCAT (name_string, char_buf); ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Appended to - %s \n", name_string)); } - else { ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "No Name string - %s \n", char_buf)); } } - - else if (4 == index) { + else if (index == 0) { /* * First character was not a valid name character, * so we are looking at something other than a name. 
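[Editor's note, not part of the patch: the allocation and parsing routines
in this file cooperate; acpi_ex_allocate_name_string sizes the buffer and
acpi_ex_name_segment fills it four characters at a time. A self-contained
sketch of the sizing rule and the character test, assuming ACPI_NAME_SIZE
is 4 and that acpi_ut_valid_acpi_character accepts 'A'-'Z', '0'-'9' and '_':

    /* Worst case for a root path: '\' + segments + dual/multi prefix + NUL */
    static u32 name_string_size (u32 num_name_segs)
    {
        return (1 + (4 * num_name_segs) + 2 + 1);   /* ACPI_NAME_SIZE == 4 */
    }

    /* Assumed behavior of acpi_ut_valid_acpi_character () */
    static int valid_acpi_character (char c)
    {
        return ((c >= 'A' && c <= 'Z') ||
                (c >= '0' && c <= '9') ||
                (c == '_'));
    }

With these, a two-segment root path such as "\ABCD.EFGH" needs
1 + 8 + 2 + 1 = 12 bytes, matching size_needed in the hunk above.]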
@@ -209,7 +222,6 @@ char_buf[0])); status = AE_CTRL_PENDING; } - else { /* Segment started with one or more valid characters, but fewer than 4 */ @@ -218,16 +230,16 @@ *aml_address, aml_address)); } - *in_aml_address = aml_address; + *in_aml_address = (u8 *) aml_address; return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ex_get_name_string + * FUNCTION: acpi_ex_get_name_string * - * PARAMETERS: Data_type - Data type to be associated with this name + * PARAMETERS: data_type - Data type to be associated with this name * * RETURN: Status * @@ -235,30 +247,28 @@ * ******************************************************************************/ - acpi_status acpi_ex_get_name_string ( - acpi_object_type8 data_type, - u8 *in_aml_address, - NATIVE_CHAR **out_name_string, - u32 *out_name_length) + acpi_object_type data_type, + u8 *in_aml_address, + char **out_name_string, + u32 *out_name_length) { - acpi_status status = AE_OK; - u8 *aml_address = in_aml_address; - NATIVE_CHAR *name_string = NULL; - u32 num_segments; - u32 prefix_count = 0; - u8 prefix = 0; - u8 has_prefix = FALSE; - - - FUNCTION_TRACE_PTR ("Ex_get_name_string", aml_address); - - - if (INTERNAL_TYPE_REGION_FIELD == data_type || - INTERNAL_TYPE_BANK_FIELD == data_type || - INTERNAL_TYPE_INDEX_FIELD == data_type) { - /* Disallow prefixes for types associated with Field_unit names */ + acpi_status status = AE_OK; + u8 *aml_address = in_aml_address; + char *name_string = NULL; + u32 num_segments; + u32 prefix_count = 0; + u8 has_prefix = FALSE; + + + ACPI_FUNCTION_TRACE_PTR ("ex_get_name_string", aml_address); + + + if (ACPI_TYPE_LOCAL_REGION_FIELD == data_type || + ACPI_TYPE_LOCAL_BANK_FIELD == data_type || + ACPI_TYPE_LOCAL_INDEX_FIELD == data_type) { + /* Disallow prefixes for types associated with field_unit names */ name_string = acpi_ex_allocate_name_string (0, 1); if (!name_string) { @@ -268,24 +278,22 @@ status = acpi_ex_name_segment (&aml_address, name_string); } } - else { /* - * Data_type is not a field name. + * data_type is not a field name. 
* Examine first character of name for root or parent prefix operators */ switch (*aml_address) { - case AML_ROOT_PREFIX: - prefix = *aml_address++; - ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "Root_prefix: %x\n", prefix)); + ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "root_prefix(\\) at %p\n", aml_address)); /* - * Remember that we have a Root_prefix -- - * see comment in Acpi_ex_allocate_name_string() + * Remember that we have a root_prefix -- + * see comment in acpi_ex_allocate_name_string() */ - prefix_count = (u32) -1; + aml_address++; + prefix_count = ACPI_UINT32_MAX; has_prefix = TRUE; break; @@ -295,18 +303,21 @@ /* Increment past possibly multiple parent prefixes */ do { - prefix = *aml_address++; - ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "Parent_prefix: %x\n", prefix)); + ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "parent_prefix (^) at %p\n", aml_address)); - ++prefix_count; + aml_address++; + prefix_count++; } while (*aml_address == AML_PARENT_PREFIX); + has_prefix = TRUE; break; default: + /* Not a prefix character */ + break; } @@ -314,12 +325,11 @@ /* Examine first character of name for name segment prefix operator */ switch (*aml_address) { - case AML_DUAL_NAME_PREFIX: - prefix = *aml_address++; - ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "Dual_name_prefix: %x\n", prefix)); + ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "dual_name_prefix at %p\n", aml_address)); + aml_address++; name_string = acpi_ex_allocate_name_string (prefix_count, 2); if (!name_string) { status = AE_NO_MEMORY; @@ -339,12 +349,12 @@ case AML_MULTI_NAME_PREFIX_OP: - prefix = *aml_address++; - ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "Multi_name_prefix: %x\n", prefix)); + ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, "multi_name_prefix at %p\n", aml_address)); /* Fetch count of segments remaining in name path */ - num_segments = *aml_address++; + aml_address++; + num_segments = *aml_address; name_string = acpi_ex_allocate_name_string (prefix_count, num_segments); if (!name_string) { @@ -354,11 +364,12 @@ /* Indicate that we processed a prefix */ + aml_address++; has_prefix = TRUE; while (num_segments && (status = acpi_ex_name_segment (&aml_address, name_string)) == AE_OK) { - --num_segments; + num_segments--; } break; @@ -366,10 +377,10 @@ case 0: - /* Null_name valid as of 8-12-98 ASL/AML Grammar Update */ + /* null_name valid as of 8-12-98 ASL/AML Grammar Update */ - if (-1 == prefix_count) { - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Name_seg is \"\\\" followed by NULL\n")); + if (prefix_count == ACPI_UINT32_MAX) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "name_seg is \"\\\" followed by NULL\n")); } /* Consume the NULL byte */ @@ -396,20 +407,17 @@ status = acpi_ex_name_segment (&aml_address, name_string); break; - - } /* Switch (Peek_op ()) */ + } } - if (AE_CTRL_PENDING == status && has_prefix) { /* Ran out of segments after processing a prefix */ - REPORT_ERROR ( - ("Ex_do_name: Malformed Name at %p\n", name_string)); + ACPI_REPORT_ERROR ( + ("ex_do_name: Malformed Name at %p\n", name_string)); status = AE_AML_BAD_NAME; } - *out_name_string = name_string; *out_name_length = (u32) (aml_address - in_aml_address); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exoparg1.c linux.21rc5-ac2/drivers/acpi/executer/exoparg1.c --- linux.21rc5/drivers/acpi/executer/exoparg1.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exoparg1.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,39 +2,57 @@ /****************************************************************************** * * Module Name: exoparg1 - AML execution - opcodes with 1 
argument - * $Revision: 120 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" +#include +#include +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exoparg1") + ACPI_MODULE_NAME ("exoparg1") /*! @@ -59,12 +77,11 @@ * fully resolved operands. 
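 *
 * [Editor's note, not part of the patch: the xA_yT_zR suffix encodes
 * x operands ("Arguments"), y targets, and z return values. In the
 * hunks below, AML_SLEEP_OP is handled by acpi_ex_opcode_1A_0T_0R
 * (one argument, nothing stored, nothing returned), AML_LOAD_OP by
 * acpi_ex_opcode_1A_1T_0R, and AML_BIT_NOT_OP by acpi_ex_opcode_1A_1T_1R
 * (one argument, one target, one return object).]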
!*/ - /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_1A_0T_0R + * FUNCTION: acpi_ex_opcode_1A_0T_0R * - * PARAMETERS: Walk_state - Current state (contains AML opcode) + * PARAMETERS: walk_state - Current state (contains AML opcode) * * RETURN: Status * @@ -75,45 +92,45 @@ acpi_status acpi_ex_opcode_1A_0T_0R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_status status = AE_OK; + union acpi_operand_object **operand = &walk_state->operands[0]; + acpi_status status = AE_OK; - FUNCTION_TRACE_STR ("Ex_opcode_1A_0T_0R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_1A_0T_0R", acpi_ps_get_opcode_name (walk_state->opcode)); - /* Examine the opcode */ + /* Examine the AML opcode */ switch (walk_state->opcode) { - case AML_RELEASE_OP: /* Release (Mutex_object) */ + case AML_RELEASE_OP: /* Release (mutex_object) */ status = acpi_ex_release_mutex (operand[0], walk_state); break; - case AML_RESET_OP: /* Reset (Event_object) */ + case AML_RESET_OP: /* Reset (event_object) */ status = acpi_ex_system_reset_event (operand[0]); break; - case AML_SIGNAL_OP: /* Signal (Event_object) */ + case AML_SIGNAL_OP: /* Signal (event_object) */ status = acpi_ex_system_signal_event (operand[0]); break; - case AML_SLEEP_OP: /* Sleep (Msec_time) */ + case AML_SLEEP_OP: /* Sleep (msec_time) */ - acpi_ex_system_do_suspend ((u32) operand[0]->integer.value); + status = acpi_ex_system_do_suspend ((u32) operand[0]->integer.value); break; - case AML_STALL_OP: /* Stall (Usec_time) */ + case AML_STALL_OP: /* Stall (usec_time) */ - acpi_ex_system_do_stall ((u32) operand[0]->integer.value); + status = acpi_ex_system_do_stall ((u32) operand[0]->integer.value); break; @@ -125,7 +142,7 @@ default: /* Unknown opcode */ - REPORT_ERROR (("Acpi_ex_opcode_1A_0T_0R: Unknown opcode %X\n", + ACPI_REPORT_ERROR (("acpi_ex_opcode_1A_0T_0R: Unknown opcode %X\n", walk_state->opcode)); status = AE_AML_BAD_OPCODE; break; @@ -137,9 +154,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_1A_1T_0R + * FUNCTION: acpi_ex_opcode_1A_1T_0R * - * PARAMETERS: Walk_state - Current state (contains AML opcode) + * PARAMETERS: walk_state - Current state (contains AML opcode) * * RETURN: Status * @@ -150,25 +167,26 @@ acpi_status acpi_ex_opcode_1A_1T_0R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; - acpi_operand_object **operand = &walk_state->operands[0]; + acpi_status status = AE_OK; + union acpi_operand_object **operand = &walk_state->operands[0]; - FUNCTION_TRACE_STR ("Ex_opcode_1A_1T_0R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_1A_1T_0R", acpi_ps_get_opcode_name (walk_state->opcode)); - switch (walk_state->opcode) { + /* Examine the AML opcode */ + switch (walk_state->opcode) { case AML_LOAD_OP: - status = acpi_ex_load_op (operand[0], operand[1]); + status = acpi_ex_load_op (operand[0], operand[1], walk_state); break; default: /* Unknown opcode */ - REPORT_ERROR (("Acpi_ex_opcode_1A_1T_0R: Unknown opcode %X\n", + ACPI_REPORT_ERROR (("acpi_ex_opcode_1A_1T_0R: Unknown opcode %X\n", walk_state->opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; @@ -183,9 +201,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_1A_1T_1R + * FUNCTION: 
acpi_ex_opcode_1A_1T_1R * - * PARAMETERS: Walk_state - Current state (contains AML opcode) + * PARAMETERS: walk_state - Current state (contains AML opcode) * * RETURN: Status * @@ -196,22 +214,22 @@ acpi_status acpi_ex_opcode_1A_1T_1R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_operand_object *return_desc = NULL; - acpi_operand_object *return_desc2 = NULL; - u32 temp32; - u32 i; - u32 j; - acpi_integer digit; + acpi_status status = AE_OK; + union acpi_operand_object **operand = &walk_state->operands[0]; + union acpi_operand_object *return_desc = NULL; + union acpi_operand_object *return_desc2 = NULL; + u32 temp32; + u32 i; + u32 j; + acpi_integer digit; - FUNCTION_TRACE_STR ("Ex_opcode_1A_1T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_1A_1T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); - /* Create a return object of type Integer for most opcodes */ + /* Examine the AML opcode */ switch (walk_state->opcode) { case AML_BIT_NOT_OP: @@ -221,156 +239,153 @@ case AML_TO_BCD_OP: case AML_COND_REF_OF_OP: + /* Create a return object of type Integer for these opcodes */ + return_desc = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } - break; - } + switch (walk_state->opcode) { + case AML_BIT_NOT_OP: /* Not (Operand, Result) */ + return_desc->integer.value = ~operand[0]->integer.value; + break; - switch (walk_state->opcode) { - case AML_BIT_NOT_OP: /* Not (Operand, Result) */ + case AML_FIND_SET_LEFT_BIT_OP: /* find_set_left_bit (Operand, Result) */ - return_desc->integer.value = ~operand[0]->integer.value; - break; + return_desc->integer.value = operand[0]->integer.value; + /* + * Acpi specification describes Integer type as a little + * endian unsigned value, so this boundary condition is valid. + */ + for (temp32 = 0; return_desc->integer.value && temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) { + return_desc->integer.value >>= 1; + } - case AML_FIND_SET_LEFT_BIT_OP: /* Find_set_left_bit (Operand, Result) */ + return_desc->integer.value = temp32; + break; - return_desc->integer.value = operand[0]->integer.value; + case AML_FIND_SET_RIGHT_BIT_OP: /* find_set_right_bit (Operand, Result) */ - /* - * Acpi specification describes Integer type as a little - * endian unsigned value, so this boundary condition is valid. - */ - for (temp32 = 0; return_desc->integer.value && temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) { - return_desc->integer.value >>= 1; - } + return_desc->integer.value = operand[0]->integer.value; - return_desc->integer.value = temp32; - break; + /* + * The Acpi specification describes Integer type as a little + * endian unsigned value, so this boundary condition is valid. + */ + for (temp32 = 0; return_desc->integer.value && temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) { + return_desc->integer.value <<= 1; + } + /* Since the bit position is one-based, subtract from 33 (65) */ - case AML_FIND_SET_RIGHT_BIT_OP: /* Find_set_right_bit (Operand, Result) */ + return_desc->integer.value = temp32 == 0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32; + break; - return_desc->integer.value = operand[0]->integer.value; + case AML_FROM_BCD_OP: /* from_bcd (BCDValue, Result) */ - /* - * The Acpi specification describes Integer type as a little - * endian unsigned value, so this boundary condition is valid. 
- */ - for (temp32 = 0; return_desc->integer.value && temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) { - return_desc->integer.value <<= 1; - } + /* + * The 64-bit ACPI integer can hold 16 4-bit BCD integers + */ + return_desc->integer.value = 0; + for (i = 0; i < ACPI_MAX_BCD_DIGITS; i++) { + /* Get one BCD digit */ - /* Since the bit position is one-based, subtract from 33 (65) */ + digit = (acpi_integer) ((operand[0]->integer.value >> (i * 4)) & 0xF); - return_desc->integer.value = temp32 == 0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32; - break; + /* Check the range of the digit */ + + if (digit > 9) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "BCD digit too large: %d\n", + (u32) digit)); + status = AE_AML_NUMERIC_OVERFLOW; + goto cleanup; + } + if (digit > 0) { + /* Sum into the result with the appropriate power of 10 */ - case AML_FROM_BCD_OP: /* From_bcd (BCDValue, Result) */ + for (j = 0; j < i; j++) { + digit *= 10; + } - /* - * The 64-bit ACPI integer can hold 16 4-bit BCD integers - */ - return_desc->integer.value = 0; - for (i = 0; i < ACPI_MAX_BCD_DIGITS; i++) { - /* Get one BCD digit */ + return_desc->integer.value += digit; + } + } + break; - digit = (acpi_integer) ((operand[0]->integer.value >> (i * 4)) & 0xF); - /* Check the range of the digit */ + case AML_TO_BCD_OP: /* to_bcd (Operand, Result) */ - if (digit > 9) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "BCD digit too large: %d\n", - (u32) digit)); + if (operand[0]->integer.value > ACPI_MAX_BCD_VALUE) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "BCD overflow: %8.8X%8.8X\n", + ACPI_HIDWORD(operand[0]->integer.value), + ACPI_LODWORD(operand[0]->integer.value))); status = AE_AML_NUMERIC_OVERFLOW; goto cleanup; } - if (digit > 0) { - /* Sum into the result with the appropriate power of 10 */ + return_desc->integer.value = 0; + for (i = 0; i < ACPI_MAX_BCD_DIGITS; i++) { + /* Divide by nth factor of 10 */ + temp32 = 0; + digit = operand[0]->integer.value; for (j = 0; j < i; j++) { - digit *= 10; + (void) acpi_ut_short_divide (&digit, 10, &digit, &temp32); } - return_desc->integer.value += digit; - } - } - break; + /* Create the BCD digit from the remainder above */ + if (digit > 0) { + return_desc->integer.value += ((acpi_integer) temp32 << (i * 4)); + } + } + break; - case AML_TO_BCD_OP: /* To_bcd (Operand, Result) */ - if (operand[0]->integer.value > ACPI_MAX_BCD_VALUE) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "BCD overflow: %8.8X%8.8X\n", - HIDWORD(operand[0]->integer.value), LODWORD(operand[0]->integer.value))); - status = AE_AML_NUMERIC_OVERFLOW; - goto cleanup; - } + case AML_COND_REF_OF_OP: /* cond_ref_of (source_object, Result) */ - return_desc->integer.value = 0; - for (i = 0; i < ACPI_MAX_BCD_DIGITS; i++) { - /* Divide by nth factor of 10 */ - - temp32 = 0; - digit = operand[0]->integer.value; - for (j = 0; j < i; j++) { - acpi_ut_short_divide (&digit, 10, &digit, &temp32); + /* + * This op is a little strange because the internal return value is + * different than the return value stored in the result descriptor + * (There are really two return values) + */ + if ((struct acpi_namespace_node *) operand[0] == acpi_gbl_root_node) { + /* + * This means that the object does not exist in the namespace, + * return FALSE + */ + return_desc->integer.value = 0; + goto cleanup; } - /* Create the BCD digit from the remainder above */ + /* Get the object reference, store it, and remove our reference */ - if (digit > 0) { - return_desc->integer.value += (temp32 << (i * 4)); + status = acpi_ex_get_object_reference (operand[0], &return_desc2, walk_state); 
+ if (ACPI_FAILURE (status)) { + goto cleanup; } - } - break; - - case AML_COND_REF_OF_OP: /* Cond_ref_of (Source_object, Result) */ + status = acpi_ex_store (return_desc2, operand[1], walk_state); + acpi_ut_remove_reference (return_desc2); - /* - * This op is a little strange because the internal return value is - * different than the return value stored in the result descriptor - * (There are really two return values) - */ - if ((acpi_namespace_node *) operand[0] == acpi_gbl_root_node) { - /* - * This means that the object does not exist in the namespace, - * return FALSE - */ - return_desc->integer.value = 0; + /* The object exists in the namespace, return TRUE */ - /* - * Must delete the result descriptor since there is no reference - * being returned - */ - acpi_ut_remove_reference (operand[1]); + return_desc->integer.value = ACPI_INTEGER_MAX; goto cleanup; - } - /* Get the object reference and store it */ - status = acpi_ex_get_object_reference (operand[0], &return_desc2, walk_state); - if (ACPI_FAILURE (status)) { - goto cleanup; + default: + /* No other opcodes get here */ + break; } - - status = acpi_ex_store (return_desc2, operand[1], walk_state); - - /* The object exists in the namespace, return TRUE */ - - return_desc->integer.value = ACPI_INTEGER_MAX; - goto cleanup; break; @@ -386,16 +401,19 @@ return_ACPI_STATUS (status); } - /* - * Normally, we would remove a reference on the Operand[0] parameter; - * But since it is being used as the internal return object - * (meaning we would normally increment it), the two cancel out, - * and we simply don't do anything. - */ - walk_state->result_obj = operand[0]; - walk_state->operands[0] = NULL; /* Prevent deletion */ + /* It is possible that the Store already produced a return object */ + + if (!walk_state->result_obj) { + /* + * Normally, we would remove a reference on the Operand[0] parameter; + * But since it is being used as the internal return object + * (meaning we would normally increment it), the two cancel out, + * and we simply don't do anything. 
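For reference, the FromBCD/ToBCD hunks above are ordinary packed-BCD conversion over the 64-bit ACPI integer, which holds ACPI_MAX_BCD_DIGITS (16) four-bit digits. The same arithmetic as a standalone sketch, without the ACPI status plumbing (hypothetical names):

#include <stdint.h>

#define MAX_BCD_DIGITS 16	/* a 64-bit integer holds 16 4-bit BCD digits */

/* Packed BCD -> binary; nonzero return on an invalid digit, the case the
 * patch reports as AE_AML_NUMERIC_OVERFLOW. */
static int from_bcd(uint64_t bcd, uint64_t *out)
{
	uint64_t result = 0;
	uint64_t power = 1;
	int i;

	for (i = 0; i < MAX_BCD_DIGITS; i++) {
		uint64_t digit = (bcd >> (i * 4)) & 0xF;

		if (digit > 9)
			return -1;	/* BCD digit too large */

		result += digit * power;
		power *= 10;
	}

	*out = result;
	return 0;
}

/* Binary -> packed BCD; the caller must ensure the value fits in 16
 * decimal digits (the patch checks against ACPI_MAX_BCD_VALUE first). */
static uint64_t to_bcd(uint64_t value)
{
	uint64_t result = 0;
	int i;

	for (i = 0; i < MAX_BCD_DIGITS; i++) {
		result |= (value % 10) << (i * 4);
		value /= 10;
	}
	return result;
}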
+ */ + walk_state->result_obj = operand[0]; + walk_state->operands[0] = NULL; /* Prevent deletion */ + } return_ACPI_STATUS (status); - break; /* @@ -403,58 +421,54 @@ */ case AML_COPY_OP: /* Copy (Source, Target) */ - status = AE_NOT_IMPLEMENTED; - goto cleanup; + status = acpi_ut_copy_iobject_to_iobject (operand[0], &return_desc, walk_state); break; - case AML_TO_DECSTRING_OP: /* To_decimal_string (Data, Result) */ + case AML_TO_DECSTRING_OP: /* to_decimal_string (Data, Result) */ status = acpi_ex_convert_to_string (operand[0], &return_desc, 10, ACPI_UINT32_MAX, walk_state); break; - case AML_TO_HEXSTRING_OP: /* To_hex_string (Data, Result) */ + case AML_TO_HEXSTRING_OP: /* to_hex_string (Data, Result) */ status = acpi_ex_convert_to_string (operand[0], &return_desc, 16, ACPI_UINT32_MAX, walk_state); break; - case AML_TO_BUFFER_OP: /* To_buffer (Data, Result) */ + case AML_TO_BUFFER_OP: /* to_buffer (Data, Result) */ status = acpi_ex_convert_to_buffer (operand[0], &return_desc, walk_state); break; - case AML_TO_INTEGER_OP: /* To_integer (Data, Result) */ + case AML_TO_INTEGER_OP: /* to_integer (Data, Result) */ status = acpi_ex_convert_to_integer (operand[0], &return_desc, walk_state); break; - /* - * These are two obsolete opcodes - */ - case AML_SHIFT_LEFT_BIT_OP: /* Shift_left_bit (Source, Bit_num) */ - case AML_SHIFT_RIGHT_BIT_OP: /* Shift_right_bit (Source, Bit_num) */ - + case AML_SHIFT_LEFT_BIT_OP: /* shift_left_bit (Source, bit_num) */ + case AML_SHIFT_RIGHT_BIT_OP: /* shift_right_bit (Source, bit_num) */ + /* + * These are two obsolete opcodes + */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s is obsolete and not implemented\n", acpi_ps_get_opcode_name (walk_state->opcode))); status = AE_SUPPORT; goto cleanup; - break; default: /* Unknown opcode */ - REPORT_ERROR (("Acpi_ex_opcode_1A_1T_1R: Unknown opcode %X\n", + ACPI_REPORT_ERROR (("acpi_ex_opcode_1A_1T_1R: Unknown opcode %X\n", walk_state->opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; } - /* * Store the return value computed above into the target object */ @@ -463,7 +477,9 @@ cleanup: - walk_state->result_obj = return_desc; + if (!walk_state->result_obj) { + walk_state->result_obj = return_desc; + } /* Delete return object on error */ @@ -477,9 +493,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_1A_0T_1R + * FUNCTION: acpi_ex_opcode_1A_0T_1R * - * PARAMETERS: Walk_state - Current state (contains AML opcode) + * PARAMETERS: walk_state - Current state (contains AML opcode) * * RETURN: Status * @@ -489,23 +505,22 @@ acpi_status acpi_ex_opcode_1A_0T_1R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_operand_object *temp_desc; - acpi_operand_object *return_desc = NULL; - acpi_status status = AE_OK; - u32 type; - acpi_integer value; + union acpi_operand_object **operand = &walk_state->operands[0]; + union acpi_operand_object *temp_desc; + union acpi_operand_object *return_desc = NULL; + acpi_status status = AE_OK; + u32 type; + acpi_integer value; - FUNCTION_TRACE_STR ("Ex_opcode_1A_0T_0R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_1A_0T_0R", acpi_ps_get_opcode_name (walk_state->opcode)); - /* Get the operand and decode the opcode */ + /* Examine the AML opcode */ switch (walk_state->opcode) { - case AML_LNOT_OP: /* LNot (Operand) */ return_desc = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); @@ -523,18 +538,18 @@ /* * Since we 
are expecting a Reference operand, it - * can be either a Node or an internal object. + * can be either a NS Node or an internal object. */ return_desc = operand[0]; - if (VALID_DESCRIPTOR_TYPE (operand[0], ACPI_DESC_TYPE_INTERNAL)) { + if (ACPI_GET_DESCRIPTOR_TYPE (operand[0]) == ACPI_DESC_TYPE_OPERAND) { /* Internal reference object - prevent deletion */ acpi_ut_add_reference (return_desc); } /* - * Convert the Return_desc Reference to a Number - * (This removes a reference on the Return_desc object) + * Convert the return_desc Reference to a Number + * (This removes a reference on the return_desc object) */ status = acpi_ex_resolve_operands (AML_LNOT_OP, &return_desc, walk_state); if (ACPI_FAILURE (status)) { @@ -545,7 +560,7 @@ } /* - * Return_desc is now guaranteed to be an Integer object + * return_desc is now guaranteed to be an Integer object * Do the actual increment or decrement */ if (AML_INCREMENT_OP == walk_state->opcode) { @@ -561,83 +576,13 @@ break; - case AML_TYPE_OP: /* Object_type (Source_object) */ - - if (INTERNAL_TYPE_REFERENCE == operand[0]->common.type) { - /* - * Not a Name -- an indirect name pointer would have - * been converted to a direct name pointer in Resolve_operands - */ - switch (operand[0]->reference.opcode) { - case AML_ZERO_OP: - case AML_ONE_OP: - case AML_ONES_OP: - case AML_REVISION_OP: - - /* Constants are of type Integer */ - - type = ACPI_TYPE_INTEGER; - break; - - - case AML_DEBUG_OP: - - /* Per 1.0b spec, Debug object is of type "Debug_object" */ - - type = ACPI_TYPE_DEBUG_OBJECT; - break; - - - case AML_INDEX_OP: - - /* Get the type of this reference (index into another object) */ - - type = operand[0]->reference.target_type; - if (type == ACPI_TYPE_PACKAGE) { - /* - * The main object is a package, we want to get the type - * of the individual package element that is referenced by - * the index. - */ - type = (*(operand[0]->reference.where))->common.type; - } - - break; - - - case AML_LOCAL_OP: - case AML_ARG_OP: - - type = acpi_ds_method_data_get_type (operand[0]->reference.opcode, - operand[0]->reference.offset, walk_state); - break; - - - default: - - REPORT_ERROR (("Acpi_ex_opcode_1A_0T_1R/Type_op: Internal error - Unknown Reference subtype %X\n", - operand[0]->reference.opcode)); - status = AE_AML_INTERNAL; - goto cleanup; - } - } - - else { - /* - * It's not a Reference, so it must be a direct name pointer. - */ - type = acpi_ns_get_type ((acpi_namespace_node *) operand[0]); + case AML_TYPE_OP: /* object_type (source_object) */ - /* Convert internal types to external types */ - - switch (type) { - case INTERNAL_TYPE_REGION_FIELD: - case INTERNAL_TYPE_BANK_FIELD: - case INTERNAL_TYPE_INDEX_FIELD: - - type = ACPI_TYPE_FIELD_UNIT; - } + /* Get the type of the base object */ + status = acpi_ex_resolve_multiple (walk_state, operand[0], &type, NULL); + if (ACPI_FAILURE (status)) { + goto cleanup; } /* Allocate a descriptor to hold the type. 
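The ObjectType rewrite just above drops the long open-coded switch over reference sub-opcodes in favour of acpi_ex_resolve_multiple(), which chases a chain of references down to the base object before the type is reported. A toy model of that idea (hypothetical tagged-union types, not the ACPICA structures):

#include <stddef.h>

enum obj_type { TYPE_INTEGER, TYPE_BUFFER, TYPE_PACKAGE, TYPE_REFERENCE };

struct object {
	enum obj_type type;
	const struct object *target;	/* valid only for TYPE_REFERENCE */
};

/* Follow reference objects down to the base object, in the spirit of
 * acpi_ex_resolve_multiple(); returns NULL for a broken chain. */
static const struct object *resolve_base_object(const struct object *obj)
{
	while (obj && obj->type == TYPE_REFERENCE)
		obj = obj->target;

	return obj;
}

ObjectType then just reports the base object's type, which is presumably what lets the patch delete the per-sub-opcode cases for Zero/One/Ones, Debug, Index, Local and Arg.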
*/ @@ -652,44 +597,38 @@ break; - case AML_SIZE_OF_OP: /* Size_of (Source_object) */ + case AML_SIZE_OF_OP: /* size_of (source_object) */ - temp_desc = operand[0]; - if (VALID_DESCRIPTOR_TYPE (operand[0], ACPI_DESC_TYPE_NAMED)) { - temp_desc = acpi_ns_get_attached_object ((acpi_namespace_node *) operand[0]); - } + /* Get the base object */ - if (!temp_desc) { - value = 0; + status = acpi_ex_resolve_multiple (walk_state, operand[0], &type, &temp_desc); + if (ACPI_FAILURE (status)) { + goto cleanup; } - else { - switch (temp_desc->common.type) { - case ACPI_TYPE_BUFFER: - value = temp_desc->buffer.length; - break; - - case ACPI_TYPE_STRING: - value = temp_desc->string.length; - break; - - case ACPI_TYPE_PACKAGE: - value = temp_desc->package.count; - break; - - case INTERNAL_TYPE_REFERENCE: - - /* TBD: this must be a reference to a buf/str/pkg?? */ - - value = 4; - break; - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Not Buf/Str/Pkg - found type %X\n", - temp_desc->common.type)); - status = AE_AML_OPERAND_TYPE; - goto cleanup; - } + /* + * Type is guaranteed to be a buffer, string, or package at this + * point (even if the original operand was an object reference, it + * will be resolved and typechecked during operand resolution.) + */ + switch (type) { + case ACPI_TYPE_BUFFER: + value = temp_desc->buffer.length; + break; + + case ACPI_TYPE_STRING: + value = temp_desc->string.length; + break; + + case ACPI_TYPE_PACKAGE: + value = temp_desc->package.count; + break; + + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "size_of, Not Buf/Str/Pkg - found type %s\n", + acpi_ut_get_type_name (type))); + status = AE_AML_OPERAND_TYPE; + goto cleanup; } /* @@ -706,7 +645,7 @@ break; - case AML_REF_OF_OP: /* Ref_of (Source_object) */ + case AML_REF_OF_OP: /* ref_of (source_object) */ status = acpi_ex_get_object_reference (operand[0], &return_desc, walk_state); if (ACPI_FAILURE (status)) { @@ -715,79 +654,113 @@ break; - case AML_DEREF_OF_OP: /* Deref_of (Obj_reference) */ + case AML_DEREF_OF_OP: /* deref_of (obj_reference | String) */ - /* Check for a method local or argument */ + /* Check for a method local or argument, or standalone String */ - if (!VALID_DESCRIPTOR_TYPE (operand[0], ACPI_DESC_TYPE_NAMED)) { - /* - * Must resolve/dereference the local/arg reference first - */ - switch (operand[0]->reference.opcode) { - /* Set Operand[0] to the value of the local/arg */ + if (ACPI_GET_DESCRIPTOR_TYPE (operand[0]) != ACPI_DESC_TYPE_NAMED) { + switch (ACPI_GET_OBJECT_TYPE (operand[0])) { + case ACPI_TYPE_LOCAL_REFERENCE: + /* + * This is a deref_of (local_x | arg_x) + * + * Must resolve/dereference the local/arg reference first + */ + switch (operand[0]->reference.opcode) { + case AML_LOCAL_OP: + case AML_ARG_OP: + + /* Set Operand[0] to the value of the local/arg */ + + status = acpi_ds_method_data_get_value (operand[0]->reference.opcode, + operand[0]->reference.offset, walk_state, &temp_desc); + if (ACPI_FAILURE (status)) { + goto cleanup; + } - case AML_LOCAL_OP: - case AML_ARG_OP: + /* + * Delete our reference to the input object and + * point to the object just retrieved + */ + acpi_ut_remove_reference (operand[0]); + operand[0] = temp_desc; + break; - acpi_ds_method_data_get_value (operand[0]->reference.opcode, - operand[0]->reference.offset, walk_state, &temp_desc); + case AML_REF_OF_OP: - /* - * Delete our reference to the input object and - * point to the object just retrieved - */ - acpi_ut_remove_reference (operand[0]); - operand[0] = temp_desc; - break; + /* Get the object to which the 
reference refers */ - default: + temp_desc = operand[0]->reference.object; + acpi_ut_remove_reference (operand[0]); + operand[0] = temp_desc; + break; - /* Index op - handled below */ - break; - } - } + default: + /* Must be an Index op - handled below */ + break; + } + break; - /* Operand[0] may have changed from the code above */ + case ACPI_TYPE_STRING: - if (VALID_DESCRIPTOR_TYPE (operand[0], ACPI_DESC_TYPE_NAMED)) { - /* Get the actual object from the Node (This is the dereference) */ + /* + * This is a deref_of (String). The string is a reference to a named ACPI object. + * + * 1) Find the owning Node + * 2) Dereference the node to an actual object. Could be a Field, so we need + to resolve the node to a value. + */ + status = acpi_ns_get_node_by_path (operand[0]->string.pointer, + walk_state->scope_info->scope.node, ACPI_NS_SEARCH_PARENT, + ACPI_CAST_INDIRECT_PTR (struct acpi_namespace_node, &return_desc)); + if (ACPI_FAILURE (status)) { + goto cleanup; + } - return_desc = ((acpi_namespace_node *) operand[0])->object; + status = acpi_ex_resolve_node_to_value ( + ACPI_CAST_INDIRECT_PTR (struct acpi_namespace_node, &return_desc), walk_state); + goto cleanup; - /* Returning a pointer to the object, add another reference! */ + default: - acpi_ut_add_reference (return_desc); - } - else { - /* - * This must be a reference object produced by the Index - * ASL operation -- check internal opcode - */ - if ((operand[0]->reference.opcode != AML_INDEX_OP) && - (operand[0]->reference.opcode != AML_REF_OF_OP)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown opcode in ref(%p) - %X\n", - operand[0], operand[0]->reference.opcode)); + status = AE_AML_OPERAND_TYPE; - status = AE_TYPE; goto cleanup; } + } + /* Operand[0] may have changed from the code above */ + if (ACPI_GET_DESCRIPTOR_TYPE (operand[0]) == ACPI_DESC_TYPE_NAMED) { + /* + * This is a deref_of (object_reference) + * Get the actual object from the Node (This is the dereference). + * -- This case may only happen when a local_x or arg_x is dereferenced above. + */ + return_desc = acpi_ns_get_attached_object ((struct acpi_namespace_node *) operand[0]); + } + else { + /* + * This must be a reference object produced by either the Index() or + * ref_of() operator + */ switch (operand[0]->reference.opcode) { case AML_INDEX_OP: /* - * Supported target types for the Index operator are - * 1) A Buffer - * 2) A Package + * The target type for the Index operator must be + * either a Buffer or a Package */ - if (operand[0]->reference.target_type == ACPI_TYPE_BUFFER_FIELD) { + switch (operand[0]->reference.target_type) { + case ACPI_TYPE_BUFFER_FIELD: + + temp_desc = operand[0]->reference.object; + /* - * The target is a buffer, we must create a new object that - * contains one element of the buffer, the element pointed - * to by the index. + * Create a new object that contains one element of the + * buffer -- the element pointed to by the index. * * NOTE: index into a buffer is NOT a pointer to a * sub-buffer of the main buffer, it is only a pointer to a @@ -799,20 +772,21 @@ goto cleanup; } - temp_desc = operand[0]->reference.object; + /* + * Since we are returning the value of the buffer at the + * indexed location, we don't need to add an additional + * reference to the buffer itself. + */ return_desc->integer.value = temp_desc->buffer.pointer[operand[0]->reference.offset]; + break; - /* TBD: [Investigate] (see below) Don't add an additional - * ref! 
- */ - } - else if (operand[0]->reference.target_type == ACPI_TYPE_PACKAGE) { + case ACPI_TYPE_PACKAGE: + /* - * The target is a package, we want to return the referenced - * element of the package. We must add another reference to - * this object, however. + * Return the referenced element of the package. We must add + * another reference to the referenced object, however. */ return_desc = *(operand[0]->reference.where); if (!return_desc) { @@ -821,7 +795,6 @@ * an uninitialized package element and is thus a * severe error. */ - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "NULL package element obj %p\n", operand[0])); status = AE_AML_UNINITIALIZED_ELEMENT; @@ -829,15 +802,16 @@ } acpi_ut_add_reference (return_desc); - } + break; + + + default: - else { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown Target_type %X in obj %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown Index target_type %X in obj %p\n", operand[0]->reference.target_type, operand[0])); status = AE_AML_OPERAND_TYPE; goto cleanup; } - break; @@ -845,19 +819,31 @@ return_desc = operand[0]->reference.object; + if (ACPI_GET_DESCRIPTOR_TYPE (return_desc) == ACPI_DESC_TYPE_NAMED) { + + return_desc = acpi_ns_get_attached_object ((struct acpi_namespace_node *) return_desc); + } + /* Add another reference to the object! */ acpi_ut_add_reference (return_desc); break; + + + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown opcode in ref(%p) - %X\n", + operand[0], operand[0]->reference.opcode)); + + status = AE_TYPE; + goto cleanup; } } - break; default: - REPORT_ERROR (("Acpi_ex_opcode_1A_0T_1R: Unknown opcode %X\n", + ACPI_REPORT_ERROR (("acpi_ex_opcode_1A_0T_1R: Unknown opcode %X\n", walk_state->opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exoparg2.c linux.21rc5-ac2/drivers/acpi/executer/exoparg2.c --- linux.21rc5/drivers/acpi/executer/exoparg2.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exoparg2.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,40 +1,56 @@ /****************************************************************************** * * Module Name: exoparg2 - AML execution - opcodes with 2 arguments - * $Revision: 97 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "acevents.h" -#include "amlcode.h" -#include "acdispat.h" +#include +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exoparg2") + ACPI_MODULE_NAME ("exoparg2") /*! @@ -62,9 +78,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_2A_0T_0R + * FUNCTION: acpi_ex_opcode_2A_0T_0R * - * PARAMETERS: Walk_state - Current walk state + * PARAMETERS: walk_state - Current walk state * * RETURN: Status * @@ -77,57 +93,52 @@ acpi_status acpi_ex_opcode_2A_0T_0R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_namespace_node *node; - acpi_status status = AE_OK; + union acpi_operand_object **operand = &walk_state->operands[0]; + struct acpi_namespace_node *node; + acpi_status status = AE_OK; - FUNCTION_TRACE_STR ("Ex_opcode_2A_0T_0R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_2A_0T_0R", + acpi_ps_get_opcode_name (walk_state->opcode)); /* Examine the opcode */ switch (walk_state->opcode) { - - case AML_NOTIFY_OP: /* Notify (Notify_object, Notify_value) */ + case AML_NOTIFY_OP: /* Notify (notify_object, notify_value) */ /* The first operand is a namespace node */ - node = (acpi_namespace_node *) operand[0]; + node = (struct acpi_namespace_node *) operand[0]; - /* The node must refer to a device or thermal zone */ + /* Notifies allowed on this object? */ - if (node && operand[1]) /* TBD: is this check necessary? */ { - switch (node->type) { - case ACPI_TYPE_DEVICE: - case ACPI_TYPE_THERMAL: - - /* - * Dispatch the notify to the appropriate handler - * NOTE: the request is queued for execution after this method - * completes. The notify handlers are NOT invoked synchronously - * from this thread -- because handlers may in turn run other - * control methods. 
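The Notify hunk above replaces the inline device/thermal-zone switch with a call to acpi_ev_is_notify_object(), so this handler no longer hard-codes which object types may receive a Notify. A sketch of such a predicate, assuming only the two types the removed code accepted (the real helper may well allow more):

#include <stdbool.h>

enum node_type { NT_DEVICE, NT_THERMAL_ZONE, NT_METHOD, NT_REGION };

/* Notify is only meaningful on objects that can have notify handlers
 * attached; the switch this replaces allowed devices and thermal zones. */
static bool is_notify_object(enum node_type type)
{
	switch (type) {
	case NT_DEVICE:
	case NT_THERMAL_ZONE:
		return true;
	default:
		return false;
	}
}

Centralizing the check means the event code and the opcode handler cannot drift apart about what is notifiable.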
- */ - status = acpi_ev_queue_notify_request (node, - (u32) operand[1]->integer.value); - break; - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unexpected notify object type %X\n", - node->type)); + if (!acpi_ev_is_notify_object (node)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unexpected notify object type [%s]\n", + acpi_ut_get_type_name (node->type))); - status = AE_AML_OPERAND_TYPE; - break; - } + status = AE_AML_OPERAND_TYPE; + break; } + + /* + * Dispatch the notify to the appropriate handler + * NOTE: the request is queued for execution after this method + * completes. The notify handlers are NOT invoked synchronously + * from this thread -- because handlers may in turn run other + * control methods. + */ + status = acpi_ev_queue_notify_request (node, + (u32) operand[1]->integer.value); break; + default: - REPORT_ERROR (("Acpi_ex_opcode_2A_0T_0R: Unknown opcode %X\n", walk_state->opcode)); + ACPI_REPORT_ERROR (("acpi_ex_opcode_2A_0T_0R: Unknown opcode %X\n", + walk_state->opcode)); status = AE_AML_BAD_OPCODE; } @@ -137,9 +148,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_2A_2T_1R + * FUNCTION: acpi_ex_opcode_2A_2T_1R * - * PARAMETERS: Walk_state - Current walk state + * PARAMETERS: walk_state - Current walk state * * RETURN: Status * @@ -150,22 +161,22 @@ acpi_status acpi_ex_opcode_2A_2T_1R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_operand_object *return_desc1 = NULL; - acpi_operand_object *return_desc2 = NULL; - acpi_status status; + union acpi_operand_object **operand = &walk_state->operands[0]; + union acpi_operand_object *return_desc1 = NULL; + union acpi_operand_object *return_desc2 = NULL; + acpi_status status; - FUNCTION_TRACE_STR ("Ex_opcode_2A_2T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_2A_2T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); /* * Execute the opcode */ switch (walk_state->opcode) { - case AML_DIVIDE_OP: /* Divide (Dividend, Divisor, Remainder_result Quotient_result) */ + case AML_DIVIDE_OP: /* Divide (Dividend, Divisor, remainder_result quotient_result) */ return_desc1 = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); if (!return_desc1) { @@ -179,7 +190,7 @@ goto cleanup; } - /* Quotient to Return_desc1, remainder to Return_desc2 */ + /* Quotient to return_desc1, remainder to return_desc2 */ status = acpi_ut_divide (&operand[0]->integer.value, &operand[1]->integer.value, &return_desc1->integer.value, &return_desc2->integer.value); @@ -191,11 +202,10 @@ default: - REPORT_ERROR (("Acpi_ex_opcode_2A_2T_1R: Unknown opcode %X\n", + ACPI_REPORT_ERROR (("acpi_ex_opcode_2A_2T_1R: Unknown opcode %X\n", walk_state->opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; - break; } @@ -235,9 +245,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_2A_1T_1R + * FUNCTION: acpi_ex_opcode_2A_1T_1R * - * PARAMETERS: Walk_state - Current walk state + * PARAMETERS: walk_state - Current walk state * * RETURN: Status * @@ -248,16 +258,17 @@ acpi_status acpi_ex_opcode_2A_1T_1R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_operand_object *return_desc = NULL; - acpi_operand_object *temp_desc; - u32 index; - acpi_status status = AE_OK; + union acpi_operand_object **operand = &walk_state->operands[0]; + union 
acpi_operand_object *return_desc = NULL; + union acpi_operand_object *temp_desc = NULL; + u32 index; + acpi_status status = AE_OK; + acpi_size length; - FUNCTION_TRACE_STR ("Ex_opcode_2A_1T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_2A_1T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); /* @@ -280,7 +291,7 @@ switch (walk_state->opcode) { - case AML_MOD_OP: /* Mod (Dividend, Divisor, Remainder_result (ACPI 2.0) */ + case AML_MOD_OP: /* Mod (Dividend, Divisor, remainder_result (ACPI 2.0) */ return_desc = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); if (!return_desc) { @@ -288,11 +299,10 @@ goto cleanup; } - /* Return_desc will contain the remainder */ + /* return_desc will contain the remainder */ status = acpi_ut_divide (&operand[0]->integer.value, &operand[1]->integer.value, NULL, &return_desc->integer.value); - break; @@ -305,17 +315,17 @@ * guaranteed to be either Integer/String/Buffer by the operand * resolution mechanism above. */ - switch (operand[0]->common.type) { + switch (ACPI_GET_OBJECT_TYPE (operand[0])) { case ACPI_TYPE_INTEGER: - status = acpi_ex_convert_to_integer (operand[1], &operand[1], walk_state); + status = acpi_ex_convert_to_integer (operand[1], &temp_desc, walk_state); break; case ACPI_TYPE_STRING: - status = acpi_ex_convert_to_string (operand[1], &operand[1], 16, ACPI_UINT32_MAX, walk_state); + status = acpi_ex_convert_to_string (operand[1], &temp_desc, 16, ACPI_UINT32_MAX, walk_state); break; case ACPI_TYPE_BUFFER: - status = acpi_ex_convert_to_buffer (operand[1], &operand[1], walk_state); + status = acpi_ex_convert_to_buffer (operand[1], &temp_desc, walk_state); break; default: @@ -331,20 +341,69 @@ * (Both are Integer, String, or Buffer), and we can now perform the * concatenation. */ - status = acpi_ex_do_concatenate (operand[0], operand[1], &return_desc, walk_state); + status = acpi_ex_do_concatenate (operand[0], temp_desc, &return_desc, walk_state); + if (temp_desc != operand[1]) { + acpi_ut_remove_reference (temp_desc); + } break; - case AML_TO_STRING_OP: /* To_string (Buffer, Length, Result) (ACPI 2.0) */ + case AML_TO_STRING_OP: /* to_string (Buffer, Length, Result) (ACPI 2.0) */ + + /* + * Input object is guaranteed to be a buffer at this point (it may have + * been converted.) Copy the raw buffer data to a new object of type String. 
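The new ToString implementation that follows copies raw buffer bytes instead of hex-formatting them: the copy stops at the requested Length, at the end of the buffer, or at the first NUL byte, and a Length of zero is treated as "no limit" (the patch substitutes ACPI_INTEGER_MAX). The core of that calculation as a standalone sketch (hypothetical names, plain libc):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Returns a freshly allocated, NUL-terminated string built from the
 * buffer, or NULL on allocation failure. */
static char *buffer_to_string(const uint8_t *buf, size_t buf_len,
			      uint64_t max_chars)
{
	size_t length = 0;
	char *str;

	if (max_chars == 0)
		max_chars = UINT64_MAX;		/* optional-length convention */

	while (length < buf_len && length < max_chars && buf[length])
		length++;

	str = calloc(length + 1, 1);		/* +1 for the NUL terminator */
	if (!str)
		return NULL;

	memcpy(str, buf, length);		/* raw copy, no transform */
	return str;
}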
+ */ + + /* Get the length of the new string */ + + length = 0; + if (operand[1]->integer.value == 0) { + /* Handle optional length value */ + + operand[1]->integer.value = ACPI_INTEGER_MAX; + } + + while ((length < operand[0]->buffer.length) && + (length < operand[1]->integer.value) && + (operand[0]->buffer.pointer[length])) { + length++; + } + + if (length > ACPI_MAX_STRING_CONVERSION) { + status = AE_AML_STRING_LIMIT; + goto cleanup; + } + + /* Create the internal return object */ - status = acpi_ex_convert_to_string (operand[0], &return_desc, 16, - (u32) operand[1]->integer.value, walk_state); + return_desc = acpi_ut_create_internal_object (ACPI_TYPE_STRING); + if (!return_desc) { + status = AE_NO_MEMORY; + goto cleanup; + } + + /* Allocate a new string buffer (Length + 1 for null terminator) */ + + return_desc->string.pointer = ACPI_MEM_CALLOCATE (length + 1); + if (!return_desc->string.pointer) { + status = AE_NO_MEMORY; + goto cleanup; + } + + /* Copy the raw buffer data with no transform */ + + ACPI_MEMCPY (return_desc->string.pointer, operand[0]->buffer.pointer, length); + + /* Set the string length */ + + return_desc->string.length = (u32) length; break; - case AML_CONCAT_RES_OP: /* Concatenate_res_template (Buffer, Buffer, Result) (ACPI 2.0) */ + case AML_CONCAT_RES_OP: /* concatenate_res_template (Buffer, Buffer, Result) (ACPI 2.0) */ - status = AE_NOT_IMPLEMENTED; + status = acpi_ex_concat_template (operand[0], operand[1], &return_desc, walk_state); break; @@ -352,7 +411,7 @@ /* Create the internal return object */ - return_desc = acpi_ut_create_internal_object (INTERNAL_TYPE_REFERENCE); + return_desc = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_REFERENCE); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; @@ -363,73 +422,52 @@ /* * At this point, the Source operand is either a Package or a Buffer */ - if (operand[0]->common.type == ACPI_TYPE_PACKAGE) { + if (ACPI_GET_OBJECT_TYPE (operand[0]) == ACPI_TYPE_PACKAGE) { /* Object to be indexed is a Package */ if (index >= operand[0]->package.count) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Index value beyond package end\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Index value (%X) beyond package end (%X)\n", + index, operand[0]->package.count)); status = AE_AML_PACKAGE_LIMIT; goto cleanup; } - if ((operand[2]->common.type == INTERNAL_TYPE_REFERENCE) && - (operand[2]->reference.opcode == AML_ZERO_OP)) { - /* - * There is no actual result descriptor (the Zero_op Result - * descriptor is a placeholder), so just delete the placeholder and - * return a reference to the package element - */ - acpi_ut_remove_reference (operand[2]); - } - - else { - /* - * Each element of the package is an internal object. Get the one - * we are after. - */ - temp_desc = operand[0]->package.elements [index]; - return_desc->reference.opcode = AML_INDEX_OP; - return_desc->reference.target_type = temp_desc->common.type; - return_desc->reference.object = temp_desc; - - status = acpi_ex_store (return_desc, operand[2], walk_state); - return_desc->reference.object = NULL; - } - - /* - * The local return object must always be a reference to the package element, - * not the element itself. 
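The Index hunks around here build a reference object that records the indexed slot rather than the value: for a package the reference keeps both the element and its slot (Where), so later stores can go back through it, while for a buffer it keeps the buffer plus a byte offset, and dereferencing produces a fresh Integer holding that one byte, never a sub-buffer. A toy model of that shape (not the ACPICA structures):

#include <stdint.h>

struct toy_index_ref {
	int is_package;
	union {
		void **where;			/* package: slot holding the element */
		struct {
			const uint8_t *data;	/* buffer: object and byte offset */
			uint32_t offset;
		} buf;
	} u;
};

/* Dereferencing a buffer index yields the single indexed byte as an
 * integer value -- not a pointer into the buffer. */
static uint64_t deref_buffer_index(const struct toy_index_ref *ref)
{
	return ref->u.buf.data[ref->u.buf.offset];
}

/* Dereferencing a package index yields the referenced element, which the
 * patch guards against being NULL (an uninitialized package element). */
static void *deref_package_index(const struct toy_index_ref *ref)
{
	return *ref->u.where;
}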
- */ - return_desc->reference.opcode = AML_INDEX_OP; return_desc->reference.target_type = ACPI_TYPE_PACKAGE; + return_desc->reference.object = operand[0]->package.elements [index]; return_desc->reference.where = &operand[0]->package.elements [index]; } - else { /* Object to be indexed is a Buffer */ if (index >= operand[0]->buffer.length) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Index value beyond end of buffer\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Index value (%X) beyond end of buffer (%X)\n", + index, operand[0]->buffer.length)); status = AE_AML_BUFFER_LIMIT; goto cleanup; } - return_desc->reference.opcode = AML_INDEX_OP; return_desc->reference.target_type = ACPI_TYPE_BUFFER_FIELD; - return_desc->reference.object = operand[0]; - return_desc->reference.offset = index; - - status = acpi_ex_store (return_desc, operand[2], walk_state); + return_desc->reference.object = operand[0]; } + /* Complete the Index reference object */ + + return_desc->reference.opcode = AML_INDEX_OP; + return_desc->reference.offset = index; + + /* Store the reference to the Target */ + + status = acpi_ex_store (return_desc, operand[2], walk_state); + + /* Return the reference */ + walk_state->result_obj = return_desc; goto cleanup; - break; default: - REPORT_ERROR (("Acpi_ex_opcode_2A_1T_1R: Unknown opcode %X\n", + ACPI_REPORT_ERROR (("acpi_ex_opcode_2A_1T_1R: Unknown opcode %X\n", walk_state->opcode)); status = AE_AML_BAD_OPCODE; break; @@ -440,7 +478,7 @@ if (ACPI_SUCCESS (status)) { /* - * Store the result of the operation (which is now in Return_desc) into + * Store the result of the operation (which is now in return_desc) into * the Target descriptor. */ status = acpi_ex_store (return_desc, operand[2], walk_state); @@ -448,7 +486,9 @@ goto cleanup; } - walk_state->result_obj = return_desc; + if (!walk_state->result_obj) { + walk_state->result_obj = return_desc; + } } @@ -466,9 +506,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_2A_0T_1R + * FUNCTION: acpi_ex_opcode_2A_0T_1R * - * PARAMETERS: Walk_state - Current walk state + * PARAMETERS: walk_state - Current walk state * * RETURN: Status * @@ -478,15 +518,15 @@ acpi_status acpi_ex_opcode_2A_0T_1R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_operand_object *return_desc = NULL; - acpi_status status = AE_OK; - u8 logical_result = FALSE; + union acpi_operand_object **operand = &walk_state->operands[0]; + union acpi_operand_object *return_desc = NULL; + acpi_status status = AE_OK; + u8 logical_result = FALSE; - FUNCTION_TRACE_STR ("Ex_opcode_2A_0T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_2A_0T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); /* Create the internal return object */ @@ -500,7 +540,7 @@ /* * Execute the Opcode */ - if (walk_state->op_info->flags & AML_LOGICAL) /* Logical_op (Operand0, Operand1) */ { + if (walk_state->op_info->flags & AML_LOGICAL) /* logical_op (Operand0, Operand1) */ { logical_result = acpi_ex_do_logical_op (walk_state->opcode, operand[0]->integer.value, operand[1]->integer.value); @@ -509,7 +549,7 @@ switch (walk_state->opcode) { - case AML_ACQUIRE_OP: /* Acquire (Mutex_object, Timeout) */ + case AML_ACQUIRE_OP: /* Acquire (mutex_object, Timeout) */ status = acpi_ex_acquire_mutex (operand[1], operand[0], walk_state); if (status == AE_TIME) { @@ -519,7 +559,7 @@ break; - case AML_WAIT_OP: /* Wait (Event_object, Timeout) 
*/ + case AML_WAIT_OP: /* Wait (event_object, Timeout) */ status = acpi_ex_system_wait_event (operand[1], operand[0]); if (status == AE_TIME) { @@ -531,16 +571,16 @@ default: - REPORT_ERROR (("Acpi_ex_opcode_2A_0T_1R: Unknown opcode %X\n", walk_state->opcode)); + ACPI_REPORT_ERROR (("acpi_ex_opcode_2A_0T_1R: Unknown opcode %X\n", + walk_state->opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; - break; } store_logical_result: /* - * Set return value to according to Logical_result. logical TRUE (all ones) + * Set return value to according to logical_result. logical TRUE (all ones) * Default is FALSE (zero) */ if (logical_result) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exoparg3.c linux.21rc5-ac2/drivers/acpi/executer/exoparg3.c --- linux.21rc5/drivers/acpi/executer/exoparg3.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exoparg3.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,37 +2,55 @@ /****************************************************************************** * * Module Name: exoparg3 - AML execution - opcodes with 3 arguments - * $Revision: 3 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acparser.h" -#include "amlcode.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exoparg3") + ACPI_MODULE_NAME ("exoparg3") /*! @@ -60,9 +78,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_3A_0T_0R + * FUNCTION: acpi_ex_opcode_3A_0T_0R * - * PARAMETERS: Walk_state - Current walk state + * PARAMETERS: walk_state - Current walk state * * RETURN: Status * @@ -72,27 +90,27 @@ acpi_status acpi_ex_opcode_3A_0T_0R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - ACPI_SIGNAL_FATAL_INFO *fatal; - acpi_status status = AE_OK; + union acpi_operand_object **operand = &walk_state->operands[0]; + struct acpi_signal_fatal_info *fatal; + acpi_status status = AE_OK; - FUNCTION_TRACE_STR ("Ex_opcode_3A_0T_0R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_3A_0T_0R", acpi_ps_get_opcode_name (walk_state->opcode)); switch (walk_state->opcode) { - case AML_FATAL_OP: /* Fatal (Fatal_type Fatal_code Fatal_arg) */ + case AML_FATAL_OP: /* Fatal (fatal_type fatal_code fatal_arg) */ ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Fatal_op: Type %x Code %x Arg %x <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n", + "fatal_op: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n", (u32) operand[0]->integer.value, (u32) operand[1]->integer.value, (u32) operand[2]->integer.value)); - fatal = ACPI_MEM_ALLOCATE (sizeof (ACPI_SIGNAL_FATAL_INFO)); + fatal = ACPI_MEM_ALLOCATE (sizeof (struct acpi_signal_fatal_info)); if (fatal) { fatal->type = (u32) operand[0]->integer.value; fatal->code = (u32) operand[1]->integer.value; @@ -102,7 +120,7 @@ /* * Always signal the OS! 
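The Fatal handler above packs the three operands into a struct acpi_signal_fatal_info and hands it to acpi_os_signal(); with this patch the returned status is captured rather than discarded. The shape of that hand-off as a sketch (the OS hook below is a stand-in, not the real acpi_os_signal):

#include <stdint.h>
#include <stdlib.h>

struct fatal_info {
	uint32_t type;
	uint32_t code;
	uint32_t arg;
};

/* Stand-in for the host OS callback; the real one may not return at all
 * if the OS decides a fatal AML error warrants shutting down. */
static int os_signal_fatal(struct fatal_info *info)
{
	(void) info;
	return 0;
}

static int signal_fatal(uint64_t type, uint64_t code, uint64_t arg)
{
	struct fatal_info *fatal = malloc(sizeof(*fatal));

	if (!fatal)
		return -1;

	/* Truncate to 32 bits exactly as the handler above does */
	fatal->type = (uint32_t) type;
	fatal->code = (uint32_t) code;
	fatal->arg = (uint32_t) arg;

	/* Always signal the OS; ownership of the buffer passes with it */
	return os_signal_fatal(fatal);
}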
*/ - acpi_os_signal (ACPI_SIGNAL_FATAL, fatal); + status = acpi_os_signal (ACPI_SIGNAL_FATAL, fatal); /* Might return while OS is shutting down, just continue */ @@ -112,11 +130,10 @@ default: - REPORT_ERROR (("Acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", + ACPI_REPORT_ERROR (("acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", walk_state->opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; - break; } @@ -128,9 +145,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_3A_1T_1R + * FUNCTION: acpi_ex_opcode_3A_1T_1R * - * PARAMETERS: Walk_state - Current walk state + * PARAMETERS: walk_state - Current walk state * * RETURN: Status * @@ -140,17 +157,17 @@ acpi_status acpi_ex_opcode_3A_1T_1R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_operand_object *return_desc = NULL; - char *buffer; - acpi_status status = AE_OK; - u32 index; - u32 length; + union acpi_operand_object **operand = &walk_state->operands[0]; + union acpi_operand_object *return_desc = NULL; + char *buffer; + acpi_status status = AE_OK; + acpi_native_uint index; + acpi_size length; - FUNCTION_TRACE_STR ("Ex_opcode_3A_1T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_3A_1T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); switch (walk_state->opcode) { @@ -160,7 +177,7 @@ * Create the return object. The Source operand is guaranteed to be * either a String or a Buffer, so just use its type. */ - return_desc = acpi_ut_create_internal_object (operand[0]->common.type); + return_desc = acpi_ut_create_internal_object (ACPI_GET_OBJECT_TYPE (operand[0])); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; @@ -168,8 +185,8 @@ /* Get the Integer values from the objects */ - index = (u32) operand[1]->integer.value; - length = (u32) operand[2]->integer.value; + index = (acpi_native_uint) operand[1]->integer.value; + length = (acpi_size) operand[2]->integer.value; /* * If the index is beyond the length of the String/Buffer, or if the @@ -181,37 +198,36 @@ if ((index + length) > operand[0]->string.length) { - length = operand[0]->string.length - index; + length = (acpi_size) operand[0]->string.length - index; } /* Allocate a new buffer for the String/Buffer */ - buffer = ACPI_MEM_CALLOCATE (length + 1); + buffer = ACPI_MEM_CALLOCATE ((acpi_size) length + 1); if (!buffer) { - return (AE_NO_MEMORY); + status = AE_NO_MEMORY; + goto cleanup; } /* Copy the portion requested */ - MEMCPY (buffer, operand[0]->string.pointer + index, - length); + ACPI_MEMCPY (buffer, operand[0]->string.pointer + index, + length); /* Set the length of the new String/Buffer */ return_desc->string.pointer = buffer; - return_desc->string.length = length; + return_desc->string.length = (u32) length; } - break; default: - REPORT_ERROR (("Acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", + ACPI_REPORT_ERROR (("acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", walk_state->opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; - break; } /* Store the result in the target */ @@ -228,7 +244,9 @@ /* Set the return object and exit */ - walk_state->result_obj = return_desc; + if (!walk_state->result_obj) { + walk_state->result_obj = return_desc; + } return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exoparg6.c linux.21rc5-ac2/drivers/acpi/executer/exoparg6.c --- linux.21rc5/drivers/acpi/executer/exoparg6.c 
2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exoparg6.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,37 +2,55 @@ /****************************************************************************** * * Module Name: exoparg6 - AML execution - opcodes with 6 arguments - * $Revision: 4 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acparser.h" -#include "amlcode.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exoparg6") + ACPI_MODULE_NAME ("exoparg6") /*! 
@@ -60,11 +78,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_do_match + * FUNCTION: acpi_ex_do_match * - * PARAMETERS: Match_op - The AML match operand - * Package_value - Value from the target package - * Match_value - Value to be matched + * PARAMETERS: match_op - The AML match operand + * package_value - Value from the target package + * match_value - Value to be matched * * RETURN: TRUE if the match is successful, FALSE otherwise * @@ -74,9 +92,9 @@ u8 acpi_ex_do_match ( - u32 match_op, - acpi_integer package_value, - acpi_integer match_value) + u32 match_op, + acpi_integer package_value, + acpi_integer match_value) { switch (match_op) { @@ -137,9 +155,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_opcode_6A_0T_1R + * FUNCTION: acpi_ex_opcode_6A_0T_1R * - * PARAMETERS: Walk_state - Current walk state + * PARAMETERS: walk_state - Current walk state * * RETURN: Status * @@ -149,23 +167,23 @@ acpi_status acpi_ex_opcode_6A_0T_1R ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_operand_object **operand = &walk_state->operands[0]; - acpi_operand_object *return_desc = NULL; - acpi_status status = AE_OK; - u32 index; - acpi_operand_object *this_element; + union acpi_operand_object **operand = &walk_state->operands[0]; + union acpi_operand_object *return_desc = NULL; + acpi_status status = AE_OK; + u32 index; + union acpi_operand_object *this_element; - FUNCTION_TRACE_STR ("Ex_opcode_6A_0T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); + ACPI_FUNCTION_TRACE_STR ("ex_opcode_6A_0T_1R", acpi_ps_get_opcode_name (walk_state->opcode)); switch (walk_state->opcode) { case AML_MATCH_OP: /* - * Match (Search_package[0], Match_op1[1], Match_object1[2], - * Match_op2[3], Match_object2[4], Start_index[5]) + * Match (search_package[0], match_op1[1], match_object1[2], + * match_op2[3], match_object2[4], start_index[5]) */ /* Validate match comparison sub-opcodes */ @@ -199,8 +217,9 @@ * Examine each element until a match is found. Within the loop, * "continue" signifies that the current element does not match * and the next should be examined. + * * Upon finding a match, the loop will terminate via "break" at - * the bottom. If it terminates "normally", Match_value will be -1 + * the bottom. If it terminates "normally", match_value will be -1 * (its initial value) indicating that no match was found. When * returned as a Number, this will produce the Ones value as specified. */ @@ -209,27 +228,21 @@ /* * Treat any NULL or non-numeric elements as non-matching. - * TBD [Unhandled] - if an element is a Name, - * should we examine its value? */ if (!this_element || - this_element->common.type != ACPI_TYPE_INTEGER) { + ACPI_GET_OBJECT_TYPE (this_element) != ACPI_TYPE_INTEGER) { continue; } - /* - * Within these switch statements: - * "break" (exit from the switch) signifies a match; - * "continue" (proceed to next iteration of enclosing - * "for" loop) signifies a non-match. + * "continue" (proceed to next iteration of enclosing + * "for" loop) signifies a non-match. 
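The Match loop that follows relies on this control structure: "continue" skips a non-matching element, "break" ends the scan once both comparisons succeed, and the result starts out as -1 (all ones) so a scan with no match returns Ones, as the spec requires. The same structure in miniature (hypothetical sub-opcode encodings):

#include <stdint.h>

enum { MTR, MEQ, MLE, MLT, MGE, MGT };	/* the six match sub-opcodes */

static int do_match(int op, uint64_t pkg_val, uint64_t match_val)
{
	switch (op) {
	case MTR: return 1;			/* always true */
	case MEQ: return pkg_val == match_val;
	case MLE: return pkg_val <= match_val;
	case MLT: return pkg_val < match_val;
	case MGE: return pkg_val >= match_val;
	case MGT: return pkg_val > match_val;
	default: return 0;
	}
}

/* Index of the first element satisfying both predicates, or all-ones
 * (Ones) when nothing matches. */
static uint64_t match(const uint64_t *pkg, uint32_t count, uint32_t start,
		      int op1, uint64_t val1, int op2, uint64_t val2)
{
	uint64_t found = UINT64_MAX;		/* initial -1: no match yet */
	uint32_t i;

	for (i = start; i < count; i++) {
		if (!do_match(op1, pkg[i], val1))
			continue;		/* first comparison failed */
		if (!do_match(op2, pkg[i], val2))
			continue;		/* second comparison failed */

		found = i;			/* both matched */
		break;
	}
	return found;
}

(The real loop also skips NULL or non-Integer package elements before comparing, as the hunk above shows.)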
*/ if (!acpi_ex_do_match ((u32) operand[1]->integer.value, this_element->integer.value, operand[2]->integer.value)) { continue; } - if (!acpi_ex_do_match ((u32) operand[3]->integer.value, this_element->integer.value, operand[4]->integer.value)) { continue; @@ -246,18 +259,16 @@ case AML_LOAD_TABLE_OP: - status = AE_NOT_IMPLEMENTED; - goto cleanup; + status = acpi_ex_load_table_op (walk_state, &return_desc); break; default: - REPORT_ERROR (("Acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", + ACPI_REPORT_ERROR (("acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", walk_state->opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; - break; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exprep.c linux.21rc5-ac2/drivers/acpi/executer/exprep.c --- linux.21rc5/drivers/acpi/executer/exprep.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exprep.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,292 +2,348 @@ /****************************************************************************** * * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities - * $Revision: 99 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acparser.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exprep") + ACPI_MODULE_NAME ("exprep") /******************************************************************************* * - * FUNCTION: Acpi_ex_decode_field_access_type + * FUNCTION: acpi_ex_decode_field_access * * PARAMETERS: Access - Encoded field access bits * Length - Field length. * - * RETURN: Field granularity (8, 16, 32 or 64) + * RETURN: Field granularity (8, 16, 32 or 64) and + * byte_alignment (1, 2, 3, or 4) * - * DESCRIPTION: Decode the Access_type bits of a field definition. + * DESCRIPTION: Decode the access_type bits of a field definition. * ******************************************************************************/ static u32 -acpi_ex_decode_field_access_type ( - u32 access, - u16 length, - u32 *alignment) +acpi_ex_decode_field_access ( + union acpi_operand_object *obj_desc, + u8 field_flags, + u32 *return_byte_alignment) { - PROC_NAME ("Ex_decode_field_access_type"); + u32 access; + u8 byte_alignment; + u8 bit_length; +/* u32 Length; */ + ACPI_FUNCTION_NAME ("ex_decode_field_access"); + + + access = (field_flags & AML_FIELD_ACCESS_TYPE_MASK); + switch (access) { - case ACCESS_ANY_ACC: + case AML_FIELD_ACCESS_ANY: + + byte_alignment = 1; + bit_length = 8; - *alignment = 8; +#if 0 + /* + * TBD: optimize + * + * Any attempt to optimize the access size to the size of the field + * must take into consideration the length of the region and take + * care that an access to the field will not attempt to access + * beyond the end of the region. 
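
A minimal sketch of the optimization this TBD comment describes, kept outside the patch: pick the widest access width whose aligned accesses covering the field all land inside the region. The helper and its names are hypothetical.

/* Hypothetical helper: widest safe access width for a field, given
 * the length of the enclosing region. Not part of the patch. */
#include <stdio.h>

static unsigned
pick_access_bit_width(unsigned field_bit_offset, unsigned field_bit_length,
		      unsigned region_byte_length)
{
	unsigned width;

	for (width = 64; width > 8; width >>= 1) {
		unsigned byte_align = width / 8;

		/* Last aligned access that contains the field's final bit */
		unsigned last_bit  = field_bit_offset + field_bit_length - 1;
		unsigned last_byte = (last_bit / 8) & ~(byte_align - 1);

		/* That access must not run past the end of the region */
		if (last_byte + byte_align <= region_byte_length)
			return width;
	}

	return 8;	/* Byte-size chunks are always safe */
}

int main(void)
{
	/* Bits 30..33 of a 5-byte region span bytes 3 and 4; a word or
	 * dword access reaching byte 4 would overrun the region, so
	 * only byte access is safe and the function returns 8. */
	printf("width = %u\n", pick_access_bit_width(30, 4, 5));
	return 0;
}
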
+ */ /* Use the length to set the access type */ + length = obj_desc->common_field.bit_length; + if (length <= 8) { - return (8); + bit_length = 8; } else if (length <= 16) { - return (16); + bit_length = 16; } else if (length <= 32) { - return (32); + bit_length = 32; } else if (length <= 64) { - return (64); + bit_length = 64; } + else { + /* Larger than Qword - just use byte-size chunks */ - /* Default is 8 (byte) */ - - return (8); + bit_length = 8; + } +#endif break; - case ACCESS_BYTE_ACC: - *alignment = 8; - return (8); + case AML_FIELD_ACCESS_BYTE: + case AML_FIELD_ACCESS_BUFFER: /* ACPI 2.0 (SMBus Buffer) */ + byte_alignment = 1; + bit_length = 8; break; - case ACCESS_WORD_ACC: - *alignment = 16; - return (16); + case AML_FIELD_ACCESS_WORD: + byte_alignment = 2; + bit_length = 16; break; - case ACCESS_DWORD_ACC: - *alignment = 32; - return (32); + case AML_FIELD_ACCESS_DWORD: + byte_alignment = 4; + bit_length = 32; break; - case ACCESS_QWORD_ACC: /* ACPI 2.0 */ - *alignment = 64; - return (64); + case AML_FIELD_ACCESS_QWORD: /* ACPI 2.0 */ + byte_alignment = 8; + bit_length = 64; break; default: /* Invalid field access type */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Unknown field access type %x\n", + "Unknown field access type %X\n", access)); return (0); } + + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_BUFFER_FIELD) { + /* + * buffer_field access can be on any byte boundary, so the + * byte_alignment is always 1 byte -- regardless of any byte_alignment + * implied by the field access type. + */ + byte_alignment = 1; + } + + *return_byte_alignment = byte_alignment; + return (bit_length); } /******************************************************************************* * - * FUNCTION: Acpi_ex_prep_common_field_object + * FUNCTION: acpi_ex_prep_common_field_object * - * PARAMETERS: Obj_desc - The field object - * Field_flags - Access, Lock_rule, and Update_rule. - * The format of a Field_flag is described + * PARAMETERS: obj_desc - The field object + * field_flags - Access, lock_rule, and update_rule. + * The format of a field_flag is described * in the ACPI specification - * Field_bit_position - Field start position - * Field_bit_length - Field length in number of bits + * field_bit_position - Field start position + * field_bit_length - Field length in number of bits * * RETURN: Status * * DESCRIPTION: Initialize the areas of the field object that are common - * to the various types of fields. + * to the various types of fields. Note: This is very "sensitive" + * code because we are solving the general case for field + * alignment. * ******************************************************************************/ acpi_status acpi_ex_prep_common_field_object ( - acpi_operand_object *obj_desc, - u8 field_flags, - u32 field_bit_position, - u32 field_bit_length) + union acpi_operand_object *obj_desc, + u8 field_flags, + u8 field_attribute, + u32 field_bit_position, + u32 field_bit_length) { - u32 access_bit_width; - u32 alignment; - u32 nearest_byte_address; + u32 access_bit_width; + u32 byte_alignment; + u32 nearest_byte_address; - FUNCTION_TRACE ("Ex_prep_common_field_object"); + ACPI_FUNCTION_TRACE ("ex_prep_common_field_object"); /* * Note: the structure being initialized is the - * ACPI_COMMON_FIELD_INFO; No structure fields outside of the common area - * are initialized by this procedure. + * ACPI_COMMON_FIELD_INFO; No structure fields outside of the common + * area are initialized by this procedure. 
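
For reference, the field_flags byte stored above is the raw AML FieldFlags encoding: bits 0-3 select the access type, bit 4 the lock rule, bits 5-6 the update rule. A stand-alone decoder, assuming the usual mask values behind the patch's AML_FIELD_* names:

#include <stdio.h>

#define AML_FIELD_ACCESS_TYPE_MASK	0x0F
#define AML_FIELD_LOCK_RULE_MASK	0x10
#define AML_FIELD_UPDATE_RULE_MASK	0x60

int main(void)
{
	unsigned char field_flags = 0x31;	/* ByteAcc, Lock, WriteAsOnes */

	unsigned access = field_flags & AML_FIELD_ACCESS_TYPE_MASK;		/* 1 */
	unsigned lock   = (field_flags & AML_FIELD_LOCK_RULE_MASK) >> 4;	/* 1 */
	unsigned update = (field_flags & AML_FIELD_UPDATE_RULE_MASK) >> 5;	/* 1 */

	/* access: 0=AnyAcc 1=ByteAcc 2=WordAcc 3=DWordAcc 4=QWordAcc 5=BufferAcc
	 * update: 0=Preserve 1=WriteAsOnes 2=WriteAsZeros */
	printf("access=%u lock=%u update=%u\n", access, lock, update);
	return 0;
}
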
*/ - - /* Demultiplex the Field_flags byte */ - - obj_desc->common_field.lock_rule = (u8) ((field_flags & LOCK_RULE_MASK) - >> LOCK_RULE_SHIFT); - obj_desc->common_field.update_rule = (u8) ((field_flags & UPDATE_RULE_MASK) - >> UPDATE_RULE_SHIFT); - /* Other misc fields */ - - obj_desc->common_field.bit_length = (u16) field_bit_length; + obj_desc->common_field.field_flags = field_flags; + obj_desc->common_field.attribute = field_attribute; + obj_desc->common_field.bit_length = field_bit_length; /* * Decode the access type so we can compute offsets. The access type gives * two pieces of information - the width of each field access and the - * necessary alignment of the access. For Any_acc, the width used is the - * largest necessary/possible in an attempt to access the whole field in one - * I/O operation. However, for Any_acc, the alignment is 8. For all other - * access types (Byte, Word, Dword, Qword), the width is the same as the - * alignment. + * necessary byte_alignment (address granularity) of the access. + * + * For any_acc, the access_bit_width is the largest width that is both + * necessary and possible in an attempt to access the whole field in one + * I/O operation. However, for any_acc, the byte_alignment is always one + * byte. + * + * For all Buffer Fields, the byte_alignment is always one byte. + * + * For all other access types (Byte, Word, Dword, Qword), the Bitwidth is + * the same (equivalent) as the byte_alignment. */ - access_bit_width = acpi_ex_decode_field_access_type ( - ((field_flags & ACCESS_TYPE_MASK) >> ACCESS_TYPE_SHIFT), - obj_desc->field.bit_length, &alignment); + access_bit_width = acpi_ex_decode_field_access (obj_desc, field_flags, + &byte_alignment); if (!access_bit_width) { return_ACPI_STATUS (AE_AML_OPERAND_VALUE); } /* Setup width (access granularity) fields */ - obj_desc->common_field.access_bit_width = (u8) access_bit_width; /* 8, 16, 32, 64 */ - obj_desc->common_field.access_byte_width = (u8) DIV_8 (access_bit_width); /* 1, 2, 4, 8 */ - - if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) { - /* - * Buffer_field access can be on any byte boundary, so the - * alignment is always 8 (regardless of any alignment implied by the - * field access type.) - */ - alignment = 8; - } - + obj_desc->common_field.access_byte_width = (u8) + ACPI_DIV_8 (access_bit_width); /* 1, 2, 4, 8 */ /* - * Base_byte_offset is the address of the start of the field within the region. It is - * the byte address of the first *datum* (field-width data unit) of the field. - * (i.e., the first datum that contains at least the first *bit* of the field.) + * base_byte_offset is the address of the start of the field within the + * region. It is the byte address of the first *datum* (field-width data + * unit) of the field. (i.e., the first datum that contains at least the + * first *bit* of the field.) + * + * Note: byte_alignment is always either equal to the access_bit_width or 8 + * (Byte access), and it defines the addressing granularity of the parent + * region or buffer. */ - nearest_byte_address = ROUND_BITS_DOWN_TO_BYTES (field_bit_position); - obj_desc->common_field.base_byte_offset = ROUND_DOWN (nearest_byte_address, - DIV_8 (alignment)); + nearest_byte_address = + ACPI_ROUND_BITS_DOWN_TO_BYTES (field_bit_position); + obj_desc->common_field.base_byte_offset = + ACPI_ROUND_DOWN (nearest_byte_address, byte_alignment); /* - * Start_field_bit_offset is the offset of the first bit of the field within a field datum. 
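
Plugging concrete numbers into the offset arithmetic above (a worked example, not patch code; macro bodies are paraphrased and assume power-of-two boundaries):

#include <stdio.h>

#define ACPI_DIV_8(a)				((a) >> 3)
#define ACPI_MUL_8(a)				((a) << 3)
#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a)	ACPI_DIV_8(a)
#define ACPI_ROUND_DOWN(value, boundary)	((value) & ~((boundary) - 1))

int main(void)
{
	unsigned field_bit_position = 43;
	unsigned byte_alignment     = 2;	/* word (16-bit) access */

	unsigned nearest_byte =
	    ACPI_ROUND_BITS_DOWN_TO_BYTES(field_bit_position);	/* 5 */
	unsigned base_byte_offset =
	    ACPI_ROUND_DOWN(nearest_byte, byte_alignment);	/* 4 */
	unsigned start_field_bit_offset =
	    field_bit_position - ACPI_MUL_8(base_byte_offset);	/* 11 */

	/* The field's first bit is bit 11 of the 16-bit datum at byte 4 */
	printf("base=%u start_bit=%u\n",
	    base_byte_offset, start_field_bit_offset);
	return 0;
}
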
- * This is calculated as the number of bits from the Base_byte_offset. In other words, - * the start of the field is relative to a byte address, regardless of the access type - * of the field. + * start_field_bit_offset is the offset of the first bit of the field within + * a field datum. */ - obj_desc->common_field.start_field_bit_offset = (u8) (MOD_8 (field_bit_position)); + obj_desc->common_field.start_field_bit_offset = (u8) + (field_bit_position - ACPI_MUL_8 (obj_desc->common_field.base_byte_offset)); /* - * Datum_valid_bits is the number of valid field bits in the first field datum. + * Valid bits -- the number of bits that compose a partial datum, + * 1) At the end of the field within the region (arbitrary starting bit + * offset) + * 2) At the end of a buffer used to contain the field (starting offset + * always zero) */ - obj_desc->common_field.datum_valid_bits = (u8) (access_bit_width - - obj_desc->common_field.start_field_bit_offset); + obj_desc->common_field.end_field_valid_bits = (u8) + ((obj_desc->common_field.start_field_bit_offset + field_bit_length) % + access_bit_width); + /* start_buffer_bit_offset always = 0 */ + + obj_desc->common_field.end_buffer_valid_bits = (u8) + (field_bit_length % access_bit_width); /* - * Valid bits -- the number of bits that compose a partial datum, - * 1) At the end of the field within the region (arbitrary starting bit offset) - * 2) At the end of a buffer used to contain the field (starting offset always zero) + * datum_valid_bits is the number of valid field bits in the first + * field datum. */ - obj_desc->common_field.end_field_valid_bits = (u8) ((obj_desc->common_field.start_field_bit_offset + - field_bit_length) % access_bit_width); - obj_desc->common_field.end_buffer_valid_bits = (u8) (field_bit_length % access_bit_width); /* Start_buffer_bit_offset always = 0 */ - + obj_desc->common_field.datum_valid_bits = (u8) + (access_bit_width - obj_desc->common_field.start_field_bit_offset); /* - * Does the entire field fit within a single field access element - * (datum)? (without crossing a datum boundary) + * Does the entire field fit within a single field access element? (datum) + * (i.e., without crossing a datum boundary) */ - if ((obj_desc->common_field.start_field_bit_offset + obj_desc->common_field.bit_length) <= - (u16) obj_desc->common_field.access_bit_width) { - obj_desc->common_field.access_flags |= AFIELD_SINGLE_DATUM; + if ((obj_desc->common_field.start_field_bit_offset + field_bit_length) <= + (u16) access_bit_width) { + obj_desc->common.flags |= AOPOBJ_SINGLE_DATUM; } - return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ex_prep_field_value + * FUNCTION: acpi_ex_prep_field_value * * PARAMETERS: Node - Owning Node - * Region_node - Region in which field is being defined - * Field_flags - Access, Lock_rule, and Update_rule. - * Field_bit_position - Field start position - * Field_bit_length - Field length in number of bits + * region_node - Region in which field is being defined + * field_flags - Access, lock_rule, and update_rule. + * field_bit_position - Field start position + * field_bit_length - Field length in number of bits * * RETURN: Status * - * DESCRIPTION: Construct an acpi_operand_object of type Def_field and + * DESCRIPTION: Construct an union acpi_operand_object of type def_field and * connect it to the parent Node. 
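
The partial-datum bookkeeping above, with concrete numbers: a 10-bit field whose first bit is bit 11 of a 16-bit datum, so it spills into a second datum (stand-alone illustration):

#include <stdio.h>

int main(void)
{
	unsigned access_bit_width       = 16;
	unsigned start_field_bit_offset = 11;
	unsigned field_bit_length       = 10;

	/* Valid bits in the final datum within the region */
	unsigned end_field_valid_bits =
	    (start_field_bit_offset + field_bit_length) % access_bit_width; /* 5 */

	/* Valid bits in the final datum of a zero-based buffer */
	unsigned end_buffer_valid_bits =
	    field_bit_length % access_bit_width;		/* 10 */

	/* Valid field bits in the first datum */
	unsigned datum_valid_bits =
	    access_bit_width - start_field_bit_offset;		/* 5 */

	/* 11 + 10 > 16: the field crosses a datum boundary, so the
	 * AOPOBJ_SINGLE_DATUM flag would not be set */
	int single_datum =
	    (start_field_bit_offset + field_bit_length) <= access_bit_width;

	printf("end=%u buffer_end=%u first=%u single=%d\n",
	    end_field_valid_bits, end_buffer_valid_bits,
	    datum_valid_bits, single_datum);
	return 0;
}
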
* ******************************************************************************/ acpi_status acpi_ex_prep_field_value ( - ACPI_CREATE_FIELD_INFO *info) + struct acpi_create_field_info *info) { - acpi_operand_object *obj_desc; - u32 type; - acpi_status status; + union acpi_operand_object *obj_desc; + u32 type; + acpi_status status; - FUNCTION_TRACE ("Ex_prep_field_value"); + ACPI_FUNCTION_TRACE ("ex_prep_field_value"); /* Parameter validation */ - if (info->field_type != INTERNAL_TYPE_INDEX_FIELD) { + if (info->field_type != ACPI_TYPE_LOCAL_INDEX_FIELD) { if (!info->region_node) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Null Region_node\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Null region_node\n")); return_ACPI_STATUS (AE_AML_NO_OPERAND); } type = acpi_ns_get_type (info->region_node); if (type != ACPI_TYPE_REGION) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Needed Region, found type %X %s\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Needed Region, found type %X %s\n", type, acpi_ut_get_type_name (type))); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } } - /* Allocate a new region object */ + /* Allocate a new field object */ obj_desc = acpi_ut_create_internal_object (info->field_type); if (!obj_desc) { @@ -296,8 +352,9 @@ /* Initialize areas of the object that are common to all fields */ + obj_desc->common_field.node = info->field_node; status = acpi_ex_prep_common_field_object (obj_desc, info->field_flags, - info->field_bit_position, info->field_bit_length); + info->attribute, info->field_bit_position, info->field_bit_length); if (ACPI_FAILURE (status)) { acpi_ut_delete_object_desc (obj_desc); return_ACPI_STATUS (status); @@ -306,44 +363,48 @@ /* Initialize areas of the object that are specific to the field type */ switch (info->field_type) { - case INTERNAL_TYPE_REGION_FIELD: + case ACPI_TYPE_LOCAL_REGION_FIELD: - obj_desc->field.region_obj = acpi_ns_get_attached_object (info->region_node); + obj_desc->field.region_obj = acpi_ns_get_attached_object (info->region_node); /* An additional reference for the container */ acpi_ut_add_reference (obj_desc->field.region_obj); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Region_field: Bitoff=%X Off=%X Gran=%X Region %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "region_field: Bitoff=%X Off=%X Gran=%X Region %p\n", obj_desc->field.start_field_bit_offset, obj_desc->field.base_byte_offset, - obj_desc->field.access_bit_width, obj_desc->field.region_obj)); + obj_desc->field.access_byte_width, obj_desc->field.region_obj)); break; - case INTERNAL_TYPE_BANK_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: - obj_desc->bank_field.value = info->bank_value; - obj_desc->bank_field.region_obj = acpi_ns_get_attached_object (info->region_node); - obj_desc->bank_field.bank_register_obj = acpi_ns_get_attached_object (info->register_node); + obj_desc->bank_field.value = info->bank_value; + obj_desc->bank_field.region_obj = acpi_ns_get_attached_object (info->region_node); + obj_desc->bank_field.bank_obj = acpi_ns_get_attached_object (info->register_node); /* An additional reference for the attached objects */ acpi_ut_add_reference (obj_desc->bank_field.region_obj); - acpi_ut_add_reference (obj_desc->bank_field.bank_register_obj); + acpi_ut_add_reference (obj_desc->bank_field.bank_obj); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Bank Field: Bit_off=%X Off=%X Gran=%X Region %p Bank_reg %p\n", - obj_desc->bank_field.start_field_bit_offset, obj_desc->bank_field.base_byte_offset, - obj_desc->field.access_bit_width, obj_desc->bank_field.region_obj, - obj_desc->bank_field.bank_register_obj)); + 
ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "Bank Field: bit_off=%X Off=%X Gran=%X Region %p bank_reg %p\n", + obj_desc->bank_field.start_field_bit_offset, + obj_desc->bank_field.base_byte_offset, + obj_desc->field.access_byte_width, + obj_desc->bank_field.region_obj, + obj_desc->bank_field.bank_obj)); break; - case INTERNAL_TYPE_INDEX_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: obj_desc->index_field.index_obj = acpi_ns_get_attached_object (info->register_node); obj_desc->index_field.data_obj = acpi_ns_get_attached_object (info->data_register_node); - obj_desc->index_field.value = (u32) (info->field_bit_position / - obj_desc->field.access_bit_width); + obj_desc->index_field.value = (u32) + (info->field_bit_position / ACPI_MUL_8 (obj_desc->field.access_byte_width)); if (!obj_desc->index_field.data_obj || !obj_desc->index_field.index_obj) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Null Index Object\n")); @@ -355,22 +416,29 @@ acpi_ut_add_reference (obj_desc->index_field.data_obj); acpi_ut_add_reference (obj_desc->index_field.index_obj); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Index_field: bitoff=%X off=%X gran=%X Index %p Data %p\n", - obj_desc->index_field.start_field_bit_offset, obj_desc->index_field.base_byte_offset, - obj_desc->field.access_bit_width, obj_desc->index_field.index_obj, + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, + "index_field: bitoff=%X off=%X gran=%X Index %p Data %p\n", + obj_desc->index_field.start_field_bit_offset, + obj_desc->index_field.base_byte_offset, + obj_desc->field.access_byte_width, + obj_desc->index_field.index_obj, obj_desc->index_field.data_obj)); break; + + default: + /* No other types should get here */ + break; } /* - * Store the constructed descriptor (Obj_desc) into the parent Node, - * preserving the current type of that Named_obj. + * Store the constructed descriptor (obj_desc) into the parent Node, + * preserving the current type of that named_obj. */ status = acpi_ns_attach_object (info->field_node, obj_desc, - (u8) acpi_ns_get_type (info->field_node)); + acpi_ns_get_type (info->field_node)); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "set Named_obj %p (%4.4s) val = %p\n", - info->field_node, (char*)&(info->field_node->name), obj_desc)); + ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD, "set named_obj %p (%4.4s) val = %p\n", + info->field_node, info->field_node->name.ascii, obj_desc)); /* Remove local reference to the object */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exregion.c linux.21rc5-ac2/drivers/acpi/executer/exregion.c --- linux.21rc5/drivers/acpi/executer/exregion.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exregion.c 2003-04-28 17:58:27.000000000 +0100 @@ -1,52 +1,66 @@ /****************************************************************************** * - * Module Name: exregion - ACPI default Op_region (address space) handlers - * $Revision: 61 $ + * Module Name: exregion - ACPI default op_region (address space) handlers * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "achware.h" -#include "acevents.h" +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exregion") + ACPI_MODULE_NAME ("exregion") /******************************************************************************* * - * FUNCTION: Acpi_ex_system_memory_space_handler + * FUNCTION: acpi_ex_system_memory_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write - * Bit_width - Field width in bits (8, 16, or 32) + * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value - * Handler_context - Pointer to Handler's context - * Region_context - Pointer to context specific to the + * handler_context - Pointer to Handler's context + * region_context - Pointer to context specific to the * accessed region * * RETURN: Status @@ -57,20 +71,23 @@ acpi_status acpi_ex_system_memory_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context) + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context) { - acpi_status status = AE_OK; - void *logical_addr_ptr = NULL; - acpi_mem_space_context *mem_info = region_context; - u32 length; + acpi_status status = AE_OK; + void *logical_addr_ptr = NULL; + struct acpi_mem_space_context *mem_info = region_context; + u32 length; + acpi_size window_size; +#ifndef ACPI_MISALIGNED_TRANSFERS + u32 remainder; +#endif - - FUNCTION_TRACE ("Ex_system_memory_space_handler"); + ACPI_FUNCTION_TRACE ("ex_system_memory_space_handler"); /* Validate and translate the bit width */ @@ -88,14 +105,28 @@ length = 4; break; + case 64: + length = 8; + break; + default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid System_memory width %d\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid system_memory width %d\n", bit_width)); return_ACPI_STATUS (AE_AML_OPERAND_VALUE); - break; } +#ifndef ACPI_MISALIGNED_TRANSFERS + /* + * Hardware does not support non-aligned data transfers, we must verify + * the request. + */ + (void) acpi_ut_short_divide ((acpi_integer *) &address, length, NULL, &remainder); + if (remainder != 0) { + return_ACPI_STATUS (AE_AML_ALIGNMENT); + } +#endif + /* * Does the request fit into the cached memory mapping? * Is 1) Address below the current mapping? OR @@ -115,61 +146,80 @@ mem_info->mapped_length); } - mem_info->mapped_length = 0; /* In case of failure below */ + /* + * Don't attempt to map memory beyond the end of the region, and + * constrain the maximum mapping size to something reasonable. 
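
The window clamp that follows can be read in isolation; a minimal sketch, with an illustrative (not authoritative) value for the window-size constant:

#include <stdio.h>

#define ACPI_SYSMEM_REGION_WINDOW_SIZE	(64 * 1024)	/* illustrative */

static unsigned long long
clamp_window(unsigned long long region_base, unsigned long long region_length,
	     unsigned long long address)
{
	/* Map at most to the end of the region... */
	unsigned long long window = (region_base + region_length) - address;

	/* ...and never more than the fixed window size */
	if (window > ACPI_SYSMEM_REGION_WINDOW_SIZE)
		window = ACPI_SYSMEM_REGION_WINDOW_SIZE;
	return window;
}

int main(void)
{
	/* Only 16 bytes left in the region: map just those 16 bytes */
	printf("%llu\n", clamp_window(0x1000, 0x100, 0x10F0));

	/* Plenty of region left: cap at the window size */
	printf("%llu\n", clamp_window(0x100000, 0x800000, 0x100000));
	return 0;
}
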
+ */ + window_size = (acpi_size) ((mem_info->address + mem_info->length) - address); + if (window_size > ACPI_SYSMEM_REGION_WINDOW_SIZE) { + window_size = ACPI_SYSMEM_REGION_WINDOW_SIZE; + } /* Create a new mapping starting at the address given */ - status = acpi_os_map_memory (address, SYSMEM_REGION_WINDOW_SIZE, + status = acpi_os_map_memory (address, window_size, (void **) &mem_info->mapped_logical_address); if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not map memory at %8.8X%8.8X, size %X\n", + ACPI_HIDWORD (address), ACPI_LODWORD (address), (u32) window_size)); + mem_info->mapped_length = 0; return_ACPI_STATUS (status); } /* Save the physical address and mapping size */ mem_info->mapped_physical_address = address; - mem_info->mapped_length = SYSMEM_REGION_WINDOW_SIZE; + mem_info->mapped_length = window_size; } - /* * Generate a logical pointer corresponding to the address we want to * access */ - - /* TBD: should these pointers go to 64-bit in all cases ? */ - logical_addr_ptr = mem_info->mapped_logical_address + ((acpi_integer) address - (acpi_integer) mem_info->mapped_physical_address); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "System_memory %d (%d width) Address=%8.8X%8.8X\n", function, bit_width, - HIDWORD (address), LODWORD (address))); - - /* Perform the memory read or write */ + "system_memory %d (%d width) Address=%8.8X%8.8X\n", function, bit_width, + ACPI_HIDWORD (address), ACPI_LODWORD (address))); + /* + * Perform the memory read or write + * + * Note: For machines that do not support non-aligned transfers, the target + * address was checked for alignment above. We do not attempt to break the + * transfer up into smaller (byte-size) chunks because the AML specifically + * asked for a transfer width that the hardware may require. 
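
On platforms without misaligned-transfer support, the check described above reduces to a remainder test; a stand-alone sketch using plain modulo in place of acpi_ut_short_divide:

#include <stdio.h>

static int
is_aligned(unsigned long long address, unsigned byte_length)
{
	/* A non-zero remainder is what makes the patch return
	 * AE_AML_ALIGNMENT before attempting the transfer */
	return (address % byte_length) == 0;
}

int main(void)
{
	printf("%d\n", is_aligned(0x1004, 4));	/* 1: dword-aligned */
	printf("%d\n", is_aligned(0x1006, 4));	/* 0: misaligned */
	return 0;
}
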
+ */ switch (function) { + case ACPI_READ: - case ACPI_READ_ADR_SPACE: - + *value = 0; switch (bit_width) { case 8: - *value = (u32)* (u8 *) logical_addr_ptr; + *value = (acpi_integer) *((u8 *) logical_addr_ptr); break; case 16: - MOVE_UNALIGNED16_TO_32 (value, logical_addr_ptr); + *value = (acpi_integer) *((u16 *) logical_addr_ptr); break; case 32: - MOVE_UNALIGNED32_TO_32 (value, logical_addr_ptr); + *value = (acpi_integer) *((u32 *) logical_addr_ptr); break; - } +#if ACPI_MACHINE_WIDTH != 16 + case 64: + *value = (acpi_integer) *((u64 *) logical_addr_ptr); + break; +#endif + default: + /* bit_width was already validated */ + break; + } break; - - case ACPI_WRITE_ADR_SPACE: + case ACPI_WRITE: switch (bit_width) { case 8: @@ -177,16 +227,24 @@ break; case 16: - MOVE_UNALIGNED16_TO_16 (logical_addr_ptr, value); + *(u16 *) logical_addr_ptr = (u16) *value; break; case 32: - MOVE_UNALIGNED32_TO_32 (logical_addr_ptr, value); + *(u32 *) logical_addr_ptr = (u32) *value; break; - } - break; +#if ACPI_MACHINE_WIDTH != 16 + case 64: + *(u64 *) logical_addr_ptr = (u64) *value; + break; +#endif + default: + /* bit_width was already validated */ + break; + } + break; default: status = AE_BAD_PARAMETER; @@ -199,14 +257,14 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_system_io_space_handler + * FUNCTION: acpi_ex_system_io_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write - * Bit_width - Field width in bits (8, 16, or 32) + * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value - * Handler_context - Pointer to Handler's context - * Region_context - Pointer to context specific to the + * handler_context - Pointer to Handler's context + * region_context - Pointer to context specific to the * accessed region * * RETURN: Status @@ -217,40 +275,38 @@ acpi_status acpi_ex_system_io_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context) + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; + u32 value32; - FUNCTION_TRACE ("Ex_system_io_space_handler"); + ACPI_FUNCTION_TRACE ("ex_system_io_space_handler"); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "System_iO %d (%d width) Address=%8.8X%8.8X\n", function, bit_width, - HIDWORD (address), LODWORD (address))); + "system_iO %d (%d width) Address=%8.8X%8.8X\n", function, bit_width, + ACPI_HIDWORD (address), ACPI_LODWORD (address))); /* Decode the function parameter */ switch (function) { + case ACPI_READ: - case ACPI_READ_ADR_SPACE: - - *value = 0; - status = acpi_os_read_port ((ACPI_IO_ADDRESS) address, value, bit_width); + status = acpi_os_read_port ((acpi_io_address) address, &value32, bit_width); + *value = value32; break; + case ACPI_WRITE: - case ACPI_WRITE_ADR_SPACE: - - status = acpi_os_write_port ((ACPI_IO_ADDRESS) address, *value, bit_width); + status = acpi_os_write_port ((acpi_io_address) address, (u32) *value, bit_width); break; - default: status = AE_BAD_PARAMETER; break; @@ -262,14 +318,14 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_pci_config_space_handler + * FUNCTION: acpi_ex_pci_config_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write - * 
Bit_width - Field width in bits (8, 16, or 32) + * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value - * Handler_context - Pointer to Handler's context - * Region_context - Pointer to context specific to the + * handler_context - Pointer to Handler's context + * region_context - Pointer to context specific to the * accessed region * * RETURN: Status @@ -280,56 +336,53 @@ acpi_status acpi_ex_pci_config_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context) + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context) { - acpi_status status = AE_OK; - acpi_pci_id *pci_id; - u16 pci_register; + acpi_status status = AE_OK; + struct acpi_pci_id *pci_id; + u16 pci_register; - FUNCTION_TRACE ("Ex_pci_config_space_handler"); + ACPI_FUNCTION_TRACE ("ex_pci_config_space_handler"); /* - * The arguments to Acpi_os(Read|Write)Pci_cfg(Byte|Word|Dword) are: + * The arguments to acpi_os(Read|Write)pci_configuration are: * - * Pci_segment is the PCI bus segment range 0-31 - * Pci_bus is the PCI bus number range 0-255 - * Pci_device is the PCI device number range 0-31 - * Pci_function is the PCI device function number - * Pci_register is the Config space register range 0-255 bytes + * pci_segment is the PCI bus segment range 0-31 + * pci_bus is the PCI bus number range 0-255 + * pci_device is the PCI device number range 0-31 + * pci_function is the PCI device function number + * pci_register is the Config space register range 0-255 bytes * * Value - input value for write, output address for read * */ - pci_id = (acpi_pci_id *) region_context; - pci_register = (u16) address; + pci_id = (struct acpi_pci_id *) region_context; + pci_register = (u16) (u32) address; ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Pci_config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n", + "pci_config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n", function, bit_width, pci_id->segment, pci_id->bus, pci_id->device, pci_id->function, pci_register)); switch (function) { - - case ACPI_READ_ADR_SPACE: + case ACPI_READ: *value = 0; status = acpi_os_read_pci_configuration (pci_id, pci_register, value, bit_width); break; - - case ACPI_WRITE_ADR_SPACE: + case ACPI_WRITE: status = acpi_os_write_pci_configuration (pci_id, pci_register, *value, bit_width); break; - default: status = AE_BAD_PARAMETER; @@ -342,14 +395,14 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_cmos_space_handler + * FUNCTION: acpi_ex_cmos_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write - * Bit_width - Field width in bits (8, 16, or 32) + * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value - * Handler_context - Pointer to Handler's context - * Region_context - Pointer to context specific to the + * handler_context - Pointer to Handler's context + * region_context - Pointer to context specific to the * accessed region * * RETURN: Status @@ -360,17 +413,17 @@ acpi_status acpi_ex_cmos_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context) + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context) { - acpi_status status = AE_OK; + 
acpi_status status = AE_OK; - FUNCTION_TRACE ("Ex_cmos_space_handler"); + ACPI_FUNCTION_TRACE ("ex_cmos_space_handler"); return_ACPI_STATUS (status); @@ -379,37 +432,97 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_pci_bar_space_handler + * FUNCTION: acpi_ex_pci_bar_space_handler * * PARAMETERS: Function - Read or Write operation * Address - Where in the space to read or write - * Bit_width - Field width in bits (8, 16, or 32) + * bit_width - Field width in bits (8, 16, or 32) * Value - Pointer to in or out value - * Handler_context - Pointer to Handler's context - * Region_context - Pointer to context specific to the + * handler_context - Pointer to Handler's context + * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * - * DESCRIPTION: Handler for the PCI Bar_target address space (Op Region) + * DESCRIPTION: Handler for the PCI bar_target address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_pci_bar_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context) + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ex_pci_bar_space_handler"); + ACPI_FUNCTION_TRACE ("ex_pci_bar_space_handler"); return_ACPI_STATUS (status); } + +/******************************************************************************* + * + * FUNCTION: acpi_ex_data_table_space_handler + * + * PARAMETERS: Function - Read or Write operation + * Address - Where in the space to read or write + * bit_width - Field width in bits (8, 16, or 32) + * Value - Pointer to in or out value + * handler_context - Pointer to Handler's context + * region_context - Pointer to context specific to the + * accessed region + * + * RETURN: Status + * + * DESCRIPTION: Handler for the Data Table address space (Op Region) + * + ******************************************************************************/ + +acpi_status +acpi_ex_data_table_space_handler ( + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context) +{ + acpi_status status = AE_OK; + u32 byte_width = ACPI_DIV_8 (bit_width); + u32 i; + char *logical_addr_ptr; + + + ACPI_FUNCTION_TRACE ("ex_data_table_space_handler"); + + + logical_addr_ptr = ACPI_PHYSADDR_TO_PTR (address); + + + /* Perform the memory read or write */ + + switch (function) { + case ACPI_READ: + + for (i = 0; i < byte_width; i++) { + ((char *) value) [i] = logical_addr_ptr[i]; + } + break; + + case ACPI_WRITE: + default: + + return_ACPI_STATUS (AE_SUPPORT); + } + + return_ACPI_STATUS (status); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exresnte.c linux.21rc5-ac2/drivers/acpi/executer/exresnte.c --- linux.21rc5/drivers/acpi/executer/exresnte.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exresnte.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,51 +2,65 @@ /****************************************************************************** * * Module Name: exresnte - AML Interpreter object resolution - * $Revision: 43 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. 
Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "amlcode.h" -#include "acparser.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "actables.h" -#include "acevents.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exresnte") + ACPI_MODULE_NAME ("exresnte") /******************************************************************************* * - * FUNCTION: Acpi_ex_resolve_node_to_value + * FUNCTION: acpi_ex_resolve_node_to_value * - * PARAMETERS: Object_ptr - Pointer to a location that contains + * PARAMETERS: object_ptr - Pointer to a location that contains * a pointer to a NS node, and will receive a * pointer to the resolved object. - * Walk_state - Current state. Valid only if executing AML + * walk_state - Current state. Valid only if executing AML * code. 
NULL if simply resolving an object * * RETURN: Status @@ -67,32 +81,39 @@ acpi_status acpi_ex_resolve_node_to_value ( - acpi_namespace_node **object_ptr, - acpi_walk_state *walk_state) + struct acpi_namespace_node **object_ptr, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; - acpi_operand_object *source_desc; - acpi_operand_object *obj_desc = NULL; - acpi_namespace_node *node; - acpi_object_type8 entry_type; - acpi_integer temp_val; + acpi_status status = AE_OK; + union acpi_operand_object *source_desc; + union acpi_operand_object *obj_desc = NULL; + struct acpi_namespace_node *node; + acpi_object_type entry_type; - FUNCTION_TRACE ("Ex_resolve_node_to_value"); + ACPI_FUNCTION_TRACE ("ex_resolve_node_to_value"); /* - * The stack pointer points to a acpi_namespace_node (Node). Get the + * The stack pointer points to a struct acpi_namespace_node (Node). Get the * object that is attached to the Node. */ - node = *object_ptr; - source_desc = acpi_ns_get_attached_object (node); + node = *object_ptr; + source_desc = acpi_ns_get_attached_object (node); entry_type = acpi_ns_get_type ((acpi_handle) node); - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Entry=%p Source_desc=%p Type=%X\n", - node, source_desc, entry_type)); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Entry=%p source_desc=%p [%s]\n", + node, source_desc, acpi_ut_get_type_name (entry_type))); + if (entry_type == ACPI_TYPE_LOCAL_ALIAS) { + /* There is always exactly one level of indirection */ + + node = (struct acpi_namespace_node *) node->object; + source_desc = acpi_ns_get_attached_object (node); + entry_type = acpi_ns_get_type ((acpi_handle) node); + *object_ptr = node; + } /* * Several object types require no further processing: @@ -115,42 +136,47 @@ * of the attached object or pointer */ switch (entry_type) { - case ACPI_TYPE_PACKAGE: - if (ACPI_TYPE_PACKAGE != source_desc->common.type) { + if (ACPI_GET_OBJECT_TYPE (source_desc) != ACPI_TYPE_PACKAGE) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Object not a Package, type %s\n", - acpi_ut_get_type_name (source_desc->common.type))); + acpi_ut_get_object_type_name (source_desc))); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } - /* Return an additional reference to the object */ + status = acpi_ds_get_package_arguments (source_desc); + if (ACPI_SUCCESS (status)) { + /* Return an additional reference to the object */ - obj_desc = source_desc; - acpi_ut_add_reference (obj_desc); + obj_desc = source_desc; + acpi_ut_add_reference (obj_desc); + } break; case ACPI_TYPE_BUFFER: - if (ACPI_TYPE_BUFFER != source_desc->common.type) { + if (ACPI_GET_OBJECT_TYPE (source_desc) != ACPI_TYPE_BUFFER) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Object not a Buffer, type %s\n", - acpi_ut_get_type_name (source_desc->common.type))); + acpi_ut_get_object_type_name (source_desc))); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } - /* Return an additional reference to the object */ + status = acpi_ds_get_buffer_arguments (source_desc); + if (ACPI_SUCCESS (status)) { + /* Return an additional reference to the object */ - obj_desc = source_desc; - acpi_ut_add_reference (obj_desc); + obj_desc = source_desc; + acpi_ut_add_reference (obj_desc); + } break; case ACPI_TYPE_STRING: - if (ACPI_TYPE_STRING != source_desc->common.type) { + if (ACPI_GET_OBJECT_TYPE (source_desc) != ACPI_TYPE_STRING) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Object not a String, type %s\n", - acpi_ut_get_type_name (source_desc->common.type))); + acpi_ut_get_object_type_name (source_desc))); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } @@ -163,9 +189,9 @@ case 
ACPI_TYPE_INTEGER: - if (ACPI_TYPE_INTEGER != source_desc->common.type) { + if (ACPI_GET_OBJECT_TYPE (source_desc) != ACPI_TYPE_INTEGER) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Object not a Integer, type %s\n", - acpi_ut_get_type_name (source_desc->common.type))); + acpi_ut_get_object_type_name (source_desc))); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } @@ -177,17 +203,16 @@ case ACPI_TYPE_BUFFER_FIELD: - case INTERNAL_TYPE_REGION_FIELD: - case INTERNAL_TYPE_BANK_FIELD: - case INTERNAL_TYPE_INDEX_FIELD: + case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Field_read Node=%p Source_desc=%p Type=%X\n", + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "field_read Node=%p source_desc=%p Type=%X\n", node, source_desc, entry_type)); - status = acpi_ex_read_data_from_field (source_desc, &obj_desc); + status = acpi_ex_read_data_from_field (walk_state, source_desc, &obj_desc); break; - /* * For these objects, just return the object attached to the Node */ @@ -206,7 +231,7 @@ break; - /* TYPE_Any is untyped, and thus there is no object associated with it */ + /* TYPE_ANY is untyped, and thus there is no object associated with it */ case ACPI_TYPE_ANY: @@ -214,64 +239,16 @@ node)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); /* Cannot be AE_TYPE */ - break; - - - /* - * The only named references allowed are named constants - * e.g. -- Name (\OSFL, Ones) - */ - case INTERNAL_TYPE_REFERENCE: - switch (source_desc->reference.opcode) { - case AML_ZERO_OP: + case ACPI_TYPE_LOCAL_REFERENCE: - temp_val = 0; - break; + /* No named references are allowed here */ - case AML_ONE_OP: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unsupported Reference opcode %X\n", + source_desc->reference.opcode)); - temp_val = 1; - break; - - case AML_ONES_OP: - - temp_val = ACPI_INTEGER_MAX; - break; - - case AML_REVISION_OP: - - temp_val = ACPI_CA_SUPPORT_LEVEL; - break; - - default: - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unsupported reference opcode %X\n", - source_desc->reference.opcode)); - - return_ACPI_STATUS (AE_AML_BAD_OPCODE); - } - - /* Create object for result */ - - obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); - if (!obj_desc) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - obj_desc->integer.value = temp_val; - - /* - * Truncate value if we are executing from a 32-bit ACPI table - * AND actually executing AML code. If we are resolving - * an object in the namespace via an external call to the - * subsystem, we will have a null Walk_state - */ - if (walk_state) { - acpi_ex_truncate_for32bit_table (obj_desc, walk_state); - } - break; + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); /* Default case is for unknown types */ @@ -283,7 +260,7 @@ return_ACPI_STATUS (AE_AML_OPERAND_TYPE); - } /* switch (Entry_type) */ + } /* switch (entry_type) */ /* Put the object descriptor on the stack */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exresolv.c linux.21rc5-ac2/drivers/acpi/executer/exresolv.c --- linux.21rc5/drivers/acpi/executer/exresolv.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exresolv.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,156 +2,66 @@ /****************************************************************************** * * Module Name: exresolv - AML Interpreter object resolution - * $Revision: 101 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. 
Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "amlcode.h" -#include "acparser.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "actables.h" -#include "acevents.h" +#include +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exresolv") + ACPI_MODULE_NAME ("exresolv") /******************************************************************************* * - * FUNCTION: Acpi_ex_get_buffer_field_value + * FUNCTION: acpi_ex_resolve_to_value * - * PARAMETERS: *Obj_desc - Pointer to a Buffer_field - * *Result_desc - Pointer to an empty descriptor which will - * become an Integer with the field's value - * - * RETURN: Status - * - * DESCRIPTION: Retrieve the value from a Buffer_field - * - ******************************************************************************/ - -acpi_status -acpi_ex_get_buffer_field_value ( - acpi_operand_object *obj_desc, - acpi_operand_object *result_desc) -{ - acpi_status status; - u32 mask; - u8 *location; - - - FUNCTION_TRACE ("Ex_get_buffer_field_value"); - - - /* - * Parameter validation - */ - if (!obj_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Internal - null field pointer\n")); - return_ACPI_STATUS (AE_AML_NO_OPERAND); - } - - if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { - status = acpi_ds_get_buffer_field_arguments (obj_desc); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - } - - if (!obj_desc->buffer_field.buffer_obj) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Internal - null container pointer\n")); - return_ACPI_STATUS (AE_AML_INTERNAL); - } - - if (ACPI_TYPE_BUFFER != obj_desc->buffer_field.buffer_obj->common.type) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Internal - container is not a Buffer\n")); - return_ACPI_STATUS (AE_AML_OPERAND_TYPE); - } - - if (!result_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Internal - null result pointer\n")); - return_ACPI_STATUS (AE_AML_INTERNAL); - } - - - /* Field location is (base of buffer) + (byte offset) */ - - location = obj_desc->buffer_field.buffer_obj->buffer.pointer - + obj_desc->buffer_field.base_byte_offset; - - /* - * Construct Mask with as many 1 bits as the field width - * - * NOTE: Only the bottom 5 bits are valid for a shift operation, so - * special care must be taken for any shift greater than 31 bits. - * - * TBD: [Unhandled] Fields greater than 32 bits will not work. 
- */ - if (obj_desc->buffer_field.bit_length < 32) { - mask = ((u32) 1 << obj_desc->buffer_field.bit_length) - (u32) 1; - } - else { - mask = ACPI_UINT32_MAX; - } - - result_desc->integer.type = (u8) ACPI_TYPE_INTEGER; - - /* Get the 32 bit value at the location */ - - MOVE_UNALIGNED32_TO_32 (&result_desc->integer.value, location); - - /* - * Shift the 32-bit word containing the field, and mask off the - * resulting value - */ - result_desc->integer.value = - (result_desc->integer.value >> obj_desc->buffer_field.start_field_bit_offset) & mask; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "** Read from buffer %p byte %d bit %d width %d addr %p mask %08X val %8.8X%8.8X\n", - obj_desc->buffer_field.buffer_obj->buffer.pointer, - obj_desc->buffer_field.base_byte_offset, - obj_desc->buffer_field.start_field_bit_offset, - obj_desc->buffer_field.bit_length, - location, mask, - HIDWORD(result_desc->integer.value), - LODWORD(result_desc->integer.value))); - - return_ACPI_STATUS (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ex_resolve_to_value - * - * PARAMETERS: **Stack_ptr - Points to entry on Obj_stack, which can - * be either an (acpi_operand_object *) + * PARAMETERS: **stack_ptr - Points to entry on obj_stack, which can + * be either an (union acpi_operand_object *) * or an acpi_handle. - * Walk_state - Current method state + * walk_state - Current method state * * RETURN: Status * @@ -161,13 +71,13 @@ acpi_status acpi_ex_resolve_to_value ( - acpi_operand_object **stack_ptr, - acpi_walk_state *walk_state) + union acpi_operand_object **stack_ptr, + struct acpi_walk_state *walk_state) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE_PTR ("Ex_resolve_to_value", stack_ptr); + ACPI_FUNCTION_TRACE_PTR ("ex_resolve_to_value", stack_ptr); if (!stack_ptr || !*stack_ptr) { @@ -175,13 +85,12 @@ return_ACPI_STATUS (AE_AML_NO_OPERAND); } - /* - * The entity pointed to by the Stack_ptr can be either - * 1) A valid acpi_operand_object, or - * 2) A acpi_namespace_node (Named_obj) + * The entity pointed to by the stack_ptr can be either + * 1) A valid union acpi_operand_object, or + * 2) A struct acpi_namespace_node (named_obj) */ - if (VALID_DESCRIPTOR_TYPE (*stack_ptr, ACPI_DESC_TYPE_INTERNAL)) { + if (ACPI_GET_DESCRIPTOR_TYPE (*stack_ptr) == ACPI_DESC_TYPE_OPERAND) { status = acpi_ex_resolve_object_to_value (stack_ptr, walk_state); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); @@ -189,30 +98,30 @@ } /* - * Object on the stack may have changed if Acpi_ex_resolve_object_to_value() + * Object on the stack may have changed if acpi_ex_resolve_object_to_value() * was called (i.e., we can't use an _else_ here.) 
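
Why an else cannot be used here can be shown with a toy model of the two resolution passes; every type and value below is hypothetical:

#include <stdio.h>

enum desc_type { DESC_OPERAND, DESC_NAMED };

struct stack_entry {
	enum desc_type	type;
	int		value;
};

/* First pass: resolving an operand object may leave a namespace
 * node on the stack (e.g. an AML_NAME_OP reference) */
static void resolve_object(struct stack_entry *e)
{
	if (e->type == DESC_OPERAND)
		e->type = DESC_NAMED;
}

/* Second pass: a namespace node resolves to its attached value */
static void resolve_node(struct stack_entry *e)
{
	if (e->type == DESC_NAMED) {
		e->type = DESC_OPERAND;
		e->value = 42;	/* stand-in for the attached object */
	}
}

int main(void)
{
	struct stack_entry e = { DESC_OPERAND, 0 };

	resolve_object(&e);	/* may convert the operand into a node... */
	resolve_node(&e);	/* ...so this must not be in an "else" */

	printf("type=%d value=%d\n", e.type, e.value);
	return 0;
}
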
*/ - if (VALID_DESCRIPTOR_TYPE (*stack_ptr, ACPI_DESC_TYPE_NAMED)) { - status = acpi_ex_resolve_node_to_value ((acpi_namespace_node **) stack_ptr, + if (ACPI_GET_DESCRIPTOR_TYPE (*stack_ptr) == ACPI_DESC_TYPE_NAMED) { + status = acpi_ex_resolve_node_to_value ( + ACPI_CAST_INDIRECT_PTR (struct acpi_namespace_node, stack_ptr), walk_state); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Resolved object %p\n", *stack_ptr)); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Resolved object %p\n", *stack_ptr)); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ex_resolve_object_to_value + * FUNCTION: acpi_ex_resolve_object_to_value * - * PARAMETERS: Stack_ptr - Pointer to a stack location that contains a + * PARAMETERS: stack_ptr - Pointer to a stack location that contains a * ptr to an internal object. - * Walk_state - Current method state + * walk_state - Current method state * * RETURN: Status * @@ -223,36 +132,34 @@ acpi_status acpi_ex_resolve_object_to_value ( - acpi_operand_object **stack_ptr, - acpi_walk_state *walk_state) + union acpi_operand_object **stack_ptr, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; - acpi_operand_object *stack_desc; - void *temp_node; - acpi_operand_object *obj_desc; - u16 opcode; + acpi_status status = AE_OK; + union acpi_operand_object *stack_desc; + void *temp_node; + union acpi_operand_object *obj_desc; + u16 opcode; - FUNCTION_TRACE ("Ex_resolve_object_to_value"); + ACPI_FUNCTION_TRACE ("ex_resolve_object_to_value"); stack_desc = *stack_ptr; - /* This is an acpi_operand_object */ + /* This is an union acpi_operand_object */ - switch (stack_desc->common.type) { - - case INTERNAL_TYPE_REFERENCE: + switch (ACPI_GET_OBJECT_TYPE (stack_desc)) { + case ACPI_TYPE_LOCAL_REFERENCE: opcode = stack_desc->reference.opcode; switch (opcode) { - case AML_NAME_OP: /* * Convert indirect name ptr to a direct name ptr. 
- * Then, Acpi_ex_resolve_node_to_value can be used to get the value + * Then, acpi_ex_resolve_node_to_value can be used to get the value */ temp_node = stack_desc->reference.object; @@ -286,58 +193,11 @@ acpi_ut_remove_reference (stack_desc); *stack_ptr = obj_desc; - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "[Arg/Local %d] Value_obj is %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Arg/Local %d] value_obj is %p\n", stack_desc->reference.offset, obj_desc)); break; - /* - * For constants, we must change the reference/constant object - * to a real integer object - */ - case AML_ZERO_OP: - case AML_ONE_OP: - case AML_ONES_OP: - case AML_REVISION_OP: - - /* Create a new integer object */ - - obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); - if (!obj_desc) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - switch (opcode) { - case AML_ZERO_OP: - obj_desc->integer.value = 0; - break; - - case AML_ONE_OP: - obj_desc->integer.value = 1; - break; - - case AML_ONES_OP: - obj_desc->integer.value = ACPI_INTEGER_MAX; - - /* Truncate value if we are executing from a 32-bit ACPI table */ - - acpi_ex_truncate_for32bit_table (obj_desc, walk_state); - break; - - case AML_REVISION_OP: - obj_desc->integer.value = ACPI_CA_SUPPORT_LEVEL; - break; - } - - /* - * Remove a reference from the original reference object - * and put the new object in its place - */ - acpi_ut_remove_reference (stack_desc); - *stack_ptr = obj_desc; - break; - - case AML_INDEX_OP: switch (stack_desc->reference.target_type) { @@ -348,6 +208,7 @@ case ACPI_TYPE_PACKAGE: + obj_desc = *stack_desc->reference.where; if (obj_desc) { /* @@ -359,7 +220,6 @@ acpi_ut_add_reference (obj_desc); *stack_ptr = obj_desc; } - else { /* * A NULL object descriptor means an unitialized element of @@ -372,83 +232,243 @@ } break; + default: + /* Invalid reference object */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Unknown Target_type %X in Index/Reference obj %p\n", + "Unknown target_type %X in Index/Reference obj %p\n", stack_desc->reference.target_type, stack_desc)); status = AE_AML_INTERNAL; break; } - break; + case AML_REF_OF_OP: case AML_DEBUG_OP: /* Just leave the object as-is */ + break; default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown Reference object subtype %02X in %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown Reference opcode %X in %p\n", opcode, stack_desc)); status = AE_AML_INTERNAL; break; + } + break; - } /* switch (Opcode) */ - break; /* case INTERNAL_TYPE_REFERENCE */ + case ACPI_TYPE_BUFFER: + status = acpi_ds_get_buffer_arguments (stack_desc); + break; - case ACPI_TYPE_BUFFER_FIELD: - obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_ANY); - if (!obj_desc) { - return_ACPI_STATUS (AE_NO_MEMORY); - } + case ACPI_TYPE_PACKAGE: + + status = acpi_ds_get_package_arguments (stack_desc); + break; - status = acpi_ex_get_buffer_field_value (stack_desc, obj_desc); - if (ACPI_FAILURE (status)) { - acpi_ut_remove_reference (obj_desc); - obj_desc = NULL; - } + /* + * These cases may never happen here, but just in case.. 
+ */ + case ACPI_TYPE_BUFFER_FIELD: + case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: + + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "field_read source_desc=%p Type=%X\n", + stack_desc, ACPI_GET_OBJECT_TYPE (stack_desc))); + + status = acpi_ex_read_data_from_field (walk_state, stack_desc, &obj_desc); *stack_ptr = (void *) obj_desc; break; + default: + break; + } - case INTERNAL_TYPE_BANK_FIELD: + return_ACPI_STATUS (status); +} - obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_ANY); - if (!obj_desc) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - /* TBD: WRONG! */ +/******************************************************************************* + * + * FUNCTION: acpi_ex_resolve_multiple + * + * PARAMETERS: walk_state - Current state (contains AML opcode) + * Operand - Starting point for resolution + * return_type - Where the object type is returned + * return_desc - Where the resolved object is returned + * + * RETURN: Status + * + * DESCRIPTION: Return the base object and type. Traverse a reference list if + * necessary to get to the base object. + * + ******************************************************************************/ - status = acpi_ex_get_buffer_field_value (stack_desc, obj_desc); - if (ACPI_FAILURE (status)) { - acpi_ut_remove_reference (obj_desc); - obj_desc = NULL; +acpi_status +acpi_ex_resolve_multiple ( + struct acpi_walk_state *walk_state, + union acpi_operand_object *operand, + acpi_object_type *return_type, + union acpi_operand_object **return_desc) +{ + union acpi_operand_object *obj_desc = (void *) operand; + struct acpi_namespace_node *node; + acpi_object_type type; + + + ACPI_FUNCTION_TRACE ("acpi_ex_resolve_multiple"); + + + /* + * For reference objects created via the ref_of or Index operators, + * we need to get to the base object (as per the ACPI specification + * of the object_type and size_of operators). This means traversing + * the list of possibly many nested references. + */ + while (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) { + switch (obj_desc->reference.opcode) { + case AML_REF_OF_OP: + + /* Dereference the reference pointer */ + + node = obj_desc->reference.object; + + /* All "References" point to a NS node */ + + if (ACPI_GET_DESCRIPTOR_TYPE (node) != ACPI_DESC_TYPE_NAMED) { + return_ACPI_STATUS (AE_AML_INTERNAL); + } + + /* Get the attached object */ + + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc) { + /* No object, use the NS node type */ + + type = acpi_ns_get_type (node); + goto exit; + } + + /* Check for circular references */ + + if (obj_desc == operand) { + return_ACPI_STATUS (AE_AML_CIRCULAR_REFERENCE); + } + break; + + + case AML_INDEX_OP: + + /* Get the type of this reference (index into another object) */ + + type = obj_desc->reference.target_type; + if (type != ACPI_TYPE_PACKAGE) { + goto exit; + } + + /* + * The main object is a package, we want to get the type + * of the individual package element that is referenced by + * the index. + * + * This could of course in turn be another reference object. 
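/*
 * The while loop in acpi_ex_resolve_multiple() above walks a chain of
 * reference objects down to the base object, failing with
 * AE_AML_CIRCULAR_REFERENCE if the chain leads back to the operand it
 * started from.  A reduced sketch (types and field names are hypothetical
 * stand-ins for the ACPICA ones):
 */
struct obj {
        int is_reference;
        struct obj *target;             /* what the reference points at */
};

static struct obj *chase_references(struct obj *start)
{
        struct obj *cur = start;

        while (cur && cur->is_reference) {
                cur = cur->target;

                /* The chain loops back to where we began */
                if (cur == start)
                        return 0;       /* circular reference */
        }

        return cur;                     /* base object, or 0 if detached */
}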
+ */ + obj_desc = *(obj_desc->reference.where); + break; + + + case AML_INT_NAMEPATH_OP: + + /* Dereference the reference pointer */ + + node = obj_desc->reference.node; + + /* All "References" point to a NS node */ + + if (ACPI_GET_DESCRIPTOR_TYPE (node) != ACPI_DESC_TYPE_NAMED) { + return_ACPI_STATUS (AE_AML_INTERNAL); + } + + /* Get the attached object */ + + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc) { + /* No object, use the NS node type */ + + type = acpi_ns_get_type (node); + goto exit; + } + + /* Check for circular references */ + + if (obj_desc == operand) { + return_ACPI_STATUS (AE_AML_CIRCULAR_REFERENCE); + } + break; + + + case AML_DEBUG_OP: + + /* The Debug Object is of type "debug_object" */ + + type = ACPI_TYPE_DEBUG_OBJECT; + goto exit; + + + default: + + ACPI_REPORT_ERROR (("acpi_ex_resolve_multiple: Unknown Reference subtype %X\n", + obj_desc->reference.opcode)); + return_ACPI_STATUS (AE_AML_INTERNAL); } + } - *stack_ptr = (void *) obj_desc; - break; + /* + * Now we are guaranteed to have an object that has not been created + * via the ref_of or Index operators. + */ + type = ACPI_GET_OBJECT_TYPE (obj_desc); - /* TBD: [Future] - may need to handle Index_field, and Def_field someday */ +exit: + /* Convert internal types to external types */ - default: + switch (type) { + case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: + type = ACPI_TYPE_FIELD_UNIT; break; - } /* switch (Stack_desc->Common.Type) */ + case ACPI_TYPE_LOCAL_SCOPE: + /* Per ACPI Specification, Scope is untyped */ - return_ACPI_STATUS (status); + type = ACPI_TYPE_ANY; + break; + + default: + /* No change to Type required */ + break; + } + + *return_type = type; + if (return_desc) { + *return_desc = obj_desc; + } + return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exresop.c linux.21rc5-ac2/drivers/acpi/executer/exresop.c --- linux.21rc5/drivers/acpi/executer/exresop.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exresop.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,49 +2,63 @@ /****************************************************************************** * * Module Name: exresop - AML Interpreter operand/object resolution - * $Revision: 41 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "amlcode.h" -#include "acparser.h" -#include "acdispat.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "actables.h" -#include "acevents.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exresop") + ACPI_MODULE_NAME ("exresop") /******************************************************************************* * - * FUNCTION: Acpi_ex_check_object_type + * FUNCTION: acpi_ex_check_object_type * - * PARAMETERS: Type_needed Object type needed - * This_type Actual object type + * PARAMETERS: type_needed Object type needed + * this_type Actual object type * Object Object pointer * * RETURN: Status @@ -55,11 +69,11 @@ acpi_status acpi_ex_check_object_type ( - acpi_object_type type_needed, - acpi_object_type this_type, - void *object) + acpi_object_type type_needed, + acpi_object_type this_type, + void *object) { - PROC_NAME ("Ex_check_object_type"); + ACPI_FUNCTION_NAME ("ex_check_object_type"); if (type_needed == ACPI_TYPE_ANY) { @@ -68,8 +82,20 @@ return (AE_OK); } + if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) { + /* + * Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference + * objects and thus allow them to be targets. (As per the ACPI + * specification, a store to a constant is a noop.) 
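/*
 * The clause added just below encodes one ACPI rule: the AML constants
 * (Zero, One, Ones, Revision) are acceptable wherever a reference, i.e. a
 * store target, is required, because a store to a constant is defined to
 * be a no-op.  A hypothetical reduction of that test (the flag value and
 * type codes here are illustrative):
 */
enum { TYPE_INTEGER, TYPE_REFERENCE };

#define OBJ_FLAG_AML_CONSTANT   0x01    /* stand-in for AOPOBJ_AML_CONSTANT */

struct operand {
        int type;
        unsigned char flags;
};

static int acceptable_as_target(const struct operand *op)
{
        if (op->type == TYPE_REFERENCE)
                return 1;

        /* Constants pass the check; the eventual store becomes a no-op */
        if (op->type == TYPE_INTEGER && (op->flags & OBJ_FLAG_AML_CONSTANT))
                return 1;

        return 0;
}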
+ */ + if ((this_type == ACPI_TYPE_INTEGER) && + (((union acpi_operand_object *) object)->common.flags & AOPOBJ_AML_CONSTANT)) { + return (AE_OK); + } + } + if (type_needed != this_type) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Needed [%s], found [%s] %p\n", acpi_ut_get_type_name (type_needed), acpi_ut_get_type_name (this_type), object)); @@ -83,40 +109,42 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_resolve_operands + * FUNCTION: acpi_ex_resolve_operands * - * PARAMETERS: Opcode Opcode being interpreted - * Stack_ptr Top of operand stack + * PARAMETERS: Opcode - Opcode being interpreted + * stack_ptr - Pointer to the operand stack to be + * resolved + * walk_state - Current state * * RETURN: Status * - * DESCRIPTION: Convert stack entries to required types - * - * Each nibble in Arg_types represents one required operand - * and indicates the required Type: + * DESCRIPTION: Convert multiple input operands to the types required by the + * target operator. * - * The corresponding stack entry will be converted to the - * required type if possible, else return an exception + * Each 5-bit group in arg_types represents one required + * operand and indicates the required Type. The corresponding operand + * will be converted to the required type if possible, otherwise we + * abort with an exception. * ******************************************************************************/ acpi_status acpi_ex_resolve_operands ( - u16 opcode, - acpi_operand_object **stack_ptr, - acpi_walk_state *walk_state) + u16 opcode, + union acpi_operand_object **stack_ptr, + struct acpi_walk_state *walk_state) { - acpi_operand_object *obj_desc; - acpi_status status = AE_OK; - u8 object_type; - void *temp_node; - u32 arg_types; - const acpi_opcode_info *op_info; - u32 this_arg_type; - acpi_object_type type_needed; + union acpi_operand_object *obj_desc; + acpi_status status = AE_OK; + u8 object_type; + void *temp_node; + u32 arg_types; + const struct acpi_opcode_info *op_info; + u32 this_arg_type; + acpi_object_type type_needed; - FUNCTION_TRACE_U32 ("Ex_resolve_operands", opcode); + ACPI_FUNCTION_TRACE_U32 ("ex_resolve_operands", opcode); op_info = acpi_ps_get_opcode_info (opcode); @@ -124,7 +152,6 @@ return_ACPI_STATUS (AE_AML_BAD_OPCODE); } - arg_types = op_info->runtime_args; if (arg_types == ARGI_INVALID_OPCODE) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Internal - %X is not a valid AML opcode\n", @@ -133,12 +160,11 @@ return_ACPI_STATUS (AE_AML_INTERNAL); } - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Opcode %X Operand_types=%X \n", - opcode, arg_types)); - + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Opcode %X [%s] operand_types=%X \n", + opcode, op_info->name, arg_types)); /* - * Normal exit is with (Arg_types == 0) at end of argument list. + * Normal exit is with (arg_types == 0) at end of argument list. 
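/*
 * The resolve loop that follows is driven by arg_types, which packs the
 * required type of each operand into successive 5-bit groups, exactly as
 * the rewritten function comment above describes; GET_CURRENT_ARG_TYPE()
 * and INCREMENT_ARG_LIST() peel one group per iteration until the value
 * hits zero.  A standalone illustration of that encoding (the mask and
 * shift are inferred from the 5-bit description, not copied from the
 * ACPICA headers):
 */
#include <stdio.h>

#define ARG_TYPE_WIDTH  5
#define ARG_TYPE_MASK   0x1F

static void walk_arg_types(unsigned int arg_types)
{
        while (arg_types) {
                unsigned int this_type = arg_types & ARG_TYPE_MASK;

                printf("required operand type code %u\n", this_type);
                arg_types >>= ARG_TYPE_WIDTH;   /* next operand, if any */
        }
}

/* Three operands coded 3, 1, 7 pack as (7 << 10) | (1 << 5) | 3 */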
* Function will return an exception from within the loop upon * finding an entry which is not (or cannot be converted * to) the required type; if stack underflows; or upon @@ -158,27 +184,31 @@ /* Decode the descriptor type */ - if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_NAMED)) { + switch (ACPI_GET_DESCRIPTOR_TYPE (obj_desc)) { + case ACPI_DESC_TYPE_NAMED: + /* Node */ - object_type = ((acpi_namespace_node *) obj_desc)->type; - } + object_type = ((struct acpi_namespace_node *) obj_desc)->type; + break; + + + case ACPI_DESC_TYPE_OPERAND: - else if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_INTERNAL)) { /* ACPI internal object */ - object_type = obj_desc->common.type; + object_type = ACPI_GET_OBJECT_TYPE (obj_desc); /* Check for bad acpi_object_type */ - if (!acpi_ex_validate_object_type (object_type)) { + if (!acpi_ut_valid_object_type (object_type)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Bad operand object type [%X]\n", object_type)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } - if (object_type == (u8) INTERNAL_TYPE_REFERENCE) { + if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) { /* * Decode the Reference */ @@ -187,39 +217,36 @@ return_ACPI_STATUS (AE_AML_BAD_OPCODE); } - switch (obj_desc->reference.opcode) { - case AML_ZERO_OP: - case AML_ONE_OP: - case AML_ONES_OP: case AML_DEBUG_OP: case AML_NAME_OP: case AML_INDEX_OP: + case AML_REF_OF_OP: case AML_ARG_OP: case AML_LOCAL_OP: - case AML_REVISION_OP: - DEBUG_ONLY_MEMBERS (ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_ONLY_MEMBERS (ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Reference Opcode: %s\n", op_info->name))); break; default: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Reference Opcode: Unknown [%02x]\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Unknown Reference Opcode %X\n", obj_desc->reference.opcode)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); - break; } } - } + break; + + + default: - else { /* Invalid descriptor */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Bad descriptor type %X in Obj %p\n", - obj_desc->common.data_type, obj_desc)); + ACPI_GET_DESCRIPTOR_TYPE (obj_desc), obj_desc)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } @@ -231,34 +258,45 @@ this_arg_type = GET_CURRENT_ARG_TYPE (arg_types); INCREMENT_ARG_LIST (arg_types); - /* * Handle cases where the object does not need to be * resolved to a value */ switch (this_arg_type) { + case ARGI_REF_OR_STRING: /* Can be a String or Reference */ - case ARGI_REFERENCE: /* References */ + if ((ACPI_GET_DESCRIPTOR_TYPE (obj_desc) == ACPI_DESC_TYPE_OPERAND) && + (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_STRING)) { + /* + * String found - the string references a named object and must be + * resolved to a node + */ + goto next_operand; + } + + /* Else not a string - fall through to the normal Reference case below */ + /*lint -fallthrough */ + + case ARGI_REFERENCE: /* References: */ case ARGI_INTEGER_REF: case ARGI_OBJECT_REF: case ARGI_DEVICE_REF: - case ARGI_TARGETREF: /* TBD: must implement implicit conversion rules before store */ + case ARGI_TARGETREF: /* Allows implicit conversion rules before store */ case ARGI_FIXED_TARGET: /* No implicit conversion before store to target */ - case ARGI_SIMPLE_TARGET: /* Name, Local, or Arg - no implicit conversion */ + case ARGI_SIMPLE_TARGET: /* Name, Local, or Arg - no implicit conversion */ - /* Need an operand of type INTERNAL_TYPE_REFERENCE */ + /* Need an operand of type ACPI_TYPE_LOCAL_REFERENCE */ - if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_NAMED)) /* direct name ptr OK as-is */ { + if (ACPI_GET_DESCRIPTOR_TYPE (obj_desc) == 
ACPI_DESC_TYPE_NAMED) /* Node (name) ptr OK as-is */ { goto next_operand; } - status = acpi_ex_check_object_type (INTERNAL_TYPE_REFERENCE, + status = acpi_ex_check_object_type (ACPI_TYPE_LOCAL_REFERENCE, object_type, obj_desc); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - if (AML_NAME_OP == obj_desc->reference.opcode) { /* * Convert an indirect name ptr to direct name ptr and put @@ -268,25 +306,27 @@ acpi_ut_remove_reference (obj_desc); (*stack_ptr) = temp_node; } - goto next_operand; - break; case ARGI_ANYTYPE: /* - * We don't want to resolve Index_op reference objects during - * a store because this would be an implicit De_ref_of operation. + * We don't want to resolve index_op reference objects during + * a store because this would be an implicit de_ref_of operation. * Instead, we just want to store the reference object. * -- All others must be resolved below. */ if ((opcode == AML_STORE_OP) && - ((*stack_ptr)->common.type == INTERNAL_TYPE_REFERENCE) && + (ACPI_GET_OBJECT_TYPE (*stack_ptr) == ACPI_TYPE_LOCAL_REFERENCE) && ((*stack_ptr)->reference.opcode == AML_INDEX_OP)) { goto next_operand; } break; + + default: + /* All cases covered above */ + break; } @@ -298,6 +338,9 @@ return_ACPI_STATUS (status); } + /* Get the resolved object */ + + obj_desc = *stack_ptr; /* * Check the resulting object (value) type @@ -321,20 +364,6 @@ type_needed = ACPI_TYPE_EVENT; break; - case ARGI_REGION: - - /* Need an operand of type ACPI_TYPE_REGION */ - - type_needed = ACPI_TYPE_REGION; - break; - - case ARGI_IF: /* If */ - - /* Need an operand of type INTERNAL_TYPE_IF */ - - type_needed = INTERNAL_TYPE_IF; - break; - case ARGI_PACKAGE: /* Package */ /* Need an operand of type ACPI_TYPE_PACKAGE */ @@ -358,22 +387,21 @@ /* * Need an operand of type ACPI_TYPE_INTEGER, * But we can implicitly convert from a STRING or BUFFER + * Aka - "Implicit Source Operand Conversion" */ - status = acpi_ex_convert_to_integer (*stack_ptr, stack_ptr, walk_state); + status = acpi_ex_convert_to_integer (obj_desc, stack_ptr, walk_state); if (ACPI_FAILURE (status)) { if (status == AE_TYPE) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Needed [Integer/String/Buffer], found [%s] %p\n", - acpi_ut_get_type_name ((*stack_ptr)->common.type), *stack_ptr)); + acpi_ut_get_object_type_name (obj_desc), obj_desc)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } return_ACPI_STATUS (status); } - goto next_operand; - break; case ARGI_BUFFER: @@ -381,22 +409,21 @@ /* * Need an operand of type ACPI_TYPE_BUFFER, * But we can implicitly convert from a STRING or INTEGER + * Aka - "Implicit Source Operand Conversion" */ - status = acpi_ex_convert_to_buffer (*stack_ptr, stack_ptr, walk_state); + status = acpi_ex_convert_to_buffer (obj_desc, stack_ptr, walk_state); if (ACPI_FAILURE (status)) { if (status == AE_TYPE) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Needed [Integer/String/Buffer], found [%s] %p\n", - acpi_ut_get_type_name ((*stack_ptr)->common.type), *stack_ptr)); + acpi_ut_get_object_type_name (obj_desc), obj_desc)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } return_ACPI_STATUS (status); } - goto next_operand; - break; case ARGI_STRING: @@ -404,95 +431,146 @@ /* * Need an operand of type ACPI_TYPE_STRING, * But we can implicitly convert from a BUFFER or INTEGER + * Aka - "Implicit Source Operand Conversion" */ - status = acpi_ex_convert_to_string (*stack_ptr, stack_ptr, 16, ACPI_UINT32_MAX, walk_state); + status = acpi_ex_convert_to_string (obj_desc, stack_ptr, 16, 
ACPI_UINT32_MAX, walk_state); if (ACPI_FAILURE (status)) { if (status == AE_TYPE) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Needed [Integer/String/Buffer], found [%s] %p\n", - acpi_ut_get_type_name ((*stack_ptr)->common.type), *stack_ptr)); + acpi_ut_get_object_type_name (obj_desc), obj_desc)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } return_ACPI_STATUS (status); } - goto next_operand; - break; case ARGI_COMPUTEDATA: /* Need an operand of type INTEGER, STRING or BUFFER */ - if ((ACPI_TYPE_INTEGER != (*stack_ptr)->common.type) && - (ACPI_TYPE_STRING != (*stack_ptr)->common.type) && - (ACPI_TYPE_BUFFER != (*stack_ptr)->common.type)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_INTEGER: + case ACPI_TYPE_STRING: + case ACPI_TYPE_BUFFER: + + /* Valid operand */ + break; + + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Needed [Integer/String/Buffer], found [%s] %p\n", - acpi_ut_get_type_name ((*stack_ptr)->common.type), *stack_ptr)); + acpi_ut_get_object_type_name (obj_desc), obj_desc)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } goto next_operand; - break; - case ARGI_DATAOBJECT: - /* - * ARGI_DATAOBJECT is only used by the Size_of operator. - * - * The ACPI specification allows Size_of to return the size of - * a Buffer, String or Package. However, the MS ACPI.SYS AML - * Interpreter also allows an Node reference to return without - * error with a size of 4. - */ + case ARGI_BUFFER_OR_STRING: + + /* Need an operand of type STRING or BUFFER */ + + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_STRING: + case ACPI_TYPE_BUFFER: - /* Need a buffer, string, package or Node reference */ + /* Valid operand */ + break; - if (((*stack_ptr)->common.type != ACPI_TYPE_BUFFER) && - ((*stack_ptr)->common.type != ACPI_TYPE_STRING) && - ((*stack_ptr)->common.type != ACPI_TYPE_PACKAGE) && - ((*stack_ptr)->common.type != INTERNAL_TYPE_REFERENCE)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Needed [Buf/Str/Pkg/Ref], found [%s] %p\n", - acpi_ut_get_type_name ((*stack_ptr)->common.type), *stack_ptr)); + case ACPI_TYPE_INTEGER: + + /* Highest priority conversion is to type Buffer */ + + status = acpi_ex_convert_to_buffer (obj_desc, stack_ptr, walk_state); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + break; + + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Needed [Integer/String/Buffer], found [%s] %p\n", + acpi_ut_get_object_type_name (obj_desc), obj_desc)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } + goto next_operand; + + case ARGI_DATAOBJECT: /* - * If this is a reference, only allow a reference to an Node. + * ARGI_DATAOBJECT is only used by the size_of operator. + * Need a buffer, string, package, or ref_of reference. + * + * The only reference allowed here is a direct reference to + * a namespace node. 
*/ - if ((*stack_ptr)->common.type == INTERNAL_TYPE_REFERENCE) { - if (!(*stack_ptr)->reference.node) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Needed [Node Reference], found [%p]\n", - *stack_ptr)); + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_PACKAGE: + case ACPI_TYPE_STRING: + case ACPI_TYPE_BUFFER: + case ACPI_TYPE_LOCAL_REFERENCE: + + /* Valid operand */ + break; + + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Needed [Buffer/String/Package/Reference], found [%s] %p\n", + acpi_ut_get_object_type_name (obj_desc), obj_desc)); - return_ACPI_STATUS (AE_AML_OPERAND_TYPE); - } + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } goto next_operand; - break; case ARGI_COMPLEXOBJ: /* Need a buffer or package or (ACPI 2.0) String */ - if (((*stack_ptr)->common.type != ACPI_TYPE_BUFFER) && - ((*stack_ptr)->common.type != ACPI_TYPE_STRING) && - ((*stack_ptr)->common.type != ACPI_TYPE_PACKAGE)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Needed [Buf/Pkg], found [%s] %p\n", - acpi_ut_get_type_name ((*stack_ptr)->common.type), *stack_ptr)); + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_PACKAGE: + case ACPI_TYPE_STRING: + case ACPI_TYPE_BUFFER: + + /* Valid operand */ + break; + + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Needed [Buffer/String/Package], found [%s] %p\n", + acpi_ut_get_object_type_name (obj_desc), obj_desc)); + + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); + } + goto next_operand; + + + case ARGI_REGION_OR_FIELD: + + /* Need an operand of type ACPI_TYPE_REGION or a FIELD in a region */ + + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_REGION: + case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: + + /* Valid operand */ + break; + + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Needed [Region/region_field], found [%s] %p\n", + acpi_ut_get_object_type_name (obj_desc), obj_desc)); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } goto next_operand; - break; default: @@ -500,27 +578,25 @@ /* Unknown type */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Internal - Unknown ARGI type %X\n", + "Internal - Unknown ARGI (required operand) type %X\n", this_arg_type)); return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* * Make sure that the original object was resolved to the * required object type (Simple cases only). */ status = acpi_ex_check_object_type (type_needed, - (*stack_ptr)->common.type, *stack_ptr); + ACPI_GET_OBJECT_TYPE (*stack_ptr), *stack_ptr); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - next_operand: /* - * If more operands needed, decrement Stack_ptr to point + * If more operands needed, decrement stack_ptr to point * to next operand on stack */ if (GET_CURRENT_ARG_TYPE (arg_types)) { @@ -529,7 +605,6 @@ } /* while (*Types) */ - return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exstore.c linux.21rc5-ac2/drivers/acpi/executer/exstore.c --- linux.21rc5/drivers/acpi/executer/exstore.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exstore.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,113 +2,142 @@ /****************************************************************************** * * Module Name: exstore - AML Interpreter object store support - * $Revision: 150 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. 
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "actables.h" +#include +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exstore") + ACPI_MODULE_NAME ("exstore") /******************************************************************************* * - * FUNCTION: Acpi_ex_store + * FUNCTION: acpi_ex_store * - * PARAMETERS: *Source_desc - Value to be stored - * *Dest_desc - Where to store it. Must be an NS node - * or an acpi_operand_object of type + * PARAMETERS: *source_desc - Value to be stored + * *dest_desc - Where to store it. Must be an NS node + * or an union acpi_operand_object of type * Reference; + * walk_state - Current walk state * * RETURN: Status * - * DESCRIPTION: Store the value described by Source_desc into the location - * described by Dest_desc. 
Called by various interpreter + * DESCRIPTION: Store the value described by source_desc into the location + * described by dest_desc. Called by various interpreter * functions to store the result of an operation into - * the destination operand. + * the destination operand -- not just simply the actual "Store" + * ASL operator. * ******************************************************************************/ acpi_status acpi_ex_store ( - acpi_operand_object *source_desc, - acpi_operand_object *dest_desc, - acpi_walk_state *walk_state) + union acpi_operand_object *source_desc, + union acpi_operand_object *dest_desc, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; - acpi_operand_object *ref_desc = dest_desc; + acpi_status status = AE_OK; + union acpi_operand_object *ref_desc = dest_desc; - FUNCTION_TRACE_PTR ("Ex_store", dest_desc); + ACPI_FUNCTION_TRACE_PTR ("ex_store", dest_desc); /* Validate parameters */ if (!source_desc || !dest_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Internal - null pointer\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Null parameter\n")); return_ACPI_STATUS (AE_AML_NO_OPERAND); } - /* Dest_desc can be either a namespace node or an ACPI object */ + /* dest_desc can be either a namespace node or an ACPI object */ - if (VALID_DESCRIPTOR_TYPE (dest_desc, ACPI_DESC_TYPE_NAMED)) { + if (ACPI_GET_DESCRIPTOR_TYPE (dest_desc) == ACPI_DESC_TYPE_NAMED) { /* * Dest is a namespace node, - * Storing an object into a Name "container" + * Storing an object into a Named node. */ status = acpi_ex_store_object_to_node (source_desc, - (acpi_namespace_node *) dest_desc, walk_state); - - /* All done, that's it */ + (struct acpi_namespace_node *) dest_desc, walk_state); return_ACPI_STATUS (status); } + /* Destination object must be a Reference or a Constant object */ + + switch (ACPI_GET_OBJECT_TYPE (dest_desc)) { + case ACPI_TYPE_LOCAL_REFERENCE: + break; + + case ACPI_TYPE_INTEGER: + + /* Allow stores to Constants -- a Noop as per ACPI spec */ + + if (dest_desc->common.flags & AOPOBJ_AML_CONSTANT) { + return_ACPI_STATUS (AE_OK); + } + + /*lint -fallthrough */ - /* Destination object must be an object of type Reference */ + default: - if (dest_desc->common.type != INTERNAL_TYPE_REFERENCE) { /* Destination is not an Reference */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Destination is not a Reference_obj [%p]\n", dest_desc)); + "Destination is not a Reference or Constant object [%p]\n", dest_desc)); - DUMP_STACK_ENTRY (source_desc); - DUMP_STACK_ENTRY (dest_desc); - DUMP_OPERANDS (&dest_desc, IMODE_EXECUTE, "Ex_store", - 2, "Target is not a Reference_obj"); + ACPI_DUMP_STACK_ENTRY (source_desc); + ACPI_DUMP_STACK_ENTRY (dest_desc); + ACPI_DUMP_OPERANDS (&dest_desc, ACPI_IMODE_EXECUTE, "ex_store", + 2, "Target is not a Reference or Constant object"); return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } - /* * Examine the Reference opcode. 
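/*
 * The dispatch implemented in acpi_ex_store() above has three legs: a
 * namespace node gets a full store-to-node (with implicit conversion), a
 * reference object is examined further by its opcode, and an AML constant
 * succeeds silently since stores to constants are no-ops.  Hypothetical
 * miniature of that shape (all names below are stand-ins):
 */
enum dest_kind { DEST_NAMED_NODE, DEST_REFERENCE, DEST_CONSTANT, DEST_OTHER };

static int store_to_node(int value);            /* assumed helper */
static int store_to_reference(int value);       /* assumed helper */

static int do_store(int value, enum dest_kind kind)
{
        switch (kind) {
        case DEST_NAMED_NODE:
                /* Store into the named object, converting as needed */
                return store_to_node(value);

        case DEST_REFERENCE:
                /* Dispatch again on the reference opcode (Index, Debug, ...) */
                return store_to_reference(value);

        case DEST_CONSTANT:
                /* Defined as a no-op; report success without storing */
                return 0;

        default:
                return -1;              /* operand type error */
        }
}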
These cases are handled: * @@ -116,11 +145,10 @@ * 2) Store to an indexed area of a Buffer or Package * 3) Store to a Method Local or Arg * 4) Store to the debug object - * 5) Store to a constant -- a noop */ switch (ref_desc->reference.opcode) { - case AML_NAME_OP: + case AML_REF_OF_OP: /* Storing an object into a Name "container" */ @@ -153,23 +181,24 @@ * Storing to the Debug object causes the value stored to be * displayed and otherwise has no effect -- see ACPI Specification */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "**** Write to Debug Object: ****:\n\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "**** Write to Debug Object: ****:\n\n")); ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %s: ", - acpi_ut_get_type_name (source_desc->common.type))); + acpi_ut_get_object_type_name (source_desc))); - switch (source_desc->common.type) { + switch (ACPI_GET_OBJECT_TYPE (source_desc)) { case ACPI_TYPE_INTEGER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "0x%X (%d)\n", - (u32) source_desc->integer.value, (u32) source_desc->integer.value)); + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "%8.8X%8.8X\n", + ACPI_HIWORD (source_desc->integer.value), + ACPI_LOWORD (source_desc->integer.value))); break; case ACPI_TYPE_BUFFER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "Length 0x%X\n", - (u32) source_desc->buffer.length)); + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "Length %.2X\n", + (u32) source_desc->buffer.length)); break; @@ -181,47 +210,31 @@ case ACPI_TYPE_PACKAGE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "Elements - %p\n", - source_desc->package.elements)); + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "Elements Ptr - %p\n", + source_desc->package.elements)); break; default: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "@0x%p\n", source_desc)); + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "Type %s %p\n", + acpi_ut_get_object_type_name (source_desc), source_desc)); break; } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "\n")); - break; - - - case AML_ZERO_OP: - case AML_ONE_OP: - case AML_ONES_OP: - case AML_REVISION_OP: - - /* - * Storing to a constant is a no-op -- see ACPI Specification - * Delete the reference descriptor, however - */ + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC, "\n")); break; default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Internal - Unknown Reference subtype %02x\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown Reference opcode %X\n", ref_desc->reference.opcode)); - - /* TBD: [Restructure] use object dump routine !! */ - - DUMP_BUFFER (ref_desc, sizeof (acpi_operand_object)); + ACPI_DUMP_ENTRY (ref_desc, ACPI_LV_ERROR); status = AE_AML_INTERNAL; break; - - } /* switch (Ref_desc->Reference.Opcode) */ - + } return_ACPI_STATUS (status); } @@ -229,38 +242,38 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_store_object_to_index + * FUNCTION: acpi_ex_store_object_to_index * - * PARAMETERS: *Source_desc - Value to be stored - * *Node - Named object to receive the value + * PARAMETERS: *source_desc - Value to be stored + * *dest_desc - Named object to receive the value + * walk_state - Current walk state * * RETURN: Status * - * DESCRIPTION: Store the object to the named object. 
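/*
 * The debug-object output above now prints an acpi_integer with
 * "%8.8X%8.8X", i.e. as two 32-bit halves, sidestepping any 64-bit printf
 * length modifier.  Generic form of the split, assuming a 64-bit source
 * value:
 */
#include <stdio.h>
#include <stdint.h>

static void print_integer64(uint64_t value)
{
        unsigned int hi = (unsigned int) (value >> 32);    /* upper half */
        unsigned int lo = (unsigned int) value;            /* lower half */

        printf("%8.8X%8.8X\n", hi, lo);
}

/* print_integer64(0x1122334455667788ULL) prints "1122334455667788" */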
+ * DESCRIPTION: Store the object to indexed Buffer or Package element * ******************************************************************************/ acpi_status acpi_ex_store_object_to_index ( - acpi_operand_object *source_desc, - acpi_operand_object *dest_desc, - acpi_walk_state *walk_state) + union acpi_operand_object *source_desc, + union acpi_operand_object *index_desc, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; - acpi_operand_object *obj_desc; - u32 length; - u32 i; - u8 value = 0; + acpi_status status = AE_OK; + union acpi_operand_object *obj_desc; + union acpi_operand_object *new_desc; + u8 value = 0; - FUNCTION_TRACE ("Ex_store_object_to_index"); + ACPI_FUNCTION_TRACE ("ex_store_object_to_index"); /* * Destination must be a reference pointer, and * must point to either a buffer or a package */ - switch (dest_desc->reference.target_type) { + switch (index_desc->reference.target_type) { case ACPI_TYPE_PACKAGE: /* * Storing to a package element is not simple. The source must be @@ -268,74 +281,34 @@ * source is copied into the destination - we can't just point to the * source object. */ - if (dest_desc->reference.target_type == ACPI_TYPE_PACKAGE) { - /* - * The object at *(Dest_desc->Reference.Where) is the - * element within the package that is to be modified. - */ - obj_desc = *(dest_desc->reference.where); - if (obj_desc) { - /* - * If the Destination element is a package, we will delete - * that object and construct a new one. - * - * TBD: [Investigate] Should both the src and dest be required - * to be packages? - * && (Source_desc->Common.Type == ACPI_TYPE_PACKAGE) - */ - if (obj_desc->common.type == ACPI_TYPE_PACKAGE) { - /* Take away the reference for being part of a package */ - - acpi_ut_remove_reference (obj_desc); - obj_desc = NULL; - } - } + /* + * The object at *(index_desc->Reference.Where) is the + * element within the package that is to be modified. + */ + obj_desc = *(index_desc->reference.where); + + /* Do the conversion/store */ - if (!obj_desc) { - /* - * If the Obj_desc is NULL, it means that an uninitialized package - * element has been used as a destination (this is OK), therefore, - * we must create the destination element to match the type of the - * source element NOTE: Source_desccan be of any type. 
- */ - obj_desc = acpi_ut_create_internal_object (source_desc->common.type); - if (!obj_desc) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - /* - * If the source is a package, copy the source to the new dest - */ - if (ACPI_TYPE_PACKAGE == obj_desc->common.type) { - status = acpi_ut_copy_ipackage_to_ipackage (source_desc, obj_desc, walk_state); - if (ACPI_FAILURE (status)) { - acpi_ut_remove_reference (obj_desc); - return_ACPI_STATUS (status); - } - } + status = acpi_ex_store_object_to_object (source_desc, obj_desc, &new_desc, + walk_state); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not store object to indexed package element\n")); + return_ACPI_STATUS (status); + } - /* Install the new descriptor into the package */ + /* + * If a new object was created, we must install it as the new + * package element + */ + if (new_desc != obj_desc) { + acpi_ut_remove_reference (obj_desc); + *(index_desc->reference.where) = new_desc; - *(dest_desc->reference.where) = obj_desc; - } + /* If same as the original source, add a reference */ - if (ACPI_TYPE_PACKAGE != obj_desc->common.type) { - /* - * The destination element is not a package, so we need to - * convert the contents of the source (Source_desc) and copy into - * the destination (Obj_desc) - */ - status = acpi_ex_store_object_to_object (source_desc, obj_desc, - walk_state); - if (ACPI_FAILURE (status)) { - /* - * An error occurrered when copying the internal object - * so delete the reference. - */ - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Unable to copy the internal object\n")); - return_ACPI_STATUS (AE_AML_OPERAND_TYPE); - } + if (new_desc == source_desc) { + acpi_ut_add_reference (new_desc); } } break; @@ -343,21 +316,20 @@ case ACPI_TYPE_BUFFER_FIELD: - - /* TBD: can probably call the generic Buffer/Field routines */ - /* - * Storing into a buffer at a location defined by an Index. + * Store into a Buffer (not actually a real buffer_field) at a + * location defined by an Index. * - * Each 8-bit element of the source object is written to the - * 8-bit Buffer Field of the Index destination object. + * The first 8-bit element of the source object is written to the + * 8-bit Buffer location defined by the Index destination object, + * according to the ACPI 2.0 specification. */ /* - * Set the Obj_desc to the destination object and type check. + * Make sure the target is a Buffer */ - obj_desc = dest_desc->reference.object; - if (obj_desc->common.type != ACPI_TYPE_BUFFER) { + obj_desc = index_desc->reference.object; + if (ACPI_GET_OBJECT_TYPE (obj_desc) != ACPI_TYPE_BUFFER) { return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } @@ -365,84 +337,65 @@ * The assignment of the individual elements will be slightly * different for each source type. */ - switch (source_desc->common.type) { + switch (ACPI_GET_OBJECT_TYPE (source_desc)) { case ACPI_TYPE_INTEGER: - /* - * Type is Integer, assign bytewise - * This loop to assign each of the elements is somewhat - * backward because of the Big Endian-ness of IA-64 - */ - length = sizeof (acpi_integer); - for (i = length; i != 0; i--) { - value = (u8)(source_desc->integer.value >> (MUL_8 (i - 1))); - obj_desc->buffer.pointer[dest_desc->reference.offset] = value; - } - break; + /* Use the least-significant byte of the integer */ - case ACPI_TYPE_BUFFER: - /* - * Type is Buffer, the Length is in the structure. - * Just loop through the elements and assign each one in turn. 
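/*
 * The rewritten package-element path above boils down to: convert/store
 * the source, then, if that produced a different object, drop the old
 * element's reference and install the new one, taking an extra reference
 * when the element ends up aliasing the source itself.  A refcount-only
 * sketch (the helpers are hypothetical):
 */
struct robj {
        int refs;
        /* ... value ... */
};

static void obj_get(struct robj *o)
{
        if (o)
                o->refs++;
}

static void obj_put(struct robj *o)
{
        if (o && --o->refs == 0) {
                /* last reference gone: free the object here */
        }
}

static void install_element(struct robj **slot, struct robj *new_obj,
                            struct robj *source)
{
        if (new_obj != *slot) {
                obj_put(*slot);         /* old element loses its reference */
                *slot = new_obj;

                if (new_obj == source)  /* element now shares the source */
                        obj_get(new_obj);
        }
}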
- */ - length = source_desc->buffer.length; - for (i = 0; i < length; i++) { - value = source_desc->buffer.pointer[i]; - obj_desc->buffer.pointer[dest_desc->reference.offset] = value; - } + value = (u8) (source_desc->integer.value); break; + case ACPI_TYPE_BUFFER: - case ACPI_TYPE_STRING: - /* - * Type is String, the Length is in the structure. - * Just loop through the elements and assign each one in turn. - */ - length = source_desc->string.length; - for (i = 0; i < length; i++) { - value = source_desc->string.pointer[i]; - obj_desc->buffer.pointer[dest_desc->reference.offset] = value; - } + value = source_desc->buffer.pointer[0]; break; + case ACPI_TYPE_STRING: + + value = (u8) source_desc->string.pointer[0]; + break; default: - /* Other types are invalid */ + /* All other types are invalid */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Source must be Number/Buffer/String type, not %X\n", - source_desc->common.type)); - status = AE_AML_OPERAND_TYPE; - break; + "Source must be Integer/Buffer/String type, not %s\n", + acpi_ut_get_object_type_name (source_desc))); + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } + + /* Store the source value into the target buffer byte */ + + obj_desc->buffer.pointer[index_desc->reference.offset] = value; break; default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Target is not a Package or Buffer_field\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Target is not a Package or buffer_field\n")); status = AE_AML_OPERAND_TYPE; break; } - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ex_store_object_to_node + * FUNCTION: acpi_ex_store_object_to_node * - * PARAMETERS: *Source_desc - Value to be stored - * *Node - Named object to receive the value + * PARAMETERS: source_desc - Value to be stored + * Node - Named object to receive the value + * walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Store the object to the named object. * * The Assignment of an object to a named object is handled here - * The val passed in will replace the current value (if any) + * The value passed in will replace the current value (if any) * with the input value. * * When storing into an object the data is converted to the @@ -450,41 +403,34 @@ * that the target object type (for an initialized target) will * not be changed by a store operation. * - * NOTE: the global lock is acquired early. This will result - * in the global lock being held a bit longer. Also, if the - * function fails during set up we may get the lock when we - * don't really need it. I don't think we care. + * Assumes parameters are already validated. 
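/*
 * Per the ACPI 2.0 rule cited in the new comments above, a store through
 * an Index into a Buffer writes exactly one byte: the least-significant
 * byte of an Integer source, or the first byte of a Buffer/String source,
 * replacing the old bytewise copy loops.  Minimal sketch of the Integer
 * case (function name is illustrative):
 */
#include <stdint.h>
#include <stddef.h>

static void index_store_byte(uint8_t *buf, size_t offset, uint64_t source)
{
        /* Only the least-significant byte of the source survives */
        buf[offset] = (uint8_t) source;
}

/* index_store_byte(buf, 2, 0x1234) writes 0x34 into buf[2] */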
* ******************************************************************************/ acpi_status acpi_ex_store_object_to_node ( - acpi_operand_object *source_desc, - acpi_namespace_node *node, - acpi_walk_state *walk_state) + union acpi_operand_object *source_desc, + struct acpi_namespace_node *node, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; - acpi_operand_object *target_desc; - acpi_object_type8 target_type = ACPI_TYPE_ANY; + acpi_status status = AE_OK; + union acpi_operand_object *target_desc; + union acpi_operand_object *new_desc; + acpi_object_type target_type; - FUNCTION_TRACE ("Ex_store_object_to_node"); + ACPI_FUNCTION_TRACE_PTR ("ex_store_object_to_node", source_desc); /* - * Assuming the parameters were already validated - */ - - /* * Get current type of the node, and object attached to Node */ target_type = acpi_ns_get_type (node); target_desc = acpi_ns_get_attached_object (node); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Storing %p(%s) into node %p(%s)\n", - node, acpi_ut_get_type_name (source_desc->common.type), - source_desc, acpi_ut_get_type_name (target_type))); - + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n", + source_desc, acpi_ut_get_object_type_name (source_desc), + node, acpi_ut_get_type_name (target_type))); /* * Resolve the source object to an actual value @@ -495,20 +441,19 @@ return_ACPI_STATUS (status); } - /* * Do the actual store operation */ switch (target_type) { case ACPI_TYPE_BUFFER_FIELD: - case INTERNAL_TYPE_REGION_FIELD: - case INTERNAL_TYPE_BANK_FIELD: - case INTERNAL_TYPE_INDEX_FIELD: + case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: /* * For fields, copy the source data to the target field. */ - status = acpi_ex_write_data_to_field (source_desc, target_desc); + status = acpi_ex_write_data_to_field (source_desc, target_desc, &walk_state->result_obj); break; @@ -522,122 +467,44 @@ * * Copy and/or convert the source object to a new target object */ - status = acpi_ex_store_object (source_desc, target_type, &target_desc, walk_state); + status = acpi_ex_store_object_to_object (source_desc, target_desc, &new_desc, walk_state); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - /* - * Store the new Target_desc as the new value of the Name, and set - * the Name's type to that of the value being stored in it. - * Source_desc reference count is incremented by Attach_object. - */ - status = acpi_ns_attach_object (node, target_desc, target_type); + if (new_desc != target_desc) { + /* + * Store the new new_desc as the new value of the Name, and set + * the Name's type to that of the value being stored in it. + * source_desc reference count is incremented by attach_object. + * + * Note: This may change the type of the node if an explicit store + * has been performed such that the node/object type has been + * changed. 
+ */ + status = acpi_ns_attach_object (node, new_desc, new_desc->common.type); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Store %s into %s via Convert/Attach\n", - acpi_ut_get_type_name (target_desc->common.type), - acpi_ut_get_type_name (target_type))); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, + "Store %s into %s via Convert/Attach\n", + acpi_ut_get_object_type_name (source_desc), + acpi_ut_get_object_type_name (new_desc))); + } break; default: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Storing %s (%p) directly into node (%p), no implicit conversion\n", - acpi_ut_get_type_name (source_desc->common.type), source_desc, node)); + acpi_ut_get_object_type_name (source_desc), source_desc, node)); /* No conversions for all other types. Just attach the source object */ - status = acpi_ns_attach_object (node, source_desc, source_desc->common.type); + status = acpi_ns_attach_object (node, source_desc, ACPI_GET_OBJECT_TYPE (source_desc)); break; } - return_ACPI_STATUS (status); } -/******************************************************************************* - * - * FUNCTION: Acpi_ex_store_object_to_object - * - * PARAMETERS: *Source_desc - Value to be stored - * *Dest_desc - Object to receive the value - * - * RETURN: Status - * - * DESCRIPTION: Store an object to another object. - * - * The Assignment of an object to another (not named) object - * is handled here. - * The val passed in will replace the current value (if any) - * with the input value. - * - * When storing into an object the data is converted to the - * target object type then stored in the object. This means - * that the target object type (for an initialized target) will - * not be changed by a store operation. - * - * This module allows destination types of Number, String, - * and Buffer. - * - ******************************************************************************/ - -acpi_status -acpi_ex_store_object_to_object ( - acpi_operand_object *source_desc, - acpi_operand_object *dest_desc, - acpi_walk_state *walk_state) -{ - acpi_status status = AE_OK; - acpi_object_type8 destination_type = dest_desc->common.type; - - - FUNCTION_TRACE ("Ex_store_object_to_object"); - - - /* - * Assuming the parameters are valid! 
- */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Storing %p(%s) to %p(%s)\n", - source_desc, acpi_ut_get_type_name (source_desc->common.type), - dest_desc, acpi_ut_get_type_name (dest_desc->common.type))); - - - /* - * From this interface, we only support Integers/Strings/Buffers - */ - switch (destination_type) { - case ACPI_TYPE_INTEGER: - case ACPI_TYPE_STRING: - case ACPI_TYPE_BUFFER: - break; - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Store into %s not implemented\n", - acpi_ut_get_type_name (dest_desc->common.type))); - - return_ACPI_STATUS (AE_NOT_IMPLEMENTED); - } - - - /* - * Resolve the source object to an actual value - * (If it is a reference object) - */ - status = acpi_ex_resolve_object (&source_desc, destination_type, walk_state); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - - /* - * Copy and/or convert the source object to the destination object - */ - status = acpi_ex_store_object (source_desc, destination_type, &dest_desc, walk_state); - - - return_ACPI_STATUS (status); -} - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exstoren.c linux.21rc5-ac2/drivers/acpi/executer/exstoren.c --- linux.21rc5/drivers/acpi/executer/exstoren.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exstoren.c 2003-04-28 17:58:27.000000000 +0100 @@ -3,128 +3,136 @@ * * Module Name: exstoren - AML Interpreter object store support, * Store to Node (namespace object) - * $Revision: 40 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "actables.h" +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exstoren") + ACPI_MODULE_NAME ("exstoren") /******************************************************************************* * - * FUNCTION: Acpi_ex_resolve_object + * FUNCTION: acpi_ex_resolve_object * - * PARAMETERS: Source_desc_ptr - Pointer to the source object - * Target_type - Current type of the target - * Walk_state - Current walk state + * PARAMETERS: source_desc_ptr - Pointer to the source object + * target_type - Current type of the target + * walk_state - Current walk state * - * RETURN: Status, resolved object in Source_desc_ptr. + * RETURN: Status, resolved object in source_desc_ptr. * * DESCRIPTION: Resolve an object. If the object is a reference, dereference - * it and return the actual object in the Source_desc_ptr. + * it and return the actual object in the source_desc_ptr. * ******************************************************************************/ acpi_status acpi_ex_resolve_object ( - acpi_operand_object **source_desc_ptr, - acpi_object_type8 target_type, - acpi_walk_state *walk_state) + union acpi_operand_object **source_desc_ptr, + acpi_object_type target_type, + struct acpi_walk_state *walk_state) { - acpi_operand_object *source_desc = *source_desc_ptr; - acpi_status status = AE_OK; + union acpi_operand_object *source_desc = *source_desc_ptr; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ex_resolve_object"); + ACPI_FUNCTION_TRACE ("ex_resolve_object"); /* - * Ensure we have a Source that can be stored in the target + * Ensure we have a Target that can be stored to */ switch (target_type) { - - /* This case handles the "interchangeable" types Integer, String, and Buffer. */ - - /* - * These cases all require only Integers or values that - * can be converted to Integers (Strings or Buffers) - */ case ACPI_TYPE_BUFFER_FIELD: - case INTERNAL_TYPE_REGION_FIELD: - case INTERNAL_TYPE_BANK_FIELD: - case INTERNAL_TYPE_INDEX_FIELD: + case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: + /* + * These cases all require only Integers or values that + * can be converted to Integers (Strings or Buffers) + */ - /* - * Stores into a Field/Region or into a Buffer/String - * are all essentially the same. - */ case ACPI_TYPE_INTEGER: case ACPI_TYPE_STRING: case ACPI_TYPE_BUFFER: + /* + * Stores into a Field/Region or into a Integer/Buffer/String + * are all essentially the same. This case handles the + * "interchangeable" types Integer, String, and Buffer. 
+ */ + if (ACPI_GET_OBJECT_TYPE (source_desc) == ACPI_TYPE_LOCAL_REFERENCE) { + /* Resolve a reference object first */ - /* TBD: FIX - check for source==REF, resolve, then check type */ + status = acpi_ex_resolve_to_value (source_desc_ptr, walk_state); + if (ACPI_FAILURE (status)) { + break; + } + } /* - * If Source_desc is not a valid type, try to resolve it to one. + * Must have a Integer, Buffer, or String */ - if ((source_desc->common.type != ACPI_TYPE_INTEGER) && - (source_desc->common.type != ACPI_TYPE_BUFFER) && - (source_desc->common.type != ACPI_TYPE_STRING)) { + if ((ACPI_GET_OBJECT_TYPE (source_desc) != ACPI_TYPE_INTEGER) && + (ACPI_GET_OBJECT_TYPE (source_desc) != ACPI_TYPE_BUFFER) && + (ACPI_GET_OBJECT_TYPE (source_desc) != ACPI_TYPE_STRING)) { /* - * Initially not a valid type, convert + * Conversion successful but still not a valid type */ - status = acpi_ex_resolve_to_value (source_desc_ptr, walk_state); - if (ACPI_SUCCESS (status) && - (source_desc->common.type != ACPI_TYPE_INTEGER) && - (source_desc->common.type != ACPI_TYPE_BUFFER) && - (source_desc->common.type != ACPI_TYPE_STRING)) { - /* - * Conversion successful but still not a valid type - */ - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Cannot assign type %s to %s (must be type Int/Str/Buf)\n", - acpi_ut_get_type_name ((*source_desc_ptr)->common.type), - acpi_ut_get_type_name (target_type))); - status = AE_AML_OPERAND_TYPE; - } + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Cannot assign type %s to %s (must be type Int/Str/Buf)\n", + acpi_ut_get_object_type_name (source_desc), + acpi_ut_get_type_name (target_type))); + status = AE_AML_OPERAND_TYPE; } break; - case INTERNAL_TYPE_ALIAS: + case ACPI_TYPE_LOCAL_ALIAS: /* - * Aliases are resolved by Acpi_ex_prep_operands + * Aliases are resolved by acpi_ex_prep_operands */ ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Store into Alias - should never happen\n")); status = AE_AML_INTERNAL; @@ -147,12 +155,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_store_object + * FUNCTION: acpi_ex_store_object_to_object * - * PARAMETERS: Source_desc - Object to store - * Target_type - Current type of the target - * Target_desc_ptr - Pointer to the target - * Walk_state - Current walk state + * PARAMETERS: source_desc - Object to store + * dest_desc - Object to receive a copy of the source + * new_desc - New object if dest_desc is obsoleted + * walk_state - Current walk state * * RETURN: Status * @@ -161,93 +169,123 @@ * conversion), and a copy of the value of the source to * the target. * + * The Assignment of an object to another (not named) object + * is handled here. + * The Source passed in will replace the current value (if any) + * with the input value. + * + * When storing into an object the data is converted to the + * target object type then stored in the object. This means + * that the target object type (for an initialized target) will + * not be changed by a store operation. + * + * This module allows destination types of Number, String, + * Buffer, and Package. + * + * Assumes parameters are already validated. NOTE: source_desc + * resolution (from a reference object) must be performed by + * the caller if necessary. 
+ * ******************************************************************************/ acpi_status -acpi_ex_store_object ( - acpi_operand_object *source_desc, - acpi_object_type8 target_type, - acpi_operand_object **target_desc_ptr, - acpi_walk_state *walk_state) +acpi_ex_store_object_to_object ( + union acpi_operand_object *source_desc, + union acpi_operand_object *dest_desc, + union acpi_operand_object **new_desc, + struct acpi_walk_state *walk_state) { - acpi_operand_object *target_desc = *target_desc_ptr; - acpi_status status = AE_OK; + union acpi_operand_object *actual_src_desc; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ex_store_object"); + ACPI_FUNCTION_TRACE_PTR ("ex_store_object_to_object", source_desc); - /* - * Perform the "implicit conversion" of the source to the current type - * of the target - As per the ACPI specification. - * - * If no conversion performed, Source_desc is left alone, otherwise it - * is updated with a new object. - */ - status = acpi_ex_convert_to_target_type (target_type, &source_desc, walk_state); - if (ACPI_FAILURE (status)) { + actual_src_desc = source_desc; + if (!dest_desc) { + /* + * There is no destination object (An uninitialized node or + * package element), so we can simply copy the source object + * creating a new destination object + */ + status = acpi_ut_copy_iobject_to_iobject (actual_src_desc, new_desc, walk_state); return_ACPI_STATUS (status); } - /* - * We now have two objects of identical types, and we can perform a - * copy of the *value* of the source object. - */ - switch (target_type) { - case ACPI_TYPE_ANY: - case INTERNAL_TYPE_DEF_ANY: - + if (ACPI_GET_OBJECT_TYPE (source_desc) != ACPI_GET_OBJECT_TYPE (dest_desc)) { /* - * The target namespace node is uninitialized (has no target object), - * and will take on the type of the source object + * The source type does not match the type of the destination. + * Perform the "implicit conversion" of the source to the current type + * of the target as per the ACPI specification. + * + * If no conversion performed, actual_src_desc = source_desc. + * Otherwise, actual_src_desc is a temporary object to hold the + * converted object. */ - *target_desc_ptr = source_desc; - break; + status = acpi_ex_convert_to_target_type (ACPI_GET_OBJECT_TYPE (dest_desc), source_desc, + &actual_src_desc, walk_state); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + if (source_desc == actual_src_desc) { + /* + * No conversion was performed. Return the source_desc as the + * new object. + */ + *new_desc = source_desc; + return_ACPI_STATUS (AE_OK); + } + } + /* + * We now have two objects of identical types, and we can perform a + * copy of the *value* of the source object. 
+ */ + switch (ACPI_GET_OBJECT_TYPE (dest_desc)) { case ACPI_TYPE_INTEGER: - target_desc->integer.value = source_desc->integer.value; + dest_desc->integer.value = actual_src_desc->integer.value; /* Truncate value if we are executing from a 32-bit ACPI table */ - acpi_ex_truncate_for32bit_table (target_desc, walk_state); + acpi_ex_truncate_for32bit_table (dest_desc); break; case ACPI_TYPE_STRING: - status = acpi_ex_copy_string_to_string (source_desc, target_desc); + status = acpi_ex_store_string_to_string (actual_src_desc, dest_desc); break; - case ACPI_TYPE_BUFFER: - status = acpi_ex_copy_buffer_to_buffer (source_desc, target_desc); + status = acpi_ex_store_buffer_to_buffer (actual_src_desc, dest_desc); break; - case ACPI_TYPE_PACKAGE: - /* - * TBD: [Unhandled] Not real sure what to do here - */ - status = AE_NOT_IMPLEMENTED; + status = acpi_ut_copy_iobject_to_iobject (actual_src_desc, &dest_desc, walk_state); break; - default: - /* * All other types come here. */ ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Store into type %s not implemented\n", - acpi_ut_get_type_name (target_type))); + acpi_ut_get_object_type_name (dest_desc))); status = AE_NOT_IMPLEMENTED; break; } + if (actual_src_desc != source_desc) { + /* Delete the intermediate (temporary) source object */ + + acpi_ut_remove_reference (actual_src_desc); + } + *new_desc = dest_desc; return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exstorob.c linux.21rc5-ac2/drivers/acpi/executer/exstorob.c --- linux.21rc5/drivers/acpi/executer/exstorob.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exstorob.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,48 +2,61 @@ /****************************************************************************** * * Module Name: exstorob - AML Interpreter object store support, store to object - * $Revision: 37 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "actables.h" +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exstorob") + ACPI_MODULE_NAME ("exstorob") /******************************************************************************* * - * FUNCTION: Acpi_ex_copy_buffer_to_buffer + * FUNCTION: acpi_ex_store_buffer_to_buffer * - * PARAMETERS: Source_desc - Source object to copy - * Target_desc - Destination object of the copy + * PARAMETERS: source_desc - Source object to copy + * target_desc - Destination object of the copy * * RETURN: Status * @@ -52,33 +65,35 @@ ******************************************************************************/ acpi_status -acpi_ex_copy_buffer_to_buffer ( - acpi_operand_object *source_desc, - acpi_operand_object *target_desc) +acpi_ex_store_buffer_to_buffer ( + union acpi_operand_object *source_desc, + union acpi_operand_object *target_desc) { - u32 length; - u8 *buffer; + u32 length; + u8 *buffer; - PROC_NAME ("Ex_copy_buffer_to_buffer"); + ACPI_FUNCTION_TRACE_PTR ("ex_store_buffer_to_buffer", source_desc); /* - * We know that Source_desc is a buffer by now + * We know that source_desc is a buffer by now */ buffer = (u8 *) source_desc->buffer.pointer; length = source_desc->buffer.length; /* - * If target is a buffer of length zero, allocate a new - * buffer of the proper length + * If target is a buffer of length zero or is a static buffer, + * allocate a new buffer of the proper length */ - if (target_desc->buffer.length == 0) { + if ((target_desc->buffer.length == 0) || + (target_desc->common.flags & AOPOBJ_STATIC_POINTER)) { target_desc->buffer.pointer = ACPI_MEM_ALLOCATE (length); if (!target_desc->buffer.pointer) { - return (AE_NO_MEMORY); + return_ACPI_STATUS (AE_NO_MEMORY); } + target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER; target_desc->buffer.length = length; } @@ -89,31 +104,33 @@ if (length <= target_desc->buffer.length) { /* Clear existing buffer and copy in the new one */ - MEMSET (target_desc->buffer.pointer, 0, target_desc->buffer.length); - MEMCPY (target_desc->buffer.pointer, buffer, length); + ACPI_MEMSET (target_desc->buffer.pointer, 0, target_desc->buffer.length); + ACPI_MEMCPY (target_desc->buffer.pointer, buffer, length); } - else { /* * Truncate the source, 
copy only what will fit */ - MEMCPY (target_desc->buffer.pointer, buffer, target_desc->buffer.length); + ACPI_MEMCPY (target_desc->buffer.pointer, buffer, target_desc->buffer.length); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Truncating src buffer from %X to %X\n", length, target_desc->buffer.length)); } - return (AE_OK); + /* Copy flags */ + + target_desc->buffer.flags = source_desc->buffer.flags; + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ex_copy_string_to_string + * FUNCTION: acpi_ex_store_string_to_string * - * PARAMETERS: Source_desc - Source object to copy - * Target_desc - Destination object of the copy + * PARAMETERS: source_desc - Source object to copy + * target_desc - Destination object of the copy * * RETURN: Status * @@ -122,36 +139,39 @@ ******************************************************************************/ acpi_status -acpi_ex_copy_string_to_string ( - acpi_operand_object *source_desc, - acpi_operand_object *target_desc) +acpi_ex_store_string_to_string ( + union acpi_operand_object *source_desc, + union acpi_operand_object *target_desc) { - u32 length; - u8 *buffer; + u32 length; + u8 *buffer; - FUNCTION_ENTRY (); + ACPI_FUNCTION_TRACE_PTR ("ex_store_string_to_string", source_desc); /* - * We know that Source_desc is a string by now. + * We know that source_desc is a string by now. */ buffer = (u8 *) source_desc->string.pointer; length = source_desc->string.length; /* - * Setting a string value replaces the old string + * Replace existing string value if it will fit and the string + * pointer is not a static pointer (part of an ACPI table) */ - if (length < target_desc->string.length) { - /* Clear old string and copy in the new one */ - - MEMSET (target_desc->string.pointer, 0, target_desc->string.length); - MEMCPY (target_desc->string.pointer, buffer, length); + if ((length < target_desc->string.length) && + (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) { + /* + * String will fit in existing non-static buffer. 
+ * Clear old string and copy in the new one + */ + ACPI_MEMSET (target_desc->string.pointer, 0, (acpi_size) target_desc->string.length + 1); + ACPI_MEMCPY (target_desc->string.pointer, buffer, length); } - else { /* - * Free the current buffer, then allocate a buffer + * Free the current buffer, then allocate a new buffer * large enough to hold the value */ if (target_desc->string.pointer && @@ -162,16 +182,19 @@ ACPI_MEM_FREE (target_desc->string.pointer); } - target_desc->string.pointer = ACPI_MEM_ALLOCATE (length + 1); + target_desc->string.pointer = ACPI_MEM_CALLOCATE ((acpi_size) length + 1); if (!target_desc->string.pointer) { - return (AE_NO_MEMORY); + return_ACPI_STATUS (AE_NO_MEMORY); } - target_desc->string.length = length; - MEMCPY (target_desc->string.pointer, buffer, length); + target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER; + ACPI_MEMCPY (target_desc->string.pointer, buffer, length); } - return (AE_OK); + /* Set the new target length */ + + target_desc->string.length = length; + return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exsystem.c linux.21rc5-ac2/drivers/acpi/executer/exsystem.c --- linux.21rc5/drivers/acpi/executer/exsystem.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exsystem.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,42 +2,58 @@ /****************************************************************************** * * Module Name: exsystem - Interface to OS services - * $Revision: 67 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "achware.h" -#include "acevents.h" +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exsystem") + ACPI_MODULE_NAME ("exsystem") /******************************************************************************* * - * FUNCTION: Acpi_ex_system_wait_semaphore + * FUNCTION: acpi_ex_system_wait_semaphore * * PARAMETERS: Semaphore - OSD semaphore to wait on * Timeout - Max time to wait @@ -52,13 +68,14 @@ acpi_status acpi_ex_system_wait_semaphore ( - acpi_handle semaphore, - u32 timeout) + acpi_handle semaphore, + u16 timeout) { - acpi_status status; + acpi_status status; + acpi_status status2; - FUNCTION_TRACE ("Ex_system_wait_semaphore"); + ACPI_FUNCTION_TRACE ("ex_system_wait_semaphore"); status = acpi_os_wait_semaphore (semaphore, 1, 0); @@ -78,11 +95,11 @@ /* Reacquire the interpreter */ - status = acpi_ex_enter_interpreter (); - if (ACPI_SUCCESS (status)) { - /* Restore the timeout exception */ + status2 = acpi_ex_enter_interpreter (); + if (ACPI_FAILURE (status2)) { + /* Report fatal error, could not acquire interpreter */ - status = AE_TIME; + return_ACPI_STATUS (status2); } } @@ -92,21 +109,24 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_system_do_stall + * FUNCTION: acpi_ex_system_do_stall * - * PARAMETERS: How_long - The amount of time to stall + * PARAMETERS: how_long - The amount of time to stall * - * RETURN: None + * RETURN: Status * * DESCRIPTION: Suspend running thread for specified amount of time. 
* ******************************************************************************/ -void +acpi_status acpi_ex_system_do_stall ( - u32 how_long) + u32 how_long) { - FUNCTION_ENTRY (); + acpi_status status = AE_OK; + + + ACPI_FUNCTION_ENTRY (); if (how_long > 1000) /* 1 millisecond */ { @@ -118,20 +138,22 @@ /* And now we must get the interpreter again */ - acpi_ex_enter_interpreter (); + status = acpi_ex_enter_interpreter (); } else { acpi_os_sleep (0, (how_long / 1000) + 1); } + + return (status); } /******************************************************************************* * - * FUNCTION: Acpi_ex_system_do_suspend + * FUNCTION: acpi_ex_system_do_suspend * - * PARAMETERS: How_long - The amount of time to suspend + * PARAMETERS: how_long - The amount of time to suspend * * RETURN: None * @@ -139,12 +161,14 @@ * ******************************************************************************/ -void +acpi_status acpi_ex_system_do_suspend ( - u32 how_long) + u32 how_long) { + acpi_status status; + - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* Since this thread will sleep, we must release the interpreter */ @@ -156,34 +180,35 @@ /* And now we must get the interpreter again */ - acpi_ex_enter_interpreter (); + status = acpi_ex_enter_interpreter (); + return (status); } /******************************************************************************* * - * FUNCTION: Acpi_ex_system_acquire_mutex + * FUNCTION: acpi_ex_system_acquire_mutex * - * PARAMETERS: *Time_desc - The 'time to delay' object descriptor - * *Obj_desc - The object descriptor for this op + * PARAMETERS: *time_desc - The 'time to delay' object descriptor + * *obj_desc - The object descriptor for this op * * RETURN: Status * * DESCRIPTION: Provides an access point to perform synchronization operations * within the AML. This function will cause a lock to be generated - * for the Mutex pointed to by Obj_desc. + * for the Mutex pointed to by obj_desc. 
* ******************************************************************************/ acpi_status acpi_ex_system_acquire_mutex ( - acpi_operand_object *time_desc, - acpi_operand_object *obj_desc) + union acpi_operand_object *time_desc, + union acpi_operand_object *obj_desc) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE_PTR ("Ex_system_acquire_mutex", obj_desc); + ACPI_FUNCTION_TRACE_PTR ("ex_system_acquire_mutex", obj_desc); if (!obj_desc) { @@ -194,21 +219,21 @@ * Support for the _GL_ Mutex object -- go get the global lock */ if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) { - status = acpi_ev_acquire_global_lock (); + status = acpi_ev_acquire_global_lock ((u16) time_desc->integer.value); return_ACPI_STATUS (status); } status = acpi_ex_system_wait_semaphore (obj_desc->mutex.semaphore, - (u32) time_desc->integer.value); + (u16) time_desc->integer.value); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ex_system_release_mutex + * FUNCTION: acpi_ex_system_release_mutex * - * PARAMETERS: *Obj_desc - The object descriptor for this op + * PARAMETERS: *obj_desc - The object descriptor for this op * * RETURN: Status * @@ -221,12 +246,12 @@ acpi_status acpi_ex_system_release_mutex ( - acpi_operand_object *obj_desc) + union acpi_operand_object *obj_desc) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ex_system_release_mutex"); + ACPI_FUNCTION_TRACE ("ex_system_release_mutex"); if (!obj_desc) { @@ -237,8 +262,8 @@ * Support for the _GL_ Mutex object -- release the global lock */ if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) { - acpi_ev_release_global_lock (); - return_ACPI_STATUS (AE_OK); + status = acpi_ev_release_global_lock (); + return_ACPI_STATUS (status); } status = acpi_os_signal_semaphore (obj_desc->mutex.semaphore, 1); @@ -248,9 +273,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_system_signal_event + * FUNCTION: acpi_ex_system_signal_event * - * PARAMETERS: *Obj_desc - The object descriptor for this op + * PARAMETERS: *obj_desc - The object descriptor for this op * * RETURN: AE_OK * @@ -261,12 +286,12 @@ acpi_status acpi_ex_system_signal_event ( - acpi_operand_object *obj_desc) + union acpi_operand_object *obj_desc) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ex_system_signal_event"); + ACPI_FUNCTION_TRACE ("ex_system_signal_event"); if (obj_desc) { @@ -279,10 +304,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_system_wait_event + * FUNCTION: acpi_ex_system_wait_event * - * PARAMETERS: *Time_desc - The 'time to delay' object descriptor - * *Obj_desc - The object descriptor for this op + * PARAMETERS: *time_desc - The 'time to delay' object descriptor + * *obj_desc - The object descriptor for this op * * RETURN: Status * @@ -294,30 +319,29 @@ acpi_status acpi_ex_system_wait_event ( - acpi_operand_object *time_desc, - acpi_operand_object *obj_desc) + union acpi_operand_object *time_desc, + union acpi_operand_object *obj_desc) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ex_system_wait_event"); + ACPI_FUNCTION_TRACE ("ex_system_wait_event"); if (obj_desc) { status = acpi_ex_system_wait_semaphore (obj_desc->event.semaphore, - (u32) time_desc->integer.value); + (u16) time_desc->integer.value); } - return_ACPI_STATUS 
(status); } /******************************************************************************* * - * FUNCTION: Acpi_ex_system_reset_event + * FUNCTION: acpi_ex_system_reset_event * - * PARAMETERS: *Obj_desc - The object descriptor for this op + * PARAMETERS: *obj_desc - The object descriptor for this op * * RETURN: Status * @@ -327,13 +351,13 @@ acpi_status acpi_ex_system_reset_event ( - acpi_operand_object *obj_desc) + union acpi_operand_object *obj_desc) { - acpi_status status = AE_OK; - void *temp_semaphore; + acpi_status status = AE_OK; + void *temp_semaphore; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* @@ -342,7 +366,7 @@ */ status = acpi_os_create_semaphore (ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore); if (ACPI_SUCCESS (status)) { - acpi_os_delete_semaphore (obj_desc->event.semaphore); + (void) acpi_os_delete_semaphore (obj_desc->event.semaphore); obj_desc->event.semaphore = temp_semaphore; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/exutils.c linux.21rc5-ac2/drivers/acpi/executer/exutils.c --- linux.21rc5/drivers/acpi/executer/exutils.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/exutils.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,26 +2,44 @@ /****************************************************************************** * * Module Name: exutils - interpreter/scanner utilities - * $Revision: 85 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ @@ -41,45 +59,48 @@ #define DEFINE_AML_GLOBALS -#include "acpi.h" -#include "acparser.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acevents.h" -#include "acparser.h" +#include +#include +#include +#include #define _COMPONENT ACPI_EXECUTER - MODULE_NAME ("exutils") + ACPI_MODULE_NAME ("exutils") +#ifndef ACPI_NO_METHOD_EXECUTION + /******************************************************************************* * - * FUNCTION: Acpi_ex_enter_interpreter + * FUNCTION: acpi_ex_enter_interpreter * * PARAMETERS: None * - * DESCRIPTION: Enter the interpreter execution region - * TBD: should be a macro + * DESCRIPTION: Enter the interpreter execution region. Failure to enter + * the interpreter region is a fatal system error * ******************************************************************************/ acpi_status acpi_ex_enter_interpreter (void) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Ex_enter_interpreter"); + ACPI_FUNCTION_TRACE ("ex_enter_interpreter"); status = acpi_ut_acquire_mutex (ACPI_MTX_EXECUTE); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not acquire interpreter mutex\n")); + } + return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ex_exit_interpreter + * FUNCTION: acpi_ex_exit_interpreter * * PARAMETERS: None * @@ -95,56 +116,31 @@ * already executing * 7) About to invoke a user-installed opregion handler * - * TBD: should be a macro - * ******************************************************************************/ void acpi_ex_exit_interpreter (void) { - FUNCTION_TRACE ("Ex_exit_interpreter"); - - - acpi_ut_release_mutex (ACPI_MTX_EXECUTE); - - return_VOID; -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ex_validate_object_type - * - * PARAMETERS: Type Object type to validate - * - * DESCRIPTION: Determine if a type is a valid ACPI object type - * - ******************************************************************************/ + acpi_status status; -u8 -acpi_ex_validate_object_type ( - acpi_object_type type) -{ - FUNCTION_ENTRY (); + ACPI_FUNCTION_TRACE ("ex_exit_interpreter"); - if ((type > ACPI_TYPE_MAX && type < INTERNAL_TYPE_BEGIN) || - (type > INTERNAL_TYPE_MAX)) { - return (FALSE); + status = acpi_ut_release_mutex (ACPI_MTX_EXECUTE); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not release interpreter mutex\n")); } - return (TRUE); + return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ex_truncate_for32bit_table + * FUNCTION: acpi_ex_truncate_for32bit_table * - * PARAMETERS: Obj_desc - Object to be 
truncated - * Walk_state - Current walk state - * (A method must be executing) + * PARAMETERS: obj_desc - Object to be truncated * * RETURN: none * @@ -155,11 +151,10 @@ void acpi_ex_truncate_for32bit_table ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state) + union acpi_operand_object *obj_desc) { - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* @@ -167,12 +162,11 @@ * a control method */ if ((!obj_desc) || - (obj_desc->common.type != ACPI_TYPE_INTEGER) || - (!walk_state->method_node)) { + (ACPI_GET_OBJECT_TYPE (obj_desc) != ACPI_TYPE_INTEGER)) { return; } - if (walk_state->method_node->flags & ANOBJ_DATA_WIDTH_32) { + if (acpi_gbl_integer_byte_width == 4) { /* * We are running a method that exists in a 32-bit ACPI table. * Truncate the value to 32 bits by zeroing out the upper 32-bit field @@ -184,9 +178,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_acquire_global_lock + * FUNCTION: acpi_ex_acquire_global_lock * - * PARAMETERS: Rule - Lock rule: Always_lock, Never_lock + * PARAMETERS: field_flags - Flags with Lock rule: + * always_lock or never_lock * * RETURN: TRUE/FALSE indicating whether the lock was actually acquired * @@ -198,25 +193,24 @@ u8 acpi_ex_acquire_global_lock ( - u32 rule) + u32 field_flags) { - u8 locked = FALSE; - acpi_status status; + u8 locked = FALSE; + acpi_status status; - FUNCTION_TRACE ("Ex_acquire_global_lock"); + ACPI_FUNCTION_TRACE ("ex_acquire_global_lock"); - /* Only attempt lock if the Rule says so */ + /* Only attempt lock if the always_lock bit is set */ - if (rule == (u32) GLOCK_ALWAYS_LOCK) { - /* We should attempt to get the lock */ + if (field_flags & AML_FIELD_LOCK_RULE_MASK) { + /* We should attempt to get the lock, wait forever */ - status = acpi_ev_acquire_global_lock (); + status = acpi_ev_acquire_global_lock (ACPI_WAIT_FOREVER); if (ACPI_SUCCESS (status)) { locked = TRUE; } - else { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not acquire Global Lock, %s\n", acpi_format_exception (status))); @@ -229,10 +223,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ex_release_global_lock + * FUNCTION: acpi_ex_release_global_lock * - * PARAMETERS: Locked_by_me - Return value from corresponding call to - * Acquire_global_lock. + * PARAMETERS: locked_by_me - Return value from corresponding call to + * acquire_global_lock. 
* * RETURN: Status * @@ -240,12 +234,14 @@ * ******************************************************************************/ -acpi_status +void acpi_ex_release_global_lock ( - u8 locked_by_me) + u8 locked_by_me) { + acpi_status status; - FUNCTION_TRACE ("Ex_release_global_lock"); + + ACPI_FUNCTION_TRACE ("ex_release_global_lock"); /* Only attempt unlock if the caller locked it */ @@ -253,17 +249,22 @@ if (locked_by_me) { /* OK, now release the lock */ - acpi_ev_release_global_lock (); - } + status = acpi_ev_release_global_lock (); + if (ACPI_FAILURE (status)) { + /* Report the error, but there isn't much else we can do */ + ACPI_REPORT_ERROR (("Could not release ACPI Global Lock, %s\n", + acpi_format_exception (status))); + } + } - return_ACPI_STATUS (AE_OK); + return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ex_digits_needed + * FUNCTION: acpi_ex_digits_needed * * PARAMETERS: Value - Value to be represented * Base - Base of representation @@ -274,26 +275,27 @@ u32 acpi_ex_digits_needed ( - acpi_integer value, - u32 base) + acpi_integer value, + u32 base) { - u32 num_digits = 0; + u32 num_digits; + acpi_integer current_value; + acpi_integer quotient; - FUNCTION_TRACE ("Ex_digits_needed"); + ACPI_FUNCTION_TRACE ("ex_digits_needed"); - if (base < 1) { - REPORT_ERROR (("Ex_digits_needed: Internal error - Invalid base\n")); - } + /* + * acpi_integer is unsigned, so we don't worry about a '-' + */ + current_value = value; + num_digits = 0; - else { - /* - * acpi_integer is unsigned, which is why we don't worry about a '-' - */ - for (num_digits = 1; - (acpi_ut_short_divide (&value, base, &value, NULL)); - ++num_digits) { ; } + while (current_value) { + (void) acpi_ut_short_divide (¤t_value, base, "ient, NULL); + num_digits++; + current_value = quotient; } return_VALUE (num_digits); @@ -302,115 +304,74 @@ /******************************************************************************* * - * FUNCTION: ntohl - * - * PARAMETERS: Value - Value to be converted - * - * DESCRIPTION: Convert a 32-bit value to big-endian (swap the bytes) - * - ******************************************************************************/ - -static u32 -_ntohl ( - u32 value) -{ - union { - u32 value; - u8 bytes[4]; - } out; - - union { - u32 value; - u8 bytes[4]; - } in; - - - FUNCTION_ENTRY (); - - - in.value = value; - - out.bytes[0] = in.bytes[3]; - out.bytes[1] = in.bytes[2]; - out.bytes[2] = in.bytes[1]; - out.bytes[3] = in.bytes[0]; - - return (out.value); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ex_eisa_id_to_string + * FUNCTION: acpi_ex_eisa_id_to_string * - * PARAMETERS: Numeric_id - EISA ID to be converted - * Out_string - Where to put the converted string (8 bytes) + * PARAMETERS: numeric_id - EISA ID to be converted + * out_string - Where to put the converted string (8 bytes) * * DESCRIPTION: Convert a numeric EISA ID to string representation * ******************************************************************************/ -acpi_status +void acpi_ex_eisa_id_to_string ( - u32 numeric_id, - NATIVE_CHAR *out_string) + u32 numeric_id, + char *out_string) { - u32 id; + u32 eisa_id; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); - /* swap to big-endian to get contiguous bits */ + /* Swap ID to big-endian to get contiguous bits */ - id = _ntohl (numeric_id); + eisa_id = acpi_ut_dword_byte_swap (numeric_id); - out_string[0] = (char) ('@' + ((id >> 26) & 0x1f)); - out_string[1] = 
(char) ('@' + ((id >> 21) & 0x1f)); - out_string[2] = (char) ('@' + ((id >> 16) & 0x1f)); - out_string[3] = acpi_ut_hex_to_ascii_char (id, 12); - out_string[4] = acpi_ut_hex_to_ascii_char (id, 8); - out_string[5] = acpi_ut_hex_to_ascii_char (id, 4); - out_string[6] = acpi_ut_hex_to_ascii_char (id, 0); + out_string[0] = (char) ('@' + (((unsigned long) eisa_id >> 26) & 0x1f)); + out_string[1] = (char) ('@' + ((eisa_id >> 21) & 0x1f)); + out_string[2] = (char) ('@' + ((eisa_id >> 16) & 0x1f)); + out_string[3] = acpi_ut_hex_to_ascii_char ((acpi_integer) eisa_id, 12); + out_string[4] = acpi_ut_hex_to_ascii_char ((acpi_integer) eisa_id, 8); + out_string[5] = acpi_ut_hex_to_ascii_char ((acpi_integer) eisa_id, 4); + out_string[6] = acpi_ut_hex_to_ascii_char ((acpi_integer) eisa_id, 0); out_string[7] = 0; - - return (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ex_unsigned_integer_to_string + * FUNCTION: acpi_ex_unsigned_integer_to_string * * PARAMETERS: Value - Value to be converted - * Out_string - Where to put the converted string (8 bytes) + * out_string - Where to put the converted string (8 bytes) * * RETURN: Convert a number to string representation * ******************************************************************************/ -acpi_status +void acpi_ex_unsigned_integer_to_string ( - acpi_integer value, - NATIVE_CHAR *out_string) + acpi_integer value, + char *out_string) { - u32 count; - u32 digits_needed; - u32 remainder; + u32 count; + u32 digits_needed; + u32 remainder; + acpi_integer quotient; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); digits_needed = acpi_ex_digits_needed (value, 10); out_string[digits_needed] = 0; for (count = digits_needed; count > 0; count--) { - acpi_ut_short_divide (&value, 10, &value, &remainder); - out_string[count-1] = (NATIVE_CHAR) ('0' + remainder); + (void) acpi_ut_short_divide (&value, 10, &quotient, &remainder); + out_string[count-1] = (char) ('0' + remainder); + value = quotient; } - - return (AE_OK); } - +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/executer/Makefile linux.21rc5-ac2/drivers/acpi/executer/Makefile --- linux.21rc5/drivers/acpi/executer/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/executer/Makefile 2003-04-28 17:58:27.000000000 +0100 @@ -1,11 +1,10 @@ # # Makefile for all Linux ACPI interpreter subdirectories -# EXCEPT for the ospm directory # O_TARGET := $(notdir $(CURDIR)).o -obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c)) +obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c)) EXTRA_CFLAGS += $(ACPI_CFLAGS) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/fan.c linux.21rc5-ac2/drivers/acpi/fan.c --- linux.21rc5/drivers/acpi/fan.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/fan.c 2003-04-28 17:58:27.000000000 +0100 @@ -0,0 +1,296 @@ +/* + * acpi_fan.c - ACPI Fan Driver ($Revision: 28 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_FAN_COMPONENT +ACPI_MODULE_NAME ("acpi_fan") + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION(ACPI_FAN_DRIVER_NAME); +MODULE_LICENSE("GPL"); + +#define PREFIX "ACPI: " + + +int acpi_fan_add (struct acpi_device *device); +int acpi_fan_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_fan_driver = { + .name = ACPI_FAN_DRIVER_NAME, + .class = ACPI_FAN_CLASS, + .ids = ACPI_FAN_HID, + .ops = { + .add = acpi_fan_add, + .remove = acpi_fan_remove, + }, +}; + +struct acpi_fan { + acpi_handle handle; +}; + + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +struct proc_dir_entry *acpi_fan_dir = NULL; + + +static int +acpi_fan_read_state ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_fan *fan = (struct acpi_fan *) data; + char *p = page; + int len = 0; + int state = 0; + + ACPI_FUNCTION_TRACE("acpi_fan_read_state"); + + if (!fan || (off != 0)) + goto end; + + if (acpi_bus_get_power(fan->handle, &state)) + goto end; + + p += sprintf(p, "status: %s\n", + !state?"on":"off"); + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_fan_write_state ( + struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + int result = 0; + struct acpi_fan *fan = (struct acpi_fan *) data; + char state_string[12] = {'\0'}; + + ACPI_FUNCTION_TRACE("acpi_fan_write_state"); + + if (!fan || (count > sizeof(state_string) - 1)) + return_VALUE(-EINVAL); + + if (copy_from_user(state_string, buffer, count)) + return_VALUE(-EFAULT); + + state_string[count] = '\0'; + + result = acpi_bus_set_power(fan->handle, + simple_strtoul(state_string, NULL, 0)); + if (result) + return_VALUE(result); + + return_VALUE(count); +} + + +static int +acpi_fan_add_fs ( + struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_fan_add_fs"); + + if (!device) + return_VALUE(-EINVAL); + + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_fan_dir); + if (!acpi_device_dir(device)) + return_VALUE(-ENODEV); + } + + /* 'status' [R/W] */ + entry = create_proc_entry(ACPI_FAN_FILE_STATE, + S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_FAN_FILE_STATE)); + else { + entry->read_proc = acpi_fan_read_state; + entry->write_proc = acpi_fan_write_state; + entry->data = acpi_driver_data(device); + } + + return_VALUE(0); +} + + +static int +acpi_fan_remove_fs ( + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_fan_remove_fs"); 
+ + if (acpi_device_dir(device)) { + remove_proc_entry(acpi_device_bid(device), acpi_fan_dir); + acpi_device_dir(device) = NULL; + } + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ + +int +acpi_fan_add ( + struct acpi_device *device) +{ + int result = 0; + struct acpi_fan *fan = NULL; + int state = 0; + + ACPI_FUNCTION_TRACE("acpi_fan_add"); + + if (!device) + return_VALUE(-EINVAL); + + fan = kmalloc(sizeof(struct acpi_fan), GFP_KERNEL); + if (!fan) + return_VALUE(-ENOMEM); + memset(fan, 0, sizeof(struct acpi_fan)); + + fan->handle = device->handle; + sprintf(acpi_device_name(device), "%s", ACPI_FAN_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_FAN_CLASS); + acpi_driver_data(device) = fan; + + result = acpi_bus_get_power(fan->handle, &state); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error reading power state\n")); + goto end; + } + + result = acpi_fan_add_fs(device); + if (result) + goto end; + + printk(KERN_INFO PREFIX "%s [%s] (%s)\n", + acpi_device_name(device), acpi_device_bid(device), + !device->power.state?"on":"off"); + +end: + if (result) + kfree(fan); + + return_VALUE(result); +} + + +int +acpi_fan_remove ( + struct acpi_device *device, + int type) +{ + struct acpi_fan *fan = NULL; + + ACPI_FUNCTION_TRACE("acpi_fan_remove"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + fan = (struct acpi_fan *) acpi_driver_data(device); + + acpi_fan_remove_fs(device); + + kfree(fan); + + return_VALUE(0); +} + + +int __init +acpi_fan_init (void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_fan_init"); + + acpi_fan_dir = proc_mkdir(ACPI_FAN_CLASS, acpi_root_dir); + if (!acpi_fan_dir) + return_VALUE(-ENODEV); + + result = acpi_bus_register_driver(&acpi_fan_driver); + if (result < 0) { + remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + + +void __exit +acpi_fan_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_fan_exit"); + + acpi_bus_unregister_driver(&acpi_fan_driver); + + remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); + + return_VOID; +} + + +module_init(acpi_fan_init); +module_exit(acpi_fan_exit); + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/hardware/hwacpi.c linux.21rc5-ac2/drivers/acpi/hardware/hwacpi.c --- linux.21rc5/drivers/acpi/hardware/hwacpi.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/hardware/hwacpi.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,40 +2,57 @@ /****************************************************************************** * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface - * $Revision: 46 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "achware.h" +#include #define _COMPONENT ACPI_HARDWARE - MODULE_NAME ("hwacpi") + ACPI_MODULE_NAME ("hwacpi") /****************************************************************************** * - * FUNCTION: Acpi_hw_initialize + * FUNCTION: acpi_hw_initialize * * PARAMETERS: None * @@ -49,182 +66,120 @@ acpi_hw_initialize ( void) { - acpi_status status = AE_OK; - u32 index; + acpi_status status; - FUNCTION_TRACE ("Hw_initialize"); + ACPI_FUNCTION_TRACE ("hw_initialize"); /* We must have the ACPI tables by the time we get here */ if (!acpi_gbl_FADT) { - acpi_gbl_restore_acpi_chipset = FALSE; - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No FADT!\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "A FADT is not loaded\n")); return_ACPI_STATUS (AE_NO_ACPI_TABLES); } - /* Identify current ACPI/legacy mode */ - - switch (acpi_gbl_system_flags & SYS_MODES_MASK) { - case (SYS_MODE_ACPI): - - acpi_gbl_original_mode = SYS_MODE_ACPI; - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "System supports ACPI mode only.\n")); - break; - - - case (SYS_MODE_LEGACY): - - acpi_gbl_original_mode = SYS_MODE_LEGACY; - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Tables loaded from buffer, hardware assumed to support LEGACY mode only.\n")); - break; - + /* Sanity check the FADT for valid values */ - case (SYS_MODE_ACPI | SYS_MODE_LEGACY): - - if (acpi_hw_get_mode () == SYS_MODE_ACPI) { - acpi_gbl_original_mode = SYS_MODE_ACPI; - } - else { - acpi_gbl_original_mode = SYS_MODE_LEGACY; - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "System supports both ACPI and LEGACY modes.\n")); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "System is currently in %s mode.\n", - (acpi_gbl_original_mode == SYS_MODE_ACPI) ? "ACPI" : "LEGACY")); - break; + status = acpi_ut_validate_fadt (); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - - if (acpi_gbl_system_flags & SYS_MODE_ACPI) { - /* Target system supports ACPI mode */ - - /* - * The purpose of this code is to save the initial state - * of the ACPI event enable registers. An exit function will be - * registered which will restore this state when the application - * exits. The exit function will also clear all of the ACPI event - * status bits prior to restoring the original mode. - * - * The location of the PM1a_evt_blk enable registers is defined as the - * base of PM1a_evt_blk + DIV_2(PM1a_evt_blk_length). Since the spec further - * fully defines the PM1a_evt_blk to be a total of 4 bytes, the offset - * for the enable registers is always 2 from the base. It is hard - * coded here. If this changes in the spec, this code will need to - * be modified. The PM1b_evt_blk behaves as expected. 
- */ - acpi_gbl_pm1_enable_register_save = (u16) acpi_hw_register_read ( - ACPI_MTX_LOCK, PM1_EN); - - - /* - * The GPEs behave similarly, except that the length of the register - * block is not fixed, so the buffer must be allocated with malloc - */ - if (ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xgpe0blk.address) && - acpi_gbl_FADT->gpe0blk_len) { - /* GPE0 specified in FADT */ - - acpi_gbl_gpe0enable_register_save = ACPI_MEM_ALLOCATE ( - DIV_2 (acpi_gbl_FADT->gpe0blk_len)); - if (!acpi_gbl_gpe0enable_register_save) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - /* Save state of GPE0 enable bits */ - - for (index = 0; index < DIV_2 (acpi_gbl_FADT->gpe0blk_len); index++) { - acpi_gbl_gpe0enable_register_save[index] = - (u8) acpi_hw_register_read (ACPI_MTX_LOCK, GPE0_EN_BLOCK | index); - } - } - - else { - acpi_gbl_gpe0enable_register_save = NULL; - } - - if (ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xgpe1_blk.address) && - acpi_gbl_FADT->gpe1_blk_len) { - /* GPE1 defined */ - - acpi_gbl_gpe1_enable_register_save = ACPI_MEM_ALLOCATE ( - DIV_2 (acpi_gbl_FADT->gpe1_blk_len)); - if (!acpi_gbl_gpe1_enable_register_save) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - /* save state of GPE1 enable bits */ - - for (index = 0; index < DIV_2 (acpi_gbl_FADT->gpe1_blk_len); index++) { - acpi_gbl_gpe1_enable_register_save[index] = - (u8) acpi_hw_register_read (ACPI_MTX_LOCK, GPE1_EN_BLOCK | index); - } - } - - else { - acpi_gbl_gpe1_enable_register_save = NULL; - } - } - - return_ACPI_STATUS (status); + return_ACPI_STATUS (AE_OK); } /****************************************************************************** * - * FUNCTION: Acpi_hw_set_mode + * FUNCTION: acpi_hw_set_mode * * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY * * RETURN: Status * - * DESCRIPTION: Transitions the system into the requested mode or does nothing - * if the system is already in that mode. + * DESCRIPTION: Transitions the system into the requested mode. * ******************************************************************************/ acpi_status acpi_hw_set_mode ( - u32 mode) + u32 mode) { - acpi_status status = AE_NO_HARDWARE_RESPONSE; + acpi_status status; + u32 retry; - FUNCTION_TRACE ("Hw_set_mode"); + ACPI_FUNCTION_TRACE ("hw_set_mode"); + /* + * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, + * system does not support mode transition. + */ + if (!acpi_gbl_FADT->smi_cmd) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No SMI_CMD in FADT, mode transition failed.\n")); + return_ACPI_STATUS (AE_NO_HARDWARE_RESPONSE); + } + + /* + * ACPI 2.0 clarified the meaning of ACPI_ENABLE and ACPI_DISABLE + * in FADT: If it is zero, enabling or disabling is not supported. + * As old systems may have used zero for mode transition, + * we make sure both the numbers are zero to determine these + * transitions are not supported. 
+ */ + if (!acpi_gbl_FADT->acpi_enable && !acpi_gbl_FADT->acpi_disable) { + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "No mode transition supported in this system.\n")); + return_ACPI_STATUS (AE_OK); + } + + switch (mode) { + case ACPI_SYS_MODE_ACPI: - if (mode == SYS_MODE_ACPI) { /* BIOS should have disabled ALL fixed and GP events */ - acpi_os_write_port (acpi_gbl_FADT->smi_cmd, acpi_gbl_FADT->acpi_enable, 8); + status = acpi_os_write_port (acpi_gbl_FADT->smi_cmd, + (u32) acpi_gbl_FADT->acpi_enable, 8); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Attempting to enable ACPI mode\n")); - } + break; + + case ACPI_SYS_MODE_LEGACY: - else if (mode == SYS_MODE_LEGACY) { /* * BIOS should clear all fixed status bits and restore fixed event * enable bits to default */ - acpi_os_write_port (acpi_gbl_FADT->smi_cmd, acpi_gbl_FADT->acpi_disable, 8); + status = acpi_os_write_port (acpi_gbl_FADT->smi_cmd, + (u32) acpi_gbl_FADT->acpi_disable, 8); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Attempting to enable Legacy (non-ACPI) mode\n")); + break; + + default: + return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* Give the platform some time to react */ + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - acpi_os_stall (20000); + /* + * Some hardware takes a LONG time to switch modes. Give them 3 sec to + * do so, but allow faster systems to proceed more quickly. + */ + retry = 3000; + while (retry) { + status = AE_NO_HARDWARE_RESPONSE; - if (acpi_hw_get_mode () == mode) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Mode %X successfully enabled\n", mode)); - status = AE_OK; + if (acpi_hw_get_mode() == mode) { + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Mode %X successfully enabled\n", mode)); + status = AE_OK; + break; + } + acpi_os_stall(1000); + retry--; } return_ACPI_STATUS (status); @@ -233,7 +188,7 @@ /****************************************************************************** * - * FUNCTION: Acpi_hw_get_mode + * FUNCTION: acpi_hw_get_mode * * PARAMETERS: none * @@ -247,74 +202,21 @@ u32 acpi_hw_get_mode (void) { + acpi_status status; + u32 value; - FUNCTION_TRACE ("Hw_get_mode"); + ACPI_FUNCTION_TRACE ("hw_get_mode"); - if (acpi_hw_register_bit_access (ACPI_READ, ACPI_MTX_LOCK, SCI_EN)) { - return_VALUE (SYS_MODE_ACPI); - } - else { - return_VALUE (SYS_MODE_LEGACY); + status = acpi_get_register (ACPI_BITREG_SCI_ENABLE, &value, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return_VALUE (ACPI_SYS_MODE_LEGACY); } -} - - -/****************************************************************************** - * - * FUNCTION: Acpi_hw_get_mode_capabilities - * - * PARAMETERS: none - * - * RETURN: logical OR of SYS_MODE_ACPI and SYS_MODE_LEGACY determined at initial - * system state. - * - * DESCRIPTION: Returns capablities of system - * - ******************************************************************************/ - -u32 -acpi_hw_get_mode_capabilities (void) -{ - - FUNCTION_TRACE ("Hw_get_mode_capabilities"); - - - if (!(acpi_gbl_system_flags & SYS_MODES_MASK)) { - if (acpi_hw_get_mode () == SYS_MODE_LEGACY) { - /* - * Assume that if this call is being made, Acpi_init has been called - * and ACPI support has been established by the presence of the - * tables. Therefore since we're in SYS_MODE_LEGACY, the system - * must support both modes - */ - acpi_gbl_system_flags |= (SYS_MODE_ACPI | SYS_MODE_LEGACY); - } - - else { - /* TBD: [Investigate] !!! this may be unsafe... 
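
The rewritten acpi_hw_set_mode above boils down to a handshake: write the FADT's ACPI_ENABLE (or ACPI_DISABLE) command value to the SMI command port, then poll the current mode, stalling 1 ms per pass for up to 3000 passes (roughly 3 seconds), since some hardware is slow to switch. A compressed simulation of that flow (the SMI port side effect is faked, and the 0xA0 command value is made up):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t current_mode;			/* 0 = legacy, 1 = ACPI (simulated SCI_EN) */

	static void write_smi_cmd(uint8_t value)
	{
		(void) value;
		current_mode = 1;			/* pretend the BIOS honors the request */
	}

	int main(void)
	{
		write_smi_cmd(0xA0);			/* hypothetical acpi_enable command value */

		for (int retry = 0; retry < 3000; retry++) {	/* real code stalls 1 ms per pass */
			if (current_mode == 1) {
				puts("ACPI mode enabled");
				return 0;
			}
		}
		puts("no hardware response");		/* maps to AE_NO_HARDWARE_RESPONSE */
		return 1;
	}
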
*/ - /* - * system is is ACPI mode, so try to switch back to LEGACY to see if - * it is supported - */ - acpi_hw_set_mode (SYS_MODE_LEGACY); - - if (acpi_hw_get_mode () == SYS_MODE_LEGACY) { - /* Now in SYS_MODE_LEGACY, so both are supported */ - - acpi_gbl_system_flags |= (SYS_MODE_ACPI | SYS_MODE_LEGACY); - acpi_hw_set_mode (SYS_MODE_ACPI); - } - - else { - /* Still in SYS_MODE_ACPI so this must be an ACPI only system */ - acpi_gbl_system_flags |= SYS_MODE_ACPI; - } - } + if (value) { + return_VALUE (ACPI_SYS_MODE_ACPI); + } + else { + return_VALUE (ACPI_SYS_MODE_LEGACY); } - - return_VALUE (acpi_gbl_system_flags & SYS_MODES_MASK); } - - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/hardware/hwgpe.c linux.21rc5-ac2/drivers/acpi/hardware/hwgpe.c --- linux.21rc5/drivers/acpi/hardware/hwgpe.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/hardware/hwgpe.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,42 +2,58 @@ /****************************************************************************** * * Module Name: hwgpe - Low level GPE enable/disable/clear functions - * $Revision: 35 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "achware.h" -#include "acnamesp.h" -#include "acevents.h" +#include <acpi/acpi.h> +#include <acpi/acevents.h> #define _COMPONENT ACPI_HARDWARE - MODULE_NAME ("hwgpe") + ACPI_MODULE_NAME ("hwgpe") /****************************************************************************** * - * FUNCTION: Acpi_hw_enable_gpe + * FUNCTION: acpi_hw_enable_gpe * - * PARAMETERS: Gpe_number - The GPE + * PARAMETERS: gpe_number - The GPE * * RETURN: None * @@ -45,43 +61,41 @@ * ******************************************************************************/ -void +acpi_status acpi_hw_enable_gpe ( - u32 gpe_number) + struct acpi_gpe_event_info *gpe_event_info) { - u32 in_byte; - u32 register_index; - u32 bit_mask; - - - FUNCTION_ENTRY (); + u32 in_byte; + acpi_status status; - /* - * Translate GPE number to index into global registers array. - */ - register_index = acpi_gbl_gpe_valid[gpe_number]; + ACPI_FUNCTION_ENTRY (); - /* - * Figure out the bit offset for this GPE within the target register. - */ - bit_mask = acpi_gbl_decode_to8bit [MOD_8 (gpe_number)]; /* * Read the current value of the register, set the appropriate bit * to enable the GPE, and write out the new register. */ - in_byte = 0; - acpi_os_read_port (acpi_gbl_gpe_registers[register_index].enable_addr, &in_byte, 8); - acpi_os_write_port (acpi_gbl_gpe_registers[register_index].enable_addr, - (in_byte | bit_mask), 8); + status = acpi_hw_low_level_read (8, &in_byte, + &gpe_event_info->register_info->enable_address); + if (ACPI_FAILURE (status)) { + return (status); + } + + /* Write with the new GPE bit enabled */ + + status = acpi_hw_low_level_write (8, (in_byte | gpe_event_info->bit_mask), + &gpe_event_info->register_info->enable_address); + + return (status); } + /****************************************************************************** * - * FUNCTION: Acpi_hw_enable_gpe_for_wakeup + * FUNCTION: acpi_hw_enable_gpe_for_wakeup * - * PARAMETERS: Gpe_number - The GPE + * PARAMETERS: gpe_number - The GPE * * RETURN: None * @@ -92,36 +106,33 @@ void acpi_hw_enable_gpe_for_wakeup ( - u32 gpe_number) + struct acpi_gpe_event_info *gpe_event_info) { - u32 register_index; - u32 bit_mask; + struct acpi_gpe_register_info *gpe_register_info; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); - /* - * Translate GPE number to index into global registers array. - */ - register_index = acpi_gbl_gpe_valid[gpe_number]; + /* Get the info block for the entire GPE register */ - /* - * Figure out the bit offset for this GPE within the target register. 
- */ - bit_mask = acpi_gbl_decode_to8bit [MOD_8 (gpe_number)]; + gpe_register_info = gpe_event_info->register_info; + if (!gpe_register_info) { + return; + } /* * Set the bit so we will not disable this when sleeping */ - acpi_gbl_gpe_registers[register_index].wake_enable |= bit_mask; + gpe_register_info->wake_enable |= gpe_event_info->bit_mask; } + /****************************************************************************** * - * FUNCTION: Acpi_hw_disable_gpe + * FUNCTION: acpi_hw_disable_gpe * - * PARAMETERS: Gpe_number - The GPE + * PARAMETERS: gpe_number - The GPE * * RETURN: None * @@ -129,45 +140,53 @@ * ******************************************************************************/ -void +acpi_status acpi_hw_disable_gpe ( - u32 gpe_number) + struct acpi_gpe_event_info *gpe_event_info) { - u32 in_byte; - u32 register_index; - u32 bit_mask; + u32 in_byte; + acpi_status status; + struct acpi_gpe_register_info *gpe_register_info; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); - /* - * Translate GPE number to index into global registers array. - */ - register_index = acpi_gbl_gpe_valid[gpe_number]; + /* Get the info block for the entire GPE register */ - /* - * Figure out the bit offset for this GPE within the target register. - */ - bit_mask = acpi_gbl_decode_to8bit [MOD_8 (gpe_number)]; + gpe_register_info = gpe_event_info->register_info; + if (!gpe_register_info) { + return (AE_BAD_PARAMETER); + } /* * Read the current value of the register, clear the appropriate bit, * and write out the new register value to disable the GPE. */ - in_byte = 0; - acpi_os_read_port (acpi_gbl_gpe_registers[register_index].enable_addr, &in_byte, 8); - acpi_os_write_port (acpi_gbl_gpe_registers[register_index].enable_addr, - (in_byte & ~bit_mask), 8); + status = acpi_hw_low_level_read (8, &in_byte, + &gpe_register_info->enable_address); + if (ACPI_FAILURE (status)) { + return (status); + } + + /* Write the byte with this GPE bit cleared */ + + status = acpi_hw_low_level_write (8, (in_byte & ~(gpe_event_info->bit_mask)), + &gpe_register_info->enable_address); + if (ACPI_FAILURE (status)) { + return (status); + } - acpi_hw_disable_gpe_for_wakeup(gpe_number); + acpi_hw_disable_gpe_for_wakeup (gpe_event_info); + return (AE_OK); } + /****************************************************************************** * - * FUNCTION: Acpi_hw_disable_gpe_for_wakeup + * FUNCTION: acpi_hw_disable_gpe_for_wakeup * - * PARAMETERS: Gpe_number - The GPE + * PARAMETERS: gpe_number - The GPE * * RETURN: None * @@ -178,36 +197,33 @@ void acpi_hw_disable_gpe_for_wakeup ( - u32 gpe_number) + struct acpi_gpe_event_info *gpe_event_info) { - u32 register_index; - u32 bit_mask; + struct acpi_gpe_register_info *gpe_register_info; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); - /* - * Translate GPE number to index into global registers array. - */ - register_index = acpi_gbl_gpe_valid[gpe_number]; + /* Get the info block for the entire GPE register */ - /* - * Figure out the bit offset for this GPE within the target register. 
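
The new enable and disable paths above are the same one-byte read-modify-write against the GPE enable register; they differ only in whether the event's bit_mask is ORed in or masked out. A self-contained sketch of that pattern, with an invented register value and bit position:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t enable_reg;			/* simulated GPE enable register */

	static uint8_t read8(void)  { return enable_reg; }
	static void write8(uint8_t v) { enable_reg = v; }

	int main(void)
	{
		uint8_t bit_mask = 1u << 3;		/* e.g. the 4th GPE within this register */

		write8(read8() | bit_mask);		/* enable: set one bit, leave the rest alone */
		printf("after enable:  %#x\n", (unsigned) enable_reg);

		write8(read8() & (uint8_t) ~bit_mask);	/* disable: clear the same bit */
		printf("after disable: %#x\n", (unsigned) enable_reg);
		return 0;
	}
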
- */ - bit_mask = acpi_gbl_decode_to8bit [MOD_8 (gpe_number)]; + gpe_register_info = gpe_event_info->register_info; + if (!gpe_register_info) { + return; + } /* * Clear the bit so we will disable this when sleeping */ - acpi_gbl_gpe_registers[register_index].wake_enable &= ~bit_mask; + gpe_register_info->wake_enable &= ~(gpe_event_info->bit_mask); } + /****************************************************************************** * - * FUNCTION: Acpi_hw_clear_gpe + * FUNCTION: acpi_hw_clear_gpe * - * PARAMETERS: Gpe_number - The GPE + * PARAMETERS: gpe_number - The GPE * * RETURN: None * @@ -215,40 +231,32 @@ * ******************************************************************************/ -void +acpi_status acpi_hw_clear_gpe ( - u32 gpe_number) + struct acpi_gpe_event_info *gpe_event_info) { - u32 register_index; - u32 bit_mask; + acpi_status status; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* - * Translate GPE number to index into global registers array. - */ - register_index = acpi_gbl_gpe_valid[gpe_number]; - - /* - * Figure out the bit offset for this GPE within the target register. - */ - bit_mask = acpi_gbl_decode_to8bit [MOD_8 (gpe_number)]; - - /* * Write a one to the appropriate bit in the status register to * clear this GPE. */ - acpi_os_write_port (acpi_gbl_gpe_registers[register_index].status_addr, bit_mask, 8); + status = acpi_hw_low_level_write (8, gpe_event_info->bit_mask, + &gpe_event_info->register_info->status_address); + + return (status); } /****************************************************************************** * - * FUNCTION: Acpi_hw_get_gpe_status + * FUNCTION: acpi_hw_get_gpe_status * - * PARAMETERS: Gpe_number - The GPE + * PARAMETERS: gpe_number - The GPE * * RETURN: None * @@ -256,127 +264,317 @@ * ******************************************************************************/ -void +acpi_status acpi_hw_get_gpe_status ( - u32 gpe_number, - acpi_event_status *event_status) + struct acpi_gpe_event_info *gpe_event_info, + acpi_event_status *event_status) { - u32 in_byte = 0; - u32 register_index = 0; - u32 bit_mask = 0; + u32 in_byte; + u8 bit_mask; + struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; + acpi_event_status local_event_status = 0; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); if (!event_status) { - return; + return (AE_BAD_PARAMETER); } - (*event_status) = 0; + /* Get the info block for the entire GPE register */ - /* - * Translate GPE number to index into global registers array. - */ - register_index = acpi_gbl_gpe_valid[gpe_number]; + gpe_register_info = gpe_event_info->register_info; - /* - * Figure out the bit offset for this GPE within the target register. - */ - bit_mask = acpi_gbl_decode_to8bit [MOD_8 (gpe_number)]; + /* Get the register bitmask for this GPE */ + + bit_mask = gpe_event_info->bit_mask; + + /* GPE Enabled? */ + + status = acpi_hw_low_level_read (8, &in_byte, &gpe_register_info->enable_address); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - /* - * Enabled?: - */ - in_byte = 0; - acpi_os_read_port (acpi_gbl_gpe_registers[register_index].enable_addr, &in_byte, 8); if (bit_mask & in_byte) { - (*event_status) |= ACPI_EVENT_FLAG_ENABLED; + local_event_status |= ACPI_EVENT_FLAG_ENABLED; } - /* - * Enabled for wake?: - */ - if (bit_mask & acpi_gbl_gpe_registers[register_index].wake_enable) { - (*event_status) |= ACPI_EVENT_FLAG_WAKE_ENABLED; + /* GPE Enabled for wake? 
*/ + + if (bit_mask & gpe_register_info->wake_enable) { + local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED; + } + + /* GPE active (set)? */ + + status = acpi_hw_low_level_read (8, &in_byte, &gpe_register_info->status_address); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; } - /* - * Set? - */ - in_byte = 0; - acpi_os_read_port (acpi_gbl_gpe_registers[register_index].status_addr, &in_byte, 8); if (bit_mask & in_byte) { - (*event_status) |= ACPI_EVENT_FLAG_SET; + local_event_status |= ACPI_EVENT_FLAG_SET; } + + /* Set return value */ + + (*event_status) = local_event_status; + + +unlock_and_exit: + return (status); } + /****************************************************************************** * - * FUNCTION: Acpi_hw_disable_non_wakeup_gpes + * FUNCTION: acpi_hw_disable_gpe_block * - * PARAMETERS: None + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info + * gpe_block - Gpe Block info * - * RETURN: None + * RETURN: Status * - * DESCRIPTION: Disable all non-wakeup GPEs - * Call with interrupts disabled. The interrupt handler also - * modifies Acpi_gbl_Gpe_registers[i].Enable, so it should not be - * given the chance to run until after non-wake GPEs are - * re-enabled. + * DESCRIPTION: Disable all GPEs within a GPE block * ******************************************************************************/ -void -acpi_hw_disable_non_wakeup_gpes ( - void) +acpi_status +acpi_hw_disable_gpe_block ( + struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block) { - u32 i; + u32 i; + struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; - FUNCTION_ENTRY (); - for (i = 0; i < acpi_gbl_gpe_register_count; i++) { + /* Get the register info for the entire GPE block */ + + gpe_register_info = gpe_block->register_info; + + /* Examine each GPE Register within the block */ + + for (i = 0; i < gpe_block->register_count; i++) { + status = acpi_hw_low_level_write (8, 0x00, + &gpe_block->register_info[i].enable_address); + if (ACPI_FAILURE (status)) { + return (status); + } + } + + return (AE_OK); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_hw_clear_gpe_block + * + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info + * gpe_block - Gpe Block info + * + * RETURN: Status + * + * DESCRIPTION: Clear all GPEs within a GPE block + * + ******************************************************************************/ + +acpi_status +acpi_hw_clear_gpe_block ( + struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block) +{ + u32 i; + struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; + + + /* Get the register info for the entire GPE block */ + + gpe_register_info = gpe_block->register_info; + + /* Examine each GPE Register within the block */ + + for (i = 0; i < gpe_block->register_count; i++) { + status = acpi_hw_low_level_write (8, 0xFF, + &gpe_block->register_info[i].status_address); + if (ACPI_FAILURE (status)) { + return (status); + } + } + + return (AE_OK); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_hw_disable_non_wakeup_gpe_block + * + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info + * gpe_block - Gpe Block info + * + * RETURN: Status + * + * DESCRIPTION: Disable all GPEs except wakeup GPEs in a GPE block + * + ******************************************************************************/ + +static acpi_status +acpi_hw_disable_non_wakeup_gpe_block ( + struct acpi_gpe_xrupt_info 
*gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block) +{ + u32 i; + struct acpi_gpe_register_info *gpe_register_info; + u32 in_value; + acpi_status status; + + + /* Get the register info for the entire GPE block */ + + gpe_register_info = gpe_block->register_info; + + /* Examine each GPE Register within the block */ + + for (i = 0; i < gpe_block->register_count; i++) { /* * Read the enabled status of all GPEs. We * will be using it to restore all the GPEs later. */ - acpi_os_read_port (acpi_gbl_gpe_registers[i].enable_addr, - &acpi_gbl_gpe_registers[i].enable, 8); + status = acpi_hw_low_level_read (8, &in_value, + &gpe_register_info->enable_address); + if (ACPI_FAILURE (status)) { + return (status); + } + + gpe_register_info->enable = (u8) in_value; /* - * Disable all GPEs but wakeup GPEs. + * Disable all GPEs except wakeup GPEs. */ - acpi_os_write_port(acpi_gbl_gpe_registers[i].enable_addr, - acpi_gbl_gpe_registers[i].wake_enable, 8); + status = acpi_hw_low_level_write (8, gpe_register_info->wake_enable, + &gpe_register_info->enable_address); + if (ACPI_FAILURE (status)) { + return (status); + } + + gpe_register_info++; } + + return (AE_OK); } + /****************************************************************************** * - * FUNCTION: Acpi_hw_enable_non_wakeup_gpes + * FUNCTION: acpi_hw_disable_non_wakeup_gpes * * PARAMETERS: None * * RETURN: None * - * DESCRIPTION: Enable all non-wakeup GPEs we previously enabled. + * DESCRIPTION: Disable all non-wakeup GPEs + * Called with interrupts disabled. The interrupt handler also + * modifies gpe_register_info->Enable, so it should not be + * given the chance to run until after non-wake GPEs are + * re-enabled. * ******************************************************************************/ -void -acpi_hw_enable_non_wakeup_gpes ( +acpi_status +acpi_hw_disable_non_wakeup_gpes ( void) { - u32 i; + acpi_status status; + + + ACPI_FUNCTION_ENTRY (); + + + status = acpi_ev_walk_gpe_list (acpi_hw_disable_non_wakeup_gpe_block); + + return (status); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_hw_enable_non_wakeup_gpe_block + * + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info + * gpe_block - Gpe Block info + * + * RETURN: Status + * + * DESCRIPTION: Enable a single GPE. + * + ******************************************************************************/ + +static acpi_status +acpi_hw_enable_non_wakeup_gpe_block ( + struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block) +{ + u32 i; + struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; + + + /* This callback processes one entire GPE block */ - FUNCTION_ENTRY (); + /* Get the register info for the entire GPE block */ - for (i = 0; i < acpi_gbl_gpe_register_count; i++) { + gpe_register_info = gpe_block->register_info; + + /* Examine each GPE register within the block */ + + for (i = 0; i < gpe_block->register_count; i++) { /* * We previously stored the enabled status of all GPEs. * Blast them back in. 
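
Taken together, the disable/enable pair here is a save-and-restore protocol around sleep: on suspend the current enable byte of each register is stashed in gpe_register_info->enable and only the wake_enable bits stay armed; on resume the stashed byte is written back (the restoring loop completes just below). A tiny simulation of that round trip; the struct mirrors the patch's field names, but the values are invented:

	#include <stdint.h>
	#include <stdio.h>

	struct gpe_reg {
		uint8_t enable;		/* saved copy of the software-enabled bits */
		uint8_t wake_enable;	/* bits allowed to wake the system */
		uint8_t hw;		/* simulated hardware enable register */
	};

	static void suspend(struct gpe_reg *r)
	{
		r->enable = r->hw;		/* remember what was enabled */
		r->hw = r->wake_enable;		/* arm only the wake-capable GPEs */
	}

	static void resume(struct gpe_reg *r)
	{
		r->hw = r->enable;		/* blast the saved mask back in */
	}

	int main(void)
	{
		struct gpe_reg r = { 0, 0x08, 0x2c };
		suspend(&r);
		printf("asleep: hw=%#x\n", (unsigned) r.hw);	/* 0x8  */
		resume(&r);
		printf("awake:  hw=%#x\n", (unsigned) r.hw);	/* 0x2c */
		return 0;
	}
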
*/ - acpi_os_write_port(acpi_gbl_gpe_registers[i].enable_addr, - acpi_gbl_gpe_registers[i].enable, 8); + status = acpi_hw_low_level_write (8, gpe_register_info->enable, + &gpe_register_info->enable_address); + if (ACPI_FAILURE (status)) { + return (status); + } + + gpe_register_info++; } + + + return (AE_OK); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_hw_enable_non_wakeup_gpes + * + * PARAMETERS: None + * + * RETURN: None + * + * DESCRIPTION: Enable all non-wakeup GPEs we previously enabled. + * + ******************************************************************************/ + +acpi_status +acpi_hw_enable_non_wakeup_gpes ( + void) +{ + acpi_status status; + + + ACPI_FUNCTION_ENTRY (); + + + status = acpi_ev_walk_gpe_list (acpi_hw_enable_non_wakeup_gpe_block); + + return (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/hardware/hwregs.c linux.21rc5-ac2/drivers/acpi/hardware/hwregs.c --- linux.21rc5/drivers/acpi/hardware/hwregs.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/hardware/hwregs.c 2003-04-28 17:58:27.000000000 +0100 @@ -3,69 +3,58 @@ * * Module Name: hwregs - Read/write access functions for the various ACPI * control and status registers. - * $Revision: 110 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "achware.h" -#include "acnamesp.h" +#include <acpi/acpi.h> +#include <acpi/acnamesp.h> +#include <acpi/acevents.h> #define _COMPONENT ACPI_HARDWARE - MODULE_NAME ("hwregs") + ACPI_MODULE_NAME ("hwregs") /******************************************************************************* * - * FUNCTION: Acpi_hw_get_bit_shift - * - * PARAMETERS: Mask - Input mask to determine bit shift from. - * Must have at least 1 bit set. - * - * RETURN: Bit location of the lsb of the mask - * - * DESCRIPTION: Returns the bit number for the low order bit that's set. - * - ******************************************************************************/ - -u32 -acpi_hw_get_bit_shift ( - u32 mask) -{ - u32 shift; - - - FUNCTION_TRACE ("Hw_get_bit_shift"); - - - for (shift = 0; ((mask >> shift) & 1) == 0; shift++) { ; } - - return_VALUE (shift); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_hw_clear_acpi_status + * FUNCTION: acpi_hw_clear_acpi_status * * PARAMETERS: none * @@ -75,506 +64,396 @@ * ******************************************************************************/ -void +acpi_status acpi_hw_clear_acpi_status (void) { - u16 gpe_length; - u16 index; + acpi_status status; - FUNCTION_TRACE ("Hw_clear_acpi_status"); + ACPI_FUNCTION_TRACE ("hw_clear_acpi_status"); ACPI_DEBUG_PRINT ((ACPI_DB_IO, "About to write %04X to %04X\n", - ALL_FIXED_STS_BITS, - (u16) ACPI_GET_ADDRESS (acpi_gbl_FADT->Xpm1a_evt_blk.address))); - + ACPI_BITMASK_ALL_FIXED_STATUS, + (u16) acpi_gbl_FADT->xpm1a_evt_blk.address)); - acpi_ut_acquire_mutex (ACPI_MTX_HARDWARE); - acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, PM1_STS, ALL_FIXED_STS_BITS); - - - if (ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xpm1b_evt_blk.address)) { - acpi_os_write_port ((ACPI_IO_ADDRESS) - ACPI_GET_ADDRESS (acpi_gbl_FADT->Xpm1b_evt_blk.address), - ALL_FIXED_STS_BITS, 16); + status = acpi_ut_acquire_mutex (ACPI_MTX_HARDWARE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - /* now clear the GPE Bits */ + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_STATUS, + ACPI_BITMASK_ALL_FIXED_STATUS); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - if (acpi_gbl_FADT->gpe0blk_len) { - gpe_length = (u16) DIV_2 (acpi_gbl_FADT->gpe0blk_len); + /* Clear the fixed events */ - for (index = 0; index < gpe_length; index++) { - acpi_os_write_port ((ACPI_IO_ADDRESS) ( - ACPI_GET_ADDRESS (acpi_gbl_FADT->Xgpe0blk.address) + index), - 0xFF, 8); + if (acpi_gbl_FADT->xpm1b_evt_blk.address) { + status = acpi_hw_low_level_write (16, ACPI_BITMASK_ALL_FIXED_STATUS, + &acpi_gbl_FADT->xpm1b_evt_blk); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; } } - if (acpi_gbl_FADT->gpe1_blk_len) { - gpe_length = (u16) DIV_2 (acpi_gbl_FADT->gpe1_blk_len); + /* Clear the GPE Bits in all GPE registers in all GPE blocks */ - for (index = 0; index < gpe_length; index++) { - acpi_os_write_port ((ACPI_IO_ADDRESS) ( - ACPI_GET_ADDRESS (acpi_gbl_FADT->Xgpe1_blk.address) + index), - 0xFF, 8); - } - } + status = 
acpi_ev_walk_gpe_list (acpi_hw_clear_gpe_block); - acpi_ut_release_mutex (ACPI_MTX_HARDWARE); - return_VOID; +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_HARDWARE); + return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_hw_obtain_sleep_type_register_data + * FUNCTION: acpi_get_sleep_type_data * - * PARAMETERS: Sleep_state - Numeric state requested - * *Slp_Typ_a - Pointer to byte to receive SLP_TYPa value - * *Slp_Typ_b - Pointer to byte to receive SLP_TYPb value + * PARAMETERS: sleep_state - Numeric sleep state + * *sleep_type_a - Where SLP_TYPa is returned + * *sleep_type_b - Where SLP_TYPb is returned * * RETURN: Status - ACPI status * - * DESCRIPTION: Acpi_hw_obtain_sleep_type_register_data() obtains the SLP_TYP and - * SLP_TYPb values for the sleep state requested. + * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep + * state. * ******************************************************************************/ acpi_status -acpi_hw_obtain_sleep_type_register_data ( - u8 sleep_state, - u8 *slp_typ_a, - u8 *slp_typ_b) +acpi_get_sleep_type_data ( + u8 sleep_state, + u8 *sleep_type_a, + u8 *sleep_type_b) { - acpi_status status = AE_OK; - acpi_operand_object *obj_desc; + acpi_status status = AE_OK; + union acpi_operand_object *obj_desc; - FUNCTION_TRACE ("Hw_obtain_sleep_type_register_data"); + ACPI_FUNCTION_TRACE ("acpi_get_sleep_type_data"); /* - * Validate parameters + * Validate parameters */ if ((sleep_state > ACPI_S_STATES_MAX) || - !slp_typ_a || !slp_typ_b) { + !sleep_type_a || !sleep_type_b) { return_ACPI_STATUS (AE_BAD_PARAMETER); } /* - * Acpi_evaluate the namespace object containing the values for this state + * Evaluate the namespace object containing the values for this state */ - status = acpi_ns_evaluate_by_name ((NATIVE_CHAR *) acpi_gbl_db_sleep_states[sleep_state], + status = acpi_ns_evaluate_by_name ((char *) acpi_gbl_db_sleep_states[sleep_state], NULL, &obj_desc); if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%s while evaluating sleep_state [%s]\n", + acpi_format_exception (status), acpi_gbl_db_sleep_states[sleep_state])); + return_ACPI_STATUS (status); } + /* Must have a return object */ + if (!obj_desc) { - REPORT_ERROR (("Missing Sleep State object\n")); - return_ACPI_STATUS (AE_NOT_EXIST); + ACPI_REPORT_ERROR (("Missing Sleep State object\n")); + status = AE_NOT_EXIST; } - /* - * We got something, now ensure it is correct. The object must - * be a package and must have at least 2 numeric values as the - * two elements - */ + /* It must be of type Package */ - /* Even though Acpi_evaluate_object resolves package references, - * Ns_evaluate dpesn't. So, we do it here. 
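
Around this point the patch replaces ad-hoc checks with a validation chain: each _Sx object must evaluate to a Package whose first two elements are Integers carrying SLP_TYPa and SLP_TYPb, and each failure maps to a distinct AML error code (the remaining checks follow just below). A standalone sketch of the full shape check, using simplified stand-in types rather than ACPICA's union acpi_operand_object:

	#include <stdint.h>
	#include <stdio.h>

	enum obj_type { TYPE_INTEGER, TYPE_PACKAGE, TYPE_OTHER };

	struct obj {
		enum obj_type	type;
		uint64_t	integer;	/* valid when type == TYPE_INTEGER */
		struct obj	*elements;	/* valid when type == TYPE_PACKAGE */
		uint32_t	count;
	};

	static int get_sleep_type_data(const struct obj *pkg, uint8_t *a, uint8_t *b)
	{
		if (!pkg || pkg->type != TYPE_PACKAGE)
			return -1;				/* cf. AE_AML_OPERAND_TYPE */
		if (pkg->count < 2)
			return -2;				/* cf. AE_AML_NO_OPERAND */
		if (pkg->elements[0].type != TYPE_INTEGER ||
		    pkg->elements[1].type != TYPE_INTEGER)
			return -1;
		*a = (uint8_t) pkg->elements[0].integer;	/* SLP_TYPa */
		*b = (uint8_t) pkg->elements[1].integer;	/* SLP_TYPb */
		return 0;
	}

	int main(void)
	{
		struct obj elems[2] = {
			{ TYPE_INTEGER, 5, 0, 0 },
			{ TYPE_INTEGER, 5, 0, 0 },
		};
		struct obj pkg = { TYPE_PACKAGE, 0, elems, 2 };
		uint8_t a, b;

		if (get_sleep_type_data(&pkg, &a, &b) == 0)
			printf("SLP_TYPa=%u SLP_TYPb=%u\n", a, b);
		return 0;
	}
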
- */ - status = acpi_ut_resolve_package_references(obj_desc); + else if (ACPI_GET_OBJECT_TYPE (obj_desc) != ACPI_TYPE_PACKAGE) { + ACPI_REPORT_ERROR (("Sleep State object not a Package\n")); + status = AE_AML_OPERAND_TYPE; + } - if (obj_desc->package.count < 2) { - /* Must have at least two elements */ + /* The package must have at least two elements */ - REPORT_ERROR (("Sleep State package does not have at least two elements\n")); - status = AE_ERROR; + else if (obj_desc->package.count < 2) { + ACPI_REPORT_ERROR (("Sleep State package does not have at least two elements\n")); + status = AE_AML_NO_OPERAND; } - else if (((obj_desc->package.elements[0])->common.type != - ACPI_TYPE_INTEGER) || - ((obj_desc->package.elements[1])->common.type != - ACPI_TYPE_INTEGER)) { - /* Must have two */ + /* The first two elements must both be of type Integer */ - REPORT_ERROR (("Sleep State package elements are not both of type Number\n")); - status = AE_ERROR; + else if ((ACPI_GET_OBJECT_TYPE (obj_desc->package.elements[0]) != ACPI_TYPE_INTEGER) || + (ACPI_GET_OBJECT_TYPE (obj_desc->package.elements[1]) != ACPI_TYPE_INTEGER)) { + ACPI_REPORT_ERROR (("Sleep State package elements are not both Integers (%s, %s)\n", + acpi_ut_get_object_type_name (obj_desc->package.elements[0]), + acpi_ut_get_object_type_name (obj_desc->package.elements[1]))); + status = AE_AML_OPERAND_TYPE; } - else { /* - * Valid _Sx_ package size, type, and value + * Valid _Sx_ package size, type, and value */ - *slp_typ_a = (u8) (obj_desc->package.elements[0])->integer.value; - - *slp_typ_b = (u8) (obj_desc->package.elements[1])->integer.value; + *sleep_type_a = (u8) (obj_desc->package.elements[0])->integer.value; + *sleep_type_b = (u8) (obj_desc->package.elements[1])->integer.value; } - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Bad Sleep object %p type %X\n", - obj_desc, obj_desc->common.type)); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "While evaluating sleep_state [%s], bad Sleep object %p type %s\n", + acpi_gbl_db_sleep_states[sleep_state], obj_desc, acpi_ut_get_object_type_name (obj_desc))); } acpi_ut_remove_reference (obj_desc); - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_hw_register_bit_access + * FUNCTION: acpi_hw_get_register_bit_mask * - * PARAMETERS: Read_write - Either ACPI_READ or ACPI_WRITE. - * Use_lock - Lock the hardware - * Register_id - index of ACPI Register to access - * Value - (only used on write) value to write to the - * Register. Shifted all the way right. + * PARAMETERS: register_id - Index of ACPI Register to access * - * RETURN: Value written to or read from specified Register. This value - * is shifted all the way right. + * RETURN: The bit mask to be used when accessing the register * - * DESCRIPTION: Generic ACPI Register read/write function. + * DESCRIPTION: Map register_id into a register bit mask. * ******************************************************************************/ -u32 -acpi_hw_register_bit_access ( - NATIVE_UINT read_write, - u8 use_lock, - u32 register_id, - ...) 
/* Value (only used on write) */ +struct acpi_bit_register_info * +acpi_hw_get_bit_register_info ( + u32 register_id) { - u32 register_value = 0; - u32 mask = 0; - u32 value = 0; - va_list marker; - + ACPI_FUNCTION_NAME ("hw_get_bit_register_info"); - FUNCTION_TRACE ("Hw_register_bit_access"); - - if (read_write == ACPI_WRITE) { - va_start (marker, register_id); - value = va_arg (marker, u32); - va_end (marker); + if (register_id > ACPI_BITREG_MAX) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid bit_register ID: %X\n", register_id)); + return (NULL); } - if (ACPI_MTX_LOCK == use_lock) { - acpi_ut_acquire_mutex (ACPI_MTX_HARDWARE); - } + return (&acpi_gbl_bit_register_info[register_id]); +} - /* - * Decode the Register ID - * Register id = Register block id | bit id - * - * Check bit id to fine locate Register offset. - * Check Mask to determine Register offset, and then read-write. - */ - switch (REGISTER_BLOCK_ID (register_id)) { - case PM1_STS: - switch (register_id) { - case TMR_STS: - mask = TMR_STS_MASK; - break; +/******************************************************************************* + * + * FUNCTION: acpi_get_register + * + * PARAMETERS: register_id - Index of ACPI Register to access + * use_lock - Lock the hardware + * + * RETURN: Value is read from specified Register. Value returned is + * normalized to bit0 (is shifted all the way right) + * + * DESCRIPTION: ACPI bit_register read function. + * + ******************************************************************************/ - case BM_STS: - mask = BM_STS_MASK; - break; +acpi_status +acpi_get_register ( + u32 register_id, + u32 *return_value, + u32 flags) +{ + u32 register_value = 0; + struct acpi_bit_register_info *bit_reg_info; + acpi_status status; - case GBL_STS: - mask = GBL_STS_MASK; - break; - case PWRBTN_STS: - mask = PWRBTN_STS_MASK; - break; + ACPI_FUNCTION_TRACE ("acpi_get_register"); - case SLPBTN_STS: - mask = SLPBTN_STS_MASK; - break; - case RTC_STS: - mask = RTC_STS_MASK; - break; + /* Get the info structure corresponding to the requested ACPI Register */ - case WAK_STS: - mask = WAK_STS_MASK; - break; + bit_reg_info = acpi_hw_get_bit_register_info (register_id); + if (!bit_reg_info) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } - default: - mask = 0; - break; + if (flags & ACPI_MTX_LOCK) { + status = acpi_ut_acquire_mutex (ACPI_MTX_HARDWARE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } + } - register_value = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, PM1_STS); + status = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, + bit_reg_info->parent_register, ®ister_value); - if (read_write == ACPI_WRITE) { - /* - * Status Registers are different from the rest. Clear by - * writing 1, writing 0 has no effect. 
So, the only relevent - * information is the single bit we're interested in, all - * others should be written as 0 so they will be left - * unchanged - */ - value <<= acpi_hw_get_bit_shift (mask); - value &= mask; + if (flags & ACPI_MTX_LOCK) { + (void) acpi_ut_release_mutex (ACPI_MTX_HARDWARE); + } - if (value) { - acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, PM1_STS, - (u16) value); - register_value = 0; - } - } + if (ACPI_SUCCESS (status)) { + /* Normalize the value that was read */ - break; + register_value = ((register_value & bit_reg_info->access_bit_mask) + >> bit_reg_info->bit_position); + *return_value = register_value; - case PM1_EN: + ACPI_DEBUG_PRINT ((ACPI_DB_IO, "Read value %X\n", register_value)); + } - switch (register_id) { - case TMR_EN: - mask = TMR_EN_MASK; - break; + return_ACPI_STATUS (status); +} - case GBL_EN: - mask = GBL_EN_MASK; - break; - case PWRBTN_EN: - mask = PWRBTN_EN_MASK; - break; +/******************************************************************************* + * + * FUNCTION: acpi_set_register + * + * PARAMETERS: register_id - ID of ACPI bit_register to access + * Value - (only used on write) value to write to the + * Register, NOT pre-normalized to the bit pos. + * Flags - Lock the hardware or not + * + * RETURN: None + * + * DESCRIPTION: ACPI Bit Register write function. + * + ******************************************************************************/ - case SLPBTN_EN: - mask = SLPBTN_EN_MASK; - break; +acpi_status +acpi_set_register ( + u32 register_id, + u32 value, + u32 flags) +{ + u32 register_value = 0; + struct acpi_bit_register_info *bit_reg_info; + acpi_status status; - case RTC_EN: - mask = RTC_EN_MASK; - break; - default: - mask = 0; - break; - } + ACPI_FUNCTION_TRACE_U32 ("acpi_set_register", register_id); - register_value = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, PM1_EN); - if (read_write == ACPI_WRITE) { - register_value &= ~mask; - value <<= acpi_hw_get_bit_shift (mask); - value &= mask; - register_value |= value; + /* Get the info structure corresponding to the requested ACPI Register */ - acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, PM1_EN, (u16) register_value); + bit_reg_info = acpi_hw_get_bit_register_info (register_id); + if (!bit_reg_info) { + ACPI_REPORT_ERROR (("Bad ACPI HW register_id: %X\n", register_id)); + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + if (flags & ACPI_MTX_LOCK) { + status = acpi_ut_acquire_mutex (ACPI_MTX_HARDWARE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } + } - break; + /* Always do a register read first so we can insert the new bits */ + status = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, + bit_reg_info->parent_register, ®ister_value); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - case PM1_CONTROL: + /* + * Decode the Register ID + * Register id = Register block id | bit id + * + * Check bit id to fine locate Register offset. + * Check Mask to determine Register offset, and then read-write. + */ + switch (bit_reg_info->parent_register) { + case ACPI_REGISTER_PM1_STATUS: - switch (register_id) { - case SCI_EN: - mask = SCI_EN_MASK; - break; + /* + * Status Registers are different from the rest. Clear by + * writing 1, writing 0 has no effect. 
So, the only relevant + * information is the single bit we're interested in, all others should + * be written as 0 so they will be left unchanged + */ + value = ACPI_REGISTER_PREPARE_BITS (value, + bit_reg_info->bit_position, bit_reg_info->access_bit_mask); + if (value) { + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, + ACPI_REGISTER_PM1_STATUS, (u16) value); + register_value = 0; + } + break; - case BM_RLD: - mask = BM_RLD_MASK; - break; - case GBL_RLS: - mask = GBL_RLS_MASK; - break; + case ACPI_REGISTER_PM1_ENABLE: - case SLP_TYPE_A: - case SLP_TYPE_B: - mask = SLP_TYPE_X_MASK; - break; + ACPI_REGISTER_INSERT_VALUE (register_value, bit_reg_info->bit_position, + bit_reg_info->access_bit_mask, value); - case SLP_EN: - mask = SLP_EN_MASK; - break; + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, + ACPI_REGISTER_PM1_ENABLE, (u16) register_value); + break; - default: - mask = 0; - break; - } + case ACPI_REGISTER_PM1_CONTROL: /* * Read the PM1 Control register. * Note that at this level, the fact that there are actually TWO - * registers (A and B) and that B may not exist, are abstracted. + * registers (A and B - and that B may not exist) is abstracted. */ - register_value = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, PM1_CONTROL); - ACPI_DEBUG_PRINT ((ACPI_DB_IO, "PM1 control: Read %X\n", register_value)); - if (read_write == ACPI_WRITE) { - register_value &= ~mask; - value <<= acpi_hw_get_bit_shift (mask); - value &= mask; - register_value |= value; + ACPI_REGISTER_INSERT_VALUE (register_value, bit_reg_info->bit_position, + bit_reg_info->access_bit_mask, value); - /* - * SLP_TYPE_x Registers are written differently - * than any other control Registers with - * respect to A and B Registers. The value - * for A may be different than the value for B - * - * Therefore, pass the Register_id, not just generic PM1_CONTROL, - * because we need to do different things. Yuck. 
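
The PM1_STATUS case above leans on write-one-to-clear semantics: writing a bit as 1 acknowledges that event, while the 0 bits leave every other pending event untouched, which is why no read-modify-write is wanted there. A small simulation; the bit position and the initial status value are made up:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t pm1_status = 0x0121;		/* pretend three events are pending */

	static void pm1_status_write(uint16_t v)
	{
		pm1_status &= (uint16_t) ~v;		/* hardware clears each bit written as 1 */
	}

	int main(void)
	{
		uint16_t mask = 1u << 5;		/* hypothetical single status bit */

		pm1_status_write(mask);			/* ack just that event */
		printf("status=%#x\n", (unsigned) pm1_status);	/* 0x101: others still pending */
		return 0;
	}
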
- */ - acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, register_id, - (u16) register_value); - } + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, register_id, + (u16) register_value); break; - case PM2_CONTROL: + case ACPI_REGISTER_PM2_CONTROL: - switch (register_id) { - case ARB_DIS: - mask = ARB_DIS_MASK; - break; - - default: - mask = 0; - break; + status = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, + ACPI_REGISTER_PM2_CONTROL, ®ister_value); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; } - register_value = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, PM2_CONTROL); - ACPI_DEBUG_PRINT ((ACPI_DB_IO, "PM2 control: Read %X from %8.8X%8.8X\n", - register_value, HIDWORD(acpi_gbl_FADT->Xpm2_cnt_blk.address), - LODWORD(acpi_gbl_FADT->Xpm2_cnt_blk.address))); - - if (read_write == ACPI_WRITE) { - register_value &= ~mask; - value <<= acpi_hw_get_bit_shift (mask); - value &= mask; - register_value |= value; - - ACPI_DEBUG_PRINT ((ACPI_DB_IO, "About to write %04X to %8.8X%8.8X\n", - register_value, - HIDWORD(acpi_gbl_FADT->Xpm2_cnt_blk.address), - LODWORD(acpi_gbl_FADT->Xpm2_cnt_blk.address))); - - acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, - PM2_CONTROL, (u8) (register_value)); - } - break; - - - case PM_TIMER: - - mask = TMR_VAL_MASK; - register_value = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, - PM_TIMER); - ACPI_DEBUG_PRINT ((ACPI_DB_IO, "PM_TIMER: Read %X from %8.8X%8.8X\n", register_value, - HIDWORD(acpi_gbl_FADT->Xpm_tmr_blk.address), - LODWORD(acpi_gbl_FADT->Xpm_tmr_blk.address))); - - break; - - - case GPE1_EN_BLOCK: - case GPE1_STS_BLOCK: - case GPE0_EN_BLOCK: - case GPE0_STS_BLOCK: - - /* Determine the bit to be accessed - * - * (u32) Register_id: - * 31 24 16 8 0 - * +--------+--------+--------+--------+ - * | gpe_block_id | gpe_bit_number | - * +--------+--------+--------+--------+ - * - * gpe_block_id is one of GPE[01]_EN_BLOCK and GPE[01]_STS_BLOCK - * gpe_bit_number is relative from the gpe_block (0x00~0xFF) - */ - mask = REGISTER_BIT_ID(register_id); /* gpe_bit_number */ - register_id = REGISTER_BLOCK_ID(register_id) | (mask >> 3); - mask = acpi_gbl_decode_to8bit [mask % 8]; - - /* - * The base address of the GPE 0 Register Block - * Plus 1/2 the length of the GPE 0 Register Block - * The enable Register is the Register following the Status Register - * and each Register is defined as 1/2 of the total Register Block - */ - - /* - * This sets the bit within Enable_bit that needs to be written to - * the Register indicated in Mask to a 1, all others are 0 - */ + ACPI_HIDWORD (acpi_gbl_FADT->xpm2_cnt_blk.address), + ACPI_LODWORD (acpi_gbl_FADT->xpm2_cnt_blk.address))); - /* Now get the current Enable Bits in the selected Reg */ + ACPI_REGISTER_INSERT_VALUE (register_value, bit_reg_info->bit_position, + bit_reg_info->access_bit_mask, value); - register_value = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, register_id); - ACPI_DEBUG_PRINT ((ACPI_DB_IO, "GPE Enable bits: Read %X from %X\n", - register_value, register_id)); - - if (read_write == ACPI_WRITE) { - register_value &= ~mask; - value <<= acpi_hw_get_bit_shift (mask); - value &= mask; - register_value |= value; + ACPI_DEBUG_PRINT ((ACPI_DB_IO, "About to write %4.4X to %8.8X%8.8X\n", + register_value, + ACPI_HIDWORD (acpi_gbl_FADT->xpm2_cnt_blk.address), + ACPI_LODWORD (acpi_gbl_FADT->xpm2_cnt_blk.address))); - /* - * This write will put the Action state into the General Purpose - * Enable Register indexed by the value in Mask - */ - ACPI_DEBUG_PRINT ((ACPI_DB_IO, "About to write %04X to %04X\n", - 
register_value, register_id)); - acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, register_id, - (u8) register_value); - register_value = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, - register_id); - } + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, + ACPI_REGISTER_PM2_CONTROL, (u8) (register_value)); break; - case SMI_CMD_BLOCK: - case PROCESSOR_BLOCK: - - /* Not used by any callers at this time - therefore, not implemented */ - default: - - mask = 0; break; } - if (ACPI_MTX_LOCK == use_lock) { - acpi_ut_release_mutex (ACPI_MTX_HARDWARE); + +unlock_and_exit: + + if (flags & ACPI_MTX_LOCK) { + (void) acpi_ut_release_mutex (ACPI_MTX_HARDWARE); } + /* Normalize the value that was read */ - register_value &= mask; - register_value >>= acpi_hw_get_bit_shift (mask); + ACPI_DEBUG_EXEC (register_value = ((register_value & bit_reg_info->access_bit_mask) >> bit_reg_info->bit_position)); - ACPI_DEBUG_PRINT ((ACPI_DB_IO, "Register I/O: returning %X\n", register_value)); - return_VALUE (register_value); + ACPI_DEBUG_PRINT ((ACPI_DB_IO, "ACPI Register Write actual %X\n", register_value)); + return_ACPI_STATUS (status); } /****************************************************************************** * - * FUNCTION: Acpi_hw_register_read + * FUNCTION: acpi_hw_register_read * - * PARAMETERS: Use_lock - Mutex hw access. - * Register_id - Register_iD + Offset. + * PARAMETERS: use_lock - Mutex hw access. + * register_id - register_iD + Offset. * * RETURN: Value read or written. * @@ -583,112 +462,109 @@ * ******************************************************************************/ -u32 +acpi_status acpi_hw_register_read ( - u8 use_lock, - u32 register_id) + u8 use_lock, + u32 register_id, + u32 *return_value) { - u32 value = 0; - u32 bank_offset; + u32 value1 = 0; + u32 value2 = 0; + acpi_status status; - FUNCTION_TRACE ("Hw_register_read"); + ACPI_FUNCTION_TRACE ("hw_register_read"); if (ACPI_MTX_LOCK == use_lock) { - acpi_ut_acquire_mutex (ACPI_MTX_HARDWARE); + status = acpi_ut_acquire_mutex (ACPI_MTX_HARDWARE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } } + switch (register_id) { + case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ - switch (REGISTER_BLOCK_ID(register_id)) { - case PM1_STS: /* 16-bit access */ - - value = acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1a_evt_blk, 0); - value |= acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1b_evt_blk, 0); - break; - + status = acpi_hw_low_level_read (16, &value1, &acpi_gbl_FADT->xpm1a_evt_blk); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - case PM1_EN: /* 16-bit access*/ + /* PM1B is optional */ - bank_offset = DIV_2 (acpi_gbl_FADT->pm1_evt_len); - value = acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1a_evt_blk, bank_offset); - value |= acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1b_evt_blk, bank_offset); + status = acpi_hw_low_level_read (16, &value2, &acpi_gbl_FADT->xpm1b_evt_blk); + value1 |= value2; break; - case PM1_CONTROL: /* 16-bit access */ - - value = acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1a_cnt_blk, 0); - value |= acpi_hw_low_level_read (16, &acpi_gbl_FADT->Xpm1b_cnt_blk, 0); - break; + case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ + status = acpi_hw_low_level_read (16, &value1, &acpi_gbl_xpm1a_enable); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - case PM2_CONTROL: /* 8-bit access */ + /* PM1B is optional */ - value = acpi_hw_low_level_read (8, &acpi_gbl_FADT->Xpm2_cnt_blk, 0); + status = acpi_hw_low_level_read (16, &value2, &acpi_gbl_xpm1b_enable); + value1 |= 
value2; break; - case PM_TIMER: /* 32-bit access */ - - value = acpi_hw_low_level_read (32, &acpi_gbl_FADT->Xpm_tmr_blk, 0); - break; - + case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ - /* - * For the GPE? Blocks, the lower word of Register_id contains the - * byte offset for which to read, as each part of each block may be - * several bytes long. - */ - case GPE0_STS_BLOCK: /* 8-bit access */ + status = acpi_hw_low_level_read (16, &value1, &acpi_gbl_FADT->xpm1a_cnt_blk); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - bank_offset = REGISTER_BIT_ID(register_id); - value = acpi_hw_low_level_read (8, &acpi_gbl_FADT->Xgpe0blk, bank_offset); + status = acpi_hw_low_level_read (16, &value2, &acpi_gbl_FADT->xpm1b_cnt_blk); + value1 |= value2; break; - case GPE0_EN_BLOCK: /* 8-bit access */ - - bank_offset = DIV_2 (acpi_gbl_FADT->gpe0blk_len) + REGISTER_BIT_ID(register_id); - value = acpi_hw_low_level_read (8, &acpi_gbl_FADT->Xgpe0blk, bank_offset); - break; - case GPE1_STS_BLOCK: /* 8-bit access */ + case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ - bank_offset = REGISTER_BIT_ID(register_id); - value = acpi_hw_low_level_read (8, &acpi_gbl_FADT->Xgpe1_blk, bank_offset); + status = acpi_hw_low_level_read (8, &value1, &acpi_gbl_FADT->xpm2_cnt_blk); break; - case GPE1_EN_BLOCK: /* 8-bit access */ - bank_offset = DIV_2 (acpi_gbl_FADT->gpe1_blk_len) + REGISTER_BIT_ID(register_id); - value = acpi_hw_low_level_read (8, &acpi_gbl_FADT->Xgpe1_blk, bank_offset); + case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ + + status = acpi_hw_low_level_read (32, &value1, &acpi_gbl_FADT->xpm_tmr_blk); break; - case SMI_CMD_BLOCK: /* 8bit */ + case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ - acpi_os_read_port (acpi_gbl_FADT->smi_cmd, &value, 8); + status = acpi_os_read_port (acpi_gbl_FADT->smi_cmd, &value1, 8); break; default: - /* Value will be returned as 0 */ + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown Register ID: %X\n", register_id)); + status = AE_BAD_PARAMETER; break; } - +unlock_and_exit: if (ACPI_MTX_LOCK == use_lock) { - acpi_ut_release_mutex (ACPI_MTX_HARDWARE); + (void) acpi_ut_release_mutex (ACPI_MTX_HARDWARE); + } + + if (ACPI_SUCCESS (status)) { + *return_value = value1; } - return_VALUE (value); + return_ACPI_STATUS (status); } /****************************************************************************** * - * FUNCTION: Acpi_hw_register_write + * FUNCTION: acpi_hw_register_write * - * PARAMETERS: Use_lock - Mutex hw access. - * Register_id - Register_iD + Offset. + * PARAMETERS: use_lock - Mutex hw access. + * register_id - register_iD + Offset. * * RETURN: Value read or written. 
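
On the read side above, PM1A and PM1B are presented as one logical register: PM1A must be readable, PM1B may be absent (its read is allowed to fail quietly), and the value handed back is the bitwise OR of the two blocks. A sketch of that merge with invented stub values:

	#include <stdint.h>
	#include <stdio.h>

	static int read_pm1a(uint16_t *v) { *v = 0x0100; return 0; }	/* mandatory block */
	static int read_pm1b(uint16_t *v) { *v = 0x0021; return 0; }	/* optional block */

	int main(void)
	{
		uint16_t a = 0, b = 0;

		if (read_pm1a(&a))
			return 1;		/* a PM1A failure is fatal */
		read_pm1b(&b);			/* a PM1B failure would be tolerated */

		printf("PM1 = %#x\n", (unsigned) (a | b));	/* hardware view is the OR */
		return 0;
	}
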
* @@ -697,160 +573,147 @@ * ******************************************************************************/ -void +acpi_status acpi_hw_register_write ( - u8 use_lock, - u32 register_id, - u32 value) + u8 use_lock, + u32 register_id, + u32 value) { - u32 bank_offset; + acpi_status status; - FUNCTION_TRACE ("Hw_register_write"); + ACPI_FUNCTION_TRACE ("hw_register_write"); if (ACPI_MTX_LOCK == use_lock) { - acpi_ut_acquire_mutex (ACPI_MTX_HARDWARE); + status = acpi_ut_acquire_mutex (ACPI_MTX_HARDWARE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } } + switch (register_id) { + case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ - switch (REGISTER_BLOCK_ID (register_id)) { - case PM1_STS: /* 16-bit access */ - - acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1a_evt_blk, 0); - acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1b_evt_blk, 0); - break; - - - case PM1_EN: /* 16-bit access*/ - - bank_offset = DIV_2 (acpi_gbl_FADT->pm1_evt_len); - acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1a_evt_blk, bank_offset); - acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1b_evt_blk, bank_offset); - break; - + status = acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->xpm1a_evt_blk); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - case PM1_CONTROL: /* 16-bit access */ + /* PM1B is optional */ - acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1a_cnt_blk, 0); - acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1b_cnt_blk, 0); + status = acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->xpm1b_evt_blk); break; - case PM1A_CONTROL: /* 16-bit access */ - - acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1a_cnt_blk, 0); - break; + case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access*/ + status = acpi_hw_low_level_write (16, value, &acpi_gbl_xpm1a_enable); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - case PM1B_CONTROL: /* 16-bit access */ + /* PM1B is optional */ - acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->Xpm1b_cnt_blk, 0); + status = acpi_hw_low_level_write (16, value, &acpi_gbl_xpm1b_enable); break; - case PM2_CONTROL: /* 8-bit access */ - - acpi_hw_low_level_write (8, value, &acpi_gbl_FADT->Xpm2_cnt_blk, 0); - break; - + case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ - case PM_TIMER: /* 32-bit access */ + status = acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->xpm1a_cnt_blk); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } - acpi_hw_low_level_write (32, value, &acpi_gbl_FADT->Xpm_tmr_blk, 0); + status = acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->xpm1b_cnt_blk); break; - case GPE0_STS_BLOCK: /* 8-bit access */ + case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */ - bank_offset = REGISTER_BIT_ID(register_id); - acpi_hw_low_level_write (8, value, &acpi_gbl_FADT->Xgpe0blk, bank_offset); + status = acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->xpm1a_cnt_blk); break; - case GPE0_EN_BLOCK: /* 8-bit access */ + case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */ - bank_offset = DIV_2 (acpi_gbl_FADT->gpe0blk_len) + REGISTER_BIT_ID(register_id); - acpi_hw_low_level_write (8, value, &acpi_gbl_FADT->Xgpe0blk, bank_offset); + status = acpi_hw_low_level_write (16, value, &acpi_gbl_FADT->xpm1b_cnt_blk); break; - case GPE1_STS_BLOCK: /* 8-bit access */ + case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ - bank_offset = REGISTER_BIT_ID(register_id); - acpi_hw_low_level_write (8, value, &acpi_gbl_FADT->Xgpe1_blk, bank_offset); + status = acpi_hw_low_level_write (8, value, 
&acpi_gbl_FADT->xpm2_cnt_blk); break; - case GPE1_EN_BLOCK: /* 8-bit access */ + case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ - bank_offset = DIV_2 (acpi_gbl_FADT->gpe1_blk_len) + REGISTER_BIT_ID(register_id); - acpi_hw_low_level_write (8, value, &acpi_gbl_FADT->Xgpe1_blk, bank_offset); + status = acpi_hw_low_level_write (32, value, &acpi_gbl_FADT->xpm_tmr_blk); break; - case SMI_CMD_BLOCK: /* 8bit */ + case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ - /* For 2.0, SMI_CMD is always in IO space */ - /* TBD: what about 1.0? 0.71? */ + /* SMI_CMD is currently always in IO space */ - acpi_os_write_port (acpi_gbl_FADT->smi_cmd, value, 8); + status = acpi_os_write_port (acpi_gbl_FADT->smi_cmd, value, 8); break; default: - value = 0; + status = AE_BAD_PARAMETER; break; } - +unlock_and_exit: if (ACPI_MTX_LOCK == use_lock) { - acpi_ut_release_mutex (ACPI_MTX_HARDWARE); + (void) acpi_ut_release_mutex (ACPI_MTX_HARDWARE); } - return_VOID; + return_ACPI_STATUS (status); } /****************************************************************************** * - * FUNCTION: Acpi_hw_low_level_read + * FUNCTION: acpi_hw_low_level_read * - * PARAMETERS: Register - GAS register structure - * Offset - Offset from the base address in the GAS - * Width - 8, 16, or 32 + * PARAMETERS: Width - 8, 16, or 32 + * Value - Where the value is returned + * Register - GAS register structure * - * RETURN: Value read + * RETURN: Status * * DESCRIPTION: Read from either memory, IO, or PCI config space. * ******************************************************************************/ -u32 +acpi_status acpi_hw_low_level_read ( - u32 width, - acpi_generic_address *reg, - u32 offset) + u32 width, + u32 *value, + struct acpi_generic_address *reg) { - u32 value = 0; - ACPI_PHYSICAL_ADDRESS mem_address; - ACPI_IO_ADDRESS io_address; - acpi_pci_id pci_id; - u16 pci_register; + struct acpi_pci_id pci_id; + u16 pci_register; + acpi_status status; - FUNCTION_ENTRY (); + ACPI_FUNCTION_NAME ("hw_low_level_read"); /* * Must have a valid pointer to a GAS structure, and - * a non-zero address within + * a non-zero address within. However, don't return an error + * because the PM1A/B code must not fail if B isn't present. 
*/ if ((!reg) || - (!ACPI_VALID_ADDRESS (reg->address))) { - return 0; + (!reg->address)) { + return (AE_OK); } - + *value = 0; /* * Three address spaces supported: @@ -859,17 +722,16 @@ switch (reg->address_space_id) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: - mem_address = (ACPI_PHYSICAL_ADDRESS) (ACPI_GET_ADDRESS (reg->address) + offset); - - acpi_os_read_memory (mem_address, &value, width); + status = acpi_os_read_memory ( + (acpi_physical_address) reg->address, + value, width); break; case ACPI_ADR_SPACE_SYSTEM_IO: - io_address = (ACPI_IO_ADDRESS) (ACPI_GET_ADDRESS (reg->address) + offset); - - acpi_os_read_port (io_address, &value, width); + status = acpi_os_read_port ((acpi_io_address) reg->address, + value, width); break; @@ -877,60 +739,63 @@ pci_id.segment = 0; pci_id.bus = 0; - pci_id.device = ACPI_PCI_DEVICE (ACPI_GET_ADDRESS (reg->address)); - pci_id.function = ACPI_PCI_FUNCTION (ACPI_GET_ADDRESS (reg->address)); - pci_register = (u16) (ACPI_PCI_REGISTER (ACPI_GET_ADDRESS (reg->address)) + offset); + pci_id.device = ACPI_PCI_DEVICE (reg->address); + pci_id.function = ACPI_PCI_FUNCTION (reg->address); + pci_register = (u16) ACPI_PCI_REGISTER (reg->address); + + status = acpi_os_read_pci_configuration (&pci_id, pci_register, + value, width); + break; + - acpi_os_read_pci_configuration (&pci_id, pci_register, &value, width); + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Unsupported address space: %X\n", reg->address_space_id)); + status = AE_BAD_PARAMETER; break; } - return value; + return (status); } /****************************************************************************** * - * FUNCTION: Acpi_hw_low_level_write + * FUNCTION: acpi_hw_low_level_write * * PARAMETERS: Width - 8, 16, or 32 * Value - To be written * Register - GAS register structure - * Offset - Offset from the base address in the GAS * + * RETURN: Status * - * RETURN: Value read - * - * DESCRIPTION: Read from either memory, IO, or PCI config space. + * DESCRIPTION: Write to either memory, IO, or PCI config space. * ******************************************************************************/ -void +acpi_status acpi_hw_low_level_write ( - u32 width, - u32 value, - acpi_generic_address *reg, - u32 offset) + u32 width, + u32 value, + struct acpi_generic_address *reg) { - ACPI_PHYSICAL_ADDRESS mem_address; - ACPI_IO_ADDRESS io_address; - acpi_pci_id pci_id; - u16 pci_register; + struct acpi_pci_id pci_id; + u16 pci_register; + acpi_status status; - FUNCTION_ENTRY (); + ACPI_FUNCTION_NAME ("hw_low_level_write"); /* * Must have a valid pointer to a GAS structure, and - * a non-zero address within + * a non-zero address within. However, don't return an error + * because the PM1A/B code must not fail if B isn't present. */ if ((!reg) || - (!ACPI_VALID_ADDRESS (reg->address))) { - return; + (!reg->address)) { + return (AE_OK); } - - /* * Three address spaces supported: * Memory, Io, or PCI config. 
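For readers tracking the interface change in the hwregs.c hunks above: every hardware accessor now returns acpi_status and hands the value back through an out-parameter, and a Generic Address Structure with a NULL pointer or zero address is treated as an optional, absent block (the PM1B case) rather than an error. The following is a standalone sketch of that shape only, not the real ACPI CA code: the struct, the enum values, and the two back-ends are hypothetical stand-ins for acpi_generic_address, acpi_os_read_memory() and acpi_os_read_port(), and the PCI-config branch is omitted for brevity.

/*
 * Sketch of the status-returning GAS read dispatch (illustrative only).
 */
#include <stdint.h>

typedef int acpi_status;
enum { AE_OK = 0, AE_BAD_PARAMETER = 0x1001 };
enum { SPACE_MEMORY, SPACE_IO };                /* stand-in space IDs */

struct gas {                                    /* stand-in for the GAS */
        uint8_t  space_id;
        uint64_t address;
};

/* Stub back-ends so the sketch compiles; the real calls do the I/O. */
static acpi_status read_memory(uint64_t a, uint32_t *v, uint32_t w)
{ (void)a; (void)w; *v = 0; return AE_OK; }
static acpi_status read_port(uint64_t a, uint32_t *v, uint32_t w)
{ (void)a; (void)w; *v = 0; return AE_OK; }

static acpi_status low_level_read(uint32_t width, uint32_t *value,
        struct gas *reg)
{
        /*
         * An absent block succeeds and reads as zero, so the optional
         * PM1B register can never fail a combined PM1A|PM1B read.
         */
        if (!reg || !reg->address) {
                *value = 0;
                return AE_OK;
        }
        *value = 0;

        switch (reg->space_id) {
        case SPACE_MEMORY:
                return read_memory(reg->address, value, width);
        case SPACE_IO:
                return read_port(reg->address, value, width);
        default:
                return AE_BAD_PARAMETER;        /* unsupported space */
        }
}

Under this convention a PM1-style register read becomes two low_level_read() calls ORed together, exactly as the ACPI_REGISTER_PM1_CONTROL case above does, with the B block free to be absent.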
@@ -938,17 +803,16 @@ switch (reg->address_space_id) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: - mem_address = (ACPI_PHYSICAL_ADDRESS) (ACPI_GET_ADDRESS (reg->address) + offset); - - acpi_os_write_memory (mem_address, value, width); + status = acpi_os_write_memory ( + (acpi_physical_address) reg->address, + value, width); break; case ACPI_ADR_SPACE_SYSTEM_IO: - io_address = (ACPI_IO_ADDRESS) (ACPI_GET_ADDRESS (reg->address) + offset); - - acpi_os_write_port (io_address, value, width); + status = acpi_os_write_port ((acpi_io_address) reg->address, + value, width); break; @@ -956,11 +820,21 @@ pci_id.segment = 0; pci_id.bus = 0; - pci_id.device = ACPI_PCI_DEVICE (ACPI_GET_ADDRESS (reg->address)); - pci_id.function = ACPI_PCI_FUNCTION (ACPI_GET_ADDRESS (reg->address)); - pci_register = (u16) (ACPI_PCI_REGISTER (ACPI_GET_ADDRESS (reg->address)) + offset); + pci_id.device = ACPI_PCI_DEVICE (reg->address); + pci_id.function = ACPI_PCI_FUNCTION (reg->address); + pci_register = (u16) ACPI_PCI_REGISTER (reg->address); + + status = acpi_os_write_pci_configuration (&pci_id, pci_register, + (acpi_integer) value, width); + break; - acpi_os_write_pci_configuration (&pci_id, pci_register, value, width); + + default: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Unsupported address space: %X\n", reg->address_space_id)); + status = AE_BAD_PARAMETER; break; } + + return (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/hardware/hwsleep.c linux.21rc5-ac2/drivers/acpi/hardware/hwsleep.c --- linux.21rc5/drivers/acpi/hardware/hwsleep.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/hardware/hwsleep.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,70 +2,82 @@ /****************************************************************************** * * Name: hwsleep.c - ACPI Hardware Sleep/Wake Interface - * $Revision: 22 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acnamesp.h" -#include "achware.h" +#include #define _COMPONENT ACPI_HARDWARE - MODULE_NAME ("hwsleep") + ACPI_MODULE_NAME ("hwsleep") /****************************************************************************** * - * FUNCTION: Acpi_set_firmware_waking_vector + * FUNCTION: acpi_set_firmware_waking_vector * - * PARAMETERS: Physical_address - Physical address of ACPI real mode + * PARAMETERS: physical_address - Physical address of ACPI real mode * entry point. * - * RETURN: AE_OK or AE_ERROR + * RETURN: Status * - * DESCRIPTION: Access function for d_firmware_waking_vector field in FACS + * DESCRIPTION: access function for d_firmware_waking_vector field in FACS * ******************************************************************************/ acpi_status acpi_set_firmware_waking_vector ( - ACPI_PHYSICAL_ADDRESS physical_address) + acpi_physical_address physical_address) { - FUNCTION_TRACE ("Acpi_set_firmware_waking_vector"); - - - /* Make sure that we have an FACS */ + ACPI_FUNCTION_TRACE ("acpi_set_firmware_waking_vector"); - if (!acpi_gbl_FACS) { - return_ACPI_STATUS (AE_NO_ACPI_TABLES); - } /* Set the vector */ - if (acpi_gbl_FACS->vector_width == 32) { - * (u32 *) acpi_gbl_FACS->firmware_waking_vector = (u32) physical_address; + if (acpi_gbl_common_fACS.vector_width == 32) { + *(ACPI_CAST_PTR (u32, acpi_gbl_common_fACS.firmware_waking_vector)) + = (u32) physical_address; } else { - *acpi_gbl_FACS->firmware_waking_vector = physical_address; + *acpi_gbl_common_fACS.firmware_waking_vector + = physical_address; } return_ACPI_STATUS (AE_OK); @@ -74,167 +86,300 @@ /****************************************************************************** * - * FUNCTION: Acpi_get_firmware_waking_vector + * FUNCTION: acpi_get_firmware_waking_vector * - * PARAMETERS: *Physical_address - Output buffer where contents of - * the Firmware_waking_vector field of + * PARAMETERS: *physical_address - Output buffer where contents of + * the firmware_waking_vector field of * the FACS will be stored. 
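One detail worth flagging in the waking-vector accessors in the hwsleep.c hunks: the FACS vector slot is written either as a 32-bit quantity or as a full-width physical address, selected by the table's vector_width field. Reduced to a standalone sketch, where the struct and field names are stand-ins rather than the real acpi_gbl_common_fACS layout:

#include <stdint.h>

struct facs_view {                      /* stand-in for the common FACS view */
        uint8_t   vector_width;         /* 32 or 64 */
        uint64_t *firmware_waking_vector;
};

static void set_waking_vector(struct facs_view *facs, uint64_t phys)
{
        if (facs->vector_width == 32)
                /* Narrow store into the 32-bit vector slot */
                *(uint32_t *)(void *)facs->firmware_waking_vector =
                        (uint32_t)phys;
        else
                *facs->firmware_waking_vector = phys;
}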
* * RETURN: Status * - * DESCRIPTION: Access function for d_firmware_waking_vector field in FACS + * DESCRIPTION: Access function for firmware_waking_vector field in FACS * ******************************************************************************/ acpi_status acpi_get_firmware_waking_vector ( - ACPI_PHYSICAL_ADDRESS *physical_address) + acpi_physical_address *physical_address) { - FUNCTION_TRACE ("Acpi_get_firmware_waking_vector"); + ACPI_FUNCTION_TRACE ("acpi_get_firmware_waking_vector"); if (!physical_address) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* Make sure that we have an FACS */ - - if (!acpi_gbl_FACS) { - return_ACPI_STATUS (AE_NO_ACPI_TABLES); - } - /* Get the vector */ - if (acpi_gbl_FACS->vector_width == 32) { - *physical_address = * (u32 *) acpi_gbl_FACS->firmware_waking_vector; + if (acpi_gbl_common_fACS.vector_width == 32) { + *physical_address = (acpi_physical_address) + *(ACPI_CAST_PTR (u32, acpi_gbl_common_fACS.firmware_waking_vector)); } else { - *physical_address = *acpi_gbl_FACS->firmware_waking_vector; + *physical_address = + *acpi_gbl_common_fACS.firmware_waking_vector; } return_ACPI_STATUS (AE_OK); } + /****************************************************************************** * - * FUNCTION: Acpi_enter_sleep_state + * FUNCTION: acpi_enter_sleep_state_prep * - * PARAMETERS: Sleep_state - Which sleep state to enter + * PARAMETERS: sleep_state - Which sleep state to enter * * RETURN: Status * - * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231) + * DESCRIPTION: Prepare to enter a system sleep state (see ACPI 2.0 spec p 231) + * This function must execute with interrupts enabled. + * We break sleeping into 2 stages so that OSPM can handle + * various OS-specific tasks between the two steps. * ******************************************************************************/ acpi_status -acpi_enter_sleep_state ( - u8 sleep_state) +acpi_enter_sleep_state_prep ( + u8 sleep_state) { - acpi_status status; - acpi_object_list arg_list; - acpi_object arg; - u8 type_a; - u8 type_b; - u16 PM1Acontrol; - u16 PM1Bcontrol; + acpi_status status; + struct acpi_object_list arg_list; + union acpi_object arg; - FUNCTION_TRACE ("Acpi_enter_sleep_state"); + ACPI_FUNCTION_TRACE ("acpi_enter_sleep_state_prep"); /* * _PSW methods could be run here to enable wake-on keyboard, LAN, etc. 
*/ - status = acpi_hw_obtain_sleep_type_register_data (sleep_state, &type_a, &type_b); - if (!ACPI_SUCCESS (status)) { - return status; + status = acpi_get_sleep_type_data (sleep_state, + &acpi_gbl_sleep_type_a, &acpi_gbl_sleep_type_b); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - /* run the _PTS and _GTS methods */ + /* Setup parameter object */ - MEMSET(&arg_list, 0, sizeof(arg_list)); arg_list.count = 1; arg_list.pointer = &arg; - MEMSET(&arg, 0, sizeof(arg)); arg.type = ACPI_TYPE_INTEGER; arg.integer.value = sleep_state; - acpi_evaluate_object (NULL, "\\_PTS", &arg_list, NULL); - acpi_evaluate_object (NULL, "\\_GTS", &arg_list, NULL); + /* Run the _PTS and _GTS methods */ - /* clear wake status */ + status = acpi_evaluate_object (NULL, "\\_PTS", &arg_list, NULL); + if (ACPI_FAILURE (status) && status != AE_NOT_FOUND) { + return_ACPI_STATUS (status); + } - acpi_hw_register_bit_access (ACPI_WRITE, ACPI_MTX_LOCK, WAK_STS, 1); + status = acpi_evaluate_object (NULL, "\\_GTS", &arg_list, NULL); + if (ACPI_FAILURE (status) && status != AE_NOT_FOUND) { + return_ACPI_STATUS (status); + } - disable (); + return_ACPI_STATUS (AE_OK); +} - acpi_hw_disable_non_wakeup_gpes(); - PM1Acontrol = (u16) acpi_hw_register_read (ACPI_MTX_LOCK, PM1_CONTROL); +/****************************************************************************** + * + * FUNCTION: acpi_enter_sleep_state + * + * PARAMETERS: sleep_state - Which sleep state to enter + * + * RETURN: Status + * + * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231) + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED + * + ******************************************************************************/ - ACPI_DEBUG_PRINT ((ACPI_DB_OK, "Entering S%d\n", sleep_state)); +acpi_status +acpi_enter_sleep_state ( + u8 sleep_state) +{ + u32 PM1Acontrol; + u32 PM1Bcontrol; + struct acpi_bit_register_info *sleep_type_reg_info; + struct acpi_bit_register_info *sleep_enable_reg_info; + u32 in_value; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_enter_sleep_state"); + + + if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) || + (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) { + ACPI_REPORT_ERROR (("Sleep values out of range: A=%X B=%X\n", + acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b)); + return_ACPI_STATUS (AE_AML_OPERAND_VALUE); + } + + + sleep_type_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_TYPE_A); + sleep_enable_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_ENABLE); + + /* Clear wake status */ + + status = acpi_set_register (ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + status = acpi_hw_clear_acpi_status(); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Disable BM arbitration */ + + status = acpi_set_register (ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + status = acpi_hw_disable_non_wakeup_gpes(); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Get current value of PM1A control */ + + status = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "Entering sleep state [S%d]\n", sleep_state)); - /* mask off SLP_EN and SLP_TYP fields */ + /* Clear SLP_EN and SLP_TYP fields */ - PM1Acontrol &= ~(SLP_TYPE_X_MASK | SLP_EN_MASK); + PM1Acontrol &= 
~(sleep_type_reg_info->access_bit_mask | sleep_enable_reg_info->access_bit_mask); PM1Bcontrol = PM1Acontrol; - /* mask in SLP_TYP */ + /* Insert SLP_TYP bits */ - PM1Acontrol |= (type_a << acpi_hw_get_bit_shift (SLP_TYPE_X_MASK)); - PM1Bcontrol |= (type_b << acpi_hw_get_bit_shift (SLP_TYPE_X_MASK)); + PM1Acontrol |= (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position); + PM1Bcontrol |= (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position); - /* write #1: fill in SLP_TYP data */ + /* Write #1: fill in SLP_TYP data */ - acpi_hw_register_write (ACPI_MTX_LOCK, PM1A_CONTROL, PM1Acontrol); - acpi_hw_register_write (ACPI_MTX_LOCK, PM1B_CONTROL, PM1Bcontrol); + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1B_CONTROL, PM1Bcontrol); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - /* mask in SLP_EN */ + /* Insert SLP_ENABLE bit */ - PM1Acontrol |= (1 << acpi_hw_get_bit_shift (SLP_EN_MASK)); - PM1Bcontrol |= (1 << acpi_hw_get_bit_shift (SLP_EN_MASK)); + PM1Acontrol |= sleep_enable_reg_info->access_bit_mask; + PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask; - /* flush caches */ + /* Write #2: SLP_TYP + SLP_EN */ - wbinvd(); + ACPI_FLUSH_CPU_CACHE (); - /* write #2: SLP_TYP + SLP_EN */ + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - acpi_hw_register_write (ACPI_MTX_LOCK, PM1A_CONTROL, PM1Acontrol); - acpi_hw_register_write (ACPI_MTX_LOCK, PM1B_CONTROL, PM1Bcontrol); + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1B_CONTROL, PM1Bcontrol); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* * Wait a second, then try again. This is to get S4/5 to work on all machines. */ if (sleep_state > ACPI_STATE_S3) { - acpi_os_stall(1000000); - - acpi_hw_register_write (ACPI_MTX_LOCK, PM1_CONTROL, - (1 << acpi_hw_get_bit_shift (SLP_EN_MASK))); + /* + * We wait so long to allow chipsets that poll this reg very slowly to + * still read the right value. Ideally, this entire block would go + * away entirely. + */ + acpi_os_stall (10000000); + + status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_CONTROL, + sleep_enable_reg_info->access_bit_mask); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } } - /* wait until we enter sleep state */ + /* Wait until we enter sleep state */ do { - acpi_os_stall(10000); - } - while (!acpi_hw_register_bit_access (ACPI_READ, ACPI_MTX_LOCK, WAK_STS)); + status = acpi_get_register (ACPI_BITREG_WAKE_STATUS, &in_value, ACPI_MTX_DO_NOT_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - acpi_hw_enable_non_wakeup_gpes(); + /* Spin until we wake */ - enable (); + } while (!in_value); return_ACPI_STATUS (AE_OK); } + /****************************************************************************** * - * FUNCTION: Acpi_leave_sleep_state + * FUNCTION: acpi_enter_sleep_state_s4bios * - * PARAMETERS: Sleep_state - Which sleep state we just exited + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Perform a S4 bios request. 
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED + * + ******************************************************************************/ + +acpi_status +acpi_enter_sleep_state_s4bios ( + void) +{ + u32 in_value; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_enter_sleep_state_s4bios"); + + acpi_set_register (ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK); + acpi_hw_clear_acpi_status(); + + acpi_hw_disable_non_wakeup_gpes(); + + ACPI_FLUSH_CPU_CACHE(); + + status = acpi_os_write_port (acpi_gbl_FADT->smi_cmd, (u32) acpi_gbl_FADT->S4bios_req, 8); + + do { + acpi_os_stall(1000); + status = acpi_get_register (ACPI_BITREG_WAKE_STATUS, &in_value, ACPI_MTX_DO_NOT_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } while (!in_value); + + return_ACPI_STATUS (AE_OK); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_leave_sleep_state + * + * PARAMETERS: sleep_state - Which sleep state we just exited * * RETURN: Status * @@ -244,29 +389,49 @@ acpi_status acpi_leave_sleep_state ( - u8 sleep_state) + u8 sleep_state) { - acpi_object_list arg_list; - acpi_object arg; + struct acpi_object_list arg_list; + union acpi_object arg; + acpi_status status; - FUNCTION_TRACE ("Acpi_leave_sleep_state"); + ACPI_FUNCTION_TRACE ("acpi_leave_sleep_state"); - MEMSET (&arg_list, 0, sizeof(arg_list)); + /* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */ + + acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID; + + /* Setup parameter object */ + arg_list.count = 1; arg_list.pointer = &arg; - MEMSET (&arg, 0, sizeof(arg)); arg.type = ACPI_TYPE_INTEGER; arg.integer.value = sleep_state; - acpi_evaluate_object (NULL, "\\_BFS", &arg_list, NULL); - acpi_evaluate_object (NULL, "\\_WAK", &arg_list, NULL); + /* Ignore any errors from these methods */ + + status = acpi_evaluate_object (NULL, "\\_BFS", &arg_list, NULL); + if (ACPI_FAILURE (status) && status != AE_NOT_FOUND) { + ACPI_REPORT_ERROR (("Method _BFS failed, %s\n", acpi_format_exception (status))); + } + + status = acpi_evaluate_object (NULL, "\\_WAK", &arg_list, NULL); + if (ACPI_FAILURE (status) && status != AE_NOT_FOUND) { + ACPI_REPORT_ERROR (("Method _WAK failed, %s\n", acpi_format_exception (status))); + } /* _WAK returns stuff - do we want to look at it? */ - acpi_hw_enable_non_wakeup_gpes(); + status = acpi_hw_enable_non_wakeup_gpes(); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - return_ACPI_STATUS (AE_OK); + /* Disable BM arbitration */ + status = acpi_set_register (ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_LOCK); + + return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/hardware/hwtimer.c linux.21rc5-ac2/drivers/acpi/hardware/hwtimer.c --- linux.21rc5/drivers/acpi/hardware/hwtimer.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/hardware/hwtimer.c 2003-04-28 17:58:27.000000000 +0100 @@ -2,38 +2,55 @@ /****************************************************************************** * * Name: hwtimer.c - ACPI Power Management Timer Interface - * $Revision: 14 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. 
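The acpi_enter_sleep_state() rewrite above preserves the spec-mandated PM1 control sequence: clear the SLP_TYP and SLP_EN fields, write SLP_TYPa/b alone, then write again with SLP_EN set, then spin on WAK_STS. A compact sketch of that sequence follows; the bit positions match the ACPI PM1_CNT register layout, while the helpers and the fake registers are stand-ins so the sketch is self-contained:

#include <stdint.h>

#define SLP_TYP_POS     10                      /* PM1_CNT SLP_TYPx field */
#define SLP_TYP_MASK    (7u << SLP_TYP_POS)
#define SLP_EN          (1u << 13)              /* PM1_CNT SLP_EN bit */

static uint32_t pm1a_cnt, pm1b_cnt;             /* fake registers */

static void write_pm1_control(uint32_t a, uint32_t b)
{ pm1a_cnt = a; pm1b_cnt = b; }

static int read_wak_sts(void)
{ return 1; }                                   /* pretend we already woke */

static void enter_sleep(uint32_t type_a, uint32_t type_b)
{
        uint32_t a = pm1a_cnt & ~(SLP_TYP_MASK | SLP_EN);
        uint32_t b = a;

        a |= type_a << SLP_TYP_POS;
        b |= type_b << SLP_TYP_POS;
        write_pm1_control(a, b);                /* write #1: SLP_TYP only */

        write_pm1_control(a | SLP_EN, b | SLP_EN); /* write #2: + SLP_EN */

        while (!read_wak_sts())
                ;                               /* spin until WAK_STS sets */
}

The second write is the one that actually puts the machine to sleep, which is why the patch flushes the CPU caches between the two writes and why the function's header insists it be called with interrupts disabled.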
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "achware.h" +#include #define _COMPONENT ACPI_HARDWARE - MODULE_NAME ("hwtimer") + ACPI_MODULE_NAME ("hwtimer") /****************************************************************************** * - * FUNCTION: Acpi_get_timer_resolution + * FUNCTION: acpi_get_timer_resolution * * PARAMETERS: none * @@ -45,9 +62,9 @@ acpi_status acpi_get_timer_resolution ( - u32 *resolution) + u32 *resolution) { - FUNCTION_TRACE ("Acpi_get_timer_resolution"); + ACPI_FUNCTION_TRACE ("acpi_get_timer_resolution"); if (!resolution) { @@ -57,7 +74,6 @@ if (0 == acpi_gbl_FADT->tmr_val_ext) { *resolution = 24; } - else { *resolution = 32; } @@ -68,7 +84,7 @@ /****************************************************************************** * - * FUNCTION: Acpi_get_timer + * FUNCTION: acpi_get_timer * * PARAMETERS: none * @@ -80,31 +96,33 @@ acpi_status acpi_get_timer ( - u32 *ticks) + u32 *ticks) { - FUNCTION_TRACE ("Acpi_get_timer"); + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_get_timer"); if (!ticks) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - acpi_os_read_port ((ACPI_IO_ADDRESS) - ACPI_GET_ADDRESS (acpi_gbl_FADT->Xpm_tmr_blk.address), ticks, 32); + status = acpi_hw_low_level_read (32, ticks, &acpi_gbl_FADT->xpm_tmr_blk); - return_ACPI_STATUS (AE_OK); + return_ACPI_STATUS (status); } /****************************************************************************** * - * FUNCTION: Acpi_get_timer_duration + * FUNCTION: acpi_get_timer_duration * - * PARAMETERS: Start_ticks - * End_ticks - * Time_elapsed + * PARAMETERS: start_ticks + * end_ticks + * time_elapsed * - * RETURN: Time_elapsed + * RETURN: time_elapsed * * DESCRIPTION: Computes the time elapsed (in microseconds) between two * PM Timer time stamps, taking into account the possibility of @@ -115,27 +133,29 @@ * transitions (unlike many CPU timestamp counters) -- making it * a versatile and accurate timer. * - * Note that this function accomodates only a single timer + * Note that this function accommodates only a single timer * rollover. Thus for 24-bit timers, this function should only * be used for calculating durations less than ~4.6 seconds - * (~20 hours for 32-bit timers). + * (~20 minutes for 32-bit timers) -- calculations below + * + * 2**24 Ticks / 3,600,000 Ticks/Sec = 4.66 sec + * 2**32 Ticks / 3,600,000 Ticks/Sec = 1193 sec or 19.88 minutes * ******************************************************************************/ acpi_status acpi_get_timer_duration ( - u32 start_ticks, - u32 end_ticks, - u32 *time_elapsed) + u32 start_ticks, + u32 end_ticks, + u32 *time_elapsed) { - u32 delta_ticks = 0; - u32 seconds = 0; - u32 milliseconds = 0; - u32 microseconds = 0; - u32 remainder = 0; + u32 delta_ticks = 0; + union uint64_overlay normalized_ticks; + acpi_status status; + acpi_integer out_quotient; - FUNCTION_TRACE ("Acpi_get_timer_duration"); + ACPI_FUNCTION_TRACE ("acpi_get_timer_duration"); if (!time_elapsed) { @@ -150,21 +170,18 @@ if (start_ticks < end_ticks) { delta_ticks = end_ticks - start_ticks; } - else if (start_ticks > end_ticks) { - /* 24-bit Timer */ - if (0 == acpi_gbl_FADT->tmr_val_ext) { + /* 24-bit Timer */ + delta_ticks = (((0x00FFFFFF - start_ticks) + end_ticks) & 0x00FFFFFF); } - - /* 32-bit Timer */ - else { + /* 32-bit Timer */ + delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks; } } - else { *time_elapsed = 0; return_ACPI_STATUS (AE_OK); @@ -173,49 +190,18 @@ /* * Compute Duration: * ----------------- - * Since certain compilers (gcc/Linux, argh!) 
don't support 64-bit - * divides in kernel-space we have to do some trickery to preserve - * accuracy while using 32-bit math. - * - * TBD: Change to use 64-bit math when supported. * - * The process is as follows: - * 1. Compute the number of seconds by dividing Delta Ticks by - * the timer frequency. - * 2. Compute the number of milliseconds in the remainder from step #1 - * by multiplying by 1000 and then dividing by the timer frequency. - * 3. Compute the number of microseconds in the remainder from step #2 - * by multiplying by 1000 and then dividing by the timer frequency. - * 4. Add the results from steps 1, 2, and 3 to get the total duration. + * Requires a 64-bit divide: * - * Example: The time elapsed for Delta_ticks = 0xFFFFFFFF should be - * 1199864031 microseconds. This is computed as follows: - * Step #1: Seconds = 1199; Remainder = 3092840 - * Step #2: Milliseconds = 864; Remainder = 113120 - * Step #3: Microseconds = 31; Remainder = + * time_elapsed = (delta_ticks * 1000000) / PM_TIMER_FREQUENCY; */ + normalized_ticks.full = ((u64) delta_ticks) * 1000000; - /* Step #1 */ - - seconds = delta_ticks / PM_TIMER_FREQUENCY; - remainder = delta_ticks % PM_TIMER_FREQUENCY; - - /* Step #2 */ - - milliseconds = (remainder * 1000) / PM_TIMER_FREQUENCY; - remainder = (remainder * 1000) % PM_TIMER_FREQUENCY; + status = acpi_ut_short_divide (&normalized_ticks.full, PM_TIMER_FREQUENCY, + &out_quotient, NULL); - /* Step #3 */ - - microseconds = (remainder * 1000) / PM_TIMER_FREQUENCY; - - /* Step #4 */ - - *time_elapsed = seconds * 1000000; - *time_elapsed += milliseconds * 1000; - *time_elapsed += microseconds; - - return_ACPI_STATUS (AE_OK); + *time_elapsed = (u32) out_quotient; + return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/hardware/Makefile linux.21rc5-ac2/drivers/acpi/hardware/Makefile --- linux.21rc5/drivers/acpi/hardware/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/hardware/Makefile 2003-04-28 17:58:27.000000000 +0100 @@ -1,11 +1,10 @@ # # Makefile for all Linux ACPI interpreter subdirectories -# EXCEPT for the ospm directory # O_TARGET := $(notdir $(CURDIR)).o -obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c)) +obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c)) EXTRA_CFLAGS += $(ACPI_CFLAGS) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acconfig.h linux.21rc5-ac2/drivers/acpi/include/acconfig.h --- linux.21rc5/drivers/acpi/include/acconfig.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acconfig.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,162 +0,0 @@ -/****************************************************************************** - * - * Name: acconfig.h - Global configuration constants - * $Revision: 74 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ACCONFIG_H -#define _ACCONFIG_H - - -/****************************************************************************** - * - * Compile-time options - * - *****************************************************************************/ - -/* - * ACPI_DEBUG - This switch enables all the debug facilities of the ACPI - * subsystem. This includes the DEBUG_PRINT output statements - * When disabled, all DEBUG_PRINT statements are compiled out. - * - * ACPI_APPLICATION - Use this switch if the subsystem is going to be run - * at the application level. - * - */ - - -/****************************************************************************** - * - * Subsystem Constants - * - *****************************************************************************/ - - -/* Version string */ - -#define ACPI_CA_VERSION 0x20011018 - -/* Version of ACPI supported */ - -#define ACPI_CA_SUPPORT_LEVEL 2 - - -/* Maximum objects in the various object caches */ - -#define MAX_STATE_CACHE_DEPTH 64 /* State objects for stacks */ -#define MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */ -#define MAX_EXTPARSE_CACHE_DEPTH 64 /* Parse tree objects */ -#define MAX_OBJECT_CACHE_DEPTH 64 /* Interpreter operand objects */ -#define MAX_WALK_CACHE_DEPTH 4 /* Objects for parse tree walks (method execution) */ - - -/* String size constants */ - -#define MAX_STRING_LENGTH 512 -#define PATHNAME_MAX 256 /* A full namespace pathname */ - - -/* Maximum count for a semaphore object */ - -#define MAX_SEMAPHORE_COUNT 256 - - -/* Max reference count (for debug only) */ - -#define MAX_REFERENCE_COUNT 0x400 - - -/* Size of cached memory mapping for system memory operation region */ - -#define SYSMEM_REGION_WINDOW_SIZE 4096 - - -/* - * Debugger threading model - * Use single threaded if the entire subsystem is contained in an application - * Use multiple threaded when the subsystem is running in the kernel. - * - * By default the model is single threaded if ACPI_APPLICATION is set, - * multi-threaded if ACPI_APPLICATION is not set. - */ - -#define DEBUGGER_SINGLE_THREADED 0 -#define DEBUGGER_MULTI_THREADED 1 - -#ifdef ACPI_APPLICATION -#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED - -#else -#define DEBUGGER_THREADING DEBUGGER_MULTI_THREADED -#endif - - -/****************************************************************************** - * - * ACPI Specification constants (Do not change unless the specification changes) - * - *****************************************************************************/ - -/* - * Method info (in WALK_STATE), containing local variables and argumetns - */ - -#define MTH_NUM_LOCALS 8 -#define MTH_MAX_LOCAL 7 - -#define MTH_NUM_ARGS 7 -#define MTH_MAX_ARG 6 - -/* Maximum length of resulting string when converting from a buffer */ - -#define ACPI_MAX_STRING_CONVERSION 200 - -/* - * Operand Stack (in WALK_STATE), Must be large enough to contain MTH_MAX_ARG - */ - -#define OBJ_NUM_OPERANDS 8 -#define OBJ_MAX_OPERAND 7 - -/* Names within the namespace are 4 bytes long */ - -#define ACPI_NAME_SIZE 4 -#define PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 s8 for separator */ -#define PATH_SEPARATOR '.' 
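Returning to the hwtimer.c change further above: with a 64-bit divide available, the old three-step seconds/milliseconds/microseconds computation collapses into one multiply and divide against the PM timer frequency of 3,579,545 Hz. A standalone sketch, including the single-rollover handling for 24-bit and 32-bit timers; the function name is hypothetical and acpi_ut_short_divide is replaced by plain C arithmetic:

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545u             /* PM timer ticks per second */

static uint32_t timer_duration_us(uint32_t start, uint32_t end, int is_32bit)
{
        uint32_t delta;

        if (end >= start)
                delta = end - start;
        else if (is_32bit)                      /* one 32-bit rollover */
                delta = (0xFFFFFFFFu - start) + end;
        else                                    /* one 24-bit rollover */
                delta = ((0x00FFFFFFu - start) + end) & 0x00FFFFFFu;

        /* time_elapsed = (delta_ticks * 1000000) / PM_TIMER_FREQUENCY */
        return (uint32_t)(((uint64_t)delta * 1000000u) / PM_TIMER_FREQUENCY);
}

int main(void)
{
        /* Reproduces the worked example in the deleted comment block:
         * 0xFFFFFFFF ticks is 1199864031 microseconds. */
        printf("%u\n", timer_duration_us(0, 0xFFFFFFFFu, 1));
        return 0;
}

The printed value, a shade under 20 minutes, is also why the updated comment corrects the 32-bit single-rollover budget from "~20 hours" to roughly 20 minutes.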
- - -/* Constants used in searching for the RSDP in low memory */ - -#define LO_RSDP_WINDOW_BASE 0 /* Physical Address */ -#define HI_RSDP_WINDOW_BASE 0xE0000 /* Physical Address */ -#define LO_RSDP_WINDOW_SIZE 0x400 -#define HI_RSDP_WINDOW_SIZE 0x20000 -#define RSDP_SCAN_STEP 16 - -/* Maximum Space_ids for Operation Regions */ - -#define ACPI_MAX_ADDRESS_SPACE 255 -#define ACPI_NUM_ADDRESS_SPACES 256 - - -#endif /* _ACCONFIG_H */ - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acdebug.h linux.21rc5-ac2/drivers/acpi/include/acdebug.h --- linux.21rc5/drivers/acpi/include/acdebug.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acdebug.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,410 +0,0 @@ -/****************************************************************************** - * - * Name: acdebug.h - ACPI/AML debugger - * $Revision: 50 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACDEBUG_H__ -#define __ACDEBUG_H__ - - -#define DB_MAX_ARGS 8 /* Must be max method args + 1 */ - -#define DB_COMMAND_PROMPT '-' -#define DB_EXECUTE_PROMPT '%' - - -extern int optind; -extern NATIVE_CHAR *optarg; -extern u8 *aml_start; -extern u32 aml_length; - -extern u8 acpi_gbl_db_opt_tables; -extern u8 acpi_gbl_db_opt_disasm; -extern u8 acpi_gbl_db_opt_stats; -extern u8 acpi_gbl_db_opt_parse_jit; -extern u8 acpi_gbl_db_opt_verbose; -extern u8 acpi_gbl_db_opt_ini_methods; - - -extern NATIVE_CHAR *acpi_gbl_db_args[DB_MAX_ARGS]; -extern NATIVE_CHAR acpi_gbl_db_line_buf[80]; -extern NATIVE_CHAR acpi_gbl_db_scope_buf[40]; -extern NATIVE_CHAR acpi_gbl_db_debug_filename[40]; -extern u8 acpi_gbl_db_output_to_file; -extern NATIVE_CHAR *acpi_gbl_db_buffer; -extern NATIVE_CHAR *acpi_gbl_db_filename; -extern NATIVE_CHAR *acpi_gbl_db_disasm_indent; -extern u8 acpi_gbl_db_output_flags; -extern u32 acpi_gbl_db_debug_level; -extern u32 acpi_gbl_db_console_debug_level; -extern acpi_table_header *acpi_gbl_db_table_ptr; - -/* - * Statistic globals - */ -extern u16 acpi_gbl_obj_type_count[INTERNAL_TYPE_NODE_MAX+1]; -extern u16 acpi_gbl_node_type_count[INTERNAL_TYPE_NODE_MAX+1]; -extern u16 acpi_gbl_obj_type_count_misc; -extern u16 acpi_gbl_node_type_count_misc; -extern u32 acpi_gbl_num_nodes; -extern u32 acpi_gbl_num_objects; - - -extern u32 acpi_gbl_size_of_parse_tree; -extern u32 acpi_gbl_size_of_method_trees; -extern u32 acpi_gbl_size_of_node_entries; -extern u32 acpi_gbl_size_of_acpi_objects; - - -#define ACPI_DEBUG_BUFFER_SIZE 4196 - -#define DB_REDIRECTABLE_OUTPUT 0x01 -#define DB_CONSOLE_OUTPUT 0x02 -#define DB_DUPLICATE_OUTPUT 0x03 - - -typedef struct command_info -{ - NATIVE_CHAR *name; /* Command Name */ - u8 min_args; /* 
Minimum arguments required */ - -} COMMAND_INFO; - - -typedef struct argument_info -{ - NATIVE_CHAR *name; /* Argument Name */ - -} ARGUMENT_INFO; - - -#define PARAM_LIST(pl) pl - -#define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose) - -#define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\ - acpi_os_printf PARAM_LIST(fp);} - -#define EX_NO_SINGLE_STEP 1 -#define EX_SINGLE_STEP 2 - - -/* Prototypes */ - - -/* - * dbapi - external debugger interfaces - */ - -int -acpi_db_initialize ( - void); - -void -acpi_db_terminate ( - void); - -acpi_status -acpi_db_single_step ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - u32 op_type); - - -/* - * dbcmds - debug commands and output routines - */ - - -void -acpi_db_display_table_info ( - NATIVE_CHAR *table_arg); - -void -acpi_db_unload_acpi_table ( - NATIVE_CHAR *table_arg, - NATIVE_CHAR *instance_arg); - -void -acpi_db_set_method_breakpoint ( - NATIVE_CHAR *location, - acpi_walk_state *walk_state, - acpi_parse_object *op); - -void -acpi_db_set_method_call_breakpoint ( - acpi_parse_object *op); - -void -acpi_db_disassemble_aml ( - NATIVE_CHAR *statements, - acpi_parse_object *op); - -void -acpi_db_dump_namespace ( - NATIVE_CHAR *start_arg, - NATIVE_CHAR *depth_arg); - -void -acpi_db_dump_namespace_by_owner ( - NATIVE_CHAR *owner_arg, - NATIVE_CHAR *depth_arg); - -void -acpi_db_send_notify ( - NATIVE_CHAR *name, - u32 value); - -void -acpi_db_set_method_data ( - NATIVE_CHAR *type_arg, - NATIVE_CHAR *index_arg, - NATIVE_CHAR *value_arg); - -acpi_status -acpi_db_display_objects ( - NATIVE_CHAR *obj_type_arg, - NATIVE_CHAR *display_count_arg); - -acpi_status -acpi_db_find_name_in_namespace ( - NATIVE_CHAR *name_arg); - -void -acpi_db_set_scope ( - NATIVE_CHAR *name); - -void -acpi_db_find_references ( - NATIVE_CHAR *object_arg); - -void -acpi_db_display_locks (void); - - -void -acpi_db_display_resources ( - NATIVE_CHAR *object_arg); - - -/* - * dbdisasm - AML disassembler - */ - -void -acpi_db_display_op ( - acpi_walk_state *walk_state, - acpi_parse_object *origin, - u32 num_opcodes); - -void -acpi_db_display_namestring ( - NATIVE_CHAR *name); - -void -acpi_db_display_path ( - acpi_parse_object *op); - -void -acpi_db_display_opcode ( - acpi_walk_state *walk_state, - acpi_parse_object *op); - -void -acpi_db_decode_internal_object ( - acpi_operand_object *obj_desc); - - -/* - * dbdisply - debug display commands - */ - - -void -acpi_db_display_method_info ( - acpi_parse_object *op); - -void -acpi_db_decode_and_display_object ( - NATIVE_CHAR *target, - NATIVE_CHAR *output_type); - -void -acpi_db_display_result_object ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state); - -acpi_status -acpi_db_display_all_methods ( - NATIVE_CHAR *display_count_arg); - -void -acpi_db_display_internal_object ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state); - -void -acpi_db_display_arguments ( - void); - -void -acpi_db_display_locals ( - void); - -void -acpi_db_display_results ( - void); - -void -acpi_db_display_calling_tree ( - void); - -void -acpi_db_display_argument_object ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state); - - -/* - * dbexec - debugger control method execution - */ - -void -acpi_db_execute ( - NATIVE_CHAR *name, - NATIVE_CHAR **args, - u32 flags); - -void -acpi_db_create_execution_threads ( - NATIVE_CHAR *num_threads_arg, - NATIVE_CHAR *num_loops_arg, - NATIVE_CHAR *method_name_arg); - - -/* - * dbfileio - Debugger file I/O commands - */ - -acpi_object_type8 -acpi_db_match_argument ( - 
NATIVE_CHAR *user_argument, - ARGUMENT_INFO *arguments); - - -void -acpi_db_close_debug_file ( - void); - -void -acpi_db_open_debug_file ( - NATIVE_CHAR *name); - -acpi_status -acpi_db_load_acpi_table ( - NATIVE_CHAR *filename); - - -/* - * dbhistry - debugger HISTORY command - */ - -void -acpi_db_add_to_history ( - NATIVE_CHAR *command_line); - -void -acpi_db_display_history (void); - -NATIVE_CHAR * -acpi_db_get_from_history ( - NATIVE_CHAR *command_num_arg); - - -/* - * dbinput - user front-end to the AML debugger - */ - -acpi_status -acpi_db_command_dispatch ( - NATIVE_CHAR *input_buffer, - acpi_walk_state *walk_state, - acpi_parse_object *op); - -void -acpi_db_execute_thread ( - void *context); - -acpi_status -acpi_db_user_commands ( - NATIVE_CHAR prompt, - acpi_parse_object *op); - - -/* - * dbstats - Generation and display of ACPI table statistics - */ - -void -acpi_db_generate_statistics ( - acpi_parse_object *root, - u8 is_method); - - -acpi_status -acpi_db_display_statistics ( - NATIVE_CHAR *type_arg); - - -/* - * dbutils - AML debugger utilities - */ - -void -acpi_db_set_output_destination ( - u32 where); - -void -acpi_db_dump_buffer ( - u32 address); - -void -acpi_db_dump_object ( - acpi_object *obj_desc, - u32 level); - -void -acpi_db_prep_namestring ( - NATIVE_CHAR *name); - - -acpi_status -acpi_db_second_pass_parse ( - acpi_parse_object *root); - -acpi_namespace_node * -acpi_db_local_ns_lookup ( - NATIVE_CHAR *name); - - -#endif /* __ACDEBUG_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acdispat.h linux.21rc5-ac2/drivers/acpi/include/acdispat.h --- linux.21rc5/drivers/acpi/include/acdispat.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acdispat.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,453 +0,0 @@ -/****************************************************************************** - * - * Name: acdispat.h - dispatcher (parser to interpreter interface) - * $Revision: 45 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#ifndef _ACDISPAT_H_ -#define _ACDISPAT_H_ - - -#define NAMEOF_LOCAL_NTE "__L0" -#define NAMEOF_ARG_NTE "__A0" - - -/* Common interfaces */ - -acpi_status -acpi_ds_obj_stack_push ( - void *object, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_obj_stack_pop ( - u32 pop_count, - acpi_walk_state *walk_state); - -void * -acpi_ds_obj_stack_get_value ( - u32 index, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_obj_stack_pop_object ( - acpi_operand_object **object, - acpi_walk_state *walk_state); - - -/* dsopcode - support for late evaluation */ - -acpi_status -acpi_ds_get_buffer_field_arguments ( - acpi_operand_object *obj_desc); - -acpi_status -acpi_ds_get_region_arguments ( - acpi_operand_object *rgn_desc); - - -/* dsctrl - Parser/Interpreter interface, control stack routines */ - - -acpi_status -acpi_ds_exec_begin_control_op ( - acpi_walk_state *walk_state, - acpi_parse_object *op); - -acpi_status -acpi_ds_exec_end_control_op ( - acpi_walk_state *walk_state, - acpi_parse_object *op); - - -/* dsexec - Parser/Interpreter interface, method execution callbacks */ - - -acpi_status -acpi_ds_get_predicate_value ( - acpi_walk_state *walk_state, - u32 has_result_obj); - -acpi_status -acpi_ds_exec_begin_op ( - acpi_walk_state *walk_state, - acpi_parse_object **out_op); - -acpi_status -acpi_ds_exec_end_op ( - acpi_walk_state *state); - - -/* dsfield - Parser/Interpreter interface for AML fields */ - -acpi_status -acpi_ds_create_field ( - acpi_parse_object *op, - acpi_namespace_node *region_node, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_create_bank_field ( - acpi_parse_object *op, - acpi_namespace_node *region_node, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_create_index_field ( - acpi_parse_object *op, - acpi_namespace_node *region_node, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_create_buffer_field ( - acpi_parse_object *op, - acpi_walk_state *walk_state); - - -/* dsload - Parser/Interpreter interface, namespace load callbacks */ - -acpi_status -acpi_ds_load1_begin_op ( - acpi_walk_state *walk_state, - acpi_parse_object **out_op); - -acpi_status -acpi_ds_load1_end_op ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_load2_begin_op ( - acpi_walk_state *walk_state, - acpi_parse_object **out_op); - -acpi_status -acpi_ds_load2_end_op ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_init_callbacks ( - acpi_walk_state *walk_state, - u32 pass_number); - - -/* dsmthdat - method data (locals/args) */ - - -acpi_status -acpi_ds_store_object_to_local ( - u16 opcode, - u32 index, - acpi_operand_object *src_desc, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_method_data_get_entry ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state, - acpi_operand_object ***node); - -acpi_status -acpi_ds_method_data_delete_all ( - acpi_walk_state *walk_state); - -u8 -acpi_ds_is_method_value ( - acpi_operand_object *obj_desc); - -acpi_object_type8 -acpi_ds_method_data_get_type ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_method_data_get_value ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state, - acpi_operand_object **dest_desc); - -acpi_status -acpi_ds_method_data_delete_value ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state); - -acpi_status 
-acpi_ds_method_data_init_args ( - acpi_operand_object **params, - u32 max_param_count, - acpi_walk_state *walk_state); - -acpi_namespace_node * -acpi_ds_method_data_get_node ( - u16 opcode, - u32 index, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_method_data_init ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_method_data_set_entry ( - u16 opcode, - u32 index, - acpi_operand_object *object, - acpi_walk_state *walk_state); - - -/* dsmethod - Parser/Interpreter interface - control method parsing */ - -acpi_status -acpi_ds_parse_method ( - acpi_handle obj_handle); - -acpi_status -acpi_ds_call_control_method ( - acpi_walk_list *walk_list, - acpi_walk_state *walk_state, - acpi_parse_object *op); - -acpi_status -acpi_ds_restart_control_method ( - acpi_walk_state *walk_state, - acpi_operand_object *return_desc); - -acpi_status -acpi_ds_terminate_control_method ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_begin_method_execution ( - acpi_namespace_node *method_node, - acpi_operand_object *obj_desc, - acpi_namespace_node *calling_method_node); - - -/* dsobj - Parser/Interpreter interface - object initialization and conversion */ - -acpi_status -acpi_ds_init_one_object ( - acpi_handle obj_handle, - u32 level, - void *context, - void **return_value); - -acpi_status -acpi_ds_initialize_objects ( - acpi_table_desc *table_desc, - acpi_namespace_node *start_node); - -acpi_status -acpi_ds_build_internal_package_obj ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_operand_object **obj_desc); - -acpi_status -acpi_ds_build_internal_object ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_operand_object **obj_desc_ptr); - -acpi_status -acpi_ds_init_object_from_op ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - u16 opcode, - acpi_operand_object **obj_desc); - -acpi_status -acpi_ds_create_node ( - acpi_walk_state *walk_state, - acpi_namespace_node *node, - acpi_parse_object *op); - - -/* dsregn - Parser/Interpreter interface - Op Region parsing */ - -acpi_status -acpi_ds_eval_buffer_field_operands ( - acpi_walk_state *walk_state, - acpi_parse_object *op); - -acpi_status -acpi_ds_eval_region_operands ( - acpi_walk_state *walk_state, - acpi_parse_object *op); - -acpi_status -acpi_ds_initialize_region ( - acpi_handle obj_handle); - - -/* dsutils - Parser/Interpreter interface utility routines */ - -u8 -acpi_ds_is_result_used ( - acpi_parse_object *op, - acpi_walk_state *walk_state); - -void -acpi_ds_delete_result_if_not_used ( - acpi_parse_object *op, - acpi_operand_object *result_obj, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_create_operand ( - acpi_walk_state *walk_state, - acpi_parse_object *arg, - u32 args_remaining); - -acpi_status -acpi_ds_create_operands ( - acpi_walk_state *walk_state, - acpi_parse_object *first_arg); - -acpi_status -acpi_ds_resolve_operands ( - acpi_walk_state *walk_state); - -acpi_object_type8 -acpi_ds_map_opcode_to_data_type ( - u16 opcode, - u32 *out_flags); - -acpi_object_type8 -acpi_ds_map_named_opcode_to_data_type ( - u16 opcode); - - -/* - * dswscope - Scope Stack manipulation - */ - -acpi_status -acpi_ds_scope_stack_push ( - acpi_namespace_node *node, - acpi_object_type8 type, - acpi_walk_state *walk_state); - - -acpi_status -acpi_ds_scope_stack_pop ( - acpi_walk_state *walk_state); - -void -acpi_ds_scope_stack_clear ( - acpi_walk_state *walk_state); - - -/* dswstate - parser WALK_STATE management routines */ - -acpi_walk_state * -acpi_ds_create_walk_state ( - acpi_owner_id owner_id, - 
acpi_parse_object *origin, - acpi_operand_object *mth_desc, - acpi_walk_list *walk_list); - -acpi_status -acpi_ds_init_aml_walk ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_namespace_node *method_node, - u8 *aml_start, - u32 aml_length, - acpi_operand_object **params, - acpi_operand_object **return_obj_desc, - u32 pass_number); - -acpi_status -acpi_ds_obj_stack_delete_all ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_obj_stack_pop_and_delete ( - u32 pop_count, - acpi_walk_state *walk_state); - -void -acpi_ds_delete_walk_state ( - acpi_walk_state *walk_state); - -acpi_walk_state * -acpi_ds_pop_walk_state ( - acpi_walk_list *walk_list); - -void -acpi_ds_push_walk_state ( - acpi_walk_state *walk_state, - acpi_walk_list *walk_list); - -acpi_status -acpi_ds_result_stack_pop ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_result_stack_push ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_result_stack_clear ( - acpi_walk_state *walk_state); - -acpi_walk_state * -acpi_ds_get_current_walk_state ( - acpi_walk_list *walk_list); - -void -acpi_ds_delete_walk_state_cache ( - void); - -acpi_status -acpi_ds_result_insert ( - void *object, - u32 index, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_result_remove ( - acpi_operand_object **object, - u32 index, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_result_pop ( - acpi_operand_object **object, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_result_push ( - acpi_operand_object *object, - acpi_walk_state *walk_state); - -acpi_status -acpi_ds_result_pop_from_bottom ( - acpi_operand_object **object, - acpi_walk_state *walk_state); - -#endif /* _ACDISPAT_H_ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acevents.h linux.21rc5-ac2/drivers/acpi/include/acevents.h --- linux.21rc5/drivers/acpi/include/acevents.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acevents.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,217 +0,0 @@ -/****************************************************************************** - * - * Name: acevents.h - Event subcomponent prototypes and defines - * $Revision: 66 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACEVENTS_H__ -#define __ACEVENTS_H__ - - -acpi_status -acpi_ev_initialize ( - void); - - -/* - * Acpi_evfixed - Fixed event handling - */ - -acpi_status -acpi_ev_fixed_event_initialize ( - void); - -u32 -acpi_ev_fixed_event_detect ( - void); - -u32 -acpi_ev_fixed_event_dispatch ( - u32 acpi_event); - - -/* - * Acpi_evglock - Global Lock support - */ - -acpi_status -acpi_ev_acquire_global_lock( - void); - -void -acpi_ev_release_global_lock( - void); - -acpi_status -acpi_ev_init_global_lock_handler ( - void); - - -/* - * Acpi_evgpe - GPE handling and dispatch - */ - -acpi_status -acpi_ev_gpe_initialize ( - void); - -acpi_status -acpi_ev_init_gpe_control_methods ( - void); - -u32 -acpi_ev_gpe_dispatch ( - u32 gpe_number); - -u32 -acpi_ev_gpe_detect ( - void); - - -/* - * Acpi_evnotify - Device Notify handling and dispatch - */ - -acpi_status -acpi_ev_queue_notify_request ( - acpi_namespace_node *node, - u32 notify_value); - -void -acpi_ev_notify_dispatch ( - void *context); - -/* - * Acpi_evregion - Address Space handling - */ - -acpi_status -acpi_ev_install_default_address_space_handlers ( - void); - -acpi_status -acpi_ev_address_space_dispatch ( - acpi_operand_object *region_obj, - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value); - - -acpi_status -acpi_ev_addr_handler_helper ( - acpi_handle obj_handle, - u32 level, - void *context, - void **return_value); - -void -acpi_ev_disassociate_region_from_handler( - acpi_operand_object *region_obj, - u8 acpi_ns_is_locked); - - -acpi_status -acpi_ev_associate_region_and_handler ( - acpi_operand_object *handler_obj, - acpi_operand_object *region_obj, - u8 acpi_ns_is_locked); - - -/* - * Acpi_evregini - Region initialization and setup - */ - -acpi_status -acpi_ev_system_memory_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context); - -acpi_status -acpi_ev_io_space_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context); - -acpi_status -acpi_ev_pci_config_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context); - -acpi_status -acpi_ev_cmos_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context); - -acpi_status -acpi_ev_pci_bar_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context); - -acpi_status -acpi_ev_default_region_setup ( - acpi_handle handle, - u32 function, - void *handler_context, - void **region_context); - -acpi_status -acpi_ev_initialize_region ( - acpi_operand_object *region_obj, - u8 acpi_ns_locked); - - -/* - * Evsci - SCI (System Control Interrupt) handling/dispatch - */ - -u32 -acpi_ev_install_sci_handler ( - void); - -acpi_status -acpi_ev_remove_sci_handler ( - void); - -u32 -acpi_ev_initialize_sCI ( - u32 program_sCI); - -void -acpi_ev_restore_acpi_state ( - void); - -void -acpi_ev_terminate ( - void); - - -#endif /* __ACEVENTS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acexcep.h linux.21rc5-ac2/drivers/acpi/include/acexcep.h --- linux.21rc5/drivers/acpi/include/acexcep.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acexcep.h 1970-01-01 01:00:00.000000000 
+0100
@@ -1,248 +0,0 @@
-/******************************************************************************
- *
- * Name: acexcep.h - Exception codes returned by the ACPI subsystem
- *       $Revision: 50 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACEXCEP_H__
-#define __ACEXCEP_H__
-
-
-/*
- * Exceptions returned by external ACPI interfaces
- */
-
-#define AE_CODE_ENVIRONMENTAL           0x0000
-#define AE_CODE_PROGRAMMER              0x1000
-#define AE_CODE_ACPI_TABLES             0x2000
-#define AE_CODE_AML                     0x3000
-#define AE_CODE_CONTROL                 0x4000
-#define AE_CODE_MASK                    0xF000
-
-
-#define ACPI_SUCCESS(a)                 (!(a))
-#define ACPI_FAILURE(a)                 (a)
-
-
-#define AE_OK                           (acpi_status) 0x0000
-
-/*
- * Environmental exceptions
- */
-#define AE_ERROR                        (acpi_status) (0x0001 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_ACPI_TABLES               (acpi_status) (0x0002 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_NAMESPACE                 (acpi_status) (0x0003 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_MEMORY                    (acpi_status) (0x0004 | AE_CODE_ENVIRONMENTAL)
-#define AE_NOT_FOUND                    (acpi_status) (0x0005 | AE_CODE_ENVIRONMENTAL)
-#define AE_NOT_EXIST                    (acpi_status) (0x0006 | AE_CODE_ENVIRONMENTAL)
-#define AE_EXIST                        (acpi_status) (0x0007 | AE_CODE_ENVIRONMENTAL)
-#define AE_TYPE                         (acpi_status) (0x0008 | AE_CODE_ENVIRONMENTAL)
-#define AE_NULL_OBJECT                  (acpi_status) (0x0009 | AE_CODE_ENVIRONMENTAL)
-#define AE_NULL_ENTRY                   (acpi_status) (0x000A | AE_CODE_ENVIRONMENTAL)
-#define AE_BUFFER_OVERFLOW              (acpi_status) (0x000B | AE_CODE_ENVIRONMENTAL)
-#define AE_STACK_OVERFLOW               (acpi_status) (0x000C | AE_CODE_ENVIRONMENTAL)
-#define AE_STACK_UNDERFLOW              (acpi_status) (0x000D | AE_CODE_ENVIRONMENTAL)
-#define AE_NOT_IMPLEMENTED              (acpi_status) (0x000E | AE_CODE_ENVIRONMENTAL)
-#define AE_VERSION_MISMATCH             (acpi_status) (0x000F | AE_CODE_ENVIRONMENTAL)
-#define AE_SUPPORT                      (acpi_status) (0x0010 | AE_CODE_ENVIRONMENTAL)
-#define AE_SHARE                        (acpi_status) (0x0011 | AE_CODE_ENVIRONMENTAL)
-#define AE_LIMIT                        (acpi_status) (0x0012 | AE_CODE_ENVIRONMENTAL)
-#define AE_TIME                         (acpi_status) (0x0013 | AE_CODE_ENVIRONMENTAL)
-#define AE_UNKNOWN_STATUS               (acpi_status) (0x0014 | AE_CODE_ENVIRONMENTAL)
-#define AE_ACQUIRE_DEADLOCK             (acpi_status) (0x0015 | AE_CODE_ENVIRONMENTAL)
-#define AE_RELEASE_DEADLOCK             (acpi_status) (0x0016 | AE_CODE_ENVIRONMENTAL)
-#define AE_NOT_ACQUIRED                 (acpi_status) (0x0017 | AE_CODE_ENVIRONMENTAL)
-#define AE_ALREADY_ACQUIRED             (acpi_status) (0x0018 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_HARDWARE_RESPONSE         (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_GLOBAL_LOCK               (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
-
-#define AE_CODE_ENV_MAX                 0x001A
-
-/*
- * Programmer exceptions
- */
-#define AE_BAD_PARAMETER                (acpi_status) (0x0001 | AE_CODE_PROGRAMMER)
-#define AE_BAD_CHARACTER                (acpi_status) (0x0002 | AE_CODE_PROGRAMMER)
-#define AE_BAD_PATHNAME                 (acpi_status) (0x0003 | AE_CODE_PROGRAMMER)
-#define AE_BAD_DATA                     (acpi_status) (0x0004 | AE_CODE_PROGRAMMER)
-#define AE_BAD_ADDRESS                  (acpi_status) (0x0005 | AE_CODE_PROGRAMMER)
-
-#define AE_CODE_PGM_MAX                 0x0005
-
-
-/*
- * Acpi table exceptions
- */
-#define AE_BAD_SIGNATURE                (acpi_status) (0x0001 | AE_CODE_ACPI_TABLES)
-#define AE_BAD_HEADER                   (acpi_status) (0x0002 | AE_CODE_ACPI_TABLES)
-#define AE_BAD_CHECKSUM                 (acpi_status) (0x0003 | AE_CODE_ACPI_TABLES)
-#define AE_BAD_VALUE                    (acpi_status) (0x0004 | AE_CODE_ACPI_TABLES)
-
-#define AE_CODE_TBL_MAX                 0x0003
-
-
-/*
- * AML exceptions. These are caused by problems with
- * the actual AML byte stream
- */
-#define AE_AML_ERROR                    (acpi_status) (0x0001 | AE_CODE_AML)
-#define AE_AML_PARSE                    (acpi_status) (0x0002 | AE_CODE_AML)
-#define AE_AML_BAD_OPCODE               (acpi_status) (0x0003 | AE_CODE_AML)
-#define AE_AML_NO_OPERAND               (acpi_status) (0x0004 | AE_CODE_AML)
-#define AE_AML_OPERAND_TYPE             (acpi_status) (0x0005 | AE_CODE_AML)
-#define AE_AML_OPERAND_VALUE            (acpi_status) (0x0006 | AE_CODE_AML)
-#define AE_AML_UNINITIALIZED_LOCAL      (acpi_status) (0x0007 | AE_CODE_AML)
-#define AE_AML_UNINITIALIZED_ARG        (acpi_status) (0x0008 | AE_CODE_AML)
-#define AE_AML_UNINITIALIZED_ELEMENT    (acpi_status) (0x0009 | AE_CODE_AML)
-#define AE_AML_NUMERIC_OVERFLOW         (acpi_status) (0x000A | AE_CODE_AML)
-#define AE_AML_REGION_LIMIT             (acpi_status) (0x000B | AE_CODE_AML)
-#define AE_AML_BUFFER_LIMIT             (acpi_status) (0x000C | AE_CODE_AML)
-#define AE_AML_PACKAGE_LIMIT            (acpi_status) (0x000D | AE_CODE_AML)
-#define AE_AML_DIVIDE_BY_ZERO           (acpi_status) (0x000E | AE_CODE_AML)
-#define AE_AML_BAD_NAME                 (acpi_status) (0x000F | AE_CODE_AML)
-#define AE_AML_NAME_NOT_FOUND           (acpi_status) (0x0010 | AE_CODE_AML)
-#define AE_AML_INTERNAL                 (acpi_status) (0x0011 | AE_CODE_AML)
-#define AE_AML_INVALID_SPACE_ID         (acpi_status) (0x0012 | AE_CODE_AML)
-#define AE_AML_STRING_LIMIT             (acpi_status) (0x0013 | AE_CODE_AML)
-#define AE_AML_NO_RETURN_VALUE          (acpi_status) (0x0014 | AE_CODE_AML)
-#define AE_AML_METHOD_LIMIT             (acpi_status) (0x0015 | AE_CODE_AML)
-#define AE_AML_NOT_OWNER                (acpi_status) (0x0016 | AE_CODE_AML)
-#define AE_AML_MUTEX_ORDER              (acpi_status) (0x0017 | AE_CODE_AML)
-#define AE_AML_MUTEX_NOT_ACQUIRED       (acpi_status) (0x0018 | AE_CODE_AML)
-#define AE_AML_INVALID_RESOURCE_TYPE    (acpi_status) (0x0019 | AE_CODE_AML)
-
-#define AE_CODE_AML_MAX                 0x0019
-
-/*
- * Internal exceptions used for control
- */
-#define AE_CTRL_RETURN_VALUE            (acpi_status) (0x0001 | AE_CODE_CONTROL)
-#define AE_CTRL_PENDING                 (acpi_status) (0x0002 | AE_CODE_CONTROL)
-#define AE_CTRL_TERMINATE               (acpi_status) (0x0003 | AE_CODE_CONTROL)
-#define AE_CTRL_TRUE                    (acpi_status) (0x0004 | AE_CODE_CONTROL)
-#define AE_CTRL_FALSE                   (acpi_status) (0x0005 | AE_CODE_CONTROL)
-#define AE_CTRL_DEPTH                   (acpi_status) (0x0006 | AE_CODE_CONTROL)
-#define AE_CTRL_END                     (acpi_status) (0x0007 | AE_CODE_CONTROL)
-#define AE_CTRL_TRANSFER                (acpi_status) (0x0008 | AE_CODE_CONTROL)
-
-#define AE_CODE_CTRL_MAX                0x0008
-
-
-#ifdef DEFINE_ACPI_GLOBALS
-
-/*
- * String versions of the exception codes above
- * These strings must match the corresponding defines exactly
- */
-NATIVE_CHAR const *acpi_gbl_exception_names_env[] =
-{
-        "AE_OK",
-        "AE_ERROR",
-        "AE_NO_ACPI_TABLES",
-        "AE_NO_NAMESPACE",
-        "AE_NO_MEMORY",
-        "AE_NOT_FOUND",
-        "AE_NOT_EXIST",
-        "AE_EXIST",
-        "AE_TYPE",
-        "AE_NULL_OBJECT",
-        "AE_NULL_ENTRY",
-        "AE_BUFFER_OVERFLOW",
-        "AE_STACK_OVERFLOW",
-        "AE_STACK_UNDERFLOW",
-        "AE_NOT_IMPLEMENTED",
-
"AE_VERSION_MISMATCH", - "AE_SUPPORT", - "AE_SHARE", - "AE_LIMIT", - "AE_TIME", - "AE_UNKNOWN_STATUS", - "AE_ACQUIRE_DEADLOCK", - "AE_RELEASE_DEADLOCK", - "AE_NOT_ACQUIRED", - "AE_ALREADY_ACQUIRED", - "AE_NO_HARDWARE_RESPONSE", - "AE_NO_GLOBAL_LOCK", -}; - -NATIVE_CHAR const *acpi_gbl_exception_names_pgm[] = -{ - "AE_BAD_PARAMETER", - "AE_BAD_CHARACTER", - "AE_BAD_PATHNAME", - "AE_BAD_DATA", - "AE_BAD_ADDRESS", -}; - -NATIVE_CHAR const *acpi_gbl_exception_names_tbl[] = -{ - "AE_BAD_SIGNATURE", - "AE_BAD_HEADER", - "AE_BAD_CHECKSUM", - "AE_BAD_VALUE", -}; - -NATIVE_CHAR const *acpi_gbl_exception_names_aml[] = -{ - "AE_AML_ERROR", - "AE_AML_PARSE", - "AE_AML_BAD_OPCODE", - "AE_AML_NO_OPERAND", - "AE_AML_OPERAND_TYPE", - "AE_AML_OPERAND_VALUE", - "AE_AML_UNINITIALIZED_LOCAL", - "AE_AML_UNINITIALIZED_ARG", - "AE_AML_UNINITIALIZED_ELEMENT", - "AE_AML_NUMERIC_OVERFLOW", - "AE_AML_REGION_LIMIT", - "AE_AML_BUFFER_LIMIT", - "AE_AML_PACKAGE_LIMIT", - "AE_AML_DIVIDE_BY_ZERO", - "AE_AML_BAD_NAME", - "AE_AML_NAME_NOT_FOUND", - "AE_AML_INTERNAL", - "AE_AML_INVALID_SPACE_ID", - "AE_AML_STRING_LIMIT", - "AE_AML_NO_RETURN_VALUE", - "AE_AML_METHOD_LIMIT", - "AE_AML_NOT_OWNER", - "AE_AML_MUTEX_ORDER", - "AE_AML_MUTEX_NOT_ACQUIRED", - "AE_AML_INVALID_RESOURCE_TYPE", -}; - -NATIVE_CHAR const *acpi_gbl_exception_names_ctrl[] = -{ - "AE_CTRL_RETURN_VALUE", - "AE_CTRL_PENDING", - "AE_CTRL_TERMINATE", - "AE_CTRL_TRUE", - "AE_CTRL_FALSE", - "AE_CTRL_DEPTH", - "AE_CTRL_END", - "AE_CTRL_TRANSFER", -}; - -#endif /* ACPI GLOBALS */ - - -#endif /* __ACEXCEP_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acglobal.h linux.21rc5-ac2/drivers/acpi/include/acglobal.h --- linux.21rc5/drivers/acpi/include/acglobal.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acglobal.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,232 +0,0 @@ -/****************************************************************************** - * - * Name: acglobal.h - Declarations for global variables - * $Revision: 106 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACGLOBAL_H__ -#define __ACGLOBAL_H__ - - -/* - * Ensure that the globals are actually defined only once. - * - * The use of these defines allows a single list of globals (here) in order - * to simplify maintenance of the code. 
- */ -#ifdef DEFINE_ACPI_GLOBALS -#define ACPI_EXTERN -#else -#define ACPI_EXTERN extern -#endif - - -/***************************************************************************** - * - * Debug support - * - ****************************************************************************/ - -/* Runtime configuration of debug print levels */ - -extern u32 acpi_dbg_level; -extern u32 acpi_dbg_layer; - -/* Procedure nesting level for debug output */ - -extern u32 acpi_gbl_nesting_level; - - -/***************************************************************************** - * - * ACPI Table globals - * - ****************************************************************************/ - -/* - * Table pointers. - * Although these pointers are somewhat redundant with the global Acpi_table, - * they are convenient because they are typed pointers. - * - * These tables are single-table only; meaning that there can be at most one - * of each in the system. Each global points to the actual table. - * - */ -ACPI_EXTERN RSDP_DESCRIPTOR *acpi_gbl_RSDP; -ACPI_EXTERN xsdt_descriptor *acpi_gbl_XSDT; -ACPI_EXTERN FADT_DESCRIPTOR *acpi_gbl_FADT; -ACPI_EXTERN acpi_table_header *acpi_gbl_DSDT; -ACPI_EXTERN acpi_common_facs *acpi_gbl_FACS; - -/* - * Since there may be multiple SSDTs and PSDTS, a single pointer is not - * sufficient; Therefore, there isn't one! - */ - - -/* - * ACPI Table info arrays - */ -extern acpi_table_desc acpi_gbl_acpi_tables[NUM_ACPI_TABLES]; -extern ACPI_TABLE_SUPPORT acpi_gbl_acpi_table_data[NUM_ACPI_TABLES]; - -/* - * Predefined mutex objects. This array contains the - * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. - * (The table maps local handles to the real OS handles) - */ -ACPI_EXTERN acpi_mutex_info acpi_gbl_acpi_mutex_info [NUM_MTX]; - - -/***************************************************************************** - * - * Miscellaneous globals - * - ****************************************************************************/ - - -ACPI_EXTERN ACPI_MEMORY_LIST acpi_gbl_memory_lists[ACPI_NUM_MEM_LISTS]; -ACPI_EXTERN ACPI_OBJECT_NOTIFY_HANDLER acpi_gbl_drv_notify; -ACPI_EXTERN ACPI_OBJECT_NOTIFY_HANDLER acpi_gbl_sys_notify; -ACPI_EXTERN u8 *acpi_gbl_gpe0enable_register_save; -ACPI_EXTERN u8 *acpi_gbl_gpe1_enable_register_save; -ACPI_EXTERN acpi_walk_state *acpi_gbl_breakpoint_walk; -ACPI_EXTERN acpi_handle acpi_gbl_global_lock_semaphore; - -ACPI_EXTERN u32 acpi_gbl_global_lock_thread_count; -ACPI_EXTERN u32 acpi_gbl_restore_acpi_chipset; -ACPI_EXTERN u32 acpi_gbl_original_mode; -ACPI_EXTERN u32 acpi_gbl_edge_level_save; -ACPI_EXTERN u32 acpi_gbl_irq_enable_save; -ACPI_EXTERN u32 acpi_gbl_rsdp_original_location; -ACPI_EXTERN u32 acpi_gbl_ns_lookup_count; -ACPI_EXTERN u32 acpi_gbl_ps_find_count; -ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save; -ACPI_EXTERN u16 acpi_gbl_next_table_owner_id; -ACPI_EXTERN u16 acpi_gbl_next_method_owner_id; -ACPI_EXTERN u8 acpi_gbl_debugger_configuration; -ACPI_EXTERN u8 acpi_gbl_global_lock_acquired; -ACPI_EXTERN u8 acpi_gbl_step_to_next_call; -ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present; -ACPI_EXTERN u8 acpi_gbl_global_lock_present; - -extern u8 acpi_gbl_shutdown; -extern u32 acpi_gbl_system_flags; -extern u32 acpi_gbl_startup_flags; -extern const u8 acpi_gbl_decode_to8bit[8]; -extern const NATIVE_CHAR *acpi_gbl_db_sleep_states[ACPI_NUM_SLEEP_STATES]; - - -/***************************************************************************** - * - * Namespace globals - * - 
****************************************************************************/ - -#define NUM_NS_TYPES INTERNAL_TYPE_INVALID+1 -#define NUM_PREDEFINED_NAMES 9 - - -ACPI_EXTERN acpi_namespace_node acpi_gbl_root_node_struct; -ACPI_EXTERN acpi_namespace_node *acpi_gbl_root_node; - -extern const u8 acpi_gbl_ns_properties[NUM_NS_TYPES]; -extern const predefined_names acpi_gbl_pre_defined_names [NUM_PREDEFINED_NAMES]; - -#ifdef ACPI_DEBUG -ACPI_EXTERN u32 acpi_gbl_current_node_count; -ACPI_EXTERN u32 acpi_gbl_current_node_size; -ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count; -ACPI_EXTERN unsigned long acpi_gbl_entry_stack_pointer; -ACPI_EXTERN u32 acpi_gbl_lowest_stack_pointer; -ACPI_EXTERN u32 acpi_gbl_deepest_nesting; -#endif - -/***************************************************************************** - * - * Interpreter globals - * - ****************************************************************************/ - - -ACPI_EXTERN acpi_walk_list *acpi_gbl_current_walk_list; - -/* Address Space handlers */ - -ACPI_EXTERN acpi_adr_space_info acpi_gbl_address_spaces[ACPI_NUM_ADDRESS_SPACES]; - -/* Control method single step flag */ - -ACPI_EXTERN u8 acpi_gbl_cm_single_step; - - -/***************************************************************************** - * - * Parser globals - * - ****************************************************************************/ - -ACPI_EXTERN acpi_parse_object *acpi_gbl_parsed_namespace_root; - - -/***************************************************************************** - * - * Event globals - * - ****************************************************************************/ - -ACPI_EXTERN acpi_fixed_event_info acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]; -ACPI_EXTERN acpi_handle acpi_gbl_gpe_obj_handle; -ACPI_EXTERN u32 acpi_gbl_gpe_register_count; -ACPI_EXTERN acpi_gpe_registers *acpi_gbl_gpe_registers; -ACPI_EXTERN acpi_gpe_level_info *acpi_gbl_gpe_info; - -/* - * Gpe validation and translation table - * Indexed by the GPE number, returns GPE_INVALID if the GPE is not supported. - * Otherwise, returns a valid index into the global GPE table. - * - * This table is needed because the GPE numbers supported by block 1 do not - * have to be contiguous with the GPE numbers supported by block 0. - */ -ACPI_EXTERN u8 acpi_gbl_gpe_valid [ACPI_NUM_GPE]; - -/* Acpi_event counter for debug only */ - -#ifdef ACPI_DEBUG -ACPI_EXTERN u32 acpi_gbl_event_count[ACPI_NUM_FIXED_EVENTS]; -#endif - - -/***************************************************************************** - * - * Debugger globals - * - ****************************************************************************/ - -#ifdef ENABLE_DEBUGGER -ACPI_EXTERN u8 acpi_gbl_method_executing; -ACPI_EXTERN u8 acpi_gbl_db_terminate_threads; -#endif - - -#endif /* __ACGLOBAL_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/achware.h linux.21rc5-ac2/drivers/acpi/include/achware.h --- linux.21rc5/drivers/acpi/include/achware.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/achware.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,166 +0,0 @@ -/****************************************************************************** - * - * Name: achware.h -- hardware specific interfaces - * $Revision: 56 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. 
Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACHWARE_H__ -#define __ACHWARE_H__ - - -/* PM Timer ticks per second (HZ) */ -#define PM_TIMER_FREQUENCY 3579545 - - -/* Prototypes */ - - -acpi_status -acpi_hw_initialize ( - void); - -acpi_status -acpi_hw_shutdown ( - void); - -acpi_status -acpi_hw_initialize_system_info ( - void); - -acpi_status -acpi_hw_set_mode ( - u32 mode); - -u32 -acpi_hw_get_mode ( - void); - -u32 -acpi_hw_get_mode_capabilities ( - void); - -/* Register I/O Prototypes */ - - -u32 -acpi_hw_register_bit_access ( - NATIVE_UINT read_write, - u8 use_lock, - u32 register_id, - ... /* DWORD Write Value */); - -u32 -acpi_hw_register_read ( - u8 use_lock, - u32 register_id); - -void -acpi_hw_register_write ( - u8 use_lock, - u32 register_id, - u32 value); - -u32 -acpi_hw_low_level_read ( - u32 width, - acpi_generic_address *reg, - u32 offset); - -void -acpi_hw_low_level_write ( - u32 width, - u32 value, - acpi_generic_address *reg, - u32 offset); - -void -acpi_hw_clear_acpi_status ( - void); - -u32 -acpi_hw_get_bit_shift ( - u32 mask); - - -/* GPE support */ - -void -acpi_hw_enable_gpe ( - u32 gpe_number); - -void -acpi_hw_enable_gpe_for_wakeup ( - u32 gpe_number); - -void -acpi_hw_disable_gpe ( - u32 gpe_number); - -void -acpi_hw_disable_gpe_for_wakeup ( - u32 gpe_number); - -void -acpi_hw_clear_gpe ( - u32 gpe_number); - -void -acpi_hw_get_gpe_status ( - u32 gpe_number, - acpi_event_status *event_status); - -void -acpi_hw_disable_non_wakeup_gpes ( - void); - -void -acpi_hw_enable_non_wakeup_gpes ( - void); - - -/* Sleep Prototypes */ - -acpi_status -acpi_hw_obtain_sleep_type_register_data ( - u8 sleep_state, - u8 *slp_typ_a, - u8 *slp_typ_b); - - -/* ACPI Timer prototypes */ - -acpi_status -acpi_get_timer_resolution ( - u32 *resolution); - -acpi_status -acpi_get_timer ( - u32 *ticks); - -acpi_status -acpi_get_timer_duration ( - u32 start_ticks, - u32 end_ticks, - u32 *time_elapsed); - - -#endif /* __ACHWARE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acinterp.h linux.21rc5-ac2/drivers/acpi/include/acinterp.h --- linux.21rc5/drivers/acpi/include/acinterp.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acinterp.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,704 +0,0 @@ -/****************************************************************************** - * - * Name: acinterp.h - Interpreter subcomponent prototypes and defines - * $Revision: 116 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. 
Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACINTERP_H__ -#define __ACINTERP_H__ - - -#define WALK_OPERANDS &(walk_state->operands [walk_state->num_operands -1]) - - -/* Interpreter constants */ - -#define AML_END_OF_BLOCK -1 -#define PUSH_PKG_LENGTH 1 -#define DO_NOT_PUSH_PKG_LENGTH 0 - - -#define STACK_TOP 0 -#define STACK_BOTTOM (u32) -1 - -/* Constants for global "When_to_parse_methods" */ - -#define METHOD_PARSE_AT_INIT 0x0 -#define METHOD_PARSE_JUST_IN_TIME 0x1 -#define METHOD_DELETE_AT_COMPLETION 0x2 - - -acpi_status -acpi_ex_resolve_operands ( - u16 opcode, - acpi_operand_object **stack_ptr, - acpi_walk_state *walk_state); - - -/* - * amxface - External interpreter interfaces - */ - -acpi_status -acpi_ex_load_table ( - acpi_table_type table_id); - -acpi_status -acpi_ex_execute_method ( - acpi_namespace_node *method_node, - acpi_operand_object **params, - acpi_operand_object **return_obj_desc); - - -/* - * amconvrt - object conversion - */ - -acpi_status -acpi_ex_convert_to_integer ( - acpi_operand_object *obj_desc, - acpi_operand_object **result_desc, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_convert_to_buffer ( - acpi_operand_object *obj_desc, - acpi_operand_object **result_desc, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_convert_to_string ( - acpi_operand_object *obj_desc, - acpi_operand_object **result_desc, - u32 base, - u32 max_length, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_convert_to_target_type ( - acpi_object_type8 destination_type, - acpi_operand_object **obj_desc, - acpi_walk_state *walk_state); - - -/* - * amfield - ACPI AML (p-code) execution - field manipulation - */ - -acpi_status -acpi_ex_extract_from_field ( - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length); - -acpi_status -acpi_ex_insert_into_field ( - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length); - -acpi_status -acpi_ex_setup_field ( - acpi_operand_object *obj_desc, - u32 field_byte_offset); - -acpi_status -acpi_ex_read_field_datum ( - acpi_operand_object *obj_desc, - u32 field_byte_offset, - u32 *value); - -acpi_status -acpi_ex_common_access_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length); - - -acpi_status -acpi_ex_access_index_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length); - -acpi_status -acpi_ex_access_bank_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length); - -acpi_status -acpi_ex_access_region_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length); - - -acpi_status -acpi_ex_access_buffer_field ( - u32 mode, - acpi_operand_object *obj_desc, - void *buffer, - u32 buffer_length); - -acpi_status -acpi_ex_read_data_from_field ( - acpi_operand_object *obj_desc, - 
acpi_operand_object **ret_buffer_desc); - -acpi_status -acpi_ex_write_data_to_field ( - acpi_operand_object *source_desc, - acpi_operand_object *obj_desc); - -/* - * ammisc - ACPI AML (p-code) execution - specific opcodes - */ - -acpi_status -acpi_ex_opcode_3A_0T_0R ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_opcode_3A_1T_1R ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_opcode_6A_0T_1R ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_get_object_reference ( - acpi_operand_object *obj_desc, - acpi_operand_object **return_desc, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_do_concatenate ( - acpi_operand_object *obj_desc, - acpi_operand_object *obj_desc2, - acpi_operand_object **actual_return_desc, - acpi_walk_state *walk_state); - -u8 -acpi_ex_do_logical_op ( - u16 opcode, - acpi_integer operand0, - acpi_integer operand1); - -acpi_integer -acpi_ex_do_math_op ( - u16 opcode, - acpi_integer operand0, - acpi_integer operand1); - -acpi_status -acpi_ex_load_op ( - acpi_operand_object *rgn_desc, - acpi_operand_object *ddb_handle); - -acpi_status -acpi_ex_unload_table ( - acpi_operand_object *ddb_handle); - -acpi_status -acpi_ex_create_mutex ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_create_processor ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_create_power_resource ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_create_region ( - u8 *aml_start, - u32 aml_length, - u8 region_space, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_create_table_region ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_create_event ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_create_alias ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_create_method ( - u8 *aml_start, - u32 aml_length, - acpi_walk_state *walk_state); - - -/* - * ammutex - mutex support - */ - -acpi_status -acpi_ex_acquire_mutex ( - acpi_operand_object *time_desc, - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_release_mutex ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_release_all_mutexes ( - acpi_operand_object *mutex_list); - -void -acpi_ex_unlink_mutex ( - acpi_operand_object *obj_desc); - - -/* - * amprep - ACPI AML (p-code) execution - prep utilities - */ - -acpi_status -acpi_ex_prep_common_field_object ( - acpi_operand_object *obj_desc, - u8 field_flags, - u32 field_position, - u32 field_length); - -acpi_status -acpi_ex_prep_region_field_value ( - acpi_namespace_node *node, - acpi_handle region, - u8 field_flags, - u32 field_position, - u32 field_length); - -acpi_status -acpi_ex_prep_bank_field_value ( - acpi_namespace_node *node, - acpi_namespace_node *region_node, - acpi_namespace_node *bank_register_node, - u32 bank_val, - u8 field_flags, - u32 field_position, - u32 field_length); - -acpi_status -acpi_ex_prep_index_field_value ( - acpi_namespace_node *node, - acpi_namespace_node *index_reg, - acpi_namespace_node *data_reg, - u8 field_flags, - u32 field_position, - u32 field_length); - -acpi_status -acpi_ex_prep_field_value ( - ACPI_CREATE_FIELD_INFO *info); - -/* - * amsystem - Interface to OS services - */ - -acpi_status -acpi_ex_system_do_notify_op ( - acpi_operand_object *value, - acpi_operand_object *obj_desc); - -void -acpi_ex_system_do_suspend( - u32 time); - -void -acpi_ex_system_do_stall ( - u32 time); - -acpi_status -acpi_ex_system_acquire_mutex( - acpi_operand_object *time, - acpi_operand_object *obj_desc); - -acpi_status 
-acpi_ex_system_release_mutex( - acpi_operand_object *obj_desc); - -acpi_status -acpi_ex_system_signal_event( - acpi_operand_object *obj_desc); - -acpi_status -acpi_ex_system_wait_event( - acpi_operand_object *time, - acpi_operand_object *obj_desc); - -acpi_status -acpi_ex_system_reset_event( - acpi_operand_object *obj_desc); - -acpi_status -acpi_ex_system_wait_semaphore ( - acpi_handle semaphore, - u32 timeout); - - -/* - * ammonadic - ACPI AML (p-code) execution, monadic operators - */ - -acpi_status -acpi_ex_opcode_1A_0T_0R ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_opcode_1A_0T_1R ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_opcode_1A_1T_1R ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_opcode_1A_1T_0R ( - acpi_walk_state *walk_state); - -/* - * amdyadic - ACPI AML (p-code) execution, dyadic operators - */ - -acpi_status -acpi_ex_opcode_2A_0T_0R ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_opcode_2A_0T_1R ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_opcode_2A_1T_1R ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_opcode_2A_2T_1R ( - acpi_walk_state *walk_state); - - -/* - * amresolv - Object resolution and get value functions - */ - -acpi_status -acpi_ex_resolve_to_value ( - acpi_operand_object **stack_ptr, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_resolve_node_to_value ( - acpi_namespace_node **stack_ptr, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_resolve_object_to_value ( - acpi_operand_object **stack_ptr, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_get_buffer_field_value ( - acpi_operand_object *field_desc, - acpi_operand_object *result_desc); - - -/* - * amdump - Scanner debug output routines - */ - -void -acpi_ex_show_hex_value ( - u32 byte_count, - u8 *aml_start, - u32 lead_space); - - -acpi_status -acpi_ex_dump_operand ( - acpi_operand_object *entry_desc); - -void -acpi_ex_dump_operands ( - acpi_operand_object **operands, - operating_mode interpreter_mode, - NATIVE_CHAR *ident, - u32 num_levels, - NATIVE_CHAR *note, - NATIVE_CHAR *module_name, - u32 line_number); - -void -acpi_ex_dump_object_descriptor ( - acpi_operand_object *object, - u32 flags); - - -void -acpi_ex_dump_node ( - acpi_namespace_node *node, - u32 flags); - - -/* - * amnames - interpreter/scanner name load/execute - */ - -NATIVE_CHAR * -acpi_ex_allocate_name_string ( - u32 prefix_count, - u32 num_name_segs); - -u32 -acpi_ex_good_char ( - u32 character); - -acpi_status -acpi_ex_name_segment ( - u8 **in_aml_address, - NATIVE_CHAR *name_string); - -acpi_status -acpi_ex_get_name_string ( - acpi_object_type8 data_type, - u8 *in_aml_address, - NATIVE_CHAR **out_name_string, - u32 *out_name_length); - -acpi_status -acpi_ex_do_name ( - acpi_object_type data_type, - operating_mode load_exec_mode); - - -/* - * amstore - Object store support - */ - -acpi_status -acpi_ex_store ( - acpi_operand_object *val_desc, - acpi_operand_object *dest_desc, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_store_object_to_index ( - acpi_operand_object *val_desc, - acpi_operand_object *dest_desc, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_store_object_to_node ( - acpi_operand_object *source_desc, - acpi_namespace_node *node, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_store_object_to_object ( - acpi_operand_object *source_desc, - acpi_operand_object *dest_desc, - acpi_walk_state *walk_state); - - -/* - * - */ - -acpi_status -acpi_ex_resolve_object ( - acpi_operand_object **source_desc_ptr, - 
acpi_object_type8 target_type, - acpi_walk_state *walk_state); - -acpi_status -acpi_ex_store_object ( - acpi_operand_object *source_desc, - acpi_object_type8 target_type, - acpi_operand_object **target_desc_ptr, - acpi_walk_state *walk_state); - - -/* - * amcopy - object copy - */ - -acpi_status -acpi_ex_copy_buffer_to_buffer ( - acpi_operand_object *source_desc, - acpi_operand_object *target_desc); - -acpi_status -acpi_ex_copy_string_to_string ( - acpi_operand_object *source_desc, - acpi_operand_object *target_desc); - -acpi_status -acpi_ex_copy_integer_to_index_field ( - acpi_operand_object *source_desc, - acpi_operand_object *target_desc); - -acpi_status -acpi_ex_copy_integer_to_bank_field ( - acpi_operand_object *source_desc, - acpi_operand_object *target_desc); - -acpi_status -acpi_ex_copy_data_to_named_field ( - acpi_operand_object *source_desc, - acpi_namespace_node *node); - -acpi_status -acpi_ex_copy_integer_to_buffer_field ( - acpi_operand_object *source_desc, - acpi_operand_object *target_desc); - -/* - * amutils - interpreter/scanner utilities - */ - -acpi_status -acpi_ex_enter_interpreter ( - void); - -void -acpi_ex_exit_interpreter ( - void); - -void -acpi_ex_truncate_for32bit_table ( - acpi_operand_object *obj_desc, - acpi_walk_state *walk_state); - -u8 -acpi_ex_validate_object_type ( - acpi_object_type type); - -u8 -acpi_ex_acquire_global_lock ( - u32 rule); - -acpi_status -acpi_ex_release_global_lock ( - u8 locked); - -u32 -acpi_ex_digits_needed ( - acpi_integer value, - u32 base); - -acpi_status -acpi_ex_eisa_id_to_string ( - u32 numeric_id, - NATIVE_CHAR *out_string); - -acpi_status -acpi_ex_unsigned_integer_to_string ( - acpi_integer value, - NATIVE_CHAR *out_string); - - -/* - * amregion - default Op_region handlers - */ - -acpi_status -acpi_ex_system_memory_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context); - -acpi_status -acpi_ex_system_io_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context); - -acpi_status -acpi_ex_pci_config_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context); - -acpi_status -acpi_ex_cmos_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context); - -acpi_status -acpi_ex_pci_bar_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context); - -acpi_status -acpi_ex_embedded_controller_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context); - -acpi_status -acpi_ex_sm_bus_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context); - - -#endif /* __INTERP_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/aclocal.h linux.21rc5-ac2/drivers/acpi/include/aclocal.h --- linux.21rc5/drivers/acpi/include/aclocal.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/aclocal.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,973 +0,0 @@ -/****************************************************************************** - * - * Name: aclocal.h - Internal data types used across the 
ACPI subsystem - * $Revision: 138 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACLOCAL_H__ -#define __ACLOCAL_H__ - - -#define WAIT_FOREVER ((u32) -1) - -typedef void* acpi_mutex; -typedef u32 ACPI_MUTEX_HANDLE; - - -#define ACPI_MEMORY_MODE 0x01 -#define ACPI_LOGICAL_ADDRESSING 0x00 -#define ACPI_PHYSICAL_ADDRESSING 0x01 - -/* Object descriptor types */ - -#define ACPI_CACHED_OBJECT 0x11 /* ORed in when object is cached */ -#define ACPI_DESC_TYPE_STATE 0x20 -#define ACPI_DESC_TYPE_STATE_UPDATE 0x21 -#define ACPI_DESC_TYPE_STATE_PACKAGE 0x22 -#define ACPI_DESC_TYPE_STATE_CONTROL 0x23 -#define ACPI_DESC_TYPE_STATE_RPSCOPE 0x24 -#define ACPI_DESC_TYPE_STATE_PSCOPE 0x25 -#define ACPI_DESC_TYPE_STATE_WSCOPE 0x26 -#define ACPI_DESC_TYPE_STATE_RESULT 0x27 -#define ACPI_DESC_TYPE_STATE_NOTIFY 0x28 -#define ACPI_DESC_TYPE_WALK 0x44 -#define ACPI_DESC_TYPE_PARSER 0x66 -#define ACPI_DESC_TYPE_INTERNAL 0x88 -#define ACPI_DESC_TYPE_NAMED 0xAA - - -/***************************************************************************** - * - * Mutex typedefs and structs - * - ****************************************************************************/ - - -/* - * Predefined handles for the mutex objects used within the subsystem - * All mutex objects are automatically created by Acpi_ut_mutex_initialize. - * - * The acquire/release ordering protocol is implied via this list. Mutexes - * with a lower value must be acquired before mutexes with a higher value. - * - * NOTE: any changes here must be reflected in the Acpi_gbl_Mutex_names table also! 
- */ - -#define ACPI_MTX_EXECUTE 0 -#define ACPI_MTX_INTERPRETER 1 -#define ACPI_MTX_PARSER 2 -#define ACPI_MTX_DISPATCHER 3 -#define ACPI_MTX_TABLES 4 -#define ACPI_MTX_OP_REGIONS 5 -#define ACPI_MTX_NAMESPACE 6 -#define ACPI_MTX_EVENTS 7 -#define ACPI_MTX_HARDWARE 8 -#define ACPI_MTX_CACHES 9 -#define ACPI_MTX_MEMORY 10 -#define ACPI_MTX_DEBUG_CMD_COMPLETE 11 -#define ACPI_MTX_DEBUG_CMD_READY 12 - -#define MAX_MTX 12 -#define NUM_MTX MAX_MTX+1 - - -#if defined(ACPI_DEBUG) || defined(ENABLE_DEBUGGER) -#ifdef DEFINE_ACPI_GLOBALS - -/* Names for the mutexes used in the subsystem */ - -static NATIVE_CHAR *acpi_gbl_mutex_names[] = -{ - "ACPI_MTX_Execute", - "ACPI_MTX_Interpreter", - "ACPI_MTX_Parser", - "ACPI_MTX_Dispatcher", - "ACPI_MTX_Tables", - "ACPI_MTX_Op_regions", - "ACPI_MTX_Namespace", - "ACPI_MTX_Events", - "ACPI_MTX_Hardware", - "ACPI_MTX_Caches", - "ACPI_MTX_Memory", - "ACPI_MTX_Debug_cmd_complete", - "ACPI_MTX_Debug_cmd_ready", -}; - -#endif -#endif - - -/* Table for the global mutexes */ - -typedef struct acpi_mutex_info -{ - acpi_mutex mutex; - u32 use_count; - u32 owner_id; - -} acpi_mutex_info; - -/* This owner ID means that the mutex is not in use (unlocked) */ - -#define ACPI_MUTEX_NOT_ACQUIRED (u32) (-1) - - -/* Lock flag parameter for various interfaces */ - -#define ACPI_MTX_DO_NOT_LOCK 0 -#define ACPI_MTX_LOCK 1 - - -typedef u16 acpi_owner_id; -#define OWNER_TYPE_TABLE 0x0 -#define OWNER_TYPE_METHOD 0x1 -#define FIRST_METHOD_ID 0x0000 -#define FIRST_TABLE_ID 0x8000 - -/* TBD: [Restructure] get rid of the need for this! */ - -#define TABLE_ID_DSDT (acpi_owner_id) 0x8000 - - -/* Field access granularities */ - -#define ACPI_FIELD_BYTE_GRANULARITY 1 -#define ACPI_FIELD_WORD_GRANULARITY 2 -#define ACPI_FIELD_DWORD_GRANULARITY 4 -#define ACPI_FIELD_QWORD_GRANULARITY 8 - -/***************************************************************************** - * - * Namespace typedefs and structs - * - ****************************************************************************/ - - -/* Operational modes of the AML interpreter/scanner */ - -typedef enum -{ - IMODE_LOAD_PASS1 = 0x01, - IMODE_LOAD_PASS2 = 0x02, - IMODE_EXECUTE = 0x0E - -} operating_mode; - - -/* - * The Node describes a named object that appears in the AML - * An Acpi_node is used to store Nodes. - * - * Data_type is used to differentiate between internal descriptors, and MUST - * be the first byte in this structure. - */ - -typedef struct acpi_node -{ - u8 data_type; - u8 type; /* Type associated with this name */ - u16 owner_id; - u32 name; /* ACPI Name, always 4 chars per ACPI spec */ - - - union acpi_operand_obj *object; /* Pointer to attached ACPI object (optional) */ - struct acpi_node *child; /* first child */ - struct acpi_node *peer; /* Next peer*/ - u16 reference_count; /* Current count of references and children */ - u8 flags; - -} acpi_namespace_node; - - -#define ENTRY_NOT_FOUND NULL - - -/* Node flags */ - -#define ANOBJ_AML_ATTACHMENT 0x01 -#define ANOBJ_END_OF_PEER_LIST 0x02 -#define ANOBJ_DATA_WIDTH_32 0x04 /* Parent table is 64-bits */ -#define ANOBJ_METHOD_ARG 0x08 -#define ANOBJ_METHOD_LOCAL 0x10 -#define ANOBJ_METHOD_NO_RETVAL 0x20 -#define ANOBJ_METHOD_SOME_NO_RETVAL 0x40 - -#define ANOBJ_IS_BIT_OFFSET 0x80 - - -/* - * ACPI Table Descriptor. 
One per ACPI table - */ -typedef struct acpi_table_desc -{ - struct acpi_table_desc *prev; - struct acpi_table_desc *next; - struct acpi_table_desc *installed_desc; - acpi_table_header *pointer; - void *base_pointer; - u8 *aml_start; - u64 physical_address; - u32 aml_length; - u32 length; - u32 count; - acpi_owner_id table_id; - u8 type; - u8 allocation; - u8 loaded_into_namespace; - -} acpi_table_desc; - - -typedef struct -{ - NATIVE_CHAR *search_for; - acpi_handle *list; - u32 *count; - -} find_context; - - -typedef struct -{ - acpi_namespace_node *node; -} ns_search_data; - - -/* - * Predefined Namespace items - */ -typedef struct -{ - NATIVE_CHAR *name; - acpi_object_type8 type; - NATIVE_CHAR *val; - -} predefined_names; - - -/* Object types used during package copies */ - - -#define ACPI_COPY_TYPE_SIMPLE 0 -#define ACPI_COPY_TYPE_PACKAGE 1 - -/* Info structure used to convert external<->internal namestrings */ - -typedef struct acpi_namestring_info -{ - NATIVE_CHAR *external_name; - NATIVE_CHAR *next_external_char; - NATIVE_CHAR *internal_name; - u32 length; - u32 num_segments; - u32 num_carats; - u8 fully_qualified; - -} acpi_namestring_info; - - -/* Field creation info */ - -typedef struct -{ - acpi_namespace_node *region_node; - acpi_namespace_node *field_node; - acpi_namespace_node *register_node; - acpi_namespace_node *data_register_node; - u32 bank_value; - u32 field_bit_position; - u32 field_bit_length; - u8 field_flags; - u8 field_type; - -} ACPI_CREATE_FIELD_INFO; - -/* - * Field flags: Bits 00 - 03 : Access_type (Any_acc, Byte_acc, etc.) - * 04 : Lock_rule (1 == Lock) - * 05 - 06 : Update_rule - */ - -#define FIELD_ACCESS_TYPE_MASK 0x0F -#define FIELD_LOCK_RULE_MASK 0x10 -#define FIELD_UPDATE_RULE_MASK 0x60 - - -/***************************************************************************** - * - * Event typedefs and structs - * - ****************************************************************************/ - - -/* Status bits. */ - -#define ACPI_STATUS_PMTIMER 0x0001 -#define ACPI_STATUS_BUSMASTER 0x0010 -#define ACPI_STATUS_GLOBAL 0x0020 -#define ACPI_STATUS_POWER_BUTTON 0x0100 -#define ACPI_STATUS_SLEEP_BUTTON 0x0200 -#define ACPI_STATUS_RTC_ALARM 0x0400 - -/* Enable bits. */ - -#define ACPI_ENABLE_PMTIMER 0x0001 -#define ACPI_ENABLE_GLOBAL 0x0020 -#define ACPI_ENABLE_POWER_BUTTON 0x0100 -#define ACPI_ENABLE_SLEEP_BUTTON 0x0200 -#define ACPI_ENABLE_RTC_ALARM 0x0400 - - -/* - * Entry in the Address_space (AKA Operation Region) table - */ - -typedef struct -{ - acpi_adr_space_handler handler; - void *context; - -} acpi_adr_space_info; - - -/* Values and addresses of the GPE registers (both banks) */ - -typedef struct -{ - u16 status_addr; /* Address of status reg */ - u16 enable_addr; /* Address of enable reg */ - u8 status; /* Current value of status reg */ - u8 enable; /* Current value of enable reg */ - u8 wake_enable; /* Mask of bits to keep enabled when sleeping */ - u8 gpe_base; /* Base GPE number */ - -} acpi_gpe_registers; - - -#define ACPI_GPE_LEVEL_TRIGGERED 1 -#define ACPI_GPE_EDGE_TRIGGERED 2 - - -/* Information about each particular GPE level */ - -typedef struct -{ - u8 type; /* Level or Edge */ - - acpi_handle method_handle; /* Method handle for direct (fast) execution */ - acpi_gpe_handler handler; /* Address of handler, if any */ - void *context; /* Context to be passed to handler */ - -} acpi_gpe_level_info; - - -/* Information about each particular fixed event */ - -typedef struct -{ - acpi_event_handler handler; /* Address of handler. 
*/ - void *context; /* Context to be passed to handler */ - -} acpi_fixed_event_info; - - -/* Information used during field processing */ - -typedef struct -{ - u8 skip_field; - u8 field_flag; - u32 pkg_length; - -} acpi_field_info; - - -/***************************************************************************** - * - * Generic "state" object for stacks - * - ****************************************************************************/ - - -#define CONTROL_NORMAL 0xC0 -#define CONTROL_CONDITIONAL_EXECUTING 0xC1 -#define CONTROL_PREDICATE_EXECUTING 0xC2 -#define CONTROL_PREDICATE_FALSE 0xC3 -#define CONTROL_PREDICATE_TRUE 0xC4 - - -/* Forward declarations */ -struct acpi_walk_state; -struct acpi_walk_list; -struct acpi_parse_obj; -struct acpi_obj_mutex; - - -#define ACPI_STATE_COMMON /* Two 32-bit fields and a pointer */\ - u8 data_type; /* To differentiate various internal objs */\ - u8 flags; \ - u16 value; \ - u16 state; \ - u16 acpi_eval; \ - void *next; \ - -typedef struct acpi_common_state -{ - ACPI_STATE_COMMON -} acpi_common_state; - - -/* - * Update state - used to traverse complex objects such as packages - */ -typedef struct acpi_update_state -{ - ACPI_STATE_COMMON - union acpi_operand_obj *object; - -} acpi_update_state; - - -/* - * Pkg state - used to traverse nested package structures - */ -typedef struct acpi_pkg_state -{ - ACPI_STATE_COMMON - union acpi_operand_obj *source_object; - union acpi_operand_obj *dest_object; - struct acpi_walk_state *walk_state; - void *this_target_obj; - u32 num_packages; - u16 index; - -} acpi_pkg_state; - - -/* - * Control state - one per if/else and while constructs. - * Allows nesting of these constructs - */ -typedef struct acpi_control_state -{ - ACPI_STATE_COMMON - struct acpi_parse_obj *predicate_op; - u8 *aml_predicate_start; /* Start of if/while predicate */ - -} acpi_control_state; - - -/* - * Scope state - current scope during namespace lookups - */ -typedef struct acpi_scope_state -{ - ACPI_STATE_COMMON - acpi_namespace_node *node; - -} acpi_scope_state; - - -typedef struct acpi_pscope_state -{ - ACPI_STATE_COMMON - struct acpi_parse_obj *op; /* current op being parsed */ - u8 *arg_end; /* current argument end */ - u8 *pkg_end; /* current package end */ - u32 arg_list; /* next argument to parse */ - u32 arg_count; /* Number of fixed arguments */ - -} acpi_pscope_state; - - -/* - * Result values - used to accumulate the results of nested - * AML arguments - */ -typedef struct acpi_result_values -{ - ACPI_STATE_COMMON - union acpi_operand_obj *obj_desc [OBJ_NUM_OPERANDS]; - u8 num_results; - u8 last_insert; - -} acpi_result_values; - - -typedef -acpi_status (*acpi_parse_downwards) ( - struct acpi_walk_state *walk_state, - struct acpi_parse_obj **out_op); - -typedef -acpi_status (*acpi_parse_upwards) ( - struct acpi_walk_state *walk_state); - - -/* - * Notify info - used to pass info to the deferred notify - * handler/dispatcher. 
- */ -typedef struct acpi_notify_info -{ - ACPI_STATE_COMMON - acpi_namespace_node *node; - union acpi_operand_obj *handler_obj; - -} acpi_notify_info; - - -/* Generic state is union of structs above */ - -typedef union acpi_gen_state -{ - acpi_common_state common; - acpi_control_state control; - acpi_update_state update; - acpi_scope_state scope; - acpi_pscope_state parse_scope; - acpi_pkg_state pkg; - acpi_result_values results; - acpi_notify_info notify; - -} acpi_generic_state; - - -/***************************************************************************** - * - * Interpreter typedefs and structs - * - ****************************************************************************/ - -typedef -acpi_status (*ACPI_EXECUTE_OP) ( - struct acpi_walk_state *walk_state); - - -/***************************************************************************** - * - * Parser typedefs and structs - * - ****************************************************************************/ - -/* - * AML opcode, name, and argument layout - */ -typedef struct acpi_opcode_info -{ - u32 parse_args; /* Grammar/Parse time arguments */ - u32 runtime_args; /* Interpret time arguments */ - u16 flags; /* Misc flags */ - u8 class; /* Opcode class */ - u8 type; /* Opcode type */ - -#ifdef _OPCODE_NAMES - NATIVE_CHAR *name; /* op name (debug only) */ -#endif - -} acpi_opcode_info; - - -typedef union acpi_parse_val -{ - acpi_integer integer; /* integer constant (Up to 64 bits) */ - uint64_struct integer64; /* Structure overlay for 2 32-bit Dwords */ - u32 integer32; /* integer constant, 32 bits only */ - u16 integer16; /* integer constant, 16 bits only */ - u8 integer8; /* integer constant, 8 bits only */ - u32 size; /* bytelist or field size */ - NATIVE_CHAR *string; /* NULL terminated string */ - u8 *buffer; /* buffer or string */ - NATIVE_CHAR *name; /* NULL terminated string */ - struct acpi_parse_obj *arg; /* arguments and contained ops */ - -} acpi_parse_value; - - -#define ACPI_PARSE_COMMON \ - u8 data_type; /* To differentiate various internal objs */\ - u8 flags; /* Type of Op */\ - u16 opcode; /* AML opcode */\ - u32 aml_offset; /* offset of declaration in AML */\ - struct acpi_parse_obj *parent; /* parent op */\ - struct acpi_parse_obj *next; /* next op */\ - DEBUG_ONLY_MEMBERS (\ - NATIVE_CHAR op_name[16]) /* op name (debug only) */\ - /* NON-DEBUG members below: */\ - acpi_namespace_node *node; /* for use by interpreter */\ - acpi_parse_value value; /* Value or args associated with the opcode */\ - - -/* - * generic operation (eg. If, While, Store) - */ -typedef struct acpi_parse_obj -{ - ACPI_PARSE_COMMON -} acpi_parse_object; - - -/* - * Extended Op for named ops (Scope, Method, etc.), deferred ops (Methods and Op_regions), - * and bytelists. - */ -typedef struct acpi_parse2_obj -{ - ACPI_PARSE_COMMON - u8 *data; /* AML body or bytelist data */ - u32 length; /* AML length */ - u32 name; /* 4-byte name or zero if no name */ - -} acpi_parse2_object; - - -/* - * Parse state - one state per parser invocation and each control - * method. 
- */ -typedef struct acpi_parse_state -{ - u32 aml_size; - u8 *aml_start; /* first AML byte */ - u8 *aml; /* next AML byte */ - u8 *aml_end; /* (last + 1) AML byte */ - u8 *pkg_start; /* current package begin */ - u8 *pkg_end; /* current package end */ - - struct acpi_parse_obj *start_op; /* root of parse tree */ - struct acpi_node *start_node; - union acpi_gen_state *scope; /* current scope */ - - - struct acpi_parse_obj *start_scope; - - -} acpi_parse_state; - - -/***************************************************************************** - * - * Hardware and PNP - * - ****************************************************************************/ - - -/* PCI */ -#define PCI_ROOT_HID_STRING "PNP0A03" - -/* - * The #define's and enum below establish an abstract way of identifying what - * register block and register is to be accessed. Do not change any of the - * values as they are used in switch statements and offset calculations. - */ - -#define REGISTER_BLOCK_MASK 0xFF00 /* Register Block Id */ -#define BIT_IN_REGISTER_MASK 0x00FF /* Bit Id in the Register Block Id */ -#define BYTE_IN_REGISTER_MASK 0x00FF /* Register Offset in the Register Block */ - -#define REGISTER_BLOCK_ID(reg_id) (reg_id & REGISTER_BLOCK_MASK) -#define REGISTER_BIT_ID(reg_id) (reg_id & BIT_IN_REGISTER_MASK) -#define REGISTER_OFFSET(reg_id) (reg_id & BYTE_IN_REGISTER_MASK) - -/* - * Access Rule - * To access a Register Bit: - * -> Use Bit Name (= Register Block Id | Bit Id) defined in the enum. - * - * To access a Register: - * -> Use Register Id (= Register Block Id | Register Offset) - */ - - -/* - * Register Block Id - */ -#define PM1_STS 0x0100 -#define PM1_EN 0x0200 -#define PM1_CONTROL 0x0300 -#define PM1A_CONTROL 0x0400 -#define PM1B_CONTROL 0x0500 -#define PM2_CONTROL 0x0600 -#define PM_TIMER 0x0700 -#define PROCESSOR_BLOCK 0x0800 -#define GPE0_STS_BLOCK 0x0900 -#define GPE0_EN_BLOCK 0x0A00 -#define GPE1_STS_BLOCK 0x0B00 -#define GPE1_EN_BLOCK 0x0C00 -#define SMI_CMD_BLOCK 0x0D00 - -/* - * Address space bitmasks for mmio or io spaces - */ - -#define SMI_CMD_ADDRESS_SPACE 0x01 -#define PM1_BLK_ADDRESS_SPACE 0x02 -#define PM2_CNT_BLK_ADDRESS_SPACE 0x04 -#define PM_TMR_BLK_ADDRESS_SPACE 0x08 -#define GPE0_BLK_ADDRESS_SPACE 0x10 -#define GPE1_BLK_ADDRESS_SPACE 0x20 - -/* - * Control bit definitions - */ -#define TMR_STS (PM1_STS | 0x01) -#define BM_STS (PM1_STS | 0x02) -#define GBL_STS (PM1_STS | 0x03) -#define PWRBTN_STS (PM1_STS | 0x04) -#define SLPBTN_STS (PM1_STS | 0x05) -#define RTC_STS (PM1_STS | 0x06) -#define WAK_STS (PM1_STS | 0x07) - -#define TMR_EN (PM1_EN | 0x01) - /* no BM_EN */ -#define GBL_EN (PM1_EN | 0x03) -#define PWRBTN_EN (PM1_EN | 0x04) -#define SLPBTN_EN (PM1_EN | 0x05) -#define RTC_EN (PM1_EN | 0x06) -#define WAK_EN (PM1_EN | 0x07) - -#define SCI_EN (PM1_CONTROL | 0x01) -#define BM_RLD (PM1_CONTROL | 0x02) -#define GBL_RLS (PM1_CONTROL | 0x03) -#define SLP_TYPE_A (PM1_CONTROL | 0x04) -#define SLP_TYPE_B (PM1_CONTROL | 0x05) -#define SLP_EN (PM1_CONTROL | 0x06) - -#define ARB_DIS (PM2_CONTROL | 0x01) - -#define TMR_VAL (PM_TIMER | 0x01) - -#define GPE0_STS (GPE0_STS_BLOCK | 0x01) -#define GPE0_EN (GPE0_EN_BLOCK | 0x01) - -#define GPE1_STS (GPE1_STS_BLOCK | 0x01) -#define GPE1_EN (GPE1_EN_BLOCK | 0x01) - - -#define TMR_STS_MASK 0x0001 -#define BM_STS_MASK 0x0010 -#define GBL_STS_MASK 0x0020 -#define PWRBTN_STS_MASK 0x0100 -#define SLPBTN_STS_MASK 0x0200 -#define RTC_STS_MASK 0x0400 -#define WAK_STS_MASK 0x8000 - -#define ALL_FIXED_STS_BITS (TMR_STS_MASK | BM_STS_MASK | GBL_STS_MASK \ - | 
PWRBTN_STS_MASK | SLPBTN_STS_MASK \ - | RTC_STS_MASK | WAK_STS_MASK) - -#define TMR_EN_MASK 0x0001 -#define GBL_EN_MASK 0x0020 -#define PWRBTN_EN_MASK 0x0100 -#define SLPBTN_EN_MASK 0x0200 -#define RTC_EN_MASK 0x0400 - -#define SCI_EN_MASK 0x0001 -#define BM_RLD_MASK 0x0002 -#define GBL_RLS_MASK 0x0004 -#define SLP_TYPE_X_MASK 0x1C00 -#define SLP_EN_MASK 0x2000 - -#define ARB_DIS_MASK 0x0001 -#define TMR_VAL_MASK 0xFFFFFFFF - -#define GPE0_STS_MASK -#define GPE0_EN_MASK - -#define GPE1_STS_MASK -#define GPE1_EN_MASK - - -#define ACPI_READ 1 -#define ACPI_WRITE 2 - - -/***************************************************************************** - * - * Resource descriptors - * - ****************************************************************************/ - - -/* Resource_type values */ - -#define RESOURCE_TYPE_MEMORY_RANGE 0 -#define RESOURCE_TYPE_IO_RANGE 1 -#define RESOURCE_TYPE_BUS_NUMBER_RANGE 2 - -/* Resource descriptor types and masks */ - -#define RESOURCE_DESC_TYPE_LARGE 0x80 -#define RESOURCE_DESC_TYPE_SMALL 0x00 - -#define RESOURCE_DESC_TYPE_MASK 0x80 -#define RESOURCE_DESC_SMALL_MASK 0x78 /* Only bits 6:3 contain the type */ - - -/* - * Small resource descriptor types - * Note: The 3 length bits (2:0) must be zero - */ -#define RESOURCE_DESC_IRQ_FORMAT 0x20 -#define RESOURCE_DESC_DMA_FORMAT 0x28 -#define RESOURCE_DESC_START_DEPENDENT 0x30 -#define RESOURCE_DESC_END_DEPENDENT 0x38 -#define RESOURCE_DESC_IO_PORT 0x40 -#define RESOURCE_DESC_FIXED_IO_PORT 0x48 -#define RESOURCE_DESC_SMALL_VENDOR 0x70 -#define RESOURCE_DESC_END_TAG 0x78 - -/* - * Large resource descriptor types - */ - -#define RESOURCE_DESC_MEMORY_24 0x81 -#define RESOURCE_DESC_GENERAL_REGISTER 0x82 -#define RESOURCE_DESC_LARGE_VENDOR 0x84 -#define RESOURCE_DESC_MEMORY_32 0x85 -#define RESOURCE_DESC_FIXED_MEMORY_32 0x86 -#define RESOURCE_DESC_DWORD_ADDRESS_SPACE 0x87 -#define RESOURCE_DESC_WORD_ADDRESS_SPACE 0x88 -#define RESOURCE_DESC_EXTENDED_XRUPT 0x89 -#define RESOURCE_DESC_QWORD_ADDRESS_SPACE 0x8A - - -/* String version of device HIDs and UIDs */ - -#define ACPI_DEVICE_ID_LENGTH 0x09 - -typedef struct -{ - char buffer[ACPI_DEVICE_ID_LENGTH]; - -} acpi_device_id; - - -/***************************************************************************** - * - * Miscellaneous - * - ****************************************************************************/ - -#define ASCII_ZERO 0x30 - -/***************************************************************************** - * - * Debugger - * - ****************************************************************************/ - -typedef struct dbmethodinfo -{ - acpi_handle thread_gate; - NATIVE_CHAR *name; - NATIVE_CHAR **args; - u32 flags; - u32 num_loops; - NATIVE_CHAR pathname[128]; - -} db_method_info; - - -/***************************************************************************** - * - * Debug - * - ****************************************************************************/ - -typedef struct -{ - u32 component_id; - NATIVE_CHAR *proc_name; - NATIVE_CHAR *module_name; - -} acpi_debug_print_info; - - -/* Entry for a memory allocation (debug only) */ - - -#define MEM_MALLOC 0 -#define MEM_CALLOC 1 -#define MAX_MODULE_NAME 16 - -#define ACPI_COMMON_DEBUG_MEM_HEADER \ - struct acpi_debug_mem_block *previous; \ - struct acpi_debug_mem_block *next; \ - u32 size; \ - u32 component; \ - u32 line; \ - NATIVE_CHAR module[MAX_MODULE_NAME]; \ - u8 alloc_type; - - -typedef struct -{ - ACPI_COMMON_DEBUG_MEM_HEADER - -} acpi_debug_mem_header; - -typedef struct acpi_debug_mem_block -{ - 
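/*
 * Illustration only (editor's sketch, not part of the patch):
 * decoding the abstract register IDs defined above. TMR_STS is
 * (PM1_STS | 0x01) == 0x0101, so the block and bit portions unpack
 * with the REGISTER_* macros; the function and variables are
 * hypothetical.
 */
void
register_id_example (void)
{
	u16 reg_id = TMR_STS;                      /* 0x0101 */
	u16 block  = REGISTER_BLOCK_ID (reg_id);   /* 0x0100 == PM1_STS */
	u16 bit    = REGISTER_BIT_ID (reg_id);     /* 0x01 */
}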
ACPI_COMMON_DEBUG_MEM_HEADER - u64 user_space; - -} acpi_debug_mem_block; - - -#define ACPI_MEM_LIST_GLOBAL 0 -#define ACPI_MEM_LIST_NSNODE 1 - -#define ACPI_MEM_LIST_FIRST_CACHE_LIST 2 -#define ACPI_MEM_LIST_STATE 2 -#define ACPI_MEM_LIST_PSNODE 3 -#define ACPI_MEM_LIST_PSNODE_EXT 4 -#define ACPI_MEM_LIST_OPERAND 5 -#define ACPI_MEM_LIST_WALK 6 -#define ACPI_MEM_LIST_MAX 6 -#define ACPI_NUM_MEM_LISTS 7 - - -typedef struct -{ - void *list_head; - u16 link_offset; - u16 max_cache_depth; - u16 cache_depth; - u16 object_size; - -#ifdef ACPI_DBG_TRACK_ALLOCATIONS - - /* Statistics for debug memory tracking only */ - - u32 total_allocated; - u32 total_freed; - u32 current_total_size; - u32 cache_requests; - u32 cache_hits; - char *list_name; -#endif - -} ACPI_MEMORY_LIST; - - -#endif /* __ACLOCAL_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acmacros.h linux.21rc5-ac2/drivers/acpi/include/acmacros.h --- linux.21rc5/drivers/acpi/include/acmacros.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acmacros.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,584 +0,0 @@ -/****************************************************************************** - * - * Name: acmacros.h - C macros for the entire subsystem. - * $Revision: 97 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACMACROS_H__ -#define __ACMACROS_H__ - - -/* - * Data manipulation macros - */ - -#ifndef LOWORD -#define LOWORD(l) ((u16)(NATIVE_UINT)(l)) -#endif - -#ifndef HIWORD -#define HIWORD(l) ((u16)((((NATIVE_UINT)(l)) >> 16) & 0xFFFF)) -#endif - -#ifndef LOBYTE -#define LOBYTE(l) ((u8)(u16)(l)) -#endif - -#ifndef HIBYTE -#define HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF)) -#endif - -#define BIT0(x) ((((x) & 0x01) > 0) ? 1 : 0) -#define BIT1(x) ((((x) & 0x02) > 0) ? 1 : 0) -#define BIT2(x) ((((x) & 0x04) > 0) ? 1 : 0) - -#define BIT3(x) ((((x) & 0x08) > 0) ? 1 : 0) -#define BIT4(x) ((((x) & 0x10) > 0) ? 1 : 0) -#define BIT5(x) ((((x) & 0x20) > 0) ? 1 : 0) -#define BIT6(x) ((((x) & 0x40) > 0) ? 1 : 0) -#define BIT7(x) ((((x) & 0x80) > 0) ? 1 : 0) - -#define LOW_BASE(w) ((u16) ((w) & 0x0000FFFF)) -#define MID_BASE(b) ((u8) (((b) & 0x00FF0000) >> 16)) -#define HI_BASE(b) ((u8) (((b) & 0xFF000000) >> 24)) -#define LOW_LIMIT(w) ((u16) ((w) & 0x0000FFFF)) -#define HI_LIMIT(b) ((u8) (((b) & 0x00FF0000) >> 16)) - - -#ifdef _IA16 -/* - * For 16-bit addresses, we have to assume that the upper 32 bits - * are zero. 
- */ -#ifndef LODWORD -#define LODWORD(l) (l) -#endif - -#ifndef HIDWORD -#define HIDWORD(l) (0) -#endif - -#define ACPI_GET_ADDRESS(a) ((a).lo) -#define ACPI_STORE_ADDRESS(a,b) {(a).hi=0;(a).lo=(b);} -#define ACPI_VALID_ADDRESS(a) ((a).hi | (a).lo) - -#else -#ifdef ACPI_NO_INTEGER64_SUPPORT -/* - * acpi_integer is 32-bits, no 64-bit support on this platform - */ -#ifndef LODWORD -#define LODWORD(l) ((u32)(l)) -#endif - -#ifndef HIDWORD -#define HIDWORD(l) (0) -#endif - -#define ACPI_GET_ADDRESS(a) (a) -#define ACPI_STORE_ADDRESS(a,b) ((a)=(b)) -#define ACPI_VALID_ADDRESS(a) (a) - -#else - -/* - * Full 64-bit address/integer on both 32-bit and 64-bit platforms - */ -#ifndef LODWORD -#define LODWORD(l) ((u32)(u64)(l)) -#endif - -#ifndef HIDWORD -#define HIDWORD(l) ((u32)(((*(uint64_struct *)(&l))).hi)) -#endif - -#define ACPI_GET_ADDRESS(a) (a) -#define ACPI_STORE_ADDRESS(a,b) ((a)=(b)) -#define ACPI_VALID_ADDRESS(a) (a) -#endif -#endif - - /* - * Extract a byte of data using a pointer. Any more than a byte and we - * get into potential alignment issues -- see the STORE macros below - */ -#define GET8(addr) (*(u8*)(addr)) - -/* Pointer arithmetic */ - - -#define POINTER_ADD(t,a,b) (t *) ((NATIVE_UINT)(a) + (NATIVE_UINT)(b)) -#define POINTER_DIFF(a,b) ((u32) ((NATIVE_UINT)(a) - (NATIVE_UINT)(b))) - -/* - * Macros for moving data around to/from buffers that are possibly unaligned. - * If the hardware supports the transfer of unaligned data, just do the store. - * Otherwise, we have to move one byte at a time. - */ - -#ifdef _HW_ALIGNMENT_SUPPORT - -/* The hardware supports unaligned transfers, just do the move */ - -#define MOVE_UNALIGNED16_TO_16(d,s) *(u16*)(d) = *(u16*)(s) -#define MOVE_UNALIGNED32_TO_32(d,s) *(u32*)(d) = *(u32*)(s) -#define MOVE_UNALIGNED16_TO_32(d,s) *(u32*)(d) = *(u16*)(s) -#define MOVE_UNALIGNED64_TO_64(d,s) *(u64*)(d) = *(u64*)(s) - -#else -/* - * The hardware does not support unaligned transfers. We must move the - * data one byte at a time. These macros work whether the source or - * the destination (or both) is/are unaligned.
- */ - -#define MOVE_UNALIGNED16_TO_16(d,s) {((u8 *)(d))[0] = ((u8 *)(s))[0];\ - ((u8 *)(d))[1] = ((u8 *)(s))[1];} - -#define MOVE_UNALIGNED32_TO_32(d,s) {((u8 *)(d))[0] = ((u8 *)(s))[0];\ - ((u8 *)(d))[1] = ((u8 *)(s))[1];\ - ((u8 *)(d))[2] = ((u8 *)(s))[2];\ - ((u8 *)(d))[3] = ((u8 *)(s))[3];} - -#define MOVE_UNALIGNED16_TO_32(d,s) {(*(u32*)(d)) = 0; MOVE_UNALIGNED16_TO_16(d,s);} - -#define MOVE_UNALIGNED64_TO_64(d,s) {((u8 *)(d))[0] = ((u8 *)(s))[0];\ - ((u8 *)(d))[1] = ((u8 *)(s))[1];\ - ((u8 *)(d))[2] = ((u8 *)(s))[2];\ - ((u8 *)(d))[3] = ((u8 *)(s))[3];\ - ((u8 *)(d))[4] = ((u8 *)(s))[4];\ - ((u8 *)(d))[5] = ((u8 *)(s))[5];\ - ((u8 *)(d))[6] = ((u8 *)(s))[6];\ - ((u8 *)(d))[7] = ((u8 *)(s))[7];} - -#endif - - -/* - * Fast power-of-two math macros for non-optimized compilers - */ - -#define _DIV(value,power_of2) ((u32) ((value) >> (power_of2))) -#define _MUL(value,power_of2) ((u32) ((value) << (power_of2))) -#define _MOD(value,divisor) ((u32) ((value) & ((divisor) -1))) - -#define DIV_2(a) _DIV(a,1) -#define MUL_2(a) _MUL(a,1) -#define MOD_2(a) _MOD(a,2) - -#define DIV_4(a) _DIV(a,2) -#define MUL_4(a) _MUL(a,2) -#define MOD_4(a) _MOD(a,4) - -#define DIV_8(a) _DIV(a,3) -#define MUL_8(a) _MUL(a,3) -#define MOD_8(a) _MOD(a,8) - -#define DIV_16(a) _DIV(a,4) -#define MUL_16(a) _MUL(a,4) -#define MOD_16(a) _MOD(a,16) - - -/* - * Rounding macros (Power of two boundaries only) - */ -#define ROUND_DOWN(value,boundary) ((value) & (~((boundary)-1))) -#define ROUND_UP(value,boundary) (((value) + ((boundary)-1)) & (~((boundary)-1))) - -#define ROUND_DOWN_TO_32_BITS(a) ROUND_DOWN(a,4) -#define ROUND_DOWN_TO_64_BITS(a) ROUND_DOWN(a,8) -#define ROUND_DOWN_TO_NATIVE_WORD(a) ROUND_DOWN(a,ALIGNED_ADDRESS_BOUNDARY) - -#define ROUND_UP_TO_32_bITS(a) ROUND_UP(a,4) -#define ROUND_UP_TO_64_bITS(a) ROUND_UP(a,8) -#define ROUND_UP_TO_NATIVE_WORD(a) ROUND_UP(a,ALIGNED_ADDRESS_BOUNDARY) - -#define ROUND_PTR_UP_TO_4(a,b) ((b *)(((NATIVE_UINT)(a) + 3) & ~3)) -#define ROUND_PTR_UP_TO_8(a,b) ((b *)(((NATIVE_UINT)(a) + 7) & ~7)) - -#define ROUND_BITS_UP_TO_BYTES(a) DIV_8((a) + 7) -#define ROUND_BITS_DOWN_TO_BYTES(a) DIV_8((a)) - -#define ROUND_UP_TO_1K(a) (((a) + 1023) >> 10) - -/* Generic (non-power-of-two) rounding */ - -#define ROUND_UP_TO(value,boundary) (((value) + ((boundary)-1)) / (boundary)) - -/* - * Bitmask creation - * Bit positions start at zero. - * MASK_BITS_ABOVE creates a mask starting AT the position and above - * MASK_BITS_BELOW creates a mask starting one bit BELOW the position - */ -#define MASK_BITS_ABOVE(position) (~(((u32)(-1)) << ((u32) (position)))) -#define MASK_BITS_BELOW(position) (((u32)(-1)) << ((u32) (position))) - - -/* Macros for GAS addressing */ - -#ifndef _IA16 - -#define ACPI_PCI_DEVICE_MASK (u64) 0x0000FFFF00000000 -#define ACPI_PCI_FUNCTION_MASK (u64) 0x00000000FFFF0000 -#define ACPI_PCI_REGISTER_MASK (u64) 0x000000000000FFFF - -#define ACPI_PCI_FUNCTION(a) (u16) ((((a) & ACPI_PCI_FUNCTION_MASK) >> 16)) -#define ACPI_PCI_DEVICE(a) (u16) ((((a) & ACPI_PCI_DEVICE_MASK) >> 32)) -#define ACPI_PCI_REGISTER(a) (u16) (((a) & ACPI_PCI_REGISTER_MASK)) - -#else - -/* No support for GAS and PCI IDs in 16-bit mode */ - -#define ACPI_PCI_FUNCTION(a) (u16) ((a) & 0xFFFF0000) -#define ACPI_PCI_DEVICE(a) (u16) ((a) & 0x0000FFFF) -#define ACPI_PCI_REGISTER(a) (u16) ((a) & 0x0000FFFF) - -#endif - -/* - * An acpi_handle (which is actually an acpi_namespace_node *) can appear in some contexts, - * such as on ap_obj_stack, where a pointer to an acpi_operand_object can also - * appear. 
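/*
 * Illustration only (editor's sketch, not part of the patch):
 * behavior of the power-of-two rounding helpers above. The boundary
 * argument must be a power of two for ROUND_UP and ROUND_DOWN; the
 * function and variables are hypothetical.
 */
void
rounding_example (void)
{
	u32 up    = ROUND_UP (17, 8);             /* (17 + 7) & ~7 == 24 */
	u32 down  = ROUND_DOWN (17, 8);           /* 17 & ~7 == 16 */
	u32 bytes = ROUND_BITS_UP_TO_BYTES (13);  /* (13 + 7) >> 3 == 2 */
}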
This macro is used to distinguish them. - * - * The Data_type field is the first field in both structures. - */ -#define VALID_DESCRIPTOR_TYPE(d,t) (((acpi_namespace_node *)d)->data_type == t) - - -/* Macro to test the object type */ - -#define IS_THIS_OBJECT_TYPE(d,t) (((acpi_operand_object *)d)->common.type == (u8)t) - -/* Macro to check the table flags for SINGLE or MULTIPLE tables are allowed */ - -#define IS_SINGLE_TABLE(x) (((x) & 0x01) == ACPI_TABLE_SINGLE ? 1 : 0) - -/* - * Macro to check if a pointer is within an ACPI table. - * Parameter (a) is the pointer to check. Parameter (b) must be defined - * as a pointer to an acpi_table_header. (b+1) then points past the header, - * and ((u8 *)b+b->Length) points one byte past the end of the table. - */ -#ifndef _IA16 -#define IS_IN_ACPI_TABLE(a,b) (((u8 *)(a) >= (u8 *)(b + 1)) &&\ - ((u8 *)(a) < ((u8 *)b + b->length))) - -#else -#define IS_IN_ACPI_TABLE(a,b) (_segment)(a) == (_segment)(b) &&\ - (((u8 *)(a) >= (u8 *)(b + 1)) &&\ - ((u8 *)(a) < ((u8 *)b + b->length))) -#endif - -/* - * Macros for the master AML opcode table - */ -#ifdef ACPI_DEBUG -#define ACPI_OP(name,Pargs,Iargs,class,type,flags) {Pargs,Iargs,flags,class,type,name} -#else -#define ACPI_OP(name,Pargs,Iargs,class,type,flags) {Pargs,Iargs,flags,class,type} -#endif - -#define ARG_TYPE_WIDTH 5 -#define ARG_1(x) ((u32)(x)) -#define ARG_2(x) ((u32)(x) << (1 * ARG_TYPE_WIDTH)) -#define ARG_3(x) ((u32)(x) << (2 * ARG_TYPE_WIDTH)) -#define ARG_4(x) ((u32)(x) << (3 * ARG_TYPE_WIDTH)) -#define ARG_5(x) ((u32)(x) << (4 * ARG_TYPE_WIDTH)) -#define ARG_6(x) ((u32)(x) << (5 * ARG_TYPE_WIDTH)) - -#define ARGI_LIST1(a) (ARG_1(a)) -#define ARGI_LIST2(a,b) (ARG_1(b)|ARG_2(a)) -#define ARGI_LIST3(a,b,c) (ARG_1(c)|ARG_2(b)|ARG_3(a)) -#define ARGI_LIST4(a,b,c,d) (ARG_1(d)|ARG_2(c)|ARG_3(b)|ARG_4(a)) -#define ARGI_LIST5(a,b,c,d,e) (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a)) -#define ARGI_LIST6(a,b,c,d,e,f) (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a)) - -#define ARGP_LIST1(a) (ARG_1(a)) -#define ARGP_LIST2(a,b) (ARG_1(a)|ARG_2(b)) -#define ARGP_LIST3(a,b,c) (ARG_1(a)|ARG_2(b)|ARG_3(c)) -#define ARGP_LIST4(a,b,c,d) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)) -#define ARGP_LIST5(a,b,c,d,e) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)) -#define ARGP_LIST6(a,b,c,d,e,f) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f)) - -#define GET_CURRENT_ARG_TYPE(list) (list & ((u32) 0x1F)) -#define INCREMENT_ARG_LIST(list) (list >>= ((u32) ARG_TYPE_WIDTH)) - - -/* - * Build a GAS structure from earlier ACPI table entries (V1.0 and 0.71 extensions) - * - * 1) Address space - * 2) Length in bytes -- convert to length in bits - * 3) Bit offset is zero - * 4) Reserved field is zero - * 5) Expand address to 64 bits - */ -#define ASL_BUILD_GAS_FROM_ENTRY(a,b,c,d) {a.address_space_id = (u8) d;\ - a.register_bit_width = (u8) MUL_8 (b);\ - a.register_bit_offset = 0;\ - a.reserved = 0;\ - ACPI_STORE_ADDRESS (a.address,c);} - -/* ACPI V1.0 entries -- address space is always I/O */ - -#define ASL_BUILD_GAS_FROM_V1_ENTRY(a,b,c) ASL_BUILD_GAS_FROM_ENTRY(a,b,c,ACPI_ADR_SPACE_SYSTEM_IO) - - -/* - * Reporting macros that are never compiled out - */ - -#define PARAM_LIST(pl) pl - -/* - * Error reporting. These versions add callers module and line#. Since - * _THIS_MODULE gets compiled out when ACPI_DEBUG isn't defined, only - * use it in debug mode. 
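/*
 * Illustration only (editor's sketch, not part of the patch): how
 * the 5-bit packed argument lists above are built and consumed. The
 * small integers stand in for real argument-type constants; the
 * function and variables are hypothetical.
 */
void
arg_list_example (void)
{
	u32 arg_list = ARGP_LIST3 (1, 2, 3);             /* 1 | (2 << 5) | (3 << 10) */
	u32 current  = GET_CURRENT_ARG_TYPE (arg_list);  /* 1 */

	INCREMENT_ARG_LIST (arg_list);                   /* shift right 5 bits */
	current = GET_CURRENT_ARG_TYPE (arg_list);       /* now 2 */
}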
- */ - -#ifdef ACPI_DEBUG - -#define REPORT_INFO(fp) {acpi_ut_report_info(_THIS_MODULE,__LINE__,_COMPONENT); \ - acpi_os_printf PARAM_LIST(fp);} -#define REPORT_ERROR(fp) {acpi_ut_report_error(_THIS_MODULE,__LINE__,_COMPONENT); \ - acpi_os_printf PARAM_LIST(fp);} -#define REPORT_WARNING(fp) {acpi_ut_report_warning(_THIS_MODULE,__LINE__,_COMPONENT); \ - acpi_os_printf PARAM_LIST(fp);} - -#else - -#define REPORT_INFO(fp) {acpi_ut_report_info("ACPI",__LINE__,_COMPONENT); \ - acpi_os_printf PARAM_LIST(fp);} -#define REPORT_ERROR(fp) {acpi_ut_report_error("ACPI",__LINE__,_COMPONENT); \ - acpi_os_printf PARAM_LIST(fp);} -#define REPORT_WARNING(fp) {acpi_ut_report_warning("ACPI",__LINE__,_COMPONENT); \ - acpi_os_printf PARAM_LIST(fp);} - -#endif - -/* Error reporting. These versions pass thru the module and line# */ - -#define _REPORT_INFO(a,b,c,fp) {acpi_ut_report_info(a,b,c); \ - acpi_os_printf PARAM_LIST(fp);} -#define _REPORT_ERROR(a,b,c,fp) {acpi_ut_report_error(a,b,c); \ - acpi_os_printf PARAM_LIST(fp);} -#define _REPORT_WARNING(a,b,c,fp) {acpi_ut_report_warning(a,b,c); \ - acpi_os_printf PARAM_LIST(fp);} - -/* - * Debug macros that are conditionally compiled - */ - -#ifdef ACPI_DEBUG - -#define MODULE_NAME(name) static char *_THIS_MODULE = name; - -/* - * Function entry tracing. - * The first parameter should be the procedure name as a quoted string. This is declared - * as a local string ("_Proc_name) so that it can be also used by the function exit macros below. - */ - -#define PROC_NAME(a) acpi_debug_print_info _dbg; \ - _dbg.component_id = _COMPONENT; \ - _dbg.proc_name = a; \ - _dbg.module_name = _THIS_MODULE; - -#define FUNCTION_TRACE(a) PROC_NAME(a)\ - acpi_ut_trace(__LINE__,&_dbg) -#define FUNCTION_TRACE_PTR(a,b) PROC_NAME(a)\ - acpi_ut_trace_ptr(__LINE__,&_dbg,(void *)b) -#define FUNCTION_TRACE_U32(a,b) PROC_NAME(a)\ - acpi_ut_trace_u32(__LINE__,&_dbg,(u32)b) -#define FUNCTION_TRACE_STR(a,b) PROC_NAME(a)\ - acpi_ut_trace_str(__LINE__,&_dbg,(NATIVE_CHAR *)b) - -#define FUNCTION_ENTRY() acpi_ut_track_stack_ptr() - -/* - * Function exit tracing. - * WARNING: These macros include a return statement. This is usually considered - * bad form, but having a separate exit macro is very ugly and difficult to maintain. - * One of the FUNCTION_TRACE macros above must be used in conjunction with these macros - * so that "_Proc_name" is defined. - */ -#define return_VOID {acpi_ut_exit(__LINE__,&_dbg);return;} -#define return_ACPI_STATUS(s) {acpi_ut_status_exit(__LINE__,&_dbg,s);return(s);} -#define return_VALUE(s) {acpi_ut_value_exit(__LINE__,&_dbg,s);return(s);} -#define return_PTR(s) {acpi_ut_ptr_exit(__LINE__,&_dbg,(u8 *)s);return(s);} - - -/* Conditional execution */ - -#define DEBUG_EXEC(a) a -#define NORMAL_EXEC(a) - -#define DEBUG_DEFINE(a) a; -#define DEBUG_ONLY_MEMBERS(a) a; -#define _OPCODE_NAMES -#define _VERBOSE_STRUCTURES - - -/* Stack and buffer dumping */ - -#define DUMP_STACK_ENTRY(a) acpi_ex_dump_operand(a) -#define DUMP_OPERANDS(a,b,c,d,e) acpi_ex_dump_operands(a,b,c,d,e,_THIS_MODULE,__LINE__) - - -#define DUMP_ENTRY(a,b) acpi_ns_dump_entry (a,b) -#define DUMP_TABLES(a,b) acpi_ns_dump_tables(a,b) -#define DUMP_PATHNAME(a,b,c,d) acpi_ns_dump_pathname(a,b,c,d) -#define DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a) -#define DUMP_BUFFER(a,b) acpi_ut_dump_buffer((u8 *)a,b,DB_BYTE_DISPLAY,_COMPONENT) -#define BREAK_MSG(a) acpi_os_signal (ACPI_SIGNAL_BREAKPOINT,(a)) - - -/* - * Generate INT3 on ACPI_ERROR (Debug only!) 
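/*
 * Illustration only (editor's sketch, not part of the patch): the
 * intended pairing of the entry/exit macros above. acpi_ex_example
 * is a hypothetical function. In a debug build FUNCTION_TRACE logs
 * entry and return_ACPI_STATUS logs exit; in a non-debug build the
 * trace disappears and return_ACPI_STATUS becomes a plain return.
 */
MODULE_NAME ("exexample")

acpi_status
acpi_ex_example (
	acpi_operand_object     *obj_desc)
{
	FUNCTION_TRACE ("Ex_example");

	if (!obj_desc) {
		return_ACPI_STATUS (AE_BAD_PARAMETER);
	}

	return_ACPI_STATUS (AE_OK);
}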
- */ - -#define ERROR_BREAK -#ifdef ERROR_BREAK -#define BREAK_ON_ERROR(lvl) if ((lvl)&ACPI_ERROR) acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,"Fatal error encountered\n") -#else -#define BREAK_ON_ERROR(lvl) -#endif - -/* - * Master debug print macros - * Print iff: - * 1) Debug print for the current component is enabled - * 2) Debug error level or trace level for the print statement is enabled - */ - -#define ACPI_DEBUG_PRINT(pl) acpi_ut_debug_print PARAM_LIST(pl) -#define ACPI_DEBUG_PRINT_RAW(pl) acpi_ut_debug_print_raw PARAM_LIST(pl) - - -#else -/* - * This is the non-debug case -- make everything go away, - * leaving no executable debug code! - */ - -#define MODULE_NAME(name) -#define _THIS_MODULE "" - -#define DEBUG_EXEC(a) -#define NORMAL_EXEC(a) a; - -#define DEBUG_DEFINE(a) -#define DEBUG_ONLY_MEMBERS(a) -#define PROC_NAME(a) -#define FUNCTION_TRACE(a) -#define FUNCTION_TRACE_PTR(a,b) -#define FUNCTION_TRACE_U32(a,b) -#define FUNCTION_TRACE_STR(a,b) -#define FUNCTION_EXIT -#define FUNCTION_STATUS_EXIT(s) -#define FUNCTION_VALUE_EXIT(s) -#define FUNCTION_ENTRY() -#define DUMP_STACK_ENTRY(a) -#define DUMP_OPERANDS(a,b,c,d,e) -#define DUMP_ENTRY(a,b) -#define DUMP_TABLES(a,b) -#define DUMP_PATHNAME(a,b,c,d) -#define DUMP_RESOURCE_LIST(a) -#define DUMP_BUFFER(a,b) -#define ACPI_DEBUG_PRINT(pl) -#define ACPI_DEBUG_PRINT_RAW(pl) -#define BREAK_MSG(a) - -#define return_VOID return -#define return_ACPI_STATUS(s) return(s) -#define return_VALUE(s) return(s) -#define return_PTR(s) return(s) - -#endif - -/* - * Some code only gets executed when the debugger is built in. - * Note that this is entirely independent of whether the - * DEBUG_PRINT stuff (set by ACPI_DEBUG) is on, or not. - */ -#ifdef ENABLE_DEBUGGER -#define DEBUGGER_EXEC(a) a -#else -#define DEBUGGER_EXEC(a) -#endif - - -/* - * For 16-bit code, we want to shrink some things even though - * we are using ACPI_DEBUG to get the debug output - */ -#ifdef _IA16 -#undef DEBUG_ONLY_MEMBERS -#undef _VERBOSE_STRUCTURES -#define DEBUG_ONLY_MEMBERS(a) -#endif - - -#ifdef ACPI_DEBUG -/* - * 1) Set name to blanks - * 2) Copy the object name - */ -#define ADD_OBJECT_NAME(a,b) MEMSET (a->common.name, ' ', sizeof (a->common.name));\ - STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name)) -#else - -#define ADD_OBJECT_NAME(a,b) -#endif - - -/* - * Memory allocation tracking (DEBUG ONLY) - */ - -#ifndef ACPI_DBG_TRACK_ALLOCATIONS - -/* Memory allocation */ - -#define ACPI_MEM_ALLOCATE(a) acpi_os_allocate(a) -#define ACPI_MEM_CALLOCATE(a) acpi_os_callocate(a) -#define ACPI_MEM_FREE(a) acpi_os_free(a) -#define ACPI_MEM_TRACKING(a) - - -#else - -/* Memory allocation */ - -#define ACPI_MEM_ALLOCATE(a) acpi_ut_allocate(a,_COMPONENT,_THIS_MODULE,__LINE__) -#define ACPI_MEM_CALLOCATE(a) acpi_ut_callocate(a, _COMPONENT,_THIS_MODULE,__LINE__) -#define ACPI_MEM_FREE(a) acpi_ut_free(a,_COMPONENT,_THIS_MODULE,__LINE__) -#define ACPI_MEM_TRACKING(a) a - -#endif /* ACPI_DBG_TRACK_ALLOCATIONS */ - - -#define ACPI_GET_STACK_POINTER _asm {mov eax, ebx} - -#endif /* ACMACROS_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acnamesp.h linux.21rc5-ac2/drivers/acpi/include/acnamesp.h --- linux.21rc5/drivers/acpi/include/acnamesp.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acnamesp.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,423 +0,0 @@ -/****************************************************************************** - * - * Name: acnamesp.h - Namespace subcomponent 
prototypes and defines - * $Revision: 110 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACNAMESP_H__ -#define __ACNAMESP_H__ - - -/* To search the entire name space, pass this as Search_base */ - -#define NS_ALL ((acpi_handle)0) - -/* - * Elements of Acpi_ns_properties are bit significant - * and should be one-to-one with values of acpi_object_type - */ -#define NSP_NORMAL 0 -#define NSP_NEWSCOPE 1 /* a definition of this type opens a name scope */ -#define NSP_LOCAL 2 /* suppress search of enclosing scopes */ - - -/* Definitions of the predefined namespace names */ - -#define ACPI_UNKNOWN_NAME (u32) 0x3F3F3F3F /* Unknown name is "????" */ -#define ACPI_ROOT_NAME (u32) 0x2F202020 /* Root name is "/ " */ -#define ACPI_SYS_BUS_NAME (u32) 0x5F53425F /* Sys bus name is "_SB_" */ - -#define NS_ROOT_PATH "/" -#define NS_SYSTEM_BUS "_SB_" - - -/* Flags for Acpi_ns_lookup, Acpi_ns_search_and_enter */ - -#define NS_NO_UPSEARCH 0 -#define NS_SEARCH_PARENT 0x01 -#define NS_DONT_OPEN_SCOPE 0x02 -#define NS_NO_PEER_SEARCH 0x04 -#define NS_ERROR_IF_FOUND 0x08 - -#define NS_WALK_UNLOCK TRUE -#define NS_WALK_NO_UNLOCK FALSE - - -acpi_status -acpi_ns_load_namespace ( - void); - -acpi_status -acpi_ns_initialize_objects ( - void); - -acpi_status -acpi_ns_initialize_devices ( - void); - - -/* Namespace init - nsxfinit */ - -acpi_status -acpi_ns_init_one_device ( - acpi_handle obj_handle, - u32 nesting_level, - void *context, - void **return_value); - -acpi_status -acpi_ns_init_one_object ( - acpi_handle obj_handle, - u32 level, - void *context, - void **return_value); - - -acpi_status -acpi_ns_walk_namespace ( - acpi_object_type8 type, - acpi_handle start_object, - u32 max_depth, - u8 unlock_before_callback, - acpi_walk_callback user_function, - void *context, - void **return_value); - -acpi_namespace_node * -acpi_ns_get_next_node ( - acpi_object_type8 type, - acpi_namespace_node *parent, - acpi_namespace_node *child); - -acpi_status -acpi_ns_delete_namespace_by_owner ( - u16 table_id); - - -/* Namespace loading - nsload */ - -acpi_status -acpi_ns_one_complete_parse ( - u32 pass_number, - acpi_table_desc *table_desc); - -acpi_status -acpi_ns_parse_table ( - acpi_table_desc *table_desc, - acpi_namespace_node *scope); - -acpi_status -acpi_ns_load_table ( - acpi_table_desc *table_desc, - acpi_namespace_node *node); - -acpi_status -acpi_ns_load_table_by_type ( - acpi_table_type table_type); - - -/* - * Top-level namespace access - nsaccess - */ - - -acpi_status -acpi_ns_root_initialize ( - void); - -acpi_status -acpi_ns_lookup ( - acpi_generic_state *scope_info, - NATIVE_CHAR *name, - acpi_object_type8 type, - operating_mode interpreter_mode, - u32 flags, - acpi_walk_state *walk_state, - 
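/*
 * Illustration only (editor's sketch, not part of the patch): the
 * callback shape acpi_ns_walk_namespace (declared above) expects,
 * mirroring the acpi_ns_init_one_device prototype. count_node and
 * the context cast are hypothetical; returning an error status stops
 * the walk.
 */
static acpi_status
count_node (
	acpi_handle             obj_handle,
	u32                     nesting_level,
	void                    *context,
	void                    **return_value)
{
	(*(u32 *) context)++;
	return (AE_OK);
}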
acpi_namespace_node **ret_node); - - -/* - * Named object allocation/deallocation - nsalloc - */ - - -acpi_namespace_node * -acpi_ns_create_node ( - u32 name); - -void -acpi_ns_delete_node ( - acpi_namespace_node *node); - -acpi_status -acpi_ns_delete_namespace_subtree ( - acpi_namespace_node *parent_handle); - -void -acpi_ns_detach_object ( - acpi_namespace_node *node); - -void -acpi_ns_delete_children ( - acpi_namespace_node *parent); - - -/* - * Namespace modification - nsmodify - */ - -acpi_status -acpi_ns_unload_namespace ( - acpi_handle handle); - -acpi_status -acpi_ns_delete_subtree ( - acpi_handle start_handle); - - -/* - * Namespace dump/print utilities - nsdump - */ - -void -acpi_ns_dump_tables ( - acpi_handle search_base, - u32 max_depth); - -void -acpi_ns_dump_entry ( - acpi_handle handle, - u32 debug_level); - -acpi_status -acpi_ns_dump_pathname ( - acpi_handle handle, - NATIVE_CHAR *msg, - u32 level, - u32 component); - -void -acpi_ns_dump_root_devices ( - void); - -void -acpi_ns_dump_objects ( - acpi_object_type8 type, - u8 display_type, - u32 max_depth, - u32 ownder_id, - acpi_handle start_handle); - - -/* - * Namespace evaluation functions - nseval - */ - -acpi_status -acpi_ns_evaluate_by_handle ( - acpi_namespace_node *prefix_node, - acpi_operand_object **params, - acpi_operand_object **return_object); - -acpi_status -acpi_ns_evaluate_by_name ( - NATIVE_CHAR *pathname, - acpi_operand_object **params, - acpi_operand_object **return_object); - -acpi_status -acpi_ns_evaluate_relative ( - acpi_namespace_node *prefix_node, - NATIVE_CHAR *pathname, - acpi_operand_object **params, - acpi_operand_object **return_object); - -acpi_status -acpi_ns_execute_control_method ( - acpi_namespace_node *method_node, - acpi_operand_object **params, - acpi_operand_object **return_obj_desc); - -acpi_status -acpi_ns_get_object_value ( - acpi_namespace_node *object_node, - acpi_operand_object **return_obj_desc); - - -/* - * Parent/Child/Peer utility functions - nsfamily - */ - -acpi_name -acpi_ns_find_parent_name ( - acpi_namespace_node *node_to_search); - -u8 -acpi_ns_exist_downstream_sibling ( - acpi_namespace_node *this_node); - - -/* - * Scope manipulation - nsscope - */ - -u32 -acpi_ns_opens_scope ( - acpi_object_type8 type); - -NATIVE_CHAR * -acpi_ns_get_table_pathname ( - acpi_namespace_node *node); - -NATIVE_CHAR * -acpi_ns_name_of_current_scope ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ns_handle_to_pathname ( - acpi_handle obj_handle, - u32 *buf_size, - NATIVE_CHAR *user_buffer); - -u8 -acpi_ns_pattern_match ( - acpi_namespace_node *obj_node, - NATIVE_CHAR *search_for); - -acpi_status -acpi_ns_get_node ( - NATIVE_CHAR *pathname, - acpi_namespace_node *in_prefix_node, - acpi_namespace_node **out_node); - -u32 -acpi_ns_get_pathname_length ( - acpi_namespace_node *node); - - -/* - * Object management for NTEs - nsobject - */ - -acpi_status -acpi_ns_attach_object ( - acpi_namespace_node *node, - acpi_operand_object *object, - acpi_object_type8 type); - - -/* - * Namespace searching and entry - nssearch - */ - -acpi_status -acpi_ns_search_and_enter ( - u32 entry_name, - acpi_walk_state *walk_state, - acpi_namespace_node *node, - operating_mode interpreter_mode, - acpi_object_type8 type, - u32 flags, - acpi_namespace_node **ret_node); - -acpi_status -acpi_ns_search_node ( - u32 entry_name, - acpi_namespace_node *node, - acpi_object_type8 type, - acpi_namespace_node **ret_node); - -void -acpi_ns_install_node ( - acpi_walk_state *walk_state, - acpi_namespace_node *parent_node, /* 
Parent */ - acpi_namespace_node *node, /* New Child*/ - acpi_object_type8 type); - - -/* - * Utility functions - nsutils - */ - -u8 -acpi_ns_valid_root_prefix ( - NATIVE_CHAR prefix); - -u8 -acpi_ns_valid_path_separator ( - NATIVE_CHAR sep); - -acpi_object_type8 -acpi_ns_get_type ( - acpi_namespace_node *node); - -void * -acpi_ns_get_attached_object ( - acpi_namespace_node *node); - -u32 -acpi_ns_local ( - acpi_object_type8 type); - -acpi_status -acpi_ns_build_internal_name ( - acpi_namestring_info *info); - -acpi_status -acpi_ns_get_internal_name_length ( - acpi_namestring_info *info); - -acpi_status -acpi_ns_internalize_name ( - NATIVE_CHAR *dotted_name, - NATIVE_CHAR **converted_name); - -acpi_status -acpi_ns_externalize_name ( - u32 internal_name_length, - NATIVE_CHAR *internal_name, - u32 *converted_name_length, - NATIVE_CHAR **converted_name); - -acpi_namespace_node * -acpi_ns_map_handle_to_node ( - acpi_handle handle); - -acpi_handle -acpi_ns_convert_entry_to_handle( - acpi_namespace_node *node); - -void -acpi_ns_terminate ( - void); - -acpi_namespace_node * -acpi_ns_get_parent_object ( - acpi_namespace_node *node); - - -acpi_namespace_node * -acpi_ns_get_next_valid_node ( - acpi_namespace_node *node); - - -#endif /* __ACNAMESP_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acobject.h linux.21rc5-ac2/drivers/acpi/include/acobject.h --- linux.21rc5/drivers/acpi/include/acobject.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acobject.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,451 +0,0 @@ - -/****************************************************************************** - * - * Name: acobject.h - Definition of acpi_operand_object (Internal object only) - * $Revision: 93 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ACOBJECT_H -#define _ACOBJECT_H - - -/* - * The acpi_operand_object is used to pass AML operands from the dispatcher - * to the interpreter, and to keep track of the various handlers such as - * address space handlers and notify handlers. The object is a constant - * size in order to allow them to be cached and reused. - * - * All variants of the acpi_operand_object are defined with the same - * sequence of field types, with fields that are not used in a particular - * variant being named "Reserved". This is not strictly necessary, but - * may in some circumstances simplify understanding if these structures - * need to be displayed in a debugger having limited (or no) support for - * union types. 
It also simplifies some debug code in Dump_table() which - * dumps multi-level values: fetching Buffer.Pointer suffices to pick up - * the value or next level for any of several types. - */ - -/****************************************************************************** - * - * Common Descriptors - * - *****************************************************************************/ - -/* - * Common area for all objects. - * - * Data_type is used to differentiate between internal descriptors, and MUST - * be the first byte in this structure. - */ - - -#define ACPI_OBJECT_COMMON_HEADER /* SIZE/ALIGNMENT: 32-bits plus trailing 8-bit flag */\ - u8 data_type; /* To differentiate various internal objs */\ - u8 type; /* acpi_object_type */\ - u16 reference_count; /* For object deletion management */\ - u8 flags; \ - -/* Defines for flag byte above */ - -#define AOPOBJ_STATIC_ALLOCATION 0x1 -#define AOPOBJ_STATIC_POINTER 0x2 -#define AOPOBJ_DATA_VALID 0x4 -#define AOPOBJ_ZERO_CONST 0x4 -#define AOPOBJ_INITIALIZED 0x8 - - -/* - * Common bitfield for the field objects - * "Field Datum" -- a datum from the actual field object - * "Buffer Datum" -- a datum from a user buffer, read from or to be written to the field - */ -#define ACPI_COMMON_FIELD_INFO /* SIZE/ALIGNMENT: 24 bits + three 32-bit values */\ - u8 access_flags;\ - u16 bit_length; /* Length of field in bits */\ - u32 base_byte_offset; /* Byte offset within containing object */\ - u8 access_bit_width; /* Read/Write size in bits (from ASL Access_type)*/\ - u8 access_byte_width; /* Read/Write size in bytes */\ - u8 update_rule; /* How neighboring field bits are handled */\ - u8 lock_rule; /* Global Lock: 1 = "Must Lock" */\ - u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\ - u8 datum_valid_bits; /* Valid bit in first "Field datum" */\ - u8 end_field_valid_bits; /* Valid bits in the last "field datum" */\ - u8 end_buffer_valid_bits; /* Valid bits in the last "buffer datum" */\ - u32 value; /* Value to store into the Bank or Index register */ - - -/* Access flag bits */ - -#define AFIELD_SINGLE_DATUM 0x1 - - -/* - * Fields common to both Strings and Buffers - */ -#define ACPI_COMMON_BUFFER_INFO \ - u32 length; - - -/****************************************************************************** - * - * Individual Object Descriptors - * - *****************************************************************************/ - - -typedef struct /* COMMON */ -{ - ACPI_OBJECT_COMMON_HEADER - -} ACPI_OBJECT_COMMON; - - -typedef struct /* CACHE_LIST */ -{ - ACPI_OBJECT_COMMON_HEADER - union acpi_operand_obj *next; /* Link for object cache and internal lists*/ - -} ACPI_OBJECT_CACHE_LIST; - - -typedef struct /* NUMBER - has value */ -{ - ACPI_OBJECT_COMMON_HEADER - - acpi_integer value; - -} ACPI_OBJECT_INTEGER; - - -typedef struct /* STRING - has length and pointer - Null terminated, ASCII characters only */ -{ - ACPI_OBJECT_COMMON_HEADER - ACPI_COMMON_BUFFER_INFO - NATIVE_CHAR *pointer; /* String value in AML stream or in allocated space */ - -} ACPI_OBJECT_STRING; - - -typedef struct /* BUFFER - has length and pointer - not null terminated */ -{ - ACPI_OBJECT_COMMON_HEADER - ACPI_COMMON_BUFFER_INFO - u8 *pointer; /* Buffer value in AML stream or in allocated space */ - -} ACPI_OBJECT_BUFFER; - - -typedef struct /* PACKAGE - has count, elements, next element */ -{ - ACPI_OBJECT_COMMON_HEADER - - u32 count; /* # of elements in package */ - union acpi_operand_obj **elements; /* Array of pointers to Acpi_objects */ - union 
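/*
 * Illustration only (editor's sketch, not part of the patch): since
 * Data_type is the first byte of both an acpi_operand_object and an
 * acpi_namespace_node, code holding an untyped pointer can tell them
 * apart -- which is what VALID_DESCRIPTOR_TYPE (acmacros.h, removed
 * above) relies on. ACPI_DESC_TYPE_NAMED is a stand-in name for the
 * real type-tag constant; the function is hypothetical.
 */
void
descriptor_example (void *desc)
{
	if (VALID_DESCRIPTOR_TYPE (desc, ACPI_DESC_TYPE_NAMED)) {
		/* desc is an acpi_namespace_node, not an operand object */
	}
}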
acpi_operand_obj **next_element; /* used only while initializing */ - -} ACPI_OBJECT_PACKAGE; - - -typedef struct /* DEVICE - has handle and notification handler/context */ -{ - ACPI_OBJECT_COMMON_HEADER - - union acpi_operand_obj *sys_handler; /* Handler for system notifies */ - union acpi_operand_obj *drv_handler; /* Handler for driver notifies */ - union acpi_operand_obj *addr_handler; /* Handler for Address space */ - -} ACPI_OBJECT_DEVICE; - - -typedef struct /* EVENT */ -{ - ACPI_OBJECT_COMMON_HEADER - void *semaphore; - -} ACPI_OBJECT_EVENT; - - -#define INFINITE_CONCURRENCY 0xFF - -typedef struct /* METHOD */ -{ - ACPI_OBJECT_COMMON_HEADER - u8 method_flags; - u8 param_count; - - u32 aml_length; - - void *semaphore; - u8 *aml_start; - - u8 concurrency; - u8 thread_count; - acpi_owner_id owning_id; - -} ACPI_OBJECT_METHOD; - - -typedef struct acpi_obj_mutex /* MUTEX */ -{ - ACPI_OBJECT_COMMON_HEADER - u16 sync_level; - u16 acquisition_depth; - - void *semaphore; - void *owner; - union acpi_operand_obj *prev; /* Link for list of acquired mutexes */ - union acpi_operand_obj *next; /* Link for list of acquired mutexes */ - -} ACPI_OBJECT_MUTEX; - - -typedef struct /* REGION */ -{ - ACPI_OBJECT_COMMON_HEADER - - u8 space_id; - u32 length; - ACPI_PHYSICAL_ADDRESS address; - union acpi_operand_obj *extra; /* Pointer to executable AML (in region definition) */ - - union acpi_operand_obj *addr_handler; /* Handler for system notifies */ - acpi_namespace_node *node; /* containing object */ - union acpi_operand_obj *next; - -} ACPI_OBJECT_REGION; - - -typedef struct /* POWER RESOURCE - has Handle and notification handler/context*/ -{ - ACPI_OBJECT_COMMON_HEADER - - u32 system_level; - u32 resource_order; - - union acpi_operand_obj *sys_handler; /* Handler for system notifies */ - union acpi_operand_obj *drv_handler; /* Handler for driver notifies */ - -} ACPI_OBJECT_POWER_RESOURCE; - - -typedef struct /* PROCESSOR - has Handle and notification handler/context*/ -{ - ACPI_OBJECT_COMMON_HEADER - - u32 proc_id; - u32 length; - ACPI_IO_ADDRESS address; - - union acpi_operand_obj *sys_handler; /* Handler for system notifies */ - union acpi_operand_obj *drv_handler; /* Handler for driver notifies */ - union acpi_operand_obj *addr_handler; /* Handler for Address space */ - -} ACPI_OBJECT_PROCESSOR; - - -typedef struct /* THERMAL ZONE - has Handle and Handler/Context */ -{ - ACPI_OBJECT_COMMON_HEADER - - union acpi_operand_obj *sys_handler; /* Handler for system notifies */ - union acpi_operand_obj *drv_handler; /* Handler for driver notifies */ - union acpi_operand_obj *addr_handler; /* Handler for Address space */ - -} ACPI_OBJECT_THERMAL_ZONE; - - -/* - * Fields. All share a common header/info field. 
- */ - -typedef struct /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */ -{ - ACPI_OBJECT_COMMON_HEADER - ACPI_COMMON_FIELD_INFO - union acpi_operand_obj *region_obj; /* Containing Operation Region object */ - /* (REGION/BANK fields only) */ -} ACPI_OBJECT_FIELD_COMMON; - - -typedef struct /* REGION FIELD */ -{ - ACPI_OBJECT_COMMON_HEADER - ACPI_COMMON_FIELD_INFO - union acpi_operand_obj *region_obj; /* Containing Op_region object */ - -} ACPI_OBJECT_REGION_FIELD; - - -typedef struct /* BANK FIELD */ -{ - ACPI_OBJECT_COMMON_HEADER - ACPI_COMMON_FIELD_INFO - - union acpi_operand_obj *region_obj; /* Containing Op_region object */ - union acpi_operand_obj *bank_register_obj; /* Bank_select Register object */ - -} ACPI_OBJECT_BANK_FIELD; - - -typedef struct /* INDEX FIELD */ -{ - ACPI_OBJECT_COMMON_HEADER - ACPI_COMMON_FIELD_INFO - - /* - * No "Region_obj" pointer needed since the Index and Data registers - * are each field definitions unto themselves. - */ - union acpi_operand_obj *index_obj; /* Index register */ - union acpi_operand_obj *data_obj; /* Data register */ - - -} ACPI_OBJECT_INDEX_FIELD; - - -/* The Buffer_field is different in that it is part of a Buffer, not an Op_region */ - -typedef struct /* BUFFER FIELD */ -{ - ACPI_OBJECT_COMMON_HEADER - ACPI_COMMON_FIELD_INFO - - union acpi_operand_obj *extra; /* Pointer to executable AML (in field definition) */ - acpi_namespace_node *node; /* Parent (containing) object node */ - union acpi_operand_obj *buffer_obj; /* Containing Buffer object */ - -} ACPI_OBJECT_BUFFER_FIELD; - - -/* - * Handlers - */ - -typedef struct /* NOTIFY HANDLER */ -{ - ACPI_OBJECT_COMMON_HEADER - - acpi_namespace_node *node; /* Parent device */ - acpi_notify_handler handler; - void *context; - -} ACPI_OBJECT_NOTIFY_HANDLER; - - -/* Flags for address handler */ - -#define ADDR_HANDLER_DEFAULT_INSTALLED 0x1 - - -typedef struct /* ADDRESS HANDLER */ -{ - ACPI_OBJECT_COMMON_HEADER - - u8 space_id; - u16 hflags; - acpi_adr_space_handler handler; - - acpi_namespace_node *node; /* Parent device */ - void *context; - acpi_adr_space_setup setup; - union acpi_operand_obj *region_list; /* regions using this handler */ - union acpi_operand_obj *next; - -} ACPI_OBJECT_ADDR_HANDLER; - - -/* - * The Reference object type is used for these opcodes: - * Arg[0-6], Local[0-7], Index_op, Name_op, Zero_op, One_op, Ones_op, Debug_op - */ - -typedef struct /* Reference - Local object type */ -{ - ACPI_OBJECT_COMMON_HEADER - - u8 target_type; /* Used for Index_op */ - u16 opcode; - u32 offset; /* Used for Arg_op, Local_op, and Index_op */ - - void *object; /* Name_op=>HANDLE to obj, Index_op=>acpi_operand_object */ - acpi_namespace_node *node; - union acpi_operand_obj **where; - -} ACPI_OBJECT_REFERENCE; - - -/* - * Extra object is used as additional storage for types that - * have AML code in their declarations (Term_args) that must be - * evaluated at run time. 
- * - * Currently: Region and Field_unit types - */ - -typedef struct /* EXTRA */ -{ - ACPI_OBJECT_COMMON_HEADER - u8 byte_fill1; - u16 word_fill1; - u32 aml_length; - u8 *aml_start; - acpi_namespace_node *method_REG; /* _REG method for this region (if any) */ - void *region_context; /* Region-specific data */ - -} ACPI_OBJECT_EXTRA; - - -/****************************************************************************** - * - * acpi_operand_object Descriptor - a giant union of all of the above - * - *****************************************************************************/ - -typedef union acpi_operand_obj -{ - ACPI_OBJECT_COMMON common; - ACPI_OBJECT_CACHE_LIST cache; - ACPI_OBJECT_INTEGER integer; - ACPI_OBJECT_STRING string; - ACPI_OBJECT_BUFFER buffer; - ACPI_OBJECT_PACKAGE package; - ACPI_OBJECT_BUFFER_FIELD buffer_field; - ACPI_OBJECT_DEVICE device; - ACPI_OBJECT_EVENT event; - ACPI_OBJECT_METHOD method; - ACPI_OBJECT_MUTEX mutex; - ACPI_OBJECT_REGION region; - ACPI_OBJECT_POWER_RESOURCE power_resource; - ACPI_OBJECT_PROCESSOR processor; - ACPI_OBJECT_THERMAL_ZONE thermal_zone; - ACPI_OBJECT_FIELD_COMMON common_field; - ACPI_OBJECT_REGION_FIELD field; - ACPI_OBJECT_BANK_FIELD bank_field; - ACPI_OBJECT_INDEX_FIELD index_field; - ACPI_OBJECT_REFERENCE reference; - ACPI_OBJECT_NOTIFY_HANDLER notify_handler; - ACPI_OBJECT_ADDR_HANDLER addr_handler; - ACPI_OBJECT_EXTRA extra; - -} acpi_operand_object; - -#endif /* _ACOBJECT_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acoutput.h linux.21rc5-ac2/drivers/acpi/include/acoutput.h --- linux.21rc5/drivers/acpi/include/acoutput.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acoutput.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,182 +0,0 @@ -/****************************************************************************** - * - * Name: acoutput.h -- debug output - * $Revision: 84 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACOUTPUT_H__ -#define __ACOUTPUT_H__ - -/* - * Debug levels and component IDs. These are used to control the - * granularity of the output of the DEBUG_PRINT macro -- on a per- - * component basis and a per-exception-type basis. 
- */ - -/* Component IDs are used in the global "Debug_layer" */ - -#define ACPI_UTILITIES 0x00000001 -#define ACPI_HARDWARE 0x00000002 -#define ACPI_EVENTS 0x00000004 -#define ACPI_TABLES 0x00000008 -#define ACPI_NAMESPACE 0x00000010 -#define ACPI_PARSER 0x00000020 -#define ACPI_DISPATCHER 0x00000040 -#define ACPI_EXECUTER 0x00000080 -#define ACPI_RESOURCES 0x00000100 -#define ACPI_DEBUGGER 0x00000200 -#define ACPI_OS_SERVICES 0x00000400 - -#define ACPI_BUS 0x00010000 -#define ACPI_SYSTEM 0x00020000 -#define ACPI_POWER 0x00040000 -#define ACPI_EC 0x00080000 -#define ACPI_AC_ADAPTER 0x00100000 -#define ACPI_BATTERY 0x00200000 -#define ACPI_BUTTON 0x00400000 -#define ACPI_PROCESSOR 0x00800000 -#define ACPI_THERMAL 0x01000000 -#define ACPI_FAN 0x02000000 - -#define ACPI_ALL_COMPONENTS 0x0FFFFFFF - -#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS) - - -#define ACPI_COMPILER 0x10000000 -#define ACPI_TOOLS 0x20000000 - - -/* - * Raw debug output levels, do not use these in the DEBUG_PRINT macros - */ - -#define ACPI_LV_OK 0x00000001 -#define ACPI_LV_INFO 0x00000002 -#define ACPI_LV_WARN 0x00000004 -#define ACPI_LV_ERROR 0x00000008 -#define ACPI_LV_FATAL 0x00000010 -#define ACPI_LV_DEBUG_OBJECT 0x00000020 -#define ACPI_LV_ALL_EXCEPTIONS 0x0000003F - - -/* Trace verbosity level 1 [Standard Trace Level] */ - -#define ACPI_LV_PARSE 0x00000040 -#define ACPI_LV_LOAD 0x00000080 -#define ACPI_LV_DISPATCH 0x00000100 -#define ACPI_LV_EXEC 0x00000200 -#define ACPI_LV_NAMES 0x00000400 -#define ACPI_LV_OPREGION 0x00000800 -#define ACPI_LV_BFIELD 0x00001000 -#define ACPI_LV_TABLES 0x00002000 -#define ACPI_LV_VALUES 0x00004000 -#define ACPI_LV_OBJECTS 0x00008000 -#define ACPI_LV_RESOURCES 0x00010000 -#define ACPI_LV_USER_REQUESTS 0x00020000 -#define ACPI_LV_PACKAGE 0x00040000 -#define ACPI_LV_INIT 0x00080000 -#define ACPI_LV_VERBOSITY1 0x000FFF40 | ACPI_LV_ALL_EXCEPTIONS - -/* Trace verbosity level 2 [Function tracing and memory allocation] */ - -#define ACPI_LV_ALLOCATIONS 0x00100000 -#define ACPI_LV_FUNCTIONS 0x00200000 -#define ACPI_LV_VERBOSITY2 0x00300000 | ACPI_LV_VERBOSITY1 -#define ACPI_LV_ALL ACPI_LV_VERBOSITY2 - -/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ - -#define ACPI_LV_MUTEX 0x01000000 -#define ACPI_LV_THREADS 0x02000000 -#define ACPI_LV_IO 0x04000000 -#define ACPI_LV_INTERRUPTS 0x08000000 -#define ACPI_LV_VERBOSITY3 0x0F000000 | ACPI_LV_VERBOSITY2 - -/* - * Debug level macros that are used in the DEBUG_PRINT macros - */ - -#define ACPI_DEBUG_LEVEL(dl) dl,__LINE__,&_dbg - -/* Exception level -- used in the global "Debug_level" */ - -#define ACPI_DB_OK ACPI_DEBUG_LEVEL (ACPI_LV_OK) -#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO) -#define ACPI_DB_WARN ACPI_DEBUG_LEVEL (ACPI_LV_WARN) -#define ACPI_DB_ERROR ACPI_DEBUG_LEVEL (ACPI_LV_ERROR) -#define ACPI_DB_FATAL ACPI_DEBUG_LEVEL (ACPI_LV_FATAL) -#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT) -#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS) - - -/* Trace level -- also used in the global "Debug_level" */ - -#define ACPI_DB_THREADS ACPI_DEBUG_LEVEL (ACPI_LV_THREADS) -#define ACPI_DB_PARSE ACPI_DEBUG_LEVEL (ACPI_LV_PARSE) -#define ACPI_DB_DISPATCH ACPI_DEBUG_LEVEL (ACPI_LV_DISPATCH) -#define ACPI_DB_LOAD ACPI_DEBUG_LEVEL (ACPI_LV_LOAD) -#define ACPI_DB_EXEC ACPI_DEBUG_LEVEL (ACPI_LV_EXEC) -#define ACPI_DB_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_NAMES) -#define ACPI_DB_OPREGION ACPI_DEBUG_LEVEL (ACPI_LV_OPREGION) -#define ACPI_DB_BFIELD ACPI_DEBUG_LEVEL (ACPI_LV_BFIELD) -#define 
ACPI_DB_TABLES ACPI_DEBUG_LEVEL (ACPI_LV_TABLES) -#define ACPI_DB_FUNCTIONS ACPI_DEBUG_LEVEL (ACPI_LV_FUNCTIONS) -#define ACPI_DB_VALUES ACPI_DEBUG_LEVEL (ACPI_LV_VALUES) -#define ACPI_DB_OBJECTS ACPI_DEBUG_LEVEL (ACPI_LV_OBJECTS) -#define ACPI_DB_ALLOCATIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALLOCATIONS) -#define ACPI_DB_RESOURCES ACPI_DEBUG_LEVEL (ACPI_LV_RESOURCES) -#define ACPI_DB_IO ACPI_DEBUG_LEVEL (ACPI_LV_IO) -#define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS) -#define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS) -#define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE) -#define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX) -#define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT) - -#define ACPI_DB_ALL ACPI_DEBUG_LEVEL (0x0FFFFF80) - - -/* Exceptionally verbose output -- also used in the global "Debug_level" */ - -#define ACPI_DB_AML_DISASSEMBLE 0x10000000 -#define ACPI_DB_VERBOSE_INFO 0x20000000 -#define ACPI_DB_FULL_TABLES 0x40000000 -#define ACPI_DB_EVENTS 0x80000000 - -#define ACPI_DB_VERBOSE 0xF0000000 - - -/* Defaults for Debug_level, debug and normal */ - -#define DEBUG_DEFAULT (ACPI_LV_OK | ACPI_LV_WARN | ACPI_LV_ERROR | ACPI_LV_DEBUG_OBJECT) -#define NORMAL_DEFAULT (ACPI_LV_OK | ACPI_LV_WARN | ACPI_LV_ERROR | ACPI_LV_DEBUG_OBJECT) -#define DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) - -/* Misc defines */ - -#define HEX 0x01 -#define ASCII 0x02 -#define FULL_ADDRESS 0x04 -#define CHARS_PER_LINE 16 /* used in Dump_buf function */ - - -#endif /* __ACOUTPUT_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acparser.h linux.21rc5-ac2/drivers/acpi/include/acparser.h --- linux.21rc5/drivers/acpi/include/acparser.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acparser.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,293 +0,0 @@ -/****************************************************************************** - * - * Module Name: acparser.h - AML Parser subcomponent prototypes and defines - * $Revision: 54 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
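/*
 * Illustration only (editor's sketch, not part of the patch): the
 * filtering rule the level/component bits above implement -- a
 * message is printed only when its component bit is set in the
 * global layer mask and its level bit in the global level mask.
 * acpi_dbg_layer and acpi_dbg_level are assumed names for those
 * globals; the function is hypothetical.
 */
void
debug_filter_example (void)
{
	if ((acpi_dbg_layer & ACPI_PARSER) &&
	    (acpi_dbg_level & ACPI_LV_PARSE)) {
		/* acpi_ut_debug_print would emit the message here */
	}
}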
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#ifndef __ACPARSER_H__ -#define __ACPARSER_H__ - - -#define OP_HAS_RETURN_VALUE 1 - -/* variable # arguments */ - -#define ACPI_VAR_ARGS ACPI_UINT32_MAX - -/* maximum virtual address */ - -#define ACPI_MAX_AML ((u8 *)(~0UL)) - - -#define ACPI_PARSE_DELETE_TREE 0x0001 -#define ACPI_PARSE_NO_TREE_DELETE 0x0000 -#define ACPI_PARSE_TREE_MASK 0x0001 - -#define ACPI_PARSE_LOAD_PASS1 0x0010 -#define ACPI_PARSE_LOAD_PASS2 0x0020 -#define ACPI_PARSE_EXECUTE 0x0030 -#define ACPI_PARSE_MODE_MASK 0x0030 - -/* psapi - Parser external interfaces */ - -acpi_status -acpi_psx_load_table ( - u8 *pcode_addr, - u32 pcode_length); - -acpi_status -acpi_psx_execute ( - acpi_namespace_node *method_node, - acpi_operand_object **params, - acpi_operand_object **return_obj_desc); - -/****************************************************************************** - * - * Parser interfaces - * - *****************************************************************************/ - - -/* psargs - Parse AML opcode arguments */ - -u8 * -acpi_ps_get_next_package_end ( - acpi_parse_state *parser_state); - -u32 -acpi_ps_get_next_package_length ( - acpi_parse_state *parser_state); - -NATIVE_CHAR * -acpi_ps_get_next_namestring ( - acpi_parse_state *parser_state); - -void -acpi_ps_get_next_simple_arg ( - acpi_parse_state *parser_state, - u32 arg_type, /* type of argument */ - acpi_parse_object *arg); /* (OUT) argument data */ - -void -acpi_ps_get_next_namepath ( - acpi_parse_state *parser_state, - acpi_parse_object *arg, - u32 *arg_count, - u8 method_call); - -acpi_parse_object * -acpi_ps_get_next_field ( - acpi_parse_state *parser_state); - -acpi_parse_object * -acpi_ps_get_next_arg ( - acpi_parse_state *parser_state, - u32 arg_type, - u32 *arg_count); - - -/* psopcode - AML Opcode information */ - -const acpi_opcode_info * -acpi_ps_get_opcode_info ( - u16 opcode); - -NATIVE_CHAR * -acpi_ps_get_opcode_name ( - u16 opcode); - - -/* psparse - top level parsing routines */ - -acpi_status -acpi_ps_find_object ( - acpi_walk_state *walk_state, - acpi_parse_object **out_op); - -void -acpi_ps_delete_parse_tree ( - acpi_parse_object *root); - -acpi_status -acpi_ps_parse_loop ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ps_parse_aml ( - acpi_walk_state *walk_state); - -acpi_status -acpi_ps_parse_table ( - u8 *aml, - u32 aml_size, - acpi_parse_downwards descending_callback, - acpi_parse_upwards ascending_callback, - acpi_parse_object **root_object); - -u16 -acpi_ps_peek_opcode ( - acpi_parse_state *state); - - -/* psscope - Scope stack management routines */ - - -acpi_status -acpi_ps_init_scope ( - acpi_parse_state *parser_state, - acpi_parse_object *root); - -acpi_parse_object * -acpi_ps_get_parent_scope ( - acpi_parse_state *state); - -u8 -acpi_ps_has_completed_scope ( - acpi_parse_state *parser_state); - -void -acpi_ps_pop_scope ( - acpi_parse_state *parser_state, - acpi_parse_object **op, - u32 *arg_list, - u32 *arg_count); - -acpi_status -acpi_ps_push_scope ( - acpi_parse_state *parser_state, - acpi_parse_object *op, - u32 remaining_args, - u32 arg_count); - -void -acpi_ps_cleanup_scope ( - acpi_parse_state *state); - - -/* pstree - parse tree manipulation routines */ - -void -acpi_ps_append_arg( - acpi_parse_object *op, - acpi_parse_object *arg); - -acpi_parse_object* -acpi_ps_find ( - acpi_parse_object 
*scope, - NATIVE_CHAR *path, - u16 opcode, - u32 create); - -acpi_parse_object * -acpi_ps_get_arg( - acpi_parse_object *op, - u32 argn); - -acpi_parse_object * -acpi_ps_get_child ( - acpi_parse_object *op); - -acpi_parse_object * -acpi_ps_get_depth_next ( - acpi_parse_object *origin, - acpi_parse_object *op); - - -/* pswalk - parse tree walk routines */ - -acpi_status -acpi_ps_walk_parsed_aml ( - acpi_parse_object *start_op, - acpi_parse_object *end_op, - acpi_operand_object *mth_desc, - acpi_namespace_node *start_node, - acpi_operand_object **params, - acpi_operand_object **caller_return_desc, - acpi_owner_id owner_id, - acpi_parse_downwards descending_callback, - acpi_parse_upwards ascending_callback); - -acpi_status -acpi_ps_get_next_walk_op ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_parse_upwards ascending_callback); - - -/* psutils - parser utilities */ - -void -acpi_ps_init_op ( - acpi_parse_object *op, - u16 opcode); - -acpi_parse_object * -acpi_ps_alloc_op ( - u16 opcode); - -void -acpi_ps_free_op ( - acpi_parse_object *op); - -void -acpi_ps_delete_parse_cache ( - void); - -u8 -acpi_ps_is_leading_char ( - u32 c); - -u8 -acpi_ps_is_prefix_char ( - u32 c); - -u32 -acpi_ps_get_name( - acpi_parse_object *op); - -void -acpi_ps_set_name( - acpi_parse_object *op, - u32 name); - - -/* psdump - display parser tree */ - -u32 -acpi_ps_sprint_path ( - NATIVE_CHAR *buffer_start, - u32 buffer_size, - acpi_parse_object *op); - -u32 -acpi_ps_sprint_op ( - NATIVE_CHAR *buffer_start, - u32 buffer_size, - acpi_parse_object *op); - -void -acpi_ps_show ( - acpi_parse_object *op); - - -#endif /* __ACPARSER_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acpi.h linux.21rc5-ac2/drivers/acpi/include/acpi.h --- linux.21rc5/drivers/acpi/include/acpi.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acpi.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,51 +0,0 @@ -/****************************************************************************** - * - * Name: acpi.h - Master include file, Publics and external data. - * $Revision: 54 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACPI_H__ -#define __ACPI_H__ - -/* - * Common includes for all ACPI driver files - * We put them here because we don't want to duplicate them - * in the rest of the source code again and again. 
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acpi.h linux.21rc5-ac2/drivers/acpi/include/acpi.h
--- linux.21rc5/drivers/acpi/include/acpi.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/acpi.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,51 +0,0 @@
-/******************************************************************************
- *
- * Name: acpi.h - Master include file, Publics and external data.
- * $Revision: 54 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACPI_H__
-#define __ACPI_H__
-
-/*
- * Common includes for all ACPI driver files
- * We put them here because we don't want to duplicate them
- * in the rest of the source code again and again.
- */
-#include "acconfig.h" /* Configuration constants */
-#include "platform/acenv.h" /* Target environment specific items */
-#include "actypes.h" /* Fundamental common data types */
-#include "acexcep.h" /* ACPI exception codes */
-#include "acmacros.h" /* C macros */
-#include "actbl.h" /* ACPI table definitions */
-#include "aclocal.h" /* Internal data types */
-#include "acoutput.h" /* Error output and Debug macros */
-#include "acpiosxf.h" /* Interfaces to the ACPI-to-OS layer*/
-#include "acpixf.h" /* ACPI core subsystem external interfaces */
-#include "acobject.h" /* ACPI internal object */
-#include "acstruct.h" /* Common structures */
-#include "acglobal.h" /* All global variables */
-#include "achware.h" /* Hardware defines and interfaces */
-#include "acutils.h" /* Utility interfaces */
-
-
-#endif /* __ACPI_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acpiosxf.h linux.21rc5-ac2/drivers/acpi/include/acpiosxf.h
--- linux.21rc5/drivers/acpi/include/acpiosxf.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/acpiosxf.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,307 +0,0 @@
-
-/******************************************************************************
- *
- * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These
- *       interfaces must be implemented by OSL to interface the
- *       ACPI components to the host operating system.
- *
- *****************************************************************************/
-
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACPIOSXF_H__
-#define __ACPIOSXF_H__
-
-#include "platform/acenv.h"
-#include "actypes.h"
-
-
-/* Priorities for Acpi_os_queue_for_execution */
-
-#define OSD_PRIORITY_GPE 1
-#define OSD_PRIORITY_HIGH 2
-#define OSD_PRIORITY_MED 3
-#define OSD_PRIORITY_LO 4
-
-#define ACPI_NO_UNIT_LIMIT ((u32) -1)
-#define ACPI_MUTEX_SEM 1
-
-
-/* Functions for Acpi_os_signal */
-
-#define ACPI_SIGNAL_FATAL 0
-#define ACPI_SIGNAL_BREAKPOINT 1
-
-typedef struct acpi_fatal_info
-{
-    u32 type;
-    u32 code;
-    u32 argument;
-
-} ACPI_SIGNAL_FATAL_INFO;
-
-
-/*
- * Types specific to the OS service interfaces
- */
-
-typedef
-u32 (*OSD_HANDLER) (
-    void *context);
-
-typedef
-void (*OSD_EXECUTION_CALLBACK) (
-    void *context);
-
-
-/*
- * OSL Initialization and shutdown primitives
- */
-
-acpi_status
-acpi_os_initialize (
-    void);
-
-acpi_status
-acpi_os_terminate (
-    void);
-
-acpi_status
-acpi_os_get_root_pointer (
-    u32 flags,
-    ACPI_PHYSICAL_ADDRESS *rsdp_physical_address);
-
-
-/*
- * Synchronization primitives
- */
-
-acpi_status
-acpi_os_create_semaphore (
-    u32 max_units,
-    u32 initial_units,
-    acpi_handle *out_handle);
-
-acpi_status
-acpi_os_delete_semaphore (
-    acpi_handle handle);
-
-acpi_status
-acpi_os_wait_semaphore (
-    acpi_handle handle,
-    u32 units,
-    u32 timeout);
-
-acpi_status
-acpi_os_signal_semaphore (
-    acpi_handle handle,
-    u32 units);
-
-
-/*
- * Memory allocation and mapping
- */
-
-void *
-acpi_os_allocate (
-    u32 size);
-
-void *
-acpi_os_callocate (
-    u32 size);
-
-void
-acpi_os_free (
-    void * memory);
-
-acpi_status
-acpi_os_map_memory (
-    ACPI_PHYSICAL_ADDRESS physical_address,
-    u32 length,
-    void **logical_address);
-
-void
-acpi_os_unmap_memory (
-    void *logical_address,
-    u32 length);
-
-acpi_status
-acpi_os_get_physical_address (
-    void *logical_address,
-    ACPI_PHYSICAL_ADDRESS *physical_address);
-
-
-/*
- * Interrupt handlers
- */
-
-acpi_status
-acpi_os_install_interrupt_handler (
-    u32 interrupt_number,
-    OSD_HANDLER service_routine,
-    void *context);
-
-acpi_status
-acpi_os_remove_interrupt_handler (
-    u32 interrupt_number,
-    OSD_HANDLER service_routine);
-
-
-/*
- * Threads and Scheduling
- */
-
-u32
-acpi_os_get_thread_id (
-    void);
-
-acpi_status
-acpi_os_queue_for_execution (
-    u32 priority,
-    OSD_EXECUTION_CALLBACK function,
-    void *context);
-
-void
-acpi_os_sleep (
-    u32 seconds,
-    u32 milliseconds);
-
-void
-acpi_os_stall (
-    u32 microseconds);
-
-
-/*
- * Platform and hardware-independent I/O interfaces
- */
-
-acpi_status
-acpi_os_read_port (
-    ACPI_IO_ADDRESS address,
-    void *value,
-    u32 width);
-
-
-acpi_status
-acpi_os_write_port (
-    ACPI_IO_ADDRESS address,
-    NATIVE_UINT value,
-    u32 width);
-
-
-/*
- * Platform and hardware-independent physical memory interfaces
- */
-
-acpi_status
-acpi_os_read_memory (
-    ACPI_PHYSICAL_ADDRESS address,
-    void *value,
-    u32 width);
-
-
-acpi_status
-acpi_os_write_memory (
-    ACPI_PHYSICAL_ADDRESS address,
-    NATIVE_UINT value,
-    u32 width);
-
-
-/*
- * Platform and hardware-independent PCI configuration space access
- */
-
-acpi_status
-acpi_os_read_pci_configuration (
-    acpi_pci_id *pci_id,
-    u32 register,
-    void *value,
-    u32 width);
-
-
-acpi_status
-acpi_os_write_pci_configuration (
-    acpi_pci_id *pci_id,
-    u32 register,
-    NATIVE_UINT value,
-    u32 width);
-
-
-/*
- * Miscellaneous
- */
-
-u8
-acpi_os_readable (
-    void *pointer,
-    u32 length);
-
-
-u8
-acpi_os_writable (
-    void *pointer,
-    u32 length);
-
-u32
-acpi_os_get_timer (
-    void);
-
-acpi_status
-acpi_os_signal (
-    u32 function,
-    void *info);
-
-/*
- * Debug print routines
- */
-
-s32
-acpi_os_printf (
-    const NATIVE_CHAR *format,
-    ...);
-
-s32
-acpi_os_vprintf (
-    const NATIVE_CHAR *format,
-    va_list args);
-
-
-/*
- * Debug input
- */
-
-u32
-acpi_os_get_line (
-    NATIVE_CHAR *buffer);
-
-
-/*
- * Debug
- */
-
-void
-acpi_os_dbg_assert(
-    void *failed_assertion,
-    void *file_name,
-    u32 line_number,
-    NATIVE_CHAR *message);
-
-
-#endif /* __ACPIOSXF_H__ */
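Every acpi_os_* routine declared above is a hook the host OS must supply; the ACPI CA core calls them instead of touching kernel services directly. As a rough sketch of the contract only, not the kernel's actual OSL file, a trivial Linux-side implementation of three of these primitives might look like the following (assumes <linux/slab.h> and <linux/sched.h>; the real OSL adds error checking):

/* Hedged sketch of OSL primitives on a Linux host */
void *
acpi_os_allocate (u32 size)
{
    return kmalloc (size, GFP_KERNEL);
}

void
acpi_os_free (void *memory)
{
    kfree (memory);
}

void
acpi_os_sleep (u32 seconds, u32 milliseconds)
{
    /* relinquish the CPU rather than busy-wait (cf. acpi_os_stall) */
    current->state = TASK_INTERRUPTIBLE;
    schedule_timeout (HZ * seconds + (HZ * milliseconds) / 1000);
}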
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acpixf.h linux.21rc5-ac2/drivers/acpi/include/acpixf.h
--- linux.21rc5/drivers/acpi/include/acpixf.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/acpixf.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,326 +0,0 @@
-
-/******************************************************************************
- *
- * Name: acpixf.h - External interfaces to the ACPI subsystem
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __ACXFACE_H__
-#define __ACXFACE_H__
-
-#include "actypes.h"
-#include "actbl.h"
-
-
- /*
- * Global interfaces
- */
-
-acpi_status
-acpi_initialize_subsystem (
-    void);
-
-acpi_status
-acpi_enable_subsystem (
-    u32 flags);
-
-acpi_status
-acpi_terminate (
-    void);
-
-acpi_status
-acpi_subsystem_status (
-    void);
-
-acpi_status
-acpi_enable (
-    void);
-
-acpi_status
-acpi_disable (
-    void);
-
-acpi_status
-acpi_get_system_info (
-    acpi_buffer *ret_buffer);
-
-const char *
-acpi_format_exception (
-    acpi_status exception);
-
-
-/*
- * ACPI Memory manager
- */
-
-void *
-acpi_allocate (
-    u32 size);
-
-void *
-acpi_callocate (
-    u32 size);
-
-void
-acpi_free (
-    void *address);
-
-
-/*
- * ACPI table manipulation interfaces
- */
-
-acpi_status
-acpi_find_root_pointer (
-    u32 flags,
-    ACPI_PHYSICAL_ADDRESS *rsdp_physical_address);
-
-acpi_status
-acpi_load_tables (
-    void);
-
-acpi_status
-acpi_load_table (
-    acpi_table_header *table_ptr);
-
-acpi_status
-acpi_unload_table (
-    acpi_table_type table_type);
-
-acpi_status
-acpi_get_table_header (
-    acpi_table_type table_type,
-    u32 instance,
-    acpi_table_header *out_table_header);
-
-acpi_status
-acpi_get_table (
-    acpi_table_type table_type,
-    u32 instance,
-    acpi_buffer *ret_buffer);
-
-acpi_status
-acpi_get_firmware_table (
-    acpi_string signature,
-    u32 instance,
-    u32 flags,
-    acpi_table_header **table_pointer);
-
-
-/*
- * Namespace and name interfaces
- */
-
-acpi_status
-acpi_walk_namespace (
-    acpi_object_type type,
-    acpi_handle start_object,
-    u32 max_depth,
-    acpi_walk_callback user_function,
-    void *context,
-    void * *return_value);
-
-acpi_status
-acpi_get_devices (
-    NATIVE_CHAR *HID,
-    acpi_walk_callback user_function,
-    void *context,
-    void **return_value);
-
-acpi_status
-acpi_get_name (
-    acpi_handle handle,
-    u32 name_type,
-    acpi_buffer *ret_path_ptr);
-
-acpi_status
-acpi_get_handle (
-    acpi_handle parent,
-    acpi_string pathname,
-    acpi_handle *ret_handle);
-
-
-/*
- * Object manipulation and enumeration
- */
-
-acpi_status
-acpi_evaluate_object (
-    acpi_handle object,
-    acpi_string pathname,
-    acpi_object_list *parameter_objects,
-    acpi_buffer *return_object_buffer);
-
-acpi_status
-acpi_get_object_info (
-    acpi_handle device,
-    acpi_device_info *info);
-
-acpi_status
-acpi_get_next_object (
-    acpi_object_type type,
-    acpi_handle parent,
-    acpi_handle child,
-    acpi_handle *out_handle);
-
-acpi_status
-acpi_get_type (
-    acpi_handle object,
-    acpi_object_type *out_type);
-
-acpi_status
-acpi_get_parent (
-    acpi_handle object,
-    acpi_handle *out_handle);
-
-
-/*
- * Event handler interfaces
- */
-
-acpi_status
-acpi_install_fixed_event_handler (
-    u32 acpi_event,
-    acpi_event_handler handler,
-    void *context);
-
-acpi_status
-acpi_remove_fixed_event_handler (
-    u32 acpi_event,
-    acpi_event_handler handler);
-
-acpi_status
-acpi_install_notify_handler (
-    acpi_handle device,
-    u32 handler_type,
-    acpi_notify_handler handler,
-    void *context);
-
-acpi_status
-acpi_remove_notify_handler (
-    acpi_handle device,
-    u32 handler_type,
-    acpi_notify_handler handler);
-
-acpi_status
-acpi_install_address_space_handler (
-    acpi_handle device,
-    ACPI_ADR_SPACE_TYPE space_id,
-    acpi_adr_space_handler handler,
-    acpi_adr_space_setup setup,
-    void *context);
-
-acpi_status
-acpi_remove_address_space_handler (
-    acpi_handle device,
-    ACPI_ADR_SPACE_TYPE space_id,
-    acpi_adr_space_handler handler);
-
-acpi_status
-acpi_install_gpe_handler (
-    u32 gpe_number,
-    u32 type,
-    acpi_gpe_handler handler,
-    void *context);
-
-acpi_status
-acpi_acquire_global_lock (
-    void);
-
-acpi_status
-acpi_release_global_lock (
-    void);
-
-acpi_status
-acpi_remove_gpe_handler (
-    u32 gpe_number,
-    acpi_gpe_handler handler);
-
-acpi_status
-acpi_enable_event (
-    u32 acpi_event,
-    u32 type,
-    u32 flags);
-
-acpi_status
-acpi_disable_event (
-    u32 acpi_event,
-    u32 type,
-    u32 flags);
-
-acpi_status
-acpi_clear_event (
-    u32 acpi_event,
-    u32 type);
-
-acpi_status
-acpi_get_event_status (
-    u32 acpi_event,
-    u32 type,
-    acpi_event_status *event_status);
-
-/*
- * Resource interfaces
- */
-
-acpi_status
-acpi_get_current_resources(
-    acpi_handle device_handle,
-    acpi_buffer *ret_buffer);
-
-acpi_status
-acpi_get_possible_resources(
-    acpi_handle device_handle,
-    acpi_buffer *ret_buffer);
-
-acpi_status
-acpi_set_current_resources (
-    acpi_handle device_handle,
-    acpi_buffer *in_buffer);
-
-acpi_status
-acpi_get_irq_routing_table (
-    acpi_handle bus_device_handle,
-    acpi_buffer *ret_buffer);
-
-
-/*
- * Hardware (ACPI device) interfaces
- */
-
-acpi_status
-acpi_set_firmware_waking_vector (
-    ACPI_PHYSICAL_ADDRESS physical_address);
-
-acpi_status
-acpi_get_firmware_waking_vector (
-    ACPI_PHYSICAL_ADDRESS *physical_address);
-
-acpi_status
-acpi_enter_sleep_state (
-    u8 sleep_state);
-
-acpi_status
-acpi_leave_sleep_state (
-    u8 sleep_state);
-
-#endif /* __ACXFACE_H__ */
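These are the interfaces drivers call into: resolve a namespace path to a handle, then evaluate an object or method on it. A hedged sketch of the calling convention under the usual acpi_buffer {length, pointer} layout from actypes.h; the path and method name here are illustrative, not taken from the patch:

/* Sketch: evaluate the hypothetical method \_SB_.PCI0._STA */
static acpi_status
example_sta (void)
{
    acpi_handle handle;
    acpi_buffer result;
    acpi_status status;

    status = acpi_get_handle (NULL, "\\_SB_.PCI0", &handle);
    if (ACPI_FAILURE (status))
        return status;

    /* a zero-length buffer asks ACPI CA to report the size needed */
    result.length = 0;
    result.pointer = NULL;
    return acpi_evaluate_object (handle, "_STA", NULL, &result);
}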
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acresrc.h linux.21rc5-ac2/drivers/acpi/include/acresrc.h
--- linux.21rc5/drivers/acpi/include/acresrc.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/acresrc.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,320 +0,0 @@
-/******************************************************************************
- *
- * Name: acresrc.h - Resource Manager function prototypes
- * $Revision: 25 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACRESRC_H__
-#define __ACRESRC_H__
-
-
-/*
- * Function prototypes called from Acpi* APIs
- */
-
-acpi_status
-acpi_rs_get_prt_method_data (
-    acpi_handle handle,
-    acpi_buffer *ret_buffer);
-
-
-acpi_status
-acpi_rs_get_crs_method_data (
-    acpi_handle handle,
-    acpi_buffer *ret_buffer);
-
-acpi_status
-acpi_rs_get_prs_method_data (
-    acpi_handle handle,
-    acpi_buffer *ret_buffer);
-
-acpi_status
-acpi_rs_set_srs_method_data (
-    acpi_handle handle,
-    acpi_buffer *ret_buffer);
-
-acpi_status
-acpi_rs_create_resource_list (
-    acpi_operand_object *byte_stream_buffer,
-    u8 *output_buffer,
-    u32 *output_buffer_length);
-
-acpi_status
-acpi_rs_create_byte_stream (
-    acpi_resource *linked_list_buffer,
-    u8 *output_buffer,
-    u32 *output_buffer_length);
-
-acpi_status
-acpi_rs_create_pci_routing_table (
-    acpi_operand_object *method_return_object,
-    u8 *output_buffer,
-    u32 *output_buffer_length);
-
-
-/*
- *Function prototypes called from Acpi_rs_create*APIs
- */
-
-void
-acpi_rs_dump_resource_list (
-    acpi_resource *resource);
-
-void
-acpi_rs_dump_irq_list (
-    u8 *route_table);
-
-acpi_status
-acpi_rs_get_byte_stream_start (
-    u8 *byte_stream_buffer,
-    u8 **byte_stream_start,
-    u32 *size);
-
-acpi_status
-acpi_rs_calculate_list_length (
-    u8 *byte_stream_buffer,
-    u32 byte_stream_buffer_length,
-    u32 *size_needed);
-
-acpi_status
-acpi_rs_calculate_byte_stream_length (
-    acpi_resource *linked_list_buffer,
-    u32 *size_needed);
-
-acpi_status
-acpi_rs_calculate_pci_routing_table_length (
-    acpi_operand_object *package_object,
-    u32 *buffer_size_needed);
-
-acpi_status
-acpi_rs_byte_stream_to_list (
-    u8 *byte_stream_buffer,
-    u32 byte_stream_buffer_length,
-    u8 **output_buffer);
-
-acpi_status
-acpi_rs_list_to_byte_stream (
-    acpi_resource *linked_list,
-    u32 byte_stream_size_needed,
-    u8 **output_buffer);
-
-acpi_status
-acpi_rs_io_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_fixed_io_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_io_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_fixed_io_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_irq_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_irq_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_dma_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_dma_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_address16_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_address16_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_address32_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_address32_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_address64_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_address64_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_start_dependent_functions_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_end_dependent_functions_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_start_dependent_functions_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_end_dependent_functions_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_memory24_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_memory24_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_memory32_range_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size
-);
-
-acpi_status
-acpi_rs_fixed_memory32_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_memory32_range_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_fixed_memory32_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_extended_irq_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_extended_irq_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_end_tag_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_end_tag_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-acpi_status
-acpi_rs_vendor_resource (
-    u8 *byte_stream_buffer,
-    u32 *bytes_consumed,
-    u8 **output_buffer,
-    u32 *structure_size);
-
-acpi_status
-acpi_rs_vendor_stream (
-    acpi_resource *linked_list,
-    u8 **output_buffer,
-    u32 *bytes_consumed);
-
-u8
-acpi_rs_get_resource_type (
-    u8 resource_start_byte);
-
-#endif /* __ACRESRC_H__ */
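The per-descriptor _resource/_stream pairs above convert between the AML resource byte stream and the linked acpi_resource list, and dispatch starts from the first byte of each descriptor (cf. acpi_rs_get_resource_type). How that first byte is typically decoded per the ACPI specification, as an illustrative sketch with hypothetical mask names:

/* bit 7 of the start byte: 1 = large descriptor, 0 = small */
#define RESOURCE_DESC_TYPE_MASK  0x80
/* small descriptors carry the item name in bits 6:3 */
#define RESOURCE_DESC_SMALL_MASK 0x78

static u8
resource_type (u8 start_byte)
{
    if (start_byte & RESOURCE_DESC_TYPE_MASK)
        return start_byte;      /* large: whole byte names the type */
    return (start_byte & RESOURCE_DESC_SMALL_MASK) >> 3;
}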
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acstruct.h linux.21rc5-ac2/drivers/acpi/include/acstruct.h
--- linux.21rc5/drivers/acpi/include/acstruct.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/acstruct.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,199 +0,0 @@
-/******************************************************************************
- *
- * Name: acstruct.h - Internal structs
- * $Revision: 10 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACSTRUCT_H__
-#define __ACSTRUCT_H__
-
-
-/*****************************************************************************
- *
- * Tree walking typedefs and structs
- *
- ****************************************************************************/
-
-
-/*
- * Walk state - current state of a parse tree walk. Used for both a leisurely stroll through
- * the tree (for whatever reason), and for control method execution.
- */
-
-#define NEXT_OP_DOWNWARD 1
-#define NEXT_OP_UPWARD 2
-
-#define WALK_NON_METHOD 0
-#define WALK_METHOD 1
-#define WALK_METHOD_RESTART 2
-
-typedef struct acpi_walk_state
-{
-    u8 data_type; /* To differentiate various internal objs MUST BE FIRST!*/\
-    acpi_owner_id owner_id; /* Owner of objects created during the walk */
-    u8 last_predicate; /* Result of last predicate */
-    u8 current_result; /* */
-    u8 next_op_info; /* Info about Next_op */
-    u8 num_operands; /* Stack pointer for Operands[] array */
-    u8 return_used;
-    u8 walk_type;
-    u16 current_sync_level; /* Mutex Sync (nested acquire) level */
-    u16 opcode; /* Current AML opcode */
-    u32 arg_count; /* push for fixed or var args */
-    u32 aml_offset;
-    u32 arg_types;
-    u32 method_breakpoint; /* For single stepping */
-    u32 parse_flags;
-    u32 prev_arg_types;
-
-
-    u8 *aml_last_while;
-    struct acpi_node arguments[MTH_NUM_ARGS]; /* Control method arguments */
-    union acpi_operand_obj **caller_return_desc;
-    acpi_generic_state *control_state; /* List of control states (nested IFs) */
-    struct acpi_node local_variables[MTH_NUM_LOCALS]; /* Control method locals */
-    struct acpi_node *method_call_node; /* Called method Node*/
-    acpi_parse_object *method_call_op; /* Method_call Op if running a method */
-    union acpi_operand_obj *method_desc; /* Method descriptor if running a method */
-    struct acpi_node *method_node; /* Method Node if running a method */
-    acpi_parse_object *op; /* Current parser op */
-    union acpi_operand_obj *operands[OBJ_NUM_OPERANDS+1]; /* Operands passed to the interpreter (+1 for NULL terminator) */
-    const acpi_opcode_info *op_info; /* Info on current opcode */
-    acpi_parse_object *origin; /* Start of walk [Obsolete] */
-    union acpi_operand_obj **params;
-    acpi_parse_state parser_state; /* Current state of parser */
-    union acpi_operand_obj *result_obj;
-    acpi_generic_state *results; /* Stack of accumulated results */
-    union acpi_operand_obj *return_desc; /* Return object, if any */
-    acpi_generic_state *scope_info; /* Stack of nested scopes */
-
-/* TBD: Obsolete with removal of WALK procedure ? */
-    acpi_parse_object *prev_op; /* Last op that was processed */
-    acpi_parse_object *next_op; /* next op to be processed */
-
-
-    acpi_parse_downwards descending_callback;
-    acpi_parse_upwards ascending_callback;
-    struct acpi_walk_list *walk_list;
-    struct acpi_walk_state *next; /* Next Walk_state in list */
-
-
-} acpi_walk_state;
-
-
-/*
- * Walk list - head of a tree of walk states. Multiple walk states are created when there
- * are nested control methods executing.
- */
-typedef struct acpi_walk_list
-{
-
-    acpi_walk_state *walk_state;
-    ACPI_OBJECT_MUTEX acquired_mutex_list; /* List of all currently acquired mutexes */
-
-} acpi_walk_list;
-
-
-/* Info used by Acpi_ps_init_objects */
-
-typedef struct acpi_init_walk_info
-{
-    u16 method_count;
-    u16 op_region_count;
-    u16 field_count;
-    u16 op_region_init;
-    u16 field_init;
-    u16 object_count;
-    acpi_table_desc *table_desc;
-
-} acpi_init_walk_info;
-
-
-/* Info used by TBD */
-
-typedef struct acpi_device_walk_info
-{
-    u16 device_count;
-    u16 num_STA;
-    u16 num_INI;
-    acpi_table_desc *table_desc;
-
-} acpi_device_walk_info;
-
-
-/* TBD: [Restructure] Merge with struct above */
-
-typedef struct acpi_walk_info
-{
-    u32 debug_level;
-    u32 owner_id;
-    u8 display_type;
-
-} acpi_walk_info;
-
-/* Display Types */
-
-#define ACPI_DISPLAY_SUMMARY 0
-#define ACPI_DISPLAY_OBJECTS 1
-
-typedef struct acpi_get_devices_info
-{
-    acpi_walk_callback user_function;
-    void *context;
-    NATIVE_CHAR *hid;
-
-} acpi_get_devices_info;
-
-
-typedef union acpi_aml_operands
-{
-    acpi_operand_object *operands[7];
-
-    struct
-    {
-        ACPI_OBJECT_INTEGER *type;
-        ACPI_OBJECT_INTEGER *code;
-        ACPI_OBJECT_INTEGER *argument;
-
-    } fatal;
-
-    struct
-    {
-        acpi_operand_object *source;
-        ACPI_OBJECT_INTEGER *index;
-        acpi_operand_object *target;
-
-    } index;
-
-    struct
-    {
-        acpi_operand_object *source;
-        ACPI_OBJECT_INTEGER *index;
-        ACPI_OBJECT_INTEGER *length;
-        acpi_operand_object *target;
-
-    } mid;
-
-} ACPI_AML_OPERANDS;
-
-
-#endif
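As the walk-list comment notes, each nested control method gets its own acpi_walk_state, chained through the next pointer and headed by an acpi_walk_list. A speculative sketch of how that chain could be traversed, assuming the struct layouts above; `method_nesting_depth' is a hypothetical helper, not part of the patch:

/* Sketch: count walk states = current method nesting depth */
static u32
method_nesting_depth (acpi_walk_list *list)
{
    acpi_walk_state *state = list->walk_state;
    u32 depth = 0;

    while (state) {
        depth++;
        state = state->next;    /* next Walk_state in list */
    }
    return depth;
}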
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/actables.h linux.21rc5-ac2/drivers/acpi/include/actables.h
--- linux.21rc5/drivers/acpi/include/actables.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/actables.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,200 +0,0 @@
-/******************************************************************************
- *
- * Name: actables.h - ACPI table management
- * $Revision: 32 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACTABLES_H__
-#define __ACTABLES_H__
-
-
-/* Used in Acpi_tb_map_acpi_table for size parameter if table header is to be used */
-
-#define SIZE_IN_HEADER 0
-
-
-acpi_status
-acpi_tb_handle_to_object (
-    u16 table_id,
-    acpi_table_desc **table_desc);
-
-/*
- * tbconvrt - Table conversion routines
- */
-
-acpi_status
-acpi_tb_convert_to_xsdt (
-    acpi_table_desc *table_info,
-    u32 *number_of_tables);
-
-acpi_status
-acpi_tb_convert_table_fadt (
-    void);
-
-acpi_status
-acpi_tb_build_common_facs (
-    acpi_table_desc *table_info);
-
-u32
-acpi_tb_get_table_count (
-    RSDP_DESCRIPTOR *RSDP,
-    acpi_table_header *RSDT);
-
-/*
- * tbget - Table "get" routines
- */
-
-acpi_status
-acpi_tb_get_table_ptr (
-    acpi_table_type table_type,
-    u32 instance,
-    acpi_table_header **table_ptr_loc);
-
-acpi_status
-acpi_tb_get_table (
-    ACPI_PHYSICAL_ADDRESS physical_address,
-    acpi_table_header *buffer_ptr,
-    acpi_table_desc *table_info);
-
-acpi_status
-acpi_tb_verify_rsdp (
-    ACPI_PHYSICAL_ADDRESS RSDP_physical_address);
-
-acpi_status
-acpi_tb_get_table_facs (
-    acpi_table_header *buffer_ptr,
-    acpi_table_desc *table_info);
-
-ACPI_PHYSICAL_ADDRESS
-acpi_tb_get_rsdt_address (
-    void);
-
-acpi_status
-acpi_tb_validate_rsdt (
-    acpi_table_header *table_ptr);
-
-acpi_status
-acpi_tb_get_table_pointer (
-    ACPI_PHYSICAL_ADDRESS physical_address,
-    u32 flags,
-    u32 *size,
-    acpi_table_header **table_ptr);
-
-/*
- * tbgetall - Get all firmware ACPI tables
- */
-
-acpi_status
-acpi_tb_get_all_tables (
-    u32 number_of_tables,
-    acpi_table_header *buffer_ptr);
-
-
-/*
- * tbinstall - Table installation
- */
-
-acpi_status
-acpi_tb_install_table (
-    acpi_table_header *table_ptr,
-    acpi_table_desc *table_info);
-
-acpi_status
-acpi_tb_recognize_table (
-    acpi_table_header *table_ptr,
-    acpi_table_desc *table_info);
-
-acpi_status
-acpi_tb_init_table_descriptor (
-    acpi_table_type table_type,
-    acpi_table_desc *table_info);
-
-
-/*
- * tbremove - Table removal and deletion
- */
-
-void
-acpi_tb_delete_acpi_tables (
-    void);
-
-void
-acpi_tb_delete_acpi_table (
-    acpi_table_type type);
-
-void
-acpi_tb_delete_single_table (
-    acpi_table_desc *table_desc);
-
-acpi_table_desc *
-acpi_tb_uninstall_table (
-    acpi_table_desc *table_desc);
-
-void
-acpi_tb_free_acpi_tables_of_type (
-    acpi_table_desc *table_info);
-
-
-/*
- * tbrsd - RSDP, RSDT utilities
- */
-
-acpi_status
-acpi_tb_get_table_rsdt (
-    u32 *number_of_tables);
-
-u8 *
-acpi_tb_scan_memory_for_rsdp (
-    u8 *start_address,
-    u32 length);
-
-acpi_status
-acpi_tb_find_rsdp (
-    acpi_table_desc *table_info,
-    u32 flags);
-
-
-/*
- * tbutils - common table utilities
- */
-
-acpi_status
-acpi_tb_map_acpi_table (
-    ACPI_PHYSICAL_ADDRESS physical_address,
-    u32 *size,
-    acpi_table_header **logical_address);
-
-acpi_status
-acpi_tb_verify_table_checksum (
-    acpi_table_header *table_header);
-
-u8
-acpi_tb_checksum (
-    void *buffer,
-    u32 length);
-
-acpi_status
-acpi_tb_validate_table_header (
-    acpi_table_header *table_header);
-
-
-#endif /* __ACTABLES_H__ */
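The checksum rule these routines enforce comes straight from the ACPI specification: every byte of a table, including the checksum field itself, must sum to zero mod 256. A minimal standalone equivalent of what acpi_tb_checksum computes (illustrative sketch, not the patch's implementation):

/* Sum all bytes of a table; the result is zero for a valid table */
static u8
table_checksum (void *buffer, u32 length)
{
    u8 sum = 0;
    u8 *p = (u8 *) buffer;

    while (length--)
        sum += *p++;
    return sum;
}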
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/actbl1.h linux.21rc5-ac2/drivers/acpi/include/actbl1.h
--- linux.21rc5/drivers/acpi/include/actbl1.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/actbl1.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,123 +0,0 @@
-/******************************************************************************
- *
- * Name: actbl1.h - ACPI 1.0 tables
- * $Revision: 17 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACTBL1_H__
-#define __ACTBL1_H__
-
-#pragma pack(1)
-
-/*************************************/
-/* ACPI Specification Rev 1.0 for */
-/* the Root System Description Table */
-/*************************************/
-typedef struct
-{
-    acpi_table_header header; /* Table header */
-    u32 table_offset_entry [1]; /* Array of pointers to other */
-    /* ACPI tables */
-} RSDT_DESCRIPTOR_REV1;
-
-
-/***************************************/
-/* ACPI Specification Rev 1.0 for */
-/* the Firmware ACPI Control Structure */
-/***************************************/
-typedef struct
-{
-    NATIVE_CHAR signature[4]; /* signature "FACS" */
-    u32 length; /* length of structure, in bytes */
-    u32 hardware_signature; /* hardware configuration signature */
-    u32 firmware_waking_vector; /* ACPI OS waking vector */
-    u32 global_lock; /* Global Lock */
-    u32 S4bios_f : 1; /* Indicates if S4BIOS support is present */
-    u32 reserved1 : 31; /* must be 0 */
-    u8 resverved3 [40]; /* reserved - must be zero */
-
-} facs_descriptor_rev1;
-
-
-/************************************/
-/* ACPI Specification Rev 1.0 for */
-/* the Fixed ACPI Description Table */
-/************************************/
-typedef struct
-{
-    acpi_table_header header; /* table header */
-    u32 firmware_ctrl; /* Physical address of FACS */
-    u32 dsdt; /* Physical address of DSDT */
-    u8 model; /* System Interrupt Model */
-    u8 reserved1; /* reserved */
-    u16 sci_int; /* System vector of SCI interrupt */
-    u32 smi_cmd; /* Port address of SMI command port */
-    u8 acpi_enable; /* value to write to smi_cmd to enable ACPI */
-    u8 acpi_disable; /* value to write to smi_cmd to disable ACPI */
-    u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
-    u8 reserved2; /* reserved - must be zero */
-    u32 pm1a_evt_blk; /* Port address of Power Mgt 1a Acpi_event Reg Blk */
-    u32 pm1b_evt_blk; /* Port address of Power Mgt 1b Acpi_event Reg Blk */
-    u32 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
-    u32 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
-    u32 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
-    u32 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
-    u32 gpe0blk; /* Port addr of General Purpose Acpi_event 0 Reg Blk */
-    u32 gpe1_blk; /* Port addr of General Purpose Acpi_event 1 Reg Blk */
-    u8 pm1_evt_len; /* Byte Length of ports at pm1_x_evt_blk */
-    u8 pm1_cnt_len; /* Byte Length of ports at pm1_x_cnt_blk */
-    u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
-    u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */
-    u8 gpe0blk_len; /* Byte Length of ports at gpe0_blk */
-    u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
-    u8 gpe1_base; /* offset in gpe model where gpe1 events start */
-    u8 reserved3; /* reserved */
-    u16 plvl2_lat; /* worst case HW latency to enter/exit C2 state */
-    u16 plvl3_lat; /* worst case HW latency to enter/exit C3 state */
-    u16 flush_size; /* Size of area read to flush caches */
-    u16 flush_stride; /* Stride used in flushing caches */
-    u8 duty_offset; /* bit location of duty cycle field in p_cnt reg */
-    u8 duty_width; /* bit width of duty cycle field in p_cnt reg */
-    u8 day_alrm; /* index to day-of-month alarm in RTC CMOS RAM */
-    u8 mon_alrm; /* index to month-of-year alarm in RTC CMOS RAM */
-    u8 century; /* index to century in RTC CMOS RAM */
-    u8 reserved4; /* reserved */
-    u8 reserved4a; /* reserved */
-    u8 reserved4b; /* reserved */
-    u32 wb_invd : 1; /* wbinvd instruction works properly */
-    u32 wb_invd_flush : 1; /* wbinvd flushes but does not invalidate */
-    u32 proc_c1 : 1; /* all processors support C1 state */
-    u32 plvl2_up : 1; /* C2 state works on MP system */
-    u32 pwr_button : 1; /* Power button is handled as a generic feature */
-    u32 sleep_button : 1; /* Sleep button is handled as a generic feature, or not present */
-    u32 fixed_rTC : 1; /* RTC wakeup stat not in fixed register space */
-    u32 rtcs4 : 1; /* RTC wakeup stat not possible from S4 */
-    u32 tmr_val_ext : 1; /* tmr_val is 32 bits */
-    u32 reserved5 : 23; /* reserved - must be zero */
-
-} fadt_descriptor_rev1;
-
-#pragma pack()
-
-#endif /* __ACTBL1_H__ */
-
-
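Because these table structs are declared under #pragma pack(1), they can be overlaid directly on the mapped firmware bytes with no padding surprises. An illustrative sketch of reading one field that way, assuming the table has already been mapped and checksum-verified:

/* Sketch: pull the 32-bit DSDT physical address out of a rev 1 FADT */
static u32
fadt1_dsdt_address (acpi_table_header *table)
{
    fadt_descriptor_rev1 *fadt = (fadt_descriptor_rev1 *) table;

    return fadt->dsdt;  /* physical address of the DSDT */
}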
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/actbl2.h linux.21rc5-ac2/drivers/acpi/include/actbl2.h
--- linux.21rc5/drivers/acpi/include/actbl2.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/actbl2.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,186 +0,0 @@
-/******************************************************************************
- *
- * Name: actbl2.h - ACPI Specification Revision 2.0 Tables
- * $Revision: 24 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACTBL2_H__
-#define __ACTBL2_H__
-
-/*
- * Prefered Power Management Profiles
- */
-#define PM_UNSPECIFIED 0
-#define PM_DESKTOP 1
-#define PM_MOBILE 2
-#define PM_WORKSTATION 3
-#define PM_ENTERPRISE_SERVER 4
-#define PM_SOHO_SERVER 5
-#define PM_APPLIANCE_PC 6
-
-/*
- * ACPI Boot Arch Flags
- */
-#define BAF_LEGACY_DEVICES 0x0001
-#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
-
-#define FADT2_REVISION_ID 3
-
-
-#pragma pack(1)
-
-/*
- * ACPI Specification Rev 2.0 for the Root System Description Table
- */
-typedef struct
-{
-    acpi_table_header header; /* Table header */
-    u32 table_offset_entry [1]; /* Array of pointers to */
-    /* other tables' headers */
-} RSDT_DESCRIPTOR_REV2;
-
-
-/*
- * ACPI Specification Rev 2.0 for the Extended System Description Table (XSDT)
- */
-typedef struct
-{
-    acpi_table_header header; /* Table header */
-    u64 table_offset_entry [1]; /* Array of pointers to */
-    /* other tables' headers */
-} XSDT_DESCRIPTOR_REV2;
-
-
-/*
- * ACPI Specification Rev 2.0 for the Firmware ACPI Control Structure
- */
-typedef struct
-{
-    NATIVE_CHAR signature[4]; /* signature "FACS" */
-    u32 length; /* length of structure, in bytes */
-    u32 hardware_signature; /* hardware configuration signature */
-    u32 firmware_waking_vector; /* 32bit physical address of the Firmware Waking Vector. */
-    u32 global_lock; /* Global Lock used to synchronize access to shared hardware resources */
-    u32 S4bios_f : 1; /* Indicates if S4BIOS support is present */
-    u32 reserved1 : 31; /* must be 0 */
-    u64 Xfirmware_waking_vector; /* 64bit physical address of the Firmware Waking Vector. */
-    u8 version; /* Version of this table */
-    u8 reserved3 [31]; /* reserved - must be zero */
-
-} facs_descriptor_rev2;
-
-
-/*
- * ACPI Specification Rev 2.0 for the Generic Address Structure (GAS)
- */
-typedef struct
-{
-    u8 address_space_id; /* Address space where struct or register exists. */
-    u8 register_bit_width; /* Size in bits of given register */
-    u8 register_bit_offset; /* Bit offset within the register */
-    u8 reserved; /* Must be 0 */
-    u64 address; /* 64-bit address of struct or register */
-
-} acpi_generic_address;
-
-
-/*
- * ACPI Specification Rev 2.0 for the Fixed ACPI Description Table
- */
-typedef struct
-{
-    acpi_table_header header; /* table header */
-    u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */
-    u32 V1_dsdt; /* 32-bit physical address of DSDT */
-    u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/
-    u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */
-    u16 sci_int; /* System vector of SCI interrupt */
-    u32 smi_cmd; /* Port address of SMI command port */
-    u8 acpi_enable; /* value to write to smi_cmd to enable ACPI */
-    u8 acpi_disable; /* value to write to smi_cmd to disable ACPI */
-    u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
-    u8 pstate_cnt; /* processor performance state control*/
-    u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a Acpi_event Reg Blk */
-    u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b Acpi_event Reg Blk */
-    u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
-    u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
-    u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
-    u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
-    u32 V1_gpe0blk; /* Port addr of General Purpose Acpi_event 0 Reg Blk */
-    u32 V1_gpe1_blk; /* Port addr of General Purpose Acpi_event 1 Reg Blk */
-    u8 pm1_evt_len; /* Byte Length of ports at pm1_x_evt_blk */
-    u8 pm1_cnt_len; /* Byte Length of ports at pm1_x_cnt_blk */
-    u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
-    u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */
-    u8 gpe0blk_len; /* Byte Length of ports at gpe0_blk */
-    u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
-    u8 gpe1_base; /* offset in gpe model where gpe1 events start */
-    u8 cst_cnt; /* Support for the _CST object and C States change notification.*/
-    u16 plvl2_lat; /* worst case HW latency to enter/exit C2 state */
-    u16 plvl3_lat; /* worst case HW latency to enter/exit C3 state */
-    u16 flush_size; /* number of flush strides that need to be read */
-    u16 flush_stride; /* Processor's memory cache line width, in bytes */
-    u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/
-    u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/
-    u8 day_alrm; /* index to day-of-month alarm in RTC CMOS RAM */
-    u8 mon_alrm; /* index to month-of-year alarm in RTC CMOS RAM */
-    u8 century; /* index to century in RTC CMOS RAM */
-    u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/
-    u8 reserved2; /* reserved */
-    u32 wb_invd : 1; /* wbinvd instruction works properly */
-    u32 wb_invd_flush : 1; /* wbinvd flushes but does not invalidate */
-    u32 proc_c1 : 1; /* all processors support C1 state */
-    u32 plvl2_up : 1; /* C2 state works on MP system */
-    u32 pwr_button : 1; /* Power button is handled as a generic feature */
-    u32 sleep_button : 1; /* Sleep button is handled as a generic feature, or not present */
-    u32 fixed_rTC : 1; /* RTC wakeup stat not in fixed register space */
-    u32 rtcs4 : 1; /* RTC wakeup stat not possible from S4 */
-    u32 tmr_val_ext : 1; /* tmr_val is 32 bits */
-    u32 dock_cap : 1; /* Supports Docking */
-    u32 reset_reg_sup : 1; /* Indicates system supports system reset via the FADT RESET_REG*/
-    u32 sealed_case : 1; /* Indicates system has no internal expansion capabilities and case is sealed. */
-    u32 headless : 1; /* Indicates system does not have local video capabilities or local input devices.*/
-    u32 cpu_sw_sleep : 1; /* Indicates to OSPM that a processor native instruction */
-    /* must be executed after writing the SLP_TYPx register. */
-    u32 reserved6 : 18; /* reserved - must be zero */
-
-    acpi_generic_address reset_register; /* Reset register address in GAS format */
-    u8 reset_value; /* Value to write to the Reset_register port to reset the system. */
-    u8 reserved7[3]; /* These three bytes must be zero */
-    u64 Xfirmware_ctrl; /* 64-bit physical address of FACS */
-    u64 Xdsdt; /* 64-bit physical address of DSDT */
-    acpi_generic_address Xpm1a_evt_blk; /* Extended Power Mgt 1a Acpi_event Reg Blk address */
-    acpi_generic_address Xpm1b_evt_blk; /* Extended Power Mgt 1b Acpi_event Reg Blk address */
-    acpi_generic_address Xpm1a_cnt_blk; /* Extended Power Mgt 1a Control Reg Blk address */
-    acpi_generic_address Xpm1b_cnt_blk; /* Extended Power Mgt 1b Control Reg Blk address */
-    acpi_generic_address Xpm2_cnt_blk; /* Extended Power Mgt 2 Control Reg Blk address */
-    acpi_generic_address Xpm_tmr_blk; /* Extended Power Mgt Timer Ctrl Reg Blk address */
-    acpi_generic_address Xgpe0blk; /* Extended General Purpose Acpi_event 0 Reg Blk address */
-    acpi_generic_address Xgpe1_blk; /* Extended General Purpose Acpi_event 1 Reg Blk address */
-
-} fadt_descriptor_rev2;
-
-
-#pragma pack()
-
-#endif /* __ACTBL2_H__ */
-
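The Generic Address Structure is what makes the rev 2 FADT register blocks location-independent: each X* block carries its own address space ID. A hedged sketch of the usual dispatch on that ID (0 is system memory, 1 is system I/O in the ACPI 2.0 spec), built on the OSL calls declared earlier; `read_gas' is a hypothetical helper:

/* Sketch: read a register described by a GAS entry */
static acpi_status
read_gas (acpi_generic_address *gas, void *value, u32 width)
{
    switch (gas->address_space_id) {
    case 0: /* system memory */
        return acpi_os_read_memory ((ACPI_PHYSICAL_ADDRESS) gas->address,
                                    value, width);
    case 1: /* system I/O */
        return acpi_os_read_port ((ACPI_IO_ADDRESS) gas->address,
                                  value, width);
    default:
        return AE_SUPPORT; /* other spaces need an installed handler */
    }
}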
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/actbl71.h linux.21rc5-ac2/drivers/acpi/include/actbl71.h
--- linux.21rc5/drivers/acpi/include/actbl71.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/actbl71.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,147 +0,0 @@
-/******************************************************************************
- *
- * Name: actbl71.h - IA-64 Extensions to the ACPI Spec Rev. 0.71
- *       This file includes tables specific to this
- *       specification revision.
- * $Revision: 11 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACTBL71_H__
-#define __ACTBL71_H__
-
-
-/* 0.71 FADT Address_space data item bitmasks defines */
-/* If the associated bit is zero then it is in memory space else in io space */
-
-#define SMI_CMD_ADDRESS_SPACE 0x01
-#define PM1_BLK_ADDRESS_SPACE 0x02
-#define PM2_CNT_BLK_ADDRESS_SPACE 0x04
-#define PM_TMR_BLK_ADDRESS_SPACE 0x08
-#define GPE0_BLK_ADDRESS_SPACE 0x10
-#define GPE1_BLK_ADDRESS_SPACE 0x20
-
-/* Only for clarity in declarations */
-
-typedef u64 IO_ADDRESS;
-
-
-#pragma pack(1)
-typedef struct /* Root System Descriptor Pointer */
-{
-    NATIVE_CHAR signature [8]; /* contains "RSD PTR " */
-    u8 checksum; /* to make sum of struct == 0 */
-    NATIVE_CHAR oem_id [6]; /* OEM identification */
-    u8 reserved; /* Must be 0 for 1.0, 2 for 2.0 */
-    u64 rsdt_physical_address; /* 64-bit physical address of RSDT */
-} RSDP_DESCRIPTOR_REV071;
-
-
-/*****************************************/
-/* IA64 Extensions to ACPI Spec Rev 0.71 */
-/* for the Root System Description Table */
-/*****************************************/
-typedef struct
-{
-    acpi_table_header header; /* Table header */
-    u32 reserved_pad; /* IA64 alignment, must be 0 */
-    u64 table_offset_entry [1]; /* Array of pointers to other */
-    /* tables' headers */
-} RSDT_DESCRIPTOR_REV071;
-
-
-/*******************************************/
-/* IA64 Extensions to ACPI Spec Rev 0.71 */
-/* for the Firmware ACPI Control Structure */
-/*******************************************/
-typedef struct
-{
-    NATIVE_CHAR signature[4]; /* signature "FACS" */
-    u32 length; /* length of structure, in bytes */
-    u32 hardware_signature; /* hardware configuration signature */
-    u32 reserved4; /* must be 0 */
-    u64 firmware_waking_vector; /* ACPI OS waking vector */
-    u64 global_lock; /* Global Lock */
-    u32 S4bios_f : 1; /* Indicates if S4BIOS support is present */
-    u32 reserved1 : 31; /* must be 0 */
-    u8 reserved3 [28]; /* reserved - must be zero */
-
-} facs_descriptor_rev071;
-
-
-/******************************************/
-/* IA64 Extensions to ACPI Spec Rev 0.71 */
-/* for the Fixed ACPI Description Table */
-/******************************************/
-typedef struct
-{
-    acpi_table_header header; /* table header */
-    u32 reserved_pad; /* IA64 alignment, must be 0 */
-    u64 firmware_ctrl; /* 64-bit Physical address of FACS */
-    u64 dsdt; /* 64-bit Physical address of DSDT */
-    u8 model; /* System Interrupt Model */
-    u8 address_space; /* Address Space Bitmask */
-    u16 sci_int; /* System vector of SCI interrupt */
-    u8 acpi_enable; /* value to write to smi_cmd to enable ACPI */
-    u8 acpi_disable; /* value to write to smi_cmd to disable ACPI */
-    u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
-    u8 reserved2; /* reserved - must be zero */
-    u64 smi_cmd; /* Port address of SMI command port */
-    u64 pm1a_evt_blk; /* Port address of Power Mgt 1a Acpi_event Reg Blk */
-    u64 pm1b_evt_blk; /* Port address of Power Mgt 1b Acpi_event Reg Blk */
-    u64 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
-    u64 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
-    u64 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
-    u64 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
-    u64 gpe0blk; /* Port addr of General Purpose Acpi_event 0 Reg Blk */
-    u64 gpe1_blk; /* Port addr of General Purpose Acpi_event 1 Reg Blk */
-    u8 pm1_evt_len; /* Byte Length of ports at pm1_x_evt_blk */
-    u8 pm1_cnt_len; /* Byte Length of ports at pm1_x_cnt_blk */
-    u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
-    u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */
-    u8 gpe0blk_len; /* Byte Length of ports at gpe0_blk */
-    u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
-    u8 gpe1_base; /* offset in gpe model where gpe1 events start */
-    u8 reserved3; /* reserved */
-    u16 plvl2_lat; /* worst case HW latency to enter/exit C2 state */
-    u16 plvl3_lat; /* worst case HW latency to enter/exit C3 state */
-    u8 day_alrm; /* index to day-of-month alarm in RTC CMOS RAM */
-    u8 mon_alrm; /* index to month-of-year alarm in RTC CMOS RAM */
-    u8 century; /* index to century in RTC CMOS RAM */
-    u8 reserved4; /* reserved */
-    u32 flush_cash : 1; /* PAL_FLUSH_CACHE is correctly supported */
-    u32 reserved5 : 1; /* reserved - must be zero */
-    u32 proc_c1 : 1; /* all processors support C1 state */
-    u32 plvl2_up : 1; /* C2 state works on MP system */
-    u32 pwr_button : 1; /* Power button is handled as a generic feature */
-    u32 sleep_button : 1; /* Sleep button is handled as a generic feature, or not present */
-    u32 fixed_rTC : 1; /* RTC wakeup stat not in fixed register space */
-    u32 rtcs4 : 1; /* RTC wakeup stat not possible from S4 */
-    u32 tmr_val_ext : 1; /* tmr_val is 32 bits */
-    u32 dock_cap : 1; /* Supports Docking */
-    u32 reserved6 : 22; /* reserved - must be zero */
-
-} fadt_descriptor_rev071;
-
-#pragma pack()
-
-#endif /* __ACTBL71_H__ */
-
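Unlike the rev 2 FADT, the 0.71 IA-64 variant uses a single address_space bitmask to say which register blocks live in I/O versus memory space, as the comment at the top of the file describes. An illustrative sketch of testing one of those bits:

/* Sketch: bit set means I/O space, bit clear means memory space */
static int
pm_tmr_is_io (fadt_descriptor_rev071 *fadt)
{
    return (fadt->address_space & PM_TMR_BLK_ADDRESS_SPACE) != 0;
}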
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/actbl.h linux.21rc5-ac2/drivers/acpi/include/actbl.h
--- linux.21rc5/drivers/acpi/include/actbl.h 2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/include/actbl.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,217 +0,0 @@
-/******************************************************************************
- *
- * Name: actbl.h - Table data structures defined in ACPI specification
- * $Revision: 46 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 R. Byron Moore
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ACTBL_H__
-#define __ACTBL_H__
-
-
-/*
- * Values for description table header signatures
- */
-
-#define RSDP_NAME "RSDP"
-#define RSDP_SIG "RSD PTR " /* RSDT Pointer signature */
-#define APIC_SIG "APIC" /* Multiple APIC Description Table */
-#define DSDT_SIG "DSDT" /* Differentiated System Description Table */
-#define FADT_SIG "FACP" /* Fixed ACPI Description Table */
-#define FACS_SIG "FACS" /* Firmware ACPI Control Structure */
-#define PSDT_SIG "PSDT" /* Persistent System Description Table */
-#define RSDT_SIG "RSDT" /* Root System Description Table */
-#define XSDT_SIG "XSDT" /* Extended System Description Table */
-#define SSDT_SIG "SSDT" /* Secondary System Description Table */
-#define SBST_SIG "SBST" /* Smart Battery Specification Table */
-#define SPIC_SIG "SPIC" /* iosapic table */
-#define BOOT_SIG "BOOT" /* Boot table */
-
-
-#define GL_OWNED 0x02 /* Ownership of global lock is bit 1 */
-
-/* values of Mapic.Model */
-
-#define DUAL_PIC 0
-#define MULTIPLE_APIC 1
-
-/* values of Type in APIC_HEADER */
-
-#define APIC_PROC 0
-#define APIC_IO 1
-
-
-/*
- * Common table types. The base code can remain
- * constant if the underlying tables are changed
- */
-#define RSDT_DESCRIPTOR RSDT_DESCRIPTOR_REV2
-#define xsdt_descriptor XSDT_DESCRIPTOR_REV2
-#define FACS_DESCRIPTOR facs_descriptor_rev2
-#define FADT_DESCRIPTOR fadt_descriptor_rev2
-
-
-#pragma pack(1)
-
-/*
- * Architecture-independent tables
- * The architecture dependent tables are in separate files
- */
-
-typedef struct /* Root System Descriptor Pointer */
-{
-    NATIVE_CHAR signature [8]; /* contains "RSD PTR " */
-    u8 checksum; /* to make sum of struct == 0 */
-    NATIVE_CHAR oem_id [6]; /* OEM identification */
-    u8 revision; /* Must be 0 for 1.0, 2 for 2.0 */
-    u32 rsdt_physical_address; /* 32-bit physical address of RSDT */
-    u32 length; /* XSDT Length in bytes including hdr */
-    u64 xsdt_physical_address; /* 64-bit physical address of XSDT */
-    u8 extended_checksum; /* Checksum of entire table */
-    NATIVE_CHAR reserved [3]; /* reserved field must be 0 */
-
-} RSDP_DESCRIPTOR;
-
-
-typedef struct /* ACPI common table header */
-{
-    NATIVE_CHAR signature [4]; /* identifies type of table */
-    u32 length; /* length of table, in bytes,
-                 * including header */
-    u8 revision; /* specification minor version # */
-    u8 checksum; /* to make sum of entire table == 0 */
-    NATIVE_CHAR oem_id [6]; /* OEM identification */
-    NATIVE_CHAR oem_table_id [8]; /* OEM table identification */
-    u32 oem_revision; /* OEM revision number */
-    NATIVE_CHAR asl_compiler_id [4]; /* ASL compiler vendor ID */
-    u32 asl_compiler_revision; /* ASL compiler revision number */
-
-} acpi_table_header;
-
-
-typedef struct /* Common FACS for internal use */
-{
-    u32 *global_lock;
-    u64 *firmware_waking_vector;
-    u8 vector_width;
-
-} acpi_common_facs;
-
-
-typedef struct /* APIC Table */
-{
-    acpi_table_header header; /* table header */
-    u32 local_apic_address; /* Physical address for accessing local APICs */
-    u32 PCATcompat : 1; /* a one indicates system also has dual 8259s */
-    u32 reserved1 : 31;
-
-} APIC_TABLE;
-
-
-typedef struct /* APIC Header */
-{
-    u8 type; /* APIC type. Either APIC_PROC or APIC_IO */
-    u8 length; /* Length of APIC structure */
-
-} APIC_HEADER;
-
-
-typedef struct /* Processor APIC */
-{
-    APIC_HEADER header;
-    u8 processor_apic_id; /* ACPI processor id */
-    u8 local_apic_id; /* processor's local APIC id */
-    u32 processor_enabled: 1; /* Processor is usable if set */
-    u32 reserved1 : 31;
-
-} PROCESSOR_APIC;
-
-
-typedef struct /* IO APIC */
-{
-    APIC_HEADER header;
-    u8 io_apic_id; /* I/O APIC ID */
-    u8 reserved; /* reserved - must be zero */
-    u32 io_apic_address; /* APIC's physical address */
-    u32 vector; /* interrupt vector index where INTI
-                 * lines start */
-} IO_APIC;
-
-
-/*
-** IA64 TODO: Add SAPIC Tables
-*/
-
-/*
-** IA64 TODO: Modify Smart Battery Description to comply with ACPI IA64
-** extensions.
-*/
-typedef struct /* Smart Battery Description Table */
-{
-    acpi_table_header header;
-    u32 warning_level;
-    u32 low_level;
-    u32 critical_level;
-
-} SMART_BATTERY_DESCRIPTION_TABLE;
-
-
-#pragma pack()
-
-
-/*
- * ACPI Table information. We save the table address, length,
- * and type of memory allocation (mapped or allocated) for each
- * table for 1) when we exit, and 2) if a new table is installed
- */
-
-#define ACPI_MEM_NOT_ALLOCATED 0
-#define ACPI_MEM_ALLOCATED 1
-#define ACPI_MEM_MAPPED 2
-
-/* Definitions for the Flags bitfield member of ACPI_TABLE_SUPPORT */
-
-#define ACPI_TABLE_SINGLE 0
-#define ACPI_TABLE_MULTIPLE 1
-
-
-/* Data about each known table type */
-
-typedef struct _acpi_table_support
-{
-    NATIVE_CHAR *name;
-    NATIVE_CHAR *signature;
-    u8 sig_length;
-    u8 flags;
-    u16 status;
-    void **global_ptr;
-
-} ACPI_TABLE_SUPPORT;
-
-/*
- * Get the architecture-specific tables
- */
-
-#include "actbl1.h" /* Acpi 1.0 table defintions */
-#include "actbl71.h" /* Acpi 0.71 IA-64 Extension table defintions */
-#include "actbl2.h" /* Acpi 2.0 table definitions */
-
-#endif /* __ACTBL_H__ */
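The APIC (MADT) subtables declared here are variable-length records introduced by an APIC_HEADER, so a parser walks them by stepping the header's length field rather than a fixed stride. An illustrative sketch under the struct declarations above, assuming the table has already been mapped and validated:

/* Sketch: count the I/O APIC entries in an APIC table */
static u32
count_io_apics (APIC_TABLE *table)
{
    u8 *p = (u8 *) (table + 1);                     /* first subtable */
    u8 *end = (u8 *) table + table->header.length;  /* end of table */
    u32 n = 0;

    while (p < end) {
        APIC_HEADER *h = (APIC_HEADER *) p;

        if (h->type == APIC_IO)
            n++;
        p += h->length;     /* advance by this record's own length */
    }
    return n;
}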
- * Other values are undefined. - * - * INT8 8-bit (1 byte) signed value - * UINT8 8-bit (1 byte) unsigned value - * INT16 16-bit (2 byte) signed value - * UINT16 16-bit (2 byte) unsigned value - * INT32 32-bit (4 byte) signed value - * UINT32 32-bit (4 byte) unsigned value - * INT64 64-bit (8 byte) signed value - * UINT64 64-bit (8 byte) unsigned value - * NATIVE_INT 32-bit on IA-32, 64-bit on IA-64 signed value - * NATIVE_UINT 32-bit on IA-32, 64-bit on IA-64 unsigned value - * UCHAR Character. 1 byte unsigned value. - */ - -#ifndef BITS_PER_LONG -#error "define BITS_PER_LONG" -#endif - -#if BITS_PER_LONG == 64 -/* - * 64-bit type definitions - */ -typedef unsigned char UINT8; -typedef unsigned char BOOLEAN; -typedef unsigned char UCHAR; -typedef unsigned short UINT16; -typedef int INT32; -typedef unsigned int UINT32; -typedef COMPILER_DEPENDENT_UINT64 UINT64; - -typedef UINT64 NATIVE_UINT; -typedef long long NATIVE_INT; - -typedef NATIVE_UINT ACPI_TBLPTR; -typedef UINT64 ACPI_IO_ADDRESS; -typedef UINT64 ACPI_PHYSICAL_ADDRESS; - -#define ALIGNED_ADDRESS_BOUNDARY 0x00000008 /* No hardware alignment support in IA64 */ -#define ACPI_USE_NATIVE_DIVIDE /* Native 64-bit integer support */ - - -#elif BITS_PER_LONG == 16 -/* - * 16-bit type definitions - */ -typedef unsigned char UINT8; -typedef unsigned char BOOLEAN; -typedef unsigned char UCHAR; -typedef unsigned int UINT16; -typedef long INT32; -typedef int INT16; -typedef unsigned long UINT32; - -typedef struct -{ - UINT32 Lo; - UINT32 Hi; - -} UINT64; - -typedef UINT16 NATIVE_UINT; -typedef INT16 NATIVE_INT; - -typedef UINT32 ACPI_TBLPTR; -typedef UINT32 ACPI_IO_ADDRESS; -typedef char *ACPI_PHYSICAL_ADDRESS; - -#define ALIGNED_ADDRESS_BOUNDARY 0x00000002 -#define _HW_ALIGNMENT_SUPPORT -#define ACPI_USE_NATIVE_DIVIDE /* No 64-bit integers, ok to use native divide */ - -/* - * (16-bit only) internal integers must be 32-bits, so - * 64-bit integers cannot be supported - */ -#define ACPI_NO_INTEGER64_SUPPORT - - -#elif BITS_PER_LONG == 32 -/* - * 32-bit type definitions (default) - */ -typedef unsigned char UINT8; -typedef unsigned char BOOLEAN; -typedef unsigned char UCHAR; -typedef unsigned short UINT16; -typedef int INT32; -typedef unsigned int UINT32; -typedef COMPILER_DEPENDENT_UINT64 UINT64; - -typedef UINT32 NATIVE_UINT; -typedef INT32 NATIVE_INT; - -typedef NATIVE_UINT ACPI_TBLPTR; -typedef UINT32 ACPI_IO_ADDRESS; -typedef UINT64 ACPI_PHYSICAL_ADDRESS; - -#define ALIGNED_ADDRESS_BOUNDARY 0x00000004 -#define _HW_ALIGNMENT_SUPPORT - -#else -#error "unknown BITS_PER_LONG" -#endif - - - -/* - * Miscellaneous common types - */ - -typedef UINT32 UINT32_BIT; -typedef NATIVE_UINT ACPI_PTRDIFF; -typedef char NATIVE_CHAR; - - -/* - * Data type ranges - */ - -#define ACPI_UINT8_MAX (UINT8) 0xFF -#define ACPI_UINT16_MAX (UINT16) 0xFFFF -#define ACPI_UINT32_MAX (UINT32) 0xFFFFFFFF -#define ACPI_UINT64_MAX (UINT64) 0xFFFFFFFFFFFFFFFF - - -#ifdef DEFINE_ALTERNATE_TYPES -/* - * Types used only in translated source - */ -typedef INT32 s32; -typedef UINT8 u8; -typedef UINT16 u16; -typedef UINT32 u32; -typedef UINT64 u64; -#endif -/*! 
[End] no source code translation !*/ - - -/* - * Useful defines - */ - -#ifdef FALSE -#undef FALSE -#endif -#define FALSE (1 == 0) - -#ifdef TRUE -#undef TRUE -#endif -#define TRUE (1 == 1) - -#ifndef NULL -#define NULL (void *) 0 -#endif - - -/* - * Local datatypes - */ - -typedef u32 acpi_status; /* All ACPI Exceptions */ -typedef u32 acpi_name; /* 4-byte ACPI name */ -typedef char* acpi_string; /* Null terminated ASCII string */ -typedef void* acpi_handle; /* Actually a ptr to an Node */ - -typedef struct -{ - u32 lo; - u32 hi; - -} uint64_struct; - -typedef union -{ - u64 full; - uint64_struct part; - -} uint64_overlay; - - -/* - * Acpi integer width. In ACPI version 1, integers are - * 32 bits. In ACPI version 2, integers are 64 bits. - * Note that this pertains to the ACPI integer type only, not - * other integers used in the implementation of the ACPI CA - * subsystem. - */ -#ifdef ACPI_NO_INTEGER64_SUPPORT - -/* 32-bit integers only, no 64-bit support */ - -typedef u32 acpi_integer; -#define ACPI_INTEGER_MAX ACPI_UINT32_MAX -#define ACPI_INTEGER_BIT_SIZE 32 -#define ACPI_MAX_BCD_VALUE 99999999 -#define ACPI_MAX_BCD_DIGITS 8 -#define ACPI_MAX_DECIMAL_DIGITS 10 - -#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 32-bit divide */ - - -#else - -/* 64-bit integers */ - -typedef u64 acpi_integer; -#define ACPI_INTEGER_MAX ACPI_UINT64_MAX -#define ACPI_INTEGER_BIT_SIZE 64 -#define ACPI_MAX_BCD_VALUE 9999999999999999 -#define ACPI_MAX_BCD_DIGITS 16 -#define ACPI_MAX_DECIMAL_DIGITS 19 - -#ifdef _IA64 -#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */ -#endif -#endif - - -/* - * Constants with special meanings - */ - -#define ACPI_ROOT_OBJECT (acpi_handle)(-1) - - -/* - * Initialization sequence - */ -#define ACPI_FULL_INITIALIZATION 0x00 -#define ACPI_NO_ADDRESS_SPACE_INIT 0x01 -#define ACPI_NO_HARDWARE_INIT 0x02 -#define ACPI_NO_EVENT_INIT 0x04 -#define ACPI_NO_ACPI_ENABLE 0x08 -#define ACPI_NO_DEVICE_INIT 0x10 -#define ACPI_NO_OBJECT_INIT 0x20 - -/* - * Initialization state - */ -#define ACPI_INITIALIZED_OK 0x01 - -/* - * Power state values - */ - -#define ACPI_STATE_UNKNOWN (u8) 0xFF - -#define ACPI_STATE_S0 (u8) 0 -#define ACPI_STATE_S1 (u8) 1 -#define ACPI_STATE_S2 (u8) 2 -#define ACPI_STATE_S3 (u8) 3 -#define ACPI_STATE_S4 (u8) 4 -#define ACPI_STATE_S5 (u8) 5 -#define ACPI_S_STATES_MAX ACPI_STATE_S5 -#define ACPI_S_STATE_COUNT 6 - -#define ACPI_STATE_D0 (u8) 0 -#define ACPI_STATE_D1 (u8) 1 -#define ACPI_STATE_D2 (u8) 2 -#define ACPI_STATE_D3 (u8) 3 -#define ACPI_D_STATES_MAX ACPI_STATE_D3 -#define ACPI_D_STATE_COUNT 4 - -/* - * Standard notify values - */ -#define ACPI_NOTIFY_BUS_CHECK (u8) 0 -#define ACPI_NOTIFY_DEVICE_CHECK (u8) 1 -#define ACPI_NOTIFY_DEVICE_WAKE (u8) 2 -#define ACPI_NOTIFY_EJECT_REQUEST (u8) 3 -#define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 4 -#define ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 5 -#define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 6 -#define ACPI_NOTIFY_POWER_FAULT (u8) 7 - - -/* - * Table types. These values are passed to the table related APIs - */ - -typedef u32 acpi_table_type; - -#define ACPI_TABLE_RSDP (acpi_table_type) 0 -#define ACPI_TABLE_DSDT (acpi_table_type) 1 -#define ACPI_TABLE_FADT (acpi_table_type) 2 -#define ACPI_TABLE_FACS (acpi_table_type) 3 -#define ACPI_TABLE_PSDT (acpi_table_type) 4 -#define ACPI_TABLE_SSDT (acpi_table_type) 5 -#define ACPI_TABLE_XSDT (acpi_table_type) 6 -#define ACPI_TABLE_MAX 6 -#define NUM_ACPI_TABLES (ACPI_TABLE_MAX+1) - - -/* - * Types associated with names. 
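
The uint64_overlay union defined above exists so that code built without native 64-bit arithmetic (the ACPI_NO_INTEGER64_SUPPORT case) can still take a 64-bit value apart. A hedged sketch of the usual pattern, assuming the hi/lo layout the union implies on a little-endian build (the function is illustrative, not from this patch):

    /* Sketch: splitting a 64-bit value into halves via uint64_overlay. */
    static void split_integer(u64 value, u32 *hi, u32 *lo)
    {
            uint64_overlay overlay;

            overlay.full = value;
            *hi = overlay.part.hi;
            *lo = overlay.part.lo;
    }
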
The first group of - * values correspond to the definition of the ACPI - * Object_type operator (See the ACPI Spec). Therefore, - * only add to the first group if the spec changes! - * - * Types must be kept in sync with the Acpi_ns_properties - * and Acpi_ns_type_names arrays - */ - -typedef u32 acpi_object_type; -typedef u8 acpi_object_type8; - - -#define ACPI_TYPE_ANY 0 /* 0x00 */ -#define ACPI_TYPE_INTEGER 1 /* 0x01 Byte/Word/Dword/Zero/One/Ones */ -#define ACPI_TYPE_STRING 2 /* 0x02 */ -#define ACPI_TYPE_BUFFER 3 /* 0x03 */ -#define ACPI_TYPE_PACKAGE 4 /* 0x04 Byte_const, multiple Data_term/Constant/Super_name */ -#define ACPI_TYPE_FIELD_UNIT 5 /* 0x05 */ -#define ACPI_TYPE_DEVICE 6 /* 0x06 Name, multiple Node */ -#define ACPI_TYPE_EVENT 7 /* 0x07 */ -#define ACPI_TYPE_METHOD 8 /* 0x08 Name, Byte_const, multiple Code */ -#define ACPI_TYPE_MUTEX 9 /* 0x09 */ -#define ACPI_TYPE_REGION 10 /* 0x0A */ -#define ACPI_TYPE_POWER 11 /* 0x0B Name,Byte_const,Word_const,multi Node */ -#define ACPI_TYPE_PROCESSOR 12 /* 0x0C Name,Byte_const,DWord_const,Byte_const,multi Nm_o */ -#define ACPI_TYPE_THERMAL 13 /* 0x0D Name, multiple Node */ -#define ACPI_TYPE_BUFFER_FIELD 14 /* 0x0E */ -#define ACPI_TYPE_DDB_HANDLE 15 /* 0x0F */ -#define ACPI_TYPE_DEBUG_OBJECT 16 /* 0x10 */ - -#define ACPI_TYPE_MAX 16 - -/* - * This section contains object types that do not relate to the ACPI Object_type operator. - * They are used for various internal purposes only. If new predefined ACPI_TYPEs are - * added (via the ACPI specification), these internal types must move upwards. - * Also, values exceeding the largest official ACPI Object_type must not overlap with - * defined AML opcodes. - */ -#define INTERNAL_TYPE_BEGIN 17 - -#define INTERNAL_TYPE_REGION_FIELD 17 /* 0x11 */ -#define INTERNAL_TYPE_BANK_FIELD 18 /* 0x12 */ -#define INTERNAL_TYPE_INDEX_FIELD 19 /* 0x13 */ -#define INTERNAL_TYPE_REFERENCE 20 /* 0x14 Arg#, Local#, Name, Debug; used only in descriptors */ -#define INTERNAL_TYPE_ALIAS 21 /* 0x15 */ -#define INTERNAL_TYPE_NOTIFY 22 /* 0x16 */ -#define INTERNAL_TYPE_ADDRESS_HANDLER 23 /* 0x17 */ -#define INTERNAL_TYPE_RESOURCE 24 /* 0x18 */ -#define INTERNAL_TYPE_RESOURCE_FIELD 25 /* 0x19 */ - - -#define INTERNAL_TYPE_NODE_MAX 25 - -/* These are pseudo-types because there are never any namespace nodes with these types */ - -#define INTERNAL_TYPE_FIELD_DEFN 26 /* 0x1A Name, Byte_const, multiple Field_element */ -#define INTERNAL_TYPE_BANK_FIELD_DEFN 27 /* 0x1B 2 Name,DWord_const,Byte_const,multi Field_element */ -#define INTERNAL_TYPE_INDEX_FIELD_DEFN 28 /* 0x1C 2 Name, Byte_const, multiple Field_element */ -#define INTERNAL_TYPE_IF 29 /* 0x1D */ -#define INTERNAL_TYPE_ELSE 30 /* 0x1E */ -#define INTERNAL_TYPE_WHILE 31 /* 0x1F */ -#define INTERNAL_TYPE_SCOPE 32 /* 0x20 Name, multiple Node */ -#define INTERNAL_TYPE_DEF_ANY 33 /* 0x21 type is Any, suppress search of enclosing scopes */ -#define INTERNAL_TYPE_EXTRA 34 /* 0x22 */ - -#define INTERNAL_TYPE_MAX 34 - -#define INTERNAL_TYPE_INVALID 35 -#define ACPI_TYPE_NOT_FOUND 0xFF - - -/* - * Bitmapped ACPI types - * Used internally only - */ -#define ACPI_BTYPE_ANY 0x00000000 -#define ACPI_BTYPE_INTEGER 0x00000001 -#define ACPI_BTYPE_STRING 0x00000002 -#define ACPI_BTYPE_BUFFER 0x00000004 -#define ACPI_BTYPE_PACKAGE 0x00000008 -#define ACPI_BTYPE_FIELD_UNIT 0x00000010 -#define ACPI_BTYPE_DEVICE 0x00000020 -#define ACPI_BTYPE_EVENT 0x00000040 -#define ACPI_BTYPE_METHOD 0x00000080 -#define ACPI_BTYPE_MUTEX 0x00000100 -#define ACPI_BTYPE_REGION 0x00000200 -#define 
ACPI_BTYPE_POWER 0x00000400 -#define ACPI_BTYPE_PROCESSOR 0x00000800 -#define ACPI_BTYPE_THERMAL 0x00001000 -#define ACPI_BTYPE_BUFFER_FIELD 0x00002000 -#define ACPI_BTYPE_DDB_HANDLE 0x00004000 -#define ACPI_BTYPE_DEBUG_OBJECT 0x00008000 -#define ACPI_BTYPE_REFERENCE 0x00010000 -#define ACPI_BTYPE_RESOURCE 0x00020000 - -#define ACPI_BTYPE_COMPUTE_DATA (ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING | ACPI_BTYPE_BUFFER) - -#define ACPI_BTYPE_DATA (ACPI_BTYPE_COMPUTE_DATA | ACPI_BTYPE_PACKAGE) -#define ACPI_BTYPE_DATA_REFERENCE (ACPI_BTYPE_DATA | ACPI_BTYPE_REFERENCE | ACPI_BTYPE_DDB_HANDLE) -#define ACPI_BTYPE_DEVICE_OBJECTS (ACPI_BTYPE_DEVICE | ACPI_BTYPE_THERMAL | ACPI_BTYPE_PROCESSOR) -#define ACPI_BTYPE_OBJECTS_AND_REFS 0x0001FFFF /* ARG or LOCAL */ -#define ACPI_BTYPE_ALL_OBJECTS 0x0000FFFF - - -/* - * Acpi_event Types: - * ------------ - * Fixed & general purpose... - */ - -typedef u32 acpi_event_type; - -#define ACPI_EVENT_FIXED (acpi_event_type) 0 -#define ACPI_EVENT_GPE (acpi_event_type) 1 - -/* - * Fixed events - */ - -#define ACPI_EVENT_PMTIMER (acpi_event_type) 0 - /* - * There's no bus master event so index 1 is used for IRQ's that are not - * handled by the SCI handler - */ -#define ACPI_EVENT_NOT_USED (acpi_event_type) 1 -#define ACPI_EVENT_GLOBAL (acpi_event_type) 2 -#define ACPI_EVENT_POWER_BUTTON (acpi_event_type) 3 -#define ACPI_EVENT_SLEEP_BUTTON (acpi_event_type) 4 -#define ACPI_EVENT_RTC (acpi_event_type) 5 -#define ACPI_EVENT_GENERAL (acpi_event_type) 6 -#define ACPI_EVENT_MAX 6 -#define ACPI_NUM_FIXED_EVENTS (acpi_event_type) 7 - -#define ACPI_GPE_INVALID 0xFF -#define ACPI_GPE_MAX 0xFF -#define ACPI_NUM_GPE 256 - -#define ACPI_EVENT_LEVEL_TRIGGERED (acpi_event_type) 1 -#define ACPI_EVENT_EDGE_TRIGGERED (acpi_event_type) 2 - -/* - * GPEs - */ -#define ACPI_EVENT_ENABLE 0x1 -#define ACPI_EVENT_WAKE_ENABLE 0x2 - -#define ACPI_EVENT_DISABLE 0x1 -#define ACPI_EVENT_WAKE_DISABLE 0x2 - - -/* - * Acpi_event Status: - * ------------- - * The encoding of acpi_event_status is illustrated below. - * Note that a set bit (1) indicates the property is TRUE - * (e.g. if bit 0 is set then the event is enabled). - * +-------------+-+-+-+ - * | Bits 31:3 |2|1|0| - * +-------------+-+-+-+ - * | | | | - * | | | +- Enabled? - * | | +--- Enabled for wake? - * | +----- Set? 
- * +----------- - */ -typedef u32 acpi_event_status; - -#define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00 -#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 -#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 -#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 - - -/* Notify types */ - -#define ACPI_SYSTEM_NOTIFY 0 -#define ACPI_DEVICE_NOTIFY 1 -#define ACPI_MAX_NOTIFY_HANDLER_TYPE 1 - -#define MAX_SYS_NOTIFY 0x7f - - -/* Address Space (Operation Region) Types */ - -typedef u8 ACPI_ADR_SPACE_TYPE; - -#define ACPI_ADR_SPACE_SYSTEM_MEMORY (ACPI_ADR_SPACE_TYPE) 0 -#define ACPI_ADR_SPACE_SYSTEM_IO (ACPI_ADR_SPACE_TYPE) 1 -#define ACPI_ADR_SPACE_PCI_CONFIG (ACPI_ADR_SPACE_TYPE) 2 -#define ACPI_ADR_SPACE_EC (ACPI_ADR_SPACE_TYPE) 3 -#define ACPI_ADR_SPACE_SMBUS (ACPI_ADR_SPACE_TYPE) 4 -#define ACPI_ADR_SPACE_CMOS (ACPI_ADR_SPACE_TYPE) 5 -#define ACPI_ADR_SPACE_PCI_BAR_TARGET (ACPI_ADR_SPACE_TYPE) 6 - - -/* - * External ACPI object definition - */ - -typedef union acpi_obj -{ - acpi_object_type type; /* See definition of Acpi_ns_type for values */ - struct - { - acpi_object_type type; - acpi_integer value; /* The actual number */ - } integer; - - struct - { - acpi_object_type type; - u32 length; /* # of bytes in string, excluding trailing null */ - NATIVE_CHAR *pointer; /* points to the string value */ - } string; - - struct - { - acpi_object_type type; - u32 length; /* # of bytes in buffer */ - u8 *pointer; /* points to the buffer */ - } buffer; - - struct - { - acpi_object_type type; - u32 fill1; - acpi_handle handle; /* object reference */ - } reference; - - struct - { - acpi_object_type type; - u32 count; /* # of elements in package */ - union acpi_obj *elements; /* Pointer to an array of ACPI_OBJECTs */ - } package; - - struct - { - acpi_object_type type; - u32 proc_id; - ACPI_IO_ADDRESS pblk_address; - u32 pblk_length; - } processor; - - struct - { - acpi_object_type type; - u32 system_level; - u32 resource_order; - } power_resource; - -} acpi_object, *PACPI_OBJECT; - - -/* - * List of objects, used as a parameter list for control method evaluation - */ - -typedef struct acpi_obj_list -{ - u32 count; - acpi_object *pointer; - -} acpi_object_list, *PACPI_OBJECT_LIST; - - -/* - * Miscellaneous common Data Structures used by the interfaces - */ - -typedef struct -{ - u32 length; /* Length in bytes of the buffer */ - void *pointer; /* pointer to buffer */ - -} acpi_buffer; - - -/* - * Name_type for Acpi_get_name - */ - -#define ACPI_FULL_PATHNAME 0 -#define ACPI_SINGLE_NAME 1 -#define ACPI_NAME_TYPE_MAX 1 - - -/* - * Structure and flags for Acpi_get_system_info - */ - -#define SYS_MODE_UNKNOWN 0x0000 -#define SYS_MODE_ACPI 0x0001 -#define SYS_MODE_LEGACY 0x0002 -#define SYS_MODES_MASK 0x0003 - - -/* - * ACPI Table Info. 
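
The external acpi_object union and acpi_object_list above are the calling convention for handing arguments into control-method evaluation. A sketch under the assumption of a caller passing a single integer argument (names are illustrative; the evaluation interface itself is declared elsewhere):

    /* Illustrative only: packaging one integer argument using the
     * external object types defined above. */
    static void fill_one_integer_arg(acpi_object *arg, acpi_object_list *args)
    {
            arg->type = ACPI_TYPE_INTEGER;
            arg->integer.value = 1;

            args->count = 1;
            args->pointer = arg;    /* array of one acpi_object */
    }
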
One per ACPI table _type_ - */ -typedef struct acpi_table_info -{ - u32 count; - -} acpi_table_info; - - -/* - * System info returned by Acpi_get_system_info() - */ - -typedef struct _acpi_sys_info -{ - u32 acpi_ca_version; - u32 flags; - u32 timer_resolution; - u32 reserved1; - u32 reserved2; - u32 debug_level; - u32 debug_layer; - u32 num_table_types; - acpi_table_info table_info [NUM_ACPI_TABLES]; - -} acpi_system_info; - - -/* - * Various handlers and callback procedures - */ - -typedef -u32 (*acpi_event_handler) ( - void *context); - -typedef -void (*acpi_gpe_handler) ( - void *context); - -typedef -void (*acpi_notify_handler) ( - acpi_handle device, - u32 value, - void *context); - - -/* Address Spaces (Operation Regions */ - -#define ACPI_READ_ADR_SPACE 1 -#define ACPI_WRITE_ADR_SPACE 2 - -typedef -acpi_status (*acpi_adr_space_handler) ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context); - -#define ACPI_DEFAULT_HANDLER ((acpi_adr_space_handler) NULL) - - -typedef -acpi_status (*acpi_adr_space_setup) ( - acpi_handle region_handle, - u32 function, - void *handler_context, - void **region_context); - -#define ACPI_REGION_ACTIVATE 0 -#define ACPI_REGION_DEACTIVATE 1 - -typedef -acpi_status (*acpi_walk_callback) ( - acpi_handle obj_handle, - u32 nesting_level, - void *context, - void **return_value); - - -/* Interrupt handler return values */ - -#define INTERRUPT_NOT_HANDLED 0x00 -#define INTERRUPT_HANDLED 0x01 - - -/* Structure and flags for Acpi_get_device_info */ - -#define ACPI_VALID_HID 0x1 -#define ACPI_VALID_UID 0x2 -#define ACPI_VALID_ADR 0x4 -#define ACPI_VALID_STA 0x8 - - -#define ACPI_COMMON_OBJ_INFO \ - acpi_object_type type; /* ACPI object type */ \ - acpi_name name /* ACPI object Name */ - - -typedef struct -{ - ACPI_COMMON_OBJ_INFO; -} acpi_obj_info_header; - - -typedef struct -{ - ACPI_COMMON_OBJ_INFO; - - u32 valid; /* Are the next bits legit? */ - NATIVE_CHAR hardware_id[9]; /* _HID value if any */ - NATIVE_CHAR unique_id[9]; /* _UID value if any */ - acpi_integer address; /* _ADR value if any */ - u32 current_status; /* _STA value */ -} acpi_device_info; - - -/* Context structs for address space handlers */ - -typedef struct -{ - u16 segment; - u16 bus; - u16 device; - u16 function; -} acpi_pci_id; - - -typedef struct -{ - ACPI_PHYSICAL_ADDRESS mapped_physical_address; - u8 *mapped_logical_address; - u32 mapped_length; -} acpi_mem_space_context; - - -/* Sleep states */ - -#define ACPI_NUM_SLEEP_STATES 7 - - -/* - * Definitions for Resource Attributes - */ - -/* - * Memory Attributes - */ -#define READ_ONLY_MEMORY (u8) 0x00 -#define READ_WRITE_MEMORY (u8) 0x01 - -#define NON_CACHEABLE_MEMORY (u8) 0x00 -#define CACHABLE_MEMORY (u8) 0x01 -#define WRITE_COMBINING_MEMORY (u8) 0x02 -#define PREFETCHABLE_MEMORY (u8) 0x03 - -/* - * IO Attributes - * The ISA IO ranges are: n000-n0FFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh. - * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cD0-n_fFFh. 
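
The handler typedefs above are the hook points for operation-region drivers. Here is a skeleton that type-checks against acpi_adr_space_handler; the body is a placeholder assumption, not code from this patch:

    /* Skeleton operation-region handler matching the
     * acpi_adr_space_handler typedef declared above. */
    static acpi_status
    sample_region_handler(u32 function, ACPI_PHYSICAL_ADDRESS address,
                          u32 bit_width, u32 *value,
                          void *handler_context, void *region_context)
    {
            if (function == ACPI_READ_ADR_SPACE)
                    *value = 0;     /* read bit_width bits at address */
            else if (function == ACPI_WRITE_ADR_SPACE)
                    ;               /* write low bit_width bits of *value */

            return 0;               /* AE_OK */
    }
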
- */ -#define NON_ISA_ONLY_RANGES (u8) 0x01 -#define ISA_ONLY_RANGES (u8) 0x02 -#define ENTIRE_RANGE (NON_ISA_ONLY_RANGES | ISA_ONLY_RANGES) - -/* - * IO Port Descriptor Decode - */ -#define DECODE_10 (u8) 0x00 /* 10-bit IO address decode */ -#define DECODE_16 (u8) 0x01 /* 16-bit IO address decode */ - -/* - * IRQ Attributes - */ -#define EDGE_SENSITIVE (u8) 0x00 -#define LEVEL_SENSITIVE (u8) 0x01 - -#define ACTIVE_HIGH (u8) 0x00 -#define ACTIVE_LOW (u8) 0x01 - -#define EXCLUSIVE (u8) 0x00 -#define SHARED (u8) 0x01 - -/* - * DMA Attributes - */ -#define COMPATIBILITY (u8) 0x00 -#define TYPE_A (u8) 0x01 -#define TYPE_B (u8) 0x02 -#define TYPE_F (u8) 0x03 - -#define NOT_BUS_MASTER (u8) 0x00 -#define BUS_MASTER (u8) 0x01 - -#define TRANSFER_8 (u8) 0x00 -#define TRANSFER_8_16 (u8) 0x01 -#define TRANSFER_16 (u8) 0x02 - -/* - * Start Dependent Functions Priority definitions - */ -#define GOOD_CONFIGURATION (u8) 0x00 -#define ACCEPTABLE_CONFIGURATION (u8) 0x01 -#define SUB_OPTIMAL_CONFIGURATION (u8) 0x02 - -/* - * 16, 32 and 64-bit Address Descriptor resource types - */ -#define MEMORY_RANGE (u8) 0x00 -#define IO_RANGE (u8) 0x01 -#define BUS_NUMBER_RANGE (u8) 0x02 - -#define ADDRESS_NOT_FIXED (u8) 0x00 -#define ADDRESS_FIXED (u8) 0x01 - -#define POS_DECODE (u8) 0x00 -#define SUB_DECODE (u8) 0x01 - -#define PRODUCER (u8) 0x00 -#define CONSUMER (u8) 0x01 - - -/* - * Structures used to describe device resources - */ -typedef struct -{ - u32 edge_level; - u32 active_high_low; - u32 shared_exclusive; - u32 number_of_interrupts; - u32 interrupts[1]; - -} acpi_resource_irq; - -typedef struct -{ - u32 type; - u32 bus_master; - u32 transfer; - u32 number_of_channels; - u32 channels[1]; - -} acpi_resource_dma; - -typedef struct -{ - u32 compatibility_priority; - u32 performance_robustness; - -} acpi_resource_start_dpf; - -/* - * END_DEPENDENT_FUNCTIONS_RESOURCE struct is not - * needed because it has no fields - */ - -typedef struct -{ - u32 io_decode; - u32 min_base_address; - u32 max_base_address; - u32 alignment; - u32 range_length; - -} acpi_resource_io; - -typedef struct -{ - u32 base_address; - u32 range_length; - -} acpi_resource_fixed_io; - -typedef struct -{ - u32 length; - u8 reserved[1]; - -} acpi_resource_vendor; - -typedef struct -{ - u32 read_write_attribute; - u32 min_base_address; - u32 max_base_address; - u32 alignment; - u32 range_length; - -} acpi_resource_mem24; - -typedef struct -{ - u32 read_write_attribute; - u32 min_base_address; - u32 max_base_address; - u32 alignment; - u32 range_length; - -} acpi_resource_mem32; - -typedef struct -{ - u32 read_write_attribute; - u32 range_base_address; - u32 range_length; - -} acpi_resource_fixed_mem32; - -typedef struct -{ - u16 cache_attribute; - u16 read_write_attribute; - -} acpi_memory_attribute; - -typedef struct -{ - u16 range_attribute; - u16 reserved; - -} acpi_io_attribute; - -typedef struct -{ - u16 reserved1; - u16 reserved2; - -} acpi_bus_attribute; - -typedef union -{ - acpi_memory_attribute memory; - acpi_io_attribute io; - acpi_bus_attribute bus; - -} acpi_resource_attribute; - -typedef struct -{ - u32 index; - u32 string_length; - NATIVE_CHAR *string_ptr; - -} acpi_resource_source; - -typedef struct -{ - u32 resource_type; - u32 producer_consumer; - u32 decode; - u32 min_address_fixed; - u32 max_address_fixed; - acpi_resource_attribute attribute; - u32 granularity; - u32 min_address_range; - u32 max_address_range; - u32 address_translation_offset; - u32 address_length; - acpi_resource_source resource_source; - -} 
acpi_resource_address16; - -typedef struct -{ - u32 resource_type; - u32 producer_consumer; - u32 decode; - u32 min_address_fixed; - u32 max_address_fixed; - acpi_resource_attribute attribute; - u32 granularity; - u32 min_address_range; - u32 max_address_range; - u32 address_translation_offset; - u32 address_length; - acpi_resource_source resource_source; - -} acpi_resource_address32; - -typedef struct -{ - u32 resource_type; - u32 producer_consumer; - u32 decode; - u32 min_address_fixed; - u32 max_address_fixed; - acpi_resource_attribute attribute; - u64 granularity; - u64 min_address_range; - u64 max_address_range; - u64 address_translation_offset; - u64 address_length; - acpi_resource_source resource_source; - -} acpi_resource_address64; - -typedef struct -{ - u32 producer_consumer; - u32 edge_level; - u32 active_high_low; - u32 shared_exclusive; - u32 number_of_interrupts; - acpi_resource_source resource_source; - u32 interrupts[1]; - -} acpi_resource_ext_irq; - - -/* ACPI_RESOURCE_TYPEs */ - -#define ACPI_RSTYPE_IRQ 0 -#define ACPI_RSTYPE_DMA 1 -#define ACPI_RSTYPE_START_DPF 2 -#define ACPI_RSTYPE_END_DPF 3 -#define ACPI_RSTYPE_IO 4 -#define ACPI_RSTYPE_FIXED_IO 5 -#define ACPI_RSTYPE_VENDOR 6 -#define ACPI_RSTYPE_END_TAG 7 -#define ACPI_RSTYPE_MEM24 8 -#define ACPI_RSTYPE_MEM32 9 -#define ACPI_RSTYPE_FIXED_MEM32 10 -#define ACPI_RSTYPE_ADDRESS16 11 -#define ACPI_RSTYPE_ADDRESS32 12 -#define ACPI_RSTYPE_ADDRESS64 13 -#define ACPI_RSTYPE_EXT_IRQ 14 - -typedef u32 acpi_resource_type; - -typedef union -{ - acpi_resource_irq irq; - acpi_resource_dma dma; - acpi_resource_start_dpf start_dpf; - acpi_resource_io io; - acpi_resource_fixed_io fixed_io; - acpi_resource_vendor vendor_specific; - acpi_resource_mem24 memory24; - acpi_resource_mem32 memory32; - acpi_resource_fixed_mem32 fixed_memory32; - acpi_resource_address16 address16; - acpi_resource_address32 address32; - acpi_resource_address64 address64; - acpi_resource_ext_irq extended_irq; - -} acpi_resource_data; - -typedef struct acpi_resource -{ - acpi_resource_type id; - u32 length; - acpi_resource_data data; - -} acpi_resource; - -#define ACPI_RESOURCE_LENGTH 12 -#define ACPI_RESOURCE_LENGTH_NO_DATA 8 /* Id + Length fields */ - -#define SIZEOF_RESOURCE(type) (ACPI_RESOURCE_LENGTH_NO_DATA + sizeof (type)) - -#define NEXT_RESOURCE(res) (acpi_resource *)((u8 *) res + res->length) - - -/* - * END: Definitions for Resource Attributes - */ - - -typedef struct pci_routing_table -{ - u32 length; - u32 pin; - acpi_integer address; /* here for 64-bit alignment */ - u32 source_index; - NATIVE_CHAR source[4]; /* pad to 64 bits so sizeof() works in all cases */ - -} pci_routing_table; - - -/* - * END: Definitions for PCI Routing tables - */ - -#endif /* __ACTYPES_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/acutils.h linux.21rc5-ac2/drivers/acpi/include/acutils.h --- linux.21rc5/drivers/acpi/include/acutils.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/acutils.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,662 +0,0 @@ -/****************************************************************************** - * - * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures - * $Revision: 117 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. 
Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ACUTILS_H -#define _ACUTILS_H - - -typedef -acpi_status (*ACPI_PKG_CALLBACK) ( - u8 object_type, - acpi_operand_object *source_object, - acpi_generic_state *state, - void *context); - - -acpi_status -acpi_ut_walk_package_tree ( - acpi_operand_object *source_object, - void *target_object, - ACPI_PKG_CALLBACK walk_callback, - void *context); - - -typedef struct acpi_pkg_info -{ - u8 *free_space; - u32 length; - u32 object_space; - u32 num_packages; -} acpi_pkg_info; - -#define REF_INCREMENT (u16) 0 -#define REF_DECREMENT (u16) 1 -#define REF_FORCE_DELETE (u16) 2 - -/* Acpi_ut_dump_buffer */ - -#define DB_BYTE_DISPLAY 1 -#define DB_WORD_DISPLAY 2 -#define DB_DWORD_DISPLAY 4 -#define DB_QWORD_DISPLAY 8 - - -/* Global initialization interfaces */ - -void -acpi_ut_init_globals ( - void); - -void -acpi_ut_terminate ( - void); - - -/* - * Ut_init - miscellaneous initialization and shutdown - */ - -acpi_status -acpi_ut_hardware_initialize ( - void); - -acpi_status -acpi_ut_subsystem_shutdown ( - void); - -acpi_status -acpi_ut_validate_fadt ( - void); - -/* - * Ut_global - Global data structures and procedures - */ - -#ifdef ACPI_DEBUG - -NATIVE_CHAR * -acpi_ut_get_mutex_name ( - u32 mutex_id); - -NATIVE_CHAR * -acpi_ut_get_type_name ( - u32 type); - -NATIVE_CHAR * -acpi_ut_get_region_name ( - u8 space_id); - -#endif - - -u8 -acpi_ut_hex_to_ascii_char ( - acpi_integer integer, - u32 position); - -u8 -acpi_ut_valid_object_type ( - u32 type); - -acpi_owner_id -acpi_ut_allocate_owner_id ( - u32 id_type); - - -/* - * Ut_clib - Local implementations of C library functions - */ - -#ifndef ACPI_USE_SYSTEM_CLIBRARY - -u32 -acpi_ut_strlen ( - const NATIVE_CHAR *string); - -NATIVE_CHAR * -acpi_ut_strcpy ( - NATIVE_CHAR *dst_string, - const NATIVE_CHAR *src_string); - -NATIVE_CHAR * -acpi_ut_strncpy ( - NATIVE_CHAR *dst_string, - const NATIVE_CHAR *src_string, - NATIVE_UINT count); - -u32 -acpi_ut_strncmp ( - const NATIVE_CHAR *string1, - const NATIVE_CHAR *string2, - NATIVE_UINT count); - -u32 -acpi_ut_strcmp ( - const NATIVE_CHAR *string1, - const NATIVE_CHAR *string2); - -NATIVE_CHAR * -acpi_ut_strcat ( - NATIVE_CHAR *dst_string, - const NATIVE_CHAR *src_string); - -NATIVE_CHAR * -acpi_ut_strncat ( - NATIVE_CHAR *dst_string, - const NATIVE_CHAR *src_string, - NATIVE_UINT count); - -u32 -acpi_ut_strtoul ( - const NATIVE_CHAR *string, - NATIVE_CHAR **terminator, - u32 base); - -NATIVE_CHAR * -acpi_ut_strstr ( - NATIVE_CHAR *string1, - NATIVE_CHAR *string2); - -void * -acpi_ut_memcpy ( - void *dest, - const void *src, - NATIVE_UINT count); - -void * -acpi_ut_memset ( - void *dest, - NATIVE_UINT value, - NATIVE_UINT count); - -u32 -acpi_ut_to_upper ( - u32 c); - -u32 -acpi_ut_to_lower ( - u32 c); - -#endif /* ACPI_USE_SYSTEM_CLIBRARY */ - -/* - * Ut_copy - Object 
construction and conversion interfaces - */ - -acpi_status -acpi_ut_build_simple_object( - acpi_operand_object *obj, - acpi_object *user_obj, - u8 *data_space, - u32 *buffer_space_used); - -acpi_status -acpi_ut_build_package_object ( - acpi_operand_object *obj, - u8 *buffer, - u32 *space_used); - -acpi_status -acpi_ut_copy_iobject_to_eobject ( - acpi_operand_object *obj, - acpi_buffer *ret_buffer); - -acpi_status -acpi_ut_copy_esimple_to_isimple( - acpi_object *user_obj, - acpi_operand_object **return_obj); - -acpi_status -acpi_ut_copy_eobject_to_iobject ( - acpi_object *obj, - acpi_operand_object **internal_obj); - -acpi_status -acpi_ut_copy_isimple_to_isimple ( - acpi_operand_object *source_obj, - acpi_operand_object *dest_obj); - -acpi_status -acpi_ut_copy_ipackage_to_ipackage ( - acpi_operand_object *source_obj, - acpi_operand_object *dest_obj, - acpi_walk_state *walk_state); - - -/* - * Ut_create - Object creation - */ - -acpi_status -acpi_ut_update_object_reference ( - acpi_operand_object *object, - u16 action); - - -/* - * Ut_debug - Debug interfaces - */ - -void -acpi_ut_init_stack_ptr_trace ( - void); - -void -acpi_ut_track_stack_ptr ( - void); - -void -acpi_ut_trace ( - u32 line_number, - acpi_debug_print_info *dbg_info); - -void -acpi_ut_trace_ptr ( - u32 line_number, - acpi_debug_print_info *dbg_info, - void *pointer); - -void -acpi_ut_trace_u32 ( - u32 line_number, - acpi_debug_print_info *dbg_info, - u32 integer); - -void -acpi_ut_trace_str ( - u32 line_number, - acpi_debug_print_info *dbg_info, - NATIVE_CHAR *string); - -void -acpi_ut_exit ( - u32 line_number, - acpi_debug_print_info *dbg_info); - -void -acpi_ut_status_exit ( - u32 line_number, - acpi_debug_print_info *dbg_info, - acpi_status status); - -void -acpi_ut_value_exit ( - u32 line_number, - acpi_debug_print_info *dbg_info, - acpi_integer value); - -void -acpi_ut_ptr_exit ( - u32 line_number, - acpi_debug_print_info *dbg_info, - u8 *ptr); - -void -acpi_ut_report_info ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id); - -void -acpi_ut_report_error ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id); - -void -acpi_ut_report_warning ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id); - -void -acpi_ut_dump_buffer ( - u8 *buffer, - u32 count, - u32 display, - u32 component_id); - -void -acpi_ut_debug_print ( - u32 requested_debug_level, - u32 line_number, - acpi_debug_print_info *dbg_info, - char *format, - ...) ACPI_PRINTF_LIKE_FUNC; - -void -acpi_ut_debug_print_raw ( - u32 requested_debug_level, - u32 line_number, - acpi_debug_print_info *dbg_info, - char *format, - ...) 
ACPI_PRINTF_LIKE_FUNC; - - -/* - * Ut_delete - Object deletion - */ - -void -acpi_ut_delete_internal_obj ( - acpi_operand_object *object); - -void -acpi_ut_delete_internal_package_object ( - acpi_operand_object *object); - -void -acpi_ut_delete_internal_simple_object ( - acpi_operand_object *object); - -acpi_status -acpi_ut_delete_internal_object_list ( - acpi_operand_object **obj_list); - - -/* - * Ut_eval - object evaluation - */ - -/* Method name strings */ - -#define METHOD_NAME__HID "_HID" -#define METHOD_NAME__UID "_UID" -#define METHOD_NAME__ADR "_ADR" -#define METHOD_NAME__STA "_STA" -#define METHOD_NAME__REG "_REG" -#define METHOD_NAME__SEG "_SEG" -#define METHOD_NAME__BBN "_BBN" -#define METHOD_NAME__PRT "_PRT" - - -acpi_status -acpi_ut_evaluate_numeric_object ( - NATIVE_CHAR *object_name, - acpi_namespace_node *device_node, - acpi_integer *address); - -acpi_status -acpi_ut_execute_HID ( - acpi_namespace_node *device_node, - acpi_device_id *hid); - -acpi_status -acpi_ut_execute_STA ( - acpi_namespace_node *device_node, - u32 *status_flags); - -acpi_status -acpi_ut_execute_UID ( - acpi_namespace_node *device_node, - acpi_device_id *uid); - - -/* - * Ut_mutex - mutual exclusion interfaces - */ - -acpi_status -acpi_ut_mutex_initialize ( - void); - -void -acpi_ut_mutex_terminate ( - void); - -acpi_status -acpi_ut_create_mutex ( - ACPI_MUTEX_HANDLE mutex_id); - -acpi_status -acpi_ut_delete_mutex ( - ACPI_MUTEX_HANDLE mutex_id); - -acpi_status -acpi_ut_acquire_mutex ( - ACPI_MUTEX_HANDLE mutex_id); - -acpi_status -acpi_ut_release_mutex ( - ACPI_MUTEX_HANDLE mutex_id); - - -/* - * Ut_object - internal object create/delete/cache routines - */ - -acpi_operand_object * -acpi_ut_create_internal_object_dbg ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id, - acpi_object_type8 type); - -void * -acpi_ut_allocate_object_desc_dbg ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id); - -#define acpi_ut_create_internal_object(t) acpi_ut_create_internal_object_dbg (_THIS_MODULE,__LINE__,_COMPONENT,t) -#define acpi_ut_allocate_object_desc() acpi_ut_allocate_object_desc_dbg (_THIS_MODULE,__LINE__,_COMPONENT) - -void -acpi_ut_delete_object_desc ( - acpi_operand_object *object); - -u8 -acpi_ut_valid_internal_object ( - void *object); - - -/* - * Ut_ref_cnt - Object reference count management - */ - -void -acpi_ut_add_reference ( - acpi_operand_object *object); - -void -acpi_ut_remove_reference ( - acpi_operand_object *object); - -/* - * Ut_size - Object size routines - */ - -acpi_status -acpi_ut_get_simple_object_size ( - acpi_operand_object *obj, - u32 *obj_length); - -acpi_status -acpi_ut_get_package_object_size ( - acpi_operand_object *obj, - u32 *obj_length); - -acpi_status -acpi_ut_get_object_size( - acpi_operand_object *obj, - u32 *obj_length); - - -/* - * Ut_state - Generic state creation/cache routines - */ - -void -acpi_ut_push_generic_state ( - acpi_generic_state **list_head, - acpi_generic_state *state); - -acpi_generic_state * -acpi_ut_pop_generic_state ( - acpi_generic_state **list_head); - - -acpi_generic_state * -acpi_ut_create_generic_state ( - void); - -acpi_generic_state * -acpi_ut_create_update_state ( - acpi_operand_object *object, - u16 action); - -acpi_generic_state * -acpi_ut_create_pkg_state ( - void *internal_object, - void *external_object, - u16 index); - -acpi_status -acpi_ut_create_update_state_and_push ( - acpi_operand_object *object, - u16 action, - acpi_generic_state **state_list); - -acpi_status -acpi_ut_create_pkg_state_and_push ( - 
void *internal_object, - void *external_object, - u16 index, - acpi_generic_state **state_list); - -acpi_generic_state * -acpi_ut_create_control_state ( - void); - -void -acpi_ut_delete_generic_state ( - acpi_generic_state *state); - -void -acpi_ut_delete_generic_state_cache ( - void); - -void -acpi_ut_delete_object_cache ( - void); - -/* - * utmisc - */ - -acpi_status -acpi_ut_divide ( - acpi_integer *in_dividend, - acpi_integer *in_divisor, - acpi_integer *out_quotient, - acpi_integer *out_remainder); - -acpi_status -acpi_ut_short_divide ( - acpi_integer *in_dividend, - u32 divisor, - acpi_integer *out_quotient, - u32 *out_remainder); - -u8 -acpi_ut_valid_acpi_name ( - u32 name); - -u8 -acpi_ut_valid_acpi_character ( - NATIVE_CHAR character); - -NATIVE_CHAR * -acpi_ut_strupr ( - NATIVE_CHAR *src_string); - -acpi_status -acpi_ut_resolve_package_references ( - acpi_operand_object *obj_desc); - - -#ifdef ACPI_DEBUG -void -acpi_ut_display_init_pathname ( - acpi_handle obj_handle, - char *path); - -#endif - - -/* - * Utalloc - memory allocation and object caching - */ - -void * -acpi_ut_acquire_from_cache ( - u32 list_id); - -void -acpi_ut_release_to_cache ( - u32 list_id, - void *object); - -void -acpi_ut_delete_generic_cache ( - u32 list_id); - - -/* Debug Memory allocation functions */ - -void * -acpi_ut_allocate ( - u32 size, - u32 component, - NATIVE_CHAR *module, - u32 line); - -void * -acpi_ut_callocate ( - u32 size, - u32 component, - NATIVE_CHAR *module, - u32 line); - -void -acpi_ut_free ( - void *address, - u32 component, - NATIVE_CHAR *module, - u32 line); - -#ifdef ACPI_DBG_TRACK_ALLOCATIONS -void -acpi_ut_dump_allocation_info ( - void); - -void -acpi_ut_dump_allocations ( - u32 component, - NATIVE_CHAR *module); -#endif - - -#endif /* _ACUTILS_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/amlcode.h linux.21rc5-ac2/drivers/acpi/include/amlcode.h --- linux.21rc5/drivers/acpi/include/amlcode.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/amlcode.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,459 +0,0 @@ -/****************************************************************************** - * - * Name: amlcode.h - Definitions for AML, as included in "definition blocks" - * Declarations and definitions contained herein are derived - * directly from the ACPI specification. - * $Revision: 58 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __AMLCODE_H__ -#define __AMLCODE_H__ - - -/* primary opcodes */ - -#define AML_NULL_CHAR (u16) 0x00 - -#define AML_ZERO_OP (u16) 0x00 -#define AML_ONE_OP (u16) 0x01 -#define AML_UNASSIGNED (u16) 0x02 -#define AML_ALIAS_OP (u16) 0x06 -#define AML_NAME_OP (u16) 0x08 -#define AML_BYTE_OP (u16) 0x0a -#define AML_WORD_OP (u16) 0x0b -#define AML_DWORD_OP (u16) 0x0c -#define AML_STRING_OP (u16) 0x0d -#define AML_QWORD_OP (u16) 0x0e /* ACPI 2.0 */ -#define AML_SCOPE_OP (u16) 0x10 -#define AML_BUFFER_OP (u16) 0x11 -#define AML_PACKAGE_OP (u16) 0x12 -#define AML_VAR_PACKAGE_OP (u16) 0x13 /* ACPI 2.0 */ -#define AML_METHOD_OP (u16) 0x14 -#define AML_DUAL_NAME_PREFIX (u16) 0x2e -#define AML_MULTI_NAME_PREFIX_OP (u16) 0x2f -#define AML_NAME_CHAR_SUBSEQ (u16) 0x30 -#define AML_NAME_CHAR_FIRST (u16) 0x41 -#define AML_OP_PREFIX (u16) 0x5b -#define AML_ROOT_PREFIX (u16) 0x5c -#define AML_PARENT_PREFIX (u16) 0x5e -#define AML_LOCAL_OP (u16) 0x60 -#define AML_LOCAL0 (u16) 0x60 -#define AML_LOCAL1 (u16) 0x61 -#define AML_LOCAL2 (u16) 0x62 -#define AML_LOCAL3 (u16) 0x63 -#define AML_LOCAL4 (u16) 0x64 -#define AML_LOCAL5 (u16) 0x65 -#define AML_LOCAL6 (u16) 0x66 -#define AML_LOCAL7 (u16) 0x67 -#define AML_ARG_OP (u16) 0x68 -#define AML_ARG0 (u16) 0x68 -#define AML_ARG1 (u16) 0x69 -#define AML_ARG2 (u16) 0x6a -#define AML_ARG3 (u16) 0x6b -#define AML_ARG4 (u16) 0x6c -#define AML_ARG5 (u16) 0x6d -#define AML_ARG6 (u16) 0x6e -#define AML_STORE_OP (u16) 0x70 -#define AML_REF_OF_OP (u16) 0x71 -#define AML_ADD_OP (u16) 0x72 -#define AML_CONCAT_OP (u16) 0x73 -#define AML_SUBTRACT_OP (u16) 0x74 -#define AML_INCREMENT_OP (u16) 0x75 -#define AML_DECREMENT_OP (u16) 0x76 -#define AML_MULTIPLY_OP (u16) 0x77 -#define AML_DIVIDE_OP (u16) 0x78 -#define AML_SHIFT_LEFT_OP (u16) 0x79 -#define AML_SHIFT_RIGHT_OP (u16) 0x7a -#define AML_BIT_AND_OP (u16) 0x7b -#define AML_BIT_NAND_OP (u16) 0x7c -#define AML_BIT_OR_OP (u16) 0x7d -#define AML_BIT_NOR_OP (u16) 0x7e -#define AML_BIT_XOR_OP (u16) 0x7f -#define AML_BIT_NOT_OP (u16) 0x80 -#define AML_FIND_SET_LEFT_BIT_OP (u16) 0x81 -#define AML_FIND_SET_RIGHT_BIT_OP (u16) 0x82 -#define AML_DEREF_OF_OP (u16) 0x83 -#define AML_CONCAT_RES_OP (u16) 0x84 /* ACPI 2.0 */ -#define AML_MOD_OP (u16) 0x85 /* ACPI 2.0 */ -#define AML_NOTIFY_OP (u16) 0x86 -#define AML_SIZE_OF_OP (u16) 0x87 -#define AML_INDEX_OP (u16) 0x88 -#define AML_MATCH_OP (u16) 0x89 -#define AML_CREATE_DWORD_FIELD_OP (u16) 0x8a -#define AML_CREATE_WORD_FIELD_OP (u16) 0x8b -#define AML_CREATE_BYTE_FIELD_OP (u16) 0x8c -#define AML_CREATE_BIT_FIELD_OP (u16) 0x8d -#define AML_TYPE_OP (u16) 0x8e -#define AML_CREATE_QWORD_FIELD_OP (u16) 0x8f /* ACPI 2.0 */ -#define AML_LAND_OP (u16) 0x90 -#define AML_LOR_OP (u16) 0x91 -#define AML_LNOT_OP (u16) 0x92 -#define AML_LEQUAL_OP (u16) 0x93 -#define AML_LGREATER_OP (u16) 0x94 -#define AML_LLESS_OP (u16) 0x95 -#define AML_TO_BUFFER_OP (u16) 0x96 /* ACPI 2.0 */ -#define AML_TO_DECSTRING_OP (u16) 0x97 /* ACPI 2.0 */ -#define AML_TO_HEXSTRING_OP (u16) 0x98 /* ACPI 2.0 */ -#define AML_TO_INTEGER_OP (u16) 0x99 /* ACPI 2.0 */ -#define AML_TO_STRING_OP (u16) 0x9c /* ACPI 2.0 */ -#define AML_COPY_OP (u16) 0x9d /* ACPI 2.0 */ -#define AML_MID_OP (u16) 0x9e /* ACPI 2.0 */ -#define AML_CONTINUE_OP (u16) 0x9f /* ACPI 2.0 */ -#define AML_IF_OP (u16) 0xa0 -#define AML_ELSE_OP (u16) 
0xa1 -#define AML_WHILE_OP (u16) 0xa2 -#define AML_NOOP_OP (u16) 0xa3 -#define AML_RETURN_OP (u16) 0xa4 -#define AML_BREAK_OP (u16) 0xa5 -#define AML_BREAK_POINT_OP (u16) 0xcc -#define AML_ONES_OP (u16) 0xff - -/* prefixed opcodes */ - -#define AML_EXTOP (u16) 0x005b - - -#define AML_MUTEX_OP (u16) 0x5b01 -#define AML_EVENT_OP (u16) 0x5b02 -#define AML_SHIFT_RIGHT_BIT_OP (u16) 0x5b10 -#define AML_SHIFT_LEFT_BIT_OP (u16) 0x5b11 -#define AML_COND_REF_OF_OP (u16) 0x5b12 -#define AML_CREATE_FIELD_OP (u16) 0x5b13 -#define AML_LOAD_TABLE_OP (u16) 0x5b1f /* ACPI 2.0 */ -#define AML_LOAD_OP (u16) 0x5b20 -#define AML_STALL_OP (u16) 0x5b21 -#define AML_SLEEP_OP (u16) 0x5b22 -#define AML_ACQUIRE_OP (u16) 0x5b23 -#define AML_SIGNAL_OP (u16) 0x5b24 -#define AML_WAIT_OP (u16) 0x5b25 -#define AML_RESET_OP (u16) 0x5b26 -#define AML_RELEASE_OP (u16) 0x5b27 -#define AML_FROM_BCD_OP (u16) 0x5b28 -#define AML_TO_BCD_OP (u16) 0x5b29 -#define AML_UNLOAD_OP (u16) 0x5b2a -#define AML_REVISION_OP (u16) 0x5b30 -#define AML_DEBUG_OP (u16) 0x5b31 -#define AML_FATAL_OP (u16) 0x5b32 -#define AML_REGION_OP (u16) 0x5b80 -#define AML_FIELD_OP (u16) 0x5b81 -#define AML_DEVICE_OP (u16) 0x5b82 -#define AML_PROCESSOR_OP (u16) 0x5b83 -#define AML_POWER_RES_OP (u16) 0x5b84 -#define AML_THERMAL_ZONE_OP (u16) 0x5b85 -#define AML_INDEX_FIELD_OP (u16) 0x5b86 -#define AML_BANK_FIELD_OP (u16) 0x5b87 -#define AML_DATA_REGION_OP (u16) 0x5b88 /* ACPI 2.0 */ - - -/* Bogus opcodes (they are actually two separate opcodes) */ - -#define AML_LGREATEREQUAL_OP (u16) 0x9295 -#define AML_LLESSEQUAL_OP (u16) 0x9294 -#define AML_LNOTEQUAL_OP (u16) 0x9293 - - -/* - * Internal opcodes - * Use only "Unknown" AML opcodes, don't attempt to use - * any valid ACPI ASCII values (A-Z, 0-9, '-') - */ - -#define AML_INT_NAMEPATH_OP (u16) 0x002d -#define AML_INT_NAMEDFIELD_OP (u16) 0x0030 -#define AML_INT_RESERVEDFIELD_OP (u16) 0x0031 -#define AML_INT_ACCESSFIELD_OP (u16) 0x0032 -#define AML_INT_BYTELIST_OP (u16) 0x0033 -#define AML_INT_STATICSTRING_OP (u16) 0x0034 -#define AML_INT_METHODCALL_OP (u16) 0x0035 -#define AML_INT_RETURN_VALUE_OP (u16) 0x0036 - - -#define ARG_NONE 0x0 - -/* - * Argument types for the AML Parser - * Each field in the Arg_types u32 is 5 bits, allowing for a maximum of 6 arguments. - * There can be up to 31 unique argument types - */ - -#define ARGP_BYTEDATA 0x01 -#define ARGP_BYTELIST 0x02 -#define ARGP_CHARLIST 0x03 -#define ARGP_DATAOBJ 0x04 -#define ARGP_DATAOBJLIST 0x05 -#define ARGP_DWORDDATA 0x06 -#define ARGP_FIELDLIST 0x07 -#define ARGP_NAME 0x08 -#define ARGP_NAMESTRING 0x09 -#define ARGP_OBJLIST 0x0A -#define ARGP_PKGLENGTH 0x0B -#define ARGP_SUPERNAME 0x0C -#define ARGP_TARGET 0x0D -#define ARGP_TERMARG 0x0E -#define ARGP_TERMLIST 0x0F -#define ARGP_WORDDATA 0x10 -#define ARGP_QWORDDATA 0x11 -#define ARGP_SIMPLENAME 0x12 - -/* - * Resolved argument types for the AML Interpreter - * Each field in the Arg_types u32 is 5 bits, allowing for a maximum of 6 arguments. 
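
The 5-bit packing described here is easy to mis-read: six argument types occupy 30 of the 32 bits, and a zero field terminates the list. A sketch of the encode/decode macros such a scheme implies (the macro names are assumptions, not from this patch):

    /* Illustrative 5-bit argument-type packing for the AML parser. */
    #define ARG_1(t1)               ((u32)(t1))
    #define ARG_2(t1, t2)           (ARG_1(t1) | ((u32)(t2) << 5))
    #define ARG_3(t1, t2, t3)       (ARG_2(t1, t2) | ((u32)(t3) << 10))

    #define GET_CURRENT_ARG_TYPE(list)  ((list) & 0x1F)   /* low 5 bits */
    #define INCREMENT_ARG_LIST(list)    ((list) >>= 5)    /* next arg   */
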
- * There can be up to 31 unique argument types (0 is end-of-arg-list indicator) - */ - -/* "Standard" ACPI types are 1-15 (0x0F) */ - -#define ARGI_INTEGER ACPI_TYPE_INTEGER /* 1 */ -#define ARGI_STRING ACPI_TYPE_STRING /* 2 */ -#define ARGI_BUFFER ACPI_TYPE_BUFFER /* 3 */ -#define ARGI_PACKAGE ACPI_TYPE_PACKAGE /* 4 */ -#define ARGI_EVENT ACPI_TYPE_EVENT -#define ARGI_MUTEX ACPI_TYPE_MUTEX -#define ARGI_REGION ACPI_TYPE_REGION -#define ARGI_DDBHANDLE ACPI_TYPE_DDB_HANDLE - -/* Custom types are 0x10 through 0x1F */ - -#define ARGI_IF 0x10 -#define ARGI_ANYOBJECT 0x11 -#define ARGI_ANYTYPE 0x12 -#define ARGI_COMPUTEDATA 0x13 /* Buffer, String, or Integer */ -#define ARGI_DATAOBJECT 0x14 /* Buffer, String, package or reference to a Node - Used only by Size_of operator*/ -#define ARGI_COMPLEXOBJ 0x15 /* Buffer, String, or package (Used by INDEX op only) */ -#define ARGI_INTEGER_REF 0x16 -#define ARGI_OBJECT_REF 0x17 -#define ARGI_DEVICE_REF 0x18 -#define ARGI_REFERENCE 0x19 -#define ARGI_TARGETREF 0x1A /* Target, subject to implicit conversion */ -#define ARGI_FIXED_TARGET 0x1B /* Target, no implicit conversion */ -#define ARGI_SIMPLE_TARGET 0x1C /* Name, Local, Arg -- no implicit conversion */ -#define ARGI_BUFFERSTRING 0x1D - -#define ARGI_INVALID_OPCODE 0xFFFFFFFF - - -/* - * hash offsets - */ -#define AML_EXTOP_HASH_OFFSET 22 -#define AML_LNOT_HASH_OFFSET 19 - - -/* - * opcode groups and types - */ - -#define OPGRP_NAMED 0x01 -#define OPGRP_FIELD 0x02 -#define OPGRP_BYTELIST 0x04 - - -/* - * Opcode information - */ - -/* Opcode flags */ - -#define AML_HAS_ARGS 0x0800 -#define AML_HAS_TARGET 0x0400 -#define AML_HAS_RETVAL 0x0200 -#define AML_NSOBJECT 0x0100 -#define AML_NSOPCODE 0x0080 -#define AML_NSNODE 0x0040 -#define AML_NAMED 0x0020 -#define AML_DEFER 0x0010 -#define AML_FIELD 0x0008 -#define AML_CREATE 0x0004 -#define AML_MATH 0x0002 -#define AML_LOGICAL 0x0001 - -/* Convenient flag groupings */ - -#define AML_FLAGS_EXEC_1A_0T_0R AML_HAS_ARGS /* Monadic1 */ -#define AML_FLAGS_EXEC_1A_0T_1R AML_HAS_ARGS | AML_HAS_RETVAL /* Monadic2 */ -#define AML_FLAGS_EXEC_1A_1T_0R AML_HAS_ARGS | AML_HAS_TARGET -#define AML_FLAGS_EXEC_1A_1T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL /* Monadic2_r */ -#define AML_FLAGS_EXEC_2A_0T_0R AML_HAS_ARGS /* Dyadic1 */ -#define AML_FLAGS_EXEC_2A_0T_1R AML_HAS_ARGS | AML_HAS_RETVAL /* Dyadic2 */ -#define AML_FLAGS_EXEC_2A_1T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL /* Dyadic2_r */ -#define AML_FLAGS_EXEC_2A_2T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL -#define AML_FLAGS_EXEC_3A_0T_0R AML_HAS_ARGS -#define AML_FLAGS_EXEC_3A_1T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL -#define AML_FLAGS_EXEC_6A_0T_1R AML_HAS_ARGS | AML_HAS_RETVAL - - -/* - * The opcode Type is used in a dispatch table, do not change - * without updating the table. 
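
Concretely, the type codes that follow index a function-pointer dispatch table in the interpreter, which is why inserting or reordering values silently breaks execution. Schematically (handler names and the walk-state type are placeholders, not from this patch):

    /* Schematic dispatch keyed by the AML_TYPE_EXEC_* codes below. */
    typedef acpi_status (*acpi_execute_op)(void *walk_state);

    extern acpi_status exec_1A_0T_0R(void *walk_state);   /* placeholder */
    extern acpi_status exec_1A_0T_1R(void *walk_state);   /* placeholder */

    static acpi_execute_op dispatch[] = {
            exec_1A_0T_0R,          /* AML_TYPE_EXEC_1A_0T_0R == 0x00 */
            exec_1A_0T_1R,          /* AML_TYPE_EXEC_1A_0T_1R == 0x01 */
            /* one entry per dispatch-table type, in numeric order */
    };
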
- */ -#define AML_TYPE_EXEC_1A_0T_0R 0x00 /* Monadic1 */ -#define AML_TYPE_EXEC_1A_0T_1R 0x01 /* Monadic2 */ -#define AML_TYPE_EXEC_1A_1T_0R 0x02 -#define AML_TYPE_EXEC_1A_1T_1R 0x03 /* Monadic2_r */ -#define AML_TYPE_EXEC_2A_0T_0R 0x04 /* Dyadic1 */ -#define AML_TYPE_EXEC_2A_0T_1R 0x05 /* Dyadic2 */ -#define AML_TYPE_EXEC_2A_1T_1R 0x06 /* Dyadic2_r */ -#define AML_TYPE_EXEC_2A_2T_1R 0x07 -#define AML_TYPE_EXEC_3A_0T_0R 0x08 -#define AML_TYPE_EXEC_3A_1T_1R 0x09 -#define AML_TYPE_EXEC_6A_0T_1R 0x0A -/* End of types used in dispatch table */ - -#define AML_TYPE_LITERAL 0x0B -#define AML_TYPE_CONSTANT 0x0C -#define AML_TYPE_METHOD_ARGUMENT 0x0D -#define AML_TYPE_LOCAL_VARIABLE 0x0E -#define AML_TYPE_DATA_TERM 0x0F - -/* Generic for an op that returns a value */ - -#define AML_TYPE_METHOD_CALL 0x10 - -/* Misc */ - -#define AML_TYPE_CREATE_FIELD 0x11 -#define AML_TYPE_CONTROL 0x12 -#define AML_TYPE_NAMED_NO_OBJ 0x13 -#define AML_TYPE_NAMED_FIELD 0x14 -#define AML_TYPE_NAMED_SIMPLE 0x15 -#define AML_TYPE_NAMED_COMPLEX 0x16 -#define AML_TYPE_RETURN 0x17 - -#define AML_TYPE_UNDEFINED 0x18 -#define AML_TYPE_BOGUS 0x19 - - -/* - * Opcode classes - */ -#define AML_CLASS_EXECUTE 0x00 -#define AML_CLASS_CREATE 0x01 -#define AML_CLASS_ARGUMENT 0x02 -#define AML_CLASS_NAMED_OBJECT 0x03 -#define AML_CLASS_CONTROL 0x04 -#define AML_CLASS_ASCII 0x05 -#define AML_CLASS_PREFIX 0x06 -#define AML_CLASS_INTERNAL 0x07 -#define AML_CLASS_RETURN_VALUE 0x08 -#define AML_CLASS_METHOD_CALL 0x09 -#define AML_CLASS_UNKNOWN 0x0A - - -/* Predefined Operation Region Space_iDs */ - -typedef enum -{ - REGION_MEMORY = 0, - REGION_IO, - REGION_PCI_CONFIG, - REGION_EC, - REGION_SMBUS, - REGION_CMOS, - REGION_PCI_BAR, - REGION_FIXED_HW = 0x7F, - -} AML_REGION_TYPES; - - -/* Comparison operation codes for Match_op operator */ - -typedef enum -{ - MATCH_MTR = 0, - MATCH_MEQ = 1, - MATCH_MLE = 2, - MATCH_MLT = 3, - MATCH_MGE = 4, - MATCH_MGT = 5 - -} AML_MATCH_OPERATOR; - -#define MAX_MATCH_OPERATOR 5 - - -/* Field Access Types */ - -#define ACCESS_TYPE_MASK 0x0f -#define ACCESS_TYPE_SHIFT 0 - -typedef enum -{ - ACCESS_ANY_ACC = 0, - ACCESS_BYTE_ACC = 1, - ACCESS_WORD_ACC = 2, - ACCESS_DWORD_ACC = 3, - ACCESS_QWORD_ACC = 4, /* ACPI 2.0 */ - ACCESS_BLOCK_ACC = 4, - ACCESS_SMBSEND_RECV_ACC = 5, - ACCESS_SMBQUICK_ACC = 6 - -} AML_ACCESS_TYPE; - - -/* Field Lock Rules */ - -#define LOCK_RULE_MASK 0x10 -#define LOCK_RULE_SHIFT 4 - -typedef enum -{ - GLOCK_NEVER_LOCK = 0, - GLOCK_ALWAYS_LOCK = 1 - -} AML_LOCK_RULE; - - -/* Field Update Rules */ - -#define UPDATE_RULE_MASK 0x060 -#define UPDATE_RULE_SHIFT 5 - -typedef enum -{ - UPDATE_PRESERVE = 0, - UPDATE_WRITE_AS_ONES = 1, - UPDATE_WRITE_AS_ZEROS = 2 - -} AML_UPDATE_RULE; - - -/* bit fields in Method_flags byte */ - -#define METHOD_FLAGS_ARG_COUNT 0x07 -#define METHOD_FLAGS_SERIALIZED 0x08 -#define METHOD_FLAGS_SYNCH_LEVEL 0xF0 - - -/* Array sizes. 
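
The mask/shift pairs and Method_flags bits above each decode a single byte taken from the AML stream. A worked sketch (function names illustrative, not from this patch):

    /* Illustrative decode of AML field-flags and method-flags bytes
     * using the masks defined above. */
    static u8 decode_access_type(u8 field_flags)
    {
            return (field_flags & ACCESS_TYPE_MASK) >> ACCESS_TYPE_SHIFT;
    }

    static u8 decode_method_arg_count(u8 method_flags)
    {
            /* low three bits: 0-7 arguments; bit 3 = serialized;
             * high nibble = synchronization level */
            return method_flags & METHOD_FLAGS_ARG_COUNT;
    }
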
Used for range checking also */ - -#define NUM_REGION_TYPES 7 -#define NUM_ACCESS_TYPES 7 -#define NUM_UPDATE_RULES 3 -#define NUM_MATCH_OPS 7 -#define NUM_OPCODES 256 -#define NUM_FIELD_NAMES 2 - - -#define USER_REGION_BEGIN 0x80 - - -#endif /* __AMLCODE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/platform/acenv.h linux.21rc5-ac2/drivers/acpi/include/platform/acenv.h --- linux.21rc5/drivers/acpi/include/platform/acenv.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/platform/acenv.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,294 +0,0 @@ -/****************************************************************************** - * - * Name: acenv.h - Generation environment specific items - * $Revision: 77 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACENV_H__ -#define __ACENV_H__ - - -/* - * Configuration for ACPI tools and utilities - */ - -#ifdef _ACPI_DUMP_APP -#define ACPI_DEBUG -#define ACPI_APPLICATION -#define ENABLE_DEBUGGER -#define ACPI_USE_SYSTEM_CLIBRARY -#define PARSER_ONLY -#endif - -#ifdef _ACPI_EXEC_APP -#undef DEBUGGER_THREADING -#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED -#define ACPI_DEBUG -#define ACPI_APPLICATION -#define ENABLE_DEBUGGER -#define ACPI_USE_SYSTEM_CLIBRARY -#endif - -#ifdef _ACPI_ASL_COMPILER -#define ACPI_DEBUG -#define ACPI_APPLICATION -#define ENABLE_DEBUGGER -#define ACPI_USE_SYSTEM_CLIBRARY -#endif - -/* - * Memory allocation tracking. Used only if - * 1) This is the debug version - * 2) This is NOT a 16-bit version of the code (not enough real-mode memory) - */ -#ifdef ACPI_DEBUG -#ifndef _IA16 -#define ACPI_DBG_TRACK_ALLOCATIONS -#endif -#endif - -/* - * Environment configuration. The purpose of this file is to interface to the - * local generation environment. - * - * 1) ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library. - * Otherwise, local versions of string/memory functions will be used. - * 2) ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and - * the standard header files may be used. - * - * The ACPI subsystem only uses low level C library functions that do not call - * operating system services and may therefore be inlined in the code. - * - * It may be necessary to tailor these include files to the target - * generation environment. - * - * - * Functions and constants used from each header: - * - * string.h: memcpy - * memset - * strcat - * strcmp - * strcpy - * strlen - * strncmp - * strncat - * strncpy - * - * stdlib.h: strtoul - * - * stdarg.h: va_list - * va_arg - * va_start - * va_end - * - */ - -/*! 
[Begin] no source code translation */ - -#ifdef _LINUX -#include "aclinux.h" - -#elif _AED_EFI -#include "acefi.h" - -#elif WIN32 -#include "acwin.h" - -#elif __FreeBSD__ -#include "acfreebsd.h" - -#else - -/* All other environments */ - -#define ACPI_USE_STANDARD_HEADERS - -/* Name of host operating system (returned by the _OS_ namespace object) */ - -#define ACPI_OS_NAME "Intel ACPI/CA Core Subsystem" - -/* This macro is used to tag functions as "printf-like" because - * some compilers can catch printf format string problems. MSVC - * doesn't, so this is proprocessed away. - */ -#define ACPI_PRINTF_LIKE_FUNC - -#endif - - -/*! [End] no source code translation !*/ - -/****************************************************************************** - * - * C library configuration - * - *****************************************************************************/ - -#ifdef ACPI_USE_SYSTEM_CLIBRARY -/* - * Use the standard C library headers. - * We want to keep these to a minimum. - * - */ - -#ifdef ACPI_USE_STANDARD_HEADERS -/* - * Use the standard headers from the standard locations - */ -#include -#include -#include -#include - -#endif /* ACPI_USE_STANDARD_HEADERS */ - -/* - * We will be linking to the standard Clib functions - */ - -#define STRSTR(s1,s2) strstr((s1), (s2)) -#define STRUPR(s) acpi_ut_strupr ((s)) -#define STRLEN(s) (u32) strlen((s)) -#define STRCPY(d,s) strcpy((d), (s)) -#define STRNCPY(d,s,n) strncpy((d), (s), (NATIVE_INT)(n)) -#define STRNCMP(d,s,n) strncmp((d), (s), (NATIVE_INT)(n)) -#define STRCMP(d,s) strcmp((d), (s)) -#define STRCAT(d,s) strcat((d), (s)) -#define STRNCAT(d,s,n) strncat((d), (s), (NATIVE_INT)(n)) -#define STRTOUL(d,s,n) strtoul((d), (s), (NATIVE_INT)(n)) -#define MEMCPY(d,s,n) memcpy((d), (s), (NATIVE_INT)(n)) -#define MEMSET(d,s,n) memset((d), (s), (NATIVE_INT)(n)) -#define TOUPPER toupper -#define TOLOWER tolower -#define IS_XDIGIT isxdigit - -/****************************************************************************** - * - * Not using native C library, use local implementations - * - *****************************************************************************/ -#else - -/* - * Use local definitions of C library macros and functions - * NOTE: The function implementations may not be as efficient - * as an inline or assembly code implementation provided by a - * native C library. 
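
Both halves of the ACPI_USE_SYSTEM_CLIBRARY conditional define the same upper-case macro surface, so subsystem code is written once and links against either the host C library or the acpi_ut_* fallbacks. For instance (a sketch; the function is illustrative):

    /* Compiles unchanged whether STRLEN/STRCPY map to the host C
     * library or to the local acpi_ut_* implementations. */
    static u32 copy_pathname(NATIVE_CHAR *dest, const NATIVE_CHAR *src)
    {
            STRCPY(dest, src);      /* caller guarantees dest is large enough */
            return STRLEN(dest);
    }
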
- */ - -#ifndef va_arg - -#ifndef _VALIST -#define _VALIST -typedef char *va_list; -#endif /* _VALIST */ - -/* - * Storage alignment properties - */ - -#define _AUPBND (sizeof (NATIVE_INT) - 1) -#define _ADNBND (sizeof (NATIVE_INT) - 1) - -/* - * Variable argument list macro definitions - */ - -#define _bnd(X, bnd) (((sizeof (X)) + (bnd)) & (~(bnd))) -#define va_arg(ap, T) (*(T *)(((ap) += (_bnd (T, _AUPBND))) - (_bnd (T,_ADNBND)))) -#define va_end(ap) (void) 0 -#define va_start(ap, A) (void) ((ap) = (((char *) &(A)) + (_bnd (A,_AUPBND)))) - -#endif /* va_arg */ - - -#define STRSTR(s1,s2) acpi_ut_strstr ((s1), (s2)) -#define STRUPR(s) acpi_ut_strupr ((s)) -#define STRLEN(s) acpi_ut_strlen ((s)) -#define STRCPY(d,s) acpi_ut_strcpy ((d), (s)) -#define STRNCPY(d,s,n) acpi_ut_strncpy ((d), (s), (n)) -#define STRNCMP(d,s,n) acpi_ut_strncmp ((d), (s), (n)) -#define STRCMP(d,s) acpi_ut_strcmp ((d), (s)) -#define STRCAT(d,s) acpi_ut_strcat ((d), (s)) -#define STRNCAT(d,s,n) acpi_ut_strncat ((d), (s), (n)) -#define STRTOUL(d,s,n) acpi_ut_strtoul ((d), (s),(n)) -#define MEMCPY(d,s,n) acpi_ut_memcpy ((d), (s), (n)) -#define MEMSET(d,v,n) acpi_ut_memset ((d), (v), (n)) -#define TOUPPER acpi_ut_to_upper -#define TOLOWER acpi_ut_to_lower - -#endif /* ACPI_USE_SYSTEM_CLIBRARY */ - - -/****************************************************************************** - * - * Assembly code macros - * - *****************************************************************************/ - -/* - * Handle platform- and compiler-specific assembly language differences. - * These should already have been defined by the platform includes above. - * - * Notes: - * 1) Interrupt 3 is used to break into a debugger - * 2) Interrupts are turned off during ACPI register setup - */ - -/* Unrecognized compiler, use defaults */ -#ifndef ACPI_ASM_MACROS - -#define ACPI_ASM_MACROS -#define causeinterrupt(level) -#define BREAKPOINT3 -#define disable() -#define enable() -#define halt() -#define ACPI_ACQUIRE_GLOBAL_LOCK(Glptr, acq) -#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, acq) - -#endif /* ACPI_ASM_MACROS */ - - -#ifdef ACPI_APPLICATION - -/* Don't want software interrupts within a ring3 application */ - -#undef causeinterrupt -#undef BREAKPOINT3 -#define causeinterrupt(level) -#define BREAKPOINT3 -#endif - - -/****************************************************************************** - * - * Compiler-specific - * - *****************************************************************************/ - -/* this has been moved to compiler-specific headers, which are included from the - platform header. */ - - -#endif /* __ACENV_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/platform/acgcc.h linux.21rc5-ac2/drivers/acpi/include/platform/acgcc.h --- linux.21rc5/drivers/acpi/include/platform/acgcc.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/platform/acgcc.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,196 +0,0 @@ -/****************************************************************************** - * - * Name: acgcc.h - GCC specific defines, etc. - * $Revision: 14 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACGCC_H__ -#define __ACGCC_H__ - - -#ifdef __ia64__ -#define _IA64 - -#define COMPILER_DEPENDENT_UINT64 unsigned long -/* Single threaded */ -#define ACPI_APPLICATION - -#define ACPI_ASM_MACROS -#define causeinterrupt(level) -#define BREAKPOINT3 -#define disable() __cli() -#define enable() __sti() - -/*! [Begin] no source code translation */ - -#include - -#define halt() ia64_pal_halt_light() /* PAL_HALT[_LIGHT] */ -#define safe_halt() ia64_pal_halt(1) /* PAL_HALT */ - - -#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ - do { \ - __asm__ volatile ("1: ld4 r29=%1\n" \ - ";;\n" \ - "mov ar.ccv=r29\n" \ - "mov r2=r29\n" \ - "shr.u r30=r29,1\n" \ - "and r29=-4,r29\n" \ - ";;\n" \ - "add r29=2,r29\n" \ - "and r30=1,r30\n" \ - ";;\n" \ - "add r29=r29,r30\n" \ - ";;\n" \ - "cmpxchg4.acq r30=%1,r29,ar.ccv\n" \ - ";;\n" \ - "cmp.eq p6,p7=r2,r30\n" \ - "(p7) br.dpnt.few 1b\n" \ - "cmp.gt p8,p9=3,r29\n" \ - ";;\n" \ - "(p8) mov %0=-1\n" \ - "(p9) mov %0=r0\n" \ - :"=r"(Acq):"m"(GLptr):"r2","r29","r30","memory"); \ - } while (0) - -#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ - do { \ - __asm__ volatile ("1: ld4 r29=%1\n" \ - ";;\n" \ - "mov ar.ccv=r29\n" \ - "mov r2=r29\n" \ - "and r29=-4,r29\n" \ - ";;\n" \ - "cmpxchg4.acq r30=%1,r29,ar.ccv\n" \ - ";;\n" \ - "cmp.eq p6,p7=r2,r30\n" \ - "(p7) br.dpnt.few 1b\n" \ - "and %0=1,r2\n" \ - ";;\n" \ - :"=r"(Acq):"m"(GLptr):"r2","r29","r30","memory"); \ - } while (0) -/*! [End] no source code translation !*/ - - -#elif __i386__ /* DO IA32 */ -#define COMPILER_DEPENDENT_UINT64 unsigned long long -#define ACPI_ASM_MACROS -#define causeinterrupt(level) -#define BREAKPOINT3 -#define disable() __cli() -#define enable() __sti() -#define halt() __asm__ __volatile__ ("sti; hlt":::"memory") - -/*! [Begin] no source code translation - * - * A brief explanation as GNU inline assembly is a bit hairy - * %0 is the output parameter in EAX ("=a") - * %1 and %2 are the input parameters in ECX ("c") - * and an immediate value ("i") respectively - * All actual register references are preceded with "%%" as in "%%edx" - * Immediate values in the assembly are preceded by "$" as in "$0x1" - * The final asm parameter are the operation altered non-output registers. 
- */ -#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ - do { \ - int dummy; \ - asm("1: movl (%1),%%eax;" \ - "movl %%eax,%%edx;" \ - "andl %2,%%edx;" \ - "btsl $0x1,%%edx;" \ - "adcl $0x0,%%edx;" \ - "lock; cmpxchgl %%edx,(%1);" \ - "jnz 1b;" \ - "cmpb $0x3,%%dl;" \ - "sbbl %%eax,%%eax" \ - :"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~1L):"dx"); \ - } while(0) - -#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ - do { \ - int dummy; \ - asm("1: movl (%1),%%eax;" \ - "movl %%eax,%%edx;" \ - "andl %2,%%edx;" \ - "lock; cmpxchgl %%edx,(%1);" \ - "jnz 1b;" \ - "andl $0x1,%%eax" \ - :"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~3L):"dx"); \ - } while(0) -#elif __x86_64__ -#define COMPILER_DEPENDENT_UINT64 unsigned long long -#define ACPI_ASM_MACROS -#define causeinterrupt(level) -#define BREAKPOINT3 -#define disable() __cli() -#define enable() __sti() -#define halt() __asm__ __volatile__ ("sti; hlt":::"memory") - -/*! [Begin] no source code translation - * - * A brief explanation as GNU inline assembly is a bit hairy - * %0 is the output parameter in RAX ("=a") - * %1 and %2 are the input parameters in RCX ("c") - * and an immediate value ("i") respectively - * All actual register references are preceded with "%%" as in "%%edx" - * Immediate values in the assembly are preceded by "$" as in "$0x1" - * The final asm parameter are the operation altered non-output registers. - */ -#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ - do { \ - unsigned long dummy; \ - asm("1: movl (%2),%%eax;" \ - "movl %%eax,%%edx;" \ - "andq %2,%%rdx;" \ - "btsl $0x1,%%edx;" \ - "adcl $0x0,%%edx;" \ - "lock; cmpxchgl %%edx,(%1);" \ - "jnz 1b;" \ - "cmpb $0x3,%%dl;" \ - "sbbl %%eax,%%eax" \ - :"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~1L):"dx"); \ - } while(0) - -#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ - do { \ - unsigned long dummy; \ - asm("1: movl (%2),%%eax;" \ - "movl %%eax,%%edx;" \ - "andq %2,%%rdx;" \ - "lock; cmpxchgl %%edx,(%1);" \ - "jnz 1b;" \ - "andl $0x1,%%eax" \ - :"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~3L):"dx"); \ - } while(0) - -/*! [End] no source code translation !*/ - -#endif - -/* This macro is used to tag functions as "printf-like" because - * some compilers (like GCC) can catch printf format string problems. - */ -#define ACPI_PRINTF_LIKE_FUNC __attribute__ ((__format__ (__printf__, 4, 5))) - - -#endif /* __ACGCC_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/include/platform/aclinux.h linux.21rc5-ac2/drivers/acpi/include/platform/aclinux.h --- linux.21rc5/drivers/acpi/include/platform/aclinux.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/include/platform/aclinux.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,71 +0,0 @@ -/****************************************************************************** - * - * Name: aclinux.h - OS specific defines, etc. - * $Revision: 14 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 R. Byron Moore - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
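All of the inline-assembly variants above implement the same ACPI Global Lock handshake: bit 0 of the lock dword is the "pending" flag, bit 1 the "owned" flag, and ownership is taken with a compare-and-swap loop. A plain-C rendering of the protocol may be easier to follow; __sync_val_compare_and_swap is a GCC builtin used here purely as a stand-in for the cmpxchg loops (an assumption of this sketch, not part of the patch):

#include <stdint.h>

static int
acquire_global_lock (volatile uint32_t *gl)
{
    uint32_t old, new;

    do {
        old = *gl;
        /* Set "owned"; if it was already owned, set "pending" instead */
        new = ((old & ~3u) + 2) + ((old >> 1) & 1);
    } while (__sync_val_compare_and_swap (gl, old, new) != old);

    /* True only if we did not have to set the pending bit */
    return ((new & 3) < 3);
}

static int
release_global_lock (volatile uint32_t *gl)
{
    uint32_t old, new;

    do {
        old = *gl;
        new = old & ~3u;                /* clear owned and pending */
    } while (__sync_val_compare_and_swap (gl, old, new) != old);

    /* Nonzero means another party was waiting for the lock */
    return (old & 1);
}

When acquire fails the caller must block until the firmware signals release, and when release returns nonzero the OS must write GBL_RLS so the firmware can hand the lock over; those are exactly the Acq values the assembly above computes.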
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ACLINUX_H__ -#define __ACLINUX_H__ - -#define ACPI_OS_NAME "Linux" - -#define ACPI_USE_SYSTEM_CLIBRARY - -#ifdef __KERNEL__ - -#include -#include -#include -#include -#include -#include -#include - -#define strtoul simple_strtoul - -#else - -#include -#include -#include -#include - -#endif - -/* Linux uses GCC */ - -#include "acgcc.h" - -#undef DEBUGGER_THREADING -#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED - -#if BITS_PER_LONG < 64 -/* Linux ia32 can't do int64 well */ -#define ACPI_NO_INTEGER64_SUPPORT -/* And the ia32 kernel doesn't include 64-bit divide support */ -#define ACPI_DIV64(dividend, divisor) do_div(dividend, divisor) -#else -#define ACPI_DIV64(dividend, divisor) ACPI_DIVIDE(dividend, divisor) -#endif - - -#endif /* __ACLINUX_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/kdb/kdbm_acpi.c linux.21rc5-ac2/drivers/acpi/kdb/kdbm_acpi.c --- linux.21rc5/drivers/acpi/kdb/kdbm_acpi.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/kdb/kdbm_acpi.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,54 +0,0 @@ -/* - * kdbm_acpi.c - kdb debugger module interface for ACPI debugger - * - * Copyright (C) 2000 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include - -#include "acpi.h" -#include "acdebug.h" - -extern int acpi_in_debugger; - -static int -kdbm_acpi(int argc, const char **argv, const char **envp, struct pt_regs *regs) -{ - acpi_in_debugger = 1; - - acpi_db_user_commands(DB_COMMAND_PROMPT, NULL); - - acpi_in_debugger = 0; - - return 0; -} - -int -init_module(void) -{ - kdb_register("acpi", kdbm_acpi, "", "Enter ACPI debugger", 0); - - return 0; -} - -void -cleanup_module(void) -{ - kdb_unregister("acpi"); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/kdb/README.txt linux.21rc5-ac2/drivers/acpi/kdb/README.txt --- linux.21rc5/drivers/acpi/kdb/README.txt 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/kdb/README.txt 1970-01-01 01:00:00.000000000 +0100 @@ -1,36 +0,0 @@ -Using the ACPI debugger with kdb --------------------------------- - -ACPI CA includes a full-featured debugger, which allows the examination of -a running system's ACPI tables, as well as running and stepping through -control methods. - -Configuration -------------- -1) Edit the main acpi Makefile. On the ACPI_CFLAGS line, remove the '#', thus - enabling the debugger. 
- -2) Download the latest kdb patch from: - - ftp://oss.sgi.com/www/projects/kdb/download/ix86/ - - Follow the instructions at http://oss.sgi.com/projects/kdb/ on how to - install the patch and configure KDB. - -3) This would probably be a good time to recompile the kernel, and make sure - kdb works (Hitting the Pause key should drop you into it. Type "go" to exit - it.) - -4) The kdb <--> ACPI debugger interface is a module. Type "make modules", and - it will be built and placed in drivers/acpi/kdb. - -5) Change to that directory and type "insmod kdbm_acpi.o". This loads the - module we just built. - -6) Break back into kdb. If you type help, you should now see "acpi" listed as - a command, at the bottom. - -7) Type "acpi". You are now in the ACPI debugger. While hosted by kdb, it is - wholly separate, and has many ACPI-specific commands. Type "?" or "help" - to get a listing of the command categories, and then "help <category>" for - a list of commands and their descriptions diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/Makefile linux.21rc5-ac2/drivers/acpi/Makefile --- linux.21rc5/drivers/acpi/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/Makefile 2003-04-28 17:58:27.000000000 +0100 @@ -4,51 +4,54 @@ O_TARGET := acpi.o -export-objs := acpi_ksyms.o - export ACPI_CFLAGS -ACPI_CFLAGS := -D_LINUX -I$(CURDIR)/include - -# -# CONFIG_ACPI_KERNEL_CONFIG is currently only IA64 -# -ifdef CONFIG_ACPI_KERNEL_CONFIG - ACPI_CFLAGS += -DCONFIG_ACPI_KERNEL_CONFIG_ONLY -endif - -acpi-subdirs := utilities dispatcher events hardware \ - executer namespace parser resources tables +ACPI_CFLAGS := -Os ifdef CONFIG_ACPI_DEBUG - ACPI_CFLAGS += -DACPI_DEBUG -Wno-unused -endif - -ifdef CONFIG_ACPI_DEBUGGER - ACPI_CFLAGS += -DENABLE_DEBUGGER - acpi-subdirs += debugger + ACPI_CFLAGS += -DACPI_DEBUG_OUTPUT endif -EXTRA_CFLAGS += $(ACPI_CFLAGS) +EXTRA_CFLAGS += $(ACPI_CFLAGS) -mod-subdirs := ospm +export-objs := acpi_ksyms.o processor.o -subdir-$(CONFIG_ACPI) += $(acpi-subdirs) -subdir-$(CONFIG_ACPI_BUSMGR) += ospm +obj-y := acpi_ksyms.o -obj-$(CONFIG_ACPI) += driver.o os.o acpi_ksyms.o -obj-$(CONFIG_ACPI) += $(foreach dir,$(acpi-subdirs),$(dir)/$(dir).o) -ifdef CONFIG_ACPI_KERNEL_CONFIG - obj-$(CONFIG_ACPI) += acpiconf.o osconf.o +# +# ACPI Boot-Time Table Parsing +# +ifeq ($(CONFIG_ACPI_BOOT),y) + obj-y += tables.o blacklist.o endif -ifeq ($(CONFIG_ACPI_BUSMGR),y) - obj-y += ospm/ospm.o +# +# ACPI Core Subsystem (Interpreter) +# +ifeq ($(CONFIG_ACPI_INTERPRETER),y) + obj-y += osl.o utils.o + subdir-y += dispatcher events executer hardware namespace parser \ + resources tables utilities + obj-y += $(foreach dir,$(subdir-y),$(dir)/$(dir).o) endif -# commented out until we distribute it -ASG -#ifeq ($(CONFIG_KDB),y) -# obj-m += kdb/kdbm_acpi.o -#endif +# +# ACPI Bus and Device Drivers +# +ifeq ($(CONFIG_ACPI_BUS),y) + obj-y += bus.o + obj-$(CONFIG_ACPI_AC) += ac.o + obj-$(CONFIG_ACPI_BATTERY) += battery.o + obj-$(CONFIG_ACPI_BUTTON) += button.o + obj-$(CONFIG_ACPI_EC) += ec.o + obj-$(CONFIG_ACPI_FAN) += fan.o + obj-$(CONFIG_ACPI_PCI) += pci_root.o pci_link.o pci_irq.o pci_bind.o + obj-$(CONFIG_ACPI_POWER) += power.o + obj-$(CONFIG_ACPI_PROCESSOR) += processor.o + obj-$(CONFIG_ACPI_THERMAL) += thermal.o + obj-$(CONFIG_ACPI_SYSTEM) += system.o + obj-$(CONFIG_ACPI_NUMA) += numa.o + obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o +endif include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive
linux.21rc5/drivers/acpi/namespace/Makefile linux.21rc5-ac2/drivers/acpi/namespace/Makefile --- linux.21rc5/drivers/acpi/namespace/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/Makefile 2003-04-28 17:58:27.000000000 +0100 @@ -1,11 +1,10 @@ # # Makefile for all Linux ACPI interpreter subdirectories -# EXCEPT for the ospm directory # O_TARGET := $(notdir $(CURDIR)).o -obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c)) +obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c)) EXTRA_CFLAGS += $(ACPI_CFLAGS) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsaccess.c linux.21rc5-ac2/drivers/acpi/namespace/nsaccess.c --- linux.21rc5/drivers/acpi/namespace/nsaccess.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsaccess.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,43 +1,60 @@ /******************************************************************************* * * Module Name: nsaccess - Top-level functions for accessing ACPI namespace - * $Revision: 135 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "amlcode.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "acdispat.h" +#include <acpi/acpi.h> +#include <acpi/amlcode.h> +#include <acpi/acnamesp.h> +#include <acpi/acdispat.h> #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsaccess") + ACPI_MODULE_NAME ("nsaccess") /******************************************************************************* * - * FUNCTION: Acpi_ns_root_initialize + * FUNCTION: acpi_ns_root_initialize * * PARAMETERS: None * @@ -52,42 +69,44 @@ acpi_status acpi_ns_root_initialize (void) { - acpi_status status = AE_OK; - const predefined_names *init_val = NULL; - acpi_namespace_node *new_node; - acpi_operand_object *obj_desc; + acpi_status status; + const struct acpi_predefined_names *init_val = NULL; + struct acpi_namespace_node *new_node; + union acpi_operand_object *obj_desc; + acpi_string val = NULL; - FUNCTION_TRACE ("Ns_root_initialize"); + ACPI_FUNCTION_TRACE ("ns_root_initialize"); - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* * The global root ptr is initially NULL, so a non-NULL value indicates - * that Acpi_ns_root_initialize() has already been called; just return. + * that acpi_ns_root_initialize() has already been called; just return. */ if (acpi_gbl_root_node) { status = AE_OK; goto unlock_and_exit; } - /* * Tell the rest of the subsystem that the root is initialized * (This is OK because the namespace is locked) */ acpi_gbl_root_node = &acpi_gbl_root_node_struct; - /* Enter the pre-defined names in the name table */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Entering predefined entries into namespace\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "Entering predefined entries into namespace\n")); for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) { status = acpi_ns_lookup (NULL, init_val->name, init_val->type, - IMODE_LOAD_PASS2, NS_NO_UPSEARCH, - NULL, &new_node); + ACPI_IMODE_LOAD_PASS2, ACPI_NS_NO_UPSEARCH, NULL, &new_node); if (ACPI_FAILURE (status) || (!new_node)) /* Must be on same line for code converter */ { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, @@ -97,10 +116,20 @@ /* * Name entered successfully. - * If entry in Pre_defined_names[] specifies an + * If entry in pre_defined_names[] specifies an * initial value, create the initial value. */ if (init_val->val) { + status = acpi_os_predefined_override (init_val, &val); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not override predefined %s\n", + init_val->name)); + } + + if (!val) { + val = init_val->val; + } + /* * Entry requests an initial value, allocate a * descriptor for it. @@ -116,13 +145,24 @@ * internal representation. Only types actually * used for initial values are implemented here.
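The acpi_os_predefined_override() call added to the loop above gives the host OS a chance to substitute its own value for a predefined object (most usefully _OS_) before it is entered into the namespace. A minimal sketch of such a hook, with the types reduced to the fields actually used here and the function name my_predefined_override chosen for illustration:

#include <string.h>
#include <stddef.h>

typedef char *acpi_string;
typedef int   acpi_status;          /* stand-in for the real typedef */
#define AE_OK            0
#define AE_BAD_PARAMETER 1

/* Reduced view of the predefined-name table entry; only the fields
 * referenced by the loop above are shown. */
struct acpi_predefined_names {
    char            *name;
    unsigned char   type;
    char            *val;
};

acpi_status
my_predefined_override (
    const struct acpi_predefined_names *init_val,
    acpi_string                        *new_val)
{
    if (!init_val || !new_val)
        return (AE_BAD_PARAMETER);

    /* NULL means "keep the compiled-in default" */
    *new_val = NULL;

    /* Example: present a different _OS_ string to the AML */
    if (strcmp (init_val->name, "_OS_") == 0)
        *new_val = "Microsoft Windows NT";

    return (AE_OK);
}

Note how the caller above falls back to init_val->val whenever the hook leaves *new_val as NULL, so a do-nothing override is always safe.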
*/ - switch (init_val->type) { + case ACPI_TYPE_METHOD: + obj_desc->method.param_count = + (u8) ACPI_STRTOUL (val, NULL, 10); + obj_desc->common.flags |= AOPOBJ_DATA_VALID; + +#if defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY) + + /* Compiler cheats by putting parameter count in the owner_iD */ + + new_node->owner_id = obj_desc->method.param_count; +#endif + break; case ACPI_TYPE_INTEGER: obj_desc->integer.value = - (acpi_integer) STRTOUL (init_val->val, NULL, 10); + (acpi_integer) ACPI_STRTOUL (val, NULL, 10); break; @@ -131,25 +171,25 @@ /* * Build an object around the static string */ - obj_desc->string.length = STRLEN (init_val->val); - obj_desc->string.pointer = init_val->val; + obj_desc->string.length = (u32) ACPI_STRLEN (val); + obj_desc->string.pointer = val; obj_desc->common.flags |= AOPOBJ_STATIC_POINTER; break; case ACPI_TYPE_MUTEX: + obj_desc->mutex.node = new_node; obj_desc->mutex.sync_level = - (u16) STRTOUL (init_val->val, NULL, 10); + (u16) ACPI_STRTOUL (val, NULL, 10); - if (STRCMP (init_val->name, "_GL_") == 0) { + if (ACPI_STRCMP (init_val->name, "_GL_") == 0) { /* * Create a counting semaphore for the * global lock */ status = acpi_os_create_semaphore (ACPI_NO_UNIT_LIMIT, 1, &obj_desc->mutex.semaphore); - if (ACPI_FAILURE (status)) { goto unlock_and_exit; } @@ -160,13 +200,11 @@ */ acpi_gbl_global_lock_semaphore = obj_desc->mutex.semaphore; } - else { /* Create a mutex */ status = acpi_os_create_semaphore (1, 1, &obj_desc->mutex.semaphore); - if (ACPI_FAILURE (status)) { goto unlock_and_exit; } @@ -175,7 +213,7 @@ default: - REPORT_ERROR (("Unsupported initial type value %X\n", + ACPI_REPORT_ERROR (("Unsupported initial type value %X\n", init_val->type)); acpi_ut_remove_reference (obj_desc); obj_desc = NULL; @@ -184,7 +222,7 @@ /* Store pointer to value descriptor in the Node */ - acpi_ns_attach_object (new_node, obj_desc, obj_desc->common.type); + status = acpi_ns_attach_object (new_node, obj_desc, ACPI_GET_OBJECT_TYPE (obj_desc)); /* Remove local reference to the object */ @@ -194,23 +232,23 @@ unlock_and_exit: - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ns_lookup + * FUNCTION: acpi_ns_lookup * - * PARAMETERS: Prefix_node - Search scope if name is not fully qualified + * PARAMETERS: prefix_node - Search scope if name is not fully qualified * Pathname - Search pathname, in internal format * (as represented in the AML stream) * Type - Type associated with name - * Interpreter_mode - IMODE_LOAD_PASS2 => add name if not found + * interpreter_mode - IMODE_LOAD_PASS2 => add name if not found * Flags - Flags describing the search restrictions - * Walk_state - Current state of the walk - * Return_node - Where the Node is placed (if found + * walk_state - Current state of the walk + * return_node - Where the Node is placed (if found * or created successfully) * * RETURN: Status @@ -224,44 +262,41 @@ acpi_status acpi_ns_lookup ( - acpi_generic_state *scope_info, - NATIVE_CHAR *pathname, - acpi_object_type8 type, - operating_mode interpreter_mode, - u32 flags, - acpi_walk_state *walk_state, - acpi_namespace_node **return_node) + union acpi_generic_state *scope_info, + char *pathname, + acpi_object_type type, + acpi_interpreter_mode interpreter_mode, + u32 flags, + struct acpi_walk_state *walk_state, + struct acpi_namespace_node **return_node) { - acpi_status status; - 
acpi_namespace_node *prefix_node; - acpi_namespace_node *current_node = NULL; - acpi_namespace_node *scope_to_push = NULL; - acpi_namespace_node *this_node = NULL; - u32 num_segments; - acpi_name simple_name; - u8 null_name_path = FALSE; - acpi_object_type8 type_to_check_for; - acpi_object_type8 this_search_type; - u32 local_flags = flags & ~NS_ERROR_IF_FOUND; - - DEBUG_EXEC (u32 i;) + acpi_status status; + char *path = pathname; + struct acpi_namespace_node *prefix_node; + struct acpi_namespace_node *current_node = NULL; + struct acpi_namespace_node *this_node = NULL; + u32 num_segments; + u32 num_carats; + acpi_name simple_name; + acpi_object_type type_to_check_for; + acpi_object_type this_search_type; + u32 search_parent_flag = ACPI_NS_SEARCH_PARENT; + u32 local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | + ACPI_NS_SEARCH_PARENT); - FUNCTION_TRACE ("Ns_lookup"); + ACPI_FUNCTION_TRACE ("ns_lookup"); if (!return_node) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - acpi_gbl_ns_lookup_count++; - - *return_node = ENTRY_NOT_FOUND; - + *return_node = ACPI_ENTRY_NOT_FOUND; if (!acpi_gbl_root_node) { - return (AE_NO_NAMESPACE); + return_ACPI_STATUS (AE_NO_NAMESPACE); } /* @@ -270,248 +305,273 @@ */ if ((!scope_info) || (!scope_info->scope.node)) { - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Null scope prefix, using root node (%p)\n", + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, + "Null scope prefix, using root node (%p)\n", acpi_gbl_root_node)); prefix_node = acpi_gbl_root_node; } else { prefix_node = scope_info->scope.node; - } - - - /* - * This check is explicitly split to relax the Type_to_check_for - * conditions for Bank_field_defn. Originally, both Bank_field_defn and - * Def_field_defn caused Type_to_check_for to be set to ACPI_TYPE_REGION, - * but the Bank_field_defn may also check for a Field definition as well - * as an Operation_region. - */ - if (INTERNAL_TYPE_FIELD_DEFN == type) { - /* Def_field_defn defines fields in a Region */ - - type_to_check_for = ACPI_TYPE_REGION; - } - - else if (INTERNAL_TYPE_BANK_FIELD_DEFN == type) { - /* Bank_field_defn defines data fields in a Field Object */ - - type_to_check_for = ACPI_TYPE_ANY; - } + if (ACPI_GET_DESCRIPTOR_TYPE (prefix_node) != ACPI_DESC_TYPE_NAMED) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "[%p] Not a namespace node\n", + prefix_node)); + return_ACPI_STATUS (AE_AML_INTERNAL); + } - else { - type_to_check_for = type; + /* + * This node might not be an actual "scope" node (such as a + * Device/Method, etc.) It could be a Package or other object node. + * Back up the tree to find the containing scope node. + */ + while (!acpi_ns_opens_scope (prefix_node->type) && + prefix_node->type != ACPI_TYPE_ANY) { + prefix_node = acpi_ns_get_parent_node (prefix_node); + } } + /* Save type TBD: may be no longer necessary */ - /* TBD: [Restructure] - Move the pathname stuff into a new procedure */ - - /* Examine the name pointer */ + type_to_check_for = type; + /* + * Begin examination of the actual pathname + */ if (!pathname) { - /* 8-12-98 ASL Grammar Update supports null Name_path */ + /* A Null name_path is allowed and refers to the root */ - null_name_path = TRUE; num_segments = 0; - this_node = acpi_gbl_root_node; + this_node = acpi_gbl_root_node; + path = ""; ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, - "Null Pathname (Zero segments), Flags=%x\n", flags)); + "Null Pathname (Zero segments), Flags=%X\n", flags)); } - else { /* - * Valid name pointer (Internal name format) + * Name pointer is valid (and must be in internal name format) * - * Check for prefixes.
As represented in the AML stream, a - * Pathname consists of an optional scope prefix followed by - * a segment part. + * Check for scope prefixes: * - * If present, the scope prefix is either a Root_prefix (in - * which case the name is fully qualified), or zero or more - * Parent_prefixes (in which case the name's scope is relative - * to the current scope). + * As represented in the AML stream, a namepath consists of an + * optional scope prefix followed by a name segment part. * - * The segment part consists of either: - * - A single 4-byte name segment, or - * - A Dual_name_prefix followed by two 4-byte name segments, or - * - A Multi_name_prefix_op, followed by a byte indicating the - * number of segments and the segments themselves. + * If present, the scope prefix is either a Root Prefix (in + * which case the name is fully qualified), or one or more + * Parent Prefixes (in which case the name's scope is relative + * to the current scope). */ - if (*pathname == AML_ROOT_PREFIX) { - /* Pathname is fully qualified, look in root name table */ - - current_node = acpi_gbl_root_node; + if (*path == (u8) AML_ROOT_PREFIX) { + /* Pathname is fully qualified, start from the root */ - /* point to segment part */ + this_node = acpi_gbl_root_node; + search_parent_flag = ACPI_NS_NO_UPSEARCH; - pathname++; + /* Point to name segment part */ - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Searching from root [%p]\n", - current_node)); + path++; - /* Direct reference to root, "\" */ - - if (!(*pathname)) { - this_node = acpi_gbl_root_node; - goto check_for_new_scope_and_exit; - } + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, + "Path is absolute from root [%p]\n", this_node)); } - else { /* Pathname is relative to current scope, start there */ - current_node = prefix_node; - - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Searching relative to pfx scope [%p]\n", - prefix_node)); + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, + "Searching relative to prefix scope [%4.4s] (%p)\n", + prefix_node->name.ascii, prefix_node)); /* - * Handle up-prefix (carat). More than one prefix - * is supported + * Handle multiple Parent Prefixes (carat) by just getting + * the parent node for each prefix instance. */ - while (*pathname == AML_PARENT_PREFIX) { - /* Point to segment part or next Parent_prefix */ + this_node = prefix_node; + num_carats = 0; + while (*path == (u8) AML_PARENT_PREFIX) { + /* Name is fully qualified, no search rules apply */ - pathname++; + search_parent_flag = ACPI_NS_NO_UPSEARCH; + /* + * Point past this prefix to the name segment + * part or the next Parent Prefix + */ + path++; - /* Backup to the parent's scope */ + /* Backup to the parent node */ - this_node = acpi_ns_get_parent_object (current_node); + num_carats++; + this_node = acpi_ns_get_parent_node (this_node); if (!this_node) { /* Current scope has no parent scope */ - REPORT_ERROR ( - ("Too many parent prefixes (^) - reached root\n")); + ACPI_REPORT_ERROR ( + ("ACPI path has too many parent prefixes (^) - reached beyond root node\n")); return_ACPI_STATUS (AE_NOT_FOUND); } + } - current_node = this_node; + if (search_parent_flag == ACPI_NS_NO_UPSEARCH) { + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, + "Search scope is [%4.4s], path has %d carat(s)\n", + this_node->name.ascii, num_carats)); } } - /* - * Examine the name prefix opcode, if any, - * to determine the number of segments + * Determine the number of ACPI name segments in this pathname. 
+ * + * The segment part consists of either: + * - A Null name segment (0) + * - A dual_name_prefix followed by two 4-byte name segments + * - A multi_name_prefix followed by a byte indicating the + * number of segments and the segments themselves. + * - A single 4-byte name segment + * + * Examine the name prefix opcode, if any, to determine the number of + * segments. */ - if (*pathname == AML_DUAL_NAME_PREFIX) { - num_segments = 2; + switch (*path) { + case 0: + /* + * Null name after a root or parent prefixes. We already + * have the correct target node and there are no name segments. + */ + num_segments = 0; + type = this_node->type; + + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, + "Prefix-only Pathname (Zero name segments), Flags=%X\n", flags)); + break; + + case AML_DUAL_NAME_PREFIX: + + /* More than one name_seg, search rules do not apply */ - /* point to first segment */ + search_parent_flag = ACPI_NS_NO_UPSEARCH; - pathname++; + /* Two segments, point to first name segment */ + + num_segments = 2; + path++; ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Dual Pathname (2 segments, Flags=%X)\n", flags)); - } + break; + + case AML_MULTI_NAME_PREFIX_OP: - else if (*pathname == AML_MULTI_NAME_PREFIX_OP) { - num_segments = (u32)* (u8 *) ++pathname; + /* More than one name_seg, search rules do not apply */ - /* point to first segment */ + search_parent_flag = ACPI_NS_NO_UPSEARCH; - pathname++; + /* Extract segment count, point to first name segment */ + + path++; + num_segments = (u32) (u8) *path; + path++; ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Multi Pathname (%d Segments, Flags=%X) \n", num_segments, flags)); - } + break; - else { + default: /* - * No Dual or Multi prefix, hence there is only one - * segment and Pathname is already pointing to it. + * Not a Null name, no Dual or Multi prefix, hence there is + * only one name segment and Pathname is already pointing to it. */ num_segments = 1; ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Simple Pathname (1 segment, Flags=%X)\n", flags)); + break; } -#ifdef ACPI_DEBUG - - /* TBD: [Restructure] Make this a procedure */ - - /* Debug only: print the entire name that we are about to lookup */ - - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "[")); - - for (i = 0; i < num_segments; i++) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_NAMES, "%4.4s/", (char*)&pathname[i * 4])); - } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_NAMES, "]\n")); -#endif + ACPI_DEBUG_EXEC (acpi_ns_print_pathname (num_segments, path)); } /* - * Search namespace for each segment of the name. - * Loop through and verify/add each name segment. + * Search namespace for each segment of the name. Loop through and + * verify (or add to the namespace) each name segment. + * + * The object type is significant only at the last name + * segment. (We don't care about the types along the path, only + * the type of the final target object.) */ - while (num_segments-- && current_node) { - /* - * Search for the current name segment under the current - * named object. The Type is significant only at the last (topmost) - * level. (We don't care about the types along the path, only - * the type of the final target object.) 
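The switch above is easier to follow with a concrete instance of the internal pathname format in front of you. The external name \_SB_.PCI0.ISA_ travels through the AML stream as a root prefix, a multi-name prefix, a segment count, and then fixed four-byte segments (byte values per the AML grammar; the array name is illustrative):

/* "\_SB_.PCI0.ISA_" in internal (AML) form */
static const unsigned char internal_path[] = {
    '\\',               /* AML_ROOT_PREFIX (0x5C): fully qualified */
    0x2F,               /* AML_MULTI_NAME_PREFIX_OP                */
    3,                  /* number of 4-byte name segments          */
    '_', 'S', 'B', '_',
    'P', 'C', 'I', '0',
    'I', 'S', 'A', '_',
};

A two-segment name such as PCI0.ISA_ would instead use the dual-name prefix (0x2E) with no count byte, a single segment is stored bare with no prefix at all, and each leading caret (0x5E) in a relative name becomes one parent-prefix byte, which is what the num_carats loop earlier consumes.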
- */ - this_search_type = ACPI_TYPE_ANY; + this_search_type = ACPI_TYPE_ANY; + current_node = this_node; + while (num_segments && current_node) { + num_segments--; if (!num_segments) { + /* + * This is the last segment, enable typechecking + */ this_search_type = type; - local_flags = flags; - } - /* Pluck one ACPI name from the front of the pathname */ + /* + * Only allow automatic parent search (search rules) if the caller + * requested it AND we have a single, non-fully-qualified name_seg + */ + if ((search_parent_flag != ACPI_NS_NO_UPSEARCH) && + (flags & ACPI_NS_SEARCH_PARENT)) { + local_flags |= ACPI_NS_SEARCH_PARENT; + } + + /* Set error flag according to caller */ - MOVE_UNALIGNED32_TO_32 (&simple_name, pathname); + if (flags & ACPI_NS_ERROR_IF_FOUND) { + local_flags |= ACPI_NS_ERROR_IF_FOUND; + } + } - /* Try to find the ACPI name */ + /* Extract one ACPI name from the front of the pathname */ - status = acpi_ns_search_and_enter (simple_name, walk_state, - current_node, interpreter_mode, - this_search_type, local_flags, - &this_node); + ACPI_MOVE_32_TO_32 (&simple_name, path); + /* Try to find the single (4 character) ACPI name */ + + status = acpi_ns_search_and_enter (simple_name, walk_state, current_node, + interpreter_mode, this_search_type, local_flags, &this_node); if (ACPI_FAILURE (status)) { if (status == AE_NOT_FOUND) { - /* Name not found in ACPI namespace */ + /* Name not found in ACPI namespace */ ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, - "Name [%4.4s] not found in scope %p\n", - (char*)&simple_name, current_node)); + "Name [%4.4s] not found in scope [%4.4s] %p\n", + (char *) &simple_name, (char *) ¤t_node->name, + current_node)); } + *return_node = this_node; return_ACPI_STATUS (status); } - /* - * If 1) This is the last segment (Num_segments == 0) - * 2) and looking for a specific type + * Sanity typecheck of the target object: + * + * If 1) This is the last segment (num_segments == 0) + * 2) And we are looking for a specific type * (Not checking for TYPE_ANY) * 3) Which is not an alias - * 4) which is not a local type (TYPE_DEF_ANY) - * 5) which is not a local type (TYPE_SCOPE) - * 6) which is not a local type (TYPE_INDEX_FIELD_DEFN) - * 7) and type of object is known (not TYPE_ANY) - * 8) and object does not match request + * 4) Which is not a local type (TYPE_SCOPE) + * 5) And the type of target object is known (not TYPE_ANY) + * 6) And target object does not match what we are looking for * * Then we have a type mismatch. Just warn and ignore it. */ if ((num_segments == 0) && (type_to_check_for != ACPI_TYPE_ANY) && - (type_to_check_for != INTERNAL_TYPE_ALIAS) && - (type_to_check_for != INTERNAL_TYPE_DEF_ANY) && - (type_to_check_for != INTERNAL_TYPE_SCOPE) && - (type_to_check_for != INTERNAL_TYPE_INDEX_FIELD_DEFN) && + (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) && + (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE) && (this_node->type != ACPI_TYPE_ANY) && (this_node->type != type_to_check_for)) { /* Complain about a type mismatch */ - REPORT_WARNING ( - ("Ns_lookup: %4.4s, type %X, checking for type %X\n", - (char*)&simple_name, this_node->type, type_to_check_for)); + ACPI_REPORT_WARNING ( + ("ns_lookup: Type mismatch on %4.4s (%s), searching for (%s)\n", + (char *) &simple_name, acpi_ut_get_type_name (this_node->type), + acpi_ut_get_type_name (type_to_check_for))); } /* @@ -519,57 +579,29 @@ * specific type, but the type of found object is known, use that type * to see if it opens a scope. 
*/ - if ((0 == num_segments) && (ACPI_TYPE_ANY == type)) { + if ((num_segments == 0) && (type == ACPI_TYPE_ANY)) { type = this_node->type; } - if ((num_segments || acpi_ns_opens_scope (type)) && - (this_node->child == NULL)) { - /* - * More segments or the type implies enclosed scope, - * and the next scope has not been allocated. - */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Load mode=%X This_node=%p\n", - interpreter_mode, this_node)); - } + /* Point to next name segment and make this node current */ + path += ACPI_NAME_SIZE; current_node = this_node; - - /* point to next name segment */ - - pathname += ACPI_NAME_SIZE; } - /* * Always check if we need to open a new scope */ -check_for_new_scope_and_exit: - - if (!(flags & NS_DONT_OPEN_SCOPE) && (walk_state)) { + if (!(flags & ACPI_NS_DONT_OPEN_SCOPE) && (walk_state)) { /* - * If entry is a type which opens a scope, - * push the new scope on the scope stack. + * If entry is a type which opens a scope, push the new scope on the + * scope stack. */ - if (acpi_ns_opens_scope (type_to_check_for)) { - /* 8-12-98 ASL Grammar Update supports null Name_path */ - - if (null_name_path) { - /* TBD: [Investigate] - is this the correct thing to do? */ - - scope_to_push = NULL; - } - else { - scope_to_push = this_node; - } - - status = acpi_ds_scope_stack_push (scope_to_push, type, - walk_state); + if (acpi_ns_opens_scope (type)) { + status = acpi_ds_scope_stack_push (this_node, type, walk_state); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Set global scope to %p\n", scope_to_push)); } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsalloc.c linux.21rc5-ac2/drivers/acpi/namespace/nsalloc.c --- linux.21rc5/drivers/acpi/namespace/nsalloc.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsalloc.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,43 +1,60 @@ /******************************************************************************* * * Module Name: nsalloc - Namespace allocation and deletion utilities - * $Revision: 60 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acnamesp.h" -#include "acinterp.h" +#include <acpi/acpi.h> +#include <acpi/acnamesp.h> #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsalloc") + ACPI_MODULE_NAME ("nsalloc") /******************************************************************************* * - * FUNCTION: Acpi_ns_create_node + * FUNCTION: acpi_ns_create_node * - * PARAMETERS: Acpi_name - Name of the new node + * PARAMETERS: acpi_name - Name of the new node * * RETURN: None * @@ -45,26 +62,26 @@ * ******************************************************************************/ -acpi_namespace_node * +struct acpi_namespace_node * acpi_ns_create_node ( - u32 name) + u32 name) { - acpi_namespace_node *node; + struct acpi_namespace_node *node; - FUNCTION_TRACE ("Ns_create_node"); + ACPI_FUNCTION_TRACE ("ns_create_node"); - node = ACPI_MEM_CALLOCATE (sizeof (acpi_namespace_node)); + node = ACPI_MEM_CALLOCATE (sizeof (struct acpi_namespace_node)); if (!node) { return_PTR (NULL); } ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].total_allocated++); - node->data_type = ACPI_DESC_TYPE_NAMED; - node->name = name; + node->name.integer = name; node->reference_count = 1; + ACPI_SET_DESCRIPTOR_TYPE (node, ACPI_DESC_TYPE_NAMED); return_PTR (node); } @@ -72,7 +89,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_delete_node + * FUNCTION: acpi_ns_delete_node * * PARAMETERS: Node - Node to be deleted * @@ -84,78 +101,150 @@ void acpi_ns_delete_node ( - acpi_namespace_node *node) + struct acpi_namespace_node *node) { - acpi_namespace_node *parent_node; - acpi_namespace_node *prev_node; - acpi_namespace_node *next_node; + struct acpi_namespace_node *parent_node; + struct acpi_namespace_node *prev_node; + struct acpi_namespace_node *next_node; - FUNCTION_TRACE_PTR ("Ns_delete_node", node); + ACPI_FUNCTION_TRACE_PTR ("ns_delete_node", node); - parent_node = acpi_ns_get_parent_object (node); + parent_node = acpi_ns_get_parent_node (node); prev_node = NULL; next_node = parent_node->child; + /* Find the node that is
the previous peer in the parent's child list */ + while (next_node != node) { prev_node = next_node; next_node = prev_node->peer; } if (prev_node) { + /* Node is not first child, unlink it */ + prev_node->peer = next_node->peer; if (next_node->flags & ANOBJ_END_OF_PEER_LIST) { prev_node->flags |= ANOBJ_END_OF_PEER_LIST; } } else { - parent_node->child = next_node->peer; + /* Node is first child (has no previous peer) */ + + if (next_node->flags & ANOBJ_END_OF_PEER_LIST) { + /* No peers at all */ + + parent_node->child = NULL; + } + else { /* Link peer list to parent */ + + parent_node->child = next_node->peer; + } } ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].total_freed++); /* - * Detach an object if there is one + * Detach an object if there is one then delete the node */ - if (node->object) { - acpi_ns_detach_object (node); - } - + acpi_ns_detach_object (node); ACPI_MEM_FREE (node); return_VOID; } +#ifdef ACPI_ALPHABETIC_NAMESPACE +/******************************************************************************* + * + * FUNCTION: acpi_ns_compare_names + * + * PARAMETERS: Name1 - First name to compare + * Name2 - Second name to compare + * + * RETURN: value from strncmp + * + * DESCRIPTION: Compare two ACPI names. Names that are prefixed with an + * underscore are forced to be alphabetically first. + * + ******************************************************************************/ + +int +acpi_ns_compare_names ( + char *name1, + char *name2) +{ + char reversed_name1[ACPI_NAME_SIZE]; + char reversed_name2[ACPI_NAME_SIZE]; + u32 i; + u32 j; + + + /* + * Replace all instances of "underscore" with a value that is smaller so + * that all names that are prefixed with underscore(s) are alphabetically + * first. + * + * Reverse the name bytewise so we can just do a 32-bit compare instead + * of a strncmp. + */ + for (i = 0, j= (ACPI_NAME_SIZE - 1); i < ACPI_NAME_SIZE; i++, j--) { + reversed_name1[j] = name1[i]; + if (name1[i] == '_') { + reversed_name1[j] = '*'; + } + + reversed_name2[j] = name2[i]; + if (name2[i] == '_') { + reversed_name2[j] = '*'; + } + } + + return (*(int *) reversed_name1 - *(int *) reversed_name2); +} +#endif + + /******************************************************************************* * - * FUNCTION: Acpi_ns_install_node + * FUNCTION: acpi_ns_install_node * - * PARAMETERS: Walk_state - Current state of the walk - * Parent_node - The parent of the new Node + * PARAMETERS: walk_state - Current state of the walk + * parent_node - The parent of the new Node * Node - The new Node to install * Type - ACPI object type of the new Node * * RETURN: None * - * DESCRIPTION: Initialize a new entry within a namespace table. + * DESCRIPTION: Initialize a new namespace node and install it amongst + * its peers. + * + * Note: Current namespace lookup is linear search. However, the + * nodes are linked in alphabetical order to 1) put all reserved + * names (start with underscore) first, and to 2) make a readable + * namespace dump. 
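The byte-reversal trick in acpi_ns_compare_names() above is easiest to verify with concrete inputs. The standalone sketch below reproduces the comparison (little-endian byte order is assumed, as on i386; on a big-endian machine the reversal would be unnecessary) and uses memcpy instead of the pointer cast to sidestep alignment concerns:

#include <stdio.h>
#include <string.h>

#define NAME_SIZE 4

static int
compare_names (const char *name1, const char *name2)
{
    char r1[NAME_SIZE], r2[NAME_SIZE];
    int  v1, v2, i, j;

    /* '_' maps to '*' so reserved names sort first; bytes are stored
     * reversed so one integer subtraction compares the name
     * most-significant-character first on a little-endian CPU. */
    for (i = 0, j = NAME_SIZE - 1; i < NAME_SIZE; i++, j--) {
        r1[j] = (name1[i] == '_') ? '*' : name1[i];
        r2[j] = (name2[i] == '_') ? '*' : name2[i];
    }

    memcpy (&v1, r1, sizeof (v1));
    memcpy (&v2, r2, sizeof (v2));
    return (v1 - v2);
}

int
main (void)
{
    /* Reserved names sort ahead of ordinary ones */
    printf ("%d\n", compare_names ("_HID", "ABCD") < 0);   /* 1 */
    /* A trailing underscore still sorts before 'A' */
    printf ("%d\n", compare_names ("DEF_", "DEFA") < 0);   /* 1 */
    return 0;
}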
* ******************************************************************************/ void acpi_ns_install_node ( - acpi_walk_state *walk_state, - acpi_namespace_node *parent_node, /* Parent */ - acpi_namespace_node *node, /* New Child*/ - acpi_object_type8 type) + struct acpi_walk_state *walk_state, + struct acpi_namespace_node *parent_node, /* Parent */ + struct acpi_namespace_node *node, /* New Child*/ + acpi_object_type type) { - u16 owner_id = TABLE_ID_DSDT; - acpi_namespace_node *child_node; + u16 owner_id = 0; + struct acpi_namespace_node *child_node; +#ifdef ACPI_ALPHABETIC_NAMESPACE + + struct acpi_namespace_node *previous_child_node; +#endif - FUNCTION_TRACE ("Ns_install_node"); + ACPI_FUNCTION_TRACE ("ns_install_node"); /* @@ -167,17 +256,65 @@ owner_id = walk_state->owner_id; } - - /* link the new entry into the parent and existing children */ - - /* TBD: Could be first, last, or alphabetic */ + /* Link the new entry into the parent and existing children */ child_node = parent_node->child; if (!child_node) { parent_node->child = node; + node->flags |= ANOBJ_END_OF_PEER_LIST; + node->peer = parent_node; } - else { +#ifdef ACPI_ALPHABETIC_NAMESPACE + /* + * Walk the list whilst searching for the correct + * alphabetic placement. + */ + previous_child_node = NULL; + while (acpi_ns_compare_names (child_node->name.ascii, node->name.ascii) < 0) { + if (child_node->flags & ANOBJ_END_OF_PEER_LIST) { + /* Last peer; Clear end-of-list flag */ + + child_node->flags &= ~ANOBJ_END_OF_PEER_LIST; + + /* This node is the new peer to the child node */ + + child_node->peer = node; + + /* This node is the new end-of-list */ + + node->flags |= ANOBJ_END_OF_PEER_LIST; + node->peer = parent_node; + break; + } + + /* Get next peer */ + + previous_child_node = child_node; + child_node = child_node->peer; + } + + /* Did the node get inserted at the end-of-list? */ + + if (!(node->flags & ANOBJ_END_OF_PEER_LIST)) { + /* + * Loop above terminated without reaching the end-of-list. + * Insert the new node at the current location + */ + if (previous_child_node) { + /* Insert node alphabetically */ + + node->peer = child_node; + previous_child_node->peer = node; + } + else { + /* Insert node alphabetically at start of list */ + + node->peer = child_node; + parent_node->child = node; + } + } +#else while (!(child_node->flags & ANOBJ_END_OF_PEER_LIST)) { child_node = child_node->peer; } @@ -187,61 +324,25 @@ /* Clear end-of-list flag */ child_node->flags &= ~ANOBJ_END_OF_PEER_LIST; + node->flags |= ANOBJ_END_OF_PEER_LIST; + node->peer = parent_node; +#endif } /* Init the new entry */ - node->owner_id = owner_id; - node->flags |= ANOBJ_END_OF_PEER_LIST; - node->peer = parent_node; - - - /* - * If adding a name with unknown type, or having to - * add the region in order to define fields in it, we - * have a forward reference. - */ - if ((ACPI_TYPE_ANY == type) || - (INTERNAL_TYPE_FIELD_DEFN == type) || - (INTERNAL_TYPE_BANK_FIELD_DEFN == type)) { - /* - * We don't want to abort here, however! - * We will fill in the actual type when the - * real definition is found later.
- */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "[%4.4s] is a forward reference\n", - (char*)&node->name)); - } - - /* - * The Def_field_defn and Bank_field_defn cases are actually - * looking up the Region in which the field will be defined - */ - if ((INTERNAL_TYPE_FIELD_DEFN == type) || - (INTERNAL_TYPE_BANK_FIELD_DEFN == type)) { - type = ACPI_TYPE_REGION; - } - - /* - * Scope, Def_any, and Index_field_defn are bogus "types" which do - * not actually have anything to do with the type of the name - * being looked up. Save any other value of Type as the type of - * the entry. - */ - if ((type != INTERNAL_TYPE_SCOPE) && - (type != INTERNAL_TYPE_DEF_ANY) && - (type != INTERNAL_TYPE_INDEX_FIELD_DEFN)) { - node->type = (u8) type; - } + node->owner_id = owner_id; + node->type = (u8) type; - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "%4.4s added to %p at %p\n", - (char*)&node->name, parent_node, node)); + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "%4.4s (%s) added to %4.4s (%s) %p at %p\n", + node->name.ascii, acpi_ut_get_type_name (node->type), + parent_node->name.ascii, acpi_ut_get_type_name (parent_node->type), parent_node, node)); /* * Increment the reference count(s) of all parents up to * the root! */ - while ((node = acpi_ns_get_parent_object (node)) != NULL) { + while ((node = acpi_ns_get_parent_node (node)) != NULL) { node->reference_count++; } @@ -251,27 +352,28 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_delete_children + * FUNCTION: acpi_ns_delete_children * - * PARAMETERS: Parent_node - Delete this objects children + * PARAMETERS: parent_node - Delete this objects children * * RETURN: None. * - * DESCRIPTION: Delete all children of the parent object. Deletes a - * "scope". + * DESCRIPTION: Delete all children of the parent object. In other words, + * deletes a "scope". * ******************************************************************************/ void acpi_ns_delete_children ( - acpi_namespace_node *parent_node) + struct acpi_namespace_node *parent_node) { - acpi_namespace_node *child_node; - acpi_namespace_node *next_node; - u8 flags; + struct acpi_namespace_node *child_node; + struct acpi_namespace_node *next_node; + struct acpi_namespace_node *node; + u8 flags; - FUNCTION_TRACE_PTR ("Ns_delete_children", parent_node); + ACPI_FUNCTION_TRACE_PTR ("ns_delete_children", parent_node); if (!parent_node) { @@ -305,13 +407,32 @@ ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].total_freed++); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Object %p, Remaining %X\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Object %p, Remaining %X\n", child_node, acpi_gbl_current_node_count)); /* * Detach an object if there is one, then free the child node */ acpi_ns_detach_object (child_node); + + /* + * Decrement the reference count(s) of all parents up to + * the root! 
(counts were incremented when the node was created) + */ + node = child_node; + while ((node = acpi_ns_get_parent_node (node)) != NULL) { + node->reference_count--; + } + + /* There should be only one reference remaining on this node */ + + if (child_node->reference_count != 1) { + ACPI_REPORT_WARNING (("Existing references (%d) on node being deleted (%p)\n", + child_node->reference_count, child_node)); + } + + /* Now we can delete the node */ + ACPI_MEM_FREE (child_node); /* And move on to the next child in the list */ @@ -331,30 +452,30 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_delete_namespace_subtree + * FUNCTION: acpi_ns_delete_namespace_subtree * - * PARAMETERS: Parent_node - Root of the subtree to be deleted + * PARAMETERS: parent_node - Root of the subtree to be deleted * * RETURN: None. * * DESCRIPTION: Delete a subtree of the namespace. This includes all objects - * stored within the subtree. Scope tables are deleted also + * stored within the subtree. * ******************************************************************************/ -acpi_status +void acpi_ns_delete_namespace_subtree ( - acpi_namespace_node *parent_node) + struct acpi_namespace_node *parent_node) { - acpi_namespace_node *child_node = NULL; - u32 level = 1; + struct acpi_namespace_node *child_node = NULL; + u32 level = 1; - FUNCTION_TRACE ("Ns_delete_namespace_subtree"); + ACPI_FUNCTION_TRACE ("ns_delete_namespace_subtree"); if (!parent_node) { - return_ACPI_STATUS (AE_OK); + return_VOID; } /* @@ -383,7 +504,6 @@ child_node = 0; } } - else { /* * No more children of this parent node. @@ -403,17 +523,17 @@ /* Move up the tree to the grandparent */ - parent_node = acpi_ns_get_parent_object (parent_node); + parent_node = acpi_ns_get_parent_node (parent_node); } } - return_ACPI_STATUS (AE_OK); + return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ns_remove_reference + * FUNCTION: acpi_ns_remove_reference * * PARAMETERS: Node - Named node whose reference count is to be * decremented @@ -426,47 +546,50 @@ * ******************************************************************************/ -static void +void acpi_ns_remove_reference ( - acpi_namespace_node *node) + struct acpi_namespace_node *node) { - acpi_namespace_node *next_node; + struct acpi_namespace_node *parent_node; + struct acpi_namespace_node *this_node; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* * Decrement the reference count(s) of this node and all * nodes up to the root, Delete anything with zero remaining references. 
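Like acpi_ns_delete_children() just above it, acpi_ns_delete_namespace_subtree() walks the tree without recursion: a depth counter plus the parent back-pointers stand in for a call stack. The generic shape of that traversal, with an illustrative node layout (the real code additionally detaches objects and frees nodes as it climbs back out):

#include <stddef.h>

struct node {
    struct node *parent;
    struct node *child;                 /* first child, or NULL  */
    struct node *peer;                  /* next sibling, or NULL */
};

/* NULL prev means "return the first child of parent" */
static struct node *
next_child (struct node *parent, struct node *prev)
{
    return (prev ? prev->peer : parent->child);
}

static void
visit_subtree (struct node *parent, void (*visit) (struct node *))
{
    struct node  *child = NULL;
    unsigned int level = 1;

    while (level > 0) {
        child = next_child (parent, child);
        if (child) {
            visit (child);
            if (child->child) {
                /* Descend one level and restart the child scan there */
                level++;
                parent = child;
                child = NULL;
            }
        }
        else {
            /* No more children here: resume scanning the peers of
             * this parent, one level up. */
            level--;
            child = parent;
            parent = parent->parent;
        }
    }
}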
*/ - next_node = node; - while (next_node) { - /* Decrement the reference count on this node*/ + this_node = node; + while (this_node) { + /* Prepare to move up to parent */ + + parent_node = acpi_ns_get_parent_node (this_node); + + /* Decrement the reference count on this node */ - next_node->reference_count--; + this_node->reference_count--; /* Delete the node if no more references */ - if (!next_node->reference_count) { + if (!this_node->reference_count) { /* Delete all children and delete the node */ - acpi_ns_delete_children (next_node); - acpi_ns_delete_node (next_node); + acpi_ns_delete_children (this_node); + acpi_ns_delete_node (this_node); } - /* Move up to parent */ - - next_node = acpi_ns_get_parent_object (next_node); + this_node = parent_node; } } /******************************************************************************* * - * FUNCTION: Acpi_ns_delete_namespace_by_owner + * FUNCTION: acpi_ns_delete_namespace_by_owner * - * PARAMETERS: Owner_id - All nodes with this owner will be deleted + * PARAMETERS: owner_id - All nodes with this owner will be deleted * * RETURN: Status * @@ -476,65 +599,71 @@ * ******************************************************************************/ -acpi_status +void acpi_ns_delete_namespace_by_owner ( - u16 owner_id) + u16 owner_id) { - acpi_namespace_node *child_node; - u32 level; - acpi_namespace_node *parent_node; + struct acpi_namespace_node *child_node; + struct acpi_namespace_node *deletion_node; + u32 level; + struct acpi_namespace_node *parent_node; - FUNCTION_TRACE ("Ns_delete_namespace_by_owner"); + ACPI_FUNCTION_TRACE_U32 ("ns_delete_namespace_by_owner", owner_id); - parent_node = acpi_gbl_root_node; - child_node = 0; - level = 1; + parent_node = acpi_gbl_root_node; + child_node = NULL; + deletion_node = NULL; + level = 1; /* * Traverse the tree of nodes until we bubble back up * to where we started. */ while (level > 0) { - /* Get the next node in this scope (NULL if none) */ + /* + * Get the next child of this parent node. When child_node is NULL, + * the first child of the parent is returned + */ + child_node = acpi_ns_get_next_node (ACPI_TYPE_ANY, parent_node, child_node); + + if (deletion_node) { + acpi_ns_remove_reference (deletion_node); + deletion_node = NULL; + } - child_node = acpi_ns_get_next_node (ACPI_TYPE_ANY, parent_node, - child_node); if (child_node) { if (child_node->owner_id == owner_id) { - /* Found a child node - detach any attached object */ + /* Found a matching child node - detach any attached object */ acpi_ns_detach_object (child_node); } /* Check if this node has any children */ - if (acpi_ns_get_next_node (ACPI_TYPE_ANY, child_node, 0)) { + if (acpi_ns_get_next_node (ACPI_TYPE_ANY, child_node, NULL)) { /* * There is at least one child of this node, * visit the node */ level++; parent_node = child_node; - child_node = 0; + child_node = NULL; } - else if (child_node->owner_id == owner_id) { - acpi_ns_remove_reference (child_node); + deletion_node = child_node; } } - else { /* * No more children of this parent node. * Move up to the grandparent. 
*/ level--; - if (level != 0) { if (parent_node->owner_id == owner_id) { - acpi_ns_remove_reference (parent_node); + deletion_node = parent_node; } } @@ -544,11 +673,11 @@ /* Move up the tree to the grandparent */ - parent_node = acpi_ns_get_parent_object (parent_node); + parent_node = acpi_ns_get_parent_node (parent_node); } } - return_ACPI_STATUS (AE_OK); + return_VOID; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsdump.c linux.21rc5-ac2/drivers/acpi/namespace/nsdump.c --- linux.21rc5/drivers/acpi/namespace/nsdump.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsdump.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,45 +1,102 @@ /****************************************************************************** * * Module Name: nsdump - table dumping routines for debug - * $Revision: 105 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
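The nsdelete.c rework above fixes a classic use-after-free hazard in acpi_ns_remove_reference: the parent pointer is now captured before the reference count is dropped, because a count that reaches zero frees the node (and its parent link) on the spot. The same idea drives acpi_ns_delete_namespace_by_owner, where a matching node is remembered in deletion_node and only released after the next sibling has already been fetched. A minimal sketch of the capture-before-free pattern, using toy types rather than the real ACPI CA structures:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for struct acpi_namespace_node (illustrative only) */
    struct node {
        struct node *parent;
        int          refcount;
        const char  *name;
    };

    static void node_free(struct node *n)
    {
        printf("freeing %s\n", n->name);
        free(n);
    }

    /*
     * Drop one reference on 'n' and on every ancestor, freeing nodes
     * whose count reaches zero.  The parent pointer must be read before
     * the decrement: once the count hits zero the node is gone, and
     * reading n->parent afterwards would be a use-after-free.
     */
    static void remove_reference(struct node *n)
    {
        while (n) {
            struct node *parent = n->parent;   /* capture first */

            if (--n->refcount == 0)
                node_free(n);                  /* 'n' is now invalid */

            n = parent;                        /* safe: saved earlier */
        }
    }

    int main(void)
    {
        struct node *root  = malloc(sizeof *root);
        struct node *child = malloc(sizeof *child);

        *root  = (struct node){ .parent = NULL, .refcount = 2, .name = "root"  };
        *child = (struct node){ .parent = root, .refcount = 1, .name = "child" };

        remove_reference(child);   /* frees child, drops root to 1 */
        remove_reference(root);    /* frees root */
        return 0;
    }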
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "actables.h" -#include "acparser.h" +#include +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsdump") + ACPI_MODULE_NAME ("nsdump") +#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) -#if defined(ACPI_DEBUG) || defined(ENABLE_DEBUGGER) /******************************************************************************* * - * FUNCTION: Acpi_ns_dump_pathname + * FUNCTION: acpi_ns_print_pathname + * + * PARAMETERS: num_segment - Number of ACPI name segments + * Pathname - The compressed (internal) path + * + * DESCRIPTION: Print an object's full namespace pathname + * + ******************************************************************************/ + +void +acpi_ns_print_pathname ( + u32 num_segments, + char *pathname) +{ + ACPI_FUNCTION_NAME ("ns_print_pathname"); + + + if (!(acpi_dbg_level & ACPI_LV_NAMES) || !(acpi_dbg_layer & ACPI_NAMESPACE)) { + return; + } + + /* Print the entire name */ + + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "[")); + + while (num_segments) { + acpi_os_printf ("%4.4s", pathname); + pathname += ACPI_NAME_SIZE; + + num_segments--; + if (num_segments) { + acpi_os_printf ("."); + } + } + + acpi_os_printf ("]\n"); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ns_dump_pathname * * PARAMETERS: Handle - Object * Msg - Prefix message @@ -51,85 +108,65 @@ * ******************************************************************************/ -acpi_status +void acpi_ns_dump_pathname ( - acpi_handle handle, - NATIVE_CHAR *msg, - u32 level, - u32 component) + acpi_handle handle, + char *msg, + u32 level, + u32 component) { - NATIVE_CHAR *buffer; - u32 length; - - FUNCTION_TRACE ("Ns_dump_pathname"); + ACPI_FUNCTION_TRACE ("ns_dump_pathname"); /* Do this only if the requested debug level and component are enabled */ if (!(acpi_dbg_level & level) || !(acpi_dbg_layer & component)) { - return_ACPI_STATUS (AE_OK); - } - - buffer = ACPI_MEM_ALLOCATE (PATHNAME_MAX); - if (!buffer) { - return_ACPI_STATUS (AE_NO_MEMORY); + return_VOID; } /* Convert handle to a full pathname and print it (with supplied message) */ - length = PATHNAME_MAX; - if (ACPI_SUCCESS (acpi_ns_handle_to_pathname (handle, &length, buffer))) { - acpi_os_printf ("%s %s (%p)\n", msg, buffer, handle); - } - - ACPI_MEM_FREE (buffer); - - return_ACPI_STATUS (AE_OK); + acpi_ns_print_node_pathname (handle, msg); + acpi_os_printf ("\n"); + return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ns_dump_one_object + * FUNCTION: acpi_ns_dump_one_object * * PARAMETERS: Handle - Node to be dumped * Level - Nesting level of the handle - * Context - Passed into Walk_namespace + * Context - Passed into walk_namespace * * DESCRIPTION: Dump a single Node - * This procedure is a User_function called by Acpi_ns_walk_namespace. + * This procedure is a user_function called by acpi_ns_walk_namespace. 
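acpi_ns_print_pathname, added in the nsdump.c hunk below, walks a packed internal pathname: a flat array of fixed four-byte name segments with no separators, printed dotted. A standalone sketch of the same fixed-stride printing (ACPI_NAME_SIZE is 4 in the real headers; the sample path is made up):

    #include <stdio.h>

    #define NAME_SIZE 4   /* each ACPI name segment is exactly 4 bytes */

    /* Print "num_segments" packed 4-char segments as SEG1.SEG2.SEG3 */
    static void print_pathname(unsigned num_segments, const char *pathname)
    {
        printf("[");
        while (num_segments) {
            /* %.4s stops after 4 bytes, so no NUL terminator is needed */
            printf("%.4s", pathname);
            pathname += NAME_SIZE;

            if (--num_segments)
                printf(".");
        }
        printf("]\n");
    }

    int main(void)
    {
        /* Three segments packed back to back: _SB_.PCI0.ISA_ */
        print_pathname(3, "_SB_PCI0ISA_");
        return 0;
    }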
* ******************************************************************************/ acpi_status acpi_ns_dump_one_object ( - acpi_handle obj_handle, - u32 level, - void *context, - void **return_value) + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value) { - acpi_walk_info *info = (acpi_walk_info *) context; - acpi_namespace_node *this_node; - acpi_operand_object *obj_desc = NULL; - acpi_object_type8 obj_type; - acpi_object_type8 type; - u32 bytes_to_dump; - u32 downstream_sibling_mask = 0; - u32 level_tmp; - u32 which_bit; - u32 i; - + struct acpi_walk_info *info = (struct acpi_walk_info *) context; + struct acpi_namespace_node *this_node; + union acpi_operand_object *obj_desc = NULL; + acpi_object_type obj_type; + acpi_object_type type; + u32 bytes_to_dump; + u32 dbg_level; + u32 i; - PROC_NAME ("Ns_dump_one_object"); + ACPI_FUNCTION_NAME ("ns_dump_one_object"); - this_node = acpi_ns_map_handle_to_node (obj_handle); - - level_tmp = level; - type = this_node->type; - which_bit = 1; + /* Is output enabled? */ if (!(acpi_dbg_level & info->debug_level)) { return (AE_OK); @@ -140,6 +177,9 @@ return (AE_OK); } + this_node = acpi_ns_map_handle_to_node (obj_handle); + type = this_node->type; + /* Check if the owner matches */ if ((info->owner_id != ACPI_UINT32_MAX) && @@ -147,68 +187,30 @@ return (AE_OK); } - /* Indent the object according to the level */ - while (level_tmp--) { - - /* Print appropriate characters to form tree structure */ - - if (level_tmp) { - if (downstream_sibling_mask & which_bit) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "|")); - } - - else { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " ")); - } - - which_bit <<= 1; - } - - else { - if (acpi_ns_exist_downstream_sibling (this_node + 1)) { - downstream_sibling_mask |= (1 << (level - 1)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "+")); - } - - else { - downstream_sibling_mask &= ACPI_UINT32_MAX ^ (1 << (level - 1)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "+")); - } - - if (this_node->child == NULL) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "-")); - } - - else if (acpi_ns_exist_downstream_sibling (this_node->child)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "+")); - } - - else { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "-")); - } - } - } - + acpi_os_printf ("%2d%*s", (u32) level - 1, (int) level * 2, " "); - /* Check the integrity of our data */ + /* Check the node type and name */ - if (type > INTERNAL_TYPE_MAX) { - type = INTERNAL_TYPE_DEF_ANY; /* prints as *ERROR* */ + if (type > ACPI_TYPE_LOCAL_MAX) { + ACPI_REPORT_WARNING (("Invalid ACPI Type %08X\n", type)); } - if (!acpi_ut_valid_acpi_name (this_node->name)) { - REPORT_WARNING (("Invalid ACPI Name %08X\n", this_node->name)); + if (!acpi_ut_valid_acpi_name (this_node->name.integer)) { + ACPI_REPORT_WARNING (("Invalid ACPI Name %08X\n", this_node->name.integer)); } /* * Now we can print out the pertinent information */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " %4.4s %-12s %p", - (char*)&this_node->name, acpi_ut_get_type_name (type), this_node)); + acpi_os_printf ("%4.4s %-12s %p ", + this_node->name.ascii, acpi_ut_get_type_name (type), this_node); - obj_desc = this_node->object; + dbg_level = acpi_dbg_level; + acpi_dbg_level = 0; + obj_desc = acpi_ns_get_attached_object (this_node); + acpi_dbg_level = dbg_level; switch (info->display_type) { case ACPI_DISPLAY_SUMMARY: @@ -216,110 +218,148 @@ if (!obj_desc) { /* No attached object, we are done */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "\n")); + acpi_os_printf ("\n"); return (AE_OK); } - switch 
(type) { case ACPI_TYPE_PROCESSOR: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " ID %d Addr %.4X Len %.4X\n", + + acpi_os_printf ("ID %X Len %.4X Addr %p\n", obj_desc->processor.proc_id, - obj_desc->processor.address, - (unsigned)obj_desc->processor.length)); + obj_desc->processor.length, + (char *) obj_desc->processor.address); break; + case ACPI_TYPE_DEVICE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Notification object: %p", obj_desc)); + + acpi_os_printf ("Notify object: %p", obj_desc); break; + case ACPI_TYPE_METHOD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Args %d Len %.4X Aml %p \n", - obj_desc->method.param_count, + + acpi_os_printf ("Args %X Len %.4X Aml %p\n", + (u32) obj_desc->method.param_count, obj_desc->method.aml_length, - obj_desc->method.aml_start)); + obj_desc->method.aml_start); break; + case ACPI_TYPE_INTEGER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " = %8.8X%8.8X\n", - HIDWORD (obj_desc->integer.value), - LODWORD (obj_desc->integer.value))); + + acpi_os_printf ("= %8.8X%8.8X\n", + ACPI_HIDWORD (obj_desc->integer.value), + ACPI_LODWORD (obj_desc->integer.value)); break; + case ACPI_TYPE_PACKAGE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Elements %.2X\n", - obj_desc->package.count)); + + if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { + acpi_os_printf ("Elements %.2X\n", + obj_desc->package.count); + } + else { + acpi_os_printf ("[Length not yet evaluated]\n"); + } break; - case ACPI_TYPE_BUFFER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Len %.2X", - obj_desc->buffer.length)); - /* Dump some of the buffer */ + case ACPI_TYPE_BUFFER: - if (obj_desc->buffer.length > 0) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " =")); - for (i = 0; (i < obj_desc->buffer.length && i < 12); i++) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " %.2X", - obj_desc->buffer.pointer[i])); + if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { + acpi_os_printf ("Len %.2X", + obj_desc->buffer.length); + + /* Dump some of the buffer */ + + if (obj_desc->buffer.length > 0) { + acpi_os_printf (" ="); + for (i = 0; (i < obj_desc->buffer.length && i < 12); i++) { + acpi_os_printf (" %.2hX", obj_desc->buffer.pointer[i]); + } } + acpi_os_printf ("\n"); + } + else { + acpi_os_printf ("[Length not yet evaluated]\n"); } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "\n")); break; + case ACPI_TYPE_STRING: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Len %.2X", - obj_desc->string.length)); - if (obj_desc->string.length > 0) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " = \"%.32s\"...", - obj_desc->string.pointer)); - } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "\n")); + acpi_os_printf ("Len %.2X ", obj_desc->string.length); + acpi_ut_print_string (obj_desc->string.pointer, 32); + acpi_os_printf ("\n"); break; + case ACPI_TYPE_REGION: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " [%s]", - acpi_ut_get_region_name (obj_desc->region.space_id))); + + acpi_os_printf ("[%s]", acpi_ut_get_region_name (obj_desc->region.space_id)); if (obj_desc->region.flags & AOPOBJ_DATA_VALID) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Addr %8.8X%8.8X Len %.4X\n", - HIDWORD(obj_desc->region.address), - LODWORD(obj_desc->region.address), - obj_desc->region.length)); + acpi_os_printf (" Addr %8.8X%8.8X Len %.4X\n", + ACPI_HIDWORD (obj_desc->region.address), + ACPI_LODWORD (obj_desc->region.address), + obj_desc->region.length); } else { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " [Address/Length not evaluated]\n")); + acpi_os_printf (" [Address/Length not yet evaluated]\n"); } break; - case INTERNAL_TYPE_REFERENCE: - ACPI_DEBUG_PRINT_RAW 
((ACPI_DB_TABLES, " [%s]\n", - acpi_ps_get_opcode_name (obj_desc->reference.opcode))); + + case ACPI_TYPE_LOCAL_REFERENCE: + + acpi_os_printf ("[%s]\n", + acpi_ps_get_opcode_name (obj_desc->reference.opcode)); break; + case ACPI_TYPE_BUFFER_FIELD: - /* TBD: print Buffer name when we can easily get it */ + if (obj_desc->buffer_field.buffer_obj && + obj_desc->buffer_field.buffer_obj->buffer.node) { + acpi_os_printf ("Buf [%4.4s]", + obj_desc->buffer_field.buffer_obj->buffer.node->name.ascii); + } break; - case INTERNAL_TYPE_REGION_FIELD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Rgn [%4.4s]", - (char *) &obj_desc->common_field.region_obj->region.node->name)); + + case ACPI_TYPE_LOCAL_REGION_FIELD: + + acpi_os_printf ("Rgn [%4.4s]", + obj_desc->common_field.region_obj->region.node->name.ascii); break; - case INTERNAL_TYPE_BANK_FIELD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Rgn [%4.4s]", - (char *) &obj_desc->common_field.region_obj->region.node->name)); + + case ACPI_TYPE_LOCAL_BANK_FIELD: + + acpi_os_printf ("Rgn [%4.4s] Bnk [%4.4s]", + obj_desc->common_field.region_obj->region.node->name.ascii, + obj_desc->bank_field.bank_obj->common_field.node->name.ascii); break; - case INTERNAL_TYPE_INDEX_FIELD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Rgn [%4.4s]", - (char *) &obj_desc->index_field.index_obj->common_field.region_obj->region.node->name)); + + case ACPI_TYPE_LOCAL_INDEX_FIELD: + + acpi_os_printf ("Idx [%4.4s] Dat [%4.4s]", + obj_desc->index_field.index_obj->common_field.node->name.ascii, + obj_desc->index_field.data_obj->common_field.node->name.ascii); + break; + + + case ACPI_TYPE_LOCAL_ALIAS: + + acpi_os_printf ("Target %4.4s (%p)\n", ((struct acpi_namespace_node *) obj_desc)->name.ascii, obj_desc); break; default: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Object %p\n", obj_desc)); + acpi_os_printf ("Object %p\n", obj_desc); break; } @@ -327,77 +367,74 @@ switch (type) { case ACPI_TYPE_BUFFER_FIELD: - case INTERNAL_TYPE_REGION_FIELD: - case INTERNAL_TYPE_BANK_FIELD: - case INTERNAL_TYPE_INDEX_FIELD: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Off %.2X Len %.2X Acc %.2d\n", - (obj_desc->common_field.base_byte_offset * 8) + obj_desc->common_field.start_field_bit_offset, - obj_desc->common_field.bit_length, - obj_desc->common_field.access_bit_width)); + case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: + + acpi_os_printf (" Off %.2X Len %.2X Acc %.2hd\n", + (obj_desc->common_field.base_byte_offset * 8) + + obj_desc->common_field.start_field_bit_offset, + obj_desc->common_field.bit_length, + obj_desc->common_field.access_byte_width); break; - } + default: + break; + } break; case ACPI_DISPLAY_OBJECTS: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "%p O:%p", - this_node, obj_desc)); - + acpi_os_printf ("O:%p", obj_desc); if (!obj_desc) { /* No attached object, we are done */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "\n")); + acpi_os_printf ("\n"); return (AE_OK); } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "(R%d)", - obj_desc->common.reference_count)); + acpi_os_printf ("(R%d)", + obj_desc->common.reference_count); switch (type) { - case ACPI_TYPE_METHOD: /* Name is a Method and its AML offset/length are set */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " M:%p-%X\n", - obj_desc->method.aml_start, - obj_desc->method.aml_length)); - + acpi_os_printf (" M:%p-%X\n", obj_desc->method.aml_start, + obj_desc->method.aml_length); break; - case ACPI_TYPE_INTEGER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " N:%X%X\n", - 
HIDWORD(obj_desc->integer.value), - LODWORD(obj_desc->integer.value))); + acpi_os_printf (" N:%X%X\n", ACPI_HIDWORD(obj_desc->integer.value), + ACPI_LODWORD(obj_desc->integer.value)); break; - case ACPI_TYPE_STRING: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " S:%p-%X\n", - obj_desc->string.pointer, - obj_desc->string.length)); + acpi_os_printf (" S:%p-%X\n", obj_desc->string.pointer, + obj_desc->string.length); break; - case ACPI_TYPE_BUFFER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " B:%p-%X\n", - obj_desc->buffer.pointer, - obj_desc->buffer.length)); + acpi_os_printf (" B:%p-%X\n", obj_desc->buffer.pointer, + obj_desc->buffer.length); break; - default: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "\n")); + acpi_os_printf ("\n"); break; } break; + + + default: + acpi_os_printf ("\n"); + break; } /* If debug turned off, done */ @@ -409,48 +446,55 @@ /* If there is an attached object, display it */ - obj_desc = this_node->object; + dbg_level = acpi_dbg_level; + acpi_dbg_level = 0; + obj_desc = acpi_ns_get_attached_object (this_node); + acpi_dbg_level = dbg_level; /* Dump attached objects */ while (obj_desc) { - obj_type = INTERNAL_TYPE_INVALID; + obj_type = ACPI_TYPE_INVALID; + acpi_os_printf (" Attached Object %p: ", obj_desc); /* Decode the type of attached object and dump the contents */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " Attached Object %p: ", obj_desc)); + switch (ACPI_GET_DESCRIPTOR_TYPE (obj_desc)) { + case ACPI_DESC_TYPE_NAMED: - if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_NAMED)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "(Ptr to Node)\n")); - bytes_to_dump = sizeof (acpi_namespace_node); - } + acpi_os_printf ("(Ptr to Node)\n"); + bytes_to_dump = sizeof (struct acpi_namespace_node); + break; - else if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_INTERNAL)) { - obj_type = obj_desc->common.type; + case ACPI_DESC_TYPE_OPERAND: - if (obj_type > INTERNAL_TYPE_MAX) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "(Ptr to ACPI Object type %X [UNKNOWN])\n", obj_type)); + obj_type = ACPI_GET_OBJECT_TYPE (obj_desc); + + if (obj_type > ACPI_TYPE_LOCAL_MAX) { + acpi_os_printf ("(Ptr to ACPI Object type %X [UNKNOWN])\n", obj_type); bytes_to_dump = 32; } - else { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "(Ptr to ACPI Object type %2.2X [%s])\n", - obj_type, acpi_ut_get_type_name (obj_type))); - bytes_to_dump = sizeof (acpi_operand_object); + acpi_os_printf ("(Ptr to ACPI Object type %s, %X)\n", + acpi_ut_get_type_name (obj_type), obj_type); + bytes_to_dump = sizeof (union acpi_operand_object); } - } + break; + + + default: - else { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "(String or Buffer - not descriptor)\n")); + acpi_os_printf ("(String or Buffer ptr - not an object descriptor)\n"); bytes_to_dump = 16; + break; } - DUMP_BUFFER (obj_desc, bytes_to_dump); + ACPI_DUMP_BUFFER (obj_desc, bytes_to_dump); /* If value is NOT an internal object, we are done */ - if (VALID_DESCRIPTOR_TYPE (obj_desc, ACPI_DESC_TYPE_NAMED)) { + if (ACPI_GET_DESCRIPTOR_TYPE (obj_desc) != ACPI_DESC_TYPE_OPERAND) { goto cleanup; } @@ -459,78 +503,78 @@ */ switch (obj_type) { case ACPI_TYPE_STRING: - obj_desc = (acpi_operand_object *) obj_desc->string.pointer; + obj_desc = (void *) obj_desc->string.pointer; break; case ACPI_TYPE_BUFFER: - obj_desc = (acpi_operand_object *) obj_desc->buffer.pointer; + obj_desc = (void *) obj_desc->buffer.pointer; break; case ACPI_TYPE_BUFFER_FIELD: - obj_desc = (acpi_operand_object *) obj_desc->buffer_field.buffer_obj; + obj_desc = (union acpi_operand_object *) 
obj_desc->buffer_field.buffer_obj; break; case ACPI_TYPE_PACKAGE: - obj_desc = (acpi_operand_object *) obj_desc->package.elements; + obj_desc = (void *) obj_desc->package.elements; break; case ACPI_TYPE_METHOD: - obj_desc = (acpi_operand_object *) obj_desc->method.aml_start; + obj_desc = (void *) obj_desc->method.aml_start; break; - case INTERNAL_TYPE_REGION_FIELD: - obj_desc = (acpi_operand_object *) obj_desc->field.region_obj; + case ACPI_TYPE_LOCAL_REGION_FIELD: + obj_desc = (void *) obj_desc->field.region_obj; break; - case INTERNAL_TYPE_BANK_FIELD: - obj_desc = (acpi_operand_object *) obj_desc->bank_field.region_obj; + case ACPI_TYPE_LOCAL_BANK_FIELD: + obj_desc = (void *) obj_desc->bank_field.region_obj; break; - case INTERNAL_TYPE_INDEX_FIELD: - obj_desc = (acpi_operand_object *) obj_desc->index_field.index_obj; + case ACPI_TYPE_LOCAL_INDEX_FIELD: + obj_desc = (void *) obj_desc->index_field.index_obj; break; - default: + default: goto cleanup; } - obj_type = INTERNAL_TYPE_INVALID; /* Terminate loop after next pass */ + obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */ } cleanup: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, "\n")); + acpi_os_printf ("\n"); return (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ns_dump_objects + * FUNCTION: acpi_ns_dump_objects * * PARAMETERS: Type - Object type to be dumped - * Max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX + * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX * for an effectively unlimited depth. - * Owner_id - Dump only objects owned by this ID. Use + * owner_id - Dump only objects owned by this ID. Use * ACPI_UINT32_MAX to match all owners. - * Start_handle - Where in namespace to start/end search + * start_handle - Where in namespace to start/end search * * DESCRIPTION: Dump typed objects within the loaded namespace. - * Uses Acpi_ns_walk_namespace in conjunction with Acpi_ns_dump_one_object. + * Uses acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object. * ******************************************************************************/ void acpi_ns_dump_objects ( - acpi_object_type8 type, - u8 display_type, - u32 max_depth, - u32 owner_id, - acpi_handle start_handle) + acpi_object_type type, + u8 display_type, + u32 max_depth, + u32 owner_id, + acpi_handle start_handle) { - acpi_walk_info info; + struct acpi_walk_info info; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); info.debug_level = ACPI_LV_TABLES; @@ -538,97 +582,19 @@ info.display_type = display_type; - acpi_ns_walk_namespace (type, start_handle, max_depth, NS_WALK_NO_UNLOCK, acpi_ns_dump_one_object, - (void *) &info, NULL); -} - - -#ifndef _ACPI_ASL_COMPILER -/******************************************************************************* - * - * FUNCTION: Acpi_ns_dump_one_device - * - * PARAMETERS: Handle - Node to be dumped - * Level - Nesting level of the handle - * Context - Passed into Walk_namespace - * - * DESCRIPTION: Dump a single Node that represents a device - * This procedure is a User_function called by Acpi_ns_walk_namespace. 
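Much of the dump rewrite above replaces the old VALID_DESCRIPTOR_TYPE() probes with a single switch on ACPI_GET_DESCRIPTOR_TYPE(). The underlying idea is that every descriptor -- namespace node, operand object, or raw buffer -- begins with a common type field, so one pointer can be classified before it is interpreted. A simplified illustration (the tag values and struct layouts are invented for the sketch):

    #include <stdio.h>

    /* Invented tag values; the real ones live in the ACPI CA headers */
    enum desc_type { DESC_NAMED = 1, DESC_OPERAND = 2 };

    struct common_header { unsigned char descriptor_type; };

    struct named_node {
        struct common_header hdr;   /* must be the first member */
        char name[5];
    };

    struct operand_obj {
        struct common_header hdr;   /* must be the first member */
        int value;
    };

    /* Works on any descriptor because the header is always first */
    static unsigned char get_descriptor_type(const void *desc)
    {
        return ((const struct common_header *) desc)->descriptor_type;
    }

    static void dump(const void *desc)
    {
        switch (get_descriptor_type(desc)) {
        case DESC_NAMED:
            printf("node %s\n", ((const struct named_node *) desc)->name);
            break;
        case DESC_OPERAND:
            printf("object %d\n", ((const struct operand_obj *) desc)->value);
            break;
        default:
            printf("not a descriptor\n");
            break;
        }
    }

    int main(void)
    {
        struct named_node  n = { { DESC_NAMED },   "CPU0" };
        struct operand_obj o = { { DESC_OPERAND }, 42 };

        dump(&n);
        dump(&o);
        return 0;
    }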
- * - ******************************************************************************/ - -acpi_status -acpi_ns_dump_one_device ( - acpi_handle obj_handle, - u32 level, - void *context, - void **return_value) -{ - acpi_device_info info; - acpi_status status; - u32 i; - - - PROC_NAME ("Ns_dump_one_device"); - - - status = acpi_ns_dump_one_object (obj_handle, level, context, return_value); - - status = acpi_get_object_info (obj_handle, &info); - if (ACPI_SUCCESS (status)) { - for (i = 0; i < level; i++) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " ")); - } - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " HID: %s, ADR: %8.8X%8.8X, Status: %x\n", - info.hardware_id, HIDWORD(info.address), LODWORD(info.address), info.current_status)); - } - - return (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ns_dump_root_devices - * - * PARAMETERS: None - * - * DESCRIPTION: Dump all objects of type "device" - * - ******************************************************************************/ - -void -acpi_ns_dump_root_devices (void) -{ - acpi_handle sys_bus_handle; - - - PROC_NAME ("Ns_dump_root_devices"); - - - /* Only dump the table if tracing is enabled */ - - if (!(ACPI_LV_TABLES & acpi_dbg_level)) { - return; - } - - acpi_get_handle (0, NS_SYSTEM_BUS, &sys_bus_handle); - - ACPI_DEBUG_PRINT ((ACPI_DB_TABLES, "Display of all devices in the namespace:\n")); - acpi_ns_walk_namespace (ACPI_TYPE_DEVICE, sys_bus_handle, ACPI_UINT32_MAX, NS_WALK_NO_UNLOCK, - acpi_ns_dump_one_device, NULL, NULL); + (void) acpi_ns_walk_namespace (type, start_handle, max_depth, + ACPI_NS_WALK_NO_UNLOCK, acpi_ns_dump_one_object, + (void *) &info, NULL); } -#endif /******************************************************************************* * - * FUNCTION: Acpi_ns_dump_tables + * FUNCTION: acpi_ns_dump_tables * - * PARAMETERS: Search_base - Root of subtree to be dumped, or + * PARAMETERS: search_base - Root of subtree to be dumped, or * NS_ALL to dump the entire namespace - * Max_depth - Maximum depth of dump. Use INT_MAX + * max_depth - Maximum depth of dump. Use INT_MAX * for an effectively unlimited depth. * * DESCRIPTION: Dump the name space, or a portion of it. @@ -637,13 +603,13 @@ void acpi_ns_dump_tables ( - acpi_handle search_base, - u32 max_depth) + acpi_handle search_base, + u32 max_depth) { - acpi_handle search_handle = search_base; + acpi_handle search_handle = search_base; - FUNCTION_TRACE ("Ns_dump_tables"); + ACPI_FUNCTION_TRACE ("ns_dump_tables"); if (!acpi_gbl_root_node) { @@ -651,11 +617,11 @@ * If the name space has not been initialized, * there is nothing to dump. 
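All of these dump entry points funnel into acpi_ns_walk_namespace, with acpi_ns_dump_one_object as the per-node callback and an acpi_walk_info handed through the opaque context pointer. The shape of that visitor pattern, reduced to a toy tree (the names and recursion here are placeholders, not the ACPI API, which walks iteratively):

    #include <stdio.h>

    struct tnode {
        const char   *name;
        struct tnode *child;
        struct tnode *sibling;
    };

    /* Callback signature: node, nesting level, caller-supplied context */
    typedef int (*walk_cb)(struct tnode *node, unsigned level, void *context);

    static int walk(struct tnode *node, unsigned level, walk_cb cb, void *ctx)
    {
        for (; node; node = node->sibling) {
            int status = cb(node, level, ctx);
            if (status)
                return status;          /* let the callback abort the walk */

            status = walk(node->child, level + 1, cb, ctx);
            if (status)
                return status;
        }
        return 0;
    }

    /* Example callback: indent by level, count nodes via the context */
    static int dump_one(struct tnode *node, unsigned level, void *context)
    {
        *(unsigned *) context += 1;
        printf("%*s%s\n", (int) level * 2, "", node->name);
        return 0;
    }

    int main(void)
    {
        struct tnode pci  = { "PCI0", NULL, NULL };
        struct tnode sb   = { "_SB_", &pci, NULL };
        struct tnode root = { "\\",   &sb,  NULL };
        unsigned count = 0;

        walk(&root, 0, dump_one, &count);
        printf("%u nodes\n", count);
        return 0;
    }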
*/ - ACPI_DEBUG_PRINT ((ACPI_DB_TABLES, "name space not initialized!\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_TABLES, "namespace not initialized!\n")); return_VOID; } - if (NS_ALL == search_base) { + if (ACPI_NS_ALL == search_base) { /* entire namespace */ search_handle = acpi_gbl_root_node; @@ -671,10 +637,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_dump_entry + * FUNCTION: acpi_ns_dump_entry * * PARAMETERS: Handle - Node to be dumped - * Debug_level - Output level + * debug_level - Output level * * DESCRIPTION: Dump a single Node * @@ -682,19 +648,20 @@ void acpi_ns_dump_entry ( - acpi_handle handle, - u32 debug_level) + acpi_handle handle, + u32 debug_level) { - acpi_walk_info info; + struct acpi_walk_info info; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); info.debug_level = debug_level; info.owner_id = ACPI_UINT32_MAX; + info.display_type = ACPI_DISPLAY_SUMMARY; - acpi_ns_dump_one_object (handle, 1, &info, NULL); + (void) acpi_ns_dump_one_object (handle, 1, &info, NULL); } #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsdumpdv.c linux.21rc5-ac2/drivers/acpi/namespace/nsdumpdv.c --- linux.21rc5/drivers/acpi/namespace/nsdumpdv.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsdumpdv.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,141 @@ +/****************************************************************************** + * + * Module Name: nsdump - table dumping routines for debug + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
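Both nsdump.c above and the new nsdumpdv.c that begins here guard their output twice: the file bodies compile only under ACPI_DEBUG_OUTPUT or ACPI_DEBUGGER, and the individual routines additionally bail out at run time unless the requested verbosity bit is set in acpi_dbg_level and the component bit in acpi_dbg_layer. A compact sketch of that two-axis filter (the bit values are stand-ins, not the real level and layer masks):

    #include <stdio.h>
    #include <stdarg.h>

    #define LV_TABLES  0x0008u   /* verbosity bit: table dumps */
    #define NAMESPACE  0x0010u   /* component bit: namespace code */

    static unsigned dbg_level = LV_TABLES;   /* what to print */
    static unsigned dbg_layer = NAMESPACE;   /* which components may print */

    static void debug_print(unsigned level, unsigned layer,
                            const char *fmt, ...)
    {
        va_list ap;

        /* Both axes must match, so output can be narrowed per component */
        if (!(dbg_level & level) || !(dbg_layer & layer))
            return;

        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        debug_print(LV_TABLES, NAMESPACE, "printed: %s\n", "both bits set");

        dbg_layer = 0;           /* mute the namespace component */
        debug_print(LV_TABLES, NAMESPACE, "suppressed\n");
        return 0;
    }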
+ */ + + +#include +#include + + +#define _COMPONENT ACPI_NAMESPACE + ACPI_MODULE_NAME ("nsdumpdv") + + +#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) + +/******************************************************************************* + * + * FUNCTION: acpi_ns_dump_one_device + * + * PARAMETERS: Handle - Node to be dumped + * Level - Nesting level of the handle + * Context - Passed into walk_namespace + * + * DESCRIPTION: Dump a single Node that represents a device + * This procedure is a user_function called by acpi_ns_walk_namespace. + * + ******************************************************************************/ + +acpi_status +acpi_ns_dump_one_device ( + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value) +{ + struct acpi_device_info info; + acpi_status status; + u32 i; + + + ACPI_FUNCTION_NAME ("ns_dump_one_device"); + + + status = acpi_ns_dump_one_object (obj_handle, level, context, return_value); + + status = acpi_get_object_info (obj_handle, &info); + if (ACPI_SUCCESS (status)) { + for (i = 0; i < level; i++) { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " ")); + } + + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_TABLES, " HID: %s, ADR: %8.8X%8.8X, Status: %X\n", + info.hardware_id, + ACPI_HIDWORD (info.address), ACPI_LODWORD (info.address), + info.current_status)); + } + + return (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ns_dump_root_devices + * + * PARAMETERS: None + * + * DESCRIPTION: Dump all objects of type "device" + * + ******************************************************************************/ + +void +acpi_ns_dump_root_devices (void) +{ + acpi_handle sys_bus_handle; + acpi_status status; + + + ACPI_FUNCTION_NAME ("ns_dump_root_devices"); + + + /* Only dump the table if tracing is enabled */ + + if (!(ACPI_LV_TABLES & acpi_dbg_level)) { + return; + } + + status = acpi_get_handle (0, ACPI_NS_SYSTEM_BUS, &sys_bus_handle); + if (ACPI_FAILURE (status)) { + return; + } + + ACPI_DEBUG_PRINT ((ACPI_DB_TABLES, "Display of all devices in the namespace:\n")); + + status = acpi_ns_walk_namespace (ACPI_TYPE_DEVICE, sys_bus_handle, + ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, + acpi_ns_dump_one_device, NULL, NULL); +} + +#endif + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nseval.c linux.21rc5-ac2/drivers/acpi/namespace/nseval.c --- linux.21rc5/drivers/acpi/namespace/nseval.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nseval.c 2003-04-28 17:59:26.000000000 +0100 @@ -2,51 +2,68 @@ * * Module Name: nseval - Object evaluation interfaces -- includes control * method lookup and execution. - * $Revision: 102 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "amlcode.h" -#include "acparser.h" -#include "acinterp.h" -#include "acnamesp.h" +#include +#include +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nseval") + ACPI_MODULE_NAME ("nseval") /******************************************************************************* * - * FUNCTION: Acpi_ns_evaluate_relative + * FUNCTION: acpi_ns_evaluate_relative * * PARAMETERS: Handle - The relative containing object - * *Pathname - Name of method to execute, If NULL, the + * Pathname - Name of method to execute, If NULL, the * handle is the object to execute - * **Params - List of parameters to pass to the method, + * Params - List of parameters to pass to the method, * terminated by NULL. Params itself may be * NULL if no parameters are being passed. - * *Return_object - Where to put method's return value (if + * return_object - Where to put method's return value (if * any). If NULL, no value is returned. 
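A recurring change through nseval.c, visible in acpi_ns_evaluate_relative just below, is that acpi_ut_acquire_mutex is no longer assumed to succeed: its status is checked before proceeding, the matching release is cast to (void) where its result is deliberately ignored, and failure paths converge on a cleanup label that frees the internal pathname. A skeleton of that acquire/lookup/release/cleanup flow, with dummy functions standing in for the ACPI calls:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef int status_t;
    #define OK   0
    #define FAIL 1

    /* Dummies standing in for acpi_ut_acquire_mutex and friends */
    static status_t acquire_mutex(void) { return OK; }
    static status_t release_mutex(void) { return OK; }
    static status_t lookup(const char *path) { return path ? OK : FAIL; }

    static status_t evaluate_relative(const char *pathname)
    {
        status_t status;
        char *internal_path;

        /* Resource acquired first; everything after must hit cleanup */
        internal_path = strdup(pathname);
        if (!internal_path)
            return FAIL;

        status = acquire_mutex();
        if (status != OK)
            goto cleanup;             /* never proceed on a failed lock */

        status = lookup(internal_path);

        /* Result intentionally ignored: we are already unwinding */
        (void) release_mutex();

        if (status != OK) {
            fprintf(stderr, "object [%s] not found\n", pathname);
            goto cleanup;
        }

        printf("evaluated %s\n", pathname);

    cleanup:
        free(internal_path);
        return status;
    }

    int main(void)
    {
        return evaluate_relative("\\_SB_.PCI0") == OK ? 0 : 1;
    }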
* * RETURN: Status @@ -60,19 +77,19 @@ acpi_status acpi_ns_evaluate_relative ( - acpi_namespace_node *handle, - NATIVE_CHAR *pathname, - acpi_operand_object **params, - acpi_operand_object **return_object) + struct acpi_namespace_node *handle, + char *pathname, + union acpi_operand_object **params, + union acpi_operand_object **return_object) { - acpi_namespace_node *prefix_node; - acpi_status status; - acpi_namespace_node *node = NULL; - NATIVE_CHAR *internal_path = NULL; - acpi_generic_state scope_info; + struct acpi_namespace_node *prefix_node; + acpi_status status; + struct acpi_namespace_node *node = NULL; + char *internal_path = NULL; + union acpi_generic_state scope_info; - FUNCTION_TRACE ("Ns_evaluate_relative"); + ACPI_FUNCTION_TRACE ("ns_evaluate_relative"); /* @@ -91,11 +108,14 @@ /* Get the prefix handle and Node */ - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } prefix_node = acpi_ns_map_handle_to_node (handle); if (!prefix_node) { - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); status = AE_BAD_PARAMETER; goto cleanup; } @@ -104,13 +124,13 @@ scope_info.scope.node = prefix_node; status = acpi_ns_lookup (&scope_info, internal_path, ACPI_TYPE_ANY, - IMODE_EXECUTE, NS_NO_UPSEARCH, NULL, + ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL, &node); - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Object [%s] not found [%s]\n", + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Object [%s] not found [%s]\n", pathname, acpi_format_exception (status))); goto cleanup; } @@ -119,12 +139,12 @@ * Now that we have a handle to the object, we can attempt * to evaluate it. */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%s [%p] Value %p\n", - pathname, node, node->object)); + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "%s [%p] Value %p\n", + pathname, node, acpi_ns_get_attached_object (node))); status = acpi_ns_evaluate_by_handle (node, params, return_object); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "*** Completed eval of object %s ***\n", + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "*** Completed eval of object %s ***\n", pathname)); cleanup: @@ -136,12 +156,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_evaluate_by_name + * FUNCTION: acpi_ns_evaluate_by_name * * PARAMETERS: Pathname - Fully qualified pathname to the object - * *Return_object - Where to put method's return value (if + * return_object - Where to put method's return value (if * any). If NULL, no value is returned. - * **Params - List of parameters to pass to the method, + * Params - List of parameters to pass to the method, * terminated by NULL. Params itself may be * NULL if no parameters are being passed. 
* @@ -156,16 +176,16 @@ acpi_status acpi_ns_evaluate_by_name ( - NATIVE_CHAR *pathname, - acpi_operand_object **params, - acpi_operand_object **return_object) + char *pathname, + union acpi_operand_object **params, + union acpi_operand_object **return_object) { - acpi_status status; - acpi_namespace_node *node = NULL; - NATIVE_CHAR *internal_path = NULL; + acpi_status status; + struct acpi_namespace_node *node = NULL; + char *internal_path = NULL; - FUNCTION_TRACE ("Ns_evaluate_by_name"); + ACPI_FUNCTION_TRACE ("ns_evaluate_by_name"); /* Build an internal name string for the method */ @@ -175,18 +195,21 @@ return_ACPI_STATUS (status); } - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* Lookup the name in the namespace */ status = acpi_ns_lookup (NULL, internal_path, ACPI_TYPE_ANY, - IMODE_EXECUTE, NS_NO_UPSEARCH, NULL, + ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL, &node); - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Object at [%s] was not found, status=%.4X\n", + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Object at [%s] was not found, status=%.4X\n", pathname, status)); goto cleanup; } @@ -195,12 +218,12 @@ * Now that we have a handle to the object, we can attempt * to evaluate it. */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%s [%p] Value %p\n", - pathname, node, node->object)); + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "%s [%p] Value %p\n", + pathname, node, acpi_ns_get_attached_object (node))); status = acpi_ns_evaluate_by_handle (node, params, return_object); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "*** Completed eval of object %s ***\n", + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "*** Completed eval of object %s ***\n", pathname)); @@ -218,13 +241,13 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_evaluate_by_handle + * FUNCTION: acpi_ns_evaluate_by_handle * * PARAMETERS: Handle - Method Node to execute - * **Params - List of parameters to pass to the method, + * Params - List of parameters to pass to the method, * terminated by NULL. Params itself may be * NULL if no parameters are being passed. - * *Return_object - Where to put method's return value (if + * return_object - Where to put method's return value (if * any). If NULL, no value is returned. 
* * RETURN: Status @@ -237,16 +260,16 @@ acpi_status acpi_ns_evaluate_by_handle ( - acpi_namespace_node *handle, - acpi_operand_object **params, - acpi_operand_object **return_object) + struct acpi_namespace_node *handle, + union acpi_operand_object **params, + union acpi_operand_object **return_object) { - acpi_namespace_node *node; - acpi_status status; - acpi_operand_object *local_return_object; + struct acpi_namespace_node *node; + acpi_status status; + union acpi_operand_object *local_return_object; - FUNCTION_TRACE ("Ns_evaluate_by_handle"); + ACPI_FUNCTION_TRACE ("ns_evaluate_by_handle"); /* Check if namespace has been initialized */ @@ -269,15 +292,17 @@ /* Get the prefix handle and Node */ - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } node = acpi_ns_map_handle_to_node (handle); if (!node) { - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* * Two major cases here: * 1) The object is an actual control method -- execute it. @@ -285,7 +310,7 @@ * value * * In both cases, the namespace is unlocked by the - * Acpi_ns* procedure + * acpi_ns* procedure */ if (acpi_ns_get_type (node) == ACPI_TYPE_METHOD) { /* @@ -294,7 +319,6 @@ status = acpi_ns_execute_control_method (node, params, &local_return_object); } - else { /* * Case 2) Object is NOT a method, just return its @@ -303,7 +327,6 @@ status = acpi_ns_get_object_value (node, &local_return_object); } - /* * Check if there is a return value on the stack that must * be dealt with @@ -323,16 +346,13 @@ *return_object = local_return_object; } + /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */ - /* Map AE_RETURN_VALUE to AE_OK, we are done with it */ - - if (status == AE_CTRL_RETURN_VALUE) { - status = AE_OK; - } + status = AE_OK; } /* - * Namespace was unlocked by the handling Acpi_ns* function, + * Namespace was unlocked by the handling acpi_ns* function, * so we just return */ return_ACPI_STATUS (status); @@ -341,13 +361,13 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_execute_control_method + * FUNCTION: acpi_ns_execute_control_method * - * PARAMETERS: Method_node - The object/method - * **Params - List of parameters to pass to the method, + * PARAMETERS: method_node - The method to execute + * Params - List of parameters to pass to the method, * terminated by NULL. Params itself may be * NULL if no parameters are being passed. - * **Return_obj_desc - List of result objects to be returned + * return_obj_desc - List of result objects to be returned * from the method. 
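acpi_ns_evaluate_by_handle above uses AE_CTRL_RETURN_VALUE as an internal sentinel meaning "a result object was produced"; once the result has either been handed to the caller or deleted (when the caller passed a NULL return_object), the sentinel is flattened back to AE_OK so it never escapes the interface. The control flow in miniature, with made-up status codes and object types:

    #include <stdio.h>
    #include <stdlib.h>

    typedef int status_t;
    #define OK                 0
    #define CTRL_RETURN_VALUE  0x100   /* internal: "result available" */

    struct object { int value; };

    static void delete_object(struct object *o) { free(o); }

    /* Produces a result and signals it with the internal sentinel */
    static status_t evaluate(struct object **result)
    {
        *result = malloc(sizeof **result);
        if (!*result)
            return 1;
        (*result)->value = 42;
        return CTRL_RETURN_VALUE;
    }

    static status_t evaluate_by_handle(struct object **return_object)
    {
        struct object *local = NULL;
        status_t status = evaluate(&local);

        if (status == CTRL_RETURN_VALUE) {
            if (return_object)
                *return_object = local;      /* hand result to caller */
            else
                delete_object(local);        /* caller doesn't want it */

            status = OK;   /* sentinel must not leak out of the API */
        }
        return status;
    }

    int main(void)
    {
        struct object *obj = NULL;

        if (evaluate_by_handle(&obj) == OK && obj) {
            printf("result %d\n", obj->value);
            delete_object(obj);
        }
        /* Caller that ignores the value: result is cleaned up inside */
        return evaluate_by_handle(NULL);
    }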
* * RETURN: Status @@ -360,15 +380,15 @@ acpi_status acpi_ns_execute_control_method ( - acpi_namespace_node *method_node, - acpi_operand_object **params, - acpi_operand_object **return_obj_desc) + struct acpi_namespace_node *method_node, + union acpi_operand_object **params, + union acpi_operand_object **return_obj_desc) { - acpi_status status; - acpi_operand_object *obj_desc; + acpi_status status; + union acpi_operand_object *obj_desc; - FUNCTION_TRACE ("Ns_execute_control_method"); + ACPI_FUNCTION_TRACE ("ns_execute_control_method"); /* Verify that there is a method associated with this object */ @@ -377,21 +397,16 @@ if (!obj_desc) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No attached method object\n")); - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return_ACPI_STATUS (AE_NULL_OBJECT); } + ACPI_DUMP_PATHNAME (method_node, "Execute Method:", + ACPI_LV_INFO, _COMPONENT); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Control method at Offset %p Length %x]\n", + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Method at AML address %p Length %X\n", obj_desc->method.aml_start + 1, obj_desc->method.aml_length - 1)); - DUMP_PATHNAME (method_node, "Ns_execute_control_method: Executing", - ACPI_LV_NAMES, _COMPONENT); - - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "At offset %p\n", - obj_desc->method.aml_start + 1)); - - /* * Unlock the namespace before execution. This allows namespace access * via the external Acpi* interfaces while a method is being executed. @@ -399,7 +414,10 @@ * interpreter locks to ensure that no thread is using the portion of the * namespace that is being deleted. */ - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* * Execute the method via the interpreter. The interpreter is locked @@ -419,129 +437,79 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_get_object_value + * FUNCTION: acpi_ns_get_object_value * - * PARAMETERS: Node - The object + * PARAMETERS: Node - The object + * return_obj_desc - Where the objects value is returned * * RETURN: Status * * DESCRIPTION: Return the current value of the object * - * MUTEX: Assumes namespace is locked + * MUTEX: Assumes namespace is locked, leaves namespace unlocked * ******************************************************************************/ acpi_status acpi_ns_get_object_value ( - acpi_namespace_node *node, - acpi_operand_object **return_obj_desc) + struct acpi_namespace_node *node, + union acpi_operand_object **return_obj_desc) { - acpi_status status = AE_OK; - acpi_operand_object *obj_desc; - acpi_operand_object *source_desc; + acpi_status status = AE_OK; + struct acpi_namespace_node *resolved_node = node; - FUNCTION_TRACE ("Ns_get_object_value"); + ACPI_FUNCTION_TRACE ("ns_get_object_value"); /* - * We take the value from certain objects directly + * Objects require additional resolution steps (e.g., the + * Node may be a field that must be read, etc.) -- we can't just grab + * the object out of the node. 
*/ - if ((node->type == ACPI_TYPE_PROCESSOR) || - (node->type == ACPI_TYPE_POWER)) { - /* - * Create a Reference object to contain the object - */ - obj_desc = acpi_ut_create_internal_object (node->type); - if (!obj_desc) { - status = AE_NO_MEMORY; - goto unlock_and_exit; - } - - /* - * Get the attached object - */ - source_desc = acpi_ns_get_attached_object (node); - if (!source_desc) { - status = AE_NULL_OBJECT; - goto unlock_and_exit; - } - - /* - * Just copy from the original to the return object - * - * TBD: [Future] - need a low-level object copy that handles - * the reference count automatically. (Don't want to copy it) - */ - MEMCPY (obj_desc, source_desc, sizeof (acpi_operand_object)); - obj_desc->common.reference_count = 1; - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); - } - /* - * Other objects require a reference object wrapper which we - * then attempt to resolve. + * Use resolve_node_to_value() to get the associated value. This call + * always deletes obj_desc (allocated above). + * + * NOTE: we can get away with passing in NULL for a walk state + * because obj_desc is guaranteed to not be a reference to either + * a method local or a method argument (because this interface can only be + * called from the acpi_evaluate external interface, never called from + * a running control method.) + * + * Even though we do not directly invoke the interpreter + * for this, we must enter it because we could access an opregion. + * The opregion access code assumes that the interpreter + * is locked. + * + * We must release the namespace lock before entering the + * intepreter. */ - else { - /* Create an Reference object to contain the object */ - - obj_desc = acpi_ut_create_internal_object (INTERNAL_TYPE_REFERENCE); - if (!obj_desc) { - status = AE_NO_MEMORY; - goto unlock_and_exit; - } - - /* Construct a descriptor pointing to the name */ - - obj_desc->reference.opcode = (u8) AML_NAME_OP; - obj_desc->reference.object = (void *) node; + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + status = acpi_ex_enter_interpreter (); + if (ACPI_SUCCESS (status)) { + status = acpi_ex_resolve_node_to_value (&resolved_node, NULL); /* - * Use Resolve_to_value() to get the associated value. This call - * always deletes Obj_desc (allocated above). - * - * NOTE: we can get away with passing in NULL for a walk state - * because Obj_desc is guaranteed to not be a reference to either - * a method local or a method argument - * - * Even though we do not directly invoke the interpreter - * for this, we must enter it because we could access an opregion. - * The opregion access code assumes that the interpreter - * is locked. - * - * We must release the namespace lock before entering the - * intepreter. + * If acpi_ex_resolve_node_to_value() succeeded, the return value was + * placed in resolved_node. */ - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); - status = acpi_ex_enter_interpreter (); - if (ACPI_SUCCESS (status)) { - status = acpi_ex_resolve_to_value (&obj_desc, NULL); + acpi_ex_exit_interpreter (); - acpi_ex_exit_interpreter (); + if (ACPI_SUCCESS (status)) { + status = AE_CTRL_RETURN_VALUE; + *return_obj_desc = ACPI_CAST_PTR (union acpi_operand_object, resolved_node); + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Returning object %p [%s]\n", + *return_obj_desc, acpi_ut_get_object_type_name (*return_obj_desc))); } } - /* - * If Acpi_ex_resolve_to_value() succeeded, the return value was - * placed in Obj_desc. 
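The rewritten acpi_ns_get_object_value above drops the old copy-or-wrap special cases: it now releases the namespace lock, enters the interpreter (because resolving the node may touch an operation region), resolves the node to a value in place, and exits again. A stripped-down version of that bracket-the-resolution flow (all names here are stand-ins, not the ACPI API):

    #include <stdio.h>

    typedef int status_t;
    #define OK 0

    struct value { int data; };
    struct node  { struct value val; };

    /* Stand-ins for the namespace lock and interpreter enter/exit */
    static status_t release_namespace_lock(void) { return OK; }
    static status_t enter_interpreter(void)      { return OK; }
    static void     exit_interpreter(void)       { }

    /* May read hardware (an opregion), hence the interpreter bracket */
    static status_t resolve_node_to_value(struct node **n, struct value **out)
    {
        *out = &(*n)->val;
        return OK;
    }

    static status_t get_object_value(struct node *node, struct value **result)
    {
        struct node *resolved = node;
        status_t status;

        /* The broad lock is released before entering the execution
         * context; holding it across a resolution that can block
         * would invite deadlock. */
        status = release_namespace_lock();
        if (status != OK)
            return status;

        status = enter_interpreter();
        if (status == OK) {
            status = resolve_node_to_value(&resolved, result);
            exit_interpreter();   /* always paired with a successful enter */
        }
        return status;
    }

    int main(void)
    {
        struct node n = { { 7 } };
        struct value *v = NULL;

        if (get_object_value(&n, &v) == OK)
            printf("value %d\n", v->data);
        return 0;
    }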
- */ - if (ACPI_SUCCESS (status)) { - status = AE_CTRL_RETURN_VALUE; - - *return_obj_desc = obj_desc; - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Returning obj %p\n", *return_obj_desc)); - } - /* Namespace is unlocked */ return_ACPI_STATUS (status); - - -unlock_and_exit: - - /* Unlock the namespace */ - - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); - return_ACPI_STATUS (status); } + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsinit.c linux.21rc5-ac2/drivers/acpi/namespace/nsinit.c --- linux.21rc5/drivers/acpi/namespace/nsinit.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsinit.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,41 +1,59 @@ /****************************************************************************** * * Module Name: nsinit - namespace initialization - * $Revision: 33 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acnamesp.h" -#include "acdispat.h" -#include "acinterp.h" +#include +#include +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsinit") + ACPI_MODULE_NAME ("nsinit") /******************************************************************************* * - * FUNCTION: Acpi_ns_initialize_objects + * FUNCTION: acpi_ns_initialize_objects * * PARAMETERS: None * @@ -50,24 +68,20 @@ acpi_ns_initialize_objects ( void) { - acpi_status status; - acpi_init_walk_info info; + acpi_status status; + struct acpi_init_walk_info info; - FUNCTION_TRACE ("Ns_initialize_objects"); + ACPI_FUNCTION_TRACE ("ns_initialize_objects"); ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, "**** Starting initialization of namespace objects ****\n")); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, "Completing Region and Field initialization:")); + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "Completing Region/Field/Buffer/Package initialization:")); + /* Set all init info to zero */ - info.field_count = 0; - info.field_init = 0; - info.op_region_count = 0; - info.op_region_init = 0; - info.object_count = 0; - + ACPI_MEMSET (&info, 0, sizeof (struct acpi_init_walk_info)); /* Walk entire namespace from the supplied root */ @@ -75,17 +89,21 @@ ACPI_UINT32_MAX, acpi_ns_init_one_object, &info, NULL); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Walk_namespace failed! %x\n", status)); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "walk_namespace failed! 
%s\n", + acpi_format_exception (status))); } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - "\n%d/%d Regions, %d/%d Fields initialized (%d nodes total)\n", - info.op_region_init, info.op_region_count, info.field_init, - info.field_count, info.object_count)); + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, + "\nInitialized %hd/%hd Regions %hd/%hd Fields %hd/%hd Buffers %hd/%hd Packages (%hd nodes)\n", + info.op_region_init, info.op_region_count, + info.field_init, info.field_count, + info.buffer_init, info.buffer_count, + info.package_init, info.package_count, info.object_count)); + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "%d Control Methods found\n", info.method_count)); + "%hd Control Methods found\n", info.method_count)); ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, - "%d Op Regions found\n", info.op_region_count)); + "%hd Op Regions found\n", info.op_region_count)); return_ACPI_STATUS (AE_OK); } @@ -93,7 +111,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_initialize_devices + * FUNCTION: acpi_ns_initialize_devices * * PARAMETERS: None * @@ -111,30 +129,33 @@ acpi_ns_initialize_devices ( void) { - acpi_status status; - acpi_device_walk_info info; + acpi_status status; + struct acpi_device_walk_info info; + + ACPI_FUNCTION_TRACE ("ns_initialize_devices"); - FUNCTION_TRACE ("Ns_initialize_devices"); + /* Init counters */ info.device_count = 0; info.num_STA = 0; info.num_INI = 0; + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, "Executing all Device _STA and_INI methods:")); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, "Executing device _INI methods:")); + /* Walk namespace for all objects of type Device */ status = acpi_ns_walk_namespace (ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, FALSE, acpi_ns_init_one_device, &info, NULL); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Walk_namespace failed! %x\n", status)); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "walk_namespace failed! %s\n", + acpi_format_exception (status))); } - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - "\n%d Devices found: %d _STA, %d _INI\n", + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, + "\n%hd Devices found containing: %hd _STA, %hd _INI methods\n", info.device_count, info.num_STA, info.num_INI)); return_ACPI_STATUS (status); @@ -143,16 +164,16 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_init_one_object + * FUNCTION: acpi_ns_init_one_object * - * PARAMETERS: Obj_handle - Node + * PARAMETERS: obj_handle - Node * Level - Current nesting level * Context - Points to a init info struct - * Return_value - Not used + * return_value - Not used * * RETURN: Status * - * DESCRIPTION: Callback from Acpi_walk_namespace. Invoked for every object + * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every object * within the namespace. 
* * Currently, the only objects that require initialization are: @@ -163,37 +184,62 @@ acpi_status acpi_ns_init_one_object ( - acpi_handle obj_handle, - u32 level, - void *context, - void **return_value) + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value) { - acpi_object_type8 type; - acpi_status status; - acpi_init_walk_info *info = (acpi_init_walk_info *) context; - acpi_namespace_node *node = (acpi_namespace_node *) obj_handle; - acpi_operand_object *obj_desc; + acpi_object_type type; + acpi_status status; + struct acpi_init_walk_info *info = (struct acpi_init_walk_info *) context; + struct acpi_namespace_node *node = (struct acpi_namespace_node *) obj_handle; + union acpi_operand_object *obj_desc; - PROC_NAME ("Ns_init_one_object"); + ACPI_FUNCTION_NAME ("ns_init_one_object"); info->object_count++; - /* And even then, we are only interested in a few object types */ type = acpi_ns_get_type (obj_handle); - obj_desc = node->object; + obj_desc = acpi_ns_get_attached_object (node); if (!obj_desc) { return (AE_OK); } - if ((type != ACPI_TYPE_REGION) && - (type != ACPI_TYPE_BUFFER_FIELD)) { + /* Increment counters for object types we are looking for */ + + switch (type) { + case ACPI_TYPE_REGION: + info->op_region_count++; + break; + + case ACPI_TYPE_BUFFER_FIELD: + info->field_count++; + break; + + case ACPI_TYPE_BUFFER: + info->buffer_count++; + break; + + case ACPI_TYPE_PACKAGE: + info->package_count++; + break; + + default: + + /* No init required, just exit now */ return (AE_OK); } + /* + * If the object is already initialized, nothing else to do + */ + if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { + return (AE_OK); + } /* * Must lock the interpreter before executing AML code @@ -203,61 +249,59 @@ return (status); } + /* + * Each of these types can contain executable AML code within + * the declaration. 
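+	 *
+	 * For example (hypothetical ASL, for illustration only):
+	 *
+	 *     OperationRegion (GPIO, SystemIO, 0x0800, SIZE)
+	 *
+	 * Here the length operand SIZE is a named object that cannot be
+	 * evaluated until the interpreter runs; the deferred
+	 * acpi_ds_get_*_arguments calls below execute such operands.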
+ */ switch (type) { - case ACPI_TYPE_REGION: - info->op_region_count++; - if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { - break; - } - info->op_region_init++; status = acpi_ds_get_region_arguments (obj_desc); - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_ERROR, "\n")); - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "%s while getting region arguments [%4.4s]\n", - acpi_format_exception (status), (char*)&node->name)); - } - - if (!(acpi_dbg_level & ACPI_LV_INIT)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, ".")); - } - break; case ACPI_TYPE_BUFFER_FIELD: - info->field_count++; - if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { - break; - } - info->field_init++; status = acpi_ds_get_buffer_field_arguments (obj_desc); - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_ERROR, "\n")); - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "%s while getting buffer field arguments [%4.4s]\n", - acpi_format_exception (status), (char*)&node->name)); - } - if (!(acpi_dbg_level & ACPI_LV_INIT)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, ".")); - } + break; + + case ACPI_TYPE_BUFFER: + info->buffer_init++; + status = acpi_ds_get_buffer_arguments (obj_desc); + break; + + + case ACPI_TYPE_PACKAGE: + + info->package_init++; + status = acpi_ds_get_package_arguments (obj_desc); break; default: + /* No other types can get here */ break; } + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_ERROR, "\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not execute arguments for [%4.4s] (%s), %s\n", + node->name.ascii, acpi_ut_get_type_name (type), acpi_format_exception (status))); + } + + /* Print a dot for each object unless we are going to print the entire pathname */ + + if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, ".")); + } /* * We ignore errors from above, and always return OK, since - * we don't want to abort the walk on a single error. + * we don't want to abort the walk on any single error. 
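	 * (The interpreter was entered earlier in this function, presumably
	 * via acpi_ex_enter_interpreter() in the elided context, so every
	 * path through the switch above must reach the single
	 * acpi_ex_exit_interpreter() call below.)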
*/ acpi_ex_exit_interpreter (); return (AE_OK); @@ -266,7 +310,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_init_one_device + * FUNCTION: acpi_ns_init_one_device * * PARAMETERS: acpi_walk_callback * @@ -280,40 +324,46 @@ acpi_status acpi_ns_init_one_device ( - acpi_handle obj_handle, - u32 nesting_level, - void *context, - void **return_value) + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value) { - acpi_status status; - acpi_namespace_node *node; - u32 flags; - acpi_device_walk_info *info = (acpi_device_walk_info *) context; + acpi_status status; + struct acpi_namespace_node *node; + u32 flags; + struct acpi_device_walk_info *info = (struct acpi_device_walk_info *) context; - FUNCTION_TRACE ("Ns_init_one_device"); + ACPI_FUNCTION_TRACE ("ns_init_one_device"); - if (!(acpi_dbg_level & ACPI_LV_INIT)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, ".")); + if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) && (!(acpi_dbg_level & ACPI_LV_INFO))) { + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, ".")); } info->device_count++; - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } node = acpi_ns_map_handle_to_node (obj_handle); if (!node) { - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); - return (AE_BAD_PARAMETER); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (AE_BAD_PARAMETER); } - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* * Run _STA to determine if we can run _INI on the device. */ - DEBUG_EXEC (acpi_ut_display_init_pathname (node, "_STA [Method]")); + ACPI_DEBUG_EXEC (acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, node, "_STA")); status = acpi_ut_execute_STA (node, &flags); if (ACPI_FAILURE (status)) { /* Ignore error and move on to next device */ @@ -329,36 +379,41 @@ return_ACPI_STATUS(AE_CTRL_DEPTH); } - /* * The device is present. Run _INI. 
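	 * (Per the ACPI specification, the _STA return value is a bit mask
	 * whose bit 0 means "device present"; a hedged sketch of the test
	 * applied in the elided context above is:
	 *
	 *     if (!(flags & 0x01))
	 *         return_ACPI_STATUS (AE_CTRL_DEPTH);     (prune this subtree)
	 *
	 * which is why control only reaches this point for present devices.)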
*/ - DEBUG_EXEC (acpi_ut_display_init_pathname (obj_handle, "_INI [Method]")); + ACPI_DEBUG_EXEC (acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, obj_handle, "_INI")); status = acpi_ns_evaluate_relative (obj_handle, "_INI", NULL, NULL); - if (AE_NOT_FOUND == status) { - /* No _INI means device requires no initialization */ + if (ACPI_FAILURE (status)) { + /* No _INI (AE_NOT_FOUND) means device requires no initialization */ - status = AE_OK; - } + if (status != AE_NOT_FOUND) { + /* Ignore error and move on to next device */ - else if (ACPI_FAILURE (status)) { - /* Ignore error and move on to next device */ + #ifdef ACPI_DEBUG_OUTPUT + char *scope_name = acpi_ns_get_external_pathname (obj_handle); -#ifdef ACPI_DEBUG - NATIVE_CHAR *scope_name = acpi_ns_get_table_pathname (obj_handle); + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "%s._INI failed: %s\n", + scope_name, acpi_format_exception (status))); - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "%s._INI failed: %s\n", - scope_name, acpi_format_exception (status))); + ACPI_MEM_FREE (scope_name); + #endif + } - ACPI_MEM_FREE (scope_name); -#endif + status = AE_OK; } - else { /* Count of successful INIs */ info->num_INI++; } - return_ACPI_STATUS (AE_OK); + if (acpi_gbl_init_handler) { + /* External initialization handler is present, call it */ + + status = acpi_gbl_init_handler (obj_handle, ACPI_INIT_DEVICE_INI); + } + + + return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsload.c linux.21rc5-ac2/drivers/acpi/namespace/nsload.c --- linux.21rc5/drivers/acpi/namespace/nsload.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsload.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,239 +1,91 @@ /****************************************************************************** * * Module Name: nsload - namespace loading/expanding/contracting procedures - * $Revision: 47 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "amlcode.h" -#include "acparser.h" -#include "acdispat.h" -#include "acdebug.h" +#include +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsload") + ACPI_MODULE_NAME ("nsload") -/******************************************************************************* - * - * FUNCTION: Acpi_load_namespace - * - * PARAMETERS: Display_aml_during_load - * - * RETURN: Status - * - * DESCRIPTION: Load the name space from what ever is pointed to by DSDT. - * (DSDT points to either the BIOS or a buffer.) - * - ******************************************************************************/ - -acpi_status -acpi_ns_load_namespace ( - void) -{ - acpi_status status; - - - FUNCTION_TRACE ("Acpi_load_name_space"); - - - /* There must be at least a DSDT installed */ - - if (acpi_gbl_DSDT == NULL) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "DSDT is not in memory\n")); - return_ACPI_STATUS (AE_NO_ACPI_TABLES); - } - - - /* - * Load the namespace. The DSDT is required, - * but the SSDT and PSDT tables are optional. 
- */ - status = acpi_ns_load_table_by_type (ACPI_TABLE_DSDT); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - /* Ignore exceptions from these */ - - acpi_ns_load_table_by_type (ACPI_TABLE_SSDT); - acpi_ns_load_table_by_type (ACPI_TABLE_PSDT); - - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - "ACPI Namespace successfully loaded at root %p\n", - acpi_gbl_root_node)); - - - return_ACPI_STATUS (status); -} - +#ifndef ACPI_NO_METHOD_EXECUTION /******************************************************************************* * - * FUNCTION: Acpi_ns_one_parse_pass + * FUNCTION: acpi_ns_load_table * - * PARAMETERS: + * PARAMETERS: table_desc - Descriptor for table to be loaded + * Node - Owning NS node * * RETURN: Status * - * DESCRIPTION: + * DESCRIPTION: Load one ACPI table into the namespace * ******************************************************************************/ acpi_status -acpi_ns_one_complete_parse ( - u32 pass_number, - acpi_table_desc *table_desc) +acpi_ns_load_table ( + struct acpi_table_desc *table_desc, + struct acpi_namespace_node *node) { - acpi_parse_object *parse_root; - acpi_status status; - acpi_walk_state *walk_state; + acpi_status status; - FUNCTION_TRACE ("Ns_one_complete_parse"); - - - /* Create and init a Root Node */ - - parse_root = acpi_ps_alloc_op (AML_SCOPE_OP); - if (!parse_root) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - ((acpi_parse2_object *) parse_root)->name = ACPI_ROOT_NAME; + ACPI_FUNCTION_TRACE ("ns_load_table"); - /* Create and initialize a new walk state */ + /* Check if table contains valid AML (must be DSDT, PSDT, SSDT, etc.) */ - walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, - NULL, NULL, NULL); - if (!walk_state) { - acpi_ps_free_op (parse_root); - return_ACPI_STATUS (AE_NO_MEMORY); - } + if (!(acpi_gbl_table_data[table_desc->type].flags & ACPI_TABLE_EXECUTABLE)) { + /* Just ignore this table */ - status = acpi_ds_init_aml_walk (walk_state, parse_root, NULL, table_desc->aml_start, - table_desc->aml_length, NULL, NULL, pass_number); - if (ACPI_FAILURE (status)) { - acpi_ds_delete_walk_state (walk_state); - return_ACPI_STATUS (status); + return_ACPI_STATUS (AE_OK); } - /* Parse the AML */ - - ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "*PARSE* pass %d parse\n", pass_number)); - status = acpi_ps_parse_aml (walk_state); - - acpi_ps_delete_parse_tree (parse_root); - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ns_parse_table - * - * PARAMETERS: Table_desc - An ACPI table descriptor for table to parse - * Start_node - Where to enter the table into the namespace - * - * RETURN: Status - * - * DESCRIPTION: Parse AML within an ACPI table and return a tree of ops - * - ******************************************************************************/ - -acpi_status -acpi_ns_parse_table ( - acpi_table_desc *table_desc, - acpi_namespace_node *start_node) -{ - acpi_status status; - - - FUNCTION_TRACE ("Ns_parse_table"); - - - /* - * AML Parse, pass 1 - * - * In this pass, we load most of the namespace. Control methods - * are not parsed until later. A parse tree is not created. Instead, - * each Parser Op subtree is deleted when it is finished. This saves - * a great deal of memory, and allows a small cache of parse objects - * to service the entire parse. The second pass of the parse then - * performs another complete parse of the AML.. 
- */ - status = acpi_ns_one_complete_parse (1, table_desc); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - - /* - * AML Parse, pass 2 - * - * In this pass, we resolve forward references and other things - * that could not be completed during the first pass. - * Another complete parse of the AML is performed, but the - * overhead of this is compensated for by the fact that the - * parse objects are all cached. - */ - status = acpi_ns_one_complete_parse (2, table_desc); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ns_load_table - * - * PARAMETERS: Table_desc - Descriptor for table to be loaded - * Node - Owning NS node - * - * RETURN: Status - * - * DESCRIPTION: Load one ACPI table into the namespace - * - ******************************************************************************/ - -acpi_status -acpi_ns_load_table ( - acpi_table_desc *table_desc, - acpi_namespace_node *node) -{ - acpi_status status; - - - FUNCTION_TRACE ("Ns_load_table"); - + /* Check validity of the AML start and length */ if (!table_desc->aml_start) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Null AML pointer\n")); @@ -242,13 +94,13 @@ ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "AML block at %p\n", table_desc->aml_start)); + /* Ignore table if there is no AML contained within */ if (!table_desc->aml_length) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Zero-length AML block\n")); - return_ACPI_STATUS (AE_BAD_PARAMETER); + ACPI_REPORT_WARNING (("Zero-length AML block in table [%4.4s]\n", table_desc->pointer->signature)); + return_ACPI_STATUS (AE_OK); } - /* * Parse the table and load the namespace with all named * objects found within. 
Control methods are NOT parsed @@ -260,9 +112,13 @@ */ ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "**** Loading table into namespace ****\n")); - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + status = acpi_ns_parse_table (table_desc, node->child); - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); @@ -288,9 +144,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_load_table_by_type + * FUNCTION: acpi_ns_load_table_by_type * - * PARAMETERS: Table_type - Id of the table type to load + * PARAMETERS: table_type - Id of the table type to load * * RETURN: Status * @@ -302,30 +158,31 @@ acpi_status acpi_ns_load_table_by_type ( - acpi_table_type table_type) + acpi_table_type table_type) { - u32 i; - acpi_status status = AE_OK; - acpi_table_desc *table_desc; - + u32 i; + acpi_status status; + struct acpi_table_desc *table_desc; - FUNCTION_TRACE ("Ns_load_table_by_type"); + ACPI_FUNCTION_TRACE ("ns_load_table_by_type"); - acpi_ut_acquire_mutex (ACPI_MTX_TABLES); + status = acpi_ut_acquire_mutex (ACPI_MTX_TABLES); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* * Table types supported are: * DSDT (one), SSDT/PSDT (multiple) */ switch (table_type) { - case ACPI_TABLE_DSDT: ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Loading DSDT\n")); - table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_DSDT]; + table_desc = acpi_gbl_table_lists[ACPI_TABLE_DSDT].next; /* If table already loaded into namespace, just return */ @@ -333,8 +190,6 @@ goto unlock_and_exit; } - table_desc->table_id = TABLE_ID_DSDT; - /* Now load the single DSDT */ status = acpi_ns_load_table (table_desc, acpi_gbl_root_node); @@ -348,13 +203,13 @@ case ACPI_TABLE_SSDT: ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Loading %d SSDTs\n", - acpi_gbl_acpi_tables[ACPI_TABLE_SSDT].count)); + acpi_gbl_table_lists[ACPI_TABLE_SSDT].count)); /* * Traverse list of SSDT tables */ - table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_SSDT]; - for (i = 0; i < acpi_gbl_acpi_tables[ACPI_TABLE_SSDT].count; i++) { + table_desc = acpi_gbl_table_lists[ACPI_TABLE_SSDT].next; + for (i = 0; i < acpi_gbl_table_lists[ACPI_TABLE_SSDT].count; i++) { /* * Only attempt to load table if it is not * already loaded! @@ -376,14 +231,14 @@ case ACPI_TABLE_PSDT: ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Loading %d PSDTs\n", - acpi_gbl_acpi_tables[ACPI_TABLE_PSDT].count)); + acpi_gbl_table_lists[ACPI_TABLE_PSDT].count)); /* * Traverse list of PSDT tables */ - table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_PSDT]; + table_desc = acpi_gbl_table_lists[ACPI_TABLE_PSDT].next; - for (i = 0; i < acpi_gbl_acpi_tables[ACPI_TABLE_PSDT].count; i++) { + for (i = 0; i < acpi_gbl_table_lists[ACPI_TABLE_PSDT].count; i++) { /* Only attempt to load table if it is not already loaded! */ if (!table_desc->loaded_into_namespace) { @@ -408,43 +263,92 @@ unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_TABLES); + return_ACPI_STATUS (status); +} - acpi_ut_release_mutex (ACPI_MTX_TABLES); - return_ACPI_STATUS (status); +/******************************************************************************* + * + * FUNCTION: acpi_load_namespace + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Load the name space from what ever is pointed to by DSDT. + * (DSDT points to either the BIOS or a buffer.) 
+ * + ******************************************************************************/ + +acpi_status +acpi_ns_load_namespace ( + void) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE ("acpi_load_name_space"); + + + /* There must be at least a DSDT installed */ + + if (acpi_gbl_DSDT == NULL) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "DSDT is not in memory\n")); + return_ACPI_STATUS (AE_NO_ACPI_TABLES); + } + + /* + * Load the namespace. The DSDT is required, + * but the SSDT and PSDT tables are optional. + */ + status = acpi_ns_load_table_by_type (ACPI_TABLE_DSDT); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Ignore exceptions from these */ + + (void) acpi_ns_load_table_by_type (ACPI_TABLE_SSDT); + (void) acpi_ns_load_table_by_type (ACPI_TABLE_PSDT); + + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, + "ACPI Namespace successfully loaded at root %p\n", + acpi_gbl_root_node)); + + return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ns_delete_subtree + * FUNCTION: acpi_ns_delete_subtree * - * PARAMETERS: Start_handle - Handle in namespace where search begins + * PARAMETERS: start_handle - Handle in namespace where search begins * * RETURNS Status * * DESCRIPTION: Walks the namespace starting at the given handle and deletes * all objects, entries, and scopes in the entire subtree. * - * TBD: [Investigate] What if any part of this subtree is in use? - * (i.e. on one of the object stacks?) + * Namespace/Interpreter should be locked or the subsystem should + * be in shutdown before this routine is called. * ******************************************************************************/ acpi_status acpi_ns_delete_subtree ( - acpi_handle start_handle) + acpi_handle start_handle) { - acpi_status status; - acpi_handle child_handle; - acpi_handle parent_handle; - acpi_handle next_child_handle; - acpi_handle dummy; - u32 level; + acpi_status status; + acpi_handle child_handle; + acpi_handle parent_handle; + acpi_handle next_child_handle; + acpi_handle dummy; + u32 level; - FUNCTION_TRACE ("Ns_delete_subtree"); + ACPI_FUNCTION_TRACE ("ns_delete_subtree"); parent_handle = start_handle; @@ -463,7 +367,6 @@ child_handle = next_child_handle; - /* Did we get a new object? 
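	   (that is, did the next-object lookup in the elided context
	   above, presumably acpi_get_next_object(), return a further
	   peer of child_handle rather than failing because this
	   level is exhausted?)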
*/ if (ACPI_SUCCESS (status)) { @@ -480,7 +383,6 @@ child_handle = 0; } } - else { /* * No more children in this object, go back up to @@ -493,7 +395,10 @@ acpi_ns_delete_children (child_handle); child_handle = parent_handle; - acpi_get_parent (parent_handle, &parent_handle); + status = acpi_get_parent (parent_handle, &parent_handle); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } } } @@ -507,7 +412,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_unload_name_space + * FUNCTION: acpi_ns_unload_name_space * * PARAMETERS: Handle - Root of namespace subtree to be deleted * @@ -521,12 +426,12 @@ acpi_status acpi_ns_unload_namespace ( - acpi_handle handle) + acpi_handle handle) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Ns_unload_name_space"); + ACPI_FUNCTION_TRACE ("ns_unload_name_space"); /* Parameter validation */ @@ -539,7 +444,6 @@ return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* This function does the real work */ status = acpi_ns_delete_subtree (handle); @@ -547,4 +451,5 @@ return_ACPI_STATUS (status); } +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsnames.c linux.21rc5-ac2/drivers/acpi/namespace/nsnames.c --- linux.21rc5/drivers/acpi/namespace/nsnames.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsnames.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,126 +1,175 @@ /******************************************************************************* * * Module Name: nsnames - Name manipulation and search - * $Revision: 64 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "amlcode.h" -#include "acinterp.h" -#include "acnamesp.h" +#include +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsnames") + ACPI_MODULE_NAME ("nsnames") /******************************************************************************* * - * FUNCTION: Acpi_ns_get_table_pathname + * FUNCTION: acpi_ns_build_external_path * - * PARAMETERS: Node - Scope whose name is needed + * PARAMETERS: Node - NS node whose pathname is needed + * Size - Size of the pathname + * *name_buffer - Where to return the pathname * - * RETURN: Pointer to storage containing the fully qualified name of - * the scope, in Label format (all segments strung together - * with no separators) + * RETURN: Places the pathname into the name_buffer, in external format + * (name segments separated by path separators) * - * DESCRIPTION: Used for debug printing in Acpi_ns_search_table(). + * DESCRIPTION: Generate a full pathaname * ******************************************************************************/ -NATIVE_CHAR * -acpi_ns_get_table_pathname ( - acpi_namespace_node *node) +void +acpi_ns_build_external_path ( + struct acpi_namespace_node *node, + acpi_size size, + char *name_buffer) { - NATIVE_CHAR *name_buffer; - u32 size; - acpi_name name; - acpi_namespace_node *child_node; - acpi_namespace_node *parent_node; + acpi_size index; + struct acpi_namespace_node *parent_node; - FUNCTION_TRACE_PTR ("Ns_get_table_pathname", node); + ACPI_FUNCTION_NAME ("ns_build_external_path"); - if (!acpi_gbl_root_node || !node) { - /* - * If the name space has not been initialized, - * this function should not have been called. 
- */ - return_PTR (NULL); + /* Special case for root */ + + index = size - 1; + if (index < ACPI_NAME_SIZE) { + name_buffer[0] = AML_ROOT_PREFIX; + name_buffer[1] = 0; + return; } - child_node = node->child; + /* Store terminator byte, then build name backwards */ + parent_node = node; + name_buffer[index] = 0; - /* Calculate required buffer size based on depth below root */ + while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) { + index -= ACPI_NAME_SIZE; + + /* Put the name into the buffer */ - size = 1; - parent_node = child_node; - while (parent_node) { - parent_node = acpi_ns_get_parent_object (parent_node); - if (parent_node) { - size += ACPI_NAME_SIZE; - } + ACPI_MOVE_32_TO_32 ((name_buffer + index), &parent_node->name); + parent_node = acpi_ns_get_parent_node (parent_node); + + /* Prefix name with the path separator */ + + index--; + name_buffer[index] = ACPI_PATH_SEPARATOR; } + /* Overwrite final separator with the root prefix character */ - /* Allocate a buffer to be returned to caller */ + name_buffer[index] = AML_ROOT_PREFIX; - name_buffer = ACPI_MEM_CALLOCATE (size + 1); - if (!name_buffer) { - REPORT_ERROR (("Ns_get_table_pathname: allocation failure\n")); - return_PTR (NULL); + if (index != 0) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not construct pathname; index=%X, size=%X, Path=%s\n", + (u32) index, (u32) size, &name_buffer[size])); } + return; +} - /* Store terminator byte, then build name backwards */ - name_buffer[size] = '\0'; - while ((size > ACPI_NAME_SIZE) && - acpi_ns_get_parent_object (child_node)) { - size -= ACPI_NAME_SIZE; - name = acpi_ns_find_parent_name (child_node); +#ifdef ACPI_DEBUG_OUTPUT +/******************************************************************************* + * + * FUNCTION: acpi_ns_get_external_pathname + * + * PARAMETERS: Node - NS node whose pathname is needed + * + * RETURN: Pointer to storage containing the fully qualified name of + * the node, In external format (name segments separated by path + * separators.) + * + * DESCRIPTION: Used for debug printing in acpi_ns_search_table(). 
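+ *
+ *              Illustrative example with a hypothetical node: a node
+ *              named KBD0 under \_SB_.PCI0.ISA_ yields the string
+ *              "\_SB_.PCI0.ISA_.KBD0". Every segment is a fixed
+ *              4-character ACPI name, which is why
+ *              acpi_ns_get_pathname_length() below charges 5 bytes
+ *              (separator plus name) per segment, plus one terminator.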
+ * + ******************************************************************************/ - /* Put the name into the buffer */ +char * +acpi_ns_get_external_pathname ( + struct acpi_namespace_node *node) +{ + char *name_buffer; + acpi_size size; + + + ACPI_FUNCTION_TRACE_PTR ("ns_get_external_pathname", node); - MOVE_UNALIGNED32_TO_32 ((name_buffer + size), &name); - child_node = acpi_ns_get_parent_object (child_node); - } - name_buffer[--size] = AML_ROOT_PREFIX; + /* Calculate required buffer size based on depth below root */ + + size = acpi_ns_get_pathname_length (node); + + /* Allocate a buffer to be returned to caller */ - if (size != 0) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Bad pointer returned; size=%X\n", size)); + name_buffer = ACPI_MEM_CALLOCATE (size); + if (!name_buffer) { + ACPI_REPORT_ERROR (("ns_get_table_pathname: allocation failure\n")); + return_PTR (NULL); } + /* Build the path in the allocated buffer */ + + acpi_ns_build_external_path (node, size, name_buffer); return_PTR (name_buffer); } +#endif /******************************************************************************* * - * FUNCTION: Acpi_ns_get_pathname_length + * FUNCTION: acpi_ns_get_pathname_length * * PARAMETERS: Node - Namespace node * @@ -130,31 +179,27 @@ * ******************************************************************************/ -u32 +acpi_size acpi_ns_get_pathname_length ( - acpi_namespace_node *node) + struct acpi_namespace_node *node) { - u32 size; - acpi_namespace_node *next_node; + acpi_size size; + struct acpi_namespace_node *next_node; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* * Compute length of pathname as 5 * number of name segments. * Go back up the parent tree to the root */ - for (size = 0, next_node = node; - acpi_ns_get_parent_object (next_node); - next_node = acpi_ns_get_parent_object (next_node)) { - size += PATH_SEGMENT_LENGTH; - } - - /* Special case for size still 0 - no parent for "special" nodes */ + size = 0; + next_node = node; - if (!size) { - size = PATH_SEGMENT_LENGTH; + while (next_node && (next_node != acpi_gbl_root_node)) { + size += ACPI_PATH_SEGMENT_LENGTH; + next_node = acpi_ns_get_parent_node (next_node); } return (size + 1); @@ -163,100 +208,53 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_handle_to_pathname + * FUNCTION: acpi_ns_handle_to_pathname * - * PARAMETERS: Target_handle - Handle of named object whose name is + * PARAMETERS: target_handle - Handle of named object whose name is * to be found - * Buf_size - Size of the buffer provided - * User_buffer - Where the pathname is returned + * Buffer - Where the pathname is returned * * RETURN: Status, Buffer is filled with pathname if status is AE_OK * * DESCRIPTION: Build and return a full namespace pathname * - * MUTEX: Locks Namespace - * ******************************************************************************/ acpi_status acpi_ns_handle_to_pathname ( - acpi_handle target_handle, - u32 *buf_size, - NATIVE_CHAR *user_buffer) + acpi_handle target_handle, + struct acpi_buffer *buffer) { - acpi_status status = AE_OK; - acpi_namespace_node *node; - u32 path_length; - u32 user_buf_size; - acpi_name name; - u32 size; - - - FUNCTION_TRACE_PTR ("Ns_handle_to_pathname", target_handle); - - - if (!acpi_gbl_root_node) { - /* - * If the name space has not been initialized, - * this function should not have been called. 
- */ - return_ACPI_STATUS (AE_NO_NAMESPACE); - } + acpi_status status; + struct acpi_namespace_node *node; + acpi_size required_size; - node = acpi_ns_map_handle_to_node (target_handle); - if (!node) { - return_ACPI_STATUS (AE_BAD_PARAMETER); - } - - - /* Set return length to the required path length */ - - path_length = acpi_ns_get_pathname_length (node); - size = path_length - 1; - user_buf_size = *buf_size; - *buf_size = path_length; + ACPI_FUNCTION_TRACE_PTR ("ns_handle_to_pathname", target_handle); - /* Check if the user buffer is sufficiently large */ - if (path_length > user_buf_size) { - status = AE_BUFFER_OVERFLOW; - goto exit; + node = acpi_ns_map_handle_to_node (target_handle); + if (!node) { + return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* Store null terminator */ + /* Determine size required for the caller buffer */ - user_buffer[size] = 0; - size -= ACPI_NAME_SIZE; + required_size = acpi_ns_get_pathname_length (node); - /* Put the original ACPI name at the end of the path */ + /* Validate/Allocate/Clear caller buffer */ - MOVE_UNALIGNED32_TO_32 ((user_buffer + size), - &node->name); - - user_buffer[--size] = PATH_SEPARATOR; - - /* Build name backwards, putting "." between segments */ - - while ((size > ACPI_NAME_SIZE) && node) { - size -= ACPI_NAME_SIZE; - name = acpi_ns_find_parent_name (node); - MOVE_UNALIGNED32_TO_32 ((user_buffer + size), &name); - - user_buffer[--size] = PATH_SEPARATOR; - node = acpi_ns_get_parent_object (node); + status = acpi_ut_initialize_buffer (buffer, required_size); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - /* - * Overlay the "." preceding the first segment with - * the root name "\" - */ - user_buffer[size] = '\\'; + /* Build the path in the caller buffer */ - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Len=%X, %s \n", path_length, user_buffer)); + acpi_ns_build_external_path (node, required_size, buffer->pointer); -exit: - return_ACPI_STATUS (status); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%s [%X] \n", (char *) buffer->pointer, (u32) required_size)); + return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsobject.c linux.21rc5-ac2/drivers/acpi/namespace/nsobject.c --- linux.21rc5/drivers/acpi/namespace/nsobject.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsobject.c 2003-04-28 17:59:26.000000000 +0100 @@ -2,43 +2,58 @@ * * Module Name: nsobject - Utilities for objects attached to namespace * table entries - * $Revision: 67 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "actables.h" +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsobject") + ACPI_MODULE_NAME ("nsobject") /******************************************************************************* * - * FUNCTION: Acpi_ns_attach_object + * FUNCTION: acpi_ns_attach_object * * PARAMETERS: Node - Parent Node * Object - Object to be attached @@ -48,6 +63,8 @@ * DESCRIPTION: Record the given object as the value associated with the * name whose acpi_handle is passed. If Object is NULL * and Type is ACPI_TYPE_ANY, set the name as having no value. + * Note: Future may require that the Node->Flags field be passed + * as a parameter. 
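 *
 *              A sketch of the resulting layout, assuming the list
 *              handling implemented below: the node points at the
 *              primary operand object, and any ACPI_TYPE_LOCAL_DATA
 *              objects attached via acpi_ns_attach_data() are linked
 *              behind it through common.next_object:
 *
 *                  Node->object -> obj_desc -> data -> data -> NULL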
* * MUTEX: Assumes namespace is locked * @@ -55,206 +72,169 @@ acpi_status acpi_ns_attach_object ( - acpi_namespace_node *node, - acpi_operand_object *object, - acpi_object_type8 type) + struct acpi_namespace_node *node, + union acpi_operand_object *object, + acpi_object_type type) { - acpi_operand_object *obj_desc; - acpi_operand_object *previous_obj_desc; - acpi_object_type8 obj_type = ACPI_TYPE_ANY; - u8 flags; + union acpi_operand_object *obj_desc; + union acpi_operand_object *last_obj_desc; + acpi_object_type object_type = ACPI_TYPE_ANY; - FUNCTION_TRACE ("Ns_attach_object"); + ACPI_FUNCTION_TRACE ("ns_attach_object"); /* * Parameter validation */ - if (!acpi_gbl_root_node) { - /* Name space not initialized */ - - REPORT_ERROR (("Ns_attach_object: Namespace not initialized\n")); - return_ACPI_STATUS (AE_NO_NAMESPACE); - } - if (!node) { /* Invalid handle */ - REPORT_ERROR (("Ns_attach_object: Null Named_obj handle\n")); + ACPI_REPORT_ERROR (("ns_attach_object: Null named_obj handle\n")); return_ACPI_STATUS (AE_BAD_PARAMETER); } if (!object && (ACPI_TYPE_ANY != type)) { /* Null object */ - REPORT_ERROR (("Ns_attach_object: Null object, but type not ACPI_TYPE_ANY\n")); + ACPI_REPORT_ERROR (("ns_attach_object: Null object, but type not ACPI_TYPE_ANY\n")); return_ACPI_STATUS (AE_BAD_PARAMETER); } - if (!VALID_DESCRIPTOR_TYPE (node, ACPI_DESC_TYPE_NAMED)) { + if (ACPI_GET_DESCRIPTOR_TYPE (node) != ACPI_DESC_TYPE_NAMED) { /* Not a name handle */ - REPORT_ERROR (("Ns_attach_object: Invalid handle\n")); + ACPI_REPORT_ERROR (("ns_attach_object: Invalid handle\n")); return_ACPI_STATUS (AE_BAD_PARAMETER); } /* Check if this object is already attached */ if (node->object == object) { - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj %p already installed in Name_obj %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj %p already installed in name_obj %p\n", object, node)); return_ACPI_STATUS (AE_OK); } - - /* Get the current flags field of the Node */ - - flags = node->flags; - flags &= ~ANOBJ_AML_ATTACHMENT; - - /* If null object, we will just install it */ if (!object) { - obj_desc = NULL; - obj_type = ACPI_TYPE_ANY; + obj_desc = NULL; + object_type = ACPI_TYPE_ANY; } /* * If the source object is a namespace Node with an attached object, * we will use that (attached) object */ - else if (VALID_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_NAMED) && - ((acpi_namespace_node *) object)->object) { + else if ((ACPI_GET_DESCRIPTOR_TYPE (object) == ACPI_DESC_TYPE_NAMED) && + ((struct acpi_namespace_node *) object)->object) { /* * Value passed is a name handle and that name has a * non-null value. Use that name's value and type. 
*/ - obj_desc = ((acpi_namespace_node *) object)->object; - obj_type = ((acpi_namespace_node *) object)->type; - - /* - * Copy appropriate flags - */ - if (((acpi_namespace_node *) object)->flags & ANOBJ_AML_ATTACHMENT) { - flags |= ANOBJ_AML_ATTACHMENT; - } + obj_desc = ((struct acpi_namespace_node *) object)->object; + object_type = ((struct acpi_namespace_node *) object)->type; } - /* * Otherwise, we will use the parameter object, but we must type * it first */ else { - obj_desc = (acpi_operand_object *) object; + obj_desc = (union acpi_operand_object *) object; - /* If a valid type (non-ANY) was given, just use it */ - - if (ACPI_TYPE_ANY != type) { - obj_type = type; - } + /* Use the given type */ - else { - /* - * Cannot figure out the type -- set to Def_any which - * will print as an error in the name table dump - */ - if (acpi_dbg_level > 0) { - DUMP_PATHNAME (node, - "Ns_attach_object confused: setting bogus type for ", - ACPI_LV_INFO, _COMPONENT); - - if (VALID_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_NAMED)) { - DUMP_PATHNAME (object, "name ", ACPI_LV_INFO, _COMPONENT); - } - - else { - DUMP_PATHNAME (object, "object ", ACPI_LV_INFO, _COMPONENT); - DUMP_STACK_ENTRY (object); - } - } - - obj_type = INTERNAL_TYPE_DEF_ANY; - } + object_type = type; } - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Installing %p into Node %p [%4.4s]\n", - obj_desc, node, (char*)&node->name)); + obj_desc, node, node->name.ascii)); + /* Detach an existing attached object if present */ - /* - * Must increment the new value's reference count - * (if it is an internal object) - */ - acpi_ut_add_reference (obj_desc); - - /* Save the existing object (if any) for deletion later */ - - previous_obj_desc = node->object; - - /* Install the object and set the type, flags */ - - node->object = obj_desc; - node->type = (u8) obj_type; - node->flags |= flags; - + if (node->object) { + acpi_ns_detach_object (node); + } - /* - * Delete an existing attached object. - */ - if (previous_obj_desc) { - /* One for the attach to the Node */ + if (obj_desc) { + /* + * Must increment the new value's reference count + * (if it is an internal object) + */ + acpi_ut_add_reference (obj_desc); - acpi_ut_remove_reference (previous_obj_desc); + /* + * Handle objects with multiple descriptors - walk + * to the end of the descriptor list + */ + last_obj_desc = obj_desc; + while (last_obj_desc->common.next_object) { + last_obj_desc = last_obj_desc->common.next_object; + } - /* Now delete */ + /* Install the object at the front of the object list */ - acpi_ut_remove_reference (previous_obj_desc); + last_obj_desc->common.next_object = node->object; } + node->type = (u8) object_type; + node->object = obj_desc; + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ns_detach_object + * FUNCTION: acpi_ns_detach_object * - * PARAMETERS: Node - An object whose Value will be deleted + * PARAMETERS: Node - An node whose object will be detached * * RETURN: None. * - * DESCRIPTION: Delete the Value associated with a namespace object. If the - * Value is an allocated object, it is freed. Otherwise, the - * field is simply cleared. + * DESCRIPTION: Detach/delete an object associated with a namespace node. + * if the object is an allocated object, it is freed. + * Otherwise, the field is simply cleared. 
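+ *              Note: objects of type ACPI_TYPE_LOCAL_DATA are not
+ *              detached here (the routine returns immediately when the
+ *              head of the object list is such an object); attached
+ *              data is managed only through acpi_ns_detach_data()
+ *              later in this file.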
* ******************************************************************************/ void acpi_ns_detach_object ( - acpi_namespace_node *node) + struct acpi_namespace_node *node) { - acpi_operand_object *obj_desc; + union acpi_operand_object *obj_desc; - FUNCTION_TRACE ("Ns_detach_object"); + ACPI_FUNCTION_TRACE ("ns_detach_object"); obj_desc = node->object; - if (!obj_desc) { + + if (!obj_desc || + (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_LOCAL_DATA)) { return_VOID; } /* Clear the entry in all cases */ node->object = NULL; + if (ACPI_GET_DESCRIPTOR_TYPE (obj_desc) == ACPI_DESC_TYPE_OPERAND) { + node->object = obj_desc->common.next_object; + if (node->object && + (ACPI_GET_OBJECT_TYPE (node->object) != ACPI_TYPE_LOCAL_DATA)) { + node->object = node->object->common.next_object; + } + } + + /* Reset the node type to untyped */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Object=%p Value=%p Name %4.4s\n", - node, obj_desc, (char*)&node->name)); + node->type = ACPI_TYPE_ANY; + + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Node %p [%4.4s] Object %p\n", + node, node->name.ascii, obj_desc)); /* Remove one reference on the object (and all subobjects) */ @@ -265,30 +245,216 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_get_attached_object + * FUNCTION: acpi_ns_get_attached_object * * PARAMETERS: Node - Parent Node to be examined * * RETURN: Current value of the object field from the Node whose * handle is passed * + * DESCRIPTION: Obtain the object attached to a namespace node. + * ******************************************************************************/ -void * +union acpi_operand_object * acpi_ns_get_attached_object ( - acpi_namespace_node *node) + struct acpi_namespace_node *node) { - FUNCTION_TRACE_PTR ("Ns_get_attached_object", node); + ACPI_FUNCTION_TRACE_PTR ("ns_get_attached_object", node); if (!node) { - /* handle invalid */ - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Null Node ptr\n")); return_PTR (NULL); } + if (!node->object || + ((ACPI_GET_DESCRIPTOR_TYPE (node->object) != ACPI_DESC_TYPE_OPERAND) && + (ACPI_GET_DESCRIPTOR_TYPE (node->object) != ACPI_DESC_TYPE_NAMED)) || + (ACPI_GET_OBJECT_TYPE (node->object) == ACPI_TYPE_LOCAL_DATA)) { + return_PTR (NULL); + } + return_PTR (node->object); } +/******************************************************************************* + * + * FUNCTION: acpi_ns_get_secondary_object + * + * PARAMETERS: Node - Parent Node to be examined + * + * RETURN: Current value of the object field from the Node whose + * handle is passed. + * + * DESCRIPTION: Obtain a secondary object associated with a namespace node. + * + ******************************************************************************/ + +union acpi_operand_object * +acpi_ns_get_secondary_object ( + union acpi_operand_object *obj_desc) +{ + ACPI_FUNCTION_TRACE_PTR ("ns_get_secondary_object", obj_desc); + + + if ((!obj_desc) || + (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_LOCAL_DATA) || + (!obj_desc->common.next_object) || + (ACPI_GET_OBJECT_TYPE (obj_desc->common.next_object) == ACPI_TYPE_LOCAL_DATA)) { + return_PTR (NULL); + } + + return_PTR (obj_desc->common.next_object); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ns_attach_data + * + * PARAMETERS: Node - Namespace node + * Handler - Handler to be associated with the data + * Data - Data to be attached + * + * RETURN: Status + * + * DESCRIPTION: Low-level attach data. Create and attach a Data object. 
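+ *
+ *              A hedged usage sketch; my_handler and my_context are
+ *              hypothetical, and only one attachment per handler is
+ *              permitted (enforced below):
+ *
+ *                  status = acpi_ns_attach_data (node, my_handler, my_context);
+ *                  ...
+ *                  status = acpi_ns_get_attached_data (node, my_handler, &data);
+ *                  ...
+ *                  status = acpi_ns_detach_data (node, my_handler);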
+ * + ******************************************************************************/ + +acpi_status +acpi_ns_attach_data ( + struct acpi_namespace_node *node, + acpi_object_handler handler, + void *data) +{ + union acpi_operand_object *prev_obj_desc; + union acpi_operand_object *obj_desc; + union acpi_operand_object *data_desc; + + + /* We only allow one attachment per handler */ + + prev_obj_desc = NULL; + obj_desc = node->object; + while (obj_desc) { + if ((ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_LOCAL_DATA) && + (obj_desc->data.handler == handler)) { + return (AE_ALREADY_EXISTS); + } + + prev_obj_desc = obj_desc; + obj_desc = obj_desc->common.next_object; + } + + /* Create an internal object for the data */ + + data_desc = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_DATA); + if (!data_desc) { + return (AE_NO_MEMORY); + } + + data_desc->data.handler = handler; + data_desc->data.pointer = data; + + /* Install the data object */ + + if (prev_obj_desc) { + prev_obj_desc->common.next_object = data_desc; + } + else { + node->object = data_desc; + } + + return (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ns_detach_data + * + * PARAMETERS: Node - Namespace node + * Handler - Handler associated with the data + * + * RETURN: Status + * + * DESCRIPTION: Low-level detach data. Delete the data node, but the caller + * is responsible for the actual data. + * + ******************************************************************************/ + +acpi_status +acpi_ns_detach_data ( + struct acpi_namespace_node *node, + acpi_object_handler handler) +{ + union acpi_operand_object *obj_desc; + union acpi_operand_object *prev_obj_desc; + + + prev_obj_desc = NULL; + obj_desc = node->object; + while (obj_desc) { + if ((ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_LOCAL_DATA) && + (obj_desc->data.handler == handler)) { + if (prev_obj_desc) { + prev_obj_desc->common.next_object = obj_desc->common.next_object; + } + else { + node->object = obj_desc->common.next_object; + } + + acpi_ut_remove_reference (obj_desc); + return (AE_OK); + } + + prev_obj_desc = obj_desc; + obj_desc = obj_desc->common.next_object; + } + + return (AE_NOT_FOUND); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ns_get_attached_data + * + * PARAMETERS: Node - Namespace node + * Handler - Handler associated with the data + * Data - Where the data is returned + * + * RETURN: Status + * + * DESCRIPTION: Low level interface to obtain data previously associated with + * a namespace node. 
+ * + ******************************************************************************/ + +acpi_status +acpi_ns_get_attached_data ( + struct acpi_namespace_node *node, + acpi_object_handler handler, + void **data) +{ + union acpi_operand_object *obj_desc; + + + obj_desc = node->object; + while (obj_desc) { + if ((ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_LOCAL_DATA) && + (obj_desc->data.handler == handler)) { + *data = obj_desc->data.pointer; + return (AE_OK); + } + + obj_desc = obj_desc->common.next_object; + } + + return (AE_NOT_FOUND); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsparse.c linux.21rc5-ac2/drivers/acpi/namespace/nsparse.c --- linux.21rc5/drivers/acpi/namespace/nsparse.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsparse.c 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,170 @@ +/****************************************************************************** + * + * Module Name: nsparse - namespace interface to AML parser + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + + +#include +#include +#include +#include + + +#define _COMPONENT ACPI_NAMESPACE + ACPI_MODULE_NAME ("nsparse") + + +/******************************************************************************* + * + * FUNCTION: ns_one_complete_parse + * + * PARAMETERS: pass_number - 1 or 2 + * table_desc - The table to be parsed. + * + * RETURN: Status + * + * DESCRIPTION: Perform one complete parse of an ACPI/AML table. 
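+ *
+ *              pass_number is simply forwarded to acpi_ds_init_aml_walk()
+ *              below: pass 1 populates the namespace with named objects,
+ *              pass 2 resolves forward references, as explained in
+ *              acpi_ns_parse_table() at the end of this file.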
+ * + ******************************************************************************/ + +acpi_status +acpi_ns_one_complete_parse ( + u32 pass_number, + struct acpi_table_desc *table_desc) +{ + union acpi_parse_object *parse_root; + acpi_status status; + struct acpi_walk_state *walk_state; + + + ACPI_FUNCTION_TRACE ("ns_one_complete_parse"); + + + /* Create and init a Root Node */ + + parse_root = acpi_ps_create_scope_op (); + if (!parse_root) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* Create and initialize a new walk state */ + + walk_state = acpi_ds_create_walk_state (table_desc->table_id, + NULL, NULL, NULL); + if (!walk_state) { + acpi_ps_free_op (parse_root); + return_ACPI_STATUS (AE_NO_MEMORY); + } + + status = acpi_ds_init_aml_walk (walk_state, parse_root, NULL, table_desc->aml_start, + table_desc->aml_length, NULL, NULL, pass_number); + if (ACPI_FAILURE (status)) { + acpi_ds_delete_walk_state (walk_state); + return_ACPI_STATUS (status); + } + + /* Parse the AML */ + + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "*PARSE* pass %d parse\n", pass_number)); + status = acpi_ps_parse_aml (walk_state); + + acpi_ps_delete_parse_tree (parse_root); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ns_parse_table + * + * PARAMETERS: table_desc - An ACPI table descriptor for table to parse + * start_node - Where to enter the table into the namespace + * + * RETURN: Status + * + * DESCRIPTION: Parse AML within an ACPI table and return a tree of ops + * + ******************************************************************************/ + +acpi_status +acpi_ns_parse_table ( + struct acpi_table_desc *table_desc, + struct acpi_namespace_node *start_node) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ns_parse_table"); + + + /* + * AML Parse, pass 1 + * + * In this pass, we load most of the namespace. Control methods + * are not parsed until later. A parse tree is not created. Instead, + * each Parser Op subtree is deleted when it is finished. This saves + * a great deal of memory, and allows a small cache of parse objects + * to service the entire parse. The second pass of the parse then + * performs another complete parse of the AML.. + */ + status = acpi_ns_one_complete_parse (1, table_desc); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* + * AML Parse, pass 2 + * + * In this pass, we resolve forward references and other things + * that could not be completed during the first pass. + * Another complete parse of the AML is performed, but the + * overhead of this is compensated for by the fact that the + * parse objects are all cached. + */ + status = acpi_ns_one_complete_parse (2, table_desc); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + return_ACPI_STATUS (status); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nssearch.c linux.21rc5-ac2/drivers/acpi/namespace/nssearch.c --- linux.21rc5/drivers/acpi/namespace/nssearch.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nssearch.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,52 +1,69 @@ /******************************************************************************* * * Module Name: nssearch - Namespace search - * $Revision: 75 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. 
Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "amlcode.h" -#include "acinterp.h" -#include "acnamesp.h" +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nssearch") + ACPI_MODULE_NAME ("nssearch") /******************************************************************************* * - * FUNCTION: Acpi_ns_search_node + * FUNCTION: acpi_ns_search_node * - * PARAMETERS: *Target_name - Ascii ACPI name to search for - * *Node - Starting table where search will begin + * PARAMETERS: *target_name - Ascii ACPI name to search for + * *Node - Starting node where search will begin * Type - Object type to match - * **Return_node - Where the matched Named obj is returned + * **return_node - Where the matched Named obj is returned * * RETURN: Status * - * DESCRIPTION: Search a single namespace table. Performs a simple search, - * does not add entries or search parents. + * DESCRIPTION: Search a single level of the namespace. 
Performs a + * simple search of the specified level, and does not add + * entries or search parents. * * * Named object lists are built (and subsequently dumped) in the @@ -62,75 +79,51 @@ acpi_status acpi_ns_search_node ( - u32 target_name, - acpi_namespace_node *node, - acpi_object_type8 type, - acpi_namespace_node **return_node) + u32 target_name, + struct acpi_namespace_node *node, + acpi_object_type type, + struct acpi_namespace_node **return_node) { - acpi_namespace_node *next_node; + struct acpi_namespace_node *next_node; - FUNCTION_TRACE ("Ns_search_node"); + ACPI_FUNCTION_TRACE ("ns_search_node"); -#ifdef ACPI_DEBUG +#ifdef ACPI_DEBUG_OUTPUT if (ACPI_LV_NAMES & acpi_dbg_level) { - NATIVE_CHAR *scope_name; + char *scope_name; - scope_name = acpi_ns_get_table_pathname (node); + scope_name = acpi_ns_get_external_pathname (node); if (scope_name) { - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Searching %s [%p] For %4.4s (type %X)\n", - scope_name, node, (char*)&target_name, type)); + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Searching %s [%p] For %4.4s (%s)\n", + scope_name, node, (char *) &target_name, acpi_ut_get_type_name (type))); ACPI_MEM_FREE (scope_name); } } #endif - /* - * Search for name in this table, which is to say that we must search - * for the name among the children of this object + * Search for name at this namespace level, which is to say that we + * must search for the name among the children of this object */ next_node = node->child; while (next_node) { /* Check for match against the name */ - if (next_node->name == target_name) { - /* - * Found matching entry. Capture the type if appropriate, before - * returning the entry. - * - * The Def_field_defn and Bank_field_defn cases are actually looking up - * the Region in which the field will be defined - */ - if ((INTERNAL_TYPE_FIELD_DEFN == type) || - (INTERNAL_TYPE_BANK_FIELD_DEFN == type)) { - type = ACPI_TYPE_REGION; - } - + if (next_node->name.integer == target_name) { /* - * Scope, Def_any, and Index_field_defn are bogus "types" which do not - * actually have anything to do with the type of the name being - * looked up. For any other value of Type, if the type stored in - * the entry is Any (i.e. unknown), save the actual type. + * Found matching entry. 
*/ - if (type != INTERNAL_TYPE_SCOPE && - type != INTERNAL_TYPE_DEF_ANY && - type != INTERNAL_TYPE_INDEX_FIELD_DEFN && - next_node->type == ACPI_TYPE_ANY) { - next_node->type = (u8) type; - } - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, - "Name %4.4s (actual type %X) found at %p\n", - (char*)&target_name, next_node->type, next_node)); + "Name %4.4s Type [%s] found at %p\n", + (char *) &target_name, acpi_ut_get_type_name (next_node->type), next_node)); *return_node = next_node; return_ACPI_STATUS (AE_OK); } - /* * The last entry in the list points back to the parent, * so a flag is used to indicate the end-of-list @@ -146,11 +139,10 @@ next_node = next_node->peer; } + /* Searched entire namespace level, not found */ - /* Searched entire table, not found */ - - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Name %4.4s (type %X) not found at %p\n", - (char*)&target_name, type, next_node)); + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Name %4.4s Type [%s] not found at %p\n", + (char *) &target_name, acpi_ut_get_type_name (type), next_node)); return_ACPI_STATUS (AE_NOT_FOUND); } @@ -158,18 +150,18 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_search_parent_tree + * FUNCTION: acpi_ns_search_parent_tree * - * PARAMETERS: *Target_name - Ascii ACPI name to search for - * *Node - Starting table where search will begin + * PARAMETERS: *target_name - Ascii ACPI name to search for + * *Node - Starting node where search will begin * Type - Object type to match - * **Return_node - Where the matched Named Obj is returned + * **return_node - Where the matched Named Obj is returned * * RETURN: Status * * DESCRIPTION: Called when a name has not been found in the current namespace - * table. Before adding it or giving up, ACPI scope rules require - * searching enclosing scopes in cases identified by Acpi_ns_local(). + * level. Before adding it or giving up, ACPI scope rules require + * searching enclosing scopes in cases identified by acpi_ns_local(). * * "A name is located by finding the matching name in the current * name space, and then in the parent name space. If the parent @@ -183,55 +175,53 @@ static acpi_status acpi_ns_search_parent_tree ( - u32 target_name, - acpi_namespace_node *node, - acpi_object_type8 type, - acpi_namespace_node **return_node) + u32 target_name, + struct acpi_namespace_node *node, + acpi_object_type type, + struct acpi_namespace_node **return_node) { - acpi_status status; - acpi_namespace_node *parent_node; + acpi_status status; + struct acpi_namespace_node *parent_node; - FUNCTION_TRACE ("Ns_search_parent_tree"); + ACPI_FUNCTION_TRACE ("ns_search_parent_tree"); - parent_node = acpi_ns_get_parent_object (node); + parent_node = acpi_ns_get_parent_node (node); /* - * If there is no parent (at the root) or type is "local", we won't be - * searching the parent tree. + * If there is no parent (i.e., we are at the root) or + * type is "local", we won't be searching the parent tree. 
*/ - if ((acpi_ns_local (type)) || - (!parent_node)) { - if (!parent_node) { - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "[%4.4s] has no parent\n", - (char*)&target_name)); - } - - if (acpi_ns_local (type)) { - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "[%4.4s] type %X is local(no search)\n", - (char*)&target_name, type)); - } + if (!parent_node) { + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "[%4.4s] has no parent\n", + (char *) &target_name)); + return_ACPI_STATUS (AE_NOT_FOUND); + } + if (acpi_ns_local (type)) { + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, + "[%4.4s] type [%s] must be local to this scope (no parent search)\n", + (char *) &target_name, acpi_ut_get_type_name (type))); return_ACPI_STATUS (AE_NOT_FOUND); } - /* Search the parent tree */ - ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Searching parent for %4.4s\n", (char*)&target_name)); + ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Searching parent for %4.4s\n", (char *) &target_name)); /* * Search parents until found the target or we have backed up to * the root */ while (parent_node) { - /* Search parent scope */ - /* TBD: [Investigate] Why ACPI_TYPE_ANY? */ - + /* + * Search parent scope. Use TYPE_ANY because we don't care about the + * object type at this point, we only care about the existence of + * the actual name we are searching for. Typechecking comes later. + */ status = acpi_ns_search_node (target_name, parent_node, ACPI_TYPE_ANY, return_node); - if (ACPI_SUCCESS (status)) { return_ACPI_STATUS (status); } @@ -240,10 +230,9 @@ * Not found here, go up another level * (until we reach the root) */ - parent_node = acpi_ns_get_parent_object (parent_node); + parent_node = acpi_ns_get_parent_node (parent_node); } - /* Not found in parent tree */ return_ACPI_STATUS (AE_NOT_FOUND); @@ -252,71 +241,67 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_search_and_enter + * FUNCTION: acpi_ns_search_and_enter * - * PARAMETERS: Target_name - Ascii ACPI name to search for (4 chars) - * Walk_state - Current state of the walk - * *Node - Starting table where search will begin - * Interpreter_mode - Add names only in MODE_Load_pass_x. + * PARAMETERS: target_name - Ascii ACPI name to search for (4 chars) + * walk_state - Current state of the walk + * *Node - Starting node where search will begin + * interpreter_mode - Add names only in ACPI_MODE_LOAD_PASS_x. * Otherwise,search only. * Type - Object type to match * Flags - Flags describing the search restrictions - * **Return_node - Where the Node is returned + * **return_node - Where the Node is returned * * RETURN: Status * - * DESCRIPTION: Search for a name segment in a single name table, + * DESCRIPTION: Search for a name segment in a single namespace level, * optionally adding it if it is not found. If the passed * Type is not Any and the type previously stored in the * entry was Any (i.e. unknown), update the stored type. * - * In IMODE_EXECUTE, search only. + * In ACPI_IMODE_EXECUTE, search only. * In other modes, search and add if not found. 
* ******************************************************************************/ acpi_status acpi_ns_search_and_enter ( - u32 target_name, - acpi_walk_state *walk_state, - acpi_namespace_node *node, - operating_mode interpreter_mode, - acpi_object_type8 type, - u32 flags, - acpi_namespace_node **return_node) + u32 target_name, + struct acpi_walk_state *walk_state, + struct acpi_namespace_node *node, + acpi_interpreter_mode interpreter_mode, + acpi_object_type type, + u32 flags, + struct acpi_namespace_node **return_node) { - acpi_status status; - acpi_namespace_node *new_node; + acpi_status status; + struct acpi_namespace_node *new_node; - FUNCTION_TRACE ("Ns_search_and_enter"); + ACPI_FUNCTION_TRACE ("ns_search_and_enter"); /* Parameter validation */ if (!node || !target_name || !return_node) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Null param- Table %p Name %X Return %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Null param: Node %p Name %X return_node %p\n", node, target_name, return_node)); - REPORT_ERROR (("Ns_search_and_enter: bad (null) parameter\n")); + ACPI_REPORT_ERROR (("ns_search_and_enter: Null parameter\n")); return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* Name must consist of printable characters */ if (!acpi_ut_valid_acpi_name (target_name)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "*** Bad character in name: %08x *** \n", + ACPI_REPORT_ERROR (("ns_search_and_enter: Bad character in ACPI Name: %X\n", target_name)); - - REPORT_ERROR (("Ns_search_and_enter: Bad character in ACPI Name\n")); return_ACPI_STATUS (AE_BAD_CHARACTER); } + /* Try to find the name in the namespace level specified by the caller */ - /* Try to find the name in the table specified by the caller */ - - *return_node = ENTRY_NOT_FOUND; + *return_node = ACPI_ENTRY_NOT_FOUND; status = acpi_ns_search_node (target_name, node, type, return_node); if (status != AE_NOT_FOUND) { /* @@ -324,8 +309,8 @@ * return the error */ if ((status == AE_OK) && - (flags & NS_ERROR_IF_FOUND)) { - status = AE_EXIST; + (flags & ACPI_NS_ERROR_IF_FOUND)) { + status = AE_ALREADY_EXISTS; } /* @@ -335,9 +320,8 @@ return_ACPI_STATUS (status); } - /* - * Not found in the table. If we are NOT performing the + * The name was not found. If we are NOT performing the * first pass (name entry) of loading the namespace, search * the parent tree (all the way to the root if necessary.) * We don't want to perform the parent search when the @@ -345,10 +329,10 @@ * the search when namespace references are being resolved * (load pass 2) and during the execution phase. */ - if ((interpreter_mode != IMODE_LOAD_PASS1) && - (flags & NS_SEARCH_PARENT)) { + if ((interpreter_mode != ACPI_IMODE_LOAD_PASS1) && + (flags & ACPI_NS_SEARCH_PARENT)) { /* - * Not found in table - search parent tree according + * Not found at this level - search parent tree according * to ACPI specification */ status = acpi_ns_search_parent_tree (target_name, node, @@ -358,18 +342,16 @@ } } - /* * In execute mode, just search, never add names. Exit now. 
*/ - if (interpreter_mode == IMODE_EXECUTE) { + if (interpreter_mode == ACPI_IMODE_EXECUTE) { ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "%4.4s Not found in %p [Not adding]\n", - (char*)&target_name, node)); + (char *) &target_name, node)); return_ACPI_STATUS (AE_NOT_FOUND); } - /* Create the new named object */ new_node = acpi_ns_create_node (target_name); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsutils.c linux.21rc5-ac2/drivers/acpi/namespace/nsutils.c --- linux.21rc5/drivers/acpi/namespace/nsutils.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsutils.c 2003-05-29 01:02:16.000000000 +0100 @@ -2,42 +2,194 @@ * * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing * parents and siblings and Scope manipulation - * $Revision: 92 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "amlcode.h" -#include "actables.h" +#include +#include +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsutils") + ACPI_MODULE_NAME ("nsutils") /******************************************************************************* * - * FUNCTION: Acpi_ns_valid_root_prefix + * FUNCTION: acpi_ns_report_error + * + * PARAMETERS: module_name - Caller's module name (for error output) + * line_number - Caller's line number (for error output) + * component_id - Caller's component ID (for error output) + * Message - Error message to use on failure + * + * RETURN: None + * + * DESCRIPTION: Print warning message with full pathname + * + ******************************************************************************/ + +void +acpi_ns_report_error ( + char *module_name, + u32 line_number, + u32 component_id, + char *internal_name, + acpi_status lookup_status) +{ + acpi_status status; + char *name = NULL; + + + acpi_os_printf ("%8s-%04d: *** Error: Looking up ", + module_name, line_number); + + if (lookup_status == AE_BAD_CHARACTER) { + /* There is a non-ascii character in the name */ + + acpi_os_printf ("[0x%4.4X] (NON-ASCII)\n", *(ACPI_CAST_PTR (u32, internal_name))); + } + else { + /* Convert path to external format */ + + status = acpi_ns_externalize_name (ACPI_UINT32_MAX, internal_name, NULL, &name); + + /* Print target name */ + + if (ACPI_SUCCESS (status)) { + acpi_os_printf ("[%s]", name); + } + else { + acpi_os_printf ("[COULD NOT EXTERNALIZE NAME]"); + } + + if (name) { + ACPI_MEM_FREE (name); + } + } + + acpi_os_printf (" in namespace, %s\n", + acpi_format_exception (lookup_status)); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ns_report_method_error + * + * PARAMETERS: module_name - Caller's module name (for error output) + * line_number - Caller's line number (for error output) + * component_id - Caller's component ID (for error output) + * Message - Error message to use on failure + * + * RETURN: None + * + * DESCRIPTION: Print warning message with full pathname + * + ******************************************************************************/ + +void +acpi_ns_report_method_error ( + char *module_name, + u32 line_number, + u32 component_id, + char *message, + struct acpi_namespace_node *prefix_node, + char *path, + acpi_status method_status) +{ + acpi_status status; + struct acpi_namespace_node *node = prefix_node; + + + if (path) { + status = acpi_ns_get_node_by_path (path, prefix_node, ACPI_NS_NO_UPSEARCH, &node); + if (ACPI_FAILURE (status)) { + acpi_os_printf ("report_method_error: Could not get node\n"); + return; + } + } + + acpi_os_printf ("%8s-%04d: *** Error: ", module_name, line_number); + acpi_ns_print_node_pathname (node, message); + acpi_os_printf (", %s\n", acpi_format_exception (method_status)); +} + + +/******************************************************************************* + * + * FUNCTION: 
acpi_ns_print_node_pathname + * + * PARAMETERS: Node - Object + * Msg - Prefix message + * + * DESCRIPTION: Print an object's full namespace pathname + * Manages allocation/freeing of a pathname buffer + * + ******************************************************************************/ + +void +acpi_ns_print_node_pathname ( + struct acpi_namespace_node *node, + char *msg) +{ + struct acpi_buffer buffer; + acpi_status status; + + + /* Convert handle to a full pathname and print it (with supplied message) */ + + buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; + + status = acpi_ns_handle_to_pathname (node, &buffer); + if (ACPI_SUCCESS (status)) { + acpi_os_printf ("%s [%s] (Node %p)", msg, (char *) buffer.pointer, node); + ACPI_MEM_FREE (buffer.pointer); + } +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ns_valid_root_prefix * * PARAMETERS: Prefix - Character to be checked * @@ -49,7 +201,7 @@ u8 acpi_ns_valid_root_prefix ( - NATIVE_CHAR prefix) + char prefix) { return ((u8) (prefix == '\\')); @@ -58,7 +210,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_valid_path_separator + * FUNCTION: acpi_ns_valid_path_separator * * PARAMETERS: Sep - Character to be checked * @@ -70,7 +222,7 @@ u8 acpi_ns_valid_path_separator ( - NATIVE_CHAR sep) + char sep) { return ((u8) (sep == '.')); @@ -79,7 +231,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_get_type + * FUNCTION: acpi_ns_get_type * * PARAMETERS: Handle - Parent Node to be examined * @@ -87,25 +239,25 @@ * ******************************************************************************/ -acpi_object_type8 +acpi_object_type acpi_ns_get_type ( - acpi_namespace_node *node) + struct acpi_namespace_node *node) { - FUNCTION_TRACE ("Ns_get_type"); + ACPI_FUNCTION_TRACE ("ns_get_type"); if (!node) { - REPORT_WARNING (("Ns_get_type: Null Node ptr")); + ACPI_REPORT_WARNING (("ns_get_type: Null Node ptr")); return_VALUE (ACPI_TYPE_ANY); } - return_VALUE (node->type); + return_VALUE ((acpi_object_type) node->type); } /******************************************************************************* * - * FUNCTION: Acpi_ns_local + * FUNCTION: acpi_ns_local * * PARAMETERS: Type - A namespace object type * @@ -116,25 +268,25 @@ u32 acpi_ns_local ( - acpi_object_type8 type) + acpi_object_type type) { - FUNCTION_TRACE ("Ns_local"); + ACPI_FUNCTION_TRACE ("ns_local"); if (!acpi_ut_valid_object_type (type)) { /* Type code out of range */ - REPORT_WARNING (("Ns_local: Invalid Object Type\n")); - return_VALUE (NSP_NORMAL); + ACPI_REPORT_WARNING (("ns_local: Invalid Object Type\n")); + return_VALUE (ACPI_NS_NORMAL); } - return_VALUE ((u32) acpi_gbl_ns_properties[type] & NSP_LOCAL); + return_VALUE ((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL); } /******************************************************************************* * - * FUNCTION: Acpi_ns_get_internal_name_length + * FUNCTION: acpi_ns_get_internal_name_length * * PARAMETERS: Info - Info struct initialized with the * external name pointer. 
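[The hunks just below rework acpi_ns_get_internal_name_length(), whose sizing rule is spelled out in its comment: four bytes per name segment, plus one byte each for the root prefix, the multi-name prefix opcode, the segment count and the trailing null, plus one byte per leading carat. Here is a self-contained sketch of that arithmetic; the helper name and sample path are invented for illustration, this is not the kernel routine.

#include <stdio.h>

/* Size an internal name per the rule above: illustrative helper only. */
static size_t internal_name_length(const char *ext)
{
	size_t carats = 0, segments = 0;

	if (*ext == '\\') {
		ext++;                    /* root prefix: fully qualified */
	} else {
		while (*ext == '^') {     /* each carat is kept as one byte */
			carats++;
			ext++;
		}
	}

	if (*ext != '\0') {
		segments = 1;             /* first segment has no leading '.' */
		for (; *ext; ext++) {
			if (*ext == '.')
				segments++;
		}
	}

	/* 4 bytes per segment, plus 4 bytes for root prefix, multi-name
	 * prefix op, segment count and trailing null, plus the carats. */
	return (4 * segments) + 4 + carats;
}

int main(void)
{
	const char *path = "\\_SB_.PCI0.LNKA";   /* hypothetical external path */

	printf("%s -> %zu internal bytes\n", path, internal_name_length(path));
	return 0;
}
]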
@@ -146,15 +298,15 @@
  *
  ******************************************************************************/
 
-acpi_status
+void
 acpi_ns_get_internal_name_length (
-	acpi_namestring_info *info)
+	struct acpi_namestring_info *info)
 {
-	NATIVE_CHAR *next_external_char;
-	u32 i;
+	char *next_external_char;
+	u32 i;
 
-	FUNCTION_ENTRY ();
+	ACPI_FUNCTION_ENTRY ();
 
 
 	next_external_char = info->external_name;
 
@@ -164,18 +316,17 @@
 	/*
 	 * For the internal name, the required length is 4 bytes
-	 * per segment, plus 1 each for Root_prefix, Multi_name_prefix_op,
+	 * per segment, plus 1 each for root_prefix, multi_name_prefix_op,
 	 * segment count, trailing null (which is not really needed,
 	 * but there's no harm in putting it there)
 	 *
-	 * strlen() + 1 covers the first Name_seg, which has no
+	 * strlen() + 1 covers the first name_seg, which has no
 	 * path separator
 	 */
 
 	if (acpi_ns_valid_root_prefix (next_external_char[0])) {
 		info->fully_qualified = TRUE;
 		next_external_char++;
 	}
-
 	else {
 		/*
 		 * Handle Carat prefixes
@@ -205,14 +356,12 @@
 			  4 + info->num_carats;
 
 	info->next_external_char = next_external_char;
-
-	return (AE_OK);
 }
 
 
 /*******************************************************************************
  *
- * FUNCTION: Acpi_ns_build_internal_name
+ * FUNCTION: acpi_ns_build_internal_name
  *
  * PARAMETERS: Info - Info struct fully initialized
  *
@@ -225,16 +374,16 @@
 acpi_status
 acpi_ns_build_internal_name (
-	acpi_namestring_info *info)
+	struct acpi_namestring_info *info)
 {
-	u32 num_segments = info->num_segments;
-	NATIVE_CHAR *internal_name = info->internal_name;
-	NATIVE_CHAR *external_name = info->next_external_char;
-	NATIVE_CHAR *result = NULL;
-	u32 i;
+	u32 num_segments = info->num_segments;
+	char *internal_name = info->internal_name;
+	char *external_name = info->next_external_char;
+	char *result = NULL;
+	acpi_native_uint i;
 
-	FUNCTION_TRACE ("Ns_build_internal_name");
+	ACPI_FUNCTION_TRACE ("ns_build_internal_name");
 
 
 	/* Setup the correct prefixes, counts, and pointers */
 
@@ -255,7 +404,6 @@
 			result = &internal_name[3];
 		}
 	}
-
 	else {
 		/*
 		 * Not fully qualified.
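[For reference, the packing acpi_ns_build_internal_name() performs follows the ACPI specification's name-string encoding: one segment is stored bare, two segments are preceded by the dual-name prefix (0x2E), and three or more by the multi-name prefix (0x2F) plus a one-byte segment count; every segment is exactly four bytes, upper-cased and padded with '_'. The standalone sketch below assumes pre-split segments and uses invented helper names; it illustrates the encoding and is not ACPICA code.

#include <ctype.h>
#include <stdio.h>

#define AML_DUAL_NAME_PREFIX     0x2E   /* values from the ACPI spec */
#define AML_MULTI_NAME_PREFIX_OP 0x2F

/* Pack already-split segments into 'out'; returns bytes written. */
static size_t build_internal_name(char *out, const char segs[][5], size_t n)
{
	size_t len = 0, i, j;

	if (n == 2) {
		out[len++] = AML_DUAL_NAME_PREFIX;
	} else if (n > 2) {
		out[len++] = AML_MULTI_NAME_PREFIX_OP;
		out[len++] = (char) n;              /* segment count byte */
	}

	for (i = 0; i < n; i++) {
		for (j = 0; j < 4; j++) {           /* pad short segments with '_' */
			out[len++] = segs[i][j] ?
				(char) toupper((unsigned char) segs[i][j]) : '_';
		}
	}

	out[len] = '\0';
	return len;
}

int main(void)
{
	const char segs[3][5] = { "_SB_", "PCI0", "LNKA" };  /* sample path */
	char buf[32];
	size_t n = build_internal_name(buf, segs, 3);

	printf("%zu bytes, prefix 0x%02X, count %d\n",
	       n, (unsigned char) buf[0], buf[1]);
	return 0;
}
]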
@@ -268,23 +416,20 @@ } } - if (num_segments == 1) { + if (num_segments <= 1) { result = &internal_name[i]; } - else if (num_segments == 2) { internal_name[i] = AML_DUAL_NAME_PREFIX; - result = &internal_name[i+1]; + result = &internal_name[(acpi_native_uint) (i+1)]; } - else { internal_name[i] = AML_MULTI_NAME_PREFIX_OP; - internal_name[i+1] = (char) num_segments; - result = &internal_name[i+2]; + internal_name[(acpi_native_uint) (i+1)] = (char) num_segments; + result = &internal_name[(acpi_native_uint) (i+2)]; } } - /* Build the name (minus path separators) */ for (; num_segments; num_segments--) { @@ -295,11 +440,10 @@ result[i] = '_'; } - else { /* Convert the character to uppercase and save it */ - result[i] = (char) TOUPPER (*external_name); + result[i] = (char) ACPI_TOUPPER ((int) *external_name); external_name++; } } @@ -317,18 +461,17 @@ result += ACPI_NAME_SIZE; } - /* Terminate the string */ *result = 0; if (info->fully_qualified) { ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "returning [%p] (abs) \"\\%s\"\n", - internal_name, &internal_name[0])); + internal_name, internal_name)); } else { ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "returning [%p] (rel) \"%s\"\n", - internal_name, &internal_name[2])); + internal_name, internal_name)); } return_ACPI_STATUS (AE_OK); @@ -337,9 +480,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_internalize_name + * FUNCTION: acpi_ns_internalize_name * - * PARAMETERS: *External_name - External representation of name + * PARAMETERS: *external_name - External representation of name * **Converted Name - Where to return the resulting * internal represention of the name * @@ -352,15 +495,15 @@ acpi_status acpi_ns_internalize_name ( - NATIVE_CHAR *external_name, - NATIVE_CHAR **converted_name) + char *external_name, + char **converted_name) { - NATIVE_CHAR *internal_name; - acpi_namestring_info info; - acpi_status status; + char *internal_name; + struct acpi_namestring_info info; + acpi_status status; - FUNCTION_TRACE ("Ns_internalize_name"); + ACPI_FUNCTION_TRACE ("ns_internalize_name"); if ((!external_name) || @@ -369,7 +512,6 @@ return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* Get the length of the new internal name */ info.external_name = external_name; @@ -398,10 +540,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_externalize_name + * FUNCTION: acpi_ns_externalize_name * - * PARAMETERS: *Internal_name - Internal representation of name - * **Converted_name - Where to return the resulting + * PARAMETERS: *internal_name - Internal representation of name + * **converted_name - Where to return the resulting * external representation of name * * RETURN: Status @@ -413,29 +555,28 @@ acpi_status acpi_ns_externalize_name ( - u32 internal_name_length, - char *internal_name, - u32 *converted_name_length, - char **converted_name) -{ - u32 prefix_length = 0; - u32 names_index = 0; - u32 names_count = 0; - u32 i = 0; - u32 j = 0; + u32 internal_name_length, + char *internal_name, + u32 *converted_name_length, + char **converted_name) +{ + acpi_native_uint names_index = 0; + acpi_native_uint num_segments = 0; + acpi_native_uint required_length; + acpi_native_uint prefix_length = 0; + acpi_native_uint i = 0; + acpi_native_uint j = 0; - FUNCTION_TRACE ("Ns_externalize_name"); + ACPI_FUNCTION_TRACE ("ns_externalize_name"); if (!internal_name_length || !internal_name || - !converted_name_length || !converted_name) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* * 
Check for a prefix (one '\' | one or more '^'). */ @@ -446,9 +587,12 @@ case '^': for (i = 0; i < internal_name_length; i++) { - if (internal_name[i] != '^') { + if (internal_name[i] == '^') { prefix_length = i + 1; } + else { + break; + } } if (i == internal_name_length) { @@ -456,6 +600,9 @@ } break; + + default: + break; } /* @@ -464,62 +611,61 @@ */ if (prefix_length < internal_name_length) { switch (internal_name[prefix_length]) { + case AML_MULTI_NAME_PREFIX_OP: - /* 4-byte names */ + /* 4-byte names */ - case AML_MULTI_NAME_PREFIX_OP: names_index = prefix_length + 2; - names_count = (u32) internal_name[prefix_length + 1]; + num_segments = (acpi_native_uint) (u8) internal_name[(acpi_native_uint) (prefix_length + 1)]; break; + case AML_DUAL_NAME_PREFIX: - /* two 4-byte names */ + /* Two 4-byte names */ - case AML_DUAL_NAME_PREFIX: names_index = prefix_length + 1; - names_count = 2; + num_segments = 2; break; + case 0: - /* Null_name */ + /* null_name */ - case 0: names_index = 0; - names_count = 0; + num_segments = 0; break; + default: - /* one 4-byte name */ + /* one 4-byte name */ - default: names_index = prefix_length; - names_count = 1; + num_segments = 1; break; } } /* - * Calculate the length of Converted_name, which equals the length + * Calculate the length of converted_name, which equals the length * of the prefix, length of all object names, length of any required * punctuation ('.') between object names, plus the NULL terminator. */ - *converted_name_length = prefix_length + (4 * names_count) + - ((names_count > 0) ? (names_count - 1) : 0) + 1; + required_length = prefix_length + (4 * num_segments) + + ((num_segments > 0) ? (num_segments - 1) : 0) + 1; /* * Check to see if we're still in bounds. If not, there's a problem - * with Internal_name (invalid format). + * with internal_name (invalid format). */ - if (*converted_name_length > internal_name_length) { - REPORT_ERROR (("Ns_externalize_name: Invalid internal name\n")); + if (required_length > internal_name_length) { + ACPI_REPORT_ERROR (("ns_externalize_name: Invalid internal name\n")); return_ACPI_STATUS (AE_BAD_PATHNAME); } /* - * Build Converted_name... + * Build converted_name... */ - - (*converted_name) = ACPI_MEM_CALLOCATE (*converted_name_length); + *converted_name = ACPI_MEM_CALLOCATE (required_length); if (!(*converted_name)) { return_ACPI_STATUS (AE_NO_MEMORY); } @@ -530,8 +676,8 @@ (*converted_name)[j++] = internal_name[i]; } - if (names_count > 0) { - for (i = 0; i < names_count; i++) { + if (num_segments > 0) { + for (i = 0; i < num_segments; i++) { if (i > 0) { (*converted_name)[j++] = '.'; } @@ -543,13 +689,17 @@ } } + if (converted_name_length) { + *converted_name_length = (u32) required_length; + } + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ns_map_handle_to_node + * FUNCTION: acpi_ns_map_handle_to_node * * PARAMETERS: Handle - Handle to be converted to an Node * @@ -557,20 +707,21 @@ * * DESCRIPTION: Convert a namespace handle to a real Node * + * Note: Real integer handles allow for more verification + * and keep all pointers within this subsystem. 
+ * ******************************************************************************/ -acpi_namespace_node * +struct acpi_namespace_node * acpi_ns_map_handle_to_node ( - acpi_handle handle) + acpi_handle handle) { - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* - * Simple implementation for now; - * TBD: [Future] Real integer handles allow for more verification - * and keep all pointers within this subsystem! + * Simple implementation. */ if (!handle) { return (NULL); @@ -580,20 +731,19 @@ return (acpi_gbl_root_node); } - /* We can at least attempt to verify the handle */ - if (!VALID_DESCRIPTOR_TYPE (handle, ACPI_DESC_TYPE_NAMED)) { + if (ACPI_GET_DESCRIPTOR_TYPE (handle) != ACPI_DESC_TYPE_NAMED) { return (NULL); } - return ((acpi_namespace_node *) handle); + return ((struct acpi_namespace_node *) handle); } /******************************************************************************* * - * FUNCTION: Acpi_ns_convert_entry_to_handle + * FUNCTION: acpi_ns_convert_entry_to_handle * * PARAMETERS: Node - Node to be converted to a Handle * @@ -605,14 +755,12 @@ acpi_handle acpi_ns_convert_entry_to_handle ( - acpi_namespace_node *node) + struct acpi_namespace_node *node) { /* * Simple implementation for now; - * TBD: [Future] Real integer handles allow for more verification - * and keep all pointers within this subsystem! */ return ((acpi_handle) node); @@ -624,7 +772,7 @@ return (NULL); } - if (Node == Acpi_gbl_Root_node) + if (Node == acpi_gbl_root_node) { return (ACPI_ROOT_OBJECT); } @@ -637,7 +785,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_terminate + * FUNCTION: acpi_ns_terminate * * PARAMETERS: none * @@ -650,39 +798,32 @@ void acpi_ns_terminate (void) { - acpi_operand_object *obj_desc; - acpi_namespace_node *this_node; + union acpi_operand_object *obj_desc; - FUNCTION_TRACE ("Ns_terminate"); + ACPI_FUNCTION_TRACE ("ns_terminate"); - this_node = acpi_gbl_root_node; - /* - * 1) Free the entire namespace -- all objects, tables, and stacks + * 1) Free the entire namespace -- all nodes and objects * - * Delete all objects linked to the root - * (additional table descriptors) + * Delete all object descriptors attached to namepsace nodes */ - acpi_ns_delete_namespace_subtree (this_node); + acpi_ns_delete_namespace_subtree (acpi_gbl_root_node); - /* Detach any object(s) attached to the root */ + /* Detach any objects attached to the root */ - obj_desc = acpi_ns_get_attached_object (this_node); + obj_desc = acpi_ns_get_attached_object (acpi_gbl_root_node); if (obj_desc) { - acpi_ns_detach_object (this_node); - acpi_ut_remove_reference (obj_desc); + acpi_ns_detach_object (acpi_gbl_root_node); } - acpi_ns_delete_children (this_node); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Namespace freed\n")); - /* * 2) Now we can delete the ACPI tables */ - acpi_tb_delete_acpi_tables (); + acpi_tb_delete_all_tables (); ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "ACPI Tables freed\n")); return_VOID; @@ -691,7 +832,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_opens_scope + * FUNCTION: acpi_ns_opens_scope * * PARAMETERS: Type - A valid namespace type * @@ -702,34 +843,36 @@ u32 acpi_ns_opens_scope ( - acpi_object_type8 type) + acpi_object_type type) { - FUNCTION_TRACE_U32 ("Ns_opens_scope", type); + ACPI_FUNCTION_TRACE_STR ("ns_opens_scope", acpi_ut_get_type_name (type)); if (!acpi_ut_valid_object_type (type)) { /* type code out of range */ - REPORT_WARNING (("Ns_opens_scope: Invalid Object 
Type\n")); - return_VALUE (NSP_NORMAL); + ACPI_REPORT_WARNING (("ns_opens_scope: Invalid Object Type %X\n", type)); + return_VALUE (ACPI_NS_NORMAL); } - return_VALUE (((u32) acpi_gbl_ns_properties[type]) & NSP_NEWSCOPE); + return_VALUE (((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE); } /******************************************************************************* * - * FUNCTION: Acpi_ns_get_node + * FUNCTION: acpi_ns_get_node_by_path * * PARAMETERS: *Pathname - Name to be found, in external (ASL) format. The * \ (backslash) and ^ (carat) prefixes, and the * . (period) to separate segments are supported. - * Start_node - Root of subtree to be searched, or NS_ALL for the + * start_node - Root of subtree to be searched, or NS_ALL for the * root of the name space. If Name is fully * qualified (first s8 is '\'), the passed value * of Scope will not be accessed. - * Return_node - Where the Node is returned + * Flags - Used to indicate whether to perform upsearch or + * not. + * return_node - Where the Node is returned * * DESCRIPTION: Look up a name relative to a given scope and return the * corresponding Node. NOTE: Scope can be null. @@ -739,40 +882,36 @@ ******************************************************************************/ acpi_status -acpi_ns_get_node ( - NATIVE_CHAR *pathname, - acpi_namespace_node *start_node, - acpi_namespace_node **return_node) +acpi_ns_get_node_by_path ( + char *pathname, + struct acpi_namespace_node *start_node, + u32 flags, + struct acpi_namespace_node **return_node) { - acpi_generic_state scope_info; - acpi_status status; - NATIVE_CHAR *internal_path = NULL; + union acpi_generic_state scope_info; + acpi_status status; + char *internal_path = NULL; - FUNCTION_TRACE_PTR ("Ns_get_node", pathname); + ACPI_FUNCTION_TRACE_PTR ("ns_get_node_by_path", pathname); - /* Ensure that the namespace has been initialized */ - - if (!acpi_gbl_root_node) { - return_ACPI_STATUS (AE_NO_NAMESPACE); - } + if (pathname) { + /* Convert path to internal representation */ - if (!pathname) { - return_ACPI_STATUS (AE_BAD_PARAMETER); + status = acpi_ns_internalize_name (pathname, &internal_path); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } } + /* Must lock namespace during lookup */ - /* Convert path to internal representation */ - - status = acpi_ns_internalize_name (pathname, &internal_path); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); - /* Setup lookup scope (search starting point) */ scope_info.scope.node = start_node; @@ -780,30 +919,30 @@ /* Lookup the name in the namespace */ status = acpi_ns_lookup (&scope_info, internal_path, - ACPI_TYPE_ANY, IMODE_EXECUTE, - NS_NO_UPSEARCH | NS_DONT_OPEN_SCOPE, + ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, + (flags | ACPI_NS_DONT_OPEN_SCOPE), NULL, return_node); - if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%s, %s\n", internal_path, acpi_format_exception (status))); } - - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); - /* Cleanup */ - ACPI_MEM_FREE (internal_path); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + + if (internal_path) { + ACPI_MEM_FREE (internal_path); + } return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ns_find_parent_name + * FUNCTION: acpi_ns_find_parent_name * - * PARAMETERS: *Child_node - Named Obj whose name is to be found + * PARAMETERS: *child_node - Named Obj whose name 
is to be found * * RETURN: The ACPI name * @@ -815,75 +954,39 @@ acpi_name acpi_ns_find_parent_name ( - acpi_namespace_node *child_node) + struct acpi_namespace_node *child_node) { - acpi_namespace_node *parent_node; + struct acpi_namespace_node *parent_node; - FUNCTION_TRACE ("Ns_find_parent_name"); + ACPI_FUNCTION_TRACE ("ns_find_parent_name"); if (child_node) { /* Valid entry. Get the parent Node */ - parent_node = acpi_ns_get_parent_object (child_node); + parent_node = acpi_ns_get_parent_node (child_node); if (parent_node) { ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Parent of %p [%4.4s] is %p [%4.4s]\n", - child_node, (char*)&child_node->name, parent_node, (char*)&parent_node->name)); + child_node, child_node->name.ascii, + parent_node, parent_node->name.ascii)); - if (parent_node->name) { - return_VALUE (parent_node->name); + if (parent_node->name.integer) { + return_VALUE ((acpi_name) parent_node->name.integer); } } ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "unable to find parent of %p (%4.4s)\n", - child_node, (char*)&child_node->name)); + child_node, child_node->name.ascii)); } return_VALUE (ACPI_UNKNOWN_NAME); } -#if defined(ACPI_DEBUG) || defined(ENABLE_DEBUGGER) - /******************************************************************************* * - * FUNCTION: Acpi_ns_exist_downstream_sibling - * - * PARAMETERS: *Node - pointer to first Node to examine - * - * RETURN: TRUE if sibling is found, FALSE otherwise - * - * DESCRIPTION: Searches remainder of scope being processed to determine - * whether there is a downstream sibling to the current - * object. This function is used to determine what type of - * line drawing character to use when displaying namespace - * trees. - * - ******************************************************************************/ - -u8 -acpi_ns_exist_downstream_sibling ( - acpi_namespace_node *node) -{ - - if (!node) { - return (FALSE); - } - - if (node->name) { - return (TRUE); - } - - return (FALSE); -} - -#endif /* ACPI_DEBUG */ - - -/******************************************************************************* - * - * FUNCTION: Acpi_ns_get_parent_object + * FUNCTION: acpi_ns_get_parent_node * * PARAMETERS: Node - Current table entry * @@ -894,13 +997,11 @@ ******************************************************************************/ -acpi_namespace_node * -acpi_ns_get_parent_object ( - acpi_namespace_node *node) +struct acpi_namespace_node * +acpi_ns_get_parent_node ( + struct acpi_namespace_node *node) { - - - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); if (!node) { @@ -925,7 +1026,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_get_next_valid_node + * FUNCTION: acpi_ns_get_next_valid_node * * PARAMETERS: Node - Current table entry * @@ -938,9 +1039,9 @@ ******************************************************************************/ -acpi_namespace_node * +struct acpi_namespace_node * acpi_ns_get_next_valid_node ( - acpi_namespace_node *node) + struct acpi_namespace_node *node) { /* If we are at the end of this peer list, return NULL */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nswalk.c linux.21rc5-ac2/drivers/acpi/namespace/nswalk.c --- linux.21rc5/drivers/acpi/namespace/nswalk.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nswalk.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,49 +1,66 @@ /****************************************************************************** * * Module Name: nswalk - Functions for walking 
the ACPI namespace - * $Revision: 26 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nswalk") + ACPI_MODULE_NAME ("nswalk") /******************************************************************************* * - * FUNCTION: Acpi_ns_get_next_node + * FUNCTION: acpi_ns_get_next_node * * PARAMETERS: Type - Type of node to be searched for - * Parent_node - Parent node whose children we are + * parent_node - Parent node whose children we are * getting - * Child_node - Previous child that was found. + * child_node - Previous child that was found. 
* The NEXT child will be returned * - * RETURN: acpi_namespace_node - Pointer to the NEXT child or NULL if + * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if * none is found. * * DESCRIPTION: Return the next peer node within the namespace. If Handle @@ -52,16 +69,16 @@ * ******************************************************************************/ -acpi_namespace_node * +struct acpi_namespace_node * acpi_ns_get_next_node ( - acpi_object_type8 type, - acpi_namespace_node *parent_node, - acpi_namespace_node *child_node) + acpi_object_type type, + struct acpi_namespace_node *parent_node, + struct acpi_namespace_node *child_node) { - acpi_namespace_node *next_node = NULL; + struct acpi_namespace_node *next_node = NULL; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); if (!child_node) { @@ -81,7 +98,7 @@ /* If any type is OK, we are done */ if (type == ACPI_TYPE_ANY) { - /* Next_node is NULL if we are at the end-of-list */ + /* next_node is NULL if we are at the end-of-list */ return (next_node); } @@ -108,22 +125,22 @@ /******************************************************************************* * - * FUNCTION: Acpi_ns_walk_namespace + * FUNCTION: acpi_ns_walk_namespace * * PARAMETERS: Type - acpi_object_type to search for - * Start_node - Handle in namespace where search begins - * Max_depth - Depth to which search is to reach - * Unlock_before_callback- Whether to unlock the NS before invoking + * start_node - Handle in namespace where search begins + * max_depth - Depth to which search is to reach + * unlock_before_callback- Whether to unlock the NS before invoking * the callback routine - * User_function - Called when an object of "Type" is found + * user_function - Called when an object of "Type" is found * Context - Passed to user function - * Return_value - from the User_function if terminated early. + * return_value - from the user_function if terminated early. * Otherwise, returns NULL. * RETURNS: Status * * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, - * starting (and ending) at the node specified by Start_handle. - * The User_function is called whenever a node that matches + * starting (and ending) at the node specified by start_handle. + * The user_function is called whenever a node that matches * the type parameter is found. If the user function returns * a non-zero value, the search is terminated immediately and this * value is returned to the caller. @@ -138,22 +155,23 @@ acpi_status acpi_ns_walk_namespace ( - acpi_object_type8 type, - acpi_handle start_node, - u32 max_depth, - u8 unlock_before_callback, - acpi_walk_callback user_function, - void *context, - void **return_value) + acpi_object_type type, + acpi_handle start_node, + u32 max_depth, + u8 unlock_before_callback, + acpi_walk_callback user_function, + void *context, + void **return_value) { - acpi_status status; - acpi_namespace_node *child_node; - acpi_namespace_node *parent_node; - acpi_object_type8 child_type; - u32 level; + acpi_status status; + acpi_status mutex_status; + struct acpi_namespace_node *child_node; + struct acpi_namespace_node *parent_node; + acpi_object_type child_type; + u32 level; - FUNCTION_TRACE ("Ns_walk_namespace"); + ACPI_FUNCTION_TRACE ("ns_walk_namespace"); /* Special case for the namespace Root Node */ @@ -172,7 +190,7 @@ /* * Traverse the tree of nodes until we bubble back up to where we * started. 
When Level is zero, the loop is done because we have - * bubbled up to (and passed) the original parent handle (Start_entry) + * bubbled up to (and passed) the original parent handle (start_entry) */ while (level > 0) { /* Get the next node in this scope. Null if not found */ @@ -194,14 +212,20 @@ * callback function */ if (unlock_before_callback) { - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + mutex_status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (mutex_status)) { + return_ACPI_STATUS (mutex_status); + } } status = user_function (child_node, level, context, return_value); if (unlock_before_callback) { - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + mutex_status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (mutex_status)) { + return_ACPI_STATUS (mutex_status); + } } switch (status) { @@ -216,14 +240,12 @@ /* Exit now, with OK status */ return_ACPI_STATUS (AE_OK); - break; default: /* All others are valid exceptions */ return_ACPI_STATUS (status); - break; } } @@ -247,16 +269,15 @@ } } } - else { /* - * No more children of this node (Acpi_ns_get_next_node + * No more children of this node (acpi_ns_get_next_node * failed), go back upwards in the namespace tree to * the node's parent. */ level--; child_node = parent_node; - parent_node = acpi_ns_get_parent_object (parent_node); + parent_node = acpi_ns_get_parent_node (parent_node); } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsxfeval.c linux.21rc5-ac2/drivers/acpi/namespace/nsxfeval.c --- linux.21rc5/drivers/acpi/namespace/nsxfeval.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsxfeval.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,742 @@ +/******************************************************************************* + * + * Module Name: nsxfeval - Public interfaces to the ACPI subsystem + * ACPI Object evaluation interfaces + * + ******************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + + +#include +#include + + +#define _COMPONENT ACPI_NAMESPACE + ACPI_MODULE_NAME ("nsxfeval") + + +/******************************************************************************* + * + * FUNCTION: acpi_evaluate_object_typed + * + * PARAMETERS: Handle - Object handle (optional) + * *Pathname - Object pathname (optional) + * **external_params - List of parameters to pass to method, + * terminated by NULL. May be NULL + * if no parameters are being passed. + * *return_buffer - Where to put method's return value (if + * any). If NULL, no value is returned. + * return_type - Expected type of return object + * + * RETURN: Status + * + * DESCRIPTION: Find and evaluate the given object, passing the given + * parameters if necessary. One of "Handle" or "Pathname" must + * be valid (non-null) + * + ******************************************************************************/ + +acpi_status +acpi_evaluate_object_typed ( + acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *external_params, + struct acpi_buffer *return_buffer, + acpi_object_type return_type) +{ + acpi_status status; + u8 must_free = FALSE; + + + ACPI_FUNCTION_TRACE ("acpi_evaluate_object_typed"); + + + /* Return buffer must be valid */ + + if (!return_buffer) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + if (return_buffer->length == ACPI_ALLOCATE_BUFFER) { + must_free = TRUE; + } + + /* Evaluate the object */ + + status = acpi_evaluate_object (handle, pathname, external_params, return_buffer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Type ANY means "don't care" */ + + if (return_type == ACPI_TYPE_ANY) { + return_ACPI_STATUS (AE_OK); + } + + if (return_buffer->length == 0) { + /* Error because caller specifically asked for a return value */ + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "No return value\n")); + + return_ACPI_STATUS (AE_NULL_OBJECT); + } + + /* Examine the object type returned from evaluate_object */ + + if (((union acpi_object *) return_buffer->pointer)->type == return_type) { + return_ACPI_STATUS (AE_OK); + } + + /* Return object type does not match requested type */ + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Incorrect return type [%s] requested [%s]\n", + acpi_ut_get_type_name (((union acpi_object *) return_buffer->pointer)->type), + acpi_ut_get_type_name (return_type))); + + if (must_free) { + /* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */ + + acpi_os_free (return_buffer->pointer); + return_buffer->pointer = NULL; + } + + return_buffer->length = 0; + return_ACPI_STATUS (AE_TYPE); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_evaluate_object + * + * PARAMETERS: Handle - Object handle (optional) + * *Pathname - Object pathname (optional) + * **external_params - List of parameters to pass to method, + * terminated by NULL. May be NULL + * if no parameters are being passed. + * *return_buffer - Where to put method's return value (if + * any). If NULL, no value is returned. 
+ * + * RETURN: Status + * + * DESCRIPTION: Find and evaluate the given object, passing the given + * parameters if necessary. One of "Handle" or "Pathname" must + * be valid (non-null) + * + ******************************************************************************/ + +acpi_status +acpi_evaluate_object ( + acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *external_params, + struct acpi_buffer *return_buffer) +{ + acpi_status status; + union acpi_operand_object **internal_params = NULL; + union acpi_operand_object *internal_return_obj = NULL; + acpi_size buffer_space_needed; + u32 i; + + + ACPI_FUNCTION_TRACE ("acpi_evaluate_object"); + + + /* + * If there are parameters to be passed to the object + * (which must be a control method), the external objects + * must be converted to internal objects + */ + if (external_params && external_params->count) { + /* + * Allocate a new parameter block for the internal objects + * Add 1 to count to allow for null terminated internal list + */ + internal_params = ACPI_MEM_CALLOCATE (((acpi_size) external_params->count + 1) * + sizeof (void *)); + if (!internal_params) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* + * Convert each external object in the list to an + * internal object + */ + for (i = 0; i < external_params->count; i++) { + status = acpi_ut_copy_eobject_to_iobject (&external_params->pointer[i], + &internal_params[i]); + if (ACPI_FAILURE (status)) { + acpi_ut_delete_internal_object_list (internal_params); + return_ACPI_STATUS (status); + } + } + internal_params[external_params->count] = NULL; + } + + /* + * Three major cases: + * 1) Fully qualified pathname + * 2) No handle, not fully qualified pathname (error) + * 3) Valid handle + */ + if ((pathname) && + (acpi_ns_valid_root_prefix (pathname[0]))) { + /* + * The path is fully qualified, just evaluate by name + */ + status = acpi_ns_evaluate_by_name (pathname, internal_params, + &internal_return_obj); + } + else if (!handle) { + /* + * A handle is optional iff a fully qualified pathname + * is specified. Since we've already handled fully + * qualified names above, this is an error + */ + if (!pathname) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Both Handle and Pathname are NULL\n")); + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Handle is NULL and Pathname is relative\n")); + } + + status = AE_BAD_PARAMETER; + } + else { + /* + * We get here if we have a handle -- and if we have a + * pathname it is relative. The handle will be validated + * in the lower procedures + */ + if (!pathname) { + /* + * The null pathname case means the handle is for + * the actual object to be evaluated + */ + status = acpi_ns_evaluate_by_handle (handle, internal_params, + &internal_return_obj); + } + else { + /* + * Both a Handle and a relative Pathname + */ + status = acpi_ns_evaluate_relative (handle, pathname, internal_params, + &internal_return_obj); + } + } + + + /* + * If we are expecting a return value, and all went well above, + * copy the return value to an external object. + */ + if (return_buffer) { + if (!internal_return_obj) { + return_buffer->length = 0; + } + else { + if (ACPI_GET_DESCRIPTOR_TYPE (internal_return_obj) == ACPI_DESC_TYPE_NAMED) { + /* + * If we received a NS Node as a return object, this means that + * the object we are evaluating has nothing interesting to + * return (such as a mutex, etc.) We return an error because + * these types are essentially unsupported by this interface. 
+ * We don't check up front because this makes it easier to add + * support for various types at a later date if necessary. + */ + status = AE_TYPE; + internal_return_obj = NULL; /* No need to delete a NS Node */ + return_buffer->length = 0; + } + + if (ACPI_SUCCESS (status)) { + /* + * Find out how large a buffer is needed + * to contain the returned object + */ + status = acpi_ut_get_object_size (internal_return_obj, + &buffer_space_needed); + if (ACPI_SUCCESS (status)) { + /* Validate/Allocate/Clear caller buffer */ + + status = acpi_ut_initialize_buffer (return_buffer, buffer_space_needed); + if (ACPI_FAILURE (status)) { + /* + * Caller's buffer is too small or a new one can't be allocated + */ + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "Needed buffer size %X, %s\n", + (u32) buffer_space_needed, acpi_format_exception (status))); + } + else { + /* + * We have enough space for the object, build it + */ + status = acpi_ut_copy_iobject_to_eobject (internal_return_obj, + return_buffer); + } + } + } + } + } + + /* Delete the return and parameter objects */ + + if (internal_return_obj) { + /* + * Delete the internal return object. (Or at least + * decrement the reference count by one) + */ + acpi_ut_remove_reference (internal_return_obj); + } + + /* + * Free the input parameter list (if we created one), + */ + if (internal_params) { + /* Free the allocated parameter block */ + + acpi_ut_delete_internal_object_list (internal_params); + } + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_walk_namespace + * + * PARAMETERS: Type - acpi_object_type to search for + * start_object - Handle in namespace where search begins + * max_depth - Depth to which search is to reach + * user_function - Called when an object of "Type" is found + * Context - Passed to user function + * return_value - Location where return value of + * user_function is put if terminated early + * + * RETURNS Return value from the user_function if terminated early. + * Otherwise, returns NULL. + * + * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, + * starting (and ending) at the object specified by start_handle. + * The user_function is called whenever an object that matches + * the type parameter is found. If the user function returns + * a non-zero value, the search is terminated immediately and this + * value is returned to the caller. + * + * The point of this procedure is to provide a generic namespace + * walk routine that can be called from multiple places to + * provide multiple services; the User Function can be tailored + * to each task, whether it is a print function, a compare + * function, etc. + * + ******************************************************************************/ + +acpi_status +acpi_walk_namespace ( + acpi_object_type type, + acpi_handle start_object, + u32 max_depth, + acpi_walk_callback user_function, + void *context, + void **return_value) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_walk_namespace"); + + + /* Parameter validation */ + + if ((type > ACPI_TYPE_EXTERNAL_MAX) || + (!max_depth) || + (!user_function)) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* + * Lock the namespace around the walk. + * The namespace will be unlocked/locked around each call + * to the user function - since this function + * must be allowed to make Acpi calls itself. 
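[Editorial aside: a hedged sketch of a user function matching the acpi_walk_callback signature used by acpi_walk_namespace below; the counting helper is hypothetical:]

/* Count every Device node below the namespace root. */
static acpi_status
example_count_cb(acpi_handle handle, u32 level, void *context,
		 void **return_value)
{
	u32 *count = context;

	(*count)++;
	return AE_OK;	/* AE_CTRL_DEPTH would skip this node's children */
}

static u32
example_count_devices(void)
{
	u32 count = 0;

	/* The namespace lock is dropped around each callback (ACPI_NS_WALK_UNLOCK),
	 * so the user function may itself call into ACPI. */
	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX, example_count_cb, &count, NULL);
	return count;
}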
+ */ + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + status = acpi_ns_walk_namespace (type, start_object, max_depth, ACPI_NS_WALK_UNLOCK, + user_function, context, return_value); + + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ns_get_device_callback + * + * PARAMETERS: Callback from acpi_get_device + * + * RETURN: Status + * + * DESCRIPTION: Takes callbacks from walk_namespace and filters out all non- + * present devices, or if they specified a HID, it filters based + * on that. + * + ******************************************************************************/ + +static acpi_status +acpi_ns_get_device_callback ( + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value) +{ + acpi_status status; + struct acpi_namespace_node *node; + u32 flags; + struct acpi_device_id hid; + struct acpi_device_id cid; + struct acpi_get_devices_info *info; + + + info = context; + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } + + node = acpi_ns_map_handle_to_node (obj_handle); + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } + + if (!node) { + return (AE_BAD_PARAMETER); + } + + /* + * Run _STA to determine if device is present + */ + status = acpi_ut_execute_STA (node, &flags); + if (ACPI_FAILURE (status)) { + return (AE_CTRL_DEPTH); + } + + if (!(flags & 0x01)) { + /* Don't return at the device or children of the device if not there */ + return (AE_CTRL_DEPTH); + } + + /* + * Filter based on device HID & CID + */ + if (info->hid != NULL) { + status = acpi_ut_execute_HID (node, &hid); + if (status == AE_NOT_FOUND) { + return (AE_OK); + } + else if (ACPI_FAILURE (status)) { + return (AE_CTRL_DEPTH); + } + + if (ACPI_STRNCMP (hid.buffer, info->hid, sizeof (hid.buffer)) != 0) { + status = acpi_ut_execute_CID (node, &cid); + if (status == AE_NOT_FOUND) { + return (AE_OK); + } + else if (ACPI_FAILURE (status)) { + return (AE_CTRL_DEPTH); + } + + /* TBD: Handle CID packages */ + + if (ACPI_STRNCMP (cid.buffer, info->hid, sizeof (cid.buffer)) != 0) { + return (AE_OK); + } + } + } + + status = info->user_function (obj_handle, nesting_level, info->context, return_value); + return (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_get_devices + * + * PARAMETERS: HID - HID to search for. Can be NULL. + * user_function - Called when a matching object is found + * Context - Passed to user function + * return_value - Location where return value of + * user_function is put if terminated early + * + * RETURNS Return value from the user_function if terminated early. + * Otherwise, returns NULL. + * + * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, + * starting (and ending) at the object specified by start_handle. + * The user_function is called whenever an object of type + * Device is found. If the user function returns + * a non-zero value, the search is terminated immediately and this + * value is returned to the caller. + * + * This is a wrapper for walk_namespace, but the callback performs + * additional filtering. Please see acpi_get_device_callback. 
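[Editorial aside: the device callback above filters on _STA presence and on _HID/_CID, so acpi_get_devices callers only ever see matching, present devices. A sketch under those assumptions; the "PNP0A03" HID and both helpers are illustrative, and AE_CTRL_TERMINATE is assumed to end the walk early as the status switch at the top of this hunk suggests:]

static acpi_status
example_found_root(acpi_handle handle, u32 level, void *context,
		   void **return_value)
{
	*(acpi_handle *) context = handle;
	return AE_CTRL_TERMINATE;	/* first match is enough */
}

static acpi_handle
example_find_pci_root(void)
{
	acpi_handle root = NULL;

	/* Only _STA-present devices whose _HID (or _CID) matches are seen. */
	acpi_get_devices("PNP0A03", example_found_root, &root, NULL);
	return root;	/* NULL if nothing matched */
}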
+ * + ******************************************************************************/ + +acpi_status +acpi_get_devices ( + char *HID, + acpi_walk_callback user_function, + void *context, + void **return_value) +{ + acpi_status status; + struct acpi_get_devices_info info; + + + ACPI_FUNCTION_TRACE ("acpi_get_devices"); + + + /* Parameter validation */ + + if (!user_function) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* + * We're going to call their callback from OUR callback, so we need + * to know what it is, and their context parameter. + */ + info.context = context; + info.user_function = user_function; + info.hid = HID; + + /* + * Lock the namespace around the walk. + * The namespace will be unlocked/locked around each call + * to the user function - since this function + * must be allowed to make Acpi calls itself. + */ + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + status = acpi_ns_walk_namespace (ACPI_TYPE_DEVICE, + ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, + ACPI_NS_WALK_UNLOCK, + acpi_ns_get_device_callback, &info, + return_value); + + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_attach_data + * + * PARAMETERS: obj_handle - Namespace node + * Handler - Handler for this attachment + * Data - Pointer to data to be attached + * + * RETURN: Status + * + * DESCRIPTION: Attach arbitrary data and handler to a namespace node. + * + ******************************************************************************/ + +acpi_status +acpi_attach_data ( + acpi_handle obj_handle, + acpi_object_handler handler, + void *data) +{ + struct acpi_namespace_node *node; + acpi_status status; + + + /* Parameter validation */ + + if (!obj_handle || + !handler || + !data) { + return (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } + + /* Convert and validate the handle */ + + node = acpi_ns_map_handle_to_node (obj_handle); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + status = acpi_ns_attach_data (node, handler, data); + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_detach_data + * + * PARAMETERS: obj_handle - Namespace node handle + * Handler - Handler used in call to acpi_attach_data + * + * RETURN: Status + * + * DESCRIPTION: Remove data that was previously attached to a node. 
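[Editorial aside: a rough sketch of the attach/get/detach lifecycle these three new entry points provide. It assumes acpi_object_handler takes (acpi_handle, void *) and that the handler runs if the node is deleted while data is attached; example_info and the helpers are hypothetical:]

struct example_info { int slot; };

static void
example_deleter(acpi_handle object, void *data)
{
	acpi_os_free(data);	/* node went away with data still attached */
}

static acpi_status
example_tag_node(acpi_handle node, int slot)
{
	struct example_info *info = acpi_os_allocate(sizeof(*info));

	if (!info)
		return AE_NO_MEMORY;
	info->slot = slot;

	/* Undo later with acpi_detach_data(node, example_deleter) */
	return acpi_attach_data(node, example_deleter, info);
}

static int
example_slot_of(acpi_handle node)
{
	struct example_info *info;

	if (ACPI_FAILURE(acpi_get_data(node, example_deleter, (void **) &info)))
		return -1;
	return info->slot;
}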
+ * + ******************************************************************************/ + +acpi_status +acpi_detach_data ( + acpi_handle obj_handle, + acpi_object_handler handler) +{ + struct acpi_namespace_node *node; + acpi_status status; + + + /* Parameter validation */ + + if (!obj_handle || + !handler) { + return (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } + + /* Convert and validate the handle */ + + node = acpi_ns_map_handle_to_node (obj_handle); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + status = acpi_ns_detach_data (node, handler); + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_get_data + * + * PARAMETERS: obj_handle - Namespace node + * Handler - Handler used in call to attach_data + * Data - Where the data is returned + * + * RETURN: Status + * + * DESCRIPTION: Retrieve data that was previously attached to a namespace node. + * + ******************************************************************************/ + +acpi_status +acpi_get_data ( + acpi_handle obj_handle, + acpi_object_handler handler, + void **data) +{ + struct acpi_namespace_node *node; + acpi_status status; + + + /* Parameter validation */ + + if (!obj_handle || + !handler || + !data) { + return (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } + + /* Convert and validate the handle */ + + node = acpi_ns_map_handle_to_node (obj_handle); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + status = acpi_ns_get_attached_data (node, handler, data); + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return (status); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsxfname.c linux.21rc5-ac2/drivers/acpi/namespace/nsxfname.c --- linux.21rc5/drivers/acpi/namespace/nsxfname.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsxfname.c 2003-04-28 17:59:26.000000000 +0100 @@ -2,50 +2,63 @@ * * Module Name: nsxfname - Public interfaces to the ACPI subsystem * ACPI Namespace oriented interfaces - * $Revision: 82 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "amlcode.h" -#include "acparser.h" -#include "acdispat.h" -#include "acevents.h" +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsxfname") + ACPI_MODULE_NAME ("nsxfname") /**************************************************************************** * - * FUNCTION: Acpi_get_handle + * FUNCTION: acpi_get_handle * * PARAMETERS: Parent - Object to search under (search scope). 
- * Path_name - Pointer to an asciiz string containing the + * path_name - Pointer to an asciiz string containing the * name - * Ret_handle - Where the return handle is placed + * ret_handle - Where the return handle is placed * * RETURN: Status * @@ -58,16 +71,16 @@ acpi_status acpi_get_handle ( - acpi_handle parent, - acpi_string pathname, - acpi_handle *ret_handle) + acpi_handle parent, + acpi_string pathname, + acpi_handle *ret_handle) { - acpi_status status; - acpi_namespace_node *node = NULL; - acpi_namespace_node *prefix_node = NULL; + acpi_status status; + struct acpi_namespace_node *node = NULL; + struct acpi_namespace_node *prefix_node = NULL; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* Parameter Validation */ @@ -79,20 +92,26 @@ /* Convert a parent handle to a prefix node */ if (parent) { - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } prefix_node = acpi_ns_map_handle_to_node (parent); if (!prefix_node) { - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return (AE_BAD_PARAMETER); } - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } } /* Special case for root, since we can't search for it */ - if (STRCMP (pathname, NS_ROOT_PATH) == 0) { + if (ACPI_STRCMP (pathname, ACPI_NS_ROOT_PATH) == 0) { *ret_handle = acpi_ns_convert_entry_to_handle (acpi_gbl_root_node); return (AE_OK); } @@ -100,7 +119,7 @@ /* * Find the Node and convert to a handle */ - status = acpi_ns_get_node (pathname, prefix_node, &node); + status = acpi_ns_get_node_by_path (pathname, prefix_node, ACPI_NS_NO_UPSEARCH, &node); *ret_handle = NULL; if (ACPI_SUCCESS (status)) { @@ -113,88 +132,88 @@ /**************************************************************************** * - * FUNCTION: Acpi_get_name + * FUNCTION: acpi_get_name * * PARAMETERS: Handle - Handle to be converted to a pathname - * Name_type - Full pathname or single segment - * Ret_path_ptr - Buffer for returned path + * name_type - Full pathname or single segment + * Buffer - Buffer for returned path * * RETURN: Pointer to a string containing the fully qualified Name. * * DESCRIPTION: This routine returns the fully qualified name associated with - * the Handle parameter. This and the Acpi_pathname_to_handle are + * the Handle parameter. This and the acpi_pathname_to_handle are * complementary functions. 
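[Editorial aside: a small round-trip sketch of the two complementary interfaces, using the patched struct acpi_buffer form of acpi_get_name; the helper is illustrative and <linux/kernel.h> is assumed for printk:]

static void
example_roundtrip(void)
{
	acpi_handle sb;
	struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL };

	/* Fully qualified pathname, so the Parent argument may be NULL */
	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB_", &sb)))
		return;

	if (ACPI_SUCCESS(acpi_get_name(sb, ACPI_FULL_PATHNAME, &path))) {
		printk(KERN_DEBUG "resolved to %s\n", (char *) path.pointer);
		acpi_os_free(path.pointer);
	}
}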
* ******************************************************************************/ acpi_status acpi_get_name ( - acpi_handle handle, - u32 name_type, - acpi_buffer *ret_path_ptr) + acpi_handle handle, + u32 name_type, + struct acpi_buffer *buffer) { - acpi_status status; - acpi_namespace_node *node; + acpi_status status; + struct acpi_namespace_node *node; - /* Buffer pointer must be valid always */ + /* Parameter validation */ - if (!ret_path_ptr || (name_type > ACPI_NAME_TYPE_MAX)) { + if (name_type > ACPI_NAME_TYPE_MAX) { return (AE_BAD_PARAMETER); } - /* Allow length to be zero and ignore the pointer */ - - if ((ret_path_ptr->length) && - (!ret_path_ptr->pointer)) { - return (AE_BAD_PARAMETER); + status = acpi_ut_validate_buffer (buffer); + if (ACPI_FAILURE (status)) { + return (status); } if (name_type == ACPI_FULL_PATHNAME) { /* Get the full pathname (From the namespace root) */ - status = acpi_ns_handle_to_pathname (handle, &ret_path_ptr->length, - ret_path_ptr->pointer); + status = acpi_ns_handle_to_pathname (handle, buffer); return (status); } /* * Wants the single segment ACPI name. - * Validate handle and convert to an Node + * Validate handle and convert to a namespace Node */ - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } + node = acpi_ns_map_handle_to_node (handle); if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } - /* Check if name will fit in buffer */ + /* Validate/Allocate/Clear caller buffer */ - if (ret_path_ptr->length < PATH_SEGMENT_LENGTH) { - ret_path_ptr->length = PATH_SEGMENT_LENGTH; - status = AE_BUFFER_OVERFLOW; + status = acpi_ut_initialize_buffer (buffer, ACPI_PATH_SEGMENT_LENGTH); + if (ACPI_FAILURE (status)) { goto unlock_and_exit; } /* Just copy the ACPI name from the Node and zero terminate it */ - STRNCPY (ret_path_ptr->pointer, (NATIVE_CHAR *) &node->name, + ACPI_STRNCPY (buffer->pointer, node->name.ascii, ACPI_NAME_SIZE); - ((NATIVE_CHAR *) ret_path_ptr->pointer) [ACPI_NAME_SIZE] = 0; + ((char *) buffer->pointer) [ACPI_NAME_SIZE] = 0; status = AE_OK; unlock_and_exit: - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return (status); } /**************************************************************************** * - * FUNCTION: Acpi_get_object_info + * FUNCTION: acpi_get_object_info * * PARAMETERS: Handle - Object Handle * Info - Where the info is returned @@ -209,15 +228,15 @@ acpi_status acpi_get_object_info ( - acpi_handle handle, - acpi_device_info *info) + acpi_handle handle, + struct acpi_device_info *info) { - acpi_device_id hid; - acpi_device_id uid; - acpi_status status; - u32 device_status = 0; - acpi_integer address = 0; - acpi_namespace_node *node; + struct acpi_device_id hid; + struct acpi_device_id uid; + acpi_status status; + u32 device_status = 0; + acpi_integer address = 0; + struct acpi_namespace_node *node; /* Parameter validation */ @@ -226,18 +245,24 @@ return (AE_BAD_PARAMETER); } - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } node = acpi_ns_map_handle_to_node (handle); if (!node) { - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return (AE_BAD_PARAMETER); } - info->type = node->type; - info->name = node->name; + info->type = node->type; + info->name = node->name.integer; - acpi_ut_release_mutex 
(ACPI_MTX_NAMESPACE); + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } /* * If not a device, we are all done. @@ -260,8 +285,7 @@ status = acpi_ut_execute_HID (node, &hid); if (ACPI_SUCCESS (status)) { - STRNCPY (info->hardware_id, hid.buffer, sizeof(info->hardware_id)); - + ACPI_STRNCPY (info->hardware_id, hid.buffer, sizeof(info->hardware_id)); info->valid |= ACPI_VALID_HID; } @@ -269,8 +293,7 @@ status = acpi_ut_execute_UID (node, &uid); if (ACPI_SUCCESS (status)) { - STRCPY (info->unique_id, uid.buffer); - + ACPI_STRCPY (info->unique_id, uid.buffer); info->valid |= ACPI_VALID_UID; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/namespace/nsxfobj.c linux.21rc5-ac2/drivers/acpi/namespace/nsxfobj.c --- linux.21rc5/drivers/acpi/namespace/nsxfobj.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/namespace/nsxfobj.c 2003-04-28 17:59:26.000000000 +0100 @@ -2,345 +2,60 @@ * * Module Name: nsxfobj - Public interfaces to the ACPI subsystem * ACPI Object oriented interfaces - * $Revision: 95 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "acdispat.h" +#include +#include #define _COMPONENT ACPI_NAMESPACE - MODULE_NAME ("nsxfobj") - - -/******************************************************************************* - * - * FUNCTION: Acpi_evaluate_object - * - * PARAMETERS: Handle - Object handle (optional) - * *Pathname - Object pathname (optional) - * **External_params - List of parameters to pass to method, - * terminated by NULL. May be NULL - * if no parameters are being passed. - * *Return_buffer - Where to put method's return value (if - * any). If NULL, no value is returned. - * - * RETURN: Status - * - * DESCRIPTION: Find and evaluate the given object, passing the given - * parameters if necessary. One of "Handle" or "Pathname" must - * be valid (non-null) - * - ******************************************************************************/ - -acpi_status -acpi_evaluate_object ( - acpi_handle handle, - acpi_string pathname, - acpi_object_list *external_params, - acpi_buffer *return_buffer) -{ - acpi_status status; - acpi_operand_object **internal_params = NULL; - acpi_operand_object *internal_return_obj = NULL; - u32 buffer_space_needed; - u32 user_buffer_length; - u32 i; - - - FUNCTION_TRACE ("Acpi_evaluate_object"); - - - /* - * If there are parameters to be passed to the object - * (which must be a control method), the external objects - * must be converted to internal objects - */ - if (external_params && external_params->count) { - /* - * Allocate a new parameter block for the internal objects - * Add 1 to count to allow for null terminated internal list - */ - internal_params = ACPI_MEM_CALLOCATE ((external_params->count + 1) * sizeof (void *)); - if (!internal_params) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - - /* - * Convert each external object in the list to an - * internal object - */ - for (i = 0; i < external_params->count; i++) { - status = acpi_ut_copy_eobject_to_iobject (&external_params->pointer[i], - &internal_params[i]); - - if (ACPI_FAILURE (status)) { - acpi_ut_delete_internal_object_list (internal_params); - return_ACPI_STATUS (status); - } - } - internal_params[external_params->count] = NULL; - } - - - /* - * Three major cases: - * 1) Fully qualified pathname - * 2) No handle, not fully qualified pathname (error) - * 3) Valid handle - */ - if ((pathname) && - (acpi_ns_valid_root_prefix (pathname[0]))) { - /* - * The path is fully qualified, just evaluate by name - */ - status = acpi_ns_evaluate_by_name (pathname, internal_params, &internal_return_obj); - } - - else if (!handle) { - /* - * A handle is optional iff a fully qualified pathname - * is specified. 
Since we've already handled fully - * qualified names above, this is an error - */ - if (!pathname) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Both Handle and Pathname are NULL\n")); - } - - else { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Handle is NULL and Pathname is relative\n")); - } - - status = AE_BAD_PARAMETER; - } - - else { - /* - * We get here if we have a handle -- and if we have a - * pathname it is relative. The handle will be validated - * in the lower procedures - */ - if (!pathname) { - /* - * The null pathname case means the handle is for - * the actual object to be evaluated - */ - status = acpi_ns_evaluate_by_handle (handle, internal_params, &internal_return_obj); - } - - else { - /* - * Both a Handle and a relative Pathname - */ - status = acpi_ns_evaluate_relative (handle, pathname, internal_params, - &internal_return_obj); - } - } - - - /* - * If we are expecting a return value, and all went well above, - * copy the return value to an external object. - */ - - if (return_buffer) { - user_buffer_length = return_buffer->length; - return_buffer->length = 0; - - if (internal_return_obj) { - if (VALID_DESCRIPTOR_TYPE (internal_return_obj, ACPI_DESC_TYPE_NAMED)) { - /* - * If we got an Node as a return object, - * this means the object we are evaluating - * has nothing interesting to return (such - * as a mutex, etc.) We return an error - * because these types are essentially - * unsupported by this interface. We - * don't check up front because this makes - * it easier to add support for various - * types at a later date if necessary. - */ - status = AE_TYPE; - internal_return_obj = NULL; /* No need to delete an Node */ - } - - if (ACPI_SUCCESS (status)) { - /* - * Find out how large a buffer is needed - * to contain the returned object - */ - status = acpi_ut_get_object_size (internal_return_obj, - &buffer_space_needed); - if (ACPI_SUCCESS (status)) { - /* - * Check if there is enough room in the - * caller's buffer - */ - if (user_buffer_length < buffer_space_needed) { - /* - * Caller's buffer is too small, can't - * give him partial results fail the call - * but return the buffer size needed - */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Needed buffer size %X, received %X\n", - buffer_space_needed, user_buffer_length)); - - return_buffer->length = buffer_space_needed; - status = AE_BUFFER_OVERFLOW; - } - - else { - /* - * We have enough space for the object, build it - */ - status = acpi_ut_copy_iobject_to_eobject (internal_return_obj, - return_buffer); - return_buffer->length = buffer_space_needed; - } - } - } - } - } - - - /* Delete the return and parameter objects */ - - if (internal_return_obj) { - /* - * Delete the internal return object. (Or at least - * decrement the reference count by one) - */ - acpi_ut_remove_reference (internal_return_obj); - } - - /* - * Free the input parameter list (if we created one), - */ - if (internal_params) { - /* Free the allocated parameter block */ - - acpi_ut_delete_internal_object_list (internal_params); - } - - return_ACPI_STATUS (status); -} - + ACPI_MODULE_NAME ("nsxfobj") /******************************************************************************* * - * FUNCTION: Acpi_get_next_object - * - * PARAMETERS: Type - Type of object to be searched for - * Parent - Parent object whose children we are getting - * Last_child - Previous child that was found. - * The NEXT child will be returned - * Ret_handle - Where handle to the next object is placed - * - * RETURN: Status - * - * DESCRIPTION: Return the next peer object within the namespace. 
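[Editorial aside: a hedged sketch of iterating peers with acpi_get_next_object as just described; the helper is hypothetical and assumes AE_NOT_FOUND terminates the scan:]

static void
example_list_children(acpi_handle parent)
{
	acpi_handle child = NULL;

	/* A NULL child starts at the beginning of the scope; each success
	 * hands back the next peer until AE_NOT_FOUND ends the loop. */
	while (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_ANY, parent,
						 child, &child))) {
		/* inspect 'child' here */
	}
}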
If Handle is - * valid, Scope is ignored. Otherwise, the first object within - * Scope is returned. - * - ******************************************************************************/ - -acpi_status -acpi_get_next_object ( - acpi_object_type type, - acpi_handle parent, - acpi_handle child, - acpi_handle *ret_handle) -{ - acpi_status status = AE_OK; - acpi_namespace_node *node; - acpi_namespace_node *parent_node = NULL; - acpi_namespace_node *child_node = NULL; - - - /* Parameter validation */ - - if (type > ACPI_TYPE_MAX) { - return (AE_BAD_PARAMETER); - } - - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); - - /* If null handle, use the parent */ - - if (!child) { - /* Start search at the beginning of the specified scope */ - - parent_node = acpi_ns_map_handle_to_node (parent); - if (!parent_node) { - status = AE_BAD_PARAMETER; - goto unlock_and_exit; - } - } - - /* Non-null handle, ignore the parent */ - - else { - /* Convert and validate the handle */ - - child_node = acpi_ns_map_handle_to_node (child); - if (!child_node) { - status = AE_BAD_PARAMETER; - goto unlock_and_exit; - } - } - - - /* Internal function does the real work */ - - node = acpi_ns_get_next_node ((acpi_object_type8) type, - parent_node, child_node); - if (!node) { - status = AE_NOT_FOUND; - goto unlock_and_exit; - } - - if (ret_handle) { - *ret_handle = acpi_ns_convert_entry_to_handle (node); - } - - -unlock_and_exit: - - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); - return (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_get_type + * FUNCTION: acpi_get_type * * PARAMETERS: Handle - Handle of object whose type is desired - * *Ret_type - Where the type will be placed + * *ret_type - Where the type will be placed * * RETURN: Status * @@ -350,10 +65,11 @@ acpi_status acpi_get_type ( - acpi_handle handle, - acpi_object_type *ret_type) + acpi_handle handle, + acpi_object_type *ret_type) { - acpi_namespace_node *node; + struct acpi_namespace_node *node; + acpi_status status; /* Parameter Validation */ @@ -371,30 +87,33 @@ return (AE_OK); } - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } /* Convert and validate the handle */ node = acpi_ns_map_handle_to_node (handle); if (!node) { - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return (AE_BAD_PARAMETER); } *ret_type = node->type; - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); - return (AE_OK); + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return (status); } /******************************************************************************* * - * FUNCTION: Acpi_get_parent + * FUNCTION: acpi_get_parent * * PARAMETERS: Handle - Handle of object whose parent is desired - * Ret_handle - Where the parent handle will be placed + * ret_handle - Where the parent handle will be placed * * RETURN: Status * @@ -405,11 +124,11 @@ acpi_status acpi_get_parent ( - acpi_handle handle, - acpi_handle *ret_handle) + acpi_handle handle, + acpi_handle *ret_handle) { - acpi_namespace_node *node; - acpi_status status = AE_OK; + struct acpi_namespace_node *node; + acpi_status status; if (!ret_handle) { @@ -422,8 +141,10 @@ return (AE_NULL_ENTRY); } - - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } /* Convert and validate the handle */ @@ -433,237 +154,106 @@ goto 
unlock_and_exit; } - /* Get the parent entry */ *ret_handle = - acpi_ns_convert_entry_to_handle (acpi_ns_get_parent_object (node)); + acpi_ns_convert_entry_to_handle (acpi_ns_get_parent_node (node)); - /* Return exeption if parent is null */ + /* Return exception if parent is null */ - if (!acpi_ns_get_parent_object (node)) { + if (!acpi_ns_get_parent_node (node)) { status = AE_NULL_ENTRY; } unlock_and_exit: - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); return (status); } /******************************************************************************* * - * FUNCTION: Acpi_walk_namespace - * - * PARAMETERS: Type - acpi_object_type to search for - * Start_object - Handle in namespace where search begins - * Max_depth - Depth to which search is to reach - * User_function - Called when an object of "Type" is found - * Context - Passed to user function - * Return_value - Location where return value of - * User_function is put if terminated early - * - * RETURNS Return value from the User_function if terminated early. - * Otherwise, returns NULL. - * - * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, - * starting (and ending) at the object specified by Start_handle. - * The User_function is called whenever an object that matches - * the type parameter is found. If the user function returns - * a non-zero value, the search is terminated immediately and this - * value is returned to the caller. - * - * The point of this procedure is to provide a generic namespace - * walk routine that can be called from multiple places to - * provide multiple services; the User Function can be tailored - * to each task, whether it is a print function, a compare - * function, etc. + * FUNCTION: acpi_get_next_object * - ******************************************************************************/ - -acpi_status -acpi_walk_namespace ( - acpi_object_type type, - acpi_handle start_object, - u32 max_depth, - acpi_walk_callback user_function, - void *context, - void **return_value) -{ - acpi_status status; - - - FUNCTION_TRACE ("Acpi_walk_namespace"); - - - /* Parameter validation */ - - if ((type > ACPI_TYPE_MAX) || - (!max_depth) || - (!user_function)) { - return_ACPI_STATUS (AE_BAD_PARAMETER); - } - - /* - * Lock the namespace around the walk. - * The namespace will be unlocked/locked around each call - * to the user function - since this function - * must be allowed to make Acpi calls itself. - */ - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); - status = acpi_ns_walk_namespace ((acpi_object_type8) type, start_object, - max_depth, NS_WALK_UNLOCK, user_function, context, - return_value); - - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); - - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ns_get_device_callback - * - * PARAMETERS: Callback from Acpi_get_device + * PARAMETERS: Type - Type of object to be searched for + * Parent - Parent object whose children we are getting + * last_child - Previous child that was found. + * The NEXT child will be returned + * ret_handle - Where handle to the next object is placed * * RETURN: Status * - * DESCRIPTION: Takes callbacks from Walk_namespace and filters out all non- - * present devices, or if they specified a HID, it filters based - * on that. + * DESCRIPTION: Return the next peer object within the namespace. If Handle is + * valid, Scope is ignored. Otherwise, the first object within + * Scope is returned. 
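[Editorial aside: acpi_get_parent above reports AE_NULL_ENTRY once the root is reached, which makes upward walks terminate cleanly. A minimal sketch; the depth helper is illustrative:]

static u32
example_node_depth(acpi_handle node)
{
	acpi_handle parent;
	u32 depth = 0;

	/* AE_NULL_ENTRY at the namespace root stops the climb */
	while (ACPI_SUCCESS(acpi_get_parent(node, &parent))) {
		node = parent;
		depth++;
	}
	return depth;
}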
* ******************************************************************************/ -static acpi_status -acpi_ns_get_device_callback ( - acpi_handle obj_handle, - u32 nesting_level, - void *context, - void **return_value) +acpi_status +acpi_get_next_object ( + acpi_object_type type, + acpi_handle parent, + acpi_handle child, + acpi_handle *ret_handle) { - acpi_status status; - acpi_namespace_node *node; - u32 flags; - acpi_device_id device_id; - acpi_get_devices_info *info; + acpi_status status; + struct acpi_namespace_node *node; + struct acpi_namespace_node *parent_node = NULL; + struct acpi_namespace_node *child_node = NULL; - info = context; - - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); - node = acpi_ns_map_handle_to_node (obj_handle); - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + /* Parameter validation */ - if (!node) { + if (type > ACPI_TYPE_EXTERNAL_MAX) { return (AE_BAD_PARAMETER); } - /* - * Run _STA to determine if device is present - */ - status = acpi_ut_execute_STA (node, &flags); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); if (ACPI_FAILURE (status)) { - return (AE_CTRL_DEPTH); + return (status); } - if (!(flags & 0x01)) { - /* don't return at the device or children of the device if not there */ - return (AE_CTRL_DEPTH); - } + /* If null handle, use the parent */ - /* - * Filter based on device HID - */ - if (info->hid != NULL) { - status = acpi_ut_execute_HID (node, &device_id); - if (status == AE_NOT_FOUND) { - return (AE_OK); - } + if (!child) { + /* Start search at the beginning of the specified scope */ - else if (ACPI_FAILURE (status)) { - return (AE_CTRL_DEPTH); + parent_node = acpi_ns_map_handle_to_node (parent); + if (!parent_node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; } + } + else { + /* Non-null handle, ignore the parent */ + /* Convert and validate the handle */ - if (STRNCMP (device_id.buffer, info->hid, sizeof (device_id.buffer)) != 0) { - return (AE_OK); + child_node = acpi_ns_map_handle_to_node (child); + if (!child_node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; } } - info->user_function (obj_handle, nesting_level, info->context, return_value); - return (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_get_devices - * - * PARAMETERS: HID - HID to search for. Can be NULL. - * User_function - Called when a matching object is found - * Context - Passed to user function - * Return_value - Location where return value of - * User_function is put if terminated early - * - * RETURNS Return value from the User_function if terminated early. - * Otherwise, returns NULL. - * - * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, - * starting (and ending) at the object specified by Start_handle. - * The User_function is called whenever an object that matches - * the type parameter is found. If the user function returns - * a non-zero value, the search is terminated immediately and this - * value is returned to the caller. - * - * This is a wrapper for Walk_namespace, but the callback performs - * additional filtering. Please see Acpi_get_device_callback. 
- * - ******************************************************************************/ - -acpi_status -acpi_get_devices ( - NATIVE_CHAR *HID, - acpi_walk_callback user_function, - void *context, - void **return_value) -{ - acpi_status status; - acpi_get_devices_info info; - - - FUNCTION_TRACE ("Acpi_get_devices"); - - - /* Parameter validation */ + /* Internal function does the real work */ - if (!user_function) { - return_ACPI_STATUS (AE_BAD_PARAMETER); + node = acpi_ns_get_next_node (type, parent_node, child_node); + if (!node) { + status = AE_NOT_FOUND; + goto unlock_and_exit; } - /* - * We're going to call their callback from OUR callback, so we need - * to know what it is, and their context parameter. - */ - info.context = context; - info.user_function = user_function; - info.hid = HID; + if (ret_handle) { + *ret_handle = acpi_ns_convert_entry_to_handle (node); + } - /* - * Lock the namespace around the walk. - * The namespace will be unlocked/locked around each call - * to the user function - since this function - * must be allowed to make Acpi calls itself. - */ - acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); - status = acpi_ns_walk_namespace (ACPI_TYPE_DEVICE, - ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, - NS_WALK_UNLOCK, - acpi_ns_get_device_callback, &info, - return_value); - acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); +unlock_and_exit: - return_ACPI_STATUS (status); + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return (status); } + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/numa.c linux.21rc5-ac2/drivers/acpi/numa.c --- linux.21rc5/drivers/acpi/numa.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/numa.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,187 @@ +/* + * acpi_numa.c - ACPI NUMA support + * + * Copyright (C) 2002 Takayoshi Kochi + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include +#include +#include +#include +#include +#include + +#define PREFIX "ACPI: " + +extern int __init acpi_table_parse_madt_family (enum acpi_table_id id, unsigned long madt_size, int entry_id, acpi_madt_entry_handler handler); + +void __init +acpi_table_print_srat_entry ( + acpi_table_entry_header *header) +{ + if (!header) + return; + + switch (header->type) { + + case ACPI_SRAT_PROCESSOR_AFFINITY: + { + struct acpi_table_processor_affinity *p = + (struct acpi_table_processor_affinity*) header; + printk(KERN_INFO PREFIX "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", + p->apic_id, p->lsapic_eid, p->proximity_domain, + p->flags.enabled?"enabled":"disabled"); + } + break; + + case ACPI_SRAT_MEMORY_AFFINITY: + { + struct acpi_table_memory_affinity *p = + (struct acpi_table_memory_affinity*) header; + printk(KERN_INFO PREFIX "SRAT Memory (0x%08x%08x length 0x%08x%08x type 0x%x) in proximity domain %d %s%s\n", + p->base_addr_hi, p->base_addr_lo, p->length_hi, p->length_lo, + p->memory_type, p->proximity_domain, + p->flags.enabled ? "enabled" : "disabled", + p->flags.hot_pluggable ? " hot-pluggable" : ""); + } + break; + + default: + printk(KERN_WARNING PREFIX "Found unsupported SRAT entry (type = 0x%x)\n", + header->type); + break; + } +} + + +static int __init +acpi_parse_slit (unsigned long phys_addr, unsigned long size) +{ + struct acpi_table_slit *slit; + u32 localities; + + if (!phys_addr || !size) + return -EINVAL; + + slit = (struct acpi_table_slit *) __va(phys_addr); + + /* downcast just for %llu vs %lu for i386/ia64 */ + localities = (u32) slit->localities; + + printk(KERN_INFO PREFIX "SLIT localities %ux%u\n", localities, localities); + + acpi_numa_slit_init(slit); + + return 0; +} + + +static int __init +acpi_parse_processor_affinity (acpi_table_entry_header *header) +{ + struct acpi_table_processor_affinity *processor_affinity = NULL; + + processor_affinity = (struct acpi_table_processor_affinity*) header; + if (!processor_affinity) + return -EINVAL; + + acpi_table_print_srat_entry(header); + + /* let architecture-dependent part to do it */ + acpi_numa_processor_affinity_init(processor_affinity); + + return 0; +} + + +static int __init +acpi_parse_memory_affinity (acpi_table_entry_header *header) +{ + struct acpi_table_memory_affinity *memory_affinity = NULL; + + memory_affinity = (struct acpi_table_memory_affinity*) header; + if (!memory_affinity) + return -EINVAL; + + acpi_table_print_srat_entry(header); + + /* let architecture-dependent part to do it */ + acpi_numa_memory_affinity_init(memory_affinity); + + return 0; +} + + +static int __init +acpi_parse_srat (unsigned long phys_addr, unsigned long size) +{ + struct acpi_table_srat *srat = NULL; + + if (!phys_addr || !size) + return -EINVAL; + + srat = (struct acpi_table_srat *) __va(phys_addr); + + printk(KERN_INFO PREFIX "SRAT revision %d\n", srat->table_revision); + + return 0; +} + + +int __init +acpi_table_parse_srat ( + enum acpi_srat_entry_id id, + acpi_madt_entry_handler handler) +{ + return acpi_table_parse_madt_family(ACPI_SRAT, sizeof(struct acpi_table_srat), + id, handler); +} + + +int __init +acpi_numa_init() +{ + int result; + + /* SRAT: Static Resource Affinity Table */ + result = 
acpi_table_parse(ACPI_SRAT, acpi_parse_srat); + + if (result > 0) { + result = acpi_table_parse_srat(ACPI_SRAT_PROCESSOR_AFFINITY, + acpi_parse_processor_affinity); + result = acpi_table_parse_srat(ACPI_SRAT_MEMORY_AFFINITY, + acpi_parse_memory_affinity); + } else { + /* FIXME */ + printk("Warning: acpi_table_parse(ACPI_SRAT) returned %d!\n",result); + } + + /* SLIT: System Locality Information Table */ + result = acpi_table_parse(ACPI_SLIT, acpi_parse_slit); + if (result < 1) { + /* FIXME */ + printk("Warning: acpi_table_parse(ACPI_SLIT) returned %d!\n",result); + } + + acpi_numa_arch_fixup(); + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/os.c linux.21rc5-ac2/drivers/acpi/os.c --- linux.21rc5/drivers/acpi/os.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/os.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,920 +0,0 @@ -/****************************************************************************** - * - * Module Name: os.c - Linux OSL functions - * $Revision: 49 $ - * - *****************************************************************************/ - -/* - * os.c - OS-dependent functions - * - * Copyright (C) 2000 Andrew Henroid - * Copyright (C) 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -/* Changes - * - * Christopher Liebman 2001-5-15 - * - Fixed improper kernel_thread parameters - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_ACPI_EFI -#include -#endif - -#ifdef _IA64 -#include -#endif - -#define _COMPONENT ACPI_OS_SERVICES - MODULE_NAME ("os") - -typedef struct -{ - OSD_EXECUTION_CALLBACK function; - void *context; -} ACPI_OS_DPC; - - -/***************************************************************************** - * Debugger Stuff - *****************************************************************************/ - -#ifdef ENABLE_DEBUGGER - -#include - -/* stuff for debugger support */ -int acpi_in_debugger = 0; -extern NATIVE_CHAR line_buf[80]; - -#endif - - -/***************************************************************************** - * Globals - *****************************************************************************/ - -static int acpi_irq_irq = 0; -static OSD_HANDLER acpi_irq_handler = NULL; -static void *acpi_irq_context = NULL; - - -/****************************************************************************** - * Functions - *****************************************************************************/ - -acpi_status -acpi_os_initialize(void) -{ - return AE_OK; -} - -acpi_status -acpi_os_terminate(void) -{ - if (acpi_irq_handler) { - acpi_os_remove_interrupt_handler(acpi_irq_irq, - acpi_irq_handler); - } - - return AE_OK; -} - -s32 -acpi_os_printf(const NATIVE_CHAR *fmt,...) 
-{ - s32 size; - va_list args; - va_start(args, fmt); - size = acpi_os_vprintf(fmt, args); - va_end(args); - - return size; -} - -s32 -acpi_os_vprintf(const NATIVE_CHAR *fmt, va_list args) -{ - static char buffer[512]; - int size = vsprintf(buffer, fmt, args); - -#ifdef ENABLE_DEBUGGER - if (acpi_in_debugger) { - kdb_printf("%s", buffer); - } else { - printk("%s", buffer); - } -#else - printk("%s", buffer); -#endif - - return size; -} - -void * -acpi_os_allocate(u32 size) -{ - return kmalloc(size, GFP_KERNEL); -} - -void * -acpi_os_callocate(u32 size) -{ - void *ptr = acpi_os_allocate(size); - if (ptr) - memset(ptr, 0, size); - - return ptr; -} - -void -acpi_os_free(void *ptr) -{ - kfree(ptr); -} - - -acpi_status -acpi_os_get_root_pointer(u32 flags, ACPI_PHYSICAL_ADDRESS *phys_addr) -{ -#ifndef CONFIG_ACPI_EFI - if (ACPI_FAILURE(acpi_find_root_pointer(flags, phys_addr))) { - printk(KERN_ERR "ACPI: System description tables not found\n"); - return AE_ERROR; - } -#else /*CONFIG_ACPI_EFI*/ - if (efi.acpi20) - *phys_addr = (ACPI_PHYSICAL_ADDRESS) efi.acpi20; - else if (efi.acpi) - *phys_addr = (ACPI_PHYSICAL_ADDRESS) efi.acpi; - else { - printk(KERN_ERR "ACPI: System description tables not found\n"); - *phys_addr = NULL; - return AE_ERROR; - } -#endif /*CONFIG_ACPI_EFI*/ - - return AE_OK; -} - -acpi_status -acpi_os_map_memory(ACPI_PHYSICAL_ADDRESS phys, u32 size, void **virt) -{ - if (phys > ULONG_MAX) { - printk(KERN_ERR "ACPI: Cannot map memory that high\n"); - return AE_ERROR; - } - - *virt = ioremap((unsigned long) phys, size); - if (!*virt) - return AE_ERROR; - - return AE_OK; -} - -void -acpi_os_unmap_memory(void *virt, u32 size) -{ - iounmap(virt); -} - -acpi_status -acpi_os_get_physical_address(void *virt, ACPI_PHYSICAL_ADDRESS *phys) -{ - if(!phys || !virt) - return AE_BAD_PARAMETER; - - *phys = virt_to_phys(virt); - - return AE_OK; -} - -static void -acpi_irq(int irq, void *dev_id, struct pt_regs *regs) -{ - (*acpi_irq_handler)(acpi_irq_context); -} - -acpi_status -acpi_os_install_interrupt_handler(u32 irq, OSD_HANDLER handler, void *context) -{ -#ifdef _IA64 - irq = isa_irq_to_vector(irq); -#endif /*_IA64*/ - acpi_irq_irq = irq; - acpi_irq_handler = handler; - acpi_irq_context = context; - if (request_irq(irq, - acpi_irq, - SA_SHIRQ, - "acpi", - acpi_irq)) { - printk(KERN_ERR "ACPI: SCI (IRQ%d) allocation failed\n", irq); - return AE_ERROR; - } - - return AE_OK; -} - -acpi_status -acpi_os_remove_interrupt_handler(u32 irq, OSD_HANDLER handler) -{ - if (acpi_irq_handler) { -#ifdef _IA64 - irq = isa_irq_to_vector(irq); -#endif /*_IA64*/ - free_irq(irq, acpi_irq); - acpi_irq_handler = NULL; - } - - return AE_OK; -} - -/* - * Running in interpreter thread context, safe to sleep - */ - -void -acpi_os_sleep(u32 sec, u32 ms) -{ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(HZ * sec + (ms * HZ) / 1000); -} - -void -acpi_os_stall(u32 us) -{ - if (us > 10000) { - mdelay(us / 1000); - } - else { - udelay(us); - } -} - -acpi_status -acpi_os_read_port( - ACPI_IO_ADDRESS port, - void *value, - u32 width) -{ - u32 dummy; - - if (!value) - value = &dummy; - - switch (width) - { - case 8: - *(u8*) value = inb(port); - break; - case 16: - *(u16*) value = inw(port); - break; - case 32: - *(u32*) value = inl(port); - break; - default: - BUG(); - } - - return AE_OK; -} - -acpi_status -acpi_os_write_port( - ACPI_IO_ADDRESS port, - NATIVE_UINT value, - u32 width) -{ - switch (width) - { - case 8: - outb(value, port); - break; - case 16: - outw(value, port); - break; - case 32: - outl(value, 
port); - break; - default: - BUG(); - } - - return AE_OK; -} - -acpi_status -acpi_os_read_memory( - ACPI_PHYSICAL_ADDRESS phys_addr, - void *value, - u32 width) -{ - u32 dummy; - - if (!value) - value = &dummy; - - switch (width) - { - case 8: - *(u8*) value = *(u8*) phys_to_virt(phys_addr); - break; - case 16: - *(u16*) value = *(u16*) phys_to_virt(phys_addr); - break; - case 32: - *(u32*) value = *(u32*) phys_to_virt(phys_addr); - break; - default: - BUG(); - } - - return AE_OK; -} - -acpi_status -acpi_os_write_memory( - ACPI_PHYSICAL_ADDRESS phys_addr, - NATIVE_UINT value, - u32 width) -{ - switch (width) - { - case 8: - *(u8*) phys_to_virt(phys_addr) = value; - break; - case 16: - *(u16*) phys_to_virt(phys_addr) = value; - break; - case 32: - *(u32*) phys_to_virt(phys_addr) = value; - break; - default: - BUG(); - } - - return AE_OK; -} - - -#ifdef CONFIG_ACPI_PCI - -/* Architecture-dependent low-level PCI configuration access functions. */ -extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *val); -extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 val); - -acpi_status -acpi_os_read_pci_configuration ( - acpi_pci_id *pci_id, - u32 reg, - void *value, - u32 width) -{ - int result = 0; - if (!value) - return AE_ERROR; - - switch (width) - { - case 8: - result = pci_config_read(pci_id->segment, pci_id->bus, - pci_id->device, pci_id->function, reg, 1, value); - break; - case 16: - result = pci_config_read(pci_id->segment, pci_id->bus, - pci_id->device, pci_id->function, reg, 2, value); - break; - case 32: - result = pci_config_read(pci_id->segment, pci_id->bus, - pci_id->device, pci_id->function, reg, 4, value); - break; - default: - BUG(); - } - - return (result ? AE_ERROR : AE_OK); -} - -ACPI_STATUS -acpi_os_write_pci_configuration ( - acpi_pci_id *pci_id, - u32 reg, - NATIVE_UINT value, - u32 width) -{ - int result = 0; - - switch (width) - { - case 8: - result = pci_config_write(pci_id->segment, pci_id->bus, - pci_id->device, pci_id->function, reg, 1, value); - break; - case 16: - result = pci_config_write(pci_id->segment, pci_id->bus, - pci_id->device, pci_id->function, reg, 2, value); - break; - case 32: - result = pci_config_write(pci_id->segment, pci_id->bus, - pci_id->device, pci_id->function, reg, 4, value); - break; - default: - BUG(); - } - - return (result ? 
AE_ERROR : AE_OK); -} - -#else /*CONFIG_ACPI_PCI*/ - -acpi_status -acpi_os_read_pci_configuration ( - acpi_pci_id *pci_id, - u32 reg, - void *value, - u32 width) -{ - int devfn = PCI_DEVFN(pci_id->device, pci_id->function); - struct pci_dev *dev = pci_find_slot(pci_id->bus, devfn); - - if (!value || !dev) - return AE_ERROR; - - switch (width) - { - case 8: - if (pci_read_config_byte(dev, reg, (u8*) value)) - return AE_ERROR; - break; - case 16: - if (pci_read_config_word(dev, reg, (u16*) value)) - return AE_ERROR; - break; - case 32: - if (pci_read_config_dword(dev, reg, (u32*) value)) - return AE_ERROR; - break; - default: - BUG(); - } - - return AE_OK; -} - -acpi_status -acpi_os_write_pci_configuration ( - acpi_pci_id *pci_id, - u32 reg, - NATIVE_UINT value, - u32 width) -{ - int devfn = PCI_DEVFN(pci_id->device, pci_id->function); - struct pci_dev *dev = pci_find_slot(pci_id->bus, devfn); - - if (!dev) - return AE_ERROR; - - switch (width) - { - case 8: - if (pci_write_config_byte(dev, reg, value)) - return AE_ERROR; - break; - case 16: - if (pci_write_config_word(dev, reg, value)) - return AE_ERROR; - break; - case 32: - if (pci_write_config_dword(dev, reg, value)) - return AE_ERROR; - break; - default: - BUG(); - } - - return AE_OK; -} - -#endif /*CONFIG_ACPI_PCI*/ - - -acpi_status -acpi_os_load_module ( - char *module_name) -{ - PROC_NAME("acpi_os_load_module"); - - if (!module_name) - return AE_BAD_PARAMETER; - - if (0 > request_module(module_name)) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to load module [%s].\n", module_name)); - return AE_ERROR; - } - - return AE_OK; -} - -acpi_status -acpi_os_unload_module ( - char *module_name) -{ - if (!module_name) - return AE_BAD_PARAMETER; - - /* TODO: How on Linux? */ - /* this is done automatically for all modules with - use_count = 0, I think. see: MOD_INC_USE_COUNT -ASG */ - - return AE_OK; -} - - -/* - * See acpi_os_queue_for_execution() - */ -static int -acpi_os_queue_exec ( - void *context) -{ - ACPI_OS_DPC *dpc = (ACPI_OS_DPC*)context; - - PROC_NAME("acpi_os_queue_exec"); - - daemonize(); - strcpy(current->comm, "kacpidpc"); - - if (!dpc || !dpc->function) - return AE_BAD_PARAMETER; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Executing function [%p(%p)].\n", dpc->function, dpc->context)); - - dpc->function(dpc->context); - - kfree(dpc); - - return 1; -} - -static void -acpi_os_schedule_exec ( - void *context) -{ - ACPI_OS_DPC *dpc = NULL; - int thread_pid = -1; - - PROC_NAME("acpi_os_schedule_exec"); - - dpc = (ACPI_OS_DPC*)context; - if (!dpc) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n")); - return; - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Creating new thread to run function [%p(%p)].\n", dpc->function, dpc->context)); - - thread_pid = kernel_thread(acpi_os_queue_exec, dpc, - (CLONE_FS | CLONE_FILES | SIGCHLD)); - if (thread_pid < 0) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to kernel_thread() failed.\n")); - acpi_os_free(dpc); - } -} - -acpi_status -acpi_os_queue_for_execution( - u32 priority, - OSD_EXECUTION_CALLBACK function, - void *context) -{ - acpi_status status = AE_OK; - ACPI_OS_DPC *dpc = NULL; - - PROC_NAME("acpi_os_queue_for_execution"); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Scheduling function [%p(%p)] for deferred execution.\n", function, context)); - - if (!function) - return AE_BAD_PARAMETER; - - /* - * Queue via DPC: - * -------------- - * Note that we have to use two different processes for queuing DPCs: - * Interrupt-Level: Use schedule_task; can't spawn a new thread. 
- * Kernel-Level: Spawn a new kernel thread, as schedule_task has - * its limitations (e.g. single-threaded model), and - * all other task queues run at interrupt-level. - */ - switch (priority) { - - case OSD_PRIORITY_GPE: - { - static struct tq_struct task; - - /* - * Allocate/initialize DPC structure. Note that this memory will be - * freed by the callee. - */ - dpc = kmalloc(sizeof(ACPI_OS_DPC), GFP_ATOMIC); - if (!dpc) - return AE_NO_MEMORY; - - dpc->function = function; - dpc->context = context; - - memset(&task, 0, sizeof(struct tq_struct)); - - task.routine = acpi_os_schedule_exec; - task.data = (void*)dpc; - - if (schedule_task(&task) < 0) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to schedule_task() failed.\n")); - status = AE_ERROR; - } - } - break; - - default: - /* - * Allocate/initialize DPC structure. Note that this memory will be - * freed by the callee. - */ - dpc = kmalloc(sizeof(ACPI_OS_DPC), GFP_KERNEL); - if (!dpc) - return AE_NO_MEMORY; - - dpc->function = function; - dpc->context = context; - - acpi_os_schedule_exec(dpc); - break; - } - - return status; -} - - -acpi_status -acpi_os_create_semaphore( - u32 max_units, - u32 initial_units, - acpi_handle *handle) -{ - struct semaphore *sem = NULL; - - PROC_NAME("acpi_os_create_semaphore"); - - sem = acpi_os_callocate(sizeof(struct semaphore)); - if (!sem) - return AE_NO_MEMORY; - - sema_init(sem, initial_units); - - *handle = (acpi_handle*)sem; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Creating semaphore[%p|%d].\n", *handle, initial_units)); - - return AE_OK; -} - - -/* - * TODO: A better way to delete semaphores? Linux doesn't have a - * 'delete_semaphore()' function -- may result in an invalid - * pointer dereference for non-synchronized consumers. Should - * we at least check for blocked threads and signal/cancel them? - */ - -acpi_status -acpi_os_delete_semaphore( - acpi_handle handle) -{ - struct semaphore *sem = (struct semaphore*) handle; - - PROC_NAME("acpi_os_delete_semaphore"); - - if (!sem) - return AE_BAD_PARAMETER; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Deleting semaphore[%p].\n", handle)); - - acpi_os_free(sem); sem = NULL; - - return AE_OK; -} - - -/* - * TODO: The kernel doesn't have a 'down_timeout' function -- had to - * improvise. The process is to sleep for one scheduler quantum - * until the semaphore becomes available. Downside is that this - * may result in starvation for timeout-based waits when there's - * lots of semaphore activity. - * - * TODO: Support for units > 1? - */ -acpi_status -acpi_os_wait_semaphore( - acpi_handle handle, - u32 units, - u32 timeout) -{ - acpi_status status = AE_OK; - struct semaphore *sem = (struct semaphore*)handle; - int ret = 0; - - PROC_NAME("acpi_os_wait_semaphore"); - - if (!sem || (units < 1)) - return AE_BAD_PARAMETER; - - if (units > 1) - return AE_SUPPORT; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Waiting for semaphore[%p|%d|%d]\n", handle, units, timeout)); - - switch (timeout) - { - /* - * No Wait: - * -------- - * A zero timeout value indicates that we shouldn't wait - just - * acquire the semaphore if available otherwise return AE_TIME - * (a.k.a. 'would block'). - */ - case 0: - if(down_trylock(sem)) - status = AE_TIME; - break; - - /* - * Wait Indefinitely: - * ------------------ - */ - case WAIT_FOREVER: - ret = down_interruptible(sem); - if (ret < 0) - status = AE_ERROR; - break; - - /* - * Wait w/ Timeout: - * ---------------- - */ - default: - // TODO: A better timeout algorithm? 
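(The TODO above refers to the polled fallback implemented in the block that follows. A minimal standalone sketch of that idiom, assuming 2.4-era semaphore semantics; the helper name acpi_down_timeout_ms is hypothetical and not part of this patch. Note that down_trylock() returns 0 on success and non-zero on failure, so a test for a negative return value would never observe a failed attempt:)

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/semaphore.h>

/*
 * Illustrative only: poll a semaphore with down_trylock(), sleeping one
 * scheduler tick between attempts, until it is acquired or roughly
 * timeout_ms milliseconds have elapsed. Mirrors the fallback in
 * acpi_os_wait_semaphore(); as noted there, timeout-based waiters can
 * starve when there is heavy semaphore activity.
 */
static int acpi_down_timeout_ms(struct semaphore *sem, int timeout_ms)
{
	static const int quantum_ms = 1000 / HZ;	/* ms slept per tick */
	int remaining = timeout_ms;

	while (down_trylock(sem)) {		/* non-zero: not acquired */
		if (remaining <= 0)
			return -ETIME;		/* gave up: timed out */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);		/* sleep for one tick */
		remaining -= quantum_ms;
	}
	return 0;				/* semaphore acquired */
}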
- { - int i = 0; - static const int quantum_ms = 1000/HZ; - - ret = down_trylock(sem); - for (i = timeout; (i > 0 && ret < 0); i -= quantum_ms) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); - ret = down_trylock(sem); - } - - if (ret != 0) - status = AE_TIME; - } - break; - } - - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Failed to acquire semaphore[%p|%d|%d]\n", handle, units, timeout)); - } - else { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Acquired semaphore[%p|%d|%d]\n", handle, units, timeout)); - } - - return status; -} - - -/* - * TODO: Support for units > 1? - */ -acpi_status -acpi_os_signal_semaphore( - acpi_handle handle, - u32 units) -{ - struct semaphore *sem = (struct semaphore *) handle; - - PROC_NAME("acpi_os_signal_semaphore"); - - if (!sem || (units < 1)) - return AE_BAD_PARAMETER; - - if (units > 1) - return AE_SUPPORT; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Signaling semaphore[%p|%d]\n", handle, units)); - - up(sem); - - return AE_OK; -} - -u32 -acpi_os_get_line(NATIVE_CHAR *buffer) -{ - -#ifdef ENABLE_DEBUGGER - if (acpi_in_debugger) { - u32 chars; - - kdb_read(buffer, sizeof(line_buf)); - - /* remove the CR kdb includes */ - chars = strlen(buffer) - 1; - buffer[chars] = '\0'; - } -#endif - - return 0; -} - -/* - * We just have to assume we're dealing with valid memory - */ - -BOOLEAN -acpi_os_readable(void *ptr, u32 len) -{ - return 1; -} - -BOOLEAN -acpi_os_writable(void *ptr, u32 len) -{ - return 1; -} - -u32 -acpi_os_get_thread_id (void) -{ - if (!in_interrupt()) - return current->pid; - - return 0; -} - -acpi_status -acpi_os_signal ( - u32 function, - void *info) -{ - switch (function) - { - case ACPI_SIGNAL_FATAL: - printk(KERN_ERR "ACPI: Fatal opcode executed\n"); - break; - case ACPI_SIGNAL_BREAKPOINT: - { - char *bp_info = (char*) info; - - printk(KERN_ERR "ACPI breakpoint: %s\n", bp_info); - } - default: - break; - } - - return AE_OK; -} - -acpi_status -acpi_os_breakpoint(NATIVE_CHAR *msg) -{ - acpi_os_printf("breakpoint: %s", msg); - - return AE_OK; -} - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/osl.c linux.21rc5-ac2/drivers/acpi/osl.c --- linux.21rc5/drivers/acpi/osl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/osl.c 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,1013 @@ +/* + * acpi_osl.c - OS-dependent functions ($Revision: 80 $) + * + * Copyright (C) 2000 Andrew Henroid + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_ACPI_EFI +#include +u64 efi_mem_attributes (u64 phys_addr); +#endif + + +#define _COMPONENT ACPI_OS_SERVICES +ACPI_MODULE_NAME ("osl") + +#define PREFIX "ACPI: " + +struct acpi_os_dpc +{ + OSD_EXECUTION_CALLBACK function; + void *context; +}; + + +#ifdef ENABLE_DEBUGGER +#include +/* stuff for debugger support */ +int acpi_in_debugger = 0; +extern char line_buf[80]; +#endif /*ENABLE_DEBUGGER*/ + +static int acpi_irq_irq = 0; +static OSD_HANDLER acpi_irq_handler = NULL; +static void *acpi_irq_context = NULL; + + +acpi_status +acpi_os_initialize(void) +{ + /* + * Initialize PCI configuration space access, as we'll need to access + * it while walking the namespace (bus 0 and root bridges w/ _BBNs). + */ +#ifdef CONFIG_ACPI_PCI + pcibios_config_init(); + if (!pci_config_read || !pci_config_write) { + printk(KERN_ERR PREFIX "Access to PCI configuration space unavailable\n"); + return AE_NULL_ENTRY; + } +#endif + + return AE_OK; +} + +acpi_status +acpi_os_terminate(void) +{ + if (acpi_irq_handler) { + acpi_os_remove_interrupt_handler(acpi_irq_irq, + acpi_irq_handler); + } + + return AE_OK; +} + +void +acpi_os_printf(const char *fmt,...) +{ + va_list args; + va_start(args, fmt); + acpi_os_vprintf(fmt, args); + va_end(args); +} + +void +acpi_os_vprintf(const char *fmt, va_list args) +{ + static char buffer[512]; + + vsprintf(buffer, fmt, args); + +#ifdef ENABLE_DEBUGGER + if (acpi_in_debugger) { + kdb_printf("%s", buffer); + } else { + printk("%s", buffer); + } +#else + printk("%s", buffer); +#endif +} + +void * +acpi_os_allocate(acpi_size size) +{ + return kmalloc(size, GFP_KERNEL); +} + +void +acpi_os_free(void *ptr) +{ + kfree(ptr); +} + +acpi_status +acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr) +{ +#ifdef CONFIG_ACPI_EFI + addr->pointer_type = ACPI_PHYSICAL_POINTER; + if (efi.acpi20) + addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi20); + else if (efi.acpi) + addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi); + else { + printk(KERN_ERR PREFIX "System description tables not found\n"); + return AE_NOT_FOUND; + } +#else + if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) { + printk(KERN_ERR PREFIX "System description tables not found\n"); + return AE_NOT_FOUND; + } +#endif /*CONFIG_ACPI_EFI*/ + + return AE_OK; +} + +acpi_status +acpi_os_map_memory(acpi_physical_address phys, acpi_size size, void **virt) +{ +#ifdef CONFIG_ACPI_EFI + if (EFI_MEMORY_WB & efi_mem_attributes(phys)) { + *virt = phys_to_virt(phys); + } else { + *virt = ioremap(phys, size); + } +#else + if (phys > ULONG_MAX) { + printk(KERN_ERR PREFIX "Cannot map memory that high\n"); + return AE_BAD_PARAMETER; + } + /* + * ioremap checks to ensure this is in reserved space + */ + *virt = ioremap((unsigned long) phys, size); +#endif + + if (!*virt) + return AE_NO_MEMORY; + + return AE_OK; +} + +void +acpi_os_unmap_memory(void *virt, acpi_size size) +{ + iounmap(virt); +} + +acpi_status +acpi_os_get_physical_address(void *virt, acpi_physical_address *phys) +{ + if(!phys || !virt) + return AE_BAD_PARAMETER; + + *phys = virt_to_phys(virt); + + 
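+	/*
+	 * Note: virt_to_phys() is only meaningful for directly mapped
+	 * (lowmem) kernel addresses; pointers obtained from vmalloc()
+	 * or ioremap() are not covered by this translation.
+	 */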
return AE_OK; +} + +#define ACPI_MAX_OVERRIDE_LEN 100 + +static char __initdata acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; + +acpi_status +acpi_os_predefined_override (const struct acpi_predefined_names *init_val, + acpi_string *new_val) +{ + if (!init_val || !new_val) + return AE_BAD_PARAMETER; + + *new_val = NULL; + if (!memcmp (init_val->name, "_OS_", 4) && strlen(acpi_os_name)) { + printk(KERN_INFO PREFIX "Overriding _OS definition: %s\n", + acpi_os_name); + *new_val = acpi_os_name; + } + + return AE_OK; +} + +acpi_status +acpi_os_table_override (struct acpi_table_header *existing_table, + struct acpi_table_header **new_table) +{ + if (!existing_table || !new_table) + return AE_BAD_PARAMETER; + + *new_table = NULL; + return AE_OK; +} + +static void +acpi_irq(int irq, void *dev_id, struct pt_regs *regs) +{ + (*acpi_irq_handler)(acpi_irq_context); +} + +acpi_status +acpi_os_install_interrupt_handler(u32 irq, OSD_HANDLER handler, void *context) +{ + /* + * Ignore the irq from the core, and use the value in our copy of the + * FADT. It may not be the same if an interrupt source override exists + * for the SCI. + */ + irq = acpi_fadt.sci_int; + +#ifdef CONFIG_IA64 + irq = gsi_to_vector(irq); +#endif + acpi_irq_irq = irq; + acpi_irq_handler = handler; + acpi_irq_context = context; + if (request_irq(irq, acpi_irq, SA_SHIRQ, "acpi", acpi_irq)) { + printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); + return AE_NOT_ACQUIRED; + } + + return AE_OK; +} + +acpi_status +acpi_os_remove_interrupt_handler(u32 irq, OSD_HANDLER handler) +{ + if (acpi_irq_handler) { +#ifdef CONFIG_IA64 + irq = gsi_to_vector(irq); +#endif + free_irq(irq, acpi_irq); + acpi_irq_handler = NULL; + } + + return AE_OK; +} + +/* + * Running in interpreter thread context, safe to sleep + */ + +void +acpi_os_sleep(u32 sec, u32 ms) +{ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(HZ * sec + (ms * HZ) / 1000); +} + +void +acpi_os_stall(u32 us) +{ + if (us > 10000) { + mdelay(us / 1000); + } + else { + udelay(us); + } +} + +acpi_status +acpi_os_read_port( + acpi_io_address port, + u32 *value, + u32 width) +{ + u32 dummy; + + if (!value) + value = &dummy; + + switch (width) + { + case 8: + *(u8*) value = inb(port); + break; + case 16: + *(u16*) value = inw(port); + break; + case 32: + *(u32*) value = inl(port); + break; + default: + BUG(); + } + + return AE_OK; +} + +acpi_status +acpi_os_write_port( + acpi_io_address port, + u32 value, + u32 width) +{ + switch (width) + { + case 8: + outb(value, port); + break; + case 16: + outw(value, port); + break; + case 32: + outl(value, port); + break; + default: + BUG(); + } + + return AE_OK; +} + +acpi_status +acpi_os_read_memory( + acpi_physical_address phys_addr, + u32 *value, + u32 width) +{ + u32 dummy; + void *virt_addr; + +#ifdef CONFIG_ACPI_EFI + int iomem = 0; + + if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) { + virt_addr = phys_to_virt(phys_addr); + } else { + iomem = 1; + virt_addr = ioremap(phys_addr, width); + } +#else + virt_addr = phys_to_virt(phys_addr); +#endif + if (!value) + value = &dummy; + + switch (width) { + case 8: + *(u8*) value = *(u8*) virt_addr; + break; + case 16: + *(u16*) value = *(u16*) virt_addr; + break; + case 32: + *(u32*) value = *(u32*) virt_addr; + break; + default: + BUG(); + } + +#ifdef CONFIG_ACPI_EFI + if (iomem) + iounmap(virt_addr); +#endif + + return AE_OK; +} + +acpi_status +acpi_os_write_memory( + acpi_physical_address phys_addr, + u32 value, + u32 width) +{ + void *virt_addr; + +#ifdef CONFIG_ACPI_EFI + int iomem = 0; + + 
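+	/*
+	 * Same policy as acpi_os_read_memory() above: write-back (WB)
+	 * cacheable regions are covered by the kernel's direct mapping,
+	 * so phys_to_virt() suffices; anything else is ioremap()ed for
+	 * this single access and unmapped again below.
+	 */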
if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) { + virt_addr = phys_to_virt(phys_addr); + } else { + iomem = 1; + virt_addr = ioremap(phys_addr, width); + } +#else + virt_addr = phys_to_virt(phys_addr); +#endif + + switch (width) { + case 8: + *(u8*) virt_addr = value; + break; + case 16: + *(u16*) virt_addr = value; + break; + case 32: + *(u32*) virt_addr = value; + break; + default: + BUG(); + } + +#ifdef CONFIG_ACPI_EFI + if (iomem) + iounmap(virt_addr); +#endif + + return AE_OK; +} + +#ifdef CONFIG_ACPI_PCI + +acpi_status +acpi_os_read_pci_configuration ( + struct acpi_pci_id *pci_id, + u32 reg, + void *value, + u32 width) +{ + int result = 0; + if (!value) + return AE_BAD_PARAMETER; + + switch (width) + { + case 8: + result = pci_config_read(pci_id->segment, pci_id->bus, + pci_id->device, pci_id->function, reg, 1, value); + break; + case 16: + result = pci_config_read(pci_id->segment, pci_id->bus, + pci_id->device, pci_id->function, reg, 2, value); + break; + case 32: + result = pci_config_read(pci_id->segment, pci_id->bus, + pci_id->device, pci_id->function, reg, 4, value); + break; + default: + BUG(); + } + + return (result ? AE_ERROR : AE_OK); +} + +acpi_status +acpi_os_write_pci_configuration ( + struct acpi_pci_id *pci_id, + u32 reg, + acpi_integer value, + u32 width) +{ + int result = 0; + + switch (width) + { + case 8: + result = pci_config_write(pci_id->segment, pci_id->bus, + pci_id->device, pci_id->function, reg, 1, value); + break; + case 16: + result = pci_config_write(pci_id->segment, pci_id->bus, + pci_id->device, pci_id->function, reg, 2, value); + break; + case 32: + result = pci_config_write(pci_id->segment, pci_id->bus, + pci_id->device, pci_id->function, reg, 4, value); + break; + default: + BUG(); + } + + return (result ? AE_ERROR : AE_OK); +} + +static void +acpi_os_derive_pci_id_2 ( + acpi_handle rhandle, /* upper bound */ + acpi_handle chandle, /* current node */ + struct acpi_pci_id **id, + int *is_bridge, + u8 *bus_number) +{ + acpi_handle handle; + struct acpi_pci_id *pci_id = *id; + acpi_status status; + unsigned long temp; + acpi_object_type type; + u8 tu8; + + acpi_get_parent(chandle, &handle); + if (handle != rhandle) { + acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge, bus_number); + + status = acpi_get_type(handle, &type); + if ( (ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE) ) + return; + + status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &temp); + if (ACPI_SUCCESS(status)) { + pci_id->device = ACPI_HIWORD (ACPI_LODWORD (temp)); + pci_id->function = ACPI_LOWORD (ACPI_LODWORD (temp)); + + if (*is_bridge) + pci_id->bus = *bus_number; + + /* any nicer way to get bus number of bridge ? */ + status = acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8, 8); + if (ACPI_SUCCESS(status) && + ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) { + status = acpi_os_read_pci_configuration(pci_id, 0x18, &tu8, 8); + if (!ACPI_SUCCESS(status)) { + /* Certainly broken... 
FIX ME */ + return; + } + *is_bridge = 1; + pci_id->bus = tu8; + status = acpi_os_read_pci_configuration(pci_id, 0x19, &tu8, 8); + if (ACPI_SUCCESS(status)) { + *bus_number = tu8; + } + } else + *is_bridge = 0; + } + } +} + +void +acpi_os_derive_pci_id ( + acpi_handle rhandle, /* upper bound */ + acpi_handle chandle, /* current node */ + struct acpi_pci_id **id) +{ + int is_bridge = 1; + u8 bus_number = (*id)->bus; + + acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); +} + +#else /*!CONFIG_ACPI_PCI*/ + +acpi_status +acpi_os_write_pci_configuration ( + struct acpi_pci_id *pci_id, + u32 reg, + acpi_integer value, + u32 width) +{ + return (AE_SUPPORT); +} + +acpi_status +acpi_os_read_pci_configuration ( + struct acpi_pci_id *pci_id, + u32 reg, + void *value, + u32 width) +{ + return (AE_SUPPORT); +} + +void +acpi_os_derive_pci_id ( + acpi_handle rhandle, /* upper bound */ + acpi_handle chandle, /* current node */ + struct acpi_pci_id **id) +{ +} + +#endif /*CONFIG_ACPI_PCI*/ + +static void +acpi_os_execute_deferred ( + void *context) +{ + struct acpi_os_dpc *dpc = NULL; + + ACPI_FUNCTION_TRACE ("os_execute_deferred"); + + dpc = (struct acpi_os_dpc *) context; + if (!dpc) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n")); + return_VOID; + } + + dpc->function(dpc->context); + + kfree(dpc); + + return_VOID; +} + +acpi_status +acpi_os_queue_for_execution( + u32 priority, + OSD_EXECUTION_CALLBACK function, + void *context) +{ + acpi_status status = AE_OK; + struct acpi_os_dpc *dpc = NULL; + struct tq_struct *task; + + ACPI_FUNCTION_TRACE ("os_queue_for_execution"); + + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context)); + + if (!function) + return_ACPI_STATUS (AE_BAD_PARAMETER); + + /* + * Allocate/initialize DPC structure. Note that this memory will be + * freed by the callee. The kernel handles the tq_struct list in a + * way that allows us to also free its memory inside the callee. + * Because we may want to schedule several tasks with different + * parameters we can't use the approach some kernel code uses of + * having a static tq_struct. + * We can save time and code by allocating the DPC and tq_structs + * from the same memory. + */ + dpc = kmalloc(sizeof(struct acpi_os_dpc)+sizeof(struct tq_struct), GFP_ATOMIC); + if (!dpc) + return_ACPI_STATUS (AE_NO_MEMORY); + + dpc->function = function; + dpc->context = context; + + task = (void *)(dpc+1); + INIT_TQUEUE(task, acpi_os_execute_deferred, (void*)dpc); + + if (!schedule_task(task)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to schedule_task() failed.\n")); + kfree(dpc); + status = AE_ERROR; + } + + return_ACPI_STATUS (status); +} + +/* + * Allocate the memory for a spinlock and initialize it. + */ +acpi_status +acpi_os_create_lock ( + acpi_handle *out_handle) +{ + spinlock_t *lock_ptr; + + ACPI_FUNCTION_TRACE ("os_create_lock"); + + lock_ptr = acpi_os_allocate(sizeof(spinlock_t)); + + spin_lock_init(lock_ptr); + + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr)); + + *out_handle = lock_ptr; + + return_ACPI_STATUS (AE_OK); +} + + +/* + * Deallocate the memory for a spinlock. + */ +void +acpi_os_delete_lock ( + acpi_handle handle) +{ + ACPI_FUNCTION_TRACE ("os_create_lock"); + + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle)); + + acpi_os_free(handle); + + return_VOID; +} + +/* + * Acquire a spinlock. + * + * handle is a pointer to the spinlock_t. 
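+ * It is the value handed back as an acpi_handle by acpi_os_create_lock()
+ * above.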
+ * flags is *not* the result of save_flags - it is an ACPI-specific flag variable + * that indicates whether we are at interrupt level. + */ +void +acpi_os_acquire_lock ( + acpi_handle handle, + u32 flags) +{ + ACPI_FUNCTION_TRACE ("os_acquire_lock"); + + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquiring spinlock[%p] from %s level\n", handle, + ((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt"))); + + if (flags & ACPI_NOT_ISR) + ACPI_DISABLE_IRQS(); + + spin_lock(handle); + + return_VOID; +} + + +/* + * Release a spinlock. See above. + */ +void +acpi_os_release_lock ( + acpi_handle handle, + u32 flags) +{ + ACPI_FUNCTION_TRACE ("os_release_lock"); + + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Releasing spinlock[%p] from %s level\n", handle, + ((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt"))); + + spin_unlock(handle); + + if (flags & ACPI_NOT_ISR) + ACPI_ENABLE_IRQS(); + + return_VOID; +} + + +acpi_status +acpi_os_create_semaphore( + u32 max_units, + u32 initial_units, + acpi_handle *handle) +{ + struct semaphore *sem = NULL; + + ACPI_FUNCTION_TRACE ("os_create_semaphore"); + + sem = acpi_os_allocate(sizeof(struct semaphore)); + if (!sem) + return_ACPI_STATUS (AE_NO_MEMORY); + memset(sem, 0, sizeof(struct semaphore)); + + sema_init(sem, initial_units); + + *handle = (acpi_handle*)sem; + + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", *handle, initial_units)); + + return_ACPI_STATUS (AE_OK); +} + + +/* + * TODO: A better way to delete semaphores? Linux doesn't have a + * 'delete_semaphore()' function -- may result in an invalid + * pointer dereference for non-synchronized consumers. Should + * we at least check for blocked threads and signal/cancel them? + */ + +acpi_status +acpi_os_delete_semaphore( + acpi_handle handle) +{ + struct semaphore *sem = (struct semaphore*) handle; + + ACPI_FUNCTION_TRACE ("os_delete_semaphore"); + + if (!sem) + return_ACPI_STATUS (AE_BAD_PARAMETER); + + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); + + acpi_os_free(sem); sem = NULL; + + return_ACPI_STATUS (AE_OK); +} + + +/* + * TODO: The kernel doesn't have a 'down_timeout' function -- had to + * improvise. The process is to sleep for one scheduler quantum + * until the semaphore becomes available. Downside is that this + * may result in starvation for timeout-based waits when there's + * lots of semaphore activity. + * + * TODO: Support for units > 1? + */ +acpi_status +acpi_os_wait_semaphore( + acpi_handle handle, + u32 units, + u16 timeout) +{ + acpi_status status = AE_OK; + struct semaphore *sem = (struct semaphore*)handle; + int ret = 0; + + ACPI_FUNCTION_TRACE ("os_wait_semaphore"); + + if (!sem || (units < 1)) + return_ACPI_STATUS (AE_BAD_PARAMETER); + + if (units > 1) + return_ACPI_STATUS (AE_SUPPORT); + + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", handle, units, timeout)); + + if (in_interrupt()) + timeout = 0; + + switch (timeout) + { + /* + * No Wait: + * -------- + * A zero timeout value indicates that we shouldn't wait - just + * acquire the semaphore if available otherwise return AE_TIME + * (a.k.a. 'would block'). + */ + case 0: + if(down_trylock(sem)) + status = AE_TIME; + break; + + /* + * Wait Indefinitely: + * ------------------ + */ + case ACPI_WAIT_FOREVER: + down(sem); + break; + + /* + * Wait w/ Timeout: + * ---------------- + */ + default: + // TODO: A better timeout algorithm? 
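(A caller-side sketch of the three timeout modes handled by this switch; the handle comes from acpi_os_create_semaphore() earlier in this file, the unit and timeout values are illustrative, error checking is omitted, and the in_interrupt() guard above forces timeout to 0 so interrupt-context callers can never block:)

	acpi_handle sem;
	acpi_status rc;

	acpi_os_create_semaphore(1, 1, &sem);
	rc = acpi_os_wait_semaphore(sem, 1, 0);			/* try once; AE_TIME if busy */
	rc = acpi_os_wait_semaphore(sem, 1, ACPI_WAIT_FOREVER);	/* block via down() */
	rc = acpi_os_wait_semaphore(sem, 1, 50);		/* poll ~50 ms, else AE_TIME */
	acpi_os_signal_semaphore(sem, 1);			/* release one unit */
	acpi_os_delete_semaphore(sem);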
+ { + int i = 0; + static const int quantum_ms = 1000/HZ; + + ret = down_trylock(sem); + for (i = timeout; (i > 0 && ret < 0); i -= quantum_ms) { + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + ret = down_trylock(sem); + } + + if (ret != 0) + status = AE_TIME; + } + break; + } + + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Failed to acquire semaphore[%p|%d|%d], %s\n", + handle, units, timeout, acpi_format_exception(status))); + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquired semaphore[%p|%d|%d]\n", handle, units, timeout)); + } + + return_ACPI_STATUS (status); +} + + +/* + * TODO: Support for units > 1? + */ +acpi_status +acpi_os_signal_semaphore( + acpi_handle handle, + u32 units) +{ + struct semaphore *sem = (struct semaphore *) handle; + + ACPI_FUNCTION_TRACE ("os_signal_semaphore"); + + if (!sem || (units < 1)) + return_ACPI_STATUS (AE_BAD_PARAMETER); + + if (units > 1) + return_ACPI_STATUS (AE_SUPPORT); + + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, units)); + + up(sem); + + return_ACPI_STATUS (AE_OK); +} + +u32 +acpi_os_get_line(char *buffer) +{ + +#ifdef ENABLE_DEBUGGER + if (acpi_in_debugger) { + u32 chars; + + kdb_read(buffer, sizeof(line_buf)); + + /* remove the CR kdb includes */ + chars = strlen(buffer) - 1; + buffer[chars] = '\0'; + } +#endif + + return 0; +} + +/* + * We just have to assume we're dealing with valid memory + */ + +BOOLEAN +acpi_os_readable(void *ptr, u32 len) +{ + return 1; +} + +BOOLEAN +acpi_os_writable(void *ptr, u32 len) +{ + return 1; +} + +u32 +acpi_os_get_thread_id (void) +{ + if (!in_interrupt()) + return current->pid; + + return 0; +} + +acpi_status +acpi_os_signal ( + u32 function, + void *info) +{ + switch (function) + { + case ACPI_SIGNAL_FATAL: + printk(KERN_ERR PREFIX "Fatal opcode executed\n"); + break; + case ACPI_SIGNAL_BREAKPOINT: + { + char *bp_info = (char*) info; + + printk(KERN_ERR "ACPI breakpoint: %s\n", bp_info); + } + default: + break; + } + + return AE_OK; +} + +int __init +acpi_os_name_setup(char *str) +{ + char *p = acpi_os_name; + int count = ACPI_MAX_OVERRIDE_LEN-1; + + if (!str || !*str) + return 0; + + for (; count-- && str && *str; str++) { + if (isalnum(*str) || *str == ' ' || *str == ':') + *p++ = *str; + else if (*str == '\'' || *str == '"') + continue; + else + break; + } + *p = 0; + + return 1; + +} + +__setup("acpi_os_name=", acpi_os_name_setup); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/ac_adapter/ac.c linux.21rc5-ac2/drivers/acpi/ospm/ac_adapter/ac.c --- linux.21rc5/drivers/acpi/ospm/ac_adapter/ac.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/ac_adapter/ac.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,398 +0,0 @@ -/***************************************************************************** - * - * Module Name: ac.c - * $Revision: 23 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include "ac.h" - - -#define _COMPONENT ACPI_AC_ADAPTER - MODULE_NAME ("ac") - - -/**************************************************************************** - * Internal Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: ac_print - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Prints out information on a specific ac_adapter. - * - ****************************************************************************/ - -void -ac_print ( - AC_CONTEXT *ac_adapter) -{ -#ifdef ACPI_DEBUG - - acpi_buffer buffer; - - PROC_NAME("ac_print"); - - if (!ac_adapter) { - return; - } - - buffer.length = 256; - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return; - } - - /* - * Get the full pathname for this ACPI object. - */ - acpi_get_name(ac_adapter->acpi_handle, ACPI_FULL_PATHNAME, &buffer); - - /* - * Print out basic adapter information. - */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| AC Adapter[%02x]:[%p] %s\n", ac_adapter->device_handle, ac_adapter->acpi_handle, (char*)buffer.pointer)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - - acpi_os_free(buffer.pointer); -#endif /*ACPI_DEBUG*/ - - return; -} - - -/**************************************************************************** - * - * FUNCTION: ac_add_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ac_add_device( - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - AC_CONTEXT *ac_adapter = NULL; - acpi_device_info info; - - FUNCTION_TRACE("ac_add_device"); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Adding ac_adapter device [%02x].\n", device_handle)); - - if (!context || *context) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.")); - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Get information on this device. - */ - status = bm_get_device_info(device_handle, &device); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Allocate a new AC_CONTEXT structure. - */ - ac_adapter = acpi_os_callocate(sizeof(AC_CONTEXT)); - if (!ac_adapter) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - ac_adapter->device_handle = device->handle; - ac_adapter->acpi_handle = device->acpi_handle; - - /* - * Get information on this object. - */ - status = acpi_get_object_info(ac_adapter->acpi_handle, &info); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unable to get object info for ac_adapter device.")); - goto end; - } - - /* - * _UID? - * ----- - */ - if (info.valid & ACPI_VALID_UID) { - strncpy(ac_adapter->uid, info.unique_id, sizeof(info.unique_id)); - } - else { - strncpy(ac_adapter->uid, "0", sizeof("0")); - } - - /* - * _STA? 
- * ----- - */ - if (!(info.valid & ACPI_VALID_STA)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Must have valid _STA.\n")); - status = AE_ERROR; - goto end; - } - - status = ac_osl_add_device(ac_adapter); - if (ACPI_FAILURE(status)) { - goto end; - } - - *context = ac_adapter; - - ac_print(ac_adapter); - -end: - if (ACPI_FAILURE(status)) { - acpi_os_free(ac_adapter); - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ac_remove_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ac_remove_device ( - void **context) -{ - acpi_status status = AE_OK; - AC_CONTEXT *ac_adapter = NULL; - - FUNCTION_TRACE("ac_remove_device"); - - if (!context || !*context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - ac_adapter = (AC_CONTEXT*)*context; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing ac_adapter device [%02x].\n", ac_adapter->device_handle)); - - ac_osl_remove_device(ac_adapter); - - acpi_os_free(ac_adapter); - - *context = NULL; - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: ac_initialize - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ac_initialize (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("ac_initialize"); - - MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID)); - MEMSET(&driver, 0, sizeof(BM_DRIVER)); - - driver.notify = &ac_notify; - driver.request = &ac_request; - - /* - * Register driver for AC Adapter devices. - */ - MEMCPY(criteria.hid, AC_HID_AC_ADAPTER, sizeof(AC_HID_AC_ADAPTER)); - - status = bm_register_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ac_terminate - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ac_terminate (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("ac_terminate"); - - MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID)); - MEMSET(&driver, 0, sizeof(BM_DRIVER)); - - /* - * Unregister driver for AC Adapter devices. 
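- * The criteria.hid / driver pair passed to bm_unregister_driver() below
- * mirrors the registration done in ac_initialize() above.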
- */ - MEMCPY(criteria.hid, AC_HID_AC_ADAPTER, sizeof(AC_HID_AC_ADAPTER)); - - driver.notify = &ac_notify; - driver.request = &ac_request; - - status = bm_unregister_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/***************************************************************************** - * - * FUNCTION: ac_notify - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ -acpi_status -ac_notify ( - BM_NOTIFY notify_type, - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("ac_notify"); - - if (!context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - switch (notify_type) { - - case BM_NOTIFY_DEVICE_ADDED: - status = ac_add_device(device_handle, context); - break; - - case BM_NOTIFY_DEVICE_REMOVED: - status = ac_remove_device(context); - break; - - case AC_NOTIFY_STATUS_CHANGE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Status change event detected.\n")); - status = ac_osl_generate_event(notify_type, - ((AC_CONTEXT*)*context)); - break; - - default: - status = AE_SUPPORT; - break; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ac_request - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ac_request ( - BM_REQUEST *request, - void *context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("ac_request"); - - /* - * Must have a valid request structure and context. - */ - if (!request || !context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Handle Request: - * --------------- - */ - switch (request->command) { - - default: - status = AE_SUPPORT; - break; - } - - request->status = status; - - return_ACPI_STATUS(status); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/ac_adapter/ac_osl.c linux.21rc5-ac2/drivers/acpi/ospm/ac_adapter/ac_osl.c --- linux.21rc5/drivers/acpi/ospm/ac_adapter/ac_osl.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/ac_adapter/ac_osl.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,257 +0,0 @@ -/***************************************************************************** - * - * Module Name: ac_osl.c - * $Revision: 10 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include -#include -#include -#include -#include -#include "ac.h" - - -MODULE_AUTHOR("Andrew Grover"); -MODULE_DESCRIPTION("ACPI Component Architecture (CA) - AC Adapter Driver"); -MODULE_LICENSE("GPL"); - - -#define AC_PROC_ROOT "ac_adapter" -#define AC_PROC_STATUS "status" -#define AC_ON_LINE "on-line" -#define AC_OFF_LINE "off-line" - -extern struct proc_dir_entry *bm_proc_root; -static struct proc_dir_entry *ac_proc_root = NULL; - - -/**************************************************************************** - * - * FUNCTION: ac_osl_proc_read_status - * - ****************************************************************************/ - -static int -ac_osl_proc_read_status ( - char *page, - char **start, - off_t off, - int count, - int *eof, - void *context) -{ - acpi_status status = AE_OK; - AC_CONTEXT *ac_adapter = NULL; - char *p = page; - int len; - - if (!context) { - goto end; - } - - ac_adapter = (AC_CONTEXT*)context; - - /* don't get status more than once for a single proc read */ - if (off != 0) { - goto end; - } - - status = bm_evaluate_simple_integer(ac_adapter->acpi_handle, - "_PSR", &(ac_adapter->is_online)); - if (ACPI_FAILURE(status)) { - p += sprintf(p, "Error reading AC Adapter status\n"); - goto end; - } - - if (ac_adapter->is_online) { - p += sprintf(p, "Status: %s\n", - AC_ON_LINE); - } - else { - p += sprintf(p, "Status: %s\n", - AC_OFF_LINE); - } - -end: - len = (p - page); - if (len <= off+count) *eof = 1; - *start = page + off; - len -= off; - if (len>count) len = count; - if (len<0) len = 0; - - return(len); -} - - -/**************************************************************************** - * - * FUNCTION: ac_osl_add_device - * - ****************************************************************************/ - -acpi_status -ac_osl_add_device( - AC_CONTEXT *ac_adapter) -{ - struct proc_dir_entry *proc_entry = NULL; - - if (!ac_adapter) { - return(AE_BAD_PARAMETER); - } - - printk(KERN_INFO "ACPI: AC Adapter found\n"); - - proc_entry = proc_mkdir(ac_adapter->uid, ac_proc_root); - if (!proc_entry) { - return(AE_ERROR); - } - - create_proc_read_entry(AC_PROC_STATUS, S_IFREG | S_IRUGO, - proc_entry, ac_osl_proc_read_status, (void*)ac_adapter); - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: ac_osl_remove_device - * - ****************************************************************************/ - -acpi_status -ac_osl_remove_device ( - AC_CONTEXT *ac_adapter) -{ - char proc_entry[64]; - - if (!ac_adapter) { - return(AE_BAD_PARAMETER); - } - - sprintf(proc_entry, "%s/%s", ac_adapter->uid, AC_PROC_STATUS); - remove_proc_entry(proc_entry, ac_proc_root); - - sprintf(proc_entry, "%s", ac_adapter->uid); - remove_proc_entry(proc_entry, ac_proc_root); - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: ac_osl_generate_event - * - ****************************************************************************/ - -acpi_status -ac_osl_generate_event ( - u32 event, - AC_CONTEXT *ac_adapter) -{ - acpi_status status = AE_OK; - - if (!ac_adapter) { - return(AE_BAD_PARAMETER); - } - - switch (event) { - - case AC_NOTIFY_STATUS_CHANGE: - status = bm_osl_generate_event(ac_adapter->device_handle, - AC_PROC_ROOT, 
ac_adapter->uid, event, 0); - break; - - default: - return(AE_BAD_PARAMETER); - break; - } - - return(status); -} - - -/**************************************************************************** - * - * FUNCTION: ac_osl_init - * - * PARAMETERS: - * - * RETURN: 0: Success - * - * DESCRIPTION: Module initialization. - * - ****************************************************************************/ - -static int __init -ac_osl_init (void) -{ - acpi_status status = AE_OK; - - ac_proc_root = proc_mkdir(AC_PROC_ROOT, bm_proc_root); - if (!ac_proc_root) { - status = AE_ERROR; - } - else { - status = ac_initialize(); - if (ACPI_FAILURE(status)) { - remove_proc_entry(AC_PROC_ROOT, bm_proc_root); - } - - } - - return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; -} - - -/**************************************************************************** - * - * FUNCTION: ac_osl_cleanup - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Module cleanup. - * - ****************************************************************************/ - -static void __exit -ac_osl_cleanup (void) -{ - ac_terminate(); - - if (ac_proc_root) { - remove_proc_entry(AC_PROC_ROOT, bm_proc_root); - } - - return; -} - - -module_init(ac_osl_init); -module_exit(ac_osl_cleanup); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/ac_adapter/Makefile linux.21rc5-ac2/drivers/acpi/ospm/ac_adapter/Makefile --- linux.21rc5/drivers/acpi/ospm/ac_adapter/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/ac_adapter/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,6 +0,0 @@ -O_TARGET := ospm_$(notdir $(CURDIR)).o -obj-m := $(O_TARGET) -EXTRA_CFLAGS += $(ACPI_CFLAGS) -obj-y := $(patsubst %.c,%.o,$(wildcard *.c)) - -include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/battery/bt.c linux.21rc5-ac2/drivers/acpi/ospm/battery/bt.c --- linux.21rc5/drivers/acpi/ospm/battery/bt.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/battery/bt.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,654 +0,0 @@ -/***************************************************************************** - * - * Module Name: bt.c - * $Revision: 29 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include "bt.h" - - -#define _COMPONENT ACPI_BATTERY - MODULE_NAME ("bt") - - -/**************************************************************************** - * Internal Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bt_print - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Prints out information on a specific battery. - * - ****************************************************************************/ - -void -bt_print ( - BT_CONTEXT *battery) -{ -#ifdef ACPI_DEBUG - acpi_buffer buffer; - - PROC_NAME("bt_print"); - - if (!battery) { - return; - } - - buffer.length = 256; - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return; - } - - /* - * Get the full pathname for this ACPI object. - */ - acpi_get_name(battery->acpi_handle, ACPI_FULL_PATHNAME, &buffer); - - /* - * Print out basic battery information. - */ - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| Battery[%02x]:[%p] %s\n", battery->device_handle, battery->acpi_handle, (char*)buffer.pointer)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| uid[%s] is_present[%d] power_units[%s]\n", battery->uid, battery->is_present, battery->power_units)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - - acpi_os_free(buffer.pointer); -#endif /*ACPI_DEBUG*/ - - return; -} - - -/**************************************************************************** - * - * FUNCTION: bt_get_info - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - * NOTES: Allocates battery_info - which must be freed by the caller. - * - ****************************************************************************/ - -acpi_status -bt_get_info ( - BT_CONTEXT *battery, - BT_BATTERY_INFO **battery_info) -{ - acpi_status status = AE_OK; - acpi_buffer bif_buffer, package_format, package_data; - acpi_object *package = NULL; - - FUNCTION_TRACE("bt_get_info"); - - if (!battery || !battery_info || *battery_info) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - memset(&bif_buffer, 0, sizeof(acpi_buffer)); - - /* - * Evalute _BIF: - * ------------- - * And be sure to deallocate bif_buffer.pointer! - */ - status = bm_evaluate_object(battery->acpi_handle, "_BIF", NULL, - &bif_buffer); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Extract Package Data: - * --------------------- - * Type-cast this bif_buffer to a package and use helper - * functions to convert results into BT_BATTERY_INFO structure. - * The first attempt is just to get the size of the package - * data; the second gets the data (once we know the required - * bif_buffer size). 
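- * That is, the first bm_extract_package_data() call below is expected to
- * fail with AE_BUFFER_OVERFLOW while filling in package_data.length; the
- * buffer is then allocated and the call repeated.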
- */ - status = bm_cast_buffer(&bif_buffer, (void**)&package, - sizeof(acpi_object)); - if (ACPI_FAILURE(status)) { - goto end; - } - - package_format.length = sizeof("NNNNNNNNNSSSS"); - package_format.pointer = "NNNNNNNNNSSSS"; - - memset(&package_data, 0, sizeof(acpi_buffer)); - - status = bm_extract_package_data(package, &package_format, - &package_data); - if (status != AE_BUFFER_OVERFLOW) { - if (status == AE_OK) { - status = AE_ERROR; - } - goto end; - } - - package_data.pointer = acpi_os_callocate(package_data.length); - if (!package_data.pointer) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - status = bm_extract_package_data(package, &package_format, - &package_data); - if (ACPI_FAILURE(status)) { - acpi_os_free(package_data.pointer); - goto end; - } - - *battery_info = package_data.pointer; - -end: - acpi_os_free(bif_buffer.pointer); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bt_get_status - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bt_get_status ( - BT_CONTEXT *battery, - BT_BATTERY_STATUS **battery_status) -{ - acpi_status status = AE_OK; - acpi_buffer bst_buffer, package_format, package_data; - acpi_object *package = NULL; - - FUNCTION_TRACE("bt_get_status"); - - if (!battery || !battery_status || *battery_status) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - memset(&bst_buffer, 0, sizeof(acpi_buffer)); - - /* - * Evalute _BST: - * ------------- - * And be sure to deallocate bst_buffer.pointer! - */ - status = bm_evaluate_object(battery->acpi_handle, "_BST", - NULL, &bst_buffer); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Extract Package Data: - * --------------------- - * Type-cast this bst_buffer to a package and use helper - * functions to convert results into BT_BATTERY_STATUS structure. - * The first attempt is just to get the size of the package data; - * the second gets the data (once we know the required bst_buffer - * size). 
- */ - status = bm_cast_buffer(&bst_buffer, (void**)&package, - sizeof(acpi_object)); - if (ACPI_FAILURE(status)) { - goto end; - } - - package_format.length = sizeof("NNNN"); - package_format.pointer = "NNNN"; - - memset(&package_data, 0, sizeof(acpi_buffer)); - - status = bm_extract_package_data(package, &package_format, - &package_data); - if (status != AE_BUFFER_OVERFLOW) { - if (status == AE_OK) { - status = AE_ERROR; - } - goto end; - } - - package_data.pointer = acpi_os_callocate(package_data.length); - if (!package_data.pointer) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - status = bm_extract_package_data(package, &package_format, - &package_data); - if (ACPI_FAILURE(status)) { - acpi_os_free(package_data.pointer); - goto end; - } - - *battery_status = package_data.pointer; - -end: - acpi_os_free(bst_buffer.pointer); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bt_check_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bt_check_device ( - BT_CONTEXT *battery) -{ - acpi_status status = AE_OK; - BM_DEVICE_STATUS battery_status = BM_STATUS_UNKNOWN; - u32 was_present = FALSE; - BT_BATTERY_INFO *battery_info = NULL; - - FUNCTION_TRACE("bt_check_device"); - - if (!battery) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - was_present = battery->is_present; - - /* - * Battery Present? - * ---------------- - * Get the device status and check if battery slot is occupied. - */ - status = bm_get_device_status(battery->device_handle, &battery_status); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Unable to get battery status.\n")); - return_ACPI_STATUS(status); - } - - if (battery_status & BM_STATUS_BATTERY_PRESENT) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Battery socket occupied.\n")); - battery->is_present = TRUE; - } - else { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Battery socket not occupied.\n")); - battery->is_present = FALSE; - } - - /* - * Battery Appeared? - * ----------------- - */ - if (!was_present && battery->is_present) { - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Battery insertion detected.\n")); - - /* - * Units of Power? - * --------------- - * Get the 'units of power', as we'll need this to report - * status information. - */ - status = bt_get_info(battery, &battery_info); - if (ACPI_SUCCESS(status)) { - battery->power_units = (battery_info->power_unit) - ? BT_POWER_UNITS_AMPS : BT_POWER_UNITS_WATTS; - acpi_os_free(battery_info); - } - } - - /* - * Battery Disappeared? - * -------------------- - */ - else if (was_present && !battery->is_present) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Battery removal detected.\n")); - battery->power_units = BT_POWER_UNITS_DEFAULT; - } - - return_ACPI_STATUS(status); -} - - -/***************************************************************************** - * - * FUNCTION: bt_add_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bt_add_device ( - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - BT_CONTEXT *battery = NULL; - - FUNCTION_TRACE("bt_add_device"); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Adding battery device [%02x].\n", device_handle)); - - if (!context || *context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Get information on this device. 
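- * bm_get_device_info() fills in the BM_DEVICE describing this handle,
- * from which the ACPI handle and unique id (uid) are copied below.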
- */ - status = bm_get_device_info(device_handle, &device); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Allocate a new BT_CONTEXT structure. - */ - battery = acpi_os_callocate(sizeof(BT_CONTEXT)); - if (!battery) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - battery->device_handle = device->handle; - battery->acpi_handle = device->acpi_handle; - strncpy(battery->uid, device->id.uid, sizeof(battery->uid)); - - battery->power_units = BT_POWER_UNITS_DEFAULT; - battery->is_present = FALSE; - - /* - * See if battery is really present. - */ - status = bt_check_device(battery); - if (ACPI_FAILURE(status)) { - goto end; - } - - status = bt_osl_add_device(battery); - if (ACPI_FAILURE(status)) { - goto end; - } - - *context = battery; - - bt_print(battery); - -end: - if (ACPI_FAILURE(status)) { - acpi_os_free(battery); - } - - return_ACPI_STATUS(status); -} - - -/***************************************************************************** - * - * FUNCTION: bt_remove_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bt_remove_device ( - void **context) -{ - acpi_status status = AE_OK; - BT_CONTEXT *battery = NULL; - - FUNCTION_TRACE("bt_remove_device"); - - if (!context || !*context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - battery = (BT_CONTEXT*)*context; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing battery device [%02x].\n", battery->device_handle)); - - bt_osl_remove_device(battery); - - acpi_os_free(battery); - - *context = NULL; - - return_ACPI_STATUS(status); -} - - -/***************************************************************************** - * External Functions - *****************************************************************************/ - -/***************************************************************************** - * - * FUNCTION: bt_initialize - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bt_initialize (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("bt_initialize"); - - memset(&criteria, 0, sizeof(BM_DEVICE_ID)); - memset(&driver, 0, sizeof(BM_DRIVER)); - - /* - * Register driver for control method battery devices. - */ - MEMCPY(criteria.hid, BT_HID_CM_BATTERY, sizeof(BT_HID_CM_BATTERY)); - - driver.notify = &bt_notify; - driver.request = &bt_request; - - status = bm_register_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bt_terminate - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bt_terminate (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("bt_terminate"); - - memset(&criteria, 0, sizeof(BM_DEVICE_ID)); - memset(&driver, 0, sizeof(BM_DRIVER)); - - /* - * Unregister driver for control method battery devices.
- */ - MEMCPY(criteria.hid, BT_HID_CM_BATTERY, sizeof(BT_HID_CM_BATTERY)); - - driver.notify = &bt_notify; - driver.request = &bt_request; - - status = bm_unregister_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bt_notify - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bt_notify ( - BM_NOTIFY notify_type, - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("bt_notify"); - - if (!context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - switch (notify_type) { - - case BM_NOTIFY_DEVICE_ADDED: - status = bt_add_device(device_handle, context); - break; - - case BM_NOTIFY_DEVICE_REMOVED: - status = bt_remove_device(context); - break; - - case BT_NOTIFY_STATUS_CHANGE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Status change (_BST) event detected.\n")); - status = bt_osl_generate_event(notify_type, - ((BT_CONTEXT*)*context)); - break; - - case BT_NOTIFY_INFORMATION_CHANGE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Information change (_BIF) event detected.\n")); - status = bt_check_device((BT_CONTEXT*)*context); - if (ACPI_SUCCESS(status)) { - status = bt_osl_generate_event(notify_type, - ((BT_CONTEXT*)*context)); - } - break; - - default: - status = AE_SUPPORT; - break; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bt_request - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bt_request ( - BM_REQUEST *request, - void *context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("bt_request"); - - /* - * Must have a valid request structure and context. - */ - if (!request || !context) - return_ACPI_STATUS(AE_BAD_PARAMETER); - - /* - * Handle request: - * --------------- - */ - switch (request->command) { - - default: - status = AE_SUPPORT; - break; - } - - request->status = status; - - return_ACPI_STATUS(status); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/battery/bt_osl.c linux.21rc5-ac2/drivers/acpi/ospm/battery/bt_osl.c --- linux.21rc5/drivers/acpi/ospm/battery/bt_osl.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/battery/bt_osl.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,443 +0,0 @@ -/****************************************************************************** - * - * Module Name: bt_osl.c - * $Revision: 24 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -/* - * Changes: - * Brendan Burns 2000-11-15 - * - added proc battery interface - * - parse returned data from _BST and _BIF - * Andy Grover 2000-12-8 - * - improved proc interface - */ - - -#include -#include -#include -#include -#include -#include -#include "bt.h" - - -MODULE_AUTHOR("Andrew Grover"); -MODULE_DESCRIPTION("ACPI Component Architecture (CA) - Control Method Battery Driver"); -MODULE_LICENSE("GPL"); - - -#define BT_PROC_ROOT "battery" -#define BT_PROC_STATUS "status" -#define BT_PROC_INFO "info" - -extern struct proc_dir_entry *bm_proc_root; -static struct proc_dir_entry *bt_proc_root = NULL; - - -/**************************************************************************** - * - * FUNCTION: bt_osl_proc_read_info - * - ****************************************************************************/ - -static int -bt_osl_proc_read_info ( - char *page, - char **start, - off_t off, - int count, - int *eof, - void *context) -{ - BT_CONTEXT *battery = NULL; - BT_BATTERY_INFO *battery_info = NULL; - char *p = page; - int len = 0; - - if (!context || (off != 0)) { - goto end; - } - - battery = (BT_CONTEXT*)context; - - /* - * Battery Present? - * ---------------- - */ - if (!battery->is_present) { - p += sprintf(p, "Present: no\n"); - goto end; - } - else { - p += sprintf(p, "Present: yes\n"); - } - - /* - * Get Battery Information: - * ------------------------ - */ - if (ACPI_FAILURE(bt_get_info(battery, &battery_info))) { - p += sprintf(p, "Error reading battery information (_BIF)\n"); - goto end; - } - - if (battery_info->design_capacity == BT_UNKNOWN) { - p += sprintf(p, "Design Capacity: unknown\n"); - } - else { - p += sprintf(p, "Design Capacity: %d %sh\n", - (u32)battery_info->design_capacity, - battery->power_units); - } - - if (battery_info->last_full_capacity == BT_UNKNOWN) { - p += sprintf(p, "Last Full Capacity: unknown\n"); - } - else { - p += sprintf(p, "Last Full Capacity: %d %sh\n", - (u32)battery_info->last_full_capacity, - battery->power_units); - } - - if (battery_info->battery_technology == 0) { - p += sprintf(p, "Battery Technology: primary (non-rechargeable)\n"); - } - else if (battery_info->battery_technology == 1) { - p += sprintf(p, "Battery Technology: secondary (rechargeable)\n"); - } - else { - p += sprintf(p, "Battery Technology: unknown\n"); - } - - if (battery_info->design_voltage == BT_UNKNOWN) { - p += sprintf(p, "Design Voltage: unknown\n"); - } - else { - p += sprintf(p, "Design Voltage: %d mV\n", - (u32)battery_info->design_voltage); - } - - p += sprintf(p, "Design Capacity Warning: %d %sh\n", - (u32)battery_info->design_capacity_warning, - battery->power_units); - p += sprintf(p, "Design Capacity Low: %d %sh\n", - (u32)battery_info->design_capacity_low, - battery->power_units); - p += sprintf(p, "Capacity Granularity 1: %d %sh\n", - (u32)battery_info->battery_capacity_granularity_1, - battery->power_units); - p += sprintf(p, "Capacity Granularity 2: %d %sh\n", - (u32)battery_info->battery_capacity_granularity_2, - battery->power_units); - p += sprintf(p, "Model Number: %s\n", - battery_info->model_number); - p += sprintf(p, "Serial Number: %s\n", - battery_info->serial_number); - p += sprintf(p, "Battery Type: %s\n", - battery_info->battery_type); - p += sprintf(p, "OEM Info: %s\n", - battery_info->oem_info); - -end: - len = (p - 
page); - if (len <= off+count) *eof = 1; - *start = page + off; - len -= off; - if (len>count) len = count; - if (len<0) len = 0; - - acpi_os_free(battery_info); - - return(len); -} - - -/**************************************************************************** - * - * FUNCTION: bt_osl_proc_read_status - * - ****************************************************************************/ - -static int -bt_osl_proc_read_status ( - char *page, - char **start, - off_t off, - int count, - int *eof, - void *context) -{ - BT_CONTEXT *battery = NULL; - BT_BATTERY_STATUS *battery_status = NULL; - char *p = page; - int len = 0; - - if (!context || (off != 0)) { - goto end; - } - - battery = (BT_CONTEXT*)context; - - /* - * Battery Present? - * ---------------- - */ - if (!battery->is_present) { - p += sprintf(p, "Present: no\n"); - goto end; - } - else { - p += sprintf(p, "Present: yes\n"); - } - - /* - * Get Battery Status: - * ------------------- - */ - if (ACPI_FAILURE(bt_get_status(battery, &battery_status))) { - p += sprintf(p, "Error reading battery status (_BST)\n"); - goto end; - } - - /* - * Store Data: - * ----------- - */ - - if (!battery_status->state) { - p += sprintf(p, "State: ok\n"); - } - else { - if (battery_status->state & 0x1) - p += sprintf(p, "State: discharging\n"); - if (battery_status->state & 0x2) - p += sprintf(p, "State: charging\n"); - if (battery_status->state & 0x4) - p += sprintf(p, "State: critically low\n"); - } - - if (battery_status->present_rate == BT_UNKNOWN) { - p += sprintf(p, "Present Rate: unknown\n"); - } - else { - p += sprintf(p, "Present Rate: %d %s\n", - (u32)battery_status->present_rate, - battery->power_units); - } - - if (battery_status->remaining_capacity == BT_UNKNOWN) { - p += sprintf(p, "Remaining Capacity: unknown\n"); - } - else { - p += sprintf(p, "Remaining Capacity: %d %sh\n", - (u32)battery_status->remaining_capacity, - battery->power_units); - } - - if (battery_status->present_voltage == BT_UNKNOWN) { - p += sprintf(p, "Battery Voltage: unknown\n"); - } - else { - p += sprintf(p, "Battery Voltage: %d mV\n", - (u32)battery_status->present_voltage); - } - -end: - len = (p - page); - if (len <= off+count) *eof = 1; - *start = page + off; - len -= off; - if (len>count) len = count; - if (len<0) len = 0; - - acpi_os_free(battery_status); - - return(len); -} - - -/**************************************************************************** - * - * FUNCTION: bt_osl_add_device - * - ****************************************************************************/ - -acpi_status -bt_osl_add_device( - BT_CONTEXT *battery) -{ - struct proc_dir_entry *proc_entry = NULL; - - if (!battery) { - return(AE_BAD_PARAMETER); - } - - if (battery->is_present) { - printk("ACPI: Battery socket found, battery present\n"); - } - else { - printk("ACPI: Battery socket found, battery absent\n"); - } - - proc_entry = proc_mkdir(battery->uid, bt_proc_root); - if (!proc_entry) { - return(AE_ERROR); - } - - create_proc_read_entry(BT_PROC_STATUS, S_IFREG | S_IRUGO, - proc_entry, bt_osl_proc_read_status, (void*)battery); - - create_proc_read_entry(BT_PROC_INFO, S_IFREG | S_IRUGO, - proc_entry, bt_osl_proc_read_info, (void*)battery); - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bt_osl_remove_device - * - ****************************************************************************/ - -acpi_status -bt_osl_remove_device ( - BT_CONTEXT *battery) -{ - char proc_entry[64]; - - if (!battery) { - 
return(AE_BAD_PARAMETER); - } - - sprintf(proc_entry, "%s/%s", battery->uid, BT_PROC_INFO); - remove_proc_entry(proc_entry, bt_proc_root); - - sprintf(proc_entry, "%s/%s", battery->uid, BT_PROC_STATUS); - remove_proc_entry(proc_entry, bt_proc_root); - - sprintf(proc_entry, "%s", battery->uid); - remove_proc_entry(proc_entry, bt_proc_root); - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bt_osl_generate_event - * - ****************************************************************************/ - -acpi_status -bt_osl_generate_event ( - u32 event, - BT_CONTEXT *battery) -{ - acpi_status status = AE_OK; - - if (!battery) { - return(AE_BAD_PARAMETER); - } - - switch (event) { - - case BT_NOTIFY_STATUS_CHANGE: - case BT_NOTIFY_INFORMATION_CHANGE: - status = bm_osl_generate_event(battery->device_handle, - BT_PROC_ROOT, battery->uid, event, 0); - break; - - default: - return(AE_BAD_PARAMETER); - break; - } - - return(status); -} - - -/**************************************************************************** - * - * FUNCTION: bt_osl_init - * - * PARAMETERS: - * - * RETURN: 0: Success - * - * DESCRIPTION: Module initialization. - * - ****************************************************************************/ - -static int __init -bt_osl_init (void) -{ - acpi_status status = AE_OK; - - /* abort if no busmgr */ - if (!bm_proc_root) - return -ENODEV; - - bt_proc_root = proc_mkdir(BT_PROC_ROOT, bm_proc_root); - if (!bt_proc_root) { - status = AE_ERROR; - } - else { - status = bt_initialize(); - if (ACPI_FAILURE(status)) { - remove_proc_entry(BT_PROC_ROOT, bm_proc_root); - } - } - - return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; -} - - -/**************************************************************************** - * - * FUNCTION: bt_osl_cleanup - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Module cleanup. 
- * - ****************************************************************************/ - -static void __exit -bt_osl_cleanup (void) -{ - bt_terminate(); - - if (bt_proc_root) { - remove_proc_entry(BT_PROC_ROOT, bm_proc_root); - } - - return; -} - - -module_init(bt_osl_init); -module_exit(bt_osl_cleanup); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/battery/Makefile linux.21rc5-ac2/drivers/acpi/ospm/battery/Makefile --- linux.21rc5/drivers/acpi/ospm/battery/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/battery/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,6 +0,0 @@ -O_TARGET := ospm_$(notdir $(CURDIR)).o -obj-m := $(O_TARGET) -EXTRA_CFLAGS += $(ACPI_CFLAGS) -obj-y := $(patsubst %.c,%.o,$(wildcard *.c)) - -include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/bm.c linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bm.c --- linux.21rc5/drivers/acpi/ospm/busmgr/bm.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bm.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,1146 +0,0 @@ -/****************************************************************************** - * - * Module Name: bm.c - * $Revision: 48 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include "bm.h" - - -#define _COMPONENT ACPI_BUS - MODULE_NAME ("bm") - - -/**************************************************************************** - * Globals - ****************************************************************************/ - -extern fadt_descriptor_rev2 acpi_fadt; -/* TBD: Make dynamically sizeable. */ -BM_NODE_LIST node_list; - - -/**************************************************************************** - * Internal Functions - ****************************************************************************/ - -/***************************************************************************** - * - * FUNCTION: bm_print_object - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -void -bm_print_object ( - acpi_handle handle) -{ - acpi_buffer buffer; - acpi_handle parent; - acpi_object_type type; - - buffer.length = 256; - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return; - } - - acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - acpi_get_parent(handle, &parent); - acpi_get_type(handle, &type); - - /* - * TBD: Hack to get around scope identification problem. 
- */ - if (type == ACPI_TYPE_ANY) { - if (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_ANY, - handle, 0, NULL))) { - type = INTERNAL_TYPE_SCOPE; - } - } - - switch (type) - { - case INTERNAL_TYPE_SCOPE: - acpi_os_printf("SCOPE: "); - break; - case ACPI_TYPE_INTEGER: - acpi_os_printf("SIMPLE (number): "); - break; - case ACPI_TYPE_STRING: - acpi_os_printf("SIMPLE (string): "); - break; - case ACPI_TYPE_BUFFER: - acpi_os_printf("SIMPLE (buffer): "); - break; - case ACPI_TYPE_PACKAGE: - acpi_os_printf("SIMPLE (package): "); - break; - case ACPI_TYPE_FIELD_UNIT: - acpi_os_printf("FIELD UNIT: "); - break; - case ACPI_TYPE_DEVICE: - acpi_os_printf("DEVICE: "); - break; - case ACPI_TYPE_EVENT: - acpi_os_printf("EVENT: "); - break; - case ACPI_TYPE_METHOD: - acpi_os_printf("CONTROL METHOD: "); - break; - case ACPI_TYPE_MUTEX: - acpi_os_printf("MUTEX: "); - break; - case ACPI_TYPE_REGION: - acpi_os_printf("OPERATION REGION: "); - break; - case ACPI_TYPE_POWER: - acpi_os_printf("POWER RESOURCE: "); - break; - case ACPI_TYPE_PROCESSOR: - acpi_os_printf("PROCESSOR: "); - break; - case ACPI_TYPE_THERMAL: - acpi_os_printf("THERMAL ZONE: "); - break; - case ACPI_TYPE_BUFFER_FIELD: - acpi_os_printf("BUFFER FIELD: "); - break; - case ACPI_TYPE_DDB_HANDLE: - acpi_os_printf("DDB HANDLE: "); - break; - default: - acpi_os_printf("OTHER (%d): ", type); - break; - } - - acpi_os_printf("Object[%p][%s] parent[%p].\n", handle, (char*)buffer.pointer, parent); - - acpi_os_free(buffer.pointer); -} - - -/**************************************************************************** - * - * FUNCTION: bm_print_node - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -void -bm_print_node ( - BM_NODE *node, - u32 flags) -{ -#ifdef ACPI_DEBUG - acpi_buffer buffer; - BM_DEVICE *device = NULL; - char *type_string = NULL; - - PROC_NAME("bm_print_node"); - - if (!node) { - return; - } - - device = &(node->device); - - if (flags & BM_PRINT_PRESENT) { - if (!BM_DEVICE_PRESENT(device)) { - return; - } - } - - buffer.length = 256; - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return; - } - - acpi_get_name(device->acpi_handle, ACPI_FULL_PATHNAME, &buffer); - - switch(device->id.type) { - case BM_TYPE_SYSTEM: - type_string = " System"; - break; - case BM_TYPE_SCOPE: - type_string = " Scope"; - break; - case BM_TYPE_PROCESSOR: - type_string = " Proc"; - break; - case BM_TYPE_THERMAL_ZONE: - type_string = "Thermal"; - break; - case BM_TYPE_POWER_RESOURCE: - type_string = " Power"; - break; - case BM_TYPE_FIXED_BUTTON: - type_string = " Button"; - break; - case BM_TYPE_DEVICE: - type_string = " Device"; - break; - default: - type_string = "Unknown"; - break; - } - - if (!(flags & BM_PRINT_GROUP)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+-------------------------------------------------------------------------------\n")); - } - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| %s[%02x]:[%p] flags[%02x] hid[%s] %s\n", type_string, device->handle, device->acpi_handle, device->flags, (device->id.hid[0] ? 
device->id.hid : " "), (char*)buffer.pointer)); - - if (flags & BM_PRINT_IDENTIFICATION) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| identification: uid[%s] adr[%08x]\n", device->id.uid, device->id.adr)); - } - - if (flags & BM_PRINT_LINKAGE) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| linkage: this[%p] parent[%p] next[%p]\n", node, node->parent, node->next)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| scope.head[%p] scope.tail[%p]\n", node->scope.head, node->scope.tail)); - } - - if (flags & BM_PRINT_POWER) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| power: state[D%d] flags[%08x]\n", device->power.state, device->power.flags)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| S0[%02x] S1[%02x] S2[%02x] S3[%02x] S4[%02x] S5[%02x]\n", device->power.dx_supported[0], device->power.dx_supported[1], device->power.dx_supported[2], device->power.dx_supported[3], device->power.dx_supported[4], device->power.dx_supported[5])); - } - - if (!(flags & BM_PRINT_GROUP)) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+-------------------------------------------------------------------------------\n")); - } - - acpi_os_free(buffer.pointer); -#endif /*ACPI_DEBUG*/ - - return; -} - - -/**************************************************************************** - * - * FUNCTION: bm_print_hierarchy - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -void -bm_print_hierarchy (void) -{ -#ifdef ACPI_DEBUG - u32 i = 0; - - FUNCTION_TRACE("bm_print_hierarchy"); - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - - for (i = 0; i < node_list.count; i++) { - bm_print_node(node_list.nodes[i], BM_PRINT_GROUP | BM_PRINT_PRESENT); - } - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); -#endif /*ACPI_DEBUG*/ - - return_VOID; -} - - -/**************************************************************************** - * - * FUNCTION: bm_get_status - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_get_status ( - BM_DEVICE *device) -{ - acpi_status status = AE_OK; - - if (!device) { - return AE_BAD_PARAMETER; - } - - device->status = BM_STATUS_UNKNOWN; - - /* - * Dynamic Status? - * --------------- - * If _STA isn't present we just return the default status. - */ - if (!(device->flags & BM_FLAGS_DYNAMIC_STATUS)) { - device->status = BM_STATUS_DEFAULT; - return AE_OK; - } - - /* - * Evaluate _STA: - * -------------- - */ - status = bm_evaluate_simple_integer(device->acpi_handle, "_STA", - &(device->status)); - - return status; -} - - /**************************************************************************** - * - * FUNCTION: bm_get_identification - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_get_identification ( - BM_DEVICE *device) -{ - acpi_status status = AE_OK; - acpi_device_info info; - - if (!device) { - return AE_BAD_PARAMETER; - } - - if (!(device->flags & BM_FLAGS_IDENTIFIABLE)) { - return AE_OK; - } - - device->id.uid[0] = BM_UID_UNKNOWN; - device->id.hid[0] = BM_HID_UNKNOWN; - device->id.adr = BM_ADDRESS_UNKNOWN; - - /* - * Get Object Info: - * ---------------- - * Evaluate _UID, _HID, and _ADR...
- */ - status = acpi_get_object_info(device->acpi_handle, &info); - if (ACPI_FAILURE(status)) { - return status; - } - - if (info.valid & ACPI_VALID_UID) { - MEMCPY((void*)device->id.uid, (void*)info.unique_id, - sizeof(BM_DEVICE_UID)); - } - - if (info.valid & ACPI_VALID_HID) { - MEMCPY((void*)device->id.hid, (void*)info.hardware_id, - sizeof(BM_DEVICE_HID)); - } - - if (info.valid & ACPI_VALID_ADR) { - device->id.adr = info.address; - } - - return status; -} - - -/**************************************************************************** - * - * FUNCTION: bm_get_flags - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_get_flags ( - BM_DEVICE *device) -{ - acpi_handle acpi_handle = NULL; - - if (!device) { - return AE_BAD_PARAMETER; - } - - device->flags = BM_FLAGS_UNKNOWN; - - switch (device->id.type) { - - case BM_TYPE_DEVICE: - - /* - * Presence of _DCK indicates a docking station. - */ - if (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, - "_DCK", &acpi_handle))) { - device->flags |= BM_FLAGS_DOCKING_STATION; - } - - /* - * Presence of _EJD and/or _EJx indicates 'ejectable'. - * TBD: _EJx... - */ - if (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, - "_EJD", &acpi_handle))) { - device->flags |= BM_FLAGS_EJECTABLE; - } - - /* - * Presence of _PR0 or _PS0 indicates 'power manageable'. - */ - if (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, - "_PR0", &acpi_handle)) || - ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, - "_PS0", &acpi_handle))) { - device->flags |= BM_FLAGS_POWER_CONTROL; - } - - /* - * Presence of _CRS indicates 'configurable'. - */ - if (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, - "_CRS", &acpi_handle))) { - device->flags |= BM_FLAGS_CONFIGURABLE; - } - - /* Fall through to next case statement. */ - - case BM_TYPE_PROCESSOR: - case BM_TYPE_THERMAL_ZONE: - case BM_TYPE_POWER_RESOURCE: - /* - * Presence of _HID or _ADR indicates 'identifiable'. - */ - if (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, - "_HID", &acpi_handle)) || - ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, - "_ADR", &acpi_handle))) { - device->flags |= BM_FLAGS_IDENTIFIABLE; - } - - /* - * Presence of _STA indicates 'dynamic status'. 
- */ - if (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, - "_STA", &acpi_handle))) { - device->flags |= BM_FLAGS_DYNAMIC_STATUS; - } - - break; - } - - return AE_OK; -} - - -/**************************************************************************** - * - * FUNCTION: bm_add_namespace_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_add_namespace_device ( - acpi_handle acpi_handle, - acpi_object_type acpi_type, - BM_NODE *parent, - BM_NODE **child) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - BM_DEVICE *device = NULL; - - FUNCTION_TRACE("bm_add_namespace_device"); - - if (!parent || !child) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (node_list.count > BM_HANDLES_MAX) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - (*child) = NULL; - - /* - * Create Node: - * ------------ - */ - node = acpi_os_callocate(sizeof(BM_NODE)); - if (!node) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - node->parent = parent; - node->next = NULL; - - device = &(node->device); - - device->handle = node_list.count; - device->acpi_handle = acpi_handle; - - /* - * Device Type: - * ------------ - */ - switch (acpi_type) { - case INTERNAL_TYPE_SCOPE: - device->id.type = BM_TYPE_SCOPE; - break; - case ACPI_TYPE_PROCESSOR: - device->id.type = BM_TYPE_PROCESSOR; - break; - case ACPI_TYPE_THERMAL: - device->id.type = BM_TYPE_THERMAL_ZONE; - break; - case ACPI_TYPE_POWER: - device->id.type = BM_TYPE_POWER_RESOURCE; - break; - case ACPI_TYPE_DEVICE: - device->id.type = BM_TYPE_DEVICE; - break; - } - - /* - * Get Other Device Info: - * ---------------------- - * But only if this device's parent is present (which implies - * this device MAY be present). - */ - if (BM_NODE_PRESENT(node->parent)) { - /* - * Device Flags - */ - status = bm_get_flags(device); - if (ACPI_FAILURE(status)) { - goto end; - } - - /* - * Device Identification - */ - status = bm_get_identification(device); - if (ACPI_FAILURE(status)) { - goto end; - } - - /* - * Device Status - */ - status = bm_get_status(device); - if (ACPI_FAILURE(status)) { - goto end; - } - - /* - * Power Management: - * ----------------- - * If this node doesn't provide direct power control - * then we inherit PM capabilities from its parent. - * - * TBD: Inherit! - */ - if (BM_IS_POWER_CONTROL(device)) { - status = bm_get_pm_capabilities(node); - if (ACPI_FAILURE(status)) { - goto end; - } - } - } - -end: - if (ACPI_FAILURE(status)) { - acpi_os_free(node); - } - else { - /* - * Add to the node_list. - */ - node_list.nodes[node_list.count++] = node; - - /* - * Formulate Hierarchy: - * -------------------- - * Arrange within the namespace by assigning the parent and - * adding to the parent device's list of children (scope). 
- */ - if (!parent->scope.head) { - parent->scope.head = node; - } - else { - if (!parent->scope.tail) { - (parent->scope.head)->next = node; - } - else { - (parent->scope.tail)->next = node; - } - } - parent->scope.tail = node; - - (*child) = node; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_enumerate_namespace - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_enumerate_namespace (void) -{ - acpi_status status = AE_OK; - acpi_handle parent_handle = ACPI_ROOT_OBJECT; - acpi_handle child_handle = NULL; - BM_NODE *parent = NULL; - BM_NODE *child = NULL; - acpi_object_type acpi_type = 0; - u32 level = 1; - - FUNCTION_TRACE("bm_enumerate_namespace"); - - parent = node_list.nodes[0]; - - /* - * Enumerate ACPI Namespace: - * ------------------------- - * Parse through the ACPI namespace, identify all 'devices', - * and create a new entry for each in our collection. - */ - while (level > 0) { - - /* - * Get the next object at this level. - */ - status = acpi_get_next_object(ACPI_TYPE_ANY, parent_handle, child_handle, &child_handle); - if (ACPI_SUCCESS(status)) { - /* - * TBD: This is a hack to get around the problem - * identifying scope objects. Scopes - * somehow need to be uniquely identified. - */ - status = acpi_get_type(child_handle, &acpi_type); - if (ACPI_SUCCESS(status) && (acpi_type == ACPI_TYPE_ANY)) { - status = acpi_get_next_object(ACPI_TYPE_ANY, child_handle, 0, NULL); - if (ACPI_SUCCESS(status)) { - acpi_type = INTERNAL_TYPE_SCOPE; - } - } - - /* - * Device? - * ------- - * If this object is a 'device', insert into the - * ACPI Bus Manager's local hierarchy and search - * the object's scope for any child devices (a - * depth-first search). - */ - switch (acpi_type) { - case INTERNAL_TYPE_SCOPE: - case ACPI_TYPE_DEVICE: - case ACPI_TYPE_PROCESSOR: - case ACPI_TYPE_THERMAL: - case ACPI_TYPE_POWER: - status = bm_add_namespace_device(child_handle, acpi_type, parent, &child); - if (ACPI_SUCCESS(status)) { - status = acpi_get_next_object(ACPI_TYPE_ANY, child_handle, 0, NULL); - if (ACPI_SUCCESS(status)) { - level++; - parent_handle = child_handle; - child_handle = 0; - parent = child; - } - } - break; - } - } - - /* - * Scope Exhausted: - * ---------------- - * No more children in this object's scope, so go back up - * in the namespace tree to the object's parent. - */ - else { - level--; - child_handle = parent_handle; - acpi_get_parent(parent_handle, - &parent_handle); - - if (parent) { - parent = parent->parent; - } - else { - return_ACPI_STATUS(AE_NULL_ENTRY); - } - } - } - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bm_add_fixed_feature_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_add_fixed_feature_device ( - BM_NODE *parent, - BM_DEVICE_TYPE device_type, - char *device_hid) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - - FUNCTION_TRACE("bm_add_fixed_feature_device"); - - if (!parent) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (node_list.count > BM_HANDLES_MAX) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - /* - * Allocate the new device and add to the device array.
- */ - node = acpi_os_callocate(sizeof(BM_NODE)); - if (!node) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - /* - * Get device info. - */ - node->device.handle = node_list.count; - node->device.acpi_handle = ACPI_ROOT_OBJECT; - node->device.id.type = BM_TYPE_FIXED_BUTTON; - if (device_hid) { - MEMCPY((void*)node->device.id.hid, device_hid, - sizeof(node->device.id.hid)); - } - node->device.flags = BM_FLAGS_FIXED_FEATURE; - node->device.status = BM_STATUS_DEFAULT; - /* TBD: Device PM capabilities */ - - /* - * Add to the node_list. - */ - node_list.nodes[node_list.count++] = node; - - /* - * Formulate Hierarchy: - * -------------------- - * Arrange within the namespace by assigning the parent and - * adding to the parent device's list of children (scope). - */ - node->parent = parent; - node->next = NULL; - - if (parent) { - if (!parent->scope.head) { - parent->scope.head = node; - } - else { - if (!parent->scope.tail) { - (parent->scope.head)->next = node; - } - else { - (parent->scope.tail)->next = node; - } - } - parent->scope.tail = node; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_enumerate_fixed_features - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_enumerate_fixed_features (void) -{ - FUNCTION_TRACE("bm_enumerate_fixed_features"); - - /* - * Root Object: - * ------------ - * Fabricate the root object, which happens to always get a - * device_handle of zero. - */ - node_list.nodes[0] = acpi_os_callocate(sizeof(BM_NODE)); - if (NULL == (node_list.nodes[0])) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - node_list.nodes[0]->device.handle = BM_HANDLE_ROOT; - node_list.nodes[0]->device.acpi_handle = ACPI_ROOT_OBJECT; - node_list.nodes[0]->device.flags = BM_FLAGS_UNKNOWN; - node_list.nodes[0]->device.status = BM_STATUS_DEFAULT; - node_list.nodes[0]->device.id.type = BM_TYPE_SYSTEM; - /* TBD: Get system PM capabilities (Sx states?) */ - - node_list.count++; - - /* - * Fixed Features: - * --------------- - * Enumerate fixed-feature devices (e.g. power and sleep buttons). - */ - if (acpi_fadt.pwr_button == 0) { - bm_add_fixed_feature_device(node_list.nodes[0], - BM_TYPE_FIXED_BUTTON, BM_HID_POWER_BUTTON); - } - - if (acpi_fadt.sleep_button == 0) { - bm_add_fixed_feature_device(node_list.nodes[0], - BM_TYPE_FIXED_BUTTON, BM_HID_SLEEP_BUTTON); - } - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bm_get_handle - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_get_handle ( - acpi_handle acpi_handle, - BM_HANDLE *device_handle) -{ - acpi_status status = AE_NOT_FOUND; - u32 i = 0; - - FUNCTION_TRACE("bm_get_handle"); - - if (!device_handle) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - *device_handle = BM_HANDLE_UNKNOWN; - - /* - * Search all devices for a match on the ACPI handle. 
- */ - for (i = 0; i < node_list.count; i++) { - if (node_list.nodes[i]->device.acpi_handle == acpi_handle) { - *device_handle = node_list.nodes[i]->device.handle; - status = AE_OK; - break; - } - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_get_node - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_get_node ( - BM_HANDLE device_handle, - acpi_handle acpi_handle, - BM_NODE **node) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("bm_get_node"); - - if (!node) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* busmgr failed to init, but we're being called by subordinate drivers */ - if (node_list.count < 1) { - return_ACPI_STATUS(AE_NOT_FOUND); - } - - /* - * If no device handle, resolve acpi handle to device handle. - */ - if (!device_handle && acpi_handle) { - status = bm_get_handle(acpi_handle, &device_handle); - if (ACPI_FAILURE(status)) - return_ACPI_STATUS(status); - } - - /* - * Valid device handle? - */ - if (device_handle > BM_HANDLES_MAX) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid node handle [%02x] detected.\n", device_handle)); - return_ACPI_STATUS(AE_ERROR); - } - - *node = node_list.nodes[device_handle]; - - /* - * Valid node? - */ - if (!(*node)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) node entry [%02x] detected.\n", device_handle)); - return_ACPI_STATUS(AE_NULL_ENTRY); - } - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_initialize - * - * PARAMETERS: - * - * RETURN: Exception code. - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_initialize (void) -{ - acpi_status status = AE_OK; - u32 start = 0; - u32 stop = 0; - u32 elapsed = 0; - - FUNCTION_TRACE("bm_initialize"); - - MEMSET(&node_list, 0, sizeof(BM_NODE_LIST)); - - status = acpi_get_timer(&start); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Building device hierarchy.\n")); - - /* - * Enumerate ACPI fixed-feature devices. - */ - status = bm_enumerate_fixed_features(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Enumerate the ACPI namespace. - */ - status = bm_enumerate_namespace(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - acpi_get_timer(&stop); - acpi_get_timer_duration(start, stop, &elapsed); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Building device hierarchy took [%d] microseconds.\n", elapsed)); - - /* - * Display hierarchy. - */ - bm_print_hierarchy(); - - /* - * Register for all standard and device-specific notifications.
- */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Registering for all device notifications.\n")); - - status = acpi_install_notify_handler(ACPI_ROOT_OBJECT, - ACPI_SYSTEM_NOTIFY, &bm_notify, NULL); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unable to register for standard notifications.\n")); - return_ACPI_STATUS(status); - } - - status = acpi_install_notify_handler(ACPI_ROOT_OBJECT, - ACPI_DEVICE_NOTIFY, &bm_notify, NULL); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unable to register for device-specific notifications.\n")); - return_ACPI_STATUS(status); - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "ACPI Bus Manager enabled.\n")); - - /* - * Initialize built-in power resource driver. - */ - bm_pr_initialize(); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_terminate - * - * PARAMETERS: - * - * RETURN: Exception code. - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_terminate (void) -{ - acpi_status status = AE_OK; - u32 i = 0; - - FUNCTION_TRACE("bm_terminate"); - - /* - * Terminate built-in power resource driver. - */ - bm_pr_terminate(); - - /* - * Unregister for all notifications. - */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Unregistering for device notifications.\n")); - - status = acpi_remove_notify_handler(ACPI_ROOT_OBJECT, - ACPI_SYSTEM_NOTIFY, &bm_notify); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unable to un-register for standard notifications.\n")); - } - - status = acpi_remove_notify_handler(ACPI_ROOT_OBJECT, - ACPI_DEVICE_NOTIFY, &bm_notify); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unable to un-register for device-specific notifications.\n")); - } - - /* - * Parse through the device array, freeing all entries. - */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing device hierarchy.\n")); - for (i = 0; i < node_list.count; i++) { - if (node_list.nodes[i]) { - acpi_os_free(node_list.nodes[i]); - } - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "ACPI Bus Manager disabled.\n")); - - return_ACPI_STATUS(AE_OK); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/bmdriver.c linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmdriver.c --- linux.21rc5/drivers/acpi/ospm/busmgr/bmdriver.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmdriver.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,469 +0,0 @@ -/***************************************************************************** - * - * Module Name: bmdriver.c - * $Revision: 21 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include "bm.h" - -#define _COMPONENT ACPI_BUS - MODULE_NAME ("bmdriver") - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_get_device_power_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_get_device_power_state ( - BM_HANDLE device_handle, - BM_POWER_STATE *state) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - - FUNCTION_TRACE("bm_get_device_power_state"); - - if (!state) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - *state = ACPI_STATE_UNKNOWN; - - /* - * Resolve device handle to node. - */ - status = bm_get_node(device_handle, 0, &node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Get the current power state. - */ - status = bm_get_power_state(node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - *state = node->device.power.state; - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_set_device_power_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_set_device_power_state ( - BM_HANDLE device_handle, - BM_POWER_STATE state) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - - FUNCTION_TRACE("bm_set_device_power_state"); - - /* - * Resolve device handle to node. - */ - status = bm_get_node(device_handle, 0, &node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Set the current power state. - */ - status = bm_set_power_state(node, state); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_get_device_status - * - * PARAMETERS: - * device_handle is really an index number into the array of BM_DEVICE - * structures in info_list. This data item is passed to - * the registered program's "notify" callback. It is used - * to retrieve the specific BM_DEVICE structure instance - * associated with the callback. - * device_status is a pointer that receives the result of processing - * the device's associated ACPI _STA. - * - * RETURN: - * The acpi_status value indicates success AE_OK or failure of the function - * - * DESCRIPTION: Evaluates the device's ACPI _STA, if it is present. - * - ****************************************************************************/ - -acpi_status -bm_get_device_status ( - BM_HANDLE device_handle, - BM_DEVICE_STATUS *device_status) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - - FUNCTION_TRACE("bm_get_device_status"); - - if (!device_status) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - *device_status = BM_STATUS_UNKNOWN; - - /* - * Resolve device handle to node. - */ - status = bm_get_node(device_handle, 0, &node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Parent Present? 
- * --------------- - * If the parent isn't present we can't evaluate _STA on the child. - * Return an unknown status. - */ - if (!BM_NODE_PRESENT(node->parent)) { - return_ACPI_STATUS(AE_OK); - } - - /* - * Dynamic Status? - * --------------- - * If _STA isn't present we just return the default status. - */ - if (!(node->device.flags & BM_FLAGS_DYNAMIC_STATUS)) { - *device_status = BM_STATUS_DEFAULT; - return_ACPI_STATUS(AE_OK); - } - - /* - * Evaluate _STA: - * -------------- - */ - status = bm_evaluate_simple_integer(node->device.acpi_handle, "_STA", - &(node->device.status)); - if (ACPI_SUCCESS(status)) { - *device_status = node->device.status; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_get_device_info - * - * PARAMETERS: - * device_handle An index used to retrieve the associated BM_DEVICE info. - * device A pointer to a BM_DEVICE structure instance pointer. - * The pointed-to BM_DEVICE structure will contain - * this device's information. - * - * RETURN: - * The acpi_status value indicates success (AE_OK) or failure of the function - * - * DESCRIPTION: - * Using the device_handle, this function retrieves this device's - * BM_DEVICE structure instance and saves it in device. - * - ****************************************************************************/ - -acpi_status -bm_get_device_info ( - BM_HANDLE device_handle, - BM_DEVICE **device) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - - FUNCTION_TRACE("bm_get_device_info"); - - if (!device) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Resolve device handle to internal device. - */ - status = bm_get_node(device_handle, 0, &node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - *device = &(node->device); - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bm_get_device_context - * - * device_handle An index used to retrieve the associated BM_DEVICE info. - * context A pointer to a BM_DRIVER_CONTEXT structure instance. - * - * RETURN: - * The acpi_status value indicates success (AE_OK) or failure of the function - * - * DESCRIPTION: - * Using the device_handle, this function retrieves this device's - * BM_DRIVER_CONTEXT structure instance and saves it in context. - * - ****************************************************************************/ - -acpi_status -bm_get_device_context ( - BM_HANDLE device_handle, - BM_DRIVER_CONTEXT *context) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - - FUNCTION_TRACE("bm_get_device_context"); - - if (!context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - *context = NULL; - - /* - * Resolve device handle to internal device.
- */ - status = bm_get_node(device_handle, 0, &node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - if (!node->driver.context) { - return_ACPI_STATUS(AE_NULL_ENTRY); - } - - *context = node->driver.context; - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bm_register_driver - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_register_driver ( - BM_DEVICE_ID *criteria, - BM_DRIVER *driver) -{ - acpi_status status = AE_NOT_FOUND; - BM_HANDLE_LIST device_list; - BM_NODE *node = NULL; - BM_DEVICE *device = NULL; - u32 i = 0; - - FUNCTION_TRACE("bm_register_driver"); - - if (!criteria || !driver || !driver->notify || !driver->request) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - MEMSET(&device_list, 0, sizeof(BM_HANDLE_LIST)); - - /* - * Find Matches: - * ------------- - * Search through the entire device hierarchy for matches against - * the given device criteria. - */ - status = bm_search(BM_HANDLE_ROOT, criteria, &device_list); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Install driver: - * ---------------- - * For each match, record the driver information and execute the - * driver's Notify() function (if present) to notify the driver - * of the device's presence. - */ - for (i = 0; i < device_list.count; i++) { - - /* Resolve the device handle. */ - status = bm_get_node(device_list.handles[i], 0, &node); - if (ACPI_FAILURE(status)) { - continue; - } - - device = &(node->device); - - /* - * Make sure another driver hasn't already registered for - * this device. - */ - if (BM_IS_DRIVER_CONTROL(device)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Another driver has already registered for device [%02x].\n", device->handle)); - continue; - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Registering driver for device [%02x].\n", device->handle)); - - /* Notify driver of new device. */ - status = driver->notify(BM_NOTIFY_DEVICE_ADDED, - node->device.handle, &(node->driver.context)); - if (ACPI_SUCCESS(status)) { - node->driver.notify = driver->notify; - node->driver.request = driver->request; - node->device.flags |= BM_FLAGS_DRIVER_CONTROL; - } - } - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bm_unregister_driver - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_unregister_driver ( - BM_DEVICE_ID *criteria, - BM_DRIVER *driver) -{ - acpi_status status = AE_NOT_FOUND; - BM_HANDLE_LIST device_list; - BM_NODE *node = NULL; - BM_DEVICE *device = NULL; - u32 i = 0; - - FUNCTION_TRACE("bm_unregister_driver"); - - if (!criteria || !driver || !driver->notify || !driver->request) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - MEMSET(&device_list, 0, sizeof(BM_HANDLE_LIST)); - - /* - * Find Matches: - * ------------- - * Search through the entire device hierarchy for matches against - * the given device criteria. - */ - status = bm_search(BM_HANDLE_ROOT, criteria, &device_list); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Remove driver: - * --------------- - * For each match, execute the driver's Notify() function to allow - * the driver to clean up each device instance.
- */ - for (i = 0; i < device_list.count; i++) { - - /* Resolve the device handle. */ - status = bm_get_node(device_list.handles[i], 0, &node); - if (ACPI_FAILURE(status)) { - continue; - } - - device = &(node->device); - - /* - * Make sure driver has really registered for this device. - */ - if (!BM_IS_DRIVER_CONTROL(device)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Driver hasn't registered for device [%02x].\n", device->handle)); - continue; - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Unregistering driver for device [%02x].\n", device->handle)); - - /* Notify driver of device removal. */ - status = node->driver.notify(BM_NOTIFY_DEVICE_REMOVED, - node->device.handle, &(node->driver.context)); - if (ACPI_SUCCESS(status)) { - node->driver.notify = NULL; - node->driver.request = NULL; - node->driver.context = NULL; - node->device.flags &= ~BM_FLAGS_DRIVER_CONTROL; - } - } - - return_ACPI_STATUS(AE_OK); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/bmnotify.c linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmnotify.c --- linux.21rc5/drivers/acpi/ospm/busmgr/bmnotify.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmnotify.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,312 +0,0 @@ -/***************************************************************************** - * - * Module Name: bmnotify.c - * $Revision: 21 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include "bm.h" - - -#define _COMPONENT ACPI_BUS - MODULE_NAME ("bmnotify") - - -/**************************************************************************** - * Internal Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_generate_notify - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_generate_notify ( - BM_NODE *node, - u32 notify_type) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - - FUNCTION_TRACE("bm_generate_notify"); - - if (!node) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - device = &(node->device); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Sending notify [%02x] to device [%02x].\n", notify_type, node->device.handle)); - - if (!BM_IS_DRIVER_CONTROL(device)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "No driver installed for device [%02x].\n", device->handle)); - return_ACPI_STATUS(AE_NOT_EXIST); - } - - status = node->driver.notify(notify_type, node->device.handle, - &(node->driver.context)); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_device_check - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_device_check ( - BM_NODE *node, - u32 *status_change) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - BM_DEVICE_STATUS old_status = BM_STATUS_UNKNOWN; - - FUNCTION_TRACE("bm_device_check"); - - if (!node) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - device = &(node->device); - - if (status_change) { - *status_change = FALSE; - } - - old_status = device->status; - - /* - * Parent Present? - * --------------- - * Only check this device if its parent is present (which implies - * this device MAY be present). - */ - if (!BM_NODE_PRESENT(node->parent)) { - return_ACPI_STATUS(AE_OK); - } - - /* - * Get Status: - * ----------- - * And see if the status has changed. - */ - status = bm_get_status(device); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - if (old_status == node->device.status) { - return_ACPI_STATUS(AE_OK); - } - - if (status_change) { - *status_change = TRUE; - } - - /* - * Device Insertion? - * ----------------- - */ - if ((device->status & BM_STATUS_PRESENT) && - !(old_status & BM_STATUS_PRESENT)) { - /* TBD: Make sure driver is loaded, and if not, load. */ - status = bm_generate_notify(node, BM_NOTIFY_DEVICE_ADDED); - } - - /* - * Device Removal? - * --------------- - */ - else if (!(device->status & BM_STATUS_PRESENT) && - (old_status & BM_STATUS_PRESENT)) { - /* TBD: Unload driver if last device instance. 
*/ - status = bm_generate_notify(node, BM_NOTIFY_DEVICE_REMOVED); - } - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bm_bus_check - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_bus_check ( - BM_NODE *parent_node) -{ - acpi_status status = AE_OK; - u32 status_change = FALSE; - - FUNCTION_TRACE("bm_bus_check"); - - if (!parent_node) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Status Change? - * -------------- - */ - status = bm_device_check(parent_node, &status_change); - if (ACPI_FAILURE(status) || !status_change) { - return_ACPI_STATUS(status); - } - - /* - * Enumerate Scope: - * ---------------- - * TBD: Enumerate child devices within this device's scope and - * run bm_device_check()'s on them... - */ - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_notify - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -void -bm_notify ( - acpi_handle acpi_handle, - u32 notify_value, - void *context) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - - FUNCTION_TRACE("bm_notify"); - - /* - * Resolve the ACPI handle. - */ - status = bm_get_node(0, acpi_handle, &node); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received notify [%02x] for unknown device [%p].\n", notify_value, acpi_handle)); - return_VOID; - } - - /* - * Device-Specific or Standard? - * ---------------------------- - * Device-specific notifies are forwarded to the control module's - * notify() function for processing. Standard notifies are handled - * internally. - */ - if (notify_value > 0x7F) { - status = bm_generate_notify(node, notify_value); - } - else { - switch (notify_value) { - - case BM_NOTIFY_BUS_CHECK: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received BUS CHECK notification for device [%02x].\n", node->device.handle)); - status = bm_bus_check(node); - break; - - case BM_NOTIFY_DEVICE_CHECK: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received DEVICE CHECK notification for device [%02x].\n", node->device.handle)); - status = bm_device_check(node, NULL); - break; - - case BM_NOTIFY_DEVICE_WAKE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received DEVICE WAKE notification for device [%02x].\n", node->device.handle)); - /* TBD */ - break; - - case BM_NOTIFY_EJECT_REQUEST: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received EJECT REQUEST notification for device [%02x].\n", node->device.handle)); - /* TBD */ - break; - - case BM_NOTIFY_DEVICE_CHECK_LIGHT: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received DEVICE CHECK LIGHT notification for device [%02x].\n", node->device.handle)); - /* TBD: Exactly what does the 'light' mean?
*/ - status = bm_device_check(node, NULL); - break; - - case BM_NOTIFY_FREQUENCY_MISMATCH: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received FREQUENCY MISMATCH notification for device [%02x].\n", node->device.handle)); - /* TBD */ - break; - - case BM_NOTIFY_BUS_MODE_MISMATCH: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received BUS MODE MISMATCH notification for device [%02x].\n", node->device.handle)); - /* TBD */ - break; - - case BM_NOTIFY_POWER_FAULT: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received POWER FAULT notification.\n")); - /* TBD */ - break; - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received unknown/unsupported notification.\n")); - break; - } - } - - return_VOID; -} - - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/bm_osl.c linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bm_osl.c --- linux.21rc5/drivers/acpi/ospm/busmgr/bm_osl.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bm_osl.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,390 +0,0 @@ -/***************************************************************************** - * - * Module Name: bm_osl.c - * $Revision: 17 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "bm.h" - - -MODULE_AUTHOR("Andrew Grover"); -MODULE_DESCRIPTION("ACPI Component Architecture (CA) - ACPI Bus Manager"); -MODULE_LICENSE("GPL"); - - -/***************************************************************************** - * Types & Defines - *****************************************************************************/ - -typedef struct -{ - BM_HANDLE device_handle; - char *device_type; - char *device_instance; - u32 event_type; - u32 event_data; - struct list_head list; -} BM_OSL_EVENT; - - -#define BM_PROC_ROOT "acpi" -#define BM_PROC_EVENT "event" -#define BM_PROC_DEVICES "devices" - -#define BM_MAX_STRING_LENGTH 80 - - -/**************************************************************************** - * Globals - ****************************************************************************/ - -struct proc_dir_entry *bm_proc_root = NULL; -static struct proc_dir_entry *bm_proc_event = NULL; - -#ifdef ACPI_DEBUG -static u32 save_dbg_layer; -static u32 save_dbg_level; -#endif /*ACPI_DEBUG*/ - -extern BM_NODE_LIST node_list; - -static spinlock_t bm_osl_event_lock = SPIN_LOCK_UNLOCKED; - -static LIST_HEAD(bm_event_list); - -static DECLARE_WAIT_QUEUE_HEAD(bm_event_wait_queue); - -static int event_is_open = 0; - - -/**************************************************************************** - * Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_osl_generate_event - * - * DESCRIPTION: Generates an event for user-space consumption by writing - * the event data to the 'event' file. - * - ****************************************************************************/ - -acpi_status -bm_osl_generate_event ( - BM_HANDLE device_handle, - char *device_type, - char *device_instance, - u32 event_type, - u32 event_data) -{ - BM_OSL_EVENT *event = NULL; - unsigned long flags = 0; - - /* drop event on the floor if no one's listening */ - if (!event_is_open) - return (AE_OK); - - /* - * Allocate a new event structure. - */ - event = acpi_os_callocate(sizeof(BM_OSL_EVENT)); - if (!event) - goto alloc_error; - - event->device_type = acpi_os_callocate(strlen(device_type) - + sizeof(char)); - if (!event->device_type) - goto alloc_error; - - event->device_instance = acpi_os_callocate(strlen(device_instance) - + sizeof(char)); - if (!event->device_instance) - goto alloc_error; - - /* - * Set event data. - */ - event->device_handle = device_handle; - strcpy(event->device_type, device_type); - strcpy(event->device_instance, device_instance); - event->event_type = event_type; - event->event_data = event_data; - - /* - * Add to the end of our event list. - */ - spin_lock_irqsave(&bm_osl_event_lock, flags); - list_add_tail(&event->list, &bm_event_list); - spin_unlock_irqrestore(&bm_osl_event_lock, flags); - - /* - * Signal waiting threads (if any). 
- */ - wake_up_interruptible(&bm_event_wait_queue); - - return(AE_OK); - -alloc_error: - if (event->device_instance) - acpi_os_free(event->device_instance); - - if (event->device_type) - acpi_os_free(event->device_type); - - if (event) - acpi_os_free(event); - - return (AE_NO_MEMORY); -} - -static int bm_osl_open_event(struct inode *inode, struct file *file) -{ - spin_lock_irq (&bm_osl_event_lock); - - if(event_is_open) - goto out_busy; - - event_is_open = 1; - - spin_unlock_irq (&bm_osl_event_lock); - return 0; - -out_busy: - spin_unlock_irq (&bm_osl_event_lock); - return -EBUSY; -} - - -static int bm_osl_close_event(struct inode *inode, struct file *file) -{ - event_is_open = 0; - return 0; -} - -/**************************************************************************** - * - * FUNCTION: bm_osl_read_event - * - * DESCRIPTION: Handles reads to the 'event' file by blocking user-mode - * threads until data (an event) is generated. - * - ****************************************************************************/ -static ssize_t -bm_osl_read_event( - struct file *file, - char *buf, - size_t count, - loff_t *ppos) -{ - BM_OSL_EVENT *event = NULL; - unsigned long flags = 0; - static char str[BM_MAX_STRING_LENGTH]; - static int chars_remaining = 0; - static char *ptr; - - if (!chars_remaining) { - DECLARE_WAITQUEUE(wait, current); - - if (list_empty(&bm_event_list)) { - - if (file->f_flags & O_NONBLOCK) - return -EAGAIN; - - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&bm_event_wait_queue, &wait); - - if (list_empty(&bm_event_list)) { - schedule(); - } - - remove_wait_queue(&bm_event_wait_queue, &wait); - set_current_state(TASK_RUNNING); - - if (signal_pending(current)) { - return -ERESTARTSYS; - } - } - - spin_lock_irqsave(&bm_osl_event_lock, flags); - event = list_entry(bm_event_list.next, BM_OSL_EVENT, list); - list_del(&event->list); - spin_unlock_irqrestore(&bm_osl_event_lock, flags); - - chars_remaining = sprintf(str, "%s %s %08x %08x\n", - event->device_type, event->device_instance, - event->event_type, event->event_data); - ptr = str; - - acpi_os_free(event->device_type); - acpi_os_free(event->device_instance); - acpi_os_free(event); - } - - if (chars_remaining < count) - count = chars_remaining; - - if (copy_to_user(buf, ptr, count)) - return -EFAULT; - - *ppos += count; - chars_remaining -= count; - ptr += count; - - return count; -} - -/**************************************************************************** - * - * FUNCTION: bm_osl_poll_event - * - * DESCRIPTION: Handles poll() of the 'event' file by blocking user-mode - * threads until data (an event) is generated. 
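The event plumbing deleted above is the stock 2.4 producer/consumer idiom: bm_osl_generate_event() appends to bm_event_list under a spinlock and wakes bm_event_wait_queue, while bm_osl_read_event() sleeps until the list is non-empty. A minimal free-standing sketch of that idiom, with hypothetical names (my_lock, my_list, my_queue, struct my_item) standing in for the driver's globals:

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct my_item {
    	struct list_head list;
    };

    static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
    static LIST_HEAD(my_list);
    static DECLARE_WAIT_QUEUE_HEAD(my_queue);

    /* Producer: queue an item, then wake any sleeping consumer. */
    static void my_post(struct my_item *item)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&my_lock, flags);
    	list_add_tail(&item->list, &my_list);
    	spin_unlock_irqrestore(&my_lock, flags);

    	wake_up_interruptible(&my_queue);
    }

    /* Consumer: sleep until an item (or a signal) arrives. */
    static struct my_item *my_fetch(void)
    {
    	struct my_item *item = NULL;
    	unsigned long flags;
    	DECLARE_WAITQUEUE(wait, current);

    	add_wait_queue(&my_queue, &wait);
    	for (;;) {
    		/* Mark the task asleep before testing the condition, so
    		   a wake-up between the test and schedule() can't be lost. */
    		set_current_state(TASK_INTERRUPTIBLE);
    		if (!list_empty(&my_list) || signal_pending(current))
    			break;
    		schedule();
    	}
    	set_current_state(TASK_RUNNING);
    	remove_wait_queue(&my_queue, &wait);

    	spin_lock_irqsave(&my_lock, flags);
    	if (!list_empty(&my_list)) {
    		item = list_entry(my_list.next, struct my_item, list);
    		list_del(&item->list);
    	}
    	spin_unlock_irqrestore(&my_lock, flags);

    	return item; /* NULL means a signal interrupted the wait */
    }

Unlike the single-shot test in bm_osl_read_event(), the loop form also tolerates spurious wake-ups, since the condition is re-checked before anything is dequeued.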
- * - ****************************************************************************/ -static unsigned int -bm_osl_poll_event( - struct file *file, - poll_table *wait) -{ - poll_wait(file, &bm_event_wait_queue, wait); - if (!list_empty(&bm_event_list)) - return POLLIN | POLLRDNORM; - return 0; -} - -struct file_operations proc_event_operations = { - open: bm_osl_open_event, - read: bm_osl_read_event, - release: bm_osl_close_event, - poll: bm_osl_poll_event, -}; - -/**************************************************************************** - * - * FUNCTION: bm_osl_init - * - ****************************************************************************/ - -int -bm_osl_init(void) -{ - acpi_status status = AE_OK; - - status = acpi_subsystem_status(); - if (ACPI_FAILURE(status)) - return -ENODEV; - - bm_proc_root = proc_mkdir(BM_PROC_ROOT, NULL); - if (!bm_proc_root) { - return(AE_ERROR); - } - - bm_proc_event = create_proc_entry(BM_PROC_EVENT, S_IRUSR, bm_proc_root); - if (bm_proc_event) { - bm_proc_event->proc_fops = &proc_event_operations; - } - - status = bm_initialize(); - - return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; -} - - -/**************************************************************************** - * - * FUNCTION: bm_osl_cleanup - * - ****************************************************************************/ - -void -bm_osl_cleanup(void) -{ - bm_terminate(); - - if (bm_proc_event) { - remove_proc_entry(BM_PROC_EVENT, bm_proc_root); - bm_proc_event = NULL; - } - - if (bm_proc_root) { - remove_proc_entry(BM_PROC_ROOT, NULL); - bm_proc_root = NULL; - } - - return; -} - - -module_init(bm_osl_init); -module_exit(bm_osl_cleanup); - - -/**************************************************************************** - * Symbols - ****************************************************************************/ - -/* bm.c */ - -EXPORT_SYMBOL(bm_get_node); - -/* bmdriver.c */ - -EXPORT_SYMBOL(bm_get_device_power_state); -EXPORT_SYMBOL(bm_set_device_power_state); -EXPORT_SYMBOL(bm_get_device_info); -EXPORT_SYMBOL(bm_get_device_status); -EXPORT_SYMBOL(bm_get_device_context); -EXPORT_SYMBOL(bm_register_driver); -EXPORT_SYMBOL(bm_unregister_driver); - -/* bmsearch.c */ - -EXPORT_SYMBOL(bm_search); - -/* bmrequest.c */ - -EXPORT_SYMBOL(bm_request); - -/* bmutils.c */ - -EXPORT_SYMBOL(bm_extract_package_data); -EXPORT_SYMBOL(bm_evaluate_object); -EXPORT_SYMBOL(bm_evaluate_simple_integer); -EXPORT_SYMBOL(bm_evaluate_reference_list); -EXPORT_SYMBOL(bm_copy_to_buffer); -EXPORT_SYMBOL(bm_cast_buffer); - -/* bm_proc.c */ - -EXPORT_SYMBOL(bm_osl_generate_event); -EXPORT_SYMBOL(bm_proc_root); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/bmpm.c linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmpm.c --- linux.21rc5/drivers/acpi/ospm/busmgr/bmpm.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmpm.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,442 +0,0 @@ -/***************************************************************************** - * - * Module Name: bmpm.c - * $Revision: 14 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include "bm.h" -#include "bmpower.h" - - -#define _COMPONENT ACPI_BUS - MODULE_NAME ("bmpm") - - -/**************************************************************************** - * Internal Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_get_inferred_power_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_get_inferred_power_state ( - BM_DEVICE *device) -{ - acpi_status status = AE_OK; - BM_HANDLE_LIST pr_list; - BM_POWER_STATE list_state = ACPI_STATE_UNKNOWN; - char object_name[5] = {'_','P','R','0','\0'}; - u32 i = 0; - - FUNCTION_TRACE("bm_get_inferred_power_state"); - - if (!device) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - MEMSET(&pr_list, 0, sizeof(BM_HANDLE_LIST)); - - device->power.state = ACPI_STATE_D3; - - /* - * Calculate Power State: - * ---------------------- - * Try to infer the device's power state by checking the state of - * the device's power resources. We start by evaluating _PR0 - * (resource requirements at D0) and work through _PR1 and _PR2. - * We know the current device's power state when all resources (for - * a given Dx state) are ON. If no power resources are on then the - * device is assumed to be off (D3). - */ - for (i=ACPI_STATE_D0; i<ACPI_STATE_D3; i++) { - - object_name[3] = '0' + i; - - status = bm_evaluate_reference_list(device->acpi_handle, - object_name, &pr_list); - - if (ACPI_SUCCESS(status)) { - - status = bm_pr_list_get_state(&pr_list, &list_state); - - if (ACPI_SUCCESS(status)) { - - if (list_state == ACPI_STATE_D0) { - device->power.state = i; - break; - } - } - } - } - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_get_power_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_get_power_state ( - BM_NODE *node) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - - FUNCTION_TRACE("bm_get_power_state"); - - if (!node || !node->parent) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - device = &(node->device); - - device->power.state = ACPI_STATE_UNKNOWN; - - /* - * Power Control? - * -------------- - * If this device isn't directly power manageable (e.g. doesn't - * include _PR0/_PS0) then there's nothing to do (state is static). - */ - if (!BM_IS_POWER_CONTROL(device)) { - return_ACPI_STATUS(AE_OK); - } - - /* - * Parent Present? - * --------------- - * Make sure the parent is present before mucking with the child.
- */ - if (!BM_NODE_PRESENT(node->parent)) { - return_ACPI_STATUS(AE_NOT_EXIST); - } - - /* - * Get Power State: - * ---------------- - * Either directly (via _PSC) or inferred (via power resource - * dependencies). - */ - if (BM_IS_POWER_STATE(device)) { - status = bm_evaluate_simple_integer(device->acpi_handle, - "_PSC", &(device->power.state)); - } - else { - status = bm_get_inferred_power_state(device); - } - - if (ACPI_SUCCESS(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Device [%02x] is at power state [D%d].\n", device->handle, device->power.state)); - } - else { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Error getting power state for device [%02x]\n", device->handle)); - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_set_power_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_set_power_state ( - BM_NODE *node, - BM_POWER_STATE state) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - BM_DEVICE *parent_device = NULL; - BM_HANDLE_LIST current_list; - BM_HANDLE_LIST target_list; - char object_name[5] = {'_','P','R','0','\0'}; - - FUNCTION_TRACE("bm_set_power_state"); - - if (!node || !node->parent || (state > ACPI_STATE_D3)) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - MEMSET(&current_list, 0, sizeof(BM_HANDLE_LIST)); - MEMSET(&target_list, 0, sizeof(BM_HANDLE_LIST)); - - device = &(node->device); - parent_device = &(node->parent->device); - - /* - * Power Control? - * -------------- - * If this device isn't directly power manageable (e.g. doesn't - * include _PR0/_PS0) then return an error (can't set state). - */ - if (!BM_IS_POWER_CONTROL(device)) { - return_ACPI_STATUS(AE_ERROR); - } - - /* - * Parent Present? - * --------------- - * Make sure the parent is present before mucking with the child. - */ - if (!BM_NODE_PRESENT(node->parent)) { - return_ACPI_STATUS(AE_NOT_EXIST); - } - - /* - * Check Parent's Power State: - * --------------------------- - * Can't be in a higher power state (lower Dx value) than parent. - */ - if (state < parent_device->power.state) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Cannot set device [%02x] to a higher-powered state than parent_device.\n", device->handle)); - return_ACPI_STATUS(AE_ERROR); - } - - /* - * Get Resources: - * -------------- - * Get the power resources associated with the device's current - * and target power states. - */ - if (device->power.state != ACPI_STATE_UNKNOWN) { - object_name[3] = '0' + device->power.state; - bm_evaluate_reference_list(device->acpi_handle, - object_name, &current_list); - } - - object_name[3] = '0' + state; - bm_evaluate_reference_list(device->acpi_handle, object_name, - &target_list); - - /* - * Transition Resources: - * --------------------- - * Transition all power resources referenced by this device to - * the correct power state (taking into consideration sequencing - * and dependencies to other devices). - */ - if (current_list.count || target_list.count) { - status = bm_pr_list_transition(&current_list, &target_list); - } - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Execute _PSx: - * ------------- - * Execute the _PSx method corresponding to the target Dx state, - * if it exists.
- */ - object_name[2] = 'S'; - object_name[3] = '0' + state; - bm_evaluate_object(device->acpi_handle, object_name, NULL, NULL); - - if (ACPI_SUCCESS(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Device [%02x] is now at [D%d].\n", device->handle, state)); - device->power.state = state; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_get_pm_capabilities - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_get_pm_capabilities ( - BM_NODE *node) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - BM_DEVICE *parent_device = NULL; - acpi_handle acpi_handle = NULL; - BM_POWER_STATE dx_supported = ACPI_STATE_UNKNOWN; - char object_name[5] = {'_','S','0','D','\0'}; - u32 i = 0; - - FUNCTION_TRACE("bm_get_pm_capabilities"); - - if (!node || !node->parent) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - device = &(node->device); - parent_device = &(node->parent->device); - - /* - * Power Management Flags: - * ----------------------- - */ - if (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, "_PSC", - &acpi_handle))) { - device->power.flags |= BM_FLAGS_POWER_STATE; - } - - if (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, "_IRC", - &acpi_handle))) { - device->power.flags |= BM_FLAGS_INRUSH_CURRENT; - } - - if (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, "_PRW", - &acpi_handle))) { - device->power.flags |= BM_FLAGS_WAKE_CAPABLE; - } - - /* - * Device Power State: - * ------------------- - * Note that we can't get the device's power state until we've - * initialized all power resources, so for now we just set to - * unknown. - */ - device->power.state = ACPI_STATE_UNKNOWN; - - /* - * Dx Supported in S0: - * ------------------- - * Figure out which Dx states are supported by this device for the - * S0 (working) state. Note that D0 and D3 are required (assumed). - */ - device->power.dx_supported[ACPI_STATE_S0] = BM_FLAGS_D0_SUPPORT | - BM_FLAGS_D3_SUPPORT; - - if ((ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, "_PR1", - &acpi_handle))) || - (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, "_PS1", - &acpi_handle)))) { - device->power.dx_supported[ACPI_STATE_S0] |= - BM_FLAGS_D1_SUPPORT; - } - - if ((ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, "_PR2", - &acpi_handle))) || - (ACPI_SUCCESS(acpi_get_handle(device->acpi_handle, "_PS2", - &acpi_handle)))) { - device->power.dx_supported[ACPI_STATE_S0] |= - BM_FLAGS_D2_SUPPORT; - } - - /* - * Dx Supported in S1-S5: - * ---------------------- - * Figure out which Dx states are supported by this device for - * all other Sx states. - */ - for (i = ACPI_STATE_S1; i <= ACPI_STATE_S5; i++) { - - /* - * D3 support is assumed (off is always possible!). - */ - device->power.dx_supported[i] = BM_FLAGS_D3_SUPPORT; - - /* - * Evaluate _Sx_d: - * ------------- - * Which returns the highest (power) Dx state supported in - * this system (Sx) state. We convert this value to a bit - * mask of supported states (conceptually simpler).
- */ - status = bm_evaluate_simple_integer(device->acpi_handle, - object_name, &dx_supported); - if (ACPI_SUCCESS(status)) { - switch (dx_supported) { - case 0: - device->power.dx_supported[i] |= - BM_FLAGS_D0_SUPPORT; - /* fall through */ - case 1: - device->power.dx_supported[i] |= - BM_FLAGS_D1_SUPPORT; - /* fall through */ - case 2: - device->power.dx_supported[i] |= - BM_FLAGS_D2_SUPPORT; - /* fall through */ - case 3: - device->power.dx_supported[i] |= - BM_FLAGS_D3_SUPPORT; - break; - } - - /* - * Validate: - * --------- - * Mask off any states that _Sx_d falsely advertises - * (e.g. claims D1 support but neither _PR1 nor _PS1 - * exists). In other words, S1-S5 can't offer a Dx - * state that isn't supported by S0. - */ - device->power.dx_supported[i] &= - device->power.dx_supported[ACPI_STATE_S0]; - } - - object_name[2]++; - } - - return_ACPI_STATUS(AE_OK); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/bmpower.c linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmpower.c --- linux.21rc5/drivers/acpi/ospm/busmgr/bmpower.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmpower.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,664 +0,0 @@ -/**************************************************************************** - * - * Module Name: bmpower.c - Driver for ACPI Power Resource 'devices' - * $Revision: 20 $ - * - ****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -/* - * TBD: 1. Sequencing of power resource list transitions. - * 2. Global serialization of power resource transitions (see ACPI - * spec section 7.1.2/7.1.3). - * 3. Better error handling.
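The fall-through switch in bm_get_pm_capabilities(), removed just above, turns the integer that _Sx_d returns (the highest-power Dx state usable in system state Sx) into a cumulative bitmask: a value of 1 falls through cases 1, 2 and 3 and so yields D1|D2|D3. The same conversion as a free-standing helper (a sketch only; the BM_FLAGS_D*_SUPPORT values are assumed from bm.h):

    /* Map "highest-power Dx supported" (0..3) to a support mask.
       E.g. dx = 1 yields D1 | D2 | D3: every state at or below the
       reported one is reachable, while D0 is not. */
    static u32 dx_support_mask(u32 dx)
    {
    	u32 mask = 0;

    	switch (dx) {
    	case 0:
    		mask |= BM_FLAGS_D0_SUPPORT; /* fall through */
    	case 1:
    		mask |= BM_FLAGS_D1_SUPPORT; /* fall through */
    	case 2:
    		mask |= BM_FLAGS_D2_SUPPORT; /* fall through */
    	case 3:
    		mask |= BM_FLAGS_D3_SUPPORT;
    		break;
    	default:
    		break;
    	}
    	return mask;
    }

The AND against the S0 mask that follows the switch then strips any state _Sx_d advertises but the device cannot actually enter.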
- */ - - -#include -#include "bm.h" -#include "bmpower.h" - - -#define _COMPONENT ACPI_BUS - MODULE_NAME ("bmpower") - - -/**************************************************************************** - * Function Prototypes - ****************************************************************************/ - -acpi_status -bm_pr_notify ( - BM_NOTIFY notify_type, - BM_HANDLE device_handle, - void **context); - -acpi_status -bm_pr_request ( - BM_REQUEST *request, - void *context); - - -/**************************************************************************** - * Internal Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_pr_print - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_print ( - BM_POWER_RESOURCE *pr) -{ - acpi_buffer buffer; - - PROC_NAME("bm_pr_print"); - - if (!pr) { - return(AE_BAD_PARAMETER); - } - - buffer.length = 256; - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return(AE_NO_MEMORY); - } - - acpi_get_name(pr->acpi_handle, ACPI_FULL_PATHNAME, &buffer); - - acpi_os_printf("Power Resource: found\n"); - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| Power_resource[%02x]:[%p] %s\n", pr->device_handle, pr->acpi_handle, (char*)buffer.pointer)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| system_level[S%d] resource_order[%d]\n", pr->system_level, pr->resource_order)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| state[D%d] reference_count[%d]\n", pr->state, pr->reference_count)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - - acpi_os_free(buffer.pointer); - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bm_pr_get_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_get_state ( - BM_POWER_RESOURCE *pr) -{ - acpi_status status = AE_OK; - BM_DEVICE_STATUS device_status = BM_STATUS_UNKNOWN; - - FUNCTION_TRACE("bm_pr_get_state"); - - if (!pr) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - pr->state = ACPI_STATE_UNKNOWN; - - /* - * Evaluate _STA: - * -------------- - * Evaluate _STA to determine whether the power resource is ON or OFF. - * Note that if the power resource isn't present we'll get AE_OK but - * an unknown status. - */ - status = bm_get_device_status(pr->device_handle, &device_status); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Error reading status for power resource [%02x].\n", pr->device_handle)); - return_ACPI_STATUS(status); - } - - /* - * Mask off all bits but the first as some systems return non-standard - * values (e.g. 0x51).
- */ - switch (device_status & 0x01) { - case 0: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Power resource [%02x] is OFF.\n", pr->device_handle)); - pr->state = ACPI_STATE_D3; - break; - case 1: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Power resource [%02x] is ON.\n", pr->device_handle)); - pr->state = ACPI_STATE_D0; - break; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_pr_set_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_set_state ( - BM_POWER_RESOURCE *pr, - BM_POWER_STATE target_state) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("bm_pr_set_state"); - - if (!pr) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - status = bm_pr_get_state(pr); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - if (target_state == pr->state) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Power resource [%02x] already at target power state [D%d].\n", pr->device_handle, pr->state)); - return_ACPI_STATUS(AE_OK); - } - - switch (target_state) { - - case ACPI_STATE_D0: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Turning power resource [%02x] ON.\n", pr->device_handle)); - status = bm_evaluate_object(pr->acpi_handle, "_ON", NULL, NULL); - break; - - case ACPI_STATE_D3: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Turning power resource [%02x] OFF.\n", pr->device_handle)); - status = bm_evaluate_object(pr->acpi_handle, "_OFF", NULL, NULL); - break; - - default: - status = AE_BAD_PARAMETER; - break; - } - - status = bm_pr_get_state(pr); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_pr_list_get_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_list_get_state ( - BM_HANDLE_LIST *pr_list, - BM_POWER_STATE *power_state) -{ - acpi_status status = AE_OK; - BM_POWER_RESOURCE *pr = NULL; - u32 i = 0; - - FUNCTION_TRACE("bm_pr_list_get_state"); - - if (!pr_list || !power_state) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (pr_list->count < 1) { - pr->state = ACPI_STATE_UNKNOWN; - return_ACPI_STATUS(AE_ERROR); - } - - (*power_state) = ACPI_STATE_D0; - - /* - * Calculate Current power_state: - * ----------------------------- - * The current state of a list of power resources is ON if all - * power resources are currently in the ON state. In other words, - * if any power resource in the list is OFF then the collection - * isn't fully ON. 
- */ - for (i = 0; i < pr_list->count; i++) { - - status = bm_get_device_context(pr_list->handles[i], - (BM_DRIVER_CONTEXT*)(&pr)); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Invalid reference to power resource [%02x].\n", pr_list->handles[i])); - (*power_state) = ACPI_STATE_UNKNOWN; - break; - } - - status = bm_pr_get_state(pr); - if (ACPI_FAILURE(status)) { - (*power_state) = ACPI_STATE_UNKNOWN; - break; - } - - if (pr->state != ACPI_STATE_D0) { - (*power_state) = pr->state; - break; - } - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_pr_list_transition - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_list_transition ( - BM_HANDLE_LIST *current_list, - BM_HANDLE_LIST *target_list) -{ - acpi_status status = AE_OK; - BM_POWER_RESOURCE *pr = NULL; - u32 i = 0; - - FUNCTION_TRACE("bm_pr_list_transition"); - - if (!current_list || !target_list) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Reference Target: - * ----------------- - * Reference all resources for the target power state first (so - * the device doesn't get turned off while transitioning). Power - * resources that aren't on (new reference count of 1) are turned on. - */ - for (i = 0; i < target_list->count; i++) { - - status = bm_get_device_context(target_list->handles[i], - (BM_DRIVER_CONTEXT*)(&pr)); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Invalid reference to power resource [%02x].\n", target_list->handles[i])); - continue; - } - - if (++pr->reference_count == 1) { - /* TBD: Need ordering based upon resource_order */ - status = bm_pr_set_state(pr, ACPI_STATE_D0); - if (ACPI_FAILURE(status)) { - /* TBD: How do we handle this? */ - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to change power state for power resource [%02x].\n", target_list->handles[i])); - } - } - } - - /* - * Dereference Current: - * -------------------- - * Dereference all resources for the current power state. Power - * resources no longer referenced (new reference count of 0) are - * turned off. - */ - for (i = 0; i < current_list->count; i++) { - - status = bm_get_device_context(current_list->handles[i], - (BM_DRIVER_CONTEXT*)(&pr)); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Invalid reference to power resource [%02x].\n", target_list->handles[i])); - continue; - } - - if (--pr->reference_count == 0) { - /* TBD: Need ordering based upon resource_order */ - status = bm_pr_set_state(pr, ACPI_STATE_D3); - if (ACPI_FAILURE(status)) { - /* TBD: How do we handle this? 
*/ - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unable to change power state for power resource [%02x].\n", current_list->handles[i])); - } - } - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_pr_add_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_add_device ( - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - BM_POWER_RESOURCE *pr = NULL; - BM_DEVICE *device = NULL; - acpi_buffer buffer; - acpi_object acpi_object; - - FUNCTION_TRACE("bm_pr_add_device"); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Adding power resource [%02x].\n", device_handle)); - - if (!context || *context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - buffer.length = sizeof(acpi_object); - buffer.pointer = &acpi_object; - - /* - * Get information on this device. - */ - status = bm_get_device_info(device_handle, &device); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Allocate a new BM_POWER_RESOURCE structure. - */ - pr = acpi_os_callocate(sizeof(BM_POWER_RESOURCE)); - if (!pr) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - pr->device_handle = device->handle; - pr->acpi_handle = device->acpi_handle; - - /* - * Get information on this power resource. - */ - status = acpi_evaluate_object(pr->acpi_handle, NULL, NULL, &buffer); - if (ACPI_FAILURE(status)) { - goto end; - } - - pr->system_level = acpi_object.power_resource.system_level; - pr->resource_order = acpi_object.power_resource.resource_order; - pr->state = ACPI_STATE_UNKNOWN; - pr->reference_count = 0; - - /* - * Get the power resource's current state (ON|OFF). - */ - status = bm_pr_get_state(pr); - -end: - if (ACPI_FAILURE(status)) { - acpi_os_free(pr); - } - else { - *context = pr; - bm_pr_print(pr); - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_pr_remove_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_remove_device ( - void **context) -{ - acpi_status status = AE_OK; - BM_POWER_RESOURCE *pr = NULL; - - FUNCTION_TRACE("bm_pr_remove_device"); - - if (!context || !*context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - pr = (BM_POWER_RESOURCE*)*context; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing power resource [%02x].\n", pr->device_handle)); - - acpi_os_free(pr); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_pr_initialize - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_initialize (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("bm_pr_initialize"); - - MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID)); - MEMSET(&driver, 0, sizeof(BM_DRIVER)); - - criteria.type = BM_TYPE_POWER_RESOURCE; - - driver.notify = &bm_pr_notify; - driver.request = &bm_pr_request; - - status = bm_register_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - 
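bm_pr_initialize() above is also the template for any client of the bus manager: fill a BM_DEVICE_ID with match criteria and a BM_DRIVER with notify/request callbacks, then hand both to bm_register_driver(). A sketch of a driver matching on a hardware ID instead of a type (the "ACME0001" HID and the two callbacks are illustrative, not from the original sources):

    static acpi_status
    acme_notify (
    	BM_NOTIFY notify_type,
    	BM_HANDLE device_handle,
    	void **context)
    {
    	/* BM_NOTIFY_DEVICE_ADDED / _REMOVED land here. */
    	return AE_OK;
    }

    static acpi_status
    acme_request (
    	BM_REQUEST *request,
    	void *context)
    {
    	/* Device-specific commands (>= 0x80) are forwarded here. */
    	return AE_SUPPORT;
    }

    acpi_status
    acme_init (void)
    {
    	BM_DEVICE_ID criteria;
    	BM_DRIVER driver;

    	MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID));
    	MEMSET(&driver, 0, sizeof(BM_DRIVER));

    	/* Match on HID; bm_compare() runs STRNCMP over this field. */
    	MEMCPY(criteria.hid, "ACME0001", sizeof("ACME0001"));

    	driver.notify = &acme_notify;
    	driver.request = &acme_request;

    	return bm_register_driver(&criteria, &driver);
    }

bm_unregister_driver() takes the same criteria/driver pair on the way out, as bm_pr_terminate() below shows.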
-/**************************************************************************** - * - * FUNCTION: bm_pr_terminate - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_terminate (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("bm_pr_terminate"); - - MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID)); - MEMSET(&driver, 0, sizeof(BM_DRIVER)); - - criteria.type = BM_TYPE_POWER_RESOURCE; - - driver.notify = &bm_pr_notify; - driver.request = &bm_pr_request; - - status = bm_unregister_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_pr_notify - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_notify ( - BM_NOTIFY notify_type, - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("bm_pr_notify"); - - switch (notify_type) { - - case BM_NOTIFY_DEVICE_ADDED: - status = bm_pr_add_device(device_handle, context); - break; - - case BM_NOTIFY_DEVICE_REMOVED: - status = bm_pr_remove_device(context); - break; - - default: - status = AE_SUPPORT; - break; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_pr_request - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_pr_request ( - BM_REQUEST *request, - void *context) -{ - acpi_status status = AE_OK; - BM_POWER_RESOURCE *pr = NULL; - - FUNCTION_TRACE("bm_pr_request"); - - /* - * Must have a valid request structure and context. - */ - if (!request || !context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * context contains information specific to this power resource. - */ - pr = (BM_POWER_RESOURCE*)context; - - /* - * Handle request: - * --------------- - */ - switch (request->command) { - - default: - status = AE_SUPPORT; - break; - } - - request->status = status; - - return_ACPI_STATUS(status); -} - - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/bmrequest.c linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmrequest.c --- linux.21rc5/drivers/acpi/ospm/busmgr/bmrequest.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmrequest.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,164 +0,0 @@ -/****************************************************************************** - * - * Module Name: bmrequest.c - * $Revision: 16 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include "bm.h" - -#define _COMPONENT ACPI_BUS - MODULE_NAME ("bmrequest") - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_generate_request - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_generate_request ( - BM_NODE *node, - BM_REQUEST *request) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - - FUNCTION_TRACE("bm_generate_request"); - - if (!node || !request) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - device = &(node->device); - - if (!BM_IS_DRIVER_CONTROL(device)) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "No driver installed for device [%02x].\n", device->handle)); - return_ACPI_STATUS(AE_NOT_EXIST); - } - - status = node->driver.request(request, node->driver.context); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_request - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_request ( - BM_REQUEST *request) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - BM_DEVICE *device = NULL; - - FUNCTION_TRACE("bm_request"); - - /* - * Must have a valid request structure. - */ - if (!request) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Received request for device [%02x] command [%02x].\n", request->handle, request->command)); - - /* - * Resolve the node. - */ - status = bm_get_node(request->handle, 0, &node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - device = &(node->device); - - /* - * Device-Specific Request? - * ------------------------ - * If a device-specific command (>=0x80) forward this request to - * the appropriate driver. 
- */ - if (request->command & BM_COMMAND_DEVICE_SPECIFIC) { - status = bm_generate_request(node, request); - return_ACPI_STATUS(status); - } - - /* - * Bus-Specific Requests: - * ---------------------- - */ - switch (request->command) { - - case BM_COMMAND_GET_POWER_STATE: - status = bm_get_power_state(node); - if (ACPI_FAILURE(status)) { - break; - } - status = bm_copy_to_buffer(&(request->buffer), - &(device->power.state), sizeof(BM_POWER_STATE)); - break; - - case BM_COMMAND_SET_POWER_STATE: - { - BM_POWER_STATE *power_state = NULL; - - status = bm_cast_buffer(&(request->buffer), - (void**)&power_state, sizeof(BM_POWER_STATE)); - if (ACPI_FAILURE(status)) { - break; - } - status = bm_set_power_state(node, *power_state); - } - break; - - default: - status = AE_SUPPORT; - request->status = AE_SUPPORT; - break; - } - - return_ACPI_STATUS(status); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/bmsearch.c linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmsearch.c --- linux.21rc5/drivers/acpi/ospm/busmgr/bmsearch.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmsearch.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,192 +0,0 @@ -/****************************************************************************** - * - * Module Name: bmsearch.c - * $Revision: 16 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include "bm.h" - - -#define _COMPONENT ACPI_BUS - MODULE_NAME ("bmsearch") - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_compare - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_compare ( - BM_DEVICE *device, - BM_DEVICE_ID *criteria) -{ - if (!device || !criteria) { - return AE_BAD_PARAMETER; - } - - /* - * Present? - * -------- - * We're only going to match on devices that are present. - * TBD: Optimize in bm_search (don't have to call here). - */ - if (!BM_DEVICE_PRESENT(device)) { - return AE_NOT_FOUND; - } - - /* - * Type? - */ - if (criteria->type && (criteria->type != device->id.type)) { - return AE_NOT_FOUND; - } - - /* - * HID? - */ - if ((criteria->hid[0]) && (0 != STRNCMP(criteria->hid, - device->id.hid, sizeof(BM_DEVICE_HID)))) { - return AE_NOT_FOUND; - } - - /* - * ADR? 
- */ - if ((criteria->adr) && (criteria->adr != device->id.adr)) { - return AE_NOT_FOUND; - } - - return AE_OK; -} - - -/**************************************************************************** - * - * FUNCTION: bm_search - * - * PARAMETERS: - * - * RETURN: AE_BAD_PARAMETER - invalid input parameter - * AE_NOT_EXIST - start_device_handle doesn't exist - * AE_NOT_FOUND - no matches to Search_info.criteria found - * AE_OK - success - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_search( - BM_HANDLE device_handle, - BM_DEVICE_ID *criteria, - BM_HANDLE_LIST *results) -{ - acpi_status status = AE_OK; - BM_NODE *node = NULL; - - FUNCTION_TRACE("bm_search"); - - if (!criteria || !results) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - results->count = 0; - - /* - * Locate Starting Point: - * ---------------------- - * Locate the node in the hierarchy where we'll begin our search. - */ - status = bm_get_node(device_handle, 0, &node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Parse Hierarchy: - * ---------------- - * Parse through the node hierarchy looking for matches. - */ - while (node && (results->count<=BM_HANDLES_MAX)) { - /* - * Depth-first: - * ------------ - * Searches are always performed depth-first. - */ - if (node->scope.head) { - status = bm_compare(&(node->device), criteria); - if (ACPI_SUCCESS(status)) { - results->handles[results->count++] = - node->device.handle; - } - node = node->scope.head; - } - - /* - * Now Breadth: - * ------------ - * Search all peers until scope is exhausted. - */ - else { - status = bm_compare(&(node->device), criteria); - if (ACPI_SUCCESS(status)) { - results->handles[results->count++] = - node->device.handle; - } - - /* - * Locate Next Device: - * ------------------- - * The next node is either a peer at this level - * (node->next is valid), or we work our way back - * up the tree until we either find a non-parsed - * peer or hit the top (node->parent is NULL). - */ - while (!node->next && node->parent) { - node = node->parent; - } - node = node->next; - } - } - - if (results->count == 0) { - return_ACPI_STATUS(AE_NOT_FOUND); - } - else { - return_ACPI_STATUS(AE_OK); - } -} - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/bmutils.c linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmutils.c --- linux.21rc5/drivers/acpi/ospm/busmgr/bmutils.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/bmutils.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,611 +0,0 @@ -/***************************************************************************** - * - * Module Name: bmutils.c - * $Revision: 43 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details.
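bm_search(), removed above, walks the device hierarchy iteratively rather than recursively: descend into node->scope.head when a scope exists, otherwise move to the node->next peer, climbing back out through node->parent when a scope is exhausted. The bare traversal skeleton looks like this (a sketch; struct node stands in for BM_NODE's scope/next/parent links):

    struct node {
    	struct node *head;   /* first child in this node's scope */
    	struct node *next;   /* right-hand peer */
    	struct node *parent; /* enclosing scope */
    };

    /* Visit 'root' and everything below it, depth-first, without
       recursion -- the walking order used by bm_search(). */
    static void walk(struct node *root, void (*visit)(struct node *))
    {
    	struct node *node = root;

    	while (node) {
    		visit(node);

    		if (node->head) {
    			node = node->head; /* descend first */
    		}
    		else {
    			/* No children: climb until a peer exists (or we
    			   step past the top and node becomes NULL). */
    			while (!node->next && node->parent)
    				node = node->parent;
    			node = node->next;
    		}
    	}
    }

One caveat carried over from the original: the climb stops only at a node with no parent, so starting mid-tree will spill over into the peers of the start node's ancestors; bm_search() behaves the same way.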
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include "bm.h" - - -#define _COMPONENT ACPI_BUS - MODULE_NAME ("bmutils") - - -#ifdef ACPI_DEBUG -#define DEBUG_EVAL_ERROR(l,h,p,s) bm_print_eval_error(l,h,p,s) -#else -#define DEBUG_EVAL_ERROR(l,h,p,s) -#endif - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: bm_print_eval_error - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -void -bm_print_eval_error ( - u32 debug_level, - acpi_handle handle, - acpi_string pathname, - acpi_status status) -{ - acpi_buffer buffer; - acpi_status local_status; - - PROC_NAME("bm_print_eval_error"); - - buffer.length = 256; - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return; - } - - local_status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - if (ACPI_FAILURE(local_status)) { - ACPI_DEBUG_PRINT((ACPI_DEBUG_LEVEL(debug_level), "Evaluate object [%p], %s\n", handle, - acpi_format_exception(status))); - return; - } - - if (pathname) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate object [%s.%s], %s\n", (char*)buffer.pointer, pathname, - acpi_format_exception(status))); - } - else { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate object [%s], %s\n", (char*)buffer.pointer, - acpi_format_exception(status))); - } - - acpi_os_free(buffer.pointer); -} - - -/**************************************************************************** - * - * FUNCTION: bm_copy_to_buffer - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_copy_to_buffer ( - acpi_buffer *buffer, - void *data, - u32 length) -{ - FUNCTION_TRACE("bm_copy_to_buffer"); - - if (!buffer || (!buffer->pointer) || !data || (length == 0)) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (length > buffer->length) { - buffer->length = length; - return_ACPI_STATUS(AE_BUFFER_OVERFLOW); - } - - buffer->length = length; - MEMCPY(buffer->pointer, data, length); - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bm_cast_buffer - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_cast_buffer ( - acpi_buffer *buffer, - void **pointer, - u32 length) -{ - FUNCTION_TRACE("bm_cast_buffer"); - - if (!buffer || !buffer->pointer || !pointer || length == 0) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (length > buffer->length) { - return_ACPI_STATUS(AE_BAD_DATA); - } - - *pointer = buffer->pointer; - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bm_extract_package_data - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_extract_package_data ( - acpi_object *package, - acpi_buffer *format, - acpi_buffer *buffer) -{ - u32 tail_offset = 0; - u32 
size_required = 0; - char *format_string = NULL; - u32 format_count = 0; - u32 i = 0; - u8 *head = NULL; - u8 *tail = NULL; - - FUNCTION_TRACE("bm_extract_package_data"); - - if (!package || (package->type != ACPI_TYPE_PACKAGE) || (package->package.count < 1)) { - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid 'package' argument\n")); - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (!format || !format->pointer || (format->length < 1)) { - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid 'format' argument\n")); - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (!buffer) { - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid 'buffer' argument\n")); - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - format_count = (format->length/sizeof(char)) - 1; - if (format_count > package->package.count) { - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Format specifies more objects [%d] than exist in package [%d].", format_count, package->package.count)); - return_ACPI_STATUS(AE_BAD_DATA); - } - - format_string = (char*)format->pointer; - - /* - * Calculate size_required. - */ - for (i=0; i<format_count; i++) { - - acpi_object *element = &(package->package.elements[i]); - - if (!element) { - return_ACPI_STATUS(AE_BAD_DATA); - } - - switch (element->type) { - - case ACPI_TYPE_INTEGER: - switch (format_string[i]) { - case 'N': - size_required += sizeof(acpi_integer); - tail_offset += sizeof(acpi_integer); - break; - case 'S': - size_required += sizeof(char*) + sizeof(acpi_integer) + sizeof(char); - tail_offset += sizeof(char*); - break; - default: - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid package element [%d]: got number, expecting [%c].\n", i, format_string[i])); - return_ACPI_STATUS(AE_BAD_DATA); - break; - } - break; - - case ACPI_TYPE_STRING: - case ACPI_TYPE_BUFFER: - switch (format_string[i]) { - case 'S': - size_required += sizeof(char*) + (element->string.length * sizeof(char)) + sizeof(char); - tail_offset += sizeof(char*); - break; - case 'B': - size_required += sizeof(u8*) + (element->buffer.length * sizeof(u8)); - tail_offset += sizeof(u8*); - break; - default: - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid package element [%d] got string/buffer, expecting [%c].\n", i, format_string[i])); - return_ACPI_STATUS(AE_BAD_DATA); - break; - } - break; - - case ACPI_TYPE_PACKAGE: - default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unsupported element at index=%d\n", i)); - /* TBD: handle nested packages... */ - return_ACPI_STATUS(AE_SUPPORT); - break; - } - } - - /* - * Validate output buffer. - */ - if (buffer->length < size_required) { - buffer->length = size_required; - return_ACPI_STATUS(AE_BUFFER_OVERFLOW); - } - else if (buffer->length != size_required || !buffer->pointer) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - head = buffer->pointer; - tail = buffer->pointer + tail_offset; - - /* - * Extract package data.
- - -/**************************************************************************** - * - * FUNCTION: bm_evaluate_object - * - * PARAMETERS: - * - * RETURN: AE_OK - * AE_BUFFER_OVERFLOW Evaluated object returned data, but - * caller did not provide buffer. - * - * DESCRIPTION: Helper for acpi_evaluate_object that handles buffer - * allocation. Note that the caller is responsible for - * freeing buffer->pointer! - * - ****************************************************************************/ - -acpi_status -bm_evaluate_object ( - acpi_handle handle, - acpi_string pathname, - acpi_object_list *arguments, - acpi_buffer *buffer) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("bm_evaluate_object"); - - /* If caller provided a buffer it must be unallocated/zero'd. */ - if ((buffer) && (buffer->length != 0 || buffer->pointer)) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Evaluate Object: - * ---------------- - * The first attempt is just to get the size of the object data - * (that is unless there's no return data, e.g. _INI); the second - * gets the data. - */ - status = acpi_evaluate_object(handle, pathname, arguments, buffer); - if (ACPI_SUCCESS(status)) { - return_ACPI_STATUS(status); - } - else if ((buffer) && (status == AE_BUFFER_OVERFLOW)) { - - /* Gotta allocate -- CALLER MUST FREE! 
*/ - buffer->pointer = acpi_os_callocate(buffer->length); - if (!buffer->pointer) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - /* Re-evaluate -- this time it should work */ - status = acpi_evaluate_object(handle, pathname, - arguments, buffer); - } - - if (ACPI_FAILURE(status)) { - if (status != AE_NOT_FOUND) { - DEBUG_EVAL_ERROR(ACPI_LV_WARN, handle, pathname, - status); - } - if (buffer && buffer->pointer) { - acpi_os_free(buffer->pointer); - buffer->pointer = NULL; - buffer->length = 0; - } - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_evaluate_simple_integer - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_evaluate_simple_integer ( - acpi_handle handle, - acpi_string pathname, - u32 *data) -{ - acpi_status status = AE_OK; - acpi_object *element = NULL; - acpi_buffer buffer; - - FUNCTION_TRACE("bm_evaluate_simple_integer"); - - if (!data) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - MEMSET(&buffer, 0, sizeof(acpi_buffer)); - - /* - * Evaluate Object: - * ---------------- - */ - status = bm_evaluate_object(handle, pathname, NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "failed to evaluate object (%s)\n", - acpi_format_exception(status))); - goto end; - } - - /* - * Validate Data: - * -------------- - */ - status = bm_cast_buffer(&buffer, (void**)&element, - sizeof(acpi_object)); - if (ACPI_FAILURE(status)) { - DEBUG_EVAL_ERROR(ACPI_LV_WARN, handle, pathname, status); - goto end; - } - - if (element->type != ACPI_TYPE_INTEGER) { - status = AE_BAD_DATA; - DEBUG_EVAL_ERROR(ACPI_LV_WARN, handle, pathname, status); - goto end; - } - - *data = element->integer.value; - -end: - acpi_os_free(buffer.pointer); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bm_evaluate_reference_list - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bm_evaluate_reference_list ( - acpi_handle handle, - acpi_string pathname, - BM_HANDLE_LIST *reference_list) -{ - acpi_status status = AE_OK; - acpi_object *package = NULL; - acpi_object *element = NULL; - acpi_handle reference_handle = NULL; - acpi_buffer buffer; - u32 i = 0; - - FUNCTION_TRACE("bm_evaluate_reference_list"); - - if (!reference_list) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - MEMSET(&buffer, 0, sizeof(acpi_buffer)); - - /* - * Evaluate Object: - * ---------------- - */ - status = bm_evaluate_object(handle, pathname, NULL, &buffer); - if (ACPI_FAILURE(status)) { - goto end; - } - - /* - * Validate Package: - * ----------------- - */ - status = bm_cast_buffer(&buffer, (void**)&package, - sizeof(acpi_object)); - if (ACPI_FAILURE(status)) { - DEBUG_EVAL_ERROR(ACPI_LV_WARN, handle, pathname, status); - goto end; - } - - if (package->type != ACPI_TYPE_PACKAGE) { - status = AE_BAD_DATA; - DEBUG_EVAL_ERROR(ACPI_LV_WARN, handle, pathname, status); - goto end; - } - - if (package->package.count > BM_HANDLES_MAX) { - package->package.count = BM_HANDLES_MAX; - } - - /* - * Parse Package Data: - * ------------------- - */ - for (i = 0; i < package->package.count; i++) { - - element = &(package->package.elements[i]); - - if (!element || (element->type != ACPI_TYPE_STRING)) { - status = AE_BAD_DATA; - 
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid element in package (not a device reference).\n")); - DEBUG_EVAL_ERROR (ACPI_LV_WARN, handle, pathname, status); - break; - } - - /* - * Resolve reference string (e.g. "\_PR_.CPU_") to an - * acpi_handle. - */ - status = acpi_get_handle(handle, - element->string.pointer, &reference_handle); - if (ACPI_FAILURE(status)) { - status = AE_BAD_DATA; - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Unable to resolve device reference [%s].\n", element->string.pointer)); - DEBUG_EVAL_ERROR (ACPI_LV_WARN, handle, pathname, status); - break; - } - - /* - * Resolve acpi_handle to BM_HANDLE. - */ - status = bm_get_handle(reference_handle, - &(reference_list->handles[i])); - if (ACPI_FAILURE(status)) { - status = AE_BAD_DATA; - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Unable to resolve device reference for [%p].\n", reference_handle)); - DEBUG_EVAL_ERROR (ACPI_LV_WARN, handle, pathname, status); - break; - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resolved reference [%s]->[%p]->[%02x]\n", element->string.pointer, reference_handle, reference_list->handles[i])); - - (reference_list->count)++; - } - -end: - acpi_os_free(buffer.pointer); - - return_ACPI_STATUS(status); -} - - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/busmgr/Makefile linux.21rc5-ac2/drivers/acpi/ospm/busmgr/Makefile --- linux.21rc5/drivers/acpi/ospm/busmgr/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/busmgr/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -export-objs := bm_osl.o - -O_TARGET := ospm_$(notdir $(CURDIR)).o -obj-m := $(O_TARGET) -EXTRA_CFLAGS += $(ACPI_CFLAGS) -obj-y := $(patsubst %.c,%.o,$(wildcard *.c)) - -include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/button/bn.c linux.21rc5-ac2/drivers/acpi/ospm/button/bn.c --- linux.21rc5/drivers/acpi/ospm/button/bn.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/button/bn.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,507 +0,0 @@ -/***************************************************************************** - * - * Module Name: bn.c - * $Revision: 27 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include <acpi.h> -#include "bn.h" - - -#define _COMPONENT ACPI_BUTTON - MODULE_NAME ("bn") - - -/***************************************************************************** - * Internal Functions - *****************************************************************************/ - -/***************************************************************************** - * - * FUNCTION: bn_print - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Prints out information on a specific button. - * - ****************************************************************************/ - -void -bn_print ( - BN_CONTEXT *button) -{ -#ifdef ACPI_DEBUG - acpi_buffer buffer; - - PROC_NAME("bn_print"); - - if (!button) { - return; - } - - buffer.length = 256; - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return; - } - - /* - * Get the full pathname for this ACPI object. - */ - acpi_get_name(button->acpi_handle, ACPI_FULL_PATHNAME, &buffer); - - /* - * Print out basic button information. - */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - - switch (button->type) { - - case BN_TYPE_POWER_BUTTON: - case BN_TYPE_POWER_BUTTON_FIXED: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| Power_button[%02x]:[%p] %s\n", button->device_handle, button->acpi_handle, (char*)buffer.pointer)); - break; - - case BN_TYPE_SLEEP_BUTTON: - case BN_TYPE_SLEEP_BUTTON_FIXED: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| Sleep_button[%02x]:[%p] %s\n", button->device_handle, button->acpi_handle, (char*)buffer.pointer)); - break; - - case BN_TYPE_LID_SWITCH: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| Lid_switch[%02x]:[%p] %s\n", button->device_handle, button->acpi_handle, (char*)buffer.pointer)); - break; - } - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - - acpi_os_free(buffer.pointer); -#endif /*ACPI_DEBUG*/ - - return; -} - - -/**************************************************************************** - * - * FUNCTION: bn_add_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bn_add_device( - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - BN_CONTEXT *button = NULL; - - FUNCTION_TRACE("bn_add_device"); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Adding button device [%02x].\n", device_handle)); - - if (!context || *context) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid context.\n")); - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Get information on this device. - */ - status = bm_get_device_info( device_handle, &device ); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Allocate a new BN_CONTEXT structure. - */ - button = acpi_os_callocate(sizeof(BN_CONTEXT)); - if (!button) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - button->device_handle = device->handle; - button->acpi_handle = device->acpi_handle; - - /* - * Power Button? - * ------------- - * Either fixed-feature or generic (namespace) types. 
- */ - if (strncmp(device->id.hid, BN_HID_POWER_BUTTON, - sizeof(BM_DEVICE_HID)) == 0) { - - if (device->id.type == BM_TYPE_FIXED_BUTTON) { - - button->type = BN_TYPE_POWER_BUTTON_FIXED; - - /* Register for fixed-feature events. */ - status = acpi_install_fixed_event_handler( - ACPI_EVENT_POWER_BUTTON, bn_notify_fixed, - (void*)button); - } - else { - button->type = BN_TYPE_POWER_BUTTON; - } - - } - - /* - * Sleep Button? - * ------------- - * Either fixed-feature or generic (namespace) types. - */ - else if (strncmp( device->id.hid, BN_HID_SLEEP_BUTTON, - sizeof(BM_DEVICE_HID)) == 0) { - - if (device->id.type == BM_TYPE_FIXED_BUTTON) { - - button->type = BN_TYPE_SLEEP_BUTTON_FIXED; - - /* Register for fixed-feature events. */ - status = acpi_install_fixed_event_handler( - ACPI_EVENT_SLEEP_BUTTON, bn_notify_fixed, - (void*)button); - } - else { - button->type = BN_TYPE_SLEEP_BUTTON; - } - } - - /* - * LID Switch? - * ----------- - */ - else if (strncmp( device->id.hid, BN_HID_LID_SWITCH, - sizeof(BM_DEVICE_HID)) == 0) { - button->type = BN_TYPE_LID_SWITCH; - } - - status = bn_osl_add_device(button); - if (ACPI_FAILURE(status)) { - goto end; - } - - *context = button; - - bn_print(button); - -end: - if (ACPI_FAILURE(status)) { - acpi_os_free(button); - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bn_remove_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bn_remove_device( - void **context) -{ - acpi_status status = AE_OK; - BN_CONTEXT *button = NULL; - - FUNCTION_TRACE("bn_remove_device"); - - if (!context || !*context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - button = (BN_CONTEXT*)*context; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing button device [%02x].\n", button->device_handle)); - - /* - * Unregister for fixed-feature events. - */ - switch (button->type) { - case BN_TYPE_POWER_BUTTON_FIXED: - status = acpi_remove_fixed_event_handler( - ACPI_EVENT_POWER_BUTTON, bn_notify_fixed); - break; - case BN_TYPE_SLEEP_BUTTON_FIXED: - status = acpi_remove_fixed_event_handler( - ACPI_EVENT_SLEEP_BUTTON, bn_notify_fixed); - break; - } - - bn_osl_remove_device(button); - - acpi_os_free(button); - - *context = NULL; - - return_ACPI_STATUS(status); -} - - -/***************************************************************************** - * External Functions - *****************************************************************************/ - -/***************************************************************************** - * - * FUNCTION: bn_initialize - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - - ****************************************************************************/ - -acpi_status -bn_initialize (void) -{ - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("bn_initialize"); - - MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID)); - MEMSET(&driver, 0, sizeof(BM_DRIVER)); - - driver.notify = &bn_notify; - driver.request = &bn_request; - - /* - * Register for power buttons. - */ - MEMCPY(criteria.hid, BN_HID_POWER_BUTTON, sizeof(BN_HID_POWER_BUTTON)); - bm_register_driver(&criteria, &driver); - - /* - * Register for sleep buttons. - */ - MEMCPY(criteria.hid, BN_HID_SLEEP_BUTTON, sizeof(BN_HID_SLEEP_BUTTON)); - bm_register_driver(&criteria, &driver); - - /* - * Register for LID switches. 
- */ - MEMCPY(criteria.hid, BN_HID_LID_SWITCH, sizeof(BN_HID_LID_SWITCH)); - bm_register_driver(&criteria, &driver); - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bn_terminate - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bn_terminate (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("bn_terminate"); - - MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID)); - MEMSET(&driver, 0, sizeof(BM_DRIVER)); - - driver.notify = &bn_notify; - driver.request = &bn_request; - - /* - * Unregister for power buttons. - */ - MEMCPY(criteria.hid, BN_HID_POWER_BUTTON, sizeof(BN_HID_POWER_BUTTON)); - status = bm_unregister_driver(&criteria, &driver); - - /* - * Unregister for sleep buttons. - */ - MEMCPY(criteria.hid, BN_HID_SLEEP_BUTTON, sizeof(BN_HID_SLEEP_BUTTON)); - status = bm_unregister_driver(&criteria, &driver); - - /* - * Unregister for LID switches. - */ - MEMCPY(criteria.hid, BN_HID_LID_SWITCH, sizeof(BN_HID_LID_SWITCH)); - status = bm_unregister_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bn_notify_fixed - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bn_notify_fixed ( - void *context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("bn_notify_fixed"); - - if (!context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Status change event detected.\n")); - - status = bn_osl_generate_event(BN_NOTIFY_STATUS_CHANGE, - ((BN_CONTEXT*)context)); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bn_notify - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bn_notify ( - BM_NOTIFY notify_type, - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("bn_notify"); - - if (!context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - switch (notify_type) { - case BM_NOTIFY_DEVICE_ADDED: - status = bn_add_device(device_handle, context); - break; - - case BM_NOTIFY_DEVICE_REMOVED: - status = bn_remove_device(context); - break; - - case BN_NOTIFY_STATUS_CHANGE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Status change event detected.\n")); - status = bn_osl_generate_event(BN_NOTIFY_STATUS_CHANGE, - ((BN_CONTEXT*)*context)); - break; - - default: - status = AE_SUPPORT; - break; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: bn_request - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -bn_request ( - BM_REQUEST *request, - void *context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("bn_request"); - - /* - * Must have a valid request structure and context. 
- */ - if (!request || !context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Handle Request: - * --------------- - */ - switch (request->command) { - - default: - status = AE_SUPPORT; - break; - } - - request->status = status; - - return_ACPI_STATUS(status); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/button/bn_osl.c linux.21rc5-ac2/drivers/acpi/ospm/button/bn_osl.c --- linux.21rc5/drivers/acpi/ospm/button/bn_osl.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/button/bn_osl.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,311 +0,0 @@ -/****************************************************************************** - * - * Module Name: bn_osl.c - * $Revision: 16 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/proc_fs.h> -#include <acpi.h> -#include "bn.h" - - -MODULE_AUTHOR("Andrew Grover"); -MODULE_DESCRIPTION("ACPI Component Architecture (CA) - Button Driver"); -MODULE_LICENSE("GPL"); - - -#define BN_PROC_ROOT "button" -#define BN_PROC_POWER_BUTTON "power" -#define BN_PROC_SLEEP_BUTTON "sleep" -#define BN_PROC_LID_SWITCH "lid" - -extern struct proc_dir_entry *bm_proc_root; -static struct proc_dir_entry *bn_proc_root = NULL; - - -#define BN_TYPE_UNKNOWN 0 -#define BN_TYPE_FIXED 1 -#define BN_TYPE_GENERIC 2 - -static int bn_power_button = BN_TYPE_UNKNOWN; -static int bn_sleep_button = BN_TYPE_UNKNOWN; -static int bn_lid_switch = BN_TYPE_UNKNOWN; - - -/**************************************************************************** - * - * FUNCTION: bn_osl_add_device - * - ****************************************************************************/ - -acpi_status -bn_osl_add_device( - BN_CONTEXT *button) -{ - acpi_status status = AE_OK; - - if (!button) { - return(AE_BAD_PARAMETER); - } - - switch (button->type) { - - case BN_TYPE_POWER_BUTTON_FIXED: - bn_power_button = BN_TYPE_FIXED; - printk(KERN_INFO "ACPI: Power Button (FF) found\n"); - if (!proc_mkdir(BN_PROC_POWER_BUTTON, bn_proc_root)) { - status = AE_ERROR; - } - break; - - case BN_TYPE_POWER_BUTTON: - /* - * Avoid creating multiple /proc entries when (buggy) ACPI - * BIOS tables erroneously list both fixed- and generic- - * feature buttons. Note that fixed-feature buttons are - * always enumerated first (and there can only be one) so - * we only need to check here. 
- */ - switch (bn_power_button) { - case BN_TYPE_GENERIC: - printk(KERN_WARNING "ACPI: Multiple generic-space power buttons detected, using first\n"); - break; - case BN_TYPE_FIXED: - printk(KERN_WARNING "ACPI: Multiple power buttons detected, ignoring fixed-feature\n"); - default: - printk(KERN_INFO "ACPI: Power Button (CM) found\n"); - bn_power_button = BN_TYPE_GENERIC; - if (!proc_mkdir(BN_PROC_POWER_BUTTON, bn_proc_root)) { - status = AE_ERROR; - } - break; - } - break; - - case BN_TYPE_SLEEP_BUTTON_FIXED: - bn_sleep_button = BN_TYPE_FIXED; - printk(KERN_INFO "ACPI: Sleep Button (FF) found\n"); - if (!proc_mkdir(BN_PROC_SLEEP_BUTTON, bn_proc_root)) { - status = AE_ERROR; - } - break; - - case BN_TYPE_SLEEP_BUTTON: - /* - * Avoid creating multiple /proc entries when (buggy) ACPI - * BIOS tables erroneously list both fixed- and generic- - * feature buttons. Note that fixed-feature buttons are - * always enumerated first (and there can only be one) so - * we only need to check here. - */ - switch (bn_sleep_button) { - case BN_TYPE_GENERIC: - printk(KERN_WARNING "ACPI: Multiple generic-space sleep buttons detected, using first\n"); - break; - case BN_TYPE_FIXED: - printk(KERN_WARNING "ACPI: Multiple sleep buttons detected, ignoring fixed-feature\n"); - default: - bn_sleep_button = BN_TYPE_GENERIC; - printk(KERN_INFO "ACPI: Sleep Button (CM) found\n"); - if (!proc_mkdir(BN_PROC_SLEEP_BUTTON, bn_proc_root)) { - status = AE_ERROR; - } - break; - } - break; - - case BN_TYPE_LID_SWITCH: - if (bn_lid_switch) { - printk(KERN_WARNING "ACPI: Multiple generic-space lid switches detected, using first\n"); - break; - } - bn_lid_switch = BN_TYPE_GENERIC; - printk(KERN_INFO "ACPI: Lid Switch (CM) found\n"); - if (!proc_mkdir(BN_PROC_LID_SWITCH, bn_proc_root)) { - status = AE_ERROR; - } - break; - } - - return(status); -} - - -/**************************************************************************** - * - * FUNCTION: bn_osl_remove_device - * - ****************************************************************************/ - -acpi_status -bn_osl_remove_device ( - BN_CONTEXT *button) -{ - if (!button) { - return(AE_BAD_PARAMETER); - } - - switch (button->type) { - - case BN_TYPE_POWER_BUTTON: - case BN_TYPE_POWER_BUTTON_FIXED: - remove_proc_entry(BN_PROC_POWER_BUTTON, bn_proc_root); - break; - - case BN_TYPE_SLEEP_BUTTON: - case BN_TYPE_SLEEP_BUTTON_FIXED: - remove_proc_entry(BN_PROC_SLEEP_BUTTON, bn_proc_root); - break; - - case BN_TYPE_LID_SWITCH: - remove_proc_entry(BN_PROC_LID_SWITCH, bn_proc_root); - break; - } - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: bn_osl_generate_event - * - ****************************************************************************/ - -acpi_status -bn_osl_generate_event ( - u32 event, - BN_CONTEXT *button) -{ - acpi_status status = AE_OK; - - if (!button) { - return(AE_BAD_PARAMETER); - } - - switch (event) { - - case BN_NOTIFY_STATUS_CHANGE: - - switch(button->type) { - - case BN_TYPE_POWER_BUTTON: - case BN_TYPE_POWER_BUTTON_FIXED: - status = bm_osl_generate_event(button->device_handle, - BN_PROC_ROOT, BN_PROC_POWER_BUTTON, event, 0); - break; - - case BN_TYPE_SLEEP_BUTTON: - case BN_TYPE_SLEEP_BUTTON_FIXED: - status = bm_osl_generate_event(button->device_handle, - BN_PROC_ROOT, BN_PROC_SLEEP_BUTTON, event, 0); - break; - - case BN_TYPE_LID_SWITCH: - status = bm_osl_generate_event(button->device_handle, - BN_PROC_ROOT, BN_PROC_LID_SWITCH, event, 0); - break; - - default: - status = AE_SUPPORT; 
- break; - } - - break; - - default: - return(AE_BAD_PARAMETER); - break; - } - - return(status); -} - - -/**************************************************************************** - * - * FUNCTION: bn_osl_init - * - * PARAMETERS: - * - * RETURN: 0: Success - * - * DESCRIPTION: Module initialization. - * - ****************************************************************************/ - -static int __init -bn_osl_init (void) -{ - acpi_status status = AE_OK; - - /* abort if no busmgr */ - if (!bm_proc_root) - return -ENODEV; - - bn_proc_root = proc_mkdir(BN_PROC_ROOT, bm_proc_root); - if (!bn_proc_root) { - status = AE_ERROR; - } - else { - status = bn_initialize(); - if (ACPI_FAILURE(status)) { - remove_proc_entry(BN_PROC_ROOT, bm_proc_root); - } - } - - return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; -} - - -/**************************************************************************** - * - * FUNCTION: bn_osl_cleanup - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Module cleanup. - * - ****************************************************************************/ - -static void __exit -bn_osl_cleanup (void) -{ - bn_terminate(); - - if (bn_proc_root) { - remove_proc_entry(BN_PROC_ROOT, bm_proc_root); - } - - return; -} - - -module_init(bn_osl_init); -module_exit(bn_osl_cleanup); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/button/Makefile linux.21rc5-ac2/drivers/acpi/ospm/button/Makefile --- linux.21rc5/drivers/acpi/ospm/button/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/button/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,6 +0,0 @@ -O_TARGET := ospm_$(notdir $(CURDIR)).o -obj-m := $(O_TARGET) -EXTRA_CFLAGS += $(ACPI_CFLAGS) -obj-y := $(patsubst %.c,%.o,$(wildcard *.c)) - -include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/ec/ecgpe.c linux.21rc5-ac2/drivers/acpi/ospm/ec/ecgpe.c --- linux.21rc5/drivers/acpi/ospm/ec/ecgpe.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/ec/ecgpe.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,249 +0,0 @@ -/***************************************************************************** - * - * Module Name: ecgpe.c - * $Revision: 28 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include <acpi.h> -#include "ec.h" - -#define _COMPONENT ACPI_EC - MODULE_NAME ("ecgpe") - - -/**************************************************************************** - * - * FUNCTION: ec_query_handler - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -void -ec_query_handler ( - void *context) -{ - EC_CONTEXT *ec = (EC_CONTEXT*)context; - static char object_name[5] = {'_','Q','0','0','\0'}; - const char hex[] = {'0','1','2','3','4','5','6','7','8', - '9','A','B','C','D','E','F'}; - - FUNCTION_TRACE("ec_query_handler"); - - if (!ec) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n")); - return_VOID; - } - - /* - * Evaluate _Qxx: - * -------------- - * Evaluate corresponding _Qxx method. Note that a zero query value - * indicates a spurious EC_SCI (no such thing as _Q00). - */ - object_name[2] = hex[((ec->query_data >> 4) & 0x0F)]; - object_name[3] = hex[(ec->query_data & 0x0F)]; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Evaluating [%s] for ec [%02x].\n", object_name, ec->device_handle)); - - bm_evaluate_object(ec->acpi_handle, object_name, NULL, NULL); - - return_VOID; -} - - -/**************************************************************************** - * - * FUNCTION: ec_gpe_handler - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -void -ec_gpe_handler ( - void *context) -{ - acpi_status status = AE_OK; - EC_CONTEXT *ec = (EC_CONTEXT*)context; - EC_STATUS ec_status = 0; - - FUNCTION_TRACE("ec_gpe_handler"); - - if (!ec) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n")); - return_VOID; - } - - /* TBD: synchronize w/ transaction (ectransx). */ - - /* - * EC_SCI? - * ------- - * Check the EC_SCI bit to see if this is an EC_SCI event. If not (e.g. - * OBF/IBE) just return, as we already poll to detect these events. - */ - acpi_os_read_port(ec->status_port, &ec_status, 8); - if (!(ec_status & EC_FLAG_SCI)) { - return_VOID; - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "EC_SCI event detected on ec [%02x] - running query.\n", ec->device_handle)); - - /* - * Run Query: - * ---------- - * Query the EC to find out which _Qxx method we need to evaluate. - * Note that successful completion of the query causes the EC_SCI - * bit to be cleared (and thus clearing the interrupt source). - */ - status = ec_io_write(ec, ec->command_port, EC_COMMAND_QUERY, - EC_EVENT_OUTPUT_BUFFER_FULL); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to send 'query command' to EC.\n")); - return_VOID; - } - - status = ec_io_read(ec, ec->data_port, &(ec->query_data), - EC_EVENT_NONE); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Error reading query data.\n")); - return_VOID; - } - - /* TBD: un-synchronize w/ transaction (ectransx). */ - - /* - * Spurious EC_SCI? - * ---------------- - */ - if (!ec->query_data) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Spurious EC SCI detected.\n")); - return_VOID; - } - - /* - * Defer _Qxx Execution: - * --------------------- - * Can't evaluate this method now 'cause we're at interrupt-level. 
- */ - status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, - ec_query_handler, ec); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to defer _Qxx method evaluation.\n")); - return_VOID; - } - - return_VOID; -} - - -/**************************************************************************** - * - * FUNCTION: ec_install_gpe_handler - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_install_gpe_handler ( - EC_CONTEXT *ec) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("ec_install_gpe_handler"); - - if (!ec) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Evaluate _GPE: - * -------------- - * Evaluate the "_GPE" object (required) to find out which GPE bit - * is used by this EC to signal events (SCIs). - */ - status = bm_evaluate_simple_integer(ec->acpi_handle, - "_GPE", &(ec->gpe_bit)); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Install GPE Handler: - * -------------------- - * Install a handler for this EC's GPE bit. - */ - status = acpi_install_gpe_handler(ec->gpe_bit, ACPI_EVENT_EDGE_TRIGGERED, - &ec_gpe_handler, ec); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "acpi_install_gpe_handler() failed for GPE bit [%02x] with status [%08x].\n", ec->gpe_bit, status)); - ec->gpe_bit = EC_GPE_UNKNOWN; - return_ACPI_STATUS(status); - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_remove_gpe_handler - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_remove_gpe_handler ( - EC_CONTEXT *ec) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("ec_remove_gpe_handler"); - - if (!ec) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - status = acpi_remove_gpe_handler(ec->gpe_bit, &ec_gpe_handler); - - return_ACPI_STATUS(status); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/ec/ecmain.c linux.21rc5-ac2/drivers/acpi/ospm/ec/ecmain.c --- linux.21rc5/drivers/acpi/ospm/ec/ecmain.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/ec/ecmain.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,498 +0,0 @@ -/***************************************************************************** - * - * Module Name: ecmain.c - * $Revision: 29 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include <acpi.h> -#include "ec.h" - -#define _COMPONENT ACPI_EC - MODULE_NAME ("ecmain") - - -/**************************************************************************** - * Internal Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: ec_print - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Prints out information on a specific ec. - * - ****************************************************************************/ - -void -ec_print ( - EC_CONTEXT *ec) -{ -#ifdef ACPI_DEBUG - acpi_buffer buffer; -#endif /*ACPI_DEBUG*/ - - PROC_NAME("ec_print"); - - if (!ec) { - return; - } - - acpi_os_printf("EC: found, GPE %d\n", ec->gpe_bit); - -#ifdef ACPI_DEBUG - buffer.length = 256; - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return; - } - - /* - * Get the full pathname for this ACPI object. - */ - acpi_get_name(ec->acpi_handle, ACPI_FULL_PATHNAME, &buffer); - - /* - * Print out basic embedded controller information. - */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| Embedded_controller[%02x]:[%p] %s\n", ec->device_handle, ec->acpi_handle, (char*)buffer.pointer)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| gpe_bit[%02x] status/command_port[%02x] data_port[%02x]\n", ec->gpe_bit, ec->status_port, ec->data_port)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - - acpi_os_free(buffer.pointer); -#endif /*ACPI_DEBUG*/ - - return; -} - - -/**************************************************************************** - * - * FUNCTION: ec_get_port_values - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Evaluate _CRS to get the current resources (I/O port - * addresses) for this EC. 
- * - ****************************************************************************/ - -acpi_status -ec_get_port_values( - EC_CONTEXT *ec) -{ - acpi_status status = AE_OK; - acpi_buffer buffer; - acpi_resource *resource = NULL; - - FUNCTION_TRACE("ec_get_port_values"); - - if (!ec) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - buffer.length = 0; - buffer.pointer = NULL; - - status = acpi_get_current_resources(ec->acpi_handle, &buffer); - if (status != AE_BUFFER_OVERFLOW) { - return_ACPI_STATUS(status); - } - - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - status = acpi_get_current_resources(ec->acpi_handle, &buffer); - if (ACPI_FAILURE(status)) { - goto end; - } - - resource = (acpi_resource *) buffer.pointer; - ec->data_port = resource->data.io.min_base_address; - - resource = NEXT_RESOURCE(resource); - - ec->status_port = ec->command_port = - resource->data.io.min_base_address; -end: - acpi_os_free(buffer.pointer); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_add_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_add_device( - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - EC_CONTEXT *ec = NULL; - u8 gpe_handler = FALSE; - u8 space_handler = FALSE; - - FUNCTION_TRACE("ec_add_device"); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Adding EC device [%02x].\n", device_handle)); - - if (!context || *context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Get information on this device. - */ - status = bm_get_device_info(device_handle, &device); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Allocate a new EC_CONTEXT structure. - */ - ec = acpi_os_callocate(sizeof(EC_CONTEXT)); - if (!ec) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - ec->device_handle = device->handle; - ec->acpi_handle = device->acpi_handle; - - /* - * Get the I/O port addresses for the command/status and data ports. - */ - status = ec_get_port_values(ec); - if (ACPI_FAILURE(status)) { - goto end; - } - - /* - * See if we need to obtain the global lock for EC transactions. - */ - status = bm_evaluate_simple_integer(ec->acpi_handle, "_GLK", - &ec->use_global_lock); - if (status == AE_NOT_FOUND) { - ec->use_global_lock = 0; - } - else if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "EC _GLK failed\n")); - goto end; - } - - /* - * Install a handler for servicing this EC's GPE. - */ - status = ec_install_gpe_handler(ec); - if (ACPI_FAILURE(status)) { - goto end; - } - else { - gpe_handler = TRUE; - } - - /* - * Install a handler for servicing this EC's address space. - */ - status = ec_install_space_handler(ec); - if (ACPI_FAILURE(status)) { - goto end; - } - else { - space_handler = TRUE; - } - - /* - * Create a semaphore to serialize EC transactions. - */ - status = acpi_os_create_semaphore(1,1, &(ec->mutex)); - if (ACPI_FAILURE(status)) { - goto end; - } - - /* - * Context now contains information specific to this EC. Note - * that we'll get this pointer back on every ec_request() and - * ec_notify(). 
- */ - *context = ec; - - ec_print(ec); - -end: - if (ACPI_FAILURE(status)) { - - if (gpe_handler) { - ec_remove_gpe_handler(ec); - } - - if (space_handler) { - ec_remove_space_handler(ec); - } - - if (ec->mutex) { - acpi_os_delete_semaphore(ec->mutex); - } - - acpi_os_free(ec); - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_remove_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_remove_device( - void **context) -{ - acpi_status status = AE_OK; - EC_CONTEXT *ec = NULL; - - FUNCTION_TRACE("ec_remove_device"); - - if (!context || !*context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - ec = (EC_CONTEXT*)*context; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing EC device [%02x].\n", ec->device_handle)); - - ec_remove_space_handler(ec); - - ec_remove_gpe_handler(ec); - - if (ec->mutex) { - acpi_os_delete_semaphore(ec->mutex); - } - - acpi_os_free(ec); - - *context = NULL; - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: ec_initialize - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_initialize (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("ec_initialize"); - - MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID)); - MEMSET(&driver, 0, sizeof(BM_DRIVER)); - - /* - * Register driver for embedded controller devices. - */ - MEMCPY(criteria.hid, EC_HID_EC, sizeof(EC_HID_EC)); - - driver.notify = &ec_notify; - driver.request = &ec_request; - - status = bm_register_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_terminate - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_terminate(void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("ec_terminate"); - - MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID)); - MEMSET(&driver, 0, sizeof(BM_DRIVER)); - - /* - * Unregister driver for embedded controller devices. 
- */ - MEMCPY(criteria.hid, EC_HID_EC, sizeof(EC_HID_EC)); - - driver.notify = &ec_notify; - driver.request = &ec_request; - - status = bm_unregister_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_notify - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_notify ( - BM_NOTIFY notify, - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("ec_notify"); - - switch (notify) { - - case BM_NOTIFY_DEVICE_ADDED: - status = ec_add_device(device_handle, context); - break; - - case BM_NOTIFY_DEVICE_REMOVED: - status = ec_remove_device(context); - break; - - default: - status = AE_SUPPORT; - break; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_request - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_request ( - BM_REQUEST *request, - void *context) -{ - acpi_status status = AE_OK; - EC_REQUEST *ec_request = NULL; - EC_CONTEXT *ec = NULL; - - FUNCTION_TRACE("ec_request"); - - /* - * Must have a valid request structure and context. - */ - if (!request || !context) - return_ACPI_STATUS(AE_BAD_PARAMETER); - - /* - * buffer must contain a valid EC_REQUEST structure. - */ - status = bm_cast_buffer(&(request->buffer), (void**)&ec_request, - sizeof(EC_REQUEST)); - if (ACPI_FAILURE(status)) - return_ACPI_STATUS(status); - - /* - * context contains information specific to this EC. - */ - ec = (EC_CONTEXT*)context; - - /* - * Perform the Transaction. - */ - status = ec_transaction(ec, ec_request); - - return_ACPI_STATUS(status); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/ec/ec_osl.c linux.21rc5-ac2/drivers/acpi/ospm/ec/ec_osl.c --- linux.21rc5/drivers/acpi/ospm/ec/ec_osl.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/ec/ec_osl.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ -/***************************************************************************** - * - * Module Name: ec_osl.c - * $Revision: 11 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/proc_fs.h> -#include <linux/sched.h> -#include <acpi.h> -#include "ec.h" - - -MODULE_AUTHOR("Andrew Grover"); -MODULE_DESCRIPTION("ACPI Component Architecture (CA) - Embedded Controller Driver"); -MODULE_LICENSE("GPL"); - -extern struct proc_dir_entry *bm_proc_root; - - -/**************************************************************************** - * - * FUNCTION: ec_osl_init - * - * PARAMETERS: - * - * RETURN: 0: Success - * - * DESCRIPTION: Module initialization. - * - ****************************************************************************/ - -static int __init -ec_osl_init (void) -{ - acpi_status status = AE_OK; - - /* abort if no busmgr */ - if (!bm_proc_root) - return -ENODEV; - - status = ec_initialize(); - - return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; -} - -/**************************************************************************** - * - * FUNCTION: ec_osl_cleanup - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Module cleanup. - * - ****************************************************************************/ - -static void __exit -ec_osl_cleanup(void) -{ - ec_terminate(); - - return; -} - -module_init(ec_osl_init); -module_exit(ec_osl_cleanup); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/ec/ecspace.c linux.21rc5-ac2/drivers/acpi/ospm/ec/ecspace.c --- linux.21rc5/drivers/acpi/ospm/ec/ecspace.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/ec/ecspace.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,192 +0,0 @@ -/***************************************************************************** - * - * Module Name: ecspace.c - * $Revision: 23 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include <acpi.h> -#include "ec.h" - -#define _COMPONENT ACPI_EC - MODULE_NAME ("ecspace") - - -/**************************************************************************** - * - * FUNCTION: ec_space_setup - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_space_setup ( - acpi_handle region_handle, - u32 function, - void *handler_context, - void **return_context) -{ - /* - * The EC object is in the handler context and is needed - * when calling the ec_space_handler. 
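To make the context plumbing concrete: the EC_CONTEXT passed to acpi_install_address_space_handler (see ec_install_space_handler below) reaches ec_space_handler as handler_context on every access, and ec_space_setup merely forwards the same pointer as the region context. A hedged, hypothetical illustration of the handler's calling convention follows; in practice the ACPI interpreter makes this call itself when AML touches an EmbeddedControl operation region, and the offset shown is invented:

	/* Hypothetical helper, not part of the original file: reads one
	 * byte the way the interpreter would for an EmbeddedControl
	 * region access at 'offset' (EC space is byte-wide, 0x00-0xFF). */
	static acpi_status
	example_read_ec_byte (
		EC_CONTEXT *ec,
		u8 offset,
		u32 *value)
	{
		return ec_space_handler(ACPI_READ_ADR_SPACE, offset, 8,
			value, ec, ec);	/* handler + region context */
	}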
- */ - *return_context = handler_context; - - return AE_OK; -} - - -/**************************************************************************** - * - * FUNCTION: ec_space_handler - * - * PARAMETERS: function - Read or Write operation - * address - Where in the space to read or write - * bit_width - Field width in bits (should be 8) - * value - Pointer to in or out value - * context - context pointer - * - * RETURN: - * - * DESCRIPTION: Handler for the Embedded Controller (EC) address space - * (Op Region) - * - ****************************************************************************/ - -acpi_status -ec_space_handler ( - u32 function, - ACPI_PHYSICAL_ADDRESS address, - u32 bit_width, - u32 *value, - void *handler_context, - void *region_context) -{ - acpi_status status = AE_OK; - EC_CONTEXT *ec = NULL; - EC_REQUEST ec_request; - - FUNCTION_TRACE("ec_space_handler"); - - if (address > 0xFF || bit_width != 8 || !value || !handler_context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - ec = (EC_CONTEXT*)handler_context; - - switch (function) { - - case ACPI_READ_ADR_SPACE: - ec_request.command = EC_COMMAND_READ; - ec_request.address = address; - ec_request.data = 0; - break; - - case ACPI_WRITE_ADR_SPACE: - ec_request.command = EC_COMMAND_WRITE; - ec_request.address = address; - ec_request.data = (u8)(*value); - break; - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Received request with invalid function [%X].\n", function)); - return_ACPI_STATUS(AE_BAD_PARAMETER); - break; - } - - /* - * Perform the Transaction. - */ - status = ec_transaction(ec, &ec_request); - if (ACPI_SUCCESS(status)) { - (*value) = (u32)ec_request.data; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_install_space_handler - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_install_space_handler ( - EC_CONTEXT *ec) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("ec_install_space_handler"); - - if (!ec) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - status = acpi_install_address_space_handler (ec->acpi_handle, - ACPI_ADR_SPACE_EC, &ec_space_handler, &ec_space_setup, ec); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_remove_space_handler - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_remove_space_handler ( - EC_CONTEXT *ec) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("ec_remove_space_handler"); - - if (!ec) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - status = acpi_remove_address_space_handler(ec->acpi_handle, - ACPI_ADR_SPACE_EC, &ec_space_handler); - - return_ACPI_STATUS(status); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/ec/ectransx.c linux.21rc5-ac2/drivers/acpi/ospm/ec/ectransx.c --- linux.21rc5/drivers/acpi/ospm/ec/ectransx.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/ec/ectransx.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,343 +0,0 @@ -/***************************************************************************** - * - * Module Name: ectransx.c - * $Revision: 24 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * 
This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include <acpi.h> -#include "ec.h" - -#define _COMPONENT ACPI_EC - MODULE_NAME ("ectransx") - - -/**************************************************************************** - * - * FUNCTION: ec_io_wait - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_io_wait ( - EC_CONTEXT *ec, - EC_EVENT wait_event) -{ - EC_STATUS ec_status = 0; - u32 i = 100; - - if (!ec || ((wait_event != EC_EVENT_OUTPUT_BUFFER_FULL) - && (wait_event != EC_EVENT_INPUT_BUFFER_EMPTY))) { - return(AE_BAD_PARAMETER); - } - - /* - * Wait for Event: - * --------------- - * Poll the EC status register waiting for the event to occur. - * Note that we'll wait a maximum of 1ms in 10us chunks. - */ - switch (wait_event) { - - case EC_EVENT_OUTPUT_BUFFER_FULL: - do { - acpi_os_read_port(ec->status_port, &ec_status, 8); - if (ec_status & EC_FLAG_OUTPUT_BUFFER) { - return(AE_OK); - } - acpi_os_stall(10); - } while (--i>0); - break; - - case EC_EVENT_INPUT_BUFFER_EMPTY: - do { - acpi_os_read_port(ec->status_port, &ec_status, 8); - if (!(ec_status & EC_FLAG_INPUT_BUFFER)) { - return(AE_OK); - } - acpi_os_stall(10); - } while (--i>0); - break; - } - - return(AE_TIME); -} - - -/**************************************************************************** - * - * FUNCTION: ec_io_read - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_io_read ( - EC_CONTEXT *ec, - u32 io_port, - u8 *data, - EC_EVENT wait_event) -{ - acpi_status status = AE_OK; - - if (!ec || !data) { - return(AE_BAD_PARAMETER); - } - - acpi_os_read_port(io_port, (u32*) data, 8); - - if (wait_event) { - status = ec_io_wait(ec, wait_event); - } - - return(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_io_write - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_io_write ( - EC_CONTEXT *ec, - u32 io_port, - u8 data, - EC_EVENT wait_event) -{ - acpi_status status = AE_OK; - - if (!ec) { - return(AE_BAD_PARAMETER); - } - - acpi_os_write_port(io_port, data, 8); - - if (wait_event) { - status = ec_io_wait(ec, wait_event); - } - - return(status); -} - - -/**************************************************************************** - * - * FUNCTION: ec_read - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -ec_read ( - EC_CONTEXT *ec, - u8 address, - u8 *data) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("ec_read"); - - if (!ec || !data) { - 
-        return_ACPI_STATUS(AE_BAD_PARAMETER);
-    }
-
-    if (ec->use_global_lock) {
-        status = acpi_acquire_global_lock();
-        if (ACPI_FAILURE(status)) {
-            ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Could not acquire Global Lock\n"));
-            return_ACPI_STATUS(status);
-        }
-    }
-
-    status = ec_io_write(ec, ec->command_port, EC_COMMAND_READ,
-        EC_EVENT_INPUT_BUFFER_EMPTY);
-    if (ACPI_FAILURE(status)) {
-        ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to send 'read command' to EC.\n"));
-        return_ACPI_STATUS(status);
-    }
-
-    status = ec_io_write(ec, ec->data_port, address,
-        EC_EVENT_OUTPUT_BUFFER_FULL);
-    if (ACPI_FAILURE(status)) {
-        ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to send 'read address' to EC.\n"));
-        return_ACPI_STATUS(status);
-    }
-
-    status = ec_io_read(ec, ec->data_port, data, EC_EVENT_NONE);
-
-    if (ec->use_global_lock) {
-        acpi_release_global_lock();
-    }
-
-    ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Read data [%02x] from address [%02x] on ec [%02x].\n", (*data), address, ec->device_handle));
-
-    return_ACPI_STATUS(status);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    ec_write
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-ec_write (
-    EC_CONTEXT *ec,
-    u8 address,
-    u8 data)
-{
-    acpi_status status = AE_OK;
-
-    FUNCTION_TRACE("ec_write");
-
-    if (!ec)
-        return_ACPI_STATUS(AE_BAD_PARAMETER);
-
-    if (ec->use_global_lock) {
-        status = acpi_acquire_global_lock();
-        if (ACPI_FAILURE(status)) {
-            ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Could not acquire Global Lock\n"));
-            return_ACPI_STATUS(status);
-        }
-    }
-
-    status = ec_io_write(ec, ec->command_port, EC_COMMAND_WRITE,
-        EC_EVENT_INPUT_BUFFER_EMPTY);
-    if (ACPI_FAILURE(status)) {
-        ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to send 'write command' to EC.\n"));
-        return_ACPI_STATUS(status);
-    }
-
-    status = ec_io_write(ec, ec->data_port, address,
-        EC_EVENT_INPUT_BUFFER_EMPTY);
-    if (ACPI_FAILURE(status)) {
-        ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to send 'write address' to EC.\n"));
-        return_ACPI_STATUS(status);
-    }
-
-    status = ec_io_write(ec, ec->data_port, data,
-        EC_EVENT_INPUT_BUFFER_EMPTY);
-    if (ACPI_FAILURE(status)) {
-        ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to send 'write data' to EC.\n"));
-        return_ACPI_STATUS(status);
-    }
-
-    if (ec->use_global_lock) {
-        acpi_release_global_lock();
-    }
-
-    ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Wrote data [%02x] to address [%02x] on ec [%02x].\n", data, address, ec->device_handle));
-
-    return_ACPI_STATUS(status);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    ec_transaction
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-ec_transaction (
-    EC_CONTEXT *ec,
-    EC_REQUEST *request)
-{
-    acpi_status status = AE_OK;
-
-    FUNCTION_TRACE("ec_transaction");
-
-    if (!ec || !request) {
-        return_ACPI_STATUS(AE_BAD_PARAMETER);
-    }
-
-    /*
-     * Obtain mutex to serialize all EC transactions.
-     */
-    status = acpi_os_wait_semaphore(ec->mutex, 1, EC_DEFAULT_TIMEOUT);
-    if (ACPI_FAILURE(status)) {
-        return_ACPI_STATUS(status);
-    }
-
-    /*
-     * Perform the transaction.
-     */
-    switch (request->command) {
-
-    case EC_COMMAND_READ:
-        status = ec_read(ec, request->address, &(request->data));
-        break;
-
-    case EC_COMMAND_WRITE:
-        status = ec_write(ec, request->address, request->data);
-        break;
-
-    default:
-        status = AE_SUPPORT;
-        break;
-    }
-
-    /*
-     * Signal the mutex to indicate transaction completion.
-     */
-    acpi_os_signal_semaphore(ec->mutex, 1);
-
-    return_ACPI_STATUS(status);
-}
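For readers without the surrounding tree: the ec_read()/ec_io_wait() pair deleted above implements the standard ACPI embedded-controller handshake -- write a command byte, wait for the input buffer to drain (IBF clear), exchange data bytes, and wait for the output buffer to fill (OBF set) before reading the result. Below is a minimal, self-contained C sketch of that read path; the ec_model type and its memory-mapped register pointers are hypothetical stand-ins for the acpi_os_read_port()/acpi_os_write_port() calls and port numbers used by the real code, and the global-lock and mutex handling is omitted.

    #include <stdint.h>

    /* Hypothetical register model standing in for the EC I/O ports. */
    struct ec_model {
        volatile uint8_t *status;   /* EC status register  */
        volatile uint8_t *command;  /* EC command register */
        volatile uint8_t *data;     /* EC data register    */
    };

    #define EC_OBF 0x01  /* output buffer full */
    #define EC_IBF 0x02  /* input buffer full  */

    /* Poll until (status & mask) == want; mirrors ec_io_wait() above,
     * which allows at most 1ms in 10us steps before returning AE_TIME. */
    static int ec_wait(struct ec_model *ec, uint8_t mask, uint8_t want)
    {
        int i;
        for (i = 0; i < 100; i++) {
            if ((*ec->status & mask) == want)
                return 0;
            /* the original stalls 10us here via acpi_os_stall(10) */
        }
        return -1;
    }

    /* Read one byte of EC address space (command 0x80 = EC_COMMAND_READ). */
    static int ec_read_sketch(struct ec_model *ec, uint8_t address, uint8_t *out)
    {
        *ec->command = 0x80;
        if (ec_wait(ec, EC_IBF, 0))       /* wait: input buffer empty */
            return -1;
        *ec->data = address;
        if (ec_wait(ec, EC_OBF, EC_OBF))  /* wait: output buffer full */
            return -1;
        *out = *ec->data;
        return 0;
    }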
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/ec/Makefile linux.21rc5-ac2/drivers/acpi/ospm/ec/Makefile
--- linux.21rc5/drivers/acpi/ospm/ec/Makefile	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/ec/Makefile	1970-01-01 01:00:00.000000000 +0100
@@ -1,6 +0,0 @@
-O_TARGET := ospm_$(notdir $(CURDIR)).o
-obj-m := $(O_TARGET)
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
-obj-y := $(patsubst %.c,%.o,$(wildcard *.c))
-
-include $(TOPDIR)/Rules.make
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/include/ac.h linux.21rc5-ac2/drivers/acpi/ospm/include/ac.h
--- linux.21rc5/drivers/acpi/ospm/include/ac.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/include/ac.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,102 +0,0 @@
-/*****************************************************************************
- *
- * Module Name: ac.h
- *   $Revision: 6 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __AC_H__
-#define __AC_H__
-
-#include
-#include
-#include
-
-
-/*****************************************************************************
- *                             Types & Other Defines
- *****************************************************************************/
-
-/*
- * Notifications:
- * --------------
- */
-#define AC_NOTIFY_STATUS_CHANGE ((BM_NOTIFY) 0x80)
-
-/*
- * Hardware IDs:
- * -------------
- */
-#define AC_HID_AC_ADAPTER "ACPI0003"
-
-
-/*
- * Device Context:
- * ---------------
- */
-typedef struct
-{
-    BM_HANDLE device_handle;
-    acpi_handle acpi_handle;
-    char uid[9];
-    u32 is_online;
-} AC_CONTEXT;
-
-
-/*****************************************************************************
- *                             Function Prototypes
- *****************************************************************************/
-
-acpi_status
-ac_initialize (void);
-
-acpi_status
-ac_terminate (void);
-
-acpi_status
-ac_notify (
-    u32 notify_type,
-    u32 device,
-    void **context);
-
-acpi_status
-ac_request(
-    BM_REQUEST *request_info,
-    void *context);
-
-/* AC Adapter Driver OSL */
-
-acpi_status
-ac_osl_add_device (
-    AC_CONTEXT *ac_adapter);
-
-acpi_status
-ac_osl_remove_device (
-    AC_CONTEXT *ac_adapter);
-
-acpi_status
-ac_osl_generate_event (
-    u32 event,
-    AC_CONTEXT *ac_adapter);
-
-
-#endif  /* __AC_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/include/bm.h linux.21rc5-ac2/drivers/acpi/ospm/include/bm.h
--- linux.21rc5/drivers/acpi/ospm/include/bm.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/include/bm.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,583 +0,0 @@
-/*****************************************************************************
- *
- * Module name: bm.h
- *   $Revision: 41 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __BM_H__
-#define __BM_H__
-
-#include
-#include
-
-
-/*****************************************************************************
- *                               Types & Defines
- *****************************************************************************/
-
-/*
- * Output Flags (Debug):
- * ---------------------
- */
-#define BM_PRINT_ALL            (0x00000000)
-#define BM_PRINT_GROUP          (0x00000001)
-#define BM_PRINT_LINKAGE        (0x00000002)
-#define BM_PRINT_IDENTIFICATION (0x00000004)
-#define BM_PRINT_POWER          (0x00000008)
-#define BM_PRINT_PRESENT        (0x00000010)
-
-
-/*
- * BM_COMMAND:
- * -----------
- */
-typedef u32 BM_COMMAND;
-
-#define BM_COMMAND_UNKNOWN          ((BM_COMMAND) 0x00)
-
-#define BM_COMMAND_GET_POWER_STATE  ((BM_COMMAND) 0x01)
-#define BM_COMMAND_SET_POWER_STATE  ((BM_COMMAND) 0x02)
-
-#define BM_COMMAND_DEVICE_SPECIFIC  ((BM_COMMAND) 0x80)
-
-/*
- * BM_NOTIFY:
- * ----------
- * Standard ACPI notification values, from section 5.6.3 of the ACPI 2.0
- * specification.  Note that the Bus Manager internally handles all
- * standard ACPI notifications -- driver modules are never sent these
- * values (see "Bus Manager Notifications", below).
- */
-typedef u32 BM_NOTIFY;
-
-#define BM_NOTIFY_BUS_CHECK           ((BM_NOTIFY) 0x00)
-#define BM_NOTIFY_DEVICE_CHECK        ((BM_NOTIFY) 0x01)
-#define BM_NOTIFY_DEVICE_WAKE         ((BM_NOTIFY) 0x02)
-#define BM_NOTIFY_EJECT_REQUEST       ((BM_NOTIFY) 0x03)
-#define BM_NOTIFY_DEVICE_CHECK_LIGHT  ((BM_NOTIFY) 0x04)
-#define BM_NOTIFY_FREQUENCY_MISMATCH  ((BM_NOTIFY) 0x05)
-#define BM_NOTIFY_BUS_MODE_MISMATCH   ((BM_NOTIFY) 0x06)
-#define BM_NOTIFY_POWER_FAULT         ((BM_NOTIFY) 0x07)
-
-/*
- * These are a higher-level abstraction of ACPI notifications, intended
- * for consumption by driver modules to facilitate Pn_p.
- */
-#define BM_NOTIFY_UNKNOWN             ((BM_NOTIFY) 0x00)
-#define BM_NOTIFY_DEVICE_ADDED        ((BM_NOTIFY) 0x01)
-#define BM_NOTIFY_DEVICE_REMOVED      ((BM_NOTIFY) 0x02)
-
-
-/*
- * BM_HANDLE:
- * ----------
- */
-typedef u32 BM_HANDLE;
-
-#define BM_HANDLE_ROOT     ((BM_HANDLE) 0x00000000)
-#define BM_HANDLE_UNKNOWN  ((BM_HANDLE) 0xFFFFFFFF)
-#define BM_HANDLES_MAX     100
-
-
-/*
- * BM_HANDLE_LIST:
- * ---------------
- */
-typedef struct
-{
-    u32 count;
-    BM_HANDLE handles[BM_HANDLES_MAX];
-} BM_HANDLE_LIST;
-
-
-/*
- * BM_DEVICE_TYPE:
- * ---------------
- */
-typedef u32 BM_DEVICE_TYPE;
-
-#define BM_TYPE_UNKNOWN         ((BM_DEVICE_TYPE) 0x00000000)
-
-#define BM_TYPE_SYSTEM          ((BM_DEVICE_TYPE) 0x00000001)
-#define BM_TYPE_SCOPE           ((BM_DEVICE_TYPE) 0x00000002)
-#define BM_TYPE_PROCESSOR       ((BM_DEVICE_TYPE) 0x00000003)
-#define BM_TYPE_THERMAL_ZONE    ((BM_DEVICE_TYPE) 0x00000004)
-#define BM_TYPE_POWER_RESOURCE  ((BM_DEVICE_TYPE) 0x00000005)
-#define BM_TYPE_DEVICE          ((BM_DEVICE_TYPE) 0x00000006)
-#define BM_TYPE_FIXED_BUTTON    ((BM_DEVICE_TYPE) 0x00000007)
-
-
-/*
- * BM_DEVICE_UID:
- * --------------
- */
-typedef char BM_DEVICE_UID[9];
-
-#define BM_UID_UNKNOWN '0'
-
-
-/*
- * BM_DEVICE_HID:
- * --------------
- */
-typedef char BM_DEVICE_HID[9];
-
-#define BM_HID_UNKNOWN       '\0'
-#define BM_HID_POWER_BUTTON  "PNP0C0C"
-#define BM_HID_SLEEP_BUTTON  "PNP0C0E"
-
-/*
- * BM_DEVICE_ADR:
- * --------------
- */
-typedef u32 BM_DEVICE_ADR;
-
-#define BM_ADDRESS_UNKNOWN 0
-
-
-/*
- * BM_DEVICE_FLAGS:
- * ----------------
- * The encoding of BM_DEVICE_FLAGS is illustrated below.
- * Note that a set bit (1) indicates the property is TRUE
- * (e.g. if bit 0 is set then the device has dynamic status).
- * +--+------------+-+-+-+-+-+-+-+
- * |31| Bits 30:7  |6|5|4|3|2|1|0|
- * +--+------------+-+-+-+-+-+-+-+
- *  |       |       | | | | | | |
- *  |       |       | | | | | | +- Dynamic status?
- *  |       |       | | | | | +--- Identifiable?
- *  |       |       | | | | +----- Configurable?
- *  |       |       | | | +------- Power Control?
- *  |       |       | | +--------- Ejectable?
- *  |       |       | +----------- Docking Station?
- *  |       |       +------------- Fixed-Feature?
- *  |       +--------------------
- *  +---------------------------- Driver Control?
- *
- * Dynamic status:  Device has a _STA object.
- * Identifiable:    Device has a _HID and/or _ADR and possibly other
- *                  identification objects defined.
- * Configurable:    Device has a _CRS and possibly other configuration
- *                  objects defined.
- * Power Control:   Device has a _PR0 and/or _PS0 and possibly other
- *                  power management objects defined.
- * Ejectable:       Device has an _EJD and/or _EJx and possibly other
- *                  dynamic insertion/removal objects defined.
- * Docking Station: Device has a _DCK object defined.
- * Fixed-Feature:   Device does not exist in the namespace; was
- *                  enumerated as a fixed-feature (e.g. power button).
- * Driver Control:  A driver has been installed for this device.
- */
-typedef u32 BM_DEVICE_FLAGS;
-
-#define BM_FLAGS_UNKNOWN          ((BM_DEVICE_FLAGS) 0x00000000)
-
-#define BM_FLAGS_DYNAMIC_STATUS   ((BM_DEVICE_FLAGS) 0x00000001)
-#define BM_FLAGS_IDENTIFIABLE     ((BM_DEVICE_FLAGS) 0x00000002)
-#define BM_FLAGS_CONFIGURABLE     ((BM_DEVICE_FLAGS) 0x00000004)
-#define BM_FLAGS_POWER_CONTROL    ((BM_DEVICE_FLAGS) 0x00000008)
-#define BM_FLAGS_EJECTABLE        ((BM_DEVICE_FLAGS) 0x00000010)
-#define BM_FLAGS_DOCKING_STATION  ((BM_DEVICE_FLAGS) 0x00000020)
-#define BM_FLAGS_FIXED_FEATURE    ((BM_DEVICE_FLAGS) 0x00000040)
-#define BM_FLAGS_DRIVER_CONTROL   ((BM_DEVICE_FLAGS) 0x80000000)
-
-
-/*
- * Device PM Flags:
- * ----------------
- * +-----------+-+-+-+-+-+-+-+
- * | Bits 31:7 |6|5|4|3|2|1|0|
- * +-----------+-+-+-+-+-+-+-+
- *       |      | | | | | | |
- *       |      | | | | | | +- D0 Support?
- *       |      | | | | | +--- D1 Support?
- *       |      | | | | +----- D2 Support?
- *       |      | | | +------- D3 Support?
- *       |      | | +--------- Power State Queriable?
- *       |      | +----------- Inrush Current?
- *       |      +------------- Wake Capable?
- *       +--------------------
- *
- * D0-D3 Support:  Device supports corresponding Dx state.
- * Power State:    Device has a _PSC (current power state) object defined.
- * Inrush Current: Device has an _IRC (inrush current) object defined.
- * Wake Capable:   Device has a _PRW (wake-capable) object defined.
- */
-#define BM_FLAGS_D0_SUPPORT      ((BM_DEVICE_FLAGS) 0x00000001)
-#define BM_FLAGS_D1_SUPPORT      ((BM_DEVICE_FLAGS) 0x00000002)
-#define BM_FLAGS_D2_SUPPORT      ((BM_DEVICE_FLAGS) 0x00000004)
-#define BM_FLAGS_D3_SUPPORT      ((BM_DEVICE_FLAGS) 0x00000008)
-#define BM_FLAGS_POWER_STATE     ((BM_DEVICE_FLAGS) 0x00000010)
-#define BM_FLAGS_INRUSH_CURRENT  ((BM_DEVICE_FLAGS) 0x00000020)
-#define BM_FLAGS_WAKE_CAPABLE    ((BM_DEVICE_FLAGS) 0x00000040)
-
-
-/*
- * BM_DEVICE_STATUS:
- * -----------------
- * The encoding of BM_DEVICE_STATUS is illustrated below.
- * Note that a set bit (1) indicates the property is TRUE
- * (e.g. if bit 0 is set then the device is present).
- * +-----------+-+-+-+-+-+
- * | Bits 31:5 |4|3|2|1|0|
- * +-----------+-+-+-+-+-+
- *       |      | | | | |
- *       |      | | | | +- Present?
- *       |      | | | +--- Enabled?
- *       |      | | +----- Show in UI?
- *       |      | +------- Functioning?
- *       |      +--------- Battery Present?
- *       +----------------
- */
-typedef u32 BM_DEVICE_STATUS;
-
-#define BM_STATUS_UNKNOWN          ((BM_DEVICE_STATUS) 0x00000000)
-#define BM_STATUS_PRESENT          ((BM_DEVICE_STATUS) 0x00000001)
-#define BM_STATUS_ENABLED          ((BM_DEVICE_STATUS) 0x00000002)
-#define BM_STATUS_SHOW_UI          ((BM_DEVICE_STATUS) 0x00000004)
-#define BM_STATUS_FUNCTIONING      ((BM_DEVICE_STATUS) 0x00000008)
-#define BM_STATUS_BATTERY_PRESENT  ((BM_DEVICE_STATUS) 0x00000010)
-#define BM_STATUS_DEFAULT          ((BM_DEVICE_STATUS) 0x0000000F)
-
-
-/*
- * BM_POWER_STATE:
- * ---------------
- */
-typedef u32 BM_POWER_STATE;
-
-
-/*
- * BM_DEVICE_ID:
- * -------------
- */
-typedef struct
-{
-    BM_DEVICE_TYPE type;
-    BM_DEVICE_UID uid;
-    BM_DEVICE_HID hid;
-    BM_DEVICE_ADR adr;
-} BM_DEVICE_ID;
-
-
-/*
- * BM_DEVICE_POWER:
- * ----------------
- * Structure containing basic device power management information.
- */
-typedef struct
-{
-    BM_DEVICE_FLAGS flags;
-    BM_POWER_STATE state;
-    BM_DEVICE_FLAGS dx_supported[ACPI_S_STATE_COUNT];
-} BM_DEVICE_POWER;
-
-
-/*
- * BM_DEVICE:
- * ----------
- */
-typedef struct
-{
-    BM_HANDLE handle;
-    acpi_handle acpi_handle;
-    BM_DEVICE_FLAGS flags;
-    BM_DEVICE_STATUS status;
-    BM_DEVICE_ID id;
-    BM_DEVICE_POWER power;
-} BM_DEVICE;
-
-
-/*
- * BM_SEARCH:
- * ----------
- * Structure used for searching the ACPI Bus Manager's device hierarchy.
- */
-typedef struct
-{
-    BM_DEVICE_ID criteria;
-    BM_HANDLE_LIST results;
-} BM_SEARCH;
-
-
-/*
- * BM_REQUEST:
- * -----------
- * Structure used for sending requests to/through the ACPI Bus Manager.
- */
-typedef struct
-{
-    acpi_status status;
-    BM_COMMAND command;
-    BM_HANDLE handle;
-    acpi_buffer buffer;
-} BM_REQUEST;
-
-
-/*
- * Driver Registration:
- * --------------------
- */
-
-/* Driver Context */
-typedef void * BM_DRIVER_CONTEXT;
-
-/* Notification Callback Function */
-typedef
-acpi_status (*BM_DRIVER_NOTIFY) (
-    BM_NOTIFY notify_type,
-    BM_HANDLE device_handle,
-    BM_DRIVER_CONTEXT *context);
-
-/* Request Callback Function */
-typedef
-acpi_status (*BM_DRIVER_REQUEST) (
-    BM_REQUEST *request,
-    BM_DRIVER_CONTEXT context);
-
-/* Driver Registration */
-typedef struct
-{
-    BM_DRIVER_NOTIFY notify;
-    BM_DRIVER_REQUEST request;
-    BM_DRIVER_CONTEXT context;
-} BM_DRIVER;
-
-
-/*
- * BM_NODE:
- * --------
- * Structure used to maintain the device hierarchy.
- */
-typedef struct _BM_NODE
-{
-    BM_DEVICE device;
-    BM_DRIVER driver;
-    struct _BM_NODE *parent;
-    struct _BM_NODE *next;
-    struct
-    {
-        struct _BM_NODE *head;
-        struct _BM_NODE *tail;
-    } scope;
-} BM_NODE;
-
-
-/*
- * BM_NODE_LIST:
- * -------------
- * Structure used to maintain an array of node pointers.
- */
-typedef struct
-{
-    u32 count;
-    BM_NODE *nodes[BM_HANDLES_MAX];
-} BM_NODE_LIST;
-
-
-/*****************************************************************************
- *                                   Macros
- *****************************************************************************/
-
-/*
- * Device Presence:
- * ----------------
- * Note that status (_STA) means something different for power resources
- * (they're assumed to always be present).
- */
-#define BM_DEVICE_PRESENT(d) ((d->id.type!=BM_TYPE_POWER_RESOURCE)?(d->status & BM_STATUS_PRESENT):TRUE)
-#define BM_NODE_PRESENT(n)   ((n->device.id.type!=BM_TYPE_POWER_RESOURCE)?(n->device.status & BM_STATUS_PRESENT):TRUE)
-
-/*
- * Device Flags:
- * -------------
- */
-#define BM_IS_DRIVER_CONTROL(d) (d->flags & BM_FLAGS_DRIVER_CONTROL)
-#define BM_IS_POWER_CONTROL(d)  (d->flags & BM_FLAGS_POWER_CONTROL)
-
-/*
- * Device Power Flags:
- * -------------------
- */
-#define BM_IS_POWER_STATE(d) (d->power.flags & BM_FLAGS_POWER_STATE)
-
-/*****************************************************************************
- *                             Function Prototypes
- *****************************************************************************/
-
-/* bm.c */
-
-acpi_status
-bm_initialize (void);
-
-acpi_status
-bm_terminate (void);
-
-acpi_status
-bm_get_status (
-    BM_DEVICE *device);
-
-acpi_status
-bm_get_handle (
-    acpi_handle acpi_handle,
-    BM_HANDLE *device_handle);
-
-acpi_status
-bm_get_node (
-    BM_HANDLE device_handle,
-    acpi_handle acpi_handle,
-    BM_NODE **node);
-
-/* bmsearch.c */
-
-acpi_status
-bm_search(
-    BM_HANDLE device_handle,
-    BM_DEVICE_ID *criteria,
-    BM_HANDLE_LIST *results);
-
-/* bmnotify.c */
-
-void
-bm_notify (
-    acpi_handle acpi_handle,
-    u32 notify_value,
-    void *context);
-
-/* bm_request.c */
-
-acpi_status
-bm_request (
-    BM_REQUEST *request_info);
-
-/* bmdriver.c */
-
-acpi_status
-bm_get_device_power_state (
-    BM_HANDLE device_handle,
-    BM_POWER_STATE *state);
-
-acpi_status
-bm_set_device_power_state (
-    BM_HANDLE device_handle,
-    BM_POWER_STATE state);
-
-acpi_status
-bm_get_device_status (
-    BM_HANDLE device_handle,
-    BM_DEVICE_STATUS *device_status);
-
-acpi_status
-bm_get_device_info (
-    BM_HANDLE device_handle,
-    BM_DEVICE **device_info);
-
-acpi_status
-bm_get_device_context (
-    BM_HANDLE device_handle,
-    BM_DRIVER_CONTEXT *context);
-
-acpi_status
-bm_register_driver (
-    BM_DEVICE_ID *criteria,
-    BM_DRIVER *driver);
-
-acpi_status
-bm_unregister_driver (
-    BM_DEVICE_ID *criteria,
-    BM_DRIVER *driver);
-
-/* bmpm.c */
-
-acpi_status
-bm_get_pm_capabilities (
-    BM_NODE *node);
-
-acpi_status
-bm_get_power_state (
-    BM_NODE *node);
-
-acpi_status
-bm_set_power_state (
-    BM_NODE *node,
-    BM_POWER_STATE target_state);
-
-/* bmpower.c */
-
-acpi_status
-bm_pr_initialize (void);
-
-acpi_status
-bm_pr_terminate (void);
-
-/* bmutils.c */
-
-acpi_status
-bm_cast_buffer (
-    acpi_buffer *buffer,
-    void **pointer,
-    u32 length);
-
-acpi_status
-bm_copy_to_buffer (
-    acpi_buffer *buffer,
-    void *data,
-    u32 length);
-
-acpi_status
-bm_extract_package_data (
-    acpi_object *package,
-    acpi_buffer *format,
-    acpi_buffer *buffer);
-
-acpi_status
-bm_evaluate_object (
-    acpi_handle acpi_handle,
-    acpi_string pathname,
-    acpi_object_list *arguments,
-    acpi_buffer *buffer);
-
-acpi_status
-bm_evaluate_simple_integer (
-    acpi_handle acpi_handle,
-    acpi_string pathname,
-    u32 *data);
-
-acpi_status
-bm_evaluate_reference_list (
-    acpi_handle acpi_handle,
-    acpi_string pathname,
-    BM_HANDLE_LIST *reference_list);
-
-/* ACPI Bus Driver OSL */
-
-acpi_status
-bm_osl_generate_event (
-    BM_HANDLE device_handle,
-    char *device_type,
-    char *device_instance,
-    u32 event_type,
-    u32 event_data);
-
-
-#endif  /* __BM_H__ */
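Taken together, the status and flags words above give a driver a quick usability test for a device. The helper below is illustrative only (it is not part of the original tree) and is built purely from the macros and bit definitions in bm.h:

    /* Illustrative: combine the bm.h bit tests defined above.
     * BM_DEVICE_PRESENT() special-cases power resources, which are
     * assumed present regardless of _STA. */
    static int bm_device_usable_sketch(BM_DEVICE *device)
    {
        if (!BM_DEVICE_PRESENT(device))
            return 0;
        return (device->status & BM_STATUS_ENABLED) &&
               (device->status & BM_STATUS_FUNCTIONING);
    }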
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/include/bmpower.h linux.21rc5-ac2/drivers/acpi/ospm/include/bmpower.h
--- linux.21rc5/drivers/acpi/ospm/include/bmpower.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/include/bmpower.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,75 +0,0 @@
-/*****************************************************************************
- *
- * Module name: bmpower.h
- *   $Revision: 9 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __BMPOWER_H__
-#define __BMPOWER_H__
-
-#include "bm.h"
-
-
-/*****************************************************************************
- *                               Types & Defines
- *****************************************************************************/
-
-
-/*
- * BM_POWER_RESOURCE:
- * ------------------
- */
-typedef struct
-{
-    BM_HANDLE device_handle;
-    acpi_handle acpi_handle;
-    BM_POWER_STATE system_level;
-    u32 resource_order;
-    BM_POWER_STATE state;
-    u32 reference_count;
-} BM_POWER_RESOURCE;
-
-
-/*****************************************************************************
- *                             Function Prototypes
- *****************************************************************************/
-
-/* bmpower.c */
-
-acpi_status
-bm_pr_initialize (void);
-
-acpi_status
-bm_pr_terminate (void);
-
-acpi_status
-bm_pr_list_get_state (
-    BM_HANDLE_LIST *resource_list,
-    BM_POWER_STATE *power_state);
-
-acpi_status
-bm_pr_list_transition (
-    BM_HANDLE_LIST *current_list,
-    BM_HANDLE_LIST *target_list);
-
-
-#endif  /* __BMPOWER_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/include/bn.h linux.21rc5-ac2/drivers/acpi/ospm/include/bn.h
--- linux.21rc5/drivers/acpi/ospm/include/bn.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/include/bn.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,122 +0,0 @@
-/******************************************************************************
- *
- * Module Name: bn.h
- *   $Revision: 12 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __BN_H__
-#define __BN_H__
-
-#include
-#include
-#include
-
-
-/*****************************************************************************
- *                             Types & Other Defines
- *****************************************************************************/
-
-/*
- * Notifications:
- * ---------------------
- */
-#define BN_NOTIFY_STATUS_CHANGE ((BM_NOTIFY) 0x80)
-
-
-/*
- * Types:
- * ------
- */
-#define BN_TYPE_POWER_BUTTON        0x01
-#define BN_TYPE_POWER_BUTTON_FIXED  0x02
-#define BN_TYPE_SLEEP_BUTTON        0x03
-#define BN_TYPE_SLEEP_BUTTON_FIXED  0x04
-#define BN_TYPE_LID_SWITCH          0x05
-
-
-/*
- * Hardware IDs:
- * -------------
- * TBD: Power and Sleep button HIDs also exist in .  Should all
- *      HIDs (ACPI well-known devices) exist in one place (e.g.
- *      acpi_hid.h)?
- */
-#define BN_HID_POWER_BUTTON  "PNP0C0C"
-#define BN_HID_SLEEP_BUTTON  "PNP0C0E"
-#define BN_HID_LID_SWITCH    "PNP0C0D"
-
-
-/*
- * Device Context:
- * ---------------
- */
-typedef struct
-{
-    BM_HANDLE device_handle;
-    acpi_handle acpi_handle;
-    u32 type;
-} BN_CONTEXT;
-
-
-/******************************************************************************
- *                             Function Prototypes
- *****************************************************************************/
-
-acpi_status
-bn_initialize (void);
-
-acpi_status
-bn_terminate (void);
-
-acpi_status
-bn_notify_fixed (
-    void *context);
-
-acpi_status
-bn_notify (
-    u32 notify_type,
-    u32 device,
-    void **context);
-
-acpi_status
-bn_request(
-    BM_REQUEST *request_info,
-    void *context);
-
-/* Button OSL */
-
-acpi_status
-bn_osl_add_device (
-    BN_CONTEXT *button);
-
-acpi_status
-bn_osl_remove_device (
-    BN_CONTEXT *button);
-
-acpi_status
-bn_osl_generate_event (
-    u32 event,
-    BN_CONTEXT *button);
-
-
-#endif  /* __BN_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/include/bt.h linux.21rc5-ac2/drivers/acpi/ospm/include/bt.h
--- linux.21rc5/drivers/acpi/ospm/include/bt.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/include/bt.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,164 +0,0 @@
-/******************************************************************************
- *
- * Module Name: bt.h
- *   $Revision: 18 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __BT_H__
-#define __BT_H__
-
-#include
-#include
-#include
-
-
-/*****************************************************************************
- *                             Types & Other Defines
- *****************************************************************************/
-
-/*! [Begin] no source code translation */
-
-#define BT_UNKNOWN            0xFFFFFFFF
-#define BT_POWER_UNITS_DEFAULT  "?"
-#define BT_POWER_UNITS_WATTS    "mW"
-#define BT_POWER_UNITS_AMPS     "mA"
-
-/*! [End] no source code translation !*/
-
-/*
- * Battery Notifications:
- * ----------------------
- */
-#define BT_NOTIFY_STATUS_CHANGE       ((BM_NOTIFY) 0x80)
-#define BT_NOTIFY_INFORMATION_CHANGE  ((BM_NOTIFY) 0x81)
-
-
-/*
- * Hardware IDs:
- * -------------
- */
-#define BT_HID_CM_BATTERY "PNP0C0A"
-
-
-/*
- * BT_CM_BATTERY_INFO:
- * -------------------
- */
-typedef struct
-{
-    acpi_integer power_unit;
-    acpi_integer design_capacity;
-    acpi_integer last_full_capacity;
-    acpi_integer battery_technology;
-    acpi_integer design_voltage;
-    acpi_integer design_capacity_warning;
-    acpi_integer design_capacity_low;
-    acpi_integer battery_capacity_granularity_1;
-    acpi_integer battery_capacity_granularity_2;
-    acpi_string model_number;
-    acpi_string serial_number;
-    acpi_string battery_type;
-    acpi_string oem_info;
-
-} BT_BATTERY_INFO;
-
-
-/*
- * BT_CM_BATTERY_STATUS:
- * ---------------------
- */
-typedef struct
-{
-    acpi_integer state;
-    acpi_integer present_rate;
-    acpi_integer remaining_capacity;
-    acpi_integer present_voltage;
-
-} BT_BATTERY_STATUS;
-
-
-/*
- * BT_CONTEXT:
- * -----------
- */
-typedef struct
-{
-    BM_HANDLE device_handle;
-    acpi_handle acpi_handle;
-    char uid[9];
-    acpi_string power_units;
-    u8 is_present;
-
-} BT_CONTEXT;
-
-
-/*****************************************************************************
- *                             Function Prototypes
- *****************************************************************************/
-
-/* bt.c */
-
-acpi_status
-bt_initialize (void);
-
-acpi_status
-bt_terminate (void);
-
-acpi_status
-bt_notify (
-    u32 notify_type,
-    u32 device,
-    void **context);
-
-acpi_status
-bt_request(
-    BM_REQUEST *request_info,
-    void *context);
-
-acpi_status
-bt_get_status (
-    BT_CONTEXT *battery,
-    BT_BATTERY_STATUS **battery_status);
-
-acpi_status
-bt_get_info (
-    BT_CONTEXT *battery,
-    BT_BATTERY_INFO **battery_info);
-
-/* Battery OSL */
-
-acpi_status
-bt_osl_add_device (
-    BT_CONTEXT *battery);
-
-acpi_status
-bt_osl_remove_device (
-    BT_CONTEXT *battery);
-
-acpi_status
-bt_osl_generate_event (
-    u32 event,
-    BT_CONTEXT *battery);
-
-
-#endif  /* __BT_H__ */
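The two structures above split battery data the same way the ACPI _BIF and _BST objects do: static design data versus volatile charge state. A remaining-charge percentage therefore combines one field from each. The helper below is a hypothetical example, not code from this tree, and assumes both capacities are expressed in the same unit (the mW- or mA-based unit advertised in power_units):

    /* Hypothetical: percentage of a full charge, from the fields above. */
    static u32 bt_remaining_percent_sketch(BT_BATTERY_INFO *info,
                                           BT_BATTERY_STATUS *status)
    {
        if (!info || !status || !info->last_full_capacity)
            return 0;   /* unknown, or would divide by zero */
        return (u32)((status->remaining_capacity * 100) /
                     info->last_full_capacity);
    }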
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/include/ec.h linux.21rc5-ac2/drivers/acpi/ospm/include/ec.h
--- linux.21rc5/drivers/acpi/ospm/include/ec.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/include/ec.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,202 +0,0 @@
-/*****************************************************************************
- *
- * Module Name: ec.h
- *   $Revision: 19 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __EC_H__
-#define __EC_H__
-
-#include
-#include
-#include
-#include
-#include
-
-
-/*****************************************************************************
- *                             Types & Other Defines
- *****************************************************************************/
-
-#define EC_DEFAULT_TIMEOUT           1000 /* 1 second */
-#define EC_GPE_UNKNOWN               0xFFFFFFFF
-#define EC_PORT_UNKNOWN              0x00000000
-#define EC_BURST_ENABLE_ACKNOWLEDGE  0x90
-
-
-/*
- * Commands:
- * ---------
- */
-typedef u8 EC_COMMAND;
-
-#define EC_COMMAND_UNKNOWN  ((EC_COMMAND) 0x00)
-#define EC_COMMAND_READ     ((EC_COMMAND) 0x80)
-#define EC_COMMAND_WRITE    ((EC_COMMAND) 0x81)
-#define EC_COMMAND_QUERY    ((EC_COMMAND) 0x84)
-
-
-/*
- * EC_STATUS:
- * ----------
- * The encoding of the EC status register is illustrated below.
- * Note that a set bit (1) indicates the property is TRUE
- * (e.g. if bit 0 is set then the output buffer is full).
- * +-+-+-+-+-+-+-+-+
- * |7|6|5|4|3|2|1|0|
- * +-+-+-+-+-+-+-+-+
- *  | | | | | | | |
- *  | | | | | | | +- Output Buffer Full (OBF)?
- *  | | | | | | +--- Input Buffer Full (IBF)?
- *  | | | | | +-----
- *  | | | | +------- data Register is command Byte?
- *  | | | +--------- Burst Mode Enabled?
- *  | | +----------- SCI event?
- *  | +------------- SMI event?
- *  +---------------
- *
- */
-typedef u32 EC_STATUS;
-
-#define EC_FLAG_OUTPUT_BUFFER  ((EC_STATUS) 0x01)
-#define EC_FLAG_INPUT_BUFFER   ((EC_STATUS) 0x02)
-#define EC_FLAG_BURST_MODE     ((EC_STATUS) 0x10)
-#define EC_FLAG_SCI            ((EC_STATUS) 0x20)
-
-
-/*
- * EC_EVENT:
- * ---------
- */
-typedef u32 EC_EVENT;
-
-#define EC_EVENT_UNKNOWN             ((EC_EVENT) 0x00)
-#define EC_EVENT_NONE                ((EC_EVENT) 0x00)
-#define EC_EVENT_OUTPUT_BUFFER_FULL  ((EC_EVENT) 0x01)
-#define EC_EVENT_INPUT_BUFFER_EMPTY  ((EC_EVENT) 0x02)
-#define EC_EVENT_SCI                 ((EC_EVENT) 0x03)
-
-
-/*
- * Hardware IDs:
- * -------------
- */
-#define EC_HID_EC "PNP0C09"
-
-
-/*
- * EC_REQUEST:
- * -----------
- */
-typedef struct
-{
-    EC_COMMAND command;
-    u8 address;
-    u8 data;
-} EC_REQUEST;
-
-
-/*
- * Device Context:
- * ---------------
- */
-typedef struct
-{
-    BM_HANDLE device_handle;
-    acpi_handle acpi_handle;
-    u32 gpe_bit;
-    u32 status_port;
-    u32 command_port;
-    u32 data_port;
-    u32 use_global_lock;
-    u8 query_data;
-    acpi_handle mutex;
-} EC_CONTEXT;
-
-
-/*****************************************************************************
- *                             Function Prototypes
- *****************************************************************************/
-
-/* ec.c */
-
-acpi_status
-ec_initialize(void);
-
-acpi_status
-ec_terminate(void);
-
-acpi_status
-ec_notify (
-    u32 notify_type,
-    u32 device,
-    void **context);
-
-acpi_status
-ec_request(
-    BM_REQUEST *request_info,
-    void *context);
-
-/* ectransx.c */
-
-acpi_status
-ec_transaction (
-    EC_CONTEXT *ec,
-    EC_REQUEST *ec_request);
-
-acpi_status
-ec_io_read (
-    EC_CONTEXT *ec,
-    u32 io_port,
-    u8 *data,
-    EC_EVENT wait_event);
-
-acpi_status
-ec_io_write (
-    EC_CONTEXT *ec,
-    u32 io_port,
-    u8 data,
-    EC_EVENT wait_event);
-
-/* ecgpe.c */
-
-acpi_status
-ec_install_gpe_handler (
-    EC_CONTEXT *ec);
-
-acpi_status
-ec_remove_gpe_handler (
-    EC_CONTEXT *ec);
-
-/* ecspace.c */
-
-acpi_status
-ec_install_space_handler (
-    EC_CONTEXT *ec);
-
-acpi_status
-ec_remove_space_handler (
-    EC_CONTEXT *ec);
-
-
-#endif  /* __EC_H__ */
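The EC_STATUS bit layout diagrammed above is exactly what ec_io_wait() in ectransx.c polls; only four of the eight bits are given names here. A trivial decode, illustrative only and not part of the original tree:

    /* Illustrative decode of the EC status byte using the flags above. */
    static void ec_print_status_sketch(EC_STATUS status)
    {
        printk("EC status: OBF=%d IBF=%d BURST=%d SCI=%d\n",
               (status & EC_FLAG_OUTPUT_BUFFER) ? 1 : 0,
               (status & EC_FLAG_INPUT_BUFFER)  ? 1 : 0,
               (status & EC_FLAG_BURST_MODE)    ? 1 : 0,
               (status & EC_FLAG_SCI)           ? 1 : 0);
    }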
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/include/pr.h linux.21rc5-ac2/drivers/acpi/ospm/include/pr.h
--- linux.21rc5/drivers/acpi/ospm/include/pr.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/include/pr.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,265 +0,0 @@
-/******************************************************************************
- *
- * Module Name: processor.h
- *   $Revision: 13 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __PR_H__
-#define __PR_H__
-
-#include
-
-
-/*****************************************************************************
- *                             Types & Other Defines
- *****************************************************************************/
-
-
-#define PR_MAX_POWER_STATES     4
-#define PR_MAX_THROTTLE_STATES  8
-#define PR_MAX_PERF_STATES      32
-#define PR_MAX_C2_LATENCY       100
-#define PR_MAX_C3_LATENCY       1000
-
-
-/*
- * Commands:
- * ---------
- */
-#define PR_COMMAND_GET_POWER_INFO  ((BM_COMMAND) 0x80)
-#define PR_COMMAND_SET_POWER_INFO  ((BM_COMMAND) 0x81)
-#define PR_COMMAND_GET_PERF_INFO   ((BM_COMMAND) 0x82)
-#define PR_COMMAND_GET_PERF_STATE  ((BM_COMMAND) 0x83)
-#define PR_COMMAND_SET_PERF_LIMIT  ((BM_COMMAND) 0x84)
-
-
-/*
- * Notifications:
- * --------------
- */
-#define PR_NOTIFY_PERF_STATES   ((BM_NOTIFY) 0x80)
-#define PR_NOTIFY_POWER_STATES  ((BM_NOTIFY) 0x81)
-
-
-/*
- * Performance Control:
- * --------------------
- */
-#define PR_PERF_DEC  0x00
-#define PR_PERF_INC  0x01
-#define PR_PERF_MAX  0xFF
-
-
-/*
- * Power States:
- * -------------
- */
-#define PR_C0  0x00
-#define PR_C1  0x01
-#define PR_C2  0x02
-#define PR_C3  0x03
-
-#define PR_C1_FLAG  0x01;
-#define PR_C2_FLAG  0x02;
-#define PR_C3_FLAG  0x04;
-
-
-/*
- * PR_CX_POLICY_VALUES:
- * --------------------
- */
-typedef struct
-{
-    u32 time_threshold;
-    u32 count_threshold;
-    u32 bm_threshold;
-    u32 target_state;
-    u32 count;
-} PR_CX_POLICY_VALUES;
-
-
-/*
- * PR_CX:
- * ------
- */
-typedef struct
-{
-    u32 latency;
-    u32 utilization;
-    u8 is_valid;
-    PR_CX_POLICY_VALUES promotion;
-    PR_CX_POLICY_VALUES demotion;
-} PR_CX;
-
-
-/*
- * PR_POWER:
- * ---------
- */
-typedef struct
-{
-    ACPI_PHYSICAL_ADDRESS p_lvl2;
-    ACPI_PHYSICAL_ADDRESS p_lvl3;
-    u32 bm_activity;
-    u32 active_state;
-    u32 default_state;
-    u32 busy_metric;
-    u32 state_count;
-    PR_CX state[PR_MAX_POWER_STATES];
-} PR_POWER;
-
-
-/*
- * PR_PERFORMANCE_STATE:
- * ---------------------
- */
-typedef struct
-{
-    u32 performance;
-    u32 power;
-} PR_PERFORMANCE_STATE;
-
-
-/*
- * PR_PERFORMANCE:
- * ---------------
- */
-typedef struct
-{
-    u32 active_state;
-    u32 thermal_limit;
-    u32 power_limit;
-    u32 state_count;
-    PR_PERFORMANCE_STATE state[PR_MAX_PERF_STATES];
-} PR_PERFORMANCE;
-
-
-/*
- * PR_PBLOCK:
- * ----------
- */
-typedef struct
-{
-    u32 length;
-    ACPI_PHYSICAL_ADDRESS address;
-} PR_PBLOCK;
-
-
-/*
- * PR_CONTEXT:
- * -----------
- */
-typedef struct
-{
-    BM_HANDLE device_handle;
-    acpi_handle acpi_handle;
-    u32 uid;
-    PR_PBLOCK pblk;
-    PR_POWER power;
-    PR_PERFORMANCE performance;
-} PR_CONTEXT;
-
-
-/******************************************************************************
- *                             Function Prototypes
- *****************************************************************************/
-
-/* processor.c */
-
-acpi_status
-pr_initialize(void);
-
-acpi_status
-pr_terminate(void);
-
-acpi_status
-pr_notify (
-    BM_NOTIFY notify_type,
-    BM_HANDLE device_handle,
-    void **context);
-
-acpi_status
-pr_request(
-    BM_REQUEST *request,
-    void *context);
-
-/* prpower.c */
-
-void
-pr_power_idle (void);
-
-acpi_status
-pr_power_add_device (
-    PR_CONTEXT *processor);
-
-acpi_status
-pr_power_remove_device (
-    PR_CONTEXT *processor);
-
-acpi_status
-pr_power_initialize (void);
-
-acpi_status
-pr_power_terminate (void);
-
-/* prperf.c */
-
-acpi_status
-pr_perf_get_state (
-    PR_CONTEXT *processor,
-    u32 *state);
-
-acpi_status
-pr_perf_set_state (
-    PR_CONTEXT *processor,
-    u32 state);
-
-acpi_status
-pr_perf_set_limit (
-    PR_CONTEXT *processor,
-    u32 limit);
-
-acpi_status
-pr_perf_add_device (
-    PR_CONTEXT *processor);
-
-acpi_status
-pr_perf_remove_device (
-    PR_CONTEXT *processor);
-
-/* Processor Driver OSL */
-
-acpi_status
-pr_osl_add_device (
-    PR_CONTEXT *processor);
-
-acpi_status
-pr_osl_remove_device (
-    PR_CONTEXT *processor);
-
-acpi_status
-pr_osl_generate_event (
-    u32 event,
-    PR_CONTEXT *processor);
-
-
-#endif  /* __PR_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/include/sm.h linux.21rc5-ac2/drivers/acpi/ospm/include/sm.h
--- linux.21rc5/drivers/acpi/ospm/include/sm.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/include/sm.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,91 +0,0 @@
-/*****************************************************************************
- *
- * Module Name: sm.h
- *   $Revision: 3 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __SM_H__
-#define __SM_H__
-
-#include
-#include
-#include
-
-
-/*****************************************************************************
- *                             Types & Other Defines
- *****************************************************************************/
-
-#define SM_MAX_SYSTEM_STATES 6 /* S0-S5 */
-
-
-/*
- * Device Context:
- * ---------------
- */
-typedef struct
-{
-    BM_HANDLE device_handle;
-    acpi_handle acpi_handle;
-    u8 states[SM_MAX_SYSTEM_STATES];
-} SM_CONTEXT;
-
-
-/*****************************************************************************
- *                             Function Prototypes
- *****************************************************************************/
-
-acpi_status
-sm_initialize (void);
-
-acpi_status
-sm_terminate (void);
-
-acpi_status
-sm_notify (
-    u32 notify_type,
-    u32 device,
-    void **context);
-
-acpi_status
-sm_request(
-    BM_REQUEST *request_info,
-    void *context);
-
-/* System Driver OSL */
-
-acpi_status
-sm_osl_add_device (
-    SM_CONTEXT *system);
-
-acpi_status
-sm_osl_remove_device (
-    SM_CONTEXT *system);
-
-acpi_status
-sm_osl_generate_event (
-    u32 event,
-    SM_CONTEXT *system);
-
-
-#endif  /* __SM_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/include/tz.h linux.21rc5-ac2/drivers/acpi/ospm/include/tz.h
--- linux.21rc5/drivers/acpi/ospm/include/tz.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/include/tz.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,252 +0,0 @@
-/*****************************************************************************
- *
- * Module Name: tz.h
- *   $Revision: 24 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __TZ_H__
-#define __TZ_H__
-
-/* TBD: Linux-specific */
-#include
-#include
-
-#include
-#include
-
-
-/*****************************************************************************
- *                             Types & Other Defines
- *****************************************************************************/
-
-#define TZ_MAX_THRESHOLDS         12  /* _AC0 through _AC9 + _CRT + _PSV */
-#define TZ_MAX_ACTIVE_THRESHOLDS  10  /* _AC0 through _AC9 */
-#define TZ_MAX_COOLING_DEVICES    10  /* TBD: Make size dynamic */
-
-
-/*
- * Notifications:
- * --------------
- */
-#define TZ_NOTIFY_TEMPERATURE_CHANGE   ((BM_NOTIFY) 0x80)
-#define TZ_NOTIFY_THRESHOLD_CHANGE     ((BM_NOTIFY) 0x81)
-#define TZ_NOTIFY_DEVICE_LISTS_CHANGE  ((BM_NOTIFY) 0x82)
-
-
-/*
- * TZ_THRESHOLD_TYPE:
- * ------------------
- */
-typedef u32 TZ_THRESHOLD_TYPE;
-
-#define TZ_THRESHOLD_UNKNOWN   ((TZ_THRESHOLD_TYPE) 0x00)
-#define TZ_THRESHOLD_CRITICAL  ((TZ_THRESHOLD_TYPE) 0x01)
-#define TZ_THRESHOLD_PASSIVE   ((TZ_THRESHOLD_TYPE) 0x02)
-#define TZ_THRESHOLD_ACTIVE    ((TZ_THRESHOLD_TYPE) 0x03)
-
-
-/*
- * TZ_COOLING_STATE:
- * -----------------
- */
-typedef u32 TZ_COOLING_STATE;
-
-#define TZ_COOLING_UNKNOWN   ((TZ_COOLING_STATE) 0x00)
-#define TZ_COOLING_ENABLED   ((TZ_COOLING_STATE) 0x01)
-#define TZ_COOLING_DISABLED  ((TZ_COOLING_STATE) 0x02)
-
-
-/*
- * TZ_COOLING_MODE:
- * ----------------
- */
-typedef u32 TZ_COOLING_MODE;
-
-#define TZ_COOLING_MODE_ACTIVE   ((TZ_COOLING_MODE) 0x00)
-#define TZ_COOLING_MODE_PASSIVE  ((TZ_COOLING_MODE) 0x01)
-
-
-/*
- * Thermal State:
- * --------------
- * The encoding of TZ_STATE is illustrated below.
- * Note that a set bit (1) indicates the property is TRUE
- * (e.g. if bit 0 is set then the device has dynamic status).
- * No bits set indicates an OK cooling state.
- * +--+--+--+-----------+----------+
- * |31|30|29| Bits 27:4 | Bits 3:0 |
- * +--+--+--+-----------+----------+
- *  |  |  |       |          |
- *  |  |  |       |          +------ Active Index
- *  |  |  |       +-----------------
- *  |  |  +------------------------- Active
- *  |  +---------------------------- Passive
- *  +------------------------------- Critical
- *
- * Active Index: Value representing the level of active cooling
- *               presently applied (e.g. 0=_AL0, 9=_AL9).  Only
- *               valid when 'Active' is set.
- * Active:       If set, indicates that the system temperature
- *               has crossed at least one active threshold (_ALx).
- * Passive:      If set, indicates that the system temperature
- *               has crossed the passive threshold (_PSL).
- * Critical:     If set, indicates that the system temperature
- *               has crossed the critical threshold (_CRT).
- */
-typedef u32 TZ_STATE;
-
-#define TZ_STATE_OK        ((TZ_STATE) 0x00000000)
-#define TZ_STATE_HOT       ((TZ_STATE) 0x10000000)
-#define TZ_STATE_ACTIVE    ((TZ_STATE) 0x20000000)
-#define TZ_STATE_PASSIVE   ((TZ_STATE) 0x40000000)
-#define TZ_STATE_CRITICAL  ((TZ_STATE) 0x80000000)
-
-typedef struct {
-    u32 temperature;
-} TZ_CRITICAL_THRESHOLD;
-
-typedef struct {
-    u8 is_valid;
-    u32 temperature;
-} TZ_HOT_THRESHOLD;
-
-typedef struct {
-    u8 is_valid;
-    u32 temperature;
-    u32 tc1;
-    u32 tc2;
-    u32 tsp;
-    BM_HANDLE_LIST devices;
-} TZ_PASSIVE_THRESHOLD;
-
-typedef struct {
-    u8 is_valid;
-    u32 temperature;
-    TZ_COOLING_STATE cooling_state;
-    BM_HANDLE_LIST devices;
-} TZ_ACTIVE_THRESHOLD;
-
-typedef struct {
-    TZ_CRITICAL_THRESHOLD critical;
-    TZ_HOT_THRESHOLD hot;
-    TZ_PASSIVE_THRESHOLD passive;
-    TZ_ACTIVE_THRESHOLD active[TZ_MAX_ACTIVE_THRESHOLDS];
-} TZ_THRESHOLDS;
-
-/*
- * TZ_POLICY:
- * ---------
- */
-typedef struct {
-    u32 temperature;
-    TZ_STATE state;
-    TZ_COOLING_MODE cooling_mode;
-    u32 polling_freq;
-    TZ_THRESHOLDS thresholds;
-    struct timer_list timer;
-} TZ_POLICY;
-
-
-/*
- * TZ_CONTEXT:
- * -----------
- */
-typedef struct {
-    BM_HANDLE device_handle;
-    acpi_handle acpi_handle;
-    char uid[9];
-    TZ_POLICY policy;
-} TZ_CONTEXT;
-
-
-/*****************************************************************************
- *                             Function Prototypes
- *****************************************************************************/
-
-/* tz.c */
-
-acpi_status
-tz_initialize (void);
-
-acpi_status
-tz_terminate (void);
-
-acpi_status
-tz_notify (
-    BM_NOTIFY notify_type,
-    BM_HANDLE device_handle,
-    BM_DRIVER_CONTEXT *context);
-
-acpi_status
-tz_request (
-    BM_REQUEST *request,
-    BM_DRIVER_CONTEXT context);
-
-acpi_status
-tz_get_temperature (
-    TZ_CONTEXT *tz);
-
-acpi_status
-tz_get_thresholds (
-    TZ_CONTEXT *tz);
-
-acpi_status
-tz_set_cooling_preference (
-    TZ_CONTEXT *tz,
-    TZ_COOLING_MODE cooling_mode);
-
-void
-tz_print (
-    TZ_CONTEXT *tz);
-
-/* tzpolicy.c */
-
-acpi_status
-tz_policy_add_device (
-    TZ_CONTEXT *tz);
-
-acpi_status
-tz_policy_remove_device (
-    TZ_CONTEXT *tz);
-
-void
-tz_policy_check (
-    void *context);
-
-/* tz_osl.c */
-
-acpi_status
-tz_osl_add_device (
-    TZ_CONTEXT *tz);
-
-acpi_status
-tz_osl_remove_device (
-    TZ_CONTEXT *tz);
-
-acpi_status
-tz_osl_generate_event (
-    u32 event,
-    TZ_CONTEXT *tz);
-
-
-#endif  /* __TZ_H__ */
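TZ_STATE packs the whole thermal situation into one word, as the diagram earlier in tz.h describes: one bit per trip-point class in the high byte and, while TZ_STATE_ACTIVE is set, the current active-cooling level (_ALx index) in bits 3:0. Unpacking it is a matter of masking. An illustrative decode, not code from this tree:

    /* Illustrative: unpack the TZ_STATE word defined above. */
    static void tz_report_sketch(TZ_STATE state)
    {
        if (state == TZ_STATE_OK) {
            printk("thermal: ok\n");
            return;
        }
        if (state & TZ_STATE_CRITICAL)
            printk("thermal: critical trip point (_CRT) crossed\n");
        if (state & TZ_STATE_PASSIVE)
            printk("thermal: passive trip point crossed\n");
        if (state & TZ_STATE_ACTIVE)
            printk("thermal: active cooling level _AL%lu engaged\n",
                   (unsigned long)(state & 0x0000000F));
    }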
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/Makefile linux.21rc5-ac2/drivers/acpi/ospm/Makefile
--- linux.21rc5/drivers/acpi/ospm/Makefile	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/Makefile	1970-01-01 01:00:00.000000000 +0100
@@ -1,22 +0,0 @@
-#
-# Makefile for the Linux OSPM code.
-#
-
-O_TARGET := $(notdir $(CURDIR)).o
-
-ACPI_CFLAGS += -I$(CURDIR)/include
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
-
-subdir-$(CONFIG_ACPI_BUSMGR) += busmgr
-subdir-$(CONFIG_ACPI_EC) += ec
-subdir-$(CONFIG_ACPI_SYS) += system
-subdir-$(CONFIG_ACPI_CPU) += processor
-subdir-$(CONFIG_ACPI_CMBATT) += battery
-subdir-$(CONFIG_ACPI_AC) += ac_adapter
-subdir-$(CONFIG_ACPI_BUTTON) += button
-subdir-$(CONFIG_ACPI_THERMAL) += thermal
-
-obj-y += $(foreach dir,$(subdir-y),$(dir)/ospm_$(dir).o)
-
-include $(TOPDIR)/Rules.make
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/processor/Makefile linux.21rc5-ac2/drivers/acpi/ospm/processor/Makefile
--- linux.21rc5/drivers/acpi/ospm/processor/Makefile	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/processor/Makefile	1970-01-01 01:00:00.000000000 +0100
@@ -1,6 +0,0 @@
-O_TARGET := ospm_$(notdir $(CURDIR)).o
-obj-m := $(O_TARGET)
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
-obj-y := $(patsubst %.c,%.o,$(wildcard *.c))
-
-include $(TOPDIR)/Rules.make
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/processor/pr.c linux.21rc5-ac2/drivers/acpi/ospm/processor/pr.c
--- linux.21rc5/drivers/acpi/ospm/processor/pr.c	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/processor/pr.c	1970-01-01 01:00:00.000000000 +0100
@@ -1,497 +0,0 @@
-/*****************************************************************************
- *
- * Module Name: pr.c
- *   $Revision: 34 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000, 2001 Andrew Grover
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include
-#include
-#include "pr.h"
-
-
-#define _COMPONENT	ACPI_PROCESSOR
-	MODULE_NAME	("pr")
-
-
-/****************************************************************************
- *                                  Globals
- ****************************************************************************/
-
-extern fadt_descriptor_rev2 acpi_fadt;
-
-
-/****************************************************************************
- *                            Internal Functions
- ****************************************************************************/
-
-/****************************************************************************
- *
- * FUNCTION:    pr_print
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION: Prints out information on a specific processor.
- *
- ****************************************************************************/
-
-void
-pr_print (
-    PR_CONTEXT *processor)
-{
-#ifdef ACPI_DEBUG
-    acpi_buffer buffer;
-
-    FUNCTION_TRACE("pr_print");
-
-    buffer.length = 256;
-    buffer.pointer = acpi_os_callocate(buffer.length);
-    if (!buffer.pointer) {
-        return;
-    }
-
-    /*
-     * Get the full pathname for this ACPI object.
-     */
-    acpi_get_name(processor->acpi_handle, ACPI_FULL_PATHNAME, &buffer);
-
-    /*
-     * Print out basic processor information.
-     */
-    ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n"));
-    ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| Processor[%02x]:[%p] uid[%02x] %s\n", processor->device_handle, processor->acpi_handle, processor->uid, (char*)buffer.pointer));
-    ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "|   power: %cC0 %cC1 %cC2[%d] %cC3[%d]\n", (processor->power.state[0].is_valid?'+':'-'), (processor->power.state[1].is_valid?'+':'-'), (processor->power.state[2].is_valid?'+':'-'), processor->power.state[2].latency, (processor->power.state[3].is_valid?'+':'-'), processor->power.state[3].latency));
-    ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "|   performance: states[%d]\n", processor->performance.state_count));
-    ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n"));
-
-    acpi_os_free(buffer.pointer);
-#endif  /* ACPI_DEBUG */
-
-    return;
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    pr_add_device
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-pr_add_device(
-    BM_HANDLE device_handle,
-    void **context)
-{
-    acpi_status status = AE_OK;
-    PR_CONTEXT *processor = NULL;
-    BM_DEVICE *device = NULL;
-    acpi_buffer buffer;
-    acpi_object acpi_object;
-    static u32 processor_count = 0;
-
-
-    FUNCTION_TRACE("pr_add_device");
-
-    if (!context || *context) {
-        return_ACPI_STATUS(AE_BAD_PARAMETER);
-    }
-
-    status = bm_get_device_info(device_handle, &device);
-    if (ACPI_FAILURE(status)) {
-        return_ACPI_STATUS(status);
-    }
-
-    processor = acpi_os_callocate(sizeof(PR_CONTEXT));
-    if (!processor) {
-        return AE_NO_MEMORY;
-    }
-
-    processor->device_handle = device->handle;
-    processor->acpi_handle = device->acpi_handle;
-
-    /*
-     * Processor Block:
-     * ----------------
-     */
-    memset(&acpi_object, 0, sizeof(acpi_object));
-
-    buffer.length = sizeof(acpi_object);
-    buffer.pointer = &acpi_object;
-
-    status = acpi_evaluate_object(processor->acpi_handle, NULL, NULL, &buffer);
-    if (ACPI_FAILURE(status)) {
-        goto end;
-    }
-
-    /*
-     * Processor ID:
-     * -------------
-     * TBD: We need to synchronize the processor ID values in ACPI
-     *      with those of the APIC.  For example, an IBM T20 has a
-     *      proc_id value of '1', where the Linux value for the
-     *      first CPU on this system is '0'.  Since x86 CPUs are
-     *      mapped 1:1 we can simply use a zero-based counter.  Note
-     *      that this assumes that processor objects are enumerated
-     *      in the proper order.
-     */
-    /* processor->uid = acpi_object.processor.proc_id; */
-    processor->uid = processor_count++;
-
-    processor->pblk.length = acpi_object.processor.pblk_length;
-    processor->pblk.address = acpi_object.processor.pblk_address;
-
-    status = pr_power_add_device(processor);
-    if (ACPI_FAILURE(status)) {
-        goto end;
-    }
-
-    status = pr_perf_add_device(processor);
-    if (ACPI_FAILURE(status)) {
-        goto end;
-    }
-
-    status = pr_osl_add_device(processor);
-    if (ACPI_FAILURE(status)) {
-        goto end;
-    }
-
-    *context = processor;
-
-    pr_print(processor);
-
-end:
-    if (ACPI_FAILURE(status)) {
-        acpi_os_free(processor);
-    }
-
-    return_ACPI_STATUS(status);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    pr_remove_device
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-pr_remove_device (
-    void **context)
-{
-    acpi_status status = AE_OK;
-    PR_CONTEXT *processor = NULL;
-
-    FUNCTION_TRACE("pr_remove_device");
-
-    if (!context || !*context) {
-        return_ACPI_STATUS(AE_BAD_PARAMETER);
-    }
-
-    processor = (PR_CONTEXT*)(*context);
-
-    ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing processor device [%02x].\n", processor->device_handle));
-
-    pr_osl_remove_device(processor);
-
-    pr_perf_remove_device(processor);
-
-    pr_power_remove_device(processor);
-
-    acpi_os_free(processor);
-
-    return_ACPI_STATUS(status);
-}
-
-
-/****************************************************************************
- *                            External Functions
- ****************************************************************************/
-
-/****************************************************************************
- *
- * FUNCTION:    pr_initialize
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-pr_initialize (void)
-{
-    acpi_status status = AE_OK;
-    BM_DEVICE_ID criteria;
-    BM_DRIVER driver;
-
-    FUNCTION_TRACE("pr_initialize");
-
-    memset(&criteria, 0, sizeof(BM_DEVICE_ID));
-    memset(&driver, 0, sizeof(BM_DRIVER));
-
-    /*
-     * Initialize power (Cx state) policy.
-     */
-    status = pr_power_initialize();
-    if (ACPI_FAILURE(status)) {
-        return_ACPI_STATUS(status);
-    }
-
-    /*
-     * Register driver for processor devices.
-     */
-    criteria.type = BM_TYPE_PROCESSOR;
-
-    driver.notify = &pr_notify;
-    driver.request = &pr_request;
-
-    status = bm_register_driver(&criteria, &driver);
-
-    return_ACPI_STATUS(status);
-}
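pr_initialize() above shows the registration idiom shared by every OSPM driver in this deleted subtree (the ac, battery, button and thermal drivers all do the same): zero a BM_DEVICE_ID and a BM_DRIVER, fill in the device type to match plus the notify/request callbacks, and hand both to bm_register_driver(). Reduced to its skeleton:

    /* Skeleton of the OSPM driver-registration idiom used above. */
    static acpi_status ospm_register_sketch(void)
    {
        BM_DEVICE_ID criteria;
        BM_DRIVER driver;

        memset(&criteria, 0, sizeof(BM_DEVICE_ID));
        memset(&driver, 0, sizeof(BM_DRIVER));

        criteria.type = BM_TYPE_PROCESSOR;  /* which devices to claim   */
        driver.notify = &pr_notify;         /* add/remove/event upcalls */
        driver.request = &pr_request;       /* command dispatch         */

        return bm_register_driver(&criteria, &driver);
    }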
- */ - criteria.type = BM_TYPE_PROCESSOR; - - driver.notify = &pr_notify; - driver.request = &pr_request; - - status = bm_unregister_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: pr_notify - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -pr_notify ( - BM_NOTIFY notify_type, - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - PR_CONTEXT *processor = NULL; - - FUNCTION_TRACE("pr_notify"); - - processor = (PR_CONTEXT*)*context; - - switch (notify_type) { - - case BM_NOTIFY_DEVICE_ADDED: - status = pr_add_device(device_handle, context); - break; - - case BM_NOTIFY_DEVICE_REMOVED: - status = pr_remove_device(context); - break; - - case PR_NOTIFY_PERF_STATES: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Performance states change event detected on processor [%02x].\n", device_handle)); - /* TBD: Streamline (this is simple but overkill). */ - status = pr_perf_remove_device(processor); - if (ACPI_SUCCESS(status)) { - status = pr_perf_add_device(processor); - } - if (ACPI_SUCCESS(status)) { - status = pr_osl_generate_event(notify_type, - (processor)); - } - break; - - case PR_NOTIFY_POWER_STATES: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Power states change event detected on processor [%02x].\n", device_handle)); - /* TBD: Streamline (this is simple but overkill). */ - status = pr_power_remove_device(processor); - if (ACPI_SUCCESS(status)) { - status = pr_power_add_device(processor); - } - if (ACPI_SUCCESS(status)) { - status = pr_osl_generate_event(notify_type, - (processor)); - } - break; - - default: - status = AE_SUPPORT; - break; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: pr_request - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -pr_request ( - BM_REQUEST *request, - void *context) -{ - acpi_status status = AE_OK; - PR_CONTEXT *processor = NULL; - - FUNCTION_TRACE("pr_request"); - - /* - * Must have a valid request structure and context. 
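
For orientation, a hedged sketch of how a client might drive the command dispatch that follows. The buffer field names mirror the length/pointer convention of acpi_buffer and are assumptions, as is calling pr_request() directly (a real caller would go through the bus manager's request interface):

	acpi_status status;
	BM_REQUEST request;
	u32 state = 0;

	request.command = PR_COMMAND_GET_PERF_STATE;
	request.buffer.length = sizeof(state);		/* assumed field names */
	request.buffer.pointer = &state;

	status = pr_request(&request, processor);
	if (ACPI_SUCCESS(status))
		printk("throttling state: %d\n", state);
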
- */
-	if (!request || !context) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	processor = (PR_CONTEXT*)context;
-
-	/*
-	 * Handle request:
-	 * ---------------
-	 */
-	switch (request->command) {
-
-	case PR_COMMAND_GET_POWER_INFO:
-		status = bm_copy_to_buffer(&(request->buffer),
-			&(processor->power), sizeof(PR_POWER));
-		break;
-
-	case PR_COMMAND_SET_POWER_INFO:
-	{
-		PR_POWER *power_info = NULL;
-		u32 i = 0;
-
-		status = bm_cast_buffer(&(request->buffer),
-			(void**)&power_info, sizeof(PR_POWER));
-		if (ACPI_SUCCESS(status)) {
-			for (i=0; i<processor->power.state_count; i++) {
-				MEMCPY(&(processor->power.state[i].promotion),
-					&(power_info->state[i].promotion),
-					sizeof(PR_CX_POLICY_VALUES));
-				MEMCPY(&(processor->power.state[i].demotion),
-					&(power_info->state[i].demotion),
-					sizeof(PR_CX_POLICY_VALUES));
-			}
-		}
-	}
-		break;
-
-	case PR_COMMAND_GET_PERF_INFO:
-		status = bm_copy_to_buffer(&(request->buffer),
-			&(processor->performance), sizeof(PR_PERFORMANCE));
-		break;
-
-	case PR_COMMAND_GET_PERF_STATE:
-		status = bm_copy_to_buffer(&(request->buffer),
-			&(processor->performance.active_state), sizeof(u32));
-		break;
-
-	case PR_COMMAND_SET_PERF_LIMIT:
-	{
-		u32 *limit = NULL;
-
-		status = bm_cast_buffer(&(request->buffer),
-			(void**)&limit, sizeof(u32));
-		if (ACPI_SUCCESS(status)) {
-			status = pr_perf_set_limit(processor, *limit);
-		}
-	}
-		break;
-
-	default:
-		status = AE_SUPPORT;
-		break;
-	}
-
-	request->status = status;
-
-	return_ACPI_STATUS(status);
-}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/processor/pr_osl.c linux.21rc5-ac2/drivers/acpi/ospm/processor/pr_osl.c
--- linux.21rc5/drivers/acpi/ospm/processor/pr_osl.c	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/ospm/processor/pr_osl.c	1970-01-01 01:00:00.000000000 +0100
@@ -1,344 +0,0 @@
-/******************************************************************************
- *
- * Module Name: pr_osl.c
- *   $Revision: 21 $
- *
- *****************************************************************************/
-
-/*
- *  Copyright (C) 2000, 2001 Andrew Grover
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include "pr.h" - - -MODULE_AUTHOR("Andrew Grover"); -MODULE_DESCRIPTION("ACPI Component Architecture (CA) - IA32 Processor Driver"); -MODULE_LICENSE("GPL"); - - -#define PR_PROC_ROOT "processor" -#define PR_PROC_STATUS "status" -#define PR_PROC_INFO "info" - -extern struct proc_dir_entry *bm_proc_root; -static struct proc_dir_entry *pr_proc_root = NULL; -extern unsigned short acpi_piix4_bmisx; - - -/**************************************************************************** - * - * FUNCTION: pr_osl_proc_read_status - * - ****************************************************************************/ - -static int -pr_osl_proc_read_status ( - char *page, - char **start, - off_t off, - int count, - int *eof, - void *context) -{ - PR_CONTEXT *processor = NULL; - char *p = page; - int len = 0; - - if (!context || (off != 0)) { - goto end; - } - - processor = (PR_CONTEXT*)context; - - p += sprintf(p, "Bus Mastering Activity: %08x\n", - processor->power.bm_activity); - - p += sprintf(p, "C-State Utilization: C1[%d] C2[%d] C3[%d]\n", - processor->power.state[PR_C1].utilization, - processor->power.state[PR_C2].utilization, - processor->power.state[PR_C3].utilization); - -end: - len = (p - page); - if (len <= off+count) *eof = 1; - *start = page + off; - len -= off; - if (len>count) len = count; - if (len<0) len = 0; - - return(len); -} - - -/**************************************************************************** - * - * FUNCTION: pr_osl_proc_read_info - * - ****************************************************************************/ - -static int -pr_osl_proc_read_info ( - char *page, - char **start, - off_t off, - int count, - int *eof, - void *context) -{ - PR_CONTEXT *processor = NULL; - char *p = page; - int len = 0; - - if (!context || (off != 0)) { - goto end; - } - - processor = (PR_CONTEXT*)context; - - p += sprintf(p, "\n"); - -end: - len = (p - page); - if (len <= off+count) *eof = 1; - *start = page + off; - len -= off; - if (len>count) len = count; - if (len<0) len = 0; - - return(len); -} - - -/**************************************************************************** - * - * FUNCTION: pr_osl_add_device - * - ****************************************************************************/ - -acpi_status -pr_osl_add_device( - PR_CONTEXT *processor) -{ - u32 i = 0; - struct proc_dir_entry *proc_entry = NULL, *proc; - char processor_uid[16]; - - if (!processor) { - return(AE_BAD_PARAMETER); - } - - printk("Processor[%x]:", processor->uid); - for (i=0; ipower.state_count; i++) { - if (processor->power.state[i].is_valid) { - printk(" C%d", i); - } - } - if (processor->performance.state_count > 1) - printk(", %d throttling states", processor->performance.state_count); - if (acpi_piix4_bmisx && processor->power.state[3].is_valid) - printk(" (PIIX errata enabled)"); - printk("\n"); - - sprintf(processor_uid, "%d", processor->uid); - - proc_entry = proc_mkdir(processor_uid, pr_proc_root); - if (!proc_entry) - return(AE_ERROR); - - proc = create_proc_read_entry(PR_PROC_STATUS, S_IFREG | S_IRUGO, - proc_entry, pr_osl_proc_read_status, (void*)processor); - if (!proc_entry) - return(AE_ERROR); - - proc = create_proc_read_entry(PR_PROC_INFO, S_IFREG | S_IRUGO, - proc_entry, 
pr_osl_proc_read_info, (void*)processor); - if (!proc_entry) - return(AE_ERROR); - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: pr_osl_remove_device - * - ****************************************************************************/ - -acpi_status -pr_osl_remove_device ( - PR_CONTEXT *processor) -{ - char proc_entry[64]; - - if (!processor) { - return(AE_BAD_PARAMETER); - } - - sprintf(proc_entry, "%d/%s", processor->uid, PR_PROC_INFO); - remove_proc_entry(proc_entry, pr_proc_root); - - sprintf(proc_entry, "%d/%s", processor->uid, PR_PROC_STATUS); - remove_proc_entry(proc_entry, pr_proc_root); - - sprintf(proc_entry, "%d", processor->uid); - remove_proc_entry(proc_entry, pr_proc_root); - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: pr_osl_generate_event - * - ****************************************************************************/ - -acpi_status -pr_osl_generate_event ( - u32 event, - PR_CONTEXT *processor) -{ - acpi_status status = AE_OK; - char processor_uid[16]; - - if (!processor) { - return(AE_BAD_PARAMETER); - } - - switch (event) { - - case PR_NOTIFY_PERF_STATES: - case PR_NOTIFY_POWER_STATES: - sprintf(processor_uid, "%d", processor->uid); - status = bm_osl_generate_event(processor->device_handle, - PR_PROC_ROOT, processor_uid, event, 0); - break; - - default: - return(AE_BAD_PARAMETER); - break; - } - - return(status); -} - - -/**************************************************************************** - * Errata Handling - ****************************************************************************/ - -void acpi_pr_errata (void) -{ - struct pci_dev *dev = NULL; - - while ((dev = pci_find_subsys(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, - PCI_ANY_ID, PCI_ANY_ID, dev))) { - switch (dev->device) { - case PCI_DEVICE_ID_INTEL_82801BA_8: /* PIIX4U4 */ - case PCI_DEVICE_ID_INTEL_82801BA_9: /* PIIX4U3 */ - case PCI_DEVICE_ID_INTEL_82451NX: /* PIIX4NX */ - case PCI_DEVICE_ID_INTEL_82372FB_1: /* PIIX4U2 */ - case PCI_DEVICE_ID_INTEL_82801AA_1: /* PIIX4U */ - case PCI_DEVICE_ID_INTEL_82443MX_1: /* PIIX4E2 */ - case PCI_DEVICE_ID_INTEL_82801AB_1: /* PIIX4E */ - case PCI_DEVICE_ID_INTEL_82371AB: /* PIIX4 */ - acpi_piix4_bmisx = pci_resource_start(dev, 4); - return; - } - } - - return; -} - - -/**************************************************************************** - * - * FUNCTION: pr_osl_init - * - * PARAMETERS: - * - * RETURN: 0: Success - * - * DESCRIPTION: Module initialization. - * - ****************************************************************************/ - -static int __init -pr_osl_init (void) -{ - acpi_status status = AE_OK; - - /* abort if no busmgr */ - if (!bm_proc_root) - return -ENODEV; - - acpi_pr_errata(); - - pr_proc_root = proc_mkdir(PR_PROC_ROOT, bm_proc_root); - if (!pr_proc_root) { - status = AE_ERROR; - } - else { - status = pr_initialize(); - if (ACPI_FAILURE(status)) { - remove_proc_entry(PR_PROC_ROOT, bm_proc_root); - } - - } - - return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; -} - - -/**************************************************************************** - * - * FUNCTION: pr_osl_cleanup - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Module cleanup. 
- * - ****************************************************************************/ - -static void __exit -pr_osl_cleanup (void) -{ - pr_terminate(); - - if (pr_proc_root) { - remove_proc_entry(PR_PROC_ROOT, bm_proc_root); - } - - return; -} - - -module_init(pr_osl_init); -module_exit(pr_osl_cleanup); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/processor/prperf.c linux.21rc5-ac2/drivers/acpi/ospm/processor/prperf.c --- linux.21rc5/drivers/acpi/ospm/processor/prperf.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/processor/prperf.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,456 +0,0 @@ -/***************************************************************************** - * - * Module Name: prperf.c - * $Revision: 21 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -/* - * TBD: 1. Support ACPI 2.0 processor performance states (not just throttling). - * 2. Fully implement thermal -vs- power management limit control. - */ - - -#include -#include -#include "pr.h" - -#define _COMPONENT ACPI_PROCESSOR - MODULE_NAME ("prperf") - - -/**************************************************************************** - * Globals - ****************************************************************************/ - -extern fadt_descriptor_rev2 acpi_fadt; -const u32 POWER_OF_2[] = {1,2,4,8,16,32,64,128,256,512}; - - -/**************************************************************************** - * - * FUNCTION: pr_perf_get_frequency - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -pr_perf_get_frequency ( - PR_CONTEXT *processor, - u32 *frequency) { - acpi_status status = AE_OK; - - FUNCTION_TRACE("pr_perf_get_frequency"); - - if (!processor || !frequency) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* TBD: Generic method to calculate processor frequency. */ - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: pr_perf_get_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -/* TBD: Include support for _real_ performance states (not just throttling). 
*/
-
-acpi_status
-pr_perf_get_state (
-	PR_CONTEXT		*processor,
-	u32			*state)
-{
-	u32			pblk_value = 0;
-	u32			duty_mask = 0;
-	u32			duty_cycle = 0;
-
-	FUNCTION_TRACE("pr_perf_get_state");
-
-	if (!processor || !state) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (processor->performance.state_count == 1) {
-		*state = 0;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	acpi_os_read_port(processor->pblk.address, &pblk_value, 32);
-
-	/*
-	 * Throttling Enabled?
-	 * -------------------
-	 * If so, calculate the current throttling state, otherwise return
-	 * '100% performance' (state 0).
-	 */
-	if (pblk_value & 0x00000010) {
-
-		duty_mask = processor->performance.state_count - 1;
-		duty_mask <<= acpi_fadt.duty_offset;
-
-		duty_cycle = pblk_value & duty_mask;
-		duty_cycle >>= acpi_fadt.duty_offset;
-
-		if (duty_cycle == 0) {
-			*state = 0;
-		}
-		else {
-			*state = processor->performance.state_count -
-				duty_cycle;
-		}
-	}
-	else {
-		*state = 0;
-	}
-
-	ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Processor [%02x] is at performance state [%d%%].\n", processor->device_handle, processor->performance.state[*state].performance));
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    pr_perf_set_state
- *
- * PARAMETERS:
- *
- * RETURN:      AE_OK
- *              AE_BAD_PARAMETER
- *              AE_BAD_DATA     Invalid target throttling state.
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-/* TBD: Include support for _real_ performance states (not just throttling). */
-
-acpi_status
-pr_perf_set_state (
-	PR_CONTEXT		*processor,
-	u32			state)
-{
-	u32			pblk_value = 0;
-	u32			duty_mask = 0;
-	u32			duty_cycle = 0;
-	u32			i = 0;
-
-	FUNCTION_TRACE ("pr_perf_set_state");
-
-	if (!processor) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (state > (processor->performance.state_count - 1)) {
-		return_ACPI_STATUS(AE_BAD_DATA);
-	}
-
-	if ((state == processor->performance.active_state) ||
-		(processor->performance.state_count == 1)) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Calculate Duty Cycle/Mask:
-	 * --------------------------
-	 * Note that we don't support duty_cycle values that span bit 4.
-	 */
-	if (state) {
-		duty_cycle = processor->performance.state_count - state;
-		duty_cycle <<= acpi_fadt.duty_offset;
-	}
-	else {
-		duty_cycle = 0;
-	}
-
-	duty_mask = ~((u32)(processor->performance.state_count - 1));
-	for (i=0; i<acpi_fadt.duty_offset; i++) {
-		duty_mask <<= 1;
-		duty_mask |= 0x00000001;
-	}
-
-	/* Throttling must be disabled before changing the duty_cycle value. */
-	acpi_os_read_port(processor->pblk.address, &pblk_value, 32);
-	if (pblk_value & 0x00000010) {
-		pblk_value &= 0xFFFFFFEF;
-		acpi_os_write_port(processor->pblk.address, pblk_value, 32);
-	}
-
-	/*
-	 * Set Duty Cycle:
-	 * ---------------
-	 * Mask off the old duty_cycle value, mask in the new.
-	 */
-	pblk_value &= duty_mask;
-	pblk_value |= duty_cycle;
-	acpi_os_write_port(processor->pblk.address, pblk_value, 32);
-
-	/*
-	 * Enable Throttling:
-	 * ------------------
-	 * But only for non-zero (non-100% performance) states.
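
A worked example of the duty-cycle arithmetic above, with invented FADT values (duty_width = 3, duty_offset = 1, so the duty field sits in bits 1-3 of P_CNT):

	/* state_count = POWER_OF_2[3] = 8; performance_step = 1000/8 = 125 */
	/* target state 2 -> performance = (1000 - 125*2)/10 = 75%          */
	duty_cycle = (8 - 2) << 1;	/* = 0x0C                            */
	duty_mask  = 0xFFFFFFF1;	/* zeros over bits 1..3              */
	pblk_value = (pblk_value & duty_mask) | duty_cycle;	/* then set THT_EN (bit 4) */
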
- */ - if (state) { - pblk_value |= 0x00000010; - acpi_os_write_port(processor->pblk.address, pblk_value, 32); - } - - processor->performance.active_state = state; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Processor [%02x] set to performance state [%d%%].\n", processor->device_handle, processor->performance.state[state].performance)); - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: pr_perf_set_limit - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -pr_perf_set_limit ( - PR_CONTEXT *processor, - u32 limit) -{ - acpi_status status = AE_OK; - PR_PERFORMANCE *performance = NULL; - - FUNCTION_TRACE ("pr_perf_set_limit"); - - if (!processor) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - performance = &(processor->performance); - - /* - * Set Limit: - * ---------- - * TBD: Properly manage thermal and power limits (only set - * performance state iff...). - */ - switch (limit) { - - case PR_PERF_DEC: - if (performance->active_state < - (performance->state_count-1)) { - status = pr_perf_set_state(processor, - (performance->active_state+1)); - } - break; - - case PR_PERF_INC: - if (performance->active_state > 0) { - status = pr_perf_set_state(processor, - (performance->active_state-1)); - } - break; - - case PR_PERF_MAX: - if (performance->active_state != 0) { - status = pr_perf_set_state(processor, 0); - } - break; - - default: - return_ACPI_STATUS(AE_BAD_DATA); - break; - } - - if (ACPI_SUCCESS(status)) { - performance->thermal_limit = performance->active_state; - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Processor [%02x] thermal performance limit set to [%d%%].\n", processor->device_handle, processor->performance.state[performance->active_state].performance)); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: pr_perf_add_device - * - * PARAMETERS: processor Our processor-specific context. - * - * RETURN: AE_OK - * AE_BAD_PARAMETER - * - * DESCRIPTION: Calculates the number of throttling states and the state - * performance/power values. - * - ****************************************************************************/ - -/* TBD: Support duty_cycle values that span bit 4. */ - -acpi_status -pr_perf_add_device ( - PR_CONTEXT *processor) -{ - acpi_status status = AE_OK; - u32 i = 0; - u32 performance_step = 0; - u32 percentage = 0; - - FUNCTION_TRACE("pr_perf_add_device"); - - if (!processor) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Valid PBLK? - * ----------- - * For SMP it is common to have the first (boot) processor have a - * valid PBLK while all others do not -- which implies that - * throttling has system-wide effects (duty_cycle programmed into - * the chipset effects all processors). - */ - if ((processor->pblk.length < 6) || !processor->pblk.address) { - processor->performance.state_count = 1; - } - - /* - * Valid Duty Offset/Width? - * ------------------------ - * We currently only support duty_cycle values that fall within - * bits 0-3, as things get complicated when this value spans bit 4 - * (the throttling enable/disable bit). 
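
To make the bit-4 constraint concrete (field placement invented for illustration): bit 4 of P_CNT is the throttle enable (THT_EN) bit, so the duty field must fit entirely below it. That is what the duty_offset + duty_width > 4 rejection below enforces:

	/* duty_offset=1, duty_width=3  ->  bits 1-3   (accepted)           */
	/* duty_offset=2, duty_width=3  ->  bits 2-4   (rejected: hits THT_EN) */
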
- */ - else if ((acpi_fadt.duty_offset + acpi_fadt.duty_width) > 4) { - processor->performance.state_count = 1; - } - - /* - * Compute State Count: - * -------------------- - * The number of throttling states is computed as 2^duty_width, - * but limited by PR_MAX_THROTTLE_STATES. Note that a duty_width - * of zero results is one throttling state (100%). - */ - else { - processor->performance.state_count = - POWER_OF_2[acpi_fadt.duty_width]; - } - - if (processor->performance.state_count > PR_MAX_THROTTLE_STATES) { - processor->performance.state_count = PR_MAX_THROTTLE_STATES; - } - - /* - * Compute State Values: - * --------------------- - * Note that clock throttling displays a linear power/performance - * relationship (at 50% performance the CPU will consume 50% power). - */ - performance_step = (1000 / processor->performance.state_count); - - for (i=0; iperformance.state_count; i++) { - percentage = (1000 - (performance_step * i))/10; - processor->performance.state[i].performance = percentage; - processor->performance.state[i].power = percentage; - } - - /* - * Get Current State: - * ------------------ - */ - status = pr_perf_get_state(processor, &(processor->performance.active_state)); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Set to Maximum Performance: - * --------------------------- - * We'll let subsequent policy (e.g. thermal/power) decide to lower - * performance if it so chooses, but for now crank up the speed. - */ - if (0 != processor->performance.active_state) { - status = pr_perf_set_state(processor, 0); - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: pr_perf_remove_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -pr_perf_remove_device ( - PR_CONTEXT *processor) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("pr_perf_remove_device"); - - if (!processor) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - MEMSET(&(processor->performance), 0, sizeof(PR_PERFORMANCE)); - - return_ACPI_STATUS(status); -} - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/processor/prpower.c linux.21rc5-ac2/drivers/acpi/ospm/processor/prpower.c --- linux.21rc5/drivers/acpi/ospm/processor/prpower.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/processor/prpower.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,665 +0,0 @@ -/***************************************************************************** - * - * Module Name: prpower.c - * $Revision: 32 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -/* TBD: Linux specific */ -#include -#include -#include - -#include -#include -#include "pr.h" - -#define _COMPONENT ACPI_PROCESSOR - MODULE_NAME ("prpower") - - -/**************************************************************************** - * Globals - ****************************************************************************/ - -extern FADT_DESCRIPTOR acpi_fadt; -static u32 last_idle_jiffies = 0; -static PR_CONTEXT *processor_list[NR_CPUS]; -static void (*pr_pm_idle_save)(void) = NULL; -static u8 bm_control = 0; - - -/* Used for PIIX4 errata handling. */ -unsigned short acpi_piix4_bmisx = 0; - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: pr_power_activate_state - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -void -pr_power_activate_state ( - PR_CONTEXT *processor, - u32 next_state) -{ - - PROC_NAME("pr_power_activate_state"); - - if (!processor) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n")); - return; - } - - processor->power.state[processor->power.active_state].promotion.count = 0; - processor->power.state[processor->power.active_state].demotion.count = 0; - - /* - * Cleanup from old state. - */ - switch (processor->power.active_state) { - - case PR_C3: - /* Disable bus master reload */ - acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_DO_NOT_LOCK, - BM_RLD, 0); - break; - } - - /* - * Prepare to use new state. - */ - switch (next_state) { - - case PR_C3: - /* Enable bus master reload */ - acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_DO_NOT_LOCK, - BM_RLD, 1); - break; - } - - processor->power.active_state = next_state; - - return; -} - - -/**************************************************************************** - * - * FUNCTION: pr_power_idle - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -void -pr_power_idle (void) -{ - PR_CX *c_state = NULL; - u32 next_state = 0; - u32 start_ticks, end_ticks, time_elapsed; - PR_CONTEXT *processor = NULL; - - PROC_NAME("pr_power_idle"); - - processor = processor_list[smp_processor_id()]; - - if (!processor || processor->power.active_state == PR_C0) { - return; - } - - next_state = processor->power.active_state; - - /* - * Check OS Idleness: - * ------------------ - * If the OS has been busy (hasn't called the idle handler in a while) - * then automatically demote to the default power state (e.g. C1). - * - * TBD: Optimize by having scheduler determine business instead - * of having us try to calculate it. - */ - if (processor->power.active_state != processor->power.default_state) { - if ((jiffies - last_idle_jiffies) >= processor->power.busy_metric) { - next_state = processor->power.default_state; - if (next_state != processor->power.active_state) { - pr_power_activate_state(processor, next_state); - } - } - } - - disable(); - - /* - * Log BM Activity: - * ---------------- - * Read BM_STS and record its value for later use by C3 policy. 
- * (Note that we save the BM_STS values for the last 32 cycles). - */ - if (bm_control) { - processor->power.bm_activity <<= 1; - if (acpi_hw_register_bit_access(ACPI_READ, ACPI_MTX_DO_NOT_LOCK, BM_STS)) { - processor->power.bm_activity |= 1; - acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_DO_NOT_LOCK, - BM_STS, 1); - } - else if (acpi_piix4_bmisx) { - /* - * PIIX4 Errata: - * ------------- - * This code is a workaround for errata #18 "C3 Power State/ - * BMIDE and Type-F DMA Livelock" from the July '01 PIIX4 - * specification update. Note that BM_STS doesn't always - * reflect the true state of bus mastering activity; forcing - * us to manually check the BMIDEA bit of each IDE channel. - */ - if ((inb_p(acpi_piix4_bmisx + 0x02) & 0x01) || - (inb_p(acpi_piix4_bmisx + 0x0A) & 0x01)) - processor->power.bm_activity |= 1; - } - } - - c_state = &(processor->power.state[processor->power.active_state]); - - c_state->utilization++; - - /* - * Sleep: - * ------ - * Invoke the current Cx state to put the processor to sleep. - */ - switch (processor->power.active_state) { - - case PR_C1: - /* Invoke C1 */ - enable(); halt(); - /* - * TBD: Can't get time duration while in C1, as resumes - * go to an ISR rather than here. - */ - time_elapsed = 0xFFFFFFFF; - break; - - case PR_C2: - /* See how long we're asleep for */ - acpi_get_timer(&start_ticks); - /* Invoke C2 */ - acpi_os_read_port(processor->power.p_lvl2, NULL, 8); - /* Dummy op - must do something useless after P_LVL2 read */ - acpi_hw_register_bit_access(ACPI_READ, ACPI_MTX_DO_NOT_LOCK, BM_STS); - /* Compute time elapsed */ - acpi_get_timer(&end_ticks); - /* Re-enable interrupts */ - enable(); - acpi_get_timer_duration(start_ticks, end_ticks, &time_elapsed); - break; - - case PR_C3: - /* Disable bus master arbitration */ - acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_DO_NOT_LOCK, ARB_DIS, 1); - /* See how long we're asleep for */ - acpi_get_timer(&start_ticks); - /* Invoke C3 */ - acpi_os_read_port(processor->power.p_lvl3, NULL, 8); - /* Dummy op - must do something useless after P_LVL3 read */ - acpi_hw_register_bit_access(ACPI_READ, ACPI_MTX_DO_NOT_LOCK, BM_STS); - /* Compute time elapsed */ - acpi_get_timer(&end_ticks); - /* Enable bus master arbitration */ - acpi_hw_register_bit_access(ACPI_WRITE, ACPI_MTX_DO_NOT_LOCK, - ARB_DIS, 0); - /* Re-enable interrupts */ - enable(); - acpi_get_timer_duration(start_ticks, end_ticks, &time_elapsed); - break; - - default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Attempt to use unsupported power state C%d.\n", processor->power.active_state)); - enable(); - break; - } - - /* - * Promotion? - * ---------- - * Track the number of successful sleeps (time asleep is greater - * than time_threshold) and promote when count_threshold is - * reached. - */ - if ((c_state->promotion.target_state) && - (time_elapsed >= c_state->promotion.time_threshold)) { - - c_state->promotion.count++; - c_state->demotion.count = 0; - - if (c_state->promotion.count >= c_state->promotion.count_threshold) { - /* - * Bus Mastering Activity, if active and used - * by this state's promotion policy, prevents - * promotions from occuring. - */ - if (!bm_control || !(processor->power.bm_activity & c_state->promotion.bm_threshold)) - next_state = c_state->promotion.target_state; - } - } - - /* - * Demotion? - * --------- - * Track the number of shorts (time asleep is less than - * time_threshold) and demote when count_threshold is reached. 
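
The bm_activity word consulted by these promotion/demotion checks is a 32-sample sliding history: each idle pass shifts it left and ORs in whether bus-master activity was seen, so "recent DMA traffic" reduces to a mask test. A minimal sketch (bm_seen is a hypothetical flag standing in for the BM_STS/BMIDEA checks above):

	processor->power.bm_activity <<= 1;	/* age the history one idle pass */
	if (bm_seen)
		processor->power.bm_activity |= 1;

	if (processor->power.bm_activity & 0x0000000F)	/* traffic in last 4 passes */
		next_state = PR_C2;	/* stay out of C3 while DMA is active */
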
- */ - if (c_state->demotion.target_state) { - - if (time_elapsed < c_state->demotion.time_threshold) { - - c_state->demotion.count++; - c_state->promotion.count = 0; - - if (c_state->demotion.count >= - c_state->demotion.count_threshold) { - next_state = c_state->demotion.target_state; - } - } - - /* - * Bus Mastering Activity, if active and used by this - * state's promotion policy, causes an immediate demotion - * to occur. - */ - if (bm_control && (processor->power.bm_activity & c_state->demotion.bm_threshold)) - next_state = c_state->demotion.target_state; - } - - /* - * New Cx State? - * ------------- - * If we're going to start using a new Cx state we must clean up - * from the previous and prepare to use the new. - */ - if (next_state != processor->power.active_state) { - pr_power_activate_state(processor, next_state); - processor->power.active_state = processor->power.active_state; - } - - /* - * Track OS Idleness: - * ------------------ - * Record a jiffies timestamp to compute time elapsed between calls - * to the idle handler. - */ - last_idle_jiffies = jiffies; - - return; -} - - -/***************************************************************************** - * - * FUNCTION: pr_power_set_default_policy - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Sets the default Cx state policy (OS idle handler). Our - * scheme is to promote quickly to C2 but more conservatively - * to C3. We're favoring C2 for its characteristics of low - * latency (quick response), good power savings, and ability - * to allow bus mastering activity. - * - * Note that Cx state policy is completely customizable, with - * the goal of having heuristics to alter policy dynamically. - * - ****************************************************************************/ - -acpi_status -pr_power_set_default_policy ( - PR_CONTEXT *processor) -{ - FUNCTION_TRACE("pr_power_set_default_policy"); - - if (!processor) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Busy Metric: - * ------------ - * Used to determine when the OS has been busy and thus when - * policy should return to using the default Cx state (e.g. C1). - * On Linux we use the number of jiffies (scheduler quantums) - * that transpire between calls to the idle handler. - * - * TBD: Linux-specific. - */ - processor->power.busy_metric = 2; - - /* - * C1: - * --- - * C1 serves as our default state. It must be valid. - */ - if (processor->power.state[PR_C1].is_valid) { - processor->power.active_state = - processor->power.default_state = PR_C1; - } - else { - processor->power.active_state = - processor->power.default_state = PR_C0; - return_ACPI_STATUS(AE_OK); - } - - /* - * C2: - * --- - * Set default C1 promotion and C2 demotion policies. - */ - if (processor->power.state[PR_C2].is_valid) { - /* - * Promote from C1 to C2 anytime we're asleep in C1 for - * longer than two times the C2 latency (to amortize cost - * of transition). Demote from C2 to C1 anytime we're - * asleep in C2 for less than this time. 
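
Plugging in a number makes the policy below easier to read (the latency value is invented): if the FADT reports plvl2_lat = 90us, then C1 promotes to C2 only after ten consecutive C1 residencies of at least 2 * 90 = 180us, while a single C2 residency shorter than 180us demotes straight back to C1:

	/* illustration, plvl2_lat == 90:                          */
	/*   C1 promotion: count_threshold 10, time_threshold 180  */
	/*   C2 demotion:  count_threshold 1,  time_threshold 180  */
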
- */ - processor->power.state[PR_C1].promotion.count_threshold = 10; - processor->power.state[PR_C1].promotion.time_threshold = - 2 * processor->power.state[PR_C2].latency; - processor->power.state[PR_C1].promotion.target_state = PR_C2; - - processor->power.state[PR_C2].demotion.count_threshold = 1; - processor->power.state[PR_C2].demotion.time_threshold = - 2 * processor->power.state[PR_C2].latency; - processor->power.state[PR_C2].demotion.target_state = PR_C1; - } - - /* - * C3: - * --- - * Set default C2 promotion and C3 demotion policies. - */ - if ((processor->power.state[PR_C2].is_valid) && - (processor->power.state[PR_C3].is_valid)) { - /* - * Promote from C2 to C3 after 4 cycles of no bus - * mastering activity (while maintaining sleep time - * criteria). Demote immediately on a short or - * whenever bus mastering activity occurs. - */ - processor->power.state[PR_C2].promotion.count_threshold = 1; - processor->power.state[PR_C2].promotion.time_threshold = - 2 * processor->power.state[PR_C3].latency; - processor->power.state[PR_C2].promotion.bm_threshold = - 0x0000000F; - processor->power.state[PR_C2].promotion.target_state = - PR_C3; - - processor->power.state[PR_C3].demotion.count_threshold = 1; - processor->power.state[PR_C3].demotion.time_threshold = - 2 * processor->power.state[PR_C3].latency; - processor->power.state[PR_C3].demotion.bm_threshold = - 0x0000000F; - processor->power.state[PR_C3].demotion.target_state = - PR_C2; - } - - return_ACPI_STATUS(AE_OK); -} - -/***************************************************************************** - * - * FUNCTION: pr_power_add_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -/* - * TBD: 1. PROC_C1 support. - * 2. Symmetric Cx state support (different Cx states supported - * by different CPUs results in lowest common denominator). - */ - -acpi_status -pr_power_add_device ( - PR_CONTEXT *processor) -{ - FUNCTION_TRACE("pr_power_add_device"); - - if (!processor) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * State Count: - * ------------ - * Fixed at four (C0-C3). We use is_valid to determine whether or - * not a state actually gets used. - */ - processor->power.state_count = PR_MAX_POWER_STATES; - - /* - * C0: - * --- - * C0 exists only as filler in our array. (Let's assume its valid!) - */ - processor->power.state[PR_C0].is_valid = TRUE; - - /* - * C1: - * --- - * ACPI states that C1 must be supported by all processors - * with a latency so small that it can be ignored. - * - * TBD: What about PROC_C1 support? - */ - processor->power.state[PR_C1].is_valid = TRUE; - - /* - * C2: - * --- - * We're only supporting C2 on UP systems with latencies <= 100us. - * - * TBD: Support for C2 on MP (P_LVL2_UP) -- I'm taking the - * conservative approach for now. - */ - processor->power.state[PR_C2].latency = acpi_fadt.plvl2_lat; - -#ifdef CONFIG_SMP - if (smp_num_cpus == 1) { -#endif /*CONFIG_SMP*/ - if (acpi_fadt.plvl2_lat <= PR_MAX_C2_LATENCY) { - processor->power.state[PR_C2].is_valid = TRUE; - processor->power.p_lvl2 = processor->pblk.address + 4; - } -#ifdef CONFIG_SMP - } -#endif /*CONFIG_SMP*/ - - - /* - * C3: - * --- - * We're only supporting C3 on UP systems with latencies <= 1000us, - * and that include the ability to disable bus mastering while in - * C3 (ARB_DIS) but allows bus mastering requests to wake the system - * from C3 (BM_RLD). 
Note this method of maintaining cache coherency
- * (disabling of bus mastering) cannot be used on SMP systems, and
- * flushing caches (e.g. WBINVD) is simply too costly at this time.
- *
- * TBD: Support for C3 on MP -- I'm taking the conservative
- *      approach for now.
- */
-	processor->power.state[PR_C3].latency = acpi_fadt.plvl3_lat;
-
-#ifdef CONFIG_SMP
-	if (smp_num_cpus == 1) {
-#endif /*CONFIG_SMP*/
-		if ((acpi_fadt.plvl3_lat <= PR_MAX_C3_LATENCY) && bm_control) {
-			processor->power.state[PR_C3].is_valid = TRUE;
-			processor->power.p_lvl3 = processor->pblk.address + 5;
-		}
-#ifdef CONFIG_SMP
-	}
-#endif /*CONFIG_SMP*/
-
-	/*
-	 * Set Default Policy:
-	 * -------------------
-	 * Now that we know which states are supported, set the default
-	 * policy.  Note that this policy can be changed dynamically
-	 * (e.g. encourage deeper sleeps to conserve battery life when
-	 * not on AC).
-	 */
-	pr_power_set_default_policy(processor);
-
-	/*
-	 * Save Processor Context:
-	 * -----------------------
-	 * TBD: Enhance Linux idle handler to take processor context
-	 *      parameter.
-	 */
-	processor_list[processor->uid] = processor;
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    pr_power_remove_device
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-pr_power_remove_device (
-	PR_CONTEXT		*processor)
-{
-	acpi_status		status = AE_OK;
-
-	FUNCTION_TRACE("pr_power_remove_device");
-
-	if (!processor) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	MEMSET(&(processor->power), 0, sizeof(PR_POWER));
-
-	processor_list[processor->uid] = NULL;
-
-	return_ACPI_STATUS(status);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    pr_power_initialize
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-pr_power_initialize (void)
-{
-	u32			i = 0;
-
-	FUNCTION_TRACE("pr_power_initialize");
-
-	/* TBD: Linux-specific. */
-	for (i=0; i<NR_CPUS; i++) {
-		processor_list[i] = NULL;
-	}
-
-	/*
-	 * Install idle handler.
-	 *
-	 * TBD: Linux-specific (need OSL function).
-	 */
-	pr_pm_idle_save = pm_idle;
-	pm_idle = pr_power_idle;
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    pr_power_terminate
- *
- * PARAMETERS:  <none>
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-pr_power_terminate (void)
-{
-	FUNCTION_TRACE("pr_power_terminate");
-
-	/*
-	 * Remove idle handler.
-	 *
-	 * TBD: Linux-specific (need OSL function).
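
The save/restore here is the standard 2.4 pm_idle hook pattern; the install half lives in pr_power_initialize above. Worth noting, as an observation rather than anything the original comments claim: nothing synchronizes the restore against another CPU that may already be executing inside the old hook, which is presumably part of why an OSL-level helper is wished for:

	pr_pm_idle_save = pm_idle;	/* at init: remember the previous hook */
	pm_idle = pr_power_idle;	/* publish ours                        */
	/* ... */
	pm_idle = pr_pm_idle_save;	/* at teardown: put the old hook back  */
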
- */ - pm_idle = pr_pm_idle_save; - - return_ACPI_STATUS(AE_OK); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/system/Makefile linux.21rc5-ac2/drivers/acpi/ospm/system/Makefile --- linux.21rc5/drivers/acpi/ospm/system/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/system/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,6 +0,0 @@ -O_TARGET := ospm_$(notdir $(CURDIR)).o -obj-m := $(O_TARGET) -EXTRA_CFLAGS += $(ACPI_CFLAGS) -obj-y := $(patsubst %.c,%.o,$(wildcard *.c)) - -include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/system/sm.c linux.21rc5-ac2/drivers/acpi/ospm/system/sm.c --- linux.21rc5/drivers/acpi/ospm/system/sm.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/system/sm.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,373 +0,0 @@ -/***************************************************************************** - * - * Module Name: sm.c - * $Revision: 20 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include "sm.h" - - -#define _COMPONENT ACPI_SYSTEM - MODULE_NAME ("sm") - - -/**************************************************************************** - * Internal Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: sm_print - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Prints out information on a specific system. - * - ****************************************************************************/ - -void -sm_print ( - SM_CONTEXT *system) -{ -#ifdef ACPI_DEBUG - acpi_buffer buffer; - - PROC_NAME("sm_print"); - - buffer.length = 256; - buffer.pointer = acpi_os_callocate(buffer.length); - if (!buffer.pointer) { - return; - } - - /* - * Get the full pathname for this ACPI object. - */ - acpi_get_name(system->acpi_handle, ACPI_FULL_PATHNAME, &buffer); - - /* - * Print out basic system information. 
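
When ACPI_DEBUG is compiled in, the banner printed below renders roughly like this (handle, pointer and state mix invented for illustration):

	+------------------------------------------------------------
	| System[01]:[c1a2b3c4] \_SB_
	| states: +S0 +S1 -S2 -S3 +S4 +S5
	+------------------------------------------------------------
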
- */ - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| System[%02x]:[%p] %s\n", system->device_handle, system->acpi_handle, (char*)buffer.pointer)); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| states: %cS0 %cS1 %cS2 %cS3 %cS4 %cS5\n", (system->states[0]?'+':'-'), (system->states[1]?'+':'-'), (system->states[2]?'+':'-'), (system->states[3]?'+':'-'), (system->states[4]?'+':'-'), (system->states[5]?'+':'-'))); - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n")); - - acpi_os_free(buffer.pointer); -#endif /*ACPI_DEBUG*/ - - return; -} - - -/**************************************************************************** - * - * FUNCTION: sm_add_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -sm_add_device( - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - BM_DEVICE *device = NULL; - SM_CONTEXT *system = NULL; - u8 i, type_a, type_b; - - - FUNCTION_TRACE("sm_add_device"); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Adding system device [%02x].\n", device_handle)); - - if (!context || *context) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.")); - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Allocate a new SM_CONTEXT structure. - */ - system = acpi_os_callocate(sizeof(SM_CONTEXT)); - if (!system) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - /* - * Get information on this device. - */ - status = bm_get_device_info(device_handle, &device); - if (ACPI_FAILURE(status)) { - goto end; - } - - system->device_handle = device->handle; - system->acpi_handle = device->acpi_handle; - - /* - * Sx States: - * ---------- - * Figure out which Sx states are supported. 
- */
-	for (i=0; i<ACPI_S_STATE_COUNT; i++) {
-		if (ACPI_SUCCESS(acpi_hw_obtain_sleep_type_register_data(
-			i, &type_a, &type_b))) {
-			system->states[i] = TRUE;
-		}
-	}
-
-	status = sm_osl_add_device(system);
-	if (ACPI_FAILURE(status)) {
-		goto end;
-	}
-
-	*context = system;
-
-	sm_print(system);
-
-end:
-	if (ACPI_FAILURE(status)) {
-		acpi_os_free(system);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    sm_remove_device
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-sm_remove_device (
-	void			**context)
-{
-	acpi_status		status = AE_OK;
-	SM_CONTEXT		*system = NULL;
-
-	FUNCTION_TRACE("sm_remove_device");
-
-	if (!context || !*context) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	system = (SM_CONTEXT*)*context;
-
-	ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing system device [%02x].\n", system->device_handle));
-
-	status = sm_osl_remove_device(system);
-
-	acpi_os_free(system);
-
-	*context = NULL;
-
-	return_ACPI_STATUS(status);
-}
-
-
-/****************************************************************************
- *                             External Functions
- ****************************************************************************/
-
-/****************************************************************************
- *
- * FUNCTION:    sm_initialize
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-sm_initialize (void)
-{
-	acpi_status		status = AE_OK;
-	BM_DEVICE_ID		criteria;
-	BM_DRIVER		driver;
-
-	FUNCTION_TRACE("sm_initialize");
-
-	MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID));
-	MEMSET(&driver, 0, sizeof(BM_DRIVER));
-
-	/*
-	 * Register driver for the System device.
-	 */
-	criteria.type = BM_TYPE_SYSTEM;
-
-	driver.notify = &sm_notify;
-	driver.request = &sm_request;
-
-	status = bm_register_driver(&criteria, &driver);
-
-	return_ACPI_STATUS(status);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION:    sm_terminate
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-sm_terminate (void)
-{
-	acpi_status		status = AE_OK;
-	BM_DEVICE_ID		criteria;
-	BM_DRIVER		driver;
-
-	FUNCTION_TRACE("sm_terminate");
-
-	MEMSET(&criteria, 0, sizeof(BM_DEVICE_ID));
-	MEMSET(&driver, 0, sizeof(BM_DRIVER));
-
-	/*
-	 * Unregister driver for System devices.
- */ - criteria.type = BM_TYPE_SYSTEM; - - driver.notify = &sm_notify; - driver.request = &sm_request; - - status = bm_unregister_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/***************************************************************************** - * - * FUNCTION: sm_notify - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ -acpi_status -sm_notify ( - BM_NOTIFY notify_type, - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("sm_notify"); - - if (!context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - switch (notify_type) { - - case BM_NOTIFY_DEVICE_ADDED: - status = sm_add_device(device_handle, context); - break; - - case BM_NOTIFY_DEVICE_REMOVED: - status = sm_remove_device(context); - break; - - default: - status = AE_SUPPORT; - break; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: sm_request - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -sm_request ( - BM_REQUEST *request, - void *context) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("sm_request"); - - /* - * Must have a valid request structure and context. - */ - if (!request || !context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Handle Request: - * --------------- - */ - switch (request->command) { - - default: - status = AE_SUPPORT; - break; - } - - request->status = status; - - return_ACPI_STATUS(status); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/system/sm_osl.c linux.21rc5-ac2/drivers/acpi/ospm/system/sm_osl.c --- linux.21rc5/drivers/acpi/ospm/system/sm_osl.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/system/sm_osl.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,922 +0,0 @@ -/****************************************************************************** - * - * Module Name: sm_osl.c - * $Revision: 16 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "sm.h" - - -MODULE_AUTHOR("Andrew Grover"); -MODULE_DESCRIPTION("ACPI Component Architecture (CA) - ACPI System Driver"); -MODULE_LICENSE("GPL"); - - -#define SM_PROC_INFO "info" -#define SM_PROC_DSDT "dsdt" - -extern struct proc_dir_entry *bm_proc_root; -struct proc_dir_entry *sm_proc_root = NULL; -static void (*sm_pm_power_off)(void) = NULL; - -static ssize_t sm_osl_read_dsdt(struct file *, char *, size_t, loff_t *); - -static struct file_operations proc_dsdt_operations = { - read: sm_osl_read_dsdt, -}; - -static acpi_status sm_osl_suspend(u32 state); - -struct proc_dir_entry *bm_proc_sleep; -struct proc_dir_entry *bm_proc_alarm; -struct proc_dir_entry *bm_proc_gpe; - -static int -sm_osl_proc_read_sleep ( - char *page, - char **start, - off_t off, - int count, - int *eof, - void *context) -{ - SM_CONTEXT *system = (SM_CONTEXT*) context; - char *str = page; - int len; - int i; - - if (!system) - goto end; - - if (off != 0) - goto end; - - for (i = 0; i <= ACPI_S5; i++) { - if (system->states[i]) - str += sprintf(str,"S%d ", i); - } - - str += sprintf(str, "\n"); - -end: - - len = (str - page); - if (len < (off + count)) - *eof = 1; - - *start = page + off; - len -= off; - - if (len > count) - len = count; - - if (len < 0) - len = 0; - - return (len); -} - -int sm_osl_proc_write_sleep (struct file *file, - const char *buffer, - unsigned long count, - void *data) -{ - SM_CONTEXT *system = (SM_CONTEXT*) data; - char str[10]; - char *strend; - unsigned long value; - - if (count > (sizeof(str) - 1)) - return -EINVAL; - - if (copy_from_user(str,buffer,count)) - return -EFAULT; - - str[count] = '\0'; - - value = simple_strtoul(str,&strend,0); - if (str == strend) - return -EINVAL; - - if (value == 0 || value >= ACPI_S5) - return -EINVAL; - - /* - * make sure that the sleep state is supported - */ - if (system->states[value] != TRUE) - return -EINVAL; - - sm_osl_suspend(value); - - return (count); -} - - -/**************************************************************************** - * - * FUNCTION: sm_osl_proc_read_info - * - ****************************************************************************/ - -static int -sm_osl_proc_read_info ( - char *page, - char **start, - off_t off, - int count, - int *eof, - void *context) -{ - acpi_status status = AE_OK; - SM_CONTEXT *system = NULL; - char *p = page; - int len; - acpi_system_info system_info; - acpi_buffer buffer; - u32 i = 0; - - if (!context) { - goto end; - } - - system = (SM_CONTEXT*) context; - - /* don't get status more than once for a single proc read */ - if (off != 0) { - goto end; - } - - /* - * Get ACPI CA Information. 
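
A usage note for the sleep write handler above: a single state digit is parsed with simple_strtoul(), validated against system->states[], and handed to sm_osl_suspend(). A hypothetical user-space trigger (the /proc path is assumed from the bm_proc_root layout; S0 and S5 are rejected by the range checks above):

	int fd = open("/proc/acpi/sleep", O_WRONLY);	/* path assumed */
	if (fd >= 0) {
		write(fd, "3", 1);	/* request S3, if system->states[3] is set */
		close(fd);
	}
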
- */
-	buffer.length = sizeof(system_info);
-	buffer.pointer = &system_info;
-
-	status = acpi_get_system_info(&buffer);
-	if (ACPI_FAILURE(status)) {
-		p += sprintf(p, "ACPI-CA Version: unknown\n");
-	}
-	else {
-		p += sprintf(p, "ACPI-CA Version: %x\n",
-			system_info.acpi_ca_version);
-	}
-
-	p += sprintf(p, "Sx States Supported: ");
-	for (i=0; i<ACPI_S_STATE_COUNT; i++) {
-		if (system->states[i]) {
-			p += sprintf(p, "S%d ", i);
-		}
-	}
-	p += sprintf(p, "\n");
-
-end:
-	len = (p - page);
-	if (len <= off+count) *eof = 1;
-	*start = page + off;
-	len -= off;
-	if (len>count) len = count;
-	if (len<0) len = 0;
-
-	return(len);
-}
-
-/****************************************************************************
- *
- * FUNCTION:    sm_osl_read_dsdt
- *
- ****************************************************************************/
-
-static ssize_t
-sm_osl_read_dsdt(
-	struct file		*file,
-	char			*buf,
-	size_t			count,
-	loff_t			*ppos)
-{
-	acpi_buffer		acpi_buf;
-	void			*data;
-	size_t			size = 0;
-
-	acpi_buf.length = 0;
-	acpi_buf.pointer = NULL;
-
-
-	/* determine what buffer size we will need */
-	if (acpi_get_table(ACPI_TABLE_DSDT, 1, &acpi_buf) != AE_BUFFER_OVERFLOW) {
-		return 0;
-	}
-
-	acpi_buf.pointer = kmalloc(acpi_buf.length, GFP_KERNEL);
-	if (!acpi_buf.pointer) {
-		return -ENOMEM;
-	}
-
-	/* get the table for real */
-	if (!ACPI_SUCCESS(acpi_get_table(ACPI_TABLE_DSDT, 1, &acpi_buf))) {
-		kfree(acpi_buf.pointer);
-		return 0;
-	}
-
-	if (*ppos < acpi_buf.length) {
-		data = acpi_buf.pointer + file->f_pos;
-		size = acpi_buf.length - file->f_pos;
-		if (size > count)
-			size = count;
-		if (copy_to_user(buf, data, size)) {
-			kfree(acpi_buf.pointer);
-			return -EFAULT;
-		}
-	}
-
-	kfree(acpi_buf.pointer);
-
-	*ppos += size;
-
-	return size;
-}
-
-static int
-sm_osl_proc_read_alarm (
-	char			*page,
-	char			**start,
-	off_t			off,
-	int			count,
-	int			*eof,
-	void			*context)
-{
-	char *str = page;
-	int len;
-	u32 sec,min,hr;
-	u32 day,mo,yr;
-
-	if (off != 0) goto out;
-
-	spin_lock(&rtc_lock);
-	sec = CMOS_READ(RTC_SECONDS_ALARM);
-	min = CMOS_READ(RTC_MINUTES_ALARM);
-	hr = CMOS_READ(RTC_HOURS_ALARM);
-
-#if 0
-	/* if I ever get an FACP with proper values, maybe I'll enable this code */
-	if (acpi_gbl_FADT->day_alrm)
-		day = CMOS_READ(acpi_gbl_FADT->day_alrm);
-	else
-		day = CMOS_READ(RTC_DAY_OF_MONTH);
-	if (acpi_gbl_FADT->mon_alrm)
-		mo = CMOS_READ(acpi_gbl_FADT->mon_alrm);
-	else
-		mo = CMOS_READ(RTC_MONTH);
-	if (acpi_gbl_FADT->century)
-		yr = CMOS_READ(acpi_gbl_FADT->century) * 100 + CMOS_READ(RTC_YEAR);
-	else
-		yr = CMOS_READ(RTC_YEAR);
-#else
-	day = CMOS_READ(RTC_DAY_OF_MONTH);
-	mo = CMOS_READ(RTC_MONTH);
-	yr = CMOS_READ(RTC_YEAR);
-#endif
-	spin_unlock(&rtc_lock);
-
-	BCD_TO_BIN(sec);
-	BCD_TO_BIN(min);
-	BCD_TO_BIN(hr);
-	BCD_TO_BIN(day);
-	BCD_TO_BIN(mo);
-	BCD_TO_BIN(yr);
-
-	str += sprintf(str,"%4.4u-",yr);
-
-	str += (mo > 12) ?
-		sprintf(str,"**-") :
-		sprintf(str,"%2.2u-",mo);
-
-	str += (day > 31) ?
-		sprintf(str,"** ") :
-		sprintf(str,"%2.2u ",day);
-
-	str += (hr > 23) ?
-		sprintf(str,"**:") :
-		sprintf(str,"%2.2u:",hr);
-
-	str += (min > 59) ?
-		sprintf(str,"**:") :
-		sprintf(str,"%2.2u:",min);
-
-	str += (sec > 59) ?
- sprintf(str,"**\n") : - sprintf(str,"%2.2u\n",sec); - - out: - len = str - page; - - if (len < count) *eof = 1; - else if (len > count) len = count; - - if (len < 0) len = 0; - - *start = page; - - return len; -} - -static int get_date_field(char **str, u32 *value) -{ - char *next,*strend; - int error = -EINVAL; - - /* try to find delimeter, only to insert null; - * the end of string won't have one, but is still valid - */ - next = strpbrk(*str,"- :"); - if (next) *next++ = '\0'; - - *value = simple_strtoul(*str,&strend,10); - - /* signal success if we got a good digit */ - if (strend != *str) error = 0; - - if (next) *str = next; - return error; -} - - - -int sm_osl_proc_write_alarm ( - struct file *file, - const char *buffer, - unsigned long count, - void *data) -{ - char buf[30]; - char *str = buf; - u32 sec,min,hr; - u32 day,mo,yr; - int adjust = 0; - unsigned char rtc_control; - int error = -EINVAL; - - if (count > sizeof(buf) - 1) return -EINVAL; - - if (copy_from_user(str,buffer,count)) return -EFAULT; - - str[count] = '\0'; - /* check for time adjustment */ - if (str[0] == '+') { - str++; - adjust = 1; - } - - if ((error = get_date_field(&str,&yr))) goto out; - if ((error = get_date_field(&str,&mo))) goto out; - if ((error = get_date_field(&str,&day))) goto out; - if ((error = get_date_field(&str,&hr))) goto out; - if ((error = get_date_field(&str,&min))) goto out; - if ((error = get_date_field(&str,&sec))) goto out; - - - if (sec > 59) { - min += 1; - sec -= 60; - } - if (min > 59) { - hr += 1; - min -= 60; - } - if (hr > 23) { - day += 1; - hr -= 24; - } - if (day > 31) { - mo += 1; - day -= 31; - } - if (mo > 12) { - yr += 1; - mo -= 12; - } - - spin_lock_irq(&rtc_lock); - rtc_control = CMOS_READ(RTC_CONTROL); - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BIN_TO_BCD(yr); - BIN_TO_BCD(mo); - BIN_TO_BCD(day); - BIN_TO_BCD(hr); - BIN_TO_BCD(min); - BIN_TO_BCD(sec); - } - - if (adjust) { - yr += CMOS_READ(RTC_YEAR); - mo += CMOS_READ(RTC_MONTH); - day += CMOS_READ(RTC_DAY_OF_MONTH); - hr += CMOS_READ(RTC_HOURS); - min += CMOS_READ(RTC_MINUTES); - sec += CMOS_READ(RTC_SECONDS); - } - spin_unlock_irq(&rtc_lock); - - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BCD_TO_BIN(yr); - BCD_TO_BIN(mo); - BCD_TO_BIN(day); - BCD_TO_BIN(hr); - BCD_TO_BIN(min); - BCD_TO_BIN(sec); - } - - if (sec > 59) { - min++; - sec -= 60; - } - if (min > 59) { - hr++; - min -= 60; - } - if (hr > 23) { - day++; - hr -= 24; - } - if (day > 31) { - mo++; - day -= 31; - } - if (mo > 12) { - yr++; - mo -= 12; - } - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BIN_TO_BCD(yr); - BIN_TO_BCD(mo); - BIN_TO_BCD(day); - BIN_TO_BCD(hr); - BIN_TO_BCD(min); - BIN_TO_BCD(sec); - } - - spin_lock_irq(&rtc_lock); - /* write the fields the rtc knows about */ - CMOS_WRITE(hr,RTC_HOURS_ALARM); - CMOS_WRITE(min,RTC_MINUTES_ALARM); - CMOS_WRITE(sec,RTC_SECONDS_ALARM); - - /* If the system supports an enhanced alarm, it will have non-zero - * offsets into the CMOS RAM here. - * Which for some reason are pointing to the RTC area of memory. 
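
For reference, the BCD helpers used throughout the alarm code are the usual 2.4 <linux/mc146818rtc.h> macros; the definitions shown here are an assumption for illustration, not quoted from this patch:

	#define BCD_TO_BIN(val)	((val) = ((val) & 15) + ((val) >> 4) * 10)
	#define BIN_TO_BCD(val)	((val) = (((val) / 10) << 4) + (val) % 10)

	/* e.g. the RTC returns 0x59 for 59 seconds:
	 * (0x59 & 15) + (0x59 >> 4) * 10 == 9 + 50 == 59 */
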
- */ -#if 0 - if (acpi_gbl_FADT->day_alrm) CMOS_WRITE(day,acpi_gbl_FADT->day_alrm); - if (acpi_gbl_FADT->mon_alrm) CMOS_WRITE(mo,acpi_gbl_FADT->mon_alrm); - if (acpi_gbl_FADT->century) CMOS_WRITE(yr / 100,acpi_gbl_FADT->century); -#endif - /* enable the rtc alarm interrupt */ - if (!(rtc_control & RTC_AIE)) { - rtc_control |= RTC_AIE; - CMOS_WRITE(rtc_control,RTC_CONTROL); - CMOS_READ(RTC_INTR_FLAGS); - } - - /* unlock the lock on the rtc now that we're done with it */ - spin_unlock_irq(&rtc_lock); - - acpi_hw_register_bit_access(ACPI_WRITE,ACPI_MTX_LOCK, RTC_EN, 1); - - file->f_pos += count; - - error = 0; - out: - return error ? error : count; -} - -static int -sm_osl_proc_read_gpe( - char *page, - char **start, - off_t off, - int count, - int *eof, - void *context) -{ - char *str = page; - int size; - int length; - int i; - u32 addr,data; - - if (off) goto out; - - if (acpi_gbl_FADT->V1_gpe0blk) { - length = acpi_gbl_FADT->gpe0blk_len / 2; - - str += sprintf(str,"GPE0: "); - - for (i = length; i > 0; i--) { - addr = GPE0_EN_BLOCK | (i - 1); - data = acpi_hw_register_read(ACPI_MTX_LOCK,addr); - str += sprintf(str,"%2.2x ",data); - } - str += sprintf(str,"\n"); - - str += sprintf(str,"Status: "); - for (i = length; i > 0; i--) { - addr = GPE0_STS_BLOCK | (i - 1); - data = acpi_hw_register_read(ACPI_MTX_LOCK,addr); - str += sprintf(str,"%2.2x ",data); - } - str += sprintf(str,"\n"); - } - - if (acpi_gbl_FADT->V1_gpe1_blk) { - length = acpi_gbl_FADT->gpe1_blk_len / 2; - - - str += sprintf(str,"GPE1: "); - for (i = length; i > 0; i--) { - addr = GPE1_EN_BLOCK | (i - 1); - data = acpi_hw_register_read(ACPI_MTX_LOCK,addr); - str += sprintf(str,"%2.2x",data); - } - str += sprintf(str,"\n"); - - str += sprintf(str,"Status: "); - for (i = length; i > 0; i--) { - addr = GPE1_STS_BLOCK | (i - 1); - data = acpi_hw_register_read(ACPI_MTX_LOCK,addr); - str += sprintf(str,"%2.2x",data); - } - str += sprintf(str,"\n"); - } - out: - size = str - page; - if (size < count) *eof = 1; - else if (size > count) size = count; - - if (size < 0) size = 0; - *start = page; - - return size; -} - -static int -sm_osl_proc_write_gpe ( - struct file *file, - const char *buffer, - unsigned long count, - void *data) -{ - char buf[256]; - char *str = buf; - char *next; - int error = -EINVAL; - u32 addr,value = 0; - - if (count > sizeof(buf) + 1) return -EINVAL; - - if (copy_from_user(str,buffer,count)) return -EFAULT; - - str[count] = '\0'; - - /* set addr to which block to refer to */ - if (!strncmp(str,"GPE0 ",5)) addr = GPE0_EN_BLOCK; - else if (!strncmp(str,"GPE1 ",5)) addr = GPE1_EN_BLOCK; - else goto out; - - str += 5; - - /* set low order bits to index of bit to set */ - addr |= simple_strtoul(str,&next,0); - if (next == str) goto out; - - if (next) { - str = ++next; - value = simple_strtoul(str,&next,0); - if (next == str) value = 1; - } - - value = acpi_hw_register_bit_access(ACPI_WRITE,ACPI_MTX_LOCK,addr,(value ? 1 : 0)); - - error = 0; - out: - return error ? error : count; -} - - -/**************************************************************************** - * - * FUNCTION: sm_osl_suspend - * - * PARAMETERS: %state: Sleep state to enter. Assumed that caller has filtered - * out bogus values, so it's one of S1, S2, S3 or S4 - * - * RETURN: ACPI_STATUS, whether or not we successfully entered and - * exited sleep. - * - * DESCRIPTION: - * This function is the meat of the sleep routine, as far as the ACPI-CA is - * concerned. - * - * See Chapter 9 of the ACPI 2.0 spec for details concerning the methodology here. 
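
sm_osl_proc_write_gpe above accepts strings of the form "GPE0 <bit> [<value>]". A userspace sketch of the same parse, with parse_gpe an illustrative name, GPE0_EN_BLOCK/GPE1_EN_BLOCK reduced to made-up tag values, and strtoul standing in for the kernel's simple_strtoul:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define GPE0_EN_BLOCK 0x100     /* hypothetical block tags for the demo */
    #define GPE1_EN_BLOCK 0x200

    static int parse_gpe(const char *str, unsigned long *addr, unsigned long *value)
    {
        char *next;

        /* which GPE block does the string refer to? */
        if (!strncmp(str, "GPE0 ", 5))
            *addr = GPE0_EN_BLOCK;
        else if (!strncmp(str, "GPE1 ", 5))
            *addr = GPE1_EN_BLOCK;
        else
            return -1;
        str += 5;

        /* low-order bits select the bit index within the block */
        *addr |= strtoul(str, &next, 0);
        if (next == str)
            return -1;

        /* optional trailing value; default to 1 (enable) when absent */
        *value = 1;
        if (*next) {
            str = next + 1;
            *value = strtoul(str, &next, 0);
            if (next == str)
                *value = 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned long addr, value;

        if (parse_gpe("GPE0 3 0", &addr, &value) == 0)
            printf("addr=0x%lx value=%lu\n", addr, value);   /* addr=0x103 value=0 */
        return 0;
    }
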
- *
- * It will do the following things:
- * - Call arch-specific routines to save the processor and kernel state
- * - Call acpi_enter_sleep_state to actually go to sleep
- * ....
- * When we wake back up, we will:
- * - Restore the processor and kernel state
- * - Return to the user
- *
- * By having this routine in here, it hides it from every part of the CA,
- * so it can remain OS-independent. The only function that calls this is
- * sm_proc_write_sleep, which gets the sleep state to enter from the user.
- *
- ****************************************************************************/
-static acpi_status
-sm_osl_suspend(u32 state)
-{
-	acpi_status status = AE_ERROR;
-	unsigned long wakeup_address;
-
-	/* get out if state is invalid */
-	if (state < ACPI_S1 || state > ACPI_S5)
-		goto acpi_sleep_done;
-
-	/* make sure we don't get any surprises */
-	disable();
-
-	/* TODO: save device state and suspend them */
-
-	/* save the processor state to memory if going into S2 or S3;
-	 * save it to disk if going into S4.
-	 * Also, set the FWV if going into an STR state
-	 */
-	if (state == ACPI_S2 || state == ACPI_S3) {
-#ifdef DONT_USE_UNTIL_LOWLEVEL_CODE_EXISTS
-		/* That && trick is *not going to work*. Read the gcc
-		   specs: they explicitly say that jumping to a label in
-		   another function is *not allowed*. */
-		wakeup_address = acpi_save_state_mem((unsigned long)&&acpi_sleep_done);
-
-		if (!wakeup_address) goto acpi_sleep_done;
-
-		acpi_set_firmware_waking_vector(
-			(ACPI_PHYSICAL_ADDRESS)wakeup_address);
-#endif
-	} else if (state == ACPI_S4)
-#ifdef DONT_USE_UNTIL_LOWLEVEL_CODE_EXISTS
-		if (acpi_save_state_disk((unsigned long)&&acpi_sleep_done))
-			goto acpi_sleep_done;
-#endif
-
-	/* set status, since acpi_enter_sleep_state won't return unless something
-	 * goes wrong, or it's just S1.
-	 */
-	status = AE_OK;
-
-	mdelay(10);
-	status = acpi_enter_sleep_state(state);
-
- acpi_sleep_done:
-
-	/* pause for a bit to allow devices to come back on */
-	mdelay(10);
-
-	/* make sure that the firmware waking vector is reset */
-	acpi_set_firmware_waking_vector((ACPI_PHYSICAL_ADDRESS)0);
-
-	acpi_leave_sleep_state(state);
-
-	/* TODO: resume devices and restore their state */
-
-	enable();
-	return status;
-}
-
-
-/****************************************************************************
- *
- * FUNCTION: sm_osl_power_down
- *
- ****************************************************************************/
-
-void
-sm_osl_power_down (void)
-{
-	/* Power down the system (S5 = soft off).
*/ - sm_osl_suspend(ACPI_S5); -} - - -/**************************************************************************** - * - * FUNCTION: sm_osl_add_device - * - ****************************************************************************/ - -acpi_status -sm_osl_add_device( - SM_CONTEXT *system) -{ - u32 i = 0; - struct proc_dir_entry *bm_proc_dsdt; - - if (!system) { - return(AE_BAD_PARAMETER); - } - - printk("ACPI: System firmware supports"); - for (i=0; istates[i]) { - printk(" S%d", i); - } - } - printk("\n"); - - if (system->states[ACPI_STATE_S5]) { - sm_pm_power_off = pm_power_off; - pm_power_off = sm_osl_power_down; - } - - create_proc_read_entry(SM_PROC_INFO, S_IRUGO, - sm_proc_root, sm_osl_proc_read_info, (void*)system); - - bm_proc_sleep = create_proc_read_entry("sleep", S_IFREG | S_IRUGO | S_IWUSR, - sm_proc_root, sm_osl_proc_read_sleep, (void*)system); - if (bm_proc_sleep) - bm_proc_sleep->write_proc = sm_osl_proc_write_sleep; - - bm_proc_alarm = create_proc_read_entry("alarm", S_IFREG | S_IRUGO | S_IWUSR, - sm_proc_root,sm_osl_proc_read_alarm, NULL); - if (bm_proc_alarm) - bm_proc_alarm->write_proc = sm_osl_proc_write_alarm; - - bm_proc_gpe = create_proc_read_entry("gpe", S_IFREG | S_IRUGO | S_IWUSR, - sm_proc_root,sm_osl_proc_read_gpe,NULL); - if (bm_proc_gpe) - bm_proc_gpe->write_proc = sm_osl_proc_write_gpe; - - /* - * Get a wakeup address for use when we come back from sleep. - * At least on IA-32, this needs to be in low memory. - * When sleep is supported on other arch's, then we may want - * to move this out to another place, but GFP_LOW should suffice - * for now. - */ -#if 0 - if (system->states[ACPI_S3] || system->states[ACPI_S4]) { - acpi_wakeup_address = (unsigned long)virt_to_phys(get_free_page(GFP_LOWMEM)); - printk(KERN_INFO "ACPI: Have wakeup address 0x%8.8x\n",acpi_wakeup_address); - } -#endif - - /* - * This returns more than a page, so we need to use our own file ops, - * not proc's generic ones - */ - bm_proc_dsdt = create_proc_entry(SM_PROC_DSDT, S_IRUSR, sm_proc_root); - if (bm_proc_dsdt) { - bm_proc_dsdt->proc_fops = &proc_dsdt_operations; - } - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: sm_osl_remove_device - * - ****************************************************************************/ - -acpi_status -sm_osl_remove_device ( - SM_CONTEXT *system) -{ - if (!system) { - return(AE_BAD_PARAMETER); - } - - remove_proc_entry(SM_PROC_INFO, sm_proc_root); - remove_proc_entry(SM_PROC_DSDT, sm_proc_root); - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: sm_osl_generate_event - * - ****************************************************************************/ - -acpi_status -sm_osl_generate_event ( - u32 event, - SM_CONTEXT *system) -{ - acpi_status status = AE_OK; - - if (!system) { - return(AE_BAD_PARAMETER); - } - - switch (event) { - - default: - return(AE_BAD_PARAMETER); - break; - } - - return(status); -} - - -/**************************************************************************** - * - * FUNCTION: sm_osl_init - * - * PARAMETERS: - * - * RETURN: 0: Success - * - * DESCRIPTION: Module initialization. 
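
Each read handler registered above follows the 2.4 read_proc contract: generate the whole report into 'page', then clip it to the [off, off+count) window the caller asked for, and raise *eof once the window reaches the end. A self-contained sketch of that epilogue, with example_read_proc an illustrative name and main() standing in for the procfs core:

    #include <stdio.h>
    #include <sys/types.h>

    static int example_read_proc(char *page, char **start, off_t off,
                                 int count, int *eof, void *data)
    {
        char *p = page;
        int len;

        p += sprintf(p, "example line 1\nexample line 2\n");

        len = p - page;             /* total bytes generated */
        if (len <= off + count)
            *eof = 1;               /* caller has now seen everything */
        *start = page + off;        /* hand back only the requested window */
        len -= off;
        if (len > count)
            len = count;
        if (len < 0)
            len = 0;
        return len;
    }

    int main(void)
    {
        char page[4096];
        char *start;
        int eof = 0;
        off_t off = 0;

        /* emulate the procfs core reading in deliberately small chunks */
        while (!eof) {
            int n = example_read_proc(page, &start, off, 8, &eof, NULL);
            if (n <= 0 && eof)
                break;
            fwrite(start, 1, n, stdout);
            off += n;
        }
        return 0;
    }
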
- * - ****************************************************************************/ - -static int __init -sm_osl_init (void) -{ - acpi_status status = AE_OK; - - /* abort if no busmgr */ - if (!bm_proc_root) - return -ENODEV; - - sm_proc_root = bm_proc_root; - if (!sm_proc_root) { - status = AE_ERROR; - } - else { - status = sm_initialize(); - } - - return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; -} - - -/**************************************************************************** - * - * FUNCTION: sm_osl_cleanup - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: Module cleanup. - * - ****************************************************************************/ - -static void __exit -sm_osl_cleanup (void) -{ - sm_terminate(); - - return; -} - - -module_init(sm_osl_init); -module_exit(sm_osl_cleanup); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/thermal/Makefile linux.21rc5-ac2/drivers/acpi/ospm/thermal/Makefile --- linux.21rc5/drivers/acpi/ospm/thermal/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/thermal/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,6 +0,0 @@ -O_TARGET := ospm_$(notdir $(CURDIR)).o -obj-m := $(O_TARGET) -EXTRA_CFLAGS += $(ACPI_CFLAGS) -obj-y := $(patsubst %.c,%.o,$(wildcard *.c)) - -include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/thermal/tz.c linux.21rc5-ac2/drivers/acpi/ospm/thermal/tz.c --- linux.21rc5/drivers/acpi/ospm/thermal/tz.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/thermal/tz.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,642 +0,0 @@ -/***************************************************************************** - * - * Module Name: tz.c - * $Revision: 44 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include 
-#include 
-#include "tz.h"
-
-
-#define _COMPONENT	ACPI_THERMAL
-	MODULE_NAME	("tz")
-
-
-/****************************************************************************
- * Globals
- ****************************************************************************/
-
-extern int TZP;
-
-
-/****************************************************************************
- * Internal Functions
- ****************************************************************************/
-
-/****************************************************************************
- *
- * FUNCTION: tz_print
- *
- ****************************************************************************/
-
-void
-tz_print (
-	TZ_CONTEXT *tz)
-{
-#ifdef ACPI_DEBUG
-	acpi_buffer buffer;
-	u32 i,j = 0;
-	TZ_THRESHOLDS *thresholds = NULL;
-
-	FUNCTION_TRACE("tz_print");
-
-	if (!tz)
-		return;
-
-	thresholds = &(tz->policy.thresholds);
-
-	buffer.length = 256;
-	buffer.pointer = acpi_os_callocate(buffer.length);
-	if (!buffer.pointer)
-		return;
-
-	/*
-	 * Get the full pathname for this ACPI object.
-	 */
-	acpi_get_name(tz->acpi_handle, ACPI_FULL_PATHNAME, &buffer);
-
-	/*
-	 * Print out basic thermal zone information.
-	 */
-	ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n"));
-	ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| Thermal_zone[%02x]:[%p] %s\n", tz->device_handle, tz->acpi_handle, (char*)buffer.pointer));
-	ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| temperature[%d] state[%08x]\n", tz->policy.temperature, tz->policy.state));
-	ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| cooling_mode[%08x] polling_freq[%d]\n", tz->policy.cooling_mode, tz->policy.polling_freq));
-	ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| critical[%d]\n", thresholds->critical.temperature));
-	if (thresholds->hot.is_valid)
-		ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| hot[%d]\n", thresholds->hot.temperature));
-	if (thresholds->passive.is_valid) {
-		ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| passive[%d]: tc1[%d] tc2[%d] tsp[%d]\n", thresholds->passive.temperature, thresholds->passive.tc1, thresholds->passive.tc2, thresholds->passive.tsp));
-		if (thresholds->passive.devices.count > 0) {
-			ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| devices"));
-			for (j=0; (j<thresholds->passive.devices.count && j<10); j++) {
-				ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "[%02x]", thresholds->passive.devices.handles[j]));
-			}
-			ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "\n"));
-		}
-	}
-	for (i=0; iactive[i].is_valid)
-			break;
-		ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| active[%d]: index[%d]\n", thresholds->active[i].temperature, i));
-		if (thresholds->active[i].devices.count > 0) {
-			ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "| devices"));
-			for (j=0; (j<thresholds->active[i].devices.count && j<10); j++) {
-				ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "[%02x]", thresholds->active[i].devices.handles[j]));
-			}
-			ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "\n"));
-		}
-	}
-
-	ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO, "+------------------------------------------------------------\n"));
-
-	acpi_os_free(buffer.pointer);
-#endif /*ACPI_DEBUG*/
-
-	return;
-}
-
-
-/****************************************************************************
- *
- * FUNCTION: tz_get_temperature
- *
- ****************************************************************************/
-
-acpi_status
-tz_get_temperature (
-	TZ_CONTEXT
*tz) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("tz_get_temperature"); - - if (!tz) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Evaluate the _TMP method to get the current temperature. - */ - status = bm_evaluate_simple_integer(tz->acpi_handle, "_TMP", &(tz->policy.temperature)); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %d d_k\n", tz->policy.temperature)); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: tz_set_cooling_preference - * - ****************************************************************************/ - -acpi_status -tz_set_cooling_preference ( - TZ_CONTEXT *tz, - TZ_COOLING_MODE cooling_mode) -{ - acpi_status status = AE_OK; - acpi_object_list arg_list; - acpi_object arg0; - - FUNCTION_TRACE("tz_set_cooling_preference"); - - if (!tz || ((cooling_mode != TZ_COOLING_MODE_ACTIVE) && (cooling_mode != TZ_COOLING_MODE_PASSIVE))) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Build the argument list, which simply consists of the current - * cooling preference. - */ - memset(&arg_list, 0, sizeof(acpi_object)); - arg_list.count = 1; - arg_list.pointer = &arg0; - - memset(&arg0, 0, sizeof(acpi_object)); - arg0.type = ACPI_TYPE_INTEGER; - arg0.integer.value = cooling_mode; - - /* - * Evaluate "_SCP" - setting the new cooling preference. - */ - status = acpi_evaluate_object(tz->acpi_handle, "_SCP", &arg_list, NULL); - if (ACPI_FAILURE(status)) { - tz->policy.cooling_mode = -1; - return_ACPI_STATUS(status); - } - - tz->policy.cooling_mode = cooling_mode; - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: tz_get_thresholds - * - ****************************************************************************/ - -acpi_status -tz_get_thresholds ( - TZ_CONTEXT *tz) -{ - acpi_status status = AE_OK; - TZ_THRESHOLDS *thresholds = NULL; - u32 value = 0; - u32 i = 0; - - FUNCTION_TRACE("acpi_tz_get_thresholds"); - - if (!tz) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - thresholds = &(tz->policy.thresholds); - - /* Critical Shutdown (required) */ - - status = bm_evaluate_simple_integer(tz->acpi_handle, "_CRT", &value); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "No critical threshold\n")); - return_ACPI_STATUS(status); - } - else { - thresholds->critical.temperature = value; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found critical threshold [%d]\n", thresholds->critical.temperature)); - - } - - /* Critical Sleep (optional) */ - - status = bm_evaluate_simple_integer(tz->acpi_handle, "_HOT", &value); - if (ACPI_FAILURE(status)) { - thresholds->hot.is_valid = 0; - thresholds->hot.temperature = 0; - } - else { - thresholds->hot.is_valid = 1; - thresholds->hot.temperature = value; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found hot threshold [%d]\n", thresholds->hot.temperature)); - } - - /* Passive: Processors (optional) */ - - status = bm_evaluate_simple_integer(tz->acpi_handle, "_PSV", &value); - if (ACPI_FAILURE(status)) { - thresholds->passive.is_valid = 0; - thresholds->passive.temperature = 0; - } - else { - thresholds->passive.is_valid = 1; - thresholds->passive.temperature = value; - - status = bm_evaluate_simple_integer(tz->acpi_handle, "_TC1", &value); - if (ACPI_FAILURE(status)) { - thresholds->passive.is_valid = 0; - } - thresholds->passive.tc1 = value; - - status = bm_evaluate_simple_integer(tz->acpi_handle, 
"_TC2", &value); - if (ACPI_FAILURE(status)) { - thresholds->passive.is_valid = 0; - } - thresholds->passive.tc2 = value; - - status = bm_evaluate_simple_integer(tz->acpi_handle, "_TSP", &value); - if (ACPI_FAILURE(status)) { - thresholds->passive.is_valid = 0; - } - thresholds->passive.tsp = value; - - status = bm_evaluate_reference_list(tz->acpi_handle, "_PSL", &(thresholds->passive.devices)); - if (ACPI_FAILURE(status)) { - thresholds->passive.is_valid = 0; - } - - if (thresholds->passive.is_valid) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found passive threshold [%d]\n", thresholds->passive.temperature)); - } - else { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid passive threshold\n")); - } - } - - /* Active: Fans, etc. (optional) */ - - for (i=0; iacpi_handle, name, &value); - if (ACPI_FAILURE(status)) { - thresholds->active[i].is_valid = 0; - thresholds->active[i].temperature = 0; - break; - } - - thresholds->active[i].temperature = value; - name[2] = 'L'; - - status = bm_evaluate_reference_list(tz->acpi_handle, name, &(thresholds->active[i].devices)); - if (ACPI_SUCCESS(status)) { - thresholds->active[i].is_valid = 1; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found active threshold [%d]:[%d]\n", i, thresholds->active[i].temperature)); - } - else { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid active threshold [%d]\n", i)); - } - } - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: tz_add_device - * - ****************************************************************************/ - -acpi_status -tz_add_device ( - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - TZ_CONTEXT *tz = NULL; - BM_DEVICE *device = NULL; - acpi_handle tmp_handle = NULL; - static u32 zone_count = 0; - - FUNCTION_TRACE("tz_add_device"); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Adding thermal zone [%02x].\n", device_handle)); - - if (!context || *context) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Invalid context for device [%02x].\n", device_handle)); - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - /* - * Get information on this device. - */ - status = bm_get_device_info(device_handle, &device); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Allocate a new Thermal Zone device. - */ - tz = acpi_os_callocate(sizeof(TZ_CONTEXT)); - if (!tz) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - tz->device_handle = device->handle; - tz->acpi_handle = device->acpi_handle; - - /* TBD: How to manage 'uid' when zones are Pn_p? */ - sprintf(tz->uid, "%d", zone_count++); - - /* - * Temperature: - * ------------ - * Make sure we can read the zone's current temperature (_TMP). - * If we can't, there's no use in doing any policy (abort). - */ - status = tz_get_temperature(tz); - if (ACPI_FAILURE(status)) - goto end; - - /* - * Polling Frequency: - * ------------------ - * If _TZP doesn't exist use the OS default polling frequency. - */ - status = bm_evaluate_simple_integer(tz->acpi_handle, "_TZP", &(tz->policy.polling_freq)); - if (ACPI_FAILURE(status)) { - tz->policy.polling_freq = TZP; - } - status = AE_OK; - - /* - * Cooling Preference: - * ------------------- - * Default to ACTIVE (noisy) cooling until policy decides otherwise. - * Note that _SCP is optional. - */ - tz_set_cooling_preference(tz, TZ_COOLING_MODE_ACTIVE); - - /* - * Start Policy: - * ------------- - * Thermal policy is included in the kernel (this driver) because - * of the critical role it plays in avoiding nuclear meltdown. 
=O - */ - status = tz_policy_add_device(tz); - if (ACPI_FAILURE(status)) - goto end; - - status = tz_osl_add_device(tz); - if (ACPI_FAILURE(status)) - goto end; - - *context = tz; - - tz_print(tz); - -end: - if (ACPI_FAILURE(status)) - acpi_os_free(tz); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: tz_remove_device - * - ****************************************************************************/ - -acpi_status -tz_remove_device ( - void **context) -{ - acpi_status status = AE_OK; - TZ_CONTEXT *tz = NULL; - - FUNCTION_TRACE("tz_remove_device"); - - if (!context || !*context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - tz = (TZ_CONTEXT*)(*context); - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing thermal zone [%02x].\n", tz->device_handle)); - - status = tz_osl_remove_device(tz); - - /* - * Remove Policy: - * -------------- - * TBD: Move all thermal zone policy to user-mode daemon... - */ - status = tz_policy_remove_device(tz); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - acpi_os_free(tz); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * External Functions - ****************************************************************************/ - -/**************************************************************************** - * - * FUNCTION: tz_initialize - * - ****************************************************************************/ - -acpi_status -tz_initialize (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("tz_initialize"); - - memset(&criteria, 0, sizeof(BM_DEVICE_ID)); - memset(&driver, 0, sizeof(BM_DRIVER)); - - /* - * Register driver for thermal zone devices. - */ - criteria.type = BM_TYPE_THERMAL_ZONE; - - driver.notify = &tz_notify; - driver.request = &tz_request; - - status = bm_register_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: tz_terminate - * - ****************************************************************************/ - -acpi_status -tz_terminate (void) -{ - acpi_status status = AE_OK; - BM_DEVICE_ID criteria; - BM_DRIVER driver; - - FUNCTION_TRACE("tz_terminate"); - - memset(&criteria, 0, sizeof(BM_DEVICE_ID)); - memset(&driver, 0, sizeof(BM_DRIVER)); - - /* - * Unregister driver for thermal zone devices. 
- */ - criteria.type = BM_TYPE_THERMAL_ZONE; - - driver.notify = &tz_notify; - driver.request = &tz_request; - - status = bm_unregister_driver(&criteria, &driver); - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: tz_notify - * - ****************************************************************************/ - -acpi_status -tz_notify ( - BM_NOTIFY notify_type, - BM_HANDLE device_handle, - void **context) -{ - acpi_status status = AE_OK; - TZ_CONTEXT *tz = NULL; - - FUNCTION_TRACE("tz_notify"); - - if (!context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - tz = (TZ_CONTEXT*)*context; - - switch (notify_type) { - - case BM_NOTIFY_DEVICE_ADDED: - status = tz_add_device(device_handle, context); - break; - - case BM_NOTIFY_DEVICE_REMOVED: - status = tz_remove_device(context); - break; - - case TZ_NOTIFY_TEMPERATURE_CHANGE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Temperature (_TMP) change event detected.\n")); - tz_policy_check(*context); - status = tz_get_temperature(tz); - if (ACPI_SUCCESS(status)) { - status = tz_osl_generate_event(notify_type, tz); - } - break; - - case TZ_NOTIFY_THRESHOLD_CHANGE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Threshold (_SCP) change event detected.\n")); - status = tz_policy_remove_device(tz); - if (ACPI_SUCCESS(status)) { - status = tz_policy_add_device(tz); - } - status = tz_osl_generate_event(notify_type, tz); - break; - - case TZ_NOTIFY_DEVICE_LISTS_CHANGE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Device lists (_ALx, _PSL, _TZD) change event detected.\n")); - status = tz_policy_remove_device(tz); - if (ACPI_SUCCESS(status)) { - status = tz_policy_add_device(tz); - } - status = tz_osl_generate_event(notify_type, tz); - break; - - default: - status = AE_SUPPORT; - break; - } - - return_ACPI_STATUS(status); -} - - -/**************************************************************************** - * - * FUNCTION: tz_request - * - ****************************************************************************/ - -acpi_status -tz_request ( - BM_REQUEST *request, - void *context) -{ - acpi_status status = AE_OK; - TZ_CONTEXT *tz = NULL; - - FUNCTION_TRACE("tz_request"); - - /* - * Must have a valid request structure and context. - */ - if (!request || !context) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - tz = (TZ_CONTEXT*)context; - - /* - * Handle request: - * --------------- - */ - switch (request->command) { - - default: - status = AE_SUPPORT; - break; - } - - request->status = status; - - return_ACPI_STATUS(status); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/thermal/tz_osl.c linux.21rc5-ac2/drivers/acpi/ospm/thermal/tz_osl.c --- linux.21rc5/drivers/acpi/ospm/thermal/tz_osl.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/thermal/tz_osl.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,398 +0,0 @@ -/****************************************************************************** - * - * Module Name: tz_osl.c - * $Revision: 25 $ - * - *****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include "tz.h"
-
-
-MODULE_AUTHOR("Andrew Grover");
-MODULE_DESCRIPTION("ACPI Component Architecture (CA) - Thermal Zone Driver");
-MODULE_LICENSE("GPL");
-
-int TZP = 0;
-MODULE_PARM(TZP, "i");
-MODULE_PARM_DESC(TZP, "Thermal zone polling frequency, in 1/10 seconds.\n");
-
-
-#define TZ_PROC_ROOT	"thermal"
-#define TZ_PROC_STATUS	"status"
-#define TZ_PROC_INFO	"info"
-
-extern struct proc_dir_entry *bm_proc_root;
-static struct proc_dir_entry *tz_proc_root = NULL;
-
-
-/****************************************************************************
- *
- * FUNCTION: tz_osl_proc_read_info
- *
- ****************************************************************************/
-
-static int
-tz_osl_proc_read_info (
-	char *page,
-	char **start,
-	off_t off,
-	int count,
-	int *eof,
-	void *context)
-{
-	acpi_status status = AE_OK;
-	char name[5];
-	acpi_buffer buffer = {sizeof(name), &name};
-	TZ_CONTEXT *tz = NULL;
-	TZ_THRESHOLDS *thresholds = NULL;
-	char *p = page;
-	int len = 0;
-	u32 i,j;
-	u32 t = 0;
-
-	if (!context || (off != 0))
-		goto end;
-
-	tz = (TZ_CONTEXT*)context;
-
-	thresholds = &(tz->policy.thresholds);
-
-	p += sprintf(p, "critical (S5): trip=%d\n", thresholds->critical.temperature);
-
-	if (thresholds->hot.is_valid)
-		p += sprintf(p, "critical (S4): trip=%d\n", thresholds->hot.temperature);
-
-	if (thresholds->passive.is_valid) {
-		p += sprintf(p, "passive: trip=%d tc1=%d tc2=%d tsp=%d devices=", thresholds->passive.temperature, thresholds->passive.tc1, thresholds->passive.tc2, thresholds->passive.tsp);
-		for (j=0; j<thresholds->passive.devices.count; j++)
-			p += sprintf(p, "%08x%c", thresholds->passive.devices.handles[j], (j==thresholds->passive.devices.count-1)?'\n':',');
-	}
-
-	for (i=0; iactive[i].is_valid))
-			break;
-		p += sprintf(p, "active[%d]: trip=%d devices=", i, thresholds->active[i].temperature);
-		for (j=0; j<thresholds->active[i].devices.count; j++)
-			p += sprintf(p, "%08x%c", thresholds->active[i].devices.handles[j], (j==thresholds->active[i].devices.count-1)?'\n':',');
-	}
-
-	p += sprintf(p, "cooling mode: ");
-	switch (tz->policy.cooling_mode) {
-	case TZ_COOLING_MODE_ACTIVE:
-		p += sprintf(p, "active (noisy)\n");
-		break;
-	case TZ_COOLING_MODE_PASSIVE:
-		p += sprintf(p, "passive (quiet)\n");
-		break;
-	default:
-		p += sprintf(p, "unknown\n");
-		break;
-	}
-
-	p += sprintf(p, "polling: ");
-	switch (tz->policy.polling_freq) {
-	case 0:
-		p += sprintf(p, "disabled\n");
-		break;
-	default:
-		p += sprintf(p, "%d dS\n", tz->policy.polling_freq);
-		break;
-	}
-
-end:
-	len = (p - page);
-	if (len <= off+count) *eof = 1;
-	*start = page + off;
-	len -= off;
-	if (len>count) len = count;
-	if (len<0) len = 0;
-
-	return len;
-}
-
-
-/****************************************************************************
- *
- * FUNCTION: tz_osl_proc_write_info
- *
- ****************************************************************************/
-
-static int tz_osl_proc_write_info (
-	struct file *file,
-	const char *buffer,
-	unsigned long count,
-	void *data)
-{
-	TZ_CONTEXT
*tz = NULL; - u32 state = 0; - u32 size = 0; - - if (!buffer || (count==0) || !data) { - goto end; - } - - tz = (TZ_CONTEXT*)data; - - size = strlen(buffer); - if (size < 4) - goto end; - - /* Cooling preference: "scp=0" (active) or "scp=1" (passive) */ - if (0 == strncmp(buffer, "scp=", 4)) { - tz_set_cooling_preference(tz, (buffer[4] - '0')); - } - - /* Polling frequency: "tzp=X" (poll every X [0-9] seconds) */ - else if (0 == strncmp(buffer, "tzp=", 4)) { - tz->policy.polling_freq = (buffer[4] - '0') * 10; - tz_policy_check(tz); - } - -end: - return count; -} - - -/**************************************************************************** - * - * FUNCTION: tz_osl_proc_read_status - * - ****************************************************************************/ - -static int -tz_osl_proc_read_status ( - char *page, - char **start, - off_t off, - int count, - int *eof, - void *context) -{ - TZ_CONTEXT *tz = NULL; - char *p = page; - int len = 0; - - if (!context || (off != 0)) { - goto end; - } - - tz = (TZ_CONTEXT*)context; - - /* Temperature */ - - tz_get_temperature(tz); - - p += sprintf(p, "temperature: %d dK\n", tz->policy.temperature); - - p += sprintf(p, "state: "); - if (tz->policy.state == 0) - p += sprintf(p, "ok\n"); - else if (tz->policy.state & TZ_STATE_CRITICAL) - p += sprintf(p, "critical\n"); - else if (tz->policy.state & TZ_STATE_HOT) - p += sprintf(p, "hot\n"); - else { - if (tz->policy.state & TZ_STATE_ACTIVE) - p += sprintf(p, "active[%d] ", tz->policy.state & 0x07); - if (tz->policy.state & TZ_STATE_PASSIVE) - p += sprintf(p, "passive "); - p += sprintf(p, "\n"); - } - -end: - len = (p - page); - if (len <= off+count) *eof = 1; - *start = page + off; - len -= off; - if (len>count) len = count; - if (len<0) len = 0; - - return(len); -} - - -/**************************************************************************** - * - * FUNCTION: tz_osl_add_device - * - ****************************************************************************/ - -acpi_status -tz_osl_add_device( - TZ_CONTEXT *tz) -{ - struct proc_dir_entry *proc_entry = NULL; - struct proc_dir_entry *proc_child_entry = NULL; - - if (!tz) { - return(AE_BAD_PARAMETER); - } - - printk("ACPI: Thermal Zone found\n"); - - proc_entry = proc_mkdir(tz->uid, tz_proc_root); - if (!proc_entry) - return(AE_ERROR); - - proc_child_entry = create_proc_read_entry(TZ_PROC_STATUS, S_IFREG | S_IRUGO, proc_entry, tz_osl_proc_read_status, (void*)tz); - if (!proc_child_entry) - return(AE_ERROR); - - proc_child_entry = create_proc_entry(TZ_PROC_INFO, S_IFREG | 0644, proc_entry); - if (!proc_child_entry) - return(AE_ERROR); - - proc_child_entry->read_proc = tz_osl_proc_read_info; - proc_child_entry->write_proc = tz_osl_proc_write_info; - proc_child_entry->data = (void*)tz; - - return(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: tz_osl_remove_device - * - ****************************************************************************/ - -acpi_status -tz_osl_remove_device ( - TZ_CONTEXT *tz) -{ - char proc_entry[64]; - - if (!tz) { - return(AE_BAD_PARAMETER); - } - - sprintf(proc_entry, "%s/%s", tz->uid, TZ_PROC_INFO); - remove_proc_entry(proc_entry, tz_proc_root); - - sprintf(proc_entry, "%s/%s", tz->uid, TZ_PROC_STATUS); - remove_proc_entry(proc_entry, tz_proc_root); - - sprintf(proc_entry, "%s", tz->uid); - remove_proc_entry(proc_entry, tz_proc_root); - - return(AE_OK); -} - - -/**************************************************************************** - * - * 
FUNCTION: tz_osl_generate_event - * - ****************************************************************************/ - -acpi_status -tz_osl_generate_event ( - u32 event, - TZ_CONTEXT *tz) -{ - acpi_status status = AE_OK; - - if (!tz) { - return(AE_BAD_PARAMETER); - } - - switch (event) { - - case TZ_NOTIFY_TEMPERATURE_CHANGE: - status = bm_osl_generate_event(tz->device_handle, - TZ_PROC_ROOT, tz->uid, event, - tz->policy.temperature); - break; - - case TZ_NOTIFY_THRESHOLD_CHANGE: - case TZ_NOTIFY_DEVICE_LISTS_CHANGE: - status = bm_osl_generate_event(tz->device_handle, - TZ_PROC_ROOT, tz->uid, event, 0); - break; - - default: - return(AE_BAD_PARAMETER); - break; - } - - return(status); -} - - -/**************************************************************************** - * - * FUNCTION: tz_osl_init - * - ****************************************************************************/ - -static int __init -tz_osl_init (void) -{ - acpi_status status = AE_OK; - - /* abort if no busmgr */ - if (!bm_proc_root) - return -ENODEV; - - tz_proc_root = proc_mkdir(TZ_PROC_ROOT, bm_proc_root); - if (!tz_proc_root) { - status = AE_ERROR; - } - else { - status = tz_initialize(); - if (ACPI_FAILURE(status)) { - remove_proc_entry(TZ_PROC_ROOT, bm_proc_root); - } - - } - - return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; -} - - -/**************************************************************************** - * - * FUNCTION: tz_osl_cleanup - * - ****************************************************************************/ - -static void __exit -tz_osl_cleanup (void) -{ - tz_terminate(); - - if (tz_proc_root) { - remove_proc_entry(TZ_PROC_ROOT, bm_proc_root); - } - - return; -} - - -module_init(tz_osl_init); -module_exit(tz_osl_cleanup); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/ospm/thermal/tzpolicy.c linux.21rc5-ac2/drivers/acpi/ospm/thermal/tzpolicy.c --- linux.21rc5/drivers/acpi/ospm/thermal/tzpolicy.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/ospm/thermal/tzpolicy.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,578 +0,0 @@ -/**************************************************************************** - * - * Module Name: tzpolicy.c - - * $Revision: 30 $ - * - ****************************************************************************/ - -/* - * Copyright (C) 2000, 2001 Andrew Grover - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -/* - * TBD: 1. Support performance-limit control for non-processor devices - * (those listed in _TZD, e.g. graphics). 
- */ - -#include -#include -#include - -#include -#include -#include "tz.h" - - -#define _COMPONENT ACPI_THERMAL - MODULE_NAME ("tzpolicy") - - -/**************************************************************************** - * Globals - ****************************************************************************/ - -void -tz_policy_run ( - unsigned long data); - - -/**************************************************************************** - * Internal Functions - ****************************************************************************/ - -acpi_status -set_performance_limit ( - BM_HANDLE device_handle, - u32 flag) -{ - acpi_status status; - BM_REQUEST request; - - request.status = AE_OK; - request.handle = device_handle; - request.command = PR_COMMAND_SET_PERF_LIMIT; - request.buffer.length = sizeof(u32); - request.buffer.pointer = &flag; - - status = bm_request(&request); - - if (ACPI_FAILURE(status)) - return status; - else - return request.status; -} - - -/**************************************************************************** - * - * FUNCTION: tz_policy_critical - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -tz_policy_critical( - TZ_CONTEXT *tz) -{ - FUNCTION_TRACE("tz_policy_critical"); - - if (!tz) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (tz->policy.temperature >= tz->policy.thresholds.critical.temperature) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Critical (S5) threshold reached.\n")); - /* TBD: Need method for shutting down system. */ - } - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: tz_policy_hot - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -tz_policy_hot( - TZ_CONTEXT *tz) -{ - FUNCTION_TRACE("tz_policy_hot"); - - if (!tz || !tz->policy.thresholds.hot.is_valid) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (tz->policy.temperature >= tz->policy.thresholds.hot.temperature) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Critical (S4) threshold reached.\n")); - /* TBD: Need method for invoking OS-level critical suspend. */ - } - - return_ACPI_STATUS(AE_OK); -} - - -/**************************************************************************** - * - * FUNCTION: tz_policy_passive - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -tz_policy_passive( - TZ_CONTEXT *tz) -{ - TZ_PASSIVE_THRESHOLD *passive = NULL; - static u32 last_temperature = 0; - s32 trend = 0; - u32 i = 0; - - FUNCTION_TRACE("tz_policy_passive"); - - if (!tz || !tz->policy.thresholds.passive.is_valid) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - passive = &(tz->policy.thresholds.passive); - - if (tz->policy.temperature >= passive->temperature) { - /* - * Thermal trend? - * -------------- - * Using the passive cooling equation (see the ACPI - * Specification), calculate the current thermal trend - * (a.k.a. performance delta). 
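
The comment above describes the ACPI passive-cooling performance-delta equation, trend = _TC1*(Tn - Tn-1) + _TC2*(Tn - Tt), which the driver code just below computes. A standalone sketch of the same arithmetic; thermal_trend and all sample values are illustrative, with temperatures in tenths of a degree Kelvin as _TMP reports them:

    #include <stdio.h>

    /* trend = _TC1*(Tn - Tn-1) + _TC2*(Tn - Tt): positive means the zone
     * is heating up (throttle processors), negative means it is cooling
     * (performance may be raised again). */
    static int thermal_trend(int tc1, int tc2, int temp, int last_temp, int passive_temp)
    {
        return tc1 * (temp - last_temp) + tc2 * (temp - passive_temp);
    }

    int main(void)
    {
        int tc1 = 4, tc2 = 3;           /* sample _TC1/_TC2 coefficients */
        int passive = 3282;             /* sample _PSV trip point (~55 C) */
        int last = 3280, now = 3285;    /* previous and current _TMP */

        int trend = thermal_trend(tc1, tc2, now, last, passive);
        printf("trend = %d -> %s\n", trend,
               trend > 0 ? "throttle down" : trend < 0 ? "throttle up" : "hold");
        return 0;
    }
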
- */
-		trend = passive->tc1 * (tz->policy.temperature - last_temperature) + passive->tc2 * (tz->policy.temperature - passive->temperature);
-		ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "trend[%d] = TC1[%d]*(temp[%d]-last[%d]) + TC2[%d]*(temp[%d]-passive[%d])\n", trend, passive->tc1, tz->policy.temperature, last_temperature, passive->tc2, tz->policy.temperature, passive->temperature));
-
-		last_temperature = tz->policy.temperature;
-
-		/*
-		 * Heating Up?
-		 * -----------
-		 * Decrease thermal performance limit on all passive
-		 * cooling devices (processors).
-		 */
-		if (trend > 0) {
-			for (i=0; i<passive->devices.count; i++)
-				set_performance_limit(passive->devices.handles[i], PR_PERF_DEC);
-		}
-		/*
-		 * Cooling Off?
-		 * ------------
-		 * Increase thermal performance limit on all passive
-		 * cooling devices (processors).
-		 */
-		else if (trend < 0) {
-			for (i=0; i<passive->devices.count; i++)
-				set_performance_limit(passive->devices.handles[i], PR_PERF_INC);
-		}
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION: tz_policy_active
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION:
- *
- ****************************************************************************/
-
-acpi_status
-tz_policy_active(
-	TZ_CONTEXT *tz)
-{
-	acpi_status status = AE_OK;
-	TZ_ACTIVE_THRESHOLD *active = NULL;
-	u32 i,j = 0;
-
-	FUNCTION_TRACE("tz_policy_active");
-
-	if (!tz || !tz->policy.thresholds.active[0].is_valid) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	for (i=0; ipolicy.thresholds.active[i]);
-		if (!active || !active->is_valid)
-			break;
-
-		/*
-		 * Above Threshold?
-		 * ----------------
-		 * If not already enabled, turn ON all cooling devices
-		 * associated with this active threshold.
-		 */
-		if ((tz->policy.temperature >= active->temperature) && (active->cooling_state != TZ_COOLING_ENABLED)) {
-			for (j = 0; j < active->devices.count; j++) {
-				status = bm_set_device_power_state(active->devices.handles[j], ACPI_STATE_D0);
-				if (ACPI_SUCCESS(status)) {
-					ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Cooling device [%02x] now ON.\n", active->devices.handles[j]));
-				}
-				else {
-					ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Unable to turn ON cooling device [%02x].\n", active->devices.handles[j]));
-				}
-			}
-			active->cooling_state = TZ_COOLING_ENABLED;
-		}
-		/*
-		 * Below Threshold?
-		 * ----------------
-		 * Turn OFF all cooling devices associated with this
-		 * threshold.  Note that by checking "if not disabled" we
-		 * turn off all cooling devices for thresholds in the
-		 * TZ_COOLING_STATE_UNKNOWN state, useful as a level-set
-		 * during the first pass.
-		 */
-		else if (active->cooling_state != TZ_COOLING_DISABLED) {
-			for (j = 0; j < active->devices.count; j++) {
-				status = bm_set_device_power_state(active->devices.handles[j], ACPI_STATE_D3);
-				if (ACPI_SUCCESS(status)) {
-					ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Cooling device [%02x] now OFF.\n", active->devices.handles[j]));
-				}
-				else {
-					ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Unable to turn OFF cooling device [%02x].\n", active->devices.handles[j]));
-				}
-			}
-			active->cooling_state = TZ_COOLING_DISABLED;
-		}
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-
-/****************************************************************************
- *
- * FUNCTION: tz_policy_check
- *
- * PARAMETERS:
- *
- * RETURN:
- *
- * DESCRIPTION: Note that this function will get called whenever:
- *		1. A thermal event occurs.
- *		2. The polling/sampling time period expires.
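
tz_policy_check (just below) folds the zone's condition into a single bitmask: one flag per policy class, with the index of the deepest tripped active threshold OR'd into the low bits. A standalone sketch of that computation; zone_state and the flag values are illustrative stand-ins for the driver's tz.h definitions, and the hot case is omitted for brevity:

    #include <stdio.h>

    #define TZ_STATE_OK       0x00000000    /* hypothetical flag values */
    #define TZ_STATE_ACTIVE   0x00000010
    #define TZ_STATE_PASSIVE  0x00000020
    #define TZ_STATE_CRITICAL 0x00000080

    static unsigned int zone_state(int temp, int crit, int psv,
                                   const int *active, int nactive)
    {
        unsigned int state = TZ_STATE_OK;
        int i, index = 0;

        if (temp >= crit)
            state |= TZ_STATE_CRITICAL;
        if (temp >= psv)
            state |= TZ_STATE_PASSIVE;
        for (i = 0; i < nactive; i++) {
            if (temp >= active[i]) {
                state |= TZ_STATE_ACTIVE;
                if (i > index)
                    index = i;      /* remember the deepest tripped _ACx */
            }
        }
        return state | index;       /* low bits carry the active index */
    }

    int main(void)
    {
        int trips[2] = { 3200, 3300 };  /* sample _AC0/_AC1 trip points */

        printf("state=%#x\n", zone_state(3310, 3500, 3400, trips, 2));  /* 0x11 */
        return 0;
    }
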
- * - ****************************************************************************/ - -void -tz_policy_check ( - void *context) -{ - acpi_status status = AE_OK; - TZ_CONTEXT *tz = NULL; - TZ_POLICY *policy = NULL; - TZ_THRESHOLDS *thresholds = NULL; - u32 previous_temperature = 0; - u32 previous_state = 0; - u32 active_index = 0; - u32 i = 0; - u32 sleep_time = 0; - - FUNCTION_TRACE("tz_policy_check"); - - if (!context) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n")); - return_VOID; - } - - tz = (TZ_CONTEXT*)context; - policy = &(tz->policy); - thresholds = &(tz->policy.thresholds); - - /* - * Preserve Previous State: - * ------------------------ - */ - previous_temperature = tz->policy.temperature; - previous_state = tz->policy.state; - - /* - * Get Temperature: - * ---------------- - */ - status = tz_get_temperature(tz); - if (ACPI_FAILURE(status)) { - return_VOID; - } - - /* - * Calculate State: - * ---------------- - */ - policy->state = TZ_STATE_OK; - - /* Critical? */ - if (policy->temperature >= thresholds->critical.temperature) - policy->state |= TZ_STATE_CRITICAL; - - /* Hot? */ - if ((thresholds->hot.is_valid) && (policy->temperature >= thresholds->hot.temperature)) - policy->state |= TZ_STATE_CRITICAL; - - /* Passive? */ - if ((thresholds->passive.is_valid) && (policy->temperature >= thresholds->passive.temperature)) - policy->state |= TZ_STATE_PASSIVE; - - /* Active? */ - if (thresholds->active[0].is_valid) { - for (i=0; iactive[i].is_valid) && (policy->temperature >= thresholds->active[i].temperature)) { - policy->state |= TZ_STATE_ACTIVE; - if (i > active_index) - active_index = i; - } - } - policy->state |= active_index; - } - - /* - * Invoke Policy: - * -------------- - * Note that policy must be invoked both when 'going into' a - * policy state (e.g. to allow fans to be turned on) and 'going - * out of' a policy state (e.g. to allow fans to be turned off); - * thus we must preserve the previous state. - */ - if (policy->state & TZ_STATE_CRITICAL) - tz_policy_critical(tz); - if (policy->state & TZ_STATE_HOT) - tz_policy_hot(tz); - if ((policy->state & TZ_STATE_PASSIVE) || (previous_state & TZ_STATE_PASSIVE)) - tz_policy_passive(tz); - if ((policy->state & TZ_STATE_ACTIVE) || (previous_state & TZ_STATE_ACTIVE)) - tz_policy_active(tz); - - /* - * Calculate Sleep Time: - * --------------------- - * If we're in the passive state, use _TSP's value. Otherwise - * use _TZP or the OS's default polling frequency. If no polling - * frequency is specified then we'll wait forever (that is, until - * a thermal event occurs -- e.g. never poll). Note that _TSP - * and _TZD values are given in 1/10th seconds. 
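
The sleep-time bookkeeping above works in milliseconds derived from ACPI's 0.1-second units, and the scheduling code below turns that into a jiffies delta. One detail worth keeping in mind: add_timer() and mod_timer() both take an absolute expiry, so the delta only means something added to the current jiffies value. A standalone sketch of the arithmetic, with made-up HZ and jiffies stand-ins:

    #include <stdio.h>

    #define HZ 100                          /* typical 2.4-era i386 tick rate */

    int main(void)
    {
        unsigned long jiffies = 123456;     /* stand-in for the kernel counter */
        unsigned long tsp = 30;             /* _TSP: 3.0 seconds, in 0.1 s units */
        unsigned long sleep_ms = tsp * 100; /* deciseconds -> milliseconds */
        unsigned long expires = jiffies + (HZ * sleep_ms) / 1000;

        printf("poll in %lu ms -> expires at jiffy %lu\n", sleep_ms, expires);
        return 0;
    }
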
- */ - if (policy->state & TZ_STATE_PASSIVE) - sleep_time = thresholds->passive.tsp * 100; - else if (policy->polling_freq > 0) - sleep_time = policy->polling_freq * 100; - else - sleep_time = WAIT_FOREVER; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Thermal_zone[%02x]: temperature[%d] state[%08x]\n", tz->device_handle, policy->temperature, policy->state)); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Scheduling next poll in [%d]ms.\n", sleep_time)); - - /* - * Schedule Next Poll: - * ------------------- - */ - if (sleep_time < WAIT_FOREVER) { - if (timer_pending(&(policy->timer))) - mod_timer(&(policy->timer), (HZ*sleep_time)/1000); - else { - policy->timer.data = (unsigned long)tz; - policy->timer.function = tz_policy_run; - policy->timer.expires = jiffies + (HZ*sleep_time)/1000; - add_timer(&(policy->timer)); - } - } - else { - if (timer_pending(&(policy->timer))) - del_timer(&(policy->timer)); - } - - return_VOID; -} - - -/**************************************************************************** - * - * FUNCTION: tz_policy_run - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - - -void -tz_policy_run ( - unsigned long data) -{ - acpi_status status = AE_OK; - - FUNCTION_TRACE("tz_policy_run"); - - if (!data) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n")); - return_VOID; - } - - /* - * Defer to Non-Interrupt Level: - * ----------------------------- - * Note that all Linux kernel timers run at interrupt-level (ack!). - */ - status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, tz_policy_check, (void*)data); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Error invoking thermal policy.\n")); - } - - return_VOID; -} - - -/**************************************************************************** - * - * FUNCTION: tz_policy_add_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -tz_policy_add_device ( - TZ_CONTEXT *tz) -{ - acpi_status status = AE_OK; - TZ_THRESHOLDS *thresholds = NULL; - u32 i,j = 0; - - FUNCTION_TRACE("tz_policy_add_device"); - - if (!tz) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Adding policy for thermal zone [%02x].\n", tz->device_handle)); - - /* - * Get Thresholds: - * --------------- - */ - status = tz_get_thresholds(tz); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Initialize Policies: - * -------------------- - */ - if (tz->policy.thresholds.passive.is_valid) { - for (i=0; ipolicy.thresholds.passive.devices.count; i++) - set_performance_limit(tz->policy.thresholds.passive.devices.handles[i], PR_PERF_MAX); - tz_policy_passive(tz); - } - if (tz->policy.thresholds.active[0].is_valid) - tz_policy_active(tz); - - /* - * Initialize Policy Timer: - * ------------------------ - */ - init_timer(&(tz->policy.timer)); - - /* - * Start Policy: - * ------------- - * Run an initial check using this zone's policy. 
- */ - tz_policy_check(tz); - - return_ACPI_STATUS(AE_OK); -} - - -/***************************************************************************** - * - * FUNCTION: tz_policy_remove_device - * - * PARAMETERS: - * - * RETURN: - * - * DESCRIPTION: - * - ****************************************************************************/ - -acpi_status -tz_policy_remove_device( - TZ_CONTEXT *tz) -{ - u32 i = 0; - - FUNCTION_TRACE("tz_remove_device"); - - if (!tz) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing policy for thermal zone [%02x].\n", tz->device_handle)); - - /* - * Delete the thermal zone policy timer entry, if exists. - */ - if (timer_pending(&(tz->policy.timer))) - del_timer(&(tz->policy.timer)); - - /* - * Reset thermal performance limit on all processors back to max. - */ - if (tz->policy.thresholds.passive.is_valid) { - for (i=0; ipolicy.thresholds.passive.devices.count; i++) - set_performance_limit(tz->policy.thresholds.passive.devices.handles[i], PR_PERF_MAX); - } - - return_ACPI_STATUS(AE_OK); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/parser/Makefile linux.21rc5-ac2/drivers/acpi/parser/Makefile --- linux.21rc5/drivers/acpi/parser/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/parser/Makefile 2003-04-28 17:59:26.000000000 +0100 @@ -1,11 +1,10 @@ # # Makefile for all Linux ACPI interpreter subdirectories -# EXCEPT for the ospm directory # O_TARGET := $(notdir $(CURDIR)).o -obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c)) +obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c)) EXTRA_CFLAGS += $(ACPI_CFLAGS) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/parser/psargs.c linux.21rc5-ac2/drivers/acpi/parser/psargs.c --- linux.21rc5/drivers/acpi/parser/psargs.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/parser/psargs.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,43 +1,61 @@ /****************************************************************************** * * Module Name: psargs - Parse AML opcode arguments - * $Revision: 52 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" -#include "acnamesp.h" +#include +#include +#include +#include #define _COMPONENT ACPI_PARSER - MODULE_NAME ("psargs") + ACPI_MODULE_NAME ("psargs") /******************************************************************************* * - * FUNCTION: Acpi_ps_get_next_package_length + * FUNCTION: acpi_ps_get_next_package_length * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * * RETURN: Decoded package length. On completion, the AML pointer points * past the length byte or bytes. 
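
For reference, the PkgLength encoding this function decodes: bits 7-6 of the lead byte give the number of follow-on bytes (0-3); with no follow-on bytes the low six bits are the whole length, otherwise the lead byte contributes only its low nibble and each extra byte supplies the next eight bits. A standalone decoder sketch (decode_pkg_length is an illustrative name) equivalent to the switch in the hunk below:

    #include <stdio.h>

    static unsigned int decode_pkg_length(const unsigned char *aml,
                                          unsigned int *consumed)
    {
        unsigned int lead = aml[0];
        unsigned int extra = lead >> 6;     /* 0..3 follow-on bytes */
        unsigned int length;
        unsigned int i;

        if (extra == 0) {
            *consumed = 1;
            return lead & 0x3F;             /* 6-bit length, one byte total */
        }

        length = lead & 0x0F;               /* low nibble of the lead byte */
        for (i = 0; i < extra; i++)
            length |= (unsigned int)aml[1 + i] << (4 + 8 * i);

        *consumed = 1 + extra;
        return length;
    }

    int main(void)
    {
        /* 3-byte encoding: lead 0x8A -> two extra bytes, low nibble 0xA */
        unsigned char aml[] = { 0x8A, 0x23, 0x01 };
        unsigned int used;
        unsigned int len = decode_pkg_length(aml, &used);

        printf("length=%#x bytes_used=%u\n", len, used);    /* 0x123a, 3 */
        return 0;
    }
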
@@ -48,16 +66,16 @@ u32 acpi_ps_get_next_package_length ( - acpi_parse_state *parser_state) + struct acpi_parse_state *parser_state) { - u32 encoded_length; - u32 length = 0; + u32 encoded_length; + u32 length = 0; - FUNCTION_TRACE ("Ps_get_next_package_length"); + ACPI_FUNCTION_TRACE ("ps_get_next_package_length"); - encoded_length = (u32) GET8 (parser_state->aml); + encoded_length = (u32) ACPI_GET8 (parser_state->aml); parser_state->aml++; @@ -70,7 +88,7 @@ case 1: /* 2-byte encoding (next byte + bits 0-3) */ - length = ((GET8 (parser_state->aml) << 04) | + length = ((ACPI_GET8 (parser_state->aml) << 04) | (encoded_length & 0x0F)); parser_state->aml++; break; @@ -78,8 +96,8 @@ case 2: /* 3-byte encoding (next 2 bytes + bits 0-3) */ - length = ((GET8 (parser_state->aml + 1) << 12) | - (GET8 (parser_state->aml) << 04) | + length = ((ACPI_GET8 (parser_state->aml + 1) << 12) | + (ACPI_GET8 (parser_state->aml) << 04) | (encoded_length & 0x0F)); parser_state->aml += 2; break; @@ -87,12 +105,17 @@ case 3: /* 4-byte encoding (next 3 bytes + bits 0-3) */ - length = ((GET8 (parser_state->aml + 2) << 20) | - (GET8 (parser_state->aml + 1) << 12) | - (GET8 (parser_state->aml) << 04) | + length = ((ACPI_GET8 (parser_state->aml + 2) << 20) | + (ACPI_GET8 (parser_state->aml + 1) << 12) | + (ACPI_GET8 (parser_state->aml) << 04) | (encoded_length & 0x0F)); parser_state->aml += 3; break; + + default: + + /* Can't get here, only 2 bits / 4 cases */ + break; } return_VALUE (length); @@ -101,9 +124,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_get_next_package_end + * FUNCTION: acpi_ps_get_next_package_end * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * * RETURN: Pointer to end-of-package +1 * @@ -114,16 +137,18 @@ u8 * acpi_ps_get_next_package_end ( - acpi_parse_state *parser_state) + struct acpi_parse_state *parser_state) { - u8 *start = parser_state->aml; - NATIVE_UINT length; + u8 *start = parser_state->aml; + acpi_native_uint length; - FUNCTION_TRACE ("Ps_get_next_package_end"); + ACPI_FUNCTION_TRACE ("ps_get_next_package_end"); - length = (NATIVE_UINT) acpi_ps_get_next_package_length (parser_state); + /* Function below changes parser_state->Aml */ + + length = (acpi_native_uint) acpi_ps_get_next_package_length (parser_state); return_PTR (start + length); /* end of package */ } @@ -131,9 +156,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_get_next_namestring + * FUNCTION: acpi_ps_get_next_namestring * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * * RETURN: Pointer to the start of the name string (pointer points into * the AML. 
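
acpi_ps_get_next_namestring (next hunk) advances over an AML name path: any number of '\' (root) or '^' (parent) prefixes, then either a null name, a single 4-character segment, a dual-name form (0x2E prefix), or a multi-name form (0x2F prefix plus a segment-count byte). A standalone sketch of the same scan; namestring_length is an illustrative name, the AML constants are the spec values:

    #include <stdio.h>
    #include <stddef.h>

    #define AML_DUAL_NAME_PREFIX     0x2E
    #define AML_MULTI_NAME_PREFIX_OP 0x2F
    #define ACPI_NAME_SIZE           4

    static size_t namestring_length(const unsigned char *aml)
    {
        const unsigned char *end = aml;

        while (*end == '\\' || *end == '^')     /* root / parent prefixes */
            end++;

        switch (*end) {
        case 0:                                 /* null name */
            end++;
            break;
        case AML_DUAL_NAME_PREFIX:              /* two 4-char segments */
            end += 1 + 2 * ACPI_NAME_SIZE;
            break;
        case AML_MULTI_NAME_PREFIX_OP:          /* count byte + N segments */
            end += 2 + (size_t)end[1] * ACPI_NAME_SIZE;
            break;
        default:                                /* single 4-char segment */
            end += ACPI_NAME_SIZE;
            break;
        }
        return end - aml;
    }

    int main(void)
    {
        /* "\_SB_.PCI0": root prefix, dual-name prefix, two segments */
        unsigned char aml[] = { '\\', 0x2E, '_','S','B','_', 'P','C','I','0' };

        printf("namestring is %zu bytes\n", namestring_length(aml));    /* 10 */
        return 0;
    }
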
@@ -144,32 +169,31 @@ * ******************************************************************************/ -NATIVE_CHAR * +char * acpi_ps_get_next_namestring ( - acpi_parse_state *parser_state) + struct acpi_parse_state *parser_state) { - u8 *start = parser_state->aml; - u8 *end = parser_state->aml; - u32 length; + u8 *start = parser_state->aml; + u8 *end = parser_state->aml; - FUNCTION_TRACE ("Ps_get_next_namestring"); + ACPI_FUNCTION_TRACE ("ps_get_next_namestring"); /* Handle multiple prefix characters */ - while (acpi_ps_is_prefix_char (GET8 (end))) { - /* include prefix '\\' or '^' */ + while (acpi_ps_is_prefix_char (ACPI_GET8 (end))) { + /* Include prefix '\\' or '^' */ end++; } /* Decode the path */ - switch (GET8 (end)) { + switch (ACPI_GET8 (end)) { case 0: - /* Null_name */ + /* null_name */ if (end == start) { start = NULL; @@ -177,183 +201,77 @@ end++; break; - case AML_DUAL_NAME_PREFIX: - /* two name segments */ + /* Two name segments */ - end += 9; + end += 1 + (2 * ACPI_NAME_SIZE); break; - case AML_MULTI_NAME_PREFIX_OP: - /* multiple name segments */ + /* Multiple name segments, 4 chars each */ - length = (u32) GET8 (end + 1) * 4; - end += 2 + length; + end += 2 + ((acpi_size) ACPI_GET8 (end + 1) * ACPI_NAME_SIZE); break; - default: - /* single name segment */ - /* assert (Acpi_ps_is_lead (GET8 (End))); */ + /* Single name segment */ - end += 4; + end += ACPI_NAME_SIZE; break; } parser_state->aml = (u8*) end; - - return_PTR ((NATIVE_CHAR *) start); + return_PTR ((char *) start); } /******************************************************************************* * - * FUNCTION: Acpi_ps_get_next_namepath + * FUNCTION: acpi_ps_get_next_namepath * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * Arg - Where the namepath will be stored - * Arg_count - If the namepath points to a control method + * arg_count - If the namepath points to a control method * the method's argument is returned here. - * Method_call - Whether the namepath can be the start - * of a method call + * method_call - Whether the namepath can possibly be the + * start of a method call * - * RETURN: None + * RETURN: Status * - * DESCRIPTION: Get next name (if method call, push appropriate # args). Names - * are looked up in either the parsed or internal namespace to - * determine if the name represents a control method. If a method + * DESCRIPTION: Get next name (if method call, return # of required args). + * Names are looked up in the internal namespace to determine + * if the name represents a control method. If a method * is found, the number of arguments to the method is returned. * This information is critical for parsing to continue correctly. 
* ******************************************************************************/ - -#ifdef PARSER_ONLY - -void +acpi_status acpi_ps_get_next_namepath ( - acpi_parse_state *parser_state, - acpi_parse_object *arg, - u32 *arg_count, - u8 method_call) + struct acpi_walk_state *walk_state, + struct acpi_parse_state *parser_state, + union acpi_parse_object *arg, + u8 method_call) { - NATIVE_CHAR *path; - acpi_parse_object *name_op; - acpi_parse_object *op; - acpi_parse_object *count; + char *path; + union acpi_parse_object *name_op; + acpi_status status = AE_OK; + union acpi_operand_object *method_desc; + struct acpi_namespace_node *node; + union acpi_generic_state scope_info; - FUNCTION_TRACE ("Ps_get_next_namepath"); + ACPI_FUNCTION_TRACE ("ps_get_next_namepath"); path = acpi_ps_get_next_namestring (parser_state); - if (!path || !method_call) { - /* Null name case, create a null namepath object */ - - acpi_ps_init_op (arg, AML_INT_NAMEPATH_OP); - arg->value.name = path; - return_VOID; - } + /* Null path case is allowed */ - if (acpi_gbl_parsed_namespace_root) { - /* - * Lookup the name in the parsed namespace - */ - op = NULL; - if (method_call) { - op = acpi_ps_find (acpi_ps_get_parent_scope (parser_state), - path, AML_METHOD_OP, 0); - } - - if (op) { - if (op->opcode == AML_METHOD_OP) { - /* - * The name refers to a control method, so this namepath is a - * method invocation. We need to 1) Get the number of arguments - * associated with this method, and 2) Change the NAMEPATH - * object into a METHODCALL object. - */ - count = acpi_ps_get_arg (op, 0); - if (count && count->opcode == AML_BYTE_OP) { - name_op = acpi_ps_alloc_op (AML_INT_NAMEPATH_OP); - if (name_op) { - /* Change arg into a METHOD CALL and attach the name */ - - acpi_ps_init_op (arg, AML_INT_METHODCALL_OP); - - name_op->value.name = path; - - /* Point METHODCALL/NAME to the METHOD Node */ - - name_op->node = (acpi_namespace_node *) op; - acpi_ps_append_arg (arg, name_op); - - *arg_count = (u32) count->value.integer & - METHOD_FLAGS_ARG_COUNT; - } - } - - return_VOID; - } - - /* - * Else this is normal named object reference. - * Just init the NAMEPATH object with the pathname. - * (See code below) - */ - } - } - - /* - * Either we didn't find the object in the namespace, or the object is - * something other than a control method. 
Just initialize the Op with the - * pathname - */ - acpi_ps_init_op (arg, AML_INT_NAMEPATH_OP); - arg->value.name = path; - - - return_VOID; -} - - -#else - - -void -acpi_ps_get_next_namepath ( - acpi_parse_state *parser_state, - acpi_parse_object *arg, - u32 *arg_count, - u8 method_call) -{ - NATIVE_CHAR *path; - acpi_parse_object *name_op; - acpi_status status; - acpi_namespace_node *method_node = NULL; - acpi_namespace_node *node; - acpi_generic_state scope_info; - - - FUNCTION_TRACE ("Ps_get_next_namepath"); - - - path = acpi_ps_get_next_namestring (parser_state); - if (!path || !method_call) { - /* Null name case, create a null namepath object */ - - acpi_ps_init_op (arg, AML_INT_NAMEPATH_OP); - arg->value.name = path; - return_VOID; - } - - - if (method_call) { + if (path) { /* * Lookup the name in the internal namespace */ @@ -369,36 +287,48 @@ * parent tree, but don't open a new scope -- we just want to lookup the * object (MUST BE mode EXECUTE to perform upsearch) */ - status = acpi_ns_lookup (&scope_info, path, ACPI_TYPE_ANY, IMODE_EXECUTE, - NS_SEARCH_PARENT | NS_DONT_OPEN_SCOPE, NULL, - &node); - if (ACPI_SUCCESS (status)) { + status = acpi_ns_lookup (&scope_info, path, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, + ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, NULL, &node); + if (ACPI_SUCCESS (status) && method_call) { if (node->type == ACPI_TYPE_METHOD) { - method_node = node; - ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "method - %p Path=%p\n", - method_node, path)); + /* + * This name is actually a control method invocation + */ + method_desc = acpi_ns_get_attached_object (node); + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, + "Control Method - %p Desc %p Path=%p\n", + node, method_desc, path)); name_op = acpi_ps_alloc_op (AML_INT_NAMEPATH_OP); - if (name_op) { - /* Change arg into a METHOD CALL and attach name to it */ - - acpi_ps_init_op (arg, AML_INT_METHODCALL_OP); + if (!name_op) { + return_ACPI_STATUS (AE_NO_MEMORY); + } - name_op->value.name = path; + /* Change arg into a METHOD CALL and attach name to it */ - /* Point METHODCALL/NAME to the METHOD Node */ + acpi_ps_init_op (arg, AML_INT_METHODCALL_OP); + name_op->common.value.name = path; - name_op->node = method_node; - acpi_ps_append_arg (arg, name_op); + /* Point METHODCALL/NAME to the METHOD Node */ - if (!method_node->object) { - return_VOID; - } + name_op->common.node = node; + acpi_ps_append_arg (arg, name_op); - *arg_count = (method_node->object)->method.param_count; + if (!method_desc) { + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, + "Control Method - %p has no attached object\n", + node)); + return_ACPI_STATUS (AE_AML_INTERNAL); } - return_VOID; + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, + "Control Method - %p Args %X\n", + node, method_desc->method.param_count)); + + /* Get the number of arguments to expect */ + + walk_state->arg_count = method_desc->method.param_count; + return_ACPI_STATUS (AE_OK); } /* @@ -407,28 +337,48 @@ * (See code below) */ } + + if (ACPI_FAILURE (status)) { + /* + * 1) Any error other than NOT_FOUND is always severe + * 2) NOT_FOUND is only important if we are executing a method. + * 3) If executing a cond_ref_of opcode, NOT_FOUND is ok. + */ + if ((((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) == ACPI_PARSE_EXECUTE) && + (status == AE_NOT_FOUND) && + (walk_state->op->common.aml_opcode != AML_COND_REF_OF_OP)) || + + (status != AE_NOT_FOUND)) { + ACPI_REPORT_NSERROR (path, status); + } + else { + /* + * We got a NOT_FOUND during table load or we encountered + * a cond_ref_of(x) where the target does not exist. 
+ * -- either case is ok + */ + status = AE_OK; + } + } } /* - * Either we didn't find the object in the namespace, or the object is - * something other than a control method. Just initialize the Op with the - * pathname. + * Regardless of success/failure above, + * Just initialize the Op with the pathname. */ acpi_ps_init_op (arg, AML_INT_NAMEPATH_OP); - arg->value.name = path; - + arg->common.value.name = path; - return_VOID; + return_ACPI_STATUS (status); } -#endif /******************************************************************************* * - * FUNCTION: Acpi_ps_get_next_simple_arg + * FUNCTION: acpi_ps_get_next_simple_arg * - * PARAMETERS: Parser_state - Current parser state object - * Arg_type - The argument type (AML_*_ARG) + * PARAMETERS: parser_state - Current parser state object + * arg_type - The argument type (AML_*_ARG) * Arg - Where the argument is returned * * RETURN: None @@ -439,20 +389,19 @@ void acpi_ps_get_next_simple_arg ( - acpi_parse_state *parser_state, - u32 arg_type, - acpi_parse_object *arg) + struct acpi_parse_state *parser_state, + u32 arg_type, + union acpi_parse_object *arg) { - FUNCTION_TRACE_U32 ("Ps_get_next_simple_arg", arg_type); + ACPI_FUNCTION_TRACE_U32 ("ps_get_next_simple_arg", arg_type); switch (arg_type) { - case ARGP_BYTEDATA: acpi_ps_init_op (arg, AML_BYTE_OP); - arg->value.integer = (u32) GET8 (parser_state->aml); + arg->common.value.integer = (u32) ACPI_GET8 (parser_state->aml); parser_state->aml++; break; @@ -463,7 +412,7 @@ /* Get 2 bytes from the AML stream */ - MOVE_UNALIGNED16_TO_32 (&arg->value.integer, parser_state->aml); + ACPI_MOVE_16_TO_32 (&arg->common.value.integer, parser_state->aml); parser_state->aml += 2; break; @@ -474,7 +423,7 @@ /* Get 4 bytes from the AML stream */ - MOVE_UNALIGNED32_TO_32 (&arg->value.integer, parser_state->aml); + ACPI_MOVE_32_TO_32 (&arg->common.value.integer, parser_state->aml); parser_state->aml += 4; break; @@ -485,7 +434,7 @@ /* Get 8 bytes from the AML stream */ - MOVE_UNALIGNED64_TO_64 (&arg->value.integer, parser_state->aml); + ACPI_MOVE_64_TO_64 (&arg->common.value.integer, parser_state->aml); parser_state->aml += 8; break; @@ -493,9 +442,9 @@ case ARGP_CHARLIST: acpi_ps_init_op (arg, AML_STRING_OP); - arg->value.string = (char*) parser_state->aml; + arg->common.value.string = (char *) parser_state->aml; - while (GET8 (parser_state->aml) != '\0') { + while (ACPI_GET8 (parser_state->aml) != '\0') { parser_state->aml++; } parser_state->aml++; @@ -506,7 +455,13 @@ case ARGP_NAMESTRING: acpi_ps_init_op (arg, AML_INT_NAMEPATH_OP); - arg->value.name = acpi_ps_get_next_namestring (parser_state); + arg->common.value.name = acpi_ps_get_next_namestring (parser_state); + break; + + + default: + + ACPI_REPORT_ERROR (("Invalid arg_type %X\n", arg_type)); break; } @@ -516,47 +471,44 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_get_next_field + * FUNCTION: acpi_ps_get_next_field * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * * RETURN: A newly allocated FIELD op * - * DESCRIPTION: Get next field (Named_field, Reserved_field, or Access_field) + * DESCRIPTION: Get next field (named_field, reserved_field, or access_field) * ******************************************************************************/ -acpi_parse_object * +union acpi_parse_object * acpi_ps_get_next_field ( - acpi_parse_state *parser_state) + struct acpi_parse_state *parser_state) { - u32 aml_offset = 
parser_state->aml - - parser_state->aml_start; - acpi_parse_object *field; - u16 opcode; - u32 name; + u32 aml_offset = ACPI_PTR_DIFF (parser_state->aml, + parser_state->aml_start); + union acpi_parse_object *field; + u16 opcode; + u32 name; - FUNCTION_TRACE ("Ps_get_next_field"); + ACPI_FUNCTION_TRACE ("ps_get_next_field"); /* determine field type */ - switch (GET8 (parser_state->aml)) { - + switch (ACPI_GET8 (parser_state->aml)) { default: opcode = AML_INT_NAMEDFIELD_OP; break; - case 0x00: opcode = AML_INT_RESERVEDFIELD_OP; parser_state->aml++; break; - case 0x01: opcode = AML_INT_ACCESSFIELD_OP; @@ -568,43 +520,53 @@ /* Allocate a new field op */ field = acpi_ps_alloc_op (opcode); - if (field) { - field->aml_offset = aml_offset; + if (!field) { + return_PTR (NULL); + } - /* Decode the field type */ + field->common.aml_offset = aml_offset; - switch (opcode) { - case AML_INT_NAMEDFIELD_OP: + /* Decode the field type */ - /* Get the 4-character name */ + switch (opcode) { + case AML_INT_NAMEDFIELD_OP: - MOVE_UNALIGNED32_TO_32 (&name, parser_state->aml); - acpi_ps_set_name (field, name); - parser_state->aml += 4; + /* Get the 4-character name */ - /* Get the length which is encoded as a package length */ + ACPI_MOVE_32_TO_32 (&name, parser_state->aml); + acpi_ps_set_name (field, name); + parser_state->aml += ACPI_NAME_SIZE; - field->value.size = acpi_ps_get_next_package_length (parser_state); - break; + /* Get the length which is encoded as a package length */ + field->common.value.size = acpi_ps_get_next_package_length (parser_state); + break; - case AML_INT_RESERVEDFIELD_OP: - /* Get the length which is encoded as a package length */ + case AML_INT_RESERVEDFIELD_OP: - field->value.size = acpi_ps_get_next_package_length (parser_state); - break; + /* Get the length which is encoded as a package length */ + field->common.value.size = acpi_ps_get_next_package_length (parser_state); + break; - case AML_INT_ACCESSFIELD_OP: - /* Get Access_type and Access_atrib and merge into the field Op */ + case AML_INT_ACCESSFIELD_OP: - field->value.integer = ((GET8 (parser_state->aml) << 8) | - GET8 (parser_state->aml)); - parser_state->aml += 2; - break; - } + /* + * Get access_type and access_attrib and merge into the field Op + * access_type is first operand, access_attribute is second + */ + field->common.value.integer = (ACPI_GET8 (parser_state->aml) << 8); + parser_state->aml++; + field->common.value.integer |= ACPI_GET8 (parser_state->aml); + parser_state->aml++; + break; + + default: + + /* Opcode was set in previous switch */ + break; } return_PTR (field); @@ -613,33 +575,35 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_get_next_arg + * FUNCTION: acpi_ps_get_next_arg * - * PARAMETERS: Parser_state - Current parser state object - * Arg_type - The argument type (AML_*_ARG) - * Arg_count - If the argument points to a control method + * PARAMETERS: parser_state - Current parser state object + * arg_type - The argument type (AML_*_ARG) + * arg_count - If the argument points to a control method * the method's argument is returned here. * - * RETURN: An op object containing the next argument. + * RETURN: Status, and an op object containing the next argument. 
* * DESCRIPTION: Get next argument (including complex list arguments that require * pushing the parser stack) * ******************************************************************************/ -acpi_parse_object * +acpi_status acpi_ps_get_next_arg ( - acpi_parse_state *parser_state, - u32 arg_type, - u32 *arg_count) + struct acpi_walk_state *walk_state, + struct acpi_parse_state *parser_state, + u32 arg_type, + union acpi_parse_object **return_arg) { - acpi_parse_object *arg = NULL; - acpi_parse_object *prev = NULL; - acpi_parse_object *field; - u32 subop; + union acpi_parse_object *arg = NULL; + union acpi_parse_object *prev = NULL; + union acpi_parse_object *field; + u32 subop; + acpi_status status = AE_OK; - FUNCTION_TRACE_PTR ("Ps_get_next_arg", parser_state); + ACPI_FUNCTION_TRACE_PTR ("ps_get_next_arg", parser_state); switch (arg_type) { @@ -653,15 +617,16 @@ /* constants, strings, and namestrings are all the same size */ arg = acpi_ps_alloc_op (AML_BYTE_OP); - if (arg) { - acpi_ps_get_next_simple_arg (parser_state, arg_type, arg); + if (!arg) { + return_ACPI_STATUS (AE_NO_MEMORY); } + acpi_ps_get_next_simple_arg (parser_state, arg_type, arg); break; case ARGP_PKGLENGTH: - /* package length, nothing returned */ + /* Package length, nothing returned */ parser_state->pkg_end = acpi_ps_get_next_package_end (parser_state); break; @@ -670,18 +635,17 @@ case ARGP_FIELDLIST: if (parser_state->aml < parser_state->pkg_end) { - /* non-empty list */ + /* Non-empty list */ while (parser_state->aml < parser_state->pkg_end) { field = acpi_ps_get_next_field (parser_state); if (!field) { - break; + return_ACPI_STATUS (AE_NO_MEMORY); } if (prev) { - prev->next = field; + prev->common.next = field; } - else { arg = field; } @@ -689,7 +653,7 @@ prev = field; } - /* skip to End of byte data */ + /* Skip to End of byte data */ parser_state->aml = parser_state->pkg_end; } @@ -699,17 +663,20 @@ case ARGP_BYTELIST: if (parser_state->aml < parser_state->pkg_end) { - /* non-empty list */ + /* Non-empty list */ arg = acpi_ps_alloc_op (AML_INT_BYTELIST_OP); - if (arg) { - /* fill in bytelist data */ - - arg->value.size = (parser_state->pkg_end - parser_state->aml); - ((acpi_parse2_object *) arg)->data = parser_state->aml; + if (!arg) { + return_ACPI_STATUS (AE_NO_MEMORY); } - /* skip to End of byte data */ + /* Fill in bytelist data */ + + arg->common.value.size = ACPI_PTR_DIFF (parser_state->pkg_end, + parser_state->aml); + arg->named.data = parser_state->aml; + + /* Skip to End of byte data */ parser_state->aml = parser_state->pkg_end; } @@ -717,24 +684,26 @@ case ARGP_TARGET: - case ARGP_SUPERNAME: { - subop = acpi_ps_peek_opcode (parser_state); - if (subop == 0 || - acpi_ps_is_leading_char (subop) || - acpi_ps_is_prefix_char (subop)) { - /* Null_name or Name_string */ - - arg = acpi_ps_alloc_op (AML_INT_NAMEPATH_OP); - if (arg) { - acpi_ps_get_next_namepath (parser_state, arg, arg_count, 0); - } + case ARGP_SUPERNAME: + case ARGP_SIMPLENAME: + + subop = acpi_ps_peek_opcode (parser_state); + if (subop == 0 || + acpi_ps_is_leading_char (subop) || + acpi_ps_is_prefix_char (subop)) { + /* null_name or name_string */ + + arg = acpi_ps_alloc_op (AML_INT_NAMEPATH_OP); + if (!arg) { + return_ACPI_STATUS (AE_NO_MEMORY); } - else { - /* single complex argument, nothing returned */ + status = acpi_ps_get_next_namepath (walk_state, parser_state, arg, 0); + } + else { + /* single complex argument, nothing returned */ - *arg_count = 1; - } + walk_state->arg_count = 1; } break; @@ -744,7 +713,7 @@ /* single complex 
argument, nothing returned */ - *arg_count = 1; + walk_state->arg_count = 1; break; @@ -755,10 +724,18 @@ if (parser_state->aml < parser_state->pkg_end) { /* non-empty list of variable arguments, nothing returned */ - *arg_count = ACPI_VAR_ARGS; + walk_state->arg_count = ACPI_VAR_ARGS; } break; + + + default: + + ACPI_REPORT_ERROR (("Invalid arg_type: %X\n", arg_type)); + status = AE_AML_OPERAND_TYPE; + break; } - return_PTR (arg); + *return_arg = arg; + return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/parser/psopcode.c linux.21rc5-ac2/drivers/acpi/parser/psopcode.c --- linux.21rc5/drivers/acpi/parser/psopcode.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/parser/psopcode.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,36 +1,54 @@ /****************************************************************************** * * Module Name: psopcode - Parser/Interpreter opcode information table - * $Revision: 49 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" +#include <acpi/acpi.h> +#include <acpi/acparser.h> +#include <acpi/amlcode.h> #define _COMPONENT ACPI_PARSER - MODULE_NAME ("psopcode") + ACPI_MODULE_NAME ("psopcode") #define _UNK 0x6B @@ -45,14 +63,14 @@ #define _UNKNOWN_OPCODE 0x02 /* An example unknown opcode */ #define MAX_EXTENDED_OPCODE 0x88 -#define NUM_EXTENDED_OPCODE MAX_EXTENDED_OPCODE + 1 +#define NUM_EXTENDED_OPCODE (MAX_EXTENDED_OPCODE + 1) #define MAX_INTERNAL_OPCODE -#define NUM_INTERNAL_OPCODE MAX_INTERNAL_OPCODE + 1 +#define NUM_INTERNAL_OPCODE (MAX_INTERNAL_OPCODE + 1) /******************************************************************************* * - * NAME: Acpi_gbl_Aml_op_info + * NAME: acpi_gbl_aml_op_info * * DESCRIPTION: Opcode table. Each entry contains * The name is a simple ascii string, the operand specifier is an @@ -184,7 +202,7 @@ #define ARGP_TO_STRING_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_TYPE_OP ARGP_LIST1 (ARGP_SUPERNAME) #define ARGP_UNLOAD_OP ARGP_LIST1 (ARGP_SUPERNAME) -#define ARGP_VAR_PACKAGE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_BYTEDATA, ARGP_DATAOBJLIST) +#define ARGP_VAR_PACKAGE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_DATAOBJLIST) #define ARGP_WAIT_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_TERMARG) #define ARGP_WHILE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_TERMLIST) #define ARGP_WORD_OP ARGP_LIST1 (ARGP_WORDDATA) @@ -195,7 +213,7 @@ * All AML opcodes and the runtime arguments for each. Used by the AML interpreter Each list is compressed * into a 32-bit number and stored in the master opcode table at the end of this file.
* - * (Used by Prep_operands procedure and the ASL Compiler) + * (Used by prep_operands procedure and the ASL Compiler) */ @@ -219,7 +237,7 @@ #define ARGI_BIT_XOR_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF) #define ARGI_BREAK_OP ARG_NONE #define ARGI_BREAK_POINT_OP ARG_NONE -#define ARGI_BUFFER_OP ARGI_INVALID_OPCODE +#define ARGI_BUFFER_OP ARGI_LIST1 (ARGI_INTEGER) #define ARGI_BYTE_OP ARGI_INVALID_OPCODE #define ARGI_BYTELIST_OP ARGI_INVALID_OPCODE #define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF) @@ -236,7 +254,7 @@ #define ARGI_DATA_REGION_OP ARGI_LIST3 (ARGI_STRING, ARGI_STRING, ARGI_STRING) #define ARGI_DEBUG_OP ARG_NONE #define ARGI_DECREMENT_OP ARGI_LIST1 (ARGI_INTEGER_REF) -#define ARGI_DEREF_OF_OP ARGI_LIST1 (ARGI_REFERENCE) +#define ARGI_DEREF_OF_OP ARGI_LIST1 (ARGI_REF_OR_STRING) #define ARGI_DEVICE_OP ARGI_INVALID_OPCODE #define ARGI_DIVIDE_OP ARGI_LIST4 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF, ARGI_TARGETREF) #define ARGI_DWORD_OP ARGI_INVALID_OPCODE @@ -259,8 +277,8 @@ #define ARGI_LLESSEQUAL_OP ARGI_INVALID_OPCODE #define ARGI_LNOT_OP ARGI_LIST1 (ARGI_INTEGER) #define ARGI_LNOTEQUAL_OP ARGI_INVALID_OPCODE -#define ARGI_LOAD_OP ARGI_LIST2 (ARGI_REGION, ARGI_TARGETREF) -#define ARGI_LOAD_TABLE_OP ARGI_LIST6 (ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_TARGETREF) +#define ARGI_LOAD_OP ARGI_LIST2 (ARGI_REGION_OR_FIELD,ARGI_TARGETREF) +#define ARGI_LOAD_TABLE_OP ARGI_LIST6 (ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_ANYTYPE) #define ARGI_LOCAL0 ARG_NONE #define ARGI_LOCAL1 ARG_NONE #define ARGI_LOCAL2 ARG_NONE @@ -273,7 +291,7 @@ #define ARGI_MATCH_OP ARGI_LIST6 (ARGI_PACKAGE, ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER) #define ARGI_METHOD_OP ARGI_INVALID_OPCODE #define ARGI_METHODCALL_OP ARGI_INVALID_OPCODE -#define ARGI_MID_OP ARGI_LIST4 (ARGI_BUFFERSTRING,ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF) +#define ARGI_MID_OP ARGI_LIST4 (ARGI_BUFFER_OR_STRING,ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF) #define ARGI_MOD_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF) #define ARGI_MULTIPLY_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF) #define ARGI_MUTEX_OP ARGI_INVALID_OPCODE @@ -284,7 +302,7 @@ #define ARGI_NOTIFY_OP ARGI_LIST2 (ARGI_DEVICE_REF, ARGI_INTEGER) #define ARGI_ONE_OP ARG_NONE #define ARGI_ONES_OP ARG_NONE -#define ARGI_PACKAGE_OP ARGI_INVALID_OPCODE +#define ARGI_PACKAGE_OP ARGI_LIST1 (ARGI_INTEGER) #define ARGI_POWER_RES_OP ARGI_INVALID_OPCODE #define ARGI_PROCESSOR_OP ARGI_INVALID_OPCODE #define ARGI_QWORD_OP ARGI_INVALID_OPCODE @@ -315,7 +333,7 @@ #define ARGI_TO_STRING_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_FIXED_TARGET) #define ARGI_TYPE_OP ARGI_LIST1 (ARGI_ANYTYPE) #define ARGI_UNLOAD_OP ARGI_LIST1 (ARGI_DDBHANDLE) -#define ARGI_VAR_PACKAGE_OP ARGI_INVALID_OPCODE +#define ARGI_VAR_PACKAGE_OP ARGI_LIST1 (ARGI_INTEGER) #define ARGI_WAIT_OP ARGI_LIST2 (ARGI_EVENT, ARGI_INTEGER) #define ARGI_WHILE_OP ARGI_INVALID_OPCODE #define ARGI_WORD_OP ARGI_INVALID_OPCODE @@ -328,7 +346,7 @@ /****************************************************************************** - Opcodes that have associated namespace objects + Opcodes that have associated namespace objects (AML_NSOBJECT flag) AML_SCOPE_OP AML_DEVICE_OP @@ -354,7 +372,7 @@ AML_INT_METHODCALL_OP AML_INT_NAMEPATH_OP - Opcodes that are "namespace" opcodes + Opcodes that are "namespace" opcodes (AML_NSOPCODE flag) AML_SCOPE_OP AML_DEVICE_OP @@ -372,7 +390,7 @@ 
AML_REGION_OP AML_INT_NAMEDFIELD_OP - Opcodes that have an associated namespace node + Opcodes that have an associated namespace node (AML_NSNODE flag) AML_SCOPE_OP AML_DEVICE_OP @@ -395,7 +413,7 @@ AML_INT_METHODCALL_OP AML_INT_NAMEPATH_OP - Opcodes that define named ACPI objects + Opcodes that define named ACPI objects (AML_NAMED flag) AML_SCOPE_OP AML_DEVICE_OP @@ -410,8 +428,8 @@ AML_REGION_OP AML_INT_NAMEDFIELD_OP - Opcodes that contain executable AML as part of the definition that - must be deferred until needed + Opcodes that contain executable AML as part of the definition that + must be deferred until needed AML_METHOD_OP AML_VAR_PACKAGE_OP @@ -422,6 +440,7 @@ AML_CREATE_DWORD_FIELD_OP AML_CREATE_QWORD_FIELD_OP AML_REGION_OP + AML_BUFFER_OP Field opcodes @@ -447,146 +466,149 @@ */ -static const acpi_opcode_info aml_op_info[] = +const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { -/* Index Name Parser Args Interpreter Args Class Type Flags */ +/*! [Begin] no source code translation */ +/* Index Name Parser Args Interpreter Args ObjectType Class Type Flags */ -/* 00 */ ACPI_OP ("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, 0), -/* 01 */ ACPI_OP ("One", ARGP_ONE_OP, ARGI_ONE_OP, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, 0), -/* 02 */ ACPI_OP ("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), -/* 03 */ ACPI_OP ("Name", ARGP_NAME_OP, ARGI_NAME_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), -/* 04 */ ACPI_OP ("Byte_const", ARGP_BYTE_OP, ARGI_BYTE_OP, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, 0), -/* 05 */ ACPI_OP ("Word_const", ARGP_WORD_OP, ARGI_WORD_OP, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, 0), -/* 06 */ ACPI_OP ("Dword_const", ARGP_DWORD_OP, ARGI_DWORD_OP, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, 0), -/* 07 */ ACPI_OP ("String", ARGP_STRING_OP, ARGI_STRING_OP, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, 0), -/* 08 */ ACPI_OP ("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), -/* 09 */ ACPI_OP ("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP, AML_CLASS_ARGUMENT, AML_TYPE_DATA_TERM, AML_HAS_ARGS), -/* 0A */ ACPI_OP ("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP, AML_CLASS_ARGUMENT, AML_TYPE_DATA_TERM, AML_HAS_ARGS), -/* 0B */ ACPI_OP ("Method", ARGP_METHOD_OP, ARGI_METHOD_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED | AML_DEFER), -/* 0C */ ACPI_OP ("Local0", ARGP_LOCAL0, ARGI_LOCAL0, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), -/* 0D */ ACPI_OP ("Local1", ARGP_LOCAL1, ARGI_LOCAL1, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), -/* 0E */ ACPI_OP ("Local2", ARGP_LOCAL2, ARGI_LOCAL2, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), -/* 0F */ ACPI_OP ("Local3", ARGP_LOCAL3, ARGI_LOCAL3, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), -/* 10 */ ACPI_OP ("Local4", ARGP_LOCAL4, ARGI_LOCAL4, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), -/* 11 */ ACPI_OP ("Local5", ARGP_LOCAL5, ARGI_LOCAL5, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), -/* 12 */ ACPI_OP ("Local6", ARGP_LOCAL6, ARGI_LOCAL6, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), -/* 13 */ ACPI_OP ("Local7", ARGP_LOCAL7, ARGI_LOCAL7, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), -/* 14 */ ACPI_OP ("Arg0", ARGP_ARG0, ARGI_ARG0, 
AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), -/* 15 */ ACPI_OP ("Arg1", ARGP_ARG1, ARGI_ARG1, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), -/* 16 */ ACPI_OP ("Arg2", ARGP_ARG2, ARGI_ARG2, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), -/* 17 */ ACPI_OP ("Arg3", ARGP_ARG3, ARGI_ARG3, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), -/* 18 */ ACPI_OP ("Arg4", ARGP_ARG4, ARGI_ARG4, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), -/* 19 */ ACPI_OP ("Arg5", ARGP_ARG5, ARGI_ARG5, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), -/* 1_a */ ACPI_OP ("Arg6", ARGP_ARG6, ARGI_ARG6, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), -/* 1_b */ ACPI_OP ("Store", ARGP_STORE_OP, ARGI_STORE_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 1_c */ ACPI_OP ("Ref_of", ARGP_REF_OF_OP, ARGI_REF_OF_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), -/* 1_d */ ACPI_OP ("Add", ARGP_ADD_OP, ARGI_ADD_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 1_e */ ACPI_OP ("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R), -/* 1_f */ ACPI_OP ("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 20 */ ACPI_OP ("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), -/* 21 */ ACPI_OP ("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), -/* 22 */ ACPI_OP ("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 23 */ ACPI_OP ("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_2T_1R, AML_FLAGS_EXEC_2A_2T_1R), -/* 24 */ ACPI_OP ("Shift_left", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 25 */ ACPI_OP ("Shift_right", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 26 */ ACPI_OP ("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 27 */ ACPI_OP ("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 28 */ ACPI_OP ("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 29 */ ACPI_OP ("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 2_a */ ACPI_OP ("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH), -/* 2_b */ ACPI_OP ("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 2_c */ ACPI_OP ("Find_set_left_bit", ARGP_FIND_SET_LEFT_BIT_OP, ARGI_FIND_SET_LEFT_BIT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 2_d */ ACPI_OP ("Find_set_right_bit", ARGP_FIND_SET_RIGHT_BIT_OP,ARGI_FIND_SET_RIGHT_BIT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 2_e */ ACPI_OP ("Deref_of", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), -/* 2_f */ ACPI_OP ("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP, 
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R), -/* 30 */ ACPI_OP ("Size_of", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), -/* 31 */ ACPI_OP ("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R), -/* 32 */ ACPI_OP ("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R), -/* 33 */ ACPI_OP ("Create_dWord_field", ARGP_CREATE_DWORD_FIELD_OP,ARGI_CREATE_DWORD_FIELD_OP, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), -/* 34 */ ACPI_OP ("Create_word_field", ARGP_CREATE_WORD_FIELD_OP, ARGI_CREATE_WORD_FIELD_OP, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), -/* 35 */ ACPI_OP ("Create_byte_field", ARGP_CREATE_BYTE_FIELD_OP, ARGI_CREATE_BYTE_FIELD_OP, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), -/* 36 */ ACPI_OP ("Create_bit_field", ARGP_CREATE_BIT_FIELD_OP, ARGI_CREATE_BIT_FIELD_OP, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), -/* 37 */ ACPI_OP ("Object_type", ARGP_TYPE_OP, ARGI_TYPE_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), -/* 38 */ ACPI_OP ("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL), -/* 39 */ ACPI_OP ("LOr", ARGP_LOR_OP, ARGI_LOR_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL), -/* 3_a */ ACPI_OP ("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), -/* 3_b */ ACPI_OP ("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL), -/* 3_c */ ACPI_OP ("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL), -/* 3_d */ ACPI_OP ("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL), -/* 3_e */ ACPI_OP ("If", ARGP_IF_OP, ARGI_IF_OP, AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), -/* 3_f */ ACPI_OP ("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), -/* 40 */ ACPI_OP ("While", ARGP_WHILE_OP, ARGI_WHILE_OP, AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), -/* 41 */ ACPI_OP ("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), -/* 42 */ ACPI_OP ("Return", ARGP_RETURN_OP, ARGI_RETURN_OP, AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), -/* 43 */ ACPI_OP ("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), -/* 44 */ ACPI_OP ("Break_point", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), -/* 45 */ ACPI_OP ("Ones", ARGP_ONES_OP, ARGI_ONES_OP, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, 0), +/* 00 */ ACPI_OP ("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), +/* 01 */ ACPI_OP ("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), +/* 02 */ ACPI_OP ("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP, ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), +/* 03 */ ACPI_OP ("Name", 
ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), +/* 04 */ ACPI_OP ("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP, ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, AML_CONSTANT), +/* 05 */ ACPI_OP ("WordConst", ARGP_WORD_OP, ARGI_WORD_OP, ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, AML_CONSTANT), +/* 06 */ ACPI_OP ("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP, ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, AML_CONSTANT), +/* 07 */ ACPI_OP ("String", ARGP_STRING_OP, ARGI_STRING_OP, ACPI_TYPE_STRING, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, AML_CONSTANT), +/* 08 */ ACPI_OP ("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP, ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), +/* 09 */ ACPI_OP ("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP, ACPI_TYPE_BUFFER, AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT, AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), +/* 0A */ ACPI_OP ("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP, ACPI_TYPE_PACKAGE, AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT, AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), +/* 0B */ ACPI_OP ("Method", ARGP_METHOD_OP, ARGI_METHOD_OP, ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED | AML_DEFER), +/* 0C */ ACPI_OP ("Local0", ARGP_LOCAL0, ARGI_LOCAL0, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), +/* 0D */ ACPI_OP ("Local1", ARGP_LOCAL1, ARGI_LOCAL1, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), +/* 0E */ ACPI_OP ("Local2", ARGP_LOCAL2, ARGI_LOCAL2, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), +/* 0F */ ACPI_OP ("Local3", ARGP_LOCAL3, ARGI_LOCAL3, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), +/* 10 */ ACPI_OP ("Local4", ARGP_LOCAL4, ARGI_LOCAL4, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), +/* 11 */ ACPI_OP ("Local5", ARGP_LOCAL5, ARGI_LOCAL5, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), +/* 12 */ ACPI_OP ("Local6", ARGP_LOCAL6, ARGI_LOCAL6, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), +/* 13 */ ACPI_OP ("Local7", ARGP_LOCAL7, ARGI_LOCAL7, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_LOCAL_VARIABLE, 0), +/* 14 */ ACPI_OP ("Arg0", ARGP_ARG0, ARGI_ARG0, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), +/* 15 */ ACPI_OP ("Arg1", ARGP_ARG1, ARGI_ARG1, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), +/* 16 */ ACPI_OP ("Arg2", ARGP_ARG2, ARGI_ARG2, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), +/* 17 */ ACPI_OP ("Arg3", ARGP_ARG3, ARGI_ARG3, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), +/* 18 */ ACPI_OP ("Arg4", ARGP_ARG4, ARGI_ARG4, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), +/* 19 */ ACPI_OP ("Arg5", ARGP_ARG5, ARGI_ARG5, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), +/* 1A */ ACPI_OP ("Arg6", ARGP_ARG6, ARGI_ARG6, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_METHOD_ARGUMENT, 0), +/* 1B */ ACPI_OP ("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), +/* 1C */ ACPI_OP ("RefOf", ARGP_REF_OF_OP, 
ARGI_REF_OF_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), +/* 1D */ ACPI_OP ("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 1E */ ACPI_OP ("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), +/* 1F */ ACPI_OP ("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 20 */ ACPI_OP ("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), +/* 21 */ ACPI_OP ("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), +/* 22 */ ACPI_OP ("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 23 */ ACPI_OP ("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_2T_1R, AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT), +/* 24 */ ACPI_OP ("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 25 */ ACPI_OP ("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 26 */ ACPI_OP ("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 27 */ ACPI_OP ("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 28 */ ACPI_OP ("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 29 */ ACPI_OP ("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 2A */ ACPI_OP ("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), +/* 2B */ ACPI_OP ("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), +/* 2C */ ACPI_OP ("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP, ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), +/* 2D */ ACPI_OP ("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP,ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), +/* 2E */ ACPI_OP ("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), +/* 2F */ ACPI_OP ("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R), +/* 30 */ ACPI_OP ("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), +/* 31 */ ACPI_OP ("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY, 
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), +/* 32 */ ACPI_OP ("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT), +/* 33 */ ACPI_OP ("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP,ARGI_CREATE_DWORD_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), +/* 34 */ ACPI_OP ("CreateWordField", ARGP_CREATE_WORD_FIELD_OP, ARGI_CREATE_WORD_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), +/* 35 */ ACPI_OP ("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP, ARGI_CREATE_BYTE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), +/* 36 */ ACPI_OP ("CreateBitField", ARGP_CREATE_BIT_FIELD_OP, ARGI_CREATE_BIT_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), +/* 37 */ ACPI_OP ("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), +/* 38 */ ACPI_OP ("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), +/* 39 */ ACPI_OP ("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), +/* 3A */ ACPI_OP ("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), +/* 3B */ ACPI_OP ("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), +/* 3C */ ACPI_OP ("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), +/* 3D */ ACPI_OP ("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), +/* 3E */ ACPI_OP ("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), +/* 3F */ ACPI_OP ("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), +/* 40 */ ACPI_OP ("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), +/* 41 */ ACPI_OP ("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), +/* 42 */ ACPI_OP ("Return", ARGP_RETURN_OP, ARGI_RETURN_OP, ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), +/* 43 */ ACPI_OP ("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), +/* 44 */ ACPI_OP ("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP, ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), +/* 45 */ ACPI_OP ("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), /* Prefixed opcodes (Two-byte opcodes with a prefix op) */ -/* 46 */ ACPI_OP ("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), -/* 47 */ ACPI_OP ("Event", ARGP_EVENT_OP, 
ARGI_EVENT_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED ), -/* 48 */ ACPI_OP ("Cond_ref_of", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 49 */ ACPI_OP ("Create_field", ARGP_CREATE_FIELD_OP, ARGI_CREATE_FIELD_OP, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_FIELD | AML_CREATE), -/* 4_a */ ACPI_OP ("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R, AML_FLAGS_EXEC_1A_1T_0R), -/* 4_b */ ACPI_OP ("Stall", ARGP_STALL_OP, ARGI_STALL_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), -/* 4_c */ ACPI_OP ("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), -/* 4_d */ ACPI_OP ("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R), -/* 4_e */ ACPI_OP ("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), -/* 4_f */ ACPI_OP ("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R), -/* 50 */ ACPI_OP ("Reset", ARGP_RESET_OP, ARGI_RESET_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), -/* 51 */ ACPI_OP ("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), -/* 52 */ ACPI_OP ("From_bCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 53 */ ACPI_OP ("To_bCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 54 */ ACPI_OP ("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), -/* 55 */ ACPI_OP ("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, 0), -/* 56 */ ACPI_OP ("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, 0), -/* 57 */ ACPI_OP ("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R), -/* 58 */ ACPI_OP ("Op_region", ARGP_REGION_OP, ARGI_REGION_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED | AML_DEFER), -/* 59 */ ACPI_OP ("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), -/* 5_a */ ACPI_OP ("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), -/* 5_b */ ACPI_OP ("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), -/* 5_c */ ACPI_OP ("Power_resource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), -/* 5_d */ ACPI_OP ("Thermal_zone", ARGP_THERMAL_ZONE_OP, ARGI_THERMAL_ZONE_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), -/* 5_e */ ACPI_OP ("Index_field", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), -/* 5_f */ ACPI_OP ("Bank_field", 
ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), +/* 46 */ ACPI_OP ("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), +/* 47 */ ACPI_OP ("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED ), +/* 48 */ ACPI_OP ("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), +/* 49 */ ACPI_OP ("CreateField", ARGP_CREATE_FIELD_OP, ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_FIELD | AML_CREATE), +/* 4A */ ACPI_OP ("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R, AML_FLAGS_EXEC_1A_1T_0R), +/* 4B */ ACPI_OP ("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), +/* 4C */ ACPI_OP ("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), +/* 4D */ ACPI_OP ("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R), +/* 4E */ ACPI_OP ("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), +/* 4F */ ACPI_OP ("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R), +/* 50 */ ACPI_OP ("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), +/* 51 */ ACPI_OP ("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), +/* 52 */ ACPI_OP ("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), +/* 53 */ ACPI_OP ("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), +/* 54 */ ACPI_OP ("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), +/* 55 */ ACPI_OP ("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP, ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, 0), +/* 56 */ ACPI_OP ("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, 0), +/* 57 */ ACPI_OP ("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R), +/* 58 */ ACPI_OP ("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP, ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED | AML_DEFER), +/* 59 */ ACPI_OP ("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), +/* 5A */ ACPI_OP ("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), +/* 5B */ ACPI_OP ("Processor", ARGP_PROCESSOR_OP, 
ARGI_PROCESSOR_OP, ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), +/* 5C */ ACPI_OP ("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP, ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), +/* 5D */ ACPI_OP ("ThermalZone", ARGP_THERMAL_ZONE_OP, ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), +/* 5E */ ACPI_OP ("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), +/* 5F */ ACPI_OP ("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), /* Internal opcodes that map to invalid AML opcodes */ -/* 60 */ ACPI_OP ("LNot_equal", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS), -/* 61 */ ACPI_OP ("LLess_equal", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS), -/* 62 */ ACPI_OP ("LGreater_equal", ARGP_LGREATEREQUAL_OP, ARGI_LGREATEREQUAL_OP, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS), -/* 63 */ ACPI_OP ("[Name_path]", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE ), -/* 64 */ ACPI_OP ("[Method_call]", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP, AML_CLASS_METHOD_CALL, AML_TYPE_METHOD_CALL, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE), -/* 65 */ ACPI_OP ("[Byte_list]", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, 0), -/* 66 */ ACPI_OP ("[Reserved_field]", ARGP_RESERVEDFIELD_OP, ARGI_RESERVEDFIELD_OP, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), -/* 67 */ ACPI_OP ("[Named_field]", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED ), -/* 68 */ ACPI_OP ("[Access_field]", ARGP_ACCESSFIELD_OP, ARGI_ACCESSFIELD_OP, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), -/* 69 */ ACPI_OP ("[Static_string", ARGP_STATICSTRING_OP, ARGI_STATICSTRING_OP, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), -/* 6_a */ ACPI_OP ("[Return Value]", ARG_NONE, ARG_NONE, AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN, AML_HAS_ARGS | AML_HAS_RETVAL), -/* 6_b */ ACPI_OP ("UNKNOWN_OP!", ARG_NONE, ARG_NONE, AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS), -/* 6_c */ ACPI_OP ("ASCII_ONLY!", ARG_NONE, ARG_NONE, AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS), -/* 6_d */ ACPI_OP ("PREFIX_ONLY!", ARG_NONE, ARG_NONE, AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS), +/* 60 */ ACPI_OP ("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP, ACPI_TYPE_ANY, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), +/* 61 */ ACPI_OP ("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP, ACPI_TYPE_ANY, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), +/* 62 */ ACPI_OP ("LGreaterEqual", ARGP_LGREATEREQUAL_OP, ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), +/* 63 */ ACPI_OP ("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP, ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE ), +/* 64 */ ACPI_OP ("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP, ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL, 
AML_TYPE_METHOD_CALL, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE), +/* 65 */ ACPI_OP ("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP, ACPI_TYPE_ANY, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, 0), +/* 66 */ ACPI_OP ("-ReservedField-", ARGP_RESERVEDFIELD_OP, ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), +/* 67 */ ACPI_OP ("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP, ACPI_TYPE_ANY, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED ), +/* 68 */ ACPI_OP ("-AccessField-", ARGP_ACCESSFIELD_OP, ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), +/* 69 */ ACPI_OP ("-StaticString", ARGP_STATICSTRING_OP, ARGI_STATICSTRING_OP, ACPI_TYPE_ANY, AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), +/* 6A */ ACPI_OP ("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN, AML_HAS_ARGS | AML_HAS_RETVAL), +/* 6B */ ACPI_OP ("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID, AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS), +/* 6C */ ACPI_OP ("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS), +/* 6D */ ACPI_OP ("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS), /* ACPI 2.0 opcodes */ -/* 6_e */ ACPI_OP ("Qword_const", ARGP_QWORD_OP, ARGI_QWORD_OP, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, 0), -/* 6_f */ ACPI_OP ("Var_package", ARGP_VAR_PACKAGE_OP, ARGI_VAR_PACKAGE_OP, AML_CLASS_ARGUMENT, AML_TYPE_DATA_TERM, AML_HAS_ARGS | AML_DEFER), -/* 70 */ ACPI_OP ("Concat_res", ARGP_CONCAT_RES_OP, ARGI_CONCAT_RES_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R), -/* 71 */ ACPI_OP ("Mod", ARGP_MOD_OP, ARGI_MOD_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R), -/* 72 */ ACPI_OP ("Create_qWord_field", ARGP_CREATE_QWORD_FIELD_OP,ARGI_CREATE_QWORD_FIELD_OP, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), -/* 73 */ ACPI_OP ("To_buffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 74 */ ACPI_OP ("To_decimal_string", ARGP_TO_DEC_STR_OP, ARGI_TO_DEC_STR_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 75 */ ACPI_OP ("To_hex_string", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 76 */ ACPI_OP ("To_integer", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 77 */ ACPI_OP ("To_string", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R), -/* 78 */ ACPI_OP ("Copy_object", ARGP_COPY_OP, ARGI_COPY_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), -/* 79 */ ACPI_OP ("Mid", ARGP_MID_OP, ARGI_MID_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R, AML_FLAGS_EXEC_3A_1T_1R), -/* 7_a */ ACPI_OP ("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), -/* 7_b */ ACPI_OP ("Load_table", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R), -/* 7_c */ ACPI_OP ("Data_op_region", ARGP_DATA_REGION_OP, ARGI_DATA_REGION_OP, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), +/* 6E */ ACPI_OP ("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP, ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, AML_TYPE_LITERAL, AML_CONSTANT), +/* 6F */ ACPI_OP 
("Package /*Var*/", ARGP_VAR_PACKAGE_OP, ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE, AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT, AML_HAS_ARGS | AML_DEFER), +/* 70 */ ACPI_OP ("ConcatenateResTemplate", ARGP_CONCAT_RES_OP, ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), +/* 71 */ ACPI_OP ("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), +/* 72 */ ACPI_OP ("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP,ARGI_CREATE_QWORD_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_CREATE), +/* 73 */ ACPI_OP ("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), +/* 74 */ ACPI_OP ("ToDecimalString", ARGP_TO_DEC_STR_OP, ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), +/* 75 */ ACPI_OP ("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), +/* 76 */ ACPI_OP ("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), +/* 77 */ ACPI_OP ("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), +/* 78 */ ACPI_OP ("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), +/* 79 */ ACPI_OP ("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R, AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT), +/* 7A */ ACPI_OP ("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP, ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), +/* 7B */ ACPI_OP ("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R), +/* 7C */ ACPI_OP ("DataTableRegion", ARGP_DATA_REGION_OP, ARGI_DATA_REGION_OP, ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), +/* 7D */ ACPI_OP ("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE) +/*! 
[End] no source code translation !*/ }; /* @@ -594,7 +616,7 @@ * index into the table above */ -static const u8 aml_short_op_info_index[256] = +static const u8 acpi_gbl_short_op_index[256] = { /* 0 1 2 3 4 5 6 7 */ /* 8 9 A B C D E F */ @@ -604,7 +626,7 @@ /* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX, -/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, _UNK, +/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D, /* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, /* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, @@ -633,7 +655,7 @@ }; -static const u8 aml_long_op_info_index[NUM_EXTENDED_OPCODE] = +static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = { /* 0 1 2 3 4 5 6 7 */ /* 8 9 A B C D E F */ @@ -660,7 +682,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_get_opcode_info + * FUNCTION: acpi_ps_get_opcode_info * * PARAMETERS: Opcode - The AML opcode * @@ -672,67 +694,50 @@ * ******************************************************************************/ -const acpi_opcode_info * +const struct acpi_opcode_info * acpi_ps_get_opcode_info ( - u16 opcode) + u16 opcode) { - const acpi_opcode_info *op_info; - u8 upper_opcode; - u8 lower_opcode; - - - PROC_NAME ("Ps_get_opcode_info"); - - - /* Split the 16-bit opcode into separate bytes */ - - upper_opcode = (u8) (opcode >> 8); - lower_opcode = (u8) opcode; - - /* Default is "unknown opcode" */ - - op_info = &aml_op_info [_UNK]; + ACPI_FUNCTION_NAME ("ps_get_opcode_info"); /* * Detect normal 8-bit opcode or extended 16-bit opcode */ - - switch (upper_opcode) { + switch ((u8) (opcode >> 8)) { case 0: /* Simple (8-bit) opcode: 0-255, can't index beyond table */ - op_info = &aml_op_info [aml_short_op_info_index [lower_opcode]]; - break; - + return (&acpi_gbl_aml_op_info [acpi_gbl_short_op_index [(u8) opcode]]); case AML_EXTOP: /* Extended (16-bit, prefix+opcode) opcode */ - if (lower_opcode <= MAX_EXTENDED_OPCODE) { - op_info = &aml_op_info [aml_long_op_info_index [lower_opcode]]; + if (((u8) opcode) <= MAX_EXTENDED_OPCODE) { + return (&acpi_gbl_aml_op_info [acpi_gbl_long_op_index [(u8) opcode]]); } - break; + /* Else fall through to error case below */ + /*lint -fallthrough */ default: - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown extended opcode=%X\n", opcode)); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown extended opcode [%X]\n", opcode)); break; } - /* Get the Op info pointer for this opcode */ + /* Default is "unknown opcode" */ - return (op_info); + return (&acpi_gbl_aml_op_info [_UNK]); } /******************************************************************************* * - * FUNCTION: Acpi_ps_get_opcode_name + * FUNCTION: acpi_ps_get_opcode_name * * PARAMETERS: Opcode - The AML opcode * @@ -743,22 +748,24 @@ * ******************************************************************************/ -NATIVE_CHAR * +char * acpi_ps_get_opcode_name ( - u16 opcode) + u16 opcode) { - const acpi_opcode_info *op; +#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT) + + const struct acpi_opcode_info *op; op = acpi_ps_get_opcode_info (opcode); /* Always guaranteed to return a valid pointer */ -#ifdef ACPI_DEBUG return (op->name); + #else return ("AE_NOT_CONFIGURED"); + #endif } - diff --exclude-from /usr/src/exclude -u --new-file --recursive 
linux.21rc5/drivers/acpi/parser/psparse.c linux.21rc5-ac2/drivers/acpi/parser/psparse.c --- linux.21rc5/drivers/acpi/parser/psparse.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/parser/psparse.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,26 +1,44 @@ /****************************************************************************** * * Module Name: psparse - Parser top level AML parse routines - * $Revision: 104 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ @@ -30,28 +48,26 @@ * generated parser to tightly constrain stack and dynamic memory * usage. 
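
/*
 * A minimal, self-contained sketch of the two-level opcode lookup that the
 * psopcode.c changes above preserve: single-byte AML opcodes index a
 * 256-entry table directly, while 0x5B-prefixed (AML_EXTOP) opcodes go
 * through a second index table.  All names below (opcode_info, op_table,
 * short_index, long_index, OP_UNKNOWN) are illustrative stand-ins, not the
 * kernel's identifiers.
 */
#include <stdio.h>

#define OP_EXT_PREFIX  0x5B   /* AML extended-opcode prefix byte */
#define NUM_LONG       0x34   /* hypothetical size of the extended index */
#define OP_UNKNOWN     0      /* slot 0 = "unknown opcode" entry */

struct opcode_info { const char *name; };

static const struct opcode_info op_table[] = {
    { "-UNKNOWN-" }, { "Mutex" }, { "Event" },   /* ...abbreviated... */
};

static const unsigned char short_index[256];     /* zero-filled -> OP_UNKNOWN */
static const unsigned char long_index[NUM_LONG];

static const struct opcode_info *get_opcode_info(unsigned short opcode)
{
    switch ((unsigned char)(opcode >> 8)) {
    case 0:                         /* plain 8-bit opcode, direct index */
        return &op_table[short_index[(unsigned char)opcode]];
    case OP_EXT_PREFIX:             /* 16-bit opcode: prefix + opcode */
        if ((unsigned char)opcode < NUM_LONG)
            return &op_table[long_index[(unsigned char)opcode]];
        /* fall through: extended opcode out of range */
    default:
        return &op_table[OP_UNKNOWN];
    }
}

int main(void)
{
    /* 0x5B-prefixed opcode resolves via long_index */
    printf("%s\n", get_opcode_info(0x5B01)->name);
    return 0;
}
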
At the same time, parsing is kept flexible and the code * fairly compact by parsing based on a list of AML opcode - * templates in Aml_op_info[] + * templates in aml_op_info[] */ -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "amlcode.h" -#include "acnamesp.h" -#include "acdebug.h" -#include "acinterp.h" +#include +#include +#include +#include +#include +#include #define _COMPONENT ACPI_PARSER - MODULE_NAME ("psparse") + ACPI_MODULE_NAME ("psparse") -u32 acpi_gbl_depth = 0; -extern u32 acpi_gbl_scope_depth; +static u32 acpi_gbl_depth = 0; /******************************************************************************* * - * FUNCTION: Acpi_ps_get_opcode_size + * FUNCTION: acpi_ps_get_opcode_size * * PARAMETERS: Opcode - An AML opcode * @@ -61,9 +77,9 @@ * ******************************************************************************/ -static u32 +u32 acpi_ps_get_opcode_size ( - u32 opcode) + u32 opcode) { /* Extended (2-byte) opcode if > 255 */ @@ -80,9 +96,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_peek_opcode + * FUNCTION: acpi_ps_peek_opcode * - * PARAMETERS: Parser_state - A parser state object + * PARAMETERS: parser_state - A parser state object * * RETURN: Status * @@ -92,122 +108,59 @@ u16 acpi_ps_peek_opcode ( - acpi_parse_state *parser_state) + struct acpi_parse_state *parser_state) { - u8 *aml; - u16 opcode; + u8 *aml; + u16 opcode; aml = parser_state->aml; - opcode = (u16) GET8 (aml); + opcode = (u16) ACPI_GET8 (aml); - aml++; - - /* - * Original code special cased LNOTEQUAL, LLESSEQUAL, LGREATEREQUAL. - * These opcodes are no longer recognized. Instead, they are broken into - * two opcodes. - * - * - * if (Opcode == AML_EXTOP - * || (Opcode == AML_LNOT - * && (GET8 (Aml) == AML_LEQUAL - * || GET8 (Aml) == AML_LGREATER - * || GET8 (Aml) == AML_LLESS))) - * - * extended Opcode, !=, <=, or >= - */ if (opcode == AML_EXTOP) { /* Extended opcode */ - opcode = (u16) ((opcode << 8) | GET8 (aml)); + aml++; + opcode = (u16) ((opcode << 8) | ACPI_GET8 (aml)); } - return (opcode); } /******************************************************************************* * - * FUNCTION: Acpi_ps_find_object - * - * PARAMETERS: Opcode - Current opcode - * Parser_state - Current state - * Walk_state - Current state - * *Op - Where found/new op is returned - * - * RETURN: Status - * - * DESCRIPTION: Find a named object. 
Two versions - one to search the parse - * tree (for parser-only applications such as acpidump), another - * to search the ACPI internal namespace (the parse tree may no - * longer exist) - * - ******************************************************************************/ - -#ifdef PARSER_ONLY - -acpi_status -acpi_ps_find_object ( - acpi_walk_state *walk_state, - acpi_parse_object **out_op) -{ - NATIVE_CHAR *path; - - - /* We are only interested in opcodes that have an associated name */ - - if (!(walk_state->op_info->flags & AML_NAMED)) { - *out_op = walk_state->op; - return (AE_OK); - } - - /* Find the name in the parse tree */ - - path = acpi_ps_get_next_namestring (&walk_state->parser_state); - - *out_op = acpi_ps_find (acpi_ps_get_parent_scope (&walk_state->parser_state), - path, walk_state->opcode, 1); - - if (!(*out_op)) { - return (AE_NOT_FOUND); - } - - return (AE_OK); -} - -#endif - - -/******************************************************************************* + * FUNCTION: acpi_ps_complete_this_op * - * FUNCTION: Acpi_ps_complete_this_op - * - * PARAMETERS: Walk_state - Current State + * PARAMETERS: walk_state - Current State * Op - Op to complete * - * RETURN: TRUE if Op and subtree was deleted + * RETURN: None. * * DESCRIPTION: Perform any cleanup at the completion of an Op. * ******************************************************************************/ -static u8 +void acpi_ps_complete_this_op ( - acpi_walk_state *walk_state, - acpi_parse_object *op) + struct acpi_walk_state *walk_state, + union acpi_parse_object *op) { -#ifndef PARSER_ONLY - acpi_parse_object *prev; - acpi_parse_object *next; - const acpi_opcode_info *parent_info; - acpi_parse_object *replacement_op = NULL; + union acpi_parse_object *prev; + union acpi_parse_object *next; + const struct acpi_opcode_info *parent_info; + union acpi_parse_object *replacement_op = NULL; + + ACPI_FUNCTION_TRACE_PTR ("ps_complete_this_op", op); - FUNCTION_TRACE_PTR ("Ps_complete_this_op", op); + /* Check for null Op, can happen if AML code is corrupt */ + + if (!op) { + return_VOID; + } /* Delete this op and the subtree below it if asked to */ @@ -215,60 +168,84 @@ (walk_state->op_info->class != AML_CLASS_ARGUMENT)) { /* Make sure that we only delete this subtree */ - if (op->parent) { + if (op->common.parent) { /* * Check if we need to replace the operator and its subtree * with a return value op (placeholder op) */ - parent_info = acpi_ps_get_opcode_info (op->parent->opcode); + parent_info = acpi_ps_get_opcode_info (op->common.parent->common.aml_opcode); switch (parent_info->class) { - case AML_CLASS_CONTROL: /* IF, ELSE, WHILE only */ + case AML_CLASS_CONTROL: break; - case AML_CLASS_NAMED_OBJECT: /* Scope, method, etc. */ case AML_CLASS_CREATE: /* - * These opcodes contain Term_arg operands. The current - * op must be replace by a placeholder return op + * These opcodes contain term_arg operands. The current + * op must be replaced by a placeholder return op */ - if ((op->parent->opcode == AML_REGION_OP) || - (op->parent->opcode == AML_CREATE_FIELD_OP) || - (op->parent->opcode == AML_CREATE_BIT_FIELD_OP) || - (op->parent->opcode == AML_CREATE_BYTE_FIELD_OP) || - (op->parent->opcode == AML_CREATE_WORD_FIELD_OP) || - (op->parent->opcode == AML_CREATE_DWORD_FIELD_OP) || - (op->parent->opcode == AML_CREATE_QWORD_FIELD_OP)) { + replacement_op = acpi_ps_alloc_op (AML_INT_RETURN_VALUE_OP); + if (!replacement_op) { + return_VOID; + } + break; + + case AML_CLASS_NAMED_OBJECT: + + /* + * These opcodes contain term_arg operands. 
The current + * op must be replaced by a placeholder return op + */ + if ((op->common.parent->common.aml_opcode == AML_REGION_OP) || + (op->common.parent->common.aml_opcode == AML_DATA_REGION_OP) || + (op->common.parent->common.aml_opcode == AML_BUFFER_OP) || + (op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || + (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) { replacement_op = acpi_ps_alloc_op (AML_INT_RETURN_VALUE_OP); if (!replacement_op) { - return_VALUE (FALSE); + return_VOID; } } + if ((op->common.parent->common.aml_opcode == AML_NAME_OP) && + (walk_state->descending_callback != acpi_ds_exec_begin_op)) { + if ((op->common.aml_opcode == AML_BUFFER_OP) || + (op->common.aml_opcode == AML_PACKAGE_OP) || + (op->common.aml_opcode == AML_VAR_PACKAGE_OP)) { + replacement_op = acpi_ps_alloc_op (op->common.aml_opcode); + if (!replacement_op) { + return_VOID; + } + + replacement_op->named.data = op->named.data; + replacement_op->named.length = op->named.length; + } + } break; default: replacement_op = acpi_ps_alloc_op (AML_INT_RETURN_VALUE_OP); if (!replacement_op) { - return_VALUE (FALSE); + return_VOID; } } /* We must unlink this op from the parent tree */ - prev = op->parent->value.arg; + prev = op->common.parent->common.value.arg; if (prev == op) { /* This op is the first in the list */ if (replacement_op) { - replacement_op->parent = op->parent; - replacement_op->value.arg = NULL; - op->parent->value.arg = replacement_op; - replacement_op->next = op->next; + replacement_op->common.parent = op->common.parent; + replacement_op->common.value.arg = NULL; + replacement_op->common.node = op->common.node; + op->common.parent->common.value.arg = replacement_op; + replacement_op->common.next = op->common.next; } else { - op->parent->value.arg = op->next; + op->common.parent->common.value.arg = op->common.next; } } @@ -277,66 +254,61 @@ else while (prev) { /* Traverse all siblings in the parent's argument list */ - next = prev->next; + next = prev->common.next; if (next == op) { if (replacement_op) { - replacement_op->parent = op->parent; - replacement_op->value.arg = NULL; - prev->next = replacement_op; - replacement_op->next = op->next; + replacement_op->common.parent = op->common.parent; + replacement_op->common.value.arg = NULL; + replacement_op->common.node = op->common.node; + prev->common.next = replacement_op; + replacement_op->common.next = op->common.next; next = NULL; } else { - prev->next = op->next; + prev->common.next = op->common.next; next = NULL; } } prev = next; } - } /* Now we can actually delete the subtree rooted at op */ acpi_ps_delete_parse_tree (op); - return_VALUE (TRUE); + return_VOID; } - return_VALUE (FALSE); - -#else - return (FALSE); -#endif + return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ps_next_parse_state + * FUNCTION: acpi_ps_next_parse_state * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * - * RETURN: + * RETURN: Status * - * DESCRIPTION: + * DESCRIPTION: Update the parser state based upon the return exception from + * the parser callback. 
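
/*
 * A compact sketch of the control-flow handling that the reworked
 * acpi_ps_next_parse_state above introduces: Break and Continue both rewind
 * the AML pointer to the innermost While, and a true If-predicate skips
 * ahead to the end of the current package.  The enum, struct, and field
 * names here are simplified stand-ins, not the kernel's types.
 */
enum cb_status { CB_OK, CB_BREAK, CB_CONTINUE, CB_TRUE };

struct parse_state {
    const unsigned char *aml;            /* current AML byte */
    const unsigned char *aml_last_while; /* start of innermost While */
    const unsigned char *pkg_end;        /* end of current package */
    int loop_predicate;                  /* While predicate value */
};

static void next_parse_state(struct parse_state *ps, enum cb_status status)
{
    switch (status) {
    case CB_BREAK:
        ps->aml = ps->aml_last_while;    /* re-enter the While... */
        ps->loop_predicate = 0;          /* ...with a false predicate */
        break;
    case CB_CONTINUE:
        ps->aml = ps->aml_last_while;    /* re-evaluate the predicate */
        break;
    case CB_TRUE:
        ps->aml = ps->pkg_end;           /* If was true: skip matching Else */
        break;
    default:
        break;                           /* CB_OK: nothing to adjust */
    }
}

int main(void)
{
    unsigned char aml[16] = { 0 };
    struct parse_state ps = { aml + 9, aml + 2, aml + 15, 1 };
    next_parse_state(&ps, CB_BREAK);
    return ps.aml == ps.aml_last_while ? 0 : 1;
}
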
* ******************************************************************************/ -static acpi_status +acpi_status acpi_ps_next_parse_state ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_status callback_status) + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + acpi_status callback_status) { - acpi_parse_state *parser_state = &walk_state->parser_state; - acpi_status status = AE_CTRL_PENDING; - u8 *start; - u32 package_length; + struct acpi_parse_state *parser_state = &walk_state->parser_state; + acpi_status status = AE_CTRL_PENDING; - FUNCTION_TRACE_PTR ("Ps_next_parse_state", op); + ACPI_FUNCTION_TRACE_PTR ("ps_next_parse_state", op); switch (callback_status) { @@ -351,32 +323,40 @@ break; - case AE_CTRL_PENDING: + case AE_CTRL_BREAK: + + parser_state->aml = walk_state->aml_last_while; + walk_state->control_state->common.value = FALSE; + status = AE_CTRL_BREAK; + break; + + case AE_CTRL_CONTINUE: - /* - * Predicate of a WHILE was true and the loop just completed an - * execution. Go back to the start of the loop and reevaluate the - * predicate. - */ - /* TBD: How to handle a break within a while. */ - /* This code attempts it */ + parser_state->aml = walk_state->aml_last_while; + status = AE_CTRL_CONTINUE; + break; + + case AE_CTRL_PENDING: parser_state->aml = walk_state->aml_last_while; break; +#if 0 + case AE_CTRL_SKIP: + + parser_state->aml = parser_state->scope->parse_scope.pkg_end; + status = AE_OK; + break; +#endif case AE_CTRL_TRUE: + /* * Predicate of an IF was true, and we are at the matching ELSE. * Just close out this package - * - * Note: Parser_state->Aml is modified by the package length procedure - * TBD: [Investigate] perhaps it shouldn't, too much trouble */ - start = parser_state->aml; - package_length = acpi_ps_get_next_package_length (parser_state); - parser_state->aml = start + package_length; + parser_state->aml = acpi_ps_get_next_package_end (parser_state); break; @@ -406,7 +386,7 @@ status = AE_CTRL_TRANSFER; walk_state->prev_op = op; walk_state->method_call_op = op; - walk_state->method_call_node = (op->value.arg)->node; + walk_state->method_call_node = (op->common.value.arg)->common.node; /* Will return value (if any) be used by the caller? 
*/ @@ -428,9 +408,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_parse_loop + * FUNCTION: acpi_ps_parse_loop * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * * RETURN: Status * @@ -441,24 +421,28 @@ acpi_status acpi_ps_parse_loop ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; - acpi_parse_object *op = NULL; /* current op */ - acpi_parse_object *arg = NULL; - acpi_parse_object pre_op; - acpi_parse_state *parser_state; - u8 *aml_op_start; + acpi_status status = AE_OK; + union acpi_parse_object *op = NULL; /* current op */ + union acpi_parse_object *arg = NULL; + union acpi_parse_object pre_op; + struct acpi_parse_state *parser_state; + u8 *aml_op_start = NULL; + + ACPI_FUNCTION_TRACE_PTR ("ps_parse_loop", walk_state); - FUNCTION_TRACE_PTR ("Ps_parse_loop", walk_state); + if (walk_state->descending_callback == NULL) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } parser_state = &walk_state->parser_state; walk_state->arg_types = 0; -#ifndef PARSER_ONLY - if (walk_state->walk_type & WALK_METHOD_RESTART) { +#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY)) + if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) { /* We are restarting a preempted control method */ if (acpi_ps_has_completed_scope (parser_state)) { @@ -467,18 +451,17 @@ * was just completed */ if ((parser_state->scope->parse_scope.op) && - ((parser_state->scope->parse_scope.op->opcode == AML_IF_OP) || - (parser_state->scope->parse_scope.op->opcode == AML_WHILE_OP)) && + ((parser_state->scope->parse_scope.op->common.aml_opcode == AML_IF_OP) || + (parser_state->scope->parse_scope.op->common.aml_opcode == AML_WHILE_OP)) && (walk_state->control_state) && (walk_state->control_state->common.state == - CONTROL_PREDICATE_EXECUTING)) { - + ACPI_CONTROL_PREDICATE_EXECUTING)) { /* * A predicate was just completed, get the value of the * predicate and branch based on that value */ walk_state->op = NULL; - status = acpi_ds_get_predicate_value (walk_state, TRUE); + status = acpi_ds_get_predicate_value (walk_state, ACPI_TO_POINTER (TRUE)); if (ACPI_FAILURE (status) && ((status & AE_CODE_MASK) != AE_CODE_CONTROL)) { if (status == AE_AML_NO_RETURN_VALUE) { @@ -487,7 +470,7 @@ acpi_format_exception (status))); } - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Get_predicate Failed, %s\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "get_predicate Failed, %s\n", acpi_format_exception (status))); return_ACPI_STATUS (status); } @@ -495,10 +478,10 @@ status = acpi_ps_next_parse_state (walk_state, op, status); } - acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types, &walk_state->arg_count); + acpi_ps_pop_scope (parser_state, &op, + &walk_state->arg_types, &walk_state->arg_count); ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Popped scope, Op=%p\n", op)); } - else if (walk_state->prev_op) { /* We were in the middle of an op */ @@ -512,12 +495,13 @@ * Iterative parsing loop, while there is more aml to process: */ while ((parser_state->aml < parser_state->aml_end) || (op)) { + aml_op_start = parser_state->aml; if (!op) { /* Get the next opcode from the AML stream */ - aml_op_start = parser_state->aml; - walk_state->aml_offset = parser_state->aml - parser_state->aml_start; - walk_state->opcode = acpi_ps_peek_opcode (parser_state); + walk_state->aml_offset = ACPI_PTR_DIFF (parser_state->aml, + parser_state->aml_start); + walk_state->opcode = acpi_ps_peek_opcode 
(parser_state); /* * First cut to determine what we have found: @@ -542,10 +526,10 @@ /* The opcode is unrecognized. Just skip unknown opcodes */ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Found unknown opcode %X at AML offset %X, ignoring\n", - walk_state->opcode, walk_state->aml_offset)); + "Found unknown opcode %X at AML address %p offset %X, ignoring\n", + walk_state->opcode, parser_state->aml, walk_state->aml_offset)); - DUMP_BUFFER (parser_state->aml, 128); + ACPI_DUMP_BUFFER (parser_state->aml, 128); /* Assume one-byte bad opcode */ @@ -559,70 +543,76 @@ parser_state->aml += acpi_ps_get_opcode_size (walk_state->opcode); walk_state->arg_types = walk_state->op_info->parse_args; break; - } - /* Create Op structure and append to parent's argument list */ if (walk_state->op_info->flags & AML_NAMED) { - pre_op.value.arg = NULL; - pre_op.opcode = walk_state->opcode; + pre_op.common.value.arg = NULL; + pre_op.common.aml_opcode = walk_state->opcode; + + /* + * Get and append arguments until we find the node that contains + * the name (the type ARGP_NAME). + */ + while (GET_CURRENT_ARG_TYPE (walk_state->arg_types) && + (GET_CURRENT_ARG_TYPE (walk_state->arg_types) != ARGP_NAME)) { + status = acpi_ps_get_next_arg (walk_state, parser_state, + GET_CURRENT_ARG_TYPE (walk_state->arg_types), &arg); + if (ACPI_FAILURE (status)) { + goto close_this_op; + } - while (GET_CURRENT_ARG_TYPE (walk_state->arg_types) != ARGP_NAME) { - arg = acpi_ps_get_next_arg (parser_state, - GET_CURRENT_ARG_TYPE (walk_state->arg_types), - &walk_state->arg_count); acpi_ps_append_arg (&pre_op, arg); INCREMENT_ARG_LIST (walk_state->arg_types); } + /* Make sure that we found a NAME and didn't run out of arguments */ + + if (!GET_CURRENT_ARG_TYPE (walk_state->arg_types)) { + return_ACPI_STATUS (AE_AML_NO_OPERAND); + } /* We know that this arg is a name, move to next arg */ INCREMENT_ARG_LIST (walk_state->arg_types); - if (walk_state->descending_callback != NULL) { - /* - * Find the object. This will either insert the object into - * the namespace or simply look it up - */ - walk_state->op = NULL; - - status = walk_state->descending_callback (walk_state, &op); + /* + * Find the object. This will either insert the object into + * the namespace or simply look it up + */ + walk_state->op = NULL; - /* TBD: check status here? 
*/ + status = walk_state->descending_callback (walk_state, &op); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "During name lookup/catalog, %s\n", + acpi_format_exception (status))); + goto close_this_op; + } - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "During name lookup/catalog, %s\n", - acpi_format_exception (status))); - goto close_this_op; - } + if (op == NULL) { + continue; + } - if (op == NULL) { - continue; - } - status = acpi_ps_next_parse_state (walk_state, op, status); - if (status == AE_CTRL_PENDING) { - status = AE_OK; - goto close_this_op; - } + status = acpi_ps_next_parse_state (walk_state, op, status); + if (status == AE_CTRL_PENDING) { + status = AE_OK; + goto close_this_op; + } - if (ACPI_FAILURE (status)) { - goto close_this_op; - } + if (ACPI_FAILURE (status)) { + goto close_this_op; } - acpi_ps_append_arg (op, pre_op.value.arg); + acpi_ps_append_arg (op, pre_op.common.value.arg); acpi_gbl_depth++; - - if (op->opcode == AML_REGION_OP) { + if (op->common.aml_opcode == AML_REGION_OP) { /* - * Defer final parsing of an Operation_region body, + * Defer final parsing of an operation_region body, * because we don't have enough info in the first pass * to parse it correctly (i.e., there may be method - * calls within the Term_arg elements of the body. + * calls within the term_arg elements of the body.) * * However, we must continue parsing because * the opregion is not a standalone package -- @@ -630,12 +620,10 @@ * * (Length is unknown until parse of the body complete) */ - ((acpi_parse2_object * ) op)->data = aml_op_start; - ((acpi_parse2_object * ) op)->length = 0; + op->named.data = aml_op_start; + op->named.length = 0; } } - - else { /* Not a named opcode, just allocate Op and append to parent */ @@ -645,14 +633,13 @@ return_ACPI_STATUS (AE_NO_MEMORY); } - if (walk_state->op_info->flags & AML_CREATE) { /* - * Backup to beginning of Create_xXXfield declaration - * Body_length is unknown until we parse the body + * Backup to beginning of create_xXXfield declaration + * body_length is unknown until we parse the body */ - ((acpi_parse2_object * ) op)->data = aml_op_start; - ((acpi_parse2_object * ) op)->length = 0; + op->named.data = aml_op_start; + op->named.length = 0; } acpi_ps_append_arg (acpi_ps_get_parent_scope (parser_state), op); @@ -662,7 +649,7 @@ * Find the object. This will either insert the object into * the namespace or simply look it up */ - walk_state->op = op; + walk_state->op = op; status = walk_state->descending_callback (walk_state, &op); status = acpi_ps_next_parse_state (walk_state, op, status); @@ -677,40 +664,44 @@ } } - op->aml_offset = walk_state->aml_offset; + op->common.aml_offset = walk_state->aml_offset; if (walk_state->op_info) { ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, - "Op=%p Opcode=%4.4X Aml %p Oft=%5.5X\n", - op, op->opcode, parser_state->aml, op->aml_offset)); + "Opcode %4.4X [%s] Op %p Aml %p aml_offset %5.5X\n", + (u32) op->common.aml_opcode, walk_state->op_info->name, + op, parser_state->aml, op->common.aml_offset)); } } - /* Start Arg_count at zero because we don't know if there are any args yet */ + /* Start arg_count at zero because we don't know if there are any args yet */ walk_state->arg_count = 0; - if (walk_state->arg_types) /* Are there any arguments that must be processed? 
*/ { - /* get arguments */ + /* Get arguments */ - switch (op->opcode) { + switch (op->common.aml_opcode) { case AML_BYTE_OP: /* AML_BYTEDATA_ARG */ case AML_WORD_OP: /* AML_WORDDATA_ARG */ case AML_DWORD_OP: /* AML_DWORDATA_ARG */ case AML_QWORD_OP: /* AML_QWORDATA_ARG */ case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */ - /* fill in constant or string argument directly */ + /* Fill in constant or string argument directly */ acpi_ps_get_next_simple_arg (parser_state, - GET_CURRENT_ARG_TYPE (walk_state->arg_types), op); + GET_CURRENT_ARG_TYPE (walk_state->arg_types), op); break; case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */ - acpi_ps_get_next_namepath (parser_state, op, &walk_state->arg_count, 1); + status = acpi_ps_get_next_namepath (walk_state, parser_state, op, 1); + if (ACPI_FAILURE (status)) { + goto close_this_op; + } + walk_state->arg_types = 0; break; @@ -719,202 +710,268 @@ /* Op is not a constant or string, append each argument */ - while (GET_CURRENT_ARG_TYPE (walk_state->arg_types) && !walk_state->arg_count) { - walk_state->aml_offset = parser_state->aml - parser_state->aml_start; - arg = acpi_ps_get_next_arg (parser_state, - GET_CURRENT_ARG_TYPE (walk_state->arg_types), - &walk_state->arg_count); + while (GET_CURRENT_ARG_TYPE (walk_state->arg_types) && + !walk_state->arg_count) { + walk_state->aml_offset = ACPI_PTR_DIFF (parser_state->aml, + parser_state->aml_start); + status = acpi_ps_get_next_arg (walk_state, parser_state, + GET_CURRENT_ARG_TYPE (walk_state->arg_types), &arg); + if (ACPI_FAILURE (status)) { + goto close_this_op; + } + if (arg) { - arg->aml_offset = walk_state->aml_offset; + arg->common.aml_offset = walk_state->aml_offset; acpi_ps_append_arg (op, arg); } - INCREMENT_ARG_LIST (walk_state->arg_types); } + switch (op->common.aml_opcode) { + case AML_METHOD_OP: - /* For a method, save the length and address of the body */ + /* For a method, save the length and address of the body */ - if (op->opcode == AML_METHOD_OP) { /* * Skip parsing of control method or opregion body, * because we don't have enough info in the first pass * to parse them correctly. */ - ((acpi_parse2_object * ) op)->data = parser_state->aml; - ((acpi_parse2_object * ) op)->length = (u32) (parser_state->pkg_end - - parser_state->aml); - + op->named.data = parser_state->aml; + op->named.length = (u32) (parser_state->pkg_end - parser_state->aml); /* - * Skip body of method. For Op_regions, we must continue + * Skip body of method. For op_regions, we must continue * parsing because the opregion is not a standalone * package (We don't know where the end is). */ parser_state->aml = parser_state->pkg_end; - walk_state->arg_count = 0; - } + walk_state->arg_count = 0; + break; + + case AML_BUFFER_OP: + case AML_PACKAGE_OP: + case AML_VAR_PACKAGE_OP: + + if ((op->common.parent) && + (op->common.parent->common.aml_opcode == AML_NAME_OP) && + (walk_state->descending_callback != acpi_ds_exec_begin_op)) { + /* + * Skip parsing of + * because we don't have enough info in the first pass + * to parse them correctly. 
+ */ + op->named.data = aml_op_start; + op->named.length = (u32) (parser_state->pkg_end - aml_op_start); + /* + * Skip body + */ + parser_state->aml = parser_state->pkg_end; + walk_state->arg_count = 0; + } + break; + + case AML_WHILE_OP: + + if (walk_state->control_state) { + walk_state->control_state->control.package_end = parser_state->pkg_end; + } + break; + default: + /* No action for all other opcodes */ + break; + } break; } } + /* Check for arguments that need to be processed */ - /* - * Zero Arg_count means that all arguments for this op have been processed - */ - if (!walk_state->arg_count) { - /* completed Op, prepare for next */ + if (walk_state->arg_count) { + /* There are arguments (complex ones), push Op and prepare for argument */ - walk_state->op_info = acpi_ps_get_opcode_info (op->opcode); - if (walk_state->op_info->flags & AML_NAMED) { - if (acpi_gbl_depth) { - acpi_gbl_depth--; - } + status = acpi_ps_push_scope (parser_state, op, + walk_state->arg_types, walk_state->arg_count); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + op = NULL; + continue; + } - if (op->opcode == AML_REGION_OP) { - /* - * Skip parsing of control method or opregion body, - * because we don't have enough info in the first pass - * to parse them correctly. - * - * Completed parsing an Op_region declaration, we now - * know the length. - */ - ((acpi_parse2_object * ) op)->length = (u32) (parser_state->aml - - ((acpi_parse2_object * ) op)->data); - } + /* All arguments have been processed -- Op is complete, prepare for next */ + + walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode); + if (walk_state->op_info->flags & AML_NAMED) { + if (acpi_gbl_depth) { + acpi_gbl_depth--; } - if (walk_state->op_info->flags & AML_CREATE) { + if (op->common.aml_opcode == AML_REGION_OP) { /* - * Backup to beginning of Create_xXXfield declaration (1 for - * Opcode) + * Skip parsing of control method or opregion body, + * because we don't have enough info in the first pass + * to parse them correctly. * - * Body_length is unknown until we parse the body + * Completed parsing an op_region declaration, we now + * know the length. 
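
/*
 * A sketch of why Method bodies (and Buffer/Package operands of Name) can
 * be skipped wholesale above: AML prefixes each package with an encoded
 * PkgLength, so once decoded the parser can jump straight to pkg_end and
 * defer the body to a later pass.  The decoder follows the ACPI PkgLength
 * encoding (top two bits of the first byte give the count of trailing
 * bytes); function and variable names are illustrative.
 */
#include <stdio.h>

/* Decode an AML PkgLength; returns the length, advances *aml past it. */
static unsigned int decode_pkg_length(const unsigned char **aml)
{
    const unsigned char *p = *aml;
    unsigned int byte_count = p[0] >> 6;        /* 0..3 trailing bytes */
    unsigned int length;
    unsigned int i;

    if (byte_count == 0) {
        length = p[0] & 0x3F;                   /* 6-bit length */
    } else {
        length = p[0] & 0x0F;                   /* low 4 bits first */
        for (i = 1; i <= byte_count; i++)
            length |= (unsigned int)p[i] << (i * 8 - 4);
    }
    *aml = p + 1 + byte_count;
    return length;
}

int main(void)
{
    /* 0x48 0x02: two-byte encoding, length = 0x8 | (0x02 << 4) = 0x28 */
    const unsigned char body[] = { 0x48, 0x02 };
    const unsigned char *aml = body;
    const unsigned char *pkg_start = aml;
    unsigned int len = decode_pkg_length(&aml);
    /* per the ACPI spec, the length counts from the PkgLength bytes */
    const unsigned char *pkg_end = pkg_start + len;

    printf("pkg length %u, encoding used %ld bytes\n",
           len, (long)(aml - body));
    (void)pkg_end;
    return 0;
}
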
*/ - ((acpi_parse2_object * ) op)->length = (u32) (parser_state->aml - - ((acpi_parse2_object * ) op)->data); + op->named.length = (u32) (parser_state->aml - op->named.data); } + } - /* This op complete, notify the dispatcher */ + if (walk_state->op_info->flags & AML_CREATE) { + /* + * Backup to beginning of create_xXXfield declaration (1 for + * Opcode) + * + * body_length is unknown until we parse the body + */ + op->named.length = (u32) (parser_state->aml - op->named.data); + } - if (walk_state->ascending_callback != NULL) { - walk_state->op = op; - walk_state->opcode = op->opcode; + /* This op complete, notify the dispatcher */ - status = walk_state->ascending_callback (walk_state); - status = acpi_ps_next_parse_state (walk_state, op, status); - if (status == AE_CTRL_PENDING) { - status = AE_OK; - goto close_this_op; - } + if (walk_state->ascending_callback != NULL) { + walk_state->op = op; + walk_state->opcode = op->common.aml_opcode; + + status = walk_state->ascending_callback (walk_state); + status = acpi_ps_next_parse_state (walk_state, op, status); + if (status == AE_CTRL_PENDING) { + status = AE_OK; + goto close_this_op; } + } close_this_op: + /* + * Finished one argument of the containing scope + */ + parser_state->scope->parse_scope.arg_count--; - /* - * Finished one argument of the containing scope - */ - parser_state->scope->parse_scope.arg_count--; - - /* Close this Op (may result in parse subtree deletion) */ - - if (acpi_ps_complete_this_op (walk_state, op)) { - op = NULL; - } + /* Close this Op (will result in parse subtree deletion) */ + acpi_ps_complete_this_op (walk_state, op); + op = NULL; - switch (status) { - case AE_OK: - break; + switch (status) { + case AE_OK: + break; - case AE_CTRL_TRANSFER: + case AE_CTRL_TRANSFER: - /* - * We are about to transfer to a called method. - */ - walk_state->prev_op = op; - walk_state->prev_arg_types = walk_state->arg_types; - return_ACPI_STATUS (status); - break; + /* + * We are about to transfer to a called method. 
+ */ + walk_state->prev_op = op; + walk_state->prev_arg_types = walk_state->arg_types; + return_ACPI_STATUS (status); - case AE_CTRL_END: + case AE_CTRL_END: - acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types, &walk_state->arg_count); + acpi_ps_pop_scope (parser_state, &op, + &walk_state->arg_types, &walk_state->arg_count); + if (op) { walk_state->op = op; - walk_state->op_info = acpi_ps_get_opcode_info (op->opcode); - walk_state->opcode = op->opcode; + walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode); + walk_state->opcode = op->common.aml_opcode; status = walk_state->ascending_callback (walk_state); status = acpi_ps_next_parse_state (walk_state, op, status); acpi_ps_complete_this_op (walk_state, op); op = NULL; - status = AE_OK; - break; + } + status = AE_OK; + break; - case AE_CTRL_TERMINATE: + case AE_CTRL_BREAK: + case AE_CTRL_CONTINUE: - status = AE_OK; + /* Pop off scopes until we find the While */ - /* Clean up */ - do { - if (op) { - acpi_ps_complete_this_op (walk_state, op); - } + while (!op || (op->common.aml_opcode != AML_WHILE_OP)) { + acpi_ps_pop_scope (parser_state, &op, + &walk_state->arg_types, &walk_state->arg_count); + } - acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types, &walk_state->arg_count); - } while (op); + /* Close this iteration of the While loop */ - return_ACPI_STATUS (status); - break; + walk_state->op = op; + walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode); + walk_state->opcode = op->common.aml_opcode; + + status = walk_state->ascending_callback (walk_state); + status = acpi_ps_next_parse_state (walk_state, op, status); + + acpi_ps_complete_this_op (walk_state, op); + op = NULL; + status = AE_OK; + break; - default: /* All other non-AE_OK status */ - if (op == NULL) { - acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types, &walk_state->arg_count); + case AE_CTRL_TERMINATE: + + status = AE_OK; + + /* Clean up */ + do { + if (op) { + acpi_ps_complete_this_op (walk_state, op); } - walk_state->prev_op = op; - walk_state->prev_arg_types = walk_state->arg_types; + acpi_ps_pop_scope (parser_state, &op, + &walk_state->arg_types, &walk_state->arg_count); - /* - * TEMP: - */ + } while (op); - return_ACPI_STATUS (status); - break; - } + return_ACPI_STATUS (status); - /* This scope complete? */ - if (acpi_ps_has_completed_scope (parser_state)) { - acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types, &walk_state->arg_count); - ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Popped scope, Op=%p\n", op)); - } + default: /* All other non-AE_OK status */ - else { - op = NULL; - } + do { + if (op) { + acpi_ps_complete_this_op (walk_state, op); + } + acpi_ps_pop_scope (parser_state, &op, + &walk_state->arg_types, &walk_state->arg_count); - } + } while (op); - /* Arg_count is non-zero */ + /* + * TBD: Cleanup parse ops on error + */ +#if 0 + if (op == NULL) { + acpi_ps_pop_scope (parser_state, &op, + &walk_state->arg_types, &walk_state->arg_count); + } +#endif + walk_state->prev_op = op; + walk_state->prev_arg_types = walk_state->arg_types; + return_ACPI_STATUS (status); + } - else { - /* complex argument, push Op and prepare for argument */ + /* This scope complete? 
*/ - acpi_ps_push_scope (parser_state, op, walk_state->arg_types, walk_state->arg_count); + if (acpi_ps_has_completed_scope (parser_state)) { + acpi_ps_pop_scope (parser_state, &op, + &walk_state->arg_types, &walk_state->arg_count); + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Popped scope, Op=%p\n", op)); + } + else { op = NULL; } - } /* while Parser_state->Aml */ + } /* while parser_state->Aml */ /* @@ -928,8 +985,8 @@ if (op) { if (walk_state->ascending_callback != NULL) { walk_state->op = op; - walk_state->op_info = acpi_ps_get_opcode_info (op->opcode); - walk_state->opcode = op->opcode; + walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode); + walk_state->opcode = op->common.aml_opcode; status = walk_state->ascending_callback (walk_state); status = acpi_ps_next_parse_state (walk_state, op, status); @@ -947,7 +1004,8 @@ acpi_ps_complete_this_op (walk_state, op); } - acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types, &walk_state->arg_count); + acpi_ps_pop_scope (parser_state, &op, + &walk_state->arg_types, &walk_state->arg_count); } while (op); @@ -963,7 +1021,8 @@ acpi_ps_complete_this_op (walk_state, op); } - acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types, &walk_state->arg_count); + acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types, + &walk_state->arg_count); } while (op); @@ -973,12 +1032,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_parse_aml + * FUNCTION: acpi_ps_parse_aml * - * PARAMETERS: Start_scope - The starting point of the parse. Becomes the + * PARAMETERS: start_scope - The starting point of the parse. Becomes the * root of the parsed op tree. * Aml - Pointer to the raw AML code to parse - * Aml_size - Length of the AML to parse + * aml_size - Length of the AML to parse * * * RETURN: Status @@ -989,33 +1048,36 @@ acpi_status acpi_ps_parse_aml ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { - acpi_status status; - acpi_walk_list walk_list; - acpi_walk_list *prev_walk_list = acpi_gbl_current_walk_list; - acpi_walk_state *previous_walk_state; + acpi_status status; + acpi_status terminate_status; + struct acpi_thread_state *thread; + struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list; + struct acpi_walk_state *previous_walk_state; - FUNCTION_TRACE ("Ps_parse_aml"); + ACPI_FUNCTION_TRACE ("ps_parse_aml"); - ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Entered with Walk_state=%p Aml=%p size=%X\n", + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Entered with walk_state=%p Aml=%p size=%X\n", walk_state, walk_state->parser_state.aml, walk_state->parser_state.aml_size)); - /* Create and initialize a new walk list */ + /* Create and initialize a new thread state */ - walk_list.walk_state = NULL; - walk_list.acquired_mutex_list.prev = NULL; - walk_list.acquired_mutex_list.next = NULL; - - walk_state->walk_list = &walk_list; - acpi_ds_push_walk_state (walk_state, &walk_list); + thread = acpi_ut_create_thread_state (); + if (!thread) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + walk_state->thread = thread; + acpi_ds_push_walk_state (walk_state, thread); - /* TBD: [Restructure] TEMP until we pass Walk_state to the interpreter + /* + * This global allows the AML debugger to get a handle to the currently + * executing control method. */ - acpi_gbl_current_walk_list = &walk_list; + acpi_gbl_current_walk_list = thread; /* * Execute the walk loop as long as there is a valid Walk State. 
This @@ -1027,37 +1089,41 @@ while (walk_state) { if (ACPI_SUCCESS (status)) { /* - * The Parse_loop executes AML until the method terminates + * The parse_loop executes AML until the method terminates * or calls another method. */ status = acpi_ps_parse_loop (walk_state); } ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, - "Completed one call to walk loop, State=%p\n", walk_state)); + "Completed one call to walk loop, %s State=%p\n", + acpi_format_exception (status), walk_state)); if (status == AE_CTRL_TRANSFER) { /* * A method call was detected. * Transfer control to the called control method */ - status = acpi_ds_call_control_method (&walk_list, walk_state, NULL); + status = acpi_ds_call_control_method (thread, walk_state, NULL); /* * If the transfer to the new method method call worked, a new walk * state was created -- get it */ - walk_state = acpi_ds_get_current_walk_state (&walk_list); + walk_state = acpi_ds_get_current_walk_state (thread); continue; } - else if (status == AE_CTRL_TERMINATE) { status = AE_OK; } + else if (status != AE_OK) { + ACPI_REPORT_METHOD_ERROR ("Method execution failed", + walk_state->method_node, NULL, status); + } /* We are done with this walk, move on to the parent if any */ - walk_state = acpi_ds_pop_walk_state (&walk_list); + walk_state = acpi_ds_pop_walk_state (thread); /* Reset the current scope to the beginning of scope stack */ @@ -1068,7 +1134,13 @@ * there's lots of cleanup to do */ if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) == ACPI_PARSE_EXECUTE) { - acpi_ds_terminate_control_method (walk_state); + terminate_status = acpi_ds_terminate_control_method (walk_state); + if (ACPI_FAILURE (terminate_status)) { + ACPI_REPORT_ERROR (( + "Could not terminate control method properly\n")); + + /* Ignore error and continue */ + } } /* Delete this walk state and all linked control states */ @@ -1077,22 +1149,29 @@ previous_walk_state = walk_state; - ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Return_value=%p, State=%p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "return_value=%p, State=%p\n", walk_state->return_desc, walk_state)); /* Check if we have restarted a preempted walk */ - walk_state = acpi_ds_get_current_walk_state (&walk_list); + walk_state = acpi_ds_get_current_walk_state (thread); if (walk_state) { if (ACPI_SUCCESS (status)) { - /* There is another walk state, restart it */ - /* - * If the method returned value is not used by the parent, + * There is another walk state, restart it. 
+ * If the method return value is not used by the parent, * The object is deleted */ - acpi_ds_restart_control_method (walk_state, previous_walk_state->return_desc); - walk_state->walk_type |= WALK_METHOD_RESTART; + status = acpi_ds_restart_control_method (walk_state, + previous_walk_state->return_desc); + if (ACPI_SUCCESS (status)) { + walk_state->walk_type |= ACPI_WALK_METHOD_RESTART; + } + } + else { + /* On error, delete any return object */ + + acpi_ut_remove_reference (previous_walk_state->return_desc); } } @@ -1103,7 +1182,6 @@ else if (previous_walk_state->caller_return_desc) { *(previous_walk_state->caller_return_desc) = previous_walk_state->return_desc; /* NULL if no return value */ } - else if (previous_walk_state->return_desc) { /* Caller doesn't want it, must delete it */ @@ -1113,10 +1191,10 @@ acpi_ds_delete_walk_state (previous_walk_state); } - /* Normal exit */ - acpi_ex_release_all_mutexes ((acpi_operand_object *) &walk_list.acquired_mutex_list); + acpi_ex_release_all_mutexes (thread); + acpi_ut_delete_generic_state (ACPI_CAST_PTR (union acpi_generic_state, thread)); acpi_gbl_current_walk_list = prev_walk_list; return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/parser/psscope.c linux.21rc5-ac2/drivers/acpi/parser/psscope.c --- linux.21rc5/drivers/acpi/parser/psscope.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/parser/psscope.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,41 +1,59 @@ /****************************************************************************** * * Module Name: psscope - Parser scope stack management routines - * $Revision: 30 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" +#include +#include #define _COMPONENT ACPI_PARSER - MODULE_NAME ("psscope") + ACPI_MODULE_NAME ("psscope") /******************************************************************************* * - * FUNCTION: Acpi_ps_get_parent_scope + * FUNCTION: acpi_ps_get_parent_scope * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * * RETURN: Pointer to an Op object * @@ -43,9 +61,9 @@ * ******************************************************************************/ -acpi_parse_object * +union acpi_parse_object * acpi_ps_get_parent_scope ( - acpi_parse_state *parser_state) + struct acpi_parse_state *parser_state) { return (parser_state->scope->parse_scope.op); } @@ -53,9 +71,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_has_completed_scope + * FUNCTION: acpi_ps_has_completed_scope * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * * RETURN: Boolean, TRUE = scope completed. 
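
/*
 * A minimal sketch of the parse-scope stack that psscope.c maintains: an Op
 * with unparsed arguments is pushed together with its remaining-argument
 * bookkeeping, and popping restores the parent's state.  A fixed array
 * stands in for the kernel's generic-state objects, and op_id stands in
 * for the Op pointer.
 */
#include <stdio.h>

#define MAX_SCOPES 32

struct parse_scope {
    int op_id;            /* stand-in for the Op pointer */
    unsigned arg_types;   /* remaining argument-type list */
    unsigned arg_count;   /* fixed or variable arg count */
};

static struct parse_scope scope_stack[MAX_SCOPES];
static int scope_top = -1;

static int push_scope(int op_id, unsigned arg_types, unsigned arg_count)
{
    if (scope_top + 1 >= MAX_SCOPES)
        return -1;        /* analogue of returning AE_NO_MEMORY */
    scope_stack[++scope_top] =
        (struct parse_scope){ op_id, arg_types, arg_count };
    return 0;
}

static void pop_scope(int *op_id, unsigned *arg_types, unsigned *arg_count)
{
    if (scope_top < 0) {
        *op_id = -1;      /* stack empty: no parent scope */
        *arg_types = *arg_count = 0;
        return;
    }
    *op_id = scope_stack[scope_top].op_id;
    *arg_types = scope_stack[scope_top].arg_types;
    *arg_count = scope_stack[scope_top].arg_count;
    scope_top--;
}

int main(void)
{
    int op; unsigned types, count;
    push_scope(1, 0x5, 2);     /* parent op, two args pending */
    push_scope(2, 0x1, 0);     /* child op */
    pop_scope(&op, &types, &count);
    pop_scope(&op, &types, &count);
    printf("back at op %d with %u args pending\n", op, count);
    return 0;
}
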
* @@ -67,7 +85,7 @@ u8 acpi_ps_has_completed_scope ( - acpi_parse_state *parser_state) + struct acpi_parse_state *parser_state) { return ((u8) ((parser_state->aml >= parser_state->scope->parse_scope.arg_end || !parser_state->scope->parse_scope.arg_count))); @@ -76,9 +94,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_init_scope + * FUNCTION: acpi_ps_init_scope * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * Root - the Root Node of this new scope * * RETURN: Status @@ -89,13 +107,13 @@ acpi_status acpi_ps_init_scope ( - acpi_parse_state *parser_state, - acpi_parse_object *root_op) + struct acpi_parse_state *parser_state, + union acpi_parse_object *root_op) { - acpi_generic_state *scope; + union acpi_generic_state *scope; - FUNCTION_TRACE_PTR ("Ps_init_scope", root_op); + ACPI_FUNCTION_TRACE_PTR ("ps_init_scope", root_op); scope = acpi_ut_create_generic_state (); @@ -118,12 +136,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_push_scope + * FUNCTION: acpi_ps_push_scope * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * Op - Current op to be pushed - * Remaining_args - List of args remaining - * Arg_count - Fixed or variable number of args + * remaining_args - List of args remaining + * arg_count - Fixed or variable number of args * * RETURN: Status * @@ -133,20 +151,20 @@ acpi_status acpi_ps_push_scope ( - acpi_parse_state *parser_state, - acpi_parse_object *op, - u32 remaining_args, - u32 arg_count) + struct acpi_parse_state *parser_state, + union acpi_parse_object *op, + u32 remaining_args, + u32 arg_count) { - acpi_generic_state *scope; + union acpi_generic_state *scope; - FUNCTION_TRACE_PTR ("Ps_push_scope", op); + ACPI_FUNCTION_TRACE_PTR ("ps_push_scope", op); scope = acpi_ut_create_generic_state (); if (!scope) { - return (AE_NO_MEMORY); + return_ACPI_STATUS (AE_NO_MEMORY); } @@ -170,7 +188,7 @@ else { /* single argument */ - scope->parse_scope.arg_end = ACPI_MAX_AML; + scope->parse_scope.arg_end = ACPI_TO_POINTER (ACPI_MAX_PTR); } return_ACPI_STATUS (AE_OK); @@ -179,13 +197,13 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_pop_scope + * FUNCTION: acpi_ps_pop_scope * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * Op - Where the popped op is returned - * Arg_list - Where the popped "next argument" is + * arg_list - Where the popped "next argument" is * returned - * Arg_count - Count of objects in Arg_list + * arg_count - Count of objects in arg_list * * RETURN: Status * @@ -195,15 +213,15 @@ void acpi_ps_pop_scope ( - acpi_parse_state *parser_state, - acpi_parse_object **op, - u32 *arg_list, - u32 *arg_count) + struct acpi_parse_state *parser_state, + union acpi_parse_object **op, + u32 *arg_list, + u32 *arg_count) { - acpi_generic_state *scope = parser_state->scope; + union acpi_generic_state *scope = parser_state->scope; - FUNCTION_TRACE ("Ps_pop_scope"); + ACPI_FUNCTION_TRACE ("ps_pop_scope"); /* @@ -240,9 +258,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_cleanup_scope + * FUNCTION: acpi_ps_cleanup_scope * - * PARAMETERS: Parser_state - Current parser state object + * PARAMETERS: parser_state - Current parser state object * 
* RETURN: Status * @@ -253,11 +271,11 @@ void acpi_ps_cleanup_scope ( - acpi_parse_state *parser_state) + struct acpi_parse_state *parser_state) { - acpi_generic_state *scope; + union acpi_generic_state *scope; - FUNCTION_TRACE_PTR ("Ps_cleanup_scope", parser_state); + ACPI_FUNCTION_TRACE_PTR ("ps_cleanup_scope", parser_state); if (!parser_state) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/parser/pstree.c linux.21rc5-ac2/drivers/acpi/parser/pstree.c --- linux.21rc5/drivers/acpi/parser/pstree.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/parser/pstree.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,40 +1,58 @@ /****************************************************************************** * * Module Name: pstree - Parser op tree manipulation/traversal/search - * $Revision: 35 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" +#include +#include +#include #define _COMPONENT ACPI_PARSER - MODULE_NAME ("pstree") + ACPI_MODULE_NAME ("pstree") /******************************************************************************* * - * FUNCTION: Acpi_ps_get_arg + * FUNCTION: acpi_ps_get_arg * * PARAMETERS: Op - Get an argument for this op * Argn - Nth argument to get @@ -45,21 +63,21 @@ * ******************************************************************************/ -acpi_parse_object * +union acpi_parse_object * acpi_ps_get_arg ( - acpi_parse_object *op, - u32 argn) + union acpi_parse_object *op, + u32 argn) { - acpi_parse_object *arg = NULL; - const acpi_opcode_info *op_info; + union acpi_parse_object *arg = NULL; + const struct acpi_opcode_info *op_info; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* Get the info structure for this opcode */ - op_info = acpi_ps_get_opcode_info (op->opcode); + op_info = acpi_ps_get_opcode_info (op->common.aml_opcode); if (op_info->class == AML_CLASS_UNKNOWN) { /* Invalid opcode or ASCII character */ @@ -76,10 +94,10 @@ /* Get the requested argument object */ - arg = op->value.arg; + arg = op->common.value.arg; while (arg && argn) { argn--; - arg = arg->next; + arg = arg->common.next; } return (arg); @@ -88,7 +106,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_append_arg + * FUNCTION: acpi_ps_append_arg * * PARAMETERS: Op - Append an argument to this Op. 
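The pstree.c hunks above and below reroute every field access through op->common.*: that is ACPICA's union-with-common-header layout, in which each parse-object variant begins with the same header so generic traversal code can follow parent/next links without knowing which union member is live. A minimal standalone sketch of the pattern, with simplified types and field names (illustration only, not patch code):

union parse_object;                     /* forward declaration */

struct parse_obj_common {
    union parse_object *parent;
    union parse_object *next;
    unsigned short aml_opcode;
};

struct parse_obj_named {
    struct parse_obj_common common;     /* header must come first */
    unsigned int name;                  /* packed 4-character ACPI name */
};

union parse_object {
    struct parse_obj_common common;     /* valid for every variant */
    struct parse_obj_named named;
};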
* Arg - Argument Op to append @@ -101,14 +119,14 @@ void acpi_ps_append_arg ( - acpi_parse_object *op, - acpi_parse_object *arg) + union acpi_parse_object *op, + union acpi_parse_object *arg) { - acpi_parse_object *prev_arg; - const acpi_opcode_info *op_info; + union acpi_parse_object *prev_arg; + const struct acpi_opcode_info *op_info; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); if (!op) { @@ -117,11 +135,12 @@ /* Get the info structure for this opcode */ - op_info = acpi_ps_get_opcode_info (op->opcode); + op_info = acpi_ps_get_opcode_info (op->common.aml_opcode); if (op_info->class == AML_CLASS_UNKNOWN) { /* Invalid opcode */ - REPORT_ERROR (("Ps_append_arg: Invalid AML Opcode: 0x%2.2X\n", op->opcode)); + ACPI_REPORT_ERROR (("ps_append_arg: Invalid AML Opcode: 0x%2.2X\n", + op->common.aml_opcode)); return; } @@ -136,35 +155,35 @@ /* Append the argument to the linked argument list */ - if (op->value.arg) { + if (op->common.value.arg) { /* Append to existing argument list */ - prev_arg = op->value.arg; - while (prev_arg->next) { - prev_arg = prev_arg->next; + prev_arg = op->common.value.arg; + while (prev_arg->common.next) { + prev_arg = prev_arg->common.next; } - prev_arg->next = arg; + prev_arg->common.next = arg; } else { /* No argument list, this will be the first argument */ - op->value.arg = arg; + op->common.value.arg = arg; } /* Set the parent in this arg and any args linked after it */ while (arg) { - arg->parent = op; - arg = arg->next; + arg->common.parent = op; + arg = arg->common.next; } } /******************************************************************************* * - * FUNCTION: Acpi_ps_get_child + * FUNCTION: acpi_ps_get_child * * PARAMETERS: Op - Get the child of this Op * @@ -174,17 +193,17 @@ * ******************************************************************************/ -acpi_parse_object * +union acpi_parse_object * acpi_ps_get_child ( - acpi_parse_object *op) + union acpi_parse_object *op) { - acpi_parse_object *child = NULL; + union acpi_parse_object *child = NULL; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); - switch (op->opcode) { + switch (op->common.aml_opcode) { case AML_SCOPE_OP: case AML_ELSE_OP: case AML_DEVICE_OP: @@ -219,6 +238,10 @@ child = acpi_ps_get_arg (op, 3); break; + + default: + /* All others have no children */ + break; } return (child); @@ -227,7 +250,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_get_depth_next + * FUNCTION: acpi_ps_get_depth_next * * PARAMETERS: Origin - Root of subtree to search * Op - Last (previous) Op that was found @@ -239,17 +262,17 @@ * ******************************************************************************/ -acpi_parse_object * +union acpi_parse_object * acpi_ps_get_depth_next ( - acpi_parse_object *origin, - acpi_parse_object *op) + union acpi_parse_object *origin, + union acpi_parse_object *op) { - acpi_parse_object *next = NULL; - acpi_parse_object *parent; - acpi_parse_object *arg; + union acpi_parse_object *next = NULL; + union acpi_parse_object *parent; + union acpi_parse_object *arg; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); if (!op) { @@ -265,19 +288,19 @@ /* look for a sibling */ - next = op->next; + next = op->common.next; if (next) { return (next); } /* look for a sibling of parent */ - parent = op->parent; + parent = op->common.parent; while (parent) { arg = acpi_ps_get_arg (parent, 0); while (arg && (arg != origin) && (arg != op)) { - arg = arg->next; + arg = arg->common.next; } if (arg == origin) { @@ -286,13 +309,14 @@ 
return (NULL); } - if (parent->next) { + if (parent->common.next) { /* found sibling of parent */ - return (parent->next); + + return (parent->common.next); } op = parent; - parent = parent->parent; + parent = parent->common.parent; } return (next); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/parser/psutils.c linux.21rc5-ac2/drivers/acpi/parser/psutils.c --- linux.21rc5/drivers/acpi/parser/psutils.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/parser/psutils.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,47 +1,89 @@ /****************************************************************************** * * Module Name: psutils - Parser miscellaneous utilities (Parser only) - * $Revision: 44 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
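For reference, the stepping order acpi_ps_get_depth_next() implements in the pstree.c hunks above is the classic pointer-chasing pre-order successor: first child, else next sibling, else the nearest ancestor's next sibling, stopping when the walk climbs back to the subtree origin. A self-contained sketch with a simplified node type (the real code scans the parent's argument list to detect the origin; this shows the idea only):

struct node {
    struct node *child;                 /* first argument/child */
    struct node *next;                  /* next sibling */
    struct node *parent;
};

static struct node *
depth_next(struct node *origin, struct node *op)
{
    struct node *parent;

    if (op->child)
        return op->child;               /* descend */
    if (op == origin)
        return 0;                       /* childless subtree root: done */
    if (op->next)
        return op->next;                /* sibling */

    for (parent = op->parent; parent; parent = parent->parent) {
        if (parent == origin)
            return 0;                   /* back at the subtree root: done */
        if (parent->next)
            return parent->next;        /* ancestor's sibling */
    }
    return 0;
}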
*/ -#include "acpi.h" -#include "acparser.h" -#include "amlcode.h" +#include +#include +#include +#include #define _COMPONENT ACPI_PARSER - MODULE_NAME ("psutils") + ACPI_MODULE_NAME ("psutils") -#define PARSEOP_GENERIC 0x01 -#define PARSEOP_NAMED 0x02 -#define PARSEOP_DEFERRED 0x04 -#define PARSEOP_BYTELIST 0x08 -#define PARSEOP_IN_CACHE 0x80 +/******************************************************************************* + * + * FUNCTION: acpi_ps_create_scope_op + * + * PARAMETERS: None + * + * RETURN: scope_op + * + * DESCRIPTION: Create a Scope and associated namepath op with the root name + * + ******************************************************************************/ + +union acpi_parse_object * +acpi_ps_create_scope_op ( + void) +{ + union acpi_parse_object *scope_op; + + + scope_op = acpi_ps_alloc_op (AML_SCOPE_OP); + if (!scope_op) { + return (NULL); + } + + + scope_op->named.name = ACPI_ROOT_NAME; + return (scope_op); +} /******************************************************************************* * - * FUNCTION: Acpi_ps_init_op + * FUNCTION: acpi_ps_init_op * * PARAMETERS: Op - A newly allocated Op object * Opcode - Opcode to store in the Op @@ -55,28 +97,23 @@ void acpi_ps_init_op ( - acpi_parse_object *op, - u16 opcode) + union acpi_parse_object *op, + u16 opcode) { - const acpi_opcode_info *aml_op; - - - FUNCTION_ENTRY (); - + ACPI_FUNCTION_ENTRY (); - op->data_type = ACPI_DESC_TYPE_PARSER; - op->opcode = opcode; - aml_op = acpi_ps_get_opcode_info (opcode); + op->common.data_type = ACPI_DESC_TYPE_PARSER; + op->common.aml_opcode = opcode; - DEBUG_ONLY_MEMBERS (STRNCPY (op->op_name, aml_op->name, - sizeof (op->op_name))); + ACPI_DISASM_ONLY_MEMBERS (ACPI_STRNCPY (op->common.aml_op_name, + (acpi_ps_get_opcode_info (opcode))->name, sizeof (op->common.aml_op_name))); } /******************************************************************************* * - * FUNCTION: Acpi_ps_alloc_op + * FUNCTION: acpi_ps_alloc_op * * PARAMETERS: Opcode - Opcode that will be stored in the new Op * @@ -88,17 +125,17 @@ * ******************************************************************************/ -acpi_parse_object* +union acpi_parse_object* acpi_ps_alloc_op ( - u16 opcode) + u16 opcode) { - acpi_parse_object *op = NULL; - u32 size; - u8 flags; - const acpi_opcode_info *op_info; + union acpi_parse_object *op = NULL; + u32 size; + u8 flags; + const struct acpi_opcode_info *op_info; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); op_info = acpi_ps_get_opcode_info (opcode); @@ -106,33 +143,28 @@ /* Allocate the minimum required size object */ if (op_info->flags & AML_DEFER) { - size = sizeof (acpi_parse2_object); - flags = PARSEOP_DEFERRED; + size = sizeof (struct acpi_parse_obj_named); + flags = ACPI_PARSEOP_DEFERRED; } - else if (op_info->flags & AML_NAMED) { - size = sizeof (acpi_parse2_object); - flags = PARSEOP_NAMED; + size = sizeof (struct acpi_parse_obj_named); + flags = ACPI_PARSEOP_NAMED; } - else if (opcode == AML_INT_BYTELIST_OP) { - size = sizeof (acpi_parse2_object); - flags = PARSEOP_BYTELIST; + size = sizeof (struct acpi_parse_obj_named); + flags = ACPI_PARSEOP_BYTELIST; } - else { - size = sizeof (acpi_parse_object); - flags = PARSEOP_GENERIC; + size = sizeof (struct acpi_parse_obj_common); + flags = ACPI_PARSEOP_GENERIC; } - - if (size == sizeof (acpi_parse_object)) { + if (size == sizeof (struct acpi_parse_obj_common)) { /* * The generic op is by far the most common (16 to 1) */ op = acpi_ut_acquire_from_cache (ACPI_MEM_LIST_PSNODE); } - else { op = 
acpi_ut_acquire_from_cache (ACPI_MEM_LIST_PSNODE_EXT); } @@ -141,7 +173,7 @@ if (op) { acpi_ps_init_op (op, opcode); - op->flags = flags; + op->common.flags = flags; } return (op); @@ -150,7 +182,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_free_op + * FUNCTION: acpi_ps_free_op * * PARAMETERS: Op - Op to be freed * @@ -163,19 +195,18 @@ void acpi_ps_free_op ( - acpi_parse_object *op) + union acpi_parse_object *op) { - PROC_NAME ("Ps_free_op"); + ACPI_FUNCTION_NAME ("ps_free_op"); - if (op->opcode == AML_INT_RETURN_VALUE_OP) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Free retval op: %p\n", op)); + if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n", op)); } - if (op->flags == PARSEOP_GENERIC) { + if (op->common.flags & ACPI_PARSEOP_GENERIC) { acpi_ut_release_to_cache (ACPI_MEM_LIST_PSNODE, op); } - else { acpi_ut_release_to_cache (ACPI_MEM_LIST_PSNODE_EXT, op); } @@ -184,7 +215,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_delete_parse_cache + * FUNCTION: acpi_ps_delete_parse_cache * * PARAMETERS: None * @@ -198,7 +229,7 @@ acpi_ps_delete_parse_cache ( void) { - FUNCTION_TRACE ("Ps_delete_parse_cache"); + ACPI_FUNCTION_TRACE ("ps_delete_parse_cache"); acpi_ut_delete_generic_cache (ACPI_MEM_LIST_PSNODE); @@ -221,7 +252,7 @@ */ u8 acpi_ps_is_leading_char ( - u32 c) + u32 c) { return ((u8) (c == '_' || (c >= 'A' && c <= 'Z'))); } @@ -232,7 +263,7 @@ */ u8 acpi_ps_is_prefix_char ( - u32 c) + u32 c) { return ((u8) (c == '\\' || c == '^')); } @@ -243,19 +274,19 @@ */ u32 acpi_ps_get_name ( - acpi_parse_object *op) + union acpi_parse_object *op) { /* The "generic" object has no name associated with it */ - if (op->flags & PARSEOP_GENERIC) { + if (op->common.flags & ACPI_PARSEOP_GENERIC) { return (0); } /* Only the "Extended" parse objects have a name */ - return (((acpi_parse2_object *) op)->name); + return (op->named.name); } @@ -264,16 +295,16 @@ */ void acpi_ps_set_name ( - acpi_parse_object *op, - u32 name) + union acpi_parse_object *op, + u32 name) { /* The "generic" object has no name associated with it */ - if (op->flags & PARSEOP_GENERIC) { + if (op->common.flags & ACPI_PARSEOP_GENERIC) { return; } - ((acpi_parse2_object *) op)->name = name; + op->named.name = name; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/parser/pswalk.c linux.21rc5-ac2/drivers/acpi/parser/pswalk.c --- linux.21rc5/drivers/acpi/parser/pswalk.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/parser/pswalk.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,47 +1,62 @@ /****************************************************************************** * * Module Name: pswalk - Parser routines to walk parsed op tree(s) - * $Revision: 58 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
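The acpi_ps_alloc_op()/acpi_ps_free_op() hunks above keep ACPICA's two-size-class scheme: generic ops (by far the most common, roughly 16 to 1) come from one cache and the larger named/deferred/bytelist ops from another, with a flag recorded at allocation time routing the free; note the patch also fixes the release test from an equality check on op->flags to a proper mask test. A standalone sketch of the idea, using plain free lists in place of the acpi_ut_*_cache() helpers (hypothetical names, illustration only):

#include <stdlib.h>

enum { OP_GENERIC = 0x01, OP_NAMED = 0x02 };

struct op {
    unsigned char flags;
    struct op *cache_next;              /* free-list link while cached */
};

static struct op *generic_cache;        /* small objects, very hot */
static struct op *named_cache;          /* larger objects, rare */

static struct op *op_alloc(int named)
{
    struct op **cache = named ? &named_cache : &generic_cache;
    struct op *op = *cache;

    if (op)
        *cache = op->cache_next;        /* reuse a cached object */
    else
        op = calloc(1, sizeof(*op) + (named ? 64 : 0));
    if (op)
        op->flags = named ? OP_NAMED : OP_GENERIC;
    return op;
}

static void op_free(struct op *op)
{
    /* The flag set at allocation selects the right pool. */
    struct op **cache = (op->flags & OP_GENERIC) ? &generic_cache
                                                 : &named_cache;
    op->cache_next = *cache;
    *cache = op;
}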
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "amlcode.h" -#include "acparser.h" -#include "acdispat.h" -#include "acnamesp.h" -#include "acinterp.h" +#include +#include +#include #define _COMPONENT ACPI_PARSER - MODULE_NAME ("pswalk") + ACPI_MODULE_NAME ("pswalk") /******************************************************************************* * - * FUNCTION: Acpi_ps_get_next_walk_op + * FUNCTION: acpi_ps_get_next_walk_op * - * PARAMETERS: Walk_state - Current state of the walk + * PARAMETERS: walk_state - Current state of the walk * Op - Current Op to be walked - * Ascending_callback - Procedure called when Op is complete + * ascending_callback - Procedure called when Op is complete * * RETURN: Status * @@ -51,22 +66,22 @@ acpi_status acpi_ps_get_next_walk_op ( - acpi_walk_state *walk_state, - acpi_parse_object *op, - acpi_parse_upwards ascending_callback) + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + acpi_parse_upwards ascending_callback) { - acpi_parse_object *next; - acpi_parse_object *parent; - acpi_parse_object *grand_parent; - acpi_status status; + union acpi_parse_object *next; + union acpi_parse_object *parent; + union acpi_parse_object *grand_parent; + acpi_status status; - FUNCTION_TRACE_PTR ("Ps_get_next_walk_op", op); + ACPI_FUNCTION_TRACE_PTR ("ps_get_next_walk_op", op); /* Check for a argument only if we are descending in the tree */ - if (walk_state->next_op_info != NEXT_OP_UPWARD) { + if (walk_state->next_op_info != ACPI_NEXT_OP_UPWARD) { /* Look for an argument or child of the current op */ next = acpi_ps_get_arg (op, 0); @@ -75,22 +90,21 @@ walk_state->prev_op = op; walk_state->next_op = next; - walk_state->next_op_info = NEXT_OP_DOWNWARD; + walk_state->next_op_info = ACPI_NEXT_OP_DOWNWARD; return_ACPI_STATUS (AE_OK); } - /* * No more children, this Op is complete. Save Next and Parent * in case the Op object gets deleted by the callback routine */ - next = op->next; - parent = op->parent; + next = op->common.next; + parent = op->common.parent; walk_state->op = op; - walk_state->op_info = acpi_ps_get_opcode_info (op->opcode); - walk_state->opcode = op->opcode; + walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode); + walk_state->opcode = op->common.aml_opcode; status = ascending_callback (walk_state); @@ -115,7 +129,7 @@ walk_state->prev_op = op; walk_state->next_op = next; - walk_state->next_op_info = NEXT_OP_DOWNWARD; + walk_state->next_op_info = ACPI_NEXT_OP_DOWNWARD; /* Continue downward */ @@ -127,7 +141,6 @@ * the tree */ } - else { /* * We are resuming a walk, and we were (are) going upward in the tree. 
@@ -136,7 +149,6 @@ parent = op; } - /* * Look for a sibling of the current Op's parent * Continue moving up the tree until we find a node that has not been @@ -145,12 +157,12 @@ while (parent) { /* We are moving up the tree, therefore this parent Op is complete */ - grand_parent = parent->parent; - next = parent->next; + grand_parent = parent->common.parent; + next = parent->common.next; walk_state->op = parent; - walk_state->op_info = acpi_ps_get_opcode_info (parent->opcode); - walk_state->opcode = parent->opcode; + walk_state->op_info = acpi_ps_get_opcode_info (parent->common.aml_opcode); + walk_state->opcode = parent->common.aml_opcode; status = ascending_callback (walk_state); @@ -175,7 +187,7 @@ walk_state->prev_op = parent; walk_state->next_op = next; - walk_state->next_op_info = NEXT_OP_DOWNWARD; + walk_state->next_op_info = ACPI_NEXT_OP_DOWNWARD; return_ACPI_STATUS (status); } @@ -188,9 +200,10 @@ } - /* Got all the way to the top of the tree, we must be done! */ - /* However, the code should have terminated in the loop above */ - + /* + * Got all the way to the top of the tree, we must be done! + * However, the code should have terminated in the loop above + */ walk_state->next_op = NULL; return_ACPI_STATUS (AE_OK); @@ -199,22 +212,22 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_delete_completed_op + * FUNCTION: acpi_ps_delete_completed_op * * PARAMETERS: State - Walk state * Op - Completed op * * RETURN: AE_OK * - * DESCRIPTION: Callback function for Acpi_ps_get_next_walk_op(). Used during - * Acpi_ps_delete_parse tree to delete Op objects when all sub-objects + * DESCRIPTION: Callback function for acpi_ps_get_next_walk_op(). Used during + * acpi_ps_delete_parse tree to delete Op objects when all sub-objects * have been visited (and deleted.) 
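acpi_ps_delete_parse_tree() below drives this callback so that every Op is freed strictly after its children, an iterative post-order sweep that needs neither recursion nor an auxiliary stack. The same control flow in a self-contained sketch (simplified node type, illustration only):

#include <stdlib.h>

struct node { struct node *child, *next, *parent; };

static void delete_tree(struct node *root)
{
    struct node *op = root;

    while (op) {
        while (op->child)               /* dive to the leftmost leaf */
            op = op->child;

        for (;;) {
            struct node *next = op->next;
            struct node *parent = op->parent;
            int was_root = (op == root);

            free(op);                   /* its children were freed earlier */
            if (was_root)
                return;
            if (next) {                 /* a sibling subtree is pending */
                op = next;
                break;
            }
            op = parent;                /* parent is now childless */
        }
    }
}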
* ******************************************************************************/ -static acpi_status +acpi_status acpi_ps_delete_completed_op ( - acpi_walk_state *walk_state) + struct acpi_walk_state *walk_state) { acpi_ps_free_op (walk_state->op); @@ -224,9 +237,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ps_delete_parse_tree + * FUNCTION: acpi_ps_delete_parse_tree * - * PARAMETERS: Subtree_root - Root of tree (or subtree) to delete + * PARAMETERS: subtree_root - Root of tree (or subtree) to delete * * RETURN: None * @@ -236,13 +249,14 @@ void acpi_ps_delete_parse_tree ( - acpi_parse_object *subtree_root) + union acpi_parse_object *subtree_root) { - acpi_walk_state *walk_state; - acpi_walk_list walk_list; + struct acpi_walk_state *walk_state; + struct acpi_thread_state *thread; + acpi_status status; - FUNCTION_TRACE_PTR ("Ps_delete_parse_tree", subtree_root); + ACPI_FUNCTION_TRACE_PTR ("ps_delete_parse_tree", subtree_root); if (!subtree_root) { @@ -251,11 +265,12 @@ /* Create and initialize a new walk list */ - walk_list.walk_state = NULL; - walk_list.acquired_mutex_list.prev = NULL; - walk_list.acquired_mutex_list.next = NULL; + thread = acpi_ut_create_thread_state (); + if (!thread) { + return_VOID; + } - walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, NULL, NULL, &walk_list); + walk_state = acpi_ds_create_walk_state (0, NULL, NULL, thread); if (!walk_state) { return_VOID; } @@ -264,25 +279,26 @@ walk_state->descending_callback = NULL; walk_state->ascending_callback = NULL; - walk_state->origin = subtree_root; walk_state->next_op = subtree_root; - /* Head downward in the tree */ - walk_state->next_op_info = NEXT_OP_DOWNWARD; + walk_state->next_op_info = ACPI_NEXT_OP_DOWNWARD; /* Visit all nodes in the subtree */ while (walk_state->next_op) { - acpi_ps_get_next_walk_op (walk_state, walk_state->next_op, + status = acpi_ps_get_next_walk_op (walk_state, walk_state->next_op, acpi_ps_delete_completed_op); + if (ACPI_FAILURE (status)) { + break; + } } /* We are done with this walk */ - acpi_ex_release_all_mutexes ((acpi_operand_object *) &walk_list.acquired_mutex_list); + acpi_ut_delete_generic_state (ACPI_CAST_PTR (union acpi_generic_state, thread)); acpi_ds_delete_walk_state (walk_state); return_VOID; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/parser/psxface.c linux.21rc5-ac2/drivers/acpi/parser/psxface.c --- linux.21rc5/drivers/acpi/parser/psxface.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/parser/psxface.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,51 +1,68 @@ /****************************************************************************** * * Module Name: psxface - Parser external interfaces - * $Revision: 52 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acdispat.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acnamesp.h" +#include +#include +#include +#include +#include #define _COMPONENT ACPI_PARSER - MODULE_NAME ("psxface") + ACPI_MODULE_NAME ("psxface") /******************************************************************************* * - * FUNCTION: Acpi_psx_execute + * FUNCTION: acpi_psx_execute * - * PARAMETERS: Method_node - A method object containing both the AML + * PARAMETERS: method_node - A method object containing both the AML * address and length. * **Params - List of parameters to pass to method, * terminated by NULL. Params itself may be * NULL if no parameters are being passed. - * **Return_obj_desc - Return object from execution of the + * **return_obj_desc - Return object from execution of the * method. 
* * RETURN: Status @@ -56,18 +73,18 @@ acpi_status acpi_psx_execute ( - acpi_namespace_node *method_node, - acpi_operand_object **params, - acpi_operand_object **return_obj_desc) + struct acpi_namespace_node *method_node, + union acpi_operand_object **params, + union acpi_operand_object **return_obj_desc) { - acpi_status status; - acpi_operand_object *obj_desc; - u32 i; - acpi_parse_object *op; - acpi_walk_state *walk_state; + acpi_status status; + union acpi_operand_object *obj_desc; + u32 i; + union acpi_parse_object *op; + struct acpi_walk_state *walk_state; - FUNCTION_TRACE ("Psx_execute"); + ACPI_FUNCTION_TRACE ("psx_execute"); /* Validate the Node and get the attached object */ @@ -102,20 +119,27 @@ * 1) Perform the first pass parse of the method to enter any * named objects that it creates into the namespace */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "**** Begin Method Parse **** Entry=%p obj=%p\n", method_node, obj_desc)); /* Create and init a Root Node */ - op = acpi_ps_alloc_op (AML_SCOPE_OP); + op = acpi_ps_create_scope_op (); if (!op) { return_ACPI_STATUS (AE_NO_MEMORY); } + /* + * Get a new owner_id for objects created by this method. Namespace + * objects (such as Operation Regions) can be created during the + * first pass parse. + */ + obj_desc->method.owning_id = acpi_ut_allocate_owner_id (ACPI_OWNER_TYPE_METHOD); + /* Create and initialize a new walk state */ - walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, + walk_state = acpi_ds_create_walk_state (obj_desc->method.owning_id, NULL, NULL, NULL); if (!walk_state) { return_ACPI_STATUS (AE_NO_MEMORY); @@ -124,7 +148,7 @@ status = acpi_ds_init_aml_walk (walk_state, op, method_node, obj_desc->method.aml_start, obj_desc->method.aml_length, NULL, NULL, 1); if (ACPI_FAILURE (status)) { - /* TBD: delete walk state */ + acpi_ds_delete_walk_state (walk_state); return_ACPI_STATUS (status); } @@ -133,30 +157,28 @@ status = acpi_ps_parse_aml (walk_state); acpi_ps_delete_parse_tree (op); - /* * 2) Execute the method. 
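As the hunks above and below show, acpi_psx_execute() pushes the method AML through the parser twice: pass 1 only enters named objects into the namespace (tagged with a freshly allocated owner_id so they can be reclaimed later), and pass 3 re-parses while executing, with parameters bound and each walk state now deleted on failure instead of leaked. Condensed outline (parse_pass() is a hypothetical stand-in for the alloc-op / create-walk-state / acpi_ps_parse_aml sequence; the pass numbers match those handed to acpi_ds_init_aml_walk above and below):

static acpi_status parse_pass(u8 *aml, u32 length, u32 pass);   /* hypothetical */

static acpi_status execute_method(u8 *aml, u32 length)
{
    acpi_status status;

    status = parse_pass(aml, length, 1);        /* load named objects */
    if (ACPI_FAILURE(status))
        return status;
    return parse_pass(aml, length, 3);          /* parse and execute */
}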
Performs second pass parse simultaneously */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "**** Begin Method Execution **** Entry=%p obj=%p\n", method_node, obj_desc)); /* Create and init a Root Node */ - op = acpi_ps_alloc_op (AML_SCOPE_OP); + op = acpi_ps_create_scope_op (); if (!op) { return_ACPI_STATUS (AE_NO_MEMORY); } /* Init new op with the method name and pointer back to the NS node */ - acpi_ps_set_name (op, method_node->name); - op->node = method_node; + acpi_ps_set_name (op, method_node->name.integer); + op->common.node = method_node; /* Create and initialize a new walk state */ - walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, - NULL, NULL, NULL); + walk_state = acpi_ds_create_walk_state (0, NULL, NULL, NULL); if (!walk_state) { return_ACPI_STATUS (AE_NO_MEMORY); } @@ -164,7 +186,7 @@ status = acpi_ds_init_aml_walk (walk_state, op, method_node, obj_desc->method.aml_start, obj_desc->method.aml_length, params, return_obj_desc, 3); if (ACPI_FAILURE (status)) { - /* TBD: delete walk state */ + acpi_ds_delete_walk_state (walk_state); return_ACPI_STATUS (status); } @@ -178,30 +200,24 @@ /* Take away the extra reference that we gave the parameters above */ for (i = 0; params[i]; i++) { - acpi_ut_update_object_reference (params[i], REF_DECREMENT); - } - } - + /* Ignore errors, just do them all */ - if (ACPI_FAILURE (status)) { - DUMP_PATHNAME (method_node, "Ps_execute: method failed -", - ACPI_LV_ERROR, _COMPONENT); + (void) acpi_ut_update_object_reference (params[i], REF_DECREMENT); + } } - /* * If the method has returned an object, signal this to the caller with * a control exception code */ if (*return_obj_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Method returned Obj_desc=%p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Method returned obj_desc=%p\n", *return_obj_desc)); - DUMP_STACK_ENTRY (*return_obj_desc); + ACPI_DUMP_STACK_ENTRY (*return_obj_desc); status = AE_CTRL_RETURN_VALUE; } - return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/pci_bind.c linux.21rc5-ac2/drivers/acpi/pci_bind.c --- linux.21rc5/drivers/acpi/pci_bind.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/pci_bind.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,309 @@ +/* + * pci_bind.c - ACPI PCI Device Binding ($Revision: 3 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_PCI_COMPONENT +ACPI_MODULE_NAME ("pci_bind") + +#define PREFIX "ACPI: " + + +struct acpi_pci_data { + struct acpi_pci_id id; + struct pci_bus *bus; + struct pci_dev *dev; +}; + + +void +acpi_pci_data_handler ( + acpi_handle handle, + u32 function, + void *context) +{ + ACPI_FUNCTION_TRACE("acpi_pci_data_handler"); + + /* TBD: Anything we need to do here? */ + + return_VOID; +} + + +/** + * acpi_os_get_pci_id + * ------------------ + * This function is used by the ACPI Interpreter (a.k.a. Core Subsystem) + * to resolve PCI information for ACPI-PCI devices defined in the namespace. + * This typically occurs when resolving PCI operation region information. + */ +acpi_status +acpi_os_get_pci_id ( + acpi_handle handle, + struct acpi_pci_id *id) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_device *device = NULL; + struct acpi_pci_data *data = NULL; + + ACPI_FUNCTION_TRACE("acpi_os_get_pci_id"); + + if (!id) + return_ACPI_STATUS(AE_BAD_PARAMETER); + + result = acpi_bus_get_device(handle, &device); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid ACPI Bus context for device %s\n", + acpi_device_bid(device))); + return_ACPI_STATUS(AE_NOT_EXIST); + } + + status = acpi_get_data(handle, acpi_pci_data_handler, (void**) &data); + if (ACPI_FAILURE(status) || !data || !data->dev) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid ACPI-PCI context for device %s\n", + acpi_device_bid(device))); + return_ACPI_STATUS(status); + } + + *id = data->id; + + /* + id->segment = data->id.segment; + id->bus = data->id.bus; + id->device = data->id.device; + id->function = data->id.function; + */ + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Device %s has PCI address %02x:%02x:%02x.%02x\n", + acpi_device_bid(device), id->segment, id->bus, + id->device, id->function)); + + return_ACPI_STATUS(AE_OK); +} + + +int +acpi_pci_bind ( + struct acpi_device *device) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_pci_data *data = NULL; + struct acpi_pci_data *pdata = NULL; + char pathname[ACPI_PATHNAME_MAX] = {0}; + struct acpi_buffer buffer = {ACPI_PATHNAME_MAX, pathname}; + acpi_handle handle = NULL; + + ACPI_FUNCTION_TRACE("acpi_pci_bind"); + + if (!device || !device->parent) + return_VALUE(-EINVAL); + + data = kmalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); + if (!data) + return_VALUE(-ENOMEM); + memset(data, 0, sizeof(struct acpi_pci_data)); + + acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n", + pathname)); + + /* + * Segment & Bus + * ------------- + * These are obtained via the parent device's ACPI-PCI context. + */ + status = acpi_get_data(device->parent->handle, acpi_pci_data_handler, + (void**) &pdata); + if (ACPI_FAILURE(status) || !pdata || !pdata->bus) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid ACPI-PCI context for parent device %s\n", + acpi_device_bid(device->parent))); + result = -ENODEV; + goto end; + } + data->id.segment = pdata->id.segment; + data->id.bus = pdata->bus->number; + + /* + * Device & Function + * ----------------- + * These are simply obtained from the device's _ADR method. Note + * that a value of zero is valid. 
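The decode that follows pulls both halves out of one value: for PCI, _ADR packs the address as (device << 16) | function, with 0xFFFF in the function field standing for all functions of a slot (the TBD below). A short illustration with made-up values:

#include <assert.h>

static void adr_decode_example(void)
{
    unsigned long adr = (0x1FUL << 16) | 0x03;  /* device 0x1F, function 3 */

    assert((adr >> 16) == 0x1F);        /* device: high 16 bits */
    assert((adr & 0xFFFF) == 0x03);     /* function: low 16 bits */
}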
+ */ + data->id.device = device->pnp.bus_address >> 16; + data->id.function = device->pnp.bus_address & 0xFFFF; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %02x:%02x:%02x.%02x\n", + data->id.segment, data->id.bus, data->id.device, + data->id.function)); + + /* + * TBD: Support slot devices (e.g. function=0xFFFF). + */ + + /* + * Locate PCI Device + * ----------------- + * Locate matching device in PCI namespace. If it doesn't exist + * this typically means that the device isn't currently inserted + * (e.g. docking station, port replicator, etc.). + */ + data->dev = pci_find_slot(data->id.bus, PCI_DEVFN(data->id.device, data->id.function)); + if (!data->dev) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Device %02x:%02x:%02x.%02x not present in PCI namespace\n", + data->id.segment, data->id.bus, + data->id.device, data->id.function)); + result = -ENODEV; + goto end; + } + if (!data->dev->bus) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Device %02x:%02x:%02x.%02x has invalid 'bus' field\n", + data->id.segment, data->id.bus, + data->id.device, data->id.function)); + result = -ENODEV; + goto end; + } + + /* + * PCI Bridge? + * ----------- + * If so, set the 'bus' field and install the 'bind' function to + * facilitate callbacks for all of its children. + */ + if (data->dev->subordinate) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Device %02x:%02x:%02x.%02x is a PCI bridge\n", + data->id.segment, data->id.bus, + data->id.device, data->id.function)); + data->bus = data->dev->subordinate; + device->ops.bind = acpi_pci_bind; + } + + /* + * Attach ACPI-PCI Context + * ----------------------- + * Thus binding the ACPI and PCI devices. + */ + status = acpi_attach_data(device->handle, acpi_pci_data_handler, data); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to attach ACPI-PCI context to device %s\n", + acpi_device_bid(device))); + result = -ENODEV; + goto end; + } + + /* + * PCI Routing Table + * ----------------- + * Evaluate and parse _PRT, if exists. This code is independent of + * PCI bridges (above) to allow parsing of _PRT objects within the + * scope of non-bridge devices. Note that _PRTs within the scope of + * a PCI bridge assume the bridge's subordinate bus number. + * + * TBD: Can _PRTs exist within the scope of non-bridge PCI devices? 
+ */ + status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); + if (ACPI_SUCCESS(status)) { + if (data->bus) /* PCI-PCI bridge */ + acpi_pci_irq_add_prt(device->handle, data->id.segment, + data->bus->number); + else /* non-bridge PCI device */ + acpi_pci_irq_add_prt(device->handle, data->id.segment, + data->id.bus); + } + +end: + if (result) + kfree(data); + + return_VALUE(result); +} + + +int +acpi_pci_bind_root ( + struct acpi_device *device, + struct acpi_pci_id *id, + struct pci_bus *bus) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_pci_data *data = NULL; + char pathname[ACPI_PATHNAME_MAX] = {0}; + struct acpi_buffer buffer = {ACPI_PATHNAME_MAX, pathname}; + + ACPI_FUNCTION_TRACE("acpi_pci_bind_root"); + + if (!device || !id || !bus) + return_VALUE(-EINVAL); + + data = kmalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); + if (!data) + return_VALUE(-ENOMEM); + memset(data, 0, sizeof(struct acpi_pci_data)); + + data->id = *id; + data->bus = bus; + device->ops.bind = acpi_pci_bind; + + acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to " + "%02x:%02x\n", pathname, id->segment, id->bus)); + + status = acpi_attach_data(device->handle, acpi_pci_data_handler, data); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to attach ACPI-PCI context to device %s\n", + pathname)); + result = -ENODEV; + goto end; + } + +end: + if (result != 0) + kfree(data); + + return_VALUE(result); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/pci_irq.c linux.21rc5-ac2/drivers/acpi/pci_irq.c --- linux.21rc5/drivers/acpi/pci_irq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/pci_irq.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,411 @@ +/* + * pci_irq.c - ACPI PCI Interrupt Routing ($Revision: 11 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2002 Dominik Brodowski + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_X86_IO_APIC +#include +#endif +#include +#include + + +#define _COMPONENT ACPI_PCI_COMPONENT +ACPI_MODULE_NAME ("pci_irq") + +#define PREFIX "PCI: " + +struct acpi_prt_list acpi_prt; + +#ifdef CONFIG_X86 +extern void eisa_set_level_irq(unsigned int irq); +#endif + + +/* -------------------------------------------------------------------------- + PCI IRQ Routing Table (PRT) Support + -------------------------------------------------------------------------- */ + +static struct acpi_prt_entry * +acpi_pci_irq_find_prt_entry ( + int segment, + int bus, + int device, + int pin) +{ + struct list_head *node = NULL; + struct acpi_prt_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_pci_irq_find_prt_entry"); + + /* + * Parse through all PRT entries looking for a match on the specified + * PCI device's segment, bus, device, and pin (don't care about func). + * + * TBD: Acquire/release lock + */ + list_for_each(node, &acpi_prt.entries) { + entry = list_entry(node, struct acpi_prt_entry, node); + if ((segment == entry->id.segment) + && (bus == entry->id.bus) + && (device == entry->id.device) + && (pin == entry->pin)) { + return_PTR(entry); + } + } + + return_PTR(NULL); +} + + +static int +acpi_pci_irq_add_entry ( + acpi_handle handle, + int segment, + int bus, + struct acpi_pci_routing_table *prt) +{ + struct acpi_prt_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_pci_irq_add_entry"); + + if (!prt) + return_VALUE(-EINVAL); + + entry = kmalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL); + if (!entry) + return_VALUE(-ENOMEM); + memset(entry, 0, sizeof(struct acpi_prt_entry)); + + entry->id.segment = segment; + entry->id.bus = bus; + entry->id.device = (prt->address >> 16) & 0xFFFF; + entry->id.function = prt->address & 0xFFFF; + entry->pin = prt->pin; + + /* + * Type 1: Dynamic + * --------------- + * The 'source' field specifies the PCI interrupt link device used to + * configure the IRQ assigned to this slot|dev|pin. The 'source_index' + * indicates which resource descriptor in the resource template (of + * the link device) this interrupt is allocated from. + * + * NOTE: Don't query the Link Device for IRQ information at this time + * because Link Device enumeration may not have occurred yet + * (e.g. exists somewhere 'below' this _PRT entry in the ACPI + * namespace). + */ + if (prt->source[0]) { + acpi_get_handle(handle, prt->source, &entry->link.handle); + entry->link.index = prt->source_index; + } + /* + * Type 2: Static + * -------------- + * The 'source' field is NULL, and the 'source_index' field specifies + * the IRQ value, which is hardwired to specific interrupt inputs on + * the interrupt controller. 
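Restating the two entry flavours handled above with simplified types (illustration only, not patch code): a Type 1 entry names a link device plus an index into that device's resource template, while a Type 2 entry leaves 'source' empty and carries the hardwired IRQ directly in 'source_index':

struct prt_entry_sketch {
    char source[16];                    /* "" for static (Type 2) entries */
    unsigned int source_index;          /* link resource index, or the IRQ */
};

/* Returns the hardwired IRQ, or -1 when a link device must be queried later. */
static int prt_static_irq(const struct prt_entry_sketch *e)
{
    if (e->source[0])
        return -1;                      /* Type 1: dynamic, resolve via link */
    return (int) e->source_index;       /* Type 2: static IRQ number */
}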
+ */ + else + entry->link.index = prt->source_index; + + ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, + " %02X:%02X:%02X[%c] -> %s[%d]\n", + entry->id.segment, entry->id.bus, entry->id.device, + ('A' + entry->pin), prt->source, entry->link.index)); + + /* TBD: Acquire/release lock */ + list_add_tail(&entry->node, &acpi_prt.entries); + acpi_prt.count++; + + return_VALUE(0); +} + + +int +acpi_pci_irq_add_prt ( + acpi_handle handle, + int segment, + int bus) +{ + acpi_status status = AE_OK; + char pathname[ACPI_PATHNAME_MAX] = {0}; + struct acpi_buffer buffer = {0, NULL}; + struct acpi_pci_routing_table *prt = NULL; + struct acpi_pci_routing_table *entry = NULL; + static int first_time = 1; + + ACPI_FUNCTION_TRACE("acpi_pci_irq_add_prt"); + + if (first_time) { + acpi_prt.count = 0; + INIT_LIST_HEAD(&acpi_prt.entries); + first_time = 0; + } + + /* + * NOTE: We're given a 'handle' to the _PRT object's parent device + * (either a PCI root bridge or PCI-PCI bridge). + */ + + buffer.length = sizeof(pathname); + buffer.pointer = pathname; + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + + printk(KERN_DEBUG "ACPI: PCI Interrupt Routing Table [%s._PRT]\n", + pathname); + + /* + * Evaluate this _PRT and add its entries to our global list (acpi_prt). + */ + + buffer.length = 0; + buffer.pointer = NULL; + status = acpi_get_irq_routing_table(handle, &buffer); + if (status != AE_BUFFER_OVERFLOW) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PRT [%s]\n", + acpi_format_exception(status))); + return_VALUE(-ENODEV); + } + + prt = kmalloc(buffer.length, GFP_KERNEL); + if (!prt) + return_VALUE(-ENOMEM); + memset(prt, 0, buffer.length); + buffer.pointer = prt; + + status = acpi_get_irq_routing_table(handle, &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PRT [%s]\n", + acpi_format_exception(status))); + kfree(buffer.pointer); + return_VALUE(-ENODEV); + } + + entry = prt; + + while (entry && (entry->length > 0)) { + acpi_pci_irq_add_entry(handle, segment, bus, entry); + entry = (struct acpi_pci_routing_table *) + ((unsigned long) entry + entry->length); + } + + kfree(prt); + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + PCI Interrupt Routing Support + -------------------------------------------------------------------------- */ + +int +acpi_pci_irq_lookup ( + int segment, + int bus, + int device, + int pin) +{ + struct acpi_prt_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_pci_irq_lookup"); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Searching for PRT entry for %02x:%02x:%02x[%c]\n", + segment, bus, device, ('A' + pin))); + + entry = acpi_pci_irq_find_prt_entry(segment, bus, device, pin); + if (!entry) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PRT entry not found\n")); + return_VALUE(0); + } + + if (!entry->irq && entry->link.handle) { + entry->irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index); + if (!entry->irq) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid IRQ link routing entry\n")); + return_VALUE(0); + } + } + else if (!entry->irq) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid static routing entry (IRQ 0)\n")); + return_VALUE(0); + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found IRQ %d\n", entry->irq)); + + return_VALUE(entry->irq); +} + + +static int +acpi_pci_irq_derive ( + struct pci_dev *dev, + int pin) +{ + struct pci_dev *bridge = dev; + int irq = 0; + + ACPI_FUNCTION_TRACE("acpi_pci_irq_derive"); + + if (!dev) + return_VALUE(-EINVAL); + + /* + * Attempt to derive an IRQ for this 
device from a parent bridge's + * PCI interrupt routing entry (a.k.a. the "bridge swizzle"). + */ + while (!irq && bridge->bus->self) { + pin = (pin + PCI_SLOT(bridge->devfn)) % 4; + bridge = bridge->bus->self; + irq = acpi_pci_irq_lookup(0, bridge->bus->number, PCI_SLOT(bridge->devfn), pin); + } + + if (!irq) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Unable to derive IRQ for device %s\n", dev->slot_name)); + return_VALUE(0); + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Derived IRQ %d\n", irq)); + + return_VALUE(irq); +} + + +int +acpi_pci_irq_enable ( + struct pci_dev *dev) +{ + int irq = 0; + u8 pin = 0; + static u16 irq_mask = 0; + + ACPI_FUNCTION_TRACE("acpi_pci_irq_enable"); + + if (!dev) + return_VALUE(-EINVAL); + + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + if (!pin) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No interrupt pin configured for device %s\n", dev->slot_name)); + return_VALUE(0); + } + pin--; + + if (!dev->bus) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid (NULL) 'bus' field\n")); + return_VALUE(-ENODEV); + } + + /* + * First we check the PCI IRQ routing table (PRT) for an IRQ. PRT + * values override any BIOS-assigned IRQs set during boot. + */ + irq = acpi_pci_irq_lookup(0, dev->bus->number, PCI_SLOT(dev->devfn), pin); + + /* + * If no PRT entry was found, we'll try to derive an IRQ from the + * device's parent bridge. + */ + if (!irq) + irq = acpi_pci_irq_derive(dev, pin); + + /* + * No IRQ known to the ACPI subsystem - maybe the BIOS / + * driver reported one, then use it. Exit in any case. + */ + if (!irq) { + printk(KERN_WARNING PREFIX "No IRQ known for interrupt pin %c of device %s", ('A' + pin), dev->slot_name); + /* Interrupt Line values above 0xF are forbidden */ + if (dev->irq && (dev->irq <= 0xF)) { + printk(" - using IRQ %d\n", dev->irq); + return_VALUE(dev->irq); + } + else { + printk("\n"); + return_VALUE(0); + } + } + + dev->irq = irq; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device %s using IRQ %d\n", dev->slot_name, dev->irq)); + + /* + * Make sure all (legacy) PCI IRQs are set as level-triggered. + */ +#ifdef CONFIG_X86 + if ((dev->irq < 16) && !((1 << dev->irq) & irq_mask)) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Setting IRQ %d as level-triggered\n", dev->irq)); + irq_mask |= (1 << dev->irq); + eisa_set_level_irq(dev->irq); + } +#endif + + return_VALUE(dev->irq); +} + + +int __init +acpi_pci_irq_init (void) +{ + struct pci_dev *dev = NULL; + + ACPI_FUNCTION_TRACE("acpi_pci_irq_init"); + + if (!acpi_prt.count) { + printk(KERN_WARNING PREFIX "ACPI tables contain no PCI IRQ " + "routing entries\n"); + return_VALUE(-ENODEV); + } + + /* Make sure all link devices have a valid IRQ. */ + acpi_pci_link_check(); + +#ifdef CONFIG_X86_IO_APIC + /* Program IOAPICs using data from PRT entries. 
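The while loop at the top of acpi_pci_irq_derive() above is the standard PCI-PCI bridge swizzle: each bridge crossing rotates the interrupt pin by the device's slot number, so the parent bus sees pin (pin + slot) % 4. Isolated for clarity (pin encoding 0 = INTA through 3 = INTD):

static int swizzle_pin(int pin, int slot)
{
    return (pin + slot) % 4;            /* rotation per bridge crossing */
}

/*
 * Example: a device in slot 3 asserting INTB (pin 1) appears upstream of
 * its bridge as INTA, since swizzle_pin(1, 3) == 0.
 */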
*/ + if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) + mp_parse_prt(); +#endif +#ifdef CONFIG_IOSAPIC + if (acpi_irq_model == ACPI_IRQ_MODEL_IOSAPIC) + iosapic_parse_prt(); +#endif + + pci_for_each_dev(dev) + acpi_pci_irq_enable(dev); + + return_VALUE(0); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/pci_link.c linux.21rc5-ac2/drivers/acpi/pci_link.c --- linux.21rc5/drivers/acpi/pci_link.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/pci_link.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,603 @@ +/* + * pci_link.c - ACPI PCI Interrupt Link Device Driver ($Revision: 35 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2002 Dominik Brodowski + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * TBD: + * 1. Support more than one IRQ resource entry per link device (index). + * 2. Implement start/stop mechanism and use ACPI Bus Driver facilities + * for IRQ management (e.g. start()->_SRS). 
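In outline, the link driver below reduces each interrupt link device to a small record filled from the ACPI methods: _PRS supplies the set of IRQs the link may use, _CRS (gated by _STA) reports the one currently programmed, and _SRS, per the TBD above, selects a new one. A sketch of that state, mirroring the acpi_pci_link_irq struct that follows (field sizes assumed):

struct link_irq_sketch {
    unsigned char possible[16];         /* from _PRS: allowed IRQs */
    unsigned char possible_count;
    unsigned char active;               /* from _CRS: 0 = disabled/unset */
};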
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +#define _COMPONENT ACPI_PCI_COMPONENT +ACPI_MODULE_NAME ("pci_link") + +#define PREFIX "ACPI: " + + +#define ACPI_PCI_LINK_MAX_POSSIBLE 16 + +static int acpi_pci_link_add (struct acpi_device *device); +static int acpi_pci_link_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_pci_link_driver = { + .name = ACPI_PCI_LINK_DRIVER_NAME, + .class = ACPI_PCI_LINK_CLASS, + .ids = ACPI_PCI_LINK_HID, + .ops = { + .add = acpi_pci_link_add, + .remove = acpi_pci_link_remove, + }, +}; + +struct acpi_pci_link_irq { + u8 active; /* Current IRQ */ + u8 possible_count; + u8 possible[ACPI_PCI_LINK_MAX_POSSIBLE]; +}; + +struct acpi_pci_link { + struct list_head node; + struct acpi_device *device; + acpi_handle handle; + struct acpi_pci_link_irq irq; +}; + +static struct { + int count; + struct list_head entries; +} acpi_link; + + +/* -------------------------------------------------------------------------- + PCI Link Device Management + -------------------------------------------------------------------------- */ + +static acpi_status +acpi_pci_link_check_possible ( + struct acpi_resource *resource, + void *context) +{ + struct acpi_pci_link *link = (struct acpi_pci_link *) context; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_pci_link_check_possible"); + + switch (resource->id) { + case ACPI_RSTYPE_START_DPF: + return AE_OK; + case ACPI_RSTYPE_IRQ: + { + struct acpi_resource_irq *p = &resource->data.irq; + if (!p || !p->number_of_interrupts) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Blank IRQ resource\n")); + return AE_OK; + } + for (i = 0; (i < p->number_of_interrupts && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) { + if (!p->interrupts[i]) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid IRQ %d\n", p->interrupts[i])); + continue; + } + link->irq.possible[i] = p->interrupts[i]; + link->irq.possible_count++; + } + break; + } + case ACPI_RSTYPE_EXT_IRQ: + { + struct acpi_resource_ext_irq *p = &resource->data.extended_irq; + if (!p || !p->number_of_interrupts) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Blank IRQ resource\n")); + return AE_OK; + } + for (i = 0; (i < p->number_of_interrupts && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) { + if (!p->interrupts[i]) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid IRQ %d\n", p->interrupts[i])); + continue; + } + link->irq.possible[i] = p->interrupts[i]; + link->irq.possible_count++; + } + break; + } + default: + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Resource is not an IRQ entry\n")); + return AE_OK; + } + + return AE_CTRL_TERMINATE; +} + + +static int +acpi_pci_link_get_possible ( + struct acpi_pci_link *link) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE("acpi_pci_link_get_possible"); + + if (!link) + return_VALUE(-EINVAL); + + status = acpi_walk_resources(link->handle, METHOD_NAME__PRS, + acpi_pci_link_check_possible, link); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PRS\n")); + return_VALUE(-ENODEV); + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Found %d possible IRQs\n", link->irq.possible_count)); + + return_VALUE(0); +} + + +static acpi_status +acpi_pci_link_check_current ( + struct acpi_resource *resource, + void *context) +{ + int *irq = (int *) context; + + ACPI_FUNCTION_TRACE("acpi_pci_link_check_current"); + + switch (resource->id) { + case ACPI_RSTYPE_IRQ: + { + struct acpi_resource_irq *p = &resource->data.irq; + if (!p || !p->number_of_interrupts) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Blank IRQ resource\n")); + return AE_OK; + } + *irq = p->interrupts[0]; + break; + } + case ACPI_RSTYPE_EXT_IRQ: + { + 
struct acpi_resource_ext_irq *p = &resource->data.extended_irq; + if (!p || !p->number_of_interrupts) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Blank IRQ resource\n")); + return AE_OK; + } + *irq = p->interrupts[0]; + break; + } + default: + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Resource isn't an IRQ\n")); + return AE_OK; + } + return AE_CTRL_TERMINATE; +} + + +static int +acpi_pci_link_get_current ( + struct acpi_pci_link *link) +{ + int result = 0; + acpi_status status = AE_OK; + int irq = 0; + + ACPI_FUNCTION_TRACE("acpi_pci_link_get_current"); + + if (!link || !link->handle) + return_VALUE(-EINVAL); + + link->irq.active = 0; + + /* Make sure the link is enabled (no use querying if it isn't). */ + result = acpi_bus_get_status(link->device); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to read status\n")); + goto end; + } + if (!link->device->status.enabled) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link disabled\n")); + return_VALUE(0); + } + + /* + * Query and parse _CRS to get the current IRQ assignment. + */ + + status = acpi_walk_resources(link->handle, METHOD_NAME__CRS, + acpi_pci_link_check_current, &irq); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _CRS\n")); + result = -ENODEV; + goto end; + } + + if (!irq) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "No IRQ resource found\n")); + result = -ENODEV; + goto end; + } + + /* + * Note that we don't validate that the current IRQ (_CRS) exists + * within the possible IRQs (_PRS): we blindly assume that whatever + * IRQ a boot-enabled Link device is set to is the correct one. + * (Required to support systems such as the Toshiba 5005-S504.) + */ + + link->irq.active = irq; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link at IRQ %d \n", link->irq.active)); + +end: + return_VALUE(result); +} + + +static int +acpi_pci_link_set ( + struct acpi_pci_link *link, + int irq) +{ + int result = 0; + acpi_status status = AE_OK; + struct { + struct acpi_resource res; + struct acpi_resource end; + } resource; + struct acpi_buffer buffer = {sizeof(resource)+1, &resource}; + int i = 0; + int valid = 0; + + ACPI_FUNCTION_TRACE("acpi_pci_link_set"); + + if (!link || !irq) + return_VALUE(-EINVAL); + + /* See if we're already at the target IRQ. */ + if (irq == link->irq.active) + return_VALUE(0); + + /* Make sure the target IRQ is in the list of possible IRQs. */ + for (i=0; i<link->irq.possible_count; i++) { + if (irq == link->irq.possible[i]) + valid = 1; + } + if (!valid) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Target IRQ %d invalid\n", irq)); + return_VALUE(-EINVAL); + } + + memset(&resource, 0, sizeof(resource)); + + /* NOTE: PCI interrupts are always level / active_low / shared. 
*/ + if (irq <= 15) { + resource.res.id = ACPI_RSTYPE_IRQ; + resource.res.length = sizeof(struct acpi_resource); + resource.res.data.irq.edge_level = ACPI_LEVEL_SENSITIVE; + resource.res.data.irq.active_high_low = ACPI_ACTIVE_LOW; + resource.res.data.irq.shared_exclusive = ACPI_SHARED; + resource.res.data.irq.number_of_interrupts = 1; + resource.res.data.irq.interrupts[0] = irq; + } + else { + resource.res.id = ACPI_RSTYPE_EXT_IRQ; + resource.res.length = sizeof(struct acpi_resource); + resource.res.data.extended_irq.producer_consumer = ACPI_CONSUMER; + resource.res.data.extended_irq.edge_level = ACPI_LEVEL_SENSITIVE; + resource.res.data.extended_irq.active_high_low = ACPI_ACTIVE_LOW; + resource.res.data.extended_irq.shared_exclusive = ACPI_SHARED; + resource.res.data.extended_irq.number_of_interrupts = 1; + resource.res.data.extended_irq.interrupts[0] = irq; + /* ignore resource_source, it's optional */ + } + resource.end.id = ACPI_RSTYPE_END_TAG; + + status = acpi_set_current_resources(link->handle, &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _SRS\n")); + return_VALUE(-ENODEV); + } + + /* Make sure the device is enabled. */ + result = acpi_bus_get_status(link->device); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to read status\n")); + return_VALUE(result); + } + if (!link->device->status.enabled) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link disabled\n")); + return_VALUE(-ENODEV); + } + + /* Make sure the active IRQ is the one we requested. */ + result = acpi_pci_link_get_current(link); + if (result) { + return_VALUE(result); + } + if (link->irq.active != irq) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Attempt to enable at IRQ %d resulted in IRQ %d\n", + irq, link->irq.active)); + link->irq.active = 0; + return_VALUE(-ENODEV); + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Set IRQ %d\n", link->irq.active)); + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + PCI Link IRQ Management + -------------------------------------------------------------------------- */ + +#define ACPI_MAX_IRQS 256 +#define ACPI_MAX_ISA_IRQ 16 + +/* + * IRQ penalties are used to promote PCI IRQ balancing. We set each ISA- + * possible IRQ (0-15) with a default penalty relative to its feasibility + * for PCI's use: + * + * Never use: 0, 1, 2 (timer, keyboard, and cascade) + * Avoid using: 13, 14, and 15 (FP error and IDE) + * Penalize: 3, 4, 6, 7, 12 (known ISA uses) + * + * Thus we're left with IRQs 5, 9, 10, 11, and everything above 15 (IO[S]APIC) + * as 'best bets' for PCI use. + */ + +static int acpi_irq_penalty[ACPI_MAX_IRQS] = { + 1000000, 1000000, 1000000, 10000, + 10000, 0, 10000, 10000, + 10000, 0, 0, 0, + 10000, 100000, 100000, 100000, +}; + + +int +acpi_pci_link_check (void) +{ + struct list_head *node = NULL; + struct acpi_pci_link *link = NULL; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_pci_link_check"); + + /* + * Pass #1: Update penalties to facilitate IRQ balancing. 
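
The balancing described here runs in two passes: pass #1 charges each link's active (or possible) IRQs against the acpi_irq_penalty[] table above, and pass #2 (just below) picks the cheapest IRQ for each still-disabled link. A compact sketch of the pass #2 selection rule, under the same penalty-table assumptions; the reverse scan means that on a tie the higher IRQ wins, which is how IRQs 9, 10, 11 and >15 get favored:

    /* Sketch: choose the least-penalized IRQ from a link's _PRS list. */
    static int pick_best_irq(const int *penalty, const unsigned char *possible,
                             int count)
    {
            int i, best = possible[0];
            for (i = count - 1; i > 0; i--)
                    if (penalty[best] > penalty[possible[i]])
                            best = possible[i];
            return best;
    }
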
+ */ + list_for_each(node, &acpi_link.entries) { + + link = list_entry(node, struct acpi_pci_link, node); + if (!link) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid link context\n")); + continue; + } + + if (link->irq.active) + acpi_irq_penalty[link->irq.active] += 100; + else if (link->irq.possible_count) { + int penalty = 100 / link->irq.possible_count; + for (i=0; i<link->irq.possible_count; i++) { + if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) + acpi_irq_penalty[link->irq.possible[i]] += penalty; + } + } + } + + /* + * Pass #2: Enable boot-disabled Links at 'best' IRQ. + */ + list_for_each(node, &acpi_link.entries) { + int irq = 0; + int i = 0; + + link = list_entry(node, struct acpi_pci_link, node); + if (!link || !link->irq.possible_count) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid link context\n")); + continue; + } + + if (link->irq.active) + continue; + + irq = link->irq.possible[0]; + + /* + * Select the best IRQ. This is done in reverse to promote + * the use of IRQs 9, 10, 11, and >15. + */ + for (i=(link->irq.possible_count-1); i>0; i--) { + if (acpi_irq_penalty[irq] > acpi_irq_penalty[link->irq.possible[i]]) + irq = link->irq.possible[i]; + } + + /* Enable the link device at this IRQ. */ + acpi_pci_link_set(link, irq); + + acpi_irq_penalty[link->irq.active] += 100; + + printk(PREFIX "%s [%s] enabled at IRQ %d\n", + acpi_device_name(link->device), + acpi_device_bid(link->device), link->irq.active); + } + + return_VALUE(0); +} + + +int +acpi_pci_link_get_irq ( + acpi_handle handle, + int index) +{ + int result = 0; + struct acpi_device *device = NULL; + struct acpi_pci_link *link = NULL; + + ACPI_FUNCTION_TRACE("acpi_pci_link_get_irq"); + + result = acpi_bus_get_device(handle, &device); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid link device\n")); + return_VALUE(0); + } + + link = (struct acpi_pci_link *) acpi_driver_data(device); + if (!link) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid link context\n")); + return_VALUE(0); + } + + /* TBD: Support multiple index (IRQ) entries per Link Device */ + if (index) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid index %d\n", index)); + return_VALUE(0); + } + + if (!link->irq.active) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link disabled\n")); + return_VALUE(0); + } + + return_VALUE(link->irq.active); +} + + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ + +static int +acpi_pci_link_add ( + struct acpi_device *device) +{ + int result = 0; + struct acpi_pci_link *link = NULL; + int i = 0; + int found = 0; + + ACPI_FUNCTION_TRACE("acpi_pci_link_add"); + + if (!device) + return_VALUE(-EINVAL); + + link = kmalloc(sizeof(struct acpi_pci_link), GFP_KERNEL); + if (!link) + return_VALUE(-ENOMEM); + memset(link, 0, sizeof(struct acpi_pci_link)); + + link->device = device; + link->handle = device->handle; + sprintf(acpi_device_name(device), "%s", ACPI_PCI_LINK_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_PCI_LINK_CLASS); + acpi_driver_data(device) = link; + + result = acpi_pci_link_get_possible(link); + if (result) + goto end; + + acpi_pci_link_get_current(link); + + printk(PREFIX "%s [%s] (IRQs", acpi_device_name(device), acpi_device_bid(device)); + for (i = 0; i < link->irq.possible_count; i++) { + if (link->irq.active == link->irq.possible[i]) { + printk(" *%d", link->irq.possible[i]); + found = 1; + } + else + printk(" %d", link->irq.possible[i]); + } + if (!link->irq.active) + printk(", 
disabled"); + else if (!found) + printk(", enabled at IRQ %d", link->irq.active); + printk(")\n"); + + /* TBD: Acquire/release lock */ + list_add_tail(&link->node, &acpi_link.entries); + acpi_link.count++; + +end: + if (result) + kfree(link); + + return_VALUE(result); +} + + +static int +acpi_pci_link_remove ( + struct acpi_device *device, + int type) +{ + struct acpi_pci_link *link = NULL; + + ACPI_FUNCTION_TRACE("acpi_pci_link_remove"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + link = (struct acpi_pci_link *) acpi_driver_data(device); + + /* TBD: Acquire/release lock */ + list_del(&link->node); + + kfree(link); + + return_VALUE(0); +} + + +int __init +acpi_pci_link_init (void) +{ + ACPI_FUNCTION_TRACE("acpi_pci_link_init"); + + acpi_link.count = 0; + INIT_LIST_HEAD(&acpi_link.entries); + + if (acpi_bus_register_driver(&acpi_pci_link_driver) < 0) + return_VALUE(-ENODEV); + + return_VALUE(0); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/pci_root.c linux.21rc5-ac2/drivers/acpi/pci_root.c --- linux.21rc5/drivers/acpi/pci_root.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/pci_root.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,348 @@ +/* + * pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 41 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_PCI_COMPONENT +ACPI_MODULE_NAME ("pci_root") + +extern struct pci_ops *pci_root_ops; + +#define PREFIX "ACPI: " + +static int acpi_pci_root_add (struct acpi_device *device); +static int acpi_pci_root_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_pci_root_driver = { + .name = ACPI_PCI_ROOT_DRIVER_NAME, + .class = ACPI_PCI_ROOT_CLASS, + .ids = ACPI_PCI_ROOT_HID, + .ops = { + .add = acpi_pci_root_add, + .remove = acpi_pci_root_remove, + }, +}; + + struct acpi_pci_root { + struct list_head node; + acpi_handle handle; + struct acpi_pci_id id; + struct pci_bus *bus; + u64 mem_tra; + u64 io_tra; + }; + +struct list_head acpi_pci_roots; + + +void +acpi_pci_get_translations ( + struct acpi_pci_id *id, + u64 *mem_tra, + u64 *io_tra) +{ + struct list_head *node = NULL; + struct acpi_pci_root *entry; + + /* TBD: Locking */ + list_for_each(node, &acpi_pci_roots) { + entry = list_entry(node, struct acpi_pci_root, node); + if ((id->segment == entry->id.segment) + && (id->bus == entry->id.bus)) { + *mem_tra = entry->mem_tra; + *io_tra = entry->io_tra; + return; + } + } + + *mem_tra = 0; + *io_tra = 0; +} + + +static u64 +acpi_pci_root_bus_tra ( + struct acpi_resource *resource, + int type) +{ + struct acpi_resource_address16 *address16; + struct acpi_resource_address32 *address32; + struct acpi_resource_address64 *address64; + + while (1) { + switch (resource->id) { + case ACPI_RSTYPE_END_TAG: + return 0; + + case ACPI_RSTYPE_ADDRESS16: + address16 = (struct acpi_resource_address16 *) &resource->data; + if (type == address16->resource_type) { + return address16->address_translation_offset; + } + break; + + case ACPI_RSTYPE_ADDRESS32: + address32 = (struct acpi_resource_address32 *) &resource->data; + if (type == address32->resource_type) { + return address32->address_translation_offset; + } + break; + + case ACPI_RSTYPE_ADDRESS64: + address64 = (struct acpi_resource_address64 *) &resource->data; + if (type == address64->resource_type) { + return address64->address_translation_offset; + } + break; + } + resource = ACPI_PTR_ADD (struct acpi_resource, + resource, resource->length); + } + + return 0; +} + + +static int +acpi_pci_evaluate_crs ( + struct acpi_pci_root *root) +{ + acpi_status status; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + + ACPI_FUNCTION_TRACE("acpi_pci_evaluate_crs"); + + status = acpi_get_current_resources (root->handle, &buffer); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + root->io_tra = acpi_pci_root_bus_tra ((struct acpi_resource *) + buffer.pointer, ACPI_IO_RANGE); + root->mem_tra = acpi_pci_root_bus_tra ((struct acpi_resource *) + buffer.pointer, ACPI_MEMORY_RANGE); + + acpi_os_free(buffer.pointer); + return_VALUE(0); +} + + +static int +acpi_pci_root_add ( + struct acpi_device *device) +{ + int result = 0; + struct acpi_pci_root *root = NULL; + acpi_status status = AE_OK; + unsigned long value = 0; + acpi_handle handle = NULL; + + ACPI_FUNCTION_TRACE("acpi_pci_root_add"); + + if (!device) + return_VALUE(-EINVAL); + + root = kmalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); + if (!root) + return_VALUE(-ENOMEM); + memset(root, 0, sizeof(struct acpi_pci_root)); + + root->handle = device->handle; + sprintf(acpi_device_name(device), "%s", ACPI_PCI_ROOT_DEVICE_NAME); + 
sprintf(acpi_device_class(device), "%s", ACPI_PCI_ROOT_CLASS); + acpi_driver_data(device) = root; + + /* + * TBD: Doesn't the bus driver automatically set this? + */ + device->ops.bind = acpi_pci_bind; + + /* + * Segment + * ------- + * Obtained via _SEG, if exists, otherwise assumed to be zero (0). + */ + status = acpi_evaluate_integer(root->handle, METHOD_NAME__SEG, NULL, + &value); + switch (status) { + case AE_OK: + root->id.segment = (u16) value; + printk("_SEG exists! Unsupported. Abort.\n"); + BUG(); + break; + case AE_NOT_FOUND: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Assuming segment 0 (no _SEG)\n")); + root->id.segment = 0; + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _SEG\n")); + result = -ENODEV; + goto end; + } + + /* + * Bus + * --- + * Obtained via _BBN, if exists, otherwise assumed to be zero (0). + */ + status = acpi_evaluate_integer(root->handle, METHOD_NAME__BBN, NULL, + &value); + switch (status) { + case AE_OK: + root->id.bus = (u16) value; + break; + case AE_NOT_FOUND: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assuming bus 0 (no _BBN)\n")); + root->id.bus = 0; + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _BBN\n")); + result = -ENODEV; + goto end; + } + + /* + * Device & Function + * ----------------- + * Obtained from _ADR (which has already been evaluated for us). + */ + root->id.device = device->pnp.bus_address >> 16; + root->id.function = device->pnp.bus_address & 0xFFFF; + + /* + * Evaluate _CRS to get root bridge resources + * TBD: Need PCI interface for enumeration/configuration of roots. + */ + acpi_pci_evaluate_crs(root); + + /* TBD: Locking */ + list_add_tail(&root->node, &acpi_pci_roots); + + printk(KERN_INFO PREFIX "%s [%s] (%02x:%02x)\n", + acpi_device_name(device), acpi_device_bid(device), + root->id.segment, root->id.bus); + + /* + * Scan the Root Bridge + * -------------------- + * Must do this prior to any attempt to bind the root device, as the + * PCI namespace does not get created until this call is made (and + * thus the root bridge's pci_dev does not exist). + */ + root->bus = pcibios_scan_root(root->id.bus); + if (!root->bus) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Bus %02x:%02x not present in PCI namespace\n", + root->id.segment, root->id.bus)); + result = -ENODEV; + goto end; + } + + /* + * Attach ACPI-PCI Context + * ----------------------- + * Thus binding the ACPI and PCI devices. + */ + result = acpi_pci_bind_root(device, &root->id, root->bus); + if (result) + goto end; + + /* + * PCI Routing Table + * ----------------- + * Evaluate and parse _PRT, if exists. 
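
For reference, the _ADR decoding performed just above packs the PCI device (slot) number into the upper 16 bits of the bus address and the function number into the lower 16. A two-line sketch of the same unpacking, with hypothetical names:

    /* Sketch of the _ADR layout: high word = device, low word = function. */
    static void adr_decode(unsigned long adr, unsigned int *device,
                           unsigned int *function)
    {
            *device   = (adr >> 16) & 0xFFFF;
            *function = adr & 0xFFFF;
    }
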
+ */ + status = acpi_get_handle(root->handle, METHOD_NAME__PRT, &handle); + if (ACPI_SUCCESS(status)) + result = acpi_pci_irq_add_prt(root->handle, root->id.segment, + root->id.bus); + +end: + if (result) + kfree(root); + + return_VALUE(result); +} + + +static int +acpi_pci_root_remove ( + struct acpi_device *device, + int type) +{ + struct acpi_pci_root *root = NULL; + + ACPI_FUNCTION_TRACE("acpi_pci_root_remove"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + root = (struct acpi_pci_root *) acpi_driver_data(device); + + kfree(root); + + return_VALUE(0); +} + + +int __init +acpi_pci_root_init (void) +{ + ACPI_FUNCTION_TRACE("acpi_pci_root_init"); + + /* DEBUG: + acpi_dbg_layer = ACPI_PCI_COMPONENT; + acpi_dbg_level = 0xFFFFFFFF; + */ + + INIT_LIST_HEAD(&acpi_pci_roots); + + if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0) + return_VALUE(-ENODEV); + + return_VALUE(0); +} + + +void __exit +acpi_pci_root_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_pci_root_exit"); + + acpi_bus_unregister_driver(&acpi_pci_root_driver); + + return_VOID; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/power.c linux.21rc5-ac2/drivers/acpi/power.c --- linux.21rc5/drivers/acpi/power.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/power.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,604 @@ +/* + * acpi_power.c - ACPI Bus Power Management ($Revision: 38 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_POWER_COMPONENT +ACPI_MODULE_NAME ("acpi_power") + +#define PREFIX "ACPI: " + + +int acpi_power_add (struct acpi_device *device); +int acpi_power_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_power_driver = { + .name = ACPI_POWER_DRIVER_NAME, + .class = ACPI_POWER_CLASS, + .ids = ACPI_POWER_HID, + .ops = { + .add = acpi_power_add, + .remove = acpi_power_remove, + }, +}; + +struct acpi_power_resource +{ + acpi_handle handle; + acpi_bus_id name; + u32 system_level; + u32 order; + int state; + int references; +}; + +static struct list_head acpi_power_resource_list; + + +/* -------------------------------------------------------------------------- + Power Resource Management + -------------------------------------------------------------------------- */ + +static int +acpi_power_get_context ( + acpi_handle handle, + struct acpi_power_resource **resource) +{ + int result = 0; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_power_get_context"); + + if (!resource) + return_VALUE(-ENODEV); + + result = acpi_bus_get_device(handle, &device); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error getting context [%p]\n", + handle)); + return_VALUE(result); + } + + *resource = (struct acpi_power_resource *) acpi_driver_data(device); + if (!resource) + return_VALUE(-ENODEV); + + return_VALUE(0); +} + + +static int +acpi_power_get_state ( + struct acpi_power_resource *resource) +{ + acpi_status status = AE_OK; + unsigned long sta = 0; + + ACPI_FUNCTION_TRACE("acpi_power_get_state"); + + if (!resource) + return_VALUE(-EINVAL); + + status = acpi_evaluate_integer(resource->handle, "_STA", NULL, &sta); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + if (sta & 0x01) + resource->state = ACPI_POWER_RESOURCE_STATE_ON; + else + resource->state = ACPI_POWER_RESOURCE_STATE_OFF; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n", + resource->name, resource->state?"on":"off")); + + return_VALUE(0); +} + + +static int +acpi_power_get_list_state ( + struct acpi_handle_list *list, + int *state) +{ + int result = 0; + struct acpi_power_resource *resource = NULL; + u32 i = 0; + + ACPI_FUNCTION_TRACE("acpi_power_get_list_state"); + + if (!list || !state) + return_VALUE(-EINVAL); + + /* The state of the list is 'on' IFF all resources are 'on'. 
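
A sketch of that IFF rule in isolation, with plain ints standing in for the driver's resource objects (names here are illustrative, not the driver's): the first member that reads OFF short-circuits the whole list to OFF.

    /* A list is ON only if every member resource is ON. */
    static int list_state(const int *states, int count)
    {
            int i;
            for (i = 0; i < count; i++)
                    if (states[i] != 1 /* ACPI_POWER_RESOURCE_STATE_ON */)
                            return 0;
            return 1;
    }
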
*/ + + for (i=0; i<list->count; i++) { + result = acpi_power_get_context(list->handles[i], &resource); + if (result) + return_VALUE(result); + result = acpi_power_get_state(resource); + if (result) + return_VALUE(result); + + *state = resource->state; + + if (*state != ACPI_POWER_RESOURCE_STATE_ON) + break; + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n", + *state?"on":"off")); + + return_VALUE(result); +} + + +static int +acpi_power_on ( + acpi_handle handle) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_device *device = NULL; + struct acpi_power_resource *resource = NULL; + + ACPI_FUNCTION_TRACE("acpi_power_on"); + + result = acpi_power_get_context(handle, &resource); + if (result) + return_VALUE(result); + + resource->references++; + + if ((resource->references > 1) + || (resource->state == ACPI_POWER_RESOURCE_STATE_ON)) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] already on\n", + resource->name)); + return_VALUE(0); + } + + status = acpi_evaluate_object(resource->handle, "_ON", NULL, NULL); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + result = acpi_power_get_state(resource); + if (result) + return_VALUE(result); + if (resource->state != ACPI_POWER_RESOURCE_STATE_ON) + return_VALUE(-ENOEXEC); + + /* Update the power resource's _device_ power state */ + result = acpi_bus_get_device(resource->handle, &device); + if (result) + return_VALUE(result); + device->power.state = ACPI_STATE_D0; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned on\n", + resource->name)); + + return_VALUE(0); +} + + +static int +acpi_power_off_device ( + acpi_handle handle) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_device *device = NULL; + struct acpi_power_resource *resource = NULL; + + ACPI_FUNCTION_TRACE("acpi_power_off_device"); + + result = acpi_power_get_context(handle, &resource); + if (result) + return_VALUE(result); + + if (resource->references) + resource->references--; + + if (resource->references) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Resource [%s] is still in use, dereferencing\n", + resource->name)); + return_VALUE(0); + } + + if (resource->state == ACPI_POWER_RESOURCE_STATE_OFF) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] already off\n", + resource->name)); + return_VALUE(0); + } + + status = acpi_evaluate_object(resource->handle, "_OFF", NULL, NULL); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + result = acpi_power_get_state(resource); + if (result) + return_VALUE(result); + if (resource->state != ACPI_POWER_RESOURCE_STATE_OFF) + return_VALUE(-ENOEXEC); + + /* Update the power resource's _device_ power state */ + result = acpi_bus_get_device(resource->handle, &device); + if (result) + return_VALUE(result); + device->power.state = ACPI_STATE_D3; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned off\n", + resource->name)); + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Device Power Management + -------------------------------------------------------------------------- */ + +int +acpi_power_get_inferred_state ( + struct acpi_device *device) +{ + int result = 0; + struct acpi_handle_list *list = NULL; + int list_state = 0; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_power_get_inferred_state"); + + if (!device) + return_VALUE(-EINVAL); + + device->power.state = ACPI_STATE_UNKNOWN; + + /* + * We know a device's inferred power state when all the resources + * required for a given D-state are 'on'. 
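
In other words, the shallowest D-state whose whole resource list is ON wins, and a device with nothing ON is assumed fully off. A hedged sketch of that scan (enum values and the on[] summary table are illustrative only):

    enum { D0, D1, D2, D3, DSTATES };

    /* on[d] is 1 when every resource required for D-state d reads ON. */
    static int infer_state(const int on[DSTATES])
    {
            int d;
            for (d = D0; d <= D3; d++)
                    if (on[d])
                            return d;
            return D3;      /* nothing on: treat as fully off */
    }
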
+ */ + for (i=ACPI_STATE_D0; i<=ACPI_STATE_D3; i++) { + list = &device->power.states[i].resources; + if (list->count < 1) + continue; + + result = acpi_power_get_list_state(list, &list_state); + if (result) + return_VALUE(result); + + if (list_state == ACPI_POWER_RESOURCE_STATE_ON) { + device->power.state = i; + return_VALUE(0); + } + } + + device->power.state = ACPI_STATE_D3; + + return_VALUE(0); +} + + +int +acpi_power_transition ( + struct acpi_device *device, + int state) +{ + int result = 0; + struct acpi_handle_list *cl = NULL; /* Current Resources */ + struct acpi_handle_list *tl = NULL; /* Target Resources */ + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_power_transition"); + + if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) + return_VALUE(-EINVAL); + + cl = &device->power.states[device->power.state].resources; + tl = &device->power.states[state].resources; + + device->power.state = ACPI_STATE_UNKNOWN; + + if (!cl->count && !tl->count) { + result = -ENODEV; + goto end; + } + + /* TBD: Resources must be ordered. */ + + /* + * First we reference all power resources required in the target list + * (e.g. so the device doesn't lose power while transitioning). + */ + for (i=0; i<tl->count; i++) { + result = acpi_power_on(tl->handles[i]); + if (result) + goto end; + } + + device->power.state = state; + + /* + * Then we dereference all power resources used in the current list. + */ + for (i=0; i<cl->count; i++) { + result = acpi_power_off_device(cl->handles[i]); + if (result) + goto end; + } + +end: + if (result) + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Error transitioning device [%s] to D%d\n", + device->pnp.bus_id, state)); + + return_VALUE(result); +} + + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +struct proc_dir_entry *acpi_power_dir = NULL; + + +static int +acpi_power_read_status ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_power_resource *resource = NULL; + char *p = page; + int len; + + ACPI_FUNCTION_TRACE("acpi_power_read_status"); + + if (!data || (off != 0)) + goto end; + + resource = (struct acpi_power_resource *) data; + + p += sprintf(p, "state: "); + switch (resource->state) { + case ACPI_POWER_RESOURCE_STATE_ON: + p += sprintf(p, "on\n"); + break; + case ACPI_POWER_RESOURCE_STATE_OFF: + p += sprintf(p, "off\n"); + break; + default: + p += sprintf(p, "unknown\n"); + break; + } + + p += sprintf(p, "system level: S%d\n", + resource->system_level); + p += sprintf(p, "order: %d\n", + resource->order); + p += sprintf(p, "reference count: %d\n", + resource->references); + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_power_add_fs ( + struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_power_add_fs"); + + if (!device) + return_VALUE(-EINVAL); + + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_power_dir); + if (!acpi_device_dir(device)) + return_VALUE(-ENODEV); + } + + /* 'status' [R] */ + entry = create_proc_entry(ACPI_POWER_FILE_STATUS, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_POWER_FILE_STATUS)); + else { + entry->read_proc = acpi_power_read_status; + entry->data = 
acpi_driver_data(device); + } + + return_VALUE(0); +} + + +static int +acpi_power_remove_fs ( + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_power_remove_fs"); + + if (acpi_device_dir(device)) { + remove_proc_entry(acpi_device_bid(device), acpi_power_dir); + acpi_device_dir(device) = NULL; + } + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ + +int +acpi_power_add ( + struct acpi_device *device) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_power_resource *resource = NULL; + union acpi_object acpi_object; + struct acpi_buffer buffer = {sizeof(acpi_object), &acpi_object}; + + ACPI_FUNCTION_TRACE("acpi_power_add"); + + if (!device) + return_VALUE(-EINVAL); + + resource = kmalloc(sizeof(struct acpi_power_resource), GFP_KERNEL); + if (!resource) + return_VALUE(-ENOMEM); + memset(resource, 0, sizeof(struct acpi_power_resource)); + + resource->handle = device->handle; + sprintf(resource->name, "%s", device->pnp.bus_id); + sprintf(acpi_device_name(device), "%s", ACPI_POWER_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_POWER_CLASS); + acpi_driver_data(device) = resource; + + /* Evaluate the object to get the system level and resource order. */ + status = acpi_evaluate_object(resource->handle, NULL, NULL, &buffer); + if (ACPI_FAILURE(status)) { + result = -ENODEV; + goto end; + } + resource->system_level = acpi_object.power_resource.system_level; + resource->order = acpi_object.power_resource.resource_order; + + result = acpi_power_get_state(resource); + if (result) + goto end; + + switch (resource->state) { + case ACPI_POWER_RESOURCE_STATE_ON: + device->power.state = ACPI_STATE_D0; + break; + case ACPI_POWER_RESOURCE_STATE_OFF: + device->power.state = ACPI_STATE_D3; + break; + default: + device->power.state = ACPI_STATE_UNKNOWN; + break; + } + + result = acpi_power_add_fs(device); + if (result) + goto end; + + printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device), + acpi_device_bid(device), resource->state?"on":"off"); + +end: + if (result) + kfree(resource); + + return_VALUE(result); +} + + +int +acpi_power_remove ( + struct acpi_device *device, + int type) +{ + struct acpi_power_resource *resource = NULL; + + ACPI_FUNCTION_TRACE("acpi_power_remove"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + resource = (struct acpi_power_resource *) acpi_driver_data(device); + + acpi_power_remove_fs(device); + + kfree(resource); + + return_VALUE(0); +} + + +int __init +acpi_power_init (void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_power_init"); + + INIT_LIST_HEAD(&acpi_power_resource_list); + + acpi_power_dir = proc_mkdir(ACPI_POWER_CLASS, acpi_root_dir); + if (!acpi_power_dir) + return_VALUE(-ENODEV); + + result = acpi_bus_register_driver(&acpi_power_driver); + if (result < 0) { + remove_proc_entry(ACPI_POWER_CLASS, acpi_root_dir); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + + +void __exit +acpi_power_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_power_exit"); + + /* TBD: Empty acpi_power_resource_list */ + + acpi_bus_unregister_driver(&acpi_power_driver); + + remove_proc_entry(ACPI_POWER_CLASS, acpi_root_dir); + + return_VOID; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/processor.c linux.21rc5-ac2/drivers/acpi/processor.c --- linux.21rc5/drivers/acpi/processor.c 1970-01-01 01:00:00.000000000 +0100 
+++ linux.21rc5-ac2/drivers/acpi/processor.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,2329 @@ +/* + * acpi_processor.c - ACPI Processor Driver ($Revision: 69 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * TBD: + * 1. Make # power/performance states dynamic. + * 2. Support duty_cycle values that span bit 4. + * 3. Optimize by having scheduler determine busyness instead of + * having us try to calculate it here. + * 4. Need C1 timing -- must modify kernel (IRQ handler) to get this. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_PROCESSOR_COMPONENT +ACPI_MODULE_NAME ("acpi_processor") + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION(ACPI_PROCESSOR_DRIVER_NAME); +MODULE_LICENSE("GPL"); + +#define PREFIX "ACPI: " + +#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) +#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ +#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ + +#define ACPI_PROCESSOR_BUSY_METRIC 10 + +#define ACPI_PROCESSOR_MAX_POWER ACPI_C_STATE_COUNT +#define ACPI_PROCESSOR_MAX_C2_LATENCY 100 +#define ACPI_PROCESSOR_MAX_C3_LATENCY 1000 + +#define ACPI_PROCESSOR_MAX_PERFORMANCE 8 + +#define ACPI_PROCESSOR_MAX_THROTTLING 16 +#define ACPI_PROCESSOR_MAX_THROTTLE 250 /* 25% */ +#define ACPI_PROCESSOR_MAX_DUTY_WIDTH 4 + +const u32 POWER_OF_2[] = {1,2,4,8,16,32,64}; + +#define ACPI_PROCESSOR_LIMIT_USER 0 +#define ACPI_PROCESSOR_LIMIT_THERMAL 1 + +static int acpi_processor_add (struct acpi_device *device); +static int acpi_processor_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_processor_driver = { + .name = ACPI_PROCESSOR_DRIVER_NAME, + .class = ACPI_PROCESSOR_CLASS, + .ids = ACPI_PROCESSOR_HID, + .ops = { + .add = acpi_processor_add, + .remove = acpi_processor_remove, + }, +}; + +/* Power Management */ + +struct acpi_processor_cx_policy { + u32 count; + int state; + struct { + u32 time; + u32 ticks; + u32 count; + u32 bm; + } threshold; +}; + +struct acpi_processor_cx { + u8 valid; + u32 address; + u32 latency; + u32 latency_ticks; + u32 power; + u32 usage; + struct acpi_processor_cx_policy promotion; + struct acpi_processor_cx_policy demotion; +}; + +struct acpi_processor_power { + int state; + int default_state; + u32 bm_activity; + struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER]; +}; + +/* Performance Management */ + +struct acpi_pct_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 reserved; + u64 address; +} 
__attribute__ ((packed)); + +struct acpi_processor_px { + acpi_integer core_frequency; /* megahertz */ + acpi_integer power; /* milliWatts */ + acpi_integer transition_latency; /* microseconds */ + acpi_integer bus_master_latency; /* microseconds */ + acpi_integer control; /* control value */ + acpi_integer status; /* success indicator */ +}; + +struct acpi_processor_performance { + int state; + int platform_limit; + u16 control_register; + u16 status_register; + int state_count; + struct acpi_processor_px states[ACPI_PROCESSOR_MAX_PERFORMANCE]; +}; + + +/* Throttling Control */ + +struct acpi_processor_tx { + u16 power; + u16 performance; +}; + +struct acpi_processor_throttling { + int state; + u32 address; + u8 duty_offset; + u8 duty_width; + int state_count; + struct acpi_processor_tx states[ACPI_PROCESSOR_MAX_THROTTLING]; +}; + +/* Limit Interface */ + +struct acpi_processor_lx { + int px; /* performance state */ + int tx; /* throttle level */ +}; + +struct acpi_processor_limit { + struct acpi_processor_lx state; /* current limit */ + struct acpi_processor_lx thermal; /* thermal limit */ + struct acpi_processor_lx user; /* user limit */ +}; + + +struct acpi_processor_flags { + u8 power:1; + u8 performance:1; + u8 throttling:1; + u8 limit:1; + u8 bm_control:1; + u8 bm_check:1; + u8 reserved:2; +}; + +struct acpi_processor { + acpi_handle handle; + u32 acpi_id; + u32 id; + struct acpi_processor_flags flags; + struct acpi_processor_power power; + struct acpi_processor_performance performance; + struct acpi_processor_throttling throttling; + struct acpi_processor_limit limit; +}; + +struct acpi_processor_errata { + u8 smp; + struct { + u8 throttle:1; + u8 fdma:1; + u8 reserved:6; + u32 bmisx; + } piix4; +}; + +static struct acpi_processor *processors[NR_CPUS]; +static struct acpi_processor_errata errata; +static void (*pm_idle_save)(void) = NULL; + + +/* -------------------------------------------------------------------------- + Errata Handling + -------------------------------------------------------------------------- */ + +int +acpi_processor_errata_piix4 ( + struct pci_dev *dev) +{ + u8 rev = 0; + u8 value1 = 0; + u8 value2 = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_errata_piix4"); + + if (!dev) + return_VALUE(-EINVAL); + + /* + * Note that 'dev' references the PIIX4 ACPI Controller. + */ + + pci_read_config_byte(dev, PCI_REVISION_ID, &rev); + + switch (rev) { + case 0: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n")); + break; + case 1: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n")); + break; + case 2: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n")); + break; + case 3: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n")); + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n")); + break; + } + + switch (rev) { + + case 0: /* PIIX4 A-step */ + case 1: /* PIIX4 B-step */ + /* + * See specification changes #13 ("Manual Throttle Duty Cycle") + * and #14 ("Enabling and Disabling Manual Throttle"), plus + * erratum #5 ("STPCLK# Deassertion Time") from the January + * 2002 PIIX4 specification update. Applies to only older + * PIIX4 models. + */ + errata.piix4.throttle = 1; + + case 2: /* PIIX4E */ + case 3: /* PIIX4M */ + /* + * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA + * Livelock") from the January 2002 PIIX4 specification update. + * Applies to all PIIX4 models. + */ + + /* + * BM-IDE + * ------ + * Find the PIIX4 IDE Controller and get the Bus Master IDE + * Status register address. 
We'll use this later to read + * each IDE controller's DMA status to make sure we catch all + * DMA activity. + */ + dev = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82371AB, + PCI_ANY_ID, PCI_ANY_ID, NULL); + if (dev) + errata.piix4.bmisx = pci_resource_start(dev, 4); + + /* + * Type-F DMA + * ---------- + * Find the PIIX4 ISA Controller and read the Motherboard + * DMA controller's status to see if Type-F (Fast) DMA mode + * is enabled (bit 7) on either channel. Note that we'll + * disable C3 support if this is enabled, as some legacy + * devices won't operate well if fast DMA is disabled. + */ + dev = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82371AB_0, + PCI_ANY_ID, PCI_ANY_ID, NULL); + if (dev) { + pci_read_config_byte(dev, 0x76, &value1); + pci_read_config_byte(dev, 0x77, &value2); + if ((value1 & 0x80) || (value2 & 0x80)) + errata.piix4.fdma = 1; + } + + break; + } + + if (errata.piix4.bmisx) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Bus master activity detection (BM-IDE) erratum enabled\n")); + if (errata.piix4.fdma) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Type-F DMA livelock erratum (C3 disabled)\n")); + + return_VALUE(0); +} + + +int +acpi_processor_errata ( + struct acpi_processor *pr) +{ + int result = 0; + struct pci_dev *dev = NULL; + + ACPI_FUNCTION_TRACE("acpi_processor_errata"); + + if (!pr) + return_VALUE(-EINVAL); + + /* + * PIIX4 + */ + dev = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, PCI_ANY_ID, NULL); + if (dev) + result = acpi_processor_errata_piix4(dev); + + return_VALUE(result); +} + + +/* -------------------------------------------------------------------------- + Power Management + -------------------------------------------------------------------------- */ + +static inline u32 +ticks_elapsed ( + u32 t1, + u32 t2) +{ + if (t2 >= t1) + return (t2 - t1); + else if (!acpi_fadt.tmr_val_ext) + return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); + else + return ((0xFFFFFFFF - t1) + t2); +} + + +static void +acpi_processor_power_activate ( + struct acpi_processor *pr, + int state) +{ + if (!pr) + return; + + pr->power.states[pr->power.state].promotion.count = 0; + pr->power.states[pr->power.state].demotion.count = 0; + + /* Cleanup from old state. */ + switch (pr->power.state) { + case ACPI_STATE_C3: + /* Disable bus master reload */ + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK); + break; + } + + /* Prepare to use new state. */ + switch (state) { + case ACPI_STATE_C3: + /* Enable bus master reload */ + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK); + break; + } + + pr->power.state = state; + + return; +} + + +static void +acpi_processor_idle (void) +{ + struct acpi_processor *pr = NULL; + struct acpi_processor_cx *cx = NULL; + int next_state = 0; + int sleep_ticks = 0; + u32 t1, t2 = 0; + + pr = processors[smp_processor_id()]; + if (!pr) + return; + + /* + * Interrupts must be disabled during bus mastering calculations and + * for C2/C3 transitions. + */ + __cli(); + + cx = &(pr->power.states[pr->power.state]); + + /* + * Check BM Activity + * ----------------- + * Check for bus mastering activity (if required), record, and check + * for demotion. 
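
The recording step described here amounts to a one-bit-per-pass history register: the history is shifted left on each idle pass and bit 0 is set when BM_STS (or, per the PIIX4 erratum, a BMIDEA bit) shows DMA activity. A hedged sketch, with bit 0 as the most recent pass; policy then masks the low bits, e.g. 0x0F meaning "any activity in the last four passes":

    /* Shift in one sample of bus-master activity per idle pass. */
    static unsigned int bm_record(unsigned int history, int active_now)
    {
            history <<= 1;
            if (active_now)
                    history |= 1;
            return history;
    }

    /* e.g. demote if (bm_record(...) & demotion_bm_mask) is non-zero. */
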
+ */ + if (pr->flags.bm_check) { + u32 bm_status = 0; + + pr->power.bm_activity <<= 1; + + acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, + &bm_status, ACPI_MTX_DO_NOT_LOCK); + if (bm_status) { + pr->power.bm_activity++; + acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, + 1, ACPI_MTX_DO_NOT_LOCK); + } + /* + * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect + * the true state of bus mastering activity; forcing us to + * manually check the BMIDEA bit of each IDE channel. + */ + else if (errata.piix4.bmisx) { + if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) + || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) + pr->power.bm_activity++; + } + /* + * Apply bus mastering demotion policy. Automatically demote + * to avoid a faulty transition. Note that the processor + * won't enter a low-power state during this call (to this + * function) but should upon the next. + * + * TBD: A better policy might be to fallback to the demotion + * state (use it for this quantum only) instead of + * demoting -- and rely on duration as our sole demotion + * qualification. This may, however, introduce DMA + * issues (e.g. floppy DMA transfer overrun/underrun). + */ + if (pr->power.bm_activity & cx->demotion.threshold.bm) { + __sti(); + next_state = cx->demotion.state; + goto end; + } + } + + cx->usage++; + + /* + * Sleep: + * ------ + * Invoke the current Cx state to put the processor to sleep. + */ + switch (pr->power.state) { + + case ACPI_STATE_C1: + /* Invoke C1. */ + safe_halt(); + /* + * TBD: Can't get time duration while in C1, as resumes + * go to an ISR rather than here. Need to instrument + * base interrupt handler. + */ + sleep_ticks = 0xFFFFFFFF; + break; + + case ACPI_STATE_C2: + /* Get start time (ticks) */ + t1 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Invoke C2 */ + inb(pr->power.states[ACPI_STATE_C2].address); + /* Dummy op - must do something useless after P_LVL2 read */ + t2 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Get end time (ticks) */ + t2 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Re-enable interrupts */ + __sti(); + /* Compute time (ticks) that we were actually asleep */ + sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; + break; + + case ACPI_STATE_C3: + /* Disable bus master arbitration */ + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK); + /* Get start time (ticks) */ + t1 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Invoke C3 */ + inb(pr->power.states[ACPI_STATE_C3].address); + /* Dummy op - must do something useless after P_LVL3 read */ + t2 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Get end time (ticks) */ + t2 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Enable bus master arbitration */ + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK); + /* Re-enable interrupts */ + __sti(); + /* Compute time (ticks) that we were actually asleep */ + sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; + break; + + default: + __sti(); + return; + } + + next_state = pr->power.state; + + /* + * Promotion? + * ---------- + * Track the number of longs (time asleep is greater than threshold) + * and promote when the count threshold is reached. Note that bus + * mastering activity may prevent promotions. 
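
The counting scheme that follows is symmetric: each "long" sleep bumps the promotion counter and clears the demotion counter (and vice versa), and a transition fires only once a counter reaches its threshold. A hedged sketch of just the promotion half, with illustrative names rather than the driver's:

    struct cx_counts { unsigned int promote, demote; };

    /* Returns 1 when enough consecutive long sleeps have accumulated. */
    static int should_promote(struct cx_counts *c, unsigned int sleep_ticks,
                              unsigned int long_ticks, unsigned int needed)
    {
            if (sleep_ticks > long_ticks) {
                    c->demote = 0;
                    return ++c->promote >= needed;
            }
            return 0;
    }
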
+ */ + if (cx->promotion.state) { + if (sleep_ticks > cx->promotion.threshold.ticks) { + cx->promotion.count++; + cx->demotion.count = 0; + if (cx->promotion.count >= cx->promotion.threshold.count) { + if (pr->flags.bm_check) { + if (!(pr->power.bm_activity & cx->promotion.threshold.bm)) { + next_state = cx->promotion.state; + goto end; + } + } + else { + next_state = cx->promotion.state; + goto end; + } + } + } + } + + /* + * Demotion? + * --------- + * Track the number of shorts (time asleep is less than time threshold) + * and demote when the usage threshold is reached. + */ + if (cx->demotion.state) { + if (sleep_ticks < cx->demotion.threshold.ticks) { + cx->demotion.count++; + cx->promotion.count = 0; + if (cx->demotion.count >= cx->demotion.threshold.count) { + next_state = cx->demotion.state; + goto end; + } + } + } + +end: + /* + * New Cx State? + * ------------- + * If we're going to start using a new Cx state we must clean up + * from the previous and prepare to use the new. + */ + if (next_state != pr->power.state) + acpi_processor_power_activate(pr, next_state); + + return; +} + + +static int +acpi_processor_set_power_policy ( + struct acpi_processor *pr) +{ + ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy"); + + /* + * This function sets the default Cx state policy (OS idle handler). + * Our scheme is to promote quickly to C2 but more conservatively + * to C3. We're favoring C2 for its characteristics of low latency + * (quick response), good power savings, and ability to allow bus + * mastering activity. Note that the Cx state policy is completely + * customizable and can be altered dynamically. + */ + + if (!pr) + return_VALUE(-EINVAL); + + /* + * C0/C1 + * ----- + */ + pr->power.state = ACPI_STATE_C1; + pr->power.default_state = ACPI_STATE_C1; + + /* + * C1/C2 + * ----- + * Set the default C1 promotion and C2 demotion policies, where we + * promote from C1 to C2 after several (10) successive C1 transitions, + * as we cannot (currently) measure the time spent in C1. Demote from + * C2 to C1 anytime we experience a 'short' (time spent in C2 is less + * than the C2 transition latency). Note the simplifying assumption + * that the 'cost' of a transition is amortized when we sleep for at + * least as long as the transition's latency (thus the total transition + * time is two times the latency). + * + * TBD: Measure C1 sleep times by instrumenting the core IRQ handler. + * TBD: Demote to default C-State after long periods of activity. + * TBD: Investigate policy's use of CPU utilization -vs- sleep duration. + */ + if (pr->power.states[ACPI_STATE_C2].valid) { + pr->power.states[ACPI_STATE_C1].promotion.threshold.count = 10; + pr->power.states[ACPI_STATE_C1].promotion.threshold.ticks = + pr->power.states[ACPI_STATE_C2].latency_ticks; + pr->power.states[ACPI_STATE_C1].promotion.state = ACPI_STATE_C2; + + pr->power.states[ACPI_STATE_C2].demotion.threshold.count = 1; + pr->power.states[ACPI_STATE_C2].demotion.threshold.ticks = + pr->power.states[ACPI_STATE_C2].latency_ticks; + pr->power.states[ACPI_STATE_C2].demotion.state = ACPI_STATE_C1; + } + + /* + * C2/C3 + * ----- + * Set default C2 promotion and C3 demotion policies, where we promote + * from C2 to C3 after several (4) cycles of no bus mastering activity + * while maintaining sleep time criteria. Demote immediately on a + * short or whenever bus mastering activity occurs. 
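
All of these threshold.ticks values come from latencies converted with US_TO_PM_TIMER_TICKS(): the ACPI PM timer runs at 3.579545 MHz, so a microsecond count maps to roughly 3.58 ticks per microsecond. A self-contained restatement of the driver's macro as a function (PM_TIMER_FREQUENCY is the kernel's constant, repeated here so the sketch stands alone):

    #define PM_TIMER_FREQUENCY 3579545 /* Hz, per the ACPI specification */

    /* Same integer arithmetic as US_TO_PM_TIMER_TICKS(t) above. */
    static unsigned int us_to_pm_ticks(unsigned int us)
    {
            return (us * (PM_TIMER_FREQUENCY / 1000)) / 1000;
    }
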
+ */ + if ((pr->power.states[ACPI_STATE_C2].valid) && + (pr->power.states[ACPI_STATE_C3].valid)) { + pr->power.states[ACPI_STATE_C2].promotion.threshold.count = 4; + pr->power.states[ACPI_STATE_C2].promotion.threshold.ticks = + pr->power.states[ACPI_STATE_C3].latency_ticks; + pr->power.states[ACPI_STATE_C2].promotion.threshold.bm = 0x0F; + pr->power.states[ACPI_STATE_C2].promotion.state = ACPI_STATE_C3; + + pr->power.states[ACPI_STATE_C3].demotion.threshold.count = 1; + pr->power.states[ACPI_STATE_C3].demotion.threshold.ticks = + pr->power.states[ACPI_STATE_C3].latency_ticks; + pr->power.states[ACPI_STATE_C3].demotion.threshold.bm = 0x0F; + pr->power.states[ACPI_STATE_C3].demotion.state = ACPI_STATE_C2; + } + + return_VALUE(0); +} + + +int +acpi_processor_get_power_info ( + struct acpi_processor *pr) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_get_power_info"); + + if (!pr) + return_VALUE(-EINVAL); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "lvl2[0x%08x] lvl3[0x%08x]\n", + pr->power.states[ACPI_STATE_C2].address, + pr->power.states[ACPI_STATE_C3].address)); + + /* TBD: Support ACPI 2.0 objects */ + + /* + * C0 + * -- + * This state exists only as filler in our array. + */ + pr->power.states[ACPI_STATE_C0].valid = 1; + + /* + * C1 + * -- + * ACPI requires C1 support for all processors. + * + * TBD: What about PROC_C1? + */ + pr->power.states[ACPI_STATE_C1].valid = 1; + + /* + * C2 + * -- + * We're (currently) only supporting C2 on UP systems. + * + * TBD: Support for C2 on MP (P_LVL2_UP). + */ + if (pr->power.states[ACPI_STATE_C2].address) { + + pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; + + /* + * C2 latency must be less than or equal to 100 microseconds. + */ + if (acpi_fadt.plvl2_lat > ACPI_PROCESSOR_MAX_C2_LATENCY) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "C2 latency too large [%d]\n", + acpi_fadt.plvl2_lat)); + /* + * Only support C2 on UP systems (see TBD above). + */ + else if (errata.smp) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "C2 not supported in SMP mode\n")); + /* + * Otherwise we've met all of our C2 requirements. + * Normalize the C2 latency to expedite policy. + */ + else { + pr->power.states[ACPI_STATE_C2].valid = 1; + pr->power.states[ACPI_STATE_C2].latency_ticks = + US_TO_PM_TIMER_TICKS(acpi_fadt.plvl2_lat); + } + } + + /* + * C3 + * -- + * TBD: Investigate use of WBINVD on UP/SMP system in absence of + * bm_control. + */ + if (pr->power.states[ACPI_STATE_C3].address) { + + pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; + + /* + * C3 latency must be less than or equal to 1000 microseconds. + */ + if (acpi_fadt.plvl3_lat > ACPI_PROCESSOR_MAX_C3_LATENCY) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "C3 latency too large [%d]\n", + acpi_fadt.plvl3_lat)); + /* + * Only support C3 when bus mastering arbitration control + * is present (able to disable bus mastering to maintain + * cache coherency while in C3). + */ + else if (!pr->flags.bm_control) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "C3 support requires bus mastering control\n")); + /* + * Only support C3 on UP systems, as bm_control is only viable + * on a UP system and flushing caches (e.g. WBINVD) is simply + * too costly (at this time). + */ + else if (errata.smp) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "C3 not supported in SMP mode\n")); + /* + * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) + * DMA transfers are used by any ISA device to avoid livelock. 
+		 * Note that we could disable Type-F DMA (as recommended by
+		 * the erratum), but this is known to disrupt certain ISA
+		 * devices thus we take the conservative approach.
+		 */
+		else if (errata.piix4.fdma) {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				"C3 not supported on PIIX4 with Type-F DMA\n"));
+		}
+		/*
+		 * Otherwise we've met all of our C3 requirements.
+		 * Normalize the C3 latency to expedite policy. Enable
+		 * checking of bus mastering status (bm_check) so we can
+		 * use this in our C3 policy.
+		 */
+		else {
+			pr->power.states[ACPI_STATE_C3].valid = 1;
+			pr->power.states[ACPI_STATE_C3].latency_ticks =
+				US_TO_PM_TIMER_TICKS(acpi_fadt.plvl3_lat);
+			pr->flags.bm_check = 1;
+		}
+	}
+
+	/*
+	 * Set Default Policy
+	 * ------------------
+	 * Now that we know which states are supported, set the default
+	 * policy. Note that this policy can be changed dynamically
+	 * (e.g. encourage deeper sleeps to conserve battery life when
+	 * not on AC).
+	 */
+	result = acpi_processor_set_power_policy(pr);
+	if (result)
+		return_VALUE(result);
+
+	/*
+	 * If this processor supports C2 or C3 we denote it as being 'power
+	 * manageable'. Note that there's really no policy involved for
+	 * when only C1 is supported.
+	 */
+	if (pr->power.states[ACPI_STATE_C2].valid
+	    || pr->power.states[ACPI_STATE_C3].valid)
+		pr->flags.power = 1;
+
+	return_VALUE(0);
+}
+
+
+/* --------------------------------------------------------------------------
+                              Performance Management
+   -------------------------------------------------------------------------- */
+
+static int
+acpi_processor_get_platform_limit (
+	struct acpi_processor*	pr)
+{
+	acpi_status		status = 0;
+	unsigned long		ppc = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit");
+
+	if (!pr)
+		return_VALUE(-EINVAL);
+
+	/*
+	 * _PPC indicates the maximum state currently supported by the platform
+	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
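+	 *
+	 * For example (hypothetical platform): with four P-states P0..P3,
+	 * a _PPC value of 2 means only P2 and P3 may be used, and
+	 * acpi_processor_set_performance() below will refuse any target
+	 * state numerically below platform_limit accordingly.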
+ */ + status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc); + if(ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PPC\n")); + return_VALUE(-ENODEV); + } + + pr->performance.platform_limit = (int) ppc; + + return_VALUE(0); +} + + +static int +acpi_processor_get_performance_control ( + struct acpi_processor *pr) +{ + int result = 0; + acpi_status status = 0; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *pct = NULL; + union acpi_object obj = {0}; + struct acpi_pct_register *reg = NULL; + + ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control"); + + status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer); + if(ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PCT\n")); + return_VALUE(-ENODEV); + } + + pct = (union acpi_object *) buffer.pointer; + if (!pct || (pct->type != ACPI_TYPE_PACKAGE) + || (pct->package.count != 2)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data\n")); + result = -EFAULT; + goto end; + } + + /* + * control_register + */ + + obj = pct->package.elements[0]; + + if ((obj.type != ACPI_TYPE_BUFFER) + || (obj.buffer.length < sizeof(struct acpi_pct_register)) + || (obj.buffer.pointer == NULL)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid _PCT data (control_register)\n")); + result = -EFAULT; + goto end; + } + + reg = (struct acpi_pct_register *) (obj.buffer.pointer); + + if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unsupported address space [%d] (control_register)\n", + (u32) reg->space_id)); + result = -EFAULT; + goto end; + } + + pr->performance.control_register = (u16) reg->address; + + /* + * status_register + */ + + obj = pct->package.elements[1]; + + if ((obj.type != ACPI_TYPE_BUFFER) + || (obj.buffer.length < sizeof(struct acpi_pct_register)) + || (obj.buffer.pointer == NULL)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid _PCT data (status_register)\n")); + result = -EFAULT; + goto end; + } + + reg = (struct acpi_pct_register *) (obj.buffer.pointer); + + if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unsupported address space [%d] (status_register)\n", + (u32) reg->space_id)); + result = -EFAULT; + goto end; + } + + pr->performance.status_register = (u16) reg->address; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "control_register[0x%04x] status_register[0x%04x]\n", + pr->performance.control_register, + pr->performance.status_register)); + +end: + acpi_os_free(buffer.pointer); + + return_VALUE(result); +} + + +static int +acpi_processor_get_performance_states ( + struct acpi_processor* pr) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + struct acpi_buffer format = {sizeof("NNNNNN"), "NNNNNN"}; + struct acpi_buffer state = {0, NULL}; + union acpi_object *pss = NULL; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states"); + + status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); + if(ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PSS\n")); + return_VALUE(-ENODEV); + } + + pss = (union acpi_object *) buffer.pointer; + if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n")); + result = -EFAULT; + goto end; + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n", + pss->package.count)); + + if (pss->package.count > ACPI_PROCESSOR_MAX_PERFORMANCE) { + pr->performance.state_count 
= ACPI_PROCESSOR_MAX_PERFORMANCE; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Limiting number of states to max (%d)\n", + ACPI_PROCESSOR_MAX_PERFORMANCE)); + } + else + pr->performance.state_count = pss->package.count; + + if (pr->performance.state_count > 1) + pr->flags.performance = 1; + + for (i = 0; i < pr->performance.state_count; i++) { + + struct acpi_processor_px *px = &(pr->performance.states[i]); + + state.length = sizeof(struct acpi_processor_px); + state.pointer = px; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i)); + + status = acpi_extract_package(&(pss->package.elements[i]), + &format, &state); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n")); + result = -EFAULT; + goto end; + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n", + i, + (u32) px->core_frequency, + (u32) px->power, + (u32) px->transition_latency, + (u32) px->bus_master_latency, + (u32) px->control, + (u32) px->status)); + } + +end: + acpi_os_free(buffer.pointer); + + return_VALUE(result); +} + + +static int +acpi_processor_set_performance ( + struct acpi_processor *pr, + int state) +{ + u16 port = 0; + u8 value = 0; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_set_performance"); + + if (!pr) + return_VALUE(-EINVAL); + + if (!pr->flags.performance) + return_VALUE(-ENODEV); + + if (state >= pr->performance.state_count) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Invalid target state (P%d)\n", state)); + return_VALUE(-ENODEV); + } + + if (state < pr->performance.platform_limit) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Platform limit (P%d) overrides target state (P%d)\n", + pr->performance.platform_limit, state)); + return_VALUE(-ENODEV); + } + + if (state == pr->performance.state) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Already at target state (P%d)\n", state)); + return_VALUE(0); + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Transitioning from P%d to P%d\n", + pr->performance.state, state)); + + /* + * First we write the target state's 'control' value to the + * control_register. + */ + + port = pr->performance.control_register; + value = (u16) pr->performance.states[state].control; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Writing 0x%02x to port 0x%04x\n", value, port)); + + outb(value, port); + + /* + * Then we read the 'status_register' and compare the value with the + * target state's 'status' to make sure the transition was successful. + * Note that we'll poll for up to 1ms (100 cycles of 10us) before + * giving up. 
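+	 *
+	 * For example (values assumed, not from a real _PSS): a P1 entry
+	 * with control=0x01 and status=0x01 results in an outb() of 0x01
+	 * to the control port followed by inb() polls of the status port
+	 * until it reads back 0x01 or the 100 * 10 usec budget runs out.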
+ */ + + port = pr->performance.status_register; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Looking for 0x%02x from port 0x%04x\n", + (u8) pr->performance.states[state].status, port)); + + for (i=0; i<100; i++) { + value = inb(port); + if (value == (u8) pr->performance.states[state].status) + break; + udelay(10); + } + + if (value != pr->performance.states[state].status) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Transition failed\n")); + return_VALUE(-ENODEV); + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Transition successful after %d microseconds\n", + i * 10)); + + pr->performance.state = state; + + return_VALUE(0); +} + + +static int +acpi_processor_get_performance_info ( + struct acpi_processor *pr) +{ + int result = 0; + acpi_status status = AE_OK; + acpi_handle handle = NULL; + + ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info"); + + if (!pr) + return_VALUE(-EINVAL); + + status = acpi_get_handle(pr->handle, "_PCT", &handle); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "ACPI-based processor performance control unavailable\n")); + return_VALUE(0); + } + + result = acpi_processor_get_performance_control(pr); + if (result) + return_VALUE(result); + + result = acpi_processor_get_performance_states(pr); + if (result) + return_VALUE(result); + + result = acpi_processor_get_platform_limit(pr); + if (result) + return_VALUE(result); + + /* + * TBD: Don't trust the latency values we get from BIOS, but rather + * measure the latencies during run-time (e.g. get_latencies). + */ + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Throttling Control + -------------------------------------------------------------------------- */ + +static int +acpi_processor_get_throttling ( + struct acpi_processor *pr) +{ + int state = 0; + u32 value = 0; + u32 duty_mask = 0; + u32 duty_value = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_get_throttling"); + + if (!pr) + return_VALUE(-EINVAL); + + if (!pr->flags.throttling) + return_VALUE(-ENODEV); + + pr->throttling.state = 0; + + __cli(); + + duty_mask = pr->throttling.state_count - 1; + + duty_mask <<= pr->throttling.duty_offset; + + value = inl(pr->throttling.address); + + /* + * Compute the current throttling state when throttling is enabled + * (bit 4 is on). + */ + if (value & 0x10) { + duty_value = value & duty_mask; + duty_value >>= pr->throttling.duty_offset; + + if (duty_value) + state = pr->throttling.state_count-duty_value; + } + + pr->throttling.state = state; + + __sti(); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Throttling state is T%d (%d%% throttling applied)\n", + state, pr->throttling.states[state].performance)); + + return_VALUE(0); +} + + +static int +acpi_processor_set_throttling ( + struct acpi_processor *pr, + int state) +{ + u32 value = 0; + u32 duty_mask = 0; + u32 duty_value = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_set_throttling"); + + if (!pr) + return_VALUE(-EINVAL); + + if ((state < 0) || (state > (pr->throttling.state_count - 1))) + return_VALUE(-EINVAL); + + if (!pr->flags.throttling) + return_VALUE(-ENODEV); + + if (state == pr->throttling.state) + return_VALUE(0); + + __cli(); + + /* + * Calculate the duty_value and duty_mask. + */ + if (state) { + duty_value = pr->throttling.state_count - state; + + duty_value <<= pr->throttling.duty_offset; + + /* Used to clear all duty_value bits */ + duty_mask = pr->throttling.state_count - 1; + + duty_mask <<= acpi_fadt.duty_offset; + duty_mask = ~duty_mask; + } + + /* + * Disable throttling by writing a 0 to bit 4. 
Note that we must
+	 * turn it off before we can change the duty_value.
+	 */
+	value = inl(pr->throttling.address);
+	if (value & 0x10) {
+		value &= 0xFFFFFFEF;
+		outl(value, pr->throttling.address);
+	}
+
+	/*
+	 * Write the new duty_value and then enable throttling. Note
+	 * that a state value of 0 leaves throttling disabled.
+	 */
+	if (state) {
+		value &= duty_mask;
+		value |= duty_value;
+		outl(value, pr->throttling.address);
+
+		value |= 0x00000010;
+		outl(value, pr->throttling.address);
+	}
+
+	pr->throttling.state = state;
+
+	__sti();
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+		"Throttling state set to T%d (%d%%)\n", state,
+		(pr->throttling.states[state].performance?pr->throttling.states[state].performance/10:0)));
+
+	return_VALUE(0);
+}
+
+
+static int
+acpi_processor_get_throttling_info (
+	struct acpi_processor	*pr)
+{
+	int			result = 0;
+	int			step = 0;
+	int			i = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_get_throttling_info");
+
+	if (!pr)
+		return_VALUE(-EINVAL);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+		"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
+		pr->throttling.address,
+		pr->throttling.duty_offset,
+		pr->throttling.duty_width));
+
+	/* TBD: Support ACPI 2.0 objects */
+
+	if (!pr->throttling.address) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
+		return_VALUE(0);
+	}
+	else if (!pr->throttling.duty_width) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
+		return_VALUE(0);
+	}
+	/* TBD: Support duty_cycle values that span bit 4. */
+	else if ((pr->throttling.duty_offset
+		+ pr->throttling.duty_width) > 4) {
+		ACPI_DEBUG_PRINT((ACPI_DB_WARN, "duty_cycle spans bit 4\n"));
+		return_VALUE(0);
+	}
+
+	/*
+	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
+	 * This shouldn't be an issue as few (if any) mobile systems ever
+	 * used this part.
+	 */
+	if (errata.piix4.throttle) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			"Throttling not supported on PIIX4 A- or B-step\n"));
+		return_VALUE(0);
+	}
+
+	pr->throttling.state_count = POWER_OF_2[acpi_fadt.duty_width];
+
+	/*
+	 * Compute state values. Note that throttling displays a linear power/
+	 * performance relationship (at 50% performance the CPU will consume
+	 * 50% power). Values are in 1/10th of a percent to preserve accuracy.
+	 */
+
+	step = (1000 / pr->throttling.state_count);
+
+	for (i = 0; i < pr->throttling.state_count; i++) {
+		pr->throttling.states[i].performance = step * i;
+		pr->throttling.states[i].power = step * i;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
+		pr->throttling.state_count));
+
+	pr->flags.throttling = 1;
+
+	/*
+	 * Disable throttling (if enabled). We'll let subsequent policy (e.g.
+	 * thermal) decide to lower performance if it so chooses, but for now
+	 * we'll crank up the speed.
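+	 *
+	 * Worked example for the state table built above (assumed FADT
+	 * values): duty_width = 3 gives state_count = 2^3 = 8 and
+	 * step = 1000/8 = 125, i.e. T0..T7 in 12.5% increments, so T4
+	 * stores 500 and is reported as 50% throttling.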
+ */ + + result = acpi_processor_get_throttling(pr); + if (result) + goto end; + + if (pr->throttling.state) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabling throttling (was T%d)\n", + pr->throttling.state)); + result = acpi_processor_set_throttling(pr, 0); + if (result) + goto end; + } + +end: + if (result) + pr->flags.throttling = 0; + + return_VALUE(result); +} + + +/* -------------------------------------------------------------------------- + Limit Interface + -------------------------------------------------------------------------- */ + +static int +acpi_processor_apply_limit ( + struct acpi_processor* pr) +{ + int result = 0; + u16 px = 0; + u16 tx = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_apply_limit"); + + if (!pr) + return_VALUE(-EINVAL); + + if (!pr->flags.limit) + return_VALUE(-ENODEV); + + if (pr->flags.performance) { + px = pr->performance.platform_limit; + if (pr->limit.user.px > px) + px = pr->limit.user.px; + if (pr->limit.thermal.px > px) + px = pr->limit.thermal.px; + + result = acpi_processor_set_performance(pr, px); + if (result) + goto end; + } + + if (pr->flags.throttling) { + if (pr->limit.user.tx > tx) + tx = pr->limit.user.tx; + if (pr->limit.thermal.tx > tx) + tx = pr->limit.thermal.tx; + + result = acpi_processor_set_throttling(pr, tx); + if (result) + goto end; + } + + pr->limit.state.px = px; + pr->limit.state.tx = tx; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d] limit set to (P%d:T%d)\n", + pr->id, + pr->limit.state.px, + pr->limit.state.tx)); + +end: + if (result) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to set limit\n")); + + return_VALUE(result); +} + + +int +acpi_processor_set_thermal_limit ( + acpi_handle handle, + int type) +{ + int result = 0; + struct acpi_processor *pr = NULL; + struct acpi_device *device = NULL; + int px = 0; + int tx = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_set_thermal_limit"); + + if ((type < ACPI_PROCESSOR_LIMIT_NONE) + || (type > ACPI_PROCESSOR_LIMIT_DECREMENT)) + return_VALUE(-EINVAL); + + result = acpi_bus_get_device(handle, &device); + if (result) + return_VALUE(result); + + pr = (struct acpi_processor *) acpi_driver_data(device); + if (!pr) + return_VALUE(-ENODEV); + + if (!pr->flags.limit) + return_VALUE(-ENODEV); + + /* Thermal limits are always relative to the current Px/Tx state. */ + if (pr->flags.performance) + pr->limit.thermal.px = pr->performance.state; + if (pr->flags.throttling) + pr->limit.thermal.tx = pr->throttling.state; + + /* + * Our default policy is to only use throttling at the lowest + * performance state. 
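+	 *
+	 * Concretely (hypothetical processor with P0..P1 and T0..T7):
+	 * successive INCREMENT events walk P0:T0 -> P1:T0 -> P1:T1 ...
+	 * P1:T7, exhausting performance states before any throttling is
+	 * applied, while DECREMENT relaxes the performance limit first,
+	 * e.g. P1:T7 -> P0:T7 -> P0:T6 ... P0:T0.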
+ */ + + px = pr->limit.thermal.px; + tx = pr->limit.thermal.tx; + + switch (type) { + + case ACPI_PROCESSOR_LIMIT_NONE: + px = 0; + tx = 0; + break; + + case ACPI_PROCESSOR_LIMIT_INCREMENT: + if (pr->flags.performance) { + if (px == (pr->performance.state_count - 1)) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "At maximum performance state\n")); + else { + px++; + goto end; + } + } + if (pr->flags.throttling) { + if (tx == (pr->throttling.state_count - 1)) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "At maximum throttling state\n")); + else + tx++; + } + break; + + case ACPI_PROCESSOR_LIMIT_DECREMENT: + if (pr->flags.performance) { + if (px == pr->performance.platform_limit) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "At minimum performance state\n")); + else { + px--; + goto end; + } + } + if (pr->flags.throttling) { + if (tx == 0) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "At minimum throttling state\n")); + else + tx--; + } + break; + } + +end: + pr->limit.thermal.px = px; + pr->limit.thermal.tx = tx; + + result = acpi_processor_apply_limit(pr); + if (result) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to set thermal limit\n")); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n", + pr->limit.thermal.px, + pr->limit.thermal.tx)); + + return_VALUE(result); +} + + +static int +acpi_processor_get_limit_info ( + struct acpi_processor *pr) +{ + ACPI_FUNCTION_TRACE("acpi_processor_get_limit_info"); + + if (!pr) + return_VALUE(-EINVAL); + + if (pr->flags.performance || pr->flags.throttling) + pr->flags.limit = 1; + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +struct proc_dir_entry *acpi_processor_dir = NULL; + +static int +acpi_processor_read_info ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_processor *pr = (struct acpi_processor *) data; + char *p = page; + int len = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_read_info"); + + if (!pr || (off != 0)) + goto end; + + p += sprintf(p, "processor id: %d\n", + pr->id); + + p += sprintf(p, "acpi id: %d\n", + pr->acpi_id); + + p += sprintf(p, "bus mastering control: %s\n", + pr->flags.bm_control ? "yes" : "no"); + + p += sprintf(p, "power management: %s\n", + pr->flags.power ? "yes" : "no"); + + p += sprintf(p, "throttling control: %s\n", + pr->flags.throttling ? "yes" : "no"); + + p += sprintf(p, "performance management: %s\n", + pr->flags.performance ? "yes" : "no"); + + p += sprintf(p, "limit interface: %s\n", + pr->flags.limit ? 
"yes" : "no"); + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_processor_read_power ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_processor *pr = (struct acpi_processor *) data; + char *p = page; + int len = 0; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_read_power"); + + if (!pr || (off != 0)) + goto end; + + p += sprintf(p, "active state: C%d\n", + pr->power.state); + + p += sprintf(p, "default state: C%d\n", + pr->power.default_state); + + p += sprintf(p, "bus master activity: %08x\n", + pr->power.bm_activity); + + p += sprintf(p, "states:\n"); + + for (i=1; ipower.state?'*':' '), i); + + if (!pr->power.states[i].valid) { + p += sprintf(p, "\n"); + continue; + } + + if (pr->power.states[i].promotion.state) + p += sprintf(p, "promotion[C%d] ", + pr->power.states[i].promotion.state); + else + p += sprintf(p, "promotion[--] "); + + if (pr->power.states[i].demotion.state) + p += sprintf(p, "demotion[C%d] ", + pr->power.states[i].demotion.state); + else + p += sprintf(p, "demotion[--] "); + + p += sprintf(p, "latency[%03d] usage[%08d]\n", + pr->power.states[i].latency, + pr->power.states[i].usage); + } + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_processor_read_performance ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_processor *pr = (struct acpi_processor *) data; + char *p = page; + int len = 0; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_read_performance"); + + if (!pr || (off != 0)) + goto end; + + if (!pr->flags.performance) { + p += sprintf(p, "\n"); + goto end; + } + + p += sprintf(p, "state count: %d\n", + pr->performance.state_count); + + p += sprintf(p, "active state: P%d\n", + pr->performance.state); + + p += sprintf(p, "states:\n"); + + for (i=0; iperformance.state_count; i++) + p += sprintf(p, " %cP%d: %d MHz, %d mW, %d uS\n", + (i == pr->performance.state?'*':' '), i, + (u32) pr->performance.states[i].core_frequency, + (u32) pr->performance.states[i].power, + (u32) pr->performance.states[i].transition_latency); + +end: + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + + return_VALUE(len); +} + + +static int +acpi_processor_write_performance ( + struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + int result = 0; + struct acpi_processor *pr = (struct acpi_processor *) data; + char state_string[12] = {'\0'}; + + ACPI_FUNCTION_TRACE("acpi_processor_write_performance"); + + if (!pr || (count > sizeof(state_string) - 1)) + return_VALUE(-EINVAL); + + if (copy_from_user(state_string, buffer, count)) + return_VALUE(-EFAULT); + + state_string[count] = '\0'; + + result = acpi_processor_set_performance(pr, + simple_strtoul(state_string, NULL, 0)); + if (result) + return_VALUE(result); + + return_VALUE(count); +} + + +static int +acpi_processor_read_throttling ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_processor *pr = (struct acpi_processor *) data; + char *p = page; + int len = 0; + int i = 0; + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_read_throttling"); + + if (!pr || 
(off != 0))
+		goto end;
+
+	if (!(pr->throttling.state_count > 0)) {
+		p += sprintf(p, "<not supported>\n");
+		goto end;
+	}
+
+	result = acpi_processor_get_throttling(pr);
+
+	if (result) {
+		p += sprintf(p, "Could not determine current throttling state.\n");
+		goto end;
+	}
+
+	p += sprintf(p, "state count: %d\n",
+		pr->throttling.state_count);
+
+	p += sprintf(p, "active state: T%d\n",
+		pr->throttling.state);
+
+	p += sprintf(p, "states:\n");
+
+	for (i = 0; i < pr->throttling.state_count; i++)
+		p += sprintf(p, " %cT%d: %02d%%\n",
+			(i == pr->throttling.state?'*':' '), i,
+			(pr->throttling.states[i].performance?pr->throttling.states[i].performance/10:0));
+
+end:
+	len = (p - page);
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len>count) len = count;
+	if (len<0) len = 0;
+
+	return_VALUE(len);
+}
+
+
+static int
+acpi_processor_write_throttling (
+	struct file		*file,
+	const char		*buffer,
+	unsigned long		count,
+	void			*data)
+{
+	int			result = 0;
+	struct acpi_processor	*pr = (struct acpi_processor *) data;
+	char			state_string[12] = {'\0'};
+
+	ACPI_FUNCTION_TRACE("acpi_processor_write_throttling");
+
+	if (!pr || (count > sizeof(state_string) - 1))
+		return_VALUE(-EINVAL);
+
+	if (copy_from_user(state_string, buffer, count))
+		return_VALUE(-EFAULT);
+
+	state_string[count] = '\0';
+
+	result = acpi_processor_set_throttling(pr,
+		simple_strtoul(state_string, NULL, 0));
+	if (result)
+		return_VALUE(result);
+
+	return_VALUE(count);
+}
+
+
+static int
+acpi_processor_read_limit (
+	char			*page,
+	char			**start,
+	off_t			off,
+	int			count,
+	int			*eof,
+	void			*data)
+{
+	struct acpi_processor	*pr = (struct acpi_processor *) data;
+	char			*p = page;
+	int			len = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_read_limit");
+
+	if (!pr || (off != 0))
+		goto end;
+
+	if (!pr->flags.limit) {
+		p += sprintf(p, "<not supported>\n");
+		goto end;
+	}
+
+	p += sprintf(p, "active limit: P%d:T%d\n",
+		pr->limit.state.px, pr->limit.state.tx);
+
+	p += sprintf(p, "platform limit: P%d:T0\n",
+		pr->flags.performance?pr->performance.platform_limit:0);
+
+	p += sprintf(p, "user limit: P%d:T%d\n",
+		pr->limit.user.px, pr->limit.user.tx);
+
+	p += sprintf(p, "thermal limit: P%d:T%d\n",
+		pr->limit.thermal.px, pr->limit.thermal.tx);
+
+end:
+	len = (p - page);
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len>count) len = count;
+	if (len<0) len = 0;
+
+	return_VALUE(len);
+}
+
+
+static int
+acpi_processor_write_limit (
+	struct file		*file,
+	const char		*buffer,
+	unsigned long		count,
+	void			*data)
+{
+	int			result = 0;
+	struct acpi_processor	*pr = (struct acpi_processor *) data;
+	char			limit_string[25] = {'\0'};
+	int			px = 0;
+	int			tx = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_write_limit");
+
+	if (!pr || (count > sizeof(limit_string) - 1)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid argument\n"));
+		return_VALUE(-EINVAL);
+	}
+
+	if (copy_from_user(limit_string, buffer, count)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data\n"));
+		return_VALUE(-EFAULT);
+	}
+
+	limit_string[count] = '\0';
+
+	if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data format\n"));
+		return_VALUE(-EINVAL);
+	}
+
+	if (pr->flags.performance) {
+		if ((px < pr->performance.platform_limit)
+		    || (px > (pr->performance.state_count - 1))) {
+			ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid px\n"));
+			return_VALUE(-EINVAL);
+		}
+		pr->limit.user.px = px;
+	}
+
+	if (pr->flags.throttling) {
+		if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid tx\n")); + return_VALUE(-EINVAL); + } + pr->limit.user.tx = tx; + } + + result = acpi_processor_apply_limit(pr); + + return_VALUE(count); +} + + +static int +acpi_processor_add_fs ( + struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_processor_add_fs"); + + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_processor_dir); + if (!acpi_device_dir(device)) + return_VALUE(-ENODEV); + } + + /* 'info' [R] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_INFO)); + else { + entry->read_proc = acpi_processor_read_info; + entry->data = acpi_driver_data(device); + } + + /* 'power' [R] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_POWER)); + else { + entry->read_proc = acpi_processor_read_power; + entry->data = acpi_driver_data(device); + } + + /* 'performance' [R/W] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE, + S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_PERFORMANCE)); + else { + entry->read_proc = acpi_processor_read_performance; + entry->write_proc = acpi_processor_write_performance; + entry->data = acpi_driver_data(device); + } + + /* 'throttling' [R/W] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, + S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_THROTTLING)); + else { + entry->read_proc = acpi_processor_read_throttling; + entry->write_proc = acpi_processor_write_throttling; + entry->data = acpi_driver_data(device); + } + + /* 'limit' [R/W] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, + S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_LIMIT)); + else { + entry->read_proc = acpi_processor_read_limit; + entry->write_proc = acpi_processor_write_limit; + entry->data = acpi_driver_data(device); + } + + return_VALUE(0); +} + + +static int +acpi_processor_remove_fs ( + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_processor_remove_fs"); + + if (acpi_device_dir(device)) { + remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); + acpi_device_dir(device) = NULL; + } + + return_VALUE(0); +} + + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ + +static int +acpi_processor_get_info ( + struct acpi_processor *pr) +{ + acpi_status status = 0; + union acpi_object object = {0}; + struct acpi_buffer buffer = {sizeof(union acpi_object), &object}; + static int cpu_index = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_get_info"); + + if (!pr) + return_VALUE(-EINVAL); + +#ifdef CONFIG_SMP + if (smp_num_cpus > 1) + errata.smp = smp_num_cpus; + + /* + * Extra Processor objects may be enumerated on MP systems with + * less than the max # of CPUs. They should be ignored. 
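+	 *
+	 * For example (hypothetical board): a BIOS built for four sockets
+	 * may declare \_PR.CPU0..CPU3 even when only two processors are
+	 * populated; the check below discards the two surplus objects.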
+	 */
+	if ((cpu_index + 1) > smp_num_cpus)
+		return_VALUE(-ENODEV);
+#endif
+
+	acpi_processor_errata(pr);
+
+	/*
+	 * Check to see if we have bus mastering arbitration control. This
+	 * is required for proper C3 usage (to maintain cache coherency).
+	 */
+	if (acpi_fadt.V1_pm2_cnt_blk && acpi_fadt.pm2_cnt_len) {
+		pr->flags.bm_control = 1;
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			"Bus mastering arbitration control present\n"));
+	}
+	else
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			"No bus mastering arbitration control\n"));
+
+	/*
+	 * Evaluate the processor object. Note that it is common on SMP to
+	 * have the first (boot) processor with a valid PBLK address while
+	 * all others have a NULL address.
+	 */
+	status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Error evaluating processor object\n"));
+		return_VALUE(-ENODEV);
+	}
+
+	/*
+	 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
+	 *	>>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
+	 */
+	pr->id = cpu_index++;
+	pr->acpi_id = object.processor.proc_id;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
+		pr->acpi_id));
+
+	if (!object.processor.pblk_address)
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
+	else if (object.processor.pblk_length != 6)
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid PBLK length [%d]\n",
+			object.processor.pblk_length));
+	else {
+		pr->throttling.address = object.processor.pblk_address;
+		pr->throttling.duty_offset = acpi_fadt.duty_offset;
+		pr->throttling.duty_width = acpi_fadt.duty_width;
+		pr->power.states[ACPI_STATE_C2].address =
+			object.processor.pblk_address + 4;
+		pr->power.states[ACPI_STATE_C3].address =
+			object.processor.pblk_address + 5;
+	}
+
+	acpi_processor_get_power_info(pr);
+	acpi_processor_get_performance_info(pr);
+	acpi_processor_get_throttling_info(pr);
+	acpi_processor_get_limit_info(pr);
+
+	return_VALUE(0);
+}
+
+
+static void
+acpi_processor_notify (
+	acpi_handle		handle,
+	u32			event,
+	void			*data)
+{
+	int			result = 0;
+	struct acpi_processor	*pr = (struct acpi_processor *) data;
+	struct acpi_device	*device = NULL;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_notify");
+
+	if (!pr)
+		return_VOID;
+
+	if (acpi_bus_get_device(pr->handle, &device))
+		return_VOID;
+
+	switch (event) {
+	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
+		result = acpi_processor_get_platform_limit(pr);
+		if (!result)
+			acpi_processor_apply_limit(pr);
+
+		acpi_bus_generate_event(device, event,
+			pr->performance.platform_limit);
+		break;
+	case ACPI_PROCESSOR_NOTIFY_POWER:
+		/* TBD */
+		acpi_bus_generate_event(device, event, 0);
+		break;
+	default:
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			"Unsupported event [0x%x]\n", event));
+		break;
+	}
+
+	return_VOID;
+}
+
+
+static int
+acpi_processor_add (
+	struct acpi_device	*device)
+{
+	int			result = 0;
+	acpi_status		status = AE_OK;
+	struct acpi_processor	*pr = NULL;
+	u32			i = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_add");
+
+	if (!device)
+		return_VALUE(-EINVAL);
+
+	pr = kmalloc(sizeof(struct acpi_processor), GFP_KERNEL);
+	if (!pr)
+		return_VALUE(-ENOMEM);
+	memset(pr, 0, sizeof(struct acpi_processor));
+
+	pr->handle = device->handle;
+	sprintf(acpi_device_name(device), "%s", ACPI_PROCESSOR_DEVICE_NAME);
+	sprintf(acpi_device_class(device), "%s", ACPI_PROCESSOR_CLASS);
+	acpi_driver_data(device) = pr;
+
+	result = acpi_processor_get_info(pr);
+	if (result)
+		goto end;
+
+	result = acpi_processor_add_fs(device);
+	if (result)
+		goto end;
+
+	status =
acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
+		acpi_processor_notify, pr);
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Error installing notify handler\n"));
+		result = -ENODEV;
+		goto end;
+	}
+
+	processors[pr->id] = pr;
+
+	/*
+	 * Install the idle handler if processor power management is supported.
+	 * Note that the default idle handler (default_idle) will be used on
+	 * platforms that only support C1.
+	 */
+	if ((pr->id == 0) && (pr->flags.power)) {
+		pm_idle_save = pm_idle;
+		pm_idle = acpi_processor_idle;
+	}
+
+	printk(KERN_INFO PREFIX "%s [%s] (supports",
+		acpi_device_name(device), acpi_device_bid(device));
+	for (i = 1; i < ACPI_C_STATE_COUNT; i++)
+		if (pr->power.states[i].valid)
+			printk(" C%d", i);
+	if (pr->flags.performance)
+		printk(", %d performance states", pr->performance.state_count);
+	if (pr->flags.throttling)
+		printk(", %d throttling states", pr->throttling.state_count);
+	printk(")\n");
+
+end:
+	if (result) {
+		acpi_processor_remove_fs(device);
+		kfree(pr);
+	}
+
+	return_VALUE(result);
+}
+
+
+static int
+acpi_processor_remove (
+	struct acpi_device	*device,
+	int			type)
+{
+	acpi_status		status = AE_OK;
+	struct acpi_processor	*pr = NULL;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_remove");
+
+	if (!device || !acpi_driver_data(device))
+		return_VALUE(-EINVAL);
+
+	pr = (struct acpi_processor *) acpi_driver_data(device);
+
+	/* Unregister the idle handler when processor #0 is removed. */
+	if (pr->id == 0)
+		pm_idle = pm_idle_save;
+
+	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
+		acpi_processor_notify);
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Error removing notify handler\n"));
+		return_VALUE(-ENODEV);
+	}
+
+	acpi_processor_remove_fs(device);
+
+	processors[pr->id] = NULL;
+
+	kfree(pr);
+
+	return_VALUE(0);
+}
+
+
+static int __init
+acpi_processor_init (void)
+{
+	int			result = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_init");
+
+	memset(&processors, 0, sizeof(processors));
+	memset(&errata, 0, sizeof(errata));
+
+	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+	if (!acpi_processor_dir)
+		return_VALUE(-ENODEV);
+
+	result = acpi_bus_register_driver(&acpi_processor_driver);
+	if (result < 0) {
+		remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+		return_VALUE(-ENODEV);
+	}
+
+	return_VALUE(0);
+}
+
+
+static void __exit
+acpi_processor_exit (void)
+{
+	ACPI_FUNCTION_TRACE("acpi_processor_exit");
+
+	acpi_bus_unregister_driver(&acpi_processor_driver);
+
+	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+
+	return_VOID;
+}
+
+
+module_init(acpi_processor_init);
+module_exit(acpi_processor_exit);
+
+EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/Makefile linux.21rc5-ac2/drivers/acpi/resources/Makefile
--- linux.21rc5/drivers/acpi/resources/Makefile	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/acpi/resources/Makefile	2003-04-28 17:59:26.000000000 +0100
@@ -1,11 +1,10 @@
 #
 # Makefile for all Linux ACPI interpreter subdirectories
-# EXCEPT for the ospm directory
 #
 
 O_TARGET := $(notdir $(CURDIR)).o
 
-obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c))
+obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c))
 
 EXTRA_CFLAGS += $(ACPI_CFLAGS)
 
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rsaddr.c linux.21rc5-ac2/drivers/acpi/resources/rsaddr.c
--- linux.21rc5/drivers/acpi/resources/rsaddr.c	2003-05-28
15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rsaddr.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,81 +1,98 @@ /******************************************************************************* * * Module Name: rsaddr - Address resource descriptors (16/32/64) - * $Revision: 19 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "acresrc.h" +#include +#include #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rsaddr") + ACPI_MODULE_NAME ("rsaddr") /******************************************************************************* * - * FUNCTION: Acpi_rs_address16_resource + * FUNCTION: acpi_rs_address16_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream. * ******************************************************************************/ acpi_status acpi_rs_address16_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - NATIVE_CHAR *temp_ptr; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_address16); - u32 index; - u16 temp16; - u8 temp8; + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u8 *temp_ptr; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_address16); + u32 index; + u16 temp16; + u8 temp8; - FUNCTION_TRACE ("Rs_address16_resource"); + ACPI_FUNCTION_TRACE ("rs_address16_resource"); /* * Point past the Descriptor to get the number of bytes consumed */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); *bytes_consumed = temp16 + 3; output_struct->id = ACPI_RSTYPE_ADDRESS16; @@ -87,6 +104,7 @@ temp8 = *buffer; /* Values 0-2 are valid */ + if (temp8 > 2) { return_ACPI_STATUS (AE_AML_INVALID_RESOURCE_TYPE); } @@ -99,24 +117,20 @@ buffer += 1; temp8 = *buffer; - /* - * Producer / Consumer - */ + /* Producer / Consumer */ + output_struct->data.address16.producer_consumer = temp8 & 0x01; - /* - * Decode - */ + /* Decode */ + output_struct->data.address16.decode = (temp8 >> 1) & 0x01; - /* - * Min Address Fixed - */ + /* Min Address Fixed */ + output_struct->data.address16.min_address_fixed = (temp8 >> 2) & 0x01; - /* - * Max Address Fixed - */ + /* Max Address Fixed */ + output_struct->data.address16.max_address_fixed = (temp8 >> 3) & 0x01; /* @@ -125,21 +139,21 @@ buffer += 1; temp8 = *buffer; - if (MEMORY_RANGE == output_struct->data.address16.resource_type) { + if (ACPI_MEMORY_RANGE == output_struct->data.address16.resource_type) { output_struct->data.address16.attribute.memory.read_write_attribute = (u16) (temp8 & 0x01); output_struct->data.address16.attribute.memory.cache_attribute = (u16) ((temp8 >> 1) & 0x0F); } - else { - if (IO_RANGE == output_struct->data.address16.resource_type) { + if 
(ACPI_IO_RANGE == output_struct->data.address16.resource_type) { output_struct->data.address16.attribute.io.range_attribute = (u16) (temp8 & 0x03); + output_struct->data.address16.attribute.io.translation_attribute = + (u16) ((temp8 >> 4) & 0x03); } - else { - /* BUS_NUMBER_RANGE == Address16.Data->Resource_type */ + /* BUS_NUMBER_RANGE == Address16.Data->resource_type */ /* Nothing needs to be filled in */ } } @@ -148,36 +162,31 @@ * Get Granularity (Bytes 6-7) */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&output_struct->data.address16.granularity, - buffer); + ACPI_MOVE_16_TO_32 (&output_struct->data.address16.granularity, buffer); /* - * Get Min_address_range (Bytes 8-9) + * Get min_address_range (Bytes 8-9) */ buffer += 2; - MOVE_UNALIGNED16_TO_16 (&output_struct->data.address16.min_address_range, - buffer); + ACPI_MOVE_16_TO_32 (&output_struct->data.address16.min_address_range, buffer); /* - * Get Max_address_range (Bytes 10-11) + * Get max_address_range (Bytes 10-11) */ buffer += 2; - MOVE_UNALIGNED16_TO_16 (&output_struct->data.address16.max_address_range, - buffer); + ACPI_MOVE_16_TO_32 (&output_struct->data.address16.max_address_range, buffer); /* - * Get Address_translation_offset (Bytes 12-13) + * Get address_translation_offset (Bytes 12-13) */ buffer += 2; - MOVE_UNALIGNED16_TO_16 (&output_struct->data.address16.address_translation_offset, - buffer); + ACPI_MOVE_16_TO_32 (&output_struct->data.address16.address_translation_offset, buffer); /* - * Get Address_length (Bytes 14-15) + * Get address_length (Bytes 14-15) */ buffer += 2; - MOVE_UNALIGNED16_TO_16 (&output_struct->data.address16.address_length, - buffer); + ACPI_MOVE_16_TO_32 (&output_struct->data.address16.address_length, buffer); /* * Resource Source Index (if present) @@ -204,9 +213,9 @@ /* Point the String pointer to the end of this structure */ output_struct->data.address16.resource_source.string_ptr = - (NATIVE_CHAR *)((u8 * )output_struct + struct_size); + (char *)((u8 * )output_struct + struct_size); - temp_ptr = output_struct->data.address16.resource_source.string_ptr; + temp_ptr = (u8 *) output_struct->data.address16.resource_source.string_ptr; /* Copy the string into the buffer */ @@ -228,14 +237,13 @@ output_struct->data.address16.resource_source.string_length = index + 1; /* - * In order for the Struct_size to fall on a 32-bit boundary, + * In order for the struct_size to fall on a 32-bit boundary, * calculate the length of the string and expand the - * Struct_size to the next 32-bit boundary. + * struct_size to the next 32-bit boundary. 
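+	 * (For example, with an assumed 5-byte resource_source string the
+	 * struct_size grows by 8 bytes, the next 32-bit multiple.)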
*/ temp8 = (u8) (index + 1); - struct_size += ROUND_UP_TO_32_bITS (temp8); + struct_size += ACPI_ROUND_UP_to_32_bITS (temp8); } - else { output_struct->data.address16.resource_source.index = 0x00; output_struct->data.address16.resource_source.string_length = 0; @@ -245,7 +253,7 @@ /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -257,13 +265,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_address16_stream + * FUNCTION: acpi_rs_address16_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -274,18 +281,18 @@ acpi_status acpi_rs_address16_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u8 *length_field; - u8 temp8; - NATIVE_CHAR *temp_pointer = NULL; - u32 actual_bytes; + u8 *buffer = *output_buffer; + u8 *length_field; + u8 temp8; + char *temp_pointer = NULL; + acpi_size actual_bytes; - FUNCTION_TRACE ("Rs_address16_stream"); + ACPI_FUNCTION_TRACE ("rs_address16_stream"); /* @@ -301,7 +308,7 @@ buffer += 2; /* - * Set the Resource Type (Memory, Io, Bus_number) + * Set the Resource Type (Memory, Io, bus_number) */ temp8 = (u8) (linked_list->data.address16.resource_type & 0x03); *buffer = temp8; @@ -324,7 +331,7 @@ */ temp8 = 0; - if (MEMORY_RANGE == linked_list->data.address16.resource_type) { + if (ACPI_MEMORY_RANGE == linked_list->data.address16.resource_type) { temp8 = (u8) (linked_list->data.address16.attribute.memory.read_write_attribute & 0x01); @@ -333,11 +340,13 @@ (linked_list->data.address16.attribute.memory.cache_attribute & 0x0F) << 1; } - - else if (IO_RANGE == linked_list->data.address16.resource_type) { + else if (ACPI_IO_RANGE == linked_list->data.address16.resource_type) { temp8 = (u8) (linked_list->data.address16.attribute.io.range_attribute & 0x03); + temp8 |= + (linked_list->data.address16.attribute.io.translation_attribute & + 0x03) << 4; } *buffer = temp8; @@ -346,36 +355,31 @@ /* * Set the address space granularity */ - MOVE_UNALIGNED16_TO_16 (buffer, - &linked_list->data.address16.granularity); + ACPI_MOVE_32_TO_16 (buffer, &linked_list->data.address16.granularity); buffer += 2; /* * Set the address range minimum */ - MOVE_UNALIGNED16_TO_16 (buffer, - &linked_list->data.address16.min_address_range); + ACPI_MOVE_32_TO_16 (buffer, &linked_list->data.address16.min_address_range); buffer += 2; /* * Set the address range maximum */ - MOVE_UNALIGNED16_TO_16 (buffer, - &linked_list->data.address16.max_address_range); + ACPI_MOVE_32_TO_16 (buffer, &linked_list->data.address16.max_address_range); buffer += 2; /* * Set the address translation offset */ - MOVE_UNALIGNED16_TO_16 (buffer, - &linked_list->data.address16.address_translation_offset); + ACPI_MOVE_32_TO_16 (buffer, &linked_list->data.address16.address_translation_offset); buffer += 2; /* * Set the address length */ - MOVE_UNALIGNED16_TO_16 (buffer, - 
&linked_list->data.address16.address_length); + ACPI_MOVE_32_TO_16 (buffer, &linked_list->data.address16.address_length); buffer += 2; /* @@ -387,26 +391,25 @@ *buffer = temp8; buffer += 1; - temp_pointer = (NATIVE_CHAR *) buffer; + temp_pointer = (char *) buffer; /* * Copy the string */ - STRCPY (temp_pointer, + ACPI_STRCPY (temp_pointer, linked_list->data.address16.resource_source.string_ptr); /* * Buffer needs to be set to the length of the sting + one for the * terminating null */ - buffer += (STRLEN (linked_list->data.address16.resource_source.string_ptr) - + 1); + buffer += (acpi_size)(ACPI_STRLEN (linked_list->data.address16.resource_source.string_ptr) + 1); } /* * Return the number of bytes consumed in this operation */ - actual_bytes = POINTER_DIFF (buffer, *output_buffer); + actual_bytes = ACPI_PTR_DIFF (buffer, *output_buffer); *bytes_consumed = actual_bytes; /* @@ -414,63 +417,59 @@ * minus the header size (3 bytes) */ actual_bytes -= 3; - MOVE_UNALIGNED16_TO_16 (length_field, &actual_bytes); + ACPI_MOVE_SIZE_TO_16 (length_field, &actual_bytes); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_address32_resource + * FUNCTION: acpi_rs_address32_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream. 
* ******************************************************************************/ acpi_status acpi_rs_address32_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer; - acpi_resource *output_struct; - u16 temp16; - u8 temp8; - NATIVE_CHAR *temp_ptr; - u32 struct_size; - u32 index; + u8 *buffer; + struct acpi_resource *output_struct= (void *) *output_buffer; + u16 temp16; + u8 temp8; + u8 *temp_ptr; + acpi_size struct_size; + u32 index; - FUNCTION_TRACE ("Rs_address32_resource"); + ACPI_FUNCTION_TRACE ("rs_address32_resource"); buffer = byte_stream_buffer; - output_struct = (acpi_resource *) *output_buffer; - - struct_size = SIZEOF_RESOURCE (acpi_resource_address32); + struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_address32); /* * Point past the Descriptor to get the number of bytes consumed */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); - + ACPI_MOVE_16_TO_16 (&temp16, buffer); *bytes_consumed = temp16 + 3; output_struct->id = ACPI_RSTYPE_ADDRESS32; @@ -520,22 +519,22 @@ buffer += 1; temp8 = *buffer; - if (MEMORY_RANGE == output_struct->data.address32.resource_type) { + if (ACPI_MEMORY_RANGE == output_struct->data.address32.resource_type) { output_struct->data.address32.attribute.memory.read_write_attribute = (u16) (temp8 & 0x01); output_struct->data.address32.attribute.memory.cache_attribute = (u16) ((temp8 >> 1) & 0x0F); } - else { - if (IO_RANGE == output_struct->data.address32.resource_type) { + if (ACPI_IO_RANGE == output_struct->data.address32.resource_type) { output_struct->data.address32.attribute.io.range_attribute = (u16) (temp8 & 0x03); + output_struct->data.address32.attribute.io.translation_attribute = + (u16) ((temp8 >> 4) & 0x03); } - else { - /* BUS_NUMBER_RANGE == Output_struct->Data.Address32.Resource_type */ + /* BUS_NUMBER_RANGE == output_struct->Data.Address32.resource_type */ /* Nothing needs to be filled in */ } } @@ -544,36 +543,31 @@ * Get Granularity (Bytes 6-9) */ buffer += 1; - MOVE_UNALIGNED32_TO_32 (&output_struct->data.address32.granularity, - buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.address32.granularity, buffer); /* - * Get Min_address_range (Bytes 10-13) + * Get min_address_range (Bytes 10-13) */ buffer += 4; - MOVE_UNALIGNED32_TO_32 (&output_struct->data.address32.min_address_range, - buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.address32.min_address_range, buffer); /* - * Get Max_address_range (Bytes 14-17) + * Get max_address_range (Bytes 14-17) */ buffer += 4; - MOVE_UNALIGNED32_TO_32 (&output_struct->data.address32.max_address_range, - buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.address32.max_address_range, buffer); /* - * Get Address_translation_offset (Bytes 18-21) + * Get address_translation_offset (Bytes 18-21) */ buffer += 4; - MOVE_UNALIGNED32_TO_32 (&output_struct->data.address32.address_translation_offset, - buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.address32.address_translation_offset, buffer); /* - * Get Address_length (Bytes 22-25) + * Get address_length (Bytes 22-25) */ buffer += 4; - MOVE_UNALIGNED32_TO_32 (&output_struct->data.address32.address_length, - buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.address32.address_length, buffer); /* * Resource Source Index (if present) @@ -599,14 +593,13 @@ /* Point the String pointer to the end of this structure */ 
output_struct->data.address32.resource_source.string_ptr = - (NATIVE_CHAR *)((u8 *)output_struct + struct_size); + (char *)((u8 *)output_struct + struct_size); - temp_ptr = output_struct->data.address32.resource_source.string_ptr; + temp_ptr = (u8 *) output_struct->data.address32.resource_source.string_ptr; /* Copy the string into the buffer */ index = 0; - while (0x00 != *buffer) { *temp_ptr = *buffer; @@ -622,14 +615,13 @@ output_struct->data.address32.resource_source.string_length = index + 1; /* - * In order for the Struct_size to fall on a 32-bit boundary, + * In order for the struct_size to fall on a 32-bit boundary, * calculate the length of the string and expand the - * Struct_size to the next 32-bit boundary. + * struct_size to the next 32-bit boundary. */ temp8 = (u8) (index + 1); - struct_size += ROUND_UP_TO_32_bITS (temp8); + struct_size += ACPI_ROUND_UP_to_32_bITS (temp8); } - else { output_struct->data.address32.resource_source.index = 0x00; output_struct->data.address32.resource_source.string_length = 0; @@ -639,7 +631,7 @@ /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -651,13 +643,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_address32_stream + * FUNCTION: acpi_rs_address32_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -668,17 +659,17 @@ acpi_status acpi_rs_address32_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer; - u16 *length_field; - u8 temp8; - NATIVE_CHAR *temp_pointer; + u8 *buffer; + u16 *length_field; + u8 temp8; + char *temp_pointer; - FUNCTION_TRACE ("Rs_address32_stream"); + ACPI_FUNCTION_TRACE ("rs_address32_stream"); buffer = *output_buffer; @@ -692,12 +683,11 @@ /* * Set a pointer to the Length field - to be filled in later */ - - length_field = (u16 *) buffer; + length_field = ACPI_CAST_PTR (u16, buffer); buffer += 2; /* - * Set the Resource Type (Memory, Io, Bus_number) + * Set the Resource Type (Memory, Io, bus_number) */ temp8 = (u8) (linked_list->data.address32.resource_type & 0x03); @@ -720,7 +710,7 @@ */ temp8 = 0; - if(MEMORY_RANGE == linked_list->data.address32.resource_type) { + if (ACPI_MEMORY_RANGE == linked_list->data.address32.resource_type) { temp8 = (u8) (linked_list->data.address32.attribute.memory.read_write_attribute & 0x01); @@ -729,11 +719,13 @@ (linked_list->data.address32.attribute.memory.cache_attribute & 0x0F) << 1; } - - else if (IO_RANGE == linked_list->data.address32.resource_type) { + else if (ACPI_IO_RANGE == linked_list->data.address32.resource_type) { temp8 = (u8) (linked_list->data.address32.attribute.io.range_attribute & 0x03); + temp8 |= + (linked_list->data.address32.attribute.io.translation_attribute & + 0x03) << 4; } *buffer = temp8; @@ -742,36 +734,31 @@ /* * Set the address space granularity */ - MOVE_UNALIGNED32_TO_32 (buffer, - &linked_list->data.address32.granularity); + 
ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.address32.granularity); buffer += 4; /* * Set the address range minimum */ - MOVE_UNALIGNED32_TO_32 (buffer, - &linked_list->data.address32.min_address_range); + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.address32.min_address_range); buffer += 4; /* * Set the address range maximum */ - MOVE_UNALIGNED32_TO_32 (buffer, - &linked_list->data.address32.max_address_range); + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.address32.max_address_range); buffer += 4; /* * Set the address translation offset */ - MOVE_UNALIGNED32_TO_32 (buffer, - &linked_list->data.address32.address_translation_offset); + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.address32.address_translation_offset); buffer += 4; /* * Set the address length */ - MOVE_UNALIGNED32_TO_32 (buffer, - &linked_list->data.address32.address_length); + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.address32.address_length); buffer += 4; /* @@ -783,25 +770,25 @@ *buffer = temp8; buffer += 1; - temp_pointer = (NATIVE_CHAR *) buffer; + temp_pointer = (char *) buffer; /* * Copy the string */ - STRCPY (temp_pointer, + ACPI_STRCPY (temp_pointer, linked_list->data.address32.resource_source.string_ptr); /* * Buffer needs to be set to the length of the string + one for the * terminating null */ - buffer += (STRLEN (linked_list->data.address32.resource_source.string_ptr) + 1); + buffer += (acpi_size)(ACPI_STRLEN (linked_list->data.address32.resource_source.string_ptr) + 1); } /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); /* * Set the length field to the number of bytes consumed @@ -814,55 +801,52 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_address64_resource + * FUNCTION: acpi_rs_address64_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer + * is returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream.
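 *
 * For orientation, the QWORD (64-bit) descriptor decoded here keeps its
 * payload at fixed offsets, matching the byte-offset comments in the body
 * below. A minimal sketch of the same walk (bounds checks and flag
 * decoding omitted; "out" is a stand-in for the output structure):
 *
 *     u8 *p = byte_stream_buffer + 6;   /* skip id, length, type, flags */
 *
 *     ACPI_MOVE_64_TO_64 (&out->data.address64.granularity, p); p += 8;
 *     ACPI_MOVE_64_TO_64 (&out->data.address64.min_address_range, p); p += 8;
 *     ACPI_MOVE_64_TO_64 (&out->data.address64.max_address_range, p); p += 8;
 *     ACPI_MOVE_64_TO_64 (&out->data.address64.address_translation_offset, p); p += 8;
 *     ACPI_MOVE_64_TO_64 (&out->data.address64.address_length, p);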
* ******************************************************************************/ acpi_status acpi_rs_address64_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer; - acpi_resource *output_struct; - u16 temp16; - u8 temp8; - NATIVE_CHAR *temp_ptr; - u32 struct_size; - u32 index; + u8 *buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u16 temp16; + u8 temp8; + u8 *temp_ptr; + acpi_size struct_size; + u32 index; - FUNCTION_TRACE ("Rs_address64_resource"); + ACPI_FUNCTION_TRACE ("rs_address64_resource"); buffer = byte_stream_buffer; - output_struct = (acpi_resource *) *output_buffer; - - struct_size = SIZEOF_RESOURCE (acpi_resource_address64); + struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_address64); /* * Point past the Descriptor to get the number of bytes consumed */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); *bytes_consumed = temp16 + 3; output_struct->id = ACPI_RSTYPE_ADDRESS64; @@ -874,6 +858,7 @@ temp8 = *buffer; /* Values 0-2 are valid */ + if(temp8 > 2) { return_ACPI_STATUS (AE_AML_INVALID_RESOURCE_TYPE); } @@ -912,22 +897,22 @@ buffer += 1; temp8 = *buffer; - if (MEMORY_RANGE == output_struct->data.address64.resource_type) { + if (ACPI_MEMORY_RANGE == output_struct->data.address64.resource_type) { output_struct->data.address64.attribute.memory.read_write_attribute = (u16) (temp8 & 0x01); output_struct->data.address64.attribute.memory.cache_attribute = (u16) ((temp8 >> 1) & 0x0F); } - else { - if (IO_RANGE == output_struct->data.address64.resource_type) { + if (ACPI_IO_RANGE == output_struct->data.address64.resource_type) { output_struct->data.address64.attribute.io.range_attribute = (u16) (temp8 & 0x03); + output_struct->data.address64.attribute.io.translation_attribute = + (u16) ((temp8 >> 4) & 0x03); } - else { - /* BUS_NUMBER_RANGE == Output_struct->Data.Address64.Resource_type */ + /* BUS_NUMBER_RANGE == output_struct->Data.Address64.resource_type */ /* Nothing needs to be filled in */ } } @@ -936,36 +921,31 @@ * Get Granularity (Bytes 6-13) */ buffer += 1; - MOVE_UNALIGNED64_TO_64 (&output_struct->data.address64.granularity, - buffer); + ACPI_MOVE_64_TO_64 (&output_struct->data.address64.granularity, buffer); /* - * Get Min_address_range (Bytes 14-21) + * Get min_address_range (Bytes 14-21) */ buffer += 8; - MOVE_UNALIGNED64_TO_64 (&output_struct->data.address64.min_address_range, - buffer); + ACPI_MOVE_64_TO_64 (&output_struct->data.address64.min_address_range, buffer); /* - * Get Max_address_range (Bytes 22-29) + * Get max_address_range (Bytes 22-29) */ buffer += 8; - MOVE_UNALIGNED64_TO_64 (&output_struct->data.address64.max_address_range, - buffer); + ACPI_MOVE_64_TO_64 (&output_struct->data.address64.max_address_range, buffer); /* - * Get Address_translation_offset (Bytes 30-37) + * Get address_translation_offset (Bytes 30-37) */ buffer += 8; - MOVE_UNALIGNED64_TO_64 (&output_struct->data.address64.address_translation_offset, - buffer); + ACPI_MOVE_64_TO_64 (&output_struct->data.address64.address_translation_offset, buffer); /* - * Get Address_length (Bytes 38-45) + * Get address_length (Bytes 38-45) */ buffer += 8; - MOVE_UNALIGNED64_TO_64 (&output_struct->data.address64.address_length, - buffer); + ACPI_MOVE_64_TO_64 (&output_struct->data.address64.address_length, buffer); /* * Resource Source Index (if 
present) @@ -993,14 +973,13 @@ /* Point the String pointer to the end of this structure */ output_struct->data.address64.resource_source.string_ptr = - (NATIVE_CHAR *)((u8 *)output_struct + struct_size); + (char *)((u8 *)output_struct + struct_size); - temp_ptr = output_struct->data.address64.resource_source.string_ptr; + temp_ptr = (u8 *) output_struct->data.address64.resource_source.string_ptr; /* Copy the string into the buffer */ index = 0; - while (0x00 != *buffer) { *temp_ptr = *buffer; @@ -1017,14 +996,13 @@ output_struct->data.address64.resource_source.string_length = index + 1; /* - * In order for the Struct_size to fall on a 32-bit boundary, + * In order for the struct_size to fall on a 32-bit boundary, * calculate the length of the string and expand the - * Struct_size to the next 32-bit boundary. + * struct_size to the next 32-bit boundary. */ temp8 = (u8) (index + 1); - struct_size += ROUND_UP_TO_32_bITS (temp8); + struct_size += ACPI_ROUND_UP_to_32_bITS (temp8); } - else { output_struct->data.address64.resource_source.index = 0x00; output_struct->data.address64.resource_source.string_length = 0; @@ -1034,7 +1012,7 @@ /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -1046,13 +1024,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_address64_stream + * FUNCTION: acpi_rs_address64_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -1063,17 +1040,17 @@ acpi_status acpi_rs_address64_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer; - u16 *length_field; - u8 temp8; - NATIVE_CHAR *temp_pointer; + u8 *buffer; + u16 *length_field; + u8 temp8; + char *temp_pointer; - FUNCTION_TRACE ("Rs_address64_stream"); + ACPI_FUNCTION_TRACE ("rs_address64_stream"); buffer = *output_buffer; @@ -1088,11 +1065,11 @@ * Set a pointer to the Length field - to be filled in later */ - length_field = (u16 *)buffer; + length_field = ACPI_CAST_PTR (u16, buffer); buffer += 2; /* - * Set the Resource Type (Memory, Io, Bus_number) + * Set the Resource Type (Memory, Io, bus_number) */ temp8 = (u8) (linked_list->data.address64.resource_type & 0x03); @@ -1115,7 +1092,7 @@ */ temp8 = 0; - if(MEMORY_RANGE == linked_list->data.address64.resource_type) { + if (ACPI_MEMORY_RANGE == linked_list->data.address64.resource_type) { temp8 = (u8) (linked_list->data.address64.attribute.memory.read_write_attribute & 0x01); @@ -1124,11 +1101,13 @@ (linked_list->data.address64.attribute.memory.cache_attribute & 0x0F) << 1; } - - else if (IO_RANGE == linked_list->data.address64.resource_type) { + else if (ACPI_IO_RANGE == linked_list->data.address64.resource_type) { temp8 = (u8) (linked_list->data.address64.attribute.io.range_attribute & 0x03); + temp8 |= + (linked_list->data.address64.attribute.io.translation_attribute & + 0x03) << 4; } *buffer = temp8; @@ -1137,36 +1116,31 @@ /* * Set the address space
granularity */ - MOVE_UNALIGNED64_TO_64 (buffer, - &linked_list->data.address64.granularity); + ACPI_MOVE_64_TO_64 (buffer, &linked_list->data.address64.granularity); buffer += 8; /* * Set the address range minimum */ - MOVE_UNALIGNED64_TO_64 (buffer, - &linked_list->data.address64.min_address_range); + ACPI_MOVE_64_TO_64 (buffer, &linked_list->data.address64.min_address_range); buffer += 8; /* * Set the address range maximum */ - MOVE_UNALIGNED64_TO_64 (buffer, - &linked_list->data.address64.max_address_range); + ACPI_MOVE_64_TO_64 (buffer, &linked_list->data.address64.max_address_range); buffer += 8; /* * Set the address translation offset */ - MOVE_UNALIGNED64_TO_64 (buffer, - &linked_list->data.address64.address_translation_offset); + ACPI_MOVE_64_TO_64 (buffer, &linked_list->data.address64.address_translation_offset); buffer += 8; /* * Set the address length */ - MOVE_UNALIGNED64_TO_64 (buffer, - &linked_list->data.address64.address_length); + ACPI_MOVE_64_TO_64 (buffer, &linked_list->data.address64.address_length); buffer += 8; /* @@ -1178,24 +1152,24 @@ *buffer = temp8; buffer += 1; - temp_pointer = (NATIVE_CHAR *) buffer; + temp_pointer = (char *) buffer; /* * Copy the string */ - STRCPY (temp_pointer, linked_list->data.address64.resource_source.string_ptr); + ACPI_STRCPY (temp_pointer, linked_list->data.address64.resource_source.string_ptr); /* * Buffer needs to be set to the length of the string + one for the * terminating null */ - buffer += (STRLEN (linked_list->data.address64.resource_source.string_ptr) + 1); + buffer += (acpi_size)(ACPI_STRLEN (linked_list->data.address64.resource_source.string_ptr) + 1); } /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); /* * Set the length field to the number of bytes consumed diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rscalc.c linux.21rc5-ac2/drivers/acpi/resources/rscalc.c --- linux.21rc5/drivers/acpi/resources/rscalc.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rscalc.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,44 +1,62 @@ /******************************************************************************* * * Module Name: rscalc - Calculate stream and list lengths - * $Revision: 32 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acresrc.h" -#include "amlcode.h" -#include "acnamesp.h" +#include <acpi/acpi.h> +#include <acpi/acresrc.h> +#include <acpi/amlcode.h> +#include <acpi/acnamesp.h> #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rscalc") + ACPI_MODULE_NAME ("rscalc") /******************************************************************************* * - * FUNCTION: Acpi_rs_calculate_byte_stream_length + * FUNCTION: acpi_rs_get_byte_stream_length * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Size_needed - u32 pointer of the size buffer needed + * PARAMETERS: linked_list - Pointer to the resource linked list + * size_needed - u32 pointer of the size buffer needed * to properly return the parsed data * * RETURN: Status @@ -50,17 +68,17 @@ ******************************************************************************/ acpi_status -acpi_rs_calculate_byte_stream_length ( - acpi_resource *linked_list, - u32 *size_needed) +acpi_rs_get_byte_stream_length ( + struct acpi_resource *linked_list, + acpi_size *size_needed) { - u32 byte_stream_size_needed = 0; - u32 segment_size; - acpi_resource_ext_irq *ex_irq = NULL; - u8 done = FALSE; + acpi_size byte_stream_size_needed = 0; + acpi_size segment_size; + struct acpi_resource_ext_irq *ex_irq = NULL; + u8 done = FALSE; - FUNCTION_TRACE ("Rs_calculate_byte_stream_length"); + ACPI_FUNCTION_TRACE ("rs_get_byte_stream_length"); while (!done) { @@ -90,7 +108,7 @@ case ACPI_RSTYPE_START_DPF: /* * Start Dependent Functions Resource - * For a Start_dependent_functions Resource, Byte 1, + * For a start_dependent_functions Resource, Byte 1, * although optional, will always be created.
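 *
 * Each case below computes segment_size for a single resource; the
 * enclosing loop adds it to byte_stream_size_needed and advances to the
 * next element with ACPI_PTR_ADD. For the variable-length cases, a
 * plausible expansion of the rounding helpers (an assumption for
 * illustration; the real macros live in the ACPI headers):
 *
 *     #define ACPI_ROUND_UP(value, boundary) \
 *         (((value) + ((boundary) - 1)) & ~((boundary) - 1))
 *     #define ACPI_ROUND_UP_to_32_bITS(value)   ACPI_ROUND_UP ((value), 4)
 *     #define ACPI_ROUND_UP_to_64_bITS(value)   ACPI_ROUND_UP ((value), 8)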
*/ segment_size = 2; @@ -180,9 +198,9 @@ */ segment_size = 16; - if (NULL != linked_list->data.address16.resource_source.string_ptr) { - segment_size += (1 + - linked_list->data.address16.resource_source.string_length); + if (linked_list->data.address16.resource_source.string_ptr) { + segment_size += linked_list->data.address16.resource_source.string_length; + segment_size++; } break; @@ -196,9 +214,9 @@ */ segment_size = 26; - if (NULL != linked_list->data.address32.resource_source.string_ptr) { - segment_size += (1 + - linked_list->data.address32.resource_source.string_length); + if (linked_list->data.address32.resource_source.string_ptr) { + segment_size += linked_list->data.address32.resource_source.string_length; + segment_size++; } break; @@ -212,9 +230,9 @@ */ segment_size = 46; - if (NULL != linked_list->data.address64.resource_source.string_ptr) { - segment_size += (1 + - linked_list->data.address64.resource_source.string_length); + if (linked_list->data.address64.resource_source.string_ptr) { + segment_size += linked_list->data.address64.resource_source.string_length; + segment_size++; } break; @@ -229,11 +247,11 @@ * Resource Source + 1 for the null. */ segment_size = 9 + - ((linked_list->data.extended_irq.number_of_interrupts - 1) * 4); + (((acpi_size) linked_list->data.extended_irq.number_of_interrupts - 1) * 4); - if (NULL != ex_irq->resource_source.string_ptr) { - segment_size += (1 + - linked_list->data.extended_irq.resource_source.string_length); + if (ex_irq && ex_irq->resource_source.string_ptr) { + segment_size += linked_list->data.extended_irq.resource_source.string_length; + segment_size++; } break; @@ -243,9 +261,8 @@ * so exit with an error */ return_ACPI_STATUS (AE_AML_INVALID_RESOURCE_TYPE); - break; - } /* switch (Linked_list->Id) */ + } /* switch (linked_list->Id) */ /* * Update the total @@ -255,7 +272,7 @@ /* * Point to the next object */ - linked_list = POINTER_ADD (acpi_resource, + linked_list = ACPI_PTR_ADD (struct acpi_resource, linked_list, linked_list->length); } @@ -269,11 +286,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_calculate_list_length + * FUNCTION: acpi_rs_get_list_length * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource byte stream - * Byte_stream_buffer_length - Size of Byte_stream_buffer - * Size_needed - u32 pointer of the size buffer + * PARAMETERS: byte_stream_buffer - Pointer to the resource byte stream + * byte_stream_buffer_length - Size of byte_stream_buffer + * size_needed - u32 pointer of the size buffer * needed to properly return the * parsed data * @@ -286,26 +303,26 @@ ******************************************************************************/ acpi_status -acpi_rs_calculate_list_length ( - u8 *byte_stream_buffer, - u32 byte_stream_buffer_length, - u32 *size_needed) +acpi_rs_get_list_length ( + u8 *byte_stream_buffer, + u32 byte_stream_buffer_length, + acpi_size *size_needed) { - u32 buffer_size = 0; - u32 bytes_parsed = 0; - u8 number_of_interrupts = 0; - u8 number_of_channels = 0; - u8 resource_type; - u32 structure_size; - u32 bytes_consumed; - u8 *buffer; - u8 temp8; - u16 temp16; - u8 index; - u8 additional_bytes; + u32 buffer_size = 0; + u32 bytes_parsed = 0; + u8 number_of_interrupts = 0; + u8 number_of_channels = 0; + u8 resource_type; + u32 structure_size; + u32 bytes_consumed; + u8 *buffer; + u8 temp8; + u16 temp16; + u8 index; + u8 additional_bytes; - FUNCTION_TRACE ("Rs_calculate_list_length"); + ACPI_FUNCTION_TRACE 
("rs_get_list_length"); while (bytes_parsed < byte_stream_buffer_length) { @@ -315,65 +332,65 @@ resource_type = acpi_rs_get_resource_type (*byte_stream_buffer); switch (resource_type) { - case RESOURCE_DESC_MEMORY_24: + case ACPI_RDESC_TYPE_MEMORY_24: /* * 24-Bit Memory Resource */ bytes_consumed = 12; - structure_size = SIZEOF_RESOURCE (acpi_resource_mem24); + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_mem24); break; - case RESOURCE_DESC_LARGE_VENDOR: + case ACPI_RDESC_TYPE_LARGE_VENDOR: /* * Vendor Defined Resource */ buffer = byte_stream_buffer; ++buffer; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); bytes_consumed = temp16 + 3; /* * Ensure a 32-bit boundary for the structure */ - temp16 = (u16) ROUND_UP_TO_32_bITS (temp16); + temp16 = (u16) ACPI_ROUND_UP_to_32_bITS (temp16); - structure_size = SIZEOF_RESOURCE (acpi_resource_vendor) + + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_vendor) + (temp16 * sizeof (u8)); break; - case RESOURCE_DESC_MEMORY_32: + case ACPI_RDESC_TYPE_MEMORY_32: /* * 32-Bit Memory Range Resource */ bytes_consumed = 20; - structure_size = SIZEOF_RESOURCE (acpi_resource_mem32); + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_mem32); break; - case RESOURCE_DESC_FIXED_MEMORY_32: + case ACPI_RDESC_TYPE_FIXED_MEMORY_32: /* * 32-Bit Fixed Memory Resource */ bytes_consumed = 12; - structure_size = SIZEOF_RESOURCE (acpi_resource_fixed_mem32); + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_fixed_mem32); break; - case RESOURCE_DESC_QWORD_ADDRESS_SPACE: + case ACPI_RDESC_TYPE_QWORD_ADDRESS_SPACE: /* * 64-Bit Address Resource */ buffer = byte_stream_buffer; ++buffer; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); bytes_consumed = temp16 + 3; @@ -397,21 +414,21 @@ /* * Ensure a 64-bit boundary for the structure */ - temp8 = (u8) ROUND_UP_TO_64_bITS (temp8); + temp8 = (u8) ACPI_ROUND_UP_to_64_bITS (temp8); - structure_size = SIZEOF_RESOURCE (acpi_resource_address64) + + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_address64) + (temp8 * sizeof (u8)); break; - case RESOURCE_DESC_DWORD_ADDRESS_SPACE: + case ACPI_RDESC_TYPE_DWORD_ADDRESS_SPACE: /* * 32-Bit Address Resource */ buffer = byte_stream_buffer; ++buffer; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); bytes_consumed = temp16 + 3; @@ -435,21 +452,21 @@ /* * Ensure a 32-bit boundary for the structure */ - temp8 = (u8) ROUND_UP_TO_32_bITS (temp8); + temp8 = (u8) ACPI_ROUND_UP_to_32_bITS (temp8); - structure_size = SIZEOF_RESOURCE (acpi_resource_address32) + + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_address32) + (temp8 * sizeof (u8)); break; - case RESOURCE_DESC_WORD_ADDRESS_SPACE: + case ACPI_RDESC_TYPE_WORD_ADDRESS_SPACE: /* * 16-Bit Address Resource */ buffer = byte_stream_buffer; ++buffer; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); bytes_consumed = temp16 + 3; @@ -473,21 +490,21 @@ /* * Ensure a 32-bit boundary for the structure */ - temp8 = (u8) ROUND_UP_TO_32_bITS (temp8); + temp8 = (u8) ACPI_ROUND_UP_to_32_bITS (temp8); - structure_size = SIZEOF_RESOURCE (acpi_resource_address16) + + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_address16) + (temp8 * sizeof (u8)); break; - case RESOURCE_DESC_EXTENDED_XRUPT: + case ACPI_RDESC_TYPE_EXTENDED_XRUPT: /* * Extended IRQ */ buffer = byte_stream_buffer; ++buffer; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + 
ACPI_MOVE_16_TO_16 (&temp16, buffer); bytes_consumed = temp16 + 3; @@ -518,7 +535,6 @@ if (9 + additional_bytes < temp16) { temp8 = (u8) (temp16 - (9 + additional_bytes)); } - else { temp8 = 0; } @@ -526,15 +542,15 @@ /* * Ensure a 32-bit boundary for the structure */ - temp8 = (u8) ROUND_UP_TO_32_bITS (temp8); + temp8 = (u8) ACPI_ROUND_UP_to_32_bITS (temp8); - structure_size = SIZEOF_RESOURCE (acpi_resource_ext_irq) + + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_ext_irq) + (additional_bytes * sizeof (u8)) + (temp8 * sizeof (u8)); break; - case RESOURCE_DESC_IRQ_FORMAT: + case ACPI_RDESC_TYPE_IRQ_FORMAT: /* * IRQ Resource. * Determine if there are two or three trailing bytes @@ -545,7 +561,6 @@ if(temp8 & 0x01) { bytes_consumed = 4; } - else { bytes_consumed = 3; } @@ -558,7 +573,7 @@ /* * Look at the number of bits set */ - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); for (index = 0; index < 16; index++) { if (temp16 & 0x1) { @@ -568,12 +583,12 @@ temp16 >>= 1; } - structure_size = SIZEOF_RESOURCE (acpi_resource_io) + + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_io) + (number_of_interrupts * sizeof (u32)); break; - case RESOURCE_DESC_DMA_FORMAT: + case ACPI_RDESC_TYPE_DMA_FORMAT: /* * DMA Resource */ @@ -598,12 +613,12 @@ temp8 >>= 1; } - structure_size = SIZEOF_RESOURCE (acpi_resource_dma) + + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_dma) + (number_of_channels * sizeof (u32)); break; - case RESOURCE_DESC_START_DEPENDENT: + case ACPI_RDESC_TYPE_START_DEPENDENT: /* * Start Dependent Functions Resource * Determine if there are two or three trailing bytes @@ -618,11 +633,11 @@ bytes_consumed = 1; } - structure_size = SIZEOF_RESOURCE (acpi_resource_start_dpf); + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_start_dpf); break; - case RESOURCE_DESC_END_DEPENDENT: + case ACPI_RDESC_TYPE_END_DEPENDENT: /* * End Dependent Functions Resource */ @@ -631,25 +646,25 @@ break; - case RESOURCE_DESC_IO_PORT: + case ACPI_RDESC_TYPE_IO_PORT: /* * IO Port Resource */ bytes_consumed = 8; - structure_size = SIZEOF_RESOURCE (acpi_resource_io); + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_io); break; - case RESOURCE_DESC_FIXED_IO_PORT: + case ACPI_RDESC_TYPE_FIXED_IO_PORT: /* * Fixed IO Port Resource */ bytes_consumed = 4; - structure_size = SIZEOF_RESOURCE (acpi_resource_fixed_io); + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_fixed_io); break; - case RESOURCE_DESC_SMALL_VENDOR: + case ACPI_RDESC_TYPE_SMALL_VENDOR: /* * Vendor Specific Resource */ @@ -662,13 +677,13 @@ /* * Ensure a 32-bit boundary for the structure */ - temp8 = (u8) ROUND_UP_TO_32_bITS (temp8); - structure_size = SIZEOF_RESOURCE (acpi_resource_vendor) + + temp8 = (u8) ACPI_ROUND_UP_to_32_bITS (temp8); + structure_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_vendor) + (temp8 * sizeof (u8)); break; - case RESOURCE_DESC_END_TAG: + case ACPI_RDESC_TYPE_END_TAG: /* * End Tag */ @@ -684,14 +699,12 @@ * so exit with an error */ return_ACPI_STATUS (AE_AML_INVALID_RESOURCE_TYPE); - break; } - /* * Update the return value and counter */ - buffer_size += structure_size; + buffer_size += ACPI_ALIGN_RESOURCE_SIZE(structure_size); bytes_parsed += bytes_consumed; /* @@ -700,7 +713,6 @@ byte_stream_buffer += bytes_consumed; } - /* * This is the data the caller needs */ @@ -711,10 +723,10 @@ /******************************************************************************* * - * FUNCTION:
Acpi_rs_calculate_pci_routing_table_length + * FUNCTION: acpi_rs_get_pci_routing_table_length * - * PARAMETERS: Package_object - Pointer to the package object - * Buffer_size_needed - u32 pointer of the size buffer + * PARAMETERS: package_object - Pointer to the package object + * buffer_size_needed - u32 pointer of the size buffer * needed to properly return the * parsed data * @@ -727,21 +739,21 @@ ******************************************************************************/ acpi_status -acpi_rs_calculate_pci_routing_table_length ( - acpi_operand_object *package_object, - u32 *buffer_size_needed) +acpi_rs_get_pci_routing_table_length ( + union acpi_operand_object *package_object, + acpi_size *buffer_size_needed) { - u32 number_of_elements; - u32 temp_size_needed = 0; - acpi_operand_object **top_object_list; - u32 index; - acpi_operand_object *package_element; - acpi_operand_object **sub_object_list; - u8 name_found; - u32 table_index; + u32 number_of_elements; + acpi_size temp_size_needed = 0; + union acpi_operand_object **top_object_list; + u32 index; + union acpi_operand_object *package_element; + union acpi_operand_object **sub_object_list; + u8 name_found; + u32 table_index; - FUNCTION_TRACE ("Rs_calculate_pci_routing_table_length"); + ACPI_FUNCTION_TRACE ("rs_get_pci_routing_table_length"); number_of_elements = package_object->package.count; @@ -765,23 +777,22 @@ package_element = *top_object_list; /* - * The Sub_object_list will now point to an array of the - * four IRQ elements: Address, Pin, Source and Source_index + * The sub_object_list will now point to an array of the + * four IRQ elements: Address, Pin, Source and source_index */ sub_object_list = package_element->package.elements; /* - * Scan the Irq_table_elements for the Source Name String + * Scan the irq_table_elements for the Source Name String */ name_found = FALSE; for (table_index = 0; table_index < 4 && !name_found; table_index++) { - if ((ACPI_TYPE_STRING == (*sub_object_list)->common.type) || - ((INTERNAL_TYPE_REFERENCE == (*sub_object_list)->common.type) && + if ((ACPI_TYPE_STRING == ACPI_GET_OBJECT_TYPE (*sub_object_list)) || + ((ACPI_TYPE_LOCAL_REFERENCE == ACPI_GET_OBJECT_TYPE (*sub_object_list)) && ((*sub_object_list)->reference.opcode == AML_INT_NAMEPATH_OP))) { name_found = TRUE; } - else { /* * Look at the next element @@ -790,26 +801,24 @@ } } - temp_size_needed += (sizeof (pci_routing_table) - 4); + temp_size_needed += (sizeof (struct acpi_pci_routing_table) - 4); /* * Was a String type found? 
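 *
 * A worked example of the size accounting in this loop, using a
 * hypothetical Source name of 10 characters: the fixed part added above
 * contributes sizeof (struct acpi_pci_routing_table) - 4 bytes, the
 * string contributes 10 + 1 bytes including its terminating NULL, and
 * the running total is then rounded up to an 8-byte multiple by
 * ACPI_ROUND_UP_to_64_bITS before the next table entry is counted.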
*/ - if (TRUE == name_found) { - if (ACPI_TYPE_STRING == (*sub_object_list)->common.type) { + if (name_found) { + if (ACPI_GET_OBJECT_TYPE (*sub_object_list) == ACPI_TYPE_STRING) { /* - * The length String.Length field includes the - * terminating NULL + * The length String.Length field does not include the + * terminating NULL, add 1 */ - temp_size_needed += (*sub_object_list)->string.length; + temp_size_needed += ((acpi_size) (*sub_object_list)->string.length + 1); } - else { temp_size_needed += acpi_ns_get_pathname_length ( (*sub_object_list)->reference.node); } } - else { /* * If no name was found, then this is a NULL, which is @@ -820,18 +829,17 @@ /* Round up the size since each element must be aligned */ - temp_size_needed = ROUND_UP_TO_64_bITS (temp_size_needed); + temp_size_needed = ACPI_ROUND_UP_to_64_bITS (temp_size_needed); /* - * Point to the next acpi_operand_object + * Point to the next union acpi_operand_object */ top_object_list++; } - /* * Adding an extra element to the end of the list, essentially a NULL terminator */ - *buffer_size_needed = temp_size_needed + sizeof (pci_routing_table); + *buffer_size_needed = temp_size_needed + sizeof (struct acpi_pci_routing_table); return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rscreate.c linux.21rc5-ac2/drivers/acpi/resources/rscreate.c --- linux.21rc5/drivers/acpi/resources/rscreate.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rscreate.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,50 +1,67 @@ /******************************************************************************* * * Module Name: rscreate - Create resource lists/tables - * $Revision: 36 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acresrc.h" -#include "amlcode.h" -#include "acnamesp.h" +#include <acpi/acpi.h> +#include <acpi/acresrc.h> +#include <acpi/amlcode.h> +#include <acpi/acnamesp.h> #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rscreate") + ACPI_MODULE_NAME ("rscreate") /******************************************************************************* * - * FUNCTION: Acpi_rs_create_resource_list + * FUNCTION: acpi_rs_create_resource_list * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource byte stream - * Output_buffer - Pointer to the user's buffer - * Output_buffer_length - Pointer to the size of Output_buffer + * PARAMETERS: byte_stream_buffer - Pointer to the resource byte stream + * output_buffer - Pointer to the user's buffer * * RETURN: Status - AE_OK if okay, else a valid acpi_status code - * If Output_buffer is not large enough, Output_buffer_length - * indicates how large Output_buffer should be, else it - * indicates how may u8 elements of Output_buffer are valid. + * If output_buffer is not large enough, output_buffer_length + * indicates how large output_buffer should be, else it + * indicates how many u8 elements of output_buffer are valid.
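 *
 * A minimal caller sketch against the new struct acpi_buffer interface
 * (hypothetical caller code; the byte_stream_obj operand and the buffer
 * size are illustrative only):
 *
 *     struct acpi_buffer buffer;
 *     u8 workspace[512];
 *
 *     buffer.length  = sizeof (workspace);
 *     buffer.pointer = workspace;
 *
 *     status = acpi_rs_create_resource_list (byte_stream_obj, &buffer);
 *     if (status == AE_BUFFER_OVERFLOW) {
 *         /* buffer.length now reports the size that is required */
 *     }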
* * DESCRIPTION: Takes the byte stream returned from a _CRS, _PRS control method * execution and parses the stream to create a linked list @@ -54,21 +71,21 @@ acpi_status acpi_rs_create_resource_list ( - acpi_operand_object *byte_stream_buffer, - u8 *output_buffer, - u32 *output_buffer_length) + union acpi_operand_object *byte_stream_buffer, + struct acpi_buffer *output_buffer) { - acpi_status status; - u8 *byte_stream_start; - u32 list_size_needed = 0; - u32 byte_stream_buffer_length; + acpi_status status; + u8 *byte_stream_start; + acpi_size list_size_needed = 0; + u32 byte_stream_buffer_length; - FUNCTION_TRACE ("Rs_create_resource_list"); + ACPI_FUNCTION_TRACE ("rs_create_resource_list"); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Byte_stream_buffer = %p\n", byte_stream_buffer)); + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "byte_stream_buffer = %p\n", + byte_stream_buffer)); /* * Params already validated, so we don't re-validate here @@ -77,300 +94,280 @@ byte_stream_start = byte_stream_buffer->buffer.pointer; /* - * Pass the Byte_stream_buffer into a module that can calculate + * Pass the byte_stream_buffer into a module that can calculate * the buffer size needed for the linked list */ - status = acpi_rs_calculate_list_length (byte_stream_start, byte_stream_buffer_length, + status = acpi_rs_get_list_length (byte_stream_start, byte_stream_buffer_length, &list_size_needed); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Status=%X List_size_needed=%X\n", - status, list_size_needed)); - - /* - * Exit with the error passed back - */ + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Status=%X list_size_needed=%X\n", + status, (u32) list_size_needed)); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - /* - * If the linked list will fit into the available buffer - * call to fill in the list - */ - if (list_size_needed <= *output_buffer_length) { - /* - * Zero out the return buffer before proceeding - */ - MEMSET (output_buffer, 0x00, *output_buffer_length); - - status = acpi_rs_byte_stream_to_list (byte_stream_start, byte_stream_buffer_length, - &output_buffer); + /* Validate/Allocate/Clear caller buffer */ - /* - * Exit with the error passed back - */ - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Output_buffer = %p\n", output_buffer)); + status = acpi_ut_initialize_buffer (output_buffer, list_size_needed); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - else { - *output_buffer_length = list_size_needed; - return_ACPI_STATUS (AE_BUFFER_OVERFLOW); + /* Do the conversion */ + + status = acpi_rs_byte_stream_to_list (byte_stream_start, byte_stream_buffer_length, + output_buffer->pointer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - *output_buffer_length = list_size_needed; + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "output_buffer %p Length %X\n", + output_buffer->pointer, (u32) output_buffer->length)); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_create_pci_routing_table + * FUNCTION: acpi_rs_create_pci_routing_table * - * PARAMETERS: Package_object - Pointer to an acpi_operand_object + * PARAMETERS: package_object - Pointer to an union acpi_operand_object * package - * Output_buffer - Pointer to the user's buffer - * Output_buffer_length - Size of Output_buffer + * output_buffer - Pointer to the user's buffer * * RETURN: Status AE_OK if okay, else a valid acpi_status code. 
- * If the Output_buffer is too small, the error will be - * AE_BUFFER_OVERFLOW and Output_buffer_length will point + * If the output_buffer is too small, the error will be + * AE_BUFFER_OVERFLOW and output_buffer->Length will point * to the size buffer needed. * - * DESCRIPTION: Takes the acpi_operand_object package and creates a + * DESCRIPTION: Takes the union acpi_operand_object package and creates a * linked list of PCI interrupt descriptions * + * NOTE: It is the caller's responsibility to ensure that the start of the + * output buffer is aligned properly (if necessary). + * ******************************************************************************/ acpi_status acpi_rs_create_pci_routing_table ( - acpi_operand_object *package_object, - u8 *output_buffer, - u32 *output_buffer_length) + union acpi_operand_object *package_object, + struct acpi_buffer *output_buffer) { - u8 *buffer = output_buffer; - acpi_operand_object **top_object_list = NULL; - acpi_operand_object **sub_object_list = NULL; - acpi_operand_object *package_element = NULL; - u32 buffer_size_needed = 0; - u32 number_of_elements = 0; - u32 index = 0; - pci_routing_table *user_prt = NULL; - acpi_namespace_node *node; - acpi_status status; + u8 *buffer; + union acpi_operand_object **top_object_list; + union acpi_operand_object **sub_object_list; + union acpi_operand_object *obj_desc; + acpi_size buffer_size_needed = 0; + u32 number_of_elements; + u32 index; + struct acpi_pci_routing_table *user_prt; + struct acpi_namespace_node *node; + acpi_status status; + struct acpi_buffer path_buffer; - FUNCTION_TRACE ("Rs_create_pci_routing_table"); + ACPI_FUNCTION_TRACE ("rs_create_pci_routing_table"); + /* Params already validated, so we don't re-validate here */ + /* - * Params already validated, so we don't re-validate here + * Get the required buffer length */ - status = acpi_rs_calculate_pci_routing_table_length (package_object, + status = acpi_rs_get_pci_routing_table_length (package_object, &buffer_size_needed); - - if (!ACPI_SUCCESS(status)) { + if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Buffer_size_needed = %X\n", buffer_size_needed)); + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "buffer_size_needed = %X\n", + (u32) buffer_size_needed)); + + /* Validate/Allocate/Clear caller buffer */ + + status = acpi_ut_initialize_buffer (output_buffer, buffer_size_needed); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* - * If the data will fit into the available buffer - * call to fill in the list + * Loop through the ACPI_INTERNAL_OBJECTS - Each object + * should be a package that in turn contains an + * acpi_integer Address, a u8 Pin, a Name and a u8 source_index. 
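+ *
+ * For reference, each record emitted by this loop has the shape of
+ * struct acpi_pci_routing_table; the following is a sketch of the layout
+ * implied by the field accesses below, not a quotation of the header:
+ *
+ *     struct acpi_pci_routing_table {
+ *         u32             length;        /* size of this entry, 64-bit aligned */
+ *         u32             pin;
+ *         acpi_integer    address;
+ *         u32             source_index;
+ *         u8              source[4];     /* null-terminated name, grows past 4 */
+ *     };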
*/ - if (buffer_size_needed <= *output_buffer_length) { + top_object_list = package_object->package.elements; + number_of_elements = package_object->package.count; + buffer = output_buffer->pointer; + user_prt = ACPI_CAST_PTR (struct acpi_pci_routing_table, buffer); + + for (index = 0; index < number_of_elements; index++) { /* - * Zero out the return buffer before proceeding + * Point user_prt past this current structure + * + * NOTE: On the first iteration, user_prt->Length will + * be zero because we cleared the return buffer earlier */ - MEMSET (output_buffer, 0x00, *output_buffer_length); + buffer += user_prt->length; + user_prt = ACPI_CAST_PTR (struct acpi_pci_routing_table, buffer); /* - * Loop through the ACPI_INTERNAL_OBJECTS - Each object should - * contain a u32 Address, a u8 Pin, a Name and a u8 - * Source_index. + * Fill in the Length field with the information we have at this point. + * The minus four is to subtract the size of the u8 Source[4] member + * because it is added below. */ - top_object_list = package_object->package.elements; - number_of_elements = package_object->package.count; - user_prt = (pci_routing_table *) buffer; - - - buffer = ROUND_PTR_UP_TO_8 (buffer, u8); - - for (index = 0; index < number_of_elements; index++) { - /* - * Point User_prt past this current structure - * - * NOTE: On the first iteration, User_prt->Length will - * be zero because we cleared the return buffer earlier - */ - buffer += user_prt->length; - user_prt = (pci_routing_table *) buffer; + user_prt->length = (sizeof (struct acpi_pci_routing_table) - 4); + /* + * Each element of the top-level package must also be a package + */ + if (ACPI_GET_OBJECT_TYPE (*top_object_list) != ACPI_TYPE_PACKAGE) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "(PRT[%X]) Need sub-package, found %s\n", + index, acpi_ut_get_object_type_name (*top_object_list))); + return_ACPI_STATUS (AE_AML_OPERAND_TYPE); + } - /* - * Fill in the Length field with the information we - * have at this point. - * The minus four is to subtract the size of the - * u8 Source[4] member because it is added below. - */ - user_prt->length = (sizeof (pci_routing_table) -4); - - /* - * Dereference the sub-package - */ - package_element = *top_object_list; - - /* - * The Sub_object_list will now point to an array of - * the four IRQ elements: Address, Pin, Source and - * Source_index - */ - sub_object_list = package_element->package.elements; + /* Each sub-package must be of length 4 */ - /* - * 1) First subobject: Dereference the Address - */ - if (ACPI_TYPE_INTEGER == (*sub_object_list)->common.type) { - user_prt->address = (*sub_object_list)->integer.value; - } + if ((*top_object_list)->package.count != 4) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "(PRT[%X]) Need package of length 4, found length %d\n", + index, (*top_object_list)->package.count)); + return_ACPI_STATUS (AE_AML_PACKAGE_LIMIT); + } - else { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Need Integer, found %s\n", - acpi_ut_get_type_name ((*sub_object_list)->common.type))); - return_ACPI_STATUS (AE_BAD_DATA); - } + /* + * Dereference the sub-package. 
+ * The sub_object_list will now point to an array of the four IRQ + * elements: [Address, Pin, Source, source_index] + */ + sub_object_list = (*top_object_list)->package.elements; - /* - * 2) Second subobject: Dereference the Pin - */ - sub_object_list++; + /* + * 1) First subobject: Dereference the PRT.Address + */ + obj_desc = sub_object_list[0]; + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_INTEGER) { + user_prt->address = obj_desc->integer.value; + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "(PRT[%X].Address) Need Integer, found %s\n", + index, acpi_ut_get_object_type_name (obj_desc))); + return_ACPI_STATUS (AE_BAD_DATA); + } - if (ACPI_TYPE_INTEGER == (*sub_object_list)->common.type) { - user_prt->pin = (u32) (*sub_object_list)->integer.value; - } + /* + * 2) Second subobject: Dereference the PRT.Pin + */ + obj_desc = sub_object_list[1]; + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_INTEGER) { + user_prt->pin = (u32) obj_desc->integer.value; + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "(PRT[%X].Pin) Need Integer, found %s\n", + index, acpi_ut_get_object_type_name (obj_desc))); + return_ACPI_STATUS (AE_BAD_DATA); + } - else { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Need Integer, found %s\n", - acpi_ut_get_type_name ((*sub_object_list)->common.type))); + /* + * 3) Third subobject: Dereference the PRT.source_name + */ + obj_desc = sub_object_list[2]; + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_LOCAL_REFERENCE: + + if (obj_desc->reference.opcode != AML_INT_NAMEPATH_OP) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "(PRT[%X].Source) Need name, found reference op %X\n", + index, obj_desc->reference.opcode)); return_ACPI_STATUS (AE_BAD_DATA); } - /* - * 3) Third subobject: Dereference the Source Name - */ - sub_object_list++; + node = obj_desc->reference.node; - switch ((*sub_object_list)->common.type) { - case INTERNAL_TYPE_REFERENCE: + /* Use *remaining* length of the buffer as max for pathname */ - if ((*sub_object_list)->reference.opcode != AML_INT_NAMEPATH_OP) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Need name, found reference op %X\n", - (*sub_object_list)->reference.opcode)); - return_ACPI_STATUS (AE_BAD_DATA); - } + path_buffer.length = output_buffer->length - + (u32) ((u8 *) user_prt->source - + (u8 *) output_buffer->pointer); + path_buffer.pointer = user_prt->source; - node = (*sub_object_list)->reference.node; + status = acpi_ns_handle_to_pathname ((acpi_handle) node, &path_buffer); - /* TBD: use *remaining* length of the buffer! */ + user_prt->length += (u32) ACPI_STRLEN (user_prt->source) + 1; /* include null terminator */ + break; - status = acpi_ns_handle_to_pathname ((acpi_handle *) node, - output_buffer_length, user_prt->source); - user_prt->length += STRLEN (user_prt->source) + 1; /* include null terminator */ - break; + case ACPI_TYPE_STRING: + ACPI_STRCPY (user_prt->source, obj_desc->string.pointer); - case ACPI_TYPE_STRING: + /* Add to the Length field the length of the string (add 1 for terminator) */ - STRCPY (user_prt->source, - (*sub_object_list)->string.pointer); + user_prt->length += obj_desc->string.length + 1; + break; - /* - * Add to the Length field the length of the string - */ - user_prt->length += (*sub_object_list)->string.length; - break; - - - case ACPI_TYPE_INTEGER: - /* - * If this is a number, then the Source Name - * is NULL, since the entire buffer was zeroed - * out, we can leave this alone. 
- */ - /* - * Add to the Length field the length of - * the u32 NULL - */ - user_prt->length += sizeof (u32); - break; - - - default: - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Need Integer, found %s\n", - acpi_ut_get_type_name ((*sub_object_list)->common.type))); - return_ACPI_STATUS (AE_BAD_DATA); - break; - } - - /* Now align the current length */ - - user_prt->length = ROUND_UP_TO_64_bITS (user_prt->length); + case ACPI_TYPE_INTEGER: /* - * 4) Fourth subobject: Dereference the Source Index + * If this is a number, then the Source Name is NULL, since the + * entire buffer was zeroed out, we can leave this alone. + * + * Add to the Length field the length of the u32 NULL */ - sub_object_list++; + user_prt->length += sizeof (u32); + break; - if (ACPI_TYPE_INTEGER == (*sub_object_list)->common.type) { - user_prt->source_index = (u32) (*sub_object_list)->integer.value; - } - else { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Need Integer, found %s\n", - acpi_ut_get_type_name ((*sub_object_list)->common.type))); - return_ACPI_STATUS (AE_BAD_DATA); - } + default: - /* - * Point to the next acpi_operand_object - */ - top_object_list++; + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "(PRT[%X].Source) Need Ref/String/Integer, found %s\n", + index, acpi_ut_get_object_type_name (obj_desc))); + return_ACPI_STATUS (AE_BAD_DATA); } - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Output_buffer = %p\n", output_buffer)); - } + /* Now align the current length */ + + user_prt->length = ACPI_ROUND_UP_to_64_bITS (user_prt->length); + + /* + * 4) Fourth subobject: Dereference the PRT.source_index + */ + obj_desc = sub_object_list[3]; + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_INTEGER) { + user_prt->source_index = (u32) obj_desc->integer.value; + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "(PRT[%X].source_index) Need Integer, found %s\n", + index, acpi_ut_get_object_type_name (obj_desc))); + return_ACPI_STATUS (AE_BAD_DATA); + } - else { - *output_buffer_length = buffer_size_needed; + /* Point to the next union acpi_operand_object in the top level package */ - return_ACPI_STATUS (AE_BUFFER_OVERFLOW); + top_object_list++; } - /* - * Report the amount of buffer used - */ - *output_buffer_length = buffer_size_needed; + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "output_buffer %p Length %X\n", + output_buffer->pointer, (u32) output_buffer->length)); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_create_byte_stream + * FUNCTION: acpi_rs_create_byte_stream * - * PARAMETERS: Linked_list_buffer - Pointer to the resource linked list - * Output_buffer - Pointer to the user's buffer - * Output_buffer_length - Size of Output_buffer + * PARAMETERS: linked_list_buffer - Pointer to the resource linked list + * output_buffer - Pointer to the user's buffer * * RETURN: Status AE_OK if okay, else a valid acpi_status code. - * If the Output_buffer is too small, the error will be - * AE_BUFFER_OVERFLOW and Output_buffer_length will point + * If the output_buffer is too small, the error will be + * AE_BUFFER_OVERFLOW and output_buffer->Length will point * to the size buffer needed. 
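 *
 * Like the list and routing-table creators above, this routine follows
 * the same three-step shape (sketch, error handling elided):
 *
 *     status = acpi_rs_get_byte_stream_length (linked_list_buffer,
 *                  &byte_stream_size_needed);
 *     status = acpi_ut_initialize_buffer (output_buffer,
 *                  byte_stream_size_needed);
 *     status = acpi_rs_list_to_byte_stream (linked_list_buffer,
 *                  byte_stream_size_needed, output_buffer->pointer);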
* * DESCRIPTION: Takes the linked list of device resources and @@ -381,66 +378,51 @@ acpi_status acpi_rs_create_byte_stream ( - acpi_resource *linked_list_buffer, - u8 *output_buffer, - u32 *output_buffer_length) + struct acpi_resource *linked_list_buffer, + struct acpi_buffer *output_buffer) { - acpi_status status; - u32 byte_stream_size_needed = 0; + acpi_status status; + acpi_size byte_stream_size_needed = 0; - FUNCTION_TRACE ("Rs_create_byte_stream"); + ACPI_FUNCTION_TRACE ("rs_create_byte_stream"); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Linked_list_buffer = %p\n", linked_list_buffer)); + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "linked_list_buffer = %p\n", + linked_list_buffer)); /* * Params already validated, so we don't re-validate here * - * Pass the Linked_list_buffer into a module that can calculate + * Pass the linked_list_buffer into a module that calculates * the buffer size needed for the byte stream. */ - status = acpi_rs_calculate_byte_stream_length (linked_list_buffer, + status = acpi_rs_get_byte_stream_length (linked_list_buffer, &byte_stream_size_needed); - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Byte_stream_size_needed=%X, %s\n", - byte_stream_size_needed, acpi_format_exception (status))); - - /* - * Exit with the error passed back - */ + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "byte_stream_size_needed=%X, %s\n", + (u32) byte_stream_size_needed, acpi_format_exception (status))); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - /* - * If the linked list will fit into the available buffer - * call to fill in the list - */ - if (byte_stream_size_needed <= *output_buffer_length) { - /* - * Zero out the return buffer before proceeding - */ - MEMSET (output_buffer, 0x00, *output_buffer_length); - - status = acpi_rs_list_to_byte_stream (linked_list_buffer, byte_stream_size_needed, - &output_buffer); + /* Validate/Allocate/Clear caller buffer */ - /* - * Exit with the error passed back - */ - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Output_buffer = %p\n", output_buffer)); + status = acpi_ut_initialize_buffer (output_buffer, byte_stream_size_needed); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - else { - *output_buffer_length = byte_stream_size_needed; - return_ACPI_STATUS (AE_BUFFER_OVERFLOW); + /* Do the conversion */ + + status = acpi_rs_list_to_byte_stream (linked_list_buffer, byte_stream_size_needed, + output_buffer->pointer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "output_buffer %p Length %X\n", + output_buffer->pointer, (u32) output_buffer->length)); return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rsdump.c linux.21rc5-ac2/drivers/acpi/resources/rsdump.c --- linux.21rc5/drivers/acpi/resources/rsdump.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rsdump.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,41 +1,59 @@ /******************************************************************************* * * Module Name: rsdump - Functions to display the resource structures. - * $Revision: 23 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. 
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acresrc.h" +#include <acpi/acpi.h> +#include <acpi/acresrc.h> #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rsdump") + ACPI_MODULE_NAME ("rsdump") -#ifdef ACPI_DEBUG +#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_irq + * FUNCTION: acpi_rs_dump_irq * * PARAMETERS: Data - pointer to the resource structure to dump. * @@ -47,25 +65,25 @@ void acpi_rs_dump_irq ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_irq *irq_data = (acpi_resource_irq *) data; - u8 index = 0; + struct acpi_resource_irq *irq_data = (struct acpi_resource_irq *) data; + u8 index = 0; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("IRQ Resource\n"); acpi_os_printf (" %s Triggered\n", - LEVEL_SENSITIVE == irq_data->edge_level ?
"Level" : "Edge"); acpi_os_printf (" Active %s\n", - ACTIVE_LOW == irq_data->active_high_low ? "Low" : "High"); + ACPI_ACTIVE_LOW == irq_data->active_high_low ? "Low" : "High"); acpi_os_printf (" %s\n", - SHARED == irq_data->shared_exclusive ? "Shared" : "Exclusive"); + ACPI_SHARED == irq_data->shared_exclusive ? "Shared" : "Exclusive"); acpi_os_printf (" %X Interrupts ( ", irq_data->number_of_interrupts); @@ -80,7 +98,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_dma + * FUNCTION: acpi_rs_dump_dma * * PARAMETERS: Data - pointer to the resource structure to dump. * @@ -92,31 +110,31 @@ void acpi_rs_dump_dma ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_dma *dma_data = (acpi_resource_dma *) data; - u8 index = 0; + struct acpi_resource_dma *dma_data = (struct acpi_resource_dma *) data; + u8 index = 0; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("DMA Resource\n"); switch (dma_data->type) { - case COMPATIBILITY: + case ACPI_COMPATIBILITY: acpi_os_printf (" Compatibility mode\n"); break; - case TYPE_A: + case ACPI_TYPE_A: acpi_os_printf (" Type A\n"); break; - case TYPE_B: + case ACPI_TYPE_B: acpi_os_printf (" Type B\n"); break; - case TYPE_F: + case ACPI_TYPE_F: acpi_os_printf (" Type F\n"); break; @@ -126,19 +144,19 @@ } acpi_os_printf (" %sBus Master\n", - BUS_MASTER == dma_data->bus_master ? "" : "Not a "); + ACPI_BUS_MASTER == dma_data->bus_master ? "" : "Not a "); switch (dma_data->transfer) { - case TRANSFER_8: + case ACPI_TRANSFER_8: acpi_os_printf (" 8-bit only transfer\n"); break; - case TRANSFER_8_16: + case ACPI_TRANSFER_8_16: acpi_os_printf (" 8 and 16-bit transfer\n"); break; - case TRANSFER_16: + case ACPI_TRANSFER_16: acpi_os_printf (" 16 bit only transfer\n"); break; @@ -160,7 +178,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_start_dependent_functions + * FUNCTION: acpi_rs_dump_start_depend_fns * * PARAMETERS: Data - pointer to the resource structure to dump. 
* @@ -171,27 +189,27 @@ ******************************************************************************/ void -acpi_rs_dump_start_dependent_functions ( - acpi_resource_data *data) +acpi_rs_dump_start_depend_fns ( + union acpi_resource_data *data) { - acpi_resource_start_dpf *sdf_data = (acpi_resource_start_dpf *) data; + struct acpi_resource_start_dpf *sdf_data = (struct acpi_resource_start_dpf *) data; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("Start Dependent Functions Resource\n"); switch (sdf_data->compatibility_priority) { - case GOOD_CONFIGURATION: + case ACPI_GOOD_CONFIGURATION: acpi_os_printf (" Good configuration\n"); break; - case ACCEPTABLE_CONFIGURATION: + case ACPI_ACCEPTABLE_CONFIGURATION: acpi_os_printf (" Acceptable configuration\n"); break; - case SUB_OPTIMAL_CONFIGURATION: + case ACPI_SUB_OPTIMAL_CONFIGURATION: acpi_os_printf (" Sub-optimal configuration\n"); break; @@ -201,15 +219,15 @@ } switch(sdf_data->performance_robustness) { - case GOOD_CONFIGURATION: + case ACPI_GOOD_CONFIGURATION: acpi_os_printf (" Good configuration\n"); break; - case ACCEPTABLE_CONFIGURATION: + case ACPI_ACCEPTABLE_CONFIGURATION: acpi_os_printf (" Acceptable configuration\n"); break; - case SUB_OPTIMAL_CONFIGURATION: + case ACPI_SUB_OPTIMAL_CONFIGURATION: acpi_os_printf (" Sub-optimal configuration\n"); break; @@ -225,7 +243,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_io + * FUNCTION: acpi_rs_dump_io * * PARAMETERS: Data - pointer to the resource structure to dump. * @@ -237,18 +255,18 @@ void acpi_rs_dump_io ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_io *io_data = (acpi_resource_io *) data; + struct acpi_resource_io *io_data = (struct acpi_resource_io *) data; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("Io Resource\n"); acpi_os_printf (" %d bit decode\n", - DECODE_16 == io_data->io_decode ? 16 : 10); + ACPI_DECODE_16 == io_data->io_decode ? 16 : 10); acpi_os_printf (" Range minimum base: %08X\n", io_data->min_base_address); @@ -268,7 +286,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_fixed_io + * FUNCTION: acpi_rs_dump_fixed_io * * PARAMETERS: Data - pointer to the resource structure to dump. * @@ -280,12 +298,12 @@ void acpi_rs_dump_fixed_io ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_fixed_io *fixed_io_data = (acpi_resource_fixed_io *) data; + struct acpi_resource_fixed_io *fixed_io_data = (struct acpi_resource_fixed_io *) data; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("Fixed Io Resource\n"); @@ -301,7 +319,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_vendor_specific + * FUNCTION: acpi_rs_dump_vendor_specific * * PARAMETERS: Data - pointer to the resource structure to dump. 
* @@ -313,13 +331,13 @@ void acpi_rs_dump_vendor_specific ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_vendor *vendor_data = (acpi_resource_vendor *) data; - u16 index = 0; + struct acpi_resource_vendor *vendor_data = (struct acpi_resource_vendor *) data; + u16 index = 0; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("Vendor Specific Resource\n"); @@ -337,7 +355,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_memory24 + * FUNCTION: acpi_rs_dump_memory24 * * PARAMETERS: Data - pointer to the resource structure to dump. * @@ -349,18 +367,18 @@ void acpi_rs_dump_memory24 ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_mem24 *memory24_data = (acpi_resource_mem24 *) data; + struct acpi_resource_mem24 *memory24_data = (struct acpi_resource_mem24 *) data; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("24-Bit Memory Range Resource\n"); acpi_os_printf (" Read%s\n", - READ_WRITE_MEMORY == + ACPI_READ_WRITE_MEMORY == memory24_data->read_write_attribute ? "/Write" : " only"); @@ -382,7 +400,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_memory32 + * FUNCTION: acpi_rs_dump_memory32 * * PARAMETERS: Data - pointer to the resource structure to dump. * @@ -394,18 +412,18 @@ void acpi_rs_dump_memory32 ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_mem32 *memory32_data = (acpi_resource_mem32 *) data; + struct acpi_resource_mem32 *memory32_data = (struct acpi_resource_mem32 *) data; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("32-Bit Memory Range Resource\n"); acpi_os_printf (" Read%s\n", - READ_WRITE_MEMORY == + ACPI_READ_WRITE_MEMORY == memory32_data->read_write_attribute ? "/Write" : " only"); @@ -427,7 +445,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_fixed_memory32 + * FUNCTION: acpi_rs_dump_fixed_memory32 * * PARAMETERS: Data - pointer to the resource structure to dump. * @@ -439,18 +457,18 @@ void acpi_rs_dump_fixed_memory32 ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_fixed_mem32 *fixed_memory32_data = (acpi_resource_fixed_mem32 *) data; + struct acpi_resource_fixed_mem32 *fixed_memory32_data = (struct acpi_resource_fixed_mem32 *) data; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("32-Bit Fixed Location Memory Range Resource\n"); acpi_os_printf (" Read%s\n", - READ_WRITE_MEMORY == + ACPI_READ_WRITE_MEMORY == fixed_memory32_data->read_write_attribute ? "/Write" : " Only"); @@ -466,7 +484,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_address16 + * FUNCTION: acpi_rs_dump_address16 * * PARAMETERS: Data - pointer to the resource structure to dump. 
* @@ -478,39 +496,39 @@ void acpi_rs_dump_address16 ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_address16 *address16_data = (acpi_resource_address16 *) data; + struct acpi_resource_address16 *address16_data = (struct acpi_resource_address16 *) data; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("16-Bit Address Space Resource\n"); acpi_os_printf (" Resource Type: "); switch (address16_data->resource_type) { - case MEMORY_RANGE: + case ACPI_MEMORY_RANGE: acpi_os_printf ("Memory Range\n"); switch (address16_data->attribute.memory.cache_attribute) { - case NON_CACHEABLE_MEMORY: + case ACPI_NON_CACHEABLE_MEMORY: acpi_os_printf (" Type Specific: " "Noncacheable memory\n"); break; - case CACHABLE_MEMORY: + case ACPI_CACHABLE_MEMORY: acpi_os_printf (" Type Specific: " "Cacheable memory\n"); break; - case WRITE_COMBINING_MEMORY: + case ACPI_WRITE_COMBINING_MEMORY: acpi_os_printf (" Type Specific: " "Write-combining memory\n"); break; - case PREFETCHABLE_MEMORY: + case ACPI_PREFETCHABLE_MEMORY: acpi_os_printf (" Type Specific: " "Prefetchable memory\n"); break; @@ -522,27 +540,27 @@ } acpi_os_printf (" Type Specific: Read%s\n", - READ_WRITE_MEMORY == + ACPI_READ_WRITE_MEMORY == address16_data->attribute.memory.read_write_attribute ? "/Write" : " Only"); break; - case IO_RANGE: + case ACPI_IO_RANGE: acpi_os_printf ("I/O Range\n"); switch (address16_data->attribute.io.range_attribute) { - case NON_ISA_ONLY_RANGES: + case ACPI_NON_ISA_ONLY_RANGES: acpi_os_printf (" Type Specific: " "Non-ISA Io Addresses\n"); break; - case ISA_ONLY_RANGES: + case ACPI_ISA_ONLY_RANGES: acpi_os_printf (" Type Specific: " "ISA Io Addresses\n"); break; - case ENTIRE_RANGE: + case ACPI_ENTIRE_RANGE: acpi_os_printf (" Type Specific: " "ISA and non-ISA Io Addresses\n"); break; @@ -552,9 +570,14 @@ "Invalid range attribute\n"); break; } + + acpi_os_printf (" Type Specific: %s Translation\n", + ACPI_SPARSE_TRANSLATION == + address16_data->attribute.io.translation_attribute ? + "Sparse" : "Dense"); break; - case BUS_NUMBER_RANGE: + case ACPI_BUS_NUMBER_RANGE: acpi_os_printf ("Bus Number Range\n"); break; @@ -566,19 +589,19 @@ } acpi_os_printf (" Resource %s\n", - CONSUMER == address16_data->producer_consumer ? + ACPI_CONSUMER == address16_data->producer_consumer ? "Consumer" : "Producer"); acpi_os_printf (" %s decode\n", - SUB_DECODE == address16_data->decode ? + ACPI_SUB_DECODE == address16_data->decode ? "Subtractive" : "Positive"); acpi_os_printf (" Min address is %s fixed\n", - ADDRESS_FIXED == address16_data->min_address_fixed ? + ACPI_ADDRESS_FIXED == address16_data->min_address_fixed ? "" : "not"); acpi_os_printf (" Max address is %s fixed\n", - ADDRESS_FIXED == address16_data->max_address_fixed ? + ACPI_ADDRESS_FIXED == address16_data->max_address_fixed ? "" : "not"); acpi_os_printf (" Granularity: %08X\n", @@ -609,7 +632,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_address32 + * FUNCTION: acpi_rs_dump_address32 * * PARAMETERS: Data - pointer to the resource structure to dump. 
* @@ -621,38 +644,38 @@ void acpi_rs_dump_address32 ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_address32 *address32_data = (acpi_resource_address32 *) data; + struct acpi_resource_address32 *address32_data = (struct acpi_resource_address32 *) data; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("32-Bit Address Space Resource\n"); switch (address32_data->resource_type) { - case MEMORY_RANGE: + case ACPI_MEMORY_RANGE: acpi_os_printf (" Resource Type: Memory Range\n"); switch (address32_data->attribute.memory.cache_attribute) { - case NON_CACHEABLE_MEMORY: + case ACPI_NON_CACHEABLE_MEMORY: acpi_os_printf (" Type Specific: " "Noncacheable memory\n"); break; - case CACHABLE_MEMORY: + case ACPI_CACHABLE_MEMORY: acpi_os_printf (" Type Specific: " "Cacheable memory\n"); break; - case WRITE_COMBINING_MEMORY: + case ACPI_WRITE_COMBINING_MEMORY: acpi_os_printf (" Type Specific: " "Write-combining memory\n"); break; - case PREFETCHABLE_MEMORY: + case ACPI_PREFETCHABLE_MEMORY: acpi_os_printf (" Type Specific: " "Prefetchable memory\n"); break; @@ -664,39 +687,44 @@ } acpi_os_printf (" Type Specific: Read%s\n", - READ_WRITE_MEMORY == + ACPI_READ_WRITE_MEMORY == address32_data->attribute.memory.read_write_attribute ? "/Write" : " Only"); break; - case IO_RANGE: + case ACPI_IO_RANGE: acpi_os_printf (" Resource Type: Io Range\n"); switch (address32_data->attribute.io.range_attribute) { - case NON_ISA_ONLY_RANGES: - acpi_os_printf (" Type Specific: " - "Non-ISA Io Addresses\n"); - break; + case ACPI_NON_ISA_ONLY_RANGES: + acpi_os_printf (" Type Specific: " + "Non-ISA Io Addresses\n"); + break; - case ISA_ONLY_RANGES: - acpi_os_printf (" Type Specific: " - "ISA Io Addresses\n"); - break; + case ACPI_ISA_ONLY_RANGES: + acpi_os_printf (" Type Specific: " + "ISA Io Addresses\n"); + break; - case ENTIRE_RANGE: - acpi_os_printf (" Type Specific: " - "ISA and non-ISA Io Addresses\n"); - break; + case ACPI_ENTIRE_RANGE: + acpi_os_printf (" Type Specific: " + "ISA and non-ISA Io Addresses\n"); + break; - default: - acpi_os_printf (" Type Specific: " - "Invalid Range attribute"); - break; - } + default: + acpi_os_printf (" Type Specific: " + "Invalid Range attribute"); + break; + } + + acpi_os_printf (" Type Specific: %s Translation\n", + ACPI_SPARSE_TRANSLATION == + address32_data->attribute.io.translation_attribute ? + "Sparse" : "Dense"); break; - case BUS_NUMBER_RANGE: + case ACPI_BUS_NUMBER_RANGE: acpi_os_printf (" Resource Type: Bus Number Range\n"); break; @@ -708,19 +736,19 @@ } acpi_os_printf (" Resource %s\n", - CONSUMER == address32_data->producer_consumer ? + ACPI_CONSUMER == address32_data->producer_consumer ? "Consumer" : "Producer"); acpi_os_printf (" %s decode\n", - SUB_DECODE == address32_data->decode ? + ACPI_SUB_DECODE == address32_data->decode ? "Subtractive" : "Positive"); acpi_os_printf (" Min address is %s fixed\n", - ADDRESS_FIXED == address32_data->min_address_fixed ? + ACPI_ADDRESS_FIXED == address32_data->min_address_fixed ? "" : "not "); acpi_os_printf (" Max address is %s fixed\n", - ADDRESS_FIXED == address32_data->max_address_fixed ? + ACPI_ADDRESS_FIXED == address32_data->max_address_fixed ? "" : "not "); acpi_os_printf (" Granularity: %08X\n", @@ -751,7 +779,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_address64 + * FUNCTION: acpi_rs_dump_address64 * * PARAMETERS: Data - pointer to the resource structure to dump. 
 * @@ -763,38 +791,38 @@ void acpi_rs_dump_address64 ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_address64 *address64_data = (acpi_resource_address64 *) data; + struct acpi_resource_address64 *address64_data = (struct acpi_resource_address64 *) data; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("64-Bit Address Space Resource\n"); switch (address64_data->resource_type) { - case MEMORY_RANGE: + case ACPI_MEMORY_RANGE: acpi_os_printf (" Resource Type: Memory Range\n"); switch (address64_data->attribute.memory.cache_attribute) { - case NON_CACHEABLE_MEMORY: + case ACPI_NON_CACHEABLE_MEMORY: acpi_os_printf (" Type Specific: " "Noncacheable memory\n"); break; - case CACHABLE_MEMORY: + case ACPI_CACHABLE_MEMORY: acpi_os_printf (" Type Specific: " "Cacheable memory\n"); break; - case WRITE_COMBINING_MEMORY: + case ACPI_WRITE_COMBINING_MEMORY: acpi_os_printf (" Type Specific: " "Write-combining memory\n"); break; - case PREFETCHABLE_MEMORY: + case ACPI_PREFETCHABLE_MEMORY: acpi_os_printf (" Type Specific: " "Prefetchable memory\n"); break; @@ -806,39 +834,44 @@ } acpi_os_printf (" Type Specific: Read%s\n", - READ_WRITE_MEMORY == + ACPI_READ_WRITE_MEMORY == address64_data->attribute.memory.read_write_attribute ? "/Write" : " Only"); break; - case IO_RANGE: + case ACPI_IO_RANGE: acpi_os_printf (" Resource Type: Io Range\n"); switch (address64_data->attribute.io.range_attribute) { - case NON_ISA_ONLY_RANGES: - acpi_os_printf (" Type Specific: " - "Non-ISA Io Addresses\n"); - break; + case ACPI_NON_ISA_ONLY_RANGES: + acpi_os_printf (" Type Specific: " + "Non-ISA Io Addresses\n"); + break; - case ISA_ONLY_RANGES: - acpi_os_printf (" Type Specific: " - "ISA Io Addresses\n"); - break; + case ACPI_ISA_ONLY_RANGES: + acpi_os_printf (" Type Specific: " + "ISA Io Addresses\n"); + break; - case ENTIRE_RANGE: - acpi_os_printf (" Type Specific: " - "ISA and non-ISA Io Addresses\n"); - break; + case ACPI_ENTIRE_RANGE: + acpi_os_printf (" Type Specific: " + "ISA and non-ISA Io Addresses\n"); + break; - default: - acpi_os_printf (" Type Specific: " - "Invalid Range attribute"); - break; - } + default: + acpi_os_printf (" Type Specific: " + "Invalid Range attribute"); + break; + } + + acpi_os_printf (" Type Specific: %s Translation\n", + ACPI_SPARSE_TRANSLATION == + address64_data->attribute.io.translation_attribute ? + "Sparse" : "Dense"); break; - case BUS_NUMBER_RANGE: + case ACPI_BUS_NUMBER_RANGE: acpi_os_printf (" Resource Type: Bus Number Range\n"); break; @@ -850,35 +883,40 @@ } acpi_os_printf (" Resource %s\n", - CONSUMER == address64_data->producer_consumer ? + ACPI_CONSUMER == address64_data->producer_consumer ? "Consumer" : "Producer"); acpi_os_printf (" %s decode\n", - SUB_DECODE == address64_data->decode ? + ACPI_SUB_DECODE == address64_data->decode ? "Subtractive" : "Positive"); acpi_os_printf (" Min address is %s fixed\n", - ADDRESS_FIXED == address64_data->min_address_fixed ? + ACPI_ADDRESS_FIXED == address64_data->min_address_fixed ? "" : "not "); acpi_os_printf (" Max address is %s fixed\n", - ADDRESS_FIXED == address64_data->max_address_fixed ? + ACPI_ADDRESS_FIXED == address64_data->max_address_fixed ?
"" : "not "); - acpi_os_printf (" Granularity: %16X\n", - address64_data->granularity); - - acpi_os_printf (" Address range min: %16X\n", - address64_data->min_address_range); - - acpi_os_printf (" Address range max: %16X\n", - address64_data->max_address_range); - - acpi_os_printf (" Address translation offset: %16X\n", - address64_data->address_translation_offset); - - acpi_os_printf (" Address Length: %16X\n", - address64_data->address_length); + acpi_os_printf (" Granularity: %8.8X%8.8X\n", + ACPI_HIDWORD (address64_data->granularity), + ACPI_LODWORD (address64_data->granularity)); + + acpi_os_printf (" Address range min: %8.8X%8.8X\n", + ACPI_HIDWORD (address64_data->min_address_range), + ACPI_LODWORD (address64_data->min_address_range)); + + acpi_os_printf (" Address range max: %8.8X%8.8X\n", + ACPI_HIDWORD (address64_data->max_address_range), + ACPI_LODWORD (address64_data->max_address_range)); + + acpi_os_printf (" Address translation offset: %8.8X%8.8X\n", + ACPI_HIDWORD (address64_data->address_translation_offset), + ACPI_LODWORD (address64_data->address_translation_offset)); + + acpi_os_printf (" Address Length: %8.8X%8.8X\n", + ACPI_HIDWORD (address64_data->address_length), + ACPI_LODWORD (address64_data->address_length)); if(0xFF != address64_data->resource_source.index) { acpi_os_printf (" Resource Source Index: %X\n", @@ -893,7 +931,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_extended_irq + * FUNCTION: acpi_rs_dump_extended_irq * * PARAMETERS: Data - pointer to the resource structure to dump. * @@ -905,31 +943,31 @@ void acpi_rs_dump_extended_irq ( - acpi_resource_data *data) + union acpi_resource_data *data) { - acpi_resource_ext_irq *ext_irq_data = (acpi_resource_ext_irq *) data; - u8 index = 0; + struct acpi_resource_ext_irq *ext_irq_data = (struct acpi_resource_ext_irq *) data; + u8 index = 0; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); acpi_os_printf ("Extended IRQ Resource\n"); acpi_os_printf (" Resource %s\n", - CONSUMER == ext_irq_data->producer_consumer ? + ACPI_CONSUMER == ext_irq_data->producer_consumer ? "Consumer" : "Producer"); acpi_os_printf (" %s\n", - LEVEL_SENSITIVE == ext_irq_data->edge_level ? + ACPI_LEVEL_SENSITIVE == ext_irq_data->edge_level ? "Level" : "Edge"); acpi_os_printf (" Active %s\n", - ACTIVE_LOW == ext_irq_data->active_high_low ? + ACPI_ACTIVE_LOW == ext_irq_data->active_high_low ? "low" : "high"); acpi_os_printf (" %s\n", - SHARED == ext_irq_data->shared_exclusive ? + ACPI_SHARED == ext_irq_data->shared_exclusive ? "Shared" : "Exclusive"); acpi_os_printf (" Interrupts : %X ( ", @@ -954,7 +992,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_resource_list + * FUNCTION: acpi_rs_dump_resource_list * * PARAMETERS: Data - pointer to the resource structure to dump. 
* @@ -966,18 +1004,18 @@ void acpi_rs_dump_resource_list ( - acpi_resource *resource) + struct acpi_resource *resource) { - u8 count = 0; - u8 done = FALSE; + u8 count = 0; + u8 done = FALSE; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); if (acpi_dbg_level & ACPI_LV_RESOURCES && _COMPONENT & acpi_dbg_layer) { while (!done) { - acpi_os_printf ("Resource structure %x.\n", count++); + acpi_os_printf ("Resource structure %X.\n", count++); switch (resource->id) { case ACPI_RSTYPE_IRQ: @@ -989,12 +1027,12 @@ break; case ACPI_RSTYPE_START_DPF: - acpi_rs_dump_start_dependent_functions (&resource->data); + acpi_rs_dump_start_depend_fns (&resource->data); break; case ACPI_RSTYPE_END_DPF: - acpi_os_printf ("End_dependent_functions Resource\n"); - /* Acpi_rs_dump_end_dependent_functions (Resource->Data);*/ + acpi_os_printf ("end_dependent_functions Resource\n"); + /* acpi_rs_dump_end_dependent_functions (Resource->Data);*/ break; case ACPI_RSTYPE_IO: @@ -1010,8 +1048,8 @@ break; case ACPI_RSTYPE_END_TAG: - /*Rs_dump_end_tag (Resource->Data);*/ - acpi_os_printf ("End_tag Resource\n"); + /*rs_dump_end_tag (Resource->Data);*/ + acpi_os_printf ("end_tag Resource\n"); done = TRUE; break; @@ -1049,7 +1087,7 @@ } - resource = POINTER_ADD (acpi_resource, resource, resource->length); + resource = ACPI_PTR_ADD (struct acpi_resource, resource, resource->length); } } @@ -1058,7 +1096,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dump_irq_list + * FUNCTION: acpi_rs_dump_irq_list * * PARAMETERS: Data - pointer to the routing table to dump. * @@ -1070,36 +1108,37 @@ void acpi_rs_dump_irq_list ( - u8 *route_table) + u8 *route_table) { - u8 *buffer = route_table; - u8 count = 0; - u8 done = FALSE; - pci_routing_table *prt_element; + u8 *buffer = route_table; + u8 count = 0; + u8 done = FALSE; + struct acpi_pci_routing_table *prt_element; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); if (acpi_dbg_level & ACPI_LV_RESOURCES && _COMPONENT & acpi_dbg_layer) { - prt_element = (pci_routing_table *) buffer; + prt_element = ACPI_CAST_PTR (struct acpi_pci_routing_table, buffer); while (!done) { acpi_os_printf ("PCI IRQ Routing Table structure %X.\n", count++); - acpi_os_printf (" Address: %X\n", - prt_element->address); + acpi_os_printf (" Address: %8.8X%8.8X\n", + ACPI_HIDWORD (prt_element->address), + ACPI_LODWORD (prt_element->address)); acpi_os_printf (" Pin: %X\n", prt_element->pin); acpi_os_printf (" Source: %s\n", prt_element->source); - acpi_os_printf (" Source_index: %X\n", + acpi_os_printf (" source_index: %X\n", prt_element->source_index); buffer += prt_element->length; - prt_element = (pci_routing_table *) buffer; + prt_element = ACPI_CAST_PTR (struct acpi_pci_routing_table, buffer); if(0 == prt_element->length) { done = TRUE; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rsio.c linux.21rc5-ac2/drivers/acpi/resources/rsio.c --- linux.21rc5/drivers/acpi/resources/rsio.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rsio.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,73 +1,90 @@ /******************************************************************************* * * Module Name: rsio - IO and DMA resource descriptors - * $Revision: 14 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. 
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "acresrc.h" +#include <acpi/acpi.h> +#include <acpi/acresrc.h> #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rsio") + ACPI_MODULE_NAME ("rsio") /******************************************************************************* * - * FUNCTION: Acpi_rs_io_resource + * FUNCTION: acpi_rs_io_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream. * ******************************************************************************/ acpi_status acpi_rs_io_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_io); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_io); - FUNCTION_TRACE ("Rs_io_resource"); + ACPI_FUNCTION_TRACE ("rs_io_resource"); /* @@ -86,18 +103,18 @@ output_struct->data.io.io_decode = temp8 & 0x01; /* - * Check Min_base Address + * Check min_base Address */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); output_struct->data.io.min_base_address = temp16; /* - * Check Max_base Address + * Check max_base Address */ buffer += 2; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); output_struct->data.io.max_base_address = temp16; @@ -110,7 +127,7 @@ output_struct->data.io.alignment = temp8; /* - * Check Range_length + * Check range_length */ buffer += 1; temp8 = *buffer; @@ -120,7 +137,7 @@ /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -132,41 +149,40 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_fixed_io_resource + * FUNCTION: acpi_rs_fixed_io_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * 
consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream. * ******************************************************************************/ acpi_status acpi_rs_fixed_io_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_fixed_io); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_fixed_io); - FUNCTION_TRACE ("Rs_fixed_io_resource"); + ACPI_FUNCTION_TRACE ("rs_fixed_io_resource"); /* @@ -180,12 +196,12 @@ * Check Range Base Address */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); output_struct->data.fixed_io.base_address = temp16; /* - * Check Range_length + * Check range_length */ buffer += 2; temp8 = *buffer; @@ -195,7 +211,7 @@ /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -207,13 +223,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_io_stream + * FUNCTION: acpi_rs_io_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -224,16 +239,16 @@ acpi_status acpi_rs_io_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; + u8 *buffer = *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; - FUNCTION_TRACE ("Rs_io_stream"); + ACPI_FUNCTION_TRACE ("rs_io_stream"); /* @@ -255,7 +270,7 @@ */ temp16 = (u16) linked_list->data.io.min_base_address; - MOVE_UNALIGNED16_TO_16 (buffer, &temp16); + ACPI_MOVE_16_TO_16 (buffer, &temp16); buffer += 2; /* @@ -263,7 +278,7 @@ */ temp16 = (u16) linked_list->data.io.max_base_address; - MOVE_UNALIGNED16_TO_16 (buffer, &temp16); + ACPI_MOVE_16_TO_16 (buffer, &temp16); buffer += 2; /* @@ -285,20 +300,19 @@ /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_fixed_io_stream + * FUNCTION: 
acpi_rs_fixed_io_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -309,16 +323,16 @@ acpi_status acpi_rs_fixed_io_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; + u8 *buffer = *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; - FUNCTION_TRACE ("Rs_fixed_io_stream"); + ACPI_FUNCTION_TRACE ("rs_fixed_io_stream"); /* @@ -333,7 +347,7 @@ */ temp16 = (u16) linked_list->data.fixed_io.base_address; - MOVE_UNALIGNED16_TO_16 (buffer, &temp16); + ACPI_MOVE_16_TO_16 (buffer, &temp16); buffer += 2; /* @@ -347,49 +361,48 @@ /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_dma_resource + * FUNCTION: acpi_rs_dma_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream. 
* ******************************************************************************/ acpi_status acpi_rs_dma_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u8 temp8 = 0; - u8 index; - u8 i; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_dma); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u8 temp8 = 0; + u8 index; + u8 i; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_dma); - FUNCTION_TRACE ("Rs_dma_resource"); + ACPI_FUNCTION_TRACE ("rs_dma_resource"); /* @@ -404,7 +417,7 @@ buffer += 1; temp8 = *buffer; - /* Decode the IRQ bits */ + /* Decode the DMA channel bits */ for (i = 0, index = 0; index < 8; index++) { if ((temp8 >> index) & 0x01) { @@ -412,13 +425,16 @@ i++; } } - output_struct->data.dma.number_of_channels = i; + /* Zero DMA channels is valid */ - /* - * Calculate the structure size based upon the number of interrupts - */ - struct_size += (output_struct->data.dma.number_of_channels - 1) * 4; + output_struct->data.dma.number_of_channels = i; + if (i > 0) { + /* + * Calculate the structure size based upon the number of interrupts + */ + struct_size += ((acpi_size) i - 1) * 4; + } /* * Point to Byte 2 @@ -432,6 +448,7 @@ output_struct->data.dma.transfer = temp8 & 0x03; if (0x03 == output_struct->data.dma.transfer) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid DMA.Transfer preference (3)\n")); return_ACPI_STATUS (AE_BAD_DATA); } @@ -448,7 +465,7 @@ /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -460,13 +477,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_dma_stream + * FUNCTION: acpi_rs_dma_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -477,17 +493,17 @@ acpi_status acpi_rs_dma_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u8 index; + u8 *buffer = *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + u8 index; - FUNCTION_TRACE ("Rs_dma_stream"); + ACPI_FUNCTION_TRACE ("rs_dma_stream"); /* @@ -523,7 +539,7 @@ /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rsirq.c linux.21rc5-ac2/drivers/acpi/resources/rsirq.c --- linux.21rc5/drivers/acpi/resources/rsirq.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rsirq.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,75 
+1,92 @@ /******************************************************************************* * * Module Name: rsirq - IRQ resource descriptors - * $Revision: 18 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "acresrc.h" +#include <acpi/acpi.h> +#include <acpi/acresrc.h> #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rsirq") + ACPI_MODULE_NAME ("rsirq") /******************************************************************************* * - * FUNCTION: Acpi_rs_irq_resource + * FUNCTION: acpi_rs_irq_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream. * ******************************************************************************/ acpi_status acpi_rs_irq_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u8 index; - u8 i; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_irq); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + u8 index; + u8 i; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_irq); - FUNCTION_TRACE ("Rs_irq_resource"); + ACPI_FUNCTION_TRACE ("rs_irq_resource"); /* @@ -84,24 +101,28 @@ * Point to the 16-bits of Bytes 1 and 2 */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); output_struct->data.irq.number_of_interrupts = 0; /* Decode the IRQ bits */ for (i = 0, index = 0; index < 16; index++) { - if((temp16 >> index) & 0x01) { + if ((temp16 >> index) & 0x01) { output_struct->data.irq.interrupts[i] = index; i++; } } - output_struct->data.irq.number_of_interrupts = i; - /* - * Calculate the structure size based upon the number of interrupts - */ - struct_size += (output_struct->data.irq.number_of_interrupts - 1) * 4; + /* Zero interrupts is valid */ + + output_struct->data.irq.number_of_interrupts = i; + if (i > 0) { + /* + * Calculate the structure size based upon the number of interrupts + */ + struct_size += ((acpi_size) i - 1) * 4; + } /* * Point to Byte 3 if it is used @@ -114,22 +135,21 @@ * Check for HE, LL or HL */ if (temp8 & 0x01) { - output_struct->data.irq.edge_level = EDGE_SENSITIVE; - output_struct->data.irq.active_high_low = ACTIVE_HIGH; + output_struct->data.irq.edge_level = ACPI_EDGE_SENSITIVE; + output_struct->data.irq.active_high_low = ACPI_ACTIVE_HIGH; } - else { if (temp8 & 0x8) { - output_struct->data.irq.edge_level = LEVEL_SENSITIVE; - output_struct->data.irq.active_high_low = ACTIVE_LOW; + output_struct->data.irq.edge_level = ACPI_LEVEL_SENSITIVE; + output_struct->data.irq.active_high_low = ACPI_ACTIVE_LOW; } - 
else { /* * Only _LL and _HE polarity/trigger interrupts * are allowed (ACPI spec v1.0b ection 6.4.2.1), * so an error will occur if we reach this point */ + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid interrupt polarity/trigger in resource list\n")); return_ACPI_STATUS (AE_BAD_DATA); } } @@ -139,21 +159,20 @@ */ output_struct->data.irq.shared_exclusive = (temp8 >> 3) & 0x01; } - else { /* * Assume Edge Sensitive, Active High, Non-Sharable * per ACPI Specification */ - output_struct->data.irq.edge_level = EDGE_SENSITIVE; - output_struct->data.irq.active_high_low = ACTIVE_HIGH; - output_struct->data.irq.shared_exclusive = EXCLUSIVE; + output_struct->data.irq.edge_level = ACPI_EDGE_SENSITIVE; + output_struct->data.irq.active_high_low = ACPI_ACTIVE_HIGH; + output_struct->data.irq.shared_exclusive = ACPI_EXCLUSIVE; } /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -165,13 +184,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_irq_stream + * FUNCTION: acpi_rs_irq_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -182,34 +200,33 @@ acpi_status acpi_rs_irq_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u8 index; - u8 IRQinfo_byte_needed; + u8 *buffer = *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + u8 index; + u8 IRqinfo_byte_needed; - FUNCTION_TRACE ("Rs_irq_stream"); + ACPI_FUNCTION_TRACE ("rs_irq_stream"); /* * The descriptor field is set based upon whether a third byte is * needed to contain the IRQ Information. */ - if (EDGE_SENSITIVE == linked_list->data.irq.edge_level && - ACTIVE_HIGH == linked_list->data.irq.active_high_low && - EXCLUSIVE == linked_list->data.irq.shared_exclusive) { + if (ACPI_EDGE_SENSITIVE == linked_list->data.irq.edge_level && + ACPI_ACTIVE_HIGH == linked_list->data.irq.active_high_low && + ACPI_EXCLUSIVE == linked_list->data.irq.shared_exclusive) { *buffer = 0x22; - IRQinfo_byte_needed = FALSE; + IRqinfo_byte_needed = FALSE; } - else { *buffer = 0x23; - IRQinfo_byte_needed = TRUE; + IRqinfo_byte_needed = TRUE; } buffer += 1; @@ -225,22 +242,21 @@ temp16 |= 0x1 << temp8; } - MOVE_UNALIGNED16_TO_16 (buffer, &temp16); + ACPI_MOVE_16_TO_16 (buffer, &temp16); buffer += 2; /* * Set the IRQ Info byte if needed. 
*/ - if (IRQinfo_byte_needed) { + if (IRqinfo_byte_needed) { temp8 = 0; temp8 = (u8) ((linked_list->data.irq.shared_exclusive & 0x01) << 4); - if (LEVEL_SENSITIVE == linked_list->data.irq.edge_level && - ACTIVE_LOW == linked_list->data.irq.active_high_low) { + if (ACPI_LEVEL_SENSITIVE == linked_list->data.irq.edge_level && + ACPI_ACTIVE_LOW == linked_list->data.irq.active_high_low) { temp8 |= 0x08; } - else { temp8 |= 0x01; } @@ -252,57 +268,56 @@ /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_extended_irq_resource + * FUNCTION: acpi_rs_extended_irq_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream. * ******************************************************************************/ acpi_status acpi_rs_extended_irq_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - NATIVE_CHAR *temp_ptr; - u8 index; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_ext_irq); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + u8 *temp_ptr; + u8 index; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_ext_irq); - FUNCTION_TRACE ("Rs_extended_irq_resource"); + ACPI_FUNCTION_TRACE ("rs_extended_irq_resource"); /* * Point past the Descriptor to get the number of bytes consumed */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); *bytes_consumed = temp16 + 3; output_struct->id = ACPI_RSTYPE_EXT_IRQ; @@ -316,28 +331,20 @@ output_struct->data.extended_irq.producer_consumer = temp8 & 0x01; /* - * Check for HE, LL or HL + * Check for Interrupt Mode + * + * The definition of an Extended IRQ changed between ACPI spec v1.0b + * and ACPI spec 2.0 (section 6.4.3.6 in both). + * + * - Edge/Level are defined opposite in the table vs the headers */ - if(temp8 & 0x02) { - output_struct->data.extended_irq.edge_level = EDGE_SENSITIVE; - output_struct->data.extended_irq.active_high_low = ACTIVE_HIGH; - } + output_struct->data.extended_irq.edge_level = + (temp8 & 0x2) ? 
ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; - else { - if(temp8 & 0x4) { - output_struct->data.extended_irq.edge_level = LEVEL_SENSITIVE; - output_struct->data.extended_irq.active_high_low = ACTIVE_LOW; - } - - else { - /* - * Only _LL and _HE polarity/trigger interrupts - * are allowed (ACPI spec v1.0b ection 6.4.2.1), - * so an error will occur if we reach this point - */ - return_ACPI_STATUS (AE_BAD_DATA); - } - } + /* + * Check Interrupt Polarity + */ + output_struct->data.extended_irq.active_high_low = (temp8 >> 2) & 0x1; /* * Check for sharable @@ -367,8 +374,8 @@ * Cycle through every IRQ in the table */ for (index = 0; index < temp8; index++) { - output_struct->data.extended_irq.interrupts[index] = - (u32)*buffer; + ACPI_MOVE_32_TO_32 ( + &output_struct->data.extended_irq.interrupts[index], buffer); /* Point to the next IRQ */ @@ -383,7 +390,7 @@ * stream that are default. */ if (*bytes_consumed > - (u32)(output_struct->data.extended_irq.number_of_interrupts * 4) + 5) { + ((acpi_size) output_struct->data.extended_irq.number_of_interrupts * 4) + 5) { /* Dereference the Index */ temp8 = *buffer; @@ -397,14 +404,13 @@ * Point the String pointer to the end of this structure. */ output_struct->data.extended_irq.resource_source.string_ptr = - (NATIVE_CHAR *)(output_struct + struct_size); + (char *)(output_struct + struct_size); - temp_ptr = output_struct->data.extended_irq.resource_source.string_ptr; + temp_ptr = (u8 *) output_struct->data.extended_irq.resource_source.string_ptr; /* Copy the string into the buffer */ index = 0; - while (0x00 != *buffer) { *temp_ptr = *buffer; @@ -420,14 +426,13 @@ output_struct->data.extended_irq.resource_source.string_length = index + 1; /* - * In order for the Struct_size to fall on a 32-bit boundary, + * In order for the struct_size to fall on a 32-bit boundary, * calculate the length of the string and expand the - * Struct_size to the next 32-bit boundary. + * struct_size to the next 32-bit boundary. 
*/ temp8 = (u8) (index + 1); - struct_size += ROUND_UP_TO_32_bITS (temp8); + struct_size += ACPI_ROUND_UP_to_32_bITS (temp8); } - else { output_struct->data.extended_irq.resource_source.index = 0x00; output_struct->data.extended_irq.resource_source.string_length = 0; @@ -437,7 +442,7 @@ /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -449,13 +454,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_extended_irq_stream + * FUNCTION: acpi_rs_extended_irq_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -466,18 +470,18 @@ acpi_status acpi_rs_extended_irq_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u16 *length_field; - u8 temp8 = 0; - u8 index; - NATIVE_CHAR *temp_pointer = NULL; + u8 *buffer = *output_buffer; + u16 *length_field; + u8 temp8 = 0; + u8 index; + char *temp_pointer = NULL; - FUNCTION_TRACE ("Rs_extended_irq_stream"); + ACPI_FUNCTION_TRACE ("rs_extended_irq_stream"); /* @@ -489,7 +493,7 @@ /* * Set a pointer to the Length field - to be filled in later */ - length_field = (u16 *)buffer; + length_field = ACPI_CAST_PTR (u16, buffer); buffer += 2; /* @@ -498,14 +502,24 @@ temp8 = (u8)(linked_list->data.extended_irq.producer_consumer & 0x01); temp8 |= ((linked_list->data.extended_irq.shared_exclusive & 0x01) << 3); - if (LEVEL_SENSITIVE == linked_list->data.extended_irq.edge_level && - ACTIVE_LOW == linked_list->data.extended_irq.active_high_low) { - temp8 |= 0x04; - } - else { - temp8 |= 0x02; + /* + * Set the Interrupt Mode + * + * The definition of an Extended IRQ changed between ACPI spec v1.0b + * and ACPI spec 2.0 (section 6.4.3.6 in both). 
This code does not + * implement the more restrictive definition of 1.0b + * + * - Edge/Level are defined opposite in the table vs the headers + */ + if (ACPI_EDGE_SENSITIVE == linked_list->data.extended_irq.edge_level) { + temp8 |= 0x2; } + /* + * Set the Interrupt Polarity + */ + temp8 |= ((linked_list->data.extended_irq.active_high_low & 0x1) << 2); + *buffer = temp8; buffer += 1; @@ -519,7 +533,7 @@ for (index = 0; index < linked_list->data.extended_irq.number_of_interrupts; index++) { - MOVE_UNALIGNED32_TO_32 (buffer, + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.extended_irq.interrupts[index]); buffer += 4; } @@ -531,25 +545,25 @@ *buffer = (u8) linked_list->data.extended_irq.resource_source.index; buffer += 1; - temp_pointer = (NATIVE_CHAR *) buffer; + temp_pointer = (char *) buffer; /* * Copy the string */ - STRCPY (temp_pointer, + ACPI_STRCPY (temp_pointer, linked_list->data.extended_irq.resource_source.string_ptr); /* * Buffer needs to be set to the length of the sting + one for the * terminating null */ - buffer += (STRLEN (linked_list->data.extended_irq.resource_source.string_ptr) + 1); + buffer += (acpi_size)(ACPI_STRLEN (linked_list->data.extended_irq.resource_source.string_ptr) + 1); } /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); /* * Set the length field to the number of bytes consumed diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rslist.c linux.21rc5-ac2/drivers/acpi/resources/rslist.c --- linux.21rc5/drivers/acpi/resources/rslist.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rslist.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,41 +1,59 @@ /******************************************************************************* * * Module Name: rslist - Linked list utilities - * $Revision: 19 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acresrc.h" +#include +#include #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rslist") + ACPI_MODULE_NAME ("rslist") /******************************************************************************* * - * FUNCTION: Acpi_rs_get_resource_type + * FUNCTION: acpi_rs_get_resource_type * - * PARAMETERS: Resource_start_byte - Byte 0 of a resource descriptor + * PARAMETERS: resource_start_byte - Byte 0 of a resource descriptor * * RETURN: The Resource Type (Name) with no extraneous bits * @@ -46,31 +64,34 @@ u8 acpi_rs_get_resource_type ( - u8 resource_start_byte) + u8 resource_start_byte) { - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* * Determine if this is a small or large resource */ - switch (resource_start_byte & RESOURCE_DESC_TYPE_MASK) { - case RESOURCE_DESC_TYPE_SMALL: + switch (resource_start_byte & ACPI_RDESC_TYPE_MASK) { + case ACPI_RDESC_TYPE_SMALL: /* * Small Resource Type -- Only bits 6:3 are valid */ - return ((u8) (resource_start_byte & RESOURCE_DESC_SMALL_MASK)); - break; + return ((u8) (resource_start_byte & ACPI_RDESC_SMALL_MASK)); - case RESOURCE_DESC_TYPE_LARGE: + case ACPI_RDESC_TYPE_LARGE: /* * Large Resource Type -- All bits are valid */ return (resource_start_byte); + + + default: + /* No other types of resource descriptor */ break; } @@ -80,11 +101,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_byte_stream_to_list + * FUNCTION: acpi_rs_byte_stream_to_list * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource byte stream - * Byte_stream_buffer_length - Length of Byte_stream_buffer - * Output_buffer - Pointer to the buffer that will + * PARAMETERS: byte_stream_buffer - Pointer to the resource byte stream + * byte_stream_buffer_length - Length of byte_stream_buffer + * output_buffer - Pointer to the buffer that will * contain the output structures * * RETURN: Status @@ -96,185 +117,184 @@ acpi_status acpi_rs_byte_stream_to_list ( - u8 *byte_stream_buffer, - u32 byte_stream_buffer_length, - u8 **output_buffer) + u8 *byte_stream_buffer, + u32 byte_stream_buffer_length, + u8 *output_buffer) { - acpi_status status; - u32 bytes_parsed = 0; - u8 resource_type = 0; - u32 bytes_consumed = 0; - u8 **buffer = output_buffer; - u32 structure_size = 0; - u8 end_tag_processed = FALSE; + acpi_status status; + acpi_size 
bytes_parsed = 0; + u8 resource_type = 0; + acpi_size bytes_consumed = 0; + u8 *buffer = output_buffer; + acpi_size structure_size = 0; + u8 end_tag_processed = FALSE; + struct acpi_resource *resource; - - FUNCTION_TRACE ("Rs_byte_stream_to_list"); + ACPI_FUNCTION_TRACE ("rs_byte_stream_to_list"); while (bytes_parsed < byte_stream_buffer_length && - FALSE == end_tag_processed) { + !end_tag_processed) { /* * The next byte in the stream is the resource type */ resource_type = acpi_rs_get_resource_type (*byte_stream_buffer); switch (resource_type) { - case RESOURCE_DESC_MEMORY_24: + case ACPI_RDESC_TYPE_MEMORY_24: /* * 24-Bit Memory Resource */ status = acpi_rs_memory24_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_LARGE_VENDOR: + case ACPI_RDESC_TYPE_LARGE_VENDOR: /* * Vendor Defined Resource */ status = acpi_rs_vendor_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_MEMORY_32: + case ACPI_RDESC_TYPE_MEMORY_32: /* * 32-Bit Memory Range Resource */ status = acpi_rs_memory32_range_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_FIXED_MEMORY_32: + case ACPI_RDESC_TYPE_FIXED_MEMORY_32: /* * 32-Bit Fixed Memory Resource */ status = acpi_rs_fixed_memory32_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_QWORD_ADDRESS_SPACE: + case ACPI_RDESC_TYPE_QWORD_ADDRESS_SPACE: /* * 64-Bit Address Resource */ status = acpi_rs_address64_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_DWORD_ADDRESS_SPACE: + case ACPI_RDESC_TYPE_DWORD_ADDRESS_SPACE: /* * 32-Bit Address Resource */ status = acpi_rs_address32_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_WORD_ADDRESS_SPACE: + case ACPI_RDESC_TYPE_WORD_ADDRESS_SPACE: /* * 16-Bit Address Resource */ status = acpi_rs_address16_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_EXTENDED_XRUPT: + case ACPI_RDESC_TYPE_EXTENDED_XRUPT: /* * Extended IRQ */ status = acpi_rs_extended_irq_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_IRQ_FORMAT: + case ACPI_RDESC_TYPE_IRQ_FORMAT: /* * IRQ Resource */ status = acpi_rs_irq_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_DMA_FORMAT: + case ACPI_RDESC_TYPE_DMA_FORMAT: /* * DMA Resource */ status = acpi_rs_dma_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_START_DEPENDENT: + case ACPI_RDESC_TYPE_START_DEPENDENT: /* * Start Dependent Functions Resource */ - status = acpi_rs_start_dependent_functions_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + status = acpi_rs_start_depend_fns_resource (byte_stream_buffer, + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_END_DEPENDENT: + case 
ACPI_RDESC_TYPE_END_DEPENDENT: /* * End Dependent Functions Resource */ - status = acpi_rs_end_dependent_functions_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + status = acpi_rs_end_depend_fns_resource (byte_stream_buffer, + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_IO_PORT: + case ACPI_RDESC_TYPE_IO_PORT: /* * IO Port Resource */ status = acpi_rs_io_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_FIXED_IO_PORT: + case ACPI_RDESC_TYPE_FIXED_IO_PORT: /* * Fixed IO Port Resource */ status = acpi_rs_fixed_io_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_SMALL_VENDOR: + case ACPI_RDESC_TYPE_SMALL_VENDOR: /* * Vendor Specific Resource */ status = acpi_rs_vendor_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; - case RESOURCE_DESC_END_TAG: + case ACPI_RDESC_TYPE_END_TAG: /* * End Tag */ end_tag_processed = TRUE; status = acpi_rs_end_tag_resource (byte_stream_buffer, - &bytes_consumed, buffer, &structure_size); + &bytes_consumed, &buffer, &structure_size); break; default: /* - * Invalid/Unknowns resource type + * Invalid/Unknown resource type */ - status = AE_AML_ERROR; + status = AE_AML_INVALID_RESOURCE_TYPE; break; } - - if (!ACPI_SUCCESS(status)) { + if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } @@ -291,15 +311,17 @@ /* * Set the Buffer to the next structure */ - *buffer += structure_size; + resource = ACPI_CAST_PTR (struct acpi_resource, buffer); + resource->length = ACPI_ALIGN_RESOURCE_SIZE(resource->length); + buffer += ACPI_ALIGN_RESOURCE_SIZE(structure_size); } /* end while */ /* * Check the reason for exiting the while loop */ - if (TRUE != end_tag_processed) { - return_ACPI_STATUS (AE_AML_ERROR); + if (!end_tag_processed) { + return_ACPI_STATUS (AE_AML_NO_RESOURCE_END_TAG); } return_ACPI_STATUS (AE_OK); @@ -308,16 +330,16 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_list_to_byte_stream + * FUNCTION: acpi_rs_list_to_byte_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Byte_steam_size_needed - Calculated size of the byte stream + * PARAMETERS: linked_list - Pointer to the resource linked list + * byte_steam_size_needed - Calculated size of the byte stream * needed from calling - * Acpi_rs_calculate_byte_stream_length() - * The size of the Output_buffer is + * acpi_rs_get_byte_stream_length() + * The size of the output_buffer is * guaranteed to be >= - * Byte_stream_size_needed - * Output_buffer - Pointer to the buffer that will + * byte_stream_size_needed + * output_buffer - Pointer to the buffer that will * contain the byte stream * * RETURN: Status @@ -329,17 +351,17 @@ acpi_status acpi_rs_list_to_byte_stream ( - acpi_resource *linked_list, - u32 byte_stream_size_needed, - u8 **output_buffer) + struct acpi_resource *linked_list, + acpi_size byte_stream_size_needed, + u8 *output_buffer) { - acpi_status status; - u8 *buffer = *output_buffer; - u32 bytes_consumed = 0; - u8 done = FALSE; + acpi_status status; + u8 *buffer = output_buffer; + acpi_size bytes_consumed = 0; + u8 done = FALSE; - FUNCTION_TRACE ("Rs_list_to_byte_stream"); + ACPI_FUNCTION_TRACE ("rs_list_to_byte_stream"); while (!done) { @@ -362,7 +384,7 @@ /* * Start Dependent Functions 
Resource */ - status = acpi_rs_start_dependent_functions_stream (linked_list, + status = acpi_rs_start_depend_fns_stream (linked_list, &buffer, &bytes_consumed); break; @@ -370,7 +392,7 @@ /* * End Dependent Functions Resource */ - status = acpi_rs_end_dependent_functions_stream (linked_list, + status = acpi_rs_end_depend_fns_stream (linked_list, &buffer, &bytes_consumed); break; @@ -467,13 +489,14 @@ * If we get here, everything is out of sync, * so exit with an error */ + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid descriptor type (%X) in resource list\n", + linked_list->id)); status = AE_BAD_DATA; break; - } /* switch (Linked_list->Id) */ - + } /* switch (linked_list->Id) */ - if (!ACPI_SUCCESS(status)) { + if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } @@ -485,7 +508,7 @@ /* * Point to the next object */ - linked_list = POINTER_ADD (acpi_resource, + linked_list = ACPI_PTR_ADD (struct acpi_resource, linked_list, linked_list->length); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rsmemory.c linux.21rc5-ac2/drivers/acpi/resources/rsmemory.c --- linux.21rc5/drivers/acpi/resources/rsmemory.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rsmemory.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,73 +1,90 @@ /******************************************************************************* * * Module Name: rsmem24 - Memory resource descriptors - * $Revision: 14 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acresrc.h" +#include +#include #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rsmemory") + ACPI_MODULE_NAME ("rsmemory") /******************************************************************************* * - * FUNCTION: Acpi_rs_memory24_resource + * FUNCTION: acpi_rs_memory24_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream.
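 *
 * For quick reference, the 12-byte descriptor walked below (field
 * offsets follow from the parsing code; the tag value 0x81 is the
 * ACPI-defined 24-bit memory range type):
 *
 *   Byte 0      : descriptor tag (0x81)
 *   Bytes 1-2   : descriptor length (0x0009)
 *   Byte 3      : information (bit 0 = read/write)
 *   Bytes 4-5   : minimum base address
 *   Bytes 6-7   : maximum base address
 *   Bytes 8-9   : base alignment
 *   Bytes 10-11 : range length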
* ******************************************************************************/ acpi_status acpi_rs_memory24_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_mem24); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_mem24); - FUNCTION_TRACE ("Rs_memory24_resource"); + ACPI_FUNCTION_TRACE ("rs_memory24_resource"); /* @@ -75,9 +92,9 @@ */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); buffer += 2; - *bytes_consumed = temp16 + 3; + *bytes_consumed = (acpi_size) temp16 + 3; output_struct->id = ACPI_RSTYPE_MEM24; /* @@ -88,36 +105,36 @@ output_struct->data.memory24.read_write_attribute = temp8 & 0x01; /* - * Get Min_base_address (Bytes 4-5) + * Get min_base_address (Bytes 4-5) */ - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); buffer += 2; output_struct->data.memory24.min_base_address = temp16; /* - * Get Max_base_address (Bytes 6-7) + * Get max_base_address (Bytes 6-7) */ - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); buffer += 2; output_struct->data.memory24.max_base_address = temp16; /* * Get Alignment (Bytes 8-9) */ - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); buffer += 2; output_struct->data.memory24.alignment = temp16; /* - * Get Range_length (Bytes 10-11) + * Get range_length (Bytes 10-11) */ - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); output_struct->data.memory24.range_length = temp16; /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -129,13 +146,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_memory24_stream + * FUNCTION: acpi_rs_memory24_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -146,16 +162,16 @@ acpi_status acpi_rs_memory24_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; + u8 *buffer = *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; - FUNCTION_TRACE ("Rs_memory24_stream"); + ACPI_FUNCTION_TRACE ("rs_memory24_stream"); /* @@ -168,7 +184,7 @@ * The length field is static */ temp16 = 0x09; - MOVE_UNALIGNED16_TO_16 (buffer, &temp16); + ACPI_MOVE_16_TO_16 (buffer, &temp16); buffer += 2; /* @@ -181,72 +197,71 @@ /* * Set the Range minimum base address */ - MOVE_UNALIGNED16_TO_16 (buffer, 
&linked_list->data.memory24.min_base_address); + ACPI_MOVE_32_TO_16 (buffer, &linked_list->data.memory24.min_base_address); buffer += 2; /* * Set the Range maximum base address */ - MOVE_UNALIGNED16_TO_16 (buffer, &linked_list->data.memory24.max_base_address); + ACPI_MOVE_32_TO_16 (buffer, &linked_list->data.memory24.max_base_address); buffer += 2; /* * Set the base alignment */ - MOVE_UNALIGNED16_TO_16 (buffer, &linked_list->data.memory24.alignment); + ACPI_MOVE_32_TO_16 (buffer, &linked_list->data.memory24.alignment); buffer += 2; /* * Set the range length */ - MOVE_UNALIGNED16_TO_16 (buffer, &linked_list->data.memory24.range_length); + ACPI_MOVE_32_TO_16 (buffer, &linked_list->data.memory24.range_length); buffer += 2; /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_memory32_range_resource + * FUNCTION: acpi_rs_memory32_range_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream. * ******************************************************************************/ acpi_status acpi_rs_memory32_range_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_mem32); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_mem32); - FUNCTION_TRACE ("Rs_memory32_range_resource"); + ACPI_FUNCTION_TRACE ("rs_memory32_range_resource"); /* @@ -254,19 +269,19 @@ */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); buffer += 2; - *bytes_consumed = temp16 + 3; + *bytes_consumed = (acpi_size) temp16 + 3; output_struct->id = ACPI_RSTYPE_MEM32; /* * Point to the place in the output buffer where the data portion will * begin. - * 1. Set the RESOURCE_DATA * Data to point to it's own address, then + * 1. Set the RESOURCE_DATA * Data to point to its own address, then * 2. Set the pointer to the next address. 
* - * NOTE: Output_struct->Data is cast to u8, otherwise, this addition adds + * NOTE: output_struct->Data is cast to u8, otherwise, this addition adds * 4 * sizeof(RESOURCE_DATA) instead of 4 * sizeof(u8) */ @@ -279,34 +294,32 @@ output_struct->data.memory32.read_write_attribute = temp8 & 0x01; /* - * Get Min_base_address (Bytes 4-7) + * Get min_base_address (Bytes 4-7) */ - MOVE_UNALIGNED32_TO_32 (&output_struct->data.memory32.min_base_address, - buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.memory32.min_base_address, buffer); buffer += 4; /* - * Get Max_base_address (Bytes 8-11) + * Get max_base_address (Bytes 8-11) */ - MOVE_UNALIGNED32_TO_32 (&output_struct->data.memory32.max_base_address, - buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.memory32.max_base_address, buffer); buffer += 4; /* * Get Alignment (Bytes 12-15) */ - MOVE_UNALIGNED32_TO_32 (&output_struct->data.memory32.alignment, buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.memory32.alignment, buffer); buffer += 4; /* - * Get Range_length (Bytes 16-19) + * Get range_length (Bytes 16-19) */ - MOVE_UNALIGNED32_TO_32 (&output_struct->data.memory32.range_length, buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.memory32.range_length, buffer); /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -318,51 +331,50 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_fixed_memory32_resource + * FUNCTION: acpi_rs_fixed_memory32_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream.
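 *
 * Sketch of the 12-byte descriptor handled here (offsets follow from
 * the parsing code; the tag value 0x86 is the ACPI-defined 32-bit
 * fixed memory range type):
 *
 *   Byte 0     : descriptor tag (0x86)
 *   Bytes 1-2  : descriptor length (0x0009)
 *   Byte 3     : information (bit 0 = read/write)
 *   Bytes 4-7  : range base address
 *   Bytes 8-11 : range length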
* ******************************************************************************/ acpi_status acpi_rs_fixed_memory32_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_fixed_mem32); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_fixed_mem32); - FUNCTION_TRACE ("Rs_fixed_memory32_resource"); + ACPI_FUNCTION_TRACE ("rs_fixed_memory32_resource"); /* * Point past the Descriptor to get the number of bytes consumed */ buffer += 1; - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); buffer += 2; - *bytes_consumed = temp16 + 3; + *bytes_consumed = (acpi_size) temp16 + 3; output_struct->id = ACPI_RSTYPE_FIXED_MEM32; @@ -374,22 +386,20 @@ output_struct->data.fixed_memory32.read_write_attribute = temp8 & 0x01; /* - * Get Range_base_address (Bytes 4-7) + * Get range_base_address (Bytes 4-7) */ - MOVE_UNALIGNED32_TO_32 (&output_struct->data.fixed_memory32.range_base_address, - buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.fixed_memory32.range_base_address, buffer); buffer += 4; /* - * Get Range_length (Bytes 8-11) + * Get range_length (Bytes 8-11) */ - MOVE_UNALIGNED32_TO_32 (&output_struct->data.fixed_memory32.range_length, - buffer); + ACPI_MOVE_32_TO_32 (&output_struct->data.fixed_memory32.range_length, buffer); /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -401,13 +411,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_memory32_range_stream + * FUNCTION: acpi_rs_memory32_range_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -418,16 +427,16 @@ acpi_status acpi_rs_memory32_range_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; + u8 *buffer = *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; - FUNCTION_TRACE ("Rs_memory32_range_stream"); + ACPI_FUNCTION_TRACE ("rs_memory32_range_stream"); /* @@ -441,7 +450,7 @@ */ temp16 = 0x11; - MOVE_UNALIGNED16_TO_16 (buffer, &temp16); + ACPI_MOVE_16_TO_16 (buffer, &temp16); buffer += 2; /* @@ -454,44 +463,43 @@ /* * Set the Range minimum base address */ - MOVE_UNALIGNED32_TO_32 (buffer, &linked_list->data.memory32.min_base_address); + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.memory32.min_base_address); buffer += 4; /* * Set the Range maximum base address */ - MOVE_UNALIGNED32_TO_32 (buffer, &linked_list->data.memory32.max_base_address); 
+ ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.memory32.max_base_address); buffer += 4; /* * Set the base alignment */ - MOVE_UNALIGNED32_TO_32 (buffer, &linked_list->data.memory32.alignment); + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.memory32.alignment); buffer += 4; /* * Set the range length */ - MOVE_UNALIGNED32_TO_32 (buffer, &linked_list->data.memory32.range_length); + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.memory32.range_length); buffer += 4; /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_fixed_memory32_stream + * FUNCTION: acpi_rs_fixed_memory32_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -502,16 +510,16 @@ acpi_status acpi_rs_fixed_memory32_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; + u8 *buffer = *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; - FUNCTION_TRACE ("Rs_fixed_memory32_stream"); + ACPI_FUNCTION_TRACE ("rs_fixed_memory32_stream"); /* @@ -525,7 +533,7 @@ */ temp16 = 0x09; - MOVE_UNALIGNED16_TO_16 (buffer, &temp16); + ACPI_MOVE_16_TO_16 (buffer, &temp16); buffer += 2; /* @@ -538,21 +546,21 @@ /* * Set the Range base address */ - MOVE_UNALIGNED32_TO_32 (buffer, + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.fixed_memory32.range_base_address); buffer += 4; /* * Set the range length */ - MOVE_UNALIGNED32_TO_32 (buffer, + ACPI_MOVE_32_TO_32 (buffer, &linked_list->data.fixed_memory32.range_length); buffer += 4; /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rsmisc.c linux.21rc5-ac2/drivers/acpi/resources/rsmisc.c --- linux.21rc5/drivers/acpi/resources/rsmisc.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rsmisc.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,70 +1,87 @@ /******************************************************************************* * * Module Name: rsmisc - Miscellaneous resource descriptors - * $Revision: 16 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acresrc.h" +#include +#include #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rsmisc") + ACPI_MODULE_NAME ("rsmisc") /******************************************************************************* * - * FUNCTION: Acpi_rs_end_tag_resource + * FUNCTION: acpi_rs_end_tag_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream.
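 *
 * This is the simplest descriptor in a stream: per the ACPI spec it
 * is the small tag byte 0x79 followed by a single checksum byte, so
 * two bytes are always consumed.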
* ******************************************************************************/ acpi_status acpi_rs_end_tag_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u32 struct_size = ACPI_RESOURCE_LENGTH; + struct acpi_resource *output_struct = (void *) *output_buffer; + acpi_size struct_size = ACPI_RESOURCE_LENGTH; - FUNCTION_TRACE ("Rs_end_tag_resource"); + ACPI_FUNCTION_TRACE ("rs_end_tag_resource"); /* @@ -92,13 +109,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_end_tag_stream + * FUNCTION: acpi_rs_end_tag_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -109,15 +125,15 @@ acpi_status acpi_rs_end_tag_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u8 temp8 = 0; + u8 *buffer = *output_buffer; + u8 temp8 = 0; - FUNCTION_TRACE ("Rs_end_tag_stream"); + ACPI_FUNCTION_TRACE ("rs_end_tag_stream"); /* @@ -138,49 +154,48 @@ /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_vendor_resource + * FUNCTION: acpi_rs_vendor_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream.
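 *
 * The two encodings unpacked below (values visible in the stream
 * routine later in this file):
 *
 *   Small form : one byte 0x70 | n (n = 0-7 data bytes), data follows
 *   Large form : byte 0x84, a 16-bit length, then the data bytes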
* ******************************************************************************/ acpi_status acpi_rs_vendor_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u8 index; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_vendor); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + u8 index; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_vendor); - FUNCTION_TRACE ("Rs_vendor_resource"); + ACPI_FUNCTION_TRACE ("rs_vendor_resource"); /* @@ -190,34 +205,31 @@ if (temp8 & 0x80) { /* - * Large Item - * Point to the length field + * Large Item, point to the length field */ buffer += 1; /* Dereference */ - MOVE_UNALIGNED16_TO_16 (&temp16, buffer); + ACPI_MOVE_16_TO_16 (&temp16, buffer); /* Calculate bytes consumed */ - *bytes_consumed = temp16 + 3; + *bytes_consumed = (acpi_size) temp16 + 3; /* Point to the first vendor byte */ buffer += 2; } - else { /* - * Small Item - * Dereference the size + * Small Item, dereference the size */ temp16 = (u8)(*buffer & 0x07); /* Calculate bytes consumed */ - *bytes_consumed = temp16 + 1; + *bytes_consumed = (acpi_size) temp16 + 1; /* Point to the first vendor byte */ @@ -233,16 +245,16 @@ } /* - * In order for the Struct_size to fall on a 32-bit boundary, + * In order for the struct_size to fall on a 32-bit boundary, * calculate the length of the vendor string and expand the - * Struct_size to the next 32-bit boundary. + * struct_size to the next 32-bit boundary. 
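 * (For instance, 5 bytes of vendor data would grow struct_size by 8
 * here.)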
*/ - struct_size += ROUND_UP_TO_32_bITS (temp16); + struct_size += ACPI_ROUND_UP_to_32_bITS (temp16); /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -254,13 +266,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_vendor_stream + * FUNCTION: acpi_rs_vendor_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -271,17 +282,17 @@ acpi_status acpi_rs_vendor_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u16 temp16 = 0; - u8 temp8 = 0; - u8 index; + u8 *buffer = *output_buffer; + u16 temp16 = 0; + u8 temp8 = 0; + u8 index; - FUNCTION_TRACE ("Rs_vendor_stream"); + ACPI_FUNCTION_TRACE ("rs_vendor_stream"); /* @@ -289,25 +300,22 @@ */ if(linked_list->data.vendor_specific.length > 7) { /* - * Large Item - * Set the descriptor field and length bytes + * Large Item, Set the descriptor field and length bytes */ *buffer = 0x84; buffer += 1; temp16 = (u16) linked_list->data.vendor_specific.length; - MOVE_UNALIGNED16_TO_16 (buffer, &temp16); + ACPI_MOVE_16_TO_16 (buffer, &temp16); buffer += 2; } - else { /* - * Small Item - * Set the descriptor field + * Small Item, Set the descriptor field */ temp8 = 0x70; - temp8 |= linked_list->data.vendor_specific.length; + temp8 |= (u8) linked_list->data.vendor_specific.length; *buffer = temp8; buffer += 1; @@ -326,47 +334,46 @@ /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_start_dependent_functions_resource + * FUNCTION: acpi_rs_start_depend_fns_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream.
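 *
 * The descriptor is either the bare tag 0x30 or, per the ACPI spec,
 * the tag 0x31 followed by one priority byte: bits 0-1 hold the
 * compatibility priority and bits 2-3 the performance/robustness
 * setting, with the value 3 reserved (rejected below as bad data).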
* ******************************************************************************/ acpi_status -acpi_rs_start_dependent_functions_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) +acpi_rs_start_depend_fns_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - u8 *buffer = byte_stream_buffer; - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u8 temp8 = 0; - u32 struct_size = SIZEOF_RESOURCE (acpi_resource_start_dpf); + u8 *buffer = byte_stream_buffer; + struct acpi_resource *output_struct = (void *) *output_buffer; + u8 temp8 = 0; + acpi_size struct_size = ACPI_SIZEOF_RESOURCE (struct acpi_resource_start_dpf); - FUNCTION_TRACE ("Rs_start_dependent_functions_resource"); + ACPI_FUNCTION_TRACE ("rs_start_depend_fns_resource"); /* @@ -391,7 +398,7 @@ output_struct->data.start_dpf.compatibility_priority = temp8 & 0x03; if (3 == output_struct->data.start_dpf.compatibility_priority) { - return_ACPI_STATUS (AE_AML_ERROR); + return_ACPI_STATUS (AE_AML_BAD_RESOURCE_VALUE); } /* @@ -400,22 +407,21 @@ output_struct->data.start_dpf.performance_robustness = (temp8 >> 2) & 0x03; if (3 == output_struct->data.start_dpf.performance_robustness) { - return_ACPI_STATUS (AE_AML_ERROR); + return_ACPI_STATUS (AE_AML_BAD_RESOURCE_VALUE); } } - else { output_struct->data.start_dpf.compatibility_priority = - ACCEPTABLE_CONFIGURATION; + ACPI_ACCEPTABLE_CONFIGURATION; output_struct->data.start_dpf.performance_robustness = - ACCEPTABLE_CONFIGURATION; + ACPI_ACCEPTABLE_CONFIGURATION; } /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -427,38 +433,37 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_end_dependent_functions_resource + * FUNCTION: acpi_rs_end_depend_fns_resource * - * PARAMETERS: Byte_stream_buffer - Pointer to the resource input byte + * PARAMETERS: byte_stream_buffer - Pointer to the resource input byte * stream - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes consumed from - * the Byte_stream_buffer - * Output_buffer - Pointer to the user's return buffer - * Structure_size - u32 pointer that is filled with - * the number of bytes in the filled - * in structure + * bytes_consumed - Pointer to where the number of bytes + * consumed from the byte_stream_buffer is + * returned + * output_buffer - Pointer to the return data buffer + * structure_size - Pointer to where the number of bytes + * in the return data struct is returned * * RETURN: Status * * DESCRIPTION: Take the resource byte stream and fill out the appropriate - * structure pointed to by the Output_buffer. Return the + * structure pointed to by the output_buffer. Return the * number of bytes consumed from the byte stream.
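 *
 * Per the ACPI spec this descriptor is the single tag byte 0x38 and
 * carries no data, so only one byte is consumed and a bare
 * struct acpi_resource header is returned.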
* ******************************************************************************/ acpi_status -acpi_rs_end_dependent_functions_resource ( - u8 *byte_stream_buffer, - u32 *bytes_consumed, - u8 **output_buffer, - u32 *structure_size) +acpi_rs_end_depend_fns_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size) { - acpi_resource *output_struct = (acpi_resource *) *output_buffer; - u32 struct_size = ACPI_RESOURCE_LENGTH; + struct acpi_resource *output_struct = (void *) *output_buffer; + acpi_size struct_size = ACPI_RESOURCE_LENGTH; - FUNCTION_TRACE ("Rs_end_dependent_functions_resource"); + ACPI_FUNCTION_TRACE ("rs_end_depend_fns_resource"); /* @@ -474,7 +479,7 @@ /* * Set the Length parameter */ - output_struct->length = struct_size; + output_struct->length = (u32) struct_size; /* * Return the final size of the structure @@ -486,13 +491,13 @@ /******************************************************************************* * - * FUNCTION: Acpi_rs_start_dependent_functions_stream + * FUNCTION: acpi_rs_start_depend_fns_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - u32 pointer that is filled with * the number of bytes of the - * Output_buffer used + * output_buffer used * * RETURN: Status * @@ -502,25 +507,25 @@ ******************************************************************************/ acpi_status -acpi_rs_start_dependent_functions_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed) +acpi_rs_start_depend_fns_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; - u8 temp8 = 0; + u8 *buffer = *output_buffer; + u8 temp8 = 0; - FUNCTION_TRACE ("Rs_start_dependent_functions_stream"); + ACPI_FUNCTION_TRACE ("rs_start_depend_fns_stream"); /* * The descriptor field is set based upon whether a byte is needed * to contain Priority data. 
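 * (0x30 when both settings are 'acceptable' and the byte can be
 * omitted; otherwise the descriptor form that carries an explicit
 * priority byte is written.)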
*/ - if (ACCEPTABLE_CONFIGURATION == + if (ACPI_ACCEPTABLE_CONFIGURATION == linked_list->data.start_dpf.compatibility_priority && - ACCEPTABLE_CONFIGURATION == + ACPI_ACCEPTABLE_CONFIGURATION == linked_list->data.start_dpf.performance_robustness) { *buffer = 0x30; } @@ -544,20 +549,19 @@ /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_rs_end_dependent_functions_stream + * FUNCTION: acpi_rs_end_depend_fns_stream * - * PARAMETERS: Linked_list - Pointer to the resource linked list - * Output_buffer - Pointer to the user's return buffer - * Bytes_consumed - u32 pointer that is filled with - * the number of bytes of the - * Output_buffer used + * PARAMETERS: linked_list - Pointer to the resource linked list + * output_buffer - Pointer to the user's return buffer + * bytes_consumed - Pointer to where the number of bytes + * used in the output_buffer is returned * * RETURN: Status * @@ -567,16 +571,15 @@ ******************************************************************************/ acpi_status -acpi_rs_end_dependent_functions_stream ( - acpi_resource *linked_list, - u8 **output_buffer, - u32 *bytes_consumed - ) +acpi_rs_end_depend_fns_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed) { - u8 *buffer = *output_buffer; + u8 *buffer = *output_buffer; - FUNCTION_TRACE ("Rs_end_dependent_functions_stream"); + ACPI_FUNCTION_TRACE ("rs_end_depend_fns_stream"); /* @@ -588,7 +591,7 @@ /* * Return the number of bytes consumed in this operation */ - *bytes_consumed = POINTER_DIFF (buffer, *output_buffer); + *bytes_consumed = ACPI_PTR_DIFF (buffer, *output_buffer); return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rsutils.c linux.21rc5-ac2/drivers/acpi/resources/rsutils.c --- linux.21rc5/drivers/acpi/resources/rsutils.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rsutils.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,44 +1,62 @@ /******************************************************************************* * * Module Name: rsutils - Utilities for the resource manager - * $Revision: 23 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acnamesp.h" -#include "acresrc.h" +#include +#include +#include #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rsutils") + ACPI_MODULE_NAME ("rsutils") /******************************************************************************* * - * FUNCTION: Acpi_rs_get_prt_method_data + * FUNCTION: acpi_rs_get_prt_method_data * * PARAMETERS: Handle - a handle to the containing object - * Ret_buffer - a pointer to a buffer structure for the + * ret_buffer - a pointer to a buffer structure for the * results * * RETURN: Status @@ -53,78 +71,45 @@ acpi_status acpi_rs_get_prt_method_data ( - acpi_handle handle, - acpi_buffer *ret_buffer) + acpi_handle handle, + struct acpi_buffer *ret_buffer) { - acpi_operand_object *ret_obj; - acpi_status status; - u32 buffer_space_needed; + union acpi_operand_object *obj_desc; + acpi_status status; - FUNCTION_TRACE ("Rs_get_prt_method_data"); + ACPI_FUNCTION_TRACE ("rs_get_prt_method_data"); - /* already validated params, so we won't repeat here */ - - buffer_space_needed = ret_buffer->length; + /* Parameters guaranteed valid by caller */ /* - * Execute the method, no parameters + * Execute the method, no parameters */ - status = acpi_ns_evaluate_relative (handle, "_PRT", NULL, &ret_obj); + status = acpi_ut_evaluate_object (handle, "_PRT", ACPI_BTYPE_PACKAGE, &obj_desc); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - if (!ret_obj) { - /* Return object is required */ - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No object was returned from _PRT\n")); - return_ACPI_STATUS (AE_TYPE); - } - - - /* - * The return object will be a package, so check the - * parameters. If the return object is not a package, - * then the underlying AML code is corrupt or improperly - * written. 
- */ - if (ACPI_TYPE_PACKAGE != ret_obj->common.type) { - status = AE_AML_OPERAND_TYPE; - goto cleanup; - } - - /* - * Make the call to create a resource linked list from the - * byte stream buffer that comes back from the _CRS method - * execution. - */ - status = acpi_rs_create_pci_routing_table (ret_obj, ret_buffer->pointer, - &buffer_space_needed); - /* - * Tell the user how much of the buffer we have used or is needed - * and return the final status. + * Create a resource linked list from the byte stream buffer that comes + * back from the _CRS method execution. */ - ret_buffer->length = buffer_space_needed; - + status = acpi_rs_create_pci_routing_table (obj_desc, ret_buffer); /* On exit, we must delete the object returned by evaluate_object */ -cleanup: - - acpi_ut_remove_reference (ret_obj); + acpi_ut_remove_reference (obj_desc); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_rs_get_crs_method_data + * FUNCTION: acpi_rs_get_crs_method_data * * PARAMETERS: Handle - a handle to the containing object - * Ret_buffer - a pointer to a buffer structure for the + * ret_buffer - a pointer to a buffer structure for the * results * * RETURN: Status @@ -139,76 +124,46 @@ acpi_status acpi_rs_get_crs_method_data ( - acpi_handle handle, - acpi_buffer *ret_buffer) + acpi_handle handle, + struct acpi_buffer *ret_buffer) { - acpi_operand_object *ret_obj; - acpi_status status; - u32 buffer_space_needed = ret_buffer->length; + union acpi_operand_object *obj_desc; + acpi_status status; - FUNCTION_TRACE ("Rs_get_crs_method_data"); + ACPI_FUNCTION_TRACE ("rs_get_crs_method_data"); - /* already validated params, so we won't repeat here */ + /* Parameters guaranteed valid by caller */ /* - * Execute the method, no parameters + * Execute the method, no parameters */ - status = acpi_ns_evaluate_relative (handle, "_CRS", NULL, &ret_obj); + status = acpi_ut_evaluate_object (handle, "_CRS", ACPI_BTYPE_BUFFER, &obj_desc); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - if (!ret_obj) { - /* Return object is required */ - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No object was returned from _CRS\n")); - return_ACPI_STATUS (AE_TYPE); - } - - /* - * The return object will be a buffer, but check the - * parameters. If the return object is not a buffer, - * then the underlying AML code is corrupt or improperly - * written. - */ - if (ACPI_TYPE_BUFFER != ret_obj->common.type) { - status = AE_AML_OPERAND_TYPE; - goto cleanup; - } - /* * Make the call to create a resource linked list from the - * byte stream buffer that comes back from the _CRS method - * execution. + * byte stream buffer that comes back from the _CRS method + * execution. */ - status = acpi_rs_create_resource_list (ret_obj, ret_buffer->pointer, - &buffer_space_needed); + status = acpi_rs_create_resource_list (obj_desc, ret_buffer); + /* on exit, we must delete the object returned by evaluate_object */ - /* - * Tell the user how much of the buffer we have used or is needed - * and return the final status. 
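The rewritten _PRT/_CRS/_PRS getters all share one shape: acpi_ut_evaluate_object() now performs both the evaluation and the return-object type check (ACPI_BTYPE_PACKAGE or ACPI_BTYPE_BUFFER) that the deleted lines open-coded, the acpi_rs_create_* routine fills the caller's struct acpi_buffer directly, and the operand object is dereferenced on every exit path. A caller-side sketch of the resulting convention, through the public wrapper; the handle "dev" is hypothetical, and the ACPI_ALLOCATE_BUFFER idiom is the one this patch itself uses in acpi_walk_resources:

	struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;

	/* ACPI CA sizes and allocates buf.pointer on our behalf */
	status = acpi_get_current_resources(dev, &buf);
	if (ACPI_SUCCESS(status)) {
		struct acpi_resource *res = (struct acpi_resource *) buf.pointer;
		/* ... walk entries until res->id == ACPI_RSTYPE_END_TAG ... */
		acpi_os_free(buf.pointer);	/* the caller owns the allocation */
	}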
- */ - ret_buffer->length = buffer_space_needed; - - - /* On exit, we must delete the object returned by evaluate_object */ - -cleanup: - - acpi_ut_remove_reference (ret_obj); + acpi_ut_remove_reference (obj_desc); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_rs_get_prs_method_data + * FUNCTION: acpi_rs_get_prs_method_data * * PARAMETERS: Handle - a handle to the containing object - * Ret_buffer - a pointer to a buffer structure for the + * ret_buffer - a pointer to a buffer structure for the * results * * RETURN: Status @@ -223,75 +178,100 @@ acpi_status acpi_rs_get_prs_method_data ( - acpi_handle handle, - acpi_buffer *ret_buffer) + acpi_handle handle, + struct acpi_buffer *ret_buffer) { - acpi_operand_object *ret_obj; - acpi_status status; - u32 buffer_space_needed = ret_buffer->length; + union acpi_operand_object *obj_desc; + acpi_status status; - FUNCTION_TRACE ("Rs_get_prs_method_data"); + ACPI_FUNCTION_TRACE ("rs_get_prs_method_data"); - /* already validated params, so we won't repeat here */ + /* Parameters guaranteed valid by caller */ /* - * Execute the method, no parameters + * Execute the method, no parameters */ - status = acpi_ns_evaluate_relative (handle, "_PRS", NULL, &ret_obj); + status = acpi_ut_evaluate_object (handle, "_PRS", ACPI_BTYPE_BUFFER, &obj_desc); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - if (!ret_obj) { - /* Return object is required */ + /* + * Make the call to create a resource linked list from the + * byte stream buffer that comes back from the _CRS method + * execution. + */ + status = acpi_rs_create_resource_list (obj_desc, ret_buffer); - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No object was returned from _PRS\n")); - return_ACPI_STATUS (AE_TYPE); - } + /* on exit, we must delete the object returned by evaluate_object */ + + acpi_ut_remove_reference (obj_desc); + return_ACPI_STATUS (status); +} - /* - * The return object will be a buffer, but check the - * parameters. If the return object is not a buffer, - * then the underlying AML code is corrupt or improperly - * written.. - */ - if (ACPI_TYPE_BUFFER != ret_obj->common.type) { - status = AE_AML_OPERAND_TYPE; - goto cleanup; - } + +/******************************************************************************* + * + * FUNCTION: acpi_rs_get_method_data + * + * PARAMETERS: Handle - a handle to the containing object + * ret_buffer - a pointer to a buffer structure for the + * results + * + * RETURN: Status + * + * DESCRIPTION: This function is called to get the _CRS or _PRS value of an + * object contained in an object specified by the handle passed in + * + * If the function fails an appropriate status will be returned + * and the contents of the callers buffer is undefined. + * + ******************************************************************************/ + +acpi_status +acpi_rs_get_method_data ( + acpi_handle handle, + char *path, + struct acpi_buffer *ret_buffer) +{ + union acpi_operand_object *obj_desc; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("rs_get_method_data"); + + + /* Parameters guaranteed valid by caller */ /* - * Make the call to create a resource linked list from the - * byte stream buffer that comes back from the _CRS method - * execution. 
+ * Execute the method, no parameters */ - status = acpi_rs_create_resource_list (ret_obj, ret_buffer->pointer, - &buffer_space_needed); + status = acpi_ut_evaluate_object (handle, path, ACPI_BTYPE_BUFFER, &obj_desc); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* - * Tell the user how much of the buffer we have used or is needed - * and return the final status. + * Make the call to create a resource linked list from the + * byte stream buffer that comes back from the method + * execution. */ - ret_buffer->length = buffer_space_needed; - + status = acpi_rs_create_resource_list (obj_desc, ret_buffer); /* On exit, we must delete the object returned by evaluate_object */ -cleanup: - - acpi_ut_remove_reference (ret_obj); + acpi_ut_remove_reference (obj_desc); return_ACPI_STATUS (status); } - /******************************************************************************* * - * FUNCTION: Acpi_rs_set_srs_method_data + * FUNCTION: acpi_rs_set_srs_method_data * * PARAMETERS: Handle - a handle to the containing object - * In_buffer - a pointer to a buffer structure of the + * in_buffer - a pointer to a buffer structure of the * parameter * * RETURN: Status @@ -306,55 +286,30 @@ acpi_status acpi_rs_set_srs_method_data ( - acpi_handle handle, - acpi_buffer *in_buffer) + acpi_handle handle, + struct acpi_buffer *in_buffer) { - acpi_operand_object *params[2]; - acpi_status status; - u8 *byte_stream = NULL; - u32 buffer_size_needed = 0; + union acpi_operand_object *params[2]; + acpi_status status; + struct acpi_buffer buffer; - FUNCTION_TRACE ("Rs_set_srs_method_data"); + ACPI_FUNCTION_TRACE ("rs_set_srs_method_data"); - /* already validated params, so we won't repeat here */ + /* Parameters guaranteed valid by caller */ /* - * The In_buffer parameter will point to a linked list of + * The in_buffer parameter will point to a linked list of * resource parameters. It needs to be formatted into a - * byte stream to be sent in as an input parameter. 
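The two-pass idiom deleted below (one acpi_rs_create_byte_stream() call to learn the size, a second call to fill a hand-allocated buffer) is exactly what the new ACPI_ALLOCATE_LOCAL_BUFFER convention folds into a single call. A minimal sketch of the new convention as this hunk uses it; the local declarations are illustrative:

	struct acpi_buffer buffer;
	acpi_status status;

	/* Ask the callee to size and allocate the byte stream itself */
	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
	buffer.pointer = NULL;

	status = acpi_rs_create_byte_stream(in_buffer->pointer, &buffer);
	/* On success, buffer.length/buffer.pointer describe the stream;
	 * ownership then passes to the _SRS parameter object, or the
	 * buffer is released with acpi_os_free() on the failure path. */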
- */ - buffer_size_needed = 0; - - /* - * First call is to get the buffer size needed - */ - status = acpi_rs_create_byte_stream (in_buffer->pointer, byte_stream, - &buffer_size_needed); - /* - * We expect a return of AE_BUFFER_OVERFLOW - * if not, exit with the error - */ - if (AE_BUFFER_OVERFLOW != status) { - return_ACPI_STATUS (status); - } - - /* - * Allocate the buffer needed + * byte stream to be sent in as an input parameter to _SRS + * + * Convert the linked list into a byte stream */ - byte_stream = ACPI_MEM_CALLOCATE (buffer_size_needed); - if (NULL == byte_stream) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - /* - * Now call to convert the linked list into a byte stream - */ - status = acpi_rs_create_byte_stream (in_buffer->pointer, byte_stream, - &buffer_size_needed); + buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; + status = acpi_rs_create_byte_stream (in_buffer->pointer, &buffer); if (ACPI_FAILURE (status)) { - goto cleanup; + return_ACPI_STATUS (status); } /* @@ -362,28 +317,27 @@ */ params[0] = acpi_ut_create_internal_object (ACPI_TYPE_BUFFER); if (!params[0]) { - status = AE_NO_MEMORY; - goto cleanup; + acpi_os_free (buffer.pointer); + return_ACPI_STATUS (AE_NO_MEMORY); } - params [1] = NULL; /* - * Set up the parameter object + * Set up the parameter object */ - params[0]->buffer.length = buffer_size_needed; - params[0]->buffer.pointer = byte_stream; + params[0]->buffer.length = (u32) buffer.length; + params[0]->buffer.pointer = buffer.pointer; + params[0]->common.flags = AOPOBJ_DATA_VALID; + params[1] = NULL; /* * Execute the method, no return value */ status = acpi_ns_evaluate_relative (handle, "_SRS", params, NULL); - acpi_ut_remove_reference (params[0]); /* - * Clean up and return the status from Acpi_ns_evaluate_relative + * Clean up and return the status from acpi_ns_evaluate_relative */ -cleanup: - + acpi_ut_remove_reference (params[0]); return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/resources/rsxface.c linux.21rc5-ac2/drivers/acpi/resources/rsxface.c --- linux.21rc5/drivers/acpi/resources/rsxface.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/resources/rsxface.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,44 +1,60 @@ /******************************************************************************* * * Module Name: rsxface - Public interfaces to the resource manager - * $Revision: 15 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "acresrc.h" +#include +#include #define _COMPONENT ACPI_RESOURCES - MODULE_NAME ("rsxface") + ACPI_MODULE_NAME ("rsxface") /******************************************************************************* * - * FUNCTION: Acpi_get_irq_routing_table + * FUNCTION: acpi_get_irq_routing_table * - * PARAMETERS: Device_handle - a handle to the Bus device we are querying - * Ret_buffer - a pointer to a buffer to receive the + * PARAMETERS: device_handle - a handle to the Bus device we are querying + * ret_buffer - a pointer to a buffer to receive the * current resources for the device * * RETURN: Status @@ -46,25 +62,25 @@ * DESCRIPTION: This function is called to get the IRQ routing table for a * specific bus. The caller must first acquire a handle for the * desired bus. The routine table is placed in the buffer pointed - * to by the Ret_buffer variable parameter. + * to by the ret_buffer variable parameter. * * If the function fails an appropriate status will be returned - * and the value of Ret_buffer is undefined. + * and the value of ret_buffer is undefined. * * This function attempts to execute the _PRT method contained in - * the object indicated by the passed Device_handle. + * the object indicated by the passed device_handle. * ******************************************************************************/ acpi_status acpi_get_irq_routing_table ( - acpi_handle device_handle, - acpi_buffer *ret_buffer) + acpi_handle device_handle, + struct acpi_buffer *ret_buffer) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Acpi_get_irq_routing_table "); + ACPI_FUNCTION_TRACE ("acpi_get_irq_routing_table "); /* @@ -73,12 +89,15 @@ * we also need a valid pointer in the buffer. If it's a zero buffer length, * we'll be returning the needed buffer size, so keep going. 
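Each exported entry point now funnels its buffer checks through acpi_ut_validate_buffer() instead of repeating the compound test shown in the removed lines. A rough sketch of what the consolidated check has to accept and reject, inferred from the calling convention rather than copied from the utilities layer:

	acpi_status
	validate_buffer_sketch (struct acpi_buffer *buffer)
	{
		if (!buffer)
			return (AE_BAD_PARAMETER);

		/* Magic length values ask the callee to do the allocation */
		if (buffer->length == ACPI_ALLOCATE_BUFFER ||
		    buffer->length == ACPI_ALLOCATE_LOCAL_BUFFER)
			return (AE_OK);

		/* A real length needs a real pointer behind it; length 0
		 * stays legal so a caller can ask for the required size */
		if (buffer->length && !buffer->pointer)
			return (AE_BAD_PARAMETER);

		return (AE_OK);
	}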
*/ - if ((!device_handle) || - (!ret_buffer) || - ((!ret_buffer->pointer) && (ret_buffer->length))) { + if (!device_handle) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + status = acpi_ut_validate_buffer (ret_buffer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + status = acpi_rs_get_prt_method_data (device_handle, ret_buffer); return_ACPI_STATUS (status); } @@ -86,11 +105,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_get_current_resources + * FUNCTION: acpi_get_current_resources * - * PARAMETERS: Device_handle - a handle to the device object for the + * PARAMETERS: device_handle - a handle to the device object for the * device we are querying - * Ret_buffer - a pointer to a buffer to receive the + * ret_buffer - a pointer to a buffer to receive the * current resources for the device * * RETURN: Status @@ -98,25 +117,25 @@ * DESCRIPTION: This function is called to get the current resources for a * specific device. The caller must first acquire a handle for * the desired device. The resource data is placed in the buffer - * pointed to by the Ret_buffer variable parameter. + * pointed to by the ret_buffer variable parameter. * * If the function fails an appropriate status will be returned - * and the value of Ret_buffer is undefined. + * and the value of ret_buffer is undefined. * * This function attempts to execute the _CRS method contained in - * the object indicated by the passed Device_handle. + * the object indicated by the passed device_handle. * ******************************************************************************/ acpi_status acpi_get_current_resources ( - acpi_handle device_handle, - acpi_buffer *ret_buffer) + acpi_handle device_handle, + struct acpi_buffer *ret_buffer) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Acpi_get_current_resources"); + ACPI_FUNCTION_TRACE ("acpi_get_current_resources"); /* @@ -125,12 +144,15 @@ * we also need a valid pointer in the buffer. If it's a zero buffer length, * we'll be returning the needed buffer size, so keep going. */ - if ((!device_handle) || - (!ret_buffer) || - ((ret_buffer->length) && (!ret_buffer->pointer))) { + if (!device_handle) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + status = acpi_ut_validate_buffer (ret_buffer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + status = acpi_rs_get_crs_method_data (device_handle, ret_buffer); return_ACPI_STATUS (status); } @@ -138,11 +160,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_get_possible_resources + * FUNCTION: acpi_get_possible_resources * - * PARAMETERS: Device_handle - a handle to the device object for the + * PARAMETERS: device_handle - a handle to the device object for the * device we are querying - * Ret_buffer - a pointer to a buffer to receive the + * ret_buffer - a pointer to a buffer to receive the * resources for the device * * RETURN: Status @@ -150,22 +172,22 @@ * DESCRIPTION: This function is called to get a list of the possible resources * for a specific device. The caller must first acquire a handle * for the desired device. The resource data is placed in the - * buffer pointed to by the Ret_buffer variable. + * buffer pointed to by the ret_buffer variable. * * If the function fails an appropriate status will be returned - * and the value of Ret_buffer is undefined. + * and the value of ret_buffer is undefined. 
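The zero-length query convention these comments describe survives the rewrite: a caller may pass an empty buffer to learn the required size and retry. A hedged sketch of that idiom; AE_BUFFER_OVERFLOW reporting the needed length in buf.length is the historical ACPI CA behaviour and is assumed here, as is the handle "dev":

	struct acpi_buffer buf = {0, NULL};
	acpi_status status;

	status = acpi_get_possible_resources(dev, &buf);
	if (status == AE_BUFFER_OVERFLOW) {
		/* buf.length now holds the required size */
		buf.pointer = kmalloc(buf.length, GFP_KERNEL);
		if (buf.pointer)
			status = acpi_get_possible_resources(dev, &buf);
	}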
* ******************************************************************************/ acpi_status acpi_get_possible_resources ( - acpi_handle device_handle, - acpi_buffer *ret_buffer) + acpi_handle device_handle, + struct acpi_buffer *ret_buffer) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Acpi_get_possible_resources"); + ACPI_FUNCTION_TRACE ("acpi_get_possible_resources"); /* @@ -174,12 +196,15 @@ * we also need a valid pointer in the buffer. If it's a zero buffer length, * we'll be returning the needed buffer size, so keep going. */ - if ((!device_handle) || - (!ret_buffer) || - ((ret_buffer->length) && (!ret_buffer->pointer))) { + if (!device_handle) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + status = acpi_ut_validate_buffer (ret_buffer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + status = acpi_rs_get_prs_method_data (device_handle, ret_buffer); return_ACPI_STATUS (status); } @@ -187,11 +212,96 @@ /******************************************************************************* * - * FUNCTION: Acpi_set_current_resources + * FUNCTION: acpi_walk_resources + * + * PARAMETERS: device_handle - a handle to the device object for the + * device we are querying + * Path - method name of the resources we want + * (METHOD_NAME__CRS or METHOD_NAME__PRS) + * user_function - called for each resource + * Context - passed to user_function + * + * RETURN: Status + * + * DESCRIPTION: Retrieves the current or possible resource list for the + * specified device. The user_function is called once for + * each resource in the list. * - * PARAMETERS: Device_handle - a handle to the device object for the + ******************************************************************************/ + +acpi_status +acpi_walk_resources ( + acpi_handle device_handle, + char *path, + ACPI_WALK_RESOURCE_CALLBACK user_function, + void *context) +{ + acpi_status status; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + struct acpi_resource *resource; + + + ACPI_FUNCTION_TRACE ("acpi_walk_resources"); + + + if (!device_handle || + (ACPI_STRNCMP (path, METHOD_NAME__CRS, sizeof (METHOD_NAME__CRS)) && + ACPI_STRNCMP (path, METHOD_NAME__PRS, sizeof (METHOD_NAME__PRS)))) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_rs_get_method_data (device_handle, path, &buffer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + resource = (struct acpi_resource *) buffer.pointer; + for (;;) { + if (!resource || resource->id == ACPI_RSTYPE_END_TAG) { + break; + } + + status = user_function (resource, context); + + switch (status) { + case AE_OK: + case AE_CTRL_DEPTH: + + /* Just keep going */ + status = AE_OK; + break; + + case AE_CTRL_TERMINATE: + + /* Exit now, with OK stats */ + + status = AE_OK; + goto cleanup; + + default: + + /* All others are valid exceptions */ + + goto cleanup; + } + + resource = ACPI_NEXT_RESOURCE (resource); + } + +cleanup: + + acpi_os_free (buffer.pointer); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_set_current_resources + * + * PARAMETERS: device_handle - a handle to the device object for the * device we are changing the resources of - * In_buffer - a pointer to a buffer containing the + * in_buffer - a pointer to a buffer containing the * resources to be set for the device * * RETURN: Status @@ -199,19 +309,19 @@ * DESCRIPTION: This function is called to set the current resources for a * specific device. 
The caller must first acquire a handle for * the desired device. The resource data is passed to the routine - * the buffer pointed to by the In_buffer variable. + * the buffer pointed to by the in_buffer variable. * ******************************************************************************/ acpi_status acpi_set_current_resources ( - acpi_handle device_handle, - acpi_buffer *in_buffer) + acpi_handle device_handle, + struct acpi_buffer *in_buffer) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Acpi_set_current_resources"); + ACPI_FUNCTION_TRACE ("acpi_set_current_resources"); /* @@ -227,3 +337,76 @@ status = acpi_rs_set_srs_method_data (device_handle, in_buffer); return_ACPI_STATUS (status); } + + +#define ACPI_COPY_FIELD(out, in, field) ((out)->field = (in)->field) +#define ACPI_COPY_ADDRESS(out, in) \ + ACPI_COPY_FIELD(out, in, resource_type); \ + ACPI_COPY_FIELD(out, in, producer_consumer); \ + ACPI_COPY_FIELD(out, in, decode); \ + ACPI_COPY_FIELD(out, in, min_address_fixed); \ + ACPI_COPY_FIELD(out, in, max_address_fixed); \ + ACPI_COPY_FIELD(out, in, attribute); \ + ACPI_COPY_FIELD(out, in, granularity); \ + ACPI_COPY_FIELD(out, in, min_address_range); \ + ACPI_COPY_FIELD(out, in, max_address_range); \ + ACPI_COPY_FIELD(out, in, address_translation_offset); \ + ACPI_COPY_FIELD(out, in, address_length); \ + ACPI_COPY_FIELD(out, in, resource_source); + +/****************************************************************************** + * + * FUNCTION: acpi_resource_to_address64 + * + * PARAMETERS: resource - Pointer to a resource + * out - Pointer to the users's return + * buffer (a struct + * struct acpi_resource_address64) + * + * RETURN: Status + * + * DESCRIPTION: If the resource is an address16, address32, or address64, + * copy it to the address64 return buffer. This saves the + * caller from having to duplicate code for different-sized + * addresses. 
+ * + ******************************************************************************/ + +acpi_status +acpi_resource_to_address64 ( + struct acpi_resource *resource, + struct acpi_resource_address64 *out) +{ + struct acpi_resource_address16 *address16; + struct acpi_resource_address32 *address32; + + + switch (resource->id) { + case ACPI_RSTYPE_ADDRESS16: + + address16 = (struct acpi_resource_address16 *) &resource->data; + ACPI_COPY_ADDRESS(out, address16); + break; + + + case ACPI_RSTYPE_ADDRESS32: + + address32 = (struct acpi_resource_address32 *) &resource->data; + ACPI_COPY_ADDRESS(out, address32); + break; + + + case ACPI_RSTYPE_ADDRESS64: + + /* Simple copy for 64 bit source */ + + ACPI_MEMCPY (out, &resource->data, sizeof (struct acpi_resource_address64)); + break; + + + default: + return (AE_BAD_PARAMETER); + } + + return (AE_OK); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/system.c linux.21rc5-ac2/drivers/acpi/system.c --- linux.21rc5/drivers/acpi/system.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/system.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,1326 @@ +/* + * acpi_system.c - ACPI System Driver ($Revision: 57 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_X86 +#ifdef CONFIG_ACPI_SLEEP +#include +#include +#include +#endif +#endif + + +#define _COMPONENT ACPI_SYSTEM_COMPONENT +ACPI_MODULE_NAME ("acpi_system") + +#define PREFIX "ACPI: " + +extern FADT_DESCRIPTOR acpi_fadt; + +static int acpi_system_add (struct acpi_device *device); +static int acpi_system_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_system_driver = { + .name = ACPI_SYSTEM_DRIVER_NAME, + .class = ACPI_SYSTEM_CLASS, + .ids = ACPI_SYSTEM_HID, + .ops = { + .add = acpi_system_add, + .remove = acpi_system_remove + }, +}; + +struct acpi_system +{ + acpi_handle handle; + u8 states[ACPI_S_STATE_COUNT]; +}; + +/* Global vars for handling event proc entry */ +static spinlock_t acpi_system_event_lock = SPIN_LOCK_UNLOCKED; +int event_is_open = 0; +extern struct list_head acpi_bus_event_list; +extern wait_queue_head_t acpi_bus_event_queue; + +/* -------------------------------------------------------------------------- + System Sleep + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_PM + +static void +acpi_power_off (void) +{ + acpi_enter_sleep_state_prep(ACPI_STATE_S5); + ACPI_DISABLE_IRQS(); + acpi_enter_sleep_state(ACPI_STATE_S5); +} + +#endif /*CONFIG_PM*/ + + +#ifdef CONFIG_ACPI_SLEEP + +/** + * acpi_system_restore_state - OS-specific restoration of state + * @state: sleep state we're exiting + * + * Note that if we're coming back from S4, the memory image should have + * already been loaded from the disk and is already in place. (Otherwise how + * else would we be here?). + */ +acpi_status +acpi_system_restore_state( + u32 state) +{ + /* + * We should only be here if we're coming back from STR or STD. + * And, in the case of the latter, the memory image should have already + * been loaded from disk. + */ + if (state > ACPI_STATE_S1) { + acpi_restore_state_mem(); + + /* Do _early_ resume for irqs. Required by + * ACPI specs. + */ + /* TBD: call arch dependant reinitialization of the + * interrupts. + */ +#ifdef CONFIG_X86 + init_8259A(0); +#endif + /* wait for power to come back */ + mdelay(1000); + + } + + /* Be really sure that irqs are disabled. */ + ACPI_DISABLE_IRQS(); + + /* Wait a little again, just in case... */ + mdelay(1000); + + /* enable interrupts once again */ + ACPI_ENABLE_IRQS(); + + /* turn all the devices back on */ + if (state > ACPI_STATE_S1) + pm_send_all(PM_RESUME, (void *)0); + + return AE_OK; +} + + +/** + * acpi_system_save_state - save OS specific state and power down devices + * @state: sleep state we're entering. + * + * This handles saving all context to memory, and possibly disk. + * First, we call to the device driver layer to save device state. + * Once we have that, we save whatevery processor and kernel state we + * need to memory. + * If we're entering S4, we then write the memory image to disk. + * + * Only then it is safe for us to power down devices, since we may need + * the disks and upstream buses to write to. + */ +acpi_status +acpi_system_save_state( + u32 state) +{ + int error = 0; + + /* Send notification to devices that they will be suspended. + * If any device or driver cannot make the transition, either up + * or down, we'll get an error back. 
+ */ + if (state > ACPI_STATE_S1) { + error = pm_send_all(PM_SAVE_STATE, (void *)3); + if (error) + return AE_ERROR; + } + + if (state < ACPI_STATE_S5) { + /* Tell devices to stop I/O and actually save their state. + * It is theoretically possible that something could fail, + * so handle that gracefully.. + */ + if (state > ACPI_STATE_S1 && state != ACPI_STATE_S5) { + error = pm_send_all(PM_SUSPEND, (void *)3); + if (error) { + /* Tell devices to restore state if they have + * it saved and to start taking I/O requests. + */ + pm_send_all(PM_RESUME, (void *)0); + return error; + } + } + + /* flush caches */ + ACPI_FLUSH_CPU_CACHE(); + + /* Do arch specific saving of state. */ + if (state > ACPI_STATE_S1) { + error = acpi_save_state_mem(); + + /* TBD: if no s4bios, write codes for + * acpi_save_state_disk()... + */ +#if 0 + if (!error && (state == ACPI_STATE_S4)) + error = acpi_save_state_disk(); +#endif + if (error) { + pm_send_all(PM_RESUME, (void *)0); + return error; + } + } + } + /* disable interrupts + * Note that acpi_suspend -- our caller -- will do this once we return. + * But, we want it done early, so we don't get any suprises during + * the device suspend sequence. + */ + ACPI_DISABLE_IRQS(); + + /* Unconditionally turn off devices. + * Obvious if we enter a sleep state. + * If entering S5 (soft off), this should put devices in a + * quiescent state. + */ + + if (state > ACPI_STATE_S1) { + error = pm_send_all(PM_SUSPEND, (void *)3); + + /* We're pretty screwed if we got an error from this. + * We try to recover by simply calling our own restore_state + * function; see above for definition. + * + * If it's S5 though, go through with it anyway.. + */ + if (error && state != ACPI_STATE_S5) + acpi_system_restore_state(state); + } + return error ? AE_ERROR : AE_OK; +} + + +/**************************************************************************** + * + * FUNCTION: acpi_system_suspend + * + * PARAMETERS: %state: Sleep state to enter. + * + * RETURN: acpi_status, whether or not we successfully entered and + * exited sleep. + * + * DESCRIPTION: Perform OS-specific action to enter sleep state. + * This is the final step in going to sleep, per spec. If we + * know we're coming back (i.e. not entering S5), we save the + * processor flags. [ We'll have to save and restore them anyway, + * so we use the arch-agnostic save_flags and restore_flags + * here.] We then set the place to return to in arch-specific + * globals using arch_set_return_point. Finally, we call the + * ACPI function to write the proper values to I/O ports. + * + ****************************************************************************/ + +acpi_status +acpi_system_suspend( + u32 state) +{ + acpi_status status = AE_ERROR; + unsigned long flags = 0; + + local_irq_save(flags); + /* kernel_fpu_begin(); */ + + switch (state) { + case ACPI_STATE_S1: + barrier(); + status = acpi_enter_sleep_state(state); + break; + case ACPI_STATE_S4: + do_suspend_lowlevel_s4bios(0); + break; + } + + /* kernel_fpu_end(); */ + local_irq_restore(flags); + + return status; +} + + + +/** + * acpi_suspend - OS-agnostic system suspend/resume support (S? states) + * @state: state we're entering + * + */ +acpi_status +acpi_suspend ( + u32 state) +{ + acpi_status status; + + /* only support S1 and S5 on kernel 2.4 */ + if (state != ACPI_STATE_S1 && state != ACPI_STATE_S4 + && state != ACPI_STATE_S5) + return AE_ERROR; + + + if (ACPI_STATE_S4 == state) { + /* For s4bios, we need a wakeup address. 
*/
+		if (1 == acpi_gbl_FACS->S4bios_f &&
+		    0 != acpi_gbl_FADT->smi_cmd) {
+			if (!acpi_wakeup_address)
+				return AE_ERROR;
+			acpi_set_firmware_waking_vector((acpi_physical_address) acpi_wakeup_address);
+		} else
+			/* We don't support S4 under 2.4. Give up */
+			return AE_ERROR;
+	}
+
+	status = acpi_system_save_state(state);
+	if (!ACPI_SUCCESS(status) && state != ACPI_STATE_S5)
+		return status;
+
+	acpi_enter_sleep_state_prep(state);
+
+	/* disable interrupts and flush caches */
+	ACPI_DISABLE_IRQS();
+	ACPI_FLUSH_CPU_CACHE();
+
+	/* perform OS-specific sleep actions */
+	status = acpi_system_suspend(state);
+
+	/* Even if we failed to go to sleep, all of the devices are in a suspended
+	 * mode. So, we run these unconditionally to make sure we have a usable system
+	 * no matter what.
+	 */
+	acpi_leave_sleep_state(state);
+	acpi_system_restore_state(state);
+
+	/* make sure interrupts are enabled */
+	ACPI_ENABLE_IRQS();
+
+	/* reset firmware waking vector */
+	acpi_set_firmware_waking_vector((acpi_physical_address) 0);
+
+	return status;
+}
+
+#endif /* CONFIG_ACPI_SLEEP */
+
+
+/* --------------------------------------------------------------------------
+                           FS Interface (/proc)
+   -------------------------------------------------------------------------- */
+
+static int
+acpi_system_read_info (
+	char *page,
+	char **start,
+	off_t off,
+	int count,
+	int *eof,
+	void *data)
+{
+	struct acpi_system *system = (struct acpi_system *) data;
+	char *p = page;
+	int size = 0;
+	u32 i = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_system_read_info");
+
+	if (!system || (off != 0))
+		goto end;
+
+	p += sprintf(p, "version: %x\n", ACPI_CA_VERSION);
+
+	p += sprintf(p, "states: ");
+	for (i=0; i<ACPI_S_STATE_COUNT; i++) {
+		if (system->states[i]) {
+			p += sprintf(p, "S%d ", i);
+			if (i == ACPI_STATE_S4 &&
+			    acpi_gbl_FACS->S4bios_f &&
+			    0 != acpi_gbl_FADT->smi_cmd)
+				p += sprintf(p, "S4Bios ");
+		}
+	}
+	p += sprintf(p, "\n");
+
+end:
+	size = (p - page);
+	if (size <= off+count) *eof = 1;
+	*start = page + off;
+	size -= off;
+	if (size>count) size = count;
+	if (size<0) size = 0;
+
+	return_VALUE(size);
+}
+
+static int acpi_system_open_event(struct inode *inode, struct file *file);
+static ssize_t acpi_system_read_event (struct file*, char*, size_t, loff_t*);
+static int acpi_system_close_event(struct inode *inode, struct file *file);
+static unsigned int acpi_system_poll_event(struct file *file, poll_table *wait);
+
+
+static struct file_operations acpi_system_event_ops = {
+	.open = acpi_system_open_event,
+	.read = acpi_system_read_event,
+	.release = acpi_system_close_event,
+	.poll = acpi_system_poll_event,
+};
+
+static int
+acpi_system_open_event(struct inode *inode, struct file *file)
+{
+	spin_lock_irq (&acpi_system_event_lock);
+
+	if(event_is_open)
+		goto out_busy;
+
+	event_is_open = 1;
+
+	spin_unlock_irq (&acpi_system_event_lock);
+	return 0;
+
+out_busy:
+	spin_unlock_irq (&acpi_system_event_lock);
+	return -EBUSY;
+}
+
+static ssize_t
+acpi_system_read_event (
+	struct file *file,
+	char *buffer,
+	size_t count,
+	loff_t *ppos)
+{
+	int result = 0;
+	struct acpi_bus_event event;
+	static char str[ACPI_MAX_STRING];
+	static int chars_remaining = 0;
+	static char *ptr;
+
+
+	ACPI_FUNCTION_TRACE("acpi_system_read_event");
+
+	if (!chars_remaining) {
+		memset(&event, 0, sizeof(struct acpi_bus_event));
+
+		if ((file->f_flags & O_NONBLOCK)
+		    && (list_empty(&acpi_bus_event_list)))
+			return_VALUE(-EAGAIN);
+
+		result = acpi_bus_receive_event(&event);
+		if (result) {
+			return_VALUE(-EIO);
+		}
+
+		chars_remaining = sprintf(str, "%s %s %08x
%08x\n", + event.device_class?event.device_class:"", + event.bus_id?event.bus_id:"", + event.type, event.data); + ptr = str; + } + + if (chars_remaining < count) { + count = chars_remaining; + } + + if (copy_to_user(buffer, ptr, count)) + return_VALUE(-EFAULT); + + *ppos += count; + chars_remaining -= count; + ptr += count; + + return_VALUE(count); +} + +static int +acpi_system_close_event(struct inode *inode, struct file *file) +{ + spin_lock_irq (&acpi_system_event_lock); + event_is_open = 0; + spin_unlock_irq (&acpi_system_event_lock); + return 0; +} + +static unsigned int +acpi_system_poll_event( + struct file *file, + poll_table *wait) +{ + poll_wait(file, &acpi_bus_event_queue, wait); + if (!list_empty(&acpi_bus_event_list)) + return POLLIN | POLLRDNORM; + return 0; +} + +static ssize_t acpi_system_read_dsdt (struct file*, char*, size_t, loff_t*); + +static struct file_operations acpi_system_dsdt_ops = { + .read = acpi_system_read_dsdt, +}; + +static ssize_t +acpi_system_read_dsdt ( + struct file *file, + char *buffer, + size_t count, + loff_t *ppos) +{ + acpi_status status = AE_OK; + struct acpi_buffer dsdt = {ACPI_ALLOCATE_BUFFER, NULL}; + void *data = 0; + size_t size = 0; + + ACPI_FUNCTION_TRACE("acpi_system_read_dsdt"); + + status = acpi_get_table(ACPI_TABLE_DSDT, 1, &dsdt); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + if (*ppos < dsdt.length) { + data = dsdt.pointer + file->f_pos; + size = dsdt.length - file->f_pos; + if (size > count) + size = count; + if (copy_to_user(buffer, data, size)) { + acpi_os_free(dsdt.pointer); + return_VALUE(-EFAULT); + } + } + + acpi_os_free(dsdt.pointer); + + *ppos += size; + + return_VALUE(size); +} + + +static ssize_t acpi_system_read_fadt (struct file*, char*, size_t, loff_t*); + +static struct file_operations acpi_system_fadt_ops = { + .read = acpi_system_read_fadt, +}; + +static ssize_t +acpi_system_read_fadt ( + struct file *file, + char *buffer, + size_t count, + loff_t *ppos) +{ + acpi_status status = AE_OK; + struct acpi_buffer fadt = {ACPI_ALLOCATE_BUFFER, NULL}; + void *data = 0; + size_t size = 0; + + ACPI_FUNCTION_TRACE("acpi_system_read_fadt"); + + status = acpi_get_table(ACPI_TABLE_FADT, 1, &fadt); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + if (*ppos < fadt.length) { + data = fadt.pointer + file->f_pos; + size = fadt.length - file->f_pos; + if (size > count) + size = count; + if (copy_to_user(buffer, data, size)) { + acpi_os_free(fadt.pointer); + return_VALUE(-EFAULT); + } + } + + acpi_os_free(fadt.pointer); + + *ppos += size; + + return_VALUE(size); +} + + +#ifdef ACPI_DEBUG_OUTPUT + +static int +acpi_system_read_debug ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + char *p = page; + int size = 0; + + if (off != 0) + goto end; + + switch ((unsigned long) data) { + case 0: + p += sprintf(p, "0x%08x\n", acpi_dbg_layer); + break; + case 1: + p += sprintf(p, "0x%08x\n", acpi_dbg_level); + break; + default: + p += sprintf(p, "Invalid debug option\n"); + break; + } + +end: + size = (p - page); + if (size <= off+count) *eof = 1; + *start = page + off; + size -= off; + if (size>count) size = count; + if (size<0) size = 0; + + return size; +} + + +static int +acpi_system_write_debug ( + struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + char debug_string[12] = {'\0'}; + + ACPI_FUNCTION_TRACE("acpi_system_write_debug"); + + if (count > sizeof(debug_string) - 1) + return_VALUE(-EINVAL); + + if (copy_from_user(debug_string, buffer, count)) + 
return_VALUE(-EFAULT); + + debug_string[count] = '\0'; + + switch ((unsigned long) data) { + case 0: + acpi_dbg_layer = simple_strtoul(debug_string, NULL, 0); + break; + case 1: + acpi_dbg_level = simple_strtoul(debug_string, NULL, 0); + break; + default: + return_VALUE(-EINVAL); + } + + return_VALUE(count); +} + +#endif /* ACPI_DEBUG_OUTPUT */ + + +#ifdef CONFIG_ACPI_SLEEP + +static int +acpi_system_read_sleep ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + struct acpi_system *system = (struct acpi_system *) data; + char *p = page; + int size; + int i; + + ACPI_FUNCTION_TRACE("acpi_system_read_sleep"); + + if (!system || (off != 0)) + goto end; + + for (i = 0; i <= ACPI_STATE_S5; i++) { + if (system->states[i]) { + p += sprintf(p,"S%d ", i); + if (i == ACPI_STATE_S4 && acpi_gbl_FACS->S4bios_f && + acpi_gbl_FADT->smi_cmd != 0) + p += sprintf(p, "S4Bios "); + } + } + + p += sprintf(p, "\n"); + +end: + size = (p - page); + if (size <= off+count) *eof = 1; + *start = page + off; + size -= off; + if (size>count) size = count; + if (size<0) size = 0; + + return_VALUE(size); +} + + +static int +acpi_system_write_sleep ( + struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + acpi_status status = AE_OK; + struct acpi_system *system = (struct acpi_system *) data; + char state_string[12] = {'\0'}; + u32 state = 0; + + ACPI_FUNCTION_TRACE("acpi_system_write_sleep"); + + if (!system || (count > sizeof(state_string) - 1)) + return_VALUE(-EINVAL); + + if (copy_from_user(state_string, buffer, count)) + return_VALUE(-EFAULT); + + state_string[count] = '\0'; + + state = simple_strtoul(state_string, NULL, 0); + + if (!system->states[state]) + return_VALUE(-ENODEV); + + /* + * If S4 is supported by the OS, then we should assume that + * echo 4b > /proc/acpi/sleep is for s4bios. + * Since we have only s4bios, we assume that acpi_suspend failed + * if no s4bios support. + */ + status = acpi_suspend(state); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + return_VALUE(count); +} + + +static int +acpi_system_read_alarm ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *context) +{ + char *p = page; + int size = 0; + u32 sec, min, hr; + u32 day, mo, yr; + + ACPI_FUNCTION_TRACE("acpi_system_read_alarm"); + + if (off != 0) + goto end; + + spin_lock(&rtc_lock); + + sec = CMOS_READ(RTC_SECONDS_ALARM); + min = CMOS_READ(RTC_MINUTES_ALARM); + hr = CMOS_READ(RTC_HOURS_ALARM); + +#if 0 /* If we ever get an FACP with proper values... */ + if (acpi_gbl_FADT->day_alrm) + day = CMOS_READ(acpi_gbl_FADT->day_alrm); + else + day = CMOS_READ(RTC_DAY_OF_MONTH); + if (acpi_gbl_FADT->mon_alrm) + mo = CMOS_READ(acpi_gbl_FADT->mon_alrm); + else + mo = CMOS_READ(RTC_MONTH);; + if (acpi_gbl_FADT->century) + yr = CMOS_READ(acpi_gbl_FADT->century) * 100 + CMOS_READ(RTC_YEAR); + else + yr = CMOS_READ(RTC_YEAR); +#else + day = CMOS_READ(RTC_DAY_OF_MONTH); + mo = CMOS_READ(RTC_MONTH); + yr = CMOS_READ(RTC_YEAR); +#endif + + spin_unlock(&rtc_lock); + + BCD_TO_BIN(sec); + BCD_TO_BIN(min); + BCD_TO_BIN(hr); + BCD_TO_BIN(day); + BCD_TO_BIN(mo); + BCD_TO_BIN(yr); + +#if 0 + /* we're trusting the FADT (see above)*/ +#else + /* If we're not trusting the FADT, we should at least make it + * right for _this_ century... ehm, what is _this_ century? + * + * TBD: + * ASAP: find piece of code in the kernel, e.g. star tracker driver, + * which we can trust to determine the century correctly. Atom + * watch driver would be nice, too... 
+ * + * if that has not happened, change for first release in 2050: + * if (yr<50) + * yr += 2100; + * else + * yr += 2000; // current line of code + * + * if that has not happened either, please do on 2099/12/31:23:59:59 + * s/2000/2100 + * + */ + yr += 2000; +#endif + + p += sprintf(p,"%4.4u-", yr); + p += (mo > 12) ? sprintf(p, "**-") : sprintf(p, "%2.2u-", mo); + p += (day > 31) ? sprintf(p, "** ") : sprintf(p, "%2.2u ", day); + p += (hr > 23) ? sprintf(p, "**:") : sprintf(p, "%2.2u:", hr); + p += (min > 59) ? sprintf(p, "**:") : sprintf(p, "%2.2u:", min); + p += (sec > 59) ? sprintf(p, "**\n") : sprintf(p, "%2.2u\n", sec); + + end: + size = p - page; + if (size < count) *eof = 1; + else if (size > count) size = count; + if (size < 0) size = 0; + *start = page; + + return_VALUE(size); +} + + +static int +get_date_field ( + char **p, + u32 *value) +{ + char *next = NULL; + char *string_end = NULL; + int result = -EINVAL; + + /* + * Try to find delimeter, only to insert null. The end of the + * string won't have one, but is still valid. + */ + next = strpbrk(*p, "- :"); + if (next) + *next++ = '\0'; + + *value = simple_strtoul(*p, &string_end, 10); + + /* Signal success if we got a good digit */ + if (string_end != *p) + result = 0; + + if (next) + *p = next; + + return result; +} + + +static int +acpi_system_write_alarm ( + struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + int result = 0; + char alarm_string[30] = {'\0'}; + char *p = alarm_string; + u32 sec, min, hr, day, mo, yr; + int adjust = 0; + unsigned char rtc_control = 0; + + ACPI_FUNCTION_TRACE("acpi_system_write_alarm"); + + if (count > sizeof(alarm_string) - 1) + return_VALUE(-EINVAL); + + if (copy_from_user(alarm_string, buffer, count)) + return_VALUE(-EFAULT); + + alarm_string[count] = '\0'; + + /* check for time adjustment */ + if (alarm_string[0] == '+') { + p++; + adjust = 1; + } + + if ((result = get_date_field(&p, &yr))) + goto end; + if ((result = get_date_field(&p, &mo))) + goto end; + if ((result = get_date_field(&p, &day))) + goto end; + if ((result = get_date_field(&p, &hr))) + goto end; + if ((result = get_date_field(&p, &min))) + goto end; + if ((result = get_date_field(&p, &sec))) + goto end; + + if (sec > 59) { + min += 1; + sec -= 60; + } + if (min > 59) { + hr += 1; + min -= 60; + } + if (hr > 23) { + day += 1; + hr -= 24; + } + if (day > 31) { + mo += 1; + day -= 31; + } + if (mo > 12) { + yr += 1; + mo -= 12; + } + + spin_lock_irq(&rtc_lock); + + rtc_control = CMOS_READ(RTC_CONTROL); + if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BIN_TO_BCD(yr); + BIN_TO_BCD(mo); + BIN_TO_BCD(day); + BIN_TO_BCD(hr); + BIN_TO_BCD(min); + BIN_TO_BCD(sec); + } + + if (adjust) { + yr += CMOS_READ(RTC_YEAR); + mo += CMOS_READ(RTC_MONTH); + day += CMOS_READ(RTC_DAY_OF_MONTH); + hr += CMOS_READ(RTC_HOURS); + min += CMOS_READ(RTC_MINUTES); + sec += CMOS_READ(RTC_SECONDS); + } + + spin_unlock_irq(&rtc_lock); + + if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BCD_TO_BIN(yr); + BCD_TO_BIN(mo); + BCD_TO_BIN(day); + BCD_TO_BIN(hr); + BCD_TO_BIN(min); + BCD_TO_BIN(sec); + } + + if (sec > 59) { + min++; + sec -= 60; + } + if (min > 59) { + hr++; + min -= 60; + } + if (hr > 23) { + day++; + hr -= 24; + } + if (day > 31) { + mo++; + day -= 31; + } + if (mo > 12) { + yr++; + mo -= 12; + } + if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BIN_TO_BCD(yr); + BIN_TO_BCD(mo); + BIN_TO_BCD(day); + BIN_TO_BCD(hr); + BIN_TO_BCD(min); + BIN_TO_BCD(sec); + } + + 
spin_lock_irq(&rtc_lock); + + /* write the fields the rtc knows about */ + CMOS_WRITE(hr, RTC_HOURS_ALARM); + CMOS_WRITE(min, RTC_MINUTES_ALARM); + CMOS_WRITE(sec, RTC_SECONDS_ALARM); + + /* + * If the system supports an enhanced alarm it will have non-zero + * offsets into the CMOS RAM here -- which for some reason are pointing + * to the RTC area of memory. + */ +#if 0 + if (acpi_gbl_FADT->day_alrm) + CMOS_WRITE(day, acpi_gbl_FADT->day_alrm); + if (acpi_gbl_FADT->mon_alrm) + CMOS_WRITE(mo, acpi_gbl_FADT->mon_alrm); + if (acpi_gbl_FADT->century) + CMOS_WRITE(yr/100, acpi_gbl_FADT->century); +#endif + /* enable the rtc alarm interrupt */ + if (!(rtc_control & RTC_AIE)) { + rtc_control |= RTC_AIE; + CMOS_WRITE(rtc_control,RTC_CONTROL); + CMOS_READ(RTC_INTR_FLAGS); + } + + spin_unlock_irq(&rtc_lock); + + acpi_set_register(ACPI_BITREG_RT_CLOCK_ENABLE, 1, ACPI_MTX_LOCK); + + file->f_pos += count; + + result = 0; +end: + return_VALUE(result ? result : count); +} + +#endif /*CONFIG_ACPI_SLEEP*/ + + +static int +acpi_system_add_fs ( + struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_system_add_fs"); + + if (!device) + return_VALUE(-EINVAL); + + /* 'info' [R] */ + entry = create_proc_entry(ACPI_SYSTEM_FILE_INFO, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_SYSTEM_FILE_INFO)); + else { + entry->read_proc = acpi_system_read_info; + entry->data = acpi_driver_data(device); + } + + /* 'dsdt' [R] */ + entry = create_proc_entry(ACPI_SYSTEM_FILE_DSDT, + S_IRUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_SYSTEM_FILE_DSDT)); + else + entry->proc_fops = &acpi_system_dsdt_ops; + + /* 'fadt' [R] */ + entry = create_proc_entry(ACPI_SYSTEM_FILE_FADT, + S_IRUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_SYSTEM_FILE_FADT)); + else + entry->proc_fops = &acpi_system_fadt_ops; + + /* 'event' [R] */ + entry = create_proc_entry(ACPI_SYSTEM_FILE_EVENT, + S_IRUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_SYSTEM_FILE_EVENT)); + else + entry->proc_fops = &acpi_system_event_ops; + +#ifdef CONFIG_ACPI_SLEEP + + /* 'sleep' [R/W]*/ + entry = create_proc_entry(ACPI_SYSTEM_FILE_SLEEP, + S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_SYSTEM_FILE_SLEEP)); + else { + entry->read_proc = acpi_system_read_sleep; + entry->write_proc = acpi_system_write_sleep; + entry->data = acpi_driver_data(device); + } + + /* 'alarm' [R/W] */ + entry = create_proc_entry(ACPI_SYSTEM_FILE_ALARM, + S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_SYSTEM_FILE_ALARM)); + else { + entry->read_proc = acpi_system_read_alarm; + entry->write_proc = acpi_system_write_alarm; + entry->data = acpi_driver_data(device); + } + +#endif /*CONFIG_ACPI_SLEEP*/ + +#ifdef ACPI_DEBUG_OUTPUT + + /* 'debug_layer' [R/W] */ + entry = create_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LAYER, + S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_SYSTEM_FILE_DEBUG_LAYER)); + else { + entry->read_proc = acpi_system_read_debug; + 
entry->write_proc = acpi_system_write_debug;
+		entry->data = (void *) 0;
+	}
+
+	/* 'debug_level' [R/W] */
+	entry = create_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LEVEL,
+		S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
+	if (!entry)
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Unable to create '%s' fs entry\n",
+			ACPI_SYSTEM_FILE_DEBUG_LEVEL));
+	else {
+		entry->read_proc = acpi_system_read_debug;
+		entry->write_proc = acpi_system_write_debug;
+		entry->data = (void *) 1;
+	}
+
+#endif /*ACPI_DEBUG_OUTPUT */
+
+	return_VALUE(0);
+}
+
+
+static int
+acpi_system_remove_fs (
+	struct acpi_device *device)
+{
+	ACPI_FUNCTION_TRACE("acpi_system_remove_fs");
+
+	if (!device)
+		return_VALUE(-EINVAL);
+
+	remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_device_dir(device));
+	remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_device_dir(device));
+	remove_proc_entry(ACPI_SYSTEM_FILE_EVENT, acpi_device_dir(device));
+#ifdef CONFIG_ACPI_SLEEP
+	remove_proc_entry(ACPI_SYSTEM_FILE_SLEEP, acpi_device_dir(device));
+	remove_proc_entry(ACPI_SYSTEM_FILE_ALARM, acpi_device_dir(device));
+#endif
+#ifdef ACPI_DEBUG_OUTPUT
+	remove_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LAYER,
+		acpi_device_dir(device));
+	remove_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LEVEL,
+		acpi_device_dir(device));
+#endif
+
+	return_VALUE(0);
+}
+
+
+/* --------------------------------------------------------------------------
+                             Driver Interface
+   -------------------------------------------------------------------------- */
+
+#if defined(CONFIG_MAGIC_SYSRQ) && defined(CONFIG_PM)
+
+/* Simple wrapper calling power down function. */
+static void acpi_sysrq_power_off(int key, struct pt_regs *pt_regs,
+	struct kbd_struct *kbd, struct tty_struct *tty)
+{
+	acpi_power_off();
+}
+
+struct sysrq_key_op sysrq_acpi_poweroff_op = {
+	.handler = &acpi_sysrq_power_off,
+	.help_msg = "Off",
+	.action_msg = "Power Off\n"
+};
+
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+static int
+acpi_system_add (
+	struct acpi_device *device)
+{
+	int result = 0;
+	acpi_status status = AE_OK;
+	struct acpi_system *system = NULL;
+	u8 i = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_system_add");
+
+	if (!device)
+		return_VALUE(-EINVAL);
+
+	system = kmalloc(sizeof(struct acpi_system), GFP_KERNEL);
+	if (!system)
+		return_VALUE(-ENOMEM);
+	memset(system, 0, sizeof(struct acpi_system));
+
+	system->handle = device->handle;
+	sprintf(acpi_device_name(device), "%s", ACPI_SYSTEM_DEVICE_NAME);
+	sprintf(acpi_device_class(device), "%s", ACPI_SYSTEM_CLASS);
+	acpi_driver_data(device) = system;
+
+	result = acpi_system_add_fs(device);
+	if (result)
+		goto end;
+
+	printk(KERN_INFO PREFIX "%s [%s] (supports",
+		acpi_device_name(device), acpi_device_bid(device));
+	for (i=0; i<ACPI_S_STATE_COUNT; i++) {
+		u8 type_a, type_b;
+		status = acpi_get_sleep_type_data(i, &type_a, &type_b);
+		switch (i) {
+		case ACPI_STATE_S4:
+			if (acpi_gbl_FACS->S4bios_f &&
+			    0 != acpi_gbl_FADT->smi_cmd) {
+				printk(" S4bios");
+				system->states[i] = 1;
+			}
+			/* no break */
+		default:
+			if (ACPI_SUCCESS(status)) {
+				system->states[i] = 1;
+				printk(" S%d", i);
+			}
+		}
+	}
+	printk(")\n");
+
+#ifdef CONFIG_PM
+	/* Install the soft-off (S5) handler. */
+	if (system->states[ACPI_STATE_S5]) {
+		pm_power_off = acpi_power_off;
+		register_sysrq_key('o', &sysrq_acpi_poweroff_op);
+	}
+#endif
+
+end:
+	if (result)
+		kfree(system);
+
+	return_VALUE(result);
+}
+
+
+static int
+acpi_system_remove (
+	struct acpi_device *device,
+	int type)
+{
+	struct acpi_system *system = NULL;
+
+	ACPI_FUNCTION_TRACE("acpi_system_remove");
+
+	if (!device || !acpi_driver_data(device))
+		return_VALUE(-EINVAL);
+
+	system = (struct acpi_system *) acpi_driver_data(device);
+
+#ifdef CONFIG_PM
+	/* Remove the soft-off (S5) handler.
*/ + if (system->states[ACPI_STATE_S5]) { + unregister_sysrq_key('o', &sysrq_acpi_poweroff_op); + pm_power_off = NULL; + } +#endif + + acpi_system_remove_fs(device); + + kfree(system); + + return 0; +} + + +int __init +acpi_system_init (void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_system_init"); + + result = acpi_bus_register_driver(&acpi_system_driver); + if (result < 0) + return_VALUE(-ENODEV); + + return_VALUE(0); +} + + +void __exit +acpi_system_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_system_exit"); + acpi_bus_unregister_driver(&acpi_system_driver); + return_VOID; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables/Makefile linux.21rc5-ac2/drivers/acpi/tables/Makefile --- linux.21rc5/drivers/acpi/tables/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables/Makefile 2003-04-28 17:59:26.000000000 +0100 @@ -1,11 +1,10 @@ # # Makefile for all Linux ACPI interpreter subdirectories -# EXCEPT for the ospm directory # O_TARGET := $(notdir $(CURDIR)).o -obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c)) +obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c)) EXTRA_CFLAGS += $(ACPI_CFLAGS) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables/tbconvrt.c linux.21rc5-ac2/drivers/acpi/tables/tbconvrt.c --- linux.21rc5/drivers/acpi/tables/tbconvrt.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables/tbconvrt.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,68 +1,85 @@ /****************************************************************************** * * Module Name: tbconvrt - ACPI Table conversion utilities - * $Revision: 28 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
  */

-#include "acpi.h"
-#include "achware.h"
-#include "actables.h"
-#include "actbl.h"
+#include <acpi/acpi.h>
+#include <acpi/actables.h>

 #define _COMPONENT          ACPI_TABLES
-	 MODULE_NAME         ("tbconvrt")
+	 ACPI_MODULE_NAME    ("tbconvrt")


 /*******************************************************************************
  *
- * FUNCTION:    Acpi_tb_get_table_count
+ * FUNCTION:    acpi_tb_get_table_count
  *
- * PARAMETERS:
+ * PARAMETERS:  RSDP            - Pointer to the RSDP
+ *              RSDT            - Pointer to the RSDT/XSDT
  *
- * RETURN:
+ * RETURN:      The number of tables pointed to by the RSDT or XSDT.
  *
- * DESCRIPTION: Calculate the number of tables.
+ * DESCRIPTION: Calculate the number of tables.  Automatically handles either
+ *              an RSDT or XSDT.
  *
 ******************************************************************************/

 u32
 acpi_tb_get_table_count (
-	RSDP_DESCRIPTOR         *RSDP,
-	acpi_table_header       *RSDT)
+	struct rsdp_descriptor          *RSDP,
+	struct acpi_table_header        *RSDT)
 {
-	u32                     pointer_size;
+	u32                             pointer_size;

-	FUNCTION_ENTRY ();
+	ACPI_FUNCTION_ENTRY ();


-#ifndef _IA64
+#if ACPI_MACHINE_WIDTH != 64
 	if (RSDP->revision < 2) {
 		pointer_size = sizeof (u32);
 	}
-
 	else
 #endif
 	{
@@ -75,17 +92,17 @@
 	 * pointers contained within the RSDT/XSDT. The size of the pointers
 	 * is architecture-dependent.
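
For reference, the arithmetic this function performs reduces to one line. The sketch below is illustrative only, not part of the patch; it hard-codes the 36-byte ACPI table header and the 4-byte (RSDT) versus 8-byte (XSDT) entry widths from the ACPI 2.0 specification.

	#include <stdio.h>

	/* Everything after the 36-byte ACPI table header is an array of
	 * physical pointers: 4 bytes each in an RSDT (RSDP revision < 2),
	 * 8 bytes each in an XSDT. */
	static unsigned int table_count(unsigned int table_length, int rsdp_revision)
	{
		unsigned int header_size = 36;  /* sizeof(struct acpi_table_header) */
		unsigned int entry_size = (rsdp_revision < 2) ? 4 : 8;

		return (table_length - header_size) / entry_size;
	}

	int main(void)
	{
		/* a 76-byte revision-1 RSDT carries (76 - 36) / 4 = 10 tables */
		printf("%u tables\n", table_count(76, 1));
		return 0;
	}
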
*/ - return ((RSDT->length - sizeof (acpi_table_header)) / pointer_size); + return ((RSDT->length - sizeof (struct acpi_table_header)) / pointer_size); } /******************************************************************************* * - * FUNCTION: Acpi_tb_convert_to_xsdt + * FUNCTION: acpi_tb_convert_to_xsdt * - * PARAMETERS: + * PARAMETERS: table_info - Info about the RSDT * - * RETURN: + * RETURN: Status * * DESCRIPTION: Convert an RSDT to an XSDT (internal common format) * @@ -93,24 +110,20 @@ acpi_status acpi_tb_convert_to_xsdt ( - acpi_table_desc *table_info, - u32 *number_of_tables) + struct acpi_table_desc *table_info) { - u32 table_size; - u32 i; - xsdt_descriptor *new_table; - - - FUNCTION_ENTRY (); + acpi_size table_size; + u32 i; + XSDT_DESCRIPTOR *new_table; - *number_of_tables = acpi_tb_get_table_count (acpi_gbl_RSDP, table_info->pointer); + ACPI_FUNCTION_ENTRY (); /* Compute size of the converted XSDT */ - table_size = (*number_of_tables * sizeof (u64)) + sizeof (acpi_table_header); - + table_size = ((acpi_size) acpi_gbl_rsdt_table_count * sizeof (u64)) + + sizeof (struct acpi_table_header); /* Allocate an XSDT */ @@ -121,37 +134,29 @@ /* Copy the header and set the length */ - MEMCPY (new_table, table_info->pointer, sizeof (acpi_table_header)); - new_table->header.length = table_size; + ACPI_MEMCPY (new_table, table_info->pointer, sizeof (struct acpi_table_header)); + new_table->header.length = (u32) table_size; /* Copy the table pointers */ - for (i = 0; i < *number_of_tables; i++) { + for (i = 0; i < acpi_gbl_rsdt_table_count; i++) { if (acpi_gbl_RSDP->revision < 2) { -#ifdef _IA64 - new_table->table_offset_entry[i] = - ((RSDT_DESCRIPTOR_REV071 *) table_info->pointer)->table_offset_entry[i]; -#else ACPI_STORE_ADDRESS (new_table->table_offset_entry[i], - ((RSDT_DESCRIPTOR_REV1 *) table_info->pointer)->table_offset_entry[i]); -#endif + ((struct rsdt_descriptor_rev1 *) table_info->pointer)->table_offset_entry[i]); } else { new_table->table_offset_entry[i] = - ((xsdt_descriptor *) table_info->pointer)->table_offset_entry[i]; + ((XSDT_DESCRIPTOR *) table_info->pointer)->table_offset_entry[i]; } } - /* Delete the original table (either mapped or in a buffer) */ acpi_tb_delete_single_table (table_info); - /* Point the table descriptor to the new table */ - table_info->pointer = (acpi_table_header *) new_table; - table_info->base_pointer = (acpi_table_header *) new_table; + table_info->pointer = (struct acpi_table_header *) new_table; table_info->length = table_size; table_info->allocation = ACPI_MEM_ALLOCATED; @@ -159,327 +164,317 @@ } -/******************************************************************************* +/****************************************************************************** * - * FUNCTION: Acpi_tb_convert_table_fadt + * FUNCTION: acpi_tb_init_generic_address * - * PARAMETERS: + * PARAMETERS: new_gas_struct - GAS struct to be initialized + * register_bit_width - Width of this register + * Address - Address of the register * - * RETURN: + * RETURN: None * - * DESCRIPTION: - * Converts BIOS supplied 1.0 and 0.71 ACPI FADT to an intermediate - * ACPI 2.0 FADT. If the BIOS supplied a 2.0 FADT then it is simply - * copied to the intermediate FADT. The ACPI CA software uses this - * intermediate FADT. Thus a significant amount of special #ifdef - * type codeing is saved. This intermediate FADT will need to be - * freed at some point. + * DESCRIPTION: Initialize a GAS structure. 
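
The GAS (Generic Address Structure) being initialized here is the ACPI 2.0 way of describing a register by address space, width and 64-bit address. As an illustration only, using the kernel's u8/u64 typedefs, field names as used in this patch, and made-up values: a V1.0 PM1a event block at I/O port 0xB000 with pm1_evt_len = 4 would come out of the helper as shown below. Note that the helper passes the FADT's byte length straight through as register_bit_width.

	struct acpi_generic_address_sketch {
		u8	address_space_id;	/* 1 = SystemIO (ACPI_ADR_SPACE_SYSTEM_IO) */
		u8	register_bit_width;
		u8	register_bit_offset;
		u8	reserved;
		u64	address;		/* stored via ACPI_STORE_ADDRESS above */
	};

	static const struct acpi_generic_address_sketch xpm1a_evt_example = {
		.address_space_id    = 1,	/* the helper always assumes SystemIO */
		.register_bit_width  = 4,	/* pm1_evt_len, passed through as-is  */
		.register_bit_offset = 0,
		.reserved            = 0,
		.address             = 0xB000,
	};
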
* ******************************************************************************/ -acpi_status -acpi_tb_convert_table_fadt (void) +static void +acpi_tb_init_generic_address ( + struct acpi_generic_address *new_gas_struct, + u8 register_bit_width, + acpi_physical_address address) { -#ifdef _IA64 - fadt_descriptor_rev071 *FADT71; - u8 pm1_address_space; - u8 pm2_address_space; - u8 pm_timer_address_space; - u8 gpe0address_space; - u8 gpe1_address_space; -#else - fadt_descriptor_rev1 *FADT1; -#endif - - fadt_descriptor_rev2 *FADT2; - acpi_table_desc *table_desc; - - - FUNCTION_TRACE ("Tb_convert_table_fadt"); - - - /* Acpi_gbl_FADT is valid */ - /* Allocate and zero the 2.0 buffer */ - - FADT2 = ACPI_MEM_CALLOCATE (sizeof (fadt_descriptor_rev2)); - if (FADT2 == NULL) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - - /* The ACPI FADT revision number is FADT2_REVISION_ID=3 */ - /* So, if the current table revision is less than 3 it is type 1.0 or 0.71 */ - - if (acpi_gbl_FADT->header.revision >= FADT2_REVISION_ID) { - /* We have an ACPI 2.0 FADT but we must copy it to our local buffer */ - - *FADT2 = *((fadt_descriptor_rev2*) acpi_gbl_FADT); - - } - - else { - -#ifdef _IA64 - /* - * For the 64-bit case only, a revision ID less than V2.0 means the - * tables are the 0.71 extensions - */ - - /* The BIOS stored FADT should agree with Revision 0.71 */ - - FADT71 = (fadt_descriptor_rev071 *) acpi_gbl_FADT; - - /* Copy the table header*/ + ACPI_STORE_ADDRESS (new_gas_struct->address, address); - FADT2->header = FADT71->header; - - /* Copy the common fields */ - - FADT2->sci_int = FADT71->sci_int; - FADT2->acpi_enable = FADT71->acpi_enable; - FADT2->acpi_disable = FADT71->acpi_disable; - FADT2->S4bios_req = FADT71->S4bios_req; - FADT2->plvl2_lat = FADT71->plvl2_lat; - FADT2->plvl3_lat = FADT71->plvl3_lat; - FADT2->day_alrm = FADT71->day_alrm; - FADT2->mon_alrm = FADT71->mon_alrm; - FADT2->century = FADT71->century; - FADT2->gpe1_base = FADT71->gpe1_base; - - /* - * We still use the block length registers even though - * the GAS structure should obsolete them. This is because - * these registers are byte lengths versus the GAS which - * contains a bit width - */ - FADT2->pm1_evt_len = FADT71->pm1_evt_len; - FADT2->pm1_cnt_len = FADT71->pm1_cnt_len; - FADT2->pm2_cnt_len = FADT71->pm2_cnt_len; - FADT2->pm_tm_len = FADT71->pm_tm_len; - FADT2->gpe0blk_len = FADT71->gpe0blk_len; - FADT2->gpe1_blk_len = FADT71->gpe1_blk_len; - FADT2->gpe1_base = FADT71->gpe1_base; - - /* Copy the existing 0.71 flags to 2.0. The other bits are zero.*/ - - FADT2->wb_invd = FADT71->flush_cash; - FADT2->proc_c1 = FADT71->proc_c1; - FADT2->plvl2_up = FADT71->plvl2_up; - FADT2->pwr_button = FADT71->pwr_button; - FADT2->sleep_button = FADT71->sleep_button; - FADT2->fixed_rTC = FADT71->fixed_rTC; - FADT2->rtcs4 = FADT71->rtcs4; - FADT2->tmr_val_ext = FADT71->tmr_val_ext; - FADT2->dock_cap = FADT71->dock_cap; - - - /* We should not use these next two addresses */ - /* Since our buffer is pre-zeroed nothing to do for */ - /* the next three data items in the structure */ - /* FADT2->Firmware_ctrl = 0; */ - /* FADT2->Dsdt = 0; */ - - /* System Interrupt Model isn't used in ACPI 2.0*/ - /* FADT2->Reserved1 = 0; */ - - /* This field is set by the OEM to convey the preferred */ - /* power management profile to OSPM. It doesn't have any*/ - /* 0.71 equivalence. Since we don't know what kind of */ - /* 64-bit system this is, we will pick unspecified. 
*/ - - FADT2->prefer_PM_profile = PM_UNSPECIFIED; - - - /* Port address of SMI command port */ - /* We shouldn't use this port because IA64 doesn't */ - /* have or use SMI. It has PMI. */ + new_gas_struct->address_space_id = ACPI_ADR_SPACE_SYSTEM_IO; + new_gas_struct->register_bit_width = register_bit_width; + new_gas_struct->register_bit_offset = 0; + new_gas_struct->reserved = 0; +} - FADT2->smi_cmd = (u32)(FADT71->smi_cmd & 0xFFFFFFFF); +/******************************************************************************* + * + * FUNCTION: acpi_tb_convert_fadt1 + * + * PARAMETERS: local_fadt - Pointer to new FADT + * original_fadt - Pointer to old FADT + * + * RETURN: Populates local_fadt + * + * DESCRIPTION: Convert an ACPI 1.0 FADT to common internal format + * + ******************************************************************************/ - /* processor performance state control*/ - /* The value OSPM writes to the SMI_CMD register to assume */ - /* processor performance state control responsibility. */ - /* There isn't any equivalence in 0.71 */ - /* Again this should be meaningless for IA64 */ - /* FADT2->Pstate_cnt = 0; */ +static void +acpi_tb_convert_fadt1 ( + struct fadt_descriptor_rev2 *local_fadt, + struct fadt_descriptor_rev1 *original_fadt) +{ - /* The 32-bit Power management and GPE registers are */ - /* not valid in IA-64 and we are not going to use them */ - /* so leaving them pre-zeroed. */ - /* Support for the _CST object and C States change notification.*/ - /* This data item hasn't any 0.71 equivalence so leaving it zero.*/ - /* FADT2->Cst_cnt = 0; */ + /* ACPI 1.0 FACS */ + /* The BIOS stored FADT should agree with Revision 1.0 */ - /* number of flush strides that need to be read */ - /* No 0.71 equivalence. Leave pre-zeroed. */ - /* FADT2->Flush_size = 0; */ + /* + * Copy the table header and the common part of the tables. + * + * The 2.0 table is an extension of the 1.0 table, so the entire 1.0 + * table can be copied first, then expand some fields to 64 bits. + */ + ACPI_MEMCPY (local_fadt, original_fadt, sizeof (struct fadt_descriptor_rev1)); - /* Processor's memory cache line width, in bytes */ - /* No 0.71 equivalence. Leave pre-zeroed. */ - /* FADT2->Flush_stride = 0; */ + /* Convert table pointers to 64-bit fields */ - /* Processor's duty cycle index in processor's P_CNT reg*/ - /* No 0.71 equivalence. Leave pre-zeroed. */ - /* FADT2->Duty_offset = 0; */ + ACPI_STORE_ADDRESS (local_fadt->xfirmware_ctrl, local_fadt->V1_firmware_ctrl); + ACPI_STORE_ADDRESS (local_fadt->Xdsdt, local_fadt->V1_dsdt); - /* Processor's duty cycle value bit width in P_CNT register.*/ - /* No 0.71 equivalence. Leave pre-zeroed. */ - /* FADT2->Duty_width = 0; */ + /* + * System Interrupt Model isn't used in ACPI 2.0 (local_fadt->Reserved1 = 0;) + */ + /* + * This field is set by the OEM to convey the preferred power management + * profile to OSPM. It doesn't have any 1.0 equivalence. Since we don't + * know what kind of 32-bit system this is, we will use "unspecified". + */ + local_fadt->prefer_PM_profile = PM_UNSPECIFIED; - /* Since there isn't any equivalence in 0.71 */ - /* and since Big_sur had to support legacy */ + /* + * Processor Performance State Control. This is the value OSPM writes to + * the SMI_CMD register to assume processor performance state control + * responsibility. There isn't any equivalence in 1.0, leave it zeroed. + */ + local_fadt->pstate_cnt = 0; - FADT2->iapc_boot_arch = BAF_LEGACY_DEVICES; + /* + * Support for the _CST object and C States change notification. 
+ * This data item hasn't any 1.0 equivalence so leave it zero. + */ + local_fadt->cst_cnt = 0; - /* Copy to ACPI 2.0 64-BIT Extended Addresses */ + /* + * Since there isn't any equivalence in 1.0 and since it highly likely + * that a 1.0 system has legacy support. + */ + local_fadt->iapc_boot_arch = BAF_LEGACY_DEVICES; - FADT2->Xfirmware_ctrl = FADT71->firmware_ctrl; - FADT2->Xdsdt = FADT71->dsdt; + /* + * Convert the V1.0 block addresses to V2.0 GAS structures + */ + acpi_tb_init_generic_address (&local_fadt->xpm1a_evt_blk, local_fadt->pm1_evt_len, + (acpi_physical_address) local_fadt->V1_pm1a_evt_blk); + acpi_tb_init_generic_address (&local_fadt->xpm1b_evt_blk, local_fadt->pm1_evt_len, + (acpi_physical_address) local_fadt->V1_pm1b_evt_blk); + acpi_tb_init_generic_address (&local_fadt->xpm1a_cnt_blk, local_fadt->pm1_cnt_len, + (acpi_physical_address) local_fadt->V1_pm1a_cnt_blk); + acpi_tb_init_generic_address (&local_fadt->xpm1b_cnt_blk, local_fadt->pm1_cnt_len, + (acpi_physical_address) local_fadt->V1_pm1b_cnt_blk); + acpi_tb_init_generic_address (&local_fadt->xpm2_cnt_blk, local_fadt->pm2_cnt_len, + (acpi_physical_address) local_fadt->V1_pm2_cnt_blk); + acpi_tb_init_generic_address (&local_fadt->xpm_tmr_blk, local_fadt->pm_tm_len, + (acpi_physical_address) local_fadt->V1_pm_tmr_blk); + acpi_tb_init_generic_address (&local_fadt->xgpe0_blk, 0, + (acpi_physical_address) local_fadt->V1_gpe0_blk); + acpi_tb_init_generic_address (&local_fadt->xgpe1_blk, 0, + (acpi_physical_address) local_fadt->V1_gpe1_blk); + + /* Create separate GAS structs for the PM1 Enable registers */ + + acpi_tb_init_generic_address (&acpi_gbl_xpm1a_enable, + (u8) ACPI_DIV_2 (acpi_gbl_FADT->pm1_evt_len), + (acpi_physical_address) (local_fadt->xpm1a_evt_blk.address + + ACPI_DIV_2 (acpi_gbl_FADT->pm1_evt_len))); + + acpi_tb_init_generic_address (&acpi_gbl_xpm1b_enable, + (u8) ACPI_DIV_2 (acpi_gbl_FADT->pm1_evt_len), + (acpi_physical_address) (local_fadt->xpm1b_evt_blk.address + + ACPI_DIV_2 (acpi_gbl_FADT->pm1_evt_len))); +} - /* Extract the address space IDs */ +/******************************************************************************* + * + * FUNCTION: acpi_tb_convert_fadt2 + * + * PARAMETERS: local_fadt - Pointer to new FADT + * original_fadt - Pointer to old FADT + * + * RETURN: Populates local_fadt + * + * DESCRIPTION: Convert an ACPI 2.0 FADT to common internal format. + * Handles optional "X" fields. 
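
The ACPI_DIV_2 calls above encode a spec detail worth spelling out: a PM1 event block is two back-to-back registers, status first and enable second, each half of pm1_evt_len bytes wide. The enable-register GAS is therefore derived rather than read from the FADT. Illustrative arithmetic, with made-up values:

	/* For xpm1a_evt_blk.address = 0xB000 and pm1_evt_len = 4:
	 *   status half: address 0xB000,     width ACPI_DIV_2(4) = 2
	 *   enable half: address 0xB000 + 2, width ACPI_DIV_2(4) = 2
	 */
	static const u64 pm1a_enable_address = 0xB000 + (4 / 2); /* acpi_gbl_xpm1a_enable.address */
	static const u8  pm1a_enable_width   = 4 / 2;            /* its register_bit_width */
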
+ * + ******************************************************************************/ - pm1_address_space = (u8)((FADT71->address_space & PM1_BLK_ADDRESS_SPACE) >> 1); - pm2_address_space = (u8)((FADT71->address_space & PM2_CNT_BLK_ADDRESS_SPACE) >> 2); - pm_timer_address_space = (u8)((FADT71->address_space & PM_TMR_BLK_ADDRESS_SPACE) >> 3); - gpe0address_space = (u8)((FADT71->address_space & GPE0_BLK_ADDRESS_SPACE) >> 4); - gpe1_address_space = (u8)((FADT71->address_space & GPE1_BLK_ADDRESS_SPACE) >> 5); +static void +acpi_tb_convert_fadt2 ( + struct fadt_descriptor_rev2 *local_fadt, + struct fadt_descriptor_rev2 *original_fadt) +{ - /* - * Convert the 0.71 (non-GAS style) Block addresses to V2.0 GAS structures, - * in this order: - * - * PM 1_a Events - * PM 1_b Events - * PM 1_a Control - * PM 1_b Control - * PM 2 Control - * PM Timer Control - * GPE Block 0 - * GPE Block 1 - */ + /* We have an ACPI 2.0 FADT but we must copy it to our local buffer */ - ASL_BUILD_GAS_FROM_ENTRY (FADT2->Xpm1a_evt_blk, FADT71->pm1_evt_len, FADT71->pm1a_evt_blk, pm1_address_space); - ASL_BUILD_GAS_FROM_ENTRY (FADT2->Xpm1b_evt_blk, FADT71->pm1_evt_len, FADT71->pm1b_evt_blk, pm1_address_space); - ASL_BUILD_GAS_FROM_ENTRY (FADT2->Xpm1a_cnt_blk, FADT71->pm1_cnt_len, FADT71->pm1a_cnt_blk, pm1_address_space); - ASL_BUILD_GAS_FROM_ENTRY (FADT2->Xpm1b_cnt_blk, FADT71->pm1_cnt_len, FADT71->pm1b_cnt_blk, pm1_address_space); - ASL_BUILD_GAS_FROM_ENTRY (FADT2->Xpm2_cnt_blk, FADT71->pm2_cnt_len, FADT71->pm2_cnt_blk, pm2_address_space); - ASL_BUILD_GAS_FROM_ENTRY (FADT2->Xpm_tmr_blk, FADT71->pm_tm_len, FADT71->pm_tmr_blk, pm_timer_address_space); - ASL_BUILD_GAS_FROM_ENTRY (FADT2->Xgpe0blk, FADT71->gpe0blk_len, FADT71->gpe0blk, gpe0address_space); - ASL_BUILD_GAS_FROM_ENTRY (FADT2->Xgpe1_blk, FADT71->gpe1_blk_len, FADT71->gpe1_blk, gpe1_address_space); + ACPI_MEMCPY (local_fadt, original_fadt, sizeof (struct fadt_descriptor_rev2)); -#else + /* + * "X" fields are optional extensions to the original V1.0 fields, so + * we must selectively expand V1.0 fields if the corresponding X field + * is zero. + */ + if (!(local_fadt->xfirmware_ctrl)) { + ACPI_STORE_ADDRESS (local_fadt->xfirmware_ctrl, local_fadt->V1_firmware_ctrl); + } - /* ACPI 1.0 FACS */ + if (!(local_fadt->Xdsdt)) { + ACPI_STORE_ADDRESS (local_fadt->Xdsdt, local_fadt->V1_dsdt); + } + if (!(local_fadt->xpm1a_evt_blk.address)) { + acpi_tb_init_generic_address (&local_fadt->xpm1a_evt_blk, + local_fadt->pm1_evt_len, (acpi_physical_address) local_fadt->V1_pm1a_evt_blk); + } - /* The BIOS stored FADT should agree with Revision 1.0 */ + if (!(local_fadt->xpm1b_evt_blk.address)) { + acpi_tb_init_generic_address (&local_fadt->xpm1b_evt_blk, + local_fadt->pm1_evt_len, (acpi_physical_address) local_fadt->V1_pm1b_evt_blk); + } - FADT1 = (fadt_descriptor_rev1*) acpi_gbl_FADT; + if (!(local_fadt->xpm1a_cnt_blk.address)) { + acpi_tb_init_generic_address (&local_fadt->xpm1a_cnt_blk, + local_fadt->pm1_cnt_len, (acpi_physical_address) local_fadt->V1_pm1a_cnt_blk); + } - /* - * Copy the table header and the common part of the tables - * The 2.0 table is an extension of the 1.0 table, so the - * entire 1.0 table can be copied first, then expand some - * fields to 64 bits. 
- */ - MEMCPY (FADT2, FADT1, sizeof (fadt_descriptor_rev1)); + if (!(local_fadt->xpm1b_cnt_blk.address)) { + acpi_tb_init_generic_address (&local_fadt->xpm1b_cnt_blk, + local_fadt->pm1_cnt_len, (acpi_physical_address) local_fadt->V1_pm1b_cnt_blk); + } + if (!(local_fadt->xpm2_cnt_blk.address)) { + acpi_tb_init_generic_address (&local_fadt->xpm2_cnt_blk, + local_fadt->pm2_cnt_len, (acpi_physical_address) local_fadt->V1_pm2_cnt_blk); + } - /* Convert table pointers to 64-bit fields */ + if (!(local_fadt->xpm_tmr_blk.address)) { + acpi_tb_init_generic_address (&local_fadt->xpm_tmr_blk, + local_fadt->pm_tm_len, (acpi_physical_address) local_fadt->V1_pm_tmr_blk); + } - ACPI_STORE_ADDRESS (FADT2->Xfirmware_ctrl, FADT1->firmware_ctrl); - ACPI_STORE_ADDRESS (FADT2->Xdsdt, FADT1->dsdt); + if (!(local_fadt->xgpe0_blk.address)) { + acpi_tb_init_generic_address (&local_fadt->xgpe0_blk, + 0, (acpi_physical_address) local_fadt->V1_gpe0_blk); + } - /* System Interrupt Model isn't used in ACPI 2.0*/ - /* FADT2->Reserved1 = 0; */ + if (!(local_fadt->xgpe1_blk.address)) { + acpi_tb_init_generic_address (&local_fadt->xgpe1_blk, + 0, (acpi_physical_address) local_fadt->V1_gpe1_blk); + } - /* This field is set by the OEM to convey the preferred */ - /* power management profile to OSPM. It doesn't have any*/ - /* 1.0 equivalence. Since we don't know what kind of */ - /* 32-bit system this is, we will pick unspecified. */ + /* Create separate GAS structs for the PM1 Enable registers */ - FADT2->prefer_PM_profile = PM_UNSPECIFIED; + acpi_tb_init_generic_address (&acpi_gbl_xpm1a_enable, + (u8) ACPI_DIV_2 (acpi_gbl_FADT->pm1_evt_len), + (acpi_physical_address) (local_fadt->xpm1a_evt_blk.address + + ACPI_DIV_2 (acpi_gbl_FADT->pm1_evt_len))); + acpi_gbl_xpm1a_enable.address_space_id = local_fadt->xpm1a_evt_blk.address_space_id; + + acpi_tb_init_generic_address (&acpi_gbl_xpm1b_enable, + (u8) ACPI_DIV_2 (acpi_gbl_FADT->pm1_evt_len), + (acpi_physical_address) (local_fadt->xpm1b_evt_blk.address + + ACPI_DIV_2 (acpi_gbl_FADT->pm1_evt_len))); + acpi_gbl_xpm1b_enable.address_space_id = local_fadt->xpm1b_evt_blk.address_space_id; +} - /* Processor Performance State Control. This is the value */ - /* OSPM writes to the SMI_CMD register to assume processor */ - /* performance state control responsibility. There isn't */ - /* any equivalence in 1.0. So leave it zeroed. */ +/******************************************************************************* + * + * FUNCTION: acpi_tb_convert_table_fadt + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Converts a BIOS supplied ACPI 1.0 FADT to a local + * ACPI 2.0 FADT. If the BIOS supplied a 2.0 FADT then it is simply + * copied to the local FADT. The ACPI CA software uses this + * local FADT. Thus a significant amount of special #ifdef + * type codeing is saved. 
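
The function that follows keys its behaviour off both the revision field and the table length, since BIOSes have shipped FADTs whose two claims disagree. Its decision table, as a stand-alone sketch (parameter names invented; the sizes and revision constant correspond to sizeof(struct fadt_descriptor_rev1), sizeof(struct fadt_descriptor_rev2) and FADT2_REVISION_ID in the patch):

	enum fadt_kind { FADT_INVALID, FADT_V1, FADT_V2 };

	static enum fadt_kind classify_fadt(unsigned int length, unsigned char revision,
					    unsigned int rev1_size, unsigned int rev2_size,
					    unsigned char rev2_id)
	{
		if (length < rev1_size)
			return FADT_INVALID;	/* shorter than any known FADT */
		if (revision >= rev2_id && length >= rev2_size)
			return FADT_V2;		/* consistent 2.0 table */
		return FADT_V1;			/* 1.0, or a 2.0 claim with a short length */
	}
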
+ * + ******************************************************************************/ - FADT2->pstate_cnt = 0; +acpi_status +acpi_tb_convert_table_fadt (void) +{ + struct fadt_descriptor_rev2 *local_fadt; + struct acpi_table_desc *table_desc; - /* Support for the _CST object and C States change notification.*/ - /* This data item hasn't any 1.0 equivalence so leaving it zero.*/ + ACPI_FUNCTION_TRACE ("tb_convert_table_fadt"); - FADT2->cst_cnt = 0; + /* + * acpi_gbl_FADT is valid + * Allocate and zero the 2.0 FADT buffer + */ + local_fadt = ACPI_MEM_CALLOCATE (sizeof (struct fadt_descriptor_rev2)); + if (local_fadt == NULL) { + return_ACPI_STATUS (AE_NO_MEMORY); + } - /* Since there isn't any equivalence in 1.0 and since it */ - /* is highly likely that a 1.0 system has legacy support. */ + /* + * FADT length and version validation. The table must be at least as + * long as the version 1.0 FADT + */ + if (acpi_gbl_FADT->header.length < sizeof (struct fadt_descriptor_rev1)) { + ACPI_REPORT_ERROR (("Invalid FADT table length: 0x%X\n", acpi_gbl_FADT->header.length)); + return_ACPI_STATUS (AE_INVALID_TABLE_LENGTH); + } - FADT2->iapc_boot_arch = BAF_LEGACY_DEVICES; + if (acpi_gbl_FADT->header.revision >= FADT2_REVISION_ID) { + if (acpi_gbl_FADT->header.length < sizeof (struct fadt_descriptor_rev2)) { + /* Length is too short to be a V2.0 table */ + ACPI_REPORT_WARNING (("Inconsistent FADT length (0x%X) and revision (0x%X), using FADT V1.0 portion of table\n", + acpi_gbl_FADT->header.length, acpi_gbl_FADT->header.revision)); - /* - * Convert the V1.0 Block addresses to V2.0 GAS structures - * in this order: - * - * PM 1_a Events - * PM 1_b Events - * PM 1_a Control - * PM 1_b Control - * PM 2 Control - * PM Timer Control - * GPE Block 0 - * GPE Block 1 - */ + acpi_tb_convert_fadt1 (local_fadt, (void *) acpi_gbl_FADT); + } + else { + /* Valid V2.0 table */ - ASL_BUILD_GAS_FROM_V1_ENTRY (FADT2->Xpm1a_evt_blk, FADT1->pm1_evt_len, FADT1->pm1a_evt_blk); - ASL_BUILD_GAS_FROM_V1_ENTRY (FADT2->Xpm1b_evt_blk, FADT1->pm1_evt_len, FADT1->pm1b_evt_blk); - ASL_BUILD_GAS_FROM_V1_ENTRY (FADT2->Xpm1a_cnt_blk, FADT1->pm1_cnt_len, FADT1->pm1a_cnt_blk); - ASL_BUILD_GAS_FROM_V1_ENTRY (FADT2->Xpm1b_cnt_blk, FADT1->pm1_cnt_len, FADT1->pm1b_cnt_blk); - ASL_BUILD_GAS_FROM_V1_ENTRY (FADT2->Xpm2_cnt_blk, FADT1->pm2_cnt_len, FADT1->pm2_cnt_blk); - ASL_BUILD_GAS_FROM_V1_ENTRY (FADT2->Xpm_tmr_blk, FADT1->pm_tm_len, FADT1->pm_tmr_blk); - ASL_BUILD_GAS_FROM_V1_ENTRY (FADT2->Xgpe0blk, FADT1->gpe0blk_len, FADT1->gpe0blk); - ASL_BUILD_GAS_FROM_V1_ENTRY (FADT2->Xgpe1_blk, FADT1->gpe1_blk_len, FADT1->gpe1_blk); -#endif + acpi_tb_convert_fadt2 (local_fadt, acpi_gbl_FADT); + } } + else { + /* Valid V1.0 table */ + acpi_tb_convert_fadt1 (local_fadt, (void *) acpi_gbl_FADT); + } /* - * Global FADT pointer will point to the common V2.0 FADT + * Global FADT pointer will point to the new common V2.0 FADT */ - acpi_gbl_FADT = FADT2; + acpi_gbl_FADT = local_fadt; acpi_gbl_FADT->header.length = sizeof (FADT_DESCRIPTOR); - /* Free the original table */ - table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_FADT]; + table_desc = acpi_gbl_table_lists[ACPI_TABLE_FADT].next; acpi_tb_delete_single_table (table_desc); - /* Install the new table */ - table_desc->pointer = (acpi_table_header *) acpi_gbl_FADT; - table_desc->base_pointer = acpi_gbl_FADT; - table_desc->allocation = ACPI_MEM_ALLOCATED; - table_desc->length = sizeof (fadt_descriptor_rev2); - + table_desc->pointer = (struct acpi_table_header *) acpi_gbl_FADT; + table_desc->allocation = 
ACPI_MEM_ALLOCATED; + table_desc->length = sizeof (struct fadt_descriptor_rev2); /* Dump the entire FADT */ ACPI_DEBUG_PRINT ((ACPI_DB_TABLES, "Hex dump of common internal FADT, size %d (%X)\n", acpi_gbl_FADT->header.length, acpi_gbl_FADT->header.length)); - DUMP_BUFFER ((u8 *) (acpi_gbl_FADT), acpi_gbl_FADT->header.length); - + ACPI_DUMP_BUFFER ((u8 *) (acpi_gbl_FADT), acpi_gbl_FADT->header.length); return_ACPI_STATUS (AE_OK); } @@ -487,82 +482,57 @@ /******************************************************************************* * - * FUNCTION: Acpi_tb_convert_table_facs + * FUNCTION: acpi_tb_convert_table_facs * - * PARAMETERS: + * PARAMETERS: table_info - Info for currently installad FACS * - * RETURN: + * RETURN: Status * - * DESCRIPTION: + * DESCRIPTION: Convert ACPI 1.0 and ACPI 2.0 FACS to a common internal + * table format. * ******************************************************************************/ acpi_status acpi_tb_build_common_facs ( - acpi_table_desc *table_info) + struct acpi_table_desc *table_info) { - acpi_common_facs *common_facs; - -#ifdef _IA64 - facs_descriptor_rev071 *FACS71; -#else - facs_descriptor_rev1 *FACS1; -#endif - - facs_descriptor_rev2 *FACS2; + ACPI_FUNCTION_TRACE ("tb_build_common_facs"); - FUNCTION_TRACE ("Tb_build_common_facs"); + /* Absolute minimum length is 24, but the ACPI spec says 64 */ - /* Allocate a common FACS */ - - common_facs = ACPI_MEM_CALLOCATE (sizeof (acpi_common_facs)); - if (!common_facs) { - return_ACPI_STATUS (AE_NO_MEMORY); + if (acpi_gbl_FACS->length < 24) { + ACPI_REPORT_ERROR (("Invalid FACS table length: 0x%X\n", acpi_gbl_FACS->length)); + return_ACPI_STATUS (AE_INVALID_TABLE_LENGTH); } + if (acpi_gbl_FACS->length < 64) { + ACPI_REPORT_WARNING (("FACS is shorter than the ACPI specification allows: 0x%X, using anyway\n", + acpi_gbl_FACS->length)); + } /* Copy fields to the new FACS */ - if (acpi_gbl_RSDP->revision < 2) { -#ifdef _IA64 - /* 0.71 FACS */ - - FACS71 = (facs_descriptor_rev071 *) acpi_gbl_FACS; - - common_facs->global_lock = (u32 *) &(FACS71->global_lock); - common_facs->firmware_waking_vector = &FACS71->firmware_waking_vector; - common_facs->vector_width = 64; -#else - /* ACPI 1.0 FACS */ - - FACS1 = (facs_descriptor_rev1 *) acpi_gbl_FACS; - - common_facs->global_lock = &(FACS1->global_lock); - common_facs->firmware_waking_vector = (u64 *) &FACS1->firmware_waking_vector; - common_facs->vector_width = 32; + acpi_gbl_common_fACS.global_lock = &(acpi_gbl_FACS->global_lock); -#endif - } + if ((acpi_gbl_RSDP->revision < 2) || + (acpi_gbl_FACS->length < 32) || + (!(acpi_gbl_FACS->xfirmware_waking_vector))) { + /* ACPI 1.0 FACS or short table or optional X_ field is zero */ + acpi_gbl_common_fACS.firmware_waking_vector = ACPI_CAST_PTR (u64, &(acpi_gbl_FACS->firmware_waking_vector)); + acpi_gbl_common_fACS.vector_width = 32; + } else { - /* ACPI 2.0 FACS */ + /* ACPI 2.0 FACS with valid X_ field */ - FACS2 = (facs_descriptor_rev2 *) acpi_gbl_FACS; - - common_facs->global_lock = &(FACS2->global_lock); - common_facs->firmware_waking_vector = &FACS2->Xfirmware_waking_vector; - common_facs->vector_width = 64; + acpi_gbl_common_fACS.firmware_waking_vector = &acpi_gbl_FACS->xfirmware_waking_vector; + acpi_gbl_common_fACS.vector_width = 64; } - - /* Set the global FACS pointer to point to the common FACS */ - - - acpi_gbl_FACS = common_facs; - - return_ACPI_STATUS (AE_OK); + return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables/tbgetall.c 
linux.21rc5-ac2/drivers/acpi/tables/tbgetall.c --- linux.21rc5/drivers/acpi/tables/tbgetall.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables/tbgetall.c 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,314 @@ +/****************************************************************************** + * + * Module Name: tbgetall - Get all required ACPI tables + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */


+#include <acpi/acpi.h>
+#include <acpi/actables.h>


+#define _COMPONENT          ACPI_TABLES
+	 ACPI_MODULE_NAME    ("tbgetall")


+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_get_primary_table
+ *
+ * PARAMETERS:  Address             - Physical address of table to retrieve
+ *              *table_info         - Where the table info is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Maps the physical address of table into a logical address
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_get_primary_table (
+	struct acpi_pointer             *address,
+	struct acpi_table_desc          *table_info)
+{
+	acpi_status                     status;
+	struct acpi_table_header        header;
+
+
+	ACPI_FUNCTION_TRACE ("tb_get_primary_table");
+
+
+	/* Ignore a NULL address in the RSDT */
+
+	if (!address->pointer.value) {
+		return_ACPI_STATUS (AE_OK);
+	}
+
+	/*
+	 * Get the header in order to get signature and table size
+	 */
+	status = acpi_tb_get_table_header (address, &header);
+	if (ACPI_FAILURE (status)) {
+		return_ACPI_STATUS (status);
+	}
+
+	/* Clear the table_info */
+
+	ACPI_MEMSET (table_info, 0, sizeof (struct acpi_table_desc));
+
+	/*
+	 * Check the table signature and make sure it is recognized.
+	 * Also checks the header checksum
+	 */
+	table_info->pointer = &header;
+	status = acpi_tb_recognize_table (table_info, ACPI_TABLE_PRIMARY);
+	if (ACPI_FAILURE (status)) {
+		return_ACPI_STATUS (status);
+	}
+
+	/* Get the entire table */
+
+	status = acpi_tb_get_table_body (address, &header, table_info);
+	if (ACPI_FAILURE (status)) {
+		return_ACPI_STATUS (status);
+	}
+
+	/* Install the table */
+
+	status = acpi_tb_install_table (table_info);
+	return_ACPI_STATUS (status);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_get_secondary_table
+ *
+ * PARAMETERS:  Address             - Physical address of table to retrieve
+ *              *table_info         - Where the table info is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Maps the physical address of table into a logical address
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_get_secondary_table (
+	struct acpi_pointer             *address,
+	acpi_string                     signature,
+	struct acpi_table_desc          *table_info)
+{
+	acpi_status                     status;
+	struct acpi_table_header        header;
+
+
+	ACPI_FUNCTION_TRACE_STR ("tb_get_secondary_table", signature);
+
+
+	/* Get the header in order to match the signature */
+
+	status = acpi_tb_get_table_header (address, &header);
+	if (ACPI_FAILURE (status)) {
+		return_ACPI_STATUS (status);
+	}
+
+	/* Signature must match request */
+
+	if (ACPI_STRNCMP (header.signature, signature, ACPI_NAME_SIZE)) {
+		ACPI_REPORT_ERROR (("Incorrect table signature - wanted [%s] found [%4.4s]\n",
+			signature, header.signature));
+		return_ACPI_STATUS (AE_BAD_SIGNATURE);
+	}
+
+	/*
+	 * Check the table signature and make sure it is recognized.
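
The header checksum that acpi_tb_recognize_table() verifies follows the general ACPI rule: all bytes of a table, the checksum byte included, must sum to zero modulo 256. A minimal stand-alone verifier of that rule, for reference:

	/* Returns nonzero when the table passes the ACPI checksum rule. */
	static int acpi_table_checksum_ok(const unsigned char *table, unsigned int length)
	{
		unsigned char sum = 0;
		unsigned int i;

		for (i = 0; i < length; i++)
			sum += table[i];	/* u8 arithmetic wraps mod 256 */

		return sum == 0;
	}
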
+ * Also checks the header checksum + */ + table_info->pointer = &header; + status = acpi_tb_recognize_table (table_info, ACPI_TABLE_SECONDARY); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Get the entire table */ + + status = acpi_tb_get_table_body (address, &header, table_info); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Install the table */ + + status = acpi_tb_install_table (table_info); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_tb_get_required_tables + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Load and validate tables other than the RSDT. The RSDT must + * already be loaded and validated. + * + * Get the minimum set of ACPI tables, namely: + * + * 1) FADT (via RSDT in loop below) + * 2) FACS (via FADT) + * 3) DSDT (via FADT) + * + ******************************************************************************/ + +acpi_status +acpi_tb_get_required_tables ( + void) +{ + acpi_status status = AE_OK; + u32 i; + struct acpi_table_desc table_info; + struct acpi_pointer address; + + + ACPI_FUNCTION_TRACE ("tb_get_required_tables"); + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%d ACPI tables in RSDT\n", + acpi_gbl_rsdt_table_count)); + + + address.pointer_type = acpi_gbl_table_flags | ACPI_LOGICAL_ADDRESSING; + + /* + * Loop through all table pointers found in RSDT. + * This will NOT include the FACS and DSDT - we must get + * them after the loop. + * + * The only tables we are interested in getting here is the FADT and + * any SSDTs. + */ + for (i = 0; i < acpi_gbl_rsdt_table_count; i++) { + /* Get the table address from the common internal XSDT */ + + address.pointer.value = acpi_gbl_XSDT->table_offset_entry[i]; + + /* + * Get the tables needed by this subsystem (FADT and any SSDTs). + * NOTE: All other tables are completely ignored at this time. + */ + status = acpi_tb_get_primary_table (&address, &table_info); + if ((status != AE_OK) && (status != AE_TABLE_NOT_SUPPORTED)) { + ACPI_REPORT_WARNING (("%s, while getting table at %8.8X%8.8X\n", + acpi_format_exception (status), + ACPI_HIDWORD (address.pointer.value), + ACPI_LODWORD (address.pointer.value))); + } + } + + /* We must have a FADT to continue */ + + if (!acpi_gbl_FADT) { + ACPI_REPORT_ERROR (("No FADT present in RSDT/XSDT\n")); + return_ACPI_STATUS (AE_NO_ACPI_TABLES); + } + + /* + * Convert the FADT to a common format. This allows earlier revisions of the + * table to coexist with newer versions, using common access code. 
+ */ + status = acpi_tb_convert_table_fadt (); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not convert FADT to internal common format\n")); + return_ACPI_STATUS (status); + } + + /* + * Get the FACS (Pointed to by the FADT) + */ + address.pointer.value = acpi_gbl_FADT->xfirmware_ctrl; + + status = acpi_tb_get_secondary_table (&address, FACS_SIG, &table_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not get/install the FACS, %s\n", + acpi_format_exception (status))); + return_ACPI_STATUS (status); + } + + /* + * Create the common FACS pointer table + * (Contains pointers to the original table) + */ + status = acpi_tb_build_common_facs (&table_info); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* + * Get/install the DSDT (Pointed to by the FADT) + */ + address.pointer.value = acpi_gbl_FADT->Xdsdt; + + status = acpi_tb_get_secondary_table (&address, DSDT_SIG, &table_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not get/install the DSDT\n")); + return_ACPI_STATUS (status); + } + + /* Set Integer Width (32/64) based upon DSDT revision */ + + acpi_ut_set_integer_width (acpi_gbl_DSDT->revision); + + /* Dump the entire DSDT */ + + ACPI_DEBUG_PRINT ((ACPI_DB_TABLES, + "Hex dump of entire DSDT, size %d (0x%X), Integer width = %d\n", + acpi_gbl_DSDT->length, acpi_gbl_DSDT->length, acpi_gbl_integer_bit_width)); + ACPI_DUMP_BUFFER ((u8 *) acpi_gbl_DSDT, acpi_gbl_DSDT->length); + + /* Always delete the RSDP mapping, we are done with it */ + + acpi_tb_delete_tables_by_type (ACPI_TABLE_RSDP); + return_ACPI_STATUS (status); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables/tbget.c linux.21rc5-ac2/drivers/acpi/tables/tbget.c --- linux.21rc5/drivers/acpi/tables/tbget.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables/tbget.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,754 +1,492 @@ /****************************************************************************** * * Module Name: tbget - ACPI Table get* routines - * $Revision: 56 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
  */

-#include "acpi.h"
-#include "achware.h"
-#include "actables.h"
+#include <acpi/acpi.h>
+#include <acpi/actables.h>

 #define _COMPONENT          ACPI_TABLES
-	 MODULE_NAME         ("tbget")
-
-#define RSDP_CHECKSUM_LENGTH 20
+	 ACPI_MODULE_NAME    ("tbget")


 /*******************************************************************************
  *
- * FUNCTION:    Acpi_tb_get_table_ptr
+ * FUNCTION:    acpi_tb_get_table
  *
- * PARAMETERS:  Table_type          - one of the defined table types
- *              Instance            - Which table of this type
- *              Table_ptr_loc       - pointer to location to place the pointer for
- *                                    return
+ * PARAMETERS:  Address             - Address of table to retrieve.  Can be
+ *                                    Logical or Physical
+ *              table_info          - Where table info is returned
  *
- * RETURN:      Status
+ * RETURN:      None
  *
- * DESCRIPTION: This function is called to get the pointer to an ACPI table.
+ * DESCRIPTION: Get entire table of unknown size.
  *
 ******************************************************************************/

 acpi_status
-acpi_tb_get_table_ptr (
-	acpi_table_type         table_type,
-	u32                     instance,
-	acpi_table_header       **table_ptr_loc)
+acpi_tb_get_table (
+	struct acpi_pointer             *address,
+	struct acpi_table_desc          *table_info)
 {
-	acpi_table_desc         *table_desc;
-	u32                     i;
-
-
-	FUNCTION_TRACE ("Tb_get_table_ptr");
-
+	acpi_status                     status;
+	struct acpi_table_header        header;

-	if (!acpi_gbl_DSDT) {
-		return_ACPI_STATUS (AE_NO_ACPI_TABLES);
-	}

-	if (table_type > ACPI_TABLE_MAX) {
-		return_ACPI_STATUS (AE_BAD_PARAMETER);
-	}
+	ACPI_FUNCTION_TRACE ("tb_get_table");


 	/*
-	 * For all table types (Single/Multiple), the first
-	 * instance is always in the list head.
+	 * Get the header in order to get signature and table size
 	 */
-	if (instance == 1) {
-		/*
-		 * Just pluck the pointer out of the global table!
- * Will be null if no table is present - */ - *table_ptr_loc = acpi_gbl_acpi_tables[table_type].pointer; - return_ACPI_STATUS (AE_OK); + status = acpi_tb_get_table_header (address, &header); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } + /* Get the entire table */ - /* - * Check for instance out of range - */ - if (instance > acpi_gbl_acpi_tables[table_type].count) { - return_ACPI_STATUS (AE_NOT_EXIST); - } - - /* Walk the list to get the desired table - * Since the if (Instance == 1) check above checked for the - * first table, setting Table_desc equal to the .Next member - * is actually pointing to the second table. Therefore, we - * need to walk from the 2nd table until we reach the Instance - * that the user is looking for and return its table pointer. - */ - table_desc = acpi_gbl_acpi_tables[table_type].next; - for (i = 2; i < instance; i++) { - table_desc = table_desc->next; + status = acpi_tb_get_table_body (address, &header, table_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not get ACPI table (size %X), %s\n", + header.length, acpi_format_exception (status))); + return_ACPI_STATUS (status); } - /* We are now pointing to the requested table's descriptor */ - - *table_ptr_loc = table_desc->pointer; - return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_tb_get_table + * FUNCTION: acpi_tb_get_table_header * - * PARAMETERS: Physical_address - Physical address of table to retrieve - * *Buffer_ptr - If Buffer_ptr is valid, read data from - * buffer rather than searching memory - * *Table_info - Where the table info is returned + * PARAMETERS: Address - Address of table to retrieve. Can be + * Logical or Physical + * return_header - Where the table header is returned * * RETURN: Status * - * DESCRIPTION: Maps the physical address of table into a logical address + * DESCRIPTION: Get an ACPI table header. Works in both physical or virtual + * addressing mode. Works with both physical or logical pointers. + * Table is either copied or mapped, depending on the pointer + * type and mode of the processor. 
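
When the pointer and the processor mode disagree, the ACPI_LOGMODE_PHYSPTR case below, only sizeof(struct acpi_table_header) bytes are mapped, copied out, and unmapped again; the full-length mapping happens later, once the length field is known. Condensed to its essentials as a sketch (assumes the ACPICA environment headers; the error reporting of the real code is dropped):

	static acpi_status
	get_header_from_physical(acpi_physical_address phys,
				 struct acpi_table_header *out)
	{
		struct acpi_table_header *mapped;
		acpi_status status;

		/* map only the fixed-size header, not the whole table */
		status = acpi_os_map_memory(phys, sizeof(*out), (void **) &mapped);
		if (ACPI_FAILURE(status))
			return status;

		ACPI_MEMCPY(out, mapped, sizeof(*out));	/* copy it out ... */
		acpi_os_unmap_memory(mapped, sizeof(*out)); /* ... drop the mapping */
		return AE_OK;
	}
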
* ******************************************************************************/ acpi_status -acpi_tb_get_table ( - ACPI_PHYSICAL_ADDRESS physical_address, - acpi_table_header *buffer_ptr, - acpi_table_desc *table_info) +acpi_tb_get_table_header ( + struct acpi_pointer *address, + struct acpi_table_header *return_header) { - acpi_table_header *table_header = NULL; - acpi_table_header *full_table = NULL; - u32 size; - u8 allocation; - acpi_status status = AE_OK; - - - FUNCTION_TRACE ("Tb_get_table"); - - - if (!table_info) { - return_ACPI_STATUS (AE_BAD_PARAMETER); - } - - - if (buffer_ptr) { - /* - * Getting data from a buffer, not BIOS tables - */ - table_header = buffer_ptr; - status = acpi_tb_validate_table_header (table_header); - if (ACPI_FAILURE (status)) { - /* Table failed verification, map all errors to BAD_DATA */ + acpi_status status = AE_OK; + struct acpi_table_header *header = NULL; - return_ACPI_STATUS (AE_BAD_DATA); - } - /* Allocate buffer for the entire table */ + ACPI_FUNCTION_TRACE ("tb_get_table_header"); - full_table = ACPI_MEM_ALLOCATE (table_header->length); - if (!full_table) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - /* Copy the entire table (including header) to the local buffer */ + /* + * Flags contains the current processor mode (Virtual or Physical addressing) + * The pointer_type is either Logical or Physical + */ + switch (address->pointer_type) { + case ACPI_PHYSMODE_PHYSPTR: + case ACPI_LOGMODE_LOGPTR: - size = table_header->length; - MEMCPY (full_table, buffer_ptr, size); + /* Pointer matches processor mode, copy the header */ - /* Save allocation type */ + ACPI_MEMCPY (return_header, address->pointer.logical, sizeof (struct acpi_table_header)); + break; - allocation = ACPI_MEM_ALLOCATED; - } + case ACPI_LOGMODE_PHYSPTR: - /* - * Not reading from a buffer, just map the table's physical memory - * into our address space. - */ - else { - size = SIZE_IN_HEADER; + /* Create a logical address for the physical pointer*/ - status = acpi_tb_map_acpi_table (physical_address, &size, &full_table); + status = acpi_os_map_memory (address->pointer.physical, sizeof (struct acpi_table_header), + (void **) &header); if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not map memory at %8.8X%8.8X for length %X\n", + ACPI_HIDWORD (address->pointer.physical), + ACPI_LODWORD (address->pointer.physical), + sizeof (struct acpi_table_header))); return_ACPI_STATUS (status); } - /* Save allocation type */ + /* Copy header and delete mapping */ - allocation = ACPI_MEM_MAPPED; - } + ACPI_MEMCPY (return_header, header, sizeof (struct acpi_table_header)); + acpi_os_unmap_memory (header, sizeof (struct acpi_table_header)); + break; - /* Return values */ + default: - table_info->pointer = full_table; - table_info->length = size; - table_info->allocation = allocation; - table_info->base_pointer = full_table; + ACPI_REPORT_ERROR (("Invalid address flags %X\n", + address->pointer_type)); + return_ACPI_STATUS (AE_BAD_PARAMETER); + } - return_ACPI_STATUS (status); + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_tb_get_all_tables + * FUNCTION: acpi_tb_get_table_body * - * PARAMETERS: Number_of_tables - Number of tables to get - * Table_ptr - Input buffer pointer, optional + * PARAMETERS: Address - Address of table to retrieve. 
Can be + * Logical or Physical + * Header - Header of the table to retrieve + * table_info - Where the table info is returned * * RETURN: Status * - * DESCRIPTION: Load and validate all tables other than the RSDT. The RSDT must - * already be loaded and validated. + * DESCRIPTION: Get an entire ACPI table with support to allow the host OS to + * replace the table with a newer version (table override.) + * Works in both physical or virtual + * addressing mode. Works with both physical or logical pointers. + * Table is either copied or mapped, depending on the pointer + * type and mode of the processor. * ******************************************************************************/ acpi_status -acpi_tb_get_all_tables ( - u32 number_of_tables, - acpi_table_header *table_ptr) +acpi_tb_get_table_body ( + struct acpi_pointer *address, + struct acpi_table_header *header, + struct acpi_table_desc *table_info) { - acpi_status status = AE_OK; - u32 index; - acpi_table_desc table_info; - - - FUNCTION_TRACE ("Tb_get_all_tables"); + acpi_status status; - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Number of tables: %d\n", number_of_tables)); + ACPI_FUNCTION_TRACE ("tb_get_table_body"); - /* - * Loop through all table pointers found in RSDT. - * This will NOT include the FACS and DSDT - we must get - * them after the loop - */ - for (index = 0; index < number_of_tables; index++) { - /* Clear the Table_info each time */ - - MEMSET (&table_info, 0, sizeof (acpi_table_desc)); - - /* Get the table via the XSDT */ - - status = acpi_tb_get_table ((ACPI_PHYSICAL_ADDRESS) - ACPI_GET_ADDRESS (acpi_gbl_XSDT->table_offset_entry[index]), - table_ptr, &table_info); - - /* Ignore a table that failed verification */ - - if (status == AE_BAD_DATA) { - continue; - } - - /* However, abort on serious errors */ - - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - /* Recognize and install the table */ - - status = acpi_tb_install_table (table_ptr, &table_info); - if (ACPI_FAILURE (status)) { - /* - * Unrecognized or unsupported table, delete it and ignore the - * error. Just get as many tables as we can, later we will - * determine if there are enough tables to continue. - */ - acpi_tb_uninstall_table (&table_info); - } - } - - - /* - * Convert the FADT to a common format. This allows earlier revisions of the - * table to coexist with newer versions, using common access code. - */ - status = acpi_tb_convert_table_fadt (); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - - /* - * Get the minimum set of ACPI tables, namely: - * - * 1) FADT (via RSDT in loop above) - * 2) FACS - * 3) DSDT - * - */ - - /* - * Get the FACS (must have the FADT first, from loop above) - * Acpi_tb_get_table_facs will fail if FADT pointer is not valid - */ - status = acpi_tb_get_table_facs (table_ptr, &table_info); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - /* Install the FACS */ - - status = acpi_tb_install_table (table_ptr, &table_info); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - /* - * Create the common FACS pointer table - * (Contains pointers to the original table) - */ - status = acpi_tb_build_common_facs (&table_info); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + if (!table_info || !address) { + return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* - * Get the DSDT (We know that the FADT is valid now) + * Attempt table override. 
*/ - status = acpi_tb_get_table ((ACPI_PHYSICAL_ADDRESS) ACPI_GET_ADDRESS (acpi_gbl_FADT->Xdsdt), - table_ptr, &table_info); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - /* Install the DSDT */ + status = acpi_tb_table_override (header, table_info); + if (ACPI_SUCCESS (status)) { + /* Table was overridden by the host OS */ - status = acpi_tb_install_table (table_ptr, &table_info); - if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } - /* Dump the DSDT Header */ - - ACPI_DEBUG_PRINT ((ACPI_DB_TABLES, "Hex dump of DSDT Header:\n")); - DUMP_BUFFER ((u8 *) acpi_gbl_DSDT, sizeof (acpi_table_header)); - - /* Dump the entire DSDT */ - - ACPI_DEBUG_PRINT ((ACPI_DB_TABLES, - "Hex dump of DSDT (After header), size %d (%x)\n", - acpi_gbl_DSDT->length, acpi_gbl_DSDT->length)); - DUMP_BUFFER ((u8 *) (acpi_gbl_DSDT + 1), acpi_gbl_DSDT->length); - - /* - * Initialize the capabilities flags. - * Assumes that platform supports ACPI_MODE since we have tables! - */ - acpi_gbl_system_flags |= acpi_hw_get_mode_capabilities (); - - - /* Always delete the RSDP mapping, we are done with it */ - - acpi_tb_delete_acpi_table (ACPI_TABLE_RSDP); + /* No override, get the original table */ + status = acpi_tb_get_this_table (address, header, table_info); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_tb_verify_rsdp + * FUNCTION: acpi_tb_table_override * - * PARAMETERS: Number_of_tables - Where the table count is placed + * PARAMETERS: Header - Pointer to table header + * table_info - Return info if table is overridden * - * RETURN: Status + * RETURN: None * - * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table) + * DESCRIPTION: Attempts override of current table with a new one if provided + * by the host OS. * ******************************************************************************/ acpi_status -acpi_tb_verify_rsdp ( - ACPI_PHYSICAL_ADDRESS rsdp_physical_address) +acpi_tb_table_override ( + struct acpi_table_header *header, + struct acpi_table_desc *table_info) { - acpi_table_desc table_info; - acpi_status status; - u8 *table_ptr; + struct acpi_table_header *new_table; + acpi_status status; + struct acpi_pointer address; - FUNCTION_TRACE ("Tb_verify_rsdp"); + ACPI_FUNCTION_TRACE ("tb_table_override"); /* - * Obtain access to the RSDP structure + * The OSL will examine the header and decide whether to override this + * table. If it decides to override, a table will be returned in new_table, + * which we will then copy. 
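
acpi_os_table_override() is the OSL hook driving this function: the host returns a replacement table through *new_table, or NULL to decline, which is exactly what the code below checks. A sketch of a host-side implementation that swaps in a compiled-in DSDT; my_dsdt is hypothetical, and only the hook's signature and the NULL-means-no-override convention come from the patch:

	extern struct acpi_table_header my_dsdt;	/* e.g. a compiled-in DSDT image */

	acpi_status
	acpi_os_table_override(struct acpi_table_header *existing,
			       struct acpi_table_header **new_table)
	{
		if (!existing || !new_table)
			return AE_BAD_PARAMETER;

		if (ACPI_STRNCMP(existing->signature, "DSDT", 4) == 0)
			*new_table = &my_dsdt;	/* replace the BIOS DSDT */
		else
			*new_table = NULL;	/* decline: keep the original */

		return AE_OK;
	}
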
*/ - status = acpi_os_map_memory (rsdp_physical_address, sizeof (RSDP_DESCRIPTOR), - (void **) &table_ptr); + status = acpi_os_table_override (header, &new_table); if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - /* - * The signature and checksum must both be correct - */ - if (STRNCMP ((NATIVE_CHAR *) table_ptr, RSDP_SIG, sizeof (RSDP_SIG)-1) != 0) { - /* Nope, BAD Signature */ + /* Some severe error from the OSL, but we basically ignore it */ - status = AE_BAD_SIGNATURE; - goto cleanup; + ACPI_REPORT_ERROR (("Could not override ACPI table, %s\n", + acpi_format_exception (status))); + return_ACPI_STATUS (status); } - if (acpi_tb_checksum (table_ptr, RSDP_CHECKSUM_LENGTH) != 0) { - /* Nope, BAD Checksum */ + if (!new_table) { + /* No table override */ - status = AE_BAD_CHECKSUM; - goto cleanup; + return_ACPI_STATUS (AE_NO_ACPI_TABLES); } - /* TBD: Check extended checksum if table version >= 2 */ - - /* The RSDP supplied is OK */ - - table_info.pointer = (acpi_table_header *) table_ptr; - table_info.length = sizeof (RSDP_DESCRIPTOR); - table_info.allocation = ACPI_MEM_MAPPED; - table_info.base_pointer = table_ptr; - - /* Save the table pointers and allocation info */ + /* + * We have a new table to override the old one. Get a copy of + * the new one. We know that the new table has a logical pointer. + */ + address.pointer_type = ACPI_LOGICAL_POINTER | ACPI_LOGICAL_ADDRESSING; + address.pointer.logical = new_table; - status = acpi_tb_init_table_descriptor (ACPI_TABLE_RSDP, &table_info); + status = acpi_tb_get_this_table (&address, new_table, table_info); if (ACPI_FAILURE (status)) { - goto cleanup; + ACPI_REPORT_ERROR (("Could not copy override ACPI table, %s\n", + acpi_format_exception (status))); + return_ACPI_STATUS (status); } + /* Copy the table info */ - /* Save the RSDP in a global for easy access */ - - acpi_gbl_RSDP = (RSDP_DESCRIPTOR *) table_info.pointer; - return_ACPI_STATUS (status); - - - /* Error exit */ -cleanup: + ACPI_REPORT_INFO (("Table [%4.4s] replaced by host OS\n", + table_info->pointer->signature)); - acpi_os_unmap_memory (table_ptr, sizeof (RSDP_DESCRIPTOR)); - return_ACPI_STATUS (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_tb_get_rsdt_address - * - * PARAMETERS: None - * - * RETURN: RSDT physical address - * - * DESCRIPTION: Extract the address of the RSDT or XSDT, depending on the - * version of the RSDP - * - ******************************************************************************/ - -ACPI_PHYSICAL_ADDRESS -acpi_tb_get_rsdt_address (void) -{ - ACPI_PHYSICAL_ADDRESS physical_address; - - - FUNCTION_ENTRY (); - - - /* - * For RSDP revision 0 or 1, we use the RSDT. - * For RSDP revision 2 (and above), we use the XSDT - */ - if (acpi_gbl_RSDP->revision < 2) { -#ifdef _IA64 - /* 0.71 RSDP has 64bit Rsdt address field */ - physical_address = ((RSDP_DESCRIPTOR_REV071 *)acpi_gbl_RSDP)->rsdt_physical_address; -#else - physical_address = (ACPI_PHYSICAL_ADDRESS) acpi_gbl_RSDP->rsdt_physical_address; -#endif - } - - else { - physical_address = (ACPI_PHYSICAL_ADDRESS) - ACPI_GET_ADDRESS (acpi_gbl_RSDP->xsdt_physical_address); - } - - return (physical_address); + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_tb_validate_rsdt + * FUNCTION: acpi_tb_get_this_table * - * PARAMETERS: Table_ptr - Addressable pointer to the RSDT. + * PARAMETERS: Address - Address of table to retrieve. 
Can be + * Logical or Physical + * Header - Header of the table to retrieve + * table_info - Where the table info is returned * * RETURN: Status * - * DESCRIPTION: Validate signature for the RSDT or XSDT + * DESCRIPTION: Get an entire ACPI table. Works in both physical or virtual + * addressing mode. Works with both physical or logical pointers. + * Table is either copied or mapped, depending on the pointer + * type and mode of the processor. * ******************************************************************************/ acpi_status -acpi_tb_validate_rsdt ( - acpi_table_header *table_ptr) +acpi_tb_get_this_table ( + struct acpi_pointer *address, + struct acpi_table_header *header, + struct acpi_table_desc *table_info) { - u32 no_match; + struct acpi_table_header *full_table = NULL; + u8 allocation; + acpi_status status = AE_OK; - PROC_NAME ("Tb_validate_rsdt"); + ACPI_FUNCTION_TRACE ("tb_get_this_table"); /* - * For RSDP revision 0 or 1, we use the RSDT. - * For RSDP revision 2 (and above), we use the XSDT + * Flags contains the current processor mode (Virtual or Physical addressing) + * The pointer_type is either Logical or Physical */ - if (acpi_gbl_RSDP->revision < 2) { - no_match = STRNCMP ((char *) table_ptr, RSDT_SIG, - sizeof (RSDT_SIG) -1); - } - else { - no_match = STRNCMP ((char *) table_ptr, XSDT_SIG, - sizeof (XSDT_SIG) -1); - } - - - if (no_match) { - /* Invalid RSDT or XSDT signature */ - - REPORT_ERROR (("Invalid signature where RSDP indicates RSDT/XSDT should be located\n")); - - DUMP_BUFFER (acpi_gbl_RSDP, 20); - - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_ERROR, - "RSDT/XSDT signature at %X is invalid\n", - acpi_gbl_RSDP->rsdt_physical_address)); - - return (AE_BAD_SIGNATURE); - } - - return (AE_OK); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_tb_get_table_pointer - * - * PARAMETERS: Physical_address - Address from RSDT - * Flags - virtual or physical addressing - * Table_ptr - Addressable address (output) - * - * RETURN: Status - * - * DESCRIPTION: Create an addressable pointer to an ACPI table - * - ******************************************************************************/ - -acpi_status -acpi_tb_get_table_pointer ( - ACPI_PHYSICAL_ADDRESS physical_address, - u32 flags, - u32 *size, - acpi_table_header **table_ptr) -{ - acpi_status status; - - - FUNCTION_ENTRY (); - - - if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) { - *size = SIZE_IN_HEADER; - status = acpi_tb_map_acpi_table (physical_address, size, table_ptr); - } - - else { - *size = 0; - *table_ptr = (acpi_table_header *) (ACPI_TBLPTR) physical_address; - - status = AE_OK; - } - - return (status); -} - + switch (address->pointer_type) { + case ACPI_PHYSMODE_PHYSPTR: + case ACPI_LOGMODE_LOGPTR: -/******************************************************************************* - * - * FUNCTION: Acpi_tb_get_table_rsdt - * - * PARAMETERS: Number_of_tables - Where the table count is placed - * - * RETURN: Status - * - * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table) - * - ******************************************************************************/ + /* Pointer matches processor mode, copy the table to a new buffer */ -acpi_status -acpi_tb_get_table_rsdt ( - u32 *number_of_tables) -{ - acpi_table_desc table_info; - acpi_status status; - ACPI_PHYSICAL_ADDRESS physical_address; + full_table = ACPI_MEM_ALLOCATE (header->length); + if (!full_table) { + ACPI_REPORT_ERROR (("Could not allocate table memory for [%4.4s] length %X\n", + 
header->signature, header->length)); + return_ACPI_STATUS (AE_NO_MEMORY); + } + /* Copy the entire table (including header) to the local buffer */ - FUNCTION_TRACE ("Tb_get_table_rsdt"); + ACPI_MEMCPY (full_table, address->pointer.logical, header->length); + /* Save allocation type */ - /* - * Get the RSDT from the RSDP - */ - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "RSDP located at %p, RSDT physical=%8.8X%8.8X \n", - acpi_gbl_RSDP, HIDWORD(acpi_gbl_RSDP->rsdt_physical_address), - LODWORD(acpi_gbl_RSDP->rsdt_physical_address))); + allocation = ACPI_MEM_ALLOCATED; + break; - physical_address = acpi_tb_get_rsdt_address (); + case ACPI_LOGMODE_PHYSPTR: + /* + * Just map the table's physical memory + * into our address space. + */ + status = acpi_os_map_memory (address->pointer.physical, (acpi_size) header->length, + (void **) &full_table); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not map memory for table [%4.4s] at %8.8X%8.8X for length %X\n", + header->signature, + ACPI_HIDWORD (address->pointer.physical), + ACPI_LODWORD (address->pointer.physical), header->length)); + return (status); + } - /* Get the RSDT/XSDT */ + /* Save allocation type */ - status = acpi_tb_get_table (physical_address, NULL, &table_info); - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not get the RSDT, %s\n", - acpi_format_exception (status))); - return_ACPI_STATUS (status); - } + allocation = ACPI_MEM_MAPPED; + break; - /* Check the RSDT or XSDT signature */ + default: - status = acpi_tb_validate_rsdt (table_info.pointer); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid address flags %X\n", + address->pointer_type)); + return_ACPI_STATUS (AE_BAD_PARAMETER); } - /* - * Valid RSDT signature, verify the checksum. If it fails, just - * print a warning and ignore it. 
+ * Validate checksum for _most_ tables, + * even the ones whose signature we don't recognize */ - status = acpi_tb_verify_table_checksum (table_info.pointer); + if (table_info->type != ACPI_TABLE_FACS) { + status = acpi_tb_verify_table_checksum (full_table); +#if (!ACPI_CHECKSUM_ABORT) + if (ACPI_FAILURE (status)) { + /* Ignore the error if configuration says so */ - /* Convert and/or copy to an XSDT structure */ - - status = acpi_tb_convert_to_xsdt (&table_info, number_of_tables); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + status = AE_OK; + } +#endif } - /* Save the table pointers and allocation info */ - - status = acpi_tb_init_table_descriptor (ACPI_TABLE_XSDT, &table_info); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } + /* Return values */ - acpi_gbl_XSDT = (xsdt_descriptor *) table_info.pointer; + table_info->pointer = full_table; + table_info->length = (acpi_size) header->length; + table_info->allocation = allocation; - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "XSDT located at %p\n", acpi_gbl_XSDT)); + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "Found table [%4.4s] at %8.8X%8.8X, mapped/copied to %p\n", + full_table->signature, + ACPI_HIDWORD (address->pointer.physical), + ACPI_LODWORD (address->pointer.physical), full_table)); return_ACPI_STATUS (status); } -/****************************************************************************** +/******************************************************************************* * - * FUNCTION: Acpi_tb_get_table_facs + * FUNCTION: acpi_tb_get_table_ptr * - * PARAMETERS: *Buffer_ptr - If Buffer_ptr is valid, read data from - * buffer rather than searching memory - * *Table_info - Where the table info is returned + * PARAMETERS: table_type - one of the defined table types + * Instance - Which table of this type + * table_ptr_loc - pointer to location to place the pointer for + * return * * RETURN: Status * - * DESCRIPTION: Returns a pointer to the FACS as defined in FADT. This - * function assumes the global variable FADT has been - * correctly initialized. The value of FADT->Firmware_ctrl - * into a far pointer which is returned. + * DESCRIPTION: This function is called to get the pointer to an ACPI table. * - *****************************************************************************/ + ******************************************************************************/ acpi_status -acpi_tb_get_table_facs ( - acpi_table_header *buffer_ptr, - acpi_table_desc *table_info) +acpi_tb_get_table_ptr ( + acpi_table_type table_type, + u32 instance, + struct acpi_table_header **table_ptr_loc) { - acpi_table_header *table_ptr = NULL; - u32 size; - u8 allocation; - acpi_status status = AE_OK; + struct acpi_table_desc *table_desc; + u32 i; - FUNCTION_TRACE ("Tb_get_table_facs"); + ACPI_FUNCTION_TRACE ("tb_get_table_ptr"); - /* Must have a valid FADT pointer */ - - if (!acpi_gbl_FADT) { + if (!acpi_gbl_DSDT) { return_ACPI_STATUS (AE_NO_ACPI_TABLES); } - size = sizeof (FACS_DESCRIPTOR); - if (buffer_ptr) { - /* - * Getting table from a file -- allocate a buffer and - * read the table. 
- */ - table_ptr = ACPI_MEM_ALLOCATE (size); - if(!table_ptr) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - MEMCPY (table_ptr, buffer_ptr, size); - - /* Save allocation type */ - - allocation = ACPI_MEM_ALLOCATED; + if (table_type > ACPI_TABLE_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); } - else { - /* Just map the physical memory to our address space */ + /* + * For all table types (Single/Multiple), the first + * instance is always in the list head. + */ + if (instance == 1) { + /* Get the first */ - status = acpi_tb_map_acpi_table ((ACPI_PHYSICAL_ADDRESS) ACPI_GET_ADDRESS (acpi_gbl_FADT->Xfirmware_ctrl), - &size, &table_ptr); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + if (acpi_gbl_table_lists[table_type].next) { + *table_ptr_loc = acpi_gbl_table_lists[table_type].next->pointer; } + return_ACPI_STATUS (AE_OK); + } - /* Save allocation type */ - - allocation = ACPI_MEM_MAPPED; + /* + * Check for instance out of range + */ + if (instance > acpi_gbl_table_lists[table_type].count) { + return_ACPI_STATUS (AE_NOT_EXIST); } + /* Walk the list to get the desired table + * Since the if (Instance == 1) check above checked for the + * first table, setting table_desc equal to the .Next member + * is actually pointing to the second table. Therefore, we + * need to walk from the 2nd table until we reach the Instance + * that the user is looking for and return its table pointer. + */ + table_desc = acpi_gbl_table_lists[table_type].next; + for (i = 2; i < instance; i++) { + table_desc = table_desc->next; + } - /* Return values */ + /* We are now pointing to the requested table's descriptor */ - table_info->pointer = table_ptr; - table_info->length = size; - table_info->allocation = allocation; - table_info->base_pointer = table_ptr; + *table_ptr_loc = table_desc->pointer; - return_ACPI_STATUS (status); + return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables/tbinstal.c linux.21rc5-ac2/drivers/acpi/tables/tbinstal.c --- linux.21rc5/drivers/acpi/tables/tbinstal.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables/tbinstal.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,44 +1,118 @@ /****************************************************************************** * * Module Name: tbinstal - ACPI table installation and removal - * $Revision: 45 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "achware.h" -#include "actables.h" +#include <acpi/acpi.h> +#include <acpi/actables.h> #define _COMPONENT ACPI_TABLES - MODULE_NAME ("tbinstal") + ACPI_MODULE_NAME ("tbinstal") /******************************************************************************* * - * FUNCTION: Acpi_tb_install_table + * FUNCTION: acpi_tb_match_signature * - * PARAMETERS: Table_ptr - Input buffer pointer, optional - * Table_info - Return value from Acpi_tb_get_table + * PARAMETERS: Signature - Table signature to match + * table_info - Return data + * search_type - Table flags to match against + * + * RETURN: Status + * + * DESCRIPTION: Compare signature against the list of "ACPI-subsystem-owned" + * tables (DSDT/FADT/SSDT, etc.) Returns the table_type ID on match.
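
[Usage sketch, not part of this patch: a hypothetical caller can push a batch of raw headers through the matcher defined just below and keep only the tables the core subsystem consumes; everything else comes back AE_TABLE_NOT_SUPPORTED.]

	static u32
	count_core_tables (
		struct acpi_table_header        **headers,
		u32                             count,
		u8                              search_type)
	{
		struct acpi_table_desc          table_info;
		u32                             matched = 0;
		u32                             i;

		for (i = 0; i < count; i++) {
			/* On AE_OK, table_info.type indexes acpi_gbl_table_data[] */

			if (ACPI_SUCCESS (acpi_tb_match_signature (
					headers[i]->signature, &table_info, search_type))) {
				matched++;
			}
		}
		return (matched);
	}
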
+ * + ******************************************************************************/ + +acpi_status +acpi_tb_match_signature ( + char *signature, + struct acpi_table_desc *table_info, + u8 search_type) +{ + acpi_native_uint i; + + + ACPI_FUNCTION_TRACE ("tb_match_signature"); + + + /* + * Search for a signature match among the known table types + */ + for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) { + if (!(acpi_gbl_table_data[i].flags & search_type)) { + continue; + } + + if (!ACPI_STRNCMP (signature, acpi_gbl_table_data[i].signature, + acpi_gbl_table_data[i].sig_length)) { + /* Found a signature match, return index if requested */ + + if (table_info) { + table_info->type = (u8) i; + } + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "Table [%4.4s] is an ACPI table consumed by the core subsystem\n", + (char *) acpi_gbl_table_data[i].signature)); + + return_ACPI_STATUS (AE_OK); + } + } + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "Table [%4.4s] is not an ACPI table consumed by the core subsystem - ignored\n", + (char *) signature)); + + return_ACPI_STATUS (AE_TABLE_NOT_SUPPORTED); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_tb_install_table + * + * PARAMETERS: table_info - Return value from acpi_tb_get_table_body * * RETURN: Status * @@ -50,45 +124,43 @@ acpi_status acpi_tb_install_table ( - acpi_table_header *table_ptr, - acpi_table_desc *table_info) + struct acpi_table_desc *table_info) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Tb_install_table"); + ACPI_FUNCTION_TRACE ("tb_install_table"); - /* - * Check the table signature and make sure it is recognized - * Also checks the header checksum - */ - status = acpi_tb_recognize_table (table_ptr, table_info); + /* Lock tables while installing */ + + status = acpi_ut_acquire_mutex (ACPI_MTX_TABLES); if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not acquire table mutex for [%4.4s], %s\n", + table_info->pointer->signature, acpi_format_exception (status))); return_ACPI_STATUS (status); } - /* Lock tables while installing */ - - acpi_ut_acquire_mutex (ACPI_MTX_TABLES); - /* Install the table into the global data structure */ status = acpi_tb_init_table_descriptor (table_info->type, table_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not install ACPI table [%4.4s], %s\n", + table_info->pointer->signature, acpi_format_exception (status))); + } ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%s located at %p\n", - acpi_gbl_acpi_table_data[table_info->type].name, table_info->pointer)); + acpi_gbl_table_data[table_info->type].name, table_info->pointer)); - acpi_ut_release_mutex (ACPI_MTX_TABLES); + (void) acpi_ut_release_mutex (ACPI_MTX_TABLES); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_tb_recognize_table + * FUNCTION: acpi_tb_recognize_table * - * PARAMETERS: Table_ptr - Input buffer pointer, optional - * Table_info - Return value from Acpi_tb_get_table + * PARAMETERS: table_info - Return value from acpi_tb_get_table_body * * RETURN: Status * @@ -106,73 +178,44 @@ acpi_status acpi_tb_recognize_table ( - acpi_table_header *table_ptr, - acpi_table_desc *table_info) + struct acpi_table_desc *table_info, + u8 search_type) { - acpi_table_header *table_header; - acpi_status status; - acpi_table_type table_type = 0; - u32 i; + struct acpi_table_header *table_header; + acpi_status status; - FUNCTION_TRACE ("Tb_recognize_table"); + ACPI_FUNCTION_TRACE ("tb_recognize_table"); /* 
Ensure that we have a valid table pointer */ - table_header = (acpi_table_header *) table_info->pointer; + table_header = (struct acpi_table_header *) table_info->pointer; if (!table_header) { return_ACPI_STATUS (AE_BAD_PARAMETER); } /* - * Search for a signature match among the known table types - * Start at index one -> Skip the RSDP + * We only "recognize" a limited number of ACPI tables -- namely, the + * ones that are used by the subsystem (DSDT, FADT, etc.) + * + * An AE_TABLE_NOT_SUPPORTED means that the table was not recognized. + * This can be any one of many valid ACPI tables, it just isn't one of + * the tables that is consumed by the core subsystem */ - status = AE_SUPPORT; - for (i = 1; i < NUM_ACPI_TABLES; i++) { - if (!STRNCMP (table_header->signature, - acpi_gbl_acpi_table_data[i].signature, - acpi_gbl_acpi_table_data[i].sig_length)) { - /* - * Found a signature match, get the pertinent info from the - * Table_data structure - */ - table_type = i; - status = acpi_gbl_acpi_table_data[i].status; - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Found %4.4s\n", - (char*)acpi_gbl_acpi_table_data[i].signature)); - break; - } + status = acpi_tb_match_signature (table_header->signature, table_info, search_type); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - /* Return the table type and length via the info struct */ - - table_info->type = (u8) table_type; - table_info->length = table_header->length; - - - /* - * Validate checksum for _most_ tables, - * even the ones whose signature we don't recognize - */ - if (table_type != ACPI_TABLE_FACS) { - /* But don't abort if the checksum is wrong */ - /* TBD: [Future] make this a configuration option? */ - - acpi_tb_verify_table_checksum (table_header); + status = acpi_tb_validate_table_header (table_header); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - /* - * An AE_SUPPORT means that the table was not recognized. - * We basically ignore this; just print a debug message - */ - if (status == AE_SUPPORT) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Unsupported table %s (Type %X) was found and discarded\n", - acpi_gbl_acpi_table_data[table_type].name, table_type)); - } + /* Return the table type and length via the info struct */ + + table_info->length = (acpi_size) table_header->length; return_ACPI_STATUS (status); } @@ -180,10 +223,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_tb_init_table_descriptor + * FUNCTION: acpi_tb_init_table_descriptor * - * PARAMETERS: Table_type - The type of the table - * Table_info - A table info struct + * PARAMETERS: table_type - The type of the table + * table_info - A table info struct * * RETURN: None. 
* @@ -193,94 +236,78 @@ acpi_status acpi_tb_init_table_descriptor ( - acpi_table_type table_type, - acpi_table_desc *table_info) + acpi_table_type table_type, + struct acpi_table_desc *table_info) { - acpi_table_desc *list_head; - acpi_table_desc *table_desc; + struct acpi_table_list *list_head; + struct acpi_table_desc *table_desc; + + ACPI_FUNCTION_TRACE_U32 ("tb_init_table_descriptor", table_type); - FUNCTION_TRACE_U32 ("Tb_init_table_descriptor", table_type); + + /* Allocate a descriptor for this table */ + + table_desc = ACPI_MEM_CALLOCATE (sizeof (struct acpi_table_desc)); + if (!table_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } /* * Install the table into the global data structure */ - list_head = &acpi_gbl_acpi_tables[table_type]; - table_desc = list_head; - + list_head = &acpi_gbl_table_lists[table_type]; /* * Two major types of tables: 1) Only one instance is allowed. This * includes most ACPI tables such as the DSDT. 2) Multiple instances of * the table are allowed. This includes SSDT and PSDTs. */ - if (IS_SINGLE_TABLE (acpi_gbl_acpi_table_data[table_type].flags)) { + if (ACPI_IS_SINGLE_TABLE (acpi_gbl_table_data[table_type].flags)) { /* * Only one table allowed, and a table has already been installed * at this location, so return an error. */ - if (list_head->pointer) { - return_ACPI_STATUS (AE_EXIST); + if (list_head->next) { + return_ACPI_STATUS (AE_ALREADY_EXISTS); } - - table_desc->count = 1; } + /* + * Link the new table in to the list of tables of this type. + * Just insert at the start of the list, order unimportant. + * + * table_desc->Prev is already NULL from calloc() + */ + table_desc->next = list_head->next; + list_head->next = table_desc; - else { - /* - * Multiple tables allowed for this table type, we must link - * the new table in to the list of tables of this type.
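
[Freestanding sketch, hypothetical local types: the head insertion added above follows the usual doubly-linked pattern. The two invariants it relies on are that a new head keeps a NULL prev (guaranteed here by calloc) and that only a non-NULL successor gets its back pointer patched.]

	struct node {
		struct node             *prev;
		struct node             *next;
	};

	struct table_chain {
		struct node             *next;          /* First node, NULL when empty */
		u32                     count;
	};

	static void
	insert_at_head (
		struct table_chain      *list,
		struct node             *node)
	{
		node->prev = NULL;                      /* The head has no predecessor */
		node->next = list->next;
		list->next = node;

		if (node->next) {
			node->next->prev = node;
		}
		list->count++;
	}
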
- */ - if (list_head->pointer) { - table_desc = ACPI_MEM_CALLOCATE (sizeof (acpi_table_desc)); - if (!table_desc) { - return_ACPI_STATUS (AE_NO_MEMORY); - } - - list_head->count++; - - /* Update the original previous */ - - list_head->prev->next = table_desc; - - /* Update new entry */ - - table_desc->prev = list_head->prev; - table_desc->next = list_head; - - /* Update list head */ - - list_head->prev = table_desc; - } - - else { - table_desc->count = 1; - } + if (table_desc->next) { + table_desc->next->prev = table_desc; } + list_head->count++; - /* Common initialization of the table descriptor */ + /* Finish initialization of the table descriptor */ + table_desc->type = (u8) table_type; table_desc->pointer = table_info->pointer; - table_desc->base_pointer = table_info->base_pointer; table_desc->length = table_info->length; table_desc->allocation = table_info->allocation; table_desc->aml_start = (u8 *) (table_desc->pointer + 1), table_desc->aml_length = (u32) (table_desc->length - - (u32) sizeof (acpi_table_header)); - table_desc->table_id = acpi_ut_allocate_owner_id (OWNER_TYPE_TABLE); + (u32) sizeof (struct acpi_table_header)); + table_desc->table_id = acpi_ut_allocate_owner_id (ACPI_OWNER_TYPE_TABLE); table_desc->loaded_into_namespace = FALSE; /* * Set the appropriate global pointer (if there is one) to point to the * newly installed table */ - if (acpi_gbl_acpi_table_data[table_type].global_ptr) { - *(acpi_gbl_acpi_table_data[table_type].global_ptr) = table_info->pointer; + if (acpi_gbl_table_data[table_type].global_ptr) { + *(acpi_gbl_table_data[table_type].global_ptr) = table_info->pointer; } - /* Return Data */ table_info->table_id = table_desc->table_id; @@ -292,7 +319,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_tb_delete_acpi_tables + * FUNCTION: acpi_tb_delete_all_tables * * PARAMETERS: None. 
* @@ -303,25 +330,24 @@ ******************************************************************************/ void -acpi_tb_delete_acpi_tables (void) +acpi_tb_delete_all_tables (void) { - acpi_table_type type; + acpi_table_type type; /* * Free memory allocated for ACPI tables * Memory can either be mapped or allocated */ - for (type = 0; type < NUM_ACPI_TABLES; type++) { - acpi_tb_delete_acpi_table (type); + for (type = 0; type < NUM_ACPI_TABLE_TYPES; type++) { + acpi_tb_delete_tables_by_type (type); } - } /******************************************************************************* * - * FUNCTION: Acpi_tb_delete_acpi_table + * FUNCTION: acpi_tb_delete_tables_by_type * * PARAMETERS: Type - The table type to be deleted * @@ -333,23 +359,24 @@ ******************************************************************************/ void -acpi_tb_delete_acpi_table ( - acpi_table_type type) +acpi_tb_delete_tables_by_type ( + acpi_table_type type) { - FUNCTION_TRACE_U32 ("Tb_delete_acpi_table", type); + struct acpi_table_desc *table_desc; + u32 count; + u32 i; + + + ACPI_FUNCTION_TRACE_U32 ("tb_delete_tables_by_type", type); if (type > ACPI_TABLE_MAX) { return_VOID; } - - acpi_ut_acquire_mutex (ACPI_MTX_TABLES); - - /* Free the table */ - - acpi_tb_free_acpi_tables_of_type (&acpi_gbl_acpi_tables[type]); - + if (ACPI_FAILURE (acpi_ut_acquire_mutex (ACPI_MTX_TABLES))) { + return; + } /* Clear the appropriate "typed" global table pointer */ @@ -380,41 +407,11 @@ break; } - acpi_ut_release_mutex (ACPI_MTX_TABLES); - - return_VOID; -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_tb_free_acpi_tables_of_type - * - * PARAMETERS: Table_info - A table info struct - * - * RETURN: None. - * - * DESCRIPTION: Free the memory associated with an internal ACPI table - * Table mutex should be locked. - * - ******************************************************************************/ - -void -acpi_tb_free_acpi_tables_of_type ( - acpi_table_desc *list_head) -{ - acpi_table_desc *table_desc; - u32 count; - u32 i; - - - FUNCTION_TRACE_PTR ("Tb_free_acpi_tables_of_type", list_head); - - + /* Free the table */ /* Get the head of the list */ - table_desc = list_head; - count = list_head->count; + table_desc = acpi_gbl_table_lists[type].next; + count = acpi_gbl_table_lists[type].count; /* * Walk the entire list, deleting both the allocated tables @@ -424,15 +421,16 @@ table_desc = acpi_tb_uninstall_table (table_desc); } + (void) acpi_ut_release_mutex (ACPI_MTX_TABLES); return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_tb_delete_single_table + * FUNCTION: acpi_tb_delete_single_table * - * PARAMETERS: Table_info - A table info struct + * PARAMETERS: table_info - A table info struct * * RETURN: None. 
* @@ -443,44 +441,45 @@ void acpi_tb_delete_single_table ( - acpi_table_desc *table_desc) + struct acpi_table_desc *table_desc) { - if (!table_desc) { + /* Must have a valid table descriptor and pointer */ + + if ((!table_desc) || + (!table_desc->pointer)) { return; } - if (table_desc->pointer) { - /* Valid table, determine type of memory allocation */ - - switch (table_desc->allocation) { - - case ACPI_MEM_NOT_ALLOCATED: - break; + /* Valid table, determine type of memory allocation */ + switch (table_desc->allocation) { + case ACPI_MEM_NOT_ALLOCATED: + break; - case ACPI_MEM_ALLOCATED: + case ACPI_MEM_ALLOCATED: - ACPI_MEM_FREE (table_desc->base_pointer); - break; + ACPI_MEM_FREE (table_desc->pointer); + break; + case ACPI_MEM_MAPPED: - case ACPI_MEM_MAPPED: + acpi_os_unmap_memory (table_desc->pointer, table_desc->length); + break; - acpi_os_unmap_memory (table_desc->base_pointer, table_desc->length); - break; - } + default: + break; } } /******************************************************************************* * - * FUNCTION: Acpi_tb_uninstall_table + * FUNCTION: acpi_tb_uninstall_table * - * PARAMETERS: Table_info - A table info struct + * PARAMETERS: table_info - A table info struct * - * RETURN: None. + * RETURN: Pointer to the next table in the list (of same type) * * DESCRIPTION: Free the memory associated with an internal ACPI table that * is either installed or has never been installed. @@ -488,58 +487,45 @@ * ******************************************************************************/ -acpi_table_desc * +struct acpi_table_desc * acpi_tb_uninstall_table ( - acpi_table_desc *table_desc) + struct acpi_table_desc *table_desc) { - acpi_table_desc *next_desc; + struct acpi_table_desc *next_desc; - FUNCTION_TRACE_PTR ("Tb_delete_single_table", table_desc); + ACPI_FUNCTION_TRACE_PTR ("tb_uninstall_table", table_desc); if (!table_desc) { return_PTR (NULL); } - - /* Unlink the descriptor */ + /* Unlink the descriptor from the doubly linked list */ if (table_desc->prev) { table_desc->prev->next = table_desc->next; } + else { + /* Is first on list, update list head */ + + acpi_gbl_table_lists[table_desc->type].next = table_desc->next; + } if (table_desc->next) { table_desc->next->prev = table_desc->prev; } - /* Free the memory allocated for the table itself */ acpi_tb_delete_single_table (table_desc); + /* Free the table descriptor */ - /* Free the table descriptor (Don't delete the list head, tho) */ - - if ((table_desc->prev) == (table_desc->next)) { - - next_desc = NULL; - - /* Clear the list head */ - - table_desc->pointer = NULL; - table_desc->length = 0; - table_desc->count = 0; - - } - - else { - /* Free the table descriptor */ - - next_desc = table_desc->next; - ACPI_MEM_FREE (table_desc); - } + next_desc = table_desc->next; + ACPI_MEM_FREE (table_desc); + /* Return pointer to the next descriptor */ return_PTR (next_desc); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables/tbrsdt.c linux.21rc5-ac2/drivers/acpi/tables/tbrsdt.c --- linux.21rc5/drivers/acpi/tables/tbrsdt.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables/tbrsdt.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,315 @@ +/****************************************************************************** + * + * Module Name: tbrsdt - ACPI RSDT table utilities + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + + +#include <acpi/acpi.h> +#include <acpi/actables.h> + + +#define _COMPONENT ACPI_TABLES + ACPI_MODULE_NAME ("tbrsdt") + + +/******************************************************************************* + * + * FUNCTION: acpi_tb_verify_rsdp + * + * PARAMETERS: Address - RSDP (Pointer to RSDT) + * + * RETURN: Status + * + * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table) + * + ******************************************************************************/ + +acpi_status +acpi_tb_verify_rsdp ( + struct acpi_pointer *address) +{ + struct acpi_table_desc table_info; + acpi_status status; + struct rsdp_descriptor *rsdp; + + + ACPI_FUNCTION_TRACE ("tb_verify_rsdp"); + + + switch (address->pointer_type) { + case ACPI_LOGICAL_POINTER: + + rsdp = address->pointer.logical; + break; + + case ACPI_PHYSICAL_POINTER: + /* + * Obtain access to the RSDP structure + */ + status = acpi_os_map_memory (address->pointer.physical, sizeof (struct rsdp_descriptor), + (void **) &rsdp); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + break; + + default: + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* + * The signature and checksum must both be correct + */ + if (ACPI_STRNCMP ((char *) rsdp, RSDP_SIG, sizeof (RSDP_SIG)-1) != 0) { + /* Nope, BAD Signature */ + + status = AE_BAD_SIGNATURE; + goto cleanup; + } + + /* Check the standard checksum */ + + if (acpi_tb_checksum (rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) { + status = AE_BAD_CHECKSUM; + goto cleanup; + } + + /* Check extended checksum if table version >= 2 */ + + if (rsdp->revision >= 2) { + if (acpi_tb_checksum (rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0) { + status = AE_BAD_CHECKSUM; + goto cleanup; + } + } + + /* The RSDP supplied is OK */ + + table_info.pointer = ACPI_CAST_PTR (struct acpi_table_header, rsdp); + table_info.length = sizeof (struct rsdp_descriptor); + table_info.allocation = ACPI_MEM_MAPPED; + + /* Save the table pointers and allocation info */ + + status = acpi_tb_init_table_descriptor (ACPI_TABLE_RSDP, &table_info); + if (ACPI_FAILURE (status)) { + goto cleanup; + } + + /* Save the RSDP in a global for easy access */ + + acpi_gbl_RSDP = ACPI_CAST_PTR (struct rsdp_descriptor, table_info.pointer); + return_ACPI_STATUS (status); + + + /* Error exit */ +cleanup: + + if (acpi_gbl_table_flags & ACPI_PHYSICAL_POINTER) { + acpi_os_unmap_memory (rsdp, sizeof (struct rsdp_descriptor)); + } + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_tb_get_rsdt_address + * + * PARAMETERS: out_address - Where the RSDT/XSDT address is returned + * + * RETURN: None. The address is returned via out_address + * + * DESCRIPTION: Extract the address of the RSDT or XSDT, depending on the + * version of the RSDP + * + ******************************************************************************/ + +void +acpi_tb_get_rsdt_address ( + struct acpi_pointer *out_address) +{ + + ACPI_FUNCTION_ENTRY (); + + + out_address->pointer_type = acpi_gbl_table_flags | ACPI_LOGICAL_ADDRESSING; + + /* + * For RSDP revision 0 or 1, we use the RSDT. + * For RSDP revision 2 (and above), we use the XSDT + */ + if (acpi_gbl_RSDP->revision < 2) { + out_address->pointer.value = acpi_gbl_RSDP->rsdt_physical_address; + } + else { + out_address->pointer.value = acpi_gbl_RSDP->xsdt_physical_address; + } +} + + +/******************************************************************************* + * + * FUNCTION: acpi_tb_validate_rsdt + * + * PARAMETERS: table_ptr - Addressable pointer to the RSDT.
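
[Freestanding sketch, local names only: the two checksum windows applied by acpi_tb_verify_rsdp() above come from the ACPI specification - bytes 0-19 of the RSDP for the ACPI 1.0 checksum, bytes 0-35 for the 2.0 extended checksum - and each window must sum to zero byte-wise, modulo 256.]

	#define RSDP_V1_SUM_LENGTH      20      /* ACPI 1.0 portion of the RSDP */
	#define RSDP_V2_SUM_LENGTH      36      /* Full ACPI 2.0 RSDP */

	static u8
	byte_sum (
		const u8                *buffer,
		u32                     length)
	{
		u8                      sum = 0;

		while (length--) {
			sum = (u8) (sum + *buffer++);
		}
		return (sum);                   /* Zero for a valid structure */
	}

	static int
	rsdp_checksums_ok (
		const u8                *rsdp,
		u8                      revision)
	{
		if (byte_sum (rsdp, RSDP_V1_SUM_LENGTH)) {
			return (0);             /* Bad 1.0 checksum */
		}
		if (revision >= 2 && byte_sum (rsdp, RSDP_V2_SUM_LENGTH)) {
			return (0);             /* Bad 2.0 extended checksum */
		}
		return (1);
	}
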
+ * + * RETURN: Status + * + * DESCRIPTION: Validate signature for the RSDT or XSDT + * + ******************************************************************************/ + +acpi_status +acpi_tb_validate_rsdt ( + struct acpi_table_header *table_ptr) +{ + int no_match; + + + ACPI_FUNCTION_NAME ("tb_validate_rsdt"); + + + /* + * For RSDP revision 0 or 1, we use the RSDT. + * For RSDP revision 2 and above, we use the XSDT + */ + if (acpi_gbl_RSDP->revision < 2) { + no_match = ACPI_STRNCMP ((char *) table_ptr, RSDT_SIG, + sizeof (RSDT_SIG) -1); + } + else { + no_match = ACPI_STRNCMP ((char *) table_ptr, XSDT_SIG, + sizeof (XSDT_SIG) -1); + } + + if (no_match) { + /* Invalid RSDT or XSDT signature */ + + ACPI_REPORT_ERROR (("Invalid signature where RSDP indicates RSDT/XSDT should be located\n")); + + ACPI_DUMP_BUFFER (acpi_gbl_RSDP, 20); + + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_ERROR, + "RSDT/XSDT signature at %X (%p) is invalid\n", + acpi_gbl_RSDP->rsdt_physical_address, + (void *) (acpi_native_uint) acpi_gbl_RSDP->rsdt_physical_address)); + + return (AE_BAD_SIGNATURE); + } + + return (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_tb_get_table_rsdt + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table) + * + ******************************************************************************/ + +acpi_status +acpi_tb_get_table_rsdt ( + void) +{ + struct acpi_table_desc table_info; + acpi_status status; + struct acpi_pointer address; + + + ACPI_FUNCTION_TRACE ("tb_get_table_rsdt"); + + + /* Get the RSDT/XSDT via the RSDP */ + + acpi_tb_get_rsdt_address (&address); + + status = acpi_tb_get_table (&address, &table_info); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not get the RSDT/XSDT, %s\n", + acpi_format_exception (status))); + return_ACPI_STATUS (status); + } + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "RSDP located at %p, points to RSDT physical=%8.8X%8.8X \n", + acpi_gbl_RSDP, + ACPI_HIDWORD (address.pointer.value), + ACPI_LODWORD (address.pointer.value))); + + /* Check the RSDT or XSDT signature */ + + status = acpi_tb_validate_rsdt (table_info.pointer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Get the number of tables defined in the RSDT or XSDT */ + + acpi_gbl_rsdt_table_count = acpi_tb_get_table_count (acpi_gbl_RSDP, table_info.pointer); + + /* Convert and/or copy to an XSDT structure */ + + status = acpi_tb_convert_to_xsdt (&table_info); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Save the table pointers and allocation info */ + + status = acpi_tb_init_table_descriptor (ACPI_TABLE_XSDT, &table_info); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + acpi_gbl_XSDT = (XSDT_DESCRIPTOR *) table_info.pointer; + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "XSDT located at %p\n", acpi_gbl_XSDT)); + return_ACPI_STATUS (status); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables/tbutils.c linux.21rc5-ac2/drivers/acpi/tables/tbutils.c --- linux.21rc5/drivers/acpi/tables/tbutils.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables/tbutils.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,162 +1,102 @@ /****************************************************************************** * * Module Name: tbutils - Table manipulation utilities - * $Revision: 42 $ * 
*****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "actables.h" -#include "acinterp.h" +#include <acpi/acpi.h> +#include <acpi/actables.h> #define _COMPONENT ACPI_TABLES - MODULE_NAME ("tbutils") + ACPI_MODULE_NAME ("tbutils") /******************************************************************************* * - * FUNCTION: Acpi_tb_handle_to_object + * FUNCTION: acpi_tb_handle_to_object * - * PARAMETERS: Table_id - Id for which the function is searching - * Table_desc - Pointer to return the matching table + * PARAMETERS: table_id - Id for which the function is searching + * table_desc - Pointer to return the matching table * descriptor. * - * RETURN: Search the tables to find one with a matching Table_id and + * RETURN: Search the tables to find one with a matching table_id and * return a pointer to that table descriptor.
* ******************************************************************************/ acpi_status acpi_tb_handle_to_object ( - u16 table_id, - acpi_table_desc **table_desc) + u16 table_id, + struct acpi_table_desc **return_table_desc) { - u32 i; - acpi_table_desc *list_head; + u32 i; + struct acpi_table_desc *table_desc; - PROC_NAME ("Tb_handle_to_object"); + ACPI_FUNCTION_NAME ("tb_handle_to_object"); for (i = 0; i < ACPI_TABLE_MAX; i++) { - list_head = &acpi_gbl_acpi_tables[i]; - do { - if (list_head->table_id == table_id) { - *table_desc = list_head; + table_desc = acpi_gbl_table_lists[i].next; + while (table_desc) { + if (table_desc->table_id == table_id) { + *return_table_desc = table_desc; return (AE_OK); } - list_head = list_head->next; - - } while (list_head != &acpi_gbl_acpi_tables[i]); - } - - - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Table_id=%X does not exist\n", table_id)); - return (AE_BAD_PARAMETER); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_tb_system_table_pointer - * - * PARAMETERS: *Where - Pointer to be examined - * - * RETURN: TRUE if Where is within the AML stream (in one of the ACPI - * system tables such as the DSDT or an SSDT.) - * FALSE otherwise - * - ******************************************************************************/ - -u8 -acpi_tb_system_table_pointer ( - void *where) -{ - u32 i; - acpi_table_desc *table_desc; - acpi_table_header *table; - - - /* No function trace, called too often! */ - - - /* Ignore null pointer */ - - if (!where) { - return (FALSE); - } - - - /* Check for a pointer within the DSDT */ - - if ((acpi_gbl_DSDT) && - (IS_IN_ACPI_TABLE (where, acpi_gbl_DSDT))) { - return (TRUE); - } - - - /* Check each of the loaded SSDTs (if any)*/ - - table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_SSDT]; - - for (i = 0; i < acpi_gbl_acpi_tables[ACPI_TABLE_SSDT].count; i++) { - table = table_desc->pointer; - - if (IS_IN_ACPI_TABLE (where, table)) { - return (TRUE); + table_desc = table_desc->next; } - - table_desc = table_desc->next; } - - /* Check each of the loaded PSDTs (if any)*/ - - table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_PSDT]; - - for (i = 0; i < acpi_gbl_acpi_tables[ACPI_TABLE_PSDT].count; i++) { - table = table_desc->pointer; - - if (IS_IN_ACPI_TABLE (where, table)) { - return (TRUE); - } - - table_desc = table_desc->next; - } - - - /* Pointer does not point into any system table */ - - return (FALSE); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "table_id=%X does not exist\n", table_id)); + return (AE_BAD_PARAMETER); } /******************************************************************************* * - * FUNCTION: Acpi_tb_validate_table_header + * FUNCTION: acpi_tb_validate_table_header * - * PARAMETERS: Table_header - Logical pointer to the table + * PARAMETERS: table_header - Logical pointer to the table * * RETURN: Status * @@ -168,52 +108,52 @@ * name * 3) Table must be readable for length specified in the header * 4) Table checksum must be valid (with the exception of the FACS - * which has no checksum for some odd reason) + * which has no checksum because it contains variable fields) * ******************************************************************************/ acpi_status acpi_tb_validate_table_header ( - acpi_table_header *table_header) + struct acpi_table_header *table_header) { - acpi_name signature; + acpi_name signature; - PROC_NAME ("Tb_validate_table_header"); + ACPI_FUNCTION_NAME ("tb_validate_table_header"); /* Verify that this is a valid address */ - if 
(!acpi_os_readable (table_header, sizeof (acpi_table_header))) { + if (!acpi_os_readable (table_header, sizeof (struct acpi_table_header))) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Cannot read table header at %p\n", table_header)); return (AE_BAD_ADDRESS); } - /* Ensure that the signature is 4 ASCII characters */ - MOVE_UNALIGNED32_TO_32 (&signature, &table_header->signature); + ACPI_MOVE_32_TO_32 (&signature, table_header->signature); if (!acpi_ut_valid_acpi_name (signature)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Table signature at %p [%p] has invalid characters\n", table_header, &signature)); - REPORT_WARNING (("Invalid table signature %4.4s found\n", (char*)&signature)); - DUMP_BUFFER (table_header, sizeof (acpi_table_header)); + ACPI_REPORT_WARNING (("Invalid table signature found: [%4.4s]\n", + (char *) &signature)); + ACPI_DUMP_BUFFER (table_header, sizeof (struct acpi_table_header)); return (AE_BAD_SIGNATURE); } - /* Validate the table length */ - if (table_header->length < sizeof (acpi_table_header)) { + if (table_header->length < sizeof (struct acpi_table_header)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid length in table header %p name %4.4s\n", - table_header, (char*)&signature)); + table_header, (char *) &signature)); - REPORT_WARNING (("Invalid table header length found\n")); - DUMP_BUFFER (table_header, sizeof (acpi_table_header)); + ACPI_REPORT_WARNING (("Invalid table header length (0x%X) found\n", + (u32) table_header->length)); + ACPI_DUMP_BUFFER (table_header, sizeof (struct acpi_table_header)); return (AE_BAD_HEADER); } @@ -223,90 +163,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_tb_map_acpi_table - * - * PARAMETERS: Physical_address - Physical address of table to map - * *Size - Size of the table. If zero, the size - * from the table header is used. - * Actual size is returned here. - * **Logical_address - Logical address of mapped table - * - * RETURN: Logical address of the mapped table. - * - * DESCRIPTION: Maps the physical address of table into a logical address - * - ******************************************************************************/ - -acpi_status -acpi_tb_map_acpi_table ( - ACPI_PHYSICAL_ADDRESS physical_address, - u32 *size, - acpi_table_header **logical_address) -{ - acpi_table_header *table; - u32 table_size = *size; - acpi_status status = AE_OK; - - - PROC_NAME ("Tb_map_acpi_table"); - - - /* If size is zero, look at the table header to get the actual size */ - - if ((*size) == 0) { - /* Get the table header so we can extract the table length */ - - status = acpi_os_map_memory (physical_address, sizeof (acpi_table_header), - (void **) &table); - if (ACPI_FAILURE (status)) { - return (status); - } - - /* Extract the full table length before we delete the mapping */ - - table_size = table->length; - - /* - * Validate the header and delete the mapping. - * We will create a mapping for the full table below. 
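
[Illustrative helper, not in this patch: the checksum rule that acpi_tb_verify_table_checksum() enforces below is symmetric. Whoever builds a table chooses the checksum byte so that the whole image sums to zero; verification then just re-sums the image and expects zero.]

	static void
	set_table_checksum (
		struct acpi_table_header        *table)
	{
		u8                      sum;

		/* Sum the image with the checksum field zeroed, then store its complement */

		table->checksum = 0;
		sum = acpi_tb_checksum (table, table->length);
		table->checksum = (u8) (0 - sum);
	}
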
- */ - status = acpi_tb_validate_table_header (table); - - /* Always unmap the memory for the header */ - - acpi_os_unmap_memory (table, sizeof (acpi_table_header)); - - /* Exit if header invalid */ - - if (ACPI_FAILURE (status)) { - return (status); - } - } - - - /* Map the physical memory for the correct length */ - - status = acpi_os_map_memory (physical_address, table_size, (void **) &table); - if (ACPI_FAILURE (status)) { - return (status); - } - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "Mapped memory for ACPI table, length=%d(%X) at %p\n", - table_size, table_size, table)); - - *size = table_size; - *logical_address = table; - - return (status); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_tb_verify_table_checksum + * FUNCTION: acpi_tb_verify_table_checksum * - * PARAMETERS: *Table_header - ACPI table to verify + * PARAMETERS: *table_header - ACPI table to verify * * RETURN: 8 bit checksum of table * @@ -317,13 +176,13 @@ acpi_status acpi_tb_verify_table_checksum ( - acpi_table_header *table_header) + struct acpi_table_header *table_header) { - u8 checksum; - acpi_status status = AE_OK; + u8 checksum; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Tb_verify_table_checksum"); + ACPI_FUNCTION_TRACE ("tb_verify_table_checksum"); /* Compute the checksum on the table */ @@ -333,20 +192,18 @@ /* Return the appropriate exception */ if (checksum) { - REPORT_WARNING (("Invalid checksum (%X) in table %4.4s\n", - checksum, (char*)&table_header->signature)); + ACPI_REPORT_WARNING (("Invalid checksum in table [%4.4s] (%02X, sum %02X is not zero)\n", + table_header->signature, (u32) table_header->checksum, (u32) checksum)); status = AE_BAD_CHECKSUM; } - - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_tb_checksum + * FUNCTION: acpi_tb_checksum * * PARAMETERS: Buffer - Buffer to checksum * Length - Size of the buffer @@ -359,12 +216,12 @@ u8 acpi_tb_checksum ( - void *buffer, - u32 length) + void *buffer, + u32 length) { - u8 *limit; - u8 *rover; - u8 sum = 0; + const u8 *limit; + const u8 *rover; + u8 sum = 0; if (buffer && length) { @@ -376,7 +233,6 @@ sum = (u8) (sum + *rover); } } - return (sum); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables/tbxface.c linux.21rc5-ac2/drivers/acpi/tables/tbxface.c --- linux.21rc5/drivers/acpi/tables/tbxface.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables/tbxface.c 2003-05-29 01:02:16.000000000 +0100 @@ -2,42 +2,59 @@ * * Module Name: tbxface - Public interfaces to the ACPI subsystem * ACPI table oriented interfaces - * $Revision: 45 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "actables.h" +#include <acpi/acpi.h> +#include <acpi/acnamesp.h> +#include <acpi/actables.h> #define _COMPONENT ACPI_TABLES - MODULE_NAME ("tbxface") + ACPI_MODULE_NAME ("tbxface") /******************************************************************************* * - * FUNCTION: Acpi_load_tables + * FUNCTION: acpi_load_tables * * PARAMETERS: None * @@ -51,59 +68,60 @@ acpi_status acpi_load_tables (void) { - ACPI_PHYSICAL_ADDRESS rsdp_physical_address; - acpi_status status; - u32 number_of_tables = 0; + struct acpi_pointer rsdp_address; + acpi_status status; - FUNCTION_TRACE ("Acpi_load_tables"); + ACPI_FUNCTION_TRACE ("acpi_load_tables"); /* Get the RSDP */ status = acpi_os_get_root_pointer (ACPI_LOGICAL_ADDRESSING, - &rsdp_physical_address); + &rsdp_address); if (ACPI_FAILURE (status)) { - REPORT_ERROR (("Acpi_load_tables: Could not get RSDP, %s\n", + ACPI_REPORT_ERROR (("acpi_load_tables: Could not get RSDP, %s\n", acpi_format_exception (status))); goto error_exit; } /* Map and validate the RSDP */ - status = acpi_tb_verify_rsdp (rsdp_physical_address); + acpi_gbl_table_flags = rsdp_address.pointer_type; + + status = acpi_tb_verify_rsdp (&rsdp_address); if (ACPI_FAILURE (status)) { - REPORT_ERROR (("Acpi_load_tables: RSDP Failed validation: %s\n", + ACPI_REPORT_ERROR (("acpi_load_tables: RSDP Failed validation: %s\n", acpi_format_exception (status))); goto error_exit; } /* Get the RSDT via the RSDP */ - status = acpi_tb_get_table_rsdt (&number_of_tables); + status = acpi_tb_get_table_rsdt (); if (ACPI_FAILURE (status)) { - REPORT_ERROR (("Acpi_load_tables: Could not load RSDT: %s\n", + ACPI_REPORT_ERROR (("acpi_load_tables: Could not load RSDT: %s\n", acpi_format_exception (status))); goto error_exit; } - /* Now get the rest of the tables */ + /* Now get the tables needed by this subsystem (FADT, DSDT, etc.)
*/ - status = acpi_tb_get_all_tables (number_of_tables, NULL); + status = acpi_tb_get_required_tables (); if (ACPI_FAILURE (status)) { - REPORT_ERROR (("Acpi_load_tables: Error getting required tables (DSDT/FADT/FACS): %s\n", + ACPI_REPORT_ERROR (("acpi_load_tables: Error getting required tables (DSDT/FADT/FACS): %s\n", acpi_format_exception (status))); goto error_exit; } - ACPI_DEBUG_PRINT ((ACPI_DB_OK, "ACPI Tables successfully loaded\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "ACPI Tables successfully acquired\n")); /* Load the namespace from the tables */ status = acpi_ns_load_namespace (); if (ACPI_FAILURE (status)) { - REPORT_ERROR (("Acpi_load_tables: Could not load namespace: %s\n", + ACPI_REPORT_ERROR (("acpi_load_tables: Could not load namespace: %s\n", acpi_format_exception (status))); goto error_exit; } @@ -112,7 +130,7 @@ error_exit: - REPORT_ERROR (("Acpi_load_tables: Could not load tables: %s\n", + ACPI_REPORT_ERROR (("acpi_load_tables: Could not load tables: %s\n", acpi_format_exception (status))); return_ACPI_STATUS (status); @@ -121,9 +139,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_load_table + * FUNCTION: acpi_load_table * - * PARAMETERS: Table_ptr - pointer to a buffer containing the entire + * PARAMETERS: table_ptr - pointer to a buffer containing the entire * table to be loaded * * RETURN: Status @@ -137,13 +155,14 @@ acpi_status acpi_load_table ( - acpi_table_header *table_ptr) + struct acpi_table_header *table_ptr) { - acpi_status status; - acpi_table_desc table_info; + acpi_status status; + struct acpi_table_desc table_info; + struct acpi_pointer address; - FUNCTION_TRACE ("Acpi_load_table"); + ACPI_FUNCTION_TRACE ("acpi_load_table"); if (!table_ptr) { @@ -152,40 +171,59 @@ /* Copy the table to a local buffer */ - status = acpi_tb_get_table (0, table_ptr, &table_info); + address.pointer_type = ACPI_LOGICAL_POINTER | ACPI_LOGICAL_ADDRESSING; + address.pointer.logical = table_ptr; + + status = acpi_tb_get_table_body (&address, table_ptr, &table_info); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } /* Install the new table into the local data structures */ - status = acpi_tb_install_table (NULL, &table_info); + status = acpi_tb_install_table (&table_info); if (ACPI_FAILURE (status)) { - /* Free table allocated by Acpi_tb_get_table */ + /* Free table allocated by acpi_tb_get_table_body */ acpi_tb_delete_single_table (&table_info); return_ACPI_STATUS (status); } + /* Convert the table to common format if necessary */ + + switch (table_info.type) { + case ACPI_TABLE_FADT: + + status = acpi_tb_convert_table_fadt (); + break; + + case ACPI_TABLE_FACS: + + status = acpi_tb_build_common_facs (&table_info); + break; + + default: + /* Load table into namespace if it contains executable AML */ + + status = acpi_ns_load_table (table_info.installed_desc, acpi_gbl_root_node); + break; + } - status = acpi_ns_load_table (table_info.installed_desc, acpi_gbl_root_node); if (ACPI_FAILURE (status)) { /* Uninstall table and free the buffer */ - acpi_tb_uninstall_table (table_info.installed_desc); - return_ACPI_STATUS (status); + (void) acpi_tb_uninstall_table (table_info.installed_desc); } - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_unload_table + * FUNCTION: acpi_unload_table * - * PARAMETERS: Table_type - Type of table to be unloaded + * PARAMETERS: table_type - Type of table to be unloaded * * RETURN: Status * @@ -195,12 
+233,12 @@ acpi_status acpi_unload_table ( - acpi_table_type table_type) + acpi_table_type table_type) { - acpi_table_desc *list_head; + struct acpi_table_desc *table_desc; - FUNCTION_TRACE ("Acpi_unload_table"); + ACPI_FUNCTION_TRACE ("acpi_unload_table"); /* Parameter validation */ @@ -212,39 +250,39 @@ /* Find all tables of the requested type */ - list_head = &acpi_gbl_acpi_tables[table_type]; - do { + table_desc = acpi_gbl_table_lists[table_type].next; + while (table_desc) { /* * Delete all namespace entries owned by this table. Note that these * entries can appear anywhere in the namespace by virtue of the AML * "Scope" operator. Thus, we need to track ownership by an ID, not * simply a position within the hierarchy */ - acpi_ns_delete_namespace_by_owner (list_head->table_id); - - /* Delete (or unmap) the actual table */ + acpi_ns_delete_namespace_by_owner (table_desc->table_id); - acpi_tb_delete_acpi_table (table_type); + table_desc = table_desc->next; + } - } while (list_head != &acpi_gbl_acpi_tables[table_type]); + /* Delete (or unmap) all tables of this type */ + acpi_tb_delete_tables_by_type (table_type); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_get_table_header + * FUNCTION: acpi_get_table_header * - * PARAMETERS: Table_type - one of the defined table types + * PARAMETERS: table_type - one of the defined table types * Instance - the non zero instance of the table, allows * support for multiple tables of the same type - * see Acpi_gbl_Acpi_table_flag - * Out_table_header - pointer to the acpi_table_header if successful + * see acpi_gbl_acpi_table_flag + * out_table_header - pointer to the struct acpi_table_header if successful * * DESCRIPTION: This function is called to get an ACPI table header. The caller * supplies a pointer to a data area sufficient to contain an ACPI - * acpi_table_header structure. + * struct acpi_table_header structure. * * The header contains a length field that can be used to determine * the size of the buffer needed to contain the entire table.
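A note on the reworked unload loop above: namespace objects are purged by owning table ID rather than by tree position, because the AML Scope operator lets a table's objects land anywhere in the hierarchy. A minimal standalone sketch of that walk-then-purge shape, with hypothetical struct and callback names rather than the real ACPICA types:

    #include <stddef.h>

    struct table_desc {
            struct table_desc *next;
            unsigned int owner_id;          /* tags every namespace object the table created */
    };

    /* Visit every descriptor of one table type and purge its namespace
     * objects; the backing tables themselves are then freed in one pass. */
    static void unload_by_type(struct table_desc *head,
                               void (*delete_by_owner)(unsigned int))
    {
            struct table_desc *desc;

            for (desc = head; desc != NULL; desc = desc->next)
                    delete_by_owner(desc->owner_id);
    }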
This @@ -255,15 +293,15 @@ acpi_status acpi_get_table_header ( - acpi_table_type table_type, - u32 instance, - acpi_table_header *out_table_header) + acpi_table_type table_type, + u32 instance, + struct acpi_table_header *out_table_header) { - acpi_table_header *tbl_ptr; - acpi_status status; + struct acpi_table_header *tbl_ptr; + acpi_status status; - FUNCTION_TRACE ("Acpi_get_table_header"); + ACPI_FUNCTION_TRACE ("acpi_get_table_header"); if ((instance == 0) || @@ -275,7 +313,7 @@ /* Check the table type and instance */ if ((table_type > ACPI_TABLE_MAX) || - (IS_SINGLE_TABLE (acpi_gbl_acpi_table_data[table_type].flags) && + (ACPI_IS_SINGLE_TABLE (acpi_gbl_table_data[table_type].flags) && instance > 1)) { return_ACPI_STATUS (AE_BAD_PARAMETER); } @@ -298,8 +336,8 @@ /* * Copy the header to the caller's buffer */ - MEMCPY ((void *) out_table_header, (void *) tbl_ptr, - sizeof (acpi_table_header)); + ACPI_MEMCPY ((void *) out_table_header, (void *) tbl_ptr, + sizeof (struct acpi_table_header)); return_ACPI_STATUS (status); } @@ -307,54 +345,56 @@ /******************************************************************************* * - * FUNCTION: Acpi_get_table + * FUNCTION: acpi_get_table * - * PARAMETERS: Table_type - one of the defined table types + * PARAMETERS: table_type - one of the defined table types * Instance - the non zero instance of the table, allows * support for multiple tables of the same type - * see Acpi_gbl_Acpi_table_flag - * Ret_buffer - pointer to a structure containing a buffer to + * see acpi_gbl_acpi_table_flag + * ret_buffer - pointer to a structure containing a buffer to * receive the table * * RETURN: Status * * DESCRIPTION: This function is called to get an ACPI table. The caller - * supplies an Out_buffer large enough to contain the entire ACPI - * table. The caller should call the Acpi_get_table_header function + * supplies an out_buffer large enough to contain the entire ACPI + * table. The caller should call the acpi_get_table_header function * first to determine the buffer size needed. Upon completion - * the Out_buffer->Length field will indicate the number of bytes - * copied into the Out_buffer->Buf_ptr buffer. This table will be + * the out_buffer->Length field will indicate the number of bytes + * copied into the out_buffer->buf_ptr buffer. This table will be * a complete table including the header. 
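The sizing handshake that description implies looks roughly like this from the caller's side; a standalone sketch with hypothetical names (the real calls are acpi_get_table_header followed by acpi_get_table, and a too-small buffer comes back as AE_BUFFER_OVERFLOW with the needed length filled in):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct table_header {
            char     signature[4];
            uint32_t length;                /* total table size, header included */
    };

    static void *fetch_table(const void *raw_table)
    {
            struct table_header hdr;
            void *buf;

            memcpy(&hdr, raw_table, sizeof(hdr));       /* step 1: header only */
            buf = malloc(hdr.length);                   /* step 2: size taken from the header */
            if (buf)
                    memcpy(buf, raw_table, hdr.length); /* step 3: the whole table */
            return buf;
    }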
* ******************************************************************************/ acpi_status acpi_get_table ( - acpi_table_type table_type, - u32 instance, - acpi_buffer *ret_buffer) + acpi_table_type table_type, + u32 instance, + struct acpi_buffer *ret_buffer) { - acpi_table_header *tbl_ptr; - acpi_status status; - u32 ret_buf_len; + struct acpi_table_header *tbl_ptr; + acpi_status status; + acpi_size table_length; - FUNCTION_TRACE ("Acpi_get_table"); + ACPI_FUNCTION_TRACE ("acpi_get_table"); - /* - * If we have a buffer, we must have a length too - */ - if ((instance == 0) || - (!ret_buffer) || - ((!ret_buffer->pointer) && (ret_buffer->length))) { + /* Parameter validation */ + + if (instance == 0) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + status = acpi_ut_validate_buffer (ret_buffer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + /* Check the table type and instance */ if ((table_type > ACPI_TABLE_MAX) || - (IS_SINGLE_TABLE (acpi_gbl_acpi_table_data[table_type].flags) && + (ACPI_IS_SINGLE_TABLE (acpi_gbl_table_data[table_type].flags) && instance > 1)) { return_ACPI_STATUS (AE_BAD_PARAMETER); } @@ -368,38 +408,35 @@ } /* - * Acpi_tb_get_table_ptr will return a NULL pointer if the + * acpi_tb_get_table_ptr will return a NULL pointer if the * table is not loaded. */ if (tbl_ptr == NULL) { return_ACPI_STATUS (AE_NOT_EXIST); } - /* - * Got a table ptr, assume it's ok and copy it to the user's buffer - */ + /* Get the table length */ + if (table_type == ACPI_TABLE_RSDP) { /* * RSD PTR is the only "table" without a header */ - ret_buf_len = sizeof (RSDP_DESCRIPTOR); + table_length = sizeof (struct rsdp_descriptor); } else { - ret_buf_len = tbl_ptr->length; + table_length = (acpi_size) tbl_ptr->length; } - /* - * Verify we have space in the caller's buffer for the table - */ - if (ret_buffer->length < ret_buf_len) { - ret_buffer->length = ret_buf_len; - return_ACPI_STATUS (AE_BUFFER_OVERFLOW); - } + /* Validate/Allocate/Clear caller buffer */ - ret_buffer->length = ret_buf_len; + status = acpi_ut_initialize_buffer (ret_buffer, table_length); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } - MEMCPY ((void *) ret_buffer->pointer, (void *) tbl_ptr, ret_buf_len); + /* Copy the table to the buffer */ + ACPI_MEMCPY ((void *) ret_buffer->pointer, (void *) tbl_ptr, table_length); return_ACPI_STATUS (AE_OK); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables/tbxfroot.c linux.21rc5-ac2/drivers/acpi/tables/tbxfroot.c --- linux.21rc5/drivers/acpi/tables/tbxfroot.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables/tbxfroot.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,46 +1,302 @@ /****************************************************************************** * * Module Name: tbxfroot - Find the root ACPI table (RSDT) - * $Revision: 52 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "achware.h" -#include "actables.h" +#include +#include #define _COMPONENT ACPI_TABLES - MODULE_NAME ("tbxfroot") + ACPI_MODULE_NAME ("tbxfroot") -#define RSDP_CHECKSUM_LENGTH 20 + +/******************************************************************************* + * + * FUNCTION: acpi_tb_find_table + * + * PARAMETERS: Signature - String with ACPI table signature + * oem_id - String with the table OEM ID + * oem_table_id - String with the OEM Table ID. + * + * RETURN: Status + * + * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the + * Signature, OEM ID and OEM Table ID. 
+ * + ******************************************************************************/ + +acpi_status +acpi_tb_find_table ( + char *signature, + char *oem_id, + char *oem_table_id, + struct acpi_table_header **table_ptr) +{ + acpi_status status; + struct acpi_table_header *table; + + + ACPI_FUNCTION_TRACE ("tb_find_table"); + + + /* Validate string lengths */ + + if ((ACPI_STRLEN (signature) > ACPI_NAME_SIZE) || + (ACPI_STRLEN (oem_id) > sizeof (table->oem_id)) || + (ACPI_STRLEN (oem_table_id) > sizeof (table->oem_table_id))) { + return_ACPI_STATUS (AE_AML_STRING_LIMIT); + } + + /* Find the table */ + + status = acpi_get_firmware_table (signature, 1, + ACPI_LOGICAL_ADDRESSING, &table); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Check oem_id and oem_table_id */ + + if ((oem_id[0] && ACPI_STRCMP (oem_id, table->oem_id)) || + (oem_table_id[0] && ACPI_STRCMP (oem_table_id, table->oem_table_id))) { + return_ACPI_STATUS (AE_AML_NAME_NOT_FOUND); + } + + *table_ptr = table; + return_ACPI_STATUS (AE_OK); +} /******************************************************************************* * - * FUNCTION: Acpi_find_root_pointer + * FUNCTION: acpi_get_firmware_table * - * PARAMETERS: **Rsdp_physical_address - Where to place the RSDP address - * Flags - Logical/Physical addressing + * PARAMETERS: Signature - Any ACPI table signature + * Instance - the non zero instance of the table, allows + * support for multiple tables of the same type + * Flags - Physical/Virtual support + * ret_buffer - pointer to a structure containing a buffer to + * receive the table + * + * RETURN: Status + * + * DESCRIPTION: This function is called to get an ACPI table. The caller + * supplies an out_buffer large enough to contain the entire ACPI + * table. Upon completion + * the out_buffer->Length field will indicate the number of bytes + * copied into the out_buffer->buf_ptr buffer. This table will be + * a complete table including the header. + * + ******************************************************************************/ + +acpi_status +acpi_get_firmware_table ( + acpi_string signature, + u32 instance, + u32 flags, + struct acpi_table_header **table_pointer) +{ + struct acpi_pointer rsdp_address; + struct acpi_pointer address; + acpi_status status; + struct acpi_table_header header; + struct acpi_table_desc table_info; + struct acpi_table_desc rsdt_info; + u32 table_count; + u32 i; + u32 j; + + + ACPI_FUNCTION_TRACE ("acpi_get_firmware_table"); + + + /* + * Ensure that at least the table manager is initialized. 
We don't + * require that the entire ACPI subsystem is up for this interface + */ + + /* + * If we have a buffer, we must have a length too + */ + if ((instance == 0) || + (!signature) || + (!table_pointer)) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + rsdt_info.pointer = NULL; + + if (!acpi_gbl_RSDP) { + /* Get the RSDP */ + + status = acpi_os_get_root_pointer (flags, &rsdp_address); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "RSDP not found\n")); + return_ACPI_STATUS (AE_NO_ACPI_TABLES); + } + + /* Map and validate the RSDP */ + + if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) { + status = acpi_os_map_memory (rsdp_address.pointer.physical, sizeof (struct rsdp_descriptor), + (void **) &acpi_gbl_RSDP); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + else { + acpi_gbl_RSDP = rsdp_address.pointer.logical; + } + + /* + * The signature and checksum must both be correct + */ + if (ACPI_STRNCMP ((char *) acpi_gbl_RSDP, RSDP_SIG, sizeof (RSDP_SIG)-1) != 0) { + /* Nope, BAD Signature */ + + return_ACPI_STATUS (AE_BAD_SIGNATURE); + } + + if (acpi_tb_checksum (acpi_gbl_RSDP, ACPI_RSDP_CHECKSUM_LENGTH) != 0) { + /* Nope, BAD Checksum */ + + return_ACPI_STATUS (AE_BAD_CHECKSUM); + } + } + + /* Get the RSDT and validate it */ + + acpi_tb_get_rsdt_address (&address); + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "RSDP located at %p, RSDT physical=%8.8X%8.8X \n", + acpi_gbl_RSDP, + ACPI_HIDWORD (address.pointer.value), + ACPI_LODWORD (address.pointer.value))); + + /* Insert processor_mode flags */ + + address.pointer_type |= flags; + + status = acpi_tb_get_table (&address, &rsdt_info); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + status = acpi_tb_validate_rsdt (rsdt_info.pointer); + if (ACPI_FAILURE (status)) { + goto cleanup; + } + + /* Get the number of table pointers within the RSDT */ + + table_count = acpi_tb_get_table_count (acpi_gbl_RSDP, rsdt_info.pointer); + + address.pointer_type = acpi_gbl_table_flags | flags; + + /* + * Search the RSDT/XSDT for the correct instance of the + * requested table + */ + for (i = 0, j = 0; i < table_count; i++) { + /* Get the next table pointer, handle RSDT vs. 
XSDT */ + + if (acpi_gbl_RSDP->revision < 2) { + address.pointer.value = ((RSDT_DESCRIPTOR *) rsdt_info.pointer)->table_offset_entry[i]; + } + else { + address.pointer.value = + ((XSDT_DESCRIPTOR *) rsdt_info.pointer)->table_offset_entry[i]; + } + + /* Get the table header */ + + status = acpi_tb_get_table_header (&address, &header); + if (ACPI_FAILURE (status)) { + goto cleanup; + } + + /* Compare table signatures and table instance */ + + if (!ACPI_STRNCMP (header.signature, signature, ACPI_NAME_SIZE)) { + /* An instance of the table was found */ + + j++; + if (j >= instance) { + /* Found the correct instance, get the entire table */ + + status = acpi_tb_get_table_body (&address, &header, &table_info); + if (ACPI_FAILURE (status)) { + goto cleanup; + } + + *table_pointer = table_info.pointer; + goto cleanup; + } + } + } + + /* Did not find the table */ + + status = AE_NOT_EXIST; + + +cleanup: + acpi_os_unmap_memory (rsdt_info.pointer, (acpi_size) rsdt_info.pointer->length); + return_ACPI_STATUS (status); +} + + +/* TBD: Move to a new file */ + +#if ACPI_MACHINE_WIDTH != 16 + +/******************************************************************************* + * + * FUNCTION: acpi_find_root_pointer + * + * PARAMETERS: **rsdp_address - Where to place the RSDP address + * Flags - Logical/Physical addressing * * RETURN: Status, Physical address of the RSDP * @@ -50,35 +306,36 @@ acpi_status acpi_find_root_pointer ( - u32 flags, - ACPI_PHYSICAL_ADDRESS *rsdp_physical_address) + u32 flags, + struct acpi_pointer *rsdp_address) { - acpi_table_desc table_info; - acpi_status status; + struct acpi_table_desc table_info; + acpi_status status; - FUNCTION_TRACE ("Acpi_find_root_pointer"); + ACPI_FUNCTION_TRACE ("acpi_find_root_pointer"); /* Get the RSDP */ status = acpi_tb_find_rsdp (&table_info, flags); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "RSDP structure not found\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "RSDP structure not found, %s Flags=%X\n", + acpi_format_exception (status), flags)); return_ACPI_STATUS (AE_NO_ACPI_TABLES); } - *rsdp_physical_address = table_info.physical_address; - + rsdp_address->pointer_type = ACPI_PHYSICAL_POINTER; + rsdp_address->pointer.physical = table_info.physical_address; return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_tb_scan_memory_for_rsdp + * FUNCTION: acpi_tb_scan_memory_for_rsdp * - * PARAMETERS: Start_address - Starting pointer for search + * PARAMETERS: start_address - Starting pointer for search * Length - Maximum length to search * * RETURN: Pointer to the RSDP if found, otherwise NULL. 
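The scanner documented next walks a window on 16-byte boundaries looking for the 8-byte "RSD PTR " signature, then checks the ACPI 1.0 rule that the first 20 bytes of a valid RSDP sum to zero modulo 256. A freestanding sketch of that loop; the constants mirror ACPI_RSDP_SCAN_STEP and ACPI_RSDP_CHECKSUM_LENGTH, and the names are hypothetical:

    #include <string.h>

    #define RSDP_SIGNATURE   "RSD PTR "     /* trailing space is part of the signature */
    #define RSDP_STEP        16             /* RSDP sits on a 16-byte boundary */
    #define RSDP_CKSUM_LEN   20             /* ACPI 1.0 checksum covers the first 20 bytes */

    static unsigned char sum_bytes(const unsigned char *p, size_t len)
    {
            unsigned char sum = 0;

            while (len--)
                    sum += *p++;
            return sum;                     /* zero for a valid checksummed region */
    }

    static const unsigned char *scan_for_rsdp(const unsigned char *base, size_t len)
    {
            size_t off;

            for (off = 0; off + RSDP_CKSUM_LEN <= len; off += RSDP_STEP) {
                    const unsigned char *p = base + off;

                    if (memcmp(p, RSDP_SIGNATURE, sizeof(RSDP_SIGNATURE) - 1) == 0 &&
                        sum_bytes(p, RSDP_CKSUM_LEN) == 0)
                            return p;       /* signature and checksum both good */
            }
            return NULL;
    }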
@@ -89,27 +346,27 @@ u8 * acpi_tb_scan_memory_for_rsdp ( - u8 *start_address, - u32 length) + u8 *start_address, + u32 length) { - u32 offset; - u8 *mem_rover; + u32 offset; + u8 *mem_rover; - FUNCTION_TRACE ("Tb_scan_memory_for_rsdp"); + ACPI_FUNCTION_TRACE ("tb_scan_memory_for_rsdp"); /* Search from given start addr for the requested length */ for (offset = 0, mem_rover = start_address; offset < length; - offset += RSDP_SCAN_STEP, mem_rover += RSDP_SCAN_STEP) { + offset += ACPI_RSDP_SCAN_STEP, mem_rover += ACPI_RSDP_SCAN_STEP) { /* The signature and checksum must both be correct */ - if (STRNCMP ((NATIVE_CHAR *) mem_rover, + if (ACPI_STRNCMP ((char *) mem_rover, RSDP_SIG, sizeof (RSDP_SIG)-1) == 0 && - acpi_tb_checksum (mem_rover, RSDP_CHECKSUM_LENGTH) == 0) { + acpi_tb_checksum (mem_rover, ACPI_RSDP_CHECKSUM_LENGTH) == 0) { /* If so, we have found the RSDP */ ACPI_DEBUG_PRINT ((ACPI_DB_INFO, @@ -121,25 +378,24 @@ /* Searched entire block, no RSDP was found */ ACPI_DEBUG_PRINT ((ACPI_DB_INFO,"Searched entire block, no RSDP was found.\n")); - return_PTR (NULL); } /******************************************************************************* * - * FUNCTION: Acpi_tb_find_rsdp + * FUNCTION: acpi_tb_find_rsdp * - * PARAMETERS: *Table_info - Where the table info is returned + * PARAMETERS: *table_info - Where the table info is returned * Flags - Current memory mode (logical vs. * physical addressing) * * RETURN: Status * - * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor + * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor * pointer structure. If it is found, set *RSDP to point to it. * - * NOTE: The RSDP must be either in the first 1_k of the Extended + * NOTE: The RSDP must be either in the first 1_k of the Extended * BIOS Data Area or between E0000 and FFFFF (ACPI 1.0 section * 5.2.2; assertion #421).
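Those two architectural windows are exactly what acpi_tb_find_rsdp walks below. A sketch of the physical-addressing branch, reusing scan_for_rsdp() from the previous sketch; the window constants are assumptions mirroring ACPI_LO/HI_RSDP_WINDOW_*, and a real implementation maps the physical ranges first instead of dereferencing them directly:

    #define LO_WINDOW_BASE  0x0UL           /* EBDA paragraphs in low memory */
    #define LO_WINDOW_SIZE  0x400UL         /* first 1 KB */
    #define HI_WINDOW_BASE  0xE0000UL       /* E0000h through FFFFFh */
    #define HI_WINDOW_SIZE  0x20000UL

    static unsigned long find_rsdp_phys(void)
    {
            const unsigned char *hit;

            hit = scan_for_rsdp((const unsigned char *) LO_WINDOW_BASE, LO_WINDOW_SIZE);
            if (!hit)
                    hit = scan_for_rsdp((const unsigned char *) HI_WINDOW_BASE, HI_WINDOW_SIZE);
            return (unsigned long) hit;     /* 0 when the signature was never found */
    }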
* @@ -147,16 +403,16 @@ acpi_status acpi_tb_find_rsdp ( - acpi_table_desc *table_info, - u32 flags) + struct acpi_table_desc *table_info, + u32 flags) { - u8 *table_ptr; - u8 *mem_rover; - u64 phys_addr; - acpi_status status = AE_OK; + u8 *table_ptr; + u8 *mem_rover; + u64 phys_addr; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Tb_find_rsdp"); + ACPI_FUNCTION_TRACE ("tb_find_rsdp"); /* @@ -166,51 +422,52 @@ /* * 1) Search EBDA (low memory) paragraphs */ - status = acpi_os_map_memory (LO_RSDP_WINDOW_BASE, LO_RSDP_WINDOW_SIZE, + status = acpi_os_map_memory ((u64) ACPI_LO_RSDP_WINDOW_BASE, ACPI_LO_RSDP_WINDOW_SIZE, (void **) &table_ptr); if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not map memory at %X for length %X\n", + ACPI_LO_RSDP_WINDOW_BASE, ACPI_LO_RSDP_WINDOW_SIZE)); return_ACPI_STATUS (status); } - mem_rover = acpi_tb_scan_memory_for_rsdp (table_ptr, LO_RSDP_WINDOW_SIZE); - acpi_os_unmap_memory (table_ptr, LO_RSDP_WINDOW_SIZE); + mem_rover = acpi_tb_scan_memory_for_rsdp (table_ptr, ACPI_LO_RSDP_WINDOW_SIZE); + acpi_os_unmap_memory (table_ptr, ACPI_LO_RSDP_WINDOW_SIZE); if (mem_rover) { /* Found it, return the physical address */ - phys_addr = LO_RSDP_WINDOW_BASE; - phys_addr += (mem_rover - table_ptr); + phys_addr = ACPI_LO_RSDP_WINDOW_BASE; + phys_addr += ACPI_PTR_DIFF (mem_rover,table_ptr); table_info->physical_address = phys_addr; - return_ACPI_STATUS (AE_OK); } /* * 2) Search upper memory: 16-byte boundaries in E0000h-F0000h */ - status = acpi_os_map_memory (HI_RSDP_WINDOW_BASE, HI_RSDP_WINDOW_SIZE, + status = acpi_os_map_memory ((u64) ACPI_HI_RSDP_WINDOW_BASE, ACPI_HI_RSDP_WINDOW_SIZE, (void **) &table_ptr); if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not map memory at %X for length %X\n", + ACPI_HI_RSDP_WINDOW_BASE, ACPI_HI_RSDP_WINDOW_SIZE)); return_ACPI_STATUS (status); } - mem_rover = acpi_tb_scan_memory_for_rsdp (table_ptr, HI_RSDP_WINDOW_SIZE); - acpi_os_unmap_memory (table_ptr, HI_RSDP_WINDOW_SIZE); + mem_rover = acpi_tb_scan_memory_for_rsdp (table_ptr, ACPI_HI_RSDP_WINDOW_SIZE); + acpi_os_unmap_memory (table_ptr, ACPI_HI_RSDP_WINDOW_SIZE); if (mem_rover) { /* Found it, return the physical address */ - phys_addr = HI_RSDP_WINDOW_BASE; - phys_addr += (mem_rover - table_ptr); + phys_addr = ACPI_HI_RSDP_WINDOW_BASE; + phys_addr += ACPI_PTR_DIFF (mem_rover, table_ptr); table_info->physical_address = phys_addr; - return_ACPI_STATUS (AE_OK); } } - /* * Physical addressing */ @@ -218,212 +475,32 @@ /* * 1) Search EBDA (low memory) paragraphs */ - mem_rover = acpi_tb_scan_memory_for_rsdp ((u8 *) LO_RSDP_WINDOW_BASE, - LO_RSDP_WINDOW_SIZE); + mem_rover = acpi_tb_scan_memory_for_rsdp (ACPI_PHYSADDR_TO_PTR (ACPI_LO_RSDP_WINDOW_BASE), + ACPI_LO_RSDP_WINDOW_SIZE); if (mem_rover) { /* Found it, return the physical address */ - table_info->physical_address = (ACPI_TBLPTR) mem_rover; + table_info->physical_address = ACPI_TO_INTEGER (mem_rover); return_ACPI_STATUS (AE_OK); } /* * 2) Search upper memory: 16-byte boundaries in E0000h-F0000h */ - mem_rover = acpi_tb_scan_memory_for_rsdp ((u8 *) HI_RSDP_WINDOW_BASE, - HI_RSDP_WINDOW_SIZE); + mem_rover = acpi_tb_scan_memory_for_rsdp (ACPI_PHYSADDR_TO_PTR (ACPI_HI_RSDP_WINDOW_BASE), + ACPI_HI_RSDP_WINDOW_SIZE); if (mem_rover) { /* Found it, return the physical address */ - table_info->physical_address = (ACPI_TBLPTR) mem_rover; + table_info->physical_address = ACPI_TO_INTEGER (mem_rover); return_ACPI_STATUS (AE_OK); } } - /* RSDP signature was not found */ return_ACPI_STATUS 
(AE_NOT_FOUND); } - -/******************************************************************************* - * - * FUNCTION: Acpi_get_firmware_table - * - * PARAMETERS: Signature - Any ACPI table signature - * Instance - the non zero instance of the table, allows - * support for multiple tables of the same type - * Flags - 0: Physical/Virtual support - * Ret_buffer - pointer to a structure containing a buffer to - * receive the table - * - * RETURN: Status - * - * DESCRIPTION: This function is called to get an ACPI table. The caller - * supplies an Out_buffer large enough to contain the entire ACPI - * table. Upon completion - * the Out_buffer->Length field will indicate the number of bytes - * copied into the Out_buffer->Buf_ptr buffer. This table will be - * a complete table including the header. - * - ******************************************************************************/ - -acpi_status -acpi_get_firmware_table ( - acpi_string signature, - u32 instance, - u32 flags, - acpi_table_header **table_pointer) -{ - ACPI_PHYSICAL_ADDRESS physical_address; - acpi_table_header *rsdt_ptr = NULL; - acpi_table_header *table_ptr; - acpi_status status; - u32 rsdt_size = 0; - u32 table_size; - u32 table_count; - u32 i; - u32 j; - - - FUNCTION_TRACE ("Acpi_get_firmware_table"); - - - /* - * Ensure that at least the table manager is initialized. We don't - * require that the entire ACPI subsystem is up for this interface - */ - - /* - * If we have a buffer, we must have a length too - */ - if ((instance == 0) || - (!signature) || - (!table_pointer)) { - return_ACPI_STATUS (AE_BAD_PARAMETER); - } - - if (!acpi_gbl_RSDP) { - /* Get the RSDP */ - - status = acpi_os_get_root_pointer (flags, &physical_address); - if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "RSDP not found\n")); - return_ACPI_STATUS (AE_NO_ACPI_TABLES); - } - - /* Map and validate the RSDP */ - - if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) { - status = acpi_os_map_memory (physical_address, sizeof (RSDP_DESCRIPTOR), - (void **) &acpi_gbl_RSDP); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - } - else { - acpi_gbl_RSDP = (void *) (NATIVE_UINT) physical_address; - } - - /* - * The signature and checksum must both be correct - */ - if (STRNCMP ((NATIVE_CHAR *) acpi_gbl_RSDP, RSDP_SIG, sizeof (RSDP_SIG)-1) != 0) { - /* Nope, BAD Signature */ - - status = AE_BAD_SIGNATURE; - goto cleanup; - } - - if (acpi_tb_checksum (acpi_gbl_RSDP, RSDP_CHECKSUM_LENGTH) != 0) { - /* Nope, BAD Checksum */ - - status = AE_BAD_CHECKSUM; - goto cleanup; - } - } - - - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "RSDP located at %p, RSDT physical=%8.8X%8.8X \n", - acpi_gbl_RSDP, HIDWORD(acpi_gbl_RSDP->rsdt_physical_address), - LODWORD(acpi_gbl_RSDP->rsdt_physical_address))); - - - /* Get the RSDT and validate it */ - - physical_address = acpi_tb_get_rsdt_address (); - status = acpi_tb_get_table_pointer (physical_address, flags, &rsdt_size, &rsdt_ptr); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - status = acpi_tb_validate_rsdt (rsdt_ptr); - if (ACPI_FAILURE (status)) { - goto cleanup; - } - - - /* Get the number of table pointers within the RSDT */ - - table_count = acpi_tb_get_table_count (acpi_gbl_RSDP, rsdt_ptr); - - - /* - * Search the RSDT/XSDT for the correct instance of the - * requested table - */ - for (i = 0, j = 0; i < table_count; i++) { - /* Get the next table pointer */ - - if (acpi_gbl_RSDP->revision < 2) { - physical_address = ((RSDT_DESCRIPTOR *) rsdt_ptr)->table_offset_entry[i]; - } 
- else { - physical_address = (ACPI_PHYSICAL_ADDRESS) - ACPI_GET_ADDRESS (((xsdt_descriptor *) rsdt_ptr)->table_offset_entry[i]); - } - - /* Get addressibility if necessary */ - - status = acpi_tb_get_table_pointer (physical_address, flags, &table_size, &table_ptr); - if (ACPI_FAILURE (status)) { - goto cleanup; - } - - /* Compare table signatures and table instance */ - - if (!STRNCMP ((char *) table_ptr, signature, STRLEN (signature))) { - /* An instance of the table was found */ - - j++; - if (j >= instance) { - /* Found the correct instance */ - - *table_pointer = table_ptr; - goto cleanup; - } - } - - /* Delete table mapping if using virtual addressing */ - - if ((table_size) && - ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING)) { - acpi_os_unmap_memory (table_ptr, table_size); - } - } - - /* Did not find the table */ - - status = AE_NOT_EXIST; - - -cleanup: - if (rsdt_size) { - acpi_os_unmap_memory (rsdt_ptr, rsdt_size); - } - return_ACPI_STATUS (status); -} - +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/tables.c linux.21rc5-ac2/drivers/acpi/tables.c --- linux.21rc5/drivers/acpi/tables.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/tables.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,564 @@ +/* + * acpi_tables.c - ACPI Boot-Time Table Parsing + * + * Copyright (C) 2001 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PREFIX "ACPI: " + +#define ACPI_MAX_TABLES 256 + +static char *acpi_table_signatures[ACPI_TABLE_COUNT] = { + [ACPI_TABLE_UNKNOWN] = "????", + [ACPI_APIC] = "APIC", + [ACPI_BOOT] = "BOOT", + [ACPI_DBGP] = "DBGP", + [ACPI_DSDT] = "DSDT", + [ACPI_ECDT] = "ECDT", + [ACPI_ETDT] = "ETDT", + [ACPI_FADT] = "FACP", + [ACPI_FACS] = "FACS", + [ACPI_OEMX] = "OEM", + [ACPI_PSDT] = "PSDT", + [ACPI_SBST] = "SBST", + [ACPI_SLIT] = "SLIT", + [ACPI_SPCR] = "SPCR", + [ACPI_SRAT] = "SRAT", + [ACPI_SSDT] = "SSDT", + [ACPI_SPMI] = "SPMI", + [ACPI_HPET] = "HPET" +}; + +/* System Description Table (RSDT/XSDT) */ +struct acpi_table_sdt { + unsigned long pa; /* Physical Address */ + unsigned long count; /* Table count */ + struct { + unsigned long pa; + enum acpi_table_id id; + unsigned long size; + } entry[ACPI_MAX_TABLES]; +} __attribute__ ((packed)); + +static struct acpi_table_sdt sdt; + +void +acpi_table_print ( + struct acpi_table_header *header, + unsigned long phys_addr) +{ + char *name = NULL; + + if (!header) + return; + + /* Some table signatures aren't good table names */ + + if (!strncmp((char *) &header->signature, + acpi_table_signatures[ACPI_APIC], + sizeof(header->signature))) { + name = "MADT"; + } + else if (!strncmp((char *) &header->signature, + acpi_table_signatures[ACPI_FADT], + sizeof(header->signature))) { + name = "FADT"; + } + else + name = header->signature; + + printk(KERN_INFO PREFIX "%.4s (v%3.3d %6.6s %8.8s %5.5d.%5.5d) @ 0x%p\n", + name, header->revision, header->oem_id, + header->oem_table_id, header->oem_revision >> 16, + header->oem_revision & 0xffff, (void *) phys_addr); +} + + +void +acpi_table_print_madt_entry ( + acpi_table_entry_header *header) +{ + if (!header) + return; + + switch (header->type) { + + case ACPI_MADT_LAPIC: + { + struct acpi_table_lapic *p = + (struct acpi_table_lapic*) header; + printk(KERN_INFO PREFIX "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", + p->acpi_id, p->id, p->flags.enabled?"enabled":"disabled"); + } + break; + + case ACPI_MADT_IOAPIC: + { + struct acpi_table_ioapic *p = + (struct acpi_table_ioapic*) header; + printk(KERN_INFO PREFIX "IOAPIC (id[0x%02x] address[0x%08x] global_irq_base[0x%x])\n", + p->id, p->address, p->global_irq_base); + } + break; + + case ACPI_MADT_INT_SRC_OVR: + { + struct acpi_table_int_src_ovr *p = + (struct acpi_table_int_src_ovr*) header; + printk(KERN_INFO PREFIX "INT_SRC_OVR (bus[%d] irq[0x%x] global_irq[0x%x] polarity[0x%x] trigger[0x%x])\n", + p->bus, p->bus_irq, p->global_irq, p->flags.polarity, p->flags.trigger); + } + break; + + case ACPI_MADT_NMI_SRC: + { + struct acpi_table_nmi_src *p = + (struct acpi_table_nmi_src*) header; + printk(KERN_INFO PREFIX "NMI_SRC (polarity[0x%x] trigger[0x%x] global_irq[0x%x])\n", + p->flags.polarity, p->flags.trigger, p->global_irq); + } + break; + + case ACPI_MADT_LAPIC_NMI: + { + struct acpi_table_lapic_nmi *p = + (struct acpi_table_lapic_nmi*) header; + printk(KERN_INFO PREFIX "LAPIC_NMI (acpi_id[0x%02x] polarity[0x%x] trigger[0x%x] lint[0x%x])\n", + p->acpi_id, p->flags.polarity, p->flags.trigger, p->lint); + } + break; + + case ACPI_MADT_LAPIC_ADDR_OVR: + { + 
struct acpi_table_lapic_addr_ovr *p = + (struct acpi_table_lapic_addr_ovr*) header; + printk(KERN_INFO PREFIX "LAPIC_ADDR_OVR (address[%p])\n", + (void *) (unsigned long) p->address); + } + break; + + case ACPI_MADT_IOSAPIC: + { + struct acpi_table_iosapic *p = + (struct acpi_table_iosapic*) header; + printk(KERN_INFO PREFIX "IOSAPIC (id[0x%x] global_irq_base[0x%x] address[%p])\n", + p->id, p->global_irq_base, (void *) (unsigned long) p->address); + } + break; + + case ACPI_MADT_LSAPIC: + { + struct acpi_table_lsapic *p = + (struct acpi_table_lsapic*) header; + printk(KERN_INFO PREFIX "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n", + p->acpi_id, p->id, p->eid, p->flags.enabled?"enabled":"disabled"); + } + break; + + case ACPI_MADT_PLAT_INT_SRC: + { + struct acpi_table_plat_int_src *p = + (struct acpi_table_plat_int_src*) header; + printk(KERN_INFO PREFIX "PLAT_INT_SRC (polarity[0x%x] trigger[0x%x] type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n", + p->flags.polarity, p->flags.trigger, p->type, p->id, p->eid, p->iosapic_vector, p->global_irq); + } + break; + + default: + printk(KERN_WARNING PREFIX "Found unsupported MADT entry (type = 0x%x)\n", + header->type); + break; + } +} + + +static int +acpi_table_compute_checksum ( + void *table_pointer, + unsigned long length) +{ + u8 *p = (u8 *) table_pointer; + unsigned long remains = length; + unsigned long sum = 0; + + if (!p || !length) + return -EINVAL; + + while (remains--) + sum += *p++; + + return (sum & 0xFF); +} + +int __init +acpi_get_table_header_early ( + enum acpi_table_id id, + struct acpi_table_header **header) +{ + int i; + enum acpi_table_id temp_id; + + /* DSDT is different from the rest */ + if (id == ACPI_DSDT) + temp_id = ACPI_FADT; + else + temp_id = id; + + /* Locate the table. */ + + for (i = 0; i < sdt.count; i++) { + if (sdt.entry[i].id != temp_id) + continue; + *header = (void *) + __acpi_map_table(sdt.entry[i].pa, sdt.entry[i].size); + if (!*header) { + printk(KERN_WARNING PREFIX "Unable to map %s\n", + acpi_table_signatures[temp_id]); + return -ENODEV; + } + break; + } + + if (!*header) { + printk(KERN_WARNING PREFIX "%s not present\n", + acpi_table_signatures[id]); + return -ENODEV; + } + + /* Map the DSDT header via the pointer in the FADT */ + if (id == ACPI_DSDT) { + struct acpi_table_fadt *fadt = (struct acpi_table_fadt *) *header; + + *header = (void *) __acpi_map_table(fadt->dsdt_addr, + sizeof(struct acpi_table_header)); + if (!*header) { + printk(KERN_WARNING PREFIX "Unable to map DSDT\n"); + return -ENODEV; + } + } + + return 0; +} + + +int __init +acpi_table_parse_madt_family ( + enum acpi_table_id id, + unsigned long madt_size, + int entry_id, + acpi_madt_entry_handler handler) +{ + void *madt = NULL; + acpi_table_entry_header *entry = NULL; + unsigned long count = 0; + unsigned long madt_end = 0; + int i = 0; + + if (!handler) + return -EINVAL; + + /* Locate the MADT (if exists). There should only be one. */ + + for (i = 0; i < sdt.count; i++) { + if (sdt.entry[i].id != id) + continue; + madt = (void *) + __acpi_map_table(sdt.entry[i].pa, sdt.entry[i].size); + if (!madt) { + printk(KERN_WARNING PREFIX "Unable to map %s\n", + acpi_table_signatures[id]); + return -ENODEV; + } + break; + } + + if (!madt) { + printk(KERN_WARNING PREFIX "%s not present\n", + acpi_table_signatures[id]); + return -ENODEV; + } + + madt_end = (unsigned long) madt + sdt.entry[i].size; + + /* Parse all entries looking for a match. 
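The loop that follows leans on MADT records being self-sizing: every entry starts with a two-byte (type, length) header, and the parser advances by each entry's own length. A standalone sketch of that walk with hypothetical type names; the length sanity check is an addition for safety, not present in the code above:

    #include <stdint.h>

    struct madt_entry_header {
            uint8_t type;
            uint8_t length;                 /* size of this entry, header included */
    };

    static unsigned long walk_madt(const uint8_t *cur, const uint8_t *end,
                                   int wanted_type,
                                   void (*handler)(const struct madt_entry_header *))
    {
            unsigned long count = 0;

            while (cur + sizeof(struct madt_entry_header) <= end) {
                    const struct madt_entry_header *e =
                            (const struct madt_entry_header *) cur;

                    if (e->length < sizeof(*e))
                            break;          /* malformed entry; avoid an endless loop */
                    if (e->type == wanted_type) {
                            count++;
                            handler(e);
                    }
                    cur += e->length;       /* entries are packed back to back */
            }
            return count;
    }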
*/ + + entry = (acpi_table_entry_header *) + ((unsigned long) madt + madt_size); + + while (((unsigned long) entry) < madt_end) { + if (entry->type == entry_id) { + count++; + handler(entry); + } + entry = (acpi_table_entry_header *) + ((unsigned long) entry + entry->length); + } + + return count; +} + + +int __init +acpi_table_parse_madt ( + enum acpi_madt_entry_id id, + acpi_madt_entry_handler handler) +{ + return acpi_table_parse_madt_family(ACPI_APIC, sizeof(struct acpi_table_madt), + id, handler); +} + + +int __init +acpi_table_parse ( + enum acpi_table_id id, + acpi_table_handler handler) +{ + int count = 0; + int i = 0; + + if (!handler) + return -EINVAL; + + for (i = 0; i < sdt.count; i++) { + if (sdt.entry[i].id != id) + continue; + handler(sdt.entry[i].pa, sdt.entry[i].size); + count++; + } + + return count; +} + + +static int __init +acpi_table_get_sdt ( + struct acpi_table_rsdp *rsdp) +{ + struct acpi_table_header *header = NULL; + int i, id = 0; + + if (!rsdp) + return -EINVAL; + + /* First check XSDT (but only on ACPI 2.0-compatible systems) */ + + if ((rsdp->revision >= 2) && + (((struct acpi20_table_rsdp*)rsdp)->xsdt_address)) { + + struct acpi_table_xsdt *mapped_xsdt = NULL; + + sdt.pa = ((struct acpi20_table_rsdp*)rsdp)->xsdt_address; + + /* map in just the header */ + header = (struct acpi_table_header *) + __acpi_map_table(sdt.pa, sizeof(struct acpi_table_header)); + + if (!header) { + printk(KERN_WARNING PREFIX "Unable to map XSDT header\n"); + return -ENODEV; + } + + /* remap in the entire table before processing */ + mapped_xsdt = (struct acpi_table_xsdt *) + __acpi_map_table(sdt.pa, header->length); + if (!mapped_xsdt) { + printk(KERN_WARNING PREFIX "Unable to map XSDT\n"); + return -ENODEV; + } + header = &mapped_xsdt->header; + + if (strncmp(header->signature, "XSDT", 4)) { + printk(KERN_WARNING PREFIX "XSDT signature incorrect\n"); + return -ENODEV; + } + + if (acpi_table_compute_checksum(header, header->length)) { + printk(KERN_WARNING PREFIX "Invalid XSDT checksum\n"); + return -ENODEV; + } + + sdt.count = (header->length - sizeof(struct acpi_table_header)) >> 3; + if (sdt.count > ACPI_MAX_TABLES) { + printk(KERN_WARNING PREFIX "Truncated %lu XSDT entries\n", + (sdt.count - ACPI_MAX_TABLES)); + sdt.count = ACPI_MAX_TABLES; + } + + for (i = 0; i < sdt.count; i++) + sdt.entry[i].pa = (unsigned long) mapped_xsdt->entry[i]; + } + + /* Then check RSDT */ + + else if (rsdp->rsdt_address) { + + struct acpi_table_rsdt *mapped_rsdt = NULL; + + sdt.pa = rsdp->rsdt_address; + + /* map in just the header */ + header = (struct acpi_table_header *) + __acpi_map_table(sdt.pa, sizeof(struct acpi_table_header)); + if (!header) { + printk(KERN_WARNING PREFIX "Unable to map RSDT header\n"); + return -ENODEV; + } + + /* remap in the entire table before processing */ + mapped_rsdt = (struct acpi_table_rsdt *) + __acpi_map_table(sdt.pa, header->length); + if (!mapped_rsdt) { + printk(KERN_WARNING PREFIX "Unable to map RSDT\n"); + return -ENODEV; + } + header = &mapped_rsdt->header; + + if (strncmp(header->signature, "RSDT", 4)) { + printk(KERN_WARNING PREFIX "RSDT signature incorrect\n"); + return -ENODEV; + } + + if (acpi_table_compute_checksum(header, header->length)) { + printk(KERN_WARNING PREFIX "Invalid RSDT checksum\n"); + return -ENODEV; + } + + sdt.count = (header->length - sizeof(struct acpi_table_header)) >> 2; + if (sdt.count > ACPI_MAX_TABLES) { + printk(KERN_WARNING PREFIX "Truncated %lu RSDT entries\n", + (sdt.count - ACPI_MAX_TABLES)); + sdt.count = ACPI_MAX_TABLES;
+ } + + for (i = 0; i < sdt.count; i++) + sdt.entry[i].pa = (unsigned long) mapped_rsdt->entry[i]; + } + + else { + printk(KERN_WARNING PREFIX "No System Description Table (RSDT/XSDT) specified in RSDP\n"); + return -ENODEV; + } + + acpi_table_print(header, sdt.pa); + + for (i = 0; i < sdt.count; i++) { + + /* map in just the header */ + header = (struct acpi_table_header *) + __acpi_map_table(sdt.entry[i].pa, + sizeof(struct acpi_table_header)); + if (!header) + continue; + + /* remap in the entire table before processing */ + header = (struct acpi_table_header *) + __acpi_map_table(sdt.entry[i].pa, + header->length); + if (!header) + continue; + + acpi_table_print(header, sdt.entry[i].pa); + + if (acpi_table_compute_checksum(header, header->length)) { + printk(KERN_WARNING " >>> ERROR: Invalid checksum\n"); + continue; + } + + sdt.entry[i].size = header->length; + + for (id = 0; id < ACPI_TABLE_COUNT; id++) { + if (!strncmp((char *) &header->signature, + acpi_table_signatures[id], + sizeof(header->signature))) { + sdt.entry[i].id = id; + } + } + } + + /* + * The DSDT is *not* in the RSDT (why not? no idea.) but we want + * to print its info, because this is what people usually blacklist + * against. Unfortunately, we don't know the phys_addr, so just + * print 0. Maybe no one will notice. + */ + if(!acpi_get_table_header_early(ACPI_DSDT, &header)) + acpi_table_print(header, 0); + + return 0; +} + + +int __init +acpi_table_init (void) +{ + struct acpi_table_rsdp *rsdp = NULL; + unsigned long rsdp_phys = 0; + int result = 0; + + memset(&sdt, 0, sizeof(struct acpi_table_sdt)); + + /* Locate and map the Root System Description Table (RSDP) */ + + rsdp_phys = acpi_find_rsdp(); + if (!rsdp_phys) { + printk(KERN_ERR PREFIX "Unable to locate RSDP\n"); + return -ENODEV; + } + + rsdp = (struct acpi_table_rsdp *) __va(rsdp_phys); + if (!rsdp) { + printk(KERN_WARNING PREFIX "Unable to map RSDP\n"); + return -ENODEV; + } + + printk(KERN_INFO PREFIX "RSDP (v%3.3d %6.6s ) @ 0x%p\n", + rsdp->revision, rsdp->oem_id, (void *) rsdp_phys); + + if (rsdp->revision < 2) + result = acpi_table_compute_checksum(rsdp, sizeof(struct acpi_table_rsdp)); + else + result = acpi_table_compute_checksum(rsdp, ((struct acpi20_table_rsdp *)rsdp)->length); + + if (result) { + printk(KERN_WARNING " >>> ERROR: Invalid checksum\n"); + return -ENODEV; + } + + /* Locate and map the System Description table (RSDT/XSDT) */ + + if (acpi_table_get_sdt(rsdp)) + return -ENODEV; + + return 0; +} + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/thermal.c linux.21rc5-ac2/drivers/acpi/thermal.c --- linux.21rc5/drivers/acpi/thermal.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/thermal.c 2003-05-29 01:05:36.000000000 +0100 @@ -0,0 +1,1359 @@ +/* + * acpi_thermal.c - ACPI Thermal Zone Driver ($Revision: 40 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This driver fully implements the ACPI thermal policy as described in the + * ACPI 2.0 Specification. + * + * TBD: 1. Implement passive cooling hysteresis. + * 2. Enhance passive cooling (CPU) states/limit interface to support + * concepts of 'multiple limiters', upper/lower limits, etc. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define _COMPONENT ACPI_THERMAL_COMPONENT +ACPI_MODULE_NAME ("acpi_thermal") + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION(ACPI_THERMAL_DRIVER_NAME); +MODULE_LICENSE("GPL"); + +static int tzp = 0; +MODULE_PARM(tzp, "i"); +MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.\n"); + +#define PREFIX "ACPI: " + + +#define ACPI_THERMAL_MAX_ACTIVE 10 + +#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732>=0) ? ((long)t-2732+5)/10 : ((long)t-2732-5)/10) +#define CELSIUS_TO_KELVIN(t) ((t+273)*10) + +static int acpi_thermal_add (struct acpi_device *device); +static int acpi_thermal_remove (struct acpi_device *device, int type); + +static struct acpi_driver acpi_thermal_driver = { + .name = ACPI_THERMAL_DRIVER_NAME, + .class = ACPI_THERMAL_CLASS, + .ids = ACPI_THERMAL_HID, + .ops = { + .add = acpi_thermal_add, + .remove = acpi_thermal_remove, + }, +}; + +struct acpi_thermal_state { + u8 critical:1; + u8 hot:1; + u8 passive:1; + u8 active:1; + u8 reserved:4; + int active_index; +}; + +struct acpi_thermal_state_flags { + u8 valid:1; + u8 enabled:1; + u8 reserved:6; +}; + +struct acpi_thermal_critical { + struct acpi_thermal_state_flags flags; + unsigned long temperature; +}; + +struct acpi_thermal_hot { + struct acpi_thermal_state_flags flags; + unsigned long temperature; +}; + +struct acpi_thermal_passive { + struct acpi_thermal_state_flags flags; + unsigned long temperature; + unsigned long tc1; + unsigned long tc2; + unsigned long tsp; + struct acpi_handle_list devices; +}; + +struct acpi_thermal_active { + struct acpi_thermal_state_flags flags; + unsigned long temperature; + struct acpi_handle_list devices; +}; + +struct acpi_thermal_trips { + struct acpi_thermal_critical critical; + struct acpi_thermal_hot hot; + struct acpi_thermal_passive passive; + struct acpi_thermal_active active[ACPI_THERMAL_MAX_ACTIVE]; +}; + +struct acpi_thermal_flags { + u8 cooling_mode:1; /* _SCP */ + u8 devices:1; /* _TZD */ + u8 reserved:6; +}; + +struct acpi_thermal { + acpi_handle handle; + acpi_bus_id name; + unsigned long temperature; + unsigned long last_temperature; + unsigned long polling_frequency; + u8 cooling_mode; + struct acpi_thermal_flags flags; + struct acpi_thermal_state state; + struct acpi_thermal_trips trips; + struct acpi_handle_list devices; + struct timer_list timer; +}; + + +/* -------------------------------------------------------------------------- + Thermal Zone Management + -------------------------------------------------------------------------- */ + +static int +acpi_thermal_get_temperature ( + struct acpi_thermal *tz) +{ + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE("acpi_thermal_get_temperature"); + + if (!tz) + return_VALUE(-EINVAL); + + tz->last_temperature = tz->temperature; + + status = 
acpi_evaluate_integer(tz->handle, "_TMP", NULL, &tz->temperature); + if (ACPI_FAILURE(status)) + return -ENODEV; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %lu dK\n", tz->temperature)); + + return_VALUE(0); +} + + +static int +acpi_thermal_get_polling_frequency ( + struct acpi_thermal *tz) +{ + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE("acpi_thermal_get_polling_frequency"); + + if (!tz) + return_VALUE(-EINVAL); + + status = acpi_evaluate_integer(tz->handle, "_TZP", NULL, &tz->polling_frequency); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency is %lu dS\n", tz->polling_frequency)); + + return_VALUE(0); +} + + +static int +acpi_thermal_set_polling ( + struct acpi_thermal *tz, + int seconds) +{ + ACPI_FUNCTION_TRACE("acpi_thermal_set_polling"); + + if (!tz) + return_VALUE(-EINVAL); + + tz->polling_frequency = seconds * 10; /* Convert value to deci-seconds */ + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency set to %lu seconds\n", tz->polling_frequency)); + + return_VALUE(0); +} + + +static int +acpi_thermal_set_cooling_mode ( + struct acpi_thermal *tz, + int mode) +{ + acpi_status status = AE_OK; + union acpi_object arg0 = {ACPI_TYPE_INTEGER}; + struct acpi_object_list arg_list = {1, &arg0}; + acpi_handle handle = NULL; + + ACPI_FUNCTION_TRACE("acpi_thermal_set_cooling_mode"); + + if (!tz) + return_VALUE(-EINVAL); + + status = acpi_get_handle(tz->handle, "_SCP", &handle); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n")); + return_VALUE(-ENODEV); + } + + arg0.integer.value = mode; + + status = acpi_evaluate_object(handle, NULL, &arg_list, NULL); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + tz->cooling_mode = mode; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cooling mode [%s]\n", + mode?"passive":"active")); + + return_VALUE(0); +} + + +static int +acpi_thermal_get_trip_points ( + struct acpi_thermal *tz) +{ + acpi_status status = AE_OK; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_thermal_get_trip_points"); + + if (!tz) + return_VALUE(-EINVAL); + + /* Critical Shutdown (required) */ + + status = acpi_evaluate_integer(tz->handle, "_CRT", NULL, + &tz->trips.critical.temperature); + if (ACPI_FAILURE(status)) { + tz->trips.critical.flags.valid = 0; + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "No critical threshold\n")); + return -ENODEV; + } + else { + tz->trips.critical.flags.valid = 1; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found critical threshold [%lu]\n", tz->trips.critical.temperature)); + } + + /* Critical Sleep (optional) */ + + status = acpi_evaluate_integer(tz->handle, "_HOT", NULL, &tz->trips.hot.temperature); + if (ACPI_FAILURE(status)) { + tz->trips.hot.flags.valid = 0; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No hot threshold\n")); + } + else { + tz->trips.hot.flags.valid = 1; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found hot threshold [%lu]\n", tz->trips.hot.temperature)); + } + + /* Passive: Processors (optional) */ + + status = acpi_evaluate_integer(tz->handle, "_PSV", NULL, &tz->trips.passive.temperature); + if (ACPI_FAILURE(status)) { + tz->trips.passive.flags.valid = 0; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No passive threshold\n")); + } + else { + tz->trips.passive.flags.valid = 1; + + status = acpi_evaluate_integer(tz->handle, "_TC1", NULL, &tz->trips.passive.tc1); + if (ACPI_FAILURE(status)) + tz->trips.passive.flags.valid = 0; + + status = acpi_evaluate_integer(tz->handle, "_TC2", NULL, &tz->trips.passive.tc2); + if (ACPI_FAILURE(status)) + tz->trips.passive.flags.valid = 0; + 
+ status = acpi_evaluate_integer(tz->handle, "_TSP", NULL, &tz->trips.passive.tsp); + if (ACPI_FAILURE(status)) + tz->trips.passive.flags.valid = 0; + + status = acpi_evaluate_reference(tz->handle, "_PSL", NULL, &tz->trips.passive.devices); + if (ACPI_FAILURE(status)) + tz->trips.passive.flags.valid = 0; + + if (!tz->trips.passive.flags.valid) + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid passive threshold\n")); + else + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found passive threshold [%lu]\n", tz->trips.passive.temperature)); + } + + /* Active: Fans, etc. (optional) */ + + for (i=0; ihandle, name, NULL, &tz->trips.active[i].temperature); + if (ACPI_FAILURE(status)) + break; + + name[2] = 'L'; + status = acpi_evaluate_reference(tz->handle, name, NULL, &tz->trips.active[i].devices); + if (ACPI_SUCCESS(status)) { + tz->trips.active[i].flags.valid = 1; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found active threshold [%d]:[%lu]\n", i, tz->trips.active[i].temperature)); + } + else + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid active threshold [%d]\n", i)); + } + + return_VALUE(0); +} + + +static int +acpi_thermal_get_devices ( + struct acpi_thermal *tz) +{ + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE("acpi_thermal_get_devices"); + + if (!tz) + return_VALUE(-EINVAL); + + status = acpi_evaluate_reference(tz->handle, "_TZD", NULL, &tz->devices); + if (ACPI_FAILURE(status)) + return_VALUE(-ENODEV); + + return_VALUE(0); +} + + +static int +acpi_thermal_call_usermode ( + char *path) +{ + char *argv[2] = {NULL, NULL}; + char *envp[3] = {NULL, NULL, NULL}; + + ACPI_FUNCTION_TRACE("acpi_thermal_call_usermode"); + + if (!path) + return_VALUE(-EINVAL); + + argv[0] = path; + + /* minimal command environment */ + envp[0] = "HOME=/"; + envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + + call_usermodehelper(argv[0], argv, envp); + + return_VALUE(0); +} + + +static int +acpi_thermal_critical ( + struct acpi_thermal *tz) +{ + int result = 0; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_thermal_critical"); + + if (!tz || !tz->trips.critical.flags.valid) + return_VALUE(-EINVAL); + + if (tz->temperature >= tz->trips.critical.temperature) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Critical trip point\n")); + tz->trips.critical.flags.enabled = 1; + } + else if (tz->trips.critical.flags.enabled) + tz->trips.critical.flags.enabled = 0; + + result = acpi_bus_get_device(tz->handle, &device); + if (result) + return_VALUE(result); + + acpi_bus_generate_event(device, ACPI_THERMAL_NOTIFY_CRITICAL, tz->trips.critical.flags.enabled); + + acpi_thermal_call_usermode(ACPI_THERMAL_PATH_POWEROFF); + + return_VALUE(0); +} + + +static int +acpi_thermal_hot ( + struct acpi_thermal *tz) +{ + int result = 0; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_thermal_hot"); + + if (!tz || !tz->trips.hot.flags.valid) + return_VALUE(-EINVAL); + + if (tz->temperature >= tz->trips.hot.temperature) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Hot trip point\n")); + tz->trips.hot.flags.enabled = 1; + } + else if (tz->trips.hot.flags.enabled) + tz->trips.hot.flags.enabled = 0; + + result = acpi_bus_get_device(tz->handle, &device); + if (result) + return_VALUE(result); + + acpi_bus_generate_event(device, ACPI_THERMAL_NOTIFY_HOT, tz->trips.hot.flags.enabled); + + /* TBD: Call user-mode "sleep(S4)" function */ + + return_VALUE(0); +} + + +static int +acpi_thermal_passive ( + struct acpi_thermal *tz) +{ + int result = 0; + struct acpi_thermal_passive *passive = NULL; + int trend = 0; + int i = 0; + + 
ACPI_FUNCTION_TRACE("acpi_thermal_passive"); + + if (!tz || !tz->trips.passive.flags.valid) + return_VALUE(-EINVAL); + + passive = &(tz->trips.passive); + + /* + * Above Trip? + * ----------- + * Calculate the thermal trend (using the passive cooling equation) + * and modify the performance limit for all passive cooling devices + * accordingly. Note that we assume symmetry. + */ + if (tz->temperature >= passive->temperature) { + trend = (passive->tc1 * (tz->temperature - tz->last_temperature)) + (passive->tc2 * (tz->temperature - passive->temperature)); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "trend[%d]=(tc1[%lu]*(tmp[%lu]-last[%lu]))+(tc2[%lu]*(tmp[%lu]-psv[%lu]))\n", + trend, passive->tc1, tz->temperature, + tz->last_temperature, passive->tc2, + tz->temperature, passive->temperature)); + tz->trips.passive.flags.enabled = 1; + /* Heating up? */ + if (trend > 0) + for (i=0; idevices.count; i++) + acpi_processor_set_thermal_limit( + passive->devices.handles[i], + ACPI_PROCESSOR_LIMIT_INCREMENT); + /* Cooling off? */ + else if (trend < 0) + for (i=0; idevices.count; i++) + acpi_processor_set_thermal_limit( + passive->devices.handles[i], + ACPI_PROCESSOR_LIMIT_DECREMENT); + } + + /* + * Below Trip? + * ----------- + * Implement passive cooling hysteresis to slowly increase performance + * and avoid thrashing around the passive trip point. Note that we + * assume symmetry. + */ + else if (tz->trips.passive.flags.enabled) { + for (i=0; idevices.count; i++) + result = acpi_processor_set_thermal_limit( + passive->devices.handles[i], + ACPI_PROCESSOR_LIMIT_DECREMENT); + if (result == 1) { + tz->trips.passive.flags.enabled = 0; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Disabling passive cooling (zone is cool)\n")); + } + } + + return_VALUE(0); +} + + +static int +acpi_thermal_active ( + struct acpi_thermal *tz) +{ + int result = 0; + struct acpi_thermal_active *active = NULL; + int i = 0; + int j = 0; + unsigned long maxtemp = 0; + + ACPI_FUNCTION_TRACE("acpi_thermal_active"); + + if (!tz) + return_VALUE(-EINVAL); + + for (i=0; itrips.active[i]); + if (!active || !active->flags.valid) + break; + + /* + * Above Threshold? + * ---------------- + * If not already enabled, turn ON all cooling devices + * associated with this active threshold. + */ + if (tz->temperature >= active->temperature) { + if (active->temperature > maxtemp) + tz->state.active_index = i, maxtemp = active->temperature; + if (!active->flags.enabled) { + for (j = 0; j < active->devices.count; j++) { + result = acpi_bus_set_power(active->devices.handles[j], ACPI_STATE_D0); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Unable to turn cooling device [%p] 'on'\n", active->devices.handles[j])); + continue; + } + active->flags.enabled = 1; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cooling device [%p] now 'on'\n", active->devices.handles[j])); + } + } + } + /* + * Below Threshold? + * ---------------- + * Turn OFF all cooling devices associated with this + * threshold. 
+ */ + else if (active->flags.enabled) { + for (j = 0; j < active->devices.count; j++) { + result = acpi_bus_set_power(active->devices.handles[j], ACPI_STATE_D3); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Unable to turn cooling device [%p] 'off'\n", active->devices.handles[j])); + continue; + } + active->flags.enabled = 0; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cooling device [%p] now 'off'\n", active->devices.handles[j])); + } + } + } + + return_VALUE(0); +} + + +static void acpi_thermal_check (void *context); + +static void +acpi_thermal_run ( + unsigned long data) +{ + acpi_os_queue_for_execution(OSD_PRIORITY_GPE, acpi_thermal_check, (void *) data); +} + + +static void +acpi_thermal_check ( + void *data) +{ + int result = 0; + struct acpi_thermal *tz = (struct acpi_thermal *) data; + unsigned long sleep_time = 0; + int i = 0; + struct acpi_thermal_state state = tz->state; + + ACPI_FUNCTION_TRACE("acpi_thermal_check"); + + if (!tz) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid (NULL) context.\n")); + return_VOID; + } + + result = acpi_thermal_get_temperature(tz); + if (result) + return_VOID; + + memset(&tz->state, 0, sizeof(tz->state)); + + /* + * Check Trip Points + * ----------------- + * Compare the current temperature to the trip point values to see + * if we've entered one of the thermal policy states. Note that + * this function determines when a state is entered, but the + * individual policy decides when it is exited (e.g. hysteresis). + */ + if (tz->trips.critical.flags.valid) + state.critical |= (tz->temperature >= tz->trips.critical.temperature); + if (tz->trips.hot.flags.valid) + state.hot |= (tz->temperature >= tz->trips.hot.temperature); + if (tz->trips.passive.flags.valid) + state.passive |= (tz->temperature >= tz->trips.passive.temperature); + for (i=0; i<ACPI_THERMAL_MAX_ACTIVE; i++) + if (tz->trips.active[i].flags.valid) + state.active |= (tz->temperature >= tz->trips.active[i].temperature); + + /* + * Invoke Policy + * ------------- + * Separated from the above check to allow individual policy to + * determine when to exit a given state. + */ + if (state.critical) + acpi_thermal_critical(tz); + if (state.hot) + acpi_thermal_hot(tz); + if (state.passive) + acpi_thermal_passive(tz); + if (state.active) + acpi_thermal_active(tz); + + /* + * Calculate State + * --------------- + * Again, separated from the above two to allow independent policy + * decisions. + */ + if (tz->trips.critical.flags.enabled) + tz->state.critical = 1; + if (tz->trips.hot.flags.enabled) + tz->state.hot = 1; + if (tz->trips.passive.flags.enabled) + tz->state.passive = 1; + for (i=0; i<ACPI_THERMAL_MAX_ACTIVE; i++) + if (tz->trips.active[i].flags.enabled) + tz->state.active = 1; + + /* + * Calculate Sleep Time + * -------------------- + * If we're in the passive state, use _TSP's value. Otherwise + * use the default polling frequency (e.g. _TZP). If no polling + * frequency is specified then we'll wait forever (at least until + * a thermal event occurs). Note that _TSP and _TZP values are + * given in 1/10th seconds (we must convert to milliseconds).
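The deciseconds-to-milliseconds-to-jiffies arithmetic described above, as a standalone helper. Example values are assumptions: _TSP = 30 deciseconds is 3000 ms, which at HZ=100 is 300 ticks past the current jiffies count:

    static unsigned long next_expiry(unsigned long now_jiffies, unsigned long hz,
                                     unsigned long deciseconds)
    {
            unsigned long sleep_ms = deciseconds * 100;   /* 30 ds -> 3000 ms */

            return now_jiffies + (hz * sleep_ms) / 1000;  /* 3000 ms at HZ=100 -> +300 */
    }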
+	 */
+	if (tz->state.passive)
+		sleep_time = tz->trips.passive.tsp * 100;
+	else if (tz->polling_frequency > 0)
+		sleep_time = tz->polling_frequency * 100;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s: temperature[%lu] sleep[%lu]\n",
+		tz->name, tz->temperature, sleep_time));
+
+	/*
+	 * Schedule Next Poll
+	 * ------------------
+	 */
+	if (!sleep_time) {
+		if (timer_pending(&(tz->timer)))
+			del_timer(&(tz->timer));
+	}
+	else {
+		if (timer_pending(&(tz->timer)))
+			mod_timer(&(tz->timer), (HZ * sleep_time) / 1000);
+		else {
+			tz->timer.data = (unsigned long) tz;
+			tz->timer.function = acpi_thermal_run;
+			tz->timer.expires = jiffies + (HZ * sleep_time) / 1000;
+			add_timer(&(tz->timer));
+		}
+	}
+
+	return_VOID;
+}
+
+
+/* --------------------------------------------------------------------------
+                              FS Interface (/proc)
+   -------------------------------------------------------------------------- */
+
+struct proc_dir_entry		*acpi_thermal_dir = NULL;
+
+
+static int
+acpi_thermal_read_state (
+	char			*page,
+	char			**start,
+	off_t			off,
+	int			count,
+	int			*eof,
+	void			*data)
+{
+	struct acpi_thermal	*tz = (struct acpi_thermal *) data;
+	char			*p = page;
+	int			len = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_thermal_read_state");
+
+	if (!tz || (off != 0))
+		goto end;
+
+	p += sprintf(p, "state: ");
+
+	if (!tz->state.critical && !tz->state.hot && !tz->state.passive && !tz->state.active)
+		p += sprintf(p, "ok\n");
+	else {
+		if (tz->state.critical)
+			p += sprintf(p, "critical ");
+		if (tz->state.hot)
+			p += sprintf(p, "hot ");
+		if (tz->state.passive)
+			p += sprintf(p, "passive ");
+		if (tz->state.active)
+			p += sprintf(p, "active[%d]", tz->state.active_index);
+		p += sprintf(p, "\n");
+	}
+
+end:
+	len = (p - page);
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len>count) len = count;
+	if (len<0) len = 0;
+
+	return_VALUE(len);
+}
+
+
+static int
+acpi_thermal_read_temperature (
+	char			*page,
+	char			**start,
+	off_t			off,
+	int			count,
+	int			*eof,
+	void			*data)
+{
+	int			result = 0;
+	struct acpi_thermal	*tz = (struct acpi_thermal *) data;
+	char			*p = page;
+	int			len = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_thermal_read_temperature");
+
+	if (!tz || (off != 0))
+		goto end;
+
+	result = acpi_thermal_get_temperature(tz);
+	if (result)
+		goto end;
+
+	p += sprintf(p, "temperature: %ld C\n",
+		KELVIN_TO_CELSIUS(tz->temperature));
+
+end:
+	len = (p - page);
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len>count) len = count;
+	if (len<0) len = 0;
+
+	return_VALUE(len);
+}
+
+
+static int
+acpi_thermal_read_trip_points (
+	char			*page,
+	char			**start,
+	off_t			off,
+	int			count,
+	int			*eof,
+	void			*data)
+{
+	struct acpi_thermal	*tz = (struct acpi_thermal *) data;
+	char			*p = page;
+	int			len = 0;
+	int			i = 0;
+	int			j = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_thermal_read_trip_points");
+
+	if (!tz || (off != 0))
+		goto end;
+
+	if (tz->trips.critical.flags.valid)
+		p += sprintf(p, "critical (S5): %ld C\n",
+			KELVIN_TO_CELSIUS(tz->trips.critical.temperature));
+
+	if (tz->trips.hot.flags.valid)
+		p += sprintf(p, "hot (S4): %ld C\n",
+			KELVIN_TO_CELSIUS(tz->trips.hot.temperature));
+
+	if (tz->trips.passive.flags.valid) {
+		p += sprintf(p, "passive: %ld C: tc1=%lu tc2=%lu tsp=%lu devices=",
+			KELVIN_TO_CELSIUS(tz->trips.passive.temperature),
+			tz->trips.passive.tc1,
+			tz->trips.passive.tc2,
+			tz->trips.passive.tsp);
+		for (j=0; j<tz->trips.passive.devices.count; j++) {
+
+			p += sprintf(p, "0x%p ", tz->trips.passive.devices.handles[j]);
+		}
+		p += sprintf(p, "\n");
+	}
+
+	for (i=0; i<ACPI_THERMAL_MAX_ACTIVE; i++) {
+		if (!(tz->trips.active[i].flags.valid))
+			break;
+		p += sprintf(p, "active[%d]: %ld C: devices=",
+			i, KELVIN_TO_CELSIUS(tz->trips.active[i].temperature));
+		for (j=0; j<tz->trips.active[i].devices.count; j++)
+			p += sprintf(p, "0x%p ",
+				tz->trips.active[i].devices.handles[j]);
+		p += sprintf(p, "\n");
+	}
+
+end:
+	len = (p - page);
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len>count) len = count;
+	if (len<0) len = 0;
+
+	return_VALUE(len);
+}
+
+
+static int
+acpi_thermal_write_trip_points (
+	struct file		*file,
+	const char		*buffer,
+	unsigned long		count,
+	void			*data)
+{
+	struct acpi_thermal	*tz = (struct acpi_thermal *) data;
+	char			limit_string[25] = {'\0'};
+	int			critical, hot, passive, active0, active1;
+
+	ACPI_FUNCTION_TRACE("acpi_thermal_write_trip_points");
+
+	if (!tz || (count > sizeof(limit_string) - 1)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid argument\n"));
+		return_VALUE(-EINVAL);
+	}
+
+	if (copy_from_user(limit_string, buffer, count)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data\n"));
+		return_VALUE(-EFAULT);
+	}
+
+	limit_string[count] = '\0';
+
+	if (sscanf(limit_string, "%d:%d:%d:%d:%d", &critical, &hot, &passive, &active0, &active1) != 5) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data format\n"));
+		return_VALUE(-EINVAL);
+	}
+
+	tz->trips.critical.temperature = CELSIUS_TO_KELVIN(critical);
+	tz->trips.hot.temperature = CELSIUS_TO_KELVIN(hot);
+	tz->trips.passive.temperature = CELSIUS_TO_KELVIN(passive);
+	tz->trips.active[0].temperature = CELSIUS_TO_KELVIN(active0);
+	tz->trips.active[1].temperature = CELSIUS_TO_KELVIN(active1);
+
+	return_VALUE(count);
+}
+
+
+static int
+acpi_thermal_read_cooling_mode (
+	char			*page,
+	char			**start,
+	off_t			off,
+	int			count,
+	int			*eof,
+	void			*data)
+{
+	struct acpi_thermal	*tz = (struct acpi_thermal *) data;
+	char			*p = page;
+	int			len = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_thermal_read_cooling_mode");
+
+	if (!tz || (off != 0))
+		goto end;
+
+	if (!tz->flags.cooling_mode) {
+		p += sprintf(p, "\n");
+		goto end;
+	}
+
+	p += sprintf(p, "cooling mode: %s\n",
+		tz->cooling_mode?"passive":"active");
+
+end:
+	len = (p - page);
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len>count) len = count;
+	if (len<0) len = 0;
+
+	return_VALUE(len);
+}
+
+
+static int
+acpi_thermal_write_cooling_mode (
+	struct file		*file,
+	const char		*buffer,
+	unsigned long		count,
+	void			*data)
+{
+	int			result = 0;
+	struct acpi_thermal	*tz = (struct acpi_thermal *) data;
+	char			mode_string[12] = {'\0'};
+
+	ACPI_FUNCTION_TRACE("acpi_thermal_write_cooling_mode");
+
+	if (!tz || (count > sizeof(mode_string) - 1))
+		return_VALUE(-EINVAL);
+
+	if (!tz->flags.cooling_mode)
+		return_VALUE(-ENODEV);
+
+	if (copy_from_user(mode_string, buffer, count))
+		return_VALUE(-EFAULT);
+
+	mode_string[count] = '\0';
+
+	result = acpi_thermal_set_cooling_mode(tz,
+		simple_strtoul(mode_string, NULL, 0));
+	if (result)
+		return_VALUE(result);
+
+	return_VALUE(count);
+}
+
+
+static int
+acpi_thermal_read_polling (
+	char			*page,
+	char			**start,
+	off_t			off,
+	int			count,
+	int			*eof,
+	void			*data)
+{
+	struct acpi_thermal	*tz = (struct acpi_thermal *) data;
+	char			*p = page;
+	int			len = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_thermal_read_polling");
+
+	if (!tz || (off != 0))
+		goto end;
+
+	if (!tz->polling_frequency) {
+		p += sprintf(p, "\n");
+		goto end;
+	}
+
+	p += sprintf(p, "polling frequency: %lu seconds\n",
+		(tz->polling_frequency / 10));
+
+end:
+	len = (p - page);
+	if (len <= off+count) *eof = 1;
+	*start = page + off;
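+	/*
+	 * (A usage sketch for acpi_thermal_write_trip_points() above: it
+	 * parses five colon-separated Celsius values in the order
+	 * critical:hot:passive:active0:active1, so -- assuming a zone
+	 * named THRM -- all five trip points can be rewritten at once:
+	 *
+	 *   echo "90:85:75:70:60" > /proc/acpi/thermal_zone/THRM/trip_points
+	 *
+	 * The write buffer is capped at 24 characters.)
+	 */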
+	len -= off;
+	if (len>count) len = count;
+	if (len<0) len = 0;
+
+	return_VALUE(len);
+}
+
+
+static int
+acpi_thermal_write_polling (
+	struct file		*file,
+	const char		*buffer,
+	unsigned long		count,
+	void			*data)
+{
+	int			result = 0;
+	struct acpi_thermal	*tz = (struct acpi_thermal *) data;
+	char			polling_string[12] = {'\0'};
+	int			seconds = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_thermal_write_polling");
+
+	if (!tz || (count > sizeof(polling_string) - 1))
+		return_VALUE(-EINVAL);
+
+	if (copy_from_user(polling_string, buffer, count))
+		return_VALUE(-EFAULT);
+
+	polling_string[count] = '\0';
+
+	seconds = simple_strtoul(polling_string, NULL, 0);
+
+	result = acpi_thermal_set_polling(tz, seconds);
+	if (result)
+		return_VALUE(result);
+
+	acpi_thermal_check(tz);
+
+	return_VALUE(count);
+}
+
+
+static int
+acpi_thermal_add_fs (
+	struct acpi_device	*device)
+{
+	struct proc_dir_entry	*entry = NULL;
+
+	ACPI_FUNCTION_TRACE("acpi_thermal_add_fs");
+
+	if (!acpi_device_dir(device)) {
+		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+			acpi_thermal_dir);
+		if (!acpi_device_dir(device))
+			return_VALUE(-ENODEV);
+	}
+
+	/* 'state' [R] */
+	entry = create_proc_entry(ACPI_THERMAL_FILE_STATE,
+		S_IRUGO, acpi_device_dir(device));
+	if (!entry)
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Unable to create '%s' fs entry\n",
+			ACPI_THERMAL_FILE_STATE));
+	else {
+		entry->read_proc = acpi_thermal_read_state;
+		entry->data = acpi_driver_data(device);
+	}
+
+	/* 'temperature' [R] */
+	entry = create_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE,
+		S_IRUGO, acpi_device_dir(device));
+	if (!entry)
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Unable to create '%s' fs entry\n",
+			ACPI_THERMAL_FILE_TEMPERATURE));
+	else {
+		entry->read_proc = acpi_thermal_read_temperature;
+		entry->data = acpi_driver_data(device);
+	}
+
+	/* 'trip_points' [R/W] */
+	entry = create_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS,
+		S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
+	if (!entry)
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Unable to create '%s' fs entry\n",
+			ACPI_THERMAL_FILE_TRIP_POINTS));
+	else {
+		entry->read_proc = acpi_thermal_read_trip_points;
+		entry->write_proc = acpi_thermal_write_trip_points;
+		entry->data = acpi_driver_data(device);
+	}
+
+	/* 'cooling_mode' [R/W] */
+	entry = create_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE,
+		S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
+	if (!entry)
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Unable to create '%s' fs entry\n",
+			ACPI_THERMAL_FILE_COOLING_MODE));
+	else {
+		entry->read_proc = acpi_thermal_read_cooling_mode;
+		entry->write_proc = acpi_thermal_write_cooling_mode;
+		entry->data = acpi_driver_data(device);
+	}
+
+	/* 'polling_frequency' [R/W] */
+	entry = create_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ,
+		S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
+	if (!entry)
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Unable to create '%s' fs entry\n",
+			ACPI_THERMAL_FILE_POLLING_FREQ));
+	else {
+		entry->read_proc = acpi_thermal_read_polling;
+		entry->write_proc = acpi_thermal_write_polling;
+		entry->data = acpi_driver_data(device);
+	}
+
+	return_VALUE(0);
+}
+
+
+static int
+acpi_thermal_remove_fs (
+	struct acpi_device	*device)
+{
+	ACPI_FUNCTION_TRACE("acpi_thermal_remove_fs");
+
+	if (acpi_device_dir(device)) {
+		remove_proc_entry(acpi_device_bid(device), acpi_thermal_dir);
+		acpi_device_dir(device) = NULL;
+	}
+
+	return_VALUE(0);
+}
+
+
+/* --------------------------------------------------------------------------
+                                 Driver Interface
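+
+   (Note: acpi_thermal_add_fs() above publishes each zone as
+   /proc/acpi/thermal_zone/<bid>/{state, temperature, trip_points,
+   cooling_mode, polling_frequency}, the last three writable by root.
+   The path assumes ACPI_THERMAL_CLASS is "thermal_zone" and that
+   acpi_thermal_dir hangs off acpi_root_dir, i.e. /proc/acpi.)
+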
-------------------------------------------------------------------------- */ + +static void +acpi_thermal_notify ( + acpi_handle handle, + u32 event, + void *data) +{ + struct acpi_thermal *tz = (struct acpi_thermal *) data; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_thermal_notify"); + + if (!tz) + return_VOID; + + if (acpi_bus_get_device(tz->handle, &device)) + return_VOID; + + switch (event) { + case ACPI_THERMAL_NOTIFY_TEMPERATURE: + acpi_thermal_check(tz); + break; + case ACPI_THERMAL_NOTIFY_THRESHOLDS: + acpi_thermal_get_trip_points(tz); + acpi_thermal_check(tz); + acpi_bus_generate_event(device, event, 0); + break; + case ACPI_THERMAL_NOTIFY_DEVICES: + if (tz->flags.devices) + acpi_thermal_get_devices(tz); + acpi_bus_generate_event(device, event, 0); + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Unsupported event [0x%x]\n", event)); + break; + } + + return_VOID; +} + + +static int +acpi_thermal_get_info ( + struct acpi_thermal *tz) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_thermal_get_info"); + + if (!tz) + return_VALUE(-EINVAL); + + /* Get temperature [_TMP] (required) */ + result = acpi_thermal_get_temperature(tz); + if (result) + return_VALUE(result); + + /* Set the cooling mode [_SCP] to active cooling (default) */ + result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE); + if (!result) + tz->flags.cooling_mode = 1; + + /* Get trip points [_CRT, _PSV, etc.] (required) */ + result = acpi_thermal_get_trip_points(tz); + if (result) + return_VALUE(result); + + /* Get default polling frequency [_TZP] (optional) */ + if (tzp) + tz->polling_frequency = tzp; + else + acpi_thermal_get_polling_frequency(tz); + + /* Get devices in this thermal zone [_TZD] (optional) */ + result = acpi_thermal_get_devices(tz); + if (!result) + tz->flags.devices = 1; + + return_VALUE(0); +} + + +static int +acpi_thermal_add ( + struct acpi_device *device) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_thermal *tz = NULL; + + ACPI_FUNCTION_TRACE("acpi_thermal_add"); + + if (!device) + return_VALUE(-EINVAL); + + tz = kmalloc(sizeof(struct acpi_thermal), GFP_KERNEL); + if (!tz) + return_VALUE(-ENOMEM); + memset(tz, 0, sizeof(struct acpi_thermal)); + + tz->handle = device->handle; + sprintf(tz->name, "%s", device->pnp.bus_id); + sprintf(acpi_device_name(device), "%s", ACPI_THERMAL_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_THERMAL_CLASS); + acpi_driver_data(device) = tz; + + result = acpi_thermal_get_info(tz); + if (result) + goto end; + + result = acpi_thermal_add_fs(device); + if (result) + return_VALUE(result); + + init_timer(&tz->timer); + + acpi_thermal_check(tz); + + status = acpi_install_notify_handler(tz->handle, + ACPI_DEVICE_NOTIFY, acpi_thermal_notify, tz); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error installing notify handler\n")); + result = -ENODEV; + goto end; + } + + printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n", + acpi_device_name(device), acpi_device_bid(device), + KELVIN_TO_CELSIUS(tz->temperature)); + +end: + if (result) { + acpi_thermal_remove_fs(device); + kfree(tz); + } + + return_VALUE(result); +} + + +static int +acpi_thermal_remove ( + struct acpi_device *device, + int type) +{ + acpi_status status = AE_OK; + struct acpi_thermal *tz = NULL; + + ACPI_FUNCTION_TRACE("acpi_thermal_remove"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + tz = (struct acpi_thermal *) acpi_driver_data(device); + + if (timer_pending(&(tz->timer))) + 
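+	/*
+	 * (Note on acpi_thermal_notify() above: the event codes are the
+	 * standard ACPI thermal-zone notifications -- 0x80 for a
+	 * temperature change, 0x81 for changed trip points, and 0x82 for
+	 * a changed device list -- which is why 0x81 re-reads the trip
+	 * points and 0x82 re-evaluates _TZD via acpi_thermal_get_devices().)
+	 */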
del_timer(&(tz->timer)); + + status = acpi_remove_notify_handler(tz->handle, + ACPI_DEVICE_NOTIFY, acpi_thermal_notify); + if (ACPI_FAILURE(status)) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error removing notify handler\n")); + + /* Terminate policy */ + if (tz->trips.passive.flags.valid + && tz->trips.passive.flags.enabled) { + tz->trips.passive.flags.enabled = 0; + acpi_thermal_passive(tz); + } + if (tz->trips.active[0].flags.valid + && tz->trips.active[0].flags.enabled) { + tz->trips.active[0].flags.enabled = 0; + acpi_thermal_active(tz); + } + + acpi_thermal_remove_fs(device); + + return_VALUE(0); +} + + +static int __init +acpi_thermal_init (void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_thermal_init"); + + acpi_thermal_dir = proc_mkdir(ACPI_THERMAL_CLASS, acpi_root_dir); + if (!acpi_thermal_dir) + return_VALUE(-ENODEV); + + result = acpi_bus_register_driver(&acpi_thermal_driver); + if (result < 0) { + remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir); + return_VALUE(-ENODEV); + } + + return_VALUE(0); +} + + +static void __exit +acpi_thermal_exit (void) +{ + ACPI_FUNCTION_TRACE("acpi_thermal_exit"); + + acpi_bus_unregister_driver(&acpi_thermal_driver); + + remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir); + + return_VOID; +} + + +module_init(acpi_thermal_init); +module_exit(acpi_thermal_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/toshiba_acpi.c linux.21rc5-ac2/drivers/acpi/toshiba_acpi.c --- linux.21rc5/drivers/acpi/toshiba_acpi.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/toshiba_acpi.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,541 @@ +/* + * toshiba_acpi.c - Toshiba Laptop ACPI Extras + * + * + * Copyright (C) 2002-2003 John Belmonte + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * The devolpment page for this driver is located at + * http://memebeam.org/toys/ToshibaAcpiDriver. + * + * Credits: + * Jonathan A. Buzzard - Toshiba HCI info, and critical tips on reverse + * engineering the Windows drivers + * Yasushi Nagato - changes for linux kernel 2.4 -> 2.5 + * Rob Miller - TV out and hotkeys help + * + * + * TODO + * + */ + +#define TOSHIBA_ACPI_VERSION "0.14" +#define PROC_INTERFACE_VERSION 1 + +#include +#include +#include +#include +#include +#include + +#include + +MODULE_AUTHOR("John Belmonte"); +MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver"); +MODULE_LICENSE("GPL"); + +/* Toshiba ACPI method paths */ +#define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM" +#define METHOD_HCI "\\_SB_.VALD.GHCI" +#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX" + +/* Toshiba HCI interface definitions + * + * HCI is Toshiba's "Hardware Control Interface" which is supposed to + * be uniform across all their models. Ideally we would just call + * dedicated ACPI methods instead of using this primitive interface. 
+ * However the ACPI methods seem to be incomplete in some areas (for
+ * example they allow setting, but not reading, the LCD brightness value),
+ * so this is still useful.
+ */
+
+#define HCI_WORDS			6
+
+/* operations */
+#define HCI_SET				0xff00
+#define HCI_GET				0xfe00
+
+/* return codes */
+#define HCI_SUCCESS			0x0000
+#define HCI_FAILURE			0x1000
+#define HCI_NOT_SUPPORTED		0x8000
+#define HCI_EMPTY			0x8c00
+
+/* registers */
+#define HCI_FAN				0x0004
+#define HCI_SYSTEM_EVENT		0x0016
+#define HCI_VIDEO_OUT			0x001c
+#define HCI_HOTKEY_EVENT		0x001e
+#define HCI_LCD_BRIGHTNESS		0x002a
+
+/* field definitions */
+#define HCI_LCD_BRIGHTNESS_BITS		3
+#define HCI_LCD_BRIGHTNESS_SHIFT	(16-HCI_LCD_BRIGHTNESS_BITS)
+#define HCI_LCD_BRIGHTNESS_LEVELS	(1 << HCI_LCD_BRIGHTNESS_BITS)
+#define HCI_VIDEO_OUT_LCD		0x1
+#define HCI_VIDEO_OUT_CRT		0x2
+#define HCI_VIDEO_OUT_TV		0x4
+
+/* utility
+ */
+
+static __inline__ void
+_set_bit(u32* word, u32 mask, int value)
+{
+	*word = (*word & ~mask) | (mask * value);
+}
+
+/* an sscanf that takes explicit string length */
+static int
+snscanf(const char* str, int n, const char* format, ...)
+{
+	va_list args;
+	int result;
+	char* str2 = kmalloc(n + 1, GFP_KERNEL);
+	if (str2 == 0) return 0;
+	strncpy(str2, str, n);
+	str2[n] = 0;
+	va_start(args, format);
+	result = vsscanf(str2, format, args);
+	va_end(args);
+	kfree(str2);
+	return result;
+}
+
+/* acpi interface wrappers
+ */
+
+static int
+write_acpi_int(const char* methodName, int val)
+{
+	struct acpi_object_list params;
+	union acpi_object in_objs[1];
+	acpi_status status;
+
+	params.count = sizeof(in_objs)/sizeof(in_objs[0]);
+	params.pointer = in_objs;
+	in_objs[0].type = ACPI_TYPE_INTEGER;
+	in_objs[0].integer.value = val;
+
+	status = acpi_evaluate_object(0, (char*)methodName, &params, 0);
+	return (status == AE_OK);
+}
+
+#if 0
+static int
+read_acpi_int(const char* methodName, int* pVal)
+{
+	struct acpi_buffer results;
+	union acpi_object out_objs[1];
+	acpi_status status;
+
+	results.length = sizeof(out_objs);
+	results.pointer = out_objs;
+
+	status = acpi_evaluate_object(0, (char*)methodName, 0, &results);
+	*pVal = out_objs[0].integer.value;
+
+	return (status == AE_OK) && (out_objs[0].type == ACPI_TYPE_INTEGER);
+}
+#endif
+
+/* Perform a raw HCI call.  Here we don't care about input or output buffer
+ * format.
+ */
+static acpi_status
+hci_raw(const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
+{
+	struct acpi_object_list params;
+	union acpi_object in_objs[HCI_WORDS];
+	struct acpi_buffer results;
+	union acpi_object out_objs[HCI_WORDS+1];
+	acpi_status status;
+	int i;
+
+	params.count = HCI_WORDS;
+	params.pointer = in_objs;
+	for (i = 0; i < HCI_WORDS; ++i) {
+		in_objs[i].type = ACPI_TYPE_INTEGER;
+		in_objs[i].integer.value = in[i];
+		/*printk("%04x ", in[i]);*/
+	}
+	/*printk("\n");*/
+
+	results.length = sizeof(out_objs);
+	results.pointer = out_objs;
+
+	status = acpi_evaluate_object(0, METHOD_HCI, &params,
+		&results);
+	if ((status == AE_OK) && (out_objs->package.count <= HCI_WORDS)) {
+		for (i = 0; i < out_objs->package.count; ++i) {
+			out[i] = out_objs->package.elements[i].integer.value;
+			/*printk("%04x ", out[i]);*/
+		}
+		/*printk("\n");*/
+	}
+
+	return status;
+}
+
+/* common hci tasks (get or set one value)
+ *
+ * In addition to the ACPI status, the HCI system returns a result which
+ * may be useful (such as "not supported").
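+ *
+ * The six-word convention, as used by hci_write1()/hci_read1() below:
+ *
+ *   in[0]  = operation (HCI_SET or HCI_GET)
+ *   in[1]  = register  (e.g. HCI_LCD_BRIGHTNESS)
+ *   in[2]  = value to write (HCI_SET only)
+ *   out[0] = HCI result code (HCI_SUCCESS, HCI_NOT_SUPPORTED, ...)
+ *   out[2] = value read back (HCI_GET only)
+ *
+ * (Summary inferred from the two helpers that follow.)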
+ */ + +static acpi_status +hci_write1(u32 reg, u32 in1, u32* result) +{ + u32 in[HCI_WORDS] = { HCI_SET, reg, in1, 0, 0, 0 }; + u32 out[HCI_WORDS]; + acpi_status status = hci_raw(in, out); + *result = (status == AE_OK) ? out[0] : HCI_FAILURE; + return status; +} + +static acpi_status +hci_read1(u32 reg, u32* out1, u32* result) +{ + u32 in[HCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 }; + u32 out[HCI_WORDS]; + acpi_status status = hci_raw(in, out); + *out1 = out[2]; + *result = (status == AE_OK) ? out[0] : HCI_FAILURE; + return status; +} + +static struct proc_dir_entry* toshiba_proc_dir = NULL; +static int force_fan; +static int last_key_event; +static int key_event_valid; + +typedef struct _ProcItem +{ + char* name; + char* (*read_func)(char*); + unsigned long (*write_func)(const char*, unsigned long); +} ProcItem; + +/* proc file handlers + */ + +static int +dispatch_read(char* page, char** start, off_t off, int count, int* eof, + ProcItem* item) +{ + char* p = page; + int len; + + if (off == 0) + p = item->read_func(p); + + /* ISSUE: I don't understand this code */ + len = (p - page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + return len; +} + +static int +dispatch_write(struct file* file, const char* buffer, unsigned long count, + ProcItem* item) +{ + return item->write_func(buffer, count); +} + +static char* +read_lcd(char* p) +{ + u32 hci_result; + u32 value; + + hci_read1(HCI_LCD_BRIGHTNESS, &value, &hci_result); + if (hci_result == HCI_SUCCESS) { + value = value >> HCI_LCD_BRIGHTNESS_SHIFT; + p += sprintf(p, "brightness: %d\n", value); + p += sprintf(p, "brightness_levels: %d\n", + HCI_LCD_BRIGHTNESS_LEVELS); + } else { + p += sprintf(p, "ERROR\n"); + } + + return p; +} + +static unsigned long +write_lcd(const char* buffer, unsigned long count) +{ + int value; + /*int byte_count;*/ + u32 hci_result; + + /* ISSUE: %i doesn't work with hex values as advertised */ + if (snscanf(buffer, count, " brightness : %i", &value) == 1 && + value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) { + value = value << HCI_LCD_BRIGHTNESS_SHIFT; + hci_write1(HCI_LCD_BRIGHTNESS, value, &hci_result); + if (hci_result != HCI_SUCCESS) + return -EFAULT; + } else { + return -EINVAL; + } + + return count; +} + +static char* +read_video(char* p) +{ + u32 hci_result; + u32 value; + + hci_read1(HCI_VIDEO_OUT, &value, &hci_result); + if (hci_result == HCI_SUCCESS) { + int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0; + int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0; + int is_tv = (value & HCI_VIDEO_OUT_TV ) ? 1 : 0; + p += sprintf(p, "lcd_out: %d\n", is_lcd); + p += sprintf(p, "crt_out: %d\n", is_crt); + p += sprintf(p, "tv_out: %d\n", is_tv); + } else { + p += sprintf(p, "ERROR\n"); + } + + return p; +} + +static unsigned long +write_video(const char* buffer, unsigned long count) +{ + int value; + const char* buffer_end = buffer + count; + int lcd_out = -1; + int crt_out = -1; + int tv_out = -1; + u32 hci_result; + int video_out; + + /* scan expression. 
Multiple expressions may be delimited with ; */ + do { + if (snscanf(buffer, count, " lcd_out : %i", &value) == 1) + lcd_out = value & 1; + else if (snscanf(buffer, count, " crt_out : %i", &value) == 1) + crt_out = value & 1; + else if (snscanf(buffer, count, " tv_out : %i", &value) == 1) + tv_out = value & 1; + /* advance to one character past the next ; */ + do ++buffer; + while ((buffer < buffer_end) && (*(buffer-1) != ';')); + } while (buffer < buffer_end); + + hci_read1(HCI_VIDEO_OUT, &video_out, &hci_result); + if (hci_result == HCI_SUCCESS) { + int new_video_out = video_out; + if (lcd_out != -1) + _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out); + if (crt_out != -1) + _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out); + if (tv_out != -1) + _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out); + /* To avoid unnecessary video disruption, only write the new + * video setting if something changed. */ + if (new_video_out != video_out) + write_acpi_int(METHOD_VIDEO_OUT, new_video_out); + } + + return count; +} + +static char* +read_fan(char* p) +{ + u32 hci_result; + u32 value; + + hci_read1(HCI_FAN, &value, &hci_result); + if (hci_result == HCI_SUCCESS) { + p += sprintf(p, "running: %d\n", (value > 0)); + p += sprintf(p, "force_on: %d\n", force_fan); + } else { + p += sprintf(p, "ERROR\n"); + } + + return p; +} + +static unsigned long +write_fan(const char* buffer, unsigned long count) +{ + int value; + u32 hci_result; + + if (snscanf(buffer, count, " force_on : %i", &value) == 1 && + value >= 0 && value <= 1) { + hci_write1(HCI_FAN, value, &hci_result); + if (hci_result != HCI_SUCCESS) + return -EFAULT; + else + force_fan = value; + } else { + return -EINVAL; + } + + return count; +} + +static char* +read_keys(char* p) +{ + u32 hci_result; + u32 value; + + if (!key_event_valid) { + hci_read1(HCI_SYSTEM_EVENT, &value, &hci_result); + if (hci_result == HCI_SUCCESS) { + key_event_valid = 1; + last_key_event = value; + } else if (hci_result == HCI_EMPTY) { + /* better luck next time */ + } else { + p += sprintf(p, "ERROR\n"); + goto end; + } + } + + p += sprintf(p, "hotkey_ready: %d\n", key_event_valid); + p += sprintf(p, "hotkey: 0x%04x\n", last_key_event); + +end: + return p; +} + +static unsigned long +write_keys(const char* buffer, unsigned long count) +{ + int value; + + if (snscanf(buffer, count, " hotkey_ready : %i", &value) == 1 && + value == 0) { + key_event_valid = 0; + } else { + return -EINVAL; + } + + return count; +} + +static char* +read_version(char* p) +{ + p += sprintf(p, "driver: %s\n", TOSHIBA_ACPI_VERSION); + p += sprintf(p, "proc_interface: %d\n", + PROC_INTERFACE_VERSION); + return p; +} + +/* proc and module init + */ + +#define PROC_TOSHIBA "toshiba" + +ProcItem proc_items[] = +{ + { "lcd" , read_lcd , write_lcd }, + { "video" , read_video , write_video }, + { "fan" , read_fan , write_fan }, + { "keys" , read_keys , write_keys }, + { "version" , read_version , 0 }, + { 0 , 0 , 0 }, +}; + +static acpi_status +add_device(void) +{ + struct proc_dir_entry* proc; + ProcItem* item; + + for (item = proc_items; item->name; ++item) + { + proc = create_proc_read_entry(item->name, + S_IFREG | S_IRUGO | S_IWUSR, + toshiba_proc_dir, (read_proc_t*)dispatch_read, item); + if (proc && item->write_func) + proc->write_proc = (write_proc_t*)dispatch_write; + } + + return(AE_OK); +} + +static acpi_status +remove_device(void) +{ + ProcItem* item; + + for (item = proc_items; item->name; ++item) + remove_proc_entry(item->name, toshiba_proc_dir); + return(AE_OK); +} + +static int 
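+/*
+ * (A usage sketch: with the proc_items table above and toshiba_proc_dir
+ * created under acpi_root_dir, the interface lives in /proc/acpi/toshiba,
+ * and the write grammars of write_lcd()/write_video()/write_fan() accept,
+ * for example:
+ *
+ *   echo "brightness:3"        > /proc/acpi/toshiba/lcd
+ *   echo "lcd_out:1;crt_out:0" > /proc/acpi/toshiba/video
+ *   echo "force_on:1"          > /proc/acpi/toshiba/fan
+ *
+ * Decimal values only; per the ISSUE note in write_lcd(), %i does not
+ * take hex as advertised.)
+ */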
__init +toshiba_acpi_init(void) +{ + acpi_status status = AE_OK; + int value; + u32 hci_result; + + /* simple device detection: try reading an HCI register */ + hci_read1(HCI_LCD_BRIGHTNESS, &value, &hci_result); + if (hci_result != HCI_SUCCESS) + return -ENODEV; + + printk("Toshiba Laptop ACPI Extras version %s\n", TOSHIBA_ACPI_VERSION); + + force_fan = 0; + key_event_valid = 0; + + /* enable event fifo */ + hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result); + + toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir); + if (!toshiba_proc_dir) { + status = AE_ERROR; + } else { + status = add_device(); + if (ACPI_FAILURE(status)) + remove_proc_entry(PROC_TOSHIBA, acpi_root_dir); + } + + return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; +} + +static void __exit +toshiba_acpi_exit(void) +{ + remove_device(); + + if (toshiba_proc_dir) + remove_proc_entry(PROC_TOSHIBA, acpi_root_dir); + + return; +} + +module_init(toshiba_acpi_init); +module_exit(toshiba_acpi_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/Makefile linux.21rc5-ac2/drivers/acpi/utilities/Makefile --- linux.21rc5/drivers/acpi/utilities/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/Makefile 2003-04-28 17:59:26.000000000 +0100 @@ -1,11 +1,10 @@ # # Makefile for all Linux ACPI interpreter subdirectories -# EXCEPT for the ospm directory # O_TARGET := $(notdir $(CURDIR)).o -obj-$(CONFIG_ACPI) := $(patsubst %.c,%.o,$(wildcard *.c)) +obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c)) EXTRA_CFLAGS += $(ACPI_CFLAGS) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utalloc.c linux.21rc5-ac2/drivers/acpi/utilities/utalloc.c --- linux.21rc5/drivers/acpi/utilities/utalloc.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/utalloc.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,44 +1,58 @@ /****************************************************************************** * * Module Name: utalloc - local cache and memory allocation routines - * $Revision: 106 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acparser.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "acglobal.h" +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utalloc") + ACPI_MODULE_NAME ("utalloc") /****************************************************************************** * - * FUNCTION: Acpi_ut_release_to_cache + * FUNCTION: acpi_ut_release_to_cache * - * PARAMETERS: List_id - Memory list/cache ID + * PARAMETERS: list_id - Memory list/cache ID * Object - The object to be released * * RETURN: None @@ -50,13 +64,13 @@ void acpi_ut_release_to_cache ( - u32 list_id, - void *object) + u32 list_id, + void *object) { - ACPI_MEMORY_LIST *cache_info; + struct acpi_memory_list *cache_info; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* If walk cache is full, just free this wallkstate object */ @@ -70,29 +84,31 @@ /* Otherwise put this object back into the cache */ else { - acpi_ut_acquire_mutex (ACPI_MTX_CACHES); + if (ACPI_FAILURE (acpi_ut_acquire_mutex (ACPI_MTX_CACHES))) { + return; + } /* Mark the object as cached */ - MEMSET (object, 0xCA, cache_info->object_size); - ((acpi_operand_object *) object)->common.data_type = ACPI_CACHED_OBJECT; + ACPI_MEMSET (object, 0xCA, cache_info->object_size); + ACPI_SET_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_CACHED); /* Put the object at the head of the cache list */ - * (char **) (((char *) object) + cache_info->link_offset) = cache_info->list_head; + * (ACPI_CAST_INDIRECT_PTR (char, &(((char *) object)[cache_info->link_offset]))) = cache_info->list_head; cache_info->list_head = object; cache_info->cache_depth++; - acpi_ut_release_mutex (ACPI_MTX_CACHES); + (void) acpi_ut_release_mutex (ACPI_MTX_CACHES); } } /****************************************************************************** * - * FUNCTION: Acpi_ut_acquire_from_cache + * FUNCTION: acpi_ut_acquire_from_cache * - * PARAMETERS: List_id - Memory list ID + * PARAMETERS: list_id - Memory list ID * * RETURN: A requested object. NULL if the object could not be * allocated. 
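 *
 *              (Note: the cache is an intrusive singly-linked free list;
 *              the "next" pointer is stored inside each cached object at
 *              byte offset cache_info->link_offset, so popping the head
 *              is, in effect:
 *
 *                object = cache_info->list_head;
 *                cache_info->list_head =
 *                    *(char **)((char *)object + cache_info->link_offset);
 *
 *              which is what the ACPI_CAST_INDIRECT_PTR expression in the
 *              function body spells out.)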
@@ -104,17 +120,20 @@ void * acpi_ut_acquire_from_cache ( - u32 list_id) + u32 list_id) { - ACPI_MEMORY_LIST *cache_info; - void *object; + struct acpi_memory_list *cache_info; + void *object; - PROC_NAME ("Ut_acquire_from_cache"); + ACPI_FUNCTION_NAME ("ut_acquire_from_cache"); cache_info = &acpi_gbl_memory_lists[list_id]; - acpi_ut_acquire_mutex (ACPI_MTX_CACHES); + if (ACPI_FAILURE (acpi_ut_acquire_mutex (ACPI_MTX_CACHES))) { + return (NULL); + } + ACPI_MEM_TRACKING (cache_info->cache_requests++); /* Check the cache first */ @@ -123,7 +142,7 @@ /* There is an object available, use it */ object = cache_info->list_head; - cache_info->list_head = * (char **) (((char *) object) + cache_info->link_offset); + cache_info->list_head = *(ACPI_CAST_INDIRECT_PTR (char, &(((char *) object)[cache_info->link_offset]))); ACPI_MEM_TRACKING (cache_info->cache_hits++); cache_info->cache_depth--; @@ -133,11 +152,13 @@ object, acpi_gbl_memory_lists[list_id].list_name)); #endif - acpi_ut_release_mutex (ACPI_MTX_CACHES); + if (ACPI_FAILURE (acpi_ut_release_mutex (ACPI_MTX_CACHES))) { + return (NULL); + } /* Clear (zero) the previously used Object */ - MEMSET (object, 0, cache_info->object_size); + ACPI_MEMSET (object, 0, cache_info->object_size); } else { @@ -145,7 +166,9 @@ /* Avoid deadlock with ACPI_MEM_CALLOCATE */ - acpi_ut_release_mutex (ACPI_MTX_CACHES); + if (ACPI_FAILURE (acpi_ut_release_mutex (ACPI_MTX_CACHES))) { + return (NULL); + } object = ACPI_MEM_CALLOCATE (cache_info->object_size); ACPI_MEM_TRACKING (cache_info->total_allocated++); @@ -157,9 +180,9 @@ /****************************************************************************** * - * FUNCTION: Acpi_ut_delete_generic_cache + * FUNCTION: acpi_ut_delete_generic_cache * - * PARAMETERS: List_id - Memory list ID + * PARAMETERS: list_id - Memory list ID * * RETURN: None * @@ -169,20 +192,20 @@ void acpi_ut_delete_generic_cache ( - u32 list_id) + u32 list_id) { - ACPI_MEMORY_LIST *cache_info; - char *next; + struct acpi_memory_list *cache_info; + char *next; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); cache_info = &acpi_gbl_memory_lists[list_id]; while (cache_info->list_head) { /* Delete one cached state object */ - next = * (char **) (((char *) cache_info->list_head) + cache_info->link_offset); + next = *(ACPI_CAST_INDIRECT_PTR (char, &(((char *) cache_info->list_head)[cache_info->link_offset]))); ACPI_MEM_FREE (cache_info->list_head); cache_info->list_head = next; @@ -191,26 +214,396 @@ } -#ifdef ACPI_DBG_TRACK_ALLOCATIONS +/******************************************************************************* + * + * FUNCTION: acpi_ut_validate_buffer + * + * PARAMETERS: Buffer - Buffer descriptor to be validated + * + * RETURN: Status + * + * DESCRIPTION: Perform parameter validation checks on an struct acpi_buffer + * + ******************************************************************************/ + +acpi_status +acpi_ut_validate_buffer ( + struct acpi_buffer *buffer) +{ + + /* Obviously, the structure pointer must be valid */ + + if (!buffer) { + return (AE_BAD_PARAMETER); + } + + /* Special semantics for the length */ + + if ((buffer->length == ACPI_NO_BUFFER) || + (buffer->length == ACPI_ALLOCATE_BUFFER) || + (buffer->length == ACPI_ALLOCATE_LOCAL_BUFFER)) { + return (AE_OK); + } + + /* Length is valid, the buffer pointer must be also */ + if (!buffer->pointer) { + return (AE_BAD_PARAMETER); + } + + return (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: 
acpi_ut_initialize_buffer + * + * PARAMETERS: required_length - Length needed + * Buffer - Buffer to be validated + * + * RETURN: Status + * + * DESCRIPTION: Validate that the buffer is of the required length or + * allocate a new buffer. + * + ******************************************************************************/ + +acpi_status +acpi_ut_initialize_buffer ( + struct acpi_buffer *buffer, + acpi_size required_length) +{ + acpi_status status = AE_OK; + + + switch (buffer->length) { + case ACPI_NO_BUFFER: + + /* Set the exception and returned the required length */ + + status = AE_BUFFER_OVERFLOW; + break; + + + case ACPI_ALLOCATE_BUFFER: + + /* Allocate a new buffer */ + + buffer->pointer = acpi_os_allocate (required_length); + if (!buffer->pointer) { + return (AE_NO_MEMORY); + } + + /* Clear the buffer */ + + ACPI_MEMSET (buffer->pointer, 0, required_length); + break; + + + case ACPI_ALLOCATE_LOCAL_BUFFER: + + /* Allocate a new buffer with local interface to allow tracking */ + + buffer->pointer = ACPI_MEM_ALLOCATE (required_length); + if (!buffer->pointer) { + return (AE_NO_MEMORY); + } + + /* Clear the buffer */ + + ACPI_MEMSET (buffer->pointer, 0, required_length); + break; + + + default: + + /* Validate the size of the buffer */ + + if (buffer->length < required_length) { + status = AE_BUFFER_OVERFLOW; + } + break; + } + + buffer->length = required_length; + return (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_allocate + * + * PARAMETERS: Size - Size of the allocation + * Component - Component type of caller + * Module - Source file name of caller + * Line - Line number of caller + * + * RETURN: Address of the allocated memory on success, NULL on failure. + * + * DESCRIPTION: The subsystem's equivalent of malloc. + * + ******************************************************************************/ + +void * +acpi_ut_allocate ( + acpi_size size, + u32 component, + char *module, + u32 line) +{ + void *allocation; + + + ACPI_FUNCTION_TRACE_U32 ("ut_allocate", size); + + + /* Check for an inadvertent size of zero bytes */ + + if (!size) { + _ACPI_REPORT_ERROR (module, line, component, + ("ut_allocate: Attempt to allocate zero bytes\n")); + size = 1; + } + + allocation = acpi_os_allocate (size); + if (!allocation) { + /* Report allocation error */ + _ACPI_REPORT_ERROR (module, line, component, + ("ut_allocate: Could not allocate size %X\n", (u32) size)); + + return_PTR (NULL); + } + + return_PTR (allocation); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_callocate + * + * PARAMETERS: Size - Size of the allocation + * Component - Component type of caller + * Module - Source file name of caller + * Line - Line number of caller + * + * RETURN: Address of the allocated memory on success, NULL on failure. + * + * DESCRIPTION: Subsystem equivalent of calloc. 
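+ *
+ *              (A caller-side sketch tied to acpi_ut_initialize_buffer()
+ *              above: its ACPI_ALLOCATE_BUFFER case is what enables the
+ *              usual allocate-on-demand idiom --
+ *
+ *                struct acpi_buffer results = { ACPI_ALLOCATE_BUFFER, NULL };
+ *
+ *                status = acpi_evaluate_object(handle, "_TMP", NULL, &results);
+ *                if (ACPI_SUCCESS (status)) {
+ *                        ... use results.pointer, then ...
+ *                        acpi_os_free (results.pointer);
+ *                }
+ *
+ *              where the interpreter fills in the required length and
+ *              allocates on the caller's behalf.  The handle and method
+ *              name here are placeholders.)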
+ * + ******************************************************************************/ + +void * +acpi_ut_callocate ( + acpi_size size, + u32 component, + char *module, + u32 line) +{ + void *allocation; + + + ACPI_FUNCTION_TRACE_U32 ("ut_callocate", size); + + + /* Check for an inadvertent size of zero bytes */ + + if (!size) { + _ACPI_REPORT_ERROR (module, line, component, + ("ut_callocate: Attempt to allocate zero bytes\n")); + return_PTR (NULL); + } + + allocation = acpi_os_allocate (size); + if (!allocation) { + /* Report allocation error */ + + _ACPI_REPORT_ERROR (module, line, component, + ("ut_callocate: Could not allocate size %X\n", (u32) size)); + return_PTR (NULL); + } + + /* Clear the memory block */ + + ACPI_MEMSET (allocation, 0, size); + return_PTR (allocation); +} + + +#ifdef ACPI_DBG_TRACK_ALLOCATIONS /* * These procedures are used for tracking memory leaks in the subsystem, and * they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set. * * Each memory allocation is tracked via a doubly linked list. Each * element contains the caller's component, module name, function name, and - * line number. Acpi_ut_allocate and Acpi_ut_callocate call - * Acpi_ut_track_allocation to add an element to the list; deletion - * occurs in the body of Acpi_ut_free. + * line number. acpi_ut_allocate and acpi_ut_callocate call + * acpi_ut_track_allocation to add an element to the list; deletion + * occurs in the body of acpi_ut_free. */ /******************************************************************************* * - * FUNCTION: Acpi_ut_find_allocation + * FUNCTION: acpi_ut_allocate_and_track + * + * PARAMETERS: Size - Size of the allocation + * Component - Component type of caller + * Module - Source file name of caller + * Line - Line number of caller + * + * RETURN: Address of the allocated memory on success, NULL on failure. + * + * DESCRIPTION: The subsystem's equivalent of malloc. + * + ******************************************************************************/ + +void * +acpi_ut_allocate_and_track ( + acpi_size size, + u32 component, + char *module, + u32 line) +{ + struct acpi_debug_mem_block *allocation; + acpi_status status; + + + allocation = acpi_ut_allocate (size + sizeof (struct acpi_debug_mem_block), component, + module, line); + if (!allocation) { + return (NULL); + } + + status = acpi_ut_track_allocation (ACPI_MEM_LIST_GLOBAL, allocation, size, + ACPI_MEM_MALLOC, component, module, line); + if (ACPI_FAILURE (status)) { + acpi_os_free (allocation); + return (NULL); + } + + acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].total_allocated++; + acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].current_total_size += (u32) size; + + return ((void *) &allocation->user_space); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_callocate_and_track + * + * PARAMETERS: Size - Size of the allocation + * Component - Component type of caller + * Module - Source file name of caller + * Line - Line number of caller + * + * RETURN: Address of the allocated memory on success, NULL on failure. + * + * DESCRIPTION: Subsystem equivalent of calloc. 
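+ *
+ *              (Note: the tracked allocators over-allocate by
+ *              sizeof(struct acpi_debug_mem_block) and return the address
+ *              of the embedded user_space member, giving the layout
+ *
+ *                [ debug header | caller-visible bytes ... ]
+ *                               ^ returned pointer (&allocation->user_space)
+ *
+ *              so acpi_ut_free_and_track() recovers the header by stepping
+ *              back sizeof(struct acpi_debug_mem_header).)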
+ * + ******************************************************************************/ + +void * +acpi_ut_callocate_and_track ( + acpi_size size, + u32 component, + char *module, + u32 line) +{ + struct acpi_debug_mem_block *allocation; + acpi_status status; + + + allocation = acpi_ut_callocate (size + sizeof (struct acpi_debug_mem_block), component, + module, line); + if (!allocation) { + /* Report allocation error */ + + _ACPI_REPORT_ERROR (module, line, component, + ("ut_callocate: Could not allocate size %X\n", (u32) size)); + return (NULL); + } + + status = acpi_ut_track_allocation (ACPI_MEM_LIST_GLOBAL, allocation, size, + ACPI_MEM_CALLOC, component, module, line); + if (ACPI_FAILURE (status)) { + acpi_os_free (allocation); + return (NULL); + } + + acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].total_allocated++; + acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].current_total_size += (u32) size; + + return ((void *) &allocation->user_space); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_free_and_track + * + * PARAMETERS: Allocation - Address of the memory to deallocate + * Component - Component type of caller + * Module - Source file name of caller + * Line - Line number of caller + * + * RETURN: None + * + * DESCRIPTION: Frees the memory at Allocation + * + ******************************************************************************/ + +void +acpi_ut_free_and_track ( + void *allocation, + u32 component, + char *module, + u32 line) +{ + struct acpi_debug_mem_block *debug_block; + acpi_status status; + + + ACPI_FUNCTION_TRACE_PTR ("ut_free", allocation); + + + if (NULL == allocation) { + _ACPI_REPORT_ERROR (module, line, component, + ("acpi_ut_free: Attempt to delete a NULL address\n")); + + return_VOID; + } + + debug_block = ACPI_CAST_PTR (struct acpi_debug_mem_block, + (((char *) allocation) - sizeof (struct acpi_debug_mem_header))); + + acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].total_freed++; + acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].current_total_size -= debug_block->size; + + status = acpi_ut_remove_allocation (ACPI_MEM_LIST_GLOBAL, debug_block, + component, module, line); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not free memory, %s\n", + acpi_format_exception (status))); + } + + acpi_os_free (debug_block); + + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation)); + + return_VOID; +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_find_allocation * - * PARAMETERS: Address - Address of allocated memory + * PARAMETERS: Allocation - Address of allocated memory * * RETURN: A list element if found; NULL otherwise. * @@ -218,15 +611,15 @@ * ******************************************************************************/ -acpi_debug_mem_block * +struct acpi_debug_mem_block * acpi_ut_find_allocation ( - u32 list_id, - void *address) + u32 list_id, + void *allocation) { - acpi_debug_mem_block *element; + struct acpi_debug_mem_block *element; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); if (list_id > ACPI_MEM_LIST_MAX) { @@ -238,7 +631,7 @@ /* Search for the address. 
*/ while (element) { - if (element == address) { + if (element == allocation) { return (element); } @@ -251,11 +644,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_track_allocation + * FUNCTION: acpi_ut_track_allocation * - * PARAMETERS: Address - Address of allocated memory + * PARAMETERS: Allocation - Address of allocated memory * Size - Size of the allocation - * Alloc_type - MEM_MALLOC or MEM_CALLOC + * alloc_type - MEM_MALLOC or MEM_CALLOC * Component - Component type of caller * Module - Source file name of caller * Line - Line number of caller @@ -268,20 +661,20 @@ acpi_status acpi_ut_track_allocation ( - u32 list_id, - acpi_debug_mem_block *address, - u32 size, - u8 alloc_type, - u32 component, - NATIVE_CHAR *module, - u32 line) -{ - ACPI_MEMORY_LIST *mem_list; - acpi_debug_mem_block *element; - acpi_status status = AE_OK; + u32 list_id, + struct acpi_debug_mem_block *allocation, + acpi_size size, + u8 alloc_type, + u32 component, + char *module, + u32 line) +{ + struct acpi_memory_list *mem_list; + struct acpi_debug_mem_block *element; + acpi_status status = AE_OK; - FUNCTION_TRACE_PTR ("Ut_track_allocation", address); + ACPI_FUNCTION_TRACE_PTR ("ut_track_allocation", allocation); if (list_id > ACPI_MEM_LIST_MAX) { @@ -289,55 +682,58 @@ } mem_list = &acpi_gbl_memory_lists[list_id]; - acpi_ut_acquire_mutex (ACPI_MTX_MEMORY); + status = acpi_ut_acquire_mutex (ACPI_MTX_MEMORY); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* * Search list for this address to make sure it is not already on the list. * This will catch several kinds of problems. */ - element = acpi_ut_find_allocation (list_id, address); + element = acpi_ut_find_allocation (list_id, allocation); if (element) { - REPORT_ERROR (("Ut_track_allocation: Address already present in list! (%p)\n", - address)); + ACPI_REPORT_ERROR (("ut_track_allocation: Allocation already present in list! (%p)\n", + allocation)); - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Element %p Address %p\n", element, address)); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Element %p Address %p\n", element, allocation)); goto unlock_and_exit; } /* Fill in the instance data. 
*/ - address->size = size; - address->alloc_type = alloc_type; - address->component = component; - address->line = line; + allocation->size = (u32) size; + allocation->alloc_type = alloc_type; + allocation->component = component; + allocation->line = line; - STRNCPY (address->module, module, MAX_MODULE_NAME); + ACPI_STRNCPY (allocation->module, module, ACPI_MAX_MODULE_NAME); /* Insert at list head */ if (mem_list->list_head) { - ((acpi_debug_mem_block *)(mem_list->list_head))->previous = address; + ((struct acpi_debug_mem_block *)(mem_list->list_head))->previous = allocation; } - address->next = mem_list->list_head; - address->previous = NULL; + allocation->next = mem_list->list_head; + allocation->previous = NULL; - mem_list->list_head = address; + mem_list->list_head = allocation; unlock_and_exit: - acpi_ut_release_mutex (ACPI_MTX_MEMORY); + status = acpi_ut_release_mutex (ACPI_MTX_MEMORY); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_remove_allocation + * FUNCTION: acpi_ut_remove_allocation * - * PARAMETERS: Address - Address of allocated memory + * PARAMETERS: Allocation - Address of allocated memory * Component - Component type of caller * Module - Source file name of caller * Line - Line number of caller @@ -350,16 +746,17 @@ acpi_status acpi_ut_remove_allocation ( - u32 list_id, - acpi_debug_mem_block *address, - u32 component, - NATIVE_CHAR *module, - u32 line) + u32 list_id, + struct acpi_debug_mem_block *allocation, + u32 component, + char *module, + u32 line) { - ACPI_MEMORY_LIST *mem_list; + struct acpi_memory_list *mem_list; + acpi_status status; - FUNCTION_TRACE ("Ut_remove_allocation"); + ACPI_FUNCTION_TRACE ("ut_remove_allocation"); if (list_id > ACPI_MEM_LIST_MAX) { @@ -370,43 +767,44 @@ if (NULL == mem_list->list_head) { /* No allocations! 
*/ - _REPORT_ERROR (module, line, component, - ("Ut_remove_allocation: Empty allocation list, nothing to free!\n")); + _ACPI_REPORT_ERROR (module, line, component, + ("ut_remove_allocation: Empty allocation list, nothing to free!\n")); return_ACPI_STATUS (AE_OK); } - - acpi_ut_acquire_mutex (ACPI_MTX_MEMORY); + status = acpi_ut_acquire_mutex (ACPI_MTX_MEMORY); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* Unlink */ - if (address->previous) { - (address->previous)->next = address->next; + if (allocation->previous) { + (allocation->previous)->next = allocation->next; } else { - mem_list->list_head = address->next; + mem_list->list_head = allocation->next; } - if (address->next) { - (address->next)->previous = address->previous; + if (allocation->next) { + (allocation->next)->previous = allocation->previous; } - /* Mark the segment as deleted */ - MEMSET (&address->user_space, 0xEA, address->size); + ACPI_MEMSET (&allocation->user_space, 0xEA, allocation->size); - ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Freeing size %X\n", address->size)); + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Freeing size %X\n", allocation->size)); - acpi_ut_release_mutex (ACPI_MTX_MEMORY); - return_ACPI_STATUS (AE_OK); + status = acpi_ut_release_mutex (ACPI_MTX_MEMORY); + return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_dump_allocation_info + * FUNCTION: acpi_ut_dump_allocation_info * * PARAMETERS: * @@ -421,43 +819,43 @@ void) { /* - ACPI_MEMORY_LIST *Mem_list; + struct acpi_memory_list *mem_list; */ - FUNCTION_TRACE ("Ut_dump_allocation_info"); + ACPI_FUNCTION_TRACE ("ut_dump_allocation_info"); /* ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Current allocations", - Mem_list->Current_count, - ROUND_UP_TO_1K (Mem_list->Current_size))); + mem_list->current_count, + ROUND_UP_TO_1K (mem_list->current_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations", - Mem_list->Max_concurrent_count, - ROUND_UP_TO_1K (Mem_list->Max_concurrent_size))); + mem_list->max_concurrent_count, + ROUND_UP_TO_1K (mem_list->max_concurrent_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects", - Running_object_count, - ROUND_UP_TO_1K (Running_object_size))); + running_object_count, + ROUND_UP_TO_1K (running_object_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Total (all) allocations", - Running_alloc_count, - ROUND_UP_TO_1K (Running_alloc_size))); + running_alloc_count, + ROUND_UP_TO_1K (running_alloc_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Current Nodes", - Acpi_gbl_Current_node_count, - ROUND_UP_TO_1K (Acpi_gbl_Current_node_size))); + acpi_gbl_current_node_count, + ROUND_UP_TO_1K (acpi_gbl_current_node_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Max Nodes", - Acpi_gbl_Max_concurrent_node_count, - ROUND_UP_TO_1K ((Acpi_gbl_Max_concurrent_node_count * sizeof (acpi_namespace_node))))); + acpi_gbl_max_concurrent_node_count, + ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count * sizeof (struct acpi_namespace_node))))); */ return_VOID; } @@ -465,7 +863,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_dump_allocations + * FUNCTION: acpi_ut_dump_allocations * * PARAMETERS: Component - Component(s) to 
dump info for. * Module - Module to dump info for. NULL means all. @@ -478,306 +876,124 @@ void acpi_ut_dump_allocations ( - u32 component, - NATIVE_CHAR *module) + u32 component, + char *module) { - acpi_debug_mem_block *element; - u32 i; - + struct acpi_debug_mem_block *element; + union acpi_descriptor *descriptor; + u32 num_outstanding = 0; - FUNCTION_TRACE ("Ut_dump_allocations"); - - element = acpi_gbl_memory_lists[0].list_head; - if (element == NULL) { - ACPI_DEBUG_PRINT ((ACPI_DB_OK, - "No outstanding allocations.\n")); - return_VOID; - } + ACPI_FUNCTION_TRACE ("ut_dump_allocations"); /* * Walk the allocation list. */ - acpi_ut_acquire_mutex (ACPI_MTX_MEMORY); - - ACPI_DEBUG_PRINT ((ACPI_DB_OK, - "Outstanding allocations:\n")); + if (ACPI_FAILURE (acpi_ut_acquire_mutex (ACPI_MTX_MEMORY))) { + return; + } - for (i = 1; ; i++) /* Just a counter */ { + element = acpi_gbl_memory_lists[0].list_head; + while (element) { if ((element->component & component) && - ((module == NULL) || (0 == STRCMP (module, element->module)))) { - if (((acpi_operand_object *)(&element->user_space))->common.type != ACPI_CACHED_OBJECT) { - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - "%p Len %04X %9.9s-%d", - &element->user_space, element->size, element->module, - element->line)); + ((module == NULL) || (0 == ACPI_STRCMP (module, element->module)))) { + /* Ignore allocated objects that are in a cache */ + + descriptor = ACPI_CAST_PTR (union acpi_descriptor, &element->user_space); + if (descriptor->descriptor_id != ACPI_DESC_TYPE_CACHED) { + acpi_os_printf ("%p Len %04X %9.9s-%d ", + descriptor, element->size, element->module, + element->line); /* Most of the elements will be internal objects. */ - switch (((acpi_operand_object *) - (&element->user_space))->common.data_type) { - case ACPI_DESC_TYPE_INTERNAL: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " Obj_type %12.12s R%d", - acpi_ut_get_type_name (((acpi_operand_object *)(&element->user_space))->common.type), - ((acpi_operand_object *)(&element->user_space))->common.reference_count)); + switch (ACPI_GET_DESCRIPTOR_TYPE (descriptor)) { + case ACPI_DESC_TYPE_OPERAND: + acpi_os_printf ("obj_type %12.12s R%hd", + acpi_ut_get_type_name (descriptor->object.common.type), + descriptor->object.common.reference_count); break; case ACPI_DESC_TYPE_PARSER: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " Parse_obj Opcode %04X", - ((acpi_parse_object *)(&element->user_space))->opcode)); + acpi_os_printf ("parse_obj aml_opcode %04hX", + descriptor->op.asl.aml_opcode); break; case ACPI_DESC_TYPE_NAMED: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " Node %4.4s", - (char*)&((acpi_namespace_node *)(&element->user_space))->name)); + acpi_os_printf ("Node %4.4s", + descriptor->node.name.ascii); break; case ACPI_DESC_TYPE_STATE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " Untyped State_obj")); + acpi_os_printf ("Untyped state_obj"); break; case ACPI_DESC_TYPE_STATE_UPDATE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " UPDATE State_obj")); + acpi_os_printf ("UPDATE state_obj"); break; case ACPI_DESC_TYPE_STATE_PACKAGE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " PACKAGE State_obj")); + acpi_os_printf ("PACKAGE state_obj"); break; case ACPI_DESC_TYPE_STATE_CONTROL: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " CONTROL State_obj")); + acpi_os_printf ("CONTROL state_obj"); break; case ACPI_DESC_TYPE_STATE_RPSCOPE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " ROOT-PARSE-SCOPE State_obj")); + acpi_os_printf ("ROOT-PARSE-SCOPE state_obj"); break; case ACPI_DESC_TYPE_STATE_PSCOPE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " PARSE-SCOPE 
State_obj")); + acpi_os_printf ("PARSE-SCOPE state_obj"); break; case ACPI_DESC_TYPE_STATE_WSCOPE: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " WALK-SCOPE State_obj")); + acpi_os_printf ("WALK-SCOPE state_obj"); break; case ACPI_DESC_TYPE_STATE_RESULT: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " RESULT State_obj")); + acpi_os_printf ("RESULT state_obj"); break; case ACPI_DESC_TYPE_STATE_NOTIFY: - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, - " NOTIFY State_obj")); + acpi_os_printf ("NOTIFY state_obj"); + break; + + case ACPI_DESC_TYPE_STATE_THREAD: + acpi_os_printf ("THREAD state_obj"); + break; + + default: + /* All types should appear above */ break; } - ACPI_DEBUG_PRINT_RAW ((ACPI_DB_OK, "\n")); + acpi_os_printf ( "\n"); + num_outstanding++; } } - - if (element->next == NULL) { - break; - } - element = element->next; } - acpi_ut_release_mutex (ACPI_MTX_MEMORY); - - ACPI_DEBUG_PRINT ((ACPI_DB_OK, - "Total number of unfreed allocations = %d(%X)\n", i,i)); - - - return_VOID; - -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ut_allocate - * - * PARAMETERS: Size - Size of the allocation - * Component - Component type of caller - * Module - Source file name of caller - * Line - Line number of caller - * - * RETURN: Address of the allocated memory on success, NULL on failure. - * - * DESCRIPTION: The subsystem's equivalent of malloc. - * - ******************************************************************************/ - -void * -acpi_ut_allocate ( - u32 size, - u32 component, - NATIVE_CHAR *module, - u32 line) -{ - acpi_debug_mem_block *address; - acpi_status status; + (void) acpi_ut_release_mutex (ACPI_MTX_MEMORY); + /* Print summary */ - FUNCTION_TRACE_U32 ("Ut_allocate", size); - - - /* Check for an inadvertent size of zero bytes */ - - if (!size) { - _REPORT_ERROR (module, line, component, - ("Ut_allocate: Attempt to allocate zero bytes\n")); - size = 1; - } - - address = acpi_os_allocate (size + sizeof (acpi_debug_mem_block)); - if (!address) { - /* Report allocation error */ - - _REPORT_ERROR (module, line, component, - ("Ut_allocate: Could not allocate size %X\n", size)); - - return_PTR (NULL); - } - - status = acpi_ut_track_allocation (ACPI_MEM_LIST_GLOBAL, address, size, - MEM_MALLOC, component, module, line); - if (ACPI_FAILURE (status)) { - acpi_os_free (address); - return_PTR (NULL); - } - - acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].total_allocated++; - acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].current_total_size += size; - - ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "%p Size %X\n", address, size)); - - return_PTR ((void *) &address->user_space); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ut_callocate - * - * PARAMETERS: Size - Size of the allocation - * Component - Component type of caller - * Module - Source file name of caller - * Line - Line number of caller - * - * RETURN: Address of the allocated memory on success, NULL on failure. - * - * DESCRIPTION: Subsystem equivalent of calloc. 
- * - ******************************************************************************/ - -void * -acpi_ut_callocate ( - u32 size, - u32 component, - NATIVE_CHAR *module, - u32 line) -{ - acpi_debug_mem_block *address; - acpi_status status; - - - FUNCTION_TRACE_U32 ("Ut_callocate", size); - - - /* Check for an inadvertent size of zero bytes */ - - if (!size) { - _REPORT_ERROR (module, line, component, - ("Ut_callocate: Attempt to allocate zero bytes\n")); - return_PTR (NULL); - } - - - address = acpi_os_callocate (size + sizeof (acpi_debug_mem_block)); - if (!address) { - /* Report allocation error */ - - _REPORT_ERROR (module, line, component, - ("Ut_callocate: Could not allocate size %X\n", size)); - return_PTR (NULL); - } - - status = acpi_ut_track_allocation (ACPI_MEM_LIST_GLOBAL, address, size, - MEM_CALLOC, component, module, line); - if (ACPI_FAILURE (status)) { - acpi_os_free (address); - return_PTR (NULL); + if (!num_outstanding) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "No outstanding allocations.\n")); } - - acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].total_allocated++; - acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].current_total_size += size; - - ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "%p Size %X\n", address, size)); - return_PTR ((void *) &address->user_space); -} - - -/******************************************************************************* - * - * FUNCTION: Acpi_ut_free - * - * PARAMETERS: Address - Address of the memory to deallocate - * Component - Component type of caller - * Module - Source file name of caller - * Line - Line number of caller - * - * RETURN: None - * - * DESCRIPTION: Frees the memory at Address - * - ******************************************************************************/ - -void -acpi_ut_free ( - void *address, - u32 component, - NATIVE_CHAR *module, - u32 line) -{ - acpi_debug_mem_block *debug_block; - - - FUNCTION_TRACE_PTR ("Ut_free", address); - - - if (NULL == address) { - _REPORT_ERROR (module, line, component, - ("Acpi_ut_free: Trying to delete a NULL address\n")); - - return_VOID; + else { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "%d(%X) Outstanding allocations\n", + num_outstanding, num_outstanding)); } - debug_block = (acpi_debug_mem_block *) - (((char *) address) - sizeof (acpi_debug_mem_header)); - - acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].total_freed++; - acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].current_total_size -= debug_block->size; - - acpi_ut_remove_allocation (ACPI_MEM_LIST_GLOBAL, debug_block, - component, module, line); - acpi_os_free (debug_block); - - ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "%p freed\n", address)); - return_VOID; } + #endif /* #ifdef ACPI_DBG_TRACK_ALLOCATIONS */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utcopy.c linux.21rc5-ac2/drivers/acpi/utilities/utcopy.c --- linux.21rc5/drivers/acpi/utilities/utcopy.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/utcopy.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,46 +1,62 @@ /****************************************************************************** * * Module Name: utcopy - Internal to external object translation utilities - * $Revision: 83 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. 
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "amlcode.h" +#include +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utcopy") + ACPI_MODULE_NAME ("utcopy") /******************************************************************************* * - * FUNCTION: Acpi_ut_copy_isimple_to_esimple + * FUNCTION: acpi_ut_copy_isimple_to_esimple * - * PARAMETERS: *Internal_object - Pointer to the object we are examining + * PARAMETERS: *internal_object - Pointer to the object we are examining * *Buffer - Where the object is returned - * *Space_used - Where the data length is returned + * *space_used - Where the data length is returned * * RETURN: Status * @@ -53,106 +69,77 @@ static acpi_status acpi_ut_copy_isimple_to_esimple ( - acpi_operand_object *internal_object, - acpi_object *external_object, - u8 *data_space, - u32 *buffer_space_used) + union acpi_operand_object *internal_object, + union acpi_object *external_object, + u8 *data_space, + acpi_size *buffer_space_used) { - u32 length = 0; - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ut_copy_isimple_to_esimple"); + ACPI_FUNCTION_TRACE ("ut_copy_isimple_to_esimple"); + *buffer_space_used = 0; + /* * Check for NULL object case (could be an uninitialized - * package element + * package element) */ if (!internal_object) { - *buffer_space_used = 0; return_ACPI_STATUS (AE_OK); } /* Always clear the external object */ - MEMSET (external_object, 0, sizeof (acpi_object)); + ACPI_MEMSET (external_object, 0, sizeof (union acpi_object)); /* * In general, the external object will be the same type as * the internal object */ - external_object->type = internal_object->common.type; + external_object->type = ACPI_GET_OBJECT_TYPE (internal_object); /* However, only a limited number of external types are supported */ - switch (internal_object->common.type) { - + switch (ACPI_GET_OBJECT_TYPE (internal_object)) { case ACPI_TYPE_STRING: - length = internal_object->string.length + 1; + external_object->string.pointer = (char *) data_space; external_object->string.length = internal_object->string.length; - external_object->string.pointer = (NATIVE_CHAR *) data_space; - MEMCPY ((void *) data_space, (void *) internal_object->string.pointer, length); + *buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD ((acpi_size) internal_object->string.length + 1); + + ACPI_MEMCPY ((void *) data_space, (void *) internal_object->string.pointer, + (acpi_size) internal_object->string.length + 1); break; case ACPI_TYPE_BUFFER: - length = internal_object->buffer.length; - external_object->buffer.length = internal_object->buffer.length; external_object->buffer.pointer = data_space; - MEMCPY ((void *) data_space, (void *) internal_object->buffer.pointer, length); + external_object->buffer.length = internal_object->buffer.length; + *buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD (internal_object->string.length); + + ACPI_MEMCPY ((void *) data_space, (void *) internal_object->buffer.pointer, + internal_object->buffer.length); break; case ACPI_TYPE_INTEGER: - external_object->integer.value= internal_object->integer.value; + external_object->integer.value = internal_object->integer.value; break; - case INTERNAL_TYPE_REFERENCE: + case ACPI_TYPE_LOCAL_REFERENCE: /* * This is an object reference. Attempt to dereference it. 
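 * (After this change, only the AML_INT_NAMEPATH_OP case remains distinguished below; the deleted branches expanded the constant opcodes Zero, One, Ones and Revision into integer objects and converted named references into pathname strings via acpi_ns_handle_to_pathname.)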
*/ switch (internal_object->reference.opcode) { - case AML_ZERO_OP: - external_object->type = ACPI_TYPE_INTEGER; - external_object->integer.value = 0; - break; - - case AML_ONE_OP: - external_object->type = ACPI_TYPE_INTEGER; - external_object->integer.value = 1; - break; - - case AML_ONES_OP: - external_object->type = ACPI_TYPE_INTEGER; - external_object->integer.value = ACPI_INTEGER_MAX; - break; - - case AML_REVISION_OP: - external_object->type = ACPI_TYPE_INTEGER; - external_object->integer.value = ACPI_CA_SUPPORT_LEVEL; - break; - case AML_INT_NAMEPATH_OP: - /* - * This is a named reference, get the string. We already know that - * we have room for it, use max length - */ - length = MAX_STRING_LENGTH; - external_object->type = ACPI_TYPE_STRING; - external_object->string.pointer = (NATIVE_CHAR *) data_space; - status = acpi_ns_handle_to_pathname ((acpi_handle *) internal_object->reference.node, - &length, (char *) data_space); - - /* Converted (external) string length is returned from above */ - external_object->string.length = length; - break; + /* For namepath, return the object handle ("reference") */ default: /* @@ -168,7 +155,7 @@ case ACPI_TYPE_PROCESSOR: - external_object->processor.proc_id = internal_object->processor.proc_id; + external_object->processor.proc_id = internal_object->processor.proc_id; external_object->processor.pblk_address = internal_object->processor.address; external_object->processor.pblk_length = internal_object->processor.length; break; @@ -189,21 +176,17 @@ * There is no corresponding external object type */ return_ACPI_STATUS (AE_SUPPORT); - break; } - - *buffer_space_used = (u32) ROUND_UP_TO_NATIVE_WORD (length); - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_copy_ielement_to_eelement + * FUNCTION: acpi_ut_copy_ielement_to_eelement * - * PARAMETERS: ACPI_PKG_CALLBACK + * PARAMETERS: acpi_pkg_callback * * RETURN: Status * @@ -213,39 +196,39 @@ acpi_status acpi_ut_copy_ielement_to_eelement ( - u8 object_type, - acpi_operand_object *source_object, - acpi_generic_state *state, - void *context) -{ - acpi_status status = AE_OK; - acpi_pkg_info *info = (acpi_pkg_info *) context; - u32 object_space; - u32 this_index; - acpi_object *target_object; + u8 object_type, + union acpi_operand_object *source_object, + union acpi_generic_state *state, + void *context) +{ + acpi_status status = AE_OK; + struct acpi_pkg_info *info = (struct acpi_pkg_info *) context; + acpi_size object_space; + u32 this_index; + union acpi_object *target_object; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); this_index = state->pkg.index; - target_object = (acpi_object *) - &((acpi_object *)(state->pkg.dest_object))->package.elements[this_index]; + target_object = (union acpi_object *) + &((union acpi_object *)(state->pkg.dest_object))->package.elements[this_index]; switch (object_type) { case ACPI_COPY_TYPE_SIMPLE: /* - * This is a simple or null object -- get the size + * This is a simple or null object */ status = acpi_ut_copy_isimple_to_esimple (source_object, target_object, info->free_space, &object_space); if (ACPI_FAILURE (status)) { return (status); } - break; + case ACPI_COPY_TYPE_PACKAGE: /* @@ -253,7 +236,7 @@ */ target_object->type = ACPI_TYPE_PACKAGE; target_object->package.count = source_object->package.count; - target_object->package.elements = (acpi_object *) info->free_space; + target_object->package.elements = ACPI_CAST_PTR (union acpi_object, info->free_space); /* * Pass the new 
package object back to the package walk routine @@ -264,29 +247,28 @@ * Save space for the array of objects (Package elements) * update the buffer length counter */ - object_space = (u32) ROUND_UP_TO_NATIVE_WORD ( - target_object->package.count * sizeof (acpi_object)); + object_space = ACPI_ROUND_UP_TO_NATIVE_WORD ( + (acpi_size) target_object->package.count * sizeof (union acpi_object)); break; + default: return (AE_BAD_PARAMETER); } - info->free_space += object_space; info->length += object_space; - return (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_copy_ipackage_to_epackage + * FUNCTION: acpi_ut_copy_ipackage_to_epackage * - * PARAMETERS: *Internal_object - Pointer to the object we are returning + * PARAMETERS: *internal_object - Pointer to the object we are returning * *Buffer - Where the object is returned - * *Space_used - Where the object length is returned + * *space_used - Where the object length is returned * * RETURN: Status * @@ -295,66 +277,64 @@ * * The buffer is assumed to have sufficient space for the object. * The caller must have verified the buffer length needed using the - * Acpi_ut_get_object_size function before calling this function. + * acpi_ut_get_object_size function before calling this function. * ******************************************************************************/ static acpi_status acpi_ut_copy_ipackage_to_epackage ( - acpi_operand_object *internal_object, - u8 *buffer, - u32 *space_used) + union acpi_operand_object *internal_object, + u8 *buffer, + acpi_size *space_used) { - acpi_object *external_object; - acpi_status status; - acpi_pkg_info info; + union acpi_object *external_object; + acpi_status status; + struct acpi_pkg_info info; - FUNCTION_TRACE ("Ut_copy_ipackage_to_epackage"); + ACPI_FUNCTION_TRACE ("ut_copy_ipackage_to_epackage"); /* * First package at head of the buffer */ - external_object = (acpi_object *) buffer; + external_object = ACPI_CAST_PTR (union acpi_object, buffer); /* * Free space begins right after the first package */ - info.length = 0; + info.length = ACPI_ROUND_UP_TO_NATIVE_WORD (sizeof (union acpi_object)); + info.free_space = buffer + ACPI_ROUND_UP_TO_NATIVE_WORD (sizeof (union acpi_object)); info.object_space = 0; info.num_packages = 1; - info.free_space = buffer + ROUND_UP_TO_NATIVE_WORD (sizeof (acpi_object)); - - - external_object->type = internal_object->common.type; - external_object->package.count = internal_object->package.count; - external_object->package.elements = (acpi_object *) info.free_space; + external_object->type = ACPI_GET_OBJECT_TYPE (internal_object); + external_object->package.count = internal_object->package.count; + external_object->package.elements = ACPI_CAST_PTR (union acpi_object, info.free_space); /* - * Build an array of ACPI_OBJECTS in the buffer + * Leave room for an array of ACPI_OBJECTS in the buffer * and move the free space past it */ + info.length += (acpi_size) external_object->package.count * + ACPI_ROUND_UP_TO_NATIVE_WORD (sizeof (union acpi_object)); info.free_space += external_object->package.count * - ROUND_UP_TO_NATIVE_WORD (sizeof (acpi_object)); - + ACPI_ROUND_UP_TO_NATIVE_WORD (sizeof (union acpi_object)); status = acpi_ut_walk_package_tree (internal_object, external_object, acpi_ut_copy_ielement_to_eelement, &info); *space_used = info.length; - return_ACPI_STATUS (status); - } + /******************************************************************************* * - * FUNCTION: 
Acpi_ut_copy_iobject_to_eobject + * FUNCTION: acpi_ut_copy_iobject_to_eobject * - * PARAMETERS: *Internal_object - The internal object to be converted - * *Buffer_ptr - Where the object is returned + * PARAMETERS: *internal_object - The internal object to be converted + * *buffer_ptr - Where the object is returned * * RETURN: Status * @@ -365,16 +345,16 @@ acpi_status acpi_ut_copy_iobject_to_eobject ( - acpi_operand_object *internal_object, - acpi_buffer *ret_buffer) + union acpi_operand_object *internal_object, + struct acpi_buffer *ret_buffer) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Ut_copy_iobject_to_eobject"); + ACPI_FUNCTION_TRACE ("ut_copy_iobject_to_eobject"); - if (IS_THIS_OBJECT_TYPE (internal_object, ACPI_TYPE_PACKAGE)) { + if (ACPI_GET_OBJECT_TYPE (internal_object) == ACPI_TYPE_PACKAGE) { /* * Package object: Copy all subobjects (including * nested packages) @@ -382,21 +362,20 @@ status = acpi_ut_copy_ipackage_to_epackage (internal_object, ret_buffer->pointer, &ret_buffer->length); } - else { /* * Build a simple object (no nested objects) */ status = acpi_ut_copy_isimple_to_esimple (internal_object, - (acpi_object *) ret_buffer->pointer, + (union acpi_object *) ret_buffer->pointer, ((u8 *) ret_buffer->pointer + - ROUND_UP_TO_NATIVE_WORD (sizeof (acpi_object))), + ACPI_ROUND_UP_TO_NATIVE_WORD (sizeof (union acpi_object))), &ret_buffer->length); /* * build simple does not include the object size in the length * so we add it in here */ - ret_buffer->length += sizeof (acpi_object); + ret_buffer->length += sizeof (union acpi_object); } return_ACPI_STATUS (status); @@ -405,10 +384,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_copy_esimple_to_isimple + * FUNCTION: acpi_ut_copy_esimple_to_isimple * - * PARAMETERS: *External_object - The external object to be converted - * *Internal_object - Where the internal object is returned + * PARAMETERS: *external_object - The external object to be converted + * *internal_object - Where the internal object is returned * * RETURN: Status * @@ -421,13 +400,13 @@ acpi_status acpi_ut_copy_esimple_to_isimple ( - acpi_object *external_object, - acpi_operand_object **ret_internal_object) + union acpi_object *external_object, + union acpi_operand_object **ret_internal_object) { - acpi_operand_object *internal_object; + union acpi_operand_object *internal_object; - FUNCTION_TRACE ("Ut_copy_esimple_to_isimple"); + ACPI_FUNCTION_TRACE ("ut_copy_esimple_to_isimple"); /* @@ -449,7 +428,6 @@ * Whatever other type -- it is not supported */ return_ACPI_STATUS (AE_SUPPORT); - break; } @@ -459,14 +437,15 @@ case ACPI_TYPE_STRING: - internal_object->string.pointer = ACPI_MEM_CALLOCATE (external_object->string.length + 1); + internal_object->string.pointer = + ACPI_MEM_CALLOCATE ((acpi_size) external_object->string.length + 1); if (!internal_object->string.pointer) { return_ACPI_STATUS (AE_NO_MEMORY); } - MEMCPY (internal_object->string.pointer, - external_object->string.pointer, - external_object->string.length); + ACPI_MEMCPY (internal_object->string.pointer, + external_object->string.pointer, + external_object->string.length); internal_object->string.length = external_object->string.length; break; @@ -474,14 +453,15 @@ case ACPI_TYPE_BUFFER: - internal_object->buffer.pointer = ACPI_MEM_CALLOCATE (external_object->buffer.length); + internal_object->buffer.pointer = + ACPI_MEM_CALLOCATE (external_object->buffer.length); if (!internal_object->buffer.pointer) { return_ACPI_STATUS 
(AE_NO_MEMORY); } - MEMCPY (internal_object->buffer.pointer, - external_object->buffer.pointer, - external_object->buffer.length); + ACPI_MEMCPY (internal_object->buffer.pointer, + external_object->buffer.pointer, + external_object->buffer.length); internal_object->buffer.length = external_object->buffer.length; break; @@ -491,8 +471,11 @@ internal_object->integer.value = external_object->integer.value; break; - } + default: + /* Other types can't get here */ + break; + } *ret_internal_object = internal_object; return_ACPI_STATUS (AE_OK); @@ -505,65 +488,64 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_copy_epackage_to_ipackage + * FUNCTION: acpi_ut_copy_epackage_to_ipackage * - * PARAMETERS: *Internal_object - Pointer to the object we are returning - * *Buffer - Where the object is returned - * *Space_used - Where the length of the object is returned + * PARAMETERS: *internal_object - Pointer to the object we are returning + * *Buffer - Where the object is returned + * *space_used - Where the length of the object is returned * - * RETURN: Status - the status of the call + * RETURN: Status * * DESCRIPTION: This function is called to place a package object in a user * buffer. A package object by definition contains other objects. * * The buffer is assumed to have sufficient space for the object. * The caller must have verified the buffer length needed using the - * Acpi_ut_get_object_size function before calling this function. + * acpi_ut_get_object_size function before calling this function. * ******************************************************************************/ static acpi_status acpi_ut_copy_epackage_to_ipackage ( - acpi_operand_object *internal_object, - u8 *buffer, - u32 *space_used) -{ - u8 *free_space; - acpi_object *external_object; - u32 length = 0; - u32 this_index; - u32 object_space = 0; - acpi_operand_object *this_internal_obj; - acpi_object *this_external_obj; + union acpi_operand_object *internal_object, + u8 *buffer, + u32 *space_used) +{ + u8 *free_space; + union acpi_object *external_object; + u32 length = 0; + u32 this_index; + u32 object_space = 0; + union acpi_operand_object *this_internal_obj; + union acpi_object *this_external_obj; - FUNCTION_TRACE ("Ut_copy_epackage_to_ipackage"); + ACPI_FUNCTION_TRACE ("ut_copy_epackage_to_ipackage"); /* * First package at head of the buffer */ - external_object = (acpi_object *)buffer; + external_object = (union acpi_object *)buffer; /* * Free space begins right after the first package */ - free_space = buffer + sizeof(acpi_object); + free_space = buffer + sizeof(union acpi_object); - external_object->type = internal_object->common.type; + external_object->type = ACPI_GET_OBJECT_TYPE (internal_object); external_object->package.count = internal_object->package.count; - external_object->package.elements = (acpi_object *)free_space; - + external_object->package.elements = (union acpi_object *)free_space; /* * Build an array of ACPI_OBJECTS in the buffer * and move the free space past it */ - free_space += external_object->package.count * sizeof(acpi_object); + free_space += external_object->package.count * sizeof(union acpi_object); - /* Call Walk_package */ + /* Call walk_package */ } @@ -572,10 +554,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_copy_eobject_to_iobject + * FUNCTION: acpi_ut_copy_eobject_to_iobject * - * PARAMETERS: *Internal_object - The external object to be converted - * 
*Buffer_ptr - Where the internal object is returned + * PARAMETERS: *internal_object - The external object to be converted + * *buffer_ptr - Where the internal object is returned * * RETURN: Status - the status of the call * @@ -585,29 +567,19 @@ acpi_status acpi_ut_copy_eobject_to_iobject ( - acpi_object *external_object, - acpi_operand_object **internal_object) + union acpi_object *external_object, + union acpi_operand_object **internal_object) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Ut_copy_eobject_to_iobject"); + ACPI_FUNCTION_TRACE ("ut_copy_eobject_to_iobject"); if (external_object->type == ACPI_TYPE_PACKAGE) { /* - * Package objects contain other objects (which can be objects) - * buildpackage does it all - * - * TBD: Package conversion must be completed and tested - * NOTE: this code converts packages as input parameters to - * control methods only. This is a very, very rare case. + * Packages as external input to control methods are not supported, */ -/* - Status = Acpi_ut_copy_epackage_to_ipackage(Internal_object, - Ret_buffer->Pointer, - &Ret_buffer->Length); -*/ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Packages as parameters not implemented!\n")); @@ -627,11 +599,104 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_copy_ielement_to_ielement + * FUNCTION: acpi_ut_copy_simple_object * - * PARAMETERS: ACPI_PKG_CALLBACK + * PARAMETERS: source_desc - The internal object to be copied + * dest_desc - New target object * - * RETURN: Status - the status of the call + * RETURN: Status + * + * DESCRIPTION: Simple copy of one internal object to another. Reference count + * of the destination object is preserved. + * + ******************************************************************************/ + +acpi_status +acpi_ut_copy_simple_object ( + union acpi_operand_object *source_desc, + union acpi_operand_object *dest_desc) +{ + u16 reference_count; + union acpi_operand_object *next_object; + + + /* Save fields from destination that we don't want to overwrite */ + + reference_count = dest_desc->common.reference_count; + next_object = dest_desc->common.next_object; + + /* Copy the entire source object over the destination object*/ + + ACPI_MEMCPY ((char *) dest_desc, (char *) source_desc, + sizeof (union acpi_operand_object)); + + /* Restore the saved fields */ + + dest_desc->common.reference_count = reference_count; + dest_desc->common.next_object = next_object; + + /* Handle the objects with extra data */ + + switch (ACPI_GET_OBJECT_TYPE (dest_desc)) { + case ACPI_TYPE_BUFFER: + + dest_desc->buffer.node = NULL; + dest_desc->common.flags = source_desc->common.flags; + + /* + * Allocate and copy the actual buffer if and only if: + * 1) There is a valid buffer pointer + * 2) The buffer is not static (not in an ACPI table) (in this case, + * the actual pointer was already copied above) + */ + if ((source_desc->buffer.pointer) && + (!(source_desc->common.flags & AOPOBJ_STATIC_POINTER))) { + dest_desc->buffer.pointer = ACPI_MEM_ALLOCATE (source_desc->buffer.length); + if (!dest_desc->buffer.pointer) { + return (AE_NO_MEMORY); + } + + ACPI_MEMCPY (dest_desc->buffer.pointer, source_desc->buffer.pointer, + source_desc->buffer.length); + } + break; + + case ACPI_TYPE_STRING: + + /* + * Allocate and copy the actual string if and only if: + * 1) There is a valid string pointer + * 2) The string is not static (not in an ACPI table) (in this case, + * the actual pointer was already copied above) + */ + if 
((source_desc->string.pointer) && + (!(source_desc->common.flags & AOPOBJ_STATIC_POINTER))) { + dest_desc->string.pointer = ACPI_MEM_ALLOCATE ((acpi_size) source_desc->string.length + 1); + if (!dest_desc->string.pointer) { + return (AE_NO_MEMORY); + } + + ACPI_MEMCPY (dest_desc->string.pointer, source_desc->string.pointer, + (acpi_size) source_desc->string.length + 1); + } + break; + + default: + /* Nothing to do for other simple objects */ + break; + } + + return (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_copy_ielement_to_ielement + * + * PARAMETERS: acpi_pkg_callback + * + * RETURN: Status * * DESCRIPTION: Copy one package element to another package element * @@ -639,58 +704,78 @@ acpi_status acpi_ut_copy_ielement_to_ielement ( - u8 object_type, - acpi_operand_object *source_object, - acpi_generic_state *state, - void *context) -{ - acpi_status status = AE_OK; - u32 this_index; - acpi_operand_object **this_target_ptr; - acpi_operand_object *target_object; + u8 object_type, + union acpi_operand_object *source_object, + union acpi_generic_state *state, + void *context) +{ + acpi_status status = AE_OK; + u32 this_index; + union acpi_operand_object **this_target_ptr; + union acpi_operand_object *target_object; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); this_index = state->pkg.index; - this_target_ptr = (acpi_operand_object **) + this_target_ptr = (union acpi_operand_object **) &state->pkg.dest_object->package.elements[this_index]; switch (object_type) { - case 0: + case ACPI_COPY_TYPE_SIMPLE: - /* - * This is a simple object, just copy it - */ - target_object = acpi_ut_create_internal_object (source_object->common.type); - if (!target_object) { - return (AE_NO_MEMORY); - } + /* A null source object indicates a (legal) null package element */ - status = acpi_ex_store_object_to_object (source_object, target_object, - (acpi_walk_state *) context); - if (ACPI_FAILURE (status)) { - return (status); + if (source_object) { + /* + * This is a simple object, just copy it + */ + target_object = acpi_ut_create_internal_object ( + ACPI_GET_OBJECT_TYPE (source_object)); + if (!target_object) { + return (AE_NO_MEMORY); + } + + status = acpi_ut_copy_simple_object (source_object, target_object); + if (ACPI_FAILURE (status)) { + return (status); + } + + *this_target_ptr = target_object; } + else { + /* Pass through a null element */ - *this_target_ptr = target_object; + *this_target_ptr = NULL; + } break; - case 1: + case ACPI_COPY_TYPE_PACKAGE: + /* * This object is a package - go down another nesting level * Create and build the package object */ target_object = acpi_ut_create_internal_object (ACPI_TYPE_PACKAGE); if (!target_object) { - /* TBD: must delete package created up to this point */ - return (AE_NO_MEMORY); } target_object->package.count = source_object->package.count; + target_object->common.flags = source_object->common.flags; + + /* + * Create the object array + */ + target_object->package.elements = + ACPI_MEM_CALLOCATE (((acpi_size) source_object->package.count + 1) * + sizeof (void *)); + if (!target_object->package.elements) { + ACPI_MEM_FREE (target_object); + return (AE_NO_MEMORY); + } /* * Pass the new package object back to the package walk routine @@ -703,21 +788,21 @@ *this_target_ptr = target_object; break; + default: return (AE_BAD_PARAMETER); } - return (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_copy_ipackage_to_ipackage + * 
FUNCTION: acpi_ut_copy_ipackage_to_ipackage * - * PARAMETERS: *Source_obj - Pointer to the source package object - * *Dest_obj - Where the internal object is returned + * PARAMETERS: *source_obj - Pointer to the source package object + * *dest_obj - Where the internal object is returned * * RETURN: Status - the status of the call * @@ -728,37 +813,92 @@ acpi_status acpi_ut_copy_ipackage_to_ipackage ( - acpi_operand_object *source_obj, - acpi_operand_object *dest_obj, - acpi_walk_state *walk_state) + union acpi_operand_object *source_obj, + union acpi_operand_object *dest_obj, + struct acpi_walk_state *walk_state) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE ("Ut_copy_ipackage_to_ipackage"); + ACPI_FUNCTION_TRACE ("ut_copy_ipackage_to_ipackage"); - dest_obj->common.type = source_obj->common.type; + dest_obj->common.type = ACPI_GET_OBJECT_TYPE (source_obj); + dest_obj->common.flags = source_obj->common.flags; dest_obj->package.count = source_obj->package.count; - /* * Create the object array and walk the source package tree */ - dest_obj->package.elements = ACPI_MEM_CALLOCATE ((source_obj->package.count + 1) * - sizeof (void *)); - dest_obj->package.next_element = dest_obj->package.elements; - + dest_obj->package.elements = ACPI_MEM_CALLOCATE ( + ((acpi_size) source_obj->package.count + 1) * + sizeof (void *)); if (!dest_obj->package.elements) { - REPORT_ERROR ( - ("Aml_build_copy_internal_package_object: Package allocation failure\n")); + ACPI_REPORT_ERROR ( + ("aml_build_copy_internal_package_object: Package allocation failure\n")); return_ACPI_STATUS (AE_NO_MEMORY); } - + /* + * Copy the package element-by-element by walking the package "tree". + * This handles nested packages of arbitrary depth. + */ status = acpi_ut_walk_package_tree (source_obj, dest_obj, acpi_ut_copy_ielement_to_ielement, walk_state); + if (ACPI_FAILURE (status)) { + /* On failure, delete the destination package object */ + + acpi_ut_remove_reference (dest_obj); + } return_ACPI_STATUS (status); } + +/******************************************************************************* + * + * FUNCTION: acpi_ut_copy_iobject_to_iobject + * + * PARAMETERS: walk_state - Current walk state + * source_desc - The internal object to be copied + * dest_desc - Where the copied object is returned + * + * RETURN: Status + * + * DESCRIPTION: Copy an internal object to a new internal object + * + ******************************************************************************/ + +acpi_status +acpi_ut_copy_iobject_to_iobject ( + union acpi_operand_object *source_desc, + union acpi_operand_object **dest_desc, + struct acpi_walk_state *walk_state) +{ + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("ut_copy_iobject_to_iobject"); + + + /* Create the top level object */ + + *dest_desc = acpi_ut_create_internal_object (ACPI_GET_OBJECT_TYPE (source_desc)); + if (!*dest_desc) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* Copy the object and possible subobjects */ + + if (ACPI_GET_OBJECT_TYPE (source_desc) == ACPI_TYPE_PACKAGE) { + status = acpi_ut_copy_ipackage_to_ipackage (source_desc, *dest_desc, + walk_state); + } + else { + status = acpi_ut_copy_simple_object (source_desc, *dest_desc); + } + + return_ACPI_STATUS (status); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utdebug.c linux.21rc5-ac2/drivers/acpi/utilities/utdebug.c --- linux.21rc5/drivers/acpi/utilities/utdebug.c 2003-05-28 15:08:05.000000000 +0100 +++ 
linux.21rc5-ac2/drivers/acpi/utilities/utdebug.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,46 +1,63 @@ /****************************************************************************** * * Module Name: utdebug - Debug print routines - * $Revision: 90 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" +#include <acpi/acpi.h> #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utdebug") + ACPI_MODULE_NAME ("utdebug") -u32 acpi_gbl_prev_thread_id = 0xFFFFFFFF; -char *acpi_gbl_fn_entry_str = "----Entry"; -char *acpi_gbl_fn_exit_str = "----Exit-"; +#ifdef ACPI_DEBUG_OUTPUT - -#ifdef ACPI_DEBUG +static u32 acpi_gbl_prev_thread_id = 0xFFFFFFFF; +static char *acpi_gbl_fn_entry_str = "----Entry"; +static char *acpi_gbl_fn_exit_str = "----Exit-"; /***************************************************************************** * - * FUNCTION: Acpi_ut_init_stack_ptr_trace + * FUNCTION: acpi_ut_init_stack_ptr_trace * * PARAMETERS: None * @@ -54,16 +71,16 @@ acpi_ut_init_stack_ptr_trace ( void) { - u32 current_sp; + u32 current_sp; - acpi_gbl_entry_stack_pointer = (unsigned long) &current_sp; + acpi_gbl_entry_stack_pointer = ACPI_PTR_DIFF (&current_sp, NULL); } /***************************************************************************** * - * FUNCTION: Acpi_ut_track_stack_ptr + * FUNCTION: acpi_ut_track_stack_ptr * * PARAMETERS: None * @@ -77,9 +94,10 @@ acpi_ut_track_stack_ptr ( void) { - u32 current_sp; + acpi_size current_sp; + - current_sp = (u32) &current_sp; + current_sp = ACPI_PTR_DIFF (&current_sp, NULL); if (current_sp < acpi_gbl_lowest_stack_pointer) { acpi_gbl_lowest_stack_pointer = current_sp; @@ -93,13 +111,13 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_debug_print + * FUNCTION: acpi_ut_debug_print * - * PARAMETERS: Debug_level - Requested debug print level - * Proc_name - Caller's procedure name - * Module_name - Caller's module name (for error output) - * Line_number - Caller's line number (for error output) - * Component_id - Caller's component ID (for error output) + * PARAMETERS: debug_level - Requested debug print level + * proc_name - Caller's procedure name + * module_name - Caller's module name (for error output) + * line_number - Caller's line number (for error output) + * component_id - Caller's component ID (for error output) * * Format - Printf format field * ... - Optional printf arguments @@ -111,15 +129,15 @@ * ****************************************************************************/ -void +void ACPI_INTERNAL_VAR_XFACE acpi_ut_debug_print ( - u32 requested_debug_level, - u32 line_number, - acpi_debug_print_info *dbg_info, - char *format, + u32 requested_debug_level, + u32 line_number, + struct acpi_debug_print_info *dbg_info, + char *format, ...) 
{ - u32 thread_id; + u32 thread_id; va_list args; @@ -131,7 +149,6 @@ return; } - /* * Thread tracking and context switch notification */ @@ -150,14 +167,13 @@ * Display the module name, current line number, thread ID (if requested), * current procedure nesting level, and the current procedure name */ - acpi_os_printf ("%8s-%04d ", dbg_info->module_name, line_number); + acpi_os_printf ("%8s-%04ld ", dbg_info->module_name, line_number); if (ACPI_LV_THREADS & acpi_dbg_level) { - acpi_os_printf ("[%04X] ", thread_id, acpi_gbl_nesting_level, dbg_info->proc_name); + acpi_os_printf ("[%04lX] ", thread_id); } - acpi_os_printf ("[%02d] %-22.22s: ", acpi_gbl_nesting_level, dbg_info->proc_name); - + acpi_os_printf ("[%02ld] %-22.22s: ", acpi_gbl_nesting_level, dbg_info->proc_name); va_start (args, format); acpi_os_vprintf (format, args); @@ -166,30 +182,30 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_debug_print_raw + * FUNCTION: acpi_ut_debug_print_raw * - * PARAMETERS: Requested_debug_level - Requested debug print level - * Line_number - Caller's line number - * Dbg_info - Contains: - * Proc_name - Caller's procedure name - * Module_name - Caller's module name - * Component_id - Caller's component ID + * PARAMETERS: requested_debug_level - Requested debug print level + * line_number - Caller's line number + * dbg_info - Contains: + * proc_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID * Format - Printf format field * ... - Optional printf arguments * * RETURN: None * * DESCRIPTION: Print message with no headers. Has same interface as - * Debug_print so that the same macros can be used. + * debug_print so that the same macros can be used. * ****************************************************************************/ -void +void ACPI_INTERNAL_VAR_XFACE acpi_ut_debug_print_raw ( - u32 requested_debug_level, - u32 line_number, - acpi_debug_print_info *dbg_info, - char *format, + u32 requested_debug_level, + u32 line_number, + struct acpi_debug_print_info *dbg_info, + char *format, ...) { va_list args; @@ -201,32 +217,31 @@ } va_start (args, format); - acpi_os_vprintf (format, args); } /***************************************************************************** * - * FUNCTION: Acpi_ut_trace + * FUNCTION: acpi_ut_trace * - * PARAMETERS: Line_number - Caller's line number - * Dbg_info - Contains: - * Proc_name - Caller's procedure name - * Module_name - Caller's module name - * Component_id - Caller's component ID + * PARAMETERS: line_number - Caller's line number + * dbg_info - Contains: + * proc_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID * * RETURN: None * * DESCRIPTION: Function entry trace. 
Prints only if TRACE_FUNCTIONS bit is - * set in Debug_level + * set in debug_level * ****************************************************************************/ void acpi_ut_trace ( - u32 line_number, - acpi_debug_print_info *dbg_info) + u32 line_number, + struct acpi_debug_print_info *dbg_info) { acpi_gbl_nesting_level++; @@ -239,27 +254,27 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_trace_ptr + * FUNCTION: acpi_ut_trace_ptr * - * PARAMETERS: Line_number - Caller's line number - * Dbg_info - Contains: - * Proc_name - Caller's procedure name - * Module_name - Caller's module name - * Component_id - Caller's component ID + * PARAMETERS: line_number - Caller's line number + * dbg_info - Contains: + * proc_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID * Pointer - Pointer to display * * RETURN: None * * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is - * set in Debug_level + * set in debug_level * ****************************************************************************/ void acpi_ut_trace_ptr ( - u32 line_number, - acpi_debug_print_info *dbg_info, - void *pointer) + u32 line_number, + struct acpi_debug_print_info *dbg_info, + void *pointer) { acpi_gbl_nesting_level++; acpi_ut_track_stack_ptr (); @@ -271,27 +286,27 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_trace_str + * FUNCTION: acpi_ut_trace_str * - * PARAMETERS: Line_number - Caller's line number - * Dbg_info - Contains: - * Proc_name - Caller's procedure name - * Module_name - Caller's module name - * Component_id - Caller's component ID + * PARAMETERS: line_number - Caller's line number + * dbg_info - Contains: + * proc_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID * String - Additional string to display * * RETURN: None * * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is - * set in Debug_level + * set in debug_level * ****************************************************************************/ void acpi_ut_trace_str ( - u32 line_number, - acpi_debug_print_info *dbg_info, - NATIVE_CHAR *string) + u32 line_number, + struct acpi_debug_print_info *dbg_info, + char *string) { acpi_gbl_nesting_level++; @@ -304,27 +319,27 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_trace_u32 + * FUNCTION: acpi_ut_trace_u32 * - * PARAMETERS: Line_number - Caller's line number - * Dbg_info - Contains: - * Proc_name - Caller's procedure name - * Module_name - Caller's module name - * Component_id - Caller's component ID + * PARAMETERS: line_number - Caller's line number + * dbg_info - Contains: + * proc_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID * Integer - Integer to display * * RETURN: None * * DESCRIPTION: Function entry trace. 
Prints only if TRACE_FUNCTIONS bit is - * set in Debug_level + * set in debug_level * ****************************************************************************/ void acpi_ut_trace_u32 ( - u32 line_number, - acpi_debug_print_info *dbg_info, - u32 integer) + u32 line_number, + struct acpi_debug_print_info *dbg_info, + u32 integer) { acpi_gbl_nesting_level++; @@ -337,25 +352,25 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_exit + * FUNCTION: acpi_ut_exit * - * PARAMETERS: Line_number - Caller's line number - * Dbg_info - Contains: - * Proc_name - Caller's procedure name - * Module_name - Caller's module name - * Component_id - Caller's component ID + * PARAMETERS: line_number - Caller's line number + * dbg_info - Contains: + * proc_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID * * RETURN: None * * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is - * set in Debug_level + * set in debug_level * ****************************************************************************/ void acpi_ut_exit ( - u32 line_number, - acpi_debug_print_info *dbg_info) + u32 line_number, + struct acpi_debug_print_info *dbg_info) { acpi_ut_debug_print (ACPI_LV_FUNCTIONS, line_number, dbg_info, @@ -367,27 +382,27 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_status_exit + * FUNCTION: acpi_ut_status_exit * - * PARAMETERS: Line_number - Caller's line number - * Dbg_info - Contains: - * Proc_name - Caller's procedure name - * Module_name - Caller's module name - * Component_id - Caller's component ID + * PARAMETERS: line_number - Caller's line number + * dbg_info - Contains: + * proc_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID * Status - Exit status code * * RETURN: None * * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is - * set in Debug_level. Prints exit status also. + * set in debug_level. Prints exit status also. * ****************************************************************************/ void acpi_ut_status_exit ( - u32 line_number, - acpi_debug_print_info *dbg_info, - acpi_status status) + u32 line_number, + struct acpi_debug_print_info *dbg_info, + acpi_status status) { if (ACPI_SUCCESS (status)) { @@ -407,31 +422,32 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_value_exit + * FUNCTION: acpi_ut_value_exit * - * PARAMETERS: Line_number - Caller's line number - * Dbg_info - Contains: - * Proc_name - Caller's procedure name - * Module_name - Caller's module name - * Component_id - Caller's component ID + * PARAMETERS: line_number - Caller's line number + * dbg_info - Contains: + * proc_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID * Value - Value to be printed with exit msg * * RETURN: None * * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is - * set in Debug_level. Prints exit value also. + * set in debug_level. Prints exit value also. 
* ****************************************************************************/ void acpi_ut_value_exit ( - u32 line_number, - acpi_debug_print_info *dbg_info, - acpi_integer value) + u32 line_number, + struct acpi_debug_print_info *dbg_info, + acpi_integer value) { acpi_ut_debug_print (ACPI_LV_FUNCTIONS, line_number, dbg_info, - "%s %8.8X%8.8X\n", acpi_gbl_fn_exit_str, HIDWORD(value), LODWORD(value)); + "%s %8.8X%8.8X\n", acpi_gbl_fn_exit_str, + ACPI_HIDWORD (value), ACPI_LODWORD (value)); acpi_gbl_nesting_level--; } @@ -439,27 +455,27 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_ptr_exit + * FUNCTION: acpi_ut_ptr_exit * - * PARAMETERS: Line_number - Caller's line number - * Dbg_info - Contains: - * Proc_name - Caller's procedure name - * Module_name - Caller's module name - * Component_id - Caller's component ID + * PARAMETERS: line_number - Caller's line number + * dbg_info - Contains: + * proc_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID * Value - Value to be printed with exit msg * * RETURN: None * * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is - * set in Debug_level. Prints exit value also. + * set in debug_level. Prints exit value also. * ****************************************************************************/ void acpi_ut_ptr_exit ( - u32 line_number, - acpi_debug_print_info *dbg_info, - u8 *ptr) + u32 line_number, + struct acpi_debug_print_info *dbg_info, + u8 *ptr) { acpi_ut_debug_print (ACPI_LV_FUNCTIONS, line_number, dbg_info, @@ -473,12 +489,12 @@ /***************************************************************************** * - * FUNCTION: Acpi_ut_dump_buffer + * FUNCTION: acpi_ut_dump_buffer * * PARAMETERS: Buffer - Buffer to dump * Count - Amount to dump, in bytes * Display - BYTE, WORD, DWORD, or QWORD display - * Component_iD - Caller's component ID + * component_iD - Caller's component ID * * RETURN: None * @@ -488,15 +504,15 @@ void acpi_ut_dump_buffer ( - u8 *buffer, - u32 count, - u32 display, - u32 component_id) -{ - u32 i = 0; - u32 j; - u32 temp32; - u8 buf_char; + u8 *buffer, + u32 count, + u32 display, + u32 component_id) +{ + acpi_native_uint i = 0; + acpi_native_uint j; + u32 temp32; + u8 buf_char; /* Only dump the buffer if tracing is enabled */ @@ -506,6 +522,11 @@ return; } + if ((count < 4) || (count & 0x01)) { + display = DB_BYTE_DISPLAY; + } + + acpi_os_printf ("\nOffset Value\n"); /* * Nasty little dump buffer routine! @@ -513,8 +534,7 @@ while (i < count) { /* Print current offset */ - acpi_os_printf ("%05X ", i); - + acpi_os_printf ("%05X ", (u32) i); /* Print 16 hex chars */ @@ -539,8 +559,7 @@ case DB_WORD_DISPLAY: - MOVE_UNALIGNED16_TO_32 (&temp32, - &buffer[i + j]); + ACPI_MOVE_16_TO_32 (&temp32, &buffer[i + j]); acpi_os_printf ("%04X ", temp32); j += 2; break; @@ -548,8 +567,7 @@ case DB_DWORD_DISPLAY: - MOVE_UNALIGNED32_TO_32 (&temp32, - &buffer[i + j]); + ACPI_MOVE_32_TO_32 (&temp32, &buffer[i + j]); acpi_os_printf ("%08X ", temp32); j += 4; break; @@ -557,24 +575,20 @@ case DB_QWORD_DISPLAY: - MOVE_UNALIGNED32_TO_32 (&temp32, - &buffer[i + j]); + ACPI_MOVE_32_TO_32 (&temp32, &buffer[i + j]); acpi_os_printf ("%08X", temp32); - MOVE_UNALIGNED32_TO_32 (&temp32, - &buffer[i + j + 4]); + ACPI_MOVE_32_TO_32 (&temp32, &buffer[i + j + 4]); acpi_os_printf ("%08X ", temp32); j += 8; break; } } - /* * Print the ASCII equivalent characters * But watch out for the bad unprintable ones... 
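 * (Presumably each byte in the printable ASCII range is echoed as-is here, with '.' substituted for anything unprintable.)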
*/ - for (j = 0; j < 16; j++) { if (i + j >= count) { acpi_os_printf ("\n"); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utdelete.c linux.21rc5-ac2/drivers/acpi/utilities/utdelete.c --- linux.21rc5/drivers/acpi/utilities/utdelete.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/utdelete.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,42 +1,59 @@ /******************************************************************************* * * Module Name: utdelete - object deletion and reference count utilities - * $Revision: 81 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
*/ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "actables.h" -#include "acparser.h" +#include +#include +#include +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utdelete") + ACPI_MODULE_NAME ("utdelete") /******************************************************************************* * - * FUNCTION: Acpi_ut_delete_internal_obj + * FUNCTION: acpi_ut_delete_internal_obj * * PARAMETERS: *Object - Pointer to the list to be deleted * @@ -49,13 +66,15 @@ void acpi_ut_delete_internal_obj ( - acpi_operand_object *object) + union acpi_operand_object *object) { - void *obj_pointer = NULL; - acpi_operand_object *handler_desc; + void *obj_pointer = NULL; + union acpi_operand_object *handler_desc; + union acpi_operand_object *second_desc; + union acpi_operand_object *next_desc; - FUNCTION_TRACE_PTR ("Ut_delete_internal_obj", object); + ACPI_FUNCTION_TRACE_PTR ("ut_delete_internal_obj", object); if (!object) { @@ -66,16 +85,17 @@ * Must delete or free any pointers within the object that are not * actual ACPI objects (for example, a raw buffer pointer). */ - switch (object->common.type) { - + switch (ACPI_GET_OBJECT_TYPE (object)) { case ACPI_TYPE_STRING: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "**** String %p, ptr %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "**** String %p, ptr %p\n", object, object->string.pointer)); /* Free the actual string buffer */ if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) { + /* But only if it is NOT a pointer into an ACPI table */ + obj_pointer = object->string.pointer; } break; @@ -83,18 +103,22 @@ case ACPI_TYPE_BUFFER: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "**** Buffer %p, ptr %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "**** Buffer %p, ptr %p\n", object, object->buffer.pointer)); /* Free the actual buffer */ - obj_pointer = object->buffer.pointer; + if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) { + /* But only if it is NOT a pointer into an ACPI table */ + + obj_pointer = object->buffer.pointer; + } break; case ACPI_TYPE_PACKAGE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, " **** Package of count %X\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, " **** Package of count %X\n", object->package.count)); /* @@ -108,124 +132,137 @@ break; + case ACPI_TYPE_DEVICE: + + if (object->device.gpe_block) { + (void) acpi_ev_delete_gpe_block (object->device.gpe_block); + } + + /* Walk the handler list for this device */ + + handler_desc = object->device.address_space; + while (handler_desc) { + next_desc = handler_desc->address_space.next; + acpi_ut_remove_reference (handler_desc); + handler_desc = next_desc; + } + break; + + case ACPI_TYPE_MUTEX: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "***** Mutex %p, Semaphore %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "***** Mutex %p, Semaphore %p\n", object, object->mutex.semaphore)); acpi_ex_unlink_mutex (object); - acpi_os_delete_semaphore (object->mutex.semaphore); + (void) acpi_os_delete_semaphore (object->mutex.semaphore); break; case ACPI_TYPE_EVENT: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "***** Event %p, Semaphore %p\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "***** Event %p, Semaphore %p\n", object, object->event.semaphore)); - acpi_os_delete_semaphore (object->event.semaphore); + (void) acpi_os_delete_semaphore (object->event.semaphore); object->event.semaphore = NULL; break; case ACPI_TYPE_METHOD: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "***** Method %p\n", object)); + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "***** Method %p\n", object)); /* Delete the method semaphore if it exists */ if 
(object->method.semaphore) { - acpi_os_delete_semaphore (object->method.semaphore); + (void) acpi_os_delete_semaphore (object->method.semaphore); object->method.semaphore = NULL; } - break; case ACPI_TYPE_REGION: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "***** Region %p\n", object)); + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "***** Region %p\n", object)); - if (object->region.extra) { + second_desc = acpi_ns_get_secondary_object (object); + if (second_desc) { /* - * Free the Region_context if and only if the handler is one of the + * Free the region_context if and only if the handler is one of the * default handlers -- and therefore, we created the context object * locally, it was not created by an external caller. */ - handler_desc = object->region.addr_handler; - if ((handler_desc) && - (handler_desc->addr_handler.hflags == ADDR_HANDLER_DEFAULT_INSTALLED)) { - obj_pointer = object->region.extra->extra.region_context; + handler_desc = object->region.address_space; + if (handler_desc) { + if (handler_desc->address_space.hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) { + obj_pointer = second_desc->extra.region_context; + } + + acpi_ut_remove_reference (handler_desc); } /* Now we can free the Extra object */ - acpi_ut_delete_object_desc (object->region.extra); + acpi_ut_delete_object_desc (second_desc); } break; case ACPI_TYPE_BUFFER_FIELD: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "***** Buffer Field %p\n", object)); + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "***** Buffer Field %p\n", object)); - if (object->buffer_field.extra) { - acpi_ut_delete_object_desc (object->buffer_field.extra); + second_desc = acpi_ns_get_secondary_object (object); + if (second_desc) { + acpi_ut_delete_object_desc (second_desc); } break; + default: break; } + /* Free any allocated memory (pointer within the object) found above */ - /* - * Delete any allocated memory found above - */ if (obj_pointer) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Deleting Obj Ptr %p \n", obj_pointer)); + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Deleting Object Subptr %p\n", + obj_pointer)); ACPI_MEM_FREE (obj_pointer); } - /* Only delete the object if it was dynamically allocated */ - - if (object->common.flags & AOPOBJ_STATIC_ALLOCATION) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Object %p [%s] static allocation, no delete\n", - object, acpi_ut_get_type_name (object->common.type))); - } - - if (!(object->common.flags & AOPOBJ_STATIC_ALLOCATION)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Deleting object %p [%s]\n", - object, acpi_ut_get_type_name (object->common.type))); + /* Now the object can be safely deleted */ - acpi_ut_delete_object_desc (object); - } + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Deleting Object %p [%s]\n", + object, acpi_ut_get_object_type_name (object))); + acpi_ut_delete_object_desc (object); return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ut_delete_internal_object_list + * FUNCTION: acpi_ut_delete_internal_object_list * - * PARAMETERS: *Obj_list - Pointer to the list to be deleted + * PARAMETERS: *obj_list - Pointer to the list to be deleted * - * RETURN: Status - the status of the call + * RETURN: None * * DESCRIPTION: This function deletes an internal object list, including both * simple objects and package objects * ******************************************************************************/ -acpi_status +void acpi_ut_delete_internal_object_list ( - acpi_operand_object **obj_list) + union acpi_operand_object **obj_list) { - acpi_operand_object **internal_obj; + 
union acpi_operand_object **internal_obj; - FUNCTION_TRACE ("Ut_delete_internal_object_list"); + ACPI_FUNCTION_TRACE ("ut_delete_internal_object_list"); /* Walk the null-terminated internal list */ @@ -237,14 +274,13 @@ /* Free the combined parameter pointer list and object array */ ACPI_MEM_FREE (obj_list); - - return_ACPI_STATUS (AE_OK); + return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ut_update_ref_count + * FUNCTION: acpi_ut_update_ref_count * * PARAMETERS: *Object - Object whose ref count is to be updated * Action - What to do @@ -257,25 +293,25 @@ static void acpi_ut_update_ref_count ( - acpi_operand_object *object, - u32 action) + union acpi_operand_object *object, + u32 action) { - u16 count; - u16 new_count; + u16 count; + u16 new_count; + + ACPI_FUNCTION_NAME ("ut_update_ref_count"); - PROC_NAME ("Ut_update_ref_count"); if (!object) { return; } - count = object->common.reference_count; new_count = count; /* - * Reference count action (increment, decrement, or force delete) + * Perform the reference count action (increment, decrement, or force delete) */ switch (action) { @@ -284,7 +320,7 @@ new_count++; object->common.reference_count = new_count; - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Obj %p Refs=%X, [Incremented]\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Obj %p Refs=%X, [Incremented]\n", object, new_count)); break; @@ -292,21 +328,20 @@ case REF_DECREMENT: if (count < 1) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Obj %p Refs=%X, can't decrement! (Set to 0)\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Obj %p Refs=%X, can't decrement! (Set to 0)\n", object, new_count)); new_count = 0; } - else { new_count--; - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Obj %p Refs=%X, [Decremented]\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Obj %p Refs=%X, [Decremented]\n", object, new_count)); } - if (object->common.type == ACPI_TYPE_METHOD) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Method Obj %p Refs=%X, [Decremented]\n", + if (ACPI_GET_OBJECT_TYPE (object) == ACPI_TYPE_METHOD) { + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Method Obj %p Refs=%X, [Decremented]\n", object, new_count)); } @@ -320,7 +355,7 @@ case REF_FORCE_DELETE: - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Obj %p Refs=%X, Force delete! (Set to 0)\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Obj %p Refs=%X, Force delete! (Set to 0)\n", object, count)); new_count = 0; @@ -335,12 +370,11 @@ break; } - /* * Sanity check the reference count, for debug purposes only. 
* (A deleted object will have a huge reference count) */ - if (count > MAX_REFERENCE_COUNT) { + if (count > ACPI_MAX_REFERENCE_COUNT) { ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "**** Warning **** Large Reference Count (%X) in object %p\n\n", @@ -353,7 +387,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_update_object_reference + * FUNCTION: acpi_ut_update_object_reference * * PARAMETERS: *Object - Increment ref count for this object * and all sub-objects @@ -375,18 +409,16 @@ acpi_status acpi_ut_update_object_reference ( - acpi_operand_object *object, - u16 action) + union acpi_operand_object *object, + u16 action) { - acpi_status status; - u32 i; - acpi_operand_object *next; - acpi_operand_object *new; - acpi_generic_state *state_list = NULL; - acpi_generic_state *state; + acpi_status status; + u32 i; + union acpi_generic_state *state_list = NULL; + union acpi_generic_state *state; - FUNCTION_TRACE_PTR ("Ut_update_object_reference", object); + ACPI_FUNCTION_TRACE_PTR ("ut_update_object_reference", object); /* Ignore a null object ptr */ @@ -395,16 +427,13 @@ return_ACPI_STATUS (AE_OK); } + /* Make sure that this isn't a namespace handle */ - /* - * Make sure that this isn't a namespace handle or an AML pointer - */ - if (VALID_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_NAMED)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Object %p is NS handle\n", object)); + if (ACPI_GET_DESCRIPTOR_TYPE (object) == ACPI_DESC_TYPE_NAMED) { + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Object %p is NS handle\n", object)); return_ACPI_STATUS (AE_OK); } - state = acpi_ut_create_update_state (object, action); while (state) { @@ -416,32 +445,11 @@ * All sub-objects must have their reference count incremented also. * Different object types have different subobjects. 
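 * For example, updating a Package pushes one update state per package
 * element onto state_list, so the whole object tree is walked
 * iteratively via the state stack rather than by recursion.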
*/ - switch (object->common.type) { - + switch (ACPI_GET_OBJECT_TYPE (object)) { case ACPI_TYPE_DEVICE: - status = acpi_ut_create_update_state_and_push (object->device.addr_handler, - action, &state_list); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - - acpi_ut_update_ref_count (object->device.sys_handler, action); - acpi_ut_update_ref_count (object->device.drv_handler, action); - break; - - - case INTERNAL_TYPE_ADDRESS_HANDLER: - - /* Must walk list of address handlers */ - - next = object->addr_handler.next; - while (next) { - new = next->addr_handler.next; - acpi_ut_update_ref_count (next, action); - - next = new; - } + acpi_ut_update_ref_count (object->device.system_notify, action); + acpi_ut_update_ref_count (object->device.device_notify, action); break; @@ -460,7 +468,7 @@ status = acpi_ut_create_update_state_and_push ( object->package.elements[i], action, &state_list); if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + goto error_exit; } } break; @@ -470,63 +478,62 @@ status = acpi_ut_create_update_state_and_push ( object->buffer_field.buffer_obj, action, &state_list); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + goto error_exit; } break; - case INTERNAL_TYPE_REGION_FIELD: + case ACPI_TYPE_LOCAL_REGION_FIELD: status = acpi_ut_create_update_state_and_push ( object->field.region_obj, action, &state_list); if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + goto error_exit; } break; - case INTERNAL_TYPE_BANK_FIELD: + case ACPI_TYPE_LOCAL_BANK_FIELD: status = acpi_ut_create_update_state_and_push ( - object->bank_field.bank_register_obj, action, &state_list); + object->bank_field.bank_obj, action, &state_list); if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + goto error_exit; } status = acpi_ut_create_update_state_and_push ( object->bank_field.region_obj, action, &state_list); if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + goto error_exit; } break; - case INTERNAL_TYPE_INDEX_FIELD: + case ACPI_TYPE_LOCAL_INDEX_FIELD: status = acpi_ut_create_update_state_and_push ( object->index_field.index_obj, action, &state_list); if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + goto error_exit; } status = acpi_ut_create_update_state_and_push ( object->index_field.data_obj, action, &state_list); if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); + goto error_exit; } break; case ACPI_TYPE_REGION: - case INTERNAL_TYPE_REFERENCE: + case ACPI_TYPE_LOCAL_REFERENCE: + default: /* No subobjects */ break; } - /* * Now we can update the count in the main object. 
This can only * happen after we update the sub-objects in case this causes the @@ -534,20 +541,26 @@ */ acpi_ut_update_ref_count (object, action); - /* Move on to the next object to be updated */ state = acpi_ut_pop_generic_state (&state_list); } - return_ACPI_STATUS (AE_OK); + + +error_exit: + + ACPI_REPORT_ERROR (("Could not update object reference count, %s\n", + acpi_format_exception (status))); + + return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_add_reference + * FUNCTION: acpi_ut_add_reference * * PARAMETERS: *Object - Object whose reference count is to be * incremented @@ -560,31 +573,28 @@ void acpi_ut_add_reference ( - acpi_operand_object *object) + union acpi_operand_object *object) { - FUNCTION_TRACE_PTR ("Ut_add_reference", object); + ACPI_FUNCTION_TRACE_PTR ("ut_add_reference", object); - /* - * Ensure that we have a valid object - */ + /* Ensure that we have a valid object */ + if (!acpi_ut_valid_internal_object (object)) { return_VOID; } - /* - * We have a valid ACPI internal object, now increment the reference count - */ - acpi_ut_update_object_reference (object, REF_INCREMENT); + /* Increment the reference count */ + (void) acpi_ut_update_object_reference (object, REF_INCREMENT); return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ut_remove_reference + * FUNCTION: acpi_ut_remove_reference * * PARAMETERS: *Object - Object whose ref count will be decremented * @@ -596,10 +606,11 @@ void acpi_ut_remove_reference ( - acpi_operand_object *object) + union acpi_operand_object *object) { - FUNCTION_TRACE_PTR ("Ut_remove_reference", object); + ACPI_FUNCTION_TRACE_PTR ("ut_remove_reference", object); + /* * Allow a NULL pointer to be passed in, just ignore it. This saves @@ -607,18 +618,17 @@ * */ if (!object || - (VALID_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_NAMED))) { + (ACPI_GET_DESCRIPTOR_TYPE (object) == ACPI_DESC_TYPE_NAMED)) { return_VOID; } - /* - * Ensure that we have a valid object - */ + /* Ensure that we have a valid object */ + if (!acpi_ut_valid_internal_object (object)) { return_VOID; } - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Obj %p Refs=%X\n", + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Obj %p Refs=%X\n", object, object->common.reference_count)); /* @@ -626,7 +636,7 @@ * if the reference count becomes 0. (Must also decrement the ref count * of all subobjects!) */ - acpi_ut_update_object_reference (object, REF_DECREMENT); + (void) acpi_ut_update_object_reference (object, REF_DECREMENT); return_VOID; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/uteval.c linux.21rc5-ac2/drivers/acpi/utilities/uteval.c --- linux.21rc5/drivers/acpi/utilities/uteval.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/uteval.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,123 +1,218 @@ /****************************************************************************** * * Module Name: uteval - Object evaluation - * $Revision: 31 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. 
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acnamesp.h" -#include "acinterp.h" +#include +#include +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("uteval") + ACPI_MODULE_NAME ("uteval") /******************************************************************************* * - * FUNCTION: Acpi_ut_evaluate_numeric_object + * FUNCTION: acpi_ut_evaluate_object * - * PARAMETERS: *Object_name - Object name to be evaluated - * Device_node - Node for the device - * *Address - Where the value is returned + * PARAMETERS: prefix_node - Starting node + * Path - Path to object from starting node + * expected_return_types - Bitmap of allowed return types + * return_desc - Where a return value is stored * * RETURN: Status * - * DESCRIPTION: evaluates a numeric namespace object for a selected device - * and stores results in *Address. + * DESCRIPTION: Evaluates a namespace object and verifies the type of the + * return object. Common code that simplifies accessing objects + * that have required return objects of fixed types. 
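+ *
+ * A typical use (this is exactly the call made by acpi_ut_execute_HID
+ * below) is:
+ *
+ *     status = acpi_ut_evaluate_object (device_node, METHOD_NAME__HID,
+ *                  ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING, &obj_desc);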
* * NOTE: Internal function, no parameter validation * ******************************************************************************/ acpi_status -acpi_ut_evaluate_numeric_object ( - NATIVE_CHAR *object_name, - acpi_namespace_node *device_node, - acpi_integer *address) +acpi_ut_evaluate_object ( + struct acpi_namespace_node *prefix_node, + char *path, + u32 expected_return_btypes, + union acpi_operand_object **return_desc) { - acpi_operand_object *obj_desc; - acpi_status status; + union acpi_operand_object *obj_desc; + acpi_status status; + u32 return_btype; - FUNCTION_TRACE ("Ut_evaluate_numeric_object"); + ACPI_FUNCTION_TRACE ("ut_evaluate_object"); - /* Execute the method */ + /* Evaluate the object/method */ - status = acpi_ns_evaluate_relative (device_node, object_name, NULL, &obj_desc); + status = acpi_ns_evaluate_relative (prefix_node, path, NULL, &obj_desc); if (ACPI_FAILURE (status)) { if (status == AE_NOT_FOUND) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%s on %4.4s was not found\n", - object_name, (char*)&device_node->name)); + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[%4.4s.%s] was not found\n", + prefix_node->name.ascii, path)); } else { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s on %4.4s failed with status %s\n", - object_name, (char*)&device_node->name, - acpi_format_exception (status))); + ACPI_REPORT_METHOD_ERROR ("Method execution failed", + prefix_node, path, status); } return_ACPI_STATUS (status); } - /* Did we get a return object? */ if (!obj_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No object was returned from %s\n", - object_name)); - return_ACPI_STATUS (AE_TYPE); + if (expected_return_btypes) { + ACPI_REPORT_METHOD_ERROR ("No object was returned from", + prefix_node, path, AE_NOT_EXIST); + + return_ACPI_STATUS (AE_NOT_EXIST); + } + + return_ACPI_STATUS (AE_OK); } - /* Is the return object of the correct type? */ + /* Map the return object type to the bitmapped type */ + + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_INTEGER: + return_btype = ACPI_BTYPE_INTEGER; + break; + + case ACPI_TYPE_BUFFER: + return_btype = ACPI_BTYPE_BUFFER; + break; + + case ACPI_TYPE_STRING: + return_btype = ACPI_BTYPE_STRING; + break; + + case ACPI_TYPE_PACKAGE: + return_btype = ACPI_BTYPE_PACKAGE; + break; + + default: + return_btype = 0; + break; + } + + /* Is the return object one of the expected types? 
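+ * (return_btype stays zero for any type without an ACPI_BTYPE_*
+ * mapping above, so an unexpected return type always fails this test.)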
*/ + + if (!(expected_return_btypes & return_btype)) { + ACPI_REPORT_METHOD_ERROR ("Return object type is incorrect", + prefix_node, path, AE_TYPE); - if (obj_desc->common.type != ACPI_TYPE_INTEGER) { - status = AE_TYPE; ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Type returned from %s was not a number: %X \n", - object_name, obj_desc->common.type)); + "Type returned from %s was incorrect: %X\n", + path, ACPI_GET_OBJECT_TYPE (obj_desc))); + + /* On error exit, we must delete the return object */ + + acpi_ut_remove_reference (obj_desc); + return_ACPI_STATUS (AE_TYPE); } - else { - /* - * Since the structure is a union, setting any field will set all - * of the variables in the union - */ - *address = obj_desc->integer.value; + + /* Object type is OK, return it */ + + *return_desc = obj_desc; + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_evaluate_numeric_object + * + * PARAMETERS: *object_name - Object name to be evaluated + * device_node - Node for the device + * *Address - Where the value is returned + * + * RETURN: Status + * + * DESCRIPTION: Evaluates a numeric namespace object for a selected device + * and stores result in *Address. + * + * NOTE: Internal function, no parameter validation + * + ******************************************************************************/ + +acpi_status +acpi_ut_evaluate_numeric_object ( + char *object_name, + struct acpi_namespace_node *device_node, + acpi_integer *address) +{ + union acpi_operand_object *obj_desc; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ut_evaluate_numeric_object"); + + + status = acpi_ut_evaluate_object (device_node, object_name, + ACPI_BTYPE_INTEGER, &obj_desc); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } + /* Get the returned Integer */ + + *address = obj_desc->integer.value; + /* On exit, we must delete the return object */ acpi_ut_remove_reference (obj_desc); - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_execute_HID + * FUNCTION: acpi_ut_execute_HID * - * PARAMETERS: Device_node - Node for the device + * PARAMETERS: device_node - Node for the device * *Hid - Where the HID is returned * * RETURN: Status @@ -131,81 +226,119 @@ acpi_status acpi_ut_execute_HID ( - acpi_namespace_node *device_node, - acpi_device_id *hid) + struct acpi_namespace_node *device_node, + struct acpi_device_id *hid) { - acpi_operand_object *obj_desc; - acpi_status status; + union acpi_operand_object *obj_desc; + acpi_status status; - FUNCTION_TRACE ("Ut_execute_HID"); + ACPI_FUNCTION_TRACE ("ut_execute_HID"); - /* Execute the method */ - - status = acpi_ns_evaluate_relative (device_node, - METHOD_NAME__HID, NULL, &obj_desc); + status = acpi_ut_evaluate_object (device_node, METHOD_NAME__HID, + ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING, &obj_desc); if (ACPI_FAILURE (status)) { - if (status == AE_NOT_FOUND) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "_HID on %4.4s was not found\n", - (char*)&device_node->name)); - } + return_ACPI_STATUS (status); + } - else { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "_HID on %4.4s failed %s\n", - (char*)&device_node->name, acpi_format_exception (status))); - } + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_INTEGER) { + /* Convert the Numeric HID to string */ - return_ACPI_STATUS (status); + acpi_ex_eisa_id_to_string ((u32) obj_desc->integer.value, hid->buffer); } + else { + /* Copy the String HID from the returned object */ - 
/* Did we get a return object? */ + ACPI_STRNCPY (hid->buffer, obj_desc->string.pointer, sizeof(hid->buffer)); + } - if (!obj_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No object was returned from _HID\n")); - return_ACPI_STATUS (AE_TYPE); + /* On exit, we must delete the return object */ + + acpi_ut_remove_reference (obj_desc); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_execute_CID + * + * PARAMETERS: device_node - Node for the device + * *Cid - Where the CID is returned + * + * RETURN: Status + * + * DESCRIPTION: Executes the _CID control method that returns one or more + * compatible hardware IDs for the device. + * + * NOTE: Internal function, no parameter validation + * + ******************************************************************************/ + +acpi_status +acpi_ut_execute_CID ( + struct acpi_namespace_node *device_node, + struct acpi_device_id *cid) +{ + union acpi_operand_object *obj_desc; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ut_execute_CID"); + + + status = acpi_ut_evaluate_object (device_node, METHOD_NAME__CID, + ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING | ACPI_BTYPE_PACKAGE, &obj_desc); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } /* - * A _HID can return either a Number (32 bit compressed EISA ID) or - * a string + * A _CID can return either a single compatible ID or a package of compatible + * IDs. Each compatible ID can be a Number (32 bit compressed EISA ID) or + * string (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss"). */ - if ((obj_desc->common.type != ACPI_TYPE_INTEGER) && - (obj_desc->common.type != ACPI_TYPE_STRING)) { - status = AE_TYPE; - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Type returned from _HID not a number or string: %s(%X) \n", - acpi_ut_get_type_name (obj_desc->common.type), obj_desc->common.type)); - } + switch (ACPI_GET_OBJECT_TYPE (obj_desc)) { + case ACPI_TYPE_INTEGER: - else { - if (obj_desc->common.type == ACPI_TYPE_INTEGER) { - /* Convert the Numeric HID to string */ + /* Convert the Numeric CID to string */ - acpi_ex_eisa_id_to_string ((u32) obj_desc->integer.value, hid->buffer); - } + acpi_ex_eisa_id_to_string ((u32) obj_desc->integer.value, cid->buffer); + break; - else { - /* Copy the String HID from the returned object */ + case ACPI_TYPE_STRING: - STRNCPY(hid->buffer, obj_desc->string.pointer, sizeof(hid->buffer)); - } - } + /* Copy the String CID from the returned object */ + + ACPI_STRNCPY (cid->buffer, obj_desc->string.pointer, sizeof (cid->buffer)); + break; + + case ACPI_TYPE_PACKAGE: + + /* TBD: Parse package elements; need different return struct, etc. 
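+ * Until that is implemented, a package return is rejected just below
+ * with AE_SUPPORT.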
*/ + + status = AE_SUPPORT; + break; + + default: + status = AE_TYPE; + break; + } /* On exit, we must delete the return object */ acpi_ut_remove_reference (obj_desc); - return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_execute_UID + * FUNCTION: acpi_ut_execute_UID * - * PARAMETERS: Device_node - Node for the device + * PARAMETERS: device_node - Node for the device * *Uid - Where the UID is returned * * RETURN: Status @@ -219,82 +352,45 @@ acpi_status acpi_ut_execute_UID ( - acpi_namespace_node *device_node, - acpi_device_id *uid) + struct acpi_namespace_node *device_node, + struct acpi_device_id *uid) { - acpi_operand_object *obj_desc; - acpi_status status; + union acpi_operand_object *obj_desc; + acpi_status status; - PROC_NAME ("Ut_execute_UID"); + ACPI_FUNCTION_TRACE ("ut_execute_UID"); - /* Execute the method */ - - status = acpi_ns_evaluate_relative (device_node, - METHOD_NAME__UID, NULL, &obj_desc); + status = acpi_ut_evaluate_object (device_node, METHOD_NAME__UID, + ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING, &obj_desc); if (ACPI_FAILURE (status)) { - if (status == AE_NOT_FOUND) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "_UID on %4.4s was not found\n", - (char*)&device_node->name)); - } - - else { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "_UID on %4.4s failed %s\n", - (char*)&device_node->name, acpi_format_exception (status))); - } - - return (status); + return_ACPI_STATUS (status); } - /* Did we get a return object? */ - - if (!obj_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No object was returned from _UID\n")); - return (AE_TYPE); - } + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_INTEGER) { + /* Convert the Numeric UID to string */ - /* - * A _UID can return either a Number (32 bit compressed EISA ID) or - * a string - */ - if ((obj_desc->common.type != ACPI_TYPE_INTEGER) && - (obj_desc->common.type != ACPI_TYPE_STRING)) { - status = AE_TYPE; - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Type returned from _UID was not a number or string: %X \n", - obj_desc->common.type)); + acpi_ex_unsigned_integer_to_string (obj_desc->integer.value, uid->buffer); } - else { - if (obj_desc->common.type == ACPI_TYPE_INTEGER) { - /* Convert the Numeric UID to string */ - - acpi_ex_unsigned_integer_to_string (obj_desc->integer.value, uid->buffer); - } - - else { - /* Copy the String UID from the returned object */ + /* Copy the String UID from the returned object */ - STRNCPY(uid->buffer, obj_desc->string.pointer, sizeof(uid->buffer)); - } + ACPI_STRNCPY (uid->buffer, obj_desc->string.pointer, sizeof (uid->buffer)); } - /* On exit, we must delete the return object */ acpi_ut_remove_reference (obj_desc); - - return (status); + return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_execute_STA + * FUNCTION: acpi_ut_execute_STA * - * PARAMETERS: Device_node - Node for the device + * PARAMETERS: device_node - Node for the device * *Flags - Where the status flags are returned * * RETURN: Status @@ -308,62 +404,37 @@ acpi_status acpi_ut_execute_STA ( - acpi_namespace_node *device_node, - u32 *flags) + struct acpi_namespace_node *device_node, + u32 *flags) { - acpi_operand_object *obj_desc; - acpi_status status; + union acpi_operand_object *obj_desc; + acpi_status status; - FUNCTION_TRACE ("Ut_execute_STA"); + ACPI_FUNCTION_TRACE ("ut_execute_STA"); - /* Execute the method */ - - status = acpi_ns_evaluate_relative (device_node, - METHOD_NAME__STA, NULL, 
&obj_desc); - if (AE_NOT_FOUND == status) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "_STA on %4.4s was not found, assuming present.\n", - (char*)&device_node->name)); - - *flags = 0x0F; - status = AE_OK; - } - - else if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "_STA on %4.4s failed %s\n", - (char*)&device_node->name, - acpi_format_exception (status))); - } - - else /* success */ { - /* Did we get a return object? */ - - if (!obj_desc) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No object was returned from _STA\n")); - return_ACPI_STATUS (AE_TYPE); - } - - /* Is the return object of the correct type? */ + status = acpi_ut_evaluate_object (device_node, METHOD_NAME__STA, + ACPI_BTYPE_INTEGER, &obj_desc); + if (ACPI_FAILURE (status)) { + if (AE_NOT_FOUND == status) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, + "_STA on %4.4s was not found, assuming device is present\n", + device_node->name.ascii)); - if (obj_desc->common.type != ACPI_TYPE_INTEGER) { - status = AE_TYPE; - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Type returned from _STA was not a number: %X \n", - obj_desc->common.type)); + *flags = 0x0F; + status = AE_OK; } - else { - /* Extract the status flags */ + return_ACPI_STATUS (status); + } - *flags = (u32) obj_desc->integer.value; - } + /* Extract the status flags */ - /* On exit, we must delete the return object */ + *flags = (u32) obj_desc->integer.value; - acpi_ut_remove_reference (obj_desc); - } + /* On exit, we must delete the return object */ + acpi_ut_remove_reference (obj_desc); return_ACPI_STATUS (status); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utglobal.c linux.21rc5-ac2/drivers/acpi/utilities/utglobal.c --- linux.21rc5/drivers/acpi/utilities/utglobal.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/utglobal.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,44 +1,58 @@ /****************************************************************************** * * Module Name: utglobal - Global variables for the ACPI subsystem - * $Revision: 133 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ #define DEFINE_ACPI_GLOBALS -#include "acpi.h" -#include "acevents.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "amlcode.h" - +#include +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utglobal") + ACPI_MODULE_NAME ("utglobal") /****************************************************************************** * - * FUNCTION: Acpi_format_exception + * FUNCTION: acpi_format_exception * * PARAMETERS: Status - The acpi_status code to be formatted * @@ -50,57 +64,69 @@ const char * acpi_format_exception ( - acpi_status status) + acpi_status status) { - const char *exception = "UNKNOWN_STATUS_CODE"; - acpi_status sub_status; + const char *exception = "UNKNOWN_STATUS_CODE"; + acpi_status sub_status; - sub_status = (status & ~AE_CODE_MASK); + ACPI_FUNCTION_NAME ("format_exception"); + sub_status = (status & ~AE_CODE_MASK); + switch (status & AE_CODE_MASK) { case AE_CODE_ENVIRONMENTAL: if (sub_status <= AE_CODE_ENV_MAX) { exception = acpi_gbl_exception_names_env [sub_status]; + break; } - break; + goto unknown; case AE_CODE_PROGRAMMER: if (sub_status <= AE_CODE_PGM_MAX) { exception = acpi_gbl_exception_names_pgm [sub_status -1]; + break; } - break; + goto unknown; case AE_CODE_ACPI_TABLES: if (sub_status <= AE_CODE_TBL_MAX) { exception = acpi_gbl_exception_names_tbl [sub_status -1]; + break; } - break; + goto unknown; case AE_CODE_AML: if (sub_status <= AE_CODE_AML_MAX) { exception = acpi_gbl_exception_names_aml [sub_status -1]; + break; } - break; + goto unknown; case AE_CODE_CONTROL: if (sub_status <= AE_CODE_CTRL_MAX) { exception = acpi_gbl_exception_names_ctrl [sub_status -1]; + break; } - break; + goto unknown; default: - break; + goto unknown; } return ((const char *) exception); + +unknown: + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unknown exception code: 0x%8.8X\n", status)); + return ((const char *) exception); } @@ -117,37 +143,41 @@ /* Debug switch - level and trace mask */ -#ifdef ACPI_DEBUG -u32 acpi_dbg_level = DEBUG_DEFAULT; +#ifdef ACPI_DEBUG_OUTPUT +u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT; #else -u32 
acpi_dbg_level = NORMAL_DEFAULT; +u32 acpi_dbg_level = ACPI_NORMAL_DEFAULT; #endif /* Debug switch - layer (component) mask */ -u32 acpi_dbg_layer = ACPI_COMPONENT_DEFAULT; -u32 acpi_gbl_nesting_level = 0; +u32 acpi_dbg_layer = ACPI_COMPONENT_DEFAULT; +u32 acpi_gbl_nesting_level = 0; /* Debugger globals */ -u8 acpi_gbl_db_terminate_threads = FALSE; -u8 acpi_gbl_method_executing = FALSE; +u8 acpi_gbl_db_terminate_threads = FALSE; +u8 acpi_gbl_abort_method = FALSE; +u8 acpi_gbl_method_executing = FALSE; /* System flags */ -u32 acpi_gbl_system_flags = 0; -u32 acpi_gbl_startup_flags = 0; +u32 acpi_gbl_startup_flags = 0; -/* System starts unitialized! */ +/* System starts uninitialized */ -u8 acpi_gbl_shutdown = TRUE; +u8 acpi_gbl_shutdown = TRUE; -const u8 acpi_gbl_decode_to8bit [8] = {1,2,4,8,16,32,64,128}; +const u8 acpi_gbl_decode_to8bit [8] = {1,2,4,8,16,32,64,128}; -const NATIVE_CHAR *acpi_gbl_db_sleep_states[ACPI_NUM_SLEEP_STATES] = { - "\\_S0_","\\_S1_","\\_S2_","\\_S3_", - "\\_S4_","\\_S5_","\\_S4B"}; +const char *acpi_gbl_db_sleep_states[ACPI_S_STATE_COUNT] = { + "\\_S0_", + "\\_S1_", + "\\_S2_", + "\\_S3_", + "\\_S4_", + "\\_S5_"}; /****************************************************************************** @@ -158,86 +188,82 @@ /* - * Names built-in to the interpreter + * Predefined ACPI Names (Built-in to the Interpreter) * * Initial values are currently supported only for types String and Number. - * To avoid type punning, both are specified as strings in this table. + * Both are specified as strings in this table. * * NOTES: * 1) _SB_ is defined to be a device to allow _SB_/_INI to be run * during the initialization sequence. */ -const predefined_names acpi_gbl_pre_defined_names[] = -{ {"_GPE", INTERNAL_TYPE_DEF_ANY}, - {"_PR_", INTERNAL_TYPE_DEF_ANY}, - {"_SB_", ACPI_TYPE_DEVICE}, - {"_SI_", INTERNAL_TYPE_DEF_ANY}, - {"_TZ_", INTERNAL_TYPE_DEF_ANY}, - {"_REV", ACPI_TYPE_INTEGER, "2"}, - {"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME}, - {"_GL_", ACPI_TYPE_MUTEX, "0"}, - {NULL, ACPI_TYPE_ANY} /* Table terminator */ +const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = +{ {"_GPE", ACPI_TYPE_LOCAL_SCOPE, NULL}, + {"_PR_", ACPI_TYPE_LOCAL_SCOPE, NULL}, + {"_SB_", ACPI_TYPE_DEVICE, NULL}, + {"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL}, + {"_TZ_", ACPI_TYPE_LOCAL_SCOPE, NULL}, + {"_REV", ACPI_TYPE_INTEGER, "2"}, + {"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME}, + {"_GL_", ACPI_TYPE_MUTEX, "0"}, + +#if defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY) + {"_OSI", ACPI_TYPE_METHOD, "1"}, +#endif + {NULL, ACPI_TYPE_ANY, NULL} /* Table terminator */ }; /* * Properties of the ACPI Object Types, both internal and external. 
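 * (Each entry in the acpi_gbl_ns_properties table below is a bit mask
 * of ACPI_NS_* flags, e.g. ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL for the
 * resource descriptor and resource field types.)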
- * - * Elements of Acpi_ns_properties are bit significant - * and the table is indexed by values of acpi_object_type + * The table is indexed by values of acpi_object_type */ -const u8 acpi_gbl_ns_properties[] = +const u8 acpi_gbl_ns_properties[] = { - NSP_NORMAL, /* 00 Any */ - NSP_NORMAL, /* 01 Number */ - NSP_NORMAL, /* 02 String */ - NSP_NORMAL, /* 03 Buffer */ - NSP_LOCAL, /* 04 Package */ - NSP_NORMAL, /* 05 Field_unit */ - NSP_NEWSCOPE | NSP_LOCAL, /* 06 Device */ - NSP_LOCAL, /* 07 Acpi_event */ - NSP_NEWSCOPE | NSP_LOCAL, /* 08 Method */ - NSP_LOCAL, /* 09 Mutex */ - NSP_LOCAL, /* 10 Region */ - NSP_NEWSCOPE | NSP_LOCAL, /* 11 Power */ - NSP_NEWSCOPE | NSP_LOCAL, /* 12 Processor */ - NSP_NEWSCOPE | NSP_LOCAL, /* 13 Thermal */ - NSP_NORMAL, /* 14 Buffer_field */ - NSP_NORMAL, /* 15 Ddb_handle */ - NSP_NORMAL, /* 16 Debug Object */ - NSP_NORMAL, /* 17 Def_field */ - NSP_NORMAL, /* 18 Bank_field */ - NSP_NORMAL, /* 19 Index_field */ - NSP_NORMAL, /* 20 Reference */ - NSP_NORMAL, /* 21 Alias */ - NSP_NORMAL, /* 22 Notify */ - NSP_NORMAL, /* 23 Address Handler */ - NSP_NEWSCOPE | NSP_LOCAL, /* 24 Resource Desc */ - NSP_NEWSCOPE | NSP_LOCAL, /* 25 Resource Field */ - NSP_NORMAL, /* 26 Def_field_defn */ - NSP_NORMAL, /* 27 Bank_field_defn */ - NSP_NORMAL, /* 28 Index_field_defn */ - NSP_NORMAL, /* 29 If */ - NSP_NORMAL, /* 30 Else */ - NSP_NORMAL, /* 31 While */ - NSP_NEWSCOPE, /* 32 Scope */ - NSP_LOCAL, /* 33 Def_any */ - NSP_NORMAL, /* 34 Extra */ - NSP_NORMAL /* 35 Invalid */ + ACPI_NS_NORMAL, /* 00 Any */ + ACPI_NS_NORMAL, /* 01 Number */ + ACPI_NS_NORMAL, /* 02 String */ + ACPI_NS_NORMAL, /* 03 Buffer */ + ACPI_NS_NORMAL, /* 04 Package */ + ACPI_NS_NORMAL, /* 05 field_unit */ + ACPI_NS_NEWSCOPE, /* 06 Device */ + ACPI_NS_NORMAL, /* 07 Event */ + ACPI_NS_NEWSCOPE, /* 08 Method */ + ACPI_NS_NORMAL, /* 09 Mutex */ + ACPI_NS_NORMAL, /* 10 Region */ + ACPI_NS_NEWSCOPE, /* 11 Power */ + ACPI_NS_NEWSCOPE, /* 12 Processor */ + ACPI_NS_NEWSCOPE, /* 13 Thermal */ + ACPI_NS_NORMAL, /* 14 buffer_field */ + ACPI_NS_NORMAL, /* 15 ddb_handle */ + ACPI_NS_NORMAL, /* 16 Debug Object */ + ACPI_NS_NORMAL, /* 17 def_field */ + ACPI_NS_NORMAL, /* 18 bank_field */ + ACPI_NS_NORMAL, /* 19 index_field */ + ACPI_NS_NORMAL, /* 20 Reference */ + ACPI_NS_NORMAL, /* 21 Alias */ + ACPI_NS_NORMAL, /* 22 Notify */ + ACPI_NS_NORMAL, /* 23 Address Handler */ + ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 24 Resource Desc */ + ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 25 Resource Field */ + ACPI_NS_NEWSCOPE, /* 26 Scope */ + ACPI_NS_NORMAL, /* 27 Extra */ + ACPI_NS_NORMAL, /* 28 Data */ + ACPI_NS_NORMAL /* 29 Invalid */ }; /* Hex to ASCII conversion table */ -const NATIVE_CHAR acpi_gbl_hex_to_ascii[] = +static const char acpi_gbl_hex_to_ascii[] = {'0','1','2','3','4','5','6','7', - '8','9','A','B','C','D','E','F'}; + '8','9','A','B','C','D','E','F'}; /***************************************************************************** * - * FUNCTION: Acpi_ut_hex_to_ascii_char + * FUNCTION: acpi_ut_hex_to_ascii_char * * PARAMETERS: Integer - Contains the hex digit * Position - bit position of the digit within the @@ -249,10 +275,10 @@ * ****************************************************************************/ -u8 +char acpi_ut_hex_to_ascii_char ( - acpi_integer integer, - u32 position) + acpi_integer integer, + u32 position) { return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]); @@ -261,68 +287,180 @@ /****************************************************************************** * - * Table globals + * Table name globals * 
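 * (The consumed tables -- see acpi_gbl_table_data below -- are RSDP,
 * DSDT, FADT, FACS, PSDT, SSDT and XSDT.)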
* NOTE: This table includes ONLY the ACPI tables that the subsystem consumes. * it is NOT an exhaustive list of all possible ACPI tables. All ACPI tables * that are not used by the subsystem are simply ignored. * + * Do NOT add any table to this list that is not consumed directly by this + * subsystem. + * ******************************************************************************/ -acpi_table_desc acpi_gbl_acpi_tables[NUM_ACPI_TABLES]; +struct acpi_table_list acpi_gbl_table_lists[NUM_ACPI_TABLE_TYPES]; + +struct acpi_table_support acpi_gbl_table_data[NUM_ACPI_TABLE_TYPES] = +{ + /*********** Name, Signature, Global typed pointer Signature size, Type How many allowed?, Contains valid AML? */ + + /* RSDP 0 */ {RSDP_NAME, RSDP_SIG, NULL, sizeof (RSDP_SIG)-1, ACPI_TABLE_ROOT | ACPI_TABLE_SINGLE}, + /* DSDT 1 */ {DSDT_SIG, DSDT_SIG, (void **) &acpi_gbl_DSDT, sizeof (DSDT_SIG)-1, ACPI_TABLE_SECONDARY| ACPI_TABLE_SINGLE | ACPI_TABLE_EXECUTABLE}, + /* FADT 2 */ {FADT_SIG, FADT_SIG, (void **) &acpi_gbl_FADT, sizeof (FADT_SIG)-1, ACPI_TABLE_PRIMARY | ACPI_TABLE_SINGLE}, + /* FACS 3 */ {FACS_SIG, FACS_SIG, (void **) &acpi_gbl_FACS, sizeof (FACS_SIG)-1, ACPI_TABLE_SECONDARY| ACPI_TABLE_SINGLE}, + /* PSDT 4 */ {PSDT_SIG, PSDT_SIG, NULL, sizeof (PSDT_SIG)-1, ACPI_TABLE_PRIMARY | ACPI_TABLE_MULTIPLE | ACPI_TABLE_EXECUTABLE}, + /* SSDT 5 */ {SSDT_SIG, SSDT_SIG, NULL, sizeof (SSDT_SIG)-1, ACPI_TABLE_PRIMARY | ACPI_TABLE_MULTIPLE | ACPI_TABLE_EXECUTABLE}, + /* XSDT 6 */ {XSDT_SIG, XSDT_SIG, NULL, sizeof (RSDT_SIG)-1, ACPI_TABLE_ROOT | ACPI_TABLE_SINGLE}, +}; + + +/****************************************************************************** + * + * Event and Hardware globals + * + ******************************************************************************/ -ACPI_TABLE_SUPPORT acpi_gbl_acpi_table_data[NUM_ACPI_TABLES] = +struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG] = { - /*********** Name, Signature, Signature size, How many allowed?, Supported? 
Global typed pointer */ + /* Name Parent Register Register Bit Position Register Bit Mask */ + + /* ACPI_BITREG_TIMER_STATUS */ {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_TIMER_STATUS, ACPI_BITMASK_TIMER_STATUS}, + /* ACPI_BITREG_BUS_MASTER_STATUS */ {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_BUS_MASTER_STATUS, ACPI_BITMASK_BUS_MASTER_STATUS}, + /* ACPI_BITREG_GLOBAL_LOCK_STATUS */ {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_GLOBAL_LOCK_STATUS, ACPI_BITMASK_GLOBAL_LOCK_STATUS}, + /* ACPI_BITREG_POWER_BUTTON_STATUS */ {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_POWER_BUTTON_STATUS, ACPI_BITMASK_POWER_BUTTON_STATUS}, + /* ACPI_BITREG_SLEEP_BUTTON_STATUS */ {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_SLEEP_BUTTON_STATUS, ACPI_BITMASK_SLEEP_BUTTON_STATUS}, + /* ACPI_BITREG_RT_CLOCK_STATUS */ {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_RT_CLOCK_STATUS, ACPI_BITMASK_RT_CLOCK_STATUS}, + /* ACPI_BITREG_WAKE_STATUS */ {ACPI_REGISTER_PM1_STATUS, ACPI_BITPOSITION_WAKE_STATUS, ACPI_BITMASK_WAKE_STATUS}, + + /* ACPI_BITREG_TIMER_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_TIMER_ENABLE, ACPI_BITMASK_TIMER_ENABLE}, + /* ACPI_BITREG_GLOBAL_LOCK_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE, ACPI_BITMASK_GLOBAL_LOCK_ENABLE}, + /* ACPI_BITREG_POWER_BUTTON_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_POWER_BUTTON_ENABLE, ACPI_BITMASK_POWER_BUTTON_ENABLE}, + /* ACPI_BITREG_SLEEP_BUTTON_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE, ACPI_BITMASK_SLEEP_BUTTON_ENABLE}, + /* ACPI_BITREG_RT_CLOCK_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, ACPI_BITPOSITION_RT_CLOCK_ENABLE, ACPI_BITMASK_RT_CLOCK_ENABLE}, + /* ACPI_BITREG_WAKE_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, 0, 0}, + + /* ACPI_BITREG_SCI_ENABLE */ {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_SCI_ENABLE, ACPI_BITMASK_SCI_ENABLE}, + /* ACPI_BITREG_BUS_MASTER_RLD */ {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_BUS_MASTER_RLD, ACPI_BITMASK_BUS_MASTER_RLD}, + /* ACPI_BITREG_GLOBAL_LOCK_RELEASE */ {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE, ACPI_BITMASK_GLOBAL_LOCK_RELEASE}, + /* ACPI_BITREG_SLEEP_TYPE_A */ {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_SLEEP_TYPE_X, ACPI_BITMASK_SLEEP_TYPE_X}, + /* ACPI_BITREG_SLEEP_TYPE_B */ {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_SLEEP_TYPE_X, ACPI_BITMASK_SLEEP_TYPE_X}, + /* ACPI_BITREG_SLEEP_ENABLE */ {ACPI_REGISTER_PM1_CONTROL, ACPI_BITPOSITION_SLEEP_ENABLE, ACPI_BITMASK_SLEEP_ENABLE}, - /* RSDP 0 */ {RSDP_NAME, RSDP_SIG, sizeof (RSDP_SIG)-1, ACPI_TABLE_SINGLE, AE_OK, NULL}, - /* DSDT 1 */ {DSDT_SIG, DSDT_SIG, sizeof (DSDT_SIG)-1, ACPI_TABLE_SINGLE, AE_OK, (void **) &acpi_gbl_DSDT}, - /* FADT 2 */ {FADT_SIG, FADT_SIG, sizeof (FADT_SIG)-1, ACPI_TABLE_SINGLE, AE_OK, (void **) &acpi_gbl_FADT}, - /* FACS 3 */ {FACS_SIG, FACS_SIG, sizeof (FACS_SIG)-1, ACPI_TABLE_SINGLE, AE_OK, (void **) &acpi_gbl_FACS}, - /* PSDT 4 */ {PSDT_SIG, PSDT_SIG, sizeof (PSDT_SIG)-1, ACPI_TABLE_MULTIPLE, AE_OK, NULL}, - /* SSDT 5 */ {SSDT_SIG, SSDT_SIG, sizeof (SSDT_SIG)-1, ACPI_TABLE_MULTIPLE, AE_OK, NULL}, - /* XSDT 6 */ {XSDT_SIG, XSDT_SIG, sizeof (RSDT_SIG)-1, ACPI_TABLE_SINGLE, AE_OK, NULL}, + /* ACPI_BITREG_ARB_DIS */ {ACPI_REGISTER_PM2_CONTROL, ACPI_BITPOSITION_ARB_DISABLE, ACPI_BITMASK_ARB_DISABLE} }; -#ifdef ACPI_DEBUG +struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] = +{ + /* ACPI_EVENT_PMTIMER */ {ACPI_BITREG_TIMER_STATUS, ACPI_BITREG_TIMER_ENABLE, ACPI_BITMASK_TIMER_STATUS, ACPI_BITMASK_TIMER_ENABLE}, + /* ACPI_EVENT_GLOBAL 
*/ {ACPI_BITREG_GLOBAL_LOCK_STATUS, ACPI_BITREG_GLOBAL_LOCK_ENABLE, ACPI_BITMASK_GLOBAL_LOCK_STATUS, ACPI_BITMASK_GLOBAL_LOCK_ENABLE}, + /* ACPI_EVENT_POWER_BUTTON */ {ACPI_BITREG_POWER_BUTTON_STATUS, ACPI_BITREG_POWER_BUTTON_ENABLE, ACPI_BITMASK_POWER_BUTTON_STATUS, ACPI_BITMASK_POWER_BUTTON_ENABLE}, + /* ACPI_EVENT_SLEEP_BUTTON */ {ACPI_BITREG_SLEEP_BUTTON_STATUS, ACPI_BITREG_SLEEP_BUTTON_ENABLE, ACPI_BITMASK_SLEEP_BUTTON_STATUS, ACPI_BITMASK_SLEEP_BUTTON_ENABLE}, + /* ACPI_EVENT_RTC */ {ACPI_BITREG_RT_CLOCK_STATUS, ACPI_BITREG_RT_CLOCK_ENABLE, 0, 0}, +}; -/* - * Strings and procedures used for debug only +/***************************************************************************** * - */ + * FUNCTION: acpi_ut_get_region_name + * + * PARAMETERS: None. + * + * RETURN: Status + * + * DESCRIPTION: Translate a Space ID into a name string (Debug only) + * + ****************************************************************************/ + +/* Region type decoding */ + +const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = +{ +/*! [Begin] no source code translation (keep these ASL Keywords as-is) */ + "SystemMemory", + "SystemIO", + "PCI_Config", + "EmbeddedControl", + "SMBus", + "CMOS", + "PCIBARTarget", + "DataTable" +/*! [End] no source code translation !*/ +}; + + +char * +acpi_ut_get_region_name ( + u8 space_id) +{ + + if (space_id >= ACPI_USER_REGION_BEGIN) + { + return ("user_defined_region"); + } + + else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) + { + return ("invalid_space_id"); + } + + return ((char *) acpi_gbl_region_types[space_id]); +} /***************************************************************************** * - * FUNCTION: Acpi_ut_get_mutex_name + * FUNCTION: acpi_ut_get_event_name * * PARAMETERS: None. * * RETURN: Status * - * DESCRIPTION: Translate a mutex ID into a name string (Debug only) + * DESCRIPTION: Translate a Event ID into a name string (Debug only) * ****************************************************************************/ -NATIVE_CHAR * -acpi_ut_get_mutex_name ( - u32 mutex_id) +/* Event type decoding */ + +static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = { + "PM_Timer", + "global_lock", + "power_button", + "sleep_button", + "real_time_clock", +}; + - if (mutex_id > MAX_MTX) +char * +acpi_ut_get_event_name ( + u32 event_id) +{ + + if (event_id > ACPI_EVENT_MAX) { - return ("Invalid Mutex ID"); + return ("invalid_event_iD"); } - return (acpi_gbl_mutex_names[mutex_id]); + return ((char *) acpi_gbl_event_types[event_id]); } +/***************************************************************************** + * + * FUNCTION: acpi_ut_get_type_name + * + * PARAMETERS: None. + * + * RETURN: Status + * + * DESCRIPTION: Translate a Type ID into a name string (Debug only) + * + ****************************************************************************/ + /* - * Elements of Acpi_gbl_Ns_type_names below must match + * Elements of acpi_gbl_ns_type_names below must match * one-to-one with values of acpi_object_type * * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching; when @@ -330,17 +468,17 @@ * indicatewhat type is actually going to be stored for this entry. 
*/ -static const NATIVE_CHAR acpi_gbl_bad_type[] = "UNDEFINED"; +static const char acpi_gbl_bad_type[] = "UNDEFINED"; #define TYPE_NAME_LENGTH 12 /* Maximum length of each string */ -static const NATIVE_CHAR *acpi_gbl_ns_type_names[] = /* printable names of ACPI types */ +static const char *acpi_gbl_ns_type_names[] = /* printable names of ACPI types */ { /* 00 */ "Untyped", /* 01 */ "Integer", /* 02 */ "String", /* 03 */ "Buffer", /* 04 */ "Package", - /* 05 */ "Field_unit", + /* 05 */ "field_unit", /* 06 */ "Device", /* 07 */ "Event", /* 08 */ "Method", @@ -349,154 +487,91 @@ /* 11 */ "Power", /* 12 */ "Processor", /* 13 */ "Thermal", - /* 14 */ "Buffer_field", - /* 15 */ "Ddb_handle", - /* 16 */ "Debug_object", - /* 17 */ "Region_field", - /* 18 */ "Bank_field", - /* 19 */ "Index_field", + /* 14 */ "buffer_field", + /* 15 */ "ddb_handle", + /* 16 */ "debug_object", + /* 17 */ "region_field", + /* 18 */ "bank_field", + /* 19 */ "index_field", /* 20 */ "Reference", /* 21 */ "Alias", /* 22 */ "Notify", - /* 23 */ "Addr_handler", - /* 24 */ "Resource_desc", - /* 25 */ "Resource_fld", - /* 26 */ "Region_fld_dfn", - /* 27 */ "Bank_fld_dfn", - /* 28 */ "Index_fld_dfn", - /* 29 */ "If", - /* 30 */ "Else", - /* 31 */ "While", - /* 32 */ "Scope", - /* 33 */ "Def_any", - /* 34 */ "Extra", - /* 35 */ "Invalid" + /* 23 */ "addr_handler", + /* 24 */ "resource_desc", + /* 25 */ "resource_fld", + /* 26 */ "Scope", + /* 27 */ "Extra", + /* 28 */ "Data", + /* 39 */ "Invalid" }; -/***************************************************************************** - * - * FUNCTION: Acpi_ut_get_type_name - * - * PARAMETERS: None. - * - * RETURN: Status - * - * DESCRIPTION: Translate a Type ID into a name string (Debug only) - * - ****************************************************************************/ - -NATIVE_CHAR * +char * acpi_ut_get_type_name ( - u32 type) + acpi_object_type type) { - if (type > INTERNAL_TYPE_INVALID) + if (type > ACPI_TYPE_INVALID) { - return ((NATIVE_CHAR *) acpi_gbl_bad_type); + return ((char *) acpi_gbl_bad_type); } - return ((NATIVE_CHAR *) acpi_gbl_ns_type_names[type]); + return ((char *) acpi_gbl_ns_type_names[type]); } -/* Region type decoding */ - -const NATIVE_CHAR *acpi_gbl_region_types[NUM_REGION_TYPES] = +char * +acpi_ut_get_object_type_name ( + union acpi_operand_object *obj_desc) { - "System_memory", - "System_iO", - "PCIConfig", - "Embedded_control", - "SMBus", - "CMOS", - "PCIBar_target", -}; + + if (!obj_desc) + { + return ("[NULL Object Descriptor]"); + } + + return (acpi_ut_get_type_name (ACPI_GET_OBJECT_TYPE (obj_desc))); +} +#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) +/* + * Strings and procedures used for debug only + */ + /***************************************************************************** * - * FUNCTION: Acpi_ut_get_region_name + * FUNCTION: acpi_ut_get_mutex_name * * PARAMETERS: None. 
* * RETURN: Status * - * DESCRIPTION: Translate a Space ID into a name string (Debug only) + * DESCRIPTION: Translate a mutex ID into a name string (Debug only) * ****************************************************************************/ -NATIVE_CHAR * -acpi_ut_get_region_name ( - u8 space_id) +char * +acpi_ut_get_mutex_name ( + u32 mutex_id) { - if (space_id >= USER_REGION_BEGIN) - { - return ("User_defined_region"); - } - - else if (space_id >= NUM_REGION_TYPES) + if (mutex_id > MAX_MUTEX) { - return ("Invalid_space_iD"); + return ("Invalid Mutex ID"); } - return ((NATIVE_CHAR *) acpi_gbl_region_types[space_id]); + return (acpi_gbl_mutex_names[mutex_id]); } - -/* Data used in keeping track of fields */ - -const NATIVE_CHAR *acpi_gbl_FEnames[NUM_FIELD_NAMES] = -{ - "skip", - "?access?" -}; /* FE = Field Element */ - - -const NATIVE_CHAR *acpi_gbl_match_ops[NUM_MATCH_OPS] = -{ - "Error", - "MTR", - "MEQ", - "MLE", - "MLT", - "MGE", - "MGT" -}; - - -/* Access type decoding */ - -const NATIVE_CHAR *acpi_gbl_access_types[NUM_ACCESS_TYPES] = -{ - "Any_acc", - "Byte_acc", - "Word_acc", - "DWord_acc", - "Block_acc", - "SMBSend_recv_acc", - "SMBQuick_acc" -}; - - -/* Update rule decoding */ - -const NATIVE_CHAR *acpi_gbl_update_rules[NUM_UPDATE_RULES] = -{ - "Preserve", - "Write_as_ones", - "Write_as_zeros" -}; - #endif /***************************************************************************** * - * FUNCTION: Acpi_ut_valid_object_type + * FUNCTION: acpi_ut_valid_object_type * - * PARAMETERS: None. + * PARAMETERS: Type - Object type to be validated * * RETURN: TRUE if valid object type * @@ -506,16 +581,14 @@ u8 acpi_ut_valid_object_type ( - u32 type) + acpi_object_type type) { - if (type > ACPI_TYPE_MAX) + if (type > ACPI_TYPE_LOCAL_MAX) { - if ((type < INTERNAL_TYPE_BEGIN) || - (type > INTERNAL_TYPE_MAX)) - { - return (FALSE); - } + /* Note: Assumes all TYPEs are contiguous (external/local) */ + + return (FALSE); } return (TRUE); @@ -524,9 +597,9 @@ /**************************************************************************** * - * FUNCTION: Acpi_ut_allocate_owner_id + * FUNCTION: acpi_ut_allocate_owner_id * - * PARAMETERS: Id_type - Type of ID (method or table) + * PARAMETERS: id_type - Type of ID (method or table) * * DESCRIPTION: Allocate a table or method owner id * @@ -534,52 +607,61 @@ acpi_owner_id acpi_ut_allocate_owner_id ( - u32 id_type) + u32 id_type) { - acpi_owner_id owner_id = 0xFFFF; + acpi_owner_id owner_id = 0xFFFF; - FUNCTION_TRACE ("Ut_allocate_owner_id"); + ACPI_FUNCTION_TRACE ("ut_allocate_owner_id"); - acpi_ut_acquire_mutex (ACPI_MTX_CACHES); + if (ACPI_FAILURE (acpi_ut_acquire_mutex (ACPI_MTX_CACHES))) + { + return (0); + } switch (id_type) { - case OWNER_TYPE_TABLE: + case ACPI_OWNER_TYPE_TABLE: owner_id = acpi_gbl_next_table_owner_id; acpi_gbl_next_table_owner_id++; - if (acpi_gbl_next_table_owner_id == FIRST_METHOD_ID) + /* Check for wraparound */ + + if (acpi_gbl_next_table_owner_id == ACPI_FIRST_METHOD_ID) { - acpi_gbl_next_table_owner_id = FIRST_TABLE_ID; + acpi_gbl_next_table_owner_id = ACPI_FIRST_TABLE_ID; + ACPI_REPORT_WARNING (("Table owner ID wraparound\n")); } break; - case OWNER_TYPE_METHOD: + case ACPI_OWNER_TYPE_METHOD: owner_id = acpi_gbl_next_method_owner_id; acpi_gbl_next_method_owner_id++; - if (acpi_gbl_next_method_owner_id == FIRST_TABLE_ID) + if (acpi_gbl_next_method_owner_id == ACPI_FIRST_TABLE_ID) { - acpi_gbl_next_method_owner_id = FIRST_METHOD_ID; + /* Check for wraparound */ + + acpi_gbl_next_method_owner_id = ACPI_FIRST_METHOD_ID; } 
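The owner-ID allocator above hands out IDs from two disjoint ranges, one for tables and one for methods, and when a counter reaches the end of its range it wraps back to that range's start rather than running into the other range. A minimal sketch of the wraparound rule, with hypothetical range bounds standing in for ACPI_FIRST_TABLE_ID / ACPI_FIRST_METHOD_ID and without the ACPI_MTX_CACHES locking the real code performs around the read-modify-write:

#include <stdio.h>

enum { FIRST_TABLE_ID = 0x0000, FIRST_METHOD_ID = 0x8000 }; /* hypothetical */

static unsigned short next_table_id = FIRST_TABLE_ID;

/* Allocate the next table owner ID, wrapping within the table range. */
static unsigned short alloc_table_owner_id(void)
{
    unsigned short id = next_table_id++;

    if (next_table_id == FIRST_METHOD_ID) {   /* check for wraparound */
        next_table_id = FIRST_TABLE_ID;
        fprintf(stderr, "table owner ID wraparound\n");
    }
    return id;
}

int main(void)
{
    next_table_id = FIRST_METHOD_ID - 1;      /* force a wrap for the demo */
    printf("%#x\n", alloc_table_owner_id());  /* 0x7fff */
    printf("%#x\n", alloc_table_owner_id());  /* 0, issued after the wrap */
    return 0;
}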
break; - } + default: + break; + } - acpi_ut_release_mutex (ACPI_MTX_CACHES); - + (void) acpi_ut_release_mutex (ACPI_MTX_CACHES); return_VALUE (owner_id); } /**************************************************************************** * - * FUNCTION: Acpi_ut_init_globals + * FUNCTION: acpi_ut_init_globals * * PARAMETERS: none * @@ -592,33 +674,33 @@ acpi_ut_init_globals ( void) { - u32 i; + u32 i; - FUNCTION_TRACE ("Ut_init_globals"); + ACPI_FUNCTION_TRACE ("ut_init_globals"); /* Memory allocation and cache lists */ - MEMSET (acpi_gbl_memory_lists, 0, sizeof (ACPI_MEMORY_LIST) * ACPI_NUM_MEM_LISTS); + ACPI_MEMSET (acpi_gbl_memory_lists, 0, sizeof (struct acpi_memory_list) * ACPI_NUM_MEM_LISTS); - acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].link_offset = (u16) (NATIVE_UINT) &(((acpi_generic_state *) NULL)->common.next); - acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].link_offset = (u16) (NATIVE_UINT) &(((acpi_parse_object *) NULL)->next); - acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].link_offset = (u16) (NATIVE_UINT) &(((acpi_parse2_object *) NULL)->next); - acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].link_offset = (u16) (NATIVE_UINT) &(((acpi_operand_object *) NULL)->cache.next); - acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].link_offset = (u16) (NATIVE_UINT) &(((acpi_walk_state *) NULL)->next); - - acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].object_size = sizeof (acpi_namespace_node); - acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].object_size = sizeof (acpi_generic_state); - acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].object_size = sizeof (acpi_parse_object); - acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].object_size = sizeof (acpi_parse2_object); - acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].object_size = sizeof (acpi_operand_object); - acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].object_size = sizeof (acpi_walk_state); - - acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].max_cache_depth = MAX_STATE_CACHE_DEPTH; - acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].max_cache_depth = MAX_PARSE_CACHE_DEPTH; - acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].max_cache_depth = MAX_EXTPARSE_CACHE_DEPTH; - acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].max_cache_depth = MAX_OBJECT_CACHE_DEPTH; - acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].max_cache_depth = MAX_WALK_CACHE_DEPTH; + acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].link_offset = (u16) ACPI_PTR_DIFF (&(((union acpi_generic_state *) NULL)->common.next), NULL); + acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].link_offset = (u16) ACPI_PTR_DIFF (&(((union acpi_parse_object *) NULL)->common.next), NULL); + acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].link_offset = (u16) ACPI_PTR_DIFF (&(((union acpi_parse_object *) NULL)->common.next), NULL); + acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].link_offset = (u16) ACPI_PTR_DIFF (&(((union acpi_operand_object *) NULL)->cache.next), NULL); + acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].link_offset = (u16) ACPI_PTR_DIFF (&(((struct acpi_walk_state *) NULL)->next), NULL); + + acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].object_size = sizeof (struct acpi_namespace_node); + acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].object_size = sizeof (union acpi_generic_state); + acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].object_size = sizeof (struct acpi_parse_obj_common); + acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].object_size = sizeof (struct acpi_parse_obj_named); + acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].object_size = sizeof (union acpi_operand_object); + acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].object_size = sizeof (struct 
acpi_walk_state); + + acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].max_cache_depth = ACPI_MAX_STATE_CACHE_DEPTH; + acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].max_cache_depth = ACPI_MAX_PARSE_CACHE_DEPTH; + acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].max_cache_depth = ACPI_MAX_EXTPARSE_CACHE_DEPTH; + acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].max_cache_depth = ACPI_MAX_OBJECT_CACHE_DEPTH; + acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].max_cache_depth = ACPI_MAX_WALK_CACHE_DEPTH; ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].list_name = "Global Memory Allocation"); ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].list_name = "Namespace Nodes"); @@ -630,38 +712,32 @@ /* ACPI table structure */ - for (i = 0; i < NUM_ACPI_TABLES; i++) + for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) { - acpi_gbl_acpi_tables[i].prev = &acpi_gbl_acpi_tables[i]; - acpi_gbl_acpi_tables[i].next = &acpi_gbl_acpi_tables[i]; - acpi_gbl_acpi_tables[i].pointer = NULL; - acpi_gbl_acpi_tables[i].length = 0; - acpi_gbl_acpi_tables[i].allocation = ACPI_MEM_NOT_ALLOCATED; - acpi_gbl_acpi_tables[i].count = 0; + acpi_gbl_table_lists[i].next = NULL; + acpi_gbl_table_lists[i].count = 0; } + /* Mutex locked flags */ - /* Address Space handler array */ - - for (i = 0; i < ACPI_NUM_ADDRESS_SPACES; i++) + for (i = 0; i < NUM_MUTEX; i++) { - acpi_gbl_address_spaces[i].handler = NULL; - acpi_gbl_address_spaces[i].context = NULL; + acpi_gbl_mutex_info[i].mutex = NULL; + acpi_gbl_mutex_info[i].owner_id = ACPI_MUTEX_NOT_ACQUIRED; + acpi_gbl_mutex_info[i].use_count = 0; } - /* Mutex locked flags */ + /* GPE support */ - for (i = 0; i < NUM_MTX; i++) - { - acpi_gbl_acpi_mutex_info[i].mutex = NULL; - acpi_gbl_acpi_mutex_info[i].owner_id = ACPI_MUTEX_NOT_ACQUIRED; - acpi_gbl_acpi_mutex_info[i].use_count = 0; - } + acpi_gbl_gpe_xrupt_list_head = NULL; + acpi_gbl_gpe_fadt_blocks[0] = NULL; + acpi_gbl_gpe_fadt_blocks[1] = NULL; /* Global notify handlers */ - acpi_gbl_sys_notify.handler = NULL; - acpi_gbl_drv_notify.handler = NULL; + acpi_gbl_system_notify.handler = NULL; + acpi_gbl_device_notify.handler = NULL; + acpi_gbl_init_handler = NULL; /* Global "typed" ACPI table pointers */ @@ -675,11 +751,11 @@ acpi_gbl_global_lock_acquired = FALSE; acpi_gbl_global_lock_thread_count = 0; + acpi_gbl_global_lock_handle = 0; /* Miscellaneous variables */ - acpi_gbl_system_flags = 0; - acpi_gbl_startup_flags = 0; + acpi_gbl_table_flags = ACPI_PHYSICAL_POINTER; acpi_gbl_rsdp_original_location = 0; acpi_gbl_cm_single_step = FALSE; acpi_gbl_db_terminate_threads = FALSE; @@ -687,33 +763,30 @@ acpi_gbl_ns_lookup_count = 0; acpi_gbl_ps_find_count = 0; acpi_gbl_acpi_hardware_present = TRUE; - acpi_gbl_next_table_owner_id = FIRST_TABLE_ID; - acpi_gbl_next_method_owner_id = FIRST_METHOD_ID; + acpi_gbl_next_table_owner_id = ACPI_FIRST_TABLE_ID; + acpi_gbl_next_method_owner_id = ACPI_FIRST_METHOD_ID; acpi_gbl_debugger_configuration = DEBUGGER_THREADING; + acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT; /* Hardware oriented */ - acpi_gbl_gpe0enable_register_save = NULL; - acpi_gbl_gpe1_enable_register_save = NULL; - acpi_gbl_original_mode = SYS_MODE_UNKNOWN; /* original ACPI/legacy mode */ - acpi_gbl_gpe_registers = NULL; - acpi_gbl_gpe_info = NULL; + acpi_gbl_events_initialized = FALSE; /* Namespace */ acpi_gbl_root_node = NULL; - acpi_gbl_root_node_struct.name = ACPI_ROOT_NAME; - acpi_gbl_root_node_struct.data_type = ACPI_DESC_TYPE_NAMED; - acpi_gbl_root_node_struct.type = ACPI_TYPE_ANY; + acpi_gbl_root_node_struct.name.integer = 
ACPI_ROOT_NAME; + acpi_gbl_root_node_struct.descriptor = ACPI_DESC_TYPE_NAMED; + acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE; acpi_gbl_root_node_struct.child = NULL; acpi_gbl_root_node_struct.peer = NULL; acpi_gbl_root_node_struct.object = NULL; acpi_gbl_root_node_struct.flags = ANOBJ_END_OF_PEER_LIST; -#ifdef ACPI_DEBUG - acpi_gbl_lowest_stack_pointer = ACPI_UINT32_MAX; +#ifdef ACPI_DEBUG_OUTPUT + acpi_gbl_lowest_stack_pointer = ACPI_SIZE_MAX; #endif return_VOID; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utinit.c linux.21rc5-ac2/drivers/acpi/utilities/utinit.c --- linux.21rc5/drivers/acpi/utilities/utinit.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/utinit.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,52 +1,63 @@ /****************************************************************************** * * Module Name: utinit - Common ACPI subsystem initialization - * $Revision: 102 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "achware.h" -#include "acnamesp.h" -#include "acevents.h" -#include "acparser.h" -#include "acdispat.h" +#include +#include +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utinit") - - -#define ACPI_OFFSET(d,o) ((u32) &(((d *)0)->o)) -#define ACPI_FADT_OFFSET(o) ACPI_OFFSET (FADT_DESCRIPTOR, o) + ACPI_MODULE_NAME ("utinit") /******************************************************************************* * - * FUNCTION: Acpi_ut_fadt_register_error + * FUNCTION: acpi_ut_fadt_register_error * - * PARAMETERS: *Register_name - Pointer to string identifying register + * PARAMETERS: *register_name - Pointer to string identifying register * Value - Actual register contents value - * Acpi_test_spec_section - TDS section containing assertion - * Acpi_assertion - Assertion number being tested + * acpi_test_spec_section - TDS section containing assertion + * acpi_assertion - Assertion number being tested * * RETURN: AE_BAD_VALUE * @@ -54,25 +65,22 @@ * ******************************************************************************/ -static acpi_status +static void acpi_ut_fadt_register_error ( - NATIVE_CHAR *register_name, - u32 value, - u32 offset) + char *register_name, + u32 value, + acpi_size offset) { - REPORT_ERROR ( - ("Invalid FADT value %s=%lX at offset %lX FADT=%p\n", - register_name, value, offset, acpi_gbl_FADT)); - - - return (AE_BAD_VALUE); + ACPI_REPORT_WARNING ( + ("Invalid FADT value %s=%X at offset %X FADT=%p\n", + register_name, value, (u32) offset, acpi_gbl_FADT)); } /****************************************************************************** * - * FUNCTION: Acpi_ut_validate_fadt + * FUNCTION: acpi_ut_validate_fadt * * PARAMETERS: None * @@ -86,101 +94,114 @@ acpi_ut_validate_fadt ( void) { - acpi_status status = AE_OK; - /* * Verify Fixed ACPI Description Table fields, * but don't abort on any problems, just display error */ if (acpi_gbl_FADT->pm1_evt_len < 4) { - status = acpi_ut_fadt_register_error ("PM1_EVT_LEN", + acpi_ut_fadt_register_error ("PM1_EVT_LEN", (u32) acpi_gbl_FADT->pm1_evt_len, ACPI_FADT_OFFSET (pm1_evt_len)); } if (!acpi_gbl_FADT->pm1_cnt_len) { - status = acpi_ut_fadt_register_error ("PM1_CNT_LEN", 0, + acpi_ut_fadt_register_error ("PM1_CNT_LEN", 0, ACPI_FADT_OFFSET (pm1_cnt_len)); } - if (!ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xpm1a_evt_blk.address)) { - status = acpi_ut_fadt_register_error ("X_PM1a_EVT_BLK", 0, - ACPI_FADT_OFFSET (Xpm1a_evt_blk.address)); + if (!acpi_gbl_FADT->xpm1a_evt_blk.address) { + acpi_ut_fadt_register_error ("X_PM1a_EVT_BLK", 0, + ACPI_FADT_OFFSET (xpm1a_evt_blk.address)); } - if (!ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xpm1a_cnt_blk.address)) { - status = acpi_ut_fadt_register_error ("X_PM1a_CNT_BLK", 0, - ACPI_FADT_OFFSET (Xpm1a_cnt_blk.address)); + if (!acpi_gbl_FADT->xpm1a_cnt_blk.address) { + acpi_ut_fadt_register_error ("X_PM1a_CNT_BLK", 0, + ACPI_FADT_OFFSET (xpm1a_cnt_blk.address)); } - if (!ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xpm_tmr_blk.address)) { - status = acpi_ut_fadt_register_error 
("X_PM_TMR_BLK", 0, - ACPI_FADT_OFFSET (Xpm_tmr_blk.address)); + if (!acpi_gbl_FADT->xpm_tmr_blk.address) { + acpi_ut_fadt_register_error ("X_PM_TMR_BLK", 0, + ACPI_FADT_OFFSET (xpm_tmr_blk.address)); } - if ((ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xpm2_cnt_blk.address) && + if ((acpi_gbl_FADT->xpm2_cnt_blk.address && !acpi_gbl_FADT->pm2_cnt_len)) { - status = acpi_ut_fadt_register_error ("PM2_CNT_LEN", + acpi_ut_fadt_register_error ("PM2_CNT_LEN", (u32) acpi_gbl_FADT->pm2_cnt_len, ACPI_FADT_OFFSET (pm2_cnt_len)); } if (acpi_gbl_FADT->pm_tm_len < 4) { - status = acpi_ut_fadt_register_error ("PM_TM_LEN", + acpi_ut_fadt_register_error ("PM_TM_LEN", (u32) acpi_gbl_FADT->pm_tm_len, ACPI_FADT_OFFSET (pm_tm_len)); } - /* length of GPE blocks must be a multiple of 2 */ + /* Length of GPE blocks must be a multiple of 2 */ - if (ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xgpe0blk.address) && - (acpi_gbl_FADT->gpe0blk_len & 1)) { - status = acpi_ut_fadt_register_error ("(x)GPE0_BLK_LEN", - (u32) acpi_gbl_FADT->gpe0blk_len, - ACPI_FADT_OFFSET (gpe0blk_len)); + if (acpi_gbl_FADT->xgpe0_blk.address && + (acpi_gbl_FADT->gpe0_blk_len & 1)) { + acpi_ut_fadt_register_error ("(x)GPE0_BLK_LEN", + (u32) acpi_gbl_FADT->gpe0_blk_len, + ACPI_FADT_OFFSET (gpe0_blk_len)); } - if (ACPI_VALID_ADDRESS (acpi_gbl_FADT->Xgpe1_blk.address) && + if (acpi_gbl_FADT->xgpe1_blk.address && (acpi_gbl_FADT->gpe1_blk_len & 1)) { - status = acpi_ut_fadt_register_error ("(x)GPE1_BLK_LEN", + acpi_ut_fadt_register_error ("(x)GPE1_BLK_LEN", (u32) acpi_gbl_FADT->gpe1_blk_len, ACPI_FADT_OFFSET (gpe1_blk_len)); } - return (status); + return (AE_OK); } /****************************************************************************** * - * FUNCTION: Acpi_ut_terminate + * FUNCTION: acpi_ut_terminate * * PARAMETERS: none * * RETURN: none * - * DESCRIPTION: free memory allocated for table storage. + * DESCRIPTION: free global memory * ******************************************************************************/ void acpi_ut_terminate (void) { + struct acpi_gpe_block_info *gpe_block; + struct acpi_gpe_block_info *next_gpe_block; + struct acpi_gpe_xrupt_info *gpe_xrupt_info; + struct acpi_gpe_xrupt_info *next_gpe_xrupt_info; - FUNCTION_TRACE ("Ut_terminate"); + + ACPI_FUNCTION_TRACE ("ut_terminate"); /* Free global tables, etc. 
*/ - if (acpi_gbl_gpe0enable_register_save) { - ACPI_MEM_FREE (acpi_gbl_gpe0enable_register_save); - } - if (acpi_gbl_gpe1_enable_register_save) { - ACPI_MEM_FREE (acpi_gbl_gpe1_enable_register_save); - } + /* Free global GPE blocks and related info structures */ + gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; + while (gpe_xrupt_info) { + gpe_block = gpe_xrupt_info->gpe_block_list_head; + while (gpe_block) { + next_gpe_block = gpe_block->next; + ACPI_MEM_FREE (gpe_block->event_info); + ACPI_MEM_FREE (gpe_block->register_info); + ACPI_MEM_FREE (gpe_block); + + gpe_block = next_gpe_block; + } + next_gpe_xrupt_info = gpe_xrupt_info->next; + ACPI_MEM_FREE (gpe_xrupt_info); + gpe_xrupt_info = next_gpe_xrupt_info; + } return_VOID; } @@ -188,7 +209,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_subsystem_shutdown + * FUNCTION: acpi_ut_subsystem_shutdown * * PARAMETERS: none * @@ -199,17 +220,17 @@ * ******************************************************************************/ -acpi_status +void acpi_ut_subsystem_shutdown (void) { - FUNCTION_TRACE ("Ut_subsystem_shutdown"); + ACPI_FUNCTION_TRACE ("ut_subsystem_shutdown"); /* Just exit if subsystem is already shutdown */ if (acpi_gbl_shutdown) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "ACPI Subsystem is already terminated\n")); - return_ACPI_STATUS (AE_OK); + return_VOID; } /* Subsystem appears active, go ahead and shut it down */ @@ -217,30 +238,21 @@ acpi_gbl_shutdown = TRUE; ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Shutting down ACPI Subsystem...\n")); + /* Close the acpi_event Handling */ + + acpi_ev_terminate (); /* Close the Namespace */ acpi_ns_terminate (); - /* Close the Acpi_event Handling */ - - acpi_ev_terminate (); - /* Close the globals */ acpi_ut_terminate (); - /* Flush the local cache(s) */ - - acpi_ut_delete_generic_state_cache (); - acpi_ut_delete_object_cache (); - acpi_ds_delete_walk_state_cache (); - - /* Close the Parser */ + /* Purge the local caches */ - /* TBD: [Restructure] Acpi_ps_terminate () */ - - acpi_ps_delete_parse_cache (); + (void) acpi_purge_cached_objects (); /* Debug only - display leftover memory allocation, if any */ @@ -248,7 +260,7 @@ acpi_ut_dump_allocations (ACPI_UINT32_MAX, NULL); #endif - return_ACPI_STATUS (AE_OK); + return_VOID; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utmath.c linux.21rc5-ac2/drivers/acpi/utilities/utmath.c --- linux.21rc5/drivers/acpi/utilities/utmath.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/utmath.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,34 +1,52 @@ /******************************************************************************* * * Module Name: utmath - Integer math support routines - * $Revision: 7 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
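The rewritten acpi_ut_terminate() above frees a two-level structure: a list of GPE interrupt-info nodes, each owning its own list of GPE blocks. The key detail is that each node's next pointer must be saved before the node is freed. A self-contained sketch of that teardown shape, with simplified stand-in types rather than the patch's struct acpi_gpe_xrupt_info / struct acpi_gpe_block_info:

#include <stdlib.h>

struct gpe_block { struct gpe_block *next; };
struct gpe_xrupt { struct gpe_xrupt *next; struct gpe_block *blocks; };

static void free_gpe_lists(struct gpe_xrupt *xrupt)
{
    while (xrupt) {
        struct gpe_block *block = xrupt->blocks;

        /* Free the inner list first... */
        while (block) {
            struct gpe_block *next_block = block->next; /* save before free */
            free(block);
            block = next_block;
        }

        /* ...then the outer node, again saving its successor first. */
        struct gpe_xrupt *next_xrupt = xrupt->next;
        free(xrupt);
        xrupt = next_xrupt;
    }
}

int main(void)
{
    struct gpe_xrupt *x = calloc(1, sizeof(*x));

    x->blocks = calloc(1, sizeof(*x->blocks));
    free_gpe_lists(x);  /* frees the block, then the xrupt node */
    return 0;
}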
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utmath") + ACPI_MODULE_NAME ("utmath") /* * Support for double-precision integer divide. 
This code is included here @@ -39,12 +57,12 @@ #ifndef ACPI_USE_NATIVE_DIVIDE /******************************************************************************* * - * FUNCTION: Acpi_ut_short_divide + * FUNCTION: acpi_ut_short_divide * - * PARAMETERS: In_dividend - Pointer to the dividend + * PARAMETERS: in_dividend - Pointer to the dividend * Divisor - 32-bit divisor - * Out_quotient - Pointer to where the quotient is returned - * Out_remainder - Pointer to where the remainder is returned + * out_quotient - Pointer to where the quotient is returned + * out_remainder - Pointer to where the remainder is returned * * RETURN: Status (Checks for divide-by-zero) * @@ -56,24 +74,24 @@ acpi_status acpi_ut_short_divide ( - acpi_integer *in_dividend, - u32 divisor, - acpi_integer *out_quotient, - u32 *out_remainder) + acpi_integer *in_dividend, + u32 divisor, + acpi_integer *out_quotient, + u32 *out_remainder) { - uint64_overlay dividend; - uint64_overlay quotient; - u32 remainder32; + union uint64_overlay dividend; + union uint64_overlay quotient; + u32 remainder32; - FUNCTION_TRACE ("Ut_short_divide"); + ACPI_FUNCTION_TRACE ("ut_short_divide"); dividend.full = *in_dividend; /* Always check for a zero divisor */ if (divisor == 0) { - REPORT_ERROR (("Acpi_ut_short_divide: Divide by zero\n")); + ACPI_REPORT_ERROR (("acpi_ut_short_divide: Divide by zero\n")); return_ACPI_STATUS (AE_AML_DIVIDE_BY_ZERO); } @@ -101,12 +119,12 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_divide + * FUNCTION: acpi_ut_divide * - * PARAMETERS: In_dividend - Pointer to the dividend - * In_divisor - Pointer to the divisor - * Out_quotient - Pointer to where the quotient is returned - * Out_remainder - Pointer to where the remainder is returned + * PARAMETERS: in_dividend - Pointer to the dividend + * in_divisor - Pointer to the divisor + * out_quotient - Pointer to where the quotient is returned + * out_remainder - Pointer to where the remainder is returned * * RETURN: Status (Checks for divide-by-zero) * @@ -116,29 +134,29 @@ acpi_status acpi_ut_divide ( - acpi_integer *in_dividend, - acpi_integer *in_divisor, - acpi_integer *out_quotient, - acpi_integer *out_remainder) + acpi_integer *in_dividend, + acpi_integer *in_divisor, + acpi_integer *out_quotient, + acpi_integer *out_remainder) { - uint64_overlay dividend; - uint64_overlay divisor; - uint64_overlay quotient; - uint64_overlay remainder; - uint64_overlay normalized_dividend; - uint64_overlay normalized_divisor; - u32 partial1; - uint64_overlay partial2; - uint64_overlay partial3; + union uint64_overlay dividend; + union uint64_overlay divisor; + union uint64_overlay quotient; + union uint64_overlay remainder; + union uint64_overlay normalized_dividend; + union uint64_overlay normalized_divisor; + u32 partial1; + union uint64_overlay partial2; + union uint64_overlay partial3; - FUNCTION_TRACE ("Ut_divide"); + ACPI_FUNCTION_TRACE ("ut_divide"); /* Always check for a zero divisor */ if (*in_divisor == 0) { - REPORT_ERROR (("Acpi_ut_divide: Divide by zero\n")); + ACPI_REPORT_ERROR (("acpi_ut_divide: Divide by zero\n")); return_ACPI_STATUS (AE_AML_DIVIDE_BY_ZERO); } @@ -193,7 +211,7 @@ */ partial1 = quotient.part.lo * divisor.part.hi; partial2.full = (acpi_integer) quotient.part.lo * divisor.part.lo; - partial3.full = partial2.part.hi + partial1; + partial3.full = (acpi_integer) partial2.part.hi + partial1; remainder.part.hi = partial3.part.lo; remainder.part.lo = partial2.part.lo; @@ -213,8 +231,8 @@ } 
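The non-native divide path above exists because 32-bit targets have no 64-bit divide instruction: 64-bit shifts and subtracts compile inline, while a 64-bit "/" would pull in a runtime helper. The patch's acpi_ut_divide() uses divisor normalization for speed; the sketch below is not that algorithm, but the simplest correct form of the same idea, a restoring shift-and-subtract 64-by-32 divide with the same divide-by-zero guard:

#include <stdio.h>
#include <stdint.h>

/* Produce quotient and remainder without using a 64-bit divide. */
static int demo_short_divide(uint64_t dividend, uint32_t divisor,
                             uint64_t *quotient, uint32_t *remainder)
{
    uint64_t q = 0, r = 0;
    int i;

    if (divisor == 0)
        return -1;  /* analogous to AE_AML_DIVIDE_BY_ZERO above */

    /* Bring in one dividend bit at a time, subtracting the divisor
     * whenever the partial remainder reaches it. */
    for (i = 63; i >= 0; i--) {
        r = (r << 1) | ((dividend >> i) & 1);
        if (r >= divisor) {
            r -= divisor;
            q |= (uint64_t)1 << i;
        }
    }

    if (quotient)
        *quotient = q;
    if (remainder)
        *remainder = (uint32_t)r;
    return 0;
}

int main(void)
{
    uint64_t q;
    uint32_t r;

    if (demo_short_divide(0x123456789ABCDEF0ULL, 10, &q, &r) == 0)
        printf("q=%llx r=%u\n", (unsigned long long)q, r);
    return 0;
}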
remainder.full = remainder.full - dividend.full; - remainder.part.hi = -((s32) remainder.part.hi); - remainder.part.lo = -((s32) remainder.part.lo); + remainder.part.hi = (u32) -((s32) remainder.part.hi); + remainder.part.lo = (u32) -((s32) remainder.part.lo); if (remainder.part.lo) { remainder.part.hi--; @@ -238,9 +256,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_short_divide, Acpi_ut_divide + * FUNCTION: acpi_ut_short_divide, acpi_ut_divide * - * DESCRIPTION: Native versions of the Ut_divide functions. Use these if either + * DESCRIPTION: Native versions of the ut_divide functions. Use these if either * 1) The target is a 64-bit platform and therefore 64-bit * integer math is supported directly by the machine. * 2) The target is a 32-bit or 16-bit platform, and the @@ -251,19 +269,19 @@ acpi_status acpi_ut_short_divide ( - acpi_integer *in_dividend, - u32 divisor, - acpi_integer *out_quotient, - u32 *out_remainder) + acpi_integer *in_dividend, + u32 divisor, + acpi_integer *out_quotient, + u32 *out_remainder) { - FUNCTION_TRACE ("Ut_short_divide"); + ACPI_FUNCTION_TRACE ("ut_short_divide"); /* Always check for a zero divisor */ if (divisor == 0) { - REPORT_ERROR (("Acpi_ut_short_divide: Divide by zero\n")); + ACPI_REPORT_ERROR (("acpi_ut_short_divide: Divide by zero\n")); return_ACPI_STATUS (AE_AML_DIVIDE_BY_ZERO); } @@ -281,18 +299,18 @@ acpi_status acpi_ut_divide ( - acpi_integer *in_dividend, - acpi_integer *in_divisor, - acpi_integer *out_quotient, - acpi_integer *out_remainder) + acpi_integer *in_dividend, + acpi_integer *in_divisor, + acpi_integer *out_quotient, + acpi_integer *out_remainder) { - FUNCTION_TRACE ("Ut_divide"); + ACPI_FUNCTION_TRACE ("ut_divide"); /* Always check for a zero divisor */ if (*in_divisor == 0) { - REPORT_ERROR (("Acpi_ut_divide: Divide by zero\n")); + ACPI_REPORT_ERROR (("acpi_ut_divide: Divide by zero\n")); return_ACPI_STATUS (AE_AML_DIVIDE_BY_ZERO); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utmisc.c linux.21rc5-ac2/drivers/acpi/utilities/utmisc.c --- linux.21rc5/drivers/acpi/utilities/utmisc.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/utmisc.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,45 +1,290 @@ /******************************************************************************* * * Module Name: utmisc - common utility procedures - * $Revision: 52 $ * ******************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acevents.h" -#include "achware.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acdebug.h" +#include +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utmisc") + ACPI_MODULE_NAME ("utmisc") /******************************************************************************* * - * FUNCTION: Acpi_ut_valid_acpi_name + * FUNCTION: acpi_ut_print_string + * + * PARAMETERS: String - Null terminated ASCII string + * + * RETURN: None + * + * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape + * sequences. 
+ * + ******************************************************************************/ + +void +acpi_ut_print_string ( + char *string, + u8 max_length) +{ + u32 i; + + + if (!string) { + acpi_os_printf ("<\"NULL STRING PTR\">"); + return; + } + + acpi_os_printf ("\""); + for (i = 0; string[i] && (i < max_length); i++) { + /* Escape sequences */ + + switch (string[i]) { + case 0x07: + acpi_os_printf ("\\a"); /* BELL */ + break; + + case 0x08: + acpi_os_printf ("\\b"); /* BACKSPACE */ + break; + + case 0x0C: + acpi_os_printf ("\\f"); /* FORMFEED */ + break; + + case 0x0A: + acpi_os_printf ("\\n"); /* LINEFEED */ + break; + + case 0x0D: + acpi_os_printf ("\\r"); /* CARRIAGE RETURN*/ + break; + + case 0x09: + acpi_os_printf ("\\t"); /* HORIZONTAL TAB */ + break; + + case 0x0B: + acpi_os_printf ("\\v"); /* VERTICAL TAB */ + break; + + case '\'': /* Single Quote */ + case '\"': /* Double Quote */ + case '\\': /* Backslash */ + acpi_os_printf ("\\%c", (int) string[i]); + break; + + default: + + /* Check for printable character or hex escape */ + + if (ACPI_IS_PRINT (string[i])) + { + /* This is a normal character */ + + acpi_os_printf ("%c", (int) string[i]); + } + else + { + /* All others will be Hex escapes */ + + acpi_os_printf ("\\x%2.2X", (s32) string[i]); + } + break; + } + } + acpi_os_printf ("\""); + + if (i == max_length && string[i]) { + acpi_os_printf ("..."); + } +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_dword_byte_swap + * + * PARAMETERS: Value - Value to be converted + * + * DESCRIPTION: Convert a 32-bit value to big-endian (swap the bytes) + * + ******************************************************************************/ + +u32 +acpi_ut_dword_byte_swap ( + u32 value) +{ + union { + u32 value; + u8 bytes[4]; + } out; + + union { + u32 value; + u8 bytes[4]; + } in; + + + ACPI_FUNCTION_ENTRY (); + + + in.value = value; + + out.bytes[0] = in.bytes[3]; + out.bytes[1] = in.bytes[2]; + out.bytes[2] = in.bytes[1]; + out.bytes[3] = in.bytes[0]; + + return (out.value); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_set_integer_width + * + * PARAMETERS: Revision From DSDT header + * + * RETURN: None + * + * DESCRIPTION: Set the global integer bit width based upon the revision + * of the DSDT. For Revision 1 and 0, Integers are 32 bits. + * For Revision 2 and above, Integers are 64 bits. Yes, this + * makes a difference. + * + ******************************************************************************/ + +void +acpi_ut_set_integer_width ( + u8 revision) +{ + + if (revision <= 1) { + acpi_gbl_integer_bit_width = 32; + acpi_gbl_integer_byte_width = 4; + } + else { + acpi_gbl_integer_bit_width = 64; + acpi_gbl_integer_byte_width = 8; + } +} + + +#ifdef ACPI_DEBUG_OUTPUT +/******************************************************************************* + * + * FUNCTION: acpi_ut_display_init_pathname + * + * PARAMETERS: obj_handle - Handle whose pathname will be displayed + * Path - Additional path string to be appended. 
+ * (NULL if no extra path) + * + * RETURN: acpi_status + * + * DESCRIPTION: Display full pathname of an object, DEBUG ONLY + * + ******************************************************************************/ + +void +acpi_ut_display_init_pathname ( + u8 type, + struct acpi_namespace_node *obj_handle, + char *path) +{ + acpi_status status; + struct acpi_buffer buffer; + + + ACPI_FUNCTION_ENTRY (); + + + /* Only print the path if the appropriate debug level is enabled */ + + if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) { + return; + } + + /* Get the full pathname to the node */ + + buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; + status = acpi_ns_handle_to_pathname (obj_handle, &buffer); + if (ACPI_FAILURE (status)) { + return; + } + + /* Print what we're doing */ + + switch (type) { + case ACPI_TYPE_METHOD: + acpi_os_printf ("Executing "); + break; + + default: + acpi_os_printf ("Initializing "); + break; + } + + /* Print the object type and pathname */ + + acpi_os_printf ("%-12s %s", acpi_ut_get_type_name (type), (char *) buffer.pointer); + + /* Extra path is used to append names like _STA, _INI, etc. */ + + if (path) { + acpi_os_printf (".%s", path); + } + acpi_os_printf ("\n"); + + ACPI_MEM_FREE (buffer.pointer); +} +#endif + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_valid_acpi_name * * PARAMETERS: Character - The character to be examined * @@ -54,13 +299,13 @@ u8 acpi_ut_valid_acpi_name ( - u32 name) + u32 name) { - NATIVE_CHAR *name_ptr = (NATIVE_CHAR *) &name; - u32 i; + char *name_ptr = (char *) &name; + u32 i; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); for (i = 0; i < ACPI_NAME_SIZE; i++) { @@ -77,7 +322,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_valid_acpi_character + * FUNCTION: acpi_ut_valid_acpi_character * * PARAMETERS: Character - The character to be examined * @@ -89,10 +334,10 @@ u8 acpi_ut_valid_acpi_character ( - NATIVE_CHAR character) + char character) { - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); return ((u8) ((character == '_') || (character >= 'A' && character <= 'Z') || @@ -102,40 +347,185 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_strupr + * FUNCTION: acpi_ut_strtoul64 + * + * PARAMETERS: String - Null terminated string + * Terminater - Where a pointer to the terminating byte is returned + * Base - Radix of the string + * + * RETURN: Converted value + * + * DESCRIPTION: Convert a string into an unsigned value. 
+ * + ******************************************************************************/ +#define NEGATIVE 1 +#define POSITIVE 0 + +acpi_status +acpi_ut_strtoul64 ( + char *string, + u32 base, + acpi_integer *ret_integer) +{ + u32 index; + acpi_integer return_value = 0; + acpi_status status = AE_OK; + acpi_integer dividend; + acpi_integer quotient; + + + *ret_integer = 0; + + switch (base) { + case 0: + case 8: + case 10: + case 16: + break; + + default: + /* + * The specified Base parameter is not in the domain of + * this function: + */ + return (AE_BAD_PARAMETER); + } + + /* + * skip over any white space in the buffer: + */ + while (ACPI_IS_SPACE (*string) || *string == '\t') { + ++string; + } + + /* + * If the input parameter Base is zero, then we need to + * determine if it is octal, decimal, or hexadecimal: + */ + if (base == 0) { + if (*string == '0') { + if (ACPI_TOLOWER (*(++string)) == 'x') { + base = 16; + ++string; + } + else { + base = 8; + } + } + else { + base = 10; + } + } + + /* + * For octal and hexadecimal bases, skip over the leading + * 0 or 0x, if they are present. + */ + if (base == 8 && *string == '0') { + string++; + } + + if (base == 16 && + *string == '0' && + ACPI_TOLOWER (*(++string)) == 'x') { + string++; + } + + /* Main loop: convert the string to an unsigned long */ + + while (*string) { + if (ACPI_IS_DIGIT (*string)) { + index = ((u8) *string) - '0'; + } + else { + index = (u8) ACPI_TOUPPER (*string); + if (ACPI_IS_UPPER ((char) index)) { + index = index - 'A' + 10; + } + else { + goto error_exit; + } + } + + if (index >= base) { + goto error_exit; + } + + /* Check to see if value is out of range: */ + + dividend = ACPI_INTEGER_MAX - (acpi_integer) index; + (void) acpi_ut_short_divide (÷nd, base, "ient, NULL); + if (return_value > quotient) { + goto error_exit; + } + + return_value *= base; + return_value += index; + ++string; + } + + *ret_integer = return_value; + return (status); + + +error_exit: + switch (base) { + case 8: + status = AE_BAD_OCTAL_CONSTANT; + break; + + case 10: + status = AE_BAD_DECIMAL_CONSTANT; + break; + + case 16: + status = AE_BAD_HEX_CONSTANT; + break; + + default: + /* Base validated above */ + break; + } + + return (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strupr * - * PARAMETERS: Src_string - The source string to convert to + * PARAMETERS: src_string - The source string to convert to * - * RETURN: Src_string + * RETURN: src_string * * DESCRIPTION: Convert string to uppercase * ******************************************************************************/ -NATIVE_CHAR * +char * acpi_ut_strupr ( - NATIVE_CHAR *src_string) + char *src_string) { - NATIVE_CHAR *string; + char *string; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* Walk entire string, uppercasing the letters */ for (string = src_string; *string; ) { - *string = (char) TOUPPER (*string); + *string = (char) ACPI_TOUPPER (*string); string++; } - return (src_string); } /******************************************************************************* * - * FUNCTION: Acpi_ut_mutex_initialize + * FUNCTION: acpi_ut_mutex_initialize * * PARAMETERS: None. 
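acpi_ut_strtoul64() above guards each multiply-and-add against overflow by precomputing the largest accumulator value that can still absorb the next digit, (ACPI_INTEGER_MAX - digit) / base, and bailing out if the current value already exceeds it. A standalone demonstration of that check, with uint64_t standing in for acpi_integer and a plain native divide in place of acpi_ut_short_divide():

#include <stdio.h>
#include <stdint.h>

/* Fold one digit into *value in the given base, refusing if the
 * result would not fit in 64 bits. */
static int accumulate_digit(uint64_t *value, unsigned digit, unsigned base)
{
    uint64_t limit = (UINT64_MAX - digit) / base;

    if (*value > limit)
        return -1;  /* would overflow, cf. the error_exit path above */

    *value = *value * base + digit;
    return 0;
}

int main(void)
{
    uint64_t v = UINT64_MAX / 16;  /* exactly one hex digit of headroom */

    printf("%d\n", accumulate_digit(&v, 0xF, 16)); /*  0: fits exactly */
    printf("%d\n", accumulate_digit(&v, 0x1, 16)); /* -1: overflows    */
    return 0;
}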
* @@ -149,30 +539,33 @@ acpi_ut_mutex_initialize ( void) { - u32 i; - acpi_status status; + u32 i; + acpi_status status; - FUNCTION_TRACE ("Ut_mutex_initialize"); + ACPI_FUNCTION_TRACE ("ut_mutex_initialize"); /* * Create each of the predefined mutex objects */ - for (i = 0; i < NUM_MTX; i++) { + for (i = 0; i < NUM_MUTEX; i++) { status = acpi_ut_create_mutex (i); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } } + + status = acpi_os_create_lock (&acpi_gbl_gpe_lock); + return_ACPI_STATUS (AE_OK); } /******************************************************************************* * - * FUNCTION: Acpi_ut_mutex_terminate + * FUNCTION: acpi_ut_mutex_terminate * * PARAMETERS: None. * @@ -186,28 +579,29 @@ acpi_ut_mutex_terminate ( void) { - u32 i; + u32 i; - FUNCTION_TRACE ("Ut_mutex_terminate"); + ACPI_FUNCTION_TRACE ("ut_mutex_terminate"); /* * Delete each predefined mutex object */ - for (i = 0; i < NUM_MTX; i++) { - acpi_ut_delete_mutex (i); + for (i = 0; i < NUM_MUTEX; i++) { + (void) acpi_ut_delete_mutex (i); } + acpi_os_delete_lock (acpi_gbl_gpe_lock); return_VOID; } /******************************************************************************* * - * FUNCTION: Acpi_ut_create_mutex + * FUNCTION: acpi_ut_create_mutex * - * PARAMETERS: Mutex_iD - ID of the mutex to be created + * PARAMETERS: mutex_iD - ID of the mutex to be created * * RETURN: Status * @@ -217,24 +611,23 @@ acpi_status acpi_ut_create_mutex ( - ACPI_MUTEX_HANDLE mutex_id) + acpi_mutex_handle mutex_id) { - acpi_status status = AE_OK; + acpi_status status = AE_OK; - FUNCTION_TRACE_U32 ("Ut_create_mutex", mutex_id); + ACPI_FUNCTION_TRACE_U32 ("ut_create_mutex", mutex_id); - if (mutex_id > MAX_MTX) { + if (mutex_id > MAX_MUTEX) { return_ACPI_STATUS (AE_BAD_PARAMETER); } - - if (!acpi_gbl_acpi_mutex_info[mutex_id].mutex) { + if (!acpi_gbl_mutex_info[mutex_id].mutex) { status = acpi_os_create_semaphore (1, 1, - &acpi_gbl_acpi_mutex_info[mutex_id].mutex); - acpi_gbl_acpi_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; - acpi_gbl_acpi_mutex_info[mutex_id].use_count = 0; + &acpi_gbl_mutex_info[mutex_id].mutex); + acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; + acpi_gbl_mutex_info[mutex_id].use_count = 0; } return_ACPI_STATUS (status); @@ -243,9 +636,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_delete_mutex + * FUNCTION: acpi_ut_delete_mutex * - * PARAMETERS: Mutex_iD - ID of the mutex to be deleted + * PARAMETERS: mutex_iD - ID of the mutex to be deleted * * RETURN: Status * @@ -255,23 +648,22 @@ acpi_status acpi_ut_delete_mutex ( - ACPI_MUTEX_HANDLE mutex_id) + acpi_mutex_handle mutex_id) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE_U32 ("Ut_delete_mutex", mutex_id); + ACPI_FUNCTION_TRACE_U32 ("ut_delete_mutex", mutex_id); - if (mutex_id > MAX_MTX) { + if (mutex_id > MAX_MUTEX) { return_ACPI_STATUS (AE_BAD_PARAMETER); } + status = acpi_os_delete_semaphore (acpi_gbl_mutex_info[mutex_id].mutex); - status = acpi_os_delete_semaphore (acpi_gbl_acpi_mutex_info[mutex_id].mutex); - - acpi_gbl_acpi_mutex_info[mutex_id].mutex = NULL; - acpi_gbl_acpi_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; + acpi_gbl_mutex_info[mutex_id].mutex = NULL; + acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; return_ACPI_STATUS (status); } @@ -279,9 +671,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_acquire_mutex + * FUNCTION: acpi_ut_acquire_mutex 
* - * PARAMETERS: Mutex_iD - ID of the mutex to be acquired + * PARAMETERS: mutex_iD - ID of the mutex to be acquired * * RETURN: Status * @@ -291,21 +683,20 @@ acpi_status acpi_ut_acquire_mutex ( - ACPI_MUTEX_HANDLE mutex_id) + acpi_mutex_handle mutex_id) { - acpi_status status; - u32 i; - u32 this_thread_id; + acpi_status status; + u32 i; + u32 this_thread_id; - PROC_NAME ("Ut_acquire_mutex"); + ACPI_FUNCTION_NAME ("ut_acquire_mutex"); - if (mutex_id > MAX_MTX) { + if (mutex_id > MAX_MUTEX) { return (AE_BAD_PARAMETER); } - this_thread_id = acpi_os_get_thread_id (); /* @@ -314,8 +705,8 @@ * the mutex ordering rule. This indicates a coding error somewhere in * the ACPI subsystem code. */ - for (i = mutex_id; i < MAX_MTX; i++) { - if (acpi_gbl_acpi_mutex_info[i].owner_id == this_thread_id) { + for (i = mutex_id; i < MAX_MUTEX; i++) { + if (acpi_gbl_mutex_info[i].owner_id == this_thread_id) { if (i == mutex_id) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Mutex [%s] already acquired by this thread [%X]\n", @@ -333,22 +724,19 @@ } } - ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Thread %X attempting to acquire Mutex [%s]\n", this_thread_id, acpi_ut_get_mutex_name (mutex_id))); - status = acpi_os_wait_semaphore (acpi_gbl_acpi_mutex_info[mutex_id].mutex, - 1, WAIT_FOREVER); - + status = acpi_os_wait_semaphore (acpi_gbl_mutex_info[mutex_id].mutex, + 1, ACPI_WAIT_FOREVER); if (ACPI_SUCCESS (status)) { ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Thread %X acquired Mutex [%s]\n", this_thread_id, acpi_ut_get_mutex_name (mutex_id))); - acpi_gbl_acpi_mutex_info[mutex_id].use_count++; - acpi_gbl_acpi_mutex_info[mutex_id].owner_id = this_thread_id; + acpi_gbl_mutex_info[mutex_id].use_count++; + acpi_gbl_mutex_info[mutex_id].owner_id = this_thread_id; } - else { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Thread %X could not acquire Mutex [%s] %s\n", this_thread_id, acpi_ut_get_mutex_name (mutex_id), @@ -361,9 +749,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_release_mutex + * FUNCTION: acpi_ut_release_mutex * - * PARAMETERS: Mutex_iD - ID of the mutex to be released + * PARAMETERS: mutex_iD - ID of the mutex to be released * * RETURN: Status * @@ -373,14 +761,14 @@ acpi_status acpi_ut_release_mutex ( - ACPI_MUTEX_HANDLE mutex_id) + acpi_mutex_handle mutex_id) { - acpi_status status; - u32 i; - u32 this_thread_id; + acpi_status status; + u32 i; + u32 this_thread_id; - PROC_NAME ("Ut_release_mutex"); + ACPI_FUNCTION_NAME ("ut_release_mutex"); this_thread_id = acpi_os_get_thread_id (); @@ -388,15 +776,14 @@ "Thread %X releasing Mutex [%s]\n", this_thread_id, acpi_ut_get_mutex_name (mutex_id))); - if (mutex_id > MAX_MTX) { + if (mutex_id > MAX_MUTEX) { return (AE_BAD_PARAMETER); } - /* * Mutex must be acquired in order to release it! */ - if (acpi_gbl_acpi_mutex_info[mutex_id].owner_id == ACPI_MUTEX_NOT_ACQUIRED) { + if (acpi_gbl_mutex_info[mutex_id].owner_id == ACPI_MUTEX_NOT_ACQUIRED) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Mutex [%s] is not acquired, cannot release\n", acpi_ut_get_mutex_name (mutex_id))); @@ -404,15 +791,14 @@ return (AE_NOT_ACQUIRED); } - /* * Deadlock prevention. Check if this thread owns any mutexes of value * greater than this one. If so, the thread has violated the mutex * ordering rule. This indicates a coding error somewhere in * the ACPI subsystem code. 
*/ - for (i = mutex_id; i < MAX_MTX; i++) { - if (acpi_gbl_acpi_mutex_info[i].owner_id == this_thread_id) { + for (i = mutex_id; i < MAX_MUTEX; i++) { + if (acpi_gbl_mutex_info[i].owner_id == this_thread_id) { if (i == mutex_id) { continue; } @@ -425,12 +811,11 @@ } } - /* Mark unlocked FIRST */ - acpi_gbl_acpi_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; + acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; - status = acpi_os_signal_semaphore (acpi_gbl_acpi_mutex_info[mutex_id].mutex, 1); + status = acpi_os_signal_semaphore (acpi_gbl_mutex_info[mutex_id].mutex, 1); if (ACPI_FAILURE (status)) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Thread %X could not release Mutex [%s] %s\n", @@ -448,11 +833,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_create_update_state_and_push + * FUNCTION: acpi_ut_create_update_state_and_push * * PARAMETERS: *Object - Object to be added to the new state * Action - Increment/Decrement - * State_list - List the state will be added to + * state_list - List the state will be added to * * RETURN: None * @@ -462,14 +847,14 @@ acpi_status acpi_ut_create_update_state_and_push ( - acpi_operand_object *object, - u16 action, - acpi_generic_state **state_list) + union acpi_operand_object *object, + u16 action, + union acpi_generic_state **state_list) { - acpi_generic_state *state; + union acpi_generic_state *state; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); /* Ignore null objects; these are expected */ @@ -483,7 +868,6 @@ return (AE_NO_MEMORY); } - acpi_ut_push_generic_state (state_list, state); return (AE_OK); } @@ -491,11 +875,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_create_pkg_state_and_push + * FUNCTION: acpi_ut_create_pkg_state_and_push * * PARAMETERS: *Object - Object to be added to the new state * Action - Increment/Decrement - * State_list - List the state will be added to + * state_list - List the state will be added to * * RETURN: None * @@ -505,15 +889,15 @@ acpi_status acpi_ut_create_pkg_state_and_push ( - void *internal_object, - void *external_object, - u16 index, - acpi_generic_state **state_list) + void *internal_object, + void *external_object, + u16 index, + union acpi_generic_state **state_list) { - acpi_generic_state *state; + union acpi_generic_state *state; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); state = acpi_ut_create_pkg_state (internal_object, external_object, index); @@ -521,7 +905,6 @@ return (AE_NO_MEMORY); } - acpi_ut_push_generic_state (state_list, state); return (AE_OK); } @@ -529,9 +912,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_push_generic_state + * FUNCTION: acpi_ut_push_generic_state * - * PARAMETERS: List_head - Head of the state stack + * PARAMETERS: list_head - Head of the state stack * State - State object to push * * RETURN: Status @@ -542,10 +925,10 @@ void acpi_ut_push_generic_state ( - acpi_generic_state **list_head, - acpi_generic_state *state) + union acpi_generic_state **list_head, + union acpi_generic_state *state) { - FUNCTION_TRACE ("Ut_push_generic_state"); + ACPI_FUNCTION_TRACE ("ut_push_generic_state"); /* Push the state object onto the front of the list (stack) */ @@ -559,9 +942,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_pop_generic_state + * FUNCTION: acpi_ut_pop_generic_state * - * PARAMETERS: List_head - Head of the state 
stack + * PARAMETERS: list_head - Head of the state stack * * RETURN: Status * @@ -569,14 +952,14 @@ * ******************************************************************************/ -acpi_generic_state * +union acpi_generic_state * acpi_ut_pop_generic_state ( - acpi_generic_state **list_head) + union acpi_generic_state **list_head) { - acpi_generic_state *state; + union acpi_generic_state *state; - FUNCTION_TRACE ("Ut_pop_generic_state"); + ACPI_FUNCTION_TRACE ("ut_pop_generic_state"); /* Remove the state object at the head of the list (stack) */ @@ -594,7 +977,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_create_generic_state + * FUNCTION: acpi_ut_create_generic_state * * PARAMETERS: None * @@ -605,13 +988,13 @@ * ******************************************************************************/ -acpi_generic_state * +union acpi_generic_state * acpi_ut_create_generic_state (void) { - acpi_generic_state *state; + union acpi_generic_state *state; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); state = acpi_ut_acquire_from_cache (ACPI_MEM_LIST_STATE); @@ -628,7 +1011,46 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_create_update_state + * FUNCTION: acpi_ut_create_thread_state + * + * PARAMETERS: None + * + * RETURN: Thread State + * + * DESCRIPTION: Create a "Thread State" - a flavor of the generic state used + * to track per-thread info during method execution + * + ******************************************************************************/ + +struct acpi_thread_state * +acpi_ut_create_thread_state ( + void) +{ + union acpi_generic_state *state; + + + ACPI_FUNCTION_TRACE ("ut_create_thread_state"); + + + /* Create the generic state object */ + + state = acpi_ut_create_generic_state (); + if (!state) { + return_PTR (NULL); + } + + /* Init fields specific to the update struct */ + + state->common.data_type = ACPI_DESC_TYPE_STATE_THREAD; + state->thread.thread_id = acpi_os_get_thread_id (); + + return_PTR ((struct acpi_thread_state *) state); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_create_update_state * * PARAMETERS: Object - Initial Object to be installed in the * state @@ -642,22 +1064,22 @@ * ******************************************************************************/ -acpi_generic_state * +union acpi_generic_state * acpi_ut_create_update_state ( - acpi_operand_object *object, - u16 action) + union acpi_operand_object *object, + u16 action) { - acpi_generic_state *state; + union acpi_generic_state *state; - FUNCTION_TRACE_PTR ("Ut_create_update_state", object); + ACPI_FUNCTION_TRACE_PTR ("ut_create_update_state", object); /* Create the generic state object */ state = acpi_ut_create_generic_state (); if (!state) { - return (NULL); + return_PTR (NULL); } /* Init fields specific to the update struct */ @@ -672,7 +1094,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_create_pkg_state + * FUNCTION: acpi_ut_create_pkg_state * * PARAMETERS: Object - Initial Object to be installed in the * state @@ -684,29 +1106,29 @@ * ******************************************************************************/ -acpi_generic_state * +union acpi_generic_state * acpi_ut_create_pkg_state ( - void *internal_object, - void *external_object, - u16 index) + void *internal_object, + void *external_object, + u16 index) { - acpi_generic_state *state; + union 
acpi_generic_state *state; - FUNCTION_TRACE_PTR ("Ut_create_pkg_state", internal_object); + ACPI_FUNCTION_TRACE_PTR ("ut_create_pkg_state", internal_object); /* Create the generic state object */ state = acpi_ut_create_generic_state (); if (!state) { - return (NULL); + return_PTR (NULL); } /* Init fields specific to the update struct */ state->common.data_type = ACPI_DESC_TYPE_STATE_PACKAGE; - state->pkg.source_object = (acpi_operand_object *) internal_object; + state->pkg.source_object = (union acpi_operand_object *) internal_object; state->pkg.dest_object = external_object; state->pkg.index = index; state->pkg.num_packages = 1; @@ -717,7 +1139,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_create_control_state + * FUNCTION: acpi_ut_create_control_state * * PARAMETERS: None * @@ -728,28 +1150,27 @@ * ******************************************************************************/ -acpi_generic_state * +union acpi_generic_state * acpi_ut_create_control_state ( void) { - acpi_generic_state *state; + union acpi_generic_state *state; - FUNCTION_TRACE ("Ut_create_control_state"); + ACPI_FUNCTION_TRACE ("ut_create_control_state"); /* Create the generic state object */ state = acpi_ut_create_generic_state (); if (!state) { - return (NULL); + return_PTR (NULL); } - /* Init fields specific to the control struct */ state->common.data_type = ACPI_DESC_TYPE_STATE_CONTROL; - state->common.state = CONTROL_CONDITIONAL_EXECUTING; + state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING; return_PTR (state); } @@ -757,7 +1178,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_delete_generic_state + * FUNCTION: acpi_ut_delete_generic_state * * PARAMETERS: State - The state object to be deleted * @@ -770,9 +1191,9 @@ void acpi_ut_delete_generic_state ( - acpi_generic_state *state) + union acpi_generic_state *state) { - FUNCTION_TRACE ("Ut_delete_generic_state"); + ACPI_FUNCTION_TRACE ("ut_delete_generic_state"); acpi_ut_release_to_cache (ACPI_MEM_LIST_STATE, state); @@ -782,7 +1203,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_delete_generic_state_cache + * FUNCTION: acpi_ut_delete_generic_state_cache * * PARAMETERS: None * @@ -797,7 +1218,7 @@ acpi_ut_delete_generic_state_cache ( void) { - FUNCTION_TRACE ("Ut_delete_generic_state_cache"); + ACPI_FUNCTION_TRACE ("ut_delete_generic_state_cache"); acpi_ut_delete_generic_cache (ACPI_MEM_LIST_STATE); @@ -807,106 +1228,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_resolve_package_references - * - * PARAMETERS: Obj_desc - The Package object on which to resolve refs - * - * RETURN: Status - * - * DESCRIPTION: Walk through a package and turn internal references into values - * - ******************************************************************************/ - -acpi_status -acpi_ut_resolve_package_references ( - acpi_operand_object *obj_desc) -{ - u32 count; - acpi_operand_object *sub_object; - - - FUNCTION_TRACE ("Ut_resolve_package_references"); - - - if (obj_desc->common.type != ACPI_TYPE_PACKAGE) { - /* The object must be a package */ - - REPORT_ERROR (("Must resolve Package Refs on a Package\n")); - return_ACPI_STATUS(AE_ERROR); - } - - /* - * TBD: what about nested packages? 
*/ - - for (count = 0; count < obj_desc->package.count; count++) { - sub_object = obj_desc->package.elements[count]; - - if (sub_object->common.type == INTERNAL_TYPE_REFERENCE) { - if (sub_object->reference.opcode == AML_ZERO_OP) { - sub_object->common.type = ACPI_TYPE_INTEGER; - sub_object->integer.value = 0; - } - - else if (sub_object->reference.opcode == AML_ONE_OP) { - sub_object->common.type = ACPI_TYPE_INTEGER; - sub_object->integer.value = 1; - } - - else if (sub_object->reference.opcode == AML_ONES_OP) { - sub_object->common.type = ACPI_TYPE_INTEGER; - sub_object->integer.value = ACPI_INTEGER_MAX; - } - } - } - - return_ACPI_STATUS(AE_OK); -} - -#ifdef ACPI_DEBUG - -/******************************************************************************* - * - * FUNCTION: Acpi_ut_display_init_pathname - * - * PARAMETERS: Obj_handle - Handle whose pathname will be displayed - * Path - Additional path string to be appended - * - * RETURN: acpi_status - * - * DESCRIPTION: Display full pathnbame of an object, DEBUG ONLY - * - ******************************************************************************/ - -void -acpi_ut_display_init_pathname ( - acpi_handle obj_handle, - char *path) -{ - acpi_status status; - u32 length = 128; - char buffer[128]; - - - PROC_NAME ("Ut_display_init_pathname"); - - - status = acpi_ns_handle_to_pathname (obj_handle, &length, buffer); - if (ACPI_SUCCESS (status)) { - if (path) { - ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "%s.%s\n", buffer, path)); - } - else { - ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "%s\n", buffer)); - } - } -} -#endif - -/******************************************************************************* - * - * FUNCTION: Acpi_ut_walk_package_tree + * FUNCTION: acpi_ut_walk_package_tree * - * PARAMETERS: Obj_desc - The Package object on which to resolve refs + * PARAMETERS: obj_desc - The Package object on which to resolve refs * * RETURN: Status * @@ -916,19 +1240,19 @@ acpi_status acpi_ut_walk_package_tree ( - acpi_operand_object *source_object, - void *target_object, - ACPI_PKG_CALLBACK walk_callback, - void *context) -{ - acpi_status status = AE_OK; - acpi_generic_state *state_list = NULL; - acpi_generic_state *state; - u32 this_index; - acpi_operand_object *this_source_obj; + union acpi_operand_object *source_object, + void *target_object, + acpi_pkg_callback walk_callback, + void *context) +{ + acpi_status status = AE_OK; + union acpi_generic_state *state_list = NULL; + union acpi_generic_state *state; + u32 this_index; + union acpi_operand_object *this_source_obj; - FUNCTION_TRACE ("Ut_walk_package_tree"); + ACPI_FUNCTION_TRACE ("ut_walk_package_tree"); state = acpi_ut_create_pkg_state (source_object, target_object, 0); @@ -937,29 +1261,26 @@ } while (state) { + /* Get one element of the package */ + this_index = state->pkg.index; - this_source_obj = (acpi_operand_object *) + this_source_obj = (union acpi_operand_object *) state->pkg.source_object->package.elements[this_index]; /* - * Check for + * Check for: * 1) An uninitialized package element. It is completely - * legal to declare a package and leave it uninitialized + * legal to declare a package and leave it uninitialized * 2) Not an internal object - can be a namespace node instead * 3) Any type other than a package. Packages are handled in else - * case below. + * case below. 
*/ if ((!this_source_obj) || - (!VALID_DESCRIPTOR_TYPE ( - this_source_obj, ACPI_DESC_TYPE_INTERNAL)) || - (!IS_THIS_OBJECT_TYPE ( - this_source_obj, ACPI_TYPE_PACKAGE))) { - + (ACPI_GET_DESCRIPTOR_TYPE (this_source_obj) != ACPI_DESC_TYPE_OPERAND) || + (ACPI_GET_OBJECT_TYPE (this_source_obj) != ACPI_TYPE_PACKAGE)) { status = walk_callback (ACPI_COPY_TYPE_SIMPLE, this_source_obj, state, context); if (ACPI_FAILURE (status)) { - /* TBD: must delete package created up to this point */ - return_ACPI_STATUS (status); } @@ -975,7 +1296,6 @@ acpi_ut_delete_generic_state (state); state = acpi_ut_pop_generic_state (&state_list); - /* Finished when there are no more states */ if (!state) { @@ -994,32 +1314,23 @@ state->pkg.index++; } } - else { - /* This is a sub-object of type package */ + /* This is a subobject of type package */ status = walk_callback (ACPI_COPY_TYPE_PACKAGE, this_source_obj, state, context); if (ACPI_FAILURE (status)) { - /* TBD: must delete package created up to this point */ - return_ACPI_STATUS (status); } - - /* - * The callback above returned a new target package object. - */ - /* * Push the current state and create a new one + * The callback above returned a new target package object. */ acpi_ut_push_generic_state (&state_list, state); state = acpi_ut_create_pkg_state (this_source_obj, state->pkg.this_target_obj, 0); if (!state) { - /* TBD: must delete package created up to this point */ - return_ACPI_STATUS (AE_NO_MEMORY); } } @@ -1027,17 +1338,100 @@ /* We should never get here */ - return (AE_AML_INTERNAL); + return_ACPI_STATUS (AE_AML_INTERNAL); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_generate_checksum + * + * PARAMETERS: Buffer - Buffer to be scanned + * Length - number of bytes to examine + * + * RETURN: checksum + * + * DESCRIPTION: Generate a checksum on a raw buffer + * + ******************************************************************************/ + +u8 +acpi_ut_generate_checksum ( + u8 *buffer, + u32 length) +{ + u32 i; + signed char sum = 0; + + + for (i = 0; i < length; i++) { + sum = (signed char) (sum + buffer[i]); + } + + return ((u8) (0 - sum)); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_get_resource_end_tag + * + * PARAMETERS: obj_desc - The resource template buffer object + * + * RETURN: Pointer to the end tag + * + * DESCRIPTION: Find the END_TAG resource descriptor in a resource template + * + ******************************************************************************/ + + +u8 * +acpi_ut_get_resource_end_tag ( + union acpi_operand_object *obj_desc) +{ + u8 buffer_byte; + u8 *buffer; + u8 *end_buffer; + + + buffer = obj_desc->buffer.pointer; + end_buffer = buffer + obj_desc->buffer.length; + + while (buffer < end_buffer) { + buffer_byte = *buffer; + if (buffer_byte & ACPI_RDESC_TYPE_MASK) { + /* Large Descriptor - Length is next 2 bytes */ + + buffer += ((*(buffer+1) | (*(buffer+2) << 8)) + 3); + } + else { + /* Small Descriptor. End Tag will be found here */ + + if ((buffer_byte & ACPI_RDESC_SMALL_MASK) == ACPI_RDESC_TYPE_END_TAG) { + /* Found the end tag descriptor, all done. 
*/ + + return (buffer); + } + + /* Length is in the header */ + + buffer += ((buffer_byte & 0x07) + 1); + } + } + + /* End tag not found */ + + return (NULL); } /******************************************************************************* * - * FUNCTION: Acpi_ut_report_error + * FUNCTION: acpi_ut_report_error * - * PARAMETERS: Module_name - Caller's module name (for error output) - * Line_number - Caller's line number (for error output) - * Component_id - Caller's component ID (for error output) + * PARAMETERS: module_name - Caller's module name (for error output) + * line_number - Caller's line number (for error output) + * component_id - Caller's component ID (for error output) * Message - Error message to use on failure * * RETURN: None @@ -1048,9 +1442,9 @@ void acpi_ut_report_error ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id) + char *module_name, + u32 line_number, + u32 component_id) { @@ -1060,11 +1454,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_report_warning + * FUNCTION: acpi_ut_report_warning * - * PARAMETERS: Module_name - Caller's module name (for error output) - * Line_number - Caller's line number (for error output) - * Component_id - Caller's component ID (for error output) + * PARAMETERS: module_name - Caller's module name (for error output) + * line_number - Caller's line number (for error output) + * component_id - Caller's component ID (for error output) * Message - Error message to use on failure * * RETURN: None @@ -1075,9 +1469,9 @@ void acpi_ut_report_warning ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id) + char *module_name, + u32 line_number, + u32 component_id) { acpi_os_printf ("%8s-%04d: *** Warning: ", module_name, line_number); @@ -1086,11 +1480,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_report_info + * FUNCTION: acpi_ut_report_info * - * PARAMETERS: Module_name - Caller's module name (for error output) - * Line_number - Caller's line number (for error output) - * Component_id - Caller's component ID (for error output) + * PARAMETERS: module_name - Caller's module name (for error output) + * line_number - Caller's line number (for error output) + * component_id - Caller's component ID (for error output) * Message - Error message to use on failure * * RETURN: None @@ -1101,9 +1495,9 @@ void acpi_ut_report_info ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id) + char *module_name, + u32 line_number, + u32 component_id) { acpi_os_printf ("%8s-%04d: *** Info: ", module_name, line_number); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utobject.c linux.21rc5-ac2/drivers/acpi/utilities/utobject.c --- linux.21rc5/drivers/acpi/utilities/utobject.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/utobject.c 2003-04-28 17:59:26.000000000 +0100 @@ -1,48 +1,63 @@ /****************************************************************************** * * Module Name: utobject - ACPI object create/delete/size/cache routines - * $Revision: 57 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. 
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acinterp.h" -#include "acnamesp.h" -#include "actables.h" -#include "amlcode.h" +#include +#include +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utobject") + ACPI_MODULE_NAME ("utobject") /******************************************************************************* * - * FUNCTION: Acpi_ut_create_internal_object_dbg + * FUNCTION: acpi_ut_create_internal_object_dbg * - * PARAMETERS: Address - Address of the memory to deallocate - * Component - Component type of caller - * Module - Source file name of caller - * Line - Line number of caller + * PARAMETERS: module_name - Source file name of caller + * line_number - Line number of caller + * component_id - Component type of caller * Type - ACPI Type of the new object * * RETURN: Object - The new object. 
Null on failure @@ -57,31 +72,55 @@ * ******************************************************************************/ -acpi_operand_object * +union acpi_operand_object * acpi_ut_create_internal_object_dbg ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id, - acpi_object_type8 type) + char *module_name, + u32 line_number, + u32 component_id, + acpi_object_type type) { - acpi_operand_object *object; + union acpi_operand_object *object; + union acpi_operand_object *second_object; - FUNCTION_TRACE_STR ("Ut_create_internal_object_dbg", acpi_ut_get_type_name (type)); + ACPI_FUNCTION_TRACE_STR ("ut_create_internal_object_dbg", acpi_ut_get_type_name (type)); /* Allocate the raw object descriptor */ object = acpi_ut_allocate_object_desc_dbg (module_name, line_number, component_id); if (!object) { - /* Allocation failure */ - return_PTR (NULL); } + switch (type) { + case ACPI_TYPE_REGION: + case ACPI_TYPE_BUFFER_FIELD: + + /* These types require a secondary object */ + + second_object = acpi_ut_allocate_object_desc_dbg (module_name, line_number, component_id); + if (!second_object) { + acpi_ut_delete_object_desc (object); + return_PTR (NULL); + } + + second_object->common.type = ACPI_TYPE_LOCAL_EXTRA; + second_object->common.reference_count = 1; + + /* Link the second object to the first */ + + object->common.next_object = second_object; + break; + + default: + /* All others have no secondary object */ + break; + } + /* Save the object type in the object descriptor */ - object->common.type = type; + object->common.type = (u8) type; /* Init the reference count */ @@ -95,68 +134,128 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_valid_internal_object + * FUNCTION: acpi_ut_create_buffer_object + * + * PARAMETERS: buffer_size - Size of buffer to be created + * + * RETURN: Pointer to a new Buffer object + * + * DESCRIPTION: Create a fully initialized buffer object + * + ******************************************************************************/ + +union acpi_operand_object * +acpi_ut_create_buffer_object ( + acpi_size buffer_size) +{ + union acpi_operand_object *buffer_desc; + u8 *buffer; + + + ACPI_FUNCTION_TRACE_U32 ("ut_create_buffer_object", buffer_size); + + + /* + * Create a new Buffer object + */ + buffer_desc = acpi_ut_create_internal_object (ACPI_TYPE_BUFFER); + if (!buffer_desc) { + return_PTR (NULL); + } + + /* Allocate the actual buffer */ + + buffer = ACPI_MEM_CALLOCATE (buffer_size); + if (!buffer) { + ACPI_REPORT_ERROR (("create_buffer: could not allocate size %X\n", + (u32) buffer_size)); + acpi_ut_remove_reference (buffer_desc); + return_PTR (NULL); + } + + /* Complete buffer object initialization */ + + buffer_desc->buffer.flags |= AOPOBJ_DATA_VALID; + buffer_desc->buffer.pointer = buffer; + buffer_desc->buffer.length = (u32) buffer_size; + + /* Return the new buffer descriptor */ + + return_PTR (buffer_desc); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ut_valid_internal_object * - * PARAMETERS: Operand - Object to be validated + * PARAMETERS: Object - Object to be validated * - * RETURN: Validate a pointer to be an acpi_operand_object + * RETURN: Validate a pointer to be an union acpi_operand_object * ******************************************************************************/ u8 acpi_ut_valid_internal_object ( - void *object) + void *object) { - PROC_NAME ("Ut_valid_internal_object"); + ACPI_FUNCTION_NAME ("ut_valid_internal_object"); 
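
The validation logic that follows replaces the old chained VALID_DESCRIPTOR_TYPE() tests with a single switch over the descriptor-type tag that every ACPI-CA descriptor carries in its common header. A minimal standalone sketch of that tagged-header dispatch, using hypothetical simplified names rather than the real acobject.h definitions:

    #include <stdio.h>

    /* Hypothetical simplified tags; the real values live in ACPI-CA headers. */
    enum desc_type { DESC_OPERAND = 1, DESC_NAMED, DESC_PARSER, DESC_CACHED };

    /* Every descriptor variant begins with the same tag byte, so the type
     * can be read through a pointer to any of them. */
    struct desc_common { unsigned char type; };

    static int is_operand_object(void *object)
    {
        if (!object)
            return 0;
        switch (((struct desc_common *) object)->type) {
        case DESC_OPERAND:
            return 1;           /* a valid operand object */
        case DESC_NAMED:        /* namespace node, not an operand */
        case DESC_PARSER:       /* parse op, not an operand */
        case DESC_CACHED:       /* already released to an object cache */
        default:
            return 0;
        }
    }

    int main(void)
    {
        struct desc_common named = { DESC_NAMED };
        struct desc_common op = { DESC_OPERAND };
        printf("%d %d\n", is_operand_object(&named), is_operand_object(&op)); /* 0 1 */
        return 0;
    }
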
/* Check for a null pointer */ if (!object) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "**** Null Object Ptr\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "**** Null Object Ptr\n")); return (FALSE); } /* Check the descriptor type field */ - if (!VALID_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_INTERNAL)) { - /* Not an ACPI internal object, do some further checking */ + switch (ACPI_GET_DESCRIPTOR_TYPE (object)) { + case ACPI_DESC_TYPE_OPERAND: - if (VALID_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_NAMED)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "**** Obj %p is a named obj, not ACPI obj\n", object)); - } + /* The object appears to be a valid union acpi_operand_object */ - else if (VALID_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_PARSER)) { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "**** Obj %p is a parser obj, not ACPI obj\n", object)); - } + return (TRUE); - else { - ACPI_DEBUG_PRINT ((ACPI_DB_INFO, - "**** Obj %p is of unknown type\n", object)); - } + case ACPI_DESC_TYPE_NAMED: - return (FALSE); - } + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "**** Obj %p is a named obj, not ACPI obj\n", object)); + break; + + case ACPI_DESC_TYPE_PARSER: + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "**** Obj %p is a parser obj, not ACPI obj\n", object)); + break; + + case ACPI_DESC_TYPE_CACHED: + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "**** Obj %p has already been released to internal cache\n", object)); + break; - /* The object appears to be a valid acpi_operand_object */ + default: - return (TRUE); + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "**** Obj %p has unknown descriptor type %X\n", object, + ACPI_GET_DESCRIPTOR_TYPE (object))); + break; + } + + return (FALSE); } /******************************************************************************* * - * FUNCTION: Acpi_ut_allocate_object_desc_dbg + * FUNCTION: acpi_ut_allocate_object_desc_dbg * - * PARAMETERS: Module_name - Caller's module name (for error output) - * Line_number - Caller's line number (for error output) - * Component_id - Caller's component ID (for error output) - * Message - Error message to use on failure + * PARAMETERS: module_name - Caller's module name (for error output) + * line_number - Caller's line number (for error output) + * component_id - Caller's component ID (for error output) * * RETURN: Pointer to newly allocated object descriptor. 
Null on error * @@ -167,31 +266,30 @@ void * acpi_ut_allocate_object_desc_dbg ( - NATIVE_CHAR *module_name, - u32 line_number, - u32 component_id) + char *module_name, + u32 line_number, + u32 component_id) { - acpi_operand_object *object; + union acpi_operand_object *object; - FUNCTION_TRACE ("Ut_allocate_object_desc_dbg"); + ACPI_FUNCTION_TRACE ("ut_allocate_object_desc_dbg"); object = acpi_ut_acquire_from_cache (ACPI_MEM_LIST_OPERAND); if (!object) { - _REPORT_ERROR (module_name, line_number, component_id, + _ACPI_REPORT_ERROR (module_name, line_number, component_id, ("Could not allocate an object descriptor\n")); return_PTR (NULL); } - /* Mark the descriptor type */ - object->common.data_type = ACPI_DESC_TYPE_INTERNAL; + ACPI_SET_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_OPERAND); ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "%p Size %X\n", - object, sizeof (acpi_operand_object))); + object, (u32) sizeof (union acpi_operand_object))); return_PTR (object); } @@ -199,9 +297,9 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_delete_object_desc + * FUNCTION: acpi_ut_delete_object_desc * - * PARAMETERS: Object - Acpi internal object to be deleted + * PARAMETERS: Object - An Acpi internal object to be deleted * * RETURN: None. * @@ -211,14 +309,14 @@ void acpi_ut_delete_object_desc ( - acpi_operand_object *object) + union acpi_operand_object *object) { - FUNCTION_TRACE_PTR ("Ut_delete_object_desc", object); + ACPI_FUNCTION_TRACE_PTR ("ut_delete_object_desc", object); - /* Object must be an acpi_operand_object */ + /* Object must be an union acpi_operand_object */ - if (object->common.data_type != ACPI_DESC_TYPE_INTERNAL) { + if (ACPI_GET_DESCRIPTOR_TYPE (object) != ACPI_DESC_TYPE_OPERAND) { ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Obj %p is not an ACPI object\n", object)); return_VOID; @@ -232,11 +330,11 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_delete_object_cache + * FUNCTION: acpi_ut_delete_object_cache * * PARAMETERS: None * - * RETURN: Status + * RETURN: None * * DESCRIPTION: Purge the global state object cache. Used during subsystem * termination. @@ -247,7 +345,7 @@ acpi_ut_delete_object_cache ( void) { - FUNCTION_TRACE ("Ut_delete_object_cache"); + ACPI_FUNCTION_TRACE ("ut_delete_object_cache"); acpi_ut_delete_generic_cache (ACPI_MEM_LIST_OPERAND); @@ -257,15 +355,15 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_get_simple_object_size + * FUNCTION: acpi_ut_get_simple_object_size * - * PARAMETERS: *Internal_object - Pointer to the object we are examining - * *Ret_length - Where the length is returned + * PARAMETERS: *internal_object - Pointer to the object we are examining + * *obj_length - Where the length is returned * * RETURN: Status * * DESCRIPTION: This function is called to determine the space required to - * contain a simple object for return to an API user. + * contain a simple object for return to an external user. * * The length includes the object structure plus any additional * needed space. 
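
The hunk below widens the length arithmetic from u32 to acpi_size and keeps the existing rule that every computed length is rounded up to the native word size, so the String/Buffer data packed behind each returned acpi_object stays word-aligned. The rounding is the standard power-of-two round-up; a small self-contained illustration (ROUND_UP here is a stand-in for ACPI_ROUND_UP_TO_NATIVE_WORD, not the real macro):

    #include <stdio.h>

    /* Round x up to the next multiple of align; align must be a power of two. */
    #define ROUND_UP(x, align)  (((x) + ((align) - 1)) & ~((align) - 1))

    int main(void)
    {
        unsigned long word = sizeof(void *);   /* native word size */

        printf("%lu\n", ROUND_UP(13UL, word)); /* 16 with an 8-byte word */
        printf("%lu\n", ROUND_UP(16UL, word)); /* already aligned: 16 */
        return 0;
    }
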
@@ -274,14 +372,14 @@ acpi_status acpi_ut_get_simple_object_size ( - acpi_operand_object *internal_object, - u32 *obj_length) + union acpi_operand_object *internal_object, + acpi_size *obj_length) { - u32 length; - acpi_status status = AE_OK; + acpi_size length; + acpi_status status = AE_OK; - FUNCTION_TRACE_PTR ("Ut_get_simple_object_size", internal_object); + ACPI_FUNCTION_TRACE_PTR ("ut_get_simple_object_size", internal_object); /* Handle a null object (Could be a uninitialized package element -- which is legal) */ @@ -291,37 +389,33 @@ return_ACPI_STATUS (AE_OK); } - /* Start with the length of the Acpi object */ - length = sizeof (acpi_object); + length = sizeof (union acpi_object); - if (VALID_DESCRIPTOR_TYPE (internal_object, ACPI_DESC_TYPE_NAMED)) { + if (ACPI_GET_DESCRIPTOR_TYPE (internal_object) == ACPI_DESC_TYPE_NAMED) { /* Object is a named object (reference), just return the length */ - *obj_length = (u32) ROUND_UP_TO_NATIVE_WORD (length); + *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD (length); return_ACPI_STATUS (status); } - /* * The final length depends on the object type * Strings and Buffers are packed right up against the parent object and * must be accessed bytewise or there may be alignment problems on * certain processors */ - - switch (internal_object->common.type) { - + switch (ACPI_GET_OBJECT_TYPE (internal_object)) { case ACPI_TYPE_STRING: - length += internal_object->string.length + 1; + length += (acpi_size) internal_object->string.length + 1; break; case ACPI_TYPE_BUFFER: - length += internal_object->buffer.length; + length += (acpi_size) internal_object->buffer.length; break; @@ -335,25 +429,30 @@ break; - case INTERNAL_TYPE_REFERENCE: + case ACPI_TYPE_LOCAL_REFERENCE: - /* - * The only type that should be here is internal opcode NAMEPATH_OP -- since - * this means an object reference - */ - if (internal_object->reference.opcode != AML_INT_NAMEPATH_OP) { - ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, - "Unsupported Reference opcode=%X in object %p\n", - internal_object->reference.opcode, internal_object)); - status = AE_TYPE; - } + switch (internal_object->reference.opcode) { + case AML_INT_NAMEPATH_OP: - else { /* * Get the actual length of the full pathname to this object. * The reference will be converted to the pathname to the object */ - length += ROUND_UP_TO_NATIVE_WORD (acpi_ns_get_pathname_length (internal_object->reference.node)); + length += ACPI_ROUND_UP_TO_NATIVE_WORD (acpi_ns_get_pathname_length (internal_object->reference.node)); + break; + + default: + + /* + * No other reference opcodes are supported. + * Notably, Locals and Args are not supported, but this may be + * required eventually. + */ + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Unsupported Reference opcode=%X in object %p\n", + internal_object->reference.opcode, internal_object)); + status = AE_TYPE; + break; } break; @@ -361,31 +460,29 @@ default: ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unsupported type=%X in object %p\n", - internal_object->common.type, internal_object)); + ACPI_GET_OBJECT_TYPE (internal_object), internal_object)); status = AE_TYPE; break; } - /* * Account for the space required by the object rounded up to the next * multiple of the machine word size. This keeps each object aligned * on a machine word boundary. (preventing alignment faults on some * machines.) 
*/ - *obj_length = (u32) ROUND_UP_TO_NATIVE_WORD (length); - + *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD (length); return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_get_element_length + * FUNCTION: acpi_ut_get_element_length * - * PARAMETERS: ACPI_PKG_CALLBACK + * PARAMETERS: acpi_pkg_callback * - * RETURN: Status - the status of the call + * RETURN: Status * * DESCRIPTION: Get the length of one package element. * @@ -393,18 +490,18 @@ acpi_status acpi_ut_get_element_length ( - u8 object_type, - acpi_operand_object *source_object, - acpi_generic_state *state, - void *context) + u8 object_type, + union acpi_operand_object *source_object, + union acpi_generic_state *state, + void *context) { - acpi_status status = AE_OK; - acpi_pkg_info *info = (acpi_pkg_info *) context; - u32 object_space; + acpi_status status = AE_OK; + struct acpi_pkg_info *info = (struct acpi_pkg_info *) context; + acpi_size object_space; switch (object_type) { - case 0: + case ACPI_COPY_TYPE_SIMPLE: /* * Simple object - just get the size (Null object/entry is handled @@ -419,33 +516,37 @@ break; - case 1: - /* Package - nothing much to do here, let the walk handle it */ + case ACPI_COPY_TYPE_PACKAGE: + + /* Package object - nothing much to do here, let the walk handle it */ info->num_packages++; state->pkg.this_target_obj = NULL; break; + default: + + /* No other types allowed */ + return (AE_BAD_PARAMETER); } - return (status); } /******************************************************************************* * - * FUNCTION: Acpi_ut_get_package_object_size + * FUNCTION: acpi_ut_get_package_object_size * - * PARAMETERS: *Internal_object - Pointer to the object we are examining - * *Ret_length - Where the length is returned + * PARAMETERS: *internal_object - Pointer to the object we are examining + * *obj_length - Where the length is returned * * RETURN: Status * * DESCRIPTION: This function is called to determine the space required to - * contain a package object for return to an API user. + * contain a package object for return to an external user. * * This is moderately complex since a package contains other * objects including packages. @@ -454,14 +555,14 @@ acpi_status acpi_ut_get_package_object_size ( - acpi_operand_object *internal_object, - u32 *obj_length) + union acpi_operand_object *internal_object, + acpi_size *obj_length) { - acpi_status status; - acpi_pkg_info info; + acpi_status status; + struct acpi_pkg_info info; - FUNCTION_TRACE_PTR ("Ut_get_package_object_size", internal_object); + ACPI_FUNCTION_TRACE_PTR ("ut_get_package_object_size", internal_object); info.length = 0; @@ -470,14 +571,17 @@ status = acpi_ut_walk_package_tree (internal_object, NULL, acpi_ut_get_element_length, &info); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } /* * We have handled all of the objects in all levels of the package. * just add the length of the package objects themselves. * Round up to the next machine word. 
*/ - info.length += ROUND_UP_TO_NATIVE_WORD (sizeof (acpi_object)) * - info.num_packages; + info.length += ACPI_ROUND_UP_TO_NATIVE_WORD (sizeof (union acpi_object)) * + (acpi_size) info.num_packages; /* Return the total package length */ @@ -488,10 +592,10 @@ /******************************************************************************* * - * FUNCTION: Acpi_ut_get_object_size + * FUNCTION: acpi_ut_get_object_size * - * PARAMETERS: *Internal_object - Pointer to the object we are examining - * *Ret_length - Where the length will be returned + * PARAMETERS: *internal_object - Pointer to the object we are examining + * *obj_length - Where the length will be returned * * RETURN: Status * @@ -502,20 +606,19 @@ acpi_status acpi_ut_get_object_size( - acpi_operand_object *internal_object, - u32 *obj_length) + union acpi_operand_object *internal_object, + acpi_size *obj_length) { - acpi_status status; + acpi_status status; - FUNCTION_ENTRY (); + ACPI_FUNCTION_ENTRY (); - if ((VALID_DESCRIPTOR_TYPE (internal_object, ACPI_DESC_TYPE_INTERNAL)) && - (IS_THIS_OBJECT_TYPE (internal_object, ACPI_TYPE_PACKAGE))) { + if ((ACPI_GET_DESCRIPTOR_TYPE (internal_object) == ACPI_DESC_TYPE_OPERAND) && + (ACPI_GET_OBJECT_TYPE (internal_object) == ACPI_TYPE_PACKAGE)) { status = acpi_ut_get_package_object_size (internal_object, obj_length); } - else { status = acpi_ut_get_simple_object_size (internal_object, obj_length); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utilities/utxface.c linux.21rc5-ac2/drivers/acpi/utilities/utxface.c --- linux.21rc5/drivers/acpi/utilities/utxface.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utilities/utxface.c 2003-05-29 01:02:16.000000000 +0100 @@ -1,46 +1,61 @@ /****************************************************************************** * * Module Name: utxface - External interfaces for "global" ACPI functions - * $Revision: 82 $ * *****************************************************************************/ /* - * Copyright (C) 2000, 2001 R. Byron Moore + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. */ -#include "acpi.h" -#include "acevents.h" -#include "achware.h" -#include "acnamesp.h" -#include "acinterp.h" -#include "amlcode.h" -#include "acdebug.h" -#include "acexcep.h" - +#include +#include +#include +#include +#include +#include #define _COMPONENT ACPI_UTILITIES - MODULE_NAME ("utxface") + ACPI_MODULE_NAME ("utxface") /******************************************************************************* * - * FUNCTION: Acpi_initialize_subsystem + * FUNCTION: acpi_initialize_subsystem * * PARAMETERS: None * @@ -55,12 +70,12 @@ acpi_initialize_subsystem ( void) { - acpi_status status; + acpi_status status; - FUNCTION_TRACE ("Acpi_initialize_subsystem"); + ACPI_FUNCTION_TRACE ("acpi_initialize_subsystem"); - DEBUG_EXEC(acpi_ut_init_stack_ptr_trace ()); + ACPI_DEBUG_EXEC (acpi_ut_init_stack_ptr_trace ()); /* Initialize all globals used by the subsystem */ @@ -71,7 +86,7 @@ status = acpi_os_initialize (); if (ACPI_FAILURE (status)) { - REPORT_ERROR (("OSD failed to initialize, %s\n", + ACPI_REPORT_ERROR (("OSD failed to initialize, %s\n", acpi_format_exception (status))); return_ACPI_STATUS (status); } @@ -80,7 +95,7 @@ status = acpi_ut_mutex_initialize (); if (ACPI_FAILURE (status)) { - REPORT_ERROR (("Global mutex creation failure, %s\n", + ACPI_REPORT_ERROR (("Global mutex creation failure, %s\n", acpi_format_exception (status))); return_ACPI_STATUS (status); } @@ -92,7 +107,7 @@ status = acpi_ns_root_initialize (); if (ACPI_FAILURE (status)) { - REPORT_ERROR (("Namespace initialization failure, %s\n", + ACPI_REPORT_ERROR (("Namespace initialization failure, %s\n", acpi_format_exception (status))); return_ACPI_STATUS (status); } @@ -100,7 +115,7 @@ /* If configured, initialize the AML debugger */ - DEBUGGER_EXEC (acpi_db_initialize ()); + ACPI_DEBUGGER_EXEC (status = acpi_db_initialize ()); return_ACPI_STATUS (status); } @@ -108,7 +123,7 @@ /******************************************************************************* * - * FUNCTION: Acpi_enable_subsystem + * FUNCTION: acpi_enable_subsystem * * PARAMETERS: Flags - Init/enable Options * @@ -121,37 +136,17 @@ acpi_status acpi_enable_subsystem ( - u32 flags) + u32 flags) { - acpi_status status = AE_OK; - - - FUNCTION_TRACE ("Acpi_enable_subsystem"); - + acpi_status status = AE_OK; - /* Sanity check the FADT for valid values */ - status = acpi_ut_validate_fadt (); - if (ACPI_FAILURE 
(status)) { - return_ACPI_STATUS (status); - } + ACPI_FUNCTION_TRACE ("acpi_enable_subsystem"); - /* - * Install the default Op_region handlers. These are - * installed unless other handlers have already been - * installed via the Install_address_space_handler interface - */ - if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing default address space handlers\n")); - - status = acpi_ev_install_default_address_space_handlers (); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } - } /* * We must initialize the hardware before we can enable ACPI. + * The values from the FADT are validated here. */ if (!(flags & ACPI_NO_HARDWARE_INIT)) { ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI hardware\n")); @@ -163,21 +158,24 @@ } /* - * Enable ACPI on this platform + * Enable ACPI mode */ if (!(flags & ACPI_NO_ACPI_ENABLE)) { ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Going into ACPI mode\n")); + acpi_gbl_original_mode = acpi_hw_get_mode(); + status = acpi_enable (); if (ACPI_FAILURE (status)) { - ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Acpi_enable failed.\n")); + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "acpi_enable failed.\n")); return_ACPI_STATUS (status); } } /* - * Note: - * We must have the hardware AND events initialized before we can execute + * Initialize ACPI Event handling + * + * NOTE: We must have the hardware AND events initialized before we can execute * ANY control methods SAFELY. Any control method can require ACPI hardware * support, so the hardware MUST be initialized before execution! */ @@ -190,25 +188,65 @@ } } + /* Install the SCI handler, Global Lock handler, and GPE handlers */ + + if (!(flags & ACPI_NO_HANDLER_INIT)) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing SCI/GL/GPE handlers\n")); + + status = acpi_ev_handler_initialize (); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + + return_ACPI_STATUS (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_initialize_objects + * + * PARAMETERS: Flags - Init/enable Options + * + * RETURN: Status + * + * DESCRIPTION: Completes namespace initialization by initializing device + * objects and executing AML code for Regions, buffers, etc. + * + ******************************************************************************/ + +acpi_status +acpi_initialize_objects ( + u32 flags) +{ + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("acpi_initialize_objects"); + /* - * Initialize all device objects in the namespace - * This runs the _STA and _INI methods. + * Install the default op_region handlers. These are installed unless + * other handlers have already been installed via the + * install_address_space_handler interface. + * + * NOTE: This will cause _REG methods to be run. Any objects accessed + * by the _REG methods will be automatically initialized, even if they + * contain executable AML (see call to acpi_ns_initialize_objects below). */ - if (!(flags & ACPI_NO_DEVICE_INIT)) { - ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI Devices\n")); + if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing default address space handlers\n")); - status = acpi_ns_initialize_devices (); + status = acpi_ev_init_address_spaces (); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } } - /* * Initialize the objects that remain uninitialized. 
This - * runs the executable AML that is part of the declaration of Op_regions - * and Fields. + * runs the executable AML that may be part of the declaration of these + * objects: operation_regions, buffer_fields, Buffers, and Packages. */ if (!(flags & ACPI_NO_OBJECT_INIT)) { ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI Objects\n")); @@ -219,15 +257,34 @@ } } - acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK; + /* + * Initialize all device objects in the namespace + * This runs the _STA and _INI methods. + */ + if (!(flags & ACPI_NO_DEVICE_INIT)) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI Devices\n")); + status = acpi_ns_initialize_devices (); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + + /* + * Empty the caches (delete the cached objects) on the assumption that + * the table load filled them up more than they will be at runtime -- + * thus wasting non-paged memory. + */ + status = acpi_purge_cached_objects (); + + acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK; return_ACPI_STATUS (status); } /******************************************************************************* * - * FUNCTION: Acpi_terminate + * FUNCTION: acpi_terminate * * PARAMETERS: None * @@ -240,16 +297,15 @@ acpi_status acpi_terminate (void) { - FUNCTION_TRACE ("Acpi_terminate"); + acpi_status status; - /* Terminate the AML Debugger if present */ + ACPI_FUNCTION_TRACE ("acpi_terminate"); - DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = TRUE); - /* TBD: [Investigate] This is no longer needed?*/ -/* Acpi_ut_release_mutex (ACPI_MTX_DEBUG_CMD_READY); */ + /* Terminate the AML Debugger if present */ + ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = TRUE); /* Shutdown and free all resources */ @@ -261,7 +317,7 @@ acpi_ut_mutex_terminate (); -#ifdef ENABLE_DEBUGGER +#ifdef ACPI_DEBUGGER /* Shut down the debugger */ @@ -270,16 +326,14 @@ /* Now we can shutdown the OS-dependent layer */ - acpi_os_terminate (); - - - return_ACPI_STATUS (AE_OK); + status = acpi_os_terminate (); + return_ACPI_STATUS (status); } /***************************************************************************** * - * FUNCTION: Acpi_subsystem_status + * FUNCTION: acpi_subsystem_status * * PARAMETERS: None * @@ -305,65 +359,62 @@ /****************************************************************************** * - * FUNCTION: Acpi_get_system_info + * FUNCTION: acpi_get_system_info * - * PARAMETERS: Out_buffer - a pointer to a buffer to receive the + * PARAMETERS: out_buffer - a pointer to a buffer to receive the * resources for the device - * Buffer_length - the number of bytes available in the buffer + * buffer_length - the number of bytes available in the buffer * * RETURN: Status - the status of the call * * DESCRIPTION: This function is called to get information about the current * state of the ACPI subsystem. It will return system information - * in the Out_buffer. + * in the out_buffer. * * If the function fails an appropriate status will be returned - * and the value of Out_buffer is undefined. + * and the value of out_buffer is undefined. 
* ******************************************************************************/ acpi_status acpi_get_system_info ( - acpi_buffer *out_buffer) + struct acpi_buffer *out_buffer) { - acpi_system_info *info_ptr; - u32 i; + struct acpi_system_info *info_ptr; + u32 i; + acpi_status status; - FUNCTION_TRACE ("Acpi_get_system_info"); + ACPI_FUNCTION_TRACE ("acpi_get_system_info"); - /* - * Must have a valid buffer - */ - if ((!out_buffer) || - (!out_buffer->pointer)) { - return_ACPI_STATUS (AE_BAD_PARAMETER); + /* Parameter validation */ + + status = acpi_ut_validate_buffer (out_buffer); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - if (out_buffer->length < sizeof (acpi_system_info)) { - /* - * Caller's buffer is too small - */ - out_buffer->length = sizeof (acpi_system_info); + /* Validate/Allocate/Clear caller buffer */ - return_ACPI_STATUS (AE_BUFFER_OVERFLOW); + status = acpi_ut_initialize_buffer (out_buffer, sizeof (struct acpi_system_info)); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); } - /* - * Set return length and get data + * Populate the return buffer */ - out_buffer->length = sizeof (acpi_system_info); - info_ptr = (acpi_system_info *) out_buffer->pointer; + info_ptr = (struct acpi_system_info *) out_buffer->pointer; info_ptr->acpi_ca_version = ACPI_CA_VERSION; /* System flags (ACPI capabilities) */ - info_ptr->flags = acpi_gbl_system_flags; + info_ptr->flags = ACPI_SYS_MODE_ACPI; /* Timer resolution - 24 or 32 bits */ + if (!acpi_gbl_FADT) { info_ptr->timer_resolution = 0; } @@ -386,12 +437,70 @@ /* Current status of the ACPI tables, per table type */ - info_ptr->num_table_types = NUM_ACPI_TABLES; - for (i = 0; i < NUM_ACPI_TABLES; i++) { - info_ptr->table_info[i].count = acpi_gbl_acpi_tables[i].count; + info_ptr->num_table_types = NUM_ACPI_TABLE_TYPES; + for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) { + info_ptr->table_info[i].count = acpi_gbl_table_lists[i].count; } return_ACPI_STATUS (AE_OK); } +/***************************************************************************** + * + * FUNCTION: acpi_install_initialization_handler + * + * PARAMETERS: Handler - Callback procedure + * + * RETURN: Status + * + * DESCRIPTION: Install an initialization handler + * + * TBD: When a second function is added, must save the Function also. 
+ * + ****************************************************************************/ + +acpi_status +acpi_install_initialization_handler ( + acpi_init_handler handler, + u32 function) +{ + + if (!handler) { + return (AE_BAD_PARAMETER); + } + + if (acpi_gbl_init_handler) { + return (AE_ALREADY_EXISTS); + } + + acpi_gbl_init_handler = handler; + return AE_OK; +} + + +/***************************************************************************** + * + * FUNCTION: acpi_purge_cached_objects + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Empty all caches (delete the cached objects) + * + ****************************************************************************/ + +acpi_status +acpi_purge_cached_objects (void) +{ + ACPI_FUNCTION_TRACE ("acpi_purge_cached_objects"); + + + acpi_ut_delete_generic_state_cache (); + acpi_ut_delete_object_cache (); + acpi_ds_delete_walk_state_cache (); + acpi_ps_delete_parse_cache (); + + return_ACPI_STATUS (AE_OK); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/acpi/utils.c linux.21rc5-ac2/drivers/acpi/utils.c --- linux.21rc5/drivers/acpi/utils.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/acpi/utils.c 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,413 @@ +/* + * acpi_utils.c - ACPI Utility Functions ($Revision: 8 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+
+#define _COMPONENT        ACPI_BUS_COMPONENT
+ACPI_MODULE_NAME        ("acpi_utils")
+
+
+/* --------------------------------------------------------------------------
+                            Object Evaluation Helpers
+   -------------------------------------------------------------------------- */
+
+#ifdef ACPI_DEBUG_OUTPUT
+#define acpi_util_eval_error(h,p,s) {\
+    char prefix[80] = {'\0'};\
+    struct acpi_buffer buffer = {sizeof(prefix), prefix};\
+    acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\
+    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate [%s.%s]: %s\n",\
+        (char *) prefix, p, acpi_format_exception(s))); }
+#else
+#define acpi_util_eval_error(h,p,s)
+#endif
+
+
+acpi_status
+acpi_extract_package (
+    union acpi_object       *package,
+    struct acpi_buffer      *format,
+    struct acpi_buffer      *buffer)
+{
+    u32                     size_required = 0;
+    u32                     tail_offset = 0;
+    char                    *format_string = NULL;
+    u32                     format_count = 0;
+    u32                     i = 0;
+    u8                      *head = NULL;
+    u8                      *tail = NULL;
+
+    ACPI_FUNCTION_TRACE("acpi_extract_package");
+
+    if (!package || (package->type != ACPI_TYPE_PACKAGE) || (package->package.count < 1)) {
+        ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid 'package' argument\n"));
+        return_ACPI_STATUS(AE_BAD_PARAMETER);
+    }
+
+    if (!format || !format->pointer || (format->length < 1)) {
+        ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid 'format' argument\n"));
+        return_ACPI_STATUS(AE_BAD_PARAMETER);
+    }
+
+    if (!buffer) {
+        ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid 'buffer' argument\n"));
+        return_ACPI_STATUS(AE_BAD_PARAMETER);
+    }
+
+    format_count = (format->length/sizeof(char)) - 1;
+    if (format_count > package->package.count) {
+        ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Format specifies more objects [%d] than exist in package [%d].", format_count, package->package.count));
+        return_ACPI_STATUS(AE_BAD_DATA);
+    }
+
+    format_string = (char*)format->pointer;
+
+    /*
+     * Calculate size_required.
+     */
+    for (i=0; i<format_count; i++) {
+
+        union acpi_object *element = &(package->package.elements[i]);
+
+        if (!element) {
+            return_ACPI_STATUS(AE_BAD_DATA);
+        }
+
+        switch (element->type) {
+
+        case ACPI_TYPE_INTEGER:
+            switch (format_string[i]) {
+            case 'N':
+                size_required += sizeof(acpi_integer);
+                tail_offset += sizeof(acpi_integer);
+                break;
+            case 'S':
+                size_required += sizeof(char*) + sizeof(acpi_integer) + sizeof(char);
+                tail_offset += sizeof(char*);
+                break;
+            default:
+                ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid package element [%d]: got number, expecting [%c].\n", i, format_string[i]));
+                return_ACPI_STATUS(AE_BAD_DATA);
+                break;
+            }
+            break;
+
+        case ACPI_TYPE_STRING:
+        case ACPI_TYPE_BUFFER:
+            switch (format_string[i]) {
+            case 'S':
+                size_required += sizeof(char*) + (element->string.length * sizeof(char)) + sizeof(char);
+                tail_offset += sizeof(char*);
+                break;
+            case 'B':
+                size_required += sizeof(u8*) + (element->buffer.length * sizeof(u8));
+                tail_offset += sizeof(u8*);
+                break;
+            default:
+                ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid package element [%d] got string/buffer, expecting [%c].\n", i, format_string[i]));
+                return_ACPI_STATUS(AE_BAD_DATA);
+                break;
+            }
+            break;
+
+        case ACPI_TYPE_PACKAGE:
+        default:
+            ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unsupported element at index=%d\n", i));
+            /* TBD: handle nested packages... */
+            return_ACPI_STATUS(AE_SUPPORT);
+            break;
+        }
+    }
+
+    /*
+     * Validate output buffer.
+     */
+    if (buffer->length < size_required) {
+        buffer->length = size_required;
+        return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
+    }
+    else if (buffer->length != size_required || !buffer->pointer) {
+        return_ACPI_STATUS(AE_BAD_PARAMETER);
+    }
+
+    head = buffer->pointer;
+    tail = buffer->pointer + tail_offset;
+
+    /*
+     * Extract package data.
+     */
+    for (i=0; i<format_count; i++) {
+
+        u8 **pointer = NULL;
+        union acpi_object *element = &(package->package.elements[i]);
+
+        if (!element) {
+            return_ACPI_STATUS(AE_BAD_DATA);
+        }
+
+        switch (element->type) {
+
+        case ACPI_TYPE_INTEGER:
+            switch (format_string[i]) {
+            case 'N':
+                *((acpi_integer*)head) = element->integer.value;
+                head += sizeof(acpi_integer);
+                break;
+            case 'S':
+                pointer = (u8**)head;
+                *pointer = tail;
+                *((acpi_integer*)tail) = element->integer.value;
+                head += sizeof(acpi_integer*);
+                tail += sizeof(acpi_integer);
+                /* NULL terminate string */
+                *tail = (char)0;
+                tail += sizeof(char);
+                break;
+            default:
+                /* Should never get here */
+                break;
+            }
+            break;
+
+        case ACPI_TYPE_STRING:
+        case ACPI_TYPE_BUFFER:
+            switch (format_string[i]) {
+            case 'S':
+                pointer = (u8**)head;
+                *pointer = tail;
+                memcpy(tail, element->string.pointer, element->string.length);
+                head += sizeof(char*);
+                tail += element->string.length * sizeof(char);
+                /* NULL terminate string */
+                *tail = (char)0;
+                tail += sizeof(char);
+                break;
+            case 'B':
+                pointer = (u8**)head;
+                *pointer = tail;
+                memcpy(tail, element->buffer.pointer, element->buffer.length);
+                head += sizeof(u8*);
+                tail += element->buffer.length * sizeof(u8);
+                break;
+            default:
+                /* Should never get here */
+                break;
+            }
+            break;
+
+        case ACPI_TYPE_PACKAGE:
+            /* TBD: handle nested packages... */
+        default:
+            /* Should never get here */
+            break;
+        }
+    }
+
+    return_ACPI_STATUS(AE_OK);
+}
+
+
+acpi_status
+acpi_evaluate_integer (
+    acpi_handle             handle,
+    acpi_string             pathname,
+    struct acpi_object_list *arguments,
+    unsigned long           *data)
+{
+    acpi_status             status = AE_OK;
+    union acpi_object       element;
+    struct acpi_buffer      buffer = {sizeof(union acpi_object), &element};
+
+    ACPI_FUNCTION_TRACE("acpi_evaluate_integer");
+
+    if (!data)
+        return_ACPI_STATUS(AE_BAD_PARAMETER);
+
+    status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
+    if (ACPI_FAILURE(status)) {
+        acpi_util_eval_error(handle, pathname, status);
+        return_ACPI_STATUS(status);
+    }
+
+    if (element.type != ACPI_TYPE_INTEGER) {
+        acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
+        return_ACPI_STATUS(AE_BAD_DATA);
+    }
+
+    *data = element.integer.value;
+
+    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%lu]\n", *data));
+
+    return_ACPI_STATUS(AE_OK);
+}
+
+
+#if 0
+acpi_status
+acpi_evaluate_string (
+    acpi_handle             handle,
+    acpi_string             pathname,
+    acpi_object_list        *arguments,
+    acpi_string             *data)
+{
+    acpi_status             status = AE_OK;
+    acpi_object             *element = NULL;
+    acpi_buffer             buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+
+    ACPI_FUNCTION_TRACE("acpi_evaluate_string");
+
+    if (!data)
+        return_ACPI_STATUS(AE_BAD_PARAMETER);
+
+    status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
+    if (ACPI_FAILURE(status)) {
+        acpi_util_eval_error(handle, pathname, status);
+        return_ACPI_STATUS(status);
+    }
+
+    element = (acpi_object *) buffer.pointer;
+
+    if ((element->type != ACPI_TYPE_STRING)
+        || (element->type != ACPI_TYPE_BUFFER)
+        || !element->string.length) {
+        acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
+        return_ACPI_STATUS(AE_BAD_DATA);
+    }
+
+    *data = kmalloc(element->string.length + 1, GFP_KERNEL);
+    if (!data) {
+        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Memory allocation error\n"));
+        return_VALUE(-ENOMEM);
+    }
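
acpi_extract_package above runs both of its passes over a single caller buffer: fixed-size slots (integers and data pointers) advance head from the front, while variable-length string/buffer payloads advance tail from tail_offset, so one allocation holds both the pointer table and the data it points to. A simplified standalone model of that head/tail layout (not the kernel code; a single 'S' element only):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *src = "PNP0A03";
        size_t tail_offset = sizeof(char *);   /* one 'S' slot in the head */
        unsigned char *buf = malloc(64);       /* malloc gives aligned storage */
        unsigned char *head, *tail;

        if (!buf)
            return 1;
        head = buf;
        tail = buf + tail_offset;

        *(char **) head = (char *) tail;       /* head slot points at the payload */
        memcpy(tail, src, strlen(src) + 1);    /* NUL-terminated copy at the tail */

        printf("%s\n", *(char **) buf);        /* prints PNP0A03 */
        free(buf);
        return 0;
    }
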
+#if 0
+acpi_status
+acpi_evaluate_string (
+	acpi_handle handle,
+	acpi_string pathname,
+	acpi_object_list *arguments,
+	acpi_string *data)
+{
+	acpi_status status = AE_OK;
+	acpi_object *element = NULL;
+	acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+
+	ACPI_FUNCTION_TRACE("acpi_evaluate_string");
+
+	if (!data)
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+
+	status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
+	if (ACPI_FAILURE(status)) {
+		acpi_util_eval_error(handle, pathname, status);
+		return_ACPI_STATUS(status);
+	}
+
+	element = (acpi_object *) buffer.pointer;
+
+	if (((element->type != ACPI_TYPE_STRING)
+		&& (element->type != ACPI_TYPE_BUFFER))
+		|| !element->string.length) {
+		acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
+		return_ACPI_STATUS(AE_BAD_DATA);
+	}
+
+	*data = kmalloc(element->string.length + 1, GFP_KERNEL);
+	if (!*data) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Memory allocation error\n"));
+		return_VALUE(-ENOMEM);
+	}
+	memset(*data, 0, element->string.length + 1);
+
+	memcpy(*data, element->string.pointer, element->string.length);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%s]\n", *data));
+
+	acpi_os_free(buffer.pointer);
+
+	return_ACPI_STATUS(AE_OK);
+}
+#endif
+
+
+acpi_status
+acpi_evaluate_reference (
+	acpi_handle handle,
+	acpi_string pathname,
+	struct acpi_object_list *arguments,
+	struct acpi_handle_list *list)
+{
+	acpi_status status = AE_OK;
+	union acpi_object *package = NULL;
+	union acpi_object *element = NULL;
+	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	u32 i = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_evaluate_reference");
+
+	if (!list) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Evaluate object. */
+
+	status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
+	if (ACPI_FAILURE(status))
+		goto end;
+
+	package = (union acpi_object *) buffer.pointer;
+
+	if ((buffer.length == 0) || !package) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"No return object (len %X ptr %p)\n",
+			buffer.length, package));
+		status = AE_BAD_DATA;
+		acpi_util_eval_error(handle, pathname, status);
+		goto end;
+	}
+	if (package->type != ACPI_TYPE_PACKAGE) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"Expecting a [Package], found type %X\n",
+			package->type));
+		status = AE_BAD_DATA;
+		acpi_util_eval_error(handle, pathname, status);
+		goto end;
+	}
+	if (!package->package.count) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+			"[Package] has zero elements (%p)\n",
+			package));
+		status = AE_BAD_DATA;
+		acpi_util_eval_error(handle, pathname, status);
+		goto end;
+	}
+
+	if (package->package.count > ACPI_MAX_HANDLES) {
+		return AE_NO_MEMORY;
+	}
+	list->count = package->package.count;
+
+	/* Extract package data. */
+
+	for (i = 0; i < list->count; i++) {
+
+		element = &(package->package.elements[i]);
+
+		if (element->type != ACPI_TYPE_ANY) {
+			status = AE_BAD_DATA;
+			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				"Expecting a [Reference] package element, found type %X\n",
+				element->type));
+			acpi_util_eval_error(handle, pathname, status);
+			break;
+		}
+
+		/* Get the acpi_handle. */
+
+		list->handles[i] = element->reference.handle;
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found reference [%p]\n",
+			list->handles[i]));
+	}
+
+end:
+	if (ACPI_FAILURE(status)) {
+		list->count = 0;
+		//kfree(list->handles);
+	}
+
+	acpi_os_free(buffer.pointer);
+
+	return_ACPI_STATUS(status);
+}
+
+
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/atm/iphase.c linux.21rc5-ac2/drivers/atm/iphase.c
--- linux.21rc5/drivers/atm/iphase.c	2003-05-28 15:08:52.000000000 +0100
+++ linux.21rc5-ac2/drivers/atm/iphase.c	2003-05-12 16:38:48.000000000 +0100
@@ -2965,7 +2965,7 @@
 			dev_kfree_skb_any(skb);
 			return 0;
 		}
-		kfree(skb);
+		dev_kfree_skb_any(skb);
 		skb = newskb;
 	}
 	/* Get a descriptor number from our free descriptor queue
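The one-line iphase.c change above is worth a note: an sk_buff is not a bare kmalloc() allocation, so kfree(skb) releases only the header, leaks the attached data buffer, and bypasses the skb destructor. dev_kfree_skb_any() is the variant that is safe in both process and interrupt context, which a transmit path needs. A sketch (function name hypothetical):

	/* Sketch: dispose of an skb from a path that may run in either
	 * process or interrupt context, as the transmit path above can. */
	static void example_drop_skb(struct sk_buff *skb)
	{
		dev_kfree_skb_any(skb);		/* never kfree(skb) */
	}
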
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/block/cciss.c linux.21rc5-ac2/drivers/block/cciss.c
--- linux.21rc5/drivers/block/cciss.c	2003-05-28 15:08:52.000000000 +0100
+++ linux.21rc5-ac2/drivers/block/cciss.c	2003-04-25 13:32:34.000000000 +0100
@@ -44,12 +44,12 @@
 #include
 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 2.4.42)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,4,42)
+#define DRIVER_NAME "HP CISS Driver (v 2.4.44)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,4,44)
 /* Embedded module documentation macros - see modules.h */
 MODULE_AUTHOR("Charles M. White III - Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP SA5xxx SA6xxx Controllers version 2.4.42");
+MODULE_DESCRIPTION("Driver for HP SA5xxx SA6xxx Controllers version 2.4.44");
 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400");
 MODULE_LICENSE("GPL");
@@ -73,6 +73,8 @@
 		0x0E11, 0x409B, 0, 0, 0},
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
 		0x0E11, 0x409C, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x409D, 0, 0, 0},
 	{0,}
 };
 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
@@ -90,11 +92,13 @@
 	{ 0x40830E11, "Smart Array 5312", &SA5B_access},
 	{ 0x409A0E11, "Smart Array 641", &SA5_access},
 	{ 0x409B0E11, "Smart Array 642", &SA5_access},
-	{ 0x409C0E11, "Smart Array 6400", &SA5_access},
+	{ 0x409C0E11, "Smart Array 6402", &SA5_access},
+	{ 0x409D0E11, "Smart Array 6404/256", &SA5_access},
 };
 /* How long to wait (in millesconds) for board to go into simple mode */
-#define MAX_CONFIG_WAIT 1000
+#define MAX_CONFIG_WAIT 30000
+#define MAX_IOCTL_CONFIG_WAIT 1000
 /*define how many times we will try a command because of bus resets */
 #define MAX_CMD_RETRIES 3
@@ -103,7 +107,7 @@
 #define NR_CMDS 128 /* #commands that can be outstanding */
 #define MAX_CTLR 8
-#define CCISS_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
+#define CCISS_DMA_MASK 0xFFFFFFFFFFFFFFFF /* 64 bit DMA */
 static ctlr_info_t *hba[MAX_CTLR];
@@ -578,7 +582,7 @@
 			&(c->cfgtable->HostWrite.CoalIntCount));
 		writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
-		for(i=0;i<MAX_CONFIG_WAIT;i++) {
+		for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
 			if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
 				break;
@@ -586,8 +590,11 @@
 			udelay(1000);
 		}
 		spin_unlock_irqrestore(&io_request_lock, flags);
-		if (i >= MAX_CONFIG_WAIT)
-			return -EFAULT;
+		if (i >= MAX_IOCTL_CONFIG_WAIT)
+			/* there is an unlikely case where this can happen,
+			 * involving hot replacing a failed 144 GB drive in a
+			 * RAID 5 set just as we attempt this ioctl. */
+			return -EAGAIN;
 		return 0;
 	}
 	case CCISS_GETNODENAME:
@@ -627,7 +634,7 @@
 		writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
-		for(i=0;i<MAX_CONFIG_WAIT;i++) {
+		for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
 			if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
 				break;
@@ -635,8 +642,11 @@
 			udelay(1000);
 		}
 		spin_unlock_irqrestore(&io_request_lock, flags);
-		if (i >= MAX_CONFIG_WAIT)
-			return -EFAULT;
+		if (i >= MAX_IOCTL_CONFIG_WAIT)
+			/* there is an unlikely case where this can happen,
+			 * involving hot replacing a failed 144 GB drive in a
+			 * RAID 5 set just as we attempt this ioctl. */
+			return -EAGAIN;
 		return 0;
 	}
@@ -1345,15 +1355,7 @@
 		sizeof(ReportLunData_struct), 0, 0, 0 );
 	if (return_code == IO_OK) {
-		/* printk("LUN Data\n--------------------------\n"); */
-		listlength |= (0xff &
-			(unsigned int)(ld_buff->LUNListLength[0])) << 24;
-		listlength |= (0xff &
-			(unsigned int)(ld_buff->LUNListLength[1])) << 16;
-		listlength |= (0xff &
-			(unsigned int)(ld_buff->LUNListLength[2])) << 8;
-		listlength |= 0xff &
-			(unsigned int)(ld_buff->LUNListLength[3]);
+		listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
 	} else { /* reading number of logical volumes failed */
 		printk(KERN_WARNING "cciss: report logical volume"
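The listlength change above collapses four masked shifts into be32_to_cpu(): the controller reports the LUN list length as a big-endian 32-bit value, and the removed shifts were exactly an open-coded big-endian load. A sketch of the equivalence (helper name hypothetical; it assumes the 4-byte field is naturally aligned, as it is within ReportLunData_struct):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Sketch: both decodings yield the same value on any host. */
	static inline u32 example_lun_list_length(const unsigned char b[4])
	{
		u32 shifted = (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
		u32 swapped = be32_to_cpu(*(const __u32 *) b);

		return shifted == swapped ? swapped : 0;  /* always equal */
	}

On big-endian hosts be32_to_cpu() is a no-op; on little-endian it is a byte swap, so the helper documents intent and costs nothing.
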
@@ -2435,25 +2437,54 @@
 	c->io_mem_addr = 0;
 	c->io_mem_length = 0;
 }
+static int find_PCI_BAR_index(struct pci_dev *pdev,
+		unsigned long pci_bar_addr)
+{
+	int i, offset, mem_type, bar_type;
+	if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
+		return 0;
+	offset = 0;
+	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
+		bar_type = pci_resource_flags(pdev, i) &
+			PCI_BASE_ADDRESS_SPACE;
+		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
+			offset += 4;
+		else {
+			mem_type = pci_resource_flags(pdev, i) &
+				PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+			switch (mem_type) {
+			case PCI_BASE_ADDRESS_MEM_TYPE_32:
+			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+				offset += 4;
+				break;
+			case PCI_BASE_ADDRESS_MEM_TYPE_64:
+				offset += 8;
+				break;
+			default: /* reserved in PCI 2.2 */
+				printk(KERN_WARNING "Base address is invalid\n");
+				return -1;
+			}
+		}
+		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
+			return i+1;
+	}
+	return -1;
+}
 	vendor_id = pdev->vendor;
 	device_id = pdev->device;
 	irq = pdev->irq;
-	for(i=0; i<6; i++)
-		addr[i] = pdev->resource[i].start;
-
 	if (pci_enable_device(pdev)) {
 		printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
 		return -1;
@@ -2479,12 +2510,12 @@
 		return -1;
 	}
 	/* search for our IO range so we can protect it */
-	for (i=0; i<6; i++) {
+	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
-		if (pdev->resource[i].flags & 0x01) {
-			c->io_mem_addr = pdev->resource[i].start;
-			c->io_mem_length = pdev->resource[i].end -
-				pdev->resource[i].start +1;
+		if (pci_resource_flags(pdev, i) & 0x01) {
+			c->io_mem_addr = pci_resource_start(pdev, i);
+			c->io_mem_length = pci_resource_end(pdev, i) -
+				pci_resource_start(pdev, i) + 1;
 #ifdef CCISS_DEBUG
 			printk("IO value found base_addr[%d] %lx %lx\n", i,
 				c->io_mem_addr, c->io_mem_length);
@@ -2508,7 +2539,7 @@
 	printk("device_id = %x\n", device_id);
 	printk("command = %x\n", command);
 	for(i=0; i<6; i++)
-		printk("addr[%d] = %x\n", i, addr[i]);
+		printk("addr[%d] = %x\n", i, pci_resource_start(pdev, i));
 	printk("revision = %x\n", revision);
 	printk("irq = %x\n", irq);
 	printk("cache_line_size = %x\n", cache_line_size);
@@ -2523,7 +2554,7 @@
 	 * table
 	 */
-	c->paddr = addr[0] ; /* addressing mode bits already removed */
+	c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
 #ifdef CCISS_DEBUG
 	printk("address 0 = %x\n", c->paddr);
 #endif /* CCISS_DEBUG */
@@ -2532,21 +2563,27 @@
 	/* get the address index number */
 	cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
 	/* I am not prepared to deal with a 64 bit address value */
-	cfg_base_addr &= 0xffff;
+	cfg_base_addr &= (__u32) 0x0000ffff;
 #ifdef CCISS_DEBUG
 	printk("cfg base address = %x\n", cfg_base_addr);
 #endif /* CCISS_DEBUG */
-	cfg_base_addr_index = (cfg_base_addr - PCI_BASE_ADDRESS_0)/4;
+	cfg_base_addr_index =
+		find_PCI_BAR_index(pdev, cfg_base_addr);
 #ifdef CCISS_DEBUG
 	printk("cfg base address index = %x\n", cfg_base_addr_index);
 #endif /* CCISS_DEBUG */
+	if (cfg_base_addr_index == -1) {
+		printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
+		release_io_mem(hba[i]);
+		return -1;
+	}
 	cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
 #ifdef CCISS_DEBUG
 	printk("cfg offset = %x\n", cfg_offset);
 #endif /* CCISS_DEBUG */
 	c->cfgtable = (CfgTable_struct *)
-		remap_pci_mem((addr[cfg_base_addr_index] & 0xfffffff0)
+		remap_pci_mem(pci_resource_start(pdev, cfg_base_addr_index)
 			+ cfg_offset, sizeof(CfgTable_struct));
 	c->board_id = board_id;
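find_PCI_BAR_index() and the hunks above all switch from poking pdev->resource[] directly to the pci_resource_*() accessors, which apply the correct I/O- versus memory-space masking and insulate drivers from resource-table layout changes. A minimal sketch of the idiom (function name hypothetical):

	#include <linux/pci.h>

	/* Sketch: enumerate a device's populated BARs via the accessors. */
	static void example_list_bars(struct pci_dev *pdev)
	{
		int i;

		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			unsigned long len = pci_resource_len(pdev, i);

			if (!len)
				continue;	/* BAR not implemented */
			printk(KERN_DEBUG "BAR %d: %s %#lx-%#lx\n", i,
			       (pci_resource_flags(pdev, i) & IORESOURCE_IO) ?
					"io" : "mem",
			       pci_resource_start(pdev, i),
			       pci_resource_end(pdev, i));
		}
	}
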
@@ -2583,11 +2620,17 @@
 		&(c->cfgtable->HostWrite.TransportRequest));
 	writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
+	/* Here, we wait, possibly for a long time, (4 secs or more).
+	 * In some unlikely cases, (e.g. A failed 144 GB drive in a
+	 * RAID 5 set was hot replaced just as we're coming in here) it
+	 * can take that long.  Normally (almost always) we will wait
+	 * less than 1 sec. */
 	for(i=0;i<MAX_CONFIG_WAIT;i++) {
 		if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
 			break;
 		/* delay and try again */
-		udelay(1000);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
 	}
 #ifdef CCISS_DEBUG
@@ -2661,10 +2704,7 @@
 	printk("LUN Data\n--------------------------\n");
 #endif /* CCISS_DEBUG */
-	listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
-	listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
-	listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
-	listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
+	listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
 	} else { /* reading number of logical volumes failed */
 		printk(KERN_WARNING "cciss: report logical volume"
 			" command failed\n");
@@ -2834,17 +2874,6 @@
 	hba[i]->ctlr = i;
 	hba[i]->pdev = pdev;
-	/* configure PCI DMA stuff */
-	if (!pci_set_dma_mask(pdev, (u64) 0xffffffffffffffff))
-		printk("cciss: using DAC cycles\n");
-	else if (!pci_set_dma_mask(pdev, (u64) 0xffffffff))
-		printk("cciss: not using DAC cycles\n");
-	else {
-		printk("cciss: no suitable DMA available\n");
-		free_hba(i);
-		return -ENODEV;
-	}
-
 	if (register_blkdev(MAJOR_NR+i, hba[i]->devname, &cciss_fops)) {
 		printk(KERN_ERR "cciss: Unable to get major number "
 			"%d for %s\n", MAJOR_NR+i, hba[i]->devname);
@@ -3021,12 +3050,8 @@
 	printk(KERN_INFO DRIVER_NAME "\n");
 	/* Register for out PCI devices */
-	if (pci_register_driver(&cciss_pci_driver) > 0 )
-		return 0;
-	else
-		return -ENODEV;
-
-	}
+	return pci_module_init(&cciss_pci_driver);
+}
 EXPORT_NO_SYMBOLS;
 static int __init init_cciss_module(void)
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/block/cciss.h linux.21rc5-ac2/drivers/block/cciss.h
--- linux.21rc5/drivers/block/cciss.h	2003-05-28 15:08:52.000000000 +0100
+++ linux.21rc5-ac2/drivers/block/cciss.h	2003-05-28 21:48:48.000000000 +0100
@@ -45,8 +45,8 @@
 	char	firm_ver[4];	// Firmware version
 	struct pci_dev *pdev;
 	__u32	board_id;
-	ulong	vaddr;
-	__u32	paddr;
+	unsigned long	vaddr;
+	unsigned long	paddr;
 	unsigned long io_mem_addr;
 	unsigned long io_mem_length;
 	CfgTable_struct *cfgtable;
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/block/DAC960.c linux.21rc5-ac2/drivers/block/DAC960.c
--- linux.21rc5/drivers/block/DAC960.c	2003-05-28 15:07:48.000000000 +0100
+++ linux.21rc5-ac2/drivers/block/DAC960.c	2003-04-22 16:44:36.000000000 +0100
@@ -1133,6 +1133,26 @@
 	DAC960PU/PD/PL	3.51 and above
 	DAC960PU/PD/PL/P	2.73 and above
 */
+#if defined(__alpha__)
+  /*
+    DEC Alpha machines were often equipped with DAC960 cards that were
+    OEMed from Mylex, and had their own custom firmware. Version 2.70,
+    the last custom FW revision to be released by DEC for these older
+    controllers, appears to work quite well with this driver.
+ + Cards tested successfully were several versions each of the PD and + PU, called by DEC the KZPSC and KZPAC, respectively, and having + the Manufacturer Numbers (from Mylex), usually on a sticker on the + back of the board, of: + + KZPSC D040347 (1ch) or D040348 (2ch) or D040349 (3ch) + KZPAC D040395 (1ch) or D040396 (2ch) or D040397 (3ch) + */ +# define FIRMWARE_27x "2.70" +#else +# define FIRMWARE_27x "2.73" +#endif + if (Enquiry2.FirmwareID.MajorVersion == 0) { Enquiry2.FirmwareID.MajorVersion = @@ -1152,7 +1172,7 @@ (Controller->FirmwareVersion[0] == '3' && strcmp(Controller->FirmwareVersion, "3.51") >= 0) || (Controller->FirmwareVersion[0] == '2' && - strcmp(Controller->FirmwareVersion, "2.73") >= 0))) + strcmp(Controller->FirmwareVersion, FIRMWARE_27x) >= 0))) { DAC960_Failure(Controller, "FIRMWARE VERSION VERIFICATION"); DAC960_Error("Firmware Version = '%s'\n", Controller, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/block/elevator.c linux.21rc5-ac2/drivers/block/elevator.c --- linux.21rc5/drivers/block/elevator.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/block/elevator.c 2003-04-22 16:44:36.000000000 +0100 @@ -27,6 +27,25 @@ #include #include + +static int compatible(struct request *req, struct buffer_head *bh, + request_queue_t *q, int rw, + int count, int max_sectors) +{ + if (q->head_active) + return 0; + if (req->waiting) + return 0; + if (req->rq_dev != bh->b_rdev) + return 0; + if (req->cmd != rw) + return 0; + if (req->nr_sectors + count > max_sectors) + return 0; + return 1; +} + + /* * This is a bit tricky. It's given that bh and rq are for the same * device, but the next request might of course not be. Run through @@ -83,22 +102,38 @@ struct list_head *entry = &q->queue_head; unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE; struct request *__rq; - int backmerge_only = 0; + + /* + * Quick one-entry cache of last merge + * nb. we do no latency accounting this way. 
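 * In effect, one pointer compare plus the compatible() checks replaces a
 * scan of the whole queue in the common sequential-I/O case.  A sketch of
 * the fast path, mirroring the code below (same names, no new logic):
 *
 *	if (q->last_request &&
 *	    compatible(q->last_request, bh, q, rw, count, max_sectors) &&
 *	    q->last_request->sector + q->last_request->nr_sectors ==
 *							bh->b_rsector)
 *		*req = q->last_request;   -> ELEVATOR_BACK_MERGE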
+ */ + + if (q->last_request) { + struct request *__rq = q->last_request; - while (!backmerge_only && (entry = entry->prev) != head) { + if (compatible(__rq, bh, q, rw, count, max_sectors)) { + if (__rq->sector + __rq->nr_sectors == bh->b_rsector) { + *req = __rq; + return ELEVATOR_BACK_MERGE; + } + } + } + + + while ((entry = entry->prev) != head) { __rq = blkdev_entry_to_request(entry); /* * we can't insert beyond a zero sequence point */ if (__rq->elevator_sequence <= 0) - backmerge_only = 1; + break; if (__rq->waiting) continue; if (__rq->rq_dev != bh->b_rdev) continue; - if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head) && !backmerge_only) + if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head)) *req = __rq; if (__rq->cmd != rw) continue; @@ -108,7 +143,7 @@ ret = ELEVATOR_BACK_MERGE; *req = __rq; break; - } else if (__rq->sector - count == bh->b_rsector && !backmerge_only) { + } else if (__rq->sector - count == bh->b_rsector) { ret = ELEVATOR_FRONT_MERGE; __rq->elevator_sequence--; *req = __rq; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/block/ll_rw_blk.c linux.21rc5-ac2/drivers/block/ll_rw_blk.c --- linux.21rc5/drivers/block/ll_rw_blk.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/block/ll_rw_blk.c 2003-05-28 15:22:47.000000000 +0100 @@ -360,8 +360,12 @@ { if (q->plugged) { q->plugged = 0; - if (!list_empty(&q->queue_head)) + if (!list_empty(&q->queue_head)) { + if (q->last_request == + blkdev_entry_next_request(&q->queue_head)) + q->last_request = NULL; q->request_fn(q); + } } } @@ -491,6 +495,7 @@ q->plug_tq.routine = &generic_unplug_device; q->plug_tq.data = q; q->plugged = 0; + q->last_request = NULL; /* * These booleans describe the queue properties. We set the * default (and most common) values here. Other drivers can @@ -810,6 +815,7 @@ * inserted at elevator_merge time */ list_add(&req->queue, insert_here); + q->last_request = req; } /* @@ -829,8 +835,10 @@ */ if (q) { list_add(&req->queue, &q->rq[rw].free); - if (++q->rq[rw].count >= q->batch_requests && - waitqueue_active(&q->wait_for_requests[rw])) + if (q->last_request == req) + q->last_request = NULL; + + if (++q->rq[rw].count >= q->batch_requests) wake_up(&q->wait_for_requests[rw]); } } @@ -867,6 +875,7 @@ req->bhtail = next->bhtail; req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; list_del(&next->queue); + q->last_request = req; /* One last thing: we have removed a request, so we now have one less expected IO to complete for accounting purposes. */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/agp/agpgart_be.c linux.21rc5-ac2/drivers/char/agp/agpgart_be.c --- linux.21rc5/drivers/char/agp/agpgart_be.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/agp/agpgart_be.c 2003-05-28 16:10:43.000000000 +0100 @@ -23,6 +23,12 @@ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ + +/* + * Intel(R) 855GM/852GM and 865G support, added by + * David Dawes . 
+ */ + #include #include #include @@ -43,6 +49,9 @@ #include #include #include +#ifdef CONFIG_AGP_NVIDIA + #include +#endif #include #include "agp.h" @@ -577,7 +586,7 @@ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) SetPageReserved(page); - agp_bridge.gatt_table_real = (unsigned long *) table; + agp_bridge.gatt_table_real = (u32 *) table; agp_gatt_table = (void *)table; #ifdef CONFIG_X86 err = change_page_attr(virt_to_page(table), 1<device != PCI_DEVICE_ID_INTEL_830_M_0 && + agp_bridge.dev->device != PCI_DEVICE_ID_INTEL_845_G_0) { + switch (gmch_ctrl & I855_GMCH_GMS_MASK) { + case I855_GMCH_GMS_STOLEN_1M: + gtt_entries = MB(1) - KB(132); + break; + case I855_GMCH_GMS_STOLEN_4M: + gtt_entries = MB(4) - KB(132); + break; + case I855_GMCH_GMS_STOLEN_8M: + gtt_entries = MB(8) - KB(132); + break; + case I855_GMCH_GMS_STOLEN_16M: + gtt_entries = MB(16) - KB(132); + break; + case I855_GMCH_GMS_STOLEN_32M: + gtt_entries = MB(32) - KB(132); + break; + default: + gtt_entries = 0; + break; + } + } else + { + switch (gmch_ctrl & I830_GMCH_GMS_MASK) { + case I830_GMCH_GMS_STOLEN_512: + gtt_entries = KB(512) - KB(132); + break; + case I830_GMCH_GMS_STOLEN_1024: + gtt_entries = MB(1) - KB(132); + break; + case I830_GMCH_GMS_STOLEN_8192: + gtt_entries = MB(8) - KB(132); + break; + case I830_GMCH_GMS_LOCAL: + rdct = INREG8(intel_i830_private.registers, + I830_RDRAM_CHANNEL_TYPE); + gtt_entries = (I830_RDRAM_ND(rdct) + 1) * + MB(ddt[I830_RDRAM_DDT(rdct)]); + local = 1; + break; + default: + gtt_entries = 0; + break; + } } + if (gtt_entries > 0) + printk(KERN_INFO PFX "Detected %dK %s memory.\n", + gtt_entries / KB(1), local ? "local" : "stolen"); + else + printk(KERN_INFO PFX + "No pre-allocated video memory detected.\n"); gtt_entries /= KB(4); intel_i830_private.gtt_entries = gtt_entries; @@ -1192,9 +1231,16 @@ u16 gmch_ctrl; aper_size_info_fixed *values; - pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); values = A_SIZE_FIX(agp_bridge.aperture_sizes); + if (agp_bridge.dev->device != PCI_DEVICE_ID_INTEL_830_M_0 && + agp_bridge.dev->device != PCI_DEVICE_ID_INTEL_845_G_0) { + agp_bridge.previous_size = agp_bridge.current_size = (void *) values; + agp_bridge.aperture_size_idx = 0; + return(values[0].size); + } + + pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { agp_bridge.previous_size = agp_bridge.current_size = (void *) values; agp_bridge.aperture_size_idx = 0; @@ -2450,8 +2496,8 @@ return retval; } - agp_bridge.gatt_table_real = page_dir.real; - agp_bridge.gatt_table = page_dir.remapped; + agp_bridge.gatt_table_real = (u32 *)page_dir.real; + agp_bridge.gatt_table = (u32 *)page_dir.remapped; agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real); /* Get the address for the gart region. @@ -2477,8 +2523,8 @@ { amd_page_map page_dir; - page_dir.real = agp_bridge.gatt_table_real; - page_dir.remapped = agp_bridge.gatt_table; + page_dir.real = (unsigned long *)agp_bridge.gatt_table_real; + page_dir.remapped = (unsigned long *)agp_bridge.gatt_table; amd_free_gatt_pages(); amd_free_page_map(&page_dir); @@ -3559,8 +3605,8 @@ return retval; } - agp_bridge.gatt_table_real = page_dir.real; - agp_bridge.gatt_table = page_dir.remapped; + agp_bridge.gatt_table_real = (u32 *)page_dir.real; + agp_bridge.gatt_table = (u32 *)page_dir.remapped; agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real); /* Get the address for the gart region. 
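The (u32 *) and (unsigned long *) casts in the AMD and ServerWorks hunks here follow from agp_bridge.gatt_table_real having become a u32 * (GATT entries are 32-bit words even on 64-bit machines), while both drivers' page-map helpers still work in unsigned long pointers. The structure being cast back and forth is, in sketch form (each driver defines its own equivalent; field roles inferred from the code above):

	/* Sketch of the {amd,serverworks}_page_map pair used above. */
	struct example_page_map {
		unsigned long *real;		/* table page, kernel virtual address */
		unsigned long *remapped;	/* uncached alias that the
						   chipset-visible entries are
						   written through */
	};
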
@@ -3588,8 +3634,8 @@ { serverworks_page_map page_dir; - page_dir.real = agp_bridge.gatt_table_real; - page_dir.remapped = agp_bridge.gatt_table; + page_dir.real = (unsigned long *)agp_bridge.gatt_table_real; + page_dir.remapped = (unsigned long *)agp_bridge.gatt_table; serverworks_free_gatt_pages(); serverworks_free_page_map(&page_dir); @@ -4003,6 +4049,330 @@ #endif /* CONFIG_AGP_SWORKS */ +#ifdef CONFIG_AGP_NVIDIA + +static struct _nvidia_private { + struct pci_dev *dev_1; + struct pci_dev *dev_2; + struct pci_dev *dev_3; + volatile u32 *aperture; + int num_active_entries; + off_t pg_offset; +} nvidia_private; + +static int nvidia_fetch_size(void) +{ + int i; + u8 size_value; + aper_size_info_8 *values; + + pci_read_config_byte(agp_bridge.dev, NVIDIA_0_APSIZE, &size_value); + size_value &= 0x0f; + values = A_SIZE_8(agp_bridge.aperture_sizes); + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (size_value == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +#define SYSCFG 0xC0010010 +#define IORR_BASE0 0xC0010016 +#define IORR_MASK0 0xC0010017 +#define AMD_K7_NUM_IORR 2 + +static int nvidia_init_iorr(u32 base, u32 size) +{ + u32 base_hi, base_lo; + u32 mask_hi, mask_lo; + u32 sys_hi, sys_lo; + u32 iorr_addr, free_iorr_addr; + + /* Find the iorr that is already used for the base */ + /* If not found, determine the uppermost available iorr */ + free_iorr_addr = AMD_K7_NUM_IORR; + for(iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) { + rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi); + rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi); + + if ((base_lo & 0xfffff000) == (base & 0xfffff000)) + break; + + if ((mask_lo & 0x00000800) == 0) + free_iorr_addr = iorr_addr; + } + + if (iorr_addr >= AMD_K7_NUM_IORR) { + iorr_addr = free_iorr_addr; + if (iorr_addr >= AMD_K7_NUM_IORR) + return -EINVAL; + } + + base_hi = 0x0; + base_lo = (base & ~0xfff) | 0x18; + mask_hi = 0xf; + mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800; + wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi); + wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi); + + rdmsr(SYSCFG, sys_lo, sys_hi); + sys_lo |= 0x00100000; + wrmsr(SYSCFG, sys_lo, sys_hi); + + return 0; +} + +static int nvidia_configure(void) +{ + int i, rc, num_dirs; + u32 apbase, aplimit; + aper_size_info_8 *current_size; + u32 temp; + + current_size = A_SIZE_8(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, NVIDIA_0_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, NVIDIA_0_APBASE, &apbase); + apbase &= PCI_BASE_ADDRESS_MEM_MASK; + agp_bridge.gart_bus_addr = apbase; + aplimit = apbase + (current_size->size * 1024 * 1024) - 1; + pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase); + pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit); + pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase); + pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit); + if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024))) + return rc; + + /* directory size is 64k */ + num_dirs = current_size->size / 64; + nvidia_private.num_active_entries = current_size->num_entries; + nvidia_private.pg_offset = 0; + if (num_dirs == 0) { + num_dirs = 1; + nvidia_private.num_active_entries /= (64 / current_size->size); + nvidia_private.pg_offset = 
(apbase & (64 * 1024 * 1024 - 1) & + ~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE; + } + + /* attbase */ + for(i = 0; i < 8; i++) { + pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i), + (agp_bridge.gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1); + } + + /* gtlb control */ + pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); + pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, + temp | 0x11); + + /* gart control */ + pci_read_config_dword(agp_bridge.dev, NVIDIA_0_APSIZE, &temp); + pci_write_config_dword(agp_bridge.dev, NVIDIA_0_APSIZE, + temp | 0x100); + + /* map aperture */ + nvidia_private.aperture = + (volatile u32 *) ioremap(apbase, 33 * PAGE_SIZE); + + return 0; +} + +static void nvidia_cleanup(void) +{ + aper_size_info_8 *previous_size; + u32 temp; + + /* gart control */ + pci_read_config_dword(agp_bridge.dev, NVIDIA_0_APSIZE, &temp); + pci_write_config_dword(agp_bridge.dev, NVIDIA_0_APSIZE, + temp & ~(0x100)); + + /* gtlb control */ + pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); + pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, + temp & ~(0x11)); + + /* unmap aperture */ + iounmap((void *) nvidia_private.aperture); + + /* restore previous aperture size */ + previous_size = A_SIZE_8(agp_bridge.previous_size); + pci_write_config_byte(agp_bridge.dev, NVIDIA_0_APSIZE, + previous_size->size_value); + + /* restore iorr for previous aperture size */ + nvidia_init_iorr(agp_bridge.gart_bus_addr, + previous_size->size * 1024 * 1024); +} + +static void nvidia_tlbflush(agp_memory * mem) +{ + int i; + unsigned long end; + u32 wbc_reg, wbc_mask, temp; + + /* flush chipset */ + switch(agp_bridge.type) { + case NVIDIA_NFORCE: + wbc_mask = 0x00010000; + break; + case NVIDIA_NFORCE2: + wbc_mask = 0x80000000; + break; + default: + wbc_mask = 0; + break; + } + + if (wbc_mask) { + pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg); + wbc_reg |= wbc_mask; + pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg); + + end = jiffies + 3*HZ; + do { + pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg); + if ((signed)(end - jiffies) <= 0) { + printk(KERN_ERR "TLB flush took more than 3 seconds.\n"); + } + } while (wbc_reg & wbc_mask); + } + + /* flush TLB entries */ + for(i = 0; i < 32 + 1; i++) + temp = nvidia_private.aperture[i * PAGE_SIZE / sizeof(u32)]; + for(i = 0; i < 32 + 1; i++) + temp = nvidia_private.aperture[i * PAGE_SIZE / sizeof(u32)]; +} + +static unsigned long nvidia_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + +static int nvidia_insert_memory(agp_memory * mem, + off_t pg_start, int type) +{ + int i, j; + + if ((type != 0) || (mem->type != 0)) + return -EINVAL; + + if ((pg_start + mem->page_count) > + (nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE)) + return -EINVAL; + + for(j = pg_start; j < (pg_start + mem->page_count); j++) { + if (!PGE_EMPTY(agp_bridge.gatt_table[nvidia_private.pg_offset + j])) { + return -EBUSY; + } + } + + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + agp_bridge.gatt_table[nvidia_private.pg_offset + j] = mem->memory[i]; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static int nvidia_remove_memory(agp_memory * mem, off_t pg_start, + int type) +{ + int i; + + if ((type != 0) || (mem->type != 0)) + return -EINVAL; + + for (i = 
pg_start; i < (mem->page_count + pg_start); i++) { + agp_bridge.gatt_table[nvidia_private.pg_offset + i] = + (unsigned long) agp_bridge.scratch_page; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static aper_size_info_8 nvidia_generic_sizes[5] = +{ + {512, 131072, 7, 0}, + {256, 65536, 6, 8}, + {128, 32768, 5, 12}, + {64, 16384, 4, 14}, + /* The 32M mode still requires a 64k gatt */ + {32, 16384, 4, 15} +}; + +static gatt_mask nvidia_generic_masks[] = +{ + {0x00000001, 0} +}; + +static int __init nvidia_generic_setup (struct pci_dev *pdev) +{ + nvidia_private.dev_1 = + pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1)); + nvidia_private.dev_2 = + pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2)); + nvidia_private.dev_3 = + pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0)); + + if((nvidia_private.dev_1 == NULL) || + (nvidia_private.dev_2 == NULL) || + (nvidia_private.dev_3 == NULL)) { + printk(KERN_INFO PFX "agpgart: Detected an NVIDIA " + "nForce/nForce2 chipset, but could not find " + "the secondary devices.\n"); + agp_bridge.type = NOT_SUPPORTED; + return -ENODEV; + } + + agp_bridge.masks = nvidia_generic_masks; + agp_bridge.aperture_sizes = (void *) nvidia_generic_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 5; + agp_bridge.dev_private_data = (void *) &nvidia_private; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = nvidia_configure; + agp_bridge.fetch_size = nvidia_fetch_size; + agp_bridge.cleanup = nvidia_cleanup; + agp_bridge.tlb_flush = nvidia_tlbflush; + agp_bridge.mask_memory = nvidia_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = nvidia_insert_memory; + agp_bridge.remove_memory = nvidia_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; +} + +#endif /* CONFIG_AGP_NVIDIA */ + #ifdef CONFIG_AGP_HP_ZX1 #ifndef log2 @@ -4516,15 +4886,38 @@ { PCI_DEVICE_ID_INTEL_830_M_0, PCI_VENDOR_ID_INTEL, INTEL_I830_M, - "Intel", - "i830M", + "Intel(R)", + "830M", intel_830mp_setup }, - { PCI_DEVICE_ID_INTEL_845_G_0, + + { PCI_DEVICE_ID_INTEL_845_G_0, PCI_VENDOR_ID_INTEL, INTEL_I845_G, - "Intel", - "i845G", + "Intel(R)", + "845G", + intel_845_setup }, + + { PCI_DEVICE_ID_INTEL_855_GM_0, + PCI_VENDOR_ID_INTEL, + INTEL_I855_PM, + "Intel(R)", + "855PM", intel_845_setup }, + + { PCI_DEVICE_ID_INTEL_855_PM_0, + PCI_VENDOR_ID_INTEL, + INTEL_I855_PM, + "Intel(R)", + "855PM", + intel_845_setup }, + + { PCI_DEVICE_ID_INTEL_865_G_0, + PCI_VENDOR_ID_INTEL, + INTEL_I865_G, + "Intel(R)", + "865G", + intel_845_setup }, + { PCI_DEVICE_ID_INTEL_840_0, PCI_VENDOR_ID_INTEL, INTEL_I840, @@ -4565,12 +4958,24 @@ "SiS", "740", sis_generic_setup }, + { PCI_DEVICE_ID_SI_651, + PCI_VENDOR_ID_SI, + SIS_GENERIC, + "SiS", + "651", + sis_generic_setup }, { PCI_DEVICE_ID_SI_650, PCI_VENDOR_ID_SI, SIS_GENERIC, "SiS", "650", sis_generic_setup }, + { PCI_DEVICE_ID_SI_651, + PCI_VENDOR_ID_SI, + SIS_GENERIC, + "SiS", + "651", + sis_generic_setup }, { PCI_DEVICE_ID_SI_645, PCI_VENDOR_ID_SI, SIS_GENERIC, @@ 
-4583,6 +4988,12 @@ "SiS", "646", sis_generic_setup }, + { PCI_DEVICE_ID_SI_648, + PCI_VENDOR_ID_SI, + SIS_GENERIC, + "SiS", + "648", + sis_generic_setup }, { PCI_DEVICE_ID_SI_735, PCI_VENDOR_ID_SI, SIS_GENERIC, @@ -4595,6 +5006,12 @@ "SiS", "745", sis_generic_setup }, + { PCI_DEVICE_ID_SI_746, + PCI_VENDOR_ID_SI, + SIS_GENERIC, + "SiS", + "746", + sis_generic_setup }, { PCI_DEVICE_ID_SI_730, PCI_VENDOR_ID_SI, SIS_GENERIC, @@ -4658,6 +5075,12 @@ "Via", "MVP3", via_generic_setup }, + { PCI_DEVICE_ID_VIA_8601_0, + PCI_VENDOR_ID_VIA, + VIA_APOLLO_PLE133, + "Via", + "Apollo PLE133", + via_generic_setup }, { PCI_DEVICE_ID_VIA_82C691_0, PCI_VENDOR_ID_VIA, VIA_APOLLO_PRO, @@ -4682,17 +5105,23 @@ "Via", "Apollo Pro KT266", via_generic_setup }, - { PCI_DEVICE_ID_VIA_8377_0, + { PCI_DEVICE_ID_VIA_8375, + PCI_VENDOR_ID_VIA, + VIA_APOLLO_KM266, + "Via", + "Apollo Pro KM266 / KL266", + via_generic_setup }, + { PCI_DEVICE_ID_VIA_8377_0, PCI_VENDOR_ID_VIA, VIA_APOLLO_KT400, "Via", "Apollo Pro KT400", via_generic_setup }, - { PCI_DEVICE_ID_VIA_P4X333, + { PCI_DEVICE_ID_VIA_8377_0, PCI_VENDOR_ID_VIA, - VIA_APOLLO_P4X400, + VIA_APOLLO_KT400, "Via", - "Apollo P4X400", + "Apollo Pro KT400", via_generic_setup }, { 0, PCI_VENDOR_ID_VIA, @@ -4702,6 +5131,27 @@ via_generic_setup }, #endif /* CONFIG_AGP_VIA */ +#ifdef CONFIG_AGP_NVIDIA + { PCI_DEVICE_ID_NVIDIA_NFORCE, + PCI_VENDOR_ID_NVIDIA, + NVIDIA_NFORCE, + "NVIDIA", + "nForce", + nvidia_generic_setup }, + { PCI_DEVICE_ID_NVIDIA_NFORCE2, + PCI_VENDOR_ID_NVIDIA, + NVIDIA_NFORCE2, + "NVIDIA", + "nForce2", + nvidia_generic_setup }, + { 0, + PCI_VENDOR_ID_NVIDIA, + NVIDIA_GENERIC, + "NVIDIA", + "Generic", + nvidia_generic_setup }, +#endif /* CONFIG_AGP_NVIDIA */ + #ifdef CONFIG_AGP_HP_ZX1 { PCI_DEVICE_ID_HP_ZX1_LBA, PCI_VENDOR_ID_HP, @@ -4888,10 +5338,14 @@ * with an external graphics * card. It will be initialized later */ + printk(KERN_ERR PFX "Detected an " + "Intel(R) 845G, but could not find the" + " secondary device. Assuming a " + "non-integrated video card.\n"); agp_bridge.type = INTEL_I845_G; break; } - printk(KERN_INFO PFX "Detected an Intel " + printk(KERN_INFO PFX "Detected an Intel(R) " "845G Chipset.\n"); agp_bridge.type = INTEL_I810; return intel_i830_setup(i810_dev); @@ -4913,10 +5367,118 @@ agp_bridge.type = INTEL_I830_M; break; } - printk(KERN_INFO PFX "Detected an Intel " + printk(KERN_INFO PFX "Detected an Intel(R) " "830M Chipset.\n"); agp_bridge.type = INTEL_I810; return intel_i830_setup(i810_dev); + case PCI_DEVICE_ID_INTEL_855_GM_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_855_GM_1, NULL); + if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) { + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_855_GM_1, i810_dev); + } + if (i810_dev == NULL) { + /* + * We probably have an 855PM chipset + * with an external graphics + * card. It will be initialized later. 
+ */ + agp_bridge.type = INTEL_I855_PM; + break; + } + { + u32 capval = 0; + const char *name = "855GM/852GM"; + + pci_read_config_dword(dev, I85X_CAPID, &capval); + switch ((capval >> I85X_VARIANT_SHIFT) & + I85X_VARIANT_MASK) { + case I855_GME: + name = "855GME"; + break; + case I855_GM: + name = "855GM"; + break; + case I852_GME: + name = "852GME"; + break; + case I852_GM: + name = "852GM"; + break; + } + printk(KERN_INFO PFX "Detected an Intel(R) " + "%s Chipset.\n", name); + } + agp_bridge.type = INTEL_I810; + return intel_i830_setup(i810_dev); + case PCI_DEVICE_ID_INTEL_855_PM_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_855_PM_1, NULL); + if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) { + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_855_PM_1, i810_dev); + } + if (i810_dev == NULL) { + /* + * We probably have an 855PM chipset + * with an external graphics + * card. It will be initialized later. + */ + agp_bridge.type = INTEL_I855_PM; + break; + } + { + u32 capval = 0; + const char *name = "855PM/852PM"; + + pci_read_config_dword(dev, I85X_CAPID, &capval); + switch ((capval >> I85X_VARIANT_SHIFT) & + I85X_VARIANT_MASK) { + case I855_PME: + name = "855PME"; + break; + case I855_PM: + name = "855PM"; + break; + case I852_PME: + name = "852PME"; + break; + case I852_PM: + name = "852PM"; + break; + } + printk(KERN_INFO PFX "Detected an Intel(R) " + "%s Chipset.\n", name); + } + agp_bridge.type = INTEL_I810; + return intel_i830_setup(i810_dev); + case PCI_DEVICE_ID_INTEL_865_G_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_865_G_1, NULL); + if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) { + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_865_G_1, i810_dev); + } + + if (i810_dev == NULL) { + /* + * We probably have a 865G chipset + * with an external graphics + * card. It will be initialized later + */ + printk(KERN_ERR PFX "Detected an " + "Intel(R) 865G, but could not" + " find the" + " secondary device. 
Assuming a " + "non-integrated video card.\n"); + agp_bridge.type = INTEL_I865_G; + break; + } + printk(KERN_INFO PFX "Detected an Intel(R) " + "865G Chipset.\n"); + agp_bridge.type = INTEL_I810; + return intel_i830_setup(i810_dev); default: break; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/agp/agp.h linux.21rc5-ac2/drivers/char/agp/agp.h --- linux.21rc5/drivers/char/agp/agp.h 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/agp/agp.h 2003-05-28 15:31:54.000000000 +0100 @@ -166,6 +166,9 @@ #ifndef PCI_DEVICE_ID_VIA_8363_0 #define PCI_DEVICE_ID_VIA_8363_0 0x0305 #endif +#ifndef PCI_DEVICE_ID_VIA_8601_0 +#define PCI_DEVICE_ID_VIA_8601_0 0x0601 +#endif #ifndef PCI_DEVICE_ID_VIA_82C694X_0 #define PCI_DEVICE_ID_VIA_82C694X_0 0x0605 #endif @@ -184,6 +187,24 @@ #ifndef PCI_DEVICE_ID_INTEL_830_M_1 #define PCI_DEVICE_ID_INTEL_830_M_1 0x3577 #endif +#ifndef PCI_DEVICE_ID_INTEL_855_GM_0 +#define PCI_DEVICE_ID_INTEL_855_GM_0 0x3580 +#endif +#ifndef PCI_DEVICE_ID_INTEL_855_GM_1 +#define PCI_DEVICE_ID_INTEL_855_GM_1 0x3582 +#endif +#ifndef PCI_DEVICE_ID_INTEL_855_PM_0 +#define PCI_DEVICE_ID_INTEL_855_PM_0 0x3340 +#endif +#ifndef PCI_DEVICE_ID_INTEL_855_PM_1 +#define PCI_DEVICE_ID_INTEL_855_PM_1 0x3342 +#endif +#ifndef PCI_DEVICE_ID_INTEL_865_G_0 +#define PCI_DEVICE_ID_INTEL_865_G_0 0x2570 +#endif +#ifndef PCI_DEVICE_ID_INTEL_865_G_1 +#define PCI_DEVICE_ID_INTEL_865_G_1 0x2572 +#endif #ifndef PCI_DEVICE_ID_INTEL_820_0 #define PCI_DEVICE_ID_INTEL_820_0 0x2500 #endif @@ -280,6 +301,28 @@ #define INTEL_NBXCFG 0x50 #define INTEL_ERRSTS 0x91 +/* Intel 855GM/852GM registers */ +#define I855_GMCH_CTRL 0x52 +#define I855_GMCH_ENABLED 0x4 +#define I855_GMCH_GMS_MASK (0x7 << 4) +#define I855_GMCH_GMS_STOLEN_0M 0x0 +#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) +#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) +#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) +#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) +#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) +#define I85X_CAPID 0x44 +#define I85X_VARIANT_MASK 0x7 +#define I85X_VARIANT_SHIFT 5 +#define I855_GME 0x0 +#define I855_GM 0x4 +#define I852_GME 0x2 +#define I852_GM 0x5 +#define I855_PME 0x0 +#define I855_PM 0x4 +#define I852_PME 0x2 +#define I852_PM 0x5 + /* intel i830 registers */ #define I830_GMCH_CTRL 0x52 #define I830_GMCH_ENABLED 0x4 @@ -412,6 +455,17 @@ #define SVWRKS_POSTFLUSH 0x14 #define SVWRKS_DIRFLUSH 0x0c +/* NVIDIA registers */ +#define NVIDIA_0_APBASE 0x10 +#define NVIDIA_0_APSIZE 0x80 +#define NVIDIA_1_WBC 0xf0 +#define NVIDIA_2_GARTCTRL 0xd0 +#define NVIDIA_2_APBASE 0xd8 +#define NVIDIA_2_APLIMIT 0xdc +#define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4) +#define NVIDIA_3_APBASE 0x50 +#define NVIDIA_3_APLIMIT 0x54 + /* HP ZX1 SBA registers */ #define HP_ZX1_CTRL 0x200 #define HP_ZX1_IBASE 0x300 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/amiserial.c linux.21rc5-ac2/drivers/char/amiserial.c --- linux.21rc5/drivers/char/amiserial.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/amiserial.c 2003-04-22 16:44:36.000000000 +0100 @@ -1544,7 +1544,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
state->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/cd1865/cd1865.c linux.21rc5-ac2/drivers/char/cd1865/cd1865.c --- linux.21rc5/drivers/char/cd1865/cd1865.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/cd1865/cd1865.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,2913 @@ +/* -*- linux-c -*- */ +/* + * This code was modified from + * specialix.c -- specialix IO8+ multiport serial driver. + * + * Copyright (C) 1997 Roger Wolff (R.E.Wolff@BitWizard.nl) + * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com) + * Modifications (C) 2002 Telford Tools, Inc. (martillo@telfordtools.com) + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#define VERSION "2.11" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cdsiolx.h" +#include "../cd1865.h" /* will move all files up one level */ +#include "siolx.h" +#include "plx9060.h" + +#define SIOLX_NORMAL_MAJOR 254 /* One is needed */ +#define SIOLX_ID 0x10 +#define CD186x_MSMR 0x61 /* modem/timer iack */ +#define CD186x_TSMR 0x62 /* tx iack */ +#define CD186x_RSMR 0x63 /* rx iack */ + +/* Configurable options: */ + +/* Am I paranoid or not ? ;-) */ +#define SIOLX_PARANOIA_CHECK + +/* Do I trust the IRQ from the card? (enabeling it doesn't seem to help) + When the IRQ routine leaves the chip in a state that is keeps on + requiring attention, the timer doesn't help either. */ +#undef SIOLX_TIMER +/* + * The following defines are mostly for testing purposes. But if you need + * some nice reporting in your syslog, you can define them also. + */ +#undef SIOLX_REPORT_FIFO +#undef SIOLX_REPORT_OVERRUN + +#ifdef CONFIG_SIOLX_RTSCTS /* may need to set this */ +#define SIOLX_CRTSCTS(bla) 1 +#else +#define SIOLX_CRTSCTS(tty) C_CRTSCTS(tty) +#endif + +/* Used to be outb (0xff, 0x80); */ +#define short_pause() udelay (1) + +#define SIOLX_LEGAL_FLAGS \ + (ASYNC_HUP_NOTIFY | ASYNC_SAK | ASYNC_SPLIT_TERMIOS | \ + ASYNC_SPD_HI | ASYNC_SPEED_VHI | ASYNC_SESSION_LOCKOUT | \ + ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP) + +#ifndef MIN +#define MIN(a,b) ((a) < (b) ? 
(a) : (b)) +#endif + +DECLARE_TASK_QUEUE(tq_siolx); + +#undef RS_EVENT_WRITE_WAKEUP +#define RS_EVENT_WRITE_WAKEUP 0 + +#define SIOLX_TYPE_NORMAL 1 +#define SIOLX_TYPE_CALLOUT 2 + +#define BD_8000P 1 +#define BD_16000P 2 +#define BD_8000C 3 +#define BD_16000C 4 +#define BD_MAX BD_16000C + +static struct siolx_board *SiolxIrqRoot[SIOLX_NUMINTS]; + +static char *sio16_board_type[] = +{ + "unknown", + " 8000P ", + "16000P ", + " 8000C ", + "16000C " +}; +static struct tty_driver siolx_driver, siolx_callout_driver; +static int siolx_refcount; +static unsigned char * tmp_buf; +static DECLARE_MUTEX(tmp_buf_sem); +static unsigned long baud_table[] = +{ + 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, + 9600, 19200, 38400, 57600, 115200, 0, +}; +static int siolx_debug = 0; /* turns on lots of */ + /* debugging messages*/ +static int siolx_major = SIOLX_NORMAL_MAJOR; +#ifdef MODULE +static int siolx_minorstart = 256; +#endif +static int siolx_vendor_id = PCI_VENDOR_ID_PLX; +static int siolx_device_id = PCI_DEVICE_ID_PLX_9060SD; +static int siolx_subsystem_vendor = AURASUBSYSTEM_VENDOR_ID; +static int siolx_subsystem_pci_device = AURASUBSYSTEM_MPASYNCPCI; +static int siolx_subsystem_cpci_device = AURASUBSYSTEM_MPASYNCcPCI; +static int siolx_bhindex = SIOLX_BH; /* if this softinterrupt slot is filled */ + +MODULE_PARM(siolx_vendor_id, "i"); +MODULE_PARM(siolx_device_id, "i"); +#ifdef MODULE +MODULE_PARM(siolx_minorstart, "i"); +#endif +MODULE_PARM(siolx_major, "i"); +MODULE_PARM(siolx_subsystem_vendor, "i"); +MODULE_PARM(siolx_subsystem_pci_device, "i"); +MODULE_PARM(siolx_subsystem_cpci_device, "i"); +MODULE_PARM(siolx_bhindex, "i"); + +static struct siolx_board *siolx_board_root; +static struct siolx_board *siolx_board_last; +static struct siolx_port *siolx_port_root; +static struct siolx_port *siolx_port_last; +static unsigned int NumSiolxPorts; +static struct tty_struct **siolx_table; /* make dynamic */ +static struct termios **siolx_termios; +static struct termios **siolx_termios_locked; +static int siolx_driver_registered; +static int siolx_callout_driver_registered; + +#ifdef SIOLX_TIMER +static struct timer_list missed_irq_timer; +static void siolx_interrupt(int irq, void * dev_id, struct pt_regs * regs); +#endif + +extern struct tty_driver *get_tty_driver(kdev_t device); + +static inline int port_No_by_chip (struct siolx_port const * port) +{ + return SIOLX_PORT(port->boardport); +} + +/* Describe the current board and port configuration */ + +static int siolx_read_proc(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct siolx_port *port = siolx_port_root; + off_t begin = 0; + int len = 0; + unsigned int typeno; + char *revision = "$Revision: 1.11 $"; + + len += sprintf(page, "SIOLX Version %s. 
%s\n", VERSION, revision); + len += sprintf(page+len, "TTY MAJOR = %d, CUA MAJOR = %d.\n", + siolx_driver.major, siolx_callout_driver.major); + + for (port = siolx_port_root; port != NULL; port = port->next_by_global_list) + { + typeno = port->board->boardtype; + if(typeno > BD_MAX) + { + typeno = 0; + } + len += sprintf(page+len, + "%3.3d: bd %2.2d: %s: ch %d: pt %2.2d/%d: tp %4.4d%c: bs %2.2d: sl %2.2d: ir %2.2d: fl %c%c%c%c%c\n", + siolx_driver.minor_start + port->driverport, + port->board->boardnumber, + sio16_board_type[typeno], + port->board->chipnumber, + port->boardport, + port_No_by_chip(port), /* port relative to chip */ + port->board->chiptype, + port->board->chiprev, + port->board->pdev.bus->number, + PCI_SLOT(port->board->pdev.devfn), + port->board->irq, + (port->flags & ASYNC_INITIALIZED) ? 'I' : ' ', + (port->flags & ASYNC_CALLOUT_ACTIVE) ? 'D' : ' ', + (port->flags & ASYNC_NORMAL_ACTIVE) ? 'T' : ' ', + (port->flags & ASYNC_CLOSING) ? 'C' : ' ', + port->board->reario ? 'R' : ' '); + if (len+begin > off+count) + { + goto done; + } + if (len+begin < off) + { + begin += len; + len = 0; + } + } + *eof = 1; + done: + if (off >= len+begin) + { + return 0; + } + *start = page + (off-begin); + return ((count < begin+len-off) ? count : begin+len-off); +} + +#ifndef MODULE +static int GetMinorStart(void) /* minor start can be determined on fly when driver linked to kernel */ +{ + struct tty_driver *ttydriver; + int minor_start = 0; + kdev_t device; + + device = MKDEV(siolx_major, minor_start); + while(ttydriver = get_tty_driver(device), ttydriver != NULL) + { + minor_start += ttydriver->num; + device = MKDEV(TTY_MAJOR, minor_start); + } + return minor_start; + +} +#endif + +/* only once per board chain */ +void SiolxResetBoard(struct siolx_board * bp, struct pci_dev *pdev) +{ + register unsigned int regvalue; + unsigned char savedvalue; + /* + * Yuch. Here's the deal with the reset bits in the + * ECNTL register of the 9060SD. + * + * It appears that LCLRST resets the PLX local configuration + * registers (not the PCI configuration registers) to their + * default values. We need to use LCLRST because it + * is the command (I think) that pulls the local reset + * line on the local bus side of the 9060SD. + * + * Unfortunately, by resetting the PLX local configuration + * registers, we can't use the damn board. So we must + * reinitialize them. The easiest way to do that is to run + * the LDREG command. Unfortunately, it has the side effect + * of reinitializing the PCI configuration registers. It seems, + * however that only the value stowed in ILINE gets choked; all + * of the others seem to be properly preserved. + * + * So, what the code does now is to get a copy of ILINE by + * hand, and then restore it after reloading the registers. 
+ */ + + bp->pdev = *pdev; + bp->plx_vaddr = (unsigned long) ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if(bp->plx_vaddr) + { + regvalue = readl(bp->plx_vaddr + PLX_ECNTL); + regvalue &= ~PLX_ECNTLLDREG; + regvalue |= PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &savedvalue); + regvalue |= PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, savedvalue); + regvalue |= PLX_ECNTLINITSTAT; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + writel(0, bp->plx_vaddr + PLX_ICSR); + } +} + +void SiolxShutdownBoard(struct siolx_board * bp) +{ + register unsigned int regvalue; + unsigned char savedvalue; + struct pci_dev *pdev; + + if(bp->chipnumber == 0) /* only shutdown first in a chain */ + { + pdev = &bp->pdev; + + writel(0, bp->plx_vaddr + PLX_ICSR); + regvalue = readl(bp->plx_vaddr + PLX_ECNTL); + regvalue &= ~PLX_ECNTLLDREG; + regvalue |= PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &savedvalue); + regvalue |= PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, savedvalue); + regvalue |= PLX_ECNTLINITSTAT; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + writel(0, bp->plx_vaddr + PLX_ICSR); + iounmap((void*)bp->plx_vaddr); + bp->plx_vaddr = 0; + } +} + +static inline int siolx_paranoia_check(struct siolx_port const * port, + kdev_t device, const char *routine) +{ +#ifdef SIOLX_PARANOIA_CHECK + static const char *badmagic = + KERN_ERR "siolx: Warning: bad siolx port magic number for device %s in %s\n"; + static const char *badinfo = + KERN_ERR "siolx: Warning: null siolx port for device %s in %s\n"; + + if (!port) + { + printk(badinfo, kdevname(device), routine); + return 1; + } + if (port->magic != SIOLX_MAGIC) + { + printk(badmagic, kdevname(device), routine); + return 1; + } +#endif + return 0; +} + + +/* + * + * Service functions for siolx Aurora Asynchronous Adapter driver. 
+ * + */ + +/* Get board number from pointer */ +static inline int board_No (struct siolx_board * bp) +{ + return bp->boardnumber; /* note same for all chips/boards in a chain */ +} + + +/* Get port number from pointer */ +static inline int port_No (struct siolx_port const * port) +{ + return port->driverport; /* offset from minor start */ +} + +/* Get pointer to board from pointer to port */ +static inline struct siolx_board * port_Board(struct siolx_port const * port) +{ + return port->board; /* same for ports on both chips on a board */ +} + + +/* Input Byte from CL CD186x register */ +static inline unsigned char siolx_in(struct siolx_board * bp, unsigned short reg) +{ + return readb (bp->base + reg); +} + + +/* Output Byte to CL CD186x register */ +static inline void siolx_out(struct siolx_board * bp, unsigned short reg, + unsigned char val) +{ + writeb(val, bp->base + reg); +} + + +/* Wait for Channel Command Register ready */ +static int siolx_wait_CCR(struct siolx_board * bp) +{ + unsigned long delay; + + for (delay = SIOLX_CCR_TIMEOUT; delay; delay--) + { + udelay(1); + if (!siolx_in(bp, CD186x_CCR)) + { + return 0; + } + } + printk(KERN_ERR "siolx:board %d: timeout waiting for CCR.\n", board_No(bp)); + return -1; +} + +/* Wait for ready */ +static int siolx_wait_GIVR(struct siolx_board * bp) +{ + unsigned long delay; + + for (delay = SIOLX_CCR_TIMEOUT; delay; delay--) + { + udelay(1); + if (siolx_in(bp, CD186x_GIVR) == (unsigned char) 0xff) + { + return 0; + } + } + printk(KERN_ERR "siolx: board %d: timeout waiting for GIVR.\n", board_No(bp)); + return -1; +} + +static inline void siolx_release_io_range(struct siolx_board * bp) +{ + if((bp->chipnumber == 0) && bp->vaddr) /* only release from first board in a chain */ + { + iounmap((void*)bp->vaddr); + bp->vaddr = 0; + } +} + +/* Must be called with enabled interrupts */ + +static inline void siolx_long_delay(unsigned long delay) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(delay); +} + +/* Reset and setup CD186x chip */ +static int siolx_init_CD186x(struct siolx_board * bp) +{ + unsigned long flags; + int scaler; + int rv = 1; + int rev; + int chip; + + save_flags(flags); /* not sure of need to turn off ints */ + cli(); + if(siolx_wait_CCR(bp)) + { + restore_flags(flags); + return 0; /* Wait for CCR ready */ + } + siolx_out(bp, CD186x_CAR, 0); + siolx_out(bp, CD186x_GIVR, 0); + siolx_out(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */ + if(siolx_wait_GIVR(bp)) + { + restore_flags(flags); + return 0; + } + sti(); + siolx_long_delay(HZ/20); /* Delay 0.05 sec */ + cli(); + siolx_out(bp, CD186x_GIVR, SIOLX_ID | (bp->chipnumber ? 0x80 : 0)); /* Set ID for this chip */ +#if 0 + siolx_out(bp, CD186x_GICR, 0); /* Clear all bits */ +#endif + scaler = SIOLX_OSCFREQ/1000; + siolx_out(bp, CD186x_PPRH, scaler >> 8); + siolx_out(bp, CD186x_PPRL, scaler & 0xff); + + /* Chip revcode pkgtype + GFRCR SRCR bit 7 + CD180 rev B 0x81 0 + CD180 rev C 0x82 0 + CD1864 rev A 0x82 1 + CD1865 rev A 0x83 1 -- Do not use!!! Does not work. + CD1865 rev B 0x84 1 + -- Thanks to Gwen Wang, Cirrus Logic. 
+ */ + + switch (siolx_in(bp, CD186x_GFRCR)) + { + case 0x82: + chip = 1864; + rev='A'; + break; + case 0x83: + chip = 1865; + rev='A'; + break; + case 0x84: + chip = 1865; + rev='B'; + break; + case 0x85: + chip = 1865; + rev='C'; + break; /* Does not exist at this time */ + default: + chip=-1; + rev='x'; + break; + } + +#if SIOLX_DEBUG > 2 + printk (KERN_DEBUG " GFCR = 0x%02x\n", siolx_in(bp, CD186x_GFRCR) ); +#endif + + siolx_out(bp, CD186x_MSMR, CD186x_MRAR); /* load up match regs with address regs */ + siolx_out(bp, CD186x_TSMR, CD186x_TRAR); + siolx_out(bp, CD186x_RSMR, CD186x_RRAR); + +#if 0 + DEBUGPRINT((KERN_ALERT "match reg values are msmr %x, tsmr %x, rsmr %x.\n", + siolx_in(bp, CD186x_MSMR), + siolx_in(bp, CD186x_TSMR), + siolx_in(bp, CD186x_RSMR))); +#endif + + siolx_out(bp, CD186x_SRCR, SRCR_AUTOPRI | SRCR_GLOBPRI | SRCR_REGACKEN); + /* Setting up prescaler. We need 4 ticks per 1 ms */ + + printk(KERN_INFO"siolx: CD%4.4d%c detected at 0x%lx, IRQ %d, on Aurora asynchronous adapter board %d, chip number %d.\n", + chip, rev, bp->base, bp->irq, board_No(bp), bp->chipnumber); + + bp->chiptype = chip; + bp->chiprev = rev; + + restore_flags(flags); + return rv; +} + + +#ifdef SIOLX_TIMER +void missed_irq (unsigned long data) +{ + if (siolx_in ((struct siolx_board *)data, CD186x_SRSR) & + (SRSR_RREQint | + SRSR_TREQint | + SRSR_MREQint)) + { + printk (KERN_INFO "Missed interrupt... Calling int from timer. \n"); + siolx_interrupt (((struct siolx_board *)data)->irq, + NULL, NULL); + } + missed_irq_timer.expires = jiffies + HZ; + add_timer (&missed_irq_timer); +} +#endif + +/* Main probing routine, also sets irq. */ +static int siolx_probe(struct siolx_board *bp) +{ + unsigned char val1, val2; + + /* Are the I/O ports here ? */ + siolx_out(bp, CD186x_PPRL, 0x5a); + short_pause (); + val1 = siolx_in(bp, CD186x_PPRL); + + siolx_out(bp, CD186x_PPRL, 0xa5); + short_pause (); + val2 = siolx_in(bp, CD186x_PPRL); + + if ((val1 != 0x5a) || (val2 != 0xa5)) + { + printk(KERN_INFO + "siolx: cd serial chip not found at base %ld.\n", + bp->base); + return 1; + } + + /* Reset CD186x */ + if (!siolx_init_CD186x(bp)) + { + return -EIO; + } + +#ifdef SIOLX_TIMER + init_timer (&missed_irq_timer); + missed_irq_timer.function = missed_irq; + missed_irq_timer.data = (unsigned long) bp; + missed_irq_timer.expires = jiffies + HZ; + add_timer (&missed_irq_timer); +#endif + return 0; +} + +/* + * + * Interrupt processing routines. + * */ + +static inline void siolx_mark_event(struct siolx_port * port, int event) +{ + /* + * I'm not quite happy with current scheme all serial + * drivers use their own BH routine. + * It seems this easily can be done with one BH routine + * serving for all serial drivers. + * For now I must introduce another one - SIOLX_BH. + * Still hope this will be changed in near future. + * -- Dmitry. + */ + /* I use a module parameter that can be set at module + * load time so that this driver can be downloaded into + * a kernel where the value of SIOLX_BX has been allocated + * to something else. 
This kludge was not necessary + * in the ASLX driver because AURORA_BH had already + * been allocated for the sparc and there was no + * similar driver for x86 while the ASLX driver probably + * will not work for the SPARC and is not guaranteed to + * do so (at some point I should clean this situation up) -- Joachim*/ + set_bit(event, &port->event); + queue_task(&port->tqueue, &tq_siolx); + mark_bh(siolx_bhindex); +} + +static inline struct siolx_port * siolx_get_port(struct siolx_board * bp, + unsigned char const * what) +{ + unsigned char channel; + struct siolx_port * port; + + channel = siolx_in(bp, CD186x_GICR) >> GICR_CHAN_OFF; + if (channel < CD186x_NCH) + { + port = bp->portlist; + while(port) + { + if(channel == 0) + { + break; + } + port = port->next_by_board; + --channel; + } + + if(port && (port->flags & ASYNC_INITIALIZED)) /* port should be opened */ + { + return port; + } + } + printk(KERN_INFO "sx%d: %s interrupt from invalid port %d\n", + board_No(bp), what, channel); + return NULL; +} + + +static inline void siolx_receive_exc(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char status; + unsigned char ch; + + if (!(port = siolx_get_port(bp, "Receive"))) + return; + + tty = port->tty; + if (tty->flip.count >= TTY_FLIPBUF_SIZE) + { + printk(KERN_INFO "sx%d: port %d: Working around flip buffer overflow.\n", + board_No(bp), port_No(port)); + return; + } + +#ifdef SIOLX_REPORT_OVERRUN + status = siolx_in(bp, CD186x_RCSR); + if (status & RCSR_OE) + { + port->overrun++; +#if SIOLX_DEBUG + printk(KERN_DEBUG "sx%d: port %d: Overrun. Total %ld overruns.\n", + board_No(bp), port_No(port), port->overrun); +#endif + } + status &= port->mark_mask; +#else + status = siolx_in(bp, CD186x_RCSR) & port->mark_mask; +#endif + ch = siolx_in(bp, CD186x_RDR); + if (!status) + { + return; + } + if (status & RCSR_TOUT) + { + printk(KERN_INFO "siolx: board %d: chip %d: port %d: Receiver timeout. Hardware problems ?\n", + board_No(bp), bp->chipnumber, port_No(port)); + return; + + } + else if (status & RCSR_BREAK) + { +#ifdef SIOLX_DEBUG + printk(KERN_DEBUG "siolx: board %d: chip %d: port %d: Handling break...\n", + board_No(bp), bp->chipnumber, port_No(port)); +#endif + *tty->flip.flag_buf_ptr++ = TTY_BREAK; + if (port->flags & ASYNC_SAK) + { + do_SAK(tty); + } + + } + else if (status & RCSR_PE) + { + *tty->flip.flag_buf_ptr++ = TTY_PARITY; + } + else if (status & RCSR_FE) + { + *tty->flip.flag_buf_ptr++ = TTY_FRAME; + } + + else if (status & RCSR_OE) + { + *tty->flip.flag_buf_ptr++ = TTY_OVERRUN; + } + + else + { + *tty->flip.flag_buf_ptr++ = 0; + } + + *tty->flip.char_buf_ptr++ = ch; + tty->flip.count++; + queue_task(&tty->flip.tqueue, &tq_timer); +} + + +static inline void siolx_receive(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char count; + + if (!(port = siolx_get_port(bp, "Receive"))) + return; + + tty = port->tty; + + count = siolx_in(bp, CD186x_RDCR); + +#ifdef SIOLX_REPORT_FIFO + port->hits[count > 8 ? 
9 : count]++; +#endif + + while (count--) + { + if (tty->flip.count >= TTY_FLIPBUF_SIZE) + { + printk(KERN_INFO "siolx: board %d: chip %d: port %d: Working around flip buffer overflow.\n", + board_No(bp), bp->chipnumber, port_No(port)); + break; + } + *tty->flip.char_buf_ptr++ = siolx_in(bp, CD186x_RDR); + *tty->flip.flag_buf_ptr++ = 0; + tty->flip.count++; + } + queue_task(&tty->flip.tqueue, &tq_timer); +} + +static inline void siolx_transmit(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char count; + + if (!(port = siolx_get_port(bp, "Transmit"))) + return; + + tty = port->tty; + + if(port->IER & IER_TXEMPTY) + { + /* FIFO drained */ +#if 0 + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); +#endif + port->IER &= ~IER_TXEMPTY; + siolx_out(bp, CD186x_IER, port->IER); + return; + } + + if(((port->xmit_cnt <= 0) && !port->break_length) || + tty->stopped || tty->hw_stopped) + { +#if 0 + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); +#endif + port->IER &= ~IER_TXRDY; + siolx_out(bp, CD186x_IER, port->IER); + return; + } + + if (port->break_length) + { + if (port->break_length > 0) + { + if (port->COR2 & COR2_ETC) + { + siolx_out(bp, CD186x_TDR, CD186x_C_ESC); + siolx_out(bp, CD186x_TDR, CD186x_C_SBRK); + port->COR2 &= ~COR2_ETC; + } + count = MIN(port->break_length, 0xff); + siolx_out(bp, CD186x_TDR, CD186x_C_ESC); + siolx_out(bp, CD186x_TDR, CD186x_C_DELAY); + siolx_out(bp, CD186x_TDR, count); + if (!(port->break_length -= count)) + { + port->break_length--; + } + } + else + { + siolx_out(bp, CD186x_TDR, CD186x_C_ESC); + siolx_out(bp, CD186x_TDR, CD186x_C_EBRK); + siolx_out(bp, CD186x_COR2, port->COR2); + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_CORCHG2); + port->break_length = 0; + } + return; + } + + count = CD186x_NFIFO; + do + { + siolx_out(bp, CD186x_TDR, port->xmit_buf[port->xmit_tail++]); + port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE-1); + if (--port->xmit_cnt <= 0) + { + break; + } + } while (--count > 0); + + if (port->xmit_cnt <= 0) + { +#if 0 + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); +#endif + port->IER &= ~IER_TXRDY; + siolx_out(bp, CD186x_IER, port->IER); + } + if (port->xmit_cnt <= port->wakeup_chars) + { + siolx_mark_event(port, RS_EVENT_WRITE_WAKEUP); + } +} + + +static inline void siolx_check_modem(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char mcr; + +#ifdef SIOLX_DEBUG + printk (KERN_DEBUG "Modem intr. "); +#endif + if (!(port = siolx_get_port(bp, "Modem"))) + { + return; + } + + tty = port->tty; + + mcr = siolx_in(bp, CD186x_MCR); + DEBUGPRINT((KERN_ALERT "mcr = %02x.\n", mcr)); + + if ((mcr & MCR_CDCHG)) + { +#ifdef SIOLX_DEBUG + DEBUGPRINT((KERN_DEBUG "CD just changed... 
")); +#endif + if (siolx_in(bp, CD186x_MSVR) & MSVR_CD) + { +#ifdef SIOLX_DEBUG + DEBUGPRINT(( "Waking up guys in open.\n")); +#endif + wake_up_interruptible(&port->open_wait); /* note no linefeed in previous print */ + } + else if (!((port->flags & ASYNC_CALLOUT_ACTIVE) && + (port->flags & ASYNC_CALLOUT_NOHUP))) + { +#ifdef SIOLX_DEBUG + DEBUGPRINT(( "Sending HUP.\n")); /* note no linefeed in previous print */ +#endif + MOD_INC_USE_COUNT; + if (schedule_task(&port->tqueue_hangup) == 0) + { + MOD_DEC_USE_COUNT; + } + } + else + { +#ifdef SIOLX_DEBUG + DEBUGPRINT(("Don't need to send HUP.\n")); /* note no linefeed in previous print */ +#endif + } + } + +#ifdef SIOLX_BRAIN_DAMAGED_CTS + if (mcr & MCR_CTSCHG) + { + if (siolx_in(bp, CD186x_MSVR) & MSVR_CTS) + { + tty->hw_stopped = 0; + port->IER |= IER_TXRDY; + if (port->xmit_cnt <= port->wakeup_chars) + siolx_mark_event(port, RS_EVENT_WRITE_WAKEUP); + } + else + { + tty->hw_stopped = 1; + port->IER &= ~IER_TXRDY; + } + siolx_out(bp, CD186x_IER, port->IER); + } + if (mcr & MCR_DSSXHG) + { + if (siolx_in(bp, CD186x_MSVR) & MSVR_DSR) + { + tty->hw_stopped = 0; + port->IER |= IER_TXRDY; + if (port->xmit_cnt <= port->wakeup_chars) + { + siolx_mark_event(port, RS_EVENT_WRITE_WAKEUP); + } + } + else + { + tty->hw_stopped = 1; + port->IER &= ~IER_TXRDY; + } + siolx_out(bp, CD186x_IER, port->IER); + } +#endif /* SIOLX_BRAIN_DAMAGED_CTS */ + + /* Clear change bits */ + siolx_out(bp, CD186x_MCR, 0); +} + +/* The main interrupt processing routine */ +static void siolx_interrupt(int irq, void * dev_id, struct pt_regs * regs) +{ + unsigned char status; + unsigned char rcsr; + struct siolx_board *bp; + + if((irq < 0) || (irq >= SIOLX_NUMINTS)) + { + printk(KERN_ALERT "siolx: bad interrupt value %i.\n", irq); + return; + } + /* walk through all the cards on the interrupt that occurred. */ + for(bp = SiolxIrqRoot[irq]; bp != NULL; bp = bp->next_by_interrupt) + + { + while((readl(bp->intstatus) & PLX_ICSRINTACTIVE) != 0) /* work on on board */ + { + status = siolx_in(bp, CD186x_SRSR); + + if(status & SRSR_RREQint) + { + siolx_in(bp, CD186x_RRAR); + rcsr = siolx_in(bp, CD186x_RCSR); + if(rcsr == 0) + { + siolx_receive(bp); + } + else + { + siolx_receive_exc(bp); + } + } + else if (status & SRSR_TREQint) + { + siolx_in(bp, CD186x_TRAR); + siolx_transmit(bp); + } + else if (status & SRSR_MREQint) + { + siolx_in(bp, CD186x_MRAR); + siolx_check_modem(bp); + } + siolx_out(bp, CD186x_EOIR, 1); /* acknowledge the interrupt */ + bp = bp->next_by_chain; /* go to next chip on card -- maybe this one */ + } /* it does not matter if bp changes all in a chain have same next by interrupt */ + } +} + + +/* + * Setting up port characteristics. + * Must be called with disabled interrupts + */ +static void siolx_change_speed(struct siolx_board *bp, struct siolx_port *port) +{ + struct tty_struct *tty; + unsigned long baud; + long tmp; + unsigned char cor1 = 0, cor3 = 0; + unsigned char mcor1 = 0, mcor2 = 0; + static int again; + + tty = port->tty; + + if(!tty || !tty->termios) + { + return; + } + + port->IER = 0; + port->COR2 = 0; + /* Select port on the board */ + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + + /* The Siolx board doens't implement the RTS lines. + They are used to set the IRQ level. Don't touch them. 
*/ + /* Must check how to apply these to sio16 boards */ + if (SIOLX_CRTSCTS(tty)) + { + port->MSVR = (MSVR_DTR | (siolx_in(bp, CD186x_MSVR) & MSVR_RTS)); + } + else + { + port->MSVR = (siolx_in(bp, CD186x_MSVR) & MSVR_RTS); + } +#ifdef DEBUG_SIOLX + DEBUGPRINT((KERN_DEBUG "siolx: got MSVR=%02x.\n", port->MSVR)); +#endif + baud = C_BAUD(tty); + + if (baud & CBAUDEX) + { + baud &= ~CBAUDEX; + if((baud < 1) || (baud > 2)) + { + port->tty->termios->c_cflag &= ~CBAUDEX; + } + else + { + baud += 15; + } + } + if (baud == 15) + { + if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) + { + baud ++; + } + if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) + { + baud += 2; + } + } + + + if (!baud_table[baud]) + { + /* Drop DTR & exit */ +#ifdef SIOLX_DEBUG + DEBUGPRINT((KERN_DEBUG "siolx: Dropping DTR... Hmm....\n")); +#endif + if (!SIOLX_CRTSCTS (tty)) + { + port->MSVR &= ~ MSVR_DTR; + siolx_out(bp, CD186x_MSVR, port->MSVR ); + } +#ifdef DEBUG_SIOLX + else + { + DEBUGPRINT((KERN_DEBUG "siolx: Can't drop DTR: no DTR.\n")); + } +#endif + return; + } + else + { + /* Set DTR on */ + if (!SIOLX_CRTSCTS (tty)) + { + port ->MSVR |= MSVR_DTR; + } + } + + /* + * Now we must calculate some speed depended things + */ + + /* Set baud rate for port */ + tmp = port->custom_divisor ; + if(tmp) + { + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: Using custom baud rate divisor %ld. \n" + "This is an untested option, please be carefull.\n", + board_No(bp), + bp->chipnumber, + port_No(port), tmp)); + } + else + { + tmp = (((SIOLX_OSCFREQ + baud_table[baud]/2) / baud_table[baud] + + CD186x_TPC/2) / CD186x_TPC); + } + + if ((tmp < 0x10) && time_before(again, jiffies)) + { + again = jiffies + HZ * 60; + /* Page 48 of version 2.0 of the CL-CD1865 databook */ + if (tmp >= 12) + { + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d:Baud rate divisor is %ld. \n" + "Performance degradation is possible.\n" + "Read siolx.txt for more info.\n", + board_No(bp), bp->chipnumber, + port_No (port), tmp)); + } else + { + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: Baud rate divisor is %ld. \n" + " Warning: overstressing Cirrus chip. " + " This might not work.\n" + " Read siolx.txt for more info.\n", + board_No(bp), bp->chipnumber, port_No (port), tmp)); + } + } + + siolx_out(bp, CD186x_RBPRH, (tmp >> 8) & 0xff); + siolx_out(bp, CD186x_TBPRH, (tmp >> 8) & 0xff); + siolx_out(bp, CD186x_RBPRL, tmp & 0xff); + siolx_out(bp, CD186x_TBPRL, tmp & 0xff); + + if (port->custom_divisor) + { + baud = (SIOLX_OSCFREQ + port->custom_divisor/2) / port->custom_divisor; + baud = ( baud + 5 ) / 10; + } else + { + baud = (baud_table[baud] + 5) / 10; /* Estimated CPS */ + } + + /* Two timer ticks seems enough to wakeup something like SLIP driver */ + tmp = ((baud + HZ/2) / HZ) * 2 - CD186x_NFIFO; + port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ? + SERIAL_XMIT_SIZE - 1 : tmp); + + /* Receiver timeout will be transmission time for 1.5 chars */ + tmp = (SIOLX_TPS + SIOLX_TPS/2 + baud/2) / baud; + tmp = (tmp > 0xff) ? 
0xff : tmp; + siolx_out(bp, CD186x_RTPR, tmp); + + switch (C_CSIZE(tty)) + { + case CS5: + cor1 |= COR1_5BITS; + break; + case CS6: + cor1 |= COR1_6BITS; + break; + case CS7: + cor1 |= COR1_7BITS; + break; + case CS8: + cor1 |= COR1_8BITS; + break; + } + + if (C_CSTOPB(tty)) + { + cor1 |= COR1_2SB; + } + + cor1 |= COR1_IGNORE; + if (C_PARENB(tty)) + { + cor1 |= COR1_NORMPAR; + if (C_PARODD(tty)) + { + cor1 |= COR1_ODDP; + } + if (I_INPCK(tty)) + { + cor1 &= ~COR1_IGNORE; + } + } + /* Set marking of some errors */ + port->mark_mask = RCSR_OE | RCSR_TOUT; + if (I_INPCK(tty)) + { + port->mark_mask |= RCSR_FE | RCSR_PE; + } + if (I_BRKINT(tty) || I_PARMRK(tty)) + { + port->mark_mask |= RCSR_BREAK; + } + if (I_IGNPAR(tty)) + { + port->mark_mask &= ~(RCSR_FE | RCSR_PE); + } + if (I_IGNBRK(tty)) + { + port->mark_mask &= ~RCSR_BREAK; + if (I_IGNPAR(tty)) + { + /* Real raw mode. Ignore all */ + port->mark_mask &= ~RCSR_OE; + } + } + /* Enable Hardware Flow Control */ + if (C_CRTSCTS(tty)) + { +#ifdef SIOLX_BRAIN_DAMAGED_CTS + port->IER |= IER_DSR | IER_CTS; + mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD; + mcor2 |= MCOR2_DSROD | MCOR2_CTSOD; + tty->hw_stopped = !(siolx_in(bp, CD186x_MSVR) & (MSVR_CTS|MSVR_DSR)); +#else + port->COR2 |= COR2_CTSAE; +#endif + } + /* Enable Software Flow Control. FIXME: I'm not sure about this */ + /* Some people reported that it works, but I still doubt it */ + if (I_IXON(tty)) + { + port->COR2 |= COR2_TXIBE; + cor3 |= (COR3_FCT | COR3_SCDE); + if (I_IXANY(tty)) + { + port->COR2 |= COR2_IXM; + } + siolx_out(bp, CD186x_SCHR1, START_CHAR(tty)); + siolx_out(bp, CD186x_SCHR2, STOP_CHAR(tty)); + siolx_out(bp, CD186x_SCHR3, START_CHAR(tty)); + siolx_out(bp, CD186x_SCHR4, STOP_CHAR(tty)); + } + if (!C_CLOCAL(tty)) + { + /* Enable CD check */ + port->IER |= IER_CD; + mcor1 |= MCOR1_CDZD; + mcor2 |= MCOR2_CDOD; + } + + if (C_CREAD(tty)) + { + /* Enable receiver */ + port->IER |= IER_RXD; + } + + /* Set input FIFO size (1-8 bytes) */ + cor3 |= SIOLX_RXFIFO; + /* Setting up CD186x channel registers */ + siolx_out(bp, CD186x_COR1, cor1); + siolx_out(bp, CD186x_COR2, port->COR2); + siolx_out(bp, CD186x_COR3, cor3); + /* Make CD186x know about registers change */ + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_CORCHG1 | CCR_CORCHG2 | CCR_CORCHG3); + /* Setting up modem option registers */ +#ifdef DEBUG_SIOLX + DEBUGPRINT((KERN_ALERT "siolx: Mcor1 = %02x, mcor2 = %02x.\n", mcor1, mcor2)); +#endif + siolx_out(bp, CD186x_MCOR1, mcor1); + siolx_out(bp, CD186x_MCOR2, mcor2); + /* Enable CD186x transmitter & receiver */ + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_TXEN | CCR_RXEN); + /* Enable interrupts */ + siolx_out(bp, CD186x_IER, port->IER); + /* And finally set the modem lines... 
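+	   (port->MSVR was assembled above -- with MSVR_DTR standing in
+	   for RTS on these boards -- so the single register write below
+	   latches the complete modem-line state in one store.)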
*/ + siolx_out(bp, CD186x_MSVR, port->MSVR); +} + + +/* Must be called with interrupts enabled */ +static int siolx_setup_port(struct siolx_board *bp, struct siolx_port *port) +{ + unsigned long flags; + + if (port->flags & ASYNC_INITIALIZED) + { + return 0; + } + + if (!port->xmit_buf) + { + /* We may sleep in get_free_page() */ + unsigned long tmp; + + if (!(tmp = get_free_page(GFP_KERNEL))) + { + return -ENOMEM; + } + + if (port->xmit_buf) + { + free_page(tmp); + return -ERESTARTSYS; + } + port->xmit_buf = (unsigned char *) tmp; + } + + save_flags(flags); cli(); + + if (port->tty) + { + clear_bit(TTY_IO_ERROR, &port->tty->flags); + } + + port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; + siolx_change_speed(bp, port); + port->flags |= ASYNC_INITIALIZED; + + restore_flags(flags); + return 0; +} + + +/* Must be called with interrupts disabled */ +static void siolx_shutdown_port(struct siolx_board *bp, struct siolx_port *port) +{ + struct tty_struct *tty; + + if (!(port->flags & ASYNC_INITIALIZED)) + { + return; + } + +#ifdef SIOLX_REPORT_OVERRUN + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: Total %ld overruns were detected.\n", + board_No(bp), bp->chipnumber, port_No(port), port->overrun)); +#endif +#ifdef SIOLX_REPORT_FIFO + { + int i; + + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: FIFO hits [ ", + board_No(bp), bp->chipnumber, port_No(port))); + for (i = 0; i < 10; i++) + { + DEBUGPRINT(("%ld ", port->hits[i])); + } + DEBUGPRINT(("].\n")); + } +#endif + if (port->xmit_buf) + { + free_page((unsigned long) port->xmit_buf); + port->xmit_buf = NULL; + } + + /* Select port */ + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + + if (!(tty = port->tty) || C_HUPCL(tty)) + { + /* Drop DTR */ + siolx_out(bp, CD186x_MSVDTR, 0); + } + + /* Reset port */ + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_SOFTRESET); + /* Disable all interrupts from this port */ + port->IER = 0; + siolx_out(bp, CD186x_IER, port->IER); + + if (tty) + { + set_bit(TTY_IO_ERROR, &tty->flags); + } + port->flags &= ~ASYNC_INITIALIZED; + + /* + * If this is the last opened port on the board + * shutdown whole board + */ + MOD_DEC_USE_COUNT; +} + + +static int block_til_ready(struct tty_struct *tty, struct file * filp, + struct siolx_port *port) +{ + DECLARE_WAITQUEUE(wait, current); + struct siolx_board *bp = port_Board(port); + int retval; + int do_clocal = 0; + int CD; + + /* + * If the device is in the middle of being closed, then block + * until it's done, and then try again. + */ + if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) + { + interruptible_sleep_on(&port->close_wait); + if (port->flags & ASYNC_HUP_NOTIFY) + { + return -EAGAIN; + } + else + { + return -ERESTARTSYS; + } + } + + /* + * If this is a callout device, then just make sure the normal + * device isn't being used. + */ + if (tty->driver.subtype == SIOLX_TYPE_CALLOUT) + { + if (port->flags & ASYNC_NORMAL_ACTIVE) + { + return -EBUSY; + } + if ((port->flags & ASYNC_CALLOUT_ACTIVE) && + (port->flags & ASYNC_SESSION_LOCKOUT) && + (port->session != current->session)) + { + return -EBUSY; + } + if ((port->flags & ASYNC_CALLOUT_ACTIVE) && + (port->flags & ASYNC_PGRP_LOCKOUT) && + (port->pgrp != current->pgrp)) + { + return -EBUSY; + } + port->flags |= ASYNC_CALLOUT_ACTIVE; + return 0; + } + + /* + * If non-blocking mode is set, or the port is not enabled, + * then make the check up front and then exit. 
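+	 * For instance (hypothetical device node; the base minor is set
+	 * when the module loads): open("/dev/ttyS64", O_RDWR|O_NONBLOCK)
+	 * returns at once rather than sleeping on open_wait for carrier.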
+ */ + if ((filp->f_flags & O_NONBLOCK) || + (tty->flags & (1 << TTY_IO_ERROR))) + { + if (port->flags & ASYNC_CALLOUT_ACTIVE) + { + return -EBUSY; + } + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (port->flags & ASYNC_CALLOUT_ACTIVE) + { + if (port->normal_termios.c_cflag & CLOCAL) + { + do_clocal = 1; + } + } + else + { + if (C_CLOCAL(tty)) + { + do_clocal = 1; + } + } + + /* + * Block waiting for the carrier detect and the line to become + * free (i.e., not in use by the callout). While we are in + * this loop, info->count is dropped by one, so that + * rs_close() knows when to free things. We restore it upon + * exit, either normal or abnormal. + */ + retval = 0; + add_wait_queue(&port->open_wait, &wait); + cli(); + if (!tty_hung_up_p(filp)) + { + port->count--; + } + sti(); + port->blocked_open++; + while (1) + { + cli(); + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + CD = siolx_in(bp, CD186x_MSVR) & MSVR_CD; + if (!(port->flags & ASYNC_CALLOUT_ACTIVE)) + { + if (SIOLX_CRTSCTS (tty)) + { + /* Activate RTS */ + port->MSVR |= MSVR_DTR; + siolx_out (bp, CD186x_MSVR, port->MSVR); + } + else + { + /* Activate DTR */ + port->MSVR |= MSVR_DTR; + siolx_out (bp, CD186x_MSVR, port->MSVR); + } + } + sti(); + set_current_state(TASK_INTERRUPTIBLE); + if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) + { + if (port->flags & ASYNC_HUP_NOTIFY) + { + retval = -EAGAIN; + } + else + { + retval = -ERESTARTSYS; + } + break; + } + if (!(port->flags & ASYNC_CALLOUT_ACTIVE) && + !(port->flags & ASYNC_CLOSING) && + (do_clocal || CD)) + { + break; + } + if (signal_pending(current)) + { + retval = -ERESTARTSYS; + break; + } + schedule(); + } + current->state = TASK_RUNNING; + remove_wait_queue(&port->open_wait, &wait); + if (!tty_hung_up_p(filp)) + { + port->count++; + } + port->blocked_open--; + if (retval) + { + return retval; + } + + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; +} + +static inline struct siolx_port *siolx_portstruc(register int line) +{ + register struct siolx_port *pp; + + line -= siolx_driver.minor_start; + for(pp = siolx_port_root; (pp != NULL) && (line >= 0); --line, pp = pp->next_by_global_list) + { + if(line == 0) + { + return pp; + } + } + return NULL; +} + + +static int siolx_open(struct tty_struct * tty, struct file * filp) +{ + int error; + struct siolx_port * port; + struct siolx_board * bp; + unsigned long flags; + + port = siolx_portstruc(MINOR(tty->device)); + + if(port == NULL) + { + return -ENODEV; + } + bp = port->board; + if(bp == NULL) + { + return -ENODEV; + } + +#ifdef DEBUG_SIOLX + printk (KERN_DEBUG "Board = %d, bp = %p, port = %p, portno = %d.\n", + bp->boardnumber, bp, port, siolx_portstruc(MINOR(tty->device))); +#endif + + if (siolx_paranoia_check(port, tty->device, "siolx_open")) + return -ENODEV; + + MOD_INC_USE_COUNT; + + port->count++; + tty->driver_data = port; + port->tty = tty; + + if ((error = siolx_setup_port(bp, port))) + return error; + + if ((error = block_til_ready(tty, filp, port))) + return error; + + if ((port->count == 1) && (port->flags & ASYNC_SPLIT_TERMIOS)) { + if (tty->driver.subtype == SIOLX_TYPE_NORMAL) + *tty->termios = port->normal_termios; + else + *tty->termios = port->callout_termios; + save_flags(flags); cli(); + siolx_change_speed(bp, port); + restore_flags(flags); + } + + port->session = current->session; + port->pgrp = current->pgrp; + return 0; +} + + +static void siolx_close(struct tty_struct * tty, struct file * filp) +{ + struct siolx_port *port = (struct siolx_port *) tty->driver_data; + 
+	struct siolx_board *bp;
+	unsigned long flags;
+	unsigned long timeout;
+
+	if (!port || siolx_paranoia_check(port, tty->device, "close"))
+		return;
+
+	save_flags(flags); cli();
+	if (tty_hung_up_p(filp)) {
+		restore_flags(flags);
+		return;
+	}
+
+	bp = port_Board(port);
+	if ((atomic_read(&tty->count) == 1) && (port->count != 1)) {
+		printk(KERN_ERR "siolx: board %d: siolx_close: bad port count;"
+		       " tty->count is 1, port count is %d\n",
+		       board_No(bp), port->count);
+		port->count = 1;
+	}
+	if (--port->count < 0) {
+		printk(KERN_ERR "siolx: board %d: siolx_close: bad port count for port %d: %d\n",
+		       board_No(bp), port_No(port), port->count);
+		port->count = 0;
+	}
+	if (port->count) {
+		restore_flags(flags);
+		return;
+	}
+	port->flags |= ASYNC_CLOSING;
+	/*
+	 * Save the termios structure, since this port may have
+	 * separate termios for callout and dialin.
+	 */
+	if (port->flags & ASYNC_NORMAL_ACTIVE)
+		port->normal_termios = *tty->termios;
+	if (port->flags & ASYNC_CALLOUT_ACTIVE)
+		port->callout_termios = *tty->termios;
+	/*
+	 * Now we wait for the transmit buffer to clear, and we notify
+	 * the line discipline to only process XON/XOFF characters.
+	 */
+	tty->closing = 1;
+	if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
+		tty_wait_until_sent(tty, port->closing_wait);
+	/*
+	 * At this point we stop accepting input. To do this, we
+	 * disable the receive line status interrupts, and tell the
+	 * interrupt driver to stop checking the data ready bit in the
+	 * line status register.
+	 */
+	port->IER &= ~IER_RXD;
+	if (port->flags & ASYNC_INITIALIZED) {
+		port->IER &= ~IER_TXRDY;
+		port->IER |= IER_TXEMPTY;
+		siolx_out(bp, CD186x_CAR, port_No_by_chip(port));
+		siolx_out(bp, CD186x_IER, port->IER);
+		/*
+		 * Before we drop DTR, make sure the UART transmitter
+		 * has completely drained; this is especially
+		 * important if there is a transmit FIFO!
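+		 * A worked example of why: at 9600 baud the chip moves
+		 * roughly 960 characters per second, so draining a full
+		 * 8-byte FIFO takes on the order of 8-9 ms -- hence the
+		 * sleep-and-recheck loop below instead of a busy-wait.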
+ */ + timeout = jiffies+HZ; + while(port->IER & IER_TXEMPTY) { + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(port->timeout); + if (time_after(jiffies, timeout)) { + printk (KERN_INFO "siolx: Timeout waiting for close\n"); + break; + } + } + + } + siolx_shutdown_port(bp, port); + if (tty->driver.flush_buffer) + tty->driver.flush_buffer(tty); + if (tty->ldisc.flush_buffer) + tty->ldisc.flush_buffer(tty); + tty->closing = 0; + port->event = 0; + port->tty = 0; + if (port->blocked_open) { + if (port->close_delay) { + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(port->close_delay); + } + wake_up_interruptible(&port->open_wait); + } + port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE| + ASYNC_CLOSING); + wake_up_interruptible(&port->close_wait); + restore_flags(flags); +} + + +static int siolx_write(struct tty_struct * tty, int from_user, + const unsigned char *buf, int count) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + int c, total = 0; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_write")) + return 0; + + bp = port_Board(port); + + if (!tty || !port->xmit_buf || !tmp_buf) + return 0; + + save_flags(flags); + if (from_user) { + down(&tmp_buf_sem); + while (1) { + c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, + SERIAL_XMIT_SIZE - port->xmit_head)); + if (c <= 0) + break; + + c -= copy_from_user(tmp_buf, buf, c); + if (!c) { + if (!total) + total = -EFAULT; + break; + } + + cli(); + c = MIN(c, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, + SERIAL_XMIT_SIZE - port->xmit_head)); + memcpy(port->xmit_buf + port->xmit_head, tmp_buf, c); + port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1); + port->xmit_cnt += c; + restore_flags(flags); + + buf += c; + count -= c; + total += c; + } + up(&tmp_buf_sem); + } else { + while (1) { + cli(); + c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, + SERIAL_XMIT_SIZE - port->xmit_head)); + if (c <= 0) { + restore_flags(flags); + break; + } + memcpy(port->xmit_buf + port->xmit_head, buf, c); + port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1); + port->xmit_cnt += c; + restore_flags(flags); + + buf += c; + count -= c; + total += c; + } + } + + cli(); + if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped && + !(port->IER & IER_TXRDY)) { + port->IER |= IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + } + restore_flags(flags); + return total; +} + + +static void siolx_put_char(struct tty_struct * tty, unsigned char ch) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_put_char")) + return; + + if (!tty || !port->xmit_buf) + return; + + save_flags(flags); cli(); + + if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { + restore_flags(flags); + return; + } + + port->xmit_buf[port->xmit_head++] = ch; + port->xmit_head &= SERIAL_XMIT_SIZE - 1; + port->xmit_cnt++; + restore_flags(flags); +} + + +static void siolx_flush_chars(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_flush_chars")) + return; + + if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || + !port->xmit_buf) + return; + + save_flags(flags); cli(); + port->IER |= IER_TXRDY; + siolx_out(port_Board(port), CD186x_CAR, port_No_by_chip(port)); + siolx_out(port_Board(port), 
CD186x_IER, port->IER); + restore_flags(flags); +} + + +static int siolx_write_room(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + int ret; + + if (siolx_paranoia_check(port, tty->device, "siolx_write_room")) + return 0; + + ret = SERIAL_XMIT_SIZE - port->xmit_cnt - 1; + if (ret < 0) + ret = 0; + return ret; +} + + +static int siolx_chars_in_buffer(struct tty_struct *tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + + if (siolx_paranoia_check(port, tty->device, "siolx_chars_in_buffer")) + return 0; + + return port->xmit_cnt; +} + + +static void siolx_flush_buffer(struct tty_struct *tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_flush_buffer")) + return; + + save_flags(flags); cli(); + port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; + restore_flags(flags); + + wake_up_interruptible(&tty->write_wait); + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) + (tty->ldisc.write_wakeup)(tty); +} + + +static int siolx_get_modem_info(struct siolx_port * port, unsigned int *value) +{ + struct siolx_board * bp; + unsigned char status; + unsigned int result; + unsigned long flags; + + bp = port_Board(port); + save_flags(flags); cli(); + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + status = siolx_in(bp, CD186x_MSVR); + restore_flags(flags); +#ifdef DEBUG_SIOLX + printk (KERN_DEBUG "Got msvr[%d] = %02x, car = %d.\n", + port_No(port), status, siolx_in (bp, CD186x_CAR)); + printk (KERN_DEBUG "siolx_port = %p, port = %p\n", siolx_port, port); +#endif + if (SIOLX_CRTSCTS(port->tty)) { + result = /* (status & MSVR_RTS) ? */ TIOCM_DTR /* : 0) */ + | ((status & MSVR_DTR) ? TIOCM_RTS : 0) + | ((status & MSVR_CD) ? TIOCM_CAR : 0) + |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */ + | ((status & MSVR_CTS) ? TIOCM_CTS : 0); + } else { + result = /* (status & MSVR_RTS) ? */ TIOCM_RTS /* : 0) */ + | ((status & MSVR_DTR) ? TIOCM_DTR : 0) + | ((status & MSVR_CD) ? TIOCM_CAR : 0) + |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */ + | ((status & MSVR_CTS) ? TIOCM_CTS : 0); + } + put_user(result,(unsigned int *) value); + return 0; +} + + +static int siolx_set_modem_info(struct siolx_port * port, unsigned int cmd, + unsigned int *value) +{ + int error; + unsigned int arg; + unsigned long flags; + struct siolx_board *bp = port_Board(port); + + error = verify_area(VERIFY_READ, value, sizeof(int)); + if (error) + return error; + + get_user(arg, (unsigned long *) value); + switch (cmd) { + case TIOCMBIS: + /* if (arg & TIOCM_RTS) + port->MSVR |= MSVR_RTS; */ + /* if (arg & TIOCM_DTR) + port->MSVR |= MSVR_DTR; */ + + if (SIOLX_CRTSCTS(port->tty)) { + if (arg & TIOCM_RTS) + port->MSVR |= MSVR_DTR; + } else { + if (arg & TIOCM_DTR) + port->MSVR |= MSVR_DTR; + } + break; + case TIOCMBIC: + /* if (arg & TIOCM_RTS) + port->MSVR &= ~MSVR_RTS; */ + /* if (arg & TIOCM_DTR) + port->MSVR &= ~MSVR_DTR; */ + if (SIOLX_CRTSCTS(port->tty)) { + if (arg & TIOCM_RTS) + port->MSVR &= ~MSVR_DTR; + } else { + if (arg & TIOCM_DTR) + port->MSVR &= ~MSVR_DTR; + } + break; + case TIOCMSET: + /* port->MSVR = (arg & TIOCM_RTS) ? (port->MSVR | MSVR_RTS) : + (port->MSVR & ~MSVR_RTS); */ + /* port->MSVR = (arg & TIOCM_DTR) ? (port->MSVR | MSVR_DTR) : + (port->MSVR & ~MSVR_DTR); */ + if (SIOLX_CRTSCTS(port->tty)) { + port->MSVR = (arg & TIOCM_RTS) ? 
+ (port->MSVR | MSVR_DTR) : + (port->MSVR & ~MSVR_DTR); + } else { + port->MSVR = (arg & TIOCM_DTR) ? + (port->MSVR | MSVR_DTR): + (port->MSVR & ~MSVR_DTR); + } + break; + default: + return -EINVAL; + } + save_flags(flags); cli(); + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_MSVR, port->MSVR); + restore_flags(flags); + return 0; +} + + +static inline void siolx_send_break(struct siolx_port * port, unsigned long length) +{ + struct siolx_board *bp = port_Board(port); + unsigned long flags; + + save_flags(flags); cli(); + port->break_length = SIOLX_TPS / HZ * length; + port->COR2 |= COR2_ETC; + port->IER |= IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_COR2, port->COR2); + siolx_out(bp, CD186x_IER, port->IER); + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_CORCHG2); + siolx_wait_CCR(bp); + restore_flags(flags); +} + + +static inline int siolx_set_serial_info(struct siolx_port * port, + struct serial_struct * newinfo) +{ + struct serial_struct tmp; + struct siolx_board *bp = port_Board(port); + int change_speed; + unsigned long flags; + int error; + + error = verify_area(VERIFY_READ, (void *) newinfo, sizeof(tmp)); + if (error) + return error; + + if (copy_from_user(&tmp, newinfo, sizeof(tmp))) + return -EFAULT; + +#if 0 + if ((tmp.irq != bp->irq) || + (tmp.port != bp->base) || + (tmp.type != PORT_CIRRUS) || + (tmp.baud_base != (SIOLX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC) || + (tmp.custom_divisor != 0) || + (tmp.xmit_fifo_size != CD186x_NFIFO) || + (tmp.flags & ~SIOLX_LEGAL_FLAGS)) + return -EINVAL; +#endif + + change_speed = ((port->flags & ASYNC_SPD_MASK) != + (tmp.flags & ASYNC_SPD_MASK)); + change_speed |= (tmp.custom_divisor != port->custom_divisor); + + if (!capable(CAP_SYS_ADMIN)) { + if ((tmp.close_delay != port->close_delay) || + (tmp.closing_wait != port->closing_wait) || + ((tmp.flags & ~ASYNC_USR_MASK) != + (port->flags & ~ASYNC_USR_MASK))) + return -EPERM; + port->flags = ((port->flags & ~ASYNC_USR_MASK) | + (tmp.flags & ASYNC_USR_MASK)); + port->custom_divisor = tmp.custom_divisor; + } else { + port->flags = ((port->flags & ~ASYNC_FLAGS) | + (tmp.flags & ASYNC_FLAGS)); + port->close_delay = tmp.close_delay; + port->closing_wait = tmp.closing_wait; + port->custom_divisor = tmp.custom_divisor; + } + if (change_speed) { + save_flags(flags); cli(); + siolx_change_speed(bp, port); + restore_flags(flags); + } + return 0; +} + + +static inline int siolx_get_serial_info(struct siolx_port * port, + struct serial_struct * retinfo) +{ + struct serial_struct tmp; + struct siolx_board *bp = port_Board(port); + int error; + + error = verify_area(VERIFY_WRITE, (void *) retinfo, sizeof(tmp)); + if (error) + return error; + + memset(&tmp, 0, sizeof(tmp)); + tmp.type = PORT_CIRRUS; + tmp.line = port->driverport; + tmp.port = bp->base; + tmp.irq = bp->irq; + tmp.flags = port->flags; + tmp.baud_base = (SIOLX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC; + tmp.close_delay = port->close_delay * HZ/100; + tmp.closing_wait = port->closing_wait * HZ/100; + tmp.custom_divisor = port->custom_divisor; + tmp.xmit_fifo_size = CD186x_NFIFO; + if (copy_to_user(retinfo, &tmp, sizeof(tmp))) + return -EFAULT; + return 0; +} + + +static int siolx_ioctl(struct tty_struct * tty, struct file * filp, + unsigned int cmd, unsigned long arg) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + int error; + int retval; + + if (siolx_paranoia_check(port, tty->device, "siolx_ioctl")) + return -ENODEV; + + switch (cmd) { + case TCSBRK: /* 
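+			   (reached from userland as ioctl(fd, TCSBRK, arg);)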
SVID version: non-zero arg --> no break */ + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + if (!arg) + siolx_send_break(port, HZ/4); /* 1/4 second */ + return 0; + case TCSBRKP: /* support for POSIX tcsendbreak() */ + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + siolx_send_break(port, arg ? arg*(HZ/10) : HZ/4); + return 0; + case TIOCGSOFTCAR: + error = verify_area(VERIFY_WRITE, (void *) arg, sizeof(long)); + if (error) + return error; + put_user(C_CLOCAL(tty) ? 1 : 0, + (unsigned long *) arg); + return 0; + case TIOCSSOFTCAR: + get_user(arg, (unsigned long *) arg); + tty->termios->c_cflag = + ((tty->termios->c_cflag & ~CLOCAL) | + (arg ? CLOCAL : 0)); + return 0; + case TIOCMGET: + error = verify_area(VERIFY_WRITE, (void *) arg, + sizeof(unsigned int)); + if (error) + return error; + return siolx_get_modem_info(port, (unsigned int *) arg); + case TIOCMBIS: + case TIOCMBIC: + case TIOCMSET: + return siolx_set_modem_info(port, cmd, (unsigned int *) arg); + case TIOCGSERIAL: + return siolx_get_serial_info(port, (struct serial_struct *) arg); + case TIOCSSERIAL: + return siolx_set_serial_info(port, (struct serial_struct *) arg); + default: + return -ENOIOCTLCMD; + } + return 0; +} + + +static void siolx_throttle(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_throttle")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + + /* Use DTR instead of RTS ! */ + if (SIOLX_CRTSCTS (tty)) + { + port->MSVR &= ~MSVR_DTR; + } + else + { + /* Auch!!! I think the system shouldn't call this then. */ + /* Or maybe we're supposed (allowed?) to do our side of hw + handshake anyway, even when hardware handshake is off. + When you see this in your logs, please report.... */ + printk (KERN_ERR "sx%d: Need to throttle, but can't (hardware hs is off)\n", + port_No (port)); + } + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + if (I_IXOFF(tty)) + { + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_SSCH2); + siolx_wait_CCR(bp); + } + siolx_out(bp, CD186x_MSVR, port->MSVR); + restore_flags(flags); +} + + +static void siolx_unthrottle(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_unthrottle")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + /* XXXX Use DTR INSTEAD???? */ + if (SIOLX_CRTSCTS(tty)) { + port->MSVR |= MSVR_DTR; + } /* Else clause: see remark in "siolx_throttle"... 
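+	   (CCR_SSCH1 below has the chip itself transmit the programmed
+	   XON character, SCHR1; siolx_throttle uses the mirror command,
+	   CCR_SSCH2, to send the XOFF character, SCHR2.)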
*/ + + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + if (I_IXOFF(tty)) { + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_SSCH1); + siolx_wait_CCR(bp); + } + siolx_out(bp, CD186x_MSVR, port->MSVR); + restore_flags(flags); +} + + +static void siolx_stop(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_stop")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + port->IER &= ~IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + restore_flags(flags); +} + + +static void siolx_start(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_start")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) { + port->IER |= IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + } + restore_flags(flags); +} + + +/* + * This routine is called from the scheduler tqueue when the interrupt + * routine has signalled that a hangup has occurred. The path of + * hangup processing is: + * + * serial interrupt routine -> (scheduler tqueue) -> + * do_siolx_hangup() -> tty->hangup() -> siolx_hangup() + * + */ +static void do_siolx_hangup(void *private_) +{ + struct siolx_port *port = (struct siolx_port *) private_; + struct tty_struct *tty; + + tty = port->tty; + if (tty) + { + tty_hangup(tty); /* FIXME: module removal race here */ + } + MOD_DEC_USE_COUNT; +} + + +static void siolx_hangup(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + + if (siolx_paranoia_check(port, tty->device, "siolx_hangup")) + return; + + bp = port_Board(port); + + siolx_shutdown_port(bp, port); + port->event = 0; + port->count = 0; + port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE); + port->tty = 0; + wake_up_interruptible(&port->open_wait); +} + + +static void siolx_set_termios(struct tty_struct * tty, struct termios * old_termios) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_set_termios")) + return; + + if (tty->termios->c_cflag == old_termios->c_cflag && + tty->termios->c_iflag == old_termios->c_iflag) + return; + + save_flags(flags); cli(); + siolx_change_speed(port_Board(port), port); + restore_flags(flags); + + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + siolx_start(tty); + } +} + + +static void do_siolx_bh(void) +{ + run_task_queue(&tq_siolx); +} + + +static void do_softint(void *private_) +{ + struct siolx_port *port = (struct siolx_port *) private_; + struct tty_struct *tty; + + if(!(tty = port->tty)) + return; + + if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &port->event)) { + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) + (tty->ldisc.write_wakeup)(tty); + wake_up_interruptible(&tty->write_wait); + } +} + +static int siolx_finish_init_drivers(void) +{ + register struct siolx_board *bp; + register unsigned int count; + unsigned int maxport; + struct siolx_port *port; + struct siolx_port *lastport; + int error; + + bp = siolx_board_root; + + while(bp) + { + 
if(bp->chipnumber == 0) + { + maxport = SIOLX_NPORT; + } + else if((bp->boardtype == BD_16000C) && bp->reario) /* must be second chip of 16000C */ + { + maxport = SIOLX_NPORT/2; + } + else + { + maxport = SIOLX_NPORT; /* must be second chip of 16000P */ + } + + port = NULL; /* probably unnecessary */ + lastport = NULL; + for(count = 0; count < maxport; ++count) + { + port = (struct siolx_port*)kmalloc(sizeof(struct siolx_port), GFP_KERNEL); + if(port == NULL) + { + printk(KERN_ALERT + "siolx: Failed to create port structure on board %p.\n", bp); + break; /* no memory available */ + } + memset(port, 0, sizeof(struct siolx_port)); + + port->callout_termios = siolx_callout_driver.init_termios; + port->normal_termios = siolx_driver.init_termios; + port->magic = SIOLX_MAGIC; + port->tqueue.routine = do_softint; + port->tqueue.data = port; + port->tqueue_hangup.routine = do_siolx_hangup; + port->tqueue_hangup.data = port; + port->close_delay = 50 * HZ/100; + port->closing_wait = 3000 * HZ/100; + init_waitqueue_head(&port->open_wait); + init_waitqueue_head(&port->close_wait); + + port->board = bp; + port->driverport = NumSiolxPorts; + port->boardport = (count + (port->board->chipnumber*SIOLX_NPORT)); /* 0-16 */ + + if(count == 0) + { + bp->portlist = port; + } + else if(lastport) /* if count != 0 lastport should be non-null */ + { + lastport->next_by_board = port; + } + if(siolx_port_root == NULL) + { + siolx_port_root = port; + siolx_port_last = port; + } + else + { + siolx_port_last->next_by_global_list = port; + siolx_port_last = port; + } + lastport = port; + ++NumSiolxPorts; + } + bp = bp->next_by_global_list; + } + + siolx_driver.num = NumSiolxPorts; + + siolx_table = (struct tty_struct **) kmalloc(NumSiolxPorts*sizeof(struct tty_struct *), GFP_KERNEL); + if(siolx_table == NULL) + { + printk(KERN_ALERT "siolx: Could not allocate memory for siolx_table.\n"); + return 1; + } + memset(siolx_table, 0, NumSiolxPorts*sizeof(struct tty_struct *)); + + siolx_termios = (struct termios **) kmalloc(NumSiolxPorts*sizeof(struct termios *), GFP_KERNEL); + if(siolx_termios == NULL) + { + printk(KERN_ALERT "siolx: Could not allocate memory for siolx_termios.\n"); + return 1; + } + memset(siolx_termios, 0, NumSiolxPorts*sizeof(struct termios *)); + + siolx_termios_locked = (struct termios **) kmalloc(NumSiolxPorts*sizeof(struct termios *), GFP_KERNEL); + if(siolx_termios_locked == NULL) + { + printk(KERN_ALERT "siolx: Could not allocate memory for siolx_termios_locked.\n"); + return 1; + } + memset(siolx_termios_locked, 0, NumSiolxPorts*sizeof(struct termios *)); + + siolx_driver.table = siolx_table; /* will be changed */ + siolx_driver.termios = siolx_termios; /* will be changed */ + siolx_driver.termios_locked = siolx_termios_locked; /* will be changed */ + + if ((error = tty_register_driver(&siolx_driver))) + { + if(tmp_buf) + { + free_page((unsigned long)tmp_buf); + tmp_buf = 0; + } + printk(KERN_ERR "siolx: Couldn't register Aurora Asynchronous Adapter driver, error = %d\n", + error); + return 1; + } + if ((error = tty_register_driver(&siolx_callout_driver))) + { + if(tmp_buf) + { + free_page((unsigned long)tmp_buf); + tmp_buf = NULL; + } + tty_unregister_driver(&siolx_driver); + printk(KERN_ERR "siolx: Couldn't register Aurora Asynchronous Adapter callout driver, error = %d\n", + error); + return 1; + } + siolx_driver_registered = 1; + siolx_callout_driver_registered = 1; + return 0; /* success */ +} + +static int siolx_init_drivers(void) +{ + if (!(tmp_buf = (unsigned char *) 
get_free_page(GFP_KERNEL))) + { + printk(KERN_ERR "siolx: Couldn't get free page.\n"); + return 1; + } + init_bh(siolx_bhindex, do_siolx_bh); + memset(&siolx_driver, 0, sizeof(siolx_driver)); + siolx_driver.magic = TTY_DRIVER_MAGIC; + + siolx_driver.driver_name = "aurasiolx"; + siolx_driver.name = "ttyS"; + siolx_driver.major = siolx_major; +#ifdef MODULE + siolx_driver.minor_start = siolx_minorstart; /* changed from command line */ +#else + siolx_driver.minor_start = GetMinorStart(); +#endif + siolx_driver.num = 0; /* will be changed */ + + siolx_driver.type = TTY_DRIVER_TYPE_SERIAL; + siolx_driver.subtype = SIOLX_TYPE_NORMAL; + siolx_driver.init_termios = tty_std_termios; + siolx_driver.init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + siolx_driver.flags = TTY_DRIVER_REAL_RAW; + siolx_driver.refcount = &siolx_refcount; + + siolx_driver.table = siolx_table; /* will be changed */ + siolx_driver.termios = siolx_termios; /* will be changed */ + siolx_driver.termios_locked = siolx_termios_locked; /* will be changed */ + + siolx_driver.open = siolx_open; + siolx_driver.close = siolx_close; + siolx_driver.write = siolx_write; + siolx_driver.put_char = siolx_put_char; + siolx_driver.flush_chars = siolx_flush_chars; + siolx_driver.write_room = siolx_write_room; + siolx_driver.chars_in_buffer = siolx_chars_in_buffer; + siolx_driver.flush_buffer = siolx_flush_buffer; + siolx_driver.ioctl = siolx_ioctl; + siolx_driver.throttle = siolx_throttle; + siolx_driver.unthrottle = siolx_unthrottle; + siolx_driver.set_termios = siolx_set_termios; + siolx_driver.stop = siolx_stop; + siolx_driver.start = siolx_start; + siolx_driver.hangup = siolx_hangup; + + siolx_callout_driver = siolx_driver; + siolx_callout_driver.name = "cuw"; + siolx_callout_driver.major = (siolx_major+1); + siolx_callout_driver.subtype = SIOLX_TYPE_CALLOUT; + + siolx_driver.read_proc = siolx_read_proc; + return 0; +} + + +static void siolx_release_drivers(void) +{ + unsigned int intr_val; + struct siolx_board *bp; + + if(tmp_buf) + { + free_page((unsigned long)tmp_buf); + tmp_buf = NULL; + } + if(siolx_driver_registered) + { + tty_unregister_driver(&siolx_driver); + siolx_driver_registered = 0; + } + if(siolx_callout_driver_registered) + { + tty_unregister_driver(&siolx_callout_driver); + siolx_callout_driver_registered = 0; + } + /* unallocate and turn off ints */ + for(intr_val = 0; intr_val < SIOLX_NUMINTS; ++intr_val) + { + if(SiolxIrqRoot[intr_val] != NULL) + { + for(bp = SiolxIrqRoot[intr_val]; bp != NULL; + bp = bp->next_by_interrupt) + { + SiolxShutdownBoard(bp); /* turn off int; release the plx vaddr space */ + } + free_irq(intr_val, &SiolxIrqRoot[intr_val]); + } + } + +} + +static void siolx_release_memory(void) +{ + register struct siolx_board *bp; + register struct siolx_port *port; + + while(siolx_board_root) + { + bp = siolx_board_root; + siolx_board_root = bp->next_by_global_list; + siolx_release_io_range(bp); /* releases the chip vaddr */ + kfree(bp); + } + while(siolx_port_root) + { + port = siolx_port_root; + if(port->xmit_buf) + { /* should have been done when port shutdown */ + free_page((unsigned long) port->xmit_buf); + port->xmit_buf = NULL; + } + siolx_port_root = port->next_by_global_list; + kfree(port); + } + if(siolx_table) + { + kfree(siolx_table); + siolx_table = NULL; + } + if(siolx_termios) + { + kfree(siolx_termios); + siolx_termios = NULL; + } + if(siolx_termios_locked) + { + kfree(siolx_termios_locked); + siolx_termios_locked = NULL; + } + +#ifdef SIOLX_TIMER + del_timer 
(&missed_irq_timer); +#endif +} + + +static void siolx_cleanup(void) +{ + siolx_release_drivers(); + siolx_release_memory(); +} + +/* + * This routine must be called by kernel at boot time + */ + +static int __init siolx_init(void) +{ + unsigned char bus; + unsigned char devfn; + struct siolx_board *bp; + struct siolx_board *bp2; + unsigned int boardcount; + struct pci_dev *pdev = NULL; + unsigned int ecntl; + unsigned int intr_val; + + printk(KERN_ALERT "aurora interea miseris mortalibus almam extulerat lucem\n"); + printk(KERN_ALERT " referens opera atque labores\n"); + printk(KERN_INFO "siolx: Siolx Aurora Asynchronous Adapter driver v" VERSION ", (c) Telford Tools, Inc.\n"); +#ifdef CONFIG_SIOLX_RTSCTS + printk (KERN_INFO "siolx: DTR/RTS pin is always RTS.\n"); +#else + printk (KERN_INFO "siolx: DTR/RTS pin is RTS when CRTSCTS is on.\n"); +#endif + memset(SiolxIrqRoot, 0, sizeof(SiolxIrqRoot)); + tmp_buf = NULL; + siolx_board_root = NULL; /* clear out the global pointers */ + siolx_board_last = NULL; + siolx_port_root = NULL; + siolx_port_last = NULL; + NumSiolxPorts = 0; + siolx_table = NULL; /* make dynamic */ + siolx_termios = NULL; + siolx_termios_locked = NULL; + siolx_driver_registered = 0; + siolx_callout_driver_registered = 0; + + boardcount = 0; + + if (siolx_init_drivers()) + { + printk(KERN_INFO "siolx: Could not initialize drivers.\n"); + return -EIO; + } + + if (!pci_present()) + { + printk(KERN_INFO "siolx: Could not find PCI bus.\n"); + return -EIO; /* no PCI bus no Aurora cards */ + } + + while(1) + { + pdev = pci_find_device (siolx_vendor_id, siolx_device_id, pdev); + if (!pdev) + { + break; /* found no devices */ + } + + DEBUGPRINT((KERN_ALERT "%s\n", pdev->name)); + DEBUGPRINT((KERN_ALERT "subsystem vendor is %x.\n", + pdev->subsystem_vendor)); + DEBUGPRINT((KERN_ALERT "subsystem device is %x.\n", + pdev->subsystem_device)); + DEBUGPRINT((KERN_ALERT + "BAR0 = %lx\nBAR1 = %lx\nBAR2 = %lx\nBAR3 = %lx\nBAR4 = %lx\nBAR5 = %lx\n", + pci_resource_start(pdev, 0), + pci_resource_start(pdev, 1), + pci_resource_start(pdev, 2), + pci_resource_start(pdev, 3), + pci_resource_start(pdev, 4), + pci_resource_start(pdev, 5))); + DEBUGPRINT((KERN_ALERT + "LAS0 = %lx\nLAS1 = %lx\nLAS2 = %lx\nLAS3 = %lx\nLAS4 = %lx\nLAS5 = %lx\n", + pci_resource_len(pdev, 0), + pci_resource_len(pdev, 1), + pci_resource_len(pdev, 2), + pci_resource_len(pdev, 3), + pci_resource_len(pdev, 4), + pci_resource_len(pdev, 5))); + + if(pdev->subsystem_vendor == siolx_subsystem_vendor) + { + if(pdev->subsystem_device == siolx_subsystem_pci_device) + { + bp = (struct siolx_board*)kmalloc(sizeof(struct siolx_board), GFP_KERNEL); + if(bp == NULL) + { + printk(KERN_ALERT "siolx: Failed to create board structure on board %d.\n", boardcount); + break; /* no memory available */ + } + memset(bp, 0, sizeof(struct siolx_board)); + bp->boardtype = BD_8000P; + } + else if(pdev->subsystem_device == siolx_subsystem_cpci_device) + { + bp = (struct siolx_board*)kmalloc(sizeof(struct siolx_board), GFP_KERNEL); + if(bp == NULL) + { + printk(KERN_ALERT + "siolx: Failed to create board structure on board%p.\n", bp); + break; /* no memory available */ + } + memset(bp, 0, sizeof(struct siolx_board)); + bp->boardtype = BD_8000C; + } + else + { + continue; + } + } + else + { + continue; + } + + DEBUGPRINT((KERN_ALERT "siolx: interrupt is %i.\n", pdev->irq)); + bus = pdev->bus->number; + devfn = pdev->devfn; + DEBUGPRINT((KERN_ALERT "siolx: bus is %x, slot is %x.\n", bus, PCI_SLOT(devfn))); + + if (pci_enable_device(pdev)) + { + 
kfree(bp); + continue; /* enable failed */ + } + pci_set_master(pdev); + + bp->irq = pdev->irq; + SiolxResetBoard(bp, pdev); /* make sure the board is in a known state */ + if(bp->plx_vaddr == 0) + { + printk(KERN_ALERT "siolx: failed to remap plx address space.\n"); + kfree(bp); + continue; + } + bp->vaddr = (unsigned long) ioremap_nocache(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if(bp->vaddr) + { + bp->base = (bp->vaddr + MPASYNC_CHIP1_OFFSET); + bp->boardnumber = boardcount; + if (siolx_probe(bp)) /* failure is nonzero */ + { + iounmap((void*)bp->plx_vaddr); + bp->plx_vaddr = 0; + iounmap((void*)bp->vaddr); + bp->vaddr = 0; + kfree(bp); /* something wrong with board */ + continue; + } + intr_val = bp->irq; + if((intr_val < 0) || (intr_val >= SIOLX_NUMINTS)) + { + printk(KERN_ALERT "siolx: bad interrupt %i board %p.\n", intr_val, bp); + iounmap((void*)bp->plx_vaddr); /* but plx space was remapped */ + bp->plx_vaddr = 0; + iounmap((void*)bp->vaddr); /* release chip space */ + bp->vaddr = 0; + kfree(bp); /* release the board structure */ + continue; + } + bp->next_by_interrupt = SiolxIrqRoot[intr_val]; + SiolxIrqRoot[intr_val] = bp; + if(siolx_board_last == NULL) + { + siolx_board_root = bp; + siolx_board_last = bp; + } + else + { + siolx_board_last->next_by_global_list = bp; + siolx_board_last = bp; + } + bp->chipnumber = 0; + bp->intstatus = bp->plx_vaddr + PLX_ICSR; + bp->next_by_chain = bp; /* one item chain */ + ecntl = readl(bp->plx_vaddr + PLX_ECNTL); + boardcount++; /* added a board */ + if(pci_resource_len(pdev, 2) > MPASYNC_CHIP2_OFFSET) + { + ++(bp->boardtype); /* works because how types are defined 8000X --> 16000X*/ + if(bp->boardtype == BD_16000C) + { + if((ecntl & PLX_ECNTLUSERI) == 0) + { + bp->reario = 1; + } + } + bp2 = (struct siolx_board*)kmalloc(sizeof(struct siolx_board), GFP_KERNEL); + if(bp2 == NULL) + { + printk(KERN_ALERT + "siolx: Failed to create second board structure on board %p.\n", bp); + /* fall through because must turn on ints for other chip */ + } + else + { + memset(bp2, 0, sizeof(struct siolx_board)); /* unnecessary */ + *bp2 = *bp; /* note that all guys in chain point to same next_by interrupt */ + bp->next_by_chain = bp2; /* circular list */ + bp2->next_by_chain = bp;/* now chain two elements*/ + ++(bp2->chipnumber); /* chipnumber 1 */ + bp2->base = (bp2->vaddr + MPASYNC_CHIP2_OFFSET); + if(siolx_probe(bp2)) + { + printk(KERN_ALERT "siolx: Failed to probe second board structure on board %p.\n", bp); + kfree(bp2); + /* fall through because must turn on ints for other chip */ + /* don't release pci memory remap -- still works for other chip */ + } + else if(siolx_board_last == NULL) + { + siolx_board_root = bp2; /* this case should not occur */ + siolx_board_last = bp2; + } + else + { + siolx_board_last->next_by_global_list = bp2; + siolx_board_last = bp2; + } + /* don't increment boardnumber */ + } + } + } + else /* could not remap the cd18xx space */ + { + iounmap((void*)bp->plx_vaddr); /* but plx space was remapped */ + bp->plx_vaddr = 0; + kfree(bp); + } + } + if (boardcount == 0) + { + printk(KERN_INFO "siolx: No Aurora Asynchronous Adapter boards detected.\n"); + siolx_cleanup(); /* don't need any allocated memory */ + return -EIO; + } + if (siolx_finish_init_drivers()) + { + printk(KERN_INFO "siolx: Could not finish driver initialization.\n"); + siolx_cleanup(); + return -EIO; + } + + for(intr_val = 0; intr_val < SIOLX_NUMINTS; ++intr_val) /* trying to install as few int handlers as possible */ + { /* one for each group of 
boards (actually chips) on a given irq */ + if(SiolxIrqRoot[intr_val] != NULL) + { + if (request_irq(intr_val, siolx_interrupt, SA_SHIRQ, "siolx Aurora Asynchronous Adapter", + &SiolxIrqRoot[intr_val]) == 0) + /* interrupts on perboard basis + * cycle through chips and then + * ports */ + /* NOTE PLX INTS ARE OFF -- so turn them on */ + { + for(bp = SiolxIrqRoot[intr_val]; bp != NULL; bp = bp->next_by_interrupt) + { + writel(PLX_ICSRLCLINTPCI | PLX_ICSRPCIINTS, bp->plx_vaddr + PLX_ICSR); /* enable interrupts */ + } + } + else + { + printk(KERN_ALERT "siolx: Unable to get interrupt, board set up not complete %i.\n", intr_val); + /* no interrupts but on all lists */ + } + } + } + return 0; +} + +module_init(siolx_init); +module_exit(siolx_cleanup); +MODULE_DESCRIPTION("multiport Aurora asynchronous driver"); +MODULE_AUTHOR("Joachim Martillo "); +MODULE_LICENSE("GPL"); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/cd1865/cdsiolx.h linux.21rc5-ac2/drivers/char/cd1865/cdsiolx.h --- linux.21rc5/drivers/char/cd1865/cdsiolx.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/cd1865/cdsiolx.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,136 @@ +/* -*- linux-c -*- */ +/* + * This file was modified from + * linux/drivers/char/siolx_io8.h -- + * Siolx IO8+ multiport serial driver. + * + * Copyright (C) 1997 Roger Wolff (R.E.Wolff@BitWizard.nl) + * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com) + * Modifications (C) 2002 Telford Tools, Inc. (martillo@telfordtools.com) + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + * */ + +#ifndef __LINUX_SIOLX_H +#define __LINUX_SIOLX_H + +#include + +#ifdef __KERNEL__ + +#define SIOLX_NBOARD 8 + +/* eight ports per chip. */ +#define SIOLX_NPORT 8 +#define SIOLX_PORT(line) ((line) & (SIOLX_NPORT - 1)) + +#define MHz *1000000 /* I'm ashamed of myself. */ + +/* On-board oscillator frequency */ +#define SIOLX_OSCFREQ (33 MHz) +/* oregano is in /1 which mace 66Mhz is in /2 mode */ + +/* Ticks per sec. Used for setting receiver timeout and break length */ +#define SIOLX_TPS 4000 + +/* Yeah, after heavy testing I decided it must be 6. + * Sure, You can change it if needed. + */ +#define SIOLX_RXFIFO 6 /* Max. receiver FIFO size (1-8) */ + +#define SIOLX_MAGIC 0x0907 + +#define SIOLX_CCR_TIMEOUT 10000 /* CCR timeout. 
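+					   (Spent in siolx_wait_CCR as up
+					   to 10000 spins of udelay(1),
+					   i.e. roughly 10 ms.)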
You may need to wait up to
+					   10 milliseconds before the internal
+					   processor is available again after
+					   you give it a command */
+#define SIOLX_NUMINTS 32
+
+struct siolx_board
+{
+	unsigned long flags;
+	unsigned long base;
+	unsigned char irq;
+	unsigned char DTR;
+	unsigned long vaddr;
+	unsigned long plx_vaddr;
+	unsigned long intstatus;
+	struct siolx_board *next_by_chain; /* chains are circular */
+	struct siolx_board *next_by_interrupt; /* only chip 0 */
+	struct siolx_board *next_by_global_list; /* all boards not circular */
+	struct siolx_port *portlist;
+	struct pci_dev pdev;
+	unsigned int chipnumber; /* for 8000X this structure really defines the board
+				  * for 16000X the chain corresponds to a board and each
+				  * structure corresponds to a chip on a single board */
+	unsigned int boardnumber; /* same for all boards/chips in a board chain */
+	unsigned int boardtype;
+	unsigned int chiptype;
+	unsigned int chiprev;
+	unsigned int reario;
+	unsigned int rj45;
};
+
+#define DRIVER_DEBUG() (siolx_debug)
+#define DEBUGPRINT(arg) if(DRIVER_DEBUG()) printk arg
+
+struct siolx_port
+{
+	int magic;
+	int baud_base;
+	int flags;
+	struct tty_struct * tty;
+	int count;
+	int blocked_open;
+	int event;
+	int timeout;
+	int close_delay;
+	long session;
+	long pgrp;
+	unsigned char * xmit_buf;
+	int custom_divisor;
+	int xmit_head;
+	int xmit_tail;
+	int xmit_cnt;
+	struct termios normal_termios;
+	struct termios callout_termios;
+	wait_queue_head_t open_wait;
+	wait_queue_head_t close_wait;
+	struct tq_struct tqueue;
+	struct tq_struct tqueue_hangup;
+	short wakeup_chars;
+	short break_length;
+	unsigned short closing_wait;
+	unsigned char mark_mask;
+	unsigned char IER;
+	unsigned char MSVR;
+	unsigned char COR2;
+#ifdef SIOLX_REPORT_OVERRUN
+	unsigned long overrun;
+#endif
+#ifdef SIOLX_REPORT_FIFO
+	unsigned long hits[10];
+#endif
+	struct siolx_port *next_by_global_list;
+	struct siolx_port *next_by_board;
+	struct siolx_board *board;
+	unsigned int boardport; /* relative to chain 0-15 for 16000X */
+	unsigned int driverport; /* maps to minor device number */
+};
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_SIOLX_H */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/cd1865/Makefile linux.21rc5-ac2/drivers/char/cd1865/Makefile
--- linux.21rc5/drivers/char/cd1865/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/drivers/char/cd1865/Makefile	2003-04-22 16:44:36.000000000 +0100
@@ -0,0 +1,27 @@
+# Copyright (C) 2001 By Joachim Martillo, Telford Tools, Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# File: drivers/net/WAN/atiXX50/Makefile
+#
+# Makefile for the Aurora ESSC based cards
+# Specifically the 2520, 4020, 4520, 8520
+#
+
+all: SILX.o
+
+O_TARGET := SILX.o
+
+obj-y := cd1865.o
+obj-m := $(O_TARGET)
+
+EXTRA_CFLAGS += -I.
+ +include $(TOPDIR)/Rules.make + +clean: + rm -f core *.o *.a *.s *~ + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/cd1865/plx9060.h linux.21rc5-ac2/drivers/char/cd1865/plx9060.h --- linux.21rc5/drivers/char/cd1865/plx9060.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/cd1865/plx9060.h 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,97 @@ +#ifndef _PLX9060_H_ +#define _PLX9060_H_ +/* + * Aurora Cirrus CL-CD180/1865 Async Driver (sio16) + * + * This module contains the definitions for the PLX + * 9060SD PCI controller chip. + * + * COPYRIGHT (c) 1996-1998 BY AURORA TECHNOLOGIES, INC., WALTHAM, MA. + * Modifications Copyright (C) 2002 By Telford Tools, Inc., Boston, MA. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + * file: plx9060.h + * author: cmw + * created: 11/21/1996 + * info: $Id: plx9060.h,v 1.2 2002/06/11 02:50:02 martillo Exp $ + */ + +/* + * $Log: plx9060.h,v $ + * Revision 1.2 2002/06/11 02:50:02 martillo + * using silx_ and SILX_ instead of sx_ and SX_ + * + * Revision 1.1 2002/05/21 17:30:16 martillo + * first pass for the sio16 driver. + * + * Revision 1.4 1999/02/12 15:38:13 bkd + * Changed PLX_ECNTUSER0 to PLX_ECNTLUSERO and added PLX_ECNTLUSERI. + * + * Revision 1.3 1998/03/23 19:35:42 bkd + * Added definitions for all of the missing PLX9060SD registers. + * + * Revision 1.2 1998/03/13 21:02:16 bkd + * Updated copyright date to include 1998. + * + * Revision 1.1 1996/11/23 01:07:46 bkd + * cmw/bkd (PCI port): + * Initial check-in. 
+ * + */ + +/* + * Register definitions + */ + +#define PLX_LAS0RR 0x00 +#define PLX_LAS0BAR 0x04 +#define PLX_LAR 0x08 +#define PLX_ENDR 0x0c +#define PLX_EROMRR 0x10 +#define PLX_EROMBAR 0x14 +#define PLX_LAS0BRD 0x18 +#define PLX_LAS1RR 0x30 +#define PLX_LAS1BAR 0x34 +#define PLX_LAS1BRD 0x38 + +#define PLX_MBR0 0x40 +#define PLX_MBR1 0x44 +#define PLX_MBR2 0x48 +#define PLX_MBR3 0x4c +#define PLX_PCI2LCLDBR 0x60 +#define PLX_LCL2PCIDBR 0x64 +#define PLX_ICSR 0x68 +#define PLX_ECNTL 0x6c + +/* + * Bit definitions + */ + +#define PLX_ECNTLUSERO 0x00010000 /* turn on user output */ +#define PLX_ECNTLUSERI 0x00020000 /* user input */ +#define PLX_ECNTLLDREG 0x20000000 /* reload configuration registers */ +#define PLX_ECNTLLCLRST 0x40000000 /* local bus reset */ +#define PLX_ECNTLINITSTAT 0x80000000 /* mark board init'ed */ + + +#define PLX_ICSRLSERR_ENA 0x00000001 /* enable local bus LSERR# */ +#define PLX_ICSRLSERRP_ENA 0x00000002 /* enable local bus LSERR# PCI */ +#define PLX_ICSRPCIINTS 0x00000100 /* enable PCI interrupts */ +#define PLX_ICSRLCLINTPCI 0x00000800 +#define PLX_ICSRINTACTIVE 0x00008000 /* RO: local interrupt active */ + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/cd1865/siolx.h linux.21rc5-ac2/drivers/char/cd1865/siolx.h --- linux.21rc5/drivers/char/cd1865/siolx.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/cd1865/siolx.h 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,94 @@ +/* -*- linux-c -*- */ +#ifndef _SIOLX_H_ +#define _SIOLX_H_ + +/* + * Modifications Copyright (C) 2002 By Telford Tools, Inc., Boston, MA. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + */ + +#define AURASUBSYSTEM_VENDOR_ID 0x125c +#define AURASUBSYSTEM_MPASYNCPCI 0x0640 +#define AURASUBSYSTEM_MPASYNCcPCI 0x0641 + +/* + * Aurora Cirrus CL-CD180/1865 Async Driver (sio16) + * + */ + +/* + * Register sets. These must match the order of the registers specified + * in the prom on the board! 
+ */
+
+#define MPASYNC_REG_CSR 1
+#define MPASYNC_REG_CD 2
+
+#define MPASYNC_CHIP1_OFFSET 0x080
+#define MPASYNC_CHIP2_OFFSET 0x100
+
+#define MPASYNC_REG_NO_OBP_CSR 1
+#define MPASYNC_REG_NO_OBP_CD 3
+
+#define TX_FIFO 0x8 /* how deep is the chip fifo */
+
+/*
+ * state flags
+ */
+
+/*
+ * the following defines the model types
+ */
+
+#define OREGANO_MODEL(mod) ((mod) == BD_16000P || (mod) == BD_8000P)
+#define MACE_MODEL(mod) ((mod) == BD_16000C || (mod) == BD_8000C)
+
+/*
+ * I/O options:
+ */
+
+#define MACE8_STD 0x0 /* 8000CP -- standard I/O */
+#define MACE8_RJ45 0x1 /* 8000CP -- rear RJ45 I/O */
+
+#define MACE16_STD 0x0 /* 16000CP -- standard I/O */
+#define MACE16_RJ45 0x1 /* 16000CP -- rear RJ45 I/O */
+
+#define SE2_CLK ((unsigned int) 11059200) /* 11.0592 MHz */
+#define SE_CLK ((unsigned int) 14745600) /* 14.7456 MHz */
+#define SE3_CLK ((unsigned int) 33000000) /* 33.3333 MHz */
+
+/* divide x by y, rounded */
+#define ROUND_DIV(x, y) (((x) + ((y) >> 1)) / (y))
+
+/* Calculate a 16 bit baud rate divisor for the given "encoded"
+ * (multiplied by two) baud rate.
+ */
+
+/* chip types: */
+#define CT_UNKNOWN 0x0 /* unknown */
+#define CT_CL_CD180 0x1 /* Cirrus Logic CD-180 */
+#define CT_CL_CD1864 0x2 /* Cirrus Logic CD-1864 */
+#define CT_CL_CD1865 0x3 /* Cirrus Logic CD-1865 */
+
+/* chip revisions: */
+#define CR_UNKNOWN 0x0 /* unknown */
+#define CR_REVA 0x1 /* revision A */
+#define CR_REVB 0x2 /* revision B */
+#define CR_REVC 0x3 /* revision C */
+/* ...and so on ... */
+
+#endif
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/Config.in linux.21rc5-ac2/drivers/char/Config.in
--- linux.21rc5/drivers/char/Config.in	2003-05-28 15:08:52.000000000 +0100
+++ linux.21rc5-ac2/drivers/char/Config.in	2003-05-28 15:31:54.000000000 +0100
@@ -35,6 +35,7 @@
 fi
 bool 'Non-standard serial port support' CONFIG_SERIAL_NONSTANDARD
 if [ "$CONFIG_SERIAL_NONSTANDARD" = "y" ]; then
+   tristate ' Aurora Technology, Inc.
asynchronous PCI cards V2' CONFIG_ATI_CD1865 tristate ' Computone IntelliPort Plus serial support' CONFIG_COMPUTONE tristate ' Comtrol Rocketport support' CONFIG_ROCKETPORT tristate ' Cyclades async mux support' CONFIG_CYCLADES @@ -306,6 +307,9 @@ bool ' Generic SiS support' CONFIG_AGP_SIS bool ' ALI chipset support' CONFIG_AGP_ALI bool ' Serverworks LE/HE support' CONFIG_AGP_SWORKS + if [ "$CONFIG_X86" = "y" ]; then + bool ' NVIDIA chipset support' CONFIG_AGP_NVIDIA + fi if [ "$CONFIG_IA64" = "y" ]; then bool ' HP ZX1 AGP support' CONFIG_AGP_HP_ZX1 fi diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/console.c linux.21rc5-ac2/drivers/char/console.c --- linux.21rc5/drivers/char/console.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/console.c 2003-04-22 16:44:36.000000000 +0100 @@ -2393,7 +2393,7 @@ tty->winsize.ws_row = video_num_lines; tty->winsize.ws_col = video_num_columns; } - if (tty->count == 1) + if (atomic_read(&tty->count) == 1) vcs_make_devfs (currcons, 0); return 0; } @@ -2402,7 +2402,7 @@ { if (!tty) return; - if (tty->count != 1) return; + if (atomic_read(&tty->count) != 1) return; vcs_make_devfs (MINOR (tty->device) - tty->driver.minor_start, 1); tty->driver_data = 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/cyclades.c linux.21rc5-ac2/drivers/char/cyclades.c --- linux.21rc5/drivers/char/cyclades.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/cyclades.c 2003-04-22 16:44:36.000000000 +0100 @@ -2822,7 +2822,7 @@ #ifdef CY_DEBUG_OPEN printk("cyc:cy_close ttyC%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/Config.in linux.21rc5-ac2/drivers/char/drm/Config.in --- linux.21rc5/drivers/char/drm/Config.in 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/Config.in 2003-04-22 16:44:36.000000000 +0100 @@ -6,9 +6,9 @@ # tristate ' 3dfx Banshee/Voodoo3+' CONFIG_DRM_TDFX -#tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA +tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA tristate ' ATI Rage 128' CONFIG_DRM_R128 -dep_tristate ' ATI Radeon' CONFIG_DRM_RADEON $CONFIG_AGP +tristate ' ATI Radeon' CONFIG_DRM_RADEON dep_tristate ' Intel I810' CONFIG_DRM_I810 $CONFIG_AGP dep_mbool ' Enabled XFree 4.1 ioctl interface by default' CONFIG_DRM_I810_XFREE_41 $CONFIG_DRM_I810 dep_tristate ' Intel 830M' CONFIG_DRM_I830 $CONFIG_AGP diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_agpsupport.h linux.21rc5-ac2/drivers/char/drm/drm_agpsupport.h --- linux.21rc5/drivers/char/drm/drm_agpsupport.h 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_agpsupport.h 2003-05-29 01:44:06.000000000 +0100 @@ -279,12 +279,12 @@ break; case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133"; break; + case VIA_APOLLO_KM266: head->chipset = "VIA Apollo KM266 / KL266"; + break; case VIA_APOLLO_KT400: head->chipset = "VIA Apollo KT400"; break; case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; break; - case VIA_APOLLO_P4X400: head->chipset = "VIA Apollo P4X400"; - break; case SIS_GENERIC: head->chipset = "SiS"; break; case AMD_GENERIC: head->chipset = "AMD"; break; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_bufs.h linux.21rc5-ac2/drivers/char/drm/drm_bufs.h --- linux.21rc5/drivers/char/drm/drm_bufs.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_bufs.h 2003-05-29 01:44:06.000000000 +0100 @@ -136,6 +136,7 @@ } map->offset = (unsigned long)map->handle; if ( map->flags & _DRM_CONTAINS_LOCK ) { + dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */ } break; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_context.h linux.21rc5-ac2/drivers/char/drm/drm_context.h --- linux.21rc5/drivers/char/drm/drm_context.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_context.h 2003-05-29 01:44:06.000000000 +0100 @@ -554,7 +554,7 @@ /* Allocate a new queue */ down(&dev->struct_sem); - queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES); + queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES); memset(queue, 0, sizeof(*queue)); atomic_set(&queue->use_count, 1); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_dma.h linux.21rc5-ac2/drivers/char/drm/drm_dma.h --- linux.21rc5/drivers/char/drm/drm_dma.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_dma.h 2003-05-29 01:44:06.000000000 +0100 @@ -30,7 +30,7 @@ */ #include "drmP.h" - +#include "drm_os_linux.h" #include /* For task queue support */ #ifndef __HAVE_DMA_WAITQUEUE @@ -537,8 +537,18 @@ dev->tq.data = dev; #endif +#if __HAVE_VBL_IRQ + init_waitqueue_head(&dev->vbl_queue); + + spin_lock_init( &dev->vbl_lock ); + + INIT_LIST_HEAD( &dev->vbl_sigs.head ); + + dev->vbl_pending = 0; +#endif + /* Before installing handler */ - DRIVER_PREINSTALL(); + DRM(driver_irq_preinstall)(dev); /* Install handler */ ret = request_irq( dev->irq, DRM(dma_service), @@ 
-551,7 +561,7 @@ } /* After installing handler */ - DRIVER_POSTINSTALL(); + DRM(driver_irq_postinstall)(dev); return 0; } @@ -570,7 +580,7 @@ DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq ); - DRIVER_UNINSTALL(); + DRM(driver_irq_uninstall)( dev ); free_irq( irq, dev ); @@ -597,6 +607,142 @@ } } +#if __HAVE_VBL_IRQ + +int DRM(wait_vblank)(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_wait_vblank_t vblwait; + struct timeval now; + int ret = 0; + unsigned int flags; + + if (!dev->irq) + return -EINVAL; + + DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data, + sizeof(vblwait) ); + + switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) { + case _DRM_VBLANK_RELATIVE: + vblwait.request.sequence += atomic_read( &dev->vbl_received ); + vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; + case _DRM_VBLANK_ABSOLUTE: + break; + default: + return -EINVAL; + } + + flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + + if ( flags & _DRM_VBLANK_SIGNAL ) { + unsigned long irqflags; + drm_vbl_sig_t *vbl_sig; + + vblwait.reply.sequence = atomic_read( &dev->vbl_received ); + + spin_lock_irqsave( &dev->vbl_lock, irqflags ); + + /* Check if this task has already scheduled the same signal + * for the same vblank sequence number; nothing to be done in + * that case + */ + list_for_each( ( (struct list_head *) vbl_sig ), &dev->vbl_sigs.head ) { + if (vbl_sig->sequence == vblwait.request.sequence + && vbl_sig->info.si_signo == vblwait.request.signal + && vbl_sig->task == current) + { + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + goto done; + } + } + + if ( dev->vbl_pending >= 100 ) { + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + return -EBUSY; + } + + dev->vbl_pending++; + + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + + if ( !( vbl_sig = kmalloc(sizeof(drm_vbl_sig_t), GFP_KERNEL) ) ) + return -ENOMEM; + + + memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) ); + + vbl_sig->sequence = vblwait.request.sequence; + vbl_sig->info.si_signo = vblwait.request.signal; + vbl_sig->task = current; + + spin_lock_irqsave( &dev->vbl_lock, irqflags ); + + list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head ); + + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + } else { + ret = DRM(vblank_wait)( dev, &vblwait.request.sequence ); + + do_gettimeofday( &now ); + vblwait.reply.tval_sec = now.tv_sec; + vblwait.reply.tval_usec = now.tv_usec; + } + +done: + DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait, + sizeof(vblwait) ); + + return ret; +} + +void DRM(vbl_send_signals)( drm_device_t *dev ) +{ + struct list_head *tmp; + drm_vbl_sig_t *vbl_sig; + unsigned int vbl_seq = atomic_read( &dev->vbl_received ); + unsigned long flags; + + spin_lock_irqsave( &dev->vbl_lock, flags ); + + list_for_each_safe( ( (struct list_head *) vbl_sig ), tmp, &dev->vbl_sigs.head ) { + if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) { + vbl_sig->info.si_code = vbl_seq; + send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task ); + + list_del( (struct list_head *) vbl_sig ); + + + kfree( vbl_sig ); + dev->vbl_pending--; + } + } + + spin_unlock_irqrestore( &dev->vbl_lock, flags ); +} + +#endif /* __HAVE_VBL_IRQ */ + +#else + +int DRM(control)( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_control_t ctl; + + if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) ) + return -EFAULT; + + switch ( ctl.func ) { + case 
DRM_INST_HANDLER: + case DRM_UNINST_HANDLER: + return 0; + default: + return -EINVAL; + } +} + #endif /* __HAVE_DMA_IRQ */ #endif /* __HAVE_DMA */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_drv.h linux.21rc5-ac2/drivers/char/drm/drm_drv.h --- linux.21rc5/drivers/char/drm/drm_drv.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_drv.h 2003-04-22 16:44:36.000000000 +0100 @@ -115,18 +115,34 @@ #ifndef DRIVER_FOPS #define DRIVER_FOPS \ static struct file_operations DRM(fops) = { \ - owner: THIS_MODULE, \ - open: DRM(open), \ - flush: DRM(flush), \ - release: DRM(release), \ - ioctl: DRM(ioctl), \ - mmap: DRM(mmap), \ - read: DRM(read), \ - fasync: DRM(fasync), \ - poll: DRM(poll), \ + .owner = THIS_MODULE, \ + .open = DRM(open), \ + .flush = DRM(flush), \ + .release = DRM(release), \ + .ioctl = DRM(ioctl), \ + .mmap = DRM(mmap), \ + .read = DRM(read), \ + .fasync = DRM(fasync), \ + .poll = DRM(poll), \ } #endif +#ifndef MODULE +/* DRM(options) is called by the kernel to parse command-line options + * passed via the boot-loader (e.g., LILO). It calls the insmod option + * routine, drm_parse_drm. + */ +/* Use an additional macro to avoid preprocessor troubles */ +#define DRM_OPTIONS_FUNC DRM(options) +static int __init DRM(options)( char *str ) +{ + DRM(parse_options)( str ); + return 1; +} + +__setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC ); +#undef DRM_OPTIONS_FUNC +#endif /* * The default number of instances (minor numbers) to initialize. @@ -187,10 +203,8 @@ /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ -#if __HAVE_DMA_IRQ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 }, #endif -#endif #if __REALLY_HAVE_AGP [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 }, @@ -208,6 +222,10 @@ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 }, #endif +#if __HAVE_VBL_IRQ + [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 }, +#endif + DRIVER_IOCTLS }; @@ -292,7 +310,7 @@ dev->map_count = 0; dev->vmalist = NULL; - dev->lock.hw_lock = NULL; + dev->sigdata.lock = dev->lock.hw_lock = NULL; init_waitqueue_head( &dev->lock.lock_queue ); dev->queue_count = 0; dev->queue_reserved = 0; @@ -477,7 +495,7 @@ DRM(dma_takedown)( dev ); #endif if ( dev->lock.hw_lock ) { - dev->lock.hw_lock = NULL; /* SHM removed */ + dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ dev->lock.pid = 0; wake_up_interruptible( &dev->lock.lock_queue ); } @@ -705,7 +723,7 @@ int i; for (i = 0; i < DRM(numdevs); i++) { - if (MINOR(inode->i_rdev) == DRM(minor)[i]) { + if (minor(inode->i_rdev) == DRM(minor)[i]) { dev = &(DRM(device)[i]); break; } @@ -747,8 +765,8 @@ * Begin inline drm_release */ - DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n", - current->pid, dev->device, dev->open_count ); + DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n", + current->pid, (long)dev->device, dev->open_count ); if ( dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && @@ -873,8 +891,9 @@ atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] ); ++priv->ioctl_count; - DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n", - current->pid, cmd, nr, dev->device, priv->authenticated ); + DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", + current->pid, cmd, nr, (long)dev->device, + priv->authenticated ); if ( nr >= DRIVER_IOCTL_COUNT ) { retcode = -EINVAL; @@ -1027,6 +1046,25 @@ atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] ); +#if __HAVE_KERNEL_CTX_SWITCH + /* We no 
longer really hold it, but if we are the next + * agent to request it then we should just be able to + * take it immediately and not eat the ioctl. + */ + dev->lock.pid = 0; + { + __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock; + unsigned int old, new, prev, ctx; + + ctx = lock.context; + do { + old = *plock; + new = ctx; + prev = cmpxchg(plock, old, new); + } while (prev != old); + } + wake_up_interruptible(&dev->lock.lock_queue); +#else DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT ); #if __HAVE_DMA_SCHEDULE @@ -1041,6 +1079,7 @@ DRM_ERROR( "\n" ); } } +#endif /* !__HAVE_KERNEL_CTX_SWITCH */ unblock_all_signals(); return 0; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_fops.h linux.21rc5-ac2/drivers/char/drm/drm_fops.h --- linux.21rc5/drivers/char/drm/drm_fops.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_fops.h 2003-05-29 01:44:06.000000000 +0100 @@ -37,7 +37,7 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev) { - kdev_t minor = MINOR(inode->i_rdev); + int minor = minor(inode->i_rdev); drm_file_t *priv; if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */ @@ -94,25 +94,8 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n", - current->pid, dev->device, dev->open_count); - if ( dev->lock.hw_lock && - _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && - dev->lock.pid == current->pid ) { - DRM_DEBUG( "Process %d closed fd, freeing lock for context %d\n", - current->pid, - _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) ); -#if __HAVE_RELEASE - DRIVER_RELEASE(); -#endif - DRM(lock_free)( dev, &dev->lock.hw_lock->lock, - _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) ); - - /* FIXME: may require heavy-handed reset of - hardware at this point, possibly - processed via a callback to the X - server. 
*/ - } + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", + current->pid, (long)dev->device, dev->open_count); return 0; } @@ -122,7 +105,7 @@ drm_device_t *dev = priv->dev; int retcode; - DRM_DEBUG("fd = %d, device = 0x%x\n", fd, dev->device); + DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, (long)dev->device); retcode = fasync_helper(fd, filp, on, &dev->buf_async); if (retcode < 0) return retcode; return 0; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm.h linux.21rc5-ac2/drivers/char/drm/drm.h --- linux.21rc5/drivers/char/drm/drm.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm.h 2003-04-22 16:44:36.000000000 +0100 @@ -38,10 +38,27 @@ #if defined(__linux__) #include #include /* For _IO* macros */ -#define DRM_IOCTL_NR(n) _IOC_NR(n) -#elif defined(__FreeBSD__) +#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_IOC_VOID _IOC_NONE +#define DRM_IOC_READ _IOC_READ +#define DRM_IOC_WRITE _IOC_WRITE +#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE +#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) +#elif defined(__FreeBSD__) || defined(__NetBSD__) +#if defined(__FreeBSD__) && defined(XFree86Server) +/* Prevent name collision when including sys/ioccom.h */ +#undef ioctl #include -#define DRM_IOCTL_NR(n) ((n) & 0xff) +#define ioctl(a,b,c) xf86ioctl(a,b,c) +#else +#include +#endif /* __FreeBSD__ && xf86ioctl */ +#define DRM_IOCTL_NR(n) ((n) & 0xff) +#define DRM_IOC_VOID IOC_VOID +#define DRM_IOC_READ IOC_OUT +#define DRM_IOC_WRITE IOC_IN +#define DRM_IOC_READWRITE IOC_INOUT +#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) #endif #define XFREE86_VERSION(major,minor,patch,snap) \ @@ -84,6 +101,10 @@ /* Warning: If you change this structure, make sure you change * XF86DRIClipRectRec in the server as well */ +/* KW: Actually it's illegal to change either for + * backwards-compatibility reasons. 
+ */ + typedef struct drm_clip_rect { unsigned short x1; unsigned short y1; @@ -332,6 +353,32 @@ int funcnum; } drm_irq_busid_t; +typedef enum { + _DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */ + _DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */ + _DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */ +} drm_vblank_seq_type_t; + +#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL + +struct drm_wait_vblank_request { + drm_vblank_seq_type_t type; + unsigned int sequence; + unsigned long signal; +}; + +struct drm_wait_vblank_reply { + drm_vblank_seq_type_t type; + unsigned int sequence; + long tval_sec; + long tval_usec; +}; + +typedef union drm_wait_vblank { + struct drm_wait_vblank_request request; + struct drm_wait_vblank_reply reply; +} drm_wait_vblank_t; + typedef struct drm_agp_mode { unsigned long mode; } drm_agp_mode_t; @@ -371,10 +418,9 @@ #define DRM_IOCTL_BASE 'd' #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) -#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size) -#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size) -#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size) - +#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) +#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) +#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) #define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) @@ -427,86 +473,10 @@ #define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t) #define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t) -/* MGA specific ioctls */ -#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) -#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t) -#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42) -#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43) -#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t) -#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t) -#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) -#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t) -#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t) - -/* i810 specific ioctls */ -#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) -#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) -#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) -#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43) -#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44) -#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) -#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46) -#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t) -#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48) -#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t) -#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a) -#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b) -#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t) -#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d ) - - -/* Rage 128 specific ioctls */ -#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) -#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41) -#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t) -#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43) -#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44) -#define DRM_IOCTL_R128_RESET DRM_IO( 0x46) -#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47) -#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t) -#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t) -#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t) -#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, 
drm_r128_blit_t) -#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t) -#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t) -#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t) -#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t) - -/* Radeon specific ioctls */ -#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t) -#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41) -#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t) -#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43) -#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44) -#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45) -#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t) -#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47) -#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t) -#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t) -#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t) -#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) -#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) -#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) - -/* SiS specific ioctls */ -#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t) -#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t) -#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t) -#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t) -#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t) -#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t) -#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49) -#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50) - -/* I830 specific ioctls */ -#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t) -#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t) -#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t) -#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43) -#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44) -#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t) -#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46) -#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t) -#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48) +#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t) + +/* Device specfic ioctls should only be in their respective headers + * The device specific ioctl range is 0x40 to 0x79. */ +#define DRM_COMMAND_BASE 0x40 #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_ioctl.h linux.21rc5-ac2/drivers/char/drm/drm_ioctl.h --- linux.21rc5/drivers/char/drm/drm_ioctl.h 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_ioctl.h 2003-05-29 01:44:06.000000000 +0100 @@ -111,7 +111,7 @@ do { struct pci_dev *pci_dev; - int b, d, f; + int domain, b, d, f; char *p; for(p = dev->unique; p && *p && *p != ':'; p++); @@ -123,6 +123,27 @@ f = (int)simple_strtoul(p+1, &p, 10); if (*p) break; + domain = b >> 8; + b &= 0xff; + +#ifdef __alpha__ + /* + * Find the hose the device is on (the domain number is the + * hose index) and offset the bus by the root bus of that + * hose. 
+ */ + for(pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,NULL); + pci_dev; + pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,pci_dev)) { + struct pci_controller *hose = pci_dev->sysdata; + + if (hose->index == domain) { + b += hose->bus->number; + break; + } + } +#endif + pci_dev = pci_find_slot(b, PCI_DEVFN(d,f)); if (pci_dev) { dev->pdev = pci_dev; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_lock.h linux.21rc5-ac2/drivers/char/drm/drm_lock.h --- linux.21rc5/drivers/char/drm/drm_lock.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_lock.h 2003-05-29 01:44:06.000000000 +0100 @@ -236,7 +236,7 @@ /* Allow signal delivery if lock isn't held */ - if (!_DRM_LOCK_IS_HELD(s->lock->lock) + if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock) || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1; /* Otherwise, set flag to force call to diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_memory.h linux.21rc5-ac2/drivers/char/drm/drm_memory.h --- linux.21rc5/drivers/char/drm/drm_memory.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_memory.h 2003-05-29 01:44:06.000000000 +0100 @@ -313,6 +313,29 @@ return pt; } +void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size) +{ + void *pt; + + if (!size) { + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Mapping 0 bytes at 0x%08lx\n", offset); + return NULL; + } + + if (!(pt = ioremap_nocache(offset, size))) { + spin_lock(&DRM(mem_lock)); + ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; + spin_unlock(&DRM(mem_lock)); + return NULL; + } + spin_lock(&DRM(mem_lock)); + ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; + DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size; + spin_unlock(&DRM(mem_lock)); + return pt; +} + void DRM(ioremapfree)(void *pt, unsigned long size) { int alloc_count; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_os_linux.h linux.21rc5-ac2/drivers/char/drm/drm_os_linux.h --- linux.21rc5/drivers/char/drm/drm_os_linux.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_os_linux.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,56 @@ +#define __NO_VERSION__ + +#include /* For task queue support */ +#include + + +/* For data going from/to the kernel through the ioctl argument */ +#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \ + if ( copy_from_user(&arg1, arg2, arg3) ) \ + return -EFAULT +#define DRM_COPY_TO_USER_IOCTL(arg1, arg2, arg3) \ + if ( copy_to_user(arg1, &arg2, arg3) ) \ + return -EFAULT + + +#warning the author of this code needs to read up on list_entry +#define DRM_GETSAREA() \ +do { \ + struct list_head *list; \ + list_for_each( list, &dev->maplist->head ) { \ + drm_map_list_t *entry = (drm_map_list_t *)list; \ + if ( entry->map && \ + entry->map->type == _DRM_SHM && \ + (entry->map->flags & _DRM_CONTAINS_LOCK) ) { \ + dev_priv->sarea = entry->map; \ + break; \ + } \ + } \ +} while (0) + +#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ +do { \ + DECLARE_WAITQUEUE(entry, current); \ + unsigned long end = jiffies + (timeout); \ + add_wait_queue(&(queue), &entry); \ + \ + for (;;) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if((signed)(end - jiffies) <= 0) { \ + ret = -EBUSY; \ + break; \ + } \ + schedule_timeout((HZ/100 > 1) ? 
HZ/100 : 1); \ + if (signal_pending(current)) { \ + ret = -EINTR; \ + break; \ + } \ + } \ + set_current_state(TASK_RUNNING); \ + remove_wait_queue(&(queue), &entry); \ +} while (0) + + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drmP.h linux.21rc5-ac2/drivers/char/drm/drmP.h --- linux.21rc5/drivers/char/drm/drmP.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drmP.h 2003-05-29 01:44:06.000000000 +0100 @@ -53,6 +53,7 @@ #include #include /* For (un)lock_kernel */ #include +#include #if defined(__alpha__) || defined(__powerpc__) #include /* For pte_wrprotect */ #endif @@ -71,10 +72,7 @@ #include #include "drm.h" -/* page_to_bus for earlier kernels, not optimal in all cases */ -#ifndef page_to_bus -#define page_to_bus(page) ((unsigned int)(virt_to_bus(page_address(page)))) -#endif +#include "drm_os_linux.h" /* DRM template customization defaults */ @@ -209,6 +207,7 @@ (unsigned long)(n),sizeof(*(ptr)))) #endif /* i386 & alpha */ #endif +#define __REALLY_HAVE_SG (__HAVE_SG) /* Begin the DRM... */ @@ -251,41 +250,58 @@ #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) -#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) + /* Backward compatibility section */ +#ifndef minor +#define minor(x) MINOR((x)) +#endif -/* Macros to make printk easier */ +#ifndef MODULE_LICENSE +#define MODULE_LICENSE(x) +#endif -#if ( __GNUC__ > 2 ) -#define DRM_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__, ##arg) -#define DRM_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ - DRM(mem_stats)[area].name , ##arg) -#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) +#ifndef pte_offset_map +#define pte_offset_map pte_offset +#define pte_unmap(pte) +#endif -#if DRM_DEBUG_CODE -#define DRM_DEBUG(fmt, arg...) \ - do { \ - if ( DRM(flags) & DRM_FLAG_DEBUG ) \ - printk(KERN_DEBUG \ - "[" DRM_NAME ":%s] " fmt , \ - __FUNCTION__, \ - ##arg); \ - } while (0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) +static inline struct page * vmalloc_to_page(void * vmalloc_addr) +{ + unsigned long addr = (unsigned long) vmalloc_addr; + struct page *page = NULL; + pgd_t *pgd = pgd_offset_k(addr); + pmd_t *pmd; + pte_t *ptep, pte; + + if (!pgd_none(*pgd)) { + pmd = pmd_offset(pgd, addr); + if (!pmd_none(*pmd)) { + ptep = pte_offset_map(pmd, addr); + pte = *ptep; + if (pte_present(pte)) + page = pte_page(pte); + pte_unmap(ptep); + } + } + return page; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) +#define DRM_RPR_ARG(vma) #else -#define DRM_DEBUG(fmt, arg...) do { } while (0) +#define DRM_RPR_ARG(vma) vma, #endif -#else /* Gcc 2.x */ -/* Work around a C preprocessor bug */ +#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) /* Macros to make printk easier */ #define DRM_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg) + printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg) #define DRM_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \ + printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ DRM(mem_stats)[area].name , ##arg) #define DRM_INFO(fmt, arg...) 
printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) @@ -294,16 +310,13 @@ do { \ if ( DRM(flags) & DRM_FLAG_DEBUG ) \ printk(KERN_DEBUG \ - "[" DRM_NAME ":" __FUNCTION__ "] " fmt , \ - ##arg); \ + "[" DRM_NAME ":%s] " fmt , \ + __FUNCTION__ , ##arg); \ } while (0) #else #define DRM_DEBUG(fmt, arg...) do { } while (0) #endif -#endif /* Gcc 2.x */ - - #define DRM_PROC_LIMIT (PAGE_SIZE-80) #define DRM_PROC_PRINT(fmt, arg...) \ @@ -318,6 +331,9 @@ #define DRM_IOREMAP(map) \ (map)->handle = DRM(ioremap)( (map)->offset, (map)->size ) +#define DRM_IOREMAP_NOCACHE(map) \ + (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size) + #define DRM_IOREMAPFREE(map) \ do { \ if ( (map)->handle && (map)->size ) \ @@ -599,6 +615,17 @@ drm_map_t *map; } drm_map_list_t; +#if __HAVE_VBL_IRQ + +typedef struct drm_vbl_sig { + struct list_head head; + unsigned int sequence; + struct siginfo info; + struct task_struct *task; +} drm_vbl_sig_t; + +#endif + typedef struct drm_device { const char *name; /* Simple driver name */ char *unique; /* Unique identifier: e.g., busid */ @@ -658,6 +685,13 @@ int last_context; /* Last current context */ unsigned long last_switch; /* jiffies at last context switch */ struct tq_struct tq; +#if __HAVE_VBL_IRQ + wait_queue_head_t vbl_queue; + atomic_t vbl_received; + spinlock_t vbl_lock; + drm_vbl_sig_t vbl_sigs; + unsigned int vbl_pending; +#endif cycles_t ctx_start; cycles_t lck_start; #if __HAVE_DMA_HISTOGRAM @@ -725,16 +759,16 @@ /* Mapping support (drm_vm.h) */ extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern void DRM(vm_open)(struct vm_area_struct *vma); extern void DRM(vm_close)(struct vm_area_struct *vma); extern void DRM(vm_shm_close)(struct vm_area_struct *vma); @@ -756,6 +790,7 @@ extern void DRM(free_pages)(unsigned long address, int order, int area); extern void *DRM(ioremap)(unsigned long offset, unsigned long size); +extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size); extern void DRM(ioremapfree)(void *pt, unsigned long size); #if __REALLY_HAVE_AGP @@ -885,6 +920,15 @@ extern int DRM(irq_uninstall)( drm_device_t *dev ); extern void DRM(dma_service)( int irq, void *device, struct pt_regs *regs ); +extern void DRM(driver_irq_preinstall)( drm_device_t *dev ); +extern void DRM(driver_irq_postinstall)( drm_device_t *dev ); +extern void DRM(driver_irq_uninstall)( drm_device_t *dev ); +#if __HAVE_VBL_IRQ +extern int DRM(wait_vblank)(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq); +extern void DRM(vbl_send_signals)( drm_device_t *dev ); +#endif #if __HAVE_DMA_IRQ_BH extern void DRM(dma_immediate_bh)( void *dev ); #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_proc.h linux.21rc5-ac2/drivers/char/drm/drm_proc.h --- linux.21rc5/drivers/char/drm/drm_proc.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_proc.h 2003-05-29 01:44:06.000000000 +0100 @@ -147,10 +147,10 @@ *eof = 0; if (dev->unique) { - DRM_PROC_PRINT("%s 
0x%x %s\n", - dev->name, dev->device, dev->unique); + DRM_PROC_PRINT("%s 0x%lx %s\n", + dev->name, (long)dev->device, dev->unique); } else { - DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device); + DRM_PROC_PRINT("%s 0x%lx\n", dev->name, (long)dev->device); } if (len > request + offset) return request; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_sarea.h linux.21rc5-ac2/drivers/char/drm/drm_sarea.h --- linux.21rc5/drivers/char/drm/drm_sarea.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_sarea.h 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,57 @@ +/* sarea.h -- SAREA definitions -*- linux-c -*- + * + * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Michel Dänzer + */ + +#ifndef _DRM_SAREA_H_ +#define _DRM_SAREA_H_ + +#define SAREA_MAX_DRAWABLES 256 + +typedef struct _drm_sarea_drawable_t { + unsigned int stamp; + unsigned int flags; +} drm_sarea_drawable_t; + +typedef struct _dri_sarea_frame_t { + unsigned int x; + unsigned int y; + unsigned int width; + unsigned int height; + unsigned int fullscreen; +} drm_sarea_frame_t; + +typedef struct _drm_sarea_t { + /* first thing is always the drm locking structure */ + drm_hw_lock_t lock; + /* NOT_DONE: Use readers/writer lock for drawable_lock */ + drm_hw_lock_t drawable_lock; + drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; + drm_sarea_frame_t frame; + drm_context_t dummy_context; +} drm_sarea_t; + +#endif /* _DRM_SAREA_H_ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_stub.h linux.21rc5-ac2/drivers/char/drm/drm_stub.h --- linux.21rc5/drivers/char/drm/drm_stub.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_stub.h 2003-05-29 01:44:06.000000000 +0100 @@ -48,7 +48,7 @@ static int DRM(stub_open)(struct inode *inode, struct file *filp) { - int minor = MINOR(inode->i_rdev); + int minor = minor(inode->i_rdev); int err = -ENODEV; struct file_operations *old_fops; @@ -65,8 +65,8 @@ } static struct file_operations DRM(stub_fops) = { - owner: THIS_MODULE, - open: DRM(stub_open) + .owner = THIS_MODULE, + .open = DRM(stub_open) }; static int DRM(stub_getminor)(const char *name, struct file_operations *fops, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/drm_vm.h linux.21rc5-ac2/drivers/char/drm/drm_vm.h --- linux.21rc5/drivers/char/drm/drm_vm.h 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/drm_vm.h 2003-05-29 01:44:06.000000000 +0100 @@ -57,7 +57,7 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused) + int write_access) { #if __REALLY_HAVE_AGP drm_file_t *priv = vma->vm_file->private_data; @@ -70,7 +70,7 @@ * Find the right map */ - if(!dev->agp->cant_use_aperture) goto vm_nopage_error; + if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error; list_for_each(list, &dev->maplist->head) { r_list = (drm_map_list_t *)list; @@ -141,9 +141,7 @@ return NOPAGE_OOM; get_page(page); -#if 0 /* XXX page_to_bus is not a portable interface available on all platforms. */ - DRM_DEBUG("0x%08lx => 0x%08llx\n", address, (u64)page_to_bus(page)); -#endif + DRM_DEBUG("shm_nopage 0x%lx\n", address); return page; } @@ -245,10 +243,7 @@ get_page(page); -#if 0 /* XXX page_to_bus is not a portable interface available on all platforms. 
*/ - DRM_DEBUG("0x%08lx (page %lu) => 0x%08llx\n", address, page_nr, - (u64)page_to_bus(page)); -#endif + DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr); return page; } @@ -449,12 +444,12 @@ } offset = DRIVER_GET_REG_OFS(); #ifdef __sparc__ - if (io_remap_page_range(vma->vm_start, + if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma) + offset, vma->vm_end - vma->vm_start, vma->vm_page_prot, 0)) #else - if (remap_page_range(vma->vm_start, + if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma) + offset, vma->vm_end - vma->vm_start, vma->vm_page_prot)) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/gamma_dma.c linux.21rc5-ac2/drivers/char/drm/gamma_dma.c --- linux.21rc5/drivers/char/drm/gamma_dma.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/gamma_dma.c 2003-04-22 16:44:36.000000000 +0100 @@ -31,33 +31,32 @@ #include "gamma.h" #include "drmP.h" +#include "drm.h" +#include "gamma_drm.h" #include "gamma_drv.h" #include /* For task queue support */ #include - static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address, unsigned long length) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - GAMMA_WRITE(GAMMA_DMAADDRESS, virt_to_phys((void *)address)); - while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) - ; + (drm_gamma_private_t *)dev->dev_private; + mb(); + while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); + GAMMA_WRITE(GAMMA_DMAADDRESS, address); + while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) cpu_relax(); GAMMA_WRITE(GAMMA_DMACOUNT, length / 4); } void gamma_dma_quiescent_single(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax(); - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; - while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) - ; + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); GAMMA_WRITE(GAMMA_SYNC, 0); @@ -71,56 +70,50 @@ void gamma_dma_quiescent_dual(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax(); - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; - while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) - ; + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); GAMMA_WRITE(GAMMA_BROADCASTMASK, 3); - GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); GAMMA_WRITE(GAMMA_SYNC, 0); - /* Read from first MX */ + /* Read from first MX */ do { - while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) - ; + while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) cpu_relax(); } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); - /* Read from second MX */ + /* Read from second MX */ do { - while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) - ; + while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) cpu_relax(); } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG); } void gamma_dma_ready(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax(); } static inline int gamma_dma_is_ready(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - return !GAMMA_READ(GAMMA_DMACOUNT); + (drm_gamma_private_t *)dev->dev_private; + return(!GAMMA_READ(GAMMA_DMACOUNT)); } 
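/* A minimal sketch, not part of the original patch: the poll-with-cpu_relax()
 * idiom that the gamma_dma.c hunks above introduce in place of empty
 * busy-wait loops, factored into a single helper. The helper name
 * gamma_wait_fifo_space and its "slots" parameter are hypothetical;
 * GAMMA_READ, GAMMA_INFIFOSPACE and the dev_priv declaration (which the
 * GAMMA_READ macro expands against) are taken from the surrounding code.
 */
static inline void gamma_wait_fifo_space(drm_device_t *dev, int slots)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	/* Spin until the chip reports enough free input-FIFO entries,
	 * hinting the CPU that this is a spin-wait loop rather than
	 * looping on an empty statement. */
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < slots)
		cpu_relax();
}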
void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
 {
-	drm_device_t *dev = (drm_device_t *)device;
-	drm_device_dma_t *dma = dev->dma;
+	drm_device_t *dev = (drm_device_t *)device;
+	drm_device_dma_t *dma = dev->dma;
 	drm_gamma_private_t *dev_priv =
-		(drm_gamma_private_t *)dev->dev_private;
+		(drm_gamma_private_t *)dev->dev_private;
 
 	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
+
+	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax();
 	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
 	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
 	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
@@ -164,7 +157,9 @@
 	}
 
 	buf = dma->next_buffer;
-	address = (unsigned long)buf->address;
+	/* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
+	/* So we pass the buffer index value into the physical page offset */
+	address = buf->idx << 12;
 	length = buf->used;
 
 	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
@@ -231,6 +226,9 @@
 		buf->time_dispatched = get_cycles();
 #endif
 
+	/* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
+	address = buf->idx << 12;
+
 	gamma_dma_dispatch(dev, address, length);
 	gamma_free_buffer(dev, dma->this_buffer);
 	dma->this_buffer = buf;
@@ -523,11 +521,11 @@
 		}
 	}
 	if (retcode) {
-		DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
+		DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d %d/%d\n",
			  d->context,
			  last_buf->waiting,
			  last_buf->pending,
-			  DRM_WAITCOUNT(dev, d->context),
+			  (long)DRM_WAITCOUNT(dev, d->context),
			  last_buf->idx,
			  last_buf->list,
			  last_buf->pid,
@@ -581,3 +579,267 @@
 
 	return retcode;
 }
+
+/* =============================================================
+ * DMA initialization, cleanup
+ */
+
+static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
+{
+	drm_gamma_private_t *dev_priv;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_t *buf;
+	int i;
+	struct list_head *list;
+	unsigned long *pgt;
+
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
+			       DRM_MEM_DRIVER );
+	if ( !dev_priv )
+		return -ENOMEM;
+
+	dev->dev_private = (void *)dev_priv;
+
+	memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
+
+	list_for_each(list, &dev->maplist->head) {
+		#warning list_entry() is needed here
+		drm_map_list_t *r_list = (drm_map_list_t *)list;
+		if( r_list->map &&
+		    r_list->map->type == _DRM_SHM &&
+		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
+			dev_priv->sarea = r_list->map;
+			break;
+		}
+	}
+
+	DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 );
+	DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 );
+	DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 );
+	DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 );
+
+	dev_priv->sarea_priv = (drm_gamma_sarea_t *)
+		((u8 *)dev_priv->sarea->handle +
+		 init->sarea_priv_offset);
+
+	if (init->pcimode) {
+		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
+		pgt = buf->address;
+
+		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
+			buf = dma->buflist[i];
+			*pgt = virt_to_phys((void*)buf->address) | 0x07;
+			pgt++;
+		}
+
+		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
+	} else {
+		DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
+
+		DRM_IOREMAP( dev_priv->buffers );
+
+		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
+		pgt = buf->address;
+
+		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
+			buf = dma->buflist[i];
+			*pgt = (unsigned long)buf->address + 0x07;
+			pgt++;
+		}
+
+		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
+
+		while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1) cpu_relax();
+		GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe) ;
+	}
+	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax();
+	GAMMA_WRITE( GAMMA_PAGETABLEADDR,
virt_to_phys((void*)buf->address) );
+	GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );
+
+	return 0;
+}
+
+int gamma_do_cleanup_dma( drm_device_t *dev )
+{
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	if ( dev->dev_private ) {
+		drm_gamma_private_t *dev_priv = dev->dev_private;
+
+		DRM_IOREMAPFREE( dev_priv->buffers );
+
+		DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
+			   DRM_MEM_DRIVER );
+		dev->dev_private = NULL;
+	}
+
+	return 0;
+}
+
+int gamma_dma_init( struct inode *inode, struct file *filp,
+		  unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_gamma_init_t init;
+
+	if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) )
+		return -EFAULT;
+
+	switch ( init.func ) {
+	case GAMMA_INIT_DMA:
+		return gamma_do_init_dma( dev, &init );
+	case GAMMA_CLEANUP_DMA:
+		return gamma_do_cleanup_dma( dev );
+	}
+
+	return -EINVAL;
+}
+
+static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
+{
+	drm_device_dma_t *dma = dev->dma;
+	unsigned int *screenbuf;
+
+	DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+	/* We've DRM_RESTRICTED this DMA buffer */
+
+	screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;
+
+#if 0
+	*buffer++ = 0x180;	/* Tag (FilterMode) */
+	*buffer++ = 0x200;	/* Allow FBColor through */
+	*buffer++ = 0x53B;	/* Tag */
+	*buffer++ = copy->Pitch;
+	*buffer++ = 0x53A;	/* Tag */
+	*buffer++ = copy->SrcAddress;
+	*buffer++ = 0x539;	/* Tag */
+	*buffer++ = copy->WidthHeight; /* Initiates transfer */
+	*buffer++ = 0x53C;	/* Tag - DMAOutputAddress */
+	*buffer++ = virt_to_phys((void*)screenbuf);
+	*buffer++ = 0x53D;	/* Tag - DMAOutputCount */
+	*buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/
+
+	/* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
+	/* Now put it back to the screen */
+
+	*buffer++ = 0x180;	/* Tag (FilterMode) */
+	*buffer++ = 0x400;	/* Allow Sync through */
+	*buffer++ = 0x538;	/* Tag - DMARectangleReadTarget */
+	*buffer++ = 0x155;	/* FBSourceData | count */
+	*buffer++ = 0x537;	/* Tag */
+	*buffer++ = copy->Pitch;
+	*buffer++ = 0x536;	/* Tag */
+	*buffer++ = copy->DstAddress;
+	*buffer++ = 0x535;	/* Tag */
+	*buffer++ = copy->WidthHeight; /* Initiates transfer */
+	*buffer++ = 0x530;	/* Tag - DMAAddr */
+	*buffer++ = virt_to_phys((void*)screenbuf);
+	*buffer++ = 0x531;
+	*buffer++ = copy->Count; /* initiates DMA transfer of color data */
+#endif
+
+	/* need to dispatch it now */
+
+	return 0;
+}
+
+int gamma_dma_copy( struct inode *inode, struct file *filp,
+		  unsigned int cmd, unsigned long arg )
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_gamma_copy_t copy;
+
+	if ( copy_from_user( &copy, (drm_gamma_copy_t *)arg, sizeof(copy) ) )
+		return -EFAULT;
+
+	return gamma_do_copy_dma( dev, &copy );
+}
+
+/* =============================================================
+ * Per Context SAREA Support
+ */
+
+int gamma_getsareactx(struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->dev;
+	drm_ctx_priv_map_t request;
+	drm_map_t *map;
+
+	if (copy_from_user(&request,
+			   (drm_ctx_priv_map_t *)arg,
+			   sizeof(request)))
+		return -EFAULT;
+
+	down(&dev->struct_sem);
+	if ((int)request.ctx_id >= dev->max_context) {
+		up(&dev->struct_sem);
+		return -EINVAL;
+	}
+
+	map = dev->context_sareas[request.ctx_id];
+	up(&dev->struct_sem);
+
+	request.handle = map->handle;
+	if (copy_to_user((drm_ctx_priv_map_t *)arg, &request,
sizeof(request))) + return -EFAULT; + return 0; +} + +int gamma_setsareactx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_priv_map_t request; + drm_map_t *map = NULL; + drm_map_list_t *r_list; + struct list_head *list; + + if (copy_from_user(&request, + (drm_ctx_priv_map_t *)arg, + sizeof(request))) + return -EFAULT; + + down(&dev->struct_sem); + r_list = NULL; + list_for_each(list, &dev->maplist->head) { + r_list = (drm_map_list_t *)list; + if(r_list->map && + r_list->map->handle == request.handle) break; + } + if (list == &(dev->maplist->head)) { + up(&dev->struct_sem); + return -EINVAL; + } + map = r_list->map; + up(&dev->struct_sem); + + if (!map) return -EINVAL; + + down(&dev->struct_sem); + if ((int)request.ctx_id >= dev->max_context) { + up(&dev->struct_sem); + return -EINVAL; + } + dev->context_sareas[request.ctx_id] = map; + up(&dev->struct_sem); + return 0; +} + +/* drm_dma.h hooks +*/ +void DRM(driver_irq_preinstall)( drm_device_t *dev ) { +} + +void DRM(driver_irq_postinstall)( drm_device_t *dev ) { +} + +void DRM(driver_irq_uninstall)( drm_device_t *dev ) { +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/gamma_drm.h linux.21rc5-ac2/drivers/char/drm/gamma_drm.h --- linux.21rc5/drivers/char/drm/gamma_drm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/gamma_drm.h 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,89 @@ +#ifndef _GAMMA_DRM_H_ +#define _GAMMA_DRM_H_ + +typedef struct _drm_gamma_tex_region { + unsigned char next, prev; /* indices to form a circular LRU */ + unsigned char in_use; /* owned by a client, or free? */ + int age; /* tracked by clients to update local LRU's */ +} drm_gamma_tex_region_t; + +typedef struct { + unsigned int GDeltaMode; + unsigned int GDepthMode; + unsigned int GGeometryMode; + unsigned int GTransformMode; +} drm_gamma_context_regs_t; + +typedef struct _drm_gamma_sarea { + drm_gamma_context_regs_t context_state; + + unsigned int dirty; + + + /* Maintain an LRU of contiguous regions of texture space. If + * you think you own a region of texture memory, and it has an + * age different to the one you set, then you are mistaken and + * it has been stolen by another client. If global texAge + * hasn't changed, there is no need to walk the list. + * + * These regions can be used as a proxy for the fine-grained + * texture information of other clients - by maintaining them + * in the same lru which is used to age their own textures, + * clients have an approximate lru for the whole of global + * texture space, and can make informed decisions as to which + * areas to kick out. There is no need to choose whether to + * kick out your own texture or someone else's - simply eject + * them all in LRU order. + */ + +#define GAMMA_NR_TEX_REGIONS 64 + drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1]; + /* Last elt is sentinel */ + int texAge; /* last time texture was uploaded */ + int last_enqueue; /* last time a buffer was enqueued */ + int last_dispatch; /* age of the most recently dispatched buffer */ + int last_quiescent; /* */ + int ctxOwner; /* last context to upload state */ + + int vertex_prim; +} drm_gamma_sarea_t; + +/* WARNING: If you change any of these defines, make sure to wear a bullet + * proof vest because these are part of the stable kernel<->userspace ABI + */ + +/* Gamma specific ioctls + * The device specific ioctl range is 0x40 to 0x79.
+ */ +#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t) +#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t) + +typedef struct drm_gamma_copy { + unsigned int DMAOutputAddress; + unsigned int DMAOutputCount; + unsigned int DMAReadGLINTSource; + unsigned int DMARectangleWriteAddress; + unsigned int DMARectangleWriteLinePitch; + unsigned int DMARectangleWrite; + unsigned int DMARectangleReadAddress; + unsigned int DMARectangleReadLinePitch; + unsigned int DMARectangleRead; + unsigned int DMARectangleReadTarget; +} drm_gamma_copy_t; + +typedef struct drm_gamma_init { + enum { + GAMMA_INIT_DMA = 0x01, + GAMMA_CLEANUP_DMA = 0x02 + } func; + + int sarea_priv_offset; + int pcimode; + unsigned int mmio0; + unsigned int mmio1; + unsigned int mmio2; + unsigned int mmio3; + unsigned int buffers_offset; +} drm_gamma_init_t; + +#endif /* _GAMMA_DRM_H_ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/gamma_drv.c linux.21rc5-ac2/drivers/char/drm/gamma_drv.c --- linux.21rc5/drivers/char/drm/gamma_drv.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/gamma_drv.c 2003-04-22 16:44:36.000000000 +0100 @@ -32,57 +32,18 @@ #include #include "gamma.h" #include "drmP.h" +#include "drm.h" +#include "gamma_drm.h" #include "gamma_drv.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." - -#define DRIVER_NAME "gamma" -#define DRIVER_DESC "3DLabs gamma" -#define DRIVER_DATE "20010216" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 } - - -#define __HAVE_COUNTERS 5 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_DMA -#define __HAVE_COUNTER8 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER9 _DRM_STAT_SPECIAL -#define __HAVE_COUNTER10 _DRM_STAT_MISSED - - #include "drm_auth.h" +#include "drm_agpsupport.h" #include "drm_bufs.h" #include "drm_context.h" #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init gamma_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", gamma_options ); -#endif - - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/gamma_drv.h linux.21rc5-ac2/drivers/char/drm/gamma_drv.h --- linux.21rc5/drivers/char/drm/gamma_drv.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/gamma_drv.h 2003-04-22 16:44:36.000000000 +0100 @@ -32,8 +32,9 @@ #ifndef _GAMMA_DRV_H_ #define _GAMMA_DRV_H_ - typedef struct drm_gamma_private { + drm_gamma_sarea_t *sarea_priv; + drm_map_t *sarea; drm_map_t *buffers; drm_map_t *mmio0; drm_map_t *mmio1; @@ -51,6 +52,11 @@ } \ } while (0) + /* gamma_dma.c */ +extern int gamma_dma_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int gamma_dma_copy( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); extern void gamma_dma_ready(drm_device_t *dev); extern void gamma_dma_quiescent_single(drm_device_t *dev); @@ -63,6 +69,7 @@ extern int gamma_find_devices(void); extern int gamma_found(void); +#define GLINT_DRI_BUF_COUNT 256 #define GAMMA_OFF(reg) \ ((reg < 0x1000) \ @@ -78,7 +85,6 @@ ((reg < 0x10000) ? dev_priv->mmio1->handle : \ ((reg < 0x11000) ? dev_priv->mmio2->handle : \ dev_priv->mmio3->handle)))) - #define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg)) #define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg) #define GAMMA_READ(reg) GAMMA_DEREF(reg) @@ -91,9 +97,11 @@ #define GAMMA_FILTERMODE 0x8c00 #define GAMMA_GCOMMANDINTFLAGS 0x0c50 #define GAMMA_GCOMMANDMODE 0x0c40 +#define GAMMA_QUEUED_DMA_MODE 1<<1 #define GAMMA_GCOMMANDSTATUS 0x0c60 #define GAMMA_GDELAYTIMER 0x0c38 #define GAMMA_GDMACONTROL 0x0060 +#define GAMMA_USE_AGP 1<<1 #define GAMMA_GINTENABLE 0x0808 #define GAMMA_GINTFLAGS 0x0810 #define GAMMA_INFIFOSPACE 0x0018 @@ -101,5 +109,12 @@ #define GAMMA_OUTPUTFIFO 0x2000 #define GAMMA_SYNC 0x8c40 #define GAMMA_SYNC_TAG 0x0188 +#define GAMMA_PAGETABLEADDR 0x0C00 +#define GAMMA_PAGETABLELENGTH 0x0C08 + +#define GAMMA_PASSTHROUGH 0x1FE +#define GAMMA_DMAADDRTAG 0x530 +#define GAMMA_DMACOUNTTAG 0x531 +#define GAMMA_COMMANDINTTAG 0x532 #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/gamma.h linux.21rc5-ac2/drivers/char/drm/gamma.h --- linux.21rc5/drivers/char/drm/gamma.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/gamma.h 2003-04-22 16:44:36.000000000 +0100 @@ -38,9 +38,36 @@ */ #define __HAVE_MTRR 1 +#define DRIVER_AUTHOR "VA Linux Systems Inc." 
+ +#define DRIVER_NAME "gamma" +#define DRIVER_DESC "3DLabs gamma" +#define DRIVER_DATE "20010624" + +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_GAMMA_INIT)] = { gamma_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_GAMMA_COPY)] = { gamma_dma_copy, 1, 1 } + +#define IOCTL_TABLE_NAME DRM(ioctls) +#define IOCTL_FUNC_NAME DRM(ioctl) + +#define __HAVE_COUNTERS 5 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_DMA +#define __HAVE_COUNTER8 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER9 _DRM_STAT_SPECIAL +#define __HAVE_COUNTER10 _DRM_STAT_MISSED + /* DMA customization: */ #define __HAVE_DMA 1 +#define __HAVE_AGP 1 +#define __MUST_HAVE_AGP 0 #define __HAVE_OLD_DMA 1 #define __HAVE_PCI_DMA 1 @@ -61,33 +88,61 @@ #define __HAVE_DMA_QUIESCENT 1 #define DRIVER_DMA_QUIESCENT() do { \ /* FIXME ! */ \ - gamma_dma_quiescent_dual(dev); \ + gamma_dma_quiescent_single(dev); \ return 0; \ } while (0) #define __HAVE_DMA_IRQ 1 #define __HAVE_DMA_IRQ_BH 1 + +#if 1 #define DRIVER_PREINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ - GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000000 ); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 ); \ GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 ); \ } while (0) - #define DRIVER_POSTINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); \ GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 ); \ GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 ); \ GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 ); \ } while (0) +#else +#define DRIVER_POSTINSTALL() do { \ + drm_gamma_private_t *dev_priv = \ + (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002000 ); \ + GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000004 ); \ +} while (0) + +#define DRIVER_PREINSTALL() do { \ + drm_gamma_private_t *dev_priv = \ + (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + GAMMA_WRITE( GAMMA_GCOMMANDMODE, GAMMA_QUEUED_DMA_MODE );\ + GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );\ +} while (0) +#endif #define DRIVER_UNINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); \ GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 ); \ GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 ); \ GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 ); \ } while (0) +#define DRIVER_AGP_BUFFERS_MAP( dev ) \ + ((drm_gamma_private_t *)((dev)->dev_private))->buffers + #endif /* __GAMMA_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i810_dma.c linux.21rc5-ac2/drivers/char/drm/i810_dma.c --- linux.21rc5/drivers/char/drm/i810_dma.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i810_dma.c 2003-04-22 16:44:36.000000000 +0100 @@ -26,21 +26,20 @@ * * Authors: Rickard E. 
(Rik) Faith * Jeff Hartmann - * Keith Whitwell + * Keith Whitwell + * */ #include #include "i810.h" #include "drmP.h" +#include "drm.h" +#include "i810_drm.h" #include "i810_drv.h" #include /* For task queue support */ -#include +#include -/* in case we don't have a 2.3.99-pre6 kernel or later: */ -#ifndef VM_DONTCOPY -#define VM_DONTCOPY 0 -#endif +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) #define I810_BUF_FREE 2 #define I810_BUF_CLIENT 1 @@ -51,29 +50,27 @@ #define RING_LOCALS unsigned int outring, ringmask; volatile char *virt; -#define BEGIN_LP_RING(n) do { \ - if (I810_VERBOSE) \ - DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ - n, __FUNCTION__); \ - if (dev_priv->ring.space < n*4) \ - i810_wait_ring(dev, n*4); \ - dev_priv->ring.space -= n*4; \ - outring = dev_priv->ring.tail; \ - ringmask = dev_priv->ring.tail_mask; \ - virt = dev_priv->ring.virtual_start; \ +#define BEGIN_LP_RING(n) do { \ + if (0) DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i810_wait_ring(dev, n*4); \ + dev_priv->ring.space -= n*4; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ } while (0) -#define ADVANCE_LP_RING() do { \ - if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ - dev_priv->ring.tail = outring; \ - I810_WRITE(LP_RING + RING_TAIL, outring); \ +#define ADVANCE_LP_RING() do { \ + if (0) DRM_DEBUG("ADVANCE_LP_RING\n"); \ + dev_priv->ring.tail = outring; \ + I810_WRITE(LP_RING + RING_TAIL, outring); \ } while(0) -#define OUT_RING(n) do { \ - if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = n; \ - outring += 4; \ - outring &= ringmask; \ +#define OUT_RING(n) do { \ + if (0) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outring += 4; \ + outring &= ringmask; \ } while (0) static inline void i810_print_status_page(drm_device_t *dev) @@ -135,14 +132,14 @@ } static struct file_operations i810_buffer_fops = { - open: DRM(open), - flush: DRM(flush), - release: DRM(release), - ioctl: DRM(ioctl), - mmap: i810_mmap_buffers, - read: DRM(read), - fasync: DRM(fasync), - poll: DRM(poll), + .open = DRM(open), + .flush = DRM(flush), + .release = DRM(release), + .ioctl = DRM(ioctl), + .mmap = i810_mmap_buffers, + .read = DRM(read), + .fasync = DRM(fasync), + .poll = DRM(poll), }; int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) @@ -165,7 +162,7 @@ buf_priv->currently_mapped = I810_BUF_MAPPED; unlock_kernel(); - if (remap_page_range(vma->vm_start, + if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma), vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; @@ -183,28 +180,31 @@ if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL; - if(VM_DONTCOPY != 0) { - down_write( &current->mm->mmap_sem ); - old_fops = filp->f_op; - filp->f_op = &i810_buffer_fops; - dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, - PROT_READ|PROT_WRITE, - MAP_SHARED, - buf->bus_address); - dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; - if ((unsigned long)buf_priv->virtual > -1024UL) { - /* Real error */ - DRM_DEBUG("mmap error\n"); - retcode = (signed int)buf_priv->virtual; - buf_priv->virtual = 0; - } - up_write( &current->mm->mmap_sem ); - } else { - buf_priv->virtual = buf_priv->kernel_virtual; - buf_priv->currently_mapped = I810_BUF_MAPPED; + + + + down_write( &current->mm->mmap_sem ); + + old_fops = filp->f_op; + filp->f_op = 
&i810_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ|PROT_WRITE, + MAP_SHARED, + buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_DEBUG("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = 0; } + + + + up_write( &current->mm->mmap_sem ); + return retcode; } @@ -213,15 +213,21 @@ drm_i810_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; - if(VM_DONTCOPY != 0) { - if(buf_priv->currently_mapped != I810_BUF_MAPPED) - return -EINVAL; - down_write( &current->mm->mmap_sem ); - retcode = do_munmap(current->mm, - (unsigned long)buf_priv->virtual, - (size_t) buf->total); - up_write( &current->mm->mmap_sem ); - } + if(buf_priv->currently_mapped != I810_BUF_MAPPED) + return -EINVAL; + + + + down_write( &current->mm->mmap_sem ); + + retcode = DO_MUNMAP(current->mm, + (unsigned long)buf_priv->virtual, + (size_t) buf->total); + + + + up_write( &current->mm->mmap_sem ); + buf_priv->currently_mapped = I810_BUF_UNMAPPED; buf_priv->virtual = 0; @@ -273,8 +279,9 @@ dev_priv->ring.Size); } if(dev_priv->hw_status_page != 0UL) { - pci_free_consistent(dev->pdev, PAGE_SIZE, (void *)dev_priv->hw_status_page, - dev_priv->dma_status_page); + pci_free_consistent(dev->pdev, PAGE_SIZE, + (void *)dev_priv->hw_status_page, + dev_priv->dma_status_page); /* Need to rewrite hardware status page */ I810_WRITE(0x02080, 0x1ffff000); } @@ -301,8 +308,6 @@ end = jiffies + (HZ*3); while (ring->space < n) { - int i; - ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ring->space = ring->head - (ring->tail+8); if (ring->space < 0) ring->space += ring->Size; @@ -311,13 +316,12 @@ end = jiffies + (HZ*3); iters++; - if((signed)(end - jiffies) <= 0) { + if(time_before(end, jiffies)) { DRM_ERROR("space: %d wanted %d\n", ring->space, n); DRM_ERROR("lockup\n"); goto out_wait_ring; } - - for (i = 0 ; i < 2000 ; i++) ; + udelay(1); } out_wait_ring: @@ -405,9 +409,6 @@ ((u8 *)dev_priv->sarea_map->handle + init->sarea_priv_offset); - atomic_set(&dev_priv->flush_done, 0); - init_waitqueue_head(&dev_priv->flush_queue); - dev_priv->ring.Start = init->ring_start; dev_priv->ring.End = init->ring_end; dev_priv->ring.Size = init->ring_size; @@ -440,8 +441,9 @@ dev_priv->zi1 = init->depth_offset | init->pitch_bits; /* Program Hardware Status Page */ - dev_priv->hw_status_page = (unsigned long)pci_alloc_consistent(dev->pdev, PAGE_SIZE, - &dev_priv->dma_status_page); + dev_priv->hw_status_page = + (unsigned long) pci_alloc_consistent(dev->pdev, PAGE_SIZE, + &dev_priv->dma_status_page); if(dev_priv->hw_status_page == 0UL) { dev->dev_private = (void *)dev_priv; i810_dma_cleanup(dev); @@ -451,7 +453,7 @@ memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE); DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page); - I810_WRITE(0x02080, dev_priv->dma_status_page); + I810_WRITE(0x02080, dev_priv->dma_status_page); DRM_DEBUG("Enabled hardware status page\n"); /* Now we need to init our freelist */ @@ -532,16 +534,12 @@ /* Most efficient way to verify state for the i810 is as it is * emitted. Non-conformant state is silently dropped. - * - * Use 'volatile' & local var tmp to force the emitted values to be - * identical to the verified ones.
*/ static void i810EmitContextVerified( drm_device_t *dev, - volatile unsigned int *code ) + unsigned int *code ) { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; - unsigned int tmp; RING_LOCALS; BEGIN_LP_RING( I810_CTX_SETUP_SIZE ); @@ -553,14 +551,13 @@ OUT_RING( code[I810_CTXREG_ST1] ); for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - - if ((tmp & (7<<29)) == (3<<29) && - (tmp & (0x1f<<24)) < (0x1d<<24)) + if ((code[i] & (7<<29)) == (3<<29) && + (code[i] & (0x1f<<24)) < (0x1d<<24)) { - OUT_RING( tmp ); + OUT_RING( code[i] ); j++; } + else printk("context state dropped!!!\n"); } if (j & 1) @@ -574,7 +571,6 @@ { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; - unsigned int tmp; RING_LOCALS; BEGIN_LP_RING( I810_TEX_SETUP_SIZE ); @@ -585,14 +581,14 @@ OUT_RING( code[I810_TEXREG_MI3] ); for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - if ((tmp & (7<<29)) == (3<<29) && - (tmp & (0x1f<<24)) < (0x1d<<24)) + if ((code[i] & (7<<29)) == (3<<29) && + (code[i] & (0x1f<<24)) < (0x1d<<24)) { - OUT_RING( tmp ); + OUT_RING( code[i] ); j++; } + else printk("texture state dropped!!!\n"); } if (j & 1) @@ -617,9 +613,9 @@ if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( tmp ); - } else - DRM_DEBUG("bad di1 %x (allow %x or %x)\n", - tmp, dev_priv->front_di1, dev_priv->back_di1); + } + else + printk("buffer state dropped\n"); /* invarient: */ @@ -704,7 +700,6 @@ continue; if ( flags & I810_FRONT ) { - DRM_DEBUG("clear front\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -717,7 +712,6 @@ } if ( flags & I810_BACK ) { - DRM_DEBUG("clear back\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -730,7 +724,6 @@ } if ( flags & I810_DEPTH ) { - DRM_DEBUG("clear depth\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -756,8 +749,6 @@ int i; RING_LOCALS; - DRM_DEBUG("swapbuffers\n"); - i810_kernel_lost_context(dev); if (nbox > I810_NR_SAREA_CLIPRECTS) nbox = I810_NR_SAREA_CLIPRECTS; @@ -776,10 +767,6 @@ pbox->y2 > dev_priv->h) continue; - DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n", - pbox[i].x1, pbox[i].y1, - pbox[i].x2, pbox[i].y2); - BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 ); OUT_RING( pitch | (0xCC << 16)); @@ -804,7 +791,7 @@ int nbox = sarea_priv->nbox; unsigned long address = (unsigned long)buf->bus_address; unsigned long start = address - dev->agp->base; - int i = 0, u; + int i = 0; RING_LOCALS; i810_kernel_lost_context(dev); @@ -812,33 +799,16 @@ if (nbox > I810_NR_SAREA_CLIPRECTS) nbox = I810_NR_SAREA_CLIPRECTS; - if (discard) { - u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, - I810_BUF_HARDWARE); - if(u != I810_BUF_CLIENT) { - DRM_DEBUG("xxxx 2\n"); - } - } - if (used > 4*1024) used = 0; if (sarea_priv->dirty) i810EmitState( dev ); - DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n", - address, used, nbox); - - dev_priv->counter++; - DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter); - DRM_DEBUG( "i810_dma_dispatch\n"); - DRM_DEBUG( "start : %lx\n", start); - DRM_DEBUG( "used : %d\n", used); - DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4); - if (buf_priv->currently_mapped == I810_BUF_MAPPED) { - *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | - sarea_priv->vertex_prim | + unsigned int prim = (sarea_priv->vertex_prim & PR_MASK); + + *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | prim | ((used/4)-2)); if (used & 4) { @@ -871,154 +841,62 @@ }
while (++i < nbox); } - BEGIN_LP_RING(10); - OUT_RING( CMD_STORE_DWORD_IDX ); - OUT_RING( 20 ); - OUT_RING( dev_priv->counter ); - OUT_RING( 0 ); - if (discard) { + dev_priv->counter++; + + (void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, + I810_BUF_HARDWARE); + + BEGIN_LP_RING(8); + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( 20 ); + OUT_RING( dev_priv->counter ); OUT_RING( CMD_STORE_DWORD_IDX ); OUT_RING( buf_priv->my_use_idx ); OUT_RING( I810_BUF_FREE ); + OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); + ADVANCE_LP_RING(); } - - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); -} - - -/* Interrupts are only for flushing */ -void i810_dma_service(int irq, void *device, struct pt_regs *regs) -{ - drm_device_t *dev = (drm_device_t *)device; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - u16 temp; - - atomic_inc(&dev->counts[_DRM_STAT_IRQ]); - temp = I810_READ16(I810REG_INT_IDENTITY_R); - temp = temp & ~(0x6000); - if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R, - temp); /* Clear all interrupts */ - else - return; - - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); } -void i810_dma_immediate_bh(void *device) -{ - drm_device_t *dev = (drm_device_t *) device; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -static inline void i810_dma_emit_flush(drm_device_t *dev) -{ - drm_i810_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - i810_kernel_lost_context(dev); - - BEGIN_LP_RING(2); - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( GFX_OP_USER_INTERRUPT ); - ADVANCE_LP_RING(); - -/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ -/* atomic_set(&dev_priv->flush_done, 1); */ -/* wake_up_interruptible(&dev_priv->flush_queue); */ -} - -static inline void i810_dma_quiescent_emit(drm_device_t *dev) +void i810_dma_quiescent(drm_device_t *dev) { drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; +/* printk("%s\n", __FUNCTION__); */ + i810_kernel_lost_context(dev); BEGIN_LP_RING(4); OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); - OUT_RING( GFX_OP_USER_INTERRUPT ); + OUT_RING( 0 ); ADVANCE_LP_RING(); -/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ -/* atomic_set(&dev_priv->flush_done, 1); */ -/* wake_up_interruptible(&dev_priv->flush_queue); */ -} - -void i810_dma_quiescent(drm_device_t *dev) -{ - DECLARE_WAITQUEUE(entry, current); - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - unsigned long end; - - if(dev_priv == NULL) { - return; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i810_dma_quiescent_emit(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if((signed)(end - jiffies) <= 0) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); - - return; + i810_wait_ring( dev, dev_priv->ring.Size - 8 ); } static int i810_flush_queue(drm_device_t *dev) { - DECLARE_WAITQUEUE(entry, current); - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + drm_i810_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; - unsigned long end; int i, ret = 0; + RING_LOCALS; + +/* printk("%s\n", __FUNCTION__); 
*/ - if(dev_priv == NULL) { - return 0; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i810_dma_emit_flush(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if((signed)(end - jiffies) <= 0) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - ret = -EINTR; /* Can't restart */ - break; - } - } + i810_kernel_lost_context(dev); - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); + BEGIN_LP_RING(2); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + i810_wait_ring( dev, dev_priv->ring.Size - 8 ); for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; @@ -1030,7 +908,7 @@ if (used == I810_BUF_HARDWARE) DRM_DEBUG("reclaimed from HARDWARE\n"); if (used == I810_BUF_CLIENT) - DRM_DEBUG("still on client HARDWARE\n"); + DRM_DEBUG("still on client\n"); } return ret; @@ -1070,7 +948,6 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("i810_flush_ioctl\n"); if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_flush_ioctl called without lock held\n"); return -EINVAL; @@ -1101,9 +978,6 @@ return -EINVAL; } - DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", - vertex.idx, vertex.used, vertex.discard); - if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL; i810_dma_dispatch_vertex( dev, @@ -1152,8 +1026,6 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("i810_swap_bufs\n"); - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_swap_buf called without lock held\n"); return -EINVAL; @@ -1189,7 +1061,6 @@ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - DRM_DEBUG("getbuf\n"); if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d))) return -EFAULT; @@ -1202,9 +1073,6 @@ retcode = i810_dma_get_buffer(dev, &d, filp); - DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", - current->pid, retcode, d.granted); - if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d))) return -EFAULT; sarea_priv->last_dispatch = (int) hw_status[5]; @@ -1212,47 +1080,19 @@ return retcode; } -int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int i810_copybuf(struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_i810_copy_t d; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - u32 *hw_status = (u32 *)dev_priv->hw_status_page; - drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) - dev_priv->sarea_priv; - drm_buf_t *buf; - drm_i810_buf_priv_t *buf_priv; - drm_device_dma_t *dma = dev->dma; - - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { - DRM_ERROR("i810_dma called without lock held\n"); - return -EINVAL; - } - - if (copy_from_user(&d, (drm_i810_copy_t *)arg, sizeof(d))) - return -EFAULT; - - if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[ d.idx ]; - buf_priv = buf->dev_private; - if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM; - - if(d.used < 0 || d.used > buf->total) return -EINVAL; - - if (copy_from_user(buf_priv->virtual, d.address, d.used)) - return -EFAULT; - - sarea_priv->last_dispatch = (int) hw_status[5]; - + /* Never copy - 2.4.x doesn't need it */ return 0; } int i810_docopy(struct inode *inode, 
struct file *filp, unsigned int cmd, unsigned long arg) { - if(VM_DONTCOPY == 0) return 1; + /* Never copy - 2.4.x doesn't need it */ return 0; } @@ -1371,7 +1211,8 @@ data.offset = dev_priv->overlay_offset; data.physical = dev_priv->overlay_physical; - copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)); + if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data))) + return -EFAULT; return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i810_drm.h linux.21rc5-ac2/drivers/char/drm/i810_drm.h --- linux.21rc5/drivers/char/drm/i810_drm.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i810_drm.h 2003-04-22 16:44:36.000000000 +0100 @@ -88,6 +88,8 @@ #define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */ #define I810_TEX_SETUP_SIZE 8 +/* Flags for clear ioctl + */ #define I810_FRONT 0x1 #define I810_BACK 0x2 #define I810_DEPTH 0x4 @@ -166,14 +168,34 @@ } drm_i810_sarea_t; +/* WARNING: If you change any of these defines, make sure to wear a bullet + * proof vest since these are part of the stable kernel<->userspace ABI + */ + +/* i810 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) +#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) +#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) +#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43) +#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44) +#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) +#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46) +#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t) +#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48) +#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t) +#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a) +#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b) +#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t) +#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d ) + typedef struct _drm_i810_clear { int clear_color; int clear_depth; int flags; } drm_i810_clear_t; - - /* These may be placeholders if we have more cliprects than * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to * false, indicating that the buffer will be dispatched again with a @@ -191,6 +213,17 @@ void *address; /* Address to copy from */ } drm_i810_copy_t; +#define PR_TRIANGLES (0x0<<18) +#define PR_TRISTRIP_0 (0x1<<18) +#define PR_TRISTRIP_1 (0x2<<18) +#define PR_TRIFAN (0x3<<18) +#define PR_POLYGON (0x4<<18) +#define PR_LINES (0x5<<18) +#define PR_LINESTRIP (0x6<<18) +#define PR_RECTS (0x7<<18) +#define PR_MASK (0x7<<18) + + typedef struct drm_i810_dma { void *virtual; int request_idx; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i810_drv.c linux.21rc5-ac2/drivers/char/drm/i810_drv.c --- linux.21rc5/drivers/char/drm/i810_drv.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i810_drv.c 2003-04-22 16:44:36.000000000 +0100 @@ -33,42 +33,10 @@ #include #include "i810.h" #include "drmP.h" +#include "drm.h" +#include "i810_drm.h" #include "i810_drv.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." 
- -#define DRIVER_NAME "i810" -#define DRIVER_DESC "Intel i810" -#define DRIVER_DATE "20010920" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 } - - -#define __HAVE_COUNTERS 4 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#define __HAVE_COUNTER9 _DRM_STAT_DMA - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -77,25 +45,6 @@ #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init i810_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", i810_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i810_drv.h linux.21rc5-ac2/drivers/char/drm/i810_drv.h --- linux.21rc5/drivers/char/drm/i810_drv.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i810_drv.h 2003-04-22 16:44:36.000000000 +0100 @@ -63,10 +63,9 @@ unsigned long hw_status_page; unsigned long counter; - dma_addr_t dma_status_page; - atomic_t flush_done; - wait_queue_head_t flush_queue; /* Processes waiting until flush */ + dma_addr_t dma_status_page; + drm_buf_t *mmap_buffer; @@ -78,6 +77,7 @@ int overlay_physical; int w, h; int pitch; + } drm_i810_private_t; /* i810_dma.c */ @@ -92,8 +92,13 @@ extern int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma); + +/* Obsolete: + */ extern int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +/* Obsolete: + */ extern int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -111,9 +116,6 @@ extern void i810_dma_quiescent(drm_device_t *dev); -#define I810_VERBOSE 0 - - int i810_dma_vertex(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -196,6 +198,7 @@ #define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23)) #define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23)) +#define CMD_OP_FRONTBUFFER_INFO ((0x0<<29)|(0x14<<23)) #define BR00_BITBLT_CLIENT 0x40000000 #define BR00_OP_COLOR_BLT 0x10000000 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i810.h linux.21rc5-ac2/drivers/char/drm/i810.h --- linux.21rc5/drivers/char/drm/i810.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i810.h 2003-04-22 16:44:36.000000000 +0100 @@ -41,6 +41,47 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "VA Linux Systems Inc." 
+ +#define DRIVER_NAME "i810" +#define DRIVER_DESC "Intel i810" +#define DRIVER_DATE "20020211" + +/* Interface history + * + * 1.1 - XFree86 4.1 + * 1.2 - XvMC interfaces + * - XFree86 4.2 + * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility) + * - Remove requirement for interrupt (leave stubs again) + */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 1 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 } + + +#define __HAVE_COUNTERS 4 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY +#define __HAVE_COUNTER9 _DRM_STAT_DMA + /* Driver customization: */ #define __HAVE_RELEASE 1 @@ -60,50 +101,10 @@ i810_dma_quiescent( dev ); \ } while (0) -#define __HAVE_DMA_IRQ 1 -#define __HAVE_DMA_IRQ_BH 1 -#define __HAVE_SHARED_IRQ 1 -#define DRIVER_PREINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I810_READ16( I810REG_HWSTAM ); \ - tmp = tmp & 0x6000; \ - I810_WRITE16( I810REG_HWSTAM, tmp ); \ - \ - tmp = I810_READ16( I810REG_INT_MASK_R ); \ - tmp = tmp & 0x6000; /* Unmask interrupts */ \ - I810_WRITE16( I810REG_INT_MASK_R, tmp ); \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ -} while (0) - -#define DRIVER_POSTINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; \ - tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ -} while (0) - -#define DRIVER_UNINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - if ( dev_priv ) { \ - tmp = I810_READ16( I810REG_INT_IDENTITY_R ); \ - tmp = tmp & ~(0x6000); /* Clear all interrupts */ \ - if ( tmp != 0 ) \ - I810_WRITE16( I810REG_INT_IDENTITY_R, tmp ); \ - \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ - } \ -} while (0) +/* Don't need an irq any more. The template code will make sure that + * a noop stub is generated for compatibility. 
+ */ +#define __HAVE_DMA_IRQ 0 /* Buffer customization: */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i830_dma.c linux.21rc5-ac2/drivers/char/drm/i830_dma.c --- linux.21rc5/drivers/char/drm/i830_dma.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i830_dma.c 2003-04-22 16:44:36.000000000 +0100 @@ -26,20 +26,22 @@ * * Authors: Rickard E. (Rik) Faith * Jeff Hartmann - * Keith Whitwell - * Abraham vd Merwe + * Keith Whitwell + * Abraham vd Merwe + * */ + #include "i830.h" #include "drmP.h" +#include "drm.h" +#include "i830_drm.h" #include "i830_drv.h" #include /* For task queue support */ +#include /* For FASTCALL on unlock_page() */ #include -/* in case we don't have a 2.3.99-pre6 kernel or later: */ -#ifndef VM_DONTCOPY -#define VM_DONTCOPY 0 -#endif + +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) #define I830_BUF_FREE 2 #define I830_BUF_CLIENT 1 @@ -48,54 +50,24 @@ #define I830_BUF_UNMAPPED 0 #define I830_BUF_MAPPED 1 -#define RING_LOCALS unsigned int outring, ringmask; volatile char *virt; -#define DO_IDLE_WORKAROUND() \ -do { \ - int _head; \ - int _tail; \ - do { \ - _head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; \ - _tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; \ - udelay(1); \ - } while(_head != _tail); \ -} while(0) - -#define I830_SYNC_WORKAROUND 0 - -#define BEGIN_LP_RING(n) do { \ - if (I830_VERBOSE) \ - DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ - n, __FUNCTION__); \ - if (I830_SYNC_WORKAROUND) \ - DO_IDLE_WORKAROUND(); \ - if (dev_priv->ring.space < n*4) \ - i830_wait_ring(dev, n*4); \ - dev_priv->ring.space -= n*4; \ - outring = dev_priv->ring.tail; \ - ringmask = dev_priv->ring.tail_mask; \ - virt = dev_priv->ring.virtual_start; \ -} while (0) - -#define ADVANCE_LP_RING() do { \ - if (I830_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ - dev_priv->ring.tail = outring; \ - I830_WRITE(LP_RING + RING_TAIL, outring); \ -} while(0) - -#define OUT_RING(n) do { \ - if (I830_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = n; \ - outring += 4; \ - outring &= ringmask; \ -} while (0); + + + + + + + + + + static inline void i830_print_status_page(drm_device_t *dev) { drm_device_dma_t *dma = dev->dma; drm_i830_private_t *dev_priv = dev->dev_private; - u8 *temp = dev_priv->hw_status_page; + u32 *temp = (u32 *)dev_priv->hw_status_page; int i; DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]); @@ -149,14 +121,14 @@ } static struct file_operations i830_buffer_fops = { - open: DRM(open), - flush: DRM(flush), - release: DRM(release), - ioctl: DRM(ioctl), - mmap: i830_mmap_buffers, - read: DRM(read), - fasync: DRM(fasync), - poll: DRM(poll), + .open = DRM(open), + .flush = DRM(flush), + .release = DRM(release), + .ioctl = DRM(ioctl), + .mmap = i830_mmap_buffers, + .read = DRM(read), + .fasync = DRM(fasync), + .poll = DRM(poll), }; int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) @@ -179,7 +151,7 @@ buf_priv->currently_mapped = I830_BUF_MAPPED; unlock_kernel(); - if (remap_page_range(vma->vm_start, + if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma), vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; @@ -197,28 +169,24 @@ if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL; - if(VM_DONTCOPY != 0) { - down_write( &current->mm->mmap_sem ); - old_fops = filp->f_op; - filp->f_op = &i830_buffer_fops; - dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, -
PROT_READ|PROT_WRITE, - MAP_SHARED, - buf->bus_address); - dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; - if ((unsigned long)buf_priv->virtual > -1024UL) { - /* Real error */ - DRM_DEBUG("mmap error\n"); - retcode = (signed int)buf_priv->virtual; - buf_priv->virtual = 0; - } - up_write( &current->mm->mmap_sem ); - } else { - buf_priv->virtual = buf_priv->kernel_virtual; - buf_priv->currently_mapped = I830_BUF_MAPPED; + down_write( &current->mm->mmap_sem ); + old_fops = filp->f_op; + filp->f_op = &i830_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ|PROT_WRITE, + MAP_SHARED, + buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_ERROR("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = 0; } + up_write( &current->mm->mmap_sem ); + return retcode; } @@ -227,15 +195,15 @@ drm_i830_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; - if(VM_DONTCOPY != 0) { - if(buf_priv->currently_mapped != I830_BUF_MAPPED) - return -EINVAL; - down_write( &current->mm->mmap_sem ); - retcode = do_munmap(current->mm, - (unsigned long)buf_priv->virtual, - (size_t) buf->total); - up_write( &current->mm->mmap_sem ); - } + if(buf_priv->currently_mapped != I830_BUF_MAPPED) + return -EINVAL; + + down_write(&current->mm->mmap_sem); + retcode = DO_MUNMAP(current->mm, + (unsigned long)buf_priv->virtual, + (size_t) buf->total); + up_write(&current->mm->mmap_sem); + buf_priv->currently_mapped = I830_BUF_UNMAPPED; buf_priv->virtual = 0; @@ -260,7 +228,7 @@ retcode = i830_map_buffer(buf, filp); if(retcode) { i830_freelist_put(dev, buf); - DRM_DEBUG("mapbuf failed, retcode %d\n", retcode); + DRM_ERROR("mapbuf failed, retcode %d\n", retcode); return retcode; } buf->pid = priv->pid; @@ -286,12 +254,22 @@ DRM(ioremapfree)((void *) dev_priv->ring.virtual_start, dev_priv->ring.Size); } - if(dev_priv->hw_status_page != NULL) { - pci_free_consistent(dev->pdev, PAGE_SIZE, - dev_priv->hw_status_page, dev_priv->dma_status_page); + if(dev_priv->hw_status_page != 0UL) { + pci_free_consistent(dev->pdev, PAGE_SIZE, + (void *)dev_priv->hw_status_page, + dev_priv->dma_status_page); /* Need to rewrite hardware status page */ I830_WRITE(0x02080, 0x1ffff000); } + + /* Disable interrupts here because after dev_private + * is freed, it's too late.
+ */ + if (dev->irq) { + I830_WRITE16( I830REG_INT_MASK_R, 0xffff ); + I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 ); + } + DRM(free)(dev->dev_private, sizeof(drm_i830_private_t), DRM_MEM_DRIVER); dev->dev_private = NULL; @@ -305,7 +283,7 @@ return 0; } -static int i830_wait_ring(drm_device_t *dev, int n) +int i830_wait_ring(drm_device_t *dev, int n, const char *caller) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_ring_buffer_t *ring = &(dev_priv->ring); @@ -314,7 +292,7 @@ unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; end = jiffies + (HZ*3); - while (ring->space < n) { + while (ring->space < n) { ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ring->space = ring->head - (ring->tail+8); if (ring->space < 0) ring->space += ring->Size; @@ -325,13 +303,13 @@ } iters++; - if(time_before(end,jiffies)) { + if(time_before(end, jiffies)) { DRM_ERROR("space: %d wanted %d\n", ring->space, n); DRM_ERROR("lockup\n"); goto out_wait_ring; } - - udelay(1); + udelay(1); + dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; } out_wait_ring: @@ -344,9 +322,12 @@ drm_i830_ring_buffer_t *ring = &(dev_priv->ring); ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; - ring->tail = I830_READ(LP_RING + RING_TAIL); + ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; ring->space = ring->head - (ring->tail+8); if (ring->space < 0) ring->space += ring->Size; + + if (ring->head == ring->tail) + dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY; } static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv) @@ -420,9 +401,6 @@ ((u8 *)dev_priv->sarea_map->handle + init->sarea_priv_offset); - atomic_set(&dev_priv->flush_done, 0); - init_waitqueue_head(&dev_priv->flush_queue); - dev_priv->ring.Start = init->ring_start; dev_priv->ring.End = init->ring_end; dev_priv->ring.Size = init->ring_size; @@ -446,11 +424,17 @@ dev_priv->pitch = init->pitch; dev_priv->back_offset = init->back_offset; dev_priv->depth_offset = init->depth_offset; + dev_priv->front_offset = init->front_offset; dev_priv->front_di1 = init->front_offset | init->pitch_bits; dev_priv->back_di1 = init->back_offset | init->pitch_bits; dev_priv->zi1 = init->depth_offset | init->pitch_bits; + DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1); + DRM_DEBUG("back_offset %x\n", dev_priv->back_offset); + DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1); + DRM_DEBUG("pitch_bits %x\n", init->pitch_bits); + dev_priv->cpp = init->cpp; /* We are using seperate values as placeholders for mechanisms for * private backbuffer/depthbuffer usage. 
@@ -458,20 +442,23 @@ dev_priv->back_pitch = init->back_pitch; dev_priv->depth_pitch = init->depth_pitch; + dev_priv->do_boxes = 0; + dev_priv->use_mi_batchbuffer_start = 0; /* Program Hardware Status Page */ - dev_priv->hw_status_page = pci_alloc_consistent(dev->pdev, PAGE_SIZE, + dev_priv->hw_status_page = + (unsigned long) pci_alloc_consistent(dev->pdev, PAGE_SIZE, &dev_priv->dma_status_page); - if(dev_priv->hw_status_page == NULL) { + if(dev_priv->hw_status_page == 0UL) { dev->dev_private = (void *)dev_priv; i830_dma_cleanup(dev); DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); + memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE); + DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page); - I830_WRITE(0x02080, dev_priv->dma_status_page); + I830_WRITE(0x02080, dev_priv->dma_status_page); DRM_DEBUG("Enabled hardware status page\n"); /* Now we need to init our freelist */ @@ -517,83 +504,107 @@ return retcode; } +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define ST1_ENABLE (1<<16) +#define ST1_MASK (0xffff) + /* Most efficient way to verify state for the i830 is as it is * emitted. Non-conformant state is silently dropped. - * - * Use 'volatile' & local var tmp to force the emitted values to be - * identical to the verified ones. */ -static void i830EmitContextVerified( drm_device_t *dev, - volatile unsigned int *code ) +static void i830EmitContextVerified( drm_device_t *dev, + unsigned int *code ) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( I830_CTX_SETUP_SIZE ); - for ( i = 0 ; i < I830_CTX_SETUP_SIZE ; i++ ) { + BEGIN_LP_RING( I830_CTX_SETUP_SIZE + 4 ); + + for ( i = 0 ; i < I830_CTXREG_BLENDCOLR0 ; i++ ) { tmp = code[i]; + if ((tmp & (7<<29)) == CMD_3D && + (tmp & (0x1f<<24)) < (0x1d<<24)) { + OUT_RING( tmp ); + j++; + } else { + DRM_ERROR("Skipping %d\n", i); + } + } -#if 0 - if ((tmp & (7<<29)) == (3<<29) && + OUT_RING( STATE3D_CONST_BLEND_COLOR_CMD ); + OUT_RING( code[I830_CTXREG_BLENDCOLR] ); + j += 2; + + for ( i = I830_CTXREG_VF ; i < I830_CTXREG_MCSB0 ; i++ ) { + tmp = code[i]; + if ((tmp & (7<<29)) == CMD_3D && (tmp & (0x1f<<24)) < (0x1d<<24)) { OUT_RING( tmp ); j++; } else { - printk("Skipping %d\n", i); + DRM_ERROR("Skipping %d\n", i); } -#else - OUT_RING( tmp ); - j++; -#endif } + OUT_RING( STATE3D_MAP_COORD_SETBIND_CMD ); + OUT_RING( code[I830_CTXREG_MCSB1] ); + j += 2; + if (j & 1) OUT_RING( 0 ); ADVANCE_LP_RING(); } -static void i830EmitTexVerified( drm_device_t *dev, - volatile unsigned int *code ) +static void i830EmitTexVerified( drm_device_t *dev, unsigned int *code ) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( I830_TEX_SETUP_SIZE ); - - OUT_RING( GFX_OP_MAP_INFO ); - OUT_RING( code[I830_TEXREG_MI1] ); - OUT_RING( code[I830_TEXREG_MI2] ); - OUT_RING( code[I830_TEXREG_MI3] ); - OUT_RING( code[I830_TEXREG_MI4] ); - OUT_RING( code[I830_TEXREG_MI5] ); - - for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - OUT_RING( tmp ); - j++; - } + if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO || + (code[I830_TEXREG_MI0] & ~(0xf*LOAD_TEXTURE_MAP0)) == + (STATE3D_LOAD_STATE_IMMEDIATE_2|4)) { + + BEGIN_LP_RING( I830_TEX_SETUP_SIZE ); + + OUT_RING( code[I830_TEXREG_MI0] ); /* TM0LI */ + OUT_RING( code[I830_TEXREG_MI1] ); /* TM0S0 */ + OUT_RING( code[I830_TEXREG_MI2] ); /* 
TM0S1 */ + OUT_RING( code[I830_TEXREG_MI3] ); /* TM0S2 */ + OUT_RING( code[I830_TEXREG_MI4] ); /* TM0S3 */ + OUT_RING( code[I830_TEXREG_MI5] ); /* TM0S4 */ + + for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) { + tmp = code[i]; + OUT_RING( tmp ); + j++; + } - if (j & 1) - OUT_RING( 0 ); + if (j & 1) + OUT_RING( 0 ); - ADVANCE_LP_RING(); + ADVANCE_LP_RING(); + } + else + printk("rejected packet %x\n", code[0]); } static void i830EmitTexBlendVerified( drm_device_t *dev, - volatile unsigned int *code, - volatile unsigned int num) + unsigned int *code, + unsigned int num) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( num ); + if (!num) + return; + + BEGIN_LP_RING( num + 1 ); for ( i = 0 ; i < num ; i++ ) { tmp = code[i]; @@ -616,6 +627,8 @@ int i; RING_LOCALS; + return; /* Is this right ? -- Arjan */ + BEGIN_LP_RING( 258 ); if(is_shared == 1) { @@ -629,44 +642,43 @@ OUT_RING(palette[i]); } OUT_RING(0); + /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop! + */ } /* Need to do some additional checking when setting the dest buffer. */ static void i830EmitDestVerified( drm_device_t *dev, - volatile unsigned int *code ) + unsigned int *code ) { drm_i830_private_t *dev_priv = dev->dev_private; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 6 ); + BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 10 ); + tmp = code[I830_DESTREG_CBUFADDR]; - if (tmp == dev_priv->front_di1) { - /* Don't use fence when front buffer rendering */ - OUT_RING( CMD_OP_DESTBUFFER_INFO ); - OUT_RING( BUF_3D_ID_COLOR_BACK | - BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) ); - OUT_RING( tmp ); + if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { + if (((int)outring) & 8) { + OUT_RING(0); + OUT_RING(0); + } OUT_RING( CMD_OP_DESTBUFFER_INFO ); - OUT_RING( BUF_3D_ID_DEPTH | - BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); - OUT_RING( dev_priv->zi1 ); - } else if(tmp == dev_priv->back_di1) { - OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) | BUF_3D_USE_FENCE); OUT_RING( tmp ); + OUT_RING( 0 ); OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE | BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); OUT_RING( dev_priv->zi1 ); + OUT_RING( 0 ); } else { - DRM_DEBUG("bad di1 %x (allow %x or %x)\n", + DRM_ERROR("bad di1 %x (allow %x or %x)\n", tmp, dev_priv->front_di1, dev_priv->back_di1); } @@ -688,25 +700,39 @@ if((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) { OUT_RING( tmp ); } else { - DRM_DEBUG("bad scissor enable\n"); + DRM_ERROR("bad scissor enable\n"); OUT_RING( 0 ); } - OUT_RING( code[I830_DESTREG_SENABLE] ); - OUT_RING( GFX_OP_SCISSOR_RECT ); OUT_RING( code[I830_DESTREG_SR1] ); OUT_RING( code[I830_DESTREG_SR2] ); + OUT_RING( 0 ); ADVANCE_LP_RING(); } +static void i830EmitStippleVerified( drm_device_t *dev, + unsigned int *code ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + BEGIN_LP_RING( 2 ); + OUT_RING( GFX_OP_STIPPLE ); + OUT_RING( code[1] ); + ADVANCE_LP_RING(); +} + + static void i830EmitState( drm_device_t *dev ) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; + DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); + if (dirty & I830_UPLOAD_BUFFERS) { i830EmitDestVerified( dev, sarea_priv->BufferState ); sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS; @@ -740,17 +766,154 @@ } if (dirty & 
I830_UPLOAD_TEX_PALETTE_SHARED) { - i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); + i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); + } else { + if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { + i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); + } + if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { + i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); + } + + /* 1.3: + */ +#if 0 + if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) { + i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); + } + if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) { + i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); + } +#endif + } + + /* 1.3: + */ + if (dirty & I830_UPLOAD_STIPPLE) { + i830EmitStippleVerified( dev, + sarea_priv->StippleState); + sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE; + } + + if (dirty & I830_UPLOAD_TEX2) { + i830EmitTexVerified( dev, sarea_priv->TexState2 ); + sarea_priv->dirty &= ~I830_UPLOAD_TEX2; + } + + if (dirty & I830_UPLOAD_TEX3) { + i830EmitTexVerified( dev, sarea_priv->TexState3 ); + sarea_priv->dirty &= ~I830_UPLOAD_TEX3; + } + + + if (dirty & I830_UPLOAD_TEXBLEND2) { + i830EmitTexBlendVerified( + dev, + sarea_priv->TexBlendState2, + sarea_priv->TexBlendStateWordsUsed2); + + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2; + } + + if (dirty & I830_UPLOAD_TEXBLEND3) { + i830EmitTexBlendVerified( + dev, + sarea_priv->TexBlendState3, + sarea_priv->TexBlendStateWordsUsed3); + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3; + } +} + +/* ================================================================ + * Performance monitoring functions + */ + +static void i830_fill_box( drm_device_t *dev, + int x, int y, int w, int h, + int r, int g, int b ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + u32 color; + unsigned int BR13, CMD; + RING_LOCALS; + + BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1<<24); + CMD = XY_COLOR_BLT_CMD; + x += dev_priv->sarea_priv->boxes[0].x1; + y += dev_priv->sarea_priv->boxes[0].y1; + + if (dev_priv->cpp == 4) { + BR13 |= (1<<25); + CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB); + color = (((0xff) << 24) | (r << 16) | (g << 8) | b); } else { - if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { - i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); - sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); - } - if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { - i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); - sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); - } + color = (((r & 0xf8) << 8) | + ((g & 0xfc) << 3) | + ((b & 0xf8) >> 3)); + } + + BEGIN_LP_RING( 6 ); + OUT_RING( CMD ); + OUT_RING( BR13 ); + OUT_RING( (y << 16) | x ); + OUT_RING( ((y+h) << 16) | (x+w) ); + + if ( dev_priv->current_page == 1 ) { + OUT_RING( dev_priv->front_offset ); + } else { + OUT_RING( dev_priv->back_offset ); + } + + OUT_RING( color ); + ADVANCE_LP_RING(); +} + +static void i830_cp_performance_boxes( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + /* Purple box for page flipping + */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP ) + i830_fill_box( dev, 4, 4, 8, 8, 255, 0, 255 ); + + /* Red box if we have to wait for idle at any point + */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT ) + i830_fill_box( dev, 16, 4, 8, 8, 255, 0, 0 ); + + /* Blue box: lost context? 
+ */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT ) + i830_fill_box( dev, 28, 4, 8, 8, 0, 0, 255 ); + + /* Yellow box for texture swaps + */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD ) + i830_fill_box( dev, 40, 4, 8, 8, 255, 255, 0 ); + + /* Green box if hardware never idles (as far as we can tell) + */ + if ( !(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY) ) + i830_fill_box( dev, 64, 4, 8, 8, 0, 255, 0 ); + + + /* Draw bars indicating number of buffers allocated + * (not a great measure, easily confused) + */ + if (dev_priv->dma_used) { + int bar = dev_priv->dma_used / 10240; + if (bar > 100) bar = 100; + if (bar < 1) bar = 1; + i830_fill_box( dev, 4, 16, bar, 4, 196, 128, 128 ); + dev_priv->dma_used = 0; } + + dev_priv->sarea_priv->perf_boxes = 0; } static void i830_dma_dispatch_clear( drm_device_t *dev, int flags, @@ -768,6 +931,15 @@ unsigned int BR13, CMD, D_CMD; RING_LOCALS; + + if ( dev_priv->current_page == 1 ) { + unsigned int tmp = flags; + + flags &= ~(I830_FRONT | I830_BACK); + if ( tmp & I830_FRONT ) flags |= I830_BACK; + if ( tmp & I830_BACK ) flags |= I830_FRONT; + } + i830_kernel_lost_context(dev); switch(cpp) { @@ -808,7 +980,7 @@ OUT_RING( BR13 ); OUT_RING( (pbox->y1 << 16) | pbox->x1 ); OUT_RING( (pbox->y2 << 16) | pbox->x2 ); - OUT_RING( 0 ); + OUT_RING( dev_priv->front_offset ); OUT_RING( clear_color ); ADVANCE_LP_RING(); } @@ -847,13 +1019,17 @@ drm_clip_rect_t *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = dev_priv->cpp; - int ofs = dev_priv->back_offset; int i; unsigned int CMD, BR13; RING_LOCALS; DRM_DEBUG("swapbuffers\n"); + i830_kernel_lost_context(dev); + + if (dev_priv->do_boxes) + i830_cp_performance_boxes( dev ); + switch(cpp) { case 2: BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24); @@ -870,7 +1046,6 @@ break; } - i830_kernel_lost_context(dev); if (nbox > I830_NR_SAREA_CLIPRECTS) nbox = I830_NR_SAREA_CLIPRECTS; @@ -890,23 +1065,72 @@ BEGIN_LP_RING( 8 ); OUT_RING( CMD ); OUT_RING( BR13 ); + OUT_RING( (pbox->y1 << 16) | pbox->x1 ); + OUT_RING( (pbox->y2 << 16) | pbox->x2 ); - OUT_RING( (pbox->y1 << 16) | - pbox->x1 ); - OUT_RING( (pbox->y2 << 16) | - pbox->x2 ); - - OUT_RING( 0 /* front ofs always zero */ ); - OUT_RING( (pbox->y1 << 16) | - pbox->x1 ); + if (dev_priv->current_page == 0) + OUT_RING( dev_priv->front_offset ); + else + OUT_RING( dev_priv->back_offset ); + OUT_RING( (pbox->y1 << 16) | pbox->x1 ); OUT_RING( BR13 & 0xffff ); - OUT_RING( ofs ); + + if (dev_priv->current_page == 0) + OUT_RING( dev_priv->back_offset ); + else + OUT_RING( dev_priv->front_offset ); ADVANCE_LP_RING(); } } +static void i830_dma_dispatch_flip( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n", + __FUNCTION__, + dev_priv->current_page, + dev_priv->sarea_priv->pf_current_page); + + i830_kernel_lost_context(dev); + + if (dev_priv->do_boxes) { + dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP; + i830_cp_performance_boxes( dev ); + } + + + BEGIN_LP_RING( 2 ); + OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + + BEGIN_LP_RING( 6 ); + OUT_RING( CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP ); + OUT_RING( 0 ); + if ( dev_priv->current_page == 0 ) { + OUT_RING( dev_priv->back_offset ); + dev_priv->current_page = 1; + } else { + OUT_RING( dev_priv->front_offset ); + dev_priv->current_page = 0; + } + OUT_RING(0); + ADVANCE_LP_RING(); + + + BEGIN_LP_RING( 2 ); + OUT_RING( 
MI_WAIT_FOR_EVENT | + MI_WAIT_FOR_PLANE_A_FLIP ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + + + dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; +} static void i830_dma_dispatch_vertex(drm_device_t *dev, drm_buf_t *buf, @@ -936,7 +1160,7 @@ } } - if (used > 4*1024) + if (used > 4*1023) used = 0; if (sarea_priv->dirty) @@ -953,12 +1177,19 @@ DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4); if (buf_priv->currently_mapped == I830_BUF_MAPPED) { - *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | - sarea_priv->vertex_prim | - ((used/4)-2)); + u32 *vp = buf_priv->virtual; + + vp[0] = (GFX_OP_PRIMITIVE | + sarea_priv->vertex_prim | + ((used/4)-2)); + + if (dev_priv->use_mi_batchbuffer_start) { + vp[used/4] = MI_BATCH_BUFFER_END; + used += 4; + } if (used & 4) { - *(u32 *)((u32)buf_priv->virtual + used) = 0; + vp[used/4] = 0; used += 4; } @@ -978,80 +1209,45 @@ ADVANCE_LP_RING(); } - BEGIN_LP_RING(4); + if (dev_priv->use_mi_batchbuffer_start) { + BEGIN_LP_RING(2); + OUT_RING( MI_BATCH_BUFFER_START | (2<<6) ); + OUT_RING( start | MI_BATCH_NON_SECURE ); + ADVANCE_LP_RING(); + } + else { + BEGIN_LP_RING(4); + OUT_RING( MI_BATCH_BUFFER ); + OUT_RING( start | MI_BATCH_NON_SECURE ); + OUT_RING( start + used - 4 ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + } - OUT_RING( MI_BATCH_BUFFER ); - OUT_RING( start | MI_BATCH_NON_SECURE ); - OUT_RING( start + used - 4 ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); - } while (++i < nbox); } - BEGIN_LP_RING(10); - OUT_RING( CMD_STORE_DWORD_IDX ); - OUT_RING( 20 ); - OUT_RING( dev_priv->counter ); - OUT_RING( 0 ); - if (discard) { + dev_priv->counter++; + + (void) cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, + I830_BUF_HARDWARE); + + BEGIN_LP_RING(8); + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( 20 ); + OUT_RING( dev_priv->counter ); OUT_RING( CMD_STORE_DWORD_IDX ); OUT_RING( buf_priv->my_use_idx ); OUT_RING( I830_BUF_FREE ); + OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); + ADVANCE_LP_RING(); } - - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); -} - -/* Interrupts are only for flushing */ -void i830_dma_service(int irq, void *device, struct pt_regs *regs) -{ - drm_device_t *dev = (drm_device_t *)device; - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - u16 temp; - - temp = I830_READ16(I830REG_INT_IDENTITY_R); - temp = temp & ~(0x6000); - if(temp != 0) I830_WRITE16(I830REG_INT_IDENTITY_R, - temp); /* Clear all interrupts */ - else - return; - - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); -} - -void DRM(dma_immediate_bh)(void *device) -{ - drm_device_t *dev = (drm_device_t *) device; - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); } -static inline void i830_dma_emit_flush(drm_device_t *dev) -{ - drm_i830_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - - i830_kernel_lost_context(dev); - BEGIN_LP_RING(2); - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( GFX_OP_USER_INTERRUPT ); - ADVANCE_LP_RING(); - - i830_wait_ring( dev, dev_priv->ring.Size - 8 ); - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -static inline void i830_dma_quiescent_emit(drm_device_t *dev) +void i830_dma_quiescent(drm_device_t *dev) { drm_i830_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -1062,79 +1258,27 @@ OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); - OUT_RING( GFX_OP_USER_INTERRUPT 
); + OUT_RING( 0 ); ADVANCE_LP_RING(); - i830_wait_ring( dev, dev_priv->ring.Size - 8 ); - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -void i830_dma_quiescent(drm_device_t *dev) -{ - DECLARE_WAITQUEUE(entry, current); - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - unsigned long end; - - if(dev_priv == NULL) { - return; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i830_dma_quiescent_emit(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if(time_before(end, jiffies)) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); - - return; + i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ ); } static int i830_flush_queue(drm_device_t *dev) { - DECLARE_WAITQUEUE(entry, current); - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; + drm_i830_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; - unsigned long end; - int i, ret = 0; + int i, ret = 0; + RING_LOCALS; + + i830_kernel_lost_context(dev); - if(dev_priv == NULL) { - return 0; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i830_dma_emit_flush(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if(time_before(end, jiffies)) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - ret = -EINTR; /* Can't restart */ - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); + BEGIN_LP_RING(2); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ ); for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; @@ -1146,7 +1290,7 @@ if (used == I830_BUF_HARDWARE) DRM_DEBUG("reclaimed from HARDWARE\n"); if (used == I830_BUF_CLIENT) - DRM_DEBUG("still on client HARDWARE\n"); + DRM_DEBUG("still on client\n"); } return ret; @@ -1185,8 +1329,7 @@ { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - - DRM_DEBUG("i830_flush_ioctl\n"); + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i830_flush_ioctl called without lock held\n"); return -EINVAL; @@ -1275,6 +1418,53 @@ return 0; } + + +/* Not sure why this isn't set all the time: + */ +static void i830_do_init_pageflip( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; +} + +int i830_do_cleanup_pageflip( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + if (dev_priv->current_page != 0) + i830_dma_dispatch_flip( dev ); + + dev_priv->page_flipping = 0; + return 0; +} + +int i830_flip_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + 
DRM_ERROR("i830_flip_buf called without lock held\n"); + return -EINVAL; + } + + if (!dev_priv->page_flipping) + i830_do_init_pageflip( dev ); + + i830_dma_dispatch_flip( dev ); + return 0; +} + int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { @@ -1324,46 +1514,80 @@ return retcode; } -int i830_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, +int i830_copybuf(struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + /* Never copy - 2.4.x doesn't need it */ + return 0; +} + +int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { + return 0; +} + + + +int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) +{ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - drm_i830_copy_t d; - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - u32 *hw_status = (u32 *)dev_priv->hw_status_page; - drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) - dev_priv->sarea_priv; - drm_buf_t *buf; - drm_i830_buf_priv_t *buf_priv; - drm_device_dma_t *dma = dev->dma; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_getparam_t param; + int value; - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { - DRM_ERROR("i830_dma called without lock held\n"); + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); return -EINVAL; } - - if (copy_from_user(&d, (drm_i830_copy_t *)arg, sizeof(d))) - return -EFAULT; - - if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[ d.idx ]; - buf_priv = buf->dev_private; - if (buf_priv->currently_mapped != I830_BUF_MAPPED) return -EPERM; - - if(d.used < 0 || d.used > buf->total) return -EINVAL; - if (copy_from_user(buf_priv->virtual, d.address, d.used)) + if (copy_from_user(¶m, (drm_i830_getparam_t *)arg, sizeof(param) )) return -EFAULT; - sarea_priv->last_dispatch = (int) hw_status[5]; + switch( param.param ) { + case I830_PARAM_IRQ_ACTIVE: + value = dev->irq ? 1 : 0; + break; + default: + return -EINVAL; + } + if ( copy_to_user( param.value, &value, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + return 0; } -int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) + +int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) { - if(VM_DONTCOPY == 0) return 1; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_setparam_t param; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + if (copy_from_user(¶m, (drm_i830_setparam_t *)arg, sizeof(param) )) + return -EFAULT; + + switch( param.param ) { + case I830_SETPARAM_USE_MI_BATCHBUFFER_START: + dev_priv->use_mi_batchbuffer_start = param.value; + break; + default: + return -EINVAL; + } + return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i830_drm.h linux.21rc5-ac2/drivers/char/drm/i830_drm.h --- linux.21rc5/drivers/char/drm/i830_drm.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i830_drm.h 2003-04-22 16:44:36.000000000 +0100 @@ -3,6 +3,9 @@ /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver. 
+ * + * KW: Actually, you can't ever change them because doing so would + * break backwards compatibility. */ #ifndef _I830_DEFINES_ @@ -18,14 +21,12 @@ #define I830_NR_TEX_REGIONS 64 #define I830_LOG_MIN_TEX_REGION_SIZE 16 -/* if defining I830_ENABLE_4_TEXTURES, do it in i830_3d_reg.h, too */ -#if !defined(I830_ENABLE_4_TEXTURES) +/* KW: These aren't correct but someone set them to two and then + * released the module. Now we can't change them as doing so would + * break backwards compatibility. + */ #define I830_TEXTURE_COUNT 2 -#define I830_TEXBLEND_COUNT 2 /* always same as TEXTURE_COUNT? */ -#else /* defined(I830_ENABLE_4_TEXTURES) */ -#define I830_TEXTURE_COUNT 4 -#define I830_TEXBLEND_COUNT 4 /* always same as TEXTURE_COUNT? */ -#endif /* I830_ENABLE_4_TEXTURES */ +#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT #define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */ @@ -57,6 +58,7 @@ #define I830_UPLOAD_TEXBLEND_MASK 0xf00000 #define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n)) #define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000 +#define I830_UPLOAD_STIPPLE 0x8000000 /* Indices into buf.Setup where various bits of state are mirrored per * context and per buffer. These can be fired at the card as a unit, @@ -73,7 +75,6 @@ */ #define I830_DESTREG_CBUFADDR 0 -/* Invarient */ #define I830_DESTREG_DBUFADDR 1 #define I830_DESTREG_DV0 2 #define I830_DESTREG_DV1 3 @@ -109,6 +110,13 @@ #define I830_CTXREG_MCSB1 16 #define I830_CTX_SETUP_SIZE 17 +/* 1.3: Stipple state + */ +#define I830_STPREG_ST0 0 +#define I830_STPREG_ST1 1 +#define I830_STP_SETUP_SIZE 2 + + /* Texture state (per tex unit) */ @@ -124,6 +132,18 @@ #define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */ #define I830_TEX_SETUP_SIZE 10 +#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */ +#define I830_TEXREG_TM0S0 1 +#define I830_TEXREG_TM0S1 2 +#define I830_TEXREG_TM0S2 3 +#define I830_TEXREG_TM0S3 4 +#define I830_TEXREG_TM0S4 5 +#define I830_TEXREG_NOP0 6 /* noop */ +#define I830_TEXREG_NOP1 7 /* noop */ +#define I830_TEXREG_NOP2 8 /* noop */ +#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */ +#define __I830_TEX_SETUP_SIZE 10 + #define I830_FRONT 0x1 #define I830_BACK 0x2 #define I830_DEPTH 0x4 @@ -199,8 +219,53 @@ int ctxOwner; /* last context to upload state */ int vertex_prim; + + int pf_enabled; /* is pageflipping allowed? */ + int pf_active; + int pf_current_page; /* which buffer is being displayed? */ + + int perf_boxes; /* performance boxes to be displayed */ + + /* Here's the state for texunits 2,3: + */ + unsigned int TexState2[I830_TEX_SETUP_SIZE]; + unsigned int TexBlendState2[I830_TEXBLEND_SIZE]; + unsigned int TexBlendStateWordsUsed2; + + unsigned int TexState3[I830_TEX_SETUP_SIZE]; + unsigned int TexBlendState3[I830_TEXBLEND_SIZE]; + unsigned int TexBlendStateWordsUsed3; + + unsigned int StippleState[I830_STP_SETUP_SIZE]; } drm_i830_sarea_t; +/* Flags for perf_boxes + */ +#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */ +#define I830_BOX_FLIP 0x2 /* populated by kernel */ +#define I830_BOX_WAIT 0x4 /* populated by kernel & client */ +#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */ +#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */ + + +/* I830 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. 
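
The DRM_IO/DRM_IOW/DRM_IOWR wrappers in the table that follows are the standard <asm/ioctl.h> encoders applied to the DRM ioctl magic, so a user-space client reaches the new 1.3 entry points with a plain ioctl(). A small sketch of calling the new GETPARAM ioctl, assuming the 1.3 interface below (the fd plumbing and the i830_irq_active() helper are assumptions, not part of the patch):

    #include <stdio.h>
    #include <sys/ioctl.h>

    #define DRM_IOCTL_BASE          'd'    /* DRM magic, from drm.h */
    #define DRM_IOWR(nr, type)      _IOWR(DRM_IOCTL_BASE, nr, type)

    typedef struct drm_i830_getparam {
            int param;
            int *value;
    } drm_i830_getparam_t;

    #define I830_PARAM_IRQ_ACTIVE   1
    #define DRM_IOCTL_I830_GETPARAM DRM_IOWR(0x4c, drm_i830_getparam_t)

    /* Hypothetical caller: fd would be an open DRM device node.
     * Kernels older than interface 1.3 reject this with EINVAL. */
    static int i830_irq_active(int fd)
    {
            drm_i830_getparam_t gp;
            int value = 0;

            gp.param = I830_PARAM_IRQ_ACTIVE;
            gp.value = &value;
            if (ioctl(fd, DRM_IOCTL_I830_GETPARAM, &gp) < 0)
                    return -1;
            return value;
    }

    int main(void)
    {
            /* No device needed just to show the encoded number. */
            printf("DRM_IOCTL_I830_GETPARAM = 0x%08lx\n",
                   (unsigned long)DRM_IOCTL_I830_GETPARAM);
            printf("irq_active(-1) = %d\n", i830_irq_active(-1));
            return 0;
    }
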
+ */ +#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t) +#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t) +#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t) +#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43) +#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44) +#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t) +#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46) +#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t) +#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48) +#define DRM_IOCTL_I830_FLIP DRM_IO ( 0x49) +#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(0x4a, drm_i830_irq_emit_t) +#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( 0x4b, drm_i830_irq_wait_t) +#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(0x4c, drm_i830_getparam_t) +#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(0x4d, drm_i830_setparam_t) + typedef struct _drm_i830_clear { int clear_color; int clear_depth; @@ -235,4 +300,36 @@ int granted; } drm_i830_dma_t; + +/* 1.3: Userspace can request & wait on irq's: + */ +typedef struct drm_i830_irq_emit { + int *irq_seq; +} drm_i830_irq_emit_t; + +typedef struct drm_i830_irq_wait { + int irq_seq; +} drm_i830_irq_wait_t; + + +/* 1.3: New ioctl to query kernel params: + */ +#define I830_PARAM_IRQ_ACTIVE 1 + +typedef struct drm_i830_getparam { + int param; + int *value; +} drm_i830_getparam_t; + + +/* 1.3: New ioctl to set kernel params: + */ +#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1 + +typedef struct drm_i830_setparam { + int param; + int value; +} drm_i830_setparam_t; + + #endif /* _I830_DRM_H_ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i830_drv.c linux.21rc5-ac2/drivers/char/drm/i830_drv.c --- linux.21rc5/drivers/char/drm/i830_drv.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i830_drv.c 2003-04-22 16:44:36.000000000 +0100 @@ -29,41 +29,16 @@ * Jeff Hartmann * Gareth Hughes * Abraham vd Merwe + * Keith Whitwell */ #include #include "i830.h" #include "drmP.h" +#include "drm.h" +#include "i830_drm.h" #include "i830_drv.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." - -#define DRIVER_NAME "i830" -#define DRIVER_DESC "Intel 830M" -#define DRIVER_DATE "20011004" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)] = { i830_dma_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)] = { i830_dma_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)] = { i830_clear_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)] = { i830_getage, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)] = { i830_getbuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)] = { i830_swap_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)] = { i830_copybuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)] = { i830_docopy, 1, 0 }, - -#define __HAVE_COUNTERS 4 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#define __HAVE_COUNTER9 _DRM_STAT_DMA - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -72,25 +47,6 @@ #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. 
If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init i830_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", i830_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i830_drv.h linux.21rc5-ac2/drivers/char/drm/i830_drv.h --- linux.21rc5/drivers/char/drm/i830_drv.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i830_drv.h 2003-04-22 16:44:36.000000000 +0100 @@ -61,24 +61,36 @@ drm_i830_sarea_t *sarea_priv; drm_i830_ring_buffer_t ring; - u8 *hw_status_page; + unsigned long hw_status_page; unsigned long counter; - - dma_addr_t dma_status_page; - atomic_t flush_done; - wait_queue_head_t flush_queue; /* Processes waiting until flush */ + dma_addr_t dma_status_page; + drm_buf_t *mmap_buffer; u32 front_di1, back_di1, zi1; int back_offset; int depth_offset; + int front_offset; int w, h; int pitch; int back_pitch; int depth_pitch; unsigned int cpp; + + int do_boxes; + int dma_used; + + int current_page; + int page_flipping; + + wait_queue_head_t irq_queue; + atomic_t irq_received; + atomic_t irq_emitted; + + int use_mi_batchbuffer_start; + } drm_i830_private_t; /* i830_dma.c */ @@ -109,24 +121,81 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -#define I830_VERBOSE 0 +extern int i830_flip_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +extern int i830_getparam( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + +extern int i830_setparam( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + +/* i830_irq.c */ +extern int i830_irq_emit( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int i830_irq_wait( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int i830_wait_irq(drm_device_t *dev, int irq_nr); +extern int i830_emit_irq(drm_device_t *dev); + #define I830_BASE(reg) ((unsigned long) \ dev_priv->mmio_map->handle) #define I830_ADDR(reg) (I830_BASE(reg) + reg) -#define I830_DEREF(reg) *(__volatile__ int *)I830_ADDR(reg) -#define I830_READ(reg) I830_DEREF(reg) -#define I830_WRITE(reg,val) do { I830_DEREF(reg) = val; } while (0) +#define I830_DEREF(reg) *(__volatile__ unsigned int *)I830_ADDR(reg) +#define I830_READ(reg) readl((volatile u32 *)I830_ADDR(reg)) +#define I830_WRITE(reg,val) writel(val, (volatile u32 *)I830_ADDR(reg)) #define I830_DEREF16(reg) *(__volatile__ u16 *)I830_ADDR(reg) #define I830_READ16(reg) I830_DEREF16(reg) #define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0) + + +#define I830_VERBOSE 0 + +#define RING_LOCALS unsigned int outring, ringmask, outcount; \ + volatile char *virt; + +#define BEGIN_LP_RING(n) do { \ + if (I830_VERBOSE) \ + printk("BEGIN_LP_RING(%d) in %s\n", \ + n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i830_wait_ring(dev, n*4, __FUNCTION__); \ + outcount = 0; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ +} while (0) + + +#define OUT_RING(n) do { \ + if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outcount++; \ + outring += 4; \ + outring &= ringmask; \ +} while 
(0) + +#define ADVANCE_LP_RING() do { \ + if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \ + dev_priv->ring.tail = outring; \ + dev_priv->ring.space -= outcount * 4; \ + I830_WRITE(LP_RING + RING_TAIL, outring); \ +} while(0) + +extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller); + + #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) #define CMD_REPORT_HEAD (7<<23) #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) #define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) +#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16)) +#define LOAD_TEXTURE_MAP0 (1<<11) + #define INST_PARSER_CLIENT 0x00000000 #define INST_OP_FLUSH 0x02000000 #define INST_FLUSH_MAP_CACHE 0x00000001 @@ -142,18 +211,21 @@ #define I830REG_INT_MASK_R 0x020a8 #define I830REG_INT_ENABLE_R 0x020a0 +#define I830_IRQ_RESERVED ((1<<13)|(3<<2)) + + #define LP_RING 0x2030 #define HP_RING 0x2040 #define RING_TAIL 0x00 -#define TAIL_ADDR 0x000FFFF8 +#define TAIL_ADDR 0x001FFFF8 #define RING_HEAD 0x04 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC #define RING_START 0x08 -#define START_ADDR 0x00FFFFF8 +#define START_ADDR 0x0xFFFFF000 #define RING_LEN 0x0C -#define RING_NR_PAGES 0x000FF000 +#define RING_NR_PAGES 0x001FF000 #define RING_REPORT_MASK 0x00000006 #define RING_REPORT_64K 0x00000002 #define RING_REPORT_128K 0x00000004 @@ -184,6 +256,12 @@ #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) +#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) +#define ASYNC_FLIP (1<<22) + +#define CMD_3D (0x3<<29) +#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16)) +#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16)) #define BR00_BITBLT_CLIENT 0x40000000 #define BR00_OP_COLOR_BLT 0x10000000 @@ -208,8 +286,15 @@ #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) #define MI_BATCH_BUFFER ((0x30<<23)|1) +#define MI_BATCH_BUFFER_START (0x31<<23) +#define MI_BATCH_BUFFER_END (0xA<<23) #define MI_BATCH_NON_SECURE (1) +#define MI_WAIT_FOR_EVENT ((0x3<<23)) +#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) +#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) + +#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23)) #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i830.h linux.21rc5-ac2/drivers/char/drm/i830.h --- linux.21rc5/drivers/char/drm/i830.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i830.h 2003-04-22 16:44:36.000000000 +0100 @@ -41,6 +41,48 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "VA Linux Systems Inc." + +#define DRIVER_NAME "i830" +#define DRIVER_DESC "Intel 830M" +#define DRIVER_DATE "20021108" + +/* Interface history: + * + * 1.1: Original. + * 1.2: ? + * 1.3: New irq emit/wait ioctls. + * New pageflip ioctl. + * New getparam ioctl. + * State for texunits 3&4 in sarea. + * New (alternative) layout for texture state. 
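
The BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() macros added to i830_drv.h above implement a single-producer ring: each write lands at a byte-offset tail that wraps through a power-of-two mask, and only ADVANCE_LP_RING() publishes the new tail to the hardware register. A self-contained model of the arithmetic (sizes and names invented for illustration; the real macros defer the space accounting to ADVANCE_LP_RING()):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define RING_BYTES 64                   /* power of two */
    #define RING_MASK  (RING_BYTES - 1)     /* plays tail_mask */

    struct ring {
            uint32_t buf[RING_BYTES / 4];
            unsigned int tail;              /* byte offset */
            unsigned int space;             /* free bytes */
    };

    /* OUT_RING(): store a dword at the tail, advance and wrap. */
    static void out_ring(struct ring *r, uint32_t v)
    {
            r->buf[r->tail / 4] = v;
            r->tail = (r->tail + 4) & RING_MASK;
            r->space -= 4;
    }

    int main(void)
    {
            struct ring r;

            memset(&r, 0, sizeof(r));
            r.space = RING_BYTES;

            /* BEGIN_LP_RING(2) would first ensure space >= 2*4,
             * calling i830_wait_ring() otherwise. */
            out_ring(&r, 0xdeadbeef);
            out_ring(&r, 0);
            /* ADVANCE_LP_RING() would now write r.tail to the
             * LP_RING tail register. */
            printf("tail=%u space=%u\n", r.tail, r.space);
            return 0;
    }
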
+ */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 3 +#define DRIVER_PATCHLEVEL 2 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)] = { i830_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)] = { i830_dma_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)] = { i830_clear_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)] = { i830_getage, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)] = { i830_getbuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)] = { i830_swap_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)] = { i830_copybuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)] = { i830_docopy, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_FLIP)] = { i830_flip_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_IRQ_EMIT)] = { i830_irq_emit, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_IRQ_WAIT)] = { i830_irq_wait, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_GETPARAM)] = { i830_getparam, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_SETPARAM)] = { i830_setparam, 1, 0 } + +#define __HAVE_COUNTERS 4 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY +#define __HAVE_COUNTER9 _DRM_STAT_DMA + /* Driver customization: */ #define __HAVE_RELEASE 1 @@ -60,51 +102,50 @@ i830_dma_quiescent( dev ); \ } while (0) + +/* Driver will work either way: IRQ's save cpu time when waiting for + * the card, but are subject to subtle interactions between bios, + * hardware and the driver. + */ +#define USE_IRQS 0 + + +#if USE_IRQS #define __HAVE_DMA_IRQ 1 -#define __HAVE_DMA_IRQ_BH 1 -#define __HAVE_SHARED_IRQ 1 -#define DRIVER_PREINSTALL() do { \ - drm_i830_private_t *dev_priv = \ - (drm_i830_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I830_READ16( I830REG_HWSTAM ); \ - tmp = tmp & 0x6000; \ - I830_WRITE16( I830REG_HWSTAM, tmp ); \ - \ - tmp = I830_READ16( I830REG_INT_MASK_R ); \ - tmp = tmp & 0x6000; /* Unmask interrupts */ \ - I830_WRITE16( I830REG_INT_MASK_R, tmp ); \ - tmp = I830_READ16( I830REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \ -} while (0) +#define __HAVE_SHARED_IRQ 1 -#define DRIVER_POSTINSTALL() do { \ - drm_i830_private_t *dev_priv = \ +#define DRIVER_PREINSTALL() do { \ + drm_i830_private_t *dev_priv = \ (drm_i830_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I830_READ16( I830REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; \ - tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \ - I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \ + \ + I830_WRITE16( I830REG_HWSTAM, 0xffff ); \ + I830_WRITE16( I830REG_INT_MASK_R, 0x0 ); \ + I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 ); \ } while (0) -#define DRIVER_UNINSTALL() do { \ - drm_i830_private_t *dev_priv = \ - (drm_i830_private_t *)dev->dev_private; \ - u16 tmp; \ - if ( dev_priv ) { \ - tmp = I830_READ16( I830REG_INT_IDENTITY_R ); \ - tmp = tmp & ~(0x6000); /* Clear all interrupts */ \ - if ( tmp != 0 ) \ - I830_WRITE16( I830REG_INT_IDENTITY_R, tmp ); \ - \ - tmp = I830_READ16( I830REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \ - } \ + +#define DRIVER_POSTINSTALL() do { \ + drm_i830_private_t *dev_priv = \ + (drm_i830_private_t *)dev->dev_private; \ + I830_WRITE16( I830REG_INT_ENABLE_R, 0x2 ); \ + atomic_set(&dev_priv->irq_received, 0); \ + atomic_set(&dev_priv->irq_emitted, 0); \ + init_waitqueue_head(&dev_priv->irq_queue); \ } while 
(0) + +/* This gets called too late to be useful: dev_priv has already been + * freed. + */ +#define DRIVER_UNINSTALL() do { \ +} while (0) + +#else +#define __HAVE_DMA_IRQ 0 +#endif + + + /* Buffer customization: */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/i830_irq.c linux.21rc5-ac2/drivers/char/drm/i830_irq.c --- linux.21rc5/drivers/char/drm/i830_irq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/i830_irq.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,178 @@ +/* i830_dma.c -- DMA support for the I830 -*- linux-c -*- + * + * Copyright 2002 Tungsten Graphics, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Keith Whitwell + * + */ + + +#include "i830.h" +#include "drmP.h" +#include "drm.h" +#include "i830_drm.h" +#include "i830_drv.h" +#include /* For task queue support */ +#include + + +void DRM(dma_service)(int irq, void *device, struct pt_regs *regs) +{ + drm_device_t *dev = (drm_device_t *)device; + drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; + u16 temp; + + temp = I830_READ16(I830REG_INT_IDENTITY_R); + printk("%s: %x\n", __FUNCTION__, temp); + + if(temp == 0) + return; + + I830_WRITE16(I830REG_INT_IDENTITY_R, temp); + + if (temp & 2) { + atomic_inc(&dev_priv->irq_received); + wake_up_interruptible(&dev_priv->irq_queue); + } +} + + +int i830_emit_irq(drm_device_t *dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG("%s\n", __FUNCTION__); + + atomic_inc(&dev_priv->irq_emitted); + + BEGIN_LP_RING(2); + OUT_RING( 0 ); + OUT_RING( GFX_OP_USER_INTERRUPT ); + ADVANCE_LP_RING(); + + return atomic_read(&dev_priv->irq_emitted); +} + + +int i830_wait_irq(drm_device_t *dev, int irq_nr) +{ + drm_i830_private_t *dev_priv = + (drm_i830_private_t *)dev->dev_private; + DECLARE_WAITQUEUE(entry, current); + unsigned long end = jiffies + HZ*3; + int ret = 0; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if (atomic_read(&dev_priv->irq_received) >= irq_nr) + return 0; + + dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; + + add_wait_queue(&dev_priv->irq_queue, &entry); + + for (;;) { + current->state = TASK_INTERRUPTIBLE; + if (atomic_read(&dev_priv->irq_received) >= irq_nr) + break; + if (time_after(jiffies, end)) { + DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n", + I830_READ16( I830REG_INT_IDENTITY_R ), + I830_READ16( I830REG_INT_MASK_R ), + I830_READ16( I830REG_INT_ENABLE_R ), + I830_READ16( I830REG_HWSTAM )); + + ret = -EBUSY; /* Lockup? Missed irq? */ + break; + } + schedule_timeout(HZ*3); + if (signal_pending(current)) { + ret = -EINTR; + break; + } + } + + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->irq_queue, &entry); + return ret; +} + + +/* Needs the lock as it touches the ring. + */ +int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_irq_emit_t emit; + int result; + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i830_irq_emit called without lock held\n"); + return -EINVAL; + } + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + if (copy_from_user( &emit, (drm_i830_irq_emit_t *)arg, sizeof(emit) )) + return -EFAULT; + + result = i830_emit_irq( dev ); + + if ( copy_to_user( emit.irq_seq, &result, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + + return 0; +} + + +/* Doesn't need the hardware lock. 
+ */ +int i830_irq_wait( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_irq_wait_t irqwait; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + if (copy_from_user( &irqwait, (drm_i830_irq_wait_t *)arg, + sizeof(irqwait) )) + return -EFAULT; + + return i830_wait_irq( dev, irqwait.irq_seq ); +} + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/Makefile linux.21rc5-ac2/drivers/char/drm/Makefile --- linux.21rc5/drivers/char/drm/Makefile 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/Makefile 2003-04-22 16:44:36.000000000 +0100 @@ -10,9 +10,9 @@ r128-objs := r128_drv.o r128_cce.o r128_state.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o i810-objs := i810_drv.o i810_dma.o -i830-objs := i830_drv.o i830_dma.o +i830-objs := i830_drv.o i830_dma.o i830_irq.o -radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o +radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o ffb-objs := ffb_drv.o ffb_context.o sis-objs := sis_drv.o sis_ds.o sis_mm.o diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/mga_dma.c linux.21rc5-ac2/drivers/char/drm/mga_dma.c --- linux.21rc5/drivers/char/drm/mga_dma.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/mga_dma.c 2003-04-22 16:44:36.000000000 +0100 @@ -35,10 +35,11 @@ #include "mga.h" #include "drmP.h" +#include "drm.h" +#include "mga_drm.h" #include "mga_drv.h" - -#include /* For task queue support */ -#include +#include +#include "drm_os_linux.h" #define MGA_DEFAULT_USEC_TIMEOUT 10000 #define MGA_FREELIST_DEBUG 0 @@ -52,7 +53,7 @@ { u32 status = 0; int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK; @@ -74,7 +75,7 @@ { u32 status = 0; int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { status = MGA_READ( MGA_STATUS ) & MGA_DMA_IDLE_MASK; @@ -93,7 +94,7 @@ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_primary_buffer_t *primary = &dev_priv->prim; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* The primary DMA stream should look like new right about now. */ @@ -114,7 +115,7 @@ int mga_do_engine_reset( drm_mga_private_t *dev_priv ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* Okay, so we've completely screwed up and locked the engine. * How about we clean up after ourselves? 
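
The i830_emit_irq()/i830_wait_irq() pair in i830_irq.c above is a sequence-number handshake: emit bumps irq_emitted and queues a GFX_OP_USER_INTERRUPT, the interrupt handler bumps irq_received and wakes irq_queue, and a waiter sleeps until irq_received catches up with its sequence number. A user-space model of that flow, with a thread standing in for the interrupt handler (the 3-second timeout and signal handling of the real code are omitted; build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t irq_queue = PTHREAD_COND_INITIALIZER;
    static int irq_emitted, irq_received;

    static int emit_irq(void)               /* plays i830_emit_irq() */
    {
            int seq;

            pthread_mutex_lock(&lock);
            seq = ++irq_emitted;
            pthread_mutex_unlock(&lock);
            return seq;
    }

    static void wait_irq(int seq)           /* plays i830_wait_irq() */
    {
            pthread_mutex_lock(&lock);
            while (irq_received < seq)
                    pthread_cond_wait(&irq_queue, &lock);
            pthread_mutex_unlock(&lock);
    }

    static void *fake_hw(void *arg)         /* plays DRM(dma_service) */
    {
            (void)arg;
            usleep(1000);                   /* command crosses the ring */
            pthread_mutex_lock(&lock);
            irq_received++;
            pthread_cond_broadcast(&irq_queue);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t hw;
            int seq = emit_irq();

            pthread_create(&hw, NULL, fake_hw, NULL);
            wait_irq(seq);
            pthread_join(hw, NULL);
            printf("irq %d completed\n", seq);
            return 0;
    }
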
@@ -160,8 +161,8 @@ u32 head, tail; u32 status = 0; int i; - DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DMA_LOCALS; + DRM_DEBUG( "\n" ); /* We need to wait so that we can do an safe flush */ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { @@ -207,7 +208,7 @@ mga_flush_write_combine(); MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); - DRM_DEBUG( "%s: done.\n", __FUNCTION__ ); + DRM_DEBUG( "done.\n" ); } void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ) @@ -215,7 +216,7 @@ drm_mga_primary_buffer_t *primary = &dev_priv->prim; u32 head, tail; DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_DMA_WRAP(); @@ -250,7 +251,7 @@ MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); set_bit( 0, &primary->wrapped ); - DRM_DEBUG( "%s: done.\n", __FUNCTION__ ); + DRM_DEBUG( "done.\n" ); } void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv ) @@ -258,7 +259,7 @@ drm_mga_primary_buffer_t *primary = &dev_priv->prim; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; u32 head = dev_priv->primary->offset; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); sarea_priv->last_wrap++; DRM_DEBUG( " wrap = %d\n", sarea_priv->last_wrap ); @@ -267,7 +268,7 @@ MGA_WRITE( MGA_PRIMADDRESS, head | MGA_DMA_GENERAL ); clear_bit( 0, &primary->wrapped ); - DRM_DEBUG( "%s: done.\n", __FUNCTION__ ); + DRM_DEBUG( "done.\n" ); } @@ -307,8 +308,7 @@ drm_mga_buf_priv_t *buf_priv; drm_mga_freelist_t *entry; int i; - DRM_DEBUG( "%s: count=%d\n", - __FUNCTION__, dma->buf_count ); + DRM_DEBUG( "count=%d\n", dma->buf_count ); dev_priv->head = DRM(alloc)( sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER ); @@ -354,7 +354,7 @@ drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *entry; drm_mga_freelist_t *next; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); entry = dev_priv->head; while ( entry ) { @@ -392,7 +392,7 @@ drm_mga_freelist_t *prev; drm_mga_freelist_t *tail = dev_priv->tail; u32 head, wrap; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); head = MGA_READ( MGA_PRIMADDRESS ); wrap = dev_priv->sarea_priv->last_wrap; @@ -424,8 +424,7 @@ drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_freelist_t *head, *entry, *prev; - DRM_DEBUG( "%s: age=0x%06lx wrap=%d\n", - __FUNCTION__, + DRM_DEBUG( "age=0x%06lx wrap=%d\n", buf_priv->list_entry->age.head - dev_priv->primary->offset, buf_priv->list_entry->age.wrap ); @@ -458,9 +457,8 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) { drm_mga_private_t *dev_priv; - struct list_head *list; int ret; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv = DRM(alloc)( sizeof(drm_mga_private_t), DRM_MEM_DRIVER ); if ( !dev_priv ) @@ -494,15 +492,8 @@ dev_priv->texture_offset = init->texture_offset[0]; dev_priv->texture_size = init->texture_size[0]; - list_for_each( list, &dev->maplist->head ) { - drm_map_list_t *entry = (drm_map_list_t *)list; - if ( entry->map && - entry->map->type == _DRM_SHM && - (entry->map->flags & _DRM_CONTAINS_LOCK) ) { - dev_priv->sarea = entry->map; - break; - } - } + DRM_GETSAREA(); + if(!dev_priv->sarea) { DRM_ERROR( "failed to find sarea!\n" ); /* Assign dev_private so we can do cleanup. 
*/ @@ -626,8 +617,6 @@ dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE; - spin_lock_init( &dev_priv->prim.list_lock ); - dev_priv->prim.status[0] = dev_priv->primary->offset; dev_priv->prim.status[1] = 0; @@ -650,7 +639,7 @@ int mga_do_cleanup_dma( drm_device_t *dev ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); if ( dev->dev_private ) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -725,7 +714,7 @@ #if MGA_DMA_DEBUG int ret = mga_do_wait_for_idle( dev_priv ); if ( ret < 0 ) - DRM_INFO( __FUNCTION__": -EBUSY\n" ); + DRM_INFO( "%s: -EBUSY\n", __FUNCTION__ ); return ret; #else return mga_do_wait_for_idle( dev_priv ); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/mga_drm.h linux.21rc5-ac2/drivers/char/drm/mga_drm.h --- linux.21rc5/drivers/char/drm/mga_drm.h 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/mga_drm.h 2003-04-22 16:44:36.000000000 +0100 @@ -225,6 +225,20 @@ /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmMga.h) */ + +/* MGA specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) +#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t) +#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42) +#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43) +#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t) +#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t) +#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) +#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t) +#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t) + typedef struct _drm_mga_warp_index { int installed; unsigned long phys_addr; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/mga_drv.c linux.21rc5-ac2/drivers/char/drm/mga_drv.c --- linux.21rc5/drivers/char/drm/mga_drv.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/mga_drv.c 2003-04-22 16:44:36.000000000 +0100 @@ -32,37 +32,9 @@ #include #include "mga.h" #include "drmP.h" +#include "drm.h" +#include "mga_drm.h" #include "mga_drv.h" - -#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
- -#define DRIVER_NAME "mga" -#define DRIVER_DESC "Matrox G200/G400" -#define DRIVER_DATE "20010321" - -#define DRIVER_MAJOR 3 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 2 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma_buffers, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_RESET)] = { mga_dma_reset, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, - - -#define __HAVE_COUNTERS 3 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -70,27 +42,6 @@ #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" - -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init mga_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", mga_options ); -#endif - - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/mga_drv.h linux.21rc5-ac2/drivers/char/drm/mga_drv.h --- linux.21rc5/drivers/char/drm/mga_drv.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/mga_drv.h 2003-04-22 16:44:36.000000000 +0100 @@ -46,8 +46,6 @@ u32 last_wrap; u32 high_mark; - - spinlock_t list_lock; } drm_mga_primary_buffer_t; typedef struct drm_mga_freelist { @@ -257,7 +255,7 @@ #define BEGIN_DMA_WRAP() \ do { \ if ( MGA_VERBOSE ) { \ - DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \ + DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \ DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \ } \ prim = dev_priv->prim.start; \ @@ -276,7 +274,7 @@ #define FLUSH_DMA() \ do { \ if ( 0 ) { \ - DRM_INFO( "%s:\n" , __FUNCTION__); \ + DRM_INFO( "%s:\n", __FUNCTION__ ); \ DRM_INFO( " tail=0x%06x head=0x%06lx\n", \ dev_priv->prim.tail, \ MGA_READ( MGA_PRIMADDRESS ) - \ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/mga.h linux.21rc5-ac2/drivers/char/drm/mga.h --- linux.21rc5/drivers/char/drm/mga.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/mga.h 2003-04-22 16:44:36.000000000 +0100 @@ -41,6 +41,33 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
+ +#define DRIVER_NAME "mga" +#define DRIVER_DESC "Matrox G200/G400" +#define DRIVER_DATE "20010321" + +#define DRIVER_MAJOR 3 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 2 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma_buffers, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_RESET)] = { mga_dma_reset, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, + +#define __HAVE_COUNTERS 3 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY + /* Driver customization: */ #define DRIVER_PRETAKEDOWN() do { \ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/mga_state.c linux.21rc5-ac2/drivers/char/drm/mga_state.c --- linux.21rc5/drivers/char/drm/mga_state.c 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/mga_state.c 2003-04-22 16:44:36.000000000 +0100 @@ -34,8 +34,9 @@ #include "mga.h" #include "drmP.h" -#include "mga_drv.h" #include "drm.h" +#include "mga_drm.h" +#include "mga_drv.h" /* ================================================================ @@ -512,7 +513,7 @@ int nbox = sarea_priv->nbox; int i; DMA_LOCALS; - DRM_DEBUG("%s:\n" , __FUNCTION__); + DRM_DEBUG( "\n" ); BEGIN_DMA( 1 ); @@ -606,7 +607,7 @@ int nbox = sarea_priv->nbox; int i; DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); sarea_priv->last_frame.head = dev_priv->prim.tail; sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap; @@ -760,8 +761,7 @@ u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM; u32 y2; DMA_LOCALS; - DRM_DEBUG( "%s: buf=%d used=%d\n", - __FUNCTION__, buf->idx, buf->used ); + DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used ); y2 = length / 64; @@ -815,7 +815,7 @@ int nbox = sarea_priv->nbox; u32 scandir = 0, i; DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_DMA( 4 + nbox ); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/mga_warp.c linux.21rc5-ac2/drivers/char/drm/mga_warp.c --- linux.21rc5/drivers/char/drm/mga_warp.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/mga_warp.c 2003-04-22 16:44:36.000000000 +0100 @@ -27,8 +27,11 @@ * Gareth Hughes */ + #include "mga.h" #include "drmP.h" +#include "drm.h" +#include "mga_drm.h" #include "mga_drv.h" #include "mga_ucode.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/r128_cce.c linux.21rc5-ac2/drivers/char/drm/r128_cce.c --- linux.21rc5/drivers/char/drm/r128_cce.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/r128_cce.c 2003-04-22 16:44:36.000000000 +0100 @@ -30,14 +30,14 @@ #include "r128.h" #include "drmP.h" +#include "drm.h" +#include "r128_drm.h" #include "r128_drv.h" - -#include /* For task queue support */ -#include +#include "drm_os_linux.h" +#include #define R128_FIFO_DEBUG 0 - /* CCE microcode (from ATI) */ static u32 r128_cce_microcode[] = { 0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0, 
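
Several of the r128 hunks below tighten wait-return checks from 'if (ret < 0)' to 'if (ret)'; the routines being checked all follow the same bounded-poll pattern as r128_do_wait_for_fifo(): spin on a status register for at most usec_timeout iterations, then give up with -EBUSY. A stand-alone model of that pattern (fifo_free() fakes the R128_GUI_STAT read; the timeout value is illustrative):

    #include <stdio.h>
    #include <unistd.h>
    #include <errno.h>

    static int usec_timeout = 10000;

    static int fifo_free(void)
    {
            static int n;
            return ++n;                     /* pretend the FIFO drains */
    }

    /* r128_do_wait_for_fifo() pattern: poll, back off 1us, time out. */
    static int wait_for_fifo(int entries)
    {
            int i;

            for (i = 0; i < usec_timeout; i++) {
                    if (fifo_free() >= entries)
                            return 0;
                    usleep(1);
            }
            return -EBUSY;
    }

    int main(void)
    {
            int ret = wait_for_fifo(64);

            printf("wait_for_fifo: %d\n", ret);
            return ret ? 1 : 0;
    }
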
@@ -83,6 +83,7 @@ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; +int r128_do_wait_for_idle( drm_r128_private_t *dev_priv ); int R128_READ_PLL(drm_device_t *dev, int addr) { @@ -131,7 +132,7 @@ } #if R128_FIFO_DEBUG - DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + DRM_ERROR( "failed!\n" ); #endif return -EBUSY; } @@ -147,7 +148,7 @@ } #if R128_FIFO_DEBUG - DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + DRM_ERROR( "failed!\n" ); #endif return -EBUSY; } @@ -157,7 +158,7 @@ int i, ret; ret = r128_do_wait_for_fifo( dev_priv, 64 ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { if ( !(R128_READ( R128_GUI_STAT ) & R128_GUI_ACTIVE) ) { @@ -168,7 +169,7 @@ } #if R128_FIFO_DEBUG - DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + DRM_ERROR( "failed!\n" ); #endif return -EBUSY; } @@ -183,7 +184,7 @@ { int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); r128_do_wait_for_idle( dev_priv ); @@ -319,7 +320,7 @@ u32 ring_start; u32 tmp; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* The manual (p. 2) says this address is in "VM space". This * means it's an offset from the start of AGP space. @@ -351,8 +352,8 @@ R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, entry->busaddr[page_ofs]); - DRM_DEBUG( "ring rptr: offset=0x%08llx handle=0x%08lx\n", - (u64)entry->busaddr[page_ofs], + DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", + entry->busaddr[page_ofs], entry->handle + tmp_ofs ); } @@ -374,9 +375,8 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) { drm_r128_private_t *dev_priv; - struct list_head *list; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv = DRM(alloc)( sizeof(drm_r128_private_t), DRM_MEM_DRIVER ); if ( dev_priv == NULL ) @@ -481,15 +481,8 @@ dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) | (dev_priv->span_offset >> 5)); - list_for_each(list, &dev->maplist->head) { - drm_map_list_t *r_list = (drm_map_list_t *)list; - if( r_list->map && - r_list->map->type == _DRM_SHM && - r_list->map->flags & _DRM_CONTAINS_LOCK ) { - dev_priv->sarea = r_list->map; - break; - } - } + DRM_GETSAREA(); + if(!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); dev->dev_private = (void *)dev_priv; @@ -622,16 +615,20 @@ if ( dev->dev_private ) { drm_r128_private_t *dev_priv = dev->dev_private; +#if __REALLY_HAVE_SG if ( !dev_priv->is_pci ) { +#endif DRM_IOREMAPFREE( dev_priv->cce_ring ); DRM_IOREMAPFREE( dev_priv->ring_rptr ); DRM_IOREMAPFREE( dev_priv->buffers ); +#if __REALLY_HAVE_SG } else { if (!DRM(ati_pcigart_cleanup)( dev, dev_priv->phys_pci_gart, dev_priv->bus_pci_gart )) DRM_ERROR( "failed to cleanup PCI GART!\n" ); } +#endif DRM(free)( dev->dev_private, sizeof(drm_r128_private_t), DRM_MEM_DRIVER ); @@ -713,7 +710,7 @@ */ if ( stop.idle ) { ret = r128_do_cce_idle( dev_priv ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; } /* Finally, we can turn off the CCE. 
If the engine isn't idle, @@ -790,7 +787,7 @@ static int r128_do_init_pageflip( drm_device_t *dev ) { drm_r128_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv->crtc_offset = R128_READ( R128_CRTC_OFFSET ); dev_priv->crtc_offset_cntl = R128_READ( R128_CRTC_OFFSET_CNTL ); @@ -808,7 +805,7 @@ int r128_do_cleanup_pageflip( drm_device_t *dev ) { drm_r128_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); R128_WRITE( R128_CRTC_OFFSET, dev_priv->crtc_offset ); R128_WRITE( R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl ); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/r128_drm.h linux.21rc5-ac2/drivers/char/drm/r128_drm.h --- linux.21rc5/drivers/char/drm/r128_drm.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/r128_drm.h 2003-04-22 16:44:36.000000000 +0100 @@ -170,6 +170,27 @@ /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmR128.h) */ + +/* Rage 128 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) +#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41) +#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t) +#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43) +#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44) +#define DRM_IOCTL_R128_RESET DRM_IO( 0x46) +#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47) +#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t) +#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t) +#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t) +#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t) +#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t) +#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t) +#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t) +#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t) +#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( 0x51, drm_r128_clear2_t) + typedef struct drm_r128_init { enum { R128_INIT_CCE = 0x01, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/r128_drv.c linux.21rc5-ac2/drivers/char/drm/r128_drv.c --- linux.21rc5/drivers/char/drm/r128_drv.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/r128_drv.c 2003-04-22 16:44:36.000000000 +0100 @@ -32,48 +32,11 @@ #include #include "r128.h" #include "drmP.h" +#include "drm.h" +#include "r128_drm.h" #include "r128_drv.h" #include "ati_pcigart.h" -#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
- -#define DRIVER_NAME "r128" -#define DRIVER_DESC "ATI Rage 128" -#define DRIVER_DATE "20010917" - -#define DRIVER_MAJOR 2 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_FULLSCREEN)] = { r128_fullscreen, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, - - -#if 0 -/* GH: Count data sent to card via ring or vertex/indirect buffers. - */ -#define __HAVE_COUNTERS 3 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#endif - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -81,26 +44,6 @@ #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" - -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init r128_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", r128_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/r128_drv.h linux.21rc5-ac2/drivers/char/drm/r128_drv.h --- linux.21rc5/drivers/char/drm/r128_drv.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/r128_drv.h 2003-04-22 16:44:36.000000000 +0100 @@ -33,9 +33,10 @@ #ifndef __R128_DRV_H__ #define __R128_DRV_H__ +#include -#define GET_RING_HEAD( ring ) le32_to_cpu( *(ring)->head ) -#define SET_RING_HEAD( ring, val ) *(ring)->head = cpu_to_le32( val ) +#define GET_RING_HEAD(ring) readl( (volatile u32 *) (ring)->head ) +#define SET_RING_HEAD(ring,val) writel( (val), (volatile u32 *) (ring)->head ) typedef struct drm_r128_freelist { unsigned int age; @@ -384,44 +385,11 @@ #define R128_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) #define R128_ADDR(reg) (R128_BASE( reg ) + reg) -#define R128_DEREF(reg) *(volatile u32 *)R128_ADDR( reg ) -#ifdef __alpha__ -#define R128_READ(reg) (_R128_READ((u32 *)R128_ADDR(reg))) -static inline u32 _R128_READ(u32 *addr) -{ - mb(); - return *(volatile u32 *)addr; -} -#define R128_WRITE(reg,val) \ -do { \ - wmb(); \ - R128_DEREF(reg) = val; \ -} while (0) -#else -#define R128_READ(reg) le32_to_cpu( R128_DEREF( reg ) ) -#define R128_WRITE(reg,val) \ -do { \ - R128_DEREF( reg ) = cpu_to_le32( val ); \ -} while (0) -#endif +#define R128_READ(reg) readl( (volatile u32 *) R128_ADDR(reg) ) +#define R128_WRITE(reg,val) writel( (val) , (volatile u32 *) R128_ADDR(reg)) -#define R128_DEREF8(reg) *(volatile u8 *)R128_ADDR( reg ) -#ifdef __alpha__ -#define R128_READ8(reg) _R128_READ8((u8 *)R128_ADDR(reg)) -static inline u8 _R128_READ8(u8 *addr) -{ - mb(); - return *(volatile u8 *)addr; -} -#define R128_WRITE8(reg,val) \ -do { \ - wmb(); \ - R128_DEREF8(reg) = val; \ -} while (0) -#else -#define R128_READ8(reg) R128_DEREF8( reg ) -#define R128_WRITE8(reg,val) do { R128_DEREF8( reg ) = val; } while (0) -#endif +#define R128_READ8(reg) readb( (volatile u8 *) R128_ADDR(reg) ) +#define R128_WRITE8(reg,val) writeb( (val), (volatile u8 *) R128_ADDR(reg) ) #define R128_WRITE_PLL(addr,val) \ do { \ @@ -470,6 +438,7 @@ return -EBUSY; \ } \ __ring_space_done: ; \ + break; \ } while (0) #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ @@ -493,7 +462,11 @@ * Ring control */ +#if defined(__powerpc__) +#define r128_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring ) +#else #define r128_flush_write_combine() mb() +#endif #define R128_VERBOSE 0 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/r128.h linux.21rc5-ac2/drivers/char/drm/r128.h --- linux.21rc5/drivers/char/drm/r128.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/r128.h 2003-04-22 16:44:36.000000000 +0100 @@ -43,6 +43,35 @@ #define __HAVE_SG 1 #define __HAVE_PCI_DMA 1 +#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
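The macro rewrites above (GET_RING_HEAD/SET_RING_HEAD and the R128_READ/R128_WRITE family) replace hand-rolled pointer dereferences, explicit cpu_to_le32()/le32_to_cpu() swaps and #ifdef __alpha__ barrier variants with readl()/writel(), which every architecture already defines with little-endian semantics and the required ordering. The PowerPC-only r128_flush_write_combine() likewise becomes a read-back of the ring head, since a read from uncached space forces pending write-combined stores out. A compressed sketch of the pattern (the EX_ names are illustrative only):

#include <asm/io.h>

/* One portable definition replaces two per-architecture variants:
 * readl()/writel() byte-swap on big-endian CPUs and include the
 * barriers that the old Alpha-specific mb()/wmb() code supplied.
 */
#define EX_MMIO_READ(addr)	readl( (volatile u32 *)(addr) )
#define EX_MMIO_WRITE(addr,val)	writel( (val), (volatile u32 *)(addr) )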
+ +#define DRIVER_NAME "r128" +#define DRIVER_DESC "ATI Rage 128" +#define DRIVER_DATE "20010917" + +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 0 + + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_FULLSCREEN)] = { r128_fullscreen, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, + /* Driver customization: */ #define DRIVER_PRERELEASE() do { \ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/r128_state.c linux.21rc5-ac2/drivers/char/drm/r128_state.c --- linux.21rc5/drivers/char/drm/r128_state.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/r128_state.c 2003-04-22 16:44:36.000000000 +0100 @@ -29,9 +29,9 @@ #include "r128.h" #include "drmP.h" -#include "r128_drv.h" #include "drm.h" -#include +#include "r128_drm.h" +#include "r128_drv.h" /* ================================================================ @@ -528,7 +528,7 @@ { drm_r128_private_t *dev_priv = dev->dev_private; RING_LOCALS; - DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page ); + DRM_DEBUG( "page=%d\n", dev_priv->current_page ); #if R128_PERFORMANCE_BOXES /* Do some trivial performance monitoring... @@ -577,8 +577,7 @@ int prim = buf_priv->prim; int i = 0; RING_LOCALS; - DRM_DEBUG( "%s: buf=%d nbox=%d\n", - __FUNCTION__, buf->idx, sarea_priv->nbox ); + DRM_DEBUG( "buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox ); if ( 0 ) r128_print_dirty( "dispatch_vertex", sarea_priv->dirty ); @@ -789,7 +788,7 @@ u32 *data; int dword_shift, dwords; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* The compiler won't optimize away a division by a variable, * even if the only legal values are powers of two. 
Thus, we'll diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/radeon_cp.c linux.21rc5-ac2/drivers/char/drm/radeon_cp.c --- linux.21rc5/drivers/char/drm/radeon_cp.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/radeon_cp.c 2003-04-22 16:44:36.000000000 +0100 @@ -30,21 +30,278 @@ #include "radeon.h" #include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" #include "radeon_drv.h" +#include "drm_os_linux.h" #include /* For task queue support */ #include - #define RADEON_FIFO_DEBUG 0 -#if defined(__alpha__) -# define PCIGART_ENABLED -#else -# undef PCIGART_ENABLED -#endif /* CP microcode (from ATI) */ +static u32 R200_cp_microcode[][2] = { + { 0x21007000, 0000000000 }, + { 0x20007000, 0000000000 }, + { 0x000000ab, 0x00000004 }, + { 0x000000af, 0x00000004 }, + { 0x66544a49, 0000000000 }, + { 0x49494174, 0000000000 }, + { 0x54517d83, 0000000000 }, + { 0x498d8b64, 0000000000 }, + { 0x49494949, 0000000000 }, + { 0x49da493c, 0000000000 }, + { 0x49989898, 0000000000 }, + { 0xd34949d5, 0000000000 }, + { 0x9dc90e11, 0000000000 }, + { 0xce9b9b9b, 0000000000 }, + { 0x000f0000, 0x00000016 }, + { 0x352e232c, 0000000000 }, + { 0x00000013, 0x00000004 }, + { 0x000f0000, 0x00000016 }, + { 0x352e272c, 0000000000 }, + { 0x000f0001, 0x00000016 }, + { 0x3239362f, 0000000000 }, + { 0x000077ef, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x00000020, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00061000, 0x00000002 }, + { 0x00000020, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00061000, 0x00000002 }, + { 0x00000020, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00000016, 0x00000004 }, + { 0x0003802a, 0x00000002 }, + { 0x040067e0, 0x00000002 }, + { 0x00000016, 0x00000004 }, + { 0x000077e0, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x000037e1, 0x00000002 }, + { 0x040067e1, 0x00000006 }, + { 0x000077e0, 0x00000002 }, + { 0x000077e1, 0x00000002 }, + { 0x000077e1, 0x00000006 }, + { 0xffffffff, 0000000000 }, + { 0x10000000, 0000000000 }, + { 0x0003802a, 0x00000002 }, + { 0x040067e0, 0x00000006 }, + { 0x00007675, 0x00000002 }, + { 0x00007676, 0x00000002 }, + { 0x00007677, 0x00000002 }, + { 0x00007678, 0x00000006 }, + { 0x0003802b, 0x00000002 }, + { 0x04002676, 0x00000002 }, + { 0x00007677, 0x00000002 }, + { 0x00007678, 0x00000006 }, + { 0x0000002e, 0x00000018 }, + { 0x0000002e, 0x00000018 }, + { 0000000000, 0x00000006 }, + { 0x0000002f, 0x00000018 }, + { 0x0000002f, 0x00000018 }, + { 0000000000, 0x00000006 }, + { 0x01605000, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x00098000, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x64c0603d, 0x00000004 }, + { 0x00080000, 0x00000016 }, + { 0000000000, 0000000000 }, + { 0x0400251d, 0x00000002 }, + { 0x00007580, 0x00000002 }, + { 0x00067581, 0x00000002 }, + { 0x04002580, 0x00000002 }, + { 0x00067581, 0x00000002 }, + { 0x00000046, 0x00000004 }, + { 0x00005000, 0000000000 }, + { 0x00061000, 0x00000002 }, + { 0x0000750e, 0x00000002 }, + { 0x00019000, 0x00000002 }, + { 0x00011055, 0x00000014 }, + { 0x00000055, 0x00000012 }, + { 0x0400250f, 0x00000002 }, + { 0x0000504a, 0x00000004 }, + { 0x00007565, 0x00000002 }, + { 0x00007566, 0x00000002 }, + { 0x00000051, 0x00000004 }, + { 0x01e655b4, 0x00000002 }, + { 0x4401b0dc, 0x00000002 }, + { 0x01c110dc, 0x00000002 }, + { 0x2666705d, 0x00000018 }, + { 0x040c2565, 0x00000002 }, + { 0x0000005d, 0x00000018 }, + { 0x04002564, 0x00000002 }, + { 0x00007566, 0x00000002 }, + { 0x00000054, 0x00000004 }, + { 0x00401060, 0x00000008 }, + { 0x00101000, 
0x00000002 }, + { 0x000d80ff, 0x00000002 }, + { 0x00800063, 0x00000008 }, + { 0x000f9000, 0x00000002 }, + { 0x000e00ff, 0x00000002 }, + { 0000000000, 0x00000006 }, + { 0x00000080, 0x00000018 }, + { 0x00000054, 0x00000004 }, + { 0x00007576, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x00009000, 0x00000002 }, + { 0x00041000, 0x00000002 }, + { 0x0c00350e, 0x00000002 }, + { 0x00049000, 0x00000002 }, + { 0x00051000, 0x00000002 }, + { 0x01e785f8, 0x00000002 }, + { 0x00200000, 0x00000002 }, + { 0x00600073, 0x0000000c }, + { 0x00007563, 0x00000002 }, + { 0x006075f0, 0x00000021 }, + { 0x20007068, 0x00000004 }, + { 0x00005068, 0x00000004 }, + { 0x00007576, 0x00000002 }, + { 0x00007577, 0x00000002 }, + { 0x0000750e, 0x00000002 }, + { 0x0000750f, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00600076, 0x0000000c }, + { 0x006075f0, 0x00000021 }, + { 0x000075f8, 0x00000002 }, + { 0x00000076, 0x00000004 }, + { 0x000a750e, 0x00000002 }, + { 0x0020750f, 0x00000002 }, + { 0x00600079, 0x00000004 }, + { 0x00007570, 0x00000002 }, + { 0x00007571, 0x00000002 }, + { 0x00007572, 0x00000006 }, + { 0x00005000, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00007568, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x00000084, 0x0000000c }, + { 0x00058000, 0x00000002 }, + { 0x0c607562, 0x00000002 }, + { 0x00000086, 0x00000004 }, + { 0x00600085, 0x00000004 }, + { 0x400070dd, 0000000000 }, + { 0x000380dd, 0x00000002 }, + { 0x00000093, 0x0000001c }, + { 0x00065095, 0x00000018 }, + { 0x040025bb, 0x00000002 }, + { 0x00061096, 0x00000018 }, + { 0x040075bc, 0000000000 }, + { 0x000075bb, 0x00000002 }, + { 0x000075bc, 0000000000 }, + { 0x00090000, 0x00000006 }, + { 0x00090000, 0x00000002 }, + { 0x000d8002, 0x00000006 }, + { 0x00005000, 0x00000002 }, + { 0x00007821, 0x00000002 }, + { 0x00007800, 0000000000 }, + { 0x00007821, 0x00000002 }, + { 0x00007800, 0000000000 }, + { 0x01665000, 0x00000002 }, + { 0x000a0000, 0x00000002 }, + { 0x000671cc, 0x00000002 }, + { 0x0286f1cd, 0x00000002 }, + { 0x000000a3, 0x00000010 }, + { 0x21007000, 0000000000 }, + { 0x000000aa, 0x0000001c }, + { 0x00065000, 0x00000002 }, + { 0x000a0000, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x000b0000, 0x00000002 }, + { 0x38067000, 0x00000002 }, + { 0x000a00a6, 0x00000004 }, + { 0x20007000, 0000000000 }, + { 0x01200000, 0x00000002 }, + { 0x20077000, 0x00000002 }, + { 0x01200000, 0x00000002 }, + { 0x20007000, 0000000000 }, + { 0x00061000, 0x00000002 }, + { 0x0120751b, 0x00000002 }, + { 0x8040750a, 0x00000002 }, + { 0x8040750b, 0x00000002 }, + { 0x00110000, 0x00000002 }, + { 0x000380dd, 0x00000002 }, + { 0x000000bd, 0x0000001c }, + { 0x00061096, 0x00000018 }, + { 0x844075bd, 0x00000002 }, + { 0x00061095, 0x00000018 }, + { 0x840075bb, 0x00000002 }, + { 0x00061096, 0x00000018 }, + { 0x844075bc, 0x00000002 }, + { 0x000000c0, 0x00000004 }, + { 0x804075bd, 0x00000002 }, + { 0x800075bb, 0x00000002 }, + { 0x804075bc, 0x00000002 }, + { 0x00108000, 0x00000002 }, + { 0x01400000, 0x00000002 }, + { 0x006000c4, 0x0000000c }, + { 0x20c07000, 0x00000020 }, + { 0x000000c6, 0x00000012 }, + { 0x00800000, 0x00000006 }, + { 0x0080751d, 0x00000006 }, + { 0x000025bb, 0x00000002 }, + { 0x000040c0, 0x00000004 }, + { 0x0000775c, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00661000, 0x00000002 }, + { 0x0460275d, 0x00000020 }, + { 0x00004000, 0000000000 }, + { 0x00007999, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00661000, 0x00000002 }, + { 0x0460299b, 0x00000020 }, + { 0x00004000, 0000000000 }, + { 0x01e00830, 0x00000002 }, + { 0x21007000, 0000000000 }, 
+ { 0x00005000, 0x00000002 }, + { 0x00038042, 0x00000002 }, + { 0x040025e0, 0x00000002 }, + { 0x000075e1, 0000000000 }, + { 0x00000001, 0000000000 }, + { 0x000380d9, 0x00000002 }, + { 0x04007394, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, +}; + + static u32 radeon_cp_microcode[][2] = { { 0x21007000, 0000000000 }, { 0x20007000, 0000000000 }, @@ -346,6 +603,8 @@ u32 tmp; int i; + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + tmp = RADEON_READ( RADEON_RB2D_DSTCACHE_CTLSTAT ); tmp |= RADEON_RB2D_DC_FLUSH_ALL; RADEON_WRITE( RADEON_RB2D_DSTCACHE_CTLSTAT, tmp ); @@ -370,6 +629,8 @@ { int i; + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { int slots = ( RADEON_READ( RADEON_RBBM_STATUS ) & RADEON_RBBM_FIFOCNT_MASK ); @@ -388,8 +649,10 @@ { int i, ret; + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + ret = radeon_do_wait_for_fifo( dev_priv, 64 ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { if ( !(RADEON_READ( RADEON_RBBM_STATUS ) @@ -416,16 +679,31 @@ static void radeon_cp_load_microcode( drm_radeon_private_t *dev_priv ) { int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); radeon_do_wait_for_idle( dev_priv ); RADEON_WRITE( RADEON_CP_ME_RAM_ADDR, 0 ); - for ( i = 0 ; i < 256 ; i++ ) { - RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, - radeon_cp_microcode[i][1] ); - RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, - radeon_cp_microcode[i][0] ); + + if (dev_priv->is_r200) + { + DRM_INFO("Loading R200 Microcode\n"); + for ( i = 0 ; i < 256 ; i++ ) + { + RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, + R200_cp_microcode[i][1] ); + RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, + R200_cp_microcode[i][0] ); + } + } + else + { + for ( i = 0 ; i < 256 ; i++ ) { + RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, + radeon_cp_microcode[i][1] ); + RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, + radeon_cp_microcode[i][0] ); + } } } @@ -435,7 +713,7 @@ */ static void radeon_do_cp_flush( drm_radeon_private_t *dev_priv ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); #if 0 u32 tmp; @@ -449,7 +727,7 @@ int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ) { RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_RING( 6 ); @@ -458,6 +736,7 @@ RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); + COMMIT_RING(); return radeon_do_wait_for_idle( dev_priv ); } @@ -467,7 +746,7 @@ static void radeon_do_cp_start( drm_radeon_private_t *dev_priv ) { RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ 
); + DRM_DEBUG( "\n" ); radeon_do_wait_for_idle( dev_priv ); @@ -482,6 +761,7 @@ RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); + COMMIT_RING(); } /* Reset the Command Processor. This will not flush any pending @@ -491,7 +771,7 @@ static void radeon_do_cp_reset( drm_radeon_private_t *dev_priv ) { u32 cur_read_ptr; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR ); RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr ); @@ -505,7 +785,7 @@ */ static void radeon_do_cp_stop( drm_radeon_private_t *dev_priv ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); RADEON_WRITE( RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS ); @@ -518,7 +798,7 @@ { drm_radeon_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); radeon_do_pixcache_flush( dev_priv ); @@ -603,6 +883,7 @@ /* Set the write pointer delay */ RADEON_WRITE( RADEON_CP_RB_WPTR_DELAY, 0 ); + RADEON_READ( RADEON_CP_RB_WPTR_DELAY ); /* read back to propagate */ /* Initialize the ring buffer's read and write pointers */ cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR ); @@ -622,13 +903,63 @@ RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); - DRM_DEBUG( "ring rptr: offset=0x%08llx handle=0x%08lx\n", - (u64)entry->busaddr[page_ofs], + DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", + entry->busaddr[page_ofs], entry->handle + tmp_ofs ); } + /* Initialize the scratch register pointer. This will cause + * the scratch register values to be written out to memory + * whenever they are updated. + * + * We simply put this behind the ring read pointer, this works + * with PCI GART as well as (whatever kind of) AGP GART + */ + RADEON_WRITE( RADEON_SCRATCH_ADDR, RADEON_READ( RADEON_CP_RB_RPTR_ADDR ) + + RADEON_SCRATCH_REG_OFFSET ); + + dev_priv->scratch = ((__volatile__ u32 *) + dev_priv->ring.head + + (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); + + RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 ); + + /* Writeback doesn't seem to work everywhere, test it first */ + writel(0, &dev_priv->scratch[1]); + RADEON_WRITE( RADEON_SCRATCH_REG1, 0xdeadbeef ); + + for ( tmp = 0 ; tmp < dev_priv->usec_timeout ; tmp++ ) { + if ( readl( &dev_priv->scratch[1] ) == 0xdeadbeef ) + break; + udelay(1); + } + + if ( tmp < dev_priv->usec_timeout ) { + dev_priv->writeback_works = 1; + DRM_DEBUG( "writeback test succeeded, tmp=%d\n", tmp ); + } else { + dev_priv->writeback_works = 0; + DRM_DEBUG( "writeback test failed\n" ); + } + + dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; + RADEON_WRITE( RADEON_LAST_FRAME_REG, + dev_priv->sarea_priv->last_frame ); + + dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; + RADEON_WRITE( RADEON_LAST_DISPATCH_REG, + dev_priv->sarea_priv->last_dispatch ); + + dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; + RADEON_WRITE( RADEON_LAST_CLEAR_REG, + dev_priv->sarea_priv->last_clear ); + /* Set ring buffer size */ +#ifdef __BIG_ENDIAN + RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT ); +#else RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw ); +#endif radeon_do_wait_for_idle( dev_priv ); @@ -647,9 +978,8 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) { drm_radeon_private_t *dev_priv; - struct list_head *list; u32 tmp; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv = DRM(alloc)( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER ); if ( dev_priv == NULL ) @@ -659,17 
+989,6 @@ dev_priv->is_pci = init->is_pci; -#if !defined(PCIGART_ENABLED) - /* PCI support is not 100% working, so we disable it here. - */ - if ( dev_priv->is_pci ) { - DRM_ERROR( "PCI GART not yet supported for Radeon!\n" ); - dev->dev_private = (void *)dev_priv; - radeon_do_cleanup_cp(dev); - return -EINVAL; - } -#endif - if ( dev_priv->is_pci && !dev->sg ) { DRM_ERROR( "PCI GART memory not allocated!\n" ); dev->dev_private = (void *)dev_priv; @@ -686,12 +1005,10 @@ return -EINVAL; } + dev_priv->is_r200 = (init->func == RADEON_INIT_R200_CP); + dev_priv->do_boxes = 0; dev_priv->cp_mode = init->cp_mode; - /* Simple idle check. - */ - atomic_set( &dev_priv->idle_count, 0 ); - /* We don't support anything other than bus-mastering ring mode, * but the ring can be in either AGP or PCI space for the ring * read pointer. @@ -743,17 +1060,17 @@ * and screwing with the clear operation. */ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | - RADEON_Z_ENABLE | (dev_priv->color_fmt << 10) | - RADEON_ZBLOCK16); + (1<<15)); - dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt | - RADEON_Z_TEST_ALWAYS | - RADEON_STENCIL_TEST_ALWAYS | - RADEON_STENCIL_S_FAIL_KEEP | - RADEON_STENCIL_ZPASS_KEEP | - RADEON_STENCIL_ZFAIL_KEEP | - RADEON_Z_WRITE_ENABLE); + dev_priv->depth_clear.rb3d_zstencilcntl = + (dev_priv->depth_fmt | + RADEON_Z_TEST_ALWAYS | + RADEON_STENCIL_TEST_ALWAYS | + RADEON_STENCIL_S_FAIL_REPLACE | + RADEON_STENCIL_ZPASS_REPLACE | + RADEON_STENCIL_ZFAIL_REPLACE | + RADEON_Z_WRITE_ENABLE); dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | RADEON_BFACE_SOLID | @@ -767,15 +1084,8 @@ RADEON_ROUND_MODE_TRUNC | RADEON_ROUND_PREC_8TH_PIX); - list_for_each(list, &dev->maplist->head) { - drm_map_list_t *r_list = (drm_map_list_t *)list; - if( r_list->map && - r_list->map->type == _DRM_SHM && - r_list->map->flags & _DRM_CONTAINS_LOCK ) { - dev_priv->sarea = r_list->map; - break; - } - } + DRM_GETSAREA(); + if(!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); dev->dev_private = (void *)dev_priv; @@ -896,34 +1206,7 @@ dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; -#if 0 - /* Initialize the scratch register pointer. This will cause - * the scratch register values to be written out to memory - * whenever they are updated. - * FIXME: This doesn't quite work yet, so we're disabling it - * for the release. - */ - RADEON_WRITE( RADEON_SCRATCH_ADDR, (dev_priv->ring_rptr->offset + - RADEON_SCRATCH_REG_OFFSET) ); - RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 ); -#endif - - dev_priv->scratch = ((__volatile__ u32 *) - dev_priv->ring_rptr->handle + - (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); - - dev_priv->sarea_priv->last_frame = 0; - RADEON_WRITE( RADEON_LAST_FRAME_REG, - dev_priv->sarea_priv->last_frame ); - - dev_priv->sarea_priv->last_dispatch = 0; - RADEON_WRITE( RADEON_LAST_DISPATCH_REG, - dev_priv->sarea_priv->last_dispatch ); - - dev_priv->sarea_priv->last_clear = 0; - RADEON_WRITE( RADEON_LAST_CLEAR_REG, - dev_priv->sarea_priv->last_clear ); - +#if __REALLY_HAVE_SG if ( dev_priv->is_pci ) { if (!DRM(ati_pcigart_init)( dev, &dev_priv->phys_pci_gart, &dev_priv->bus_pci_gart)) { @@ -953,19 +1236,20 @@ RADEON_WRITE( RADEON_MC_AGP_LOCATION, 0xffffffc0 ); /* ?? 
*/ RADEON_WRITE( RADEON_AGP_COMMAND, 0 ); /* clear AGP_COMMAND */ } else { +#endif /* __REALLY_HAVE_SG */ /* Turn off PCI GART */ tmp = RADEON_READ( RADEON_AIC_CNTL ) & ~RADEON_PCIGART_TRANSLATE_EN; RADEON_WRITE( RADEON_AIC_CNTL, tmp ); +#if __REALLY_HAVE_SG } +#endif /* __REALLY_HAVE_SG */ radeon_cp_load_microcode( dev_priv ); radeon_cp_init_ring_buffer( dev, dev_priv ); -#if ROTATE_BUFS dev_priv->last_buf = 0; -#endif dev->dev_private = (void *)dev_priv; @@ -976,7 +1260,7 @@ int radeon_do_cleanup_cp( drm_device_t *dev ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); if ( dev->dev_private ) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -986,10 +1270,12 @@ DRM_IOREMAPFREE( dev_priv->ring_rptr ); DRM_IOREMAPFREE( dev_priv->buffers ); } else { +#if __REALLY_HAVE_SG if (!DRM(ati_pcigart_cleanup)( dev, dev_priv->phys_pci_gart, dev_priv->bus_pci_gart )) DRM_ERROR( "failed to cleanup PCI GART!\n" ); +#endif /* __REALLY_HAVE_SG */ } DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t), @@ -1012,6 +1298,7 @@ switch ( init.func ) { case RADEON_INIT_CP: + case RADEON_INIT_R200_CP: return radeon_do_init_cp( dev, &init ); case RADEON_CLEANUP_CP: return radeon_do_cleanup_cp( dev ); @@ -1075,7 +1362,7 @@ */ if ( stop.idle ) { ret = radeon_do_cp_idle( dev_priv ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; } /* Finally, we can turn off the CP. If the engine isn't idle, @@ -1145,117 +1432,74 @@ * Fullscreen mode */ -static int radeon_do_init_pageflip( drm_device_t *dev ) -{ - drm_radeon_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); - - dev_priv->crtc_offset = RADEON_READ( RADEON_CRTC_OFFSET ); - dev_priv->crtc_offset_cntl = RADEON_READ( RADEON_CRTC_OFFSET_CNTL ); - - RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->front_offset ); - RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, - dev_priv->crtc_offset_cntl | - RADEON_CRTC_OFFSET_FLIP_CNTL ); - - dev_priv->page_flipping = 1; - dev_priv->current_page = 0; - - return 0; -} - -int radeon_do_cleanup_pageflip( drm_device_t *dev ) +/* KW: Deprecated to say the least: + */ +int radeon_fullscreen(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) { - drm_radeon_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); - - RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->crtc_offset ); - RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl ); - - dev_priv->page_flipping = 0; - dev_priv->current_page = 0; - return 0; } -int radeon_fullscreen( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_radeon_fullscreen_t fs; - - LOCK_TEST_WITH_RETURN( dev ); - - if ( copy_from_user( &fs, (drm_radeon_fullscreen_t *)arg, - sizeof(fs) ) ) - return -EFAULT; - - switch ( fs.func ) { - case RADEON_INIT_FULLSCREEN: - return radeon_do_init_pageflip( dev ); - case RADEON_CLEANUP_FULLSCREEN: - return radeon_do_cleanup_pageflip( dev ); - } - - return -EINVAL; -} - /* ================================================================ * Freelist management */ -#define RADEON_BUFFER_USED 0xffffffff -#define RADEON_BUFFER_FREE 0 -#if 0 -static int radeon_freelist_init( drm_device_t *dev ) +/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through + * bufs until freelist code is used. 
Note this hides a problem with + * the scratch register * (used to keep track of last buffer + * completed) being written to before * the last buffer has actually + * completed rendering. + * + * KW: It's also a good way to find free buffers quickly. + * + * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't + * sleep. However, bugs in older versions of radeon_accel.c mean that + * we essentially have to do this, else old clients will break. + * + * However, it does leave open a potential deadlock where all the + * buffers are held by other clients, which can't release them because + * they can't get the lock. + */ + +drm_buf_t *radeon_freelist_get( drm_device_t *dev ) { drm_device_dma_t *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_buf_t *buf; drm_radeon_buf_priv_t *buf_priv; - drm_radeon_freelist_t *entry; - int i; - - dev_priv->head = DRM(alloc)( sizeof(drm_radeon_freelist_t), - DRM_MEM_DRIVER ); - if ( dev_priv->head == NULL ) - return -ENOMEM; - - memset( dev_priv->head, 0, sizeof(drm_radeon_freelist_t) ); - dev_priv->head->age = RADEON_BUFFER_USED; + drm_buf_t *buf; + int i, t; + int start; - for ( i = 0 ; i < dma->buf_count ; i++ ) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; + if ( ++dev_priv->last_buf >= dma->buf_count ) + dev_priv->last_buf = 0; - entry = DRM(alloc)( sizeof(drm_radeon_freelist_t), - DRM_MEM_DRIVER ); - if ( !entry ) return -ENOMEM; - - entry->age = RADEON_BUFFER_FREE; - entry->buf = buf; - entry->prev = dev_priv->head; - entry->next = dev_priv->head->next; - if ( !entry->next ) - dev_priv->tail = entry; - - buf_priv->discard = 0; - buf_priv->dispatched = 0; - buf_priv->list_entry = entry; + start = dev_priv->last_buf; - dev_priv->head->next = entry; + for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) { + u32 done_age = GET_SCRATCH( 1 ); + DRM_DEBUG("done_age = %d\n",done_age); + for ( i = start ; i < dma->buf_count ; i++ ) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + if ( buf->pid == 0 || (buf->pending && + buf_priv->age <= done_age) ) { + dev_priv->stats.requested_bufs++; + buf->pending = 0; + return buf; + } + start = 0; + } - if ( dev_priv->head->next ) - dev_priv->head->next->prev = entry; + if (t) { + udelay(1); + dev_priv->stats.freelist_loops++; + } } - return 0; - + DRM_DEBUG( "returning NULL!\n" ); + return NULL; } -#endif - +#if 0 drm_buf_t *radeon_freelist_get( drm_device_t *dev ) { drm_device_dma_t *dma = dev->dma; @@ -1263,76 +1507,40 @@ drm_radeon_buf_priv_t *buf_priv; drm_buf_t *buf; int i, t; -#if ROTATE_BUFS int start; -#endif - - /* FIXME: Optimize -- use freelist code */ + u32 done_age = readl(&dev_priv->scratch[1]); - for ( i = 0 ; i < dma->buf_count ; i++ ) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; - if ( buf->pid == 0 ) { - DRM_DEBUG( " ret buf=%d last=%d pid=0\n", - buf->idx, dev_priv->last_buf ); - return buf; - } - DRM_DEBUG( " skipping buf=%d pid=%d\n", - buf->idx, buf->pid ); - } - -#if ROTATE_BUFS if ( ++dev_priv->last_buf >= dma->buf_count ) dev_priv->last_buf = 0; + start = dev_priv->last_buf; -#endif - for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) { -#if 0 - /* FIXME: Disable this for now */ - u32 done_age = dev_priv->scratch[RADEON_LAST_DISPATCH]; -#else - u32 done_age = RADEON_READ( RADEON_LAST_DISPATCH_REG ); -#endif -#if ROTATE_BUFS + dev_priv->stats.freelist_loops++; + + for ( t = 0 ; t < 2 ; t++ ) { for ( i = start ; i < dma->buf_count ; i++ ) { -#else - for ( i = 0 ; i < dma->buf_count ; i++ ) { -#endif buf = dma->buflist[i]; buf_priv = 
buf->dev_private; - if ( buf->pending && buf_priv->age <= done_age ) { - /* The buffer has been processed, so it - * can now be used. - */ + if ( buf->pid == 0 || (buf->pending && + buf_priv->age <= done_age) ) { + dev_priv->stats.requested_bufs++; buf->pending = 0; - DRM_DEBUG( " ret buf=%d last=%d age=%d done=%d\n", buf->idx, dev_priv->last_buf, buf_priv->age, done_age ); return buf; } - DRM_DEBUG( " skipping buf=%d age=%d done=%d\n", - buf->idx, buf_priv->age, - done_age ); -#if ROTATE_BUFS - start = 0; -#endif } - udelay( 1 ); + start = 0; } - DRM_ERROR( "returning NULL!\n" ); return NULL; } +#endif void radeon_freelist_reset( drm_device_t *dev ) { drm_device_dma_t *dma = dev->dma; -#if ROTATE_BUFS drm_radeon_private_t *dev_priv = dev->dev_private; -#endif int i; -#if ROTATE_BUFS dev_priv->last_buf = 0; -#endif for ( i = 0 ; i < dma->buf_count ; i++ ) { drm_buf_t *buf = dma->buflist[i]; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; @@ -1349,11 +1557,23 @@ { drm_radeon_ring_buffer_t *ring = &dev_priv->ring; int i; + u32 last_head = GET_RING_HEAD(ring); for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { - radeon_update_ring_snapshot( ring ); + u32 head = GET_RING_HEAD(ring); + + ring->space = (head - ring->tail) * sizeof(u32); + if ( ring->space <= 0 ) + ring->space += ring->size; if ( ring->space > n ) return 0; + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + if (head != last_head) + i = 0; + last_head = head; + udelay( 1 ); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/radeon_drm.h linux.21rc5-ac2/drivers/char/drm/radeon_drm.h --- linux.21rc5/drivers/char/drm/radeon_drm.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/radeon_drm.h 2003-04-22 16:44:36.000000000 +0100 @@ -2,6 +2,7 @@ * * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -26,6 +27,7 @@ * Authors: * Kevin E. Martin * Gareth Hughes + * Keith Whitwell */ #ifndef __RADEON_DRM_H__ @@ -37,7 +39,8 @@ #ifndef __RADEON_SAREA_DEFINES__ #define __RADEON_SAREA_DEFINES__ -/* What needs to be changed for the current vertex buffer? +/* Old style state flags, required for sarea interface (1.1 and 1.2 + * clears) and 1.2 drm_vertex2 ioctl. */ #define RADEON_UPLOAD_CONTEXT 0x00000001 #define RADEON_UPLOAD_VERTFMT 0x00000002 @@ -56,11 +59,136 @@ #define RADEON_UPLOAD_TEX2IMAGES 0x00004000 #define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ #define RADEON_REQUIRE_QUIESCENCE 0x00010000 -#define RADEON_UPLOAD_ALL 0x0001ffff +#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ +#define RADEON_UPLOAD_ALL 0x003effff +#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff + + +/* New style per-packet identifiers for use in cmd_buffer ioctl with + * the RADEON_EMIT_PACKET command. 
Comments relate new packets to old + * state bits and the packet size: + */ +#define RADEON_EMIT_PP_MISC 0 /* context/7 */ +#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ +#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ +#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ +#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ +#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ +#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ +#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ +#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ +#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ +#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ +#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ +#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ +#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ +#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ +#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ +#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ +#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ +#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ +#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ +#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ +#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ +#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ +#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ +#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ +#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ +#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ +#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ +#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ +#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ +#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ +#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ +#define R200_EMIT_VAP_CTL 32 /* vap/1 */ +#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ +#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ +#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ +#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ +#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ +#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ +#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ +#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ +#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ +#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ +#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ +#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ +#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ +#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ +#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ +#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ +#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ +#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ +#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ +#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ +#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ +#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ +#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ +#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ +#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ +#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ +#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ +#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ +#define R200_EMIT_PP_CUBIC_FACES_0 61 +#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 +#define R200_EMIT_PP_CUBIC_FACES_1 63 +#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 +#define R200_EMIT_PP_CUBIC_FACES_2 65 +#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 +#define R200_EMIT_PP_CUBIC_FACES_3 67 +#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 +#define R200_EMIT_PP_CUBIC_FACES_4 69 +#define 
R200_EMIT_PP_CUBIC_OFFSETS_4 70 +#define R200_EMIT_PP_CUBIC_FACES_5 71 +#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 +#define RADEON_MAX_STATE_PACKETS 73 + + +/* Commands understood by cmd_buffer ioctl. More can be added but + * obviously these can't be removed or changed: + */ +#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ +#define RADEON_CMD_SCALARS 2 /* emit scalar data */ +#define RADEON_CMD_VECTORS 3 /* emit vector data */ +#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ +#define RADEON_CMD_PACKET3 5 /* emit hw packet */ +#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ +#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ +#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: + * doesn't make the cpu wait, just + * the graphics hardware */ + + +typedef union { + int i; + struct { + unsigned char cmd_type, pad0, pad1, pad2; + } header; + struct { + unsigned char cmd_type, packet_id, pad0, pad1; + } packet; + struct { + unsigned char cmd_type, offset, stride, count; + } scalars; + struct { + unsigned char cmd_type, offset, stride, count; + } vectors; + struct { + unsigned char cmd_type, buf_idx, pad0, pad1; + } dma; + struct { + unsigned char cmd_type, flags, pad0, pad1; + } wait; +} drm_radeon_cmd_header_t; + +#define RADEON_WAIT_2D 0x1 +#define RADEON_WAIT_3D 0x2 + #define RADEON_FRONT 0x1 #define RADEON_BACK 0x2 #define RADEON_DEPTH 0x4 +#define RADEON_STENCIL 0x8 /* Primitive types */ @@ -78,12 +206,9 @@ /* Byte offsets for indirect buffer data */ #define RADEON_INDEX_PRIM_OFFSET 20 -#define RADEON_HOSTDATA_BLIT_OFFSET 32 #define RADEON_SCRATCH_REG_OFFSET 32 -/* Keep these small for testing - */ #define RADEON_NR_SAREA_CLIPRECTS 12 /* There are 2 heaps (local/AGP). Each region within a heap is a @@ -95,7 +220,7 @@ #define RADEON_NR_TEX_REGIONS 64 #define RADEON_LOG_TEX_GRANULARITY 16 -#define RADEON_MAX_TEXTURE_LEVELS 11 +#define RADEON_MAX_TEXTURE_LEVELS 12 #define RADEON_MAX_TEXTURE_UNITS 3 #endif /* __RADEON_SAREA_DEFINES__ */ @@ -155,28 +280,18 @@ /* Setup state */ unsigned int se_cntl_status; /* 0x2140 */ -#ifdef TCL_ENABLE - /* TCL state */ - radeon_color_regs_t se_tcl_material_emmissive; /* 0x2210 */ - radeon_color_regs_t se_tcl_material_ambient; - radeon_color_regs_t se_tcl_material_diffuse; - radeon_color_regs_t se_tcl_material_specular; - unsigned int se_tcl_shininess; - unsigned int se_tcl_output_vtx_fmt; - unsigned int se_tcl_output_vtx_sel; - unsigned int se_tcl_matrix_select_0; - unsigned int se_tcl_matrix_select_1; - unsigned int se_tcl_ucp_vert_blend_ctl; - unsigned int se_tcl_texture_proc_ctl; - unsigned int se_tcl_light_model_ctl; - unsigned int se_tcl_per_light_ctl[4]; -#endif - /* Misc state */ unsigned int re_top_left; /* 0x26c0 */ unsigned int re_misc; } drm_radeon_context_regs_t; +typedef struct { + /* Zbias state */ + unsigned int se_zbias_factor; /* 0x1dac */ + unsigned int se_zbias_constant; +} drm_radeon_context2_regs_t; + + /* Setup registers for each texture unit */ typedef struct { @@ -186,24 +301,37 @@ unsigned int pp_txcblend; unsigned int pp_txablend; unsigned int pp_tfactor; - unsigned int pp_border_color; - -#ifdef CUBIC_ENABLE - unsigned int pp_cubic_faces; - unsigned int pp_cubic_offset[5]; -#endif } drm_radeon_texture_regs_t; typedef struct { + unsigned int start; + unsigned int finish; + unsigned int prim:8; + unsigned int stateidx:8; + unsigned int numverts:16; /* overloaded as offset/64 for elt prims */ + unsigned int vc_format; /* vertex format */ +} drm_radeon_prim_t; + + 
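The drm_radeon_cmd_header_t union above is the entire wire format of the new cmd_buffer stream: a four-byte header whose first byte selects the command, followed by a command-specific payload. A minimal sketch of how a consumer could walk such a stream (illustrative only, not the kernel's actual radeon_cp_cmdbuf; variable-length payload handling is elided):

static int ex_walk_cmdbuf( char *buf, int bufsz )
{
	drm_radeon_cmd_header_t header;

	while ( bufsz >= (int)sizeof(header) ) {
		header = *(drm_radeon_cmd_header_t *)buf;
		buf += sizeof(header);
		bufsz -= sizeof(header);

		switch ( header.header.cmd_type ) {
		case RADEON_CMD_PACKET:
			/* header.packet.packet_id selects one of the
			 * RADEON_MAX_STATE_PACKETS register packets. */
			break;
		case RADEON_CMD_DMA_DISCARD:
			/* header.dma.buf_idx names the dma buffer. */
			break;
		case RADEON_CMD_WAIT:
			/* header.wait.flags is RADEON_WAIT_2D and/or
			 * RADEON_WAIT_3D; the wait runs on the GPU,
			 * not the CPU. */
			break;
		default:
			/* Unknown types cannot be skipped, because their
			 * payload length is unknown; reject the buffer. */
			return -1;
		}
		/* The scalars, vectors and packet3 payloads that follow
		 * their headers would be consumed here. */
	}
	return 0;
}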
+typedef struct { + drm_radeon_context_regs_t context; + drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS]; + drm_radeon_context2_regs_t context2; + unsigned int dirty; +} drm_radeon_state_t; + + +typedef struct { unsigned char next, prev; unsigned char in_use; int age; } drm_radeon_tex_region_t; typedef struct { - /* The channel for communication of state information to the kernel - * on firing a vertex buffer. + /* The channel for communication of state information to the + * kernel on firing a vertex buffer with either of the + * obsoleted vertex/index ioctls. */ drm_radeon_context_regs_t context_state; drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS]; @@ -225,16 +353,50 @@ drm_radeon_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS+1]; int tex_age[RADEON_NR_TEX_HEAPS]; int ctx_owner; + int pfState; /* number of 3d windows (0, 1, 2 or more) */ + int pfCurrentPage; /* which buffer is being displayed? */ + int crtc2_base; /* CRTC2 frame offset */ } drm_radeon_sarea_t; /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmRadeon.h) + * + * KW: actually it's illegal to change any of this (backwards compatibility). */ + +/* Radeon specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t) +#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41) +#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t) +#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43) +#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44) +#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45) +#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t) +#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47) +#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t) +#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t) +#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t) +#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) +#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) +#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) +#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex2_t) +#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t) +#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t) +#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52) +#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR( 0x53, drm_radeon_mem_alloc_t) +#define DRM_IOCTL_RADEON_FREE DRM_IOW( 0x54, drm_radeon_mem_free_t) +#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( 0x55, drm_radeon_mem_init_heap_t) +#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR( 0x56, drm_radeon_irq_emit_t) +#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( 0x57, drm_radeon_irq_wait_t) + typedef struct drm_radeon_init { enum { RADEON_INIT_CP = 0x01, - RADEON_CLEANUP_CP = 0x02 + RADEON_CLEANUP_CP = 0x02, + RADEON_INIT_R200_CP = 0x03, } func; unsigned long sarea_priv_offset; int is_pci; @@ -285,7 +447,7 @@ unsigned int clear_color; unsigned int clear_depth; unsigned int color_mask; - unsigned int depth_mask; + unsigned int depth_mask; /* misnamed field: should be stencil */ drm_radeon_clear_rect_t *depth_boxes; } drm_radeon_clear_t; @@ -304,6 +466,36 @@ int discard; /* Client finished with buffer?
*/ } drm_radeon_indices_t; +/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices + * - allows multiple primitives and state changes in a single ioctl + * - supports driver change to emit native primitives + */ +typedef struct drm_radeon_vertex2 { + int idx; /* Index of vertex buffer */ + int discard; /* Client finished with buffer? */ + int nr_states; + drm_radeon_state_t *state; + int nr_prims; + drm_radeon_prim_t *prim; +} drm_radeon_vertex2_t; + +/* v1.3 - obsoletes drm_radeon_vertex2 + * - allows arbitrarily large cliprect list + * - allows updating of tcl packet, vector and scalar state + * - allows memory-efficient description of state updates + * - allows state to be emitted without a primitive + * (for clears, ctx switches) + * - allows more than one dma buffer to be referenced per ioctl + * - supports tcl driver + * - may be extended in future versions with new cmd types, packets + */ +typedef struct drm_radeon_cmd_buffer { + int bufsz; + char *buf; + int nbox; + drm_clip_rect_t *boxes; +} drm_radeon_cmd_buffer_t; + typedef struct drm_radeon_tex_image { unsigned int x, y; /* Blit coordinates */ unsigned int width, height; @@ -330,4 +522,55 @@ int discard; } drm_radeon_indirect_t; + +/* 1.3: An ioctl to get parameters that aren't available to the 3d + * client any other way. + */ +#define RADEON_PARAM_AGP_BUFFER_OFFSET 1 /* card offset of 1st agp buffer */ +#define RADEON_PARAM_LAST_FRAME 2 +#define RADEON_PARAM_LAST_DISPATCH 3 +#define RADEON_PARAM_LAST_CLEAR 4 +#define RADEON_PARAM_IRQ_NR 5 +#define RADEON_PARAM_AGP_BASE 6 /* card offset of agp base */ + +typedef struct drm_radeon_getparam { + int param; + int *value; +} drm_radeon_getparam_t; + +/* 1.6: Set up a memory manager for regions of shared memory: + */ +#define RADEON_MEM_REGION_AGP 1 +#define RADEON_MEM_REGION_FB 2 + +typedef struct drm_radeon_mem_alloc { + int region; + int alignment; + int size; + int *region_offset; /* offset from start of fb or agp */ +} drm_radeon_mem_alloc_t; + +typedef struct drm_radeon_mem_free { + int region; + int region_offset; +} drm_radeon_mem_free_t; + +typedef struct drm_radeon_mem_init_heap { + int region; + int size; + int start; +} drm_radeon_mem_init_heap_t; + + +/* 1.6: Userspace can request & wait on irq's: + */ +typedef struct drm_radeon_irq_emit { + int *irq_seq; +} drm_radeon_irq_emit_t; + +typedef struct drm_radeon_irq_wait { + int irq_seq; +} drm_radeon_irq_wait_t; + + #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/radeon_drv.c linux.21rc5-ac2/drivers/char/drm/radeon_drv.c --- linux.21rc5/drivers/char/drm/radeon_drv.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/radeon_drv.c 2003-04-22 16:44:36.000000000 +0100 @@ -30,47 +30,11 @@ #include #include "radeon.h" #include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" #include "radeon_drv.h" #include "ati_pcigart.h" -#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
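The 1.6 irq types above are small enough that their intended use fits in a few lines. A hedged userspace sketch (fd is an already-open DRM device node; error handling is elided; the emit-then-wait pairing is inferred from the structs, not from code in this patch):

#include <sys/ioctl.h>

static int ex_gpu_sync( int fd )
{
	drm_radeon_irq_emit_t emit;
	drm_radeon_irq_wait_t wait;
	int seq = 0;

	emit.irq_seq = &seq;	/* kernel returns the new sequence number here */
	if ( ioctl( fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit ) < 0 )
		return -1;

	wait.irq_seq = seq;	/* block until the card has raised that irq */
	return ioctl( fd, DRM_IOCTL_RADEON_IRQ_WAIT, &wait );
}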
- -#define DRIVER_NAME "radeon" -#define DRIVER_DESC "ATI Radeon" -#define DRIVER_DATE "20010405" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 1 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 }, - - -#if 0 -/* GH: Count data sent to card via ring or vertex/indirect buffers. - */ -#define __HAVE_COUNTERS 3 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#endif - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -78,26 +42,6 @@ #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" - -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init radeon_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", radeon_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/radeon_drv.h linux.21rc5-ac2/drivers/char/drm/radeon_drv.h --- linux.21rc5/drivers/char/drm/radeon_drv.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/radeon_drv.h 2003-04-22 16:44:36.000000000 +0100 @@ -31,6 +31,9 @@ #ifndef __RADEON_DRV_H__ #define __RADEON_DRV_H__ +#define GET_RING_HEAD(ring) readl( (volatile u32 *) (ring)->head ) +#define SET_RING_HEAD(ring,val) writel( (val), (volatile u32 *) (ring)->head ) + typedef struct drm_radeon_freelist { unsigned int age; drm_buf_t *buf; @@ -58,6 +61,15 @@ u32 se_cntl; } drm_radeon_depth_clear_t; + +struct mem_block { + struct mem_block *next; + struct mem_block *prev; + int start; + int size; + int pid; /* 0: free, -1: heap, other: real pids */ +}; + typedef struct drm_radeon_private { drm_radeon_ring_buffer_t ring; drm_radeon_sarea_t *sarea_priv; @@ -71,27 +83,32 @@ drm_radeon_freelist_t *head; drm_radeon_freelist_t *tail; -/* FIXME: ROTATE_BUFS is a hask to cycle through bufs until freelist - code is used. 
Note this hides a problem with the scratch register - (used to keep track of last buffer completed) being written to before - the last buffer has actually completed rendering. */ -#define ROTATE_BUFS 1 -#if ROTATE_BUFS int last_buf; -#endif volatile u32 *scratch; + int writeback_works; int usec_timeout; + + int is_r200; + int is_pci; unsigned long phys_pci_gart; dma_addr_t bus_pci_gart; - atomic_t idle_count; + struct { + u32 boxes; + int freelist_timeouts; + int freelist_loops; + int requested_bufs; + int last_frame_reads; + int last_clear_reads; + int clears; + int texture_uploads; + } stats; + int do_boxes; int page_flipping; int current_page; - u32 crtc_offset; - u32 crtc_offset_cntl; u32 color_fmt; unsigned int front_offset; @@ -116,14 +133,18 @@ drm_map_t *ring_rptr; drm_map_t *buffers; drm_map_t *agp_textures; + + struct mem_block *agp_heap; + struct mem_block *fb_heap; + + /* SW interrupt */ + wait_queue_head_t swi_queue; + atomic_t swi_emitted; + } drm_radeon_private_t; typedef struct drm_radeon_buf_priv { u32 age; - int prim; - int discard; - int dispatched; - drm_radeon_freelist_t *list_entry; } drm_radeon_buf_priv_t; /* radeon_cp.c */ @@ -149,14 +170,6 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n ); -static inline void -radeon_update_ring_snapshot( drm_radeon_ring_buffer_t *ring ) -{ - ring->space = (*(volatile int *)ring->head - ring->tail) * sizeof(u32); - if ( ring->space <= 0 ) - ring->space += ring->size; -} - extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); extern int radeon_do_cleanup_cp( drm_device_t *dev ); extern int radeon_do_cleanup_pageflip( drm_device_t *dev ); @@ -176,6 +189,34 @@ unsigned int cmd, unsigned long arg ); extern int radeon_cp_indirect( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ); +extern int radeon_cp_vertex2(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_cp_cmdbuf(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_cp_getparam(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_cp_flip(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); + +extern int radeon_mem_alloc(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_mem_free(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_mem_init_heap(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern void radeon_mem_takedown( struct mem_block **heap ); +extern void radeon_mem_release( struct mem_block *heap ); + + /* radeon_irq.c */ +extern int radeon_irq_emit(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_irq_wait(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); + +extern int radeon_emit_and_wait_irq(drm_device_t *dev); +extern int radeon_wait_irq(drm_device_t *dev, int swi_nr); +extern int radeon_emit_irq(drm_device_t *dev); + + +/* Flags for stats.boxes + */ +#define RADEON_BOX_DMA_IDLE 0x1 +#define RADEON_BOX_RING_FULL 0x2 +#define RADEON_BOX_FLIP 0x4 +#define RADEON_BOX_WAIT_IDLE 0x8 +#define RADEON_BOX_TEXTURE_LOAD 0x10 + /* Register definitions, register access macros and drmAddMap constants @@ -202,10 +243,10 @@ #define RADEON_CRTC_OFFSET_CNTL 0x0228 # define RADEON_CRTC_TILE_EN (1 << 15) # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) +#define RADEON_CRTC2_OFFSET 
0x0324 +#define RADEON_CRTC2_OFFSET_CNTL 0x0328 #define RADEON_RB3D_COLORPITCH 0x1c48 -#define RADEON_RB3D_DEPTHCLEARVALUE 0x1c30 -#define RADEON_RB3D_DEPTHXY_OFFSET 0x1c60 #define RADEON_DP_GUI_MASTER_CNTL 0x146c # define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) @@ -240,6 +281,24 @@ #define RADEON_SCRATCH_UMSK 0x0770 #define RADEON_SCRATCH_ADDR 0x0774 +#define GET_SCRATCH( x ) (dev_priv->writeback_works \ + ? readl( &dev_priv->scratch[(x)] ) \ + : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) + + +#define RADEON_GEN_INT_CNTL 0x0040 +# define RADEON_CRTC_VBLANK_MASK (1 << 0) +# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19) +# define RADEON_SW_INT_ENABLE (1 << 25) + +#define RADEON_GEN_INT_STATUS 0x0044 +# define RADEON_CRTC_VBLANK_STAT (1 << 0) +# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) +# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19) +# define RADEON_SW_INT_TEST (1 << 25) +# define RADEON_SW_INT_TEST_ACK (1 << 25) +# define RADEON_SW_INT_FIRE (1 << 26) + #define RADEON_HOST_PATH_CNTL 0x0130 # define RADEON_HDP_SOFT_RESET (1 << 26) # define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28) @@ -253,6 +312,12 @@ # define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4) # define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5) +#define RADEON_RBBM_GUICNTL 0x172c +# define RADEON_HOST_DATA_SWAP_NONE (0 << 0) +# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0) +# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0) +# define RADEON_HOST_DATA_SWAP_HDW (3 << 0) + #define RADEON_MC_AGP_LOCATION 0x014c #define RADEON_MC_FB_LOCATION 0x0148 #define RADEON_MCLK_CNTL 0x0012 @@ -290,10 +355,8 @@ # define RADEON_ROP_ENABLE (1 << 6) # define RADEON_STENCIL_ENABLE (1 << 7) # define RADEON_Z_ENABLE (1 << 8) -# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9) -# define RADEON_ZBLOCK8 (0 << 15) -# define RADEON_ZBLOCK16 (1 << 15) #define RADEON_RB3D_DEPTHOFFSET 0x1c24 +#define RADEON_RB3D_DEPTHPITCH 0x1c28 #define RADEON_RB3D_PLANEMASK 0x1d84 #define RADEON_RB3D_STENCILREFMASK 0x1d7c #define RADEON_RB3D_ZCACHE_MODE 0x3250 @@ -306,9 +369,9 @@ # define RADEON_Z_TEST_MASK (7 << 4) # define RADEON_Z_TEST_ALWAYS (7 << 4) # define RADEON_STENCIL_TEST_ALWAYS (7 << 12) -# define RADEON_STENCIL_S_FAIL_KEEP (0 << 16) -# define RADEON_STENCIL_ZPASS_KEEP (0 << 20) -# define RADEON_STENCIL_ZFAIL_KEEP (0 << 20) +# define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) +# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) +# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) # define RADEON_Z_WRITE_ENABLE (1 << 30) #define RADEON_RBBM_SOFT_RESET 0x00f0 # define RADEON_SOFT_RESET_CP (1 << 0) @@ -357,6 +420,16 @@ #define RADEON_SE_CNTL_STATUS 0x2140 #define RADEON_SE_LINE_WIDTH 0x1db8 #define RADEON_SE_VPORT_XSCALE 0x1d98 +#define RADEON_SE_ZBIAS_FACTOR 0x1db0 +#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210 +#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254 +#define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200 +# define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16 +# define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28 +#define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204 +#define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208 +# define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16 +#define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C #define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8 #define RADEON_SURFACE_ACCESS_CLR 0x0bfc #define RADEON_SURFACE_CNTL 0x0b00 @@ -421,6 +494,7 @@ #define RADEON_CP_RB_BASE 0x0700 #define RADEON_CP_RB_CNTL 0x0704 +# define RADEON_BUF_SWAP_32BIT (2 << 16) #define RADEON_CP_RB_RPTR_ADDR 0x070c #define RADEON_CP_RB_RPTR 0x0710 #define RADEON_CP_RB_WPTR 0x0714 @@ -457,11 +531,14 @@ #define RADEON_CP_PACKET3 0xC0000000 # define 
RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 # define RADEON_WAIT_FOR_IDLE 0x00002600 +# define RADEON_3D_DRAW_VBUF 0x00002800 # define RADEON_3D_DRAW_IMMD 0x00002900 -# define RADEON_3D_CLEAR_ZMASK 0x00003200 +# define RADEON_3D_DRAW_INDX 0x00002A00 +# define RADEON_3D_LOAD_VBPNTR 0x00002F00 # define RADEON_CNTL_HOSTDATA_BLT 0x00009400 # define RADEON_CNTL_PAINT_MULTI 0x00009A00 # define RADEON_CNTL_BITBLT_MULTI 0x00009B00 +# define RADEON_CNTL_SET_SCISSORS 0xC0001E00 #define RADEON_CP_PACKET_MASK 0xC0000000 #define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 @@ -470,6 +547,7 @@ #define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 #define RADEON_VTX_Z_PRESENT (1 << 31) +#define RADEON_VTX_PKCOLOR_PRESENT (1 << 3) #define RADEON_PRIM_TYPE_NONE (0 << 0) #define RADEON_PRIM_TYPE_POINT (1 << 0) @@ -482,6 +560,7 @@ #define RADEON_PRIM_TYPE_RECT_LIST (8 << 0) #define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) #define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) +#define RADEON_PRIM_TYPE_MASK 0xf #define RADEON_PRIM_WALK_IND (1 << 4) #define RADEON_PRIM_WALK_LIST (2 << 4) #define RADEON_PRIM_WALK_RING (3 << 4) @@ -508,6 +587,105 @@ #define RADEON_TXFORMAT_ARGB4444 5 #define RADEON_TXFORMAT_ARGB8888 6 #define RADEON_TXFORMAT_RGBA8888 7 +#define RADEON_TXFORMAT_VYUY422 10 +#define RADEON_TXFORMAT_YVYU422 11 +#define RADEON_TXFORMAT_DXT1 12 +#define RADEON_TXFORMAT_DXT23 14 +#define RADEON_TXFORMAT_DXT45 15 + +#define R200_PP_TXCBLEND_0 0x2f00 +#define R200_PP_TXCBLEND_1 0x2f10 +#define R200_PP_TXCBLEND_2 0x2f20 +#define R200_PP_TXCBLEND_3 0x2f30 +#define R200_PP_TXCBLEND_4 0x2f40 +#define R200_PP_TXCBLEND_5 0x2f50 +#define R200_PP_TXCBLEND_6 0x2f60 +#define R200_PP_TXCBLEND_7 0x2f70 +#define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268 +#define R200_PP_TFACTOR_0 0x2ee0 +#define R200_SE_VTX_FMT_0 0x2088 +#define R200_SE_VAP_CNTL 0x2080 +#define R200_SE_TCL_MATRIX_SEL_0 0x2230 +#define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8 +#define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0 +#define R200_PP_TXFILTER_5 0x2ca0 +#define R200_PP_TXFILTER_4 0x2c80 +#define R200_PP_TXFILTER_3 0x2c60 +#define R200_PP_TXFILTER_2 0x2c40 +#define R200_PP_TXFILTER_1 0x2c20 +#define R200_PP_TXFILTER_0 0x2c00 +#define R200_PP_TXOFFSET_5 0x2d78 +#define R200_PP_TXOFFSET_4 0x2d60 +#define R200_PP_TXOFFSET_3 0x2d48 +#define R200_PP_TXOFFSET_2 0x2d30 +#define R200_PP_TXOFFSET_1 0x2d18 +#define R200_PP_TXOFFSET_0 0x2d00 + +#define R200_PP_CUBIC_FACES_0 0x2c18 +#define R200_PP_CUBIC_FACES_1 0x2c38 +#define R200_PP_CUBIC_FACES_2 0x2c58 +#define R200_PP_CUBIC_FACES_3 0x2c78 +#define R200_PP_CUBIC_FACES_4 0x2c98 +#define R200_PP_CUBIC_FACES_5 0x2cb8 +#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04 +#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08 +#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c +#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10 +#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14 +#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c +#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20 +#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24 +#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28 +#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c +#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34 +#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38 +#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c +#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40 +#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44 +#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c +#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50 +#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54 +#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58 +#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c +#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64 +#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68 +#define 
R200_PP_CUBIC_OFFSET_F3_4 0x2d6c +#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70 +#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74 +#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c +#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80 +#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84 +#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88 +#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c + +#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 +#define R200_SE_VTE_CNTL 0x20b0 +#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250 +#define R200_PP_TAM_DEBUG3 0x2d9c +#define R200_PP_CNTL_X 0x2cc4 +#define R200_SE_VAP_CNTL_STATUS 0x2140 +#define R200_RE_SCISSOR_TL_0 0x1cd8 +#define R200_RE_SCISSOR_TL_1 0x1ce0 +#define R200_RE_SCISSOR_TL_2 0x1ce8 +#define R200_RB3D_DEPTHXY_OFFSET 0x1d60 +#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 +#define R200_SE_VTX_STATE_CNTL 0x2180 +#define R200_RE_POINTSIZE 0x2648 +#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254 + + +#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001 +#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000 +#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012 +#define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100 +#define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200 +#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001 +#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002 +#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b +#define R200_3D_DRAW_IMMD_2 0xC0003500 +#define R200_SE_VTX_FMT_1 0x208c +#define R200_RE_CNTL 0x1c50 + /* Constants */ #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -515,6 +693,7 @@ #define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0 #define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1 #define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2 +#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3 #define RADEON_LAST_DISPATCH 1 #define RADEON_MAX_VB_AGE 0x7fffffff @@ -526,41 +705,11 @@ #define RADEON_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) #define RADEON_ADDR(reg) (RADEON_BASE( reg ) + reg) -#define RADEON_DEREF(reg) *(volatile u32 *)RADEON_ADDR( reg ) -#ifdef __alpha__ -#define RADEON_READ(reg) (_RADEON_READ((u32 *)RADEON_ADDR( reg ))) -static inline u32 _RADEON_READ(u32 *addr) -{ - mb(); - return *(volatile u32 *)addr; -} -#define RADEON_WRITE(reg,val) \ -do { \ - wmb(); \ - RADEON_DEREF(reg) = val; \ -} while (0) -#else -#define RADEON_READ(reg) RADEON_DEREF( reg ) -#define RADEON_WRITE(reg, val) do { RADEON_DEREF( reg ) = val; } while (0) -#endif +#define RADEON_READ(reg) readl( (volatile u32 *) RADEON_ADDR(reg) ) +#define RADEON_WRITE(reg,val) writel( (val), (volatile u32 *) RADEON_ADDR(reg)) -#define RADEON_DEREF8(reg) *(volatile u8 *)RADEON_ADDR( reg ) -#ifdef __alpha__ -#define RADEON_READ8(reg) _RADEON_READ8((u8 *)RADEON_ADDR( reg )) -static inline u8 _RADEON_READ8(u8 *addr) -{ - mb(); - return *(volatile u8 *)addr; -} -#define RADEON_WRITE8(reg,val) \ -do { \ - wmb(); \ - RADEON_DEREF8( reg ) = val; \ -} while (0) -#else -#define RADEON_READ8(reg) RADEON_DEREF8( reg ) -#define RADEON_WRITE8(reg, val) do { RADEON_DEREF8( reg ) = val; } while (0) -#endif +#define RADEON_READ8(reg) readb( (volatile u8 *) RADEON_ADDR(reg) ) +#define RADEON_WRITE8(reg,val) writeb( (val), (volatile u8 *) RADEON_ADDR(reg)) #define RADEON_WRITE_PLL( addr, val ) \ do { \ @@ -647,20 +796,16 @@ } \ } while (0) + +/* Perfbox functionality only. 
+ */ #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ do { \ - drm_radeon_ring_buffer_t *ring = &dev_priv->ring; int i; \ - if ( ring->space < ring->high_mark ) { \ - for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \ - radeon_update_ring_snapshot( ring ); \ - if ( ring->space >= ring->high_mark ) \ - goto __ring_space_done; \ - udelay( 1 ); \ - } \ - DRM_ERROR( "ring space check failed!\n" ); \ - return -EBUSY; \ + if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \ + u32 head = GET_RING_HEAD(&dev_priv->ring); \ + if (head == dev_priv->ring.tail) \ + dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \ } \ - __ring_space_done: ; \ } while (0) #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ @@ -668,7 +813,7 @@ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ int __ret = radeon_do_cp_idle( dev_priv ); \ - if ( __ret < 0 ) return __ret; \ + if ( __ret ) return __ret; \ sarea_priv->last_dispatch = 0; \ radeon_freelist_reset( dev ); \ } \ @@ -694,12 +839,17 @@ * Ring control */ -#define radeon_flush_write_combine() mb() +#if defined(__powerpc__) +#define radeon_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring ) +#else +#define radeon_flush_write_combine() wmb() +#warning PCI posting bug +#endif #define RADEON_VERBOSE 0 -#define RING_LOCALS int write; unsigned int mask; volatile u32 *ring; +#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring; #define BEGIN_RING( n ) do { \ if ( RADEON_VERBOSE ) { \ @@ -707,9 +857,10 @@ n, __FUNCTION__ ); \ } \ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ + COMMIT_RING(); \ radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \ } \ - dev_priv->ring.space -= (n) * sizeof(u32); \ + _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ring = dev_priv->ring.start; \ write = dev_priv->ring.tail; \ mask = dev_priv->ring.tail_mask; \ @@ -720,9 +871,22 @@ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ write, dev_priv->ring.tail ); \ } \ - radeon_flush_write_combine(); \ - dev_priv->ring.tail = write; \ - RADEON_WRITE( RADEON_CP_RB_WPTR, write ); \ + if (((dev_priv->ring.tail + _nr) & mask) != write) { \ + DRM_ERROR( \ + "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ + ((dev_priv->ring.tail + _nr) & mask), \ + write, __LINE__); \ + } else \ + dev_priv->ring.tail = write; \ +} while (0) + +#define COMMIT_RING() do { \ + /* Flush writes to ring */ \ + rmb(); \ + GET_RING_HEAD( &dev_priv->ring ); \ + RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \ + /* read from PCI bus to ensure correct posting */ \ + RADEON_READ( RADEON_CP_RB_RPTR ); \ } while (0) #define OUT_RING( x ) do { \ @@ -734,6 +898,33 @@ write &= mask; \ } while (0) -#define RADEON_PERFORMANCE_BOXES 0 +#define OUT_RING_REG( reg, val ) do { \ + OUT_RING( CP_PACKET0( reg, 0 ) ); \ + OUT_RING( val ); \ +} while (0) + + +#define OUT_RING_USER_TABLE( tab, sz ) do { \ + int _size = (sz); \ + int *_tab = (tab); \ + \ + if (write + _size > mask) { \ + int i = (mask+1) - write; \ + if (__copy_from_user( (int *)(ring+write), \ + _tab, i*4 )) \ + return -EFAULT; \ + write = 0; \ + _size -= i; \ + _tab += i; \ + } \ + \ + if (_size && __copy_from_user( (int *)(ring+write), \ + _tab, _size*4 )) \ + return -EFAULT; \ + \ + write += _size; \ + write &= mask; \ +} while (0) + #endif /* __RADEON_DRV_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/radeon.h linux.21rc5-ac2/drivers/char/drm/radeon.h --- linux.21rc5/drivers/char/drm/radeon.h 2003-05-28 
15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/radeon.h 2003-04-22 16:44:36.000000000 +0100 @@ -44,7 +44,78 @@ #define __HAVE_SG 1 #define __HAVE_PCI_DMA 1 -/* Driver customization: +#define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others." + +#define DRIVER_NAME "radeon" +#define DRIVER_DESC "ATI Radeon" +#define DRIVER_DATE "20020828" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 7 +#define DRIVER_PATCHLEVEL 0 + +/* Interface history: + * + * 1.1 - ?? + * 1.2 - Add vertex2 ioctl (keith) + * - Add stencil capability to clear ioctl (gareth, keith) + * - Increase MAX_TEXTURE_LEVELS (brian) + * 1.3 - Add cmdbuf ioctl (keith) + * - Add support for new radeon packets (keith) + * - Add getparam ioctl (keith) + * - Add flip-buffers ioctl, deprecate fullscreen foo (keith). + * 1.4 - Add scratch registers to get_param ioctl. + * 1.5 - Add r200 packets to cmdbuf ioctl + * - Add r200 function to init ioctl + * - Add 'scalar2' instruction to cmdbuf + * 1.6 - Add static agp memory manager + * Add irq handler (won't be turned on unless X server knows to) + * Add irq ioctls and irq_active getparam. + * Add wait command for cmdbuf ioctl + * Add agp offset query for getparam + * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5] + * and R200_PP_CUBIC_OFFSET_F1_[0..5]. + * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and + * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian) + */ +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX2)] = { radeon_cp_vertex2, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CMDBUF)] = { radeon_cp_cmdbuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_GETPARAM)] = { radeon_cp_getparam, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FLIP)] = { radeon_cp_flip, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_ALLOC)] = { radeon_mem_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FREE)] = { radeon_mem_free, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INIT_HEAP)] = { radeon_mem_init_heap, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_EMIT)] = { radeon_irq_emit, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 }, + + +#define USE_IRQS 1 +#if USE_IRQS +#define __HAVE_DMA_IRQ 1 +#define __HAVE_VBL_IRQ 1 +#define __HAVE_SHARED_IRQ 1 + +/* When a client dies: + * - Check for and clean up flipped page state + * - Free any alloced agp memory. + * + * DRM infrastructure takes care of reclaiming dma buffers. 
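Each DRIVER_IOCTLS entry above pairs a handler with two flags; in DRM cores of this vintage they mean "authenticated client required" and "root required". A stand-alone sketch of how such a table is typically consulted at dispatch time (handler names and error values below are illustrative, not the DRM core's):

/* Schematic of how a table of the DRIVER_IOCTLS shape is consumed.
 * The two flags per entry are "authenticated client required" and
 * "root required"; handlers and error values are stand-ins. */
#include <stdio.h>

typedef int (*ioctl_fn)(void);
struct ioctl_desc { ioctl_fn func; int auth_needed; int root_only; };

static int do_swap(void) { return 0; }	/* cf. radeon_cp_swap */
static int do_init(void) { return 0; }	/* cf. radeon_cp_init */

static struct ioctl_desc ioctls[] = {
	[0] = { do_swap, 1, 0 },	/* any authenticated client */
	[1] = { do_init, 1, 1 },	/* privileged, like CP_INIT */
};

static int dispatch(unsigned int nr, int authenticated, int is_root)
{
	struct ioctl_desc *d;

	if (nr >= sizeof(ioctls) / sizeof(ioctls[0]))
		return -1;			/* ~ -EINVAL */
	d = &ioctls[nr];
	if (!d->func)
		return -1;
	if ((d->auth_needed && !authenticated) || (d->root_only && !is_root))
		return -2;			/* ~ -EACCES */
	return d->func();
}

int main(void)
{
	printf("%d %d\n", dispatch(0, 1, 0), dispatch(1, 1, 0));
	return 0;
}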
*/ #define DRIVER_PRERELEASE() do { \ if ( dev->dev_private ) { \ @@ -52,31 +123,36 @@ if ( dev_priv->page_flipping ) { \ radeon_do_cleanup_pageflip( dev ); \ } \ + radeon_mem_release( dev_priv->agp_heap ); \ } \ } while (0) +/* On unloading the module: + * - Free memory heap structure + * - Remove mappings made at startup and free dev_private. + */ #define DRIVER_PRETAKEDOWN() do { \ - if ( dev->dev_private ) radeon_do_cleanup_cp( dev ); \ + if ( dev->dev_private ) { \ + drm_radeon_private_t *dev_priv = dev->dev_private; \ + radeon_mem_takedown( &(dev_priv->agp_heap) ); \ + radeon_do_cleanup_cp( dev ); \ + } \ } while (0) +#else +#define __HAVE_DMA_IRQ 0 +#endif + /* DMA customization: */ #define __HAVE_DMA 1 -#if 0 -/* GH: Remove this for now... */ -#define __HAVE_DMA_QUIESCENT 1 -#define DRIVER_DMA_QUIESCENT() do { \ - drm_radeon_private_t *dev_priv = dev->dev_private; \ - return radeon_do_cp_idle( dev_priv ); \ -} while (0) -#endif /* Buffer customization: */ #define DRIVER_BUF_PRIV_T drm_radeon_buf_priv_t -#define DRIVER_AGP_BUFFERS_MAP( dev ) \ +#define DRIVER_AGP_BUFFERS_MAP( dev ) \ ((drm_radeon_private_t *)((dev)->dev_private))->buffers #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/radeon_irq.c linux.21rc5-ac2/drivers/char/drm/radeon_irq.c --- linux.21rc5/drivers/char/drm/radeon_irq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/radeon_irq.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,258 @@ +/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- + * + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Keith Whitwell + * Michel Dänzer + */ + +#include "radeon.h" +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "drm_os_linux.h" + +/* Interrupts - Used for device synchronization and flushing in the + * following circumstances: + * + * - Exclusive FB access with hw idle: + * - Wait for GUI Idle (?) interrupt, then do normal flush. + * + * - Frame throttling, NV_fence: + * - Drop marker irq's into command stream ahead of time. 
+ * - Wait on irq's with lock *not held* + * - Check each for termination condition + * + * - Internally in cp_getbuffer, etc: + * - as above, but wait with lock held??? + * + * NOTE: These functions are misleadingly named -- the irq's aren't + * tied to dma at all, this is just a hangover from dri prehistory. + */ + +void DRM(dma_service)(int irq, void *arg, struct pt_regs *reg) +{ + drm_device_t *dev = (drm_device_t *) arg; + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + u32 stat; + + stat = RADEON_READ(RADEON_GEN_INT_STATUS) + & (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT); + if (!stat) + return; + + /* SW interrupt */ + if (stat & RADEON_SW_INT_TEST) { + wake_up_interruptible( &dev_priv->swi_queue ); + } + + /* VBLANK interrupt */ + if (stat & RADEON_CRTC_VBLANK_STAT) { + atomic_inc(&dev->vbl_received); + wake_up_interruptible(&dev->vbl_queue); + DRM(vbl_send_signals)(dev); + } + + /* Acknowledge all the bits in GEN_INT_STATUS -- seem to get + * more than we asked for... + */ + RADEON_WRITE(RADEON_GEN_INT_STATUS, stat); +} + +static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv) +{ + u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS ) + & (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT); + if (tmp) + RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp ); +} + +int radeon_emit_irq(drm_device_t *dev) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + unsigned int ret; + RING_LOCALS; + + atomic_inc(&dev_priv->swi_emitted); + ret = atomic_read(&dev_priv->swi_emitted); + + BEGIN_RING( 4 ); + OUT_RING_REG( RADEON_LAST_SWI_REG, ret ); + OUT_RING_REG( RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE ); + ADVANCE_RING(); + COMMIT_RING(); + + return ret; +} + + +int radeon_wait_irq(drm_device_t *dev, int swi_nr) +{ + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + int ret = 0; + + if (RADEON_READ( RADEON_LAST_SWI_REG ) >= swi_nr) + return 0; + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + /* This is a hack to work around mysterious freezes on certain + * systems: + */ + radeon_acknowledge_irqs( dev_priv ); + + DRM_WAIT_ON( ret, dev_priv->swi_queue, 3 * HZ, + RADEON_READ( RADEON_LAST_SWI_REG ) >= swi_nr ); + + return ret; +} + +int radeon_emit_and_wait_irq(drm_device_t *dev) +{ + return radeon_wait_irq( dev, radeon_emit_irq(dev) ); +} + + +int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence) +{ + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + unsigned int cur_vblank; + int ret = 0; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + radeon_acknowledge_irqs( dev_priv ); + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + /* Assume that the user has missed the current sequence number + * by about a day rather than wanting to wait for years + * using vertical blanks... + */ + DRM_WAIT_ON( ret, dev->vbl_queue, 3*HZ, + ( ( ( cur_vblank = atomic_read(&dev->vbl_received ) ) + - *sequence ) <= (1<<23) ) ); + + *sequence = cur_vblank; + + return ret; +} + +
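The emit/wait pair above is a fence: radeon_emit_irq() stamps a fresh sequence number into RADEON_LAST_SWI_REG through the ring and fires a software interrupt, and radeon_wait_irq() sleeps until the written-back value catches up. A self-contained toy model of the same pattern; every name below is a stand-in for the driver's:

/* Toy model of the emit/wait fence implemented above: emit_fence()
 * hands out a fresh sequence number that the CP eventually writes
 * back; wait_fence() returns once the write-back has caught up.
 * "hw_last_swi" stands in for RADEON_LAST_SWI_REG, hw_retire() for
 * the hardware; real code sleeps on swi_queue instead. */
#include <stdio.h>

static unsigned int swi_emitted;	/* driver-side counter          */
static unsigned int hw_last_swi;	/* what the CP has written back */

static unsigned int emit_fence(void)
{
	return ++swi_emitted;		/* cf. atomic_inc + OUT_RING_REG */
}

static void hw_retire(void)
{
	hw_last_swi = swi_emitted;	/* the CP reaches our packet */
}

static int wait_fence(unsigned int nr)
{
	if (hw_last_swi >= nr)
		return 0;		/* already signalled */
	hw_retire();			/* real code: DRM_WAIT_ON(...) */
	return hw_last_swi >= nr ? 0 : -1;	/* ~ -EBUSY on timeout */
}

int main(void)
{
	unsigned int f = emit_fence();
	printf("fence %u -> %d\n", f, wait_fence(f));
	return 0;
}

+/* Needs the lock as it touches the ring.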
+ */ +int radeon_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_irq_emit_t emit; + int result; + + LOCK_TEST_WITH_RETURN( dev ); + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t *)data, + sizeof(emit) ); + + result = radeon_emit_irq( dev ); + + if ( copy_to_user( emit.irq_seq, &result, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + + return 0; +} + + +/* Doesn't need the hardware lock. + */ +int radeon_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_irq_wait_t irqwait; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t *)data, + sizeof(irqwait) ); + + return radeon_wait_irq( dev, irqwait.irq_seq ); +} + + +/* drm_dma.h hooks +*/ +void DRM(driver_irq_preinstall)( drm_device_t *dev ) { + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + + /* Disable *all* interrupts */ + RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 ); + + /* Clear bits if they're already high */ + radeon_acknowledge_irqs( dev_priv ); +} + +void DRM(driver_irq_postinstall)( drm_device_t *dev ) { + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + + atomic_set(&dev_priv->swi_emitted, 0); + init_waitqueue_head( &dev_priv->swi_queue ); + + /* Turn on SW and VBL ints */ + RADEON_WRITE( RADEON_GEN_INT_CNTL, + RADEON_CRTC_VBLANK_MASK | + RADEON_SW_INT_ENABLE ); +} + +void DRM(driver_irq_uninstall)( drm_device_t *dev ) { + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + if ( dev_priv ) { + /* Disable *all* interrupts */ + RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 ); + } +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/radeon_mem.c linux.21rc5-ac2/drivers/char/drm/radeon_mem.c --- linux.21rc5/drivers/char/drm/radeon_mem.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/radeon_mem.c 2003-04-22 16:44:36.000000000 +0100 @@ -0,0 +1,338 @@ +/* radeon_mem.c -- Simple agp/fb memory manager for radeon -*- linux-c -*- + * + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Keith Whitwell + */ + +#include "radeon.h" +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "drm_os_linux.h" + +/* Very simple allocator for agp memory, working on a static range + * already mapped into each client's address space. + */ + +static struct mem_block *split_block(struct mem_block *p, int start, int size, + int pid ) +{ + /* Maybe cut off the start of an existing block */ + if (start > p->start) { + struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL); + if (!newblock) + goto out; + newblock->start = start; + newblock->size = p->size - (start - p->start); + newblock->pid = 0; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size -= newblock->size; + p = newblock; + } + + /* Maybe cut off the end of an existing block */ + if (size < p->size) { + struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL); + if (!newblock) + goto out; + newblock->start = start + size; + newblock->size = p->size - size; + newblock->pid = 0; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size = size; + } + + out: + /* Our block is in the middle */ + p->pid = pid; + return p; +} + +static struct mem_block *alloc_block( struct mem_block *heap, int size, + int align2, int pid ) +{ + struct mem_block *p; + int mask = (1 << align2)-1; + + for (p = heap->next ; p != heap ; p = p->next) { + int start = (p->start + mask) & ~mask; + if (p->pid == 0 && start + size <= p->start + p->size) + return split_block( p, start, size, pid ); + } + + return NULL; +} + +static struct mem_block *find_block( struct mem_block *heap, int start ) +{ + struct mem_block *p; + + for (p = heap->next ; p != heap ; p = p->next) + if (p->start == start) + return p; + + return NULL; +} + + +static void free_block( struct mem_block *p ) +{ + p->pid = 0; + + /* Assumes a single contiguous range. Needs a special pid in + * 'heap' to stop it being subsumed. + */ + if (p->next->pid == 0) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + kfree(q); + } + + if (p->prev->pid == 0) { + struct mem_block *q = p->prev; + q->size += p->size; + q->next = p->next; + q->next->prev = q; + kfree(p); + } +} + +static void print_heap( struct mem_block *heap ) +{ + struct mem_block *p; + + for (p = heap->next ; p != heap ; p = p->next) + DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n", + p->start, p->start + p->size, + p->size, p->pid); +} + +/* Initialize. How to check for an uninitialized heap? 
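alloc_block() above does a first-fit scan over the circular list of extents, with pid == 0 marking a free block and power-of-two alignment applied before the fit test. A compilable reduction of that search; the flat array stands in for the circular list, and names are illustrative:

/* First-fit with power-of-two alignment, as in alloc_block() above:
 * the candidate start is rounded up with ((start + mask) & ~mask)
 * and a free block (pid == 0) is taken only if the aligned request
 * still fits inside it.  The split step is elided. */
#include <stdio.h>

struct blk { int start, size, pid; };

static int first_fit(struct blk *b, int n, int size, int align2, int pid)
{
	int mask = (1 << align2) - 1;
	int i;

	for (i = 0; i < n; i++) {
		int start = (b[i].start + mask) & ~mask;
		if (b[i].pid == 0 && start + size <= b[i].start + b[i].size) {
			b[i].pid = pid;	/* real code splits the block here */
			return start;
		}
	}
	return -1;
}

int main(void)
{
	struct blk heap[] = { { 3, 100, 7 }, { 103, 8192, 0 } };

	/* a 4k allocation at 4k alignment, as the alloc ioctl enforces */
	printf("offset %d\n", first_fit(heap, 2, 4096, 12, 42));
	return 0;
}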
+ */ +static int init_heap(struct mem_block **heap, int start, int size) +{ + struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); + + if (!blocks) + return -ENOMEM; + + *heap = kmalloc(sizeof(**heap), GFP_KERNEL); + if (!*heap) { + kfree( blocks ); + return -ENOMEM; + } + + blocks->start = start; + blocks->size = size; + blocks->pid = 0; + blocks->next = blocks->prev = *heap; + + memset( *heap, 0, sizeof(**heap) ); + (*heap)->pid = -1; + (*heap)->next = (*heap)->prev = blocks; + return 0; +} + + +/* Free all blocks associated with the releasing pid. + */ +void radeon_mem_release( struct mem_block *heap ) +{ + int pid = current->pid; + struct mem_block *p; + + if (!heap || !heap->next) + return; + + for (p = heap->next ; p != heap ; p = p->next) { + if (p->pid == pid) + p->pid = 0; + } + + /* Assumes a single contiguous range. Needs a special pid in + * 'heap' to stop it being subsumed. + */ + for (p = heap->next ; p != heap ; p = p->next) { + while (p->pid == 0 && p->next->pid == 0) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + kfree(q); + } + } +} + +/* Shutdown. + */ +void radeon_mem_takedown( struct mem_block **heap ) +{ + struct mem_block *p; + + if (!*heap) + return; + + for (p = (*heap)->next ; p != *heap ; ) { + struct mem_block *q = p; + p = p->next; + kfree(q); + } + + kfree( *heap ); + *heap = 0; +} + + + +/* IOCTL HANDLERS */ + +static struct mem_block **get_heap( drm_radeon_private_t *dev_priv, + int region ) +{ + switch( region ) { + case RADEON_MEM_REGION_AGP: + return &dev_priv->agp_heap; + case RADEON_MEM_REGION_FB: + return &dev_priv->fb_heap; + default: + return 0; + } +} + +int radeon_mem_alloc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_mem_alloc_t alloc; + struct mem_block *block, **heap; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t *)data, + sizeof(alloc) ); + + heap = get_heap( dev_priv, alloc.region ); + if (!heap || !*heap) + return -EFAULT; + + /* Make things easier on ourselves: all allocations at least + * 4k aligned. 
+ */ + if (alloc.alignment < 12) + alloc.alignment = 12; + + block = alloc_block( *heap, alloc.size, alloc.alignment, + current->pid ); + + if (!block) + return -ENOMEM; + + if ( copy_to_user( alloc.region_offset, &block->start, + sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + + return 0; +} + + + +int radeon_mem_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_mem_free_t memfree; + struct mem_block *block, **heap; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data, + sizeof(memfree) ); + + heap = get_heap( dev_priv, memfree.region ); + if (!heap || !*heap) + return -EFAULT; + + block = find_block( *heap, memfree.region_offset ); + if (!block) + return -EFAULT; + + if (block->pid != current->pid) + return -EPERM; + + free_block( block ); + return 0; +} + +int radeon_mem_init_heap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_mem_init_heap_t initheap; + struct mem_block **heap; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data, + sizeof(initheap) ); + + heap = get_heap( dev_priv, initheap.region ); + if (!heap) + return -EFAULT; + + if (*heap) { + DRM_ERROR("heap already initialized?"); + return -EFAULT; + } + + return init_heap( heap, initheap.start, initheap.size ); +} + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/radeon_state.c linux.21rc5-ac2/drivers/char/drm/radeon_state.c --- linux.21rc5/drivers/char/drm/radeon_state.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/radeon_state.c 2003-04-22 16:44:36.000000000 +0100 @@ -29,10 +29,11 @@ #include "radeon.h" #include "drmP.h" -#include "radeon_drv.h" #include "drm.h" -#include - +#include "drm_sarea.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "drm_os_linux.h" /* ================================================================ * CP hardware state programming functions @@ -47,360 +48,254 @@ box->x1, box->y1, box->x2, box->y2 ); BEGIN_RING( 4 ); - OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) ); OUT_RING( (box->y1 << 16) | box->x1 ); - OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) ); OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_context( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 14 ); - - OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) ); - OUT_RING( ctx->pp_misc ); - OUT_RING( ctx->pp_fog_color ); - OUT_RING( ctx->re_solid_color ); - OUT_RING( ctx->rb3d_blendcntl ); - OUT_RING( ctx->rb3d_depthoffset ); - OUT_RING( ctx->rb3d_depthpitch ); - OUT_RING( ctx->rb3d_zstencilcntl ); - - OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) ); - OUT_RING( ctx->pp_cntl ); - OUT_RING( ctx->rb3d_cntl ); - OUT_RING( ctx->rb3d_coloroffset ); - - OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) ); - 
OUT_RING( ctx->rb3d_colorpitch ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 2 ); - - OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) ); - OUT_RING( ctx->se_coord_fmt ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_line( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 5 ); - - OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) ); - OUT_RING( ctx->re_line_pattern ); - OUT_RING( ctx->re_line_state ); - - OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) ); - OUT_RING( ctx->se_line_width ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 5 ); - - OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) ); - OUT_RING( ctx->pp_lum_matrix ); - - OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) ); - OUT_RING( ctx->pp_rot_matrix_0 ); - OUT_RING( ctx->pp_rot_matrix_1 ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 4 ); - - OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) ); - OUT_RING( ctx->rb3d_stencilrefmask ); - OUT_RING( ctx->rb3d_ropcntl ); - OUT_RING( ctx->rb3d_planemask ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 7 ); - - OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) ); - OUT_RING( ctx->se_vport_xscale ); - OUT_RING( ctx->se_vport_xoffset ); - OUT_RING( ctx->se_vport_yscale ); - OUT_RING( ctx->se_vport_yoffset ); - OUT_RING( ctx->se_vport_zscale ); - OUT_RING( ctx->se_vport_zoffset ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_setup( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 4 ); - - OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); - OUT_RING( ctx->se_cntl ); - OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) ); - OUT_RING( ctx->se_cntl_status ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tcl( drm_radeon_private_t *dev_priv ) -{ -#ifdef TCL_ENABLE - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 29 ); - - OUT_RING( CP_PACKET0( RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 27 ) ); - OUT_RING( ctx->se_tcl_material_emmissive.red ); - OUT_RING( ctx->se_tcl_material_emmissive.green ); - OUT_RING( ctx->se_tcl_material_emmissive.blue ); - OUT_RING( ctx->se_tcl_material_emmissive.alpha ); - OUT_RING( 
ctx->se_tcl_material_ambient.red ); - OUT_RING( ctx->se_tcl_material_ambient.green ); - OUT_RING( ctx->se_tcl_material_ambient.blue ); - OUT_RING( ctx->se_tcl_material_ambient.alpha ); - OUT_RING( ctx->se_tcl_material_diffuse.red ); - OUT_RING( ctx->se_tcl_material_diffuse.green ); - OUT_RING( ctx->se_tcl_material_diffuse.blue ); - OUT_RING( ctx->se_tcl_material_diffuse.alpha ); - OUT_RING( ctx->se_tcl_material_specular.red ); - OUT_RING( ctx->se_tcl_material_specular.green ); - OUT_RING( ctx->se_tcl_material_specular.blue ); - OUT_RING( ctx->se_tcl_material_specular.alpha ); - OUT_RING( ctx->se_tcl_shininess ); - OUT_RING( ctx->se_tcl_output_vtx_fmt ); - OUT_RING( ctx->se_tcl_output_vtx_sel ); - OUT_RING( ctx->se_tcl_matrix_select_0 ); - OUT_RING( ctx->se_tcl_matrix_select_1 ); - OUT_RING( ctx->se_tcl_ucp_vert_blend_ctl ); - OUT_RING( ctx->se_tcl_texture_proc_ctl ); - OUT_RING( ctx->se_tcl_light_model_ctl ); - for ( i = 0 ; i < 4 ; i++ ) { - OUT_RING( ctx->se_tcl_per_light_ctl[i] ); - } - ADVANCE_RING(); -#else - DRM_ERROR( "TCL not enabled!\n" ); -#endif } -static inline void radeon_emit_misc( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 2 ); - - OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) ); - OUT_RING( ctx->re_misc ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tex0( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[0]; - RING_LOCALS; - DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset ); - - BEGIN_RING( 9 ); - - OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) ); - OUT_RING( tex->pp_txfilter ); - OUT_RING( tex->pp_txformat ); - OUT_RING( tex->pp_txoffset ); - OUT_RING( tex->pp_txcblend ); - OUT_RING( tex->pp_txablend ); - OUT_RING( tex->pp_tfactor ); - - OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) ); - OUT_RING( tex->pp_border_color ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tex1( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[1]; - RING_LOCALS; - DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset ); - - BEGIN_RING( 9 ); - - OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) ); - OUT_RING( tex->pp_txfilter ); - OUT_RING( tex->pp_txformat ); - OUT_RING( tex->pp_txoffset ); - OUT_RING( tex->pp_txcblend ); - OUT_RING( tex->pp_txablend ); - OUT_RING( tex->pp_tfactor ); - - OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) ); - OUT_RING( tex->pp_border_color ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tex2( drm_radeon_private_t *dev_priv ) +/* Emit 1.1 state + */ +static void radeon_emit_state( drm_radeon_private_t *dev_priv, + drm_radeon_context_regs_t *ctx, + drm_radeon_texture_regs_t *tex, + unsigned int dirty ) { - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[2]; RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 9 ); - - OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) ); - OUT_RING( tex->pp_txfilter ); - OUT_RING( tex->pp_txformat ); - OUT_RING( tex->pp_txoffset ); - OUT_RING( tex->pp_txcblend ); - OUT_RING( tex->pp_txablend ); - OUT_RING( tex->pp_tfactor ); - - OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) ); - OUT_RING( tex->pp_border_color 
); - - ADVANCE_RING(); -} - -static inline void radeon_emit_state( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - unsigned int dirty = sarea_priv->dirty; - - DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty ); + DRM_DEBUG( "dirty=0x%08x\n", dirty ); if ( dirty & RADEON_UPLOAD_CONTEXT ) { - radeon_emit_context( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_CONTEXT; + BEGIN_RING( 14 ); + OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) ); + OUT_RING( ctx->pp_misc ); + OUT_RING( ctx->pp_fog_color ); + OUT_RING( ctx->re_solid_color ); + OUT_RING( ctx->rb3d_blendcntl ); + OUT_RING( ctx->rb3d_depthoffset ); + OUT_RING( ctx->rb3d_depthpitch ); + OUT_RING( ctx->rb3d_zstencilcntl ); + OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) ); + OUT_RING( ctx->pp_cntl ); + OUT_RING( ctx->rb3d_cntl ); + OUT_RING( ctx->rb3d_coloroffset ); + OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) ); + OUT_RING( ctx->rb3d_colorpitch ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_VERTFMT ) { - radeon_emit_vertfmt( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_VERTFMT; + BEGIN_RING( 2 ); + OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) ); + OUT_RING( ctx->se_coord_fmt ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_LINE ) { - radeon_emit_line( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_LINE; + BEGIN_RING( 5 ); + OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) ); + OUT_RING( ctx->re_line_pattern ); + OUT_RING( ctx->re_line_state ); + OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) ); + OUT_RING( ctx->se_line_width ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_BUMPMAP ) { - radeon_emit_bumpmap( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_BUMPMAP; + BEGIN_RING( 5 ); + OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) ); + OUT_RING( ctx->pp_lum_matrix ); + OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) ); + OUT_RING( ctx->pp_rot_matrix_0 ); + OUT_RING( ctx->pp_rot_matrix_1 ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_MASKS ) { - radeon_emit_masks( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_MASKS; + BEGIN_RING( 4 ); + OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) ); + OUT_RING( ctx->rb3d_stencilrefmask ); + OUT_RING( ctx->rb3d_ropcntl ); + OUT_RING( ctx->rb3d_planemask ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_VIEWPORT ) { - radeon_emit_viewport( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_VIEWPORT; + BEGIN_RING( 7 ); + OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) ); + OUT_RING( ctx->se_vport_xscale ); + OUT_RING( ctx->se_vport_xoffset ); + OUT_RING( ctx->se_vport_yscale ); + OUT_RING( ctx->se_vport_yoffset ); + OUT_RING( ctx->se_vport_zscale ); + OUT_RING( ctx->se_vport_zoffset ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_SETUP ) { - radeon_emit_setup( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_SETUP; - } - - if ( dirty & RADEON_UPLOAD_TCL ) { -#ifdef TCL_ENABLE - radeon_emit_tcl( dev_priv ); -#endif - sarea_priv->dirty &= ~RADEON_UPLOAD_TCL; + BEGIN_RING( 4 ); + OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); + OUT_RING( ctx->se_cntl ); + OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) ); + OUT_RING( ctx->se_cntl_status ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_MISC ) { - radeon_emit_misc( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_MISC; + BEGIN_RING( 2 ); + OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) ); + OUT_RING( ctx->re_misc ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX0 ) { - radeon_emit_tex0( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_TEX0; + BEGIN_RING( 9 ); + 
OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) ); + OUT_RING( tex[0].pp_txfilter ); + OUT_RING( tex[0].pp_txformat ); + OUT_RING( tex[0].pp_txoffset ); + OUT_RING( tex[0].pp_txcblend ); + OUT_RING( tex[0].pp_txablend ); + OUT_RING( tex[0].pp_tfactor ); + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) ); + OUT_RING( tex[0].pp_border_color ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX1 ) { - radeon_emit_tex1( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_TEX1; + BEGIN_RING( 9 ); + OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) ); + OUT_RING( tex[1].pp_txfilter ); + OUT_RING( tex[1].pp_txformat ); + OUT_RING( tex[1].pp_txoffset ); + OUT_RING( tex[1].pp_txcblend ); + OUT_RING( tex[1].pp_txablend ); + OUT_RING( tex[1].pp_tfactor ); + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) ); + OUT_RING( tex[1].pp_border_color ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX2 ) { -#if 0 - radeon_emit_tex2( dev_priv ); -#endif - sarea_priv->dirty &= ~RADEON_UPLOAD_TEX2; + BEGIN_RING( 9 ); + OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) ); + OUT_RING( tex[2].pp_txfilter ); + OUT_RING( tex[2].pp_txformat ); + OUT_RING( tex[2].pp_txoffset ); + OUT_RING( tex[2].pp_txcblend ); + OUT_RING( tex[2].pp_txablend ); + OUT_RING( tex[2].pp_tfactor ); + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) ); + OUT_RING( tex[2].pp_border_color ); + ADVANCE_RING(); } +} - sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | - RADEON_UPLOAD_TEX1IMAGES | - RADEON_UPLOAD_TEX2IMAGES | - RADEON_REQUIRE_QUIESCENCE); +/* Emit 1.2 state + */ +static void radeon_emit_state2( drm_radeon_private_t *dev_priv, + drm_radeon_state_t *state ) +{ + RING_LOCALS; + + if (state->dirty & RADEON_UPLOAD_ZBIAS) { + BEGIN_RING( 3 ); + OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) ); + OUT_RING( state->context2.se_zbias_factor ); + OUT_RING( state->context2.se_zbias_constant ); + ADVANCE_RING(); + } + + radeon_emit_state( dev_priv, &state->context, + state->tex, state->dirty ); } +/* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in + * 1.3 cmdbuffers allow all previous state to be updated as well as + * the tcl scalar and vector areas. 
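The packet[] table that follows maps each client-visible packet id to its first register, its length in dwords, and a name for debugging; the cmdbuf parser can then range-check untrusted ids before echoing client data to the ring. A stand-alone sketch of that check, with placeholder table contents:

/* Sketch of how a packet[] table of the shape below is used: an
 * untrusted packet id from the client is mapped to a vetted
 * (first register, dword count) pair before any data is echoed
 * to the ring.  Table contents here are placeholders. */
#include <stdio.h>

#define MAX_STATE_PACKETS 2

static struct {
	int start;
	int len;
	const char *name;
} packet[MAX_STATE_PACKETS] = {
	{ 0x1c00, 7, "EXAMPLE_BLOCK_A" },
	{ 0x2c00, 3, "EXAMPLE_BLOCK_B" },
};

static int emit_packet(unsigned int id, const unsigned int *data)
{
	int i;

	if (id >= MAX_STATE_PACKETS)
		return -1;		/* ~ -EINVAL: unknown packet id */
	/* real code: BEGIN_RING(); OUT_RING(CP_PACKET0(start, len-1)); */
	for (i = 0; i < packet[id].len; i++)
		printf("%s+%d <- 0x%x\n", packet[id].name, 4 * i, data[i]);
	return 0;
}

int main(void)
{
	unsigned int vals[7] = { 1, 2, 3, 4, 5, 6, 7 };
	return emit_packet(0, vals);
}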
+ */ +static struct { + int start; + int len; + const char *name; +} packet[RADEON_MAX_STATE_PACKETS] = { + { RADEON_PP_MISC,7,"RADEON_PP_MISC" }, + { RADEON_PP_CNTL,3,"RADEON_PP_CNTL" }, + { RADEON_RB3D_COLORPITCH,1,"RADEON_RB3D_COLORPITCH" }, + { RADEON_RE_LINE_PATTERN,2,"RADEON_RE_LINE_PATTERN" }, + { RADEON_SE_LINE_WIDTH,1,"RADEON_SE_LINE_WIDTH" }, + { RADEON_PP_LUM_MATRIX,1,"RADEON_PP_LUM_MATRIX" }, + { RADEON_PP_ROT_MATRIX_0,2,"RADEON_PP_ROT_MATRIX_0" }, + { RADEON_RB3D_STENCILREFMASK,3,"RADEON_RB3D_STENCILREFMASK" }, + { RADEON_SE_VPORT_XSCALE,6,"RADEON_SE_VPORT_XSCALE" }, + { RADEON_SE_CNTL,2,"RADEON_SE_CNTL" }, + { RADEON_SE_CNTL_STATUS,1,"RADEON_SE_CNTL_STATUS" }, + { RADEON_RE_MISC,1,"RADEON_RE_MISC" }, + { RADEON_PP_TXFILTER_0,6,"RADEON_PP_TXFILTER_0" }, + { RADEON_PP_BORDER_COLOR_0,1,"RADEON_PP_BORDER_COLOR_0" }, + { RADEON_PP_TXFILTER_1,6,"RADEON_PP_TXFILTER_1" }, + { RADEON_PP_BORDER_COLOR_1,1,"RADEON_PP_BORDER_COLOR_1" }, + { RADEON_PP_TXFILTER_2,6,"RADEON_PP_TXFILTER_2" }, + { RADEON_PP_BORDER_COLOR_2,1,"RADEON_PP_BORDER_COLOR_2" }, + { RADEON_SE_ZBIAS_FACTOR,2,"RADEON_SE_ZBIAS_FACTOR" }, + { RADEON_SE_TCL_OUTPUT_VTX_FMT,11,"RADEON_SE_TCL_OUTPUT_VTX_FMT" }, + { RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED,17,"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" }, + { R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" }, + { R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" }, + { R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" }, + { R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" }, + { R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" }, + { R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" }, + { R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" }, + { R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" }, + { R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" }, + { R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" }, + { R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" }, + { R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" }, + { R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" }, + { R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" }, + { R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" }, + { R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" }, + { R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" }, + { R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" }, + { R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" }, + { R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" }, + { R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" }, + { R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" }, + { R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" }, + { R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" }, + { R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" }, + { R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" }, + { R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" }, + { R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" }, + { R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" }, + { R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" }, + { R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" }, + { R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" }, + { R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" }, + { R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" }, + { R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" }, + { R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" }, + { R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" }, + { R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" }, + { R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" }, + { R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" }, + { R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */ + { 
R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */ + { R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" }, + { R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" }, + { R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" }, + { R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" }, + { R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" }, + { R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" }, + { R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" }, + { R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" }, + { R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" }, + { R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" }, +}; + + -#if RADEON_PERFORMANCE_BOXES /* ================================================================ * Performance monitoring functions */ @@ -409,10 +304,12 @@ int x, int y, int w, int h, int r, int g, int b ) { - u32 pitch, offset; u32 color; RING_LOCALS; + x += dev_priv->sarea_priv->boxes[0].x1; + y += dev_priv->sarea_priv->boxes[0].y1; + switch ( dev_priv->color_fmt ) { case RADEON_COLOR_FORMAT_RGB565: color = (((r & 0xf8) << 8) | @@ -425,8 +322,11 @@ break; } - offset = dev_priv->back_offset; - pitch = dev_priv->back_pitch >> 3; + BEGIN_RING( 4 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); + OUT_RING( 0xffffffff ); + ADVANCE_RING(); BEGIN_RING( 6 ); @@ -438,7 +338,12 @@ RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS ); - OUT_RING( (pitch << 22) | (offset >> 5) ); + if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) { + OUT_RING( dev_priv->front_pitch_offset ); + } else { + OUT_RING( dev_priv->back_pitch_offset ); + } + OUT_RING( color ); OUT_RING( (x << 16) | y ); @@ -449,53 +354,77 @@ static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv ) { - if ( atomic_read( &dev_priv->idle_count ) == 0 ) { - radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 ); - } else { - atomic_set( &dev_priv->idle_count, 0 ); + /* Collapse various things into a wait flag -- trying to + * guess if userspace slept -- better just to have them tell us. + */ + if (dev_priv->stats.last_frame_reads > 1 || + dev_priv->stats.last_clear_reads > dev_priv->stats.clears) { + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; } -} -#endif + if (dev_priv->stats.freelist_loops) { + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + } + + /* Purple box for page flipping + */ + if ( dev_priv->stats.boxes & RADEON_BOX_FLIP ) + radeon_clear_box( dev_priv, 4, 4, 8, 8, 255, 0, 255 ); + /* Red box if we have to wait for idle at any point + */ + if ( dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE ) + radeon_clear_box( dev_priv, 16, 4, 8, 8, 255, 0, 0 ); + /* Blue box: lost context? 
+ */ + + /* Yellow box for texture swaps + */ + if ( dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD ) + radeon_clear_box( dev_priv, 40, 4, 8, 8, 255, 255, 0 ); + + /* Green box if hardware never idles (as far as we can tell) + */ + if ( !(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE) ) + radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 ); + + + /* Draw bars indicating number of buffers allocated + * (not a great measure, easily confused) + */ + if (dev_priv->stats.requested_bufs) { + if (dev_priv->stats.requested_bufs > 100) + dev_priv->stats.requested_bufs = 100; + + radeon_clear_box( dev_priv, 4, 16, + dev_priv->stats.requested_bufs, 4, + 196, 128, 128 ); + } + + memset( &dev_priv->stats, 0, sizeof(dev_priv->stats) ); + +} /* ================================================================ * CP command dispatch functions */ -static void radeon_print_dirty( const char *msg, unsigned int flags ) -{ - DRM_DEBUG( "%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", - msg, - flags, - (flags & RADEON_UPLOAD_CONTEXT) ? "context, " : "", - (flags & RADEON_UPLOAD_VERTFMT) ? "vertfmt, " : "", - (flags & RADEON_UPLOAD_LINE) ? "line, " : "", - (flags & RADEON_UPLOAD_BUMPMAP) ? "bumpmap, " : "", - (flags & RADEON_UPLOAD_MASKS) ? "masks, " : "", - (flags & RADEON_UPLOAD_VIEWPORT) ? "viewport, " : "", - (flags & RADEON_UPLOAD_SETUP) ? "setup, " : "", - (flags & RADEON_UPLOAD_TCL) ? "tcl, " : "", - (flags & RADEON_UPLOAD_MISC) ? "misc, " : "", - (flags & RADEON_UPLOAD_TEX0) ? "tex0, " : "", - (flags & RADEON_UPLOAD_TEX1) ? "tex1, " : "", - (flags & RADEON_UPLOAD_TEX2) ? "tex2, " : "", - (flags & RADEON_UPLOAD_CLIPRECTS) ? "cliprects, " : "", - (flags & RADEON_REQUIRE_QUIESCENCE) ? "quiescence, " : "" ); -} - static void radeon_cp_dispatch_clear( drm_device_t *dev, drm_radeon_clear_t *clear, drm_radeon_clear_rect_t *depth_boxes ) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; int nbox = sarea_priv->nbox; drm_clip_rect_t *pbox = sarea_priv->boxes; unsigned int flags = clear->flags; + u32 rb3d_cntl = 0, rb3d_stencilrefmask= 0; int i; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "flags = 0x%x\n", flags ); + + dev_priv->stats.clears++; if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) { unsigned int tmp = flags; @@ -505,127 +434,277 @@ if ( tmp & RADEON_BACK ) flags |= RADEON_FRONT; } - for ( i = 0 ; i < nbox ; i++ ) { - int x = pbox[i].x1; - int y = pbox[i].y1; - int w = pbox[i].x2 - x; - int h = pbox[i].y2 - y; + if ( flags & (RADEON_FRONT | RADEON_BACK) ) { - DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n", - x, y, w, h, flags ); + BEGIN_RING( 4 ); - if ( flags & (RADEON_FRONT | RADEON_BACK) ) { - BEGIN_RING( 4 ); + /* Ensure the 3D stream is idle before doing a + * 2D fill to clear the front or back buffer. + */ + RADEON_WAIT_UNTIL_3D_IDLE(); + + OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); + OUT_RING( clear->color_mask ); - /* Ensure the 3D stream is idle before doing a - * 2D fill to clear the front or back buffer. - */ - RADEON_WAIT_UNTIL_3D_IDLE(); + ADVANCE_RING(); - OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); - OUT_RING( clear->color_mask ); + /* Make sure we restore the 3D state next time. 
+ */ + dev_priv->sarea_priv->ctx_owner = 0; - ADVANCE_RING(); + for ( i = 0 ; i < nbox ; i++ ) { + int x = pbox[i].x1; + int y = pbox[i].y1; + int w = pbox[i].x2 - x; + int h = pbox[i].y2 - y; + + DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n", + x, y, w, h, flags ); + + if ( flags & RADEON_FRONT ) { + BEGIN_RING( 6 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_SOLID_COLOR | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | + RADEON_GMC_CLR_CMP_CNTL_DIS ); + + OUT_RING( dev_priv->front_pitch_offset ); + OUT_RING( clear->clear_color ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } + + if ( flags & RADEON_BACK ) { + BEGIN_RING( 6 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_SOLID_COLOR | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | + RADEON_GMC_CLR_CMP_CNTL_DIS ); + + OUT_RING( dev_priv->back_pitch_offset ); + OUT_RING( clear->clear_color ); - /* Make sure we restore the 3D state next time. - */ - dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | - RADEON_UPLOAD_MASKS); + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } } + } - if ( flags & RADEON_FRONT ) { - BEGIN_RING( 6 ); + /* We have to clear the depth and/or stencil buffers by + * rendering a quad into just those buffers. Thus, we have to + * make sure the 3D engine is configured correctly. + */ + if ( dev_priv->is_r200 && + (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) { - OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); - OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | - RADEON_GMC_BRUSH_SOLID_COLOR | - (dev_priv->color_fmt << 8) | - RADEON_GMC_SRC_DATATYPE_COLOR | - RADEON_ROP3_P | - RADEON_GMC_CLR_CMP_CNTL_DIS ); + int tempPP_CNTL; + int tempRE_CNTL; + int tempRB3D_CNTL; + int tempRB3D_ZSTENCILCNTL; + int tempRB3D_STENCILREFMASK; + int tempRB3D_PLANEMASK; + int tempSE_CNTL; + int tempSE_VTE_CNTL; + int tempSE_VTX_FMT_0; + int tempSE_VTX_FMT_1; + int tempSE_VAP_CNTL; + int tempRE_AUX_SCISSOR_CNTL; - OUT_RING( dev_priv->front_pitch_offset ); - OUT_RING( clear->clear_color ); + tempPP_CNTL = 0; + tempRE_CNTL = 0; - OUT_RING( (x << 16) | y ); - OUT_RING( (w << 16) | h ); + tempRB3D_CNTL = depth_clear->rb3d_cntl; + tempRB3D_CNTL &= ~(1<<15); /* unset radeon magic flag */ - ADVANCE_RING(); + tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl; + tempRB3D_STENCILREFMASK = 0x0; + + tempSE_CNTL = depth_clear->se_cntl; + + + + /* Disable TCL */ + + tempSE_VAP_CNTL = (/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */ + (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT)); + + tempRB3D_PLANEMASK = 0x0; + + tempRE_AUX_SCISSOR_CNTL = 0x0; + + tempSE_VTE_CNTL = + SE_VTE_CNTL__VTX_XY_FMT_MASK | + SE_VTE_CNTL__VTX_Z_FMT_MASK; + + /* Vertex format (X, Y, Z, W)*/ + tempSE_VTX_FMT_0 = + SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK | + SE_VTX_FMT_0__VTX_W0_PRESENT_MASK; + tempSE_VTX_FMT_1 = 0x0; + + + /* + * Depth buffer specific enables + */ + if (flags & RADEON_DEPTH) { + /* Enable depth buffer */ + tempRB3D_CNTL |= RADEON_Z_ENABLE; + } else { + /* Disable depth buffer */ + tempRB3D_CNTL &= ~RADEON_Z_ENABLE; } - if ( flags & RADEON_BACK ) { - BEGIN_RING( 6 ); + /* + * Stencil buffer specific enables + */ + if ( flags & RADEON_STENCIL ) { + tempRB3D_CNTL |= RADEON_STENCIL_ENABLE; + tempRB3D_STENCILREFMASK = clear->depth_mask; + } else { + tempRB3D_CNTL &= 
~RADEON_STENCIL_ENABLE; + tempRB3D_STENCILREFMASK = 0x00000000; + } - OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); - OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | - RADEON_GMC_BRUSH_SOLID_COLOR | - (dev_priv->color_fmt << 8) | - RADEON_GMC_SRC_DATATYPE_COLOR | - RADEON_ROP3_P | - RADEON_GMC_CLR_CMP_CNTL_DIS ); + BEGIN_RING( 26 ); + RADEON_WAIT_UNTIL_2D_IDLE(); - OUT_RING( dev_priv->back_pitch_offset ); - OUT_RING( clear->clear_color ); + OUT_RING_REG( RADEON_PP_CNTL, tempPP_CNTL ); + OUT_RING_REG( R200_RE_CNTL, tempRE_CNTL ); + OUT_RING_REG( RADEON_RB3D_CNTL, tempRB3D_CNTL ); + OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL, + tempRB3D_ZSTENCILCNTL ); + OUT_RING_REG( RADEON_RB3D_STENCILREFMASK, + tempRB3D_STENCILREFMASK ); + OUT_RING_REG( RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK ); + OUT_RING_REG( RADEON_SE_CNTL, tempSE_CNTL ); + OUT_RING_REG( R200_SE_VTE_CNTL, tempSE_VTE_CNTL ); + OUT_RING_REG( R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0 ); + OUT_RING_REG( R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1 ); + OUT_RING_REG( R200_SE_VAP_CNTL, tempSE_VAP_CNTL ); + OUT_RING_REG( R200_RE_AUX_SCISSOR_CNTL, + tempRE_AUX_SCISSOR_CNTL ); + ADVANCE_RING(); - OUT_RING( (x << 16) | y ); - OUT_RING( (w << 16) | h ); + /* Make sure we restore the 3D state next time. + */ + dev_priv->sarea_priv->ctx_owner = 0; - ADVANCE_RING(); + for ( i = 0 ; i < nbox ; i++ ) { + + /* Funny that this should be required -- + * sets top-left? + */ + radeon_emit_clip_rect( dev_priv, + &sarea_priv->boxes[i] ); + BEGIN_RING( 14 ); + OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 12 ) ); + OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST | + RADEON_PRIM_WALK_RING | + (3 << RADEON_NUM_VERTICES_SHIFT)) ); + OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); + OUT_RING( depth_boxes[i].ui[CLEAR_Y1] ); + OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x3f800000 ); + OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); + OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); + OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x3f800000 ); + OUT_RING( depth_boxes[i].ui[CLEAR_X2] ); + OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); + OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x3f800000 ); + ADVANCE_RING(); } + } + else if ( (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) { + + rb3d_cntl = depth_clear->rb3d_cntl; if ( flags & RADEON_DEPTH ) { - drm_radeon_depth_clear_t *depth_clear = - &dev_priv->depth_clear; + rb3d_cntl |= RADEON_Z_ENABLE; + } else { + rb3d_cntl &= ~RADEON_Z_ENABLE; + } - if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { - radeon_emit_state( dev_priv ); - } + if ( flags & RADEON_STENCIL ) { + rb3d_cntl |= RADEON_STENCIL_ENABLE; + rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */ + } else { + rb3d_cntl &= ~RADEON_STENCIL_ENABLE; + rb3d_stencilrefmask = 0x00000000; + } - /* FIXME: Render a rectangle to clear the depth - * buffer. So much for those "fast Z clears"... - */ - BEGIN_RING( 23 ); + BEGIN_RING( 13 ); + RADEON_WAIT_UNTIL_2D_IDLE(); + + OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) ); + OUT_RING( 0x00000000 ); + OUT_RING( rb3d_cntl ); + + OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL, + depth_clear->rb3d_zstencilcntl ); + OUT_RING_REG( RADEON_RB3D_STENCILREFMASK, + rb3d_stencilrefmask ); + OUT_RING_REG( RADEON_RB3D_PLANEMASK, + 0x00000000 ); + OUT_RING_REG( RADEON_SE_CNTL, + depth_clear->se_cntl ); + ADVANCE_RING(); + + /* Make sure we restore the 3D state next time. + */ + dev_priv->sarea_priv->ctx_owner = 0; - RADEON_WAIT_UNTIL_2D_IDLE(); + for ( i = 0 ; i < nbox ; i++ ) { + + /* Funny that this should be required -- + * sets top-left? 
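
Both the r200 path above and the legacy path that follows derive the clear state the same way: start from the saved rb3d_cntl, toggle the Z and stencil enables according to the ioctl flags, and load the caller's depth_mask into the stencil ref/mask register only when a stencil clear was requested (the field is reused, hence the "misnamed field" remark below). A compact sketch of that derivation; the bit positions here are placeholders, not the real RADEON_* constants:

    #include <stdint.h>

    #define Z_ENABLE       (1u << 8)   /* placeholder bit positions */
    #define STENCIL_ENABLE (1u << 9)

    static void setup_clear_state(int clear_depth, int clear_stencil,
                                  uint32_t saved_rb3d_cntl, uint32_t depth_mask,
                                  uint32_t *rb3d_cntl, uint32_t *stencilrefmask)
    {
            uint32_t cntl = saved_rb3d_cntl;

            if (clear_depth)
                    cntl |= Z_ENABLE;          /* the clear quad writes Z */
            else
                    cntl &= ~Z_ENABLE;

            if (clear_stencil) {
                    cntl |= STENCIL_ENABLE;
                    *stencilrefmask = depth_mask;  /* reused "misnamed" field */
            } else {
                    cntl &= ~STENCIL_ENABLE;
                    *stencilrefmask = 0;
            }

            *rb3d_cntl = cntl;
    }
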
+ */ + radeon_emit_clip_rect( dev_priv, + &sarea_priv->boxes[i] ); - OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) ); - OUT_RING( 0x00000000 ); - OUT_RING( depth_clear->rb3d_cntl ); - OUT_RING( CP_PACKET0( RADEON_RB3D_ZSTENCILCNTL, 0 ) ); - OUT_RING( depth_clear->rb3d_zstencilcntl ); - OUT_RING( CP_PACKET0( RADEON_RB3D_PLANEMASK, 0 ) ); - OUT_RING( 0x00000000 ); - OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); - OUT_RING( depth_clear->se_cntl ); + BEGIN_RING( 15 ); - OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 10 ) ); - OUT_RING( RADEON_VTX_Z_PRESENT ); + OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 13 ) ); + OUT_RING( RADEON_VTX_Z_PRESENT | + RADEON_VTX_PKCOLOR_PRESENT); OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST | RADEON_PRIM_WALK_RING | RADEON_MAOS_ENABLE | RADEON_VTX_FMT_RADEON_MODE | (3 << RADEON_NUM_VERTICES_SHIFT)) ); + OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); OUT_RING( depth_boxes[i].ui[CLEAR_Y1] ); OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x0 ); OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x0 ); OUT_RING( depth_boxes[i].ui[CLEAR_X2] ); OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x0 ); ADVANCE_RING(); - - /* Make sure we restore the 3D state next time. - */ - dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | - RADEON_UPLOAD_SETUP | - RADEON_UPLOAD_MASKS); } } @@ -651,13 +730,13 @@ drm_clip_rect_t *pbox = sarea_priv->boxes; int i; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); -#if RADEON_PERFORMANCE_BOXES /* Do some trivial performance monitoring... */ - radeon_cp_performance_boxes( dev_priv ); -#endif + if (dev_priv->do_boxes) + radeon_cp_performance_boxes( dev_priv ); + /* Wait for the 3D stream to idle before dispatching the bitblt. * This will prevent data corruption between the two streams. @@ -689,9 +768,17 @@ RADEON_DP_SRC_SOURCE_MEMORY | RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS ); - - OUT_RING( dev_priv->back_pitch_offset ); - OUT_RING( dev_priv->front_pitch_offset ); + + /* Make this work even if front & back are flipped: + */ + if (dev_priv->current_page == 0) { + OUT_RING( dev_priv->back_pitch_offset ); + OUT_RING( dev_priv->front_pitch_offset ); + } + else { + OUT_RING( dev_priv->front_pitch_offset ); + OUT_RING( dev_priv->back_pitch_offset ); + } OUT_RING( (x << 16) | y ); OUT_RING( (x << 16) | y ); @@ -717,29 +804,33 @@ static void radeon_cp_dispatch_flip( drm_device_t *dev ) { drm_radeon_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page ); + drm_sarea_t *sarea = (drm_sarea_t *)dev_priv->sarea->handle; + int offset = (dev_priv->current_page == 1) + ? dev_priv->front_offset : dev_priv->back_offset; + RING_LOCALS; + DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n", + __FUNCTION__, + dev_priv->current_page, + dev_priv->sarea_priv->pfCurrentPage); -#if RADEON_PERFORMANCE_BOXES /* Do some trivial performance monitoring... 
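
Note that the swap path above now works while page flipping is active: the source and destination of the back-to-front blit are picked from current_page rather than hard-coded, since the roles of the two buffers alternate on every flip. The selection logic, reduced to a sketch with an illustrative fb_state struct:

    struct fb_state {
            int current_page;                 /* 0 or 1 */
            unsigned long front_pitch_offset;
            unsigned long back_pitch_offset;
    };

    static void pick_blit_buffers(const struct fb_state *fb,
                                  unsigned long *src, unsigned long *dst)
    {
            if (fb->current_page == 0) {
                    *src = fb->back_pitch_offset;   /* normal orientation */
                    *dst = fb->front_pitch_offset;
            } else {
                    *src = fb->front_pitch_offset;  /* buffers are flipped */
                    *dst = fb->back_pitch_offset;
            }
    }
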
*/ - radeon_cp_performance_boxes( dev_priv ); -#endif + if (dev_priv->do_boxes) { + dev_priv->stats.boxes |= RADEON_BOX_FLIP; + radeon_cp_performance_boxes( dev_priv ); + } + /* Update the frame offsets for both CRTCs + */ BEGIN_RING( 6 ); RADEON_WAIT_UNTIL_3D_IDLE(); - RADEON_WAIT_UNTIL_PAGE_FLIPPED(); - - OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET, 0 ) ); - - if ( dev_priv->current_page == 0 ) { - OUT_RING( dev_priv->back_offset ); - dev_priv->current_page = 1; - } else { - OUT_RING( dev_priv->front_offset ); - dev_priv->current_page = 0; - } + OUT_RING_REG( RADEON_CRTC_OFFSET, ( ( sarea->frame.y * dev_priv->front_pitch + + sarea->frame.x + * ( dev_priv->color_fmt - 2 ) ) & ~7 ) + + offset ); + OUT_RING_REG( RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base + + offset ); ADVANCE_RING(); @@ -748,6 +839,8 @@ * performing the swapbuffer ioctl. */ dev_priv->sarea_priv->last_frame++; + dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page = + 1 - dev_priv->current_page; BEGIN_RING( 2 ); @@ -756,82 +849,118 @@ ADVANCE_RING(); } +static int bad_prim_vertex_nr( int primitive, int nr ) +{ + switch (primitive & RADEON_PRIM_TYPE_MASK) { + case RADEON_PRIM_TYPE_NONE: + case RADEON_PRIM_TYPE_POINT: + return nr < 1; + case RADEON_PRIM_TYPE_LINE: + return (nr & 1) || nr == 0; + case RADEON_PRIM_TYPE_LINE_STRIP: + return nr < 2; + case RADEON_PRIM_TYPE_TRI_LIST: + case RADEON_PRIM_TYPE_3VRT_POINT_LIST: + case RADEON_PRIM_TYPE_3VRT_LINE_LIST: + case RADEON_PRIM_TYPE_RECT_LIST: + return nr % 3 || nr == 0; + case RADEON_PRIM_TYPE_TRI_FAN: + case RADEON_PRIM_TYPE_TRI_STRIP: + return nr < 3; + default: + return 1; + } +} + + + +typedef struct { + unsigned int start; + unsigned int finish; + unsigned int prim; + unsigned int numverts; + unsigned int offset; + unsigned int vc_format; +} drm_radeon_tcl_prim_t; + static void radeon_cp_dispatch_vertex( drm_device_t *dev, - drm_buf_t *buf ) + drm_buf_t *buf, + drm_radeon_tcl_prim_t *prim, + drm_clip_rect_t *boxes, + int nbox ) + { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv = buf->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - int format = sarea_priv->vc_format; - int offset = dev_priv->agp_buffers_offset + buf->offset; - int size = buf->used; - int prim = buf_priv->prim; + drm_clip_rect_t box; + int offset = dev_priv->agp_buffers_offset + buf->offset + prim->start; + int numverts = (int)prim->numverts; int i = 0; RING_LOCALS; - DRM_DEBUG( "%s: nbox=%d\n", __FUNCTION__, sarea_priv->nbox ); - if ( 0 ) - radeon_print_dirty( "dispatch_vertex", sarea_priv->dirty ); + DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n", + prim->prim, + prim->vc_format, + prim->start, + prim->finish, + prim->numverts); + + if (bad_prim_vertex_nr( prim->prim, prim->numverts )) { + DRM_ERROR( "bad prim %x numverts %d\n", + prim->prim, prim->numverts ); + return; + } + + do { + /* Emit the next cliprect */ + if ( i < nbox ) { + if (__copy_from_user( &box, &boxes[i], sizeof(box) )) + return; - if ( buf->used ) { - buf_priv->dispatched = 1; - - if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { - radeon_emit_state( dev_priv ); + radeon_emit_clip_rect( dev_priv, &box ); } - do { - /* Emit the next set of up to three cliprects */ - if ( i < sarea_priv->nbox ) { - radeon_emit_clip_rect( dev_priv, - &sarea_priv->boxes[i] ); - } + /* Emit the vertex buffer rendering commands */ + BEGIN_RING( 5 ); - /* Emit the vertex buffer rendering commands */ - BEGIN_RING( 5 ); + OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 
) ); + OUT_RING( offset ); + OUT_RING( numverts ); + OUT_RING( prim->vc_format ); + OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST | + RADEON_COLOR_ORDER_RGBA | + RADEON_VTX_FMT_RADEON_MODE | + (numverts << RADEON_NUM_VERTICES_SHIFT) ); - OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) ); - OUT_RING( offset ); - OUT_RING( size ); - OUT_RING( format ); - OUT_RING( prim | RADEON_PRIM_WALK_LIST | - RADEON_COLOR_ORDER_RGBA | - RADEON_VTX_FMT_RADEON_MODE | - (size << RADEON_NUM_VERTICES_SHIFT) ); + ADVANCE_RING(); - ADVANCE_RING(); + i++; + } while ( i < nbox ); +} - i++; - } while ( i < sarea_priv->nbox ); - } - if ( buf_priv->discard ) { - buf_priv->age = dev_priv->sarea_priv->last_dispatch; - /* Emit the vertex buffer age */ - BEGIN_RING( 2 ); - RADEON_DISPATCH_AGE( buf_priv->age ); - ADVANCE_RING(); +static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_buf_priv_t *buf_priv = buf->dev_private; + RING_LOCALS; - buf->pending = 1; - buf->used = 0; - /* FIXME: Check dispatched field */ - buf_priv->dispatched = 0; - } + buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; - dev_priv->sarea_priv->last_dispatch++; + /* Emit the vertex buffer age */ + BEGIN_RING( 2 ); + RADEON_DISPATCH_AGE( buf_priv->age ); + ADVANCE_RING(); - sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS; - sarea_priv->nbox = 0; + buf->pending = 1; + buf->used = 0; } - static void radeon_cp_dispatch_indirect( drm_device_t *dev, drm_buf_t *buf, int start, int end ) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv = buf->dev_private; RING_LOCALS; DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end ); @@ -852,8 +981,6 @@ data[dwords++] = RADEON_CP_PACKET2; } - buf_priv->dispatched = 1; - /* Fire off the indirect buffer */ BEGIN_RING( 3 ); @@ -863,100 +990,75 @@ ADVANCE_RING(); } - - if ( buf_priv->discard ) { - buf_priv->age = dev_priv->sarea_priv->last_dispatch; - - /* Emit the indirect buffer age */ - BEGIN_RING( 2 ); - RADEON_DISPATCH_AGE( buf_priv->age ); - ADVANCE_RING(); - - buf->pending = 1; - buf->used = 0; - /* FIXME: Check dispatched field */ - buf_priv->dispatched = 0; - } - - dev_priv->sarea_priv->last_dispatch++; } + static void radeon_cp_dispatch_indices( drm_device_t *dev, - drm_buf_t *buf, - int start, int end, - int count ) + drm_buf_t *elt_buf, + drm_radeon_tcl_prim_t *prim, + drm_clip_rect_t *boxes, + int nbox ) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv = buf->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - int format = sarea_priv->vc_format; - int offset = dev_priv->agp_buffers_offset; - int prim = buf_priv->prim; + drm_clip_rect_t box; + int offset = dev_priv->agp_buffers_offset + prim->offset; u32 *data; int dwords; int i = 0; - RING_LOCALS; - DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count ); - - if ( 0 ) - radeon_print_dirty( "dispatch_indices", sarea_priv->dirty ); - - if ( start != end ) { - buf_priv->dispatched = 1; - - if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { - radeon_emit_state( dev_priv ); - } - - dwords = (end - start + 3) / sizeof(u32); - - data = (u32 *)((char *)dev_priv->buffers->handle - + buf->offset + start); + int start = prim->start + RADEON_INDEX_PRIM_OFFSET; + int count = (prim->finish - start) / sizeof(u16); - data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 ); - - data[1] = offset; - data[2] = RADEON_MAX_VB_VERTS; - data[3] = format; - 
data[4] = (prim | RADEON_PRIM_WALK_IND | - RADEON_COLOR_ORDER_RGBA | - RADEON_VTX_FMT_RADEON_MODE | - (count << RADEON_NUM_VERTICES_SHIFT) ); - - if ( count & 0x1 ) { - data[dwords-1] &= 0x0000ffff; - } - - do { - /* Emit the next set of up to three cliprects */ - if ( i < sarea_priv->nbox ) { - radeon_emit_clip_rect( dev_priv, - &sarea_priv->boxes[i] ); - } - - radeon_cp_dispatch_indirect( dev, buf, start, end ); - - i++; - } while ( i < sarea_priv->nbox ); + DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n", + prim->prim, + prim->vc_format, + prim->start, + prim->finish, + prim->offset, + prim->numverts); + + if (bad_prim_vertex_nr( prim->prim, count )) { + DRM_ERROR( "bad prim %x count %d\n", + prim->prim, count ); + return; } - if ( buf_priv->discard ) { - buf_priv->age = dev_priv->sarea_priv->last_dispatch; - /* Emit the vertex buffer age */ - BEGIN_RING( 2 ); - RADEON_DISPATCH_AGE( buf_priv->age ); - ADVANCE_RING(); + if ( start >= prim->finish || + (prim->start & 0x7) ) { + DRM_ERROR( "buffer prim %d\n", prim->prim ); + return; + } + + dwords = (prim->finish - prim->start + 3) / sizeof(u32); + + data = (u32 *)((char *)dev_priv->buffers->handle + + elt_buf->offset + prim->start); + + data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 ); + data[1] = offset; + data[2] = prim->numverts; + data[3] = prim->vc_format; + data[4] = (prim->prim | + RADEON_PRIM_WALK_IND | + RADEON_COLOR_ORDER_RGBA | + RADEON_VTX_FMT_RADEON_MODE | + (count << RADEON_NUM_VERTICES_SHIFT) ); + + do { + if ( i < nbox ) { + if (__copy_from_user( &box, &boxes[i], sizeof(box) )) + return; + + radeon_emit_clip_rect( dev_priv, &box ); + } - buf->pending = 1; - /* FIXME: Check dispatched field */ - buf_priv->dispatched = 0; - } + radeon_cp_dispatch_indirect( dev, elt_buf, + prim->start, + prim->finish ); - dev_priv->sarea_priv->last_dispatch++; + i++; + } while ( i < nbox ); - sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS; - sarea_priv->nbox = 0; } #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32)) @@ -967,25 +1069,35 @@ { drm_radeon_private_t *dev_priv = dev->dev_private; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; u32 format; u32 *buffer; - u8 *data; + const u8 *data; int size, dwords, tex_width, blit_width; - u32 y, height; - int ret = 0, i; + u32 height; + int i; RING_LOCALS; - /* FIXME: Be smarter about this... + dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; + + /* Flush the pixel cache. This ensures no pixel data gets mixed + * up with the texture data from the host data blit, otherwise + * part of the texture image may be corrupted. */ - buf = radeon_freelist_get( dev ); - if ( !buf ) return -EAGAIN; + BEGIN_RING( 4 ); + RADEON_FLUSH_CACHE(); + RADEON_WAIT_UNTIL_IDLE(); + ADVANCE_RING(); - DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n", - tex->offset >> 10, tex->pitch, tex->format, - image->x, image->y, image->width, image->height ); +#ifdef __BIG_ENDIAN + /* The Mesa texture functions provide the data in little endian as the + * chip wants it, but we need to compensate for the fact that the CP + * ring gets byte-swapped + */ + BEGIN_RING( 2 ); + OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT ); + ADVANCE_RING(); +#endif - buf_priv = buf->dev_private; /* The compiler won't optimize away a division by a variable, * even if the only legal values are powers of two. 
Thus, we'll @@ -1002,6 +1114,8 @@ case RADEON_TXFORMAT_ARGB1555: case RADEON_TXFORMAT_RGB565: case RADEON_TXFORMAT_ARGB4444: + case RADEON_TXFORMAT_VYUY422: + case RADEON_TXFORMAT_YVYU422: format = RADEON_COLOR_FORMAT_RGB565; tex_width = tex->width * 2; blit_width = image->width * 2; @@ -1017,56 +1131,46 @@ return -EINVAL; } - DRM_DEBUG( " tex=%dx%d blit=%d\n", - tex_width, tex->height, blit_width ); - - /* Flush the pixel cache. This ensures no pixel data gets mixed - * up with the texture data from the host data blit, otherwise - * part of the texture image may be corrupted. - */ - BEGIN_RING( 4 ); - - RADEON_FLUSH_CACHE(); - RADEON_WAIT_UNTIL_IDLE(); + DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width ); - ADVANCE_RING(); + do { + DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n", + tex->offset >> 10, tex->pitch, tex->format, + image->x, image->y, image->width, image->height ); - /* Make a copy of the parameters in case we have to update them - * for a multi-pass texture blit. + /* Make a copy of some parameters in case we have to + * update them for a multi-pass texture blit. */ - y = image->y; height = image->height; - data = (u8 *)image->data; + data = (const u8 *)image->data; size = height * blit_width; if ( size > RADEON_MAX_TEXTURE_SIZE ) { - /* Texture image is too large, do a multipass upload */ - ret = -EAGAIN; - - /* Adjust the blit size to fit the indirect buffer */ height = RADEON_MAX_TEXTURE_SIZE / blit_width; size = height * blit_width; - - /* Update the input parameters for next time */ - image->y += height; - image->height -= height; - image->data = (char *)image->data + size; - - if ( copy_to_user( tex->image, image, sizeof(*image) ) ) { - DRM_ERROR( "EFAULT on tex->image\n" ); - return -EFAULT; - } } else if ( size < 4 && size > 0 ) { size = 4; + } else if ( size == 0 ) { + return 0; + } + + buf = radeon_freelist_get( dev ); + if ( 0 && !buf ) { + radeon_do_cp_idle( dev_priv ); + buf = radeon_freelist_get( dev ); + } + if ( !buf ) { + DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n"); + copy_to_user( tex->image, image, sizeof(*image) ); + return -EAGAIN; } - dwords = size / 4; /* Dispatch the indirect buffer. */ - buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset); - + buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset); + dwords = size / 4; buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 ); buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL | RADEON_GMC_BRUSH_NONE | @@ -1080,7 +1184,7 @@ buffer[2] = (tex->pitch << 22) | (tex->offset >> 10); buffer[3] = 0xffffffff; buffer[4] = 0xffffffff; - buffer[5] = (y << 16) | image->x; + buffer[5] = (image->y << 16) | image->x; buffer[6] = (height << 16) | image->width; buffer[7] = dwords; @@ -1112,30 +1216,34 @@ buf->pid = current->pid; buf->used = (dwords + 8) * sizeof(u32); - buf_priv->discard = 1; radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); + radeon_cp_discard_buffer( dev, buf ); + + /* Update the input parameters for next time */ + image->y += height; + image->height -= height; + (const u8 *)image->data += size; + } while (image->height > 0); /* Flush the pixel cache after the blit completes. This ensures * the texture data is written out to memory before rendering * continues. 
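
The texture path above is the most substantial rework in this hunk: instead of bouncing back to user space with -EAGAIN for every pass of a large upload, the kernel now loops internally, slicing the image into indirect-buffer-sized runs of whole scanlines and advancing y, height and the data pointer after each blit; the ioctl arguments are only copied back when no DMA buffer can be obtained. A stand-alone sketch of that loop, where tex_image, MAX_BLIT_BYTES and the upload() callback are stand-ins for the driver's structures, RADEON_MAX_TEXTURE_SIZE and the indirect-buffer blit:

    #include <stddef.h>

    struct tex_image {
            int x, y, width, height;
            const unsigned char *data;
    };

    #define MAX_BLIT_BYTES (64 * 1024)   /* illustrative per-pass limit */

    static int upload_texture(struct tex_image *image, int blit_width,
                              int (*upload)(int x, int y, int w, int h,
                                            const unsigned char *data))
    {
            do {
                    int height = image->height;
                    size_t size = (size_t)height * blit_width;

                    if (size > MAX_BLIT_BYTES) {
                            height = MAX_BLIT_BYTES / blit_width; /* whole rows only */
                            size = (size_t)height * blit_width;
                    } else if (size == 0) {
                            return 0;
                    }

                    if (upload(image->x, image->y, image->width, height,
                               image->data))
                            return -1;

                    /* Advance the parameters for the next pass. */
                    image->y += height;
                    image->height -= height;
                    image->data += size;
            } while (image->height > 0);

            return 0;
    }
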
*/ BEGIN_RING( 4 ); - RADEON_FLUSH_CACHE(); RADEON_WAIT_UNTIL_2D_IDLE(); - ADVANCE_RING(); - - return ret; + return 0; } + static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple ) { drm_radeon_private_t *dev_priv = dev->dev_private; int i; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_RING( 35 ); @@ -1158,31 +1266,95 @@ int radeon_cp_clear( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_clear_t clear; + drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; + DRM_DEBUG( "\n" ); + + LOCK_TEST_WITH_RETURN( dev ); + + if ( copy_from_user( &clear, (drm_radeon_clear_t *)arg, + sizeof(clear) ) ) + return -EFAULT; + + RING_SPACE_TEST_WITH_RETURN( dev_priv ); + + if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + + if ( copy_from_user( &depth_boxes, clear.depth_boxes, + sarea_priv->nbox * sizeof(depth_boxes[0]) ) ) + return -EFAULT; + + radeon_cp_dispatch_clear( dev, &clear, depth_boxes ); + + COMMIT_RING(); + return 0; +} + + +/* Not sure why this isn't set all the time: + */ +static int radeon_do_init_pageflip( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG( "\n" ); + + BEGIN_RING( 6 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) ); + OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL ); + OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) ); + OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL ); + ADVANCE_RING(); + + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page; + + return 0; +} + +/* Called whenever a client dies, from DRM(release). + * NOTE: Lock isn't necessarily held when this is called! + */ +int radeon_do_cleanup_pageflip( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "\n" ); + + if (dev_priv->current_page != 0) + radeon_cp_dispatch_flip( dev ); + + dev_priv->page_flipping = 0; + return 0; +} + +/* Swapping and flipping are different operations, need different ioctls. + * They can & should be intermixed to support multiple 3d windows. 
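
The flow of the new flip ioctl just below: page flipping is enabled lazily on first use (radeon_do_init_pageflip sets RADEON_CRTC_OFFSET_FLIP_CNTL on both CRTCs), every flip toggles current_page, and the cleanup hook flips once more if a client exits while page 1 is displayed, so the console lands back on page 0. In sketch form, with illustrative names standing in for the ring programming:

    struct flip_state {
            int page_flipping;   /* hardware flip bit enabled? */
            int current_page;    /* which buffer is being scanned out */
    };

    static void do_flip(struct flip_state *s)
    {
            /* the real code programs CRTC_OFFSET / CRTC2_OFFSET here */
            s->current_page = 1 - s->current_page;
    }

    static void flip_ioctl(struct flip_state *s)
    {
            if (!s->page_flipping) {        /* first flip: set CRTC flip bits */
                    s->page_flipping = 1;
                    s->current_page = 0;
            }
            do_flip(s);
    }

    static void flip_cleanup(struct flip_state *s)
    {
            if (s->current_page != 0)
                    do_flip(s);             /* restore page 0 before disabling */
            s->page_flipping = 0;
    }
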
+ */ +int radeon_cp_flip(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_clear_t clear; - drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); LOCK_TEST_WITH_RETURN( dev ); - if ( copy_from_user( &clear, (drm_radeon_clear_t *)arg, - sizeof(clear) ) ) - return -EFAULT; - RING_SPACE_TEST_WITH_RETURN( dev_priv ); - if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) - sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - - if ( copy_from_user( &depth_boxes, clear.depth_boxes, - sarea_priv->nbox * sizeof(depth_boxes[0]) ) ) - return -EFAULT; - - radeon_cp_dispatch_clear( dev, &clear, depth_boxes ); + if (!dev_priv->page_flipping) + radeon_do_init_pageflip( dev ); + + radeon_cp_dispatch_flip( dev ); + COMMIT_RING(); return 0; } @@ -1193,7 +1365,7 @@ drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); LOCK_TEST_WITH_RETURN( dev ); @@ -1202,14 +1374,10 @@ if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - if ( !dev_priv->page_flipping ) { - radeon_cp_dispatch_swap( dev ); - dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | - RADEON_UPLOAD_MASKS); - } else { - radeon_cp_dispatch_flip( dev ); - } + radeon_cp_dispatch_swap( dev ); + dev_priv->sarea_priv->ctx_owner = 0; + COMMIT_RING(); return 0; } @@ -1219,10 +1387,11 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; drm_radeon_vertex_t vertex; + drm_radeon_tcl_prim_t prim; LOCK_TEST_WITH_RETURN( dev ); @@ -1235,8 +1404,8 @@ sizeof(vertex) ) ) return -EFAULT; - DRM_DEBUG( "%s: pid=%d index=%d count=%d discard=%d\n", - __FUNCTION__, current->pid, + DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n", + current->pid, vertex.idx, vertex.count, vertex.discard ); if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) { @@ -1254,7 +1423,6 @@ VB_AGE_TEST_WITH_RETURN( dev_priv ); buf = dma->buflist[vertex.idx]; - buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", @@ -1266,12 +1434,39 @@ return -EINVAL; } - buf->used = vertex.count; - buf_priv->prim = vertex.prim; - buf_priv->discard = vertex.discard; + /* Build up a prim_t record: + */ + if (vertex.count) { + buf->used = vertex.count; /* not used? 
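
The vertex ioctl no longer smuggles prim/discard through the buffer's private data. It emits any dirty state first, clears the one-shot texture-image and quiescence bits, then packages the request into a drm_radeon_tcl_prim_t for the dispatcher; discarding the buffer is a separate, explicit step. The sequencing, as a sketch (ONE_SHOT_STATE is a placeholder mask, and the callbacks stand in for radeon_emit_state, radeon_cp_dispatch_vertex and radeon_cp_discard_buffer):

    struct tcl_prim {
            unsigned int start, finish, prim, numverts, offset, vc_format;
    };

    #define ONE_SHOT_STATE 0x00007000u  /* placeholder for TEX*IMAGES|QUIESCENCE */

    static void vertex_ioctl_sketch(unsigned int count, unsigned int prim_type,
                                    unsigned int vc_format, int discard,
                                    unsigned int *dirty,
                                    void (*emit_state)(void),
                                    void (*dispatch)(const struct tcl_prim *),
                                    void (*discard_buf)(void))
    {
            struct tcl_prim p;

            if (count) {
                    if (*dirty) {
                            emit_state();               /* flush pending state */
                            *dirty &= ~ONE_SHOT_STATE;  /* images sent only once */
                    }

                    p.start = 0;
                    p.finish = count;        /* unused by the list walker */
                    p.prim = prim_type;
                    p.numverts = count;
                    p.offset = 0;
                    p.vc_format = vc_format;
                    dispatch(&p);
            }

            if (discard)
                    discard_buf();           /* age and recycle the DMA buffer */
    }
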
*/ + + if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { + radeon_emit_state( dev_priv, + &sarea_priv->context_state, + sarea_priv->tex_state, + sarea_priv->dirty ); + + sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | + RADEON_UPLOAD_TEX1IMAGES | + RADEON_UPLOAD_TEX2IMAGES | + RADEON_REQUIRE_QUIESCENCE); + } + + prim.start = 0; + prim.finish = vertex.count; /* unused */ + prim.prim = vertex.prim; + prim.numverts = vertex.count; + prim.vc_format = dev_priv->sarea_priv->vc_format; + + radeon_cp_dispatch_vertex( dev, buf, &prim, + dev_priv->sarea_priv->boxes, + dev_priv->sarea_priv->nbox ); + } - radeon_cp_dispatch_vertex( dev, buf ); + if (vertex.discard) { + radeon_cp_discard_buffer( dev, buf ); + } + COMMIT_RING(); return 0; } @@ -1281,10 +1476,11 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; drm_radeon_indices_t elts; + drm_radeon_tcl_prim_t prim; int count; LOCK_TEST_WITH_RETURN( dev ); @@ -1317,7 +1513,6 @@ VB_AGE_TEST_WITH_RETURN( dev_priv ); buf = dma->buflist[elts.idx]; - buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", @@ -1342,11 +1537,37 @@ } buf->used = elts.end; - buf_priv->prim = elts.prim; - buf_priv->discard = elts.discard; - radeon_cp_dispatch_indices( dev, buf, elts.start, elts.end, count ); + if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { + radeon_emit_state( dev_priv, + &sarea_priv->context_state, + sarea_priv->tex_state, + sarea_priv->dirty ); + + sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | + RADEON_UPLOAD_TEX1IMAGES | + RADEON_UPLOAD_TEX2IMAGES | + RADEON_REQUIRE_QUIESCENCE); + } + + + /* Build up a prim_t record: + */ + prim.start = elts.start; + prim.finish = elts.end; + prim.prim = elts.prim; + prim.offset = 0; /* offset from start of dma buffers */ + prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ + prim.vc_format = dev_priv->sarea_priv->vc_format; + + radeon_cp_dispatch_indices( dev, buf, &prim, + dev_priv->sarea_priv->boxes, + dev_priv->sarea_priv->nbox ); + if (elts.discard) { + radeon_cp_discard_buffer( dev, buf ); + } + COMMIT_RING(); return 0; } @@ -1358,6 +1579,7 @@ drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_texture_t tex; drm_radeon_tex_image_t image; + int ret; LOCK_TEST_WITH_RETURN( dev ); @@ -1377,7 +1599,10 @@ RING_SPACE_TEST_WITH_RETURN( dev_priv ); VB_AGE_TEST_WITH_RETURN( dev_priv ); - return radeon_cp_dispatch_texture( dev, &tex, &image ); + ret = radeon_cp_dispatch_texture( dev, &tex, &image ); + + COMMIT_RING(); + return ret; } int radeon_cp_stipple( struct inode *inode, struct file *filp, @@ -1402,6 +1627,7 @@ radeon_cp_dispatch_stipple( dev, mask ); + COMMIT_RING(); return 0; } @@ -1413,7 +1639,6 @@ drm_radeon_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; drm_radeon_indirect_t indirect; RING_LOCALS; @@ -1439,7 +1664,6 @@ } buf = dma->buflist[indirect.idx]; - buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", @@ -1461,7 +1685,6 @@ VB_AGE_TEST_WITH_RETURN( dev_priv ); buf->used = indirect.end; - buf_priv->discard = indirect.discard; /* Wait for the 3D stream to idle before the indirect buffer * containing 2D acceleration commands is processed. @@ -1477,6 +1700,526 @@ * privileged clients. 
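
radeon_cp_dispatch_indirect(), used by the indirect and texture paths alike, submits a user-filled span by offset and byte length; as the earlier hunk shows, an odd dword count is padded with a RADEON_CP_PACKET2 no-op so the submitted length stays even. That rule in isolation (the NOP encoding below is a placeholder, not necessarily the real value):

    #include <stdint.h>

    #define CP_NOP 0x80000000u   /* placeholder for RADEON_CP_PACKET2 */

    static int pad_to_even_dwords(uint32_t *buf, int start, int end)
    {
            int dwords = (end - start) / 4;   /* span is given in bytes */

            if (dwords & 1)                   /* pad odd lengths with a NOP */
                    buf[dwords++] = CP_NOP;

            return dwords;                    /* length actually submitted */
    }
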
*/ radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end ); + if (indirect.discard) { + radeon_cp_discard_buffer( dev, buf ); + } + + + COMMIT_RING(); + return 0; +} + +int radeon_cp_vertex2(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_radeon_vertex2_t vertex; + int i; + unsigned char laststate; + + LOCK_TEST_WITH_RETURN( dev ); + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t *)data, + sizeof(vertex) ); + + DRM_DEBUG( "pid=%d index=%d discard=%d\n", + current->pid, + vertex.idx, vertex.discard ); + + if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + vertex.idx, dma->buf_count - 1 ); + return -EINVAL; + } + + RING_SPACE_TEST_WITH_RETURN( dev_priv ); + VB_AGE_TEST_WITH_RETURN( dev_priv ); + + buf = dma->buflist[vertex.idx]; + + if ( buf->pid != current->pid ) { + DRM_ERROR( "process %d using buffer owned by %d\n", + current->pid, buf->pid ); + return -EINVAL; + } + + if ( buf->pending ) { + DRM_ERROR( "sending pending buffer %d\n", vertex.idx ); + return -EINVAL; + } + + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) + return -EINVAL; + + for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) { + drm_radeon_prim_t prim; + drm_radeon_tcl_prim_t tclprim; + + if ( copy_from_user( &prim, &vertex.prim[i], sizeof(prim) ) ) + return -EFAULT; + + if ( prim.stateidx != laststate ) { + drm_radeon_state_t state; + + if ( copy_from_user( &state, + &vertex.state[prim.stateidx], + sizeof(state) ) ) + return -EFAULT; + + radeon_emit_state2( dev_priv, &state ); + + laststate = prim.stateidx; + } + + tclprim.start = prim.start; + tclprim.finish = prim.finish; + tclprim.prim = prim.prim; + tclprim.vc_format = prim.vc_format; + + if ( prim.prim & RADEON_PRIM_WALK_IND ) { + tclprim.offset = prim.numverts * 64; + tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ + + radeon_cp_dispatch_indices( dev, buf, &tclprim, + sarea_priv->boxes, + sarea_priv->nbox); + } else { + tclprim.numverts = prim.numverts; + tclprim.offset = 0; /* not used */ + + radeon_cp_dispatch_vertex( dev, buf, &tclprim, + sarea_priv->boxes, + sarea_priv->nbox); + } + + if (sarea_priv->nbox == 1) + sarea_priv->nbox = 0; + } + + if ( vertex.discard ) { + radeon_cp_discard_buffer( dev, buf ); + } + + COMMIT_RING(); + return 0; +} + + +static int radeon_emit_packets( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int id = (int)header.packet.packet_id; + int sz, reg; + int *data = (int *)cmdbuf->buf; + RING_LOCALS; + + if (id >= RADEON_MAX_STATE_PACKETS) + return -EINVAL; + + sz = packet[id].len; + reg = packet[id].start; + + if (sz * sizeof(int) > cmdbuf->bufsz) + return -EINVAL; + + BEGIN_RING(sz+1); + OUT_RING( CP_PACKET0( reg, (sz-1) ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + +static __inline__ int radeon_emit_scalars( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int sz = header.scalars.count; + int *data = (int *)cmdbuf->buf; + int start = 
header.scalars.offset; + int stride = header.scalars.stride; + RING_LOCALS; + + BEGIN_RING( 3+sz ); + OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) ); + OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); + OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + +/* God this is ugly + */ +static __inline__ int radeon_emit_scalars2( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int sz = header.scalars.count; + int *data = (int *)cmdbuf->buf; + int start = ((unsigned int)header.scalars.offset) + 0x100; + int stride = header.scalars.stride; + RING_LOCALS; + + BEGIN_RING( 3+sz ); + OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) ); + OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); + OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + +static __inline__ int radeon_emit_vectors( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int sz = header.vectors.count; + int *data = (int *)cmdbuf->buf; + int start = header.vectors.offset; + int stride = header.vectors.stride; + RING_LOCALS; + + BEGIN_RING( 3+sz ); + OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) ); + OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); + OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + + +static int radeon_emit_packet3( drm_device_t *dev, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + int cmdsz, tmp; + int *cmd = (int *)cmdbuf->buf; + RING_LOCALS; + + DRM_DEBUG("\n"); + + if (__get_user( tmp, &cmd[0])) + return -EFAULT; + + cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16); + + if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 || + cmdsz * 4 > cmdbuf->bufsz) + return -EINVAL; + + BEGIN_RING( cmdsz ); + OUT_RING_USER_TABLE( cmd, cmdsz ); + ADVANCE_RING(); + + cmdbuf->buf += cmdsz * 4; + cmdbuf->bufsz -= cmdsz * 4; + return 0; +} + + +static int radeon_emit_packet3_cliprect( drm_device_t *dev, + drm_radeon_cmd_buffer_t *cmdbuf, + int orig_nbox ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_clip_rect_t box; + int cmdsz, tmp; + int *cmd = (int *)cmdbuf->buf; + drm_clip_rect_t *boxes = cmdbuf->boxes; + int i = 0; + RING_LOCALS; + + DRM_DEBUG("\n"); + + if (__get_user( tmp, &cmd[0])) + return -EFAULT; + + cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16); + + if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 || + cmdsz * 4 > cmdbuf->bufsz) + return -EINVAL; + + if (!orig_nbox) + goto out; + + do { + if ( i < cmdbuf->nbox ) { + if (__copy_from_user( &box, &boxes[i], sizeof(box) )) + return -EFAULT; + /* FIXME The second and subsequent times round + * this loop, send a WAIT_UNTIL_3D_IDLE before + * calling emit_clip_rect(). This fixes a + * lockup on fast machines when sending + * several cliprects with a cmdbuf, as when + * waving a 2D window over a 3D + * window. Something in the commands from user + * space seems to hang the card when they're + * sent several times in a row. 
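
The cliprect replay loop this comment belongs to re-emits one user packet once per clip rectangle, copying each rectangle in from user space and, per the workaround described in the comment, waiting for 3D idle before every rectangle after the first. An outline, with next_box standing in for the __copy_from_user step and set_scissor for radeon_emit_clip_rect:

    struct clip_box { int x1, y1, x2, y2; };  /* stand-in for drm_clip_rect_t */

    static int replay_per_cliprect(int nbox,
                                   int (*next_box)(int i, struct clip_box *box),
                                   void (*wait_3d_idle)(void),
                                   void (*set_scissor)(const struct clip_box *),
                                   void (*submit_packet)(void))
    {
            struct clip_box box;
            int i = 0;

            if (nbox == 0)
                    return 0;                /* no cliprects: packet is dropped */

            do {
                    if (next_box(i, &box))   /* copy_from_user failed */
                            return -1;
                    if (i)                   /* workaround: idle between rects */
                            wait_3d_idle();
                    set_scissor(&box);
                    submit_packet();         /* same packet, new scissor */
            } while (++i < nbox);

            return 0;
    }
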
That would be + * the correct place to fix it but this works + * around it until I can figure that out - Tim + * Smith */ + if ( i ) { + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + ADVANCE_RING(); + } + radeon_emit_clip_rect( dev_priv, &box ); + } + + BEGIN_RING( cmdsz ); + OUT_RING_USER_TABLE( cmd, cmdsz ); + ADVANCE_RING(); + + } while ( ++i < cmdbuf->nbox ); + if (cmdbuf->nbox == 1) + cmdbuf->nbox = 0; + + out: + cmdbuf->buf += cmdsz * 4; + cmdbuf->bufsz -= cmdsz * 4; + return 0; +} + + +static int radeon_emit_wait( drm_device_t *dev, int flags ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG("%s: %x\n", __FUNCTION__, flags); + switch (flags) { + case RADEON_WAIT_2D: + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_2D_IDLE(); + ADVANCE_RING(); + break; + case RADEON_WAIT_3D: + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + ADVANCE_RING(); + break; + case RADEON_WAIT_2D|RADEON_WAIT_3D: + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_IDLE(); + ADVANCE_RING(); + break; + default: + return -EINVAL; + } + + return 0; +} + +int radeon_cp_cmdbuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf = 0; + int idx; + drm_radeon_cmd_buffer_t cmdbuf; + drm_radeon_cmd_header_t header; + int orig_nbox; + + LOCK_TEST_WITH_RETURN( dev ); + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data, + sizeof(cmdbuf) ); + + RING_SPACE_TEST_WITH_RETURN( dev_priv ); + VB_AGE_TEST_WITH_RETURN( dev_priv ); + + + if (verify_area( VERIFY_READ, cmdbuf.buf, cmdbuf.bufsz )) + return -EFAULT; + + if (cmdbuf.nbox && + verify_area( VERIFY_READ, cmdbuf.boxes, + cmdbuf.nbox * sizeof(drm_clip_rect_t))) + return -EFAULT; + + orig_nbox = cmdbuf.nbox; + + while ( cmdbuf.bufsz >= sizeof(header) ) { + + if (__get_user( header.i, (int *)cmdbuf.buf )) { + DRM_ERROR("__get_user %p\n", cmdbuf.buf); + return -EFAULT; + } + + cmdbuf.buf += sizeof(header); + cmdbuf.bufsz -= sizeof(header); + + switch (header.header.cmd_type) { + case RADEON_CMD_PACKET: + DRM_DEBUG("RADEON_CMD_PACKET\n"); + if (radeon_emit_packets( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_packets failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_SCALARS: + DRM_DEBUG("RADEON_CMD_SCALARS\n"); + if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_scalars failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_VECTORS: + DRM_DEBUG("RADEON_CMD_VECTORS\n"); + if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_vectors failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_DMA_DISCARD: + DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); + idx = header.dma.buf_idx; + if ( idx < 0 || idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + idx, dma->buf_count - 1 ); + return -EINVAL; + } + + buf = dma->buflist[idx]; + if ( buf->pid != current->pid || buf->pending ) { + DRM_ERROR( "bad buffer\n" ); + return -EINVAL; + } + + radeon_cp_discard_buffer( dev, buf ); + break; + + case RADEON_CMD_PACKET3: + DRM_DEBUG("RADEON_CMD_PACKET3\n"); + if (radeon_emit_packet3( dev, &cmdbuf )) { + DRM_ERROR("radeon_emit_packet3 failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_PACKET3_CLIP: + 
DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); + if (radeon_emit_packet3_cliprect( dev, &cmdbuf, orig_nbox )) { + DRM_ERROR("radeon_emit_packet3_clip failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_SCALARS2: + DRM_DEBUG("RADEON_CMD_SCALARS2\n"); + if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_scalars2 failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_WAIT: + DRM_DEBUG("RADEON_CMD_WAIT\n"); + if (radeon_emit_wait( dev, header.wait.flags )) { + DRM_ERROR("radeon_emit_wait failed\n"); + return -EINVAL; + } + break; + default: + DRM_ERROR("bad cmd_type %d at %p\n", + header.header.cmd_type, + cmdbuf.buf - sizeof(header)); + return -EINVAL; + } + } + + + DRM_DEBUG("DONE\n"); + COMMIT_RING(); + return 0; +} + + + +int radeon_cp_getparam(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_getparam_t param; + int value; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t *)data, + sizeof(param) ); + + DRM_DEBUG( "pid=%d\n", current->pid ); + + switch( param.param ) { + case RADEON_PARAM_AGP_BUFFER_OFFSET: + value = dev_priv->agp_buffers_offset; + break; + case RADEON_PARAM_LAST_FRAME: + dev_priv->stats.last_frame_reads++; + value = GET_SCRATCH( 0 ); + break; + case RADEON_PARAM_LAST_DISPATCH: + value = GET_SCRATCH( 1 ); + break; + case RADEON_PARAM_LAST_CLEAR: + dev_priv->stats.last_clear_reads++; + value = GET_SCRATCH( 2 ); + break; + case RADEON_PARAM_IRQ_NR: + value = dev->irq; + break; + case RADEON_PARAM_AGP_BASE: + value = dev_priv->agp_vm_start; + break; + default: + return -EINVAL; + } + + if ( copy_to_user( param.value, &value, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/sis_drm.h linux.21rc5-ac2/drivers/char/drm/sis_drm.h --- linux.21rc5/drivers/char/drm/sis_drm.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/sis_drm.h 2003-04-22 16:44:36.000000000 +0100 @@ -2,6 +2,16 @@ #ifndef _sis_drm_public_h_ #define _sis_drm_public_h_ +/* SiS specific ioctls */ +#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t) +#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t) +#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t) +#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t) +#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t) +#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t) +#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49) +#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50) + typedef struct { int context; unsigned int offset; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/sis_drv.c linux.21rc5-ac2/drivers/char/drm/sis_drv.c --- linux.21rc5/drivers/char/drm/sis_drv.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/sis_drv.c 2003-04-22 16:44:36.000000000 +0100 @@ -31,31 +31,6 @@ #include "sis_drm.h" #include "sis_drv.h" -#define DRIVER_AUTHOR "SIS" -#define DRIVER_NAME "sis" -#define DRIVER_DESC "SIS 300/630/540" -#define DRIVER_DATE "20010503" -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \ - 
[DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \ - /* AGP Memory Management */ \ - [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \ - [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \ - [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 } -#if 0 /* these don't appear to be defined */ - /* SIS Stereo */ - [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 }, - [DRM_IOCTL_NR(SIS_IOCTL_FLIP)] = { sis_flip, 1, 1 }, - [DRM_IOCTL_NR(SIS_IOCTL_FLIP_INIT)] = { sis_flip_init, 1, 1 }, - [DRM_IOCTL_NR(SIS_IOCTL_FLIP_FINAL)] = { sis_flip_final, 1, 1 } -#endif - -#define __HAVE_COUNTERS 5 - #include "drm_auth.h" #include "drm_agpsupport.h" #include "drm_bufs.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/sis_ds.c linux.21rc5-ac2/drivers/char/drm/sis_ds.c --- linux.21rc5/drivers/char/drm/sis_ds.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/sis_ds.c 2003-04-22 16:44:36.000000000 +0100 @@ -49,16 +49,19 @@ set_t *set; set = (set_t *)MALLOC(sizeof(set_t)); - if (set) { + if(set) + { for(i = 0; i < SET_SIZE; i++){ set->list[i].free_next = i+1; set->list[i].alloc_next = -1; - } + } + set->list[SET_SIZE-1].free_next = -1; set->free = 0; set->alloc = -1; set->trace = -1; } + return set; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/sis.h linux.21rc5-ac2/drivers/char/drm/sis.h --- linux.21rc5/drivers/char/drm/sis.h 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/sis.h 2003-04-22 16:44:36.000000000 +0100 @@ -24,7 +24,7 @@ * DEALINGS IN THE SOFTWARE. * */ -/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.1 2001/05/19 18:29:22 dawes Exp $ */ +/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.2 2001/12/19 21:25:59 dawes Exp $ */ #ifndef __SIS_H__ #define __SIS_H__ @@ -42,6 +42,31 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "SIS" +#define DRIVER_NAME "sis" +#define DRIVER_DESC "SIS 300/630/540" +#define DRIVER_DATE "20010503" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \ + /* AGP Memory Management */ \ + [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \ + [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 } +#if 0 /* these don't appear to be defined */ + /* SIS Stereo */ + [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 }, + [DRM_IOCTL_NR(SIS_IOCTL_FLIP)] = { sis_flip, 1, 1 }, + [DRM_IOCTL_NR(SIS_IOCTL_FLIP_INIT)] = { sis_flip_init, 1, 1 }, + [DRM_IOCTL_NR(SIS_IOCTL_FLIP_FINAL)] = { sis_flip_final, 1, 1 } +#endif + +#define __HAVE_COUNTERS 5 + /* Buffer customization: */ #define DRIVER_AGP_BUFFERS_MAP( dev ) \ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm/tdfx_drv.c linux.21rc5-ac2/drivers/char/drm/tdfx_drv.c --- linux.21rc5/drivers/char/drm/tdfx_drv.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm/tdfx_drv.c 2003-04-22 16:44:36.000000000 +0100 @@ -82,25 +82,6 @@ #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). 
It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init tdfx_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", tdfx_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm-4.0/agpsupport.c linux.21rc5-ac2/drivers/char/drm-4.0/agpsupport.c --- linux.21rc5/drivers/char/drm-4.0/agpsupport.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm-4.0/agpsupport.c 2003-05-20 20:08:32.000000000 +0100 @@ -275,9 +275,10 @@ break; case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133"; break; + case VIA_APOLLO_KM266: head->chipset = "VIA Apollo KM266 / KL266"; + break; case VIA_APOLLO_KT400: head->chipset = "VIA Apollo KT400"; break; - case VIA_APOLLO_P4X400: head->chipset = "VIA Apollo P4X400"; #endif case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm-4.0/i810_dma.c linux.21rc5-ac2/drivers/char/drm-4.0/i810_dma.c --- linux.21rc5/drivers/char/drm-4.0/i810_dma.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm-4.0/i810_dma.c 2003-04-28 16:10:27.000000000 +0100 @@ -228,7 +228,7 @@ down_write(¤t->mm->mmap_sem); retcode = do_munmap(current->mm, (unsigned long)buf_priv->virtual, - (size_t) buf->total); + (size_t) buf->total, 0); up_write(¤t->mm->mmap_sem); } buf_priv->currently_mapped = I810_BUF_UNMAPPED; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/drm-4.0/tdfx_drv.c linux.21rc5-ac2/drivers/char/drm-4.0/tdfx_drv.c --- linux.21rc5/drivers/char/drm-4.0/tdfx_drv.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/drm-4.0/tdfx_drv.c 2003-04-22 16:44:36.000000000 +0100 @@ -554,7 +554,6 @@ lock.context, current->pid, j, dev->lock.lock_time, jiffies); current->state = TASK_INTERRUPTIBLE; - current->policy |= SCHED_YIELD; schedule_timeout(DRM_LOCK_SLICE-j); DRM_DEBUG("jiffies=%d\n", jiffies); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/dz.c linux.21rc5-ac2/drivers/char/dz.c --- linux.21rc5/drivers/char/dz.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/dz.c 2003-04-22 16:44:36.000000000 +0100 @@ -1054,7 +1054,7 @@ restore_flags(flags); return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/esp.c linux.21rc5-ac2/drivers/char/esp.c --- linux.21rc5/drivers/char/esp.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/esp.c 2003-04-22 16:44:36.000000000 +0100 @@ -136,7 +136,7 @@ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ - kdevname(tty->device), (info->flags), serial_refcount,info->count,tty->count,s) + kdevname(tty->device), (info->flags), serial_refcount,info->count,atomic_read(&tty->count),s) #else #define DBG_CNT(s) #endif @@ -2051,7 +2051,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/generic_serial.c linux.21rc5-ac2/drivers/char/generic_serial.c --- linux.21rc5/drivers/char/generic_serial.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/generic_serial.c 2003-04-22 16:44:36.000000000 +0100 @@ -753,7 +753,7 @@ return; } - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_ERR "gs: gs_close: bad port count;" " tty->count is 1, port count is %d\n", port->count); port->count = 1; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/ip2main.c linux.21rc5-ac2/drivers/char/ip2main.c --- linux.21rc5/drivers/char/ip2main.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/ip2main.c 2003-04-22 16:44:37.000000000 +0100 @@ -372,7 +372,7 @@ #if defined(MODULE) && defined(IP2DEBUG_OPEN) #define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] refc=%d, ttyc=%d, modc=%x -> %s\n", \ kdevname(tty->device),(pCh->flags),ref_count, \ - tty->count,/*GET_USE_COUNT(module)*/0,s) + atomic_read(&tty->count),/*GET_USE_COUNT(module)*/0,s) #else #define DBG_CNT(s) #endif @@ -1740,7 +1740,7 @@ noblock: /* first open - Assign termios structure to port */ - if ( tty->count == 1 ) { + if ( atomic_read(&tty->count) == 1 ) { i2QueueCommands(PTYPE_INLINE, pCh, 0, 2, CMD_CTSFL_DSAB, CMD_RTSFL_DSAB); if ( pCh->flags & ASYNC_SPLIT_TERMIOS ) { if ( tty->driver.subtype == SERIAL_TYPE_NORMAL ) { @@ -1805,7 +1805,7 @@ return; } - if ( tty->count > 1 ) { /* not the last close */ + if ( atomic_read(&tty->count) > 1 ) { /* not the last close */ MOD_DEC_USE_COUNT; ip2trace (CHANN, ITRC_CLOSE, 2, 1, 3 ); @@ -3364,7 +3364,7 @@ pCh = DevTable[i]; if (pCh) { tty = pCh->pTTY; - if (tty && tty->count) { + if (tty && atomic_read(&tty->count)) { len += sprintf(buf+len,FMTLINE,i,(int)tty->flags,pCh->flags, tty->termios->c_cflag,tty->termios->c_iflag); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/ipmi/ipmi_kcs_intf.c linux.21rc5-ac2/drivers/char/ipmi/ipmi_kcs_intf.c --- linux.21rc5/drivers/char/ipmi/ipmi_kcs_intf.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/ipmi/ipmi_kcs_intf.c 2003-05-27 14:03:49.000000000 +0100 @@ -1032,9 +1032,8 @@ #include /* A real hack, but everything's not there yet in 2.4. 
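
A pattern repeated across the serial and tty drivers in this section: tty->count is an atomic_t in this tree, so every unlocked test in the open/close paths becomes atomic_read(&tty->count). The recurring close-path sanity check, shown in isolation with illustrative struct shapes in place of the real tty and per-port structures:

    #include <asm/atomic.h>   /* 2.4-era atomic_t / atomic_read() */

    struct tty_like  { atomic_t count; };
    struct port_like { int count; };

    /* If the tty is on its last reference but the port still claims more
     * users, the port count is forced back in line before teardown. */
    static void close_check(struct tty_like *tty, struct port_like *port)
    {
            if ((atomic_read(&tty->count) == 1) && (port->count != 1))
                    port->count = 1;
    }
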
*/ -#define COMPILER_DEPENDENT_UINT64 unsigned long -#include <../drivers/acpi/include/acpi.h> -#include <../drivers/acpi/include/actypes.h> +#include +#include struct SPMITable { s8 Signature[4]; @@ -1059,7 +1058,7 @@ static unsigned long acpi_find_bmc(void) { acpi_status status; - acpi_table_header *spmi; + struct acpi_table_header *spmi; static unsigned long io_base = 0; if (io_base != 0) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/isicom.c linux.21rc5-ac2/drivers/char/isicom.c --- linux.21rc5/drivers/char/isicom.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/isicom.c 2003-04-22 16:44:37.000000000 +0100 @@ -1160,7 +1160,7 @@ return; } - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_WARNING "ISICOM:(0x%x) isicom_close: bad port count" "tty->count = 1 port count = %d.\n", card->base, port->count); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/istallion.c linux.21rc5-ac2/drivers/char/istallion.c --- linux.21rc5/drivers/char/istallion.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/istallion.c 2003-04-22 16:44:37.000000000 +0100 @@ -1173,7 +1173,7 @@ restore_flags(flags); return; } - if ((tty->count == 1) && (portp->refcount != 1)) + if ((atomic_read(&tty->count) == 1) && (portp->refcount != 1)) portp->refcount = 1; if (portp->refcount-- > 1) { MOD_DEC_USE_COUNT; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/Makefile linux.21rc5-ac2/drivers/char/Makefile --- linux.21rc5/drivers/char/Makefile 2003-05-28 15:08:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/Makefile 2003-04-22 16:44:37.000000000 +0100 @@ -199,6 +199,12 @@ obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o obj-$(CONFIG_N_HDLC) += n_hdlc.o obj-$(CONFIG_SPECIALIX) += specialix.o + +subdir-$(CONFIG_ATI_CD1865) += cd1865 +ifeq ($(CONFIG_ATI_CD1865),y) + obj-y += cd1865/SILX.o +endif + obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o obj-$(CONFIG_A2232) += ser_a2232.o generic_serial.o obj-$(CONFIG_SX) += sx.o generic_serial.o diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/moxa.c linux.21rc5-ac2/drivers/char/moxa.c --- linux.21rc5/drivers/char/moxa.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/moxa.c 2003-04-22 16:44:37.000000000 +0100 @@ -642,7 +642,7 @@ } ch = (struct moxa_str *) tty->driver_data; - if ((tty->count == 1) && (ch->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (ch->count != 1)) { printk("moxa_close: bad serial port count; tty->count is 1, " "ch->count is %d\n", ch->count); ch->count = 1; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/mwave/mwavedd.c linux.21rc5-ac2/drivers/char/mwave/mwavedd.c --- linux.21rc5/drivers/char/mwave/mwavedd.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/mwave/mwavedd.c 2003-04-22 16:44:37.000000000 +0100 @@ -279,7 +279,6 @@ pDrvData->IPCs[ipcnum].bIsHere = FALSE; pDrvData->IPCs[ipcnum].bIsEnabled = TRUE; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) - current->nice = -20; /* boost to provide priority timing */ #else current->priority = 0x28; /* boost to provide priority timing */ #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/mxser.c linux.21rc5-ac2/drivers/char/mxser.c --- linux.21rc5/drivers/char/mxser.c 2003-05-28 15:07:51.000000000 +0100 +++ 
linux.21rc5-ac2/drivers/char/mxser.c 2003-04-22 16:44:37.000000000 +0100 @@ -824,7 +824,7 @@ MOD_DEC_USE_COUNT; return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/n_hdlc.c linux.21rc5-ac2/drivers/char/n_hdlc.c --- linux.21rc5/drivers/char/n_hdlc.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/n_hdlc.c 2003-04-22 16:44:37.000000000 +0100 @@ -9,7 +9,7 @@ * Al Longyear , Paul Mackerras * * Original release 01/11/99 - * $Id: n_hdlc.c,v 3.3 2001/11/08 16:16:03 paulkf Exp $ + * $Id: n_hdlc.c,v 3.6 2002/12/19 18:58:56 paulkf Exp $ * * This code is released under the GNU General Public License (GPL) * @@ -78,7 +78,7 @@ */ #define HDLC_MAGIC 0x239e -#define HDLC_VERSION "$Revision: 3.3 $" +#define HDLC_VERSION "$Revision: 3.6 $" #include #include @@ -172,9 +172,9 @@ /* * HDLC buffer list manipulation functions */ -void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list); -void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf); -N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list); +static void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list); +static void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf); +static N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list); /* Local functions */ @@ -186,10 +186,10 @@ /* debug level can be set by insmod for debugging purposes */ #define DEBUG_LEVEL_INFO 1 -int debuglevel=0; +static int debuglevel=0; /* max frame size for memory allocations */ -ssize_t maxframe=4096; +static ssize_t maxframe=4096; /* TTY callbacks */ @@ -265,7 +265,8 @@ } else break; } - + if (n_hdlc->tbuf) + kfree(n_hdlc->tbuf); kfree(n_hdlc); } /* end of n_hdlc_release() */ @@ -905,7 +906,7 @@ * Arguments: list pointer to buffer list * Return Value: None */ -void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list) +static void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list) { memset(list,0,sizeof(N_HDLC_BUF_LIST)); spin_lock_init(&list->spinlock); @@ -922,7 +923,7 @@ * * Return Value: None */ -void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf) +static void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf) { unsigned long flags; spin_lock_irqsave(&list->spinlock,flags); @@ -952,7 +953,7 @@ * * pointer to HDLC buffer if available, otherwise NULL */ -N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list) +static N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list) { unsigned long flags; N_HDLC_BUF *buf; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/n_tty.c linux.21rc5-ac2/drivers/char/n_tty.c --- linux.21rc5/drivers/char/n_tty.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/n_tty.c 2003-04-22 16:44:37.000000000 +0100 @@ -119,7 +119,7 @@ */ static void check_unthrottle(struct tty_struct * tty) { - if (tty->count && + if (atomic_read(&tty->count) && test_and_clear_bit(TTY_THROTTLED, &tty->flags) && tty->driver.unthrottle) tty->driver.unthrottle(tty); @@ -1170,7 +1170,8 @@ retval = -ERESTARTSYS; break; } - if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) { + if (tty_hung_up_p(file) || + (tty->link && !atomic_read(&tty->link->count))) { retval = -EIO; break; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/nwflash.c linux.21rc5-ac2/drivers/char/nwflash.c --- linux.21rc5/drivers/char/nwflash.c 2003-05-28 
15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/nwflash.c 2003-04-22 16:44:37.000000000 +0100 @@ -154,12 +154,11 @@ if (down_interruptible(&nwflash_sem)) return -ERESTARTSYS; - ret = copy_to_user(buf, (void *)(FLASH_BASE + p), count); - if (ret == 0) { - ret = count; - *ppos += count; - } + ret = count - copy_to_user(buf, (void *)(FLASH_BASE + p), count); + *ppos += ret; up(&nwflash_sem); + if (ret == 0) + ret = -EFAULT; } return ret; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/pc_keyb.c linux.21rc5-ac2/drivers/char/pc_keyb.c --- linux.21rc5/drivers/char/pc_keyb.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/pc_keyb.c 2003-04-22 16:44:37.000000000 +0100 @@ -1225,41 +1225,13 @@ #endif /* CONFIG_PSMOUSE */ -static int blink_frequency = HZ/2; +void pckbd_blink (char led) { + led = led ? (0x01 | 0x04) : 0x00; -/* Tell the user who may be running in X and not see the console that we have - panic'ed. This is to distingush panics from "real" lockups. - Could in theory send the panic message as morse, but that is left as an - exercise for the reader. */ -void panic_blink(void) -{ - static unsigned long last_jiffie; - static char led; - /* Roughly 1/2s frequency. KDB uses about 1s. Make sure it is - different. */ - if (!blink_frequency) - return; - if (jiffies - last_jiffie > blink_frequency) { - led ^= 0x01 | 0x04; while (kbd_read_status() & KBD_STAT_IBF) mdelay(1); kbd_write_output(KBD_CMD_SET_LEDS); mdelay(1); while (kbd_read_status() & KBD_STAT_IBF) mdelay(1); mdelay(1); kbd_write_output(led); - last_jiffie = jiffies; - } -} - -static int __init panicblink_setup(char *str) -{ - int par; - if (get_option(&str,&par)) - blink_frequency = par*(1000/HZ); - return 1; } - -/* panicblink=0 disables the blinking as it caused problems with some console - switches. otherwise argument is ms of a blink period. */ -__setup("panicblink=", panicblink_setup); - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/pcmcia/synclink_cs.c linux.21rc5-ac2/drivers/char/pcmcia/synclink_cs.c --- linux.21rc5/drivers/char/pcmcia/synclink_cs.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/pcmcia/synclink_cs.c 2003-04-22 16:44:37.000000000 +0100 @@ -2626,7 +2626,7 @@ if (!info->count || tty_hung_up_p(filp)) goto cleanup; - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * tty->count is 1 and the tty structure will be freed. * info->count should be one in this case. diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/pcxx.c linux.21rc5-ac2/drivers/char/pcxx.c --- linux.21rc5/drivers/char/pcxx.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/pcxx.c 2003-04-22 16:44:37.000000000 +0100 @@ -581,7 +581,7 @@ return; } /* this check is in serial.c, it won't hurt to do it here too */ - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
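The hunks around here (isicom, istallion, moxa, mxser, n_tty, synclink_cs, pcxx and more below) all make the same mechanical conversion: tty->count becomes an atomic_t in this tree, so every driver that peeked at the raw integer now goes through atomic_read(), and the open/close reference accounting no longer depends on any outer lock. A minimal userspace model of the recurring close-path check, using C11 stdatomic in place of the kernel's atomic_t; the struct names are illustrative, not the driver code:

#include <stdatomic.h>
#include <stdio.h>

typedef atomic_int atomic_t;		/* stands in for the kernel atomic_t */
#define atomic_read(v)	atomic_load(v)

struct tty_model { atomic_t count; };	/* models tty->count */
struct port_model { int count; };	/* driver-private open count */

/* Models the recurring driver close() sanity check: if the tty layer
 * says this is the last reference but the driver still believes it has
 * more than one opener, the driver count is repaired to 1 so the port
 * really shuts down when the tty is freed. */
static void model_close(struct tty_model *tty, struct port_model *port)
{
	if ((atomic_read(&tty->count) == 1) && (port->count != 1)) {
		printf("bad port count %d, repairing\n", port->count);
		port->count = 1;
	}
	port->count--;
}

int main(void)
{
	struct tty_model tty;
	struct port_model port = { .count = 3 };

	atomic_init(&tty.count, 1);
	model_close(&tty, &port);	/* prints the repair message */
	return port.count;		/* 0: port fully closed */
}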
Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/pty.c linux.21rc5-ac2/drivers/char/pty.c --- linux.21rc5/drivers/char/pty.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/pty.c 2003-04-22 16:44:37.000000000 +0100 @@ -70,13 +70,18 @@ static void pty_close(struct tty_struct * tty, struct file * filp) { + int count; + if (!tty) return; + + count = atomic_read(&tty->count); if (tty->driver.subtype == PTY_TYPE_MASTER) { - if (tty->count > 1) - printk("master pty_close: count = %d!!\n", tty->count); + if (count > 1) + printk("master pty_close: count = %d!!\n", + atomic_read(&tty->count)); } else { - if (tty->count > 2) + if (count > 2) return; } wake_up_interruptible(&tty->read_wait); @@ -329,7 +334,7 @@ goto out; if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) goto out; - if (tty->link->count != 1) + if (atomic_read(&tty->link->count) != 1) goto out; clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/raw.c linux.21rc5-ac2/drivers/char/raw.c --- linux.21rc5/drivers/char/raw.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/raw.c 2003-04-22 16:44:37.000000000 +0100 @@ -86,12 +86,6 @@ filp->f_op = &raw_ctl_fops; return 0; } - - if (!filp->f_iobuf) { - err = alloc_kiovec(1, &filp->f_iobuf); - if (err) - return err; - } down(&raw_devices[minor].mutex); /* @@ -292,7 +286,6 @@ size_t size, loff_t *offp) { struct kiobuf * iobuf; - int new_iobuf; int err = 0; unsigned long blocknr, blocks; size_t transferred; @@ -311,18 +304,10 @@ minor = MINOR(filp->f_dentry->d_inode->i_rdev); - new_iobuf = 0; - iobuf = filp->f_iobuf; - if (test_and_set_bit(0, &filp->f_iobuf_lock)) { - /* - * A parallel read/write is using the preallocated iobuf - * so just run slow and allocate a new one. - */ - err = alloc_kiovec(1, &iobuf); - if (err) - goto out; - new_iobuf = 1; - } + err = alloc_kiovec(1, &iobuf); + if (err) + return err; + dev = to_kdev_t(raw_devices[minor].binding->bd_dev); sector_size = raw_devices[minor].sector_size; @@ -395,10 +380,6 @@ } out_free: - if (!new_iobuf) - clear_bit(0, &filp->f_iobuf_lock); - else - free_kiovec(1, &iobuf); - out: + free_kiovec(1, &iobuf); return err; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/riscom8.c linux.21rc5-ac2/drivers/char/riscom8.c --- linux.21rc5/drivers/char/riscom8.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/riscom8.c 2003-04-22 16:44:37.000000000 +0100 @@ -1142,7 +1142,7 @@ goto out; bp = port_Board(port); - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_INFO "rc%d: rc_close: bad port count;" " tty->count is 1, port count is %d\n", board_No(bp), port->count); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/rocket.c linux.21rc5-ac2/drivers/char/rocket.c --- linux.21rc5/drivers/char/rocket.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/rocket.c 2003-04-22 16:44:37.000000000 +0100 @@ -1052,7 +1052,7 @@ restore_flags(flags); return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
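The nwflash.c read hunk above is a different kind of fix: copy_to_user() returns the number of bytes it could NOT copy, so `count - copy_to_user(...)` is the number actually transferred, the file position advances by exactly that amount, and -EFAULT is returned only when nothing at all was copied. A userspace model of that accounting, with a hypothetical copy_residue() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

#define EFAULT 14

/* Stand-in for copy_to_user(): copies what it can and returns the
 * number of bytes NOT copied (0 on full success), like the kernel API. */
static size_t copy_residue(char *dst, const char *src, size_t n, size_t copyable)
{
	size_t done = n < copyable ? n : copyable;

	memcpy(dst, src, done);
	return n - done;
}

/* Models the fixed nwflash read path. */
static long model_read(char *buf, const char *flash, size_t count,
		       long *ppos, size_t copyable)
{
	long ret = (long)(count - copy_residue(buf, flash + *ppos, count, copyable));

	*ppos += ret;			/* advance by what was transferred */
	if (ret == 0)
		ret = -EFAULT;		/* nothing copied at all */
	return ret;
}

int main(void)
{
	char flash[16] = "0123456789abcdef", buf[16];
	long pos = 0;

	printf("%ld\n", model_read(buf, flash, 8, &pos, 8));	/* 8   */
	printf("%ld\n", model_read(buf, flash, 8, &pos, 5));	/* 5   */
	printf("%ld\n", model_read(buf, flash, 8, &pos, 0));	/* -14 */
	return 0;
}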
Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/serial167.c linux.21rc5-ac2/drivers/char/serial167.c --- linux.21rc5/drivers/char/serial167.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/serial167.c 2003-04-22 16:44:37.000000000 +0100 @@ -1877,7 +1877,7 @@ printk("cy_close ttyS%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/serial_amba.c linux.21rc5-ac2/drivers/char/serial_amba.c --- linux.21rc5/drivers/char/serial_amba.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/serial_amba.c 2003-04-22 16:44:37.000000000 +0100 @@ -1404,7 +1404,7 @@ return; } - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/serial.c linux.21rc5-ac2/drivers/char/serial.c --- linux.21rc5/drivers/char/serial.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/serial.c 2003-04-22 16:44:37.000000000 +0100 @@ -62,6 +62,10 @@ * Robert Schwebel , * Juergen Beisert , * Theodore Ts'o + * 4/02: added TTY_DO_WRITE_WAKEUP to enable n_tty to send POLL_OUTS + * to waiting processes + * Sapan Bhatia + * */ static char *serial_version = "5.05c"; @@ -203,6 +207,7 @@ #include #include #include +#include #if (LINUX_VERSION_CODE >= 131343) #include #endif @@ -370,7 +375,7 @@ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ - kdevname(tty->device), (info->flags), serial_refcount,info->count,tty->count,s) + kdevname(tty->device), (info->flags), serial_refcount,info->count,atomic_read(&tty->count),s) #else #define DBG_CNT(s) #endif @@ -1218,7 +1223,7 @@ if (!page) return -ENOMEM; - save_flags(flags); cli(); + spin_lock_irqsave( &info->irq_spinlock, flags); if (info->flags & ASYNC_INITIALIZED) { free_page(page); @@ -1456,11 +1461,11 @@ change_speed(info, 0); info->flags |= ASYNC_INITIALIZED; - restore_flags(flags); + spin_unlock_irqrestore( &info->irq_spinlock, flags); return 0; errout: - restore_flags(flags); + spin_unlock_irqrestore( &info->irq_spinlock, flags); return retval; } @@ -1484,7 +1489,7 @@ state->irq); #endif - save_flags(flags); cli(); /* Disable interrupts */ + spin_lock_irqsave( &info->irq_spinlock, flags); /* * clear delta_msr_wait queue to avoid mem leaks: we may free the irq @@ -1492,41 +1497,6 @@ */ wake_up_interruptible(&info->delta_msr_wait); - /* - * First unlink the serial port from the IRQ chain... 
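The serial.c changes here replace the old global interrupt disable, save_flags(); cli(); ... restore_flags(), with a new per-port info->irq_spinlock taken via spin_lock_irqsave(); later hunks in this file add the matching spin_lock_init()/SPIN_LOCK_UNLOCKED initialisation everywhere an async_struct is built. A pthread model of the shape of the change; a mutex stands in for the spinlock and interrupt disabling itself is not modelled, the point is the per-port mutual exclusion on the startup/shutdown paths:

#include <pthread.h>
#include <stdio.h>

#define ASYNC_INITIALIZED 1

/* Hypothetical model of the per-port state; the patch guards it with
 * info->irq_spinlock instead of the global cli(). */
struct port_model {
	pthread_mutex_t irq_spinlock;	/* models info->irq_spinlock */
	int flags;			/* models ASYNC_INITIALIZED bit */
	char *xmit_buf;
};

static char page[4096];

static void *startup(void *arg)
{
	struct port_model *info = arg;

	pthread_mutex_lock(&info->irq_spinlock);   /* was: save_flags(); cli(); */
	if (!(info->flags & ASYNC_INITIALIZED)) {
		info->xmit_buf = page;
		info->flags |= ASYNC_INITIALIZED;
	}
	pthread_mutex_unlock(&info->irq_spinlock); /* was: restore_flags(); */
	return NULL;
}

static void *shutdown_port(void *arg)
{
	struct port_model *info = arg;

	pthread_mutex_lock(&info->irq_spinlock);
	if (info->flags & ASYNC_INITIALIZED) {
		info->xmit_buf = NULL;
		info->flags &= ~ASYNC_INITIALIZED;
	}
	pthread_mutex_unlock(&info->irq_spinlock);
	return NULL;
}

int main(void)
{
	struct port_model info = { .irq_spinlock = PTHREAD_MUTEX_INITIALIZER,
				   .flags = 0, .xmit_buf = NULL };
	pthread_t a, b;

	pthread_create(&a, NULL, startup, &info);
	pthread_create(&b, NULL, shutdown_port, &info);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("flags=%d buf=%p\n", info.flags, (void *)info.xmit_buf);
	return 0;
}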
- */ - if (info->next_port) - info->next_port->prev_port = info->prev_port; - if (info->prev_port) - info->prev_port->next_port = info->next_port; - else - IRQ_ports[state->irq] = info->next_port; - figure_IRQ_timeout(state->irq); - - /* - * Free the IRQ, if necessary - */ - if (state->irq && (!IRQ_ports[state->irq] || - !IRQ_ports[state->irq]->next_port)) { - if (IRQ_ports[state->irq]) { - free_irq(state->irq, &IRQ_ports[state->irq]); - retval = request_irq(state->irq, rs_interrupt_single, - SA_SHIRQ, "serial", - &IRQ_ports[state->irq]); - - if (retval) - printk("serial shutdown: request_irq: error %d" - " Couldn't reacquire IRQ.\n", retval); - } else - free_irq(state->irq, &IRQ_ports[state->irq]); - } - - if (info->xmit.buf) { - unsigned long pg = (unsigned long) info->xmit.buf; - info->xmit.buf = 0; - free_page(pg); - } - info->IER = 0; serial_outp(info, UART_IER, 0x00); /* disable all intrs */ #ifdef CONFIG_SERIAL_MANY_PORTS @@ -1583,7 +1553,43 @@ serial_outp(info, UART_IER, UART_IERX_SLEEP); } info->flags &= ~ASYNC_INITIALIZED; - restore_flags(flags); + + /* + * First unlink the serial port from the IRQ chain... + */ + if (info->next_port) + info->next_port->prev_port = info->prev_port; + if (info->prev_port) + info->prev_port->next_port = info->next_port; + else + IRQ_ports[state->irq] = info->next_port; + figure_IRQ_timeout(state->irq); + + /* + * Free the IRQ, if necessary + */ + if (state->irq && (!IRQ_ports[state->irq] || + !IRQ_ports[state->irq]->next_port)) { + if (IRQ_ports[state->irq]) { + free_irq(state->irq, &IRQ_ports[state->irq]); + retval = request_irq(state->irq, rs_interrupt_single, + SA_SHIRQ, "serial", + &IRQ_ports[state->irq]); + + if (retval) + printk("serial shutdown: request_irq: error %d" + " Couldn't reacquire IRQ.\n", retval); + } else + free_irq(state->irq, &IRQ_ports[state->irq]); + } + + if (info->xmit.buf) { + unsigned long pg = (unsigned long) info->xmit.buf; + info->xmit.buf = 0; + free_page(pg); + } + + spin_unlock_irqrestore( &info->irq_spinlock, flags); } #if (LINUX_VERSION_CODE < 131394) /* Linux 2.1.66 */ @@ -2791,7 +2797,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
state->count should always @@ -3128,6 +3134,7 @@ info->tqueue.routine = do_softint; info->tqueue.data = info; info->state = sstate; + spin_lock_init(&info->irq_spinlock); if (sstate->info) { kfree(info); *ret_info = sstate->info; @@ -3242,6 +3249,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_open ttys%d successful...", info->line); #endif + set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); return 0; } @@ -3655,6 +3663,7 @@ info->io_type = state->io_type; info->iomem_base = state->iomem_base; info->iomem_reg_shift = state->iomem_reg_shift; + info->irq_spinlock= (spinlock_t) SPIN_LOCK_UNLOCKED; save_flags(flags); cli(); @@ -3907,7 +3916,14 @@ case 6: /* BAR 4*/ case 7: base_idx=idx-2; /* BAR 5*/ } - + + /* AFAVLAB uses a different mixture of BARs and offsets */ + /* Not that ugly ;) -- HW */ + if (dev->vendor == PCI_VENDOR_ID_AFAVLAB && idx >= 4) { + base_idx = 4; + offset = (idx - 4) * 8; + } + /* Some Titan cards are also a little weird */ if (dev->vendor == PCI_VENDOR_ID_TITAN && (dev->device == PCI_DEVICE_ID_TITAN_400L || @@ -4311,9 +4327,11 @@ pbn_b0_bt_1_115200, pbn_b0_bt_2_115200, + pbn_b0_bt_8_115200, pbn_b0_bt_1_460800, pbn_b0_bt_2_460800, pbn_b0_bt_2_921600, + pbn_b0_bt_4_460800, pbn_b1_1_115200, pbn_b1_2_115200, @@ -4392,9 +4410,11 @@ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 }, /* pbn_b0_bt_1_115200 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 115200 }, /* pbn_b0_bt_2_115200 */ + { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 8, 115200 }, /* pbn_b0_bt_8_115200 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 460800 }, /* pbn_b0_bt_1_460800 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 460800 }, /* pbn_b0_bt_2_460800 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 921600 }, /* pbn_b0_bt_2_921600 */ + { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 4, 460800 }, /* pbn_b0_bt_4_460800 */ { SPCI_FL_BASE1, 1, 115200 }, /* pbn_b1_1_115200 */ { SPCI_FL_BASE1, 2, 115200 }, /* pbn_b1_2_115200 */ @@ -4859,6 +4879,12 @@ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUAD_B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_460800 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_SSERIAL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_1_115200 }, @@ -4871,6 +4897,11 @@ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_115200 }, + /* AFAVLAB serial card, from Harald Welte */ + { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P028, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_8_115200 }, + /* EKF addition for i960 Boards form EKF with serial port */ { PCI_VENDOR_ID_INTEL, 0x1960, 0xE4BF, PCI_ANY_ID, 0, 0, @@ -4890,6 +4921,7 @@ 0x1048, 0x1500, 0, 0, pbn_b1_1_115200 }, + /* SGI IOC3 board */ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, 0xFF00, 0, 0, 0, pbn_sgi_ioc3 }, @@ -5542,12 +5574,22 @@ tty_register_devfs(&callout_driver, 0, callout_driver.minor_start + state->line); } +#ifdef CONFIG_SERIAL_GSC + probe_serial_gsc(); +#endif +#ifdef CONFIG_SUPERIO + superio_serial_init(); +#endif #ifdef ENABLE_SERIAL_PCI probe_serial_pci(); #endif #ifdef ENABLE_SERIAL_PNP probe_serial_pnp(); #endif + // FIXME: Clean this one up +#if defined(CONFIG_SERIAL_CONSOLE) && defined(CONFIG_PARISC) + serial_console_init(); +#endif return 0; } @@ -5657,6 +5699,7 @@ info->io_type = req->io_type; info->iomem_base = req->iomem_base; info->iomem_reg_shift = req->iomem_reg_shift; + info->irq_spinlock= (spinlock_t) SPIN_LOCK_UNLOCKED; } autoconfig(state); if (state->type == PORT_UNKNOWN) { 
@@ -5964,6 +6007,7 @@ info->io_type = state->io_type; info->iomem_base = state->iomem_base; info->iomem_reg_shift = state->iomem_reg_shift; + info->irq_spinlock= (spinlock_t) SPIN_LOCK_UNLOCKED; quot = state->baud_base / baud; cval = cflag & (CSIZE | CSTOPB); #if defined(__powerpc__) || defined(__alpha__) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/serial_txx927.c linux.21rc5-ac2/drivers/char/serial_txx927.c --- linux.21rc5/drivers/char/serial_txx927.c 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/serial_txx927.c 2003-04-22 16:44:37.000000000 +0100 @@ -155,7 +155,7 @@ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ - kdevname(tty->device), (info->flags), serial_refcount,info->count,tty->count,s) + kdevname(tty->device), (info->flags), serial_refcount,info->count,atomic_read(&tty->count),s) #else #define DBG_CNT(s) #endif @@ -1407,7 +1407,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/sonypi.c linux.21rc5-ac2/drivers/char/sonypi.c --- linux.21rc5/drivers/char/sonypi.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/sonypi.c 2003-05-09 18:10:22.000000000 +0100 @@ -696,14 +696,36 @@ } for (i = 0; irq_list[i].irq; i++) { - if (!request_irq(irq_list[i].irq, sonypi_irq, - SA_SHIRQ, "sonypi", sonypi_irq)) { - sonypi_device.irq = irq_list[i].irq; - sonypi_device.bits = irq_list[i].bits; + + sonypi_device.irq = irq_list[i].irq; + sonypi_device.bits = irq_list[i].bits; + + /* Enable sonypi IRQ settings */ + if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) + sonypi_type2_srs(); + else + sonypi_type1_srs(); + + sonypi_call1(0x82); + sonypi_call2(0x81, 0xff); + if (compat) + sonypi_call1(0x92); + else + sonypi_call1(0x82); + + /* Now try requesting the irq from the system */ + if (!request_irq(sonypi_device.irq, sonypi_irq, + SA_SHIRQ, "sonypi", sonypi_irq)) break; - } + + /* If request_irq failed, disable sonypi IRQ settings */ + if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) + sonypi_type2_dis(); + else + sonypi_type1_dis(); } - if (!sonypi_device.irq ) { + + if (!irq_list[i].irq) { printk(KERN_ERR "sonypi: request_irq failed\n"); ret = -ENODEV; goto out3; @@ -715,17 +737,6 @@ outb(0xf0, 0xb2); #endif - if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) - sonypi_type2_srs(); - else - sonypi_type1_srs(); - - sonypi_call1(0x82); - sonypi_call2(0x81, 0xff); - if (compat) - sonypi_call1(0x92); - else - sonypi_call1(0x82); printk(KERN_INFO "sonypi: Sony Programmable I/O Controller Driver v%d.%d.\n", SONYPI_DRIVER_MAJORVERSION, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/sonypi.h linux.21rc5-ac2/drivers/char/sonypi.h --- linux.21rc5/drivers/char/sonypi.h 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/sonypi.h 2003-05-29 01:44:06.000000000 +0100 @@ -37,7 +37,7 @@ #ifdef __KERNEL__ #define SONYPI_DRIVER_MAJORVERSION 1 -#define SONYPI_DRIVER_MINORVERSION 18 +#define SONYPI_DRIVER_MINORVERSION 19 #define SONYPI_DEVICE_MODEL_TYPE1 1 #define SONYPI_DEVICE_MODEL_TYPE2 2 diff --exclude-from /usr/src/exclude 
-u --new-file --recursive linux.21rc5/drivers/char/specialix.c linux.21rc5-ac2/drivers/char/specialix.c --- linux.21rc5/drivers/char/specialix.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/specialix.c 2003-04-22 16:44:37.000000000 +0100 @@ -1517,7 +1517,7 @@ } bp = port_Board(port); - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_ERR "sx%d: sx_close: bad port count;" " tty->count is 1, port count is %d\n", board_No(bp), port->count); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/stallion.c linux.21rc5-ac2/drivers/char/stallion.c --- linux.21rc5/drivers/char/stallion.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/stallion.c 2003-04-22 16:44:37.000000000 +0100 @@ -1197,7 +1197,7 @@ restore_flags(flags); return; } - if ((tty->count == 1) && (portp->refcount != 1)) + if ((atomic_read(&tty->count) == 1) && (portp->refcount != 1)) portp->refcount = 1; if (portp->refcount-- > 1) { MOD_DEC_USE_COUNT; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/synclink.c linux.21rc5-ac2/drivers/char/synclink.c --- linux.21rc5/drivers/char/synclink.c 2003-05-28 15:07:51.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/synclink.c 2003-05-03 18:43:43.000000000 +0100 @@ -3261,7 +3261,7 @@ if (!info->count || tty_hung_up_p(filp)) goto cleanup; - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * tty->count is 1 and the tty structure will be freed. * info->count should be one in this case. @@ -4291,9 +4291,7 @@ info->get_tx_holding_index=0; /* restart transmit timer */ - del_timer(&info->tx_timer); - info->tx_timer.expires = jiffies + jiffies_from_ms(5000); - add_timer(&info->tx_timer); + mod_timer(&info->tx_timer, jiffies + jiffies_from_ms(5000)); ret = 1; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/synclinkmp.c linux.21rc5-ac2/drivers/char/synclinkmp.c --- linux.21rc5/drivers/char/synclinkmp.c 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/synclinkmp.c 2003-04-22 16:44:37.000000000 +0100 @@ -851,7 +851,7 @@ if (!info->count || tty_hung_up_p(filp)) goto cleanup; - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * tty->count is 1 and the tty structure will be freed. * info->count should be one in this case. 
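The synclink.c hunk just above collapses the del_timer()/set-expires/add_timer() triple into a single mod_timer() call, which rearms the timer as one operation instead of leaving a window where it is deleted but not yet re-added. A toy single-threaded mock of the two forms; it only shows the shape of the change, since the real benefit (the rearm happening under the timer base lock) cannot be demonstrated without concurrency:

#include <stdio.h>

struct timer_list { unsigned long expires; int pending; };

static unsigned long jiffies = 1000;
#define HZ 100
#define jiffies_from_ms(ms) ((ms) * HZ / 1000)

static void del_timer(struct timer_list *t) { t->pending = 0; }
static void add_timer(struct timer_list *t) { t->pending = 1; }

static void mod_timer(struct timer_list *t, unsigned long expires)
{
	t->expires = expires;
	t->pending = 1;
}

int main(void)
{
	struct timer_list tx_timer = { 0, 0 };

	/* before: three steps, with a window where the timer is not pending */
	del_timer(&tx_timer);
	tx_timer.expires = jiffies + jiffies_from_ms(5000);
	add_timer(&tx_timer);

	/* after: one rearm call with the same end state */
	mod_timer(&tx_timer, jiffies + jiffies_from_ms(5000));

	printf("expires=%lu pending=%d\n", tx_timer.expires, tx_timer.pending);
	return 0;
}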
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/tty_io.c linux.21rc5-ac2/drivers/char/tty_io.c --- linux.21rc5/drivers/char/tty_io.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/tty_io.c 2003-05-27 14:04:06.000000000 +0100 @@ -144,8 +144,8 @@ extern void console_8xx_init(void); extern int rs_8xx_init(void); extern void mac_scc_console_init(void); -extern void hwc_console_init(void); -extern void hwc_tty_init(void); +extern void sclp_console_init(void); +extern void sclp_tty_init(void); extern void con3215_init(void); extern void tty3215_init(void); extern void tub3270_con_init(void); @@ -241,14 +241,15 @@ file_list_unlock(); if (tty->driver.type == TTY_DRIVER_TYPE_PTY && tty->driver.subtype == PTY_TYPE_SLAVE && - tty->link && tty->link->count) + tty->link && atomic_read(&tty->link->count)) count++; - if (tty->count != count) { + if (atomic_read(&tty->count) != count) { printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) " "!= #fd's(%d) in %s\n", - kdevname(tty->device), tty->count, count, routine); + kdevname(tty->device), atomic_read(&tty->count), + count, routine); return count; - } + } #endif return 0; } @@ -924,7 +925,7 @@ o_tty->termios_locked = *o_ltp_loc; (*driver->other->refcount)++; if (driver->subtype == PTY_TYPE_MASTER) - o_tty->count++; + atomic_inc(&o_tty->count); /* Establish the links in both directions */ tty->link = o_tty; @@ -945,7 +946,7 @@ tty->termios = *tp_loc; tty->termios_locked = *ltp_loc; (*driver->refcount)++; - tty->count++; + atomic_inc(&tty->count); /* * Structures all installed ... call the ldisc open routines. @@ -984,13 +985,13 @@ * special case for PTY masters: only one open permitted, * and the slave side open count is incremented as well. */ - if (tty->count) { + if (atomic_read(&tty->count)) { retval = -EIO; goto end_init; } - tty->link->count++; + atomic_inc(&tty->link->count); } - tty->count++; + atomic_inc(&tty->count); tty->driver = *driver; /* N.B. why do this every time?? */ success: @@ -1114,7 +1115,7 @@ #ifdef TTY_DEBUG_HANGUP printk(KERN_DEBUG "release_dev of %s (tty count=%d)...", - tty_name(tty, buf), tty->count); + tty_name(tty, buf), atomic_read(&tty->count)); #endif #ifdef TTY_PARANOIA_CHECK @@ -1166,9 +1167,9 @@ * each iteration we avoid any problems. */ while (1) { - tty_closing = tty->count <= 1; + tty_closing = atomic_read(&tty->count) <= 1; o_tty_closing = o_tty && - (o_tty->count <= (pty_master ? 1 : 0)); + (atomic_read(&o_tty->count) <= (pty_master ? 1 : 0)); do_sleep = 0; if (tty_closing) { @@ -1205,17 +1206,20 @@ * block, so it's safe to proceed with closing. 
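The tty_io.c init_dev() hunks in this stretch show the open side of the atomic conversion: a pty master may be opened only once, and opening it also takes a reference on the slave via tty->link, both now done with atomic_read()/atomic_inc(). A userspace sketch of that path, again with C11 stdatomic standing in for atomic_t:

#include <stdatomic.h>
#include <stdio.h>

#define EIO 5

typedef atomic_int atomic_t;
#define atomic_read(v)	atomic_load(v)
#define atomic_inc(v)	atomic_fetch_add((v), 1)

struct tty_model { atomic_t count; struct tty_model *link; };

/* Models the init_dev() special case for pty masters: only one open
 * permitted, and the slave side's count is incremented as well. */
static int model_open(struct tty_model *tty, int is_pty_master)
{
	if (is_pty_master) {
		if (atomic_read(&tty->count))
			return -EIO;		/* master already open */
		atomic_inc(&tty->link->count);	/* slave side reference */
	}
	atomic_inc(&tty->count);
	return 0;
}

int main(void)
{
	struct tty_model master, slave;

	atomic_init(&master.count, 0);
	atomic_init(&slave.count, 0);
	master.link = &slave;
	slave.link = &master;

	printf("%d\n", model_open(&master, 1));	/* 0  */
	printf("%d\n", model_open(&master, 1));	/* -5 */
	printf("master=%d slave=%d\n",
	       atomic_read(&master.count), atomic_read(&slave.count));
	return 0;
}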
*/ if (pty_master) { - if (--o_tty->count < 0) { + atomic_dec(&o_tty->count); + if (atomic_read(&o_tty->count) < 0) { printk(KERN_WARNING "release_dev: bad pty slave count " "(%d) for %s\n", - o_tty->count, tty_name(o_tty, buf)); - o_tty->count = 0; + atomic_read(&o_tty->count), + tty_name(o_tty, buf)); + atomic_set(&o_tty->count, 0); } } - if (--tty->count < 0) { + atomic_dec(&tty->count); + if (atomic_read(&tty->count) < 0) { printk(KERN_WARNING "release_dev: bad tty->count (%d) for %s\n", - tty->count, tty_name(tty, buf)); - tty->count = 0; + atomic_read(&tty->count), tty_name(tty, buf)); + atomic_set(&tty->count, 0); } /* @@ -1431,7 +1435,7 @@ } if ((tty->driver.type == TTY_DRIVER_TYPE_SERIAL) && (tty->driver.subtype == SERIAL_TYPE_CALLOUT) && - (tty->count == 1)) { + (atomic_read(&tty->count) == 1)) { static int nr_warns; if (nr_warns < 5) { printk(KERN_WARNING "tty_io.c: " @@ -2272,8 +2276,8 @@ #ifdef CONFIG_TN3215 con3215_init(); #endif -#ifdef CONFIG_HWC - hwc_console_init(); +#ifdef CONFIG_SCLP_CONSOLE + sclp_console_init(); #endif #ifdef CONFIG_STDIO_CONSOLE stdio_console_init(); @@ -2432,8 +2436,8 @@ #ifdef CONFIG_TN3215 tty3215_init(); #endif -#ifdef CONFIG_HWC - hwc_tty_init(); +#ifdef CONFIG_SCLP_TTY + sclp_tty_init(); #endif #ifdef CONFIG_A2232 a2232board_init(); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/char/vt.c linux.21rc5-ac2/drivers/char/vt.c --- linux.21rc5/drivers/char/vt.c 2003-05-28 15:07:50.000000000 +0100 +++ linux.21rc5-ac2/drivers/char/vt.c 2003-04-22 16:44:37.000000000 +0100 @@ -40,7 +40,8 @@ char vt_dont_switch; extern struct tty_driver console_driver; -#define VT_IS_IN_USE(i) (console_driver.table[i] && console_driver.table[i]->count) +#define VT_IS_IN_USE(i) (console_driver.table[i] && \ + atomic_read(&console_driver.table[i]->count)) #define VT_BUSY(i) (VT_IS_IN_USE(i) || i == fg_console || i == sel_cons) /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/gsc/dino.c linux.21rc5-ac2/drivers/gsc/dino.c --- linux.21rc5/drivers/gsc/dino.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/gsc/dino.c 2003-04-28 16:02:47.000000000 +0100 @@ -251,7 +251,7 @@ unsigned long flags; \ spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \ /* tell HW which IO Port address */ \ - gsc_writel((u32) addr & ~3, d->base_addr + DINO_PCI_ADDR); \ + gsc_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \ /* generate I/O PORT read cycle */ \ v = gsc_read##type(d->base_addr+DINO_IO_DATA+(addr&mask)); \ spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \ @@ -267,7 +267,7 @@ { \ unsigned long flags; \ spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \ - /* tell HW which CFG address */ \ + /* tell HW which IO port address */ \ gsc_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \ /* generate cfg write cycle */ \ gsc_write##type(cpu_to_le##size(val), d->base_addr+DINO_IO_DATA+(addr&mask)); \ @@ -467,7 +467,7 @@ * to set up the addresses of the devices on this bus. 
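The release_dev() hunk above rewrites `if (--tty->count < 0)` as an atomic_dec() followed by an atomic_read() check, warning and clamping the counter back to zero if a refcount bug elsewhere drove it negative. The same clamp can be modelled in one step with C11 atomic_fetch_sub(), which hands back the pre-decrement value; a userspace sketch whose names are illustrative, not the kernel API:

#include <stdatomic.h>
#include <stdio.h>

typedef atomic_int atomic_t;

/* Drop one reference; if the count was already zero, complain and clamp
 * back to zero.  The patch spells this as atomic_dec() plus a separate
 * atomic_read(); fetch_sub() folds the test and decrement together. */
static void model_release(atomic_t *count, const char *name)
{
	int old = atomic_fetch_sub(count, 1);

	if (old - 1 < 0) {
		printf("release_dev: bad count (%d) for %s\n", old - 1, name);
		atomic_store(count, 0);
	}
}

int main(void)
{
	atomic_t count;

	atomic_init(&count, 1);
	model_release(&count, "tty0");	/* quiet: 1 -> 0 */
	model_release(&count, "tty0");	/* warns and clamps to 0 */
	printf("final=%d\n", atomic_load(&count));
	return 0;
}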
*/ #define _8MB 0x00800000UL -static void __init +static int __init dino_card_setup(struct pci_bus *bus, unsigned long base_addr) { int i; @@ -482,7 +482,7 @@ 0xffffffffffffffffUL &~ _8MB, _8MB, NULL, NULL) < 0) { printk(KERN_WARNING "Dino: Failed to allocate memory region\n"); - return; + return -ENODEV; } bus->resource[1] = res; bus->resource[0] = &(dino_dev->hba.io_space); @@ -495,6 +495,7 @@ gsc_writel(1 << i, base_addr + DINO_IO_ADDR_EN); pcibios_assign_unassigned_resources(bus); + return 0; } static void __init diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/hotplug/acpiphp_glue.c linux.21rc5-ac2/drivers/hotplug/acpiphp_glue.c --- linux.21rc5/drivers/hotplug/acpiphp_glue.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/hotplug/acpiphp_glue.c 2003-04-28 17:59:26.000000000 +0100 @@ -230,11 +230,11 @@ * TBD: _TRA, etc. */ static void -decode_acpi_resource (acpi_resource *resource, struct acpiphp_bridge *bridge) +decode_acpi_resource (struct acpi_resource *resource, struct acpiphp_bridge *bridge) { - acpi_resource_address16 *address16_data; - acpi_resource_address32 *address32_data; - acpi_resource_address64 *address64_data; + struct acpi_resource_address16 *address16_data; + struct acpi_resource_address32 *address32_data; + struct acpi_resource_address64 *address64_data; struct pci_resource *res; u32 resource_type, producer_consumer, address_length; @@ -252,7 +252,7 @@ switch (resource->id) { case ACPI_RSTYPE_ADDRESS16: - address16_data = (acpi_resource_address16 *)&resource->data; + address16_data = (struct acpi_resource_address16 *)&resource->data; resource_type = address16_data->resource_type; producer_consumer = address16_data->producer_consumer; min_address_range = address16_data->min_address_range; @@ -264,7 +264,7 @@ break; case ACPI_RSTYPE_ADDRESS32: - address32_data = (acpi_resource_address32 *)&resource->data; + address32_data = (struct acpi_resource_address32 *)&resource->data; resource_type = address32_data->resource_type; producer_consumer = address32_data->producer_consumer; min_address_range = address32_data->min_address_range; @@ -276,7 +276,7 @@ break; case ACPI_RSTYPE_ADDRESS64: - address64_data = (acpi_resource_address64 *)&resource->data; + address64_data = (struct acpi_resource_address64 *)&resource->data; resource_type = address64_data->resource_type; producer_consumer = address64_data->producer_consumer; min_address_range = address64_data->min_address_range; @@ -296,7 +296,7 @@ break; } - resource = (acpi_resource *)((char*)resource + resource->length); + resource = (struct acpi_resource *)((char*)resource + resource->length); if (found && producer_consumer == ACPI_PRODUCER && address_length > 0) { switch (resource_type) { @@ -392,10 +392,10 @@ #if ACPI_CA_VERSION < 0x20020201 acpi_buffer buffer; #else - acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER, - .pointer = NULL}; + struct acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER, + .pointer = NULL}; #endif - acpi_object *package; + union acpi_object *package; int i; /* default numbers */ @@ -425,7 +425,7 @@ return; } - package = (acpi_object *) buffer.pointer; + package = (union acpi_object *) buffer.pointer; if (!package || package->type != ACPI_TYPE_PACKAGE || package->package.count != 4 || !package->package.elements) { @@ -497,8 +497,8 @@ #if ACPI_CA_VERSION < 0x20020201 acpi_buffer buffer; #else - acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER, - .pointer = NULL}; + struct acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER, + .pointer = 
NULL}; #endif struct acpiphp_bridge *bridge; @@ -807,10 +807,10 @@ find_host_bridge (acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_status status; - acpi_device_info info; + struct acpi_device_info info; char objname[5]; - acpi_buffer buffer = { .length = sizeof(objname), - .pointer = objname }; + struct acpi_buffer buffer = { .length = sizeof(objname), + .pointer = objname }; status = acpi_get_object_info(handle, &info); if (ACPI_FAILURE(status)) { @@ -872,8 +872,8 @@ acpi_status status; struct acpiphp_func *func; struct list_head *l; - acpi_object_list arg_list; - acpi_object arg; + struct acpi_object_list arg_list; + union acpi_object arg; int retval = 0; @@ -1101,8 +1101,8 @@ { struct acpiphp_bridge *bridge; char objname[64]; - acpi_buffer buffer = { .length = sizeof(objname), - .pointer = objname }; + struct acpi_buffer buffer = { .length = sizeof(objname), + .pointer = objname }; bridge = (struct acpiphp_bridge *)context; @@ -1152,8 +1152,8 @@ { struct acpiphp_func *func; char objname[64]; - acpi_buffer buffer = { .length = sizeof(objname), - .pointer = objname }; + struct acpi_buffer buffer = { .length = sizeof(objname), + .pointer = objname }; acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/hotplug/acpiphp.h linux.21rc5-ac2/drivers/hotplug/acpiphp.h --- linux.21rc5/drivers/hotplug/acpiphp.h 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/hotplug/acpiphp.h 2003-05-28 21:48:48.000000000 +0100 @@ -34,7 +34,7 @@ #ifndef _ACPIPHP_H #define _ACPIPHP_H -#include "include/acpi.h" +#include #include "pci_hotplug.h" #if ACPI_CA_VERSION < 0x20020201 @@ -71,7 +71,7 @@ return AE_OK; } #else /* ACPI_CA_VERSION < 0x20020201 */ -#include "acpi_bus.h" +#include #endif /* compatibility stuff for ACPI CA */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/hotplug/Config.in linux.21rc5-ac2/drivers/hotplug/Config.in --- linux.21rc5/drivers/hotplug/Config.in 2003-05-28 15:08:06.000000000 +0100 +++ linux.21rc5-ac2/drivers/hotplug/Config.in 2003-04-22 16:44:37.000000000 +0100 @@ -6,11 +6,11 @@ dep_tristate 'Support for PCI Hotplug (EXPERIMENTAL)' CONFIG_HOTPLUG_PCI $CONFIG_EXPERIMENTAL $CONFIG_PCI +dep_tristate ' ACPI PCI Hotplug driver' CONFIG_HOTPLUG_PCI_ACPI $CONFIG_ACPI $CONFIG_HOTPLUG_PCI dep_tristate ' Compaq PCI Hotplug driver' CONFIG_HOTPLUG_PCI_COMPAQ $CONFIG_HOTPLUG_PCI $CONFIG_X86 dep_mbool ' Save configuration into NVRAM on Compaq servers' CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM $CONFIG_HOTPLUG_PCI_COMPAQ if [ "$CONFIG_X86_IO_APIC" = "y" ]; then dep_tristate ' IBM PCI Hotplug driver' CONFIG_HOTPLUG_PCI_IBM $CONFIG_HOTPLUG_PCI $CONFIG_X86_IO_APIC $CONFIG_X86 fi -dep_tristate ' ACPI PCI Hotplug driver' CONFIG_HOTPLUG_PCI_ACPI $CONFIG_ACPI $CONFIG_HOTPLUG_PCI - +dep_tristate ' IBM Thinkpad (20H2999) Docking driver (VERY EXPERIMENTAL) ' CONFIG_HOTPLUG_PCI_H2999 $CONFIG_HOTPLUG_PCI $CONFIG_X86 endmenu diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/hotplug/ibmphp_ebda.c linux.21rc5-ac2/drivers/hotplug/ibmphp_ebda.c --- linux.21rc5/drivers/hotplug/ibmphp_ebda.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/hotplug/ibmphp_ebda.c 2003-05-09 18:10:37.000000000 +0100 @@ -115,10 +115,7 @@ static void free_ebda_hpc (struct controller *controller) { kfree (controller->slots); - controller->slots = NULL; kfree (controller->buses); - controller->buses = NULL; - controller->ctrl_dev = NULL; kfree (controller); } @@ 
-292,6 +289,7 @@ rio_complete = 0; hs_complete = 0; + /* FIXME: This is x86-32 specific, and even then PC specific.. */ io_mem = ioremap ((0x40 << 4) + 0x0e, 2); if (!io_mem ) return -ENOMEM; @@ -299,6 +297,9 @@ iounmap (io_mem); debug ("returned ebda segment: %x\n", ebda_seg); + if(ebda_seg == 0) /* No EBDA */ + return -ENOENT; + io_mem = ioremap (ebda_seg<<4, 65000); if (!io_mem ) return -ENOMEM; @@ -578,7 +579,6 @@ return 0; } - /* Since we don't know the max slot number per each chassis, hence go * through the list of all chassis to find out the range * Arguments: slot_num, 1st slot number of the chassis we think we are on, @@ -658,7 +658,8 @@ return first_slot + 1; } -static char *create_file_name (struct slot * slot_cur) + +static int create_file_name (struct slot * slot_cur, char *buf) { struct opt_rio *opt_vg_ptr = NULL; struct opt_rio_lo *opt_lo_ptr = NULL; @@ -669,15 +670,11 @@ u8 slot_num; u8 flag = 0; - if (!slot_cur) { - err ("Structure passed is empty \n"); - return NULL; - } + if (!slot_cur) + BUG(); slot_num = slot_cur->number; - memset (str, 0, sizeof(str)); - if (rio_table_ptr) { if (rio_table_ptr->ver_num == 3) { opt_vg_ptr = find_chassis_num (slot_num); @@ -709,7 +706,7 @@ } else if (rio_table_ptr) { if (rio_table_ptr->ver_num == 3) { /* if both NULL and we DO have correct RIO table in BIOS */ - return NULL; + return -1; } } if (!flag) { @@ -721,10 +718,19 @@ } } - sprintf(str, "%s%dslot%d", - which == 0 ? "chassis" : "rxe", - number, slot_num - first_slot + 1); - return str; + switch (which) { + case 0: + /* Chassis */ + snprintf(buf, 32, "chassis%dslot%d", number, slot_num - first_slot + 1); + break; + case 1: + /* RXE */ + snprintf(buf, 32, "rxe%dslot%d", number, slot_num - first_slot + 1); + break; + default: + return -1; + } + return 0; } static struct pci_driver ibmphp_driver; @@ -746,8 +752,9 @@ struct ebda_hpc_slot *slot_ptr; struct bus_info *bus_info_ptr1, *bus_info_ptr2; int rc; - struct slot *tmp_slot; + struct slot *slot_cur, *tmp_slot; struct list_head *list; + char buf[32]; addr = hpc_list_ptr->phys_addr; for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { @@ -980,10 +987,12 @@ } /* each hpc */ list_for_each (list, &ibmphp_slot_head) { - tmp_slot = list_entry (list, struct slot, ibm_slot_list); - - snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); - pci_hp_register (tmp_slot->hotplug_slot); + slot_cur = list_entry (list, struct slot, ibm_slot_list); + if(create_file_name (slot_cur, buf)==0) + { + snprintf (slot_cur->hotplug_slot->name, 30, "%s", buf); + pci_hp_register (slot_cur->hotplug_slot); + } } print_ebda_hpc (); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/hotplug/Makefile linux.21rc5-ac2/drivers/hotplug/Makefile --- linux.21rc5/drivers/hotplug/Makefile 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/hotplug/Makefile 2003-04-28 17:59:26.000000000 +0100 @@ -12,6 +12,7 @@ obj-$(CONFIG_HOTPLUG_PCI_COMPAQ) += cpqphp.o obj-$(CONFIG_HOTPLUG_PCI_IBM) += ibmphp.o obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o +obj-$(CONFIG_HOTPLUG_PCI_H2999) += tp600.o pci_hotplug-objs := pci_hotplug_core.o \ pci_hotplug_util.o @@ -27,10 +28,6 @@ ibmphp_res.o \ ibmphp_hpc.o -ifdef CONFIG_HOTPLUG_PCI_ACPI - EXTRA_CFLAGS += -D_LINUX -I$(CURDIR)/../acpi -endif - acpiphp_objs := acpiphp_core.o \ acpiphp_glue.o \ acpiphp_pci.o \ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/hotplug/tp600.c linux.21rc5-ac2/drivers/hotplug/tp600.c --- 
linux.21rc5/drivers/hotplug/tp600.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/hotplug/tp600.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,500 @@ +/* + * Drivers for the IBM 20H2999 found in the IBM thinkpad series + * machines. + * + * This driver was done without documentation from IBM + * + * _ + * { } + * | | All reverse engineering done + * | | in accordance with 92/250/EEC + * .-.! !.-. and Copyright (Computer Programs) + * .-! ! ! !.-. Regulations 1992 (S.I. 1992 No. 3233) + * ! ! ! ; + * \ ; + * \ ; + * ! : + * ! | + * | | + * + * + * Various other IBM's tried to obtain docs but failed. For that + * reason we only support warm not hot undocking at the moment. + * + * Known bugs: + * Sometimes we hang with an IRQ storm. I don't know what + * deals with the IRQ disables yet. (Hot dock) + * Sometimes busmastering (and maybe IRQs) don't come back + * (Seems to be a buffering issue for hot dock) + * + * Yet to do: + * ISA is not yet handled (oh god help us) + * Instead of saving/restoring pci devices we should + * re-enumerate that subtree so you can change devices + * (That also deals with stale save problems) + * We need to do a proper warm save/restore interface + * Bridged cards don't yet work + * + * Usage: + * Load module + * Pray + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "pci_hotplug.h" +#include "tp600.h" + +static struct h2999_dev *testdev; + +/** + * pci_save_slot - save slot PCI data + * @slot: slot to save + * + * Save the slot data from a PCI device + */ + +static void pci_save_slot(struct h2999_slot *s) +{ + int i, n; + + for(i=0;i<8;i++) + { + struct pci_dev *p = pci_find_slot(s->dev->hotplug_bus, PCI_DEVFN(s->slotid, i)); + s->pci[i] = p; + if(p) + { + for(n = 0; n < 64; n++) + pci_read_config_dword(p, n * 4, &s->save[i][n]); +// printk("Saved %02X:%02X.%X\n", +// s->dev->hotplug_bus, s->slotid, i); + } + } +} + +static void pci_restore_slot(struct h2999_slot *s) +{ + int i,n; + + for(i = 0 ; i < 8; i++) + { + if(s->pci[i]) + { + pci_set_power_state(s->pci[i], 0); + + for(n = 0; n < 54; n++) + if(n!=1) + pci_write_config_dword(s->pci[i], n * 4, s->save[i][n]); + pci_write_config_dword(s->pci[i], 4, s->save[i][1]); +// printk("Restored %02X:%02X.%X\n", +// s->dev->hotplug_bus, s->slotid, i); + } + } +} + +/** + * slot_enable - enable H2999 slot + * @slot: slot to enable + * + * Enable a slot. Its not actually clear what this means with + * a hot dock. We can certainly 'discover' the PCI device in the + * slot when asked. + */ + +static int slot_enable(struct hotplug_slot *slot) +{ + struct h2999_slot *s = slot->private; + int i; + pci_restore_slot(s); + for(i=0; i < 8; i++) + { + if(s->pci[i] && (s->drivermap&(1<pci[i]); + } + return 0; +} + +/** + * slot_disable - disable H2999 slot + * @slot: slot to disable + * + * Disable a slot. Its not actually clear what to do here. We could + * report the device as having been removed when we are told to do + * this. 
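pci_save_slot()/pci_restore_slot() above snapshot the config space of every function behind the docking bridge and play it back on redock, and the restore loop deliberately skips dword 1 and writes it last: that dword holds the PCI command register, so decode and bus-mastering are re-enabled only after the BARs and other settings are back. A userspace model of that ordering (register contents are made up; the save is simplified to a flat copy):

#include <stdio.h>
#include <string.h>

#define CFG_DWORDS 64
#define PCI_COMMAND_DWORD 1	/* dword index of the command/status word */

struct func_model {
	unsigned int cfg[CFG_DWORDS];	/* live config space */
	unsigned int save[CFG_DWORDS];	/* snapshot taken before undock */
};

static void model_save(struct func_model *f)
{
	memcpy(f->save, f->cfg, sizeof(f->save));
}

/* Restore everything except the command register, then write the
 * command register last so the function starts decoding only once its
 * BARs and latency settings are back in place. */
static void model_restore(struct func_model *f)
{
	int n;

	for (n = 0; n < CFG_DWORDS; n++)
		if (n != PCI_COMMAND_DWORD)
			f->cfg[n] = f->save[n];
	f->cfg[PCI_COMMAND_DWORD] = f->save[PCI_COMMAND_DWORD];
}

int main(void)
{
	struct func_model f = { .cfg = { 0x00951014, 0x02000006, 0x06040000 } };

	model_save(&f);
	memset(f.cfg, 0xFF, sizeof(f.cfg));	/* models the undock wiping state */
	model_restore(&f);
	printf("vendor/dev %08x command %08x\n", f.cfg[0], f.cfg[1]);
	return 0;
}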
+ */ + +static int slot_disable(struct hotplug_slot *slot) +{ + struct h2999_slot *s = slot->private; + struct pci_dev *pdev; + int i; + + for(i = 0; i < 8; i++) + { + pdev = s->pci[i]; + /* Hack for now */ + if (pdev && pdev->driver) { + if (!pdev->driver->remove) + return -EBUSY; + } + } + + s->drivermap = 0; + + for(i = 0; i < 8; i++) + { + pdev = s->pci[i]; + if(pdev) + { + if(pdev->driver) + { + s->drivermap|=(1<driver->remove(pdev); + pdev->driver = NULL; + } + } + } + return 0; +} + +/** + * set_attention_status - set attention callback + * @slot: slot to set + * @value: on/off + * + * Called when the hotplug layer wants to set the attention status of + * the hotplug slot. The H2999 doesn't have an attention control (at + * least not that we know of). So we ignore this. + */ + +static int set_attention_status(struct hotplug_slot *slot, u8 value) +{ + return 0; +} + +/** + * hardware_test - test hardware callback + * @slot: slot to test + * value: test to run + * + * The H2999 does not support any hardware tests that we know of. + */ + +static int hardware_test(struct hotplug_slot *slot, u32 value) +{ + return 0; +} + +/** + * get_power_status - power query callback + * @slot; slot to query + * @value: returned state + * + * Called when the hotplug layer wants to ask us if the slot is + * powered. We work on the basis that all slots are powered when + * the unit is docked. This seems to be correct but I've not actually + * rammed a voltmeter into the slots to see if they are cleverer than + * that. + */ + +static int get_power_status(struct hotplug_slot *slot, u8 *value) +{ + struct h2999_slot *s = slot->private; + + /* Slots are all powered when docked */ + if(s->dev->docked > 0) + *value = 1; + else + *value = 0; + return 0; +} + +/** + * get_adapter_status - card presence query + * @slot: slot to query + * @value: returned state + * + * If we are not docked, we know the "slot" is empty. If we are + * docked its a bit more complicated. + */ + +static int get_adapter_status(struct hotplug_slot *slot, u8 *value) +{ + struct h2999_slot *s = slot->private; + + *value = 0; + + if(s->dev->docked) + *value = 1; + return 0; +} + +static struct hotplug_slot_ops h2999_ops = { + THIS_MODULE, + slot_enable, + slot_disable, + set_attention_status, + hardware_test, + get_power_status, + NULL, + NULL, + get_adapter_status +}; + +/** + * h2999_is_docked - check if docked + * @dev: h2999 device + * + * Check if we are currently docked. The method we use at the moment + * relies on poking around behind the bridge. There is no doubt a + * correct way to do this. Maybe one day IBM will be decide to + * actually provide documentation + */ + +static int h2999_is_docked(struct h2999_dev *dev) +{ + struct pci_dev *pdev = pci_find_slot(dev->hotplug_bus, PCI_DEVFN(0,0)); + u32 status; + + if(pdev == NULL) + return 0; /* Shouldnt happen - must be undocked */ + + if(pci_read_config_dword(pdev, PCI_VENDOR_ID, &status)) + return 0; /* Config read failed - its missing */ + + if(status == 0xFFFFFFFFUL) /* Failed */ + return 0; + + /* Must be docked */ + return 1; +} +/** + * h2999_reconfigure_dock - redock event handler + * @dev: h2999 device + * + * A redocking event has occurred. There may also have been an undock + * before hand. If so then the unconfigure routine is called first. 
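slot_disable() above makes driver detach all-or-nothing across the eight functions of a slot: a first pass refuses with -EBUSY if any bound driver lacks a remove() method, and only then does a second pass actually detach, recording which functions had a driver in the drivermap bitmask so slot_enable() can rebind exactly those on redock. A userspace model of that check-then-commit pattern:

#include <stdio.h>

#define EBUSY 16
#define NFUNCS 8

struct func_model {
	int present;
	int has_driver;
	int removable;	/* models pdev->driver->remove != NULL */
};

/* First pass: may we detach everything?  Second pass: do it, recording
 * which functions had a driver in a bitmask for later re-enable. */
static int model_slot_disable(struct func_model f[NFUNCS],
			      unsigned char *drivermap)
{
	int i;

	for (i = 0; i < NFUNCS; i++)
		if (f[i].present && f[i].has_driver && !f[i].removable)
			return -EBUSY;	/* leave the slot untouched */

	*drivermap = 0;
	for (i = 0; i < NFUNCS; i++) {
		if (f[i].present && f[i].has_driver) {
			*drivermap |= 1 << i;
			f[i].has_driver = 0;	/* models driver->remove() */
		}
	}
	return 0;
}

int main(void)
{
	struct func_model f[NFUNCS] = {
		{ 1, 1, 1 }, { 1, 1, 1 }, { 0, 0, 0 }, { 1, 0, 0 },
	};
	unsigned char map = 0;

	printf("rc=%d map=0x%02x\n", model_slot_disable(f, &map), map);
	return 0;
}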
+ */ + +static void h2999_reconfigure_dock(struct h2999_dev *dev) +{ + int docked, i; + + docked = h2999_is_docked(dev); + + if(docked ^ dev->docked) + { + printk("h2999: Now %sdocked.\n", + docked?"":"un"); + if(docked) + { + /* We should do the re-enumeration of the bus here + The current save/restore is a test hack */ + for(i=0; i < H2999_SLOTS; i++) + { + if(dev->slots[i].hotplug_slot.private != &dev->slots[i]) + BUG(); + slot_enable(&dev->slots[i].hotplug_slot); + } + } + else + { + for(i=0; i < H2999_SLOTS; i++) + { + if(slot_disable(&dev->slots[i].hotplug_slot)) + printk(KERN_ERR "h2999: someone undocked while devices were in use, how rude!\n"); + } + } + dev->docked = docked; + } + /* Clear bits */ + pci_write_config_byte(dev->pdev, 0x1f, 0xf0); +} + +/* + * h2999_attach - attach an H2999 bridge + * @pdev: PCI device + * @unused: unused + * + * Called when the PCI layer discovers an H2999 docking bridge is + * present in the system. We scan the bridge to obtain its current + * status and register it with the hot plug layer + */ + +static int __devinit h2999_attach(struct pci_dev *pdev, const struct pci_device_id *unused) +{ + /* PCI core found a new H2999 */ + struct h2999_dev *dev; + u8 bus; + int i; + + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + if(dev == NULL) + goto nomem; + + memset(dev, 0, sizeof(*dev)); + + dev->pdev = pdev; + + pci_read_config_byte(pdev, PCI_SECONDARY_BUS, &bus); + dev->hotplug_bus = bus; + + /* Requires hotplug_bus and pdev are set */ + + dev->docked = h2999_is_docked(dev); + + printk(KERN_INFO "Found IBM 20H2999. Status is %sdocked, docking bus is %d.\n", + dev->docked?"":"un", dev->hotplug_bus); + + /* + * Allow for 8 devices. On the TP600 at least we have + * 0-3 as the onboard devices, and 4-7 as the slots. + * To add more fun there is an ISA bridge which we + * don't really handle yet. + */ + + for(i = 0; i < H2999_SLOTS; i++) + { + struct h2999_slot *s = &dev->slots[i]; + int ret; + + s->hotplug_slot.info = &s->hotplug_info; + s->hotplug_slot.private = s; + s->slotid = i; + s->dev = dev; + s->live = 1; + s->hotplug_slot.ops = &h2999_ops; + s->hotplug_slot.name = s->name; + s->hotplug_info.power_status = dev->docked; + /* FIXME - should probe here + In truth the hp_register ought to call thse as needed! */ + s->hotplug_info.adapter_status = 0; + s->pdev = pci_find_slot(dev->hotplug_bus, PCI_DEVFN(i, 0)); + snprintf(s->name, SLOT_NAME_SIZE, "Dock%d.%d", dev->hotplug_bus, i); + pci_save_slot(s); + ret = pci_hp_register(&s->hotplug_slot); + if(ret) + { + printk(KERN_ERR "pci_hp_register failed for slot %d with error %d\n", i, ret); + s->live = 0; + } + } + pci_set_drvdata(pdev, dev); + + testdev = dev; + return 0; +nomem: + printk(KERN_ERR "h2999_attach: out of memory.\n"); + return -ENOMEM; +} + +/** + * h2999_cleanup - free H2999 memory resources + * @dev: h2999 device + * + * Unregister and free up all of our slots + */ + +static int __devinit h2999_cleanup(struct h2999_dev *dev) +{ + struct h2999_slot *s; + int slot; + + for(slot = 0; slot < H2999_SLOTS; slot++) + { + s = &dev->slots[slot]; + if(s->live) + pci_hp_deregister(&s->hotplug_slot); + } + kfree(dev); +} + +/** + * h2999_detach - an H2999 controller vanished + * @dev: device that vanished + * + * Called when the PCI layer sees the bridge unplugged. 
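h2999_reconfigure_dock() above is a poll-driven edge detector: each pass computes the current dock state from h2999_is_docked() (a config read behind the bridge that fails or returns the all-ones master-abort pattern means undocked), and only when the result differs from the cached dev->docked, the `docked ^ dev->docked` test, does it run the enable or disable path and update the cache. A reduced model of that loop body, with the hardware probe replaced by a list of sampled states:

#include <stdio.h>

struct dev_model {
	int docked;		/* cached state, models dev->docked */
};

static void enable_slots(void)  { printf("restoring slots\n"); }
static void disable_slots(void) { printf("detaching slot drivers\n"); }

/* One polling pass: act only on a state *change*, exactly once per
 * dock or undock event, however often we are called. */
static void model_reconfigure(struct dev_model *dev, int now_docked)
{
	if (now_docked ^ dev->docked) {
		printf("h2999: Now %sdocked.\n", now_docked ? "" : "un");
		if (now_docked)
			enable_slots();
		else
			disable_slots();
		dev->docked = now_docked;
	}
}

int main(void)
{
	struct dev_model dev = { .docked = 1 };
	int samples[] = { 1, 1, 0, 0, 1 };	/* polled dock states */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		model_reconfigure(&dev, samples[i]);
	return 0;
}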
At the moment + * this doesn't happen and since its currently unclear what to do + * in the hot plug layer if it does this may be a good thing 8) + */ + +static void __devinit h2999_detach(struct pci_dev *pdev) +{ + struct h2999_dev *dev = pci_get_drvdata(pdev); + h2999_cleanup(dev); +} + + +static struct pci_device_id h2999_id_tbl[] __devinitdata = { + { PCI_VENDOR_ID_IBM, 0x0095, PCI_ANY_ID, PCI_ANY_ID, }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, h2999_id_tbl); + +static struct pci_driver h2999_driver = { + name: "h2999", + id_table: h2999_id_tbl, + probe: h2999_attach, + remove: __devexit_p(h2999_detach) + /* FIXME - PM functions */ +}; + +/* + * Test harness + */ + +static struct completion thread_done; + +static int h2999_thread(void *unused) +{ + lock_kernel(); + while(testdev != NULL) + { + set_current_state(TASK_INTERRUPTIBLE); + if(signal_pending(current)) + break; + schedule_timeout(HZ); + h2999_reconfigure_dock(testdev); + } + unlock_kernel(); + complete_and_exit(&thread_done, 0); +} + +static int __init h2999_init_module(void) +{ + int rc; + printk(KERN_INFO "IBM 20H2999 PCI docking bridge driver v0.01\n"); + + init_completion(&thread_done); + + rc = pci_module_init(&h2999_driver); + if (rc == 0) + { + if( kernel_thread(h2999_thread, NULL, CLONE_SIGHAND) >= 0) + return 0; + } + complete(&thread_done); + return rc; +} + +static void __exit h2999_cleanup_module(void) +{ + pci_unregister_driver(&h2999_driver); + wait_for_completion(&thread_done); +} + +module_init(h2999_init_module); +module_exit(h2999_cleanup_module); + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("IBM 20H2999 Docking Bridge Driver"); +MODULE_LICENSE("GPL"); + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/hotplug/tp600.h linux.21rc5-ac2/drivers/hotplug/tp600.h --- linux.21rc5/drivers/hotplug/tp600.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/hotplug/tp600.h 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,29 @@ +#define SLOT_NAME_SIZE 12 + +struct h2999_slot +{ + int slotid; + + struct hotplug_slot hotplug_slot; + struct hotplug_slot_info hotplug_info; + char name[SLOT_NAME_SIZE]; + + struct h2999_dev *dev; + struct pci_dev *pdev; + int live; + + struct pci_dev *pci[8]; + u32 save[8][64]; + u8 drivermap; +}; + +#define H2999_SLOTS 8 + +struct h2999_dev +{ + int docked; + int hotplug_bus; + struct pci_dev *pdev; + + struct h2999_slot slots[H2999_SLOTS]; +}; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/ide/Config.in linux.21rc5-ac2/drivers/ide/Config.in --- linux.21rc5/drivers/ide/Config.in 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/ide/Config.in 2003-05-28 15:24:31.000000000 +0100 @@ -63,7 +63,7 @@ dep_tristate ' NS87415 chipset support' CONFIG_BLK_DEV_NS87415 $CONFIG_BLK_DEV_IDEDMA_PCI dep_tristate ' OPTi 82C621 chipset enhanced support (EXPERIMENTAL)' CONFIG_BLK_DEV_OPTI621 $CONFIG_EXPERIMENTAL dep_tristate ' PROMISE PDC202{46|62|65|67} support' CONFIG_BLK_DEV_PDC202XX_OLD $CONFIG_BLK_DEV_IDEDMA_PCI - dep_bool ' Special UDMA Feature' CONFIG_PDC202XX_BURST $CONFIG_BLK_DEV_PDC202XX_OLD $CONFI_BLK_DEV_IDEDMA_PCI + dep_bool ' Special UDMA Feature' CONFIG_PDC202XX_BURST $CONFIG_BLK_DEV_PDC202XX_OLD $CONFIG_BLK_DEV_IDEDMA_PCI dep_tristate ' PROMISE PDC202{68|69|70|71|75|76|77} support' CONFIG_BLK_DEV_PDC202XX_NEW $CONFIG_BLK_DEV_IDEDMA_PCI if [ "$CONFIG_BLK_DEV_PDC202XX_OLD" = "y" -o "$CONFIG_BLK_DEV_PDC202XX_OLD" = "m" -o "$CONFIG_BLK_DEV_PDC202XX_NEW" = "y" -o "$CONFIG_BLK_DEV_PDC202XX_NEW" = "m" ]; 
then bool ' Special FastTrak Feature' CONFIG_PDC202XX_FORCE diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/ide/ide-dma.c linux.21rc5-ac2/drivers/ide/ide-dma.c --- linux.21rc5/drivers/ide/ide-dma.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/ide/ide-dma.c 2003-05-29 01:38:47.000000000 +0100 @@ -528,9 +528,10 @@ * the driver to resolve the problem, if a DMA transfer is still * in progress we continue to wait (arguably we need to add a * secondary 'I dont care what the drive thinks' timeout here) - * Finally if we have an interrupt but for some reason got the - * timeout first we complete the I/O. This can occur if an - * interrupt is lost or due to bugs. + * Finally if we have an interrupt we let it complete the I/O. + * But only one time - we clear expiry and if it's still not + * completed after WAIT_CMD, we error and retry in PIO. + * This can occur if an interrupt is lost or due to hang or bugs. */ static int dma_timer_expiry (ide_drive_t *drive) @@ -544,22 +545,24 @@ if ((dma_stat & 0x18) == 0x18) /* BUSY Stupid Early Timer !! */ return WAIT_CMD; - HWGROUP(drive)->expiry = NULL; /* one free ride for now */ + /* + * Clear the expiry handler in case we decide to wait more, + * next time timer expires it is an error + */ + HWGROUP(drive)->expiry = NULL; /* 1 dmaing, 2 error, 4 intr */ - if (dma_stat & 2) { /* ERROR */ - (void) hwif->ide_dma_end(drive); - return DRIVER(drive)->error(drive, - "dma_timer_expiry", hwif->INB(IDE_STATUS_REG)); - } + if (dma_stat & 2) /* ERROR */ + return -1; + if (dma_stat & 1) /* DMAing */ return WAIT_CMD; if (dma_stat & 4) /* Got an Interrupt */ - HWGROUP(drive)->handler(drive); + return WAIT_CMD; - return 0; + return 0; /* Unknown status -- reset the bus */ } /** diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/ide/ide-io.c linux.21rc5-ac2/drivers/ide/ide-io.c --- linux.21rc5/drivers/ide/ide-io.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/ide/ide-io.c 2003-05-29 01:38:47.000000000 +0100 @@ -901,10 +901,11 @@ * retry the current request in pio mode instead of risking tossing it * all away */ -void ide_dma_timeout_retry(ide_drive_t *drive) +static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { ide_hwif_t *hwif = HWIF(drive); struct request *rq; + ide_startstop_t ret = ide_stopped; /* * end current dma transaction @@ -914,8 +915,16 @@ /* * complain a little, later we might remove some of this verbosity */ - printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name); - (void) hwif->ide_dma_timeout(drive); + + if (error < 0) { + printk(KERN_ERR "%s: error waiting for DMA\n", drive->name); + (void)HWIF(drive)->ide_dma_end(drive); + ret = DRIVER(drive)->error(drive, "dma timeout retry", + hwif->INB(IDE_STATUS_REG)); + } else { + printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name); + (void) hwif->ide_dma_timeout(drive); + } /* * disable dma for now, but remember that we did so because of @@ -938,9 +947,9 @@ rq->current_nr_sectors = rq->bh->b_size >> 9; rq->hard_cur_sectors = rq->current_nr_sectors; rq->buffer = rq->bh->b_data; -} -EXPORT_SYMBOL(ide_dma_timeout_retry); + return ret; +} /** * ide_timer_expiry - handle lack of an IDE interrupt @@ -962,10 +971,9 @@ ide_handler_t *handler; ide_expiry_t *expiry; unsigned long flags; - unsigned long wait; + unsigned long wait = -1; spin_lock_irqsave(&io_request_lock, flags); - del_timer(&hwgroup->timer); if ((handler = hwgroup->handler) == NULL) { /* @@ -992,7 +1000,7 @@ } if 
((expiry = hwgroup->expiry) != NULL) { /* continue */ - if ((wait = expiry(drive)) != 0) { + if ((wait = expiry(drive)) > 0) { /* reset timer */ hwgroup->timer.expires = jiffies + wait; add_timer(&hwgroup->timer); @@ -1028,16 +1036,15 @@ startstop = handler(drive); } else { if (drive->waiting_for_dma) { - startstop = ide_stopped; - ide_dma_timeout_retry(drive); + startstop = ide_dma_timeout_retry(drive, wait); } else { startstop = DRIVER(drive)->error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG)); } } set_recovery_timer(hwif); drive->service_time = jiffies - drive->service_start; - enable_irq(hwif->irq); spin_lock_irq(&io_request_lock); + enable_irq(hwif->irq); if (startstop == ide_stopped) hwgroup->busy = 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/ide/pci/siimage.c linux.21rc5-ac2/drivers/ide/pci/siimage.c --- linux.21rc5/drivers/ide/pci/siimage.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/ide/pci/siimage.c 2003-05-22 17:29:37.000000000 +0100 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/pci/siimage.c Version 1.02 Jan 30, 2003 + * linux/drivers/ide/pci/siimage.c Version 1.04 May 22, 2003 * * Copyright (C) 2001-2002 Andre Hedrick * Copyright (C) 2003 Red Hat @@ -30,12 +30,70 @@ static struct pci_dev *siimage_devs[SIIMAGE_MAX_DEVS]; static int n_siimage_devs; -static char * print_siimage_get_info (char *buf, struct pci_dev *dev, int index) +/** + * siimage_selreg - return register base + * @hwif: interface + * @r: config offset + * + * Turn a config register offset into the right address in either + * PCI space or MMIO space to access the control register in question + * Thankfully this is a configuration operation so isnt performance + * criticial. + */ + +static unsigned long siimage_selreg(ide_hwif_t *hwif, int r) +{ + unsigned long base = (unsigned long)hwif->hwif_data; + base += 0xA0 + r; + if(hwif->mmio) + base += (hwif->channel << 6); + else + base += (hwif->channel << 4); + return base; +} + +/** + * siimage_seldev - return register base + * @hwif: interface + * @r: config offset + * + * Turn a config register offset into the right address in either + * PCI space or MMIO space to access the control register in question + * including accounting for the unit shift. + */ + +static inline unsigned long siimage_seldev(ide_drive_t *drive, int r) +{ + ide_hwif_t *hwif = HWIF(drive); + unsigned long base = (unsigned long)hwif->hwif_data; + base += 0xA0 + r; + if(hwif->mmio) + base += (hwif->channel << 6); + else + base += (hwif->channel << 4); + base |= drive->select.b.unit << drive->select.b.unit; + return base; +} + +/** + * print_siimage_get_info - print minimal proc information + * @buf: buffer to write into (kernel space) + * @dev: PCI device we are describing + * @index: Controller number + * + * Print the basic information for the state of the CMD680/SI3112 + * channel. We don't actually dump a lot of information out for + * this controller although we could expand it if we needed. + */ + +static char *print_siimage_get_info (char *buf, struct pci_dev *dev, int index) { char *p = buf; u8 mmio = (pci_get_drvdata(dev) != NULL) ? 1 : 0; - unsigned long bmdma = (mmio) ? 
((unsigned long) pci_get_drvdata(dev)) : - (pci_resource_start(dev, 4)); + unsigned long bmdma = pci_resource_start(dev, 4); + + if(mmio) + bmdma = pci_resource_start(dev, 5); p += sprintf(p, "\nController: %d\n", index); p += sprintf(p, "SiI%x Chipset.\n", dev->device); @@ -43,18 +101,20 @@ p += sprintf(p, "MMIO Base 0x%lx\n", bmdma); p += sprintf(p, "%s-DMA Base 0x%lx\n", (mmio)?"MMIO":"BM", bmdma); p += sprintf(p, "%s-DMA Base 0x%lx\n", (mmio)?"MMIO":"BM", bmdma+8); - - p += sprintf(p, "--------------- Primary Channel " - "---------------- Secondary Channel " - "-------------\n"); - p += sprintf(p, "--------------- drive0 --------- drive1 " - "-------- drive0 ---------- drive1 ------\n"); - p += sprintf(p, "PIO Mode: %s %s" - " %s %s\n", - "?", "?", "?", "?"); return (char *)p; } +/** + * siimage_get_info - proc callback + * @buffer: kernel buffer to complete + * @addr: written with base of data to return + * offset: seek offset + * count: bytes to fill in + * + * Called when the user reads data from the virtual file for this + * controller from /proc + */ + static int siimage_get_info (char *buffer, char **addr, off_t offset, int count) { char *p = buffer; @@ -89,9 +149,10 @@ { ide_hwif_t *hwif = HWIF(drive); u8 mode = 0, scsc = 0; + unsigned long base = (unsigned long) hwif->hwif_data; if (hwif->mmio) - scsc = hwif->INB(HWIFADDR(0x4A)); + scsc = hwif->INB(base + 0x4A); else pci_read_config_byte(hwif->pci_dev, 0x8A, &scsc); @@ -115,14 +176,23 @@ return mode; } +/** + * siimage_taskfile_timing - turn timing data to a mode + * @hwif: interface to query + * + * Read the timing data for the interface and return the + * mode that is being used. + */ + static byte siimage_taskfile_timing (ide_hwif_t *hwif) { u16 timing = 0x328a; + unsigned long addr = siimage_selreg(hwif, 2); if (hwif->mmio) - timing = hwif->INW(SELADDR(2)); + timing = hwif->INW(addr); else - pci_read_config_word(hwif->pci_dev, SELREG(2), &timing); + pci_read_config_word(hwif->pci_dev, addr, &timing); switch (timing) { case 0x10c1: return 4; @@ -146,17 +216,15 @@ static void siimage_tuneproc (ide_drive_t *drive, byte mode_wanted) { ide_hwif_t *hwif = HWIF(drive); - struct pci_dev *dev = hwif->pci_dev; u16 speedt = 0; - u8 unit = drive->select.b.unit; + unsigned long addr = siimage_seldev(drive, 0x04); if (hwif->mmio) - speedt = hwif->INW(SELADDR(0x04|(unit<INW(addr); else - pci_read_config_word(dev, SELADDR(0x04|(unit<pci_dev, addr, &speedt); /* cheat for now and use the docs */ -// switch(siimage_taskfile_timing(hwif)) { switch(mode_wanted) { case 4: speedt = 0x10c1; break; case 3: speedt = 0x10C3; break; @@ -166,11 +234,23 @@ default: speedt = 0x328A; break; } if (hwif->mmio) - hwif->OUTW(speedt, SELADDR(0x04|(unit<OUTW(speedt, addr); else - pci_write_config_word(dev, SELADDR(0x04|(unit<pci_dev, addr, speedt); } +/** + * config_siimage_chipset_for_pio - set drive timings + * @drive: drive to tune + * @speed we want + * + * Compute the best pio mode we can for a given device. Also honour + * the timings for the driver when dealing with mixed devices. 
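The siimage_selreg()/siimage_seldev() helpers introduced above replace the old ADJREG/SELREG/SELADDR macro maze (deleted from siimage.h further down) with plain arithmetic: each channel's register bank starts at offset 0xA0, and banks sit 0x40 apart in MMIO space but only 0x10 apart in PCI configuration space. A standalone sketch of that math; the base value and the demo harness are illustrative, the offsets come straight from the helpers:

#include <stdio.h>

/* Mirror of siimage_selreg(): start of a channel's register bank. */
static unsigned long selreg(unsigned long base, int mmio, int channel, int r)
{
	unsigned long addr = base + 0xA0 + r;

	/* MMIO banks are 0x40 apart, PCI config banks 0x10 apart */
	addr += channel << (mmio ? 6 : 4);
	return addr;
}

int main(void)
{
	unsigned long mmio_base = 0xf8000000UL;	/* hypothetical BAR5 mapping */
	int ch;

	for (ch = 0; ch < 2; ch++)
		printf("channel %d: mmio 0x%lx, cfg 0x%lx\n",
		       ch, selreg(mmio_base, 1, ch, 2), selreg(0, 0, ch, 2));
	return 0;
}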
Some + * of this is ugly but its all wrapped up here + * + * The CMD680 can also do VDMA - we need to start using that + */ + static void config_siimage_chipset_for_pio (ide_drive_t *drive, byte set_speed) { u8 channel_timings = siimage_taskfile_timing(HWIF(drive)); @@ -191,6 +271,16 @@ config_siimage_chipset_for_pio(drive, set_speed); } +/** + * siimage_tune_chipset - set controller timings + * @drive: Drive to set up + * @xferspeed: speed we want to achieve + * + * Tune the SII chipset for the desired mode. If we can't achieve + * the desired mode then tune for a lower one, but ultimately + * make the thing work. + */ + static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed) { u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }; @@ -200,23 +290,25 @@ ide_hwif_t *hwif = HWIF(drive); u16 ultra = 0, multi = 0; u8 mode = 0, unit = drive->select.b.unit; - u8 speed = ide_rate_filter(siimage_ratemask(drive), xferspeed); + u8 speed = ide_rate_filter(siimage_ratemask(drive), xferspeed); + unsigned long base = (unsigned long)hwif->hwif_data; u8 scsc = 0, addr_mask = ((hwif->channel) ? ((hwif->mmio) ? 0xF4 : 0x84) : ((hwif->mmio) ? 0xB4 : 0x80)); + + unsigned long ma = siimage_seldev(drive, 0x08); + unsigned long ua = siimage_seldev(drive, 0x0C); if (hwif->mmio) { - scsc = hwif->INB(HWIFADDR(0x4A)); - mode = hwif->INB(HWIFADDR(addr_mask)); - multi = hwif->INW(SELADDR(0x08|(unit<INW(SELADDR(0x0C|(unit<INB(base + 0x4A); + mode = hwif->INB(base + addr_mask); + multi = hwif->INW(ma); + ultra = hwif->INW(ua); } else { pci_read_config_byte(hwif->pci_dev, 0x8A, &scsc); pci_read_config_byte(hwif->pci_dev, addr_mask, &mode); - pci_read_config_word(hwif->pci_dev, - SELREG(0x08|(unit<pci_dev, - SELREG(0x0C|(unit<pci_dev, ma, &multi); + pci_read_config_word(hwif->pci_dev, ua, &ultra); } mode &= ~((unit) ? 0x30 : 0x03); @@ -259,20 +351,26 @@ } if (hwif->mmio) { - hwif->OUTB(mode, HWIFADDR(addr_mask)); - hwif->OUTW(multi, SELADDR(0x08|(unit<OUTW(ultra, SELADDR(0x0C|(unit<OUTB(mode, base + addr_mask); + hwif->OUTW(multi, ma); + hwif->OUTW(ultra, ua); } else { pci_write_config_byte(hwif->pci_dev, addr_mask, mode); - pci_write_config_word(hwif->pci_dev, - SELREG(0x08|(unit<pci_dev, - SELREG(0x0C|(unit<pci_dev, ma, multi); + pci_write_config_word(hwif->pci_dev, ua, ultra); } - return (ide_config_drive_speed(drive, speed)); } +/** + * config_chipset_for_dma - configure for DMA + * @drive: drive to configure + * + * Called by the IDE layer when it wants the timings set up. + * For the CMD680 we also need to set up the PIO timings and + * enable DMA. + */ + static int config_chipset_for_dma (ide_drive_t *drive) { u8 speed = ide_dma_speed(drive, siimage_ratemask(drive)); @@ -291,6 +389,16 @@ return ide_dma_enable(drive); } +/** + * siimage_configure_drive_for_dma - set up for DMA transfers + * @drive: drive we are going to set up + * + * Set up the drive for DMA, tune the controller and drive as + * required. 
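siimage_taskfile_timing() and siimage_tuneproc() above convert between the raw 16-bit timing word and a PIO mode number with mirrored switch statements. Only a few of the pairs are visible in these hunks, so the table below is deliberately partial: a sketch of the same mapping in data-driven form, using just the values the hunks show (0x10C1 for PIO4, 0x10C3 for PIO3, 0x328A as the driver's default word, treated here as mode 0).

#include <stdint.h>
#include <stdio.h>

static const struct { uint16_t word; int pio; } sii_timing[] = {
	{ 0x10C1, 4 },		/* pairs visible in the switches above */
	{ 0x10C3, 3 },
	{ 0x328A, 0 },		/* driver default / fallback word */
};

#define N (sizeof(sii_timing) / sizeof(sii_timing[0]))

static int word_to_pio(uint16_t w)
{
	unsigned i;

	for (i = 0; i < N; i++)
		if (sii_timing[i].word == w)
			return sii_timing[i].pio;
	return 0;	/* unknown word: assume the slowest mode */
}

static uint16_t pio_to_word(int pio)
{
	unsigned i;

	for (i = 0; i < N; i++)
		if (sii_timing[i].pio == pio)
			return sii_timing[i].word;
	return 0x328A;
}

int main(void)
{
	printf("0x10C1 -> PIO%d, PIO3 -> 0x%04X\n",
	       word_to_pio(0x10C1), (unsigned) pio_to_word(3));
	return 0;
}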
If the drive isn't suitable for DMA or we hit + * other problems then we will drop down to PIO and set up + * PIO appropriately + */ + static int siimage_config_drive_for_dma (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); @@ -340,18 +448,28 @@ { ide_hwif_t *hwif = HWIF(drive); u8 dma_altstat = 0; + unsigned long addr = siimage_selreg(hwif, 1); /* return 1 if INTR asserted */ if ((hwif->INB(hwif->dma_status) & 4) == 4) return 1; /* return 1 if Device INTR asserted */ - pci_read_config_byte(hwif->pci_dev, SELREG(1), &dma_altstat); + pci_read_config_byte(hwif->pci_dev, addr, &dma_altstat); if (dma_altstat & 8) return 0; //return 1; return 0; } +/** + * siimage_mmio_ide_dma_count - DMA bytes done + * @drive + * + * If we are doing VDMA the CMD680 requires a little bit + * of more careful handling and we have to read the counts + * off ourselves. For non VDMA life is normal. + */ + static int siimage_mmio_ide_dma_count (ide_drive_t *drive) { #ifdef SIIMAGE_VIRTUAL_DMAPIO @@ -359,9 +477,10 @@ ide_hwif_t *hwif = HWIF(drive); u32 count = (rq->nr_sectors * SECTOR_SIZE); u32 rcount = 0; + unsigned long addr = siimage_selreg(hwif, 0x1C); - hwif->OUTL(count, SELADDR(0x1C)); - rcount = hwif->INL(SELADDR(0x1C)); + hwif->OUTL(count, addr); + rcount = hwif->INL(addr); printk("\n%s: count = %d, rcount = %d, nr_sectors = %lu\n", drive->name, count, rcount, rq->nr_sectors); @@ -370,13 +489,22 @@ return __ide_dma_count(drive); } -/* returns 1 if dma irq issued, 0 otherwise */ +/** + * siimage_mmio_ide_dma_test_irq - check we caused an IRQ + * @drive: drive we are testing + * + * Check if we caused an IDE DMA interrupt. We may also have caused + * SATA status interrupts, if so we clean them up and continue. + */ + static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); + unsigned long base = (unsigned long)hwif->hwif_data; + unsigned long addr = siimage_selreg(hwif, 0x1); if (SATA_ERROR_REG) { - u32 ext_stat = hwif->INL(HWIFADDR(0x10)); + u32 ext_stat = hwif->INL(base + 0x10); u8 watchdog = 0; if (ext_stat & ((hwif->channel) ? 0x40 : 0x10)) { u32 sata_error = hwif->INL(SATA_ERROR_REG); @@ -403,7 +531,7 @@ return 1; /* return 1 if Device INTR asserted */ - if ((hwif->INB(SELADDR(1)) & 8) == 8) + if ((hwif->INB(addr) & 8) == 8) return 0; //return 1; return 0; @@ -432,11 +560,12 @@ { ide_hwif_t *hwif = HWIF(drive); u32 stat_config = 0; + unsigned long addr = siimage_selreg(hwif, 0); if (hwif->mmio) { - stat_config = hwif->INL(SELADDR(0)); + stat_config = hwif->INL(addr); } else - pci_read_config_dword(hwif->pci_dev, SELREG(0), &stat_config); + pci_read_config_dword(hwif->pci_dev, addr, &stat_config); switch (state) { case BUSSTATE_ON: @@ -458,6 +587,14 @@ return 0; } +/** + * siimage_reset_poll - wait for sata reset + * @drive: drive we are resetting + * + * Poll the SATA phy and see whether it has come back from the dead + * yet. 
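Both test_irq variants above boil down to the same two checks: bit 2 of the BMDMA status register means the controller latched an interrupt, while bit 3 of the per-channel register read through siimage_selreg(hwif, 1) only mirrors the device's INTR pin, which the driver reads but deliberately does not claim (note the commented-out "return 1"). A reduced sketch of that decision with the two register bytes passed in directly; the function name is mine:

#include <stdio.h>

/*
 * dma_stat: BMDMA status byte; alt: per-channel status byte.
 * Returns 1 only when the controller itself latched the IRQ.
 */
static int sii_irq_claimed(unsigned char dma_stat, unsigned char alt)
{
	if ((dma_stat & 0x04) == 0x04)
		return 1;		/* controller interrupt latched */
	if (alt & 0x08)
		return 0;		/* device INTR only: not claimed */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       sii_irq_claimed(0x04, 0x00),	/* 1 */
	       sii_irq_claimed(0x00, 0x08),	/* 0 */
	       sii_irq_claimed(0x00, 0x00));	/* 0 */
	return 0;
}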
+ */ + static int siimage_reset_poll (ide_drive_t *drive) { if (SATA_STATUS_REG) { @@ -506,19 +643,21 @@ { ide_hwif_t *hwif = HWIF(drive); u8 reset = 0; + unsigned long addr = siimage_selreg(hwif, 0); if (hwif->mmio) { - reset = hwif->INB(SELADDR(0)); - hwif->OUTB((reset|0x03), SELADDR(0)); + reset = hwif->INB(addr); + hwif->OUTB((reset|0x03), addr); + /* FIXME:posting */ udelay(25); - hwif->OUTB(reset, SELADDR(0)); - (void) hwif->INB(SELADDR(0)); + hwif->OUTB(reset, addr); + (void) hwif->INB(addr); } else { - pci_read_config_byte(hwif->pci_dev, SELREG(0), &reset); - pci_write_config_byte(hwif->pci_dev, SELREG(0), reset|0x03); + pci_read_config_byte(hwif->pci_dev, addr, &reset); + pci_write_config_byte(hwif->pci_dev, addr, reset|0x03); udelay(25); - pci_write_config_byte(hwif->pci_dev, SELREG(0), reset); - pci_read_config_byte(hwif->pci_dev, SELREG(0), &reset); + pci_write_config_byte(hwif->pci_dev, addr, reset); + pci_read_config_byte(hwif->pci_dev, addr, &reset); } if (SATA_STATUS_REG) { @@ -570,10 +709,20 @@ #endif /* DISPLAY_SIIMAGE_TIMINGS && CONFIG_PROC_FS */ } +/** + * setup_mmio_siimage - switch an SI controller into MMIO + * @dev: PCI device we are configuring + * @name: device name + * + * Attempt to put the device into mmio mode. There are some slight + * complications here with certain systems where the mmio bar isnt + * mapped so we have to be sure we can fall back to I/O. + */ + static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name) { unsigned long bar5 = pci_resource_start(dev, 5); - unsigned long len5 = pci_resource_len(dev, 5); + unsigned long barsize = pci_resource_len(dev, 5); u8 tmpbyte = 0; unsigned long addr; void *ioaddr; @@ -584,34 +733,37 @@ * spaces. */ - if(check_mem_region(bar5, len5)!=0) + if(!request_mem_region(bar5, barsize, name)) { printk(KERN_WARNING "siimage: IDE controller MMIO ports not available.\n"); return 0; } - ioaddr = ioremap_nocache(bar5, len5); + ioaddr = ioremap_nocache(bar5, barsize); if (ioaddr == NULL) + { + release_mem_region(bar5, barsize); return 0; + } pci_set_master(dev); pci_set_drvdata(dev, ioaddr); addr = (unsigned long) ioaddr; if (dev->device == PCI_DEVICE_ID_SII_3112) { - writel(0, DEVADDR(0x148)); - writel(0, DEVADDR(0x1C8)); + writel(0, addr + 0x148); + writel(0, addr + 0x1C8); } - writeb(0, DEVADDR(0xB4)); - writeb(0, DEVADDR(0xF4)); - tmpbyte = readb(DEVADDR(0x4A)); + writeb(0, addr + 0xB4); + writeb(0, addr + 0xF4); + tmpbyte = readb(addr + 0x4A); switch(tmpbyte & 0x30) { case 0x00: /* In 100 MHz clocking, try and switch to 133 */ - writeb(tmpbyte|0x10, DEVADDR(0x4A)); + writeb(tmpbyte|0x10, addr + 0x4A); break; case 0x10: /* On 133Mhz clocking */ @@ -622,34 +774,43 @@ case 0x30: /* Clocking is disabled */ /* 133 clock attempt to force it on */ - writeb(tmpbyte & ~0x20, DEVADDR(0x4A)); + writeb(tmpbyte & ~0x20, addr + 0x4A); break; } - writeb(0x72, DEVADDR(0xA1)); - writew(0x328A, DEVADDR(0xA2)); - writel(0x62DD62DD, DEVADDR(0xA4)); - writel(0x43924392, DEVADDR(0xA8)); - writel(0x40094009, DEVADDR(0xAC)); - writeb(0x72, DEVADDR(0xE1)); - writew(0x328A, DEVADDR(0xE2)); - writel(0x62DD62DD, DEVADDR(0xE4)); - writel(0x43924392, DEVADDR(0xE8)); - writel(0x40094009, DEVADDR(0xEC)); + writeb( 0x72, addr + 0xA1); + writew( 0x328A, addr + 0xA2); + writel(0x62DD62DD, addr + 0xA4); + writel(0x43924392, addr + 0xA8); + writel(0x40094009, addr + 0xAC); + writeb( 0x72, addr + 0xE1); + writew( 0x328A, addr + 0xE2); + writel(0x62DD62DD, addr + 0xE4); + writel(0x43924392, addr + 0xE8); + writel(0x40094009, addr + 
0xEC); if (dev->device == PCI_DEVICE_ID_SII_3112) { - writel(0xFFFF0000, DEVADDR(0x108)); - writel(0xFFFF0000, DEVADDR(0x188)); - writel(0x00680000, DEVADDR(0x148)); - writel(0x00680000, DEVADDR(0x1C8)); + writel(0xFFFF0000, addr + 0x108); + writel(0xFFFF0000, addr + 0x188); + writel(0x00680000, addr + 0x148); + writel(0x00680000, addr + 0x1C8); } - tmpbyte = readb(DEVADDR(0x4A)); + tmpbyte = readb(addr + 0x4A); proc_reports_siimage(dev, (tmpbyte>>=4), name); return 1; } +/** + * init_chipset_siimage - set up an SI device + * @dev: PCI device + * @name: device name + * + * Perform the initial PCI set up for this device. Attempt to switch + * to 133MHz clocking if the system isn't already set up to do it. + */ + static unsigned int __init init_chipset_siimage (struct pci_dev *dev, const char *name) { u32 class_rev = 0; @@ -686,14 +847,14 @@ break; } - pci_read_config_byte(dev, 0x8A, &tmpbyte); - pci_write_config_byte(dev, 0xA1, 0x72); - pci_write_config_word(dev, 0xA2, 0x328A); + pci_read_config_byte(dev, 0x8A, &tmpbyte); + pci_write_config_byte(dev, 0xA1, 0x72); + pci_write_config_word(dev, 0xA2, 0x328A); pci_write_config_dword(dev, 0xA4, 0x62DD62DD); pci_write_config_dword(dev, 0xA8, 0x43924392); pci_write_config_dword(dev, 0xAC, 0x40094009); - pci_write_config_byte(dev, 0xB1, 0x72); - pci_write_config_word(dev, 0xB2, 0x328A); + pci_write_config_byte(dev, 0xB1, 0x72); + pci_write_config_word(dev, 0xB2, 0x328A); pci_write_config_dword(dev, 0xB4, 0x62DD62DD); pci_write_config_dword(dev, 0xB8, 0x43924392); pci_write_config_dword(dev, 0xBC, 0x40094009); @@ -703,71 +864,78 @@ return 0; } +/** + * init_mmio_iops_siimage - set up the iops for MMIO + * @hwif: interface to set up + * + * The basic setup here is fairly simple, we can use standard MMIO + * operations. However we do have to set the taskfile register offsets + * by hand as there isnt a standard defined layout for them this + * time. + * + * The hardware supports buffered taskfiles and also some rather nice + * extended PRD tables. Unfortunately right now we don't. + */ + static void __init init_mmio_iops_siimage (ide_hwif_t *hwif) { struct pci_dev *dev = hwif->pci_dev; - unsigned long addr = (unsigned long) pci_get_drvdata(hwif->pci_dev); + void *addr = pci_get_drvdata(dev); u8 ch = hwif->channel; -// u16 i = 0; - hw_regs_t hw; + hw_regs_t hw; + unsigned long base; + + /* + * Fill in the basic HWIF bits + */ default_hwif_mmiops(hwif); + hwif->hwif_data = addr; + + /* + * Now set up the hw. We have to do this ourselves as + * the MMIO layout isnt the same as the the standard port + * based I/O + */ + memset(&hw, 0, sizeof(hw_regs_t)); + hw.priv = addr; -#if 1 -#ifdef SIIMAGE_BUFFERED_TASKFILE - hw.io_ports[IDE_DATA_OFFSET] = DEVADDR((ch) ? 0xD0 : 0x90); - hw.io_ports[IDE_ERROR_OFFSET] = DEVADDR((ch) ? 0xD1 : 0x91); - hw.io_ports[IDE_NSECTOR_OFFSET] = DEVADDR((ch) ? 0xD2 : 0x92); - hw.io_ports[IDE_SECTOR_OFFSET] = DEVADDR((ch) ? 0xD3 : 0x93); - hw.io_ports[IDE_LCYL_OFFSET] = DEVADDR((ch) ? 0xD4 : 0x94); - hw.io_ports[IDE_HCYL_OFFSET] = DEVADDR((ch) ? 0xD5 : 0x95); - hw.io_ports[IDE_SELECT_OFFSET] = DEVADDR((ch) ? 0xD6 : 0x96); - hw.io_ports[IDE_STATUS_OFFSET] = DEVADDR((ch) ? 0xD7 : 0x97); - hw.io_ports[IDE_CONTROL_OFFSET] = DEVADDR((ch) ? 0xDA : 0x9A); -#else /* ! SIIMAGE_BUFFERED_TASKFILE */ - hw.io_ports[IDE_DATA_OFFSET] = DEVADDR((ch) ? 0xC0 : 0x80); - hw.io_ports[IDE_ERROR_OFFSET] = DEVADDR((ch) ? 0xC1 : 0x81); - hw.io_ports[IDE_NSECTOR_OFFSET] = DEVADDR((ch) ? 0xC2 : 0x82); - hw.io_ports[IDE_SECTOR_OFFSET] = DEVADDR((ch) ? 
0xC3 : 0x83); - hw.io_ports[IDE_LCYL_OFFSET] = DEVADDR((ch) ? 0xC4 : 0x84); - hw.io_ports[IDE_HCYL_OFFSET] = DEVADDR((ch) ? 0xC5 : 0x85); - hw.io_ports[IDE_SELECT_OFFSET] = DEVADDR((ch) ? 0xC6 : 0x86); - hw.io_ports[IDE_STATUS_OFFSET] = DEVADDR((ch) ? 0xC7 : 0x87); - hw.io_ports[IDE_CONTROL_OFFSET] = DEVADDR((ch) ? 0xCA : 0x8A); -#endif /* SIIMAGE_BUFFERED_TASKFILE */ -#else + base = (unsigned long)addr; + if(ch) + base += 0xC0; + else + base += 0x80; + #ifdef SIIMAGE_BUFFERED_TASKFILE - for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) - hw.io_ports[i] = DEVADDR((ch) ? 0xD0 : 0x90)|(i); - hw.io_ports[IDE_CONTROL_OFFSET] = DEVADDR((ch) ? 0xDA : 0x9A); -#else /* ! SIIMAGE_BUFFERED_TASKFILE */ - for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) - hw.io_ports[i] = DEVADDR((ch) ? 0xC0 : 0x80)|(i); - hw.io_ports[IDE_CONTROL_OFFSET] = DEVADDR((ch) ? 0xCA : 0x8A); -#endif /* SIIMAGE_BUFFERED_TASKFILE */ + base += 0x10; + hwif->addressing = 1; #endif -#if 0 - printk(KERN_DEBUG "%s: ", hwif->name); - for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) - printk("0x%08x ", DEVADDR((ch) ? 0xC0 : 0x80)|(i)); - printk("0x%08x ", DEVADDR((ch) ? 0xCA : 0x8A)|(i)); -#endif + hw.io_ports[IDE_DATA_OFFSET] = base; + hw.io_ports[IDE_ERROR_OFFSET] = base + 1; + hw.io_ports[IDE_NSECTOR_OFFSET] = base + 2; + hw.io_ports[IDE_SECTOR_OFFSET] = base + 3; + hw.io_ports[IDE_LCYL_OFFSET] = base + 4; + hw.io_ports[IDE_HCYL_OFFSET] = base + 5; + hw.io_ports[IDE_SELECT_OFFSET] = base + 6; + hw.io_ports[IDE_STATUS_OFFSET] = base + 7; + hw.io_ports[IDE_CONTROL_OFFSET] = base + 10; hw.io_ports[IDE_IRQ_OFFSET] = 0; if (dev->device == PCI_DEVICE_ID_SII_3112) { - hw.sata_scr[SATA_STATUS_OFFSET] = DEVADDR((ch) ? 0x184 : 0x104); - hw.sata_scr[SATA_ERROR_OFFSET] = DEVADDR((ch) ? 0x188 : 0x108); - hw.sata_scr[SATA_CONTROL_OFFSET]= DEVADDR((ch) ? 0x180 : 0x100); - hw.sata_misc[SATA_MISC_OFFSET] = DEVADDR((ch) ? 0x1C0 : 0x140); - hw.sata_misc[SATA_PHY_OFFSET] = DEVADDR((ch) ? 0x1C4 : 0x144); - hw.sata_misc[SATA_IEN_OFFSET] = DEVADDR((ch) ? 0x1C8 : 0x148); + base = (unsigned long) addr; + if(ch) + base += 0x80; + hw.sata_scr[SATA_STATUS_OFFSET] = base + 0x104; + hw.sata_scr[SATA_ERROR_OFFSET] = base + 0x108; + hw.sata_scr[SATA_CONTROL_OFFSET]= base + 0x100; + hw.sata_misc[SATA_MISC_OFFSET] = base + 0x140; + hw.sata_misc[SATA_PHY_OFFSET] = base + 0x144; + hw.sata_misc[SATA_IEN_OFFSET] = base + 0x148; } - hw.priv = (void *) addr; -// hw.priv = pci_get_drvdata(hwif->pci_dev); hw.irq = hwif->pci_dev->irq; memcpy(&hwif->hw, &hw, sizeof(hw)); @@ -778,23 +946,32 @@ memcpy(hwif->sata_misc, hwif->hw.sata_misc, sizeof(hwif->hw.sata_misc)); } -#ifdef SIIMAGE_BUFFERED_TASKFILE - hwif->addressing = 1; -#endif /* SIIMAGE_BUFFERED_TASKFILE */ hwif->irq = hw.irq; - hwif->hwif_data = pci_get_drvdata(hwif->pci_dev); + + base = (unsigned long) addr; #ifdef SIIMAGE_LARGE_DMA - hwif->dma_base = DEVADDR((ch) ? 0x18 : 0x10); - hwif->dma_base2 = DEVADDR((ch) ? 0x08 : 0x00); - hwif->dma_prdtable = (hwif->dma_base2 + 4); +/* Watch the brackets - even Ken and Dennis get some language design wrong */ + hwif->dma_base = base + (ch ? 0x18 : 0x10); + hwif->dma_base2 = base + (ch ? 0x08 : 0x00); + hwif->dma_prdtable = hwif->dma_base2 + 4; #else /* ! SIIMAGE_LARGE_DMA */ - hwif->dma_base = DEVADDR((ch) ? 0x08 : 0x00); - hwif->dma_base2 = DEVADDR((ch) ? 0x18 : 0x10); + hwif->dma_base = base + (ch ? 0x08 : 0x00); + hwif->dma_base2 = base + (ch ? 
0x18 : 0x10); #endif /* SIIMAGE_LARGE_DMA */ hwif->mmio = 2; } +/** + * init_iops_siimage - set up iops + * @hwif: interface to set up + * + * Do the basic setup for the SIIMAGE hardware interface + * and then do the MMIO setup if we can. This is the first + * look in we get for setting up the hwif so that we + * can get the iops right before using them. + */ + static void __init init_iops_siimage (ide_hwif_t *hwif) { struct pci_dev *dev = hwif->pci_dev; @@ -802,9 +979,11 @@ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); class_rev &= 0xff; + + hwif->hwif_data = 0; hwif->rqsize = 128; - if ((dev->device == PCI_DEVICE_ID_SII_3112) && (!(class_rev))) + if (dev->device == PCI_DEVICE_ID_SII_3112 && !class_rev) hwif->rqsize = 16; if (pci_get_drvdata(dev) == NULL) @@ -812,17 +991,35 @@ init_mmio_iops_siimage(hwif); } +/** + * ata66_siimage - check for 80 pin cable + * @hwif: interface to check + * + * Check for the presence of an ATA66 capable cable on the + * interface. + */ + static unsigned int __init ata66_siimage (ide_hwif_t *hwif) { + unsigned long addr = siimage_selreg(hwif, 0); if (pci_get_drvdata(hwif->pci_dev) == NULL) { u8 ata66 = 0; - pci_read_config_byte(hwif->pci_dev, SELREG(0), &ata66); + pci_read_config_byte(hwif->pci_dev, addr, &ata66); return (ata66 & 0x01) ? 1 : 0; } - return (hwif->INB(SELADDR(0)) & 0x01) ? 1 : 0; + return (hwif->INB(addr) & 0x01) ? 1 : 0; } +/** + * init_hwif_siimage - set up hwif structs + * @hwif: interface to set up + * + * We do the basic set up of the interface structure. The SIIMAGE + * requires several custom handlers so we override the default + * ide DMA handlers appropriately + */ + static void __init init_hwif_siimage (ide_hwif_t *hwif) { hwif->autodma = 0; @@ -866,6 +1063,15 @@ hwif->drives[1].autodma = hwif->autodma; } +/** + * init_dma_siimage - set up IDE DMA + * @hwif: interface + * @dmabase: DMA base address to use + * + * For the SI chips this requires no special set up so we can just + * let the IDE DMA core do the usual work. + */ + static void __init init_dma_siimage (ide_hwif_t *hwif, unsigned long dmabase) { ide_setup_dma(hwif, dmabase, 8); @@ -874,6 +1080,15 @@ extern void ide_setup_pci_device(struct pci_dev *, ide_pci_device_t *); +/** + * siimage_init_one - pci layer discovery entry + * @dev: PCI device + * @id: ident table entry + * + * Called by the PCI code when it finds an SI680 or SI3112 controller. + * We then use the IDE PCI generic helper to do most of the work. + */ + static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id) { ide_pci_device_t *d = &siimage_chipsets[id->driver_data]; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/ide/pci/siimage.h linux.21rc5-ac2/drivers/ide/pci/siimage.h --- linux.21rc5/drivers/ide/pci/siimage.h 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/ide/pci/siimage.h 2003-05-29 01:44:07.000000000 +0100 @@ -13,12 +13,6 @@ #undef SIIMAGE_BUFFERED_TASKFILE #undef SIIMAGE_LARGE_DMA -#if 0 -typedef struct ide_io_ops_s siimage_iops { - -} -#endif - #define SII_DEBUG 0 #if SII_DEBUG @@ -27,12 +21,6 @@ #define siiprintk(x...) 
#endif -#define ADJREG(B,R) ((B)|(R)|((hwif->channel)<<(4+(2*(!!hwif->mmio))))) -#define SELREG(R) ADJREG((0xA0),(R)) -#define SELADDR(R) ((((unsigned long)hwif->hwif_data)*(!!hwif->mmio))|SELREG((R))) -#define HWIFADDR(R) ((((unsigned long)hwif->hwif_data)*(!!hwif->mmio))|(R)) -#define DEVADDR(R) (((unsigned long) pci_get_drvdata(dev))|(R)) - #if defined(DISPLAY_SIIMAGE_TIMINGS) && defined(CONFIG_PROC_FS) #include diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/ide/pci/sis5513.c linux.21rc5-ac2/drivers/ide/pci/sis5513.c --- linux.21rc5/drivers/ide/pci/sis5513.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/ide/pci/sis5513.c 2003-05-20 20:41:06.000000000 +0100 @@ -161,9 +161,10 @@ { "SiS748", PCI_DEVICE_ID_SI_748, ATA_133, 0 }, { "SiS746", PCI_DEVICE_ID_SI_746, ATA_133, 0 }, { "SiS745", PCI_DEVICE_ID_SI_745, ATA_133, 0 }, - { "SiS740", PCI_DEVICE_ID_SI_740, ATA_100, 0 }, + { "SiS740", PCI_DEVICE_ID_SI_740, ATA_133, 0 }, { "SiS735", PCI_DEVICE_ID_SI_735, ATA_100, SIS5513_LATENCY }, { "SiS730", PCI_DEVICE_ID_SI_730, ATA_100a, SIS5513_LATENCY }, + { "SiS655", PCI_DEVICE_ID_SI_655, ATA_133, 0 }, { "SiS652", PCI_DEVICE_ID_SI_652, ATA_133, 0 }, { "SiS651", PCI_DEVICE_ID_SI_651, ATA_133, 0 }, { "SiS650", PCI_DEVICE_ID_SI_650, ATA_133, 0 }, @@ -257,8 +258,8 @@ static char* chipset_capability[] = { "ATA", "ATA 16", "ATA 33", "ATA 66", - "ATA 100", "ATA 100", - "ATA 133", "ATA 133" + "ATA 100 (1st gen)", "ATA 100 (2nd gen)", + "ATA 133 (1st gen)", "ATA 133 (2nd gen)" }; #if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS) @@ -331,8 +332,8 @@ // Configuration space remapped to 0x70 drive_pci = 0x70; } - pci_read_config_dword(bmide_dev, (unsigned long)drive_pci+8*pos, &regdw0); - pci_read_config_dword(bmide_dev, (unsigned long)drive_pci+8*pos+4, &regdw1); + pci_read_config_dword(bmide_dev, (unsigned long)drive_pci+4*pos, &regdw0); + pci_read_config_dword(bmide_dev, (unsigned long)drive_pci+4*pos+8, &regdw1); p += sprintf(p, "Drive %d:\n", pos); } @@ -357,8 +358,7 @@ case ATA_100a: p += sprintf(p, cycle_time[(reg01 & 0x70) >> 4]); break; case ATA_100: case ATA_133a: p += sprintf(p, cycle_time[reg01 & 0x0F]); break; - case ATA_133: - default: p += sprintf(p, "133+ ?"); break; + default: p += sprintf(p, "?"); break; } p += sprintf(p, " \t UDMA Cycle Time "); switch(chipset_family) { @@ -367,42 +367,39 @@ case ATA_100a: p += sprintf(p, cycle_time[(reg11 & 0x70) >> 4]); break; case ATA_100: case ATA_133a: p += sprintf(p, cycle_time[reg11 & 0x0F]); break; - case ATA_133: - default: p += sprintf(p, "133+ ?"); break; + default: p += sprintf(p, "?"); break; } p += sprintf(p, "\n"); } + if (chipset_family < ATA_133) { /* else case TODO */ /* Data Active */ - p += sprintf(p, " Data Active Time "); - switch(chipset_family) { - case ATA_00: - case ATA_16: /* confirmed */ - case ATA_33: - case ATA_66: - case ATA_100a: p += sprintf(p, active_time[reg01 & 0x07]); break; - case ATA_100: - case ATA_133a: p += sprintf(p, active_time[(reg00 & 0x70) >> 4]); break; - case ATA_133: - default: p += sprintf(p, "133+ ?"); break; - } - p += sprintf(p, " \t Data Active Time "); - switch(chipset_family) { - case ATA_00: - case ATA_16: - case ATA_33: - case ATA_66: - case ATA_100a: p += sprintf(p, active_time[reg11 & 0x07]); break; - case ATA_100: - case ATA_133a: p += sprintf(p, active_time[(reg10 & 0x70) >> 4]); break; - case ATA_133: - default: p += sprintf(p, "133+ ?"); break; - } - p += sprintf(p, "\n"); + p += sprintf(p, " Data Active Time "); + switch(chipset_family) { + case
ATA_00: + case ATA_16: /* confirmed */ + case ATA_33: + case ATA_66: + case ATA_100a: p += sprintf(p, active_time[reg01 & 0x07]); break; + case ATA_100: + case ATA_133a: p += sprintf(p, active_time[(reg00 & 0x70) >> 4]); break; + default: p += sprintf(p, "?"); break; + } + p += sprintf(p, " \t Data Active Time "); + switch(chipset_family) { + case ATA_00: + case ATA_16: + case ATA_33: + case ATA_66: + case ATA_100a: p += sprintf(p, active_time[reg11 & 0x07]); break; + case ATA_100: + case ATA_133a: p += sprintf(p, active_time[(reg10 & 0x70) >> 4]); break; + default: p += sprintf(p, "?"); break; + } + p += sprintf(p, "\n"); /* Data Recovery */ /* warning: may need (reg&0x07) for pre ATA66 chips */ - if (chipset_family < ATA_133) { p += sprintf(p, " Data Recovery Time %s \t Data Recovery Time %s\n", recovery_time[reg00 & 0x0f], recovery_time[reg10 & 0x0f]); } @@ -430,7 +427,6 @@ p += sprintf(p, "\nSiS 5513 "); switch(chipset_family) { - case ATA_00: p += sprintf(p, "Unknown???"); break; case ATA_16: p += sprintf(p, "DMA 16"); break; case ATA_33: p += sprintf(p, "Ultra 33"); break; case ATA_66: p += sprintf(p, "Ultra 66"); break; @@ -867,6 +863,19 @@ return sis5513_config_drive_xfer_rate(drive); } +/* Helper function used at init time + * returns a PCI device revision ID + * (used to detect different IDE controller versions) + */ +static u8 __init devfn_rev(int device, int function) +{ + u8 revision; + /* Find device */ + struct pci_dev* dev = pci_find_slot(0,PCI_DEVFN(device,function)); + pci_read_config_byte(dev, PCI_REVISION_ID, &revision); + return revision; +} + /* Chip detection and general config */ static unsigned int __init init_chipset_sis5513 (struct pci_dev *dev, const char *name) { @@ -887,26 +896,24 @@ /* check 100/133 chipset family */ if (chipset_family == ATA_133) { u32 reg54h; - u16 reg02h; + u16 devid; pci_read_config_dword(dev, 0x54, &reg54h); + /* SiS962 and above report 0x5518 dev id if high bit is cleared */ pci_write_config_dword(dev, 0x54, (reg54h & 0x7fffffff)); - pci_read_config_word(dev, 0x02, &reg02h); + pci_read_config_word(dev, 0x02, &devid); + /* restore register 0x54 */ pci_write_config_dword(dev, 0x54, reg54h); + /* devid 5518 here means SiS962 or later - which supports ATA133 */ - if (reg02h != 0x5518) { + which supports ATA133. + These are refered by chipset_family = ATA133 + */ + if (devid != 0x5518) { u8 reg49h; - unsigned long sbrev; /* SiS961 family */ - - /* - * FIXME !!! GAK!!!!!!!!!! PCI DIRECT POKING - */ - outl(0x80001008, 0x0cf8); - sbrev = inl(0x0cfc); - pci_read_config_byte(dev, 0x49, &reg49h); - if (((sbrev & 0xff) == 0x10) && (reg49h & 0x80)) + /* check isa bridge device rev id */ + if (((devfn_rev(2,0) & 0xff) == 0x10) && (reg49h & 0x80)) chipset_family = ATA_133a; else chipset_family = ATA_100; @@ -924,6 +931,14 @@ u8 latency = (chipset_family == ATA_100)?
0x80 : 0x10; /* Lacking specs */ pci_write_config_byte(dev, PCI_LATENCY_TIMER, latency); } + + /* Special case for SiS630 : 630S/ET is ATA_100a */ + if (SiSHostChipInfo[i].host_id == PCI_DEVICE_ID_SI_630) { + /* check host device rev id */ + if (devfn_rev(0,0) >= 0x30) { + chipset_family = ATA_100a; + } + } } /* Make general config ops here diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/ide/setup-pci.c linux.21rc5-ac2/drivers/ide/setup-pci.c --- linux.21rc5/drivers/ide/setup-pci.c 2003-05-28 15:08:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/ide/setup-pci.c 2003-05-21 18:52:00.000000000 +0100 @@ -137,7 +137,7 @@ "native PCI mode\n", name); return -EOPNOTSUPP; } - printk("%s: placing both ports into native PCI mode\n", name); + printk(KERN_INFO "%s: placing both ports into native PCI mode\n", name); (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5); if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) || (progif & 5) != 5) { @@ -183,10 +183,9 @@ second_chance_to_dma: #endif /* CONFIG_BLK_DEV_IDEDMA_FORCED */ - if ((hwif->mmio) && (hwif->dma_base)) + if (hwif->mmio && hwif->dma_base) return hwif->dma_base; - - if (hwif->mate && hwif->mate->dma_base) { + else if (hwif->mate && hwif->mate->dma_base) { dma_base = hwif->mate->dma_base - (hwif->channel ? 0 : 8); } else { dma_base = (hwif->mmio) ? diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/ieee1394/oui2c.sh linux.21rc5-ac2/drivers/ieee1394/oui2c.sh --- linux.21rc5/drivers/ieee1394/oui2c.sh 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/ieee1394/oui2c.sh 2003-04-23 16:49:51.000000000 +0100 @@ -0,0 +1,23 @@ +#!/bin/sh + +cat < + +#ifdef CONFIG_IEEE1394_OUI_DB +struct oui_list_struct { + int oui; + char *name; +} oui_list[] = { +EOF + +while read oui name; do + echo " { 0x$oui, \"$name\" }," +done + +cat < 2) + +#define ERR(format, arg...) \ +printk(KERN_ERR __FILE__ ": %s: " format "\n" , __FUNCTION__ , ## arg) + +#define WARN(format, arg...) \ +printk(KERN_WARNING __FILE__ ": %s: " format "\n" , __FUNCTION__ , ## arg) + +#define INFO(format, arg...) \ +printk(KERN_INFO __FILE__ ": %s: " format "\n" , __FUNCTION__ , ## arg) + +#else + #define ERR(format, arg...) \ printk(KERN_ERR __FILE__ ": " __FUNCTION__ ": " format "\n" , ## arg) @@ -227,6 +240,8 @@ #define INFO(format, arg...) \ printk(KERN_INFO __FILE__ ": " __FUNCTION__ ": " format "\n" , ## arg) +#endif + #include "isdnhdlc.h" #include "fsm.h" #include "hisax_if.h" diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/isdn/isdn_tty.c linux.21rc5-ac2/drivers/isdn/isdn_tty.c --- linux.21rc5/drivers/isdn/isdn_tty.c 2003-05-28 15:07:57.000000000 +0100 +++ linux.21rc5-ac2/drivers/isdn/isdn_tty.c 2003-04-22 16:44:37.000000000 +0100 @@ -1807,7 +1807,7 @@ #endif return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
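The new devfn_rev() helper in the sis5513.c hunk above exists to kill the "FIXME !!! GAK" direct 0xCF8/0xCFC port pokes: instead of hand-rolling a type-1 configuration cycle on bus 0, it looks the device up with pci_find_slot() and reads PCI_REVISION_ID through the PCI layer. One thing the patch's version skips is the NULL return from pci_find_slot() when the slot is empty; a defensively written variant (the name and the extra bus parameter are mine) would look like:

#include <linux/init.h>
#include <linux/pci.h>

/* Like devfn_rev() above, but tolerating an absent device. */
static u8 __init devfn_rev_safe(int bus, int device, int function)
{
	u8 revision = 0;
	struct pci_dev *dev = pci_find_slot(bus, PCI_DEVFN(device, function));

	if (dev)
		pci_read_config_byte(dev, PCI_REVISION_ID, &revision);
	return revision;	/* 0 if the slot is empty */
}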
Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/macintosh/macserial.c linux.21rc5-ac2/drivers/macintosh/macserial.c --- linux.21rc5/drivers/macintosh/macserial.c 2003-05-28 15:08:01.000000000 +0100 +++ linux.21rc5-ac2/drivers/macintosh/macserial.c 2003-04-22 16:44:37.000000000 +0100 @@ -1957,7 +1957,7 @@ } OPNDBG("rs_close ttyS%d, count = %d\n", info->line, info->count); - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/Config.in linux.21rc5-ac2/drivers/md/Config.in --- linux.21rc5/drivers/md/Config.in 2003-05-28 15:08:06.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/Config.in 2003-04-22 16:44:37.000000000 +0100 @@ -14,5 +14,8 @@ dep_tristate ' Multipath I/O support' CONFIG_MD_MULTIPATH $CONFIG_BLK_DEV_MD dep_tristate ' Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM $CONFIG_MD +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + dep_tristate ' Device-mapper support (EXPERIMENTAL)' CONFIG_BLK_DEV_DM $CONFIG_MD +fi endmenu diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm.c linux.21rc5-ac2/drivers/md/dm.c --- linux.21rc5/drivers/md/dm.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,1158 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include + +/* we only need this for the lv_bmap struct definition, not happy */ +#include + +#define DEFAULT_READ_AHEAD 64 + +static const char *_name = DM_NAME; + +static int major = 0; +static int _major = 0; + +struct io_hook { + struct mapped_device *md; + struct target *target; + int rw; + + void (*end_io) (struct buffer_head * bh, int uptodate); + void *context; +}; + +static kmem_cache_t *_io_hook_cache; + +static struct mapped_device *_devs[MAX_DEVICES]; +static struct rw_semaphore _dev_locks[MAX_DEVICES]; + +/* + * This lock is only held by dm_create and dm_set_name to avoid + * race conditions where someone else may create a device with + * the same name. 
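This comment is the whole story of _create_lock: name uniqueness is check-then-act, so the check and the slot claim must happen as one atomic step or two concurrent creates could both pass check_name(). The pattern as dm_create() applies it further down, in outline form only (function names are from this file, the allocation body is elided):

	spin_lock(&_create_lock);
	if (check_name(name) || check_uuid(uuid)) {
		spin_unlock(&_create_lock);
		return -EINVAL;		/* someone already owns the name */
	}
	/* ... allocate the device and publish it in _devs[] ... */
	spin_unlock(&_create_lock);
	return 0;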
+ */ +static spinlock_t _create_lock = SPIN_LOCK_UNLOCKED; + +/* block device arrays */ +static int _block_size[MAX_DEVICES]; +static int _blksize_size[MAX_DEVICES]; +static int _hardsect_size[MAX_DEVICES]; + +static devfs_handle_t _dev_dir; + +static int request(request_queue_t * q, int rw, struct buffer_head *bh); +static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb); + +/* + * Protect the mapped_devices referenced from _dev[] + */ +struct mapped_device *dm_get_r(int minor) +{ + struct mapped_device *md; + + if (minor >= MAX_DEVICES) + return NULL; + + down_read(_dev_locks + minor); + md = _devs[minor]; + if (!md) + up_read(_dev_locks + minor); + return md; +} + +struct mapped_device *dm_get_w(int minor) +{ + struct mapped_device *md; + + if (minor >= MAX_DEVICES) + return NULL; + + down_write(_dev_locks + minor); + md = _devs[minor]; + if (!md) + up_write(_dev_locks + minor); + return md; +} + +static int namecmp(struct mapped_device *md, const char *name, int nametype) +{ + switch (nametype) { + case DM_LOOKUP_BY_NAME: + return strcmp(md->name, name); + break; + + case DM_LOOKUP_BY_UUID: + if (!md->uuid) + return -1; /* never equal */ + + return strcmp(md->uuid, name); + break; + + default: + DMWARN("Unknown comparison type in namecmp: %d", nametype); + BUG(); + } + + return -1; +} + +/* + * The interface (eg, ioctl) will probably access the devices + * through these slow 'by name' locks, this needs improving at + * some point if people start playing with *large* numbers of dm + * devices. + */ +struct mapped_device *dm_get_name_r(const char *name, int nametype) +{ + int i; + struct mapped_device *md; + + for (i = 0; i < MAX_DEVICES; i++) { + md = dm_get_r(i); + if (md) { + if (!namecmp(md, name, nametype)) + return md; + + dm_put_r(md); + } + } + + return NULL; +} + +struct mapped_device *dm_get_name_w(const char *name, int nametype) +{ + int i; + struct mapped_device *md; + + /* + * To avoid getting write locks on all the devices we try + * and promote a read lock to a write lock, this can + * fail, in which case we just start again. 
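The "promotion" the comment above describes is really drop-and-reacquire: 2.4 rw-semaphores cannot be upgraded in place, so dm_get_name_w() releases the read lock it used for the scan, takes the write lock, and re-runs the name comparison, because the device can be renamed or destroyed in the unlocked window; any mismatch restarts the scan. The core of that pattern, reduced to a single slot, with illustrative names (the same get/put pairing rule applies: a successful claim returns with the lock held):

#include <linux/rwsem.h>
#include <linux/string.h>

static DECLARE_RWSEM(slot_lock);
static char slot_name[64];

/* Returns 0 with slot_lock held for write, or -ENXIO. */
static int claim_slot(const char *name)
{
restart:
	down_read(&slot_lock);
	if (strcmp(slot_name, name) != 0) {
		up_read(&slot_lock);
		return -ENXIO;
	}
	up_read(&slot_lock);		/* no in-place upgrade ... */

	down_write(&slot_lock);		/* ... so reacquire for write */
	if (strcmp(slot_name, name) != 0) {
		up_write(&slot_lock);	/* lost a race while unlocked */
		goto restart;
	}
	return 0;
}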
+ */ + + restart: + for (i = 0; i < MAX_DEVICES; i++) { + md = dm_get_r(i); + if (!md) + continue; + + if (namecmp(md, name, nametype)) { + dm_put_r(md); + continue; + } + + /* found it */ + dm_put_r(md); + + md = dm_get_w(i); + if (!md) + goto restart; + + if (namecmp(md, name, nametype)) { + dm_put_w(md); + goto restart; + } + + return md; + } + + return NULL; +} + +void dm_put_r(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + + if (minor >= MAX_DEVICES) + return; + + up_read(_dev_locks + minor); +} + +void dm_put_w(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + + if (minor >= MAX_DEVICES) + return; + + up_write(_dev_locks + minor); +} + +/* + * Setup and tear down the driver + */ +static __init void init_locks(void) +{ + int i; + + for (i = 0; i < MAX_DEVICES; i++) + init_rwsem(_dev_locks + i); +} + +static __init int local_init(void) +{ + int r; + + init_locks(); + + /* allocate a slab for the io-hooks */ + if (!_io_hook_cache && + !(_io_hook_cache = kmem_cache_create("dm io hooks", + sizeof(struct io_hook), + 0, 0, NULL, NULL))) + return -ENOMEM; + + _major = major; + r = devfs_register_blkdev(_major, _name, &dm_blk_dops); + if (r < 0) { + DMERR("register_blkdev failed"); + kmem_cache_destroy(_io_hook_cache); + return r; + } + + if (!_major) + _major = r; + + /* set up the arrays */ + read_ahead[_major] = DEFAULT_READ_AHEAD; + blk_size[_major] = _block_size; + blksize_size[_major] = _blksize_size; + hardsect_size[_major] = _hardsect_size; + + blk_queue_make_request(BLK_DEFAULT_QUEUE(_major), request); + + _dev_dir = devfs_mk_dir(0, DM_DIR, NULL); + + return 0; +} + +static void local_exit(void) +{ + if (kmem_cache_destroy(_io_hook_cache)) + DMWARN("io_hooks still allocated during unregistration"); + _io_hook_cache = NULL; + + if (devfs_unregister_blkdev(_major, _name) < 0) + DMERR("devfs_unregister_blkdev failed"); + + read_ahead[_major] = 0; + blk_size[_major] = NULL; + blksize_size[_major] = NULL; + hardsect_size[_major] = NULL; + _major = 0; + + DMINFO("cleaned up"); +} + +/* + * We have a lot of init/exit functions, so it seems easier to + * store them in an array. The disposable macro 'xx' + * expands a prefix into a pair of function names. 
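dm_init() walks the _inits[] table below forwards and, on the first failure, unwinds only the constructors that already succeeded: initialise-or-roll-back in miniature. The shape of it as a standalone program, with subsystem names invented for the demo:

#include <stdio.h>

static int  net_init(void)  { puts("net up");     return 0; }
static void net_exit(void)  { puts("net down"); }
static int  disk_init(void) { puts("disk failed"); return -1; }
static void disk_exit(void) { puts("disk down"); }

static struct { int (*init)(void); void (*exit)(void); } inits[] = {
	{ net_init,  net_exit  },
	{ disk_init, disk_exit },
};

int main(void)
{
	const int count = sizeof(inits) / sizeof(*inits);
	int i, r = 0;

	for (i = 0; i < count; i++)
		if ((r = inits[i].init()))
			break;

	if (r)
		while (i--)		/* undo in reverse order */
			inits[i].exit();
	return r ? 1 : 0;
}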
+ */ +static struct { + int (*init)(void); + void (*exit)(void); + +} _inits[] = { +#define xx(n) {n ## _init, n ## _exit}, + xx(local) + xx(dm_target) + xx(dm_linear) + xx(dm_stripe) + xx(dm_snapshot) + xx(dm_interface) +#undef xx +}; + +static int __init dm_init(void) +{ + const int count = sizeof(_inits) / sizeof(*_inits); + + int r, i; + + for (i = 0; i < count; i++) { + r = _inits[i].init(); + if (r) + goto bad; + } + + return 0; + + bad: + while (i--) + _inits[i].exit(); + + return r; +} + +static void __exit dm_exit(void) +{ + int i = sizeof(_inits) / sizeof(*_inits); + + dm_destroy_all(); + while (i--) + _inits[i].exit(); +} + +/* + * Block device functions + */ +static int dm_blk_open(struct inode *inode, struct file *file) +{ + struct mapped_device *md; + + md = dm_get_w(MINOR(inode->i_rdev)); + if (!md) + return -ENXIO; + + md->use_count++; + dm_put_w(md); + + return 0; +} + +static int dm_blk_close(struct inode *inode, struct file *file) +{ + struct mapped_device *md; + + md = dm_get_w(MINOR(inode->i_rdev)); + if (!md) + return -ENXIO; + + if (md->use_count < 1) + DMWARN("incorrect reference count found in mapped_device"); + + md->use_count--; + dm_put_w(md); + + return 0; +} + +/* In 512-byte units */ +#define VOLUME_SIZE(minor) (_block_size[(minor)] << 1) + +static int dm_blk_ioctl(struct inode *inode, struct file *file, + uint command, unsigned long a) +{ + int minor = MINOR(inode->i_rdev); + long size; + + if (minor >= MAX_DEVICES) + return -ENXIO; + + switch (command) { + case BLKROSET: + case BLKROGET: + case BLKRASET: + case BLKRAGET: + case BLKFLSBUF: + case BLKSSZGET: + //case BLKRRPART: /* Re-read partition tables */ + //case BLKPG: + case BLKELVGET: + case BLKELVSET: + case BLKBSZGET: + case BLKBSZSET: + return blk_ioctl(inode->i_rdev, command, a); + break; + + case BLKGETSIZE: + size = VOLUME_SIZE(minor); + if (copy_to_user((void *) a, &size, sizeof(long))) + return -EFAULT; + break; + + case BLKGETSIZE64: + size = VOLUME_SIZE(minor); + if (put_user((u64) ((u64) size) << 9, (u64 *) a)) + return -EFAULT; + break; + + case BLKRRPART: + return -ENOTTY; + + case LV_BMAP: + return dm_user_bmap(inode, (struct lv_bmap *) a); + + default: + DMWARN("unknown block ioctl 0x%x", command); + return -ENOTTY; + } + + return 0; +} + +static inline struct io_hook *alloc_io_hook(void) +{ + return kmem_cache_alloc(_io_hook_cache, GFP_NOIO); +} + +static inline void free_io_hook(struct io_hook *ih) +{ + kmem_cache_free(_io_hook_cache, ih); +} + +/* + * FIXME: We need to decide if deferred_io's need + * their own slab, I say no for now since they are + * only used when the device is suspended. + */ +static inline struct deferred_io *alloc_deferred(void) +{ + return kmalloc(sizeof(struct deferred_io), GFP_NOIO); +} + +static inline void free_deferred(struct deferred_io *di) +{ + kfree(di); +} + +/* + * Call a target's optional error function if an I/O failed. + */ +static inline int call_err_fn(struct io_hook *ih, struct buffer_head *bh) +{ + dm_err_fn err = ih->target->type->err; + + if (err) + return err(bh, ih->rw, ih->target->private); + + return 0; +} + +/* + * bh->b_end_io routine that decrements the pending count + * and then calls the original bh->b_end_io fn. 
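The io_hook machinery being described works by interposition: before a buffer head is handed to a target, its b_end_io/b_private pair is saved in the hook and replaced with dec_pending(); on completion the originals are restored before the saved completion runs, so stacked layers unwind cleanly. A type-simplified sketch of just the save/restore dance (the struct names here are mine; the real code uses struct buffer_head and struct io_hook):

/* Sketch with simplified types, mirroring the hook/unhook above. */
struct bh_like {
	void (*end_io)(struct bh_like *, int uptodate);
	void *private;
};

struct hook {
	void (*saved_end_io)(struct bh_like *, int uptodate);
	void *saved_private;
};

static void hooked_end_io(struct bh_like *bh, int uptodate)
{
	struct hook *h = bh->private;

	/* restore first, so the original completion sees its own state */
	bh->end_io = h->saved_end_io;
	bh->private = h->saved_private;
	/* the real code frees the hook here: free_io_hook() */
	bh->end_io(bh, uptodate);
}

static void hook_bh(struct bh_like *bh, struct hook *h)
{
	h->saved_end_io = bh->end_io;
	h->saved_private = bh->private;
	bh->end_io = hooked_end_io;
	bh->private = h;
}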
+ */ +static void dec_pending(struct buffer_head *bh, int uptodate) +{ + struct io_hook *ih = bh->b_private; + + if (!uptodate && call_err_fn(ih, bh)) + return; + + if (atomic_dec_and_test(&ih->md->pending)) + /* nudge anyone waiting on suspend queue */ + wake_up(&ih->md->wait); + + bh->b_end_io = ih->end_io; + bh->b_private = ih->context; + free_io_hook(ih); + + bh->b_end_io(bh, uptodate); +} + +/* + * Add the bh to the list of deferred io. + */ +static int queue_io(struct buffer_head *bh, int rw) +{ + struct deferred_io *di = alloc_deferred(); + struct mapped_device *md; + + if (!di) + return -ENOMEM; + + md = dm_get_w(MINOR(bh->b_rdev)); + if (!md) { + free_deferred(di); + return -ENXIO; + } + + if (!md->suspended) { + dm_put_w(md); + free_deferred(di); + return 1; + } + + di->bh = bh; + di->rw = rw; + di->next = md->deferred; + md->deferred = di; + + dm_put_w(md); + + return 0; /* deferred successfully */ +} + +/* + * Do the bh mapping for a given leaf + */ +static inline int __map_buffer(struct mapped_device *md, + struct buffer_head *bh, int rw, int leaf) +{ + int r; + dm_map_fn fn; + void *context; + struct io_hook *ih = NULL; + struct target *ti = md->map->targets + leaf; + + fn = ti->type->map; + context = ti->private; + + ih = alloc_io_hook(); + + if (!ih) + return -1; + + ih->md = md; + ih->rw = rw; + ih->target = ti; + ih->end_io = bh->b_end_io; + ih->context = bh->b_private; + + r = fn(bh, rw, context); + + if (r > 0) { + /* hook the end io request fn */ + atomic_inc(&md->pending); + bh->b_end_io = dec_pending; + bh->b_private = ih; + + } else if (r == 0) + /* we don't need to hook */ + free_io_hook(ih); + + else if (r < 0) { + free_io_hook(ih); + return -1; + } + + return r; +} + +/* + * Search the btree for the correct target. + */ +static inline int __find_node(struct dm_table *t, struct buffer_head *bh) +{ + int l, n = 0, k = 0; + offset_t *node; + + for (l = 0; l < t->depth; l++) { + n = get_child(n, k); + node = get_node(t, l, n); + + for (k = 0; k < KEYS_PER_NODE; k++) + if (node[k] >= bh->b_rsector) + break; + } + + return (KEYS_PER_NODE * n) + k; +} + +static int request(request_queue_t * q, int rw, struct buffer_head *bh) +{ + struct mapped_device *md; + int r, minor = MINOR(bh->b_rdev); + unsigned int block_size = _blksize_size[minor]; + + md = dm_get_r(minor); + if (!md) { + buffer_IO_error(bh); + return 0; + } + + /* + * Sanity checks. + */ + if (bh->b_size > block_size) + DMERR("request is larger than block size " + "b_size (%d), block size (%d)", + bh->b_size, block_size); + + if (bh->b_rsector & ((bh->b_size >> 9) - 1)) + DMERR("misaligned block requested logical " + "sector (%lu), b_size (%d)", + bh->b_rsector, bh->b_size); + + /* + * If we're suspended we have to queue + * this io for later. + */ + while (md->suspended) { + dm_put_r(md); + + if (rw == READA) + goto bad_no_lock; + + r = queue_io(bh, rw); + + if (r < 0) + goto bad_no_lock; + + else if (r == 0) + return 0; /* deferred successfully */ + + /* + * We're in a while loop, because someone could suspend + * before we get to the following read lock. 
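The loop being described has a three-way outcome from queue_io(): 0 means the bh was parked on md->deferred for dm_resume() to replay, 1 means the device stopped being suspended between dropping the read lock and queue_io() taking the write lock (so mapping should simply be retried), and a negative value is a hard error. Schematically (control flow only, not compilable on its own):

	/* Outline of the suspend/defer retry in request() above. */
	for (;;) {
		md = dm_get_r(minor);	/* read lock held on success */
		if (!md)
			goto io_error;
		if (!md->suspended)
			break;		/* map it; lock is still held */
		dm_put_r(md);		/* queue_io() takes the write lock */

		switch (queue_io(bh, rw)) {
		case 0:
			return 0;	/* deferred until dm_resume() */
		case 1:
			continue;	/* raced with resume: re-check */
		default:
			goto io_error;
		}
	}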
+ */ + md = dm_get_r(minor); + if (!md) { + buffer_IO_error(bh); + return 0; + } + } + + if ((r = __map_buffer(md, bh, rw, __find_node(md->map, bh))) < 0) + goto bad; + + dm_put_r(md); + return r; + + bad: + dm_put_r(md); + + bad_no_lock: + buffer_IO_error(bh); + return 0; +} + +static int check_dev_size(int minor, unsigned long block) +{ + /* FIXME: check this */ + unsigned long max_sector = (_block_size[minor] << 1) + 1; + unsigned long sector = (block + 1) * (_blksize_size[minor] >> 9); + + return (sector > max_sector) ? 0 : 1; +} + +/* + * Creates a dummy buffer head and maps it (for lilo). + */ +static int do_bmap(kdev_t dev, unsigned long block, + kdev_t * r_dev, unsigned long *r_block) +{ + struct mapped_device *md; + struct buffer_head bh; + int minor = MINOR(dev), r; + struct target *t; + + md = dm_get_r(minor); + if (!md) + return -ENXIO; + + if (md->suspended) { + dm_put_r(md); + return -EPERM; + } + + if (!check_dev_size(minor, block)) { + dm_put_r(md); + return -EINVAL; + } + + /* setup dummy bh */ + memset(&bh, 0, sizeof(bh)); + bh.b_blocknr = block; + bh.b_dev = bh.b_rdev = dev; + bh.b_size = _blksize_size[minor]; + bh.b_rsector = block * (bh.b_size >> 9); + + /* find target */ + t = md->map->targets + __find_node(md->map, &bh); + + /* do the mapping */ + r = t->type->map(&bh, READ, t->private); + + *r_dev = bh.b_rdev; + *r_block = bh.b_rsector / (bh.b_size >> 9); + + dm_put_r(md); + return r; +} + +/* + * Marshals arguments and results between user and kernel space. + */ +static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb) +{ + unsigned long block, r_block; + kdev_t r_dev; + int r; + + if (get_user(block, &lvb->lv_block)) + return -EFAULT; + + if ((r = do_bmap(inode->i_rdev, block, &r_dev, &r_block))) + return r; + + if (put_user(kdev_t_to_nr(r_dev), &lvb->lv_dev) || + put_user(r_block, &lvb->lv_block)) + return -EFAULT; + + return 0; +} + +/* + * See if the device with a specific minor # is free. The write + * lock is held when it returns successfully. + */ +static inline int specific_dev(int minor, struct mapped_device *md) +{ + if (minor >= MAX_DEVICES) { + DMWARN("request for a mapped_device beyond MAX_DEVICES (%d)", + MAX_DEVICES); + return -1; + } + + down_write(_dev_locks + minor); + if (_devs[minor]) { + /* in use */ + up_write(_dev_locks + minor); + return -1; + } + + return minor; +} + +/* + * Find the first free device. Again the write lock is held on + * success. + */ +static int any_old_dev(struct mapped_device *md) +{ + int i; + + for (i = 0; i < MAX_DEVICES; i++) + if (specific_dev(i, md) != -1) + return i; + + return -1; +} + +/* + * Allocate and initialise a blank device. + * Caller must ensure uuid is null-terminated. + * Device is returned with a write lock held. + */ +static struct mapped_device *alloc_dev(const char *name, const char *uuid, + int minor) +{ + struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL); + int len; + + if (!md) { + DMWARN("unable to allocate device, out of memory."); + return NULL; + } + + memset(md, 0, sizeof(*md)); + + /* + * This grabs the write lock if it succeeds. + */ + minor = (minor < 0) ? any_old_dev(md) : specific_dev(minor, md); + if (minor < 0) { + kfree(md); + return NULL; + } + + md->dev = MKDEV(_major, minor); + md->suspended = 0; + + strncpy(md->name, name, sizeof(md->name) - 1); + md->name[sizeof(md->name) - 1] = '\0'; + + /* + * Copy in the uuid. 
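dm_user_bmap() above is the kernel half of the LV_BMAP handshake (the lv_bmap include at the top of the file exists purely for this): userspace passes a struct lv_bmap carrying a logical block, and the kernel overwrites it with the real device number and remapped block so a boot loader like LILO can record raw block lists. The matching userspace call looks roughly like this; the device path is made up and error handling is minimal:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/lvm.h>		/* struct lv_bmap and LV_BMAP */

int main(void)
{
	struct lv_bmap lvb;
	int fd = open("/dev/mapper/example", O_RDONLY);	/* illustrative */

	if (fd < 0)
		return 1;

	lvb.lv_block = 0;	/* logical block to translate */
	if (ioctl(fd, LV_BMAP, &lvb) < 0) {
		perror("LV_BMAP");
		close(fd);
		return 1;
	}
	printf("block 0 -> dev 0x%x, block %u\n",
	       (unsigned) lvb.lv_dev, (unsigned) lvb.lv_block);
	close(fd);
	return 0;
}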
+ */ + if (uuid && *uuid) { + len = strlen(uuid) + 1; + if (!(md->uuid = kmalloc(len, GFP_KERNEL))) { + DMWARN("unable to allocate uuid - out of memory."); + kfree(md); + return NULL; + } + strcpy(md->uuid, uuid); + } + + init_waitqueue_head(&md->wait); + return md; +} + +static int __register_device(struct mapped_device *md) +{ + md->devfs_entry = + devfs_register(_dev_dir, md->name, DEVFS_FL_CURRENT_OWNER, + MAJOR(md->dev), MINOR(md->dev), + S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP, + &dm_blk_dops, NULL); + + return 0; +} + +static int __unregister_device(struct mapped_device *md) +{ + devfs_unregister(md->devfs_entry); + return 0; +} + +/* + * The hardsect size for a mapped device is the largest hardsect size + * from the devices it maps onto. + */ +static int __find_hardsect_size(struct list_head *devices) +{ + int result = 512, size; + struct list_head *tmp; + + list_for_each(tmp, devices) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + size = get_hardsect_size(dd->dev); + if (size > result) + result = size; + } + + return result; +} + +/* + * Bind a table to the device. + */ +static int __bind(struct mapped_device *md, struct dm_table *t) +{ + int minor = MINOR(md->dev); + + md->map = t; + + if (!t->num_targets) { + _block_size[minor] = 0; + _blksize_size[minor] = BLOCK_SIZE; + _hardsect_size[minor] = 0; + return 0; + } + + /* in k */ + _block_size[minor] = (t->highs[t->num_targets - 1] + 1) >> 1; + + _blksize_size[minor] = BLOCK_SIZE; + _hardsect_size[minor] = __find_hardsect_size(&t->devices); + register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]); + + return 0; +} + +static void __unbind(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + + dm_table_destroy(md->map); + md->map = NULL; + + _block_size[minor] = 0; + _blksize_size[minor] = 0; + _hardsect_size[minor] = 0; +} + +static int check_name(const char *name) +{ + struct mapped_device *md; + + if (strchr(name, '/') || strlen(name) > DM_NAME_LEN) { + DMWARN("invalid device name"); + return -1; + } + + md = dm_get_name_r(name, DM_LOOKUP_BY_NAME); + if (md) { + dm_put_r(md); + DMWARN("device name already in use"); + return -1; + } + + return 0; +} + +static int check_uuid(const char *uuid) +{ + struct mapped_device *md; + + if (uuid) { + md = dm_get_name_r(uuid, DM_LOOKUP_BY_UUID); + if (md) { + dm_put_r(md); + DMWARN("device uuid already in use"); + return -1; + } + } + + return 0; +} + +/* + * Constructor for a new device. + */ +int dm_create(const char *name, const char *uuid, int minor, int ro, + struct dm_table *table) +{ + int r; + struct mapped_device *md; + + spin_lock(&_create_lock); + if (check_name(name) || check_uuid(uuid)) { + spin_unlock(&_create_lock); + return -EINVAL; + } + + md = alloc_dev(name, uuid, minor); + if (!md) { + spin_unlock(&_create_lock); + return -ENXIO; + } + minor = MINOR(md->dev); + _devs[minor] = md; + + r = __register_device(md); + if (r) + goto err; + + r = __bind(md, table); + if (r) + goto err; + + dm_set_ro(md, ro); + + spin_unlock(&_create_lock); + dm_put_w(md); + return 0; + + err: + _devs[minor] = NULL; + if (md->uuid) + kfree(md->uuid); + + dm_put_w(md); + kfree(md); + spin_unlock(&_create_lock); + return r; +} + +/* + * Renames the device. No lock held. 
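__find_hardsect_size() above picks the largest hard-sector size of any underlying device, since the mapped device cannot honour granularity finer than its coarsest component; 512 is the floor. The same fold in standalone form, with the device list flattened to an array for the demo:

#include <stdio.h>

static int max_hardsect(const int *sizes, int n)
{
	int result = 512;	/* floor: a plain sector */
	int i;

	for (i = 0; i < n; i++)
		if (sizes[i] > result)
			result = sizes[i];
	return result;
}

int main(void)
{
	int sizes[] = { 512, 4096, 1024 };

	printf("%d\n", max_hardsect(sizes, 3));	/* prints 4096 */
	return 0;
}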
+ */ +int dm_set_name(const char *name, int nametype, const char *newname) +{ + int r; + struct mapped_device *md; + + spin_lock(&_create_lock); + if (check_name(newname) < 0) { + spin_unlock(&_create_lock); + return -EINVAL; + } + + md = dm_get_name_w(name, nametype); + if (!md) { + spin_unlock(&_create_lock); + return -ENXIO; + } + + r = __unregister_device(md); + if (r) + goto out; + + strcpy(md->name, newname); + r = __register_device(md); + + out: + dm_put_w(md); + spin_unlock(&_create_lock); + return r; +} + +/* + * Destructor for the device. You cannot destroy an open + * device. Write lock must be held before calling. + * Caller must dm_put_w(md) then kfree(md) if call was successful. + */ +int dm_destroy(struct mapped_device *md) +{ + int minor, r; + + if (md->use_count) + return -EPERM; + + r = __unregister_device(md); + if (r) + return r; + + minor = MINOR(md->dev); + _devs[minor] = NULL; + __unbind(md); + + if (md->uuid) + kfree(md->uuid); + + return 0; +} + +/* + * Destroy all devices - except open ones + */ +void dm_destroy_all(void) +{ + int i, some_destroyed, r; + struct mapped_device *md; + + do { + some_destroyed = 0; + for (i = 0; i < MAX_DEVICES; i++) { + md = dm_get_w(i); + if (!md) + continue; + + r = dm_destroy(md); + dm_put_w(md); + + if (!r) { + kfree(md); + some_destroyed = 1; + } + } + } while (some_destroyed); +} + +/* + * Sets or clears the read-only flag for the device. Write lock + * must be held. + */ +void dm_set_ro(struct mapped_device *md, int ro) +{ + md->read_only = ro; + set_device_ro(md->dev, ro); +} + +/* + * Requeue the deferred buffer_heads by calling generic_make_request. + */ +static void flush_deferred_io(struct deferred_io *c) +{ + struct deferred_io *n; + + while (c) { + n = c->next; + generic_make_request(c->rw, c->bh); + free_deferred(c); + c = n; + } +} + +/* + * Swap in a new table (destroying old one). Write lock must be + * held. + */ +int dm_swap_table(struct mapped_device *md, struct dm_table *table) +{ + int r; + + /* device must be suspended */ + if (!md->suspended) + return -EPERM; + + __unbind(md); + + r = __bind(md, table); + if (r) + return r; + + return 0; +} + +/* + * We need to be able to change a mapping table under a mounted + * filesystem. for example we might want to move some data in + * the background. Before the table can be swapped with + * dm_bind_table, dm_suspend must be called to flush any in + * flight buffer_heads and ensure that any further io gets + * deferred. Write lock must be held. + */ +int dm_suspend(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + DECLARE_WAITQUEUE(wait, current); + + if (md->suspended) + return -EINVAL; + + md->suspended = 1; + dm_put_w(md); + + /* wait for all the pending io to flush */ + add_wait_queue(&md->wait, &wait); + current->state = TASK_UNINTERRUPTIBLE; + do { + md = dm_get_w(minor); + if (!md) { + /* Caller expects to free this lock. Yuck. 
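Under the lock juggling, dm_suspend() below is the standard 2.4 sleep idiom: put yourself on the wait queue, set TASK_UNINTERRUPTIBLE, re-test the condition, schedule(), repeat, with dec_pending() doing the matching wake_up() when the last in-flight buffer completes. The idiom with the device-lock shuffling stripped out (a kernel-style sketch, not standalone; names are illustrative):

#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static atomic_t pending = ATOMIC_INIT(0);

/* Completion side does: if (atomic_dec_and_test(&pending)) wake_up(&wq); */
static void wait_until_idle(void)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&wq, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&pending))
			break;		/* test again after every wake-up */
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&wq, &wait);
}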
*/ + down_write(_dev_locks + minor); + return -ENXIO; + } + + if (!atomic_read(&md->pending)) + break; + + dm_put_w(md); + schedule(); + + } while (1); + + current->state = TASK_RUNNING; + remove_wait_queue(&md->wait, &wait); + + return 0; +} + +int dm_resume(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + struct deferred_io *def; + + if (!md->suspended || !md->map->num_targets) + return -EINVAL; + + md->suspended = 0; + def = md->deferred; + md->deferred = NULL; + + dm_put_w(md); + flush_deferred_io(def); + run_task_queue(&tq_disk); + + if (!dm_get_w(minor)) { + /* FIXME: yuck */ + down_write(_dev_locks + minor); + return -ENXIO; + } + + return 0; +} + +struct block_device_operations dm_blk_dops = { + open: dm_blk_open, + release: dm_blk_close, + ioctl: dm_blk_ioctl, + owner: THIS_MODULE +}; + +/* + * module hooks + */ +module_init(dm_init); +module_exit(dm_exit); + +MODULE_PARM(major, "i"); +MODULE_PARM_DESC(major, "The major number of the device mapper"); +MODULE_DESCRIPTION(DM_NAME " driver"); +MODULE_AUTHOR("Joe Thornber "); +MODULE_LICENSE("GPL"); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm-exception-store.c linux.21rc5-ac2/drivers/md/dm-exception-store.c --- linux.21rc5/drivers/md/dm-exception-store.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm-exception-store.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,724 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm-snapshot.h" +#include "kcopyd.h" +#include +#include + +#define SECTOR_SIZE 512 +#define SECTOR_SHIFT 9 + +/*----------------------------------------------------------------- + * Persistent snapshots, by persistent we mean that the snapshot + * will survive a reboot. + *---------------------------------------------------------------*/ + +/* + * We need to store a record of which parts of the origin have + * been copied to the snapshot device. The snapshot code + * requires that we copy exception chunks to chunk aligned areas + * of the COW store. It makes sense therefore, to store the + * metadata in chunk size blocks. + * + * There is no backward or forward compatibility implemented, + * snapshots with different disk versions than the kernel will + * not be usable. It is expected that "lvcreate" will blank out + * the start of a fresh COW device before calling the snapshot + * constructor. + * + * The first chunk of the COW device just contains the header. + * After this there is a chunk filled with exception metadata, + * followed by as many exception chunks as can fit in the + * metadata areas. + * + * All on disk structures are in little-endian format. The end + * of the exceptions info is indicated by an exception with a + * new_chunk of 0, which is invalid since it would point to the + * header chunk. + */ + +/* + * Magic for persistent snapshots: "SnAp" - Feeble isn't it. + */ +#define SNAP_MAGIC 0x70416e53 + +/* + * The on-disk version of the metadata. + */ +#define SNAPSHOT_DISK_VERSION 1 + +struct disk_header { + uint32_t magic; + + /* + * Is this snapshot valid. There is no way of recovering + * an invalid snapshot. + */ + int valid; + + /* + * Simple, incrementing version. no backward + * compatibility. 
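The layout the header comment above describes, chunk 0 holding the header and each metadata area being one chunk of exception records followed by a run of copied-data chunks, reduces to a small indexing formula, the same one area_io() applies further down: chunk = 1 + (exceptions_per_area + 1) * area. A standalone check of the arithmetic; the exceptions-per-area value is illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t area_to_chunk(uint32_t area, uint32_t epa)
{
	/* skip the header chunk, then stride over one metadata
	   chunk plus epa data chunks per preceding area */
	return 1 + (epa + 1) * area;
}

int main(void)
{
	uint32_t epa = 128;	/* illustrative */
	uint32_t a;

	for (a = 0; a < 3; a++)
		printf("area %u starts at chunk %u\n", a, area_to_chunk(a, epa));
	return 0;	/* prints chunks 1, 130, 259 */
}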
+	 */
+	uint32_t version;
+
+	/* In sectors */
+	uint32_t chunk_size;
+};
+
+struct disk_exception {
+	uint64_t old_chunk;
+	uint64_t new_chunk;
+};
+
+struct commit_callback {
+	void (*callback)(void *, int success);
+	void *context;
+};
+
+/*
+ * The top level structure for a persistent exception store.
+ */
+struct pstore {
+	struct dm_snapshot *snap;	/* up pointer to my snapshot */
+	int version;
+	int valid;
+	uint32_t chunk_size;
+	uint32_t exceptions_per_area;
+
+	/*
+	 * Now that we have an asynchronous kcopyd there is no
+	 * need for large chunk sizes, so it won't hurt to have a
+	 * whole chunk's worth of metadata in memory at once.
+	 */
+	void *area;
+	struct kiobuf *iobuf;
+
+	/*
+	 * Used to keep track of which metadata area the data in
+	 * 'chunk' refers to.
+	 */
+	uint32_t current_area;
+
+	/*
+	 * The next free chunk for an exception.
+	 */
+	uint32_t next_free;
+
+	/*
+	 * The index of the next free exception in the current
+	 * metadata area.
+	 */
+	uint32_t current_committed;
+
+	atomic_t pending_count;
+	uint32_t callback_count;
+	struct commit_callback *callbacks;
+};
+
+/*
+ * For performance reasons we want to defer writing a committed
+ * exception's metadata to disk so that we can amortise away this
+ * expensive operation.
+ *
+ * For the initial version of this code we will remain with
+ * synchronous io.  There are some deadlock issues with async
+ * that I haven't yet worked out.
+ */
+static int do_io(int rw, struct kcopyd_region *where, struct kiobuf *iobuf)
+{
+	int i, sectors_per_block, nr_blocks, start;
+	int blocksize = get_hardsect_size(where->dev);
+	int status;
+
+	sectors_per_block = blocksize / SECTOR_SIZE;
+
+	nr_blocks = where->count / sectors_per_block;
+	start = where->sector / sectors_per_block;
+
+	for (i = 0; i < nr_blocks; i++)
+		iobuf->blocks[i] = start++;
+
+	iobuf->length = where->count << 9;
+	iobuf->locked = 1;
+
+	status = brw_kiovec(rw, 1, &iobuf, where->dev, iobuf->blocks,
+			    blocksize);
+	if (status != (where->count << 9))
+		return -EIO;
+
+	return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19)
+/*
+ * FIXME: Remove once 2.4.19 has been released.
+ */
+struct page *vmalloc_to_page(void *vmalloc_addr)
+{
+	unsigned long addr = (unsigned long) vmalloc_addr;
+	struct page *page = NULL;
+	pmd_t *pmd;
+	pte_t *pte;
+	pgd_t *pgd;
+
+	pgd = pgd_offset_k(addr);
+	if (!pgd_none(*pgd)) {
+		pmd = pmd_offset(pgd, addr);
+		if (!pmd_none(*pmd)) {
+			pte = pte_offset(pmd, addr);
+			if (pte_present(*pte)) {
+				page = pte_page(*pte);
+			}
+		}
+	}
+	return page;
+}
+#endif
+
+static int allocate_iobuf(struct pstore *ps)
+{
+	size_t i, r = -ENOMEM, len, nr_pages;
+	struct page *page;
+
+	len = ps->chunk_size << SECTOR_SHIFT;
+
+	/*
+	 * Allocate the chunk_size block of memory that will hold
+	 * a single metadata area.
+	 */
+	ps->area = vmalloc(len);
+	if (!ps->area)
+		return r;
+
+	if (alloc_kiovec(1, &ps->iobuf))
+		goto bad;
+
+	nr_pages = ps->chunk_size / (PAGE_SIZE / SECTOR_SIZE);
+	r = expand_kiobuf(ps->iobuf, nr_pages);
+	if (r)
+		goto bad;
+
+	/*
+	 * We lock the pages for ps->area into memory since they'll be
+	 * doing a lot of io.
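+	 *
+	 * (chunk_size is in sectors, so nr_pages above follows
+	 * directly: assuming 4KiB pages, each page covers
+	 * PAGE_SIZE / SECTOR_SIZE = 8 sectors, and a 16 sector
+	 * chunk pins 2 pages.)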
+ */ + for (i = 0; i < nr_pages; i++) { + page = vmalloc_to_page(ps->area + (i * PAGE_SIZE)); + LockPage(page); + ps->iobuf->maplist[i] = page; + ps->iobuf->nr_pages++; + } + + ps->iobuf->nr_pages = nr_pages; + ps->iobuf->offset = 0; + + return 0; + + bad: + if (ps->iobuf) + free_kiovec(1, &ps->iobuf); + + if (ps->area) + vfree(ps->area); + ps->iobuf = NULL; + return r; +} + +static void free_iobuf(struct pstore *ps) +{ + int i; + + for (i = 0; i < ps->iobuf->nr_pages; i++) + UnlockPage(ps->iobuf->maplist[i]); + ps->iobuf->locked = 0; + + free_kiovec(1, &ps->iobuf); + vfree(ps->area); +} + +/* + * Read or write a chunk aligned and sized block of data from a device. + */ +static int chunk_io(struct pstore *ps, uint32_t chunk, int rw) +{ + int r; + struct kcopyd_region where; + + where.dev = ps->snap->cow->dev; + where.sector = ps->chunk_size * chunk; + where.count = ps->chunk_size; + + r = do_io(rw, &where, ps->iobuf); + if (r) + return r; + + return 0; +} + +/* + * Read or write a metadata area. Remembering to skip the first + * chunk which holds the header. + */ +static int area_io(struct pstore *ps, uint32_t area, int rw) +{ + int r; + uint32_t chunk; + + /* convert a metadata area index to a chunk index */ + chunk = 1 + ((ps->exceptions_per_area + 1) * area); + + r = chunk_io(ps, chunk, rw); + if (r) + return r; + + ps->current_area = area; + return 0; +} + +static int zero_area(struct pstore *ps, uint32_t area) +{ + memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); + return area_io(ps, area, WRITE); +} + +static int read_header(struct pstore *ps, int *new_snapshot) +{ + int r; + struct disk_header *dh; + + r = chunk_io(ps, 0, READ); + if (r) + return r; + + dh = (struct disk_header *) ps->area; + + if (dh->magic == 0) { + *new_snapshot = 1; + + } else if (dh->magic == SNAP_MAGIC) { + *new_snapshot = 0; + ps->valid = dh->valid; + ps->version = dh->version; + ps->chunk_size = dh->chunk_size; + + } else { + DMWARN("Invalid/corrupt snapshot"); + r = -ENXIO; + } + + return r; +} + +static int write_header(struct pstore *ps) +{ + struct disk_header *dh; + + memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); + + dh = (struct disk_header *) ps->area; + dh->magic = SNAP_MAGIC; + dh->valid = ps->valid; + dh->version = ps->version; + dh->chunk_size = ps->chunk_size; + + return chunk_io(ps, 0, WRITE); +} + +/* + * Access functions for the disk exceptions, these do the endian conversions. + */ +static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) +{ + if (index >= ps->exceptions_per_area) + return NULL; + + return ((struct disk_exception *) ps->area) + index; +} + +static int read_exception(struct pstore *ps, + uint32_t index, struct disk_exception *result) +{ + struct disk_exception *e; + + e = get_exception(ps, index); + if (!e) + return -EINVAL; + + /* copy it */ + result->old_chunk = le64_to_cpu(e->old_chunk); + result->new_chunk = le64_to_cpu(e->new_chunk); + + return 0; +} + +static int write_exception(struct pstore *ps, + uint32_t index, struct disk_exception *de) +{ + struct disk_exception *e; + + e = get_exception(ps, index); + if (!e) + return -EINVAL; + + /* copy it */ + e->old_chunk = cpu_to_le64(de->old_chunk); + e->new_chunk = cpu_to_le64(de->new_chunk); + + return 0; +} + +/* + * Registers the exceptions that are present in the current area. + * 'full' is filled in to indicate if the area has been + * filled. 
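+ *
+ * For scale, with an 8KiB chunk (16 sectors) one area holds
+ * (16 << SECTOR_SHIFT) / sizeof(struct disk_exception) =
+ * 8192 / 16 = 512 entries.  A zero-filled slot decodes as
+ * new_chunk == 0, which can never be a real mapping since
+ * chunk 0 holds the header, so it doubles as the
+ * end-of-list sentinel that clears 'full'.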
+ */
+static int insert_exceptions(struct pstore *ps, int *full)
+{
+	int i, r;
+	struct disk_exception de;
+
+	/* presume the area is full */
+	*full = 1;
+
+	for (i = 0; i < ps->exceptions_per_area; i++) {
+		r = read_exception(ps, i, &de);
+
+		if (r)
+			return r;
+
+		/*
+		 * If the new_chunk is pointing at the start of
+		 * the COW device, where the first metadata area
+		 * is, we know that we've hit the end of the
+		 * exceptions.  Therefore the area is not full.
+		 */
+		if (de.new_chunk == 0LL) {
+			ps->current_committed = i;
+			*full = 0;
+			break;
+		}
+
+		/*
+		 * Keep track of the start of the free chunks.
+		 */
+		if (ps->next_free <= de.new_chunk)
+			ps->next_free = de.new_chunk + 1;
+
+		/*
+		 * Otherwise we add the exception to the snapshot.
+		 */
+		r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+static int read_exceptions(struct pstore *ps)
+{
+	uint32_t area;
+	int r, full = 1;
+
+	/*
+	 * Keep reading chunks and inserting exceptions until
+	 * we find a partially full area.  (The loop counter is
+	 * the only increment of 'area'; a second increment in
+	 * the body would skip every other metadata area.)
+	 */
+	for (area = 0; full; area++) {
+		r = area_io(ps, area, READ);
+		if (r)
+			return r;
+
+		r = insert_exceptions(ps, &full);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+static inline struct pstore *get_info(struct exception_store *store)
+{
+	return (struct pstore *) store->context;
+}
+
+static int persistent_percentfull(struct exception_store *store)
+{
+	struct pstore *ps = get_info(store);
+	return (ps->next_free * store->snap->chunk_size * 100) /
+	    get_dev_size(store->snap->cow->dev);
+}
+
+static void persistent_destroy(struct exception_store *store)
+{
+	struct pstore *ps = get_info(store);
+
+	vfree(ps->callbacks);
+	free_iobuf(ps);
+	kfree(ps);
+}
+
+static int persistent_prepare(struct exception_store *store,
+			      struct exception *e)
+{
+	struct pstore *ps = get_info(store);
+	uint32_t stride;
+	offset_t size = get_dev_size(store->snap->cow->dev);
+
+	/* Is there enough room ? */
+	if (size <= (ps->next_free * store->snap->chunk_size))
+		return -ENOSPC;
+
+	e->new_chunk = ps->next_free;
+
+	/*
+	 * Move onto the next free pending, making sure to take
+	 * into account the location of the metadata chunks.
+	 */
+	stride = (ps->exceptions_per_area + 1);
+	if (!(++ps->next_free % stride))
+		ps->next_free++;
+
+	atomic_inc(&ps->pending_count);
+	return 0;
+}
+
+static void persistent_commit(struct exception_store *store,
+			      struct exception *e,
+			      void (*callback) (void *, int success),
+			      void *callback_context)
+{
+	int r, i;
+	struct pstore *ps = get_info(store);
+	struct disk_exception de;
+	struct commit_callback *cb;
+
+	de.old_chunk = e->old_chunk;
+	de.new_chunk = e->new_chunk;
+	write_exception(ps, ps->current_committed++, &de);
+
+	/*
+	 * Add the callback to the back of the array.  This code
+	 * is the only place where the callback array is
+	 * manipulated, and we know that it will never be called
+	 * multiple times concurrently.
+	 */
+	cb = ps->callbacks + ps->callback_count++;
+	cb->callback = callback;
+	cb->context = callback_context;
+
+	/*
+	 * If there are no more exceptions in flight, or we have
+	 * filled this metadata area we commit the exceptions to
+	 * disk.
+	 */
+	if (atomic_dec_and_test(&ps->pending_count) ||
+	    (ps->current_committed == ps->exceptions_per_area)) {
+		r = area_io(ps, ps->current_area, WRITE);
+		if (r)
+			ps->valid = 0;
+
+		for (i = 0; i < ps->callback_count; i++) {
+			cb = ps->callbacks + i;
+			cb->callback(cb->context, r == 0 ?
1 : 0); + } + + ps->callback_count = 0; + } + + /* + * Have we completely filled the current area ? + */ + if (ps->current_committed == ps->exceptions_per_area) { + ps->current_committed = 0; + r = zero_area(ps, ps->current_area + 1); + if (r) + ps->valid = 0; + } +} + +static void persistent_drop(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + + ps->valid = 0; + if (write_header(ps)) + DMWARN("write header failed"); +} + +int dm_create_persistent(struct exception_store *store, uint32_t chunk_size) +{ + int r, new_snapshot; + struct pstore *ps; + + /* allocate the pstore */ + ps = kmalloc(sizeof(*ps), GFP_KERNEL); + if (!ps) + return -ENOMEM; + + ps->snap = store->snap; + ps->valid = 1; + ps->version = SNAPSHOT_DISK_VERSION; + ps->chunk_size = chunk_size; + ps->exceptions_per_area = (chunk_size << SECTOR_SHIFT) / + sizeof(struct disk_exception); + ps->next_free = 2; /* skipping the header and first area */ + ps->current_committed = 0; + + r = allocate_iobuf(ps); + if (r) + goto bad; + + /* + * Allocate space for all the callbacks. + */ + ps->callback_count = 0; + atomic_set(&ps->pending_count, 0); + ps->callbacks = vcalloc(ps->exceptions_per_area, + sizeof(*ps->callbacks)); + + if (!ps->callbacks) + goto bad; + + /* + * Read the snapshot header. + */ + r = read_header(ps, &new_snapshot); + if (r) + goto bad; + + /* + * Do we need to setup a new snapshot ? + */ + if (new_snapshot) { + r = write_header(ps); + if (r) { + DMWARN("write_header failed"); + goto bad; + } + + r = zero_area(ps, 0); + if (r) { + DMWARN("zero_area(0) failed"); + goto bad; + } + + } else { + /* + * Sanity checks. + */ + if (ps->chunk_size != chunk_size) { + DMWARN("chunk size for existing snapshot different " + "from that requested"); + r = -EINVAL; + goto bad; + } + + if (ps->version != SNAPSHOT_DISK_VERSION) { + DMWARN("unable to handle snapshot disk version %d", + ps->version); + r = -EINVAL; + goto bad; + } + + /* + * Read the metadata. + */ + r = read_exceptions(ps); + if (r) + goto bad; + } + + store->destroy = persistent_destroy; + store->prepare_exception = persistent_prepare; + store->commit_exception = persistent_commit; + store->drop_snapshot = persistent_drop; + store->percent_full = persistent_percentfull; + store->context = ps; + + return r; + + bad: + if (ps) { + if (ps->callbacks) + vfree(ps->callbacks); + + if (ps->iobuf) + free_iobuf(ps); + + kfree(ps); + } + return r; +} + +/*----------------------------------------------------------------- + * Implementation of the store for non-persistent snapshots. 
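+ *
+ * Nothing is written to the COW device here beyond the
+ * copied chunks themselves: the store is a single
+ * next_free cursor, so the mapping lives only in the
+ * in-core exception table and is lost on reboot.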
+ *---------------------------------------------------------------*/ +struct transient_c { + offset_t next_free; +}; + +void transient_destroy(struct exception_store *store) +{ + kfree(store->context); +} + +int transient_prepare(struct exception_store *store, struct exception *e) +{ + struct transient_c *tc = (struct transient_c *) store->context; + offset_t size = get_dev_size(store->snap->cow->dev); + + if (size < (tc->next_free + store->snap->chunk_size)) + return -1; + + e->new_chunk = sector_to_chunk(store->snap, tc->next_free); + tc->next_free += store->snap->chunk_size; + + return 0; +} + +void transient_commit(struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context) +{ + /* Just succeed */ + callback(callback_context, 1); +} + +static int transient_percentfull(struct exception_store *store) +{ + struct transient_c *tc = (struct transient_c *) store->context; + return (tc->next_free * 100) / get_dev_size(store->snap->cow->dev); +} + +int dm_create_transient(struct exception_store *store, + struct dm_snapshot *s, int blocksize, void **error) +{ + struct transient_c *tc; + + memset(store, 0, sizeof(*store)); + store->destroy = transient_destroy; + store->prepare_exception = transient_prepare; + store->commit_exception = transient_commit; + store->percent_full = transient_percentfull; + store->snap = s; + + tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + tc->next_free = 0; + store->context = tc; + + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm.h linux.21rc5-ac2/drivers/md/dm.h --- linux.21rc5/drivers/md/dm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm.h 2003-05-29 01:44:07.000000000 +0100 @@ -0,0 +1,241 @@ +/* + * Internal header file for device mapper + * + * Copyright (C) 2001 Sistina Software + * + * This file is released under the LGPL. + */ + +#ifndef DM_INTERNAL_H +#define DM_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DM_NAME "device-mapper" /* Name for messaging */ +#define DM_DRIVER_EMAIL "lvm-devel@lists.sistina.com" +#define MAX_DEPTH 16 +#define NODE_SIZE L1_CACHE_BYTES +#define KEYS_PER_NODE (NODE_SIZE / sizeof(offset_t)) +#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) +#define MAX_ARGS 32 +#define MAX_DEVICES 256 + +/* + * List of devices that a metadevice uses and should open/close. + */ +struct dm_dev { + atomic_t count; + struct list_head list; + + int mode; + + kdev_t dev; + struct block_device *bd; +}; + +/* + * I/O that had to be deferred while we were suspended + */ +struct deferred_io { + int rw; + struct buffer_head *bh; + struct deferred_io *next; +}; + +/* + * Btree leaf - this does the actual mapping + */ +struct target { + struct target_type *type; + void *private; +}; + +/* + * The btree + */ +struct dm_table { + /* btree table */ + int depth; + int counts[MAX_DEPTH]; /* in nodes */ + offset_t *index[MAX_DEPTH]; + + int num_targets; + int num_allocated; + offset_t *highs; + struct target *targets; + + /* + * Indicates the rw permissions for the new logical + * device. This should be a combination of FMODE_READ + * and FMODE_WRITE. + */ + int mode; + + /* a list of devices used by this table */ + struct list_head devices; + + /* + * A waitqueue for processes waiting for something + * interesting to happen to this table. 
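+	 *
+	 * A rough sketch of the consumer side (this is what
+	 * wait_device_event() in dm-ioctl.c does):
+	 *
+	 *	DECLARE_WAITQUEUE(wq, current);
+	 *	set_current_state(TASK_INTERRUPTIBLE);
+	 *	add_wait_queue(&md->map->eventq, &wq);
+	 *	schedule();
+	 *
+	 * with dm_table_event() as the producer that wakes it.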
+ */ + wait_queue_head_t eventq; +}; + +/* + * The actual device struct + */ +struct mapped_device { + kdev_t dev; + char name[DM_NAME_LEN]; + char *uuid; + + int use_count; + int suspended; + int read_only; + + /* a list of io's that arrived while we were suspended */ + atomic_t pending; + wait_queue_head_t wait; + struct deferred_io *deferred; + + struct dm_table *map; + + /* used by dm-fs.c */ + devfs_handle_t devfs_entry; +}; + +extern struct block_device_operations dm_blk_dops; + +/* dm-target.c */ +int dm_target_init(void); +struct target_type *dm_get_target_type(const char *name); +void dm_put_target_type(struct target_type *t); +void dm_target_exit(void); + +/* + * Destructively splits argument list to pass to ctr. + */ +int split_args(int max, int *argc, char **argv, char *input); + +/* dm.c */ +struct mapped_device *dm_get_r(int minor); +struct mapped_device *dm_get_w(int minor); + +/* + * There are two ways to lookup a device. + */ +enum { + DM_LOOKUP_BY_NAME, + DM_LOOKUP_BY_UUID +}; + +struct mapped_device *dm_get_name_r(const char *name, int nametype); +struct mapped_device *dm_get_name_w(const char *name, int nametype); + +void dm_put_r(struct mapped_device *md); +void dm_put_w(struct mapped_device *md); + +/* + * Call with no lock. + */ +int dm_create(const char *name, const char *uuid, int minor, int ro, + struct dm_table *table); +int dm_set_name(const char *name, int nametype, const char *newname); +void dm_destroy_all(void); + +/* + * You must have the write lock before calling the remaining md + * methods. + */ +int dm_destroy(struct mapped_device *md); +void dm_set_ro(struct mapped_device *md, int ro); + +/* + * The device must be suspended before calling this method. + */ +int dm_swap_table(struct mapped_device *md, struct dm_table *t); + +/* + * A device can still be used while suspended, but I/O is deferred. + */ +int dm_suspend(struct mapped_device *md); +int dm_resume(struct mapped_device *md); + +/* dm-table.c */ +int dm_table_create(struct dm_table **result, int mode); +void dm_table_destroy(struct dm_table *t); + +int dm_table_add_target(struct dm_table *t, offset_t highs, + struct target_type *type, void *private); +int dm_table_complete(struct dm_table *t); + +/* + * Event handling + */ +void dm_table_event(struct dm_table *t); + +#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x) +#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x) +#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x) + +/* + * Calculate the index of the child node of the n'th node k'th key. + */ +static inline int get_child(int n, int k) +{ + return (n * CHILDREN_PER_NODE) + k; +} + +/* + * Return the n'th node of level l from table t. + */ +static inline offset_t *get_node(struct dm_table *t, int l, int n) +{ + return t->index[l] + (n * KEYS_PER_NODE); +} + +static inline int array_too_big(unsigned long fixed, unsigned long obj, + unsigned long num) +{ + return (num > (ULONG_MAX - fixed) / obj); +} + + +/* + * Targets + */ +int dm_linear_init(void); +void dm_linear_exit(void); + +int dm_stripe_init(void); +void dm_stripe_exit(void); + +int dm_snapshot_init(void); +void dm_snapshot_exit(void); + + +/* + * Init functions for the user interface to device-mapper. At + * the moment an ioctl interface on a special char device is + * used. A filesystem based interface would be a nicer way to + * go. 
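+ *
+ * A rough sketch of driving the interface from userland,
+ * assuming the control node at /dev/mapper/control and the
+ * DM_VERSION request from the dm-ioctl header (error
+ * handling omitted):
+ *
+ *	struct dm_ioctl io;
+ *	memset(&io, 0, sizeof(io));
+ *	io.version[0] = DM_VERSION_MAJOR;
+ *	io.version[1] = DM_VERSION_MINOR;
+ *	io.version[2] = DM_VERSION_PATCHLEVEL;
+ *	io.data_size = sizeof(io);
+ *	fd = open("/dev/mapper/control", O_RDWR);
+ *	ioctl(fd, DM_VERSION, &io);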
+ */ +int __init dm_interface_init(void); +void dm_interface_exit(void); + + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm-ioctl.c linux.21rc5-ac2/drivers/md/dm-ioctl.c --- linux.21rc5/drivers/md/dm-ioctl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm-ioctl.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,830 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include +#include + +/*----------------------------------------------------------------- + * Implementation of the ioctl commands + *---------------------------------------------------------------*/ + +/* + * All the ioctl commands get dispatched to functions with this + * prototype. + */ +typedef int (*ioctl_fn)(struct dm_ioctl *param, struct dm_ioctl *user); + +/* + * This is really a debug only call. + */ +static int remove_all(struct dm_ioctl *param, struct dm_ioctl *user) +{ + dm_destroy_all(); + return 0; +} + +/* + * Check a string doesn't overrun the chunk of + * memory we copied from userland. + */ +static int valid_str(char *str, void *begin, void *end) +{ + while (((void *) str >= begin) && ((void *) str < end)) + if (!*str++) + return 0; + + return -EINVAL; +} + +static int next_target(struct dm_target_spec *last, uint32_t next, + void *begin, void *end, + struct dm_target_spec **spec, char **params) +{ + *spec = (struct dm_target_spec *) + ((unsigned char *) last + next); + *params = (char *) (*spec + 1); + + if (*spec < (last + 1) || ((void *) *spec > end)) + return -EINVAL; + + return valid_str(*params, begin, end); +} + +/* + * Checks to see if there's a gap in the table. + * Returns true iff there is a gap. + */ +static int gap(struct dm_table *table, struct dm_target_spec *spec) +{ + if (!table->num_targets) + return (spec->sector_start > 0) ? 
1 : 0; + + if (spec->sector_start != table->highs[table->num_targets - 1] + 1) + return 1; + + return 0; +} + +static int populate_table(struct dm_table *table, struct dm_ioctl *args) +{ + int i = 0, r, first = 1, argc; + struct dm_target_spec *spec; + char *params, *argv[MAX_ARGS]; + struct target_type *ttype; + void *context, *begin, *end; + offset_t highs = 0; + + if (!args->target_count) { + DMWARN("populate_table: no targets specified"); + return -EINVAL; + } + + begin = (void *) args; + end = begin + args->data_size; + +#define PARSE_ERROR(msg) {DMWARN(msg); return -EINVAL;} + + for (i = 0; i < args->target_count; i++) { + + if (first) + r = next_target((struct dm_target_spec *) args, + args->data_start, + begin, end, &spec, ¶ms); + else + r = next_target(spec, spec->next, begin, end, + &spec, ¶ms); + + if (r) + PARSE_ERROR("unable to find target"); + + /* Look up the target type */ + ttype = dm_get_target_type(spec->target_type); + if (!ttype) + PARSE_ERROR("unable to find target type"); + + if (gap(table, spec)) + PARSE_ERROR("gap in target ranges"); + + /* Split up the parameter list */ + if (split_args(MAX_ARGS, &argc, argv, params) < 0) + PARSE_ERROR("Too many arguments"); + + /* Build the target */ + if (ttype->ctr(table, spec->sector_start, spec->length, + argc, argv, &context)) { + DMWARN("%s: target constructor failed", + (char *) context); + return -EINVAL; + } + + /* Add the target to the table */ + highs = spec->sector_start + (spec->length - 1); + if (dm_table_add_target(table, highs, ttype, context)) + PARSE_ERROR("internal error adding target to table"); + + first = 0; + } + +#undef PARSE_ERROR + + r = dm_table_complete(table); + return r; +} + +/* + * Round up the ptr to the next 'align' boundary. Obviously + * 'align' must be a power of 2. + */ +static inline void *align_ptr(void *ptr, unsigned int align) +{ + align--; + return (void *) (((unsigned long) (ptr + align)) & ~align); +} + +/* + * Copies a dm_ioctl and an optional additional payload to + * userland. + */ +static int results_to_user(struct dm_ioctl *user, struct dm_ioctl *param, + void *data, uint32_t len) +{ + int r; + void *ptr = NULL; + + if (data) { + ptr = align_ptr(user + 1, sizeof(unsigned long)); + param->data_start = ptr - (void *) user; + } + + /* + * The version number has already been filled in, so we + * just copy later fields. + */ + r = copy_to_user(&user->data_size, ¶m->data_size, + sizeof(*param) - sizeof(param->version)); + if (r) + return -EFAULT; + + if (data) { + if (param->data_start + len > param->data_size) + return -ENOSPC; + + if (copy_to_user(ptr, data, len)) + r = -EFAULT; + } + + return r; +} + +/* + * Fills in a dm_ioctl structure, ready for sending back to + * userland. + */ +static void __info(struct mapped_device *md, struct dm_ioctl *param) +{ + param->flags = DM_EXISTS_FLAG; + if (md->suspended) + param->flags |= DM_SUSPEND_FLAG; + if (md->read_only) + param->flags |= DM_READONLY_FLAG; + + strncpy(param->name, md->name, sizeof(param->name)); + + if (md->uuid) + strncpy(param->uuid, md->uuid, sizeof(param->uuid) - 1); + else + param->uuid[0] = '\0'; + + param->open_count = md->use_count; + param->dev = kdev_t_to_nr(md->dev); + param->target_count = md->map->num_targets; +} + +/* + * Always use UUID for lookups if it's present, otherwise use name. + */ +static inline char *lookup_name(struct dm_ioctl *param) +{ + return (*param->uuid) ? param->uuid : param->name; +} + +static inline int lookup_type(struct dm_ioctl *param) +{ + return (*param->uuid) ? 
DM_LOOKUP_BY_UUID : DM_LOOKUP_BY_NAME;
+}
+
+#define ALIGNMENT sizeof(int)
+static void *_align(void *ptr, unsigned int a)
+{
+	register unsigned long align = --a;
+
+	return (void *) (((unsigned long) ptr + align) & ~align);
+}
+
+/*
+ * Copies device info back to user space, used by
+ * the create and info ioctls.
+ */
+static int info(struct dm_ioctl *param, struct dm_ioctl *user)
+{
+	struct mapped_device *md;
+
+	param->flags = 0;
+
+	md = dm_get_name_r(lookup_name(param), lookup_type(param));
+	if (!md)
+		/*
+		 * Device not found - returns cleared exists flag.
+		 */
+		goto out;
+
+	__info(md, param);
+	dm_put_r(md);
+
+      out:
+	return results_to_user(user, param, NULL, 0);
+}
+
+static inline int get_mode(struct dm_ioctl *param)
+{
+	int mode = FMODE_READ | FMODE_WRITE;
+
+	if (param->flags & DM_READONLY_FLAG)
+		mode = FMODE_READ;
+
+	return mode;
+}
+
+static int create(struct dm_ioctl *param, struct dm_ioctl *user)
+{
+	int r, ro;
+	struct dm_table *t;
+	int minor;
+
+	r = dm_table_create(&t, get_mode(param));
+	if (r)
+		return r;
+
+	r = populate_table(t, param);
+	if (r) {
+		dm_table_destroy(t);
+		return r;
+	}
+
+	minor = (param->flags & DM_PERSISTENT_DEV_FLAG) ?
+	    MINOR(to_kdev_t(param->dev)) : -1;
+
+	ro = (param->flags & DM_READONLY_FLAG) ? 1 : 0;
+
+	r = dm_create(param->name, param->uuid, minor, ro, t);
+	if (r) {
+		dm_table_destroy(t);
+		return r;
+	}
+
+	r = info(param, user);
+	return r;
+}
+
+/*
+ * Build up the status struct for each target
+ */
+static int __status(struct mapped_device *md, struct dm_ioctl *param,
+		    char *outbuf, int *len)
+{
+	int i;
+	struct dm_target_spec *spec;
+	uint64_t sector = 0LL;
+	char *outptr;
+	status_type_t type;
+
+	if (param->flags & DM_STATUS_TABLE_FLAG)
+		type = STATUSTYPE_TABLE;
+	else
+		type = STATUSTYPE_INFO;
+
+	outptr = outbuf;
+
+	/* Get all the target info */
+	for (i = 0; i < md->map->num_targets; i++) {
+		struct target_type *tt = md->map->targets[i].type;
+		offset_t high = md->map->highs[i];
+
+		if (outptr - outbuf +
+		    sizeof(struct dm_target_spec) > param->data_size)
+			return -ENOMEM;
+
+		spec = (struct dm_target_spec *) outptr;
+
+		spec->status = 0;
+		spec->sector_start = sector;
+		spec->length = high - sector + 1;
+		strncpy(spec->target_type, tt->name, sizeof(spec->target_type));
+
+		outptr += sizeof(struct dm_target_spec);
+
+		/* Get the status/table string from the target driver */
+		if (tt->status)
+			tt->status(type, outptr,
+				   outbuf + param->data_size - outptr,
+				   md->map->targets[i].private);
+		else
+			outptr[0] = '\0';
+
+		outptr += strlen(outptr) + 1;
+		outptr = _align(outptr, ALIGNMENT);
+
+		sector = high + 1;
+
+		spec->next = outptr - outbuf;
+	}
+
+	param->target_count = md->map->num_targets;
+	*len = outptr - outbuf;
+
+	return 0;
+}
+
+/*
+ * Return the status of a device as a text string for each
+ * target.
+ */
+static int get_status(struct dm_ioctl *param, struct dm_ioctl *user)
+{
+	struct mapped_device *md;
+	int len = 0;
+	int ret;
+	char *outbuf = NULL;
+
+	md = dm_get_name_r(lookup_name(param), lookup_type(param));
+	if (!md)
+		/*
+		 * Device not found - returns cleared exists flag.
+		 */
+		goto out;
+
+	/*
+	 * We haven't a clue how long the resultant data will be so
+	 * just allocate as much as userland has allowed us and make
+	 * sure we don't overrun it.
+	 */
+	outbuf = kmalloc(param->data_size, GFP_KERNEL);
+	if (!outbuf)
+		goto out;
+
+	/*
+	 * Get the status of all targets
+	 */
+	__status(md, param, outbuf, &len);
+
+	/*
+	 * Setup the basic dm_ioctl structure.
+ */ + __info(md, param); + + out: + if (md) + dm_put_r(md); + + ret = results_to_user(user, param, outbuf, len); + + if (outbuf) + kfree(outbuf); + + return ret; +} + +/* + * Wait for a device to report an event + */ +static int wait_device_event(struct dm_ioctl *param, struct dm_ioctl *user) +{ + struct mapped_device *md; + DECLARE_WAITQUEUE(wq, current); + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + /* + * Device not found - returns cleared exists flag. + */ + goto out; + /* + * Setup the basic dm_ioctl structure. + */ + __info(md, param); + + /* + * Wait for a notification event + */ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&md->map->eventq, &wq); + + dm_put_r(md); + + schedule(); + set_current_state(TASK_RUNNING); + + out: + return results_to_user(user, param, NULL, 0); +} + +/* + * Retrieves a list of devices used by a particular dm device. + */ +static int dep(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int count, r; + struct mapped_device *md; + struct list_head *tmp; + size_t len = 0; + struct dm_target_deps *deps = NULL; + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + goto out; + + /* + * Setup the basic dm_ioctl structure. + */ + __info(md, param); + + /* + * Count the devices. + */ + count = 0; + list_for_each(tmp, &md->map->devices) + count++; + + /* + * Allocate a kernel space version of the dm_target_status + * struct. + */ + if (array_too_big(sizeof(*deps), sizeof(*deps->dev), count)) { + dm_put_r(md); + return -ENOMEM; + } + + len = sizeof(*deps) + (sizeof(*deps->dev) * count); + deps = kmalloc(len, GFP_KERNEL); + if (!deps) { + dm_put_r(md); + return -ENOMEM; + } + + /* + * Fill in the devices. + */ + deps->count = count; + count = 0; + list_for_each(tmp, &md->map->devices) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + deps->dev[count++] = kdev_t_to_nr(dd->dev); + } + dm_put_r(md); + + out: + r = results_to_user(user, param, deps, len); + + kfree(deps); + return r; +} + +static int remove(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r; + struct mapped_device *md; + + md = dm_get_name_w(lookup_name(param), lookup_type(param)); + if (!md) + return -ENXIO; + + r = dm_destroy(md); + dm_put_w(md); + if (!r) + kfree(md); + + return r; +} + +static int suspend(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r; + struct mapped_device *md; + + md = dm_get_name_w(lookup_name(param), lookup_type(param)); + if (!md) + return -ENXIO; + + r = (param->flags & DM_SUSPEND_FLAG) ? dm_suspend(md) : dm_resume(md); + dm_put_w(md); + + return r; +} + +static int reload(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r; + struct mapped_device *md; + struct dm_table *t; + + r = dm_table_create(&t, get_mode(param)); + if (r) + return r; + + r = populate_table(t, param); + if (r) { + dm_table_destroy(t); + return r; + } + + md = dm_get_name_w(lookup_name(param), lookup_type(param)); + if (!md) { + dm_table_destroy(t); + return -ENXIO; + } + + r = dm_swap_table(md, t); + if (r) { + dm_put_w(md); + dm_table_destroy(t); + return r; + } + + dm_set_ro(md, (param->flags & DM_READONLY_FLAG) ? 
1 : 0); + dm_put_w(md); + + r = info(param, user); + return r; +} + +static int rename(struct dm_ioctl *param, struct dm_ioctl *user) +{ + char *newname = (char *) param + param->data_start; + + if (valid_str(newname, (void *) param, + (void *) param + param->data_size) || + dm_set_name(lookup_name(param), lookup_type(param), newname)) { + DMWARN("Invalid new logical volume name supplied."); + return -EINVAL; + } + + return 0; +} + + +/*----------------------------------------------------------------- + * Implementation of open/close/ioctl on the special char + * device. + *---------------------------------------------------------------*/ +static int ctl_open(struct inode *inode, struct file *file) +{ + /* only root can open this */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + MOD_INC_USE_COUNT; + + return 0; +} + +static int ctl_close(struct inode *inode, struct file *file) +{ + MOD_DEC_USE_COUNT; + return 0; +} + +static ioctl_fn lookup_ioctl(unsigned int cmd) +{ + static struct { + int cmd; + ioctl_fn fn; + } _ioctls[] = { + {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */ + {DM_REMOVE_ALL_CMD, remove_all}, + {DM_DEV_CREATE_CMD, create}, + {DM_DEV_REMOVE_CMD, remove}, + {DM_DEV_RELOAD_CMD, reload}, + {DM_DEV_RENAME_CMD, rename}, + {DM_DEV_SUSPEND_CMD, suspend}, + {DM_DEV_DEPS_CMD, dep}, + {DM_DEV_STATUS_CMD, info}, + {DM_TARGET_STATUS_CMD, get_status}, + {DM_TARGET_WAIT_CMD, wait_device_event}, + }; + static int nelts = sizeof(_ioctls) / sizeof(*_ioctls); + + return (cmd >= nelts) ? NULL : _ioctls[cmd].fn; +} + +/* + * As well as checking the version compatibility this always + * copies the kernel interface version out. + */ +static int check_version(int cmd, struct dm_ioctl *user) +{ + uint32_t version[3]; + int r = 0; + + if (copy_from_user(version, user->version, sizeof(version))) + return -EFAULT; + + if ((DM_VERSION_MAJOR != version[0]) || + (DM_VERSION_MINOR < version[1])) { + DMWARN("ioctl interface mismatch: " + "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", + DM_VERSION_MAJOR, DM_VERSION_MINOR, + DM_VERSION_PATCHLEVEL, + version[0], version[1], version[2], cmd); + r = -EINVAL; + } + + /* + * Fill in the kernel version. 
+	 */
+	version[0] = DM_VERSION_MAJOR;
+	version[1] = DM_VERSION_MINOR;
+	version[2] = DM_VERSION_PATCHLEVEL;
+	if (copy_to_user(user->version, version, sizeof(version)))
+		return -EFAULT;
+
+	return r;
+}
+
+static void free_params(struct dm_ioctl *param)
+{
+	vfree(param);
+}
+
+static int copy_params(struct dm_ioctl *user, struct dm_ioctl **param)
+{
+	struct dm_ioctl tmp, *dmi;
+
+	if (copy_from_user(&tmp, user, sizeof(tmp)))
+		return -EFAULT;
+
+	if (tmp.data_size < sizeof(tmp) || tmp.data_size > 65536)
+		return -EINVAL;
+
+	dmi = (struct dm_ioctl *) vmalloc(tmp.data_size);
+	if (!dmi)
+		return -ENOMEM;
+
+	if (copy_from_user(dmi, user, tmp.data_size)) {
+		vfree(dmi);
+		return -EFAULT;
+	}
+
+	*param = dmi;
+	return 0;
+}
+
+static int validate_params(uint cmd, struct dm_ioctl *param)
+{
+	/* Ignores parameters */
+	if (cmd == DM_REMOVE_ALL_CMD)
+		return 0;
+
+	/* Unless creating, either name or uuid, but not both */
+	if (cmd != DM_DEV_CREATE_CMD) {
+		if ((!*param->uuid && !*param->name) ||
+		    (*param->uuid && *param->name)) {
+			DMWARN("one of name or uuid must be supplied");
+			return -EINVAL;
+		}
+	}
+
+	/* Ensure strings are terminated */
+	param->name[DM_NAME_LEN - 1] = '\0';
+	param->uuid[DM_UUID_LEN - 1] = '\0';
+
+	return 0;
+}
+
+static int ctl_ioctl(struct inode *inode, struct file *file,
+		     uint command, ulong u)
+{
+	int r = 0, cmd;
+	struct dm_ioctl *param;
+	struct dm_ioctl *user = (struct dm_ioctl *) u;
+	ioctl_fn fn = NULL;
+
+	if (_IOC_TYPE(command) != DM_IOCTL)
+		return -ENOTTY;
+
+	cmd = _IOC_NR(command);
+
+	/*
+	 * Check the interface version passed in.  This also
+	 * writes out the kernel's interface version.
+	 */
+	r = check_version(cmd, user);
+	if (r)
+		return r;
+
+	/*
+	 * Nothing more to do for the version command.
+	 */
+	if (cmd == DM_VERSION_CMD)
+		return 0;
+
+	fn = lookup_ioctl(cmd);
+	if (!fn) {
+		DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
+		return -ENOTTY;
+	}
+
+	/*
+	 * Copy the parameters into kernel space.
+ */ + r = copy_params(user, ¶m); + if (r) + return r; + + r = validate_params(cmd, param); + if (r) { + free_params(param); + return r; + } + + r = fn(param, user); + free_params(param); + return r; +} + +static struct file_operations _ctl_fops = { + open: ctl_open, + release: ctl_close, + ioctl: ctl_ioctl, + owner: THIS_MODULE, +}; + +static devfs_handle_t _ctl_handle; + +static struct miscdevice _dm_misc = { + minor: MISC_DYNAMIC_MINOR, + name: DM_NAME, + fops: &_ctl_fops +}; + +static int __init dm_devfs_init(void) { + int r; + char rname[64]; + + r = devfs_generate_path(_dm_misc.devfs_handle, rname + 3, + sizeof rname - 3); + if (r == -ENOSYS) + return 0; /* devfs not present */ + + if (r < 0) { + DMERR("devfs_generate_path failed for control device"); + return r; + } + + strncpy(rname + r, "../", 3); + r = devfs_mk_symlink(NULL, DM_DIR "/control", + DEVFS_FL_DEFAULT, rname + r, &_ctl_handle, NULL); + if (r) { + DMERR("devfs_mk_symlink failed for control device"); + return r; + } + devfs_auto_unregister(_dm_misc.devfs_handle, _ctl_handle); + + return 0; +} + +/* Create misc character device and link to DM_DIR/control */ +int __init dm_interface_init(void) +{ + int r; + + r = misc_register(&_dm_misc); + if (r) { + DMERR("misc_register failed for control device"); + return r; + } + + r = dm_devfs_init(); + if (r) { + misc_deregister(&_dm_misc); + return r; + } + + DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, + DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, + DM_DRIVER_EMAIL); + + return 0; +} + +void dm_interface_exit(void) +{ + if (misc_deregister(&_dm_misc) < 0) + DMERR("misc_deregister failed for control device"); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm-linear.c linux.21rc5-ac2/drivers/md/dm-linear.c --- linux.21rc5/drivers/md/dm-linear.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm-linear.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include + +/* + * Linear: maps a linear range of a device. 
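+ *
+ * Example table line (constructor arguments are
+ * <dev_path> <offset>), mapping sectors 0-1023 of the
+ * logical device onto /dev/hda1 starting at sector 384:
+ *
+ *	0 1024 linear /dev/hda1 384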
+ */ +struct linear_c { + long delta; /* FIXME: we need a signed offset type */ + long start; /* For display only */ + struct dm_dev *dev; +}; + +/* + * Construct a linear mapping: + */ +static int linear_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + struct linear_c *lc; + unsigned long start; /* FIXME: unsigned long long */ + char *end; + + if (argc != 2) { + *context = "dm-linear: Not enough arguments"; + return -EINVAL; + } + + lc = kmalloc(sizeof(*lc), GFP_KERNEL); + if (lc == NULL) { + *context = "dm-linear: Cannot allocate linear context"; + return -ENOMEM; + } + + start = simple_strtoul(argv[1], &end, 10); + if (*end) { + *context = "dm-linear: Invalid device sector"; + goto bad; + } + + if (dm_table_get_device(t, argv[0], start, l, t->mode, &lc->dev)) { + *context = "dm-linear: Device lookup failed"; + goto bad; + } + + lc->delta = (int) start - (int) b; + lc->start = start; + *context = lc; + return 0; + + bad: + kfree(lc); + return -EINVAL; +} + +static void linear_dtr(struct dm_table *t, void *c) +{ + struct linear_c *lc = (struct linear_c *) c; + + dm_table_put_device(t, lc->dev); + kfree(c); +} + +static int linear_map(struct buffer_head *bh, int rw, void *context) +{ + struct linear_c *lc = (struct linear_c *) context; + + bh->b_rdev = lc->dev->dev; + bh->b_rsector = bh->b_rsector + lc->delta; + + return 1; +} + +static int linear_status(status_type_t type, char *result, int maxlen, + void *context) +{ + struct linear_c *lc = (struct linear_c *) context; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + snprintf(result, maxlen, "%s %ld", kdevname(lc->dev->dev), + lc->start); + break; + } + return 0; +} + +static struct target_type linear_target = { + name: "linear", + module: THIS_MODULE, + ctr: linear_ctr, + dtr: linear_dtr, + map: linear_map, + status: linear_status, +}; + +int __init dm_linear_init(void) +{ + int r = dm_register_target(&linear_target); + + if (r < 0) + DMERR("linear: register failed %d", r); + + return r; +} + +void dm_linear_exit(void) +{ + int r = dm_unregister_target(&linear_target); + + if (r < 0) + DMERR("linear: unregister failed %d", r); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm-snapshot.c linux.21rc5-ac2/drivers/md/dm-snapshot.c --- linux.21rc5/drivers/md/dm-snapshot.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm-snapshot.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,1169 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dm-snapshot.h" +#include "kcopyd.h" + +/* + * FIXME: Remove this before release. + */ +#if 0 +#define DMDEBUG(x...) DMWARN( ## x) +#else +#define DMDEBUG(x...) +#endif + +/* + * The percentage increment we will wake up users at + */ +#define WAKE_UP_PERCENT 5 + +/* + * Hard sector size used all over the kernel + */ +#define SECTOR_SIZE 512 + +/* + * kcopyd priority of snapshot operations + */ +#define SNAPSHOT_COPY_PRIORITY 2 + +struct pending_exception { + struct exception e; + + /* + * Origin buffers waiting for this to complete are held + * in a list (using b_reqnext). + */ + struct buffer_head *origin_bhs; + struct buffer_head *snapshot_bhs; + + /* + * Other pending_exceptions that are processing this + * chunk. 
When this list is empty, we know we can + * complete the origins. + */ + struct list_head siblings; + + /* Pointer back to snapshot context */ + struct dm_snapshot *snap; + + /* + * 1 indicates the exception has already been sent to + * kcopyd. + */ + int started; +}; + +/* + * Hash table mapping origin volumes to lists of snapshots and + * a lock to protect it + */ +static kmem_cache_t *exception_cache; +static kmem_cache_t *pending_cache; +static mempool_t *pending_pool; + +/* + * One of these per registered origin, held in the snapshot_origins hash + */ +struct origin { + /* The origin device */ + kdev_t dev; + + struct list_head hash_list; + + /* List of snapshots for this origin */ + struct list_head snapshots; +}; + +/* + * Size of the hash table for origin volumes. If we make this + * the size of the minors list then it should be nearly perfect + */ +#define ORIGIN_HASH_SIZE 256 +#define ORIGIN_MASK 0xFF +static struct list_head *_origins; +static struct rw_semaphore _origins_lock; + +static int init_origin_hash(void) +{ + int i; + + _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), + GFP_KERNEL); + if (!_origins) { + DMERR("Device mapper: Snapshot: unable to allocate memory"); + return -ENOMEM; + } + + for (i = 0; i < ORIGIN_HASH_SIZE; i++) + INIT_LIST_HEAD(_origins + i); + init_rwsem(&_origins_lock); + + return 0; +} + +static void exit_origin_hash(void) +{ + kfree(_origins); +} + +static inline unsigned int origin_hash(kdev_t dev) +{ + return MINOR(dev) & ORIGIN_MASK; +} + +static struct origin *__lookup_origin(kdev_t origin) +{ + struct list_head *slist; + struct list_head *ol; + struct origin *o; + + ol = &_origins[origin_hash(origin)]; + list_for_each(slist, ol) { + o = list_entry(slist, struct origin, hash_list); + + if (o->dev == origin) + return o; + } + + return NULL; +} + +static void __insert_origin(struct origin *o) +{ + struct list_head *sl = &_origins[origin_hash(o->dev)]; + list_add_tail(&o->hash_list, sl); +} + +/* + * Make a note of the snapshot and its origin so we can look it + * up when the origin has a write on it. + */ +static int register_snapshot(struct dm_snapshot *snap) +{ + struct origin *o; + kdev_t dev = snap->origin->dev; + + down_write(&_origins_lock); + o = __lookup_origin(dev); + + if (!o) { + /* New origin */ + o = kmalloc(sizeof(*o), GFP_KERNEL); + if (!o) { + up_write(&_origins_lock); + return -ENOMEM; + } + + /* Initialise the struct */ + INIT_LIST_HEAD(&o->snapshots); + o->dev = dev; + + __insert_origin(o); + } + + list_add_tail(&snap->list, &o->snapshots); + + up_write(&_origins_lock); + return 0; +} + +static void unregister_snapshot(struct dm_snapshot *s) +{ + struct origin *o; + + down_write(&_origins_lock); + o = __lookup_origin(s->origin->dev); + + list_del(&s->list); + if (list_empty(&o->snapshots)) { + list_del(&o->hash_list); + kfree(o); + } + + up_write(&_origins_lock); +} + +/* + * Implementation of the exception hash tables. 
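+ *
+ * The bucket count is always a power of two, so a lookup
+ * reduces the chunk number with a mask rather than a
+ * modulo, e.g. with 4096 buckets:
+ *
+ *	bucket = chunk & (4096 - 1);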
+ */ +static int init_exception_table(struct exception_table *et, uint32_t size) +{ + int i; + + et->hash_mask = size - 1; + et->table = vcalloc(size, sizeof(struct list_head)); + if (!et->table) + return -ENOMEM; + + for (i = 0; i < size; i++) + INIT_LIST_HEAD(et->table + i); + + return 0; +} + +static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem) +{ + struct list_head *slot, *entry, *temp; + struct exception *ex; + int i, size; + + size = et->hash_mask + 1; + for (i = 0; i < size; i++) { + slot = et->table + i; + + list_for_each_safe(entry, temp, slot) { + ex = list_entry(entry, struct exception, hash_list); + kmem_cache_free(mem, ex); + } + } + + vfree(et->table); +} + +/* + * FIXME: check how this hash fn is performing. + */ +static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk) +{ + return chunk & et->hash_mask; +} + +static void insert_exception(struct exception_table *eh, struct exception *e) +{ + struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; + list_add(&e->hash_list, l); +} + +static inline void remove_exception(struct exception *e) +{ + list_del(&e->hash_list); +} + +/* + * Return the exception data for a sector, or NULL if not + * remapped. + */ +static struct exception *lookup_exception(struct exception_table *et, + chunk_t chunk) +{ + struct list_head *slot, *el; + struct exception *e; + + slot = &et->table[exception_hash(et, chunk)]; + list_for_each(el, slot) { + e = list_entry(el, struct exception, hash_list); + if (e->old_chunk == chunk) + return e; + } + + return NULL; +} + +static inline struct exception *alloc_exception(void) +{ + struct exception *e; + + e = kmem_cache_alloc(exception_cache, GFP_NOIO); + if (!e) + e = kmem_cache_alloc(exception_cache, GFP_ATOMIC); + + return e; +} + +static inline void free_exception(struct exception *e) +{ + kmem_cache_free(exception_cache, e); +} + +static inline struct pending_exception *alloc_pending_exception(void) +{ + return mempool_alloc(pending_pool, GFP_NOIO); +} + +static inline void free_pending_exception(struct pending_exception *pe) +{ + mempool_free(pe, pending_pool); +} + +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new) +{ + struct exception *e; + + e = alloc_exception(); + if (!e) + return -ENOMEM; + + e->old_chunk = old; + e->new_chunk = new; + insert_exception(&s->complete, e); + return 0; +} + +/* + * Hard coded magic. + */ +static int calc_max_buckets(void) +{ + unsigned long mem; + + mem = num_physpages << PAGE_SHIFT; + mem /= 50; + mem /= sizeof(struct list_head); + + return mem; +} + +/* + * Rounds a number down to a power of 2. + */ +static inline uint32_t round_down(uint32_t n) +{ + while (n & (n - 1)) + n &= (n - 1); + return n; +} + +/* + * Allocate room for a suitable hash table. + */ +static int init_hash_tables(struct dm_snapshot *s) +{ + offset_t hash_size, cow_dev_size, origin_dev_size, max_buckets; + + /* + * Calculate based on the size of the original volume or + * the COW volume... 
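+	 *
+	 * e.g. assuming a 1GiB origin (2097152 sectors), a
+	 * larger COW device and 16 sector chunks:
+	 * 2097152 / 16 = 131072 buckets, then clamped to
+	 * max_buckets and rounded down to a power of 2.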
+ */ + cow_dev_size = get_dev_size(s->cow->dev); + origin_dev_size = get_dev_size(s->origin->dev); + max_buckets = calc_max_buckets(); + + hash_size = min(origin_dev_size, cow_dev_size) / s->chunk_size; + hash_size = min(hash_size, max_buckets); + + /* Round it down to a power of 2 */ + hash_size = round_down(hash_size); + if (init_exception_table(&s->complete, hash_size)) + return -ENOMEM; + + /* + * Allocate hash table for in-flight exceptions + * Make this smaller than the real hash table + */ + hash_size >>= 3; + if (!hash_size) + hash_size = 64; + + if (init_exception_table(&s->pending, hash_size)) { + exit_exception_table(&s->complete, exception_cache); + return -ENOMEM; + } + + return 0; +} + +/* + * Round a number up to the nearest 'size' boundary. size must + * be a power of 2. + */ +static inline ulong round_up(ulong n, ulong size) +{ + size--; + return (n + size) & ~size; +} + +/* + * Construct a snapshot mapping:
+ * <origin_dev> <COW-dev> <p|n> <chunk-size>
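+ *
+ * e.g. a persistent snapshot of /dev/hda1, keeping the
+ * copied chunks on /dev/hda2 with an 8KiB (16 sector)
+ * chunk size, over a 2097152 sector origin:
+ *
+ *	0 2097152 snapshot /dev/hda1 /dev/hda2 P 16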
+ */ +static int snapshot_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + struct dm_snapshot *s; + unsigned long chunk_size; + int r = -EINVAL; + char *persistent; + char *origin_path; + char *cow_path; + char *value; + int blocksize; + + if (argc < 4) { + *context = "dm-snapshot: requires exactly 4 arguments"; + r = -EINVAL; + goto bad; + } + + origin_path = argv[0]; + cow_path = argv[1]; + persistent = argv[2]; + + if ((*persistent & 0x5f) != 'P' && (*persistent & 0x5f) != 'N') { + *context = "Persistent flag is not P or N"; + r = -EINVAL; + goto bad; + } + + chunk_size = simple_strtoul(argv[3], &value, 10); + if (chunk_size == 0 || value == NULL) { + *context = "Invalid chunk size"; + r = -EINVAL; + goto bad; + } + + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) { + *context = "Cannot allocate snapshot context private structure"; + r = -ENOMEM; + goto bad; + } + + r = dm_table_get_device(t, origin_path, 0, 0, FMODE_READ, &s->origin); + if (r) { + *context = "Cannot get origin device"; + goto bad_free; + } + + r = dm_table_get_device(t, cow_path, 0, 0, + FMODE_READ | FMODE_WRITE, &s->cow); + if (r) { + dm_table_put_device(t, s->origin); + *context = "Cannot get COW device"; + goto bad_free; + } + + /* + * Chunk size must be multiple of page size. Silently + * round up if it's not. + */ + chunk_size = round_up(chunk_size, PAGE_SIZE / SECTOR_SIZE); + + /* Validate the chunk size against the device block size */ + blocksize = get_hardsect_size(s->cow->dev); + if (chunk_size % (blocksize / SECTOR_SIZE)) { + *context = "Chunk size is not a multiple of device blocksize"; + r = -EINVAL; + goto bad_putdev; + } + + /* Check the sizes are small enough to fit in one kiovec */ + if (chunk_size > KIO_MAX_SECTORS) { + *context = "Chunk size is too big"; + r = -EINVAL; + goto bad_putdev; + } + + /* Check chunk_size is a power of 2 */ + if (chunk_size & (chunk_size - 1)) { + *context = "Chunk size is not a power of 2"; + r = -EINVAL; + goto bad_putdev; + } + + s->chunk_size = chunk_size; + s->chunk_mask = chunk_size - 1; + s->type = *persistent; + for (s->chunk_shift = 0; chunk_size; + s->chunk_shift++, chunk_size >>= 1) + ; + s->chunk_shift--; + + s->valid = 1; + s->last_percent = 0; + s->table = t; + init_rwsem(&s->lock); + + /* Allocate hash table for COW data */ + if (init_hash_tables(s)) { + *context = "Unable to allocate hash table space"; + r = -ENOMEM; + goto bad_putdev; + } + + /* + * Check the persistent flag - done here because we need the iobuf + * to check the LV header + */ + s->store.snap = s; + + if ((*persistent & 0x5f) == 'P') + r = dm_create_persistent(&s->store, s->chunk_size); + else + r = dm_create_transient(&s->store, s, blocksize, context); + + if (r) { + *context = "Couldn't create exception store"; + r = -EINVAL; + goto bad_free1; + } + + /* Flush IO to the origin device */ +#if LVM_VFS_ENHANCEMENT + fsync_dev_lockfs(s->origin->dev); +#else + fsync_dev(s->origin->dev); +#endif + + /* Add snapshot to the list of snapshots for this origin */ + if (register_snapshot(s)) { + r = -EINVAL; + *context = "Cannot register snapshot origin"; + goto bad_free2; + } +#if LVM_VFS_ENHANCEMENT + unlockfs(s->origin->dev); +#endif + kcopyd_inc_client_count(); + + *context = s; + return 0; + + bad_free2: + s->store.destroy(&s->store); + + bad_free1: + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + bad_putdev: + dm_table_put_device(t, s->cow); + dm_table_put_device(t, s->origin); + + 
bad_free: + kfree(s); + + bad: + return r; +} + +static void snapshot_dtr(struct dm_table *t, void *context) +{ + struct dm_snapshot *s = (struct dm_snapshot *) context; + + dm_table_event(s->table); + + unregister_snapshot(s); + + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + /* Deallocate memory used */ + s->store.destroy(&s->store); + + dm_table_put_device(t, s->origin); + dm_table_put_device(t, s->cow); + kfree(s); + + kcopyd_dec_client_count(); +} + +/* + * We hold lists of buffer_heads, using the b_reqnext field. + */ +static void queue_buffer(struct buffer_head **queue, struct buffer_head *bh) +{ + bh->b_reqnext = *queue; + *queue = bh; +} + +/* + * Flush a list of buffers. + */ +static void flush_buffers(struct buffer_head *bh) +{ + struct buffer_head *n; + + DMDEBUG("begin flush"); + while (bh) { + n = bh->b_reqnext; + bh->b_reqnext = NULL; + DMDEBUG("flushing %p", bh); + generic_make_request(WRITE, bh); + bh = n; + } + + run_task_queue(&tq_disk); +} + +/* + * Error a list of buffers. + */ +static void error_buffers(struct buffer_head *bh) +{ + struct buffer_head *n; + + while (bh) { + n = bh->b_reqnext; + bh->b_reqnext = NULL; + buffer_IO_error(bh); + bh = n; + } +} + +static void pending_complete(struct pending_exception *pe, int success) +{ + struct exception *e; + struct dm_snapshot *s = pe->snap; + + if (success) { + e = alloc_exception(); + if (!e) { + printk("Unable to allocate exception."); + down_write(&s->lock); + s->store.drop_snapshot(&s->store); + s->valid = 0; + up_write(&s->lock); + return; + } + + /* + * Add a proper exception, and remove the + * inflight exception from the list. + */ + down_write(&s->lock); + + memcpy(e, &pe->e, sizeof(*e)); + insert_exception(&s->complete, e); + remove_exception(&pe->e); + + /* Submit any pending write BHs */ + up_write(&s->lock); + + flush_buffers(pe->snapshot_bhs); + DMDEBUG("Exception completed successfully."); + + /* Notify any interested parties */ + if (s->store.percent_full) { + int pc = s->store.percent_full(&s->store); + + if (pc >= s->last_percent + WAKE_UP_PERCENT) { + dm_table_event(s->table); + s->last_percent = pc - pc % WAKE_UP_PERCENT; + } + } + + } else { + /* Read/write error - snapshot is unusable */ + DMERR("Error reading/writing snapshot"); + + down_write(&s->lock); + s->store.drop_snapshot(&s->store); + s->valid = 0; + remove_exception(&pe->e); + up_write(&s->lock); + + error_buffers(pe->snapshot_bhs); + + dm_table_event(s->table); + DMDEBUG("Exception failed."); + } + + if (list_empty(&pe->siblings)) + flush_buffers(pe->origin_bhs); + else + list_del(&pe->siblings); + + free_pending_exception(pe); +} + +static void commit_callback(void *context, int success) +{ + struct pending_exception *pe = (struct pending_exception *) context; + pending_complete(pe, success); +} + +/* + * Called when the copy I/O has finished. kcopyd actually runs + * this code so don't block. + */ +static void copy_callback(int err, void *context) +{ + struct pending_exception *pe = (struct pending_exception *) context; + struct dm_snapshot *s = pe->snap; + + if (err) + pending_complete(pe, 0); + + else + /* Update the metadata if we are persistent */ + s->store.commit_exception(&s->store, &pe->e, commit_callback, + pe); +} + +/* + * Dispatches the copy operation to kcopyd. 
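+ *
+ * The full asynchronous chain, for reference: kcopyd
+ * completes the chunk copy and calls copy_callback();
+ * on success that hands the exception to
+ * store.commit_exception(), whose commit_callback()
+ * runs pending_complete(), which finally releases the
+ * buffer_heads queued on the pending_exception.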
+ */
+static inline void start_copy(struct pending_exception *pe)
+{
+	struct dm_snapshot *s = pe->snap;
+	struct kcopyd_region src, dest;
+
+	src.dev = s->origin->dev;
+	src.sector = chunk_to_sector(s, pe->e.old_chunk);
+	src.count = s->chunk_size;
+
+	dest.dev = s->cow->dev;
+	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
+	dest.count = s->chunk_size;
+
+	if (!pe->started) {
+		/* Hand over to kcopyd */
+		kcopyd_copy(&src, &dest, copy_callback, pe);
+		pe->started = 1;
+	}
+}
+
+/*
+ * Looks to see if this snapshot already has a pending exception
+ * for this chunk, otherwise it allocates a new one and inserts
+ * it into the pending table.
+ */
+static struct pending_exception *find_pending_exception(struct dm_snapshot *s,
+							struct buffer_head *bh)
+{
+	struct exception *e;
+	struct pending_exception *pe;
+	chunk_t chunk = sector_to_chunk(s, bh->b_rsector);
+
+	/*
+	 * Is there a pending exception for this already ?
+	 */
+	e = lookup_exception(&s->pending, chunk);
+	if (e) {
+		/* cast the exception to a pending exception */
+		pe = list_entry(e, struct pending_exception, e);
+
+	} else {
+		/* Create a new pending exception */
+		pe = alloc_pending_exception();
+		if (!pe) {
+			DMWARN("Couldn't allocate pending exception.");
+			return NULL;
+		}
+
+		pe->e.old_chunk = chunk;
+		pe->origin_bhs = pe->snapshot_bhs = NULL;
+		INIT_LIST_HEAD(&pe->siblings);
+		pe->snap = s;
+		pe->started = 0;
+
+		if (s->store.prepare_exception(&s->store, &pe->e)) {
+			free_pending_exception(pe);
+			s->valid = 0;
+			return NULL;
+		}
+
+		insert_exception(&s->pending, &pe->e);
+	}
+
+	return pe;
+}
+
+static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
+				   struct buffer_head *bh)
+{
+	bh->b_rdev = s->cow->dev;
+	bh->b_rsector = chunk_to_sector(s, e->new_chunk) +
+	    (bh->b_rsector & s->chunk_mask);
+}
+
+static int snapshot_map(struct buffer_head *bh, int rw, void *context)
+{
+	struct exception *e;
+	struct dm_snapshot *s = (struct dm_snapshot *) context;
+	int r = 1;
+	chunk_t chunk;
+	struct pending_exception *pe;
+
+	chunk = sector_to_chunk(s, bh->b_rsector);
+
+	/* Full snapshots are not usable */
+	if (!s->valid)
+		return -1;
+
+	/*
+	 * Write to snapshot - higher level takes care of RW/RO
+	 * flags so we should only get this if we are
+	 * writeable.
+	 */
+	if (rw == WRITE) {
+
+		down_write(&s->lock);
+
+		/* If the block is already remapped - use that, else remap it */
+		e = lookup_exception(&s->complete, chunk);
+		if (e)
+			remap_exception(s, e, bh);
+
+		else {
+			pe = find_pending_exception(s, bh);
+
+			if (!pe) {
+				/* No exception could be prepared */
+				s->store.drop_snapshot(&s->store);
+				s->valid = 0;
+				r = -1;
+			} else {
+				queue_buffer(&pe->snapshot_bhs, bh);
+				start_copy(pe);
+				r = 0;
+			}
+		}
+
+		up_write(&s->lock);
+
+	} else {
+		/*
+		 * FIXME: this read path scares me because we
+		 * always use the origin when we have a pending
+		 * exception.  However I can't think of a
+		 * situation where this is wrong - ejt.
+ */ + + /* Do reads */ + down_read(&s->lock); + + /* See if it has been remapped */ + e = lookup_exception(&s->complete, chunk); + if (e) + remap_exception(s, e, bh); + else + bh->b_rdev = s->origin->dev; + + up_read(&s->lock); + } + + return r; +} + +static void list_merge(struct list_head *l1, struct list_head *l2) +{ + struct list_head *l1_n, *l2_p; + + l1_n = l1->next; + l2_p = l2->prev; + + l1->next = l2; + l2->prev = l1; + + l2_p->next = l1_n; + l1_n->prev = l2_p; +} + +static int __origin_write(struct list_head *snapshots, struct buffer_head *bh) +{ + int r = 1; + struct list_head *sl; + struct dm_snapshot *snap; + struct exception *e; + struct pending_exception *pe, *last = NULL; + chunk_t chunk; + + /* Do all the snapshots on this origin */ + list_for_each(sl, snapshots) { + snap = list_entry(sl, struct dm_snapshot, list); + + /* Only deal with valid snapshots */ + if (!snap->valid) + continue; + + down_write(&snap->lock); + + /* + * Remember, different snapshots can have + * different chunk sizes. + */ + chunk = sector_to_chunk(snap, bh->b_rsector); + + /* + * Check exception table to see if block + * is already remapped in this snapshot + * and trigger an exception if not. + */ + e = lookup_exception(&snap->complete, chunk); + if (!e) { + pe = find_pending_exception(snap, bh); + if (!pe) { + snap->store.drop_snapshot(&snap->store); + snap->valid = 0; + + } else { + if (last) + list_merge(&pe->siblings, + &last->siblings); + + last = pe; + r = 0; + } + } + + up_write(&snap->lock); + } + + /* + * Now that we have a complete pe list we can start the copying. + */ + if (last) { + pe = last; + do { + down_write(&pe->snap->lock); + queue_buffer(&pe->origin_bhs, bh); + start_copy(pe); + up_write(&pe->snap->lock); + pe = list_entry(pe->siblings.next, + struct pending_exception, siblings); + + } while (pe != last); + } + + return r; +} + +static int snapshot_status(status_type_t type, char *result, + int maxlen, void *context) +{ + struct dm_snapshot *snap = (struct dm_snapshot *) context; + char cow[16]; + char org[16]; + + switch (type) { + case STATUSTYPE_INFO: + if (!snap->valid) + snprintf(result, maxlen, "Invalid"); + else { + if (snap->store.percent_full) + snprintf(result, maxlen, "%d%%", + snap->store.percent_full(&snap-> + store)); + else + snprintf(result, maxlen, "Unknown"); + } + break; + + case STATUSTYPE_TABLE: + /* + * kdevname returns a static pointer so we need + * to make private copies if the output is to + * make sense. + */ + strncpy(cow, kdevname(snap->cow->dev), sizeof(cow)); + strncpy(org, kdevname(snap->origin->dev), sizeof(org)); + snprintf(result, maxlen, "%s %s %c %ld", org, cow, + snap->type, snap->chunk_size); + break; + } + + return 0; +} + +/* + * Called on a write from the origin driver. + */ +int do_origin(struct dm_dev *origin, struct buffer_head *bh) +{ + struct origin *o; + int r; + + down_read(&_origins_lock); + o = __lookup_origin(origin->dev); + if (!o) + BUG(); + + r = __origin_write(&o->snapshots, bh); + up_read(&_origins_lock); + + return r; +} + +/* + * Origin: maps a linear range of a device, with hooks for snapshotting. + */ + +/* + * Construct an origin mapping: + * The context for an origin is merely a 'struct dm_dev *' + * pointing to the real device.
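+ * + * A table line for this target therefore looks like, for example: + * + * 0 262144 snapshot-origin /dev/hda1 + * + * (start and length in 512-byte sectors; the single argument is the + * device holding the real data, as parsed by origin_ctr() below).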
+ */ +static int origin_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + int r; + struct dm_dev *dev; + + if (argc != 1) { + *context = "dm-origin: incorrect number of arguments"; + return -EINVAL; + } + + r = dm_table_get_device(t, argv[0], 0, l, t->mode, &dev); + if (r) { + *context = "Cannot get target device"; + return r; + } + + *context = dev; + + return 0; +} + +static void origin_dtr(struct dm_table *t, void *c) +{ + struct dm_dev *dev = (struct dm_dev *) c; + dm_table_put_device(t, dev); +} + +static int origin_map(struct buffer_head *bh, int rw, void *context) +{ + struct dm_dev *dev = (struct dm_dev *) context; + bh->b_rdev = dev->dev; + + /* Only tell snapshots if this is a write */ + return (rw == WRITE) ? do_origin(dev, bh) : 1; +} + +static int origin_status(status_type_t type, char *result, + int maxlen, void *context) +{ + struct dm_dev *dev = (struct dm_dev *) context; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + snprintf(result, maxlen, "%s", kdevname(dev->dev)); + break; + } + + return 0; +} + +static struct target_type origin_target = { + name: "snapshot-origin", + module: THIS_MODULE, + ctr: origin_ctr, + dtr: origin_dtr, + map: origin_map, + status: origin_status, + err: NULL +}; + +static struct target_type snapshot_target = { + name: "snapshot", + module: THIS_MODULE, + ctr: snapshot_ctr, + dtr: snapshot_dtr, + map: snapshot_map, + status: snapshot_status, + err: NULL +}; + +int __init dm_snapshot_init(void) +{ + int r; + + r = dm_register_target(&snapshot_target); + if (r) { + DMERR("snapshot target register failed %d", r); + return r; + } + + r = dm_register_target(&origin_target); + if (r < 0) { + DMERR("Device mapper: Origin: register failed %d\n", r); + goto bad1; + } + + r = init_origin_hash(); + if (r) { + DMERR("init_origin_hash failed."); + goto bad2; + } + + exception_cache = kmem_cache_create("dm-snapshot-ex", + sizeof(struct exception), + __alignof__(struct exception), + 0, NULL, NULL); + if (!exception_cache) { + DMERR("Couldn't create exception cache."); + r = -ENOMEM; + goto bad3; + } + + pending_cache = + kmem_cache_create("dm-snapshot-in", + sizeof(struct pending_exception), + __alignof__(struct pending_exception), + 0, NULL, NULL); + if (!pending_cache) { + DMERR("Couldn't create pending cache."); + r = -ENOMEM; + goto bad4; + } + + pending_pool = mempool_create(128, mempool_alloc_slab, + mempool_free_slab, pending_cache); + if (!pending_pool) { + DMERR("Couldn't create pending pool."); + r = -ENOMEM; + goto bad5; + } + + return 0; + + bad5: + kmem_cache_destroy(pending_cache); + bad4: + kmem_cache_destroy(exception_cache); + bad3: + exit_origin_hash(); + bad2: + dm_unregister_target(&origin_target); + bad1: + dm_unregister_target(&snapshot_target); + return r; +} + +void dm_snapshot_exit(void) +{ + int r; + + r = dm_unregister_target(&snapshot_target); + if (r) + DMERR("snapshot unregister failed %d", r); + + r = dm_unregister_target(&origin_target); + if (r) + DMERR("origin unregister failed %d", r); + + exit_origin_hash(); + mempool_destroy(pending_pool); + kmem_cache_destroy(pending_cache); + kmem_cache_destroy(exception_cache); +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm-snapshot.h linux.21rc5-ac2/drivers/md/dm-snapshot.h --- linux.21rc5/drivers/md/dm-snapshot.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm-snapshot.h 2003-05-29 01:44:07.000000000 +0100 @@ -0,0 +1,147 @@ +/* + * dm-snapshot.h + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#ifndef DM_SNAPSHOT_H +#define DM_SNAPSHOT_H + +#include "dm.h" +#include + +struct exception_table { + uint32_t hash_mask; + struct list_head *table; +}; + +/* + * The snapshot code deals with largish chunks of the disk at a + * time. Typically 64k - 256k. + */ +/* FIXME: can we get away with limiting these to a uint32_t? */ +typedef offset_t chunk_t; + +/* + * An exception is used where an old chunk of data has been + * replaced by a new one. + */ +struct exception { + struct list_head hash_list; + + chunk_t old_chunk; + chunk_t new_chunk; +}; + +/* + * Abstraction to handle the meta/layout of exception stores (the + * COW device). + */ +struct exception_store { + + /* + * Destroys this object when you've finished with it. + */ + void (*destroy) (struct exception_store *store); + + /* + * Find somewhere to store the next exception. + */ + int (*prepare_exception) (struct exception_store *store, + struct exception *e); + + /* + * Update the metadata with this exception. + */ + void (*commit_exception) (struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context); + + /* + * The snapshot is invalid, note this in the metadata. + */ + void (*drop_snapshot) (struct exception_store *store); + + /* + * Return the %age full of the snapshot + */ + int (*percent_full) (struct exception_store *store); + + struct dm_snapshot *snap; + void *context; +}; + +struct dm_snapshot { + struct rw_semaphore lock; + struct dm_table *table; + + struct dm_dev *origin; + struct dm_dev *cow; + + /* List of snapshots per Origin */ + struct list_head list; + + /* Size of data blocks saved - must be a power of 2 */ + chunk_t chunk_size; + chunk_t chunk_mask; + chunk_t chunk_shift; + + /* You can't use a snapshot if this is 0 (e.g. if full) */ + int valid; + + /* Used for display of table */ + char type; + + /* The last percentage we notified */ + int last_percent; + + struct exception_table pending; + struct exception_table complete; + + /* The on disk metadata handler */ + struct exception_store store; +}; + +/* + * Used by the exception stores to load exceptions when + * initialising. + */ +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new); + +/* + * Constructor and destructor for the default persistent + * store. + */ +int dm_create_persistent(struct exception_store *store, uint32_t chunk_size); + +int dm_create_transient(struct exception_store *store, + struct dm_snapshot *s, int blocksize, void **error); + +/* + * Return the number of sectors in the device.
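+ * + * blk_size[] is kept in 1K units, hence the shift left by one to + * convert to 512-byte sectors: e.g. a device of 4096 blocks (4MB) + * reports 8192 sectors.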
+ */ +static inline offset_t get_dev_size(kdev_t dev) +{ + int *sizes; + + sizes = blk_size[MAJOR(dev)]; + if (sizes) + return sizes[MINOR(dev)] << 1; + + return 0; +} + +static inline chunk_t sector_to_chunk(struct dm_snapshot *s, offset_t sector) +{ + return (sector & ~s->chunk_mask) >> s->chunk_shift; +} + +static inline offset_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk) +{ + return chunk << s->chunk_shift; +} + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm-stripe.c linux.21rc5-ac2/drivers/md/dm-stripe.c --- linux.21rc5/drivers/md/dm-stripe.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm-stripe.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,234 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include + +struct stripe { + struct dm_dev *dev; + offset_t physical_start; +}; + +struct stripe_c { + offset_t logical_start; + uint32_t stripes; + + /* The size of this target / num. stripes */ + uint32_t stripe_width; + + /* stripe chunk size */ + uint32_t chunk_shift; + offset_t chunk_mask; + + struct stripe stripe[0]; +}; + +static inline struct stripe_c *alloc_context(int stripes) +{ + size_t len; + + if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), + stripes)) + return NULL; + + len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); + + return kmalloc(len, GFP_KERNEL); +} + +/* + * Parse a single <dev> <sector> pair + */ +static int get_stripe(struct dm_table *t, struct stripe_c *sc, + int stripe, char **argv) +{ + char *end; + unsigned long start; + + start = simple_strtoul(argv[1], &end, 10); + if (*end) + return -EINVAL; + + if (dm_table_get_device(t, argv[0], start, sc->stripe_width, + t->mode, &sc->stripe[stripe].dev)) + return -ENXIO; + + sc->stripe[stripe].physical_start = start; + return 0; +} + +/* + * Construct a striped mapping. + * <number of stripes> <chunk size> [<dev> <sector>]+ + */ +static int stripe_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + struct stripe_c *sc; + uint32_t stripes; + uint32_t chunk_size; + char *end; + int r, i; + + if (argc < 2) { + *context = "dm-stripe: Not enough arguments"; + return -EINVAL; + } + + stripes = simple_strtoul(argv[0], &end, 10); + if (*end) { + *context = "dm-stripe: Invalid stripe count"; + return -EINVAL; + } + + chunk_size = simple_strtoul(argv[1], &end, 10); + if (*end) { + *context = "dm-stripe: Invalid chunk_size"; + return -EINVAL; + } + + if (l % stripes) { + *context = "dm-stripe: Target length not divisible by " + "number of stripes"; + return -EINVAL; + } + + sc = alloc_context(stripes); + if (!sc) { + *context = "dm-stripe: Memory allocation for striped context " + "failed"; + return -ENOMEM; + } + + sc->logical_start = b; + sc->stripes = stripes; + sc->stripe_width = l / stripes; + + /* + * chunk_size is a power of two + */ + if (!chunk_size || (chunk_size & (chunk_size - 1))) { + *context = "dm-stripe: Invalid chunk size"; + kfree(sc); + return -EINVAL; + } + + sc->chunk_mask = chunk_size - 1; + for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) + chunk_size >>= 1; + sc->chunk_shift--; + + /* + * Get the stripe destinations.
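+ * + * For example, a table line such as + * + * 0 1024000 striped 2 64 /dev/hda1 0 /dev/hdb1 0 + * + * stripes the first 1024000 sectors across /dev/hda1 and /dev/hdb1 + * in 64-sector (32K) chunks, each stripe starting at sector 0 of + * its device.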
+ */ + for (i = 0; i < stripes; i++) { + if (argc < 2) { + *context = "dm-stripe: Not enough destinations " + "specified"; + kfree(sc); + return -EINVAL; + } + + argv += 2; + + r = get_stripe(t, sc, i, argv); + if (r < 0) { + *context = "dm-stripe: Couldn't parse stripe " + "destination"; + while (i--) + dm_table_put_device(t, sc->stripe[i].dev); + kfree(sc); + return r; + } + } + + *context = sc; + return 0; +} + +static void stripe_dtr(struct dm_table *t, void *c) +{ + unsigned int i; + struct stripe_c *sc = (struct stripe_c *) c; + + for (i = 0; i < sc->stripes; i++) + dm_table_put_device(t, sc->stripe[i].dev); + + kfree(sc); +} + +static int stripe_map(struct buffer_head *bh, int rw, void *context) +{ + struct stripe_c *sc = (struct stripe_c *) context; + + offset_t offset = bh->b_rsector - sc->logical_start; + uint32_t chunk = (uint32_t) (offset >> sc->chunk_shift); + uint32_t stripe = chunk % sc->stripes; /* 32bit modulus */ + chunk = chunk / sc->stripes; + + bh->b_rdev = sc->stripe[stripe].dev->dev; + bh->b_rsector = sc->stripe[stripe].physical_start + + (chunk << sc->chunk_shift) + (offset & sc->chunk_mask); + return 1; +} + +static int stripe_status(status_type_t type, char *result, int maxlen, + void *context) +{ + struct stripe_c *sc = (struct stripe_c *) context; + int offset; + int i; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + offset = snprintf(result, maxlen, "%d %ld", + sc->stripes, sc->chunk_mask + 1); + for (i = 0; i < sc->stripes; i++) { + offset += + snprintf(result + offset, maxlen - offset, + " %s %ld", + kdevname(sc->stripe[i].dev->dev), + sc->stripe[i].physical_start); + } + break; + } + return 0; +} + +static struct target_type stripe_target = { + name: "striped", + module: THIS_MODULE, + ctr: stripe_ctr, + dtr: stripe_dtr, + map: stripe_map, + status: stripe_status, +}; + +int __init dm_stripe_init(void) +{ + int r; + + r = dm_register_target(&stripe_target); + if (r < 0) + DMWARN("striped target registration failed"); + + return r; +} + +void dm_stripe_exit(void) +{ + if (dm_unregister_target(&stripe_target)) + DMWARN("striped target unregistration failed"); + + return; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm-table.c linux.21rc5-ac2/drivers/md/dm-table.c --- linux.21rc5/drivers/md/dm-table.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm-table.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,452 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include + +/* ceiling(n / size) * size */ +static inline unsigned long round_up(unsigned long n, unsigned long size) +{ + unsigned long r = n % size; + return n + (r ? (size - r) : 0); +} + +/* ceiling(n / size) */ +static inline unsigned long div_up(unsigned long n, unsigned long size) +{ + return round_up(n, size) / size; +} + +/* similar to ceiling(log_size(n)) */ +static uint int_log(unsigned long n, unsigned long base) +{ + int result = 0; + + while (n > 1) { + n = div_up(n, base); + result++; + } + + return result; +} + +/* + * return the highest key that you could lookup + * from the n'th node on level l of the btree. 
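+ * + * The leaf level of the index is just t->highs, the highest valid + * sector of each target; each node above packs KEYS_PER_NODE such + * keys, so a sector can be routed to its target with one node visit + * per level (see dm_table_complete() below).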
+ */ +static offset_t high(struct dm_table *t, int l, int n) +{ + for (; l < t->depth - 1; l++) + n = get_child(n, CHILDREN_PER_NODE - 1); + + if (n >= t->counts[l]) + return (offset_t) - 1; + + return get_node(t, l, n)[KEYS_PER_NODE - 1]; +} + +/* + * fills in a level of the btree based on the + * highs of the level below it. + */ +static int setup_btree_index(int l, struct dm_table *t) +{ + int n, k; + offset_t *node; + + for (n = 0; n < t->counts[l]; n++) { + node = get_node(t, l, n); + + for (k = 0; k < KEYS_PER_NODE; k++) + node[k] = high(t, l + 1, get_child(n, k)); + } + + return 0; +} + +/* + * highs, and targets are managed as dynamic + * arrays during a table load. + */ +static int alloc_targets(struct dm_table *t, int num) +{ + offset_t *n_highs; + struct target *n_targets; + int n = t->num_targets; + + /* + * Allocate both the target array and offset array at once. + */ + n_highs = (offset_t *) vcalloc(sizeof(struct target) + sizeof(offset_t), + num); + if (!n_highs) + return -ENOMEM; + + n_targets = (struct target *) (n_highs + num); + + if (n) { + memcpy(n_highs, t->highs, sizeof(*n_highs) * n); + memcpy(n_targets, t->targets, sizeof(*n_targets) * n); + } + + memset(n_highs + n, -1, sizeof(*n_highs) * (num - n)); + if (t->highs) + vfree(t->highs); + + t->num_allocated = num; + t->highs = n_highs; + t->targets = n_targets; + + return 0; +} + +int dm_table_create(struct dm_table **result, int mode) +{ + struct dm_table *t = kmalloc(sizeof(*t), GFP_NOIO); + + if (!t) + return -ENOMEM; + + memset(t, 0, sizeof(*t)); + INIT_LIST_HEAD(&t->devices); + + /* allocate a single node's worth of targets to begin with */ + if (alloc_targets(t, KEYS_PER_NODE)) { + kfree(t); + t = NULL; + return -ENOMEM; + } + + init_waitqueue_head(&t->eventq); + t->mode = mode; + *result = t; + return 0; +} + +static void free_devices(struct list_head *devices) +{ + struct list_head *tmp, *next; + + for (tmp = devices->next; tmp != devices; tmp = next) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + next = tmp->next; + kfree(dd); + } +} + +void dm_table_destroy(struct dm_table *t) +{ + int i; + + /* destroying the table counts as an event */ + dm_table_event(t); + + /* free the indexes (see dm_table_complete) */ + if (t->depth >= 2) + vfree(t->index[t->depth - 2]); + + /* free the targets */ + for (i = 0; i < t->num_targets; i++) { + struct target *tgt = &t->targets[i]; + + dm_put_target_type(t->targets[i].type); + + if (tgt->type->dtr) + tgt->type->dtr(t, tgt->private); + } + + vfree(t->highs); + + /* free the device list */ + if (t->devices.next != &t->devices) { + DMWARN("devices still present during destroy: " + "dm_table_remove_device calls missing"); + + free_devices(&t->devices); + } + + kfree(t); +} + +/* + * Checks to see if we need to extend highs or targets. + */ +static inline int check_space(struct dm_table *t) +{ + if (t->num_targets >= t->num_allocated) + return alloc_targets(t, t->num_allocated * 2); + + return 0; +} + +/* + * Convert a device path to a kdev_t. + */ +int lookup_device(const char *path, kdev_t *dev) +{ + int r; + struct nameidata nd; + struct inode *inode; + + if (!path_init(path, LOOKUP_FOLLOW, &nd)) + return 0; + + if ((r = path_walk(path, &nd))) + goto bad; + + inode = nd.dentry->d_inode; + if (!inode) { + r = -ENOENT; + goto bad; + } + + if (!S_ISBLK(inode->i_mode)) { + r = -EINVAL; + goto bad; + } + + *dev = inode->i_rdev; + + bad: + path_release(&nd); + return r; +} + +/* + * See if we've already got a device in the list. 
+ */ +static struct dm_dev *find_device(struct list_head *l, kdev_t dev) +{ + struct list_head *tmp; + + list_for_each(tmp, l) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + if (dd->dev == dev) + return dd; + } + + return NULL; +} + +/* + * Open a device so we can use it as a map destination. + */ +static int open_dev(struct dm_dev *d) +{ + int err; + + if (d->bd) + BUG(); + + if (!(d->bd = bdget(kdev_t_to_nr(d->dev)))) + return -ENOMEM; + + if ((err = blkdev_get(d->bd, d->mode, 0, BDEV_FILE))) + return err; + + return 0; +} + +/* + * Close a device that we've been using. + */ +static void close_dev(struct dm_dev *d) +{ + if (!d->bd) + return; + + blkdev_put(d->bd, BDEV_FILE); + d->bd = NULL; +} + +/* + * If possible (i.e. blk_size[major] is set), this + * checks that an area of a destination device is + * valid. + */ +static int check_device_area(kdev_t dev, offset_t start, offset_t len) +{ + int *sizes; + offset_t dev_size; + + if (!(sizes = blk_size[MAJOR(dev)]) || !(dev_size = sizes[MINOR(dev)])) + /* we don't know the device details, + * so give the benefit of the doubt */ + return 1; + + /* convert to 512-byte sectors */ + dev_size <<= 1; + + return ((start < dev_size) && (len <= (dev_size - start))); +} + +/* + * This upgrades the mode on an already open dm_dev, being + * careful to leave things as they were if we fail to reopen the + * device. + */ +static int upgrade_mode(struct dm_dev *dd, int new_mode) +{ + int r; + struct dm_dev dd_copy; + + memcpy(&dd_copy, dd, sizeof(dd_copy)); + + dd->mode |= new_mode; + dd->bd = NULL; + r = open_dev(dd); + if (!r) + close_dev(&dd_copy); + else + memcpy(dd, &dd_copy, sizeof(dd_copy)); + + return r; +} + +/* + * Add a device to the list, or just increment the usage count + * if it's already present. + */ +int dm_table_get_device(struct dm_table *t, const char *path, + offset_t start, offset_t len, int mode, + struct dm_dev **result) +{ + int r; + kdev_t dev; + struct dm_dev *dd; + int major, minor; + + if (sscanf(path, "%x:%x", &major, &minor) == 2) { + /* Extract the major/minor numbers */ + dev = MKDEV(major, minor); + } else { + /* convert the path to a device */ + if ((r = lookup_device(path, &dev))) + return r; + } + + dd = find_device(&t->devices, dev); + if (!dd) { + dd = kmalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; + + dd->mode = mode; + dd->dev = dev; + dd->bd = NULL; + + if ((r = open_dev(dd))) { + kfree(dd); + return r; + } + + atomic_set(&dd->count, 0); + list_add(&dd->list, &t->devices); + + } else if (dd->mode != (mode | dd->mode)) { + r = upgrade_mode(dd, mode); + if (r) + return r; + } + atomic_inc(&dd->count); + + if (!check_device_area(dd->dev, start, len)) { + DMWARN("device %s too small for target", path); + dm_table_put_device(t, dd); + return -EINVAL; + } + + *result = dd; + + return 0; +} + +/* + * Decrement a device's use count and remove it if necessary.
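+ * + * Every successful dm_table_get_device() must eventually be balanced + * by a call here; see for instance stripe_dtr() in dm-stripe.c above, + * which drops one reference per stripe before freeing its context.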
+ */ +void dm_table_put_device(struct dm_table *t, struct dm_dev *dd) +{ + if (atomic_dec_and_test(&dd->count)) { + close_dev(dd); + list_del(&dd->list); + kfree(dd); + } +} + +/* + * Adds a target to the map + */ +int dm_table_add_target(struct dm_table *t, offset_t highs, + struct target_type *type, void *private) +{ + int r, n; + + if ((r = check_space(t))) + return r; + + n = t->num_targets++; + t->highs[n] = highs; + t->targets[n].type = type; + t->targets[n].private = private; + + return 0; +} + +static int setup_indexes(struct dm_table *t) +{ + int i, total = 0; + offset_t *indexes; + + /* allocate the space for *all* the indexes */ + for (i = t->depth - 2; i >= 0; i--) { + t->counts[i] = div_up(t->counts[i + 1], CHILDREN_PER_NODE); + total += t->counts[i]; + } + + indexes = (offset_t *) vcalloc(total, (unsigned long) NODE_SIZE); + if (!indexes) + return -ENOMEM; + + /* set up internal nodes, bottom-up */ + for (i = t->depth - 2, total = 0; i >= 0; i--) { + t->index[i] = indexes; + indexes += (KEYS_PER_NODE * t->counts[i]); + setup_btree_index(i, t); + } + + return 0; +} + +/* + * Builds the btree to index the map + */ +int dm_table_complete(struct dm_table *t) +{ + int leaf_nodes, r = 0; + + /* how many indexes will the btree have? */ + leaf_nodes = div_up(t->num_targets, KEYS_PER_NODE); + t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); + + /* leaf layer has already been set up */ + t->counts[t->depth - 1] = leaf_nodes; + t->index[t->depth - 1] = t->highs; + + if (t->depth >= 2) + r = setup_indexes(t); + + return r; +} + +void dm_table_event(struct dm_table *t) +{ + wake_up_interruptible(&t->eventq); +} + +EXPORT_SYMBOL(dm_table_get_device); +EXPORT_SYMBOL(dm_table_put_device); +EXPORT_SYMBOL(dm_table_event); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/dm-target.c linux.21rc5-ac2/drivers/md/dm-target.c --- linux.21rc5/drivers/md/dm-target.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/dm-target.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include + +struct tt_internal { + struct target_type tt; + + struct list_head list; + long use; +}; + +static LIST_HEAD(_targets); +static rwlock_t _lock = RW_LOCK_UNLOCKED; + +#define DM_MOD_NAME_SIZE 32 + +/* + * Destructively splits up the argument list to pass to ctr. + */ +int split_args(int max, int *argc, char **argv, char *input) +{ + char *start, *end = input, *out; + *argc = 0; + + while (1) { + start = end; + + /* Skip whitespace */ + while (*start && isspace(*start)) + start++; + + if (!*start) + break; /* success, we hit the end */ + + /* 'out' is used to strip the backslash quoting */ + end = out = start; + while (*end) { + /* Everything apart from '\0' can be quoted */ + if (*end == '\\' && *(end + 1)) { + *out++ = *(end + 1); + end += 2; + continue; + } + + if (isspace(*end)) + break; /* end of token */ + + *out++ = *end++; + } + + /* have we already filled the array?
*/ + if ((*argc + 1) > max) + return -EINVAL; + + /* we know this is whitespace */ + if (*end) + end++; + + /* terminate the string and put it in the array */ + *out = '\0'; + argv[*argc] = start; + (*argc)++; + } + + return 0; +} + +static inline struct tt_internal *__find_target_type(const char *name) +{ + struct list_head *tih; + struct tt_internal *ti; + + list_for_each(tih, &_targets) { + ti = list_entry(tih, struct tt_internal, list); + + if (!strcmp(name, ti->tt.name)) + return ti; + } + + return NULL; +} + +static struct tt_internal *get_target_type(const char *name) +{ + struct tt_internal *ti; + + read_lock(&_lock); + ti = __find_target_type(name); + + if (ti) { + if (ti->use == 0 && ti->tt.module) + __MOD_INC_USE_COUNT(ti->tt.module); + ti->use++; + } + read_unlock(&_lock); + + return ti; +} + +static void load_module(const char *name) +{ + char module_name[DM_MOD_NAME_SIZE] = "dm-"; + + /* Length check for strcat() below */ + if (strlen(name) > (DM_MOD_NAME_SIZE - 4)) + return; + + strcat(module_name, name); + request_module(module_name); + + return; +} + +struct target_type *dm_get_target_type(const char *name) +{ + struct tt_internal *ti = get_target_type(name); + + if (!ti) { + load_module(name); + ti = get_target_type(name); + } + + return ti ? &ti->tt : NULL; +} + +void dm_put_target_type(struct target_type *t) +{ + struct tt_internal *ti = (struct tt_internal *) t; + + read_lock(&_lock); + if (--ti->use == 0 && ti->tt.module) + __MOD_DEC_USE_COUNT(ti->tt.module); + + if (ti->use < 0) + BUG(); + read_unlock(&_lock); + + return; +} + +static struct tt_internal *alloc_target(struct target_type *t) +{ + struct tt_internal *ti = kmalloc(sizeof(*ti), GFP_KERNEL); + + if (ti) { + memset(ti, 0, sizeof(*ti)); + ti->tt = *t; + } + + return ti; +} + +int dm_register_target(struct target_type *t) +{ + int rv = 0; + struct tt_internal *ti = alloc_target(t); + + if (!ti) + return -ENOMEM; + + write_lock(&_lock); + if (__find_target_type(t->name)) + rv = -EEXIST; + else + list_add(&ti->list, &_targets); + + write_unlock(&_lock); + return rv; +} + +int dm_unregister_target(struct target_type *t) +{ + struct tt_internal *ti; + + write_lock(&_lock); + if (!(ti = __find_target_type(t->name))) { + write_unlock(&_lock); + return -EINVAL; + } + + if (ti->use) { + write_unlock(&_lock); + return -ETXTBSY; + } + + list_del(&ti->list); + kfree(ti); + + write_unlock(&_lock); + return 0; +} + +/* + * io-err: always fails an io, useful for bringing + * up LV's that have holes in them. 
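+ * + * For example, the table line + * + * 0 8192 error + * + * maps an 8192-sector region in which every read and write fails + * immediately (io_err_map() below just calls buffer_IO_error()).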
+ */ +static int io_err_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **args, void **context) +{ + *context = NULL; + return 0; +} + +static void io_err_dtr(struct dm_table *t, void *c) +{ + /* empty */ + return; +} + +static int io_err_map(struct buffer_head *bh, int rw, void *context) +{ + buffer_IO_error(bh); + return 0; +} + +static struct target_type error_target = { + name: "error", + ctr: io_err_ctr, + dtr: io_err_dtr, + map: io_err_map, + status: NULL, +}; + +int dm_target_init(void) +{ + return dm_register_target(&error_target); +} + +void dm_target_exit(void) +{ + if (dm_unregister_target(&error_target)) + DMWARN("error target unregistration failed"); +} + +EXPORT_SYMBOL(dm_register_target); +EXPORT_SYMBOL(dm_unregister_target); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/kcopyd.c linux.21rc5-ac2/drivers/md/kcopyd.c --- linux.21rc5/drivers/md/kcopyd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/kcopyd.c 2003-05-28 16:14:40.000000000 +0100 @@ -0,0 +1,841 @@ +/* + * Copyright (C) 2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kcopyd.h" + +/* FIXME: this is only needed for the DMERR macros */ +#include "dm.h" + +/* + * Hard sector size used all over the kernel. + */ +#define SECTOR_SIZE 512 +#define SECTOR_SHIFT 9 + +static void wake_kcopyd(void); + +/*----------------------------------------------------------------- + * We reserve our own pool of preallocated pages that are + * only used for kcopyd io. + *---------------------------------------------------------------*/ + +/* + * FIXME: This should be configurable. + */ +#define NUM_PAGES 512 + +static DECLARE_MUTEX(_pages_lock); +static int _num_free_pages; +static struct page *_pages_array[NUM_PAGES]; +static DECLARE_MUTEX(start_lock); + +static int init_pages(void) +{ + int i; + struct page *p; + + for (i = 0; i < NUM_PAGES; i++) { + p = alloc_page(GFP_KERNEL); + if (!p) + goto bad; + + LockPage(p); + _pages_array[i] = p; + } + + _num_free_pages = NUM_PAGES; + return 0; + + bad: + while (i--) + __free_page(_pages_array[i]); + return -ENOMEM; +} + +static void exit_pages(void) +{ + int i; + struct page *p; + + for (i = 0; i < NUM_PAGES; i++) { + p = _pages_array[i]; + UnlockPage(p); + __free_page(p); + } + + _num_free_pages = 0; +} + +static int kcopyd_get_pages(int num, struct page **result) +{ + int i; + + down(&_pages_lock); + if (_num_free_pages < num) { + up(&_pages_lock); + return -ENOMEM; + } + + for (i = 0; i < num; i++) { + _num_free_pages--; + result[i] = _pages_array[_num_free_pages]; + } + up(&_pages_lock); + + return 0; +} + +static void kcopyd_free_pages(int num, struct page **result) +{ + int i; + + down(&_pages_lock); + for (i = 0; i < num; i++) + _pages_array[_num_free_pages++] = result[i]; + up(&_pages_lock); +} + +/*----------------------------------------------------------------- + * We keep our own private pool of buffer_heads. These are just + * held in a list on the b_reqnext field. + *---------------------------------------------------------------*/ + +/* + * Make sure we have enough buffers to always keep the pages + * occupied. So we assume the worst case scenario where blocks + * are the size of a single sector. 
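+ * + * With NUM_PAGES = 512 and 4K pages that worst case works out at + * 512 * (4096 / 512) = 4096 buffer heads.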
+ */ +#define NUM_BUFFERS (NUM_PAGES * (PAGE_SIZE / SECTOR_SIZE)) + +static spinlock_t _buffer_lock = SPIN_LOCK_UNLOCKED; +static struct buffer_head *_all_buffers; +static struct buffer_head *_free_buffers; + +static int init_buffers(void) +{ + int i; + struct buffer_head *buffers; + + buffers = vcalloc(NUM_BUFFERS, sizeof(struct buffer_head)); + if (!buffers) { + DMWARN("Couldn't allocate buffer heads."); + return -ENOMEM; + } + + for (i = 0; i < NUM_BUFFERS; i++) { + if (i < NUM_BUFFERS - 1) + buffers[i].b_reqnext = &buffers[i + 1]; + init_waitqueue_head(&buffers[i].b_wait); + INIT_LIST_HEAD(&buffers[i].b_inode_buffers); + } + + _all_buffers = _free_buffers = buffers; + return 0; +} + +static void exit_buffers(void) +{ + vfree(_all_buffers); +} + +static struct buffer_head *alloc_buffer(void) +{ + struct buffer_head *r; + unsigned long flags; + + spin_lock_irqsave(&_buffer_lock, flags); + + if (!_free_buffers) + r = NULL; + else { + r = _free_buffers; + _free_buffers = _free_buffers->b_reqnext; + r->b_reqnext = NULL; + } + + spin_unlock_irqrestore(&_buffer_lock, flags); + + return r; +} + +/* + * Only called from interrupt context. + */ +static void free_buffer(struct buffer_head *bh) +{ + unsigned long flags, was_empty; + + spin_lock_irqsave(&_buffer_lock, flags); + was_empty = (_free_buffers == NULL) ? 1 : 0; + bh->b_reqnext = _free_buffers; + _free_buffers = bh; + spin_unlock_irqrestore(&_buffer_lock, flags); + + /* + * If the buffer list was empty then kcopyd probably went + * to sleep because it ran out of buffer heads, so let's + * wake it up. + */ + if (was_empty) + wake_kcopyd(); +} + +/*----------------------------------------------------------------- + * kcopyd_jobs need to be allocated by the *clients* of kcopyd; + * for this reason we use a mempool to prevent the client from + * ever having to do io (which could cause a + * deadlock). + *---------------------------------------------------------------*/ +#define MIN_JOBS NUM_PAGES + +static kmem_cache_t *_job_cache = NULL; +static mempool_t *_job_pool = NULL; + +/* + * We maintain three lists of jobs: + * + * i) jobs waiting for pages + * ii) jobs that have pages, and are waiting for the io to be issued. + * iii) jobs that have completed. + * + * All three of these are protected by job_lock. + */ + +static spinlock_t _job_lock = SPIN_LOCK_UNLOCKED; + +static LIST_HEAD(_complete_jobs); +static LIST_HEAD(_io_jobs); +static LIST_HEAD(_pages_jobs); + +static int init_jobs(void) +{ + INIT_LIST_HEAD(&_complete_jobs); + INIT_LIST_HEAD(&_io_jobs); + INIT_LIST_HEAD(&_pages_jobs); + + _job_cache = kmem_cache_create("kcopyd-jobs", sizeof(struct kcopyd_job), + __alignof__(struct kcopyd_job), + 0, NULL, NULL); + if (!_job_cache) + return -ENOMEM; + + _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab, + mempool_free_slab, _job_cache); + if (!_job_pool) { + kmem_cache_destroy(_job_cache); + return -ENOMEM; + } + + return 0; +} + +static void exit_jobs(void) +{ + mempool_destroy(_job_pool); + kmem_cache_destroy(_job_cache); +} + +struct kcopyd_job *kcopyd_alloc_job(void) +{ + struct kcopyd_job *job; + + job = mempool_alloc(_job_pool, GFP_KERNEL); + if (!job) + return NULL; + + memset(job, 0, sizeof(*job)); + return job; +} + +void kcopyd_free_job(struct kcopyd_job *job) +{ + mempool_free(job, _job_pool); +} + +/* + * Functions to push and pop a job onto the head of a given job + * list.
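+ * + * Both take _job_lock with interrupts disabled because push() is + * also called from end_bh(), which runs in interrupt context.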
+ */ +static inline struct kcopyd_job *pop(struct list_head *jobs) +{ + struct kcopyd_job *job = NULL; + unsigned long flags; + + spin_lock_irqsave(&_job_lock, flags); + + if (!list_empty(jobs)) { + job = list_entry(jobs->next, struct kcopyd_job, list); + list_del(&job->list); + } + spin_unlock_irqrestore(&_job_lock, flags); + + return job; +} + +static inline void push(struct list_head *jobs, struct kcopyd_job *job) +{ + unsigned long flags; + + spin_lock_irqsave(&_job_lock, flags); + list_add(&job->list, jobs); + spin_unlock_irqrestore(&_job_lock, flags); +} + +/* + * Completion function for one of our buffers. + */ +static void end_bh(struct buffer_head *bh, int uptodate) +{ + struct kcopyd_job *job = bh->b_private; + + mark_buffer_uptodate(bh, uptodate); + unlock_buffer(bh); + + if (!uptodate) + job->err = -EIO; + + /* are we the last? */ + if (atomic_dec_and_test(&job->nr_incomplete)) { + push(&_complete_jobs, job); + wake_kcopyd(); + } + + free_buffer(bh); +} + +static void dispatch_bh(struct kcopyd_job *job, + struct buffer_head *bh, int block) +{ + int p; + + /* + * Add in the job offset + */ + bh->b_blocknr = (job->disk.sector >> job->block_shift) + block; + + p = block >> job->bpp_shift; + block &= job->bpp_mask; + + bh->b_dev = B_FREE; + bh->b_size = job->block_size; + set_bh_page(bh, job->pages[p], ((block << job->block_shift) + + job->offset) << SECTOR_SHIFT); + bh->b_this_page = bh; + + init_buffer(bh, end_bh, job); + + bh->b_dev = job->disk.dev; + bh->b_state = ((1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req)); + + set_bit(BH_Uptodate, &bh->b_state); + if (job->rw == WRITE) + clear_bit(BH_Dirty, &bh->b_state); + + submit_bh(job->rw, bh); +} + +/* + * These three functions process 1 item from the corresponding + * job list. + * + * They return: + * < 0: error + * 0: success + * > 0: can't process yet. + */ +static int run_complete_job(struct kcopyd_job *job) +{ + job->callback(job); + return 0; +} + +/* + * Request io on as many buffer heads as we can currently get for + * a particular job. + */ +static int run_io_job(struct kcopyd_job *job) +{ + unsigned int block; + struct buffer_head *bh; + + for (block = atomic_read(&job->nr_requested); + block < job->nr_blocks; block++) { + bh = alloc_buffer(); + if (!bh) + break; + + atomic_inc(&job->nr_requested); + dispatch_bh(job, bh, block); + } + + return (block == job->nr_blocks) ? 0 : 1; +} + +static int run_pages_job(struct kcopyd_job *job) +{ + int r; + + job->nr_pages = (job->disk.count + job->offset) / + (PAGE_SIZE / SECTOR_SIZE); + r = kcopyd_get_pages(job->nr_pages, job->pages); + + if (!r) { + /* this job is ready for io */ + push(&_io_jobs, job); + return 0; + } + + if (r == -ENOMEM) + /* can't get the pages now, try again later */ + return 1; + + return r; +} + +/* + * Run through a list for as long as possible. Returns the count + * of successful jobs. + */ +static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) +{ + struct kcopyd_job *job; + int r, count = 0; + + while ((job = pop(jobs))) { + + r = fn(job); + + if (r < 0) { + /* error this rogue job */ + job->err = r; + push(&_complete_jobs, job); + break; + } + + if (r > 0) { + /* + * We couldn't service this job ATM, so + * push this job back onto the list. + */ + push(jobs, job); + break; + } + + count++; + } + + return count; +} + +/* + * kcopyd does this every time it's woken up. + */ +static void do_work(void) +{ + int count; + + /* + * We loop round until there is no more work to do.
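+ * + * A job normally migrates through the lists in the opposite order + * to the processing here: it waits on _pages_jobs until pages are + * available, sits on _io_jobs while its buffer heads are dispatched, + * and is moved to _complete_jobs by end_bh() when the last buffer + * comes back.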
+ */ + do { + count = process_jobs(&_complete_jobs, run_complete_job); + count += process_jobs(&_io_jobs, run_io_job); + count += process_jobs(&_pages_jobs, run_pages_job); + + } while (count); + + run_task_queue(&tq_disk); +} + +/*----------------------------------------------------------------- + * The daemon + *---------------------------------------------------------------*/ +static atomic_t _kcopyd_must_die; +static DECLARE_MUTEX(_run_lock); +static DECLARE_WAIT_QUEUE_HEAD(_job_queue); + +static int kcopyd(void *arg) +{ + DECLARE_WAITQUEUE(wq, current); + + daemonize(); + strcpy(current->comm, "kcopyd"); + atomic_set(&_kcopyd_must_die, 0); + + add_wait_queue(&_job_queue, &wq); + + down(&_run_lock); + up(&start_lock); + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + + if (atomic_read(&_kcopyd_must_die)) + break; + + do_work(); + schedule(); + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&_job_queue, &wq); + + up(&_run_lock); + + return 0; +} + +static int start_daemon(void) +{ + static pid_t pid = 0; + + down(&start_lock); + + pid = kernel_thread(kcopyd, NULL, 0); + if (pid <= 0) { + DMERR("Failed to start kcopyd thread"); + return -EAGAIN; + } + + /* + * wait for the daemon to up this mutex. + */ + down(&start_lock); + up(&start_lock); + + return 0; +} + +static int stop_daemon(void) +{ + atomic_set(&_kcopyd_must_die, 1); + wake_kcopyd(); + down(&_run_lock); + up(&_run_lock); + + return 0; +} + +static void wake_kcopyd(void) +{ + wake_up_interruptible(&_job_queue); +} + +static int calc_shift(unsigned int n) +{ + int s; + + for (s = 0; n; s++, n >>= 1) + ; + + return --s; +} + +static void calc_block_sizes(struct kcopyd_job *job) +{ + job->block_size = get_hardsect_size(job->disk.dev); + job->block_shift = calc_shift(job->block_size / SECTOR_SIZE); + job->bpp_shift = PAGE_SHIFT - job->block_shift - SECTOR_SHIFT; + job->bpp_mask = (1 << job->bpp_shift) - 1; + job->nr_blocks = job->disk.count >> job->block_shift; + atomic_set(&job->nr_requested, 0); + atomic_set(&job->nr_incomplete, job->nr_blocks); +} + +int kcopyd_io(struct kcopyd_job *job) +{ + calc_block_sizes(job); + push(job->pages[0] ? &_io_jobs : &_pages_jobs, job); + wake_kcopyd(); + return 0; +} + +/*----------------------------------------------------------------- + * The copier is implemented on top of the simpler async io + * daemon above. 
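+ * + * kcopyd_copy() queues a READ job for the source region; its + * callback, copy_write(), resubmits the same job as a WRITE to the + * destination, and that job's completion (copy_complete()) frees + * everything and notifies the client.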
+ *---------------------------------------------------------------*/ +struct copy_info { + kcopyd_notify_fn notify; + void *notify_context; + + struct kcopyd_region to; +}; + +#define MIN_INFOS 128 +static kmem_cache_t *_copy_cache = NULL; +static mempool_t *_copy_pool = NULL; + +static int init_copier(void) +{ + _copy_cache = kmem_cache_create("kcopyd-info", + sizeof(struct copy_info), + __alignof__(struct copy_info), + 0, NULL, NULL); + if (!_copy_cache) + return -ENOMEM; + + _copy_pool = mempool_create(MIN_INFOS, mempool_alloc_slab, + mempool_free_slab, _copy_cache); + if (!_copy_pool) { + kmem_cache_destroy(_copy_cache); + return -ENOMEM; + } + + return 0; +} + +static void exit_copier(void) +{ + if (_copy_pool) + mempool_destroy(_copy_pool); + + if (_copy_cache) + kmem_cache_destroy(_copy_cache); +} + +static inline struct copy_info *alloc_copy_info(void) +{ + return mempool_alloc(_copy_pool, GFP_KERNEL); +} + +static inline void free_copy_info(struct copy_info *info) +{ + mempool_free(info, _copy_pool); +} + +void copy_complete(struct kcopyd_job *job) +{ + struct copy_info *info = (struct copy_info *) job->context; + + if (info->notify) + info->notify(job->err, info->notify_context); + + free_copy_info(info); + + kcopyd_free_pages(job->nr_pages, job->pages); + + kcopyd_free_job(job); +} + +static void page_write_complete(struct kcopyd_job *job) +{ + struct copy_info *info = (struct copy_info *) job->context; + int i; + + if (info->notify) + info->notify(job->err, info->notify_context); + + free_copy_info(info); + for (i = 0; i < job->nr_pages; i++) + put_page(job->pages[i]); + + kcopyd_free_job(job); +} + +/* + * These callback functions implement the state machine that copies regions. + */ +void copy_write(struct kcopyd_job *job) +{ + struct copy_info *info = (struct copy_info *) job->context; + + if (job->err && info->notify) { + info->notify(job->err, info->notify_context); + kcopyd_free_job(job); + free_copy_info(info); + return; + } + + job->rw = WRITE; + memcpy(&job->disk, &info->to, sizeof(job->disk)); + job->callback = copy_complete; + job->context = info; + + /* + * Queue the write. + */ + kcopyd_io(job); +} + +int kcopyd_write_pages(struct kcopyd_region *to, int nr_pages, + struct page **pages, int offset, kcopyd_notify_fn fn, + void *context) +{ + struct copy_info *info; + struct kcopyd_job *job; + int i; + + /* + * Allocate a new copy_info. + */ + info = alloc_copy_info(); + if (!info) + return -ENOMEM; + + job = kcopyd_alloc_job(); + if (!job) { + free_copy_info(info); + return -ENOMEM; + } + + /* + * set up for the write. + */ + info->notify = fn; + info->notify_context = context; + memcpy(&info->to, to, sizeof(*to)); + + /* Get the pages */ + job->nr_pages = nr_pages; + for (i = 0; i < nr_pages; i++) { + get_page(pages[i]); + job->pages[i] = pages[i]; + } + + job->rw = WRITE; + + memcpy(&job->disk, &info->to, sizeof(job->disk)); + job->offset = offset; + calc_block_sizes(job); + job->callback = page_write_complete; + job->context = info; + + /* + * Trigger job. + */ + kcopyd_io(job); + return 0; +} + +int kcopyd_copy(struct kcopyd_region *from, struct kcopyd_region *to, + kcopyd_notify_fn fn, void *context) +{ + struct copy_info *info; + struct kcopyd_job *job; + + /* + * Allocate a new copy_info. + */ + info = alloc_copy_info(); + if (!info) + return -ENOMEM; + + job = kcopyd_alloc_job(); + if (!job) { + free_copy_info(info); + return -ENOMEM; + } + + /* + * set up for the read.
+ */ + info->notify = fn; + info->notify_context = context; + memcpy(&info->to, to, sizeof(*to)); + + job->rw = READ; + memcpy(&job->disk, from, sizeof(*from)); + + job->offset = 0; + calc_block_sizes(job); + job->callback = copy_write; + job->context = info; + + /* + * Trigger job. + */ + kcopyd_io(job); + return 0; +} + +/*----------------------------------------------------------------- + * Unit setup + *---------------------------------------------------------------*/ +static struct { + int (*init) (void); + void (*exit) (void); + +} _inits[] = { +#define xx(n) { init_ ## n, exit_ ## n} + xx(pages), + xx(buffers), + xx(jobs), + xx(copier) +#undef xx +}; + +static int _client_count = 0; +static DECLARE_MUTEX(_client_count_sem); + +static int kcopyd_init(void) +{ + const int count = sizeof(_inits) / sizeof(*_inits); + + int r, i; + + for (i = 0; i < count; i++) { + r = _inits[i].init(); + if (r) + goto bad; + } + + start_daemon(); + return 0; + + bad: + while (i--) + _inits[i].exit(); + + return r; +} + +static void kcopyd_exit(void) +{ + int i = sizeof(_inits) / sizeof(*_inits); + + if (stop_daemon()) + DMWARN("Couldn't stop kcopyd."); + + while (i--) + _inits[i].exit(); +} + +void kcopyd_inc_client_count(void) +{ + /* + * What I need here is an atomic_test_and_inc that returns + * the previous value of the atomic... In its absence I lock + * an int with a semaphore. :-( + */ + down(&_client_count_sem); + if (_client_count == 0) + kcopyd_init(); + _client_count++; + + up(&_client_count_sem); +} + +void kcopyd_dec_client_count(void) +{ + down(&_client_count_sem); + if (--_client_count == 0) + kcopyd_exit(); + + up(&_client_count_sem); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/kcopyd.h linux.21rc5-ac2/drivers/md/kcopyd.h --- linux.21rc5/drivers/md/kcopyd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/kcopyd.h 2003-05-29 01:44:07.000000000 +0100 @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2001 Sistina Software + * + * This file is released under the GPL. + */ + +#ifndef DM_KCOPYD_H +#define DM_KCOPYD_H + +/* + * Needed for the definition of offset_t. + */ +#include +#include + +struct kcopyd_region { + kdev_t dev; + offset_t sector; + offset_t count; +}; + +#define MAX_KCOPYD_PAGES 128 + +struct kcopyd_job { + struct list_head list; + + /* + * Error state of the job. + */ + int err; + + /* + * Either READ or WRITE + */ + int rw; + + /* + * The source or destination for the transfer. + */ + struct kcopyd_region disk; + + int nr_pages; + struct page *pages[MAX_KCOPYD_PAGES]; + + /* + * Shifts and masks that will be useful when dispatching + * each buffer_head. + */ + offset_t offset; + offset_t block_size; + offset_t block_shift; + offset_t bpp_shift; /* blocks per page */ + offset_t bpp_mask; + + /* + * nr_blocks is how many buffer heads will have to be + * dispatched to service this job, nr_requested is how + * many have been dispatched and nr_incomplete is how many + * have yet to come back. + */ + unsigned int nr_blocks; + atomic_t nr_requested; + atomic_t nr_incomplete; + + /* + * Set this to ensure you are notified when the job has + * completed. 'context' is for callback to use. + */ + void (*callback)(struct kcopyd_job *job); + void *context; +}; + +/* + * Low level async io routines. + */ +struct kcopyd_job *kcopyd_alloc_job(void); +void kcopyd_free_job(struct kcopyd_job *job); + +int kcopyd_io(struct kcopyd_job *job); + +/* + * Submit a copy job to kcopyd. This is built on top of the + * previous three fns.
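+ * + * A typical client brackets its use with the client count calls + * declared at the bottom of this header and then fires off copies, + * roughly (illustrative only - my_done/my_data are the caller's): + * + * struct kcopyd_region from = { src_dev, 0, 128 }; + * struct kcopyd_region to = { dst_dev, 0, 128 }; + * + * kcopyd_inc_client_count(); + * kcopyd_copy(&from, &to, my_done, my_data); + * ... + * kcopyd_dec_client_count(); + * + * dm-snapshot.c drives exactly this interface from start_copy().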
+ */ +typedef void (*kcopyd_notify_fn)(int err, void *context); + +int kcopyd_copy(struct kcopyd_region *from, struct kcopyd_region *to, + kcopyd_notify_fn fn, void *context); + +int kcopyd_write_pages(struct kcopyd_region *to, int nr_pages, + struct page **pages, int offset, kcopyd_notify_fn fn, + void *context); + +/* + * We only want kcopyd to reserve resources if someone is + * actually using it. + */ +void kcopyd_inc_client_count(void); +void kcopyd_dec_client_count(void); + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/Makefile linux.21rc5-ac2/drivers/md/Makefile --- linux.21rc5/drivers/md/Makefile 2003-05-28 15:08:06.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/Makefile 2003-04-22 16:44:37.000000000 +0100 @@ -4,9 +4,11 @@ O_TARGET := mddev.o -export-objs := md.o xor.o +export-objs := md.o xor.o dm-table.o dm-target.o kcopyd.o list-multi := lvm-mod.o lvm-mod-objs := lvm.o lvm-snap.o lvm-fs.o +dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \ + dm-ioctl.o dm-snapshot.o dm-exception-store.o kcopyd.o # Note: link order is important. All raid personalities # and xor.o must come before md.o, as they each initialise @@ -20,8 +22,12 @@ obj-$(CONFIG_MD_MULTIPATH) += multipath.o obj-$(CONFIG_BLK_DEV_MD) += md.o obj-$(CONFIG_BLK_DEV_LVM) += lvm-mod.o +obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o include $(TOPDIR)/Rules.make lvm-mod.o: $(lvm-mod-objs) $(LD) -r -o $@ $(lvm-mod-objs) + +dm-mod.o: $(dm-mod-objs) + $(LD) -r -o $@ $(dm-mod-objs) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/md/md.c linux.21rc5-ac2/drivers/md/md.c --- linux.21rc5/drivers/md/md.c 2003-05-28 15:08:54.000000000 +0100 +++ linux.21rc5-ac2/drivers/md/md.c 2003-04-22 16:44:37.000000000 +0100 @@ -77,7 +77,7 @@ */ static int sysctl_speed_limit_min = 100; -static int sysctl_speed_limit_max = 100000; +static int sysctl_speed_limit_max = 10000; static struct ctl_table_header *raid_table_header; @@ -2931,8 +2931,6 @@ * bdflush, otherwise bdflush will deadlock if there are too * many dirty RAID5 blocks. */ - current->policy = SCHED_OTHER; - current->nice = -20; md_unlock_kernel(); complete(thread->event); @@ -3454,11 +3452,6 @@ "(but not more than %d KB/sec) for reconstruction.\n", sysctl_speed_limit_max); - /* - * Resync has low priority. 
- */ - current->nice = 19; - is_mddev_idle(mddev); /* this also initializes IO event counters */ for (m = 0; m < SYNC_MARKS; m++) { mark[m] = jiffies; @@ -3536,16 +3529,13 @@ currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1; if (currspeed > sysctl_speed_limit_min) { - current->nice = 19; - if ((currspeed > sysctl_speed_limit_max) || !is_mddev_idle(mddev)) { current->state = TASK_INTERRUPTIBLE; md_schedule_timeout(HZ/4); goto repeat; } - } else - current->nice = -20; + } } printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev)); err = 0; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/media/video/bttv-driver.c linux.21rc5-ac2/drivers/media/video/bttv-driver.c --- linux.21rc5/drivers/media/video/bttv-driver.c 2003-05-28 15:08:54.000000000 +0100 +++ linux.21rc5-ac2/drivers/media/video/bttv-driver.c 2003-04-22 16:44:37.000000000 +0100 @@ -1649,28 +1649,23 @@ case VIDIOCGCHAN: { struct video_channel v; - unsigned int channel; - if(copy_from_user(&v, arg,sizeof(v))) return -EFAULT; - channel = v.channel; - if (channel>=bttv_tvcards[btv->type].video_inputs) - return -EINVAL; v.flags=VIDEO_VC_AUDIO; v.tuners=0; v.type=VIDEO_TYPE_CAMERA; v.norm = btv->win.norm; - if(channel==bttv_tvcards[btv->type].tuner) + if (v.channel < 0 || v.channel >= bttv_tvcards[btv->type].video_inputs) + return -EINVAL; + if(v.channel==bttv_tvcards[btv->type].tuner) { strcpy(v.name,"Television"); v.flags|=VIDEO_VC_TUNER; v.type=VIDEO_TYPE_TV; v.tuners=1; } - else if (channel==bttv_tvcards[btv->type].svhs) + else if(v.channel==bttv_tvcards[btv->type].svhs) strcpy(v.name,"S-Video"); - else if (bttv_tvcards[btv->type].muxsel[v.channel] < 0) - strcpy(v.name,"Digital Video"); else sprintf(v.name,"Composite%d",v.channel); @@ -1684,20 +1679,17 @@ case VIDIOCSCHAN: { struct video_channel v; - unsigned int channel; - if(copy_from_user(&v, arg,sizeof(v))) return -EFAULT; - channel = v.channel; - if (channel>bttv_tvcards[btv->type].video_inputs) + if (v.channel < 0 || v.channel >= bttv_tvcards[btv->type].video_inputs) return -EINVAL; if (v.norm > TVNORMS) return -EOPNOTSUPP; bttv_call_i2c_clients(btv,cmd,&v); down(&btv->lock); - bt848_muxsel(btv, channel); + bt848_muxsel(btv, v.channel); bttv_set_norm(btv, v.norm); up(&btv->lock); return 0; @@ -1726,13 +1718,10 @@ case VIDIOCSTUNER: { struct video_tuner v; - unsigned int tuner; - if(copy_from_user(&v, arg, sizeof(v))) return -EFAULT; - tuner = v.tuner; /* Only one channel has a tuner */ - if(tuner!=bttv_tvcards[btv->type].tuner) + if(v.tuner!=bttv_tvcards[btv->type].tuner) return -EINVAL; if(v.mode!=VIDEO_MODE_PAL&&v.mode!=VIDEO_MODE_NTSC @@ -1990,13 +1979,9 @@ case VIDIOCSAUDIO: { struct video_audio v; - unsigned int n; if(copy_from_user(&v,arg, sizeof(v))) return -EFAULT; - n = v.audio; - if(n >= bttv_tvcards[btv->type].audio_inputs) - return -EINVAL; down(&btv->lock); if(v.flags&VIDEO_AUDIO_MUTE) audio(btv, AUDIO_MUTE); @@ -2017,12 +2002,12 @@ case VIDIOCSYNC: { + int i; DECLARE_WAITQUEUE(wait, current); - unsigned int i; if(copy_from_user((void *)&i,arg,sizeof(int))) return -EFAULT; - if (i >= gbuffers) + if (i < 0 || i >= gbuffers) return -EINVAL; switch (btv->gbuf[i].stat) { case GBUFFER_UNUSED: @@ -2094,8 +2079,7 @@ case VIDIOCGMBUF: { struct video_mbuf vm; - unsigned int i; - + int i; memset(&vm, 0 , sizeof(vm)); vm.size=gbufsize*gbuffers; vm.frames=gbuffers; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/message/edp2/edp2_disk.c 
linux.21rc5-ac2/drivers/message/edp2/edp2_disk.c --- linux.21rc5/drivers/message/edp2/edp2_disk.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/message/edp2/edp2_disk.c 2003-05-02 17:58:24.000000000 +0100 @@ -0,0 +1,1262 @@ +/* + * EDP2 Block Storage + * This is a work in progress and not yet functional. + * + * Copyright 2003 Red Hat Inc. All Rights Reserved. + * Based on drivers/message/i2o/i2o_block.c + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define MAJOR_NR 250 + +#include + +#define MAX_EDP2_DISK 16 + +#define MAX_EDP2_DEPTH 8 +#define MAX_EDP2_RETRIES 8 + +#define DRIVERDEBUG +#ifdef DRIVERDEBUG +#define DEBUG( s ) printk( s ) +#else +#define DEBUG( s ) +#endif + +/* + * Some of these can be made smaller later + */ + +static int edp2_blksizes[MAX_EDP2_DISK<<4]; +static int edp2_hardsizes[MAX_EDP2_DISK<<4]; +static int edp2_sizes[MAX_EDP2_DISK<<4]; + +static int edp2_context; + +/* + * EDP2 block device descriptor + */ +struct edp2_device +{ + struct net_device *dev; + u8 mac[6]; /* Drive MAC address */ + int flags; + int refcnt; + struct request *head, *tail; + request_queue_t *req_queue; + int done_flag; + int depth; +}; + +/* + * FIXME: + * We should cache align these to avoid ping-ponging lines on SMP + * boxes under heavy I/O load... + */ + +struct edp2_request +{ + struct edp2_request *next; + struct request *req; + u16 tag; /* EDP2 tag */ + u16 type; /* Type of request, 0 for normal I/O */ + unsigned long sent; /* For retransmissions */ + +}; + +/* + * Per EDP request queue information + * + * We have a separate request_queue_t per EDP + */ + +struct edp2_iop_queue +{ + atomic_t queue_depth; + struct edp2_request request_queue[MAX_EDP2_DEPTH]; + struct edp2_request *edp2_qhead; + request_queue_t req_queue; +}; +static struct edp2_iop_queue *edp2_queues[MAX_EDP2_DISK]; + +/* + * Each EDP2 disk is one of these. + */ + +static struct edp2_device edp2_dev[MAX_EDP2_DISK<<4]; +static int edp2_dev_count = 0; +static struct hd_struct edp2_b[MAX_EDP2_DISK<<4]; +static struct gendisk edp2_gendisk; /* Declared later */ + +static int edp2_block_reply(struct edp *, struct sk_buff *); +static void edp2_new_device(struct edp2_device *); +static void edp2_del_device(struct edp2_device *); +static int edp2_install_device(struct edp2_device *, int); +static void edp2_end_request(struct request *); +static void edp2_request(request_queue_t *); +static int edp2_init_unit(unsigned int); +static request_queue_t* edp2_get_queue(kdev_t); +static int edp2_query_device(struct edp2_device *, int, int, void*, int); +static int do_edp2_revalidate(kdev_t, int); +static int edp2_evt(void *); + +/** + * edp2_get - get a message + * @dev: device to get message for + * + * Obtain and fill in the basics of an EDPv2 message + * buffer. Right now we always grab 1514 bytes, but that + * should be fixed because we could grab a small buffer + * on read I/O. The pre-computed cached EDP header is + * added and the tag incremented.
+ *	The caller needs to overwrite the function number if it's
+ *	not an I/O request.
+ */
+
+static struct sk_buff *edp2_get(struct edp2_device *dev)
+{
+	struct sk_buff *skb = alloc_skb(1514, GFP_ATOMIC);
+	if (skb == NULL)
+		return NULL;
+	memcpy(skb_put(skb, 20), dev->header.bits, 20);
+	dev->header.hdr.tag++;	/* FIXME: assumes the decoded view of the header union is named 'hdr' */
+	return skb;
+}
+
+/**
+ *	edp2_send	-	Outgoing I/O path
+ *	@skb: buffer from edp2_get
+ *	@dev: EDP2 device
+ *	@ireq: Request structure
+ *	@base: Partition offset
+ *	@unit: Device identity
+ *
+ *	We build a descriptor for the I/O request and fire it at the network
+ *	interface. Retransmission and timeouts are handled by the layer
+ *	above.
+ */
+
+static int edp2_send(struct sk_buff *skb, struct edp2_device *dev, struct edp2_request *ireq, u32 base, int unit)
+{
+	u64 offset;
+	struct request *req = ireq->req;
+	struct buffer_head *bh = req->bh;
+	struct edp2_ata_fid0 *io;
+	u8 *data;
+
+	/* We don't support GigE big frames for EDP */
+	if((req->nr_sectors << 9) > 1024)
+		BUG();
+
+	io = (struct edp2_ata_fid0 *)skb_put(skb, sizeof(struct edp2_ata_fid0));
+
+	/*
+	 *	Mask out partitions from now on
+	 */
+	unit &= 0xF0;
+
+	/* This can be optimised later - just want to be sure it's right for
+	   starters */
+	offset = (u64)(req->sector + base);	/* sector address; a <<9 byte offset would overflow the LBA28 fields below */
+
+	/* Pack the I/O request for LBA28 format */
+	/* FIXME: use LBA48 when needed */
+	io->lba0 = offset & 0xFF;
+	io->lba1 = (offset >> 8) & 0xFF;
+	io->lba2 = (offset >> 16) & 0xFF;
+	io->lba3 = (offset >> 24) & 0x0F;	/* FIXME: OR in the LBA mode/device bits */
+	io->cmd_status = req->nr_sectors;
+	io->err_feature = 0;
+
+	data = skb_put(skb, req->nr_sectors << 9);
+
+	/* Write I/O requires we attach the data too */
+	if(req->cmd == WRITE)
+	{
+		io->flag_ver = EDP_ATA_WRITE;
+		while(bh!=NULL)
+		{
+			memcpy(data, bh->b_data, bh->b_size);
+			data += bh->b_size;
+			bh = bh->b_reqnext;
+		}
+	}
+
+	edp2_post_message(dev, skb);
+	atomic_inc(&edp2_queues[dev->unit]->queue_depth);
+	return 0;
+}
+
+/**
+ *	edp2_unhook_request	-	return a request to the free list
+ *	@ireq: request to release
+ *	@disk: disk the request belongs to
+ *
+ *	Push a request slot back onto the head of the _locked_ per-unit
+ *	free list. Caller must hold the lock.
+ */
+
+static inline void edp2_unhook_request(struct edp2_request *ireq, unsigned int disk)
+{
+	ireq->next = edp2_queues[disk]->edp2_qhead;
+	edp2_queues[disk]->edp2_qhead = ireq;
+}
+
+/**
+ *	edp2_end_request	-	request completion handler
+ *	@req: block request to complete
+ *
+ *	Called when the EDP2 layer completes an I/O request and
+ *	wants to complete it with the kernel block layer
+ */
+
+static inline void edp2_end_request(struct request *req)
+{
+	/*
+	 * Loop until all of the buffers that are linked
+	 * to this request have been marked updated and
+	 * unlocked.
+	 */
+
+	while (end_that_request_first( req, !req->errors, "edp2" ));
+
+	/*
+	 * It is now ok to complete the request.
+	 */
+	end_that_request_last( req );
+}
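/*
 * A standalone worked example of the LBA28 packing done in edp2_send()
 * above. This is illustrative user-space C, not driver code; the four
 * lba bytes mirror the struct edp2_ata_fid0 fields, and the top-nibble
 * mode bits are omitted since they remain a FIXME in the draft above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long offset = 0x12345678ULL;	/* sector address */
	unsigned char lba0 = offset & 0xFF;		/* 0x78 */
	unsigned char lba1 = (offset >> 8) & 0xFF;	/* 0x56 */
	unsigned char lba2 = (offset >> 16) & 0xFF;	/* 0x34 */
	unsigned char lba3 = (offset >> 24) & 0x0F;	/* 0x02: only 4 bits survive, 28 bits total */

	/* Prints: 12345678 -> 02 34 56 78 */
	printf("%llx -> %02x %02x %02x %02x\n", offset, lba3, lba2, lba1, lba0);
	return 0;
}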
+/**
+ *	edp2_report_error	-	report an EDP level error
+ *	@dev: device the error relates to
+ *	@data: error code to decode
+ *
+ *	Called to decode EDP level error events from ATA FID0 frames
+ */
+
+static void edp2_report_error(struct edp2_device *dev, u8 data)
+{
+	switch(data)
+	{
+		case EDPT2_ATA_BADPARAM:
+			printk(KERN_ERR "%s: Parameter error reported by controller.\n", dev->name);
+			break;
+		case EDPT2_ATA_DISKFAIL:
+			printk(KERN_ERR "%s: Disk failure reported by controller.\n", dev->name);
+			break;
+		default:
+			printk(KERN_ERR "%s: Error %d reported by controller.\n", dev->name, data);
+			break;
+	}
+}
+
+/**
+ *	edp2_report_ata_error	-	report ATA error
+ *	@dev: device we are talking with
+ *	@io: io reply
+ *
+ *	Called when an ATA error has been reported by the remote target.
+ *	We decode the unpacked return registers and report the error
+ *	to the user in the style of a local ATA device
+ */
+
+static void edp2_report_ata_error(struct edp2_device *dev, struct edp2_ata_fid0 *io)
+{
+	u8 stat = io->cmd_status;
+
+	/*
+	 *	Dump EDP2 ATA errors in the same style as the IDE
+	 *	driver. No point making people learn to interpret
+	 *	twice as much crap
+	 */
+	printk(KERN_WARNING "%s: logged error: status=0x%02x { ", dev->name, stat);
+	/* We should never see "busy" */
+	if(stat&BUSY_STAT)
+		printk("Busy ");
+	else
+	{
+		if (stat & READY_STAT)	printk("DriveReady ");
+		if (stat & WRERR_STAT)	printk("DeviceFault ");
+		if (stat & SEEK_STAT)	printk("SeekComplete ");
+		if (stat & DRQ_STAT)	printk("DataRequest ");
+		if (stat & ECC_STAT)	printk("CorrectedError ");
+		if (stat & INDEX_STAT)	printk("Index ");
+		if (stat & ERR_STAT)	printk("Error ");
+	}
+	printk("}\n");
+	if((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT)
+	{
+		u8 err = io->err_feature;
+		printk(" { ");
+		if (err & ABRT_ERR)	printk("DriveStatusError ");
+		if (err & ICRC_ERR)	printk("Bad%s ", (err & ABRT_ERR) ? "CRC" : "Sector");
+		if (err & ECC_ERR)	printk("UncorrectableError ");
+		if (err & ID_ERR)	printk("SectorIdNotFound ");
+		if (err & TRK0_ERR)	printk("TrackZeroNotFound ");
+		if (err & MARK_ERR)	printk("AddrMarkNotFound ");
+		printk("}");
+	}
+	/* We know the block ID but don't dump it yet */
+}
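/*
 * Standalone sketch of the decode style used above, showing the output
 * for one concrete reply. The bit values are the classic ATA status
 * register definitions (as in <linux/hdreg.h>); everything else here is
 * local to the sketch.
 */
#include <stdio.h>

#define ERR_STAT   0x01
#define SEEK_STAT  0x10
#define READY_STAT 0x40

int main(void)
{
	unsigned char stat = 0x51;	/* READY | SEEK | ERR */

	printf("status=0x%02x {", stat);
	if (stat & READY_STAT) printf(" DriveReady");
	if (stat & SEEK_STAT)  printf(" SeekComplete");
	if (stat & ERR_STAT)   printf(" Error");
	printf(" }\n");	/* -> status=0x51 { DriveReady SeekComplete Error } */
	return 0;
}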
+/**
+ *	edp2_block_reply	-	I/O reply handler
+ *	@edp: EDP header of reply
+ *	@skb: skbuff holding remainder of data
+ *
+ *	Called when the EDP protocol layer receives a FID 0 frame
+ *	for this host.
+ */
+
+static int edp2_block_reply(struct edp *edp, struct sk_buff *skb)
+{
+	unsigned long flags;
+	struct edp2_request *ireq = NULL;
+	struct edp2_device *dev = edp2_find(skb->mac.raw+6);
+	struct edp2_ata_fid0 *io;
+	u8 status;
+
+	/* Refuse short frames */
+	if(skb->len < sizeof(struct edp2_ata_fid0))
+		return 0;
+
+	io = (struct edp2_ata_fid0 *)skb->data;
+	skb_pull(skb, sizeof(struct edp2_ata_fid0));
+
+	/* Find the request */
+	ireq = edp2_find_request(edp->tag);
+	if(ireq == NULL)
+		return 0;
+
+	/* We don't yet know if this is a "real" block request or
+	   a handler completion. ireq->type tells us this */
+
+	spin_lock_prefetch(&io_request_lock);
+
+	/* Error from the controller layer */
+	if(edp->flag_err & EDP_F_ERROR)
+	{
+		edp2_report_error(dev, edp->flag_err & EDP_F_ERRMASK);
+		spin_lock_irqsave(&io_request_lock, flags);
+		edp2_unhook_request(ireq, dev->unit);
+		if(ireq->type == 0)
+			ireq->req->errors++;
+		edp2_end_request(ireq->req);
+		spin_unlock_irqrestore(&io_request_lock, flags);
+		return 0;
+	}
+
+	/* Now check for ATA error */
+	status = io->cmd_status;
+
+	if((status&(READY_STAT|BAD_STAT))==BAD_STAT)
+	{
+		edp2_report_ata_error(dev, io);
+		ireq->req->errors++;
+	}
+	else
+		ireq->req->errors = 0;
+
+	/*
+	 *	Dequeue the request. We use irqsave locks as one day we
+	 *	may be running polled controllers from a BH...
+	 */
+
+	spin_lock_irqsave(&io_request_lock, flags);
+	edp2_unhook_request(ireq, dev->unit);
+	edp2_end_request(ireq->req);
+	atomic_dec(&edp2_queues[dev->unit]->queue_depth);
+
+	/*
+	 *	We may be able to do more I/O
+	 */
+
+	edp2_request(dev->req_queue);
+	spin_unlock_irqrestore(&io_request_lock, flags);
+	return 0;
+}
+
+/*
+ *	The EDP2 block driver is listed as one of those that pulls the
+ *	front entry off the queue before processing it. This is important
+ *	to remember here. If we drop the io lock then CURRENT will change
+ *	on us. We must unlink CURRENT in this routine before we return, if
+ *	we use it.
+ */
+
+static void edp2_request(request_queue_t *q)
+{
+	struct request *req;
+	struct edp2_request *ireq;
+	int unit;
+	struct edp2_device *dev;
+	struct sk_buff *m;
+
+	while (!list_empty(&q->queue_head)) {
+		/*
+		 *	On an IRQ completion if there is an inactive
+		 *	request on the queue head it means it isn't yet
+		 *	ready to dispatch.
+		 */
+		req = blkdev_entry_next_request(&q->queue_head);
+
+		if(req->rq_status == RQ_INACTIVE)
+			return;
+
+		unit = MINOR(req->rq_dev);
+		dev = &edp2_dev[(unit&0xF0)];
+
+		if(atomic_read(&edp2_queues[dev->unit]->queue_depth) >= dev->depth)
+			break;
+
+		/* Get a message */
+		m = edp2_get(dev);
+
+		if(m==NULL)
+		{
+			/* We need a deadlock breaker timeout here */
+			if(atomic_read(&edp2_queues[dev->unit]->queue_depth) == 0)
+				printk(KERN_ERR "edp2: message queue and request queue empty!!\n");
+			break;
+		}
+		/*
+		 *	Everything ok, so pull from kernel queue onto our queue
+		 */
+		req->errors = 0;
+		blkdev_dequeue_request(req);
+		req->waiting = NULL;
+
+		ireq = edp2_queues[dev->unit]->edp2_qhead;
+		edp2_queues[dev->unit]->edp2_qhead = ireq->next;
+		ireq->req = req;
+
+		edp2_send(m, dev, ireq, edp2_b[unit].start_sect, (unit&0xF0));
+	}
+}
+
+
+/*
+ *	SCSI-CAM for ioctl geometry mapping
+ *	Duplicated with SCSI - this should be moved into somewhere common
+ *	perhaps genhd ?
+ *
+ *	LBA -> CHS mapping table taken from:
+ *
+ *	"Incorporating the I2O Architecture into BIOS for Intel Architecture
+ *	 Platforms"
+ *
+ *	This is an I2O document that is only available to I2O members,
+ *	not developers.
+ *
+ *	From my understanding, this is how all the I2O cards do this
+ *
+ *	Disk Size	| Sectors | Heads | Cylinders
+ *	----------------+---------+-------+-------------------
+ *	1 < X <= 528M	| 63	  | 16	  | X/(63 * 16 * 512)
+ *	528M < X <= 1G	| 63	  | 32	  | X/(63 * 32 * 512)
+ *	1G < X <= 21G	| 63	  | 64	  | X/(63 * 64 * 512)
+ *	21G < X <= 42G	| 63	  | 128	  | X/(63 * 128 * 512)
+ *	42G < X		| 63	  | 255	  | X/(63 * 255 * 512)
+ *
+ */
+#define	BLOCK_SIZE_528M		1081344
+#define	BLOCK_SIZE_1G		2097152
+#define	BLOCK_SIZE_21G		4403200
+#define	BLOCK_SIZE_42G		8806400
+#define	BLOCK_SIZE_84G		17612800
+
+static void edp2_block_biosparam(
+	unsigned long capacity,
+	unsigned short *cyls,
+	unsigned char *hds,
+	unsigned char *secs)
+{
+	unsigned long heads, sectors, cylinders;
+
+	sectors = 63L;				/* Maximize sectors per track */
+	if(capacity <= BLOCK_SIZE_528M)
+		heads = 16;
+	else if(capacity <= BLOCK_SIZE_1G)
+		heads = 32;
+	else if(capacity <= BLOCK_SIZE_21G)
+		heads = 64;
+	else if(capacity <= BLOCK_SIZE_42G)
+		heads = 128;
+	else
+		heads = 255;
+
+	cylinders = capacity / (heads * sectors);
+
+	*cyls = (unsigned short) cylinders;	/* Stuff return values */
+	*secs = (unsigned char) sectors;
+	*hds  = (unsigned char) heads;
+}
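/*
 * Worked example of the CHS mapping above, as standalone user-space C.
 * Capacity is in 512-byte sectors, matching the edp2_sizes[u]<<1 call
 * site in the HDIO_GETGEO ioctl; the thresholds mirror the
 * BLOCK_SIZE_528M/1G/21G/42G constants.
 */
#include <stdio.h>

int main(void)
{
	unsigned long capacity = 4194304UL;	/* 2GB disk, in sectors */
	unsigned long sectors = 63, heads, cylinders;

	if (capacity <= 1081344UL)
		heads = 16;
	else if (capacity <= 2097152UL)
		heads = 32;
	else if (capacity <= 4403200UL)
		heads = 64;
	else if (capacity <= 8806400UL)
		heads = 128;
	else
		heads = 255;

	cylinders = capacity / (heads * sectors);
	/* Prints: C/H/S = 1040/64/63 */
	printf("C/H/S = %lu/%lu/%lu\n", cylinders, heads, sectors);
	return 0;
}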
+/*
+ *	Rescan the partition tables
+ */
+
+static int do_edp2_revalidate(kdev_t dev, int maxu)
+{
+	int minor=MINOR(dev);
+	int i;
+
+	minor&=0xF0;
+
+	edp2_dev[minor].refcnt++;
+	if(edp2_dev[minor].refcnt>maxu+1)
+	{
+		edp2_dev[minor].refcnt--;
+		return -EBUSY;
+	}
+
+	for( i = 15; i>=0 ; i--)
+	{
+		int m = minor+i;
+		invalidate_device(MKDEV(MAJOR_NR, m), 1);
+		edp2_gendisk.part[m].start_sect = 0;
+		edp2_gendisk.part[m].nr_sects = 0;
+	}
+
+	/*
+	 *	Do a physical check and then reconfigure
+	 */
+
+	edp2_install_device(edp2_dev[minor].controller, edp2_dev[minor].i2odev,
+		minor);
+	edp2_dev[minor].refcnt--;
+	return 0;
+}
+
+/*
+ *	Issue device specific ioctl calls.
+ */
+
+static int edp2_ioctl(struct inode *inode, struct file *file,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct edp2_device *dev;
+	int minor;
+
+	/* Anyone capable of this syscall can do *real bad* things */
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (!inode)
+		return -EINVAL;
+	minor = MINOR(inode->i_rdev);
+	if (minor >= (MAX_EDP2_DISK<<4))
+		return -ENODEV;
+
+	dev = &edp2_dev[minor];
+	switch (cmd) {
+		case HDIO_GETGEO:
+		{
+			struct hd_geometry g;
+			int u=minor&0xF0;
+			edp2_block_biosparam(edp2_sizes[u]<<1,
+				&g.cylinders, &g.heads, &g.sectors);
+			g.start = edp2_b[minor].start_sect;
+			return copy_to_user((void *)arg,&g, sizeof(g))?-EFAULT:0;
+		}
+
+		case BLKRRPART:
+			if(!capable(CAP_SYS_ADMIN))
+				return -EACCES;
+			return do_edp2_revalidate(inode->i_rdev,1);
+
+		default:
+			return blk_ioctl(inode->i_rdev, cmd, arg);
+	}
+	return 0;
+}
+
+/*
+ *	Close the block device down
+ */
+
+static int edp2_release(struct inode *inode, struct file *file)
+{
+	struct edp2_device *dev;
+	int minor;
+
+	minor = MINOR(inode->i_rdev);
+	if (minor >= (MAX_EDP2_DISK<<4))
+		return -ENODEV;
+	dev = &edp2_dev[(minor&0xF0)];
+
+	/*
+	 * This is to deal with the case of an application
+	 * opening a device and then the device disappearing while
+	 * it's in use, and then the application trying to release
+	 * it. ex: Unmounting a deleted RAID volume at reboot.
+	 * If we send messages, it will just cause FAILs since
+	 * the TID no longer exists.
+	 */
+	if(!dev->i2odev)
+		return 0;
+
+	if (dev->refcnt <= 0)
+		printk(KERN_ALERT "edp2_release: refcount(%d) <= 0\n", dev->refcnt);
+	dev->refcnt--;
+	if(dev->refcnt==0)
+	{
+		edp2_flush_cache_wait(dev);
+		if(edp2_unclaim(dev))
+			printk(KERN_ERR "edp2_release: controller rejected unclaim.\n");
+	}
+	return 0;
+}
+
+/*
+ *	Open the block device.
+ */
+
+static int edp2_open(struct inode *inode, struct file *file)
+{
+	int minor;
+	struct edp2_device *dev;
+
+	if (!inode)
+		return -EINVAL;
+	minor = MINOR(inode->i_rdev);
+	if (minor >= MAX_EDP2_DISK<<4)
+		return -ENODEV;
+	dev=&edp2_dev[(minor&0xF0)];
+
+	if(!dev->dev)
+		return -ENODEV;
+
+	if(dev->refcnt++==0)
+	{
+		if(edp2_claim_wait(dev)<0)
+		{
+			dev->refcnt--;
+			printk(KERN_INFO "EDP2: Could not claim device.\n");
+			return -EBUSY;
+		}
+		DEBUG("Claimed ");
+	}
+	return 0;
+}
+
+/*
+ *	Install the EDP2 block device we found.
+ */
+
+static int edp2_install_device(struct i2o_controller *c, struct i2o_device *d, int unit)
+{
+	struct edp2_device *dev = &edp2_dev[unit];
+	u64 size;
+	u32 blocksize;
+	u8 type;
+	u16 power;
+	u32 flags, status;
+	int i;
+
+	/*
+	 * For logging purposes...
+	 */
+	printk(KERN_INFO "edp2_b: Installing tid %d device at unit %d\n",
+			d->lct_data.tid, unit);
+
+	/*
+	 *	Ask for the current media data. If that isn't supported
+	 *	then we ask for the device capacity data
+	 */
+	if(edp2_query_device(dev, 0x0004, 1, &blocksize, 4) != 0
+	  || edp2_query_device(dev, 0x0004, 0, &size, 8) !=0 )
+	{
+		edp2_query_device(dev, 0x0000, 3, &blocksize, 4);
+		edp2_query_device(dev, 0x0000, 4, &size, 8);
+	}
+
+	if(edp2_query_device(dev, 0x0000, 2, &power, 2)!=0)
+		power = 0;
+	edp2_query_device(dev, 0x0000, 5, &flags, 4);
+	edp2_query_device(dev, 0x0000, 6, &status, 4);
+	edp2_sizes[unit] = (int)(size>>10);
+	for(i=unit; i <= unit+15 ; i++)
+		edp2_hardsizes[i] = blocksize;
+	edp2_gendisk.part[unit].nr_sects = size>>9;
+	edp2_b[unit].nr_sects = (int)(size>>9);
+
+	/*
+	 * Max number of Scatter-Gather Elements
+	 */
+
+	edp2_dev[unit].power = power;	/* Save power state in device proper */
+	edp2_dev[unit].flags = flags;
+
+	for(i=unit;i<=unit+15;i++)
+	{
+		edp2_dev[i].power = power;	/* Save power state */
+		edp2_dev[i].flags = flags;	/* Keep the type info */
+		edp2_max_sectors[i] = 96;	/* 256 might be nicer but many controllers
+						   explode on 65536 or higher */
+		edp2_dev[i].max_segments = (d->controller->status_block->inbound_frame_size - 7) / 2;
+
+		edp2_dev[i].rcache = CACHE_SMARTFETCH;
+		edp2_dev[i].wcache = CACHE_WRITETHROUGH;
+
+		if(d->controller->battery == 0)
+			edp2_dev[i].wcache = CACHE_WRITETHROUGH;
+
+		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.promise)
+			edp2_dev[i].wcache = CACHE_WRITETHROUGH;
+
+		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.short_req)
+		{
+			edp2_max_sectors[i] = 8;
+			edp2_dev[i].max_segments = 8;
+		}
+	}
+
+	sprintf(d->dev_name, "%s%c", edp2_gendisk.major_name, 'a' + (unit>>4));
+
+	printk(KERN_INFO "%s: Max segments %d, queue depth %d, byte limit %d.\n",
+		 d->dev_name, edp2_dev[unit].max_segments, edp2_dev[unit].depth, edp2_max_sectors[unit]<<9);
+
+	edp2_query_device(dev, 0x0000, 0, &type, 1);
+
+	printk(KERN_INFO "%s: ", d->dev_name);
+	switch(type)
+	{
+		case 0: printk("Disk Storage");break;
+		case 4: printk("WORM");break;
+		case 5: printk("CD-ROM");break;
+		case 7: printk("Optical device");break;
+		default:
+			printk("Type %d", type);
+	}
+	if(status&(1<<10))
+		printk("(RAID)");
+
+	if((flags^status)&(1<<4|1<<3))	/* Missing media or device */
+	{
+		printk(KERN_INFO " Not loaded.\n");
+		/* Device missing ? */
+		if((flags^status)&(1<<4))
+			return 1;
+	}
+	else
+	{
+		printk(": %dMB, %d byte sectors",
+			(int)(size>>20), blocksize);
+	}
+	if(status&(1<<0))
+	{
+		u32 cachesize;
+		edp2_query_device(dev, 0x0003, 0, &cachesize, 4);
+		cachesize>>=10;
+		if(cachesize>4095)
+			printk(", %dMb cache", cachesize>>10);
+		else
+			printk(", %dKb cache", cachesize);
+	}
+	printk(".\n");
+	printk(KERN_INFO "%s: Maximum sectors/read set to %d.\n",
+		d->dev_name, edp2_max_sectors[unit]);
+
+	/*
+	 * If this is the first I2O block device found on this IOP,
+	 * we need to initialize all the queue data structures
+	 * before any I/O can be performed. If it fails, this
+	 * device is useless.
+	 */
+	if(!edp2_queues[c->unit]) {
+		if(edp2_init_iop(c->unit))
+			return 1;
+	}
+
+	/*
+	 * This will save one level of lookup/indirection in critical
+	 * code so that we can directly get the queue ptr from the
+	 * device instead of having to go the IOP data structure.
+	 */
+	dev->req_queue = &edp2_queues[c->unit]->req_queue;
+
+	grok_partitions(&edp2_gendisk, unit>>4, 1<<4, (long)(size>>9));
+
+	return 0;
+}
+
+/*
+ * Initialize IOP specific queue structures.  This is called
+ * once for each IOP that has a block device sitting behind it.
+ */
+static int edp2_init_iop(unsigned int unit)
+{
+	int i;
+
+	edp2_queues[unit] = (struct edp2_iop_queue *) kmalloc(sizeof(struct edp2_iop_queue), GFP_ATOMIC);
+	if(!edp2_queues[unit])
+	{
+		printk(KERN_WARNING "Could not allocate request queue for EDP2 block device!\n");
+		return -1;
+	}
+
+	for(i = 0; i< MAX_EDP2_DEPTH; i++)
+	{
+		edp2_queues[unit]->request_queue[i].next = &edp2_queues[unit]->request_queue[i+1];
+		edp2_queues[unit]->request_queue[i].tag = i;
+	}
+
+	/* Queue is MAX_EDP2_DEPTH + 1... */
+	edp2_queues[unit]->request_queue[i].next = NULL;
+	edp2_queues[unit]->edp2_qhead = &edp2_queues[unit]->request_queue[0];
+	atomic_set(&edp2_queues[unit]->queue_depth, 0);
+
+	blk_init_queue(&edp2_queues[unit]->req_queue, edp2_request);
+	blk_queue_headactive(&edp2_queues[unit]->req_queue, 0);
+	edp2_queues[unit]->req_queue.back_merge_fn = edp2_back_merge;
+	edp2_queues[unit]->req_queue.front_merge_fn = edp2_front_merge;
+	edp2_queues[unit]->req_queue.merge_requests_fn = edp2_merge_requests;
+	edp2_queues[unit]->req_queue.queuedata = edp2_queues[unit];
+
+	return 0;
+}
+
+/*
+ * Get the request queue for the given device.
+ */
+static request_queue_t* edp2_get_queue(kdev_t dev)
+{
+	int unit = MINOR(dev)&0xF0;
+	return edp2_dev[unit].req_queue;
+}
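/*
 * Standalone sketch of the free-list pattern set up in edp2_init_iop()
 * and consumed by edp2_request()/edp2_unhook_request() above: request
 * slots are popped and pushed at the list head, so the I/O path never
 * allocates. Illustrative user-space C; all names are local to the
 * sketch.
 */
#include <stdio.h>

#define DEPTH 8

struct slot {
	struct slot *next;
	int tag;
};

static struct slot pool[DEPTH + 1];
static struct slot *qhead;

static void pool_init(void)
{
	int i;
	for (i = 0; i < DEPTH; i++) {
		pool[i].next = &pool[i + 1];
		pool[i].tag = i;
	}
	pool[DEPTH].next = NULL;	/* list is DEPTH + 1 entries long */
	qhead = &pool[0];
}

int main(void)
{
	struct slot *s;

	pool_init();
	s = qhead;			/* pop, as in edp2_request() */
	qhead = s->next;
	printf("dispatched tag %d\n", s->tag);	/* tag 0 */
	s->next = qhead;		/* push back, as in edp2_unhook_request() */
	qhead = s;
	return 0;
}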
+/*
+ * Probe the I2O subsystem for block class devices
+ */
+static void edp2_scan(int bios)
+{
+	int i;
+	int warned = 0;
+
+	struct i2o_device *d, *b=NULL;
+	struct i2o_controller *c;
+	struct edp2_device *dev;
+
+	for(i=0; i< MAX_I2O_CONTROLLERS; i++)
+	{
+		c=i2o_find_controller(i);
+
+		if(c==NULL)
+			continue;
+
+		/*
+		 *    The device list connected to the I2O Controller is doubly linked.
+		 * Here we traverse the end of the list, and start claiming devices
+		 * from that end. This assures that within an I2O controller at least
+		 * the newly created volumes get claimed after the older ones, thus
+		 * mapping to same major/minor (and hence device file name) after
+		 * every reboot.
+		 * The exception being:
+		 * 1. If there was a TID reuse.
+		 * 2. There was more than one I2O controller.
+		 */
+
+		if(!bios)
+		{
+			for (d=c->devices;d!=NULL;d=d->next)
+				if(d->next == NULL)
+					b = d;
+		}
+		else
+			b = c->devices;
+
+		while(b != NULL)
+		{
+			d=b;
+			if(bios)
+				b = b->next;
+			else
+				b = b->prev;
+
+			if(d->lct_data.class_id!=I2O_CLASS_RANDOM_BLOCK_STORAGE)
+				continue;
+
+			if(d->lct_data.user_tid != 0xFFF)
+				continue;
+
+			if(bios)
+			{
+				if(d->lct_data.bios_info != 0x80)
+					continue;
+				printk(KERN_INFO "Claiming as Boot device: Controller %d, TID %d\n", c->unit, d->lct_data.tid);
+			}
+			else
+			{
+				if(d->lct_data.bios_info == 0x80)
+					continue; /*Already claimed on pass 1 */
+			}
+
+			if(i2o_claim_device(d, &i2o_block_handler))
+			{
+				printk(KERN_WARNING "i2o_block: Controller %d, TID %d\n", c->unit,
+					d->lct_data.tid);
+				printk(KERN_WARNING "\t%sevice refused claim! Skipping installation\n", bios?"Boot d":"D");
+				continue;
+			}
+
+			if(scan_unit < MAX_EDP2_DISK<<4)
+			{
+				dev = &edp2_dev[scan_unit];
+				dev->i2odev = d;
+				dev->controller = c;
+				dev->unit = c->unit;
+				dev->tid = d->lct_data.tid;
+
+				if(edp2_install_device(c,d,scan_unit))
+					printk(KERN_WARNING "Could not install I2O block device\n");
+				else
+				{
+					scan_unit+=16;
+					edp2_dev_count++;
+
+					/* We want to know when device goes away */
+					i2o_device_notify_on(d, &i2o_block_handler);
+				}
+			}
+			else
+			{
+				if(!warned++)
+					printk(KERN_WARNING "i2o_block: too many devices, registering only %d.\n", scan_unit>>4);
+			}
+			i2o_release_device(d, &i2o_block_handler);
+		}
+		i2o_unlock_controller(c);
+	}
+}
+
+static void edp2_probe(void)
+{
+	/*
+	 *	Some overhead/redundancy involved here, while trying to
+	 *	claim the first boot volume encountered as /dev/i2o/hda
+	 *	every time. All the i2o_controllers are searched and the
+	 *	first i2o block device marked as bootable is claimed.
+	 *	If an I2O block device was booted off, the BIOS sets
+	 *	its bios_info field to 0x80; this is what we search for.
+	 *	Assuming that the bootable volume is /dev/i2o/hda
+	 *	every time will prevent any kernel panic while mounting
+	 *	the root partition.
+	 */
+
+	printk(KERN_INFO "i2o_block: Checking for Boot device...\n");
+	edp2_scan(1);
+
+	/*
+	 *	Now the remainder.
+	 */
+	printk(KERN_INFO "i2o_block: Checking for I2O Block devices...\n");
+	edp2_scan(0);
+}
+
+
+/*
+ * New device notification handler.  Called whenever a new
+ * EDP2 block storage device is added to the system.
+ *
+ * Should we spin lock around this to keep multiple devs from
+ * getting updated at the same time?
+ *
+ */
+void edp2_new_device(struct i2o_controller *c, struct i2o_device *d)
+{
+	struct edp2_device *dev;
+	int unit = 0;
+
+	printk(KERN_INFO "i2o_block: New device detected\n");
+	printk(KERN_INFO "   Controller %d Tid %d\n",c->unit, d->lct_data.tid);
+
+	/* Check for available space */
+	if(edp2_dev_count>=MAX_EDP2_DISK<<4)
+	{
+		printk(KERN_ERR "i2o_block: No more devices allowed!\n");
+		return;
+	}
+	for(unit = 0; unit < (MAX_EDP2_DISK<<4); unit += 16)
+	{
+		if(!edp2_dev[unit].i2odev)
+			break;
+	}
+
+	if(i2o_claim_device(d, &i2o_block_handler))
+	{
+		printk(KERN_INFO "i2o_block: Unable to claim device. Installation aborted\n");
+		return;
+	}
+
+	dev = &edp2_dev[unit];
+	dev->i2odev = d;
+	dev->controller = c;
+	dev->tid = d->lct_data.tid;
+
+	if(edp2_install_device(c,d,unit))
+		printk(KERN_ERR "i2o_block: Could not install new device\n");
+	else
+	{
+		edp2_dev_count++;
+		i2o_device_notify_on(d, &i2o_block_handler);
+	}
+
+	i2o_release_device(d, &i2o_block_handler);
+
+	return;
+}
+
+/*
+ * Deleted device notification handler.  Called when a device we
+ * are talking to has been deleted by the user or some other
+ * mysterious force outside the kernel.
+ */
+
+void edp2_del_device(struct i2o_controller *c, struct i2o_device *d)
+{
+	int unit = 0;
+	int i = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&io_request_lock, flags);
+
+	printk(KERN_INFO "EDP2 Block Device Deleted\n");
+
+	for(unit = 0; unit < MAX_EDP2_DISK<<4; unit += 16)
+	{
+		if(edp2_dev[unit].i2odev == d)
+		{
+			printk(KERN_INFO " /dev/%s: Controller %d Tid %d\n",
+				d->dev_name, c->unit, d->lct_data.tid);
+			break;
+		}
+	}
+	if(unit >= MAX_EDP2_DISK<<4)
+	{
+		printk(KERN_ERR "edp2_del_device called, but not in dev table!\n");
+		spin_unlock_irqrestore(&io_request_lock, flags);
+		return;
+	}
+
+	/*
+	 * This will force errors when edp2_get_queue() is called
+	 * by the kernel.
+	 */
+	edp2_dev[unit].req_queue = NULL;
+	for(i = unit; i <= unit+15; i++)
+	{
+		edp2_dev[i].i2odev = NULL;
+		edp2_sizes[i] = 0;
+		edp2_hardsizes[i] = 0;
+		edp2_max_sectors[i] = 0;
+		edp2_b[i].nr_sects = 0;
+		edp2_gendisk.part[i].nr_sects = 0;
+	}
+	spin_unlock_irqrestore(&io_request_lock, flags);
+
+	/*
+	 * Decrease usage count for module
+	 */
+
+	while(edp2_dev[unit].refcnt--)
+		MOD_DEC_USE_COUNT;
+
+	edp2_dev[unit].refcnt = 0;
+
+	edp2_dev_count--;
+}
+
+/*
+ *	Have we seen a media change ?
+ */
+
+static int edp2_media_change(kdev_t dev)
+{
+	return 0;
+}
+
+static int edp2_revalidate(kdev_t dev)
+{
+	return do_edp2_revalidate(dev, 0);
+}
+
+/*
+ * Reboot notifier.  This is called by kernel core when the system
+ * shuts down.
+ */
+static void edp2_reboot_event(void)
+{
+	int i;
+
+	for(i=0;i<MAX_EDP2_DISK;i++)
+	{
+		struct edp2_device *dev=&edp2_dev[(i<<4)];
+		if(dev->refcnt!=0)
+			edp2_flush_cache_wait(dev);
+	}
+}
+
+static struct block_device_operations edp2_fops =
+{
+	owner:			THIS_MODULE,
+	open:			edp2_open,
+	release:		edp2_release,
+	ioctl:			edp2_ioctl,
+	check_media_change:	edp2_media_change,
+	revalidate:		edp2_revalidate,
+};
+
+static struct gendisk edp2_gendisk =
+{
+	major:		MAJOR_NR,
+	major_name:	"edp2/hd",
+	minor_shift:	4,
+	max_p:		1<<4,
+	part:		edp2_b,
+	sizes:		edp2_sizes,
+	nr_real:	MAX_EDP2_DISK,
+	fops:		&edp2_fops,
+};
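/*
 * Worked example of the minor-number layout implied by minor_shift = 4
 * and max_p = 1<<4 above: each disk owns 16 minors, with the low nibble
 * selecting the partition. Standalone user-space C, not driver code.
 */
#include <stdio.h>

int main(void)
{
	int minor = 0x35;		/* third disk, fifth partition */
	int disk = minor >> 4;		/* 3 */
	int part = minor & 0x0F;	/* 5 */
	int base = minor & 0xF0;	/* 0x30: whole-disk minor, as unit&0xF0 above */

	/* Prints: minor 53 -> disk 3 partition 5 (unit 48) */
	printf("minor %d -> disk %d partition %d (unit %d)\n",
	       minor, disk, part, base);
	return 0;
}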
+
+
+/*
+ * And here should be modules and kernel interface
+ *  (Just smiley confuses emacs :-)
+ */
+
+static int edp2_block_init(void)
+{
+	int i;
+
+	printk(KERN_INFO "EDP2 Coraid Storage v0.01\n");
+	printk(KERN_INFO "   (c) Copyright 2003 Red Hat Software.\n");
+
+	/*
+	 *	Register the block device interfaces
+	 */
+
+	if (register_blkdev(MAJOR_NR, "edp2", &edp2_fops)) {
+		printk(KERN_ERR "Unable to get major number %d for edp2\n",
+		       MAJOR_NR);
+		return -EIO;
+	}
+
+	/*
+	 *	Now fill in the boiler plate
+	 */
+
+	blksize_size[MAJOR_NR] = edp2_blksizes;
+	hardsect_size[MAJOR_NR] = edp2_hardsizes;
+	blk_size[MAJOR_NR] = edp2_sizes;
+	max_sectors[MAJOR_NR] = edp2_max_sectors;
+	blk_dev[MAJOR_NR].queue = edp2_get_queue;
+
+	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), edp2_request);
+	blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
+
+	for (i = 0; i < MAX_EDP2_DISK<<4; i++) {
+		edp2_dev[i].refcnt = 0;
+		edp2_dev[i].flags = 0;
+		edp2_dev[i].controller = NULL;
+		edp2_dev[i].i2odev = NULL;
+		edp2_dev[i].tid = 0;
+		edp2_dev[i].head = NULL;
+		edp2_dev[i].tail = NULL;
+		edp2_dev[i].depth = MAX_EDP2_DEPTH;
+		edp2_blksizes[i] = 512;
+		edp2_max_sectors[i] = 2;
+	}
+	for (i = 0; i < MAX_EDP2_DISK; i++)
+		edp2_queues[i] = NULL;
+
+	/*
+	 *	Register the OSM handler as we will need this to probe for
+	 *	drives, geometry and other goodies.
+	 */
+
+	if(i2o_install_handler(&i2o_block_handler)<0)
+	{
+		unregister_blkdev(MAJOR_NR, "edp2");
+		blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
+		printk(KERN_ERR "edp2: unable to register OSM.\n");
+		return -EINVAL;
+	}
+	edp2_context = i2o_block_handler.context;
+
+	/*
+	 *	Finally see what is actually plugged in to our controllers
+	 */
+	for (i = 0; i < MAX_EDP2_DISK; i++)
+		register_disk(&edp2_gendisk, MKDEV(MAJOR_NR,i<<4), 1<<4,
+			&edp2_fops, 0);
+	edp2_probe();
+
+	/*
+	 *	Adding edp2_gendisk into the gendisk list.
+	 */
+	add_gendisk(&edp2_gendisk);
+
+	return 0;
+}
+
+
+static void edp2_block_exit(void)
+{
+	int i;
+
+	/*
+	 *	Flush the OSM
+	 */
+
+	i2o_remove_handler(&i2o_block_handler);
+
+	/*
+	 *	Return the block device
+	 */
+	if (unregister_blkdev(MAJOR_NR, "edp2") != 0)
+		printk("edp2: cleanup_module failed\n");
+
+	/*
+	 *	free request queue
+	 */
+	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
+	del_gendisk(&edp2_gendisk);
+}
+
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR("Red Hat Software");
+MODULE_DESCRIPTION("Coraid EDP2");
+MODULE_LICENSE("GPL");
+
+module_init(edp2_block_init);
+module_exit(edp2_block_exit);
+
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/mtd/maps/Config.in linux.21rc5-ac2/drivers/mtd/maps/Config.in --- linux.21rc5/drivers/mtd/maps/Config.in 2003-05-28 15:08:54.000000000 +0100 +++ linux.21rc5-ac2/drivers/mtd/maps/Config.in 2003-04-22 16:44:37.000000000 +0100 @@ -19,7 +19,7 @@ if [ "$CONFIG_X86" = "y" ]; then dep_tristate ' CFI Flash device mapped on Photron PNC-2000' CONFIG_MTD_PNC2000 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS - dep_tristate ' CFI Flash device mapped on AMD SC520 CDP' CONFIG_MTD_SC520CDP $CONFIG_MTD_CFI + dep_tristate ' CFI Flash device mapped on AMD SC520 CDP' CONFIG_MTD_SC520CDP $CONFIG_MTD_CFI $CONFIG_MTD_CONCAT dep_tristate ' CFI Flash device mapped on AMD NetSc520' CONFIG_MTD_NETSC520 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS dep_tristate ' CFI Flash device mapped on Arcom SBC-GXx boards' CONFIG_MTD_SBC_GXX $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS dep_tristate ' CFI Flash device mapped on Arcom ELAN-104NC' CONFIG_MTD_ELAN_104NC $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/3c59x.c linux.21rc5-ac2/drivers/net/3c59x.c --- linux.21rc5/drivers/net/3c59x.c 2003-05-28 15:08:54.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/3c59x.c 2003-04-22 16:44:37.000000000 +0100 @@ -166,6 +166,11 @@ - Rename wait_for_completion() to issue_and_wait() to avoid completion.h clash. + LK1.1.18ac 01Jul02 akpm + - Fix for undocumented transceiver power-up bit on some 3c566B's + (Donald Becker, Rahul Karnik) + + - See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details.
- Also see Documentation/networking/vortex.txt */ @@ -181,8 +186,8 @@ #define DRV_NAME "3c59x" -#define DRV_VERSION "LK1.1.16" -#define DRV_RELDATE "19 July 2001" +#define DRV_VERSION "LK1.1.18-ac" +#define DRV_RELDATE "1 July 2002" @@ -400,7 +405,7 @@ EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, - EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000 }; + EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000 }; enum vortex_chips { CH_3C590 = 0, @@ -424,6 +429,7 @@ CH_3C905B_2, CH_3C905B_FX, CH_3C905C, + CH_3C905C2, CH_3C980, CH_3C9805, @@ -495,6 +501,8 @@ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c905C Tornado", PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c905C Tornado 2", + PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c980 Cyclone", PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c982 Dual Port Server Cyclone", @@ -509,7 +517,7 @@ HAS_HWCKSM, 128, }, {"3c556B Laptop Hurricane", PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| - HAS_HWCKSM, 128, }, + WNO_XCVR_PWR|HAS_HWCKSM, 128, }, {"3c575 [Megahertz] 10/100 LAN CardBus", PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, @@ -561,6 +569,7 @@ { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX }, { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C }, + { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C2 }, { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 }, { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 }, @@ -578,7 +587,6 @@ { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 }, { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 }, - { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 }, {0,} /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, vortex_pci_tbl); @@ -1190,6 +1198,10 @@ if (vp->drv_flags & INVERT_MII_PWR) n |= 0x4000; outw(n, ioaddr + Wn2_ResetOptions); + if (vp->drv_flags & WNO_XCVR_PWR) { + EL3WINDOW(0); + outw(0x0800, ioaddr); + } } /* Extract our information from the EEPROM data. */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/aironet4500_core.c linux.21rc5-ac2/drivers/net/aironet4500_core.c --- linux.21rc5/drivers/net/aironet4500_core.c 2003-05-28 15:07:46.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/aironet4500_core.c 2003-04-22 16:44:37.000000000 +0100 @@ -2676,10 +2676,8 @@ #endif //awc_dump_registers(dev); - if (adhoc & !max_mtu) - max_mtu= 2250; - else if (!max_mtu) - max_mtu= 1500; + if (!max_mtu) + max_mtu= adhoc ? 
2250 : 1500; priv->sleeping_bap = 1; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/cs89x0.c linux.21rc5-ac2/drivers/net/cs89x0.c --- linux.21rc5/drivers/net/cs89x0.c 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/cs89x0.c 2003-05-20 20:32:42.000000000 +0100 @@ -1629,16 +1629,21 @@ } -static int set_mac_address(struct net_device *dev, void *addr) +static int set_mac_address(struct net_device *dev, void *p) { int i; + struct sockaddr *addr = p; + if (netif_running(dev)) return -EBUSY; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + if (net_debug) { printk("%s: Setting MAC address to ", dev->name); - for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = ((unsigned char *)addr)[i]); + for (i = 0; i < dev->addr_len; i++) + printk(" %2.2x", dev->dev_addr[i]); printk(".\n"); } /* set the Ethernet address */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/irda/ma600.c linux.21rc5-ac2/drivers/net/irda/ma600.c --- linux.21rc5/drivers/net/irda/ma600.c 2003-05-28 15:07:47.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/irda/ma600.c 2003-05-29 01:40:07.000000000 +0100 @@ -51,9 +51,9 @@ #undef ASSERT(expr, func) #define ASSERT(expr, func) \ if(!(expr)) { \ - printk( "Assertion failed! %s,%s,%s,line=%d\n",\ - #expr,__FILE__,__FUNCTION__,__LINE__); \ - ##func} + printk( "Assertion failed! %s,%s,%s,line=%d\n",\ + #expr,__FILE__,__FUNCTION__,__LINE__); \ + func} #endif /* convert hex value to ascii hex */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/Makefile.lib linux.21rc5-ac2/drivers/net/Makefile.lib --- linux.21rc5/drivers/net/Makefile.lib 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/Makefile.lib 2003-05-20 20:21:28.000000000 +0100 @@ -0,0 +1,74 @@ +# These drivers all require crc32.o +obj-$(CONFIG_8139CP) += crc32.o +obj-$(CONFIG_8139TOO) += crc32.o +obj-$(CONFIG_A2065) += crc32.o +obj-$(CONFIG_ADAPTEC_STARFIRE) += crc32.o +obj-$(CONFIG_AMD8111_ETH) += crc32.o +obj-$(CONFIG_ARM_AM79C961A) += crc32.o +obj-$(CONFIG_AT1700) += crc32.o +obj-$(CONFIG_ATP) += crc32.o +obj-$(CONFIG_BMAC) += crc32.o +obj-$(CONFIG_DE4X5) += crc32.o +obj-$(CONFIG_DECLANCE) += crc32.o +obj-$(CONFIG_DEPCA) += crc32.o +obj-$(CONFIG_DL2K) += crc32.o +obj-$(CONFIG_DM9102) += crc32.o +obj-$(CONFIG_EPIC100) += crc32.o +obj-$(CONFIG_EWRK3) += crc32.o +obj-$(CONFIG_FEALNX) += crc32.o +obj-$(CONFIG_GMAC) += crc32.o +obj-$(CONFIG_HAPPYMEAL) += crc32.o +obj-$(CONFIG_MACE) += crc32.o +obj-$(CONFIG_MACMACE) += crc32.o +obj-$(CONFIG_PCNET32) += crc32.o +obj-$(CONFIG_R8169) += crc32.o +obj-$(CONFIG_SGI_IOC3_ETH) += crc32.o +obj-$(CONFIG_SIS900) += crc32.o +obj-$(CONFIG_SMC9194) += crc32.o +obj-$(CONFIG_SUNBMAC) += crc32.o +obj-$(CONFIG_SUNDANCE) += crc32.o +obj-$(CONFIG_SUNGEM) += crc32.o +obj-$(CONFIG_SUNLANCE) += crc32.o +obj-$(CONFIG_SUNQE) += crc32.o +obj-$(CONFIG_TYPHOON) += crc32.o +obj-$(CONFIG_VIA_RHINE) += crc32.o +obj-$(CONFIG_WINBOND_840) += crc32.o +obj-$(CONFIG_YELLOWFIN) += crc32.o + +# These rely on drivers/net/7990.o which requires crc32.o +obj-$(CONFIG_HPLANCE) += crc32.o +obj-$(CONFIG_MVME147_NET) += crc32.o + +# These rely on drivers/net/8390.o which requires crc32.o +obj-$(CONFIG_OAKNET) += crc32.o +obj-$(CONFIG_NE2K_PCI) += crc32.o +obj-$(CONFIG_STNIC) += crc32.o +obj-$(CONFIG_MAC8390) += crc32.o +obj-$(CONFIG_APNE) += crc32.o +obj-$(CONFIG_PCMCIA_PCNET) += crc32.o +obj-$(CONFIG_ARM_ETHERH) += crc32.o +obj-$(CONFIG_WD80x3) += crc32.o 
+obj-$(CONFIG_EL2) += crc32.o +obj-$(CONFIG_NE2000) += crc32.o +obj-$(CONFIG_NE2_MCA) += crc32.o +obj-$(CONFIG_HPLAN) += crc32.o +obj-$(CONFIG_HPLAN_PLUS) += crc32.o +obj-$(CONFIG_ULTRA) += crc32.o +obj-$(CONFIG_ULTRAMCA) += crc32.o +obj-$(CONFIG_ULTRA32) += crc32.o +obj-$(CONFIG_E2100) += crc32.o +obj-$(CONFIG_ES3210) += crc32.o +obj-$(CONFIG_LNE390) += crc32.o +obj-$(CONFIG_NE3210) += crc32.o +obj-$(CONFIG_AC3200) += crc32.o +obj-$(CONFIG_ARIADNE2) += crc32.o +obj-$(CONFIG_HYDRA) += crc32.o + +# drivers/net/pcmcia +obj-$(CONFIG_PCMCIA_FMVJ18X) += crc32.o +obj-$(CONFIG_PCMCIA_SMC91C92) += crc32.o +obj-$(CONFIG_PCMCIA_XIRTULIP) += crc32.o + +# drivers/net/tulip +obj-$(CONFIG_TULIP) += crc32.o + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/pcmcia/aironet4500_cs.c linux.21rc5-ac2/drivers/net/pcmcia/aironet4500_cs.c --- linux.21rc5/drivers/net/pcmcia/aironet4500_cs.c 2003-05-28 15:07:47.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/pcmcia/aironet4500_cs.c 2003-04-23 14:31:24.000000000 +0100 @@ -282,7 +282,7 @@ }; memset(dev,0,sizeof(struct net_device)); dev->priv = kmalloc(sizeof(struct awc_private), GFP_KERNEL); - if (!dev->priv ) {printk(KERN_CRIT "out of mem on dev priv alloc \n"); return NULL;}; + if (!dev->priv ) {printk(KERN_CRIT "out of mem on dev priv alloc \n"); kfree(dev); return NULL;}; memset(dev->priv,0,sizeof(struct awc_private)); // link->dev->minor = dev->minor; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/pcnet32.c linux.21rc5-ac2/drivers/net/pcnet32.c --- linux.21rc5/drivers/net/pcnet32.c 2003-05-28 15:08:55.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/pcnet32.c 2003-04-22 16:44:37.000000000 +0100 @@ -1,5 +1,5 @@ -/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ -/* +/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. + * * Copyright 1996-1999 Thomas Bogendoerfer * * Derived from the lance driver written 1993,1994,1995 by Donald Becker. @@ -543,7 +543,7 @@ /* initialize variables */ fdx = mii = fset = dxsuflo = ltint = 0; chip_version = (chip_version >> 12) & 0xffff; - + switch (chip_version) { case 0x2420: chipname = "PCnet/PCI 79C970"; /* PCI */ @@ -1175,19 +1175,12 @@ if (err_status & 0x04000000) lp->stats.tx_aborted_errors++; if (err_status & 0x08000000) lp->stats.tx_carrier_errors++; if (err_status & 0x10000000) lp->stats.tx_window_errors++; -#ifndef DO_DXSUFLO if (err_status & 0x40000000) { lp->stats.tx_fifo_errors++; - /* Ackk! On FIFO errors the Tx unit is turned off! */ - /* Remove this verbosity later! */ - printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", - dev->name, csr0); - must_restart = 1; - } -#else - if (err_status & 0x40000000) { - lp->stats.tx_fifo_errors++; - if (! lp->dxsuflo) { /* If controller doesn't recover ... */ +#ifdef DO_DXSUFLO + if (! lp->dxsuflo) +#endif + { /* If controller doesn't recover ... */ /* Ackk! On FIFO errors the Tx unit is turned off! */ /* Remove this verbosity later! */ printk(KERN_ERR "%s: Tx FIFO error! 
CSR0=%4.4x\n", @@ -1195,7 +1188,6 @@ must_restart = 1; } } -#endif } else { if (status & 0x1800) lp->stats.collisions++; @@ -1722,12 +1714,13 @@ } } + module_init(pcnet32_init_module); module_exit(pcnet32_cleanup_module); /* * Local variables: - * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c pcnet32.c" + * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include/linux -Wall -Wstrict-prototypes -O2 -m486 -c pcnet32.c" * c-indent-level: 4 * tab-width: 8 * End: diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/sk98lin/build_no.c linux.21rc5-ac2/drivers/net/sk98lin/build_no.c --- linux.21rc5/drivers/net/sk98lin/build_no.c 2003-05-28 15:08:55.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/sk98lin/build_no.c 2003-04-22 16:44:37.000000000 +0100 @@ -7,4 +7,3 @@ static const char SysKonnectBuildNumber[] = "@(#)SK-BUILD: 6.02 (20021219) PL: ALL.01"; -^Z \ No newline at end of file diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/tlan.c linux.21rc5-ac2/drivers/net/tlan.c --- linux.21rc5/drivers/net/tlan.c 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/tlan.c 2003-05-29 00:37:18.000000000 +0100 @@ -208,7 +208,6 @@ MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)"); MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)"); -EXPORT_NO_SYMBOLS; /* Define this to enable Link beat monitoring */ #undef MONITOR @@ -218,10 +217,11 @@ static int bbuf; static u8 *TLanPadBuffer; +static dma_addr_t TLanPadBufferDMA; static char TLanSignature[] = "TLAN"; -static const char tlan_banner[] = "ThunderLAN driver v1.15\n"; -static int tlan_have_pci; -static int tlan_have_eisa; +static const char tlan_banner[] = "ThunderLAN driver v1.15\n"; +static int tlan_have_pci; +static int tlan_have_eisa; const char *media[] = { "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ", @@ -347,6 +347,27 @@ static int TLan_EeReadByte( struct net_device *, u8, u8 * ); +static void +TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) +{ + unsigned long addr = (unsigned long)skb; + tag->buffer[9].address = (u32)addr; + addr >>= 31; /* >>= 32 is undefined for 32bit arch, stupid C */ + addr >>= 1; + tag->buffer[8].address = (u32)addr; +} + +static struct sk_buff * +TLan_GetSKB( struct tlan_list_tag *tag) +{ + unsigned long addr = tag->buffer[8].address; + addr <<= 31; + addr <<= 1; + addr |= tag->buffer[9].address; + return (struct sk_buff *) addr; +} + + static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { TLan_HandleInvalid, TLan_HandleTxEOF, @@ -422,7 +443,7 @@ unregister_netdev( dev ); if ( priv->dmaStorage ) { - kfree( priv->dmaStorage ); + pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); } release_region( dev->base_addr, 0x10 ); @@ -433,10 +454,10 @@ } static struct pci_driver tlan_driver = { - name: "tlan", - id_table: tlan_pci_tbl, - probe: tlan_init_one, - remove: __devexit_p(tlan_remove_one), + .name = "tlan", + .id_table = tlan_pci_tbl, + .probe = tlan_init_one, + .remove = __devexit_p(tlan_remove_one), }; static int __init tlan_probe(void) @@ -445,8 +466,7 @@ printk(KERN_INFO "%s", tlan_banner); - TLanPadBuffer = (u8 *) kmalloc(TLAN_MIN_FRAME_SIZE, - GFP_KERNEL); + TLanPadBuffer = (u8 *) pci_alloc_consistent(NULL, TLAN_MIN_FRAME_SIZE, &TLanPadBufferDMA); if (TLanPadBuffer == NULL) { printk(KERN_ERR "TLAN: Could not allocate memory for pad 
buffer.\n"); @@ -471,7 +491,7 @@ if (TLanDevicesInstalled == 0) { pci_unregister_driver(&tlan_driver); - kfree(TLanPadBuffer); + pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA); return -ENODEV; } return 0; @@ -526,12 +546,22 @@ priv = dev->priv; + priv->pciDev = pdev; + /* Is this a PCI device? */ if (pdev) { u32 pci_io_base = 0; priv->adapter = &board_info[ent->driver_data]; + if(pci_set_dma_mask(pdev, 0xFFFFFFFF)) + { + printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n"); + unregister_netdev(dev); + kfree(dev); + return -ENODEV; + } + pci_read_config_byte ( pdev, PCI_REVISION_ID, &pci_rev); for ( reg= 0; reg <= 5; reg ++ ) { @@ -639,7 +669,7 @@ dev = TLan_Eisa_Devices; priv = dev->priv; if (priv->dmaStorage) { - kfree(priv->dmaStorage); + pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); } release_region( dev->base_addr, 0x10); unregister_netdev( dev ); @@ -657,7 +687,7 @@ if (tlan_have_eisa) TLan_Eisa_Cleanup(); - kfree( TLanPadBuffer ); + pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA); } @@ -808,7 +838,9 @@ dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) * ( sizeof(TLanList) ); } - priv->dmaStorage = kmalloc(dma_size, GFP_KERNEL | GFP_DMA); + priv->dmaStorage = pci_alloc_consistent(priv->pciDev, dma_size, &priv->dmaStorageDMA); + priv->dmaSize = dma_size; + if ( priv->dmaStorage == NULL ) { printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n", dev->name ); @@ -818,11 +850,14 @@ memset( priv->dmaStorage, 0, dma_size ); priv->rxList = (TLanList *) ( ( ( (u32) priv->dmaStorage ) + 7 ) & 0xFFFFFFF8 ); + priv->rxListDMA = ( ( ( (u32) priv->dmaStorageDMA ) + 7 ) & 0xFFFFFFF8 ); priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; + priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; if ( bbuf ) { priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS ); - priv->txBuffer = priv->rxBuffer - + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); + priv->rxBufferDMA =priv->txListDMA + sizeof(TLanList) * TLAN_NUM_TX_LISTS; + priv->txBuffer = priv->rxBuffer + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); + priv->txBufferDMA = priv->rxBufferDMA + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); } err = 0; @@ -1003,6 +1038,7 @@ { TLanPrivateInfo *priv = dev->priv; TLanList *tail_list; + dma_addr_t tail_list_phys; u8 *tail_buffer; int pad; unsigned long flags; @@ -1014,6 +1050,7 @@ } tail_list = priv->txList + priv->txTail; + tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", dev->name, priv->txHead, priv->txTail ); @@ -1028,8 +1065,8 @@ tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); memcpy( tail_buffer, skb->data, skb->len ); } else { - tail_list->buffer[0].address = virt_to_bus( skb->data ); - tail_list->buffer[9].address = (u32) skb; + tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); + TLan_StoreSKB(tail_list, skb); } pad = TLAN_MIN_FRAME_SIZE - skb->len; @@ -1038,7 +1075,7 @@ tail_list->frameSize = (u16) skb->len + pad; tail_list->buffer[0].count = (u32) skb->len; tail_list->buffer[1].count = TLAN_LAST_BUFFER | (u32) pad; - tail_list->buffer[1].address = virt_to_bus( TLanPadBuffer ); + tail_list->buffer[1].address = TLanPadBufferDMA; } else { tail_list->frameSize = (u16) skb->len; tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len; @@ 
-1051,14 +1088,14 @@ if ( ! priv->txInProgress ) { priv->txInProgress = 1; TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); - outl( virt_to_bus( tail_list ), dev->base_addr + TLAN_CH_PARM ); + outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); } else { TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", priv->txTail ); if ( priv->txTail == 0 ) { - ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = virt_to_bus( tail_list ); + ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = tail_list_phys; } else { - ( priv->txList + ( priv->txTail - 1 ) )->forward = virt_to_bus( tail_list ); + ( priv->txList + ( priv->txTail - 1 ) )->forward = tail_list_phys; } } spin_unlock_irqrestore(&priv->lock, flags); @@ -1344,6 +1381,7 @@ TLanPrivateInfo *priv = dev->priv; int eoc = 0; TLanList *head_list; + dma_addr_t head_list_phys; u32 ack = 0; u16 tmpCStat; @@ -1353,7 +1391,10 @@ while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { ack++; if ( ! bbuf ) { - dev_kfree_skb_any( (struct sk_buff *) head_list->buffer[9].address ); + struct sk_buff *skb = TLan_GetSKB(head_list); + pci_unmap_single(priv->pciDev, head_list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + head_list->buffer[8].address = 0; head_list->buffer[9].address = 0; } @@ -1374,8 +1415,9 @@ if ( eoc ) { TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", priv->txHead, priv->txTail ); head_list = priv->txList + priv->txHead; + head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { - outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM ); + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); ack |= TLAN_HC_GO; } else { priv->txInProgress = 0; @@ -1468,9 +1510,11 @@ void *t; u32 frameSize; u16 tmpCStat; + dma_addr_t head_list_phys; TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); head_list = priv->rxList + priv->rxHead; + head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { frameSize = head_list->frameSize; @@ -1498,17 +1542,16 @@ struct sk_buff *new_skb; /* - * I changed the algorithm here. What we now do - * is allocate the new frame. If this fails we - * simply recycle the frame. - */ + * I changed the algorithm here. What we now do + * is allocate the new frame. If this fails we + * simply recycle the frame. 
+ */ new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 ); if ( new_skb != NULL ) { - /* If this ever happened it would be a problem */ - /* not any more - ac */ - skb = (struct sk_buff *) head_list->buffer[9].address; + skb = TLan_GetSKB(head_list); + pci_unmap_single(priv->pciDev, head_list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); skb_trim( skb, frameSize ); priv->stats.rx_bytes += frameSize; @@ -1519,9 +1562,12 @@ new_skb->dev = dev; skb_reserve( new_skb, 2 ); t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE ); - head_list->buffer[0].address = virt_to_bus( t ); + head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); head_list->buffer[8].address = (u32) t; - head_list->buffer[9].address = (u32) new_skb; +#if BITS_PER_LONG==64 +#error "Not 64bit clean" +#endif + TLan_StoreSKB(head_list, new_skb); } else printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" ); } @@ -1529,11 +1575,12 @@ head_list->forward = 0; head_list->cStat = 0; tail_list = priv->rxList + priv->rxTail; - tail_list->forward = virt_to_bus( head_list ); + tail_list->forward = head_list_phys; CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS ); CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS ); head_list = priv->rxList + priv->rxHead; + head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; } if (!ack) @@ -1545,7 +1592,8 @@ if ( eoc ) { TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); head_list = priv->rxList + priv->rxHead; - outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM ); + head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); ack |= TLAN_HC_GO | TLAN_HC_RT; priv->rxEocCount++; } @@ -1611,7 +1659,7 @@ * host_int The contents of the HOST_INT * port. * - * This driver is structured to determine EOC occurances by + * This driver is structured to determine EOC occurrences by * reading the CSTAT member of the list structure. Tx EOC * interrupts are disabled via the DIO INTDIS register. * However, TLAN chips before revision 3.0 didn't have this @@ -1624,15 +1672,17 @@ { TLanPrivateInfo *priv = dev->priv; TLanList *head_list; + dma_addr_t head_list_phys; u32 ack = 1; host_int = 0; if ( priv->tlanRev < 0x30 ) { TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", priv->txHead, priv->txTail ); head_list = priv->txList + priv->txHead; + head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { netif_stop_queue(dev); - outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM ); + outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); ack |= TLAN_HC_GO; } else { priv->txInProgress = 0; @@ -1733,7 +1783,7 @@ * host_int The contents of the HOST_INT * port. * - * This driver is structured to determine EOC occurances by + * This driver is structured to determine EOC occurrences by * reading the CSTAT member of the list structure. Rx EOC * interrupts are disabled via the DIO INTDIS register. 
* However, TLAN chips before revision 3.0 didn't have this @@ -1745,13 +1795,13 @@ u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) { TLanPrivateInfo *priv = dev->priv; - TLanList *head_list; + dma_addr_t head_list_phys; u32 ack = 1; if ( priv->tlanRev < 0x30 ) { TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", priv->rxHead, priv->rxTail ); - head_list = priv->rxList + priv->rxHead; - outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM ); + head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; + outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); ack |= TLAN_HC_GO | TLAN_HC_RT; priv->rxEocCount++; } @@ -1888,6 +1938,7 @@ TLanPrivateInfo *priv = dev->priv; int i; TLanList *list; + dma_addr_t list_phys; struct sk_buff *skb; void *t = NULL; @@ -1897,12 +1948,13 @@ list = priv->txList + i; list->cStat = TLAN_CSTAT_UNUSED; if ( bbuf ) { - list->buffer[0].address = virt_to_bus( priv->txBuffer + ( i * TLAN_MAX_FRAME_SIZE ) ); + list->buffer[0].address = priv->txBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); } else { list->buffer[0].address = 0; } list->buffer[2].count = 0; list->buffer[2].address = 0; + list->buffer[8].address = 0; list->buffer[9].address = 0; } @@ -1910,11 +1962,12 @@ priv->rxTail = TLAN_NUM_RX_LISTS - 1; for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { list = priv->rxList + i; + list_phys = priv->rxListDMA + sizeof(TLanList) * i; list->cStat = TLAN_CSTAT_READY; list->frameSize = TLAN_MAX_FRAME_SIZE; list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; if ( bbuf ) { - list->buffer[0].address = virt_to_bus( priv->rxBuffer + ( i * TLAN_MAX_FRAME_SIZE ) ); + list->buffer[0].address = priv->rxBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); } else { skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 ); if ( skb == NULL ) { @@ -1925,14 +1978,14 @@ skb_reserve( skb, 2 ); t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE ); } - list->buffer[0].address = virt_to_bus( t ); + list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); list->buffer[8].address = (u32) t; - list->buffer[9].address = (u32) skb; + TLan_StoreSKB(list, skb); } list->buffer[1].count = 0; list->buffer[1].address = 0; if ( i < TLAN_NUM_RX_LISTS - 1 ) - list->forward = virt_to_bus( list + 1 ); + list->forward = list_phys + sizeof(TLanList); else list->forward = 0; } @@ -1950,23 +2003,26 @@ if ( ! 
bbuf ) { for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { list = priv->txList + i; - skb = (struct sk_buff *) list->buffer[9].address; + skb = TLan_GetSKB(list); if ( skb ) { + pci_unmap_single(priv->pciDev, list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_any( skb ); + list->buffer[8].address = 0; list->buffer[9].address = 0; } } for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { list = priv->rxList + i; - skb = (struct sk_buff *) list->buffer[9].address; + skb = TLan_GetSKB(list); if ( skb ) { + pci_unmap_single(priv->pciDev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb_any( skb ); + list->buffer[8].address = 0; list->buffer[9].address = 0; } } } - } /* TLan_FreeLists */ @@ -2271,8 +2327,8 @@ printk("TLAN: Partner capability: "); for (i = 5; i <= 10; i++) if (partner & (1<<i)) printk("%s",media[i-5]); printk("\n"); TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); @@ -2304,7 +2360,7 @@ if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) { outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); } - outl( virt_to_bus( priv->rxList ), dev->base_addr + TLAN_CH_PARM ); + outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM ); outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD ); } else { printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name ); @@ -2376,7 +2432,7 @@ * dev A pointer to the device structure of the * TLAN device having the PHYs to be detailed. * - * This function prints the registers a PHY (aka tranceiver). + * This function prints the registers a PHY (aka transceiver). * ********************************************************************/ @@ -2492,7 +2548,7 @@ /* Wait for 50 ms and powerup * This is abitrary. It is intended to make sure the - * tranceiver settles. + * transceiver settles. */ TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP ); @@ -2512,7 +2568,7 @@ TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); TLan_MiiSync(dev->base_addr); /* Wait for 500 ms and reset the - * tranceiver. The TLAN docs say both 50 ms and + * transceiver. The TLAN docs say both 50 ms and * 500 ms, so do the longer, just in case. */ TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET ); @@ -2627,7 +2683,7 @@ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl ); } - /* Wait for 2 sec to give the tranceiver time + /* Wait for 2 sec to give the transceiver time * to establish link. 
*/ TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET ); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/tlan.h linux.21rc5-ac2/drivers/net/tlan.h --- linux.21rc5/drivers/net/tlan.h 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/tlan.h 2003-05-29 01:44:06.000000000 +0100 @@ -169,15 +169,22 @@ typedef struct tlan_private_tag { struct net_device *nextDevice; + struct pci_dev *pciDev; void *dmaStorage; + dma_addr_t dmaStorageDMA; + unsigned int dmaSize; u8 *padBuffer; TLanList *rxList; + dma_addr_t rxListDMA; u8 *rxBuffer; + dma_addr_t rxBufferDMA; u32 rxHead; u32 rxTail; u32 rxEocCount; TLanList *txList; + dma_addr_t txListDMA; u8 *txBuffer; + dma_addr_t txBufferDMA; u32 txHead; u32 txInProgress; u32 txTail; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/tokenring/olympic.c linux.21rc5-ac2/drivers/net/tokenring/olympic.c --- linux.21rc5/drivers/net/tokenring/olympic.c 2003-05-28 15:07:46.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/tokenring/olympic.c 2003-05-28 15:36:48.000000000 +0100 @@ -655,8 +655,7 @@ printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) ); printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]); - printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = -%08x\n",olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ; + printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ; #endif writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/tun.c linux.21rc5-ac2/drivers/net/tun.c --- linux.21rc5/drivers/net/tun.c 2003-05-28 15:07:47.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/tun.c 2003-04-22 16:44:37.000000000 +0100 @@ -188,7 +188,7 @@ size_t len = count; if (!(tun->flags & TUN_NO_PI)) { - if ((len -= sizeof(pi)) < 0) + if ((len -= sizeof(pi)) > len) return -EINVAL; memcpy_fromiovec((void *)&pi, iv, sizeof(pi)); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/8253x/8253xsyn.c linux.21rc5-ac2/drivers/net/wan/8253x/8253xsyn.c --- linux.21rc5/drivers/net/wan/8253x/8253xsyn.c 2003-05-28 15:07:46.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/8253x/8253xsyn.c 2003-04-22 16:44:37.000000000 +0100 @@ -1030,7 +1030,7 @@ } #if 0 - if ((tty->count == 1) && (port->count != 0)) + if ((atomic_read(&tty->count) == 1) && (port->count != 0)) { /* * Uh, oh. tty->count is 1, which means that the tty diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/c101.c linux.21rc5-ac2/drivers/net/wan/c101.c --- linux.21rc5/drivers/net/wan/c101.c 2003-05-28 15:08:58.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/c101.c 2003-04-22 16:44:37.000000000 +0100 @@ -1,12 +1,11 @@ /* * Moxa C101 synchronous serial card driver for Linux * - * Copyright (C) 2000-2002 Krzysztof Halasa + * Copyright (C) 2000-2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
* * For information see http://hq.pm.waw.pl/hdlc/ * @@ -31,7 +30,7 @@ #include "hd64570.h" -static const char* version = "Moxa C101 driver version: 1.10"; +static const char* version = "Moxa C101 driver version: 1.12"; static const char* devname = "C101"; #define C101_PAGE 0x1D00 @@ -78,7 +77,12 @@ #define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg)) #define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg)) #define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg)) -#define sca_outw(value, reg, card) writew(value, (card)->win0base + C101_SCA + (reg)) + +/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */ +#define sca_outw(value, reg, card) do { \ + writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \ + writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg+1));\ +} while(0) #define port_to_card(port) (port) #define log_node(port) (0) @@ -352,7 +356,7 @@ c101_run(irq, ram); if (*hw == '\x0') - return 0; + return first_card ? 0 : -ENOSYS; }while(*hw++ == ':'); printk(KERN_ERR "c101: invalid hardware parameters\n"); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/Config.in linux.21rc5-ac2/drivers/net/wan/Config.in --- linux.21rc5/drivers/net/wan/Config.in 2003-05-28 15:08:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/Config.in 2003-04-22 16:44:37.000000000 +0100 @@ -68,6 +68,7 @@ tristate ' Generic HDLC layer' CONFIG_HDLC if [ "$CONFIG_HDLC" != "n" ]; then bool ' Raw HDLC support' CONFIG_HDLC_RAW + bool ' Raw HDLC Ethernet device support' CONFIG_HDLC_RAW_ETH bool ' Cisco HDLC support' CONFIG_HDLC_CISCO bool ' Frame Relay support' CONFIG_HDLC_FR bool ' Synchronous Point-to-Point Protocol (PPP) support' CONFIG_HDLC_PPP @@ -76,6 +77,18 @@ else comment ' X.25/LAPB support is disabled' fi + if [ "$CONFIG_PCI" != "n" ]; then + dep_tristate ' Cyclades-PC300 support (RS-232/V.35, X.21, T1/E1 boards)' CONFIG_PC300 $CONFIG_HDLC + if [ "$CONFIG_PC300" != "n" ]; then + if [ "$CONFIG_PPP" != "n" -a "$CONFIG_PPP_MULTILINK" != "n" -a "$CONFIG_PPP_SYNCTTY" != "n" -a "$CONFIG_HDLC_PPP" = "y" ]; + then + bool ' Cyclades-PC300 MLPPP support' CONFIG_PC300_MLPPP + else + comment ' Cyclades-PC300 MLPPP support is disabled. You have to enable PPP, PPP_MULTILINK' + comment ' PPP_SYNCTTY and HDLC_PPP to use this package.'
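The new sca_outw above exists because the C101 attaches the SCA through an 8-bit ISA window: a 16-bit register write has to be issued as two byte writes, and the chip only commits a DMA address once the high half (EDAH) arrives, so the order is significant. A userspace sketch of the same split write (regs and writeb_mock are stand-ins for the card's I/O window, not kernel API):

    #include <stdio.h>

    static unsigned char regs[0x100];    /* fake SCA register window */

    static void writeb_mock(unsigned char v, unsigned int reg)
    {
        regs[reg] = v;
        printf("writeb 0x%02x -> 0x%02x\n", v, reg);
    }

    /* Low byte to reg, then high byte to reg+1 -- the order the macro
     * enforces so EDAL is loaded before EDAH latches the address. */
    static void sca_outw_mock(unsigned short value, unsigned int reg)
    {
        writeb_mock(value & 0xFF, reg);
        writeb_mock((value >> 8) & 0xFF, reg + 1);
    }

    int main(void)
    {
        sca_outw_mock(0xBEEF, 0x88);    /* e.g. an EDAL/EDAH pair */
        return 0;
    }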
+ fi + fi + fi dep_tristate ' SDL RISCom/N2 support' CONFIG_N2 $CONFIG_HDLC dep_tristate ' Moxa C101 support' CONFIG_C101 $CONFIG_HDLC dep_tristate ' FarSync T-Series support' CONFIG_FARSYNC $CONFIG_HDLC diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/dscc4.c linux.21rc5-ac2/drivers/net/wan/dscc4.c --- linux.21rc5/drivers/net/wan/dscc4.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/dscc4.c 2003-04-22 16:44:37.000000000 +0100 @@ -487,9 +487,9 @@ skb = dev_alloc_skb(len); dpriv->rx_skbuff[dirty] = skb; if (skb) { - skb->dev = dev; - skb->protocol = htons(ETH_P_HDLC); - skb->mac.raw = skb->data; + skb->dev = dev; + skb->protocol = hdlc_type_trans(skb, dev); + skb->mac.raw = skb->data; rx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data, len, PCI_DMA_FROMDEVICE); } else { @@ -1757,7 +1757,7 @@ (++i%TX_RING_SIZE)*sizeof(*tx_fd)); } while (i < TX_RING_SIZE); - if (dscc4_init_dummy_skb(dpriv) < 0) + if (dscc4_init_dummy_skb(dpriv) == NULL) goto err_free_dma_tx; memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/farsync.c linux.21rc5-ac2/drivers/net/wan/farsync.c --- linux.21rc5/drivers/net/wan/farsync.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/farsync.c 2003-04-22 16:44:37.000000000 +0100 @@ -764,7 +764,7 @@ /* Push upstream */ skb->mac.raw = skb->data; skb->dev = hdlc_to_dev ( &port->hdlc ); - skb->protocol = htons ( ETH_P_HDLC ); + skb->protocol = hdlc_type_trans(skb, skb->dev); netif_rx ( skb ); port_to_dev ( port )->last_rx = jiffies; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/hd64572.h linux.21rc5-ac2/drivers/net/wan/hd64572.h --- linux.21rc5/drivers/net/wan/hd64572.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/hd64572.h 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,433 @@ +/* + * hd64572.h Description of the Hitachi HD64572 (SCA-II), valid for + * CPU modes 0 & 2. + * + * Author: Ivan Passos + * + * Copyright: (c) 2000-2001 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#ifndef _HD64572_H +#define _HD64572_H + +/* Illegal Access Register */ +#define ILAR 0x00 + +/* Wait Controller Registers */ +#define PABR0L 0x20 /* Physical Addr Boundary Register 0 L */ +#define PABR0H 0x21 /* Physical Addr Boundary Register 0 H */ +#define PABR1L 0x22 /* Physical Addr Boundary Register 1 L */ +#define PABR1H 0x23 /* Physical Addr Boundary Register 1 H */ +#define WCRL 0x24 /* Wait Control Register L */ +#define WCRM 0x25 /* Wait Control Register M */ +#define WCRH 0x26 /* Wait Control Register H */ + +/* Interrupt Registers */ +#define IVR 0x60 /* Interrupt Vector Register */ +#define IMVR 0x64 /* Interrupt Modified Vector Register */ +#define ITCR 0x68 /* Interrupt Control Register */ +#define ISR0 0x6c /* Interrupt Status Register 0 */ +#define ISR1 0x70 /* Interrupt Status Register 1 */ +#define IER0 0x74 /* Interrupt Enable Register 0 */ +#define IER1 0x78 /* Interrupt Enable Register 1 */ + +/* Register Access Macros (chan is 0 or 1 in _any_ case) */ +#define M_REG(reg, chan) (reg + 0x80*chan) /* MSCI */ +#define DRX_REG(reg, chan) (reg + 0x40*chan) /* DMA Rx */ +#define DTX_REG(reg, chan) (reg + 0x20*(2*chan + 1)) /* DMA Tx */ +#define TRX_REG(reg, chan) (reg + 0x20*chan) /* Timer Rx */ +#define TTX_REG(reg, chan) (reg + 0x10*(2*chan + 1)) /* Timer Tx */ +#define ST_REG(reg, chan) (reg + 0x80*chan) /* Status Cnt */ +#define IR0_DRX(val, chan) ((val)<<(8*(chan))) /* Int DMA Rx */ +#define IR0_DTX(val, chan) ((val)<<(4*(2*chan + 1))) /* Int DMA Tx */ +#define IR0_M(val, chan) ((val)<<(8*(chan))) /* Int MSCI */ + +/* MSCI Channel Registers */ +#define MD0 0x138 /* Mode reg 0 */ +#define MD1 0x139 /* Mode reg 1 */ +#define MD2 0x13a /* Mode reg 2 */ +#define MD3 0x13b /* Mode reg 3 */ +#define CTL 0x130 /* Control reg */ +#define RXS 0x13c /* RX clock source */ +#define TXS 0x13d /* TX clock source */ +#define EXS 0x13e /* External clock input selection */ +#define TMCT 0x144 /* Time constant (Tx) */ +#define TMCR 0x145 /* Time constant (Rx) */ +#define CMD 0x128 /* Command reg */ +#define ST0 0x118 /* Status reg 0 */ +#define ST1 0x119 /* Status reg 1 */ +#define ST2 0x11a /* Status reg 2 */ +#define ST3 0x11b /* Status reg 3 */ +#define ST4 0x11c /* Status reg 4 */ +#define FST 0x11d /* frame Status reg */ +#define IE0 0x120 /* Interrupt enable reg 0 */ +#define IE1 0x121 /* Interrupt enable reg 1 */ +#define IE2 0x122 /* Interrupt enable reg 2 */ +#define IE4 0x124 /* Interrupt enable reg 4 */ +#define FIE 0x125 /* Frame Interrupt enable reg */ +#define SA0 0x140 /* Syn Address reg 0 */ +#define SA1 0x141 /* Syn Address reg 1 */ +#define IDL 0x142 /* Idle register */ +#define TRBL 0x100 /* TX/RX buffer reg L */ +#define TRBK 0x101 /* TX/RX buffer reg K */ +#define TRBJ 0x102 /* TX/RX buffer reg J */ +#define TRBH 0x103 /* TX/RX buffer reg H */ +#define TRC0 0x148 /* TX Ready control reg 0 */ +#define TRC1 0x149 /* TX Ready control reg 1 */ +#define RRC 0x14a /* RX Ready control reg */ +#define CST0 0x108 /* Current Status Register 0 */ +#define CST1 0x109 /* Current Status Register 1 */ +#define CST2 0x10a /* Current Status Register 2 */ +#define CST3 0x10b /* Current Status Register 3 */ +#define GPO 0x131 /* General Purpose Output Pin Ctl Reg */ +#define TFS 0x14b /* Tx Start Threshold Ctl Reg */ +#define TFN 0x143 /* Inter-transmit-frame Time Fill Ctl Reg */ +#define TBN 0x110 /* Tx Buffer Number Reg */ +#define RBN 0x111 /* Rx Buffer Number Reg */ +#define TNR0 0x150 /* Tx DMA Request Ctl Reg 0 */ +#define TNR1 0x151 /* Tx DMA Request Ctl Reg 1 */ 
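The access macros above encode the HD64572's per-channel register strides: each MSCI channel occupies 0x80 bytes, DMA Rx channels are 0x40 apart, and DMA Tx blocks sit at odd multiples of 0x20. A quick sketch that evaluates a few of them (register values copied from the defines; the printfs are only for illustration):

    #include <stdio.h>

    #define MD0 0x138                                     /* MSCI Mode reg 0 */
    #define M_REG(reg, chan)   (reg + 0x80*chan)          /* MSCI */
    #define DRX_REG(reg, chan) (reg + 0x40*chan)          /* DMA Rx */
    #define DTX_REG(reg, chan) (reg + 0x20*(2*chan + 1))  /* DMA Tx */

    int main(void)
    {
        printf("MD0: ch0=0x%x ch1=0x%x\n", M_REG(MD0, 0), M_REG(MD0, 1));
        printf("DMA Tx 0x80: ch0=0x%x ch1=0x%x\n",
               DTX_REG(0x80, 0), DTX_REG(0x80, 1));       /* 0xa0, 0xe0 */
        return 0;
    }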
+#define TCR 0x152 /* Tx DMA Critical Request Reg */ +#define RNR 0x154 /* Rx DMA Request Ctl Reg */ +#define RCR 0x156 /* Rx DMA Critical Request Reg */ + +/* Timer Registers */ +#define TCNTL 0x200 /* Timer Upcounter L */ +#define TCNTH 0x201 /* Timer Upcounter H */ +#define TCONRL 0x204 /* Timer Constant Register L */ +#define TCONRH 0x205 /* Timer Constant Register H */ +#define TCSR 0x206 /* Timer Control/Status Register */ +#define TEPR 0x207 /* Timer Expand Prescale Register */ + +/* DMA registers */ +#define PCR 0x40 /* DMA priority control reg */ +#define DRR 0x44 /* DMA reset reg */ +#define DMER 0x07 /* DMA Master Enable reg */ +#define BTCR 0x08 /* Burst Tx Ctl Reg */ +#define BOLR 0x0c /* Back-off Length Reg */ +#define DSR_RX(chan) (0x48 + 2*chan) /* DMA Status Reg (Rx) */ +#define DSR_TX(chan) (0x49 + 2*chan) /* DMA Status Reg (Tx) */ +#define DIR_RX(chan) (0x4c + 2*chan) /* DMA Interrupt Enable Reg (Rx) */ +#define DIR_TX(chan) (0x4d + 2*chan) /* DMA Interrupt Enable Reg (Tx) */ +#define FCT_RX(chan) (0x50 + 2*chan) /* Frame End Interrupt Counter (Rx) */ +#define FCT_TX(chan) (0x51 + 2*chan) /* Frame End Interrupt Counter (Tx) */ +#define DMR_RX(chan) (0x54 + 2*chan) /* DMA Mode Reg (Rx) */ +#define DMR_TX(chan) (0x55 + 2*chan) /* DMA Mode Reg (Tx) */ +#define DCR_RX(chan) (0x58 + 2*chan) /* DMA Command Reg (Rx) */ +#define DCR_TX(chan) (0x59 + 2*chan) /* DMA Command Reg (Tx) */ + +/* DMA Channel Registers */ +#define DARL 0x80 /* Dest Addr Register L (single-block, RX only) */ +#define DARH 0x81 /* Dest Addr Register H (single-block, RX only) */ +#define DARB 0x82 /* Dest Addr Register B (single-block, RX only) */ +#define DARBH 0x83 /* Dest Addr Register BH (single-block, RX only) */ +#define SARL 0x80 /* Source Addr Register L (single-block, TX only) */ +#define SARH 0x81 /* Source Addr Register H (single-block, TX only) */ +#define SARB 0x82 /* Source Addr Register B (single-block, TX only) */ +#define SARBH 0x83 /* Source Addr Register BH (single-block, TX only) */ +#define BARL 0x80 /* Buffer Addr Register L (chained-block) */ +#define BARH 0x81 /* Buffer Addr Register H (chained-block) */ +#define BARB 0x82 /* Buffer Addr Register B (chained-block) */ +#define BARBH 0x83 /* Buffer Addr Register BH (chained-block) */ +#define CDAL 0x84 /* Current Descriptor Addr Register L */ +#define CDAH 0x85 /* Current Descriptor Addr Register H */ +#define CDAB 0x86 /* Current Descriptor Addr Register B */ +#define CDABH 0x87 /* Current Descriptor Addr Register BH */ +#define EDAL 0x88 /* Error Descriptor Addr Register L */ +#define EDAH 0x89 /* Error Descriptor Addr Register H */ +#define EDAB 0x8a /* Error Descriptor Addr Register B */ +#define EDABH 0x8b /* Error Descriptor Addr Register BH */ +#define BFLL 0x90 /* RX Buffer Length L (only RX) */ +#define BFLH 0x91 /* RX Buffer Length H (only RX) */ +#define BCRL 0x8c /* Byte Count Register L */ +#define BCRH 0x8d /* Byte Count Register H */ + +/* Block Descriptor Structure */ +typedef struct { + unsigned long next; /* pointer to next block descriptor */ + unsigned long ptbuf; /* buffer pointer */ + unsigned short len; /* data length */ + unsigned char status; /* status */ + unsigned char filler[5]; /* alignment filler (16 bytes) */ +} pcsca_bd_t; + +/* + Descriptor Status definitions: + + Bit Transmission Reception + + 7 EOM EOM + 6 - Short Frame + 5 - Abort + 4 - Residual bit + 3 Underrun Overrun + 2 - CRC + 1 Ownership Ownership + 0 EOT - +*/ +#define DST_EOT 0x01 /* End of transmit command */ +#define DST_OSB 0x02 /* 
Ownership bit */ +#define DST_CRC 0x04 /* CRC Error */ +#define DST_OVR 0x08 /* Overrun */ +#define DST_UDR 0x08 /* Underrun */ +#define DST_RBIT 0x10 /* Residual bit */ +#define DST_ABT 0x20 /* Abort */ +#define DST_SHRT 0x40 /* Short Frame */ +#define DST_EOM 0x80 /* End of Message */ + +/* Status Counter Registers */ +#define CMCR 0x158 /* Counter Master Ctl Reg */ +#define TECNTL 0x160 /* Tx EOM Counter L */ +#define TECNTM 0x161 /* Tx EOM Counter M */ +#define TECNTH 0x162 /* Tx EOM Counter H */ +#define TECCR 0x163 /* Tx EOM Counter Ctl Reg */ +#define URCNTL 0x164 /* Underrun Counter L */ +#define URCNTH 0x165 /* Underrun Counter H */ +#define URCCR 0x167 /* Underrun Counter Ctl Reg */ +#define RECNTL 0x168 /* Rx EOM Counter L */ +#define RECNTM 0x169 /* Rx EOM Counter M */ +#define RECNTH 0x16a /* Rx EOM Counter H */ +#define RECCR 0x16b /* Rx EOM Counter Ctl Reg */ +#define ORCNTL 0x16c /* Overrun Counter L */ +#define ORCNTH 0x16d /* Overrun Counter H */ +#define ORCCR 0x16f /* Overrun Counter Ctl Reg */ +#define CECNTL 0x170 /* CRC Counter L */ +#define CECNTH 0x171 /* CRC Counter H */ +#define CECCR 0x173 /* CRC Counter Ctl Reg */ +#define ABCNTL 0x174 /* Abort frame Counter L */ +#define ABCNTH 0x175 /* Abort frame Counter H */ +#define ABCCR 0x177 /* Abort frame Counter Ctl Reg */ +#define SHCNTL 0x178 /* Short frame Counter L */ +#define SHCNTH 0x179 /* Short frame Counter H */ +#define SHCCR 0x17b /* Short frame Counter Ctl Reg */ +#define RSCNTL 0x17c /* Residual bit Counter L */ +#define RSCNTH 0x17d /* Residual bit Counter H */ +#define RSCCR 0x17f /* Residual bit Counter Ctl Reg */ + +/* Register Programming Constants */ + +#define IR0_DMIC 0x00000001 +#define IR0_DMIB 0x00000002 +#define IR0_DMIA 0x00000004 +#define IR0_EFT 0x00000008 +#define IR0_DMAREQ 0x00010000 +#define IR0_TXINT 0x00020000 +#define IR0_RXINTB 0x00040000 +#define IR0_RXINTA 0x00080000 +#define IR0_TXRDY 0x00100000 +#define IR0_RXRDY 0x00200000 + +#define MD0_CRC16_0 0x00 +#define MD0_CRC16_1 0x01 +#define MD0_CRC32 0x02 +#define MD0_CRC_CCITT 0x03 +#define MD0_CRCC0 0x04 +#define MD0_CRCC1 0x08 +#define MD0_AUTO_ENA 0x10 +#define MD0_ASYNC 0x00 +#define MD0_BY_MSYNC 0x20 +#define MD0_BY_BISYNC 0x40 +#define MD0_BY_EXT 0x60 +#define MD0_BIT_SYNC 0x80 +#define MD0_TRANSP 0xc0 + +#define MD1_NOADDR 0x00 +#define MD1_SADDR1 0x40 +#define MD1_SADDR2 0x80 +#define MD1_DADDR 0xc0 + +#define MD2_F_DUPLEX 0x00 +#define MD2_AUTO_ECHO 0x01 +#define MD2_LOOP_HI_Z 0x02 +#define MD2_LOOP_MIR 0x03 +#define MD2_ADPLL_X8 0x00 +#define MD2_ADPLL_X16 0x08 +#define MD2_ADPLL_X32 0x10 +#define MD2_NRZ 0x00 +#define MD2_NRZI 0x20 +#define MD2_NRZ_IEEE 0x40 +#define MD2_MANCH 0x00 +#define MD2_FM1 0x20 +#define MD2_FM0 0x40 +#define MD2_FM 0x80 + +#define CTL_RTS 0x01 +#define CTL_DTR 0x02 +#define CTL_SYN 0x04 +#define CTL_IDLC 0x10 +#define CTL_UDRNC 0x20 +#define CTL_URSKP 0x40 +#define CTL_URCT 0x80 + +#define RXS_BR0 0x01 +#define RXS_BR1 0x02 +#define RXS_BR2 0x04 +#define RXS_BR3 0x08 +#define RXS_ECLK 0x00 +#define RXS_ECLK_NS 0x20 +#define RXS_IBRG 0x40 +#define RXS_PLL1 0x50 +#define RXS_PLL2 0x60 +#define RXS_PLL3 0x70 +#define RXS_DRTXC 0x80 + +#define TXS_BR0 0x01 +#define TXS_BR1 0x02 +#define TXS_BR2 0x04 +#define TXS_BR3 0x08 +#define TXS_ECLK 0x00 +#define TXS_IBRG 0x40 +#define TXS_RCLK 0x60 +#define TXS_DTRXC 0x80 + +#define EXS_RES0 0x01 +#define EXS_RES1 0x02 +#define EXS_RES2 0x04 +#define EXS_TES0 0x10 +#define EXS_TES1 0x20 +#define EXS_TES2 0x40 + +#define CMD_RX_RST 0x11 +#define CMD_RX_ENA 0x12 
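The DST_* bits above follow the descriptor-status table in the header comment: bit 3 means underrun on transmit but overrun on receive, and bit 1 is the ownership bit in both directions. A small decoder in the spirit of a driver's RX completion path (standalone sketch; values copied from the defines):

    #include <stdio.h>

    #define DST_OSB  0x02    /* ownership bit: descriptor completed */
    #define DST_CRC  0x04    /* CRC error (RX) */
    #define DST_OVR  0x08    /* overrun (RX); same bit is underrun on TX */
    #define DST_EOM  0x80    /* end of message */

    static void decode_rx_status(unsigned char st)
    {
        printf("0x%02x:%s%s%s%s\n", st,
               st & DST_OSB ? " done" : " still-owned-by-device",
               st & DST_EOM ? " eom" : "",
               st & DST_CRC ? " crc-err" : "",
               st & DST_OVR ? " overrun" : "");
    }

    int main(void)
    {
        decode_rx_status(DST_OSB | DST_EOM);               /* clean frame end */
        decode_rx_status(DST_OSB | DST_EOM | DST_CRC);     /* bad frame */
        return 0;
    }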
+#define CMD_RX_DIS 0x13 +#define CMD_RX_CRC_INIT 0x14 +#define CMD_RX_MSG_REJ 0x15 +#define CMD_RX_MP_SRCH 0x16 +#define CMD_RX_CRC_EXC 0x17 +#define CMD_RX_CRC_FRC 0x18 +#define CMD_TX_RST 0x01 +#define CMD_TX_ENA 0x02 +#define CMD_TX_DISA 0x03 +#define CMD_TX_CRC_INIT 0x04 +#define CMD_TX_CRC_EXC 0x05 +#define CMD_TX_EOM 0x06 +#define CMD_TX_ABORT 0x07 +#define CMD_TX_MP_ON 0x08 +#define CMD_TX_BUF_CLR 0x09 +#define CMD_TX_DISB 0x0b +#define CMD_CH_RST 0x21 +#define CMD_SRCH_MODE 0x31 +#define CMD_NOP 0x00 + +#define ST0_RXRDY 0x01 +#define ST0_TXRDY 0x02 +#define ST0_RXINTB 0x20 +#define ST0_RXINTA 0x40 +#define ST0_TXINT 0x80 + +#define ST1_IDLE 0x01 +#define ST1_ABORT 0x02 +#define ST1_CDCD 0x04 +#define ST1_CCTS 0x08 +#define ST1_SYN_FLAG 0x10 +#define ST1_CLMD 0x20 +#define ST1_TXIDLE 0x40 +#define ST1_UDRN 0x80 + +#define ST2_CRCE 0x04 +#define ST2_ONRN 0x08 +#define ST2_RBIT 0x10 +#define ST2_ABORT 0x20 +#define ST2_SHORT 0x40 +#define ST2_EOM 0x80 + +#define ST3_RX_ENA 0x01 +#define ST3_TX_ENA 0x02 +#define ST3_DCD 0x04 +#define ST3_CTS 0x08 +#define ST3_SRCH_MODE 0x10 +#define ST3_SLOOP 0x20 +#define ST3_GPI 0x80 + +#define ST4_RDNR 0x01 +#define ST4_RDCR 0x02 +#define ST4_TDNR 0x04 +#define ST4_TDCR 0x08 +#define ST4_OCLM 0x20 +#define ST4_CFT 0x40 +#define ST4_CGPI 0x80 + +#define FST_CRCEF 0x04 +#define FST_OVRNF 0x08 +#define FST_RBIF 0x10 +#define FST_ABTF 0x20 +#define FST_SHRTF 0x40 +#define FST_EOMF 0x80 + +#define IE0_RXRDY 0x01 +#define IE0_TXRDY 0x02 +#define IE0_RXINTB 0x20 +#define IE0_RXINTA 0x40 +#define IE0_TXINT 0x80 + +#define IE1_IDLD 0x01 +#define IE1_ABTD 0x02 +#define IE1_CDCD 0x04 +#define IE1_CCTS 0x08 +#define IE1_SYNCD 0x10 +#define IE1_CLMD 0x20 +#define IE1_IDL 0x40 +#define IE1_UDRN 0x80 + +#define IE2_CRCE 0x04 +#define IE2_OVRN 0x08 +#define IE2_RBIT 0x10 +#define IE2_ABT 0x20 +#define IE2_SHRT 0x40 +#define IE2_EOM 0x80 + +#define IE4_RDNR 0x01 +#define IE4_RDCR 0x02 +#define IE4_TDNR 0x04 +#define IE4_TDCR 0x08 +#define IE4_OCLM 0x20 +#define IE4_CFT 0x40 +#define IE4_CGPI 0x80 + +#define FIE_CRCEF 0x04 +#define FIE_OVRNF 0x08 +#define FIE_RBIF 0x10 +#define FIE_ABTF 0x20 +#define FIE_SHRTF 0x40 +#define FIE_EOMF 0x80 + +#define DSR_DWE 0x01 +#define DSR_DE 0x02 +#define DSR_REF 0x04 +#define DSR_UDRF 0x04 +#define DSR_COA 0x08 +#define DSR_COF 0x10 +#define DSR_BOF 0x20 +#define DSR_EOM 0x40 +#define DSR_EOT 0x80 + +#define DIR_REF 0x04 +#define DIR_UDRF 0x04 +#define DIR_COA 0x08 +#define DIR_COF 0x10 +#define DIR_BOF 0x20 +#define DIR_EOM 0x40 +#define DIR_EOT 0x80 + +#define DMR_CNTE 0x02 +#define DMR_NF 0x04 +#define DMR_SEOME 0x08 +#define DMR_TMOD 0x10 + +#define DCR_SW_ABT 0x01 +#define DCR_FCT_CLR 0x02 + +#define PCR_PR0 0x01 +#define PCR_PR1 0x02 +#define PCR_PR2 0x04 +#define PCR_CCC 0x08 +#define PCR_BRC 0x10 +#define PCR_OSB 0x40 +#define PCR_BURST 0x80 + +#endif /* (_HD64572_H) */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/hd6457x.c linux.21rc5-ac2/drivers/net/wan/hd6457x.c --- linux.21rc5/drivers/net/wan/hd6457x.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/hd6457x.c 2003-04-22 16:44:37.000000000 +0100 @@ -1,12 +1,11 @@ /* * Hitachi SCA HD64570 and HD64572 common driver for Linux * - * Copyright (C) 1998-2000 Krzysztof Halasa + * Copyright (C) 1998-2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either 
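The hd6457x.c hunk above is one instance of a broader change in this patch: instead of each protocol module hard-coding skb->protocol (or supplying its own netif_rx), the low-level driver now asks the attached protocol through a type_trans hook. A compact sketch of that dispatch shape (the fake_* types and P_* values are simplified stand-ins, not the kernel's hdlc_device/sk_buff):

    #include <stdio.h>

    enum { P_HDLC = 0x0019, P_IP = 0x0800 };    /* illustrative values */

    struct fake_skb { unsigned short protocol; };
    struct fake_proto {
        /* set by the protocol module, like hdlc->type_trans here */
        unsigned short (*type_trans)(struct fake_skb *);
    };

    static unsigned short raw_type_trans(struct fake_skb *skb)
    {
        return P_IP;    /* raw IP-in-HDLC always carries IP */
    }

    /* Driver RX path: classify via the hook, fall back to a default. */
    static void rx(struct fake_proto *p, struct fake_skb *skb)
    {
        skb->protocol = p->type_trans ? p->type_trans(skb) : P_HDLC;
        printf("protocol 0x%04x\n", skb->protocol);
    }

    int main(void)
    {
        struct fake_proto raw = { raw_type_trans };
        struct fake_skb skb = { 0 };
        rx(&raw, &skb);
        return 0;
    }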
version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. * * Sources of information: * Hitachi HD64570 SCA User's Manual @@ -42,7 +41,7 @@ #error Either hd64570.h or hd64572.h must be included #endif -static char sca_version[]="1.09"; +static char sca_version[]="1.12"; #define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) #define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET) @@ -294,7 +293,7 @@ skb->mac.raw = skb->data; skb->dev = hdlc_to_dev(&port->hdlc); skb->dev->last_rx = jiffies; - skb->protocol = htons(ETH_P_HDLC); + skb->protocol = hdlc_type_trans(skb, hdlc_to_dev(&port->hdlc)); netif_rx(skb); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/hdlc_cisco.c linux.21rc5-ac2/drivers/net/wan/hdlc_cisco.c --- linux.21rc5/drivers/net/wan/hdlc_cisco.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/hdlc_cisco.c 2003-04-22 16:44:37.000000000 +0100 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * Cisco HDLC support * - * Copyright (C) 2000 - 2001 Krzysztof Halasa + * Copyright (C) 2000 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. */ #include @@ -85,12 +84,37 @@ skb_put(skb, sizeof(cisco_packet)); skb->priority = TC_PRIO_CONTROL; skb->dev = hdlc_to_dev(hdlc); + skb->nh.raw = skb->data; dev_queue_xmit(skb); } +static unsigned short cisco_type_trans(struct sk_buff *skb, + struct net_device *dev) +{ + hdlc_header *data = (hdlc_header*)skb->data; + + if (skb->len < sizeof(hdlc_header)) + return __constant_htons(ETH_P_HDLC); + + if (data->address != CISCO_MULTICAST && + data->address != CISCO_UNICAST) + return __constant_htons(ETH_P_HDLC); + + switch(data->protocol) { + case __constant_htons(ETH_P_IP): + case __constant_htons(ETH_P_IPX): + case __constant_htons(ETH_P_IPV6): + skb_pull(skb, sizeof(hdlc_header)); + return data->protocol; + default: + return __constant_htons(ETH_P_HDLC); + } +} + + static void cisco_rx(struct sk_buff *skb) { hdlc_device *hdlc = dev_to_hdlc(skb->dev); @@ -109,14 +133,6 @@ skb_pull(skb, sizeof(hdlc_header)); switch(ntohs(data->protocol)) { - case ETH_P_IP: - case ETH_P_IPX: - case ETH_P_IPV6: - skb->protocol = data->protocol; - skb->dev = hdlc_to_dev(hdlc); - netif_rx(skb); - return; - case CISCO_SYS_INFO: /* Packet is not needed, drop it. 
*/ dev_kfree_skb_any(skb); @@ -288,6 +304,7 @@ hdlc->open = cisco_open; hdlc->stop = cisco_close; hdlc->netif_rx = cisco_rx; + hdlc->type_trans = cisco_type_trans; hdlc->proto = IF_PROTO_CISCO; dev->hard_start_xmit = hdlc->xmit; dev->hard_header = cisco_hard_header; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/hdlc_fr.c linux.21rc5-ac2/drivers/net/wan/hdlc_fr.c --- linux.21rc5/drivers/net/wan/hdlc_fr.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/hdlc_fr.c 2003-04-22 16:44:37.000000000 +0100 @@ -2,13 +2,22 @@ * Generic HDLC support routines for Linux * Frame Relay support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + + Theory of PVC state in DCE mode: + + (exist,new) -> 0,0 when "PVC create" or if "link unreliable" + 0,x -> 1,1 if "link reliable" when sending FULL STATUS + 1,1 -> 1,0 if received FULL STATUS ACK + + (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create" + -> 1 when "PVC up" and (exist,new) = 1,0 +*/ #include #include @@ -20,19 +29,23 @@ #include #include #include +#include #include #include #include +#include #include __inline__ pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci) { - pvc_device *pvc=hdlc->state.fr.first_pvc; + pvc_device *pvc = hdlc->state.fr.first_pvc; - while (pvc) { - if (netdev_dlci(&pvc->netdev) == dlci) + while(pvc) { + if (pvc->dlci == dlci) return pvc; + if (pvc->dlci > dlci) + return NULL; /* the list is sorted */ pvc = pvc->next; } @@ -40,18 +53,72 @@ } + +__inline__ pvc_device* add_pvc(hdlc_device *hdlc, u16 dlci) +{ + pvc_device *pvc, **pvc_p = &hdlc->state.fr.first_pvc; + + while(*pvc_p) { + if ((*pvc_p)->dlci == dlci) + return *pvc_p; + if ((*pvc_p)->dlci > dlci) + break; /* the list is sorted */ + pvc_p = &(*pvc_p)->next; + } + + pvc = kmalloc(sizeof(pvc_device), GFP_KERNEL); + if (!pvc) + return NULL; + + memset(pvc, 0, sizeof(pvc_device)); + pvc->dlci = dlci; + pvc->master = hdlc; + pvc->next = *pvc_p; /* Put it in the chain */ + *pvc_p = pvc; + return pvc; +} + + +__inline__ int pvc_is_used(pvc_device *pvc) +{ + return pvc->main != NULL || pvc->ether != NULL; +} + + +__inline__ void delete_unused_pvcs(hdlc_device *hdlc) +{ + pvc_device **pvc_p = &hdlc->state.fr.first_pvc; + + while(*pvc_p) { + if (!pvc_is_used(*pvc_p)) { + pvc_device *pvc = *pvc_p; + *pvc_p = pvc->next; + kfree(pvc); + continue; + } + pvc_p = &(*pvc_p)->next; + } +} + -__inline__ u16 status_to_dlci(hdlc_device *hdlc, u8 *status, - int *active, int *new) +__inline__ struct net_device** get_dev_p(pvc_device *pvc, int type) { - *new = (status[2] & 0x08); - *active = (!*new && (status[2] & 0x02)); + if (type == ARPHRD_ETHER) + return &pvc->ether; + else + return &pvc->main; +} + + +__inline__ u16 status_to_dlci(u8 *status, int *active, int *new) +{ + *new = (status[2] & 0x08) ? 1 : 0; + *active = (status[2] & 0x02) ? 
1 : 0; return ((status[0] & 0x3F)<<4) | ((status[1] & 0x78)>>3); } -__inline__ void dlci_to_status(hdlc_device *hdlc, u16 dlci, u8 *status, +__inline__ void dlci_to_status(u16 dlci, u8 *status, int active, int new) { status[0] = (dlci>>4) & 0x3F; @@ -66,37 +133,50 @@ -static int fr_hard_header(struct sk_buff *skb, struct net_device *dev, - u16 type, void *daddr, void *saddr, unsigned int len) +static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) { u16 head_len; + struct sk_buff *skb = *skb_p; - if (!daddr) - daddr = dev->broadcast; - -#ifdef CONFIG_HDLC_DEBUG_HARD_HEADER - printk(KERN_DEBUG "%s: fr_hard_header called\n", dev->name); -#endif - - switch(type) { - case ETH_P_IP: + switch(skb->protocol) { + case __constant_ntohs(ETH_P_IP): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IP; break; - case ETH_P_IPV6: + case __constant_ntohs(ETH_P_IPV6): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IPV6; break; - case LMI_PROTO: + case __constant_ntohs(LMI_PROTO): head_len = 4; skb_push(skb, head_len); skb->data[3] = LMI_PROTO; break; + case __constant_ntohs(ETH_P_802_3): + head_len = 10; + if (skb_headroom(skb) < head_len) { + struct sk_buff *skb2 = skb_realloc_headroom(skb, + head_len); + if (!skb2) + return -ENOBUFS; + dev_kfree_skb(skb); + skb = *skb_p = skb2; + } + skb_push(skb, head_len); + skb->data[3] = FR_PAD; + skb->data[4] = NLPID_SNAP; + skb->data[5] = FR_PAD; + skb->data[6] = 0x80; + skb->data[7] = 0xC2; + skb->data[8] = 0x00; + skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ + break; + default: head_len = 10; skb_push(skb, head_len); @@ -105,14 +185,12 @@ skb->data[5] = FR_PAD; skb->data[6] = FR_PAD; skb->data[7] = FR_PAD; - skb->data[8] = type>>8; - skb->data[9] = (u8)type; + *(u16*)(skb->data + 8) = skb->protocol; } - memcpy(skb->data, daddr, 2); + dlci_to_q922(skb->data, dlci); skb->data[2] = FR_UI; - - return head_len; + return 0; } @@ -124,13 +202,12 @@ if ((hdlc_to_dev(pvc->master)->flags & IFF_UP) == 0) return -EIO; /* Master must be UP in order to activate PVC */ - if (pvc->master->state.fr.settings.lmi != LMI_NONE) - pvc->state.active = 0; - else - pvc->state.active = 1; + if (pvc->open_count++ == 0) { + if (pvc->master->state.fr.settings.lmi == LMI_NONE) + pvc->state.active = 1; - pvc->state.new = 0; - pvc->master->state.fr.changed = 1; + pvc->master->state.fr.dce_changed = 1; + } return 0; } @@ -139,38 +216,94 @@ static int pvc_close(struct net_device *dev) { pvc_device *pvc = dev_to_pvc(dev); - pvc->state.active = pvc->state.new = 0; - pvc->master->state.fr.changed = 1; + + if (--pvc->open_count == 0) { + if (pvc->master->state.fr.settings.lmi == LMI_NONE) + pvc->state.active = 0; + + if (pvc->master->state.fr.settings.dce) { + pvc->master->state.fr.dce_changed = 1; + pvc->state.active = 0; + } + } return 0; } -static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) +int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { pvc_device *pvc = dev_to_pvc(dev); + fr_proto_pvc_info info; - if (pvc->state.active) { - skb->dev = hdlc_to_dev(pvc->master); - pvc->stats.tx_bytes += skb->len; - pvc->stats.tx_packets++; - if (pvc->state.fecn) - pvc->stats.tx_compressed++; /* TX Congestion counter */ - dev_queue_xmit(skb); - } else { - pvc->stats.tx_dropped++; - dev_kfree_skb(skb); + if (ifr->ifr_settings.type == IF_GET_PROTO) { + if (dev->type == ARPHRD_ETHER) + ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC; + else + ifr->ifr_settings.type = IF_PROTO_FR_PVC; + + if (ifr->ifr_settings.size < sizeof(info)) { + /* data 
size wanted */ + ifr->ifr_settings.size = sizeof(info); + return -ENOBUFS; + } + + info.dlci = pvc->dlci; + memcpy(info.master, hdlc_to_name(pvc->master), IFNAMSIZ); + if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info, + &info, sizeof(info))) + return -EFAULT; + return 0; } - return 0; + return -EINVAL; +} + + +__inline__ struct net_device_stats *pvc_get_stats(struct net_device *dev) +{ + return (struct net_device_stats *) + ((char *)dev + sizeof(struct net_device)); } -static struct net_device_stats *pvc_get_stats(struct net_device *dev) +static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) { pvc_device *pvc = dev_to_pvc(dev); - return &pvc->stats; + struct net_device_stats *stats = pvc_get_stats(dev); + + if (pvc->state.active) { + if (dev->type == ARPHRD_ETHER) { + int pad = ETH_ZLEN - skb->len; + if (pad > 0) { /* Pad the frame with zeros */ + int len = skb->len; + if (skb_tailroom(skb) < pad) + if (pskb_expand_head(skb, 0, pad, + GFP_ATOMIC)) { + stats->tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + skb_put(skb, pad); + memset(skb->data + len, 0, pad); + } + skb->protocol = __constant_htons(ETH_P_802_3); + } + if (!fr_hard_header(&skb, pvc->dlci)) { + stats->tx_bytes += skb->len; + stats->tx_packets++; + if (pvc->state.fecn) /* TX Congestion counter */ + stats->tx_compressed++; + skb->dev = hdlc_to_dev(pvc->master); + dev_queue_xmit(skb); + return 0; + } + } + + stats->tx_dropped++; + dev_kfree_skb(skb); + return 0; } @@ -187,9 +320,15 @@ static inline void fr_log_dlci_active(pvc_device *pvc) { - printk(KERN_INFO "%s: %sactive%s\n", pvc_to_name(pvc), - pvc->state.active ? "" : "in", - pvc->state.new ? " new" : ""); + printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", + hdlc_to_name(pvc->master), + pvc->dlci, + pvc->main ? pvc->main->name : "", + pvc->main && pvc->ether ? " " : "", + pvc->ether ? pvc->ether->name : "", + pvc->state.new ? " new" : "", + !pvc->state.exist ? "deleted" : + pvc->state.active ? "active" : "inactive"); } @@ -213,8 +352,8 @@ int i = 0; if (hdlc->state.fr.settings.dce && fullrep) { - len += hdlc->state.fr.pvc_count * (2 + stat_len); - if (len > HDLC_MAX_MTU) { + len += hdlc->state.fr.dce_pvc_count * (2 + stat_len); + if (len > HDLC_MAX_MRU) { printk(KERN_WARNING "%s: Too many PVCs while sending " "LMI full report\n", hdlc_to_name(hdlc)); return; @@ -224,12 +363,13 @@ skb = dev_alloc_skb(len); if (!skb) { printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n", - hdlc_to_name(hdlc)); + hdlc_to_name(hdlc)); return; } memset(skb->data, 0, len); skb_reserve(skb, 4); - fr_hard_header(skb, hdlc_to_dev(hdlc), LMI_PROTO, NULL, NULL, 0); + skb->protocol = __constant_htons(LMI_PROTO); + fr_hard_header(&skb, LMI_DLCI); data = skb->tail; data[i++] = LMI_CALLREF; data[i++] = hdlc->state.fr.settings.dce @@ -253,16 +393,20 @@ ? 
LMI_CCITT_PVCSTAT : LMI_PVCSTAT; data[i++] = stat_len; - if (hdlc->state.fr.reliable && - (pvc->netdev.flags & IFF_UP) && - !pvc->state.active && - !pvc->state.new) { - pvc->state.new = 1; + /* LMI start/restart */ + if (hdlc->state.fr.reliable && !pvc->state.exist) { + pvc->state.exist = pvc->state.new = 1; + fr_log_dlci_active(pvc); + } + + /* ifconfig PVC up */ + if (pvc->open_count && !pvc->state.active && + pvc->state.exist && !pvc->state.new) { + pvc->state.active = 1; fr_log_dlci_active(pvc); } - dlci_to_status(hdlc, netdev_dlci(&pvc->netdev), - data + i, + dlci_to_status(pvc->dlci, data + i, pvc->state.active, pvc->state.new); i += stat_len; pvc = pvc->next; @@ -272,6 +416,7 @@ skb_put(skb, i); skb->priority = TC_PRIO_CONTROL; skb->dev = hdlc_to_dev(hdlc); + skb->nh.raw = skb->data; dev_queue_xmit(skb); } @@ -312,10 +457,11 @@ if (reliable) { hdlc->state.fr.n391cnt = 0; /* Request full status */ - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_changed = 1; } else { while (pvc) { /* Deactivate all PVCs */ - pvc->state.new = pvc->state.active = 0; + pvc->state.exist = 0; + pvc->state.active = pvc->state.new = 0; pvc = pvc->next; } } @@ -346,7 +492,7 @@ { int stat_len; pvc_device *pvc; - int reptype = -1, error; + int reptype = -1, error, no_ram; u8 rxseq, txseq; int i; @@ -420,20 +566,18 @@ while (pvc) { if (pvc->state.new) { pvc->state.new = 0; - pvc->state.active = 1; - fr_log_dlci_active(pvc); /* Tell DTE that new PVC is now active */ - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_changed = 1; } pvc = pvc->next; } } - if (hdlc->state.fr.changed) { + if (hdlc->state.fr.dce_changed) { reptype = LMI_FULLREP; hdlc->state.fr.fullrep_sent = 1; - hdlc->state.fr.changed = 0; + hdlc->state.fr.dce_changed = 0; } fr_lmi_send(hdlc, reptype == LMI_FULLREP ? 1 : 0); @@ -449,13 +593,14 @@ pvc = hdlc->state.fr.first_pvc; while (pvc) { - pvc->state.deleted = pvc->state.active; /* mark active PVCs */ + pvc->state.deleted = 1; pvc = pvc->next; } + no_ram = 0; while (skb->len >= i + 2 + stat_len) { u16 dlci; - int active, new; + unsigned int active, new; if (skb->data[i] != ((hdlc->state.fr.settings.lmi == LMI_CCITT) ? 
LMI_CCITT_PVCSTAT : LMI_PVCSTAT)) { @@ -472,21 +617,28 @@ } i++; - dlci = status_to_dlci(hdlc, skb->data + i, &active, &new); - pvc = find_pvc(hdlc, dlci); + dlci = status_to_dlci(skb->data + i, &active, &new); + + pvc = add_pvc(hdlc, dlci); + + if (!pvc && !no_ram) { + printk(KERN_WARNING + "%s: Memory squeeze on fr_lmi_recv()\n", + hdlc_to_name(hdlc)); + no_ram = 1; + } - active |= new; if (pvc) { - if (active && !pvc->state.active && - (pvc->netdev.flags & IFF_UP)) { + pvc->state.exist = 1; + pvc->state.deleted = 0; + if (active != pvc->state.active || + new != pvc->state.new || + !pvc->state.exist) { + pvc->state.new = new; pvc->state.active = active; fr_log_dlci_active(pvc); } - pvc->state.deleted = 0; } - else if (new) - printk(KERN_INFO "%s: new PVC available, DLCI=%u\n", - hdlc_to_name(hdlc), dlci); i += stat_len; } @@ -494,10 +646,10 @@ pvc = hdlc->state.fr.first_pvc; while (pvc) { - if (pvc->state.deleted) { + if (pvc->state.deleted && pvc->state.exist) { pvc->state.active = pvc->state.new = 0; + pvc->state.exist = 0; fr_log_dlci_active(pvc); - pvc->state.deleted = 0; } pvc = pvc->next; } @@ -517,8 +669,9 @@ u8 *data = skb->data; u16 dlci; pvc_device *pvc; + struct net_device *dev = NULL; - if (skb->len<4 || fh->ea1 || data[2] != FR_UI) + if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI) goto rx_error; dlci = q922_to_dlci(skb->data); @@ -550,57 +703,39 @@ printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n", hdlc_to_name(hdlc), dlci); #endif - goto rx_error; - } - - if ((pvc->netdev.flags & IFF_UP) == 0) { -#ifdef CONFIG_HDLC_DEBUG_PKT - printk(KERN_INFO "%s: PVC for received frame's DLCI %d is down\n", - hdlc_to_name(hdlc), dlci); -#endif - goto rx_error; + dev_kfree_skb_any(skb); + return; } - pvc->stats.rx_packets++; /* PVC traffic */ - pvc->stats.rx_bytes += skb->len; - - if (pvc->state.fecn != (fh->fecn ? PVC_STATE_FECN : 0)) { + if (pvc->state.fecn != fh->fecn) { #ifdef CONFIG_HDLC_DEBUG_ECN - printk(KERN_DEBUG "%s: FECN O%s\n", pvc_to_name(pvc), - fh->fecn ? "N" : "FF"); + printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", hdlc_to_name(hdlc), + dlci, fh->fecn ? "N" : "FF"); #endif pvc->state.fecn ^= 1; } - if (pvc->state.becn != (fh->becn ? PVC_STATE_BECN : 0)) { + if (pvc->state.becn != fh->becn) { #ifdef CONFIG_HDLC_DEBUG_ECN - printk(KERN_DEBUG "%s: BECN O%s\n", pvc_to_name(pvc), - fh->becn ? "N" : "FF"); + printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", hdlc_to_name(hdlc), + dlci, fh->becn ? 
"N" : "FF"); #endif pvc->state.becn ^= 1; } - if (pvc->state.becn) - pvc->stats.rx_compressed++; - - skb->dev = &pvc->netdev; if (data[3] == NLPID_IP) { skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ + dev = pvc->main; skb->protocol = htons(ETH_P_IP); - netif_rx(skb); - return; - } - - if (data[3] == NLPID_IPV6) { + } else if (data[3] == NLPID_IPV6) { skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ + dev = pvc->main; skb->protocol = htons(ETH_P_IPV6); - netif_rx(skb); - return; - } - if (data[3] == FR_PAD && data[4] == NLPID_SNAP && data[5] == FR_PAD) { + } else if (skb->len > 10 && data[3] == FR_PAD && + data[4] == NLPID_SNAP && data[5] == FR_PAD) { u16 oui = ntohs(*(u16*)(data + 6)); u16 pid = ntohs(*(u16*)(data + 8)); skb_pull(skb, 10); @@ -610,23 +745,39 @@ case ETH_P_IPX: case ETH_P_IP: /* a long variant */ case ETH_P_IPV6: + dev = pvc->main; skb->protocol = htons(pid); break; + case 0x80C20007: /* bridged Ethernet frame */ + if ((dev = pvc->ether) != NULL) + skb->protocol = eth_type_trans(skb, dev); + break; + default: printk(KERN_INFO "%s: Unsupported protocol, OUI=%x " "PID=%x\n", hdlc_to_name(hdlc), oui, pid); dev_kfree_skb_any(skb); return; } - - netif_rx(skb); + } else { + printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x " + "length = %i\n", hdlc_to_name(hdlc), data[3], skb->len); + dev_kfree_skb_any(skb); return; } - printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x\n", - hdlc_to_name(hdlc), data[3]); - dev_kfree_skb_any(skb); + if (dev) { + struct net_device_stats *stats = pvc_get_stats(dev); + stats->rx_packets++; /* PVC traffic */ + stats->rx_bytes += skb->len; + if (pvc->state.becn) + stats->rx_compressed++; + skb->dev = dev; + netif_rx(skb); + } else + dev_kfree_skb_any(skb); + return; rx_error: @@ -641,7 +792,7 @@ if (hdlc->state.fr.settings.lmi != LMI_NONE) { hdlc->state.fr.last_poll = 0; hdlc->state.fr.reliable = 0; - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_changed = 1; hdlc->state.fr.request = 0; hdlc->state.fr.fullrep_sent = 0; hdlc->state.fr.last_errors = 0xFFFFFFFF; @@ -669,90 +820,119 @@ if (hdlc->state.fr.settings.lmi != LMI_NONE) del_timer_sync(&hdlc->state.fr.timer); - while(pvc) { - dev_close(&pvc->netdev); /* Shutdown all PVCs for this FRAD */ + while(pvc) { /* Shutdown all PVCs for this FRAD */ + if (pvc->main) + dev_close(pvc->main); + if (pvc->ether) + dev_close(pvc->ether); + pvc->state.active = pvc->state.new = pvc->state.fecn = + pvc->state.becn = 0; + pvc->state.exist = 0; pvc = pvc->next; } } - -static int fr_pvc(hdlc_device *hdlc, unsigned int dlci, int create) +static int fr_add_pvc(hdlc_device *hdlc, unsigned int dlci, int type) { - pvc_device **pvc_p = &hdlc->state.fr.first_pvc; - pvc_device *pvc; - int result; + pvc_device *pvc = NULL; + struct net_device *dev; + int result, used; + char * prefix = "pvc%d"; - if(dlci <= 0 || dlci >= 1024) - return -EINVAL; /* Only 10 bits for DLCI, DLCI 0 reserved */ + if (type == ARPHRD_ETHER) + prefix = "pvceth%d"; - while(*pvc_p) { - if (netdev_dlci(&(*pvc_p)->netdev) == dlci) - break; - pvc_p = &(*pvc_p)->next; + if ((pvc = add_pvc(hdlc, dlci)) == NULL) { + printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n", + hdlc_to_name(hdlc)); + return -ENOBUFS; } - if (create) { /* Create PVC */ - if (*pvc_p != NULL) - return -EEXIST; - - pvc = *pvc_p = kmalloc(sizeof(pvc_device), GFP_KERNEL); - if (!pvc) { - printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", - hdlc_to_name(hdlc)); - return -ENOBUFS; - } - memset(pvc, 0, sizeof(pvc_device)); + if (*get_dev_p(pvc, 
type)) + return -EEXIST; - pvc->netdev.hard_start_xmit = pvc_xmit; - pvc->netdev.get_stats = pvc_get_stats; - pvc->netdev.open = pvc_open; - pvc->netdev.stop = pvc_close; - pvc->netdev.change_mtu = pvc_change_mtu; - pvc->netdev.mtu = HDLC_MAX_MTU; - - pvc->netdev.type = ARPHRD_DLCI; - pvc->netdev.hard_header_len = 16; - pvc->netdev.hard_header = fr_hard_header; - pvc->netdev.tx_queue_len = 0; - pvc->netdev.flags = IFF_POINTOPOINT; - - pvc->master = hdlc; - *(u16*)pvc->netdev.dev_addr = htons(dlci); - dlci_to_q922(pvc->netdev.broadcast, dlci); - pvc->netdev.addr_len = 2; + used = pvc_is_used(pvc); - result = dev_alloc_name(&pvc->netdev, "pvc%d"); - if (result < 0) { - kfree(pvc); - *pvc_p = NULL; - return result; - } - - if (register_netdevice(&pvc->netdev) != 0) { - kfree(pvc); - *pvc_p = NULL; - return -EIO; - } + dev = kmalloc(sizeof(struct net_device) + + sizeof(struct net_device_stats), GFP_KERNEL); + if (!dev) { + printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", + hdlc_to_name(hdlc)); + delete_unused_pvcs(hdlc); + return -ENOBUFS; + } + memset(dev, 0, sizeof(struct net_device) + + sizeof(struct net_device_stats)); - hdlc->state.fr.changed = 1; - hdlc->state.fr.pvc_count++; - return 0; + if (type == ARPHRD_ETHER) { + ether_setup(dev); + memcpy(dev->dev_addr, "\x00\x01", 2); + get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); + } else { + dev->type = ARPHRD_DLCI; + dev->flags = IFF_POINTOPOINT; + dev->hard_header_len = 10; + dev->addr_len = 2; + *(u16*)dev->dev_addr = htons(dlci); + dlci_to_q922(dev->broadcast, dlci); + } + dev->hard_start_xmit = pvc_xmit; + dev->get_stats = pvc_get_stats; + dev->open = pvc_open; + dev->stop = pvc_close; + dev->do_ioctl = pvc_ioctl; + dev->change_mtu = pvc_change_mtu; + dev->mtu = HDLC_MAX_MTU; + dev->tx_queue_len = 0; + dev->priv = pvc; + + result = dev_alloc_name(dev, prefix); + if (result < 0) { + kfree(dev); + delete_unused_pvcs(hdlc); + return result; + } + + if (register_netdevice(dev) != 0) { + kfree(dev); + delete_unused_pvcs(hdlc); + return -EIO; + } + + *get_dev_p(pvc, type) = dev; + if (!used) { + hdlc->state.fr.dce_changed = 1; + hdlc->state.fr.dce_pvc_count++; } + return 0; +} + + - if (*pvc_p == NULL) /* Delete PVC */ +static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) +{ + pvc_device *pvc; + struct net_device *dev; + + if ((pvc = find_pvc(hdlc, dlci)) == NULL) return -ENOENT; - pvc = *pvc_p; + if ((dev = *get_dev_p(pvc, type)) == NULL) + return -ENOENT; - if (pvc->netdev.flags & IFF_UP) + if (dev->flags & IFF_UP) return -EBUSY; /* PVC in use */ - hdlc->state.fr.changed = 1; - hdlc->state.fr.pvc_count--; - *pvc_p = pvc->next; - unregister_netdevice(&pvc->netdev); - kfree(pvc); + unregister_netdevice(dev); + kfree(dev); + *get_dev_p(pvc, type) = NULL; + + if (!pvc_is_used(pvc)) { + hdlc->state.fr.dce_pvc_count--; + hdlc->state.fr.dce_changed = 1; + } + delete_unused_pvcs(hdlc); return 0; } @@ -763,14 +943,21 @@ pvc_device *pvc = hdlc->state.fr.first_pvc; while(pvc) { pvc_device *next = pvc->next; - unregister_netdev(&pvc->netdev); + if (pvc->main) { + unregister_netdevice(pvc->main); + kfree(pvc->main); + } + if (pvc->ether) { + unregister_netdevice(pvc->ether); + kfree(pvc->ether); + } kfree(pvc); pvc = next; } hdlc->state.fr.first_pvc = NULL; /* All PVCs destroyed */ - hdlc->state.fr.pvc_count = 0; - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_pvc_count = 0; + hdlc->state.fr.dce_changed = 1; } @@ -828,25 +1015,27 @@ if (hdlc->proto != IF_PROTO_FR) { hdlc_proto_detach(hdlc); hdlc->state.fr.first_pvc = NULL; 
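find_pvc() and add_pvc() above keep the PVC chain sorted by DLCI, so lookups can bail out early and insertion is a single pointer-to-pointer walk. The same pattern in standalone form (kmalloc/GFP_KERNEL replaced by calloc for the sketch):

    #include <stdio.h>
    #include <stdlib.h>

    struct pvc { unsigned short dlci; struct pvc *next; };

    static struct pvc *add_pvc(struct pvc **head, unsigned short dlci)
    {
        struct pvc **p = head, *pvc;

        while (*p) {
            if ((*p)->dlci == dlci)
                return *p;          /* already there */
            if ((*p)->dlci > dlci)
                break;              /* the list is sorted */
            p = &(*p)->next;
        }
        if (!(pvc = calloc(1, sizeof(*pvc))))
            return NULL;
        pvc->dlci = dlci;
        pvc->next = *p;             /* put it in the chain */
        *p = pvc;
        return pvc;
    }

    int main(void)
    {
        struct pvc *head = NULL, *i;
        add_pvc(&head, 99); add_pvc(&head, 16); add_pvc(&head, 17);
        for (i = head; i; i = i->next)
            printf("dlci %u\n", i->dlci);    /* 16 17 99 */
        return 0;
    }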
- hdlc->state.fr.pvc_count = 0; + hdlc->state.fr.dce_pvc_count = 0; } memcpy(&hdlc->state.fr.settings, &new_settings, size); hdlc->open = fr_open; hdlc->stop = fr_close; hdlc->netif_rx = fr_rx; + hdlc->type_trans = NULL; hdlc->proto_detach = fr_destroy; hdlc->proto = IF_PROTO_FR; dev->hard_start_xmit = hdlc->xmit; - dev->hard_header = fr_hard_header; + dev->hard_header = NULL; dev->type = ARPHRD_FRAD; - dev->addr_len = 2; - *(u16*)dev->dev_addr = htons(LMI_DLCI); - dlci_to_q922(dev->broadcast, LMI_DLCI); + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->addr_len = 0; return 0; case IF_PROTO_FR_ADD_PVC: case IF_PROTO_FR_DEL_PVC: + case IF_PROTO_FR_ADD_ETH_PVC: + case IF_PROTO_FR_DEL_ETH_PVC: if(!capable(CAP_NET_ADMIN)) return -EPERM; @@ -854,8 +1043,20 @@ sizeof(fr_proto_pvc))) return -EFAULT; - return fr_pvc(hdlc, pvc.dlci, - ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC); + if (pvc.dlci <= 0 || pvc.dlci >= 1024) + return -EINVAL; /* Only 10 bits, DLCI 0 reserved */ + + if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC || + ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC) + result = ARPHRD_ETHER; /* bridged Ethernet device */ + else + result = ARPHRD_DLCI; + + if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC || + ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC) + return fr_add_pvc(hdlc, pvc.dlci, result); + else + return fr_del_pvc(hdlc, pvc.dlci, result); } return -EINVAL; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/hdlc_generic.c linux.21rc5-ac2/drivers/net/wan/hdlc_generic.c --- linux.21rc5/drivers/net/wan/hdlc_generic.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/hdlc_generic.c 2003-04-22 16:44:37.000000000 +0100 @@ -1,17 +1,13 @@ /* * Generic HDLC support routines for Linux * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
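The ioctl code above rejects DLCIs outside 1..1023 because the Frame Relay address field carries only 10 bits. The LMI status element used by dlci_to_status()/status_to_dlci() earlier in this file splits those bits 6+4 across two bytes; a round-trip sketch (the 0x80 markers written into st[1]/st[2] are an assumption based on the usual LMI element layout, since only the st[0] line is fully visible in the hunk above):

    #include <stdio.h>

    static void dlci_to_status(unsigned short dlci, unsigned char *st)
    {
        st[0] = (dlci >> 4) & 0x3F;             /* upper 6 bits */
        st[1] = ((dlci << 3) & 0x78) | 0x80;    /* lower 4 bits */
        st[2] = 0x80;                           /* new/active flags live here */
    }

    static unsigned short status_to_dlci(const unsigned char *st)
    {
        return ((st[0] & 0x3F) << 4) | ((st[1] & 0x78) >> 3);
    }

    int main(void)
    {
        unsigned char st[3];
        dlci_to_status(999, st);
        printf("%u\n", status_to_dlci(st));     /* 999 */
        return 0;
    }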
* - * Current status: - * - this is work in progress - * - not heavily tested on SMP - * - currently supported: + * Currently supported: * * raw IP-in-HDLC * * Cisco HDLC * * Frame Relay with ANSI or CCITT LMI (both user and network side) @@ -37,7 +33,7 @@ #include -static const char* version = "HDLC support module revision 1.11"; +static const char* version = "HDLC support module revision 1.12"; static int hdlc_change_mtu(struct net_device *dev, int new_mtu) @@ -60,7 +56,13 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p) { - dev_to_hdlc(dev)->netif_rx(skb); + hdlc_device *hdlc = dev_to_hdlc(dev); + if (hdlc->netif_rx) + hdlc->netif_rx(skb); + else { + hdlc->stats.rx_dropped++; /* Shouldn't happen */ + dev_kfree_skb(skb); + } return 0; } @@ -69,6 +71,10 @@ #define hdlc_raw_ioctl(hdlc, ifr) -ENOSYS #endif +#ifndef CONFIG_HDLC_RAW_ETH +#define hdlc_raw_eth_ioctl(hdlc, ifr) -ENOSYS +#endif + #ifndef CONFIG_HDLC_PPP #define hdlc_ppp_ioctl(hdlc, ifr) -ENOSYS #endif @@ -96,6 +102,7 @@ switch(ifr->ifr_settings.type) { case IF_PROTO_HDLC: + case IF_PROTO_HDLC_ETH: case IF_PROTO_PPP: case IF_PROTO_CISCO: case IF_PROTO_FR: @@ -109,6 +116,7 @@ switch(proto) { case IF_PROTO_HDLC: return hdlc_raw_ioctl(hdlc, ifr); + case IF_PROTO_HDLC_ETH: return hdlc_raw_eth_ioctl(hdlc, ifr); case IF_PROTO_PPP: return hdlc_ppp_ioctl(hdlc, ifr); case IF_PROTO_CISCO: return hdlc_cisco_ioctl(hdlc, ifr); case IF_PROTO_FR: return hdlc_fr_ioctl(hdlc, ifr); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/hdlc_ppp.c linux.21rc5-ac2/drivers/net/wan/hdlc_ppp.c --- linux.21rc5/drivers/net/wan/hdlc_ppp.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/hdlc_ppp.c 2003-04-22 16:44:37.000000000 +0100 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * Point-to-point protocol support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
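hdlc_rcv() above now tolerates protocol modules that classify purely through type_trans and leave netif_rx NULL: rather than jumping through a null pointer, the frame is dropped and accounted. The defensive shape in miniature (plain C stand-ins for the kernel structs):

    #include <stdio.h>

    struct proto {
        void (*netif_rx)(int frame);    /* may legitimately be NULL now */
        unsigned long rx_dropped;
    };

    static void rcv(struct proto *p, int frame)
    {
        if (p->netif_rx)
            p->netif_rx(frame);
        else
            p->rx_dropped++;    /* shouldn't happen; fail safely */
    }

    int main(void)
    {
        struct proto p = { NULL, 0 };
        rcv(&p, 42);
        printf("dropped=%lu\n", p.rx_dropped);    /* 1 */
        return 0;
    }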
*/ #include @@ -68,10 +67,10 @@ -static void ppp_rx(struct sk_buff *skb) +static unsigned short ppp_type_trans(struct sk_buff *skb, + struct net_device *dev) { - skb->protocol = htons(ETH_P_WAN_PPP); - netif_rx(skb); + return __constant_htons(ETH_P_WAN_PPP); } @@ -103,7 +102,8 @@ hdlc->open = ppp_open; hdlc->stop = ppp_close; - hdlc->netif_rx = ppp_rx; + hdlc->netif_rx = NULL; + hdlc->type_trans = ppp_type_trans; hdlc->proto = IF_PROTO_PPP; dev->hard_start_xmit = hdlc->xmit; dev->hard_header = NULL; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/hdlc_raw.c linux.21rc5-ac2/drivers/net/wan/hdlc_raw.c --- linux.21rc5/drivers/net/wan/hdlc_raw.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/hdlc_raw.c 2003-04-22 16:44:37.000000000 +0100 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * HDLC support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. */ #include @@ -26,10 +25,10 @@ #include -static void raw_rx(struct sk_buff *skb) +static unsigned short raw_type_trans(struct sk_buff *skb, + struct net_device *dev) { - skb->protocol = htons(ETH_P_IP); - netif_rx(skb); + return __constant_htons(ETH_P_IP); } @@ -67,7 +66,7 @@ new_settings.encoding = ENCODING_NRZ; if (new_settings.parity == PARITY_DEFAULT) - new_settings.parity = PARITY_NONE; + new_settings.parity = PARITY_CRC16_PR1_CCITT; result = hdlc->attach(hdlc, new_settings.encoding, new_settings.parity); @@ -79,11 +78,13 @@ hdlc->open = NULL; hdlc->stop = NULL; - hdlc->netif_rx = raw_rx; + hdlc->netif_rx = NULL; + hdlc->type_trans = raw_type_trans; hdlc->proto = IF_PROTO_HDLC; dev->hard_start_xmit = hdlc->xmit; dev->hard_header = NULL; dev->type = ARPHRD_RAWHDLC; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->addr_len = 0; return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/hdlc_raw_eth.c linux.21rc5-ac2/drivers/net/wan/hdlc_raw_eth.c --- linux.21rc5/drivers/net/wan/hdlc_raw_eth.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/hdlc_raw_eth.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,110 @@ +/* + * Generic HDLC support routines for Linux + * HDLC Ethernet emulation support + * + * Copyright (C) 2002-2003 Krzysztof Halasa + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int eth_tx(struct sk_buff *skb, struct net_device *dev) +{ + int pad = ETH_ZLEN - skb->len; + if (pad > 0) { /* Pad the frame with zeros */ + int len = skb->len; + if (skb_tailroom(skb) < pad) + if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) { + dev_to_hdlc(dev)->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + skb_put(skb, pad); + memset(skb->data + len, 0, pad); + } + return dev_to_hdlc(dev)->xmit(skb, dev); +} + + +int hdlc_raw_eth_ioctl(hdlc_device *hdlc, struct ifreq *ifr) +{ + raw_hdlc_proto *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc; + const size_t size = sizeof(raw_hdlc_proto); + raw_hdlc_proto new_settings; + struct net_device *dev = hdlc_to_dev(hdlc); + int result; + void *old_ch_mtu; + int old_qlen; + + switch (ifr->ifr_settings.type) { + case IF_GET_PROTO: + ifr->ifr_settings.type = IF_PROTO_HDLC_ETH; + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size)) + return -EFAULT; + return 0; + + case IF_PROTO_HDLC_ETH: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->flags & IFF_UP) + return -EBUSY; + + if (copy_from_user(&new_settings, raw_s, size)) + return -EFAULT; + + if (new_settings.encoding == ENCODING_DEFAULT) + new_settings.encoding = ENCODING_NRZ; + + if (new_settings.parity == PARITY_DEFAULT) + new_settings.parity = PARITY_CRC16_PR1_CCITT; + + result = hdlc->attach(hdlc, new_settings.encoding, + new_settings.parity); + if (result) + return result; + + hdlc_proto_detach(hdlc); + memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size); + + hdlc->open = NULL; + hdlc->stop = NULL; + hdlc->netif_rx = NULL; + hdlc->type_trans = eth_type_trans; + hdlc->proto = IF_PROTO_HDLC_ETH; + dev->hard_start_xmit = eth_tx; + old_ch_mtu = dev->change_mtu; + old_qlen = dev->tx_queue_len; + ether_setup(dev); + dev->change_mtu = old_ch_mtu; + dev->tx_queue_len = old_qlen; + memcpy(dev->dev_addr, "\x00\x01", 2); + get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); + return 0; + } + + return -EINVAL; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/hdlc_x25.c linux.21rc5-ac2/drivers/net/wan/hdlc_x25.c --- linux.21rc5/drivers/net/wan/hdlc_x25.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/hdlc_x25.c 2003-04-22 16:44:37.000000000 +0100 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * X.25 support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
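eth_tx() above, like pvc_xmit() for Ethernet PVCs earlier, zero-pads anything shorter than ETH_ZLEN before handing it to the hardware, expanding the buffer when there is no tailroom. A userspace analogue of the skb_tailroom()/pskb_expand_head()/skb_put() sequence (realloc stands in for the skb reallocation):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define ETH_ZLEN 60    /* minimum Ethernet frame, excluding FCS */

    static unsigned char *pad_frame(unsigned char *buf, size_t *len, size_t room)
    {
        if (*len >= ETH_ZLEN)
            return buf;
        if (room < ETH_ZLEN && !(buf = realloc(buf, ETH_ZLEN)))
            return NULL;    /* caller drops the frame */
        memset(buf + *len, 0, ETH_ZLEN - *len);
        *len = ETH_ZLEN;
        return buf;
    }

    int main(void)
    {
        size_t len = 14;
        unsigned char *f = calloc(1, len);
        if ((f = pad_frame(f, &len, len)))
            printf("frame is now %u bytes\n", (unsigned)len);    /* 60 */
        free(f);
        return 0;
    }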
*/ #include @@ -204,6 +203,7 @@ hdlc->open = x25_open; hdlc->stop = x25_close; hdlc->netif_rx = x25_rx; + hdlc->type_trans = NULL; hdlc->proto = IF_PROTO_X25; dev->hard_start_xmit = x25_xmit; dev->hard_header = NULL; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/Makefile linux.21rc5-ac2/drivers/net/wan/Makefile --- linux.21rc5/drivers/net/wan/Makefile 2003-05-28 15:08:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/Makefile 2003-04-22 16:44:37.000000000 +0100 @@ -9,7 +9,7 @@ O_TARGET := wan.o -export-objs = z85230.o syncppp.o comx.o sdladrv.o cycx_drv.o hdlc_generic.o +export-objs = z85230.o syncppp.o comx.o sdladrv.o cycx_drv.o hdlc_generic.o pc300_drv.o list-multi = wanpipe.o cyclomx.o wanpipe-objs = sdlamain.o sdla_ft1.o $(wanpipe-y) @@ -24,12 +24,15 @@ hdlc-y := hdlc_generic.o hdlc-$(CONFIG_HDLC_RAW) += hdlc_raw.o +hdlc-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o hdlc-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o hdlc-$(CONFIG_HDLC_FR) += hdlc_fr.o hdlc-$(CONFIG_HDLC_PPP) += hdlc_ppp.o hdlc-$(CONFIG_HDLC_X25) += hdlc_x25.o hdlc-objs := $(hdlc-y) +pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o + obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o obj-$(CONFIG_COMX) += comx.o @@ -70,6 +73,7 @@ obj-$(CONFIG_CYCLADES_SYNC) += cycx_drv.o cyclomx.o obj-$(CONFIG_LAPBETHER) += lapbether.o obj-$(CONFIG_SBNI) += sbni.o +obj-$(CONFIG_PC300) += pc300.o obj-$(CONFIG_HDLC) += hdlc.o ifeq ($(CONFIG_HDLC_PPP),y) obj-$(CONFIG_HDLC) += syncppp.o @@ -79,6 +83,9 @@ include $(TOPDIR)/Rules.make +pc300.o: pc300_drv.o $(pc300-y) + $(LD) -r -o $@ pc300_drv.o $(pc300-y) + hdlc.o: $(hdlc-objs) $(LD) -r -o $@ $(hdlc-objs) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/pc300_drv.c linux.21rc5-ac2/drivers/net/wan/pc300_drv.c --- linux.21rc5/drivers/net/wan/pc300_drv.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/pc300_drv.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,3489 @@ +/* + * pc300.c Cyclades-PC300(tm) Driver. + * + * Author: Ivan Passos + * Maintainer: Henrique Gobbi + * + * Copyright: (c) 1999-2002 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#define USE_PCI_CLOCK + +static char rcsid[] = +"Revision: 3.4.8 Date: 2002/07/19 "; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pc300.h" + +#define CPC_LOCK(card,flags) \ + do { \ + spin_lock_irqsave(&card->card_lock, flags); \ + } while (0) + +#define CPC_UNLOCK(card,flags) \ + do { \ + spin_unlock_irqrestore(&card->card_lock, flags); \ + } while (0) + +#undef PC300_DEBUG_PCI +#undef PC300_DEBUG_INTR +#undef PC300_DEBUG_TX +#undef PC300_DEBUG_RX +#undef PC300_DEBUG_OTHER + +static struct pci_device_id cpc_pci_dev_id[] __devinitdata = { + /* PC300/RSV or PC300/X21, 2 chan */ + {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300}, + /* PC300/RSV or PC300/X21, 1 chan */ + {0x120e, 0x301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x301}, + /* PC300/TE, 2 chan */ + {0x120e, 0x310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x310}, + /* PC300/TE, 1 chan */ + {0x120e, 0x311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x311}, + /* PC300/TE-M, 2 chan */ + {0x120e, 0x320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x320}, + /* PC300/TE-M, 1 chan */ + {0x120e, 0x321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x321}, + /* End of table */ + {0,}, +}; +MODULE_DEVICE_TABLE(pci, cpc_pci_dev_id); + +#ifndef cpc_min +#define cpc_min(a,b) (((a)<(b))?(a):(b)) +#endif +#ifndef cpc_max +#define cpc_max(a,b) (((a)>(b))?(a):(b)) +#endif + +/* prototypes */ +static void tx_dma_buf_pt_init(pc300_t *, int); +static void tx_dma_buf_init(pc300_t *, int); +static void rx_dma_buf_pt_init(pc300_t *, int); +static void rx_dma_buf_init(pc300_t *, int); +static void tx_dma_buf_check(pc300_t *, int); +static void rx_dma_buf_check(pc300_t *, int); +static void cpc_intr(int, void *, struct pt_regs *); +static struct net_device_stats *cpc_get_stats(struct net_device *); +static int clock_rate_calc(uclong, uclong, int *); +static uclong detect_ram(pc300_t *); +static void plx_init(pc300_t *); +static void cpc_trace(struct net_device *, struct sk_buff *, char); +static int cpc_attach(hdlc_device *, unsigned short, unsigned short); + +#ifdef CONFIG_PC300_MLPPP +void cpc_tty_init(pc300dev_t * dev); +void cpc_tty_unregister_service(pc300dev_t * pc300dev); +void cpc_tty_receive(pc300dev_t * pc300dev); +void cpc_tty_trigger_poll(pc300dev_t * pc300dev); +void cpc_tty_reset_var(void); +#endif + +/************************/ +/*** DMA Routines ***/ +/************************/ +static void tx_dma_buf_pt_init(pc300_t * card, int ch) +{ + int i; + int ch_factor = ch * N_DMA_TX_BUF; + volatile pcsca_bd_t *ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); + + for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { + cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE + + (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); + cpc_writel(&ptdescr->ptbuf, + (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); + } +} + +static void tx_dma_buf_init(pc300_t * card, int ch) +{ + int i; + int ch_factor = ch * N_DMA_TX_BUF; + volatile pcsca_bd_t *ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); + + for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { + memset_io(ptdescr, 0, sizeof(pcsca_bd_t)); + cpc_writew(&ptdescr->len, 0); + cpc_writeb(&ptdescr->status, DST_OSB); + } + tx_dma_buf_pt_init(card, ch); +} + +static void rx_dma_buf_pt_init(pc300_t * card, int ch) +{ + int i; + int ch_factor = ch * N_DMA_RX_BUF; + volatile 
pcsca_bd_t *ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); + + for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { + cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE + + (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); + cpc_writel(&ptdescr->ptbuf, + (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); + } +} + +static void rx_dma_buf_init(pc300_t * card, int ch) +{ + int i; + int ch_factor = ch * N_DMA_RX_BUF; + volatile pcsca_bd_t *ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); + + for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { + memset_io(ptdescr, 0, sizeof(pcsca_bd_t)); + cpc_writew(&ptdescr->len, 0); + cpc_writeb(&ptdescr->status, 0); + } + rx_dma_buf_pt_init(card, ch); +} + +static void tx_dma_buf_check(pc300_t * card, int ch) +{ + volatile pcsca_bd_t *ptdescr; + int i; + ucshort first_bd = card->chan[ch].tx_first_bd; + ucshort next_bd = card->chan[ch].tx_next_bd; + + printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, + first_bd, TX_BD_ADDR(ch, first_bd), + next_bd, TX_BD_ADDR(ch, next_bd)); + for (i = first_bd, + ptdescr = (pcsca_bd_t *) (card->hw.rambase + TX_BD_ADDR(ch, first_bd)); + i != ((next_bd + 1) & (N_DMA_TX_BUF - 1)); + i = (i + 1) & (N_DMA_TX_BUF - 1), + ptdescr = (pcsca_bd_t *) (card->hw.rambase + TX_BD_ADDR(ch, i))) { + printk("\n CH%d TX%d: next=0x%lx, ptbuf=0x%lx, ST=0x%x, len=%d", + ch, i, (uclong) cpc_readl(&ptdescr->next), + (uclong) cpc_readl(&ptdescr->ptbuf), + cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len)); + } + printk("\n"); +} + +#ifdef PC300_DEBUG_OTHER +/* Show all TX buffer descriptors */ +static void tx1_dma_buf_check(pc300_t * card, int ch) +{ + volatile pcsca_bd_t *ptdescr; + int i; + ucshort first_bd = card->chan[ch].tx_first_bd; + ucshort next_bd = card->chan[ch].tx_next_bd; + uclong scabase = card->hw.scabase; + + printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); + printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, + first_bd, TX_BD_ADDR(ch, first_bd), + next_bd, TX_BD_ADDR(ch, next_bd)); + printk("TX_CDA=0x%08lx, TX_EDA=0x%08lx\n", + (uclong) cpc_readl(scabase + DTX_REG(CDAL, ch)), + (uclong) cpc_readl(scabase + DTX_REG(EDAL, ch))); + for (i = 0; i < N_DMA_TX_BUF; i++) { + ptdescr = (pcsca_bd_t *) (card->hw.rambase + TX_BD_ADDR(ch, i)); + printk("\n CH%d TX%d: next=0x%lx, ptbuf=0x%lx, ST=0x%x, len=%d", + ch, i, (uclong) cpc_readl(&ptdescr->next), + (uclong) cpc_readl(&ptdescr->ptbuf), + cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len)); + } + printk("\n"); +} +#endif + +static void rx_dma_buf_check(pc300_t * card, int ch) +{ + volatile pcsca_bd_t *ptdescr; + int i; + ucshort first_bd = card->chan[ch].rx_first_bd; + ucshort last_bd = card->chan[ch].rx_last_bd; + int ch_factor; + + ch_factor = ch * N_DMA_RX_BUF; + printk("#CH%d: f_bd = %d, l_bd = %d\n", ch, first_bd, last_bd); + for (i = 0, ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); + i < N_DMA_RX_BUF; i++, ptdescr++) { + if (cpc_readb(&ptdescr->status) & DST_OSB) + printk ("\n CH%d RX%d: next=0x%lx, ptbuf=0x%lx, ST=0x%x, len=%d", + ch, i, (uclong) cpc_readl(&ptdescr->next), + (uclong) cpc_readl(&ptdescr->ptbuf), + cpc_readb(&ptdescr->status), + cpc_readw(&ptdescr->len)); + } + printk("\n"); +} + +int dma_get_rx_frame_size(pc300_t * card, int ch) +{ + volatile pcsca_bd_t *ptdescr; + ucshort first_bd = card->chan[ch].rx_first_bd; + int rcvd = 0; + volatile ucchar status; + + ptdescr = 
(pcsca_bd_t *)(card->hw.rambase + RX_BD_ADDR(ch, first_bd)); + while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { + rcvd += cpc_readw(&ptdescr->len); + first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1); + if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) { + /* Return the size of a good frame or incomplete bad frame + * (dma_buf_read will clean the buffer descriptors in this case). */ + return (rcvd); + } + ptdescr = (pcsca_bd_t *)(card->hw.rambase + cpc_readl(&ptdescr->next)); + } + return (-1); +} + +/* + * dma_buf_write: writes a frame to the Tx DMA buffers + * NOTE: this function writes one frame at a time. + */ +int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) +{ + int i, nchar; + volatile pcsca_bd_t *ptdescr; + int tosend = len; + ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1; + + if (nbuf >= card->chan[ch].nfree_tx_bd) { + return -ENOMEM; + } + + for (i = 0; i < nbuf; i++) { + ptdescr = (pcsca_bd_t *) (card->hw.rambase + + TX_BD_ADDR(ch, card->chan[ch].tx_next_bd)); + nchar = cpc_min(BD_DEF_LEN, tosend); + if (cpc_readb(&ptdescr->status) & DST_OSB) { + memcpy_toio((void *)(card->hw.rambase + cpc_readl(&ptdescr->ptbuf)), + &ptdata[len - tosend], nchar); + cpc_writew(&ptdescr->len, nchar); + card->chan[ch].nfree_tx_bd--; + if ((i + 1) == nbuf) { + /* This must be the last BD to be used */ + cpc_writeb(&ptdescr->status, DST_EOM); + } else { + cpc_writeb(&ptdescr->status, 0); + } + } else { + return -ENOMEM; + } + tosend -= nchar; + card->chan[ch].tx_next_bd = + (card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1); + } + /* If it gets to here, it means we have sent the whole frame */ + return 0; +} + +/* + * dma_buf_read: reads a frame from the Rx DMA buffers + * NOTE: this function reads one frame at a time. 
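+ *
+ * Illustrative caller sketch (hypothetical; modelled on cpc_net_rx below):
+ *   if ((rxb = dma_get_rx_frame_size(card, ch)) == -1)
+ *       return;                     /* no complete frame available */
+ *   skb = dev_alloc_skb(rxb);       /* may be NULL: frame is then drained */
+ *   rxb = dma_buf_read(card, ch, skb); /* data appended via skb_put() */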
+ */ +int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) +{ + int nchar; + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + volatile pcsca_bd_t *ptdescr; + int rcvd = 0; + volatile ucchar status; + + ptdescr = (pcsca_bd_t *) (card->hw.rambase + + RX_BD_ADDR(ch, chan->rx_first_bd)); + while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { + nchar = cpc_readw(&ptdescr->len); + if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT)) + || (nchar > BD_DEF_LEN)) { + + if (nchar > BD_DEF_LEN) + status |= DST_RBIT; + rcvd = -status; + /* Discard remaining descriptors used by the bad frame */ + while (chan->rx_first_bd != chan->rx_last_bd) { + cpc_writeb(&ptdescr->status, 0); + chan->rx_first_bd = (chan->rx_first_bd+1) & (N_DMA_RX_BUF-1); + if (status & DST_EOM) + break; + ptdescr = (pcsca_bd_t *) (card->hw.rambase + + cpc_readl(&ptdescr->next)); + status = cpc_readb(&ptdescr->status); + } + break; + } + if (nchar != 0) { + if (skb) { + memcpy_fromio(skb_put(skb, nchar), + (void *)(card->hw.rambase+cpc_readl(&ptdescr->ptbuf)),nchar); + } + rcvd += nchar; + } + cpc_writeb(&ptdescr->status, 0); + cpc_writeb(&ptdescr->len, 0); + chan->rx_first_bd = (chan->rx_first_bd + 1) & (N_DMA_RX_BUF - 1); + + if (status & DST_EOM) + break; + + ptdescr = (pcsca_bd_t *) (card->hw.rambase + cpc_readl(&ptdescr->next)); + } + + if (rcvd != 0) { + /* Update pointer */ + chan->rx_last_bd = (chan->rx_first_bd - 1) & (N_DMA_RX_BUF - 1); + /* Update EDA */ + cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), + RX_BD_ADDR(ch, chan->rx_last_bd)); + } + return (rcvd); +} + +void tx_dma_stop(pc300_t * card, int ch) +{ + uclong scabase = card->hw.scabase; + ucchar drr_ena_bit = 1 << (5 + 2 * ch); + ucchar drr_rst_bit = 1 << (1 + 2 * ch); + + /* Disable DMA */ + cpc_writeb(scabase + DRR, drr_ena_bit); + cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit); +} + +void rx_dma_stop(pc300_t * card, int ch) +{ + uclong scabase = card->hw.scabase; + ucchar drr_ena_bit = 1 << (4 + 2 * ch); + ucchar drr_rst_bit = 1 << (2 * ch); + + /* Disable DMA */ + cpc_writeb(scabase + DRR, drr_ena_bit); + cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit); +} + +void rx_dma_start(pc300_t * card, int ch) +{ + uclong scabase = card->hw.scabase; + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + + /* Start DMA */ + cpc_writel(scabase + DRX_REG(CDAL, ch), + RX_BD_ADDR(ch, chan->rx_first_bd)); + if (cpc_readl(scabase + DRX_REG(CDAL,ch)) != + RX_BD_ADDR(ch, chan->rx_first_bd)) { + cpc_writel(scabase + DRX_REG(CDAL, ch), + RX_BD_ADDR(ch, chan->rx_first_bd)); + } + cpc_writel(scabase + DRX_REG(EDAL, ch), + RX_BD_ADDR(ch, chan->rx_last_bd)); + cpc_writew(scabase + DRX_REG(BFLL, ch), BD_DEF_LEN); + cpc_writeb(scabase + DSR_RX(ch), DSR_DE); + if (!(cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) { + cpc_writeb(scabase + DSR_RX(ch), DSR_DE); + } +} + +/*************************/ +/*** FALC Routines ***/ +/*************************/ +void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) +{ + uclong falcbase = card->hw.falcbase; + unsigned long i = 0; + + while (cpc_readb(falcbase + F_REG(SIS, ch)) & SIS_CEC) { + if (i++ >= PC300_FALC_MAXLOOP) { + printk("%s: FALC command locked(cmd=0x%x).\n", + card->chan[ch].d.name, cmd); + break; + } + } + cpc_writeb(falcbase + F_REG(CMDR, ch), cmd); +} + +void falc_intr_enable(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + 
/* Interrupt pins are open-drain */
+ cpc_writeb(falcbase + F_REG(IPC, ch),
+ cpc_readb(falcbase + F_REG(IPC, ch)) & ~IPC_IC0);
+ /* Counters are updated each second */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_ECM);
+ /* Enable SEC and ES interrupts */
+ cpc_writeb(falcbase + F_REG(IMR3, ch),
+ cpc_readb(falcbase + F_REG(IMR3, ch)) & ~(IMR3_SEC | IMR3_ES));
+ if (conf->fr_mode == PC300_FR_UNFRAMED) {
+ cpc_writeb(falcbase + F_REG(IMR4, ch),
+ cpc_readb(falcbase + F_REG(IMR4, ch)) & ~(IMR4_LOS));
+ } else {
+ cpc_writeb(falcbase + F_REG(IMR4, ch),
+ cpc_readb(falcbase + F_REG(IMR4, ch)) &
+ ~(IMR4_LFA | IMR4_AIS | IMR4_LOS | IMR4_SLIP));
+ }
+ if (conf->media == IF_IFACE_T1) {
+ cpc_writeb(falcbase + F_REG(IMR3, ch),
+ cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC);
+ } else {
+ cpc_writeb(falcbase + F_REG(IPC, ch),
+ cpc_readb(falcbase + F_REG(IPC, ch)) | IPC_SCI);
+ if (conf->fr_mode == PC300_FR_UNFRAMED) {
+ cpc_writeb(falcbase + F_REG(IMR2, ch),
+ cpc_readb(falcbase + F_REG(IMR2, ch)) & ~(IMR2_LOS));
+ } else {
+ cpc_writeb(falcbase + F_REG(IMR2, ch),
+ cpc_readb(falcbase + F_REG(IMR2, ch)) &
+ ~(IMR2_FAR | IMR2_LFA | IMR2_AIS | IMR2_LOS));
+ if (pfalc->multiframe_mode) {
+ cpc_writeb(falcbase + F_REG(IMR2, ch),
+ cpc_readb(falcbase + F_REG(IMR2, ch)) &
+ ~(IMR2_T400MS | IMR2_MFAR));
+ } else {
+ cpc_writeb(falcbase + F_REG(IMR2, ch),
+ cpc_readb(falcbase + F_REG(IMR2, ch)) |
+ IMR2_T400MS | IMR2_MFAR);
+ }
+ }
+ }
+}
+
+void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
+{
+ uclong falcbase = card->hw.falcbase;
+ ucchar tshf = card->chan[ch].falc.offset;
+
+ cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
+ cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) &
+ ~(0x80 >> ((timeslot - tshf) & 0x07)));
+ cpc_writeb(falcbase + F_REG((TTR1 + timeslot / 8), ch),
+ cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) |
+ (0x80 >> (timeslot & 0x07)));
+ cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch),
+ cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) |
+ (0x80 >> (timeslot & 0x07)));
+}
+
+void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
+{
+ uclong falcbase = card->hw.falcbase;
+ ucchar tshf = card->chan[ch].falc.offset;
+
+ cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
+ cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) |
+ (0x80 >> ((timeslot - tshf) & 0x07)));
+ cpc_writeb(falcbase + F_REG((TTR1 + timeslot / 8), ch),
+ cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) &
+ ~(0x80 >> (timeslot & 0x07)));
+ cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch),
+ cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) &
+ ~(0x80 >> (timeslot & 0x07)));
+}
+
+void falc_close_all_timeslots(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ uclong falcbase = card->hw.falcbase;
+
+ cpc_writeb(falcbase + F_REG(ICB1, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(TTR1, ch), 0);
+ cpc_writeb(falcbase + F_REG(RTR1, ch), 0);
+ cpc_writeb(falcbase + F_REG(ICB2, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(TTR2, ch), 0);
+ cpc_writeb(falcbase + F_REG(RTR2, ch), 0);
+ cpc_writeb(falcbase + F_REG(ICB3, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(TTR3, ch), 0);
+ cpc_writeb(falcbase + F_REG(RTR3, ch), 0);
+ if (conf->media == IF_IFACE_E1) {
+ cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(TTR4, ch), 0);
+ cpc_writeb(falcbase + F_REG(RTR4, ch), 0);
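+ /* Note: E1 (PCM 30) carries 32 timeslots, hence the fourth
+ * ICB/TTR/RTR register bank; T1 uses only the first three. */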
+ } +} + +void falc_open_all_timeslots(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + uclong falcbase = card->hw.falcbase; + + cpc_writeb(falcbase + F_REG(ICB1, ch), 0); + if (conf->fr_mode == PC300_FR_UNFRAMED) { + cpc_writeb(falcbase + F_REG(TTR1, ch), 0xff); + cpc_writeb(falcbase + F_REG(RTR1, ch), 0xff); + } else { + /* Timeslot 0 is never enabled */ + cpc_writeb(falcbase + F_REG(TTR1, ch), 0x7f); + cpc_writeb(falcbase + F_REG(RTR1, ch), 0x7f); + } + cpc_writeb(falcbase + F_REG(ICB2, ch), 0); + cpc_writeb(falcbase + F_REG(TTR2, ch), 0xff); + cpc_writeb(falcbase + F_REG(RTR2, ch), 0xff); + cpc_writeb(falcbase + F_REG(ICB3, ch), 0); + cpc_writeb(falcbase + F_REG(TTR3, ch), 0xff); + cpc_writeb(falcbase + F_REG(RTR3, ch), 0xff); + if (conf->media == IF_IFACE_E1) { + cpc_writeb(falcbase + F_REG(ICB4, ch), 0); + cpc_writeb(falcbase + F_REG(TTR4, ch), 0xff); + cpc_writeb(falcbase + F_REG(RTR4, ch), 0xff); + } else { + cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff); + cpc_writeb(falcbase + F_REG(TTR4, ch), 0x80); + cpc_writeb(falcbase + F_REG(RTR4, ch), 0x80); + } +} + +void falc_init_timeslot(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + int tslot; + + for (tslot = 0; tslot < pfalc->num_channels; tslot++) { + if (conf->tslot_bitmap & (1 << tslot)) { + // Channel enabled + falc_open_timeslot(card, ch, tslot + 1); + } else { + // Channel disabled + falc_close_timeslot(card, ch, tslot + 1); + } + } +} + +void falc_enable_comm(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + + if (pfalc->full_bandwidth) { + falc_open_all_timeslots(card, ch); + } else { + falc_init_timeslot(card, ch); + } + // CTS/DCD ON + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) & + ~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch))); +} + +void falc_disable_comm(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + + if (pfalc->loop_active != 2) { + falc_close_all_timeslots(card, ch); + } + // CTS/DCD OFF + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) | + ((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch))); +} + +void falc_init_t1(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar dja = (ch ? 
(LIM2_DJA2 | LIM2_DJA1) : 0); + + /* Switch to T1 mode (PCM 24) */ + cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); + + /* Wait 20 us for setup */ + udelay(20); + + /* Transmit Buffer Size (1 frame) */ + cpc_writeb(falcbase + F_REG(SIC1, ch), SIC1_XBS0); + + /* Clock mode */ + if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */ + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS); + } else { /* Slave mode */ + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS); + cpc_writeb(falcbase + F_REG(LOOP, ch), + cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_RTM); + } + + cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI); + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) & + ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1)); + + switch (conf->lcode) { + case PC300_LC_AMI: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | + FMR0_XC1 | FMR0_RC1); + /* Clear Channel register to ON for all channels */ + cpc_writeb(falcbase + F_REG(CCB1, ch), 0xff); + cpc_writeb(falcbase + F_REG(CCB2, ch), 0xff); + cpc_writeb(falcbase + F_REG(CCB3, ch), 0xff); + break; + + case PC300_LC_B8ZS: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | + FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1); + break; + + case PC300_LC_NRZ: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | 0x00); + break; + } + + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_ELOS); + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0)); + /* Set interface mode to 2 MBPS */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD); + + switch (conf->fr_mode) { + case PC300_FR_ESF: + pfalc->multiframe_mode = 0; + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_FM1); + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | + FMR1_CRC | FMR1_EDL); + cpc_writeb(falcbase + F_REG(XDL1, ch), 0); + cpc_writeb(falcbase + F_REG(XDL2, ch), 0); + cpc_writeb(falcbase + F_REG(XDL3, ch), 0); + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) & ~FMR0_SRAF); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2,ch)) | FMR2_MCSP | FMR2_SSP); + break; + + case PC300_FR_D4: + pfalc->multiframe_mode = 1; + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) & + ~(FMR4_FM1 | FMR4_FM0)); + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | FMR0_SRAF); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_SSP); + break; + } + + /* Enable Automatic Resynchronization */ + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_AUTO); + + /* Transmit Automatic Remote Alarm */ + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA); + + /* Channel translation mode 1 : one to one */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_CTM); + + /* No signaling */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_SIGM); + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) & + ~(FMR5_EIBR | FMR5_SRS)); + cpc_writeb(falcbase + F_REG(CCR1, ch), 0); + + cpc_writeb(falcbase + 
F_REG(LIM1, ch), + cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1); + + switch (conf->lbo) { + /* Provides proper Line Build Out */ + case PC300_LBO_0_DB: + cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja)); + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x5a); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x8f); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20); + break; + case PC300_LBO_7_5_DB: + cpc_writeb(falcbase + F_REG(LIM2, ch), (0x40 | LIM2_LOS1 | dja)); + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x11); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x02); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20); + break; + case PC300_LBO_15_DB: + cpc_writeb(falcbase + F_REG(LIM2, ch), (0x80 | LIM2_LOS1 | dja)); + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x8e); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20); + break; + case PC300_LBO_22_5_DB: + cpc_writeb(falcbase + F_REG(LIM2, ch), (0xc0 | LIM2_LOS1 | dja)); + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x09); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20); + break; + } + + /* Transmit Clock-Slot Offset */ + cpc_writeb(falcbase + F_REG(XC0, ch), + cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01); + /* Transmit Time-slot Offset */ + cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e); + /* Receive Clock-Slot offset */ + cpc_writeb(falcbase + F_REG(RC0, ch), 0x05); + /* Receive Time-slot offset */ + cpc_writeb(falcbase + F_REG(RC1, ch), 0x00); + + /* LOS Detection after 176 consecutive 0s */ + cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a); + /* LOS Recovery after 22 ones in the time window of PCD */ + cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15); + + cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f); + + if (conf->fr_mode == PC300_FR_ESF_JAPAN) { + cpc_writeb(falcbase + F_REG(RC1, ch), + cpc_readb(falcbase + F_REG(RC1, ch)) | 0x80); + } + + falc_close_all_timeslots(card, ch); +} + +void falc_init_e1(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar dja = (ch ? 
(LIM2_DJA2 | LIM2_DJA1) : 0); + + /* Switch to E1 mode (PCM 30) */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_PMOD); + + /* Clock mode */ + if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */ + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS); + } else { /* Slave mode */ + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS); + } + cpc_writeb(falcbase + F_REG(LOOP, ch), + cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_SFM); + + cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI); + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) & + ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1)); + + switch (conf->lcode) { + case PC300_LC_AMI: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | + FMR0_XC1 | FMR0_RC1); + break; + + case PC300_LC_HDB3: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | + FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1); + break; + + case PC300_LC_NRZ: + break; + } + + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0)); + /* Set interface mode to 2 MBPS */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD); + + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x18); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x03); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x00); + + switch (conf->fr_mode) { + case PC300_FR_MF_CRC4: + pfalc->multiframe_mode = 1; + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_XFS); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_RFS1); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_RFS0); + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_EXTIW); + + /* MultiFrame Resynchronization */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_MFCS); + + /* Automatic Loss of Multiframe > 914 CRC errors */ + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_ALMF); + + /* S1 and SI1/SI2 spare Bits set to 1 */ + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_AXS); + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_EBP); + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XS13 | XSP_XS15); + + /* Automatic Force Resynchronization */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR); + + /* Transmit Automatic Remote Alarm */ + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA); + + /* Transmit Spare Bits for National Use (Y, Sn, Sa) */ + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) | + XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4); + break; + + case PC300_FR_MF_NON_CRC4: + case PC300_FR_D4: + pfalc->multiframe_mode = 0; + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & + ~(FMR2_RFS1 | FMR2_RFS0)); + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XSIS); + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XSIF); + + /* Automatic 
Force Resynchronization */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR); + + /* Transmit Automatic Remote Alarm */ + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA); + + /* Transmit Spare Bits for National Use (Y, Sn, Sa) */ + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) | + XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4); + break; + + case PC300_FR_UNFRAMED: + pfalc->multiframe_mode = 0; + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & + ~(FMR2_RFS1 | FMR2_RFS0)); + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_TT0); + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) & + ~(XSW_XTM|XSW_XY0|XSW_XY1|XSW_XY2|XSW_XY3|XSW_XY4)); + cpc_writeb(falcbase + F_REG(TSWM, ch), 0xff); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | + (FMR2_RTM | FMR2_DAIS)); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_AXRA); + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_AFR); + pfalc->sync = 1; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED2 << (2 * ch))); + break; + } + + /* No signaling */ + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_CASEN); + cpc_writeb(falcbase + F_REG(CCR1, ch), 0); + + cpc_writeb(falcbase + F_REG(LIM1, ch), + cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1); + cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja)); + + /* Transmit Clock-Slot Offset */ + cpc_writeb(falcbase + F_REG(XC0, ch), + cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01); + /* Transmit Time-slot Offset */ + cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e); + /* Receive Clock-Slot offset */ + cpc_writeb(falcbase + F_REG(RC0, ch), 0x05); + /* Receive Time-slot offset */ + cpc_writeb(falcbase + F_REG(RC1, ch), 0x00); + + /* LOS Detection after 176 consecutive 0s */ + cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a); + /* LOS Recovery after 22 ones in the time window of PCD */ + cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15); + + cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f); + + falc_close_all_timeslots(card, ch); +} + +void falc_init_hdlc(pc300_t * card, int ch) +{ + uclong falcbase = card->hw.falcbase; + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + + /* Enable transparent data transfer */ + if (conf->fr_mode == PC300_FR_UNFRAMED) { + cpc_writeb(falcbase + F_REG(MODE, ch), 0); + } else { + cpc_writeb(falcbase + F_REG(MODE, ch), + cpc_readb(falcbase + F_REG(MODE, ch)) | + (MODE_HRAC | MODE_MDS2)); + cpc_writeb(falcbase + F_REG(RAH2, ch), 0xff); + cpc_writeb(falcbase + F_REG(RAH1, ch), 0xff); + cpc_writeb(falcbase + F_REG(RAL2, ch), 0xff); + cpc_writeb(falcbase + F_REG(RAL1, ch), 0xff); + } + + /* Tx/Rx reset */ + falc_issue_cmd(card, ch, CMDR_RRES | CMDR_XRES | CMDR_SRES); + + /* Enable interrupt sources */ + falc_intr_enable(card, ch); +} + +void te_config(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar dummy; + unsigned long flags; + + memset(pfalc, 0, 
sizeof(falc_t)); + switch (conf->media) { + case IF_IFACE_T1: + pfalc->num_channels = NUM_OF_T1_CHANNELS; + pfalc->offset = 1; + break; + case IF_IFACE_E1: + pfalc->num_channels = NUM_OF_E1_CHANNELS; + pfalc->offset = 0; + break; + } + if (conf->tslot_bitmap == 0xffffffffUL) + pfalc->full_bandwidth = 1; + else + pfalc->full_bandwidth = 0; + + CPC_LOCK(card, flags); + /* Reset the FALC chip */ + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) | + (CPLD_REG1_FALC_RESET << (2 * ch))); + udelay(10000); + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) & + ~(CPLD_REG1_FALC_RESET << (2 * ch))); + + if (conf->media == IF_IFACE_T1) { + falc_init_t1(card, ch); + } else { + falc_init_e1(card, ch); + } + falc_init_hdlc(card, ch); + if (conf->rx_sens == PC300_RX_SENS_SH) { + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_EQON); + } else { + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_EQON); + } + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) | + ((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK) << (2 * ch))); + + /* Clear all interrupt registers */ + dummy = cpc_readb(falcbase + F_REG(FISR0, ch)) + + cpc_readb(falcbase + F_REG(FISR1, ch)) + + cpc_readb(falcbase + F_REG(FISR2, ch)) + + cpc_readb(falcbase + F_REG(FISR3, ch)); + CPC_UNLOCK(card, flags); +} + +void falc_check_status(pc300_t * card, int ch, unsigned char frs0) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + /* Verify LOS */ + if (frs0 & FRS0_LOS) { + if (!pfalc->red_alarm) { + pfalc->red_alarm = 1; + pfalc->los++; + if (!pfalc->blue_alarm) { + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise interfere + * with other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) + | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + } + } + } else { + if (pfalc->red_alarm) { + pfalc->red_alarm = 0; + pfalc->losr++; + } + } + + if (conf->fr_mode != PC300_FR_UNFRAMED) { + /* Verify AIS alarm */ + if (frs0 & FRS0_AIS) { + if (!pfalc->blue_alarm) { + pfalc->blue_alarm = 1; + pfalc->ais++; + // EVENT_AIS + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise interfere with other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_AIS + } + } else { + pfalc->blue_alarm = 0; + } + + /* Verify LFA */ + if (frs0 & FRS0_LFA) { + if (!pfalc->loss_fa) { + pfalc->loss_fa = 1; + pfalc->lfa++; + if (!pfalc->blue_alarm && !pfalc->red_alarm) { + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise + * interfere with other working boards. 
*/ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) + | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + } + } + } else { + if (pfalc->loss_fa) { + pfalc->loss_fa = 0; + pfalc->farec++; + } + } + + /* Verify LMFA */ + if (pfalc->multiframe_mode && (frs0 & FRS0_LMFA)) { + /* D4 or CRC4 frame mode */ + if (!pfalc->loss_mfa) { + pfalc->loss_mfa = 1; + pfalc->lmfa++; + if (!pfalc->blue_alarm && !pfalc->red_alarm && + !pfalc->loss_fa) { + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise + * interfere with other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) + | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + } + } + } else { + pfalc->loss_mfa = 0; + } + + /* Verify Remote Alarm */ + if (frs0 & FRS0_RRA) { + if (!pfalc->yellow_alarm) { + pfalc->yellow_alarm = 1; + pfalc->rai++; + if (pfalc->sync) { + // EVENT_RAI + falc_disable_comm(card, ch); + // EVENT_RAI + } + } + } else { + pfalc->yellow_alarm = 0; + } + } /* if !PC300_UNFRAMED */ + + if (pfalc->red_alarm || pfalc->loss_fa || + pfalc->loss_mfa || pfalc->blue_alarm) { + if (pfalc->sync) { + pfalc->sync = 0; + chan->d.line_off++; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED2 << (2 * ch))); + } + } else { + if (!pfalc->sync) { + pfalc->sync = 1; + chan->d.line_on++; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED2 << (2 * ch))); + } + } + + if (pfalc->sync && !pfalc->yellow_alarm) { + if (!pfalc->active) { + // EVENT_FALC_NORMAL + if (pfalc->loop_active) { + return; + } + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) & ~IMR0_PDEN); + } + falc_enable_comm(card, ch); + // EVENT_FALC_NORMAL + pfalc->active = 1; + } + } else { + if (pfalc->active) { + pfalc->active = 0; + } + } +} + +void falc_update_stats(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucshort counter; + + counter = cpc_readb(falcbase + F_REG(FECL, ch)); + counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; + pfalc->fec += counter; + + counter = cpc_readb(falcbase + F_REG(CVCL, ch)); + counter |= cpc_readb(falcbase + F_REG(CVCH, ch)) << 8; + pfalc->cvc += counter; + + counter = cpc_readb(falcbase + F_REG(CECL, ch)); + counter |= cpc_readb(falcbase + F_REG(CECH, ch)) << 8; + pfalc->cec += counter; + + counter = cpc_readb(falcbase + F_REG(EBCL, ch)); + counter |= cpc_readb(falcbase + F_REG(EBCH, ch)) << 8; + pfalc->ebc += counter; + + if (cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) { + mdelay(10); + counter = cpc_readb(falcbase + F_REG(BECL, ch)); + counter |= cpc_readb(falcbase + F_REG(BECH, ch)) << 8; + pfalc->bec += counter; + + if (((conf->media == IF_IFACE_T1) && + (cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_LLBAD) && + (!(cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_PDEN))) + || + ((conf->media == IF_IFACE_E1) && + (cpc_readb(falcbase + F_REG(RSP, ch)) & RSP_LLBAD))) { + pfalc->prbs = 2; + } else { + pfalc->prbs = 1; + } + } +} + +/*---------------------------------------------------------------------------- + * falc_remote_loop + *---------------------------------------------------------------------------- + * 
Description: In the remote loopback mode the clock and data recovered
+ * from the line inputs RL1/2 or RDIP/RDIN are routed back
+ * to the line outputs XL1/2 or XDOP/XDON via the analog
+ * transmitter. As in normal mode they are processed by
+ * the synchronizer and then sent to the system interface.
+ *----------------------------------------------------------------------------
+ */
+void falc_remote_loop(pc300_t * card, int ch, int loop_on)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ uclong falcbase = card->hw.falcbase;
+
+ if (loop_on) {
+ // EVENT_FALC_ABNORMAL
+ if (conf->media == IF_IFACE_T1) {
+ /* Disable this interrupt as it may otherwise interfere with
+ * other working boards. */
+ cpc_writeb(falcbase + F_REG(IMR0, ch),
+ cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
+ }
+ falc_disable_comm(card, ch);
+ // EVENT_FALC_ABNORMAL
+ cpc_writeb(falcbase + F_REG(LIM1, ch),
+ cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RL);
+ pfalc->loop_active = 1;
+ } else {
+ cpc_writeb(falcbase + F_REG(LIM1, ch),
+ cpc_readb(falcbase + F_REG(LIM1, ch)) & ~LIM1_RL);
+ pfalc->sync = 0;
+ cpc_writeb(falcbase + card->hw.cpld_reg2,
+ cpc_readb(falcbase + card->hw.cpld_reg2) &
+ ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
+ pfalc->active = 0;
+ falc_issue_cmd(card, ch, CMDR_XRES);
+ pfalc->loop_active = 0;
+ }
+}
+
+/*----------------------------------------------------------------------------
+ * falc_local_loop
+ *----------------------------------------------------------------------------
+ * Description: The local loopback mode disconnects the receive lines
+ * RL1/RL2 resp. RDIP/RDIN from the receiver. Instead of the
+ * signals coming from the line, the data provided by the system
+ * interface are routed through the analog receiver back to
+ * the system interface. The unipolar bit stream is
+ * transmitted undisturbed on the line. Receiver and transmitter
+ * coding must be identical.
+ *----------------------------------------------------------------------------
+ */
+void falc_local_loop(pc300_t * card, int ch, int loop_on)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ uclong falcbase = card->hw.falcbase;
+
+ if (loop_on) {
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_LL);
+ pfalc->loop_active = 1;
+ } else {
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_LL);
+ pfalc->loop_active = 0;
+ }
+}
+
+/*----------------------------------------------------------------------------
+ * falc_payload_loop
+ *----------------------------------------------------------------------------
+ * Description: This routine enables/disables payload loopback.
+ * When the payload loop is activated, the received 192 bits
+ * of payload data will be looped back to the transmit
+ * direction. The framing bits, CRC6 and DL bits are not
+ * looped. They are originated by the FALC-LH transmitter.
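+ *
+ * Illustrative usage (hypothetical, not taken from this driver):
+ *   falc_payload_loop(card, ch, 1); /* enter payload loopback */
+ *   ... run a traffic or PRBS test ...
+ *   falc_payload_loop(card, ch, 0); /* leave loopback (issues CMDR_XRES) */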
+ *---------------------------------------------------------------------------- + */ +void falc_payload_loop(pc300_t * card, int ch, int loop_on) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (loop_on) { + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise interfere with + * other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_PLB); + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_TM); + } else { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) | XSP_TT0); + } + falc_open_all_timeslots(card, ch); + pfalc->loop_active = 2; + } else { + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_PLB); + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) & ~FMR4_TM); + } else { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) & ~XSP_TT0); + } + pfalc->sync = 0; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED2 << (2 * ch))); + pfalc->active = 0; + falc_issue_cmd(card, ch, CMDR_XRES); + pfalc->loop_active = 0; + } +} + +/*---------------------------------------------------------------------------- + * turn_off_xlu + *---------------------------------------------------------------------------- + * Description: Turns XLU bit off in the proper register + *---------------------------------------------------------------------------- + */ +void turn_off_xlu(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + uclong falcbase = card->hw.falcbase; + + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLU); + } else { + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLU); + } +} + +/*---------------------------------------------------------------------------- + * turn_off_xld + *---------------------------------------------------------------------------- + * Description: Turns XLD bit off in the proper register + *---------------------------------------------------------------------------- + */ +void turn_off_xld(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + uclong falcbase = card->hw.falcbase; + + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLD); + } else { + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLD); + } +} + +/*---------------------------------------------------------------------------- + * falc_generate_loop_up_code + *---------------------------------------------------------------------------- + * Description: This routine writes the proper FALC chip register in order + * to generate a LOOP activation code over a T1/E1 line. 
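+ * The XLU bit (FMR5_XLU on T1, FMR3_XLU on E1) stays set until
+ * turn_off_xlu() clears it again.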
+ *---------------------------------------------------------------------------- + */ +void falc_generate_loop_up_code(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLU); + } else { + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLU); + } + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise interfere with + * other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + pfalc->loop_gen = 1; +} + +/*---------------------------------------------------------------------------- + * falc_generate_loop_down_code + *---------------------------------------------------------------------------- + * Description: This routine writes the proper FALC chip register in order + * to generate a LOOP deactivation code over a T1/E1 line. + *---------------------------------------------------------------------------- + */ +void falc_generate_loop_down_code(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLD); + } else { + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLD); + } + pfalc->sync = 0; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED2 << (2 * ch))); + pfalc->active = 0; +//? falc_issue_cmd(card, ch, CMDR_XRES); + pfalc->loop_gen = 0; +} + +/*---------------------------------------------------------------------------- + * falc_pattern_test + *---------------------------------------------------------------------------- + * Description: This routine generates a pattern code and checks + * it on the reception side. 
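+ *
+ * Illustrative test sequence (hypothetical):
+ *   falc_pattern_test(card, ch, 1); /* start PRBS generation/monitoring */
+ *   ... let the test run ...
+ *   bec = falc_pattern_test_error(card, ch); /* read bit error counter */
+ *   falc_pattern_test(card, ch, 0); /* stop the test */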
+ *---------------------------------------------------------------------------- + */ +void falc_pattern_test(pc300_t * card, int ch, unsigned int activate) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (activate) { + pfalc->prbs = 1; + pfalc->bec = 0; + if (conf->media == IF_IFACE_T1) { + /* Disable local loop activation/deactivation detect */ + cpc_writeb(falcbase + F_REG(IMR3, ch), + cpc_readb(falcbase + F_REG(IMR3, ch)) | IMR3_LLBSC); + } else { + /* Disable local loop activation/deactivation detect */ + cpc_writeb(falcbase + F_REG(IMR1, ch), + cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_LLBSC); + } + /* Activates generation and monitoring of PRBS + * (Pseudo Random Bit Sequence) */ + cpc_writeb(falcbase + F_REG(LCR1, ch), + cpc_readb(falcbase + F_REG(LCR1, ch)) | LCR1_EPRM | LCR1_XPRBS); + } else { + pfalc->prbs = 0; + /* Deactivates generation and monitoring of PRBS + * (Pseudo Random Bit Sequence) */ + cpc_writeb(falcbase + F_REG(LCR1, ch), + cpc_readb(falcbase+F_REG(LCR1,ch)) & ~(LCR1_EPRM | LCR1_XPRBS)); + if (conf->media == IF_IFACE_T1) { + /* Enable local loop activation/deactivation detect */ + cpc_writeb(falcbase + F_REG(IMR3, ch), + cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC); + } else { + /* Enable local loop activation/deactivation detect */ + cpc_writeb(falcbase + F_REG(IMR1, ch), + cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_LLBSC); + } + } +} + +/*---------------------------------------------------------------------------- + * falc_pattern_test_error + *---------------------------------------------------------------------------- + * Description: This routine returns the bit error counter value + *---------------------------------------------------------------------------- + */ +ucshort falc_pattern_test_error(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + + return (pfalc->bec); +} + +/**********************************/ +/*** Net Interface Routines ***/ +/**********************************/ + +static void +cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx) +{ + struct sk_buff *skb; + + if ((skb = dev_alloc_skb(10 + skb_main->len)) == NULL) { + printk("%s: out of memory\n", dev->name); + return; + } + skb_put(skb, 10 + skb_main->len); + + skb->dev = dev; + skb->protocol = htons(ETH_P_CUST); + skb->mac.raw = skb->data; + skb->pkt_type = PACKET_HOST; + skb->len = 10 + skb_main->len; + + memcpy(skb->data, dev->name, 5); + skb->data[5] = '['; + skb->data[6] = rx_tx; + skb->data[7] = ']'; + skb->data[8] = ':'; + skb->data[9] = ' '; + memcpy(&skb->data[10], skb_main->data, skb_main->len); + + netif_rx(skb); +} + +void cpc_tx_timeout(struct net_device *dev) +{ + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + struct net_device_stats *stats = &d->hdlc->stats; + int ch = chan->channel; + uclong flags; + ucchar ilar; + + stats->tx_errors++; + stats->tx_aborted_errors++; + CPC_LOCK(card, flags); + if ((ilar = cpc_readb(card->hw.scabase + ILAR)) != 0) { + printk("%s: ILAR=0x%x\n", dev->name, ilar); + cpc_writeb(card->hw.scabase + ILAR, ilar); + cpc_writeb(card->hw.scabase + DMER, 0x80); + } + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & + 
~(CPLD_REG2_FALC_LED1 << (2 * ch))); + } + dev->trans_start = jiffies; + CPC_UNLOCK(card, flags); + netif_wake_queue(dev); +} + +int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) +{ + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + struct net_device_stats *stats = &d->hdlc->stats; + int ch = chan->channel; + uclong flags; +#ifdef PC300_DEBUG_TX + int i; +#endif + + if (chan->conf.monitor) { + /* In monitor mode no Tx is done: ignore packet */ + dev_kfree_skb(skb); + return 0; + } else if (!netif_carrier_ok(dev)) { + /* DCD must be OFF: drop packet */ + dev_kfree_skb(skb); + stats->tx_errors++; + stats->tx_carrier_errors++; + return 0; + } else if (cpc_readb(card->hw.scabase + M_REG(ST3, ch)) & ST3_DCD) { + printk("%s: DCD is OFF. Going administrative down.\n", dev->name); + stats->tx_errors++; + stats->tx_carrier_errors++; + dev_kfree_skb(skb); + netif_carrier_off(dev); + CPC_LOCK(card, flags); + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR); + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED1 << (2 * ch))); + } + CPC_UNLOCK(card, flags); + netif_wake_queue(dev); + return 0; + } + + /* Write buffer to DMA buffers */ + if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) { +// printk("%s: write error. Dropping TX packet.\n", dev->name); + netif_stop_queue(dev); + dev_kfree_skb(skb); + stats->tx_errors++; + stats->tx_dropped++; + return 0; + } +#ifdef PC300_DEBUG_TX + printk("%s T:", dev->name); + for (i = 0; i < skb->len; i++) + printk(" %02x", *(skb->data + i)); + printk("\n"); +#endif + + if (d->trace_on) { + cpc_trace(dev, skb, 'T'); + } + dev->trans_start = jiffies; + + /* Start transmission */ + CPC_LOCK(card, flags); + /* verify if it has more than one free descriptor */ + if (card->chan[ch].nfree_tx_bd <= 1) { + /* don't have so stop the queue */ + netif_stop_queue(dev); + } + cpc_writel(card->hw.scabase + DTX_REG(EDAL, ch), + TX_BD_ADDR(ch, chan->tx_next_bd)); + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA); + cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE); + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + CPC_UNLOCK(card, flags); + dev_kfree_skb(skb); + + return 0; +} + +void cpc_net_rx(hdlc_device * hdlc) +{ + struct net_device *dev = hdlc_to_dev(hdlc); + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + struct net_device_stats *stats = &d->hdlc->stats; + int ch = chan->channel; +#ifdef PC300_DEBUG_RX + int i; +#endif + int rxb; + struct sk_buff *skb; + + while (1) { + if ((rxb = dma_get_rx_frame_size(card, ch)) == -1) + return; + + if (!netif_carrier_ok(dev)) { + /* DCD must be OFF: drop packet */ + printk("%s : DCD is OFF - drop %d rx bytes\n", dev->name, rxb); + skb = NULL; + } else { + if (rxb > (dev->mtu + 40)) { /* add headers */ + printk("%s : MTU exceeded %d\n", dev->name, rxb); + skb = NULL; + } else { + skb = dev_alloc_skb(rxb); + if (skb == NULL) { + printk("%s: Memory squeeze!!\n", dev->name); + return; + } + skb->dev = dev; + } + } + + if (((rxb = dma_buf_read(card, ch, skb)) <= 0) || (skb == NULL)) { +#ifdef PC300_DEBUG_RX + printk("%s: rxb = %x\n", dev->name, rxb); +#endif + if ((skb == NULL) && (rxb > 0)) 
{ + /* rxb > dev->mtu */ + stats->rx_errors++; + stats->rx_length_errors++; + continue; + } + + if (rxb < 0) { /* Invalid frame */ + rxb = -rxb; + if (rxb & DST_OVR) { + stats->rx_errors++; + stats->rx_fifo_errors++; + } + if (rxb & DST_CRC) { + stats->rx_errors++; + stats->rx_crc_errors++; + } + if (rxb & (DST_RBIT | DST_SHRT | DST_ABT)) { + stats->rx_errors++; + stats->rx_frame_errors++; + } + } + if (skb) { + dev_kfree_skb_irq(skb); + } + continue; + } + + stats->rx_bytes += rxb; + +#ifdef PC300_DEBUG_RX + printk("%s R:", dev->name); + for (i = 0; i < skb->len; i++) + printk(" %02x", *(skb->data + i)); + printk("\n"); +#endif + if (d->trace_on) { + cpc_trace(dev, skb, 'R'); + } + stats->rx_packets++; + skb->mac.raw = skb->data; + skb->protocol = htons(ETH_P_HDLC); + netif_rx(skb); + } +} + +/************************************/ +/*** PC300 Interrupt Routines ***/ +/************************************/ +static void sca_tx_intr(pc300dev_t *dev) +{ + pc300ch_t *chan = (pc300ch_t *)dev->chan; + pc300_t *card = (pc300_t *)chan->card; + int ch = chan->channel; + volatile pcsca_bd_t * ptdescr; + struct net_device_stats *stats = &dev->hdlc->stats; + + /* Clean up descriptors from previous transmission */ + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + TX_BD_ADDR(ch,chan->tx_first_bd)); + while ((cpc_readl(card->hw.scabase + DTX_REG(CDAL,ch)) != + TX_BD_ADDR(ch,chan->tx_first_bd)) && + (cpc_readb(&ptdescr->status) & DST_OSB)) { + stats->tx_packets++; + stats->tx_bytes += cpc_readw(&ptdescr->len); + cpc_writeb(&ptdescr->status, DST_OSB); + cpc_writew(&ptdescr->len, 0); + chan->nfree_tx_bd++; + chan->tx_first_bd = (chan->tx_first_bd + 1) & (N_DMA_TX_BUF - 1); + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + TX_BD_ADDR(ch,chan->tx_first_bd)); + } + +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto == PC300_PROTO_MLPPP) { + cpc_tty_trigger_poll(dev); + } else { +#endif + /* Tell the upper layer we are ready to transmit more packets */ + netif_wake_queue((struct net_device*)dev->hdlc); +#ifdef CONFIG_PC300_MLPPP + } +#endif +} + +static void sca_intr(pc300_t * card) +{ + uclong scabase = card->hw.scabase; + volatile uclong status; + int ch; + int intr_count = 0; + unsigned char dsr_rx; + + while ((status = cpc_readl(scabase + ISR0)) != 0) { + for (ch = 0; ch < card->hw.nchan; ch++) { + pc300ch_t *chan = &card->chan[ch]; + pc300dev_t *d = &chan->d; + hdlc_device *hdlc = d->hdlc; + struct net_device *dev = hdlc_to_dev(hdlc); + + spin_lock(&card->card_lock); + + /**** Reception ****/ + if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { + ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch)); + + /* Clear RX interrupts */ + cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); + +#ifdef PC300_DEBUG_INTR + printk ("sca_intr: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n", + ch, status, drx_stat); +#endif + if (status & IR0_DRX(IR0_DMIA, ch)) { + if (drx_stat & DSR_BOF) { +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto == PC300_PROTO_MLPPP) { + /* verify if driver is TTY */ + if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) { + rx_dma_stop(card, ch); + } + cpc_tty_receive(d); + rx_dma_start(card, ch); + } else +#endif + { + if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) { + rx_dma_stop(card, ch); + } + cpc_net_rx(hdlc); + /* Discard invalid frames */ + hdlc->stats.rx_errors++; + hdlc->stats.rx_over_errors++; + chan->rx_first_bd = 0; + chan->rx_last_bd = N_DMA_RX_BUF - 1; + rx_dma_start(card, ch); + } + } + } + if (status & IR0_DRX(IR0_DMIB, ch)) { + if (drx_stat & DSR_EOM) { + if (card->hw.type == PC300_TE) { 
+ cpc_writeb(card->hw.falcbase + + card->hw.cpld_reg2, + cpc_readb (card->hw.falcbase + + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED1 << (2 * ch))); + } +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto == PC300_PROTO_MLPPP) { + /* verify if driver is TTY */ + cpc_tty_receive(d); + } else { + cpc_net_rx(hdlc); + } +#else + cpc_net_rx(hdlc); +#endif + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + + card->hw.cpld_reg2, + cpc_readb (card->hw.falcbase + + card->hw.cpld_reg2) & + ~ (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + } + } + if (!(dsr_rx = cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) { + +printk("%s: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x, dsr2=0x%02x)\n", + dev->name, ch, status, drx_stat, dsr_rx); + cpc_writeb(scabase + DSR_RX(ch), (dsr_rx | DSR_DE) & 0xfe); + } + } + + /**** Transmission ****/ + if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { + ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch)); + + /* Clear TX interrupts */ + cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); + +#ifdef PC300_DEBUG_INTR + printk ("sca_intr: TX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n", + ch, status, dtx_stat); +#endif + if (status & IR0_DTX(IR0_EFT, ch)) { + if (dtx_stat & DSR_UDRF) { + if (cpc_readb (scabase + M_REG(TBN, ch)) != 0) { + cpc_writeb(scabase + M_REG(CMD,ch), CMD_TX_BUF_CLR); + } + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb (card->hw.falcbase + + card->hw.cpld_reg2) & + ~ (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + hdlc->stats.tx_errors++; + hdlc->stats.tx_fifo_errors++; + sca_tx_intr(d); + } + } + if (status & IR0_DTX(IR0_DMIA, ch)) { + if (dtx_stat & DSR_BOF) { + } + } + if (status & IR0_DTX(IR0_DMIB, ch)) { + if (dtx_stat & DSR_EOM) { + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb (card->hw.falcbase + + card->hw.cpld_reg2) & + ~ (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + sca_tx_intr(d); + } + } + } + + /**** MSCI ****/ + if (status & IR0_M(IR0_RXINTA, ch)) { + ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch)); + + /* Clear MSCI interrupts */ + cpc_writeb(scabase + M_REG(ST1, ch), st1); + +#ifdef PC300_DEBUG_INTR + printk("sca_intr: MSCI intr chan[%d] (st=0x%08lx, st1=0x%02x)\n", + ch, status, st1); +#endif + if (st1 & ST1_CDCD) { /* DCD changed */ + if (cpc_readb(scabase + M_REG(ST3, ch)) & ST3_DCD) { + printk ("%s: DCD is OFF. Going administrative down.\n", + dev->name); +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto != PC300_PROTO_MLPPP) { + netif_carrier_off(dev); + } +#else + netif_carrier_off(dev); + +#endif + card->chan[ch].d.line_off++; + } else { /* DCD = 1 */ + printk ("%s: DCD is ON. Going administrative up.\n", + dev->name); +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto != PC300_PROTO_MLPPP) + /* verify if driver is not TTY */ +#endif + netif_carrier_on(dev); + card->chan[ch].d.line_on++; + } + } + } + spin_unlock(&card->card_lock); + } + if (++intr_count == 10) + /* Too much work at this board. 
Force exit */ + break; + } +} + +static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) && + !pfalc->loop_gen) { + if (frs1 & FRS1_LLBDD) { + // A Line Loop Back Deactivation signal detected + if (pfalc->loop_active) { + falc_remote_loop(card, ch, 0); + } + } else { + if ((frs1 & FRS1_LLBAD) && + ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) { + // A Line Loop Back Activation signal detected + if (!pfalc->loop_active) { + falc_remote_loop(card, ch, 1); + } + } + } + } +} + +static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) && + !pfalc->loop_gen) { + if (rsp & RSP_LLBDD) { + // A Line Loop Back Deactivation signal detected + if (pfalc->loop_active) { + falc_remote_loop(card, ch, 0); + } + } else { + if ((rsp & RSP_LLBAD) && + ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) { + // A Line Loop Back Activation signal detected + if (!pfalc->loop_active) { + falc_remote_loop(card, ch, 1); + } + } + } + } +} + +static void falc_t1_intr(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar isr0, isr3, gis; + ucchar dummy; + + while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { + if (gis & GIS_ISR0) { + isr0 = cpc_readb(falcbase + F_REG(FISR0, ch)); + if (isr0 & FISR0_PDEN) { + /* Read the bit to clear the situation */ + if (cpc_readb(falcbase + F_REG(FRS1, ch)) & + FRS1_PDEN) { + pfalc->pden++; + } + } + } + + if (gis & GIS_ISR1) { + dummy = cpc_readb(falcbase + F_REG(FISR1, ch)); + } + + if (gis & GIS_ISR2) { + dummy = cpc_readb(falcbase + F_REG(FISR2, ch)); + } + + if (gis & GIS_ISR3) { + isr3 = cpc_readb(falcbase + F_REG(FISR3, ch)); + if (isr3 & FISR3_SEC) { + pfalc->sec++; + falc_update_stats(card, ch); + falc_check_status(card, ch, + cpc_readb(falcbase + F_REG(FRS0, ch))); + } + if (isr3 & FISR3_ES) { + pfalc->es++; + } + if (isr3 & FISR3_LLBSC) { + falc_t1_loop_detection(card, ch, + cpc_readb(falcbase + F_REG(FRS1, ch))); + } + } + } +} + +static void falc_e1_intr(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar isr1, isr2, isr3, gis, rsp; + ucchar dummy; + + while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { + rsp = cpc_readb(falcbase + F_REG(RSP, ch)); + + if (gis & GIS_ISR0) { + dummy = cpc_readb(falcbase + F_REG(FISR0, ch)); + } + if (gis & GIS_ISR1) { + isr1 = cpc_readb(falcbase + F_REG(FISR1, ch)); + if (isr1 & FISR1_XMB) { + if ((pfalc->xmb_cause & 2) + && pfalc->multiframe_mode) { + if (cpc_readb (falcbase + F_REG(FRS0, ch)) & + (FRS0_LOS | FRS0_AIS | FRS0_LFA)) { + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) + & ~XSP_AXS); + } else { + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) + | XSP_AXS); + } + } + pfalc->xmb_cause = 0; + cpc_writeb(falcbase + F_REG(IMR1, ch), + cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_XMB); + } + if (isr1 & FISR1_LLBSC) { + falc_e1_loop_detection(card, ch, rsp); + } + } + if 
(gis & GIS_ISR2) { + isr2 = cpc_readb(falcbase + F_REG(FISR2, ch)); + if (isr2 & FISR2_T400MS) { + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XRA); + } + if (isr2 & FISR2_MFAR) { + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) & ~XSW_XRA); + } + if (isr2 & (FISR2_FAR | FISR2_LFA | FISR2_AIS | FISR2_LOS)) { + pfalc->xmb_cause |= 2; + cpc_writeb(falcbase + F_REG(IMR1, ch), + cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_XMB); + } + } + if (gis & GIS_ISR3) { + isr3 = cpc_readb(falcbase + F_REG(FISR3, ch)); + if (isr3 & FISR3_SEC) { + pfalc->sec++; + falc_update_stats(card, ch); + falc_check_status(card, ch, + cpc_readb(falcbase + F_REG(FRS0, ch))); + } + if (isr3 & FISR3_ES) { + pfalc->es++; + } + } + } +} + +static void falc_intr(pc300_t * card) +{ + int ch; + + for (ch = 0; ch < card->hw.nchan; ch++) { + pc300ch_t *chan = &card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + + if (conf->media == IF_IFACE_T1) { + falc_t1_intr(card, ch); + } else { + falc_e1_intr(card, ch); + } + } +} + +static void cpc_intr(int irq, void *dev_id, struct pt_regs *regs) +{ + pc300_t *card; + volatile ucchar plx_status; + + if ((card = (pc300_t *) dev_id) == 0) { +#ifdef PC300_DEBUG_INTR + printk("cpc_intr: spurious intr %d\n", irq); +#endif + return; /* spurious intr */ + } + + if (card->hw.rambase == 0) { +#ifdef PC300_DEBUG_INTR + printk("cpc_intr: spurious intr2 %d\n", irq); +#endif + return; /* spurious intr */ + } + + switch (card->hw.type) { + case PC300_RSV: + case PC300_X21: + sca_intr(card); + break; + + case PC300_TE: + while ( (plx_status = (cpc_readb(card->hw.plxbase + card->hw.intctl_reg) & + (PLX_9050_LINT1_STATUS | PLX_9050_LINT2_STATUS))) != 0) { + if (plx_status & PLX_9050_LINT1_STATUS) { /* SCA Interrupt */ + sca_intr(card); + } + if (plx_status & PLX_9050_LINT2_STATUS) { /* FALC Interrupt */ + falc_intr(card); + } + } + break; + } +} + +void cpc_sca_status(pc300_t * card, int ch) +{ + ucchar ilar; + uclong scabase = card->hw.scabase; + uclong flags; + + tx_dma_buf_check(card, ch); + rx_dma_buf_check(card, ch); + ilar = cpc_readb(scabase + ILAR); + printk ("ILAR=0x%02x, WCRL=0x%02x, PCR=0x%02x, BTCR=0x%02x, BOLR=0x%02x\n", + ilar, cpc_readb(scabase + WCRL), cpc_readb(scabase + PCR), + cpc_readb(scabase + BTCR), cpc_readb(scabase + BOLR)); + printk("TX_CDA=0x%08lx, TX_EDA=0x%08lx\n", + (uclong) cpc_readl(scabase + DTX_REG(CDAL, ch)), + (uclong) cpc_readl(scabase + DTX_REG(EDAL, ch))); + printk("RX_CDA=0x%08lx, RX_EDA=0x%08lx, BFL=0x%04x\n", + (uclong) cpc_readl(scabase + DRX_REG(CDAL, ch)), + (uclong) cpc_readl(scabase + DRX_REG(EDAL, ch)), + cpc_readw(scabase + DRX_REG(BFLL, ch))); + printk("DMER=0x%02x, DSR_TX=0x%02x, DSR_RX=0x%02x\n", + cpc_readb(scabase + DMER), cpc_readb(scabase + DSR_TX(ch)), + cpc_readb(scabase + DSR_RX(ch))); + printk("DMR_TX=0x%02x, DMR_RX=0x%02x, DIR_TX=0x%02x, DIR_RX=0x%02x\n", + cpc_readb(scabase + DMR_TX(ch)), cpc_readb(scabase + DMR_RX(ch)), + cpc_readb(scabase + DIR_TX(ch)), + cpc_readb(scabase + DIR_RX(ch))); + printk("DCR_TX=0x%02x, DCR_RX=0x%02x, FCT_TX=0x%02x, FCT_RX=0x%02x\n", + cpc_readb(scabase + DCR_TX(ch)), cpc_readb(scabase + DCR_RX(ch)), + cpc_readb(scabase + FCT_TX(ch)), + cpc_readb(scabase + FCT_RX(ch))); + printk("MD0=0x%02x, MD1=0x%02x, MD2=0x%02x, MD3=0x%02x, IDL=0x%02x\n", + cpc_readb(scabase + M_REG(MD0, ch)), + cpc_readb(scabase + M_REG(MD1, ch)), + cpc_readb(scabase + M_REG(MD2, ch)), + cpc_readb(scabase + M_REG(MD3, ch)), + cpc_readb(scabase + 
M_REG(IDL, ch))); + printk("CMD=0x%02x, SA0=0x%02x, SA1=0x%02x, TFN=0x%02x, CTL=0x%02x\n", + cpc_readb(scabase + M_REG(CMD, ch)), + cpc_readb(scabase + M_REG(SA0, ch)), + cpc_readb(scabase + M_REG(SA1, ch)), + cpc_readb(scabase + M_REG(TFN, ch)), + cpc_readb(scabase + M_REG(CTL, ch))); + printk("ST0=0x%02x, ST1=0x%02x, ST2=0x%02x, ST3=0x%02x, ST4=0x%02x\n", + cpc_readb(scabase + M_REG(ST0, ch)), + cpc_readb(scabase + M_REG(ST1, ch)), + cpc_readb(scabase + M_REG(ST2, ch)), + cpc_readb(scabase + M_REG(ST3, ch)), + cpc_readb(scabase + M_REG(ST4, ch))); + printk ("CST0=0x%02x, CST1=0x%02x, CST2=0x%02x, CST3=0x%02x, FST=0x%02x\n", + cpc_readb(scabase + M_REG(CST0, ch)), + cpc_readb(scabase + M_REG(CST1, ch)), + cpc_readb(scabase + M_REG(CST2, ch)), + cpc_readb(scabase + M_REG(CST3, ch)), + cpc_readb(scabase + M_REG(FST, ch))); + printk("TRC0=0x%02x, TRC1=0x%02x, RRC=0x%02x, TBN=0x%02x, RBN=0x%02x\n", + cpc_readb(scabase + M_REG(TRC0, ch)), + cpc_readb(scabase + M_REG(TRC1, ch)), + cpc_readb(scabase + M_REG(RRC, ch)), + cpc_readb(scabase + M_REG(TBN, ch)), + cpc_readb(scabase + M_REG(RBN, ch))); + printk("TFS=0x%02x, TNR0=0x%02x, TNR1=0x%02x, RNR=0x%02x\n", + cpc_readb(scabase + M_REG(TFS, ch)), + cpc_readb(scabase + M_REG(TNR0, ch)), + cpc_readb(scabase + M_REG(TNR1, ch)), + cpc_readb(scabase + M_REG(RNR, ch))); + printk("TCR=0x%02x, RCR=0x%02x, TNR1=0x%02x, RNR=0x%02x\n", + cpc_readb(scabase + M_REG(TCR, ch)), + cpc_readb(scabase + M_REG(RCR, ch)), + cpc_readb(scabase + M_REG(TNR1, ch)), + cpc_readb(scabase + M_REG(RNR, ch))); + printk("TXS=0x%02x, RXS=0x%02x, EXS=0x%02x, TMCT=0x%02x, TMCR=0x%02x\n", + cpc_readb(scabase + M_REG(TXS, ch)), + cpc_readb(scabase + M_REG(RXS, ch)), + cpc_readb(scabase + M_REG(EXS, ch)), + cpc_readb(scabase + M_REG(TMCT, ch)), + cpc_readb(scabase + M_REG(TMCR, ch))); + printk("IE0=0x%02x, IE1=0x%02x, IE2=0x%02x, IE4=0x%02x, FIE=0x%02x\n", + cpc_readb(scabase + M_REG(IE0, ch)), + cpc_readb(scabase + M_REG(IE1, ch)), + cpc_readb(scabase + M_REG(IE2, ch)), + cpc_readb(scabase + M_REG(IE4, ch)), + cpc_readb(scabase + M_REG(FIE, ch))); + printk("IER0=0x%08lx\n", (uclong) cpc_readl(scabase + IER0)); + + if (ilar != 0) { + CPC_LOCK(card, flags); + cpc_writeb(scabase + ILAR, ilar); + cpc_writeb(scabase + DMER, 0x80); + CPC_UNLOCK(card, flags); + } +} + +void cpc_falc_status(pc300_t * card, int ch) +{ + pc300ch_t *chan = &card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong flags; + + CPC_LOCK(card, flags); + printk("CH%d: %s %s %d channels\n", + ch, (pfalc->sync ? "SYNC" : ""), (pfalc->active ? "ACTIVE" : ""), + pfalc->num_channels); + + printk(" pden=%d, los=%d, losr=%d, lfa=%d, farec=%d\n", + pfalc->pden, pfalc->los, pfalc->losr, pfalc->lfa, pfalc->farec); + printk(" lmfa=%d, ais=%d, sec=%d, es=%d, rai=%d\n", + pfalc->lmfa, pfalc->ais, pfalc->sec, pfalc->es, pfalc->rai); + printk(" bec=%d, fec=%d, cvc=%d, cec=%d, ebc=%d\n", + pfalc->bec, pfalc->fec, pfalc->cvc, pfalc->cec, pfalc->ebc); + + printk("\n"); + printk(" STATUS: %s %s %s %s %s %s\n", + (pfalc->red_alarm ? "RED" : ""), + (pfalc->blue_alarm ? "BLU" : ""), + (pfalc->yellow_alarm ? "YEL" : ""), + (pfalc->loss_fa ? "LFA" : ""), + (pfalc->loss_mfa ? "LMF" : ""), (pfalc->prbs ? 
"PRB" : "")); + CPC_UNLOCK(card, flags); +} + +int cpc_change_mtu(struct net_device *dev, int new_mtu) +{ + if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU)) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + pc300conf_t conf_aux; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + int ch = chan->channel; + void *arg = (void *) ifr->ifr_data; + uclong scabase = card->hw.scabase; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + switch (cmd) { + case SIOCGPC300CONF: +#ifdef CONFIG_PC300_MLPPP + if (conf->proto != PC300_PROTO_MLPPP) { + conf->proto = hdlc->proto; + } +#else + conf->proto = hdlc->proto; +#endif + memcpy(&conf_aux.conf, conf, sizeof(pc300chconf_t)); + memcpy(&conf_aux.hw, &card->hw, sizeof(pc300hw_t)); + if (!arg || + copy_to_user(arg, &conf_aux, sizeof(pc300conf_t))) + return -EINVAL; + return 0; + case SIOCSPC300CONF: + if (!suser()) + return -EPERM; + if (!arg || + copy_from_user(&conf_aux.conf, arg, sizeof(pc300chconf_t))) + return -EINVAL; + if (card->hw.cpld_id < 0x02 && + conf_aux.conf.fr_mode == PC300_FR_UNFRAMED) { + /* CPLD_ID < 0x02 doesn't support Unframed E1 */ + return -EINVAL; + } +#ifdef CONFIG_PC300_MLPPP + if (conf_aux.conf.proto == PC300_PROTO_MLPPP) { + if (conf->proto != PC300_PROTO_MLPPP) { + memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t)); + cpc_tty_init(d); /* init TTY driver */ + } + } else { + if (conf_aux.conf.proto == 0xffff) { + if (conf->proto == PC300_PROTO_MLPPP){ + /* ifdown interface */ + cpc_close(dev); + } + } else { + memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t)); + hdlc->proto = conf->proto; + } + } +#else + memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t)); + hdlc->proto = conf->proto; +#endif + return 0; + case SIOCGPC300STATUS: + cpc_sca_status(card, ch); + return 0; + case SIOCGPC300FALCSTATUS: + cpc_falc_status(card, ch); + return 0; + + case SIOCGPC300UTILSTATS: + { + if (!arg) { /* clear statistics */ + memset(&hdlc->stats, 0, sizeof(struct net_device_stats)); + if (card->hw.type == PC300_TE) { + memset(&chan->falc, 0, sizeof(falc_t)); + } + } else { + pc300stats_t pc300stats; + + memset(&pc300stats, 0, sizeof(pc300stats_t)); + pc300stats.hw_type = card->hw.type; + pc300stats.line_on = card->chan[ch].d.line_on; + pc300stats.line_off = card->chan[ch].d.line_off; + memcpy(&pc300stats.gen_stats, &hdlc->stats, + sizeof(struct net_device_stats)); + if (card->hw.type == PC300_TE) + memcpy(&pc300stats.te_stats,&chan->falc,sizeof(falc_t)); + copy_to_user(arg, &pc300stats, sizeof(pc300stats_t)); + } + return 0; + } + + case SIOCGPC300UTILSTATUS: + { + struct pc300status pc300status; + + pc300status.hw_type = card->hw.type; + if (card->hw.type == PC300_TE) { + pc300status.te_status.sync = chan->falc.sync; + pc300status.te_status.red_alarm = chan->falc.red_alarm; + pc300status.te_status.blue_alarm = chan->falc.blue_alarm; + pc300status.te_status.loss_fa = chan->falc.loss_fa; + pc300status.te_status.yellow_alarm =chan->falc.yellow_alarm; + pc300status.te_status.loss_mfa = chan->falc.loss_mfa; + pc300status.te_status.prbs = chan->falc.prbs; + } else { + pc300status.gen_status.dcd = + !(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_DCD); + pc300status.gen_status.cts = + !(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_CTS); + pc300status.gen_status.rts = + !(cpc_readb (scabase + M_REG(CTL, 
ch)) & CTL_RTS); + pc300status.gen_status.dtr = + !(cpc_readb (scabase + M_REG(CTL, ch)) & CTL_DTR); + /* There is no DSR in HD64572 */ + } + if (!arg + || copy_to_user(arg, &pc300status, sizeof(pc300status_t))) + return -EINVAL; + return 0; + } + + case SIOCSPC300TRACE: + /* Sets/resets a trace_flag for the respective device */ + if (!arg || copy_from_user(&d->trace_on, arg,sizeof(unsigned char))) + return -EINVAL; + return 0; + + case SIOCSPC300LOOPBACK: + { + struct pc300loopback pc300loop; + + /* TE boards only */ + if (card->hw.type != PC300_TE) + return -EINVAL; + + if (!arg || + copy_from_user(&pc300loop, arg, sizeof(pc300loopback_t))) + return -EINVAL; + switch (pc300loop.loop_type) { + case PC300LOCLOOP: /* Turn the local loop on/off */ + falc_local_loop(card, ch, pc300loop.loop_on); + return 0; + + case PC300REMLOOP: /* Turn the remote loop on/off */ + falc_remote_loop(card, ch, pc300loop.loop_on); + return 0; + + case PC300PAYLOADLOOP: /* Turn the payload loop on/off */ + falc_payload_loop(card, ch, pc300loop.loop_on); + return 0; + + case PC300GENLOOPUP: /* Generate loop UP */ + if (pc300loop.loop_on) { + falc_generate_loop_up_code (card, ch); + } else { + turn_off_xlu(card, ch); + } + return 0; + + case PC300GENLOOPDOWN: /* Generate loop DOWN */ + if (pc300loop.loop_on) { + falc_generate_loop_down_code (card, ch); + } else { + turn_off_xld(card, ch); + } + return 0; + + default: + return -EINVAL; + } + } + + case SIOCSPC300PATTERNTEST: + /* Turn the pattern test on/off and show the errors counter */ + { + struct pc300patterntst pc300patrntst; + + /* TE boards only */ + if (card->hw.type != PC300_TE) + return -EINVAL; + + if (card->hw.cpld_id < 0x02) { + /* CPLD_ID < 0x02 doesn't support pattern test */ + return -EINVAL; + } + + if (!arg || + copy_from_user(&pc300patrntst,arg,sizeof(pc300patterntst_t))) + return -EINVAL; + if (pc300patrntst.patrntst_on == 2) { + if (chan->falc.prbs == 0) { + falc_pattern_test(card, ch, 1); + } + pc300patrntst.num_errors = + falc_pattern_test_error(card, ch); + if (!arg + || copy_to_user(arg, &pc300patrntst, + sizeof (pc300patterntst_t))) + return -EINVAL; + } else { + falc_pattern_test(card, ch, pc300patrntst.patrntst_on); + } + return 0; + } + + case SIOCWANDEV: + switch (ifr->ifr_settings.type) { + case IF_GET_IFACE: + { + const size_t size = sizeof(sync_serial_settings); + ifr->ifr_settings.type = conf->media; + if (ifr->ifr_settings.size == 0) { + return 0; //return interface type only + } + if (ifr->ifr_settings.size < size) { + return -ENOMEM; //small buffer + } + if (copy_to_user(& ifr->ifr_settings.ifs_ifsu, + &conf->phys_settings, size)) { + return -EFAULT; + } + ifr->ifr_settings.size = size; + return 0; + } + + case IF_IFACE_V35: + case IF_IFACE_V24: + case IF_IFACE_X21: + { + const size_t size = sizeof(sync_serial_settings); + + if (!capable(CAP_NET_ADMIN)) { + return -EPERM; + } + if (ifr->ifr_settings.size != size) { + return -ENOMEM; //incorrect data len + } + if (copy_from_user(&conf->phys_settings, + &ifr->ifr_settings.ifs_ifsu, size)) { + return -EFAULT; + } + if (conf->phys_settings.loopback) { + cpc_writeb(card->hw.scabase + M_REG(MD2, ch), + cpc_readb(card->hw.scabase + M_REG(MD2, ch)) | + MD2_LOOP_MIR); + } + conf->media = ifr->ifr_settings.type; + return 0; + } + + case IF_IFACE_T1: + case IF_IFACE_E1: + { + const size_t te_size = sizeof(te1_settings); + const size_t size = sizeof(sync_serial_settings); + + if (!capable(CAP_NET_ADMIN)) { + return -EPERM; + } + if (ifr->ifr_settings.size != te_size) { + return -ENOMEM; 
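/*
 * The T1/E1 variant insists on a full te1_settings blob from user space,
 * yet copies in only the sync_serial_settings prefix; te1_settings merely
 * appends a slot_map word, which this driver ignores for now (see the
 * "Ignoring HDLC slot_map" note below). A sketch of the assumed
 * generic-HDLC layout of the day (field names from the 2.4 hdlc headers):
 *
 *	typedef struct {
 *		unsigned int clock_rate;	// bits per second
 *		unsigned int clock_type;	// internal, external, ...
 *		unsigned short loopback;
 *	} sync_serial_settings;			// prefix actually copied
 *
 *	typedef struct {
 *		unsigned int clock_rate;
 *		unsigned int clock_type;
 *		unsigned short loopback;
 *		unsigned int slot_map;		// extra word, ignored here
 *	} te1_settings;				// size user space must pass
 */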
//incorrect data len + } + if (copy_from_user(&conf->phys_settings, + &ifr->ifr_settings.ifs_ifsu, size)) { + return -EFAULT; + }/* Ignoring HDLC slot_map for a while */ + if (conf->phys_settings.loopback) { + cpc_writeb(card->hw.scabase + M_REG(MD2, ch), + cpc_readb(card->hw.scabase + M_REG(MD2, ch)) | + MD2_LOOP_MIR); + } + conf->media = ifr->ifr_settings.type; + return 0; + } + + default: + return hdlc_ioctl(dev, ifr, cmd); + } + + default: + return hdlc_ioctl(dev, ifr, cmd); + } +} + +static struct net_device_stats *cpc_get_stats(struct net_device *dev) +{ + pc300dev_t *d = (pc300dev_t *) dev->priv; + + if (d) + return &d->hdlc->stats; + else + return NULL; +} + +static int clock_rate_calc(uclong rate, uclong clock, int *br_io) +{ + int br, tc; + int br_pwr, error; + + if (rate == 0) + return (0); + + for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) { + if ((tc = clock / br_pwr / rate) <= 0xff) { + *br_io = br; + break; + } + } + + if (tc <= 0xff) { + error = ((rate - (clock / br_pwr / rate)) / rate) * 1000; + /* Errors bigger than +/- 1% won't be tolerated */ + if (error < -10 || error > 10) + return (-1); + else + return (tc); + } else { + return (-1); + } +} + +int ch_config(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + pc300_t *card = (pc300_t *) chan->card; + uclong scabase = card->hw.scabase; + uclong plxbase = card->hw.plxbase; + int ch = chan->channel; + uclong clkrate = chan->conf.phys_settings.clock_rate; + uclong clktype = chan->conf.phys_settings.clock_type; + ucshort encoding = chan->conf.proto_settings.encoding; + ucshort parity = chan->conf.proto_settings.parity; + int tmc, br; + ucchar md0, md2; + + /* Reset the channel */ + cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); + + /* Configure the SCA registers */ + switch (parity) { + case PARITY_NONE: + md0 = MD0_BIT_SYNC; + break; + case PARITY_CRC16_PR0: + md0 = MD0_CRC16_0|MD0_CRCC0|MD0_BIT_SYNC; + break; + case PARITY_CRC16_PR1: + md0 = MD0_CRC16_1|MD0_CRCC0|MD0_BIT_SYNC; + break; + case PARITY_CRC32_PR1_CCITT: + md0 = MD0_CRC32|MD0_CRCC0|MD0_BIT_SYNC; + break; + case PARITY_CRC16_PR1_CCITT: + default: + md0 = MD0_CRC_CCITT|MD0_CRCC0|MD0_BIT_SYNC; + break; + } + switch (encoding) { + case ENCODING_NRZI: + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZI; + break; + case ENCODING_FM_MARK: /* FM1 */ + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM1; + break; + case ENCODING_FM_SPACE: /* FM0 */ + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM0; + break; + case ENCODING_MANCHESTER: /* It's not working... 
*/ + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_MANCH; + break; + case ENCODING_NRZ: + default: + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZ; + break; + } + cpc_writeb(scabase + M_REG(MD0, ch), md0); + cpc_writeb(scabase + M_REG(MD1, ch), 0); + cpc_writeb(scabase + M_REG(MD2, ch), md2); + cpc_writeb(scabase + M_REG(IDL, ch), 0x7e); + cpc_writeb(scabase + M_REG(CTL, ch), CTL_URSKP | CTL_IDLC); + + /* Configure HW media */ + switch (card->hw.type) { + case PC300_RSV: + if (conf->media == IF_IFACE_V35) { + cpc_writel((plxbase + card->hw.gpioc_reg), + cpc_readl(plxbase + card->hw.gpioc_reg) | PC300_CHMEDIA_MASK(ch)); + } else { + cpc_writel((plxbase + card->hw.gpioc_reg), + cpc_readl(plxbase + card->hw.gpioc_reg) & ~PC300_CHMEDIA_MASK(ch)); + } + break; + + case PC300_X21: + break; + + case PC300_TE: + te_config(card, ch); + break; + } + + switch (card->hw.type) { + case PC300_RSV: + case PC300_X21: + if (clktype == CLOCK_INT || clktype == CLOCK_TXINT) { + /* Calculate the clkrate parameters */ + tmc = clock_rate_calc(clkrate, card->hw.clock, &br); + cpc_writeb(scabase + M_REG(TMCT, ch), tmc); + cpc_writeb(scabase + M_REG(TXS, ch), + (TXS_DTRXC | TXS_IBRG | br)); + if (clktype == CLOCK_INT) { + cpc_writeb(scabase + M_REG(TMCR, ch), tmc); + cpc_writeb(scabase + M_REG(RXS, ch), + (RXS_IBRG | br)); + } else { + cpc_writeb(scabase + M_REG(TMCR, ch), 1); + cpc_writeb(scabase + M_REG(RXS, ch), 0); + } + if (card->hw.type == PC300_X21) { + cpc_writeb(scabase + M_REG(GPO, ch), 1); + cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1); + } else { + cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1); + } + } else { + cpc_writeb(scabase + M_REG(TMCT, ch), 1); + if (clktype == CLOCK_EXT) { + cpc_writeb(scabase + M_REG(TXS, ch), + TXS_DTRXC); + } else { + cpc_writeb(scabase + M_REG(TXS, ch), + TXS_DTRXC|TXS_RCLK); + } + cpc_writeb(scabase + M_REG(TMCR, ch), 1); + cpc_writeb(scabase + M_REG(RXS, ch), 0); + if (card->hw.type == PC300_X21) { + cpc_writeb(scabase + M_REG(GPO, ch), 0); + cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1); + } else { + cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1); + } + } + break; + + case PC300_TE: + /* SCA always receives clock from the FALC chip */ + cpc_writeb(scabase + M_REG(TMCT, ch), 1); + cpc_writeb(scabase + M_REG(TXS, ch), 0); + cpc_writeb(scabase + M_REG(TMCR, ch), 1); + cpc_writeb(scabase + M_REG(RXS, ch), 0); + cpc_writeb(scabase + M_REG(EXS, ch), 0); + break; + } + + /* Enable Interrupts */ + cpc_writel(scabase + IER0, + cpc_readl(scabase + IER0) | + IR0_M(IR0_RXINTA, ch) | + IR0_DRX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch) | + IR0_DTX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch)); + cpc_writeb(scabase + M_REG(IE0, ch), + cpc_readl(scabase + M_REG(IE0, ch)) | IE0_RXINTA); + cpc_writeb(scabase + M_REG(IE1, ch), + cpc_readl(scabase + M_REG(IE1, ch)) | IE1_CDCD); + + return 0; +} + +int rx_config(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + uclong scabase = card->hw.scabase; + int ch = chan->channel; + + cpc_writeb(scabase + DSR_RX(ch), 0); + + /* General RX settings */ + cpc_writeb(scabase + M_REG(RRC, ch), 0); + cpc_writeb(scabase + M_REG(RNR, ch), 16); + + /* Enable reception */ + cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_CRC_INIT); + cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_ENA); + + /* Initialize DMA stuff */ + chan->rx_first_bd = 0; + chan->rx_last_bd = N_DMA_RX_BUF - 1; + rx_dma_buf_init(card, ch); + cpc_writeb(scabase + DCR_RX(ch), DCR_FCT_CLR); + cpc_writeb(scabase + DMR_RX(ch), (DMR_TMOD | DMR_NF)); + 
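/*
 * The RX and TX buffer-descriptor rings are indexed modulo their size
 * with a mask, e.g. (tx_first_bd + 1) & (N_DMA_TX_BUF - 1) in
 * sca_tx_intr() and the wrap of rx_last_bd above, which is only correct
 * because N_DMA_RX_BUF and N_DMA_TX_BUF are powers of two. A minimal
 * sketch of the wrap (ring_next() is illustrative, not from the driver):
 *
 *	static inline int ring_next(int bd, int ring_size)
 *	{
 *		return (bd + 1) & (ring_size - 1);	// ring_size == 2^n
 *	}
 */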
cpc_writeb(scabase + DIR_RX(ch), (DIR_EOM | DIR_BOF)); + + /* Start DMA */ + rx_dma_start(card, ch); + + return 0; +} + +int tx_config(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + uclong scabase = card->hw.scabase; + int ch = chan->channel; + + cpc_writeb(scabase + DSR_TX(ch), 0); + + /* General TX settings */ + cpc_writeb(scabase + M_REG(TRC0, ch), 0); + cpc_writeb(scabase + M_REG(TFS, ch), 32); + cpc_writeb(scabase + M_REG(TNR0, ch), 20); + cpc_writeb(scabase + M_REG(TNR1, ch), 48); + cpc_writeb(scabase + M_REG(TCR, ch), 8); + + /* Enable transmission */ + cpc_writeb(scabase + M_REG(CMD, ch), CMD_TX_CRC_INIT); + + /* Initialize DMA stuff */ + chan->tx_first_bd = 0; + chan->tx_next_bd = 0; + tx_dma_buf_init(card, ch); + cpc_writeb(scabase + DCR_TX(ch), DCR_FCT_CLR); + cpc_writeb(scabase + DMR_TX(ch), (DMR_TMOD | DMR_NF)); + cpc_writeb(scabase + DIR_TX(ch), (DIR_EOM | DIR_BOF | DIR_UDRF)); + cpc_writel(scabase + DTX_REG(CDAL, ch), TX_BD_ADDR(ch, chan->tx_first_bd)); + cpc_writel(scabase + DTX_REG(EDAL, ch), TX_BD_ADDR(ch, chan->tx_next_bd)); + + return 0; +} + +static int cpc_attach(hdlc_device * hdlc, unsigned short encoding, + unsigned short parity) +{ + struct net_device * dev = hdlc_to_dev(hdlc); + pc300dev_t *d = (pc300dev_t *)dev->priv; + pc300ch_t *chan = (pc300ch_t *)d->chan; + pc300_t *card = (pc300_t *)chan->card; + pc300chconf_t *conf = (pc300chconf_t *)&chan->conf; + + if (card->hw.type == PC300_TE) { + if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI) { + return -EINVAL; + } + } else { + if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI && + encoding != ENCODING_FM_MARK && encoding != ENCODING_FM_SPACE) { + /* Driver doesn't support ENCODING_MANCHESTER yet */ + return -EINVAL; + } + } + + if (parity != PARITY_NONE && parity != PARITY_CRC16_PR0 && + parity != PARITY_CRC16_PR1 && parity != PARITY_CRC32_PR1_CCITT && + parity != PARITY_CRC16_PR1_CCITT) { + return -EINVAL; + } + + conf->proto_settings.encoding = encoding; + conf->proto_settings.parity = parity; + return 0; +} + +void cpc_opench(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + int ch = chan->channel; + uclong scabase = card->hw.scabase; + + ch_config(d); + + rx_config(d); + + tx_config(d); + + /* Assert RTS and DTR */ + cpc_writeb(scabase + M_REG(CTL, ch), + cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR)); +} + +void cpc_closech(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + falc_t *pfalc = (falc_t *) & chan->falc; + int ch = chan->channel; + + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_CH_RST); + rx_dma_stop(card, ch); + tx_dma_stop(card, ch); + + if (card->hw.type == PC300_TE) { + memset(pfalc, 0, sizeof(falc_t)); + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & + ~((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK | + CPLD_REG2_FALC_LED2) << (2 * ch))); + /* Reset the FALC chip */ + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) | + (CPLD_REG1_FALC_RESET << (2 * ch))); + udelay(10000); + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) & + ~(CPLD_REG1_FALC_RESET << (2 * ch))); + } +} + +int cpc_open(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + pc300dev_t *d = (pc300dev_t *) dev->priv; + struct ifreq ifr; + int result; 
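/*
 * Ordering in cpc_open() matters for the PPP case: the syncppp glue
 * reaches its state through the device's private data and the leading
 * if_ptr member of pc300dev_t, so the driver appears to point if_ptr at
 * the generic-HDLC pppdev before hdlc_open() and to put dev->priv back
 * to its own pc300dev_t right after it, before the hardware channel is
 * brought up. Annotated reading of the sequence below:
 *
 *	d->if_ptr = &hdlc->state.ppp.pppdev;	// let syncppp find its state
 *	result = hdlc_open(hdlc);		// may repoint dev->priv
 *	dev->priv = d;				// restore our private data
 *	// ... then cpc_opench() and netif_start_queue()
 */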
+ +#ifdef PC300_DEBUG_OTHER + printk("pc300: cpc_open"); +#endif + + if (hdlc->proto == IF_PROTO_PPP) { + d->if_ptr = &hdlc->state.ppp.pppdev; + } + + result = hdlc_open(hdlc); + if (hdlc->proto == IF_PROTO_PPP) { + dev->priv = d; + } + if (result) { + return result; + } + + MOD_INC_USE_COUNT; + sprintf(ifr.ifr_name, "%s", dev->name); + cpc_opench(d); + netif_start_queue(dev); + return 0; +} + +int cpc_close(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + uclong flags; + +#ifdef PC300_DEBUG_OTHER + printk("pc300: cpc_close"); +#endif + + netif_stop_queue(dev); + + CPC_LOCK(card, flags); + cpc_closech(d); + CPC_UNLOCK(card, flags); + + hdlc_close(hdlc); + if (hdlc->proto == IF_PROTO_PPP) { + d->if_ptr = NULL; + } +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto == PC300_PROTO_MLPPP) { + cpc_tty_unregister_service(d); + chan->conf.proto = 0xffff; + } +#endif + + MOD_DEC_USE_COUNT; + return 0; +} + +static uclong detect_ram(pc300_t * card) +{ + uclong i; + ucchar data; + uclong rambase = card->hw.rambase; + + card->hw.ramsize = PC300_RAMSIZE; + /* Let's find out how much RAM is present on this board */ + for (i = 0; i < card->hw.ramsize; i++) { + data = (ucchar) (i & 0xff); + cpc_writeb(rambase + i, data); + if (cpc_readb(rambase + i) != data) { + break; + } + } + return (i); +} + +static void plx_init(pc300_t * card) +{ + struct RUNTIME_9050 *plx_ctl = (struct RUNTIME_9050 *) card->hw.plxbase; + + /* Reset PLX */ + cpc_writel(&plx_ctl->init_ctrl, + cpc_readl(&plx_ctl->init_ctrl) | 0x40000000); + udelay(10000L); + cpc_writel(&plx_ctl->init_ctrl, + cpc_readl(&plx_ctl->init_ctrl) & ~0x40000000); + + /* Reload Config. 
Registers from EEPROM */ + cpc_writel(&plx_ctl->init_ctrl, + cpc_readl(&plx_ctl->init_ctrl) | 0x20000000); + udelay(10000L); + cpc_writel(&plx_ctl->init_ctrl, + cpc_readl(&plx_ctl->init_ctrl) & ~0x20000000); + +} + +static inline void show_version(void) +{ + char *rcsvers, *rcsdate, *tmp; + + rcsvers = strchr(rcsid, ' '); + rcsvers++; + tmp = strchr(rcsvers, ' '); + *tmp++ = '\0'; + rcsdate = strchr(tmp, ' '); + rcsdate++; + tmp = strrchr(rcsdate, ' '); + *tmp = '\0'; + printk(KERN_INFO "Cyclades-PC300 driver %s %s (built %s %s)\n", + rcsvers, rcsdate, __DATE__, __TIME__); +} /* show_version */ + +static void cpc_init_card(pc300_t * card) +{ + int i, devcount = 0; + static int board_nbr = 1; + + /* Enable interrupts on the PCI bridge */ + plx_init(card); + cpc_writew(card->hw.plxbase + card->hw.intctl_reg, + cpc_readw(card->hw.plxbase + card->hw.intctl_reg) | 0x0040); + +#ifdef USE_PCI_CLOCK + /* Set board clock to PCI clock */ + cpc_writel(card->hw.plxbase + card->hw.gpioc_reg, + cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) | 0x00000004UL); + card->hw.clock = PC300_PCI_CLOCK; +#else + /* Set board clock to internal oscillator clock */ + cpc_writel(card->hw.plxbase + card->hw.gpioc_reg, + cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & ~0x00000004UL); + card->hw.clock = PC300_OSC_CLOCK; +#endif + + /* Detect actual on-board RAM size */ + card->hw.ramsize = detect_ram(card); + + /* Set Global SCA-II registers */ + cpc_writeb(card->hw.scabase + PCR, PCR_PR2); + cpc_writeb(card->hw.scabase + BTCR, 0x10); + cpc_writeb(card->hw.scabase + WCRL, 0); + cpc_writeb(card->hw.scabase + DMER, 0x80); + + if (card->hw.type == PC300_TE) { + ucchar reg1; + + /* Check CPLD version */ + reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); + cpc_writeb(card->hw.falcbase + CPLD_REG1, (reg1 + 0x5a)); + if (cpc_readb(card->hw.falcbase + CPLD_REG1) == reg1) { + /* New CPLD */ + card->hw.cpld_id = cpc_readb(card->hw.falcbase + CPLD_ID_REG); + card->hw.cpld_reg1 = CPLD_V2_REG1; + card->hw.cpld_reg2 = CPLD_V2_REG2; + } else { + /* old CPLD */ + card->hw.cpld_id = 0; + card->hw.cpld_reg1 = CPLD_REG1; + card->hw.cpld_reg2 = CPLD_REG2; + cpc_writeb(card->hw.falcbase + CPLD_REG1, reg1); + } + + /* Enable the board's global clock */ + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) | + CPLD_REG1_GLOBAL_CLK); + + } + + for (i = 0; i < card->hw.nchan; i++) { + pc300ch_t *chan = &card->chan[i]; + pc300dev_t *d = &chan->d; + hdlc_device *hdlc; + struct net_device *dev; + + chan->card = card; + chan->channel = i; + chan->conf.phys_settings.clock_rate = 0; + chan->conf.phys_settings.clock_type = CLOCK_EXT; + chan->conf.proto_settings.encoding = ENCODING_NRZ; + chan->conf.proto_settings.parity = PARITY_CRC16_PR1_CCITT; + switch (card->hw.type) { + case PC300_TE: + chan->conf.media = IF_IFACE_T1; + chan->conf.lcode = PC300_LC_B8ZS; + chan->conf.fr_mode = PC300_FR_ESF; + chan->conf.lbo = PC300_LBO_0_DB; + chan->conf.rx_sens = PC300_RX_SENS_SH; + chan->conf.tslot_bitmap = 0xffffffffUL; + break; + + case PC300_X21: + chan->conf.media = IF_IFACE_X21; + break; + + case PC300_RSV: + default: + chan->conf.media = IF_IFACE_V35; + break; + } + chan->conf.proto = IF_PROTO_PPP; + chan->tx_first_bd = 0; + chan->tx_next_bd = 0; + chan->rx_first_bd = 0; + chan->rx_last_bd = N_DMA_RX_BUF - 1; + chan->nfree_tx_bd = N_DMA_TX_BUF; + + d->chan = chan; + d->tx_skb = NULL; + d->trace_on = 0; + d->line_on = 0; + d->line_off = 0; + + d->hdlc = (hdlc_device *) kmalloc(sizeof(hdlc_device), 
GFP_KERNEL); + if (d->hdlc == NULL) + continue; + memset(d->hdlc, 0, sizeof(hdlc_device)); + + hdlc = d->hdlc; + hdlc->xmit = cpc_queue_xmit; + hdlc->attach = cpc_attach; + + dev = hdlc_to_dev(hdlc); + + dev->mem_start = card->hw.ramphys; + dev->mem_end = card->hw.ramphys + card->hw.ramsize - 1; + dev->irq = card->hw.irq; + dev->init = NULL; + dev->tx_queue_len = PC300_TX_QUEUE_LEN; + dev->mtu = PC300_DEF_MTU; + + dev->open = cpc_open; + dev->stop = cpc_close; + dev->tx_timeout = cpc_tx_timeout; + dev->watchdog_timeo = PC300_TX_TIMEOUT; + dev->get_stats = cpc_get_stats; + dev->set_multicast_list = NULL; + dev->set_mac_address = NULL; + dev->change_mtu = cpc_change_mtu; + dev->do_ioctl = cpc_ioctl; + + if (register_hdlc_device(hdlc) == 0) { + dev->priv = d; /* We need 'priv', hdlc doesn't */ + printk("%s: Cyclades-PC300/", dev->name); + switch (card->hw.type) { + case PC300_TE: + if (card->hw.bus == PC300_PMC) { + printk("TE-M"); + } else { + printk("TE "); + } + break; + + case PC300_X21: + printk("X21 "); + break; + + case PC300_RSV: + default: + printk("RSV "); + break; + } + printk (" #%d, %ldKB of RAM at 0x%08lx, IRQ%d, channel %d.\n", + board_nbr, card->hw.ramsize / 1024, + card->hw.ramphys, card->hw.irq, i + 1); + devcount++; + } else { + printk ("Dev%d on card(0x%08lx): unable to allocate i/f name.\n", + i + 1, card->hw.ramphys); + *(dev->name) = 0; + kfree(d->hdlc); + continue; + } + } + spin_lock_init(&card->card_lock); + + board_nbr++; +} + +static int __devinit +cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int first_time = 1; + ucchar cpc_rev_id; + int err = 0, eeprom_outdated = 0; + ucshort device_id; + pc300_t *card; + + if (first_time) { + first_time = 0; + show_version(); +#ifdef CONFIG_PC300_MLPPP + cpc_tty_reset_var(); +#endif + } + + card = (pc300_t *) kmalloc(sizeof(pc300_t), GFP_KERNEL); + if (card == NULL) { + printk("PC300 found at RAM 0x%08lx, " + "but could not allocate card structure.\n", + pci_resource_start(pdev, 3)); + return -ENOMEM; + } + memset(card, 0, sizeof(pc300_t)); + + /* read PCI configuration area */ + device_id = ent->device; + card->hw.irq = pdev->irq; + card->hw.iophys = pci_resource_start(pdev, 1); + card->hw.iosize = pci_resource_len(pdev, 1); + card->hw.scaphys = pci_resource_start(pdev, 2); + card->hw.scasize = pci_resource_len(pdev, 2); + card->hw.ramphys = pci_resource_start(pdev, 3); + card->hw.alloc_ramsize = pci_resource_len(pdev, 3); + card->hw.falcphys = pci_resource_start(pdev, 4); + card->hw.falcsize = pci_resource_len(pdev, 4); + card->hw.plxphys = pci_resource_start(pdev, 5); + card->hw.plxsize = pci_resource_len(pdev, 5); + pci_read_config_byte(pdev, PCI_REVISION_ID, &cpc_rev_id); + + switch (device_id) { + case PCI_DEVICE_ID_PC300_RX_1: + case PCI_DEVICE_ID_PC300_TE_1: + card->hw.nchan = 1; + break; + + case PCI_DEVICE_ID_PC300_RX_2: + case PCI_DEVICE_ID_PC300_TE_2: + default: + card->hw.nchan = PC300_MAXCHAN; + break; + } +#ifdef PC300_DEBUG_PCI + printk("cpc (bus=0x0%x,pci_id=0x%x,", pdev->bus->number, pdev->devfn); + printk("rev_id=%d) IRQ%d\n", cpc_rev_id, card->hw.irq); + printk("cpc:found ramaddr=0x%08lx plxaddr=0x%08lx " + "ctladdr=0x%08lx falcaddr=0x%08lx\n", + card->hw.ramphys, card->hw.plxphys, card->hw.scaphys, + card->hw.falcphys); +#endif + /* Although we don't use this I/O region, we should + * request it from the kernel anyway, to avoid problems + * with other drivers accessing it. 
*/ + if (!request_region(card->hw.iophys, card->hw.iosize, "PLX Registers")) { + /* In case we can't allocate it, warn user */ + printk("WARNING: couldn't allocate I/O region for PC300 board " + "at 0x%08lx!\n", card->hw.ramphys); + } + + if (card->hw.plxphys) { + pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, card->hw.plxphys); + } else { + eeprom_outdated = 1; + card->hw.plxphys = pci_resource_start(pdev, 0); + card->hw.plxsize = pci_resource_len(pdev, 0); + } + + if (!request_mem_region(card->hw.plxphys, card->hw.plxsize, + "PLX Registers")) { + printk("PC300 found at RAM 0x%08lx, " + "but could not allocate PLX mem region.\n", + card->hw.ramphys); + err = -ENODEV; + goto err_release_io; + } + if (!request_mem_region(card->hw.ramphys, card->hw.alloc_ramsize, + "On-board RAM")) { + printk("PC300 found at RAM 0x%08lx, " + "but could not allocate RAM mem region.\n", + card->hw.ramphys); + err = -ENODEV; + goto err_release_plx; + } + if (!request_mem_region(card->hw.scaphys, card->hw.scasize, + "SCA-II Registers")) { + printk("PC300 found at RAM 0x%08lx, " + "but could not allocate SCA mem region.\n", + card->hw.ramphys); + err = -ENODEV; + goto err_release_ram; + } + + if ((err = pci_enable_device(pdev)) != 0) + goto err_release_sca; + + card->hw.plxbase = (uclong) ioremap(card->hw.plxphys, card->hw.plxsize); + card->hw.rambase = (uclong) ioremap(card->hw.ramphys, + card->hw.alloc_ramsize); + card->hw.scabase = (uclong) ioremap(card->hw.scaphys, card->hw.scasize); + switch (device_id) { + case PCI_DEVICE_ID_PC300_TE_1: + case PCI_DEVICE_ID_PC300_TE_2: + request_mem_region(card->hw.falcphys, card->hw.falcsize, + "FALC Registers"); + card->hw.falcbase = (uclong) ioremap(card->hw.falcphys, + card->hw.falcsize); + break; + + case PCI_DEVICE_ID_PC300_RX_1: + case PCI_DEVICE_ID_PC300_RX_2: + default: + card->hw.falcbase = 0; + break; + } + +#ifdef PC300_DEBUG_PCI + printk("cpc: relocate ramaddr=0x%08lx plxaddr=0x%08lx " + "ctladdr=0x%08lx falcaddr=0x%08lx\n", + card->hw.rambase, card->hw.plxbase, card->hw.scabase, + card->hw.falcbase); +#endif + + /* Set PCI drv pointer to the card structure */ + pdev->driver_data = card; + + /* Set board type */ + switch (device_id) { + case PCI_DEVICE_ID_PC300_TE_1: + case PCI_DEVICE_ID_PC300_TE_2: + card->hw.type = PC300_TE; + card->hw.bus = PC300_PCI; + /* Set PLX register offsets */ + card->hw.gpioc_reg = 0x50; + card->hw.intctl_reg = 0x4c; + break; + case PCI_DEVICE_ID_PC300_RX_1: + case PCI_DEVICE_ID_PC300_RX_2: + default: + card->hw.bus = PC300_PCI; + /* Set PLX register offsets */ + card->hw.gpioc_reg = 0x50; + card->hw.intctl_reg = 0x4c; + + if ((cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & PC300_CTYPE_MASK)) { + card->hw.type = PC300_X21; + } else { + card->hw.type = PC300_RSV; + } + break; + } + + /* Allocate IRQ */ + if (request_irq(card->hw.irq, cpc_intr, SA_SHIRQ, "Cyclades-PC300", card)) { + printk ("PC300 found at RAM 0x%08lx, but could not allocate IRQ%d.\n", + card->hw.ramphys, card->hw.irq); + goto err_io_unmap; + } + + cpc_init_card(card); + + if (eeprom_outdated) + printk("WARNING: PC300 with outdated EEPROM.\n"); + return 0; + +err_io_unmap: + iounmap((void *) card->hw.plxbase); + iounmap((void *) card->hw.scabase); + iounmap((void *) card->hw.rambase); + if (card->hw.type == PC300_TE) { + iounmap((void *) card->hw.falcbase); + release_mem_region(card->hw.falcphys, card->hw.falcsize); + } +err_release_sca: + release_mem_region(card->hw.scaphys, card->hw.scasize); +err_release_ram: + release_mem_region(card->hw.ramphys, 

card->hw.alloc_ramsize); +err_release_plx: + release_mem_region(card->hw.plxphys, card->hw.plxsize); +err_release_io: + release_region(card->hw.iophys, card->hw.iosize); + kfree(card); + return -ENODEV; +} + +static void __devexit cpc_remove_one(struct pci_dev *pdev) +{ + pc300_t *card = (pc300_t *) pdev->driver_data; + + if (card->hw.rambase != 0) { + int i; + + /* Disable interrupts on the PCI bridge */ + cpc_writew(card->hw.plxbase + card->hw.intctl_reg, + cpc_readw(card->hw.plxbase + card->hw.intctl_reg) & ~(0x0040)); + + for (i = 0; i < card->hw.nchan; i++) { + unregister_hdlc_device(card->chan[i].d.hdlc); + } + iounmap((void *) card->hw.plxbase); + iounmap((void *) card->hw.scabase); + iounmap((void *) card->hw.rambase); + release_mem_region(card->hw.plxphys, card->hw.plxsize); + release_mem_region(card->hw.ramphys, card->hw.alloc_ramsize); + release_mem_region(card->hw.scaphys, card->hw.scasize); + release_region(card->hw.iophys, card->hw.iosize); + if (card->hw.type == PC300_TE) { + iounmap((void *) card->hw.falcbase); + release_mem_region(card->hw.falcphys, card->hw.falcsize); + } + if (card->hw.irq) + free_irq(card->hw.irq, card); + kfree(card); + } +} + +static struct pci_driver cpc_driver = { + name:"pc300", + id_table:cpc_pci_dev_id, + probe:cpc_init_one, + remove:cpc_remove_one, + suspend:NULL, + resume:NULL, +}; + +static int __init cpc_init(void) +{ + return pci_module_init(&cpc_driver); +} + +static void __exit cpc_cleanup_module(void) +{ + pci_unregister_driver(&cpc_driver); +} + +module_init(cpc_init); +module_exit(cpc_cleanup_module); + +MODULE_DESCRIPTION("Cyclades-PC300 cards driver"); +MODULE_AUTHOR("Author: Ivan Passos\r\n" + "Maintainer: Henrique Gobbi"); + +/* + * falc_lh.h + * + * Copyright: (c) 2000-2001 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version.
+ * + */ + +#ifndef _FALC_LH_H +#define _FALC_LH_H + +#define NUM_OF_T1_CHANNELS 24 +#define NUM_OF_E1_CHANNELS 32 + +/*>>>>>>>>>>>>>>>>> FALC Register Bits (Transmit Mode) <<<<<<<<<<<<<<<<<<< */ + +/* CMDR (Command Register) + ---------------- E1 & T1 ------------------------------ */ +#define CMDR_RMC 0x80 +#define CMDR_RRES 0x40 +#define CMDR_XREP 0x20 +#define CMDR_XRES 0x10 +#define CMDR_XHF 0x08 +#define CMDR_XTF 0x04 +#define CMDR_XME 0x02 +#define CMDR_SRES 0x01 + +/* MODE (Mode Register) + ----------------- E1 & T1 ----------------------------- */ +#define MODE_MDS2 0x80 +#define MODE_MDS1 0x40 +#define MODE_MDS0 0x20 +#define MODE_BRAC 0x10 +#define MODE_HRAC 0x08 + +/* IPC (Interrupt Port Configuration) + ----------------- E1 & T1 ----------------------------- */ +#define IPC_VIS 0x80 +#define IPC_SCI 0x04 +#define IPC_IC1 0x02 +#define IPC_IC0 0x01 + +/* CCR1 (Common Configuration Register 1) + ----------------- E1 & T1 ----------------------------- */ +#define CCR1_SFLG 0x80 +#define CCR1_XTS16RA 0x40 +#define CCR1_BRM 0x40 +#define CCR1_CASSYM 0x20 +#define CCR1_EDLX 0x20 +#define CCR1_EITS 0x10 +#define CCR1_ITF 0x08 +#define CCR1_RFT1 0x02 +#define CCR1_RFT0 0x01 + +/* CCR3 (Common Configuration Register 3) + ---------------- E1 & T1 ------------------------------ */ + +#define CCR3_PRE1 0x80 +#define CCR3_PRE0 0x40 +#define CCR3_EPT 0x20 +#define CCR3_RADD 0x10 +#define CCR3_RCRC 0x04 +#define CCR3_XCRC 0x02 + + +/* RTR1-4 (Receive Timeslot Register 1-4) + ---------------- E1 & T1 ------------------------------ */ + +#define RTR1_TS0 0x80 +#define RTR1_TS1 0x40 +#define RTR1_TS2 0x20 +#define RTR1_TS3 0x10 +#define RTR1_TS4 0x08 +#define RTR1_TS5 0x04 +#define RTR1_TS6 0x02 +#define RTR1_TS7 0x01 + +#define RTR2_TS8 0x80 +#define RTR2_TS9 0x40 +#define RTR2_TS10 0x20 +#define RTR2_TS11 0x10 +#define RTR2_TS12 0x08 +#define RTR2_TS13 0x04 +#define RTR2_TS14 0x02 +#define RTR2_TS15 0x01 + +#define RTR3_TS16 0x80 +#define RTR3_TS17 0x40 +#define RTR3_TS18 0x20 +#define RTR3_TS19 0x10 +#define RTR3_TS20 0x08 +#define RTR3_TS21 0x04 +#define RTR3_TS22 0x02 +#define RTR3_TS23 0x01 + +#define RTR4_TS24 0x80 +#define RTR4_TS25 0x40 +#define RTR4_TS26 0x20 +#define RTR4_TS27 0x10 +#define RTR4_TS28 0x08 +#define RTR4_TS29 0x04 +#define RTR4_TS30 0x02 +#define RTR4_TS31 0x01 + + +/* TTR1-4 (Transmit Timeslot Register 1-4) + ---------------- E1 & T1 ------------------------------ */ + +#define TTR1_TS0 0x80 +#define TTR1_TS1 0x40 +#define TTR1_TS2 0x20 +#define TTR1_TS3 0x10 +#define TTR1_TS4 0x08 +#define TTR1_TS5 0x04 +#define TTR1_TS6 0x02 +#define TTR1_TS7 0x01 + +#define TTR2_TS8 0x80 +#define TTR2_TS9 0x40 +#define TTR2_TS10 0x20 +#define TTR2_TS11 0x10 +#define TTR2_TS12 0x08 +#define TTR2_TS13 0x04 +#define TTR2_TS14 0x02 +#define TTR2_TS15 0x01 + +#define TTR3_TS16 0x80 +#define TTR3_TS17 0x40 +#define TTR3_TS18 0x20 +#define TTR3_TS19 0x10 +#define TTR3_TS20 0x08 +#define TTR3_TS21 0x04 +#define TTR3_TS22 0x02 +#define TTR3_TS23 0x01 + +#define TTR4_TS24 0x80 +#define TTR4_TS25 0x40 +#define TTR4_TS26 0x20 +#define TTR4_TS27 0x10 +#define TTR4_TS28 0x08 +#define TTR4_TS29 0x04 +#define TTR4_TS30 0x02 +#define TTR4_TS31 0x01 + + + +/* IMR0-4 (Interrupt Mask Register 0-4) + + ----------------- E1 & T1 ----------------------------- */ + +#define IMR0_RME 0x80 +#define IMR0_RFS 0x40 +#define IMR0_T8MS 0x20 +#define IMR0_ISF 0x20 +#define IMR0_RMB 0x10 +#define IMR0_CASC 0x08 +#define IMR0_RSC 0x08 +#define IMR0_CRC6 0x04 +#define IMR0_CRC4 0x04 +#define IMR0_PDEN 0x02 +#define 
IMR0_RPF 0x01 + +#define IMR1_CASE 0x80 +#define IMR1_RDO 0x40 +#define IMR1_ALLS 0x20 +#define IMR1_XDU 0x10 +#define IMR1_XMB 0x08 +#define IMR1_XLSC 0x02 +#define IMR1_XPR 0x01 +#define IMR1_LLBSC 0x80 + +#define IMR2_FAR 0x80 +#define IMR2_LFA 0x40 +#define IMR2_MFAR 0x20 +#define IMR2_T400MS 0x10 +#define IMR2_LMFA 0x10 +#define IMR2_AIS 0x08 +#define IMR2_LOS 0x04 +#define IMR2_RAR 0x02 +#define IMR2_RA 0x01 + +#define IMR3_ES 0x80 +#define IMR3_SEC 0x40 +#define IMR3_LMFA16 0x20 +#define IMR3_AIS16 0x10 +#define IMR3_RA16 0x08 +#define IMR3_API 0x04 +#define IMR3_XSLP 0x20 +#define IMR3_XSLN 0x10 +#define IMR3_LLBSC 0x08 +#define IMR3_XRS 0x04 +#define IMR3_SLN 0x02 +#define IMR3_SLP 0x01 + +#define IMR4_LFA 0x80 +#define IMR4_FER 0x40 +#define IMR4_CER 0x20 +#define IMR4_AIS 0x10 +#define IMR4_LOS 0x08 +#define IMR4_CVE 0x04 +#define IMR4_SLIP 0x02 +#define IMR4_EBE 0x01 + +/* FMR0-5 for E1 and T1 (Framer Mode Register ) */ + +#define FMR0_XC1 0x80 +#define FMR0_XC0 0x40 +#define FMR0_RC1 0x20 +#define FMR0_RC0 0x10 +#define FMR0_EXTD 0x08 +#define FMR0_ALM 0x04 +#define E1_FMR0_FRS 0x02 +#define T1_FMR0_FRS 0x08 +#define FMR0_SRAF 0x04 +#define FMR0_EXLS 0x02 +#define FMR0_SIM 0x01 + +#define FMR1_MFCS 0x80 +#define FMR1_AFR 0x40 +#define FMR1_ENSA 0x20 +#define FMR1_CTM 0x80 +#define FMR1_SIGM 0x40 +#define FMR1_EDL 0x20 +#define FMR1_PMOD 0x10 +#define FMR1_XFS 0x08 +#define FMR1_CRC 0x08 +#define FMR1_ECM 0x04 +#define FMR1_IMOD 0x02 +#define FMR1_XAIS 0x01 + +#define FMR2_RFS1 0x80 +#define FMR2_RFS0 0x40 +#define FMR2_MCSP 0x40 +#define FMR2_RTM 0x20 +#define FMR2_SSP 0x20 +#define FMR2_DAIS 0x10 +#define FMR2_SAIS 0x08 +#define FMR2_PLB 0x04 +#define FMR2_AXRA 0x02 +#define FMR2_ALMF 0x01 +#define FMR2_EXZE 0x01 + +#define LOOP_RTM 0x40 +#define LOOP_SFM 0x40 +#define LOOP_ECLB 0x20 +#define LOOP_CLA 0x1f + +/*--------------------- E1 ----------------------------*/ +#define FMR3_XLD 0x20 +#define FMR3_XLU 0x10 + +/*--------------------- T1 ----------------------------*/ +#define FMR4_AIS3 0x80 +#define FMR4_TM 0x40 +#define FMR4_XRA 0x20 +#define FMR4_SSC1 0x10 +#define FMR4_SSC0 0x08 +#define FMR4_AUTO 0x04 +#define FMR4_FM1 0x02 +#define FMR4_FM0 0x01 + +#define FMR5_SRS 0x80 +#define FMR5_EIBR 0x40 +#define FMR5_XLD 0x20 +#define FMR5_XLU 0x10 + + +/* LOOP (Channel Loop Back) + + ------------------ E1 & T1 ---------------------------- */ + +#define LOOP_SFM 0x40 +#define LOOP_ECLB 0x20 +#define LOOP_CLA4 0x10 +#define LOOP_CLA3 0x08 +#define LOOP_CLA2 0x04 +#define LOOP_CLA1 0x02 +#define LOOP_CLA0 0x01 + + + +/* XSW (Transmit Service Word Pulseframe) + + ------------------- E1 --------------------------- */ + +#define XSW_XSIS 0x80 +#define XSW_XTM 0x40 +#define XSW_XRA 0x20 +#define XSW_XY0 0x10 +#define XSW_XY1 0x08 +#define XSW_XY2 0x04 +#define XSW_XY3 0x02 +#define XSW_XY4 0x01 + + +/* XSP (Transmit Spare Bits) + + ------------------- E1 --------------------------- */ + +#define XSP_XAP 0x80 +#define XSP_CASEN 0x40 +#define XSP_TT0 0x20 +#define XSP_EBP 0x10 +#define XSP_AXS 0x08 +#define XSP_XSIF 0x04 +#define XSP_XS13 0x02 +#define XSP_XS15 0x01 + + +/* XC0/1 (Transmit Control 0/1) + ------------------ E1 & T1 ---------------------------- */ + +#define XC0_SA8E 0x80 +#define XC0_SA7E 0x40 +#define XC0_SA6E 0x20 +#define XC0_SA5E 0x10 +#define XC0_SA4E 0x08 +#define XC0_BRM 0x80 +#define XC0_MFBS 0x40 +#define XC0_SFRZ 0x10 +#define XC0_XCO2 0x04 +#define XC0_XCO1 0x02 +#define XC0_XCO0 0x01 + +#define XC1_XTO5 0x20 +#define XC1_XTO4 0x10 +#define XC1_XTO3 0x08 
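/*
 * The XC1_XTO5..XC1_XTO0 masks around this point are the single bits of
 * one multi-bit field (the FALC transmit time-slot offset); the header
 * spells every bit out so code can compose a field value by OR-ing named
 * masks instead of magic numbers. Illustrative use only (register access
 * assumed, not taken from the driver) for a 6-bit offset of 9:
 *
 *	cpc_writeb(falcbase + F_REG(XC1, ch), XC1_XTO3 | XC1_XTO0);
 */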
+#define XC1_XTO2 0x04 +#define XC1_XTO1 0x02 +#define XC1_XTO0 0x01 + + +/* RC0/1 (Receive Control 0/1) + ------------------ E1 & T1 ---------------------------- */ + +#define RC0_SICS 0x40 +#define RC0_CRCI 0x20 +#define RC0_XCRCI 0x10 +#define RC0_RDIS 0x08 +#define RC0_RCO2 0x04 +#define RC0_RCO1 0x02 +#define RC0_RCO0 0x01 + +#define RC1_SWD 0x80 +#define RC1_ASY4 0x40 +#define RC1_RRAM 0x40 +#define RC1_RTO5 0x20 +#define RC1_RTO4 0x10 +#define RC1_RTO3 0x08 +#define RC1_RTO2 0x04 +#define RC1_RTO1 0x02 +#define RC1_RTO0 0x01 + + + +/* XPM0-2 (Transmit Pulse Mask 0-2) + --------------------- E1 & T1 ------------------------- */ + +#define XPM0_XP12 0x80 +#define XPM0_XP11 0x40 +#define XPM0_XP10 0x20 +#define XPM0_XP04 0x10 +#define XPM0_XP03 0x08 +#define XPM0_XP02 0x04 +#define XPM0_XP01 0x02 +#define XPM0_XP00 0x01 + +#define XPM1_XP30 0x80 +#define XPM1_XP24 0x40 +#define XPM1_XP23 0x20 +#define XPM1_XP22 0x10 +#define XPM1_XP21 0x08 +#define XPM1_XP20 0x04 +#define XPM1_XP14 0x02 +#define XPM1_XP13 0x01 + +#define XPM2_XLHP 0x80 +#define XPM2_XLT 0x40 +#define XPM2_DAXLT 0x20 +#define XPM2_XP34 0x08 +#define XPM2_XP33 0x04 +#define XPM2_XP32 0x02 +#define XPM2_XP31 0x01 + + +/* TSWM (Transparent Service Word Mask) + ------------------ E1 ---------------------------- */ + +#define TSWM_TSIS 0x80 +#define TSWM_TSIF 0x40 +#define TSWM_TRA 0x20 +#define TSWM_TSA4 0x10 +#define TSWM_TSA5 0x08 +#define TSWM_TSA6 0x04 +#define TSWM_TSA7 0x02 +#define TSWM_TSA8 0x01 + +/* IDLE + + ------------------ E1 & T1 ----------------------- */ + +#define IDLE_IDL7 0x80 +#define IDLE_IDL6 0x40 +#define IDLE_IDL5 0x20 +#define IDLE_IDL4 0x10 +#define IDLE_IDL3 0x08 +#define IDLE_IDL2 0x04 +#define IDLE_IDL1 0x02 +#define IDLE_IDL0 0x01 + + +/* XSA4-8 + -------------------E1 ----------------------------- */ + +#define XSA4_XS47 0x80 +#define XSA4_XS46 0x40 +#define XSA4_XS45 0x20 +#define XSA4_XS44 0x10 +#define XSA4_XS43 0x08 +#define XSA4_XS42 0x04 +#define XSA4_XS41 0x02 +#define XSA4_XS40 0x01 + +#define XSA5_XS57 0x80 +#define XSA5_XS56 0x40 +#define XSA5_XS55 0x20 +#define XSA5_XS54 0x10 +#define XSA5_XS53 0x08 +#define XSA5_XS52 0x04 +#define XSA5_XS51 0x02 +#define XSA5_XS50 0x01 + +#define XSA6_XS67 0x80 +#define XSA6_XS66 0x40 +#define XSA6_XS65 0x20 +#define XSA6_XS64 0x10 +#define XSA6_XS63 0x08 +#define XSA6_XS62 0x04 +#define XSA6_XS61 0x02 +#define XSA6_XS60 0x01 + +#define XSA7_XS77 0x80 +#define XSA7_XS76 0x40 +#define XSA7_XS75 0x20 +#define XSA7_XS74 0x10 +#define XSA7_XS73 0x08 +#define XSA7_XS72 0x04 +#define XSA7_XS71 0x02 +#define XSA7_XS70 0x01 + +#define XSA8_XS87 0x80 +#define XSA8_XS86 0x40 +#define XSA8_XS85 0x20 +#define XSA8_XS84 0x10 +#define XSA8_XS83 0x08 +#define XSA8_XS82 0x04 +#define XSA8_XS81 0x02 +#define XSA8_XS80 0x01 + + +/* XDL1-3 (Transmit DL-Bit Register1-3 (read/write)) + ----------------------- T1 --------------------- */ + +#define XDL1_XDL17 0x80 +#define XDL1_XDL16 0x40 +#define XDL1_XDL15 0x20 +#define XDL1_XDL14 0x10 +#define XDL1_XDL13 0x08 +#define XDL1_XDL12 0x04 +#define XDL1_XDL11 0x02 +#define XDL1_XDL10 0x01 + +#define XDL2_XDL27 0x80 +#define XDL2_XDL26 0x40 +#define XDL2_XDL25 0x20 +#define XDL2_XDL24 0x10 +#define XDL2_XDL23 0x08 +#define XDL2_XDL22 0x04 +#define XDL2_XDL21 0x02 +#define XDL2_XDL20 0x01 + +#define XDL3_XDL37 0x80 +#define XDL3_XDL36 0x40 +#define XDL3_XDL35 0x20 +#define XDL3_XDL34 0x10 +#define XDL3_XDL33 0x08 +#define XDL3_XDL32 0x04 +#define XDL3_XDL31 0x02 +#define XDL3_XDL30 0x01 + + +/* ICB1-4 (Idle Channel Register 
1-4) + ------------------ E1 ---------------------------- */ + +#define E1_ICB1_IC0 0x80 +#define E1_ICB1_IC1 0x40 +#define E1_ICB1_IC2 0x20 +#define E1_ICB1_IC3 0x10 +#define E1_ICB1_IC4 0x08 +#define E1_ICB1_IC5 0x04 +#define E1_ICB1_IC6 0x02 +#define E1_ICB1_IC7 0x01 + +#define E1_ICB2_IC8 0x80 +#define E1_ICB2_IC9 0x40 +#define E1_ICB2_IC10 0x20 +#define E1_ICB2_IC11 0x10 +#define E1_ICB2_IC12 0x08 +#define E1_ICB2_IC13 0x04 +#define E1_ICB2_IC14 0x02 +#define E1_ICB2_IC15 0x01 + +#define E1_ICB3_IC16 0x80 +#define E1_ICB3_IC17 0x40 +#define E1_ICB3_IC18 0x20 +#define E1_ICB3_IC19 0x10 +#define E1_ICB3_IC20 0x08 +#define E1_ICB3_IC21 0x04 +#define E1_ICB3_IC22 0x02 +#define E1_ICB3_IC23 0x01 + +#define E1_ICB4_IC24 0x80 +#define E1_ICB4_IC25 0x40 +#define E1_ICB4_IC26 0x20 +#define E1_ICB4_IC27 0x10 +#define E1_ICB4_IC28 0x08 +#define E1_ICB4_IC29 0x04 +#define E1_ICB4_IC30 0x02 +#define E1_ICB4_IC31 0x01 + +/* ICB1-4 (Idle Channel Register 1-4) + ------------------ T1 ---------------------------- */ + +#define T1_ICB1_IC1 0x80 +#define T1_ICB1_IC2 0x40 +#define T1_ICB1_IC3 0x20 +#define T1_ICB1_IC4 0x10 +#define T1_ICB1_IC5 0x08 +#define T1_ICB1_IC6 0x04 +#define T1_ICB1_IC7 0x02 +#define T1_ICB1_IC8 0x01 + +#define T1_ICB2_IC9 0x80 +#define T1_ICB2_IC10 0x40 +#define T1_ICB2_IC11 0x20 +#define T1_ICB2_IC12 0x10 +#define T1_ICB2_IC13 0x08 +#define T1_ICB2_IC14 0x04 +#define T1_ICB2_IC15 0x02 +#define T1_ICB2_IC16 0x01 + +#define T1_ICB3_IC17 0x80 +#define T1_ICB3_IC18 0x40 +#define T1_ICB3_IC19 0x20 +#define T1_ICB3_IC20 0x10 +#define T1_ICB3_IC21 0x08 +#define T1_ICB3_IC22 0x04 +#define T1_ICB3_IC23 0x02 +#define T1_ICB3_IC24 0x01 + +/* FMR3 (Framer Mode Register 3) + --------------------E1------------------------ */ + +#define FMR3_CMI 0x08 +#define FMR3_SYNSA 0x04 +#define FMR3_CFRZ 0x02 +#define FMR3_EXTIW 0x01 + + + +/* CCB1-3 (Clear Channel Register) + ------------------- T1 ----------------------- */ + +#define CCB1_CH1 0x80 +#define CCB1_CH2 0x40 +#define CCB1_CH3 0x20 +#define CCB1_CH4 0x10 +#define CCB1_CH5 0x08 +#define CCB1_CH6 0x04 +#define CCB1_CH7 0x02 +#define CCB1_CH8 0x01 + +#define CCB2_CH9 0x80 +#define CCB2_CH10 0x40 +#define CCB2_CH11 0x20 +#define CCB2_CH12 0x10 +#define CCB2_CH13 0x08 +#define CCB2_CH14 0x04 +#define CCB2_CH15 0x02 +#define CCB2_CH16 0x01 + +#define CCB3_CH17 0x80 +#define CCB3_CH18 0x40 +#define CCB3_CH19 0x20 +#define CCB3_CH20 0x10 +#define CCB3_CH21 0x08 +#define CCB3_CH22 0x04 +#define CCB3_CH23 0x02 +#define CCB3_CH24 0x01 + + +/* LIM0/1 (Line Interface Mode 0/1) + ------------------- E1 & T1 --------------------------- */ + +#define LIM0_XFB 0x80 +#define LIM0_XDOS 0x40 +#define LIM0_SCL1 0x20 +#define LIM0_SCL0 0x10 +#define LIM0_EQON 0x08 +#define LIM0_ELOS 0x04 +#define LIM0_LL 0x02 +#define LIM0_MAS 0x01 + +#define LIM1_EFSC 0x80 +#define LIM1_RIL2 0x40 +#define LIM1_RIL1 0x20 +#define LIM1_RIL0 0x10 +#define LIM1_DCOC 0x08 +#define LIM1_JATT 0x04 +#define LIM1_RL 0x02 +#define LIM1_DRS 0x01 + + +/* PCDR (Pulse Count Detection Register(Read/Write)) + ------------------ E1 & T1 ------------------------- */ + +#define PCDR_PCD7 0x80 +#define PCDR_PCD6 0x40 +#define PCDR_PCD5 0x20 +#define PCDR_PCD4 0x10 +#define PCDR_PCD3 0x08 +#define PCDR_PCD2 0x04 +#define PCDR_PCD1 0x02 +#define PCDR_PCD0 0x01 + +#define PCRR_PCR7 0x80 +#define PCRR_PCR6 0x40 +#define PCRR_PCR5 0x20 +#define PCRR_PCR4 0x10 +#define PCRR_PCR3 0x08 +#define PCRR_PCR2 0x04 +#define PCRR_PCR1 0x02 +#define PCRR_PCR0 0x01 + + +/* LIM2 (Line Interface Mode 2) + + 
------------------ E1 & T1 ---------------------------- */ + +#define LIM2_DJA2 0x20 +#define LIM2_DJA1 0x10 +#define LIM2_LOS2 0x02 +#define LIM2_LOS1 0x01 + +/* LCR1 (Loop Code Register 1) */ + +#define LCR1_EPRM 0x80 +#define LCR1_XPRBS 0x40 + +/* SIC1 (System Interface Control 1) */ +#define SIC1_SRSC 0x80 +#define SIC1_RBS1 0x20 +#define SIC1_RBS0 0x10 +#define SIC1_SXSC 0x08 +#define SIC1_XBS1 0x02 +#define SIC1_XBS0 0x01 + +/* DEC (Disable Error Counter) + ------------------ E1 & T1 ---------------------------- */ + +#define DEC_DCEC3 0x20 +#define DEC_DBEC 0x10 +#define DEC_DCEC1 0x08 +#define DEC_DCEC 0x08 +#define DEC_DEBC 0x04 +#define DEC_DCVC 0x02 +#define DEC_DFEC 0x01 + + +/* FALC Register Bits (Receive Mode) + ---------------------------------------------------------------------------- */ + + +/* FRS0/1 (Framer Receive Status Register 0/1) + ----------------- E1 & T1 ---------------------------------- */ + +#define FRS0_LOS 0x80 +#define FRS0_AIS 0x40 +#define FRS0_LFA 0x20 +#define FRS0_RRA 0x10 +#define FRS0_API 0x08 +#define FRS0_NMF 0x04 +#define FRS0_LMFA 0x02 +#define FRS0_FSRF 0x01 + +#define FRS1_TS16RA 0x40 +#define FRS1_TS16LOS 0x20 +#define FRS1_TS16AIS 0x10 +#define FRS1_TS16LFA 0x08 +#define FRS1_EXZD 0x80 +#define FRS1_LLBDD 0x10 +#define FRS1_LLBAD 0x08 +#define FRS1_XLS 0x02 +#define FRS1_XLO 0x01 +#define FRS1_PDEN 0x40 + +/* FRS2/3 (Framer Receive Status Register 2/3) + ----------------- T1 ---------------------------------- */ + +#define FRS2_ESC2 0x80 +#define FRS2_ESC1 0x40 +#define FRS2_ESC0 0x20 + +#define FRS3_FEH5 0x20 +#define FRS3_FEH4 0x10 +#define FRS3_FEH3 0x08 +#define FRS3_FEH2 0x04 +#define FRS3_FEH1 0x02 +#define FRS3_FEH0 0x01 + + +/* RSW (Receive Service Word Pulseframe) + ----------------- E1 ------------------------------ */ + +#define RSW_RSI 0x80 +#define RSW_RRA 0x20 +#define RSW_RYO 0x10 +#define RSW_RY1 0x08 +#define RSW_RY2 0x04 +#define RSW_RY3 0x02 +#define RSW_RY4 0x01 + + +/* RSP (Receive Spare Bits / Additional Status) + ---------------- E1 ------------------------------- */ + +#define RSP_SI1 0x80 +#define RSP_SI2 0x40 +#define RSP_LLBDD 0x10 +#define RSP_LLBAD 0x08 +#define RSP_RSIF 0x04 +#define RSP_RS13 0x02 +#define RSP_RS15 0x01 + + +/* FECL (Framing Error Counter) + ---------------- E1 & T1 -------------------------- */ + +#define FECL_FE7 0x80 +#define FECL_FE6 0x40 +#define FECL_FE5 0x20 +#define FECL_FE4 0x10 +#define FECL_FE3 0x08 +#define FECL_FE2 0x04 +#define FECL_FE1 0x02 +#define FECL_FE0 0x01 + +#define FECH_FE15 0x80 +#define FECH_FE14 0x40 +#define FECH_FE13 0x20 +#define FECH_FE12 0x10 +#define FECH_FE11 0x08 +#define FECH_FE10 0x04 +#define FECH_FE9 0x02 +#define FECH_FE8 0x01 + + +/* CVCl (Code Violation Counter) + ----------------- E1 ------------------------- */ + +#define CVCL_CV7 0x80 +#define CVCL_CV6 0x40 +#define CVCL_CV5 0x20 +#define CVCL_CV4 0x10 +#define CVCL_CV3 0x08 +#define CVCL_CV2 0x04 +#define CVCL_CV1 0x02 +#define CVCL_CV0 0x01 + +#define CVCH_CV15 0x80 +#define CVCH_CV14 0x40 +#define CVCH_CV13 0x20 +#define CVCH_CV12 0x10 +#define CVCH_CV11 0x08 +#define CVCH_CV10 0x04 +#define CVCH_CV9 0x02 +#define CVCH_CV8 0x01 + + +/* CEC1-3L (CRC Error Counter) + ------------------ E1 ----------------------------- */ + +#define CEC1L_CR7 0x80 +#define CEC1L_CR6 0x40 +#define CEC1L_CR5 0x20 +#define CEC1L_CR4 0x10 +#define CEC1L_CR3 0x08 +#define CEC1L_CR2 0x04 +#define CEC1L_CR1 0x02 +#define CEC1L_CR0 0x01 + +#define CEC1H_CR15 0x80 +#define CEC1H_CR14 0x40 +#define CEC1H_CR13 0x20 +#define 
CEC1H_CR12 0x10 +#define CEC1H_CR11 0x08 +#define CEC1H_CR10 0x04 +#define CEC1H_CR9 0x02 +#define CEC1H_CR8 0x01 + +#define CEC2L_CR7 0x80 +#define CEC2L_CR6 0x40 +#define CEC2L_CR5 0x20 +#define CEC2L_CR4 0x10 +#define CEC2L_CR3 0x08 +#define CEC2L_CR2 0x04 +#define CEC2L_CR1 0x02 +#define CEC2L_CR0 0x01 + +#define CEC2H_CR15 0x80 +#define CEC2H_CR14 0x40 +#define CEC2H_CR13 0x20 +#define CEC2H_CR12 0x10 +#define CEC2H_CR11 0x08 +#define CEC2H_CR10 0x04 +#define CEC2H_CR9 0x02 +#define CEC2H_CR8 0x01 + +#define CEC3L_CR7 0x80 +#define CEC3L_CR6 0x40 +#define CEC3L_CR5 0x20 +#define CEC3L_CR4 0x10 +#define CEC3L_CR3 0x08 +#define CEC3L_CR2 0x04 +#define CEC3L_CR1 0x02 +#define CEC3L_CR0 0x01 + +#define CEC3H_CR15 0x80 +#define CEC3H_CR14 0x40 +#define CEC3H_CR13 0x20 +#define CEC3H_CR12 0x10 +#define CEC3H_CR11 0x08 +#define CEC3H_CR10 0x04 +#define CEC3H_CR9 0x02 +#define CEC3H_CR8 0x01 + + +/* CECL (CRC Error Counter) + + ------------------ T1 ----------------------------- */ + +#define CECL_CR7 0x80 +#define CECL_CR6 0x40 +#define CECL_CR5 0x20 +#define CECL_CR4 0x10 +#define CECL_CR3 0x08 +#define CECL_CR2 0x04 +#define CECL_CR1 0x02 +#define CECL_CR0 0x01 + +#define CECH_CR15 0x80 +#define CECH_CR14 0x40 +#define CECH_CR13 0x20 +#define CECH_CR12 0x10 +#define CECH_CR11 0x08 +#define CECH_CR10 0x04 +#define CECH_CR9 0x02 +#define CECH_CR8 0x01 + +/* EBCL (E Bit Error Counter) + ------------------- E1 & T1 ------------------------- */ + +#define EBCL_EB7 0x80 +#define EBCL_EB6 0x40 +#define EBCL_EB5 0x20 +#define EBCL_EB4 0x10 +#define EBCL_EB3 0x08 +#define EBCL_EB2 0x04 +#define EBCL_EB1 0x02 +#define EBCL_EB0 0x01 + +#define EBCH_EB15 0x80 +#define EBCH_EB14 0x40 +#define EBCH_EB13 0x20 +#define EBCH_EB12 0x10 +#define EBCH_EB11 0x08 +#define EBCH_EB10 0x04 +#define EBCH_EB9 0x02 +#define EBCH_EB8 0x01 + + +/* RSA4-8 (Receive Sa4-8-Bit Register) + -------------------- E1 --------------------------- */ + +#define RSA4_RS47 0x80 +#define RSA4_RS46 0x40 +#define RSA4_RS45 0x20 +#define RSA4_RS44 0x10 +#define RSA4_RS43 0x08 +#define RSA4_RS42 0x04 +#define RSA4_RS41 0x02 +#define RSA4_RS40 0x01 + +#define RSA5_RS57 0x80 +#define RSA5_RS56 0x40 +#define RSA5_RS55 0x20 +#define RSA5_RS54 0x10 +#define RSA5_RS53 0x08 +#define RSA5_RS52 0x04 +#define RSA5_RS51 0x02 +#define RSA5_RS50 0x01 + +#define RSA6_RS67 0x80 +#define RSA6_RS66 0x40 +#define RSA6_RS65 0x20 +#define RSA6_RS64 0x10 +#define RSA6_RS63 0x08 +#define RSA6_RS62 0x04 +#define RSA6_RS61 0x02 +#define RSA6_RS60 0x01 + +#define RSA7_RS77 0x80 +#define RSA7_RS76 0x40 +#define RSA7_RS75 0x20 +#define RSA7_RS74 0x10 +#define RSA7_RS73 0x08 +#define RSA7_RS72 0x04 +#define RSA7_RS71 0x02 +#define RSA7_RS70 0x01 + +#define RSA8_RS87 0x80 +#define RSA8_RS86 0x40 +#define RSA8_RS85 0x20 +#define RSA8_RS84 0x10 +#define RSA8_RS83 0x08 +#define RSA8_RS82 0x04 +#define RSA8_RS81 0x02 +#define RSA8_RS80 0x01 + +/* RSA6S (Receive Sa6 Bit Status Register) + ------------------------ T1 ------------------------- */ + +#define RSA6S_SX 0x20 +#define RSA6S_SF 0x10 +#define RSA6S_SE 0x08 +#define RSA6S_SC 0x04 +#define RSA6S_SA 0x02 +#define RSA6S_S8 0x01 + + +/* RDL1-3 Receive DL-Bit Register1-3) + ------------------------ T1 ------------------------- */ + +#define RDL1_RDL17 0x80 +#define RDL1_RDL16 0x40 +#define RDL1_RDL15 0x20 +#define RDL1_RDL14 0x10 +#define RDL1_RDL13 0x08 +#define RDL1_RDL12 0x04 +#define RDL1_RDL11 0x02 +#define RDL1_RDL10 0x01 + +#define RDL2_RDL27 0x80 +#define RDL2_RDL26 0x40 +#define RDL2_RDL25 0x20 +#define 
RDL2_RDL24 0x10 +#define RDL2_RDL23 0x08 +#define RDL2_RDL22 0x04 +#define RDL2_RDL21 0x02 +#define RDL2_RDL20 0x01 + +#define RDL3_RDL37 0x80 +#define RDL3_RDL36 0x40 +#define RDL3_RDL35 0x20 +#define RDL3_RDL34 0x10 +#define RDL3_RDL33 0x08 +#define RDL3_RDL32 0x04 +#define RDL3_RDL31 0x02 +#define RDL3_RDL30 0x01 + + +/* SIS (Signaling Status Register) + + -------------------- E1 & T1 -------------------------- */ + +#define SIS_XDOV 0x80 +#define SIS_XFW 0x40 +#define SIS_XREP 0x20 +#define SIS_RLI 0x08 +#define SIS_CEC 0x04 +#define SIS_BOM 0x01 + + +/* RSIS (Receive Signaling Status Register) + + -------------------- E1 & T1 --------------------------- */ + +#define RSIS_VFR 0x80 +#define RSIS_RDO 0x40 +#define RSIS_CRC16 0x20 +#define RSIS_RAB 0x10 +#define RSIS_HA1 0x08 +#define RSIS_HA0 0x04 +#define RSIS_HFR 0x02 +#define RSIS_LA 0x01 + + +/* RBCL/H (Receive Byte Count Low/High) + + ------------------- E1 & T1 ----------------------- */ + +#define RBCL_RBC7 0x80 +#define RBCL_RBC6 0x40 +#define RBCL_RBC5 0x20 +#define RBCL_RBC4 0x10 +#define RBCL_RBC3 0x08 +#define RBCL_RBC2 0x04 +#define RBCL_RBC1 0x02 +#define RBCL_RBC0 0x01 + +#define RBCH_OV 0x10 +#define RBCH_RBC11 0x08 +#define RBCH_RBC10 0x04 +#define RBCH_RBC9 0x02 +#define RBCH_RBC8 0x01 + + +/* ISR1-3 (Interrupt Status Register 1-3) + + ------------------ E1 & T1 ------------------------------ */ + +#define FISR0_RME 0x80 +#define FISR0_RFS 0x40 +#define FISR0_T8MS 0x20 +#define FISR0_ISF 0x20 +#define FISR0_RMB 0x10 +#define FISR0_CASC 0x08 +#define FISR0_RSC 0x08 +#define FISR0_CRC6 0x04 +#define FISR0_CRC4 0x04 +#define FISR0_PDEN 0x02 +#define FISR0_RPF 0x01 + +#define FISR1_CASE 0x80 +#define FISR1_LLBSC 0x80 +#define FISR1_RDO 0x40 +#define FISR1_ALLS 0x20 +#define FISR1_XDU 0x10 +#define FISR1_XMB 0x08 +#define FISR1_XLSC 0x02 +#define FISR1_XPR 0x01 + +#define FISR2_FAR 0x80 +#define FISR2_LFA 0x40 +#define FISR2_MFAR 0x20 +#define FISR2_T400MS 0x10 +#define FISR2_LMFA 0x10 +#define FISR2_AIS 0x08 +#define FISR2_LOS 0x04 +#define FISR2_RAR 0x02 +#define FISR2_RA 0x01 + +#define FISR3_ES 0x80 +#define FISR3_SEC 0x40 +#define FISR3_LMFA16 0x20 +#define FISR3_AIS16 0x10 +#define FISR3_RA16 0x08 +#define FISR3_API 0x04 +#define FISR3_XSLP 0x20 +#define FISR3_XSLN 0x10 +#define FISR3_LLBSC 0x08 +#define FISR3_XRS 0x04 +#define FISR3_SLN 0x02 +#define FISR3_SLP 0x01 + + +/* GIS (Global Interrupt Status Register) + + --------------------- E1 & T1 --------------------- */ + +#define GIS_ISR3 0x08 +#define GIS_ISR2 0x04 +#define GIS_ISR1 0x02 +#define GIS_ISR0 0x01 + + +/* VSTR (Version Status Register) + + --------------------- E1 & T1 --------------------- */ + +#define VSTR_VN3 0x08 +#define VSTR_VN2 0x04 +#define VSTR_VN1 0x02 +#define VSTR_VN0 0x01 + + +/*>>>>>>>>>>>>>>>>>>>>> Local Control Structures <<<<<<<<<<<<<<<<<<<<<<<<< */ + +/* Write-only Registers (E1/T1 control mode write registers) */ +#define XFIFOH 0x00 /* Tx FIFO High Byte */ +#define XFIFOL 0x01 /* Tx FIFO Low Byte */ +#define CMDR 0x02 /* Command Reg */ +#define DEC 0x60 /* Disable Error Counter */ +#define TEST2 0x62 /* Manuf. 
Test Reg 2 */ +#define XS(nbr) (0x70 + (nbr)) /* Tx CAS Reg (0 to 15) */ + +/* Read-write Registers (E1/T1 status mode read registers) */ +#define MODE 0x03 /* Mode Reg */ +#define RAH1 0x04 /* Receive Address High 1 */ +#define RAH2 0x05 /* Receive Address High 2 */ +#define RAL1 0x06 /* Receive Address Low 1 */ +#define RAL2 0x07 /* Receive Address Low 2 */ +#define IPC 0x08 /* Interrupt Port Configuration */ +#define CCR1 0x09 /* Common Configuration Reg 1 */ +#define CCR3 0x0A /* Common Configuration Reg 3 */ +#define PRE 0x0B /* Preamble Reg */ +#define RTR1 0x0C /* Receive Timeslot Reg 1 */ +#define RTR2 0x0D /* Receive Timeslot Reg 2 */ +#define RTR3 0x0E /* Receive Timeslot Reg 3 */ +#define RTR4 0x0F /* Receive Timeslot Reg 4 */ +#define TTR1 0x10 /* Transmit Timeslot Reg 1 */ +#define TTR2 0x11 /* Transmit Timeslot Reg 2 */ +#define TTR3 0x12 /* Transmit Timeslot Reg 3 */ +#define TTR4 0x13 /* Transmit Timeslot Reg 4 */ +#define IMR0 0x14 /* Interrupt Mask Reg 0 */ +#define IMR1 0x15 /* Interrupt Mask Reg 1 */ +#define IMR2 0x16 /* Interrupt Mask Reg 2 */ +#define IMR3 0x17 /* Interrupt Mask Reg 3 */ +#define IMR4 0x18 /* Interrupt Mask Reg 4 */ +#define IMR5 0x19 /* Interrupt Mask Reg 5 */ +#define FMR0 0x1A /* Framer Mode Register 0 */ +#define FMR1 0x1B /* Framer Mode Register 1 */ +#define FMR2 0x1C /* Framer Mode Register 2 */ +#define LOOP 0x1D /* Channel Loop Back */ +#define XSW 0x1E /* Transmit Service Word */ +#define FMR4 0x1E /* Framer Mode Reg 4 */ +#define XSP 0x1F /* Transmit Spare Bits */ +#define FMR5 0x1F /* Framer Mode Reg 5 */ +#define XC0 0x20 /* Transmit Control 0 */ +#define XC1 0x21 /* Transmit Control 1 */ +#define RC0 0x22 /* Receive Control 0 */ +#define RC1 0x23 /* Receive Control 1 */ +#define XPM0 0x24 /* Transmit Pulse Mask 0 */ +#define XPM1 0x25 /* Transmit Pulse Mask 1 */ +#define XPM2 0x26 /* Transmit Pulse Mask 2 */ +#define TSWM 0x27 /* Transparent Service Word Mask */ +#define TEST1 0x28 /* Manuf. Test Reg 1 */ +#define IDLE 0x29 /* Idle Channel Code */ +#define XSA4 0x2A /* Transmit SA4 Bit Reg */ +#define XDL1 0x2A /* Transmit DL-Bit Reg 1 */ +#define XSA5 0x2B /* Transmit SA5 Bit Reg */ +#define XDL2 0x2B /* Transmit DL-Bit Reg 2 */ +#define XSA6 0x2C /* Transmit SA6 Bit Reg */ +#define XDL3 0x2C /* Transmit DL-Bit Reg 3 */ +#define XSA7 0x2D /* Transmit SA7 Bit Reg */ +#define CCB1 0x2D /* Clear Channel Reg 1 */ +#define XSA8 0x2E /* Transmit SA8 Bit Reg */ +#define CCB2 0x2E /* Clear Channel Reg 2 */ +#define FMR3 0x2F /* Framer Mode Reg.
3 */ +#define CCB3 0x2F /* Clear Channel Reg 3 */ +#define ICB1 0x30 /* Idle Channel Reg 1 */ +#define ICB2 0x31 /* Idle Channel Reg 2 */ +#define ICB3 0x32 /* Idle Channel Reg 3 */ +#define ICB4 0x33 /* Idle Channel Reg 4 */ +#define LIM0 0x34 /* Line Interface Mode 0 */ +#define LIM1 0x35 /* Line Interface Mode 1 */ +#define PCDR 0x36 /* Pulse Count Detection */ +#define PCRR 0x37 /* Pulse Count Recovery */ +#define LIM2 0x38 /* Line Interface Mode Reg 2 */ +#define LCR1 0x39 /* Loop Code Reg 1 */ +#define LCR2 0x3A /* Loop Code Reg 2 */ +#define LCR3 0x3B /* Loop Code Reg 3 */ +#define SIC1 0x3C /* System Interface Control 1 */ + +/* Read-only Registers (E1/T1 control mode read registers) */ +#define RFIFOH 0x00 /* Receive FIFO */ +#define RFIFOL 0x01 /* Receive FIFO */ +#define FRS0 0x4C /* Framer Receive Status 0 */ +#define FRS1 0x4D /* Framer Receive Status 1 */ +#define RSW 0x4E /* Receive Service Word */ +#define FRS2 0x4E /* Framer Receive Status 2 */ +#define RSP 0x4F /* Receive Spare Bits */ +#define FRS3 0x4F /* Framer Receive Status 3 */ +#define FECL 0x50 /* Framing Error Counter */ +#define FECH 0x51 /* Framing Error Counter */ +#define CVCL 0x52 /* Code Violation Counter */ +#define CVCH 0x53 /* Code Violation Counter */ +#define CECL 0x54 /* CRC Error Counter 1 */ +#define CECH 0x55 /* CRC Error Counter 1 */ +#define EBCL 0x56 /* E-Bit Error Counter */ +#define EBCH 0x57 /* E-Bit Error Counter */ +#define BECL 0x58 /* Bit Error Counter Low */ +#define BECH 0x59 /* Bit Error Counter High */ +#define CEC3 0x5A /* CRC Error Counter 3 (16-bit) */ +#define RSA4 0x5C /* Receive SA4 Bit Reg */ +#define RDL1 0x5C /* Receive DL-Bit Reg 1 */ +#define RSA5 0x5D /* Receive SA5 Bit Reg */ +#define RDL2 0x5D /* Receive DL-Bit Reg 2 */ +#define RSA6 0x5E /* Receive SA6 Bit Reg */ +#define RDL3 0x5E /* Receive DL-Bit Reg 3 */ +#define RSA7 0x5F /* Receive SA7 Bit Reg */ +#define RSA8 0x60 /* Receive SA8 Bit Reg */ +#define RSA6S 0x61 /* Receive SA6 Bit Status Reg */ +#define TSR0 0x62 /* Manuf. Test Reg 0 */ +#define TSR1 0x63 /* Manuf. Test Reg 1 */ +#define SIS 0x64 /* Signaling Status Reg */ +#define RSIS 0x65 /* Receive Signaling Status Reg */ +#define RBCL 0x66 /* Receive Byte Count Low */ +#define RBCH 0x67 /* Receive Byte Count High */ +#define FISR0 0x68 /* Interrupt Status Reg 0 */ +#define FISR1 0x69 /* Interrupt Status Reg 1 */ +#define FISR2 0x6A /* Interrupt Status Reg 2 */ +#define FISR3 0x6B /* Interrupt Status Reg 3 */ +#define GIS 0x6E /* Global Interrupt Status */ +#define VSTR 0x6F /* Version Status */ +#define RS(nbr) (0x70 + (nbr)) /* Rx CAS Reg (0 to 15) */ + +#endif /* _FALC_LH_H */ + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/pc300.h linux.21rc5-ac2/drivers/net/wan/pc300.h --- linux.21rc5/drivers/net/wan/pc300.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/pc300.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,428 @@ +/* + * pc300.h Cyclades-PC300(tm) Kernel API Definitions. + * + * Author: Ivan Passos + * + * Copyright: (c) 1999-2002 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version.
+ * + */ + +#ifndef _PC300_H +#define _PC300_H + +#ifndef __HDLC_IOCTL_H__ +#include +#endif + +#ifndef __HDLC_H +#include +#endif + +#ifndef _HD64572_H +#include "hd64572.h" +#endif +#ifndef _FALC_LH_H +#include "pc300-falc-lh.h" +#endif + +#ifndef CY_TYPES +#define CY_TYPES +#if defined(__alpha__) +typedef unsigned long ucdouble; /* 64 bits, unsigned */ +typedef unsigned int uclong; /* 32 bits, unsigned */ +#else +typedef unsigned long uclong; /* 32 bits, unsigned */ +#endif +typedef unsigned short ucshort; /* 16 bits, unsigned */ +typedef unsigned char ucchar; /* 8 bits, unsigned */ +#endif /* CY_TYPES */ + +#define PC300_PROTO_MLPPP 1 + +#define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */ + +#define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */ +#define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */ + +#define PC300_MAXCARDS 4 /* Max number of cards per system */ +#define PC300_MAXCHAN 2 /* Number of channels per card */ + +#define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */ +#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ +#define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */ +#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ + +#define PC300_OSC_CLOCK 24576000 +#define PC300_PCI_CLOCK 33000000 + +#define BD_DEF_LEN 0x0800 /* DMA buffer length (2KB) */ +#define DMA_TX_MEMSZ 0x8000 /* Total DMA Tx memory size (32KB/ch) */ +#define DMA_RX_MEMSZ 0x10000 /* Total DMA Rx memory size (64KB/ch) */ + +#define N_DMA_TX_BUF (DMA_TX_MEMSZ / BD_DEF_LEN) /* DMA Tx buffers */ +#define N_DMA_RX_BUF (DMA_RX_MEMSZ / BD_DEF_LEN) /* DMA Rx buffers */ + +/* DMA Buffer Offsets */ +#define DMA_TX_BASE ((N_DMA_TX_BUF + N_DMA_RX_BUF) * \ + PC300_MAXCHAN * sizeof(pcsca_bd_t)) +#define DMA_RX_BASE (DMA_TX_BASE + PC300_MAXCHAN*DMA_TX_MEMSZ) + +/* DMA Descriptor Offsets */ +#define DMA_TX_BD_BASE 0x0000 +#define DMA_RX_BD_BASE (DMA_TX_BD_BASE + ((PC300_MAXCHAN*DMA_TX_MEMSZ / \ + BD_DEF_LEN) * sizeof(pcsca_bd_t))) + +/* DMA Descriptor Macros */ +#define TX_BD_ADDR(chan, n) (DMA_TX_BD_BASE + \ + ((N_DMA_TX_BUF*chan) + n) * sizeof(pcsca_bd_t)) +#define RX_BD_ADDR(chan, n) (DMA_RX_BD_BASE + \ + ((N_DMA_RX_BUF*chan) + n) * sizeof(pcsca_bd_t)) + +/* Macro to access the FALC registers (TE only) */ +#define F_REG(reg, chan) (0x200*(chan) + ((reg)<<2)) + +/*************************************** + * Memory access functions/macros * + * (required to support Alpha systems) * + ***************************************/ +#ifdef __KERNEL__ +#define cpc_writeb(port,val) {writeb((ucchar)(val),(ulong)(port)); mb();} +#define cpc_writew(port,val) {writew((ushort)(val),(ulong)(port)); mb();} +#define cpc_writel(port,val) {writel((uclong)(val),(ulong)(port)); mb();} + +#define cpc_readb(port) readb(port) +#define cpc_readw(port) readw(port) +#define cpc_readl(port) readl(port) + +#else /* __KERNEL__ */ +#define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val)) +#define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val)) +#define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val)) + +#define cpc_readb(port) (*(volatile ucchar *)(port)) +#define cpc_readw(port) (*(volatile ucshort *)(port)) +#define cpc_readl(port) (*(volatile uclong *)(port)) + +#endif /* __KERNEL__ */ + +/****** Data Structures *****************************************************/ + +/* + * RUNTIME_9050 - PLX PCI9050-1 local configuration and shared runtime + * registers. 
This structure can be used to access the 9050 registers + * (memory mapped). + */ +struct RUNTIME_9050 { + uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ + uclong loc_rom_range; /* 10h : Local ROM Range */ + uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ + uclong loc_rom_base; /* 24h : Local ROM Base */ + uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ + uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */ + uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ + uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ + uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ +}; + +#define PLX_9050_LINT1_ENABLE 0x01 +#define PLX_9050_LINT1_POL 0x02 +#define PLX_9050_LINT1_STATUS 0x04 +#define PLX_9050_LINT2_ENABLE 0x08 +#define PLX_9050_LINT2_POL 0x10 +#define PLX_9050_LINT2_STATUS 0x20 +#define PLX_9050_INTR_ENABLE 0x40 +#define PLX_9050_SW_INTR 0x80 + +/* Masks to access the init_ctrl PLX register */ +#define PC300_CLKSEL_MASK (0x00000004UL) +#define PC300_CHMEDIA_MASK(chan) (0x00000020UL<<(chan*3)) +#define PC300_CTYPE_MASK (0x00000800UL) + +/* CPLD Registers (base addr = falcbase, TE only) */ +/* CPLD v. 0 */ +#define CPLD_REG1 0x140 /* Chip resets, DCD/CTS status */ +#define CPLD_REG2 0x144 /* Clock enable , LED control */ +/* CPLD v. 2 or higher */ +#define CPLD_V2_REG1 0x100 /* Chip resets, DCD/CTS status */ +#define CPLD_V2_REG2 0x104 /* Clock enable , LED control */ +#define CPLD_ID_REG 0x108 /* CPLD version */ + +/* CPLD Register bit description: for the FALC bits, they should always be + set based on the channel (use (bit<<(2*ch)) to access the correct bit for + that channel) */ +#define CPLD_REG1_FALC_RESET 0x01 +#define CPLD_REG1_SCA_RESET 0x02 +#define CPLD_REG1_GLOBAL_CLK 0x08 +#define CPLD_REG1_FALC_DCD 0x10 +#define CPLD_REG1_FALC_CTS 0x20 + +#define CPLD_REG2_FALC_TX_CLK 0x01 +#define CPLD_REG2_FALC_RX_CLK 0x02 +#define CPLD_REG2_FALC_LED1 0x10 +#define CPLD_REG2_FALC_LED2 0x20 + +/* Structure with FALC-related fields (TE only) */ +#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ + +typedef struct falc { + ucchar sync; /* If true FALC is synchronized */ + ucchar active; /* if TRUE then already active */ + ucchar loop_active; /* if TRUE a line loopback UP was received */ + ucchar loop_gen; /* if TRUE a line loopback UP was issued */ + + ucchar num_channels; + ucchar offset; /* 1 for T1, 0 for E1 */ + ucchar full_bandwidth; + + ucchar xmb_cause; + ucchar multiframe_mode; + + /* Statistics */ + ucshort pden; /* Pulse Density violation count */ + ucshort los; /* Loss of Signal count */ + ucshort losr; /* Loss of Signal recovery count */ + ucshort lfa; /* Loss of frame alignment count */ + ucshort farec; /* Frame Alignment Recovery count */ + ucshort lmfa; /* Loss of multiframe alignment count */ + ucshort ais; /* Remote Alarm indication Signal count */ + ucshort sec; /* One-second timer */ + ucshort es; /* Errored second */ + ucshort rai; /* remote alarm received */ + ucshort bec; + ucshort fec; + ucshort cvc; + ucshort cec; + ucshort ebc; + + /* Status */ + ucchar red_alarm; + ucchar blue_alarm; + ucchar loss_fa; + ucchar yellow_alarm; + ucchar loss_mfa; + ucchar prbs; +} falc_t; + +typedef struct falc_status { + ucchar sync; /* If true FALC is synchronized */ + ucchar red_alarm; + ucchar blue_alarm; + ucchar loss_fa; + ucchar yellow_alarm; + ucchar loss_mfa; + ucchar prbs; +} falc_status_t; + +typedef struct rsv_x21_status { + ucchar dcd; + ucchar dsr; + ucchar cts; + ucchar rts; + ucchar dtr; +} 
rsv_x21_status_t; + +typedef struct pc300stats { + int hw_type; + uclong line_on; + uclong line_off; + struct net_device_stats gen_stats; + falc_t te_stats; +} pc300stats_t; + +typedef struct pc300status { + int hw_type; + rsv_x21_status_t gen_status; + falc_status_t te_status; +} pc300status_t; + +typedef struct pc300loopback { + char loop_type; + char loop_on; +} pc300loopback_t; + +typedef struct pc300patterntst { + char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ + ucshort num_errors; +} pc300patterntst_t; + +typedef struct pc300dev { + void *if_ptr; /* General purpose pointer */ + struct pc300ch *chan; + ucchar trace_on; + uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ + uclong line_off; +#ifdef __KERNEL__ + char name[16]; + hdlc_device *hdlc; + + void *private; + struct sk_buff *tx_skb; + union { /* This union has all the protocol-specific structures */ + struct ppp_device pppdev; + }ifu; +#ifdef CONFIG_PC300_MLPPP + void *cpc_tty; /* information to PC300 TTY driver */ +#endif +#endif /* __KERNEL__ */ +}pc300dev_t; + +typedef struct pc300hw { + int type; /* RSV, X21, etc. */ + int bus; /* Bus (PCI, PMC, etc.) */ + int nchan; /* number of channels */ + int irq; /* interrupt request level */ + uclong clock; /* Board clock */ + ucchar cpld_id; /* CPLD ID (TE only) */ + ucshort cpld_reg1; /* CPLD reg 1 (TE only) */ + ucshort cpld_reg2; /* CPLD reg 2 (TE only) */ + ucshort gpioc_reg; /* PLX GPIOC reg */ + ucshort intctl_reg; /* PLX Int Ctrl/Status reg */ + uclong iophys; /* PLX registers I/O base */ + uclong iosize; /* PLX registers I/O size */ + uclong plxphys; /* PLX registers MMIO base (physical) */ + uclong plxbase; /* PLX registers MMIO base (virtual) */ + uclong plxsize; /* PLX registers MMIO size */ + uclong scaphys; /* SCA registers MMIO base (physical) */ + uclong scabase; /* SCA registers MMIO base (virtual) */ + uclong scasize; /* SCA registers MMIO size */ + uclong ramphys; /* On-board RAM MMIO base (physical) */ + uclong rambase; /* On-board RAM MMIO base (virtual) */ + uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ + uclong ramsize; /* On-board RAM MMIO size */ + uclong falcphys; /* FALC registers MMIO base (physical) */ + uclong falcbase; /* FALC registers MMIO base (virtual) */ + uclong falcsize; /* FALC registers MMIO size */ +} pc300hw_t; + +typedef struct pc300chconf { + sync_serial_settings phys_settings; /* Clock type/rate (in bps), + loopback mode */ + raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ + uclong media; /* HW media (RS232, V.35, etc.) */ + uclong proto; /* Protocol (PPP, X.25, etc.) */ + ucchar monitor; /* Monitor mode (0 = off, !0 = on) */ + + /* TE-specific parameters */ + ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */ + ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */ + ucchar lbo; /* Line Build Out */ + ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */ + uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ +} pc300chconf_t; + +typedef struct pc300ch { + struct pc300 *card; + int channel; + pc300dev_t d; + pc300chconf_t conf; + ucchar tx_first_bd; /* First TX DMA block descr. w/ data */ + ucchar tx_next_bd; /* Next free TX DMA block descriptor */ + ucchar rx_first_bd; /* First free RX DMA block descriptor */ + ucchar rx_last_bd; /* Last free RX DMA block descriptor */ + ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */ + falc_t falc; /* FALC structure (TE only) */ +} pc300ch_t; + +typedef struct pc300 { + pc300hw_t hw; /* hardware config. 
*/ + pc300ch_t chan[PC300_MAXCHAN]; +#ifdef __KERNEL__ + spinlock_t card_lock; +#endif /* __KERNEL__ */ +} pc300_t; + +typedef struct pc300conf { + pc300hw_t hw; + pc300chconf_t conf; +} pc300conf_t; + +/* DEV ioctl() commands */ +#define N_SPPP_IOCTLS 2 + +enum pc300_ioctl_cmds { + SIOCCPCRESERVED = (SIOCDEVPRIVATE + N_SPPP_IOCTLS), + SIOCGPC300CONF, + SIOCSPC300CONF, + SIOCGPC300STATUS, + SIOCGPC300FALCSTATUS, + SIOCGPC300UTILSTATS, + SIOCGPC300UTILSTATUS, + SIOCSPC300TRACE, + SIOCSPC300LOOPBACK, + SIOCSPC300PATTERNTEST, +}; + +/* Loopback types - PC300/TE boards */ +enum pc300_loopback_cmds { + PC300LOCLOOP = 1, + PC300REMLOOP, + PC300PAYLOADLOOP, + PC300GENLOOPUP, + PC300GENLOOPDOWN, +}; + +/* Control Constant Definitions */ +#define PC300_RSV 0x01 +#define PC300_X21 0x02 +#define PC300_TE 0x03 + +#define PC300_PCI 0x00 +#define PC300_PMC 0x01 + +#define PC300_LC_AMI 0x01 +#define PC300_LC_B8ZS 0x02 +#define PC300_LC_NRZ 0x03 +#define PC300_LC_HDB3 0x04 + +/* Framing (T1) */ +#define PC300_FR_ESF 0x01 +#define PC300_FR_D4 0x02 +#define PC300_FR_ESF_JAPAN 0x03 + +/* Framing (E1) */ +#define PC300_FR_MF_CRC4 0x04 +#define PC300_FR_MF_NON_CRC4 0x05 +#define PC300_FR_UNFRAMED 0x06 + +#define PC300_LBO_0_DB 0x00 +#define PC300_LBO_7_5_DB 0x01 +#define PC300_LBO_15_DB 0x02 +#define PC300_LBO_22_5_DB 0x03 + +#define PC300_RX_SENS_SH 0x01 +#define PC300_RX_SENS_LH 0x02 + +#define PC300_TX_TIMEOUT (2*HZ) +#define PC300_TX_QUEUE_LEN 100 +#define PC300_DEF_MTU 1600 + +#ifdef __KERNEL__ +/* Function Prototypes */ +int dma_buf_write(pc300_t *, int, ucchar *, int); +int dma_buf_read(pc300_t *, int, struct sk_buff *); +void tx_dma_start(pc300_t *, int); +void rx_dma_start(pc300_t *, int); +void tx_dma_stop(pc300_t *, int); +void rx_dma_stop(pc300_t *, int); +int cpc_queue_xmit(struct sk_buff *, struct net_device *); +void cpc_net_rx(hdlc_device *); +void cpc_sca_status(pc300_t *, int); +int cpc_change_mtu(struct net_device *, int); +int cpc_ioctl(struct net_device *, struct ifreq *, int); +int ch_config(pc300dev_t *); +int rx_config(pc300dev_t *); +int tx_config(pc300dev_t *); +void cpc_opench(pc300dev_t *); +void cpc_closech(pc300dev_t *); +int cpc_open(struct net_device *dev); +int cpc_close(struct net_device *dev); +int cpc_set_media(hdlc_device *, int); +#endif /* __KERNEL__ */ + +#endif /* _PC300_H */ + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/pc300_tty.c linux.21rc5-ac2/drivers/net/wan/pc300_tty.c --- linux.21rc5/drivers/net/wan/pc300_tty.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/pc300_tty.c 2003-04-22 16:44:37.000000000 +0100 @@ -0,0 +1,1122 @@ +/* + * pc300_tty.c Cyclades-PC300(tm) TTY Driver. + * + * Author: Regina Kodato + * + * Copyright: (c) 1999-2002 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* TTY includes */ +#include +#include +#include + +#include "pc300.h" + +/* defines and macros */ +/* TTY Global definitions */ +#define CPC_TTY_NPORTS 8 /* maximum number of sync tty connections */ +#define CPC_TTY_MAJOR CYCLADES_MAJOR +#define CPC_TTY_MINOR_START 240 /* minor of the first PC300 interface */ + +#define CPC_TTY_MAX_MTU 2000 + +/* tty interface state */ +#define CPC_TTY_ST_IDLE 0 +#define CPC_TTY_ST_INIT 1 /* configured with MLPPP and up */ +#define CPC_TTY_ST_OPEN 2 /* opened by application */ + +#define CPC_TTY_LOCK(card,flags)\ + do {\ + spin_lock_irqsave(&card->card_lock, flags); \ + } while (0) + +#define CPC_TTY_UNLOCK(card,flags) \ + do {\ + spin_unlock_irqrestore(&card->card_lock, flags); \ + } while (0) + +//#define CPC_TTY_DBG(format,a...) printk(format,##a) +#define CPC_TTY_DBG(format,a...) + +/* data structures */ +typedef struct _st_cpc_rx_buf { + struct _st_cpc_rx_buf *next; + int size; + unsigned char data[1]; +} st_cpc_rx_buf; + +struct st_cpc_rx_list { + st_cpc_rx_buf *first; + st_cpc_rx_buf *last; +}; + +typedef struct _st_cpc_tty_area { + int state; /* state of the TTY interface */ + int num_open; + unsigned int tty_minor; /* minor of this interface */ + volatile struct st_cpc_rx_list buf_rx; /* ptr. to reception buffer */ + unsigned char* buf_tx; /* ptr. to transmission buffer */ + pc300dev_t* pc300dev; /* ptr. to info struct in PC300 driver */ + unsigned char name[20]; /* interf. name + "-tty" */ + struct tty_struct *tty; + struct tq_struct tty_tx_task_queue; /* tx task - tx interrupt */ + struct tq_struct tty_rx_task_queue; /* rx task - rx interrupt */ + } st_cpc_tty_area; + +/* TTY data structures */ +static struct tty_struct *cpc_tty_serial_table[CPC_TTY_NPORTS]; +static struct termios *cpc_tty_serial_termios[CPC_TTY_NPORTS]; +static struct termios *cpc_tty_serial_termios_locked[CPC_TTY_NPORTS]; +static struct tty_driver serial_drv, callout_drv; + +/* local variables */ +st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS]; + +int cpc_tty_cnt=0; /* number of interfaces configured with MLPPP */ +int cpc_tty_refcount; +int cpc_tty_unreg_flag = 0; + +/* TTY functions prototype */ +static int cpc_tty_open(struct tty_struct *tty, struct file *flip); +static void cpc_tty_close(struct tty_struct *tty, struct file *flip); +static int cpc_tty_write(struct tty_struct *tty, int from_user, + const unsigned char *buf, int count); +static int cpc_tty_write_room(struct tty_struct *tty); +static int cpc_tty_chars_in_buffer(struct tty_struct *tty); +static int cpc_tty_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg); +static void cpc_tty_flush_buffer(struct tty_struct *tty); +static void cpc_tty_hangup(struct tty_struct *tty); +static void cpc_tty_rx_task(void *data); +static void cpc_tty_tx_task(void *data); +static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); +static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); +static void cpc_tty_dtr_off(pc300dev_t *pc300dev); +static void cpc_tty_dtr_on(pc300dev_t *pc300dev); + +/* functions called by PC300 driver */ +void cpc_tty_init(pc300dev_t *dev); +void cpc_tty_unregister_service(pc300dev_t *pc300dev); +void cpc_tty_receive(pc300dev_t *pc300dev); +void cpc_tty_trigger_poll(pc300dev_t *pc300dev); +void cpc_tty_reset_var(void); + +/* + * PC300 TTY clear DTR signal + */ +static void
cpc_tty_dtr_off(pc300dev_t *pc300dev) +{ + pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; + pc300_t *card = (pc300_t *) pc300chan->card; + int ch = pc300chan->channel; + unsigned long flags; + + CPC_TTY_DBG("%s-tty: Clear signal DTR\n", + ((struct net_device*)(pc300dev->hdlc))->name); + CPC_TTY_LOCK(card, flags); + cpc_writeb(card->hw.scabase + M_REG(CTL,ch), + cpc_readb(card->hw.scabase+M_REG(CTL,ch))| CTL_DTR); + CPC_TTY_UNLOCK(card,flags); +} + +/* + * PC300 TTY set DTR signal to ON + */ +static void cpc_tty_dtr_on(pc300dev_t *pc300dev) +{ + pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; + pc300_t *card = (pc300_t *) pc300chan->card; + int ch = pc300chan->channel; + unsigned long flags; + + CPC_TTY_DBG("%s-tty: Set signal DTR\n", + ((struct net_device*)(pc300dev->hdlc))->name); + CPC_TTY_LOCK(card, flags); + cpc_writeb(card->hw.scabase + M_REG(CTL,ch), + cpc_readb(card->hw.scabase+M_REG(CTL,ch))& ~CTL_DTR); + CPC_TTY_UNLOCK(card,flags); +} + +/* + * PC300 TTY initialization routine + * + * This routine is called by the PC300 driver during board configuration + * (ioctl=SIOCSPC300CONF). At this point the adapter is completely + * initialized. + * o verify kernel version (only 2.4.x) + * o register TTY driver + * o init cpc_tty_area struct + */ +void cpc_tty_init(pc300dev_t *pc300dev) +{ + int port, aux; + st_cpc_tty_area * cpc_tty; + + if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)) { + printk("%s-tty: Error: TTY driver is only supported on 2.4.x kernels!\n", + ((struct net_device*)(pc300dev->hdlc))->name); + return; + } + + /* hdlcX - X=interface number */ + port = ((struct net_device*)(pc300dev->hdlc))->name[4] - '0'; + if (port >= CPC_TTY_NPORTS) { + printk("%s-tty: invalid interface selected (0-%i): %i", + ((struct net_device*)(pc300dev->hdlc))->name, + CPC_TTY_NPORTS-1,port); + return; + } + + if (cpc_tty_cnt == 0) { /* first TTY connection -> register driver */ + CPC_TTY_DBG("%s-tty: driver init, major:%i, minor range:%i-%i\n", + ((struct net_device*)(pc300dev->hdlc))->name, + CPC_TTY_MAJOR, CPC_TTY_MINOR_START, + CPC_TTY_MINOR_START+CPC_TTY_NPORTS); + /* initialize tty driver struct */ + memset(&serial_drv,0,sizeof(struct tty_driver)); + serial_drv.magic = TTY_DRIVER_MAGIC; + serial_drv.driver_name = "pc300_tty"; + serial_drv.name = "ttyCP"; + serial_drv.major = CPC_TTY_MAJOR; + serial_drv.minor_start = CPC_TTY_MINOR_START; + serial_drv.num = CPC_TTY_NPORTS; + serial_drv.type = TTY_DRIVER_TYPE_SERIAL; + serial_drv.subtype = SERIAL_TYPE_NORMAL; + + serial_drv.init_termios = tty_std_termios; + serial_drv.init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL; + serial_drv.flags = TTY_DRIVER_REAL_RAW; + serial_drv.refcount = &cpc_tty_refcount; + + /* tty data structures */ + serial_drv.table = cpc_tty_serial_table; + serial_drv.termios = cpc_tty_serial_termios; + serial_drv.termios_locked = cpc_tty_serial_termios_locked; + + /* interface routines from the upper tty layer to the tty driver */ + serial_drv.open = cpc_tty_open; + serial_drv.close = cpc_tty_close; + serial_drv.write = cpc_tty_write; + serial_drv.write_room = cpc_tty_write_room; + serial_drv.chars_in_buffer = cpc_tty_chars_in_buffer; + serial_drv.ioctl = cpc_tty_ioctl; + serial_drv.flush_buffer = cpc_tty_flush_buffer; + serial_drv.hangup = cpc_tty_hangup; + + /* the callout device is just like the normal device except for the */ + /* major number and the subtype code */ + callout_drv = serial_drv; + callout_drv.name = "cucp"; + callout_drv.major = CPC_TTY_MAJOR + 1; + callout_drv.subtype = SERIAL_TYPE_CALLOUT; + 
callout_drv.read_proc = 0; + callout_drv.proc_entry = 0; + + /* register the TTY driver */ + if (tty_register_driver(&serial_drv)) { + printk("%s-tty: Failed to register serial driver! ", + ((struct net_device*)(pc300dev->hdlc))->name); + return; + } + + if (tty_register_driver(&callout_drv)) { + CPC_TTY_DBG("%s-tty: Failed to register callout driver! ", + ((struct net_device*)(pc300dev->hdlc))->name); + return; + } + memset((void *)cpc_tty_area, 0, + sizeof(st_cpc_tty_area) * CPC_TTY_NPORTS); + } + + cpc_tty = &cpc_tty_area[port]; + + if (cpc_tty->state != CPC_TTY_ST_IDLE) { + CPC_TTY_DBG("%s-tty: TTY port %i, already in use.\n", + ((struct net_device*)(pc300dev->hdlc))->name,port); + return; + } + + cpc_tty_cnt++; + cpc_tty->state = CPC_TTY_ST_INIT; + cpc_tty->num_open= 0; + cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; + cpc_tty->pc300dev = pc300dev; + + cpc_tty->tty_tx_task_queue.routine = cpc_tty_tx_task; + cpc_tty->tty_tx_task_queue.data = (void *)cpc_tty; + + cpc_tty->tty_rx_task_queue.routine = cpc_tty_rx_task; + cpc_tty->tty_rx_task_queue.data = (void *) port; + + cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = 0; + + pc300dev->cpc_tty = (void *)cpc_tty; + + aux = strlen(((struct net_device*)(pc300dev->hdlc))->name); + memcpy(cpc_tty->name,((struct net_device*)(pc300dev->hdlc))->name,aux); + memcpy(&cpc_tty->name[aux], "-tty", 5); + + cpc_open((struct net_device *)pc300dev->hdlc); + cpc_tty_dtr_off(pc300dev); + + CPC_TTY_DBG("%s: Initializing TTY Sync Driver, tty major#%d minor#%i\n", + cpc_tty->name,CPC_TTY_MAJOR,cpc_tty->tty_minor); + return; +} + +/* + * PC300 TTY OPEN routine + * + * This routine is called by the tty driver to open the interface + * o verify minor + * o allocate buffer to Rx and Tx + */ +static int cpc_tty_open(struct tty_struct *tty, struct file *flip) +{ + int port ; + st_cpc_tty_area *cpc_tty; + + if (!tty) { + return -ENODEV; + } + + port = MINOR(tty->device) - tty->driver.minor_start; + + if ((port < 0) || (port >= CPC_TTY_NPORTS)){ + CPC_TTY_DBG("pc300_tty: open invalid minor %i\n",MINOR(tty->device)); + return -ENODEV; + } + + cpc_tty = &cpc_tty_area[port]; + + if (cpc_tty->state == CPC_TTY_ST_IDLE){ + CPC_TTY_DBG("%s: open - invalid interface, minor=%i\n", + cpc_tty->name, MINOR(tty->device)); + return -ENODEV; + } + + if (cpc_tty->num_open == 0) { /* first open of this tty */ + if (!cpc_tty_area[port].buf_tx){ + cpc_tty_area[port].buf_tx = kmalloc(CPC_TTY_MAX_MTU,GFP_KERNEL); + if (cpc_tty_area[port].buf_tx == 0){ + CPC_TTY_DBG("%s: error in memory allocation\n",cpc_tty->name); + return -ENOMEM; + } + } + + if (cpc_tty_area[port].buf_rx.first) { + unsigned char * aux; + while (cpc_tty_area[port].buf_rx.first) { + aux = (unsigned char *)cpc_tty_area[port].buf_rx.first; + cpc_tty_area[port].buf_rx.first = cpc_tty_area[port].buf_rx.first->next; + kfree(aux); + } + cpc_tty_area[port].buf_rx.first = NULL; + cpc_tty_area[port].buf_rx.last = NULL; + } + + cpc_tty_area[port].state = CPC_TTY_ST_OPEN; + cpc_tty_area[port].tty = tty; + tty->driver_data = &cpc_tty_area[port]; + + cpc_tty_dtr_on(cpc_tty->pc300dev); + } + + cpc_tty->num_open++; + + CPC_TTY_DBG("%s: opening TTY driver\n", cpc_tty->name); + + /* notify the PC300 driver */ + return 0; +} + +/* + * PC300 TTY CLOSE routine + * + * This routine is called by the tty driver to close the interface + * o call close channel in PC300 driver (cpc_closech) + * o free Rx and Tx buffers + */ + +static void cpc_tty_close(struct tty_struct *tty, struct file *flip) +{ + st_cpc_tty_area *cpc_tty; + unsigned long flags;
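+ /* return codes from tty_unregister_driver() */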
+ int res; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY in close\n"); + return; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty)|| (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return; + } + + if (!cpc_tty->num_open) { + CPC_TTY_DBG("%s: TTY is closed\n",cpc_tty->name); + return; + } + + if (--cpc_tty->num_open > 0) { + CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name); + return; + } + + cpc_tty_dtr_off(cpc_tty->pc300dev); + + CPC_TTY_LOCK(cpc_tty->pc300dev->chan->card, flags); /* lock irq */ + cpc_tty->tty = NULL; + cpc_tty->state = CPC_TTY_ST_INIT; + CPC_TTY_UNLOCK(cpc_tty->pc300dev->chan->card, flags); /* unlock irq */ + + if (cpc_tty->buf_rx.first) { + unsigned char * aux; + while (cpc_tty->buf_rx.first) { + aux = (unsigned char *)cpc_tty->buf_rx.first; + cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next; + kfree(aux); + } + cpc_tty->buf_rx.first = NULL; + cpc_tty->buf_rx.last = NULL; + } + + if (cpc_tty->buf_tx) { + kfree(cpc_tty->buf_tx); + cpc_tty->buf_tx = NULL; + } + + CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name); + + if (!cpc_tty_refcount && cpc_tty_unreg_flag) { + cpc_tty_unreg_flag = 0; + CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name); + if ((res=tty_unregister_driver(&serial_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + if ((res=tty_unregister_driver(&callout_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + } + return; +} + +/* + * PC300 TTY WRITE routine + * + * This routine is called by the tty driver to write a series of characters + * to the tty device. The characters may come from user or kernel space.
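+ * Data coming from user space is staged through the per-port buf_tx + * buffer with copy_from_user() before being handed to + * cpc_tty_send_to_card(); kernel-space data is passed through directly.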
+ o verify the DCD signal + o send characters to the board and start the transmission + */ +static int cpc_tty_write(struct tty_struct *tty, int from_user, + const unsigned char *buf, int count) +{ + st_cpc_tty_area *cpc_tty; + pc300ch_t *pc300chan; + pc300_t *card; + int ch; + unsigned long flags; + struct net_device_stats *stats; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY in write\n"); + return -ENODEV; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n", cpc_tty->name); + return -ENODEV; + } + + if (count > CPC_TTY_MAX_MTU) { + CPC_TTY_DBG("%s: count is invalid\n",cpc_tty->name); + return -EINVAL; /* frame too big */ + } + + CPC_TTY_DBG("%s: cpc_tty_write %s data len=%i\n",cpc_tty->name, + (from_user)?"from user" : "from kernel",count); + + pc300chan = (pc300ch_t *)((pc300dev_t*)cpc_tty->pc300dev)->chan; + stats = &((pc300dev_t*)cpc_tty->pc300dev)->hdlc->stats; + card = (pc300_t *) pc300chan->card; + ch = pc300chan->channel; + + /* verify DCD signal */ + if (cpc_readb(card->hw.scabase + M_REG(ST3,ch)) & ST3_DCD) { + /* DCD is OFF */ + CPC_TTY_DBG("%s : DCD is OFF\n", cpc_tty->name); + stats->tx_errors++; + stats->tx_carrier_errors++; + CPC_TTY_LOCK(card, flags); + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR); + + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED1 << (2 *ch))); + } + + CPC_TTY_UNLOCK(card, flags); + + return -EINVAL; + } + + if (from_user) { + unsigned char *buf_tmp; + + buf_tmp = cpc_tty->buf_tx; + if (copy_from_user(buf_tmp, buf, count)) { + /* failed to copy from user */ + CPC_TTY_DBG("%s: error in copy from user\n",cpc_tty->name); + return -EINVAL; + } + + if (cpc_tty_send_to_card(cpc_tty->pc300dev, (void*) buf_tmp,count)) { + /* failed to send */ + CPC_TTY_DBG("%s: transmission error\n",cpc_tty->name); + return 0; + } + } else { + if (cpc_tty_send_to_card(cpc_tty->pc300dev, (void*)buf, count)) { + /* failed to send */ + CPC_TTY_DBG("%s: transmission error\n", cpc_tty->name); + return 0; + } + } + return count; +} + +/* + * PC300 TTY Write Room routine + * + * This routine returns the number of characters the tty driver will accept + * for queuing to be written.
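+ * No intermediate queue is kept: frames are written straight into the + * DMA buffers, so the full frame size is always accepted.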
+ o return MTU + */ +static int cpc_tty_write_room(struct tty_struct *tty) +{ + st_cpc_tty_area *cpc_tty; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY to write room\n"); + return -ENODEV; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return -ENODEV; + } + + CPC_TTY_DBG("%s: write room\n",cpc_tty->name); + + return CPC_TTY_MAX_MTU; +} + +/* + * PC300 TTY chars in buffer routine + * + * This routine returns the number of characters in the transmission buffer + * o returns 0 + */ +static int cpc_tty_chars_in_buffer(struct tty_struct *tty) +{ + st_cpc_tty_area *cpc_tty; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n"); + return -ENODEV; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return -ENODEV; + } + + return(0); +} + +/* + * PC300 TTY IOCTL routine + * + * This routine handles the TIOCMBIS (set DTR signal) and TIOCMBIC (clear DTR + * signal) IOCTL commands. + */ +static int cpc_tty_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg) + +{ + st_cpc_tty_area *cpc_tty; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY in ioctl\n"); + return -ENODEV; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return -ENODEV; + } + + CPC_TTY_DBG("%s: IOCTL cmd %x\n",cpc_tty->name,cmd); + + switch (cmd) { + case TIOCMBIS : /* set DTR */ + cpc_tty_dtr_on(cpc_tty->pc300dev); + break; + + case TIOCMBIC: /* clear DTR */ + cpc_tty_dtr_off(cpc_tty->pc300dev); + break; + default : + return -ENOIOCTLCMD; + } + return 0; +} + +/* + * PC300 TTY Flush Buffer routine + * + * This routine resets the transmission buffer + */ +static void cpc_tty_flush_buffer(struct tty_struct *tty) +{ + st_cpc_tty_area *cpc_tty; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY to flush buffer\n"); + return; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return; + } + + CPC_TTY_DBG("%s: call wake_up_interruptible\n",cpc_tty->name); + + wake_up_interruptible(&tty->write_wait); + + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup){ + CPC_TTY_DBG("%s: call line disc.
wake up\n",cpc_tty->name); + tty->ldisc.write_wakeup(tty); + } + + return; +} + +/* + * PC300 TTY Hangup routine + * + * This routine is called by the tty driver to hangup the interface + * o clear DTR signal + */ + +static void cpc_tty_hangup(struct tty_struct *tty) +{ + st_cpc_tty_area *cpc_tty; + int res; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY to hangup\n"); + return ; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return ; + } + if (!cpc_tty_refcount && cpc_tty_unreg_flag) { + cpc_tty_unreg_flag = 0; + CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name); + if ((res=tty_unregister_driver(&serial_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + if ((res=tty_unregister_driver(&callout_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + } + cpc_tty_dtr_off(cpc_tty->pc300dev); +} + +/* + * PC300 TTY RX task routine + * This routine treats RX task + * o verify read buffer + * o call the line disc. read + * o free memory + */ +static void cpc_tty_rx_task(void * data) +{ + int port, i, j; + st_cpc_tty_area *cpc_tty; + volatile st_cpc_rx_buf * buf; + char flags=0,flg_rx=1; + + if (cpc_tty_cnt == 0) return; + + for (i=0; (i < 4) && flg_rx ; i++) { + flg_rx = 0; + port = (int) data; + for (j=0; j < CPC_TTY_NPORTS; j++) { + cpc_tty = &cpc_tty_area[port]; + + if ((buf=cpc_tty->buf_rx.first) != 0) { + + if (cpc_tty->tty && (cpc_tty->tty->ldisc.receive_buf)) { + CPC_TTY_DBG("%s: call line disc. receive_buf\n",cpc_tty->name); + cpc_tty->tty->ldisc.receive_buf(cpc_tty->tty, buf->data, + &flags, buf->size); + } + cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next; + kfree((unsigned char *)buf); + buf = cpc_tty->buf_rx.first; + flg_rx = 1; + } + if (++port == CPC_TTY_NPORTS) port = 0; + } + } +} + +/* + * PC300 TTY RX task routine + * + * This routine treats RX interrupt. 
+ */ +static void cpc_tty_rx_disc_frame(pc300ch_t *pc300chan) +{ + volatile pcsca_bd_t * ptdescr; + volatile unsigned char status; + pc300_t *card = (pc300_t *)pc300chan->card; + int ch = pc300chan->channel; + + /* dma buf read */ + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + RX_BD_ADDR(ch, pc300chan->rx_first_bd)); + while (pc300chan->rx_first_bd != pc300chan->rx_last_bd) { + status = cpc_readb(&ptdescr->status); + cpc_writeb(&ptdescr->status, 0); + cpc_writeb(&ptdescr->len, 0); + pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) & + (N_DMA_RX_BUF - 1); + if (status & DST_EOM) { + break; /* end of message */ + } + ptdescr = (pcsca_bd_t *)(card->hw.rambase + cpc_readl(&ptdescr->next)); + } +} + +/* + * PC300 TTY receive routine + * + * This routine handles the RX interrupt: + * o read all frames in card + * o verify the frame size + * o read the frame in rx buffer + */ +void cpc_tty_receive(pc300dev_t *pc300dev) +{ + st_cpc_tty_area *cpc_tty; + pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; + pc300_t *card = (pc300_t *)pc300chan->card; + int ch = pc300chan->channel; + volatile pcsca_bd_t * ptdescr; + struct net_device_stats *stats = &pc300dev->hdlc->stats; + int rx_len, rx_aux; + volatile unsigned char status; + unsigned short first_bd = pc300chan->rx_first_bd; + st_cpc_rx_buf *new; + unsigned char dsr_rx; + + if (pc300dev->cpc_tty == NULL) { + return; + } + + dsr_rx = cpc_readb(card->hw.scabase + DSR_RX(ch)); + + cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty; + + while (1) { + rx_len = 0; + ptdescr = (pcsca_bd_t *)(card->hw.rambase + RX_BD_ADDR(ch, first_bd)); + while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { + rx_len += cpc_readw(&ptdescr->len); + first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1); + if (status & DST_EOM) { + break; + } + ptdescr=(pcsca_bd_t*)(card->hw.rambase+cpc_readl(&ptdescr->next)); + } + + if (!rx_len) { + if (dsr_rx & DSR_BOF) { + /* update EDA */ + cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), + RX_BD_ADDR(ch, pc300chan->rx_last_bd)); + } + return; + } + + if (rx_len > CPC_TTY_MAX_MTU) { + /* Free RX descriptors */ + CPC_TTY_DBG("%s: frame size is invalid.\n",cpc_tty->name); + stats->rx_errors++; + stats->rx_frame_errors++; + cpc_tty_rx_disc_frame(pc300chan); + continue; + } + + new = (st_cpc_rx_buf *) kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC); + if (new == 0) { + cpc_tty_rx_disc_frame(pc300chan); + continue; + } + + /* dma buf read */ + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + RX_BD_ADDR(ch, pc300chan->rx_first_bd)); + + rx_len = 0; /* frame size counter */ + + while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { + rx_aux = cpc_readw(&ptdescr->len); + if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT)) + || (rx_aux > BD_DEF_LEN)) { + CPC_TTY_DBG("%s: reception error\n", cpc_tty->name); + stats->rx_errors++; + if (status & DST_OVR) { + stats->rx_fifo_errors++; + } + if (status & DST_CRC) { + stats->rx_crc_errors++; + } + if ((status & (DST_RBIT | DST_SHRT | DST_ABT)) || + (rx_aux > BD_DEF_LEN)) { + stats->rx_frame_errors++; + } + /* discard remaining descriptors used by the bad frame */ + CPC_TTY_DBG("%s: reception error - discard descriptors", + cpc_tty->name); + cpc_tty_rx_disc_frame(pc300chan); + rx_len = 0; + kfree((unsigned char *)new); + break; /* read next frame - while(1) */ + } + + if (cpc_tty->state != CPC_TTY_ST_OPEN) { + /* Free RX descriptors */ + cpc_tty_rx_disc_frame(pc300chan); + stats->rx_dropped++; + rx_len = 0; + kfree((unsigned char *)new); + break; /* read next frame - while(1) */ + } + + /* read the segment of the frame */ + if (rx_aux != 0) { + memcpy_fromio((new->data
+ rx_len), + (void *)(card->hw.rambase + + cpc_readl(&ptdescr->ptbuf)), rx_aux); + rx_len += rx_aux; + } + cpc_writeb(&ptdescr->status,0); + cpc_writeb(&ptdescr->len, 0); + pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) & + (N_DMA_RX_BUF -1); + if (status & DST_EOM)break; + + ptdescr = (pcsca_bd_t *) (card->hw.rambase + + cpc_readl(&ptdescr->next)); + } + /* update pointer */ + pc300chan->rx_last_bd = (pc300chan->rx_first_bd - 1) & + (N_DMA_RX_BUF - 1) ; + if (!(dsr_rx & DSR_BOF)) { + /* update EDA */ + cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), + RX_BD_ADDR(ch, pc300chan->rx_last_bd)); + } + if (rx_len != 0) { + stats->rx_bytes += rx_len; + + if (pc300dev->trace_on) { + cpc_tty_trace(pc300dev, new->data,rx_len, 'R'); + } + new->size = rx_len; + new->next = 0; + if (cpc_tty->buf_rx.first == 0) { + cpc_tty->buf_rx.first = new; + cpc_tty->buf_rx.last = new; + } else { + cpc_tty->buf_rx.last->next = new; + cpc_tty->buf_rx.last = new; + } + schedule_task(&(cpc_tty->tty_rx_task_queue)); + stats->rx_packets++; + } + } +} + +/* + * PC300 TTY TX task routine + * + * This routine handles the TX interrupt. + * o if needed, call the line discipline wakeup + * o call wake_up_interruptible + */ +static void cpc_tty_tx_task(void *data) +{ + st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; + struct tty_struct *tty; + + CPC_TTY_DBG("%s: cpc_tty_tx_task init\n",cpc_tty->name); + + if ((tty = cpc_tty->tty) == 0) { + CPC_TTY_DBG("%s: the interface is not opened\n",cpc_tty->name); + return; + } + + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup){ + CPC_TTY_DBG("%s:call line disc. wakeup\n",cpc_tty->name); + tty->ldisc.write_wakeup (tty); + } + + wake_up_interruptible(&tty->write_wait); +} + +/* + * PC300 TTY send to card routine + * + * This routine sends data to the card. + * o clear descriptors + * o write data to DMA buffers + * o start the transmission + */ +static int cpc_tty_send_to_card(pc300dev_t *dev,void* buf, int len) +{ + pc300ch_t *chan = (pc300ch_t *)dev->chan; + pc300_t *card = (pc300_t *)chan->card; + int ch = chan->channel; + struct net_device_stats *stats = &dev->hdlc->stats; + unsigned long flags; + volatile pcsca_bd_t * ptdescr; + int i, nchar; + int tosend = len; + int nbuf = ((len - 1)/BD_DEF_LEN) + 1; + unsigned char *pdata=buf; + + CPC_TTY_DBG("%s:cpc_tty_send_to_card len=%i", + (st_cpc_tty_area *)dev->cpc_tty->name,len); + + if (nbuf >= card->chan[ch].nfree_tx_bd) { + return 1; + } + + /* write buffer to DMA buffers */ + CPC_TTY_DBG("%s: call dma_buf_write\n", + (st_cpc_tty_area *)dev->cpc_tty->name); + for (i = 0 ; i < nbuf ; i++) { + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + TX_BD_ADDR(ch, card->chan[ch].tx_next_bd)); + nchar = (BD_DEF_LEN > tosend) ?
tosend : BD_DEF_LEN; + if (cpc_readb(&ptdescr->status) & DST_OSB) { + memcpy_toio((void *)(card->hw.rambase + + cpc_readl(&ptdescr->ptbuf)), + &pdata[len - tosend], + nchar); + card->chan[ch].nfree_tx_bd--; + if ((i + 1) == nbuf) { + /* This must be the last BD to be used */ + cpc_writeb(&ptdescr->status, DST_EOM); + } else { + cpc_writeb(&ptdescr->status, 0); + } + cpc_writew(&ptdescr->len, nchar); + } else { + CPC_TTY_DBG("%s: error in dma_buf_write\n", + (st_cpc_tty_area *)dev->cpc_tty->name); + stats->tx_dropped++; + return 1; + } + tosend -= nchar; + card->chan[ch].tx_next_bd = + (card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1); + } + + if (dev->trace_on) { + cpc_tty_trace(dev, buf, len,'T'); + } + + /* start transmission */ + CPC_TTY_DBG("%s: start transmission\n", + (st_cpc_tty_area *)dev->cpc_tty->name); + + CPC_TTY_LOCK(card, flags); + cpc_writeb(card->hw.scabase + DTX_REG(EDAL, ch), + TX_BD_ADDR(ch, chan->tx_next_bd)); + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA); + cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE); + + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + CPC_TTY_UNLOCK(card, flags); + return 0; +} + +/* + * PC300 TTY trace routine + * + * This routine sends a trace of the connection to the application. + * o build an skb carrying the interface name, direction tag and frame data + * o hand it to the network stack via netif_rx() + */ + +static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx) +{ + struct sk_buff *skb; + + if ((skb = dev_alloc_skb(10 + len)) == NULL) { + /* out of memory */ + CPC_TTY_DBG("%s: tty_trace - out of memory\n", + ((struct net_device *)(dev->hdlc))->name); + return; + } + + skb_put (skb, 10 + len); + skb->dev = (struct net_device *) dev->hdlc; + skb->protocol = htons(ETH_P_CUST); + skb->mac.raw = skb->data; + skb->pkt_type = PACKET_HOST; + skb->len = 10 + len; + + memcpy(skb->data,((struct net_device *)(dev->hdlc))->name,5); + skb->data[5] = '['; + skb->data[6] = rxtx; + skb->data[7] = ']'; + skb->data[8] = ':'; + skb->data[9] = ' '; + memcpy(&skb->data[10], buf, len); + netif_rx(skb); +} + +/* + * PC300 TTY unregister service routine + * + * This routine unregisters one interface.
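+ * When the last interface goes away it also tries to unregister the tty + * driver; if the driver is still referenced, unregistration is deferred + * through cpc_tty_unreg_flag and retried later from close()/hangup().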
+ */ +void cpc_tty_unregister_service(pc300dev_t *pc300dev) +{ + st_cpc_tty_area *cpc_tty; + ulong flags; + int res; + + if ((cpc_tty= (st_cpc_tty_area *) pc300dev->cpc_tty) == 0) { + CPC_TTY_DBG("%s: interface is not TTY\n", + ((struct net_device *)(pc300dev->hdlc))->name); + return; + } + CPC_TTY_DBG("%s: cpc_tty_unregister_service", cpc_tty->name); + + if (cpc_tty->pc300dev != pc300dev) { + CPC_TTY_DBG("%s: invalid tty ptr=%s\n", + ((struct net_device *)(pc300dev->hdlc))->name, cpc_tty->name); + return; + } + + if (--cpc_tty_cnt == 0) { + if (cpc_tty_refcount) { + CPC_TTY_DBG("%s: unregister is not possible, refcount=%d", + cpc_tty->name, cpc_tty_refcount); + cpc_tty_cnt++; + cpc_tty_unreg_flag = 1; + return; + } else { + CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name); + if ((res=tty_unregister_driver(&serial_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + if ((res=tty_unregister_driver(&callout_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + } + } + CPC_TTY_LOCK(pc300dev->chan->card,flags); + cpc_tty->tty = NULL; + CPC_TTY_UNLOCK(pc300dev->chan->card, flags); + cpc_tty->tty_minor = 0; + cpc_tty->state = CPC_TTY_ST_IDLE; +} + +/* + * PC300 TTY trigger poll routine + * This routine is called by the PC300 driver to handle the Tx interrupt. + */ +void cpc_tty_trigger_poll(pc300dev_t *pc300dev) +{ + st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty; + if (!cpc_tty) { + return; + } + schedule_task(&(cpc_tty->tty_tx_task_queue)); +} + +/* + * PC300 TTY reset var routine + * This routine is called by the PC300 driver to init the TTY area. + */ + +void cpc_tty_reset_var(void) +{ + int i ; + + CPC_TTY_DBG("hdlcX-tty: reset variables\n"); + /* reset the tty_driver structure - serial_drv */ + memset(&serial_drv, 0, sizeof(struct tty_driver)); + memset(&callout_drv, 0, sizeof(struct tty_driver)); + for (i=0; i < CPC_TTY_NPORTS; i++){ + memset(&cpc_tty_area[i],0, sizeof(st_cpc_tty_area)); + } +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/sdla_chdlc.c linux.21rc5-ac2/drivers/net/wan/sdla_chdlc.c --- linux.21rc5/drivers/net/wan/sdla_chdlc.c 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wan/sdla_chdlc.c 2003-05-28 15:35:56.000000000 +0100 @@ -591,8 +591,7 @@ if (chdlc_set_intr_mode(card, APP_INT_ON_TIMER)){ - printk (KERN_INFO "%s: - Failed to set interrupt triggers!\n", + printk (KERN_INFO "%s: Failed to set interrupt triggers!\n", card->devname); return -EIO; } @@ -1089,13 +1088,11 @@ set_bit(0,&chdlc_priv_area->config_chdlc); chdlc_priv_area->config_chdlc_timeout=jiffies; - del_timer(&chdlc_priv_area->poll_delay_timer); /* Start the CHDLC configuration after 1sec delay.
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/sdla_fr.c linux.21rc5-ac2/drivers/net/wan/sdla_fr.c
--- linux.21rc5/drivers/net/wan/sdla_fr.c	2003-05-28 15:07:45.000000000 +0100
+++ linux.21rc5-ac2/drivers/net/wan/sdla_fr.c	2003-05-03 18:44:14.000000000 +0100
@@ -4541,9 +4541,7 @@
 {
 	fr_channel_t* chan = dev->priv;
 
-	del_timer(&chan->fr_arp_timer);
-	chan->fr_arp_timer.expires = jiffies + (chan->inarp_interval * HZ);
-	add_timer(&chan->fr_arp_timer);
+	mod_timer(&chan->fr_arp_timer, jiffies + (chan->inarp_interval) * HZ);
 	return;
 }
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wan/sdla_x25.c linux.21rc5-ac2/drivers/net/wan/sdla_x25.c
--- linux.21rc5/drivers/net/wan/sdla_x25.c	2003-05-28 15:08:59.000000000 +0100
+++ linux.21rc5-ac2/drivers/net/wan/sdla_x25.c	2003-05-03 18:43:59.000000000 +0100
@@ -1267,9 +1267,7 @@
 			connect(card);
 			S508_S514_unlock(card, &smp_flags);
-			del_timer(&card->u.x.x25_timer);
-			card->u.x.x25_timer.expires=jiffies+HZ;
-			add_timer(&card->u.x.x25_timer);
+			mod_timer(&card->u.x.x25_timer, jiffies + HZ);
 		}
 	}
 	/* Device is not up until we are in the connected state */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wireless/airport.c linux.21rc5-ac2/drivers/net/wireless/airport.c
--- linux.21rc5/drivers/net/wireless/airport.c	2003-05-28 15:08:59.000000000 +0100
+++ linux.21rc5-ac2/drivers/net/wireless/airport.c	2003-04-23 14:29:39.000000000 +0100
@@ -1,4 +1,4 @@
-/* airport.c 0.13b
+/* airport.c 0.13d
 *
 * A driver for "Hermes" chipset based Apple Airport wireless
 * card.
@@ -95,7 +95,7 @@
 	netif_device_detach(dev);
 
-	priv->hw_unavailable = 1;
+	priv->hw_unavailable++;
 
 	orinoco_unlock(priv, &flags);
@@ -121,14 +121,15 @@
 	netif_device_attach(dev);
 
-	if (priv->open) {
+	priv->hw_unavailable--;
+
+	if (priv->open && (! priv->hw_unavailable)) {
 		err = __orinoco_up(dev);
 		if (err)
 			printk(KERN_ERR "%s: Error %d restarting card on PBOOK_WAKE\n",
 			       dev->name, err);
 	}
-	priv->hw_unavailable = 0;
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -140,8 +141,21 @@
 
 static int airport_hard_reset(struct orinoco_private *priv)
 {
+	/* It would be nice to power cycle the Airport for a real hard
+	 * reset, but for some reason although it appears to
+	 * re-initialize properly, it falls in a screaming heap
+	 * shortly afterwards. */
+#if 0
+	struct net_device *dev = priv->ndev;
 	struct airport *card = priv->card;
+	/* Vitally important. If we don't do this it seems we get an
+	 * interrupt somewhere during the power cycle; since
+	 * hw_unavailable is already set it doesn't get ACKed, we get
+	 * into an interrupt loop and the PMU decides to turn us
+	 * off.
*/ + disable_irq(dev->irq); + pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, card->node, 0, 0); current->state = TASK_UNINTERRUPTIBLE; schedule_timeout(HZ); @@ -149,6 +163,10 @@ current->state = TASK_UNINTERRUPTIBLE; schedule_timeout(HZ); + enable_irq(dev->irq); + schedule_timeout(HZ); +#endif + return 0; } @@ -209,7 +227,7 @@ /* Reset it before we get the interrupt */ hermes_init(hw); - if (request_irq(dev->irq, orinoco_interrupt, 0, "Airport", (void *)priv)) { + if (request_irq(dev->irq, orinoco_interrupt, 0, "Airport", dev)) { printk(KERN_ERR "airport: Couldn't get IRQ %d\n", dev->irq); goto failed; } @@ -251,7 +269,7 @@ card->ndev_registered = 0; if (card->irq_requested) - free_irq(dev->irq, priv); + free_irq(dev->irq, dev); card->irq_requested = 0; if (card->vaddr) @@ -269,11 +287,10 @@ kfree(dev); } /* airport_detach */ -static char version[] __initdata = "airport.c 0.13b (Benjamin Herrenschmidt )"; +static char version[] __initdata = "airport.c 0.13d (Benjamin Herrenschmidt )"; MODULE_AUTHOR("Benjamin Herrenschmidt "); MODULE_DESCRIPTION("Driver for the Apple Airport wireless card."); MODULE_LICENSE("Dual MPL/GPL"); -EXPORT_NO_SYMBOLS; static int __init init_airport(void) @@ -282,15 +299,11 @@ printk(KERN_DEBUG "%s\n", version); - MOD_INC_USE_COUNT; - /* Lookup card in device tree */ airport_node = find_devices("radio"); if (airport_node && !strcmp(airport_node->parent->name, "mac-io")) airport_dev = airport_attach(airport_node); - MOD_DEC_USE_COUNT; - return airport_dev ? 0 : -ENODEV; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wireless/hermes.c linux.21rc5-ac2/drivers/net/wireless/hermes.c --- linux.21rc5/drivers/net/wireless/hermes.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wireless/hermes.c 2003-04-23 14:29:39.000000000 +0100 @@ -544,4 +544,9 @@ return 0; } +static void __exit exit_hermes(void) +{ +} + module_init(init_hermes); +module_exit(exit_hermes); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wireless/hermes.h linux.21rc5-ac2/drivers/net/wireless/hermes.h --- linux.21rc5/drivers/net/wireless/hermes.h 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wireless/hermes.h 2003-05-28 21:48:47.000000000 +0100 @@ -250,7 +250,6 @@ u16 scanreason; /* ??? 
*/
 	struct hermes_scan_apinfo aps[35];	/* Scan result */
 } __attribute__ ((packed));
-
 #define HERMES_LINKSTATUS_NOT_CONNECTED   (0x0000)
 #define HERMES_LINKSTATUS_CONNECTED       (0x0001)
 #define HERMES_LINKSTATUS_DISCONNECTED    (0x0002)
@@ -368,7 +367,7 @@
 	if (hw->io_space) {
 		insw(hw->iobase + off, buf, count);
 	} else {
-		int i;
+		unsigned i;
 		u16 *p;
 
 		/* This needs to *not* byteswap (like insw()) but
@@ -388,7 +387,7 @@
 	if (hw->io_space) {
 		outsw(hw->iobase + off, buf, count);
 	} else {
-		int i;
+		unsigned i;
 		const u16 *p;
 
 		/* This needs to *not* byteswap (like outsw()) but
@@ -401,6 +400,21 @@
 	}
 }
 
+static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count)
+{
+	unsigned i;
+
+	off = off << hw->reg_spacing;
+
+	if (hw->io_space) {
+		for (i = 0; i < count; i++)
+			outw(0, hw->iobase + off);
+	} else {
+		for (i = 0; i < count; i++)
+			writew(0, hw->iobase + off);
+	}
+}
+
 #define HERMES_READ_RECORD(hw, bap, rid, buf) \
 	(hermes_read_ltv((hw),(bap),(rid), sizeof(*buf), NULL, (buf)))
 #define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wireless/orinoco.c linux.21rc5-ac2/drivers/net/wireless/orinoco.c
--- linux.21rc5/drivers/net/wireless/orinoco.c	2003-05-28 15:08:59.000000000 +0100
+++ linux.21rc5-ac2/drivers/net/wireless/orinoco.c	2003-04-23 14:29:39.000000000 +0100
@@ -1,4 +1,4 @@
-/* orinoco.c 0.13b - (formerly known as dldwd_cs.c and orinoco_cs.c)
+/* orinoco.c 0.13d - (formerly known as dldwd_cs.c and orinoco_cs.c)
 *
 * A driver for Hermes or Prism 2 chipset based PCMCIA wireless
 * adaptors, with Lucent/Agere, Intersil or Symbol firmware.
@@ -345,11 +345,45 @@
 *	we are connected (avoids confusing the firmware), and only
 *	give LINKSTATUS printk()s if the status has changed.
 *
+ * v0.13b -> v0.13c - 11 Mar 2003 - David Gibson
+ *	o Cleanup: use dev instead of priv in various places.
+ *	o Bug fix: Don't ReleaseConfiguration on RESET_PHYSICAL event
+ *	  if we're in the middle of a (driver initiated) hard reset.
+ *	o Bug fix: ETH_ZLEN is supposed to include the header
+ *	  (Dionysus Blazakis & Manish Karir)
+ *	o Convert to using workqueues instead of taskqueues (and
+ *	  backwards compatibility macros for pre 2.5.41 kernels).
+ *	o Drop redundant (I think...) MOD_{INC,DEC}_USE_COUNT in
+ *	  airport.c
+ *	o New orinoco_tmd.c init module from Joerg Dorchain for
+ *	  TMD7160 based PCI to PCMCIA bridges (similar to
+ *	  orinoco_plx.c).
+ *
+ * v0.13c -> v0.13d - 22 Apr 2003 - David Gibson
+ *	o Make hw_unavailable a counter rather than just a flag; this
+ *	  is necessary to avoid some races (such as a card being
+ *	  removed in the middle of orinoco_reset()).
+ *	o Restore Release/RequestConfiguration in the PCMCIA event handler
+ *	  when dealing with a driver initiated hard reset. This is
+ *	  necessary to prevent hangs due to a spurious interrupt while
+ *	  the reset is in progress.
+ *	o Clear the 802.11 header when transmitting, even though we
+ *	  don't use it. This fixes a long-standing bug on some
+ *	  firmwares, which seem to get confused if that isn't done.
+ *	o Be less eager to de-encapsulate SNAP frames, only do so if
+ *	  the OUI is 00:00:00 or 00:00:f8, leave others alone. The old
+ *	  behaviour broke CDP (Cisco Discovery Protocol).
+ *	o Use dev instead of priv for free_irq() as well as
+ *	  request_irq() (oops).
+ *	o Attempt to reset rather than giving up if we get too many
+ *	  IRQs.
+ *	o Changed semantics of __orinoco_down() so it can be called
+ *	  safely with hw_unavailable set.  It also now clears the
+ *	  linkstatus (since we're going to have to reassociate).
+ *
 * TODO
 *	o New wireless extensions API (patch from Moustafa
 *	  Youssef, updated by Jim Carter).
- *	o Fix PCMCIA hard resets with pcmcia-cs.
 *	o Handle de-encapsulation within network layer, provide 802.11
 *	  headers (patch from Thomas 'Dent' Mirlacher)
 *	o Fix possible races in SPY handling.
@@ -373,7 +407,7 @@
 * flag after taking the lock, and if it is set, give up on whatever
 * they are doing and drop the lock again.  The orinoco_lock()
 * function handles this (it unlocks and returns -EBUSY if
- * hw_unavailable is true). */
+ * hw_unavailable is non-zero). */
 
 #include 
@@ -522,7 +556,7 @@
 
 /* Hardware control routines */
 
-static int __orinoco_program_rids(struct orinoco_private *priv);
+static int __orinoco_program_rids(struct net_device *dev);
 
 static int __orinoco_hw_set_bitrate(struct orinoco_private *priv);
 static int __orinoco_hw_setup_wep(struct orinoco_private *priv);
@@ -535,14 +569,14 @@
 static void __orinoco_set_multicast_list(struct net_device *dev);
 
 /* Interrupt handling routines */
-static void __orinoco_ev_tick(struct orinoco_private *priv, hermes_t *hw);
-static void __orinoco_ev_wterr(struct orinoco_private *priv, hermes_t *hw);
-static void __orinoco_ev_infdrop(struct orinoco_private *priv, hermes_t *hw);
-static void __orinoco_ev_info(struct orinoco_private *priv, hermes_t *hw);
-static void __orinoco_ev_rx(struct orinoco_private *priv, hermes_t *hw);
-static void __orinoco_ev_txexc(struct orinoco_private *priv, hermes_t *hw);
-static void __orinoco_ev_tx(struct orinoco_private *priv, hermes_t *hw);
-static void __orinoco_ev_alloc(struct orinoco_private *priv, hermes_t *hw);
+static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw);
 
 /* ioctl() routines */
 static int orinoco_ioctl_getiwrange(struct net_device *dev, struct iw_point *rrq);
@@ -577,7 +611,7 @@
 	struct hermes *hw = &priv->hw;
 	int err;
 
-	err = __orinoco_program_rids(priv);
+	err = __orinoco_program_rids(dev);
 	if (err) {
 		printk(KERN_ERR "%s: Error %d configuring card\n",
 		       dev->name, err);
@@ -606,14 +640,25 @@
 
 	netif_stop_queue(dev);
 
-	err = hermes_disable_port(hw, 0);
-	if (err) {
-		printk(KERN_ERR "%s: Error %d disabling MAC port\n",
-		       dev->name, err);
-		return err;
+	if (! priv->hw_unavailable) {
+		if (! priv->broken_disableport) {
+			err = hermes_disable_port(hw, 0);
+			if (err) {
+				/* Some firmwares (e.g. Intersil 1.3.x) seem
+				 * to have problems disabling the port, oh
+				 * well, too bad. */
+				printk(KERN_WARNING "%s: Error %d disabling MAC port\n",
+				       dev->name, err);
+				priv->broken_disableport = 1;
+			}
+		}
+		hermes_set_irqmask(hw, 0);
+		hermes_write_regn(hw, EVACK, 0xffff);
 	}
-	hermes_set_irqmask(hw, 0);
-	hermes_write_regn(hw, EVACK, 0xffff);
+
+	/* firmware will have to reassociate */
+	priv->last_linkstatus = 0xffff;
+	priv->connected = 0;
 
 	return 0;
 }
@@ -656,38 +701,38 @@
 	if (err)
 		return err;
 
-	priv->open = 1;
-
 	err = __orinoco_up(dev);
+
+	if (! err)
+		priv->open = 1;
+
 	orinoco_unlock(priv, &flags);
 
 	return err;
 }
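The open/up split above is the heart of the new convention: every path that takes the hardware away increments hw_unavailable, every path that brings it back decrements it, and the interface is only (re)started once the count drops to zero while the user still has it open. A condensed sketch of the resume side, using the driver's own names (error handling omitted; the function name is ours):

	/* Sketch of the pattern the PCMCIA/PCI/PBOOK resume paths follow. */
	static void example_resume(struct net_device *dev)
	{
		struct orinoco_private *priv = dev->priv;
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		priv->hw_unavailable--;		/* our reason for "gone" is over */
		if (priv->open && !priv->hw_unavailable)
			__orinoco_up(dev);	/* nobody else holds it down */
		spin_unlock_irqrestore(&priv->lock, flags);
	}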
-static int orinoco_stop(struct net_device *dev)
+int orinoco_stop(struct net_device *dev)
 {
 	struct orinoco_private *priv = dev->priv;
 	int err = 0;
 
 	/* We mustn't use orinoco_lock() here, because we need to be
-	   able to close the interface, even if hw_unavailable is set
+	   able to close the interface even if hw_unavailable is set
 	   (e.g. as we're released after a PC Card removal) */
 	spin_lock_irq(&priv->lock);
 
 	priv->open = 0;
 
-	if (! priv->hw_unavailable)
-		err = __orinoco_down(dev);
+	err = __orinoco_down(dev);
 
 	spin_unlock_irq(&priv->lock);
 
 	return err;
 }
 
-static int __orinoco_program_rids(struct orinoco_private *priv)
+static int __orinoco_program_rids(struct net_device *dev)
 {
-	struct net_device *dev = priv->ndev;
+	struct orinoco_private *priv = dev->priv;
 	hermes_t *hw = &priv->hw;
 	int err;
 	struct hermes_idstring idbuf;
@@ -873,51 +918,84 @@
 }
 
 /* xyzzy */
-static int orinoco_reconfigure(struct orinoco_private *priv)
+static int orinoco_reconfigure(struct net_device *dev)
 {
+	struct orinoco_private *priv = dev->priv;
 	struct hermes *hw = &priv->hw;
 	unsigned long flags;
 	int err = 0;
 
-	orinoco_lock(priv, &flags);
+	if (priv->broken_disableport) {
+		schedule_work(&priv->reset_work);
+		return 0;
+	}
+
+	err = orinoco_lock(priv, &flags);
+	if (err)
+		return err;
+
+	err = hermes_disable_port(hw, 0);
 	if (err) {
-		printk(KERN_ERR "%s: Unable to disable port in orinco_reconfigure()\n",
-		       priv->ndev->name);
+		printk(KERN_WARNING "%s: Unable to disable port while reconfiguring card\n",
+		       dev->name);
+		priv->broken_disableport = 1;
 		goto out;
 	}
 
-	err = __orinoco_program_rids(priv);
-	if (err)
+	err = __orinoco_program_rids(dev);
+	if (err) {
+		printk(KERN_WARNING "%s: Unable to reconfigure card\n",
+		       dev->name);
 		goto out;
+	}
 
 	err = hermes_enable_port(hw, 0);
 	if (err) {
-		printk(KERN_ERR "%s: Unable to enable port in orinco_reconfigure()\n",
-		       priv->ndev->name);
+		printk(KERN_WARNING "%s: Unable to enable port while reconfiguring card\n",
+		       dev->name);
 		goto out;
 	}
 
 out:
+	if (err) {
+		printk(KERN_WARNING "%s: Resetting instead...\n", dev->name);
+		schedule_work(&priv->reset_work);
+		err = 0;
+	}
+
 	orinoco_unlock(priv, &flags);
 	return err;
 }
 
 /* This must be called from user context, without locks held - use
- * schedule_task() */
+ * schedule_work() */
 static void orinoco_reset(struct net_device *dev)
 {
 	struct orinoco_private *priv = dev->priv;
+	struct hermes *hw = &priv->hw;
 	int err;
 	unsigned long flags;
 
 	err = orinoco_lock(priv, &flags);
 	if (err)
+		/* When the hardware becomes available again, whatever
+		 * detects that is responsible for re-initializing
+		 * it.  So no need for anything further. */
 		return;
 
-	priv->hw_unavailable = 1;
+	netif_stop_queue(dev);
+
+	/* Shut off interrupts.
Depending on what state the hardware + * is in, this might not work, but we'll try anyway */ + hermes_set_irqmask(hw, 0); + hermes_write_regn(hw, EVACK, 0xffff); + + priv->hw_unavailable++; + priv->last_linkstatus = 0xffff; /* firmware will have to reassociate */ + priv->connected = 0; + orinoco_unlock(priv, &flags); if (priv->hard_reset) @@ -936,18 +1014,22 @@ return; } - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irq(&priv->lock); /* This has to be called from user context */ - priv->hw_unavailable = 0; + priv->hw_unavailable--; - err = __orinoco_up(dev); - if (err) { - printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n", - dev->name, err); - } else - dev->trans_start = jiffies; + /* priv->open or priv->hw_unavailable might have changed while + * we dropped the lock */ + if (priv->open && (! priv->hw_unavailable)) { + err = __orinoco_up(dev); + if (err) { + printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n", + dev->name, err); + } else + dev->trans_start = jiffies; + } - orinoco_unlock(priv, &flags); + spin_unlock_irq(&priv->lock); return; } @@ -979,10 +1061,18 @@ } } +/* Does the frame have a SNAP header indicating it should be + * de-encapsulated to Ethernet-II? */ static inline int -is_snap(struct header_struct *hdr) +is_ethersnap(struct header_struct *hdr) { - return (hdr->dsap == 0xAA) && (hdr->ssap == 0xAA) && (hdr->ctrl == 0x3); + /* We de-encapsulate all packets which, a) have SNAP headers + * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header + * and where b) the OUI of the SNAP header is 00:00:00 or + * 00:00:f8 - we need both because different APs appear to use + * different OUIs for some reason */ + return (memcmp(&hdr->dsap, &encaps_hdr, 5) == 0) + && ( (hdr->oui[2] == 0x00) || (hdr->oui[2] == 0xf8) ); } static void @@ -1140,7 +1230,8 @@ return 0; } -static int orinoco_hw_get_bssid(struct orinoco_private *priv, char buf[ETH_ALEN]) +static int orinoco_hw_get_bssid(struct orinoco_private *priv, + char buf[ETH_ALEN]) { hermes_t *hw = &priv->hw; int err = 0; @@ -1159,7 +1250,7 @@ } static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active, - char buf[IW_ESSID_MAX_SIZE+1]) + char buf[IW_ESSID_MAX_SIZE+1]) { hermes_t *hw = &priv->hw; int err = 0; @@ -1236,9 +1327,8 @@ } if ( (channel < 1) || (channel > NUM_CHANNELS) ) { - struct net_device *dev = priv->ndev; - - printk(KERN_WARNING "%s: Channel out of range (%d)!\n", dev->name, channel); + printk(KERN_WARNING "%s: Channel out of range (%d)!\n", + priv->ndev->name, channel); err = -EBUSY; goto out; @@ -1253,8 +1343,8 @@ return err ? err : freq; } -static int orinoco_hw_get_bitratelist(struct orinoco_private *priv, int *numrates, - s32 *rates, int max) +static int orinoco_hw_get_bitratelist(struct orinoco_private *priv, + int *numrates, s32 *rates, int max) { hermes_t *hw = &priv->hw; struct hermes_idstring list; @@ -1354,9 +1444,9 @@ */ void orinoco_interrupt(int irq, void *dev_id, struct pt_regs *regs) { - struct orinoco_private *priv = (struct orinoco_private *) dev_id; + struct net_device *dev = (struct net_device *)dev_id; + struct orinoco_private *priv = dev->priv; hermes_t *hw = &priv->hw; - struct net_device *dev = priv->ndev; int count = MAX_IRQLOOPS_PER_IRQ; u16 evstat, events; /* These are used to detect a runaway interrupt situation */ @@ -1380,11 +1470,11 @@ while (events && count--) { if (++loops_this_jiffy > MAX_IRQLOOPS_PER_JIFFY) { - printk(KERN_CRIT "%s: IRQ handler is looping too \ -much! 
Shutting down.\n", - dev->name); - /* Perform an emergency shutdown */ + printk(KERN_WARNING "%s: IRQ handler is looping too " + "much! Resetting.\n", dev->name); + /* Disable interrupts for now */ hermes_set_irqmask(hw, 0); + schedule_work(&priv->reset_work); break; } @@ -1395,21 +1485,21 @@ } if (events & HERMES_EV_TICK) - __orinoco_ev_tick(priv, hw); + __orinoco_ev_tick(dev, hw); if (events & HERMES_EV_WTERR) - __orinoco_ev_wterr(priv, hw); + __orinoco_ev_wterr(dev, hw); if (events & HERMES_EV_INFDROP) - __orinoco_ev_infdrop(priv, hw); + __orinoco_ev_infdrop(dev, hw); if (events & HERMES_EV_INFO) - __orinoco_ev_info(priv, hw); + __orinoco_ev_info(dev, hw); if (events & HERMES_EV_RX) - __orinoco_ev_rx(priv, hw); + __orinoco_ev_rx(dev, hw); if (events & HERMES_EV_TXEXC) - __orinoco_ev_txexc(priv, hw); + __orinoco_ev_txexc(dev, hw); if (events & HERMES_EV_TX) - __orinoco_ev_tx(priv, hw); + __orinoco_ev_tx(dev, hw); if (events & HERMES_EV_ALLOC) - __orinoco_ev_alloc(priv, hw); + __orinoco_ev_alloc(dev, hw); hermes_write_regn(hw, EVACK, events); @@ -1420,22 +1510,22 @@ orinoco_unlock(priv, &flags); } -static void __orinoco_ev_tick(struct orinoco_private *priv, hermes_t *hw) +static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw) { - printk(KERN_DEBUG "%s: TICK\n", priv->ndev->name); + printk(KERN_DEBUG "%s: TICK\n", dev->name); } -static void __orinoco_ev_wterr(struct orinoco_private *priv, hermes_t *hw) +static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw) { /* This seems to happen a fair bit under load, but ignoring it seems to work fine...*/ printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n", - priv->ndev->name); + dev->name); } -static void __orinoco_ev_infdrop(struct orinoco_private *priv, hermes_t *hw) +static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw) { - printk(KERN_WARNING "%s: Information frame lost.\n", priv->ndev->name); + printk(KERN_WARNING "%s: Information frame lost.\n", dev->name); } static void print_linkstatus(struct net_device *dev, u16 status) @@ -1472,9 +1562,9 @@ dev->name, s, status); } -static void __orinoco_ev_info(struct orinoco_private *priv, hermes_t *hw) +static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw) { - struct net_device *dev = priv->ndev; + struct orinoco_private *priv = dev->priv; u16 infofid; struct { u16 len; @@ -1573,9 +1663,9 @@ } } -static void __orinoco_ev_rx(struct orinoco_private *priv, hermes_t *hw) +static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw) { - struct net_device *dev = priv->ndev; + struct orinoco_private *priv = dev->priv; struct net_device_stats *stats = &priv->stats; struct iw_statistics *wstats = &priv->wstats; struct sk_buff *skb = NULL; @@ -1664,7 +1754,7 @@ * So, check ourselves */ if(((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) || ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) || - is_snap(&hdr)) { + is_ethersnap(&hdr)) { /* These indicate a SNAP within 802.2 LLC within 802.11 frame which we'll need to de-encapsulate to the original EthernetII frame. 
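 	 * (Concretely: the LLC/SNAP prefix is the eight bytes
 	 * AA AA 03 <oui0> <oui1> <oui2> <type-hi> <type-lo>;
 	 * de-encapsulation strips it and rebuilds an Ethernet-II
 	 * header from the two addresses plus that ethertype.)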
*/ @@ -1726,9 +1816,9 @@ return; } -static void __orinoco_ev_txexc(struct orinoco_private *priv, hermes_t *hw) +static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw) { - struct net_device *dev = priv->ndev; + struct orinoco_private *priv = dev->priv; struct net_device_stats *stats = &priv->stats; u16 fid = hermes_read_regn(hw, TXCOMPLFID); struct hermes_tx_descriptor desc; @@ -1752,8 +1842,9 @@ hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); } -static void __orinoco_ev_tx(struct orinoco_private *priv, hermes_t *hw) +static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw) { + struct orinoco_private *priv = dev->priv; struct net_device_stats *stats = &priv->stats; stats->tx_packets++; @@ -1761,9 +1852,10 @@ hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); } -static void __orinoco_ev_alloc(struct orinoco_private *priv, hermes_t *hw) +static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw) { - struct net_device *dev = priv->ndev; + struct orinoco_private *priv = dev->priv; + u16 fid = hermes_read_regn(hw, ALLOCFID); if (fid != priv->txfid) { @@ -1945,7 +2037,7 @@ TRACE_ENTER(dev->name); - /* No need to lock, the resetting flag is already set in + /* No need to lock, the hw_unavailable flag is already set in * alloc_orinocodev() */ priv->nicbuf_size = IEEE802_11_FRAME_LEN + ETH_HLEN; @@ -2081,8 +2173,6 @@ priv->wep_on = 0; priv->tx_key = 0; - priv->hw_unavailable = 0; - err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid); if (err == -EIO) { /* Try workaround for old Symbol firmware bug */ @@ -2102,6 +2192,12 @@ goto out; } + /* Make the hardware available, as long as it hasn't been + * removed elsewhere (e.g. by PCMCIA hot unplug) */ + spin_lock_irq(&priv->lock); + priv->hw_unavailable--; + spin_unlock_irq(&priv->lock); + printk(KERN_DEBUG "%s: ready\n", dev->name); out: @@ -2267,7 +2363,7 @@ /* Length of the packet body */ /* FIXME: what if the skb is smaller than this? */ - len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN); + len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN); eh = (struct ethhdr *)skb->data; @@ -2281,6 +2377,12 @@ goto fail; } + /* Clear the 802.11 header and data length fields - some + * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused + * if this isn't done. */ + hermes_clear_words(hw, HERMES_DATA0, + HERMES_802_3_OFFSET - HERMES_802_11_OFFSET); + /* Encapsulate Ethernet-II frames */ if (ntohs(eh->h_proto) > 1500) { /* Ethernet-II frame */ struct header_struct hdr; @@ -2362,7 +2464,7 @@ stats->tx_errors++; - schedule_task(&priv->timeout_task); + schedule_work(&priv->reset_work); } static int @@ -2532,7 +2634,7 @@ } err = orinoco_hw_get_bitratelist(priv, &numrates, - range.bitrate, IW_MAX_BITRATES); + range.bitrate, IW_MAX_BITRATES); if (err) return err; range.num_bitrates = numrates; @@ -3128,7 +3230,7 @@ rrq->value = 5500000; else rrq->value = val * 1000000; - break; + break; case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */ case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */ for (i = 0; i < BITRATE_TABLE_SIZE; i++) @@ -3754,7 +3856,7 @@ printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); - schedule_task(&priv->timeout_task); + schedule_work(&priv->reset_work); break; case SIOCIWFIRSTPRIV + 0x2: /* set_port3 */ @@ -3839,7 +3941,7 @@ } if (! 
err && changed && netif_running(dev)) { - err = orinoco_reconfigure(priv); + err = orinoco_reconfigure(dev); } TRACE_EXIT(dev->name); @@ -3924,7 +4026,7 @@ DEBUG_REC(PRIID,WORDS), DEBUG_REC(PRISUPRANGE,WORDS), DEBUG_REC(CFIACTRANGES,WORDS), - DEBUG_REC(NICSERNUM,WORDS), + DEBUG_REC(NICSERNUM,XSTRING), DEBUG_REC(NICID,WORDS), DEBUG_REC(MFISUPRANGE,WORDS), DEBUG_REC(CFISUPRANGE,WORDS), @@ -4062,7 +4164,7 @@ priv->hw_unavailable = 1; /* orinoco_init() must clear this * before anything else touches the * hardware */ - INIT_TQUEUE(&priv->timeout_task, (void (*)(void *))orinoco_reset, dev); + INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev); priv->last_linkstatus = 0xffff; priv->connected = 0; @@ -4079,13 +4181,14 @@ EXPORT_SYMBOL(__orinoco_up); EXPORT_SYMBOL(__orinoco_down); +EXPORT_SYMBOL(orinoco_stop); EXPORT_SYMBOL(orinoco_reinit_firmware); EXPORT_SYMBOL(orinoco_interrupt); /* Can't be declared "const" or the whole __initdata section will * become const */ -static char version[] __initdata = "orinoco.c 0.13b (David Gibson and others)"; +static char version[] __initdata = "orinoco.c 0.13d (David Gibson and others)"; static int __init init_orinoco(void) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wireless/orinoco_cs.c linux.21rc5-ac2/drivers/net/wireless/orinoco_cs.c --- linux.21rc5/drivers/net/wireless/orinoco_cs.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wireless/orinoco_cs.c 2003-04-23 14:29:39.000000000 +0100 @@ -1,4 +1,4 @@ -/* orinoco_cs.c 0.13b - (formerly known as dldwd_cs.c) +/* orinoco_cs.c 0.13d - (formerly known as dldwd_cs.c) * * A driver for "Hermes" chipset based PCMCIA wireless adaptors, such * as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/ @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -38,7 +37,6 @@ #include #include #include -#include #include "orinoco.h" @@ -269,19 +267,12 @@ return; } - /* - If the device is currently configured and active, we won't - actually delete it yet. Instead, it is marked so that when - the release() function is called, that will trigger a proper - detach(). - */ if (link->state & DEV_CONFIG) { -#ifdef PCMCIA_DEBUG - printk(KERN_DEBUG "orinoco_cs: detach postponed, '%s' " - "still locked\n", link->dev->dev_name); -#endif - link->state |= DEV_STALE_LINK; - return; + orinoco_cs_release((u_long)link); + if (link->state & DEV_CONFIG) { + link->state |= DEV_STALE_LINK; + return; + } } /* Break the link with Card Services */ @@ -472,7 +463,7 @@ link->irq.IRQInfo2 |= 1 << irq_list[i]; link->irq.Handler = orinoco_interrupt; - link->irq.Instance = priv; + link->irq.Instance = dev; CS_CHECK(RequestIRQ, link->handle, &link->irq); } @@ -549,18 +540,13 @@ dev_link_t *link = (dev_link_t *) arg; struct net_device *dev = link->priv; struct orinoco_private *priv = dev->priv; + unsigned long flags; - /* - If the device is currently in use, we won't release until it - is actually closed, because until then, we can't be sure that - no one will try to access the device or its data structures. 
- */ - if (priv->open) { - DEBUG(0, "orinoco_cs: release postponed, '%s' still open\n", - link->dev->dev_name); - link->state |= DEV_STALE_CONFIG; - return; - } + /* We're committed to taking the device away now, so mark the + * hardware as unavailable */ + spin_lock_irqsave(&priv->lock, flags); + priv->hw_unavailable++; + spin_unlock_irqrestore(&priv->lock, flags); /* Don't bother checking to see if these succeed or not */ CardServices(ReleaseConfiguration, link->handle); @@ -593,14 +579,9 @@ orinoco_lock(priv, &flags); netif_device_detach(dev); - priv->hw_unavailable = 1; + priv->hw_unavailable++; orinoco_unlock(priv, &flags); - -/* if (link->open) */ -/* orinoco_cs_stop(dev); */ - - mod_timer(&link->release, jiffies + HZ / 20); } break; @@ -619,13 +600,8 @@ a better way, short of rewriting the PCMCIA layer to not suck :-( */ if (! test_bit(0, &card->hard_reset_in_progress)) { - err = orinoco_lock(priv, &flags); - if (err) { - printk(KERN_ERR "%s: hw_unavailable on SUSPEND/RESET_PHYSICAL\n", - dev->name); - break; - } - + spin_lock_irqsave(&priv->lock, flags); + err = __orinoco_down(dev); if (err) printk(KERN_WARNING "%s: %s: Error %d downing interface\n", @@ -634,9 +610,9 @@ err); netif_device_detach(dev); - priv->hw_unavailable = 1; - - orinoco_unlock(priv, &flags); + priv->hw_unavailable++; + + spin_unlock_irqrestore(&priv->lock, flags); } CardServices(ReleaseConfiguration, link->handle); @@ -653,10 +629,6 @@ CardServices(RequestConfiguration, link->handle, &link->conf); - /* If we're only getting these events because - of the ResetCard in the hard reset, we - don't need to do anything - orinoco_reset() - will handle reinitialization. */ if (! test_bit(0, &card->hard_reset_in_progress)) { err = orinoco_reinit_firmware(dev); if (err) { @@ -668,9 +640,9 @@ spin_lock_irqsave(&priv->lock, flags); netif_device_attach(dev); - priv->hw_unavailable = 0; + priv->hw_unavailable--; - if (priv->open) { + if (priv->open && ! 
priv->hw_unavailable) { err = __orinoco_up(dev); if (err) printk(KERN_ERR "%s: Error %d restarting card\n", @@ -678,7 +650,7 @@ } - orinoco_unlock(priv, &flags); + spin_unlock_irqrestore(&priv->lock, flags); } } break; @@ -693,7 +665,7 @@ /* Can't be declared "const" or the whole __initdata section will * become const */ -static char version[] __initdata = "orinoco_cs.c 0.13b (David Gibson and others)"; +static char version[] __initdata = "orinoco_cs.c 0.13d (David Gibson and others)"; static int __init init_orinoco_cs(void) @@ -722,7 +694,6 @@ if (dev_list) DEBUG(0, "orinoco_cs: Removing leftover devices.\n"); while (dev_list != NULL) { - del_timer(&dev_list->release); if (dev_list->state & DEV_CONFIG) orinoco_cs_release((u_long) dev_list); orinoco_cs_detach(dev_list); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wireless/orinoco.h linux.21rc5-ac2/drivers/net/wireless/orinoco.h --- linux.21rc5/drivers/net/wireless/orinoco.h 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wireless/orinoco.h 2003-05-29 01:44:06.000000000 +0100 @@ -11,9 +11,20 @@ #include #include #include -#include +#include #include "hermes.h" +/* Workqueue / task queue backwards compatibility stuff */ + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41) +#include +#else +#include +#define work_struct tq_struct +#define INIT_WORK INIT_TQUEUE +#define schedule_work schedule_task +#endif + /* To enable debug messages */ //#define ORINOCO_DEBUG 3 @@ -42,7 +53,7 @@ /* Synchronisation stuff */ spinlock_t lock; int hw_unavailable; - struct tq_struct timeout_task; + struct work_struct reset_work; /* driver state */ int open; @@ -72,6 +83,7 @@ int has_sensitivity; int nicbuf_size; u16 channel_mask; + int broken_disableport; /* Configuration paramaters */ u32 iw_mode; @@ -111,8 +123,8 @@ int (*hard_reset)(struct orinoco_private *)); extern int __orinoco_up(struct net_device *dev); extern int __orinoco_down(struct net_device *dev); -int orinoco_reinit_firmware(struct net_device *dev); - +extern int orinoco_stop(struct net_device *dev); +extern int orinoco_reinit_firmware(struct net_device *dev); extern void orinoco_interrupt(int irq, void * dev_id, struct pt_regs *regs); /********************************************************************/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wireless/orinoco_pci.c linux.21rc5-ac2/drivers/net/wireless/orinoco_pci.c --- linux.21rc5/drivers/net/wireless/orinoco_pci.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wireless/orinoco_pci.c 2003-04-23 14:29:39.000000000 +0100 @@ -1,4 +1,4 @@ -/* orinoco_pci.c 0.13b +/* orinoco_pci.c 0.13d * * Driver for Prism II devices that have a direct PCI interface * (i.e., not in a Pcmcia or PLX bridge) @@ -143,8 +143,6 @@ unsigned long timeout; u16 reg; - TRACE_ENTER(priv->ndev->name); - /* Assert the reset until the card notice */ hermes_write_regn(hw, PCI_COR, HERMES_PCI_COR_MASK); printk(KERN_NOTICE "Reset done"); @@ -181,8 +179,6 @@ } printk(KERN_NOTICE "pci_cor : reg = 0x%X - %lX - %lX\n", reg, timeout, jiffies); - TRACE_EXIT(priv->ndev->name); - return 0; } @@ -232,7 +228,7 @@ hermes_struct_init(&(priv->hw), dev->base_addr, HERMES_MEM, HERMES_32BIT_REGSPACING); pci_set_drvdata(pdev, dev); - err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name, priv); + err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name, dev); if (err) { printk(KERN_ERR "orinoco_pci: Error allocating IRQ %d.\n", pdev->irq); err = 
-EBUSY; @@ -264,7 +260,7 @@ unregister_netdev(dev); if (dev->irq) - free_irq(dev->irq, priv); + free_irq(dev->irq, dev); kfree(dev); } @@ -286,7 +282,7 @@ unregister_netdev(dev); if (dev->irq) - free_irq(dev->irq, priv); + free_irq(dev->irq, dev); if (priv->hw.iobase) iounmap((unsigned char *) priv->hw.iobase); @@ -321,7 +317,7 @@ netif_device_detach(dev); - priv->hw_unavailable = 1; + priv->hw_unavailable++; orinoco_unlock(priv, &flags); @@ -348,15 +344,15 @@ netif_device_attach(dev); - if (priv->open) { + priv->hw_unavailable--; + + if (priv->open && (! priv->hw_unavailable)) { err = __orinoco_up(dev); if (err) printk(KERN_ERR "%s: Error %d restarting card on orinoco_pci_resume()\n", dev->name, err); } - priv->hw_unavailable = 0; - spin_unlock_irqrestore(&priv->lock, flags); return 0; @@ -378,7 +374,7 @@ .resume = orinoco_pci_resume, }; -static char version[] __initdata = "orinoco_pci.c 0.13b (David Gibson & Jean Tourrilhes )"; +static char version[] __initdata = "orinoco_pci.c 0.13d (David Gibson & Jean Tourrilhes )"; MODULE_AUTHOR("David Gibson "); MODULE_DESCRIPTION("Driver for wireless LAN cards using direct PCI interface"); MODULE_LICENSE("Dual MPL/GPL"); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wireless/orinoco_plx.c linux.21rc5-ac2/drivers/net/wireless/orinoco_plx.c --- linux.21rc5/drivers/net/wireless/orinoco_plx.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wireless/orinoco_plx.c 2003-04-23 14:29:39.000000000 +0100 @@ -1,4 +1,4 @@ -/* orinoco_plx.c 0.13b +/* orinoco_plx.c 0.13d * * Driver for Prism II devices which would usually be driven by orinoco_cs, * but are connected to the PCI bus by a PLX9052. @@ -243,7 +243,7 @@ HERMES_IO, HERMES_16BIT_REGSPACING); pci_set_drvdata(pdev, dev); - err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name, priv); + err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name, dev); if (err) { printk(KERN_ERR "orinoco_plx: Error allocating IRQ %d.\n", pdev->irq); err = -EBUSY; @@ -266,7 +266,7 @@ unregister_netdev(dev); if (dev->irq) - free_irq(dev->irq, priv); + free_irq(dev->irq, dev); kfree(priv); } @@ -285,7 +285,6 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); - struct orinoco_private *priv = dev->priv; if (! dev) BUG(); @@ -293,7 +292,7 @@ unregister_netdev(dev); if (dev->irq) - free_irq(dev->irq, priv); + free_irq(dev->irq, dev); pci_set_drvdata(pdev, NULL); @@ -334,7 +333,7 @@ .resume = 0, }; -static char version[] __initdata = "orinoco_plx.c 0.13b (Daniel Barlow , David Gibson )"; +static char version[] __initdata = "orinoco_plx.c 0.13d (Daniel Barlow , David Gibson )"; MODULE_AUTHOR("Daniel Barlow "); MODULE_DESCRIPTION("Driver for wireless LAN cards using the PLX9052 PCI bridge"); #ifdef MODULE_LICENSE diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/net/wireless/orinoco_tmd.c linux.21rc5-ac2/drivers/net/wireless/orinoco_tmd.c --- linux.21rc5/drivers/net/wireless/orinoco_tmd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/net/wireless/orinoco_tmd.c 2003-04-23 14:29:39.000000000 +0100 @@ -0,0 +1,240 @@ +/* orinoco_tmd.c 0.01 + * + * Driver for Prism II devices which would usually be driven by orinoco_cs, + * but are connected to the PCI bus by a TMD7160. 
+ *
+ * Copyright (C) 2003 Joerg Dorchain
+ * based heavily upon orinoco_plx.c Copyright (C) 2001 Daniel Barlow
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+
+ * Caution: this is experimental and probably buggy. For success and
+ * failure reports for different cards and adaptors, see
+ * orinoco_tmd_pci_id_table near the end of the file. If you have a
+ * card we don't have the PCI id for, and it looks like it should work,
+ * drop me mail with the id and "it works"/"it doesn't work".
+ *
+ * Note: if everything gets detected fine but it doesn't actually send
+ * or receive packets, your first port of call should probably be to
+ * try newer firmware in the card, especially if you're doing Ad-Hoc
+ * modes.
+ *
+ * The actual driving is done by orinoco.c; this is just resource
+ * allocation stuff.
+ *
+ * This driver is modeled after the orinoco_plx driver. The main
+ * difference is that the TMD chip has only IO port ranges and no
+ * memory space, i.e. no access to the CIS. Compared to the PLX chip,
+ * the io range functionalities are exchanged.
+ *
+ * Pheecom sells cards with the TMD chip as "ASIC version"
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+
+#include "hermes.h"
+#include "orinoco.h"
+
+static char dev_info[] = "orinoco_tmd";
+
+#define COR_VALUE	(COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with level triggered irqs and irq requests */
+
+
+static int orinoco_tmd_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	int err = 0;
+	u32 reg, addr;
+	struct orinoco_private *priv = NULL;
+	unsigned long pccard_ioaddr = 0;
+	unsigned long pccard_iolen = 0;
+	struct net_device *dev = NULL;
+	int netdev_registered = 0;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return -EIO;
+
+	printk(KERN_DEBUG "TMD setup\n");
+	pccard_ioaddr = pci_resource_start(pdev, 2);
+	pccard_iolen = pci_resource_len(pdev, 2);
+	if (!
request_region(pccard_ioaddr, pccard_iolen, dev_info)) { + printk(KERN_ERR "orinoco_tmd: I/O resource at 0x%lx len 0x%lx busy\n", + pccard_ioaddr, pccard_iolen); + pccard_ioaddr = 0; + err = -EBUSY; + goto fail; + } + addr = pci_resource_start(pdev, 1); + outb(COR_VALUE, addr); + mdelay(1); + reg = inb(addr); + if (reg != COR_VALUE) { + printk(KERN_ERR "orinoco_tmd: Error setting TMD COR values %x should be %x\n", reg, COR_VALUE); + err = -EIO; + goto fail; + } + + dev = alloc_orinocodev(0, NULL); + if (! dev) { + err = -ENOMEM; + goto fail; + } + + priv = dev->priv; + dev->base_addr = pccard_ioaddr; + SET_MODULE_OWNER(dev); + + printk(KERN_DEBUG + "Detected Orinoco/Prism2 TMD device at %s irq:%d, io addr:0x%lx\n", + pdev->slot_name, pdev->irq, pccard_ioaddr); + + hermes_struct_init(&(priv->hw), dev->base_addr, + HERMES_IO, HERMES_16BIT_REGSPACING); + pci_set_drvdata(pdev, dev); + + err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name, + dev); + if (err) { + printk(KERN_ERR "orinoco_tmd: Error allocating IRQ %d.\n", + pdev->irq); + err = -EBUSY; + goto fail; + } + dev->irq = pdev->irq; + + err = register_netdev(dev); + if (err) + goto fail; + netdev_registered = 1; + + return 0; /* succeeded */ + + fail: + printk(KERN_DEBUG "orinoco_tmd: init_one(), FAIL!\n"); + + if (priv) { + if (dev->irq) + free_irq(dev->irq, dev); + + kfree(priv); + } + + if (pccard_ioaddr) + release_region(pccard_ioaddr, pccard_iolen); + + pci_disable_device(pdev); + + return err; +} + +static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (! dev) + BUG(); + + unregister_netdev(dev); + + if (dev->irq) + free_irq(dev->irq, dev); + + pci_set_drvdata(pdev, NULL); + + kfree(dev); + + release_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); + + pci_disable_device(pdev); +} + + +static struct pci_device_id orinoco_tmd_pci_id_table[] __devinitdata = { + {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. 
pheecom */ + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, orinoco_tmd_pci_id_table); + +static struct pci_driver orinoco_tmd_driver = { + .name = "orinoco_tmd", + .id_table = orinoco_tmd_pci_id_table, + .probe = orinoco_tmd_init_one, + .remove = __devexit_p(orinoco_tmd_remove_one), + .suspend = 0, + .resume = 0, +}; + +static char version[] __initdata = "orinoco_tmd.c 0.01 (Joerg Dorchain )"; +MODULE_AUTHOR("Joerg Dorchain "); +MODULE_DESCRIPTION("Driver for wireless LAN cards using the TMD7160 PCI bridge"); +#ifdef MODULE_LICENSE +MODULE_LICENSE("Dual MPL/GPL"); +#endif + +static int __init orinoco_tmd_init(void) +{ + printk(KERN_DEBUG "%s\n", version); + return pci_module_init(&orinoco_tmd_driver); +} + +extern void __exit orinoco_tmd_exit(void) +{ + pci_unregister_driver(&orinoco_tmd_driver); + current->state = TASK_UNINTERRUPTIBLE; + schedule_timeout(HZ); +} + +module_init(orinoco_tmd_init); +module_exit(orinoco_tmd_exit); + +/* + * Local variables: + * c-indent-level: 8 + * c-basic-offset: 8 + * tab-width: 8 + * End: + */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/pci/pci.ids linux.21rc5-ac2/drivers/pci/pci.ids --- linux.21rc5/drivers/pci/pci.ids 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/pci/pci.ids 2003-05-27 14:06:01.000000000 +0100 @@ -2751,6 +2751,7 @@ 1458 5004 GA-7VAX Mainboard 3109 VT8233C PCI to ISA Bridge 3112 VT8361 [KLE133] Host Bridge + 3116 VT8375 [KM266/KL266 AGP] Host Bridge 3128 VT8753 [P4X266 AGP] 3133 VT3133 Host Bridge 3147 VT8233A ISA Bridge diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/pcmcia/rsrc_mgr.c linux.21rc5-ac2/drivers/pcmcia/rsrc_mgr.c --- linux.21rc5/drivers/pcmcia/rsrc_mgr.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/pcmcia/rsrc_mgr.c 2003-05-20 20:29:40.000000000 +0100 @@ -419,14 +419,17 @@ void validate_mem(int (*is_valid)(u_long), int (*do_cksum)(u_long), int force_low, socket_info_t *s) { - resource_map_t *m; + resource_map_t *m, *n; static int done = 0; if (!probe_mem || done++) return; - for (m = mem_db.next; m != &mem_db; m = m->next) + + for (m = mem_db.next; m != &mem_db; m = n) { + n = m->next; if (do_mem_probe(m->base, m->num, is_valid, do_cksum, s)) return; + } } #endif /* CONFIG_ISA */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/pcmcia/ti113x.h linux.21rc5-ac2/drivers/pcmcia/ti113x.h --- linux.21rc5/drivers/pcmcia/ti113x.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/pcmcia/ti113x.h 2003-05-28 21:48:47.000000000 +0100 @@ -167,6 +167,27 @@ new |= I365_INTR_ENA; if (new != reg) exca_writeb(socket, I365_INTCTL, new); + + /* + * If ISA interrupts don't work, then fall back to routing card + * interrupts to the PCI interrupt of the socket. 
+ */ + if (!socket->cap.irq_mask) { + int irqmux, devctl; + + printk (KERN_INFO "ti113x: Routing card interrupts to PCI\n"); + + devctl = config_readb(socket, TI113X_DEVICE_CONTROL); + devctl &= ~TI113X_DCR_IMODE_MASK; + + irqmux = config_readl(socket, TI122X_IRQMUX); + irqmux = (irqmux & ~0x0f) | 0x02; /* route INTA */ + irqmux = (irqmux & ~0xf0) | 0x20; /* route INTB */ + + config_writel(socket, TI122X_IRQMUX, irqmux); + config_writeb(socket, TI113X_DEVICE_CONTROL, devctl); + } + return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/pnp/Config.in linux.21rc5-ac2/drivers/pnp/Config.in --- linux.21rc5/drivers/pnp/Config.in 2003-05-28 15:08:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/pnp/Config.in 2003-04-22 16:44:37.000000000 +0100 @@ -8,4 +8,8 @@ dep_tristate ' ISA Plug and Play support' CONFIG_ISAPNP $CONFIG_PNP +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + dep_bool ' PNPBIOS support (EXPERIMENTAL)' CONFIG_PNPBIOS $CONFIG_PNP +fi + endmenu diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/pnp/Makefile linux.21rc5-ac2/drivers/pnp/Makefile --- linux.21rc5/drivers/pnp/Makefile 2003-05-28 15:08:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/pnp/Makefile 2003-04-22 16:44:37.000000000 +0100 @@ -10,15 +10,22 @@ O_TARGET := pnp.o -export-objs := isapnp.o -list-multi := isa-pnp.o +export-objs := isapnp.o pnpbios_core.o +multi-objs := isa-pnp.o pnpbios.o -proc-$(CONFIG_PROC_FS) = isapnp_proc.o -isa-pnp-objs := isapnp.o quirks.o $(proc-y) +isa-pnp-proc-$(CONFIG_PROC_FS) = isapnp_proc.o +pnpbios-proc-$(CONFIG_PROC_FS) = pnpbios_proc.o + +isa-pnp-objs := isapnp.o quirks.o $(isa-pnp-proc-y) +pnpbios-objs := pnpbios_core.o $(pnpbios-proc-y) obj-$(CONFIG_ISAPNP) += isa-pnp.o +obj-$(CONFIG_PNPBIOS) += pnpbios.o include $(TOPDIR)/Rules.make isa-pnp.o: $(isa-pnp-objs) $(LD) $(LD_RFLAG) -r -o $@ $(isa-pnp-objs) + +pnpbios.o: $(pnpbios-objs) + $(LD) $(LD_RFLAG) -r -o $@ $(pnpbios-objs) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/pnp/pnpbios_core.c linux.21rc5-ac2/drivers/pnp/pnpbios_core.c --- linux.21rc5/drivers/pnp/pnpbios_core.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/pnp/pnpbios_core.c 2003-04-23 14:39:14.000000000 +0100 @@ -0,0 +1,1352 @@ +/* + * PnP BIOS services + * + * Originally (C) 1998 Christian Schmidt + * Modifications (c) 1998 Tom Lees + * Minor reorganizations by David Hinds + * Modifications (c) 2001,2002 by Thomas Hood + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * References:
+ *   Compaq Computer Corporation, Phoenix Technologies Ltd., Intel Corporation
+ *   Plug and Play BIOS Specification, Version 1.0A, May 5, 1994
+ *   Plug and Play BIOS Clarification Paper, October 6, 1994
+ *
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+
+/*
+ *
+ * PnP BIOS INTERFACE
+ *
+ */
+
+/* PnP BIOS signature: "$PnP" */
+#define PNP_SIGNATURE   (('$' << 0) + ('P' << 8) + ('n' << 16) + ('P' << 24))
+
+#pragma pack(1)
+union pnp_bios_expansion_header {
+	struct {
+		u32 signature;    /* "$PnP" */
+		u8 version;	  /* in BCD */
+		u8 length;	  /* length in bytes, currently 21h */
+		u16 control;	  /* system capabilities */
+		u8 checksum;	  /* all bytes must add up to 0 */
+
+		u32 eventflag;    /* phys. address of the event flag */
+		u16 rmoffset;     /* real mode entry point */
+		u16 rmcseg;
+		u16 pm16offset;   /* 16 bit protected mode entry */
+		u32 pm16cseg;
+		u32 deviceID;	  /* EISA encoded system ID or 0 */
+		u16 rmdseg;	  /* real mode data segment */
+		u32 pm16dseg;	  /* 16 bit pm data segment base */
+	} fields;
+	char chars[0x21];	  /* To calculate the checksum */
+};
+#pragma pack()
+
+static struct {
+	u16	offset;
+	u16	segment;
+} pnp_bios_callpoint;
+
+static union pnp_bios_expansion_header * pnp_bios_hdr = NULL;
+
+/* The PnP BIOS entries in the GDT */
+#define PNP_GDT    (0x0060)
+#define PNP_CS32   (PNP_GDT+0x00)	/* segment for calling fn */
+#define PNP_CS16   (PNP_GDT+0x08)	/* code segment for BIOS */
+#define PNP_DS     (PNP_GDT+0x10)	/* data segment for BIOS */
+#define PNP_TS1    (PNP_GDT+0x18)	/* transfer data segment */
+#define PNP_TS2    (PNP_GDT+0x20)	/* another data segment */
+
+/*
+ * These are some opcodes for a "static asmlinkage"
+ * As this code is *not* executed inside the linux kernel segment, but in an
+ * alias at offset 0, we need a far return that the compiler cannot generate
+ * by default (please, prove me wrong! this is *really* ugly!)
+ * This is the only way to get the BIOS to return into the kernel code,
+ * because the BIOS code runs in 16 bit protected mode and therefore can only
+ * return to the caller if the call is within the first 64kB, and the linux
+ * kernel begins at offset 3GB...
+ */
+
+asmlinkage void pnp_bios_callfunc(void);
+
+__asm__(
+	".text			\n"
+	__ALIGN_STR "\n"
+	SYMBOL_NAME_STR(pnp_bios_callfunc) ":\n"
+	"	pushl %edx	\n"
+	"	pushl %ecx	\n"
+	"	pushl %ebx	\n"
+	"	pushl %eax	\n"
+	"	lcallw " SYMBOL_NAME_STR(pnp_bios_callpoint) "\n"
+	"	addl $16, %esp	\n"
+	"	lret		\n"
+	".previous		\n"
+);
+
+#define Q_SET_SEL(selname, address, size) \
+set_base (gdt [(selname) >> 3], __va((u32)(address))); \
+set_limit (gdt [(selname) >> 3], size)
+
+#define Q2_SET_SEL(selname, address, size) \
+set_base (gdt [(selname) >> 3], (u32)(address)); \
+set_limit (gdt [(selname) >> 3], size)
+
+/*
+ * At some point we want to use this stack frame pointer to unwind
+ * after PnP BIOS oopses.
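+ * (Presumably the idea: the i386 fault handler can recognize a fault
+ * taken inside the BIOS segments, set pnp_bios_is_utter_crap, and
+ * resume at the pnp_bios_fault_eip/pnp_bios_fault_esp saved around
+ * the lcall below; that is what turns a BIOS oops into the warning
+ * printed by call_pnp_bios() instead of a kernel crash.)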
+ */ + +u32 pnp_bios_fault_esp; +u32 pnp_bios_fault_eip; +u32 pnp_bios_is_utter_crap = 0; + +static spinlock_t pnp_bios_lock; + +static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, + u16 arg4, u16 arg5, u16 arg6, u16 arg7, + void *ts1_base, u32 ts1_size, + void *ts2_base, u32 ts2_size) +{ + unsigned long flags; + u16 status; + + /* + * PnP BIOSes are generally not terribly re-entrant. + * Also, don't rely on them to save everything correctly. + */ + if(pnp_bios_is_utter_crap) + return PNP_FUNCTION_NOT_SUPPORTED; + + /* On some boxes IRQ's during PnP BIOS calls are deadly. */ + spin_lock_irqsave(&pnp_bios_lock, flags); + + if (ts1_size) + Q2_SET_SEL(PNP_TS1, ts1_base, ts1_size); + if (ts2_size) + Q2_SET_SEL(PNP_TS2, ts2_base, ts2_size); + + __asm__ __volatile__( + "pushl %%ebp\n\t" + "pushl %%edi\n\t" + "pushl %%esi\n\t" + "pushl %%ds\n\t" + "pushl %%es\n\t" + "pushl %%fs\n\t" + "pushl %%gs\n\t" + "pushfl\n\t" + "movl %%esp, pnp_bios_fault_esp\n\t" + "movl $1f, pnp_bios_fault_eip\n\t" + "lcall %5,%6\n\t" + "1:popfl\n\t" + "popl %%gs\n\t" + "popl %%fs\n\t" + "popl %%es\n\t" + "popl %%ds\n\t" + "popl %%esi\n\t" + "popl %%edi\n\t" + "popl %%ebp\n\t" + : "=a" (status) + : "0" ((func) | (((u32)arg1) << 16)), + "b" ((arg2) | (((u32)arg3) << 16)), + "c" ((arg4) | (((u32)arg5) << 16)), + "d" ((arg6) | (((u32)arg7) << 16)), + "i" (PNP_CS32), + "i" (0) + : "memory" + ); + spin_unlock_irqrestore(&pnp_bios_lock, flags); + + /* If we get here and this is set then the PnP BIOS faulted on us. */ + if(pnp_bios_is_utter_crap) + { + printk(KERN_ERR "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue.\n"); + printk(KERN_ERR "PnPBIOS: You may need to reboot with the \"pnpbios=off\" option to operate stably.\n"); + printk(KERN_ERR "PnPBIOS: Check with your vendor for an updated BIOS.\n"); + } + + return status; +} + + +/* + * + * UTILITY FUNCTIONS + * + */ + +static void pnpbios_warn_unexpected_status(const char * module, u16 status) +{ + printk(KERN_ERR "PnPBIOS: %s: Unexpected status 0x%x\n", module, status); +} + +void *pnpbios_kmalloc(size_t size, int f) +{ + void *p = kmalloc( size, f ); + if ( p == NULL ) + printk(KERN_ERR "PnPBIOS: kmalloc() failed\n"); + return p; +} + +/* + * Call this only after init time + */ +static inline int pnp_bios_present(void) +{ + return (pnp_bios_hdr != NULL); +} + +/* Forward declaration */ +static void update_devlist( u8 nodenum, struct pnp_bios_node *data ); + + +/* + * + * PnP BIOS ACCESS FUNCTIONS + * + */ + +#define PNP_GET_NUM_SYS_DEV_NODES 0x00 +#define PNP_GET_SYS_DEV_NODE 0x01 +#define PNP_SET_SYS_DEV_NODE 0x02 +#define PNP_GET_EVENT 0x03 +#define PNP_SEND_MESSAGE 0x04 +#define PNP_GET_DOCKING_STATION_INFORMATION 0x05 +#define PNP_SET_STATIC_ALLOCED_RES_INFO 0x09 +#define PNP_GET_STATIC_ALLOCED_RES_INFO 0x0a +#define PNP_GET_APM_ID_TABLE 0x0b +#define PNP_GET_PNP_ISA_CONFIG_STRUC 0x40 +#define PNP_GET_ESCD_INFO 0x41 +#define PNP_READ_ESCD 0x42 +#define PNP_WRITE_ESCD 0x43 + +/* + * Call PnP BIOS with function 0x00, "get number of system device nodes" + */ +static int __pnp_bios_dev_node_info(struct pnp_dev_node_info *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2, PNP_TS1, PNP_DS, 0, 0, + data, sizeof(struct pnp_dev_node_info), 0, 0); + data->no_nodes &= 0xff; + return status; +} + +int pnp_bios_dev_node_info(struct pnp_dev_node_info *data) +{ + int status = __pnp_bios_dev_node_info( data ); + if ( status ) + 
pnpbios_warn_unexpected_status( "dev_node_info", status ); + return status; +} + +/* + * Note that some PnP BIOSes (e.g., on Sony Vaio laptops) die a horrible + * death if they are asked to access the "current" configuration. + * Therefore, if it's a matter of indifference, it's better to call + * get_dev_node() and set_dev_node() with boot=1 rather than with boot=0. + */ + +/* + * Call PnP BIOS with function 0x01, "get system device node" + * Input: *nodenum = desired node, + * boot = whether to get nonvolatile boot (!=0) + * or volatile current (0) config + * Output: *nodenum=next node or 0xff if no more nodes + */ +static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + if ( !boot & pnpbios_dont_use_current_config ) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0, + nodenum, sizeof(char), data, 65536); + return status; +} + +int pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data) +{ + int status; + status = __pnp_bios_get_dev_node( nodenum, boot, data ); + if ( status ) + pnpbios_warn_unexpected_status( "get_dev_node", status ); + return status; +} + + +/* + * Call PnP BIOS with function 0x02, "set system device node" + * Input: *nodenum = desired node, + * boot = whether to set nonvolatile boot (!=0) + * or volatile current (0) config + */ +static int __pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + if ( !boot & pnpbios_dont_use_current_config ) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1, boot ? 
2 : 1, PNP_DS, 0, 0, + data, 65536, 0, 0); + return status; +} + +int pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data) +{ + int status; + status = __pnp_bios_set_dev_node( nodenum, boot, data ); + if ( status ) { + pnpbios_warn_unexpected_status( "set_dev_node", status ); + return status; + } + if ( !boot ) { /* Update devlist */ + u8 thisnodenum = nodenum; + status = pnp_bios_get_dev_node( &nodenum, boot, data ); + if ( status ) + return status; + update_devlist( thisnodenum, data ); + } + return status; +} + +#if needed +/* + * Call PnP BIOS with function 0x03, "get event" + */ +static int pnp_bios_get_event(u16 *event) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_EVENT, 0, PNP_TS1, PNP_DS, 0, 0 ,0 ,0, + event, sizeof(u16), 0, 0); + return status; +} +#endif + +#if needed +/* + * Call PnP BIOS with function 0x04, "send message" + */ +static int pnp_bios_send_message(u16 message) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_SEND_MESSAGE, message, PNP_DS, 0, 0, 0, 0, 0, 0, 0, 0, 0); + return status; +} +#endif + +#ifdef CONFIG_HOTPLUG +/* + * Call PnP BIOS with function 0x05, "get docking station information" + */ +static int pnp_bios_dock_station_info(struct pnp_docking_station_info *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + data, sizeof(struct pnp_docking_station_info), 0, 0); + return status; +} +#endif + +#if needed +/* + * Call PnP BIOS with function 0x09, "set statically allocated resource + * information" + */ +static int pnp_bios_set_stat_res(char *info) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_SET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + info, *((u16 *) info), 0, 0); + return status; +} +#endif + +/* + * Call PnP BIOS with function 0x0a, "get statically allocated resource + * information" + */ +static int __pnp_bios_get_stat_res(char *info) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + info, 65536, 0, 0); + return status; +} + +int pnp_bios_get_stat_res(char *info) +{ + int status; + status = __pnp_bios_get_stat_res( info ); + if ( status ) + pnpbios_warn_unexpected_status( "get_stat_res", status ); + return status; +} + +#if needed +/* + * Call PnP BIOS with function 0x0b, "get APM id table" + */ +static int pnp_bios_apm_id_table(char *table, u16 *size) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_APM_ID_TABLE, 0, PNP_TS2, 0, PNP_TS1, PNP_DS, 0, 0, + table, *size, size, sizeof(u16)); + return status; +} +#endif + +/* + * Call PnP BIOS with function 0x40, "get isa pnp configuration structure" + */ +static int __pnp_bios_isapnp_config(struct pnp_isa_config_struc *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + data, sizeof(struct pnp_isa_config_struc), 0, 0); + return status; +} + +int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data) +{ + int status; + status = __pnp_bios_isapnp_config( data ); + if ( status ) + pnpbios_warn_unexpected_status( "isapnp_config", status ); + 
return status; +} + +/* + * Call PnP BIOS with function 0x41, "get ESCD info" + */ +static int __pnp_bios_escd_info(struct escd_info_struc *data) +{ + u16 status; + if (!pnp_bios_present()) + return ESCD_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4, PNP_TS1, PNP_DS, + data, sizeof(struct escd_info_struc), 0, 0); + return status; +} + +int pnp_bios_escd_info(struct escd_info_struc *data) +{ + int status; + status = __pnp_bios_escd_info( data ); + if ( status ) + pnpbios_warn_unexpected_status( "escd_info", status ); + return status; +} + +/* + * Call PnP BIOS function 0x42, "read ESCD" + * nvram_base is determined by calling escd_info + */ +static int __pnp_bios_read_escd(char *data, u32 nvram_base) +{ + u16 status; + if (!pnp_bios_present()) + return ESCD_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0, + data, 65536, (void *)nvram_base, 65536); + return status; +} + +int pnp_bios_read_escd(char *data, u32 nvram_base) +{ + int status; + status = __pnp_bios_read_escd( data, nvram_base ); + if ( status ) + pnpbios_warn_unexpected_status( "read_escd", status ); + return status; +} + +#if needed +/* + * Call PnP BIOS function 0x43, "write ESCD" + */ +static int pnp_bios_write_escd(char *data, u32 nvram_base) +{ + u16 status; + if (!pnp_bios_present()) + return ESCD_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_WRITE_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0, + data, 65536, nvram_base, 65536); + return status; +} +#endif + + +/* + * + * DOCKING FUNCTIONS + * + */ + +#ifdef CONFIG_HOTPLUG + +static int unloading = 0; +static struct completion unload_sem; + +/* + * (Much of this belongs in a shared routine somewhere) + */ + +static int pnp_dock_event(int dock, struct pnp_docking_station_info *info) +{ + char *argv [3], **envp, *buf, *scratch; + int i = 0, value; + + if (!hotplug_path [0]) + return -ENOENT; + if (!current->fs->root) { + return -EAGAIN; + } + if (!(envp = (char **) pnpbios_kmalloc (20 * sizeof (char *), GFP_KERNEL))) { + return -ENOMEM; + } + if (!(buf = pnpbios_kmalloc (256, GFP_KERNEL))) { + kfree (envp); + return -ENOMEM; + } + + /* only one standardized param to hotplug command: type */ + argv [0] = hotplug_path; + argv [1] = "dock"; + argv [2] = 0; + + /* minimal command environment */ + envp [i++] = "HOME=/"; + envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + +#ifdef DEBUG + /* hint that policy agent should enter no-stdout debug mode */ + envp [i++] = "DEBUG=kernel"; +#endif + /* extensible set of named bus-specific parameters, + * supporting multiple driver selection algorithms. 
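+ *
+ * For example (values illustrative), a dock attach event ends up
+ * invoking "/sbin/hotplug dock" with an environment along the lines of
+ *
+ *	HOME=/
+ *	PATH=/sbin:/bin:/usr/sbin:/usr/bin
+ *	ACTION=add
+ *	DOCK=1/2a/3	(location_id/serial/capabilities, in hex)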
+ */
+	scratch = buf;
+
+	/* action: add, remove */
+	envp [i++] = scratch;
+	scratch += sprintf (scratch, "ACTION=%s", dock?"add":"remove") + 1;
+
+	/* Report the ident for the dock */
+	envp [i++] = scratch;
+	scratch += sprintf (scratch, "DOCK=%x/%x/%x",
+		info->location_id, info->serial, info->capabilities);
+	envp[i] = 0;
+
+	value = call_usermodehelper (argv [0], argv, envp);
+	kfree (buf);
+	kfree (envp);
+	return 0;
+}
+
+/*
+ * Poll the PnP docking at regular intervals
+ */
+static int pnp_dock_thread(void * unused)
+{
+	static struct pnp_docking_station_info now;
+	int docked = -1, d = 0;
+	daemonize();
+	reparent_to_init();
+	strcpy(current->comm, "kpnpbiosd");
+	while(!unloading && !signal_pending(current))
+	{
+		int status;
+
+		/*
+		 * Poll every 2 seconds
+		 */
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ*2);
+		if(signal_pending(current))
+			break;
+
+		status = pnp_bios_dock_station_info(&now);
+
+		switch(status)
+		{
+			/*
+			 * No dock to manage
+			 */
+			case PNP_FUNCTION_NOT_SUPPORTED:
+				complete_and_exit(&unload_sem, 0);
+			case PNP_SYSTEM_NOT_DOCKED:
+				d = 0;
+				break;
+			case PNP_SUCCESS:
+				d = 1;
+				break;
+			default:
+				pnpbios_warn_unexpected_status( "pnp_dock_thread", status );
+				continue;
+		}
+		if(d != docked)
+		{
+			if(pnp_dock_event(d, &now)==0)
+			{
+				docked = d;
+#if 0
+				printk(KERN_INFO "PnPBIOS: Docking station %stached\n", docked?"at":"de");
+#endif
+			}
+		}
+	}
+	complete_and_exit(&unload_sem, 0);
+}
+
+#endif /* CONFIG_HOTPLUG */
+
+
+/*
+ *
+ * NODE DATA PARSING FUNCTIONS
+ *
+ */
+
+static void add_irqresource(struct pci_dev *dev, int irq)
+{
+	int i = 0;
+	while (!(dev->irq_resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_IRQ) i++;
+	if (i < DEVICE_COUNT_IRQ) {
+		dev->irq_resource[i].start = (unsigned long) irq;
+		dev->irq_resource[i].flags = IORESOURCE_IRQ;  // Also clears _UNSET flag
+	}
+}
+
+static void add_dmaresource(struct pci_dev *dev, int dma)
+{
+	int i = 0;
+	while (!(dev->dma_resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_DMA) i++;
+	if (i < DEVICE_COUNT_DMA) {
+		dev->dma_resource[i].start = (unsigned long) dma;
+		dev->dma_resource[i].flags = IORESOURCE_DMA;  // Also clears _UNSET flag
+	}
+}
+
+static void add_ioresource(struct pci_dev *dev, int io, int len)
+{
+	int i = 0;
+	while (!(dev->resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_RESOURCE) i++;
+	if (i < DEVICE_COUNT_RESOURCE) {
+		dev->resource[i].start = (unsigned long) io;
+		dev->resource[i].end = (unsigned long)(io + len - 1);
+		dev->resource[i].flags = IORESOURCE_IO;  // Also clears _UNSET flag
+	}
+}
+
+static void add_memresource(struct pci_dev *dev, int mem, int len)
+{
+	int i = 0;
+	while (!(dev->resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_RESOURCE) i++;
+	if (i < DEVICE_COUNT_RESOURCE) {
+		dev->resource[i].start = (unsigned long) mem;
+		dev->resource[i].end = (unsigned long)(mem + len - 1);
+		dev->resource[i].flags = IORESOURCE_MEM;  // Also clears _UNSET flag
+	}
+}
+
+static void node_resource_data_to_dev(struct pnp_bios_node *node, struct pci_dev *dev)
+{
+	unsigned char *p = node->data, *lastp=NULL;
+	int i;
+
+	/*
+	 * First, set resource info to default values
+	 */
+	for (i=0;i<DEVICE_COUNT_RESOURCE;i++) {
+		dev->resource[i].start = 0;  // "disabled"
+		dev->resource[i].flags = IORESOURCE_UNSET;
+	}
+	for (i=0;i<DEVICE_COUNT_IRQ;i++) {
+		dev->irq_resource[i].start = (unsigned long)-1;  // "disabled"
+		dev->irq_resource[i].flags = IORESOURCE_UNSET;
+	}
+	for (i=0;i<DEVICE_COUNT_DMA;i++) {
+		dev->dma_resource[i].start = (unsigned long)-1;  // "disabled"
+		dev->dma_resource[i].flags = IORESOURCE_UNSET;
+	}
+
+	/*
+	 * Fill in dev
resource info + */ + while ( (char *)p < ((char *)node->data + node->size )) { + if(p==lastp) break; + + if( p[0] & 0x80 ) {// large item + switch (p[0] & 0x7f) { + case 0x01: // memory + { + int io = *(short *) &p[4]; + int len = *(short *) &p[10]; + add_memresource(dev, io, len); + break; + } + case 0x02: // device name + { + int len = *(short *) &p[1]; + memcpy(dev->name, p + 3, len >= 80 ? 79 : len); + break; + } + case 0x05: // 32-bit memory + { + int io = *(int *) &p[4]; + int len = *(int *) &p[16]; + add_memresource(dev, io, len); + break; + } + case 0x06: // fixed location 32-bit memory + { + int io = *(int *) &p[4]; + int len = *(int *) &p[8]; + add_memresource(dev, io, len); + break; + } + } /* switch */ + lastp = p+3; + p = p + p[1] + p[2]*256 + 3; + continue; + } + if ((p[0]>>3) == 0x0f) // end tag + break; + switch (p[0]>>3) { + case 0x04: // irq + { + int i, mask, irq = -1; + mask= p[1] + p[2]*256; + for (i=0;i<16;i++, mask=mask>>1) + if(mask & 0x01) irq=i; + add_irqresource(dev, irq); + break; + } + case 0x05: // dma + { + int i, mask, dma = -1; + mask = p[1]; + for (i=0;i<8;i++, mask = mask>>1) + if(mask & 0x01) dma=i; + add_dmaresource(dev, dma); + break; + } + case 0x08: // io + { + int io= p[2] + p[3] *256; + int len = p[7]; + add_ioresource(dev, io, len); + break; + } + case 0x09: // fixed location io + { + int io = p[1] + p[2] * 256; + int len = p[3]; + add_ioresource(dev, io, len); + break; + } + } /* switch */ + lastp=p+1; + p = p + (p[0] & 0x07) + 1; + + } /* while */ + + return; +} + + +/* + * + * DEVICE LIST MANAGEMENT FUNCTIONS + * + * + * Some of these are exported to give public access + * + * Question: Why maintain a device list when the PnP BIOS can + * list devices for us? Answer: Some PnP BIOSes can't report + * the current configuration, only the boot configuration. + * The boot configuration can be changed, so we need to keep + * a record of what the configuration was when we booted; + * presumably it continues to describe the current config. + * For those BIOSes that can change the current config, we + * keep the information in the devlist up to date. + * + * Note that it is currently assumed that the list does not + * grow or shrink in size after init time, and slot_name + * never changes. The list is protected by a spinlock. + */ + +static LIST_HEAD(pnpbios_devices); + +static spinlock_t pnpbios_devices_lock; + +static int inline insert_device(struct pci_dev *dev) +{ + + /* + * FIXME: Check for re-add of existing node; + * return -1 if node already present + */ + + /* We don't lock because we only do this at init time */ + list_add_tail(&dev->global_list, &pnpbios_devices); + + return 0; +} + +#define HEX(id,a) hex[((id)>>a) & 15] +#define CHAR(id,a) (0x40 + (((id)>>a) & 31)) +// +static void inline pnpid32_to_pnpid(u32 id, char *str) +{ + const char *hex = "0123456789abcdef"; + + id = be32_to_cpu(id); + str[0] = CHAR(id, 26); + str[1] = CHAR(id, 21); + str[2] = CHAR(id,16); + str[3] = HEX(id, 12); + str[4] = HEX(id, 8); + str[5] = HEX(id, 4); + str[6] = HEX(id, 0); + str[7] = '\0'; + + return; +} +// +#undef CHAR +#undef HEX + +/* + * Build a linked list of pci_devs in order of ascending node number + * Called only at init time. 
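+ *
+ * The pnpid32_to_pnpid() helper above unpacks an EISA ID into three
+ * 5-bit compressed-ASCII characters plus four hex nibbles.  As a
+ * worked example, an id with be32_to_cpu(id) == 0x41d00c02 decodes
+ * character by character as
+ *	(0x41d00c02 >> 26) & 31 == 0x10 -> 'P'
+ *	(0x41d00c02 >> 21) & 31 == 0x0e -> 'N'
+ *	(0x41d00c02 >> 16) & 31 == 0x10 -> 'P'
+ * followed by the nibbles 0, c, 0, 2, giving the string "PNP0c02".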
+ */ +static void __init build_devlist(void) +{ + u8 nodenum; + unsigned int nodes_got = 0; + unsigned int devs = 0; + struct pnp_bios_node *node; + struct pnp_dev_node_info node_info; + struct pci_dev *dev; + + if (!pnp_bios_present()) + return; + + if (pnp_bios_dev_node_info(&node_info) != 0) + return; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) + return; + + for(nodenum=0; nodenum<0xff; ) { + u8 thisnodenum = nodenum; + /* We build the list from the "boot" config because + * asking for the "current" config causes some + * BIOSes to crash. + */ + if (pnp_bios_get_dev_node(&nodenum, (char )1 , node)) + break; + nodes_got++; + dev = pnpbios_kmalloc(sizeof (struct pci_dev), GFP_KERNEL); + if (!dev) + break; + memset(dev,0,sizeof(struct pci_dev)); + dev->devfn = thisnodenum; + memcpy(dev->name,"PNPBIOS",8); + pnpid32_to_pnpid(node->eisa_id,dev->slot_name); + node_resource_data_to_dev(node,dev); + if(insert_device(dev)<0) + kfree(dev); + else + devs++; + if (nodenum <= thisnodenum) { + printk(KERN_ERR "PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", (unsigned int)nodenum, (unsigned int)thisnodenum); + break; + } + } + kfree(node); + + printk(KERN_INFO "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n", + nodes_got, nodes_got != 1 ? "s" : "", devs); +} + +static struct pci_dev *find_device_by_nodenum( u8 nodenum ) +{ + struct pci_dev *dev; + + pnpbios_for_each_dev(dev) { + if(dev->devfn == nodenum) + return dev; + } + + return NULL; +} + +static void update_devlist( u8 nodenum, struct pnp_bios_node *data ) +{ + unsigned long flags; + struct pci_dev *dev; + + spin_lock_irqsave(&pnpbios_devices_lock, flags); + dev = find_device_by_nodenum( nodenum ); + if ( dev ) { + node_resource_data_to_dev(data,dev); + } + spin_unlock_irqrestore(&pnpbios_devices_lock, flags); + + return; +} + + +/* + * + * DRIVER REGISTRATION FUNCTIONS + * + * + * Exported to give public access + * + */ + +static LIST_HEAD(pnpbios_drivers); + +static const struct pnpbios_device_id * +match_device(const struct pnpbios_device_id *ids, const struct pci_dev *dev) +{ + while (*ids->id) + { + if(memcmp(ids->id, dev->slot_name, 7)==0) + return ids; + ids++; + } + return NULL; +} + +static int announce_device(struct pnpbios_driver *drv, struct pci_dev *dev) +{ + const struct pnpbios_device_id *id; + struct pci_dev tmpdev; + int ret; + + if (drv->id_table) { + id = match_device(drv->id_table, dev); + if (!id) + return 0; + } else + id = NULL; + + memcpy( &tmpdev, dev, sizeof(struct pci_dev)); + tmpdev.global_list.prev = NULL; + tmpdev.global_list.next = NULL; + + dev_probe_lock(); + /* Obviously, probe() should not call any pnpbios functions */ + ret = drv->probe(&tmpdev, id); + dev_probe_unlock(); + if (ret < 1) + return 0; + + dev->driver = (void *)drv; + + return 1; +} + +/** + * pnpbios_register_driver - register a new pci driver + * @drv: the driver structure to register + * + * Adds the driver structure to the list of registered drivers + * + * For each device in the pnpbios device list that matches one of + * the ids in drv->id_table, calls the driver's "probe" function with + * arguments (1) a pointer to a *temporary* struct pci_dev containing + * resource info for the device, and (2) a pointer to the id string + * of the device. Expects the probe function to return 1 if the + * driver claims the device (otherwise 0) in which case, marks the + * device as having this driver. 
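+ *
+ * A minimal caller might look like this (a sketch only: mydrv_probe
+ * and the ID "PNP0400" are hypothetical, and the ID table must end
+ * with an empty string):
+ *
+ *	static const struct pnpbios_device_id mydrv_ids[] = {
+ *		{ "PNP0400", 0 },
+ *		{ "", 0 }
+ *	};
+ *	static struct pnpbios_driver mydrv = {
+ *		id_table:	mydrv_ids,
+ *		probe:		mydrv_probe,
+ *	};
+ *	...
+ *	claimed = pnpbios_register_driver(&mydrv);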
+ *
+ * Returns the number of pci devices which were claimed by the driver
+ * during registration.  The driver remains registered even if the
+ * return value is zero.
+ */
+int pnpbios_register_driver(struct pnpbios_driver *drv)
+{
+	struct pci_dev *dev;
+	unsigned long flags;
+	int count = 0;
+
+	list_add_tail(&drv->node, &pnpbios_drivers);
+	spin_lock_irqsave(&pnpbios_devices_lock, flags);
+	pnpbios_for_each_dev(dev) {
+		if (!pnpbios_dev_driver(dev))
+			count += announce_device(drv, dev);
+	}
+	spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+	return count;
+}
+
+EXPORT_SYMBOL(pnpbios_register_driver);
+
+/**
+ * pnpbios_unregister_driver - unregister a pci driver
+ * @drv: the driver structure to unregister
+ *
+ * Deletes the driver structure from the list of registered PnPBIOS
+ * drivers, gives it a chance to clean up by calling its "remove"
+ * function for each device it was responsible for, and marks those
+ * devices as driverless.
+ */
+void pnpbios_unregister_driver(struct pnpbios_driver *drv)
+{
+	unsigned long flags;
+	struct pci_dev *dev;
+
+	list_del(&drv->node);
+	spin_lock_irqsave(&pnpbios_devices_lock, flags);
+	pnpbios_for_each_dev(dev) {
+		if (dev->driver == (void *)drv) {
+			if (drv->remove)
+				drv->remove(dev);
+			dev->driver = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+}
+
+EXPORT_SYMBOL(pnpbios_unregister_driver);
+
+
+/*
+ *
+ * RESOURCE RESERVATION FUNCTIONS
+ *
+ *
+ * Used only at init time
+ *
+ */
+
+static void __init reserve_ioport_range(char *pnpid, int start, int end)
+{
+	struct resource *res;
+	char *regionid;
+
+#if 0
+	/*
+	 * TEMPORARY hack to work around the fact that the
+	 * floppy driver inappropriately reserves ioports 0x3f0 and 0x3f1
+	 * Remove this once the floppy driver is fixed.
+	 */
+	if (
+		(0x3f0 >= start && 0x3f0 <= end)
+		|| (0x3f1 >= start && 0x3f1 <= end)
+	) {
+		printk(KERN_INFO
+			"PnPBIOS: %s: ioport range 0x%x-0x%x NOT reserved\n",
+			pnpid, start, end
+		);
+		return;
+	}
+#endif
+
+	regionid = pnpbios_kmalloc(16, GFP_KERNEL);
+	if ( regionid == NULL )
+		return;
+	snprintf(regionid, 16, "PnPBIOS %s", pnpid);
+	res = request_region(start,end-start+1,regionid);
+	if ( res == NULL )
+		kfree( regionid );
+	else
+		res->flags &= ~IORESOURCE_BUSY;
+	/*
+	 * Failures at this point are usually harmless. pci quirks for
+	 * example do reserve stuff they know about too, so we may well
+	 * have double reservations.
+	 */
+	printk(KERN_INFO
+		"PnPBIOS: %s: ioport range 0x%x-0x%x %s reserved\n",
+		pnpid, start, end,
+		NULL != res ? "has been" : "could not be"
+	);
+
+	return;
+}
+
+static void __init reserve_resources_of_dev( struct pci_dev *dev )
+{
+	int i;
+
+	for (i=0;i<DEVICE_COUNT_RESOURCE;i++) {
+		if ( dev->resource[i].flags & IORESOURCE_UNSET )
+			/* end of resources */
+			break;
+		if (dev->resource[i].flags & IORESOURCE_IO) {
+			/* ioport */
+			if ( dev->resource[i].start == 0 )
+				/* disabled */
+				/* Do nothing */
+				continue;
+			if ( dev->resource[i].start < 0x100 )
+				/*
+				 * Below 0x100 is only standard PC hardware
+				 * (pics, kbd, timer, dma, ...)
+				 * We should not get resource conflicts there,
+				 * and the kernel reserves these anyway
+				 * (see arch/i386/kernel/setup.c).
+ * So, do nothing + */ + continue; + if ( dev->resource[i].end < dev->resource[i].start ) + /* invalid endpoint */ + /* Do nothing */ + continue; + reserve_ioport_range( + dev->slot_name, + dev->resource[i].start, + dev->resource[i].end + ); + } else if (dev->resource[i].flags & IORESOURCE_MEM) { + /* iomem */ + /* For now do nothing */ + continue; + } else { + /* Neither ioport nor iomem */ + /* Do nothing */ + continue; + } + } + + return; +} + +static void __init reserve_resources( void ) +{ + struct pci_dev *dev; + + pnpbios_for_each_dev(dev) { + if ( + 0 != strcmp(dev->slot_name,"PNP0c01") && /* memory controller */ + 0 != strcmp(dev->slot_name,"PNP0c02") /* system peripheral: other */ + ) { + continue; + } + reserve_resources_of_dev(dev); + } + + return; +} + + +/* + * + * INIT AND EXIT + * + */ + +extern int is_sony_vaio_laptop; + +static int pnpbios_disabled; /* = 0 */ +static int dont_reserve_resources; /* = 0 */ +int pnpbios_dont_use_current_config; /* = 0 */ + +#ifndef MODULE +static int __init pnpbios_setup(char *str) +{ + int invert; + + while ((str != NULL) && (*str != '\0')) { + if (strncmp(str, "off", 3) == 0) + pnpbios_disabled=1; + if (strncmp(str, "on", 2) == 0) + pnpbios_disabled=0; + invert = (strncmp(str, "no-", 3) == 0); + if (invert) + str += 3; + if (strncmp(str, "curr", 4) == 0) + pnpbios_dont_use_current_config = invert; + if (strncmp(str, "res", 3) == 0) + dont_reserve_resources = invert; + str = strchr(str, ','); + if (str != NULL) + str += strspn(str, ", \t"); + } + + return 1; +} + +__setup("pnpbios=", pnpbios_setup); +#endif + +int __init pnpbios_init(void) +{ + union pnp_bios_expansion_header *check; + u8 sum; + int i, length, r; + + spin_lock_init(&pnp_bios_lock); + spin_lock_init(&pnpbios_devices_lock); + + if(pnpbios_disabled || (dmi_broken & BROKEN_PNP_BIOS) ) { + printk(KERN_INFO "PnPBIOS: Disabled\n"); + return -ENODEV; + } + + if ( is_sony_vaio_laptop ) + pnpbios_dont_use_current_config = 1; + + /* + * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS + * structure and, if one is found, sets up the selectors and + * entry points + */ + for (check = (union pnp_bios_expansion_header *) __va(0xf0000); + check < (union pnp_bios_expansion_header *) __va(0xffff0); + ((void *) (check)) += 16) { + if (check->fields.signature != PNP_SIGNATURE) + continue; + length = check->fields.length; + if (!length) + continue; + for (sum = 0, i = 0; i < length; i++) + sum += check->chars[i]; + if (sum) + continue; + if (check->fields.version < 0x10) { + printk(KERN_WARNING "PnPBIOS: PnP BIOS version %d.%d is not supported\n", + check->fields.version >> 4, + check->fields.version & 15); + continue; + } + printk(KERN_INFO "PnPBIOS: Found PnP BIOS installation structure at 0x%p\n", check); + printk(KERN_INFO "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x\n", + check->fields.version >> 4, check->fields.version & 15, + check->fields.pm16cseg, check->fields.pm16offset, + check->fields.pm16dseg); + Q2_SET_SEL(PNP_CS32, &pnp_bios_callfunc, 64 * 1024); + Q_SET_SEL(PNP_CS16, check->fields.pm16cseg, 64 * 1024); + Q_SET_SEL(PNP_DS, check->fields.pm16dseg, 64 * 1024); + pnp_bios_callpoint.offset = check->fields.pm16offset; + pnp_bios_callpoint.segment = PNP_CS16; + pnp_bios_hdr = check; + break; + } + if (!pnp_bios_present()) + return -ENODEV; + build_devlist(); + if ( ! 
dont_reserve_resources )
+		reserve_resources();
+#ifdef CONFIG_PROC_FS
+	r = pnpbios_proc_init();
+	if (r)
+		return r;
+#endif
+	return 0;
+}
+
+static int pnpbios_thread_init(void)
+{
+#ifdef CONFIG_HOTPLUG
+	init_completion(&unload_sem);
+	if(kernel_thread(pnp_dock_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL)>0)
+		unloading = 0;
+#endif
+	return 0;
+}
+
+#ifndef MODULE
+
+/* init/main.c calls pnpbios_init early */
+
+/* Start the kernel thread later: */
+module_init(pnpbios_thread_init);
+
+#else
+
+/*
+ * N.B.: Building pnpbios as a module hasn't been fully implemented
+ */
+
+MODULE_LICENSE("GPL");
+
+static int pnpbios_init_all(void)
+{
+	int r;
+	r = pnpbios_init();
+	if (r)
+		return r;
+	r = pnpbios_thread_init();
+	if (r)
+		return r;
+	return 0;
+}
+
+static void __exit pnpbios_exit(void)
+{
+#ifdef CONFIG_HOTPLUG
+	unloading = 1;
+	wait_for_completion(&unload_sem);
+#endif
+	pnpbios_proc_exit();
+	/* We ought to free resources here */
+	return;
+}
+
+module_init(pnpbios_init_all);
+module_exit(pnpbios_exit);
+
+#endif
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/pnp/pnpbios_proc.c linux.21rc5-ac2/drivers/pnp/pnpbios_proc.c
--- linux.21rc5/drivers/pnp/pnpbios_proc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/drivers/pnp/pnpbios_proc.c	2003-04-22 16:44:37.000000000 +0100
@@ -0,0 +1,278 @@
+/*
+ * /proc/bus/pnp interface for Plug and Play devices
+ *
+ * Written by David Hinds, dahinds@users.sourceforge.net
+ * Modified by Thomas Hood, jdthood@mail.com
+ *
+ * The .../devices and .../<node> and .../boot/<node> files are
+ * utilized by the lspnp and setpnp utilities, supplied with the
+ * pcmcia-cs package.
+ *	http://pcmcia-cs.sourceforge.net
+ *
+ * The .../escd file is utilized by the lsescd utility written by
+ * Gunther Mayer.
+ *	http://home.t-online.de/home/gunther.mayer/lsescd
+ *
+ * The .../legacy_device_resources file is not used yet.
+ *
+ * The other files are human-readable.
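+ *
+ * Each line of the .../devices file gives, in order, the node handle,
+ * the raw 32-bit EISA ID, the three type code bytes and the node
+ * flags, tab-separated; for example (values illustrative):
+ *
+ *	01	41d00c02	08:80:00	0004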
+ */ + +//#include +#define __NO_VERSION__ +//#include + +#include +#include +#include +#include +#include +#include + +static struct proc_dir_entry *proc_pnp = NULL; +static struct proc_dir_entry *proc_pnp_boot = NULL; +static struct pnp_dev_node_info node_info; + +static int proc_read_pnpconfig(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct pnp_isa_config_struc pnps; + + if (pnp_bios_isapnp_config(&pnps)) + return -EIO; + return snprintf(buf, count, + "structure_revision %d\n" + "number_of_CSNs %d\n" + "ISA_read_data_port 0x%x\n", + pnps.revision, + pnps.no_csns, + pnps.isa_rd_data_port + ); +} + +static int proc_read_escdinfo(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct escd_info_struc escd; + + if (pnp_bios_escd_info(&escd)) + return -EIO; + return snprintf(buf, count, + "min_ESCD_write_size %d\n" + "ESCD_size %d\n" + "NVRAM_base 0x%x\n", + escd.min_escd_write_size, + escd.escd_size, + escd.nv_storage_base + ); +} + +#define MAX_SANE_ESCD_SIZE (32*1024) +static int proc_read_escd(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct escd_info_struc escd; + char *tmpbuf; + int escd_size, escd_left_to_read, n; + + if (pnp_bios_escd_info(&escd)) + return -EIO; + + /* sanity check */ + if (escd.escd_size > MAX_SANE_ESCD_SIZE) { + printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n"); + return -EFBIG; + } + + tmpbuf = pnpbios_kmalloc(escd.escd_size, GFP_KERNEL); + if (!tmpbuf) return -ENOMEM; + + if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) + return -EIO; + + escd_size = (unsigned char)(tmpbuf[0]) + (unsigned char)(tmpbuf[1])*256; + + /* sanity check */ + if (escd_size > MAX_SANE_ESCD_SIZE) { + printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by BIOS read_escd call is too great\n"); + return -EFBIG; + } + + escd_left_to_read = escd_size - pos; + if (escd_left_to_read < 0) escd_left_to_read = 0; + if (escd_left_to_read == 0) *eof = 1; + n = min(count,escd_left_to_read); + memcpy(buf, tmpbuf + pos, n); + kfree(tmpbuf); + *start = buf; + return n; +} + +static int proc_read_legacyres(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + /* Assume that the following won't overflow the buffer */ + if (pnp_bios_get_stat_res(buf)) + return -EIO; + + return count; // FIXME: Return actual length +} + +static int proc_read_devices(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct pnp_bios_node *node; + u8 nodenum; + char *p = buf; + + if (pos >= 0xff) + return 0; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) return -ENOMEM; + + for (nodenum=pos; nodenum<0xff; ) { + u8 thisnodenum = nodenum; + /* 26 = the number of characters per line sprintf'ed */ + if ((p - buf + 26) > count) + break; + if (pnp_bios_get_dev_node(&nodenum, 1, node)) + break; + p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n", + node->handle, node->eisa_id, + node->type_code[0], node->type_code[1], + node->type_code[2], node->flags); + if (nodenum <= thisnodenum) { + printk(KERN_ERR "%s Node number 0x%x is out of sequence following node 0x%x. 
Aborting.\n", "PnPBIOS: proc_read_devices:", (unsigned int)nodenum, (unsigned int)thisnodenum); + *eof = 1; + break; + } + } + kfree(node); + if (nodenum == 0xff) + *eof = 1; + *start = (char *)((off_t)nodenum - pos); + return p - buf; +} + +static int proc_read_node(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct pnp_bios_node *node; + int boot = (long)data >> 8; + u8 nodenum = (long)data; + int len; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) return -ENOMEM; + if (pnp_bios_get_dev_node(&nodenum, boot, node)) + return -EIO; + len = node->size - sizeof(struct pnp_bios_node); + memcpy(buf, node->data, len); + kfree(node); + return len; +} + +static int proc_write_node(struct file *file, const char *buf, + unsigned long count, void *data) +{ + struct pnp_bios_node *node; + int boot = (long)data >> 8; + u8 nodenum = (long)data; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) return -ENOMEM; + if ( pnp_bios_get_dev_node(&nodenum, boot, node) ) + return -EIO; + if (count != node->size - sizeof(struct pnp_bios_node)) + return -EINVAL; + memcpy(node->data, buf, count); + if (pnp_bios_set_dev_node(node->handle, boot, node) != 0) + return -EINVAL; + kfree(node); + return count; +} + +/* + * When this is called, pnpbios functions are assumed to + * work and the pnpbios_dont_use_current_config flag + * should already have been set to the appropriate value + */ +int __init pnpbios_proc_init( void ) +{ + struct pnp_bios_node *node; + struct proc_dir_entry *ent; + char name[3]; + u8 nodenum; + + if (pnp_bios_dev_node_info(&node_info)) + return -EIO; + + proc_pnp = proc_mkdir("pnp", proc_bus); + if (!proc_pnp) + return -EIO; + proc_pnp_boot = proc_mkdir("boot", proc_pnp); + if (!proc_pnp_boot) + return -EIO; + create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL); + create_proc_read_entry("configuration_info", 0, proc_pnp, proc_read_pnpconfig, NULL); + create_proc_read_entry("escd_info", S_IRUSR, proc_pnp, proc_read_escdinfo, NULL); + create_proc_read_entry("escd", S_IRUSR, proc_pnp, proc_read_escd, NULL); + create_proc_read_entry("legacy_device_resources", 0, proc_pnp, proc_read_legacyres, NULL); + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) + return -ENOMEM; + + for (nodenum=0; nodenum<0xff; ) { + u8 thisnodenum = nodenum; + if (pnp_bios_get_dev_node(&nodenum, 1, node) != 0) + break; + sprintf(name, "%02x", node->handle); + if ( !pnpbios_dont_use_current_config ) { + ent = create_proc_entry(name, 0, proc_pnp); + if (ent) { + ent->read_proc = proc_read_node; + ent->write_proc = proc_write_node; + ent->data = (void *)(long)(node->handle); + } + } + ent = create_proc_entry(name, 0, proc_pnp_boot); + if (ent) { + ent->read_proc = proc_read_node; + ent->write_proc = proc_write_node; + ent->data = (void *)(long)(node->handle+0x100); + } + if (nodenum <= thisnodenum) { + printk(KERN_ERR "%s Node number 0x%x is out of sequence following node 0x%x. 
Aborting.\n", "PnPBIOS: proc_init:", (unsigned int)nodenum, (unsigned int)thisnodenum); + break; + } + } + kfree(node); + + return 0; +} + +void __exit pnpbios_proc_exit(void) +{ + int i; + char name[3]; + + if (!proc_pnp) return; + + for (i=0; i<0xff; i++) { + sprintf(name, "%02x", i); + if ( !pnpbios_dont_use_current_config ) + remove_proc_entry(name, proc_pnp); + remove_proc_entry(name, proc_pnp_boot); + } + remove_proc_entry("legacy_device_resources", proc_pnp); + remove_proc_entry("escd", proc_pnp); + remove_proc_entry("escd_info", proc_pnp); + remove_proc_entry("configuration_info", proc_pnp); + remove_proc_entry("devices", proc_pnp); + remove_proc_entry("boot", proc_pnp); + remove_proc_entry("pnp", proc_bus); + + return; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/block/dasd_3990_erp.c linux.21rc5-ac2/drivers/s390/block/dasd_3990_erp.c --- linux.21rc5/drivers/s390/block/dasd_3990_erp.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/block/dasd_3990_erp.c 2003-04-25 13:49:34.000000000 +0100 @@ -5,6 +5,8 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001 * + * $Revision: 1.50 $ + * * History of changes: * 05/14/01 fixed PL030160GTO (BUG() in erp_action_5) */ @@ -41,7 +43,9 @@ char *nl, *end_cqr, *begin, - *end; + *end, + buffer[80]; + /* dump sense data */ if (device->discipline && @@ -54,83 +58,130 @@ /* log the channel program */ while (loop_cqr != NULL) { - DASD_MESSAGE (KERN_ERR, device, - "(%s) ERP chain report for req: %p", - caller == 0 ? "EXAMINE" : "ACTION", - loop_cqr); + DEV_MESSAGE (KERN_ERR, device, + "(%s) ERP chain report for req: %p", + caller == 0 ? "EXAMINE" : "ACTION", + loop_cqr); nl = (char *) loop_cqr; end_cqr = nl + sizeof (ccw_req_t); while (nl < end_cqr) { + + sprintf (buffer, + "%p: %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x", + nl, + nl[0], nl[1], nl[2], nl[3], + nl[4], nl[5], nl[6], nl[7], + nl[8], nl[9], nl[10], nl[11], + nl[12], nl[13], nl[14], nl[15]); + + DEV_MESSAGE (KERN_ERR, device, "%s", + buffer); - DASD_MESSAGE (KERN_ERR, device, - "%p: %02x%02x%02x%02x %02x%02x%02x%02x " - "%02x%02x%02x%02x %02x%02x%02x%02x", - nl, - nl[0], nl[1], nl[2], nl[3], - nl[4], nl[5], nl[6], nl[7], - nl[8], nl[9], nl[10], nl[11], - nl[12], nl[13], nl[14], nl[15]); nl +=16; } - + + DEV_MESSAGE (KERN_ERR, device, + "DATA area is at: %p", + loop_cqr->data); + + nl = (char *) loop_cqr->data; + end_cqr = nl + loop_cqr->datasize; + + while (nl < end_cqr) { + + sprintf (buffer, + "%p: %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x", + nl, + nl[0], nl[1], nl[2], nl[3], + nl[4], nl[5], nl[6], nl[7], + nl[8], nl[9], nl[10], nl[11], + nl[12], nl[13], nl[14], nl[15]); + + DEV_MESSAGE (KERN_ERR, device, "%s", + buffer); + + nl +=16; + } + nl = (char *) loop_cqr->cpaddr; if (loop_cqr->cplength > 40) { /* log only parts of the CP */ - DASD_MESSAGE (KERN_ERR, device, "%s", + DEV_MESSAGE (KERN_ERR, device, "%s", "Start of channel program:"); for (i = 0; i < 20; i += 2) { - DASD_MESSAGE (KERN_ERR, device, - "%p: %02x%02x%02x%02x %02x%02x%02x%02x " - "%02x%02x%02x%02x %02x%02x%02x%02x", - nl, - nl[0], nl[1], nl[2], nl[3], - nl[4], nl[5], nl[6], nl[7], - nl[8], nl[9], nl[10], nl[11], - nl[12], nl[13], nl[14], nl[15]); + sprintf (buffer, + "%p: %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x", + nl, + nl[0], nl[1], nl[2], nl[3], + nl[4], nl[5], nl[6], nl[7], + nl[8], nl[9], 
nl[10], nl[11], + nl[12], nl[13], nl[14], nl[15]); + DEV_MESSAGE (KERN_ERR, device, "%s", + buffer); + nl += 16; } - DASD_MESSAGE (KERN_ERR, device, "%s", - "End of channel program:"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "End of channel program:"); nl = (char *) loop_cqr->cpaddr; nl += ((loop_cqr->cplength - 10) * 8); for (i = 0; i < 20; i += 2) { - DASD_MESSAGE (KERN_ERR, device, - "%p: %02x%02x%02x%02x %02x%02x%02x%02x " - "%02x%02x%02x%02x %02x%02x%02x%02x", - nl, - nl[0], nl[1], nl[2], nl[3], - nl[4], nl[5], nl[6], nl[7], - nl[8], nl[9], nl[10], nl[11], - nl[12], nl[13], nl[14], nl[15]); + sprintf (buffer, + "%p: %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x", + nl, + nl[0], nl[1], nl[2], nl[3], + nl[4], nl[5], nl[6], nl[7], + nl[8], nl[9], nl[10], nl[11], + nl[12], nl[13], nl[14], nl[15]); + + DEV_MESSAGE (KERN_ERR, device, "%s", + buffer); nl += 16; } } else { /* log the whole CP */ - DASD_MESSAGE (KERN_ERR, device, "%s", + DEV_MESSAGE (KERN_ERR, device, "%s", "Channel program (complete):"); for (i = 0; i < (loop_cqr->cplength + 4); i += 2) { - DASD_MESSAGE (KERN_ERR, device, - "%p: %02x%02x%02x%02x %02x%02x%02x%02x " - "%02x%02x%02x%02x %02x%02x%02x%02x", - nl, - nl[0], nl[1], nl[2], nl[3], - nl[4], nl[5], nl[6], nl[7], - nl[8], nl[9], nl[10], nl[11], - nl[12], nl[13], nl[14], nl[15]); + sprintf (buffer, + "%p: %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x", + nl, + nl[0], nl[1], nl[2], nl[3], + nl[4], nl[5], nl[6], nl[7], + nl[8], nl[9], nl[10], nl[11], + nl[12], nl[13], nl[14], nl[15]); + + DEV_MESSAGE (KERN_ERR, device, "%s", + buffer); nl += 16; } @@ -150,29 +201,34 @@ nl -= 10*8; /* start some bytes before */ - DASD_MESSAGE (KERN_ERR, device, - "Failed CCW (%p) (area):", - (void *)(long)cpa); + DEV_MESSAGE (KERN_ERR, device, + "Failed CCW (%p) (area):", + (void *)(long)cpa); for (i = 0; i < 20; i += 2) { - DASD_MESSAGE (KERN_ERR, device, - "%p: %02x%02x%02x%02x %02x%02x%02x%02x " - "%02x%02x%02x%02x %02x%02x%02x%02x", - nl, - nl[0], nl[1], nl[2], nl[3], - nl[4], nl[5], nl[6], nl[7], - nl[8], nl[9], nl[10], nl[11], - nl[12], nl[13], nl[14], nl[15]); + sprintf (buffer, + "%p: %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x" + " %02x%02x%02x%02x", + nl, + nl[0], nl[1], nl[2], nl[3], + nl[4], nl[5], nl[6], nl[7], + nl[8], nl[9], nl[10], nl[11], + nl[12], nl[13], nl[14], nl[15]); + + DEV_MESSAGE (KERN_ERR, device, "%s", + buffer); nl += 16; } } else { - DASD_MESSAGE (KERN_ERR, device, - "Failed CCW (%p) already logged", - (void *)(long)cpa); + DEV_MESSAGE (KERN_ERR, device, + "Failed CCW (%p) already logged", + (void *)(long)cpa); } } @@ -216,9 +272,9 @@ if (( sense[0] & SNS0_CMD_REJECT ) && (!(sense[2] & SNS2_ENV_DATA_PRESENT)) ) { - DASD_MESSAGE (KERN_ERR, device, "%s", - "EXAMINE 24: Command Reject detected - " - "fatal error"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "EXAMINE 24: Command Reject detected - " + "fatal error"); return dasd_era_fatal; } @@ -227,9 +283,9 @@ if (( sense[1] & SNS1_INV_TRACK_FORMAT ) && (!(sense[2] & SNS2_ENV_DATA_PRESENT)) ) { - DASD_MESSAGE (KERN_ERR, device, "%s", - "EXAMINE 24: Invalid Track Format detected " - "- fatal error"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "EXAMINE 24: Invalid Track Format detected " + "- fatal error"); return dasd_era_fatal; } @@ -237,10 +293,10 @@ /* check for 'No Record Found' */ if (sense[1] & SNS1_NO_REC_FOUND) { - DASD_MESSAGE (KERN_ERR, device, - "EXAMINE 24: No Record Found detected %s", - cqr == device->init_cqr ? 
" " : - "- fatal error"); + DEV_MESSAGE (KERN_ERR, device, + "EXAMINE 24: No Record Found detected %s", + cqr == device->init_cqr ? " " : + "- fatal error"); return dasd_era_fatal; } @@ -274,8 +330,9 @@ return dasd_era_none; case 0x01: - DASD_MESSAGE (KERN_ERR, device, "%s", - "EXAMINE 32: fatal error"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "EXAMINE 32: fatal error"); + return dasd_era_fatal; default: @@ -405,20 +462,24 @@ dasd_device_t *device = erp->device; - DASD_MESSAGE (KERN_INFO, device, - "blocking request queue for %is", - (int) expires); + DEV_MESSAGE (KERN_INFO, device, + "blocking request queue for %is", + (int) expires); check_then_set (&erp->status, CQR_STATUS_ERROR, CQR_STATUS_PENDING); /* restart queue after some time */ - device->timer.function = dasd_3990_erp_restart_queue; - device->timer.data = (unsigned long) erp; - device->timer.expires = jiffies + (expires * HZ); - - add_timer(&device->timer); + if (!timer_pending(&device->blocking_timer)) { + init_timer(&device->blocking_timer); + device->blocking_timer.function = dasd_3990_erp_restart_queue; + device->blocking_timer.data = (unsigned long) erp; + device->blocking_timer.expires = jiffies + (expires * HZ); + add_timer(&device->blocking_timer); + } else { + mod_timer(&device->blocking_timer, jiffies + (expires * HZ)); + } } /* end dasd_3990_erp_block_queue */ @@ -453,8 +514,8 @@ /* 'restart' the device queue */ if (cqr->status == CQR_STATUS_PENDING) { - DASD_MESSAGE (KERN_INFO, device, "%s", - "request queue restarted by MIH"); + DEV_MESSAGE (KERN_INFO, device, "%s", + "request queue restarted by MIH"); check_then_set (&cqr->status, CQR_STATUS_PENDING, @@ -498,9 +559,9 @@ } else { /* issue a message and wait for 'device ready' interrupt */ - DASD_MESSAGE (KERN_ERR, device, "%s", - "is offline or not installed - " - "INTERVENTION REQUIRED!!"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "is offline or not installed - " + "INTERVENTION REQUIRED!!"); dasd_3990_erp_block_queue (erp, 60); @@ -538,11 +599,11 @@ if ((erp->lpm & ioinfo[irq]->opm) != 0x00) { - DASD_MESSAGE (KERN_DEBUG, device, - "try alternate lpm=%x (lpum=%x / opm=%x)", - erp->lpm, - erp->dstat->lpum, - ioinfo[irq]->opm); + DEV_MESSAGE (KERN_DEBUG, device, + "try alternate lpm=%x (lpum=%x / opm=%x)", + erp->lpm, + erp->dstat->lpum, + ioinfo[irq]->opm); /* reset status to queued to handle the request again... 
*/ check_then_set (&erp->status, @@ -553,11 +614,11 @@ } else { - DASD_MESSAGE (KERN_ERR, device, - "No alternate channel path left (lpum=%x / " - "opm=%x) -> permanent error", - erp->dstat->lpum, - ioinfo[irq]->opm); + DEV_MESSAGE (KERN_ERR, device, + "No alternate channel path left (lpum=%x / " + "opm=%x) -> permanent error", + erp->dstat->lpum, + ioinfo[irq]->opm); /* post request with permanent error */ check_then_set (&erp->status, @@ -598,8 +659,8 @@ if (!dctl_cqr) { - DASD_MESSAGE (KERN_ERR, device, "%s", - "Unable to allocate DCTL-CQR"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "Unable to allocate DCTL-CQR"); check_then_set (&erp->status, CQR_STATUS_ERROR, @@ -620,8 +681,9 @@ if (dasd_set_normalized_cda(ccw, __pa (DCTL_data), dctl_cqr, erp->device)) { dasd_free_request (dctl_cqr, erp->device); - DASD_MESSAGE (KERN_ERR, device, "%s", - "Unable to allocate DCTL-CQR"); + + DEV_MESSAGE (KERN_ERR, device, "%s", + "Unable to allocate DCTL-CQR"); check_then_set (&erp->status, CQR_STATUS_ERROR, @@ -635,7 +697,8 @@ dctl_cqr->lpm = LPM_ANYPATH; dctl_cqr->expires = 5 * TOD_MIN; dctl_cqr->retries = 2; - asm volatile ("STCK %0":"=m" (dctl_cqr->buildclk)); + + dctl_cqr->buildclk = get_clock (); dctl_cqr->status = CQR_STATUS_FILLED; @@ -706,17 +769,16 @@ } else { - if (sense[25] & 0x1D) { /* state change pending */ + if (sense[25] == 0x1D) { /* state change pending */ - DASD_MESSAGE (KERN_INFO, device, "%s", - "waiting for state change pending " - "int"); + DEV_MESSAGE (KERN_INFO, device, "%s", + "waiting for state change pending " + "int"); dasd_3990_erp_block_queue (erp, 30); - } else { - + DEV_MESSAGE (KERN_INFO, device, "redriving request immediately, %d retries left", erp->retries); /* no state change pending - retry */ check_then_set (&erp->status, CQR_STATUS_ERROR, @@ -793,98 +855,98 @@ case 0x00: /* No Message */ break; case 0x01: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Invalid Command"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Invalid Command"); break; case 0x02: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Invalid Command " - "Sequence"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Invalid Command " + "Sequence"); break; case 0x03: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - CCW Count less than " - "required"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - CCW Count less than " + "required"); break; case 0x04: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Invalid Parameter"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Invalid Parameter"); break; case 0x05: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Diagnostic of Sepecial" - " Command Violates File Mask"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Diagnostic of Sepecial" + " Command Violates File Mask"); break; case 0x07: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Channel Returned with " - "Incorrect retry CCW"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Channel Returned with " + "Incorrect retry CCW"); break; case 0x08: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Reset Notification"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Reset Notification"); break; case 0x09: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Storage Path Restart"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Storage Path Restart"); break; case 0x0A: - DASD_MESSAGE (KERN_WARNING, device, - "FORMAT 0 - Channel requested " - "... 
%02x", - sense[8]); + DEV_MESSAGE (KERN_WARNING, device, + "FORMAT 0 - Channel requested " + "... %02x", + sense[8]); break; case 0x0B: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Invalid Defective/" - "Alternate Track Pointer"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Invalid Defective/" + "Alternate Track Pointer"); break; case 0x0C: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - DPS Installation " - "Check"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - DPS Installation " + "Check"); break; case 0x0E: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Command Invalid on " - "Secondary Address"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Command Invalid on " + "Secondary Address"); break; case 0x0F: - DASD_MESSAGE (KERN_WARNING, device, - "FORMAT 0 - Status Not As " - "Required: reason %02x", - sense[8]); + DEV_MESSAGE (KERN_WARNING, device, + "FORMAT 0 - Status Not As " + "Required: reason %02x", + sense[8]); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Reseved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Reseved"); } } else { switch (msg_no) { case 0x00: /* No Message */ break; case 0x01: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Device Error Source"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Device Error Source"); break; case 0x02: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Reserved"); break; case 0x03: - DASD_MESSAGE (KERN_WARNING, device, - "FORMAT 0 - Device Fenced - " - "device = %02x", - sense[4]); + DEV_MESSAGE (KERN_WARNING, device, + "FORMAT 0 - Device Fenced - " + "device = %02x", + sense[4]); break; case 0x04: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Data Pinned for " - "Device"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Data Pinned for " + "Device"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 0 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 0 - Reserved"); } } break; @@ -894,348 +956,348 @@ case 0x00: /* No Message */ break; case 0x01: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Device Status 1 not as " - "expected"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Device Status 1 not as " + "expected"); break; case 0x03: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Index missing"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Index missing"); break; case 0x04: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Interruption cannot be reset"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Interruption cannot be reset"); break; case 0x05: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Device did not respond to " - "selection"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Device did not respond to " + "selection"); break; case 0x06: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Device check-2 error or Set " - "Sector is not complete"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Device check-2 error or Set " + "Sector is not complete"); break; case 0x07: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Head address does not " - "compare"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Head address does not " + "compare"); break; case 0x08: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Device status 1 not valid"); + 
DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Device status 1 not valid"); break; case 0x09: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Device not ready"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Device not ready"); break; case 0x0A: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Track physical address did " - "not compare"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Track physical address did " + "not compare"); break; case 0x0B: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Missing device address bit"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Missing device address bit"); break; case 0x0C: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Drive motor switch is off"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Drive motor switch is off"); break; case 0x0D: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Seek incomplete"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Seek incomplete"); break; case 0x0E: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Cylinder address did not " - "compare"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Cylinder address did not " + "compare"); break; case 0x0F: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Offset active cannot be " - "reset"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Offset active cannot be " + "reset"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 1 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 1 - Reserved"); } break; case 0x20: /* Format 2 - 3990 Equipment Checks */ switch (msg_no) { case 0x08: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 2 - 3990 check-2 error"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 2 - 3990 check-2 error"); break; case 0x0E: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 2 - Support facility errors"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 2 - Support facility errors"); break; case 0x0F: - DASD_MESSAGE (KERN_WARNING, device, - "FORMAT 2 - Microcode detected error %02x", - sense[8]); + DEV_MESSAGE (KERN_WARNING, device, + "FORMAT 2 - Microcode detected error %02x", + sense[8]); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 2 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 2 - Reserved"); } break; case 0x30: /* Format 3 - 3990 Control Checks */ switch (msg_no) { case 0x0F: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 3 - Allegiance terminated"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 3 - Allegiance terminated"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 3 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 3 - Reserved"); } break; case 0x40: /* Format 4 - Data Checks */ switch (msg_no) { case 0x00: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - Home address area error"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - Home address area error"); break; case 0x01: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - Count area error"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - Count area error"); break; case 0x02: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - Key area error"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - Key area error"); break; case 0x03: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - Data area error"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + 
"FORMAT 4 - Data area error"); break; case 0x04: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - No sync byte in home address " + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - No sync byte in home address " "area"); break; case 0x05: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - No sync byte in count address " - "area"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - No sync byte in count address " + "area"); break; case 0x06: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - No sync byte in key area"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - No sync byte in key area"); break; case 0x07: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - No sync byte in data area"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - No sync byte in data area"); break; case 0x08: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - Home address area error; " - "offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - Home address area error; " + "offset active"); break; case 0x09: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - Count area error; offset " - "active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - Count area error; offset " + "active"); break; case 0x0A: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - Key area error; offset " - "active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - Key area error; offset " + "active"); break; case 0x0B: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - Data area error; " - "offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - Data area error; " + "offset active"); break; case 0x0C: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - No sync byte in home " - "address area; offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - No sync byte in home " + "address area; offset active"); break; case 0x0D: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - No syn byte in count " - "address area; offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - No syn byte in count " + "address area; offset active"); break; case 0x0E: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - No sync byte in key area; " - "offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - No sync byte in key area; " + "offset active"); break; case 0x0F: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - No syn byte in data area; " - "offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - No syn byte in data area; " + "offset active"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 4 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 4 - Reserved"); } break; case 0x50: /* Format 5 - Data Check with displacement information */ switch (msg_no) { case 0x00: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 5 - Data Check in the " - "home address area"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 5 - Data Check in the " + "home address area"); break; case 0x01: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 5 - Data Check in the count area"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 5 - Data Check in the count area"); break; case 0x02: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 5 - Data Check in the key area"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 5 - Data Check in the key area"); break; case 0x03: - DASD_MESSAGE (KERN_WARNING, 
device, "%s", - "FORMAT 5 - Data Check in the data area"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 5 - Data Check in the data area"); break; case 0x08: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 5 - Data Check in the " - "home address area; offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 5 - Data Check in the " + "home address area; offset active"); break; case 0x09: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 5 - Data Check in the count area; " - "offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 5 - Data Check in the count area; " + "offset active"); break; case 0x0A: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 5 - Data Check in the key area; " - "offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 5 - Data Check in the key area; " + "offset active"); break; case 0x0B: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 5 - Data Check in the data area; " - "offset active"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 5 - Data Check in the data area; " + "offset active"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 5 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 5 - Reserved"); } break; case 0x60: /* Format 6 - Usage Statistics/Overrun Errors */ switch (msg_no) { case 0x00: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 6 - Overrun on channel A"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 6 - Overrun on channel A"); break; case 0x01: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 6 - Overrun on channel B"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 6 - Overrun on channel B"); break; case 0x02: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 6 - Overrun on channel C"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 6 - Overrun on channel C"); break; case 0x03: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 6 - Overrun on channel D"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 6 - Overrun on channel D"); break; case 0x04: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 6 - Overrun on channel E"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 6 - Overrun on channel E"); break; case 0x05: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 6 - Overrun on channel F"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 6 - Overrun on channel F"); break; case 0x06: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 6 - Overrun on channel G"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 6 - Overrun on channel G"); break; case 0x07: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 6 - Overrun on channel H"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 6 - Overrun on channel H"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 6 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 6 - Reserved"); } break; case 0x70: /* Format 7 - Device Connection Control Checks */ switch (msg_no) { case 0x00: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - RCC initiated by a connection " - "check alert"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - RCC initiated by a connection " + "check alert"); break; case 0x01: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - RCC 1 sequence not " - "successful"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - RCC 1 sequence not " + "successful"); break; case 0x02: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - RCC 1 and RCC 
2 sequences not " - "successful"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - RCC 1 and RCC 2 sequences not " + "successful"); break; case 0x03: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - Invalid tag-in during " - "selection sequence"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - Invalid tag-in during " + "selection sequence"); break; case 0x04: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - extra RCC required"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - extra RCC required"); break; case 0x05: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - Invalid DCC selection " - "response or timeout"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - Invalid DCC selection " + "response or timeout"); break; case 0x06: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - Missing end operation; device " - "transfer complete"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - Missing end operation; device " + "transfer complete"); break; case 0x07: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - Missing end operation; device " - "transfer incomplete"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - Missing end operation; device " + "transfer incomplete"); break; case 0x08: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - Invalid tag-in for an " - "immediate command sequence"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - Invalid tag-in for an " + "immediate command sequence"); break; case 0x09: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - Invalid tag-in for an " - "extended command sequence"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - Invalid tag-in for an " + "extended command sequence"); break; case 0x0A: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - 3990 microcode time out when " - "stopping selection"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - 3990 microcode time out when " + "stopping selection"); break; case 0x0B: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - No response to selection " - "after a poll interruption"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - No response to selection " + "after a poll interruption"); break; case 0x0C: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - Permanent path error (DASD " - "controller not available)"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - Permanent path error (DASD " + "controller not available)"); break; case 0x0D: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - DASD controller not available" - " on disconnected command chain"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - DASD controller not available" + " on disconnected command chain"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 7 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 7 - Reserved"); } break; @@ -1243,52 +1305,52 @@ switch (msg_no) { case 0x00: /* No Message */ case 0x01: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - Error correction code " - "hardware fault"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - Error correction code " + "hardware fault"); break; case 0x03: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - Unexpected end operation " - "response code"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - Unexpected end operation " + "response code"); break; case 0x04: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - End operation 
with transfer " - "count not zero"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - End operation with transfer " + "count not zero"); break; case 0x05: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - End operation with transfer " - "count zero"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - End operation with transfer " + "count zero"); break; case 0x06: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - DPS checks after a system " - "reset or selective reset"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - DPS checks after a system " + "reset or selective reset"); break; case 0x07: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - DPS cannot be filled"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - DPS cannot be filled"); break; case 0x08: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - Short busy time-out during " - "device selection"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - Short busy time-out during " + "device selection"); break; case 0x09: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - DASD controller failed to " - "set or reset the long busy latch"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - DASD controller failed to " + "set or reset the long busy latch"); break; case 0x0A: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - No interruption from device " - "during a command chain"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - No interruption from device " + "during a command chain"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 8 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 8 - Reserved"); } break; @@ -1297,94 +1359,97 @@ case 0x00: break; /* No Message */ case 0x06: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 9 - Device check-2 error"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 9 - Device check-2 error"); break; case 0x07: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 9 - Head address did not compare"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 9 - Head address did not compare"); break; case 0x0A: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 9 - Track physical address did " - "not compare while oriented"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 9 - Track physical address did " + "not compare while oriented"); break; case 0x0E: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 9 - Cylinder address did not " - "compare"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 9 - Cylinder address did not " + "compare"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT 9 - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT 9 - Reserved"); } break; case 0xF0: /* Format F - Cache Storage Checks */ switch (msg_no) { case 0x00: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Operation Terminated"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Operation Terminated"); break; case 0x01: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Subsystem Processing Error"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Subsystem Processing Error"); break; case 0x02: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Cache or nonvolatile storage " - "equipment failure"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Cache or nonvolatile storage " + "equipment failure"); break; case 0x04: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Caching 
terminated"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Caching terminated"); break; case 0x06: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Cache fast write access not " - "authorized"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Cache fast write access not " + "authorized"); break; case 0x07: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Track format incorrect"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Track format incorrect"); break; case 0x09: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Caching reinitiated"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Caching reinitiated"); break; case 0x0A: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Nonvolatile storage " - "terminated"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Nonvolatile storage " + "terminated"); break; case 0x0B: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Volume is suspended duplex"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Volume is suspended duplex"); break; case 0x0C: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Subsystem status connot be " - "determined"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Subsystem status connot be " + "determined"); break; case 0x0D: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - Caching status reset to " - "default"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - Caching status reset to " + "default"); break; case 0x0E: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT F - DASD Fast Write inhibited"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT F - DASD Fast Write inhibited"); break; default: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "FORMAT D - Reserved"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "FORMAT D - Reserved"); } break; default: /* unknown message format - should not happen */ - + DEV_MESSAGE (KERN_WARNING, device, + "unknown message format %02x", + msg_format); + } /* end switch message format */ - + } /* end dasd_3990_handle_env_data */ /* @@ -1412,8 +1477,8 @@ /* env data present (ACTION 10 - retry should work) */ if (sense[2] & SNS2_ENV_DATA_PRESENT) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Command Reject - environmental data present"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Command Reject - environmental data present"); dasd_3990_handle_env_data (erp, sense); @@ -1422,8 +1487,8 @@ } else { /* fatal error - set status to FAILED */ - DASD_MESSAGE (KERN_ERR, device, "%s", - "Command Reject - Fatal error"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "Command Reject - Fatal error"); erp = dasd_3990_erp_cleanup (erp, CQR_STATUS_FAILED); @@ -1460,9 +1525,9 @@ } else { /* issue a message and wait for 'device ready' interrupt */ - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "bus out parity error or BOPC requested by " - "channel"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "bus out parity error or BOPC requested by " + "channel"); dasd_3990_erp_block_queue (erp, 60); @@ -1495,22 +1560,22 @@ if (sense[1] & SNS1_WRITE_INHIBITED) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Write inhibited path encountered"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Write inhibited path encountered"); /* vary path offline */ - DASD_MESSAGE (KERN_ERR, device, "%s", - "Path should be varied off-line. " - "This is not implemented yet \n - please report " - "to linux390@de.ibm.com"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "Path should be varied off-line. 
" + "This is not implemented yet \n - please report " + "to linux390@de.ibm.com"); erp = dasd_3990_erp_action_1 (erp); } else if (sense[2] & SNS2_ENV_DATA_PRESENT) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Equipment Check - " - "environmental data present"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Equipment Check - " + "environmental data present"); dasd_3990_handle_env_data (erp, sense); @@ -1519,17 +1584,18 @@ sense); } else if (sense[1] & SNS1_PERM_ERR) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Equipment Check - retry exhausted or " - "undesirable"); + + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Equipment Check - retry exhausted or " + "undesirable"); erp = dasd_3990_erp_action_1 (erp); } else { /* all other equipment checks - Action 5 */ /* rest is done when retries == 0 */ - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Equipment check or processing error"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Equipment check or processing error"); erp = dasd_3990_erp_action_5 (erp); } @@ -1561,9 +1627,9 @@ if (sense[2] & SNS2_CORRECTABLE) { /* correctable data check */ /* issue message that the data has been corrected */ - DASD_MESSAGE (KERN_EMERG, device, "%s", - "Data recovered during retry with PCI " - "fetch mode active"); + DEV_MESSAGE (KERN_EMERG, device, "%s", + "Data recovered during retry with PCI " + "fetch mode active"); /* not possible to handle this situation in Linux */ panic("No way to inform appliction about the possibly " @@ -1571,26 +1637,26 @@ } else if (sense[2] & SNS2_ENV_DATA_PRESENT) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Uncorrectable data check recovered secondary " - "addr of duplex pair"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Uncorrectable data check recovered secondary " + "addr of duplex pair"); erp = dasd_3990_erp_action_4 (erp, sense); } else if (sense[1] & SNS1_PERM_ERR) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Uncorrectable data check with internal " - "retry exhausted"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Uncorrectable data check with internal " + "retry exhausted"); erp = dasd_3990_erp_action_1 (erp); } else { /* all other data checks */ - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Uncorrectable data check with retry count " - "exhausted..."); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Uncorrectable data check with retry count " + "exhausted..."); erp = dasd_3990_erp_action_5 (erp); } @@ -1619,9 +1685,9 @@ erp->function = dasd_3990_erp_overrun; - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Overrun - service overrun or overrun" - " error requested by channel"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Overrun - service overrun or overrun" + " error requested by channel"); erp = dasd_3990_erp_action_5 (erp); @@ -1651,9 +1717,9 @@ if (sense[2] & SNS2_ENV_DATA_PRESENT) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Track format error when destaging or " - "staging data"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Track format error when destaging or " + "staging data"); dasd_3990_handle_env_data (erp, sense); @@ -1662,9 +1728,9 @@ sense); } else { - DASD_MESSAGE (KERN_ERR, device, "%s", - "Invalid Track Format - Fatal error should have " - "been handled within the interrupt handler"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "Invalid Track Format - Fatal error should have " + "been handled within the interrupt handler"); erp= dasd_3990_erp_cleanup (erp, CQR_STATUS_FAILED); @@ -1692,8 +1758,8 @@ dasd_device_t *device = default_erp->device; - DASD_MESSAGE (KERN_ERR, device, "%s", - "End-of-Cylinder - must never 
happen"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "End-of-Cylinder - must never happen"); /* implement action 7 - BUG */ return dasd_3990_erp_cleanup (default_erp, @@ -1721,8 +1787,8 @@ erp->function = dasd_3990_erp_env_data; - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Environmental data present"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Environmental data present"); dasd_3990_handle_env_data (erp, sense); @@ -1761,9 +1827,9 @@ dasd_device_t *device = default_erp->device; - DASD_MESSAGE (KERN_ERR, device, "%s", - "No Record Found - Fatal error should " - "have been handled within the interrupt handler"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "No Record Found - Fatal error should " + "have been handled within the interrupt handler"); return dasd_3990_erp_cleanup (default_erp, CQR_STATUS_FAILED); @@ -1789,8 +1855,8 @@ dasd_device_t *device = erp->device; - DASD_MESSAGE (KERN_ERR, device, "%s", - "File Protected"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "File Protected"); return dasd_3990_erp_cleanup (erp, CQR_STATUS_FAILED); @@ -1921,8 +1987,8 @@ erp->retries = 256; erp->function = dasd_3990_erp_action_10_32; - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Perform logging requested"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Perform logging requested"); return erp; @@ -1960,8 +2026,8 @@ char *LO_data; /* LO_eckd_data_t */ ccw1_t *ccw; - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Write not finished because of unexpected condition"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Write not finished because of unexpected condition"); default_erp->function = dasd_3990_erp_action_1B_32; @@ -1975,8 +2041,8 @@ /* for imprecise ending just do default erp */ if (sense[1] & 0x01) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Imprecise ending is set - just retry"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Imprecise ending is set - just retry"); return default_erp; } @@ -1987,9 +2053,9 @@ if (cpa == 0) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Unable to determine address of the CCW " - "to be restarted"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Unable to determine address of the CCW " + "to be restarted"); return dasd_3990_erp_cleanup (default_erp, CQR_STATUS_FAILED); @@ -2004,8 +2070,8 @@ if (!erp) { - DASD_MESSAGE (KERN_ERR, device, "%s", - "Unable to allocate ERP"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "Unable to allocate ERP"); return dasd_3990_erp_cleanup (default_erp, CQR_STATUS_FAILED); @@ -2023,8 +2089,8 @@ if ((sense[3] == 0x01) && (LO_data[1] & 0x01) ){ - DASD_MESSAGE (KERN_ERR, device, "%s", - "BUG - this should not happen"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "BUG - this should not happen"); return dasd_3990_erp_cleanup (default_erp, CQR_STATUS_FAILED); @@ -2060,8 +2126,8 @@ if (dasd_set_normalized_cda (ccw, __pa (DE_data), erp, device)) { dasd_free_request (erp, device); - DASD_MESSAGE (KERN_ERR, device, "%s", - "Unable to allocate ERP"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "Unable to allocate ERP"); return dasd_3990_erp_cleanup (default_erp, CQR_STATUS_FAILED); @@ -2076,8 +2142,8 @@ if (dasd_set_normalized_cda (ccw, __pa (LO_data), erp, device)){ dasd_free_request (erp, device); - DASD_MESSAGE (KERN_ERR, device, "%s", - "Unable to allocate ERP"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "Unable to allocate ERP"); return dasd_3990_erp_cleanup (default_erp, CQR_STATUS_FAILED); @@ -2132,9 +2198,9 @@ char *LO_data; /* LO_eckd_data_t */ ccw1_t *ccw; - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Write not finished because of unexpected condition" - " - follow 
on"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Write not finished because of unexpected condition" + " - follow on"); /* determine the original cqr */ cqr = previous_erp; @@ -2146,8 +2212,8 @@ /* for imprecise ending just do default erp */ if (sense[1] & 0x01) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Imprecise ending is set - just retry"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Imprecise ending is set - just retry"); check_then_set (&previous_erp->status, CQR_STATUS_ERROR, @@ -2162,9 +2228,9 @@ if (cpa == 0) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "Unable to determine address of the CCW " - "to be restarted"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Unable to determine address of the CCW " + "to be restarted"); check_then_set (&previous_erp->status, CQR_STATUS_ERROR, @@ -2181,8 +2247,8 @@ if ((sense[3] == 0x01) && (LO_data[1] & 0x01) ){ - DASD_MESSAGE (KERN_ERR, device, "%s", - "BUG - this should not happen"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "BUG - this should not happen"); check_then_set (&previous_erp->status, CQR_STATUS_ERROR, @@ -2337,7 +2403,7 @@ switch (sense[28]) { case 0x17: /* issue a Diagnostic Control command with an - * Inhibit Write subcommand and controler modifier */ + * Inhibit Write subcommand and controler modifier */ erp = dasd_3990_erp_DCTL (erp, 0x20); break; @@ -2351,7 +2417,7 @@ break; default: - /* should not happen - continue */ + ; /* should not happen - continue */ } } @@ -2389,11 +2455,11 @@ /* set to suspended duplex state then restart */ dasd_device_t *device = erp->device; - DASD_MESSAGE (KERN_ERR, device, "%s", - "Set device to suspended duplex state should be " - "done!\n" - "This is not implemented yet (for compound ERP)" - " - please report to linux390@de.ibm.com"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "Set device to suspended duplex state should be " + "done!\n" + "This is not implemented yet (for compound ERP)" + " - please report to linux390@de.ibm.com"); } @@ -2490,10 +2556,10 @@ switch (sense[25]) { case 0x00: /* success */ - DASD_MESSAGE (KERN_DEBUG, device, - "ERP called for successful request %p" - " - NO ERP necessary", - erp); + DEV_MESSAGE (KERN_DEBUG, device, + "ERP called for successful request %p" + " - NO ERP necessary", + erp); erp = dasd_3990_erp_cleanup (erp, CQR_STATUS_DONE); @@ -2501,9 +2567,9 @@ break; case 0x01: /* fatal error */ - DASD_MESSAGE (KERN_ERR, device, "%s", - "Fatal error should have been " - "handled within the interrupt handler"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "Fatal error should have been " + "handled within the interrupt handler"); erp = dasd_3990_erp_cleanup (erp, CQR_STATUS_FAILED); @@ -2515,12 +2581,12 @@ break; case 0x0F: /* length mismatch during update write command */ - DASD_MESSAGE (KERN_ERR, device, "%s", - "update write command error - should not " - "happen;\n" - "Please send this message together with " - "the above sense data to linux390@de." - "ibm.com"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "update write command error - should not " + "happen;\n" + "Please send this message together with " + "the above sense data to linux390@de." + "ibm.com"); erp = dasd_3990_erp_cleanup (erp, CQR_STATUS_FAILED); @@ -2532,12 +2598,12 @@ break; case 0x15: /* next track outside defined extend */ - DASD_MESSAGE (KERN_ERR, device, "%s", - "next track outside defined extend - " - "should not happen;\n" - "Please send this message together with " - "the above sense data to linux390@de." 
- "ibm.com"); + DEV_MESSAGE (KERN_ERR, device, "%s", + "next track outside defined extend - " + "should not happen;\n" + "Please send this message together with " + "the above sense data to linux390@de." + "ibm.com"); erp= dasd_3990_erp_cleanup (erp, CQR_STATUS_FAILED); @@ -2550,9 +2616,9 @@ break; case 0x1C: /* invalid data */ - DASD_MESSAGE (KERN_EMERG, device, "%s", - "Data recovered during retry with PCI " - "fetch mode active"); + DEV_MESSAGE (KERN_EMERG, device, "%s", + "Data recovered during retry with PCI " + "fetch mode active"); /* not possible to handle this situation in Linux */ panic("Invalid data - No way to inform appliction about " @@ -2560,16 +2626,16 @@ break; case 0x1D: /* state-change pending */ - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "A State change pending condition exists " - "for the subsystem or device"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "A State change pending condition exists " + "for the subsystem or device"); erp = dasd_3990_erp_action_4 (erp, sense); break; - default: /* all others errors - default erp */ - + default: + ; /* all others errors - default erp */ } } @@ -2642,25 +2708,47 @@ { dasd_device_t *device = cqr->device; + ccw1_t *ccw; /* allocate additional request block */ - ccw_req_t *erp = dasd_alloc_request ((char *) &cqr->magic, 1, 0, cqr->device); + ccw_req_t *erp = dasd_alloc_request ((char *) &cqr->magic, 2, 0, cqr->device); if (!erp) { - - DASD_MESSAGE (KERN_ERR, device, "%s", - "Unable to allocate ERP request"); + if (cqr->retries <= 0) { + DEV_MESSAGE (KERN_ERR, device, "%s", + "Unable to allocate ERP request (NO retries left)"); - check_then_set (&cqr->status, - CQR_STATUS_ERROR, - CQR_STATUS_FAILED); + check_then_set (&cqr->status, + CQR_STATUS_ERROR, + CQR_STATUS_FAILED); + cqr->stopclk = get_clock (); + + } else { + DEV_MESSAGE (KERN_ERR, device, + "Unable to allocate ERP request (%i retries left)", + cqr->retries); + + if (!timer_pending(&device->timer)) { + init_timer (&device->timer); + device->timer.function = dasd_schedule_bh_timed; + device->timer.data = (unsigned long) device; + device->timer.expires = jiffies + (HZ << 3); + add_timer (&device->timer); + } else { + mod_timer(&device->timer, jiffies + (HZ << 3)); + } + } return cqr; } /* initialize request with default TIC to current ERP/CQR */ - erp->cpaddr->cmd_code = CCW_CMD_TIC; - erp->cpaddr->cda = (long)(cqr->cpaddr); + ccw = erp->cpaddr; + ccw->cmd_code = CCW_CMD_NOOP; + ccw->flags = CCW_FLAG_CC; + ccw++; + ccw->cmd_code = CCW_CMD_TIC; + ccw->cda = (long)(cqr->cpaddr); erp->function = dasd_3990_erp_add_erp; erp->refers = cqr; erp->device = cqr->device; @@ -2738,7 +2826,7 @@ } /* check sense data; byte 0-2,25,27 */ - if (!((strncmp (cqr1->dstat->ii.sense.data, + if (!((memcmp (cqr1->dstat->ii.sense.data, cqr2->dstat->ii.sense.data, 3) == 0) && (cqr1->dstat->ii.sense.data[27] == @@ -2859,10 +2947,10 @@ break; } default: - DASD_MESSAGE (KERN_DEBUG, device, - "invalid subcommand modifier 0x%x " - "for Diagnostic Control Command", - sense[25]); + DEV_MESSAGE (KERN_DEBUG, device, + "invalid subcommand modifier 0x%x " + "for Diagnostic Control Command", + sense[25]); } } @@ -2877,10 +2965,10 @@ } else { /* no retry left and no additional special handling necessary */ - DASD_MESSAGE (KERN_ERR, device, - "no retries left for erp %p - " - "set status to FAILED", - erp); + DEV_MESSAGE (KERN_ERR, device, + "no retries left for erp %p - " + "set status to FAILED", + erp); check_then_set (&erp->status, CQR_STATUS_ERROR, @@ -2940,7 +3028,7 @@ if (erp->retries > 0) { - char 
*sense = erp->dstat->ii.sense.data; + char *sense = erp->refers->dstat->ii.sense.data; /* check for special retries */ if (erp->function == dasd_3990_erp_action_4) { @@ -2959,10 +3047,10 @@ } else { /* simple retry */ - DASD_MESSAGE (KERN_DEBUG, device, - "%i retries left for erp %p", - erp->retries, - erp); + DEV_MESSAGE (KERN_DEBUG, device, + "%i retries left for erp %p", + erp->retries, + erp); /* handle the request again... */ check_then_set (&erp->status, @@ -3006,26 +3094,20 @@ __u32 cpa = cqr->dstat->cpa; #ifdef ERP_DEBUG - DASD_MESSAGE (KERN_DEBUG, device, - "entering 3990 ERP for " - "0x%04X on sch %d = /dev/%s ", - device->devinfo.devno, - device->devinfo.irq, - device->name); - /* print current erp_chain */ - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "ERP chain at BEGINNING of ERP-ACTION"); + /* print current erp_chain */ + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "ERP chain at BEGINNING of ERP-ACTION"); { ccw_req_t *temp_erp = NULL; + for (temp_erp = cqr; temp_erp != NULL; temp_erp = temp_erp->refers){ - DASD_MESSAGE (KERN_DEBUG, device, - " erp %p refers to %p", - temp_erp, - temp_erp->refers); + DEV_MESSAGE (KERN_DEBUG, device, + " erp %p refers to %p", + temp_erp, + temp_erp->refers); } } #endif /* ERP_DEBUG */ @@ -3034,10 +3116,10 @@ if ((cqr->dstat->cstat == 0x00 ) && (cqr->dstat->dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) ) { - DASD_MESSAGE (KERN_DEBUG, device, - "ERP called for successful request %p" - " - NO ERP necessary", - cqr); + DEV_MESSAGE (KERN_DEBUG, device, + "ERP called for successful request %p" + " - NO ERP necessary", + cqr); check_then_set (&cqr->status, CQR_STATUS_ERROR, @@ -3047,10 +3129,10 @@ } /* check if sense data are available */ if (!cqr->dstat->ii.sense.data) { - DASD_MESSAGE (KERN_DEBUG, device, - "ERP called witout sense data avail ..." - "request %p - NO ERP possible", - cqr); + DEV_MESSAGE (KERN_DEBUG, device, + "ERP called without sense data avail ..." + "request %p - NO ERP possible", + cqr); check_then_set (&cqr->status, CQR_STATUS_ERROR, @@ -3074,18 +3156,18 @@ #ifdef ERP_DEBUG /* print current erp_chain */ - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "ERP chain at END of ERP-ACTION"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "ERP chain at END of ERP-ACTION"); { ccw_req_t *temp_erp = NULL; for (temp_erp = erp; temp_erp != NULL; temp_erp = temp_erp->refers) { - DASD_MESSAGE (KERN_DEBUG, device, - " erp %p refers to %p", - temp_erp, - temp_erp->refers); + DEV_MESSAGE (KERN_DEBUG, device, + " erp %p refers to %p", + temp_erp, + temp_erp->refers); } } #endif /* ERP_DEBUG */ @@ -3098,35 +3180,32 @@ } /* enqueue added ERP request */ - if ((erp != cqr ) && + if ((erp != device->queue.head ) && (erp->status == CQR_STATUS_FILLED) ){ dasd_chanq_enq_head (&device->queue, erp); } else { - if ((erp->status == CQR_STATUS_FILLED )|| - (erp != cqr ) ){ - /* something strange happened - log the error and throw a BUG() */ - DASD_MESSAGE (KERN_ERR, device, "%s", - "Problems with ERP chain!!! 
BUG"); - + if ((erp->status == CQR_STATUS_FILLED ) || (erp != device->queue.head)) { + /* something strange happened - log the error and panic */ /* print current erp_chain */ - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "ERP chain at END of ERP-ACTION"); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "ERP chain at END of ERP-ACTION"); { ccw_req_t *temp_erp = NULL; for (temp_erp = erp; temp_erp != NULL; temp_erp = temp_erp->refers) { - DASD_MESSAGE (KERN_DEBUG, device, - " erp %p (function %p) refers to %p", - temp_erp, - temp_erp->function, - temp_erp->refers); + DEV_MESSAGE (KERN_DEBUG, device, + " erp %p (function %p)" + " refers to %p", + temp_erp, + temp_erp->function, + temp_erp->refers); } } - BUG(); + panic ("Problems with ERP chain!!! Please report to linux390@de.ibm.com"); } } @@ -3136,112 +3215,6 @@ } /* end dasd_3990_erp_action */ /* - * DASD_3990_ERP_POSTACTION - * - * DESCRIPTION - * Frees all ERPs of the current ERP Chain and set the status - * of the original CQR either to CQR_STATUS_DONE if ERP was successful - * or to CQR_STATUS_FAILED if ERP was NOT successful. - * - * PARAMETER - * erp current erp_head - * - * RETURN VALUES - * cqr pointer to the original CQR - */ -ccw_req_t * -dasd_3990_erp_postaction (ccw_req_t *erp) -{ - - ccw_req_t *cqr = NULL, - *free_erp = NULL; - dasd_device_t *device = erp->device; - int success; - - if (erp->refers == NULL || - erp->function == NULL ) { - - BUG (); - } - - if (erp->status == CQR_STATUS_DONE) - success = 1; - else - success = 0; - -#ifdef ERP_DEBUG - - /* print current erp_chain */ - printk (KERN_DEBUG PRINTK_HEADER - "3990 ERP postaction called for erp chain:\n"); - { - ccw_req_t *temp_erp = NULL; - - for (temp_erp = erp; - temp_erp != NULL; - temp_erp = temp_erp->refers) { - - printk (KERN_DEBUG PRINTK_HEADER - " erp %p refers to %p with erp function %p\n", - temp_erp, temp_erp->refers, temp_erp->function); - } - } - -#endif /* ERP_DEBUG */ - - /* free all ERPs - but NOT the original cqr */ - while (erp->refers != NULL) { - - free_erp = erp; - erp = erp->refers; - - /* remove the request from the device queue */ - dasd_chanq_deq (&device->queue, - free_erp); - - /* free the finished erp request */ - dasd_free_request (free_erp, free_erp->device); - } - - /* save ptr to original cqr */ - cqr = erp; - - /* set corresponding status to original cqr */ - if (success) { - - check_then_set (&cqr->status, - CQR_STATUS_ERROR, - CQR_STATUS_DONE); - } else { - - check_then_set (&cqr->status, - CQR_STATUS_ERROR, - CQR_STATUS_FAILED); - } - -#ifdef ERP_DEBUG - /* print current erp_chain */ - printk (KERN_DEBUG PRINTK_HEADER - "3990 ERP postaction finished with remaining chain:\n"); - { - ccw_req_t *temp_erp = NULL; - - for (temp_erp = cqr; - temp_erp != NULL; - temp_erp = temp_erp->refers) { - - printk (KERN_DEBUG PRINTK_HEADER - " erp %p refers to %p \n", temp_erp, - temp_erp->refers); - } - } -#endif /* ERP_DEBUG */ - - return cqr; - -} /* end dasd_3990_erp_postaction */ - -/* * Overrides for Emacs so that we follow Linus's tabbing style. * Emacs will notice this stuff at the end of the file and automatically * adjust the settings for this buffer only. 
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/block/dasd.c linux.21rc5-ac2/drivers/s390/block/dasd.c --- linux.21rc5/drivers/s390/block/dasd.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/block/dasd.c 2003-05-28 15:13:51.000000000 +0100 @@ -6,6 +6,8 @@ * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 * + * $Revision: 1.289 $ + * * History of changes (starts July 2000) * 11/09/00 complete redesign after code review * 02/01/01 added dynamic registration of ioctls @@ -33,6 +35,7 @@ * 06/26/01 hopefully fixed PL030172SBA,PL030234SBA * 07/09/01 fixed PL030324MSH (wrong statistics output) * 07/16/01 merged in new fixes for handling low-mem situations + * 01/22/01 fixed PL030579KBE (wrong statistics) */ #include @@ -70,7 +73,6 @@ #include #include #include -#include #include "dasd_int.h" @@ -84,8 +86,9 @@ #include "dasd_diag.h" #endif /* CONFIG_DASD_DIAG */ -/* SECTION: exported variables of dasd.c */ - +/******************************************************************************** + * SECTION: exported variables of dasd.c + ********************************************************************************/ debug_info_t *dasd_debug_area; MODULE_AUTHOR ("Holger Smolinski "); @@ -94,6 +97,9 @@ MODULE_SUPPORTED_DEVICE ("dasd"); MODULE_PARM (dasd, "1-" __MODULE_STRING (256) "s"); MODULE_PARM (dasd_disciplines, "1-" __MODULE_STRING (8) "s"); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12)) +MODULE_LICENSE ("GPL"); +#endif EXPORT_SYMBOL (dasd_chanq_enq_head); EXPORT_SYMBOL (dasd_debug_area); EXPORT_SYMBOL (dasd_chanq_enq); @@ -103,6 +109,7 @@ EXPORT_SYMBOL (dasd_start_IO); EXPORT_SYMBOL (dasd_term_IO); EXPORT_SYMBOL (dasd_schedule_bh); +EXPORT_SYMBOL (dasd_schedule_bh_timed); EXPORT_SYMBOL (dasd_int_handler); EXPORT_SYMBOL (dasd_oper_handler); EXPORT_SYMBOL (dasd_alloc_request); @@ -115,17 +122,24 @@ EXPORT_SYMBOL (dasd_set_normalized_cda); EXPORT_SYMBOL (dasd_device_from_kdev); -/* SECTION: Constant definitions to be used within this file */ + +/******************************************************************************** + * SECTION: Constant definitions to be used within this file + ********************************************************************************/ #define PRINTK_HEADER DASD_NAME":" -#undef DASD_PROFILE /* fill profile information - used for */ +#define DASD_PROFILE /* fill profile information - used for */ /* statistics and perfomance */ -#define DASD_MIN_SIZE_FOR_QUEUE 32 -#undef CONFIG_DYNAMIC_QUEUE_MIN_SIZE -#define DASD_CHANQ_MAX_SIZE 6 - -/* SECTION: prototypes for static functions of dasd.c */ +#ifndef CONFIG_PROC_FS /* DASD_PROFILE doesn't make sense */ +#undef DASD_PROFILE /* without procfs */ +#endif /* not CONFIG_PROC_FS */ + +#define DASD_CHANQ_MAX_SIZE 4 + +/******************************************************************************** + * SECTION: prototypes for static functions of dasd.c + ********************************************************************************/ static request_fn_proc do_dasd_request; static int dasd_set_device_level (unsigned int, dasd_discipline_t *, int); @@ -147,7 +161,11 @@ static struct block_device_operations dasd_device_operations; static inline dasd_device_t ** dasd_device_from_devno (int); static void dasd_process_queues (dasd_device_t * device); -/* SECTION: static variables of dasd.c */ +static int dasd_sleep_on_immediate (ccw_req_t *cqr); + 
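/*
 * [Editor's note - illustrative only, not part of the patch] The hunk above
 * wraps MODULE_LICENSE() in a LINUX_VERSION_CODE check because the macro was
 * only introduced during the 2.4 series (around 2.4.10); an unguarded call
 * would break compilation against older trees. The same pattern fits any
 * interface that appeared mid-series, for example:
 *
 *	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12))
 *	MODULE_LICENSE ("GPL");
 *	#endif
 *
 * KERNEL_VERSION() itself comes from <linux/version.h>, which the module
 * headers already pull in.
 */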
+/******************************************************************************** + * SECTION: static variables of dasd.c + ********************************************************************************/ static devfs_handle_t dasd_devfs_handle; static wait_queue_head_t dasd_init_waitq; @@ -155,7 +173,9 @@ #ifdef CONFIG_DASD_DYNAMIC -/* SECTION: managing dynamic configuration of dasd_driver */ +/******************************************************************************** + * SECTION: managing dynamic configuration of dasd_driver + ********************************************************************************/ static struct list_head dasd_devreg_head = LIST_HEAD_INIT (dasd_devreg_head); @@ -188,15 +208,17 @@ #endif /* CONFIG_DASD_DYNAMIC */ -/* SECTION: managing setup of dasd_driver */ +/******************************************************************************** + * SECTION: managing setup of dasd_driver + ********************************************************************************/ /* default setting is probeonly, autodetect */ -static int dasd_probeonly = 1; /* is true, when probeonly mode is active */ -static int dasd_autodetect = 1; /* is true, when autodetection is active */ +static int dasd_probeonly = 0; /* is true, when probeonly mode is active */ +static int dasd_autodetect = 0; /* is true, when autodetection is active */ static dasd_range_t dasd_range_head = { list:LIST_HEAD_INIT (dasd_range_head.list) }; -static spinlock_t range_lock = SPIN_LOCK_UNLOCKED; +static spinlock_t range_lock = SPIN_LOCK_UNLOCKED; /* * function: dasd_create_range @@ -210,20 +232,24 @@ int i; if ( from > to ) { - printk (KERN_WARNING PRINTK_HEADER - "Adding device range %04x-%04x: range invalid, ignoring.\n", - from, - to); + + MESSAGE (KERN_WARNING, + "Adding device range %04x-%04x: " + "range invalid, ignoring.", + from, + to); return NULL; } for (i=from;i<=to;i++) { if (dasd_device_from_devno(i)) { - printk (KERN_WARNING PRINTK_HEADER - "device range %04x-%04x: device %04x is already in a range.\n", - from, - to, - i); + + MESSAGE (KERN_WARNING, + "device range %04x-%04x: device " + "%04x is already in a range.", + from, + to, + i); } } range = (dasd_range_t *) kmalloc (sizeof (dasd_range_t), GFP_KERNEL); @@ -392,7 +418,9 @@ return -ENODEV; } -/* SECTION: parsing the dasd= parameter of the parmline/insmod cmdline */ +/******************************************************************************** + * SECTION: parsing the dasd= parameter of the parmline/insmod cmdline + ********************************************************************************/ /* * char *dasd[] is intended to hold the ranges supplied by the dasd= statement @@ -426,11 +454,14 @@ *end = '\0'; end++; } - dasd[count] = kmalloc (len * sizeof (char), GFP_ATOMIC); + dasd[count] = kmalloc (len * sizeof (char), + GFP_ATOMIC); if (dasd[count] == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "can't store dasd= parameter no %d\n", - count + 1); + + MESSAGE (KERN_WARNING, + "can't store dasd= parameter no" + " %d", + count + 1); break; } memset (dasd[count], 0, len * sizeof (char)); @@ -500,10 +531,13 @@ int val,i,start; buffer=(char*)kmalloc((strlen(str)+1)*sizeof(char),GFP_ATOMIC); + if (buffer==NULL) { - printk (KERN_WARNING PRINTK_HEADER - "can't parse dasd= parameter %s due to low memory\n", - str); + + MESSAGE (KERN_WARNING, + "can't parse dasd= parameter %s due " + "to low memory", + str); } /* remove leading '0x' */ @@ -528,7 +562,7 @@ val = simple_strtoul (buffer, &buffer, 16); /* check for features - e.g. 
(ro) ; the '\0', ')' and '-' stops check */ - *features = DASD_DEFAULT_FEATURES; + *features = DASD_FEATURE_DEFAULT; if (temp[i]=='(') { @@ -545,14 +579,18 @@ (*features) |= DASD_FEATURE_READONLY; break; } - printk (KERN_WARNING PRINTK_HEADER - "unsupported feature: %s, ignoring setting", - buffer); + + MESSAGE (KERN_WARNING, + "unsupported feature: %s, " + "ignoring setting", + buffer); } } } *stra = temp+i; + if ((val > 65535) || (val < 0)) + return -EINVAL; return val; } @@ -569,22 +607,23 @@ int features; int rc = 0; - if (*str) { - /* turn off probeonly mode, if any dasd parameter is present */ - dasd_probeonly = 0; - dasd_autodetect = 0; - } while (*str) { temp = *str; from = 0; to = 0; if (strcmp ("autodetect", *str) == 0) { dasd_autodetect = 1; - printk (KERN_INFO "turning to autodetection mode\n"); + + MESSAGE (KERN_INFO, "%s", + "turning to autodetection mode"); + break; } else if (strcmp ("probeonly", *str) == 0) { dasd_probeonly = 1; - printk (KERN_INFO "turning to probeonly mode\n"); + + MESSAGE (KERN_INFO, "%s", + "turning to probeonly mode"); + break; } else { /* turn off autodetect mode, if any range is present */ @@ -609,20 +648,16 @@ return rc; } -/* SECTION: Dealing with devices registered to multiple major numbers */ +/******************************************************************************** + * SECTION: Dealing with devices registered to multiple major numbers + ********************************************************************************/ static spinlock_t dasd_major_lock = SPIN_LOCK_UNLOCKED; -static major_info_t dasd_major_info[] = { - { - list:LIST_HEAD_INIT (dasd_major_info[1].list) - }, - { - list:LIST_HEAD_INIT (dasd_major_info[0].list), - gendisk:{ - INIT_GENDISK (94, DASD_NAME, DASD_PARTN_BITS, DASD_PER_MAJOR) - }, - flags:DASD_MAJOR_INFO_IS_STATIC} +static struct list_head dasd_major_info = LIST_HEAD_INIT(dasd_major_info); +static major_info_t dasd_major_static = { + gendisk:{INIT_GENDISK(94, DASD_NAME, DASD_PARTN_BITS, DASD_PER_MAJOR)}, + flags: DASD_MAJOR_INFO_IS_STATIC }; static major_info_t * @@ -658,8 +693,11 @@ if (major_info == NULL) { major_info = get_new_major_info (); if (!major_info) { - printk (KERN_WARNING PRINTK_HEADER - "Cannot get memory to allocate another major number\n"); + + MESSAGE (KERN_WARNING, "%s", + "Cannot get memory to allocate another " + "major number"); + return -ENOMEM; } } @@ -686,10 +724,12 @@ /* register blockdevice */ rc = devfs_register_blkdev (major, DASD_NAME, &dasd_device_operations); if (rc < 0) { - printk (KERN_WARNING PRINTK_HEADER - "Cannot register to major no %d, rc = %d\n", - major, - rc); + + MESSAGE (KERN_WARNING, + "Cannot register to major no %d, rc = %d", + major, + rc); + goto out_reg_blkdev; } else { major_info->flags |= DASD_MAJOR_INFO_REGISTERED; @@ -698,7 +738,7 @@ /* Insert the new major info into dasd_major_info if needed (dynamic major) */ if (!(major_info->flags & DASD_MAJOR_INFO_IS_STATIC)) { spin_lock_irqsave (&dasd_major_lock, flags); - list_add_tail (&major_info->list, &dasd_major_info[0].list); + list_add_tail (&major_info->list, &dasd_major_info); spin_unlock_irqrestore (&dasd_major_lock, flags); } @@ -791,10 +831,11 @@ /* unregister blockdevice */ rc = devfs_unregister_blkdev (major, DASD_NAME); if (rc < 0) { - printk (KERN_WARNING PRINTK_HEADER - "Unable to unregister from major no %d, rc = %d\n", - major, - rc); + + MESSAGE (KERN_WARNING, + "Unable to unregister from major no %d, rc = %d", + major, + rc); } else { major_info->flags &= ~DASD_MAJOR_INFO_REGISTERED; } @@ -844,10 
+885,12 @@ rc = devfs_unregister_blkdev (major, DASD_NAME); if (rc < 0) { - printk (KERN_WARNING PRINTK_HEADER - "Cannot unregister from major no %d, rc = %d\n", - major, - rc); + + MESSAGE (KERN_WARNING, + "Cannot unregister from major no %d, rc = %d", + major, + rc); + return rc; } else { major_info->flags &= ~DASD_MAJOR_INFO_REGISTERED; @@ -874,20 +917,23 @@ dasd_device_t * dasd_device_from_kdev (kdev_t kdev) { - major_info_t *major_info = NULL; + major_info_t *major_info; + dasd_device_t *device; struct list_head *l; unsigned long flags; + device = NULL; spin_lock_irqsave (&dasd_major_lock, flags); - list_for_each (l, &dasd_major_info[0].list) { + list_for_each (l, &dasd_major_info) { major_info = list_entry (l, major_info_t, list); - if (major_info->gendisk.major == MAJOR (kdev)) + if (major_info->gendisk.major == MAJOR (kdev)) { + device = major_info->dasd_device[MINOR (kdev) >> + DASD_PARTN_BITS]; break; + } } spin_unlock_irqrestore (&dasd_major_lock, flags); - if (major_info != &dasd_major_info[0]) - return major_info->dasd_device[MINOR (kdev) >> DASD_PARTN_BITS]; - return NULL; + return device; } /* @@ -900,21 +946,29 @@ dasd_device_from_devno (int devno) { major_info_t *major_info; + dasd_device_t **device; struct list_head *l; - int devindex = dasd_devindex_from_devno (devno); + int devindex; unsigned long flags; spin_lock_irqsave (&dasd_major_lock, flags); - list_for_each (l, &dasd_major_info[0].list) { + devindex = dasd_devindex_from_devno (devno); + if (devindex < 0) { + spin_unlock_irqrestore (&dasd_major_lock, flags); + return NULL; + } + + device = NULL; + list_for_each (l, &dasd_major_info) { major_info = list_entry (l, major_info_t, list); if (devindex < DASD_PER_MAJOR) { - spin_unlock_irqrestore (&dasd_major_lock, flags); - return &major_info->dasd_device[devindex]; + device = &major_info->dasd_device[devindex]; + break; } devindex -= DASD_PER_MAJOR; } spin_unlock_irqrestore (&dasd_major_lock, flags); - return NULL; + return device; } /* @@ -945,9 +999,33 @@ return -ENODEV; } +/* + * function: dasd_check_bp_block + * checks the blocksize and returns 0 if valid. + */ + +static int +dasd_check_bp_block (dasd_device_t *device) +{ + int rc; + switch (device->sizes.bp_block) { + case 512: + case 1024: + case 2048: + case 4096: + rc = 0; + break; + default: + rc = -EMEDIUMTYPE; + } + + return rc; +} -/* SECTION: managing dasd disciplines */ +/******************************************************************************** + * SECTION: managing dasd disciplines + ********************************************************************************/ /* anchor and spinlock for list of disciplines */ static struct list_head dasd_disc_head = LIST_HEAD_INIT(dasd_disc_head); @@ -955,14 +1033,18 @@ /* * function dasd_discipline_enq - * chains the discpline given as argument to the head of disiplines - * head chaining policy is required to allow module disciplines to - * be preferred against those, who are statically linked + * chains the discipline given as argument to the tail of disciplines. + * Exception: DIAG is always queued to the head, to ensure that CMS RESERVED + * minidisks are invariably accessed using DIAG. 
*/ static inline void -dasd_discipline_enq (dasd_discipline_t * d) +dasd_discipline_enq (dasd_discipline_t *discipline) { - list_add(&d->list, &dasd_disc_head); + if (strncmp (discipline->name, "DIAG", 4) == 0) { + list_add (&discipline->list, &dasd_disc_head); + } else { + list_add_tail (&discipline->list, &dasd_disc_head); + } } /* @@ -970,59 +1052,88 @@ * removes the discipline given as argument from the list of disciplines */ static inline void -dasd_discipline_deq (dasd_discipline_t * d) +dasd_discipline_deq (dasd_discipline_t * discipline) { - list_del(&d->list); + if (&discipline->list) { + list_del (&discipline->list); + } } void -dasd_discipline_add (dasd_discipline_t * d) +dasd_discipline_add (dasd_discipline_t * discipline) { unsigned long flags; MOD_INC_USE_COUNT; spin_lock_irqsave (&discipline_lock,flags); - dasd_discipline_enq (d); + dasd_discipline_enq (discipline); spin_unlock_irqrestore (&discipline_lock,flags); - dasd_enable_ranges (&dasd_range_head, d, DASD_STATE_ONLINE); + + dasd_enable_ranges (&dasd_range_head, + discipline, + DASD_STATE_ONLINE); } -void dasd_discipline_del (dasd_discipline_t * d) +void dasd_discipline_del (dasd_discipline_t * discipline) { unsigned long flags; + + dasd_disable_ranges(&dasd_range_head, + discipline, + DASD_STATE_DEL, + 1); + spin_lock_irqsave (&discipline_lock,flags); - dasd_disable_ranges(&dasd_range_head, d, DASD_STATE_DEL, 1); - dasd_discipline_deq (d); + dasd_discipline_deq (discipline); spin_unlock_irqrestore (&discipline_lock,flags); MOD_DEC_USE_COUNT; } +/* + * function dasd_find_disc + * checks the list of disciplines for the first one able to access the device + */ static inline dasd_discipline_t * -dasd_find_disc (dasd_device_t * device, dasd_discipline_t *d) +dasd_find_disc (dasd_device_t * device, dasd_discipline_t *discipline) { dasd_discipline_t *t; - struct list_head *l = d ? &d->list : dasd_disc_head.next; + struct list_head *l = discipline ? 
+ &discipline->list : dasd_disc_head.next; + do { t = list_entry(l,dasd_discipline_t,list); + if ( ( t->id_check == NULL || t->id_check (&device->devinfo) == 0 ) && ( t->check_characteristics == NULL || t->check_characteristics (device) == 0 ) ) break; l = l->next; - if ( d || + if ( discipline || l == &dasd_disc_head ) { t = NULL; break; } } while ( 1 ); + return t; } -/* SECTION: profiling stuff */ +/******************************************************************************** + * SECTION: profiling stuff + ********************************************************************************/ + +#ifdef CONFIG_PROC_FS static dasd_profile_info_t dasd_global_profile; +#endif /* CONFIG_PROC_FS */ #ifdef DASD_PROFILE + +#define DASD_PROFILE_ON 1 +#define DASD_PROFILE_OFF 0 + +static unsigned int dasd_profile_level = DASD_PROFILE_OFF; + /* * macro: dasd_profile_add_counter * increments counter in global and local profiling structures @@ -1032,7 +1143,7 @@ { \ int ind; \ long help; \ - for (ind = 0, help = value >> 3; \ + for (ind = 0, help = value >> 2; \ ind < 31 && help; \ help = help >> 1, ind++) {} \ dasd_global_profile.counter[ind]++; \ @@ -1086,56 +1197,150 @@ dasd_profile_add_counter (irqtime / sectors, dasd_io_time2ps, device); dasd_profile_add_counter (endtime, dasd_io_time3, device); } -#endif +#endif /* DASD_PROFILE */ -/* SECTION: All the gendisk stuff */ +/******************************************************************************** + * SECTION: All the gendisk stuff + ********************************************************************************/ -/* SECTION: Managing wrappers for ccwcache */ +/******************************************************************************** + * SECTION: Managing wrappers for ccwcache + ********************************************************************************/ /* * function dasd_alloc_request * tries to return space for a channel program of length cplength with * additional data of size datasize. - * If the ccwcache cannot fulfill the request it tries the emergeny requests - * before giving up finally + * If the ccwcache cannot fulfill the request it tries the lowmem requests + * before giving up finally. 
* FIXME: initialization of ccw_req_t should be done by function of ccwcache */ ccw_req_t * -dasd_alloc_request (char *magic, int cplength, int datasize, dasd_device_t* device) +dasd_alloc_request (char *magic, int cplength, int datasize, + dasd_device_t *device) { - ccw_req_t *rv = NULL; + ccw_req_t *cqr; + unsigned long size_needed; + unsigned long data_offset, ccw_offset; + dasd_lowmem_t *lowmem; + + if ((cqr = ccw_alloc_request (magic, cplength, datasize)) != NULL) { + return cqr; + } + + /* Sanity checks */ + if (magic == NULL || datasize > PAGE_SIZE || + cplength == 0 || (cplength * sizeof(ccw1_t)) > PAGE_SIZE) + BUG(); - if ((rv = ccw_alloc_request (magic, cplength, datasize)) != NULL) { - return rv; + /* use lowmem page only for ERP or */ + /* if there are less than 2 requests on queue */ + if (device->queue.head != NULL && + device->queue.head->next != NULL && + device->queue.head->status != CQR_STATUS_ERROR) { + DEV_MESSAGE (KERN_DEBUG, device, + "Refusing emergency mem for request " + "(enough requests in queue) " + "head %p, status %x next %p", + device->queue.head, + device->queue.head->status, + device->queue.head->next); + return NULL; + } + + /* We try to keep things together in memory */ + size_needed = (sizeof (ccw_req_t) + 7) & (~7L); + data_offset = ccw_offset = 0; + if (size_needed + datasize <= PAGE_SIZE) { + /* Keep data with the request */ + data_offset = size_needed; + size_needed += (datasize + 7) & (~7L); + } + if (size_needed + cplength*sizeof(ccw1_t) <= PAGE_SIZE) { + /* Keep CCWs with request */ + ccw_offset = size_needed; + size_needed += (cplength*sizeof(ccw1_t)) & (~7L); } - if ((((sizeof (ccw_req_t) + 7) & -8) + - cplength * sizeof (ccw1_t) + datasize) > PAGE_SIZE) { - BUG (); + + /* take page from lowmem_pool for request */ + list_for_each_entry (lowmem, &device->lowmem_pool, list) { + DEV_MESSAGE (KERN_DEBUG, device, + "Take lowmem page %p for request", + lowmem); + list_del (&lowmem->list); + cqr = (ccw_req_t *) lowmem; + memset (cqr, 0, PAGE_SIZE); + cqr->flags |= CQR_FLAGS_LM_CQR; + break; + } + if (cqr == NULL) + return NULL; + + /* take page from lowmem_pool for the extra data */ + if (data_offset == 0) { + + list_for_each_entry (lowmem, &device->lowmem_pool, list) { + DEV_MESSAGE (KERN_DEBUG, device, + "Take lowmem page %p for extra data", + lowmem); + list_del (&lowmem->list); + cqr->data = (void *) lowmem; + memset (cqr->data, 0, PAGE_SIZE); + break; + } + if (cqr->data == NULL) { + printk(KERN_DEBUG PRINTK_HEADER + "Couldn't allocate data area\n"); + + lowmem = (dasd_lowmem_t *) cqr; + list_add (&lowmem->list, &device->lowmem_pool); + return NULL; } - if (device->lowmem_cqr==NULL) { - DASD_DRIVER_DEBUG_EVENT (2, dasd_alloc_request, - "(%04x) Low memory! 
Using emergency request %p.", - device->devinfo.devno, - device->lowmem_ccws); - - device->lowmem_cqr=device->lowmem_ccws; - rv = device->lowmem_ccws; - memset (rv, 0, PAGE_SIZE); - strncpy ((char *) (&rv->magic), magic, 4); - ASCEBC ((char *) (&rv->magic), 4); - rv->cplength = cplength; - rv->datasize = datasize; - rv->data = (void *) ((long) rv + PAGE_SIZE - datasize); - rv->cpaddr = (ccw1_t *) ((long) rv + sizeof (ccw_req_t)); - } else { - DASD_DRIVER_DEBUG_EVENT (2, dasd_alloc_request, - "(%04x) Refusing emergency mem for request " - "NULL, already in use at %p.", - device->devinfo.devno, - device->lowmem_ccws); - } - return rv; + } else { + /* Extra data already allocated with the request */ + cqr->data = (void *) ((addr_t) cqr + data_offset); + } + + /* take page from lowmem_pool for the channel program */ + if (ccw_offset == 0) { + + list_for_each_entry (lowmem, &device->lowmem_pool, list) { + DEV_MESSAGE (KERN_DEBUG, device, + "Take lowmem page %p for channel program", + lowmem); + list_del (&lowmem->list); + cqr->cpaddr = (ccw1_t *) lowmem; + memset (cqr->cpaddr, 0, PAGE_SIZE); + break; + } + + if (cqr->cpaddr == NULL) { + printk (KERN_DEBUG PRINTK_HEADER + "Couldn't allocate channel program area\n"); + if (data_offset == 0) { + lowmem = (dasd_lowmem_t *) cqr->data; + list_add (&lowmem->list, &device->lowmem_pool); + } + lowmem = (dasd_lowmem_t *) cqr; + list_add (&lowmem->list, &device->lowmem_pool); + return NULL; + } + } else { + /* Channel program already allocated with the request */ + cqr->cpaddr = (ccw1_t *) ((addr_t) cqr + ccw_offset); + } + + /* use the remaining memory of the cqr page for IDALs */ + cqr->lowmem_idal_ptr = (void *) ((addr_t) cqr + size_needed); + + strncpy ((char *)(&cqr->magic), magic, 4); + + ASCEBC((char *)(&cqr->magic), 4); + cqr->cplength = cplength; + cqr->datasize = datasize; + + return cqr; } /* @@ -1143,87 +1348,168 @@ * returns a ccw_req_t to the appropriate cache or emergeny request line */ void -dasd_free_request (ccw_req_t * request, dasd_device_t* device) +dasd_free_request (ccw_req_t *cqr, dasd_device_t* device) { + unsigned long size_needed; + dasd_lowmem_t *lowmem; + #ifdef CONFIG_ARCH_S390X ccw1_t* ccw; - /* clear any idals used for chain */ - ccw=request->cpaddr-1; + /* clear any idals used for chain (might be in lowmen cqr page, */ + /* in seperate lowmen page or kmalloced */ + ccw=cqr->cpaddr-1; do { ccw++; - if ((ccw->cda < (unsigned long) device->lowmem_idals ) || - (ccw->cda >= (unsigned long) device->lowmem_idals+PAGE_SIZE) ) - clear_normalized_cda (ccw); - else { - if (device->lowmem_idal_ptr != device->lowmem_idals) - DASD_MESSAGE (KERN_WARNING, device, - "Freeing emergency idals from request at %p.", - request); - device->lowmem_idal_ptr = device->lowmem_idals; - device->lowmem_cqr=NULL; + if ((cqr->flags & CQR_FLAGS_LM_CQR) && + (ccw->cda >= (unsigned long) cqr) && + (ccw->cda < (unsigned long) cqr + PAGE_SIZE)) { + /* IDAL is on the car lowmem page */ + continue; + } + + if ((cqr->flags & CQR_FLAGS_LM_IDAL) && + (ccw->cda >= (unsigned long) cqr->lowmem_idal) && + (ccw->cda < (unsigned long) cqr->lowmem_idal + PAGE_SIZE)) { + /* IDAL is on seperate lowmem page */ + continue; } + + /* IDAL was build by set_normalized_cda */ + clear_normalized_cda (ccw); + } while ((ccw->flags & CCW_FLAG_CC) || (ccw->flags & CCW_FLAG_DC) ); #endif - if (request != device->lowmem_ccws) { - /* compare to lowmem_ccws to protect usage of lowmem_cqr for IDAL only ! 
*/ - ccw_free_request (request); - } else { - DASD_MESSAGE (KERN_WARNING, device, - "Freeing emergency request at %p", - request); - device->lowmem_cqr=NULL; - } + /* give idal lowmem page back to lowmem_pool */ + if (cqr->flags & CQR_FLAGS_LM_IDAL) { + DEV_MESSAGE (KERN_DEBUG, device, + "Freeing lowmem idal page %p from request %p.", + cqr->lowmem_idal, cqr); + + lowmem = (dasd_lowmem_t *) cqr->lowmem_idal; + list_add (&lowmem->list, &device->lowmem_pool); + cqr->flags &= ~CQR_FLAGS_LM_IDAL; + } + + /* give cqr lowmem pages back to lowmem_pool */ + if (cqr->flags & CQR_FLAGS_LM_CQR) { + + /* make the same decisions as in dasd_alloc_request */ + size_needed = (sizeof (ccw_req_t) + 7) & (~7L); + if (size_needed + cqr->datasize <= PAGE_SIZE) { + /* We kept the data with the request */ + size_needed += (cqr->datasize + 7) & (~7L); + } else { + DEV_MESSAGE (KERN_DEBUG, device, + "Freeing lowmem data page %p", + cqr->data); + lowmem = (dasd_lowmem_t *) cqr->data; + list_add (&lowmem->list, &device->lowmem_pool); + } + + if (size_needed + cqr->cplength * sizeof(ccw1_t) > PAGE_SIZE) { + /* We didn't keep the CCWs with request */ + DEV_MESSAGE (KERN_DEBUG, device, + "Freeing lowmem channel program page %p", + cqr->cpaddr); + lowmem = (dasd_lowmem_t *) cqr->cpaddr; + list_add (&lowmem->list, &device->lowmem_pool); + } + DEV_MESSAGE (KERN_DEBUG, device, + "Freeing lowmem request page %p", + cqr); + lowmem = (dasd_lowmem_t *) cqr; + list_add (&lowmem->list, &device->lowmem_pool); + } else { + ccw_free_request (cqr); + } } +/* + * function dasd_set_normalized_cda + * calls set_normalized_cda to build IDALs. + * If this did not work because of low memory, we try to use memory from the + * lowmem pool. + */ int -dasd_set_normalized_cda (ccw1_t * cp, unsigned long address, - ccw_req_t* request, dasd_device_t* device ) +dasd_set_normalized_cda (ccw1_t *cp, unsigned long address, + ccw_req_t *cqr, dasd_device_t *device) { #ifdef CONFIG_ARCH_S390X + int rc; int nridaws; + dasd_lowmem_t *lowmem; int count = cp->count; - if (set_normalized_cda (cp, address)!=-ENOMEM) { - return 0; - } - - if ((device->lowmem_cqr!=NULL) && (device->lowmem_cqr!=request)) { - DASD_MESSAGE (KERN_WARNING, device, - "Refusing emergency idals for request %p, memory" - " is already in use for request %p", - request, - device->lowmem_cqr); - return -ENOMEM; - } - device->lowmem_cqr=request; - if (device->lowmem_idal_ptr == device->lowmem_idals) { - DASD_MESSAGE (KERN_WARNING,device, - "Low memory! Using emergency IDALs for request %p.\n", - request); + /* use lowmem idal page if already assigned */ + if (!(cqr->flags & CQR_FLAGS_LM_IDAL)) { + rc = set_normalized_cda (cp, (void *)address); + if (rc !=-ENOMEM) { + return rc; + } } + + /* get number of idal words needed */ nridaws = ((address & (IDA_BLOCK_SIZE-1)) + count + (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG; - if ( device->lowmem_idal_ptr>=device->lowmem_idals + PAGE_SIZE ) { - /* Ouch! 
No Idals left for emergency request */ - BUG(); - } + + /* check if we need an additional IDALs page */ + if (!(cqr->flags & CQR_FLAGS_LM_IDAL)) { + /* we got no lowmem cqr page OR */ + /* there is no space left for IDALs */ + if ((!(cqr->flags & CQR_FLAGS_LM_CQR)) || + ((cqr->lowmem_idal_ptr + nridaws * sizeof(unsigned long)) > + ((void *) cqr + PAGE_SIZE))) { + + /* use lowmem page only for ERP or */ + /* if there are less than 2 requests on queue */ + if (device->queue.head != NULL && + device->queue.head->next != NULL && + device->queue.head->status != CQR_STATUS_ERROR) { + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Refusing emergency idals for " + "request (enough requests in " + "queue)"); + return -ENOMEM; + } + + list_for_each_entry (lowmem, &device->lowmem_pool, + list) { + DEV_MESSAGE (KERN_DEBUG, device, + "take lowmem page %p for IDALs of " + "request %p.", + lowmem, cqr); + list_del (&lowmem->list); + cqr->lowmem_idal = (void *)lowmem; + cqr->lowmem_idal_ptr = (void *) lowmem; + memset (cqr->lowmem_idal, 0, PAGE_SIZE); + cqr->flags |= CQR_FLAGS_LM_IDAL; + break; + } + } + + } + + /* now we (should) have a valid lowmem_idal_ptr and enough space for */ + /* the IDALs - fill the idals table */ cp->flags |= CCW_FLAG_IDA; - cp->cda = (__u32)(unsigned long)device->lowmem_idal_ptr; + cp->cda = (__u32)(unsigned long)cqr->lowmem_idal_ptr; do { - *((long*)device->lowmem_idal_ptr) = address; - address = (address & -(IDA_BLOCK_SIZE)) + (IDA_BLOCK_SIZE); + *((long*)cqr->lowmem_idal_ptr) = address; + address = (address & -(IDA_BLOCK_SIZE)) + (IDA_BLOCK_SIZE); + cqr->lowmem_idal_ptr += sizeof(unsigned long); nridaws --; - device->lowmem_idal_ptr += sizeof(unsigned long); } while ( nridaws > 0 ); #else - cp -> cda = address; + cp->cda = address; #endif return 0; } -/* SECTION: (de)queueing of requests to channel program queues */ +/******************************************************************************** + * SECTION: (de)queueing of requests to channel program queues + ********************************************************************************/ /* * function dasd_chanq_enq @@ -1245,22 +1531,26 @@ #ifdef DASD_PROFILE - /* save profile information for non erp cqr */ - if (cqr->refers == NULL) { - unsigned int counter = 0; - ccw_req_t *ptr; - dasd_device_t *device = cqr->device; - - /* count the length of the chanq for statistics */ - for (ptr = q->head; - ptr->next != NULL && counter <=31; - ptr = ptr->next) { - counter++; - } - - dasd_global_profile.dasd_io_nr_req[counter]++; - device->profile.dasd_io_nr_req[counter]++; - } + if (dasd_profile_level == DASD_PROFILE_ON) { + + /* save profile information for non erp cqr */ + if (cqr->refers == NULL) { + unsigned int counter = 0; + ccw_req_t *ptr; + dasd_device_t *device = cqr->device; + + /* count the length of the chanq for statistics */ + for (ptr = q->head; + ptr->next != NULL && counter <=31; + ptr = ptr->next) { + counter++; + } + + dasd_global_profile.dasd_io_nr_req[counter]++; + device->profile.dasd_io_nr_req[counter]++; + } + + } /* end if DASD_PROFILE_ON */ #endif } @@ -1276,7 +1566,10 @@ q->head = cqr; if (q->tail == NULL) q->tail = cqr; - check_then_set (&cqr->status, CQR_STATUS_FILLED, CQR_STATUS_QUEUED); + + check_then_set (&cqr->status, + CQR_STATUS_FILLED, + CQR_STATUS_QUEUED); } /* @@ -1302,7 +1595,7 @@ while (prev && prev->next != cqr) prev = prev->next; if (prev == NULL) - return; + return; /* request not in chanq */ prev->next = cqr->next; if (prev->next == NULL) q->tail = prev; @@ -1310,7 +1603,87 @@ cqr->next = 
 }
 
-/* SECTION: Managing the device queues etc. */
+/********************************************************************************
+ * SECTION: Managing the device queues etc.
+ ********************************************************************************/
+
+/*
+ * DASD_RESREL_TIMEOUT
+ *
+ * A timer is used to suspend the current reserve/release request
+ * if it doesn't return within a certain time.
+ */
+void
+dasd_resrel_timeout (unsigned long cqr_ptr)
+{
+	dasd_device_t *device = ((ccw_req_t *) cqr_ptr)->device;
+	ccw_req_t *cqr;
+	unsigned long flags;
+
+	s390irq_spin_lock_irqsave (device->devinfo.irq,
+				   flags);
+	cqr = device->queue.head;
+
+	switch (cqr->status) {
+	case CQR_STATUS_FILLED:
+	case CQR_STATUS_QUEUED:
+		/* request was not started - just set to failed */
+		cqr->status = CQR_STATUS_FAILED;
+		break;
+
+	case CQR_STATUS_IN_IO:
+	case CQR_STATUS_ERROR:
+		if (device->discipline->term_IO (cqr) != 0)
+			cqr->status = CQR_STATUS_FAILED;
+		break;
+
+	default:
+		;		/* DONE and FAILED are ok */
+	}
+
+	dasd_schedule_bh (device);
+
+	s390irq_spin_unlock_irqrestore (device->devinfo.irq,
+					flags);
+
+} /* end dasd_resrel_timeout */
+
+/*
+ * Call unconditional reserve to break the reserve of another system.
+ * Timeout the request if it doesn't succeed within a certain time.
+ */
+static int
+dasd_steal_lock (dasd_device_t *device)
+{
+	ccw_req_t *cqr;
+	int rc = 0;
+
+	if (!device->discipline->steal_lock)
+		return -EINVAL;
+
+	cqr = device->discipline->steal_lock (device);
+
+	if (cqr) {
+		struct timer_list res_timer;
+
+		init_timer(&res_timer);
+		res_timer.function = dasd_resrel_timeout;
+		res_timer.data = (unsigned long) cqr;
+		res_timer.expires = jiffies + 4 * HZ;
+		add_timer(&res_timer);
+
+		rc = dasd_sleep_on_immediate (cqr);
+
+		del_timer_sync(&res_timer);
+		dasd_free_request (cqr,
+				   device);
+	} else {
+		rc = -ENOMEM;
+	}
+
+	return rc;
+
+} /* end dasd_steal_lock */
 
 /*
  * DASD_TERM_IO
@@ -1331,55 +1704,57 @@
 		BUG ();
 	}
 	irq = device->devinfo.irq;
-	if (strncmp ((char *) &cqr->magic, device->discipline->ebcname, 4)) {
-		DASD_MESSAGE (KERN_WARNING, device,
-			      " ccw_req_t 0x%08x magic doesn't match"
-			      " discipline 0x%08x\n",
-			      cqr->magic,
-			      *(unsigned int *) device->discipline->name);
+	if (strncmp ((char *) &cqr->magic,
+		     device->discipline->ebcname, 4)) {
+
+		DEV_MESSAGE (KERN_WARNING, device,
+			     " ccw_req_t 0x%08x magic doesn't match"
+			     " discipline 0x%08x",
+			     cqr->magic,
+			     *(unsigned int *) device->discipline->name);
+
 		return -EINVAL;
 	}
 
 	while ((retries < 5 ) &&
 	       (cqr->status == CQR_STATUS_IN_IO) ) {
 
-		if ( retries < 2 )
-			rc = halt_IO(irq, (long)cqr,
-				     cqr->options | DOIO_WAIT_FOR_INTERRUPT);
-		else
-			rc = clear_IO(irq, (long)cqr,
-				      cqr->options | DOIO_WAIT_FOR_INTERRUPT);
-
+		rc = clear_IO (irq,
+			       (long)cqr,
+			       cqr->options);
+
 		switch (rc) {
 		case 0:	/* termination successful */
 			check_then_set (&cqr->status,
 					CQR_STATUS_IN_IO,
 					CQR_STATUS_FAILED);
 
-			asm volatile ("STCK %0":"=m" (cqr->stopclk));
+			cqr->stopclk = get_clock ();
+
 			break;
 		case -ENODEV:
-			DASD_MESSAGE (KERN_WARNING, device, "%s",
-				      "device gone, retry\n");
+			DBF_DEV_EVENT (DBF_ERR, device, "%s",
+				       "device gone, retry");
 			break;
 		case -EIO:
-			DASD_MESSAGE (KERN_WARNING, device, "%s",
-				      "I/O error, retry\n");
+			DBF_DEV_EVENT (DBF_ERR, device, "%s",
+				       "I/O error, retry");
 			break;
 		case -EBUSY:
-			DASD_MESSAGE (KERN_WARNING, device, "%s",
-				      "device busy, retry later\n");
+			DBF_DEV_EVENT (DBF_ERR, device, "%s",
+				       "device busy, retry later");
 			break;
 		default:
-			DASD_MESSAGE (KERN_ERR, device,
-				      "line %d unknown RC=%d, 
please report" - " to linux390@de.ibm.com\n", - __LINE__, - rc); + DEV_MESSAGE (KERN_ERR, device, + "line %d unknown RC=%d, please " + "report to linux390@de.ibm.com", + __LINE__, + rc); BUG (); break; } + dasd_schedule_bh (device); retries ++; } return rc; @@ -1401,27 +1776,30 @@ BUG (); } irq = device->devinfo.irq; - if (strncmp ((char *) &cqr->magic, device->discipline->ebcname, 4)) { - DASD_MESSAGE (KERN_WARNING, device, - " ccw_req_t 0x%08x magic doesn't match" - " discipline 0x%08x\n", - cqr->magic, - *(unsigned int *) device->discipline->name); + if (strncmp ((char *) &cqr->magic, + device->discipline->ebcname, 4)) { + + DEV_MESSAGE (KERN_ERR, device, + " ccw_req_t 0x%08x magic doesn't match" + " discipline 0x%08x", + cqr->magic, + *(unsigned int *) device->discipline->name); + return -EINVAL; } - asm volatile ("STCK %0":"=m" (now)); - cqr->startclk = now; + now = get_clock (); - rc = do_IO (irq, cqr->cpaddr, (long) cqr, cqr->lpm, cqr->options); + cqr->startclk = now; + if (device->accessible) + rc = do_IO (irq, cqr->cpaddr, (long) cqr, cqr->lpm, cqr->options); + else + rc = -EBUSY; switch (rc) { case 0: if (cqr->options & DOIO_WAIT_FOR_INTERRUPT) { /* request already finished (synchronous IO) */ - DASD_MESSAGE (KERN_ERR, device, "%s", - " do_IO finished request... " - "DOIO_WAIT_FOR_INTERRUPT was set"); check_then_set (&cqr->status, CQR_STATUS_QUEUED, CQR_STATUS_DONE); @@ -1436,12 +1814,22 @@ } break; case -EBUSY: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "device busy, retry later\n"); + DBF_DEV_EVENT (DBF_ERR, device, "%s", + "device busy, retry later"); + + if (!timer_pending(&device->timer)) { + init_timer (&device->timer); + device->timer.function = dasd_schedule_bh_timed; + device->timer.data = (unsigned long) device; + device->timer.expires = jiffies + (HZ >> 4); + add_timer (&device->timer); + } else { + mod_timer(&device->timer, jiffies + (HZ >> 4)); + } break; case -ETIMEDOUT: - DASD_MESSAGE (KERN_WARNING, device, "%s", - "request timeout - terminated\n"); + DBF_DEV_EVENT (DBF_ERR, device, "%s", + "request timeout - terminated"); case -ENODEV: case -EIO: check_then_set (&cqr->status, @@ -1452,9 +1840,11 @@ dasd_schedule_bh (device); break; default: - DASD_MESSAGE (KERN_ERR, device, - "line %d unknown RC=%d, please report" - " to linux390@de.ibm.com\n", __LINE__, rc); + DEV_MESSAGE (KERN_ERR, device, + "line %d unknown RC=%d, please report" + " to linux390@de.ibm.com", + __LINE__, + rc); BUG (); break; } @@ -1465,43 +1855,83 @@ /* * function dasd_sleep_on_req * attempts to start the IO and waits for completion - * FIXME: replace handmade sleeping by wait_event */ int -dasd_sleep_on_req (ccw_req_t * req) +dasd_sleep_on_req (ccw_req_t * cqr) { unsigned long flags; - int cs; - int rc = 0; - dasd_device_t *device = (dasd_device_t *) req->device; + dasd_device_t *device = (dasd_device_t *) cqr->device; - if ( signal_pending(current) ) { + if (signal_pending(current)) { return -ERESTARTSYS; } - s390irq_spin_lock_irqsave (device->devinfo.irq, flags); - dasd_chanq_enq (&device->queue, req); + s390irq_spin_lock_irqsave (device->devinfo.irq, + flags); + + dasd_chanq_enq (&device->queue, + cqr); + /* let the bh start the request to keep them in order */ dasd_schedule_bh (device); - do { - s390irq_spin_unlock_irqrestore (device->devinfo.irq, flags); - wait_event ( device->wait_q, - (((cs = req->status) == CQR_STATUS_DONE) || - (cs == CQR_STATUS_FAILED) || - signal_pending(current))); - s390irq_spin_lock_irqsave (device->devinfo.irq, flags); - if ( signal_pending(current) ) { - rc = 
-ERESTARTSYS; - if (req->status == CQR_STATUS_IN_IO ) - device->discipline->term_IO(req); - break; - } else if ( req->status == CQR_STATUS_FAILED) { - rc = -EIO; - break; - } - } while (cs != CQR_STATUS_DONE && cs != CQR_STATUS_FAILED); - s390irq_spin_unlock_irqrestore (device->devinfo.irq, flags); - return rc; -} /* end dasd_sleep_on_req */ + + s390irq_spin_unlock_irqrestore (device->devinfo.irq, + flags); + + wait_event (device->wait_q, + cqr->flags & CQR_FLAGS_FINALIZED); + + if (cqr->status == CQR_STATUS_FAILED) { + return -EIO; + } + + return 0; + +} /* end dasd_sleep_on_req */ + +/* + * function dasd_sleep_on_immediate + * same as dasd_sleep_on_req, but attempts to start the IO immediately + * (killing the actual running IO). + */ +static int +dasd_sleep_on_immediate (ccw_req_t *cqr) +{ + unsigned long flags; + dasd_device_t *device = (dasd_device_t *) cqr->device; + + if (signal_pending(current)) + return -ERESTARTSYS; + + s390irq_spin_lock_irqsave (device->devinfo.irq, + flags); + + /* terminate currently running IO */ + if (device->queue.head->status == CQR_STATUS_IN_IO) { + + device->discipline->term_IO (device->queue.head); + + device->queue.head->status = CQR_STATUS_QUEUED; + } + + dasd_chanq_enq_head (&device->queue, + cqr); + + /* let the bh start the request to keep them in order */ + dasd_schedule_bh (device); + + s390irq_spin_unlock_irqrestore (device->devinfo.irq, + flags); + + wait_event (device->wait_q, + cqr->flags & CQR_FLAGS_FINALIZED); + + if (cqr->status == CQR_STATUS_FAILED) { + return -EIO; + } + + return 0; + +} /* end dasd_sleep_on_immediate */ /* * function dasd_end_request @@ -1547,12 +1977,17 @@ unsigned long long now; int rc = 0; - asm volatile ("STCK %0":"=m" (now)); - if (cqr->expires && cqr->expires + cqr->startclk < now) { - DASD_MESSAGE (KERN_ERR, ((dasd_device_t *) cqr->device), - "IO timeout 0x%08lx%08lx usecs in req %p\n", - (long) (cqr->expires >> 44), - (long) (cqr->expires >> 12), cqr); + now = get_clock (); + + if (cqr->expires && + cqr->expires + cqr->startclk < now) { + + DBF_DEV_EVENT (DBF_WARNING, ((dasd_device_t *) cqr->device), + "IO timeout 0x%08lx%08lx usecs in req %p", + (long) (cqr->expires >> 44), + (long) (cqr->expires >> 12), + cqr); + cqr->expires <<= 1; rc = -EIO; } @@ -1569,22 +2004,36 @@ { dasd_device_t *device = cqr->device; - asm volatile ("STCK %0":"=m" (cqr->endclk)); + cqr->endclk = get_clock (); + if (cqr->req) { + #ifdef DASD_PROFILE - dasd_profile_add (cqr); + if (dasd_profile_level == DASD_PROFILE_ON) { + dasd_profile_add (cqr); + } #endif + dasd_end_request (cqr->req, (cqr->status == CQR_STATUS_DONE)); /* free request if nobody is waiting on it */ dasd_free_request (cqr, cqr->device); } else { - if ( cqr == device->init_cqr && /* bring late devices online */ - device->level <= DASD_STATE_ONLINE ) { - device->timer.function = dasd_enable_single_device; - device->timer.data = (unsigned long) device; - device->timer.expires = jiffies; - add_timer(&device->timer); + if (cqr == device->init_cqr && /* bring late devices online */ + device->level <= DASD_STATE_ONLINE ) { + if (!timer_pending(&device->late_timer)) { + init_timer(&device->late_timer); + device->late_timer.function = dasd_enable_single_device; + device->late_timer.data = (unsigned long) device; + device->late_timer.expires = jiffies; + add_timer(&device->late_timer); + } else { + mod_timer(&device->late_timer, jiffies); + } + } else { + /* notify sleep_on_xxx about finished cqr */ + cqr->flags |= CQR_FLAGS_FINALIZED; } + /* notify sleeping task about finished 
postprocessing */ wake_up (&device->wait_q); @@ -1606,12 +2055,10 @@ dasd_chanq_t *qp = &device->queue; int irq = device->devinfo.irq; ccw_req_t *final_requests = NULL; - static int chanq_min_size = DASD_MIN_SIZE_FOR_QUEUE; int chanq_max_size = DASD_CHANQ_MAX_SIZE; ccw_req_t *cqr = NULL, *temp; dasd_erp_postaction_fn_t erp_postaction; - s390irq_spin_lock_irqsave (irq, flags); /* First we dechain the requests, processed with completed status */ @@ -1628,16 +2075,10 @@ qp->head->retries--; - if (qp->head->dstat->flag & DEVSTAT_HALT_FUNCTION) { - - check_then_set (&qp->head->status, - CQR_STATUS_ERROR, - CQR_STATUS_FAILED); - - asm volatile ("STCK %0":"=m" (qp->head->stopclk)); - - } else if ((device->discipline->erp_action == NULL ) || - ((erp_action = device->discipline->erp_action (qp->head)) == NULL) ) { + if ((qp->head->dstat == NULL ) || + ((qp->head->dstat->flag & DEVSTAT_FLAG_SENSE_AVAIL) == 0 ) || + (device->discipline->erp_action == NULL ) || + ((erp_action = device->discipline->erp_action (qp->head)) == NULL) ) { erp_cqr = dasd_default_erp_action (qp->head); @@ -1651,12 +2092,12 @@ if (qp->head->status == CQR_STATUS_DONE) { - DASD_MESSAGE (KERN_DEBUG, device, "%s", - "ERP successful"); + DBF_DEV_EVENT (DBF_NOTICE, device, "%s", + "ERP successful"); } else { - DASD_MESSAGE (KERN_ERR, device, "%s", - "ERP unsuccessful"); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "ERP unsuccessful"); } if ((device->discipline->erp_postaction == NULL )|| @@ -1685,65 +2126,60 @@ } /* end while over completed requests */ if (cqr) - cqr->next = NULL; + cqr->next = NULL; /* terminate final_requests queue */ + /* Now clean the requests with final status */ while (final_requests) { temp = final_requests; final_requests = temp->next; dasd_finalize_request (temp); } + /* Now we try to fetch requests from the request queue */ - for (temp = cqr; temp != NULL; temp = temp->next) + for (temp = qp->head; temp != NULL; temp = temp->next) { if (temp->status == CQR_STATUS_QUEUED) chanq_max_size--; + } + while ((atomic_read(&device->plugged) == 0) && + (queue) && (!queue->plugged) && (!list_empty (&queue->queue_head)) && - (req = dasd_next_request (queue)) != NULL) { + (req = dasd_next_request (queue)) && + (qp->head == NULL || chanq_max_size > 0)) { /* queue empty or certain critera fulfilled -> transfer */ - if (qp->head == NULL || - chanq_max_size > 0 || (req->nr_sectors >= chanq_min_size)) { - ccw_req_t *cqr = NULL; - if (is_read_only(device->kdev) && req->cmd == WRITE) { - - DASD_DRIVER_DEBUG_EVENT (3, dasd_int_handler, - "(%04x) Rejecting write request %p\n", - device->devinfo.devno, - req); + cqr = NULL; + if (is_read_only(device->kdev) && req->cmd == WRITE) { - dasd_end_request (req, 0); - dasd_dequeue_request (queue,req); - } else { - /* relocate request according to partition table */ - req->sector += - device->major_info->gendisk. - part[MINOR (req->rq_dev)].start_sect; - cqr = device->discipline->build_cp_from_req (device, req); - if (cqr == NULL) { - - DASD_DRIVER_DEBUG_EVENT (3, dasd_int_handler, - "(%04x) CCW creation failed " - "on request %p\n", - device->devinfo.devno, - req); - /* revert relocation of request */ - req->sector -= - device->major_info->gendisk. 
- part[MINOR (req->rq_dev)].start_sect; - break; /* terminate request queue loop */ - - } -#ifdef CONFIG_DYNAMIC_QUEUE_MIN_SIZE - chanq_min_size = - (chanq_min_size + req->nr_sectors) >> 1; -#endif /* CONFIG_DYNAMIC_QUEUE_MIN_SIZE */ - dasd_dequeue_request (queue, req); - dasd_chanq_enq (qp, cqr); + DBF_EVENT (DBF_ERR, + "(%04x) Rejecting write request %p", + device->devinfo.devno, + req); + dasd_dequeue_request (queue,req); + dasd_end_request (req, 0); + continue; + } + cqr = device->discipline->build_cp_from_req (device, req); + + if (cqr == NULL || IS_ERR(cqr)) { + if (cqr == ERR_PTR(-ENOMEM)) { + break; } - } else { /* queue not empty OR criteria not met */ - break; /* terminate request queue loop */ - } - } + + MESSAGE (KERN_EMERG, + "(%04x) CCW creation failed " + "on request %p", + device->devinfo.devno, req); + dasd_dequeue_request (queue,req); + dasd_end_request (req, 0); + continue; + } + dasd_dequeue_request (queue, req); + dasd_chanq_enq (qp, cqr); + chanq_max_size--; + + } + /* we process the requests with non-final status */ if (qp->head) { switch (qp->head->status) { @@ -1764,6 +2200,10 @@ /* just wait */ break; default: + MESSAGE (KERN_EMERG, + "invalid cqr (%p) detected with status %02x ", + qp->head, + qp->head->status); BUG (); } } @@ -1786,6 +2226,18 @@ } /* + * function dasd_schedule_bh_timed + * retriggers the dasd_schedule_bh function (called by timer queue) + */ +void +dasd_schedule_bh_timed (unsigned long device_ptr) +{ + dasd_device_t *device = (dasd_device_t *) device_ptr; + + dasd_schedule_bh (device); +} + +/* * function dasd_schedule_bh * schedules the request_fn to run with next run_bh cycle */ @@ -1842,30 +2294,43 @@ if (device_addr == NULL) { - printk (KERN_DEBUG PRINTK_HEADER - "unable to find device for state change pending " - "interrupt: devno%04x\n", - stat->devno); + MESSAGE (KERN_DEBUG, + "unable to find device for state change pending " + "interrupt: devno%04x", + stat->devno); return; } /* re-activate first request in queue */ cqr = (*device_addr)->queue.head; + + if (cqr == NULL) { + MESSAGE (KERN_DEBUG, + "got state change pending interrupt on" + "idle device: %04x", + stat->devno); + return; + } if (cqr->status == CQR_STATUS_PENDING) { - DASD_MESSAGE (KERN_DEBUG, (*device_addr), "%s", - "device request queue restarted by " - "state change pending interrupt\n"); + DEV_MESSAGE (KERN_DEBUG, (*device_addr), "%s", + "device request queue restarted by " + "state change pending interrupt"); - del_timer (&(*device_addr)->timer); + del_timer_sync (&(*device_addr)->blocking_timer); check_then_set (&cqr->status, CQR_STATUS_PENDING, CQR_STATUS_QUEUED); - dasd_schedule_bh (*device_addr); - } + if (cqr->status == CQR_STATUS_IN_IO) { + cqr->status = CQR_STATUS_QUEUED; + DEV_MESSAGE (KERN_WARNING, (*device_addr), "%s", + "redriving state change pending condition while in IO"); + } + + dasd_schedule_bh (*device_addr); } /* end dasd_handle_state_change_pending */ @@ -1879,50 +2344,50 @@ int ip; ccw_req_t *cqr; dasd_device_t *device; - unsigned long long now; - dasd_era_t era = dasd_era_none; /* default is everything is okay */ + dasd_era_t era; devstat_t *stat = (devstat_t *)ds; if (stat == NULL) { BUG(); } - DASD_DRIVER_DEBUG_EVENT (6, dasd_int_handler, - "Interrupt: IRQ %02x, stat %02x, devno %04x", - irq, - stat->dstat, - stat->devno); - asm volatile ("STCK %0":"=m" (now)); + + DBF_EVENT (DBF_DEBUG, + "Int: IRQ %02x, CS/DS %04x, flag %08x, devno %04x, ip %08x", + irq, + ((stat->cstat<<8)|stat->dstat), + stat->flag, + stat->devno, + stat->intparm); /* first of 
all check for state change pending interrupt */ if ((stat->dstat & DEV_STAT_ATTENTION ) && (stat->dstat & DEV_STAT_DEV_END ) && (stat->dstat & DEV_STAT_UNIT_EXCEP) ) { - DASD_DRIVER_DEBUG_EVENT (2, dasd_int_handler, - "State change Interrupt: %04x", - stat->devno); + + DBF_EVENT (DBF_NOTICE, + "State change Interrupt: %04x", + stat->devno); + dasd_handle_state_change_pending (stat); return; } ip = stat->intparm; if (!ip) { /* no intparm: unsolicited interrupt */ - DASD_DRIVER_DEBUG_EVENT (2, dasd_int_handler, - "Unsolicited Interrupt: %04x", - stat->devno); - printk (KERN_DEBUG PRINTK_HEADER - "unsolicited interrupt: irq 0x%x devno %04x\n", - irq, - stat->devno); + + MESSAGE (KERN_DEBUG, + "unsolicited interrupt: irq 0x%x devno %04x", + irq, + stat->devno); return; } - if (ip & 0x80000001) { - DASD_DRIVER_DEBUG_EVENT (2, dasd_int_handler, - "spurious Interrupt: %04x", - stat->devno); - printk (KERN_DEBUG PRINTK_HEADER - "spurious interrupt: irq 0x%x devno %04x, parm %08x\n", - irq, - stat->devno,ip); + + if (ip & 0x80000001) { /* check for invalid 'cqr' address */ + + MESSAGE (KERN_DEBUG, + "spurious interrupt: irq 0x%x devno %04x, parm %08x", + irq, + stat->devno,ip); return; } @@ -1930,19 +2395,16 @@ /* check status - the request might have been killed because of dyn dettach */ if (cqr->status != CQR_STATUS_IN_IO) { - DASD_DRIVER_DEBUG_EVENT (2, dasd_int_handler, - "invalid status %02x on device %04x", - cqr->status, - stat->devno); - - printk (KERN_DEBUG PRINTK_HEADER - "invalid status: irq 0x%x devno %04x, status %02x\n", - irq, - stat->devno, - cqr->status); + + MESSAGE (KERN_DEBUG, + "invalid status: irq 0x%x devno %04x, status %02x", + irq, + stat->devno, + cqr->status); return; } + /* some consistency checks */ device = (dasd_device_t *) cqr->device; if (device == NULL || device != ds-offsetof(dasd_device_t,dev_status)) { @@ -1955,73 +2417,98 @@ BUG(); } - /* first of all lets try to find out the appropriate era_action */ - DASD_DEVICE_DEBUG_EVENT (4, device," Int: CS/DS 0x%04x", - ((stat->cstat<<8)|stat->dstat)); - /* first of all lets try to find out the appropriate era_action */ - if (stat->flag & DEVSTAT_FLAG_SENSE_AVAIL || - stat->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { - /* anything abnormal ? 
*/ - if (device->discipline->examine_error == NULL || - stat->flag & DEVSTAT_HALT_FUNCTION) { - era = dasd_era_fatal; - } else { - era = device->discipline->examine_error (cqr, stat); - } - DASD_DRIVER_DEBUG_EVENT (1, dasd_int_handler," era_code %d", - era); - } - if ( era == dasd_era_none ) { - check_then_set(&cqr->status, - CQR_STATUS_IN_IO, - CQR_STATUS_DONE); - - cqr->stopclk=now; - /* start the next queued request if possible -> fast_io */ - if (cqr->next && - cqr->next->status == CQR_STATUS_QUEUED) { - if (device->discipline->start_IO (cqr->next) != 0) { - printk (KERN_WARNING PRINTK_HEADER - "Interrupt fastpath failed!\n"); - } - } - } else { /* error */ + if (stat->flag & DEVSTAT_HALT_FUNCTION) { + era = dasd_era_fatal; + + } else if (stat->flag & DEVSTAT_FINAL_STATUS && + stat->dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && + stat->cstat == 0) { + /* received 'ok' for running IO */ + era = dasd_era_none; + + } else if (stat->flag & DEVSTAT_FLAG_SENSE_AVAIL) { + /* got sense data */ if (cqr->dstat == NULL) cqr->dstat = kmalloc (sizeof (devstat_t), GFP_ATOMIC); if (cqr->dstat) { memcpy (cqr->dstat, stat, sizeof (devstat_t)); } else { - PRINT_ERR ("no memory for dstat...ignoring\n"); + MESSAGE (KERN_DEBUG, "%s", + "no memory for dstat...ignoring"); } - #ifdef ERP_DEBUG - /* dump sense data */ - if (device->discipline && + if (device->discipline && device->discipline->dump_sense ) { - + device->discipline->dump_sense (device, cqr); } #endif + if (device->discipline->examine_error == NULL) { + era = dasd_era_recover; + } else { + era = device->discipline->examine_error (cqr, stat); + } - switch (era) { - case dasd_era_fatal: - check_then_set (&cqr->status, - CQR_STATUS_IN_IO, - CQR_STATUS_FAILED); + } else if (stat->flag & DEVSTAT_NOT_OPER) { + /* path became offline or similar */ + /* => retry to see if there are any other pathes available */ + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "Device or a path became not operational while in IO"); + era = dasd_era_recover; + + } else if (stat->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END) || + stat->cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN) ) { + /* received device state apart from (channel end & device end) */ + /* OR any kind of channel check (e.g. IFCC, DATA_CHECK or ..... */ + /* we got no sense data, therefore we just retry */ + DEV_MESSAGE (KERN_DEBUG, device, + "Status without sense (IFCC,...) 
CS/DS %04x flag %08x", + ((stat->cstat<<8)|stat->dstat), + stat->flag); + era = dasd_era_recover; - cqr->stopclk = now; - break; - case dasd_era_recover: - check_then_set (&cqr->status, - CQR_STATUS_IN_IO, - CQR_STATUS_ERROR); - break; - default: - BUG (); + } else { + /* any other kind of interrupt - just retry */ + DEV_MESSAGE (KERN_DEBUG, device, + "Got unclassified interrupt CS/DS %04x flag %08x", + ((stat->cstat<<8)|stat->dstat), + stat->flag); + era = dasd_era_recover; + } + + switch (era) { + case dasd_era_none: + check_then_set(&cqr->status, + CQR_STATUS_IN_IO, + CQR_STATUS_DONE); + cqr->stopclk = get_clock (); + /* start the next queued request if possible -> fast_io */ + if (cqr->next && + cqr->next->status == CQR_STATUS_QUEUED) { + if (device->discipline->start_IO (cqr->next) != 0) { + MESSAGE (KERN_WARNING, "%s", + "Interrupt fastpath failed!"); + } } - } + break; + case dasd_era_fatal: + check_then_set (&cqr->status, + CQR_STATUS_IN_IO, + CQR_STATUS_FAILED); + cqr->stopclk = get_clock (); + break; + case dasd_era_recover: + check_then_set (&cqr->status, + CQR_STATUS_IN_IO, + CQR_STATUS_ERROR); + break; + default: + BUG (); + } + + /* handle special device initialization request */ if ( cqr == device->init_cqr && ( cqr->status == CQR_STATUS_DONE || cqr->status == CQR_STATUS_FAILED )){ @@ -2033,59 +2520,47 @@ } /* end dasd_int_handler */ -/* SECTION: Some stuff related to error recovery */ +/******************************************************************************** + * SECTION: Some stuff related to error recovery + ********************************************************************************/ /* * DEFAULT_ERP_ACTION * * DESCRIPTION - * sets up the default-ERP ccw_req_t, namely one, which performs a TIC - * to the original channel program with a retry counter of 16 + * just retries the current cqr * * PARAMETER * cqr failed CQR * * RETURN VALUES - * erp CQR performing the ERP + * cqr modified CQR */ ccw_req_t * dasd_default_erp_action (ccw_req_t * cqr) { dasd_device_t *device = cqr->device; - ccw_req_t *erp = dasd_alloc_request ((char *) &cqr->magic, 1, 0, cqr->device); - - printk (KERN_DEBUG PRINTK_HEADER "Default ERP called... \n"); - - if (!erp) { - - DASD_MESSAGE (KERN_ERR, device, "%s", - "Unable to allocate ERP request"); - + // just retry - there is nothing to save ... I got no sense data.... 
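/*
 * dasd_default_erp_action below no longer builds a TIC channel program;
 * it simply re-queues the failed request while retries remain and fails
 * it for good once they are used up. A reduced stand-alone sketch of
 * that state machine (status names shortened and the retry bookkeeping
 * folded into one helper purely for illustration):
 */
#include <stdio.h>

enum { CQR_ERROR, CQR_QUEUED, CQR_FAILED };

struct cqr { int status; int retries; };

static void
default_erp (struct cqr *c)
{
	if (c->retries-- > 0)
		c->status = CQR_QUEUED;		/* give it another go       */
	else
		c->status = CQR_FAILED;		/* out of retries: final    */
}

int
main (void)
{
	struct cqr c = { CQR_ERROR, 2 };

	while (c.status != CQR_FAILED) {
		default_erp (&c);
		printf ("status %d, retries left %d\n", c.status, c.retries);
		if (c.status == CQR_QUEUED)
			c.status = CQR_ERROR;	/* pretend the retry failed */
	}
	return 0;
}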
+	if (cqr->retries > 0) {
+		DEV_MESSAGE (KERN_DEBUG, device,
+			     "default ERP called (%i retries left)",
+			     cqr->retries);
+
 		check_then_set (&cqr->status,
 				CQR_STATUS_ERROR,
-				CQR_STATUS_FAILED);
-
-		asm volatile ("STCK %0":"=m" (cqr->stopclk));
-
-		return cqr;
-	}
-
-	erp->cpaddr->cmd_code = CCW_CMD_TIC;
-	erp->cpaddr->cda = (__u32) (addr_t) cqr->cpaddr;
-	erp->function = dasd_default_erp_action;
-	erp->refers = cqr;
-	erp->device = cqr->device;
-	erp->magic = cqr->magic;
-	erp->retries = 16;
-
-	erp->status = CQR_STATUS_FILLED;
-
-	dasd_chanq_enq_head (&device->queue,
-			     erp);
-
-	return erp;
-
+				CQR_STATUS_QUEUED);
+	} else {
+		DEV_MESSAGE (KERN_WARNING, device, "%s",
+			     "default ERP called (NO retry left)");
+
+		check_then_set (&cqr->status,
+				CQR_STATUS_ERROR,
+				CQR_STATUS_FAILED);
+
+		cqr->stopclk = get_clock ();
+	}
+	return cqr;
 } /* end dasd_default_erp_action */
 
 /*
@@ -2141,26 +2616,21 @@
 	/* save ptr to original cqr */
 	cqr = erp;
 
-	/* set corresponding status to original cqr */
+	/* set corresponding status for original cqr */
 	if (success) {
-
-		check_then_set (&cqr->status,
-				CQR_STATUS_ERROR,
-				CQR_STATUS_DONE);
+		cqr->status = CQR_STATUS_DONE;
 	} else {
-
-		check_then_set (&cqr->status,
-				CQR_STATUS_ERROR,
-				CQR_STATUS_FAILED);
-
-		asm volatile ("STCK %0":"=m" (cqr->stopclk));
+		cqr->status = CQR_STATUS_FAILED;
+		cqr->stopclk = get_clock ();
 	}
 
 	return cqr;
 
 } /* end default_erp_postaction */
 
-/* SECTION: The helpers of the struct file_operations */
+/********************************************************************************
+ * SECTION: The helpers of the struct file_operations
+ ********************************************************************************/
 
 /*
  * function dasd_format
@@ -2175,39 +2645,41 @@
 {
 	int rc = 0;
 	int openct = atomic_read (&device->open_count);
+	ccw_req_t *req;
 
 	if (openct > 1) {
-		DASD_MESSAGE (KERN_WARNING, device, "%s",
-			      "dasd_format: device is open! expect errors.");
+
+		DEV_MESSAGE (KERN_WARNING, device, "%s",
+			     "dasd_format: device is open! "
+			     "expect errors.");
 	}
-	DASD_MESSAGE (KERN_INFO, device,
-		      "formatting units %d to %d (%d B blocks) flags %d",
-		      fdata->start_unit,
-		      fdata->stop_unit,
-		      fdata->blksize,
-		      fdata->intensity);
+
+	DBF_DEV_EVENT (DBF_NOTICE, device,
+		       "formatting units %d to %d (%d B blocks) flags %d",
+		       fdata->start_unit,
+		       fdata->stop_unit,
+		       fdata->blksize,
+		       fdata->intensity);
+
 	while ((!rc) && (fdata->start_unit <= fdata->stop_unit)) {
-		ccw_req_t *req;
-		dasd_format_fn_t ffn = device->discipline->format_device;
-		ffn = device->discipline->format_device;
-		if (ffn == NULL)
+
+		if (device->discipline->format_device == NULL)
 			break;
-		req = ffn (device, fdata);
+
+		req = device->discipline->format_device (device, fdata);
 		if (req == NULL) {
 			rc = -ENOMEM;
 			break;
 		}
 		if ((rc = dasd_sleep_on_req (req)) != 0) {
-			DASD_MESSAGE (KERN_WARNING, device,
-				      " Formatting of unit %d failed with rc = %d\n",
-				      fdata->start_unit, rc);
+
+			DEV_MESSAGE (KERN_WARNING, device,
+				     " Formatting of unit %d failed "
+				     "with rc = %d",
+				     fdata->start_unit, rc);
 			break;
 		}
 		dasd_free_request (req, device);	/* request is no longer used */
-		if ( signal_pending(current) ) {
-			rc = -ERESTARTSYS;
-			break;
-		}
 		fdata->start_unit++;
 	}
 	return rc;
@@ -2258,6 +2730,9 @@
 	return 0;
 }
 
+/*
+ * handle the re-read partition table IOCTL (BLKRRPART)
+ */
 static int
 dasd_revalidate (dasd_device_t * device)
 {
@@ -2267,8 +2742,9 @@
 	int openct = atomic_read (&device->open_count);
 	int start = MINOR (kdev);
 	if (openct != 1) {
-		DASD_MESSAGE (KERN_WARNING, device, "%s",
-			      "BLKRRPART: device is open! expect errors.");
+
+		DEV_MESSAGE (KERN_WARNING, device, "%s",
+			     "BLKRRPART: device is open! expect errors.");
 	}
 	for (i = (1 << DASD_PARTN_BITS) - 1; i >= 0; i--) {
 		int major = device->major_info->gendisk.major;
@@ -2279,6 +2755,15 @@
 	return rc;
 }
 
+
+/*
+ * function do_dasd_ioctl
+ * Implementation of the DASD API.
+ * Changes to the API should be binary compatible with previous versions
+ * of the user-space applications, i.e. any already existing tool
+ * (e.g. dasdfmt) must keep working with the new kernel API.
+ */
+
 static int
 do_dasd_ioctl (struct inode *inp, /* unsigned */ int no, unsigned long data)
 {
@@ -2287,10 +2772,12 @@
 	major_info_t *major_info;
 
 	if (!device) {
-		printk (KERN_WARNING PRINTK_HEADER
-			"No device registered as device (%d:%d)\n",
-			MAJOR (inp->i_rdev),
-			MINOR (inp->i_rdev));
+
+		MESSAGE (KERN_WARNING,
+			 "No device registered as device (%d:%d)",
+			 MAJOR (inp->i_rdev),
+			 MINOR (inp->i_rdev));
+
 		return -EINVAL;
 	}
 	if ((_IOC_DIR (no) != _IOC_NONE) && (data == 0)) {
@@ -2298,225 +2785,356 @@
 		return -EINVAL;
 	}
 	major_info = device->major_info;
-#if 0
-	printk (KERN_DEBUG PRINTK_HEADER
-		"ioctl 0x%08x %s'0x%x'%d(%d) on /dev/%s (%d:%d,"
-		" devno 0x%04x on irq %d) with data %8lx\n",
-		no,
-		_IOC_DIR (no) == _IOC_NONE ? "0" :
-		_IOC_DIR (no) == _IOC_READ ? "r" :
-		_IOC_DIR (no) == _IOC_WRITE ? "w" :
-		_IOC_DIR (no) == (_IOC_READ | _IOC_WRITE) ? "rw" : "u",
-		_IOC_TYPE (no),
-		_IOC_NR (no),
-		_IOC_SIZE (no),
-		device->name,
-		MAJOR (inp->i_rdev),
-		MINOR (inp->i_rdev),
-		device->devinfo.devno,
-		device->devinfo.irq,
-		data);
-#endif
+
+	DBF_DEV_EVENT (DBF_DEBUG, device,
+		       "ioctl 0x%08x %s'0x%x'%d(%d) with data %8lx",
+		       no,
+		       (_IOC_DIR (no) == _IOC_NONE ? "0" :
+			_IOC_DIR (no) == _IOC_READ ? "r" :
+			_IOC_DIR (no) == _IOC_WRITE ? "w" :
+			_IOC_DIR (no) == (_IOC_READ | _IOC_WRITE) ? 
"rw" : "u"), + _IOC_TYPE (no), + _IOC_NR (no), + _IOC_SIZE (no), + data); + switch (no) { - case DASDAPIVER: { - int ver = DASD_API_VERSION; - rc = put_user(ver, (int *) data); - break; + case DASDAPIVER: { /* retrun dasd API version */ + int ver = DASD_API_VERSION; + rc = put_user(ver, (int *) data); + break; + } + case BLKGETSIZE: { /* Return device size in # of sectors */ + long blocks = major_info->gendisk.sizes + [MINOR (inp->i_rdev)] << 1; + rc = put_user(blocks, (long *) data); + break; + } + case BLKGETSIZE64:{ + u64 blocks = major_info->gendisk.sizes + [MINOR (inp->i_rdev)]; + rc = put_user(blocks << 10, (u64 *) data); + break; + } + case BLKRRPART: { /* reread partition table */ + if (!capable (CAP_SYS_ADMIN)) { + rc = -EACCES; + break; + } + rc = dasd_revalidate (device); + break; } - case BLKGETSIZE:{ /* Return device size */ - long blocks = major_info->gendisk.sizes - [MINOR (inp->i_rdev)] << 1; - rc = put_user(blocks, (long *) data); - break; - } - case BLKGETSIZE64:{ - u64 blocks = major_info->gendisk.sizes - [MINOR (inp->i_rdev)]; - rc = put_user(blocks << 10, (u64 *) data); - break; - } - case BLKRRPART:{ - if (!capable (CAP_SYS_ADMIN)) { - rc = -EACCES; - break; - } - rc = dasd_revalidate (device); - break; - } - case HDIO_GETGEO:{ - struct hd_geometry geo = { 0, }; - rc = dasd_fillgeo (inp->i_rdev, &geo); - if (rc) - break; + case HDIO_GETGEO: { /* return disk geometry */ + struct hd_geometry geo = { 0, }; + rc = dasd_fillgeo (inp->i_rdev, &geo); + if (rc) + break; - rc = copy_to_user ((struct hd_geometry *) data, &geo, - sizeof (struct hd_geometry)); - if (rc) - rc = -EFAULT; - break; - } - case BIODASDDISABLE:{ - if (!capable (CAP_SYS_ADMIN)) { - rc = -EACCES; - break; - } - if ( device->level > DASD_STATE_ACCEPT) { - dasd_deactivate_queue(device); - if ( device->request_queue) - dasd_flush_request_queues(device,0); - dasd_flush_chanq(device,0); - dasd_disable_blkdev(device); - dasd_set_device_level (device->devinfo.devno, - device->discipline, - DASD_STATE_ACCEPT); - } + rc = copy_to_user ((struct hd_geometry *) data, &geo, + sizeof (struct hd_geometry)); + if (rc) + rc = -EFAULT; + break; + } + case BIODASDDISABLE: { /* disable device */ + if (!capable (CAP_SYS_ADMIN)) { + rc = -EACCES; break; + } + + if ( device->level > DASD_STATE_ACCEPT) { + dasd_deactivate_queue(device); + if ( device->request_queue) + dasd_flush_request_queues(device,0); + dasd_flush_chanq(device,0); + dasd_disable_blkdev(device); + dasd_set_device_level (device->devinfo.devno, + device->discipline, + DASD_STATE_ACCEPT); + } + + break; } - case BIODASDENABLE:{ - dasd_range_t range = { - from: device->devinfo.devno, - to: device->devinfo.devno - }; - if (!capable (CAP_SYS_ADMIN)) { - rc = -EACCES; - break; - } - dasd_enable_ranges (&range, device->discipline, 0); + case BIODASDENABLE: { /* enable device */ + dasd_range_t range = { + from: device->devinfo.devno, + to: device->devinfo.devno + }; + if (!capable (CAP_SYS_ADMIN)) { + rc = -EACCES; break; + } + dasd_enable_ranges (&range, device->discipline, 0); + break; } - case BIODASDFMT:{ - /* fdata == NULL is no longer a valid arg to dasd_format ! */ - int partn = MINOR (inp->i_rdev) & - ((1 << major_info->gendisk.minor_shift) - 1); - format_data_t fdata; + case BIODASDFMT: { /* format device */ + /* fdata == NULL is no longer a valid arg to dasd_format ! 
*/ + int partn = MINOR (inp->i_rdev) & + ((1 << major_info->gendisk.minor_shift) - 1); + format_data_t fdata; + + if (!capable (CAP_SYS_ADMIN)) { + rc = -EACCES; + break; + } + if (dasd_features_from_devno(device->devinfo.devno)&DASD_FEATURE_READONLY) { + rc = -EROFS; + break; + } + if (!data) { + rc = -EINVAL; + break; + } + rc = copy_from_user (&fdata, (void *) data, + sizeof (format_data_t)); + if (rc) { + rc = -EFAULT; + break; + } + if (partn != 0) { - if (!capable (CAP_SYS_ADMIN)) { - rc = -EACCES; - break; - } - if (dasd_features_from_devno(device->devinfo.devno)&DASD_FEATURE_READONLY) { - rc = -EROFS; - break; + DEV_MESSAGE (KERN_WARNING, device, "%s", + "Cannot low-level format a partition"); + + return -EINVAL; + } + rc = dasd_format (device, &fdata); + break; + } + case BIODASDSATTR: { /* Set Attributes (cache operations) */ + + attrib_data_t attrib; + + if (!capable (CAP_SYS_ADMIN)) { + rc = -EACCES; + break; + } + + if (!data) { + rc = -EINVAL; + break; + } + + if (!device->discipline->set_attrib) { + rc = -EINVAL; + break; + } + + rc = copy_from_user (&attrib, (void *) data, + sizeof (attrib_data_t)); + if (rc) { + rc = -EFAULT; + break; + } + + rc = device->discipline->set_attrib (device, + &attrib); + break; + } + case BIODASDPRRST: { /* reset device profile information */ + if (!capable (CAP_SYS_ADMIN)) { + rc = -EACCES; + break; + } + memset (&device->profile, 0, + sizeof (dasd_profile_info_t)); + break; + } + case BIODASDPRRD: { /* return device profile information */ + rc = copy_to_user((long *)data, + (long *)&device->profile, + sizeof(dasd_profile_info_t)); + if (rc) + rc = -EFAULT; + break; + } + case BIODASDRSRV: { /* reserve device */ + ccw_req_t *cqr; + if (!capable (CAP_SYS_ADMIN)) { + rc = -EACCES; + break; + } + + if (!device->discipline->reserve) { + rc = -EINVAL; + break; + } + + cqr = device->discipline->reserve (device); + + if (cqr) { + struct timer_list res_timer; + + init_timer (&res_timer); + res_timer.function = dasd_resrel_timeout; + res_timer.data = (unsigned long) cqr; + res_timer.expires = jiffies + 2 * HZ; + add_timer (&res_timer); + + rc = dasd_sleep_on_immediate (cqr); + + del_timer_sync (&res_timer); + dasd_free_request (cqr, + device); + } else { + rc = -ENOMEM; + } + break; + } + case BIODASDRLSE: { /* release device */ + ccw_req_t *cqr; + if (!capable (CAP_SYS_ADMIN)) { + rc = -EACCES; + break; + } + + if (!device->discipline->release) { + rc = -EINVAL; + break; + } + + cqr = device->discipline->release (device); + + if (cqr) { + struct timer_list rel_timer; + + init_timer (&rel_timer); + rel_timer.function = dasd_resrel_timeout; + rel_timer.data = (unsigned long) cqr; + rel_timer.expires = jiffies + 2 * HZ; + add_timer (&rel_timer); + + rc = dasd_sleep_on_immediate (cqr); + + del_timer_sync (&rel_timer); /* in case of interrupt */ + dasd_free_request (cqr, + device); + } else { + rc = -ENOMEM; + } + break; + } + case BIODASDSLCK: { /* steal lock - unconditional reserve device */ + if (!capable (CAP_SYS_ADMIN)) { + rc = -EACCES; + break; + } + + rc = dasd_steal_lock (device); + break; + } + case BIODASDINFO: /* return dasd information */ + case BIODASDINFO2: { /* return dasd information2 (incl. 
format and features) */ + dasd_information2_t dasd_info; + + unsigned long flags; + + if (!device->discipline->fill_info) { + rc = -EINVAL; + break; + } + + rc = device->discipline->fill_info (device, + &dasd_info); + + dasd_info.label_block = device->sizes.pt_block; + dasd_info.devno = device->devinfo.devno; + dasd_info.schid = device->devinfo.irq; + dasd_info.cu_type = device->devinfo.sid_data.cu_type; + dasd_info.cu_model = device->devinfo.sid_data.cu_model; + dasd_info.dev_type = device->devinfo.sid_data.dev_type; + dasd_info.dev_model = device->devinfo.sid_data.dev_model; + dasd_info.open_count = + atomic_read (&device->open_count); + dasd_info.status = device->level; + + /* check if device is really formatted - LDL / CDL was returned by 'fill_info' */ + if ((device->level < DASD_STATE_READY) || + (dasd_check_bp_block (device) ) ) { + dasd_info.format = DASD_FORMAT_NONE; + } + + dasd_info.features = + dasd_features_from_devno (device->devinfo.devno); + + if (device->discipline) { + memcpy (dasd_info.type, + device->discipline->name, 4); + } else { + memcpy (dasd_info.type, "none", 4); + } + dasd_info.req_queue_len = 0; + dasd_info.chanq_len = 0; + + if ((device->request_queue ) && + (device->request_queue->request_fn) ) { + struct list_head *l; + ccw_req_t *cqr = device->queue.head; + spin_lock_irqsave (&io_request_lock, flags); + list_for_each (l, + &device->request_queue-> + queue_head) { + dasd_info.req_queue_len++; } - if (!data) { - rc = -EINVAL; - break; - } - rc = copy_from_user (&fdata, (void *) data, - sizeof (format_data_t)); - if (rc) { - rc = -EFAULT; - break; - } - if (partn != 0) { - DASD_MESSAGE (KERN_WARNING, device, "%s", - "Cannot low-level format a partition"); - return -EINVAL; - } - rc = dasd_format (device, &fdata); - break; - } - case BIODASDPRRST:{ /* reset device profile information */ - if (!capable (CAP_SYS_ADMIN)) { - rc = -EACCES; - break; - } - memset (&device->profile, 0, - sizeof (dasd_profile_info_t)); - break; - } - case BIODASDPRRD:{ /* retrun device profile information */ - rc = copy_to_user((long *)data, - (long *)&device->profile, - sizeof(dasd_profile_info_t)); - if (rc) - rc = -EFAULT; - break; - } - case BIODASDRSRV:{ /* reserve */ - ccw_req_t *req; - if (!capable (CAP_SYS_ADMIN)) { - rc = -EACCES; - break; - } - req = device->discipline->reserve (device); - rc = dasd_sleep_on_req (req); - dasd_free_request (req, device); - break; - } - case BIODASDRLSE:{ /* release */ - ccw_req_t *req; - if (!capable (CAP_SYS_ADMIN)) { - rc = -EACCES; - break; - } - req = device->discipline->release (device); - rc = dasd_sleep_on_req (req); - dasd_free_request (req, device); - break; - } - case BIODASDSLCK:{ /* steal lock - unconditional reserve */ - ccw_req_t *req; - if (!capable (CAP_SYS_ADMIN)) { - rc = -EACCES; - break; - } - req = device->discipline->steal_lock (device); - rc = dasd_sleep_on_req (req); - dasd_free_request (req, device); - break; - } - case BIODASDINFO:{ - dasd_information_t dasd_info; - unsigned long flags; - rc = device->discipline->fill_info (device, &dasd_info); - dasd_info.label_block = device->sizes.pt_block; - dasd_info.devno = device->devinfo.devno; - dasd_info.schid = device->devinfo.irq; - dasd_info.cu_type = device->devinfo.sid_data.cu_type; - dasd_info.cu_model = device->devinfo.sid_data.cu_model; - dasd_info.dev_type = device->devinfo.sid_data.dev_type; - dasd_info.dev_model = device->devinfo.sid_data.dev_model; - dasd_info.open_count = - atomic_read (&device->open_count); - dasd_info.status = device->level; - if 
(device->discipline) { - memcpy (dasd_info.type, - device->discipline->name, 4); - } else { - memcpy (dasd_info.type, "none", 4); - } - dasd_info.req_queue_len = 0; - dasd_info.chanq_len = 0; - if (device->request_queue->request_fn) { - struct list_head *l; - ccw_req_t *cqr = device->queue.head; - spin_lock_irqsave (&io_request_lock, flags); - list_for_each (l, - &device->request_queue-> - queue_head) { - dasd_info.req_queue_len++; - } - spin_unlock_irqrestore (&io_request_lock, - flags); - s390irq_spin_lock_irqsave (device->devinfo.irq, - flags); - while (cqr) { - cqr = cqr->next; - dasd_info.chanq_len++; - } - s390irq_spin_unlock_irqrestore (device->devinfo. - irq, flags); - } - rc = - copy_to_user ((long *) data, (long *) &dasd_info, - sizeof (dasd_information_t)); - if (rc) - rc = -EFAULT; - break; - } + spin_unlock_irqrestore (&io_request_lock, + flags); + s390irq_spin_lock_irqsave (device->devinfo.irq, + flags); + while (cqr) { + cqr = cqr->next; + dasd_info.chanq_len++; + } + s390irq_spin_unlock_irqrestore (device->devinfo. + irq, flags); + } + + rc = copy_to_user ((long *) data, (long *) &dasd_info, + ((no == (unsigned int) BIODASDINFO2) ? + sizeof (dasd_information2_t) : + sizeof (dasd_information_t))); + + if (rc) + rc = -EFAULT; + break; + } + case BIODASDPSRD: { /* Performance Statistics Read */ + + ccw_req_t *cqr; + dasd_rssd_perf_stats_t *stats; + + if ((!device->discipline->read_stats) || + (!device->discipline->ret_stats ) ) { + rc = -EINVAL; + break; + } + + cqr = device->discipline->read_stats (device); + + if (cqr) { + + if ((rc = dasd_sleep_on_req (cqr)) == 0) { + + if ((stats = device->discipline->ret_stats (cqr)) != NULL) { + + rc = copy_to_user ((long *) data, + (long *) stats, + sizeof (dasd_rssd_perf_stats_t)); + } else { + + rc = -EFAULT; + } + } + + dasd_free_request (cqr, + device); + + } else { + rc = -ENOMEM; + } + break; + } #if 0 /* needed for XFS */ - case BLKBSZSET:{ + case BLKBSZSET: { int bsz; rc = copy_from_user ((long *)&bsz,(long *)data,sizeof(int)); if ( rc ) { @@ -2528,10 +3146,46 @@ rc = -EINVAL; } break; - } + } #endif /* 0 */ + case BLKROSET: { + int intval; + dasd_range_t *temp; + int devindex = 0; + unsigned long flags; + struct list_head *l; + int major=MAJOR(device->kdev); + int minor; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (inp->i_rdev != device->kdev) + // ro setting is not allowed for partitions + return -EINVAL; + if (get_user(intval, (int *)(data))) + return -EFAULT; + spin_lock_irqsave (&range_lock, flags); + list_for_each (l, &dasd_range_head.list) { + temp = list_entry (l, dasd_range_t, list); + if (device->devinfo.devno >= temp->from && device->devinfo.devno <= temp->to) { + spin_unlock_irqrestore (&range_lock, flags); + if (intval) + temp->features |= DASD_FEATURE_READONLY; + else + temp->features &= ~DASD_FEATURE_READONLY; + goto continue_blkroset; + } + devindex += temp->to - temp->from + 1; + } + spin_unlock_irqrestore (&range_lock, flags); + return(-ENODEV); +continue_blkroset: + for (minor = MINOR(device->kdev); minor < MINOR(device->kdev) + (1 << DASD_PARTN_BITS); minor++) + set_device_ro(MKDEV(major,minor), intval); + return 0; + } + case BLKBSZGET: case BLKSSZGET: - case BLKROSET: case BLKROGET: case BLKRASET: case BLKRAGET: @@ -2541,37 +3195,41 @@ case BLKELVSET: return blk_ioctl (inp->i_rdev, no, data); break; - default:{ + default: { - dasd_ioctl_list_t *old = dasd_find_ioctl (no); - if (old) { - if ( old->owner ) - __MOD_INC_USE_COUNT(old->owner); - rc = old->handler (inp, no, data); - if ( old->owner ) 
- __MOD_DEC_USE_COUNT(old->owner); - } else { - DASD_MESSAGE (KERN_INFO, device, - "ioctl 0x%08x=%s'0x%x'%d(%d) data %8lx\n", - no, - _IOC_DIR (no) == _IOC_NONE ? "0" : - _IOC_DIR (no) == _IOC_READ ? "r" : - _IOC_DIR (no) == _IOC_WRITE ? "w" : - _IOC_DIR (no) == - (_IOC_READ | _IOC_WRITE) ? "rw" : "u", - _IOC_TYPE (no), - _IOC_NR (no), - _IOC_SIZE (no), - data); - rc = -ENOTTY; - } - break; + dasd_ioctl_list_t *old = dasd_find_ioctl (no); + if (old) { + if ( old->owner ) + __MOD_INC_USE_COUNT(old->owner); + rc = old->handler (inp, no, data); + if ( old->owner ) + __MOD_DEC_USE_COUNT(old->owner); + } else { + + DBF_DEV_EVENT (DBF_INFO, device, + "unknown ioctl 0x%08x=%s'0x%x'%d(%d) data %8lx", + no, + (_IOC_DIR (no) == _IOC_NONE ? "0" : + _IOC_DIR (no) == _IOC_READ ? "r" : + _IOC_DIR (no) == _IOC_WRITE ? "w" : + _IOC_DIR (no) == + (_IOC_READ | _IOC_WRITE) ? "rw" : "u"), + _IOC_TYPE (no), + _IOC_NR (no), + _IOC_SIZE (no), + data); + + rc = -ENOTTY; + } + break; } } return rc; } -/* SECTION: The members of the struct file_operations */ +/******************************************************************************** + * SECTION: The members of the struct file_operations + ********************************************************************************/ static int dasd_ioctl (struct inode *inp, struct file *filp, @@ -2597,26 +3255,32 @@ goto fail; } if (dasd_probeonly) { - printk ("\n" KERN_INFO PRINTK_HEADER - "No access to device (%d:%d) due to probeonly mode\n", - MAJOR (inp->i_rdev), - MINOR (inp->i_rdev)); + + MESSAGE (KERN_INFO, + "No access to device (%d:%d) due to probeonly mode", + MAJOR (inp->i_rdev), + MINOR (inp->i_rdev)); + rc = -EPERM; goto fail; } spin_lock_irqsave(&discipline_lock,flags); device = dasd_device_from_kdev (inp->i_rdev); if (!device) { - printk (KERN_WARNING PRINTK_HEADER - "No device registered as (%d:%d)\n", - MAJOR (inp->i_rdev), - MINOR (inp->i_rdev)); + + MESSAGE (KERN_WARNING, + "No device registered as (%d:%d)", + MAJOR (inp->i_rdev), + MINOR (inp->i_rdev)); + rc = -ENODEV; goto unlock; } if (device->level <= DASD_STATE_ACCEPT ) { - DASD_MESSAGE (KERN_WARNING, device, " %s", - " Cannot open unrecognized device\n"); + + DBF_DEV_EVENT (DBF_ERR, device, " %s", + " Cannot open unrecognized device"); + rc = -ENODEV; goto unlock; } @@ -2648,17 +3312,21 @@ } device = dasd_device_from_kdev (inp->i_rdev); if (!device) { - printk (KERN_WARNING PRINTK_HEADER - "No device registered as %d:%d\n", - MAJOR (inp->i_rdev), - MINOR (inp->i_rdev)); + + MESSAGE (KERN_WARNING, + "No device registered as %d:%d", + MAJOR (inp->i_rdev), + MINOR (inp->i_rdev)); + rc = -EINVAL; goto out; } if (device->level < DASD_STATE_ACCEPT ) { - DASD_MESSAGE (KERN_WARNING, device, " %s", - " Cannot release unrecognized device\n"); + + DBF_DEV_EVENT (DBF_ERR, device, " %s", + " Cannot release unrecognized device"); + rc = -ENODEV; goto out; } @@ -2669,8 +3337,9 @@ __MOD_DEC_USE_COUNT(device->discipline->owner); } else if ( count == -1 ) { /* paranoia only */ atomic_set (&device->open_count,0); - printk (KERN_WARNING PRINTK_HEADER - "release called with open count==0\n"); + + MESSAGE (KERN_WARNING, "%s", + "release called with open count==0"); } out: return rc; @@ -2685,7 +3354,9 @@ ioctl:dasd_ioctl, }; -/* SECTION: Management of device list */ +/******************************************************************************** + * SECTION: Management of device list + ********************************************************************************/ int dasd_fillgeo(int kdev,struct hd_geometry *geo) 
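/*
 * dasd_device_name (below) turns a linear device index into the
 * dasda ... dasdzzz naming scheme: indices 0-25 get one letter,
 * 26-701 two letters (26 + 26*26 = 702 names below three letters),
 * and 702 and up three letters. A user-space sketch of the same
 * base-26 arithmetic (dasd_suffix() is an illustrative helper, not
 * driver code):
 */
#include <stdio.h>

static void
dasd_suffix (int index, char *buf)
{
	int third = index % 26;
	int second = ((index - 26) / 26) % 26;
	int first = (((index - 702) / 26) / 26) % 26;
	char *p = buf;

	if (index >= 702)	/* 702 = 26 + 26*26: first 3-letter name */
		*p++ = 'a' + first;
	if (index >= 26)	/* 26: first 2-letter name */
		*p++ = 'a' + second;
	*p++ = 'a' + third;
	*p = '\0';
}

int
main (void)
{
	int samples[] = { 0, 25, 26, 701, 702 };
	unsigned int i;

	for (i = 0; i < sizeof (samples) / sizeof (samples[0]); i++) {
		char buf[4];

		dasd_suffix (samples[i], buf);
		printf ("index %4d -> dasd%s\n", samples[i], buf);
	}
	return 0;
}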
{ @@ -2708,24 +3379,27 @@ int dasd_device_name (char *str, int index, int partition, struct gendisk *hd) { - int len = 0; + major_info_t *major_info; + struct list_head *l; char first, second, third; + int len; - if (hd) { - major_info_t *major_info = NULL; - struct list_head *l; - - list_for_each (l, &dasd_major_info[0].list) { - major_info = list_entry (l, major_info_t, list); - if (&major_info->gendisk == hd) { - break; - } - index += DASD_PER_MAJOR; - } - if (major_info == &dasd_major_info[0]) { - return -EINVAL; - } - } + if (hd == NULL) + return -EINVAL; + + major_info = NULL; + list_for_each (l, &dasd_major_info) { + major_info = list_entry (l, major_info_t, list); + if (&major_info->gendisk == hd) + break; + index += DASD_PER_MAJOR; + } + if (major_info == NULL || &major_info->gendisk != hd) { + /* list empty or hd not found in list */ + return -EINVAL; + } + + len = 0; third = index % 26; second = ((index - 26) / 26) % 26; first = (((index - 702) / 26) / 26) % 26; @@ -2777,7 +3451,8 @@ cqr->status != CQR_STATUS_FAILED ) { cqr->status = CQR_STATUS_FAILED; - asm volatile ("STCK %0":"=m" (cqr->stopclk)); + + cqr->stopclk = get_clock (); } dasd_schedule_bh(device); @@ -2802,6 +3477,56 @@ } } +static inline void dasd_do_hotplug_event (dasd_device_t* device, int eventid) { +#ifdef CONFIG_HOTPLUG + int i; + char *argv[3], *envp[8]; + char devno[20],major[20],minor[20],devname[26],action[20]; + + /* setup command line arguments */ + i=0; + argv[i++] = hotplug_path; + argv[i++] = "dasd"; + argv[i++] = 0; + + /* minimal environment */ + i=0; + envp[i++] = "HOME=/"; + envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + + /* device information and event*/ + sprintf (devno, "DEVNO=%04x", device->devinfo.devno); + sprintf (major, "MAJOR=%d", MAJOR(device->kdev)); + sprintf (minor, "MINOR=%d", MINOR(device->kdev)); + sprintf (devname, "DASDNAME=%s",device->name); + switch (eventid) { + case DASD_HOTPLUG_EVENT_ADD: + sprintf (action,"ACTION=add"); + break; + case DASD_HOTPLUG_EVENT_REMOVE: + sprintf (action,"ACTION=remove"); + break; + case DASD_HOTPLUG_EVENT_PARTCHK: + sprintf (action,"ACTION=partchk"); + break; + case DASD_HOTPLUG_EVENT_PARTREMOVE: + sprintf (action,"ACTION=partremove"); + break; + default: + BUG(); + } + envp[i++] = devno; + envp[i++] = major; + envp[i++] = minor; + envp[i++] = devname; + envp[i++] = action; + envp[i++] = 0; + + call_usermodehelper (argv [0], argv, envp); +#endif +} + + static int dasd_disable_volume ( dasd_device_t * device, int force ) { @@ -2810,8 +3535,9 @@ int count = atomic_read (&device->open_count); if ( count ) { - DASD_MESSAGE (KERN_EMERG, device, "%s", - "device has vanished although it was open!"); + + DEV_MESSAGE (KERN_EMERG, device, "%s", + "device has vanished although it was open!"); } if ( force ) { dasd_deactivate_queue(device); @@ -2824,9 +3550,11 @@ /* unregister partitions ('ungrok_partitions') */ devfs_register_partitions(&device->major_info->gendisk, MINOR(device->kdev),1); + dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_PARTREMOVE); - DASD_MESSAGE (KERN_WARNING, device, - "disabling device, target state: %d",target); + DBF_DEV_EVENT (DBF_ERR, device, + "disabling device, target state: %d", + target); dasd_set_device_level (device->devinfo.devno, device->discipline, @@ -2836,7 +3564,7 @@ static void dasd_disable_ranges (dasd_range_t *range, - dasd_discipline_t *d, + dasd_discipline_t *discipline, int all, int force ) { dasd_range_t *rrange; @@ -2858,14 +3586,15 @@ } device = *dptr; if (device == NULL || - (d != NULL && - device -> 
discipline != d)) + (discipline != NULL && + device -> discipline != discipline)) continue; dasd_disable_volume(device, force); } rrange = list_entry (rrange->list.next, dasd_range_t, list); } while ( all && rrange && rrange != range ); + } static void @@ -2877,10 +3606,13 @@ } static void -dasd_enable_ranges (dasd_range_t *range, dasd_discipline_t *d, int all ) +dasd_enable_ranges (dasd_range_t *range, + dasd_discipline_t *discipline, + int all) { int retries = 0; int j; + int do_again; kdev_t tempdev; dasd_range_t *rrange; @@ -2888,6 +3620,7 @@ return; do { + do_again = 0; if (range == &dasd_range_head) { rrange = list_entry (range->list.next, dasd_range_t, list); @@ -2898,26 +3631,40 @@ for (j = rrange->from; j <= rrange->to; j++) { if ( dasd_devindex_from_devno(j) < 0 ) continue; - dasd_set_device_level (j, d, DASD_STATE_ONLINE); + if (-EAGAIN == dasd_set_device_level + (j, discipline, DASD_STATE_ONLINE)) + do_again = 1; } rrange = list_entry (rrange->list.next, dasd_range_t, list); } while ( all && rrange && rrange != range ); - if (atomic_read (&dasd_init_pending) == 0) /* we are done, exit loop */ + if ((atomic_read (&dasd_init_pending) == 0) && + (!do_again)) /* we are done, exit loop */ break; if ( retries == 0 ) { - printk (KERN_INFO PRINTK_HEADER - "waiting for responses...\n"); + + MESSAGE (KERN_INFO, "%s", + "waiting for responses..."); + } else if ( retries < 5 ) { - printk (KERN_INFO PRINTK_HEADER - "waiting a little bit longer...\n"); + + DBF_EVENT (DBF_NOTICE, "%s", + "waiting a little bit longer..."); + } else { - printk (KERN_INFO PRINTK_HEADER - "giving up, enable late devices manually!\n"); + + MESSAGE (KERN_INFO, "%s", + "giving up, enable late devices manually!"); break; } - interruptible_sleep_on_timeout (&dasd_init_waitq, (1 * HZ)); + + /* prevent scheduling if called by bh (timer) */ + if (!in_interrupt()) { + interruptible_sleep_on_timeout (&dasd_init_waitq, + (1 * HZ) ); + } + retries ++; } while (1); /* now setup block devices */ @@ -2939,9 +3686,9 @@ device = *dptr; if (device == NULL ) continue; - if ( ((d == NULL && device->discipline != NULL) || - (device->discipline == d )) && - device->level >= DASD_STATE_READY && + if ( ((discipline == NULL && device->discipline != NULL) || + (device->discipline == discipline )) && + device->level == DASD_STATE_ONLINE && device->request_queue == NULL ) { if (dasd_features_from_devno(j)&DASD_FEATURE_READONLY) { for (tempdev=device->kdev; @@ -2949,9 +3696,8 @@ tempdev++) set_device_ro (tempdev, 1); - printk (KERN_WARNING PRINTK_HEADER - "setting read-only mode for device /dev/%s\n", - device->name); + DEV_MESSAGE (KERN_WARNING, device, "%s", + "setting read-only mode "); } dasd_setup_blkdev(device); dasd_setup_partitions(device); @@ -2971,13 +3717,17 @@ static void dasd_not_oper_handler (int irq, int status) { - dasd_device_t *device = NULL; - major_info_t *major_info = NULL; + dasd_device_t *device; + major_info_t *major_info; + ccw_req_t* cqr; struct list_head *l; - int i, devno = -ENODEV; + unsigned long flags; + int i, devno; /* find out devno of leaving device: CIO has already deleted this information ! 
*/ - list_for_each (l, &dasd_major_info[0].list) { + devno = -ENODEV; + device = NULL; + list_for_each (l, &dasd_major_info) { major_info = list_entry (l, major_info_t, list); for (i = 0; i < DASD_PER_MAJOR; i++) { device = major_info->dasd_device[i]; @@ -2990,17 +3740,44 @@ break; } - DASD_DRIVER_DEBUG_EVENT (5, dasd_not_oper_handler, - "called for devno %04x", - devno); - if (devno < 0) { - printk (KERN_WARNING PRINTK_HEADER - "not_oper_handler called on irq 0x%04x no devno!\n", - irq); + + MESSAGE (KERN_WARNING, + "not_oper_handler called on irq 0x%04x no devno!", + irq); return; } - dasd_disable_volume(device, 1); + switch (status) { + case DEVSTAT_DEVICE_GONE: + case DEVSTAT_REVALIDATE: //FIXME + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "device is gone, disabling it permanently\n"); + dasd_disable_volume(device, 1); + break; + case DEVSTAT_NOT_ACC: + case DEVSTAT_NOT_ACC_ERR: + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "device is not accessible, disabling it temporary\n"); + s390irq_spin_lock_irqsave (device->devinfo.irq, + flags); + device->accessible = 0; + if (status == DEVSTAT_NOT_ACC_ERR) { + cqr = device->queue.head; + while (cqr) { + if (cqr->status == CQR_STATUS_QUEUED) + break; + if (cqr->status == CQR_STATUS_IN_IO) + cqr->status = CQR_STATUS_QUEUED; + cqr = cqr->next; + } + } + s390irq_spin_unlock_irqrestore(device->devinfo.irq, + flags); + + break; + default: + panic ("dasd not operational handler was called with illegal status\n"); + } } /* @@ -3014,56 +3791,62 @@ { int devno; int rc = 0; - major_info_t *major_info = NULL; + major_info_t *major_info; dasd_range_t *rptr,range; - dasd_device_t *device = NULL; + dasd_device_t *device; struct list_head *l; + unsigned long flags; int i; + devno = get_devno_by_irq (irq); if (devno == -ENODEV) { rc = -ENODEV; goto out; } - DASD_DRIVER_DEBUG_EVENT (5, dasd_oper_handler, - "called for devno %04x", - devno); - /* find out devno of device */ - list_for_each (l, &dasd_major_info[0].list) { + device = NULL; + list_for_each (l, &dasd_major_info) { major_info = list_entry (l, major_info_t, list); for (i = 0; i < DASD_PER_MAJOR; i++) { device = major_info->dasd_device[i]; - if (device && device->devinfo.irq == irq) { - devno = device->devinfo.devno; + if (device && device->devinfo.irq == irq) break; - } + else + device = NULL; } - if (devno != -ENODEV) + if (device) break; } - if (devno < 0) { - BUG(); - } + if ( device && - device->level == DASD_STATE_READY ) { - dasd_set_device_level (device->devinfo.devno, - device->discipline, DASD_STATE_ONLINE); + (device->level == DASD_STATE_ONLINE) && + (!device->accessible) ) { + s390irq_spin_lock_irqsave (device->devinfo.irq, + flags); + DEV_MESSAGE (KERN_DEBUG, device, "%s", + "device is accessible again, reenabling it\n"); + device->accessible = 1; + s390irq_spin_unlock_irqrestore(device->devinfo.irq, + flags); + + dasd_schedule_bh(device); } else { - if (dasd_autodetect) { - rptr = dasd_add_range (devno, devno, DASD_DEFAULT_FEATURES); - if ( rptr == NULL ) { - rc = -ENOMEM; - goto out; - } - } else { - range.from = devno; - range.to = devno; - rptr = ⦥ - } - dasd_enable_ranges (rptr, NULL, 0); + + if (dasd_autodetect) { + rptr = dasd_add_range (devno, devno, DASD_FEATURE_DEFAULT); + if ( rptr == NULL ) { + rc = -ENOMEM; + goto out; + } + } else { + range.from = devno; + range.to = devno; + rptr = ⦥ + } + dasd_enable_ranges (rptr, NULL, 0); } out: return rc; @@ -3075,13 +3858,16 @@ { dasd_device_t **device_addr; - DASD_DRIVER_DEBUG_EVENT (1, dasd_find_device_addr, - "devno %04x", - devno); + 
DBF_EVENT (DBF_INFO, + "devno %04x", + devno); + if ( dasd_devindex_from_devno (devno) < 0 ) { - DASD_DRIVER_DEBUG_EXCEPTION (1, dasd_find_device_addr, - "no dasd: devno %04x", - devno); + + DBF_EXC (DBF_ALERT, + "no dasd: devno %04x", + devno); + return NULL; } /* allocate major numbers on demand for new devices */ @@ -3090,9 +3876,8 @@ if ((rc = dasd_register_major (NULL)) <= 0) { - DASD_DRIVER_DEBUG_EXCEPTION (1, dasd_find_device_addr, - "%s", - "out of major numbers!"); + DBF_EXC (DBF_ALERT, "%s", + "out of major numbers!"); break; } } @@ -3100,86 +3885,116 @@ } static inline int -dasd_state_del_to_new (dasd_device_t **addr ) +dasd_state_del_to_new (dasd_device_t **addr, int devno) { + int i; dasd_device_t* device; - int rc = 0; - if (*addr == NULL) { /* allocate device descriptor on demand for new device */ - device = kmalloc (sizeof (dasd_device_t), GFP_ATOMIC); - if (device == NULL ) { - rc = -ENOMEM; - goto out; - } - memset (device, 0, sizeof (dasd_device_t)); + dasd_lowmem_t *lowmem; + int rc; + + + /* allocate device descriptor on demand for new device */ + if (*addr != NULL) { + BUG (); + } + + device = kmalloc (sizeof (dasd_device_t), GFP_ATOMIC); + if (device == NULL) { + return -ENOMEM; + } + + memset (device, 0, sizeof (dasd_device_t)); + dasd_plug_device (device); + device->accessible = 1; + INIT_LIST_HEAD (&device->lowmem_pool); + + /* allocate pages for lowmem pool */ + for (i = 0; i < DASD_LOWMEM_PAGES; i++) { + + lowmem = (void *) get_free_page (GFP_ATOMIC|GFP_DMA); + if (lowmem == NULL) { + break; + } + + MESSAGE (KERN_DEBUG, + "%04x: Add lowmem page :%p", + devno, lowmem); + list_add (&lowmem->list, &device->lowmem_pool); + } + + if (i < DASD_LOWMEM_PAGES) { + /* didn't get the needed lowmem pages */ + list_for_each_entry (lowmem, &device->lowmem_pool, list) { + MESSAGE (KERN_DEBUG, + "%04x: not enough memory - " + "Free page again :%p", + devno, lowmem); + free_page ((unsigned long) lowmem); + } + kfree (device); + rc = -ENOMEM; + } else { *addr = device; - device->lowmem_ccws = (void*)get_free_page (GFP_ATOMIC|GFP_DMA); - if (device->lowmem_ccws == NULL) { - rc = -ENOMEM; - goto noccw; - } -#ifdef CONFIG_ARCH_S390X - device->lowmem_idals = - device->lowmem_idal_ptr = (void*) get_free_page (GFP_ATOMIC|GFP_DMA); - if (device->lowmem_idals == NULL) { - rc = -ENOMEM; - goto noidal; - } -#endif -} - goto out; -#ifdef CONFIG_ARCH_S390X - noidal: - free_page ((long) device->lowmem_ccws); -#endif - noccw: - kfree(device); - out: + rc = 0; + } return rc; } static inline int -dasd_state_new_to_del (dasd_device_t **addr ) +dasd_state_new_to_del (dasd_device_t **addr, int devno) { + dasd_lowmem_t *lowmem; + dasd_device_t *device = *addr; + + /* free private area */ if (device && device->private) { kfree(device->private); - device->private = NULL; } -#ifdef CONFIG_ARCH_S390X - free_page ((long)(device->lowmem_idals)); -#endif - free_page((long)(device->lowmem_ccws)); + + /* free lowmem_pool */ + list_for_each_entry (lowmem, &device->lowmem_pool, list) { + MESSAGE (KERN_DEBUG, + "%04x: Free lowmem page :%p", + devno, lowmem); + free_page ((unsigned long) lowmem); + } + + /* free device */ kfree(device); *addr = NULL; return 0; } static inline int -dasd_state_new_to_known (dasd_device_t **dptr, int devno, dasd_discipline_t *disc) +dasd_state_new_to_known (dasd_device_t **dptr, + int devno, + dasd_discipline_t *discipline) { int rc = 0; umode_t devfs_perm = S_IFBLK | S_IRUSR | S_IWUSR; struct list_head *l; - major_info_t *major_info = NULL; + major_info_t *major_info, *tmp; int i; dasd_device_t
*device = *dptr; devfs_handle_t dir; char buffer[5]; - - list_for_each (l, &dasd_major_info[0].list) { - major_info = list_entry (l, major_info_t, list); + major_info = NULL; + list_for_each (l, &dasd_major_info) { + tmp = list_entry (l, major_info_t, list); for (i = 0; i < DASD_PER_MAJOR; i++) { - if (major_info->dasd_device[i] == device) { - device->kdev = MKDEV (major_info->gendisk.major, + if (tmp->dasd_device[i] == device) { + device->kdev = MKDEV (tmp->gendisk.major, i << DASD_PARTN_BITS); + major_info = tmp; break; } } - if (i < DASD_PER_MAJOR) /* we found one */ + if (major_info != NULL) /* we found one */ break; } - if ( major_info == NULL || major_info == &dasd_major_info[0] ) + if ( major_info == NULL ) BUG(); device->major_info = major_info; @@ -3192,18 +4007,24 @@ rc = get_dev_info_by_devno (devno, &device->devinfo); if ( rc ) { + /* returns -EUSERS if boxed !!*/ + if (rc == -EUSERS) { + device->level = DASD_STATE_BOXED; + } goto out; } - DASD_DRIVER_DEBUG_EVENT (5, dasd_state_new_to_known, - "got devinfo CU-type %04x and dev-type %04x", - device->devinfo.sid_data.cu_type, - device->devinfo.sid_data.dev_type); - + DBF_EVENT (DBF_NOTICE, + "got devinfo CU-type %04x and dev-type %04x", + device->devinfo.sid_data.cu_type, + device->devinfo.sid_data.dev_type); + if ( devno != device->devinfo.devno ) BUG(); - device->discipline = dasd_find_disc (device, disc); + + device->discipline = dasd_find_disc (device, + discipline); if ( device->discipline == NULL ) { rc = -ENODEV; goto out; @@ -3221,6 +4042,7 @@ MINOR(device->kdev), devfs_perm, &dasd_device_operations,NULL); + dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_ADD); device->level = DASD_STATE_KNOWN; out: return rc; @@ -3233,7 +4055,7 @@ /* don't reset to zeros because of persistent data durich detach/attach! 
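The bring-up path above trades the old single lowmem_ccws page for a per-device pool of DMA-capable pages strung on a list_head, so channel programs can still be built under memory pressure; a partial allocation is rolled back completely. A minimal sketch of such a pool, assuming the list node sits at the start of its own page (POOL_PAGES and struct lowmem_page are illustrative, not the driver's types):

#include <linux/list.h>
#include <linux/mm.h>

#define POOL_PAGES 3	/* assumed pool depth */

struct lowmem_page {
	struct list_head list;	/* node lives inside the page it manages */
};

static int
fill_pool(struct list_head *pool)
{
	struct lowmem_page *page;
	int i;

	for (i = 0; i < POOL_PAGES; i++) {
		page = (void *) get_free_page(GFP_ATOMIC | GFP_DMA);
		if (page == NULL)
			goto undo;	/* a partial pool is of no use */
		list_add(&page->list, pool);
	}
	return 0;
undo:
	while (!list_empty(pool)) {
		page = list_entry(pool->next, struct lowmem_page, list);
		list_del(&page->list);	/* unlink before the node's page is freed */
		free_page((unsigned long) page);
	}
	return -ENOMEM;
}

Unlinking each node before free_page keeps the walk from touching memory that has just been handed back to the allocator.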
*/ devfs_unregister(device->devfs_entry); devfs_unregister(device->major_info->gendisk.de_arr[MINOR(device->kdev) >> DASD_PARTN_BITS]); - + dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_REMOVE); return rc; } @@ -3241,12 +4063,21 @@ dasd_state_known_to_accept (dasd_device_t *device) { int rc = 0; - device->debug_area = debug_register (device->name, 0, 2, - 3 * sizeof (long)); - debug_register_view (device->debug_area, &debug_sprintf_view); - debug_register_view (device->debug_area, &debug_hex_ascii_view); - DASD_DEVICE_DEBUG_EVENT (0, device,"%p debug area created", - device); + + /* register 'device' debug area, used for all DBF_DEV_XXX calls*/ + device->debug_area = debug_register (device->name, + 0, /* size of debug area */ + 2, /* number of areas */ + 8 * sizeof (long)); + + debug_register_view (device->debug_area, + &debug_sprintf_view); + + debug_set_level (device->debug_area, + DBF_ERR); + + DBF_DEV_EVENT (DBF_EMERG, device, "%s", + "debug area created"); if (device->discipline->int_handler) { rc = s390_request_irq_special (device->devinfo.irq, @@ -3255,7 +4086,10 @@ 0, DASD_NAME, &device->dev_status); if ( rc ) { - printk("No request IRQ\n"); + + MESSAGE (KERN_DEBUG, "%s", + "No request IRQ"); + goto out; } } @@ -3272,10 +4106,15 @@ if (device->discipline->int_handler) { free_irq (device->devinfo.irq, &device->dev_status); } - DASD_DEVICE_DEBUG_EVENT (0, device,"%p debug area deleted", - device); - if ( device->debug_area != NULL ) + + DBF_DEV_EVENT (DBF_EMERG, device, + "%p debug area deleted", + device); + + if (device->debug_area != NULL) { debug_unregister (device->debug_area); + device->debug_area = NULL; + } device->discipline = NULL; device->level = DASD_STATE_KNOWN; out: @@ -3297,18 +4136,17 @@ s390irq_spin_lock_irqsave (device->devinfo.irq, flags); rc = device->discipline->start_IO (device->init_cqr); + if ( ! rc ) + device->level = DASD_STATE_INIT; s390irq_spin_unlock_irqrestore(device->devinfo.irq, flags); - if ( rc ) - goto out; - device->level = DASD_STATE_INIT; } else { rc = -ENOMEM; } } else { rc = dasd_state_init_to_ready ( device ); } - out: + return rc; } @@ -3318,15 +4156,8 @@ int rc = 0; if (device->discipline->do_analysis != NULL) if ( device->discipline->do_analysis (device) == 0 ) - switch (device->sizes.bp_block) { - case 512: - case 1024: - case 2048: - case 4096: - break; - default: - rc = -EMEDIUMTYPE; - } + rc = dasd_check_bp_block (device); + if ( device->init_cqr ) { /* This pointer is no longer needed, BUT dont't free the */ /* memory, because this is done in bh for finished request!!!! 
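dasd_state_known_to_accept now registers a per-device s390 debug-feature area with just the sprintf view and a default level of DBF_ERR. The general shape of that facility's setup is the following sketch (area sizes and the numeric default level are illustrative):

#include <asm/debug.h>

static debug_info_t *dbg_area;

static int
setup_device_debug(char *name)
{
	/* minimal area size, 2 areas, records sized for 8 longs of arguments */
	dbg_area = debug_register(name, 0, 2, 8 * sizeof(long));
	if (dbg_area == NULL)
		return -ENOMEM;

	debug_register_view(dbg_area, &debug_sprintf_view);
	debug_set_level(dbg_area, 3);	/* keep only errors and worse */

	debug_sprintf_event(dbg_area, 0, "debug area for %s created", name);
	return 0;
}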
*/ @@ -3362,8 +4193,10 @@ dasd_state_ready_to_online (dasd_device_t *device ) { int rc = 0; - dasd_unplug_device (device); - device->level = DASD_STATE_ONLINE; + if (!(rc = dasd_check_bp_block (device))) { + dasd_unplug_device (device); + device->level = DASD_STATE_ONLINE; + } return rc; } @@ -3383,6 +4216,7 @@ int i; int major = MAJOR(device->kdev); int minor = MINOR(device->kdev); + request_queue_t *request_queue; for (i = 0; i < (1 << DASD_PARTN_BITS); i++) { if (i == 0) @@ -3399,11 +4233,16 @@ device->major_info->gendisk.part[minor+i].start_sect = 0; device->major_info->gendisk.part[minor+i].nr_sects = 0; } - device->request_queue = kmalloc(sizeof(request_queue_t),GFP_KERNEL); - device->request_queue->queuedata = device; - blk_init_queue (device->request_queue, do_dasd_request); - blk_queue_headactive (device->request_queue, 0); - elevator_init (&(device->request_queue->elevator),ELEVATOR_NOOP); + + request_queue = kmalloc(sizeof(request_queue_t),GFP_KERNEL); + if (request_queue) { + request_queue->queuedata = device; + blk_init_queue (request_queue, do_dasd_request); + blk_queue_headactive (request_queue, 1); + elevator_init (&(request_queue->elevator),ELEVATOR_NOOP); + } + device->request_queue = request_queue; + return rc; } @@ -3424,6 +4263,19 @@ int i; int major = MAJOR(device->kdev); int minor = MINOR(device->kdev); + request_queue_t *q = device->request_queue; + struct request *req; + long flags; + + spin_lock_irqsave(&io_request_lock, flags); + while (q && + !list_empty(&q->queue_head) && + (req = dasd_next_request(q)) != NULL) { + + dasd_end_request(req, 0); + dasd_dequeue_request(q, req); + } + spin_unlock_irqrestore(&io_request_lock, flags); for (i = 0; i < (1 << DASD_PARTN_BITS); i++) { destroy_buffers(MKDEV(major,minor+i)); @@ -3453,6 +4305,7 @@ 1 << DASD_PARTN_BITS, &dasd_device_operations, (device->sizes.blocks << device->sizes.s2b_shift)); + dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_PARTCHK); } static inline void @@ -3467,14 +4320,7 @@ } devfs_register_partitions(&device->major_info->gendisk, MINOR(device->kdev),1); -} - -static inline void -dasd_resetup_partitions ( dasd_device_t * device ) -{ - BUG(); - dasd_destroy_partitions ( device ) ; - dasd_setup_partitions ( device ) ; + dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_PARTREMOVE); } /* @@ -3505,11 +4351,11 @@ from_state = device->level; } - DASD_DRIVER_DEBUG_EVENT (3, dasd_set_device_level, - "devno %04x; from %i to %i", - devno, - from_state, - to_state); + DBF_EVENT (DBF_INFO, + "devno %04x; from %i to %i", + devno, + from_state, + to_state); if ( from_state == to_state ) goto out; @@ -3520,20 +4366,30 @@ /* First check for bringup */ if ( from_state <= DASD_STATE_DEL && to_state >= DASD_STATE_NEW ) { - rc = dasd_state_del_to_new(device_addr); + rc = dasd_state_del_to_new(device_addr, devno); if ( rc ) { goto bringup_fail; } device = *device_addr; } - if ( from_state <= DASD_STATE_NEW && + + /* reprobe boxed devices */ + if (device->level == DASD_STATE_BOXED) { + rc = s390_trigger_resense (device->devinfo.irq); + if ( rc ) { + goto bringup_fail; + } + } + + if ( device->level <= DASD_STATE_BOXED && to_state >= DASD_STATE_KNOWN ) { rc = dasd_state_new_to_known( device_addr, devno, discipline ); if ( rc ) { goto bringup_fail; } } - if ( from_state <= DASD_STATE_KNOWN && + + if ( device->level <= DASD_STATE_KNOWN && to_state >= DASD_STATE_ACCEPT ) { rc = dasd_state_known_to_accept(device); if ( rc ) { @@ -3543,19 +4399,21 @@ if ( dasd_probeonly ) { goto out; } - if ( from_state <= DASD_STATE_ACCEPT && 
+ + if ( device->level <= DASD_STATE_ACCEPT && to_state >= DASD_STATE_INIT ) { rc = dasd_state_accept_to_init(device); if ( rc ) { goto bringup_fail; } } - if ( from_state <= DASD_STATE_INIT && + if ( device->level <= DASD_STATE_INIT && to_state >= DASD_STATE_READY ) { rc = -EAGAIN; goto out; } - if ( from_state <= DASD_STATE_READY && + + if ( device->level <= DASD_STATE_READY && to_state >= DASD_STATE_ONLINE ) { rc = dasd_state_ready_to_online(device); if ( rc ) { @@ -3564,50 +4422,57 @@ } goto out; bringup_fail: /* revert changes */ -#if 0 - printk (KERN_DEBUG PRINTK_HEADER - "failed to set device from state %d to %d at " - "level %d rc %d. Reverting...\n", - from_state, - to_state, - device->level, - rc); -#endif - to_state = from_state; - from_state = device->level; + + DBF_DEV_EVENT (DBF_ERR, device, + "failed to set device from state %d to %d at " + "level %d rc %d. Reverting...", + from_state, + to_state, + device->level, + rc); + + if (device->level <= DASD_STATE_NEW) { + /* Revert - device can not be accessed */ + to_state = from_state; + from_state = device->level; + } /* now do a shutdown */ shutdown: - if ( from_state >= DASD_STATE_ONLINE && + if ( device->level >= DASD_STATE_ONLINE && to_state <= DASD_STATE_READY ) if (dasd_state_online_to_ready(device)) BUG(); - if ( from_state >= DASD_STATE_READY && + if ( device->level >= DASD_STATE_READY && to_state <= DASD_STATE_ACCEPT ) if ( dasd_state_ready_to_accept(device)) BUG(); - if ( from_state >= DASD_STATE_ACCEPT && + if ( device->level >= DASD_STATE_ACCEPT && to_state <= DASD_STATE_KNOWN ) if ( dasd_state_accept_to_known(device)) BUG(); - if ( from_state >= DASD_STATE_KNOWN && + if ( device->level >= DASD_STATE_KNOWN && to_state <= DASD_STATE_NEW ) if ( dasd_state_known_to_new(device)) BUG(); - if ( from_state >= DASD_STATE_NEW && + if ( device->level >= DASD_STATE_NEW && to_state <= DASD_STATE_DEL) - if (dasd_state_new_to_del(device_addr)) + if (dasd_state_new_to_del(device_addr, devno)) BUG(); goto out; out: return rc; } -/* SECTION: Procfs stuff */ +/******************************************************************************** + * SECTION: Procfs stuff + ********************************************************************************/ +#ifdef CONFIG_PROC_FS + typedef struct { char *data; int len; @@ -3640,29 +4505,53 @@ int index = 0; MOD_INC_USE_COUNT; - spin_lock_irqsave(&discipline_lock,flags); + + spin_lock_irqsave(&discipline_lock, + flags); + info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t)); + if (info == NULL) { - printk (KERN_WARNING "No memory available for data\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory available for data (tempinfo)"); + + spin_unlock_irqrestore(&discipline_lock, + flags); + MOD_DEC_USE_COUNT; + return -ENOMEM; + } else { file->private_data = (void *) info; } - list_for_each (l, &dasd_major_info[0].list) { + list_for_each (l, &dasd_major_info) { size += 128 * 1 << (MINORBITS - DASD_PARTN_BITS); } info->data = (char *) vmalloc (size); - DASD_DRIVER_DEBUG_EVENT (1, dasd_devices_open, "area: %p, size 0x%x", - info->data, - size); + if (size && info->data == NULL) { - printk (KERN_WARNING "No memory available for data\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory available for data (info->data)"); + vfree (info); + + spin_unlock_irqrestore(&discipline_lock, + flags); + MOD_DEC_USE_COUNT; + return -ENOMEM; } - list_for_each (l, &dasd_major_info[0].list) { + + DBF_EVENT (DBF_NOTICE, + "procfs-area: %p, size 0x%x allocated", + info->data, + size); + + list_for_each (l, &dasd_major_info) 
{ temp = list_entry (l, major_info_t, list); for (i = 0; i < 1 << (MINORBITS - DASD_PARTN_BITS); i++) { dasd_device_t *device; @@ -3673,8 +4562,8 @@ continue; features = dasd_features_from_devno(devno); - if (features < DASD_DEFAULT_FEATURES) - features = DASD_DEFAULT_FEATURES; + if (features < DASD_FEATURE_DEFAULT) + features = DASD_FEATURE_DEFAULT; device = temp->dasd_device[i]; if (device) { @@ -3702,45 +4591,45 @@ sprintf (info->data + len, "detected"); break; + case DASD_STATE_BOXED: + len += + sprintf (info->data + len, + "boxed"); + break; case DASD_STATE_ACCEPT: - len += sprintf (info->data + len,"accepted"); + len += sprintf (info->data + len, + "accepted"); break; case DASD_STATE_INIT: - len += - sprintf (info->data + len, - "busy "); + len += sprintf (info->data + len, + "busy "); break; case DASD_STATE_READY: + len += sprintf (info->data + len, + "ready "); + break; case DASD_STATE_ONLINE: - if ( atomic_read(&device->plugged) ) - len += - sprintf (info->data + len, - "fenced "); - else - len += - sprintf (info->data + len, - "active "); - if ( device->sizes.bp_block == 512 || - device->sizes.bp_block == 1024 || - device->sizes.bp_block == 2048 || - device->sizes.bp_block == 4096 ) - len += - sprintf (info->data + len, - "at blocksize: %d, %ld blocks, %ld MB", - device->sizes.bp_block, - device->sizes.blocks, - ((device-> - sizes.bp_block >> 9) * - device->sizes. - blocks) >> 11); - else - len += - sprintf (info->data + len, - "n/f "); + len += sprintf (info->data + len, + "active "); + + if (dasd_check_bp_block (device)) + len += + sprintf (info->data + len, + "n/f "); + else + len += + sprintf (info->data + len, + "at blocksize: %d, %ld blocks, %ld MB", + device->sizes.bp_block, + device->sizes.blocks, + ((device-> + sizes.bp_block >> 9) * + device->sizes. + blocks) >> 11); break; default: len += - sprintf (info->data + len, + sprintf (info->data + len, "no stat"); break; } @@ -3769,7 +4658,9 @@ index += 1 << (MINORBITS - DASD_PARTN_BITS); } info->len = len; - spin_unlock_irqrestore(&discipline_lock,flags); + + spin_unlock_irqrestore(&discipline_lock, + flags); return rc; } @@ -3793,22 +4684,250 @@ } } +/* + * scan for device range in given string (e.g. 0x0150-0x0155). + * devnos are always hex and leading 0x are ignored. 
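dasd_parse_range, which follows, implements exactly this scan; the same parse reduced to stand-alone C looks like this (dasd_strtoul, the driver's own helper, additionally extracts per-range feature flags, so plain strtoul stands in for it here):

#include <stdio.h>
#include <stdlib.h>

/* Parse "0150" or "0x0150-0x0155" into a from/to pair; devnos are hex. */
static int
parse_range(const char *str, unsigned long *from, unsigned long *to)
{
	char *end;

	*from = *to = strtoul(str, &end, 16);	/* leading 0x is accepted */
	if (end == str)
		return -1;	/* no hex digits at all */
	if (*end == '-')
		*to = strtoul(end + 1, &end, 16);
	return 0;
}

int main(void)
{
	unsigned long from, to;

	if (parse_range("0x0150-0x0155", &from, &to) == 0)
		printf("range %04lx-%04lx\n", from, to);
	return 0;
}

In the kernel version below, failure is reported through the error-pointer idiom instead: ERR_PTR(-EINVAL) encodes the errno into the returned pointer, and the callers test it with IS_ERR.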
+ */ +static char * +dasd_parse_range (char *buffer, dasd_range_t *range) +{ + char *str; + + /* remove optional 'device ' and 'range=' and search for next digit */ + for (str = buffer + 4; isspace(*str); str++); + + if (strncmp (str, "device ", 7) == 0) + for (str = str + 7; isspace(*str); str++); + + if (strncmp (str, "range=", 6) == 0) + for (str = str + 6; isspace(*str); str++); + + range->to = range->from = dasd_strtoul (str, + &str, + &(range->features)); + + if (*str == '-') { + str++; + range->to = dasd_strtoul (str, + &str, + &(range->features)); + } + + /* remove blanks after device range */ + for (; isspace(*str); str++); + + if (range->from < 0 || range->to < 0) { + MESSAGE (KERN_WARNING, + "/proc/dasd/devices: range parse error in '%s'", + buffer); + return ERR_PTR (-EINVAL); + } + + return str; + +} /* end dasd_parse_range */ + +/* + * Enable / Disable the given devices + */ +static void +dasd_proc_set (char *buffer) +{ + dasd_range_t range; + char *str; + + str = dasd_parse_range (buffer, + &range); + + /* Negative numbers in str/from/to indicate errors */ + if (IS_ERR (str) || (range.from < 0) || (range.to < 0) + || (range.from > 65535) || (range.to > 65535)) + return; + + if (strncmp (str, "on", 2) == 0) { + dasd_enable_ranges (&range, NULL, 0); + + } else if (strncmp (str, "off", 3) == 0) { + dasd_disable_ranges (&range, NULL, 0, 1); + + } else { + MESSAGE (KERN_WARNING, + "/proc/dasd/devices: " + "only 'on' and 'off' are allowed in 'set' " + "command ('%s'/'%s')", + buffer, + str); + } + + return; + +} /* end dasd_proc_set */ + +/* + * Add the given devices + */ +static void +dasd_proc_add (char *buffer) +{ + dasd_range_t range; + char *str; + + str = dasd_parse_range (buffer, + &range); + + /* Negative numbers in str/from/to indicate errors */ + if (IS_ERR (str) || (range.from < 0) || (range.to < 0) + || (range.from > 65535) || (range.to > 65535)) + return; + + dasd_add_range (range.from, range.to, range.features); + dasd_enable_ranges (&range, NULL, 0); + + return; + +} /* end dasd_proc_add */ + +/* + * Break the lock of a given 'boxed' dasd. + * If the dasd is not in status 'boxed' just return.
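With dasd_proc_set and dasd_proc_add in place, devices can be enabled, disabled or added at runtime by a plain write to /proc/dasd/devices. A small user-space illustration, assuming the verb syntax accepted by the parser above (the brk verb handled next takes a single devno plus a discipline name):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "add range=0x0150-0x0155";	/* or e.g. "set 0x0150 off" */
	int fd = open("/proc/dasd/devices", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/dasd/devices");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}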
+ */ +static int +dasd_break_boxed (dasd_range_t *range, + dasd_device_t *device) +{ + int rc = 0; + dasd_discipline_t *discipline; + struct list_head *lh = dasd_disc_head.next; + + /* check device status */ + if (device->level != DASD_STATE_BOXED) { + MESSAGE (KERN_WARNING, + "/proc/dasd/devices: the given device (%04X) " + "is not 'boxed'", + device->devinfo.devno); + rc = -EINVAL; + goto out; + } + + /* force eckd discipline */ + do { + discipline = list_entry(lh, + dasd_discipline_t, + list); + + if (strncmp (discipline->name, range->discipline, 4) == 0) + break; /* discipline found */ + + lh = lh->next; /* check next discipline in list */ + if (lh == &dasd_disc_head) { + discipline = NULL; + break; + } + } while ( 1 ); + device->discipline = discipline; + + if (device->discipline == NULL) { + MESSAGE (KERN_WARNING, "%s", + "/proc/dasd/devices: discipline not found " + "in discipline list"); + rc = -EINVAL; + goto out; + } + + /* register the int handler to enable IO */ + rc = s390_request_irq_special (device->devinfo.irq, + device->discipline->int_handler, + dasd_not_oper_handler, + 0, + DASD_NAME, + &device->dev_status); + if ( rc ) + goto out; + + rc = dasd_steal_lock (device); + + /* unregister the int handler to enable re-sensing */ + free_irq (device->devinfo.irq, + &device->dev_status); + + device->discipline = NULL; + device->level = DASD_STATE_NEW; + + out: + return rc; + +} /* end dasd_break_boxed */ + +/* + * Handle the procfs call 'brk <devno> <discipline>'. + */ +static void +dasd_proc_brk (char *buffer) +{ + char *str; + dasd_range_t range; + dasd_device_t *device; + int rc = 0; + + str = dasd_parse_range (buffer, + &range); + + if (IS_ERR (str)) + return; + + if (range.from != range.to) { + MESSAGE (KERN_WARNING, "%s", + "/proc/dasd/devices: 'brk <devno> <discipline>' " + "is only allowed for a single device (no ranges)"); + return; + } + + /* check for discipline = 'eckd' */ + if (strncmp(str, "eckd", 4) != 0) { + MESSAGE (KERN_WARNING, + "/proc/dasd/devices: 'brk <devno> <discipline>' " + "is only allowed for 'eckd' (%s)", + str); + return; + } + + memcpy (range.discipline, "ECKD", 4); + + device = *(dasd_device_from_devno (range.from)); + if (device == NULL) { + MESSAGE (KERN_WARNING, + "/proc/dasd/devices: no device found for devno (%04X)", + range.from); + return; + } + + rc = dasd_break_boxed (&range, + device); + if (rc == 0) { + /* trigger CIO to resense the device */ + s390_trigger_resense (device->devinfo.irq); + + // get the device online now + dasd_enable_ranges (&range, + NULL, + 0); + } + +} /* end dasd_proc_brk */ + static ssize_t dasd_devices_write (struct file *file, const char *user_buf, size_t user_len, loff_t * offset) { char *buffer; - int off = 0; - char *temp; - dasd_range_t range; - int features; if (user_len > PAGE_SIZE) return -EINVAL; - + buffer = vmalloc (user_len+1); if (buffer == NULL) return -ENOMEM; + if (copy_from_user (buffer, user_buf, user_len)) { vfree (buffer); return -EFAULT; } @@ -3821,60 +4940,29 @@ buffer[user_len] = '\0'; } - printk (KERN_INFO PRINTK_HEADER "/proc/dasd/devices: '%s'\n", buffer); - if (strncmp (buffer, "set ", 4) && strncmp (buffer, "add ", 4)) { - printk (KERN_WARNING PRINTK_HEADER - "/proc/dasd/devices: only 'set' and 'add' are supported verbs\n"); - return -EINVAL; - } - off += 4; - while (buffer[off] && !isalnum (buffer[off])) - off++; - if (!strncmp (buffer + off, "device", strlen ("device"))) { - off += strlen ("device"); - while (buffer[off] && !isalnum (buffer[off])) - off++; - } - if (!strncmp (buffer + off, "range=", strlen ("range="))) { - off += strlen ("range="); -
while (buffer[off] && !isalnum (buffer[off])) - off++; - } - - temp = buffer + off; - range.from = dasd_strtoul (temp, &temp, &features); - range.to = range.from; - - if (*temp == '-') { - temp++; - range.to = dasd_strtoul (temp, &temp, &features); - } + MESSAGE (KERN_INFO, + "/proc/dasd/devices: '%s'", + buffer); + + if (strncmp (buffer, "set ", 4) == 0) { + /* handle 'set on/off' */ + dasd_proc_set (buffer); + + } else if (strncmp (buffer, "add ", 4) == 0) { + /* handle 'add <devno-range>' */ + dasd_proc_add (buffer); + + } else if (strncmp (buffer, "brk ", 4) == 0) { + /* handle 'brk <devno> <discipline>' */ + dasd_proc_brk (buffer); - if (range.from == -EINVAL || - range.to == -EINVAL ) { - - printk (KERN_WARNING PRINTK_HEADER - "/proc/dasd/devices: range parse error in '%s'\n", - buffer); } else { - off = (long) temp - (long) buffer; - if (!strncmp (buffer, "add", strlen ("add"))) { - dasd_add_range (range.from, range.to, features); - dasd_enable_ranges (&range, NULL, 0); - } else { - while (buffer[off] && !isalnum (buffer[off])) - off++; - if (!strncmp (buffer + off, "on", strlen ("on"))) { - dasd_enable_ranges (&range, NULL, 0); - } else if (!strncmp (buffer + off, "off", strlen ("off"))) { - dasd_disable_ranges (&range, NULL, 0, 1); - } else { - printk (KERN_WARNING PRINTK_HEADER - "/proc/dasd/devices: parse error in '%s'\n", - buffer); - } - } - } + MESSAGE (KERN_WARNING, "%s", + "/proc/dasd/devices: only 'set', 'add' and " + "'brk' are supported verbs"); + vfree (buffer); + return -EINVAL; + } vfree (buffer); return user_len; @@ -3895,9 +4983,9 @@ } static struct file_operations dasd_devices_file_ops = { - read:dasd_generic_read, /* read */ + read:dasd_generic_read, /* read */ write:dasd_devices_write, /* write */ - open:dasd_devices_open, /* open */ + open:dasd_devices_open, /* open */ release:dasd_devices_close, /* close */ }; @@ -3914,22 +5002,41 @@ MOD_INC_USE_COUNT; info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t)); + if (info == NULL) { - printk (KERN_WARNING "No memory available for data\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory available for data (tempinfo)"); + MOD_DEC_USE_COUNT; return -ENOMEM; } else { file->private_data = (void *) info; } - info->data = (char *) vmalloc (PAGE_SIZE); /* FIXME! determine space needed in a better way */ + /* FIXME!
determine space needed in a better way */ + info->data = (char *) vmalloc (PAGE_SIZE); + if (info->data == NULL) { - printk (KERN_WARNING "No memory available for data\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory available for data (info->data)"); + vfree (info); file->private_data = NULL; MOD_DEC_USE_COUNT; return -ENOMEM; } + /* check for active profiling */ + if (dasd_profile_level == DASD_PROFILE_OFF) { + + info->len = sprintf (info->data, + "Statistics are off - they might be " + "switched on using 'echo set on > " + "/proc/dasd/statistics'\n"); + return rc; + } + /* prevent counter 'overflow' on output */ for (shift = 0, help = dasd_global_profile.dasd_io_reqs; help > 9999999; help = help >> 1, shift++) ; @@ -3958,7 +5065,15 @@ } len += sprintf (info->data + len, "\n"); - len += sprintf (info->data + len, "Histogram of I/O times (microseconds)\n"); + for (; i < 32; i++) { + len += sprintf (info->data + len, "%7d ", + dasd_global_profile.dasd_io_secs[i] >> shift); + } + len += sprintf (info->data + len, "\n"); + + len += sprintf (info->data + len, + "Histogram of I/O times (microseconds)\n"); + for (i = 0; i < 16; i++) { len += sprintf (info->data + len, "%7d ", dasd_global_profile.dasd_io_times[i] >> shift); @@ -4008,7 +5123,9 @@ len += sprintf (info->data + len, "\n"); len += sprintf (info->data + len, - "Histogram of I/O time between ssch and irq per sector\n"); + "Histogram of I/O time between ssch and irq per " + "sector\n"); + for (i = 0; i < 16; i++) { len += sprintf (info->data + len, "%7d ", dasd_global_profile.dasd_io_time2ps[i] >> shift); @@ -4064,23 +5181,89 @@ if (buffer == NULL) return -ENOMEM; + if (copy_from_user (buffer, user_buf, user_len)) { vfree (buffer); return -EFAULT; } + buffer[user_len] = 0; - printk (KERN_INFO PRINTK_HEADER "/proc/dasd/statictics: '%s'\n", - buffer); - if (strncmp (buffer, "reset", 4)) { - memset (&dasd_global_profile, 0, sizeof (dasd_profile_info_t)); + + MESSAGE (KERN_INFO, + "/proc/dasd/statistics: '%s'", + buffer); + +#ifdef DASD_PROFILE + /* check for valid verbs */ + if (strncmp (buffer, "reset", 5) && + strncmp (buffer, "set ", 4) ) { + + MESSAGE (KERN_WARNING, "%s", + "/proc/dasd/statistics: only 'set' and " + "'reset' are supported verbs"); + + return -EINVAL; } + + if (!strncmp (buffer, "reset", 5)) { + + /* reset the statistics */ + memset (&dasd_global_profile, + 0, + sizeof (dasd_profile_info_t)); + + MESSAGE (KERN_INFO, "%s", + "Statistics reset"); + + } else { + + /* 'set xxx' was given */ + int offset = 4; + + while (buffer[offset] && !isalnum (buffer[offset])) + offset++; + + if (!strncmp (buffer + offset, "on", 2)) { + + /* switch on statistics profiling */ + dasd_profile_level = DASD_PROFILE_ON; + + MESSAGE (KERN_INFO, "%s", + "Statistics switched on"); + + } else if (!strncmp (buffer + offset, "off", 3)) { + + /* switch off and reset statistics profiling */ + memset (&dasd_global_profile, + 0, + sizeof (dasd_profile_info_t)); + + dasd_profile_level = DASD_PROFILE_OFF; + + MESSAGE (KERN_INFO, "%s", + "Statistics switched off"); + + } else { + + MESSAGE (KERN_WARNING, "%s", + "/proc/dasd/statistics: only 'set on' and " + "'set off' are supported verbs"); + } + } +#else + MESSAGE (KERN_WARNING, "%s", + "/proc/dasd/statistics is not activated in this " + "kernel"); + + +#endif /* DASD_PROFILE */ return user_len; } static struct file_operations dasd_statistics_file_ops = { - read:dasd_generic_read, /* read */ - open:dasd_statistics_open, /* open */ + read:dasd_generic_read, /* read */ write:dasd_statistics_write, /*
write */ + open:dasd_statistics_open, /* open */ release:dasd_devices_close, /* close */ }; @@ -4113,6 +5296,11 @@ remove_proc_entry ("dasd", &proc_root); } +#endif /* CONFIG_PROC_FS */ + +/******************************************************************************** + * SECTION: Initializing the driver + ********************************************************************************/ int dasd_request_module ( void *name ) { int rc = -ERESTARTSYS; @@ -4124,16 +5312,17 @@ } while ( (rc=request_module(name)) != 0 ) { DECLARE_WAIT_QUEUE_HEAD(wait); - printk ( KERN_INFO "request_module returned %d for %s\n", + + MESSAGE (KERN_INFO, + "request_module returned %d for %s", rc, (char*)name); + sleep_on_timeout(&wait,5* HZ); /* wait in steps of 5 seconds */ } return rc; } - -/* SECTION: Initializing the driver */ int __init dasd_init (void) { @@ -4142,40 +5331,56 @@ major_info_t *major_info = NULL; struct list_head *l; - printk (KERN_INFO PRINTK_HEADER "initializing...\n"); - dasd_debug_area = debug_register (DASD_NAME, 0, 2, 5 * sizeof (long)); - debug_register_view (dasd_debug_area, &debug_sprintf_view); - debug_register_view (dasd_debug_area, &debug_hex_ascii_view); + MESSAGE (KERN_INFO, "%s", + "initializing..."); init_waitqueue_head (&dasd_init_waitq); + /* register 'common' DASD debug area, used for all DBF_XXX calls */ + dasd_debug_area = debug_register (DASD_NAME, + 0, /* size of debug area */ + 2, /* number of areas */ + 8 * sizeof (long)); + + debug_register_view (dasd_debug_area, + &debug_sprintf_view); + if (dasd_debug_area == NULL) { goto failed; } - DASD_DRIVER_DEBUG_EVENT (0, dasd_init, "%s", - "ENTRY"); - dasd_devfs_handle = devfs_mk_dir (NULL, DASD_NAME, NULL); + + debug_set_level (dasd_debug_area, + DBF_ERR); + + DBF_EVENT (DBF_EMERG, "%s", + "debug area created"); + + dasd_devfs_handle = devfs_mk_dir (NULL, + DASD_NAME, + NULL); + + if (dasd_devfs_handle < 0) { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, "%s", - "no devfs"); + + DBF_EVENT (DBF_ALERT, "%s", + "no devfs"); goto failed; } - list_for_each (l, &dasd_major_info[0].list) { + + list_add_tail(&dasd_major_static.list, &dasd_major_info); + list_for_each (l, &dasd_major_info) { major_info = list_entry (l, major_info_t, list); if ((rc = dasd_register_major (major_info)) > 0) { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, - "major %d: success", - major_info->gendisk.major); - printk (KERN_INFO PRINTK_HEADER - "Registered successfully to major no %u\n", - major_info->gendisk.major); + + MESSAGE (KERN_INFO, + "Registered successfully to major no %u", + major_info->gendisk.major); } else { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, - "major %d: failed", - major_info->gendisk.major); - printk (KERN_WARNING PRINTK_HEADER - "Couldn't register successfully to major no %d\n", - major_info->gendisk.major); + + MESSAGE (KERN_WARNING, + "Couldn't register successfully to " + "major no %d", + major_info->gendisk.major); + /* revert registration of major infos */ goto failed; } @@ -4185,18 +5390,24 @@ #endif /* !
MODULE */ rc = dasd_parse (dasd); if (rc) { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, "%s", - "invalid range found"); + + DBF_EVENT (DBF_ALERT, "%s", + "invalid range found"); goto failed; } +#ifdef CONFIG_PROC_FS rc = dasd_proc_init (); if (rc) { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, "%s", "no proc-FS"); + + DBF_EVENT (DBF_ALERT, "%s", + "no proc-FS"); goto failed; } +#endif /* CONFIG_PROC_FS */ + genhd_dasd_name = dasd_device_name; - genhd_dasd_ioctl = dasd_ioctl; + genhd_dasd_ioctl = dasd_ioctl; if (dasd_autodetect) { /* update device range to all devices */ for (irq = get_irq_first (); irq != -ENODEV; @@ -4204,10 +5415,14 @@ int devno = get_devno_by_irq (irq); int index = dasd_devindex_from_devno (devno); if (index == -ENODEV) { /* not included in ranges */ - DASD_DRIVER_DEBUG_EVENT (2, dasd_init, - "add %04x to range", - devno); - dasd_add_range (devno, devno, DASD_DEFAULT_FEATURES); + + DBF_EVENT (DBF_CRIT, + "add %04x to range", + devno); + + dasd_add_range (devno, + devno, + DASD_FEATURE_DEFAULT); } } } @@ -4216,15 +5431,15 @@ #ifdef CONFIG_DASD_DIAG rc = dasd_diag_init (); if (rc == 0) { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, - "DIAG discipline %s", - "success"); - printk (KERN_INFO PRINTK_HEADER - "Registered DIAG discipline successfully\n"); + + MESSAGE (KERN_INFO, "%s", + "Registered DIAG discipline successfully"); + } else { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, - "DIAG discipline %s", - "failed"); + + DBF_EVENT (DBF_ALERT, "%s", + "Register DIAG discipline failed"); + goto failed; } #endif /* CONFIG_DASD_DIAG */ @@ -4235,13 +5450,14 @@ #ifdef CONFIG_DASD_ECKD rc = dasd_eckd_init (); if (rc == 0) { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, - "ECKD discipline %s", "success"); - printk (KERN_INFO PRINTK_HEADER - "Registered ECKD discipline successfully\n"); + + MESSAGE (KERN_INFO, "%s", + "Registered ECKD discipline successfully"); } else { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, - "ECKD discipline %s", "failed"); + + DBF_EVENT (DBF_ALERT, "%s", + "Register ECKD discipline failed"); + goto failed; } #endif /* CONFIG_DASD_ECKD */ @@ -4251,14 +5467,14 @@ #ifdef CONFIG_DASD_FBA rc = dasd_fba_init (); if (rc == 0) { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, - "FBA discipline %s", "success"); - - printk (KERN_INFO PRINTK_HEADER - "Registered FBA discipline successfully\n"); + + MESSAGE (KERN_INFO, "%s", + "Registered FBA discipline successfully"); } else { - DASD_DRIVER_DEBUG_EVENT (1, dasd_init, - "FBA discipline %s", "failed"); + + DBF_EVENT (DBF_ALERT, "%s", + "Register FBA discipline failed"); + goto failed; } #endif /* CONFIG_DASD_FBA */ @@ -4276,12 +5492,16 @@ rc = 0; goto out; failed: - printk (KERN_INFO PRINTK_HEADER - "initialization not performed due to errors\n"); + + MESSAGE (KERN_INFO, "%s", + "initialization not performed due to errors"); + cleanup_dasd (); out: - DASD_DRIVER_DEBUG_EVENT (0, dasd_init, "%s", "LEAVE"); - printk (KERN_INFO PRINTK_HEADER "initialization finished\n"); + + MESSAGE (KERN_INFO, "%s", + "initialization finished"); + return rc; } @@ -4293,57 +5513,60 @@ struct list_head *l,*n; dasd_range_t *range; - printk (KERN_INFO PRINTK_HEADER "shutting down\n"); - DASD_DRIVER_DEBUG_EVENT(0,"cleanup_dasd","%s","ENTRY"); - dasd_disable_ranges (&dasd_range_head, NULL, 1, 1); - if (MACHINE_IS_VM) { + MESSAGE (KERN_INFO, "%s", + "shutting down"); + + dasd_disable_ranges (&dasd_range_head, + NULL, 1, 1); + #ifdef CONFIG_DASD_DIAG + if (MACHINE_IS_VM) { dasd_diag_cleanup (); - DASD_DRIVER_DEBUG_EVENT (1, "cleanup_dasd", - "DIAG discipline %s", "success"); - 
printk (KERN_INFO PRINTK_HEADER - "De-Registered DIAG discipline successfully\n"); -#endif /* CONFIG_DASD_ECKD_BUILTIN */ + + MESSAGE (KERN_INFO, "%s", + "De-Registered DIAG discipline successfully"); } +#endif /* CONFIG_DASD_DIAG */ + #ifdef CONFIG_DASD_FBA dasd_fba_cleanup (); - DASD_DRIVER_DEBUG_EVENT (1, "cleanup_dasd", - "FBA discipline %s", "success"); - printk (KERN_INFO PRINTK_HEADER - "De-Registered FBA discipline successfully\n"); -#endif /* CONFIG_DASD_ECKD_BUILTIN */ + + MESSAGE (KERN_INFO, "%s", + "De-Registered FBA discipline successfully"); +#endif /* CONFIG_DASD_FBA */ + #ifdef CONFIG_DASD_ECKD dasd_eckd_cleanup (); - DASD_DRIVER_DEBUG_EVENT (1, "cleanup_dasd", - "ECKD discipline %s", "success"); - printk (KERN_INFO PRINTK_HEADER - "De-Registered ECKD discipline successfully\n"); -#endif /* CONFIG_DASD_ECKD_BUILTIN */ - genhd_dasd_name = NULL; - genhd_dasd_ioctl = NULL; + MESSAGE (KERN_INFO, "%s", + "De-Registered ECKD discipline successfully"); +#endif /* CONFIG_DASD_ECKD */ + + genhd_dasd_name = NULL; + genhd_dasd_ioctl = NULL; + +#ifdef CONFIG_PROC_FS dasd_proc_cleanup (); +#endif /* CONFIG_PROC_FS */ - list_for_each_safe (l, n, &dasd_major_info[0].list) { + list_for_each_safe (l, n, &dasd_major_info) { major_info = list_entry (l, major_info_t, list); for (i = 0; i < DASD_PER_MAJOR; i++) { kfree (major_info->dasd_device[i]); } if ((major_info->flags & DASD_MAJOR_INFO_REGISTERED) && (rc = dasd_unregister_major (major_info)) == 0) { - DASD_DRIVER_DEBUG_EVENT (1, "cleanup_dasd", - "major %d: success", - major_info->gendisk.major); - printk (KERN_INFO PRINTK_HEADER - "Unregistered successfully from major no %u\n", + + MESSAGE (KERN_INFO, + "Unregistered successfully from major no %u", major_info->gendisk.major); } else { - DASD_DRIVER_DEBUG_EVENT (1, "cleanup_dasd", - "major %d: failed", - major_info->gendisk.major); - printk (KERN_WARNING PRINTK_HEADER - "Couldn't unregister successfully from major no %d rc = %d\n", - major_info->gendisk.major, rc); + + MESSAGE (KERN_WARNING, + "Couldn't unregister successfully from major " + "no %d rc = %d", + major_info->gendisk.major, + rc); } } list_for_each_safe (l, n, &dasd_range_head.list) { @@ -4360,10 +5583,14 @@ #endif /* MODULE */ if (dasd_devfs_handle) devfs_unregister(dasd_devfs_handle); - if (dasd_debug_area != NULL ) + + if (dasd_debug_area != NULL ) { debug_unregister(dasd_debug_area); - printk (KERN_INFO PRINTK_HEADER "shutdown completed\n"); - DASD_DRIVER_DEBUG_EVENT(0,"cleanup_dasd","%s","LEAVE"); + dasd_debug_area = NULL; + } + + MESSAGE (KERN_INFO, "%s", + "shutdown completed"); } #ifdef MODULE diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/block/dasd_diag.c linux.21rc5-ac2/drivers/s390/block/dasd_diag.c --- linux.21rc5/drivers/s390/block/dasd_diag.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/block/dasd_diag.c 2003-04-25 13:49:34.000000000 +0100 @@ -5,13 +5,16 @@ * ...............: by Hartmunt Penner * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 - + * + * $Revision: 1.47 $ + * * History of changes * 07/13/00 Added fixup sections for diagnoses ans saved some registers * 07/14/00 fixed constraints in newly generated inline asm * 10/05/00 adapted to 'new' DASD driver * fixed return codes of dia250() * fixed partition handling and HDIO_GETGEO + * 10/01/02 fixed Bugzilla 1341 */ #include @@ -20,7 +23,7 @@ #include #include -#include /* HDIO_GETGEO */ +#include #include #include #include @@ -29,6 +32,7 @@ #include 
#include #include +#include #include "dasd_int.h" #include "dasd_diag.h" @@ -38,62 +42,50 @@ #endif /* PRINTK_HEADER */ #define PRINTK_HEADER DASD_NAME"(diag):" +#ifdef MODULE +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12)) +MODULE_LICENSE("GPL"); +#endif +#endif + dasd_discipline_t dasd_diag_discipline; typedef struct dasd_diag_private_t { - dasd_diag_characteristics_t rdc_data; + diag210_t rdc_data; diag_rw_io_t iob; diag_init_io_t iib; unsigned long *label; } dasd_diag_private_t; static __inline__ int -dia210 (void *devchar) -{ - int rc; - - __asm__ __volatile__ (" diag %1,0,0x210\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - ".section .fixup,\"ax\"\n" - "2: lhi %0,3\n" - " bras 1,3f\n" - " .long 1b\n" - "3: l 1,0(1)\n" - " br 1\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - " .align 4\n" - " .long 0b,2b\n" ".previous\n":"=d" (rc) - :"d" ((void *) __pa (devchar)) - :"1"); - return rc; -} - -static __inline__ int dia250 (void *iob, int cmd) { - __asm__ __volatile__ (" lr 0,%1\n" - " diag 0,%0,0x250\n" + unsigned long addr; + int rc; + + addr = __pa(iob); + __asm__ __volatile__ (" lhi %0,11\n" + " lr 0,%2\n" + " diag 0,%1,0x250\n" "0: ipm %0\n" " srl %0,28\n" " or %0,1\n" "1:\n" - ".section .fixup,\"ax\"\n" - "2: lhi %0,3\n" - " bras 1,3f\n" - " .long 1b\n" - "3: l 1,0(1)\n" - " br 1\n" - ".previous\n" +#ifndef CONFIG_ARCH_S390X ".section __ex_table,\"a\"\n" " .align 4\n" - " .long 0b,2b\n" ".previous\n":"+d" (cmd) - :"d" ((void *) __pa (iob)) - :"0", "1", "cc"); - return cmd; + " .long 0b,1b\n" + ".previous\n" +#else + ".section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 0b,1b\n" + ".previous\n" +#endif + : "+&d" (rc) + : "d" (cmd), "d" (addr) : "0", "1", "cc"); + return rc; } static __inline__ int @@ -144,13 +136,18 @@ iob->key = 0; iob->flags = 2; iob->block_count = cqr->cplength >> 1; - iob->interrupt_params = (u32) cqr; + iob->interrupt_params = (u32)(addr_t) cqr; iob->bio_list = __pa (cqr->cpaddr); - asm volatile ("STCK %0":"=m" (cqr->startclk)); + cqr->startclk = get_clock (); + rc = dia250 (iob, RW_BIO); if (rc > 8) { - PRINT_WARN ("dia250 returned CC %d\n", rc); + + MESSAGE (KERN_WARNING, + "dia250 returned CC %d", + rc); + check_then_set (&cqr->status, CQR_STATUS_QUEUED, CQR_STATUS_ERROR); } else if (rc == 0) { @@ -176,41 +173,56 @@ int done_fast_io = 0; int devno; unsigned long flags; + int subcode; + /* + * Get the external interruption subcode. VM stores + * this in the 'cpu address' field associated with + * the external interrupt. For diag 250 the subcode + * needs to be 3. 
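VM posts the diag 250 completion as external interrupt 0x2603 and deposits the subcode in the cpu-address word of the lowcore, so the handler above bails out unless that subcode is 3. Reduced to its core, the filter is a single mask and compare (a sketch of the same test, with the constants spelled out):

/* Accept only diag 250 completions: the subcode byte must be 0x03. */
static inline int
is_diag250_interrupt(unsigned int cpu_addr)
{
	return (cpu_addr & 0xff00) == 0x0300;
}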
+ */ + subcode = S390_lowcore.cpu_addr; + if ((subcode & 0xff00) != 0x0300) + return; irq_enter(cpu, -1); if (!ip) { /* no intparm: unsolicited interrupt */ - printk (KERN_WARNING PRINTK_HEADER - "caught unsolicited interrupt\n"); + + MESSAGE (KERN_DEBUG, "%s", + "caught unsolicited interrupt"); + irq_exit(cpu, -1); return; } if (ip & 0x80000001) { - printk (KERN_WARNING PRINTK_HEADER - "caught spurious interrupt with parm %08x\n", ip); + + MESSAGE (KERN_DEBUG, + "caught spurious interrupt with parm %08x", + ip); + irq_exit(cpu, -1); return; } - cqr = (ccw_req_t *) ip; + cqr = (ccw_req_t *)(addr_t) ip; device = (dasd_device_t *) cqr->device; devno = device->devinfo.devno; + if (device == NULL) { - printk (KERN_WARNING PRINTK_HEADER - " INT on devno 0x%04X = /dev/%s (%d:%d)" - " belongs to NULL device\n", - devno, device->name, MAJOR (device->kdev), - MINOR (device->kdev)); + + DEV_MESSAGE (KERN_WARNING, device, "%s", + " belongs to NULL device"); } + if (strncmp (device->discipline->ebcname, (char *) &cqr->magic, 4)) { - printk (KERN_WARNING PRINTK_HEADER - "0x%04X : /dev/%s (%d:%d)" - " magic number of ccw_req_t 0x%08X doesn't match" - " discipline 0x%08X\n", - devno, device->name, - MAJOR (device->kdev), MINOR (device->kdev), - cqr->magic, *(int *) (&device->discipline->name)); + + DEV_MESSAGE (KERN_WARNING, device, + " magic number of ccw_req_t 0x%08X doesn't match" + " discipline 0x%08X", + cqr->magic, + *(int *) (&device->discipline->name)); + irq_exit(cpu, -1); return; } @@ -219,7 +231,7 @@ s390irq_spin_lock_irqsave (device->devinfo.irq, flags); - asm volatile ("STCK %0":"=m" (cqr->stopclk)); + cqr->stopclk = get_clock (); switch (status) { case 0x00: @@ -254,52 +266,60 @@ { int rc = 0; int bsize; - int label_block; dasd_diag_private_t *private; - dasd_diag_characteristics_t *rdc_data; + diag210_t *rdc_data; ccw_req_t *cqr; long *label; int sb; if (device == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "Null device pointer passed to characteristics checker\n"); + + MESSAGE (KERN_WARNING, "%s", + "Null device pointer passed to characteristics " + "checker"); + return -ENODEV; } device->private = kmalloc (sizeof (dasd_diag_private_t), GFP_KERNEL); + if (device->private == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "memory allocation failed for private data\n"); + + MESSAGE (KERN_WARNING, "%s", + "memory allocation failed for private data"); + rc = -ENOMEM; goto fail; } private = (dasd_diag_private_t *) device->private; rdc_data = (void *) &(private->rdc_data); - rdc_data->dev_nr = device->devinfo.devno; - rdc_data->rdc_len = sizeof (dasd_diag_characteristics_t); + rdc_data->vrdcdvno = device->devinfo.devno; + rdc_data->vrdclen = sizeof (diag210_t); - rc = dia210 (rdc_data); + rc = diag210 (rdc_data); if ( rc != 0) { goto fail; } - if (rdc_data->vdev_class != DEV_CLASS_FBA && - rdc_data->vdev_class != DEV_CLASS_ECKD && - rdc_data->vdev_class != DEV_CLASS_CKD) { + if (rdc_data->vrdcvcla != DEV_CLASS_FBA && + rdc_data->vrdcvcla != DEV_CLASS_ECKD && + rdc_data->vrdcvcla != DEV_CLASS_CKD) { rc = -ENOTSUPP; goto fail; } -#if 0 - printk (KERN_INFO PRINTK_HEADER - "%04X: %04X on real %04X/%02X\n", - rdc_data->dev_nr, - rdc_data->vdev_type, rdc_data->rdev_type, rdc_data->rdev_model); -#endif + + DBF_EVENT (DBF_INFO, + "%04X: %04X on real %04X/%02X", + rdc_data->vrdcdvno, + rdc_data->vrdcvtyp, + rdc_data->vrdccrty, + rdc_data->vrdccrmd); + + /* Figure out position of label block */ - if (private->rdc_data.vdev_class == DEV_CLASS_FBA) { + if (private->rdc_data.vrdcvcla == DEV_CLASS_FBA) { 
device->sizes.pt_block = 1; - } else if (private->rdc_data.vdev_class == DEV_CLASS_ECKD || - private->rdc_data.vdev_class == DEV_CLASS_CKD) { + } else if (private->rdc_data.vrdcvcla == DEV_CLASS_ECKD || + private->rdc_data.vrdcvcla == DEV_CLASS_CKD) { device->sizes.pt_block = 2; } else { BUG(); @@ -319,9 +339,12 @@ cqr = dasd_alloc_request (dasd_diag_discipline.name, sizeof (diag_bio_t) / sizeof (ccw1_t), 0, device); + if (cqr == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "No memory to allocate initialization request\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory to allocate initialization request"); + free_page ((long) private->label); rc = -ENOMEM; goto fail; @@ -334,11 +357,12 @@ cqr->device = device; cqr->status = CQR_STATUS_FILLED; memset (iob, 0, sizeof (diag_rw_io_t)); - iob->dev_nr = rdc_data->dev_nr; + iob->dev_nr = rdc_data->vrdcdvno; iob->block_count = 1; - iob->interrupt_params = (u32) cqr; + iob->interrupt_params = (u32)(addr_t) cqr; iob->bio_list = __pa (bio); rc = dia250 (iob, RW_BIO); + dasd_free_request(cqr, device); if (rc == 0) { if (label[3] != bsize || label[0] != 0xc3d4e2f1 || /* != CMS1 */ @@ -357,13 +381,16 @@ device->sizes.blocks = label[7]; device->sizes.bp_block = bsize; device->sizes.s2b_shift = 0; /* bits to shift 512 to get a block */ + for (sb = 512; sb < bsize; sb = sb << 1) + device->sizes.s2b_shift++; - printk (KERN_INFO PRINTK_HEADER - "/dev/%s (%04X): capacity (%dkB blks): %ldkB\n", - device->name, device->devinfo.devno, - (device->sizes.bp_block >> 10), - (device->sizes.blocks << device->sizes.s2b_shift) >> 1); + + DEV_MESSAGE (KERN_INFO, device, + "capacity (%dkB blks): %ldkB", + (device->sizes.bp_block >> 10), + (device->sizes.blocks << device->sizes.s2b_shift) >> 1); + free_page ((long) private->label); rc = 0; goto out; @@ -380,12 +407,9 @@ dasd_diag_fill_geometry (struct dasd_device_t *device, struct hd_geometry *geo) { int rc = 0; - dasd_diag_private_t *private = (dasd_diag_private_t *) device->private; unsigned long sectors = device->sizes.blocks << device->sizes.s2b_shift; unsigned long tracks = sectors >> 6; - unsigned long trk_rem = sectors & ((1 << 6) - 1); /* 64k per track */ unsigned long cyls = tracks >> 4; - unsigned long cyls_rem = tracks & ((1 << 4) - 1); /* 16 head per cyl */ switch (device->sizes.bp_block) { case 512: @@ -420,14 +444,19 @@ diag_bio_t *bio; int bhct; long size; + unsigned long reloc_sector = req->sector + + device->major_info->gendisk.part[MINOR (req->rq_dev)].start_sect; if (!noblk) { - PRINT_ERR ("No blocks to read/write...returning\n"); - return NULL; + + MESSAGE (KERN_ERR, "%s", + "No blocks to read/write...returning"); + + return ERR_PTR(-EINVAL); } if (req->cmd == READ) { rw_cmd = MDSK_READ_REQ; - } else if (req->cmd == WRITE) { + } else { rw_cmd = MDSK_WRITE_REQ; } bhct = 0; @@ -439,11 +468,11 @@ /* Build the request */ rw_cp = dasd_alloc_request (dasd_diag_discipline.name, bhct << 1, 0, device); if (!rw_cp) { - return NULL; + return ERR_PTR(-ENOMEM); } bio = (diag_bio_t *) (rw_cp->cpaddr); - block = req->sector >> device->sizes.s2b_shift; + block = reloc_sector >> device->sizes.s2b_shift; for (bh = req->bh; bh; bh = bh->b_reqnext) { memset (bio, 0, sizeof (diag_bio_t)); for (size = 0; size < bh->b_size; size += byt_per_blk) { @@ -454,7 +483,9 @@ block++; } } - asm volatile ("STCK %0":"=m" (rw_cp->buildclk)); + + rw_cp->buildclk = get_clock (); + rw_cp->device = device; rw_cp->expires = 50 * TOD_SEC; /* 50 seconds */ rw_cp->req = req; @@ -463,14 +494,15 @@ } static int -dasd_diag_fill_info (dasd_device_t 
* device, dasd_information_t * info) +dasd_diag_fill_info (dasd_device_t * device, dasd_information2_t * info) { int rc = 0; info->FBA_layout = 1; - info->characteristics_size = sizeof (dasd_diag_characteristics_t); + info->format = DASD_FORMAT_LDL; + info->characteristics_size = sizeof (diag210_t); memcpy (info->characteristics, &((dasd_diag_private_t *) device->private)->rdc_data, - sizeof (dasd_diag_characteristics_t)); + sizeof (diag210_t)); info->confdata_size = 0; return rc; } @@ -501,26 +533,32 @@ examine_error:dasd_diag_examine_error, build_cp_from_req:dasd_diag_build_cp_from_req, dump_sense:dasd_diag_dump_sense, - int_handler:dasd_ext_handler, + int_handler:(void *) dasd_ext_handler, fill_info:dasd_diag_fill_info, + list:LIST_HEAD_INIT(dasd_diag_discipline.list), }; int dasd_diag_init (void) { int rc = 0; + if (MACHINE_IS_VM) { - printk (KERN_INFO PRINTK_HEADER - "%s discipline initializing\n", - dasd_diag_discipline.name); + + MESSAGE (KERN_INFO, + "%s discipline initializing", + dasd_diag_discipline.name); + ASCEBC (dasd_diag_discipline.ebcname, 4); ctl_set_bit (0, 9); register_external_interrupt (0x2603, dasd_ext_handler); dasd_discipline_add (&dasd_diag_discipline); } else { - printk (KERN_INFO PRINTK_HEADER - "Machine is not VM: %s discipline not initializing\n", - dasd_diag_discipline.name); + + MESSAGE (KERN_INFO, + "Machine is not VM: %s discipline not initializing", + dasd_diag_discipline.name); + rc = -EINVAL; } return rc; @@ -530,16 +568,19 @@ dasd_diag_cleanup (void) { if (MACHINE_IS_VM) { - printk (KERN_INFO PRINTK_HEADER - "%s discipline cleaning up\n", - dasd_diag_discipline.name); + + MESSAGE (KERN_INFO, + "%s discipline cleaning up", + dasd_diag_discipline.name); + dasd_discipline_del (&dasd_diag_discipline); unregister_external_interrupt (0x2603, dasd_ext_handler); ctl_clear_bit (0, 9); } else { - printk (KERN_INFO PRINTK_HEADER - "Machine is not VM: %s discipline not initializing\n", - dasd_diag_discipline.name); + + MESSAGE (KERN_INFO, + "Machine is not VM: %s discipline not initializing", + dasd_diag_discipline.name); } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/block/dasd_diag.h linux.21rc5-ac2/drivers/s390/block/dasd_diag.h --- linux.21rc5/drivers/s390/block/dasd_diag.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/block/dasd_diag.h 2003-04-25 13:49:34.000000000 +0100 @@ -5,6 +5,11 @@ * ...............: by Hartmunt Penner * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 + * + * $Revision: 1.7 $ + * + * History of changes + * */ #define MDSK_WRITE_REQ 0x01 @@ -18,21 +23,6 @@ #define DEV_CLASS_ECKD 0x04 #define DEV_CLASS_CKD 0x04 -typedef struct dasd_diag_characteristics_t { - u16 dev_nr; - u16 rdc_len; - u8 vdev_class; - u8 vdev_type; - u8 vdev_status; - u8 vdev_flags; - u8 rdev_class; - u8 rdev_type; - u8 rdev_model; - u8 rdev_features; -} __attribute__ ((packed, aligned (4))) - - dasd_diag_characteristics_t; - typedef struct diag_bio_t { u8 type; u8 status; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/block/dasd_eckd.c linux.21rc5-ac2/drivers/s390/block/dasd_eckd.c --- linux.21rc5/drivers/s390/block/dasd_eckd.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/block/dasd_eckd.c 2003-05-28 15:13:51.000000000 +0100 @@ -1,10 +1,13 @@ /* * File...........: linux/drivers/s390/block/dasd_eckd.c * Author(s)......: Holger Smolinski - Carsten Otte + * Horst Hummel + * Carsten Otte * Bugreports.to..: 
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 - + * + * $Revision $ + * * History of changes (starts July 2000) * 07/11/00 Enabled rotational position sensing * 07/14/00 Reorganized the format process for better ERP @@ -42,7 +45,6 @@ #undef PRINTK_HEADER #endif /* PRINTK_HEADER */ #define PRINTK_HEADER DASD_NAME"(eckd):" -#undef CDL_PRINTK #define ECKD_C0(i) (i->home_bytes) #define ECKD_F(i) (i->formula) @@ -55,21 +57,27 @@ #define ECKD_F7(i) (i->factor7) #define ECKD_F8(i) (i->factor8) +#ifdef MODULE +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12)) +MODULE_LICENSE("GPL"); +#endif +#endif + dasd_discipline_t dasd_eckd_discipline; -typedef struct -dasd_eckd_private_t { +typedef struct dasd_eckd_private_t { dasd_eckd_characteristics_t rdc_data; - dasd_eckd_confdata_t conf_data; - eckd_count_t count_area[5]; - int uses_cdl; + dasd_eckd_confdata_t conf_data; + eckd_count_t count_area[5]; + int uses_cdl; + attrib_data_t attrib; /* e.g. cache operations */ } dasd_eckd_private_t; #ifdef CONFIG_DASD_DYNAMIC static devreg_t dasd_eckd_known_devices[] = { { - ci: { hc: {ctype:0x3880, dtype: 3390}}, + ci: { hc: {ctype:0x3880, dtype: 0x3390}}, flag:(DEVREG_MATCH_CU_TYPE | DEVREG_MATCH_DEV_TYPE | DEVREG_TYPE_DEVCHARS), oper_func:dasd_oper_handler @@ -143,7 +151,10 @@ break; } default: - INTERNAL_ERROR ("unknown formula%d\n", rdc->formula); + + MESSAGE (KERN_ERR, + "unknown formula%d", + rdc->formula); } return bpr; } @@ -190,10 +201,35 @@ return rpt; } +static inline void +check_XRC (ccw1_t *de_ccw, + DE_eckd_data_t *data, + dasd_device_t *device) +{ + + dasd_eckd_private_t *private = (dasd_eckd_private_t *) device->private; + + /* switch on System Time Stamp - needed for XRC Support */ + if (private->rdc_data.facilities.XRC_supported) { + + data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ + data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ + + data->ep_sys_time = get_clock (); + + de_ccw->count = sizeof (DE_eckd_data_t); + de_ccw->flags |= CCW_FLAG_SLI; + } + + return; + +} /* end check_XRC */ + static inline int define_extent (ccw1_t * de_ccw, DE_eckd_data_t * data, - int trk, int totrk, int cmd, dasd_device_t * device, ccw_req_t* cqr) + int trk, int totrk, + int cmd, dasd_device_t * device, ccw_req_t* cqr) { int rc=0; ch_t geo, beg, end; @@ -207,8 +243,10 @@ end.head = totrk % geo.head; memset (de_ccw, 0, sizeof (ccw1_t)); + de_ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; de_ccw->count = 16; + if ((rc=dasd_set_normalized_cda (de_ccw, __pa (data), cqr, device))) return rc; @@ -224,40 +262,86 @@ case DASD_ECKD_CCW_READ_KD_MT: case DASD_ECKD_CCW_READ_COUNT: data->mask.perm = 0x1; - data->attributes.operation = 0x3; /* enable seq. caching */ + data->attributes.operation = private->attrib.operation; break; case DASD_ECKD_CCW_WRITE: case DASD_ECKD_CCW_WRITE_MT: case DASD_ECKD_CCW_WRITE_KD: case DASD_ECKD_CCW_WRITE_KD_MT: data->mask.perm = 0x02; - data->attributes.operation = 0x3; /* enable seq. 
caching */ + data->attributes.operation = private->attrib.operation; + + check_XRC (de_ccw, + data, + device); break; case DASD_ECKD_CCW_WRITE_CKD: case DASD_ECKD_CCW_WRITE_CKD_MT: - data->attributes.operation = 0x1; /* format through cache */ + data->attributes.operation = DASD_BYPASS_CACHE; + + check_XRC (de_ccw, + data, + device); break; case DASD_ECKD_CCW_ERASE: case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: case DASD_ECKD_CCW_WRITE_RECORD_ZERO: data->mask.perm = 0x3; data->mask.auth = 0x1; - data->attributes.operation = 0x1; /* format through cache */ + data->attributes.operation = DASD_BYPASS_CACHE; + + check_XRC (de_ccw, + data, + device); break; default: - INTERNAL_ERROR ("unknown opcode 0x%x\n", cmd); + + MESSAGE (KERN_ERR, + "unknown opcode 0x%x", + cmd); + break; } - data->attributes.mode = 0x3; + + data->attributes.mode = 0x3; /* ECKD */ + if (private->rdc_data.cu_type == 0x2105 - && !(private->uses_cdl && trk < 2) - ) { - data->reserved |= 0x40; + && !(private->uses_cdl && trk < 2) ) { + + data->ga_extended |= 0x40; } - data->beg_ext.cyl = beg.cyl; + + /* check for sequential prestage - enhance cylinder range */ + if (data->attributes.operation == DASD_SEQ_PRESTAGE || + data->attributes.operation == DASD_SEQ_ACCESS ) { + + if (end.cyl + private->attrib.nr_cyl < geo.cyl) { + + end.cyl += private->attrib.nr_cyl; + + DBF_DEV_EVENT (DBF_NOTICE, device, + "Enhanced DE Cylinder from %x to %x", + (totrk / geo.head), + end.cyl); + + + } else { + end.cyl = (geo.cyl -1); + + DBF_DEV_EVENT (DBF_NOTICE, device, + "Enhanced DE Cylinder from %x to " + "End of device %x", + (totrk / geo.head), + end.cyl); + + } + } + + data->beg_ext.cyl = beg.cyl; data->beg_ext.head = beg.head; - data->end_ext.cyl = end.cyl; + data->end_ext.cyl = end.cyl; data->end_ext.head = end.head; + return rc; } @@ -266,7 +350,11 @@ LO_eckd_data_t * data, int trk, int rec_on_trk, - int no_rec, int cmd, dasd_device_t * device, int reclen, ccw_req_t* cqr) + int no_rec, + int cmd, + dasd_device_t * device, + int reclen, + ccw_req_t* cqr) { int rc=0; dasd_eckd_private_t *private = (dasd_eckd_private_t *) device->private; @@ -276,10 +364,14 @@ ch_t seek = { trk / (geo.head), trk % (geo.head) }; int sector = 0; -#ifdef CDL_PRINTK - printk ("Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d\n", trk, - rec_on_trk, no_rec, cmd, reclen); -#endif + DBF_EVENT (DBF_INFO, + "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d", + trk, + rec_on_trk, + no_rec, + cmd, + reclen); + memset (lo_ccw, 0, sizeof (ccw1_t)); lo_ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; lo_ccw->count = 16; @@ -365,7 +457,10 @@ data->operation.operation = 0x0b; break; default: - INTERNAL_ERROR ("unknown opcode 0x%x\n", cmd); + + MESSAGE (KERN_ERR, + "unknown opcode 0x%x", + cmd); } memcpy (&(data->seek_addr), &seek, sizeof (ch_t)); memcpy (&(data->search_arg), &seek, sizeof (ch_t)); @@ -391,79 +486,113 @@ static int dasd_eckd_check_characteristics (struct dasd_device_t *device) { - int rc = 0; + int rc = 0; void *conf_data; - void *rdc_data; - int conf_len; + void *rdc_data; + int conf_len; dasd_eckd_private_t *private; if (device == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "Null device pointer passed to characteristics checker\n"); + + MESSAGE (KERN_WARNING, "%s", + "Null device pointer passed to characteristics " + "checker"); + return -ENODEV; } - device->private = kmalloc (sizeof (dasd_eckd_private_t), GFP_KERNEL); + device->private = kmalloc (sizeof (dasd_eckd_private_t), + GFP_KERNEL); + if (device->private == NULL) { - printk (KERN_WARNING 
PRINTK_HEADER - "memory allocation failed for private data\n"); + + MESSAGE (KERN_WARNING, "%s", + "memory allocation failed for private data"); + rc = -ENOMEM; goto fail; } - private = (dasd_eckd_private_t *) device->private; + + private = (dasd_eckd_private_t *) device->private; rdc_data = (void *) &(private->rdc_data); - rc = read_dev_chars (device->devinfo.irq, &rdc_data, 64); + + /* Read Device Characteristics */ + rc = read_dev_chars (device->devinfo.irq, + &rdc_data, + 64); if (rc) { - printk (KERN_WARNING PRINTK_HEADER - "Read device characteristics returned error %d\n", rc); + + MESSAGE (KERN_WARNING, + "Read device characteristics returned error %d", + rc); + goto fail; } - printk (KERN_INFO PRINTK_HEADER - "%04X on sch %d: %04X/%02X(CU:%04X/%02X) " - "Cyl:%d Head:%d Sec:%d\n", - device->devinfo.devno, device->devinfo.irq, - private->rdc_data.dev_type, private->rdc_data.dev_model, - private->rdc_data.cu_type, private->rdc_data.cu_model.model, - private->rdc_data.no_cyl, private->rdc_data.trk_per_cyl, - private->rdc_data.sec_per_trk); - rc = read_conf_data (device->devinfo.irq, &conf_data, &conf_len, + + DEV_MESSAGE (KERN_INFO, device, + "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d", + private->rdc_data.dev_type, + private->rdc_data.dev_model, + private->rdc_data.cu_type, + private->rdc_data.cu_model.model, + private->rdc_data.no_cyl, + private->rdc_data.trk_per_cyl, + private->rdc_data.sec_per_trk); + + /* set default cache operations */ + private->attrib.operation = DASD_SEQ_ACCESS; + private->attrib.nr_cyl = 0x02; + + /* Read Configuration Data */ + rc = read_conf_data (device->devinfo.irq, + &conf_data, + &conf_len, LPM_ANYPATH); if (rc == -EOPNOTSUPP) { rc = 0; /* this one is ok */ } if (rc) { - printk (KERN_WARNING PRINTK_HEADER - "Read configuration data returned error %d\n", rc); + + MESSAGE (KERN_WARNING, + "Read configuration data returned error %d", + rc); + goto fail; } if (conf_data == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "No configuration data retrieved\n"); + + MESSAGE (KERN_WARNING, "%s", + "No configuration data retrieved"); + goto out; /* no error */ } if (conf_len != sizeof (dasd_eckd_confdata_t)) { - printk (KERN_WARNING PRINTK_HEADER - "sizes of configuration data mismatch" - "%d (read) vs %ld (expected)\n", - conf_len, sizeof (dasd_eckd_confdata_t)); + + MESSAGE (KERN_WARNING, + "sizes of configuration data mismatch " + "%d (read) vs %ld (expected)", + conf_len, + sizeof (dasd_eckd_confdata_t)); + goto out; /* no error */ } memcpy (&private->conf_data, conf_data, sizeof (dasd_eckd_confdata_t)); - printk (KERN_INFO PRINTK_HEADER - "%04X on sch %d: %04X/%02X(CU:%04X/%02X): " - "Configuration data read\n", - device->devinfo.devno, device->devinfo.irq, + + DEV_MESSAGE (KERN_INFO, device, + "%04X/%02X(CU:%04X/%02X): Configuration data read", private->rdc_data.dev_type, private->rdc_data.dev_model, private->rdc_data.cu_type, private->rdc_data.cu_model.model); goto out; + fail: - if ( rc ) { + if (device->private) { kfree (device->private); device->private = NULL; } + out: return rc; } @@ -498,8 +627,9 @@ 2 * sizeof (LO_eckd_data_t), device); if (cqr == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "No memory to allocate initialization request\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory to allocate initialization request"); goto out; } DE_data = cqr->data; @@ -557,13 +687,17 @@ } cqr->device = device; cqr->retries = 0; + cqr->buildclk = get_clock (); cqr->status = CQR_STATUS_FILLED; dasd_chanq_enq (&device->queue, cqr); goto out; + clear_cqr: 
dasd_free_request (cqr,device); - printk (KERN_WARNING PRINTK_HEADER - "No memory to allocate initialization request\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory to allocate initialization request"); + cqr=NULL; out: return cqr; @@ -578,14 +712,17 @@ char *cdl_msg; int status; int i; + private->uses_cdl = 1; status = device->init_cqr->status; dasd_chanq_deq (&device->queue, device->init_cqr); dasd_free_request (device->init_cqr, device); /* Free the cqr and cleanup device->sizes */ if ( status != CQR_STATUS_DONE ) { - DASD_MESSAGE (KERN_WARNING,device,"%s", - "volume analysis returned unformatted disk"); + + DEV_MESSAGE (KERN_WARNING, device, "%s", + "volume analysis returned unformatted disk"); + return -EMEDIUMTYPE; } /* Check Track 0 for Compatible Disk Layout */ @@ -614,8 +751,9 @@ } } else { if (private->count_area[3].record == 1) { - DASD_MESSAGE (KERN_WARNING, device, "%s", - "Trk 0: no records after VTOC!"); + + DEV_MESSAGE (KERN_WARNING, device, "%s", + "Trk 0: no records after VTOC!"); } } if (count_area != NULL && /* we found notthing violating our disk layout */ @@ -631,8 +769,10 @@ } } if (device->sizes.bp_block == 0) { - DASD_MESSAGE (KERN_WARNING, device, "%s", - "Volume has incompatible disk layout"); + + DEV_MESSAGE (KERN_WARNING, device, "%s", + "Volume has incompatible disk layout"); + return -EMEDIUMTYPE; } device->sizes.s2b_shift = 0; /* bits to shift 512 to get a block */ @@ -647,18 +787,21 @@ device->sizes.bp_block)); cdl_msg = private-> - uses_cdl ? "compatible disk layout" : "classic disk layout"; + uses_cdl ? "compatible disk layout" : "linux disk layout"; + + DEV_MESSAGE (KERN_INFO, device, + "(%dkB blks): %dkB at %dkB/trk %s", + (device->sizes.bp_block >> 10), + ((private->rdc_data.no_cyl * + private->rdc_data.trk_per_cyl * + recs_per_track (&private->rdc_data, 0, + device->sizes.bp_block) * + (device->sizes.bp_block >> 9)) >> 1), + ((recs_per_track (&private->rdc_data, 0, + device->sizes.bp_block) * + device->sizes.bp_block) >> 10), + cdl_msg); - DASD_MESSAGE (KERN_INFO, device, "(%dkB blks): %dkB at %dkB/trk %s", - (device->sizes.bp_block >> 10), - (private->rdc_data.no_cyl * - private->rdc_data.trk_per_cyl * - recs_per_track (&private->rdc_data, 0, - device->sizes.bp_block) * - (device->sizes.bp_block >> 9)) >> 1, - (recs_per_track (&private->rdc_data, 0, - device->sizes.bp_block) * - device->sizes.bp_block) >> 10, cdl_msg); return 0; } @@ -703,13 +846,21 @@ int wrccws = rpt; int datasize = sizeof (DE_eckd_data_t) + sizeof (LO_eckd_data_t); - if (fdata->start_unit >= (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)){ - DASD_MESSAGE (KERN_INFO, device, "Track no %d too big!", fdata->start_unit); + if (fdata->start_unit >= + (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)){ + + DEV_MESSAGE (KERN_INFO, device, + "Track no %d too big!", + fdata->start_unit); + return NULL; } if ( fdata->start_unit > fdata->stop_unit) { - DASD_MESSAGE (KERN_INFO, device, "Track %d reached! ending.", - fdata->start_unit); + + DEV_MESSAGE (KERN_INFO, device, + "Track %d reached! 
ending.", + fdata->start_unit); + return NULL; } switch (fdata->blksize) { @@ -719,8 +870,11 @@ case 4096: break; default: - printk (KERN_WARNING PRINTK_HEADER - "Invalid blocksize %d...terminating!\n", fdata->blksize); + + MESSAGE (KERN_WARNING, + "Invalid blocksize %d...terminating!", + fdata->blksize); + return NULL; } switch (fdata->intensity) { @@ -734,8 +888,11 @@ case 0x0c: break; default: - printk (KERN_WARNING PRINTK_HEADER - "Invalid flags 0x%x...terminating!\n", fdata->intensity); + + MESSAGE (KERN_WARNING, + "Invalid flags 0x%x...terminating!", + fdata->intensity); + return NULL; } @@ -745,9 +902,12 @@ (fdata->start_unit % private->rdc_data.no_cyl == 0 && (fdata->start_unit / private->rdc_data.no_cyl) % (private->rdc_data.no_cyl / 20))) { - DASD_MESSAGE (KERN_INFO, device, - "Format Cylinder: %d Flags: %d", - fdata->start_unit / private->rdc_data.trk_per_cyl, fdata->intensity); + + DBF_DEV_EVENT (DBF_NOTICE, device, + "Format Cylinder: %d Flags: %d", + fdata->start_unit / private->rdc_data.trk_per_cyl, + fdata->intensity); + } if ((fdata->intensity & ~0x8) & 0x04) { wrccws = 1; @@ -789,14 +949,14 @@ switch (fdata->intensity & ~0x08) { case 0x03: if (define_extent (last_ccw, DE_data, fdata->start_unit, fdata->start_unit, - DASD_ECKD_CCW_WRITE_HOME_ADDRESS, + DASD_ECKD_CCW_WRITE_HOME_ADDRESS, device, fcp)) { goto clear_fcp; } last_ccw->flags |= CCW_FLAG_CC; last_ccw++; if (locate_record (last_ccw, LO_data, fdata->start_unit, 0, wrccws, - DASD_ECKD_CCW_WRITE_HOME_ADDRESS, device, + DASD_ECKD_CCW_WRITE_HOME_ADDRESS, device, device->sizes.bp_block, fcp)) { goto clear_fcp; } @@ -811,7 +971,7 @@ last_ccw->flags |= CCW_FLAG_CC; last_ccw++; if (locate_record (last_ccw, LO_data, fdata->start_unit, 0, wrccws, - DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, + DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, device->sizes.bp_block, fcp)) { goto clear_fcp; } @@ -837,11 +997,19 @@ last_ccw++; break; default: - PRINT_WARN ("Unknown format flags...%d\n", fdata->intensity); + + MESSAGE (KERN_WARNING, + "Unknown format flags...%d", + fdata->intensity); + return NULL; } if (fdata->intensity & 0x02) { - PRINT_WARN ("Unsupported format flag...%d\n", fdata->intensity); + + MESSAGE (KERN_WARNING, + "Unsupported format flag...%d", + fdata->intensity); + return NULL; } if (fdata->intensity & 0x01) { /* write record zero */ @@ -913,6 +1081,7 @@ } (last_ccw - 1)->flags &= ~(CCW_FLAG_CC | CCW_FLAG_DC); fcp->device = device; + fcp->buildclk = get_clock (); fcp->status = CQR_STATUS_FILLED; } goto out; @@ -939,8 +1108,10 @@ case 0x9343: return dasd_9343_erp_examine (cqr, stat); default: - printk (KERN_WARNING PRINTK_HEADER - "default (unknown CU type) - RECOVERABLE return \n"); + + MESSAGE (KERN_WARNING, "%s", + "default (unknown CU type) - RECOVERABLE return"); + return dasd_era_recover; } } @@ -1015,26 +1186,36 @@ int byt_per_blk = device->sizes.bp_block; int shift = device->sizes.s2b_shift; int blk_per_trk = recs_per_track (&(private->rdc_data), 0, byt_per_blk); - int btrk = (req->sector >> shift) / blk_per_trk; - int etrk = ((req->sector + req->nr_sectors - 1) >> shift) / blk_per_trk; - int recid = req->sector >> shift; + unsigned long reloc_sector = req->sector + + device->major_info->gendisk.part[MINOR(req->rq_dev)].start_sect; + int btrk = (reloc_sector >> shift) / blk_per_trk; + int etrk = ((reloc_sector + req->nr_sectors - 1) >> shift) / blk_per_trk; + int recid = reloc_sector >> shift; int locate4k_set = 0; int nlocs = 0; + int errcode; if (req->cmd == READ) { rw_cmd = DASD_ECKD_CCW_READ_MT; } else if (req->cmd 
== WRITE) { rw_cmd = DASD_ECKD_CCW_WRITE_MT; } else { - PRINT_ERR ("Unknown command %d\n", req->cmd); - return NULL; + + MESSAGE (KERN_ERR, + "Unknown command %d", + req->cmd); + + return ERR_PTR(-EINVAL); } /* Build the request */ /* count bhs to prevent errors, when bh smaller than block */ bhct = 0; for (bh = req->bh; bh; bh = bh->b_reqnext) { - if (bh->b_size < byt_per_blk) - BUG(); + if (bh->b_size < byt_per_blk) { + MESSAGE(KERN_ERR, "ignoring bogus sized request: %d<%d", + bh->b_size, byt_per_blk); + return ERR_PTR(-EINVAL); + } bhct+= bh->b_size >> (device->sizes.s2b_shift+9); } if (btrk < 2 && private->uses_cdl) { @@ -1050,12 +1231,13 @@ sizeof (LO_eckd_data_t), device); if (!rw_cp) { - return NULL; + return ERR_PTR(-ENOMEM); } DE_data = rw_cp->data; LO_data = rw_cp->data + sizeof (DE_eckd_data_t); ccw = rw_cp->cpaddr; - if (define_extent (ccw, DE_data, btrk, etrk, rw_cmd, device, rw_cp)) { + if ((errcode = define_extent (ccw, DE_data, btrk, etrk, + rw_cmd, device, rw_cp))) { goto clear_rw_cp; } ccw->flags |= CCW_FLAG_CC; @@ -1068,25 +1250,25 @@ && private->uses_cdl) { /* Do a locate record for our special blocks */ int cmd = dasd_eckd_cdl_cmd (device,recid, req->cmd); - if (locate_record (ccw, + if ((errcode = locate_record (ccw, LO_data++, recid / blk_per_trk, recid % blk_per_trk + 1, 1, cmd, device, - dasd_eckd_cdl_reclen(device, recid), rw_cp)) { + dasd_eckd_cdl_reclen(device, recid), rw_cp))) { goto clear_rw_cp; } } else { // Do a locate record for standard blocks */ - if (locate_record (ccw, + if ((errcode = locate_record (ccw, LO_data++, recid /blk_per_trk, recid %blk_per_trk + 1, - (((req->sector + + (((reloc_sector + req->nr_sectors) >> shift) - recid), rw_cmd, device, - device->sizes.bp_block, rw_cp)) { + device->sizes.bp_block, rw_cp))) { goto clear_rw_cp; } locate4k_set = 1; @@ -1105,25 +1287,32 @@ 0xE5, byt_per_blk - ccw->count); } } - if (dasd_set_normalized_cda (ccw, __pa (bh->b_data+size), rw_cp, device)) { + if ((errcode = dasd_set_normalized_cda (ccw, __pa (bh->b_data+size), + rw_cp, device))) { goto clear_rw_cp; } recid++; } bh = bh->b_reqnext; } - ccw->flags &= ~(CCW_FLAG_DC | CCW_FLAG_CC); - rw_cp->device = device; + ccw->flags &= ~(CCW_FLAG_DC | CCW_FLAG_CC); + rw_cp->device = device; rw_cp->expires = 5 * TOD_MIN; /* 5 minutes */ - rw_cp->req = req; - rw_cp->lpm = LPM_ANYPATH; - rw_cp->retries = 2; - asm volatile ("STCK %0":"=m" (rw_cp->buildclk)); - check_then_set (&rw_cp->status, CQR_STATUS_EMPTY, CQR_STATUS_FILLED); + rw_cp->req = req; + rw_cp->lpm = LPM_ANYPATH; + rw_cp->retries = 256; + + rw_cp->buildclk = get_clock (); + + check_then_set (&rw_cp->status, + CQR_STATUS_EMPTY, + CQR_STATUS_FILLED); + goto out; clear_rw_cp: - dasd_free_request (rw_cp, device); - rw_cp=NULL; + dasd_free_request (rw_cp, + device); + rw_cp=ERR_PTR(errcode); out: return rw_cp; } @@ -1176,18 +1365,31 @@ ccw_req_t * dasd_eckd_reserve (struct dasd_device_t * device) { - ccw_req_t *cqr = - dasd_alloc_request (dasd_eckd_discipline.name, 1 + 1, 0, device); + ccw_req_t *cqr; + + cqr = dasd_alloc_request (dasd_eckd_discipline.name, + 1 + 1, 32, device); if (cqr == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "No memory to allocate initialization request\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory to allocate initialization request"); + return NULL; } cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE; + cqr->cpaddr->flags |= CCW_FLAG_SLI; + cqr->cpaddr->count = 32; + + if (dasd_set_normalized_cda (cqr->cpaddr, __pa (cqr->data), + cqr, device)) { + dasd_free_request (cqr, device); + 
return NULL; + } + cqr->device = device; cqr->retries = 0; cqr->expires = 10 * TOD_SEC; - cqr->options = (DOIO_WAIT_FOR_INTERRUPT | DOIO_TIMEOUT); /* timeout reqest */ + cqr->buildclk = get_clock (); cqr->status = CQR_STATUS_FILLED; return cqr; } @@ -1202,18 +1404,31 @@ ccw_req_t * dasd_eckd_release (struct dasd_device_t * device) { - ccw_req_t *cqr = - dasd_alloc_request (dasd_eckd_discipline.name, 1 + 1, 0, device); + ccw_req_t *cqr; + + cqr = dasd_alloc_request (dasd_eckd_discipline.name, + 1 + 1, 32, device); if (cqr == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "No memory to allocate initialization request\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory to allocate initialization request"); + return NULL; } cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE; + cqr->cpaddr->flags |= CCW_FLAG_SLI; + cqr->cpaddr->count = 32; + + if (dasd_set_normalized_cda (cqr->cpaddr, __pa (cqr->data), + cqr, device)) { + dasd_free_request (cqr, device); + return NULL; + } + cqr->device = device; cqr->retries = 0; cqr->expires = 10 * TOD_SEC; - cqr->options = (DOIO_WAIT_FOR_INTERRUPT | DOIO_TIMEOUT); /* timeout reqest */ + cqr->buildclk = get_clock (); cqr->status = CQR_STATUS_FILLED; return cqr; @@ -1229,18 +1444,31 @@ ccw_req_t * dasd_eckd_steal_lock (struct dasd_device_t * device) { - ccw_req_t *cqr = - dasd_alloc_request (dasd_eckd_discipline.name, 1 + 1, 0, device); + ccw_req_t *cqr; + + cqr = dasd_alloc_request (dasd_eckd_discipline.name, + 1 + 1, 32, device); if (cqr == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "No memory to allocate initialization request\n"); + + MESSAGE (KERN_WARNING, "%s", + "No memory to allocate initialization request"); + return NULL; } cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK; + cqr->cpaddr->flags |= CCW_FLAG_SLI; + cqr->cpaddr->count = 32; + + if (dasd_set_normalized_cda (cqr->cpaddr, __pa (cqr->data), + cqr, device)) { + dasd_free_request (cqr, device); + return NULL; + } + cqr->device = device; cqr->retries = 0; cqr->expires = 10 * TOD_SEC; - cqr->options = (DOIO_WAIT_FOR_INTERRUPT | DOIO_TIMEOUT); /* timeout reqest */ + cqr->buildclk = get_clock (); cqr->status = CQR_STATUS_FILLED; return cqr; } @@ -1274,14 +1502,17 @@ } static int -dasd_eckd_fill_info (dasd_device_t * device, dasd_information_t * info) +dasd_eckd_fill_info (dasd_device_t * device, dasd_information2_t * info) { int rc = 0; info->label_block = 2; - if (((dasd_eckd_private_t *) device->private)->uses_cdl) + if (((dasd_eckd_private_t *) device->private)->uses_cdl) { info->FBA_layout = 0; - else - info->FBA_layout = 1; + info->format = DASD_FORMAT_CDL; + } else { + info->FBA_layout = 1; + info->format = DASD_FORMAT_LDL; + } info->characteristics_size = sizeof (dasd_eckd_characteristics_t); memcpy (info->characteristics, &((dasd_eckd_private_t *) device->private)->rdc_data, @@ -1293,6 +1524,138 @@ return rc; } +/* + * DASD_ECKD_READ_STATS + * + * DESCRIPTION + * build the channel program to read the performance statistics + * of the attached subsystem + */ +ccw_req_t * +dasd_eckd_read_stats (struct dasd_device_t * device) +{ + + int rc; + ccw1_t *ccw; + ccw_req_t *cqr; + dasd_psf_prssd_data_t *prssdp; + dasd_rssd_perf_stats_t *statsp; + + cqr = dasd_alloc_request (dasd_eckd_discipline.name, + 1 /* PSF */ + 1 /* RSSD */, + (sizeof (dasd_psf_prssd_data_t) + + sizeof (dasd_rssd_perf_stats_t) ), + device); + + if (cqr == NULL) { + + MESSAGE (KERN_WARNING, "%s", + "No memory to allocate initialization request"); + + return NULL; + } + + cqr->device = device; + cqr->retries = 0; + cqr->expires = 10 * 
TOD_SEC; + + /* Prepare for Read Subsystem Data */ + prssdp = (dasd_psf_prssd_data_t *) cqr->data; + + memset (prssdp, 0, sizeof (dasd_psf_prssd_data_t)); + + prssdp->order = PSF_ORDER_PRSSD; + prssdp->suborder = 0x01; /* Performance Statistics */ + prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ + + ccw = cqr->cpaddr; + + ccw->cmd_code = DASD_ECKD_CCW_PSF; + ccw->count = sizeof (dasd_psf_prssd_data_t); + ccw->flags |= CCW_FLAG_CC; + + if ((rc = dasd_set_normalized_cda (ccw,__pa (prssdp), cqr, device))) { + + dasd_free_request (cqr, + device); + return NULL; + } + + ccw++; + + /* Read Subsystem Data - Performance Statistics */ + statsp = (dasd_rssd_perf_stats_t *) (prssdp + 1); + memset (statsp, 0, sizeof (dasd_rssd_perf_stats_t)); + + ccw->cmd_code = DASD_ECKD_CCW_RSSD; + ccw->count = sizeof (dasd_rssd_perf_stats_t); + + if ((rc = dasd_set_normalized_cda (ccw,__pa (statsp), cqr, device))) { + + dasd_free_request (cqr, + device); + return NULL; + } + cqr->buildclk = get_clock (); + cqr->status = CQR_STATUS_FILLED; + + return cqr; +} /* end dasd_eckd_read_stats */ + +/* + * DASD_ECKD_RET_STATS + * + * DESCRIPTION + * returns pointer to Performance Statistics Data. + */ +dasd_rssd_perf_stats_t * +dasd_eckd_ret_stats (ccw_req_t *cqr) +{ + + dasd_psf_prssd_data_t *prssdp; + dasd_rssd_perf_stats_t *statsp; + + if (cqr == NULL) { + + return NULL; + } + + /* Prepare for Read Subsystem Data */ + prssdp = (dasd_psf_prssd_data_t *) cqr->data; + statsp = (dasd_rssd_perf_stats_t *) (prssdp + 1); + + return statsp; + +} /* end dasd_eckd_ret_stats */ + + +/* + * DASD_ECKD_SET_ATTRIB + * + * DESCRIPTION + * stores the attributes for cache operation to be used in Define Extent (DE). + */ +int +dasd_eckd_set_attrib (dasd_device_t *device, + attrib_data_t *attrib) +{ + int rc = 0; + dasd_eckd_private_t *private; + + private = (dasd_eckd_private_t *) device->private; + private->attrib = *attrib; + + DBF_DEV_EVENT (DBF_ERR, device, + "cache operation mode set to " + "%x (%i cylinder prestage)", + private->attrib.operation, + private->attrib.nr_cyl); + + + return rc; + +} /* end dasd_eckd_set_attrib */ + static char* dasd_eckd_dump_sense (struct dasd_device_t *device, ccw_req_t *req) @@ -1304,8 +1667,10 @@ int len, sl, sct; if (page == NULL) { - printk (KERN_ERR PRINTK_HEADER - "No memory to dump sense data\n"); + + MESSAGE (KERN_ERR, "%s", + "No memory to dump sense data"); + return NULL; } @@ -1318,17 +1683,27 @@ len += sprintf (page + len, KERN_ERR PRINTK_HEADER "Failing CCW: %p\n", (void *) (long) stat->cpa); { + ccw1_t *act = req->cpaddr; int i = req->cplength; + do { -#ifdef ERP_DEBUG - printk (KERN_ERR "CCW %p: %08X %08X\n", - act, ((int *) act)[0], ((int *) act)[1]); - printk (KERN_ERR "DAT: %08X %08X %08X %08X\n", - ((int *) act->cda)[0], ((int *) act->cda)[1], - ((int *) act->cda)[2], ((int *) act->cda)[3]); -#endif /* ERP_DEBUG */ + + DBF_EVENT (DBF_INFO, + "CCW %p: %08X %08X", + act, + ((int *) act)[0], + ((int *) act)[1]); + + DBF_EVENT (DBF_INFO, + "DAT: %08X %08X %08X %08X", + ((int *) (addr_t) act->cda)[0], + ((int *) (addr_t) act->cda)[1], + ((int *) (addr_t) act->cda)[2], + ((int *) (addr_t) act->cda)[3]); + + act++; + } while (--i); } if (stat->flag & DEVSTAT_FLAG_SENSE_AVAIL) { @@ -1353,13 +1728,15 @@ } else { /* 32 Byte Sense Data */ len += sprintf (page + len, KERN_ERR PRINTK_HEADER - "32 Byte: Format: %x Exception class %x\n", + "32 Byte: Format: %x " + "Exception class %x\n", sense[6] & 0x0f, sense[22] >> 4); } } - printk ("Sense data:\n%s", - page); + MESSAGE (KERN_ERR, + "Sense 
data:\n%s", + page); free_page ((unsigned long) page); @@ -1371,7 +1748,7 @@ owner: THIS_MODULE, name:"ECKD", ebcname:"ECKD", - max_blocks:255, + max_blocks:240, id_check:dasd_eckd_id_check, check_characteristics:dasd_eckd_check_characteristics, init_analysis:dasd_eckd_init_analysis, @@ -1391,14 +1768,21 @@ steal_lock:dasd_eckd_steal_lock, merge_cp:dasd_eckd_merge_cp, fill_info:dasd_eckd_fill_info, + read_stats:dasd_eckd_read_stats, + ret_stats:dasd_eckd_ret_stats, + set_attrib:dasd_eckd_set_attrib, + list:LIST_HEAD_INIT(dasd_eckd_discipline.list), }; int dasd_eckd_init (void) { int rc = 0; - printk (KERN_INFO PRINTK_HEADER - "%s discipline initializing\n", dasd_eckd_discipline.name); + + MESSAGE (KERN_INFO, + "%s discipline initializing", + dasd_eckd_discipline.name); + ASCEBC (dasd_eckd_discipline.ebcname, 4); dasd_discipline_add (&dasd_eckd_discipline); #ifdef CONFIG_DASD_DYNAMIC @@ -1407,10 +1791,12 @@ for (i = 0; i < sizeof (dasd_eckd_known_devices) / sizeof (devreg_t); i++) { - printk (KERN_INFO PRINTK_HEADER - "We are interested in: CU %04X/%02x\n", - dasd_eckd_known_devices[i].ci.hc.ctype, - dasd_eckd_known_devices[i].ci.hc.cmode); + + MESSAGE (KERN_INFO, + "We are interested in: CU %04X/%02x", + dasd_eckd_known_devices[i].ci.hc.ctype, + dasd_eckd_known_devices[i].ci.hc.cmode); + s390_device_register (&dasd_eckd_known_devices[i]); } } @@ -1421,18 +1807,23 @@ void dasd_eckd_cleanup (void) { - printk (KERN_INFO PRINTK_HEADER - "%s discipline cleaning up\n", dasd_eckd_discipline.name); + + MESSAGE (KERN_INFO, + "%s discipline cleaning up", + dasd_eckd_discipline.name); + #ifdef CONFIG_DASD_DYNAMIC { int i; for (i = 0; i < sizeof (dasd_eckd_known_devices) / sizeof (devreg_t); i++) { - printk (KERN_INFO PRINTK_HEADER - "We were interested in: CU %04X/%02x\n", - dasd_eckd_known_devices[i].ci.hc.ctype, - dasd_eckd_known_devices[i].ci.hc.cmode); + + MESSAGE (KERN_INFO, + "We were interested in: CU %04X/%02x", + dasd_eckd_known_devices[i].ci.hc.ctype, + dasd_eckd_known_devices[i].ci.hc.cmode); + s390_device_unregister (&dasd_eckd_known_devices[i]); } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/block/dasd_eckd.h linux.21rc5-ac2/drivers/s390/block/dasd_eckd.h --- linux.21rc5/drivers/s390/block/dasd_eckd.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/block/dasd_eckd.h 2003-04-25 13:49:34.000000000 +0100 @@ -1,78 +1,96 @@ +/* + * File...........: linux/drivers/s390/block/dasd_eckd.h + * Author(s)......: Holger Smolinski + * Horst Hummel + * Bugreports.to..: + * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 + * + * $Revision: 1.14 $ + * + * History of changes + * + */ + #ifndef DASD_ECKD_H #define DASD_ECKD_H #include "dasd_3990_erp.h" #include "dasd_9343_erp.h" -#define DASD_ECKD_CCW_WRITE 0x05 -#define DASD_ECKD_CCW_READ 0x06 +/******************************************************************************* + * SECTION: CCW Definitions + ******************************************************************************/ +#define DASD_ECKD_CCW_WRITE 0x05 +#define DASD_ECKD_CCW_READ 0x06 #define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09 -#define DASD_ECKD_CCW_READ_HOME_ADDRESS 0x0a -#define DASD_ECKD_CCW_WRITE_KD 0x0d -#define DASD_ECKD_CCW_READ_KD 0x0e -#define DASD_ECKD_CCW_ERASE 0x11 -#define DASD_ECKD_CCW_READ_COUNT 0x12 -#define DASD_ECKD_CCW_WRITE_RECORD_ZERO 0x15 -#define DASD_ECKD_CCW_READ_RECORD_ZERO 0x16 -#define DASD_ECKD_CCW_WRITE_CKD 0x1d -#define DASD_ECKD_CCW_READ_CKD 0x1e -#define 
DASD_ECKD_CCW_LOCATE_RECORD 0x47 -#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 -#define DASD_ECKD_CCW_WRITE_MT 0x85 -#define DASD_ECKD_CCW_READ_MT 0x86 -#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d -#define DASD_ECKD_CCW_READ_KD_MT 0x8e -#define DASD_ECKD_CCW_RELEASE 0x94 -#define DASD_ECKD_CCW_READ_CKD_MT 0x9e -#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d -#define DASD_ECKD_CCW_RESERVE 0xB4 -#define DASD_ECKD_CCW_SLCK 0x14 /* steal lock - unconditional reserve */ +#define DASD_ECKD_CCW_READ_HOME_ADDRESS 0x0a +#define DASD_ECKD_CCW_WRITE_KD 0x0d +#define DASD_ECKD_CCW_READ_KD 0x0e +#define DASD_ECKD_CCW_ERASE 0x11 +#define DASD_ECKD_CCW_READ_COUNT 0x12 +#define DASD_ECKD_CCW_SLCK 0x14 +#define DASD_ECKD_CCW_WRITE_RECORD_ZERO 0x15 +#define DASD_ECKD_CCW_READ_RECORD_ZERO 0x16 +#define DASD_ECKD_CCW_WRITE_CKD 0x1d +#define DASD_ECKD_CCW_READ_CKD 0x1e +#define DASD_ECKD_CCW_PSF 0x27 +#define DASD_ECKD_CCW_RSSD 0x3e +#define DASD_ECKD_CCW_LOCATE_RECORD 0x47 +#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 +#define DASD_ECKD_CCW_WRITE_MT 0x85 +#define DASD_ECKD_CCW_READ_MT 0x86 +#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d +#define DASD_ECKD_CCW_READ_KD_MT 0x8e +#define DASD_ECKD_CCW_RELEASE 0x94 +#define DASD_ECKD_CCW_READ_CKD_MT 0x9e +#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d +#define DASD_ECKD_CCW_RESERVE 0xB4 + +/* + *Perform Subsystem Function / Sub-Orders + */ +#define PSF_ORDER_PRSSD 0x18 + + +/******************************************************************************* + * SECTION: Type Definitions + ******************************************************************************/ -typedef - struct eckd_count_t { +typedef struct eckd_count_t { __u16 cyl; __u16 head; __u8 record; __u8 kl; __u16 dl; -} __attribute__ ((packed)) +} __attribute__ ((packed)) eckd_count_t; - eckd_count_t; -typedef - struct ch_t { +typedef struct ch_t { __u16 cyl; __u16 head; -} __attribute__ ((packed)) +} __attribute__ ((packed)) ch_t; - ch_t; -typedef - struct chs_t { +typedef struct chs_t { __u16 cyl; __u16 head; __u32 sector; -} __attribute__ ((packed)) +} __attribute__ ((packed)) chs_t; - chs_t; -typedef - struct chr_t { +typedef struct chr_t { __u16 cyl; __u16 head; __u8 record; -} __attribute__ ((packed)) +} __attribute__ ((packed)) chr_t; - chr_t; -typedef - struct geom_t { +typedef struct geom_t { __u16 cyl; __u16 head; __u32 sector; -} __attribute__ ((packed)) +} __attribute__ ((packed)) geom_t; - geom_t; typedef struct eckd_home_t { __u8 skip_control[14]; @@ -83,12 +101,10 @@ __u8 reserved; __u8 key_length; __u8 reserved2[2]; -} __attribute__ ((packed)) +} __attribute__ ((packed)) eckd_home_t; - eckd_home_t; -typedef - struct DE_eckd_data_t { +typedef struct DE_eckd_data_t { struct { unsigned char perm:2; /* Permissions on this extent */ unsigned char reserved:1; @@ -103,18 +119,20 @@ unsigned char cfw:1; /* Cache fast write */ unsigned char dfw:1; /* DASD fast write */ } __attribute__ ((packed)) attributes; - __u16 short blk_size; /* Blocksize */ + __u16 blk_size; /* Blocksize */ __u16 fast_write_id; - __u8 unused; - __u8 reserved; + __u8 ga_additional; /* Global Attributes Additional */ + __u8 ga_extended; /* Global Attributes Extended */ ch_t beg_ext; ch_t end_ext; -} __attribute__ ((packed)) + unsigned long long ep_sys_time; /* Extended Parameter - System Time Stamp */ + __u8 ep_format; /* Extended Parameter format byte */ + __u8 ep_prio; /* Extended Parameter priority I/O byte */ + __u8 ep_reserved[6]; /* Extended Parameter Reserved */ +} __attribute__ ((packed)) DE_eckd_data_t; - DE_eckd_data_t; -typedef - struct 
LO_eckd_data_t { struct { unsigned char orientation:2; unsigned char operation:6; @@ -130,12 +148,10 @@ chr_t search_arg; __u8 sector; __u16 length; -} __attribute__ ((packed)) +} __attribute__ ((packed)) LO_eckd_data_t; - LO_eckd_data_t; -typedef - struct dasd_eckd_characteristics_t { +typedef struct dasd_eckd_characteristics_t { __u16 cu_type; struct { unsigned char support:2; @@ -154,7 +170,8 @@ unsigned char reserved2:4; unsigned char reserved3:8; unsigned char defect_wr:1; - unsigned char reserved4:2; + unsigned char XRC_supported:1; + unsigned char reserved4:1; unsigned char striping:1; unsigned char reserved5:4; unsigned char cfw:1; @@ -205,9 +222,7 @@ __u8 factor8; __u8 reserved2[3]; __u8 reserved3[10]; -} __attribute__ ((packed)) - - dasd_eckd_characteristics_t; +} __attribute__ ((packed)) dasd_eckd_characteristics_t; typedef struct dasd_eckd_confdata_t { struct { @@ -324,9 +339,20 @@ __u8 log_dev_address; unsigned char reserved2[12]; } __attribute__ ((packed)) neq; -} __attribute__ ((packed)) +} __attribute__ ((packed)) dasd_eckd_confdata_t; + +/* + * Perform Subsystem Function - Prepare for Read Subsystem Data + */ +typedef struct dasd_psf_prssd_data_t { + unsigned char order; + unsigned char flags; + unsigned char reserved[4]; + unsigned char suborder; + unsigned char varies[9]; +} __attribute__((packed)) dasd_psf_prssd_data_t; + - dasd_eckd_confdata_t; int dasd_eckd_init (void); void dasd_eckd_cleanup (void); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/block/dasd_fba.c linux.21rc5-ac2/drivers/s390/block/dasd_fba.c --- linux.21rc5/drivers/s390/block/dasd_fba.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/block/dasd_fba.c 2003-05-28 15:13:51.000000000 +0100 @@ -1,9 +1,12 @@ - /* * File...........: linux/drivers/s390/block/dasd_fba.c * Author(s)......: Holger Smolinski * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 + * + * $Revision: 1.49 $ + * + * History of changes * fixed partition handling and HDIO_GETGEO */ @@ -38,6 +41,12 @@ #define DASD_FBA_CCW_LOCATE 0x43 #define DASD_FBA_CCW_DEFINE_EXTENT 0x63 +#ifdef MODULE +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12)) +MODULE_LICENSE("GPL"); +#endif +#endif + dasd_discipline_t dasd_fba_discipline; typedef struct @@ -123,35 +138,46 @@ dasd_fba_private_t *private; if (device == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "Null device pointer passed to characteristics checker\n"); + + MESSAGE (KERN_WARNING, "%s", + "Null device pointer passed to characteristics " + "checker"); + return -ENODEV; } device->private = kmalloc (sizeof (dasd_fba_private_t), GFP_KERNEL); + if (device->private == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "memory allocation failed for private data\n"); + + MESSAGE (KERN_WARNING, "%s", + "memory allocation failed for private data"); + rc = -ENOMEM; goto fail; } private = (dasd_fba_private_t *) device->private; rdc_data = (void *) &(private->rdc_data); rc = read_dev_chars (device->devinfo.irq, &rdc_data, 32); + if (rc) { - printk (KERN_WARNING PRINTK_HEADER - "Read device characteristics returned error %d\n", rc); + + MESSAGE (KERN_WARNING, + "Read device characteristics returned error %d", + rc); + goto fail; } - printk (KERN_INFO PRINTK_HEADER - "%04X on sch %d: %04X/%02X(CU:%04X/%02X) %dMB at(%d B/blk)\n", - device->devinfo.devno, device->devinfo.irq, - 
device->devinfo.sid_data.dev_type, - device->devinfo.sid_data.dev_model, - device->devinfo.sid_data.cu_type, - device->devinfo.sid_data.cu_model, - (private->rdc_data.blk_bdsa * - (private->rdc_data.blk_size >> 9)) >> 11, - private->rdc_data.blk_size); + + DEV_MESSAGE (KERN_INFO, device, + "%04X/%02X(CU:%04X/%02X) %dMB at(%d B/blk)", + device->devinfo.sid_data.dev_type, + device->devinfo.sid_data.dev_model, + device->devinfo.sid_data.cu_type, + device->devinfo.sid_data.cu_model, + ((private->rdc_data.blk_bdsa * + (private->rdc_data.blk_size >> 9)) >> 11), + private->rdc_data.blk_size); + goto out; fail: if ( rc ) { @@ -180,9 +206,11 @@ device->sizes.bp_block = bs; break; default: - printk (KERN_INFO PRINTK_HEADER - "/dev/%s (%04X): unknown blocksize %d\n", - device->name, device->devinfo.devno, bs); + + DEV_MESSAGE (KERN_INFO, device, + "unknown blocksize %d", + bs); + return -EMEDIUMTYPE; } device->sizes.s2b_shift = 0; /* bits to shift 512 to get a block */ @@ -247,8 +275,11 @@ { if (cqr->function == dasd_default_erp_action) return dasd_default_erp_postaction; - printk (KERN_WARNING PRINTK_HEADER - "unknown ERP action %p\n", cqr->function); + + MESSAGE (KERN_WARNING, + "unknown ERP action %p", + cqr->function); + return NULL; } @@ -265,14 +296,20 @@ struct buffer_head *bh; dasd_fba_private_t *private = (dasd_fba_private_t *) device->private; int byt_per_blk = device->sizes.bp_block; + unsigned long reloc_sector = req->sector + + device->major_info->gendisk.part[MINOR (req->rq_dev)].start_sect; if (req->cmd == READ) { rw_cmd = DASD_FBA_CCW_READ; } else if (req->cmd == WRITE) { rw_cmd = DASD_FBA_CCW_WRITE; } else { - PRINT_ERR ("Unknown command %d\n", req->cmd); - return NULL; + + MESSAGE (KERN_ERR, + "Unknown command %d\n", + req->cmd); + + return ERR_PTR(-EINVAL); } /* Build the request */ /* count hs to prevent errors, when bh smaller than block */ @@ -300,14 +337,14 @@ device); } if (!rw_cp) { - return NULL; + return ERR_PTR(-ENOMEM); } DE_data = rw_cp->data; LO_data = rw_cp->data + sizeof (DE_fba_data_t); ccw = rw_cp->cpaddr; if (define_extent (ccw, DE_data, req->cmd, byt_per_blk, - req->sector, req->nr_sectors, rw_cp, device)) { + reloc_sector, req->nr_sectors, rw_cp, device)) { goto clear_rw_cp; } ccw->flags |= CCW_FLAG_CC; @@ -363,11 +400,12 @@ } static int -dasd_fba_fill_info (dasd_device_t * device, dasd_information_t * info) +dasd_fba_fill_info (dasd_device_t * device, dasd_information2_t * info) { int rc = 0; info->label_block = 1; info->FBA_layout = 1; + info->format = DASD_FORMAT_LDL; info->characteristics_size = sizeof (dasd_fba_characteristics_t); memcpy (info->characteristics, &((dasd_fba_private_t *) device->private)->rdc_data, @@ -410,14 +448,18 @@ dump_sense:dasd_fba_dump_sense, int_handler:dasd_int_handler, fill_info:dasd_fba_fill_info, + list:LIST_HEAD_INIT(dasd_fba_discipline.list), }; int dasd_fba_init (void) { int rc = 0; - printk (KERN_INFO PRINTK_HEADER - "%s discipline initializing\n", dasd_fba_discipline.name); + + MESSAGE (KERN_INFO, + "%s discipline initializing", + dasd_fba_discipline.name); + ASCEBC (dasd_fba_discipline.ebcname, 4); dasd_discipline_add (&dasd_fba_discipline); #ifdef CONFIG_DASD_DYNAMIC @@ -426,12 +468,15 @@ for (i = 0; i < sizeof (dasd_fba_known_devices) / sizeof (devreg_t); i++) { - printk (KERN_INFO PRINTK_HEADER - "We are interested in: Dev %04X/%02X @ CU %04X/%02x\n", - dasd_fba_known_devices[i].ci.hc.dtype, - dasd_fba_known_devices[i].ci.hc.dmode, - dasd_fba_known_devices[i].ci.hc.ctype, - dasd_fba_known_devices[i].ci.hc.cmode); + + 
MESSAGE (KERN_INFO, + "We are interested in: " + "Dev %04X/%02X @ CU %04X/%02x", + dasd_fba_known_devices[i].ci.hc.dtype, + dasd_fba_known_devices[i].ci.hc.dmode, + dasd_fba_known_devices[i].ci.hc.ctype, + dasd_fba_known_devices[i].ci.hc.cmode); + s390_device_register (&dasd_fba_known_devices[i]); } } @@ -441,8 +486,11 @@ void dasd_fba_cleanup( void ) { - printk ( KERN_INFO PRINTK_HEADER - "%s discipline cleaning up\n", dasd_fba_discipline.name); + + MESSAGE (KERN_INFO, + "%s discipline cleaning up", + dasd_fba_discipline.name); + #ifdef CONFIG_DASD_DYNAMIC { int i; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/block/dasd_fba.h linux.21rc5-ac2/drivers/s390/block/dasd_fba.h --- linux.21rc5/drivers/s390/block/dasd_fba.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/block/dasd_fba.h 2003-04-25 13:49:34.000000000 +0100 @@ -1,3 +1,15 @@ +/* + * File...........: linux/drivers/s390/block/dasd_fba.h + * Author(s)......: Holger Smolinski + * Horst Hummel + * Bugreports.to..: + * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 + * + * $Revision: 1.6 $ + * + * History of changes + * + */ #ifndef DASD_FBA_H #define DASD_FBA_H diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/block/dasd_int.h linux.21rc5-ac2/drivers/s390/block/dasd_int.h --- linux.21rc5/drivers/s390/block/dasd_int.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/block/dasd_int.h 2003-04-25 13:49:34.000000000 +0100 @@ -1,9 +1,12 @@ /* - * File...........: linux/drivers/s390/block/dasd.c + * File...........: linux/drivers/s390/block/dasd_int.h * Author(s)......: Holger Smolinski + * Horst Hummel * Bugreports.to..: * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * + * $Revision: 1.26 $ + * * History of changes (starts July 2000) * 02/01/01 added dynamic registration of ioctls */ @@ -11,8 +14,6 @@ #ifndef DASD_INT_H #define DASD_INT_H -#define DASD_API_VERSION 0 - #include #define CONFIG_DASD_DYNAMIC @@ -28,14 +29,19 @@ #define DASD_FORMAT_INTENS_WRITE_RECZERO 0x01 #define DASD_FORMAT_INTENS_WRITE_HOMEADR 0x02 -#define DASD_STATE_DEL -1 -#define DASD_STATE_NEW 0 -#define DASD_STATE_KNOWN 1 -#define DASD_STATE_ACCEPT 2 -#define DASD_STATE_INIT 3 -#define DASD_STATE_READY 4 -#define DASD_STATE_ONLINE 5 - +#define DASD_STATE_DEL -1 /* "unknown" */ +#define DASD_STATE_NEW 0 /* memory for dasd_device_t and lowmem ccw/idals allocated */ +#define DASD_STATE_BOXED 1 /* boxed dasd could not be analysed "plugged" */ +#define DASD_STATE_KNOWN 2 /* major_info/devinfo/discipline/devfs-'device'/gendisk - "detected" */ +#define DASD_STATE_ACCEPT 3 /* irq requested - "accepted" */ +#define DASD_STATE_INIT 4 /* init_cqr started - "busy" */ +#define DASD_STATE_READY 5 /* init finished - "fenced(plugged)" */ +#define DASD_STATE_ONLINE 6 /* unplugged "active" */ + +#define DASD_HOTPLUG_EVENT_ADD 0 +#define DASD_HOTPLUG_EVENT_REMOVE 1 +#define DASD_HOTPLUG_EVENT_PARTCHK 2 +#define DASD_HOTPLUG_EVENT_PARTREMOVE 3 #define DASD_FORMAT_INTENS_WRITE_RECZERO 0x01 #define DASD_FORMAT_INTENS_WRITE_HOMEADR 0x02 @@ -61,7 +67,9 @@ #include #include -/* Kernel Version Compatibility section */ +/******************************************************************************** + * SECTION: Kernel Version Compatibility section + ********************************************************************************/ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98)) typedef struct request *request_queue_t; #define 
block_device_operations file_operations @@ -102,6 +110,7 @@ *q = req->next; req->next = NULL; } + #else #define INIT_BLK_DEV(d_major,d_request_fn,d_queue_fn,d_current) \ do { \ @@ -124,9 +133,12 @@ { blkdev_dequeue_request (req); } -#endif +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98)) */ + +/******************************************************************************** + * SECTION: Type definitions + ********************************************************************************/ -/* dasd_range_t are used for dynamic device att-/detachment */ typedef struct dasd_devreg_t { devreg_t devreg; /* the devreg itself */ /* build a linked list of devregs, needed for cleanup */ @@ -153,27 +165,6 @@ #define DASD_SENSE_BIT_2 0x20 #define DASD_SENSE_BIT_3 0x10 -#define check_then_set(where,from,to) \ -do { \ - if ((*(where)) != (from) ) { \ - printk (KERN_ERR PRINTK_HEADER "was %d\n", *(where)); \ - BUG(); \ - } \ - (*(where)) = (to); \ -} while (0) - -#define DASD_MESSAGE(d_loglevel,d_device,d_string,d_args...)\ -do { \ - int d_devno = d_device->devinfo.devno; \ - int d_irq = d_device->devinfo.irq; \ - char *d_name = d_device->name; \ - int d_major = MAJOR(d_device->kdev); \ - int d_minor = MINOR(d_device->kdev); \ - printk(d_loglevel PRINTK_HEADER \ - "/dev/%s(%d:%d),%04x@0x%x:" \ - d_string "\n",d_name,d_major,d_minor,d_devno,d_irq,d_args ); \ -} while(0) - /* * struct dasd_sizes_t * represents all data needed to access dasd with properly set up sectors @@ -196,68 +187,164 @@ ccw_req_t *tail; } dasd_chanq_t; -#define DASD_DEVICE_FORMAT_STRING "Device: %p" -#define DASD_DEVICE_DEBUG_EVENT(d_level, d_device, d_str, d_data...)\ -do {\ - if ( d_device->debug_area != NULL )\ - debug_sprintf_event(d_device->debug_area,d_level,\ - DASD_DEVICE_FORMAT_STRING d_str "\n",\ - d_device, d_data);\ +/* + * struct dasd_lowmem_t + * represents a queue of pages for lowmem request + */ +typedef struct { + struct list_head list; +} dasd_lowmem_t; + +#define DASD_LOWMEM_PAGES 2 /* # of lowmem pages per device (min 2) */ + +/******************************************************************************** + * SECTION: MACROS + ********************************************************************************/ + +/* + * CHECK_THEN_SET + * + * Change 'where' value from 'from' to 'to'. + * BUG if the 'from' value doesn't match. + */ +#define check_then_set(where,from,to) \ +do { \ + if ((*(where)) != (from) ) { \ + printk (KERN_ERR PRINTK_HEADER "was %d\n", *(where)); \ + BUG(); \ + } \ + (*(where)) = (to); \ } while(0) -#define DASD_DEVICE_DEBUG_EXCEPTION(d_level, d_device, d_str, d_data...)\ -do {\ - if ( d_device->debug_area != NULL )\ - debug_sprintf_exception(d_device->debug_area,d_level,\ - DASD_DEVICE_FORMAT_STRING d_str "\n",\ - d_device, d_data);\ + + +/******************************************************************************** + * SECTION: MACROs for klogd and s390 debug feature (dbf) + ********************************************************************************/ + +#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \ +do { \ + if (d_device->debug_area != NULL) \ + debug_sprintf_event(d_device->debug_area, \ + d_level, \ + d_str "\n", \ + d_data); \ +} while(0) + +#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) 
\ +do { \ + if (d_device->debug_area != NULL) \ + debug_sprintf_exception(d_device->debug_area, \ + d_level, \ + d_str "\n", \ + d_data); \ +} while(0) + +#define DBF_EVENT(d_level, d_str, d_data...)\ +do { \ + if (dasd_debug_area != NULL) \ + debug_sprintf_event(dasd_debug_area, \ + d_level,\ + d_str "\n", \ + d_data); \ +} while(0) + +#define DBF_EXC(d_level, d_str, d_data...)\ +do { \ + if (dasd_debug_area != NULL) \ + debug_sprintf_exception(dasd_debug_area, \ + d_level,\ + d_str "\n", \ + d_data); \ } while(0) -#define DASD_DRIVER_FORMAT_STRING "Driver: <[%p]>" -#define DASD_DRIVER_DEBUG_EVENT(d_level, d_fn, d_str, d_data...)\ -do {\ - if ( dasd_debug_area != NULL )\ - debug_sprintf_event(dasd_debug_area, d_level,\ - DASD_DRIVER_FORMAT_STRING #d_fn ":" d_str "\n",\ - d_fn, d_data);\ +/* definition of dbf debug levels */ +#define DBF_EMERG 0 /* system is unusable */ +#define DBF_ALERT 1 /* action must be taken immediately */ +#define DBF_CRIT 2 /* critical conditions */ +#define DBF_ERR 3 /* error conditions */ +#define DBF_WARNING 4 /* warning conditions */ +#define DBF_NOTICE 5 /* normal but significant condition */ +#define DBF_INFO 6 /* informational */ +#define DBF_DEBUG 6 /* debug-level messages */ + +/* messages to be written via klogd and dbf */ +#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\ +do { \ + int d_devno = d_device->devinfo.devno; \ + int d_irq = d_device->devinfo.irq; \ + char *d_name = d_device->name; \ + int d_major = MAJOR(d_device->kdev); \ + int d_minor = MINOR(d_device->kdev); \ +\ + printk(d_loglevel PRINTK_HEADER \ + " /dev/%-7s(%3d:%3d),%04x@%02x: " \ + d_string "\n", \ + d_name, \ + d_major, \ + d_minor, \ + d_devno, \ + d_irq, \ + d_args); \ +\ + DBF_DEV_EVENT(DBF_ALERT, \ + d_device, \ + d_string, \ + d_args); \ } while(0) -#define DASD_DRIVER_DEBUG_EXCEPTION(d_level, d_fn, d_str, d_data...)\ -do {\ - if ( dasd_debug_area != NULL )\ - debug_sprintf_exception(dasd_debug_area, d_level,\ - DASD_DRIVER_FORMAT_STRING #d_fn ":" d_str "\n",\ - d_fn, d_data);\ + +#define MESSAGE(d_loglevel,d_string,d_args...)\ +do { \ + printk(d_loglevel PRINTK_HEADER \ + " " d_string "\n", \ + d_args); \ +\ + DBF_EVENT(DBF_ALERT, \ + d_string, \ + d_args); \ } while(0) struct dasd_device_t; struct request; -/* - * signatures for the functions of dasd_discipline_t +/******************************************************************************** + * SECTION: signatures for the functions of dasd_discipline_t * make typecasts much easier - */ -typedef ccw_req_t *(*dasd_erp_action_fn_t) (ccw_req_t * cqr); + ********************************************************************************/ + +typedef int (*dasd_ck_id_fn_t) (s390_dev_info_t *); +typedef int (*dasd_ck_characteristics_fn_t) (struct dasd_device_t *); +typedef int (*dasd_fill_geometry_fn_t) (struct dasd_device_t *, + struct hd_geometry *); +typedef int (*dasd_do_analysis_fn_t) (struct dasd_device_t *); +typedef int (*dasd_io_starter_fn_t) (ccw_req_t *); +typedef int (*dasd_io_stopper_fn_t) (ccw_req_t *); +typedef int (*dasd_info_fn_t) (struct dasd_device_t *, + dasd_information2_t *); +typedef int (*dasd_use_count_fn_t) (int); +typedef int (*dasd_set_attrib_fn_t) (struct dasd_device_t *, + struct attrib_data_t *); +typedef void (*dasd_int_handler_fn_t) (int irq, void *, + struct pt_regs *); +typedef char * (*dasd_dump_sense_fn_t) (struct dasd_device_t *, + ccw_req_t *); +typedef ccw_req_t *(*dasd_format_fn_t) (struct dasd_device_t *, + struct format_data_t *); +typedef ccw_req_t *(*dasd_init_analysis_fn_t ) 
(struct dasd_device_t *); +typedef ccw_req_t *(*dasd_cp_builder_fn_t) (struct dasd_device_t *, + struct request *); +typedef ccw_req_t *(*dasd_reserve_fn_t) (struct dasd_device_t *); +typedef ccw_req_t *(*dasd_release_fn_t) (struct dasd_device_t *); +typedef ccw_req_t *(*dasd_steal_lock_fn_t) (struct dasd_device_t *); +typedef ccw_req_t *(*dasd_merge_cp_fn_t) (struct dasd_device_t *); +typedef ccw_req_t *(*dasd_erp_action_fn_t) (ccw_req_t * cqr); typedef ccw_req_t *(*dasd_erp_postaction_fn_t) (ccw_req_t * cqr); +typedef ccw_req_t *(*dasd_read_stats_fn_t) (struct dasd_device_t *); -typedef int (*dasd_ck_id_fn_t) (s390_dev_info_t *); -typedef int (*dasd_ck_characteristics_fn_t) (struct dasd_device_t *); -typedef int (*dasd_fill_geometry_fn_t) (struct dasd_device_t *, struct hd_geometry *); -typedef ccw_req_t *(*dasd_format_fn_t) (struct dasd_device_t *, struct format_data_t *); -typedef ccw_req_t *(*dasd_init_analysis_fn_t) (struct dasd_device_t *); -typedef int (*dasd_do_analysis_fn_t) (struct dasd_device_t *); -typedef int (*dasd_io_starter_fn_t) (ccw_req_t *); -typedef int (*dasd_io_stopper_fn_t) (ccw_req_t *); -typedef void (*dasd_int_handler_fn_t)(int irq, void *, struct pt_regs *); -typedef dasd_era_t (*dasd_error_examine_fn_t) (ccw_req_t *, devstat_t * stat); -typedef dasd_erp_action_fn_t (*dasd_error_analyse_fn_t) (ccw_req_t *); -typedef dasd_erp_postaction_fn_t (*dasd_erp_analyse_fn_t) (ccw_req_t *); -typedef ccw_req_t *(*dasd_cp_builder_fn_t)(struct dasd_device_t *,struct request *); -typedef char *(*dasd_dump_sense_fn_t)(struct dasd_device_t *,ccw_req_t *); -typedef ccw_req_t *(*dasd_reserve_fn_t)(struct dasd_device_t *); -typedef ccw_req_t *(*dasd_release_fn_t)(struct dasd_device_t *); -typedef ccw_req_t *(*dasd_steal_lock_fn_t)(struct dasd_device_t *); -typedef ccw_req_t *(*dasd_merge_cp_fn_t)(struct dasd_device_t *); -typedef int (*dasd_info_fn_t) (struct dasd_device_t *, dasd_information_t *); -typedef int (*dasd_use_count_fn_t) (int); +typedef dasd_rssd_perf_stats_t * (*dasd_ret_stats_fn_t) (ccw_req_t *); +typedef dasd_era_t (*dasd_error_examine_fn_t) (ccw_req_t *, + devstat_t * stat); +typedef dasd_erp_action_fn_t (*dasd_error_analyse_fn_t) (ccw_req_t *); +typedef dasd_erp_postaction_fn_t (*dasd_erp_analyse_fn_t) (ccw_req_t *); /* * the dasd_discipline_t is @@ -270,31 +357,31 @@ char ebcname[8]; /* a name used for tagging and printks */ char name[8]; /* a name used for tagging and printks */ int max_blocks; /* maximum number of blocks to be chained */ - dasd_ck_id_fn_t id_check; /* to check sense data */ - dasd_ck_characteristics_fn_t check_characteristics; /* to check the characteristics */ - dasd_init_analysis_fn_t init_analysis; /* to start the analysis of the volume */ - dasd_do_analysis_fn_t do_analysis; /* to complete the analysis of the volume */ - dasd_fill_geometry_fn_t fill_geometry; /* to set up hd_geometry */ - dasd_io_starter_fn_t start_IO; - dasd_io_stopper_fn_t term_IO; - dasd_format_fn_t format_device; /* to format the device */ - dasd_error_examine_fn_t examine_error; - dasd_error_analyse_fn_t erp_action; - dasd_erp_analyse_fn_t erp_postaction; - dasd_cp_builder_fn_t build_cp_from_req; - dasd_dump_sense_fn_t dump_sense; - dasd_int_handler_fn_t int_handler; - dasd_reserve_fn_t reserve; - dasd_release_fn_t release; - dasd_steal_lock_fn_t steal_lock; - dasd_merge_cp_fn_t merge_cp; - dasd_info_fn_t fill_info; + dasd_ck_id_fn_t id_check; /* check sense data */ + dasd_ck_characteristics_fn_t check_characteristics; /* check the characteristics */ + 
dasd_init_analysis_fn_t init_analysis; /* start the analysis of the volume */ + dasd_do_analysis_fn_t do_analysis; /* complete the analysis of the volume */ + dasd_fill_geometry_fn_t fill_geometry; /* set up hd_geometry */ + dasd_io_starter_fn_t start_IO; + dasd_io_stopper_fn_t term_IO; + dasd_format_fn_t format_device; /* format the device */ + dasd_error_examine_fn_t examine_error; + dasd_error_analyse_fn_t erp_action; + dasd_erp_analyse_fn_t erp_postaction; + dasd_cp_builder_fn_t build_cp_from_req; + dasd_dump_sense_fn_t dump_sense; + dasd_int_handler_fn_t int_handler; + dasd_reserve_fn_t reserve; + dasd_release_fn_t release; + dasd_steal_lock_fn_t steal_lock; + dasd_merge_cp_fn_t merge_cp; + dasd_info_fn_t fill_info; + dasd_read_stats_fn_t read_stats; + dasd_ret_stats_fn_t ret_stats; /* return performance statistics */ + dasd_set_attrib_fn_t set_attrib; /* set attributes (cache operations */ struct list_head list; /* used for list of disciplines */ } dasd_discipline_t; -#define DASD_DEFAULT_FEATURES 0 -#define DASD_FEATURE_READONLY 1 - /* dasd_range_t are used for ordering the DASD devices */ typedef struct dasd_range_t { unsigned int from; /* first DASD in range */ @@ -326,7 +413,9 @@ struct dasd_chanq_t queue; wait_queue_head_t wait_q; request_queue_t *request_queue; - struct timer_list timer; + struct timer_list timer; /* used for start_IO */ + struct timer_list late_timer; /* to get late devices online */ + struct timer_list blocking_timer; /* used for ERP */ devstat_t dev_status; /* needed ONLY!! for request_irq */ dasd_sizes_t sizes; char name[16]; /* The name of the device in /dev */ @@ -340,35 +429,37 @@ dasd_profile_info_t profile; ccw_req_t *init_cqr; atomic_t plugged; - void* lowmem_cqr; - void* lowmem_ccws; - void* lowmem_idals; - void* lowmem_idal_ptr; + int accessible; /* set to !=0 if doing IO is permitted */ + struct list_head lowmem_pool; } dasd_device_t; -int dasd_init (void); -void dasd_discipline_add(dasd_discipline_t *); -void dasd_discipline_del(dasd_discipline_t *); -int dasd_start_IO (ccw_req_t *); -int dasd_term_IO (ccw_req_t *); -void dasd_int_handler (int , void *, struct pt_regs *); -ccw_req_t *dasd_default_erp_action (ccw_req_t *); -ccw_req_t *dasd_default_erp_postaction (ccw_req_t *); -inline void dasd_chanq_deq (dasd_chanq_t *, ccw_req_t *); -inline void dasd_chanq_enq (dasd_chanq_t *, ccw_req_t *); -inline void dasd_chanq_enq_head (dasd_chanq_t *, ccw_req_t *); -ccw_req_t *dasd_alloc_request (char *, int, int, dasd_device_t *); -void dasd_free_request (ccw_req_t *, dasd_device_t *); -int dasd_oper_handler (int irq, devreg_t * devreg); -void dasd_schedule_bh (dasd_device_t *); -int dasd_sleep_on_req(ccw_req_t*); -int dasd_set_normalized_cda ( ccw1_t * cp, unsigned long address, ccw_req_t* request, dasd_device_t* device ); -dasd_device_t * dasd_device_from_kdev (kdev_t kdev); + +int dasd_init (void); +void dasd_discipline_add (dasd_discipline_t *); +void dasd_discipline_del (dasd_discipline_t *); +int dasd_start_IO (ccw_req_t *); +int dasd_term_IO (ccw_req_t *); +void dasd_int_handler (int , void *, struct pt_regs *); +void dasd_free_request (ccw_req_t *, dasd_device_t *); +int dasd_oper_handler (int irq, devreg_t * devreg); +void dasd_schedule_bh (dasd_device_t *); +void dasd_schedule_bh_timed (unsigned long); +int dasd_sleep_on_req (ccw_req_t*); +int dasd_set_normalized_cda (ccw1_t * cp, unsigned long address, + ccw_req_t* request, + dasd_device_t* device ); +ccw_req_t * dasd_default_erp_action (ccw_req_t *); +ccw_req_t * dasd_default_erp_postaction 
(ccw_req_t *); +inline void dasd_chanq_deq (dasd_chanq_t *, ccw_req_t *); +inline void dasd_chanq_enq (dasd_chanq_t *, ccw_req_t *); +inline void dasd_chanq_enq_head (dasd_chanq_t *, ccw_req_t *); +ccw_req_t * dasd_alloc_request (char *, int, int, dasd_device_t *); +dasd_device_t * dasd_device_from_kdev (kdev_t kdev); extern debug_info_t *dasd_debug_area; extern int (*genhd_dasd_name) (char *, int, int, struct gendisk *); extern int (*genhd_dasd_ioctl) (struct inode *inp, struct file *filp, - unsigned int no, unsigned long data); + unsigned int no, unsigned long data); #endif /* __KERNEL__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/con3215.c linux.21rc5-ac2/drivers/s390/char/con3215.c --- linux.21rc5/drivers/s390/char/con3215.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/con3215.c 2003-05-28 15:40:29.000000000 +0100 @@ -31,8 +31,7 @@ #include #include #include - -#include "ctrlchar.h" +#include #define NR_3215 1 #define NR_3215_REQ (4*NR_3215) @@ -462,30 +461,35 @@ if ((raw = req->info) == NULL) return; /* That shouldn't happen ... */ if (req->type == RAW3215_READ && raw->tty != NULL) { - char *cchar; + unsigned int cchar; tty = raw->tty; - count = 160 - req->residual; - if (MACHINE_IS_P390) { - slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE); - if (count > slen) - count = slen; - } else + count = 160 - req->residual; + if (MACHINE_IS_P390) { + slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE); + if (count > slen) + count = slen; + } else if (count >= TTY_FLIPBUF_SIZE - tty->flip.count) count = TTY_FLIPBUF_SIZE - tty->flip.count - 1; EBCASC(raw->inbuf, count); - if ((cchar = ctrlchar_handle(raw->inbuf, count, tty))) { - if (cchar == (char *)-1) - goto in_out; + cchar = ctrlchar_handle(raw->inbuf, count, tty, 1); + switch (cchar & CTRLCHAR_MASK) { + case CTRLCHAR_SYSRQ: + break; + + case CTRLCHAR_CTRL: tty->flip.count++; *tty->flip.flag_buf_ptr++ = TTY_NORMAL; - *tty->flip.char_buf_ptr++ = *cchar; + *tty->flip.char_buf_ptr++ = cchar; tty_flip_buffer_push(raw->tty); - } else { + break; + + case CTRLCHAR_NONE: memcpy(tty->flip.char_buf_ptr, raw->inbuf, count); if (count < 2 || - (strncmp(raw->inbuf+count-2, "^n", 2) || + (strncmp(raw->inbuf+count-2, "^n", 2) && strncmp(raw->inbuf+count-2, "\252n", 2)) ) { /* don't add the auto \n */ tty->flip.char_buf_ptr[count] = '\n'; @@ -498,12 +502,12 @@ tty->flip.flag_buf_ptr += count; tty->flip.count += count; tty_flip_buffer_push(raw->tty); + break; } } else if (req->type == RAW3215_WRITE) { raw->count -= req->len; raw->written -= req->len; } -in_out: raw->flags &= ~RAW3215_WORKING; raw3215_free_req(req); /* check for empty wait */ @@ -823,7 +827,8 @@ * The console structure for the 3215 console */ static struct console con3215 = { - name: "tty3215", + name: "ttyS", + index: 0, write: con3215_write, device: con3215_device, unblank: con3215_unblank, @@ -897,7 +902,7 @@ raw3215_info *raw; raw = (raw3215_info *) tty->driver_data; - if (raw == NULL || tty->count > 1) + if (raw == NULL || atomic_read(&tty->count) > 1) return; tty->closing = 1; /* Shutdown the terminal */ @@ -1120,6 +1125,9 @@ */ void __init tty3215_init(void) { + /* Don't bother registering the tty if we already skipped the console */ + if (!CONSOLE_IS_3215) + return; /* * Initialize the tty_driver structure * Entries in tty3215_driver that are NOT initialized: diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/ctrlchar.c linux.21rc5-ac2/drivers/s390/char/ctrlchar.c --- 
linux.21rc5/drivers/s390/char/ctrlchar.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/ctrlchar.c 2003-05-28 15:40:29.000000000 +0100 @@ -8,32 +8,30 @@ */ #include -#include -#include -#include - +#include #include +#include +#include -#include -#include -#include -#include -#include +#include #ifdef CONFIG_MAGIC_SYSRQ static int ctrlchar_sysrq_key; static struct tq_struct ctrlchar_tq; static void -ctrlchar_handle_sysrq(struct tty_struct *tty) { - handle_sysrq(ctrlchar_sysrq_key, NULL, NULL, tty); +ctrlchar_handle_sysrq(void *tty) +{ + handle_sysrq(ctrlchar_sysrq_key, NULL, NULL, (struct tty_struct*) tty); } #endif -void ctrlchar_init(void) { +void +ctrlchar_init(void) +{ #ifdef CONFIG_MAGIC_SYSRQ static int init_done = 0; - + if (init_done++) return; INIT_LIST_HEAD(&ctrlchar_tq.list); @@ -48,49 +46,44 @@ * @param buf Console input buffer. * @param len Length of valid data in buffer. * @param tty The tty struct for this console. - * @return NULL, if nothing matched, (char *)-1, if buffer contents - * should be ignored, otherwise pointer to char to be inserted. + * @return CTRLCHAR_NONE, if nothing matched, + * CTRLCHAR_SYSRQ, if sysrq was encountered + * otherwise char to be inserted logically or'ed + * with CTRLCHAR_CTRL */ -char *ctrlchar_handle(const char *buf, int len, struct tty_struct *tty) { - - static char ret; - +unsigned int +ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty, + int is_console) +{ if ((len < 2) || (len > 3)) - return NULL; + return CTRLCHAR_NONE; + /* hat is 0xb1 in codepage 037 (US etc.) and thus */ /* converted to 0x5e in ascii ('^') */ if ((buf[0] != '^') && (buf[0] != '\252')) - return NULL; - switch (buf[1]) { + return CTRLCHAR_NONE; + #ifdef CONFIG_MAGIC_SYSRQ - case '-': - if (len == 3) { - ctrlchar_sysrq_key = buf[2]; - ctrlchar_tq.data = tty; - queue_task(&ctrlchar_tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); - return (char *)-1; - } - break; + /* racy */ + if (is_console && len == 3 && buf[1] == '-') { + ctrlchar_sysrq_key = buf[2]; + ctrlchar_tq.data = tty; + queue_task(&ctrlchar_tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); + return CTRLCHAR_SYSRQ; + } #endif - case 'c': - if (len == 2) { - ret = INTR_CHAR(tty); - return &ret; - } - break; - case 'd': - if (len == 2) { - ret = EOF_CHAR(tty); - return &ret; - } - break; - case 'z': - if (len == 2) { - ret = SUSP_CHAR(tty); - return &ret; - } - break; + + if (len != 2) + return CTRLCHAR_NONE; + + switch (tolower(buf[1])) { + case 'c': + return INTR_CHAR(tty) | CTRLCHAR_CTRL; + case 'd': + return EOF_CHAR(tty) | CTRLCHAR_CTRL; + case 'z': + return SUSP_CHAR(tty) | CTRLCHAR_CTRL; } - return NULL; + return CTRLCHAR_NONE; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/ctrlchar.h linux.21rc5-ac2/drivers/s390/char/ctrlchar.h --- linux.21rc5/drivers/s390/char/ctrlchar.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/ctrlchar.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -/* - * drivers/s390/char/ctrlchar.c - * Unified handling of special chars. 
- * - * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Fritz Elfert - * - */ - -#include -#include -#include - -extern void ctrlchar_init(void); -extern char *ctrlchar_handle(const char *buf, int len, struct tty_struct *tty); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/hwc_con.c linux.21rc5-ac2/drivers/s390/char/hwc_con.c --- linux.21rc5/drivers/s390/char/hwc_con.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/hwc_con.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,89 +0,0 @@ -/* - * drivers/s390/char/hwc_con.c - * HWC line mode console driver - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hwc_rw.h" - -#ifdef CONFIG_HWC_CONSOLE - -#define hwc_console_major 4 -#define hwc_console_minor 64 -#define hwc_console_name "console" - -void hwc_console_write (struct console *, const char *, unsigned int); -kdev_t hwc_console_device (struct console *); -void hwc_console_unblank (void); - -#define HWC_CON_PRINT_HEADER "hwc console driver: " - -struct console hwc_console = { - name: hwc_console_name, - write: hwc_console_write, - device: hwc_console_device, - unblank:hwc_console_unblank, - flags: CON_PRINTBUFFER, -}; - -void -hwc_console_write ( - struct console *console, - const char *message, - unsigned int count) -{ - - if (console->device (console) != hwc_console.device (&hwc_console)) { - - hwc_printk (KERN_WARNING HWC_CON_PRINT_HEADER - "hwc_console_write() called with wrong " - "device number"); - return; - } - hwc_write (0, message, count); -} - -kdev_t -hwc_console_device (struct console * c) -{ - return MKDEV (hwc_console_major, hwc_console_minor); -} - -void -hwc_console_unblank (void) -{ - hwc_unblank (); -} - -#endif - -void __init -hwc_console_init (void) -{ - if (!MACHINE_HAS_HWC) - return; - - if (hwc_init () == 0) { -#ifdef CONFIG_HWC_CONSOLE - - if (CONSOLE_IS_HWC) - register_console (&hwc_console); -#endif - } else - panic (HWC_CON_PRINT_HEADER "hwc initialisation failed !"); - - return; -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/hwc_cpi.c linux.21rc5-ac2/drivers/s390/char/hwc_cpi.c --- linux.21rc5/drivers/s390/char/hwc_cpi.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/hwc_cpi.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,211 +0,0 @@ - -/* - * Author: Martin Peschke - * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "hwc_rw.h" -#include "hwc.h" - -#define CPI_RETRIES 3 -#define CPI_SLEEP_TICKS 50 - -#define CPI_LENGTH_SYSTEM_TYPE 8 -#define CPI_LENGTH_SYSTEM_NAME 8 -#define CPI_LENGTH_SYSPLEX_NAME 8 - -typedef struct { - _EBUF_HEADER - u8 id_format; - u8 reserved0; - u8 system_type[CPI_LENGTH_SYSTEM_TYPE]; - u64 reserved1; - u8 system_name[CPI_LENGTH_SYSTEM_NAME]; - u64 reserved2; - u64 system_level; - u64 reserved3; - u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME]; - u8 reserved4[16]; -} __attribute__ ((packed)) - -cpi_evbuf_t; - -typedef struct _cpi_hwcb_t { - _HWCB_HEADER - cpi_evbuf_t cpi_evbuf; -} __attribute__ ((packed)) - -cpi_hwcb_t; - -cpi_hwcb_t *cpi_hwcb; - -static int __init cpi_module_init (void); -static void __exit cpi_module_exit (void); - -module_init (cpi_module_init); -module_exit 
(cpi_module_exit); - -MODULE_AUTHOR ( - "Martin Peschke, IBM Deutschland Entwicklung GmbH " - ""); - -MODULE_DESCRIPTION ( - "identify this operating system instance to the S/390 or zSeries hardware"); - -static char *system_name = NULL; -MODULE_PARM (system_name, "s"); -MODULE_PARM_DESC (system_name, "e.g. hostname - max. 8 characters"); - -static char *sysplex_name = NULL; -#ifdef ALLOW_SYSPLEX_NAME -MODULE_PARM (sysplex_name, "s"); -MODULE_PARM_DESC (sysplex_name, "if applicable - max. 8 characters"); -#endif - -static char *system_type = "LINUX"; - -hwc_request_t cpi_request = -{}; - -hwc_callback_t cpi_callback; - -static DECLARE_MUTEX_LOCKED (sem); - -static int __init -cpi_module_init (void) -{ - int retval; - int system_type_length; - int system_name_length; - int sysplex_name_length = 0; - int retries; - - if (!MACHINE_HAS_HWC) { - printk ("cpi: bug: hardware console not present\n"); - retval = -EINVAL; - goto out; - } - if (!system_type) { - printk ("cpi: bug: no system type specified\n"); - retval = -EINVAL; - goto out; - } - system_type_length = strlen (system_type); - if (system_type_length > CPI_LENGTH_SYSTEM_NAME) { - printk ("cpi: bug: system type has length of %i characters - " - "only %i characters supported\n", - system_type_length, - CPI_LENGTH_SYSTEM_TYPE); - retval = -EINVAL; - goto out; - } - if (!system_name) { - printk ("cpi: no system name specified\n"); - retval = -EINVAL; - goto out; - } - system_name_length = strlen (system_name); - if (system_name_length > CPI_LENGTH_SYSTEM_NAME) { - printk ("cpi: system name has length of %i characters - " - "only %i characters supported\n", - system_name_length, - CPI_LENGTH_SYSTEM_NAME); - retval = -EINVAL; - goto out; - } - if (sysplex_name) { - sysplex_name_length = strlen (sysplex_name); - if (sysplex_name_length > CPI_LENGTH_SYSPLEX_NAME) { - printk ("cpi: sysplex name has length of %i characters - " - "only %i characters supported\n", - sysplex_name_length, - CPI_LENGTH_SYSPLEX_NAME); - retval = -EINVAL; - goto out; - } - } - cpi_hwcb = kmalloc (sizeof (cpi_hwcb_t), GFP_KERNEL); - if (!cpi_hwcb) { - printk ("cpi: no storage to fulfill request\n"); - retval = -ENOMEM; - goto out; - } - memset (cpi_hwcb, 0, sizeof (cpi_hwcb_t)); - - cpi_hwcb->length = sizeof (cpi_hwcb_t); - cpi_hwcb->cpi_evbuf.length = sizeof (cpi_evbuf_t); - cpi_hwcb->cpi_evbuf.type = 0x0B; - - memset (cpi_hwcb->cpi_evbuf.system_type, ' ', CPI_LENGTH_SYSTEM_TYPE); - memcpy (cpi_hwcb->cpi_evbuf.system_type, system_type, system_type_length); - HWC_ASCEBC_STR (cpi_hwcb->cpi_evbuf.system_type, CPI_LENGTH_SYSTEM_TYPE); - EBC_TOUPPER (cpi_hwcb->cpi_evbuf.system_type, CPI_LENGTH_SYSTEM_TYPE); - - memset (cpi_hwcb->cpi_evbuf.system_name, ' ', CPI_LENGTH_SYSTEM_NAME); - memcpy (cpi_hwcb->cpi_evbuf.system_name, system_name, system_name_length); - HWC_ASCEBC_STR (cpi_hwcb->cpi_evbuf.system_name, CPI_LENGTH_SYSTEM_NAME); - EBC_TOUPPER (cpi_hwcb->cpi_evbuf.system_name, CPI_LENGTH_SYSTEM_NAME); - - cpi_hwcb->cpi_evbuf.system_level = LINUX_VERSION_CODE; - - if (sysplex_name) { - memset (cpi_hwcb->cpi_evbuf.sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME); - memcpy (cpi_hwcb->cpi_evbuf.sysplex_name, sysplex_name, sysplex_name_length); - HWC_ASCEBC_STR (cpi_hwcb->cpi_evbuf.sysplex_name, CPI_LENGTH_SYSPLEX_NAME); - EBC_TOUPPER (cpi_hwcb->cpi_evbuf.sysplex_name, CPI_LENGTH_SYSPLEX_NAME); - } - cpi_request.block = cpi_hwcb; - cpi_request.word = HWC_CMDW_WRITEDATA; - cpi_request.callback = cpi_callback; - - for (retries = CPI_RETRIES; retries; retries--) { - retval = hwc_send 
(&cpi_request); - if (retval) { - - set_current_state (TASK_INTERRUPTIBLE); - schedule_timeout (CPI_SLEEP_TICKS); - } else { - - down (&sem); - - switch (cpi_hwcb->response_code) { - case 0x0020: - printk ("cpi: succeeded\n"); - break; - default: - printk ("cpi: failed with response code 0x%x\n", - cpi_hwcb->response_code); - } - goto free; - } - } - - printk ("cpi: failed (%i)\n", retval); - - free: - kfree (cpi_hwcb); - - out: - return retval; -} - -static void __exit -cpi_module_exit (void) -{ - printk ("cpi: exit\n"); -} - -void -cpi_callback (hwc_request_t * req) -{ - up (&sem); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/hwc.h linux.21rc5-ac2/drivers/s390/char/hwc.h --- linux.21rc5/drivers/s390/char/hwc.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/hwc.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,275 +0,0 @@ -/* - * drivers/s390/char/hwc.h - * - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - * - * - * - */ - -#ifndef __HWC_H__ -#define __HWC_H__ - -#define HWC_EXT_INT_PARAM_ADDR 0xFFFFFFF8 -#define HWC_EXT_INT_PARAM_PEND 0x00000001 - -#define ET_OpCmd 0x01 -#define ET_Msg 0x02 -#define ET_StateChange 0x08 -#define ET_PMsgCmd 0x09 -#define ET_CntlProgOpCmd 0x20 -#define ET_CntlProgIdent 0x0B -#define ET_SigQuiesce 0x1D - -#define ET_OpCmd_Mask 0x80000000 -#define ET_Msg_Mask 0x40000000 -#define ET_StateChange_Mask 0x01000000 -#define ET_PMsgCmd_Mask 0x00800000 -#define ET_CtlProgOpCmd_Mask 0x00000001 -#define ET_CtlProgIdent_Mask 0x00200000 -#define ET_SigQuiesce_Mask 0x00000008 - -#define GMF_DOM 0x8000 -#define GMF_SndAlrm 0x4000 -#define GMF_HoldMsg 0x2000 - -#define LTF_CntlText 0x8000 -#define LTF_LabelText 0x4000 -#define LTF_DataText 0x2000 -#define LTF_EndText 0x1000 -#define LTF_PromptText 0x0800 - -#define HWC_COMMAND_INITIATED 0 -#define HWC_BUSY 2 -#define HWC_NOT_OPERATIONAL 3 - -#define hwc_cmdw_t u32; - -#define HWC_CMDW_READDATA 0x00770005 - -#define HWC_CMDW_WRITEDATA 0x00760005 - -#define HWC_CMDW_WRITEMASK 0x00780005 - -#define GDS_ID_MDSMU 0x1310 - -#define GDS_ID_MDSRouteInfo 0x1311 - -#define GDS_ID_AgUnWrkCorr 0x1549 - -#define GDS_ID_SNACondReport 0x1532 - -#define GDS_ID_CPMSU 0x1212 - -#define GDS_ID_RoutTargInstr 0x154D - -#define GDS_ID_OpReq 0x8070 - -#define GDS_ID_TextCmd 0x1320 - -#define GDS_KEY_SelfDefTextMsg 0x31 - -#define _HWCB_HEADER u16 length; \ - u8 function_code; \ - u8 control_mask[3]; \ - u16 response_code; - -#define _EBUF_HEADER u16 length; \ - u8 type; \ - u8 flags; \ - u16 _reserved; - -typedef struct { - _EBUF_HEADER -} __attribute__ ((packed)) - -evbuf_t; - -#define _MDB_HEADER u16 length; \ - u16 type; \ - u32 tag; \ - u32 revision_code; - -#define _GO_HEADER u16 length; \ - u16 type; \ - u32 domid; \ - u8 hhmmss_time[8]; \ - u8 th_time[3]; \ - u8 _reserved_0; \ - u8 dddyyyy_date[7]; \ - u8 _reserved_1; \ - u16 general_msg_flags; \ - u8 _reserved_2[10]; \ - u8 originating_system_name[8]; \ - u8 job_guest_name[8]; - -#define _MTO_HEADER u16 length; \ - u16 type; \ - u16 line_type_flags; \ - u8 alarm_control; \ - u8 _reserved[3]; - -typedef struct { - _GO_HEADER -} __attribute__ ((packed)) - -go_t; - -typedef struct { - go_t go; -} __attribute__ ((packed)) - -mdb_body_t; - -typedef struct { - _MDB_HEADER - mdb_body_t mdb_body; -} __attribute__ ((packed)) - -mdb_t; - -typedef struct { - _EBUF_HEADER - mdb_t mdb; -} __attribute__ ((packed)) - -msgbuf_t; - -typedef struct { - 
_HWCB_HEADER - msgbuf_t msgbuf; -} __attribute__ ((packed)) - -write_hwcb_t; - -typedef struct { - _MTO_HEADER -} __attribute__ ((packed)) - -mto_t; - -static write_hwcb_t write_hwcb_template = -{ - sizeof (write_hwcb_t), - 0x00, - { - 0x00, - 0x00, - 0x00 - }, - 0x0000, - { - sizeof (msgbuf_t), - ET_Msg, - 0x00, - 0x0000, - { - sizeof (mdb_t), - 0x0001, - 0xD4C4C240, - 0x00000001, - { - { - sizeof (go_t), - 0x0001 - - } - } - } - } -}; - -static mto_t mto_template = -{ - sizeof (mto_t), - 0x0004, - LTF_EndText, - 0x00 -}; - -typedef u32 _hwcb_mask_t; - -typedef struct { - _HWCB_HEADER - u16 _reserved; - u16 mask_length; - _hwcb_mask_t cp_receive_mask; - _hwcb_mask_t cp_send_mask; - _hwcb_mask_t hwc_receive_mask; - _hwcb_mask_t hwc_send_mask; -} __attribute__ ((packed)) - -init_hwcb_t; - -static init_hwcb_t init_hwcb_template = -{ - sizeof (init_hwcb_t), - 0x00, - { - 0x00, - 0x00, - 0x00 - }, - 0x0000, - 0x0000, - sizeof (_hwcb_mask_t), - ET_OpCmd_Mask | ET_PMsgCmd_Mask | - ET_StateChange_Mask | ET_SigQuiesce_Mask, - ET_Msg_Mask | ET_PMsgCmd_Mask | ET_CtlProgIdent_Mask -}; - -typedef struct { - _EBUF_HEADER - u8 validity_hwc_active_facility_mask:1; - u8 validity_hwc_receive_mask:1; - u8 validity_hwc_send_mask:1; - u8 validity_read_data_function_mask:1; - u16 _zeros:12; - u16 mask_length; - u64 hwc_active_facility_mask; - _hwcb_mask_t hwc_receive_mask; - _hwcb_mask_t hwc_send_mask; - u32 read_data_function_mask; -} __attribute__ ((packed)) - -statechangebuf_t; - -#define _GDS_VECTOR_HEADER u16 length; \ - u16 gds_id; - -#define _GDS_SUBVECTOR_HEADER u8 length; \ - u8 key; - -typedef struct { - _GDS_VECTOR_HEADER -} __attribute__ ((packed)) - -gds_vector_t; - -typedef struct { - _GDS_SUBVECTOR_HEADER -} __attribute__ ((packed)) - -gds_subvector_t; - -typedef struct { - _HWCB_HEADER -} __attribute__ ((packed)) - -read_hwcb_t; - -static read_hwcb_t read_hwcb_template = -{ - PAGE_SIZE, - 0x00, - { - 0x00, - 0x00, - 0x80 - } -}; - -#endif /* __HWC_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/hwc_rw.c linux.21rc5-ac2/drivers/s390/char/hwc_rw.c --- linux.21rc5/drivers/s390/char/hwc_rw.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/hwc_rw.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,2458 +0,0 @@ -/* - * drivers/s390/char/hwc_rw.c - * driver: reading from and writing to system console on S/390 via HWC - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - * - * - * - * - * - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef MIN -#define MIN(a,b) (((amto_char_sum) - -#define _HWCB_MTO(hwcb) (_LIST(hwcb)->mto_number) - -#define _HWCB_CHAR_LOST(hwcb) (_LIST(hwcb)->mto_char_sum_lost) - -#define _HWCB_MTO_LOST(hwcb) (_LIST(hwcb)->mto_number_lost) - -#define _HWCB_TIMES_LOST(hwcb) (_LIST(hwcb)->times_lost) - -#define _HWCB_NEXT(hwcb) (_LIST(hwcb)->next) - -#define BUF_HWCB_CHAR _HWCB_CHAR(BUF_HWCB) - -#define BUF_HWCB_MTO _HWCB_MTO(BUF_HWCB) - -#define BUF_HWCB_NEXT _HWCB_NEXT(BUF_HWCB) - -#define OUT_HWCB_CHAR _HWCB_CHAR(OUT_HWCB) - -#define OUT_HWCB_MTO _HWCB_MTO(OUT_HWCB) - -#define OUT_HWCB_NEXT _HWCB_NEXT(OUT_HWCB) - -#define BUF_HWCB_CHAR_LOST _HWCB_CHAR_LOST(BUF_HWCB) - -#define BUF_HWCB_MTO_LOST _HWCB_MTO_LOST(BUF_HWCB) - -#define OUT_HWCB_CHAR_LOST _HWCB_CHAR_LOST(OUT_HWCB) - -#define OUT_HWCB_MTO_LOST 
_HWCB_MTO_LOST(OUT_HWCB) - -#define BUF_HWCB_TIMES_LOST _HWCB_TIMES_LOST(BUF_HWCB) - -#include "hwc.h" - -#define __HWC_RW_C__ -#include "hwc_rw.h" -#undef __HWC_RW_C__ - -static unsigned char _obuf[MAX_HWCB_ROOM]; - -static unsigned char - _page[PAGE_SIZE] __attribute__ ((aligned (PAGE_SIZE))); - -typedef unsigned long kmem_pages_t; - -#define MAX_KMEM_PAGES (sizeof(kmem_pages_t) << 3) - -#define HWC_WTIMER_RUNS 1 -#define HWC_FLUSH 2 -#define HWC_INIT 4 -#define HWC_BROKEN 8 -#define HWC_INTERRUPT 16 -#define HWC_PTIMER_RUNS 32 - -static struct { - - hwc_ioctls_t ioctls; - - hwc_ioctls_t init_ioctls; - - unsigned char *hwcb_list_head; - - unsigned char *hwcb_list_tail; - - unsigned short int mto_number; - - unsigned int mto_char_sum; - - unsigned char hwcb_count; - - unsigned long kmem_start; - - unsigned long kmem_end; - - kmem_pages_t kmem_pages; - - unsigned char *obuf; - - unsigned short int obuf_cursor; - - unsigned short int obuf_count; - - unsigned short int obuf_start; - - unsigned char *page; - - u32 current_servc; - - unsigned char *current_hwcb; - - unsigned char write_nonprio:1; - unsigned char write_prio:1; - unsigned char read_nonprio:1; - unsigned char read_prio:1; - unsigned char read_statechange:1; - unsigned char sig_quiesce:1; - - unsigned char flags; - - hwc_high_level_calls_t *calls; - - hwc_request_t *request; - - spinlock_t lock; - - struct timer_list write_timer; - - struct timer_list poll_timer; -} hwc_data = -{ - { - }, - { - 8, - 0, - 80, - 1, - MAX_KMEM_PAGES, - MAX_KMEM_PAGES, - - 0, - - 0x6c - - }, - NULL, - NULL, - 0, - 0, - 0, - 0, - 0, - 0, - _obuf, - 0, - 0, - 0, - _page, - 0, - NULL, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - NULL, - NULL - -}; - -static unsigned long cr0 __attribute__ ((aligned (8))); -static unsigned long cr0_save __attribute__ ((aligned (8))); -static unsigned char psw_mask __attribute__ ((aligned (8))); - -static ext_int_info_t ext_int_info_hwc; - -#define DELAYED_WRITE 0 -#define IMMEDIATE_WRITE 1 - -static signed int do_hwc_write (int from_user, unsigned char *, - unsigned int, - unsigned char); - -unsigned char hwc_ip_buf[512]; - -static asmlinkage int -internal_print (char write_time, char *fmt,...) -{ - va_list args; - int i; - - va_start (args, fmt); - i = vsprintf (hwc_ip_buf, fmt, args); - va_end (args); - return do_hwc_write (0, hwc_ip_buf, i, write_time); -} - -int -hwc_printk (const char *fmt,...) 
-{ - va_list args; - int i; - unsigned long flags; - int retval; - - spin_lock_irqsave (&hwc_data.lock, flags); - - va_start (args, fmt); - i = vsprintf (hwc_ip_buf, fmt, args); - va_end (args); - retval = do_hwc_write (0, hwc_ip_buf, i, IMMEDIATE_WRITE); - - spin_unlock_irqrestore (&hwc_data.lock, flags); - - return retval; -} - -#ifdef DUMP_HWCB_INPUT - -static void -dump_storage_area (unsigned char *area, unsigned short int count) -{ - unsigned short int index; - ioctl_nl_t old_final_nl; - - if (!area || !count) - return; - - old_final_nl = hwc_data.ioctls.final_nl; - hwc_data.ioctls.final_nl = 1; - - internal_print (DELAYED_WRITE, "\n%8x ", area); - - for (index = 0; index < count; index++) { - - if (area[index] <= 0xF) - internal_print (DELAYED_WRITE, "0%x", area[index]); - else - internal_print (DELAYED_WRITE, "%x", area[index]); - - if ((index & 0xF) == 0xF) - internal_print (DELAYED_WRITE, "\n%8x ", - &area[index + 1]); - else if ((index & 3) == 3) - internal_print (DELAYED_WRITE, " "); - } - - internal_print (IMMEDIATE_WRITE, "\n"); - - hwc_data.ioctls.final_nl = old_final_nl; -} -#endif - -static inline u32 -service_call ( - u32 hwc_command_word, - unsigned char hwcb[]) -{ - unsigned int condition_code = 1; - - __asm__ __volatile__ ("L 1, 0(%0) \n\t" - "LRA 2, 0(%1) \n\t" - ".long 0xB2200012 \n\t" - : - :"a" (&hwc_command_word), "a" (hwcb) - :"1", "2", "memory"); - - __asm__ __volatile__ ("IPM %0 \n\t" - "SRL %0, 28 \n\t" - :"=r" (condition_code)); - - return condition_code; -} - -static inline unsigned long -hwc_ext_int_param (void) -{ - u32 param; - - __asm__ __volatile__ ("L %0,128\n\t" - :"=r" (param)); - - return (unsigned long) param; -} - -static int -prepare_write_hwcb (void) -{ - write_hwcb_t *hwcb; - - if (!BUF_HWCB) - return -ENOMEM; - - BUF_HWCB_MTO = 0; - BUF_HWCB_CHAR = 0; - - hwcb = (write_hwcb_t *) BUF_HWCB; - - memcpy (hwcb, &write_hwcb_template, sizeof (write_hwcb_t)); - - return 0; -} - -static int -sane_write_hwcb (void) -{ - unsigned short int lost_msg; - unsigned int lost_char; - unsigned char lost_hwcb; - unsigned char *bad_addr; - unsigned long page; - int page_nr; - - if (!OUT_HWCB) - return -ENOMEM; - - if ((unsigned long) OUT_HWCB & 0xFFF) { - - bad_addr = OUT_HWCB; - -#ifdef DUMP_HWC_WRITE_LIST_ERROR - __asm__ ("LHI 1,0xe30\n\t" - "LRA 2,0(%0) \n\t" - "J .+0 \n\t" - : - : "a" (bad_addr) - : "1", "2"); -#endif - - hwc_data.kmem_pages = 0; - if ((unsigned long) BUF_HWCB & 0xFFF) { - - lost_hwcb = hwc_data.hwcb_count; - lost_msg = ALL_HWCB_MTO; - lost_char = ALL_HWCB_CHAR; - - OUT_HWCB = NULL; - BUF_HWCB = NULL; - ALL_HWCB_MTO = 0; - ALL_HWCB_CHAR = 0; - hwc_data.hwcb_count = 0; - } else { - - lost_hwcb = hwc_data.hwcb_count - 1; - lost_msg = ALL_HWCB_MTO - BUF_HWCB_MTO; - lost_char = ALL_HWCB_CHAR - BUF_HWCB_CHAR; - OUT_HWCB = BUF_HWCB; - ALL_HWCB_MTO = BUF_HWCB_MTO; - ALL_HWCB_CHAR = BUF_HWCB_CHAR; - hwc_data.hwcb_count = 1; - page = (unsigned long) BUF_HWCB; - - if (page >= hwc_data.kmem_start && - page <= hwc_data.kmem_end) { - - page_nr = (int) - ((page - hwc_data.kmem_start) >> 12); - set_bit (page_nr, &hwc_data.kmem_pages); - } - } - - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "found invalid HWCB at address 0x%lx. List corrupted. " - "Lost %i HWCBs with %i characters within up to %i " - "messages. 
Saved %i HWCB with last %i characters i" - "within up to %i messages.\n", - (unsigned long) bad_addr, - lost_hwcb, lost_char, lost_msg, - hwc_data.hwcb_count, - ALL_HWCB_CHAR, ALL_HWCB_MTO); - } - return 0; -} - -static int -reuse_write_hwcb (void) -{ - int retval; - - if (hwc_data.hwcb_count < 2) -#ifdef DUMP_HWC_WRITE_LIST_ERROR - __asm__ ("LHI 1,0xe31\n\t" - "LRA 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "J .+0 \n\t" - : - : "a" (BUF_HWCB), "a" (OUT_HWCB) - : "1", "2", "3"); -#else - return -EPERM; -#endif - - if (hwc_data.current_hwcb == OUT_HWCB) { - - if (hwc_data.hwcb_count > 2) { - - BUF_HWCB_NEXT = OUT_HWCB_NEXT; - - BUF_HWCB = OUT_HWCB_NEXT; - - OUT_HWCB_NEXT = BUF_HWCB_NEXT; - - BUF_HWCB_NEXT = NULL; - } - } else { - - BUF_HWCB_NEXT = OUT_HWCB; - - BUF_HWCB = OUT_HWCB; - - OUT_HWCB = OUT_HWCB_NEXT; - - BUF_HWCB_NEXT = NULL; - } - - BUF_HWCB_TIMES_LOST += 1; - BUF_HWCB_CHAR_LOST += BUF_HWCB_CHAR; - BUF_HWCB_MTO_LOST += BUF_HWCB_MTO; - ALL_HWCB_MTO -= BUF_HWCB_MTO; - ALL_HWCB_CHAR -= BUF_HWCB_CHAR; - - retval = prepare_write_hwcb (); - - if (hwc_data.hwcb_count == hwc_data.ioctls.max_hwcb) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "reached my own limit of " - "allowed buffer space for output (%i HWCBs = %li " - "bytes), skipped content of oldest HWCB %i time(s) " - "(%i lines = %i characters)\n", - hwc_data.ioctls.max_hwcb, - hwc_data.ioctls.max_hwcb * PAGE_SIZE, - BUF_HWCB_TIMES_LOST, - BUF_HWCB_MTO_LOST, - BUF_HWCB_CHAR_LOST); - else - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "page allocation failed, " - "could not expand buffer for output (currently in " - "use: %i HWCBs = %li bytes), skipped content of " - "oldest HWCB %i time(s) (%i lines = %i characters)\n", - hwc_data.hwcb_count, - hwc_data.hwcb_count * PAGE_SIZE, - BUF_HWCB_TIMES_LOST, - BUF_HWCB_MTO_LOST, - BUF_HWCB_CHAR_LOST); - - return retval; -} - -static int -allocate_write_hwcb (void) -{ - unsigned char *page; - int page_nr; - - if (hwc_data.hwcb_count == hwc_data.ioctls.max_hwcb) - return -ENOMEM; - - page_nr = find_first_zero_bit (&hwc_data.kmem_pages, MAX_KMEM_PAGES); - if (page_nr < hwc_data.ioctls.kmem_hwcb) { - - page = (unsigned char *) - (hwc_data.kmem_start + (page_nr << 12)); - set_bit (page_nr, &hwc_data.kmem_pages); - } else - page = (unsigned char *) __get_free_page (GFP_ATOMIC | GFP_DMA); - - if (!page) - return -ENOMEM; - - if (!OUT_HWCB) - OUT_HWCB = page; - else - BUF_HWCB_NEXT = page; - - BUF_HWCB = page; - - BUF_HWCB_NEXT = NULL; - - hwc_data.hwcb_count++; - - prepare_write_hwcb (); - - BUF_HWCB_TIMES_LOST = 0; - BUF_HWCB_MTO_LOST = 0; - BUF_HWCB_CHAR_LOST = 0; - -#ifdef BUFFER_STRESS_TEST - - internal_print ( - DELAYED_WRITE, - "*** " HWC_RW_PRINT_HEADER - "page #%i at 0x%x for buffering allocated. 
***\n", - hwc_data.hwcb_count, page); - -#endif - - return 0; -} - -static int -release_write_hwcb (void) -{ - unsigned long page; - int page_nr; - - if (!hwc_data.hwcb_count) - return -ENODATA; - - if (hwc_data.hwcb_count == 1) { - - prepare_write_hwcb (); - - ALL_HWCB_CHAR = 0; - ALL_HWCB_MTO = 0; - BUF_HWCB_TIMES_LOST = 0; - BUF_HWCB_MTO_LOST = 0; - BUF_HWCB_CHAR_LOST = 0; - } else { - page = (unsigned long) OUT_HWCB; - - ALL_HWCB_MTO -= OUT_HWCB_MTO; - ALL_HWCB_CHAR -= OUT_HWCB_CHAR; - hwc_data.hwcb_count--; - - OUT_HWCB = OUT_HWCB_NEXT; - - if (page >= hwc_data.kmem_start && - page <= hwc_data.kmem_end) { - /*memset((void *) page, 0, PAGE_SIZE); */ - - page_nr = (int) ((page - hwc_data.kmem_start) >> 12); - clear_bit (page_nr, &hwc_data.kmem_pages); - } else - free_page (page); -#ifdef BUFFER_STRESS_TEST - - internal_print ( - DELAYED_WRITE, - "*** " HWC_RW_PRINT_HEADER - "page at 0x%x released, %i pages still in use ***\n", - page, hwc_data.hwcb_count); - -#endif - } - return 0; -} - -static int -add_mto ( - unsigned char *message, - unsigned short int count) -{ - unsigned short int mto_size; - write_hwcb_t *hwcb; - mto_t *mto; - void *dest; - - if (!BUF_HWCB) - return -ENOMEM; - - if (BUF_HWCB == hwc_data.current_hwcb) - return -ENOMEM; - - mto_size = sizeof (mto_t) + count; - - hwcb = (write_hwcb_t *) BUF_HWCB; - - if ((MAX_HWCB_ROOM - hwcb->length) < mto_size) - return -ENOMEM; - - mto = (mto_t *) (((unsigned long) hwcb) + hwcb->length); - - memcpy (mto, &mto_template, sizeof (mto_t)); - - dest = (void *) (((unsigned long) mto) + sizeof (mto_t)); - - memcpy (dest, message, count); - - mto->length += count; - - hwcb->length += mto_size; - hwcb->msgbuf.length += mto_size; - hwcb->msgbuf.mdb.length += mto_size; - - BUF_HWCB_MTO++; - ALL_HWCB_MTO++; - BUF_HWCB_CHAR += count; - ALL_HWCB_CHAR += count; - - return count; -} - -static int write_event_data_1 (void); - -static void -do_poll_hwc (unsigned long data) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_data.lock, flags); - - write_event_data_1 (); - - spin_unlock_irqrestore (&hwc_data.lock, flags); -} - -void -start_poll_hwc (void) -{ - init_timer (&hwc_data.poll_timer); - hwc_data.poll_timer.function = do_poll_hwc; - hwc_data.poll_timer.data = (unsigned long) NULL; - hwc_data.poll_timer.expires = jiffies + 2 * HZ; - add_timer (&hwc_data.poll_timer); - hwc_data.flags |= HWC_PTIMER_RUNS; -} - -static int -write_event_data_1 (void) -{ - unsigned short int condition_code; - int retval; - write_hwcb_t *hwcb = (write_hwcb_t *) OUT_HWCB; - - if ((!hwc_data.write_prio) && - (!hwc_data.write_nonprio) && - hwc_data.read_statechange) - return -EOPNOTSUPP; - - if (hwc_data.current_servc) - return -EBUSY; - - retval = sane_write_hwcb (); - if (retval < 0) - return -EIO; - - if (!OUT_HWCB_MTO) - return -ENODATA; - - if (!hwc_data.write_nonprio && hwc_data.write_prio) - hwcb->msgbuf.type = ET_PMsgCmd; - else - hwcb->msgbuf.type = ET_Msg; - - condition_code = service_call (HWC_CMDW_WRITEDATA, OUT_HWCB); - -#ifdef DUMP_HWC_WRITE_ERROR - if (condition_code != HWC_COMMAND_INITIATED) - __asm__ ("LHI 1,0xe20\n\t" - "L 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "J .+0 \n\t" - : - : "a" (&condition_code), "a" (OUT_HWCB) - : "1", "2", "3"); -#endif - - switch (condition_code) { - case HWC_COMMAND_INITIATED: - hwc_data.current_servc = HWC_CMDW_WRITEDATA; - hwc_data.current_hwcb = OUT_HWCB; - retval = condition_code; - break; - case HWC_BUSY: - retval = -EBUSY; - break; - case HWC_NOT_OPERATIONAL: - start_poll_hwc (); - default: - retval = -EIO; - } - - 
return retval; -} - -static void -flush_hwcbs (void) -{ - while (hwc_data.hwcb_count > 1) - release_write_hwcb (); - - release_write_hwcb (); - - hwc_data.flags &= ~HWC_FLUSH; -} - -static int -write_event_data_2 (u32 ext_int_param) -{ - write_hwcb_t *hwcb; - int retval = 0; - -#ifdef DUMP_HWC_WRITE_ERROR - if ((ext_int_param & HWC_EXT_INT_PARAM_ADDR) - != (unsigned long) hwc_data.current_hwcb) { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "write_event_data_2 : " - "HWCB address does not fit " - "(expected: 0x%lx, got: 0x%lx).\n", - (unsigned long) hwc_data.current_hwcb, - ext_int_param); - return -EINVAL; - } -#endif - - hwcb = (write_hwcb_t *) OUT_HWCB; - -#ifdef DUMP_HWC_WRITE_LIST_ERROR - if (((unsigned char *) hwcb) != hwc_data.current_hwcb) { - __asm__ ("LHI 1,0xe22\n\t" - "LRA 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "LRA 4,0(%2)\n\t" - "LRA 5,0(%3)\n\t" - "J .+0 \n\t" - : - : "a" (OUT_HWCB), - "a" (hwc_data.current_hwcb), - "a" (BUF_HWCB), - "a" (hwcb) - : "1", "2", "3", "4", "5"); - } -#endif - -#ifdef DUMP_HWC_WRITE_ERROR - if (hwcb->response_code != 0x0020) { - __asm__ ("LHI 1,0xe21\n\t" - "LRA 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "LRA 4,0(%2)\n\t" - "LH 5,0(%3)\n\t" - "SRL 5,8\n\t" - "J .+0 \n\t" - : - : "a" (OUT_HWCB), "a" (hwc_data.current_hwcb), - "a" (BUF_HWCB), - "a" (&(hwc_data.hwcb_count)) - : "1", "2", "3", "4", "5"); - } -#endif - - switch (hwcb->response_code) { - case 0x0020: - - retval = OUT_HWCB_CHAR; - release_write_hwcb (); - break; - case 0x0040: - case 0x0340: - case 0x40F0: - if (!hwc_data.read_statechange) { - hwcb->response_code = 0; - start_poll_hwc (); - } - retval = -EIO; - break; - default: - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "write_event_data_2 : " - "failed operation " - "(response code: 0x%x " - "HWCB address: 0x%x).\n", - hwcb->response_code, - hwcb); - retval = -EIO; - } - - if (retval == -EIO) { - - hwcb->control_mask[0] = 0; - hwcb->control_mask[1] = 0; - hwcb->control_mask[2] = 0; - hwcb->response_code = 0; - } - hwc_data.current_servc = 0; - hwc_data.current_hwcb = NULL; - - if (hwc_data.flags & HWC_FLUSH) - flush_hwcbs (); - - return retval; -} - -static void -do_put_line ( - unsigned char *message, - unsigned short count) -{ - - if (add_mto (message, count) != count) { - - if (allocate_write_hwcb () < 0) - reuse_write_hwcb (); - -#ifdef DUMP_HWC_WRITE_LIST_ERROR - if (add_mto (message, count) != count) - __asm__ ("LHI 1,0xe32\n\t" - "LRA 2,0(%0)\n\t" - "L 3,0(%1)\n\t" - "LRA 4,0(%2)\n\t" - "LRA 5,0(%3)\n\t" - "J .+0 \n\t" - : - : "a" (message), "a" (&hwc_data.kmem_pages), - "a" (BUF_HWCB), "a" (OUT_HWCB) - : "1", "2", "3", "4", "5"); -#else - add_mto (message, count); -#endif - } -} - -static void -put_line ( - unsigned char *message, - unsigned short count) -{ - - if ((!hwc_data.obuf_start) && (hwc_data.flags & HWC_WTIMER_RUNS)) { - del_timer (&hwc_data.write_timer); - hwc_data.flags &= ~HWC_WTIMER_RUNS; - } - hwc_data.obuf_start += count; - - do_put_line (message, count); - - hwc_data.obuf_start -= count; -} - -static void -set_alarm (void) -{ - write_hwcb_t *hwcb; - - if ((!BUF_HWCB) || (BUF_HWCB == hwc_data.current_hwcb)) - allocate_write_hwcb (); - - hwcb = (write_hwcb_t *) BUF_HWCB; - hwcb->msgbuf.mdb.mdb_body.go.general_msg_flags |= GMF_SndAlrm; -} - -static void -hwc_write_timeout (unsigned long data) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_data.lock, flags); - - hwc_data.obuf_start = hwc_data.obuf_count; - if (hwc_data.obuf_count) - put_line (hwc_data.obuf, hwc_data.obuf_count); - 
hwc_data.obuf_start = 0; - - hwc_data.obuf_cursor = 0; - hwc_data.obuf_count = 0; - - write_event_data_1 (); - - spin_unlock_irqrestore (&hwc_data.lock, flags); -} - -static int -do_hwc_write ( - int from_user, - unsigned char *msg, - unsigned int count, - unsigned char write_time) -{ - unsigned int i_msg = 0; - unsigned short int spaces = 0; - unsigned int processed_characters = 0; - unsigned char ch; - unsigned short int obuf_count; - unsigned short int obuf_cursor; - unsigned short int obuf_columns; - - if (hwc_data.obuf_start) { - obuf_cursor = 0; - obuf_count = 0; - obuf_columns = MIN (hwc_data.ioctls.columns, - MAX_MESSAGE_SIZE - hwc_data.obuf_start); - } else { - obuf_cursor = hwc_data.obuf_cursor; - obuf_count = hwc_data.obuf_count; - obuf_columns = hwc_data.ioctls.columns; - } - - for (i_msg = 0; i_msg < count; i_msg++) { - if (from_user) - get_user (ch, msg + i_msg); - else - ch = msg[i_msg]; - - processed_characters++; - - if ((obuf_cursor == obuf_columns) && - - (ch != '\n') && - - (ch != '\t')) { - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_columns); - obuf_cursor = 0; - obuf_count = 0; - } - switch (ch) { - - case '\n': - - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_count); - obuf_cursor = 0; - obuf_count = 0; - break; - - case '\a': - - hwc_data.obuf_start += obuf_count; - set_alarm (); - hwc_data.obuf_start -= obuf_count; - - break; - - case '\t': - - do { - if (obuf_cursor < obuf_columns) { - hwc_data.obuf[hwc_data.obuf_start + - obuf_cursor] - = HWC_ASCEBC (' '); - obuf_cursor++; - } else - break; - } while (obuf_cursor % hwc_data.ioctls.width_htab); - - break; - - case '\f': - case '\v': - - spaces = obuf_cursor; - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_count); - obuf_count = obuf_cursor; - while (spaces) { - hwc_data.obuf[hwc_data.obuf_start + - obuf_cursor - spaces] - = HWC_ASCEBC (' '); - spaces--; - } - - break; - - case '\b': - - if (obuf_cursor) - obuf_cursor--; - break; - - case '\r': - - obuf_cursor = 0; - break; - - case 0x00: - - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_count); - obuf_cursor = 0; - obuf_count = 0; - goto out; - - default: - - if (isprint (ch)) - hwc_data.obuf[hwc_data.obuf_start + - obuf_cursor++] - = HWC_ASCEBC (ch); - } - if (obuf_cursor > obuf_count) - obuf_count = obuf_cursor; - } - - if (obuf_cursor) { - - if (hwc_data.obuf_start || - (hwc_data.ioctls.final_nl == 0)) { - - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_count); - obuf_cursor = 0; - obuf_count = 0; - } else { - - if (hwc_data.ioctls.final_nl > 0) { - - if (hwc_data.flags & HWC_WTIMER_RUNS) { - - mod_timer (&hwc_data.write_timer, - jiffies + hwc_data.ioctls.final_nl * HZ / 10); - } else { - - init_timer (&hwc_data.write_timer); - hwc_data.write_timer.function = - hwc_write_timeout; - hwc_data.write_timer.data = - (unsigned long) NULL; - hwc_data.write_timer.expires = - jiffies + - hwc_data.ioctls.final_nl * HZ / 10; - add_timer (&hwc_data.write_timer); - hwc_data.flags |= HWC_WTIMER_RUNS; - } - } else; - - } - } else; - - out: - - if (!hwc_data.obuf_start) { - hwc_data.obuf_cursor = obuf_cursor; - hwc_data.obuf_count = obuf_count; - } - if (write_time == IMMEDIATE_WRITE) - write_event_data_1 (); - - return processed_characters; -} - -signed int -hwc_write (int from_user, const unsigned char *msg, unsigned int count) -{ - unsigned long flags; - int retval; - - spin_lock_irqsave (&hwc_data.lock, flags); - - retval = do_hwc_write (from_user, (unsigned char *) msg, - count, IMMEDIATE_WRITE); - - spin_unlock_irqrestore 
(&hwc_data.lock, flags); - - return retval; -} - -unsigned int -hwc_chars_in_buffer (unsigned char flag) -{ - unsigned short int number = 0; - unsigned long flags; - - spin_lock_irqsave (&hwc_data.lock, flags); - - if (flag & IN_HWCB) - number += ALL_HWCB_CHAR; - - if (flag & IN_WRITE_BUF) - number += hwc_data.obuf_cursor; - - spin_unlock_irqrestore (&hwc_data.lock, flags); - - return number; -} - -static inline int -nr_setbits (kmem_pages_t arg) -{ - int i; - int nr = 0; - - for (i = 0; i < (sizeof (arg) << 3); i++) { - if (arg & 1) - nr++; - arg >>= 1; - } - - return nr; -} - -unsigned int -hwc_write_room (unsigned char flag) -{ - unsigned int number = 0; - unsigned long flags; - write_hwcb_t *hwcb; - - spin_lock_irqsave (&hwc_data.lock, flags); - - if (flag & IN_HWCB) { - - if (BUF_HWCB) { - hwcb = (write_hwcb_t *) BUF_HWCB; - number += MAX_HWCB_ROOM - hwcb->length; - } - number += (hwc_data.ioctls.kmem_hwcb - - nr_setbits (hwc_data.kmem_pages)) * - (MAX_HWCB_ROOM - - (sizeof (write_hwcb_t) + sizeof (mto_t))); - } - if (flag & IN_WRITE_BUF) - number += MAX_HWCB_ROOM - hwc_data.obuf_cursor; - - spin_unlock_irqrestore (&hwc_data.lock, flags); - - return number; -} - -void -hwc_flush_buffer (unsigned char flag) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_data.lock, flags); - - if (flag & IN_HWCB) { - if (hwc_data.current_servc != HWC_CMDW_WRITEDATA) - flush_hwcbs (); - else - hwc_data.flags |= HWC_FLUSH; - } - if (flag & IN_WRITE_BUF) { - hwc_data.obuf_cursor = 0; - hwc_data.obuf_count = 0; - } - spin_unlock_irqrestore (&hwc_data.lock, flags); -} - -unsigned short int -seperate_cases (unsigned char *buf, unsigned short int count) -{ - - unsigned short int i_in; - - unsigned short int i_out = 0; - - unsigned char _case = 0; - - for (i_in = 0; i_in < count; i_in++) { - - if (buf[i_in] == hwc_data.ioctls.delim) { - - if ((i_in + 1 < count) && - (buf[i_in + 1] == hwc_data.ioctls.delim)) { - - buf[i_out] = hwc_data.ioctls.delim; - - i_out++; - - i_in++; - - } else - _case = ~_case; - - } else { - - if (_case) { - - if (hwc_data.ioctls.tolower) - buf[i_out] = _ebc_toupper[buf[i_in]]; - - else - buf[i_out] = _ebc_tolower[buf[i_in]]; - - } else - buf[i_out] = buf[i_in]; - - i_out++; - } - } - - return i_out; -} - -#ifdef DUMP_HWCB_INPUT - -static int -gds_vector_name (u16 id, unsigned char name[]) -{ - int retval = 0; - - switch (id) { - case GDS_ID_MDSMU: - name = "Multiple Domain Support Message Unit"; - break; - case GDS_ID_MDSRouteInfo: - name = "MDS Routing Information"; - break; - case GDS_ID_AgUnWrkCorr: - name = "Agent Unit of Work Correlator"; - break; - case GDS_ID_SNACondReport: - name = "SNA Condition Report"; - break; - case GDS_ID_CPMSU: - name = "CP Management Services Unit"; - break; - case GDS_ID_RoutTargInstr: - name = "Routing and Targeting Instructions"; - break; - case GDS_ID_OpReq: - name = "Operate Request"; - break; - case GDS_ID_TextCmd: - name = "Text Command"; - break; - - default: - name = "unknown GDS variable"; - retval = -EINVAL; - } - - return retval; -} -#endif - -inline static gds_vector_t * -find_gds_vector ( - gds_vector_t * start, void *end, u16 id) -{ - gds_vector_t *vec; - gds_vector_t *retval = NULL; - - vec = start; - - while (((void *) vec) < end) { - if (vec->gds_id == id) { - -#ifdef DUMP_HWCB_INPUT - int retval_name; - unsigned char name[64]; - - retval_name = gds_vector_name (id, name); - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "%s at 0x%x up to 0x%x, length: %d", - name, - (unsigned long) vec, - ((unsigned long) vec) + 
vec->length - 1, - vec->length); - if (retval_name < 0) - internal_print ( - IMMEDIATE_WRITE, - ", id: 0x%x\n", - vec->gds_id); - else - internal_print ( - IMMEDIATE_WRITE, - "\n"); -#endif - - retval = vec; - break; - } - vec = (gds_vector_t *) (((unsigned long) vec) + vec->length); - } - - return retval; -} - -inline static gds_subvector_t * -find_gds_subvector ( - gds_subvector_t * start, void *end, u8 key) -{ - gds_subvector_t *subvec; - gds_subvector_t *retval = NULL; - - subvec = start; - - while (((void *) subvec) < end) { - if (subvec->key == key) { - retval = subvec; - break; - } - subvec = (gds_subvector_t *) - (((unsigned long) subvec) + subvec->length); - } - - return retval; -} - -inline static int -get_input (void *start, void *end) -{ - int count; - - count = ((unsigned long) end) - ((unsigned long) start); - - if (hwc_data.ioctls.tolower) - EBC_TOLOWER (start, count); - - if (hwc_data.ioctls.delim) - count = seperate_cases (start, count); - - HWC_EBCASC_STR (start, count); - - if (hwc_data.ioctls.echo) - do_hwc_write (0, start, count, IMMEDIATE_WRITE); - - if (hwc_data.calls != NULL) - if (hwc_data.calls->move_input != NULL) - (hwc_data.calls->move_input) (start, count); - - return count; -} - -inline static int -eval_selfdeftextmsg (gds_subvector_t * start, void *end) -{ - gds_subvector_t *subvec; - void *subvec_data; - void *subvec_end; - int retval = 0; - - subvec = start; - - while (((void *) subvec) < end) { - subvec = find_gds_subvector (subvec, end, 0x30); - if (!subvec) - break; - subvec_data = (void *) - (((unsigned long) subvec) + - sizeof (gds_subvector_t)); - subvec_end = (void *) - (((unsigned long) subvec) + subvec->length); - retval += get_input (subvec_data, subvec_end); - subvec = (gds_subvector_t *) subvec_end; - } - - return retval; -} - -inline static int -eval_textcmd (gds_subvector_t * start, void *end) -{ - gds_subvector_t *subvec; - gds_subvector_t *subvec_data; - void *subvec_end; - int retval = 0; - - subvec = start; - - while (((void *) subvec) < end) { - subvec = find_gds_subvector ( - subvec, end, GDS_KEY_SelfDefTextMsg); - if (!subvec) - break; - subvec_data = (gds_subvector_t *) - (((unsigned long) subvec) + - sizeof (gds_subvector_t)); - subvec_end = (void *) - (((unsigned long) subvec) + subvec->length); - retval += eval_selfdeftextmsg (subvec_data, subvec_end); - subvec = (gds_subvector_t *) subvec_end; - } - - return retval; -} - -inline static int -eval_cpmsu (gds_vector_t * start, void *end) -{ - gds_vector_t *vec; - gds_subvector_t *vec_data; - void *vec_end; - int retval = 0; - - vec = start; - - while (((void *) vec) < end) { - vec = find_gds_vector (vec, end, GDS_ID_TextCmd); - if (!vec) - break; - vec_data = (gds_subvector_t *) - (((unsigned long) vec) + sizeof (gds_vector_t)); - vec_end = (void *) (((unsigned long) vec) + vec->length); - retval += eval_textcmd (vec_data, vec_end); - vec = (gds_vector_t *) vec_end; - } - - return retval; -} - -inline static int -eval_mdsmu (gds_vector_t * start, void *end) -{ - gds_vector_t *vec; - gds_vector_t *vec_data; - void *vec_end; - int retval = 0; - - vec = find_gds_vector (start, end, GDS_ID_CPMSU); - if (vec) { - vec_data = (gds_vector_t *) - (((unsigned long) vec) + sizeof (gds_vector_t)); - vec_end = (void *) (((unsigned long) vec) + vec->length); - retval = eval_cpmsu (vec_data, vec_end); - } - return retval; -} - -static int -eval_evbuf (gds_vector_t * start, void *end) -{ - gds_vector_t *vec; - gds_vector_t *vec_data; - void *vec_end; - int retval = 0; - - vec = find_gds_vector 
(start, end, GDS_ID_MDSMU); - if (vec) { - vec_data = (gds_vector_t *) - (((unsigned long) vec) + sizeof (gds_vector_t)); - vec_end = (void *) (((unsigned long) vec) + vec->length); - retval = eval_mdsmu (vec_data, vec_end); - } - return retval; -} - -static inline int -eval_hwc_receive_mask (_hwcb_mask_t mask) -{ - - hwc_data.write_nonprio - = ((mask & ET_Msg_Mask) == ET_Msg_Mask); - - hwc_data.write_prio - = ((mask & ET_PMsgCmd_Mask) == ET_PMsgCmd_Mask); - - if (hwc_data.write_prio || hwc_data.write_nonprio) { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can write messages\n"); - return 0; - } else { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can not write messages\n"); - return -1; - } -} - -static inline int -eval_hwc_send_mask (_hwcb_mask_t mask) -{ - - hwc_data.read_statechange - = ((mask & ET_StateChange_Mask) == ET_StateChange_Mask); - if (hwc_data.read_statechange) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can read state change notifications\n"); - else - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can not read state change notifications\n"); - - hwc_data.sig_quiesce - = ((mask & ET_SigQuiesce_Mask) == ET_SigQuiesce_Mask); - if (hwc_data.sig_quiesce) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can receive signal quiesce\n"); - else - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can not receive signal quiesce\n"); - - hwc_data.read_nonprio - = ((mask & ET_OpCmd_Mask) == ET_OpCmd_Mask); - if (hwc_data.read_nonprio) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can read commands\n"); - - hwc_data.read_prio - = ((mask & ET_PMsgCmd_Mask) == ET_PMsgCmd_Mask); - if (hwc_data.read_prio) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can read priority commands\n"); - - if (hwc_data.read_prio || hwc_data.read_nonprio) { - return 0; - } else { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can not read commands from operator\n"); - return -1; - } -} - -static int -eval_statechangebuf (statechangebuf_t * scbuf) -{ - int retval = 0; - - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "HWC state change detected\n"); - - if (scbuf->validity_hwc_active_facility_mask) { - - } - if (scbuf->validity_hwc_receive_mask) { - - if (scbuf->mask_length != 4) { -#ifdef DUMP_HWC_INIT_ERROR - __asm__ ("LHI 1,0xe50\n\t" - "LRA 2,0(%0)\n\t" - "J .+0 \n\t" - : - : "a" (scbuf) - : "1", "2"); -#endif - } else { - - retval += eval_hwc_receive_mask - (scbuf->hwc_receive_mask); - } - } - if (scbuf->validity_hwc_send_mask) { - - if (scbuf->mask_length != 4) { -#ifdef DUMP_HWC_INIT_ERROR - __asm__ ("LHI 1,0xe51\n\t" - "LRA 2,0(%0)\n\t" - "J .+0 \n\t" - : - : "a" (scbuf) - : "1", "2"); -#endif - } else { - - retval += eval_hwc_send_mask - (scbuf->hwc_send_mask); - } - } - if (scbuf->validity_read_data_function_mask) { - - } - return retval; -} - -#ifdef CONFIG_SMP -extern unsigned long cpu_online_map; -static volatile unsigned long cpu_quiesce_map; - -static void -do_load_quiesce_psw (void) -{ - psw_t quiesce_psw; - - clear_bit (smp_processor_id (), &cpu_quiesce_map); - if (smp_processor_id () == 0) { - - while (cpu_quiesce_map != 0) ; - - quiesce_psw.mask = _DW_PSW_MASK; - quiesce_psw.addr = 0xfff; - __load_psw (quiesce_psw); - } - signal_processor (smp_processor_id (), sigp_stop); -} - -static void -do_machine_quiesce (void) -{ - cpu_quiesce_map = cpu_online_map; - smp_call_function (do_load_quiesce_psw, NULL, 0, 0); - do_load_quiesce_psw (); -} - -#else -static void 
-do_machine_quiesce (void) -{ - psw_t quiesce_psw; - - quiesce_psw.mask = _DW_PSW_MASK; - quiesce_psw.addr = 0xfff; - __load_psw (quiesce_psw); -} - -#endif - -static int -process_evbufs (void *start, void *end) -{ - int retval = 0; - evbuf_t *evbuf; - void *evbuf_end; - gds_vector_t *evbuf_data; - - evbuf = (evbuf_t *) start; - while (((void *) evbuf) < end) { - evbuf_data = (gds_vector_t *) - (((unsigned long) evbuf) + sizeof (evbuf_t)); - evbuf_end = (void *) (((unsigned long) evbuf) + evbuf->length); - switch (evbuf->type) { - case ET_OpCmd: - case ET_CntlProgOpCmd: - case ET_PMsgCmd: -#ifdef DUMP_HWCB_INPUT - - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "event buffer " - "at 0x%x up to 0x%x, length: %d\n", - (unsigned long) evbuf, - (unsigned long) (evbuf_end - 1), - evbuf->length); - dump_storage_area ((void *) evbuf, evbuf->length); -#endif - retval += eval_evbuf (evbuf_data, evbuf_end); - break; - case ET_StateChange: - retval += eval_statechangebuf - ((statechangebuf_t *) evbuf); - break; - case ET_SigQuiesce: - - _machine_restart = do_machine_quiesce; - _machine_halt = do_machine_quiesce; - _machine_power_off = do_machine_quiesce; - ctrl_alt_del (); - break; - default: - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: " - "unknown event buffer found, " - "type 0x%x", - evbuf->type); - retval = -ENOSYS; - } - evbuf = (evbuf_t *) evbuf_end; - } - return retval; -} - -static int -unconditional_read_1 (void) -{ - unsigned short int condition_code; - read_hwcb_t *hwcb = (read_hwcb_t *) hwc_data.page; - int retval; - -#if 0 - - if ((!hwc_data.read_prio) && (!hwc_data.read_nonprio)) - return -EOPNOTSUPP; - - if (hwc_data.current_servc) - return -EBUSY; -#endif - - memset (hwcb, 0x00, PAGE_SIZE); - memcpy (hwcb, &read_hwcb_template, sizeof (read_hwcb_t)); - - condition_code = service_call (HWC_CMDW_READDATA, hwc_data.page); - -#ifdef DUMP_HWC_READ_ERROR - if (condition_code == HWC_NOT_OPERATIONAL) - __asm__ ("LHI 1,0xe40\n\t" - "L 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "J .+0 \n\t" - : - : "a" (&condition_code), "a" (hwc_data.page) - : "1", "2", "3"); -#endif - - switch (condition_code) { - case HWC_COMMAND_INITIATED: - hwc_data.current_servc = HWC_CMDW_READDATA; - hwc_data.current_hwcb = hwc_data.page; - retval = condition_code; - break; - case HWC_BUSY: - retval = -EBUSY; - break; - default: - retval = -EIO; - } - - return retval; -} - -static int -unconditional_read_2 (u32 ext_int_param) -{ - read_hwcb_t *hwcb = (read_hwcb_t *) hwc_data.page; - -#ifdef DUMP_HWC_READ_ERROR - if ((hwcb->response_code != 0x0020) && - (hwcb->response_code != 0x0220) && - (hwcb->response_code != 0x60F0) && - (hwcb->response_code != 0x62F0)) - __asm__ ("LHI 1,0xe41\n\t" - "LRA 2,0(%0)\n\t" - "L 3,0(%1)\n\t" - "J .+0\n\t" - : - : "a" (hwc_data.page), "a" (&(hwcb->response_code)) - : "1", "2", "3"); -#endif - - hwc_data.current_servc = 0; - hwc_data.current_hwcb = NULL; - - switch (hwcb->response_code) { - - case 0x0020: - case 0x0220: - return process_evbufs ( - (void *) (((unsigned long) hwcb) + sizeof (read_hwcb_t)), - (void *) (((unsigned long) hwcb) + hwcb->length)); - - case 0x60F0: - case 0x62F0: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: " - "got interrupt and tried to read input, " - "but nothing found (response code=0x%x).\n", - hwcb->response_code); - return 0; - - case 0x0100: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: HWCB boundary violation - this " - "must not occur in a 
correct driver, please contact " - "author\n"); - return -EIO; - - case 0x0300: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: " - "insufficient HWCB length - this must not occur in a " - "correct driver, please contact author\n"); - return -EIO; - - case 0x01F0: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: " - "invalid command - this must not occur in a correct " - "driver, please contact author\n"); - return -EIO; - - case 0x40F0: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: invalid function code\n"); - return -EIO; - - case 0x70F0: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: invalid selection mask\n"); - return -EIO; - - case 0x0040: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: HWC equipment check\n"); - return -EIO; - - default: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: invalid response code %x - this " - "must not occur in a correct driver, please contact " - "author\n", - hwcb->response_code); - return -EIO; - } -} - -static int -write_event_mask_1 (void) -{ - unsigned int condition_code; - int retval; - - condition_code = service_call (HWC_CMDW_WRITEMASK, hwc_data.page); - -#ifdef DUMP_HWC_INIT_ERROR - - if (condition_code == HWC_NOT_OPERATIONAL) - __asm__ ("LHI 1,0xe10\n\t" - "L 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "J .+0\n\t" - : - : "a" (&condition_code), "a" (hwc_data.page) - : "1", "2", "3"); -#endif - - switch (condition_code) { - case HWC_COMMAND_INITIATED: - hwc_data.current_servc = HWC_CMDW_WRITEMASK; - hwc_data.current_hwcb = hwc_data.page; - retval = condition_code; - break; - case HWC_BUSY: - retval = -EBUSY; - break; - default: - retval = -EIO; - } - - return retval; -} - -static int -write_event_mask_2 (u32 ext_int_param) -{ - init_hwcb_t *hwcb = (init_hwcb_t *) hwc_data.page; - int retval = 0; - - if (hwcb->response_code != 0x0020) { -#ifdef DUMP_HWC_INIT_ERROR - __asm__ ("LHI 1,0xe11\n\t" - "LRA 2,0(%0)\n\t" - "L 3,0(%1)\n\t" - "J .+0\n\t" - : - : "a" (hwcb), "a" (&(hwcb->response_code)) - : "1", "2", "3"); -#else - retval = -1; -#endif - } else { - if (hwcb->mask_length != 4) { -#ifdef DUMP_HWC_INIT_ERROR - __asm__ ("LHI 1,0xe52\n\t" - "LRA 2,0(%0)\n\t" - "J .+0 \n\t" - : - : "a" (hwcb) - : "1", "2"); -#endif - } else { - retval += eval_hwc_receive_mask - (hwcb->hwc_receive_mask); - retval += eval_hwc_send_mask (hwcb->hwc_send_mask); - } - } - - hwc_data.current_servc = 0; - hwc_data.current_hwcb = NULL; - - return retval; -} - -static int -set_hwc_ioctls (hwc_ioctls_t * ioctls, char correct) -{ - int retval = 0; - hwc_ioctls_t tmp; - - if (ioctls->width_htab > MAX_MESSAGE_SIZE) { - if (correct) - tmp.width_htab = MAX_MESSAGE_SIZE; - else - retval = -EINVAL; - } else - tmp.width_htab = ioctls->width_htab; - - tmp.echo = ioctls->echo; - - if (ioctls->columns > MAX_MESSAGE_SIZE) { - if (correct) - tmp.columns = MAX_MESSAGE_SIZE; - else - retval = -EINVAL; - } else - tmp.columns = ioctls->columns; - - tmp.final_nl = ioctls->final_nl; - - if (ioctls->max_hwcb < 2) { - if (correct) - tmp.max_hwcb = 2; - else - retval = -EINVAL; - } else - tmp.max_hwcb = ioctls->max_hwcb; - - tmp.tolower = ioctls->tolower; - - if (ioctls->kmem_hwcb > ioctls->max_hwcb) { - if (correct) - tmp.kmem_hwcb = ioctls->max_hwcb; - else - retval = -EINVAL; - } else - tmp.kmem_hwcb = ioctls->kmem_hwcb; - - if (ioctls->kmem_hwcb > MAX_KMEM_PAGES) { - if (correct) - 
ioctls->kmem_hwcb = MAX_KMEM_PAGES; - else - retval = -EINVAL; - } - if (ioctls->kmem_hwcb < 2) { - if (correct) - ioctls->kmem_hwcb = 2; - else - retval = -EINVAL; - } - tmp.delim = ioctls->delim; - - if (!(retval < 0)) - hwc_data.ioctls = tmp; - - return retval; -} - -int -do_hwc_init (void) -{ - int retval; - - memcpy (hwc_data.page, &init_hwcb_template, sizeof (init_hwcb_t)); - - do { - - retval = write_event_mask_1 (); - - if (retval == -EBUSY) { - - hwc_data.flags |= HWC_INIT; - - __ctl_store (cr0, 0, 0); - cr0_save = cr0; - cr0 |= 0x00000200; - cr0 &= 0xFFFFF3AC; - __ctl_load (cr0, 0, 0); - - asm volatile ("STOSM %0,0x01" - :"=m" (psw_mask)::"memory"); - - while (!(hwc_data.flags & HWC_INTERRUPT)) - barrier (); - - asm volatile ("STNSM %0,0xFE" - :"=m" (psw_mask)::"memory"); - - __ctl_load (cr0_save, 0, 0); - - hwc_data.flags &= ~HWC_INIT; - } - } while (retval == -EBUSY); - - if (retval == -EIO) { - hwc_data.flags |= HWC_BROKEN; - printk (HWC_RW_PRINT_HEADER "HWC not operational\n"); - } - return retval; -} - -void hwc_interrupt_handler (struct pt_regs *regs, __u16 code); - -int -hwc_init (void) -{ - int retval; - -#ifdef BUFFER_STRESS_TEST - - init_hwcb_t *hwcb; - int i; - -#endif - - if (register_early_external_interrupt (0x2401, hwc_interrupt_handler, - &ext_int_info_hwc) != 0) - panic ("Couldn't request external interrupts 0x2401"); - - spin_lock_init (&hwc_data.lock); - -#ifdef USE_VM_DETECTION - - if (MACHINE_IS_VM) { - - if (hwc_data.init_ioctls.columns > 76) - hwc_data.init_ioctls.columns = 76; - hwc_data.init_ioctls.tolower = 1; - if (!hwc_data.init_ioctls.delim) - hwc_data.init_ioctls.delim = DEFAULT_CASE_DELIMITER; - } else { - hwc_data.init_ioctls.tolower = 0; - hwc_data.init_ioctls.delim = 0; - } -#endif - retval = set_hwc_ioctls (&hwc_data.init_ioctls, 1); - - hwc_data.kmem_start = (unsigned long) - alloc_bootmem_low_pages (hwc_data.ioctls.kmem_hwcb * PAGE_SIZE); - hwc_data.kmem_end = hwc_data.kmem_start + - hwc_data.ioctls.kmem_hwcb * PAGE_SIZE - 1; - - retval = do_hwc_init (); - - ctl_set_bit (0, 9); - -#ifdef BUFFER_STRESS_TEST - - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "use %i bytes for buffering.\n", - hwc_data.ioctls.kmem_hwcb * PAGE_SIZE); - for (i = 0; i < 500; i++) { - hwcb = (init_hwcb_t *) BUF_HWCB; - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "This is stress test message #%i, free: %i bytes\n", - i, - MAX_HWCB_ROOM - (hwcb->length + sizeof (mto_t))); - } - -#endif - - return /*retval */ 0; -} - -signed int -hwc_register_calls (hwc_high_level_calls_t * calls) -{ - if (calls == NULL) - return -EINVAL; - - if (hwc_data.calls != NULL) - return -EBUSY; - - hwc_data.calls = calls; - return 0; -} - -signed int -hwc_unregister_calls (hwc_high_level_calls_t * calls) -{ - if (hwc_data.calls == NULL) - return -EINVAL; - - if (calls != hwc_data.calls) - return -EINVAL; - - hwc_data.calls = NULL; - return 0; -} - -int -hwc_send (hwc_request_t * req) -{ - unsigned long flags; - int retval; - int cc; - - spin_lock_irqsave (&hwc_data.lock, flags); - if (!req || !req->callback || !req->block) { - retval = -EINVAL; - goto unlock; - } - if (hwc_data.request) { - retval = -ENOTSUPP; - goto unlock; - } - cc = service_call (req->word, req->block); - switch (cc) { - case 0: - hwc_data.request = req; - hwc_data.current_servc = req->word; - hwc_data.current_hwcb = req->block; - retval = 0; - break; - case 2: - retval = -EBUSY; - break; - default: - retval = -ENOSYS; - - } - unlock: - spin_unlock_irqrestore (&hwc_data.lock, flags); - return retval; 
-} - -EXPORT_SYMBOL (hwc_send); - -void -do_hwc_callback (u32 ext_int_param) -{ - if (!hwc_data.request || !hwc_data.request->callback) - return; - if ((ext_int_param & HWC_EXT_INT_PARAM_ADDR) - != (unsigned long) hwc_data.request->block) - return; - hwc_data.request->callback (hwc_data.request); - hwc_data.request = NULL; - hwc_data.current_hwcb = NULL; - hwc_data.current_servc = 0; -} - -void -hwc_do_interrupt (u32 ext_int_param) -{ - u32 finished_hwcb = ext_int_param & HWC_EXT_INT_PARAM_ADDR; - u32 evbuf_pending = ext_int_param & HWC_EXT_INT_PARAM_PEND; - - if (hwc_data.flags & HWC_PTIMER_RUNS) { - del_timer (&hwc_data.poll_timer); - hwc_data.flags &= ~HWC_PTIMER_RUNS; - } - if (finished_hwcb) { - - if ((unsigned long) hwc_data.current_hwcb != finished_hwcb) { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "interrupt: mismatch: " - "ext. int param. (0x%x) vs. " - "current HWCB (0x%x)\n", - ext_int_param, - hwc_data.current_hwcb); - } else { - if (hwc_data.request) { - - do_hwc_callback (ext_int_param); - } else { - - switch (hwc_data.current_servc) { - - case HWC_CMDW_WRITEMASK: - - write_event_mask_2 (ext_int_param); - break; - - case HWC_CMDW_WRITEDATA: - - write_event_data_2 (ext_int_param); - break; - - case HWC_CMDW_READDATA: - - unconditional_read_2 (ext_int_param); - break; - default: - } - } - } - } else { - - if (hwc_data.current_hwcb) { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "interrupt: mismatch: " - "ext. int. param. (0x%x) vs. " - "current HWCB (0x%x)\n", - ext_int_param, - hwc_data.current_hwcb); - } - } - - if (evbuf_pending) { - - unconditional_read_1 (); - } else { - - write_event_data_1 (); - } - - if (!hwc_data.calls || !hwc_data.calls->wake_up) - return; - (hwc_data.calls->wake_up) (); -} - -void -hwc_interrupt_handler (struct pt_regs *regs, __u16 code) -{ - int cpu = smp_processor_id (); - - u32 ext_int_param = hwc_ext_int_param (); - - irq_enter (cpu, 0x2401); - - if (hwc_data.flags & HWC_INIT) { - - hwc_data.flags |= HWC_INTERRUPT; - } else if (hwc_data.flags & HWC_BROKEN) { - - if (!do_hwc_init ()) { - hwc_data.flags &= ~HWC_BROKEN; - internal_print (DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "delayed HWC setup after" - " temporary breakdown" - " (ext. int. 
parameter=0x%x)\n", - ext_int_param); - } - } else { - spin_lock (&hwc_data.lock); - hwc_do_interrupt (ext_int_param); - spin_unlock (&hwc_data.lock); - } - irq_exit (cpu, 0x2401); -} - -void -hwc_unblank (void) -{ - - spin_lock (&hwc_data.lock); - spin_unlock (&hwc_data.lock); - - __ctl_store (cr0, 0, 0); - cr0_save = cr0; - cr0 |= 0x00000200; - cr0 &= 0xFFFFF3AC; - __ctl_load (cr0, 0, 0); - - asm volatile ("STOSM %0,0x01":"=m" (psw_mask)::"memory"); - - while (ALL_HWCB_CHAR) - barrier (); - - asm volatile ("STNSM %0,0xFE":"=m" (psw_mask)::"memory"); - - __ctl_load (cr0_save, 0, 0); -} - -int -hwc_ioctl (unsigned int cmd, unsigned long arg) -{ - hwc_ioctls_t tmp = hwc_data.ioctls; - int retval = 0; - unsigned long flags; - unsigned int obuf; - - spin_lock_irqsave (&hwc_data.lock, flags); - - switch (cmd) { - - case TIOCHWCSHTAB: - if (get_user (tmp.width_htab, (ioctl_htab_t *) arg)) - goto fault; - break; - - case TIOCHWCSECHO: - if (get_user (tmp.echo, (ioctl_echo_t *) arg)) - goto fault; - break; - - case TIOCHWCSCOLS: - if (get_user (tmp.columns, (ioctl_cols_t *) arg)) - goto fault; - break; - - case TIOCHWCSNL: - if (get_user (tmp.final_nl, (ioctl_nl_t *) arg)) - goto fault; - break; - - case TIOCHWCSOBUF: - if (get_user (obuf, (unsigned int *) arg)) - goto fault; - if (obuf & 0xFFF) - tmp.max_hwcb = (((obuf | 0xFFF) + 1) >> 12); - else - tmp.max_hwcb = (obuf >> 12); - break; - - case TIOCHWCSCASE: - if (get_user (tmp.tolower, (ioctl_case_t *) arg)) - goto fault; - break; - - case TIOCHWCSDELIM: - if (get_user (tmp.delim, (ioctl_delim_t *) arg)) - goto fault; - break; - - case TIOCHWCSINIT: - retval = set_hwc_ioctls (&hwc_data.init_ioctls, 1); - break; - - case TIOCHWCGHTAB: - if (put_user (tmp.width_htab, (ioctl_htab_t *) arg)) - goto fault; - break; - - case TIOCHWCGECHO: - if (put_user (tmp.echo, (ioctl_echo_t *) arg)) - goto fault; - break; - - case TIOCHWCGCOLS: - if (put_user (tmp.columns, (ioctl_cols_t *) arg)) - goto fault; - break; - - case TIOCHWCGNL: - if (put_user (tmp.final_nl, (ioctl_nl_t *) arg)) - goto fault; - break; - - case TIOCHWCGOBUF: - if (put_user (tmp.max_hwcb, (ioctl_obuf_t *) arg)) - goto fault; - break; - - case TIOCHWCGKBUF: - if (put_user (tmp.kmem_hwcb, (ioctl_obuf_t *) arg)) - goto fault; - break; - - case TIOCHWCGCASE: - if (put_user (tmp.tolower, (ioctl_case_t *) arg)) - goto fault; - break; - - case TIOCHWCGDELIM: - if (put_user (tmp.delim, (ioctl_delim_t *) arg)) - goto fault; - break; -#if 0 - - case TIOCHWCGINIT: - if (put_user (&hwc_data.init_ioctls, (hwc_ioctls_t *) arg)) - goto fault; - break; - - case TIOCHWCGCURR: - if (put_user (&hwc_data.ioctls, (hwc_ioctls_t *) arg)) - goto fault; - break; -#endif - - default: - goto noioctlcmd; - } - - if (_IOC_DIR (cmd) == _IOC_WRITE) - retval = set_hwc_ioctls (&tmp, 0); - - goto out; - - fault: - retval = -EFAULT; - goto out; - noioctlcmd: - retval = -ENOIOCTLCMD; - out: - spin_unlock_irqrestore (&hwc_data.lock, flags); - return retval; -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/hwc_rw.h linux.21rc5-ac2/drivers/s390/char/hwc_rw.h --- linux.21rc5/drivers/s390/char/hwc_rw.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/hwc_rw.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,132 +0,0 @@ -/* - * drivers/s390/char/hwc_rw.h - * interface to the HWC-read/write driver - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - */ - -#ifndef __HWC_RW_H__ -#define 
__HWC_RW_H__ - -#include - -typedef struct { - - void (*move_input) (unsigned char *, unsigned int); - - void (*wake_up) (void); -} hwc_high_level_calls_t; - -struct _hwc_request; - -typedef void hwc_callback_t (struct _hwc_request *); - -typedef struct _hwc_request { - void *block; - u32 word; - hwc_callback_t *callback; - void *data; -} __attribute__ ((packed)) - -hwc_request_t; - -#define HWC_ASCEBC(x) ((MACHINE_IS_VM ? _ascebc[x] : _ascebc_500[x])) - -#define HWC_EBCASC_STR(s,c) ((MACHINE_IS_VM ? EBCASC(s,c) : EBCASC_500(s,c))) - -#define HWC_ASCEBC_STR(s,c) ((MACHINE_IS_VM ? ASCEBC(s,c) : ASCEBC_500(s,c))) - -#define IN_HWCB 1 -#define IN_WRITE_BUF 2 -#define IN_BUFS_TOTAL (IN_HWCB | IN_WRITE_BUF) - -typedef unsigned short int ioctl_htab_t; -typedef unsigned char ioctl_echo_t; -typedef unsigned short int ioctl_cols_t; -typedef signed char ioctl_nl_t; -typedef unsigned short int ioctl_obuf_t; -typedef unsigned char ioctl_case_t; -typedef unsigned char ioctl_delim_t; - -typedef struct { - ioctl_htab_t width_htab; - ioctl_echo_t echo; - ioctl_cols_t columns; - ioctl_nl_t final_nl; - ioctl_obuf_t max_hwcb; - ioctl_obuf_t kmem_hwcb; - ioctl_case_t tolower; - ioctl_delim_t delim; -} hwc_ioctls_t; - -static hwc_ioctls_t _hwc_ioctls; - -#define HWC_IOCTL_LETTER 'B' - -#define TIOCHWCSHTAB _IOW(HWC_IOCTL_LETTER, 0, _hwc_ioctls.width_htab) - -#define TIOCHWCSECHO _IOW(HWC_IOCTL_LETTER, 1, _hwc_ioctls.echo) - -#define TIOCHWCSCOLS _IOW(HWC_IOCTL_LETTER, 2, _hwc_ioctls.columns) - -#define TIOCHWCSNL _IOW(HWC_IOCTL_LETTER, 4, _hwc_ioctls.final_nl) - -#define TIOCHWCSOBUF _IOW(HWC_IOCTL_LETTER, 5, _hwc_ioctls.max_hwcb) - -#define TIOCHWCSINIT _IO(HWC_IOCTL_LETTER, 6) - -#define TIOCHWCSCASE _IOW(HWC_IOCTL_LETTER, 7, _hwc_ioctls.tolower) - -#define TIOCHWCSDELIM _IOW(HWC_IOCTL_LETTER, 9, _hwc_ioctls.delim) - -#define TIOCHWCGHTAB _IOR(HWC_IOCTL_LETTER, 10, _hwc_ioctls.width_htab) - -#define TIOCHWCGECHO _IOR(HWC_IOCTL_LETTER, 11, _hwc_ioctls.echo) - -#define TIOCHWCGCOLS _IOR(HWC_IOCTL_LETTER, 12, _hwc_ioctls.columns) - -#define TIOCHWCGNL _IOR(HWC_IOCTL_LETTER, 14, _hwc_ioctls.final_nl) - -#define TIOCHWCGOBUF _IOR(HWC_IOCTL_LETTER, 15, _hwc_ioctls.max_hwcb) - -#define TIOCHWCGINIT _IOR(HWC_IOCTL_LETTER, 16, _hwc_ioctls) - -#define TIOCHWCGCASE _IOR(HWC_IOCTL_LETTER, 17, _hwc_ioctls.tolower) - -#define TIOCHWCGDELIM _IOR(HWC_IOCTL_LETTER, 19, _hwc_ioctls.delim) - -#define TIOCHWCGKBUF _IOR(HWC_IOCTL_LETTER, 20, _hwc_ioctls.max_hwcb) - -#define TIOCHWCGCURR _IOR(HWC_IOCTL_LETTER, 21, _hwc_ioctls) - -#ifndef __HWC_RW_C__ - -extern int hwc_init (void); - -extern int hwc_write (int from_user, const unsigned char *, unsigned int); - -extern unsigned int hwc_chars_in_buffer (unsigned char); - -extern unsigned int hwc_write_room (unsigned char); - -extern void hwc_flush_buffer (unsigned char); - -extern void hwc_unblank (void); - -extern signed int hwc_ioctl (unsigned int, unsigned long); - -extern void do_hwc_interrupt (void); - -extern int hwc_printk (const char *,...); - -extern signed int hwc_register_calls (hwc_high_level_calls_t *); - -extern signed int hwc_unregister_calls (hwc_high_level_calls_t *); - -extern int hwc_send (hwc_request_t *); - -#endif - -#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/hwc_tty.c linux.21rc5-ac2/drivers/s390/char/hwc_tty.c --- linux.21rc5/drivers/s390/char/hwc_tty.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/hwc_tty.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,273 +0,0 @@ -/* - * 
drivers/s390/char/hwc_tty.c - * HWC line mode terminal driver. - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - * - * Thanks to Martin Schwidefsky. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "hwc_rw.h" -#include "ctrlchar.h" - -#define HWC_TTY_PRINT_HEADER "hwc tty driver: " - -#define HWC_TTY_BUF_SIZE 512 - -typedef struct { - - struct tty_struct *tty; - - unsigned char buf[HWC_TTY_BUF_SIZE]; - - unsigned short int buf_count; - - spinlock_t lock; - - hwc_high_level_calls_t calls; -} hwc_tty_data_struct; - -static hwc_tty_data_struct hwc_tty_data = -{ /* NULL/0 */ }; -static struct tty_driver hwc_tty_driver; -static struct tty_struct *hwc_tty_table[1]; -static struct termios *hwc_tty_termios[1]; -static struct termios *hwc_tty_termios_locked[1]; -static int hwc_tty_refcount = 0; - -extern struct termios tty_std_termios; - -void hwc_tty_wake_up (void); -void hwc_tty_input (unsigned char *, unsigned int); - -static int -hwc_tty_open (struct tty_struct *tty, - struct file *filp) -{ - - if (MINOR (tty->device) - tty->driver.minor_start) - return -ENODEV; - - tty->driver_data = &hwc_tty_data; - hwc_tty_data.buf_count = 0; - hwc_tty_data.tty = tty; - tty->low_latency = 0; - - hwc_tty_data.calls.wake_up = hwc_tty_wake_up; - hwc_tty_data.calls.move_input = hwc_tty_input; - hwc_register_calls (&(hwc_tty_data.calls)); - - return 0; -} - -static void -hwc_tty_close (struct tty_struct *tty, - struct file *filp) -{ - if (MINOR (tty->device) != tty->driver.minor_start) { - printk (KERN_WARNING HWC_TTY_PRINT_HEADER - "do not close hwc tty because of wrong device number"); - return; - } - if (tty->count > 1) - return; - - hwc_tty_data.tty = NULL; - - hwc_unregister_calls (&(hwc_tty_data.calls)); -} - -static int -hwc_tty_write_room (struct tty_struct *tty) -{ - int retval; - - retval = hwc_write_room (IN_BUFS_TOTAL); - return retval; -} - -static int -hwc_tty_write (struct tty_struct *tty, - int from_user, - const unsigned char *buf, - int count) -{ - int retval; - - if (hwc_tty_data.buf_count > 0) { - hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count); - hwc_tty_data.buf_count = 0; - } - retval = hwc_write (from_user, buf, count); - return retval; -} - -static void -hwc_tty_put_char (struct tty_struct *tty, - unsigned char ch) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_tty_data.lock, flags); - if (hwc_tty_data.buf_count >= HWC_TTY_BUF_SIZE) { - hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count); - hwc_tty_data.buf_count = 0; - } - hwc_tty_data.buf[hwc_tty_data.buf_count] = ch; - hwc_tty_data.buf_count++; - spin_unlock_irqrestore (&hwc_tty_data.lock, flags); -} - -static void -hwc_tty_flush_chars (struct tty_struct *tty) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_tty_data.lock, flags); - hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count); - hwc_tty_data.buf_count = 0; - spin_unlock_irqrestore (&hwc_tty_data.lock, flags); -} - -static int -hwc_tty_chars_in_buffer (struct tty_struct *tty) -{ - int retval; - - retval = hwc_chars_in_buffer (IN_BUFS_TOTAL); - return retval; -} - -static void -hwc_tty_flush_buffer (struct tty_struct *tty) -{ - hwc_tty_wake_up (); -} - -static int -hwc_tty_ioctl ( - struct tty_struct *tty, - struct file *file, - unsigned int cmd, - unsigned long arg) -{ - if (tty->flags & (1 << TTY_IO_ERROR)) - return -EIO; - - return hwc_ioctl (cmd, arg); -} - -void -hwc_tty_wake_up (void) -{ - if 
(hwc_tty_data.tty == NULL) - return; - if ((hwc_tty_data.tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && - hwc_tty_data.tty->ldisc.write_wakeup) - (hwc_tty_data.tty->ldisc.write_wakeup) (hwc_tty_data.tty); - wake_up_interruptible (&hwc_tty_data.tty->write_wait); -} - -void -hwc_tty_input (unsigned char *buf, unsigned int count) -{ - struct tty_struct *tty = hwc_tty_data.tty; - - if (tty != NULL) { - char *cchar; - if ((cchar = ctrlchar_handle (buf, count, tty))) { - if (cchar == (char *) -1) - return; - tty->flip.count++; - *tty->flip.flag_buf_ptr++ = TTY_NORMAL; - *tty->flip.char_buf_ptr++ = *cchar; - } else { - - memcpy (tty->flip.char_buf_ptr, buf, count); - if (count < 2 || ( - strncmp (buf + count - 2, "^n", 2) || - strncmp (buf + count - 2, "\0252n", 2))) { - tty->flip.char_buf_ptr[count] = '\n'; - count++; - } else - count -= 2; - memset (tty->flip.flag_buf_ptr, TTY_NORMAL, count); - tty->flip.char_buf_ptr += count; - tty->flip.flag_buf_ptr += count; - tty->flip.count += count; - } - tty_flip_buffer_push (tty); - hwc_tty_wake_up (); - } -} - -void -hwc_tty_init (void) -{ - if (!CONSOLE_IS_HWC) - return; - - ctrlchar_init (); - - memset (&hwc_tty_driver, 0, sizeof (struct tty_driver)); - memset (&hwc_tty_data, 0, sizeof (hwc_tty_data_struct)); - hwc_tty_driver.magic = TTY_DRIVER_MAGIC; - hwc_tty_driver.driver_name = "tty_hwc"; - hwc_tty_driver.name = "ttyS"; - hwc_tty_driver.name_base = 0; - hwc_tty_driver.major = TTY_MAJOR; - hwc_tty_driver.minor_start = 64; - hwc_tty_driver.num = 1; - hwc_tty_driver.type = TTY_DRIVER_TYPE_SYSTEM; - hwc_tty_driver.subtype = SYSTEM_TYPE_TTY; - hwc_tty_driver.init_termios = tty_std_termios; - hwc_tty_driver.init_termios.c_iflag = IGNBRK | IGNPAR; - hwc_tty_driver.init_termios.c_oflag = ONLCR; - hwc_tty_driver.init_termios.c_lflag = ISIG | ECHO; - hwc_tty_driver.flags = TTY_DRIVER_REAL_RAW; - hwc_tty_driver.refcount = &hwc_tty_refcount; - - hwc_tty_driver.table = hwc_tty_table; - hwc_tty_driver.termios = hwc_tty_termios; - hwc_tty_driver.termios_locked = hwc_tty_termios_locked; - - hwc_tty_driver.open = hwc_tty_open; - hwc_tty_driver.close = hwc_tty_close; - hwc_tty_driver.write = hwc_tty_write; - hwc_tty_driver.put_char = hwc_tty_put_char; - hwc_tty_driver.flush_chars = hwc_tty_flush_chars; - hwc_tty_driver.write_room = hwc_tty_write_room; - hwc_tty_driver.chars_in_buffer = hwc_tty_chars_in_buffer; - hwc_tty_driver.flush_buffer = hwc_tty_flush_buffer; - hwc_tty_driver.ioctl = hwc_tty_ioctl; - - hwc_tty_driver.throttle = NULL; - hwc_tty_driver.unthrottle = NULL; - hwc_tty_driver.send_xchar = NULL; - hwc_tty_driver.set_termios = NULL; - hwc_tty_driver.set_ldisc = NULL; - hwc_tty_driver.stop = NULL; - hwc_tty_driver.start = NULL; - hwc_tty_driver.hangup = NULL; - hwc_tty_driver.break_ctl = NULL; - hwc_tty_driver.wait_until_sent = NULL; - hwc_tty_driver.read_proc = NULL; - hwc_tty_driver.write_proc = NULL; - - if (tty_register_driver (&hwc_tty_driver)) - panic ("Couldn't register hwc_tty driver\n"); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/Makefile linux.21rc5-ac2/drivers/s390/char/Makefile --- linux.21rc5/drivers/s390/char/Makefile 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/Makefile 2003-04-25 13:53:58.000000000 +0100 @@ -4,31 +4,36 @@ O_TARGET := s390-char.o -list-multi := tub3270.o tape390.o -export-objs := hwc_rw.o +list-multi := tub3270.o \ + tape390.o + +export-objs := sclp.o \ + tape_core.o \ + tape_devmap.o \ + tape_std.o tub3270-objs := tuball.o tubfs.o tubtty.o \ 
tubttyaid.o tubttybld.o tubttyscl.o \ tubttyrcl.o tubttysiz.o -tape390-$(CONFIG_S390_TAPE_CHAR) += tapechar.o -tape390-$(CONFIG_S390_TAPE_BLOCK) += tapeblock.o -tape390-$(CONFIG_S390_TAPE_3480) += tape3480.o tape34xx.o -tape390-$(CONFIG_S390_TAPE_3490) += tape3490.o tape34xx.o -tape390-objs := tape.o $(sort $(tape390-y)) +tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o +tape-objs := tape_core.o tape_devmap.o tape_proc.o tape_std.o tape_char.o \ + $(sort $(tape-y)) +obj-$(CONFIG_S390_TAPE) += tape390.o +obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o obj-y += ctrlchar.o obj-$(CONFIG_TN3215) += con3215.o -obj-$(CONFIG_HWC) += hwc_con.o hwc_rw.o hwc_tty.o -obj-$(CONFIG_HWC_CPI) += hwc_cpi.o +obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o +obj-$(CONFIG_SCLP_TTY) += sclp_tty.o +obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o +obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o obj-$(CONFIG_TN3270) += tub3270.o -obj-$(CONFIG_S390_TAPE) += tape390.o include $(TOPDIR)/Rules.make tub3270.o: $(tub3270-objs) $(LD) -r -o $@ $(tub3270-objs) -tape390.o: $(tape390-objs) - $(LD) -r -o $@ $(tape390-objs) - +tape390.o: $(tape-objs) + $(LD) -r -o $@ $(tape-objs) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/sclp.c linux.21rc5-ac2/drivers/s390/char/sclp.c --- linux.21rc5/drivers/s390/char/sclp.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/sclp.c 2003-05-28 15:13:54.000000000 +0100 @@ -0,0 +1,786 @@ +/* + * drivers/s390/char/sclp.c + * core function to access sclp interface + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" + +#define SCLP_CORE_PRINT_HEADER "sclp low level driver: " + +/* Structure for register_early_external_interrupt. */ +static ext_int_info_t ext_int_info_hwc; + +/* spinlock to protect global variables of sclp_core */ +static spinlock_t sclp_lock; + +/* Mask of valid sclp events */ +static sccb_mask_t sclp_receive_mask; +static sccb_mask_t sclp_send_mask; + +/* List of registered event types */ +static struct list_head sclp_reg_list; + +/* sccb queue */ +static struct list_head sclp_req_queue; + +/* sccb for unconditional read */ +static struct sclp_req sclp_read_req; +static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); +/* sccb for write mask sccb */ +static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); + +/* Timer for init mask retries. 
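It is armed from sclp_init_mask() below when the initial write event mask command fails.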
*/ +static struct timer_list retry_timer; + +static volatile unsigned long sclp_status = 0; +/* some status flags */ +#define SCLP_INIT 0 +#define SCLP_RUNNING 1 +#define SCLP_READING 2 + +#define SCLP_INIT_POLL_INTERVAL 1 + +#define SCLP_COMMAND_INITIATED 0 +#define SCLP_BUSY 2 +#define SCLP_NOT_OPERATIONAL 3 + +/* + * assembler instruction for Service Call + */ +static int +__service_call(sclp_cmdw_t command, void *sccb) +{ + int cc; + + /* + * Mnemonic: SERVC Rx, Ry [RRE] + * + * Rx: SCLP command word + * Ry: address of SCCB + */ + __asm__ __volatile__( + " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ + " ipm %0\n" + " srl %0,28" + : "=&d" (cc) + : "d" (command), "a" (__pa(sccb)) + : "cc", "memory" ); + /* + * cc == 0: Service Call successfully initiated + * cc == 2: SCLP busy, new Service Call not initiated, + * new SCCB unchanged + * cc == 3: SCLP function not operational + */ + if (cc == SCLP_NOT_OPERATIONAL) + return -EIO; + /* + * We set the SCLP_RUNNING bit for cc 2 as well because if + * service_call returns cc 2 some old request is running + * that has to complete first + */ + set_bit(SCLP_RUNNING, &sclp_status); + if (cc == SCLP_BUSY) + return -EBUSY; + return 0; +} + +static int +sclp_start_request(void) +{ + struct sclp_req *req; + int rc; + unsigned long flags; + + /* quick exit if sclp is already in use */ + if (test_bit(SCLP_RUNNING, &sclp_status)) + return -EBUSY; + spin_lock_irqsave(&sclp_lock, flags); + /* Get first request on queue if available */ + req = NULL; + if (!list_empty(&sclp_req_queue)) + req = list_entry(sclp_req_queue.next, struct sclp_req, list); + if (req) { + rc = __service_call(req->command, req->sccb); + if (rc) { + req->status = SCLP_REQ_FAILED; + list_del(&req->list); + } else + req->status = SCLP_REQ_RUNNING; + } else + rc = -EINVAL; + spin_unlock_irqrestore(&sclp_lock, flags); + if (rc == -EIO && req->callback != NULL) + req->callback(req, req->callback_data); + return rc; +} + +static int +sclp_process_evbufs(struct sccb_header *sccb) +{ + int result; + unsigned long flags; + struct evbuf_header *evbuf; + struct list_head *l; + struct sclp_register *t; + + spin_lock_irqsave(&sclp_lock, flags); + evbuf = (struct evbuf_header *) (sccb + 1); + result = 0; + while ((addr_t) evbuf < (addr_t) sccb + sccb->length) { + /* check registered event */ + t = NULL; + list_for_each(l, &sclp_reg_list) { + t = list_entry(l, struct sclp_register, list); + if (t->receive_mask & (1 << (32 - evbuf->type))) { + if (t->receiver_fn != NULL) { + spin_unlock_irqrestore(&sclp_lock, + flags); + t->receiver_fn(evbuf); + spin_lock_irqsave(&sclp_lock, flags); + } + break; + } + else + t = NULL; + } + /* Check for unrequested event buffer */ + if (t == NULL) + result = -ENOSYS; + evbuf = (struct evbuf_header *) + ((addr_t) evbuf + evbuf->length); + } + spin_unlock_irqrestore(&sclp_lock, flags); + return result; +} + +char * +sclp_error_message(u16 rc) +{ + static struct { + u16 code; char *msg; + } sclp_errors[] = { + { 0x0000, "No response code stored (machine malfunction)" }, + { 0x0020, "Normal Completion" }, + { 0x0040, "SCLP equipment check" }, + { 0x0100, "SCCB boundary violation" }, + { 0x01f0, "Invalid command" }, + { 0x0220, "Normal Completion; suppressed buffers pending" }, + { 0x0300, "Insufficient SCCB length" }, + { 0x0340, "Contained SCLP equipment check" }, + { 0x05f0, "Target resource in improper state" }, + { 0x40f0, "Invalid function code/not installed" }, + { 0x60f0, "No buffers stored" }, + { 0x62f0, "No buffers stored; suppressed buffers pending" }, + {
0x70f0, "Invalid selection mask" }, + { 0x71f0, "Event buffer exceeds available space" }, + { 0x72f0, "Inconsistent lengths" }, + { 0x73f0, "Event buffer syntax error" } + }; + int i; + for (i = 0; i < sizeof(sclp_errors)/sizeof(sclp_errors[0]); i++) + if (rc == sclp_errors[i].code) + return sclp_errors[i].msg; + return "Invalid response code"; +} + +/* + * postprocessing of unconditional read service call + */ +static void +sclp_unconditional_read_cb(struct sclp_req *read_req, void *data) +{ + struct sccb_header *sccb; + + sccb = read_req->sccb; + if (sccb->response_code == 0x0020 || + sccb->response_code == 0x0220) { + if (sclp_process_evbufs(sccb) != 0) + printk(KERN_WARNING SCLP_CORE_PRINT_HEADER + "unconditional read: " + "unrequested event buffer received.\n"); + } + + if (sccb->response_code != 0x0020) + printk(KERN_WARNING SCLP_CORE_PRINT_HEADER + "unconditional read: %s (response code=0x%x).\n", + sclp_error_message(sccb->response_code), + sccb->response_code); + + clear_bit(SCLP_READING, &sclp_status); +} + +/* + * Function to queue Read Event Data/Unconditional Read + */ +static void +__sclp_unconditional_read(void) +{ + struct sccb_header *sccb; + struct sclp_req *read_req; + + /* + * Don't try to initiate Unconditional Read if we are not able to + * receive anything + */ + if (sclp_receive_mask == 0) + return; + /* Don't try reading if a read is already outstanding */ + if (test_and_set_bit(SCLP_READING, &sclp_status)) + return; + /* Initialize read sccb */ + sccb = (struct sccb_header *) sclp_read_sccb; + clear_page(sccb); + sccb->length = PAGE_SIZE; + sccb->function_code = 0; /* unconditional read */ + sccb->control_mask[2] = 0x80; /* variable length response */ + /* Initialize request structure */ + read_req = &sclp_read_req; + read_req->command = SCLP_CMDW_READDATA; + read_req->status = SCLP_REQ_QUEUED; + read_req->callback = sclp_unconditional_read_cb; + read_req->sccb = sccb; + /* Add read request to the head of queue */ + list_add(&read_req->list, &sclp_req_queue); +} + +/* Bit masks to interpret external interruption parameter contents. */ +#define EXT_INT_SCCB_MASK 0xfffffff8 +#define EXT_INT_STATECHANGE_PENDING 0x00000002 +#define EXT_INT_EVBUF_PENDING 0x00000001 + +/* + * Handler for service-signal external interruptions + */ +static void +sclp_interrupt_handler(struct pt_regs *regs, __u16 code) +{ + u32 ext_int_param, finished_sccb, evbuf_pending; + struct list_head *l; + struct sclp_req *req, *tmp; + int cpu; + + spin_lock(&sclp_lock); + /* + * Only process interrupt if sclp is initialized. + * This avoids strange effects for a pending request + * from before the last re-ipl. 
+ */ + if (!test_bit(SCLP_INIT, &sclp_status)) { + /* Now clear the running bit */ + clear_bit(SCLP_RUNNING, &sclp_status); + spin_unlock(&sclp_lock); + return; + } + ext_int_param = S390_lowcore.ext_params; + finished_sccb = ext_int_param & EXT_INT_SCCB_MASK; + evbuf_pending = ext_int_param & (EXT_INT_EVBUF_PENDING | + EXT_INT_STATECHANGE_PENDING); + cpu = smp_processor_id(); + irq_enter(cpu, 0x2401); + req = NULL; + if (finished_sccb != 0U) { + list_for_each(l, &sclp_req_queue) { + tmp = list_entry(l, struct sclp_req, list); + if (finished_sccb == (u32)(addr_t) tmp->sccb) { + list_del(&tmp->list); + req = tmp; + break; + } + } + } + spin_unlock(&sclp_lock); + /* Perform callback */ + if (req != NULL) { + req->status = SCLP_REQ_DONE; + if (req->callback != NULL) + req->callback(req, req->callback_data); + } + spin_lock(&sclp_lock); + /* Head queue a read sccb if an event buffer is pending */ + if (evbuf_pending) + __sclp_unconditional_read(); + /* Now clear the running bit */ + clear_bit(SCLP_RUNNING, &sclp_status); + spin_unlock(&sclp_lock); + /* and start next request on the queue */ + sclp_start_request(); + irq_exit(cpu, 0x2401); +} + +/* + * Wait synchronously for external interrupt of sclp. We may not receive + * any other external interrupt, so we disable all other external interrupts + * in control register 0. + */ +void +sclp_sync_wait(void) +{ + unsigned long psw_mask; + unsigned long cr0, cr0_sync; + + /* + * save cr0 + * enable service signal external interruption (cr0.22) + * disable cr0.20-21, cr0.25, cr0.27, cr0.30-31 + * don't touch any other bit in cr0 + */ + __ctl_store(cr0, 0, 0); + cr0_sync = cr0; + cr0_sync |= 0x00000200; + cr0_sync &= 0xFFFFF3AC; + __ctl_load(cr0_sync, 0, 0); + + /* enable external interruptions (PSW-mask.7) */ + asm volatile ("STOSM 0(%1),0x01" + : "=m" (psw_mask) : "a" (&psw_mask) : "memory"); + + /* wait until ISR signals receipt of interrupt */ + while (test_bit(SCLP_RUNNING, &sclp_status)) { + barrier(); + cpu_relax(); + } + + /* disable external interruptions */ + asm volatile ("SSM 0(%0)" + : : "a" (&psw_mask) : "memory"); + + /* restore cr0 */ + __ctl_load(cr0, 0, 0); +} + +/* + * Queue an SCLP request. Request will immediately be processed if queue is + * empty. 
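 + * + * A typical caller looks like the following (an illustrative sketch only; + * my_callback stands for any handler matching the callback member of + * struct sclp_req - the cpi module below follows this pattern, using a + * semaphore in its callback instead of polling): + * + * req->command = SCLP_CMDW_WRITEDATA; + * req->sccb = sccb; + * req->status = SCLP_REQ_FILLED; + * req->callback = my_callback; + * sclp_add_request(req); + * while (req->status != SCLP_REQ_DONE && req->status != SCLP_REQ_FAILED) + * sclp_sync_wait();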
+ */ +void +sclp_add_request(struct sclp_req *req) +{ + unsigned long flags; + + if (!test_bit(SCLP_INIT, &sclp_status)) { + req->status = SCLP_REQ_FAILED; + if (req->callback != NULL) + req->callback(req, req->callback_data); + return; + } + spin_lock_irqsave(&sclp_lock, flags); + /* queue the request */ + req->status = SCLP_REQ_QUEUED; + list_add_tail(&req->list, &sclp_req_queue); + spin_unlock_irqrestore(&sclp_lock, flags); + /* try to start the first request on the queue */ + sclp_start_request(); +} + +/* state change notification */ +struct sclp_statechangebuf { + struct evbuf_header header; + u8 validity_sclp_active_facility_mask : 1; + u8 validity_sclp_receive_mask : 1; + u8 validity_sclp_send_mask : 1; + u8 validity_read_data_function_mask : 1; + u16 _zeros : 12; + u16 mask_length; + u64 sclp_active_facility_mask; + sccb_mask_t sclp_receive_mask; + sccb_mask_t sclp_send_mask; + u32 read_data_function_mask; +} __attribute__((packed)); + +static inline void +__sclp_notify_state_change(void) +{ + struct list_head *l; + struct sclp_register *t; + sccb_mask_t receive_mask, send_mask; + + list_for_each(l, &sclp_reg_list) { + t = list_entry(l, struct sclp_register, list); + receive_mask = t->receive_mask & sclp_receive_mask; + send_mask = t->send_mask & sclp_send_mask; + if (t->sclp_receive_mask != receive_mask || + t->sclp_send_mask != send_mask) { + t->sclp_receive_mask = receive_mask; + t->sclp_send_mask = send_mask; + if (t->state_change_fn != NULL) + t->state_change_fn(t); + } + } +} + +static void +sclp_state_change(struct evbuf_header *evbuf) +{ + unsigned long flags; + struct sclp_statechangebuf *scbuf; + + spin_lock_irqsave(&sclp_lock, flags); + scbuf = (struct sclp_statechangebuf *) evbuf; + + if (scbuf->validity_sclp_receive_mask) { + if (scbuf->mask_length != sizeof(sccb_mask_t)) + printk(KERN_WARNING SCLP_CORE_PRINT_HEADER + "state change event with mask length %i\n", + scbuf->mask_length); + else + /* set new receive mask */ + sclp_receive_mask = scbuf->sclp_receive_mask; + } + + if (scbuf->validity_sclp_send_mask) { + if (scbuf->mask_length != sizeof(sccb_mask_t)) + printk(KERN_WARNING SCLP_CORE_PRINT_HEADER + "state change event with mask length %i\n", + scbuf->mask_length); + else + /* set new send mask */ + sclp_send_mask = scbuf->sclp_send_mask; + } + + __sclp_notify_state_change(); + spin_unlock_irqrestore(&sclp_lock, flags); +} + +static struct sclp_register sclp_state_change_event = { + .receive_mask = EvTyp_StateChange_Mask, + .receiver_fn = sclp_state_change +}; + + +/* + * SCLP quiesce event handler + */ +#ifdef CONFIG_SMP +static volatile unsigned long cpu_quiesce_map; + +static void +do_load_quiesce_psw(void * __unused) +{ + psw_t quiesce_psw; + + clear_bit(smp_processor_id(), &cpu_quiesce_map); + if (smp_processor_id() == 0) { + /* Wait for all other cpus to enter do_load_quiesce_psw */ + while (cpu_quiesce_map != 0); + /* Quiesce the last cpu with the special psw */ + quiesce_psw.mask = _DW_PSW_MASK; + quiesce_psw.addr = 0xfff; + __load_psw(quiesce_psw); + } + signal_processor(smp_processor_id(), sigp_stop); +} + +static void +do_machine_quiesce(void) +{ + cpu_quiesce_map = cpu_online_map; + smp_call_function(do_load_quiesce_psw, NULL, 0, 0); + do_load_quiesce_psw(NULL); +} +#else +static void +do_machine_quiesce(void) +{ + psw_t quiesce_psw; + + quiesce_psw.mask = _DW_PSW_MASK; + quiesce_psw.addr = 0xfff; + __load_psw(quiesce_psw); +} +#endif + +extern void ctrl_alt_del(void); + +static void +sclp_quiesce(struct evbuf_header *evbuf) +{ + /* + * We got a
"shutdown" request. + * Add a call to an appropriate "shutdown" routine here. This + * routine should set all PSWs to 'disabled-wait', 'stopped' + * or 'check-stopped' - except 1 PSW which needs to carry a + * special bit pattern called 'quiesce PSW'. + */ + _machine_restart = (void *) do_machine_quiesce; + _machine_halt = do_machine_quiesce; + _machine_power_off = do_machine_quiesce; + ctrl_alt_del(); +} + +static struct sclp_register sclp_quiesce_event = { + .receive_mask = EvTyp_SigQuiesce_Mask, + .receiver_fn = sclp_quiesce +}; + +/* initialisation of SCLP */ +struct init_sccb { + struct sccb_header header; + u16 _reserved; + u16 mask_length; + sccb_mask_t receive_mask; + sccb_mask_t send_mask; + sccb_mask_t sclp_send_mask; + sccb_mask_t sclp_receive_mask; +} __attribute__((packed)); + +static void sclp_init_mask_retry(unsigned long); + +static int +sclp_init_mask(void) +{ + unsigned long flags; + struct init_sccb *sccb; + struct sclp_req *req; + struct list_head *l; + struct sclp_register *t; + int rc; + + sccb = (struct init_sccb *) sclp_init_sccb; + /* stick the request structure to the end of the init sccb page */ + req = (struct sclp_req *) ((addr_t) sccb + PAGE_SIZE) - 1; + + /* SCLP setup concerning receiving and sending Event Buffers */ + req->command = SCLP_CMDW_WRITEMASK; + req->status = SCLP_REQ_QUEUED; + req->callback = NULL; + req->sccb = sccb; + /* setup sccb for writemask command */ + memset(sccb, 0, sizeof(struct init_sccb)); + sccb->header.length = sizeof(struct init_sccb); + sccb->mask_length = sizeof(sccb_mask_t); + /* copy in the sccb mask of the registered event types */ + spin_lock_irqsave(&sclp_lock, flags); + list_for_each(l, &sclp_reg_list) { + t = list_entry(l, struct sclp_register, list); + sccb->receive_mask |= t->receive_mask; + sccb->send_mask |= t->send_mask; + } + sccb->sclp_receive_mask = 0; + sccb->sclp_send_mask = 0; + if (test_bit(SCLP_INIT, &sclp_status)) { + /* add request to sclp queue */ + list_add_tail(&req->list, &sclp_req_queue); + spin_unlock_irqrestore(&sclp_lock, flags); + /* and start if SCLP is idle */ + sclp_start_request(); + /* now wait for completion */ + while (req->status != SCLP_REQ_DONE && + req->status != SCLP_REQ_FAILED) + sclp_sync_wait(); + spin_lock_irqsave(&sclp_lock, flags); + } else { + /* + * Special case for the very first write mask command. + * The interrupt handler is not removing requests from + * the request queue and doesn't call callbacks yet + * because there might be a pending old interrupt + * after a Re-IPL. We have to receive and ignore it. + */ + do { + rc = __service_call(req->command, req->sccb); + spin_unlock_irqrestore(&sclp_lock, flags); + if (rc == -EIO) + return -ENOSYS; + sclp_sync_wait(); + spin_lock_irqsave(&sclp_lock, flags); + } while (rc == -EBUSY); + } + if (sccb->header.response_code != 0x0020) { + /* WRITEMASK failed - we cannot rely on receiving a state + change event, so initially, polling is the only alternative + for us to ever become operational.
*/ + if (!timer_pending(&retry_timer) || + !mod_timer(&retry_timer, + jiffies + SCLP_INIT_POLL_INTERVAL*HZ)) { + retry_timer.function = sclp_init_mask_retry; + retry_timer.data = 0; + retry_timer.expires = jiffies + + SCLP_INIT_POLL_INTERVAL*HZ; + add_timer(&retry_timer); + } + } else { + sclp_receive_mask = sccb->sclp_receive_mask; + sclp_send_mask = sccb->sclp_send_mask; + __sclp_notify_state_change(); + } + spin_unlock_irqrestore(&sclp_lock, flags); + return 0; +} + +static void +sclp_init_mask_retry(unsigned long data) +{ + sclp_init_mask(); +} + +/* + * sclp setup function. Called early (no kmalloc!) from sclp_console_init(). + */ +static int +sclp_init(void) +{ + int rc; + + if (test_bit(SCLP_INIT, &sclp_status)) + /* Already initialized. */ + return 0; + + spin_lock_init(&sclp_lock); + INIT_LIST_HEAD(&sclp_req_queue); + + /* init event list */ + INIT_LIST_HEAD(&sclp_reg_list); + list_add(&sclp_state_change_event.list, &sclp_reg_list); + list_add(&sclp_quiesce_event.list, &sclp_reg_list); + + /* + * request the 0x2401 external interrupt + * The sclp driver is initialized early (before kmalloc works). We + * need to use register_early_external_interrupt. + */ + if (register_early_external_interrupt(0x2401, sclp_interrupt_handler, + &ext_int_info_hwc) != 0) + return -EBUSY; + + /* enable service-signal external interruptions, + * Control Register 0 bit 22 := 1 + * (besides PSW bit 7 must be set to 1 sometimes for external + * interruptions) + */ + ctl_set_bit(0, 9); + + init_timer(&retry_timer); + /* do the initial write event mask */ + rc = sclp_init_mask(); + if (rc == 0) { + /* Ok, now everything is setup right. */ + set_bit(SCLP_INIT, &sclp_status); + return 0; + } + + /* The sclp_init_mask failed. SCLP is broken, unregister and exit. */ + ctl_clear_bit(0,9); + unregister_early_external_interrupt(0x2401, sclp_interrupt_handler, + &ext_int_info_hwc); + + return rc; +} + +/* + * Register the SCLP event listener identified by REG. Return 0 on success. + * Some error codes and their meaning: + * + * -ENODEV = SCLP interface is not supported on this machine + * -EBUSY = there is already a listener registered for the requested + * event type + * -EIO = SCLP interface is currently not operational + */ +int +sclp_register(struct sclp_register *reg) +{ + unsigned long flags; + struct list_head *l; + struct sclp_register *t; + + if (!MACHINE_HAS_SCLP) + return -ENODEV; + + if (!test_bit(SCLP_INIT, &sclp_status)) + sclp_init(); + spin_lock_irqsave(&sclp_lock, flags); + /* check already registered event masks for collisions */ + list_for_each(l, &sclp_reg_list) { + t = list_entry(l, struct sclp_register, list); + if (t->receive_mask & reg->receive_mask || + t->send_mask & reg->send_mask) { + spin_unlock_irqrestore(&sclp_lock, flags); + return -EBUSY; + } + } + /* + * set present mask to 0 to trigger state change + * callback in sclp_init_mask + */ + reg->sclp_receive_mask = 0; + reg->sclp_send_mask = 0; + list_add(®->list, &sclp_reg_list); + spin_unlock_irqrestore(&sclp_lock, flags); + sclp_init_mask(); + return 0; +} + +/* + * Unregister the SCLP event listener identified by REG. + */ +void +sclp_unregister(struct sclp_register *reg) +{ + unsigned long flags; + + spin_lock_irqsave(&sclp_lock, flags); + list_del(®->list); + spin_unlock_irqrestore(&sclp_lock, flags); + sclp_init_mask(); +} + +#define SCLP_EVBUF_PROCESSED 0x80 + +/* + * Traverse array of event buffers contained in SCCB and remove all buffers + * with a set "processed" flag. Return the number of unprocessed buffers. 
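 + * + * A worked example with a hypothetical layout: an SCCB holding the event + * buffers [A (processed), B, C (processed)] is compacted in place to [B], + * sccb->length shrinks by the lengths of A and C, and 1 is returned.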
+ */ +int +sclp_remove_processed(struct sccb_header *sccb) +{ + struct evbuf_header *evbuf; + int unprocessed; + u16 remaining; + + evbuf = (struct evbuf_header *) (sccb + 1); + unprocessed = 0; + remaining = sccb->length - sizeof(struct sccb_header); + while (remaining > 0) { + remaining -= evbuf->length; + if (evbuf->flags & SCLP_EVBUF_PROCESSED) { + sccb->length -= evbuf->length; + memcpy((void *) evbuf, + (void *) ((addr_t) evbuf + evbuf->length), + remaining); + } else { + unprocessed++; + evbuf = (struct evbuf_header *) + ((addr_t) evbuf + evbuf->length); + } + } + + return unprocessed; +} + +module_init(sclp_init); + +EXPORT_SYMBOL(sclp_add_request); +EXPORT_SYMBOL(sclp_sync_wait); +EXPORT_SYMBOL(sclp_register); +EXPORT_SYMBOL(sclp_unregister); +EXPORT_SYMBOL(sclp_error_message); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/sclp_con.c linux.21rc5-ac2/drivers/s390/char/sclp_con.c --- linux.21rc5/drivers/s390/char/sclp_con.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/sclp_con.c 2003-05-28 15:13:54.000000000 +0100 @@ -0,0 +1,244 @@ +/* + * drivers/s390/char/sclp_con.c + * SCLP line mode console driver + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" +#include "sclp_rw.h" + +#define SCLP_CON_PRINT_HEADER "sclp console driver: " + +#define sclp_console_major 4 /* TTYAUX_MAJOR */ +#define sclp_console_minor 64 +#define sclp_console_name "ttyS" + +/* Lock to guard over changes to global variables */ +static spinlock_t sclp_con_lock; +/* List of free pages that can be used for console output buffering */ +static struct list_head sclp_con_pages; +/* List of full struct sclp_buffer structures ready for output */ +static struct list_head sclp_con_outqueue; +/* Counter how many buffers are emitted (max 1) and how many */ +/* are on the output queue. */ +static int sclp_con_buffer_count; +/* Pointer to current console buffer */ +static struct sclp_buffer *sclp_conbuf; +/* Timer for delayed output of console messages */ +static struct timer_list sclp_con_timer; + +/* Output format for console messages */ +static unsigned short sclp_con_columns; +static unsigned short sclp_con_width_htab; + +static void +sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) +{ + unsigned long flags; + struct sclp_buffer *next; + void *page; + + /* Ignore return code - because console-writes aren't critical, + we do without a sophisticated error recovery mechanism. */ + page = sclp_unmake_buffer(buffer); + spin_lock_irqsave(&sclp_con_lock, flags); + /* Remove buffer from outqueue */ + list_del(&buffer->list); + sclp_con_buffer_count--; + list_add_tail((struct list_head *) page, &sclp_con_pages); + /* Check if there is a pending buffer on the out queue. 
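 + * At most one buffer is ever in flight (see sclp_conbuf_emit() below), so + * the successor is emitted from this callback once the current one completes.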
*/ + next = NULL; + if (!list_empty(&sclp_con_outqueue)) + next = list_entry(sclp_con_outqueue.next, + struct sclp_buffer, list); + spin_unlock_irqrestore(&sclp_con_lock, flags); + if (next != NULL) + sclp_emit_buffer(next, sclp_conbuf_callback); +} + +static inline void +sclp_conbuf_emit(void) +{ + struct sclp_buffer* buffer; + unsigned long flags; + int count; + + spin_lock_irqsave(&sclp_con_lock, flags); + buffer = sclp_conbuf; + sclp_conbuf = NULL; + if (buffer == NULL) { + spin_unlock_irqrestore(&sclp_con_lock, flags); + return; + } + list_add_tail(&buffer->list, &sclp_con_outqueue); + count = sclp_con_buffer_count++; + spin_unlock_irqrestore(&sclp_con_lock, flags); + if (count == 0) + sclp_emit_buffer(buffer, sclp_conbuf_callback); +} + +/* + * When this routine is called from the timer then we flush the + * temporary write buffer without further waiting on a final new line. + */ +static void +sclp_console_timeout(unsigned long data) +{ + sclp_conbuf_emit(); +} + +/* + * Writes the given message to S390 system console + */ +static void +sclp_console_write(struct console *console, const char *message, + unsigned int count) +{ + unsigned long flags; + void *page; + int written; + + if (count == 0) + return; + spin_lock_irqsave(&sclp_con_lock, flags); + /* + * process escape characters, write message into buffer, + * send buffer to SCLP + */ + do { + /* make sure we have a console output buffer */ + if (sclp_conbuf == NULL) { + while (list_empty(&sclp_con_pages)) { + spin_unlock_irqrestore(&sclp_con_lock, flags); + sclp_sync_wait(); + spin_lock_irqsave(&sclp_con_lock, flags); + } + page = sclp_con_pages.next; + list_del((struct list_head *) page); + sclp_conbuf = sclp_make_buffer(page, sclp_con_columns, + sclp_con_width_htab); + } + /* try to write the string to the current output buffer */ + written = sclp_write(sclp_conbuf, (const unsigned char *) + message, count, 0); + if (written == -EFAULT || written == count) + break; + /* + * Not all characters could be written to the current + * output buffer. Emit the buffer, create a new buffer + * and then output the rest of the string. + */ + spin_unlock_irqrestore(&sclp_con_lock, flags); + sclp_conbuf_emit(); + spin_lock_irqsave(&sclp_con_lock, flags); + message += written; + count -= written; + } while (count > 0); + /* Setup timer to output current console buffer after 1/10 second */ + if (sclp_conbuf != NULL && !timer_pending(&sclp_con_timer)) { + init_timer(&sclp_con_timer); + sclp_con_timer.function = sclp_console_timeout; + sclp_con_timer.data = 0UL; + sclp_con_timer.expires = jiffies + HZ/10; + add_timer(&sclp_con_timer); + } + spin_unlock_irqrestore(&sclp_con_lock, flags); +} + +/* returns the device number of the SCLP console */ +static kdev_t +sclp_console_device(struct console *c) +{ + return mk_kdev(sclp_console_major, sclp_console_minor); +} + +/* + * This routine is called from panic when the kernel + * is going to give up. We have to make sure that all buffers + * will be flushed to the SCLP. 
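 + * It emits the current buffer and then busy-waits via sclp_sync_wait() + * until the output queue has drained.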
+ */ +static void +sclp_console_unblank(void) +{ + unsigned long flags; + + sclp_conbuf_emit(); + spin_lock_irqsave(&sclp_con_lock, flags); + if (timer_pending(&sclp_con_timer)) + del_timer(&sclp_con_timer); + while (sclp_con_buffer_count > 0) { + spin_unlock_irqrestore(&sclp_con_lock, flags); + sclp_sync_wait(); + spin_lock_irqsave(&sclp_con_lock, flags); + } + spin_unlock_irqrestore(&sclp_con_lock, flags); +} + +/* + * used to register the SCLP console to the kernel and to + * give printk necessary information + */ +static struct console sclp_console = +{ + .name = sclp_console_name, + .write = sclp_console_write, + .device = sclp_console_device, + .unblank = sclp_console_unblank, + .flags = CON_PRINTBUFFER, + .index = 0 /* ttyS0 */ +}; + +/* + * called by console_init() in drivers/char/tty_io.c at boot-time. + */ +void __init +sclp_console_init(void) +{ + void *page; + int i; + + if (!CONSOLE_IS_SCLP) + return; + if (sclp_rw_init() != 0) + return; + /* Allocate pages for output buffering */ + INIT_LIST_HEAD(&sclp_con_pages); + for (i = 0; i < MAX_CONSOLE_PAGES; i++) { + page = alloc_bootmem_low_pages(PAGE_SIZE); + if (page == NULL) + return; + list_add_tail((struct list_head *) page, &sclp_con_pages); + } + INIT_LIST_HEAD(&sclp_con_outqueue); + spin_lock_init(&sclp_con_lock); + sclp_con_buffer_count = 0; + sclp_conbuf = NULL; + init_timer(&sclp_con_timer); + + /* Set output format */ + if (MACHINE_IS_VM) + /* + * save 4 characters for the CPU number + * written at start of each line by VM/CP + */ + sclp_con_columns = 76; + else + sclp_con_columns = 80; + sclp_con_width_htab = 8; + + /* enable printk-access to this driver */ + register_console(&sclp_console); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/sclp_cpi.c linux.21rc5-ac2/drivers/s390/char/sclp_cpi.c --- linux.21rc5/drivers/s390/char/sclp_cpi.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/sclp_cpi.c 2003-04-25 13:53:31.000000000 +0100 @@ -0,0 +1,244 @@ +/* + * Author: Martin Peschke + * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation + * + * SCLP Control-Program Identification. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" +#include "sclp_rw.h" + +#define CPI_LENGTH_SYSTEM_TYPE 8 +#define CPI_LENGTH_SYSTEM_NAME 8 +#define CPI_LENGTH_SYSPLEX_NAME 8 + +struct cpi_evbuf { + struct evbuf_header header; + u8 id_format; + u8 reserved0; + u8 system_type[CPI_LENGTH_SYSTEM_TYPE]; + u64 reserved1; + u8 system_name[CPI_LENGTH_SYSTEM_NAME]; + u64 reserved2; + u64 system_level; + u64 reserved3; + u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME]; + u8 reserved4[16]; +} __attribute__((packed)); + +struct cpi_sccb { + struct sccb_header header; + struct cpi_evbuf cpi_evbuf; +} __attribute__((packed)); + +/* Event type structure for write message and write priority message */ +static struct sclp_register sclp_cpi_event = +{ + .send_mask = EvTyp_CtlProgIdent_Mask +}; + +MODULE_AUTHOR( + "Martin Peschke, IBM Deutschland Entwicklung GmbH " + ""); + +MODULE_DESCRIPTION( + "identify this operating system instance to the S/390 " + "or zSeries hardware"); + +static char *system_name = NULL; +MODULE_PARM(system_name, "s"); +MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters"); + +static char *sysplex_name = NULL; +#ifdef ALLOW_SYSPLEX_NAME +MODULE_PARM(sysplex_name, "s"); +MODULE_PARM_DESC(sysplex_name, "if applicable - max. 
8 characters"); +#endif + +/* use default value for this field (as well as for system level) */ +static char *system_type = "LINUX"; + +static int +cpi_check_parms(void) +{ + /* reject if no system type specified */ + if (!system_type) { + printk("cpi: bug: no system type specified\n"); + return -EINVAL; + } + + /* reject if system type larger than 8 characters */ + if (strlen(system_type) > CPI_LENGTH_SYSTEM_TYPE) { + printk("cpi: bug: system type has length of %li characters - " + "only %i characters supported\n", + strlen(system_type), CPI_LENGTH_SYSTEM_TYPE); + return -EINVAL; + } + + /* reject if no system name specified */ + if (!system_name) { + printk("cpi: no system name specified\n"); + return -EINVAL; + } + + /* reject if system name larger than 8 characters */ + if (strlen(system_name) > CPI_LENGTH_SYSTEM_NAME) { + printk("cpi: system name has length of %li characters - " + "only %i characters supported\n", + strlen(system_name), CPI_LENGTH_SYSTEM_NAME); + return -EINVAL; + } + + /* reject if specified sysplex name larger than 8 characters */ + if (sysplex_name && strlen(sysplex_name) > CPI_LENGTH_SYSPLEX_NAME) { + printk("cpi: sysplex name has length of %li characters" + " - only %i characters supported\n", + strlen(sysplex_name), CPI_LENGTH_SYSPLEX_NAME); + return -EINVAL; + } + return 0; +} + +static void +cpi_callback(struct sclp_req *req, void *data) +{ + struct semaphore *sem; + + sem = (struct semaphore *) data; + up(sem); +} + +static struct sclp_req * +cpi_prepare_req(void) +{ + struct sclp_req *req; + struct cpi_sccb *sccb; + struct cpi_evbuf *evb; + + req = (struct sclp_req *) kmalloc(sizeof(struct sclp_req), GFP_KERNEL); + if (req == NULL) + return ERR_PTR(-ENOMEM); + sccb = (struct cpi_sccb *) get_free_page(GFP_KERNEL | GFP_DMA); + if (sccb == NULL) { + kfree(req); + return ERR_PTR(-ENOMEM); + } + memset(sccb, 0, sizeof(struct cpi_sccb)); + + /* setup SCCB for Control-Program Identification */ + sccb->header.length = sizeof(struct cpi_sccb); + sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf); + sccb->cpi_evbuf.header.type = 0x0B; + evb = &sccb->cpi_evbuf; + + /* set system type */ + memset(evb->system_type, ' ', CPI_LENGTH_SYSTEM_TYPE); + memcpy(evb->system_type, system_type, strlen(system_type)); + sclp_ascebc_str(evb->system_type, CPI_LENGTH_SYSTEM_TYPE); + EBC_TOUPPER(evb->system_type, CPI_LENGTH_SYSTEM_TYPE); + + /* set system name */ + memset(evb->system_name, ' ', CPI_LENGTH_SYSTEM_NAME); + memcpy(evb->system_name, system_name, strlen(system_name)); + sclp_ascebc_str(evb->system_name, CPI_LENGTH_SYSTEM_NAME); + EBC_TOUPPER(evb->system_name, CPI_LENGTH_SYSTEM_NAME); + + /* set system level */ + evb->system_level = LINUX_VERSION_CODE; + + /* set sysplex name */ + if (sysplex_name) { + memset(evb->sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME); + memcpy(evb->sysplex_name, sysplex_name, strlen(sysplex_name)); + sclp_ascebc_str(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME); + EBC_TOUPPER(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME); + } + + /* prepare request data structure presented to SCLP driver */ + req->command = SCLP_CMDW_WRITEDATA; + req->sccb = sccb; + req->status = SCLP_REQ_FILLED; + req->callback = cpi_callback; + return req; +} + +static void +cpi_free_req(struct sclp_req *req) +{ + free_page((unsigned long) req->sccb); + kfree(req); +} + +static int __init +cpi_module_init(void) +{ + struct semaphore sem; + struct sclp_req *req; + int rc; + + rc = cpi_check_parms(); + if (rc) + return rc; + + rc = sclp_register(&sclp_cpi_event); + if (rc) { + /* could
not register sclp event. Die. */ + printk("cpi: could not register to hardware console.\n"); + return -EINVAL; + } + if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) { + printk("cpi: no control program identification support\n"); + sclp_unregister(&sclp_cpi_event); + return -ENOTSUPP; + } + + req = cpi_prepare_req(); + if (IS_ERR(req)) { + printk("cpi: couldn't allocate request\n"); + sclp_unregister(&sclp_cpi_event); + return PTR_ERR(req); + } + + /* Prepare semaphore */ + sema_init(&sem, 0); + req->callback_data = &sem; + /* Add request to sclp queue */ + sclp_add_request(req); + /* make "insmod" sleep until callback arrives */ + down(&sem); + + rc = ((struct cpi_sccb *) req->sccb)->header.response_code; + if (rc != 0x0020) { + printk("cpi: failed with response code 0x%x\n", rc); + rc = -ECOMM; + } else + rc = 0; + + cpi_free_req(req); + sclp_unregister(&sclp_cpi_event); + + return rc; +} + + +static void __exit cpi_module_exit(void) +{ +} + + +/* declare driver module init/cleanup functions */ +module_init(cpi_module_init); +module_exit(cpi_module_exit); + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/sclp.h linux.21rc5-ac2/drivers/s390/char/sclp.h --- linux.21rc5/drivers/s390/char/sclp.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/sclp.h 2003-04-25 13:53:31.000000000 +0100 @@ -0,0 +1,157 @@ +/* + * drivers/s390/char/sclp.h + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#ifndef __SCLP_H__ +#define __SCLP_H__ + +#include +#include + +#include + +/* maximum number of pages concerning our own memory management */ +#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) +#define MAX_CONSOLE_PAGES 4 + +#define EvTyp_OpCmd 0x01 +#define EvTyp_Msg 0x02 +#define EvTyp_StateChange 0x08 +#define EvTyp_PMsgCmd 0x09 +#define EvTyp_CntlProgOpCmd 0x20 +#define EvTyp_CntlProgIdent 0x0B +#define EvTyp_SigQuiesce 0x1D +#define EvTyp_VT220Msg 0x1A + +#define EvTyp_OpCmd_Mask 0x80000000 +#define EvTyp_Msg_Mask 0x40000000 +#define EvTyp_StateChange_Mask 0x01000000 +#define EvTyp_PMsgCmd_Mask 0x00800000 +#define EvTyp_CtlProgOpCmd_Mask 0x00000001 +#define EvTyp_CtlProgIdent_Mask 0x00200000 +#define EvTyp_SigQuiesce_Mask 0x00000008 +#define EvTyp_VT220Msg_Mask 0x00000040 + +#define GnrlMsgFlgs_DOM 0x8000 +#define GnrlMsgFlgs_SndAlrm 0x4000 +#define GnrlMsgFlgs_HoldMsg 0x2000 + +#define LnTpFlgs_CntlText 0x8000 +#define LnTpFlgs_LabelText 0x4000 +#define LnTpFlgs_DataText 0x2000 +#define LnTpFlgs_EndText 0x1000 +#define LnTpFlgs_PromptText 0x0800 + +typedef unsigned int sclp_cmdw_t; + +#define SCLP_CMDW_READDATA 0x00770005 +#define SCLP_CMDW_WRITEDATA 0x00760005 +#define SCLP_CMDW_WRITEMASK 0x00780005 + +#define GDS_ID_MDSMU 0x1310 +#define GDS_ID_MDSRouteInfo 0x1311 +#define GDS_ID_AgUnWrkCorr 0x1549 +#define GDS_ID_SNACondReport 0x1532 +#define GDS_ID_CPMSU 0x1212 +#define GDS_ID_RoutTargInstr 0x154D +#define GDS_ID_OpReq 0x8070 +#define GDS_ID_TextCmd 0x1320 + +#define GDS_KEY_SelfDefTextMsg 0x31 + +typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! 
*/ + +struct sccb_header { + u16 length; + u8 function_code; + u8 control_mask[3]; + u16 response_code; +} __attribute__((packed)); + +struct gds_subvector { + u8 length; + u8 key; +} __attribute__((packed)); + +struct gds_vector { + u16 length; + u16 gds_id; +} __attribute__((packed)); + +struct evbuf_header { + u16 length; + u8 type; + u8 flags; + u16 _reserved; +} __attribute__((packed)); + +struct sclp_req { + struct list_head list; /* list_head for request queueing. */ + sclp_cmdw_t command; /* sclp command to execute */ + void *sccb; /* pointer to the sccb to execute */ + char status; /* status of this request */ + /* Callback that is called after reaching final status. */ + void (*callback)(struct sclp_req *, void *data); + void *callback_data; +}; + +#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */ +#define SCLP_REQ_QUEUED 0x01 /* request is queued to be processed */ +#define SCLP_REQ_RUNNING 0x02 /* request is currently running */ +#define SCLP_REQ_DONE 0x03 /* request is completed successfully */ +#define SCLP_REQ_FAILED 0x05 /* request is finally failed */ + +/* function pointers that a high level driver has to use for registration */ +/* of some routines it wants to be called from the low level driver */ +struct sclp_register { + struct list_head list; + /* event masks this user is registered for */ + sccb_mask_t receive_mask; + sccb_mask_t send_mask; + /* actually present events */ + sccb_mask_t sclp_receive_mask; + sccb_mask_t sclp_send_mask; + /* called if event type availability changes */ + void (*state_change_fn)(struct sclp_register *); + /* called for events in cp_receive_mask/sclp_receive_mask */ + void (*receiver_fn)(struct evbuf_header *); +}; + +/* externals from sclp.c */ +void sclp_add_request(struct sclp_req *req); +void sclp_sync_wait(void); +int sclp_register(struct sclp_register *reg); +void sclp_unregister(struct sclp_register *reg); +char *sclp_error_message(u16 response_code); +int sclp_remove_processed(struct sccb_header *sccb); + +/* useful inlines */ + +/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */ +/* translate single character from ASCII to EBCDIC */ +static inline unsigned char +sclp_ascebc(unsigned char ch) +{ + return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch]; +} + +/* translate string from EBCDIC to ASCII */ +static inline void +sclp_ebcasc_str(unsigned char *str, int nr) +{ + (MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr); +} + +/* translate string from ASCII to EBCDIC */ +static inline void +sclp_ascebc_str(unsigned char *str, int nr) +{ + (MACHINE_IS_VM) ? 
ASCEBC(str, nr) : ASCEBC_500(str, nr); +} + +#endif /* __SCLP_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/sclp_rw.c linux.21rc5-ac2/drivers/s390/char/sclp_rw.c --- linux.21rc5/drivers/s390/char/sclp_rw.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/sclp_rw.c 2003-04-25 13:53:31.000000000 +0100 @@ -0,0 +1,496 @@ +/* + * drivers/s390/char/sclp_rw.c + * driver: reading from and writing to system console on S/390 via SCLP + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" +#include "sclp_rw.h" + +#define SCLP_RW_PRINT_HEADER "sclp low level driver: " + +/* + * The room for the SCCB (only for writing) is not equal to a page's size + * (as it is specified as the maximum size in the SCLP documentation) + * because of the additional data structure described above. + */ +#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) + +/* Event type structure for write message and write priority message */ +static struct sclp_register sclp_rw_event = { + .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask +}; + +/* + * Set up an sclp write buffer. Gets a page as input (4K) and returns + * a pointer to a struct sclp_buffer structure that is located at the + * end of the input page. This reduces the buffer space by a few + * bytes but simplifies things. + */ +struct sclp_buffer * +sclp_make_buffer(void *page, unsigned short columns, unsigned short htab) +{ + struct sclp_buffer *buffer; + struct write_sccb *sccb; + + sccb = (struct write_sccb *) page; + /* + * We keep the struct sclp_buffer structure at the end + * of the sccb page. + */ + buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1; + buffer->sccb = sccb; + buffer->retry_count = 0; + init_timer(&buffer->retry_timer); + buffer->mto_number = 0; + buffer->mto_char_sum = 0; + buffer->current_line = NULL; + buffer->current_length = 0; + buffer->columns = columns; + buffer->htab = htab; + + /* initialize sccb */ + memset(sccb, 0, sizeof(struct write_sccb)); + sccb->header.length = sizeof(struct write_sccb); + sccb->msg_buf.header.length = sizeof(struct msg_buf); + sccb->msg_buf.header.type = EvTyp_Msg; + sccb->msg_buf.mdb.header.length = sizeof(struct mdb); + sccb->msg_buf.mdb.header.type = 1; + sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ + sccb->msg_buf.mdb.header.revision_code = 1; + sccb->msg_buf.mdb.go.length = sizeof(struct go); + sccb->msg_buf.mdb.go.type = 1; + + return buffer; +} + +/* + * Return a pointer to the original page that has been used to create + * the buffer. + */ +void * +sclp_unmake_buffer(struct sclp_buffer *buffer) +{ + return buffer->sccb; +} + +/* + * Initialize a new Message Text Object (MTO) at the end of the provided buffer + * with enough room for max_len characters. Return 0 on success.
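 + * (Callers pass max_len == buffer->columns when opening a line of text + * and max_len == 0 for a bare new line; see sclp_write() below.)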
+ */ +static int +sclp_initialize_mto(struct sclp_buffer *buffer, int max_len) +{ + struct write_sccb *sccb; + struct mto *mto; + int mto_size; + + /* max size of new Message Text Object including message text */ + mto_size = sizeof(struct mto) + max_len; + + /* check if current buffer sccb can contain the mto */ + sccb = buffer->sccb; + if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size) + return -ENOMEM; + + /* find address of new message text object */ + mto = (struct mto *)(((addr_t) sccb) + sccb->header.length); + + /* + * fill the new Message-Text Object, + * starting behind the former last byte of the SCCB + */ + memset(mto, 0, sizeof(struct mto)); + mto->length = sizeof(struct mto); + mto->type = 4; /* message text object */ + mto->line_type_flags = LnTpFlgs_EndText; /* end text */ + + /* set pointer to first byte after struct mto. */ + buffer->current_line = (char *) (mto + 1); + buffer->current_length = 0; + + return 0; +} + +/* + * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of + * MTO, enclosing MDB, event buffer and SCCB. + */ +static void +sclp_finalize_mto(struct sclp_buffer *buffer) +{ + struct write_sccb *sccb; + struct mto *mto; + int str_len, mto_size; + + str_len = buffer->current_length; + buffer->current_line = NULL; + buffer->current_length = 0; + + /* real size of new Message Text Object including message text */ + mto_size = sizeof(struct mto) + str_len; + + /* find address of new message text object */ + sccb = buffer->sccb; + mto = (struct mto *)(((addr_t) sccb) + sccb->header.length); + + /* set size of message text object */ + mto->length = mto_size; + + /* + * update values of sizes + * (SCCB, Event(Message) Buffer, Message Data Block) + */ + sccb->header.length += mto_size; + sccb->msg_buf.header.length += mto_size; + sccb->msg_buf.mdb.header.length += mto_size; + + /* + * count number of buffered messages (= number of Message Text + * Objects) and number of buffered characters, + * both for the SCCB currently used for buffering and in total + */ + buffer->mto_number++; + buffer->mto_char_sum += str_len; +} + +/* + * processing of a message including escape characters, + * returns number of characters written to the output sccb + * ("processed" means it is not guaranteed that the characters have already + * been sent to the SCLP, but that it will be done at least the next time the + * SCLP is not busy) + */ +int +sclp_write(struct sclp_buffer *buffer, + const unsigned char *msg, int count, int from_user) +{ + int spaces, i_msg; + char ch; + int rc; + + /* + * parse msg for escape sequences (\t,\v ...) and put formatted + * msg into an mto (created by sclp_initialize_mto). + * + * We have to do this work ourselves because there is no support for + * these characters on the native machine and only partial support + * under VM (why does VM interpret \n but the native machine doesn't?) + * + * Depending on the i/o-control setting, the message is either written + * immediately or we wait for a final new line that may come with the + * next message. Besides that, we avoid a buffer overrun by writing + * out its content. + * + * RESTRICTIONS: + * + * \r and \b work only within one line because we are not able to + * modify previous output that has already been accepted by the SCLP. + * + * \t combined with a following \r is not correctly represented because + * \t is expanded to some spaces but \r does not know about a + * previous \t and decreases the current position by one column. + * This keeps the implementation slim and quick.
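 + * + * Example of the \b semantics (hypothetical input): writing "ab\b c" + * leaves "a c" in the message text object - \b merely steps the current + * position back one column and the following blank overwrites the 'b'.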
+ +/* + * processing of a message including escape characters, + * returns number of characters written to the output sccb + * ("processed" means that it is not guaranteed that the characters have + * already been sent to the SCLP, but that this will be done at least the + * next time the SCLP is not busy) + */ +int +sclp_write(struct sclp_buffer *buffer, + const unsigned char *msg, int count, int from_user) +{ + int spaces, i_msg; + char ch; + int rc; + + /* + * parse msg for escape sequences (\t,\v ...) and put the formatted + * msg into an mto (created by sclp_initialize_mto). + * + * We have to do this work ourselves because there is no support for + * these characters on the native machine and only partial support + * under VM (Why does VM interpret \n but the native machine doesn't ?) + * + * Depending on the i/o-control setting the message is either written + * immediately or we wait for a final new line maybe coming with the + * next message. Besides we avoid a buffer overrun by writing its + * content. + * + * RESTRICTIONS: + * + * \r and \b work within one line because we are not able to modify + * previous output that has already been accepted by the SCLP. + * + * \t combined with a following \r is not correctly represented because + * \t is expanded to some spaces but \r does not know about a + * previous \t and decreases the current position by one column. + * This keeps the implementation slim and quick. + */ + for (i_msg = 0; i_msg < count; i_msg++) { + if (from_user) { + if (get_user(ch, msg + i_msg) != 0) + return -EFAULT; + } else + ch = msg[i_msg]; + + switch (ch) { + case '\n': /* new line, line feed (ASCII) */ + /* check if new mto needs to be created */ + if (buffer->current_line == NULL) { + rc = sclp_initialize_mto(buffer, 0); + if (rc) + return i_msg; + } + sclp_finalize_mto(buffer); + break; + case '\a': /* bell, one for several times */ + /* set SCLP sound alarm bit in General Object */ + buffer->sccb->msg_buf.mdb.go.general_msg_flags |= + GnrlMsgFlgs_SndAlrm; + break; + case '\t': /* horizontal tabulator */ + /* check if new mto needs to be created */ + if (buffer->current_line == NULL) { + rc = sclp_initialize_mto(buffer, + buffer->columns); + if (rc) + return i_msg; + } + /* "go to (next htab-boundary + 1, same line)" */ + do { + if (buffer->current_length >= buffer->columns) + break; + /* ok, add a blank */ + *buffer->current_line++ = 0x40; + buffer->current_length++; + } while (buffer->current_length % buffer->htab); + break; + case '\f': /* form feed */ + case '\v': /* vertical tabulator */ + /* "go to (actual column, actual line + 1)" */ + /* = new line, leading spaces */ + if (buffer->current_line != NULL) { + spaces = buffer->current_length; + sclp_finalize_mto(buffer); + rc = sclp_initialize_mto(buffer, + buffer->columns); + if (rc) + return i_msg; + memset(buffer->current_line, 0x40, spaces); + buffer->current_line += spaces; + buffer->current_length = spaces; + } else { + /* on an empty line this is the same as \n */ + rc = sclp_initialize_mto(buffer, + buffer->columns); + if (rc) + return i_msg; + sclp_finalize_mto(buffer); + } + break; + case '\b': /* backspace */ + /* "go to (actual column - 1, actual line)" */ + /* decrement counter indicating position, */ + /* do not remove last character */ + if (buffer->current_line != NULL && + buffer->current_length > 0) { + buffer->current_length--; + buffer->current_line--; + } + break; + case 0x00: /* end of string */ + /* transfer current line to SCCB */ + if (buffer->current_line != NULL) + sclp_finalize_mto(buffer); + /* skip the rest of the message including the 0 byte */ + i_msg = count; + break; + default: /* no escape character */ + /* do not output unprintable characters */ + if (!isprint(ch)) + break; + /* check if new mto needs to be created */ + if (buffer->current_line == NULL) { + rc = sclp_initialize_mto(buffer, + buffer->columns); + if (rc) + return i_msg; + } + *buffer->current_line++ = sclp_ascebc(ch); + buffer->current_length++; + break; + } + /* check if current mto is full */ + if (buffer->current_line != NULL && + buffer->current_length >= buffer->columns) + sclp_finalize_mto(buffer); + } + + /* return number of processed characters */ + return i_msg; +} + +/* + * Return the number of free bytes in the sccb + */ +int +sclp_buffer_space(struct sclp_buffer *buffer) +{ + int count; + + count = MAX_SCCB_ROOM - buffer->sccb->header.length; + if (buffer->current_line != NULL) + count -= sizeof(struct mto) + buffer->current_length; + return count; +} + +/* + * Return number of characters in buffer + */ +int +sclp_chars_in_buffer(struct sclp_buffer *buffer) +{ + int count; + + count = buffer->mto_char_sum; + if (buffer->current_line != NULL) + count += buffer->current_length; + return count; +}
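The '\t' case in sclp_write() above pads with EBCDIC blanks (0x40) until the column reaches the next htab boundary; the do/while runs at least once, so a tab that starts exactly on a boundary still advances. The same loop in a standalone, ASCII form (column cap and htab width are parameters here):

#include <stdio.h>
#include <string.h>

/* Expand a tab the way sclp_write does: emit at least one blank, then
 * keep going until the column is a multiple of htab (capped at cols). */
static int expand_tab(char *line, int len, int cols, int htab)
{
	do {
		if (len >= cols)
			break;
		line[len++] = ' ';	/* the driver uses 0x40, EBCDIC blank */
	} while (len % htab);
	return len;
}

int main(void)
{
	char line[80];
	int len;

	memset(line, 0, sizeof(line));
	memcpy(line, "ab", 2);
	len = expand_tab(line, 2, 80, 8);	/* column 2 -> column 8 */
	printf("new column: %d\n", len);
	return 0;
}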
+ +/* + * set or provide some values that influence the driver's behaviour + */ +void +sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns) +{ + buffer->columns = columns; + if (buffer->current_line != NULL && + buffer->current_length > buffer->columns) + sclp_finalize_mto(buffer); +} + +void +sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab) +{ + buffer->htab = htab; +} + +/* + * called by sclp_console_init and/or sclp_tty_init + */ +int +sclp_rw_init(void) +{ + static int init_done = 0; + int rc; + + if (init_done) + return 0; + + rc = sclp_register(&sclp_rw_event); + if (rc == 0) + init_done = 1; + return rc; +} + +static void +sclp_buffer_retry(unsigned long data) +{ + struct sclp_buffer *buffer = (struct sclp_buffer *) data; + buffer->request.status = SCLP_REQ_FILLED; + buffer->sccb->header.response_code = 0x0000; + sclp_add_request(&buffer->request); +} + +#define SCLP_BUFFER_MAX_RETRY 5 +#define SCLP_BUFFER_RETRY_INTERVAL 2 + +/* + * Second half of the Write Event Data function; executed after the + * interrupt that indicates completion of the Service Call. + */ +static void +sclp_writedata_callback(struct sclp_req *request, void *data) +{ + int rc; + struct sclp_buffer *buffer; + struct write_sccb *sccb; + + buffer = (struct sclp_buffer *) data; + sccb = buffer->sccb; + + if (request->status == SCLP_REQ_FAILED) { + if (buffer->callback != NULL) + buffer->callback(buffer, -EIO); + return; + } + /* check SCLP response code and choose suitable action */ + switch (sccb->header.response_code) { + case 0x0020 : + /* Normal completion, buffer processed, message(s) sent */ + rc = 0; + break; + + case 0x0340: /* Contained SCLP equipment check */ + if (buffer->retry_count++ > SCLP_BUFFER_MAX_RETRY) { + rc = -EIO; + break; + } + /* remove processed buffers and requeue rest */ + if (sclp_remove_processed((struct sccb_header *) sccb) > 0) { + /* not all buffers were processed */ + sccb->header.response_code = 0x0000; + buffer->request.status = SCLP_REQ_FILLED; + sclp_add_request(request); + return; + } + rc = 0; + break; + + case 0x0040: /* SCLP equipment check */ + case 0x05f0: /* Target resource in improper state */ + if (buffer->retry_count++ > SCLP_BUFFER_MAX_RETRY) { + rc = -EIO; + break; + } + /* wait some time, then retry request */ + buffer->retry_timer.function = sclp_buffer_retry; + buffer->retry_timer.data = (unsigned long) buffer; + buffer->retry_timer.expires = jiffies + + SCLP_BUFFER_RETRY_INTERVAL*HZ; + add_timer(&buffer->retry_timer); + return; + + default: + if (sccb->header.response_code == 0x71f0) + rc = -ENOMEM; + else + rc = -EINVAL; + break; + } + if (buffer->callback != NULL) + buffer->callback(buffer, rc); +}
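sclp_writedata_callback() applies two retry flavours: an immediate requeue when the SCCB was only partially processed (response 0x0340) and a timer-deferred retry for transient conditions (0x0040, 0x05f0), both bounded by SCLP_BUFFER_MAX_RETRY. Stripped of the SCLP plumbing, the policy reduces to something like this sketch (names invented):

#include <stdio.h>

#define MAX_RETRY 5

enum action { DONE_OK, DONE_FAIL, REQUEUE_NOW, RETRY_LATER };

/* Decide what to do with a finished request based on its response code. */
static enum action classify(unsigned code, int *retries)
{
	switch (code) {
	case 0x0020:			/* normal completion */
		return DONE_OK;
	case 0x0340:			/* partial completion */
		return (*retries)++ > MAX_RETRY ? DONE_FAIL : REQUEUE_NOW;
	case 0x0040:			/* transient equipment check */
	case 0x05f0:			/* resource in improper state */
		return (*retries)++ > MAX_RETRY ? DONE_FAIL : RETRY_LATER;
	default:
		return DONE_FAIL;
	}
}

int main(void)
{
	int retries = 0;

	printf("0x0040 -> %d (1 = retry later)\n",
	       classify(0x0040, &retries) == RETRY_LATER);
	return 0;
}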
+ +/* + * Setup the request structure in the struct sclp_buffer to do SCLP Write + * Event Data and pass the request to the core SCLP loop. + */ +void +sclp_emit_buffer(struct sclp_buffer *buffer, + void (*callback)(struct sclp_buffer *, int)) +{ + struct write_sccb *sccb; + + /* add current line if there is one */ + if (buffer->current_line != NULL) + sclp_finalize_mto(buffer); + + /* Are there messages in the output buffer ? */ + if (buffer->mto_number == 0) { + if (callback != NULL) + callback(buffer, 0); + return; + } + + sccb = buffer->sccb; + if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask) + /* Use normal write message */ + sccb->msg_buf.header.type = EvTyp_Msg; + else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask) + /* Use write priority message */ + sccb->msg_buf.header.type = EvTyp_PMsgCmd; + else { + if (callback != NULL) + callback(buffer, -ENOSYS); + return; + } + buffer->request.command = SCLP_CMDW_WRITEDATA; + buffer->request.status = SCLP_REQ_FILLED; + buffer->request.callback = sclp_writedata_callback; + buffer->request.callback_data = buffer; + buffer->request.sccb = sccb; + buffer->callback = callback; + sclp_add_request(&buffer->request); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/sclp_rw.h linux.21rc5-ac2/drivers/s390/char/sclp_rw.h --- linux.21rc5/drivers/s390/char/sclp_rw.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/sclp_rw.h 2003-04-25 13:53:31.000000000 +0100 @@ -0,0 +1,98 @@ +/* + * drivers/s390/char/sclp_rw.h + * interface to the SCLP-read/write driver + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#ifndef __SCLP_RW_H__ +#define __SCLP_RW_H__ + +#include +#include + +struct mto { + u16 length; + u16 type; + u16 line_type_flags; + u8 alarm_control; + u8 _reserved[3]; +} __attribute__((packed)); + +struct go { + u16 length; + u16 type; + u32 domid; + u8 hhmmss_time[8]; + u8 th_time[3]; + u8 reserved_0; + u8 dddyyyy_date[7]; + u8 _reserved_1; + u16 general_msg_flags; + u8 _reserved_2[10]; + u8 originating_system_name[8]; + u8 job_guest_name[8]; +} __attribute__((packed)); + +struct mdb_header { + u16 length; + u16 type; + u32 tag; + u32 revision_code; +} __attribute__((packed)); + +struct mdb { + struct mdb_header header; + struct go go; +} __attribute__((packed)); + +struct msg_buf { + struct evbuf_header header; + struct mdb mdb; +} __attribute__((packed)); + +struct write_sccb { + struct sccb_header header; + struct msg_buf msg_buf; +} __attribute__((packed)); + +/* The number of empty mto buffers that can be contained in a single sccb. */ +#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \ + sizeof(struct write_sccb)) / sizeof(struct mto)) + +/* + * data structure holding information about a list of SCCBs (only for + * writing); it is located at the end of an SCCB's page + */ +struct sclp_buffer { + struct list_head list; /* list_head for sccb_info chain */ + struct sclp_req request; + struct write_sccb *sccb; + char *current_line; + int current_length; + int retry_count; + struct timer_list retry_timer; + /* output format settings */ + unsigned short columns; + unsigned short htab; + /* statistics about this buffer */ + unsigned int mto_char_sum; /* # chars in sccb */ + unsigned int mto_number; /* # mtos in sccb */ + /* Callback that is called after reaching final status. */ + void (*callback)(struct sclp_buffer *, int); +};
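NR_EMPTY_MTO_PER_SCCB above states the page budget: a 4K page must hold the write_sccb header, the sclp_buffer bookkeeping at its tail, and as many empty mtos as fit between them. A standalone sketch of the arithmetic with made-up structure sizes (the real values come from the packed structs above):

#include <stdio.h>

#define PAGE_SZ   4096UL
/* Stand-in sizes; not the real kernel structure sizes. */
#define SCLP_BUF  64UL		/* sizeof(struct sclp_buffer) */
#define WSCCB     76UL		/* sizeof(struct write_sccb)  */
#define MTO       8UL		/* sizeof(struct mto)         */

int main(void)
{
	unsigned long nr = (PAGE_SZ - SCLP_BUF - WSCCB) / MTO;

	/* every empty mto consumes 8 bytes, so roughly (4096-140)/8 fit */
	printf("empty mtos per sccb: %lu\n", nr);
	return 0;
}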
+ +int sclp_rw_init(void); +struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short); +void *sclp_unmake_buffer(struct sclp_buffer *); +int sclp_buffer_space(struct sclp_buffer *); +int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int, int); +void sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int)); +void sclp_set_columns(struct sclp_buffer *, unsigned short); +void sclp_set_htab(struct sclp_buffer *, unsigned short); +int sclp_chars_in_buffer(struct sclp_buffer *); + +#endif /* __SCLP_RW_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/sclp_tty.c linux.21rc5-ac2/drivers/s390/char/sclp_tty.c --- linux.21rc5/drivers/s390/char/sclp_tty.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/sclp_tty.c 2003-05-29 00:54:44.000000000 +0100 @@ -0,0 +1,816 @@ +/* + * drivers/s390/char/sclp_tty.c + * SCLP line mode terminal driver. + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" +#include "sclp_rw.h" +#include "sclp_tty.h" + +#define SCLP_TTY_PRINT_HEADER "sclp tty driver: " + +/* + * size of a buffer that collects single characters coming in + * via sclp_tty_put_char() + */ +#define SCLP_TTY_BUF_SIZE 512 + +/* + * There is exactly one SCLP terminal, so we can keep things simple + * and allocate all variables statically. + */ + +/* Lock to guard changes to the global variables. */ +static spinlock_t sclp_tty_lock; +/* List of free pages that can be used for console output buffering. */ +static struct list_head sclp_tty_pages; +/* List of full struct sclp_buffer structures ready for output. */ +static struct list_head sclp_tty_outqueue; +/* Counter of how many buffers are queued for output. */ +static int sclp_tty_buffer_count; +/* Pointer to current console buffer. */ +static struct sclp_buffer *sclp_ttybuf; +/* Timer for delayed output of console messages. */ +static struct timer_list sclp_tty_timer; +/* Waitqueue to wait for buffers to get empty. */ +static wait_queue_head_t sclp_tty_waitq; + +static struct tty_struct *sclp_tty; +static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; +static unsigned short int sclp_tty_chars_count; + +static struct tty_driver sclp_tty_driver; +static struct tty_struct * sclp_tty_table[1]; +static struct termios * sclp_tty_termios[1]; +static struct termios * sclp_tty_termios_locked[1]; +static int sclp_tty_refcount = 0; + +extern struct termios tty_std_termios; + +static struct sclp_ioctls sclp_ioctls; +static struct sclp_ioctls sclp_ioctls_init = +{ + 8, /* 1 hor. tab. = 8 spaces */ + 0, /* no echo of input by this driver */ + 80, /* 80 characters/line */ + 1, /* write after 1/10 s without final new line */ + MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */ + MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */ + 0, /* do not convert to lower case */ + 0x6c /* to separate upper and lower case */ + /* ('%' in EBCDIC) */ +}; + +/* This routine is called whenever we try to open a SCLP terminal. */ +static int +sclp_tty_open(struct tty_struct *tty, struct file *filp) +{ + sclp_tty = tty; + tty->driver_data = NULL; + tty->low_latency = 0; + return 0; +} + +/* This routine is called when the SCLP terminal is closed.
*/ +static void +sclp_tty_close(struct tty_struct *tty, struct file *filp) +{ + if (atomic_read(&tty->count) > 1) + return; + sclp_tty = NULL; +} + +/* execute commands to control the i/o behaviour of the SCLP tty at runtime */ +static int +sclp_tty_ioctl(struct tty_struct *tty, struct file * file, + unsigned int cmd, unsigned long arg) +{ + unsigned long flags; + unsigned int obuf; + int check; + int rc; + + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + rc = 0; + check = 0; + switch (cmd) { + case TIOCSCLPSHTAB: + /* set width of horizontal tab */ + if (get_user(sclp_ioctls.htab, (unsigned short *) arg)) + rc = -EFAULT; + else + check = 1; + break; + case TIOCSCLPGHTAB: + /* get width of horizontal tab */ + if (put_user(sclp_ioctls.htab, (unsigned short *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSECHO: + /* enable/disable echo of input */ + if (get_user(sclp_ioctls.echo, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGECHO: + /* Is echo of input enabled ? */ + if (put_user(sclp_ioctls.echo, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSCOLS: + /* set number of columns for output */ + if (get_user(sclp_ioctls.columns, (unsigned short *) arg)) + rc = -EFAULT; + else + check = 1; + break; + case TIOCSCLPGCOLS: + /* get number of columns for output */ + if (put_user(sclp_ioctls.columns, (unsigned short *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSNL: + /* enable/disable writing without final new line character */ + if (get_user(sclp_ioctls.final_nl, (signed char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGNL: + /* Is writing without final new line character enabled ? */ + if (put_user(sclp_ioctls.final_nl, (signed char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSOBUF: + /* + * set the maximum buffer size for output; it will be rounded + * up to the next 4kB boundary and stored as a number of SCCBs + * (4kB buffers); limitation: 256 x 4kB + */ + if (get_user(obuf, (unsigned int *) arg) == 0) { + if (obuf & 0xFFF) + sclp_ioctls.max_sccb = (obuf >> 12) + 1; + else + sclp_ioctls.max_sccb = (obuf >> 12); + } else + rc = -EFAULT; + break; + case TIOCSCLPGOBUF: + /* get the maximum buffer size for output */ + obuf = sclp_ioctls.max_sccb << 12; + if (put_user(obuf, (unsigned int *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGKBUF: + /* get the number of buffers obtained from the kernel at startup */ + if (put_user(sclp_ioctls.kmem_sccb, (unsigned short *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSCASE: + /* enable/disable conversion from upper to lower case */ + if (get_user(sclp_ioctls.tolower, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGCASE: + /* Is conversion from upper to lower case of input enabled?
*/ + if (put_user(sclp_ioctls.tolower, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSDELIM: + /* + * set special character used for separating upper and + * lower case, 0x00 disables this feature + */ + if (get_user(sclp_ioctls.delim, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGDELIM: + /* + * get special character used for separating upper and + * lower case, 0x00 disables this feature + */ + if (put_user(sclp_ioctls.delim, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSINIT: + /* set initial (default) sclp ioctls */ + sclp_ioctls = sclp_ioctls_init; + check = 1; + break; + default: + rc = -ENOIOCTLCMD; + break; + } + if (check) { + spin_lock_irqsave(&sclp_tty_lock, flags); + if (sclp_ttybuf != NULL) { + sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab); + sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns); + } + spin_unlock_irqrestore(&sclp_tty_lock, flags); + } + return rc; +} + +/* + * This routine returns the number of characters the tty driver + * will accept for queuing to be written. This number is subject + * to change as output buffers get emptied, or if output flow + * control is exercised. It is not an exact number because not every + * character needs the same space in the sccb. The worst case is + * a string of newlines. Every newline creates a new mto which + * needs 8 bytes. + */ +static int +sclp_tty_write_room (struct tty_struct *tty) +{ + unsigned long flags; + struct list_head *l; + int count; + + spin_lock_irqsave(&sclp_tty_lock, flags); + count = 0; + if (sclp_ttybuf != NULL) + count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto); + list_for_each(l, &sclp_tty_pages) + count += NR_EMPTY_MTO_PER_SCCB; + spin_unlock_irqrestore(&sclp_tty_lock, flags); + return count; +} + +static void +sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc) +{ + unsigned long flags; + struct sclp_buffer *next; + void *page; + + /* Ignore return code - because tty-writes aren't critical, + we do without a sophisticated error recovery mechanism. */ + page = sclp_unmake_buffer(buffer); + spin_lock_irqsave(&sclp_tty_lock, flags); + /* Remove buffer from outqueue */ + list_del(&buffer->list); + sclp_tty_buffer_count--; + list_add_tail((struct list_head *) page, &sclp_tty_pages); + /* Check if there is a pending buffer on the out queue. */ + next = NULL; + if (!list_empty(&sclp_tty_outqueue)) + next = list_entry(sclp_tty_outqueue.next, + struct sclp_buffer, list); + spin_unlock_irqrestore(&sclp_tty_lock, flags); + if (next != NULL) + sclp_emit_buffer(next, sclp_ttybuf_callback); + wake_up(&sclp_tty_waitq); + /* check if the tty needs a wake up call */ + if (sclp_tty != NULL) { + if ((sclp_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + sclp_tty->ldisc.write_wakeup) + (sclp_tty->ldisc.write_wakeup)(sclp_tty); + wake_up_interruptible(&sclp_tty->write_wait); + } +} + +static inline void +__sclp_ttybuf_emit(struct sclp_buffer *buffer) +{ + unsigned long flags; + int count; + + spin_lock_irqsave(&sclp_tty_lock, flags); + list_add_tail(&buffer->list, &sclp_tty_outqueue); + count = sclp_tty_buffer_count++; + spin_unlock_irqrestore(&sclp_tty_lock, flags); + + if (count == 0) + sclp_emit_buffer(buffer, sclp_ttybuf_callback); +}
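__sclp_ttybuf_emit() and sclp_ttybuf_callback() together implement a classic single-consumer output queue: buffers are always appended under the lock, but the hardware is only kicked by the enqueue that found the queue empty; every completion then starts the next queued buffer. The invariant in a threadless sketch (list handling simplified to a ring):

#include <stdio.h>

#define QLEN 8

static int queue[QLEN];
static int head, count;

static void start_io(int buf)
{
	printf("start I/O on buffer %d\n", buf);
}

/* Enqueue; only the enqueue that finds the queue empty starts I/O. */
static void emit(int buf)
{
	queue[(head + count) % QLEN] = buf;
	if (count++ == 0)
		start_io(buf);
}

/* Completion: pop the finished buffer, start the next one if any. */
static void complete(void)
{
	head = (head + 1) % QLEN;
	if (--count > 0)
		start_io(queue[head]);
}

int main(void)
{
	emit(1);	/* starts I/O immediately */
	emit(2);	/* just queued */
	complete();	/* finishes 1, starts 2 */
	complete();
	return 0;
}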
+ +/* + * When this routine is called from the timer, flush the temporary + * write buffer. + */ +static void +sclp_tty_timeout(unsigned long data) +{ + unsigned long flags; + struct sclp_buffer *buf; + + spin_lock_irqsave(&sclp_tty_lock, flags); + buf = sclp_ttybuf; + sclp_ttybuf = NULL; + spin_unlock_irqrestore(&sclp_tty_lock, flags); + + if (buf != NULL) { + __sclp_ttybuf_emit(buf); + } +} + +/* + * Write a string to the sclp tty. + */ +static void +sclp_tty_write_string(const unsigned char *str, int count, int from_user) +{ + unsigned long flags; + void *page; + int written; + struct sclp_buffer *buf; + + if (count <= 0) + return; + spin_lock_irqsave(&sclp_tty_lock, flags); + do { + /* Create a sclp output buffer if none exists yet */ + if (sclp_ttybuf == NULL) { + while (list_empty(&sclp_tty_pages)) { + spin_unlock_irqrestore(&sclp_tty_lock, flags); + wait_event(sclp_tty_waitq, + !list_empty(&sclp_tty_pages)); + spin_lock_irqsave(&sclp_tty_lock, flags); + } + page = sclp_tty_pages.next; + list_del((struct list_head *) page); + sclp_ttybuf = sclp_make_buffer(page, + sclp_ioctls.columns, + sclp_ioctls.htab); + } + /* try to write the string to the current output buffer */ + written = sclp_write(sclp_ttybuf, str, count, from_user); + if (written == -EFAULT || written == count) + break; + /* + * Not all characters could be written to the current + * output buffer. Emit the buffer, create a new buffer + * and then output the rest of the string. + */ + buf = sclp_ttybuf; + sclp_ttybuf = NULL; + spin_unlock_irqrestore(&sclp_tty_lock, flags); + __sclp_ttybuf_emit(buf); + spin_lock_irqsave(&sclp_tty_lock, flags); + str += written; + count -= written; + } while (count > 0); + /* Setup timer to output current console buffer after 1/10 second */ + if (sclp_ioctls.final_nl) { + if (sclp_ttybuf != NULL && !timer_pending(&sclp_tty_timer)) { + init_timer(&sclp_tty_timer); + sclp_tty_timer.function = sclp_tty_timeout; + sclp_tty_timer.data = 0UL; + sclp_tty_timer.expires = jiffies + HZ/10; + add_timer(&sclp_tty_timer); + } + } else { + __sclp_ttybuf_emit(sclp_ttybuf); + sclp_ttybuf = NULL; + } + spin_unlock_irqrestore(&sclp_tty_lock, flags); +} + +/* + * This routine is called by the kernel to write a series of characters to the + * tty device. The characters may come from user space or kernel space. This + * routine will return the number of characters actually accepted for writing. + */ +static int +sclp_tty_write(struct tty_struct *tty, int from_user, + const unsigned char *buf, int count) +{ + if (sclp_tty_chars_count > 0) { + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); + sclp_tty_chars_count = 0; + } + sclp_tty_write_string(buf, count, from_user); + return count; +} + +/* + * This routine is called by the kernel to write a single character to the tty + * device. If the kernel uses this routine, it must call the flush_chars() + * routine (if defined) when it is done stuffing characters into the driver. + * + * Characters provided to sclp_tty_put_char() are buffered by the SCLP driver. + * If the given character is a '\n', the contents of the SCLP write buffer + * (including previous characters from sclp_tty_put_char() and strings from + * sclp_write() without a final '\n') will be written. + */
+static void +sclp_tty_put_char(struct tty_struct *tty, unsigned char ch) +{ + sclp_tty_chars[sclp_tty_chars_count++] = ch; + if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); + sclp_tty_chars_count = 0; + } +} + +/* + * This routine is called by the kernel after it has written a series of + * characters to the tty device using put_char(). + */ +static void +sclp_tty_flush_chars(struct tty_struct *tty) +{ + if (sclp_tty_chars_count > 0) { + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); + sclp_tty_chars_count = 0; + } +} + +/* + * This routine returns the number of characters in the write buffer of the + * SCLP driver. The provided number includes all characters that are stored + * in the SCCB (will be written next time the SCLP is not busy) as well as + * characters in the write buffer (will not be written as long as a + * final line feed is missing). + */ +static int +sclp_tty_chars_in_buffer(struct tty_struct *tty) +{ + unsigned long flags; + struct list_head *l; + struct sclp_buffer *t; + int count; + + spin_lock_irqsave(&sclp_tty_lock, flags); + count = 0; + if (sclp_ttybuf != NULL) + count = sclp_chars_in_buffer(sclp_ttybuf); + list_for_each(l, &sclp_tty_outqueue) { + t = list_entry(l, struct sclp_buffer, list); + count += sclp_chars_in_buffer(t); + } + spin_unlock_irqrestore(&sclp_tty_lock, flags); + return count; +} + +/* + * removes all content from the buffers of the low level driver + */ +static void +sclp_tty_flush_buffer(struct tty_struct *tty) +{ + if (sclp_tty_chars_count > 0) { + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); + sclp_tty_chars_count = 0; + } +} + +/* + * push input to tty + */ +static void +sclp_tty_input(unsigned char* buf, unsigned int count) +{ + unsigned int cchar; + + /* + * If this tty driver is currently closed + * then throw the received input away. + */ + if (sclp_tty == NULL) + return; + cchar = ctrlchar_handle(buf, count, sclp_tty, 1); + switch (cchar & CTRLCHAR_MASK) { + case CTRLCHAR_SYSRQ: + break; + case CTRLCHAR_CTRL: + sclp_tty->flip.count++; + *sclp_tty->flip.flag_buf_ptr++ = TTY_NORMAL; + *sclp_tty->flip.char_buf_ptr++ = cchar; + tty_flip_buffer_push(sclp_tty); + break; + case CTRLCHAR_NONE: + /* send (normal) input to line discipline */ + memcpy(sclp_tty->flip.char_buf_ptr, buf, count); + if (count < 2 || + (strncmp ((const char *) buf + count - 2, "^n", 2) && + strncmp ((const char *) buf + count - 2, "\252n", 2))) { + sclp_tty->flip.char_buf_ptr[count] = '\n'; + count++; + } else + count -= 2; + memset(sclp_tty->flip.flag_buf_ptr, TTY_NORMAL, count); + sclp_tty->flip.char_buf_ptr += count; + sclp_tty->flip.flag_buf_ptr += count; + sclp_tty->flip.count += count; + tty_flip_buffer_push(sclp_tty); + break; + } +}
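The CTRLCHAR_NONE branch of sclp_tty_input() implements line continuation: input normally gets a '\n' appended, but a trailing "^n" (or "\252n", its EBCDIC-converted form) is stripped and suppresses the newline so the operator can continue the line. The same decision as a standalone helper (sketch; buffer handling simplified):

#include <stdio.h>
#include <string.h>

/* Returns the new length; appends '\n' unless the line ends with the
 * continuation marker "^n" (or "\252n"), which is removed instead. */
static int finish_line(char *buf, int count)
{
	if (count >= 2 && (memcmp(buf + count - 2, "^n", 2) == 0 ||
			   memcmp(buf + count - 2, "\252n", 2) == 0))
		return count - 2;	/* continuation: drop the marker */
	buf[count] = '\n';		/* normal line: terminate it */
	return count + 1;
}

int main(void)
{
	char a[16] = "ls -l";
	char b[16] = "grep ^n";

	printf("%d\n", finish_line(a, 5));	/* 6, '\n' appended */
	printf("%d\n", finish_line(b, 7));	/* 5, marker stripped */
	return 0;
}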
+ +/* + * get an EBCDIC string in upper/lower case, + * find out characters in lower/upper case separated by a special character, + * modify the original string, + * returns length of resulting string + */ +static int +sclp_switch_cases(unsigned char *buf, int count, + unsigned char delim, int tolower) +{ + unsigned char *ip, *op; + int toggle; + + /* initially changing case is off */ + toggle = 0; + ip = op = buf; + while (count-- > 0) { + /* compare with special character */ + if (*ip == delim) { + /* followed by another special character? */ + if (count && ip[1] == delim) { + /* + * ... then put a single copy of the special + * character to the output string + */ + *op++ = *ip++; + count--; + } else + /* + * ... a special character followed by a normal + * character toggles the case change behaviour + */ + toggle = ~toggle; + /* skip special character */ + ip++; + } else + /* not the special character */ + if (toggle) + /* but case switching is on */ + if (tolower) + /* switch to uppercase */ + *op++ = _ebc_toupper[(int) *ip++]; + else + /* switch to lowercase */ + *op++ = _ebc_tolower[(int) *ip++]; + else + /* no case switching, copy the character */ + *op++ = *ip++; + } + /* return length of reformatted string. */ + return op - buf; +} + +static void +sclp_get_input(unsigned char *start, unsigned char *end) +{ + int count; + + count = end - start; + /* + * if set in ioctl convert EBCDIC to lower case + * (modify original input in SCCB) + */ + if (sclp_ioctls.tolower) + EBC_TOLOWER(start, count); + + /* + * if set in ioctl find out characters in lower or upper case + * (depends on current case) separated by a special character, + * works on EBCDIC + */ + if (sclp_ioctls.delim) + count = sclp_switch_cases(start, count, + sclp_ioctls.delim, + sclp_ioctls.tolower); + + /* convert EBCDIC to ASCII (modify original input in SCCB) */ + sclp_ebcasc_str(start, count); + + /* if set in ioctl write the operator's input to the console */ + if (sclp_ioctls.echo) + sclp_tty_write(sclp_tty, 0, start, count); + + /* transfer input to high level driver */ + sclp_tty_input(start, count); +} + +static inline struct gds_vector * +find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id) +{ + struct gds_vector *vec; + + for (vec = start; vec < end; (void *) vec += vec->length) + if (vec->gds_id == id) + return vec; + return NULL; +} + +static inline struct gds_subvector * +find_gds_subvector(struct gds_subvector *start, + struct gds_subvector *end, u8 key) +{ + struct gds_subvector *subvec; + + for (subvec = start; subvec < end; (void *) subvec += subvec->length) + if (subvec->key == key) + return subvec; + return NULL; +} + +static inline void +sclp_eval_selfdeftextmsg(struct gds_subvector *start, + struct gds_subvector *end) +{ + struct gds_subvector *subvec; + + subvec = start; + while (subvec < end) { + subvec = find_gds_subvector(subvec, end, 0x30); + if (!subvec) + break; + sclp_get_input((unsigned char *)(subvec + 1), + (unsigned char *) subvec + subvec->length); + (void *) subvec += subvec->length; + } +} + +static inline void +sclp_eval_textcmd(struct gds_subvector *start, + struct gds_subvector *end) +{ + struct gds_subvector *subvec; + + subvec = start; + while (subvec < end) { + subvec = find_gds_subvector(subvec, end, + GDS_KEY_SelfDefTextMsg); + if (!subvec) + break; + sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), + (void *)subvec + subvec->length); + (void *) subvec += subvec->length; + } +} + +static inline void +sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end) +{ + struct gds_vector *vec; + + vec = start; + while (vec < end) { + vec = find_gds_vector(vec, end, GDS_ID_TextCmd); + if (!vec) + break; + sclp_eval_textcmd((struct gds_subvector *)(vec + 1), + (void *) vec + vec->length); + (void *) vec += vec->length; + } +} + + +static inline void +sclp_eval_mdsmu(struct gds_vector *start, void *end) +{ + struct gds_vector *vec; + + vec = find_gds_vector(start, end, GDS_ID_CPMSU); + if (vec) + sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length); +}
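find_gds_vector(), find_gds_subvector() and the sclp_eval_* chain above are a nested TLV walk: each (sub)vector begins with a self-describing length, so scanning is pointer-plus-length, and each matching container is recursed into. The core scan step, stated generically (sketch; the two-byte header is invented for brevity):

#include <stdio.h>
#include <stdint.h>

struct tlv {
	uint8_t len;	/* total length including this header */
	uint8_t id;
};

/* Scan [start,end) for the first TLV with the wanted id, stepping by
 * each entry's self-described length, as find_gds_vector() does. */
static struct tlv *find(void *start, void *end, uint8_t id)
{
	struct tlv *t;

	for (t = start; (void *)t < end; t = (void *)((char *)t + t->len))
		if (t->id == id)
			return t;
	return NULL;
}

int main(void)
{
	/* two entries: {len=4,id=1,...}, {len=3,id=7,...} */
	uint8_t blob[] = { 4, 1, 0, 0, 3, 7, 0 };
	struct tlv *t = find(blob, blob + sizeof(blob), 7);

	printf("found id 7 at offset %ld\n", (long)((char *)t - (char *)blob));
	return 0;
}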
+ +static void +sclp_tty_receiver(struct evbuf_header *evbuf) +{ + struct gds_vector *start, *end, *vec; + + start = (struct gds_vector *)(evbuf + 1); + end = (void *) evbuf + evbuf->length; + vec = find_gds_vector(start, end, GDS_ID_MDSMU); + if (vec) + sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length); +} + +static void +sclp_tty_state_change(struct sclp_register *reg) +{ +} + +static struct sclp_register sclp_input_event = +{ + .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask, + .state_change_fn = sclp_tty_state_change, + .receiver_fn = sclp_tty_receiver +}; + +void +sclp_tty_init(void) +{ + void *page; + int i; + int rc; + + if (!CONSOLE_IS_SCLP) + return; + rc = sclp_rw_init(); + if (rc != 0) { + printk(KERN_ERR SCLP_TTY_PRINT_HEADER + "could not register tty - " + "sclp_rw_init returned %d\n", rc); + return; + } + /* Allocate pages for output buffering */ + INIT_LIST_HEAD(&sclp_tty_pages); + for (i = 0; i < MAX_KMEM_PAGES; i++) { + page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (page == NULL) + return; + list_add_tail((struct list_head *) page, &sclp_tty_pages); + } + INIT_LIST_HEAD(&sclp_tty_outqueue); + spin_lock_init(&sclp_tty_lock); + init_waitqueue_head(&sclp_tty_waitq); + init_timer(&sclp_tty_timer); + sclp_ttybuf = NULL; + sclp_tty_buffer_count = 0; + if (MACHINE_IS_VM) { + /* + * save 4 characters for the CPU number + * written at start of each line by VM/CP + */ + sclp_ioctls_init.columns = 76; + /* fold input lines to lowercase */ + sclp_ioctls_init.tolower = 1; + } + sclp_ioctls = sclp_ioctls_init; + sclp_tty_chars_count = 0; + sclp_tty = NULL; + + ctrlchar_init(); + + if (sclp_register(&sclp_input_event) != 0) + return; + + memset (&sclp_tty_driver, 0, sizeof(struct tty_driver)); + sclp_tty_driver.magic = TTY_DRIVER_MAGIC; + sclp_tty_driver.driver_name = "sclp_line"; + sclp_tty_driver.name = "ttyS"; + sclp_tty_driver.name_base = 0; + sclp_tty_driver.major = TTY_MAJOR; + sclp_tty_driver.minor_start = 64; + sclp_tty_driver.num = 1; + sclp_tty_driver.type = TTY_DRIVER_TYPE_SYSTEM; + sclp_tty_driver.subtype = SYSTEM_TYPE_TTY; + sclp_tty_driver.init_termios = tty_std_termios; + sclp_tty_driver.flags = TTY_DRIVER_REAL_RAW; + sclp_tty_driver.refcount = &sclp_tty_refcount; + /* sclp_tty_driver.proc_entry ? */ + sclp_tty_driver.table = sclp_tty_table; + sclp_tty_driver.termios = sclp_tty_termios; + sclp_tty_driver.termios_locked = sclp_tty_termios_locked; + sclp_tty_driver.open = sclp_tty_open; + sclp_tty_driver.close = sclp_tty_close; + sclp_tty_driver.write = sclp_tty_write; + sclp_tty_driver.put_char = sclp_tty_put_char; + sclp_tty_driver.flush_chars = sclp_tty_flush_chars; + sclp_tty_driver.write_room = sclp_tty_write_room; + sclp_tty_driver.chars_in_buffer = sclp_tty_chars_in_buffer; + sclp_tty_driver.flush_buffer = sclp_tty_flush_buffer; + sclp_tty_driver.ioctl = sclp_tty_ioctl; + /* + * No need for these functions because they would only be called when + * the line discipline is close to full. That means that nearly 4kB of + * input data must have accumulated. I suppose it is very difficult + * for the operator to enter lines quickly enough to overrun the + * line discipline. Besides, the n_tty line discipline does not try to + * call such functions if the pointers are set to NULL. Finally, I have + * no idea what to do within these functions. I cannot prevent the + * operator and the SCLP from delivering input. Because of the reasons + * above it does not seem worth implementing a buffer mechanism.
+ */ + sclp_tty_driver.throttle = NULL; + sclp_tty_driver.unthrottle = NULL; + sclp_tty_driver.send_xchar = NULL; + sclp_tty_driver.set_termios = NULL; + sclp_tty_driver.set_ldisc = NULL; + sclp_tty_driver.stop = NULL; + sclp_tty_driver.start = NULL; + sclp_tty_driver.hangup = NULL; + sclp_tty_driver.break_ctl = NULL; + sclp_tty_driver.wait_until_sent = NULL; + sclp_tty_driver.read_proc = NULL; + sclp_tty_driver.write_proc = NULL; + + rc = tty_register_driver(&sclp_tty_driver); + if (rc != 0) + printk(KERN_ERR SCLP_TTY_PRINT_HEADER + "could not register tty - " + "sclp_drv_register returned %d\n", rc); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/sclp_tty.h linux.21rc5-ac2/drivers/s390/char/sclp_tty.h --- linux.21rc5/drivers/s390/char/sclp_tty.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/sclp_tty.h 2003-04-25 13:53:31.000000000 +0100 @@ -0,0 +1,67 @@ +/* + * drivers/s390/char/sclp_tty.h + * interface to the SCLP-read/write driver + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#ifndef __SCLP_TTY_H__ +#define __SCLP_TTY_H__ + +#include + +/* This is the type of data structures storing sclp ioctl settings. */ +struct sclp_ioctls { + unsigned short htab; + unsigned char echo; + unsigned short columns; + unsigned char final_nl; + unsigned short max_sccb; + unsigned short kmem_sccb; /* can't be modified at run time */ + unsigned char tolower; + unsigned char delim; +}; + +/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */ +#define SCLP_IOCTL_LETTER 'B' + +/* set width of horizontal tabulator */ +#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short) +/* enable/disable echo of input (independent from line discipline) */ +#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char) +/* set number of columns for output */ +#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short) +/* enable/disable writing without final new line character */ +#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char) +/* set the maximum buffer size for output, rounded up to next 4kB boundary */ +#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short) +/* set initial (default) sclp ioctls */ +#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6) +/* enable/disable conversion from upper to lower case of input */ +#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char) +/* set special character used for separating upper and lower case, */ +/* 0x00 disables this feature */ +#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char) + +/* get width of horizontal tabulator */ +#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short) +/* Is echo of input enabled ? (independent from line discipline) */ +#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char) +/* get number of columns for output */ +#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short) +/* Is writing without final new line character enabled ? */ +#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char) +/* get the maximum buffer size for output */ +#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short) +/* Is conversion from upper to lower case of input enabled ?
*/ +#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char) +/* get special character used for separating upper and lower case, */ +/* 0x00 disables this feature */ +#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char) +/* get the number of buffers/pages got from kernel at startup */ +#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short) + +#endif /* __SCLP_TTY_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape3480.c linux.21rc5-ac2/drivers/s390/char/tape3480.c --- linux.21rc5/drivers/s390/char/tape3480.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape3480.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,156 +0,0 @@ -/*************************************************************************** - * - * drivers/s390/char/tape3480.c - * tape device discipline for 3480 tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include /* CCW allocations */ -#include -#include -#include -#include "tape.h" -#include "tape34xx.h" -#include "tape3480.h" - -tape_event_handler_t tape3480_event_handler_table[TS_SIZE][TE_SIZE] = -{ - /* {START , DONE, FAILED, ERROR, OTHER } */ - {NULL, tape34xx_unused_done, NULL, NULL, NULL}, /* TS_UNUSED */ - {NULL, tape34xx_idle_done, NULL, NULL, NULL}, /* TS_IDLE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_DONE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_FAILED */ - {NULL, tape34xx_block_done, NULL, NULL, NULL}, /* TS_BLOCK_INIT */ - {NULL, tape34xx_bsb_init_done, NULL, NULL, NULL}, /* TS_BSB_INIT */ - {NULL, tape34xx_bsf_init_done, NULL, NULL, NULL}, /* TS_BSF_INIT */ - {NULL, tape34xx_dse_init_done, NULL, NULL, NULL}, /* TS_DSE_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_EGA_INIT */ - {NULL, tape34xx_fsb_init_done, NULL, NULL, NULL}, /* TS_FSB_INIT */ - {NULL, tape34xx_fsf_init_done, NULL, NULL, NULL}, /* TS_FSF_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_LDI_INIT */ - {NULL, tape34xx_lbl_init_done, NULL, NULL, NULL}, /* TS_LBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_MSE_INIT */ - {NULL, tape34xx_nop_init_done, NULL, NULL, NULL}, /* TS_NOP_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RBA_INIT */ - {NULL, tape34xx_rbi_init_done, NULL, NULL, NULL}, /* TS_RBI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBU_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RDC_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RFO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RSD_INIT */ - {NULL, tape34xx_rew_init_done, NULL, NULL, NULL}, /* TS_REW_INIT */ - {NULL, tape34xx_rew_release_init_done, NULL, NULL, NULL}, /* TS_REW_RELEASE_IMIT */ - {NULL, tape34xx_run_init_done, NULL, NULL, NULL}, /* TS_RUN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SEN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SID_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SNP_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SPG_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SWI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SMR_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SYN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_TIO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_UNA_INIT */ - {NULL, tape34xx_wri_init_done, NULL, NULL, NULL}, /* TS_WRI_INIT */ - {NULL, tape34xx_wtm_init_done, NULL, NULL, 
NULL}, /* TS_WTM_INIT */ - {NULL, NULL, NULL, NULL, NULL}}; /* TS_NOT_OPER */ - -devreg_t tape3480_devreg = { - ci: - {hc: - {ctype:0x3480}}, - flag:DEVREG_MATCH_CU_TYPE | DEVREG_TYPE_DEVCHARS, - oper_func:tape_oper_handler -}; - - -void -tape3480_setup_assist (tape_info_t * ti) -{ - tape3480_disc_data_t *data = NULL; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"3480 dsetu"); - debug_text_event (tape_debug_area,6,"dev:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif /* TAPE_DEBUG */ - while (data == NULL) - data = kmalloc (sizeof (tape3480_disc_data_t), GFP_KERNEL); - data->modeset_byte = 0x00; - ti->discdata = (void *) data; -} - - -void -tape3480_shutdown (int autoprobe) { - if (autoprobe) - s390_device_unregister(&tape3480_devreg); -} - -tape_discipline_t * -tape3480_init (int autoprobe) -{ - tape_discipline_t *disc; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"3480 init"); -#endif /* TAPE_DEBUG */ - disc = kmalloc (sizeof (tape_discipline_t), GFP_KERNEL); - if (disc == NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"disc:nomem"); -#endif /* TAPE_DEBUG */ - return disc; - } - disc->cu_type = 0x3480; - disc->setup_assist = tape3480_setup_assist; - disc->error_recovery = tape34xx_error_recovery; - disc->write_block = tape34xx_write_block; - disc->free_write_block = tape34xx_free_write_block; - disc->read_block = tape34xx_read_block; - disc->free_read_block = tape34xx_free_read_block; - disc->mtfsf = tape34xx_mtfsf; - disc->mtbsf = tape34xx_mtbsf; - disc->mtfsr = tape34xx_mtfsr; - disc->mtbsr = tape34xx_mtbsr; - disc->mtweof = tape34xx_mtweof; - disc->mtrew = tape34xx_mtrew; - disc->mtoffl = tape34xx_mtoffl; - disc->mtnop = tape34xx_mtnop; - disc->mtbsfm = tape34xx_mtbsfm; - disc->mtfsfm = tape34xx_mtfsfm; - disc->mteom = tape34xx_mteom; - disc->mterase = tape34xx_mterase; - disc->mtsetdensity = tape34xx_mtsetdensity; - disc->mtseek = tape34xx_mtseek; - disc->mttell = tape34xx_mttell; - disc->mtsetdrvbuffer = tape34xx_mtsetdrvbuffer; - disc->mtlock = tape34xx_mtlock; - disc->mtunlock = tape34xx_mtunlock; - disc->mtload = tape34xx_mtload; - disc->mtunload = tape34xx_mtunload; - disc->mtcompression = tape34xx_mtcompression; - disc->mtsetpart = tape34xx_mtsetpart; - disc->mtmkpart = tape34xx_mtmkpart; - disc->mtiocget = tape34xx_mtiocget; - disc->mtiocpos = tape34xx_mtiocpos; - disc->shutdown = tape3480_shutdown; - disc->discipline_ioctl_overload = tape34xx_ioctl_overload; - disc->event_table = &tape3480_event_handler_table; - disc->default_handler = tape34xx_default_handler; - disc->bread = tape34xx_bread; - disc->free_bread = tape34xx_free_bread; - disc->tape = NULL; /* pointer for backreference */ - disc->next = NULL; - if (autoprobe) - s390_device_register(&tape3480_devreg); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"3480 regis"); -#endif /* TAPE_DEBUG */ - return disc; -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape3480.h linux.21rc5-ac2/drivers/s390/char/tape3480.h --- linux.21rc5/drivers/s390/char/tape3480.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape3480.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -/*************************************************************************** - * - * drivers/s390/char/tape3480.h - * tape device discipline for 3480 tapes. 
- * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#ifndef _TAPE3480_H - -#define _TAPE3480_H - - -typedef struct _tape3480_disc_data_t { - __u8 modeset_byte; -} tape3480_disc_data_t __attribute__ ((packed, aligned(8))); -tape_discipline_t * tape3480_init (int); -#endif // _TAPE3480_H diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape3490.c linux.21rc5-ac2/drivers/s390/char/tape3490.c --- linux.21rc5/drivers/s390/char/tape3490.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape3490.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,156 +0,0 @@ -/*************************************************************************** - * - * drivers/s390/char/tape3490.c - * tape device discipline for 3490E tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include /* CCW allocations */ -#include -#include -#include -#include "tape.h" -#include "tape34xx.h" -#include "tape3490.h" - -tape_event_handler_t tape3490_event_handler_table[TS_SIZE][TE_SIZE] = -{ - /* {START , DONE, FAILED, ERROR, OTHER } */ - {NULL, tape34xx_unused_done, NULL, NULL, NULL}, /* TS_UNUSED */ - {NULL, tape34xx_idle_done, NULL, NULL, NULL}, /* TS_IDLE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_DONE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_FAILED */ - {NULL, tape34xx_block_done, NULL, NULL, NULL}, /* TS_BLOCK_INIT */ - {NULL, tape34xx_bsb_init_done, NULL, NULL, NULL}, /* TS_BSB_INIT */ - {NULL, tape34xx_bsf_init_done, NULL, NULL, NULL}, /* TS_BSF_INIT */ - {NULL, tape34xx_dse_init_done, NULL, NULL, NULL}, /* TS_DSE_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_EGA_INIT */ - {NULL, tape34xx_fsb_init_done, NULL, NULL, NULL}, /* TS_FSB_INIT */ - {NULL, tape34xx_fsf_init_done, NULL, NULL, NULL}, /* TS_FSF_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_LDI_INIT */ - {NULL, tape34xx_lbl_init_done, NULL, NULL, NULL}, /* TS_LBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_MSE_INIT */ - {NULL, tape34xx_nop_init_done, NULL, NULL, NULL}, /* TS_NOP_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RBA_INIT */ - {NULL, tape34xx_rbi_init_done, NULL, NULL, NULL}, /* TS_RBI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBU_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RDC_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RFO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RSD_INIT */ - {NULL, tape34xx_rew_init_done, NULL, NULL, NULL}, /* TS_REW_INIT */ - {NULL, tape34xx_rew_release_init_done, NULL, NULL, NULL}, /* TS_REW_RELEASE_IMIT */ - {NULL, tape34xx_run_init_done, NULL, NULL, NULL}, /* TS_RUN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SEN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SID_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SNP_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SPG_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SWI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SMR_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SYN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_TIO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_UNA_INIT */ - {NULL, tape34xx_wri_init_done, NULL, NULL, NULL}, /* TS_WRI_INIT */ - 
{NULL, tape34xx_wtm_init_done, NULL, NULL, NULL}, /* TS_WTM_INIT */ - {NULL, NULL, NULL, NULL, NULL}}; /* TS_NOT_OPER */ - -devreg_t tape3490_devreg = { - ci: - {hc: - {ctype:0x3490}}, - flag:DEVREG_MATCH_CU_TYPE | DEVREG_TYPE_DEVCHARS, - oper_func:tape_oper_handler -}; - -void -tape3490_setup_assist (tape_info_t * ti) -{ - tape3490_disc_data_t *data = NULL; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"3490 dsetu"); - debug_text_event (tape_debug_area,6,"dev:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif /* TAPE_DEBUG */ - while (data == NULL) - data = kmalloc (sizeof (tape3490_disc_data_t), GFP_KERNEL); - data->modeset_byte = 0x00; - ti->discdata = (void *) data; -} - - -void -tape3490_shutdown (int autoprobe) { - if (autoprobe) - s390_device_unregister(&tape3490_devreg); -} - - -tape_discipline_t * -tape3490_init (int autoprobe) -{ - tape_discipline_t *disc; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"3490 init"); -#endif /* TAPE_DEBUG */ - disc = kmalloc (sizeof (tape_discipline_t), GFP_KERNEL); - if (disc == NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"disc:nomem"); -#endif /* TAPE_DEBUG */ - return disc; - } - disc->cu_type = 0x3490; - disc->setup_assist = tape3490_setup_assist; - disc->error_recovery = tape34xx_error_recovery; - disc->write_block = tape34xx_write_block; - disc->free_write_block = tape34xx_free_write_block; - disc->read_block = tape34xx_read_block; - disc->free_read_block = tape34xx_free_read_block; - disc->mtfsf = tape34xx_mtfsf; - disc->mtbsf = tape34xx_mtbsf; - disc->mtfsr = tape34xx_mtfsr; - disc->mtbsr = tape34xx_mtbsr; - disc->mtweof = tape34xx_mtweof; - disc->mtrew = tape34xx_mtrew; - disc->mtoffl = tape34xx_mtoffl; - disc->mtnop = tape34xx_mtnop; - disc->mtbsfm = tape34xx_mtbsfm; - disc->mtfsfm = tape34xx_mtfsfm; - disc->mteom = tape34xx_mteom; - disc->mterase = tape34xx_mterase; - disc->mtsetdensity = tape34xx_mtsetdensity; - disc->mtseek = tape34xx_mtseek; - disc->mttell = tape34xx_mttell; - disc->mtsetdrvbuffer = tape34xx_mtsetdrvbuffer; - disc->mtlock = tape34xx_mtlock; - disc->mtunlock = tape34xx_mtunlock; - disc->mtload = tape34xx_mtload; - disc->mtunload = tape34xx_mtunload; - disc->mtcompression = tape34xx_mtcompression; - disc->mtsetpart = tape34xx_mtsetpart; - disc->mtmkpart = tape34xx_mtmkpart; - disc->mtiocget = tape34xx_mtiocget; - disc->mtiocpos = tape34xx_mtiocpos; - disc->shutdown = tape3490_shutdown; - disc->discipline_ioctl_overload = tape34xx_ioctl_overload; - disc->event_table = &tape3490_event_handler_table; - disc->default_handler = tape34xx_default_handler; - disc->bread = tape34xx_bread; - disc->free_bread = tape34xx_free_bread; - disc->tape = NULL; /* pointer for backreference */ - disc->next = NULL; - if (autoprobe) - s390_device_register(&tape3490_devreg); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"3490 regis"); -#endif /* TAPE_DEBUG */ - return disc; -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape3490.h linux.21rc5-ac2/drivers/s390/char/tape3490.h --- linux.21rc5/drivers/s390/char/tape3490.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape3490.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tape3490.h - * tape device discipline for 3490E tapes. 
- * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#ifndef _TAPE3490_H - -#define _TAPE3490_H - - -typedef struct _tape3490_disc_data_t { - __u8 modeset_byte; -} tape3490_disc_data_t __attribute__ ((packed, aligned(8))); -tape_discipline_t * tape3490_init (int); -#endif // _TAPE3490_H diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape_34xx.c linux.21rc5-ac2/drivers/s390/char/tape_34xx.c --- linux.21rc5/drivers/s390/char/tape_34xx.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape_34xx.c 2003-05-28 15:15:07.000000000 +0100 @@ -0,0 +1,1340 @@ +/* + * drivers/s390/char/tape_34xx.c + * tape device discipline for 3480/3490 tapes. + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#include +#include +#include +#include +#include + +#include "tape.h" +#include "tape_std.h" + +#define PRINTK_HEADER "T34xx:" + +/* + * The block ID is the complete marker for a specific tape position. + * It contains a physical part (wrap, segment, format) and a logical + * block number. + */ +#define TBI_FORMAT_3480 0x00 +#define TBI_FORMAT_3480_2_XF 0x01 +#define TBI_FORMAT_3480_XF 0x02 +#define TBI_FORMAT_RESERVED 0x03 + +struct tape_34xx_block_id { + unsigned int tbi_wrap : 1; + unsigned int tbi_segment : 7; + unsigned int tbi_format : 2; + unsigned int tbi_block : 22; +} __attribute__ ((packed)); + +struct sbid_entry { + struct list_head list; + struct tape_34xx_block_id bid; +}; + +struct tape_34xx_discdata { + /* A list of block id's of the tape segments (for faster seek) */ + struct list_head sbid_list; +}; + +/* Internal prototypes */ +static void tape_34xx_clear_sbid_list(struct tape_device *); + +/* 34xx specific functions */ +static void +__tape_34xx_medium_sense_callback(struct tape_request *request, void *data) +{ + unsigned char *sense = request->cpdata; + + request->callback = NULL; + + DBF_EVENT(5, "TO_MSEN[0]: %08x\n", *((unsigned int *) sense)); + DBF_EVENT(5, "TO_MSEN[1]: %08x\n", *((unsigned int *) sense+1)); + DBF_EVENT(5, "TO_MSEN[2]: %08x\n", *((unsigned int *) sense+2)); + DBF_EVENT(5, "TO_MSEN[3]: %08x\n", *((unsigned int *) sense+3)); + + if(sense[0] & SENSE_INTERVENTION_REQUIRED) { + tape_med_state_set(request->device, MS_UNLOADED); + } else { + tape_med_state_set(request->device, MS_LOADED); + } + + if(sense[1] & SENSE_WRITE_PROTECT) { + request->device->tape_generic_status |= GMT_WR_PROT(~0); + } else{ + request->device->tape_generic_status &= ~GMT_WR_PROT(~0); + } + + tape_put_request(request); +} + +static int +tape_34xx_medium_sense(struct tape_device *device) +{ + struct tape_request * request; + int rc; + + tape_34xx_clear_sbid_list(device); + + request = tape_alloc_request(1, 32); + if(IS_ERR(request)) { + DBF_EXCEPTION(6, "MSN fail\n"); + return PTR_ERR(request); + } + + request->op = TO_MSEN; + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); + request->callback = __tape_34xx_medium_sense_callback; + + rc = tape_do_io_async(device, request); + + return rc; +} + +static void +tape_34xx_work_handler(void *data) +{ + struct { + struct tape_device *device; + enum tape_op op; + struct tq_struct task; + } *p = data; + + switch(p->op) { + case TO_MSEN: + tape_34xx_medium_sense(p->device); + break; 
+ default: + DBF_EVENT(3, "T34XX: internal error: unknown work\n"); + } + + tape_put_device(p->device); + kfree(p); +} + +/* + * This function is currently used to schedule a sense for later execution. + * For example whenever an unsolicited interrupt signals a new tape medium + * and we can't call tape_do_io from that interrupt handler. + */ +static int +tape_34xx_schedule_work(struct tape_device *device, enum tape_op op) +{ + struct { + struct tape_device *device; + enum tape_op op; + struct tq_struct task; + } *p; + + if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) + return -ENOMEM; + + memset(p, 0, sizeof(*p)); + INIT_LIST_HEAD(&p->task.list); + p->task.routine = tape_34xx_work_handler; + p->task.data = p; + + p->device = tape_clone_device(device); + p->op = op; + + schedule_task(&p->task); + + return 0; +} + +/* + * The Done Handler is called when dev stat = DEVICE-END (successful operation) + */ +static int +tape_34xx_done(struct tape_device *device, struct tape_request *request) +{ + DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); + // FIXME: Maybe only on assign/unassign + TAPE_CLEAR_STATE(device, TAPE_STATUS_BOXED); + + return TAPE_IO_SUCCESS; +} + +static inline int +tape_34xx_erp_failed(struct tape_device *device, + struct tape_request *request, int rc) +{ + DBF_EVENT(3, "Error recovery failed for %s\n", + tape_op_verbose[request->op]); + return rc; +} + +static inline int +tape_34xx_erp_succeeded(struct tape_device *device, + struct tape_request *request) +{ + DBF_EVENT(3, "Error Recovery successful for %s\n", + tape_op_verbose[request->op]); + return tape_34xx_done(device, request); +} + +static inline int +tape_34xx_erp_retry(struct tape_device *device, struct tape_request *request) +{ + DBF_EVENT(3, "xerp retr %s\n", + tape_op_verbose[request->op]); + return TAPE_IO_RETRY; +} + +/* + * This function is called when we get an interrupt while no request + * is outstanding. + */ +static int +tape_34xx_unsolicited_irq(struct tape_device *device) +{ + if (device->devstat.dstat == 0x85 /* READY */) { + /* A medium was inserted in the drive. */ + DBF_EVENT(6, "T34xx: tape load\n"); + tape_34xx_schedule_work(device, TO_MSEN); + } else { + DBF_EVENT(3, "T34xx: unsol.irq! dev end: %x\n", + device->devinfo.irq); + PRINT_WARN("Unsolicited IRQ (Device End) caught.\n"); + tape_dump_sense(device, NULL); + } + return TAPE_IO_SUCCESS; +} + +/* + * Read Opposite Error Recovery Function: + * Used when Read Forward does not work. + */ +static int +tape_34xx_erp_read_opposite(struct tape_device *device, + struct tape_request *request) +{ + if (request->op == TO_RFO) { + /* + * We did read forward, but the data could not be read + * *correctly*. We transform the request to a read backward + * and try again. + */ + tape_std_read_backward(device, request); + return tape_34xx_erp_retry(device, request); + } + if (request->op != TO_RBA) + PRINT_ERR("read_opposite called with state:%s\n", + tape_op_verbose[request->op]); + /* + * We tried to read forward and backward, but had no + * success -> failed.
+ */ + return tape_34xx_erp_failed(device, request, -EIO); +} + +static int +tape_34xx_erp_bug(struct tape_device *device, + struct tape_request *request, int no) +{ + if (request->op != TO_ASSIGN) { + PRINT_WARN("An unexpected condition #%d was caught in " + "tape error recovery.\n", no); + PRINT_WARN("Please report this incident.\n"); + if (request) + PRINT_WARN("Operation of tape:%s\n", + tape_op_verbose[request->op]); + tape_dump_sense(device, request); + } + return tape_34xx_erp_failed(device, request, -EIO); +} + +/* + * Handle data overrun between cu and drive. The channel speed might + * be too slow. + */ +static int +tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request) +{ + if (device->devstat.ii.sense.data[3] == 0x40) { + PRINT_WARN ("Data overrun error between control-unit " + "and drive. Use a faster channel connection, " + "if possible! \n"); + return tape_34xx_erp_failed(device, request, -EIO); + } + return tape_34xx_erp_bug(device, request, -1); +} + +/* + * Handle record sequence error. + */ +static int +tape_34xx_erp_sequence(struct tape_device *device, + struct tape_request *request) +{ + if (device->devstat.ii.sense.data[3] == 0x41) { + /* + * cu detected incorrect block-id sequence on tape. + */ + PRINT_WARN("Illegal block-id sequence found!\n"); + return tape_34xx_erp_failed(device, request, -EIO); + } + /* + * Record sequence error bit is set, but erpa does not + * show record sequence error. + */ + return tape_34xx_erp_bug(device, request, -2); +} + +/* + * This function analyses the tape's sense-data in case of a unit-check. + * If possible, it tries to recover from the error. Else the user is + * informed about the problem. + */ +static int +tape_34xx_unit_check(struct tape_device *device, struct tape_request *request) +{ + int inhibit_cu_recovery; + __u8* sense; + + inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0; + sense = device->devstat.ii.sense.data; + +#ifdef CONFIG_S390_TAPE_BLOCK + if (request->op == TO_BLOCK) { + /* + * Recovery for block device requests. Set the block_position + * to something invalid and retry. + */ + device->blk_data.block_position = -1; + if (request->retries-- <= 0) + return tape_34xx_erp_failed(device, request, -EIO); + else + return tape_34xx_erp_retry(device, request); + } +#endif + + if ( + sense[0] & SENSE_COMMAND_REJECT && + sense[1] & SENSE_WRITE_PROTECT + ) { + if ( + request->op == TO_DSE || + request->op == TO_WRI || + request->op == TO_WTM + ) { + /* medium is write protected */ + return tape_34xx_erp_failed(device, request, -EACCES); + } else { + return tape_34xx_erp_bug(device, request, -3); + } + } + + /* + * special cases for various tape-states when reaching + * end of recorded area + */ + /* + * FIXME: Maybe a special case of the special case: + * sense[0] == SENSE_EQUIPMENT_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE && + * sense[3] == 0x47 (Volume Fenced) + * + * This was caused by continued FSF or FSR after an + * 'End Of Data'. + */ + if (( + sense[0] == SENSE_DATA_CHECK || + sense[0] == SENSE_EQUIPMENT_CHECK || + sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK + ) && ( + sense[1] == SENSE_DRIVE_ONLINE || + sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE + )) { + switch (request->op) { + /* + * sense[0] == SENSE_DATA_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE + * sense[3] == 0x36 (End Of Data) + * + * Further seeks might return a 'Volume Fenced'. 
+ */ + case TO_FSF: + case TO_FSB: + /* Trying to seek beyond end of recorded area */ + return tape_34xx_erp_failed(device, request, -ENOSPC); + case TO_BSB: + return tape_34xx_erp_retry(device, request); + /* + * sense[0] == SENSE_DATA_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE && + * sense[3] == 0x36 (End Of Data) + */ + case TO_LBL: + /* Block could not be located. */ + return tape_34xx_erp_failed(device, request, -EIO); + case TO_RFO: + /* Read beyond end of recorded area -> 0 bytes read */ + return tape_34xx_erp_failed(device, request, 0); + default: + PRINT_ERR("Invalid op %s in %s:%i\n", + tape_op_verbose[request->op], + __FUNCTION__, __LINE__); + return tape_34xx_erp_failed(device, request, 0); + } + } + + /* Sensing special bits */ + if (sense[0] & SENSE_BUS_OUT_CHECK) + return tape_34xx_erp_retry(device, request); + + if (sense[0] & SENSE_DATA_CHECK) { + /* + * hardware failure, damaged tape or improper + * operating conditions + */ + switch (sense[3]) { + case 0x23: + /* a read data check occurred */ + if ((sense[2] & SENSE_TAPE_SYNC_MODE) || + inhibit_cu_recovery) + // data check is not permanent, may be + // recovered. We always use async-mode with + // cu-recovery, so this should *never* happen. + return tape_34xx_erp_bug(device, request, -4); + + /* data check is permanent, CU recovery has failed */ + PRINT_WARN("Permanent read error\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x25: + // a write data check occurred + if ((sense[2] & SENSE_TAPE_SYNC_MODE) || + inhibit_cu_recovery) + // data check is not permanent, may be + // recovered. We always use async-mode with + // cu-recovery, so this should *never* happen. + return tape_34xx_erp_bug(device, request, -5); + + // data check is permanent, cu-recovery has failed + PRINT_WARN("Permanent write error\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x26: + /* Data Check (read opposite) occurred. */ + return tape_34xx_erp_read_opposite(device, request); + case 0x28: + /* ID-Mark at tape start couldn't be written */ + PRINT_WARN("ID-Mark could not be written.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x31: + /* Tape void. Tried to read beyond end of device. */ + PRINT_WARN("Read beyond end of recorded area.\n"); + return tape_34xx_erp_failed(device, request, -ENOSPC); + case 0x41: + /* Record sequence error. */ + PRINT_WARN("Invalid block-id sequence found.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + default: + /* all data checks for 3480 should result in one of + * the above erpa-codes. For 3490, other data-check + * conditions do exist. */ + if (device->discipline->cu_type == 0x3480) + return tape_34xx_erp_bug(device, request, -6); + } + } + + if (sense[0] & SENSE_OVERRUN) + return tape_34xx_erp_overrun(device, request); + + if (sense[1] & SENSE_RECORD_SEQUENCE_ERR) + return tape_34xx_erp_sequence(device, request); + + /* Sensing erpa codes */ + switch (sense[3]) { + case 0x00: + /* Unit check with erpa code 0. Report and ignore. */ + PRINT_WARN("Non-error sense was found. " + "Unit-check will be ignored.\n"); + return TAPE_IO_SUCCESS; + case 0x21: + /* + * Data streaming not operational. CU will switch to + * interlock mode. Reissue the command. + */ + PRINT_WARN("Data streaming not operational. " + "Switching to interlock-mode.\n"); + return tape_34xx_erp_retry(device, request); + case 0x22: + /* + * Path equipment check. 
Might be drive adapter error, buffer
+		 * error on the lower interface, internal path not usable,
+		 * or error during cartridge load.
+		 */
+		PRINT_WARN("A path equipment check occurred. One of the "
+			"following conditions occurred:\n");
+		PRINT_WARN("drive adapter error, buffer error on the lower "
+			"interface, internal path not usable, error "
+			"during cartridge load.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x24:
+		/*
+		 * Load display check. A load display command was issued,
+		 * but the drive is displaying a drive check message. Can
+		 * be treated as "device end".
+		 */
+		return tape_34xx_erp_succeeded(device, request);
+	case 0x27:
+		/*
+		 * Command reject. May indicate illegal channel program or
+		 * buffer over/underrun. Since all channel programs are
+		 * issued by this driver and ought to be correct, we assume
+		 * an over/underrun situation and retry the channel program.
+		 */
+		return tape_34xx_erp_retry(device, request);
+	case 0x29:
+		/*
+		 * Function incompatible. Either the tape is idrc compressed
+		 * but the hardware isn't capable of doing idrc, or a perform
+		 * subsystem function is issued and the CU is not on-line.
+		 */
+		PRINT_WARN("Function incompatible. Try to switch off idrc.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x2a:
+		/*
+		 * Unsolicited environmental data. An internal counter
+		 * overflowed; we can ignore this and reissue the cmd.
+		 */
+		return tape_34xx_erp_retry(device, request);
+	case 0x2b:
+		/*
+		 * Environmental data present. Indicates either unload
+		 * completed ok or read buffered log command completed ok.
+		 */
+		if (request->op == TO_RUN) {
+			tape_med_state_set(device, MS_UNLOADED);
+			/* Rewind unload completed ok. */
+			return tape_34xx_erp_succeeded(device, request);
+		}
+		/* tape_34xx doesn't use read buffered log commands. */
+		return tape_34xx_erp_bug(device, request, sense[3]);
+	case 0x2c:
+		/*
+		 * Permanent equipment check. CU has tried recovery, but
+		 * did not succeed.
+		 */
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x2d:
+		/* Data security erase failure. */
+		if (request->op == TO_DSE)
+			return tape_34xx_erp_failed(device, request, -EIO);
+		/* Data security erase failure, but no such command issued. */
+		return tape_34xx_erp_bug(device, request, sense[3]);
+	case 0x2e:
+		/*
+		 * Not capable. This indicates either that the drive fails
+		 * to read the format id mark or that the format specified
+		 * is not supported by the drive.
+		 */
+		PRINT_WARN("Drive not capable of processing the tape format!\n");
+		return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE);
+	case 0x30:
+		/* The medium is write protected. */
+		PRINT_WARN("Medium is write protected!\n");
+		return tape_34xx_erp_failed(device, request, -EACCES);
+	case 0x32:
+		// Tension loss. We cannot recover this, it's an I/O error.
+		PRINT_WARN("The drive lost tape tension.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x33:
+		/*
+		 * Load Failure. The cartridge was not inserted correctly or
+		 * the tape is not threaded correctly.
+		 */
+		PRINT_WARN("Cartridge load failure. Reload the cartridge "
+			"and try again.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x34:
+		/*
+		 * Unload failure. The drive cannot maintain tape tension
+		 * and control tape movement during an unload operation.
+		 */
+		PRINT_WARN("Failure during cartridge unload. "
+			"Please try manually.\n");
+		if (request->op == TO_RUN)
+			return tape_34xx_erp_failed(device, request, -EIO);
+		return tape_34xx_erp_bug(device, request, sense[3]);
+	case 0x35:
+		/*
+		 * Drive equipment check. One of the following:
+		 * - cu cannot recover from a drive detected error
+		 * - a check code message is shown on drive display
+		 * - the cartridge loader does not respond correctly
+		 * - a failure occurs during an index, load, or unload cycle
+		 */
+		PRINT_WARN("Equipment check! Please check the drive and "
+			"the cartridge loader.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x36:
+		if (device->discipline->cu_type == 0x3490)
+			/* End of data. */
+			return tape_34xx_erp_failed(device, request, -EIO);
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, sense[3]);
+	case 0x37:
+		/*
+		 * Tape length error. The tape is shorter than reported in
+		 * the beginning-of-tape data.
+		 */
+		PRINT_WARN("Tape length error.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x38:
+		/*
+		 * Physical end of tape. A read/write operation reached
+		 * the physical end of tape.
+		 */
+		if (request->op == TO_WRI ||
+		    request->op == TO_DSE ||
+		    request->op == TO_WTM)
+			return tape_34xx_erp_failed(device, request, -ENOSPC);
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x39:
+		/* Backward at Beginning of tape. */
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x3a:
+		/* Drive switched to not ready. */
+		PRINT_WARN("Drive not ready. Turn the ready/not ready switch "
+			"to ready position and try again.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x3b:
+		/* Manual rewind or unload. This causes an I/O error. */
+		PRINT_WARN("Medium was rewound or unloaded manually.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x42:
+		/*
+		 * Degraded mode. A condition that can cause degraded
+		 * performance is detected.
+		 */
+		PRINT_WARN("Subsystem is running in degraded mode.\n");
+		return tape_34xx_erp_retry(device, request);
+	case 0x43:
+		/* Drive not ready. */
+		tape_med_state_set(device, MS_UNLOADED);
+		/* SMB: some commands do not need a tape inserted */
+		if ((sense[1] & SENSE_DRIVE_ONLINE)) {
+			switch (request->op) {
+			case TO_ASSIGN:
+			case TO_UNASSIGN:
+			case TO_DIS:
+				return tape_34xx_done(device, request);
+			default:
+				break;
+			}
+		}
+		PRINT_WARN("The drive is not ready.\n");
+		return tape_34xx_erp_failed(device, request, -ENOMEDIUM);
+	case 0x44:
+		/* Locate Block unsuccessful. */
+		if (request->op != TO_BLOCK && request->op != TO_LBL)
+			/* No locate block was issued. */
+			return tape_34xx_erp_bug(device, request, sense[3]);
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x45:
+		/* The drive is assigned to a different channel path. */
+		PRINT_WARN("The drive is assigned elsewhere.\n");
+		TAPE_SET_STATE(device, TAPE_STATUS_BOXED);
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x46:
+		/*
+		 * Drive not on-line. The drive may be switched offline,
+		 * the power supply may be switched off or
+		 * the drive address may not be set correctly.
+		 */
+		PRINT_WARN("The drive is not on-line.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x47:
+		/* Volume fenced. CU reports volume integrity is lost. */
+		PRINT_WARN("Volume fenced. The volume integrity is lost because\n");
+		PRINT_WARN("assignment or tape position was lost.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x48:
+		/* Log sense data and retry request. */
+		return tape_34xx_erp_retry(device, request);
+	case 0x49:
+		/* Bus out check. A parity check error on the bus was found. */
+		PRINT_WARN("Bus out check. A data transfer over the bus "
+			"has been corrupted.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x4a:
+		/* Control unit erp failed. */
+		PRINT_WARN("The control unit I/O error recovery failed.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x4b:
+		/*
+		 * CU and drive incompatible. The drive requests micro-program
+		 * patches, which are not available on the CU.
+		 */
+		PRINT_WARN("The drive needs microprogram patches from the "
+			"control unit, which are not available.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x4c:
+		/*
+		 * Recovered Check-One failure. The CU developed a hardware
+		 * error, but was able to recover.
+		 */
+		return tape_34xx_erp_retry(device, request);
+	case 0x4d:
+		if (device->discipline->cu_type == 0x3490)
+			/*
+			 * Resetting event received. Since the driver does
+			 * not support resetting event recovery (which has to
+			 * be handled by the I/O Layer), retry our command.
+			 */
+			return tape_34xx_erp_retry(device, request);
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, sense[3]);
+	case 0x4e:
+		if (device->discipline->cu_type == 0x3490) {
+			/*
+			 * Maximum block size exceeded. This indicates that
+			 * the block to be written is larger than allowed for
+			 * buffered mode.
+			 */
+			PRINT_WARN("Maximum block size for buffered "
+				"mode exceeded.\n");
+			return tape_34xx_erp_failed(device, request, -ENOBUFS);
+		}
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, sense[3]);
+	case 0x50:
+		/*
+		 * Read buffered log (Overflow). CU is running in extended
+		 * buffered log mode, and a counter overflows. This should
+		 * never happen, since we're never running in extended
+		 * buffered log mode.
+		 */
+		return tape_34xx_erp_retry(device, request);
+	case 0x51:
+		/*
+		 * Read buffered log (EOV). EOF processing occurs while the
+		 * CU is in extended buffered log mode. This should never
+		 * happen, since we're never running in extended buffered
+		 * log mode.
+		 */
+		return tape_34xx_erp_retry(device, request);
+	case 0x52:
+		/* End of Volume complete. Rewind unload completed ok. */
+		if (request->op == TO_RUN) {
+			/* SMB */
+			tape_med_state_set(device, MS_UNLOADED);
+			return tape_34xx_erp_succeeded(device, request);
+		}
+		return tape_34xx_erp_bug(device, request, sense[3]);
+	case 0x53:
+		/* Global command intercept. */
+		return tape_34xx_erp_retry(device, request);
+	case 0x54:
+		/* Channel interface recovery (temporary). */
+		return tape_34xx_erp_retry(device, request);
+	case 0x55:
+		/* Channel interface recovery (permanent). */
+		PRINT_WARN("A permanent channel interface error occurred.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x56:
+		/* Channel protocol error. */
+		PRINT_WARN("A channel protocol error occurred.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x57:
+		if (device->discipline->cu_type == 0x3480) {
+			/* Attention intercept. */
+			PRINT_WARN("An attention intercept occurred, "
+				"which will be recovered.\n");
+			return tape_34xx_erp_retry(device, request);
+		} else {
+			/* Global status intercept. */
+			PRINT_WARN("A global status intercept was received, "
+				"which will be recovered.\n");
+			return tape_34xx_erp_retry(device, request);
+		}
+	case 0x5a:
+		/*
+		 * Tape length incompatible. 
The tape inserted is too long,
+		 * which could cause damage to the tape or the drive.
+		 */
+		PRINT_WARN("Tape length incompatible [should be IBM Cartridge "
+			"System Tape]. May cause damage to drive or tape.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x5b:
+		/* Format 3480 XF incompatible */
+		if (sense[1] & SENSE_BEGINNING_OF_TAPE)
+			/* The tape will get overwritten. */
+			return tape_34xx_erp_retry(device, request);
+		PRINT_WARN("Tape format is incompatible with the drive, "
+			"which writes 3480-2 XF.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x5c:
+		/* Format 3480-2 XF incompatible */
+		PRINT_WARN("Tape format is incompatible with the drive. "
+			"The drive cannot access 3480-2 XF volumes.\n");
+		return tape_34xx_erp_failed(device, request, -EIO);
+	case 0x5d:
+		/* Tape length violation. */
+		PRINT_WARN("Tape length violation [should be IBM Enhanced "
+			"Capacity Cartridge System Tape]. May cause "
+			"damage to drive or tape.\n");
+		return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE);
+	case 0x5e:
+		/* Compaction algorithm incompatible. */
+		PRINT_WARN("The volume is recorded using an incompatible "
+			"compaction algorithm, which is not supported by "
+			"the control unit.\n");
+		return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE);
+
+		/* The following erpas should have been covered earlier. */
+	case 0x23: /* Read data check. */
+	case 0x25: /* Write data check. */
+	case 0x26: /* Data check (read opposite). */
+	case 0x28: /* Write id mark check. */
+	case 0x31: /* Tape void. */
+	case 0x40: /* Overrun error. */
+	case 0x41: /* Record sequence error. */
+		/* All other erpas are reserved for future use. */
+	default:
+		return tape_34xx_erp_bug(device, request, sense[3]);
+	}
+}
+
+/*
+ * 3480/3490 interrupt handler
+ */
+static int
+tape_34xx_irq(struct tape_device *device, struct tape_request *request)
+{
+	if (request == NULL)
+		return tape_34xx_unsolicited_irq(device);
+
+	if ((device->devstat.dstat & DEV_STAT_UNIT_EXCEP) &&
+	    (device->devstat.dstat & DEV_STAT_DEV_END) &&
+	    (request->op == TO_WRI)) {
+		/* Write at end of volume */
+		PRINT_INFO("End of volume\n"); /* XXX */
+		return tape_34xx_erp_failed(device, request, -ENOSPC);
+	}
+
+	if ((device->devstat.dstat & DEV_STAT_UNIT_EXCEP) &&
+	    (request->op == TO_BSB || request->op == TO_FSB))
+		DBF_EVENT(5, "Skipped over tapemark\n");
+
+	if (device->devstat.dstat & DEV_STAT_UNIT_CHECK)
+		return tape_34xx_unit_check(device, request);
+
+	if (device->devstat.dstat & DEV_STAT_DEV_END)
+		return tape_34xx_done(device, request);
+
+	DBF_EVENT(6, "xunknownirq\n");
+	PRINT_ERR("Unexpected interrupt.\n");
+	PRINT_ERR("Current op is: %s\n", tape_op_verbose[request->op]);
+	tape_dump_sense(device, request);
+	return TAPE_IO_STOP;
+}
+
+/*
+ * ioctl_overload
+ */
+static int
+tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
+{
+	if (cmd == TAPE390_DISPLAY) {
+		struct display_struct disp;
+
+		if (copy_from_user(&disp, (char *) arg, sizeof(disp)) != 0)
+			return -EFAULT;
+
+		return tape_std_display(device, &disp);
+	} else
+		return -EINVAL;
+}
+
+static int
+tape_34xx_setup_device(struct tape_device * device)
+{
+	struct tape_34xx_discdata *discdata;
+
+	DBF_EVENT(5, "tape_34xx_setup_device(%p)\n", device);
+	DBF_EVENT(6, "34xx minor1: %x\n", device->first_minor);
+	discdata = kmalloc(sizeof(struct tape_34xx_discdata), GFP_ATOMIC);
+	if (discdata) {
+		memset(discdata, 0, sizeof(struct tape_34xx_discdata));
+		INIT_LIST_HEAD(&discdata->sbid_list);
+		device->discdata = discdata;
+	}
+	tape_34xx_medium_sense(device);
+	return 0;
+}
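
tape_34xx_setup_device() and tape_34xx_cleanup_device() form an allocate-on-setup / drain-and-free-on-cleanup pair around the per-device discipline data and its sbid list. Here is that pairing in isolation, as an illustrative sketch with hypothetical ex_* names, using the same list primitives as tape_34xx_clear_sbid_list further below:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/list.h>

struct ex_node {
	struct list_head list;
	int payload;
};

struct ex_priv {
	struct list_head nodes;
};

/* Setup side: allocate, zero, initialise the list head. */
static struct ex_priv *
ex_setup(void)
{
	struct ex_priv *priv;

	priv = kmalloc(sizeof(struct ex_priv), GFP_ATOMIC);
	if (priv) {
		memset(priv, 0, sizeof(struct ex_priv));
		INIT_LIST_HEAD(&priv->nodes);
	}
	return priv;
}

/* Cleanup side: drain the list, then free the container. */
static void
ex_cleanup(struct ex_priv *priv)
{
	struct list_head *l, *n;

	if (priv == NULL)
		return;
	/* list_for_each_safe permits deleting the current entry. */
	list_for_each_safe(l, n, &priv->nodes) {
		list_del(l);
		kfree(list_entry(l, struct ex_node, list));
	}
	kfree(priv);
}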
+
+static void
+tape_34xx_cleanup_device(struct tape_device * device)
+{
+	if (device->discdata) {
+		tape_34xx_clear_sbid_list(device);
+		kfree(device->discdata);
+		device->discdata = NULL;
+	}
+}
+
+/*
+ * Build up the lookup table...
+ */
+static void
+tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
+{
+	struct tape_34xx_discdata *discdata = device->discdata;
+	struct sbid_entry *new;
+	struct sbid_entry *cur;
+	struct list_head *l;
+
+	if (discdata == NULL)
+		return;
+	if ((new = kmalloc(sizeof(struct sbid_entry), GFP_ATOMIC)) == NULL)
+		return;
+
+	new->bid = bid;
+	new->bid.tbi_format = 0;
+
+	/*
+	 * Search the position where to insert the new entry. It is possible
+	 * that the entry should not be added but the block number has to be
+	 * updated to approximate the logical block where a segment starts.
+	 */
+	list_for_each(l, &discdata->sbid_list) {
+		cur = list_entry(l, struct sbid_entry, list);
+
+		/*
+		 * If the current entry has the same segment and wrap, then
+		 * there is no new entry needed. Only the block number of the
+		 * current entry might be adjusted to reflect an earlier start
+		 * of the segment.
+		 */
+		if (
+			(cur->bid.tbi_segment == new->bid.tbi_segment) &&
+			(cur->bid.tbi_wrap == new->bid.tbi_wrap)
+		) {
+			if (new->bid.tbi_block < cur->bid.tbi_block) {
+				cur->bid.tbi_block = new->bid.tbi_block;
+			}
+			kfree(new);
+			break;
+		}
+
+		/*
+		 * Otherwise the list is sorted by block number because it
+		 * is always ascending while the segment number decreases on
+		 * the second wrap.
+		 */
+		if (cur->bid.tbi_block > new->bid.tbi_block) {
+			list_add_tail(&new->list, l);
+			break;
+		}
+	}
+
+	/*
+	 * If the loop went through without finding a merge or adding an
+	 * entry, add the new entry to the end of the list.
+	 */
+	if (l == &discdata->sbid_list) {
+		list_add_tail(&new->list, &discdata->sbid_list);
+	}
+
+	list_for_each(l, &discdata->sbid_list) {
+		cur = list_entry(l, struct sbid_entry, list);
+
+		DBF_EVENT(3, "sbid_list(%03i:%1i:%08i)\n",
+			cur->bid.tbi_segment, cur->bid.tbi_wrap,
+			cur->bid.tbi_block);
+	}
+
+	return;
+}
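
tape_34xx_merge_sbid() below walks this sorted list to find the last recorded segment boundary at or before the target block, and falls back to seeking from the start of the tape when no such boundary exists. The same lookup, restated over a plain array in a small user-space program (an illustrative sketch with hypothetical ex_* names, not part of the patch):

#include <stdio.h>

struct ex_boundary {
	int block;	/* lowest logical block seen in this segment */
	int segment;
	int wrap;
};

/*
 * Return the last boundary whose block is <= the target block, or NULL
 * if the target lies before every known boundary (seek from the start
 * of the tape in that case).
 */
static const struct ex_boundary *
ex_find_segment(const struct ex_boundary *tab, int n, int block)
{
	const struct ex_boundary *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].block > block)
			break;	/* table is sorted by ascending block */
		best = &tab[i];
	}
	return best;
}

int main(void)
{
	/* Boundaries as tape_34xx_add_sbid would have recorded them. */
	static const struct ex_boundary tab[] = {
		{    0, 1, 0 },
		{ 4000, 2, 0 },
		{ 9000, 3, 1 },
	};
	const struct ex_boundary *b = ex_find_segment(tab, 3, 4711);

	/* Falls back to segment 1, wrap 0 -- the driver's defaults. */
	printf("block 4711 -> segment %d, wrap %d\n",
	       b ? b->segment : 1, b ? b->wrap : 0);
	return 0;
}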
+
+/*
+ * Fill hardware positioning information into the given block id. With that
+ * information seeks don't have to go back to the beginning of the tape and
+ * can be done at higher speed, because the vicinity of a segment can be
+ * located directly.
+ *
+ * The caller must have set tbi_block.
+ */
+static void
+tape_34xx_merge_sbid(
+	struct tape_device *device,
+	struct tape_34xx_block_id *bid
+) {
+	struct tape_34xx_discdata *discdata = device->discdata;
+	struct sbid_entry *cur;
+	struct list_head *l;
+
+	bid->tbi_wrap = 0;
+	bid->tbi_segment = 1;
+	bid->tbi_format = (*device->modeset_byte & 0x08) ?
+		TBI_FORMAT_3480_XF : TBI_FORMAT_3480;
+
+	if (discdata == NULL)
+		goto tape_34xx_merge_sbid_exit;
+	if (list_empty(&discdata->sbid_list))
+		goto tape_34xx_merge_sbid_exit;
+
+	list_for_each(l, &discdata->sbid_list) {
+		cur = list_entry(l, struct sbid_entry, list);
+
+		if (cur->bid.tbi_block > bid->tbi_block)
+			break;
+	}
+
+	/* If the block comes before the first entry's block, seek from the start. */
+	if (l->prev == &discdata->sbid_list)
+		goto tape_34xx_merge_sbid_exit;
+
+	cur = list_entry(l->prev, struct sbid_entry, list);
+	bid->tbi_wrap = cur->bid.tbi_wrap;
+	bid->tbi_segment = cur->bid.tbi_segment;
+
+tape_34xx_merge_sbid_exit:
+	DBF_EVENT(6, "merged_bid = %08x\n", *((unsigned int *) bid));
+	return;
+}
+
+static void
+tape_34xx_clear_sbid_list(struct tape_device *device)
+{
+	struct list_head *l;
+	struct list_head *n;
+	struct tape_34xx_discdata *discdata = device->discdata;
+
+	list_for_each_safe(l, n, &discdata->sbid_list) {
+		list_del(l);
+		kfree(list_entry(l, struct sbid_entry, list));
+	}
+}
+
+/*
+ * MTTELL: Tell block. Return the block number relative to the current file.
+ */
+int
+tape_34xx_mttell(struct tape_device *device, int mt_count)
+{
+	struct tape_34xx_block_id bid;
+	int rc;
+
+	rc = tape_std_read_block_id(device, (unsigned int *) &bid);
+	if (rc)
+		return rc;
+
+	/*
+	 * Build up a lookup table. The format id is ignored.
+	 */
+	tape_34xx_add_sbid(device, bid);
+
+	return bid.tbi_block;
+}
+
+/*
+ * MTSEEK: seek to the specified block.
+ */
+int
+tape_34xx_mtseek(struct tape_device *device, int mt_count)
+{
+	struct tape_34xx_block_id bid;
+
+	if (mt_count > 0x400000) {
+		DBF_EXCEPTION(6, "xsee parm\n");
+		return -EINVAL;
+	}
+
+	bid.tbi_block = mt_count;
+
+	/*
+	 * Set hardware seek information in the block id.
+	 */
+	tape_34xx_merge_sbid(device, &bid);
+
+	return tape_std_seek_block_id(device, *((unsigned int *) &bid));
+}
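
Both operations are reachable from user space through the standard mt ioctl interface; judging from tape_34xx_mttell() above, which returns bid.tbi_block, the current block number comes back as the ioctl() return value. A hedged user-space sketch (the device node name is an assumption; adjust for the local setup):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>

int main(void)
{
	struct mtop op;
	int fd, blk;

	if ((fd = open("/dev/ntibm0", O_RDONLY)) < 0) {
		perror("open");
		return 1;
	}

	op.mt_op = MTSEEK;	/* seek to logical block 4711 */
	op.mt_count = 4711;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTSEEK");

	op.mt_op = MTTELL;	/* ask for the current block number */
	op.mt_count = 1;
	if ((blk = ioctl(fd, MTIOCTOP, &op)) < 0)
		perror("MTTELL");
	else
		printf("current block: %d\n", blk);

	close(fd);
	return 0;
}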
+
+/*
+ * Tape block read for 34xx.
+ */
+#ifdef CONFIG_S390_TAPE_BLOCK
+struct tape_request *
+tape_34xx_bread(struct tape_device *device, struct request *req)
+{
+	struct tape_request *request;
+	struct buffer_head *bh;
+	ccw1_t *ccw;
+	int count;
+	int size;
+
+	DBF_EVENT(6, "tape_34xx_bread(sector=%u,size=%u)\n",
+		req->sector, req->nr_sectors);
+
+	/* Count the number of blocks for the request. */
+	count = 0;
+	size = 0;
+	for (bh = req->bh; bh; bh = bh->b_reqnext) {
+		for (size = 0; size < bh->b_size; size += TAPEBLOCK_HSEC_SIZE)
+			count++;
+	}
+
+	/* Allocate the ccw request. */
+	request = tape_alloc_request(3+count+1, 8);
+	if (IS_ERR(request))
+		return request;
+
+	/*
+	 * Setup the tape block id to start the read from. The block number
+	 * is later compared to the current position to decide whether a
+	 * locate block is required. If one is needed this block id is used
+	 * to locate it.
+	 */
+	((struct tape_34xx_block_id *) request->cpdata)->tbi_block =
+		req->sector >> TAPEBLOCK_HSEC_S2B;
+
+	/* Setup ccws. */
+	request->op = TO_BLOCK;
+	ccw = request->cpaddr;
+	ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
+
+	/*
+	 * We always setup a nop after the mode set ccw. This slot is
+	 * used in tape_std_check_locate to insert a locate ccw if the
+	 * current tape position doesn't match the start block to be read.
+	 * The second nop will be filled with a read block id which is in
+	 * turn used by tape_34xx_free_bread to populate the segment bid
+	 * table.
+	 */
+	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
+	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
+
+	for (bh = req->bh; bh; bh = bh->b_reqnext) {
+		for (size = 0; size < bh->b_size; size += TAPEBLOCK_HSEC_SIZE) {
+			ccw->flags = CCW_FLAG_CC;
+			ccw->cmd_code = READ_FORWARD;
+			ccw->count = TAPEBLOCK_HSEC_SIZE;
+			set_normalized_cda(ccw, (void *) __pa(bh->b_data+size));
+			ccw++;
+		}
+	}
+
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	return request;
+}
+
+void
+tape_34xx_free_bread(struct tape_request *request)
+{
+	ccw1_t *ccw = request->cpaddr;
+
+	if ((ccw + 2)->cmd_code == READ_BLOCK_ID) {
+		struct {
+			struct tape_34xx_block_id channel_block_id;
+			struct tape_34xx_block_id device_block_id;
+		} __attribute__ ((packed)) *rbi_data;
+
+		rbi_data = request->cpdata;
+
+		if (!request->device)
+			DBF_EVENT(6, "tape_34xx_free_bread: no device!\n");
+		DBF_EVENT(6, "tape_34xx_free_bread: update_sbid\n");
+		tape_34xx_add_sbid(
+			request->device,
+			rbi_data->channel_block_id
+		);
+	} else {
+		DBF_EVENT(3, "tape_34xx_free_bread: no block info\n");
+	}
+
+	/* Last ccw is a nop and doesn't need clear_normalized_cda */
+	for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++)
+		if (ccw->cmd_code == READ_FORWARD)
+			clear_normalized_cda(ccw);
+	tape_put_request(request);
+}
+
+/*
+ * check_locate is called just before the tape request is passed to
+ * the common io layer for execution. It has to check the current
+ * tape position and insert a locate ccw if it doesn't match the
+ * start block for the request.
+ */
+void
+tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
+{
+	struct tape_34xx_block_id *id;
+	struct tape_34xx_block_id *start;
+
+	id = (struct tape_34xx_block_id *) request->cpdata;
+
+	/*
+	 * The tape is already at the correct position. No seek needed.
+	 */
+	if (id->tbi_block == device->blk_data.block_position)
+		return;
+
+	/*
+	 * In case the block device image doesn't start at the beginning
+	 * of the tape, adjust the block number for the locate request.
+	 */
+	start = (struct tape_34xx_block_id *) &device->blk_data.start_block_id;
+	if (start->tbi_block)
+		id->tbi_block = id->tbi_block + start->tbi_block;
+
+	/*
+	 * Merge HW positioning information into the block id. This
+	 * information is used by the device for faster seeks.
+	 */
+	tape_34xx_merge_sbid(device, id);
+
+	/*
+	 * Transform the NOP to a LOCATE entry.
+	 */
+	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+	tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
+
+	return;
+}
+#endif
+
+static int
+tape_34xx_mtweof(struct tape_device *device, int count)
+{
+	tape_34xx_clear_sbid_list(device);
+	return tape_std_mtweof(device, count);
+}
+
+/*
+ * List of 3480/3490 magnetic tape commands.
+ */ +static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = +{ + [MTRESET] = tape_std_mtreset, + [MTFSF] = tape_std_mtfsf, + [MTBSF] = tape_std_mtbsf, + [MTFSR] = tape_std_mtfsr, + [MTBSR] = tape_std_mtbsr, + [MTWEOF] = tape_34xx_mtweof, + [MTREW] = tape_std_mtrew, + [MTOFFL] = tape_std_mtoffl, + [MTNOP] = tape_std_mtnop, + [MTRETEN] = tape_std_mtreten, + [MTBSFM] = tape_std_mtbsfm, + [MTFSFM] = tape_std_mtfsfm, + [MTEOM] = tape_std_mteom, + [MTERASE] = tape_std_mterase, + [MTRAS1] = NULL, + [MTRAS2] = NULL, + [MTRAS3] = NULL, + [MTSETBLK] = tape_std_mtsetblk, + [MTSETDENSITY] = NULL, + [MTSEEK] = tape_34xx_mtseek, + [MTTELL] = tape_34xx_mttell, + [MTSETDRVBUFFER] = NULL, + [MTFSS] = NULL, + [MTBSS] = NULL, + [MTWSM] = NULL, + [MTLOCK] = NULL, + [MTUNLOCK] = NULL, + [MTLOAD] = tape_std_mtload, + [MTUNLOAD] = tape_std_mtunload, + [MTCOMPRESSION] = tape_std_mtcompression, + [MTSETPART] = NULL, + [MTMKPART] = NULL +}; + +/* + * Tape discipline structures for 3480 and 3490. + */ +static struct tape_discipline tape_discipline_3480 = { + .owner = THIS_MODULE, + .cu_type = 0x3480, + .setup_device = tape_34xx_setup_device, + .cleanup_device = tape_34xx_cleanup_device, + .process_eov = tape_std_process_eov, + .irq = tape_34xx_irq, + .read_block = tape_std_read_block, + .write_block = tape_std_write_block, + .assign = tape_std_assign, + .unassign = tape_std_unassign, +#ifdef TAPE390_FORCE_UNASSIGN + .force_unassign = tape_std_force_unassign, +#endif +#ifdef CONFIG_S390_TAPE_BLOCK + .bread = tape_34xx_bread, + .free_bread = tape_34xx_free_bread, + .check_locate = tape_34xx_check_locate, +#endif + .ioctl_fn = tape_34xx_ioctl, + .mtop_array = tape_34xx_mtop +}; + +static struct tape_discipline tape_discipline_3490 = { + .owner = THIS_MODULE, + .cu_type = 0x3490, + .setup_device = tape_34xx_setup_device, + .cleanup_device = tape_34xx_cleanup_device, + .process_eov = tape_std_process_eov, + .irq = tape_34xx_irq, + .read_block = tape_std_read_block, + .write_block = tape_std_write_block, + .assign = tape_std_assign, + .unassign = tape_std_unassign, +#ifdef TAPE390_FORCE_UNASSIGN + .force_unassign = tape_std_force_unassign, +#endif +#ifdef CONFIG_S390_TAPE_BLOCK + .bread = tape_34xx_bread, + .free_bread = tape_34xx_free_bread, + .check_locate = tape_34xx_check_locate, +#endif + .ioctl_fn = tape_34xx_ioctl, + .mtop_array = tape_34xx_mtop +}; + +int +tape_34xx_init (void) +{ + int rc; + + DBF_EVENT(3, "34xx init: $Revision: 1.9 $\n"); + /* Register discipline. 
*/ + rc = tape_register_discipline(&tape_discipline_3480); + if (rc == 0) { + rc = tape_register_discipline(&tape_discipline_3490); + if (rc) + tape_unregister_discipline(&tape_discipline_3480); + } + if (rc) + DBF_EVENT(3, "34xx init failed\n"); + else + DBF_EVENT(3, "34xx registered\n"); + return rc; +} + +void +tape_34xx_exit(void) +{ + tape_unregister_discipline(&tape_discipline_3480); + tape_unregister_discipline(&tape_discipline_3490); +} + +MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH"); +MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape " + "device driver ($Revision: 1.9 $)"); +MODULE_LICENSE("GPL"); + +module_init(tape_34xx_init); +module_exit(tape_34xx_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape34xx.c linux.21rc5-ac2/drivers/s390/char/tape34xx.c --- linux.21rc5/drivers/s390/char/tape34xx.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape34xx.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,2389 +0,0 @@ -/*************************************************************************** - * - * drivers/s390/char/tape34xx.c - * common tape device discipline for 34xx tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_S390_TAPE_DYNAMIC -#include -#endif -#include -#include -#include "tape.h" -#include "tape34xx.h" - -#define PRINTK_HEADER "T34xx:" - -tape_event_handler_t tape34xx_event_handler_table[TS_SIZE][TE_SIZE] = -{ - /* {START , DONE, FAILED, ERROR, OTHER } */ - {NULL, tape34xx_unused_done, NULL, NULL, NULL}, /* TS_UNUSED */ - {NULL, tape34xx_idle_done, NULL, NULL, NULL}, /* TS_IDLE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_DONE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_FAILED */ - {NULL, tape34xx_block_done, NULL, NULL, NULL}, /* TS_BLOCK_INIT */ - {NULL, tape34xx_bsb_init_done, NULL, NULL, NULL}, /* TS_BSB_INIT */ - {NULL, tape34xx_bsf_init_done, NULL, NULL, NULL}, /* TS_BSF_INIT */ - {NULL, tape34xx_dse_init_done, NULL, NULL, NULL}, /* TS_DSE_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_EGA_INIT */ - {NULL, tape34xx_fsb_init_done, NULL, NULL, NULL}, /* TS_FSB_INIT */ - {NULL, tape34xx_fsf_init_done, NULL, NULL, NULL}, /* TS_FSF_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_LDI_INIT */ - {NULL, tape34xx_lbl_init_done, NULL, NULL, NULL}, /* TS_LBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_MSE_INIT */ - {NULL, tape34xx_nop_init_done, NULL, NULL, NULL}, /* TS_NOP_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBA_INIT */ - {NULL, tape34xx_rbi_init_done, NULL, NULL, NULL}, /* TS_RBI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBU_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RDC_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RFO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RSD_INIT */ - {NULL, tape34xx_rew_init_done, NULL, NULL, NULL}, /* TS_REW_INIT */ - {NULL, tape34xx_rew_release_init_done, NULL, NULL, NULL}, /* TS_REW_RELEASE_IMIT */ - {NULL, tape34xx_run_init_done, NULL, NULL, NULL}, /* TS_RUN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SEN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SID_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SNP_INIT */ - {NULL, NULL, NULL, 
NULL, NULL}, /* TS_SPG_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SWI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SMR_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SYN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_TIO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_UNA_INIT */ - {NULL, tape34xx_wri_init_done, NULL, NULL, NULL}, /* TS_WRI_INIT */ - {NULL, tape34xx_wtm_init_done, NULL, NULL, NULL}, /* TS_WTM_INIT */ - {NULL, NULL, NULL, NULL, NULL}}; /* TS_NOT_OPER */ - - -int -tape34xx_ioctl_overload (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) -{ - return -EINVAL; // no additional ioctls - -} - -ccw_req_t * -tape34xx_write_block (const char *data, size_t count, tape_info_t * ti) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - void *mem; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xwbl nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - mem = kmalloc (count, GFP_KERNEL); - if (!mem) { - tape_free_request (cqr); -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xwbl nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - if (copy_from_user (mem, data, count)) { - kfree (mem); - tape_free_request (cqr); -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xwbl segf."); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - - ccw->cmd_code = WRITE_CMD; - ccw->flags = 0; - ccw->count = count; - set_normalized_cda (ccw, (unsigned long) mem); - if ((ccw->cda) == 0) { - kfree (mem); - tape_free_request (cqr); - return NULL; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = mem; - ti->userbuf = (void *) data; - tapestate_set (ti, TS_WRI_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xwbl ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -void -tape34xx_free_write_block (ccw_req_t * cqr, tape_info_t * ti) -{ - unsigned long lockflags; - ccw1_t *ccw; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ccw = cqr->cpaddr; - ccw++; - clear_normalized_cda (ccw); - kfree (ti->kernbuf); - tape_free_request (cqr); - ti->kernbuf = ti->userbuf = NULL; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfwb free"); -#endif /* TAPE_DEBUG */ -} - -ccw_req_t * -tape34xx_read_block (const char *data, size_t count, tape_info_t * ti) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - void *mem; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xrbl nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - mem = kmalloc (count, GFP_KERNEL); - if (!mem) { - tape_free_request (cqr); -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xrbl nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - - ccw->cmd_code = READ_FORWARD; - ccw->flags = 0; - ccw->count = count; - set_normalized_cda (ccw, (unsigned long) mem); - if ((ccw->cda) == 0) { - kfree (mem); - tape_free_request (cqr); - return NULL; - } - 
s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = mem; - ti->userbuf = (void *) data; - tapestate_set (ti, TS_RFO_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xrbl ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -ccw_req_t * -tape34xx_read_opposite (tape_info_t * ti,int novalue) -{ - ccw_req_t *cqr; - ccw1_t *ccw; - size_t count; - // first, retrieve the count from the old cqr. - cqr = ti->cqr; - ccw = cqr->cpaddr; - ccw++; - count=ccw->count; - // free old cqr. - clear_normalized_cda (ccw); - tape_free_request (cqr); - // build new cqr - cqr = tape_alloc_ccw_req (ti, 3, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xrop nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - - ccw->cmd_code = READ_BACKWARD; - ccw->flags = CCW_FLAG_CC; - ccw->count = count; - set_normalized_cda (ccw, (unsigned long) ti->kernbuf); - if ((ccw->cda) == 0) { - tape_free_request (cqr); - return NULL; - } - ccw++; - ccw->cmd_code = FORSPACEBLOCK; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - ccw->cda = (unsigned long)ccw; - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 1; - ccw->cda = (unsigned long)ccw; - tapestate_set (ti, TS_RBA_INIT); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xrop ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -void -tape34xx_free_read_block (ccw_req_t * cqr, tape_info_t * ti) -{ - unsigned long lockflags; - size_t cpysize; - ccw1_t *ccw; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ccw = cqr->cpaddr; - ccw++; - cpysize = ccw->count - ti->devstat.rescnt; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (copy_to_user (ti->userbuf, ti->kernbuf, cpysize)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfrb segf."); -#endif /* TAPE_DEBUG */ - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - clear_normalized_cda (ccw); - kfree (ti->kernbuf); - tape_free_request (cqr); - ti->kernbuf = ti->userbuf = NULL; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfrb free"); -#endif /* TAPE_DEBUG */ -} - -/* - * The IOCTL interface is implemented in the following section, - * excepted the MTRESET, MTSETBLK which are handled by tapechar.c - */ -/* - * MTFSF: Forward space over 'count' file marks. The tape is positioned - * at the EOT (End of Tape) side of the file mark. 
- */ -ccw_req_t * -tape34xx_mtfsf (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsf parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsf nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = FORSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_FSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfsf ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTBSF: Backward space over 'count' file marks. The tape is positioned at - * the EOT (End of Tape) side of the last skipped file mark. - */ -ccw_req_t * -tape34xx_mtbsf (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsf parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsf nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = BACKSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_BSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xbsf ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTFSR: Forward space over 'count' tape blocks (blocksize is set - * via MTSETBLK. 
- */ -ccw_req_t * -tape34xx_mtfsr (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsr parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsr nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = FORSPACEBLOCK; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_FSB_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfsr ccwgen"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTBSR: Backward space over 'count' tape blocks. - * (blocksize is set via MTSETBLK. - */ -ccw_req_t * -tape34xx_mtbsr (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsr parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsr nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = BACKSPACEBLOCK; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_BSB_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xbsr ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTWEOF: Write 'count' file marks at the current position. 
- */ -ccw_req_t * -tape34xx_mtweof (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xweo parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xweo nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = WRITETAPEMARK; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_WTM_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xweo ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTREW: Rewind the tape. - */ -ccw_req_t * -tape34xx_mtrew (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 3, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xrew nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = REWIND; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_REW_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xrew ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTOFFL: Rewind the tape and put the drive off-line. - * Implement 'rewind unload' - */ -ccw_req_t * -tape34xx_mtoffl (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 3, 32); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xoff nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = REWIND_UNLOAD; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = SENSE; - ccw->flags = 0; - ccw->count = 32; - ccw->cda = (unsigned long) cqr->cpaddr; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_RUN_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xoff ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTNOP: 'No operation'. 
- */ -ccw_req_t * -tape34xx_mtnop (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 1, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xnop nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) ccw->cmd_code; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xnop ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTBSFM: Backward space over 'count' file marks. - * The tape is positioned at the BOT (Begin Of Tape) side of the - * last skipped file mark. - */ -ccw_req_t * -tape34xx_mtbsfm (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsm parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsm nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = BACKSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_BSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xbsm ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTFSFM: Forward space over 'count' file marks. - * The tape is positioned at the BOT (Begin Of Tape) side - * of the last skipped file mark. 
- */ -ccw_req_t * -tape34xx_mtfsfm (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsm parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsm nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = FORSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_FSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfsm ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTEOM: positions at the end of the portion of the tape already used - * for recordind data. MTEOM positions after the last file mark, ready for - * appending another file. - * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind. - */ -ccw_req_t * -tape34xx_mteom (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 4, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xeom nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = FORSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = CCW_CMD_TIC; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (cqr->cpaddr); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_FSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xeom ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTERASE: erases the tape. 
- */ -ccw_req_t * -tape34xx_mterase (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 5, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xera nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = REWIND; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = ERASE_GAP; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = DATA_SEC_ERASE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_DSE_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xera ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTSETDENSITY: set tape density. - */ -ccw_req_t * -tape34xx_mtsetdensity (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xden nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xden ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTSEEK: seek to the specified block. 
- */ -ccw_req_t * -tape34xx_mtseek (tape_info_t * ti, int count) -{ - long lockflags; - __u8 *data; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((data = kmalloc (4 * sizeof (__u8), GFP_KERNEL)) == NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xsee nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - data[0] = 0x01; - data[1] = data[2] = data[3] = 0x00; - if (count >= 4194304) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xsee parm"); -#endif /* TAPE_DEBUG */ - kfree(data); - return NULL; - } - if (((tape34xx_disc_data_t *) ti->discdata)->modeset_byte & 0x08) // IDRC on - - data[1] = data[1] | 0x80; - data[3] += count % 256; - data[2] += (count / 256) % 256; - data[1] += (count / 65536); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xsee id:"); - debug_int_event (tape_debug_area,6,count); -#endif /* TAPE_DEBUG */ - cqr = tape_alloc_ccw_req (ti, 3, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xsee nomem"); -#endif /* TAPE_DEBUG */ - kfree (data); - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = LOCATE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 4; - set_normalized_cda (ccw, (unsigned long) data); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = data; - ti->userbuf = NULL; - tapestate_set (ti, TS_LBL_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xsee ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTTELL: Tell block. Return the number of block relative to current file. - */ -ccw_req_t * -tape34xx_mttell (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - void *mem; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xtel nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - mem = kmalloc (8, GFP_KERNEL); - if (!mem) { - tape_free_request (cqr); -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xtel nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - - ccw->cmd_code = READ_BLOCK_ID; - ccw->flags = 0; - ccw->count = 8; - set_normalized_cda (ccw, (unsigned long) mem); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = mem; - ti->userbuf = NULL; - tapestate_set (ti, TS_RBI_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xtel ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTSETDRVBUFFER: Set the tape drive buffer code to number. - * Implement NOP. 
-
-/*
- * MTSETDRVBUFFER: Set the tape drive buffer code to number.
- * Implement NOP.
- */
-ccw_req_t *
-tape34xx_mtsetdrvbuffer (tape_info_t * ti, int count)
-{
-	long lockflags;
-	ccw_req_t *cqr;
-	ccw1_t *ccw;
-	cqr = tape_alloc_ccw_req (ti, 2, 0);
-	if (!cqr) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,6,"xbuf nomem");
-#endif /* TAPE_DEBUG */
-		return NULL;
-	}
-	ccw = cqr->cpaddr;
-	ccw->cmd_code = MODE_SET_DB;
-	ccw->flags = CCW_FLAG_CC;
-	ccw->count = 1;
-	set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
-	ccw++;
-	ccw->cmd_code = NOP;
-	ccw->flags = 0;
-	ccw->count = 0;
-	ccw->cda = (unsigned long) (&(ccw->cmd_code));
-	s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
-	ti->kernbuf = NULL;
-	ti->userbuf = NULL;
-	tapestate_set (ti, TS_NOP_INIT);
-	s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xbuf ccwg");
-#endif /* TAPE_DEBUG */
-	return cqr;
-}
-
-/*
- * MTLOCK: Locks the tape drive door.
- * Implement NOP CCW command.
- */
-ccw_req_t *
-tape34xx_mtlock (tape_info_t * ti, int count)
-{
-	long lockflags;
-	ccw_req_t *cqr;
-	ccw1_t *ccw;
-	cqr = tape_alloc_ccw_req (ti, 2, 0);
-	if (!cqr) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,6,"xloc nomem");
-#endif /* TAPE_DEBUG */
-		return NULL;
-	}
-	ccw = cqr->cpaddr;
-	ccw->cmd_code = MODE_SET_DB;
-	ccw->flags = CCW_FLAG_CC;
-	ccw->count = 1;
-	set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
-	ccw++;
-	ccw->cmd_code = NOP;
-	ccw->flags = 0;
-	ccw->count = 0;
-	ccw->cda = (unsigned long) (&(ccw->cmd_code));
-	s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
-	ti->kernbuf = NULL;
-	ti->userbuf = NULL;
-	tapestate_set (ti, TS_NOP_INIT);
-	s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xloc ccwg");
-#endif /* TAPE_DEBUG */
-	return cqr;
-}
-
-/*
- * MTUNLOCK: Unlocks the tape drive door.
- * Implement the NOP CCW command.
- */
-ccw_req_t *
-tape34xx_mtunlock (tape_info_t * ti, int count)
-{
-	long lockflags;
-	ccw_req_t *cqr;
-	ccw1_t *ccw;
-	cqr = tape_alloc_ccw_req (ti, 2, 0);
-	if (!cqr) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,6,"xulk nomem");
-#endif /* TAPE_DEBUG */
-		return NULL;
-	}
-	ccw = cqr->cpaddr;
-	ccw->cmd_code = MODE_SET_DB;
-	ccw->flags = CCW_FLAG_CC;
-	ccw->count = 1;
-	set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
-	ccw++;
-	ccw->cmd_code = NOP;
-	ccw->flags = 0;
-	ccw->count = 0;
-	ccw->cda = (unsigned long) (&(ccw->cmd_code));
-	s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
-	ti->kernbuf = NULL;
-	ti->userbuf = NULL;
-	tapestate_set (ti, TS_NOP_INIT);
-	s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xulk ccwg");
-#endif /* TAPE_DEBUG */
-	return cqr;
-}
-
-/*
- * MTLOAD: Loads the tape.
- * This function is not implemented and returns NULL, which causes the Frontend to wait for a medium being loaded.
- * The 3480/3490 type Tapes do not support a load command
- */
-ccw_req_t *
-tape34xx_mtload (tape_info_t * ti, int count)
-{
-	return NULL;
-}
-
-/*
- * MTUNLOAD: Rewind the tape and unload it.
- */
-ccw_req_t *
-tape34xx_mtunload (tape_info_t * ti, int count)
-{
-	long lockflags;
-	ccw_req_t *cqr;
-	ccw1_t *ccw;
-	cqr = tape_alloc_ccw_req (ti, 3, 32);
-	if (!cqr) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,6,"xunl nomem");
-#endif /* TAPE_DEBUG */
-		return NULL;
-	}
-	ccw = cqr->cpaddr;
-	ccw->cmd_code = MODE_SET_DB;
-	ccw->flags = CCW_FLAG_CC;
-	ccw->count = 1;
-	set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
-	ccw++;
-	ccw->cmd_code = REWIND_UNLOAD;
-	ccw->flags = CCW_FLAG_CC;
-	ccw->count = 1;
-	ccw->cda = (unsigned long) (&(ccw->cmd_code));
-	ccw++;
-	ccw->cmd_code = SENSE;
-	ccw->flags = 0;
-	ccw->count = 32;
-	ccw->cda = (unsigned long) cqr->cpaddr;
-	s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
-	ti->kernbuf = NULL;
-	ti->userbuf = NULL;
-	tapestate_set (ti, TS_RUN_INIT);
-	s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xunl ccwg");
-#endif /* TAPE_DEBUG */
-	return cqr;
-}
-
-/*
- * MTCOMPRESSION: used to enable compression.
- * Sets the IDRC on/off.
- */
-ccw_req_t *
-tape34xx_mtcompression (tape_info_t * ti, int count)
-{
-	long lockflags;
-	ccw_req_t *cqr;
-	ccw1_t *ccw;
-	if ((count < 0) || (count > 1)) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,6,"xcom parm");
-#endif /* TAPE_DEBUG */
-		return NULL;
-	}
-	if (count == 0)
-		((tape34xx_disc_data_t *) ti->discdata)->modeset_byte = 0x00;	// IDRC off
-
-	else
-		((tape34xx_disc_data_t *) ti->discdata)->modeset_byte = 0x08;	// IDRC on
-
-	cqr = tape_alloc_ccw_req (ti, 2, 0);
-	if (!cqr) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,6,"xcom nomem");
-#endif /* TAPE_DEBUG */
-		return NULL;
-	}
-	ccw = cqr->cpaddr;
-	ccw->cmd_code = MODE_SET_DB;
-	ccw->flags = CCW_FLAG_CC;
-	ccw->count = 1;
-	set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
-	ccw++;
-	ccw->cmd_code = NOP;
-	ccw->flags = 0;
-	ccw->count = 0;
-	ccw->cda = (unsigned long) (&(ccw->cmd_code));
-	s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
-	ti->kernbuf = NULL;
-	ti->userbuf = NULL;
-	tapestate_set (ti, TS_NOP_INIT);
-	s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xcom ccwg");
-#endif /* TAPE_DEBUG */
-	return cqr;
-}
-
-/*
- * MTSTPART: Move the tape head at the partition with the number 'count'.
- * Implement the NOP CCW command.
- */
-ccw_req_t *
-tape34xx_mtsetpart (tape_info_t * ti, int count)
-{
-	long lockflags;
-	ccw_req_t *cqr;
-	ccw1_t *ccw;
-	cqr = tape_alloc_ccw_req (ti, 2, 0);
-	if (!cqr) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,6,"xspa nomem");
-#endif /* TAPE_DEBUG */
-		return NULL;
-	}
-	ccw = cqr->cpaddr;
-	ccw->cmd_code = MODE_SET_DB;
-	ccw->flags = CCW_FLAG_CC;
-	ccw->count = 1;
-	set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
-	ccw++;
-	ccw->cmd_code = NOP;
-	ccw->flags = 0;
-	ccw->count = 0;
-	ccw->cda = (unsigned long) (&(ccw->cmd_code));
-	s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
-	ti->kernbuf = NULL;
-	ti->userbuf = NULL;
-	tapestate_set (ti, TS_NOP_INIT);
-	s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xspa ccwg");
-#endif /* TAPE_DEBUG */
-	return cqr;
-}
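
From user space the compression switch is reached through the standard mtio interface; an MTCOMPRESSION operation with count 0 or 1 ends up in the function above, which maps it onto modeset_byte 0x00 (IDRC off) or 0x08 (IDRC on). A hypothetical user-space snippet, not part of the patch:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mtio.h>

	/* Returns 0 on success; any count other than 0 or 1 is
	 * rejected by the driver. */
	int set_tape_compression(int fd, int on)
	{
		struct mtop op;

		op.mt_op = MTCOMPRESSION;
		op.mt_count = on ? 1 : 0;
		return ioctl(fd, MTIOCTOP, &op);
	}
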
-
-/*
- * MTMKPART: .... dummy .
- * Implement the NOP CCW command.
- */
-ccw_req_t *
-tape34xx_mtmkpart (tape_info_t * ti, int count)
-{
-	long lockflags;
-	ccw_req_t *cqr;
-	ccw1_t *ccw;
-	cqr = tape_alloc_ccw_req (ti, 2, 0);
-	if (!cqr) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,6,"xnpa nomem");
-#endif /* TAPE_DEBUG */
-		return NULL;
-	}
-	ccw = cqr->cpaddr;
-	ccw->cmd_code = MODE_SET_DB;
-	ccw->flags = CCW_FLAG_CC;
-	ccw->count = 1;
-	set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
-	ccw++;
-	ccw->cmd_code = NOP;
-	ccw->flags = 0;
-	ccw->count = 0;
-	ccw->cda = (unsigned long) (&(ccw->cmd_code));
-	s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
-	ti->kernbuf = NULL;
-	ti->userbuf = NULL;
-	tapestate_set (ti, TS_NOP_INIT);
-	s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xnpa ccwg");
-#endif /* TAPE_DEBUG */
-	return cqr;
-}
-
-/*
- * MTIOCGET: query the tape drive status.
- */
-ccw_req_t *
-tape34xx_mtiocget (tape_info_t * ti, int count)
-{
-	return NULL;
-}
-
-/*
- * MTIOCPOS: query the tape position.
- */
-ccw_req_t *
-tape34xx_mtiocpos (tape_info_t * ti, int count)
-{
-	return NULL;
-}
-
-ccw_req_t * tape34xx_bread (struct request *req,tape_info_t* ti,int tapeblock_major) {
-	ccw_req_t *cqr;
-	ccw1_t *ccw;
-	__u8 *data;
-	int s2b = blksize_size[tapeblock_major][ti->blk_minor]/hardsect_size[tapeblock_major][ti->blk_minor];
-	int realcount;
-	int size,bhct = 0;
-	struct buffer_head* bh;
-	for (bh = req->bh; bh; bh = bh->b_reqnext) {
-		if (bh->b_size > blksize_size[tapeblock_major][ti->blk_minor])
-			for (size = 0; size < bh->b_size; size += blksize_size[tapeblock_major][ti->blk_minor])
-				bhct++;
-		else
-			bhct++;
-	}
-	if ((data = kmalloc (4 * sizeof (__u8), GFP_ATOMIC)) == NULL) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,3,"xBREDnomem");
-#endif /* TAPE_DEBUG */
-		return NULL;
-	}
-	data[0] = 0x01;
-	data[1] = data[2] = data[3] = 0x00;
-	realcount=req->sector/s2b;
-	if (((tape34xx_disc_data_t *) ti->discdata)->modeset_byte & 0x08)	// IDRC on
-
-		data[1] = data[1] | 0x80;
-	data[3] += realcount % 256;
-	data[2] += (realcount / 256) % 256;
-	data[1] += (realcount / 65536);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xBREDid:");
-	debug_int_event (tape_debug_area,6,realcount);
-#endif /* TAPE_DEBUG */
-	cqr = tape_alloc_ccw_req (ti, 2+bhct+1, 0);
-	if (!cqr) {
-#ifdef TAPE_DEBUG
-		debug_text_exception (tape_debug_area,6,"xBREDnomem");
-#endif /* TAPE_DEBUG */
-		kfree(data);
-		return NULL;
-	}
-	ccw = cqr->cpaddr;
-	ccw->cmd_code = MODE_SET_DB;
-	ccw->flags = CCW_FLAG_CC;
-	ccw->count = 1;
-	set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
-	if (realcount!=ti->position) {
-		ccw++;
-		ccw->cmd_code = LOCATE;
-		ccw->flags = CCW_FLAG_CC;
-		ccw->count = 4;
-		set_normalized_cda (ccw, (unsigned long) data);
-	}
-	ti->position=realcount+req->nr_sectors/s2b;
-	for (bh=req->bh;bh!=NULL;) {
-		ccw->flags = CCW_FLAG_CC;
-		if (bh->b_size >= blksize_size[tapeblock_major][ti->blk_minor]) {
-			for (size = 0; size < bh->b_size; size += blksize_size[tapeblock_major][ti->blk_minor]) {
-				ccw++;
-				ccw->flags = CCW_FLAG_CC;
-				ccw->cmd_code = READ_FORWARD;
-				ccw->count = blksize_size[tapeblock_major][ti->blk_minor];
-				set_normalized_cda (ccw, __pa (bh->b_data + size));
-			}
-			bh = bh->b_reqnext;
-		} else {	/* group N bhs to fit into byt_per_blk */
-			for (size = 0; bh != NULL && size < blksize_size[tapeblock_major][ti->blk_minor];) {
-				ccw++;
-				ccw->flags = CCW_FLAG_DC;
-				ccw->cmd_code = READ_FORWARD;
-				ccw->count = bh->b_size;
-				set_normalized_cda (ccw, __pa (bh->b_data));
-				size += bh->b_size;
-				bh = bh->b_reqnext;
-			}
-			if (size != blksize_size[tapeblock_major][ti->blk_minor]) {
-				PRINT_WARN ("Cannot fulfill small request %d vs. %d (%ld sects)\n",
-					    size,
-					    blksize_size[tapeblock_major][ti->blk_minor],
-					    req->nr_sectors);
-				kfree(data);
-				tape_free_request (cqr);
-				return NULL;
-			}
-		}
-	}
-	ccw -> flags &= ~(CCW_FLAG_DC);
-	ccw -> flags |= (CCW_FLAG_CC);
-	ccw++;
-	ccw->cmd_code = NOP;
-	ccw->flags = 0;
-	ccw->count = 0;
-	ccw->cda = (unsigned long) (&(ccw->cmd_code));
-	ti->kernbuf = data;
-	ti->userbuf = NULL;
-	tapestate_set (ti, TS_BLOCK_INIT);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xBREDccwg");
-#endif /* TAPE_DEBUG */
-	return cqr;
-}
-
-void tape34xx_free_bread (ccw_req_t* cqr,struct _tape_info_t* ti) {
-	ccw1_t* ccw;
-	for (ccw=(ccw1_t*)cqr->cpaddr;(ccw->flags & CCW_FLAG_CC)||(ccw->flags & CCW_FLAG_DC);ccw++)
-		if ((ccw->cmd_code == MODE_SET_DB) ||
-		    (ccw->cmd_code == LOCATE) ||
-		    (ccw->cmd_code == READ_FORWARD))
-			clear_normalized_cda(ccw);
-	tape_free_request(cqr);
-	kfree(ti->kernbuf);
-	ti->kernbuf=NULL;
-}
-
-/* event handlers */
-void
-tape34xx_default_handler (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"xdefhandle");
-#endif /* TAPE_DEBUG */
-	PRINT_ERR ("TAPE34XX: An unexpected Unit Check occurred.\n");
-	PRINT_ERR ("TAPE34XX: Please read Documentation/s390/TAPE and report it!\n");
-	PRINT_ERR ("TAPE34XX: Current state is: %s",
-		   (((tapestate_get (ti) < TS_SIZE) && (tapestate_get (ti) >= 0)) ?
-		    state_verbose[tapestate_get (ti)] : "->UNKNOWN STATE<-"));
-	tape_dump_sense (&ti->devstat);
-	ti->rc = -EIO;
-	ti->wanna_wakeup=1;
-	switch (tapestate_get(ti)) {
-	case TS_REW_RELEASE_INIT:
-		tapestate_set(ti,TS_FAILED);
-		wake_up (&ti->wq);
-		break;
-	case TS_BLOCK_INIT:
-		tapestate_set(ti,TS_FAILED);
-		schedule_tapeblock_exec_IO(ti);
-		break;
-	default:
-		tapestate_set(ti,TS_FAILED);
-		wake_up_interruptible (&ti->wq);
-	}
-}
-
-void
-tape34xx_unexpect_uchk_handler (tape_info_t * ti)
-{
-	if ((ti->devstat.ii.sense.data[0] == 0x40) &&
-	    (ti->devstat.ii.sense.data[1] == 0x40) &&
-	    (ti->devstat.ii.sense.data[3] == 0x43)) {
-		// no tape in the drive
-		PRINT_INFO ("Drive %d not ready. No volume loaded.\n", ti->rew_minor / 2);
-#ifdef TAPE_DEBUG
-		debug_text_event (tape_debug_area,3,"xuuh nomed");
-#endif /* TAPE_DEBUG */
-		tapestate_set (ti, TS_FAILED);
-		ti->rc = -ENOMEDIUM;
-		ti->wanna_wakeup=1;
-		wake_up_interruptible (&ti->wq);
-	} else if ((ti->devstat.ii.sense.data[0] == 0x42) &&
-		   (ti->devstat.ii.sense.data[1] == 0x44) &&
-		   (ti->devstat.ii.sense.data[3] == 0x3b)) {
-		PRINT_INFO ("Media in drive %d was changed!\n",
-			    ti->rew_minor / 2);
-#ifdef TAPE_DEBUG
-		debug_text_event (tape_debug_area,3,"xuuh medchg");
-#endif
-		/* nothing to do. chan end & dev end will be reported when io is finished */
-	} else {
-#ifdef TAPE_DEBUG
-		debug_text_event (tape_debug_area,3,"xuuh unexp");
-		debug_text_event (tape_debug_area,3,"state:");
-		debug_text_event (tape_debug_area,3,((tapestate_get (ti) < TS_SIZE) &&
-						     (tapestate_get (ti) >= 0)) ?
-				  state_verbose[tapestate_get (ti)] :
-				  "TS UNKNOWN");
-#endif /* TAPE_DEBUG */
-		tape34xx_default_handler (ti);
-	}
-}
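
tape34xx_bread and tape34xx_free_bread rely on the two chaining flags to delimit the channel program: CCW_FLAG_CC starts a fresh command per tape block, CCW_FLAG_DC continues the same READ_FORWARD into another buffer_head, and the terminating NOP carries neither flag. A hypothetical walker using that invariant, not part of the patch:

	/* Counts the CCWs of a chain built like tape34xx_bread's:
	 * the chain continues while either chaining flag is set. */
	static int
	count_chained_ccws(ccw1_t *ccw)
	{
		int n = 1;

		while ((ccw->flags & CCW_FLAG_CC) ||
		       (ccw->flags & CCW_FLAG_DC)) {
			ccw++;
			n++;
		}
		return n;
	}
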
-
-void
-tape34xx_unused_done (tape_info_t * ti)
-{
-	if (ti->medium_is_unloaded) {
-		// A medium was inserted in the drive!
-#ifdef TAPE_DEBUG
-		debug_text_event (tape_debug_area,6,"xuui med");
-#endif /* TAPE_DEBUG */
-		PRINT_WARN ("A medium was inserted into the tape.\n");
-		ti->medium_is_unloaded=0;
-	} else {
-#ifdef TAPE_DEBUG
-		debug_text_event (tape_debug_area,3,"unsol.irq!");
-		debug_text_event (tape_debug_area,3,"dev end");
-		debug_int_exception (tape_debug_area,3,ti->devinfo.irq);
-#endif /* TAPE_DEBUG */
-		PRINT_WARN ("Unsolicited IRQ (Device End) caught in unused state.\n");
-		tape_dump_sense (&ti->devstat);
-	}
-}
-
-
-void
-tape34xx_idle_done (tape_info_t * ti)
-{
-	if (ti->medium_is_unloaded) {
-		// A medium was inserted in the drive!
-#ifdef TAPE_DEBUG
-		debug_text_event (tape_debug_area,6,"xuud med");
-#endif /* TAPE_DEBUG */
-		PRINT_WARN ("A medium was inserted into the tape.\n");
-		ti->medium_is_unloaded=0;
-		wake_up_interruptible (&ti->wq);
-	} else {
-#ifdef TAPE_DEBUG
-		debug_text_event (tape_debug_area,3,"unsol.irq!");
-		debug_text_event (tape_debug_area,3,"dev end");
-		debug_int_exception (tape_debug_area,3,ti->devinfo.irq);
-#endif /* TAPE_DEBUG */
-		PRINT_WARN ("Unsolicited IRQ (Device End) caught in idle state.\n");
-		tape_dump_sense (&ti->devstat);
-	}
-}
-
-void
-tape34xx_block_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"x:bREQdone");
-#endif /* TAPE_DEBUG */
-	tapestate_set(ti,TS_DONE);
-	schedule_tapeblock_exec_IO(ti);
-}
-
-void
-tape34xx_bsf_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"bsf done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	ti->wanna_wakeup=1;
-	wake_up_interruptible (&ti->wq);
-}
-
-void
-tape34xx_dse_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"dse done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	ti->wanna_wakeup=1;
-	wake_up_interruptible (&ti->wq);
-}
-
-void
-tape34xx_fsf_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"fsf done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	ti->wanna_wakeup=1;
-	wake_up_interruptible (&ti->wq);
-}
-
-void
-tape34xx_fsb_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"fsb done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	ti->wanna_wakeup=1;
-	wake_up_interruptible (&ti->wq);
-}
-
-void
-tape34xx_bsb_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"bsb done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	ti->wanna_wakeup=1;
-	wake_up (&ti->wq);
-}
-
-void
-tape34xx_lbl_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"lbl done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	//s390irq_spin_unlock(tape->devinfo.irq);
-	ti->wanna_wakeup=1;
-	wake_up (&ti->wq);
-}
-
-void
-tape34xx_nop_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"nop done..");
-	debug_text_exception (tape_debug_area,6,"or rew/rel");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	//s390irq_spin_unlock(tape->devinfo.irq);
-	ti->wanna_wakeup=1;
-	wake_up (&ti->wq);
-}
-
-void
-tape34xx_rfo_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"rfo done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	ti->wanna_wakeup=1;
-	wake_up (&ti->wq);
-}
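
The completion handlers above differ only in their debug tag and in whether the wakeup may be interrupted; the shared sequence could be factored as below. This is a hypothetical consolidation for illustration, not something the patch introduces:

	static void
	tape34xx_generic_done(tape_info_t *ti, int interruptible)
	{
		tapestate_set(ti, TS_DONE);
		ti->rc = 0;
		ti->wanna_wakeup = 1;
		if (interruptible)
			wake_up_interruptible(&ti->wq);
		else
			wake_up(&ti->wq);
	}
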
-
-void
-tape34xx_rbi_init_done (tape_info_t * ti)
-{
-	__u8 *data;
-#ifdef TAPE_DEBUG
-	int i;
-#endif
-	tapestate_set (ti, TS_FAILED);
-	data = ti->kernbuf;
-	ti->rc = data[3];
-	ti->rc += 256 * data[2];
-	ti->rc += 65536 * (data[1] & 0x3F);
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"rbi done");
-	debug_text_event (tape_debug_area,6,"data:");
-	for (i=0;i<8;i++)
-		debug_int_event (tape_debug_area,6,data[i]);
-#endif
-	ti->wanna_wakeup=1;
-	wake_up_interruptible (&ti->wq);
-}
-
-void
-tape34xx_rew_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"rew done");
-#endif
-	//BH: use irqsave
-	//s390irq_spin_lock(tape->devinfo.irq);
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	//s390irq_spin_unlock(tape->devinfo.irq);
-	ti->wanna_wakeup=1;
-	wake_up_interruptible (&ti->wq);
-}
-
-void
-tape34xx_rew_release_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"rewR done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	//s390irq_spin_unlock(tape->devinfo.irq);
-	ti->wanna_wakeup=1;
-	wake_up (&ti->wq);
-}
-
-void
-tape34xx_run_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"rew done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	ti->wanna_wakeup=1;
-	wake_up_interruptible (&ti->wq);
-}
-
-void
-tape34xx_wri_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,6,"wri done");
-#endif
-	//BH: use irqsave
-	//s390irq_spin_lock(ti->devinfo.irq);
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	//s390irq_spin_unlock(ti->devinfo.irq);
-	ti->wanna_wakeup=1;
-	wake_up_interruptible (&ti->wq);
-}
-
-void
-tape34xx_wtm_init_done (tape_info_t * ti)
-{
-#ifdef TAPE_DEBUG
-	debug_text_event (tape_debug_area,3,"wtm done");
-#endif
-	tapestate_set (ti, TS_DONE);
-	ti->rc = 0;
-	ti->wanna_wakeup=1;
-	wake_up_interruptible (&ti->wq);
-}
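
tape34xx_rbi_init_done reverses the packing used for LOCATE: it reassembles the 22-bit block number from bytes 1-3 of the READ_BLOCK_ID reply, masking the format bits out of byte 1. As a hypothetical standalone helper (not part of the patch):

	static unsigned int
	unpack_block_id(const __u8 data[4])
	{
		/* Only the low 22 bits carry the position; the top two
		 * bits of byte 1 are format flags and are masked off. */
		return ((unsigned int) (data[1] & 0x3f) << 16) |
		       ((unsigned int) data[2] << 8) |
		       data[3];
	}
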
-
-/* This function analyses the tape's sense-data in case of a unit-check. If possible,
-   it tries to recover from the error. Else the user is informed about the problem. */
-void
-tape34xx_error_recovery (tape_info_t* ti)
-{
-	__u8* sense=ti->devstat.ii.sense.data;
-	int inhibit_cu_recovery=0;
-	int cu_type=ti->discipline->cu_type;
-	if ((((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)&0x80) inhibit_cu_recovery=1;
-	if (tapestate_get(ti)==TS_BLOCK_INIT) {
-		// no recovery for block device, bottom half will retry...
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	}
-	if (sense[0]&SENSE_COMMAND_REJECT)
-		switch (tapestate_get(ti)) {
-		case TS_BLOCK_INIT:
-		case TS_DSE_INIT:
-		case TS_EGA_INIT:
-		case TS_WRI_INIT:
-		case TS_WTM_INIT:
-			if (sense[1]&SENSE_WRITE_PROTECT) {
-				// trying to write, but medium is write protected
-				tape34xx_error_recovery_has_failed(ti,EACCES);
-				return;
-			}
-		default:
-			tape34xx_error_recovery_HWBUG(ti,1);
-			return;
-		}
-	// special cases for various tape-states when reaching end of recorded area
-	if (((sense[0]==0x08) || (sense[0]==0x10) || (sense[0]==0x12)) &&
-	    ((sense[1]==0x40) || (sense[1]==0x0c)))
-		switch (tapestate_get(ti)) {
-		case TS_FSF_INIT:
-			// Trying to seek beyond end of recorded area
-			tape34xx_error_recovery_has_failed(ti,EIO);
-			return;
-		case TS_LBL_INIT:
-			// Block could not be located.
-			tape34xx_error_recovery_has_failed(ti,EIO);
-			return;
-		case TS_RFO_INIT:
-			// Try to read beyond end of recorded area -> 0 bytes read
-			tape34xx_error_recovery_has_failed(ti,0);
-			return;
-		}
-	// Sensing special bits
-	if (sense[0]&SENSE_BUS_OUT_CHECK) {
-		tape34xx_error_recovery_do_retry(ti);
-		return;
-	}
-	if (sense[0]&SENSE_DATA_CHECK) {
-		// hardware failure, damaged tape or improper operating conditions
-		switch (sense[3]) {
-		case 0x23:
-			// a read data check occurred
-			if ((sense[2]&SENSE_TAPE_SYNC_MODE) ||
-			    (inhibit_cu_recovery)) {
-				// data check is not permanent, may be recovered.
-				// We always use async-mode with cu-recovery, so this should *never* happen.
-				tape34xx_error_recovery_HWBUG(ti,2);
-				return;
-			} else {
-				// data check is permanent, CU recovery has failed
-				PRINT_WARN("Permanent read error, recovery failed!\n");
-				tape34xx_error_recovery_has_failed(ti,EIO);
-				return;
-			}
-		case 0x25:
-			// a write data check occurred
-			if ((sense[2]&SENSE_TAPE_SYNC_MODE) ||
-			    (inhibit_cu_recovery)) {
-				// data check is not permanent, may be recovered.
-				// We always use async-mode with cu-recovery, so this should *never* happen.
-				tape34xx_error_recovery_HWBUG(ti,3);
-				return;
-			} else {
-				// data check is permanent, cu-recovery has failed
-				PRINT_WARN("Permanent write error, recovery failed!\n");
-				tape34xx_error_recovery_has_failed(ti,EIO);
-				return;
-			}
-		case 0x26:
-			// Data Check (read opposite) occurred. We'll recover this.
-			tape34xx_error_recovery_read_opposite(ti);
-			return;
-		case 0x28:
-			// The ID-Mark at the beginning of the tape could not be written. This is fatal, we'll report and exit.
-			PRINT_WARN("ID-Mark could not be written. Check your hardware!\n");
-			tape34xx_error_recovery_has_failed(ti,EIO);
-			return;
-		case 0x31:
-			// Tape void. Tried to read beyond end of device. We'll report and exit.
-			PRINT_WARN("Try to read beyond end of recorded area!\n");
-			tape34xx_error_recovery_has_failed(ti,ENOSPC);
-			return;
-		case 0x41:
-			// Record sequence error. cu detected incorrect block-id sequence on tape. We'll report and exit.
-			PRINT_WARN("Illegal block-id sequence found!\n");
-			tape34xx_error_recovery_has_failed(ti,EIO);
-			return;
-		default:
-			// well, all data checks for 3480 should result in one of the above erpa-codes. if not -> bug
-			// On 3490, other data-check conditions do exist.
-			if (cu_type==0x3480) {
-				tape34xx_error_recovery_HWBUG(ti,4);
-				return;
-			}
-		}
-	}
-	if (sense[0]&SENSE_OVERRUN) {
-		// A data overrun between cu and drive occurred. The channel speed is to slow! We'll report this and exit!
-		switch (sense[3]) {
-		case 0x40: // overrun error
-			PRINT_WARN ("Data overrun error between control-unit and drive. Use a faster channel connection, if possible! \n");
-			tape34xx_error_recovery_has_failed(ti,EIO);
-			return;
-		default:
-			// Overrun bit is set, but erpa does not show overrun error. This is a bug.
-			tape34xx_error_recovery_HWBUG(ti,5);
-			return;
-		}
-	}
-	if (sense[1]&SENSE_RECORD_SEQUENCE_ERR) {
-		switch (sense[3]) {
-		case 0x41:
-			// Record sequence error. cu detected incorrect block-id sequence on tape. We'll report and exit.
-			PRINT_WARN("Illegal block-id sequence found!\n");
-			tape34xx_error_recovery_has_failed(ti,EIO);
-			return;
-		default:
-			// Record sequence error bit is set, but erpa does not show record sequence error. This is a bug.
-			tape34xx_error_recovery_HWBUG(ti,6);
-			return;
-		}
-	}
-	// Sensing erpa codes
-	switch (sense[3]) {
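// Hypothetical aside, not part of the patch: the switch below is in
// effect a table from ERPA code (sense byte 3) to an outcome: retry,
// fail with a specific errno, or flag a driver bug.  A data-driven
// equivalent could look like this invented fragment, which covers
// only a few of the codes handled below.
//
//	struct erpa_action {
//		__u8 erpa;	/* sense byte 3 */
//		int err;	/* errno to fail with; 0 means retry */
//		const char *msg;
//	};
//
//	static const struct erpa_action erpa_actions[] = {
//		{ 0x22, EIO,    "path equipment check" },
//		{ 0x30, EACCES, "medium is write protected" },
//		{ 0x33, EIO,    "cartridge load failure" },
//		{ 0x47, EIO,    "volume fenced" },
//		{ 0x54, 0,      "channel interface recovery" },
//	};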
- PRINT_WARN ("Non-error sense was found. Unit-check will be ignored, expect errors...\n"); - return; - case 0x21: - // Data streaming not operational. Cu switches to interlock mode, we reissue the command. - PRINT_WARN ("Data streaming not operational. Switching to interlock-mode! \n"); - tape34xx_error_recovery_do_retry(ti); - return; - case 0x22: - // Path equipment check. Might be drive adapter error, buffer error on the lower interface, internal path not useable, or error during cartridge load. - // All of the above are not recoverable - PRINT_WARN ("A path equipment check occurred. One of the following conditions occurred:\n"); - PRINT_WARN ("drive adapter error,buffer error on the lower interface, internal path not useable, error during cartridge load.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x23: - // Read data check. Should have been be covered earlier -> Bug! - tape34xx_error_recovery_HWBUG(ti,7); - return; - case 0x24: - // Load display check. Load display was command was issued, but the drive is displaying a drive check message. Can be threated as "device end". - tape34xx_error_recovery_succeded(ti); - return; - case 0x25: - // Write data check. Should have been covered earlier -> Bug! - tape34xx_error_recovery_HWBUG(ti,8); - return; - case 0x26: - // Data check (read opposite). Should have been covered earlier -> Bug! - tape34xx_error_recovery_HWBUG(ti,9); - return; - case 0x27: - // Command reject. May indicate illegal channel program or buffer over/underrun. - // Since all channel programms are issued by this driver and ought be correct, - // we assume a over/underrun situaltion and retry the channel program. - tape34xx_error_recovery_do_retry(ti); - return; - case 0x28: - // Write id mark check. Should have beed covered earlier -> bug! - tape34xx_error_recovery_HWBUG(ti,10); - return; - case 0x29: - // Function incompatible. Either idrc is on but hardware not capable doing idrc - // or a perform subsystem func is issued and the cu is not online. Anyway, this - // cannot be recovered and is an I/O error. - PRINT_WARN ("Function incompatible. Try to switch off idrc! \n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x2a: - // Unsolicited environmental data. An internal counter overflows, we can ignore - // this and reissue the cmd. - tape34xx_error_recovery_do_retry(ti); - return; - case 0x2b: - // Environmental data present. Indicates either unload completed ok or read buffered - // log command completed ok. - if (tapestate_get(ti)==TS_RUN_INIT) { - // Rewind unload completed ok. - tape34xx_error_recovery_succeded(ti); - return; - } - // Since we do not issue read buffered log commands, this should never occur -> bug. - tape34xx_error_recovery_HWBUG(ti,11); - return; - case 0x2c: - // Permanent equipment check. cu has tried recovery, but did not succeed. This is an - // I/O error. - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x2d: - // Data security erase failure. - if (tapestate_get(ti)==TS_DSE_INIT) { - // report an I/O error - tape34xx_error_recovery_has_failed(ti,EIO); - return; - } - // Data security erase failure, but no such command issued. This is a bug. - tape34xx_error_recovery_HWBUG(ti,12); - return; - case 0x2e: - // Not capable. This indicates either that the drive fails reading the format id mark - // or that that format specified is not supported by the drive. We write a message and - // return an I/O error. 
- PRINT_WARN("Drive not capable processing the tape format!"); - tape34xx_error_recovery_has_failed(ti,EMEDIUMTYPE); - return; - case 0x2f: - // This erpa is reserved. This is a bug. - tape34xx_error_recovery_HWBUG(ti,13); - return; - case 0x30: - // The medium is write protected, while trying to write on it. We'll report this. - PRINT_WARN("Medium is write protected!\n"); - tape34xx_error_recovery_has_failed(ti,EACCES); - return; - case 0x31: - // Tape void. Should have beed covered ealier -> bug - tape34xx_error_recovery_HWBUG(ti,14); - return; - case 0x32: - // Tension loss. We cannot recover this, it's an I/O error. - PRINT_WARN("The drive lost tape tension.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x33: - // Load Failure. The catridge was not inserted correctly or the tape is not threaded - // correctly. We cannot recover this, the user has to reload the catridge. - PRINT_WARN("Cartridge load failure. Reload the cartridge and try again.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x34: - // Unload failure. The drive cannot maintain tape tension and control tape movement - // during an unload operation. - PRINT_WARN("Failure during cartridge unload. Please try manually.\n"); - if (tapestate_get(ti)!=TS_RUN_INIT) { - tape34xx_error_recovery_HWBUG(ti,15); - return; - } - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x35: - // Drive equipment check. One of the following: - // - cu cannot recover from a drive detected error - // - a check code message is displayed on drive message/load displays - // - the cartridge loader does not respond correctly - // - a failure occurs during an index, load, or unload cycle - PRINT_WARN("Equipment check! Please check the drive and the cartridge loader.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x36: - switch (cu_type) { - case 0x3480: - // This erpa is reserved for 3480 -> BUG - tape34xx_error_recovery_HWBUG(ti,16); - return; - case 0x3490: - // End of data. This is a permanent I/O error, which cannot be recovered. - // A read-type command has reached the end-of-data mark. - tape34xx_error_recovery_has_failed(ti,EIO); - return; - } - case 0x37: - // Tape length error. The tape is shorter than reported in the beginning-of-tape data. - PRINT_WARN("Tape length error.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x38: - // Physical end of tape. A read/write operation reached the physical end of tape. - if (tapestate_get(ti)==TS_WRI_INIT || - tapestate_get(ti)==TS_DSE_INIT || - tapestate_get(ti)==TS_EGA_INIT || - tapestate_get(ti)==TS_WTM_INIT){ - tape34xx_error_recovery_has_failed(ti,ENOSPC); - } else { - tape34xx_error_recovery_has_failed(ti,EIO); - } - return; - case 0x39: - // Backward at BOT. The drive is at BOT and is requestet to move backward. - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x3a: - // Drive switched not ready, but the command needs the drive to be ready. - PRINT_WARN("Drive not ready. Turn the ready/not ready switch to ready position and try again.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x3b: - // Manual rewind or unload. This causes an I/O error. - PRINT_WARN("Medium was rewound or unloaded manually. Expect errors! 
-	case 0x3b:
-		// Manual rewind or unload. This causes an I/O error.
-		PRINT_WARN("Medium was rewound or unloaded manually. Expect errors! Please do only use the mtoffl and mtrew ioctl to unload tapes or rewind tapes.\n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x3c:
-	case 0x3d:
-	case 0x3e:
-	case 0x3f:
-		// These erpas are reserved -> BUG
-		tape34xx_error_recovery_HWBUG(ti,17);
-		return;
-	case 0x40:
-		// Overrun error. This should have been covered earlier -> bug.
-		tape34xx_error_recovery_HWBUG(ti,18);
-		return;
-	case 0x41:
-		// Record sequence error. This should have been covered earlier -> bug.
-		tape34xx_error_recovery_HWBUG(ti,19);
-		return;
-	case 0x42:
-		// Degraded mode. A condition that can cause degraded performace is detected.
-		PRINT_WARN("Subsystem is running in degraded mode. This may compromise your performace.\n");
-		tape34xx_error_recovery_do_retry(ti);
-		return;
-	case 0x43:
-		// Drive not ready. Probably swith the ready/not ready switch to ready?
-		PRINT_WARN("The drive is not ready. Maybe no medium in?\n");
-		tape34xx_error_recovery_has_failed(ti,ENOMEDIUM);
-		return;
-	case 0x44:
-		// Locate Block unsuccessfull. We'll report this.
-		if ((tapestate_get(ti)!=TS_BLOCK_INIT) &&
-		    (tapestate_get(ti)!=TS_LBL_INIT)) {
-			tape34xx_error_recovery_HWBUG(ti,20);	// No locate block was issued...
-			return;
-		}
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x45:
-		// The drive is assigned elsewhere [to a different channel path/computer].
-		PRINT_WARN("The drive is assigned elsewhere.\n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x46:
-		// Drive not online. Drive may be switched offline, the power supply may be switched off
-		// or the drive address may not be set correctly.
-		PRINT_WARN("The drive is not online.");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x47:
-		// Volume fenced. cu reports volume integrity is lost!
-		PRINT_WARN("Volume fenced. The volume integrity is lost! \n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x48:
-		// Log sense data and retry request. We'll do so...
-		tape34xx_error_recovery_do_retry(ti);
-		return;
-	case 0x49:
-		// Bus out check. A parity check error on the bus was found.
-		PRINT_WARN("Bus out check. A data transfer over the bus was corrupted.\n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x4a:
-		// Control unit erp failed. We'll report this.
-		PRINT_WARN("The control unit failed recovering an I/O error.\n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x4b:
-		// Cu and drive incompatible. The drive requests micro-program patches, which are not available on the cu.
-		PRINT_WARN("The drive needs microprogram patches from the control unit, which are not available.\n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x4c:
-		// Recovered Check-One failure. Cu develops a hardware error, but is able to recover. We'll reissue the command.
-		tape34xx_error_recovery_do_retry(ti);
-		return;
-	case 0x4d:
-		switch (cu_type) {
-		case 0x3480:
-			// This erpa is reserved for 3480 -> bug
-			tape34xx_error_recovery_HWBUG(ti,21);
-			return;
-		case 0x3490:
-			// Resetting event received. Since the driver does not support resetting event recovery
-			// (which has to be handled by the I/O Layer), we'll report and retry our command.
-			tape34xx_error_recovery_do_retry(ti);
-			return;
-		}
-	case 0x4e:
-		switch (cu_type) {
-		case 0x3480:
-			// This erpa is reserved for 3480 -> bug.
-			tape34xx_error_recovery_HWBUG(ti,22);
-			return;
-		case 0x3490:
-			// Maximum block size exeeded. This indicates, that the block to be written is larger
-			// than allowed for buffered mode. We'll report this...
-			PRINT_WARN("Maximum block size for buffered mode exceeded.\n");
-			tape34xx_error_recovery_has_failed(ti,ENOBUFS);
-			return;
-		}
-	case 0x4f:
-		// These erpas are reserved -> bug
-		tape34xx_error_recovery_HWBUG(ti,23);
-		return;
-	case 0x50:
-		// Read buffered log (Overflow). Cu is running in extended beffered log mode, and a counter overflows.
-		// This should never happen, since we're never running in extended buffered log mode -> bug.
-		tape34xx_error_recovery_do_retry(ti);
-		return;
-	case 0x51:
-		// Read buffered log (EOV). EOF processing occurs while the cu is in extended buffered log mode.
-		// This should never happen, since we're never running in extended buffered log mode -> bug.
-		tape34xx_error_recovery_do_retry(ti);
-		return;
-	case 0x52:
-		// End of Volume complete. Rewind unload completed ok. We'll report to the user...
-		if (tapestate_get(ti)!=TS_RUN_INIT) {
-			tape34xx_error_recovery_HWBUG(ti,24);
-			return;
-		}
-		tape34xx_error_recovery_succeded(ti);
-		return;
-	case 0x53:
-		// Global command intercept. We'll have to reissue our command.
-		tape34xx_error_recovery_do_retry(ti);
-		return;
-	case 0x54:
-		// Channel interface recovery (temporary). This can be recovered by reissuing the command.
-		tape34xx_error_recovery_do_retry(ti);
-		return;
-	case 0x55:
-		// Channel interface recovery (permanent). This cannot be recovered, we'll inform the user.
-		PRINT_WARN("A permanent channel interface error occurred.\n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x56:
-		// Channel protocol error. This cannot be recovered.
-		PRINT_WARN("A channel protocol error occurred.\n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x57:
-		switch (cu_type) {
-		case 0x3480:
-			// Attention intercept. We have to reissue the command.
-			PRINT_WARN("An attention intercept occurred, which will be recovered.\n");
-			tape34xx_error_recovery_do_retry(ti);
-			return;
-		case 0x3490:
-			// Global status intercept. We have to reissue the command.
-			PRINT_WARN("An global status intercept was received, which will be recovered.\n");
-			tape34xx_error_recovery_do_retry(ti);
-			return;
-		}
-	case 0x58:
-	case 0x59:
-		// These erpas are reserved -> bug.
-		tape34xx_error_recovery_HWBUG(ti,25);
-		return;
-	case 0x5a:
-		// Tape length incompatible. The tape inserted is too long,
-		// which could cause damage to the tape or the drive.
-		PRINT_WARN("Tape length incompatible [should be IBM Cartridge System Tape]. May cause damage to drive or tape.n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x5b:
-		// Format 3480 XF incompatible
-		if (sense[1]&SENSE_BEGINNING_OF_TAPE) {
-			// Everything is fine. The tape will be overwritten in a different format.
-			tape34xx_error_recovery_do_retry(ti);
-			return;
-		}
-		PRINT_WARN("Tape format is incompatible to the drive, which writes 3480-2 XF.\n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x5c:
-		// Format 3480-2 XF incompatible
-		PRINT_WARN("Tape format is incompatible to the drive. The drive cannot access 3480-2 XF volumes.\n");
-		tape34xx_error_recovery_has_failed(ti,EIO);
-		return;
-	case 0x5d:
-		// Tape length violation.
-		PRINT_WARN("Tape length violation [should be IBM Enhanced Capacity Cartridge System Tape]. May cause damage to drive or tape.\n");
-		tape34xx_error_recovery_has_failed(ti,EMEDIUMTYPE);
-		return;
- PRINT_WARN("The volume is recorded using an incompatible compaction algorith, which is not supported by the control unit.\n"); - tape34xx_error_recovery_has_failed(ti,EMEDIUMTYPE); - return; - default: - // Reserved erpas -> bug - tape34xx_error_recovery_HWBUG(ti,26); - return; - } -} - -void tape34xx_error_recovery_has_failed (tape_info_t* ti,int error_id) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"xerp fail"); - debug_text_event (tape_debug_area,3,(((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : "UNKNOWN")); -#endif - if ((tapestate_get(ti)!=TS_UNUSED) && (tapestate_get(ti)!=TS_IDLE)) { - tape_dump_sense(&ti->devstat); - ti->rc = -error_id; - ti->wanna_wakeup=1; - switch (tapestate_get(ti)) { - case TS_REW_RELEASE_INIT: - case TS_RFO_INIT: - case TS_RBA_INIT: - tapestate_set(ti,TS_FAILED); - wake_up (&ti->wq); - break; - case TS_BLOCK_INIT: - tapestate_set(ti,TS_FAILED); - schedule_tapeblock_exec_IO(ti); - break; - default: - tapestate_set(ti,TS_FAILED); - wake_up_interruptible (&ti->wq); - } - } else { - PRINT_WARN("Recieved an unsolicited IRQ.\n"); - tape_dump_sense(&ti->devstat); - } -} - -void tape34xx_error_recovery_succeded(tape_info_t* ti) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"xerp done"); - debug_text_event (tape_debug_area,3,(((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : "UNKNOWN")); -#endif - if ((tapestate_get(ti)!=TS_UNUSED) && (tapestate_get(ti)!=TS_DONE)) { - tapestate_event (ti, TE_DONE); - } else { - PRINT_WARN("Recieved an unsolicited IRQ.\n"); - tape_dump_sense(&ti->devstat); - } -} - -void tape34xx_error_recovery_do_retry(tape_info_t* ti) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"xerp retr"); - debug_text_event (tape_debug_area,3,(((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : "UNKNOWN")); -#endif - if ((tapestate_get(ti)!=TS_UNUSED) && (tapestate_get(ti)!=TS_IDLE)) { - tape_dump_sense(&ti->devstat); - while (do_IO (ti->devinfo.irq, ti->cqr->cpaddr, (unsigned long) ti->cqr, 0x00, ti->cqr->options)); - } else { - PRINT_WARN("Recieved an unsolicited IRQ.\n"); - tape_dump_sense(&ti->devstat); - } -} - -void -tape34xx_error_recovery_read_opposite (tape_info_t* ti) { - switch (tapestate_get(ti)) { - case TS_RFO_INIT: - // We did read forward, but the data could not be read *correctly*. - // We will read backward and then skip forward again. - ti->cqr=tape34xx_read_opposite(ti,0); - if (ti->cqr==NULL) - tape34xx_error_recovery_has_failed(ti,EIO); - else - tape34xx_error_recovery_do_retry(ti); - break; - case TS_RBA_INIT: - // We tried to read forward and backward, but hat no success -> failed. - tape34xx_error_recovery_has_failed(ti,EIO); - break; - case TS_BLOCK_INIT: - tape34xx_error_recovery_do_retry(ti); - break; - default: - PRINT_WARN("read_opposite_recovery_called_with_state:%s\n", - (((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : "UNKNOWN")); - } -} - -void -tape34xx_error_recovery_HWBUG (tape_info_t* ti,int condno) { - devstat_t* stat=&ti->devstat; - PRINT_WARN("An unexpected condition #%d was caught in tape error recovery.\n",condno); - PRINT_WARN("Please report this incident.\n"); - PRINT_WARN("State of the tape:%s\n", - (((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? 
-
-void
-tape34xx_error_recovery_HWBUG (tape_info_t* ti,int condno) {
-	devstat_t* stat=&ti->devstat;
-	PRINT_WARN("An unexpected condition #%d was caught in tape error recovery.\n",condno);
-	PRINT_WARN("Please report this incident.\n");
-	PRINT_WARN("State of the tape:%s\n",
-		   (((tapestate_get (ti) < TS_SIZE) &&
-		     (tapestate_get (ti) >= 0)) ?
-		    state_verbose[tapestate_get (ti)] : "UNKNOWN"));
-	PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X "
-		    " %02X%02X%02X%02X %02X%02X%02X%02X \n",
-		    stat->ii.sense.data[0], stat->ii.sense.data[1],
-		    stat->ii.sense.data[2], stat->ii.sense.data[3],
-		    stat->ii.sense.data[4], stat->ii.sense.data[5],
-		    stat->ii.sense.data[6], stat->ii.sense.data[7],
-		    stat->ii.sense.data[8], stat->ii.sense.data[9],
-		    stat->ii.sense.data[10], stat->ii.sense.data[11],
-		    stat->ii.sense.data[12], stat->ii.sense.data[13],
-		    stat->ii.sense.data[14], stat->ii.sense.data[15]);
-	PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X "
-		    " %02X%02X%02X%02X %02X%02X%02X%02X \n",
-		    stat->ii.sense.data[16], stat->ii.sense.data[17],
-		    stat->ii.sense.data[18], stat->ii.sense.data[19],
-		    stat->ii.sense.data[20], stat->ii.sense.data[21],
-		    stat->ii.sense.data[22], stat->ii.sense.data[23],
-		    stat->ii.sense.data[24], stat->ii.sense.data[25],
-		    stat->ii.sense.data[26], stat->ii.sense.data[27],
-		    stat->ii.sense.data[28], stat->ii.sense.data[29],
-		    stat->ii.sense.data[30], stat->ii.sense.data[31]);
-	tape34xx_error_recovery_has_failed(ti,EIO);
-}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape34xx.h linux.21rc5-ac2/drivers/s390/char/tape34xx.h
--- linux.21rc5/drivers/s390/char/tape34xx.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/s390/char/tape34xx.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,183 +0,0 @@
-
-/***************************************************************************
- *
- * drivers/s390/char/tape34xx.h
- *	common tape device discipline for 34xx tapes.
- *
- * S390 and zSeries version
- * Copyright (C) 2001 IBM Corporation
- * Author(s): Carsten Otte
- *	      Tuan Ngo-Anh
- *
- ****************************************************************************
- */
-
-#ifndef _TAPE34XX_H
-
-#define _TAPE34XX_H
-
-/*
- * The CCW commands for the Tape type of command.
- */
-
-#define INVALID_00		0x00	/* Invalid cmd */
-#define BACKSPACEBLOCK		0x27	/* Back Space block */
-#define BACKSPACEFILE		0x2f	/* Back Space file */
-#define DATA_SEC_ERASE		0x97	/* Data security erase */
-#define ERASE_GAP		0x17	/* Erase Gap */
-#define FORSPACEBLOCK		0x37	/* Forward space block */
-#define FORSPACEFILE		0x3F	/* Forward Space file */
-#define FORCE_STREAM_CNT	0xEB	/* Forced streaming count # */
-#define NOP			0x03	/* No operation */
-#define READ_FORWARD		0x02	/* Read forward */
-#define REWIND			0x07	/* Rewind */
-#define REWIND_UNLOAD		0x0F	/* Rewind and Unload */
-#define SENSE			0x04	/* Sense */
-#define NEW_MODE_SET		0xEB	/* Guess it is Mode set */
-#define WRITE_CMD		0x01	/* Write */
-#define WRITETAPEMARK		0x1F	/* Write Tape Mark */
-
-#define ASSIGN			0xB7	/* 3420 REJECT,3480 OK */
-#define CONTROL_ACCESS		0xE3	/* Set high speed */
-#define DIAG_MODE_SET		0x0B	/* 3420 NOP, 3480 REJECT*/
-#define LOAD_DISPLAY		0x9F	/* 3420 REJECT,3480 OK */
-#define LOCATE			0x4F	/* 3420 REJ, 3480 NOP */
-#define LOOP_WRITE_TO_READ	0x8B	/* 3480 REJECT */
-#define MODE_SET_DB		0xDB	/* 3420 REJECT,3480 OK */
-#define MODE_SET_C3		0xC3	/* for 3420 */
-#define MODE_SET_CB		0xCB	/* for 3420 */
-#define MODE_SET_D3		0xD3	/* for 3420 */
-#define READ_BACKWARD		0x0C	/* */
-#define READ_BLOCK_ID		0x22	/* 3420 REJECT,3480 OK */
-#define READ_BUFFER		0x12	/* 3420 REJECT,3480 OK */
-#define READ_BUFF_LOG		0x24	/* 3420 REJECT,3480 OK */
-#define RELEASE			0xD4	/* 3420 NOP, 3480 REJECT*/
-#define REQ_TRK_IN_ERROR	0x1B	/* 3420 NOP, 3480 REJECT*/
-#define RESERVE			0xF4	/* 3420 NOP, 3480 REJECT*/
-#define SENSE_GROUP_ID		0x34	/* 3420 REJECT,3480 OK */
-#define SENSE_ID		0xE4	/* 3420 REJECT,3480 OK */
-#define READ_DEV_CHAR		0x64	/* Read device characteristics */
-#define SET_DIAGNOSE		0x4B	/* 3420 NOP, 3480 REJECT*/
-#define SET_GROUP_ID		0xAF	/* 3420 REJECT,3480 OK */
-#define SET_TAPE_WRITE_IMMED	0xC3	/* for 3480 */
-#define SUSPEND			0x5B	/* 3420 REJ, 3480 NOP */
-#define SYNC			0x43	/* Synchronize (flush buffer) */
-#define UNASSIGN		0xC7	/* 3420 REJECT,3480 OK */
-#define PERF_SUBSYS_FUNC	0x77	/* 3490 CMD */
-#define READ_CONFIG_DATA	0xFA	/* 3490 CMD */
-#define READ_MESSAGE_ID		0x4E	/* 3490 CMD */
-#define READ_SUBSYS_DATA	0x3E	/* 3490 CMD */
-#define SET_INTERFACE_ID	0x73	/* 3490 CMD */
-
-#ifndef MIN
-#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
-#endif
-
-
-#define BLOCKSIZE		4096	/* size of the tape rcds */
-
-#define COMMAND_CHAIN		CCW_FLAG_CC		/* redefine from irq.h */
-#define CHANNEL_END		DEV_STAT_CHN_END	/* redefine from irq.h */
-#define DEVICE_END		DEV_STAT_DEV_END	/* redefine from irq.h */
-#define UNIT_CHECK		DEV_STAT_UNIT_CHECK	/* redefine from irq.h */
-#define UNIT_EXCEPTION		DEV_STAT_UNIT_EXCEP	/* redefine from irq.h */
-#define CONTROL_UNIT_END	DEV_STAT_CU_END		/* redefine from irq.h */
-#define INCORR_LEN		SCHN_STAT_INCORR_LEN	/* redefine from irq.h */
-
-#define SENSE_COMMAND_REJECT		0x80
-#define SENSE_INTERVENTION_REQUIRED	0x40
-#define SENSE_BUS_OUT_CHECK		0x20
-#define SENSE_EQUIPMENT_CHECK		0x10
-#define SENSE_DATA_CHECK		0x08
-#define SENSE_OVERRUN			0x04
-#define SENSE_DEFERRED_UNIT_CHECK	0x02
-#define SENSE_ASSIGNED_ELSEWHERE	0x01
-
-#define SENSE_LOCATE_FAILURE		0x80
-#define SENSE_DRIVE_ONLINE		0x40
-#define SENSE_RESERVED			0x20
-#define SENSE_RECORD_SEQUENCE_ERR	0x10
-#define SENSE_BEGINNING_OF_TAPE		0x08
-#define SENSE_WRITE_MODE		0x04
-#define SENSE_WRITE_PROTECT		0x02
-#define SENSE_NOT_CAPABLE		0x01
-
-#define SENSE_CHANNEL_ADAPTER_CODE	0xE0
-#define SENSE_CHANNEL_ADAPTER_LOC	0x10
-#define SENSE_REPORTING_CU		0x08
-#define SENSE_AUTOMATIC_LOADER		0x04
-#define SENSE_TAPE_SYNC_MODE		0x02
-#define SENSE_TAPE_POSITIONING		0x01
-
-typedef struct _tape34xx_disc_data_t {
-	__u8 modeset_byte;
-} tape34xx_disc_data_t __attribute__ ((packed, aligned(8)));
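
The SENSE_* constants above are single-bit masks within the first sense bytes, so classifying a unit check is plain bit testing, exactly as tape34xx_error_recovery does. A hypothetical predicate (not part of the patch) combining two of the masks:

	/* True for the command-reject-because-write-protected case
	 * that the error recovery maps to EACCES. */
	static int
	is_write_protect_reject(const __u8 *sense)
	{
		return (sense[0] & SENSE_COMMAND_REJECT) &&
		       (sense[1] & SENSE_WRITE_PROTECT);
	}
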
-
-/* discipline functions */
-int tape34xx_ioctl_overload (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
-ccw_req_t * tape34xx_write_block (const char *data, size_t count, tape_info_t * ti);
-void tape34xx_free_write_block (ccw_req_t * cqr, tape_info_t * ti);
-ccw_req_t * tape34xx_read_block (const char *data, size_t count, tape_info_t * ti);
-void tape34xx_free_read_block (ccw_req_t * cqr, tape_info_t * ti);
-void tape34xx_clear_read_block (ccw_req_t * cqr, tape_info_t * ti);
-ccw_req_t * tape34xx_mtfsf (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtbsf (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtfsr (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtbsr (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtweof (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtrew (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtoffl (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtnop (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtbsfm (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtfsfm (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mteom (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mterase (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtsetdensity (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtseek (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mttell (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtsetdrvbuffer (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtlock (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtunlock (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtload (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtunload (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtcompression (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtsetpart (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtmkpart (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtiocget (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_mtiocpos (tape_info_t * ti, int count);
-ccw_req_t * tape34xx_bread (struct request *req, tape_info_t* ti,int tapeblock_major);
-ccw_req_t * tape34xx_bwrite (struct request *req, tape_info_t* ti,int tapeblock_major);
-void tape34xx_free_bread (ccw_req_t*,struct _tape_info_t*);
-void tape34xx_free_bwrite (ccw_req_t*,struct _tape_info_t*);
-
-/* Event handlers */
-void tape34xx_default_handler (tape_info_t * ti);
-void tape34xx_unexpect_uchk_handler (tape_info_t * ti);
-void tape34xx_unused_done(tape_info_t* ti);
-void tape34xx_idle_done(tape_info_t* ti);
-void tape34xx_block_done(tape_info_t* ti);
-void tape34xx_bsf_init_done(tape_info_t* ti);
-void tape34xx_dse_init_done(tape_info_t* ti);
-void tape34xx_fsf_init_done(tape_info_t* ti);
-void tape34xx_bsb_init_done(tape_info_t* ti);
-void tape34xx_fsb_init_done(tape_info_t* ti);
-void tape34xx_lbl_init_done(tape_info_t* ti);
-void tape34xx_nop_init_done(tape_info_t* ti);
-void tape34xx_rfo_init_done(tape_info_t* ti);
-void tape34xx_rbi_init_done(tape_info_t* ti);
-void tape34xx_rew_init_done(tape_info_t* ti);
-void tape34xx_rew_release_init_done(tape_info_t* ti);
-void tape34xx_run_init_done(tape_info_t* ti);
-void tape34xx_wri_init_done(tape_info_t* ti);
-void tape34xx_wtm_init_done(tape_info_t* ti);
-
-extern void schedule_tapeblock_exec_IO (tape_info_t *ti);
-
-// the error recovery stuff:
-void tape34xx_error_recovery (tape_info_t* ti);
-void tape34xx_error_recovery_has_failed (tape_info_t* ti,int error_id);
-void tape34xx_error_recovery_succeded(tape_info_t* ti);
-void tape34xx_error_recovery_do_retry(tape_info_t* ti);
-void tape34xx_error_recovery_read_opposite (tape_info_t* ti);
-void tape34xx_error_recovery_HWBUG (tape_info_t* ti,int condno);
-#endif // _TAPE34XX_H
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape3590.c linux.21rc5-ac2/drivers/s390/char/tape3590.c
--- linux.21rc5/drivers/s390/char/tape3590.c	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/s390/char/tape3590.c	1970-01-01 01:00:00.000000000 +0100
@@ -1 +0,0 @@
-// tbd
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape3590.h linux.21rc5-ac2/drivers/s390/char/tape3590.h
--- linux.21rc5/drivers/s390/char/tape3590.h	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/s390/char/tape3590.h	1970-01-01 01:00:00.000000000 +0100
@@ -1 +0,0 @@
-// tbd
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape_block.c linux.21rc5-ac2/drivers/s390/char/tape_block.c
--- linux.21rc5/drivers/s390/char/tape_block.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/drivers/s390/char/tape_block.c	2003-04-25 13:53:58.000000000 +0100
@@ -0,0 +1,676 @@
+/*
+ * drivers/s390/char/tape_block.c
+ *	block device frontend for tape device driver
+ *
+ * S390 and zSeries version
+ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Carsten Otte
+ *	      Tuan Ngo-Anh
+ *	      Martin Schwidefsky
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "tape.h"
+#include "tape_std.h"
+
+#define PRINTK_HEADER "TBLOCK:"
+
+#define TAPEBLOCK_DEVFSMODE	0060644	/* brwxrw-rw- */
+#define TAPEBLOCK_MAX_SEC	100
+#define TAPEBLOCK_MIN_REQUEUE	3
+
+/*
+ * file operation structure for tape block frontend
+ */
+static int tapeblock_open(struct inode *, struct file *);
+static int tapeblock_release(struct inode *, struct file *);
+static int tapeblock_ioctl(
+	struct inode *, struct file *, unsigned int, unsigned long);
+
+static struct block_device_operations tapeblock_bdops = {
+	.owner	 = THIS_MODULE,
+	.open	 = tapeblock_open,
+	.release = tapeblock_release,
+	.ioctl	 = tapeblock_ioctl,
+};
+
+int tapeblock_major = 0;
+
+/*
+ * Some helper inlines
+ */
+static inline int tapeblock_size(int minor) {
+	return blk_size[tapeblock_major][minor];
+}
+static inline int tapeblock_ssize(int minor) {
+	return blksize_size[tapeblock_major][minor];
+}
+static inline int tapeblock_hw_ssize(int minor) {
+	return hardsect_size[tapeblock_major][minor];
+}
+
+/*
+ * Post finished request.
+ */
+static inline void
+tapeblock_end_request(struct request *req, int uptodate)
+{
+	if (end_that_request_first(req, uptodate, "tBLK"))
+		BUG();
+	end_that_request_last(req);
+}
+
+static void
+__tapeblock_end_request(struct tape_request *ccw_req, void *data)
+{
+	struct tape_device *device;
+	struct request *req;
+
+	device = ccw_req->device;
+	req = (struct request *) data;
+	if(!device || !req)
+		BUG();
+
+	tapeblock_end_request(req, ccw_req->rc == 0);
+	if (ccw_req->rc == 0)
+		/* Update position. */
+		device->blk_data.block_position =
+			(req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
+	else
+		/* We lost the position information due to an error. */
+		device->blk_data.block_position = -1;
+
+	device->discipline->free_bread(ccw_req);
+
+	if (!list_empty(&device->req_queue) ||
+	    !list_empty(&device->blk_data.request_queue.queue_head))
+		tasklet_schedule(&device->blk_data.tasklet);
+}
+
+/*
+ * Fetch requests from block device queue.
+ */
+static inline void
+__tape_process_blk_queue(struct tape_device *device, struct list_head *new_req)
+{
+	request_queue_t *queue;
+	struct list_head *l;
+	struct request *req;
+	struct tape_request *ccw_req;
+	int nr_queued;
+
+	if (!TAPE_BLOCKDEV(device)) {
+		PRINT_WARN("can't process queue. Not a tape blockdevice.\n");
+		return;
+	}
+
+	nr_queued = 0;
+	queue = &device->blk_data.request_queue;
+
+	/* Count number of requests on ccw queue. */
+	list_for_each(l, &device->req_queue)
+		nr_queued++;
+
+	while (
+		!queue->plugged &&
+		!list_empty(&queue->queue_head) &&
+		nr_queued < TAPEBLOCK_MIN_REQUEUE
+	) {
+		/* tape_block_next_request(queue); */
+		req = blkdev_entry_next_request(&queue->queue_head);
+
+		if (req->cmd == WRITE) {
+			DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
+			blkdev_dequeue_request(req);
+			tapeblock_end_request(req, 0);
+			continue;
+		}
+		ccw_req = device->discipline->bread(device, req);
+		if (IS_ERR(ccw_req)) {
+			if (PTR_ERR(ccw_req) == -ENOMEM)
+				break; /* don't try again */
+			DBF_EVENT(1, "TBLOCK: bread failed\n");
+			blkdev_dequeue_request(req);
+			tapeblock_end_request(req, 0);
+			continue;
+		}
+		blkdev_dequeue_request(req);
+		ccw_req->callback = __tapeblock_end_request;
+		ccw_req->callback_data = (void *) req;
+		ccw_req->retries = TAPEBLOCK_RETRIES;
+
+		list_add_tail(&ccw_req->list, new_req);
+		nr_queued++;
+	}
+}
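
tapeblock_end_request above follows the 2.4 completion convention: end_that_request_first retires the request's buffer_heads and returns nonzero while some remain, and end_that_request_last frees the request itself. Because tape requests complete as a whole, the driver treats a partial completion as a bug. The general pattern, as a hypothetical sketch rather than the patch's code:

	static void
	demo_end_request(struct request *req, int uptodate)
	{
		/* Retire every buffer_head, then the request itself. */
		while (end_that_request_first(req, uptodate, "demo"))
			;
		end_that_request_last(req);
	}
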
+
+/*
+ * Feed requests to the tape device.
+ */
+static inline int
+tape_queue_requests(struct tape_device *device, struct list_head *new_req)
+{
+	struct list_head *l, *n;
+	struct tape_request *ccw_req;
+	struct request *req;
+	int rc, fail;
+
+	fail = 0;
+	list_for_each_safe(l, n, new_req) {
+		ccw_req = list_entry(l, struct tape_request, list);
+		list_del(&ccw_req->list);
+
+		rc = tape_do_io_async(device, ccw_req);
+		if (rc) {
+			/*
+			 * Start/enqueueing failed. No retries in
+			 * this case.
+			 */
+			DBF_EVENT(5, "enqueueing failed\n");
+			req = (struct request *) ccw_req->callback_data;
+			tapeblock_end_request(req, 0);
+			device->discipline->free_bread(ccw_req);
+			fail = 1;
+		}
+	}
+	return fail;
+}
+
+/*
+ * Tape request queue function. Called from ll_rw_blk.c
+ */
+static void
+tapeblock_request_fn(request_queue_t *queue)
+{
+	struct list_head new_req;
+	struct tape_device *device;
+
+	device = (struct tape_device *) queue->queuedata;
+	if(device == NULL)
+		BUG();
+
+	while (!list_empty(&queue->queue_head)) {
+		INIT_LIST_HEAD(&new_req);
+		spin_lock(get_irq_lock(device->devinfo.irq));
+		__tape_process_blk_queue(device, &new_req);
+		spin_unlock(get_irq_lock(device->devinfo.irq));
+		/*
+		 * Now queue the new request to the tape. This needs to be
+		 * done without the device lock held.
+		 */
+		if (tape_queue_requests(device, &new_req) == 0)
+			/* All requests queued. That's enough for now. */
+			break;
+	}
+}
+
+/*
+ * Returns block frontend request queue for a tape device.
+ * FIXME: on shutdown make sure ll_rw_blk can put requests on a dead queue.
+ */
+static request_queue_t *
+tapeblock_get_queue(kdev_t kdev)
+{
+	struct tape_device *device;
+	request_queue_t *queue;
+
+	if (major(kdev) != tapeblock_major)
+		return NULL;
+
+	device = tape_get_device(minor(kdev) >> 1);
+	if (IS_ERR(device))
+		return NULL;
+
+	queue = &device->blk_data.request_queue;
+	tape_put_device(device);
+	return queue;
+}
+
+/*
+ * Acquire the device lock and process queues for the device.
+ */
+static void
+tapeblock_tasklet(unsigned long data)
+{
+	struct list_head new_req;
+	struct tape_device *device;
+
+	device = (struct tape_device *) data;
+	while (!list_empty(&device->blk_data.request_queue.queue_head)) {
+		INIT_LIST_HEAD(&new_req);
+		spin_lock_irq(get_irq_lock(device->devinfo.irq));
+		__tape_process_blk_queue(device, &new_req);
+		spin_unlock_irq(get_irq_lock(device->devinfo.irq));
+		/*
+		 * Now queue the new request to the tape. This needs to be
+		 * done without the device lock held.
+		 */
+		if (tape_queue_requests(device, &new_req) == 0)
+			/* All requests queued. That's enough for now. */
+			break;
+	}
+}
+
+/*
+ * Create block directory with disc entries
+ */
+static int
+tapeblock_mkdevfstree (struct tape_device *device)
+{
+#ifdef CONFIG_DEVFS_FS
+	device->blk_data.devfs_block_dir =
+		devfs_mk_dir (device->devfs_dir, "block", device);
+	if (device->blk_data.devfs_block_dir == 0)
+		return -ENOENT;
+	device->blk_data.devfs_disc =
+		devfs_register(device->blk_data.devfs_block_dir,
+			       "disc", DEVFS_FL_DEFAULT,
+			       tapeblock_major, device->first_minor,
+			       TAPEBLOCK_DEVFSMODE, &tapeblock_bdops, device);
+	if (device->blk_data.devfs_disc == NULL) {
+		devfs_unregister(device->blk_data.devfs_block_dir);
+		return -ENOENT;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * Remove devfs entries
+ */
+static void
+tapeblock_rmdevfstree (struct tape_device *device)
+{
+#ifdef CONFIG_DEVFS_FS
+	if (device->blk_data.devfs_disc)
+		devfs_unregister(device->blk_data.devfs_disc);
+	if (device->blk_data.devfs_block_dir)
+		devfs_unregister(device->blk_data.devfs_block_dir);
+#endif
+}
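
The tasklet used above is the deferred half of this scheme: __tapeblock_end_request calls tasklet_schedule from interrupt context, and the queue is then drained at softirq time. A hypothetical lifecycle sketch (demo_dev is invented; the tasklet calls are the standard kernel API used by the new file):

	static struct tape_device *demo_dev;

	static void demo_setup(void)
	{
		/* Bind the tasklet to the device once at setup time. */
		tasklet_init(&demo_dev->blk_data.tasklet,
			     tapeblock_tasklet, (unsigned long) demo_dev);
	}
	static void demo_irq_path(void)
	{
		/* May be called repeatedly from interrupt context;
		 * schedules collapse until the tasklet actually runs. */
		tasklet_schedule(&demo_dev->blk_data.tasklet);
	}
	static void demo_teardown(void)
	{
		/* Waits for a running instance before removal. */
		tasklet_kill(&demo_dev->blk_data.tasklet);
	}
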
+/*
+ * This function is called for every new tapedevice
+ */
+int
+tapeblock_setup_device(struct tape_device * device)
+{
+	int rc;
+
+	/* FIXME: We should be able to sense the sector size */
+	blk_size[tapeblock_major][device->first_minor] = 0;
+	blksize_size[tapeblock_major][device->first_minor] =
+		hardsect_size[tapeblock_major][device->first_minor] =
+		TAPEBLOCK_HSEC_SIZE;
+
+	/* Create devfs entries. */
+	rc = tapeblock_mkdevfstree(device);
+	if (rc)
+		return rc;
+
+	/* Setup request queue and initialize gendisk for this device. */
+	device->blk_data.request_queue.queuedata = tape_clone_device(device);
+
+	/* As long as the tasklet is running it may access the device */
+	tasklet_init(&device->blk_data.tasklet, tapeblock_tasklet,
+		     (unsigned long) tape_clone_device(device));
+
+	blk_init_queue(&device->blk_data.request_queue, tapeblock_request_fn);
+	blk_queue_headactive(&device->blk_data.request_queue, 0);
+
+	tape_hotplug_event(device, tapeblock_major, TAPE_HOTPLUG_BLOCK_ADD);
+
+	set_device_ro(mk_kdev(tapeblock_major, device->first_minor), 1);
+	return 0;
+}
+
+void
+tapeblock_cleanup_device(struct tape_device *device)
+{
+	/* Prevent further requests to the block request queue. */
+	blk_size[tapeblock_major][device->first_minor] = 0;
+
+	tapeblock_rmdevfstree(device);
+
+	/* With the tasklet gone the reference is gone as well. */
+	tasklet_kill(&device->blk_data.tasklet);
+	tape_put_device(device);
+
+	/* Cleanup the request queue. */
+	blk_cleanup_queue(&device->blk_data.request_queue);
+
+	/* Remove reference in private data */
+	device->blk_data.request_queue.queuedata = NULL;
+	tape_put_device(device);
+
+	tape_hotplug_event(device, tapeblock_major, TAPE_HOTPLUG_BLOCK_REMOVE);
+}
+
+/*
+ * Detect number of blocks of the tape.
+ * FIXME: can we extend this to detect the block size as well?
+ * FIXME: (minor) On 34xx the block id also contains a format specification
+ *        which is unknown before the block was skipped or read at
+ *        least once. So detection is sometimes done a second time.
+ */
+int tapeblock_mediumdetect(struct tape_device *device)
+{
+	unsigned int bid;
+	unsigned int nr_of_blks;
+	int rc;
+
+	/*
+	 * Identify the first record's format
+	 */
+	if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
+		return rc;
+	if ((rc = tape_mtop(device, MTBSR, 1)) < 0)
+		return rc;
+
+	device->blk_data.block_position = 0;
+	if (tape_std_read_block_id(device, &bid)) {
+		rc = tape_mtop(device, MTREW, 1);
+		if (rc) {
+			device->blk_data.block_position = -1;
+			blk_size[tapeblock_major][device->first_minor] = 0;
+			return rc;
+		}
+		bid = 0;
+	}
+
+	if (bid != device->blk_data.start_block_id) {
+		device->blk_data.start_block_id = bid;
+		blk_size[tapeblock_major][device->first_minor] = 0;
+	}
+
+	if (blk_size[tapeblock_major][device->first_minor] > 0)
+		return 0;
+
+	PRINT_INFO("Detecting media size...\n");
+	blk_size[tapeblock_major][device->first_minor] = 0;
+
+	rc = tape_mtop(device, MTFSF, 1);
+	if (rc)
+		return rc;
+
+	rc = tape_mtop(device, MTTELL, 1);
+	if (rc < 0)
+		return rc;
+	nr_of_blks = rc - 1; /* don't count FM */
+
+	if (device->blk_data.start_block_id) {
+		rc = tape_std_seek_block_id(
+			device,
+			device->blk_data.start_block_id);
+	} else {
+		rc = tape_mtop(device, MTREW, 1);
+	}
+	if (rc)
+		return rc;
+
+	rc = tape_mtop(device, MTTELL, 1);
+	if (rc < 0)
+		return rc;
+
+	/* Don't include start offset */
+	nr_of_blks -= rc;
+
+	PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
+	if (tapeblock_hw_ssize(device->first_minor) > 1024) {
+		nr_of_blks *= tapeblock_hw_ssize(device->first_minor) / 1024;
+	} else {
+		nr_of_blks /= 1024 / tapeblock_hw_ssize(device->first_minor);
+	}
+	PRINT_INFO("Tape block device size is %i KB\n", nr_of_blks);
+	blk_size[tapeblock_major][device->first_minor] = nr_of_blks;
+
+	return 0;
+}
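The tail of tapeblock_mediumdetect() converts the block count reported by MTTELL into kilobytes for blk_size[], scaling up or down depending on whether the hardware block size is larger or smaller than 1 KB. A small standalone sketch of just that conversion; the function name and example values are illustrative, not part of the driver:

    /* Sketch of the blocks-to-KB conversion used above.
     * E.g. 5000 blocks of 2048 bytes -> 10000 KB,
     *      5000 blocks of  512 bytes ->  2500 KB. */
    static unsigned int ex_blocks_to_kb(unsigned int blocks, unsigned int hw_bsize)
    {
            if (hw_bsize > 1024)
                    return blocks * (hw_bsize / 1024);
            return blocks / (1024 / hw_bsize);
    }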
+/*
+ * This function has to be called whenever a new medium has been inserted
+ * into the drive.
+ */
+void
+tapeblock_medium_change(struct tape_device *device) {
+	device->blk_data.start_block_id = 0;
+	blk_size[tapeblock_major][device->first_minor] = 0;
+}
+
+/*
+ * Block frontend tape device open function.
+ */
+int
+tapeblock_open(struct inode *inode, struct file *filp) {
+	struct tape_device *device;
+	int rc;
+
+	if (major(filp->f_dentry->d_inode->i_rdev) != tapeblock_major)
+		return -ENODEV;
+
+	MOD_INC_USE_COUNT;
+	device = tape_get_device(minor(filp->f_dentry->d_inode->i_rdev) >> 1);
+	if (IS_ERR(device)) {
+		MOD_DEC_USE_COUNT;
+		return PTR_ERR(device);
+	}
+
+	DBF_EVENT(6, "TBLOCK: open: %x\n", device->first_minor);
+
+	if (device->required_tapemarks) {
+		DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
+		PRINT_ERR("TBLOCK: Refusing to open tape with missing"
+			  " end of file marks.\n");
+		tape_put_device(device);
+		MOD_DEC_USE_COUNT;
+		return -EPERM;
+	}
+
+	rc = tape_open(device);
+	if (rc == 0) {
+		rc = tape_assign(device, TAPE_STATUS_ASSIGN_A);
+		if (rc == 0) {
+			rc = tapeblock_mediumdetect(device);
+			if (rc == 0) {
+				TAPE_SET_STATE(device, TAPE_STATUS_BLOCKDEV);
+				tape_put_device(device);
+				return 0;
+			}
+			tape_unassign(device, TAPE_STATUS_ASSIGN_A);
+		}
+		tape_release(device);
+	}
+	tape_put_device(device);
+	MOD_DEC_USE_COUNT;
+	return rc;
+}
+
+/*
+ * Block frontend tape device release function.
+ */
+int
+tapeblock_release(struct inode *inode, struct file *filp) {
+	struct tape_device *device;
+
+	device = tape_get_device(minor(inode->i_rdev) >> 1);
+
+	DBF_EVENT(4, "TBLOCK: release %i\n", device->first_minor);
+
+	/* Remove all buffers at device close. */
+	/* FIXME: can we do that at tape unload? */
+	invalidate_buffers(inode->i_rdev);
+
+	if (device->blk_data.start_block_id) {
+		tape_std_seek_block_id(device, device->blk_data.start_block_id);
+	} else {
+		tape_mtop(device, MTREW, 1);
+	}
+	TAPE_CLEAR_STATE(device, TAPE_STATUS_BLOCKDEV);
+	tape_unassign(device, TAPE_STATUS_ASSIGN_A);
+	tape_release(device);
+	tape_put_device(device);
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+int
+tapeblock_ioctl(
+	struct inode *inode,
+	struct file *file,
+	unsigned int command,
+	unsigned long arg
+) {
+	int rc = 0;
+	int minor = minor(inode->i_rdev);
+
+	DBF_EVENT(6, "tapeblock_ioctl(%x)\n", command);
+
+	switch (command) {
+	case BLKSSZGET:
+		if (put_user(tapeblock_ssize(minor), (int *) arg))
+			rc = -EFAULT;
+		break;
+	case BLKGETSIZE:
+		if (
+			put_user(
+				tapeblock_size(minor),
+				(unsigned long *) arg
+			)
+		)
+			rc = -EFAULT;
+		break;
+#ifdef BLKGETSIZE64
+	case BLKGETSIZE64:
+		if (put_user(tapeblock_size(minor) << 9, (u64 *) arg))
+			rc = -EFAULT;
+		break;
+#endif
+	case CDROMMULTISESSION:
+	case CDROMREADTOCENTRY:
+		/* No message for these... */
+		rc = -EINVAL;
+		break;
+	default:
+		PRINT_WARN("invalid ioctl 0x%x\n", command);
+		rc = -EINVAL;
+	}
+	return rc;
+}
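tapeblock_open() above layers three acquisitions (tape_open, tape_assign, tapeblock_mediumdetect) and unwinds them in strict reverse order on failure, expressed as nested ifs. The same staircase is often written with goto labels; a hedged sketch of the equivalent structure follows, where ex_device and the open_dev/assign_dev/detect_medium helpers are hypothetical stand-ins, not the driver's API:

    struct ex_device;                       /* hypothetical */
    int  open_dev(struct ex_device *);      /* stands in for tape_open */
    int  assign_dev(struct ex_device *);    /* stands in for tape_assign */
    int  detect_medium(struct ex_device *); /* stands in for mediumdetect */
    void unassign_dev(struct ex_device *);
    void release_dev(struct ex_device *);

    static int ex_open(struct ex_device *dev)
    {
            int rc;

            if ((rc = open_dev(dev)) != 0)
                    return rc;
            if ((rc = assign_dev(dev)) != 0)
                    goto out_release;
            if ((rc = detect_medium(dev)) != 0)
                    goto out_unassign;
            return 0;

    out_unassign:
            unassign_dev(dev);       /* undo assign_dev */
    out_release:
            release_dev(dev);        /* undo open_dev */
            return rc;
    }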
+/*
+ * Initialize block device frontend.
+ */
+int
+tapeblock_init(void)
+{
+	int rc;
+
+	/* Register the tape major number to the kernel */
+#ifdef CONFIG_DEVFS_FS
+	if (tapeblock_major == 0)
+		tapeblock_major = devfs_alloc_major(DEVFS_SPECIAL_BLK);
+#endif
+	rc = register_blkdev(tapeblock_major, "tBLK", &tapeblock_bdops);
+	if (rc < 0) {
+		PRINT_ERR("can't get major %d for block device\n",
+			  tapeblock_major);
+		return rc;
+	}
+	if (tapeblock_major == 0)
+		tapeblock_major = rc;
+
+	/* Allocate memory for kernel block device tables */
+	rc = -ENOMEM;
+	blk_size[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL);
+	if (blk_size[tapeblock_major] == NULL)
+		goto tapeblock_init_fail;
+	memset(blk_size[tapeblock_major], 0, 256*sizeof(int));
+	blksize_size[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL);
+	if (blksize_size[tapeblock_major] == NULL)
+		goto tapeblock_init_fail;
+	memset(blksize_size[tapeblock_major], 0, 256*sizeof(int));
+	hardsect_size[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL);
+	if (hardsect_size[tapeblock_major] == NULL)
+		goto tapeblock_init_fail;
+	memset(hardsect_size[tapeblock_major], 0, 256*sizeof(int));
+	max_sectors[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL);
+	if (max_sectors[tapeblock_major] == NULL)
+		goto tapeblock_init_fail;
+	memset(max_sectors[tapeblock_major], 0, 256*sizeof(int));
+
+	blk_dev[tapeblock_major].queue = tapeblock_get_queue;
+
+	PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
+	DBF_EVENT(3, "TBLOCK: major = %d\n", tapeblock_major);
+	DBF_EVENT(3, "TBLOCK: init ok\n");
+
+	return 0;
+
+tapeblock_init_fail:
+	if (tapeblock_major > 0) {
+		if (blk_size[tapeblock_major]) {
+			kfree(blk_size[tapeblock_major]);
+			blk_size[tapeblock_major] = NULL;
+		}
+		if (blksize_size[tapeblock_major]) {
+			kfree(blksize_size[tapeblock_major]);
+			blksize_size[tapeblock_major] = NULL;
+		}
+		if (hardsect_size[tapeblock_major]) {
+			kfree(hardsect_size[tapeblock_major]);
+			hardsect_size[tapeblock_major] = NULL;
+		}
+		if (max_sectors[tapeblock_major]) {
+			kfree(max_sectors[tapeblock_major]);
+			max_sectors[tapeblock_major] = NULL;
+		}
+#ifdef CONFIG_DEVFS_FS
+		devfs_unregister_blkdev(tapeblock_major, "tBLK");
+#else
+		unregister_blkdev(tapeblock_major, "tBLK");
+#endif
+		tapeblock_major = -1;
+	}
+
+	DBF_EVENT(3, "TBLOCK: init failed(%d)\n", rc);
+	return rc;
+}
+
+/*
+ * Deregister major for block device frontend
+ */
+void
+tapeblock_exit(void)
+{
+	if (blk_size[tapeblock_major]) {
+		kfree(blk_size[tapeblock_major]);
+		blk_size[tapeblock_major] = NULL;
+	}
+	if (blksize_size[tapeblock_major]) {
+		kfree(blksize_size[tapeblock_major]);
+		blksize_size[tapeblock_major] = NULL;
+	}
+	if (hardsect_size[tapeblock_major]) {
+		kfree(hardsect_size[tapeblock_major]);
+		hardsect_size[tapeblock_major] = NULL;
+	}
+	if (max_sectors[tapeblock_major]) {
+		kfree(max_sectors[tapeblock_major]);
+		max_sectors[tapeblock_major] = NULL;
+	}
+	blk_dev[tapeblock_major].queue = NULL;
+	unregister_blkdev(tapeblock_major, "tBLK");
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tapeblock.c linux.21rc5-ac2/drivers/s390/char/tapeblock.c
--- linux.21rc5/drivers/s390/char/tapeblock.c	2003-05-28 15:08:05.000000000 +0100
+++ linux.21rc5-ac2/drivers/s390/char/tapeblock.c	1970-01-01 01:00:00.000000000 +0100
@@ -1,593 +0,0 @@
-
-/***************************************************************************
- *
- *  drivers/s390/char/tapeblock.c
- *    block device frontend for tape device driver
- *
- *  S390 and zSeries version
- *    Copyright (C) 2001 IBM Corporation
- * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include -#include -#include -#include -#include /* CCW allocations */ -#include -#include -#include -#ifdef MODULE -#define __NO_VERSION__ -#include -#endif -#include "tape.h" -#include "tapeblock.h" - -#define PRINTK_HEADER "TBLOCK:" - -/* - * file operation structure for tape devices - */ -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) -static struct block_device_operations tapeblock_fops = { -#else -static struct file_operations tapeblock_fops = { -#endif - owner : THIS_MODULE, - open : tapeblock_open, /* open */ - release : tapeblock_release, /* release */ - }; - -int tapeblock_major = 0; - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) -static void tape_request_fn (request_queue_t * queue); -#else -static void tape_request_fn (void); -#endif - -static request_queue_t* tapeblock_getqueue (kdev_t kdev); - -#ifdef CONFIG_DEVFS_FS -void -tapeblock_mkdevfstree (tape_info_t* ti) { - ti->devfs_block_dir=devfs_mk_dir (ti->devfs_dir, "block", ti); - ti->devfs_disc=devfs_register(ti->devfs_block_dir, "disc",DEVFS_FL_DEFAULT, - tapeblock_major, ti->blk_minor, - TAPEBLOCK_DEFAULTMODE, &tapeblock_fops, ti); -} - -void -tapeblock_rmdevfstree (tape_info_t* ti) { - devfs_unregister(ti->devfs_disc); - devfs_unregister(ti->devfs_block_dir); -} -#endif - -void -tapeblock_setup(tape_info_t* ti) { - blk_size[tapeblock_major][ti->blk_minor]=0; // this will be detected - blksize_size[tapeblock_major][ti->blk_minor]=2048; // blocks are 2k by default. - hardsect_size[tapeblock_major][ti->blk_minor]=512; - blk_init_queue (&ti->request_queue, tape_request_fn); - blk_queue_headactive (&ti->request_queue, 0); -#ifdef CONFIG_DEVFS_FS - tapeblock_mkdevfstree(ti); -#endif -} - -int -tapeblock_init(void) { - int result; - tape_frontend_t* blkfront,*temp; - tape_info_t* ti; - - tape_init(); - /* Register the tape major number to the kernel */ -#ifdef CONFIG_DEVFS_FS - result = devfs_register_blkdev(tapeblock_major, "tBLK", &tapeblock_fops); -#else - result = register_blkdev(tapeblock_major, "tBLK", &tapeblock_fops); -#endif - if (result < 0) { - PRINT_WARN(KERN_ERR "tape: can't get major %d for block device\n", tapeblock_major); - panic ("cannot get major number for tape block device"); - } - if (tapeblock_major == 0) tapeblock_major = result; /* accept dynamic major number*/ - INIT_BLK_DEV(tapeblock_major,tape_request_fn,tapeblock_getqueue,NULL); - read_ahead[tapeblock_major]=TAPEBLOCK_READAHEAD; - PRINT_WARN(KERN_ERR " tape gets major %d for block device\n", result); - blk_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC); - memset(blk_size[tapeblock_major],0,256*sizeof(int)); - blksize_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC); - memset(blksize_size[tapeblock_major],0,256*sizeof(int)); - hardsect_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC); - memset(hardsect_size[tapeblock_major],0,256*sizeof(int)); - max_sectors[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC); - memset(max_sectors[tapeblock_major],0,256*sizeof(int)); - blkfront = kmalloc(sizeof(tape_frontend_t),GFP_KERNEL); - if (blkfront==NULL) panic ("no mem for tape block device structure"); - blkfront->device_setup=tapeblock_setup; -#ifdef CONFIG_DEVFS_FS - blkfront->mkdevfstree = tapeblock_mkdevfstree; - blkfront->rmdevfstree = tapeblock_rmdevfstree; -#endif - blkfront->next=NULL; - if 
(first_frontend==NULL) { - first_frontend=blkfront; - } else { - temp=first_frontend; - while (temp->next!=NULL) - temp=temp->next; - temp->next=blkfront; - } - ti=first_tape_info; - while (ti!=NULL) { - tapeblock_setup(ti); - ti=ti->next; - } - return 0; -} - - -void -tapeblock_uninit(void) { - unregister_blkdev(tapeblock_major, "tBLK"); -} - -int -tapeblock_open(struct inode *inode, struct file *filp) { - tape_info_t *ti; - kdev_t dev; - int rc; - long lockflags; - - inode = filp->f_dentry->d_inode; - ti = first_tape_info; - while ((ti != NULL) && (ti->blk_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if (ti == NULL) - return -ENODEV; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:open:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) != TS_UNUSED) { - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:dbusy"); -#endif - return -EBUSY; - } - tapestate_set (ti, TS_IDLE); - ti->position=-1; - - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - rc=tapeblock_mediumdetect(ti); - if (rc) { - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_UNUSED); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return rc; // in case of errors, we don't have a size of the medium - } - dev = MKDEV (tapeblock_major, MINOR (inode->i_rdev)); /* Get the device */ - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->blk_filp = filp; - filp->private_data = ti; /* save the dev.info for later reference */ - ti->cqr=NULL; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - - return 0; -} - -int -tapeblock_release(struct inode *inode, struct file *filp) { - long lockflags; - tape_info_t *ti,*lastti; - ti = first_tape_info; - while ((ti != NULL) && (ti->blk_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if ((ti != NULL) && (tapestate_get (ti) == TS_NOT_OPER)) { - if (ti==first_tape_info) { - first_tape_info=ti->next; - } else { - lastti=first_tape_info; - while (lastti->next!=ti) lastti=lastti->next; - lastti->next=ti->next; - } - kfree(ti); - return 0; - } - if ((ti == NULL) || (tapestate_get (ti) != TS_IDLE)) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:notidle!"); -#endif - return -ENXIO; /* error in tape_release */ - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:release:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_UNUSED); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - invalidate_buffers(inode->i_rdev); - return 0; -} - -static void -tapeblock_end_request(tape_info_t* ti) { - struct buffer_head *bh; - int uptodate; - if ((tapestate_get(ti)!=TS_FAILED) && - (tapestate_get(ti)!=TS_DONE)) - BUG(); // A request has to be completed to end it - uptodate=(tapestate_get(ti)==TS_DONE); // is the buffer up to date? 
-#ifdef TAPE_DEBUG - if (uptodate) { - debug_text_event (tape_debug_area,6,"b:done:"); - debug_int_event (tape_debug_area,6,(long)ti->cqr); - } else { - debug_text_event (tape_debug_area,3,"b:failed:"); - debug_int_event (tape_debug_area,3,(long)ti->cqr); - } -#endif - // now inform ll_rw_block about a request status - while ((bh = ti->current_request->bh) != NULL) { - ti->current_request->bh = bh->b_reqnext; - bh->b_reqnext = NULL; - bh->b_end_io (bh, uptodate); - } - if (!end_that_request_first (ti->current_request, uptodate, "tBLK")) { -#ifndef DEVICE_NO_RANDOM - add_blkdev_randomness (MAJOR (ti->current_request->rq_dev)); -#endif - end_that_request_last (ti->current_request); - } - ti->discipline->free_bread(ti->cqr,ti); - ti->cqr=NULL; - ti->current_request=NULL; - if (tapestate_get(ti)!=TS_NOT_OPER) tapestate_set(ti,TS_IDLE); - return; -} - -static void -tapeblock_exec_IO (tape_info_t* ti) { - int rc; - struct request* req; - if (ti->cqr) { // process done/failed request - while ((tapestate_get(ti)==TS_FAILED) && - ti->blk_retries>0) { - ti->blk_retries--; - ti->position=-1; - tapestate_set(ti,TS_BLOCK_INIT); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:retryreq:"); - debug_int_event (tape_debug_area,3,(long)ti->cqr); -#endif - rc = do_IO (ti->devinfo.irq, ti->cqr->cpaddr, (unsigned long) ti->cqr, - 0x00, ti->cqr->options); - if (rc) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:doIOfail:"); - debug_int_event (tape_debug_area,3,(long)ti->cqr); -#endif - continue; // one retry lost 'cause doIO failed - } - return; - } - tapeblock_end_request (ti); // check state, inform user, free mem, dev=idl - } - if (ti->cqr!=NULL) BUG(); // tape should be idle now, request should be freed! - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - return; - } -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - if (list_empty (&ti->request_queue.queue_head)) { -#else - if (ti->request_queue==NULL) { -#endif - // nothing more to do or device has dissapeared;) -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:Qempty"); -#endif - tapestate_set(ti,TS_IDLE); - return; - } - // queue is not empty, fetch a request and start IO! - req=ti->current_request=tape_next_request(&ti->request_queue); - if (req==NULL) { - BUG(); // Yo. The queue was not reported empy, but no request found. This is _bad_. - } - if (req->cmd!=READ) { // we only support reading - tapestate_set(ti,TS_FAILED); - tapeblock_end_request (ti); // check state, inform user, free mem, dev=idl - tapestate_set(ti,TS_BLOCK_INIT); - schedule_tapeblock_exec_IO(ti); - return; - } - ti->cqr=ti->discipline->bread(req,ti,tapeblock_major); //build channel program from request - if (!ti->cqr) { - // ccw generation failed. we try again later. -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:cqrNULL"); -#endif - schedule_tapeblock_exec_IO(ti); - ti->current_request=NULL; - return; - } - ti->blk_retries = TAPEBLOCK_RETRIES; - rc= do_IO (ti->devinfo.irq, ti->cqr->cpaddr, - (unsigned long) ti->cqr, 0x00, ti->cqr->options); - if (rc) { - // okay. ssch failed. we try later. -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:doIOfail"); -#endif - ti->discipline->free_bread(ti->cqr,ti); - ti->cqr=NULL; - ti->current_request=NULL; - schedule_tapeblock_exec_IO(ti); - return; - } - // our request is in IO. 
we remove it from the queue and exit - tape_dequeue_request (&ti->request_queue,req); -} - -static void -do_tape_request (request_queue_t * queue) { - tape_info_t* ti; - long lockflags; - for (ti=first_tape_info; - ((ti!=NULL) && ((&ti->request_queue)!=queue)); - ti=ti->next); - if (ti==NULL) BUG(); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get(ti)!=TS_IDLE) { - s390irq_spin_unlock_irqrestore(ti->devinfo.irq,lockflags); - return; - } - if (tapestate_get(ti)!=TS_IDLE) BUG(); - tapestate_set(ti,TS_BLOCK_INIT); - tapeblock_exec_IO(ti); - s390irq_spin_unlock_irqrestore(ti->devinfo.irq,lockflags); -} - -static void -run_tapeblock_exec_IO (tape_info_t* ti) { - long flags_390irq,flags_ior; - spin_lock_irqsave (&io_request_lock, flags_ior); - s390irq_spin_lock_irqsave(ti->devinfo.irq,flags_390irq); - atomic_set(&ti->bh_scheduled,0); - tapeblock_exec_IO(ti); - s390irq_spin_unlock_irqrestore(ti->devinfo.irq,flags_390irq); - spin_unlock_irqrestore (&io_request_lock, flags_ior); -} - -void -schedule_tapeblock_exec_IO (tape_info_t *ti) -{ - /* Protect against rescheduling, when already running */ - if (atomic_compare_and_swap(0,1,&ti->bh_scheduled)) { - return; - } -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - INIT_LIST_HEAD(&ti->bh_tq.list); -#endif - ti->bh_tq.sync = 0; - ti->bh_tq.routine = (void *) (void *) run_tapeblock_exec_IO; - ti->bh_tq.data = ti; - - queue_task (&ti->bh_tq, &tq_immediate); - mark_bh (IMMEDIATE_BH); - return; -} - -/* wrappers around do_tape_request for different kernel versions */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98)) -static void tape_request_fn (void) { - tape_info_t* ti=first_tape_info; - while (ti!=NULL) { - do_tape_request(&ti->request_queue); - ti=ti->next; - } -} -#else -static void tape_request_fn (request_queue_t* queue) { - do_tape_request(queue); -} -#endif - -static request_queue_t* tapeblock_getqueue (kdev_t kdev) { - tape_info_t* ti=first_tape_info; - while ((ti!=NULL) && (MINOR(kdev)!=ti->blk_minor)) - ti=ti->next; - if (ti!=NULL) return &ti->request_queue; - return NULL; -} - -int tapeblock_mediumdetect(tape_info_t* ti) { - ccw_req_t* cqr; - int losize=1,hisize=1,rc; - long lockflags; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:medDet"); -#endif - PRINT_WARN("Detecting media size. 
This will take _long_, so get yourself a coffee...\n"); - while (1) { //is interruped by break - hisize=hisize << 1; // try twice the size tested before - cqr=ti->discipline->mtseek (ti, hisize); - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (rc) return -EIO; - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - if (ti->kernbuf) { - kfree (ti->kernbuf); - ti->kernbuf=NULL; - } - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - break; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - losize=hisize; - } - cqr = ti->discipline->mtrew (ti, 1); - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - while (losize!=hisize) { - cqr=ti->discipline->mtseek (ti, (hisize+losize)/2+1); - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (rc) return -EIO; - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - if (ti->kernbuf) { - kfree (ti->kernbuf); - ti->kernbuf=NULL; - } - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, 
lockflags); - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - hisize=(hisize+losize)/2; - cqr = ti->discipline->mtrew (ti, 1); - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - continue; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - losize=(hisize+losize)/2+1; - } - blk_size[tapeblock_major][ti->blk_minor]=(losize)*(blksize_size[tapeblock_major][ti->blk_minor]/1024); - return 0; -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tapeblock.h linux.21rc5-ac2/drivers/s390/char/tapeblock.h --- linux.21rc5/drivers/s390/char/tapeblock.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tapeblock.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,36 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tapechar.h - * character device frontend for tape device driver - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - **************************************************************************** - */ - -#ifndef TAPEBLOCK_H -#define TAPEBLOCK_H -#include -#define PARTN_BITS 0 - -#define TAPEBLOCK_READAHEAD 30 -#define TAPEBLOCK_MAJOR 0 - -#define TAPEBLOCK_DEFAULTMODE 0060644 - -int tapeblock_open(struct inode *, struct file *); -int tapeblock_release(struct inode *, struct file *); -void tapeblock_setup(tape_info_t* ti); -void schedule_tapeblock_exec_IO (tape_info_t *ti); -int tapeblock_mediumdetect(tape_info_t* ti); -#ifdef CONFIG_DEVFS_FS -void tapeblock_mkdevfstree (tape_info_t* ti); -#endif -int tapeblock_init (void); -void tapeblock_uninit (void); -#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape.c linux.21rc5-ac2/drivers/s390/char/tape.c --- linux.21rc5/drivers/s390/char/tape.c 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,1120 +0,0 @@ - -/*********************************************************************** - * drivers/s390/char/tape.c 
- * tape device driver for S/390 and zSeries tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - *********************************************************************** - */ - -#include "tapedefs.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef MODULE -#include -#endif -#include -#ifdef CONFIG_S390_TAPE_DYNAMIC -#include -#endif -#include "tape.h" -#ifdef CONFIG_S390_TAPE_3490 -#include "tape3490.h" -#endif -#ifdef CONFIG_S390_TAPE_3480 -#include "tape3480.h" -#endif -#ifdef CONFIG_S390_TAPE_BLOCK -#include "tapeblock.h" -#endif -#ifdef CONFIG_S390_TAPE_CHAR -#include "tapechar.h" -#endif -#ifdef CONFIG_PROC_FS -#include -#endif -#define PRINTK_HEADER "T390:" - - -/* state handling routines */ -inline void tapestate_set (tape_info_t * ti, int newstate); -inline int tapestate_get (tape_info_t * ti); -void tapestate_event (tape_info_t * ti, int event); - -/* our globals */ -tape_info_t *first_tape_info = NULL; -tape_discipline_t *first_discipline = NULL; -tape_frontend_t *first_frontend = NULL; -devreg_t* tape_devreg[128]; -int devregct=0; - -#ifdef TAPE_DEBUG -debug_info_t *tape_debug_area = NULL; -#endif - -char* state_verbose[TS_SIZE]={ - "TS_UNUSED", "TS_IDLE", "TS_DONE", "TS_FAILED", - "TS_BLOCK_INIT", - "TS_BSB_INIT", - "TS_BSF_INIT", - "TS_DSE_INIT", - "TS_EGA_INIT", - "TS_FSB_INIT", - "TS_FSF_INIT", - "TS_LDI_INIT", - "TS_LBL_INIT", - "TS_MSE_INIT", - "TS_NOP_INIT", - "TS_RBA_INIT", - "TS_RBI_INIT", - "TS_RBU_INIT", - "TS_RBL_INIT", - "TS_RDC_INIT", - "TS_RFO_INIT", - "TS_RSD_INIT", - "TS_REW_INIT", - "TS_REW_RELEASE_INIT", - "TS_RUN_INIT", - "TS_SEN_INIT", - "TS_SID_INIT", - "TS_SNP_INIT", - "TS_SPG_INIT", - "TS_SWI_INIT", - "TS_SMR_INIT", - "TS_SYN_INIT", - "TS_TIO_INIT", - "TS_UNA_INIT", - "TS_WRI_INIT", - "TS_WTM_INIT", - "TS_NOT_OPER"}; - -char* event_verbose[TE_SIZE]= { - "TE_START", "TE_DONE", "TE_FAILED", "TE_ERROR", "TE_OTHER"}; - -/* our root devfs handle */ -#ifdef CONFIG_DEVFS_FS -devfs_handle_t tape_devfs_root_entry; - -inline void -tape_mkdevfsroots (tape_info_t* ti) -{ - char devno [5]; - sprintf (devno,"%04x",ti->devinfo.devno); - ti->devfs_dir=devfs_mk_dir (tape_devfs_root_entry, devno, ti); -} - -inline void -tape_rmdevfsroots (tape_info_t* ti) -{ - devfs_unregister (ti->devfs_dir); -} -#endif - -#ifdef CONFIG_PROC_FS -/* our proc tapedevices entry */ -static struct proc_dir_entry *tape_devices_entry; - -typedef struct { - char *data; - int len; -} tempinfo_t; - - -static int -tape_devices_open (struct inode *inode, struct file *file) -{ - int size=80; - tape_info_t* ti; - tempinfo_t* tempinfo; - char* data; - int pos=0; - tempinfo = kmalloc (sizeof(tempinfo_t),GFP_KERNEL); - if (!tempinfo) - return -ENOMEM; - for (ti=first_tape_info;ti!=NULL;ti=ti->next) - size+=80; // FIXME: Guess better! - data=vmalloc(size); - if (!data) { - kfree (tempinfo); - return -ENOMEM; - } - pos+=sprintf(data+pos,"TapeNo\tDevNo\tCuType\tCuModel\tDevType\tDevModel\tState\n"); - for (ti=first_tape_info;ti!=NULL;ti=ti->next) { - pos+=sprintf(data+pos,"%d\t%04X\t%04X\t%02X\t%04X\t%02X\t\t%s\n",ti->rew_minor/2, - ti->devinfo.devno,ti->devinfo.sid_data.cu_type, - ti->devinfo.sid_data.cu_model,ti->devinfo.sid_data.dev_type, - ti->devinfo.sid_data.dev_model,((tapestate_get(ti) >= 0) && - (tapestate_get(ti) < TS_SIZE)) ? 
- state_verbose[tapestate_get (ti)] : "TS UNKNOWN"); - } - tempinfo->len=pos; - tempinfo->data=data; - file->private_data= (void*) tempinfo; -#ifdef MODULE - MOD_INC_USE_COUNT; -#endif - return 0; -} - -static ssize_t -tape_devices_read (struct file *file, char *user_buf, size_t user_len, loff_t * offset) -{ - loff_t len; - tempinfo_t *p_info = (tempinfo_t *) file->private_data; - - if (*offset >= p_info->len) { - return 0; /* EOF */ - } else { - len = user_len<(p_info->len - *offset)?user_len:(p_info->len - *offset); - if (copy_to_user (user_buf, &(p_info->data[*offset]), len)) - return -EFAULT; - (*offset) += len; - return len; /* number of bytes "read" */ - } -} - -static int -tape_devices_release (struct inode *inode, struct file *file) -{ - int rc = 0; - tempinfo_t *p_info = (tempinfo_t *) file->private_data; - if (p_info) { - if (p_info->data) - vfree (p_info->data); - kfree (p_info); - } -#ifdef MODULE - MOD_DEC_USE_COUNT; -#endif - return rc; -} - -static struct file_operations tape_devices_file_ops = -{ - read:tape_devices_read, /* read */ - open:tape_devices_open, /* open */ - release:tape_devices_release, /* close */ -}; - -static struct inode_operations tape_devices_inode_ops = -{ -#if !(LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - default_file_ops:&tape_devices_file_ops /* file ops */ -#endif /* LINUX_IS_24 */ -}; -#endif /* CONFIG_PROC_FS */ - -/* SECTION: Parameters for tape */ -char *tape[256] = { NULL, }; - -#ifndef MODULE -static char tape_parm_string[1024] __initdata = { 0, }; -static void -tape_split_parm_string (char *str) -{ - char *tmp = str; - int count = 0; - while (tmp != NULL && *tmp != '\0') { - char *end; - int len; - end = strchr (tmp, ','); - if (end == NULL) { - len = strlen (tmp) + 1; - } else { - len = (long) end - (long) tmp + 1; - *end = '\0'; - end++; - } - tape[count] = kmalloc (len * sizeof (char), GFP_ATOMIC); - if (tape[count] == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "can't store tape= parameter no %d\n", - count + 1); - break; - } - memset (tape[count], 0, len * sizeof (char)); - memcpy (tape[count], tmp, len * sizeof (char)); - count++; - tmp = end; - }; -} - -void __init -tape_parm_setup (char *str, int *ints) -{ - int len = strlen (tape_parm_string); - if (len != 0) { - strcat (tape_parm_string, ","); - } - strcat (tape_parm_string, str); -} - -int __init -tape_parm_call_setup (char *str) -{ - int dummy; - tape_parm_setup (str, &dummy); - return 1; -} - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,2,16)) -__setup("tape=", tape_parm_call_setup); -#endif /* kernel <2.2.19 */ -#endif /* not defined MODULE */ - -static inline int -tape_parm_strtoul (char *str, char **stra) -{ - char *temp = str; - int val; - if (*temp == '0') { - temp++; /* strip leading zero */ - if (*temp == 'x') - temp++; /* strip leading x */ - } - val = simple_strtoul (temp, &temp, 16); /* interpret anything as hex */ - *stra = temp; - return val; -} - -static inline devreg_t * -tape_create_devreg (int devno) -{ - devreg_t *devreg = kmalloc (sizeof (devreg_t), GFP_KERNEL); - if (devreg != NULL) { - memset (devreg, 0, sizeof (devreg_t)); - devreg->ci.devno = devno; - devreg->flag = DEVREG_TYPE_DEVNO; - devreg->oper_func = tape_oper_handler; - } - return devreg; -} - -static inline void -tape_parm_parse (char **str) -{ - char *temp; - int from, to,i,irq=0,rc,retries=0,tape_num=0; - s390_dev_info_t dinfo; - tape_info_t* ti,*tempti; - tape_discipline_t* disc; - long lockflags; - if (*str==NULL) { - /* no params present -> leave */ - return; - } - while (*str) { - temp = 
*str; - from = 0; - to = 0; - - /* turn off autodetect mode, if any range is present */ - from = tape_parm_strtoul (temp, &temp); - to = from; - if (*temp == '-') { - temp++; - to = tape_parm_strtoul (temp, &temp); - } - for (i=from;i<=to;i++) { - retries=0; - // register for attch/detach of a devno - tape_devreg[devregct]=tape_create_devreg(i); - if (tape_devreg[devregct]==NULL) { - PRINT_WARN ("Could not create devreg for devno %04x, dyn. attach for this devno deactivated.\n",i); - } else { - s390_device_register (tape_devreg[devregct++]); - } - // we are activating a device if it is present - for (irq = get_irq_first(); irq!=-ENODEV; irq=get_irq_next(irq)) { - rc = get_dev_info_by_irq (irq, &dinfo); - - disc = first_discipline; - while ((dinfo.devno == i) && (disc != NULL) && (disc->cu_type != dinfo.sid_data.cu_type)) - disc = (tape_discipline_t *) (disc->next); - if ((disc == NULL) || (rc == -ENODEV) || (i!=dinfo.devno)) { - continue; - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"det irq: "); - debug_int_event (tape_debug_area,3,irq); - debug_text_event (tape_debug_area,3,"cu: "); - debug_int_event (tape_debug_area,3,disc->cu_type); -#endif /* TAPE_DEBUG */ - PRINT_INFO ("using devno %04x with discipline %04x on irq %d as tape device %d\n",dinfo.devno,dinfo.sid_data.cu_type,irq,tape_num/2); - /* Allocate tape structure */ - ti = kmalloc (sizeof (tape_info_t), GFP_ATOMIC); - if (ti == NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"ti:no mem "); -#endif /* TAPE_DEBUG */ - PRINT_INFO ("tape: can't allocate memory for " - "tape info structure\n"); - continue; - } - memset(ti,0,sizeof(tape_info_t)); - ti->discipline = disc; - disc->tape = ti; - rc = tape_setup (ti, irq, tape_num); - if (rc) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"tsetup err"); - debug_int_exception (tape_debug_area,3,rc); -#endif /* TAPE_DEBUG */ - kfree (ti); - } else { - s390irq_spin_lock_irqsave (irq, lockflags); - if (first_tape_info == NULL) { - first_tape_info = ti; - } else { - tempti = first_tape_info; - while (tempti->next != NULL) - tempti = tempti->next; - tempti->next = ti; - } - s390irq_spin_unlock_irqrestore (irq, lockflags); - } - } - tape_num+=2; - } - str++; - } -} - - -/* SECTION: Managing wrappers for ccwcache */ - -#define TAPE_EMERGENCY_REQUESTS 16 - -static ccw_req_t *tape_emergency_req[TAPE_EMERGENCY_REQUESTS] = -{NULL,}; -static spinlock_t tape_emergency_req_lock = SPIN_LOCK_UNLOCKED; - -static void -tape_init_emergency_req (void) -{ - int i; - for (i = 0; i < TAPE_EMERGENCY_REQUESTS; i++) { - tape_emergency_req[i] = (ccw_req_t *) get_free_page (GFP_KERNEL); - } -} - -#ifdef MODULE // We only cleanup the emergency requests on module unload. 
-static void -tape_cleanup_emergency_req (void) -{ - int i; - for (i = 0; i < TAPE_EMERGENCY_REQUESTS; i++) { - if (tape_emergency_req[i]) - free_page ((long) (tape_emergency_req[i])); - else - printk (KERN_WARNING PRINTK_HEADER "losing one page for 'in-use' emergency request\n"); - } -} -#endif - -ccw_req_t * -tape_alloc_request (char *magic, int cplength, int datasize) -{ - ccw_req_t *rv = NULL; - int i; - if ((rv = ccw_alloc_request (magic, cplength, datasize)) != NULL) { - return rv; - } - if (cplength * sizeof (ccw1_t) + datasize + sizeof (ccw_req_t) > PAGE_SIZE) { - return NULL; - } - spin_lock (&tape_emergency_req_lock); - for (i = 0; i < TAPE_EMERGENCY_REQUESTS; i++) { - if (tape_emergency_req[i] != NULL) { - rv = tape_emergency_req[i]; - tape_emergency_req[i] = NULL; - } - } - spin_unlock (&tape_emergency_req_lock); - if (rv) { - memset (rv, 0, PAGE_SIZE); - rv->cache = (kmem_cache_t *) (tape_emergency_req + i); - strncpy ((char *) (&rv->magic), magic, 4); - ASCEBC ((char *) (&rv->magic), 4); - rv->cplength = cplength; - rv->datasize = datasize; - rv->data = (void *) ((long) rv + PAGE_SIZE - datasize); - rv->cpaddr = (ccw1_t *) ((long) rv + sizeof (ccw_req_t)); - } - return rv; -} - -void -tape_free_request (ccw_req_t * request) -{ - if (request->cache >= (kmem_cache_t *) tape_emergency_req && - request->cache <= (kmem_cache_t *) (tape_emergency_req + TAPE_EMERGENCY_REQUESTS)) { - *((ccw_req_t **) (request->cache)) = request; - } else { - clear_normalized_cda ((ccw1_t *) (request->cpaddr)); // avoid memory leak caused by modeset_byte - ccw_free_request (request); - } -} - -/* - * Allocate a ccw request and reserve it for tape driver - */ -inline - ccw_req_t * -tape_alloc_ccw_req (tape_info_t * ti, int cplength, int datasize) -{ - char tape_magic_id[] = "tape"; - ccw_req_t *cqr = NULL; - - if (!ti) - return NULL; - cqr = tape_alloc_request (tape_magic_id, cplength, datasize); - - if (!cqr) { -#ifdef TAPE_DEBUG - PRINT_WARN ("empty CQR generated\n"); -#endif - } - cqr->magic = TAPE_MAGIC; /* sets an identifier for tape driver */ - cqr->device = ti; /* save pointer to tape info */ - return cqr; -} - -/* - * Find the tape_info_t structure associated with irq - */ -static inline tape_info_t * -tapedev_find_info (int irq) -{ - tape_info_t *ti; - - ti = first_tape_info; - if (ti != NULL) - do { - if (ti->devinfo.irq == irq) - break; - } while ((ti = (tape_info_t *) ti->next) != NULL); - return ti; -} - -#define QUEUE_THRESHOLD 5 - -/* - * Tape interrupt routine, called from Ingo's I/O layer - */ -void -tape_irq (int irq, void *int_parm, struct pt_regs *regs) -{ - tape_info_t *ti = tapedev_find_info (irq); - - /* analyse devstat and fire event */ - if (ti->devstat.dstat & DEV_STAT_UNIT_CHECK) { - tapestate_event (ti, TE_ERROR); - } else if (ti->devstat.dstat & (DEV_STAT_DEV_END)) { - tapestate_event (ti, TE_DONE); - } else - tapestate_event (ti, TE_OTHER); -} - -int -tape_oper_handler ( int irq, struct _devreg *dreg) { - tape_info_t* ti=first_tape_info; - tape_info_t* newtape; - int rc,tape_num,retries=0,i; - s390_dev_info_t dinfo; - tape_discipline_t* disc; -#ifdef CONFIG_DEVFS_FS - tape_frontend_t* frontend; -#endif - long lockflags; - while ((ti!=NULL) && (ti->devinfo.irq!=irq)) - ti=ti->next; - if (ti!=NULL) { - // irq is (still) used by tape. tell ingo to try again later - PRINT_WARN ("Oper handler for irq %d called while irq still (internaly?) 
used.\n",irq); - return -EAGAIN; - } - // irq is not used by tape - rc = get_dev_info_by_irq (irq, &dinfo); - if (rc == -ENODEV) { - retries++; - rc = get_dev_info_by_irq (irq, &dinfo); - if (retries > 5) { - PRINT_WARN ("No device information for new dev. could be retrieved.\n"); - return -ENODEV; - } - } - disc = first_discipline; - while ((disc != NULL) && (disc->cu_type != dinfo.sid_data.cu_type)) - disc = (tape_discipline_t *) (disc->next); - if (disc == NULL) - PRINT_WARN ("No matching discipline for cu_type %x found, ignoring device %04x.\n",dinfo.sid_data.cu_type,dinfo.devno); - if (rc == -ENODEV) - PRINT_WARN ("No device information for new dev. could be retrieved.\n"); - if ((disc == NULL) || (rc == -ENODEV)) - return -ENODEV; - - /* Allocate tape structure */ - ti = kmalloc (sizeof (tape_info_t), GFP_ATOMIC); - if (ti == NULL) { - PRINT_INFO ( "tape: can't allocate memory for " - "tape info structure\n"); - return -ENOBUFS; - } - memset(ti,0,sizeof(tape_info_t)); - ti->discipline = disc; - disc->tape = ti; - tape_num=0; - if (*tape) { - // we have static device ranges, so fingure out the tape_num of the attached tape - for (i=0;ici.devno==dinfo.devno) { - tape_num=2*i; - break; - } - } else { - // we are running in autoprobe mode, find a free tape_num - newtape=first_tape_info; - while (newtape!=NULL) { - if (newtape->rew_minor==tape_num) { - // tape num in use. try next one - tape_num+=2; - newtape=first_tape_info; - } else { - // tape num not used by newtape. look at next tape info - newtape=newtape->next; - } - } - } - rc = tape_setup (ti, irq, tape_num); - if (rc) { - kfree (ti); - return -ENOBUFS; - } -#ifdef CONFIG_DEVFS_FS - for (frontend=first_frontend;frontend!=NULL;frontend=frontend->next) - frontend->mkdevfstree(ti); -#endif - s390irq_spin_lock_irqsave (irq,lockflags); - if (first_tape_info == NULL) { - first_tape_info = ti; - } else { - newtape = first_tape_info; - while (newtape->next != NULL) - newtape = newtape->next; - newtape->next = ti; - } - s390irq_spin_unlock_irqrestore (irq, lockflags); - return 0; -} - - -static void -tape_noper_handler ( int irq, int status ) { - tape_info_t *ti=first_tape_info; - tape_info_t *lastti; -#ifdef CONFIG_DEVFS_FS - tape_frontend_t *frontend; -#endif - long lockflags; - s390irq_spin_lock_irqsave(irq,lockflags); - while (ti!=NULL && ti->devinfo.irq!=irq) ti=ti->next; - if (ti==NULL) return; - if (tapestate_get(ti)!=TS_UNUSED) { - // device is in use! - PRINT_WARN ("Tape #%d was detached while it was busy. Expect errors!",ti->blk_minor/2); - tapestate_set(ti,TS_NOT_OPER); - ti->rc=-ENODEV; - ti->wanna_wakeup=1; - switch (tapestate_get(ti)) { - case TS_REW_RELEASE_INIT: - tapestate_set(ti,TS_NOT_OPER); - wake_up (&ti->wq); - break; -#ifdef CONFIG_S390_TAPE_BLOCK - case TS_BLOCK_INIT: - tapestate_set(ti,TS_NOT_OPER); - schedule_tapeblock_exec_IO(ti); - break; -#endif - default: - tapestate_set(ti,TS_NOT_OPER); - wake_up_interruptible (&ti->wq); - } - } else { - // device is unused! 
- PRINT_WARN ("Tape #%d was detached.\n",ti->blk_minor/2); - if (ti==first_tape_info) { - first_tape_info=ti->next; - } else { - lastti=first_tape_info; - while (lastti->next!=ti) lastti=lastti->next; - lastti->next=ti->next; - } -#ifdef CONFIG_DEVFS_FS - for (frontend=first_frontend;frontend!=NULL;frontend=frontend->next) - frontend->rmdevfstree(ti); - tape_rmdevfsroots(ti); -#endif - kfree(ti); - } - s390irq_spin_unlock_irqrestore(irq,lockflags); - return; -} - - -void -tape_dump_sense (devstat_t * stat) -{ -#ifdef TAPE_DEBUG - int sl; -#endif -#if 0 - - PRINT_WARN ("------------I/O resulted in unit check:-----------\n"); - for (sl = 0; sl < 4; sl++) { - PRINT_WARN ("Sense:"); - for (sct = 0; sct < 8; sct++) { - PRINT_WARN (" %2d:0x%02X", 8 * sl + sct, - stat->ii.sense.data[8 * sl + sct]); - } - PRINT_WARN ("\n"); - } - PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X " - " %02X%02X%02X%02X %02X%02X%02X%02X \n", - stat->ii.sense.data[0], stat->ii.sense.data[1], - stat->ii.sense.data[2], stat->ii.sense.data[3], - stat->ii.sense.data[4], stat->ii.sense.data[5], - stat->ii.sense.data[6], stat->ii.sense.data[7], - stat->ii.sense.data[8], stat->ii.sense.data[9], - stat->ii.sense.data[10], stat->ii.sense.data[11], - stat->ii.sense.data[12], stat->ii.sense.data[13], - stat->ii.sense.data[14], stat->ii.sense.data[15]); - PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X " - " %02X%02X%02X%02X %02X%02X%02X%02X \n", - stat->ii.sense.data[16], stat->ii.sense.data[17], - stat->ii.sense.data[18], stat->ii.sense.data[19], - stat->ii.sense.data[20], stat->ii.sense.data[21], - stat->ii.sense.data[22], stat->ii.sense.data[23], - stat->ii.sense.data[24], stat->ii.sense.data[25], - stat->ii.sense.data[26], stat->ii.sense.data[27], - stat->ii.sense.data[28], stat->ii.sense.data[29], - stat->ii.sense.data[30], stat->ii.sense.data[31]); -#endif -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"SENSE:"); - for (sl=0;sl<31;sl++) { - debug_int_event (tape_debug_area,3,stat->ii.sense.data[sl]); - } - debug_int_exception (tape_debug_area,3,stat->ii.sense.data[31]); -#endif -} - -/* - * Setup tape_info_t structure of a tape device - */ -int -tape_setup (tape_info_t * ti, int irq, int minor) -{ - long lockflags; - int rc = 0; - - if (minor>254) { - PRINT_WARN ("Device id %d on irq %d will not be accessible since this driver is restricted to 128 devices.\n",minor/2,irq); - return -EINVAL; - } - rc = get_dev_info_by_irq (irq, &(ti->devinfo)); - if (rc == -ENODEV) { /* end of device list */ - return rc; - } - ti->rew_minor = minor; - ti->nor_minor = minor + 1; - ti->blk_minor = minor; -#ifdef CONFIG_DEVFS_FS - tape_mkdevfsroots(ti); -#endif - /* Register IRQ */ -#ifdef CONFIG_S390_TAPE_DYNAMIC - rc = s390_request_irq_special (irq, tape_irq, tape_noper_handler,0, "tape", &(ti->devstat)); -#else - rc = s390_request_irq (irq, tape_irq, 0, "tape", &(ti->devstat)); -#endif - s390irq_spin_lock_irqsave (irq, lockflags); - ti->next = NULL; - if (rc) - PRINT_WARN ("Cannot register irq %d, rc=%d\n", irq, rc); - init_waitqueue_head (&ti->wq); - ti->kernbuf = ti->userbuf = ti->discdata = NULL; - tapestate_set (ti, TS_UNUSED); - ti->discdata=NULL; - ti->discipline->setup_assist (ti); - ti->wanna_wakeup=0; - s390irq_spin_unlock_irqrestore (irq, lockflags); - return rc; -} - -/* - * tape_init will register the driver for each tape. 
- */ -int -tape_init (void) -{ - long lockflags; - s390_dev_info_t dinfo; - tape_discipline_t *disc; - tape_info_t *ti = NULL, *tempti = NULL; - char *opt_char,*opt_block,*opt_3490,*opt_3480; - int irq = 0, rc, retries = 0, tape_num = 0; - static int initialized=0; - - if (initialized) // Only init the devices once - return 0; - initialized=1; - -#ifdef TAPE_DEBUG - tape_debug_area = debug_register ( "tape", 3, 2, 10); - debug_register_view(tape_debug_area,&debug_hex_ascii_view); - debug_text_event (tape_debug_area,3,"begin init"); -#endif /* TAPE_DEBUG */ - - /* print banner */ - PRINT_WARN ("IBM S/390 Tape Device Driver (v1.01).\n"); - PRINT_WARN ("(C) IBM Deutschland Entwicklung GmbH, 2000\n"); - opt_char=opt_block=opt_3480=opt_3490="not present"; -#ifdef CONFIG_S390_TAPE_CHAR - opt_char="built in"; -#endif -#ifdef CONFIG_S390_TAPE_BLOCK - opt_block="built in"; -#endif -#ifdef CONFIG_S390_TAPE_3480 - opt_3480="built in"; -#endif -#ifdef CONFIG_S390_TAPE_3490 - opt_3490="built in"; -#endif - /* print feature info */ - PRINT_WARN ("character device frontend : %s\n",opt_char); - PRINT_WARN ("block device frontend : %s\n",opt_block); - PRINT_WARN ("support for 3480 compatible : %s\n",opt_3480); - PRINT_WARN ("support for 3490 compatible : %s\n",opt_3490); - -#ifndef MODULE - tape_split_parm_string(tape_parm_string); -#endif - if (*tape) - PRINT_INFO ("Using ranges supplied in parameters, disabling autoprobe mode.\n"); - else - PRINT_INFO ("No parameters supplied, enabling autoprobe mode for all supported devices.\n"); -#ifdef CONFIG_S390_TAPE_3490 - if (*tape) - first_discipline = tape3490_init (0); // no autoprobe for devices - else - first_discipline = tape3490_init (1); // do autoprobe since no parm specified - first_discipline->next = NULL; -#endif - -#ifdef CONFIG_S390_TAPE_3480 - if (first_discipline == NULL) { - if (*tape) - first_discipline = tape3480_init (0); // no autoprobe for devices - else - first_discipline = tape3480_init (1); // do autoprobe since no parm specified - first_discipline->next = NULL; - } else { - if (*tape) - first_discipline->next = tape3480_init (0); // no autoprobe for devices - else - first_discipline->next = tape3480_init (1); // do autoprobe since no parm specified - ((tape_discipline_t*) (first_discipline->next))->next=NULL; - } -#endif -#ifdef CONFIG_DEVFS_FS - tape_devfs_root_entry=devfs_mk_dir (NULL, "tape", NULL); -#endif CONFIG_DEVFS_FS - -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"dev detect"); -#endif /* TAPE_DEBUG */ - /* Allocate the tape structures */ - if (*tape!=NULL) { - // we have parameters, continue with parsing the parameters and set the devices online - tape_parm_parse (tape); - } else { - // we are running in autodetect mode, search all devices for compatibles - for (irq = get_irq_first(); irq!=-ENODEV; irq=get_irq_next(irq)) { - rc = get_dev_info_by_irq (irq, &dinfo); - disc = first_discipline; - while ((disc != NULL) && (disc->cu_type != dinfo.sid_data.cu_type)) - disc = (tape_discipline_t *) (disc->next); - if ((disc == NULL) || (rc == -ENODEV)) - continue; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"det irq: "); - debug_int_event (tape_debug_area,3,irq); - debug_text_event (tape_debug_area,3,"cu: "); - debug_int_event (tape_debug_area,3,disc->cu_type); -#endif /* TAPE_DEBUG */ - PRINT_INFO ("using devno %04x with discipline %04x on irq %d as tape device %d\n",dinfo.devno,dinfo.sid_data.cu_type,irq,tape_num/2); - /* Allocate tape structure */ - ti = kmalloc (sizeof (tape_info_t), GFP_ATOMIC); - if (ti == 
NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"ti:no mem "); -#endif /* TAPE_DEBUG */ - PRINT_INFO ("tape: can't allocate memory for " - "tape info structure\n"); - continue; - } - memset(ti,0,sizeof(tape_info_t)); - ti->discipline = disc; - disc->tape = ti; - rc = tape_setup (ti, irq, tape_num); - if (rc) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"tsetup err"); - debug_int_exception (tape_debug_area,3,rc); -#endif /* TAPE_DEBUG */ - kfree (ti); - } else { - s390irq_spin_lock_irqsave (irq, lockflags); - if (first_tape_info == NULL) { - first_tape_info = ti; - } else { - tempti = first_tape_info; - while (tempti->next != NULL) - tempti = tempti->next; - tempti->next = ti; - } - tape_num += 2; - s390irq_spin_unlock_irqrestore (irq, lockflags); - } - } - } - - /* Allocate local buffer for the ccwcache */ - tape_init_emergency_req (); -#ifdef CONFIG_PROC_FS -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - tape_devices_entry = create_proc_entry ("tapedevices", - S_IFREG | S_IRUGO | S_IWUSR, - &proc_root); - tape_devices_entry->proc_fops = &tape_devices_file_ops; - tape_devices_entry->proc_iops = &tape_devices_inode_ops; -#else - tape_devices_entry = (struct proc_dir_entry *) kmalloc - (sizeof (struct proc_dir_entry), GFP_ATOMIC); - if (tape_devices_entry) { - memset (tape_devices_entry, 0, sizeof (struct proc_dir_entry)); - tape_devices_entry->name = "tapedevices"; - tape_devices_entry->namelen = strlen ("tapedevices"); - tape_devices_entry->low_ino = 0; - tape_devices_entry->mode = (S_IFREG | S_IRUGO | S_IWUSR); - tape_devices_entry->nlink = 1; - tape_devices_entry->uid = 0; - tape_devices_entry->gid = 0; - tape_devices_entry->size = 0; - tape_devices_entry->get_info = NULL; - tape_devices_entry->ops = &tape_devices_inode_ops; - proc_register (&proc_root, tape_devices_entry); - } -#endif -#endif /* CONFIG_PROC_FS */ - - return 0; -} - -#ifdef MODULE -MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte (cotte@de.ibm.com)"); -MODULE_DESCRIPTION("Linux for S/390 channel attached tape device driver"); -MODULE_PARM (tape, "1-" __MODULE_STRING (256) "s"); - -int -init_module (void) -{ -#ifdef CONFIG_S390_TAPE_CHAR - tapechar_init (); -#endif -#ifdef CONFIG_S390_TAPE_BLOCK - tapeblock_init (); -#endif - return 0; -} - -void -cleanup_module (void) -{ - tape_info_t *ti ,*temp; - tape_frontend_t* frontend, *tempfe; - tape_discipline_t* disc ,*tempdi; - int i; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"cleaup mod"); -#endif /* TAPE_DEBUG */ - - if (*tape) { - // we are running with parameters. 
we'll now deregister from our devno's - for (i=0;inext; - //cleanup a device -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"free irq:"); - debug_int_event (tape_debug_area,6,temp->devinfo.irq); -#endif /* TAPE_DEBUG */ - free_irq (temp->devinfo.irq, &(temp->devstat)); - if (temp->discdata) kfree (temp->discdata); - if (temp->kernbuf) kfree (temp->kernbuf); - if (temp->cqr) tape_free_request(temp->cqr); -#ifdef CONFIG_DEVFS_FS - for (frontend=first_frontend;frontend!=NULL;frontend=frontend->next) - frontend->rmdevfstree(temp); - tape_rmdevfsroots(temp); -#endif - kfree (temp); - } -#ifdef CONFIG_DEVFS_FS - devfs_unregister (tape_devfs_root_entry); -#endif CONFIG_DEVFS_FS -#ifdef CONFIG_PROC_FS -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - remove_proc_entry ("tapedevices", &proc_root); -#else - proc_unregister (&proc_root, tape_devices_entry->low_ino); - kfree (tape_devices_entry); -#endif /* LINUX_IS_24 */ -#endif -#ifdef CONFIG_S390_TAPE_CHAR - tapechar_uninit(); -#endif -#ifdef CONFIG_S390_TAPE_BLOCK - tapeblock_uninit(); -#endif - frontend=first_frontend; - while (frontend != NULL) { - tempfe = frontend; - frontend = frontend->next; - kfree (tempfe); - } - disc=first_discipline; - while (disc != NULL) { - if (*tape) - disc->shutdown(0); - else - disc->shutdown(1); - tempdi = disc; - disc = disc->next; - kfree (tempdi); - } - /* Deallocate the local buffer for the ccwcache */ - tape_cleanup_emergency_req (); -#ifdef TAPE_DEBUG - debug_unregister (tape_debug_area); -#endif /* TAPE_DEBUG */ -} -#endif /* MODULE */ - -inline void -tapestate_set (tape_info_t * ti, int newstate) -{ - if (ti->tape_state == TS_NOT_OPER) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"ts_set err"); - debug_text_exception (tape_debug_area,3,"dev n.oper"); -#endif /* TAPE_DEBUG */ - } else { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,4,"ts. dev: "); - debug_int_event (tape_debug_area,4,ti->blk_minor); - debug_text_event (tape_debug_area,4,"old ts: "); - debug_text_event (tape_debug_area,4,(((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >=0 )) ? - state_verbose[tapestate_get (ti)] : - "UNKNOWN TS")); - debug_text_event (tape_debug_area,4,"new ts: "); - debug_text_event (tape_debug_area,4,(((newstate < TS_SIZE) && - (newstate >= 0)) ? - state_verbose[newstate] : - "UNKNOWN TS")); -#endif /* TAPE_DEBUG */ - ti->tape_state = newstate; - } -} - -inline int -tapestate_get (tape_info_t * ti) -{ - return (ti->tape_state); -} - -void -tapestate_event (tape_info_t * ti, int event) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"te! dev: "); - debug_int_event (tape_debug_area,6,ti->blk_minor); - debug_text_event (tape_debug_area,6,"event:"); - debug_text_event (tape_debug_area,6,((event >=0) && - (event < TE_SIZE)) ? - event_verbose[event] : "TE UNKNOWN"); - debug_text_event (tape_debug_area,6,"state:"); - debug_text_event (tape_debug_area,6,((tapestate_get(ti) >= 0) && - (tapestate_get(ti) < TS_SIZE)) ? 
- state_verbose[tapestate_get (ti)] : - "TS UNKNOWN"); -#endif /* TAPE_DEBUG */ - if (event == TE_ERROR) { - ti->discipline->error_recovery(ti); - } else { - if ((event >= 0) && - (event < TE_SIZE) && - (tapestate_get (ti) >= 0) && - (tapestate_get (ti) < TS_SIZE) && - ((*(ti->discipline->event_table))[tapestate_get (ti)][event] != NULL)) - ((*(ti->discipline->event_table))[tapestate_get (ti)][event]) (ti); - else { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"TE UNEXPEC"); -#endif /* TAPE_DEBUG */ - ti->discipline->default_handler (ti); - } - } -} - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-indent-level: 4 - * c-brace-imaginary-offset: 0 - * c-brace-offset: -4 - * c-argdecl-indent: 4 - * c-label-offset: -4 - * c-continued-statement-offset: 4 - * c-continued-brace-offset: 0 - * indent-tabs-mode: nil - * tab-width: 8 - * End: - */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape_char.c linux.21rc5-ac2/drivers/s390/char/tape_char.c --- linux.21rc5/drivers/s390/char/tape_char.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape_char.c 2003-05-28 15:15:07.000000000 +0100 @@ -0,0 +1,514 @@ +/* + * drivers/s390/char/tape_char.c + * character device frontend for tape device driver + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "tape.h" +#include "tape_std.h" + +#define PRINTK_HEADER "TCHAR:" + +#define TAPECHAR_DEVFSMODE 0020644 /* crwxrw-rw- */ +#define TAPECHAR_MAJOR 0 /* get dynamic major */ + +int tapechar_major = TAPECHAR_MAJOR; + +/* + * Prototypes for file operation functions + */ +static ssize_t tapechar_read(struct file *, char *, size_t, loff_t *); +static ssize_t tapechar_write(struct file *, const char *, size_t, loff_t *); +static int tapechar_open(struct inode *,struct file *); +static int tapechar_release(struct inode *,struct file *); +static int tapechar_ioctl(struct inode *, struct file *, unsigned int, + unsigned long); + +/* + * File operation structure for tape character frontend + */ +static struct file_operations tape_fops = +{ + .read = tapechar_read, + .write = tapechar_write, + .ioctl = tapechar_ioctl, + .open = tapechar_open, + .release = tapechar_release, +}; + +#ifdef CONFIG_DEVFS_FS +/* + * Create Char directory with (non)rewinding entries + */ +static int +tapechar_mkdevfstree(struct tape_device *device) +{ + device->char_data.devfs_char_dir = + devfs_mk_dir(device->devfs_dir, "char", device); + if (device->char_data.devfs_char_dir == NULL) + return -ENOENT; + device->char_data.devfs_nonrewinding = + devfs_register(device->char_data.devfs_char_dir, + "nonrewinding", DEVFS_FL_DEFAULT, + tapechar_major, device->first_minor, + TAPECHAR_DEVFSMODE, &tape_fops, device); + if (device->char_data.devfs_nonrewinding == NULL) { + devfs_unregister(device->char_data.devfs_char_dir); + return -ENOENT; + } + device->char_data.devfs_rewinding = + devfs_register(device->char_data.devfs_char_dir, + "rewinding", 
DEVFS_FL_DEFAULT, + tapechar_major, device->first_minor + 1, + TAPECHAR_DEVFSMODE, &tape_fops, device); + if (device->char_data.devfs_rewinding == NULL) { + devfs_unregister(device->char_data.devfs_nonrewinding); + devfs_unregister(device->char_data.devfs_char_dir); + return -ENOENT; + } + return 0; +} + +/* + * Remove devfs entries + */ +static void +tapechar_rmdevfstree (struct tape_device *device) +{ + if (device->char_data.devfs_nonrewinding) + devfs_unregister(device->char_data.devfs_nonrewinding); + if (device->char_data.devfs_rewinding) + devfs_unregister(device->char_data.devfs_rewinding); + if (device->char_data.devfs_char_dir) + devfs_unregister(device->char_data.devfs_char_dir); +} +#endif + +/* + * This function is called for every new tapedevice + */ +int +tapechar_setup_device(struct tape_device * device) +{ +#ifdef CONFIG_DEVFS_FS + int rc; + + rc = tapechar_mkdevfstree(device); + if (rc) + return rc; +#endif + + tape_hotplug_event(device, tapechar_major, TAPE_HOTPLUG_CHAR_ADD); + return 0; + +} + +void +tapechar_cleanup_device(struct tape_device* device) +{ +#ifdef CONFIG_DEVFS_FS + tapechar_rmdevfstree(device); +#endif + tape_hotplug_event(device, tapechar_major, TAPE_HOTPLUG_CHAR_REMOVE); +} + +static inline int +tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) +{ + struct idal_buffer *new; + + /* Idal buffer must be the same size as the requested block size! */ + if (device->char_data.idal_buf != NULL && + device->char_data.idal_buf->size == block_size) + return 0; + + /* The current idal buffer is not big enough. Allocate a new one. */ + new = idal_buffer_alloc(block_size, 0); + if (new == NULL) + return -ENOMEM; + if (device->char_data.idal_buf != NULL) + idal_buffer_free(device->char_data.idal_buf); + device->char_data.idal_buf = new; + return 0; +} + +/* + * Tape device read function + */ +ssize_t +tapechar_read (struct file *filp, char *data, size_t count, loff_t *ppos) +{ + struct tape_device *device; + struct tape_request *request; + size_t block_size; + int rc; + + DBF_EVENT(6, "TCHAR:read\n"); + device = (struct tape_device *) filp->private_data; + + /* Check position. */ + if (ppos != &filp->f_pos) { + /* + * "A request was outside the capabilities of the device." + * This check uses internal knowledge about how pread and + * read work... + */ + DBF_EVENT(6, "TCHAR:ppos wrong\n"); + return -EOVERFLOW; + } + + /* + * If the tape isn't terminated yet, do it now. And since we then + * are at the end of the tape there wouldn't be anything to read + * anyways. So we return immediatly. + */ + if(device->required_tapemarks) { + return tape_std_terminate_write(device); + } + + /* Find out block size to use */ + if (device->char_data.block_size != 0) { + if (count < device->char_data.block_size) { + DBF_EVENT(3, "TCHAR:read smaller than block " + "size was requested\n"); + return -EINVAL; + } + block_size = device->char_data.block_size; + } else { + block_size = count; + rc = tapechar_check_idalbuffer(device, block_size); + if (rc) + return rc; + } + DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); + /* Let the discipline build the ccw chain. */ + request = device->discipline->read_block(device, block_size); + if (IS_ERR(request)) + return PTR_ERR(request); + /* Execute it. */ + rc = tape_do_io(device, request); + if (rc == 0) { + rc = block_size - device->devstat.rescnt; + DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc); + filp->f_pos += rc; + /* Copy data from idal buffer to user space. 
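[Reader's note on the read path above: tapechar_read() enforces fixed-block semantics strictly. Once a block size is configured, a read() shorter than one block fails with EINVAL instead of returning partial data. A small user-space probe of that rule; the device node name and block size are invented for the example, and MTSETBLK is assumed to be honoured by tape_mtop(), which lives outside this hunk:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mtio.h>
	#include <unistd.h>

	int main(void)
	{
		struct mtop op = { MTSETBLK, 4096 };	/* fix the block size */
		char buf[512];
		int fd;

		fd = open("/dev/ntibm0", O_RDONLY);	/* invented node name */
		if (fd < 0 || ioctl(fd, MTIOCTOP, &op) < 0)
			return 1;
		/* count < block_size: tapechar_read() refuses with EINVAL */
		if (read(fd, buf, sizeof(buf)) < 0 && errno == EINVAL)
			printf("short read rejected, as the driver requires\n");
		close(fd);
		return 0;
	}
]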
*/ + if (idal_buffer_to_user(device->char_data.idal_buf, + data, rc) != 0) + rc = -EFAULT; + } + tape_put_request(request); + return rc; +} + +/* + * Tape device write function + */ +ssize_t +tapechar_write(struct file *filp, const char *data, size_t count, loff_t *ppos) +{ + struct tape_device *device; + struct tape_request *request; + size_t block_size; + size_t written; + int nblocks; + int i, rc; + + DBF_EVENT(6, "TCHAR:write\n"); + device = (struct tape_device *) filp->private_data; + /* Check position */ + if (ppos != &filp->f_pos) { + /* "A request was outside the capabilities of the device." */ + DBF_EVENT(6, "TCHAR:ppos wrong\n"); + return -EOVERFLOW; + } + /* Find out block size and number of blocks */ + if (device->char_data.block_size != 0) { + if (count < device->char_data.block_size) { + DBF_EVENT(3, "TCHAR:write smaller than block " + "size was requested\n"); + return -EINVAL; + } + block_size = device->char_data.block_size; + nblocks = count / block_size; + } else { + block_size = count; + rc = tapechar_check_idalbuffer(device, block_size); + if (rc) + return rc; + nblocks = 1; + } + DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); + DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); + /* Let the discipline build the ccw chain. */ + request = device->discipline->write_block(device, block_size); + if (IS_ERR(request)) + return PTR_ERR(request); + rc = 0; + written = 0; + for (i = 0; i < nblocks; i++) { + /* Copy data from user space to idal buffer. */ + if (idal_buffer_from_user(device->char_data.idal_buf, + data, block_size)) { + rc = -EFAULT; + break; + } + rc = tape_do_io(device, request); + if (rc) + break; + DBF_EVENT(6, "TCHAR:wbytes: %lx\n", + block_size - device->devstat.rescnt); + filp->f_pos += block_size - device->devstat.rescnt; + written += block_size - device->devstat.rescnt; + if (device->devstat.rescnt != 0) + break; + data += block_size; + } + tape_put_request(request); + + if (rc == -ENOSPC) { + /* + * Ok, the device has no more space. It has NOT written + * the block. + */ + if (device->discipline->process_eov) + device->discipline->process_eov(device); + if (written > 0) + rc = 0; + } + + /* + * After doing a write we always need two tapemarks to correctly + * terminate the tape (one to terminate the file, the second to + * flag the end of recorded data. + * Since process_eov positions the tape in front of the written + * tapemark it doesn't hurt to write two marks again. + */ + if(!rc) + device->required_tapemarks = 2; + + return rc ? rc : written; +} + +/* + * Character frontend tape device open function. + */ +int +tapechar_open (struct inode *inode, struct file *filp) +{ + struct tape_device *device; + int minor, rc; + + MOD_INC_USE_COUNT; + if (major(filp->f_dentry->d_inode->i_rdev) != tapechar_major) + return -ENODEV; + minor = minor(filp->f_dentry->d_inode->i_rdev); + device = tape_get_device(minor / TAPE_MINORS_PER_DEV); + if (IS_ERR(device)) { + MOD_DEC_USE_COUNT; + return PTR_ERR(device); + } + DBF_EVENT(6, "TCHAR:open: %x\n", minor(inode->i_rdev)); + rc = tape_open(device); + if (rc == 0) { + rc = tape_assign(device, TAPE_STATUS_ASSIGN_A); + if (rc == 0) { + filp->private_data = device; + return 0; + } + tape_release(device); + } + tape_put_device(device); + MOD_DEC_USE_COUNT; + return rc; +} + +/* + * Character frontend tape device release function. 
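[Reader's note on tapechar_open() above: it acquires four things in order (module count, device reference, open state, assignment) and unwinds them by hand at each failure point. Note also that the early major-number mismatch returns -ENODEV after MOD_INC_USE_COUNT without dropping it again, leaking a module reference. The same sequence resketched with goto-based unwind, the usual 2.4 idiom, using only the helpers shown above; illustrative only, not part of the patch:

	static int
	tapechar_open_sketch(struct inode *inode, struct file *filp)
	{
		struct tape_device *device;
		int minor, rc;

		MOD_INC_USE_COUNT;
		rc = -ENODEV;
		if (major(filp->f_dentry->d_inode->i_rdev) != tapechar_major)
			goto out_mod;	/* the version above leaks the count here */
		minor = minor(filp->f_dentry->d_inode->i_rdev);
		device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
		if (IS_ERR(device)) {
			rc = PTR_ERR(device);
			goto out_mod;
		}
		rc = tape_open(device);
		if (rc)
			goto out_put;
		rc = tape_assign(device, TAPE_STATUS_ASSIGN_A);
		if (rc)
			goto out_release;
		filp->private_data = device;
		return 0;

	out_release:
		tape_release(device);
	out_put:
		tape_put_device(device);
	out_mod:
		MOD_DEC_USE_COUNT;
		return rc;
	}
]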
+ */ + +int +tapechar_release(struct inode *inode, struct file *filp) +{ + struct tape_device *device; + + device = (struct tape_device *) filp->private_data; + DBF_EVENT(6, "TCHAR:release: %x\n", minor(inode->i_rdev)); + + /* + * If this is the rewinding tape minor then rewind. In that case we + * write all required tapemarks. Otherwise only one to terminate the + * file. + */ + if ((minor(inode->i_rdev) & 1) != 0) { + if(device->required_tapemarks) + tape_std_terminate_write(device); + tape_mtop(device, MTREW, 1); + } else { + if(device->required_tapemarks > 1) { + if(tape_mtop(device, MTWEOF, 1) == 0) + device->required_tapemarks--; + } + } + + if (device->char_data.idal_buf != NULL) { + idal_buffer_free(device->char_data.idal_buf); + device->char_data.idal_buf = NULL; + } + tape_unassign(device, TAPE_STATUS_ASSIGN_A); + tape_release(device); + filp->private_data = NULL; tape_put_device(device); + MOD_DEC_USE_COUNT; + return 0; +} + +/* + * Tape device io controls. + */ +static int +tapechar_ioctl(struct inode *inp, struct file *filp, + unsigned int no, unsigned long data) +{ + struct tape_device *device; + int rc; + + DBF_EVENT(6, "TCHAR:ioct(%x)\n", no); + + device = (struct tape_device *) filp->private_data; + + if (no == MTIOCTOP) { + struct mtop op; + + if (copy_from_user(&op, (char *) data, sizeof(op)) != 0) + return -EFAULT; + if (op.mt_count < 0) + return -EINVAL; + + /* + * Operations that change tape position should write final + * tapemarks + */ + switch(op.mt_op) { + case MTFSF: + case MTBSF: + case MTFSR: + case MTBSR: + case MTREW: + case MTOFFL: + case MTEOM: + case MTRETEN: + case MTBSFM: + case MTFSFM: + case MTSEEK: + if(device->required_tapemarks) + tape_std_terminate_write(device); + default: + ; + } + rc = tape_mtop(device, op.mt_op, op.mt_count); + + if(op.mt_op == MTWEOF && rc == 0) { + if(op.mt_count > device->required_tapemarks) + device->required_tapemarks = 0; + else + device->required_tapemarks -= op.mt_count; + } + return rc; + } + if (no == MTIOCPOS) { + /* MTIOCPOS: query the tape position. */ + struct mtpos pos; + + rc = tape_mtop(device, MTTELL, 1); + if (rc < 0) + return rc; + pos.mt_blkno = rc; + if (copy_to_user((char *) data, &pos, sizeof(pos)) != 0) + return -EFAULT; + return 0; + } + if (no == MTIOCGET) { + /* MTIOCGET: query the tape drive status. */ + struct mtget get; + + memset(&get, 0, sizeof(get)); + get.mt_type = MT_ISUNKNOWN; + get.mt_resid = device->devstat.rescnt; + get.mt_dsreg = device->tape_status; + /* FIXME: mt_erreg, mt_fileno */ + get.mt_gstat = device->tape_generic_status; + + if(device->medium_state == MS_LOADED) { + rc = tape_mtop(device, MTTELL, 1); + + if(rc < 0) + return rc; + + if(rc == 0) + get.mt_gstat |= GMT_BOT(~0); + + get.mt_blkno = rc; + } + get.mt_erreg = 0; + if (copy_to_user((char *) data, &get, sizeof(get)) != 0) + return -EFAULT; + return 0; + } + /* Try the discipline ioctl function. */ + if (device->discipline->ioctl_fn == NULL) + return -EINVAL; + return device->discipline->ioctl_fn(device, no, data); +} + +/* + * Initialize character device frontend. 
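[Reader's note: for reference, the two query ioctls handled in tapechar_ioctl() above as seen from user space. The device node is invented; GMT_BOT() is the standard <sys/mtio.h> mask test matching the GMT_BOT(~0) bit the driver sets when MTTELL reports block zero:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mtio.h>
	#include <unistd.h>

	int main(void)
	{
		struct mtpos pos;
		struct mtget get;
		int fd = open("/dev/ntibm0", O_RDONLY);	/* invented node */

		if (fd < 0)
			return 1;
		if (ioctl(fd, MTIOCPOS, &pos) == 0)	/* MTTELL underneath */
			printf("block %ld\n", (long) pos.mt_blkno);
		if (ioctl(fd, MTIOCGET, &get) == 0 && GMT_BOT(get.mt_gstat))
			printf("tape is at load point\n");
		close(fd);
		return 0;
	}
]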
+ */ +int +tapechar_init (void) +{ + int rc; + + /* Register the tape major number to the kernel */ +#ifdef CONFIG_DEVFS_FS + if (tapechar_major == 0) + tapechar_major = devfs_alloc_major(DEVFS_SPECIAL_CHR); +#endif + rc = register_chrdev(tapechar_major, "tape", &tape_fops); + if (rc < 0) { + PRINT_ERR("can't get major %d\n", tapechar_major); + DBF_EVENT(3, "TCHAR:initfail\n"); + return rc; + } + if (tapechar_major == 0) + tapechar_major = rc; /* accept dynamic major number */ + PRINT_INFO("Tape gets major %d for char device\n", tapechar_major); + DBF_EVENT(3, "Tape gets major %d for char device\n", rc); + DBF_EVENT(3, "TCHAR:init ok\n"); + return 0; +} + +/* + * cleanup + */ +void +tapechar_exit(void) +{ + unregister_chrdev (tapechar_major, "tape"); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tapechar.c linux.21rc5-ac2/drivers/s390/char/tapechar.c --- linux.21rc5/drivers/s390/char/tapechar.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tapechar.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,759 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tapechar.c - * character device frontend for tape device driver - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include -#include -#include -#include /* CCW allocations */ -#include -#include -#include -#include -#include -#ifdef MODULE -#define __NO_VERSION__ -#include -#endif -#include "tape.h" -#include "tapechar.h" - -#define PRINTK_HEADER "TCHAR:" - -/* - * file operation structure for tape devices - */ -static struct file_operations tape_fops = -{ - // owner : THIS_MODULE, - llseek:NULL, /* lseek - default */ - read:tape_read, /* read */ - write:tape_write, /* write */ - readdir:NULL, /* readdir - bad */ - poll:NULL, /* poll */ - ioctl:tape_ioctl, /* ioctl */ - mmap:NULL, /* mmap */ - open:tape_open, /* open */ - flush:NULL, /* flush */ - release:tape_release, /* release */ - fsync:NULL, /* fsync */ - fasync:NULL, /* fasync */ - lock:NULL, -}; - -int tape_major = TAPE_MAJOR; - -#ifdef CONFIG_DEVFS_FS -void -tapechar_mkdevfstree (tape_info_t* ti) { - ti->devfs_char_dir=devfs_mk_dir (ti->devfs_dir, "char", ti); - ti->devfs_nonrewinding=devfs_register(ti->devfs_char_dir, "nonrewinding", - DEVFS_FL_DEFAULT,tape_major, - ti->nor_minor, TAPECHAR_DEFAULTMODE, - &tape_fops, ti); - ti->devfs_rewinding=devfs_register(ti->devfs_char_dir, "rewinding", - DEVFS_FL_DEFAULT, tape_major, ti->rew_minor, - TAPECHAR_DEFAULTMODE, &tape_fops, ti); -} - -void -tapechar_rmdevfstree (tape_info_t* ti) { - devfs_unregister(ti->devfs_nonrewinding); - devfs_unregister(ti->devfs_rewinding); - devfs_unregister(ti->devfs_char_dir); -} -#endif - -void -tapechar_setup (tape_info_t * ti) -{ -#ifdef CONFIG_DEVFS_FS - tapechar_mkdevfstree(ti); -#endif -} - -void -tapechar_init (void) -{ - int result; - tape_frontend_t *charfront,*temp; - tape_info_t* ti; - - tape_init(); - - /* Register the tape major number to the kernel */ -#ifdef CONFIG_DEVFS_FS - result = devfs_register_chrdev (tape_major, "tape", &tape_fops); -#else - result = register_chrdev (tape_major, "tape", &tape_fops); -#endif - - if (result < 0) { - PRINT_WARN (KERN_ERR "tape: can't get major %d\n", tape_major); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"c:initfail"); 
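[Reader's note on the minor-number layout used by the new tapechar_init()/devfs code above: each drive gets two character minors, first_minor for the non-rewinding node and first_minor + 1 for the one that rewinds on close (the release path tests minor & 1). A sketch of creating the matching nodes once the dynamic major is known; node names are invented, and TAPE_MINORS_PER_DEV == 2 is an assumption consistent with the code shown but defined outside this hunk:

	#include <sys/stat.h>
	#include <sys/sysmacros.h>
	#include <sys/types.h>

	int make_tape_nodes(int tape_major, int drive)
	{
		int first_minor = drive * 2;	/* assumes TAPE_MINORS_PER_DEV == 2 */

		if (mknod("/dev/ntibm0", S_IFCHR | 0644,	/* non-rewinding */
			  makedev(tape_major, first_minor)) < 0)
			return -1;
		return mknod("/dev/rtibm0", S_IFCHR | 0644,	/* rewinds on close */
			     makedev(tape_major, first_minor + 1));
	}
]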
- debug_text_event (tape_debug_area,3,"regchrfail"); -#endif /* TAPE_DEBUG */ - panic ("no major number available for tape char device"); - } - if (tape_major == 0) - tape_major = result; /* accept dynamic major number */ - PRINT_WARN (KERN_ERR " tape gets major %d for character device\n", result); - charfront = kmalloc (sizeof (tape_frontend_t), GFP_KERNEL); - if (charfront == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"c:initfail"); - debug_text_event (tape_debug_area,3,"no mem"); -#endif /* TAPE_DEBUG */ - panic ("no major number available for tape char device"); - } - charfront->device_setup = tapechar_setup; -#ifdef CONFIG_DEVFS_FS - charfront->mkdevfstree = tapechar_mkdevfstree; - charfront->rmdevfstree = tapechar_rmdevfstree; -#endif -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"c:init ok"); -#endif /* TAPE_DEBUG */ - charfront->next=NULL; - if (first_frontend==NULL) { - first_frontend=charfront; - } else { - temp=first_frontend; - while (temp->next!=NULL) - temp=temp->next; - temp->next=charfront; - } - ti=first_tape_info; - while (ti!=NULL) { - tapechar_setup(ti); - ti=ti->next; - } -} - -void -tapechar_uninit (void) -{ - unregister_chrdev (tape_major, "tape"); -} - -/* - * Tape device read function - */ -ssize_t -tape_read (struct file *filp, char *data, size_t count, loff_t * ppos) -{ - long lockflags; - tape_info_t *ti; - size_t block_size; - ccw_req_t *cqr; - int rc; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:read"); -#endif /* TAPE_DEBUG */ - ti = first_tape_info; - while ((ti != NULL) && (ti->rew_filp != filp) && (ti->nor_filp != filp)) - ti = (tape_info_t *) ti->next; - if (ti == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:nodev"); -#endif /* TAPE_DEBUG */ - return -ENODEV; - } - if (ppos != &filp->f_pos) { - /* "A request was outside the capabilities of the device." */ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ppos wrong"); -#endif /* TAPE_DEBUG */ - return -EOVERFLOW; /* errno=75 Value too large for def. 
data type */ - } - if (ti->block_size == 0) { - block_size = count; - } else { - block_size = ti->block_size; - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:nbytes:"); - debug_int_event (tape_debug_area,6,block_size); -#endif - cqr = ti->discipline->read_block (data, block_size, ti); - if (!cqr) { - return -ENOBUFS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - if (rc) { - tapestate_set(ti,TS_IDLE); - kfree (cqr); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return rc; - } - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - ti->discipline->free_read_block (cqr, ti); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return ti->rc; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:rbytes:"); - debug_int_event (tape_debug_area,6,block_size - ti->devstat.rescnt); -#endif /* TAPE_DEBUG */ - filp->f_pos += block_size - ti->devstat.rescnt; - return block_size - ti->devstat.rescnt; -} - -/* - * Tape device write function - */ -ssize_t -tape_write (struct file *filp, const char *data, size_t count, loff_t * ppos) -{ - long lockflags; - tape_info_t *ti; - size_t block_size; - ccw_req_t *cqr; - int nblocks, i, rc; - size_t written = 0; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:write"); -#endif - ti = first_tape_info; - while ((ti != NULL) && (ti->nor_filp != filp) && (ti->rew_filp != filp)) - ti = (tape_info_t *) ti->next; - if (ti == NULL) - return -ENODEV; - if (ppos != &filp->f_pos) { - /* "A request was outside the capabilities of the device." */ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ppos wrong"); -#endif - return -EOVERFLOW; /* errno=75 Value too large for def. 
data type */ - } - if ((ti->block_size != 0) && (count % ti->block_size != 0)) - return -EIO; - if (ti->block_size == 0) { - block_size = count; - nblocks = 1; - } else { - block_size = ti->block_size; - nblocks = count / (ti->block_size); - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:nbytes:"); - debug_int_event (tape_debug_area,6,block_size); - debug_text_event (tape_debug_area,6,"c:nblocks:"); - debug_int_event (tape_debug_area,6,nblocks); -#endif - for (i = 0; i < nblocks; i++) { - cqr = ti->discipline->write_block (data + i * block_size, block_size, ti); - if (!cqr) { - return -ENOBUFS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - ti->discipline->free_write_block (cqr, ti); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if ((ti->rc==-ENOSPC) && (i!=0)) - return i*block_size; - return ti->rc; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:wbytes:"); - debug_int_event (tape_debug_area,6,block_size - ti->devstat.rescnt); -#endif - filp->f_pos += block_size - ti->devstat.rescnt; - written += block_size - ti->devstat.rescnt; - if (ti->devstat.rescnt > 0) - return written; - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:wtotal:"); - debug_int_event (tape_debug_area,6,written); -#endif - return written; -} - -static int -tape_mtioctop (struct file *filp, short mt_op, int mt_count) -{ - tape_info_t *ti; - ccw_req_t *cqr = NULL; - int rc; - long lockflags; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:mtio"); - debug_text_event (tape_debug_area,6,"c:ioop:"); - debug_int_event (tape_debug_area,6,mt_op); - debug_text_event (tape_debug_area,6,"c:arg:"); - debug_int_event (tape_debug_area,6,mt_count); -#endif - ti = first_tape_info; - while ((ti != NULL) && (ti->rew_filp != filp) && (ti->nor_filp != filp)) - ti = (tape_info_t *) ti->next; - if (ti == NULL) - return -ENODEV; - switch (mt_op) { - case MTREW: // rewind - - cqr = ti->discipline->mtrew (ti, mt_count); - break; - case MTOFFL: // put drive offline - - cqr = ti->discipline->mtoffl (ti, mt_count); - break; - case MTUNLOAD: // unload the tape - - cqr = ti->discipline->mtunload (ti, mt_count); - break; - case MTWEOF: // write tapemark - - cqr = ti->discipline->mtweof (ti, mt_count); - break; - case MTFSF: // forward space file - - cqr = ti->discipline->mtfsf (ti, mt_count); - break; - case MTBSF: // backward space file - - cqr = ti->discipline->mtbsf (ti, mt_count); - break; - case MTFSFM: // forward space file, stop at BOT side - - cqr = ti->discipline->mtfsfm (ti, mt_count); - break; - case MTBSFM: // backward space file, stop at 
BOT side - - cqr = ti->discipline->mtbsfm (ti, mt_count); - break; - case MTFSR: // forward space file - - cqr = ti->discipline->mtfsr (ti, mt_count); - break; - case MTBSR: // backward space file - - cqr = ti->discipline->mtbsr (ti, mt_count); - break; - case MTNOP: - cqr = ti->discipline->mtnop (ti, mt_count); - break; - case MTEOM: // postion at the end of portion - - case MTRETEN: // retension the tape - - cqr = ti->discipline->mteom (ti, mt_count); - break; - case MTERASE: - cqr = ti->discipline->mterase (ti, mt_count); - break; - case MTSETDENSITY: - cqr = ti->discipline->mtsetdensity (ti, mt_count); - break; - case MTSEEK: - cqr = ti->discipline->mtseek (ti, mt_count); - break; - case MTSETDRVBUFFER: - cqr = ti->discipline->mtsetdrvbuffer (ti, mt_count); - break; - case MTLOCK: - cqr = ti->discipline->mtsetdrvbuffer (ti, mt_count); - break; - case MTUNLOCK: - cqr = ti->discipline->mtsetdrvbuffer (ti, mt_count); - break; - case MTLOAD: - cqr = ti->discipline->mtload (ti, mt_count); - if (cqr!=NULL) break; // if backend driver has an load function ->use it - // if no medium is in, wait until it gets inserted - if (ti->medium_is_unloaded) { - wait_event_interruptible (ti->wq,ti->medium_is_unloaded==0); - } - return 0; - case MTCOMPRESSION: - cqr = ti->discipline->mtcompression (ti, mt_count); - break; - case MTSETPART: - cqr = ti->discipline->mtsetpart (ti, mt_count); - break; - case MTMKPART: - cqr = ti->discipline->mtmkpart (ti, mt_count); - break; - case MTTELL: // return number of block relative to current file - - cqr = ti->discipline->mttell (ti, mt_count); - break; - case MTSETBLK: - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->block_size = mt_count; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:setblk:"); - debug_int_event (tape_debug_area,6,mt_count); -#endif - return 0; - case MTRESET: - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = ti->userbuf = NULL; - tapestate_set (ti, TS_IDLE); - ti->block_size = 0; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:devreset:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - return 0; - default: -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:inv.mtio"); -#endif - return -EINVAL; - } - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - if (ti->kernbuf != NULL) { - kfree (ti->kernbuf); - ti->kernbuf = NULL; - } - tape_free_request (cqr); - // if medium was unloaded, update the corresponding variable. 
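/*
 * Aside: the switch just below pairs with the MTLOAD case earlier in
 * this function.  MTOFFL and MTUNLOAD set ti->medium_is_unloaded, and
 * a later MTLOAD with no backend load function sleeps in
 *
 *	wait_event_interruptible(ti->wq, ti->medium_is_unloaded == 0);
 *
 * until the flag is cleared and ti->wq is woken, presumably from the
 * interrupt path when the drive reports a newly inserted medium; the
 * clearing side is outside this hunk.
 */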
- switch (mt_op) { - case MTOFFL: - case MTUNLOAD: - ti->medium_is_unloaded=1; - } - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (((mt_op == MTEOM) || (mt_op == MTRETEN)) && (tapestate_get (ti) == TS_FAILED)) - tapestate_set (ti, TS_DONE); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return ti->rc; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - switch (mt_op) { - case MTRETEN: //need to rewind the tape after moving to eom - - return tape_mtioctop (filp, MTREW, 1); - case MTFSFM: //need to skip back over the filemark - - return tape_mtioctop (filp, MTBSFM, 1); - case MTBSF: //need to skip forward over the filemark - - return tape_mtioctop (filp, MTFSF, 1); - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:mtio done"); -#endif - return 0; -} - -/* - * Tape device io controls. - */ -int -tape_ioctl (struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - long lockflags; - tape_info_t *ti; - ccw_req_t *cqr; - struct mtop op; /* structure for MTIOCTOP */ - struct mtpos pos; /* structure for MTIOCPOS */ - struct mtget get; - - int rc; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ioct"); -#endif - ti = first_tape_info; - while ((ti != NULL) && - (ti->rew_minor != MINOR (inode->i_rdev)) && - (ti->nor_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if (ti == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:nodev"); -#endif - return -ENODEV; - } - // check for discipline ioctl overloading - if ((rc = ti->discipline->discipline_ioctl_overload (inode, filp, cmd, arg)) - != -EINVAL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ioverloa"); -#endif - return rc; - } - - switch (cmd) { - case MTIOCTOP: /* tape op command */ - if (copy_from_user (&op, (char *) arg, sizeof (struct mtop))) { - return -EFAULT; - } - return (tape_mtioctop (filp, op.mt_op, op.mt_count)); - case MTIOCPOS: /* query tape position */ - cqr = ti->discipline->mttell (ti, 0); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - pos.mt_blkno = ti->rc; - ti->cqr = NULL; - if (ti->kernbuf != NULL) { - kfree (ti->kernbuf); - ti->kernbuf = NULL; - } - tape_free_request (cqr); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (copy_to_user ((char *) arg, &pos, sizeof (struct mtpos))) - return -EFAULT; - return 0; - case MTIOCGET: - get.mt_erreg = ti->rc; - cqr = ti->discipline->mttell (ti, 0); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - do_IO 
(ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - get.mt_blkno = ti->rc; - get.mt_fileno = 0; - get.mt_type = MT_ISUNKNOWN; - get.mt_resid = ti->devstat.rescnt; - get.mt_dsreg = ti->devstat.ii.sense.data[3]; - get.mt_gstat = 0; - if (ti->devstat.ii.sense.data[1] & 0x08) - get.mt_gstat &= GMT_BOT (1); // BOT - - if (ti->devstat.ii.sense.data[1] & 0x02) - get.mt_gstat &= GMT_WR_PROT (1); // write protected - - if (ti->devstat.ii.sense.data[1] & 0x40) - get.mt_gstat &= GMT_ONLINE (1); //drive online - - ti->cqr = NULL; - if (ti->kernbuf != NULL) { - kfree (ti->kernbuf); - ti->kernbuf = NULL; - } - tape_free_request (cqr); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (copy_to_user ((char *) arg, &get, sizeof (struct mtget))) - return -EFAULT; - return 0; - default: -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"c:ioct inv"); -#endif - return -EINVAL; - } -} - -/* - * Tape device open function. - */ -int -tape_open (struct inode *inode, struct file *filp) -{ - tape_info_t *ti; - kdev_t dev; - long lockflags; - - inode = filp->f_dentry->d_inode; - ti = first_tape_info; - while ((ti != NULL) && - (ti->rew_minor != MINOR (inode->i_rdev)) && - (ti->nor_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if (ti == NULL) - return -ENODEV; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:open:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) != TS_UNUSED) { - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:dbusy"); -#endif - return -EBUSY; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - - dev = MKDEV (tape_major, MINOR (inode->i_rdev)); /* Get the device */ - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (ti->rew_minor == MINOR (inode->i_rdev)) - ti->rew_filp = filp; /* save for later reference */ - else - ti->nor_filp = filp; - filp->private_data = ti; /* save the dev.info for later reference */ - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - -#ifdef MODULE - MOD_INC_USE_COUNT; -#endif /* MODULE */ - return 0; -} - -/* - * Tape device release function. 
- */ -int -tape_release (struct inode *inode, struct file *filp) -{ - long lockflags; - tape_info_t *ti,*lastti; - ccw_req_t *cqr = NULL; - int rc = 0; - - ti = first_tape_info; - while ((ti != NULL) && (ti->rew_minor != MINOR (inode->i_rdev)) && (ti->nor_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if ((ti != NULL) && (tapestate_get (ti) == TS_NOT_OPER)) { - if (ti==first_tape_info) { - first_tape_info=ti->next; - } else { - lastti=first_tape_info; - while (lastti->next!=ti) lastti=lastti->next; - lastti->next=ti->next; - } - kfree(ti); - goto out; - } - if ((ti == NULL) || (tapestate_get (ti) != TS_IDLE)) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:notidle!"); -#endif - rc = -ENXIO; /* error in tape_release */ - goto out; - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:release:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - if (ti->rew_minor == MINOR (inode->i_rdev)) { - cqr = ti->discipline->mtrew (ti, 1); - if (cqr != NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:rewrelea"); -#endif - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_REW_RELEASE_INIT); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - } - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_UNUSED); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -out: -#ifdef MODULE - MOD_DEC_USE_COUNT; -#endif /* MODULE */ - return rc; -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tapechar.h linux.21rc5-ac2/drivers/s390/char/tapechar.h --- linux.21rc5/drivers/s390/char/tapechar.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tapechar.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,34 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tapechar.h - * character device frontend for tape device driver - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - **************************************************************************** - */ - -#ifndef TAPECHAR_H -#define TAPECHAR_H -#include -#define TAPECHAR_DEFAULTMODE 0020644 -#define TAPE_MAJOR 0 /* get dynamic major since no major officialy defined for tape */ -/* - * Prototypes for tape_fops - */ -ssize_t tape_read(struct file *, char *, size_t, loff_t *); -ssize_t tape_write(struct file *, const char *, size_t, loff_t *); -int tape_ioctl(struct inode *,struct file *,unsigned int,unsigned long); -int tape_open (struct inode *,struct file *); -int tape_release (struct inode *,struct file *); -#ifdef CONFIG_DEVFS_FS -void tapechar_mkdevfstree (tape_info_t* ti); -#endif -void tapechar_init (void); -void tapechar_uninit (void); -#endif /* TAPECHAR_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape_core.c linux.21rc5-ac2/drivers/s390/char/tape_core.c --- linux.21rc5/drivers/s390/char/tape_core.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape_core.c 2003-04-25 13:53:58.000000000 +0100 @@ -0,0 +1,1345 @@ +/* + * drivers/s390/char/tape_core.c + * basic function of the tape device driver + * + * S390 and zSeries 
version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#include +#include +#include +#include // for kernel parameters +#include // for requesting modules +#include // for locks +#include + +#include // for variable types +#include +#include +#include + +#include "tape.h" +#include "tape_std.h" + +#ifdef CONFIG_S390_TAPE_3590 +#include "tape_3590.h" +#endif + +#define PRINTK_HEADER "T390:" + +/* + * Prototypes for some static functions. + */ +static void __tape_do_irq (int, void *, struct pt_regs *); +static void __tape_remove_request(struct tape_device *, struct tape_request *); +static void tape_timeout_io (unsigned long); + +/* + * List of tape disciplines guarded by tape_discipline_lock. + */ +static struct list_head tape_disciplines = LIST_HEAD_INIT(tape_disciplines); +static spinlock_t tape_discipline_lock = SPIN_LOCK_UNLOCKED; + +/* + * Pointer to debug area. + */ +debug_info_t *tape_dbf_area = NULL; + +const char *tape_op_verbose[TO_SIZE] = +{ + [TO_BLOCK] = "BLK", + [TO_BSB] = "BSB", + [TO_BSF] = "BSF", + [TO_DSE] = "DSE", + [TO_FSB] = "FSB", + [TO_FSF] = "FSF", + [TO_LBL] = "LBL", + [TO_NOP] = "NOP", + [TO_RBA] = "RBA", + [TO_RBI] = "RBI", + [TO_RFO] = "RFO", + [TO_REW] = "REW", + [TO_RUN] = "RUN", + [TO_WRI] = "WRI", + [TO_WTM] = "WTM", + [TO_MSEN] = "MSN", + [TO_LOAD] = "LOA", + [TO_READ_CONFIG] = "RCF", + [TO_READ_ATTMSG] = "RAT", + [TO_DIS] = "DIS", + [TO_ASSIGN] = "ASS", + [TO_UNASSIGN] = "UAS", + [TO_BREAKASS] = "BRK" +}; + +/* + * Inline functions, that have to be defined. + */ + +/* + * I/O helper function. Adds the request to the request queue + * and starts it if the tape is idle. Has to be called with + * the device lock held. + */ +static inline int +__do_IO(struct tape_device *device, struct tape_request *request) +{ + int rc = 0; + + if(request->cpaddr == NULL) + BUG(); + + if(request->timeout.expires > 0) { + /* Init should be done by caller */ + DBF_EVENT(6, "(%04x): starting timed request\n", + device->devstat.devno); + + request->timeout.function = tape_timeout_io; + request->timeout.data = (unsigned long) + tape_clone_request(request); + add_timer(&request->timeout); + } + + rc = do_IO(device->devinfo.irq, request->cpaddr, + (unsigned long) request, 0x00, request->options); + + return rc; +} + +static void +__tape_process_queue(void *data) +{ + struct tape_device *device = (struct tape_device *) data; + struct list_head *l, *n; + struct tape_request *request; + int rc; + + DBF_EVENT(6, "tape_process_queue(%p)\n", device); + + /* + * We were told to be quiet. Do nothing for now. + */ + if (TAPE_NOACCESS(device)) { + return; + } + + /* + * Try to start each request on request queue until one is + * started successful. + */ + list_for_each_safe(l, n, &device->req_queue) { + request = list_entry(l, struct tape_request, list); + + /* Happens when new request arrive while still doing one. 
*/ + if (request->status == TAPE_REQUEST_IN_IO) + break; + +#ifdef CONFIG_S390_TAPE_BLOCK + if (request->op == TO_BLOCK) + device->discipline->check_locate(device, request); +#endif + switch(request->op) { + case TO_MSEN: + case TO_ASSIGN: + case TO_UNASSIGN: + case TO_BREAKASS: + break; + default: + if (TAPE_OPEN(device)) + break; + DBF_EVENT(3, + "TAPE(%04x): REQ in UNUSED state\n", + device->devstat.devno); + } + + rc = __do_IO(device, request); + if (rc == 0) { + DBF_EVENT(6, "tape: do_IO success\n"); + request->status = TAPE_REQUEST_IN_IO; + break; + } + /* Start failed. Remove request and indicate failure. */ + DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc); + + /* Set final status and remove. */ + request->rc = rc; + __tape_remove_request(device, request); + } +} + +static void +tape_process_queue(void *data) +{ + unsigned long flags; + struct tape_device * device; + + device = (struct tape_device *) data; + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + atomic_set(&device->bh_scheduled, 0); + __tape_process_queue(device); + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); +} + +void +tape_schedule_bh(struct tape_device *device) +{ + /* Protect against rescheduling, when already running. */ + if (atomic_compare_and_swap(0, 1, &device->bh_scheduled)) + return; + + INIT_LIST_HEAD(&device->bh_task.list); + device->bh_task.sync = 0; + device->bh_task.routine = tape_process_queue; + device->bh_task.data = device; + + queue_task(&device->bh_task, &tq_immediate); + mark_bh(IMMEDIATE_BH); + + return; +} + +/* + * Stop running ccw. Has to be called with the device lock held. + */ +static inline int +__tape_halt_io(struct tape_device *device, struct tape_request *request) +{ + int retries; + int rc; + + /* SMB: This should never happen */ + if(request->cpaddr == NULL) + BUG(); + + /* Check if interrupt has already been processed */ + if (request->callback == NULL) + return 0; + + /* Stop a possibly running timer */ + if(request->timeout.expires) { + if(del_timer(&request->timeout) > 0) { + tape_put_request(request); + request->timeout.data = 0L; + } + } + + rc = 0; + for (retries = 0; retries < 5; retries++) { + if (retries < 2) + rc = halt_IO(device->devinfo.irq, + (long) request, request->options); + else + rc = clear_IO(device->devinfo.irq, + (long) request, request->options); + if (rc == 0) + break; /* termination successful */ + if (rc == -ENODEV) + DBF_EXCEPTION(2, "device gone, retry\n"); + else if (rc == -EIO) + DBF_EXCEPTION(2, "I/O error, retry\n"); + else if (rc == -EBUSY) + DBF_EXCEPTION(2, "device busy, retry later\n"); + else + BUG(); + } + if (rc == 0) + request->status = TAPE_REQUEST_DONE; + return rc; +} + +static void +__tape_remove_request(struct tape_device *device, struct tape_request *request) +{ + /* First remove the request from the queue. */ + list_del(&request->list); + + /* This request isn't processed any further. */ + request->status = TAPE_REQUEST_DONE; + + /* Finally, if the callback hasn't been called, do it now. */ + if (request->callback != NULL) { + request->callback(request, request->callback_data); + request->callback = NULL; + } +} + +/* + * Tape state functions + */ +/* + * Printable strings for tape enumerations. + */ +const char *tape_state_string(struct tape_device *device) { + char *s = " ???? 
"; + + if (TAPE_NOT_OPER(device)) { + s = "NOT_OP"; + } else if (TAPE_NOACCESS(device)) { + s = "NO_ACC"; + } else if (TAPE_BOXED(device)) { + s = "BOXED "; + } else if (TAPE_OPEN(device)) { + s = "IN_USE"; + } else if (TAPE_ASSIGNED(device)) { + s = "ASSIGN"; + } else if (TAPE_INIT(device)) { + s = "INIT "; + } else if (TAPE_UNUSED(device)) { + s = "UNUSED"; + } + + return s; +} + +void +tape_state_set(struct tape_device *device, unsigned int status) +{ + const char *str; + + /* Maybe nothing changed. */ + if (device->tape_status == status) + return; + + DBF_EVENT(4, "ts. dev: %x\n", device->first_minor); + str = tape_state_string(device); + DBF_EVENT(4, "old ts: 0x%08x %s\n", device->tape_status, str); + + device->tape_status = status; + + str = tape_state_string(device); + DBF_EVENT(4, "new ts: 0x%08x %s\n", status, str); + + wake_up(&device->state_change_wq); +} + +void +tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) +{ + if (device->medium_state == newstate) + return; + + switch(newstate){ + case MS_UNLOADED: + device->tape_generic_status |= GMT_DR_OPEN(~0); + PRINT_INFO("(%04x): Tape is unloaded\n", + device->devstat.devno); + break; + case MS_LOADED: + device->tape_generic_status &= ~GMT_DR_OPEN(~0); + PRINT_INFO("(%04x): Tape has been mounted\n", + device->devstat.devno); + break; + default: + // print nothing + break; + } +#ifdef CONFIG_S390_TAPE_BLOCK + tapeblock_medium_change(device); +#endif + device->medium_state = newstate; + wake_up(&device->state_change_wq); +} + +static void +tape_timeout_io(unsigned long data) +{ + struct tape_request *request; + struct tape_device *device; + unsigned long flags; + + request = (struct tape_request *) data; + device = request->device; + + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + if(request->callback != NULL) { + DBF_EVENT(3, "TAPE(%04x): %s timeout\n", + device->devstat.devno, tape_op_verbose[request->op]); + PRINT_ERR("TAPE(%04x): %s timeout\n", + device->devstat.devno, tape_op_verbose[request->op]); + + if(__tape_halt_io(device, request) == 0) + DBF_EVENT(6, "tape_timeout_io: success\n"); + else { + DBF_EVENT(2, "tape_timeout_io: halt_io failed\n"); + PRINT_ERR("tape_timeout_io: halt_io failed\n"); + } + request->rc = -EIO; + + /* Remove from request queue. */ + __tape_remove_request(device, request); + + /* Start next request. */ + if (!list_empty(&device->req_queue)) + tape_schedule_bh(device); + } + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + tape_put_request(request); +} + +/* + * DEVFS Functions + */ +#ifdef CONFIG_DEVFS_FS +devfs_handle_t tape_devfs_root_entry; + +/* + * Create devfs root entry (devno in hex) for device td + */ +static int +tape_mkdevfsroot (struct tape_device* device) +{ + char devno [5]; + + sprintf(devno, "%04x", device->devinfo.devno); + device->devfs_dir = devfs_mk_dir(tape_devfs_root_entry, devno, device); + return (device->devfs_dir == NULL) ? -ENOMEM : 0; +} + +/* + * Remove devfs root entry for a device + */ +static void +tape_rmdevfsroot (struct tape_device *device) +{ + if (device->devfs_dir) { + devfs_unregister(device->devfs_dir); + device->devfs_dir = NULL; + } +} +#endif + +/* + * Enable tape device + */ +int +tape_enable_device(struct tape_device *device, + struct tape_discipline *discipline) +{ + int rc; + + if (!TAPE_INIT(device)) + return -EINVAL; + + /* Register IRQ. 
*/ + rc = s390_request_irq_special(device->devinfo.irq, __tape_do_irq, + tape_noper_handler, 0, + TAPE_MAGIC, &device->devstat); + if (rc) + return rc; + + s390_set_private_data(device->devinfo.irq, tape_clone_device(device)); + + device->discipline = discipline; + + /* Let the discipline have a go at the device. */ + rc = discipline->setup_device(device); + if (rc) { + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + free_irq(device->devinfo.irq, &device->devstat); + return rc; + } + +#ifdef CONFIG_DEVFS_FS + /* Create devfs entries */ + rc = tape_mkdevfsroot(device); + if (rc){ + PRINT_WARN ("Cannot create a devfs directory for " + "device %04x\n", device->devinfo.devno); + device->discipline->cleanup_device(device); + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + free_irq(device->devinfo.irq, &device->devstat); + return rc; + } +#endif + rc = tapechar_setup_device(device); + if (rc) { +#ifdef CONFIG_DEVFS_FS + tape_rmdevfsroot(device); +#endif + device->discipline->cleanup_device(device); + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + free_irq(device->devinfo.irq, &device->devstat); + return rc; + } +#ifdef CONFIG_S390_TAPE_BLOCK + rc = tapeblock_setup_device(device); + if (rc) { + tapechar_cleanup_device(device); +#ifdef CONFIG_DEVFS_FS + tape_rmdevfsroot(device); +#endif + device->discipline->cleanup_device(device); + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + free_irq(device->devinfo.irq, &device->devstat); + return rc; + } +#endif + + TAPE_CLEAR_STATE(device, TAPE_STATUS_INIT); + + return 0; +} + +/* + * Disable tape device. Check if there is a running request and + * terminate it. Post all queued requests with -EIO. + */ +void +tape_disable_device(struct tape_device *device) +{ + struct list_head *l, *n; + struct tape_request *request; + + spin_lock_irq(get_irq_lock(device->devinfo.irq)); + /* Post remaining requests with -EIO */ + list_for_each_safe(l, n, &device->req_queue) { + request = list_entry(l, struct tape_request, list); + if (request->status == TAPE_REQUEST_IN_IO) + __tape_halt_io(device, request); + + request->rc = -EIO; + __tape_remove_request(device, request); + } + + if (TAPE_ASSIGNED(device)) { + spin_unlock(get_irq_lock(device->devinfo.irq)); + if( + tape_unassign( + device, + TAPE_STATUS_ASSIGN_M|TAPE_STATUS_ASSIGN_A + ) == 0 + ) { + printk(KERN_WARNING "%04x: automatically unassigned\n", + device->devinfo.devno); + } + spin_lock_irq(get_irq_lock(device->devinfo.irq)); + } + + TAPE_SET_STATE(device, TAPE_STATUS_NOT_OPER); + spin_unlock_irq(get_irq_lock(device->devinfo.irq)); + + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + +#ifdef CONFIG_S390_TAPE_BLOCK + tapeblock_cleanup_device(device); +#endif + tapechar_cleanup_device(device); +#ifdef CONFIG_DEVFS_FS + tape_rmdevfsroot(device); +#endif + device->discipline->cleanup_device(device); + device->discipline = NULL; + free_irq(device->devinfo.irq, &device->devstat); +} + +/* + * Find discipline by cu_type. 
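[Reader's note on the queue flush in tape_disable_device() above: __tape_remove_request() does a list_del() on the entry being visited, so a plain list_for_each() would chase a just-unlinked next pointer; list_for_each_safe() caches it first. The shape in isolation:

	struct list_head *l, *n;	/* n caches l->next before the del */

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		request->rc = -EIO;
		__tape_remove_request(device, request);	/* list_del()s l */
	}
]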
+ */ +struct tape_discipline * +tape_get_discipline(int cu_type) +{ + struct list_head *l; + struct tape_discipline *discipline, *tmp; + + discipline = NULL; + spin_lock(&tape_discipline_lock); + list_for_each(l, &tape_disciplines) { + tmp = list_entry(l, struct tape_discipline, list); + if (tmp->cu_type == cu_type) { + discipline = tmp; + break; + } + } + if (discipline->owner != NULL) { + if (!try_inc_mod_count(discipline->owner)) + /* Discipline is currently unloaded! */ + discipline = NULL; + } + spin_unlock(&tape_discipline_lock); + return discipline; +} + +/* + * Decrement usage count for discipline. + */ +void +tape_put_discipline(struct tape_discipline *discipline) +{ + spin_lock(&tape_discipline_lock); + if (discipline->owner) + __MOD_DEC_USE_COUNT(discipline->owner); + spin_unlock(&tape_discipline_lock); +} + +/* + * Register backend discipline + */ +int +tape_register_discipline(struct tape_discipline *discipline) +{ + if (!try_inc_mod_count(THIS_MODULE)) + /* Tape module is currently unloaded! */ + return -ENOSYS; + spin_lock(&tape_discipline_lock); + list_add_tail(&discipline->list, &tape_disciplines); + spin_unlock(&tape_discipline_lock); + /* Now add the tape devices with matching cu_type. */ + tape_add_devices(discipline); + return 0; +} + +/* + * Unregister backend discipline + */ +void +__tape_unregister_discipline(struct tape_discipline *discipline) +{ + list_del(&discipline->list); + /* Remove tape devices with matching cu_type. */ + tape_remove_devices(discipline); + MOD_DEC_USE_COUNT; +} + +void +tape_unregister_discipline(struct tape_discipline *discipline) +{ + struct list_head *l; + + spin_lock(&tape_discipline_lock); + list_for_each(l, &tape_disciplines) { + if (list_entry(l, struct tape_discipline, list) == discipline){ + __tape_unregister_discipline(discipline); + break; + } + } + spin_unlock(&tape_discipline_lock); +} + +/* + * Allocate a new tape ccw request + */ +struct tape_request * +tape_alloc_request(int cplength, int datasize) +{ + struct tape_request *request; + + if (datasize > PAGE_SIZE || (cplength*sizeof(ccw1_t)) > PAGE_SIZE) + BUG(); + + DBF_EVENT(5, "tape_alloc_request(%d,%d)\n", cplength, datasize); + + request = (struct tape_request *) + kmalloc(sizeof(struct tape_request), GFP_KERNEL); + if (request == NULL) { + DBF_EXCEPTION(1, "cqra nomem\n"); + return ERR_PTR(-ENOMEM); + } + memset(request, 0, sizeof(struct tape_request)); + INIT_LIST_HEAD(&request->list); + atomic_set(&request->ref_count, 1); + + /* allocate channel program */ + if (cplength > 0) { + request->cpaddr = + kmalloc(cplength*sizeof(ccw1_t), GFP_ATOMIC | GFP_DMA); + if (request->cpaddr == NULL) { + DBF_EXCEPTION(1, "cqra nomem\n"); + kfree(request); + return ERR_PTR(-ENOMEM); + } + memset(request->cpaddr, 0, cplength*sizeof(ccw1_t)); + } + /* alloc small kernel buffer */ + if (datasize > 0) { + request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA); + if (request->cpdata == NULL) { + DBF_EXCEPTION(1, "cqra nomem\n"); + if (request->cpaddr != NULL) + kfree(request->cpaddr); + kfree(request); + return ERR_PTR(-ENOMEM); + } + memset(request->cpdata, 0, datasize); + } + + DBF_EVENT(5, "request=%p(%p/%p)\n", request, request->cpaddr, + request->cpdata); + + return request; +} + +/* + * Free tape ccw request + */ +void +tape_free_request (struct tape_request * request) +{ + DBF_EVENT(5, "tape_free_request(%p)\n", request); + + if (request->device != NULL) { + tape_put_device(request->device); + request->device = NULL; + } + if (request->cpdata != NULL) { + kfree(request->cpdata); + 
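[A caution on tape_get_discipline() as added above: when no entry matches cu_type, discipline is still NULL at the point where discipline->owner is dereferenced, so a lookup for an unknown control-unit type oopses instead of returning NULL. A guarded version of that tail, as a sketch:

	if (discipline != NULL && discipline->owner != NULL) {
		if (!try_inc_mod_count(discipline->owner))
			/* Discipline is currently unloaded! */
			discipline = NULL;
	}
	spin_unlock(&tape_discipline_lock);
	return discipline;
]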
} + if (request->cpaddr != NULL) { + kfree(request->cpaddr); + } + kfree(request); +} + +struct tape_request * +tape_clone_request(struct tape_request *request) +{ + DBF_EVENT(5, "tape_clone_request(%p) = %i\n", request, + atomic_inc_return(&request->ref_count)); + return request; +} + +struct tape_request * +tape_put_request(struct tape_request *request) +{ + int remain; + + DBF_EVENT(4, "tape_put_request(%p)\n", request); + if((remain = atomic_dec_return(&request->ref_count)) > 0) { + DBF_EVENT(5, "remaining = %i\n", remain); + } else { + tape_free_request(request); + } + + return NULL; +} + +/* + * Write sense data to console/dbf + */ +void +tape_dump_sense(struct tape_device* device, struct tape_request *request) +{ + devstat_t *stat; + unsigned int *sptr; + + stat = &device->devstat; + PRINT_INFO("-------------------------------------------------\n"); + PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", + stat->dstat, stat->cstat, stat->cpa); + PRINT_INFO("DEVICE: %04x\n", device->devinfo.devno); + if (request != NULL) + PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); + + sptr = (unsigned int *) stat->ii.sense.data; + PRINT_INFO("Sense data: %08X %08X %08X %08X \n", + sptr[0], sptr[1], sptr[2], sptr[3]); + PRINT_INFO("Sense data: %08X %08X %08X %08X \n", + sptr[4], sptr[5], sptr[6], sptr[7]); + PRINT_INFO("--------------------------------------------------\n"); +} + +/* + * Write sense data to dbf + */ +void +tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request) +{ + devstat_t *stat = &device->devstat; + unsigned int *sptr; + const char* op; + + if (request != NULL) + op = tape_op_verbose[request->op]; + else + op = "---"; + DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", stat->dstat,stat->cstat); + DBF_EVENT(3, "DEVICE: %04x OP\t: %s\n", device->devinfo.devno,op); + sptr = (unsigned int *) stat->ii.sense.data; + DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); + DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]); + DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]); + DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]); +} + +static inline int +__tape_do_io(struct tape_device *device, struct tape_request *request) +{ + /* Some operations may happen even on an unused tape device */ + switch(request->op) { + case TO_MSEN: + case TO_ASSIGN: + case TO_UNASSIGN: + case TO_BREAKASS: + break; + default: + if (!TAPE_OPEN(device)) + return -ENODEV; + } + + /* Add reference to device to the request. This increases the reference + count. */ + request->device = tape_clone_device(device); + request->status = TAPE_REQUEST_QUEUED; + + list_add_tail(&request->list, &device->req_queue); + __tape_process_queue(device); + + return 0; +} + +/* + * Add the request to the request queue, try to start it if the + * tape is idle. Return without waiting for end of i/o. + */ +int +tape_do_io_async(struct tape_device *device, struct tape_request *request) +{ + int rc; + long flags; + + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + /* Add request to request queue and try to start it. */ + rc = __tape_do_io(device, request); + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + return rc; +} + +/* + * tape_do_io/__tape_wake_up + * Add the request to the request queue, try to start it if the + * tape is idle and wait uninterruptible for its completion. 
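[Reader's note: tape_do_io() below is just tape_do_io_async() plus a wait-queue callback, and any other completion mechanism slots in the same way, since the callback fires from __tape_remove_request() in interrupt context where complete() is safe. A sketch using the 2.4 completion API, with invented function names:

	static void
	sample_done(struct tape_request *request, void *data)
	{
		complete((struct completion *) data);
	}

	static int
	sample_do_io(struct tape_device *device, struct tape_request *request)
	{
		struct completion done;
		int rc;

		init_completion(&done);
		request->callback = sample_done;
		request->callback_data = &done;
		rc = tape_do_io_async(device, request);
		if (rc)
			return rc;
		wait_for_completion(&done);
		return request->rc;	/* valid once the callback has run */
	}
]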
+ */ +static void +__tape_wake_up(struct tape_request *request, void *data) +{ + request->callback = NULL; + wake_up((wait_queue_head_t *) data); +} + +int +tape_do_io(struct tape_device *device, struct tape_request *request) +{ + wait_queue_head_t wq; + long flags; + int rc; + + DBF_EVENT(5, "tape: tape_do_io(%p, %p)\n", device, request); + + init_waitqueue_head(&wq); + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + /* Setup callback */ + request->callback = __tape_wake_up; + request->callback_data = &wq; + /* Add request to request queue and try to start it. */ + rc = __tape_do_io(device, request); + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + if (rc) + return rc; + /* Request added to the queue. Wait for its completion. */ + wait_event(wq, (request->callback == NULL)); + /* Get rc from request */ + return request->rc; +} + +/* + * tape_do_io_interruptible/__tape_wake_up_interruptible + * Add the request to the request queue, try to start it if the + * tape is idle and wait interruptibly for its completion. + */ +static void +__tape_wake_up_interruptible(struct tape_request *request, void *data) +{ + request->callback = NULL; + wake_up_interruptible((wait_queue_head_t *) data); +} + +int +tape_do_io_interruptible(struct tape_device *device, + struct tape_request *request) +{ + wait_queue_head_t wq; + long flags; + int rc; + + DBF_EVENT(5, "tape: tape_do_io_int(%p, %p)\n", device, request); + + init_waitqueue_head(&wq); + /* debug paranoia */ + if(!device) BUG(); + if(!request) BUG(); + + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + /* Setup callback */ + request->callback = __tape_wake_up_interruptible; + request->callback_data = &wq; + rc = __tape_do_io(device, request); + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + if (rc) + return rc; + /* Request added to the queue. Wait for its completion. */ + rc = wait_event_interruptible(wq, (request->callback == NULL)); + if (rc != -ERESTARTSYS) + /* Request finished normally. */ + return request->rc; + /* Interrupted by a signal. We have to stop the current request. */ + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + rc = __tape_halt_io(device, request); + if (rc == 0) { + DBF_EVENT(3, "IO stopped on irq %d\n", device->devinfo.irq); + rc = -ERESTARTSYS; + } + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + return rc; +} + + +/* + * Tape interrupt routine, called from Ingo's I/O layer + */ +static void +__tape_do_irq (int irq, void *ds, struct pt_regs *regs) +{ + struct tape_device *device; + struct tape_request *request; + devstat_t *devstat; + int final; + int rc; + + devstat = (devstat_t *) ds; + device = (struct tape_device *) s390_get_private_data(irq); + if (device == NULL) { + PRINT_ERR("could not get device structure for irq %d " + "in interrupt\n", irq); + return; + } + request = (struct tape_request *) devstat->intparm; + + DBF_EVENT(5, "tape: __tape_do_irq(%p, %p)\n", device, request); + + if(request && request->timeout.expires) { + /* + * If the timer was not yet started, the reference to the + * request has to be dropped here. Otherwise it will be + * dropped by the timeout handler.
+ */ + if(del_timer(&request->timeout) > 0) + request->timeout.data = (unsigned long) + tape_put_request(request); + } + + if (device->devstat.cstat & SCHN_STAT_INCORR_LEN) + DBF_EVENT(4, "tape: incorrect blocksize\n"); + + if (device->devstat.dstat != 0x0c){ + /* + * Any request that does not come back with channel end + * and device end is unusual. Log the sense data. + */ + DBF_EVENT(3,"-- Tape Interrupthandler --\n"); + tape_dump_sense_dbf(device, request); + } + if (TAPE_NOT_OPER(device)) { + DBF_EVENT(6, "tape:device is not operational\n"); + return; + } + + /* Some status handling */ + if(devstat && devstat->dstat & DEV_STAT_UNIT_CHECK) { + unsigned char *sense = devstat->ii.sense.data; + + if(!(sense[1] & SENSE_DRIVE_ONLINE)) + device->tape_generic_status &= ~GMT_ONLINE(~0); + } else { + device->tape_generic_status |= GMT_ONLINE(~0); + } + + rc = device->discipline->irq(device, request); + /* + * rc < 0 : request finished unsuccessfully. + * rc == TAPE_IO_SUCCESS: request finished successfully. + * rc == TAPE_IO_PENDING: request is still running. Ignore rc. + * rc == TAPE_IO_RETRY: request finished but needs another go. + * rc == TAPE_IO_STOP: request needs to get terminated. + */ + final = 0; + switch (rc) { + case TAPE_IO_SUCCESS: + final = 1; + break; + case TAPE_IO_PENDING: + break; + case TAPE_IO_RETRY: +#ifdef CONFIG_S390_TAPE_BLOCK + if (request->op == TO_BLOCK) + device->discipline->check_locate(device, request); +#endif + rc = __do_IO(device, request); + if (rc) { + DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc); + final = 1; + } + break; + case TAPE_IO_STOP: + __tape_halt_io(device, request); + rc = -EIO; + final = 1; + break; + default: + if (rc > 0) { + DBF_EVENT(6, "xunknownrc\n"); + PRINT_ERR("Invalid return code from discipline " + "interrupt function.\n"); + rc = -EIO; + } + final = 1; + break; + } + if (final) { + /* This might be an unsolicited interrupt (no request) */ + if(request != NULL) { + /* Set ending status. */ + request->rc = rc; + __tape_remove_request(device, request); + } + /* Start next request. */ + if (!list_empty(&device->req_queue)) + tape_schedule_bh(device); + } +} + +/* + * Lock a shared tape for our exclusive use. + */ +int +tape_assign(struct tape_device *device, int type) +{ + int rc; + + spin_lock_irq(&device->assign_lock); + + /* The device is already assigned */ + rc = 0; + if (!TAPE_ASSIGNED(device)) { + rc = device->discipline->assign(device); + + spin_lock(get_irq_lock(device->devinfo.irq)); + if (rc) { + PRINT_WARN( + "(%04x): assign failed - " + "device might be busy\n", + device->devstat.devno); + DBF_EVENT(3, + "(%04x): assign failed " + "- device might be busy\n", + device->devstat.devno); + TAPE_SET_STATE(device, TAPE_STATUS_BOXED); + } else { + DBF_EVENT(3, "(%04x): assign lpum = %02x\n", + device->devstat.devno, device->devstat.lpum); + tape_state_set( + device, + (device->tape_status | type) & + (~TAPE_STATUS_BOXED) + ); + } + } else { + spin_lock(get_irq_lock(device->devinfo.irq)); + TAPE_SET_STATE(device, type); + } + spin_unlock(get_irq_lock(device->devinfo.irq)); + spin_unlock_irq(&device->assign_lock); + + return rc; +} + +/* + * Unlock a shared tape. 
+ */ +int +tape_unassign(struct tape_device *device, int type) +{ + int rc; + + spin_lock_irq(&device->assign_lock); + + rc = 0; + spin_lock(get_irq_lock(device->devinfo.irq)); + if (!TAPE_ASSIGNED(device)) { + spin_unlock(get_irq_lock(device->devinfo.irq)); + spin_unlock_irq(&device->assign_lock); + return 0; + } + TAPE_CLEAR_STATE(device, type); + spin_unlock(get_irq_lock(device->devinfo.irq)); + + if (!TAPE_ASSIGNED(device)) { + rc = device->discipline->unassign(device); + if (rc) { + PRINT_WARN("(%04x): unassign failed\n", + device->devstat.devno); + DBF_EVENT(3, "(%04x): unassign failed\n", + device->devstat.devno); + } else { + DBF_EVENT(3, "(%04x): unassign lpum = %02x\n", + device->devstat.devno, device->devstat.lpum); + } + } + + spin_unlock_irq(&device->assign_lock); + return rc; +} + +/* + * Tape device open function used by tape_char & tape_block frontends. + */ +int +tape_open(struct tape_device *device) +{ + int rc; + + spin_lock_irq(&tape_discipline_lock); + spin_lock(get_irq_lock(device->devinfo.irq)); + if (TAPE_NOT_OPER(device)) { + DBF_EVENT(6, "TAPE:nodev\n"); + rc = -ENODEV; + } else if (TAPE_OPEN(device)) { + DBF_EVENT(6, "TAPE:dbusy\n"); + rc = -EBUSY; + } else if (device->discipline != NULL && + !try_inc_mod_count(device->discipline->owner)) { + DBF_EVENT(6, "TAPE:nodisc\n"); + rc = -ENODEV; + } else { + TAPE_SET_STATE(device, TAPE_STATUS_OPEN); + rc = 0; + } + spin_unlock(get_irq_lock(device->devinfo.irq)); + spin_unlock_irq(&tape_discipline_lock); + return rc; +} + +/* + * Tape device release function used by tape_char & tape_block frontends. + */ +int +tape_release(struct tape_device *device) +{ + spin_lock_irq(&tape_discipline_lock); + spin_lock(get_irq_lock(device->devinfo.irq)); + + if (TAPE_OPEN(device)) { + TAPE_CLEAR_STATE(device, TAPE_STATUS_OPEN); + + if (device->discipline->owner) + __MOD_DEC_USE_COUNT(device->discipline->owner); + } + spin_unlock(get_irq_lock(device->devinfo.irq)); + spin_unlock_irq(&tape_discipline_lock); + + return 0; +} + +/* + * Execute a magnetic tape command a number of times. + */ +int +tape_mtop(struct tape_device *device, int mt_op, int mt_count) +{ + tape_mtop_fn fn; + int rc; + + DBF_EVENT(6, "TAPE:mtio\n"); + DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op); + DBF_EVENT(6, "TAPE:arg: %x\n", mt_count); + + if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS) + return -EINVAL; + fn = device->discipline->mtop_array[mt_op]; + if(fn == NULL) + return -EINVAL; + + /* We assume that the backends can handle count up to 500. */ + if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF || + mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) { + rc = 0; + for (; mt_count > 500; mt_count -= 500) + if ((rc = fn(device, 500)) != 0) + break; + if (rc == 0) + rc = fn(device, mt_count); + } else + rc = fn(device, mt_count); + return rc; + +} + +void +tape_init_disciplines(void) +{ +#ifdef CONFIG_S390_TAPE_34XX + tape_34xx_init(); +#endif +#ifdef CONFIG_S390_TAPE_34XX_MODULE + request_module("tape_34xx"); +#endif + +#ifdef CONFIG_S390_TAPE_3590 + tape_3590_init(); +#endif +#ifdef CONFIG_S390_TAPE_3590_MODULE + request_module("tape_3590"); +#endif + tape_auto_detect(); +} + +/* + * Tape init function. 
+ */ +static int +tape_init (void) +{ + tape_dbf_area = debug_register ( "tape", 1, 2, 4*sizeof(long)); + debug_register_view(tape_dbf_area, &debug_sprintf_view); + debug_set_level(tape_dbf_area, 6); /* FIXME */ + DBF_EVENT(3, "tape init: ($Revision: 1.6 $)\n"); +#ifdef CONFIG_DEVFS_FS + tape_devfs_root_entry = devfs_mk_dir (NULL, "tape", NULL); +#endif /* CONFIG_DEVFS_FS */ + DBF_EVENT(3, "dev detect\n"); + /* Parse the parameters. */ + tape_devmap_init(); +#ifdef CONFIG_PROC_FS + tape_proc_init(); +#endif /* CONFIG_PROC_FS */ + tapechar_init(); +#ifdef CONFIG_S390_TAPE_BLOCK + tapeblock_init(); +#endif + tape_init_disciplines(); + return 0; +} + +/* + * Tape exit function. + */ +void +tape_exit(void) +{ + struct list_head *l, *n; + struct tape_discipline *discipline; + + DBF_EVENT(6, "tape exit\n"); + + /* Cleanup registered disciplines. */ + spin_lock(&tape_discipline_lock); + list_for_each_safe(l, n, &tape_disciplines) { + discipline = list_entry(l, struct tape_discipline, list); + __tape_unregister_discipline(discipline); + } + spin_unlock(&tape_discipline_lock); + + /* Get rid of the frontends */ + tapechar_exit(); +#ifdef CONFIG_S390_TAPE_BLOCK + tapeblock_exit(); +#endif +#ifdef CONFIG_PROC_FS + tape_proc_cleanup(); +#endif + tape_devmap_exit(); +#ifdef CONFIG_DEVFS_FS + devfs_unregister (tape_devfs_root_entry); /* devfs checks for NULL */ +#endif /* CONFIG_DEVFS_FS */ + debug_unregister (tape_dbf_area); +} + +/* + * Issue a hotplug event + */ +void tape_hotplug_event(struct tape_device *device, int devmaj, int action) { +#ifdef CONFIG_HOTPLUG + char *argv[3]; + char *envp[8]; + char devno[20]; + char major[20]; + char minor[20]; + + sprintf(devno, "DEVNO=%04x", device->devinfo.devno); + sprintf(major, "MAJOR=%d", devmaj); + sprintf(minor, "MINOR=%d", device->first_minor); + + argv[0] = hotplug_path; + argv[1] = "tape"; + argv[2] = NULL; + + envp[0] = "HOME=/"; + envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + + switch(action) { + case TAPE_HOTPLUG_CHAR_ADD: + case TAPE_HOTPLUG_BLOCK_ADD: + envp[2] = "ACTION=add"; + break; + case TAPE_HOTPLUG_CHAR_REMOVE: + case TAPE_HOTPLUG_BLOCK_REMOVE: + envp[2] = "ACTION=remove"; + break; + default: + BUG(); + } + switch(action) { + case TAPE_HOTPLUG_CHAR_ADD: + case TAPE_HOTPLUG_CHAR_REMOVE: + envp[3] = "INTERFACE=char"; + break; + case TAPE_HOTPLUG_BLOCK_ADD: + case TAPE_HOTPLUG_BLOCK_REMOVE: + envp[3] = "INTERFACE=block"; + break; + } + envp[4] = devno; + envp[5] = major; + envp[6] = minor; + envp[7] = NULL; + + call_usermodehelper(argv[0], argv, envp); +#endif +} + +MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and " + "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)"); +MODULE_DESCRIPTION("Linux on zSeries channel attached " + "tape device driver ($Revision: 1.6 $)"); + +module_init(tape_init); +module_exit(tape_exit); + +EXPORT_SYMBOL(tape_dbf_area); +EXPORT_SYMBOL(tape_state_string); +EXPORT_SYMBOL(tape_op_verbose); +EXPORT_SYMBOL(tape_state_set); +EXPORT_SYMBOL(tape_med_state_set); +EXPORT_SYMBOL(tape_register_discipline); +EXPORT_SYMBOL(tape_unregister_discipline); +EXPORT_SYMBOL(tape_alloc_request); +EXPORT_SYMBOL(tape_put_request); +EXPORT_SYMBOL(tape_clone_request); +EXPORT_SYMBOL(tape_dump_sense); +EXPORT_SYMBOL(tape_dump_sense_dbf); +EXPORT_SYMBOL(tape_do_io); +EXPORT_SYMBOL(tape_do_io_free); +EXPORT_SYMBOL(tape_do_io_async); +EXPORT_SYMBOL(tape_do_io_interruptible); +EXPORT_SYMBOL(tape_mtop); +EXPORT_SYMBOL(tape_hotplug_event); + diff --exclude-from /usr/src/exclude -u --new-file --recursive
linux.21rc5/drivers/s390/char/tapedefs.h linux.21rc5-ac2/drivers/s390/char/tapedefs.h --- linux.21rc5/drivers/s390/char/tapedefs.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tapedefs.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -/*********************************************************************** - * drivers/s390/char/tapedefs.h - * tape device driver for S/390 and zSeries tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - *********************************************************************** - */ - -/* Kernel Version Compatibility section */ -#include -#include -#include -#include - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,2,17)) -#define TAPE_DEBUG // use s390 debug feature -#else -#undef TAPE_DEBUG // debug feature not supported by our 2.2.16 code -static inline void set_normalized_cda ( ccw1_t * cp, unsigned long address ) { - cp -> cda = address; -} -static inline void clear_normalized_cda ( ccw1_t * ccw ) { - ccw -> cda = 0; -} -#define BUG() PRINT_FATAL("tape390: CRITICAL INTERNAL ERROR OCCURED. REPORT THIS BACK TO LINUX390@DE.IBM.COM\n") -#endif -#define CONFIG_S390_TAPE_DYNAMIC // allow devices to be attached or detached on the fly -#define TAPEBLOCK_RETRIES 20 // number of retries, when a block-dev request fails. - - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) -#define INIT_BLK_DEV(d_major,d_request_fn,d_queue_fn,d_current) \ -do { \ - blk_dev[d_major].queue = d_queue_fn; \ -} while(0) -static inline struct request * -tape_next_request( request_queue_t *queue ) -{ - return blkdev_entry_next_request(&queue->queue_head); -} -static inline void -tape_dequeue_request( request_queue_t * q, struct request *req ) -{ - blkdev_dequeue_request (req); -} -#else -#define s390_dev_info_t dev_info_t -typedef struct request *request_queue_t; -#ifndef init_waitqueue_head -#define init_waitqueue_head(x) do { *x = NULL; } while(0) -#endif -#define blk_init_queue(x,y) do {} while(0) -#define blk_queue_headactive(x,y) do {} while(0) -#define INIT_BLK_DEV(d_major,d_request_fn,d_queue_fn,d_current) \ -do { \ - blk_dev[d_major].request_fn = d_request_fn; \ - blk_dev[d_major].queue = d_queue_fn; \ - blk_dev[d_major].current_request = d_current; \ -} while(0) -static inline struct request * -tape_next_request( request_queue_t *queue ) -{ - return *queue; -} -static inline void -tape_dequeue_request( request_queue_t * q, struct request *req ) -{ - *q = req->next; - req->next = NULL; -} -#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape_devmap.c linux.21rc5-ac2/drivers/s390/char/tape_devmap.c --- linux.21rc5/drivers/s390/char/tape_devmap.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape_devmap.c 2003-05-28 15:15:07.000000000 +0100 @@ -0,0 +1,895 @@ +/* + * drivers/s390/char/tape_devmap.c + * device mapping for tape device driver + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + * + * Device mapping and tape= parameter parsing functions. All devmap + * functions may not be called from interrupt context. In particular + * tape_get_device is a no-no from interrupt context. + */ + +#include +#include +#include +#include + +#include +#include +#include + +/* This is ugly... 
*/ +#define PRINTK_HEADER "tape_devmap:" + +#include "tape.h" + +struct tape_devmap { + struct list_head list; + unsigned int devindex; + unsigned short devno; + devreg_t devreg; + struct tape_device *device; +}; + +struct tape_discmap { + struct list_head list; + devreg_t devreg; + struct tape_discipline *discipline; +}; + +/* + * List of all registered tapes and disciplines. + */ +static struct list_head tape_devreg_list = LIST_HEAD_INIT(tape_devreg_list); +static struct list_head tape_disc_devreg_list = LIST_HEAD_INIT(tape_disc_devreg_list); +int tape_max_devindex = 0; + +/* + * Single spinlock to protect devmap structures and lists. + */ +static spinlock_t tape_devmap_lock = SPIN_LOCK_UNLOCKED; + +/* + * Module/Kernel Parameter Handling. The syntax of tape= is: + * <devno> : (0x)?[0-9a-fA-F]+ + * <range> : <devno>(-<devno>)? + * <tape> : <range>(,<range>)* + */ +int tape_autodetect = 0; /* true when autodetection is active */ + +/* + * char *tape[] holds the ranges supplied by the tape= statement. + * It is named 'tape' so that it is filled directly by insmod with the + * comma separated strings when running as a module. + */ +static char *tape[256]; +MODULE_PARM (tape, "1-" __MODULE_STRING (256) "s"); + +#ifndef MODULE +/* + * The parameter parsing functions for builtin-drivers are called + * before kmalloc works. Store the pointers to the parameters strings + * into tape[] for later processing. + */ +static int __init +tape_call_setup (char *str) +{ + static int count = 0; + + if (count < 256) + tape[count++] = str; + return 1; +} + +__setup("tape=", tape_call_setup); +#endif /* not defined MODULE */ + +/* + * Add a range of devices and create the corresponding devreg_t + * structures. The order of the ranges added by this function + * will define the kdevs for the individual devices. + */ +int +tape_add_range(int from, int to) +{ + struct tape_devmap *devmap, *tmp; + struct list_head *l; + int devno; + + if (from > to) { + PRINT_ERR("Invalid device range %04x-%04x", from, to); + return -EINVAL; + } + spin_lock(&tape_devmap_lock); + for (devno = from; devno <= to; devno++) { + devmap = NULL; + list_for_each(l, &tape_devreg_list) { + tmp = list_entry(l, struct tape_devmap, list); + if (tmp->devno == devno) { + devmap = tmp; + break; + } + } + if (devmap == NULL) { + /* This devno is new. */ + /* Atomic allocation: tape_devmap_lock is held. */ + devmap = (struct tape_devmap *) + kmalloc(sizeof(struct tape_devmap), + GFP_ATOMIC); + if (devmap == NULL) { + spin_unlock(&tape_devmap_lock); + return -ENOMEM; + } + memset(devmap, 0, sizeof(struct tape_devmap)); + devmap->devno = devno; + devmap->devindex = tape_max_devindex++; + list_add(&devmap->list, &tape_devreg_list); + devmap->devreg.ci.devno = devno; + devmap->devreg.flag = DEVREG_TYPE_DEVNO; + devmap->devreg.oper_func = tape_oper_handler; + s390_device_register(&devmap->devreg); + } + } + spin_unlock(&tape_devmap_lock); + return 0; +} + +/* + * Read device number from string. The number is always in hex, + * a leading 0x is accepted (and has to be removed for simple_strtoul + * to work). + */ +static inline int +tape_devno(char *str, char **endp) +{ + /* remove leading '0x' */ + if (*str == '0') { + str++; + if (*str == 'x') + str++; + } + if (!isxdigit(*str)) + return -EINVAL; + return simple_strtoul(str, endp, 16); /* interpret anything as hex */ +} + +/* + * Parse Kernel/Module Parameters and create devregs for dynamic attach/detach + */ +static int +tape_parm_parse (char *str) +{ + int from, to, rc; + + while (1) { + to = from = tape_devno(str, &str); + if (*str == '-') { + str++; + to = tape_devno(str, &str); + } + /* Negative numbers in from/to indicate errors.
*/ + if (from >= 0 && to >= 0) { + rc = tape_add_range(from, to); + if (rc) + return rc; + } + if (*str != ',') + break; + str++; + } + if (*str != '\0') { + PRINT_WARN("junk at end of tape parameter string: %s\n", str); + return -EINVAL; + } + return 0; +} + +/* + * Parse parameters stored in tape[]. + */ +static int +tape_parse(void) +{ + int rc, i; + + if (*tape == NULL) { + /* No parameters present */ + PRINT_INFO ("No parameters supplied, enabling auto detect " + "mode for all supported devices.\n"); + tape_autodetect = 1; + return 0; + } + PRINT_INFO("Using ranges supplied in parameters, " + "disabling auto detect mode.\n"); + rc = 0; + for (i = 0; i < 256; i++) { + if (tape[i] == NULL) + break; + rc = tape_parm_parse(tape[i]); + if (rc) { + PRINT_ERR("Invalid tape parameter found.\n"); + break; + } + } + return rc; +} + +/* + * Create a devreg for a discipline. This is only done if no explicit + * tape range is given. The tape_oper_handler will call tape_add_range + * for each device that appears. + */ +static int +tape_add_disc_devreg(struct tape_discipline *discipline) +{ + struct tape_discmap *discmap; + + discmap = (struct tape_discmap *) kmalloc(sizeof(struct tape_discmap), + GFP_KERNEL); + if (discmap == NULL) { + PRINT_WARN("Could not alloc devreg: Out of memory\n" + "Dynamic attach/detach will not work!\n"); + return -ENOMEM; + } + spin_lock(&tape_devmap_lock); + /* Remember the discipline so tape_del_disc_devreg can find us. */ + discmap->discipline = discipline; + discmap->devreg.ci.hc.ctype = discipline->cu_type; + discmap->devreg.flag = DEVREG_MATCH_CU_TYPE | DEVREG_TYPE_DEVCHARS; + discmap->devreg.oper_func = tape_oper_handler; + s390_device_register(&discmap->devreg); + list_add(&discmap->list, &tape_disc_devreg_list); + spin_unlock(&tape_devmap_lock); + return 0; +} + +/* + * Free devregs for a discipline. + */ +static void +tape_del_disc_devreg(struct tape_discipline *discipline) +{ + struct list_head *l; + struct tape_discmap *discmap; + + spin_lock(&tape_devmap_lock); + list_for_each(l, &tape_disc_devreg_list) { + discmap = list_entry(l, struct tape_discmap, list); + if (discmap->discipline == discipline) { + s390_device_unregister(&discmap->devreg); + list_del(&discmap->list); + kfree(discmap); + break; + } + } + spin_unlock(&tape_devmap_lock); +} + + +/* + * Forget all about device numbers and disciplines. + * This may only be called at module unload or system shutdown. + */ +static void +tape_forget_devregs(void) +{ + struct list_head *l, *n; + struct tape_devmap *devmap; + struct tape_discmap *discmap; + + spin_lock(&tape_devmap_lock); + list_for_each_safe(l, n, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + if (devmap->device != NULL) + BUG(); + s390_device_unregister(&devmap->devreg); + list_del(&devmap->list); + kfree(devmap); + } + list_for_each_safe(l, n, &tape_disc_devreg_list) { + discmap = list_entry(l, struct tape_discmap, list); + s390_device_unregister(&discmap->devreg); + list_del(&discmap->list); + kfree(discmap); + } + spin_unlock(&tape_devmap_lock); +} + +/* + * Allocate memory for a new device structure.
+ */ +static struct tape_device * +tape_alloc_device(void) +{ + struct tape_device *device; + + device = (struct tape_device *) + kmalloc(sizeof(struct tape_device), GFP_KERNEL); + if (device == NULL) { + DBF_EXCEPTION(2, "ti:no mem\n"); + PRINT_INFO ("can't allocate memory for " + "tape info structure\n"); + return ERR_PTR(-ENOMEM); + } + memset(device, 0, sizeof(struct tape_device)); + device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA); + if (device->modeset_byte == NULL) { + DBF_EXCEPTION(2, "ti:no mem\n"); + PRINT_INFO("can't allocate memory for modeset byte\n"); + kfree(device); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&device->req_queue); + init_waitqueue_head(&device->state_change_wq); + spin_lock_init(&device->assign_lock); + atomic_set(&device->ref_count, 1); + TAPE_SET_STATE(device, TAPE_STATUS_INIT); + device->medium_state = MS_UNKNOWN; + *device->modeset_byte = 0; + + return device; +} + +/* + * Create a device structure. + */ +static struct tape_device * +tape_create_device(int devno) +{ + struct list_head *l; + struct tape_devmap *devmap, *tmp; + struct tape_device *device; + int rc; + + DBF_EVENT(4, "tape_create_device(0x%04x)\n", devno); + + device = tape_alloc_device(); + if (IS_ERR(device)) + return device; + /* Get devinfo from the common io layer. */ + rc = get_dev_info_by_devno(devno, &device->devinfo); + if (rc) { + tape_put_device(device); + return ERR_PTR(rc); + } + spin_lock(&tape_devmap_lock); + devmap = NULL; + list_for_each(l, &tape_devreg_list) { + tmp = list_entry(l, struct tape_devmap, list); + if (tmp->devno == devno) { + devmap = tmp; + break; + } + } + if (devmap != NULL && devmap->device == NULL) { + devmap->device = tape_clone_device(device); + device->first_minor = devmap->devindex * TAPE_MINORS_PER_DEV; + } else if (devmap == NULL) { + /* devno not in tape range. */ + DBF_EVENT(4, "No devmap for entry 0x%04x\n", devno); + tape_put_device(device); + device = ERR_PTR(-ENODEV); + } else { + /* Should not happen. */ + DBF_EVENT(4, "A devmap entry for 0x%04x already exists\n", + devno); + tape_put_device(device); + device = ERR_PTR(-EEXIST); + } + spin_unlock(&tape_devmap_lock); + + return device; +} + +struct tape_device * +tape_clone_device(struct tape_device *device) +{ + DBF_EVENT(4, "tape_clone_device(%p) = %i\n", device, + atomic_inc_return(&device->ref_count)); + return device; +} + +/* + * Find tape device by a device index. + */ +struct tape_device * +tape_get_device(int devindex) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + DBF_EVENT(5, "tape_get_device(%i)\n", devindex); + + device = ERR_PTR(-ENODEV); + spin_lock(&tape_devmap_lock); + /* Find devmap with the given device index. */ + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + if (devmap->devindex == devindex) { + if (devmap->device != NULL) { + device = tape_clone_device(devmap->device); + } + break; + } + } + spin_unlock(&tape_devmap_lock); + return device; +} + +/* + * Find tape handle by a devno.
+ */ +struct tape_device * +tape_get_device_by_devno(int devno) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + DBF_EVENT(5, "tape_get_device_by_devno(0x%04x)\n", devno); + + device = ERR_PTR(-ENODEV); + spin_lock(&tape_devmap_lock); + + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + if(devmap->device != NULL && devmap->devno == devno) { + device = tape_clone_device(devmap->device); + break; + } + } + spin_unlock(&tape_devmap_lock); + + return device; +} + +/* + * Find tape handle by a device irq. + */ +struct tape_device * +tape_get_device_by_irq(int irq) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + DBF_EVENT(5, "tape_get_device_by_irq(0x%02x)\n", irq); + + device = ERR_PTR(-ENODEV); + spin_lock(&tape_devmap_lock); + /* Find devmap whose device is using the given irq. */ + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + if (devmap->device != NULL && + devmap->device->devinfo.irq == irq) { + device = tape_clone_device(devmap->device); + break; + } + } + spin_unlock(&tape_devmap_lock); + return device; +} + +/* + * Decrease the reference counter of a device structure. If the + * reference counter reaches zero, free the device structure. + */ +void +tape_put_device(struct tape_device *device) +{ + int remain; + + DBF_EVENT(4, "tape_put_device(%p)\n", device); + + if ((remain = atomic_dec_return(&device->ref_count)) > 0) { + DBF_EVENT(5, "remaining = %i\n", remain); + return; + } + + /* + * Reference counter dropped to zero or below. A negative + * value means an unbalanced put; complain instead of + * freeing twice. + */ + if(remain < 0) { + PRINT_ERR("put device without reference\n"); + return; + } + + /* + * Free memory of a device structure. + */ + kfree(device->modeset_byte); + kfree(device); +} + +/* + * Scan the device range for devices with matching cu_type, create + * their device structures and enable them. + */ +void +tape_add_devices(struct tape_discipline *discipline) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + /* + * Scan tape devices for matching cu type. + */ + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + device = tape_create_device(devmap->devno); + if (IS_ERR(device)) + continue; + + if (device->devinfo.sid_data.cu_type == discipline->cu_type) { + DBF_EVENT(4, "tape_add_devices(%p)\n", discipline); + DBF_EVENT(4, "det irq: %x\n", device->devinfo.irq); + DBF_EVENT(4, "cu : %x\n", discipline->cu_type); + tape_enable_device(device, discipline); + } else { + devmap->device = NULL; + tape_put_device(device); + } + tape_put_device(device); + } + if (tape_autodetect) + tape_add_disc_devreg(discipline); +} + +/* + * Scan the device range for devices with matching cu_type, disable them + * and remove their device structures.
+ */ +void +tape_remove_devices(struct tape_discipline *discipline) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + if (tape_autodetect) + tape_del_disc_devreg(discipline); + /* + * Go through our tape info list and disable, deq and free + * all devices with matching discipline + */ + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + device = devmap->device; + if (device == NULL) + continue; + if (device->discipline == discipline) { + tape_disable_device(device); + tape_put_device(device); + devmap->device = NULL; + } + } +} + +/* + * Auto detect tape devices. + */ +void +tape_auto_detect(void) +{ + struct tape_device *device; + struct tape_discipline *discipline; + s390_dev_info_t dinfo; + int irq, devno; + + if (!tape_autodetect) + return; + for (irq = get_irq_first(); irq != -ENODEV; irq = get_irq_next(irq)) { + /* Get device info block. */ + devno = get_devno_by_irq(irq); + if (get_dev_info_by_irq(irq, &dinfo) < 0) + continue; + /* Search discipline with matching cu_type */ + discipline = tape_get_discipline(dinfo.sid_data.cu_type); + if (discipline == NULL) + continue; + DBF_EVENT(4, "tape_auto_detect()\n"); + DBF_EVENT(4, "det irq: %x\n", irq); + DBF_EVENT(4, "cu : %x\n", dinfo.sid_data.cu_type); + if (tape_add_range(dinfo.devno, dinfo.devno) == 0) { + device = tape_create_device(devno); + if (!IS_ERR(device)) { + tape_enable_device(device, discipline); + tape_put_device(device); + } + } + tape_put_discipline(discipline); + } +} + +/* + * Private task queue for oper/noper handling... + */ +static DECLARE_TASK_QUEUE(tape_cio_tasks); + +/* + * Oper Handler is called from Ingo's I/O layer when a new tape device is + * attached. + */ +static void +do_tape_oper_handler(void *data) +{ + struct { + int devno; + int cu_type; + struct tq_struct task; + } *p; + struct tape_device *device; + struct tape_discipline *discipline; + unsigned long flags; + + p = (void *) data; + + /* + * Handle the path revalidation scheme of the common I/O layer. + * Devices that were detected before will be reactivated. + */ + if(!IS_ERR(device = tape_get_device_by_devno(p->devno))) { + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + if (!TAPE_NOACCESS(device)) { + PRINT_ERR( + "Oper handler for irq %d called, " + "which is (still) internally used.\n", + device->devinfo.irq); + } else { + DBF_EVENT(3, + "T390(%04x): resume processing\n", + p->devno); + TAPE_CLEAR_STATE(device, TAPE_STATUS_NOACCESS); + tape_schedule_bh(device); + } + spin_unlock_irqrestore( + get_irq_lock(device->devinfo.irq), flags); + + tape_put_device(device); + kfree(p); + return; + } + + /* If we get here no device structure exists yet. */ + if (tape_autodetect && tape_add_range(p->devno, p->devno) != 0) { + kfree(p); + return; + } + + /* Find discipline for this device. */ + discipline = tape_get_discipline(p->cu_type); + if (discipline == NULL) { + /* Strange. Should not happen. */ + kfree(p); + return; + } + + device = tape_create_device(p->devno); + if (IS_ERR(device)) { + tape_put_discipline(discipline); + kfree(p); + return; + } + tape_enable_device(device, discipline); + tape_put_device(device); + tape_put_discipline(discipline); + kfree(p); +} + +int +tape_oper_handler(int irq, devreg_t *devreg) +{ + struct { + int devno; + int cu_type; + struct tq_struct task; + } *p; + s390_dev_info_t dinfo; + int rc; + + rc = get_dev_info_by_irq (irq, &dinfo); + if (rc < 0) + return rc; + + /* No memory, we lose.
*/ + if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) + return -ENOMEM; + + p->devno = dinfo.devno; + p->cu_type = dinfo.sid_data.cu_type; + memset(&p->task, 0, sizeof(struct tq_struct)); + p->task.routine = do_tape_oper_handler; + p->task.data = p; + + /* queue call to do_oper_handler. */ + queue_task(&p->task, &tape_cio_tasks); + run_task_queue(&tape_cio_tasks); + + return 0; +} + + +/* + * Not Oper Handler is called from Ingo's IO layer, when a tape device + * is detached. + */ +static void +do_tape_noper_handler(void *data) +{ + struct { + int irq; + int status; + struct tq_struct task; + } *p; + struct tape_device *device; + struct list_head *l; + struct tape_devmap *devmap; + unsigned long flags; + + p = data; + + /* + * find out devno of leaving device: CIO has already deleted + * this information so we need to find it by irq! + */ + device = tape_get_device_by_irq(p->irq); + if (IS_ERR(device)) { + kfree(p); + return; + } + + /* + * Handle the new path revalidation scheme of the common IO layer. + */ + switch(p->status) { + case DEVSTAT_DEVICE_GONE: + case DEVSTAT_REVALIDATE: /* FIXME: What to do? */ + tape_disable_device(device); + + /* + * Remove the device reference from the device map. + */ + spin_lock(&tape_devmap_lock); + list_for_each(l, &tape_devreg_list) { + devmap = list_entry( + l, struct tape_devmap, list + ); + if (devmap->device == device) { + tape_put_device(device); + devmap->device = NULL; + break; + } + } + spin_unlock(&tape_devmap_lock); + break; + case DEVSTAT_NOT_ACC: + /* + * Device shouldn't be accessed at the moment. The + * currently running request will complete. + */ + spin_lock_irqsave( + get_irq_lock(device->devinfo.irq), flags + ); + DBF_EVENT(3, "T390(%04x): suspend processing\n", + device->devinfo.devno); + TAPE_SET_STATE(device, TAPE_STATUS_NOACCESS); + spin_unlock_irqrestore( + get_irq_lock(device->devinfo.irq), flags + ); + break; + case DEVSTAT_NOT_ACC_ERR: { + struct tape_request *request; + + /* + * Device shouldn't be accessed at the moment. The + * request that was running is lost. + */ + spin_lock_irqsave( + get_irq_lock(device->devinfo.irq), flags + ); + + request = list_entry(device->req_queue.next, + struct tape_request, list); + if( + !list_empty(&device->req_queue) + && + request->status == TAPE_REQUEST_IN_IO + ) { + /* Argh! Might better belong to tape_core.c */ + list_del(&request->list); + request->rc = -EIO; + request->status = TAPE_REQUEST_DONE; + if (request->callback != NULL) { + request->callback( + request, + request->callback_data + ); + request->callback = NULL; + } + } + DBF_EVENT(3, "T390(%04x): suspend processing\n", + device->devinfo.devno); + DBF_EVENT(3, "T390(%04x): request lost\n", + device->devinfo.devno); + TAPE_SET_STATE(device, TAPE_STATUS_NOACCESS); + spin_unlock_irqrestore( + get_irq_lock(device->devinfo.irq), flags + ); + break; + } + default: + PRINT_WARN("T390(%04x): no operation handler called " + "with unknown status(0x%x)\n", + device->devinfo.devno, p->status); + tape_disable_device(device); + + /* + * Remove the device reference from the device map. 
+ */ + spin_lock(&tape_devmap_lock); + list_for_each(l, &tape_devreg_list) { + devmap = list_entry( + l, struct tape_devmap, list + ); + if (devmap->device == device) { + tape_put_device(device); + devmap->device = NULL; + break; + } + } + spin_unlock(&tape_devmap_lock); + } + + tape_put_device(device); + kfree(p); +} + +void +tape_noper_handler(int irq, int status) +{ + struct { + int irq; + int status; + struct tq_struct task; + } *p; + + /* No memory, we lose. */ + if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) + return; + + p->irq = irq; + p->status = status; + memset(&p->task, 0, sizeof(struct tq_struct)); + p->task.routine = do_tape_noper_handler; + p->task.data = p; + + /* queue call to do_tape_noper_handler. */ + queue_task(&p->task, &tape_cio_tasks); + run_task_queue(&tape_cio_tasks); +} + + +int +tape_devmap_init(void) +{ + return tape_parse(); +} + +void +tape_devmap_exit(void) +{ + tape_forget_devregs(); +} + +EXPORT_SYMBOL(tape_get_device); +EXPORT_SYMBOL(tape_get_device_by_irq); +EXPORT_SYMBOL(tape_get_device_by_devno); +EXPORT_SYMBOL(tape_put_device); +EXPORT_SYMBOL(tape_clone_device); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape.h linux.21rc5-ac2/drivers/s390/char/tape.h --- linux.21rc5/drivers/s390/char/tape.h 2003-05-28 15:08:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape.h 2003-04-25 13:53:58.000000000 +0100 @@ -1,203 +1,436 @@ -/*************************************************************************** - * +/* * drivers/s390/char/tape.h - * tape device driver for 3480/3490E tapes. + * tape device driver for 3480/3490E/3590 tapes. * * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader */ #ifndef _TAPE_H - #define _TAPE_H + #include #include - -#define MAX_TAPES 7 /* Max tapes supported is 7*/ -#define TAPE_MAGIC 0xE3C1D7C5 /* is ebcdic-"TAPE" */ - -typedef enum { - TS_UNUSED=0, TS_IDLE, TS_DONE, TS_FAILED, - TS_BLOCK_INIT, - TS_BSB_INIT, - TS_BSF_INIT, - TS_DSE_INIT, - TS_EGA_INIT, - TS_FSB_INIT, - TS_FSF_INIT, - TS_LDI_INIT, - TS_LBL_INIT, - TS_MSE_INIT, - TS_NOP_INIT, - TS_RBA_INIT, - TS_RBI_INIT, - TS_RBU_INIT, - TS_RBL_INIT, - TS_RDC_INIT, - TS_RFO_INIT, - TS_RSD_INIT, - TS_REW_INIT, - TS_REW_RELEASE_INIT, - TS_RUN_INIT, - TS_SEN_INIT, - TS_SID_INIT, - TS_SNP_INIT, - TS_SPG_INIT, - TS_SWI_INIT, - TS_SMR_INIT, - TS_SYN_INIT, - TS_TIO_INIT, - TS_UNA_INIT, - TS_WRI_INIT, - TS_WTM_INIT, - TS_NOT_OPER, - TS_SIZE } tape_stat; - -struct _tape_info_t; //Forward declaration - -typedef enum { - TE_START=0, TE_DONE, TE_FAILED, TE_ERROR, TE_OTHER, - TE_SIZE } tape_events; - -typedef void (*tape_disc_shutdown_t) (int); -typedef void (*tape_event_handler_t) (struct _tape_info_t*); -typedef ccw_req_t* (*tape_ccwgen_t)(struct _tape_info_t* ti,int count); -typedef ccw_req_t* (*tape_reqgen_t)(struct request* req,struct _tape_info_t* ti,int tapeblock_major); -typedef ccw_req_t* (*tape_rwblock_t)(const char* data,size_t count,struct _tape_info_t* ti); -typedef void (*tape_freeblock_t)(ccw_req_t* cqr,struct _tape_info_t* ti); -typedef void (*tape_setup_assist_t) (struct _tape_info_t*); #ifdef CONFIG_DEVFS_FS -typedef void (*tape_devfs_handler_t) (struct
_tape_info_t*); +#include #endif -typedef tape_event_handler_t tape_event_table_t[TS_SIZE][TE_SIZE]; -typedef struct _tape_discipline_t { - unsigned int cu_type; - tape_setup_assist_t setup_assist; - tape_event_handler_t error_recovery; - tape_reqgen_t bread; - tape_freeblock_t free_bread; - tape_rwblock_t write_block; - tape_freeblock_t free_write_block; - tape_rwblock_t read_block; - tape_freeblock_t free_read_block; - tape_ccwgen_t mtfsf; - tape_ccwgen_t mtbsf; - tape_ccwgen_t mtfsr; - tape_ccwgen_t mtbsr; - tape_ccwgen_t mtweof; - tape_ccwgen_t mtrew; - tape_ccwgen_t mtoffl; - tape_ccwgen_t mtnop; - tape_ccwgen_t mtbsfm; - tape_ccwgen_t mtfsfm; - tape_ccwgen_t mteom; - tape_ccwgen_t mterase; - tape_ccwgen_t mtsetdensity; - tape_ccwgen_t mtseek; - tape_ccwgen_t mttell; - tape_ccwgen_t mtsetdrvbuffer; - tape_ccwgen_t mtlock; - tape_ccwgen_t mtunlock; - tape_ccwgen_t mtload; - tape_ccwgen_t mtunload; - tape_ccwgen_t mtcompression; - tape_ccwgen_t mtsetpart; - tape_ccwgen_t mtmkpart; - tape_ccwgen_t mtiocget; - tape_ccwgen_t mtiocpos; - tape_disc_shutdown_t shutdown; - int (*discipline_ioctl_overload)(struct inode *,struct file*, unsigned int,unsigned long); - tape_event_table_t* event_table; - tape_event_handler_t default_handler; - struct _tape_info_t* tape; /* pointer for backreference */ - void* next; -} tape_discipline_t __attribute__ ((aligned(8))); -typedef struct _tape_frontend_t { - tape_setup_assist_t device_setup; +/* + * macros s390 debug feature (dbf) + */ +#define DBF_EVENT(d_level, d_str...) \ +do { \ + debug_sprintf_event(tape_dbf_area, d_level, d_str); \ +} while (0) + +#define DBF_EXCEPTION(d_level, d_str...) \ +do { \ + debug_sprintf_exception(tape_dbf_area, d_level, d_str); \ +} while (0) + +#define TAPE_VERSION_MAJOR 2 +#define TAPE_VERSION_MINOR 0 +#define TAPE_MAGIC "tape" + +#define TAPE_MINORS_PER_DEV 2 /* two minors per device */ +#define TAPEBLOCK_HSEC_SIZE 2048 +#define TAPEBLOCK_HSEC_S2B 2 +#define TAPEBLOCK_RETRIES 5 + +/* Event types for hotplug */ +#define TAPE_HOTPLUG_CHAR_ADD 1 +#define TAPE_HOTPLUG_BLOCK_ADD 2 +#define TAPE_HOTPLUG_CHAR_REMOVE 3 +#define TAPE_HOTPLUG_BLOCK_REMOVE 4 + +enum tape_medium_state { + MS_UNKNOWN, + MS_LOADED, + MS_UNLOADED, + MS_SIZE +}; + +enum tape_op { + TO_BLOCK, /* Block read */ + TO_BSB, /* Backward space block */ + TO_BSF, /* Backward space filemark */ + TO_DSE, /* Data security erase */ + TO_FSB, /* Forward space block */ + TO_FSF, /* Forward space filemark */ + TO_LBL, /* Locate block label */ + TO_NOP, /* No operation */ + TO_RBA, /* Read backward */ + TO_RBI, /* Read block information */ + TO_RFO, /* Read forward */ + TO_REW, /* Rewind tape */ + TO_RUN, /* Rewind and unload tape */ + TO_WRI, /* Write block */ + TO_WTM, /* Write tape mark */ + TO_MSEN, /* Medium sense */ + TO_LOAD, /* Load tape */ + TO_READ_CONFIG, /* Read configuration data */ + TO_READ_ATTMSG, /* Read attention message */ + TO_DIS, /* Tape display */ + TO_ASSIGN, /* Assign tape to channel path */ + TO_UNASSIGN, /* Unassign tape from channel path */ + TO_BREAKASS, /* Break the assignment of another host */ + TO_SIZE /* #entries in tape_op_t */ +}; + +/* Forward declaration */ +struct tape_device; + +/* The tape device list lock */ +extern rwlock_t tape_dev_lock; + +/* Tape CCW request */ +struct tape_request { + struct list_head list; /* list head for request queueing. */ + struct tape_device *device; /* tape device of this request */ + ccw1_t *cpaddr; /* address of the channel program. */ + void *cpdata; /* pointer to ccw data. 
*/ + char status; /* status of this request */ + int options; /* options for execution. */ + int retries; /* retry counter for error recovery. */ + + /* + * This timer can be used to automatically cancel a request after + * some time. Specifically the assign request seems to lock up under + * certain circumstances. + */ + struct timer_list timeout; + + enum tape_op op; + int rc; + atomic_t ref_count; + + /* Callback for delivering final status. */ + void (*callback)(struct tape_request *, void *); + void *callback_data; +}; + +/* tape_request->status can be: */ +#define TAPE_REQUEST_INIT 0x00 /* request is ready to be processed */ +#define TAPE_REQUEST_QUEUED 0x01 /* request is queued to be processed */ +#define TAPE_REQUEST_IN_IO 0x02 /* request is currently in IO */ +#define TAPE_REQUEST_DONE 0x03 /* request is completed. */ + +/* Function type for magnetic tape commands */ +typedef int (*tape_mtop_fn)(struct tape_device *, int); + +/* Size of the array containing the mtops for a discipline */ +#define TAPE_NR_MTOPS (MTMKPART+1) + +/* Tape Discipline */ +struct tape_discipline { + struct list_head list; + struct module *owner; + unsigned int cu_type; + int (*setup_device)(struct tape_device *); + void (*cleanup_device)(struct tape_device *); + int (*assign)(struct tape_device *); + int (*unassign)(struct tape_device *); + int (*force_unassign)(struct tape_device *); + int (*irq)(struct tape_device *, struct tape_request *); + struct tape_request *(*read_block)(struct tape_device *, size_t); + struct tape_request *(*write_block)(struct tape_device *, size_t); + void (*process_eov)(struct tape_device*); + /* Block device stuff. */ + struct tape_request *(*bread)(struct tape_device *, struct request *); + void (*check_locate)(struct tape_device *, struct tape_request *); + void (*free_bread)(struct tape_request *); + /* ioctl function for additional ioctls. */ + int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long); + /* Array of tape commands with TAPE_NR_MTOPS entries */ + tape_mtop_fn *mtop_array; +}; + +/* + * The discipline irq function either returns an error code (<0) which + * means that the request has failed with an error or one of the following: + */ +#define TAPE_IO_SUCCESS 0 /* request successful */ +#define TAPE_IO_PENDING 1 /* request still running */ +#define TAPE_IO_RETRY 2 /* retry the current request */ +#define TAPE_IO_STOP 3 /* stop the running request */ + +/* Char Frontend Data */ +struct tape_char_data { + /* Idal buffer to temporarily store character data */ + struct idal_buffer * idal_buf; + /* Block size (in bytes) of the character device (0=auto) */ + int block_size; +#ifdef CONFIG_DEVFS_FS + /* tape/<devno>/char subdirectory in devfs */ + devfs_handle_t devfs_char_dir; + /* tape/<devno>/char/nonrewinding entry in devfs */ + devfs_handle_t devfs_nonrewinding; + /* tape/<devno>/char/rewinding entry in devfs */ + devfs_handle_t devfs_rewinding; +#endif /* CONFIG_DEVFS_FS */ +}; + +#ifdef CONFIG_S390_TAPE_BLOCK +/* Block Frontend Data */ +struct tape_blk_data +{ + /* Block device request queue. */ + request_queue_t request_queue; + /* Block frontend tasklet */ + struct tasklet_struct tasklet; + /* Current position on the tape.
*/ + unsigned int block_position; + /* The start of the block device image file */ + unsigned int start_block_id; #ifdef CONFIG_DEVFS_FS - tape_devfs_handler_t mkdevfstree; - tape_devfs_handler_t rmdevfstree; + /* tape/<devno>/block subdirectory in devfs */ + devfs_handle_t devfs_block_dir; + /* tape/<devno>/block/disc entry in devfs */ + devfs_handle_t devfs_disc; +#endif /* CONFIG_DEVFS_FS */ +}; #endif - void* next; -} tape_frontend_t __attribute__ ((aligned(8))); +#define TAPE_STATUS_INIT 0x00000001 +#define TAPE_STATUS_ASSIGN_M 0x00000002 +#define TAPE_STATUS_ASSIGN_A 0x00000004 +#define TAPE_STATUS_OPEN 0x00000008 +#define TAPE_STATUS_BLOCKDEV 0x00000010 +#define TAPE_STATUS_BOXED 0x20000000 +#define TAPE_STATUS_NOACCESS 0x40000000 +#define TAPE_STATUS_NOT_OPER 0x80000000 + +#define TAPE_SET_STATE(td,st) \ + do { \ + tape_state_set(td, td->tape_status | (st)); \ + } while(0) +#define TAPE_CLEAR_STATE(td,st) \ + do { \ + tape_state_set(td, td->tape_status & ~(st)); \ + } while(0) + +#define TAPE_UNUSED(td) (!TAPE_OPEN(td)) +#define TAPE_INIT(td) (td->tape_status & TAPE_STATUS_INIT) +#define TAPE_ASSIGNED(td) ( \ + td->tape_status & ( \ + TAPE_STATUS_ASSIGN_M | \ + TAPE_STATUS_ASSIGN_A \ + ) \ + ) +#define TAPE_OPEN(td) (td->tape_status & TAPE_STATUS_OPEN) +#define TAPE_BLOCKDEV(td) (td->tape_status & TAPE_STATUS_BLOCKDEV) +#define TAPE_BOXED(td) (td->tape_status & TAPE_STATUS_BOXED) +#define TAPE_NOACCESS(td) (td->tape_status & TAPE_STATUS_NOACCESS) +#define TAPE_NOT_OPER(td) (td->tape_status & TAPE_STATUS_NOT_OPER) + +/* Tape Info */ +struct tape_device { + /* Device discipline information. */ + struct tape_discipline *discipline; + void * discdata; + + /* Generic status bits */ + long tape_generic_status; + unsigned int tape_status; + enum tape_medium_state medium_state; + + /* Number of tapemarks required for correct termination */ + int required_tapemarks; + + /* Waitqueue for state changes and device flags */ + wait_queue_head_t state_change_wq; + unsigned char * modeset_byte; + + /* Reference count. */ + atomic_t ref_count; + + /* For persistent assign */ + spinlock_t assign_lock; + + /* Request queue. */ + struct list_head req_queue; + atomic_t bh_scheduled; + struct tq_struct bh_task; + + /* Common i/o stuff. */ + s390_dev_info_t devinfo; + devstat_t devstat; + + /* each tape device has two minors */ + int first_minor; -typedef struct _tape_info_t { - wait_queue_head_t wq; - s390_dev_info_t devinfo; /* device info from Common I/O */ - int wanna_wakeup; - int rew_minor; /* minor number for the rewinding tape */ - int nor_minor; /* minor number for the nonrewinding tape */ - int blk_minor; /* minor number for the block device */ - devstat_t devstat; /* contains irq, devno, status */ - size_t block_size; /* block size of tape */ - int drive_type; /* Code indicating type of drive */ - struct file *rew_filp; /* backpointer to file structure */ - struct file *nor_filp; - struct file *blk_filp; - int tape_state; /* State of the device. See tape_stat */ - int rc; /* Return code.
*/ - tape_discipline_t* discipline; - request_queue_t request_queue; - struct request* current_request; - int blk_retries; - long position; - int medium_is_unloaded; // Becomes true when a unload-type operation was issued, false again when medium-insert was detected - ccw_req_t* cqr; - atomic_t bh_scheduled; - struct tq_struct bh_tq; #ifdef CONFIG_DEVFS_FS - devfs_handle_t devfs_dir; /* devfs handle for tape/DEVNO directory */ - devfs_handle_t devfs_char_dir; /* devfs handle for tape/DEVNO/char directory */ - devfs_handle_t devfs_block_dir; /* devfs handle for tape/DEVNO/block directory */ - devfs_handle_t devfs_nonrewinding; /* devfs handle for tape/DEVNO/char/nonrewinding device */ - devfs_handle_t devfs_rewinding; /* devfs handle for tape/DEVNO/char/rewinding device */ - devfs_handle_t devfs_disc; /* devfs handle for tape/DEVNO/block/disc device */ + /* Toplevel devfs directory. */ + devfs_handle_t devfs_dir; +#endif /* CONFIG_DEVFS_FS */ + /* Character device frontend data */ + struct tape_char_data char_data; +#ifdef CONFIG_S390_TAPE_BLOCK + /* Block dev frontend data */ + struct tape_blk_data blk_data; #endif - void* discdata; - void* kernbuf; - void* userbuf; - void* next; -} tape_info_t __attribute__ ((aligned(8))); +}; -/* tape initialisation functions */ -int tape_init(void); -int tape_setup (tape_info_t * ti, int irq, int minor); +/* Externals from tape_core.c */ +struct tape_request *tape_alloc_request(int cplength, int datasize); +struct tape_request *tape_put_request(struct tape_request *); +struct tape_request *tape_clone_request(struct tape_request *); +int tape_do_io(struct tape_device *, struct tape_request *); +int tape_do_io_async(struct tape_device *, struct tape_request *); +int tape_do_io_interruptible(struct tape_device *, struct tape_request *); +void tape_schedule_bh(struct tape_device *); +void tape_hotplug_event(struct tape_device *, int, int); + +static inline int +tape_do_io_free(struct tape_device *device, struct tape_request *request) +{ + int rc; + + rc = tape_do_io(device, request); + tape_put_request(request); + return rc; +} + +int tape_oper_handler(int irq, devreg_t *devreg); +void tape_noper_handler(int irq, int status); +int tape_open(struct tape_device *); +int tape_release(struct tape_device *); +int tape_assign(struct tape_device *, int type); +int tape_unassign(struct tape_device *, int type); +int tape_mtop(struct tape_device *, int, int); + +/* Externals from tape_devmap.c */ +int tape_devmap_init(void); +void tape_devmap_exit(void); + +struct tape_device *tape_get_device(int devindex); +struct tape_device *tape_get_device_by_devno(int devno); +struct tape_device *tape_clone_device(struct tape_device *); +void tape_put_device(struct tape_device *); + +void tape_auto_detect(void); +void tape_add_devices(struct tape_discipline *); +void tape_remove_devices(struct tape_discipline *); + +extern int tape_max_devindex; + +/* Externals from tape_char.c */ +int tapechar_init(void); +void tapechar_exit(void); +int tapechar_setup_device(struct tape_device *); +void tapechar_cleanup_device(struct tape_device *); + +/* Externals from tape_block.c */ +int tapeblock_init (void); +void tapeblock_exit(void); +int tapeblock_setup_device(struct tape_device *); +void tapeblock_cleanup_device(struct tape_device *); +void tapeblock_medium_change(struct tape_device *); + +/* Discipline functions */ +int tape_register_discipline(struct tape_discipline *); +void tape_unregister_discipline(struct tape_discipline *); +struct tape_discipline *tape_get_discipline(int 
cu_type); +void tape_put_discipline(struct tape_discipline *); +int tape_enable_device(struct tape_device *, struct tape_discipline *); +void tape_disable_device(struct tape_device *device); -/* functoins for alloc'ing ccw stuff */ -inline ccw_req_t * tape_alloc_ccw_req (tape_info_t* ti, int cplength, int datasize); -void tape_free_request (ccw_req_t * request); +/* tape initialisation functions */ +void tape_proc_init (void); +void tape_proc_cleanup (void); /* a function for dumping device sense info */ -void tape_dump_sense (devstat_t * stat); - -#ifdef CONFIG_S390_TAPE_DYNAMIC -/* functions for dyn. dev. attach/detach */ -int tape_oper_handler ( int irq, struct _devreg *dreg); -#endif +void tape_dump_sense(struct tape_device *, struct tape_request *); +void tape_dump_sense_dbf(struct tape_device *, struct tape_request *); /* functions for handling the status of a device */ -inline void tapestate_set (tape_info_t * ti, int newstate); -inline int tapestate_get (tape_info_t * ti); -void tapestate_event (tape_info_t * ti, int event); -extern char* state_verbose[TS_SIZE]; -extern char* event_verbose[TE_SIZE]; - -/****************************************************************************/ - -/* Some linked lists for storing plugins and devices */ -extern tape_info_t *first_tape_info; -extern tape_discipline_t *first_discipline; -extern tape_frontend_t *first_frontend; +inline void tape_state_set (struct tape_device *, unsigned int status); +inline void tape_med_state_set(struct tape_device *, enum tape_medium_state); +const char *tape_state_string(struct tape_device *); + +/* Tape 3480/3490 init/exit functions. */ +int tape_34xx_init(void); +void tape_34xx_exit(void); /* The debug area */ -#ifdef TAPE_DEBUG -extern debug_info_t *tape_debug_area; -#endif +extern debug_info_t *tape_dbf_area; + +/* functions for building ccws */ +static inline ccw1_t * +tape_ccw_cc(ccw1_t *ccw, __u8 cmd_code, __u16 memsize, void *cda) +{ + ccw->cmd_code = cmd_code; + ccw->flags = CCW_FLAG_CC; + ccw->count = memsize; + ccw->cda = (__u32)(addr_t) cda; + return ccw + 1; +} + +static inline ccw1_t * +tape_ccw_end(ccw1_t *ccw, __u8 cmd_code, __u16 memsize, void *cda) +{ + ccw->cmd_code = cmd_code; + ccw->flags = 0; + ccw->count = memsize; + ccw->cda = (__u32)(addr_t) cda; + return ccw + 1; +} + +static inline ccw1_t * +tape_ccw_cmd(ccw1_t *ccw, __u8 cmd_code) +{ + ccw->cmd_code = cmd_code; + ccw->flags = 0; + ccw->count = 0; + ccw->cda = (__u32)(addr_t) &ccw->cmd_code; + return ccw + 1; +} + +static inline ccw1_t * +tape_ccw_repeat(ccw1_t *ccw, __u8 cmd_code, int count) +{ + while (count-- > 0) { + ccw->cmd_code = cmd_code; + ccw->flags = CCW_FLAG_CC; + ccw->count = 0; + ccw->cda = (__u32)(addr_t) &ccw->cmd_code; + ccw++; + } + return ccw; +} + +extern inline ccw1_t* +tape_ccw_cc_idal(ccw1_t *ccw, __u8 cmd_code, struct idal_buffer *idal) +{ + ccw->cmd_code = cmd_code; + ccw->flags = CCW_FLAG_CC; + idal_buffer_set_cda(idal, ccw); + return ccw++; +} + +extern inline ccw1_t* +tape_ccw_end_idal(ccw1_t *ccw, __u8 cmd_code, struct idal_buffer *idal) +{ + ccw->cmd_code = cmd_code; + ccw->flags = 0; + idal_buffer_set_cda(idal, ccw); + return ccw++; +} + +/* Global vars */ +extern const char *tape_op_verbose[]; #endif /* for ifdef tape.h */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape_proc.c linux.21rc5-ac2/drivers/s390/char/tape_proc.c --- linux.21rc5/drivers/s390/char/tape_proc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape_proc.c 
2003-04-25 13:53:58.000000000 +0100 @@ -0,0 +1,383 @@ +/* + * drivers/s390/char/tape_proc.c + * tape device driver for S/390 and zSeries tapes. + * + * S390 and zSeries version + * Copyright (C) 2001 IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * + * PROCFS Functions + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "tape.h" + +#define PRINTK_HEADER "T390:" + +static const char *tape_med_st_verbose[MS_SIZE] = +{ + [MS_UNKNOWN] = "UNKNOWN ", + [MS_LOADED] = "LOADED ", + [MS_UNLOADED] = "UNLOADED" +}; + +/* our proc tapedevices entry */ +static struct proc_dir_entry *tape_proc_devices; + +static int tape_proc_show(struct seq_file *m, void *v) { + struct tape_device *device; + struct tape_request *request; + unsigned long n; + + n = ((unsigned long) v - 1); + + if(n == 0) { + seq_printf(m, + "TapeNo\tDevNo\tCuType\tCuModel\tDevType\t" + "DevMod\tBlkSize\tState\tOp\tMedState\n" + ); + } + + device = tape_get_device(n); + if(IS_ERR(device)) + return 0; + + spin_lock_irq(get_irq_lock(device->devinfo.irq)); + + seq_printf(m, + "%d\t%04X\t%04X\t%02X\t%04X\t%02X\t", + device->first_minor/TAPE_MINORS_PER_DEV, + device->devinfo.devno, + device->devinfo.sid_data.cu_type, + device->devinfo.sid_data.cu_model, + device->devinfo.sid_data.dev_type, + device->devinfo.sid_data.dev_model + ); + + /* + * the blocksize is either 'auto' or the blocksize as a decimal number + */ + if(device->char_data.block_size == 0) + seq_printf(m, "auto\t"); + else + seq_printf(m, "%i\t", device->char_data.block_size); + + seq_printf(m, "%s\t", tape_state_string(device)); + + /* + * verbose description of current tape operation + */ + if(!list_empty(&device->req_queue)) { + request = list_entry( + device->req_queue.next, struct tape_request, list + ); + + seq_printf(m, "%s\t", tape_op_verbose[request->op]); + } else { + seq_printf(m, "---\t"); + } + + seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]); + + spin_unlock_irq(get_irq_lock(device->devinfo.irq)); + tape_put_device(device); + + return 0; +} + +static void *tape_proc_start(struct seq_file *m, loff_t *pos) { + if(*pos < tape_max_devindex) + return (void *) ((unsigned long) (*pos) + 1); + return NULL; +} + +static void tape_proc_stop(struct seq_file *m, void *v) { +} + +static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos) { + (*pos)++; + return tape_proc_start(m, pos); +} + +static struct seq_operations tape_proc_seq = { + .start = tape_proc_start, + .next = tape_proc_next, + .stop = tape_proc_stop, + .show = tape_proc_show, +}; + +static int tape_proc_open(struct inode *inode, struct file *file) { + return seq_open(file, &tape_proc_seq); +} + +static int +tape_proc_assign(int devno) +{ + int rc; + struct tape_device *device; + + if(IS_ERR(device = tape_get_device_by_devno(devno))) { + DBF_EVENT(3, "TAPE(%04x): assign invalid device\n", devno); + PRINT_ERR("TAPE(%04x): assign invalid device\n", devno); + return PTR_ERR(device); + } + + rc = tape_assign(device, TAPE_STATUS_ASSIGN_M); + + tape_put_device(device); + + return rc; +} + +static int +tape_proc_unassign(int devno) +{ + int rc; + struct tape_device *device; + + if(IS_ERR(device = tape_get_device_by_devno(devno))) { + DBF_EVENT(3, "TAPE(%04x): unassign invalid device\n", devno); + PRINT_ERR("TAPE(%04x): unassign invalid device\n", devno); + return PTR_ERR(device); + } + + rc = tape_unassign(device, TAPE_STATUS_ASSIGN_M); + + tape_put_device(device); + + return rc; +} + +#ifdef SMB_DEBUG_BOX +static int
+tape_proc_put_into_box(int devno) +{ + struct tape_device *device; + + if(IS_ERR(device = tape_get_device_by_devno(devno))) { + DBF_EVENT(3, "TAPE(%04x): invalid device\n", devno); + PRINT_ERR("TAPE(%04x): invalid device\n", devno); + return PTR_ERR(device); + } + + TAPE_SET_STATE(device, TAPE_STATUS_BOXED); + + tape_put_device(device); + + return 0; +} +#endif + +#ifdef TAPE390_FORCE_UNASSIGN +static int +tape_proc_force_unassign(int devno) +{ + int rc; + struct tape_device *device; + + if(IS_ERR(device = tape_get_device_by_devno(devno))) { + DBF_EVENT(3, "TAPE(%04x): force unassign invalid device\n", + devno); + PRINT_ERR("TAPE(%04x): force unassign invalid device\n", + devno); + return PTR_ERR(device); + } + + if (!TAPE_BOXED(device)) { + DBF_EVENT(3, "TAPE(%04x): forced unassignment only allowed for" + " boxed device\n", devno); + PRINT_ERR("TAPE(%04x): forced unassignment only allowed for" + " boxed device\n", devno); + rc = -EPERM; + } else if(device->discipline->force_unassign == NULL) { + DBF_EVENT(3, "TAPE(%04x): force unassign is not supported on" + " this device\n", devno); + PRINT_ERR("TAPE(%04x): force unassign is not supported on" + " this device\n", devno); + rc = -EPERM; + } else { + rc = device->discipline->force_unassign(device); + if(rc == 0) { + spin_lock_irq(get_irq_lock(device->devinfo.irq)); + TAPE_CLEAR_STATE( + device, + TAPE_STATUS_BOXED + | TAPE_STATUS_ASSIGN_A + | TAPE_STATUS_ASSIGN_M + ); + spin_unlock_irq(get_irq_lock(device->devinfo.irq)); + } + } + + tape_put_device(device); + return rc; +} +#endif + +/* + * Skips over all characters to the position after a newline or beyond the + * last character of the string. + * Returns the number of characters skipped. + */ +static size_t +tape_proc_skip_eol(const char *buf, size_t len, loff_t *off) +{ + loff_t start = *off; + + while((*off - start) < len) { + if(*(buf+*off) == '\n') { + *off += 1; + break; + } + *off += 1; + } + + return (size_t) (*off - start); +} + +/* + * Skips over whitespace characters and returns the number of characters + * that were skipped.
+ */ +static size_t +tape_proc_skip_ws(const char *buf, size_t len, loff_t *off) +{ + loff_t start = *off; + + while((*off - start) < len) { + if(*(buf + *off) != ' ' && *(buf + *off) != '\t') + break; + *off += 1; + } + + return (size_t) (*off - start); +} + +static size_t +tape_proc_get_hexvalue(char *buf, size_t len, loff_t *off, unsigned int *hex) +{ + int hexdigit; + loff_t start = *off; + + /* Skip possible space characters */ + tape_proc_skip_ws(buf, len, off); + + /* The hexvalue might start with '0x' or '0X' */ + if((*off - start)+1 < len && *(buf + *off) == '0') + if(*(buf + *off + 1) == 'x' || *(buf + *off + 1) == 'X') + *off += 2; + + *hex = 0; + while((*off - start) < len) { + if(*(buf + *off) >= '0' && *(buf + *off) <= '9') { + hexdigit = *(buf + *off) - '0'; + } else if(*(buf + *off) >= 'a' && *(buf + *off) <= 'f') { + hexdigit = *(buf + *off) - 'a' + 10; + } else if(*(buf + *off) >= 'A' && *(buf + *off) <= 'F') { + hexdigit = *(buf + *off) - 'A' + 10; + } else { + break; + } + *hex = (*hex << 4) + hexdigit; + *off += 1; + } + + return (size_t) (*off - start); +} + +static ssize_t tape_proc_write( + struct file *file, + const char *buf, + size_t len, + loff_t *off +) { + loff_t start = *off; + unsigned int devno; + char *s; + + if(PAGE_SIZE < len) + return -EINVAL; + + if((s = kmalloc(len, GFP_KERNEL)) == NULL) + return -ENOMEM; + + if(copy_from_user(s, buf, len) != 0) { + kfree(s); + return -EFAULT; + } + + if(strncmp(s+*off, "assign", 6) == 0) { + (*off) += 6; + tape_proc_get_hexvalue(s, len - 6, off, &devno); + if(devno > 0) + tape_proc_assign(devno); + } else if(strncmp(s+*off, "unassign", 8) == 0) { + (*off) += 8; + tape_proc_get_hexvalue(s, len - (*off - start), off, &devno); + if(devno > 0) + tape_proc_unassign(devno); +#ifdef TAPE390_FORCE_UNASSIGN + } else if(strncmp(s+*off, "forceunassign", 13) == 0) { + (*off) += 13; + tape_proc_get_hexvalue(s, len - (*off - start), off, &devno); + if(devno > 0) + tape_proc_force_unassign(devno); +#endif +#ifdef SMB_DEBUG_BOX + } else if(strncmp(s+*off, "putintobox", 10) == 0) { + (*off) += 10; + tape_proc_get_hexvalue(s, len - (*off - start), off, &devno); + if(devno > 0) + tape_proc_put_into_box(devno); +#endif + } else { + DBF_EVENT(3, "tape_proc_write() parse error\n"); + PRINT_ERR("Invalid /proc/tapedevices command.\n"); + } + tape_proc_skip_eol(s, len - (*off - start), off); + + kfree(s); + + /* Just pretend to have processed all the stuff */ + return len; +} + +static struct file_operations tape_proc_ops = +{ + .open = tape_proc_open, + .read = seq_read, + .write = tape_proc_write, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * Initialize procfs stuff on startup + */ +void tape_proc_init(void) { + tape_proc_devices = create_proc_entry( + "tapedevices", S_IFREG | S_IRUGO | S_IWUSR, &proc_root); + + if (tape_proc_devices == NULL) { + PRINT_WARN("tape: Cannot register procfs entry tapedevices\n"); + return; + } + tape_proc_devices->proc_fops = &tape_proc_ops; + tape_proc_devices->owner = THIS_MODULE; +} + +/* + * Cleanup all stuff registered to the procfs + */ +void tape_proc_cleanup(void) { + if(tape_proc_devices != NULL) + remove_proc_entry ("tapedevices", &proc_root); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape_std.c linux.21rc5-ac2/drivers/s390/char/tape_std.c --- linux.21rc5/drivers/s390/char/tape_std.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape_std.c 2003-05-28 15:15:07.000000000 +0100 @@ -0,0 +1,768 @@ +/* + *
drivers/s390/char/tape_std.c + * standard tape device functions for ibm tapes. + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#include +#include +#include +#include +#include +#ifdef CONFIG_S390_TAPE_BLOCK +#include +#endif + +#include +#include +#include +#include + +#include "tape.h" +#include "tape_std.h" + +#define PRINTK_HEADER "T3xxx:" +#define ZLINUX_PASSWD "zLinux PWD" + +/* + * tape_std_assign + */ +int +tape_std_assign(struct tape_device *device) +{ + struct tape_request *request; + + request = tape_alloc_request(2, 11); + if (IS_ERR(request)) + return PTR_ERR(request); + + request->op = TO_ASSIGN; + + /* + * From the documentation, assign requests should fail with the + * 'assigned elsewhere' bit set if the tape is already assigned + * to another host. However, it seems, in reality the request + * hangs forever. Therefore we just set a timeout for this request. + */ + init_timer(&request->timeout); + request->timeout.expires = jiffies + 1 * HZ; + + /* Setup the CCWs */ + tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + + return tape_do_io_free(device, request); +} + +/* + * tape_std_unassign + */ +int +tape_std_unassign (struct tape_device *device) +{ + struct tape_request *request; + + request = tape_alloc_request(2, 11); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_UNASSIGN; + tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + return tape_do_io_free(device, request); +} + +#ifdef TAPE390_FORCE_UNASSIGN +/* + * tape_std_force_unassign: forcibly breaks an assignment held by another host. + * (Since we need a password this works only with other zLinux hosts!) + */ +int +tape_std_force_unassign(struct tape_device *device) +{ + struct tape_request *request; + struct tape_ca_data *ca_data1; + struct tape_ca_data *ca_data2; + + request = tape_alloc_request(2, 24); + if (IS_ERR(request)) + return PTR_ERR(request); + + request->op = TO_BREAKASS; + ca_data1 = (struct tape_ca_data *) + (((char *) request->cpdata)); + ca_data2 = (struct tape_ca_data *) + (((char *) request->cpdata) + 12); + + ca_data1->function = 0x80; /* Conditional enable */ + strcpy(ca_data1->password, ZLINUX_PASSWD); + ASCEBC(ca_data1->password, 11); + ca_data2->function = 0x40; /* Conditional disable */ + memcpy(ca_data2->password, ca_data1->password, 11); + + tape_ccw_cc(request->cpaddr, CONTROL_ACCESS, 12, ca_data1); + tape_ccw_end(request->cpaddr + 1, CONTROL_ACCESS, 12, ca_data2); + + return tape_do_io_free(device, request); +} +#endif + +/* + * TAPE390_DISPLAY: Show a string on the tape display.
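+ * + * Editor's sketch: user space typically reaches this through the + * TAPE390_DISPLAY ioctl on the tape character device; the field values + * below are made up purely for illustration: + * + *	struct display_struct ds; + *	ds.cntrl = 0; + *	memcpy(ds.message1, "BACKUP  ", 8); + *	memcpy(ds.message2, "DONE    ", 8); + *	ioctl(fd, TAPE390_DISPLAY, &ds);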
+ */ +int +tape_std_display(struct tape_device *device, struct display_struct *disp) +{ + struct tape_request *request; + int rc; + + request = tape_alloc_request(2, 17); + if (IS_ERR(request)) { + DBF_EVENT(3, "TAPE: load display failed\n"); + return PTR_ERR(request); + } + + request->op = TO_DIS; + *(unsigned char *) request->cpdata = disp->cntrl; + DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl); + memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8); + memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8); + ASCEBC(((unsigned char*) request->cpdata) + 1, 16); + + tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + + rc = tape_do_io_interruptible(device, request); + tape_put_request(request); + return rc; +} + +/* + * Read block id. + */ +int +tape_std_read_block_id(struct tape_device *device, unsigned int *bid) +{ + struct tape_request *request; + struct { + unsigned int channel_block_id; + unsigned int device_block_id; + } __attribute__ ((packed)) *rbi_data; + int rc; + + request = tape_alloc_request(3, 8); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_RBI; + + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + + /* execute it */ + rc = tape_do_io(device, request); + if (rc == 0) { + /* Get result from read buffer. */ + DBF_EVENT(6, "rbi_data = 0x%08x%08x\n", + *((unsigned int *) request->cpdata), + *(((unsigned int *) request->cpdata)+1)); + rbi_data = (void *) request->cpdata; + *bid = rbi_data->channel_block_id; + } + tape_put_request(request); + return rc; +} + +/* Seek block id */ +int +tape_std_seek_block_id(struct tape_device *device, unsigned int bid) +{ + struct tape_request *request; + + request = tape_alloc_request(3, 4); + if (IS_ERR(request)) + return PTR_ERR(request); + + request->op = TO_LBL; + *(__u32 *) request->cpdata = bid; + + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + + /* execute it */ + return tape_do_io_free(device, request); +} + +int +tape_std_terminate_write(struct tape_device *device) +{ + int rc; + + if(device->required_tapemarks == 0) + return 0; + + DBF_EVENT(5, "(%04x): terminate_write %ixEOF\n", + device->devstat.devno, device->required_tapemarks); + + rc = tape_mtop(device, MTWEOF, device->required_tapemarks); + if (rc) + return rc; + + device->required_tapemarks = 0; + return tape_mtop(device, MTBSR, 1); +} + +/* + * MTLOAD: Loads the tape. + * The default implementation just waits until the tape medium state changes + * to MS_LOADED. + */ +int +tape_std_mtload(struct tape_device *device, int count) +{ + return wait_event_interruptible(device->state_change_wq, + (device->medium_state == MS_LOADED)); +} + +/* + * MTSETBLK: Set block size. + */ +int +tape_std_mtsetblk(struct tape_device *device, int count) +{ + struct idal_buffer *new; + + DBF_EVENT(6, "tape_std_mtsetblk(%d)\n", count); + if (count <= 0) { + /* + * Just set block_size to 0. tapechar_read/tapechar_write + * will realloc the idal buffer if a bigger one than the + * current is needed.
+ */ + device->char_data.block_size = 0; + return 0; + } + if (device->char_data.idal_buf != NULL && + device->char_data.idal_buf->size == count) + /* We already have an idal buffer of that size. */ + return 0; + /* Allocate a new idal buffer. */ + new = idal_buffer_alloc(count, 0); + if (new == NULL) + return -ENOMEM; + if (device->char_data.idal_buf != NULL) + idal_buffer_free(device->char_data.idal_buf); + + device->char_data.idal_buf = new; + device->char_data.block_size = count; + DBF_EVENT(6, "new blocksize is %d\n", device->char_data.block_size); + return 0; +} + +/* + * MTRESET: Set block size to 0. + */ +int +tape_std_mtreset(struct tape_device *device, int count) +{ + DBF_EVENT(6, "TCHAR:devreset:\n"); + device->char_data.block_size = 0; + return 0; +} + +/* + * MTFSF: Forward space over 'count' file marks. The tape is positioned + * at the EOT (End of Tape) side of the file mark. + */ +int +tape_std_mtfsf(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_FSF; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTFSR: Forward space over 'count' tape blocks (blocksize is set + * via MTSETBLK). + */ +int +tape_std_mtfsr(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_FSB; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTBSR: Backward space over 'count' tape blocks + * (blocksize is set via MTSETBLK). + */ +int +tape_std_mtbsr(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_BSB; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTWEOF: Write 'count' file marks at the current position. + */ +int +tape_std_mtweof(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_WTM; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTBSFM: Backward space over 'count' file marks. + * The tape is positioned at the BOT (Begin Of Tape) side of the + * last skipped file mark.
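+ * + * Editor's sketch of the BOT/EOT wording used by these comments: + * + *	... data ... | TM | ... data ... + *	BOT side ----^    ^---- EOT side of the tape mark + * + * MTBSFM stops on the BOT side of the mark, MTBSF (below) on the EOT side.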
+ */ +int +tape_std_mtbsfm(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_BSF; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTBSF: Backward space over 'count' file marks. The tape is positioned at + * the EOT (End of Tape) side of the last skipped file mark. + */ +int +tape_std_mtbsf(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + int rc; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_BSF; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + rc = tape_do_io(device, request); + if (rc == 0) { + request->op = TO_FSF; + /* need to skip forward over the filemark. */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ + rc = tape_do_io(device, request); + } + tape_put_request(request); + return rc; +} + +/* + * MTFSFM: Forward space over 'count' file marks. + * The tape is positioned at the BOT (Begin Of Tape) side + * of the last skipped file mark. + */ +int +tape_std_mtfsfm(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + int rc; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_FSF; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + rc = tape_do_io(device, request); + if (rc == 0) { + request->op = TO_BSF; + /* need to skip back over the filemark. */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, BACKSPACEFILE, 0, NULL); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ + rc = tape_do_io(device, request); + } + tape_put_request(request); + return rc; +} + +/* + * MTREW: Rewind the tape. + */ +int +tape_std_mtrew(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(3, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_REW; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTOFFL: Rewind the tape and put the drive off-line.
+ * Implement 'rewind unload'. + */ +int +tape_std_mtoffl(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(3, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_RUN; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTNOP: 'No operation'. + */ +int +tape_std_mtnop(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_NOP; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTEOM: positions at the end of the portion of the tape already used + * for recording data. MTEOM positions after the last file mark, ready for + * appending another file. + */ +int +tape_std_mteom(struct tape_device *device, int mt_count) +{ + int rc; + + /* + * Since there is currently no other way to seek, return to the + * BOT and start from there. + */ + if((rc = tape_mtop(device, MTREW, 1)) < 0) + return rc; + + do { + if((rc = tape_mtop(device, MTFSF, 1)) < 0) + return rc; + if((rc = tape_mtop(device, MTFSR, 1)) < 0) + return rc; + } while((device->devstat.dstat & DEV_STAT_UNIT_EXCEP) == 0); + + return tape_mtop(device, MTBSR, 1); +} + +/* + * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind. + */ +int +tape_std_mtreten(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + int rc; + + request = tape_alloc_request(4, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_FSF; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL); + tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL); + tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr); + /* execute it, MTRETEN rc gets ignored */ + rc = tape_do_io_interruptible(device, request); + tape_put_request(request); + return tape_std_mtrew(device, 1); +} + +/* + * MTERASE: erases the tape. + */ +int +tape_std_mterase(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(5, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_DSE; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL); + tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL); + tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL); + tape_ccw_end(request->cpaddr + 4, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTUNLOAD: Rewind the tape and unload it.
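+ * + * Editor's note: the channel program below is the same REWIND_UNLOAD + * sequence as MTOFFL above, except that a 32-byte SENSE is chained at the + * end instead of a NOP.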
+ */ +int +tape_std_mtunload(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(3, 32); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_RUN; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL); + tape_ccw_end(request->cpaddr + 2, SENSE, 32, request->cpdata); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTCOMPRESSION: used to enable compression. + * Sets the IDRC on/off. + */ +int +tape_std_mtcompression(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + if (mt_count < 0 || mt_count > 1) { + DBF_EXCEPTION(6, "xcom parm\n"); + if (*device->modeset_byte & 0x08) + PRINT_INFO("(%x) Compression is currently on\n", + device->devstat.devno); + else + PRINT_INFO("(%x) Compression is currently off\n", + device->devstat.devno); + PRINT_INFO("Use 1 to switch compression on, 0 to " + "switch it off\n"); + return -EINVAL; + } + request = tape_alloc_request(2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_NOP; + /* setup ccws */ + *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * Read Block + */ +struct tape_request * +tape_std_read_block(struct tape_device *device, size_t count) +{ + struct tape_request *request; + + /* + * We have to alloc 4 ccws in order to be able to transform the + * request into a read backward request in the error case. + */ + request = tape_alloc_request(4, 0); + if (IS_ERR(request)) { + DBF_EXCEPTION(6, "xrbl fail"); + return request; + } + request->op = TO_RFO; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD, + device->char_data.idal_buf); + DBF_EVENT(6, "xrbl ccwg\n"); + return request; +} + +/* + * Read Block backward transformation function. + */ +void +tape_std_read_backward(struct tape_device *device, struct tape_request *request) +{ + /* + * We have allocated 4 ccws in tape_std_read, so we can now + * transform the request to a read backward, followed by a + * forward space block.
+ */ + request->op = TO_RBA; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD, + device->char_data.idal_buf); + tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); + tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); + DBF_EVENT(6, "xrop ccwg\n"); +} + +/* + * Write Block + */ +struct tape_request * +tape_std_write_block(struct tape_device *device, size_t count) +{ + struct tape_request *request; + + request = tape_alloc_request(2, 0); + if (IS_ERR(request)) { + DBF_EXCEPTION(6, "xwbl fail\n"); + return request; + } + request->op = TO_WRI; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD, + device->char_data.idal_buf); + DBF_EVENT(6, "xwbl ccwg\n"); + return request; +} + +/* + * This routine is called by the frontend after an ENOSPC on write + */ +void +tape_std_process_eov(struct tape_device *device) +{ + /* + * End of volume: We have to backspace the last written record, then + * we TRY to write a tapemark and then backspace over the written TM + */ + if (tape_mtop(device, MTBSR, 1) < 0) + return; + if (tape_mtop(device, MTWEOF, 1) < 0) + return; + tape_mtop(device, MTBSR, 1); +} + +EXPORT_SYMBOL(tape_std_assign); +EXPORT_SYMBOL(tape_std_unassign); +#ifdef TAPE390_FORCE_UNASSIGN +EXPORT_SYMBOL(tape_std_force_unassign); +#endif +EXPORT_SYMBOL(tape_std_display); +EXPORT_SYMBOL(tape_std_read_block_id); +EXPORT_SYMBOL(tape_std_seek_block_id); +EXPORT_SYMBOL(tape_std_mtload); +EXPORT_SYMBOL(tape_std_mtsetblk); +EXPORT_SYMBOL(tape_std_mtreset); +EXPORT_SYMBOL(tape_std_mtfsf); +EXPORT_SYMBOL(tape_std_mtfsr); +EXPORT_SYMBOL(tape_std_mtbsr); +EXPORT_SYMBOL(tape_std_mtweof); +EXPORT_SYMBOL(tape_std_mtbsfm); +EXPORT_SYMBOL(tape_std_mtbsf); +EXPORT_SYMBOL(tape_std_mtfsfm); +EXPORT_SYMBOL(tape_std_mtrew); +EXPORT_SYMBOL(tape_std_mtoffl); +EXPORT_SYMBOL(tape_std_mtnop); +EXPORT_SYMBOL(tape_std_mteom); +EXPORT_SYMBOL(tape_std_mtreten); +EXPORT_SYMBOL(tape_std_mterase); +EXPORT_SYMBOL(tape_std_mtunload); +EXPORT_SYMBOL(tape_std_mtcompression); +EXPORT_SYMBOL(tape_std_read_block); +EXPORT_SYMBOL(tape_std_read_backward); +EXPORT_SYMBOL(tape_std_write_block); +EXPORT_SYMBOL(tape_std_process_eov); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tape_std.h linux.21rc5-ac2/drivers/s390/char/tape_std.h --- linux.21rc5/drivers/s390/char/tape_std.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tape_std.h 2003-04-25 13:53:58.000000000 +0100 @@ -0,0 +1,155 @@ +/* + * drivers/s390/char/tape_std.h + * standard tape device functions for ibm tapes. + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#ifndef _TAPE_STD_H +#define _TAPE_STD_H + +#include + +/* + * The CCW commands for the Tape type of command.
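+ * + * Editor's sketch: a channel program normally pairs these opcodes with + * the tape_ccw_* helpers from tape.h, e.g. the rewind built by + * tape_std_mtrew in tape_std.c: + * + *	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + *	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL); + *	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);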
+ */ +#define INVALID_00 0x00 /* Invalid cmd */ +#define BACKSPACEBLOCK 0x27 /* Back Space block */ +#define BACKSPACEFILE 0x2f /* Back Space file */ +#define DATA_SEC_ERASE 0x97 /* Data security erase */ +#define ERASE_GAP 0x17 /* Erase Gap */ +#define FORSPACEBLOCK 0x37 /* Forward space block */ +#define FORSPACEFILE 0x3F /* Forward Space file */ +#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */ +#define NOP 0x03 /* No operation */ +#define READ_FORWARD 0x02 /* Read forward */ +#define REWIND 0x07 /* Rewind */ +#define REWIND_UNLOAD 0x0F /* Rewind and Unload */ +#define SENSE 0x04 /* Sense */ +#define NEW_MODE_SET 0xEB /* Guess it is Mode set */ +#define WRITE_CMD 0x01 /* Write */ +#define WRITETAPEMARK 0x1F /* Write Tape Mark */ + +#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */ +#define CONTROL_ACCESS 0xE3 /* Set high speed */ +#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */ +#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */ +#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */ +#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */ +#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */ +#define MODE_SET_C3 0xC3 /* for 3420 */ +#define MODE_SET_CB 0xCB /* for 3420 */ +#define MODE_SET_D3 0xD3 /* for 3420 */ +#define READ_BACKWARD 0x0C /* */ +#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */ +#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */ +#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */ +#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */ +#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */ +#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */ +#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */ +#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */ +#define READ_DEV_CHAR 0x64 /* Read device characteristics */ +#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */ +#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */ +#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */ +#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */ +#define SYNC 0x43 /* Synchronize (flush buffer) */ +#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */ +#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */ +#define READ_CONFIG_DATA 0xFA /* 3490 CMD */ +#define READ_MESSAGE_ID 0x4E /* 3490 CMD */ +#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */ +#define SET_INTERFACE_ID 0x73 /* 3490 CMD */ + +#define SENSE_COMMAND_REJECT 0x80 +#define SENSE_INTERVENTION_REQUIRED 0x40 +#define SENSE_BUS_OUT_CHECK 0x20 +#define SENSE_EQUIPMENT_CHECK 0x10 +#define SENSE_DATA_CHECK 0x08 +#define SENSE_OVERRUN 0x04 +#define SENSE_DEFERRED_UNIT_CHECK 0x02 +#define SENSE_ASSIGNED_ELSEWHERE 0x01 + +#define SENSE_LOCATE_FAILURE 0x80 +#define SENSE_DRIVE_ONLINE 0x40 +#define SENSE_RESERVED 0x20 +#define SENSE_RECORD_SEQUENCE_ERR 0x10 +#define SENSE_BEGINNING_OF_TAPE 0x08 +#define SENSE_WRITE_MODE 0x04 +#define SENSE_WRITE_PROTECT 0x02 +#define SENSE_NOT_CAPABLE 0x01 + +#define SENSE_CHANNEL_ADAPTER_CODE 0xE0 +#define SENSE_CHANNEL_ADAPTER_LOC 0x10 +#define SENSE_REPORTING_CU 0x08 +#define SENSE_AUTOMATIC_LOADER 0x04 +#define SENSE_TAPE_SYNC_MODE 0x02 +#define SENSE_TAPE_POSITIONING 0x01 + +/* Data structure for the CONTROL_ACCESS call */ +struct tape_ca_data { + unsigned char function; + char password[11]; +} __attribute__ ((packed)); + +/* discipline functions */ +struct tape_request *tape_std_read_block(struct tape_device *, size_t); +void tape_std_read_backward(struct tape_device *device, + struct tape_request *request); +struct tape_request *tape_std_write_block(struct tape_device *, size_t); +struct tape_request *tape_std_bread(struct tape_device *, 
struct request *); +void tape_std_free_bread(struct tape_request *); +void tape_std_check_locate(struct tape_device *, struct tape_request *); +struct tape_request *tape_std_bwrite(struct request *, + struct tape_device *, int); + +/* Some non-mtop commands. */ +int tape_std_assign(struct tape_device *); +int tape_std_unassign(struct tape_device *); +int tape_std_force_unassign(struct tape_device *); +int tape_std_read_block_id(struct tape_device *, unsigned int *); +int tape_std_seek_block_id(struct tape_device *, unsigned int); +int tape_std_display(struct tape_device *, struct display_struct *); +int tape_std_terminate_write(struct tape_device *); + +/* Standard magnetic tape commands. */ +int tape_std_mtbsf(struct tape_device *, int); +int tape_std_mtbsfm(struct tape_device *, int); +int tape_std_mtbsr(struct tape_device *, int); +int tape_std_mtcompression(struct tape_device *, int); +int tape_std_mteom(struct tape_device *, int); +int tape_std_mterase(struct tape_device *, int); +int tape_std_mtfsf(struct tape_device *, int); +int tape_std_mtfsfm(struct tape_device *, int); +int tape_std_mtfsr(struct tape_device *, int); +int tape_std_mtload(struct tape_device *, int); +int tape_std_mtnop(struct tape_device *, int); +int tape_std_mtoffl(struct tape_device *, int); +int tape_std_mtreset(struct tape_device *, int); +int tape_std_mtreten(struct tape_device *, int); +int tape_std_mtrew(struct tape_device *, int); +int tape_std_mtsetblk(struct tape_device *, int); +int tape_std_mtunload(struct tape_device *, int); +int tape_std_mtweof(struct tape_device *, int); + +/* Event handlers */ +void tape_std_default_handler(struct tape_device *); +void tape_std_unexpect_uchk_handler(struct tape_device *); +void tape_std_irq(struct tape_device *); +void tape_std_process_eov(struct tape_device *); + +// the error recovery stuff: +void tape_std_error_recovery(struct tape_device *); +void tape_std_error_recovery_has_failed(struct tape_device *,int error_id); +void tape_std_error_recovery_succeded(struct tape_device *); +void tape_std_error_recovery_do_retry(struct tape_device *); +void tape_std_error_recovery_read_opposite(struct tape_device *); +void tape_std_error_recovery_HWBUG(struct tape_device *, int condno); + +#endif // _TAPE_STD_H diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/char/tubtty.c linux.21rc5-ac2/drivers/s390/char/tubtty.c --- linux.21rc5/drivers/s390/char/tubtty.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/char/tubtty.c 2003-05-28 15:40:29.000000000 +0100 @@ -10,6 +10,7 @@ * Author: Richard Hitt */ #include +#include #include "tubio.h" /* Initialization & uninitialization for tubtty */ @@ -118,6 +119,8 @@ td->read_proc = tty3270_read_proc; td->write_proc = tty3270_write_proc; + ctrlchar_init(); + rc = tty_register_driver(td); if (rc) { printk(KERN_ERR "tty3270 registration failed with %d\n", rc); @@ -875,23 +878,22 @@ { struct tty_struct *tty; int func = -1; + int is_console = 0; + unsigned int cchar; if ((tty = tubp->tty) == NULL) return; if (count < 0) return; - if (count == 2 && (cp[0] == '^' || cp[0] == '\252')) { - switch(cp[1]) { - case 'c': case 'C': - func = INTR_CHAR(tty); - break; - case 'd': case 'D': - func = EOF_CHAR(tty); - break; - case 'z': case 'Z': - func = SUSP_CHAR(tty); - break; - } +#ifdef CONFIG_TN3270_CONSOLE + if (CONSOLE_IS_3270 && tub3270_con_tubp == tubp) + is_console = 1; +#endif + cchar = ctrlchar_handle(cp, count, tty, is_console); + if ((cchar & CTRLCHAR_MASK) != CTRLCHAR_NONE) { + 
if ((cchar & CTRLCHAR_MASK) != CTRLCHAR_CTRL) + return; + func = cchar & 0xFF; } else if (count == 2 && cp[0] == 0x1b) { /* if ESC */ int inc = 0; char buf[GEOM_INPLEN + 1]; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/Config.in linux.21rc5-ac2/drivers/s390/Config.in --- linux.21rc5/drivers/s390/Config.in 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/Config.in 2003-04-25 13:53:58.000000000 +0100 @@ -52,19 +52,20 @@ if [ "$CONFIG_TN3215" = "y" ]; then bool 'Support for console on 3215 line mode terminal' CONFIG_TN3215_CONSOLE fi -bool 'Support for HWC line mode terminal' CONFIG_HWC -if [ "$CONFIG_HWC" = "y" ]; then - bool ' console on HWC line mode terminal' CONFIG_HWC_CONSOLE - tristate ' Control-Program Identification' CONFIG_HWC_CPI +bool 'Support for SCLP' CONFIG_SCLP +if [ "$CONFIG_SCLP" = "y" ]; then + bool ' Support for SCLP line mode terminal' CONFIG_SCLP_TTY + if [ "$CONFIG_SCLP_TTY" = "y" ]; then + bool ' Support for console on SCLP line mode terminal' CONFIG_SCLP_CONSOLE + fi + tristate ' Control-Program Identification' CONFIG_SCLP_CPI fi tristate 'S/390 tape device support' CONFIG_S390_TAPE if [ "$CONFIG_S390_TAPE" != "n" ]; then comment 'S/390 tape interface support' - bool ' Support for tape character devices' CONFIG_S390_TAPE_CHAR bool ' Support for tape block devices' CONFIG_S390_TAPE_BLOCK comment 'S/390 tape hardware support' - bool ' Support for 3490 tape hardware' CONFIG_S390_TAPE_3490 - bool ' Support for 3480 tape hardware' CONFIG_S390_TAPE_3480 + dep_tristate ' Support for 3480/3490 tape hardware' CONFIG_S390_TAPE_34XX $CONFIG_S390_TAPE fi endmenu @@ -80,6 +81,7 @@ tristate 'Universal TUN/TAP device driver support' CONFIG_TUN bool 'Ethernet (10 or 100Mbit)' CONFIG_NET_ETHERNET bool 'Token Ring driver support' CONFIG_TR + dep_tristate 'Cisco 7000 support' CONFIG_C7000 m bool 'FDDI driver support' CONFIG_FDDI comment 'S/390 network device drivers' bool 'Channel Device Configuration' CONFIG_CHANDEV diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/net/c7000.c linux.21rc5-ac2/drivers/s390/net/c7000.c --- linux.21rc5/drivers/s390/net/c7000.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/net/c7000.c 2003-05-07 15:37:11.000000000 +0100 @@ -0,0 +1,3293 @@ +/* + Cisco/7000 driver -- Copyright (C) 2000 UTS Global LLC. + Author: Bob Scardapane (UTS Global LLC). + Version: 3. + + To use this driver, run the LINUX command: + + insmod c7000 base0=0xYYYY lhost0=s1 uhost0=s2 lappl0=s3 uappl0=s4 dbg=x + + base0=0xYYYY defines the base unit address of the interface. + lhost0=s1 defines the local host name. + uhost0=s2 defines the unit host name. + lappl0=s3 defines the local application name. + uappl0=s4 defines the unit application name. + dbg=x defines the message level. Higher values will result in + additional diagnostic messages. + + Additional interfaces are defined on insmod by using the variable + name groups "base1,lhost1,lappl1,uhost1,uappl1", etc... up to three + additional groups. + + In addition, the module will automatically detect the unit base + addresses by scanning all active irq's for a control unit type + of 0x3088 and a model of 0x61 (CLAW mode). The noauto parameter + can be used to suppress automatic detection. 
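+ + For example (editor's sketch; the unit address 0x2d00 is made up and the + names shown are just the documented defaults): + + insmod c7000 base0=0x2d00 lhost0=UTS lappl0=TCPIP uhost0=C7011 uappl0=TCPIP dbg=1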
+ +  The values of lhostx, lapplx, uhostx and uapplx default to: + + lapplx - TCPIP + lhostx - UTS + uapplx - TCPIP + uhostx - C7011 + + Note that the values passed in the insmod command will always + override the automatic detection of the unit base addresses and + the default values of lapplx, lhostx, uapplx and uhostx. + + The parameter noauto can be used to disable automatic detection of + devices: + + noauto=1 (disable automatic detection) + noauto=0 (Enable automatic detection. This is the default value.) + + The values in base0 - base3 will be copied to the bases array when + the module is loaded. + + To configure the interface(s), run the LINUX command(s): + + ifconfig ci0 ... + ifconfig ci1 ... + ifconfig ci2 ... + ifconfig ci3 ... + + There is one device structure for each controller in the c7000_devices + array. The base address of each controller is in the bases array. + These arrays parallel each other. There is also one c7000_controller + structure for each controller. This structure is pointed to by field + priv in the individual device structure. + + In each c7000_controller, there are embedded c7000_unit structures. + There is one c7000_unit structure per device number that makes up + a controller (currently 2 units per controller). +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + Global defines +*/ + +/* + Maximum number of controllers. +*/ + +#define MAX_C7000 4 + +/* + Number of units per controller. +*/ + +#define NUNITS 2 + +/* + Define indexes of read and write units in the cunits array. +*/ + +#define C7000_RD 0 +#define C7000_WR 1 + +/* + Number of device buffers. +*/ + +#define C7000_MAXBUF 40 + +/* + Transmission queue length. +*/ + +#define C7000_TXQUEUE_LEN 100 +/* + Size of the IP packet data. +*/ + +#define C7000_DATAL 4096 + +/* + Size of the read header data. +*/ + +#define C7000_READHDRL 4 + +/* + Size of read flag byte. +*/ + +#define C7000_READFFL 1 + +/* + Size of a device buffer. This is how it is arranged in memory: + 4096 (IP packet data) + 4 (read header) + 1 (read flag) = 4101. +*/ + +#define C7000_BUFSIZE (C7000_DATAL + C7000_READHDRL + C7000_READFFL) + +/* + Size of sigsmod data. +*/ + +#define C7000_SIGSMODL 1 + +/* + Flag value that indicates a read was completed in the flag + field of a c7000_rd_header. +*/ + +#define FLAG_FF 0xff +/* + Size of C7000 sense id data. +*/ + +#define SIDL 32 + +/* + Maximum number of read and write retries. +*/ + +#define C7000_MAX_RETRIES 3 + +/* + Define sense byte0 value for a box reset. +*/ + +#define C7000_BOX_RESET 0x41 + +/* + CCW commands. +*/ + +#define C7000_WRITE_CCW 0x01 /* normal write */ +#define C7000_READ_CCW 0x02 /* normal read */ +#define C7000_NOOP_CCW 0x03 /* no operation */ +#define C7000_SIGSMOD_CCW 0x05 /* signal status modifier */ +#define C7000_TIC_CCW 0x08 /* transfer in channel */ +#define C7000_READHDR_CCW 0x12 /* read header */ +#define C7000_READFF_CCW 0x22 /* read FF flag */ +#define C7000_SID_CCW 0xe4 /* sense identification */ + +/* + Control commands. +*/ + +#define C7000_SYS_VALIDATE 1 +#define C7000_SYS_VALIDATE_RESP 2 +#define C7000_CONN_REQ 33 +#define C7000_CONN_RESP 34 +#define C7000_CONN_CONFRM 35 +#define C7000_DISCONN 36 +#define C7000_BOXERROR 65 + +/* + State machine values.
+*/ + +#define C7000_INIT 1 +#define C7000_HALT 2 +#define C7000_SID 3 +#define C7000_SYSVAL 4 +#define C7000_CONNECT 5 +#define C7000_READY 6 +#define C7000_READ 7 +#define C7000_WRITE 8 +#define C7000_DISC 9 +#define C7000_STOP 10 +#define C7000_STOPPED 11 +#define C7000_ERROR 12 + +/* + The lower subchannel is used for read operations and the one that is + one higher is used for write operations. + + Both subchannels are initially in state C7000_INIT. A transition to + state C7000_HALT occurs when halt_IO is issued on each. When the + halts completes a transition to state C7000_SID occurs and a channel + program is issued to do a sense identification on both subchannels. + + When the sense identification completes, the state C7000_SYSVAL is + entered on the read subchannel. A read channel program is issued. + + When the sense identification completes, the write subchannel enters + state C7000_SYSVAL and a system validation request is written. The + read subchannel is also put into this state. + + When both the system validation response is read and an inbound system + validation request is read, the inbound system validation request is + responded to and both subchannels enter the C7000_CONNECT state. + + A read channel program is posted to look for the inbound connect + request. When that is received a connection confirmation is written. + The state of both subchannels is then changed to C7000_READY. A + read channel program is then posted and the state is changed to + C7000_READ. When a read completes, the packet is sent to the higher + layers and the read channel program is restarted. + + When there is a packet to be written, state C7000_WRITE is entered + and a channel program is issued to write the data. The subchannel + is in state C7000_READY when there is nothing to be written. + + When the stop method is executed, a disconnect message is sent and + the state is changed to C7000_DISC in both subchannels. A halt_IO + will be issued to both subchannels and state C7000_STOP will be entered. + When the halt IO completes, state C7000_STOPPED will be set. + + State C7000_ERROR is set when an error occurs in the interrupt + routine. Recycle the interface (ifconfig down / ifconfig up) + to reset this state. +*/ + +/* + Results from c7000_check_csw. +*/ + +enum c7000_rupt { + C7000_NORMAL, + C7000_CHANERR, + C7000_UCK, + C7000_UCK_RESET, + C7000_UE, + C7000_ATTN, + C7000_BUSY, + C7000_OTHER +}; + +/* + Bits set in device structure tbusy field. +*/ + +#define TB_TX 0 /* sk buffer handling in progress */ +#define TB_STOP 1 /* network device stop in progress */ +#define TB_RETRY 2 /* retry in progress */ +#define TB_NOBUFFER 3 /* no buffer on free queue */ + +/* + Bit in c7000_unit.flag_a that indicates the bh routine is busy. +*/ + +#define C7000_BH_ACTIVE 0 + +#define CPrintk(level, args...) \ + if (level <= dbg) \ + printk(args) + +/* + Maximum length of a system validation string. +*/ + +#define NAMLEN 8 + +#define Err_Conn_Confirm 1 +#define Err_Names_not_Matched 166 +#define Err_C7000_NOT_READY 167 +#define Err_Duplicate 170 +#define Err_Closing 171 +#define Err_No_Such_App 172 +#define Err_Host_Not_Ready 173 +#define Err_CLOSING 174 +#define Err_Dup_Link 175 +#define Err_Wrong_Version 179 +#define Err_Wrong_Frame_Size 180 + +/* + Define a macro to extract the logical link identifier from + the c7000 read header command field. +*/ + +#define C7000_LINKID(cmd) ((unsigned char)cmd >> 3) + +/* + Define the control unit type for a Cisco 7000. 
+*/ + +#define C7000_CU_TYPE 0x3088 + +/* + Define the control unit model for a Cisco 7000. +*/ + +#define C7000_CU_MODEL 0x61 + +/* + Define the default system validate parameters (lapplx, + lhostx, uapplx, uhostx). +*/ + +#define C7000_DFLT_LAPPL "TCPIP" +#define C7000_DFLT_LHOST "UTS" +#define C7000_DFLT_UAPPL "TCPIP" +#define C7000_DFLT_UHOST "C7011" + +/* + Global variables. +*/ + +/* + Base device addresses of the controllers. +*/ + +static int base0 = -1; +static int base1 = -1; +static int base2 = -1; +static int base3 = -1; + +static int bases[MAX_C7000]; + +/* + Local application names. +*/ + +static char *lappl0; +static char *lappl1; +static char *lappl2; +static char *lappl3; + +/* + Local host names. +*/ + +static char *lhost0; +static char *lhost1; +static char *lhost2; +static char *lhost3; + +/* + Unit application names. +*/ + +static char *uappl0; +static char *uappl1; +static char *uappl2; +static char *uappl3; + +/* + Unit host names. +*/ + +static char *uhost0; +static char *uhost1; +static char *uhost2; +static char *uhost3; + +/* + Debugging level (higher numbers emit lower priority messages). +*/ + +static unsigned int dbg = 0; + +/* + Parameter that controls auto detection. +*/ + +static int noauto = 0; + +/* + Interface names. +*/ + +static char ifnames[MAX_C7000][8] = {"ci0", "ci1", "ci2", "ci3"}; + +/* + One device structure per controller. +*/ + +/* RBH Try out the new code for 2.4.0 */ +#define NEWSTUFF + +#ifdef NEWSTUFF +#define STRUCT_NET_DEVICE struct net_device +#else +#define STRUCT_NET_DEVICE struct device +#endif + +STRUCT_NET_DEVICE c7000_devices[MAX_C7000]; + +/* + Scratch variable filled in with controller name. +*/ + +static char *controller; + +/* + Identify parameters that can be passed on the LINUX insmod command.
+*/ + +MODULE_AUTHOR("Robert Scardapane (UTS Global)"); +MODULE_DESCRIPTION("Network module for Cisco 7000 box."); + +MODULE_PARM(base0, "1i"); +MODULE_PARM_DESC(base0, "Base unit address for 1st C7000 box."); +MODULE_PARM(base1, "1i"); +MODULE_PARM_DESC(base1, "Base unit address for 2nd C7000 box."); +MODULE_PARM(base2, "1i"); +MODULE_PARM_DESC(base2, "Base unit address for 3rd C7000 box."); +MODULE_PARM(base3, "1i"); +MODULE_PARM_DESC(base3, "Base unit address for 4th C7000 box."); + +MODULE_PARM(lappl0, "s"); +MODULE_PARM_DESC(lappl0, "Application name for 1st C7000 box."); +MODULE_PARM(lappl1, "s"); +MODULE_PARM_DESC(lappl1, "Application name for 2nd C7000 box."); +MODULE_PARM(lappl2, "s"); +MODULE_PARM_DESC(lappl2, "Application name for 3rd C7000 box."); +MODULE_PARM(lappl3, "s"); +MODULE_PARM_DESC(lappl3, "Application name for 4th C7000 box."); + +MODULE_PARM(lhost0, "s"); +MODULE_PARM_DESC(lhost0, "Host name for 1st C7000 box."); +MODULE_PARM(lhost1, "s"); +MODULE_PARM_DESC(lhost1, "Host name for 2nd C7000 box."); +MODULE_PARM(lhost2, "s"); +MODULE_PARM_DESC(lhost2, "Host name for 3rd C7000 box."); +MODULE_PARM(lhost3, "s"); +MODULE_PARM_DESC(lhost3, "Host name for 4th C7000 box."); + +MODULE_PARM(uhost0, "s"); +MODULE_PARM_DESC(uhost0, "Unit name for 1st C7000 box."); +MODULE_PARM(uhost1, "s"); +MODULE_PARM_DESC(uhost1, "Unit name for 2nd C7000 box."); +MODULE_PARM(uhost2, "s"); +MODULE_PARM_DESC(uhost2, "Unit name for 3rd C7000 box."); +MODULE_PARM(uhost3, "s"); +MODULE_PARM_DESC(uhost3, "Unit name for 4th C7000 box."); + +MODULE_PARM(uappl0, "s"); +MODULE_PARM_DESC(uappl0, "Unit application name for 1st C7000 box."); +MODULE_PARM(uappl1, "s"); +MODULE_PARM_DESC(uappl1, "Unit application name for 2nd C7000 box."); +MODULE_PARM(uappl2, "s"); +MODULE_PARM_DESC(uappl2, "Unit application name for 3rd C7000 box."); +MODULE_PARM(uappl3, "s"); +MODULE_PARM_DESC(uappl3, "Unit application name for 4th C7000 box."); + +MODULE_PARM(dbg, "1i"); +MODULE_PARM_DESC(dbg, "Message level for debugging."); + +MODULE_PARM(noauto, "1i"); +MODULE_PARM_DESC(noauto, "Control automatic detection of unit base addresses."); + +/* + Structure used to manage unit buffers. +*/ + +struct c7000_buffer { + ccw1_t ccws[7]; /* channel program */ + struct c7000_buffer *next; /* list pointer */ + char *data; /* pointer to actual data */ + int len; /* length of the data */ +}; + +/* + C7000 Control Block. + */ + +struct c7000_control_blk { + unsigned char cmd; + unsigned char ver; + unsigned char link_id; + unsigned char correlator; + unsigned char ret_code; + unsigned char resvd1[3]; + unsigned char unitname[NAMLEN]; + unsigned char hostname[NAMLEN]; + unsigned short rdsize; /* read frame size */ + unsigned short wrtsize; /* write frame size */ + unsigned char resvd2[4]; +}; + +/* + Per unit structure contained within the c7000_controller structure. 
+*/ + +struct c7000_unit { + ccw1_t ccws[5]; /* control ccws */ + int devno; /* device number */ + int irq; /* subchannel number */ + int IO_active; /* IO activity flag */ + int state; /* fsm state */ + int retries; /* retry counter */ + unsigned long flag_a; /* bh activity flag */ + devstat_t devstat; /* device status */ +#ifdef NEWSTUFF + wait_queue_head_t wait; /* sleep q head */ +#else + struct wait_queue *wait; /* sleep q pointer */ +#endif + struct c7000_controller *cntlp; /* controller pointer */ + struct c7000_buffer *free; /* free buffer anchor */ + struct c7000_buffer *proc_head; /* proc head */ + struct c7000_buffer *proc_tail; /* proc tail */ + struct c7000_buffer *bh_head; /* bh head */ + struct c7000_buffer *bh_tail; /* bh tail */ + struct tq_struct tq; /* bh scheduling */ + char senseid[SIDL]; /* sense id data */ + struct c7000_control_blk control_blk; /* control block */ + unsigned char sigsmod; /* sigsmod flag */ + unsigned char readhdr[4]; /* read header */ + unsigned char readff; /* readff flag */ +}; + +/* + Private structure pointed to by dev->priv. +*/ + +struct c7000_controller { + struct net_device_stats stats; /* statistics */ + STRUCT_NET_DEVICE *dev; /* -> device struct */ + unsigned int base_addr; /* base address */ + char lappl[NAMLEN]; /* local appl */ + char lhost[NAMLEN]; /* local host */ + char uappl[NAMLEN]; /* unit appl */ + char uhost[NAMLEN]; /* unit host */ + unsigned char version; /* version = 2 */ + unsigned char linkid; /* link id */ + struct c7000_unit cunits[NUNITS]; /* embedded units */ +#ifdef NEWSTUFF + int tbusy; +#endif +}; + +/* + This is the structure returned by the C7000_READHDR_CCW. +*/ + +struct c7000_rd_header { + unsigned short len; /* packet length */ + unsigned char cmd; /* command code */ + unsigned char flag; /* flag */ +}; + +/* + Set the device structure transmission busy flag. +*/ + +#ifdef NEWSTUFF +#define c7000_set_busy(dev) netif_stop_queue(dev) +#else +static __inline__ void +c7000_set_busy(STRUCT_NET_DEVICE *dev) +{ + dev->tbusy = 1; + eieio(); + return; +} +#endif + +/* + Clear the device structure transmission busy flag. +*/ + +#ifdef NEWSTUFF +#define c7000_clear_busy(dev) netif_wake_queue(dev) +#else +static __inline__ void +c7000_clear_busy(STRUCT_NET_DEVICE *dev) +{ + dev->tbusy = 0; + eieio(); + return; +} +#endif + +/* + Extract the device structure transmission busy flag. +*/ + +#ifdef NEWSTUFF +#define c7000_check_busy(dev) netif_queue_stopped(dev) +#else +static __inline__ int +c7000_check_busy(STRUCT_NET_DEVICE *dev) +{ + eieio(); + return(dev->tbusy); +} +#endif + +/* + Set a bit in the device structure transmission busy flag. +*/ + +static __inline__ void +c7000_setbit_busy(int nr, STRUCT_NET_DEVICE *dev) +{ +#ifdef NEWSTUFF + netif_stop_queue(dev); + test_and_set_bit(nr, &((struct c7000_controller *)dev->priv)->tbusy); +#else + set_bit(nr, (void *)&dev->tbusy); +#endif + return; +} + +/* + Clear a bit in the device structure transmission busy flag. +*/ + +static __inline__ void +c7000_clearbit_busy(int nr, STRUCT_NET_DEVICE *dev) +{ +#ifdef NEWSTUFF + clear_bit(nr, &((struct c7000_controller *)dev->priv)->tbusy); + netif_wake_queue(dev); +#else + clear_bit(nr, (void *)&dev->tbusy); +#endif + return; +} + +/* + Test and set a bit in the device structure transmission busy flag. 
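+ + Editor's sketch of the intended use in a transmit path (illustrative + only, not code from this driver): + + if (c7000_ts_busy(TB_TX, dev)) + return -EBUSY;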
+*/ + +static __inline__ int +c7000_ts_busy(int nr, STRUCT_NET_DEVICE *dev) +{ +#ifdef NEWSTUFF + netif_stop_queue(dev); + return test_and_set_bit(nr, &((struct c7000_controller *)dev->priv)->tbusy); +#else + return(test_and_set_bit(nr, (void *)&dev->tbusy)); +#endif +} + +/* + Set the C7000 controller in the error state. +*/ + +static void +c7000_error(struct c7000_controller *ccp) +{ + int i; + struct c7000_unit *cup; + STRUCT_NET_DEVICE *dev = ccp->dev; + + for (i = 0; i < NUNITS; i++) { + cup = &ccp->cunits[i]; + cup->state = C7000_ERROR; + } + + if (dev != NULL) +#ifdef NEWSTUFF + /* RBH XXX Should we be doing this? */ + dev->state &= ~__LINK_STATE_START; +#else + dev->flags &= ~IFF_RUNNING; +#endif + + CPrintk(0, "c7000: c7000_error: base unit 0x%x is down\n", ccp->base_addr); + return; +} + +/* + Based on the SENSE ID information, fill in the + controller name. Note that this is the SENSE ID + information saved by LINUX/390 at boot time. +*/ + +static int +c7000_check_type(senseid_t *id) +{ + + switch (id->cu_type) { + + case C7000_CU_TYPE: + + if (id->cu_model == C7000_CU_MODEL) { + controller = "C7000 "; + return(0); + } + + break; + + default: + break; + } + + return(-1); +} + +/* + Check the device information for the controller. +*/ + +static int +c7000_check_devices(int devno) +{ + int i; + s390_dev_info_t temp; + + /* + Get the SENSE ID information for each device. + */ + + for (i = devno; i < (devno + NUNITS); i++) { + + if (get_dev_info_by_devno(i, &temp) != 0) + return(-1); + + if (c7000_check_type(&temp.sid_data) == -1) + return(-1); + } + + CPrintk(1, "c7000: c7000_check_devices: device type is %s\n", controller); + return(0); +} + +/* + Issue a halt I/O to device pointed to by cup. +*/ + +static int +c7000_haltio(struct c7000_unit *cup) +{ + __u32 parm; + __u8 flags = 0x00; + __u32 saveflags; + DECLARE_WAITQUEUE(wait, current); + int rc; + + s390irq_spin_lock_irqsave(cup->irq, saveflags); + parm = (unsigned long)cup; + + if ((rc = halt_IO(cup->irq, parm, flags)) != 0) { + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + return(rc); + } + + /* + Wait for the halt I/O to finish. + */ + + add_wait_queue(&cup->wait, &wait); + current->state = TASK_UNINTERRUPTIBLE; + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + schedule(); + remove_wait_queue(&cup->wait, &wait); + return(0); +} + +/* + Issue a start I/O to device pointed to by cup. +*/ + +static int +c7000_doio(struct c7000_unit *cup) +{ + __u32 parm; + __u8 flags = 0x00; + __u32 saveflags; + DECLARE_WAITQUEUE(wait, current); + int rc; + + /* + Do no further I/O while the device is in the ERROR, STOP + or STOPPED state. + */ + + if (cup->state == C7000_ERROR || cup->state == C7000_STOP || cup->state == C7000_STOPPED) + return(-1); + + s390irq_spin_lock_irqsave(cup->irq, saveflags); + parm = (unsigned long)cup; + + if ((rc = do_IO(cup->irq, &cup->ccws[0], parm, 0xff, flags)) != 0) { + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + return(rc); + } + + /* + Wait for the I/O to complete. + */ + + add_wait_queue(&cup->wait, &wait); + current->state = TASK_UNINTERRUPTIBLE; + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + schedule(); + remove_wait_queue(&cup->wait, &wait); + + /* + Interrupt handling may have marked the device in ERROR. + */ + + if (cup->state == C7000_ERROR) + return(-1); + + return(0); +} + +/* + Build a channel program to do a sense identification.
+*/ + +static void +c7000_bld_senseid_chpgm(struct c7000_unit *cup) +{ + ccw1_t *ccwp; + + ccwp = &cup->ccws[0]; + ccwp->cmd_code = C7000_SID_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(&cup->senseid); + ccwp->count = SIDL; + ccwp++; + ccwp->cmd_code = C7000_NOOP_CCW; + ccwp->flags = CCW_FLAG_SLI; + ccwp->cda = (__u32)NULL; + ccwp->count = 1; + return; +} + +/* + Build a channel program to write a control message. +*/ + +static void +c7000_bld_wrtctl_chpgm(struct c7000_unit *cup) +{ + ccw1_t *ccwp; + + ccwp = &cup->ccws[0]; + ccwp->cmd_code = C7000_WRITE_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(&cup->control_blk); + ccwp->count = sizeof(struct c7000_control_blk); + ccwp++; + ccwp->cmd_code = C7000_READFF_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(&cup->readff); + ccwp->count = C7000_READFFL; + ccwp++; + ccwp->cmd_code = C7000_TIC_CCW; + ccwp->flags = 0; + ccwp->cda = (__u32)virt_to_phys(ccwp + 1); + ccwp->count = 0; + ccwp++; + ccwp->cmd_code = C7000_NOOP_CCW; + ccwp->flags = CCW_FLAG_SLI; + ccwp->cda = (__u32)NULL; + ccwp->count = 1; + return; +} + +/* + Build a write channel program to write the indicated buffer. +*/ + +static void +c7000_bld_wrt_chpgm(struct c7000_unit *cup, struct c7000_buffer *buf) +{ + ccw1_t *ccwp; + struct c7000_controller *ccp = cup->cntlp; + + ccwp = &buf->ccws[0]; + ccwp->cmd_code = C7000_WRITE_CCW | (ccp->linkid << 3); + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(buf->data); + ccwp->count = buf->len; + ccwp++; + ccwp->cmd_code = C7000_READFF_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(buf->data + C7000_DATAL + C7000_READHDRL); + ccwp->count = C7000_READFFL; + ccwp++; + ccwp->cmd_code = C7000_TIC_CCW; + ccwp->flags = 0; + ccwp->cda = (__u32)virt_to_phys(ccwp + 1); + ccwp->count = 0; + ccwp++; + ccwp->cmd_code = C7000_NOOP_CCW; + ccwp->flags = (CCW_FLAG_SLI); + ccwp->cda = (__u32)NULL; + ccwp->count = 1; + return; +} + +/* + Build a channel program to read a control message. +*/ + +static void +c7000_bld_readctl_chpgm(struct c7000_unit *cup) +{ + ccw1_t *ccwp; + + ccwp = &cup->ccws[0]; + ccwp->cmd_code = C7000_READ_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(&cup->control_blk); + ccwp->count = sizeof(struct c7000_control_blk); + ccwp++; + ccwp->cmd_code = C7000_READHDR_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(&cup->readhdr); + ccwp->count = C7000_READHDRL; + ccwp++; + ccwp->cmd_code = C7000_SIGSMOD_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(&cup->sigsmod); + ccwp->count = C7000_SIGSMODL; + ccwp++; + ccwp->cmd_code = C7000_TIC_CCW; + ccwp->flags = 0; + ccwp->cda = (__u32)virt_to_phys(ccwp + 1); + ccwp->count = 0; + ccwp++; + ccwp->cmd_code = C7000_NOOP_CCW; + ccwp->flags = (CCW_FLAG_SLI); + ccwp->cda = (__u32)NULL; + ccwp->count = 1; + return; +} + +/* + Build a channel program to read the indicated buffer. 
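+ + Editor's summary of the segment built below: + + ccws[0] READ     packet data + ccws[1] READHDR  read header + ccws[2] SIGSMOD  sigsmod byte + ccws[3] TIC      (initially to ccws[6]) + ccws[4] READFF   flag byte, with PCI + ccws[5] TIC      (initially to ccws[6]) + ccws[6] NOOP + + c7000_bld_read_chain later repoints the TICs at ccws[3] and ccws[5] to + the first CCW of the next buffer's segment.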
+*/ + +static void +c7000_bld_read_chpgm(struct c7000_unit *cup, struct c7000_buffer *buf) +{ + ccw1_t *ccwp; + + ccwp = &buf->ccws[0]; + ccwp->cmd_code = C7000_READ_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(buf->data); + ccwp->count = C7000_DATAL; + ccwp++; + ccwp->cmd_code = C7000_READHDR_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(buf->data + C7000_DATAL); + ccwp->count = C7000_READHDRL; + ccwp++; + ccwp->cmd_code = C7000_SIGSMOD_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC); + ccwp->cda = (__u32)virt_to_phys(&cup->sigsmod); + ccwp->count = C7000_SIGSMODL; + ccwp++; + ccwp->cmd_code = C7000_TIC_CCW; + ccwp->flags = 0; + ccwp->cda = (__u32)virt_to_phys(ccwp + 3); + ccwp->count = 0; + ccwp++; + ccwp->cmd_code = C7000_READFF_CCW; + ccwp->flags = (CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI); + ccwp->cda = (__u32)virt_to_phys(&cup->readff); + ccwp->count = C7000_READFFL; + ccwp++; + ccwp->cmd_code = C7000_TIC_CCW; + ccwp->flags = 0; + ccwp->cda = (__u32)virt_to_phys(ccwp + 1); + ccwp->count = 0; + ccwp++; + ccwp->cmd_code = C7000_NOOP_CCW; + ccwp->flags = (CCW_FLAG_SLI); + ccwp->cda = (__u32)NULL; + ccwp->count = 1; + return; +} + +/* + Allocate buffer structure headers and buffers for all units + A return value of 0 means that all allocations worked. A -1 + means that an allocation failed. It is expected that the caller + will call c7000_free_buffers when -1 is returned. +*/ + +static int +c7000_alloc_buffers(STRUCT_NET_DEVICE *dev) +{ + int i; + int j; + char *data; + struct c7000_buffer *bufptr; + struct c7000_controller *ccp = (struct c7000_controller *) dev->priv; + struct c7000_unit *cup; + + for (i = 0; i < NUNITS; i++) { + cup = &ccp->cunits[i]; + cup->free = NULL; + + for (j = 0; j < C7000_MAXBUF; j++) { + bufptr = kmalloc(sizeof(struct c7000_buffer), GFP_KERNEL); + data = kmalloc(C7000_BUFSIZE, GFP_KERNEL); + + if (bufptr == NULL) + { + if(data) + kfree(data); + return(-1); + } + + /* + Place filled in buffer header on free anchor. + */ + + bufptr->next = cup->free; + bufptr->data = data; + bufptr->len = 0; + cup->free = bufptr; + + if (data == NULL) + return(-1); + + memset(data, '\0', C7000_BUFSIZE); + } + + } + + CPrintk(1, "c7000: c7000_alloc_buffers: allocated buffers for base unit 0x%lx\n", dev->base_addr); + return(0); +} + +/* + Free buffers on a chain. +*/ + +static void +c7000_free_chain(struct c7000_buffer *buf) +{ + char *data; + struct c7000_buffer *bufptr = buf; + struct c7000_buffer *tmp; + + while (bufptr != NULL) { + data = bufptr->data; + + if (data != NULL) + kfree(data); + + tmp = bufptr; + bufptr = bufptr->next; + kfree(tmp); + } + + return; +} + +/* + Free buffers on all possible chains for all units. +*/ + +static void +c7000_free_buffers(STRUCT_NET_DEVICE *dev) +{ + int i; + struct c7000_controller *ccp = (struct c7000_controller *) dev->priv; + struct c7000_unit *cup; + + for (i = 0; i < NUNITS; i++) { + cup = &ccp->cunits[i]; + c7000_free_chain(cup->free); + cup->free = NULL; + c7000_free_chain(cup->proc_head); + cup->proc_head = cup->proc_tail = NULL; + c7000_free_chain(cup->bh_head); + cup->bh_head = cup->bh_tail = NULL; + } + + CPrintk(1, "c7000: c7000_free_buffers: freed buffers for base unit 0x%lx\n", dev->base_addr); + return; +} + +/* + Obtain a free buffer. Return a pointer to the c7000_buffer + structure OR NULL. 
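+
+   The free list is a plain LIFO stacked through the ->next pointer;
+   callers serialize access (normally under the unit's irq lock). A
+   minimal stand-alone sketch of the same idiom, using a hypothetical
+   struct node instead of struct c7000_buffer:
+
+     struct node { struct node *next; };
+     static struct node *free_head;
+
+     static struct node *pop(void)
+     {
+         struct node *n = free_head;
+
+         if (n != NULL) {
+             free_head = n->next;
+             n->next = NULL;
+         }
+
+         return n;
+     }
+
+     static void push(struct node *n)
+     {
+         n->next = free_head;
+         free_head = n;
+     }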
+*/ + +struct c7000_buffer * +c7000_get_buffer(struct c7000_unit *cup) +{ + struct c7000_buffer *buf; + + buf = cup->free; + + if (buf == NULL) + return(NULL); + + cup->free = buf->next; + buf->next = NULL; + return(buf); +} + +/* + Release a buffer to the free list. +*/ + +void +c7000_release_buffer(struct c7000_unit *cup, struct c7000_buffer *buf) +{ + struct c7000_buffer *tmp; + + tmp = cup->free; + cup->free = buf; + buf->next = tmp; + return; +} + +/* + Queue a buffer on the end of the processing (proc) chain. +*/ + +void +c7000_queue_buffer(struct c7000_unit *cup, struct c7000_buffer *buf) +{ + buf->next = NULL; + + if (cup->proc_head == NULL) { + cup->proc_head = cup->proc_tail = buf; + return; + } + + cup->proc_tail->next = buf; + cup->proc_tail = buf; + return; +} + +/* + Dequeue a buffer from the start of the processing (proc) chain. +*/ + +struct c7000_buffer * +c7000_dequeue_buffer(struct c7000_unit *cup) +{ + struct c7000_buffer *buf = cup->proc_head; + + if (buf == NULL) + return(NULL); + + cup->proc_head = buf->next; + + if (cup->proc_head == NULL) + cup->proc_tail = NULL; + + buf->next = NULL; + return(buf); +} + +/* + Queue a buffer on the end of the bh routine chain. +*/ + +void +c7000_queue_bh_buffer(struct c7000_unit *cup, struct c7000_buffer *buf) +{ + buf->next = NULL; + + if (cup->bh_head == NULL) { + cup->bh_head = cup->bh_tail = buf; + return; + } + + cup->bh_tail->next = buf; + cup->bh_tail = buf; + return; +} + +/* + Dequeue a buffer from the start of the bh routine chain. +*/ + +struct c7000_buffer * +c7000_dequeue_bh_buffer(struct c7000_unit *cup) +{ + struct c7000_buffer *buf = cup->bh_head; + + if (buf == NULL) + return(NULL); + + cup->bh_head = buf->next; + + if (cup->bh_head == NULL) + cup->bh_tail = NULL; + + buf->next = NULL; + return(buf); +} + +/* + Build up a list of buffers to read. Each buffer is described + by one c7000_buffer structure. The c7000_buffer structure + contains a channel segment that will read that one buffer. + The channel program segments are chained together via TIC + CCWS. +*/ + +static int +c7000_bld_read_chain(struct c7000_unit *cup) +{ + struct c7000_buffer *buf, *pbuf = NULL; + struct c7000_rd_header *head; + int num = 0; + + while (cup->free != NULL) { + + /* + Obtain a buffer for a read channel segment. + */ + + if ((buf = c7000_get_buffer(cup)) == NULL) { + CPrintk(0, "c7000: c7000_bld_read_chain: can not obtain a read buffer for unit 0x%x\n", cup->devno); + return(-ENOMEM); + } + + num++; + buf->len = 0; + + /* + Clear out the read header flag. + */ + + head = (struct c7000_rd_header *)(buf->data + C7000_DATAL); + head->flag = 0x00; + c7000_queue_buffer(cup, buf); + + /* + Build the read channel program segment. + */ + + c7000_bld_read_chpgm(cup, buf); + + /* + Chain the prior (if any) channel program segment to + this one. + */ + + if (pbuf != NULL) + pbuf->ccws[3].cda = pbuf->ccws[5].cda = (__u32)virt_to_phys(&buf->ccws[0]); + + pbuf = buf; + } + + CPrintk(1, "c7000: c7000_bld_read_chain: chained %d buffers for unit 0x%x\n", num, cup->devno); + return(0); +} + +/* + Build up a list of buffers to write. Each buffer is described + by one c7000_buffer structure. The c7000_buffer structure + contains a channel segment that will write that one buffer. + The channel program segments are chained together via TIC + CCWS. 
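+
+   Illustratively, after chaining three buffers the running program
+   looks like:
+
+     buf1 READ..READFF --TIC--> buf2 READ..READFF --TIC--> buf3 ... NO-OP
+
+   Every segment's two TICs (ccws[3] and ccws[5]) are patched to
+   branch to the following segment; only the last segment still falls
+   through to its terminating NO-OP.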
+*/ + +static void +c7000_bld_wrt_chain(struct c7000_unit *cup) +{ + struct c7000_buffer *buf = cup->proc_head, *pbuf = NULL; + int num = 0; + + while (buf != NULL) { + c7000_bld_wrt_chpgm(cup, buf); + + /* + Chain the channel program segments together. + */ + + if (pbuf != NULL) + pbuf->ccws[2].cda = (__u32)virt_to_phys(&buf->ccws[0]); + + pbuf = buf; + buf = buf->next; + num++; + } + + CPrintk(1, "c7000: c7000_bld_wrt_chain: chained %d buffers for unit 0x%x\n", num, cup->devno); + return; +} + +/* + Interrupt handler bottom half (bh) routine. + Process all of the buffers on the c7000_unit bh chain. + The bh chain is populated by the interrupt routine when + a READ channel program completes on a buffer. +*/ + +static void +c7000_irq_bh(struct c7000_unit *cup) +{ + struct c7000_buffer *buf, *pbuf; + struct c7000_rd_header *head; + struct sk_buff *skb; + struct c7000_controller *ccp; + STRUCT_NET_DEVICE *dev; + int rc; + __u16 data_length; + __u32 parm; + __u8 flags = 0x00; + __u32 saveflags; + + ccp = cup->cntlp; + dev = ccp->dev; + + s390irq_spin_lock_irqsave(cup->irq, saveflags); + + /* + Process all buffers sent to bh by the interrupt routine. + */ + + while (cup->bh_head != NULL) { + buf = c7000_dequeue_bh_buffer(cup); + + /* + Deference the data as a c7000 header. + */ + + head = (struct c7000_rd_header *)(buf->data + C7000_DATAL); + + /* + If it is a control message, release the buffer and + continue the loop. + */ + + if (C7000_LINKID(head->cmd) == 0) { + CPrintk(0, "c7000: c7000_irq_bh: unexpected control command %d on unit 0x%x\n", head->cmd, cup->devno); + c7000_release_buffer(cup, buf); + continue; + } + + /* + Allocate a socket buffer. + */ + + data_length = head->len; + skb = dev_alloc_skb(data_length); + + /* + Copy the data to the skb. + Send it to the upper layers. + */ + + if (skb != NULL) { + memcpy(skb_put(skb, data_length), buf->data, data_length); + skb->dev = dev; + skb->protocol = htons(ETH_P_IP); + skb->pkt_type = PACKET_HOST; + skb->mac.raw = skb->data; + skb->ip_summed = CHECKSUM_UNNECESSARY; + netif_rx(skb); + ccp->stats.rx_packets++; + } else { + CPrintk(0, "c7000: c7000_irq_bh: can not allocate a skb for unit 0x%x\n", cup->devno); + ccp->stats.rx_dropped++; + } + + /* + Rechain the buffer on the processing list. + */ + + head->flag = 0x00; + buf->len = 0; + pbuf = cup->proc_tail; + c7000_queue_buffer(cup, buf); + + /* + Rechain the buffer on the running channel program. + */ + + if (pbuf != NULL) + pbuf->ccws[3].cda = pbuf->ccws[5].cda = (__u32)virt_to_phys(&buf->ccws[0]); + + } + + /* + Restart the READ channel program if IO_active is 0. + */ + + if (test_and_set_bit(0, (void *)&cup->IO_active) == 0) { + + if ((rc = c7000_bld_read_chain(cup)) != 0) { + CPrintk(0, "c7000: c7000_irq_bh: can not build read chain for unit 0x%x, return code %d\n", cup->devno, rc); + c7000_error(cup->cntlp); + clear_bit(C7000_BH_ACTIVE, (void *)&cup->flag_a); + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + return; + } + + parm = (__u32)cup; + cup->state = C7000_READ; + + if ((rc = do_IO(cup->irq, &cup->proc_head->ccws[0], parm, 0xff, flags)) != 0) { + CPrintk(0, "c7000: c7000_irq_bh: can not start READ IO to unit 0x%x, return code %d\n", cup->devno, rc); + c7000_error(cup->cntlp); + clear_bit(C7000_BH_ACTIVE, (void *)&cup->flag_a); + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + return; + } + + CPrintk(1, "c7000: c7000_irq_bh: started READ IO to unit 0x%x\n", cup->devno); + } + + /* + Clear the bh active indication. 
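+   C7000_BH_ACTIVE was set with test_and_set_bit by the interrupt
+   handler before it queued this routine, so clearing it here lets
+   the next read interrupt schedule the bottom half again.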
+ */ + + clear_bit(C7000_BH_ACTIVE, (void *)&cup->flag_a); + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + return; +} + +/* + Send a system validate control command to a unit. +*/ + +static int +c7000_send_sysval(struct c7000_unit *cup) +{ + int rc; + struct c7000_controller *ccp = cup->cntlp; + struct c7000_control_blk *ctlblkp = &(cup->control_blk); + + CPrintk(1, "c7000: c7000_send_sysval: send sysval for device 0x%x\n", cup->devno); + + /* + Build the system validate control message. + */ + + memset(ctlblkp, '\0', sizeof(struct c7000_control_blk)); + ctlblkp->cmd = C7000_SYS_VALIDATE; + ctlblkp->correlator = 0; + ctlblkp->link_id = ccp->linkid; + ctlblkp->ver = ccp->version; + memcpy(ctlblkp->hostname, ccp->lhost, NAMLEN); + memcpy(ctlblkp->unitname, ccp->uhost, NAMLEN); + ctlblkp->rdsize = C7000_DATAL; + ctlblkp->wrtsize = C7000_DATAL; + + /* + Build the channel program. + */ + + c7000_bld_wrtctl_chpgm(cup); + + /* + Do the IO and wait for write to complete. + */ + + if ((rc = c7000_doio(cup)) != 0) { + CPrintk(0, "c7000: c7000_send_sysval failed with rc = %d for unit 0x%x\n", rc, cup->devno); + return(-1); + } + + return(0); +} + +/* + Send a system validate response control command to a unit. +*/ + +static int +c7000_send_sysval_resp(struct c7000_unit *cup, unsigned char correlator, int ret_code) +{ + int rc; + struct c7000_controller *ccp = cup->cntlp; + struct c7000_control_blk *ctlblkp = &(cup->control_blk); + + CPrintk(1, "c7000: c7000_send_sysval_resp: send sysval response for device 0x%x\n", cup->devno); + + /* + Build the system validate response control message. + */ + + memset(ctlblkp, '\0', sizeof(struct c7000_control_blk)); + ctlblkp->cmd = C7000_SYS_VALIDATE_RESP; + ctlblkp->correlator = correlator; + ctlblkp->ret_code = ret_code; + ctlblkp->link_id = ccp->linkid; + ctlblkp->ver = ccp->version; + memcpy(ctlblkp->hostname, ccp->lhost, NAMLEN); + memcpy(ctlblkp->unitname, ccp->uhost, NAMLEN); + ctlblkp->rdsize = C7000_DATAL; + ctlblkp->wrtsize = C7000_DATAL; + + /* + Build the channel program. + */ + + c7000_bld_wrtctl_chpgm(cup); + + /* + Do the IO and wait for write to complete. + */ + + if ((rc = c7000_doio(cup)) != 0) { + CPrintk(0, "c7000: c7000_send_sysval_resp failed with rc = %d for unit 0x%x\n", rc, cup->devno); + return(-1); + } + + return(0); +} + +/* + Check the information read in a SYS_VALIDATE control message. +*/ + +static int +c7000_checkinfo(struct c7000_unit *cup) +{ + struct c7000_controller *ccp = cup->cntlp; + struct c7000_control_blk *ctlblkp = &cup->control_blk; + int ret_code = 0; + + if (memcmp(ccp->lhost, ctlblkp->hostname, NAMLEN) || + memcmp(ccp->uhost, ctlblkp->unitname, NAMLEN)) + ret_code = Err_Names_not_Matched; + + if (ctlblkp->ver != ccp->version) + ret_code = Err_Wrong_Version; + + if ((ctlblkp->rdsize < C7000_DATAL) || (ctlblkp->wrtsize < C7000_DATAL)) + ret_code = Err_Wrong_Frame_Size; + + if (ret_code != 0) + CPrintk(0, "c7000: c7000_checkinfo: ret_code %d for device 0x%x\n", ret_code, cup->devno); + + return(ret_code); +} + +/* + Keep reading until a sysval response comes in or an error. +*/ + +static int +c7000_get_sysval_resp(struct c7000_unit *cup) +{ + struct c7000_controller *ccp = cup->cntlp; + int resp = 1; + int req = 1; + int rc; + int ret_code = 0; + + CPrintk(1, "c7000: c7000_get_sysval_resp: get sysval response for unit 0x%x\n", cup->devno); + + /* + Wait for the response to C7000_SYS_VALIDATE and for an + inbound C7000_SYS_VALIDATE. + */ + + while (resp || req) { + + /* + Build the read channel program. 
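+   Each pass through this loop reads exactly one inbound control
+   message into cup->control_blk and then dispatches on its command
+   code below.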
+ */ + + c7000_bld_readctl_chpgm(cup); + + if ((rc = c7000_doio(cup)) != 0) { + CPrintk(0, "c7000: c7000_get_sysval_resp: failed with rc = %d for unit 0x%x\n", rc, cup->devno); + return(-1); + } + + /* + Process the control message. + */ + + switch (cup->control_blk.cmd) { + + /* + Check that response is positive and return + with success. Otherwise, return with an + error. + */ + + case C7000_SYS_VALIDATE_RESP: + + if (cup->control_blk.ret_code == 0) + resp = 0; + else { + CPrintk(0, "c7000: c7000_get_sysval_resp: receive sysval response for device 0x%x, return code %d\n", + cup->devno, + cup->control_blk.ret_code); + return(-1); + } + + break; + + /* + Check that the request is reasonable and + send a SYS_VALIDATE_RESP. Otherwise, + return with an error. + */ + + case C7000_SYS_VALIDATE: + CPrintk(1, "c7000: c7000_get_sysval_resp: receive sysval for device 0x%x\n", cup->devno); + req = 0; + ret_code = c7000_checkinfo(cup); + + if (c7000_send_sysval_resp(&ccp->cunits[C7000_WR], cup->control_blk.correlator, ret_code) != 0) + return(-1); + + if (ret_code != 0) + return(-1); + + break; + + /* + Anything else is unexpected and will result + in a return with an error. + */ + + default: + CPrintk(0, "c7000: c7000_get_sysval_resp: receive unexpected command for device 0x%x, command %d\n", cup->devno, cup->control_blk.cmd); + return(-1); + break; + } + + } + + return(0); +} + +/* + Send a connection confirm control message. +*/ + +static int +c7000_conn_confrm(struct c7000_unit *cup, unsigned char correlator, int linkid) +{ + int rc; + struct c7000_controller *ccp = cup->cntlp; + struct c7000_control_blk *ctlblkp = &(cup->control_blk); + + CPrintk(1, "c7000: c7000_conn_confrm: send the connection confirmation message for unit 0x%x\n", cup->devno); + + /* + Build the connection confirm control message. + */ + + memset(ctlblkp, '\0', sizeof(struct c7000_control_blk)); + ctlblkp->cmd = C7000_CONN_CONFRM; + ctlblkp->ver = ccp->version; + ctlblkp->link_id = linkid; + ctlblkp->correlator = correlator; + ctlblkp->rdsize = 0; + ctlblkp->wrtsize = 0; + memcpy(ctlblkp->hostname, ccp->lappl, NAMLEN); + memcpy(ctlblkp->unitname, ccp->uappl, NAMLEN); + + /* + Build the channel program. + */ + + c7000_bld_wrtctl_chpgm(cup); + + /* + Do the IO and wait for write to complete. + */ + + if ((rc = c7000_doio(cup)) != 0) { + CPrintk(0, "c7000: c7000_conn_confrm: failed with rc = %d for unit 0x%x\n", rc, cup->devno); + return(-1); + } + + return(0); +} + +/* + Send a connection request control message. +*/ + +static int +c7000_send_conn(struct c7000_unit *cup) +{ + int rc; + struct c7000_controller *ccp = cup->cntlp; + struct c7000_control_blk *ctlblkp = &(cup->control_blk); + + CPrintk(1, "c7000: c7000_send_conn: send the connection request message for unit 0x%x\n", cup->devno); + + /* + Build the connection request control message. + */ + + memset(ctlblkp, '\0', sizeof(struct c7000_control_blk)); + ctlblkp->cmd = C7000_CONN_REQ; + ctlblkp->ver = ccp->version; + ctlblkp->link_id = 0; + ctlblkp->correlator = 0; + ctlblkp->rdsize = C7000_DATAL; + ctlblkp->wrtsize = C7000_DATAL; + memcpy(ctlblkp->hostname, ccp->lappl, NAMLEN); + memcpy(ctlblkp->unitname, ccp->uappl, NAMLEN); + + /* + Build the channel program. + */ + + c7000_bld_wrtctl_chpgm(cup); + + /* + Do the IO and wait for write to complete. 
+ */ + + if ((rc = c7000_doio(cup)) != 0) { + CPrintk(0, "c7000: c7000_send_conn: failed with rc = %d for unit 0x%x\n", rc, cup->devno); + return(-1); + } + + return(0); +} + +/* + Send a disconnect control message to the link with the value of + linkid. +*/ + +static int +c7000_send_disc(struct c7000_unit *cup, int linkid) +{ + int rc; + struct c7000_controller *ccp = cup->cntlp; + struct c7000_control_blk *ctlblkp = &(cup->control_blk); + + CPrintk(1, "c7000: c7000_send_disc: send disconnect message for unit 0x%x\n", cup->devno); + + /* + Build the disconnect control message. + */ + + memset(ctlblkp, '\0', sizeof(struct c7000_control_blk)); + ctlblkp->cmd = C7000_DISCONN; + ctlblkp->ver = ccp->version; + ctlblkp->link_id = linkid; + ctlblkp->correlator = 0; + ctlblkp->rdsize = C7000_DATAL; + ctlblkp->wrtsize = C7000_DATAL; + memcpy(ctlblkp->hostname, ccp->lappl, NAMLEN); + memcpy(ctlblkp->unitname, ccp->uappl, NAMLEN); + + /* + Build the channel program. + */ + + c7000_bld_wrtctl_chpgm(cup); + + /* + Do the IO and wait for write to complete. + */ + + if ((rc = c7000_doio(cup)) != 0) { + CPrintk(0, "c7000: c7000_send_disc: failed with rc = %d for unit 0x%x\n", rc, cup->devno); + return(-1); + } + + return(0); +} + +/* + Resolve the race condition based on the link identifier value. + The adapter microcode assigns the identifiers. A higher value implies + that the race was lost. A side effect of this function is that + ccp->linkid is set to the link identifier to be used for this + connection (provided that 0 is returned). +*/ + +static int +c7000_resolve_race(struct c7000_unit *cup, int local_linkid, int remote_linkid) +{ + struct c7000_controller *ccp = cup->cntlp; + + CPrintk(1, "c7000: c7000_resolve_race: for unit 0x%x, local linkid %d, remote linkid %d\n", cup->devno, local_linkid, remote_linkid); + + /* + This local link identifier should not be zero.. + */ + + if (local_linkid == 0) { + CPrintk(0, "c7000: c7000_resolve_race: error for unit 0x%x, local linkid is null\n", cup->devno); + return(-1); + } + + /* + This indicates that there is no race. Just use our + local link identifier. + */ + + if (remote_linkid == 0) { + ccp->linkid = local_linkid; + return(0); + } + + /* + Send a connection confirm message if we lost the race to + the winning link identifier. + + Either way, save the winning link identifier. + */ + + if (local_linkid > remote_linkid) { + + if (c7000_conn_confrm(&ccp->cunits[C7000_WR], cup->control_blk.correlator, remote_linkid) != 0) { + CPrintk(0, "c7000: c7000_resolve_race: failed for unit 0x%x\n", cup->devno); + return(-1); + } + + ccp->linkid = remote_linkid; + } else { + ccp->linkid = local_linkid; + } + + return(0); +} + +/* + Get connected by processing the connection request/response/confirm + control messages. A connection request has already been sent by + calling function c7000_send_conn. +*/ + +static int +c7000_get_conn(struct c7000_unit *cup) +{ + struct c7000_controller *ccp = cup->cntlp; + int rc; + int cont = 1; + int remote_linkid = 0; + int local_linkid = 0; + + CPrintk(1, "c7000: c7000_get_conn: read the connected message for unit 0x%x\n", cup->devno); + ccp->linkid = 0; + + while (cont == 1) { + + /* + Build the read channel program. + */ + + c7000_bld_readctl_chpgm(cup); + + /* + Start the channel program to read a control message. 
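+
+   One possible inbound sequence (hypothetical link ids 3 and 5,
+   assuming both sides sent CONN_REQ at about the same time):
+
+     CONN_REQ     peer's own request; remember its link id (5)
+     CONN_RESP    answer to our request carrying local link id 3;
+                  the lower id wins, so c7000_resolve_race keeps 3
+     CONN_CONFRM  peer confirmed link 3; send a disconnect for the
+                  now redundant link 5 and leave the loop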
+ */ + + if ((rc = c7000_doio(cup)) != 0) { + CPrintk(0, "c7000: c7000_get_conn: failed with rc = %d for unit 0x%x\n", rc, cup->devno); + return(-1); + } + + /* + Process the control message that was received based + on the command code. + */ + + CPrintk(1, "c7000: c7000_get_conn: received command %d for unit 0x%x\n", cup->control_blk.cmd, cup->devno); + + switch(cup->control_blk.cmd) { + + /* + Save the remote link_id in the message for + a check in c7000_resolve_race. + */ + + case C7000_CONN_REQ: + remote_linkid = cup->control_blk.link_id; + break; + + /* + A connection response received. Resolve + the network race condition (if any) by + comparing the link identifier values. + */ + + case C7000_CONN_RESP: + + if (cup->control_blk.ret_code != 0) { + CPrintk(0, "c7000: c7000_get_conn: failed for unit 0x%x , connection response return code %d\n", + cup->devno, cup->control_blk.ret_code); + return(-1); + } + + local_linkid = cup->control_blk.link_id; + + if (c7000_resolve_race(cup, local_linkid, remote_linkid) != 0) + return(-1); + + break; + + /* + Got a confirmation to our connection request. + Disconnect the remote link identifier (if any). + Break out of the loop. + */ + + case C7000_CONN_CONFRM: + + if (remote_linkid != 0) { + + if (c7000_send_disc(&ccp->cunits[C7000_WR], remote_linkid) != 0) { + CPrintk(0, "c7000: c7000_get_conn: send disconnect failed for unit 0x%x\n", cup->devno); + return(-1); + } + + } + + cont = 0; + break; + + /* + Got a disconnect to our connection request. + Break out of the loop. + */ + + case C7000_DISCONN: + cont = 0; + break; + + /* + Anything else must be an error. + Return with an error immediately. + */ + + default: + CPrintk(0, "c7000: c7000_get_conn: failed for unit 0x%x unexpected command %d\n", + cup->devno, cup->control_blk.cmd); + return(-1); + } + + } + + /* + Be sure that we now have a link identifier. + */ + + if (ccp->linkid == 0) + return(-1); + + return(0); +} + +/* + Get statistics method. +*/ + +struct net_device_stats * +c7000_stats(STRUCT_NET_DEVICE *dev) +{ + struct c7000_controller *ccp = (struct c7000_controller *)dev->priv; + + return(&ccp->stats); +} + +/* + Open method. +*/ + +static int +c7000_open(STRUCT_NET_DEVICE *dev) +{ + int i; + struct c7000_controller *ccp = (struct c7000_controller *)dev->priv; + struct c7000_unit *cup; + int rc; + __u32 parm; + __u8 flags = 0x00; + + c7000_set_busy(dev); + + /* + Allocate space for the unit buffers. + */ + + if (c7000_alloc_buffers(dev) == -1) { + CPrintk(0, "c7000: c7000_open: can not allocate buffer space for base unit 0x%lx\n", dev->base_addr); + c7000_free_buffers(dev); /* free partially allocated buffers */ + c7000_clear_busy(dev); + return(-ENOMEM); + } + + /* + Perform the initialization for all units. + */ + + for (i = 0; i < NUNITS; i++) { + cup = &ccp->cunits[i]; + + /* + Initialize task queue structure used for the bottom + half routine. 
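+   tq.routine and tq.data are what the interrupt handler later hands
+   to queue_task(&cup->tq, &tq_immediate) followed by
+   mark_bh(IMMEDIATE_BH), so c7000_irq_bh(cup) runs on the next
+   immediate bottom-half pass.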
+ */
+
+#ifndef NEWSTUFF
+ cup->tq.next = NULL;
+#endif
+ cup->tq.sync = 0;
+ cup->tq.routine = (void *)(void *)c7000_irq_bh;
+ cup->tq.data = cup;
+ cup->state = C7000_HALT;
+#ifdef NEWSTUFF
+ init_waitqueue_head(&cup->wait);
+#endif
+ CPrintk(1, "c7000: c7000_open: issuing halt to unit 0x%x\n", cup->devno);
+
+ /*
+ Issue a halt I/O to the unit.
+ */
+
+ if ((rc = c7000_haltio(cup)) != 0) {
+ CPrintk(0, "c7000: c7000_open: halt_IO failed with rc = %d for unit 0x%x\n", rc, cup->devno);
+ continue;
+ }
+
+ cup->IO_active = 0;
+ cup->flag_a = 0;
+ cup->sigsmod = 0x00;
+
+ CPrintk(1, "c7000: c7000_open: halt complete for unit 0x%x\n", cup->devno);
+ }
+
+ /*
+ On each subchannel send a sense id.
+ */
+
+ for (i = 0; i < NUNITS; i++) {
+ cup = &ccp->cunits[i];
+
+ /*
+ Build SENSE ID channel program.
+ */
+
+ c7000_bld_senseid_chpgm(cup);
+
+ /*
+ Issue the start I/O for SENSE ID channel program.
+ */
+
+ CPrintk(1, "c7000: c7000_open: issuing SENSEID to unit 0x%x\n", cup->devno);
+
+ if ((rc = c7000_doio(cup)) != 0) {
+ CPrintk(0, "c7000: c7000_open: SENSEID failed with rc = %d for unit 0x%x\n", rc, cup->devno);
+ c7000_clear_busy(dev);
+ return(-EIO);
+ }
+
+ CPrintk(1, "c7000: c7000_open: SENSEID complete for unit 0x%x\n", cup->devno);
+ }
+
+ /*
+ Send the system validation control message.
+ */
+
+ cup = &ccp->cunits[C7000_WR];
+
+ if (c7000_send_sysval(cup) != 0) {
+ CPrintk(0, "c7000: c7000_open: can not send sysval for unit 0x%x\n", cup->devno);
+ c7000_clear_busy(dev);
+ return(-EIO);
+ }
+
+ CPrintk(1, "c7000: c7000_open: successfully sent sysval for unit 0x%x\n", cup->devno);
+ /*
+ Get the system validation response message.
+ */
+
+ cup = &ccp->cunits[C7000_RD];
+
+ if (c7000_get_sysval_resp(cup) != 0) {
+ CPrintk(0, "c7000: c7000_open: can not read sysval response for unit 0x%x\n", cup->devno);
+ c7000_clear_busy(dev);
+ return(-EIO);
+ }
+
+ CPrintk(1, "c7000: c7000_open: successfully received sysval reply for unit 0x%x\n", cup->devno);
+ ccp->cunits[C7000_RD].state = ccp->cunits[C7000_WR].state = C7000_CONNECT;
+
+ cup = &ccp->cunits[C7000_WR];
+
+ /*
+ Send a connection request.
+ */
+
+ if (c7000_send_conn(cup) != 0) {
+ CPrintk(0, "c7000: c7000_open: connection failed for unit 0x%x\n", cup->devno);
+ c7000_clear_busy(dev);
+ return(-EIO);
+ }
+
+ cup = &ccp->cunits[C7000_RD];
+
+ /*
+ Get the response to our connection request. Note that a
+ network race may occur. This is handled in c7000_get_conn.
+ */
+
+ if (c7000_get_conn(cup) != 0) {
+ CPrintk(0, "c7000: c7000_open: can not get connection response for unit 0x%x\n", cup->devno);
+ c7000_clear_busy(dev);
+ return(-EIO);
+ }
+
+ CPrintk(1, "c7000: c7000_open: successfully received connection request for unit 0x%x\n", cup->devno);
+ ccp->cunits[C7000_RD].state = ccp->cunits[C7000_WR].state = C7000_READY;
+
+ /*
+ Clear the interface statistics.
+ */
+
+ memset(&ccp->stats, '\0', sizeof(struct net_device_stats));
+
+ if ((rc = c7000_bld_read_chain(cup)) != 0) {
+ c7000_clear_busy(dev);
+ return(rc);
+ }
+
+ /*
+ Start the C7000_READ channel program but do not wait for its
+ completion.
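+   Completion is reported through c7000_intr instead: PCI interrupts
+   hand finished read buffers to the bottom half while the channel
+   program keeps running.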
+ */ + + cup->state = C7000_READ; + parm = (__u32) cup; + set_bit(0, (void *)&cup->IO_active); + + if ((rc = do_IO(cup->irq, &cup->proc_head->ccws[0], parm, 0xff, flags)) != 0) { + CPrintk(0, "c7000: c7000_open: READ failed with return code %d for unit 0x%x\n", rc, cup->devno); + c7000_error(cup->cntlp); + clear_bit(0, (void *)&cup->IO_active); + c7000_clear_busy(dev); + return(-EIO); + } + +#ifdef NEWSTUFF + netif_start_queue(dev); +#else + dev->start = 1; +#endif + CPrintk(0, "c7000: c7000_open: base unit 0x%lx is opened\n", dev->base_addr); + c7000_clear_busy(dev); + MOD_INC_USE_COUNT; /* increment module usage count */ + return(0); +} + +/* + Stop method. +*/ + +static int +c7000_stop(STRUCT_NET_DEVICE *dev) +{ + int i; + struct c7000_controller *ccp = (struct c7000_controller *)dev->priv; + struct c7000_unit *cup; + int rc; + +#ifdef NEWSTUFF +/* nothing? */ +#else + dev->start = 0; +#endif + c7000_set_busy(dev); + + /* + Send a disconnect message. + */ + + ccp->cunits[C7000_RD].state = ccp->cunits[C7000_WR].state = C7000_DISC; + cup = &ccp->cunits[C7000_WR]; + + if (c7000_send_disc(cup, ccp->linkid) != 0) { + CPrintk(0, "c7000: c7000_stop: send of disconnect message failed for unit 0x%x\n", cup->devno); + } + + CPrintk(1, "c7000: c7000_stop: successfully sent disconnect message to unit 0x%x\n", cup->devno); + + /* + Issue a halt I/O to all units. + */ + + for (i = 0; i < NUNITS; i++) { + cup = &ccp->cunits[i]; + cup->state = C7000_STOP; + CPrintk(1, "c7000: c7000_stop: issuing halt to unit 0x%x\n", cup->devno); + + if ((rc = c7000_haltio(cup)) != 0) { + CPrintk(0, "c7000: c7000_stop: halt_IO failed with rc = %d for unit 0x%x\n", rc, cup->devno); + continue; + } + + CPrintk(1, "c7000: c7000_stop: halt complete for unit 0x%x\n", cup->devno); + } + + c7000_free_buffers(dev); + CPrintk(0, "c7000: c7000_stop: base unit 0x%lx is stopped\n", dev->base_addr); + MOD_DEC_USE_COUNT; /* Decrement module usage count */ + return(0); +} + +/* + Configure the interface. +*/ + +static int +c7000_config(STRUCT_NET_DEVICE *dev, struct ifmap *map) +{ + CPrintk(1, "c7000: c7000_config: entered for base unit 0x%lx\n", dev->base_addr); + return(0); +} + +/* + Transmit a packet. +*/ + +static int +c7000_xmit(struct sk_buff *skb, STRUCT_NET_DEVICE *dev) +{ + struct c7000_controller *ccp = (struct c7000_controller *)dev->priv; + struct c7000_unit *cup; + __u32 saveflags; + __u32 parm; + __u8 flags = 0x00; + struct c7000_buffer *buf, *pbuf; + int rc; + + CPrintk(1, "c7000: c7000_xmit: entered for base unit 0x%lx\n", dev->base_addr); + + /* + When the skb pointer is NULL return. + */ + + if (skb == NULL) { + CPrintk(0, "c7000: c7000_xmit: skb pointer is null for base unit 0x%lx\n", dev->base_addr); + ccp->stats.tx_dropped++; + return(-EIO); + } + + cup = &ccp->cunits[C7000_WR]; + + /* + Lock the irq. + */ + + s390irq_spin_lock_irqsave(cup->irq, saveflags); + + /* + When the device transmission busy flag is on , no data + can be sent. Unlock the irq and return EBUSY. + */ + + if (c7000_check_busy(dev)) { + CPrintk(1, "c7000: c7000_xmit: c7000_check_busy returns true for base unit 0x%lx\n", dev->base_addr); + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + return(-EBUSY); + } + + /* + Set the device transmission busy flag on atomically. 
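+   c7000_ts_busy does a test_and_set_bit of TB_TX, so only one sender
+   at a time gets past this point; a concurrent caller sees -EBUSY
+   and the stack retries the send later.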
+ */ + + if (c7000_ts_busy(TB_TX, dev)) { + CPrintk(1, "c7000: c7000_xmit: c7000_ts_busy returns true for base unit 0x%lx\n", dev->base_addr); + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + return(-EBUSY); + } + + CPrintk(1, "c7000: c7000_xmit: set TB_TX for unit 0x%x\n", cup->devno); + + /* + Obtain a free buffer. If none are free then mark tbusy + with TB_NOBUFFER and return EBUSY. + */ + + if ((buf = c7000_get_buffer(cup)) == NULL) { + CPrintk(1, "c7000: c7000_xmit: setting TB_NOBUFFER for base unit 0x%lx\n", dev->base_addr); + c7000_setbit_busy(TB_NOBUFFER, dev); + c7000_clearbit_busy(TB_TX, dev); + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + return(-EBUSY); + } + + CPrintk(1, "c7000: c7000_xmit: Got buffer for unit 0x%x\n", cup->devno); + + /* + Save the length of the skb data and then copy it to the + buffer. Queue the buffer on the processing list. + */ + + buf->len = skb->len; + memcpy(buf->data, skb->data, skb->len); + memset(buf->data + C7000_DATAL + C7000_READHDRL, '\0', C7000_READFFL); + pbuf = cup->proc_tail; + c7000_queue_buffer(cup, buf); + + /* + Chain the buffer to the running channel program. + */ + + if (test_bit(0, (void *)&cup->IO_active) && pbuf != NULL) { + c7000_bld_wrt_chpgm(cup, buf); + pbuf->ccws[2].cda = (__u32)virt_to_phys(&buf->ccws[0]); + } + + /* + Free the socket buffer. + */ + + dev_kfree_skb(skb); + + /* + If the unit is not currently doing IO, build a channel + program and start the IO for the buffers on the processing + chain. + */ + + if (test_and_set_bit(0, (void *)&cup->IO_active) == 0) { + CPrintk(1, "c7000: c7000_xmit: start IO for unit 0x%x\n", cup->devno); + c7000_bld_wrt_chain(cup); + parm = (__u32) cup; + cup->state = C7000_WRITE; + + if ((rc = do_IO(cup->irq, &cup->proc_head->ccws[0], parm, 0xff, flags)) != 0) { + CPrintk(0, "c7000: c7000_xmit: do_IO failed with return code %d for unit 0x%x\n", rc, cup->devno); + c7000_error(cup->cntlp); + c7000_clearbit_busy(TB_TX, dev); + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + return(-EIO); + } + + dev->trans_start = jiffies; + CPrintk(1, "c7000: c7000_xmit: do_IO succeeds for unit 0x%x\n", cup->devno); + } + + /* + If the free chain is now NULL, set the TB_NOBUFFER flag. + */ + + if (cup->free == NULL) { + CPrintk(1, "c7000: c7000_xmit: setting TB_NOBUFFER for base unit 0x%lx\n", dev->base_addr); + c7000_setbit_busy(TB_NOBUFFER, dev); + } + + c7000_clearbit_busy(TB_TX, dev); + s390irq_spin_unlock_irqrestore(cup->irq, saveflags); + CPrintk(1, "c7000: c7000_xmit: exits for unit 0x%x\n", cup->devno); + return(0); +} + +/* + Handle an ioctl from a user process. +*/ + +static int +c7000_ioctl(STRUCT_NET_DEVICE *dev, struct ifreq *ifr, int cmd) +{ + CPrintk(1, "c7000: c7000_ioctl: entered for base unit 0x%lx with cmd %d\n", dev->base_addr, cmd); + return(0); +} + +/* + Analyze the interrupt status and return a value + that identifies the type. +*/ + +static enum c7000_rupt +c7000_check_csw(devstat_t *devstat) +{ + + /* + Check for channel detected conditions (except PCI). + */ + + if ((devstat->cstat & ~SCHN_STAT_PCI) != 0) { + CPrintk(0, "c7000: c7000_check_csw: channel status 0x%x for unit 0x%x\n", devstat->cstat, devstat->devno); + return(C7000_CHANERR); + } + + /* + Fast path the normal cases. + */ + + if (devstat->dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) + return(C7000_NORMAL); + + if (devstat->cstat == SCHN_STAT_PCI) + return(C7000_NORMAL); + + /* + Check for exceptions. 
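+   Unit checks are further split on sense byte 0: C7000_BOX_RESET
+   (the device was reset) is reported as C7000_UCK_RESET, anything
+   else as a plain C7000_UCK.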
+ */ + + if (devstat->dstat & DEV_STAT_UNIT_CHECK) { + CPrintk(0, "c7000: c7000_check_csw: unit check for unit 0x%x, sense byte0 0x%2.2x\n", devstat->devno, devstat->ii.sense.data[0]); + + if (devstat->ii.sense.data[0] == C7000_BOX_RESET) + return(C7000_UCK_RESET); + else + return(C7000_UCK); + + } else if (devstat->dstat & DEV_STAT_UNIT_EXCEP) { + CPrintk(0, "c7000: c7000_check_csw: unit exception for unit 0x%x\n", devstat->devno); + return(C7000_UE); + + } else if (devstat->dstat & DEV_STAT_ATTENTION) { + CPrintk(0, "c7000: c7000_check_csw: attention for unit 0x%x\n", devstat->devno); + return(C7000_ATTN); + + } else if (devstat->dstat & DEV_STAT_BUSY) { + CPrintk(0, "c7000: c7000_check_csw: busy for unit 0x%x\n", devstat->devno); + return(C7000_BUSY); + + } else { + CPrintk(0, "c7000: c7000_check_csw: channel status 0x%2.2x , device status 0x%2.2x, devstat flags 0x%8.8x for unit 0x%x\n", + devstat->cstat, devstat->dstat, devstat->flag, devstat->devno); + return(C7000_OTHER); + } + + /* NOT REACHED */ + +} + +/* + Retry the last CCW chain to the unit. +*/ + +static void +c7000_retry_io(struct c7000_unit *cup) +{ + int rc; + __u32 parm; + __u8 flags = 0x00; + ccw1_t *ccwp; + + if (++cup->retries > C7000_MAX_RETRIES) { + c7000_error(cup->cntlp); + CPrintk(0, "c7000: c7000_retry_io: retry IO for unit 0x%x exceeds maximum retry count\n", cup->devno); + return; + } + + set_bit(0, (void *)&cup->IO_active); + parm = (__u32)cup; + + if (cup->state == C7000_READ || cup->state == C7000_WRITE) + ccwp = &cup->proc_head->ccws[0]; + else + ccwp = &cup->ccws[0]; + + if ((rc = do_IO(cup->irq, ccwp, parm, 0xff, flags)) != 0) { + CPrintk(0, "c7000: c7000_retry_io: can not retry IO for unit 0x%x, return code %d\n", cup->devno, rc); + clear_bit(0, (void *)&cup->IO_active); + c7000_error(cup->cntlp); + } + + CPrintk(1, "c7000: c7000_retry_io: retry IO for unit 0x%x, retry count %d\n", cup->devno, cup->retries); + return; +} + +/* + Process a read interrupt by scanning the list of buffers + for ones that have completed and queue them for the bottom + half to process. +*/ + +static void +c7000_proc_rintr(struct c7000_unit *cup) +{ + struct c7000_buffer *buf; + struct c7000_rd_header *head; + int num_read = 0; + + while (cup->proc_head != NULL) { + head = (struct c7000_rd_header *)(cup->proc_head->data + C7000_DATAL); + + /* + The flag byte in the read header will be set to + FLAG_FF when the buffer has been read. + */ + + if (head->flag != FLAG_FF) + break; + + /* + Dequeue the buffer from the proc chain + and enqueue it on the bh chain for + the bh routine to process. + */ + + buf = c7000_dequeue_buffer(cup); + c7000_queue_bh_buffer(cup, buf); + num_read++; + } + + CPrintk(1, "c7000: c7000_proc_rintr: %d buffers read for unit 0x%x\n", num_read, cup->devno); + return; +} + +/* + Process all completed buffers on the proc chain. + A buffer is completed if it's READFF flag is FLAG_FF. +*/ + +static int +c7000_proc_wintr(struct c7000_unit *cup) +{ + struct c7000_controller *ccp = cup->cntlp; + struct c7000_buffer *buf; + int num_write = 0; + + if (cup->proc_head == NULL) { + CPrintk(0, "c7000: c7000_proc_wintr: unexpected NULL processing chain pointer for unit 0x%x\n", cup->devno); + return(num_write); + } + + while (cup->proc_head != NULL) { + + /* + Check if the buffer has completed. + */ + + if (*(cup->proc_head->data + C7000_DATAL + C7000_READHDRL) != FLAG_FF) + break; + + /* + Remove buffer from top of processing chain. + Place it on free list. 
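+   (The byte tested above is the start of the buffer's READFF area,
+   which the write channel program's READFF CCW sets to FLAG_FF once
+   the adapter has accepted that buffer.)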
+ */ + + buf = c7000_dequeue_buffer(cup); + c7000_release_buffer(cup, buf); + num_write++; + + /* + Update transmitted packets statistic. + */ + + ccp->stats.tx_packets++; + } + + CPrintk(1, "c7000: c7000_proc_wintr: %d buffers written for unit 0x%x\n", num_write, cup->devno); + return(num_write); +} + +/* + Interrupt handler. +*/ + +static void +c7000_intr(int irq, void *initparm, struct pt_regs *regs) +{ + devstat_t *devstat = ((devstat_t *) initparm); + struct c7000_unit *cup = NULL; + struct c7000_controller *ccp = NULL; + STRUCT_NET_DEVICE *dev = NULL; + __u32 parm; + __u8 flags = 0x00; + int rc; + + /* + Discard unsolicited interrupts + */ + + if (devstat->intparm == 0) { + CPrintk(0, "c7000: c7000_intr: unsolicited interrupt for device 0x%x, cstat = 0x%2.2x, dstat = 0x%2.2x, flag = 0x%8.8x\n", + devstat->devno, devstat->cstat, devstat->dstat, devstat->flag); + return; + } + + /* + Obtain the c7000_unit structure pointer. + */ + + cup = (struct c7000_unit *)(devstat->intparm); + + /* + Obtain the c7000_controller structure and device structure + pointers. + */ + + if (cup == NULL) { + CPrintk(0, "c7000: c7000_intr: c7000_unit pointer is NULL in devstat\n"); + return; + } + + ccp = cup->cntlp; + + if (ccp == NULL) { + CPrintk(0, "c7000: c7000_intr: c7000_cntlp pointer is NULL in c7000_unit structure 0x%x for unit 0x%x\n", (int)cup, cup->devno); + return; + } + + dev = ccp->dev; + + if (dev == NULL) { + CPrintk(0, "c7000: c7000_intr: device pointer is NULL in c7000_controller structure 0x%x for unit 0x%x\n", (int)ccp, cup->devno); + return; + } + + /* + Finite state machine (fsm) handling. + */ + + CPrintk(1, "c7000: c7000_intr: entered with state %d flag 0x%8.8x for unit 0x%x\n", cup->state, devstat->flag, cup->devno); + + switch(cup->state) { + + /* + Not expected to be here when in INIT state. + */ + + case C7000_INIT: + + break; + + /* + Enter state C7000_SID and wakeup the sleeping + process in c7000_open. + */ + + case C7000_HALT: + + if ((devstat->flag & DEVSTAT_FINAL_STATUS) == 0) + break; + + cup->state = C7000_SID; + wake_up(&cup->wait); + break; + + /* + Enter state C7000_SYSVAL and wakeup the sleeping + process in c7000_open. + */ + + case C7000_SID: + + if ((devstat->flag & DEVSTAT_FINAL_STATUS) == 0) + break; + + if (c7000_check_csw(devstat) != 0) { + c7000_retry_io(cup); + + if (cup->state == C7000_ERROR) + wake_up(&cup->wait); + + break; + } + + cup->retries = 0; + cup->state = C7000_SYSVAL; + wake_up(&cup->wait); + break; + + /* + Wakeup the sleeping process in c7000_open. + */ + + case C7000_SYSVAL: + + if ((devstat->flag & DEVSTAT_FINAL_STATUS) == 0) + break; + + if (c7000_check_csw(devstat) != 0) { + c7000_retry_io(cup); + + if (cup->state == C7000_ERROR) + wake_up(&cup->wait); + + break; + } + + cup->retries = 0; + wake_up(&cup->wait); + break; + + /* + Wakeup the sleeping process in c7000_open. + */ + + case C7000_CONNECT: + + if ((devstat->flag & DEVSTAT_FINAL_STATUS) == 0) + break; + + if (c7000_check_csw(devstat) != 0) { + c7000_retry_io(cup); + + if (cup->state == C7000_ERROR) + wake_up(&cup->wait); + + break; + } + + cup->retries = 0; + wake_up(&cup->wait); + break; + + /* + Not expected to be entered here. + */ + + case C7000_READY: + break; + + /* + Process the data that was just read. + */ + + case C7000_READ: + + if ((devstat->flag & (DEVSTAT_PCI | DEVSTAT_FINAL_STATUS)) == 0) + break; + + CPrintk(1, "c7000: c7000_intr: process read interrupt for unit 0x%x , devstat flag = 0x%8.8x\n", cup->devno, devstat->flag); + + /* + Check for serious errors. 
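+   A PCI here is an intermediate interrupt: the channel program is
+   still running and has merely filled more buffers. Only
+   DEVSTAT_FINAL_STATUS means the program itself has ended.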
+ */ + + if (c7000_check_csw(devstat) != 0) { + ccp->stats.rx_errors++; + c7000_error(cup->cntlp); + break; + } + + /* + Build the bottom half buffer list. + */ + + c7000_proc_rintr(cup); + + /* + When final status is received clear + the IO active bit. + */ + + if (devstat->flag & DEVSTAT_FINAL_STATUS) { + clear_bit(0, (void *)&cup->IO_active); + } + + /* + If there are free buffers redrive the IO. + */ + + if ((devstat->flag & DEVSTAT_FINAL_STATUS) && + (cup->free != NULL)) { + c7000_bld_read_chain(cup); + parm = (__u32)cup; + set_bit(0, (void *)&cup->IO_active); + + if ((rc = do_IO(cup->irq, &cup->proc_head->ccws[0], parm, 0xff, flags)) != 0) { + clear_bit(0, (void *)&cup->IO_active); + CPrintk(0, "c7000: c7000_intr: do_IO failed with return code %d for unit 0x%x\n", rc, cup->devno); + c7000_error(cup->cntlp); + break; + } + + CPrintk(1, "c7000: c7000_intr: started read io for unit 0x%x\n", cup->devno); + } + + /* + Initiate bottom half routine to process + data that was read. + */ + + if (test_and_set_bit(C7000_BH_ACTIVE, (void *)&cup->flag_a) == 0) { + queue_task(&cup->tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); + } + + break; + + /* + Free the transmitted buffers and restart the channel + process (if necessary). + */ + + case C7000_WRITE: + + if ((devstat->flag & DEVSTAT_FINAL_STATUS) == 0) + break; + + if (c7000_check_csw(devstat) != 0) { + ccp->stats.tx_errors++; + c7000_error(cup->cntlp); + break; + } + + /* + If at least one buffer was freed, clear + the NOBUFFER indication. + */ + + if (c7000_proc_wintr(cup) != 0) { + c7000_clearbit_busy(TB_NOBUFFER, dev); + } + + /* + Restart the channel program if there are more + buffers on the processing chain. + */ + + if (cup->proc_head != NULL) { + c7000_bld_wrt_chain(cup); + parm = (__u32)cup; + set_bit(0, (void *)&cup->IO_active); + + if ((rc = do_IO(cup->irq, &cup->proc_head->ccws[0], parm, 0xff, flags)) != 0) { + CPrintk(0, "c7000: c7000_intr: do_IO failed with return code %d for unit 0x%x\n", rc, cup->devno); + clear_bit(0, (void *)&cup->IO_active); + c7000_error(cup->cntlp); + break; + } + + dev->trans_start = jiffies; + } else { + clear_bit(0, (void *)&cup->IO_active); + cup->state = C7000_READY; + } + + break; + + /* + Disconnect message completed. Wakeup the + sleeping process in c7000_stop. + */ + + case C7000_DISC: + if ((devstat->flag & DEVSTAT_FINAL_STATUS) == 0) + break; + + if (c7000_check_csw(devstat) != 0) { + c7000_retry_io(cup); + + if (cup->state == C7000_ERROR) + wake_up(&cup->wait); + + break; + } + + cup->retries = 0; + wake_up(&cup->wait); + break; + + /* + Subchannel is now halted. Wakeup the sleeping + process in c7000_stop. Set the state to C7000_STOPPED. + */ + + case C7000_STOP: + + cup->state = C7000_STOPPED; + wake_up(&cup->wait); + break; + + /* + When in error state, stay there until the + interface is recycled. + */ + + case C7000_ERROR: + + break; + + /* + Should not reach here + */ + + default: + CPrintk(0, "c7000: c7000_intr: entered default case for unit 0x%x, state %d\n", cup->devno, cup->state); + break; + + } + + CPrintk(1, "c7000: c7000_intr: exited with state %d for unit 0x%x\n", cup->state, cup->devno); + return; +} + +/* + Fill in system validation name padding it with blanks. +*/ + +static void +c7000_fill_name(char *dst, char *src) +{ + char *tmp = dst; + int i; + + for (i = 0; i < NAMLEN; i++, tmp++) + *tmp = ' '; + + for (i = 0; i < NAMLEN && *src != '\0'; i++) + *dst++ = *src++; + + return; +} + +/* + Initialization routine called when the device is registered. 
+*/ + +static int +c7000_init(STRUCT_NET_DEVICE *dev) +{ + struct c7000_controller *ccp; + int i; + int unitaddr; + int irq; + + /* + Find the position of base_addr in the bases array. + */ + + for (i = 0; i < MAX_C7000; i++) + if (bases[i] == dev->base_addr) + break; + + if (i == MAX_C7000) + return(-ENODEV); + + /* + Make sure it is a C7000 type of device. + */ + + if (c7000_check_devices(dev->base_addr) != 0) { + CPrintk(0, "c7000: c7000_init: base unit 0x%lx is not the right type\n", dev->base_addr); + return(-ENODEV); + } + + /* + Initialize the device structure functions. + Note that ARP is not done on the CLAW interface. + There is no ethernet header. + */ + + dev->mtu = C7000_DATAL; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->type = ARPHRD_SLIP; + dev->tx_queue_len = C7000_TXQUEUE_LEN; + dev->flags = IFF_NOARP; + dev->open = c7000_open; + dev->stop = c7000_stop; + dev->set_config = c7000_config; + dev->hard_start_xmit = c7000_xmit; + dev->do_ioctl = c7000_ioctl; + dev->get_stats = c7000_stats; + + /* + Allocate space for a private data structure. + */ + + if ((ccp = dev->priv = kmalloc(sizeof(struct c7000_controller), GFP_KERNEL)) == NULL) { + CPrintk(0, "c7000: c7000_init: base unit 0x%lx can not be initialized\n", dev->base_addr); + return(-ENOMEM); + } + + CPrintk(1, "c7000: c7000_init: allocated a c7000_controller structure at address 0x%x\n", (int)ccp); + memset(ccp, '\0', sizeof(struct c7000_controller)); + ccp->dev = dev; + ccp->base_addr = dev->base_addr; + + /* + Populate the system validation name with default values. + */ + + c7000_fill_name(ccp->lappl, C7000_DFLT_LAPPL); + c7000_fill_name(ccp->lhost, C7000_DFLT_LHOST); + c7000_fill_name(ccp->uappl, C7000_DFLT_UAPPL); + c7000_fill_name(ccp->uhost, C7000_DFLT_UHOST); + + /* + When values have been supplied, replace the prior defaults. + */ + + if (i == 0) { + + if (lappl0 != NULL) + c7000_fill_name(ccp->lappl, lappl0); + + if (lhost0 != NULL) + c7000_fill_name(ccp->lhost, lhost0); + + if (uappl0 != NULL) + c7000_fill_name(ccp->uappl, uappl0); + + if (uhost0 != NULL) + c7000_fill_name(ccp->uhost, uhost0); + + } else if (i == 1) { + + if (lappl1 != NULL) + c7000_fill_name(ccp->lappl, lappl1); + + if (lhost1 != NULL) + c7000_fill_name(ccp->lhost, lhost1); + + if (uappl1 != NULL) + c7000_fill_name(ccp->uappl, uappl1); + + if (uhost1 != NULL) + c7000_fill_name(ccp->uhost, uhost1); + + } else if (i == 2) { + + if (lappl2 != NULL) + c7000_fill_name(ccp->lappl, lappl2); + + if (lhost2 != NULL) + c7000_fill_name(ccp->lhost, lhost2); + + if (uappl2 != NULL) + c7000_fill_name(ccp->uappl, uappl2); + + if (uhost2 != NULL) + c7000_fill_name(ccp->uhost, uhost2); + + } else { + + if (lappl3 != NULL) + c7000_fill_name(ccp->lappl, lappl3); + + if (lhost3 != NULL) + c7000_fill_name(ccp->lhost, lhost3); + + if (uappl3 != NULL) + c7000_fill_name(ccp->uappl, uappl3); + + if (uhost3 != NULL) + c7000_fill_name(ccp->uhost, uhost3); + + } + + CPrintk(1, "c7000: c7000_init: lappl = %8.8s lhost = %8.8s uappl = %8.8s uhost = %8.8s for base unit 0x%x\n", ccp->lappl, ccp->lhost, ccp->uappl, ccp->uhost, ccp->base_addr); + ccp->version = 2; + ccp->linkid = 0; + + /* + Initialize the fields in the embedded cunits + array. This type of controller occupies a range + of three contiguous device numbers. + */ + + for (i = 0; i < NUNITS; i++) { + unitaddr = dev->base_addr + i; + + /* + Get the subchannel number. 
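+   Each of the NUNITS units occupies one device number starting at
+   dev->base_addr; get_irq_by_devno maps that device number to the
+   subchannel (irq) used for all later I/O against the unit.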
+ */ + + if ((irq = ccp->cunits[i].irq = get_irq_by_devno(unitaddr)) == -1) { + CPrintk(0, "c7000: c7000_init: can not get subchannel for unit 0x%x\n", unitaddr); + return(-ENODEV); + } + + /* + Get control of the subchannel. + */ + + if (request_irq(irq, c7000_intr, SA_INTERRUPT, dev->name, &ccp->cunits[i].devstat) != 0) { + CPrintk(0, "c7000: c7000_init: can not get control of subchannel 0x%x for unit 0x%x\n", irq, unitaddr); + return(-EBUSY); + } + + CPrintk(1, "c7000: c7000_init: obtained control of subchannel 0x%x for unit 0x%x\n", irq, unitaddr); + ccp->cunits[i].devno = unitaddr; + ccp->cunits[i].IO_active = 0; + ccp->cunits[i].state = C7000_INIT; + ccp->cunits[i].cntlp = ccp; + CPrintk(1, "c7000: c7000_init: initialized unit 0x%x on subchannel 0x%x\n", unitaddr, irq); + } + + return(0); +} + +/* + Probe for the Cisco 7000 unit base addresses. +*/ + +static void +c7000_probe(void) +{ + s390_dev_info_t d; + int i; + int j; + int idx; + + /* + Probe for up to MAX_C7000 devices. + Get the first irq into variable idx. + */ + + idx = get_irq_first(); + + for (j = 0; j < MAX_C7000; j++) { + + if (idx < 0) + break; + + /* + Continue scanning the irq's. Variable idx + maintains the location from the prior scan. + */ + + for (i = idx; i >= 0; i = get_irq_next(i)) { + + /* + Ignore invalid irq's. + */ + + if (get_dev_info_by_irq(i, &d) < 0) + continue; + + /* + A Cisco 7000 is defined as a 3088 model + type 0x61. + */ + + if (d.sid_data.cu_type == C7000_CU_TYPE && + d.sid_data.cu_model == C7000_CU_MODEL) { + CPrintk(0, "c7000_probe: unit probe found 0x%x\n", d.devno); + bases[j] = d.devno; + + /* + Skip the write irq and setup idx + to probe for the next box. + */ + + idx = get_irq_next(i + 1); + break; + } + + } + + } + + return; +} + +/* + Module loading. Register each C7000 interface found via probing + or insmod command parameters. +*/ + +int +init_module(void) +{ + int result; + int i; + + for (i = 0 ; i < MAX_C7000; i++) + bases[i] = -1; + + /* + Perform automatic detection provided it has not been disabled + by the noauto parameter. + */ + + if (noauto == 0) + c7000_probe(); + + /* + Populate bases array from the module basex parameters replacing + what probing found above. + */ + + if (base0 != -1) + bases[0] = base0; + + if (base1 != -1) + bases[1] = base1; + + if (base2 != -1) + bases[2] = base2; + + if (base3 != -1) + bases[3] = base3; + + for (i = 0; i < MAX_C7000; i++) { + + if (bases[i] == -1) + continue; + + /* + Initialize the device structure. + */ + + memset(&c7000_devices[i], '\0', sizeof(STRUCT_NET_DEVICE)); +#ifdef NEWSTUFF + strcpy(c7000_devices[i].name, ifnames[i]); +#else + c7000_devices[i].name = &ifnames[i][0]; +#endif + c7000_devices[i].base_addr = bases[i]; + c7000_devices[i].init = c7000_init; + + /* + Register the device. This creates the interface + such as ci0. + */ + + if ((result = register_netdev(&c7000_devices[i])) != 0) { + CPrintk(0, "c7000: init_module: error %d registering base unit 0x%x\n", + result, bases[i]); + c7000_devices[i].base_addr = -1; + } else { + CPrintk(1, "c7000: init_module: registered base unit 0x%x on interface %s\n", bases[i], ifnames[i]); + } + + } + + CPrintk(0, "c7000: init_module: module loaded\n"); + return(0); +} + +/* + Module unloading. Unregister the interface and free kernel + allocated memory. 
+*/ + +void +cleanup_module(void) +{ + int i; + int j; + struct c7000_controller *ccp; + + for (i = 0; i < MAX_C7000; i++) { + + if (bases[i] == -1) + continue; + + /* + If the device was registered, it must be unregistered + prior to unloading the module. + */ + + if (c7000_devices[i].base_addr != -1) { + + ccp = (struct c7000_controller *) c7000_devices[i].priv; + + if (ccp != NULL) { + + for (j = 0; j < NUNITS ; j++) { + CPrintk(1, "c7000: clean_module: free subchannel 0x%x for unit 0x%x\n", ccp->cunits[j].irq, ccp->cunits[j].devno); + free_irq(ccp->cunits[j].irq, &ccp->cunits[j].devstat); + } + + CPrintk(1, "c7000: clean_module: free a c7000_controller structure at address 0x%x\n", (int)ccp); + kfree(ccp); + } + + unregister_netdev(&c7000_devices[i]); + } + + bases[i] = -1; + } + + CPrintk(0, "c7000: clean_module: module unloaded\n"); + return; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/net/ctcmain.c linux.21rc5-ac2/drivers/s390/net/ctcmain.c --- linux.21rc5/drivers/s390/net/ctcmain.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/net/ctcmain.c 2003-04-25 13:51:49.000000000 +0100 @@ -1,5 +1,5 @@ /* - * $Id: ctcmain.c,v 1.55 2001/12/03 14:28:45 felfert Exp $ + * $Id: ctcmain.c,v 1.57 2002/10/23 18:51:01 felfert Exp $ * * CTC / ESCON network driver * @@ -35,7 +35,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * - * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.55 $ + * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.57 $ * */ @@ -301,15 +301,23 @@ ctc_profile prof; unsigned char *trans_skb_data; + + __u16 logflags; } channel; #define CHANNEL_FLAGS_READ 0 #define CHANNEL_FLAGS_WRITE 1 #define CHANNEL_FLAGS_INUSE 2 #define CHANNEL_FLAGS_BUFSIZE_CHANGED 4 +#define CHANNEL_FLAGS_FAILED 8 #define CHANNEL_FLAGS_RWMASK 1 #define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK) +#define LOG_FLAG_ILLEGALPKT 1 +#define LOG_FLAG_ILLEGALSIZE 2 +#define LOG_FLAG_OVERRUN 4 +#define LOG_FLAG_NOMEM 8 + /** * Linked list of all detected channels. */ @@ -337,6 +345,11 @@ struct proc_dir_entry *proc_stat_entry; struct proc_dir_entry *proc_ctrl_entry; int proc_registered; + + /** + * Timer for restarting after I/O Errors + */ + fsm_timer restart_timer; } ctc_priv; /** @@ -387,7 +400,7 @@ */ static void print_banner(void) { static int printed = 0; - char vbuf[] = "$Revision: 1.55 $"; + char vbuf[] = "$Revision: 1.57 $"; char *version = vbuf; if (printed) @@ -517,6 +530,7 @@ DEV_EVENT_TXUP, DEV_EVENT_RXDOWN, DEV_EVENT_TXDOWN, + DEV_EVENT_RESTART, /** * MUST be always the last element!! */ @@ -530,6 +544,7 @@ "TX up", "RX down", "TX down", + "Restart", }; /** @@ -745,16 +760,24 @@ skb_pull(pskb, LL_HEADER_LENGTH); if ((ch->protocol == CTC_PROTO_S390) && (header->type != ETH_P_IP)) { - /** - * Check packet type only if we stick strictly - * to S/390's protocol of OS390. This only - * supports IP. Otherwise allow any packet - * type. - */ - printk(KERN_WARNING - "%s Illegal packet type 0x%04x " - "received, dropping\n", - dev->name, header->type); +#ifndef DEBUG + if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) { +#endif + /** + * Check packet type only if we stick strictly + * to S/390's protocol of OS390. This only + * supports IP. Otherwise allow any packet + * type. 
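+					 * (The LOG_FLAG_* bits keep each of these warnings to a
+					 * single line: once set, the message is suppressed until
+					 * the next good frame clears ch->logflags again.)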
+ */ + printk(KERN_WARNING + "%s Illegal packet type 0x%04x " + "received, dropping\n", + dev->name, header->type); + ch->logflags |= LOG_FLAG_ILLEGALPKT; +#ifndef DEBUG + } +#endif + #ifdef DEBUG ctc_dump_skb(pskb, -6); #endif @@ -767,11 +790,18 @@ if ((header->length == 0) || (header->length > skb_tailroom(pskb)) || (header->length > len)) { - printk(KERN_WARNING - "%s Illegal packet size %d " - "received (MTU=%d blocklen=%d), " - "dropping\n", dev->name, header->length, - dev->mtu, len); +#ifndef DEBUG + if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) { +#endif + printk(KERN_WARNING + "%s Illegal packet size %d " + "received (MTU=%d blocklen=%d), " + "dropping\n", dev->name, header->length, + dev->mtu, len); + ch->logflags |= LOG_FLAG_ILLEGALSIZE; +#ifndef DEBUG + } +#endif #ifdef DEBUG ctc_dump_skb(pskb, -6); #endif @@ -780,10 +810,17 @@ return; } if (header->length > skb_tailroom(pskb)) { - printk(KERN_WARNING - "%s Illegal packet size %d " - "(beyond the end of received data), " - "dropping\n", dev->name, header->length); +#ifndef DEBUG + if (!(ch->logflags & LOG_FLAG_OVERRUN)) { +#endif + printk(KERN_WARNING + "%s Illegal packet size %d " + "(beyond the end of received data), " + "dropping\n", dev->name, header->length); + ch->logflags |= LOG_FLAG_OVERRUN; +#ifndef DEBUG + } +#endif #ifdef DEBUG ctc_dump_skb(pskb, -6); #endif @@ -796,9 +833,16 @@ len -= (LL_HEADER_LENGTH + header->length); skb = dev_alloc_skb(pskb->len); if (!skb) { - printk(KERN_WARNING - "%s Out of memory in ctc_unpack_skb\n", - dev->name); +#ifndef DEBUG + if (!(ch->logflags & LOG_FLAG_NOMEM)) { +#endif + printk(KERN_WARNING + "%s Out of memory in ctc_unpack_skb\n", + dev->name); + ch->logflags |= LOG_FLAG_OVERRUN; +#ifndef DEBUG + } +#endif privptr->stats.rx_dropped++; return; } @@ -811,6 +855,10 @@ ctc_tty_netif_rx(skb); else netif_rx(skb); + /** + * Successful rx; reset logflags + */ + ch->logflags = 0; privptr->stats.rx_packets++; privptr->stats.rx_bytes += skb->len; if (len > 0) { @@ -1110,14 +1158,14 @@ fsm_deltimer(&ch->timer); if (len < 8) { - printk(KERN_WARNING "%s: got packet with length %d < 8\n", + printk(KERN_DEBUG "%s: got packet with length %d < 8\n", dev->name, len); privptr->stats.rx_dropped++; privptr->stats.rx_length_errors++; goto again; } if (len > ch->max_bufsize) { - printk(KERN_WARNING "%s: got packet with length %d > %d\n", + printk(KERN_DEBUG "%s: got packet with length %d > %d\n", dev->name, len, ch->max_bufsize); privptr->stats.rx_dropped++; privptr->stats.rx_length_errors++; @@ -1137,7 +1185,7 @@ break; } if ((len < block_len) || (len > check_len)) { - printk(KERN_WARNING "%s: got block length %d != rx length %d\n", + printk(KERN_DEBUG "%s: got block length %d != rx length %d\n", dev->name, block_len, len); #ifdef DEBUG ctc_dump_skb(skb, 0); @@ -1756,6 +1804,16 @@ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_TXDOWN, dev); } } + +static void ch_action_reinit(fsm_instance *fi, int event, void *arg) +{ + channel *ch = (channel *)arg; + net_device *dev = ch->netdev; + ctc_priv *privptr = dev->priv; + + ch_action_iofatal(fi, event, arg); + fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev); +} /** * The statemachine for a channel. 
@@ -1777,7 +1835,7 @@ { CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode }, { CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr }, { CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal }, - { CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_iofatal }, + { CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit }, { CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail }, { CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio }, @@ -1792,7 +1850,7 @@ { CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr }, { CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode }, { CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal }, - { CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_iofatal }, + { CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit }, { CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail }, { CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio }, @@ -1803,7 +1861,7 @@ { CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr }, { CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail }, { CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal }, - { CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_iofatal }, + { CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit }, { CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio }, { CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail }, @@ -1813,7 +1871,7 @@ { CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc }, // { CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry }, { CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal }, - { CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_iofatal }, + { CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit }, { CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail }, { CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx }, @@ -1824,7 +1882,7 @@ { CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr }, { CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr }, { CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal }, - { CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_iofatal }, + { CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit }, { CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail }, { CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio }, @@ -1833,7 +1891,7 @@ { CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop }, { CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop }, { CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal }, - { CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_iofatal }, + { CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit }, { CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail }, { CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop }, @@ -1857,7 +1915,7 @@ { CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry }, { CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry }, { CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal }, - { CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_iofatal }, + { CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit }, { CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail }, { CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio }, @@ -2274,6 +2332,7 @@ ctc_priv *privptr = dev->priv; int direction; + fsm_deltimer(&privptr->restart_timer); fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); for (direction = READ; direction <= WRITE; direction++) { channel *ch = privptr->channel[direction]; @@ -2301,6 +2360,18 @@ } } +static void dev_action_restart(fsm_instance *fi, int event, void *arg) +{ + net_device *dev = (net_device *)arg; + ctc_priv *privptr = dev->priv; + + printk(KERN_DEBUG "%s: Restarting\n", dev->name); + dev_action_stop(fi, event, arg); + fsm_event(privptr->fsm, 
DEV_EVENT_STOP, dev); + fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC, + DEV_EVENT_START, dev); +} + /** * Called from channel statemachine * when a channel is up and running. @@ -2402,43 +2473,50 @@ } static const fsm_node dev_fsm[] = { - { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, - { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start }, - { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, - { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, - - { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start }, - { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, - { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, - { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown }, - - { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start }, - { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, - { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, - { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown }, - - { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop }, - { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup }, - { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup }, - { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, - { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, - - { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop }, - { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, - { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, - { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown }, - - { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop }, - { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, - { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, - { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown }, - - { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, - { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown }, - { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown }, - { DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop }, - { DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart }, + + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, + + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, + + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart 
}, + + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, + + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, + + { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop }, + { DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop }, + { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart }, }; static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); @@ -3042,7 +3120,7 @@ proc_net_unregister(ctc_dir.low_ino); #endif } -#endif MODULE +#endif /* MODULE */ /** * Create a device specific subdirectory in /proc/net/ctc/ with the @@ -3501,6 +3579,7 @@ return NULL; } fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); + fsm_settimer(privptr->fsm, &privptr->restart_timer); dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2; dev->hard_start_xmit = ctc_tx; dev->open = ctc_open; @@ -3552,6 +3631,7 @@ case chandev_status_gone: for (direction = READ; direction <= WRITE; direction++) { channel *ch = privptr->channel[direction]; + ch->flags |= CHANNEL_FLAGS_FAILED; fsm_event(ch->fsm, CH_EVENT_MC_FAIL, ch); } printk(KERN_WARNING @@ -3560,6 +3640,12 @@ case chandev_status_all_chans_good: for (direction = READ; direction <= WRITE; direction++) { channel *ch = privptr->channel[direction]; + if (!(ch->flags & CHANNEL_FLAGS_FAILED)) + fsm_event(ch->fsm, CH_EVENT_MC_FAIL, ch); + } + for (direction = READ; direction <= WRITE; direction++) { + channel *ch = privptr->channel[direction]; + ch->flags &= ~CHANNEL_FLAGS_FAILED; fsm_event(ch->fsm, CH_EVENT_MC_GOOD, ch); } printk(KERN_WARNING @@ -3857,7 +3943,7 @@ } #define ctc_init init_module -#endif MODULE +#endif /* MODULE */ /** * Initialize module. diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/net/ctctty.c linux.21rc5-ac2/drivers/s390/net/ctctty.c --- linux.21rc5/drivers/s390/net/ctctty.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/net/ctctty.c 2003-04-25 13:51:49.000000000 +0100 @@ -1,5 +1,5 @@ /* - * $Id: ctctty.c,v 1.8 2001/05/16 16:28:31 felfert Exp $ + * $Id: ctctty.c,v 1.10 2002/06/05 14:24:45 felfert Exp $ * * CTC / ESCON network driver, tty interface. 
* @@ -88,6 +88,7 @@ struct semaphore write_sem; struct tq_struct tq; struct timer_list stoptimer; + struct timer_list flowtimer; } ctc_tty_info; /* Description of one CTC-tty */ @@ -114,7 +115,7 @@ static char *ctc_ttyname = CTC_TTY_NAME; #endif -char *ctc_tty_revision = "$Revision: 1.8 $"; +char *ctc_tty_revision = "$Revision: 1.10 $"; static __u32 ctc_tty_magic = CTC_ASYNC_MAGIC; static int ctc_tty_shuttingdown = 0; @@ -163,35 +164,44 @@ static int ctc_tty_readmodem(ctc_tty_info *info) { - int ret = 1; + int c; struct tty_struct *tty; + struct sk_buff *skb; - if ((tty = info->tty)) { - if (info->mcr & UART_MCR_RTS) { - int c = TTY_FLIPBUF_SIZE - tty->flip.count; - struct sk_buff *skb; - - if ((c > 0) && (skb = skb_dequeue(&info->rx_queue))) { - int len = skb->len; - if (len > c) - len = c; - memcpy(tty->flip.char_buf_ptr, skb->data, len); - skb_pull(skb, len); - memset(tty->flip.flag_buf_ptr, 0, len); - tty->flip.count += len; - tty->flip.char_buf_ptr += len; - tty->flip.flag_buf_ptr += len; - tty_flip_buffer_push(tty); - if (skb->len > 0) - skb_queue_head(&info->rx_queue, skb); - else { - kfree_skb(skb); - ret = skb_queue_len(&info->rx_queue); - } - } + if (!(tty = info->tty)) + return 0; + + /* If the upper layer is flow blocked or just + * has no room for data we schedule a timer to + * try again later - wilder + */ + c = TTY_FLIPBUF_SIZE - tty->flip.count; + if ( !(info->mcr & UART_MCR_RTS) || (c <= 0) ) { + /* can't do any work now, wake up later */ + mod_timer(&info->flowtimer, jiffies+(HZ/100) ); + return 0; + } + + if ((skb = skb_dequeue(&info->rx_queue))) { + int len = skb->len; + if (len > c) + len = c; + memcpy(tty->flip.char_buf_ptr, skb->data, len); + skb_pull(skb, len); + memset(tty->flip.flag_buf_ptr, 0, len); + tty->flip.count += len; + tty->flip.char_buf_ptr += len; + tty->flip.flag_buf_ptr += len; + tty_flip_buffer_push(tty); + + if (skb->len > 0){ + skb_queue_head(&info->rx_queue, skb); + }else { + kfree_skb(skb); } } - return ret; + + return skb_queue_len(&info->rx_queue); } void @@ -234,6 +244,11 @@ dev_kfree_skb(skb); return; } + if ( !(info->tty) ) { + dev_kfree_skb(skb); + return; + } + if (skb->len < 6) { dev_kfree_skb(skb); return; @@ -270,7 +285,12 @@ /* Direct deliver failed or queue wasn't empty. * Queue up for later dequeueing via timer-irq. */ - skb_queue_tail(&info->rx_queue, skb); + if (skb_queue_len(&info->rx_queue) < 50) + skb_queue_tail(&info->rx_queue, skb); + else { + kfree_skb(skb); + printk(KERN_DEBUG "ctctty: RX overrun\n"); + } /* Schedule dequeuing */ queue_task(&info->tq, &tq_immediate); mark_bh(IMMEDIATE_BH); @@ -330,6 +350,13 @@ skb_queue_head(&info->tx_queue, skb); else kfree_skb(skb); + + /* The connection is not up yet, try again in one second. - wilder */ + if ( rc == -EBUSY ){ + mod_timer(&info->flowtimer, jiffies+(HZ) ); + return 0; + } + } else { struct tty_struct *tty = info->tty; @@ -484,6 +511,18 @@ info->flags &= ~CTC_ASYNC_NETDEV_OPEN; } +/* Run from the timer queue when we are flow blocked + * to kick start the bottom half - wilder */ +static void +ctc_tty_startupbh(unsigned long data) +{ + ctc_tty_info *info = (ctc_tty_info *)data; + if (( !ctc_tty_shuttingdown) && info) { + queue_task(&info->tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); + } +} + /* * This routine will shutdown a serial port; interrupts are disabled, and * DTR is dropped if the hangup on close termio flag is on. 
@@ -577,6 +616,12 @@ if (ctc_tty_paranoia_check(info, tty->device, "ctc_tty_write_room")) return 0; + +/* wilder */ + if (skb_queue_len(&info->tx_queue) > 10 ) { + return 0; + } + return CTC_TTY_XMIT_SIZE; } @@ -1066,7 +1111,7 @@ #endif return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always @@ -1262,6 +1307,9 @@ init_timer(&info->stoptimer); info->stoptimer.function = ctc_tty_stopdev; info->stoptimer.data = (unsigned long)info; + init_timer(&info->flowtimer); + info->flowtimer.function = ctc_tty_startupbh; + info->flowtimer.data = (unsigned long)info; info->mcr = UART_MCR_RTS; } return 0; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/net/fsm.h linux.21rc5-ac2/drivers/s390/net/fsm.h --- linux.21rc5/drivers/s390/net/fsm.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/net/fsm.h 2003-04-25 13:51:49.000000000 +0100 @@ -1,4 +1,4 @@ -/* $Id: fsm.h,v 1.4 2001/09/24 10:38:02 mschwide Exp $ +/* $Id: fsm.h,v 1.5 2002/08/05 09:07:56 heicarst Exp $ */ #ifndef _FSM_H_ #define _FSM_H_ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/net/iucv.c linux.21rc5-ac2/drivers/s390/net/iucv.c --- linux.21rc5/drivers/s390/net/iucv.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/net/iucv.c 2003-04-25 13:52:30.000000000 +0100 @@ -1,5 +1,5 @@ /* - * $Id: iucv.c,v 1.32 2002/02/12 21:52:20 felfert Exp $ + * $Id: iucv.c,v 1.39 2003/02/14 15:45:15 felfert Exp $ * * IUCV network driver * @@ -29,7 +29,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * - * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.32 $ + * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.39 $ * */ @@ -302,7 +302,7 @@ if (debuglevel < 3) return; - printk(KERN_DEBUG __FUNCTION__ ": %s\n", title); + printk(KERN_DEBUG "%s: %s\n", __FUNCTION__, title); printk(" "); for (i = 0; i < len; i++) { if (!(i % 16) && i != 0) @@ -318,7 +318,7 @@ #define iucv_debug(lvl, fmt, args...) 
\ do { \ if (debuglevel >= lvl) \ - printk(KERN_DEBUG __FUNCTION__ ": " fmt "\n" , ## args); \ + printk(KERN_DEBUG __FUNCTION__ ": " fmt "\n", ## args); \ } while (0) #else @@ -338,7 +338,7 @@ static void iucv_banner(void) { - char vbuf[] = "$Revision: 1.32 $"; + char vbuf[] = "$Revision: 1.39 $"; char *version = vbuf; if ((version = strchr(version, ':'))) { @@ -384,6 +384,14 @@ return -ENOMEM; } memset(iucv_param_pool, 0, sizeof(iucv_param) * PARAM_POOL_SIZE); +#if 0 + /* Show parameter pool on startup */ + { + int i; + for (i = 0; i < PARAM_POOL_SIZE; i++) + printk("iparm[%d] at %p\n", i, &iucv_param_pool[i]); + } +#endif /* Initialize task queue */ INIT_LIST_HEAD(&iucv_tq.list); @@ -429,7 +437,7 @@ grab_param(void) { iucv_param *ret; - static int i = 0; + int i = 0; while (atomic_compare_and_swap(0, 1, &iucv_param_pool[i].in_use)) { i++; @@ -1047,6 +1055,7 @@ iucv_handle_t handle, void *pgm_data) { iparml_control *parm; + iparml_control local_parm; struct list_head *lh; ulong b2f0_result = 0; ulong flags; @@ -1103,24 +1112,48 @@ EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget)); } + /* In order to establish an IUCV connection, the procedure is: + * + * b2f0(CONNECT) + * take the ippathid from the b2f0 call + * register the handler to the ippathid + * + * Unfortunately, the ConnectionEstablished message gets sent after the + * b2f0(CONNECT) call but before the register is handled. + * + * In order for this race condition to be eliminated, the IUCV Control + * Interrupts must be disabled for the above procedure. + * + * David Kennedy + */ + + /* Enable everything but IUCV Control messages */ + iucv_setmask(~(IUCVControlInterruptsFlag)); + parm->ipflags1 = (__u8)flags1; b2f0_result = b2f0(CONNECT, parm); + memcpy(&local_parm, parm, sizeof(local_parm)); + release_param(parm); + parm = &local_parm; if (b2f0_result) { - release_param(parm); + iucv_setmask(~0); return b2f0_result; } add_pathid_result = iucv_add_pathid(parm->ippathid, h); *pathid = parm->ippathid; + /* Enable everything again */ + iucv_setmask(~0); + if (msglim) *msglim = parm->ipmsglim; if (flags1_out) *flags1_out = (parm->ipflags1 & IPPRTY) ? 
IPPRTY : 0; if (add_pathid_result) { - iucv_sever(parm->ippathid, no_memory); + iucv_sever(*pathid, no_memory); printk(KERN_WARNING "%s: add_pathid failed with rc =" " %d\n", __FUNCTION__, add_pathid_result); return(add_pathid_result); @@ -1736,6 +1769,7 @@ { iparml_db *parm; ulong b2f0_result; + iucv_param save_param; iucv_debug(2, "entering"); @@ -1752,7 +1786,13 @@ parm->ipmsgtag = msgtag; parm->ipflags1 = (IPNORPY | flags1); /* one way priority message */ + memcpy((void *)&save_param, (void *)parm, sizeof(iucv_param)); b2f0_result = b2f0(SEND, parm); + if (b2f0_result != 0) { + printk("b2f0 call returned %lx\n", b2f0_result); + iucv_dumpit("PL before:", &save_param, sizeof(iucv_param)); + iucv_dumpit("PL after:", parm, sizeof(iucv_param)); + } if ((b2f0_result == 0) && (msgid)) *msgid = parm->ipmsgid; @@ -2103,6 +2143,24 @@ return b2f0_result; } +void +iucv_setmask_cpu0 (void *result) +{ + iparml_set_mask *parm; + + if (smp_processor_id() != 0) + return; + + iucv_debug(1, "entering"); + parm = (iparml_set_mask *)grab_param(); + parm->ipmask = *((__u8*)result); + *((ulong *)result) = b2f0(SETMASK, parm); + release_param(parm); + + iucv_debug(1, "b2f0_result = %ld", *((ulong *)result)); + iucv_debug(1, "exiting"); +} + /* * Name: iucv_setmask * Purpose: This function enables or disables the following IUCV @@ -2113,28 +2171,25 @@ * 0x40 - Priority_MessagePendingInterruptsFlag * 0x20 - Nonpriority_MessageCompletionInterruptsFlag * 0x10 - Priority_MessageCompletionInterruptsFlag + * 0x08 - IUCVControlInterruptsFlag * Output: NA * Return: b2f0_result - return code from CP */ int iucv_setmask (int SetMaskFlag) { - iparml_set_mask *parm; - ulong b2f0_result = 0; - - iucv_debug(1, "entering"); - - parm = (iparml_set_mask *)grab_param(); - - parm->ipmask = (__u8)SetMaskFlag; - - b2f0_result = b2f0(SETMASK, parm); - release_param(parm); + union { + ulong result; + __u8 param; + } u; - iucv_debug(1, "b2f0_result = %ld", b2f0_result); - iucv_debug(1, "exiting"); + u.param = SetMaskFlag; + if (smp_processor_id() == 0) + iucv_setmask_cpu0(&u); + else + smp_call_function(iucv_setmask_cpu0, &u, 0, 1); - return b2f0_result; + return u.result; } /** @@ -2300,9 +2355,11 @@ case 0x02: /*connection complete */ if (h) { if (interrupt->ConnectionComplete) + { interrupt->ConnectionComplete( (iucv_ConnectionComplete *)int_buf, h->pgm_data); + } else iucv_debug(1, "ConnectionComplete not called"); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/net/iucv.h linux.21rc5-ac2/drivers/s390/net/iucv.h --- linux.21rc5/drivers/s390/net/iucv.h 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/net/iucv.h 2003-04-25 13:52:30.000000000 +0100 @@ -62,6 +62,8 @@ #define Priority_MessagePendingInterruptsFlag 0x40 #define Nonpriority_MessageCompletionInterruptsFlag 0x20 #define Priority_MessageCompletionInterruptsFlag 0x10 +#define IUCVControlInterruptsFlag 0x08 + /* * Mapping of external interrupt buffers should be used with the corresponding * interrupt types. @@ -735,6 +737,7 @@ * 0x40 - Priority_MessagePendingInterruptsFlag * 0x20 - Nonpriority_MessageCompletionInterruptsFlag * 0x10 - Priority_MessageCompletionInterruptsFlag + * 0x08 - IUCVControlInterruptsFlag * Output: NA * Return: Return code from CP IUCV call. 
*/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/net/Makefile linux.21rc5-ac2/drivers/s390/net/Makefile --- linux.21rc5/drivers/s390/net/Makefile 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/net/Makefile 2003-04-22 16:44:37.000000000 +0100 @@ -12,6 +12,7 @@ obj-$(CONFIG_IUCV) += iucv.o fsm.o obj-$(CONFIG_CTC) += ctc.o fsm.o obj-$(CONFIG_IUCV) += netiucv.o +obj-$(CONFIG_C7000) += c7000.o include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/net/netiucv.c linux.21rc5-ac2/drivers/s390/net/netiucv.c --- linux.21rc5/drivers/s390/net/netiucv.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/net/netiucv.c 2003-04-25 13:52:30.000000000 +0100 @@ -1,5 +1,5 @@ /* - * $Id: netiucv.c,v 1.17 2002/02/12 21:52:20 felfert Exp $ + * $Id: netiucv.c,v 1.21 2002/12/09 18:40:44 mschwide Exp $ * * IUCV network driver * @@ -28,7 +28,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * - * RELEASE-TAG: IUCV network driver $Revision: 1.17 $ + * RELEASE-TAG: IUCV network driver $Revision: 1.21 $ * */ @@ -96,6 +96,8 @@ unsigned long txlen; unsigned long tx_time; struct timeval send_stamp; + unsigned long tx_pending; + unsigned long tx_max_pending; } connection_profile; /** @@ -108,6 +110,7 @@ struct sk_buff *rx_buff; struct sk_buff *tx_buff; struct sk_buff_head collect_queue; + struct sk_buff_head commit_queue; spinlock_t collect_lock; int collect_len; int max_buffsize; @@ -443,6 +446,10 @@ iucv_connection *conn = (iucv_connection *)pgm_data; iucv_event ev; +#ifdef DEBUG + printk(KERN_DEBUG "%s() called\n", __FUNCTION__); +#endif + ev.conn = conn; ev.data = (void *)eib; fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev); @@ -619,10 +626,11 @@ iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data; netiucv_priv *privptr = NULL; /* Shut up, gcc! skb is always below 2G. 
 				 */
-	struct sk_buff *skb = (struct sk_buff *)(unsigned long)eib->ipmsgtag;
+	__u32 single_flag = eib->ipmsgtag;
 	__u32 txbytes = 0;
 	__u32 txpackets = 0;
 	__u32 stat_maxcq = 0;
+	struct sk_buff *skb;
 	unsigned long saveflags;
 	ll_header header;
@@ -632,13 +640,17 @@
 	fsm_deltimer(&conn->timer);
 	if (conn && conn->netdev && conn->netdev->priv)
 		privptr = (netiucv_priv *)conn->netdev->priv;
-	if (skb) {
+	conn->prof.tx_pending--;
+	if (single_flag) {
+		if ((skb = skb_dequeue(&conn->commit_queue))) {
 		if (privptr) {
 			privptr->stats.tx_packets++;
 			privptr->stats.tx_bytes +=
 				(skb->len - NETIUCV_HDRLEN - NETIUCV_HDRLEN);
 		}
-		dev_kfree_skb_any(skb);
+			atomic_dec(&skb->users);
+			dev_kfree_skb_any(skb);
+		}
 	}
 	conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
 	conn->tx_buff->len = 0;
@@ -672,11 +684,17 @@
 				       conn->tx_buff->data,
 				       conn->tx_buff->len);
 		conn->prof.doios_multi++;
 		conn->prof.txlen += conn->tx_buff->len;
+		conn->prof.tx_pending++;
+		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+			conn->prof.tx_max_pending = conn->prof.tx_pending;
 		if (rc != 0) {
 			fsm_deltimer(&conn->timer);
+			conn->prof.tx_pending--;
 			fsm_newstate(fi, CONN_STATE_IDLE);
 			if (privptr)
 				privptr->stats.tx_errors += txpackets;
+			printk(KERN_DEBUG "iucv_send returned %08x\n",
+			       rc);
 		} else {
 			if (privptr) {
 				privptr->stats.tx_packets += txpackets;
@@ -807,10 +825,15 @@
 	printk(KERN_DEBUG "%s('%s'): connecting ...\n",
 	       conn->netdev->name, conn->userid);
 #endif
+
+	/* We must set the state before calling iucv_connect because the callback
+	 * handler could be called at any point after the connection request is
+	 * sent. */
+
+	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
 	rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
 			  conn->userid, iucv_host, 0, NULL, NULL, conn->handle,
 			  conn);
-	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
 	switch (rc) {
 		case 0:
 			return;
@@ -885,6 +908,7 @@
 	if (conn->handle)
 		iucv_unregister_program(conn->handle);
 	conn->handle = 0;
+	netiucv_purge_skb_queue(&conn->commit_queue);
 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
 }
@@ -928,6 +952,7 @@
 
 	{ CONN_STATE_TX,          CONN_EVENT_RX,          conn_action_rx        },
 	{ CONN_STATE_TX,          CONN_EVENT_TXDONE,      conn_action_txdone    },
+	{ CONN_STATE_IDLE,        CONN_EVENT_TXDONE,      conn_action_txdone    },
 };
 
 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
@@ -1130,14 +1155,21 @@
 			     CONN_EVENT_TIMER, conn);
 	conn->prof.send_stamp = xtime;
 
-	rc = iucv_send(conn->pathid, NULL, 0, 0,
-		       /* Shut up, gcc! nskb is always below 2G.
*/ - (__u32)(((unsigned long)nskb)&0xffffffff), 0, - nskb->data, nskb->len); + rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */, + 0, nskb->data, nskb->len); conn->prof.doios_single++; conn->prof.txlen += skb->len; + conn->prof.tx_pending++; + if (conn->prof.tx_pending > conn->prof.tx_max_pending) + conn->prof.tx_max_pending = conn->prof.tx_pending; if (rc != 0) { + netiucv_priv *privptr; fsm_deltimer(&conn->timer); + fsm_newstate(conn->fsm, CONN_STATE_IDLE); + conn->prof.tx_pending--; + privptr = (netiucv_priv *)conn->netdev->priv; + if (privptr) + privptr->stats.tx_errors++; if (copied) dev_kfree_skb(nskb); else { @@ -1148,9 +1180,13 @@ skb_pull(skb, NETIUCV_HDRLEN); skb_trim(skb, skb->len - NETIUCV_HDRLEN); } + printk(KERN_DEBUG "iucv_send returned %08x\n", + rc); } else { if (copied) dev_kfree_skb(skb); + atomic_inc(&nskb->users); + skb_queue_tail(&conn->commit_queue, nskb); } } @@ -1549,12 +1585,7 @@ if (!(dev = find_netdev_by_ino(ino))) return -ENODEV; privptr = (netiucv_priv *)dev->priv; - privptr->conn->prof.maxmulti = 0; - privptr->conn->prof.maxcqueue = 0; - privptr->conn->prof.doios_single = 0; - privptr->conn->prof.doios_multi = 0; - privptr->conn->prof.txlen = 0; - privptr->conn->prof.tx_time = 0; + memset(&(privptr->conn->prof), 0, sizeof(privptr->conn->prof)); return count; } @@ -1593,6 +1624,10 @@ privptr->conn->prof.txlen); p += sprintf(p, "Max. TX IO-time: %ld\n", privptr->conn->prof.tx_time); + p += sprintf(p, "Pending transmits: %ld\n", + privptr->conn->prof.tx_pending); + p += sprintf(p, "Max. pending transmits: %ld\n", + privptr->conn->prof.tx_max_pending); } l = strlen(sbuf); p = sbuf; @@ -1833,6 +1868,7 @@ if (conn) { memset(conn, 0, sizeof(iucv_connection)); skb_queue_head_init(&conn->collect_queue); + skb_queue_head_init(&conn->commit_queue); conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; conn->netdev = dev; @@ -2005,7 +2041,7 @@ static void netiucv_banner(void) { - char vbuf[] = "$Revision: 1.17 $"; + char vbuf[] = "$Revision: 1.21 $"; char *version = vbuf; if ((version = strchr(version, ':'))) { @@ -2066,7 +2102,7 @@ netiucv_proc_create_main(); while (p) { - if (isalnum(*p)) { + if (isalnum(*p) || (*p == '$')) { username[i++] = *p++; username[i] = '\0'; if (i > 8) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/s390io.c linux.21rc5-ac2/drivers/s390/s390io.c --- linux.21rc5/drivers/s390/s390io.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/s390io.c 2003-05-28 15:13:38.000000000 +0100 @@ -1,7 +1,7 @@ /* * drivers/s390/s390io.c * S/390 common I/O routines - * $Revision: 1.171.2.21 $ + * $Revision: 1.246 $ * * S390 version * Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH, @@ -116,6 +116,11 @@ ##args); \ } while (0) +#define CIO_HEX_EVENT(imp, args...) 
do { \ + if (cio_debug_initialized) \ + debug_event(cio_debug_trace_id, imp, ##args); \ + } while (0) + #undef CONFIG_DEBUG_IO #define CONFIG_DEBUG_CRW #define CONFIG_DEBUG_CHSC @@ -175,14 +180,13 @@ int s390_start_IO (int irq, ccw1_t * cpa, unsigned long user_intparm, __u8 lpm, unsigned long flag); -static int s390_send_nop(int irq, __u8 lpm); - #ifdef CONFIG_PROC_FS static int chan_proc_init (void); #endif static inline void do_adapter_IO (__u32 intparm); +static void s390_schedule_path_verification(unsigned long irq); int s390_DevicePathVerification (int irq, __u8 domask); int s390_register_adapter_interrupt (adapter_int_handler_t handler); int s390_unregister_adapter_interrupt (adapter_int_handler_t handler); @@ -251,12 +255,9 @@ if ((to && (from > to)) || (to<0) || (to > 0xffff) - || (from<0) || (from > 0xffff)) { - printk (KERN_WARNING - "Invalid blacklist range %x to %x, skipping\n", from, - to); + || (from<0) || (from > 0xffff)) return; - } + if (!locked) spin_lock_irqsave (&blacklist_lock, flags); @@ -285,14 +286,10 @@ long flags; int i; - if ((to<0) || (to > 0xffff) - || (from<0) || (from > 0xffff)) { - printk (KERN_WARNING - "Invalid blacklist range %x to %x, " - "not freeing\n", - from, to); + if ((to && (from > to)) + || (to<0) || (to > 0xffff) + || (from<0) || (from > 0xffff)) return; - } spin_lock_irqsave (&blacklist_lock, flags); @@ -623,7 +620,6 @@ /* End of blacklist handling */ void s390_displayhex (char *str, void *ptr, s32 cnt); -void s390_displayhex2 (char *str, void *ptr, s32 cnt, int level); void s390_displayhex (char *str, void *ptr, s32 cnt) @@ -644,26 +640,6 @@ } } -void -s390_displayhex2 (char *str, void *ptr, s32 cnt, int level) -{ - s32 cnt1, cnt2, maxcnt2; - u32 *currptr = (__u32 *) ptr; - char buffer[cnt * 12]; - - debug_text_event (cio_debug_msg_id, level, str); - - for (cnt1 = 0; cnt1 < cnt; cnt1 += 16) { - sprintf (buffer, "%08lX ", (unsigned long) currptr); - maxcnt2 = cnt - cnt1; - if (maxcnt2 > 16) - maxcnt2 = 16; - for (cnt2 = 0; cnt2 < maxcnt2; cnt2 += 4) - sprintf (buffer, "%08X ", *currptr++); - } - debug_text_event (cio_debug_msg_id, level, buffer); -} - static int __init cio_setup (char *parm) { @@ -815,93 +791,6 @@ return; } - -/* - * Function: s390_send_nop - * - * sends a nop CCW to the specified subchannel down the given path(s) - */ -static int -s390_send_nop(int irq, __u8 lpm) -{ - char dbf_txt[15]; - ccw1_t *nop_ccw; - devstat_t devstat; - devstat_t *pdevstat = &devstat; - unsigned long flags; - - int irq_ret = 0; - int inlreq = 0; - - SANITY_CHECK(irq); - - if (!ioinfo[irq]->ui.flags.oper) - /* no sense in trying */ - return -ENODEV; - - sprintf(dbf_txt, "snop%x", irq); - CIO_TRACE_EVENT(5, dbf_txt); - - if (!ioinfo[irq]->ui.flags.ready) { - /* - * If there's no handler, use our dummy handler. - */ - irq_ret = request_irq (irq, - init_IRQ_handler, - SA_PROBE, - "SNOP", - pdevstat); - if (!irq_ret) - inlreq = 1; - } else { - pdevstat = ioinfo[irq]->irq_desc.dev_id; - } - - if (irq_ret) - return irq_ret; - - s390irq_spin_lock_irqsave (irq, flags); - - if (init_IRQ_complete) - nop_ccw = kmalloc (sizeof (ccw1_t), GFP_DMA); - else - nop_ccw = alloc_bootmem_low (sizeof (ccw1_t)); - - nop_ccw->cmd_code = CCW_CMD_NOOP; - nop_ccw->cda = 0; - nop_ccw->count = 0; - nop_ccw->flags = CCW_FLAG_SLI; - - memset (pdevstat, '\0', sizeof (devstat_t)); - - irq_ret = s390_start_IO (irq, nop_ccw, 0xE2D5D6D7, lpm, - DOIO_WAIT_FOR_INTERRUPT - | DOIO_TIMEOUT - | DOIO_DONT_CALL_INTHDLR - | DOIO_VALID_LPM); - - if (irq_ret == -ETIMEDOUT) { - - /* better cancel... 
*/ - cancel_IO(irq); - } - - if (init_IRQ_complete) - kfree (nop_ccw); - else - free_bootmem ((unsigned long) nop_ccw, sizeof (ccw1_t)); - - s390irq_spin_unlock_irqrestore (irq, flags); - - if (inlreq) - free_irq (irq, pdevstat); - - return irq_ret; - -} - - - /* * Note : internal use of irqflags SA_PROBE for NOT path grouping * @@ -930,7 +819,7 @@ if (ioinfo[irq]->st) return -ENODEV; - sprintf (dbf_txt, "reqs%x", irq); + sprintf (dbf_txt, "reqsp%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* @@ -1054,6 +943,7 @@ s390irq_spin_unlock_irqrestore (irq, flags); udelay (200000); /* 200 ms */ s390irq_spin_lock_irqsave (irq, flags); + ret = disable_subchannel (irq); if (ret == -EBUSY) { @@ -1130,7 +1020,7 @@ if (!ioinfo[irq]->ui.flags.ready) return -ENODEV; - sprintf (dbf_txt, "dirq%x", irq); + sprintf (dbf_txt, "disirq%x", irq); CIO_TRACE_EVENT (4, dbf_txt); s390irq_spin_lock_irqsave (irq, flags); @@ -1157,7 +1047,7 @@ if (!ioinfo[irq]->ui.flags.ready) return -ENODEV; - sprintf (dbf_txt, "eirq%x", irq); + sprintf (dbf_txt, "enirq%x", irq); CIO_TRACE_EVENT (4, dbf_txt); s390irq_spin_lock_irqsave (irq, flags); @@ -1183,7 +1073,7 @@ SANITY_CHECK (irq); - sprintf (dbf_txt, "esch%x", irq); + sprintf (dbf_txt, "ensch%x", irq); CIO_TRACE_EVENT (2, dbf_txt); /* @@ -1268,7 +1158,7 @@ SANITY_CHECK (irq); - sprintf (dbf_txt, "dsch%x", irq); + sprintf (dbf_txt, "dissch%x", irq); CIO_TRACE_EVENT (2, dbf_txt); if (ioinfo[irq]->ui.flags.busy) { @@ -1367,6 +1257,7 @@ { unsigned long flags; /* PSW flags */ long cr6 __attribute__ ((aligned (8))); + cpuid_t cpuid; asm volatile ("STCK %0":"=m" (irq_IPL_TOD)); @@ -1406,8 +1297,18 @@ global_pgid = (pgid_t *)alloc_bootmem(sizeof(pgid_t)); - global_pgid->cpu_addr = *(__u16 *) __LC_CPUADDR; - global_pgid->cpu_id = ((cpuid_t *) __LC_CPUID)->ident; + cpuid = *(cpuid_t*) __LC_CPUID; + + if (MACHINE_NEW_STIDP) + global_pgid->cpu_addr = 0x8000; + else { +#ifdef CONFIG_SMP + global_pgid->cpu_addr = hard_smp_processor_id(); +#else + global_pgid->cpu_addr = 0; +#endif + } + global_pgid->cpu_id = cpuid.ident; global_pgid->cpu_model = ((cpuid_t *) __LC_CPUID)->machine; global_pgid->tod_high = *(__u32 *) & irq_IPL_TOD; @@ -1445,7 +1346,6 @@ { /* flags */ int ccode; int ret = 0; - char buffer[80]; char dbf_txt[15]; SANITY_CHECK (irq); @@ -1512,7 +1412,14 @@ /* * Issue "Start subchannel" and process condition code */ - ccode = ssch (irq, &(ioinfo[irq]->orb)); + if (flag & DOIO_USE_DIAG98) { + ioinfo[irq]->orb.key = get_storage_key() >> 4; + ioinfo[irq]->orb.cpa = + (__u32) pfix_get_addr((void *)ioinfo[irq]->orb.cpa); + ccode = diag98 (irq, &(ioinfo[irq]->orb)); + } else { + ccode = ssch (irq, &(ioinfo[irq]->orb)); + } sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (4, dbf_txt); @@ -1674,6 +1581,16 @@ break; case 1: /* status pending */ + + /* + * Don't do an inline processing of pending interrupt conditions + * while doing async. I/O. The interrupt will pop up when we are + * enabled again and the I/O can be retried. + */ + if (!ioinfo[irq]->ui.flags.syncio) { + ret = -EBUSY; + break; + } ioinfo[irq]->devstat.flag = DEVSTAT_START_FUNCTION | DEVSTAT_STATUS_PENDING; @@ -1715,14 +1632,6 @@ * than the one used (i.e. path available mask is non-zero). */ if (ioinfo[irq]->devstat.ii.irb.scsw.cc == 3) { - if (flag & DOIO_VALID_LPM) { - ioinfo[irq]->opm &= - ~(ioinfo[irq]->devstat.ii.irb.esw.esw1. 
- lpum); - } else { - ioinfo[irq]->opm = 0; - - } if (ioinfo[irq]->opm == 0) { ret = -ENODEV; @@ -1736,6 +1645,7 @@ #ifdef CONFIG_DEBUG_IO { + char buffer[80]; stsch (irq, &(ioinfo[irq]->schib)); @@ -1777,39 +1687,23 @@ #endif if (cio_debug_initialized) { stsch (irq, &(ioinfo[irq]->schib)); - - sprintf (buffer, - "s390_start_IO(%04X) - irb for " - "device %04X, after status pending\n", - irq, ioinfo[irq]->devstat.devno); - - s390_displayhex2 (buffer, - &(ioinfo[irq]->devstat.ii. - irb), sizeof (irb_t), 2); - - sprintf (buffer, - "s390_start_IO(%04X) - schib for " - "device %04X, after status pending\n", - irq, ioinfo[irq]->devstat.devno); - - s390_displayhex2 (buffer, - &(ioinfo[irq]->schib), - sizeof (schib_t), 2); + + sprintf(dbf_txt, "sp%x", irq); + CIO_TRACE_EVENT(2, dbf_txt); + CIO_TRACE_EVENT(2, "irb:"); + CIO_HEX_EVENT(2, &(ioinfo[irq]->devstat.ii.irb), + sizeof (irb_t)); + CIO_TRACE_EVENT(2, "schib:"); + CIO_HEX_EVENT(2, &(ioinfo[irq]->schib), + sizeof (schib_t)); if (ioinfo[irq]->devstat. flag & DEVSTAT_FLAG_SENSE_AVAIL) { - sprintf (buffer, - "s390_start_IO(%04X) " - "- sense data for device %04X," - " after status pending\n", - irq, - ioinfo[irq]->devstat.devno); - - s390_displayhex2 (buffer, - ioinfo[irq]->irq_desc. - dev_id->ii.sense.data, - ioinfo[irq]->irq_desc. - dev_id->rescnt, 2); + CIO_TRACE_EVENT(2, "sense:"); + CIO_HEX_EVENT(2, ioinfo[irq]->irq_desc. + dev_id->ii.sense.data, + ioinfo[irq]->irq_desc. + dev_id->rescnt); } } @@ -1860,14 +1754,10 @@ #endif if (cio_debug_initialized) { stsch (irq, &(ioinfo[irq]->schib)); - - sprintf (buffer, "s390_start_IO(%04X) - schib for " - "device %04X, after 'not oper' status\n", - irq, ioinfo[irq]->devstat.devno); - - s390_displayhex2 (buffer, - &(ioinfo[irq]->schib), - sizeof (schib_t), 2); + sprintf(dbf_txt, "no%x", irq); + CIO_TRACE_EVENT(2, dbf_txt); + CIO_HEX_EVENT(2, &(ioinfo[irq]->schib), + sizeof (schib_t)); } break; @@ -1908,6 +1798,9 @@ sprintf (dbf_txt, "doIO%x", irq); CIO_TRACE_EVENT (4, dbf_txt); + if (ioinfo[irq]->ui.flags.noio) + return -EBUSY; + /* * Note: We ignore the device operational status - if not operational, * the SSCH will lead to an -ENODEV condition ... @@ -1963,7 +1856,7 @@ SANITY_CHECK (irq); - sprintf (dbf_txt, "rsIO%x", irq); + sprintf (dbf_txt, "resIO%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* @@ -1982,7 +1875,6 @@ break; case 1: - s390_process_IRQ (irq); ret = -EBUSY; break; @@ -2025,6 +1917,9 @@ SANITY_CHECK (irq); + if (ioinfo[irq]->ui.flags.noio) + return -EBUSY; + /* * we only allow for halt_IO if the device has an I/O handler associated */ @@ -2038,8 +1933,7 @@ if (ioinfo[irq]->ui.flags.w4sense) { return 0; } - CIO_TRACE_EVENT (2, "haltIO"); - sprintf (dbf_txt, "%x", irq); + sprintf (dbf_txt, "haltIO%x", irq); CIO_TRACE_EVENT (2, dbf_txt); /* @@ -2158,6 +2052,16 @@ case 1: /* status pending */ + /* + * Don't do an inline processing of pending interrupt conditions + * while doing async. I/O. The interrupt will pop up when we are + * enabled again and the I/O can be retried. 
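+	 * We return -EBUSY to the caller in that case.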
+ */ + if (!ioinfo[irq]->ui.flags.syncio) { + ret = -EBUSY; + break; + } + ioinfo[irq]->devstat.flag |= DEVSTAT_STATUS_PENDING; /* @@ -2248,6 +2152,8 @@ if (ioinfo[irq] == INVALID_STORAGE_AREA) return (-ENODEV); + if (ioinfo[irq]->ui.flags.noio) + return -EBUSY; /* * we only allow for clear_IO if the device has an I/O handler associated */ @@ -2260,8 +2166,7 @@ if (ioinfo[irq]->ui.flags.w4sense) return 0; - CIO_TRACE_EVENT (2, "clearIO"); - sprintf (dbf_txt, "%x", irq); + sprintf (dbf_txt, "clearIO%x", irq); CIO_TRACE_EVENT (2, dbf_txt); /* @@ -2418,8 +2323,7 @@ SANITY_CHECK (irq); - CIO_TRACE_EVENT (2, "cancelIO"); - sprintf (dbf_txt, "%x", irq); + sprintf (dbf_txt, "cancelIO%x", irq); CIO_TRACE_EVENT (2, dbf_txt); ccode = xsch (irq); @@ -2435,8 +2339,6 @@ case 1: /* status pending */ - /* process the pending irq... */ - s390_process_IRQ (irq); ret = -EBUSY; break; @@ -2595,7 +2497,7 @@ dp = &ioinfo[irq]->devstat; udp = ioinfo[irq]->irq_desc.dev_id; - + /* * It might be possible that a device was not-oper. at the time * of free_irq() processing. This means the handler is no longer @@ -2719,7 +2621,7 @@ } dp->cpa = dp->ii.irb.scsw.cpa; - + } irb_cc = dp->ii.irb.scsw.cc; @@ -2806,18 +2708,9 @@ irq, dp->devno); s390_displayhex (buffer, &(dp->ii.irb), sizeof (irb_t)); - if (cio_debug_initialized) { - - sprintf (buffer, - "s390_process_IRQ(%04X) - irb for " - "device %04X after channel check " - "or interface control check\n", - irq, dp->devno); - - s390_displayhex2 (buffer, - &(dp->ii.irb), - sizeof (irb_t), 0); - } + sprintf(dbf_txt, "chk%x", irq); + CIO_TRACE_EVENT(0, dbf_txt); + CIO_HEX_EVENT(0, &(dp->ii.irb), sizeof (irb_t)); } ioinfo[irq]->stctl |= stctl; @@ -2840,6 +2733,7 @@ */ if (!(stctl & SCSW_STCTL_ALERT_STATUS) && (ioinfo[irq]->ui.flags.busy == 0)) { + #ifdef CONFIG_DEBUG_IO if (irq != cons_dev) printk (KERN_INFO @@ -2863,16 +2757,9 @@ "subchannel status : %02X\n", dp->devno, irq, dp->dstat, dp->cstat); - if (cio_debug_initialized) { - sprintf (buffer, - "s390_process_IRQ(%04X) - irb for " - "device %04X, ending_status %d\n", irq, - dp->devno, ending_status); - - s390_displayhex2 (buffer, - &(dp->ii.irb), - sizeof (irb_t), 2); - } + sprintf(dbf_txt, "uint%x", irq); + CIO_TRACE_EVENT(2, dbf_txt); + CIO_HEX_EVENT(2, &(dp->ii.irb), sizeof (irb_t)); } /* @@ -2899,13 +2786,25 @@ unsigned long s_flag = 0; if (ending_status) { + /* there is a chance that the command + * that gave us the unit check actually + * was a basic sense, so we must not + * overwrite *udp in that case + */ + if (ioinfo[irq]->ui.flags.w4sense && + (dp->ii.irb.scsw.dstat & DEV_STAT_UNIT_CHECK)) { + CIO_MSG_EVENT(4,"double unit check irq %04x, dstat %02x," + "flags %8x\n", irq, dp->ii.irb.scsw.dstat, + ioinfo[irq]->ui.info, ending_status); + } else { /* * We copy the current status information into the device driver * status area. Then we can use the local devstat area for device * sensing. When finally calling the IRQ handler we must not overlay * the original device status but copy the sense data only. 
*/ - memcpy (udp, dp, sizeof (devstat_t)); + memcpy (udp, dp, sizeof (devstat_t)); + } s_ccw->cmd_code = CCW_CMD_BASIC_SENSE; s_ccw->cda = @@ -2919,10 +2818,13 @@ * process we have to sense synchronously */ if (ioinfo[irq]->ui.flags.unready - || ioinfo[irq]->ui.flags.syncio) { - s_flag = DOIO_WAIT_FOR_INTERRUPT; + || ioinfo[irq]->ui.flags.syncio) + s_flag = DOIO_WAIT_FOR_INTERRUPT + | DOIO_TIMEOUT + | DOIO_VALID_LPM; - } + else + s_flag = DOIO_VALID_LPM; /* * Reset status info @@ -2950,8 +2852,34 @@ ioinfo[irq]->ui.flags.w4sense = 1; ret_io = s390_start_IO (irq, s_ccw, 0xE2C5D5E2, /* = SENSe */ - 0, /* n/a */ + 0xff, s_flag); + switch (ret_io) { + case 0: /* OK */ + break; + case -ENODEV: + /* + * The device is no longer operational. + * We won't get any sense data. + */ + ioinfo[irq]->ui.flags.w4sense = 0; + ioinfo[irq]->ui.flags.oper = 0; + allow4handler = 1; /* to notify the driver */ + break; + case -EBUSY: + /* + * The channel subsystem is either busy, or we have + * a status pending. Retry later. + */ + ioinfo[irq]->ui.flags.w4sense = 0; + ioinfo[irq]->ui.flags.delsense = 1; + break; + default: + printk(KERN_ERR"irq %04X: Unexpected rc %d " + "for BASIC SENSE!\n", irq, ret_io); + ioinfo[irq]->ui.flags.w4sense = 0; + allow4handler = 1; + } } else { /* * we received an Unit Check but we have no final @@ -3024,10 +2952,12 @@ udp->flag |= DEVSTAT_FLAG_SENSE_AVAIL; udp->scnt = sense_count; - if (sense_count >= 0) { + if (sense_count > 0) { memcpy (udp->ii.sense.data, ioinfo[irq]->sense_data, sense_count); + } else if (sense_count == 0) { + udp->flag &= ~DEVSTAT_FLAG_SENSE_AVAIL; } else { panic ("s390_process_IRQ(%04x) encountered " @@ -3090,7 +3020,8 @@ dp->flag |= DEVSTAT_FINAL_STATUS; udp->flag |= DEVSTAT_FINAL_STATUS; - ioinfo[irq]->irq_desc.handler (irq, udp, NULL); + if (!ioinfo[irq]->ui.flags.killio) + ioinfo[irq]->irq_desc.handler (irq, udp, NULL); /* * reset intparm after final status or we will badly present unsolicited @@ -3156,7 +3087,7 @@ udp->flag |= DEVSTAT_SUSPENDED; } - + ioinfo[irq]->irq_desc.handler (irq, udp, NULL); } @@ -3174,16 +3105,15 @@ dp->cstat = 0; dp->dstat = 0; - if (ioinfo[irq]->ulpm != ioinfo[irq]->opm) { - /* - * either it was the only path or it was restricted ... 
- */ - ioinfo[irq]->opm &= - ~(ioinfo[irq]->devstat.ii.irb.esw.esw1.lpum); - } else { - ioinfo[irq]->opm = 0; - - } + if ((dp->ii.irb.scsw.fctl != 0) && + ((dp->ii.irb.scsw.stctl & SCSW_STCTL_STATUS_PEND) != 0) && + (((dp->ii.irb.scsw.stctl & SCSW_STCTL_INTER_STATUS) == 0) || + ((dp->ii.irb.scsw.actl & SCSW_ACTL_SUSPENDED) != 0))) + if (dp->ii.irb.scsw.pno) { + stsch(irq, &ioinfo[irq]->schib); + ioinfo[irq]->opm &= + ~ioinfo[irq]->schib.pmcw.pnom; + } if (ioinfo[irq]->opm == 0) { ioinfo[irq]->ui.flags.oper = 0; @@ -3234,9 +3164,10 @@ ioinfo[irq]->devstat.intparm = 0; if (!ioinfo[irq]->ui.flags.s_pend - && !ioinfo[irq]->ui.flags.repnone) { - ioinfo[irq]->irq_desc.handler (irq, udp, NULL); + && !ioinfo[irq]->ui.flags.repnone + && !ioinfo[irq]->ui.flags.killio) { + ioinfo[irq]->irq_desc.handler (irq, udp, NULL); } ending_status = 1; @@ -3245,6 +3176,18 @@ } + if (ending_status && + ioinfo[irq]->ui.flags.noio && + !ioinfo[irq]->ui.flags.syncio && + !ioinfo[irq]->ui.flags.w4sense) { + if(ioinfo[irq]->ui.flags.ready) { + s390_schedule_path_verification(irq); + } else { + ioinfo[irq]->ui.flags.killio = 0; + ioinfo[irq]->ui.flags.noio = 0; + } + } + return (ending_status); } @@ -3277,7 +3220,7 @@ if (cons_dev != -1) return -EBUSY; - sprintf (dbf_txt, "scd%x", irq); + sprintf (dbf_txt, "scons%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* @@ -3322,7 +3265,7 @@ if (cons_dev != -1) return -EBUSY; - sprintf (dbf_txt, "rscd%x", irq); + sprintf (dbf_txt, "rcons%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* @@ -3366,7 +3309,7 @@ if (irq != cons_dev) return -EINVAL; - sprintf (dbf_txt, "wcd%x", irq); + sprintf (dbf_txt, "wcons%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* @@ -3417,7 +3360,7 @@ int rc = 0; char dbf_txt[15]; - sprintf (dbf_txt, "eisc%x", irq); + sprintf (dbf_txt, "enisc%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* This one spins until it can get the sync_isc lock for irq# irq */ @@ -3438,7 +3381,6 @@ * we only run the STSCH/MSCH path for the first enablement */ else if (sync_isc_cnt == 1) { - ioinfo[irq]->ui.flags.syncio = 1; ccode = stsch (irq, &(ioinfo[irq]->schib)); @@ -3463,6 +3405,7 @@ * if neccessary */ if (cons_dev != -1) cr6 &= 0xFEFFFFFF; + ioinfo[irq]->ui.flags.syncio = 1; __ctl_load (cr6, 6, 6); rc = 0; retry = 0; @@ -3505,6 +3448,15 @@ if (rc) { /* can only happen if stsch/msch fails */ sync_isc_cnt = 0; atomic_set (&sync_isc, -1); + } else if (sync_isc_cnt == 1) { + int ccode; + + ccode = stsch(irq, &ioinfo[irq]->schib); + if (!ccode && ioinfo[irq]->schib.pmcw.isc != 5) { + ioinfo[irq]->ui.flags.syncio = 0; + sync_isc_cnt = 0; + atomic_set (&sync_isc, -1); + } } } else { #ifdef CONFIG_SYNC_ISC_PARANOIA @@ -3531,7 +3483,7 @@ char dbf_txt[15]; - sprintf (dbf_txt, "disc%x", irq); + sprintf (dbf_txt, "disisc%x", irq); CIO_TRACE_EVENT (4, dbf_txt); if ((irq <= highest_subchannel) && @@ -3621,7 +3573,7 @@ sync_isc_cnt = 0; atomic_set (&sync_isc, -1); - + } else { sync_isc_cnt--; @@ -3641,6 +3593,26 @@ return (rc); } +int diag210 (diag210_t *addr) +{ + int ccode; + + __asm__ __volatile__( +#ifdef CONFIG_ARCH_S390X + " sam31\n" + " diag %1,0,0x210\n" + " sam64\n" +#else + " diag %1,0,0x210\n" +#endif + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) + : "a" (addr) + : "cc" ); + return ccode; +} + /* * Input : * devno - device number @@ -3658,11 +3630,13 @@ CIO_TRACE_EVENT (4, "VMvdinf"); if (init_IRQ_complete) { - p_diag_data = kmalloc (sizeof (diag210_t), GFP_DMA); + p_diag_data = kmalloc (sizeof (diag210_t), GFP_DMA | GFP_ATOMIC); } else { p_diag_data = alloc_bootmem_low (sizeof (diag210_t)); } + if 
(!p_diag_data) + return; p_diag_data->vrdcdvno = devno; p_diag_data->vrdclen = sizeof (diag210_t); @@ -3980,7 +3954,7 @@ int read_dev_chars (int irq, void **buffer, int length) { - unsigned int flags; + unsigned long flags; ccw1_t *rdc_ccw; devstat_t devstat; char *rdc_buf; @@ -4009,7 +3983,7 @@ return -EUSERS; } - sprintf (dbf_txt, "rdc%x", irq); + sprintf (dbf_txt, "rddevch%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* @@ -4052,8 +4026,7 @@ rdc_ccw->count = length; rdc_ccw->flags = CCW_FLAG_SLI; ret = - set_normalized_cda (rdc_ccw, (unsigned long) - rdc_buf); + set_normalized_cda (rdc_ccw, rdc_buf); if (!ret) { memset (ioinfo[irq]->irq_desc.dev_id, @@ -4139,7 +4112,7 @@ return -EUSERS; } - sprintf (dbf_txt, "rcd%x", irq); + sprintf (dbf_txt, "rdconf%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* @@ -4194,7 +4167,7 @@ if (init_IRQ_complete) { rcd_buf = kmalloc (ioinfo[irq]->senseid.ciw[ciw_cnt]. - count, GFP_DMA); + count, GFP_DMA | GFP_ATOMIC); } else { rcd_buf = alloc_bootmem_low (ioinfo[irq]->senseid. @@ -4553,7 +4526,7 @@ int ret; char dbf_txt[15]; - sprintf (dbf_txt, "dri%x", irq); + sprintf (dbf_txt, "devrec%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* @@ -4576,6 +4549,7 @@ if (!ret) { pgid_t pgid; + int i, mask; /* * First thing we should do is a sensePGID in * order to find out how we can proceed with @@ -4586,13 +4560,24 @@ */ memcpy(&pgid, global_pgid, sizeof(pgid_t)); - ret = s390_SensePGID(irq, 0xff, &pgid); - if (ret == -EOPNOTSUPP) - /* - * Doesn't prevent us from proceeding - */ - ret = 0; + ret = -EAGAIN; + for (i=0; i<8 && ret==-EAGAIN; i++) { + + mask = (0x80 >> i) & ioinfo[irq]->opm; + + if (!mask) + continue; + + ret = s390_SensePGID(irq, mask, &pgid); + + if (ret == -EOPNOTSUPP) + /* + * Doesn't prevent us from proceeding + */ + ret = 0; + + } if (!ret && !ioinfo[irq]->ui.flags.unfriendly) { @@ -4645,18 +4630,9 @@ s390_displayhex (buffer, prcd, lrcd); #endif - if (cio_debug_initialized) { - sprintf (buffer, - "RCD for device(%04X)/" - "subchannel(%04X) returns :\n", - ioinfo[irq]-> - schib.pmcw.dev, - irq); - - s390_displayhex2 - (buffer, prcd, lrcd, - 2); - } + CIO_TRACE_EVENT(2, "rcddata:"); + CIO_HEX_EVENT(2, prcd, lrcd); + if (init_IRQ_complete) { kfree (prcd); } else { @@ -4756,14 +4732,9 @@ int s390_trigger_resense(int irq) { - char dbf_txt[8]; SANITY_CHECK(irq); - CIO_TRACE_EVENT (2, "tsns"); - sprintf(dbf_txt, "%x", irq); - CIO_TRACE_EVENT (2, dbf_txt); - if (ioinfo[irq]->ui.flags.ready) { printk (KERN_WARNING "s390_trigger_resense(%04X): " "Device is in use!\n", irq); @@ -4826,7 +4797,7 @@ char dbf_txt[15]; - sprintf (dbf_txt, "vals%x", irq); + sprintf (dbf_txt, "valsch%x", irq); CIO_TRACE_EVENT (4, dbf_txt); /* @@ -4901,9 +4872,12 @@ alloc_bootmem_low (sizeof (ioinfo_t)); } else { ioinfo[irq] = (ioinfo_t *) - kmalloc (sizeof (ioinfo_t), GFP_DMA); + kmalloc (sizeof (ioinfo_t), GFP_DMA | GFP_ATOMIC); } + if (!ioinfo[irq]) + return -ENOMEM; + memset (ioinfo[irq], '\0', sizeof (ioinfo_t)); memcpy (&ioinfo[irq]->schib, p_init_schib, sizeof (schib_t)); @@ -4964,6 +4938,10 @@ /* disable using this path */ ioinfo[irq]->opm &= ~mask; } + } else { + /* This chpid is not available to us */ + clear_bit(ioinfo[irq]->schib.pmcw.chpid[chp], + &chpids); } } } @@ -5209,8 +5187,7 @@ return -EUSERS; } - CIO_TRACE_EVENT (4, "senseID"); - sprintf (dbf_txt, "%x", irq); + sprintf (dbf_txt, "snsID%x", irq); CIO_TRACE_EVENT (4, dbf_txt); inlreq = 0; /* to make the compiler quiet... 
*/ @@ -5245,11 +5222,17 @@ s390irq_spin_lock (irq); if (init_IRQ_complete) { - sense_ccw = kmalloc (2 * sizeof (ccw1_t), GFP_DMA); + sense_ccw = kmalloc (2 * sizeof (ccw1_t), GFP_DMA | GFP_ATOMIC); } else { sense_ccw = alloc_bootmem_low (2 * sizeof (ccw1_t)); } + if (!sense_ccw) { + s390irq_spin_unlock (irq); + if (inlreq) + free_irq (irq, &devstat); + return -ENOMEM; + } /* more than one path installed ? */ if (ioinfo[irq]->schib.pmcw.pim != 0x80) { @@ -5642,56 +5625,237 @@ return (cc); } -/* - * Device Path Verification - * - * Path verification is accomplished by checking which paths (CHPIDs) are - * available. Further, a path group ID is set, if possible in multipath - * mode, otherwise in single path mode. - * - * Note : This function must not be called during normal device recognition, - * but during device driver initiated request_irq() processing only. - */ -int -s390_DevicePathVerification (int irq, __u8 usermask) +static int +s390_do_path_verification(int irq, __u8 usermask) { - int ccode; - __u8 pathmask; __u8 domask; -#ifdef CONFIG_CHSC - int chp; - int mask; - int old_opm = 0; -#endif /* CONFIG_CHSC */ - - int ret = 0; int i; pgid_t pgid; __u8 dev_path; int first = 1; - + int ret = 0; char dbf_txt[15]; - sprintf (dbf_txt, "dpvf%x", irq); - CIO_TRACE_EVENT (4, dbf_txt); - - if (ioinfo[irq]->st) - return -ENODEV; + sprintf(dbf_txt, "dopv%x", irq); + CIO_TRACE_EVENT(2, dbf_txt); -#ifdef CONFIG_CHSC - old_opm = ioinfo[irq]->opm; -#endif /* CONFIG_CHSC */ - ccode = stsch (irq, &(ioinfo[irq]->schib)); + dev_path = usermask ? usermask : ioinfo[irq]->opm; - if (ccode) { - return -ENODEV; + if (ioinfo[irq]->ui.flags.pgid == 0) { + memcpy (&ioinfo[irq]->pgid, global_pgid, sizeof (pgid_t)); + ioinfo[irq]->ui.flags.pgid = 1; } - if (ioinfo[irq]->schib.pmcw.pim == 0x80) { + + for (i = 0; i < 8 && !ret; i++) { + + domask = dev_path & (0x80>>i); + + if (!domask) + continue; + + if (!test_bit(ioinfo[irq]->schib.pmcw.chpid[i], + &chpids_logical)) + /* Chpid is logically offline, don't do io */ + continue; + + ret = s390_SetPGID (irq, domask); + /* - * no error, just not required for single path only devices + * For the *first* path we are prepared for recovery + * + * - If we fail setting the PGID we assume its + * using a different PGID already (VM) we + * try to sense. */ - ioinfo[irq]->ui.flags.pgid_supp = 0; - ret = 0; + if (ret == -EOPNOTSUPP && first) { + *(int *) &pgid = 0; + + ret = s390_SensePGID (irq, domask, &pgid); + first = 0; + + if (ret == 0) { + /* + * Check whether we retrieved + * a reasonable PGID ... + */ + if (pgid.inf.ps.state1 == SNID_STATE1_GROUPED) + memcpy (&ioinfo[irq]->pgid, + &pgid, sizeof (pgid_t)); + else /* ungrouped or garbage ... 
*/ + ret = -EOPNOTSUPP; + + } else { + ioinfo[irq]->ui.flags.pgid_supp = 0; + +#ifdef CONFIG_DEBUG_IO + printk (KERN_WARNING + "PathVerification(%04X) - Device %04X " + "doesn't support path grouping\n", + irq, ioinfo[irq]->schib.pmcw.dev); +#endif + CIO_MSG_EVENT(2, "PathVerification(%04X) " + "- Device %04X doesn't " + " support path grouping\n", + irq, + ioinfo[irq]->schib.pmcw.dev); + + } + } else if (ret == -EIO) { +#ifdef CONFIG_DEBUG_IO + printk (KERN_ERR "PathVerification(%04X) - I/O error " + "on device %04X\n", irq, + ioinfo[irq]->schib.pmcw.dev); +#endif + + CIO_MSG_EVENT(2, "PathVerification(%04X) - I/O error " + "on device %04X\n", irq, + ioinfo[irq]->schib.pmcw.dev); + + ioinfo[irq]->ui.flags.pgid_supp = 0; + + } else if (ret == -ETIMEDOUT) { +#ifdef CONFIG_DEBUG_IO + printk (KERN_ERR "PathVerification(%04X) - I/O timed " + "out on device %04X\n", irq, + ioinfo[irq]->schib.pmcw.dev); +#endif + CIO_MSG_EVENT(2, "PathVerification(%04X) - I/O timed " + "out on device %04X\n", irq, + ioinfo[irq]->schib.pmcw.dev); + + ioinfo[irq]->ui.flags.pgid_supp = 0; + + } else if (ret == -EAGAIN) { + + ret = 0; + } else if (ret == -EUSERS) { + +#ifdef CONFIG_DEBUG_IO + printk (KERN_ERR "PathVerification(%04X) " + "- Device is locked by someone else!\n", + irq); +#endif + CIO_MSG_EVENT(2, "PathVerification(%04X) " + "- Device is locked by someone else!\n", + irq); + } else if (ret == -ENODEV) { +#ifdef CONFIG_DEBUG_IO + printk (KERN_ERR "PathVerification(%04X) " + "- Device %04X is no longer there?!?\n", + irq, ioinfo[irq]->schib.pmcw.dev); +#endif + CIO_MSG_EVENT(2, "PathVerification(%04X) " + "- Device %04X is no longer there?!?\n", + irq, ioinfo[irq]->schib.pmcw.dev); + + } else if (ret == -EBUSY) { + /* + * The device is busy. Schedule the path verification + * bottom half and we'll hopefully get in next time. + */ + if (!ioinfo[irq]->ui.flags.noio) { + s390_schedule_path_verification(irq); + } + return -EINPROGRESS; + } else if (ret) { +#ifdef CONFIG_DEBUG_IO + printk (KERN_ERR "PathVerification(%04X) " + "- Unexpected error %d on device %04X\n", + irq, ret, ioinfo[irq]->schib.pmcw.dev); +#endif + CIO_MSG_EVENT(2, "PathVerification(%04X) - " + "Unexpected error %d on device %04X\n", + irq, ret, ioinfo[irq]->schib.pmcw.dev); + + ioinfo[irq]->ui.flags.pgid_supp = 0; + } + } + if (stsch(irq, &ioinfo[irq]->schib) != 0) + /* FIXME: tell driver device is dead. */ + return -ENODEV; + + /* + * stsch() doesn't always yield the correct pim, pam, and pom + * values, if no device selection has been performed yet. + * However, after complete path verification they are up to date. + */ + ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim & + ioinfo[irq]->schib.pmcw.pam & + ioinfo[irq]->schib.pmcw.pom; + +#ifdef CONFIG_CHSC + if (ioinfo[irq]->opm) { + for (i=0;i<=7;i++) { + int mask = 0x80 >> i; + if ((ioinfo[irq]->opm & mask) && + (!test_bit(ioinfo[irq]->schib.pmcw.chpid[i], + &chpids_logical))) + /* disable using this path */ + ioinfo[irq]->opm &= ~mask; + } + } +#endif /* CONFIG_CHSC */ + + ioinfo[irq]->ui.flags.noio = 0; + + /* Eventually wake up the device driver. */ + if (ioinfo[irq]->opm != 0) { + devreg_t *pdevreg; + pdevreg = s390_search_devreg(ioinfo[irq]); + + if (pdevreg && pdevreg->oper_func) + pdevreg->oper_func(irq, pdevreg); + } + return ret; + +} + +/* + * Device Path Verification + * + * Path verification is accomplished by checking which paths (CHPIDs) are + * available. Further, a path group ID is set, if possible in multipath + * mode, otherwise in single path mode. 
+ * + * Note : This function must not be called during normal device recognition, + * but during device driver initiated request_irq() processing only. + */ +int +s390_DevicePathVerification (int irq, __u8 usermask) +{ + int ccode; +#ifdef CONFIG_CHSC + int chp; + int mask; + int old_opm = 0; +#endif /* CONFIG_CHSC */ + + int ret = 0; + + char dbf_txt[15]; + devreg_t *pdevreg; + + sprintf (dbf_txt, "dpver%x", irq); + CIO_TRACE_EVENT (4, dbf_txt); + + if (ioinfo[irq]->st) + return -ENODEV; + +#ifdef CONFIG_CHSC + old_opm = ioinfo[irq]->opm; +#endif /* CONFIG_CHSC */ + ccode = stsch (irq, &(ioinfo[irq]->schib)); + + if (ccode) + return -ENODEV; + + if (ioinfo[irq]->schib.pmcw.pim == 0x80) { + /* + * no error, just not required for single path only devices + */ + ioinfo[irq]->ui.flags.pgid_supp = 0; + ret = 0; + ioinfo[irq]->ui.flags.noio = 0; #ifdef CONFIG_CHSC /* @@ -5699,58 +5863,35 @@ */ if (!test_bit(ioinfo[irq]->schib.pmcw.chpid[0], &chpids_logical)) { - not_oper_handler_func_t nopfunc=ioinfo[irq]->nopfunc; - int was_oper = ioinfo[irq]->ui.flags.oper; ioinfo[irq]->opm = 0; ioinfo[irq]->ui.flags.oper = 0; printk(KERN_WARNING "No logical path for sch %d...\n", irq); - if (old_opm && - was_oper && - ioinfo[irq]->ui.flags.ready) { -#ifdef CONFIG_PROC_FS - if (cio_proc_devinfo) - cio_procfs_device_remove - (ioinfo[irq]->devno); -#endif /* CONFIG_PROC_FS */ - free_irq( irq, ioinfo[irq]->irq_desc.dev_id); - if (nopfunc) - nopfunc( irq, DEVSTAT_DEVICE_GONE); - } - ret = -ENODEV; - } else if (!old_opm) { - /* - * check for opm... - */ + if (ioinfo[irq]->nopfunc) + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC); + + return -ENODEV; + } + if (!old_opm) { + ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim & ioinfo[irq]->schib.pmcw.pam & ioinfo[irq]->schib.pmcw.pom; - + if (ioinfo[irq]->opm) { - devreg_t *pdevreg; ioinfo[irq]->ui.flags.oper = 1; - pdevreg = s390_search_devreg( ioinfo[irq] ); - - if (pdevreg) - if (pdevreg->oper_func) - pdevreg->oper_func - ( irq, pdevreg); -#ifdef CONFIG_PROC_FS - if (cio_proc_devinfo) - if (highest_subchannel - < MAX_CIO_PROCFS_ENTRIES) { - cio_procfs_device_create - (ioinfo[irq]->devno); - } -#endif /* CONFIG_PROC_FS */ + pdevreg = s390_search_devreg(ioinfo[irq]); + + if (pdevreg && pdevreg->oper_func) + pdevreg->oper_func(irq, pdevreg); + ret = 0; + } else { + ret = -ENODEV; } - ret = 0; - } else { - ret = 0; } #endif /* CONFIG_CHSC */ return ret; @@ -5763,199 +5904,72 @@ if (ioinfo[irq]->opm) { for (chp=0;chp<=7;chp++) { mask = 0x80 >> chp; - if (ioinfo[irq]->opm & mask) { - if (!test_bit - (ioinfo[irq]->schib.pmcw.chpid[chp], - &chpids_logical)) { - /* disable using this path */ - ioinfo[irq]->opm &= ~mask; - } - } + if ((ioinfo[irq]->opm & mask) + &&(!test_bit(ioinfo[irq]->schib.pmcw.chpid[chp], + &chpids_logical))) + /* disable using this path */ + ioinfo[irq]->opm &= ~mask; } } - if ((ioinfo[irq]->opm == 0) && (old_opm)) { - not_oper_handler_func_t nopfunc=ioinfo[irq]->nopfunc; - int was_oper = ioinfo[irq]->ui.flags.ready; - - ioinfo[irq]->ui.flags.oper = 0; - printk(KERN_WARNING "No logical path for sch %d...\n",irq); - if (was_oper && ioinfo[irq]->ui.flags.oper) { -#ifdef CONFIG_PROC_FS - if (cio_proc_devinfo) - cio_procfs_device_remove(ioinfo[irq]->devno); -#endif /* CONFIG_PROC_FS */ - free_irq( irq, ioinfo[irq]->irq_desc.dev_id); - if (nopfunc) - nopfunc( irq, DEVSTAT_DEVICE_GONE); - } - return -ENODEV; - } - - if (!old_opm) { - /* Hey, we have a new logical path... 
*/ - devreg_t *pdevreg; - - ioinfo[irq]->ui.flags.oper = 1; - pdevreg = s390_search_devreg( ioinfo[irq] ); - - if (pdevreg) - if (pdevreg->oper_func) - pdevreg->oper_func( irq, pdevreg); -#ifdef CONFIG_PROC_FS - if (cio_proc_devinfo) - if (highest_subchannel < MAX_CIO_PROCFS_ENTRIES) { - cio_procfs_device_create(ioinfo[irq]->devno); - } -#endif /* CONFIG_PROC_FS */ - } #endif /* CONFIG_CHSC */ - if ( ioinfo[irq]->ui.flags.pgid_supp == 0 ) - return( 0); /* just exit ... */ + if (ioinfo[irq]->ui.flags.pgid_supp == 0) { - if (usermask) { - dev_path = usermask; - } else { - dev_path = ioinfo[irq]->opm; + if (ioinfo[irq]->opm == 0) + return -ENODEV; + + ioinfo[irq]->ui.flags.oper = 1; + ioinfo[irq]->ui.flags.noio = 0; - } + pdevreg = s390_search_devreg(ioinfo[irq]); + + if (pdevreg && pdevreg->oper_func) + pdevreg->oper_func(irq, pdevreg); - if (ioinfo[irq]->ui.flags.pgid == 0) { - memcpy (&ioinfo[irq]->pgid, global_pgid, sizeof (pgid_t)); - ioinfo[irq]->ui.flags.pgid = 1; + return 0; } - for (i = 0; i < 8 && !ret; i++) { - pathmask = 0x80 >> i; - - domask = dev_path & pathmask; - - if (domask) { - ret = s390_SetPGID (irq, domask); - - /* - * For the *first* path we are prepared - * for recovery - * - * - If we fail setting the PGID we assume its - * using a different PGID already (VM) we - * try to sense. - */ - if (ret == -EOPNOTSUPP && first) { - *(int *) &pgid = 0; - - ret = s390_SensePGID (irq, domask, &pgid); - first = 0; - - if (ret == 0) { - /* - * Check whether we retrieved - * a reasonable PGID ... - */ - if (pgid.inf.ps.state1 == - SNID_STATE1_GROUPED) { - memcpy (&ioinfo[irq]->pgid, - &pgid, sizeof (pgid_t)); - } else { /* ungrouped or garbage ... */ - ret = -EOPNOTSUPP; - - } - } else { - ioinfo[irq]->ui.flags.pgid_supp = 0; - -#ifdef CONFIG_DEBUG_IO - printk (KERN_WARNING - "PathVerification(%04X) " - "- Device %04X doesn't " - " support path grouping\n", - irq, - ioinfo[irq]->schib.pmcw.dev); -#endif - CIO_MSG_EVENT(2, - "PathVerification(%04X) " - "- Device %04X doesn't " - " support path grouping\n", - irq, - ioinfo[irq]->schib. - pmcw.dev); - - } - } else if (ret == -EIO) { -#ifdef CONFIG_DEBUG_IO - printk (KERN_ERR - "PathVerification(%04X) - I/O error " - "on device %04X\n", irq, - ioinfo[irq]->schib.pmcw.dev); -#endif + return s390_do_path_verification (irq, usermask); - CIO_MSG_EVENT(2, - "PathVerification(%04X) - I/O error " - "on device %04X\n", irq, - ioinfo[irq]->schib.pmcw.dev); - - ioinfo[irq]->ui.flags.pgid_supp = 0; +} - } else if (ret == -ETIMEDOUT) { -#ifdef CONFIG_DEBUG_IO - printk (KERN_ERR - "PathVerification(%04X) - I/O timed " - "out on device %04X\n", - irq, - ioinfo[irq]->schib.pmcw.dev); -#endif - CIO_MSG_EVENT(2, - "PathVerification(%04X) - I/O timed " - "out on device %04X\n", irq, - ioinfo[irq]->schib.pmcw.dev); - - ioinfo[irq]->ui.flags.pgid_supp = 0; +void +s390_kick_path_verification (unsigned long irq) +{ + long cr6 __attribute__ ((aligned (8))); - } else if (ret == -EAGAIN) { + atomic_set (&ioinfo[irq]->pver_pending, 0); + /* Do not enter path verification if sync_isc is enabled. 
*/ + __ctl_store (cr6, 6, 6); + if (cr6 & 0x04000000) { + s390_schedule_path_verification (irq); + return; + } + ioinfo[irq]->ui.flags.killio = 0; + s390_DevicePathVerification(irq, 0xff); - ret = 0; +} - } else if (ret == -EUSERS) { - -#ifdef CONFIG_DEBUG_IO - printk (KERN_ERR - "PathVerification(%04X) " - "- Device is locked by someone else!\n", - irq); -#endif - CIO_MSG_EVENT(2, - "PathVerification(%04X) " - "- Device is locked by someone else!\n", - irq); - } else if (ret == -ENODEV) { -#ifdef CONFIG_DEBUG_IO - printk (KERN_ERR - "PathVerification(%04X) " - "- Device %04X is no longer there?!?\n", - irq, ioinfo[irq]->schib.pmcw.dev); -#endif - CIO_MSG_EVENT(2, - "PathVerification(%04X) " - "- Device %04X is no longer there?!?\n", - irq, ioinfo[irq]->schib.pmcw.dev); - } else if (ret) { -#ifdef CONFIG_DEBUG_IO - printk (KERN_ERR - "PathVerification(%04X) " - "- Unexpected error %d on device %04X\n", - irq, ret, ioinfo[irq]->schib.pmcw.dev); -#endif - CIO_MSG_EVENT(2, - "PathVerification(%04X) - " - "Unexpected error %d on device %04X\n", - irq, ret, ioinfo[irq]->schib.pmcw.dev); - - ioinfo[irq]->ui.flags.pgid_supp = 0; - } - } +static void +s390_schedule_path_verification(unsigned long irq) +{ + /* Protect against rescheduling, when already running */ + if (atomic_compare_and_swap (0, 1, &ioinfo[irq]->pver_pending)) { + return; } - return ret; + /* + * Call path verification. + * Note this is always called from inside the i/o layer, so we don't + * need to care about the usermask. + */ + INIT_LIST_HEAD (&ioinfo[irq]->pver_bh.list); + ioinfo[irq]->pver_bh.sync = 0; + ioinfo[irq]->pver_bh.routine = (void*) (void*) s390_kick_path_verification; + ioinfo[irq]->pver_bh.data = (void*) irq; + queue_task (&ioinfo[irq]->pver_bh, &tq_immediate); + mark_bh (IMMEDIATE_BH); } /* @@ -6018,10 +6032,15 @@ s390irq_spin_lock_irqsave (irq, flags); if (init_IRQ_complete) { - spid_ccw = kmalloc (2 * sizeof (ccw1_t), GFP_DMA); + spid_ccw = kmalloc (2 * sizeof (ccw1_t), GFP_DMA | GFP_ATOMIC); } else { spid_ccw = alloc_bootmem_low (2 * sizeof (ccw1_t)); - + } + if (!spid_ccw) { + s390irq_spin_unlock_irqrestore(irq, flags); + if (inlreq) + free_irq(irq, pdevstat); + return -ENOMEM; } spid_ccw[0].cmd_code = CCW_CMD_SUSPEND_RECONN; @@ -6056,17 +6075,19 @@ printk (KERN_DEBUG "SPID - Device %04X " "on Subchannel %04X " "reports pending status, " + "lpm = %x, " "retry : %d\n", ioinfo[irq]->schib.pmcw.dev, - irq, retry); + irq, lpm, retry); #endif CIO_MSG_EVENT(2, "SPID - Device %04X " "on Subchannel %04X " "reports pending status, " + "lpm = %x, " "retry : %d\n", ioinfo[irq]->schib.pmcw. - dev, irq, retry); + dev, irq, lpm, retry); retry--; irq_ret = -EIO; } @@ -6117,11 +6138,12 @@ "SPID - device %04X," " unit check," " retry %d, cnt %02d," - " sns :" + " lpm %x, sns :" " %02X%02X%02X%02X %02X%02X%02X%02X ...\n", ioinfo[irq]->schib.pmcw. dev, retry, pdevstat->scnt, + lpm, pdevstat->ii.sense. data[0], pdevstat->ii.sense. @@ -6143,11 +6165,12 @@ "SPID - device %04X," " unit check," " retry %d, cnt %02d," - " sns :" + " lpm %x, sns :" " %02X%02X%02X%02X %02X%02X%02X%02X ...\n", ioinfo[irq]->schib. pmcw.dev, retry, pdevstat->scnt, + lpm, pdevstat->ii.sense. data[0], pdevstat->ii.sense. 
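s390_kick_path_verification() and s390_schedule_path_verification() above are an instance of the stock 2.4 immediate-bottom-half idiom: an atomic flag makes the queuing idempotent, and the handler clears the flag before doing the work so a new machine check can re-arm it while verification runs. The bare pattern, with the cio specifics stripped out (names here are illustrative; tq_struct, queue_task() and mark_bh() are the standard 2.4 interfaces, and atomic_compare_and_swap() is the s390 primitive used above, returning nonzero when the swap loses):

	static struct tq_struct deferred_task;
	static atomic_t deferred_pending = ATOMIC_INIT(0);

	static void
	deferred_handler(void *data)
	{
		/* Re-arm first, so a fresh event can queue us again. */
		atomic_set(&deferred_pending, 0);
		/* ... the actual deferred work for (unsigned long) data ... */
	}

	static void
	schedule_deferred(unsigned long arg)
	{
		/* Only the 0 -> 1 transition queues; later callers back off. */
		if (atomic_compare_and_swap(0, 1, &deferred_pending))
			return;

		INIT_LIST_HEAD(&deferred_task.list);
		deferred_task.sync = 0;
		deferred_task.routine = deferred_handler;
		deferred_task.data = (void *) arg;
		queue_task(&deferred_task, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
	}

The cr6 test in the kick routine adds one wrinkle: while the sync interruption subclass is still enabled, the work requeues itself instead of running, so verification never executes during sync I/O.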
@@ -6174,7 +6197,7 @@ /* don't issue warnings during startup unless requested */ if (init_IRQ_complete || cio_notoper_msg) { - printk (KERN_WARNING + printk (KERN_INFO "SPID - Device %04X " "on Subchannel %04X, " "lpm %02X, " @@ -6214,12 +6237,52 @@ irq); retry--; + } else if (irq_ret == -EBUSY) { +#ifdef CONFIG_DEBUG_IO + printk(KERN_WARNING + "SPID - device %x, irq %x is busy!\n", + ioinfo[irq]->schib.pmcw.dev, irq); +#endif /* CONFIG_DEBUG_IO */ + CIO_MSG_EVENT(2, + "SPID - device %x, irq %x is busy!\n", + ioinfo[irq]->schib.pmcw.dev, irq); + retry = 0; + } else if (irq_ret != -ENODEV) { retry--; irq_ret = -EIO; - } else { + } else if (!(pdevstat->flag & DEVSTAT_NOT_OPER)) { retry = 0; irq_ret = -ENODEV; + } else { + /* don't issue warnings during startup unless requested */ + if (init_IRQ_complete || cio_notoper_msg) { + + printk (KERN_INFO + "SPID - Device %04X " + "on Subchannel %04X, " + "lpm %02X, " + "became 'not operational'\n", + ioinfo[irq]->schib.pmcw. + dev, irq, + lpm); + CIO_MSG_EVENT(2, + "SPID - Device %04X " + "on Subchannel %04X, " + "lpm %02X, " + "became 'not operational'\n", + ioinfo[irq]->schib. + pmcw.dev, irq, + lpm); + } + + retry = 0; + ioinfo[irq]->opm &= ~lpm; + + if (ioinfo[irq]->opm != 0) + irq_ret = -EAGAIN; + else + irq_ret = -ENODEV; } @@ -6302,13 +6365,32 @@ ioinfo[irq]->ui.flags.unfriendly = 0; /* assume it's friendly... */ if (init_IRQ_complete) { - snid_ccw = kmalloc (sizeof (ccw1_t), GFP_DMA); - tmp_pgid = kmalloc (sizeof (pgid_t), GFP_DMA); + snid_ccw = kmalloc (sizeof (ccw1_t), GFP_DMA | GFP_ATOMIC); + tmp_pgid = kmalloc (sizeof (pgid_t), GFP_DMA | GFP_ATOMIC); } else { snid_ccw = alloc_bootmem_low (sizeof (ccw1_t)); tmp_pgid = alloc_bootmem_low (sizeof (pgid_t)); } + if (!snid_ccw || !tmp_pgid) { + if (snid_ccw) { + if (init_IRQ_complete) + kfree(snid_ccw); + else + free_bootmem((unsigned long) snid_ccw, sizeof(ccw1_t)); + } + if (tmp_pgid) { + if (init_IRQ_complete) + kfree(tmp_pgid); + else + free_bootmem((unsigned long) tmp_pgid, sizeof(pgid_t)); + } + s390irq_spin_unlock_irqrestore(irq, flags); + if (inlreq) + free_irq (irq, pdevstat); + return -ENOMEM; + } + snid_ccw->cmd_code = CCW_CMD_SENSE_PGID; snid_ccw->cda = (__u32) virt_to_phys (tmp_pgid); snid_ccw->count = sizeof (pgid_t); @@ -6335,8 +6417,8 @@ * Sense Path Group ID command * further retries wouldn't help ... */ - if (pdevstat->ii.sense.data[0] & - (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)) { + if (pdevstat->ii.sense.data[0] + & (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)) { retry = 0; irq_ret = -EOPNOTSUPP; } else { @@ -6403,7 +6485,7 @@ } else if (pdevstat->flag & DEVSTAT_NOT_OPER) { /* don't issue warnings during startup unless requested */ if (init_IRQ_complete || cio_notoper_msg) { - printk (KERN_WARNING + printk (KERN_INFO "SNID - Device %04X " "on Subchannel %04X, " "lpm %02X, " @@ -6422,7 +6504,8 @@ } retry = 0; - irq_ret = -EIO; + ioinfo[irq]->opm &= ~lpm; + irq_ret = -EAGAIN; } else { retry = 0; /* success ... */ @@ -6432,7 +6515,7 @@ * -- we'll fail other commands if that is * the case */ - if (pgid->inf.ps.state2 == + if (tmp_pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { printk (KERN_WARNING "SNID - Device %04X " @@ -6507,10 +6590,38 @@ ioinfo[irq]->schib.pmcw.dev, irq_ret); retry--; irq_ret = -EIO; - } else { /* -ENODEV ...
*/ - + } else if (!(pdevstat->flag & DEVSTAT_NOT_OPER)) { retry = 0; irq_ret = -ENODEV; + } else { + /* don't issue warnings during startup unless requested */ + if (init_IRQ_complete || cio_notoper_msg) { + + printk (KERN_INFO + "SNID - Device %04X " + "on Subchannel %04X, " + "lpm %02X, " + "became 'not operational'\n", + ioinfo[irq]->schib.pmcw. + dev, irq, + lpm); + CIO_MSG_EVENT(2, + "SNID - Device %04X " + "on Subchannel %04X, " + "lpm %02X, " + "became 'not operational'\n", + ioinfo[irq]->schib. + pmcw.dev, irq, + lpm); + } + + retry = 0; + ioinfo[irq]->opm &= ~lpm; + + if (ioinfo[irq]->opm != 0) + irq_ret = -EAGAIN; + else + irq_ret = -ENODEV; } @@ -6650,12 +6761,9 @@ pdevreg = s390_search_devreg (ioinfo[irq]); - if (pdevreg != NULL) { - if (pdevreg->oper_func != NULL) - pdevreg-> - oper_func (irq, pdevreg); + if (pdevreg && pdevreg->oper_func) + pdevreg->oper_func(irq, pdevreg); - } #ifdef CONFIG_PROC_FS /* add new procfs entry */ if (cio_proc_devinfo) @@ -6995,19 +7103,120 @@ __initcall(chsc_get_sch_descriptions); +static int +__check_for_io_and_kill(int irq, __u8 mask, int fatal) +{ + schib_t *schib = &ioinfo[irq]->schib; + int ret = 0; + + if (schib->scsw.actl & SCSW_ACTL_DEVACT) { + if ((ioinfo[irq]->opm != mask) || + (fatal == 0)) { + ret = CIO_PATHGONE_WAIT4INT; + } + if ((schib->scsw.actl & SCSW_ACTL_SCHACT) && + (schib->pmcw.lpum == mask) && + (fatal != 0)) { + int cc; + /* Kill the IO. It won't complete. */ + ioinfo[irq]->ui.flags.noio = 0; + ioinfo[irq]->ui.flags.killio = 1; + cc = clear_IO(irq, 0xD2C9D3D3, 0); + if (cc != 0) { + /* Eek, can't kill io.
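The SPID and SNID retry loops above now converge on one per-path failure policy: a path that reports 'not operational' is removed from the operational mask, and the operation only fails hard once no path remains. Condensed into a sketch (helper name invented; semantics as in the two branches above):

	/* Path 'lpm' just failed: decide whether the caller may retry. */
	static int
	path_not_oper(int irq, __u8 lpm)
	{
		ioinfo[irq]->opm &= ~lpm;	/* this path is gone */

		/* -EAGAIN: retry on the remaining paths; -ENODEV: none left. */
		return (ioinfo[irq]->opm != 0) ? -EAGAIN : -ENODEV;
	}

Returning -EAGAIN instead of the old blanket -EIO/-ENODEV is what lets the path-verification loop treat a single dead path as recoverable; its -EAGAIN branch simply clears the error and carries on.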
*/ + CIO_CRW_EVENT(0, + "Can't kill io on " + "sch %x, clear_IO " + "returned %d!\n", + irq, cc); + ioinfo[irq]->ui.flags.killio = 0; + s390irq_spin_unlock(irq); + if ((cc == -ENODEV) && + (ioinfo[irq]->nopfunc)) { + ioinfo[irq]->nopfunc(irq, + DEVSTAT_DEVICE_GONE); + ioinfo[irq]->ui.flags.oper = 0; + } + ret = CIO_PATHGONE_DEVGONE; + } else { + ret = CIO_PATHGONE_WAIT4INT + | CIO_PATHGONE_IOERR; + ioinfo[irq]->ui.flags.noio = 1; + } + break; + default: /* -ENODEV */ + s390irq_spin_unlock(irq); + if (ioinfo[irq]->nopfunc) { + ioinfo[irq]->ui.flags.oper = 0; + ioinfo[irq]->nopfunc(irq, + DEVSTAT_DEVICE_GONE); + } + ret = CIO_PATHGONE_DEVGONE; + } + } + } + return ret; +} + void s390_do_chpid_processing( __u8 chpid) { int irq; int j; - int mask; char dbf_txt[15]; - int ccode; - int was_oper; - int chp = 0; - int mask2; - int ret = 0; sprintf(dbf_txt, "chpr%x", chpid); CIO_TRACE_EVENT( 2, dbf_txt); @@ -7017,141 +7226,82 @@ * but the chpid the report came in through. How to handle??? */ clear_bit(chpid, &chpids); - if (!test_and_clear_bit(chpid, &chpids_known)) + if (!test_and_clear_bit(chpid, &chpids_known)) { +#ifdef CONFIG_DEBUG_CHSC + pr_debug(KERN_DEBUG"Got link incident for unknown chpid %x\n", + chpid); +#endif /* CONFIG_DEBUG_CHSC */ return; /* we didn't know the chpid anyway */ + } for (irq=0;irq<=highest_subchannel;irq++) { + schib_t *schib; + if (ioinfo[irq] == INVALID_STORAGE_AREA) continue; /* we don't know the device anyway */ if (ioinfo[irq]->st) continue; /* non-io subchannel */ + schib = &ioinfo[irq]->schib; for (j=0; j<8;j++) { - if (ioinfo[irq]->schib.pmcw.chpid[j] == chpid) { - - /* - * Send nops down each path to find out - * which path is gone (ssch will yield cc=3 - * and the path will be switched off in the opm) - */ - - /* - * Note: irq spinlock is grabbed several times - * in here - */ - for (chp=0;chp<=7;chp++) { - mask2 = 0x80 >> chp; - if (mask2 & ioinfo[irq]->opm) - ret = s390_send_nop (irq, mask2); - } - - s390irq_spin_lock(irq); - - /* - * FIXME: is this neccessary? - */ - ccode = stsch(irq, &ioinfo[irq]->schib); - if (ccode) { -#ifdef CONFIG_DEBUG_CRW - printk( KERN_WARNING - "do_crw_pending: device on " - "sch %x is not operational\n", - irq); -#endif /* CONFIG_DEBUG_CRW */ - CIO_CRW_EVENT( 2, - "device on sch %x is " - "not operational\n", - irq); - ioinfo[irq]->ui.flags.oper = 0; + int mask = 0x80 >> j; + int out = 0; + int err = 0; + + if (schib->pmcw.chpid[j] != chpid) + continue; + + if (stsch(irq, schib) != 0) { + ioinfo[irq]->ui.flags.oper = 0; + if (ioinfo[irq]->nopfunc) + ioinfo[irq]->nopfunc(irq, DEVSTAT_DEVICE_GONE); + break; + } + + s390irq_spin_lock(irq); + + ioinfo[irq]->ui.flags.noio = 1; + + /* Do we still expect an interrupt for outstanding io? */ + if (ioinfo[irq]->ui.flags.busy) { + int rck = __check_for_io_and_kill(irq, mask, 1); + if (rck & CIO_PATHGONE_WAIT4INT) + out=1; + if (rck & CIO_PATHGONE_IOERR) + err=1; + if (rck & CIO_PATHGONE_DEVGONE) break; - } - - ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim & - ioinfo[irq]->schib.pmcw.pam & - ioinfo[irq]->schib.pmcw.pom; - - if (ioinfo[irq]->opm) { - for (chp=0;chp<=7;chp++) { - mask2 = 0x80 >> chp; - if (ioinfo[irq]->opm & mask2) { - if (!test_bit - (ioinfo[irq]-> - schib.pmcw.chpid[chp], - &chpids_logical)) { - /* disable using this path */ - ioinfo[irq]->opm - &= ~mask2; - } - } - } - } + } + + s390irq_spin_unlock(irq); + + /* Tell the device driver not to disturb us. 
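__check_for_io_and_kill() reports its outcome as a combinable bitmask rather than a single code, because two results can hold at once: an interrupt may still be due for the killed request while the request itself has to be flagged as damaged. A rough summary of the policy (a sketch of the decision, not new code, simplified from the function above):

	/*
	 * __check_for_io_and_kill(irq, mask, fatal), approximately:
	 *
	 *   device active, not solely via 'mask', or not fatal
	 *       -> WAIT4INT: the pending interrupt will still arrive
	 *   subchannel active on exactly the failed path, fatal
	 *       -> clear_IO() kills the request: IOERR, plus WAIT4INT for
	 *          the clear's interrupt; DEVGONE if even clear_IO()
	 *          comes back -ENODEV
	 *   halt/start/clear/resume pending, not on 'mask' or not fatal
	 *       -> WAIT4INT
	 *   pending on the failed path, fatal
	 *       -> cancel_IO(); on -EINVAL fall back to clear_IO() as above
	 */

The caller then just decodes the bits, as in the chpid loop above: WAIT4INT defers further processing until the interrupt arrives, IOERR selects DEVSTAT_NOT_ACC_ERR over DEVSTAT_NOT_ACC for the driver notification, and DEVGONE means the subchannel has already been reported dead.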
*/ + if (ioinfo[irq]->ui.flags.ready && + schib->pmcw.pim != 0x80 && + ioinfo[irq]->nopfunc) { + if (err) + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC_ERR); + else + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC); + } - if (!ioinfo[irq]->opm) { - /* - * sh*t, our last path has gone... - * Set the device status to not operational - * and eventually notify the device driver - */ -#ifdef CONFIG_DEBUG_CRW - printk( KERN_WARNING - "do_crw_pending: Last path gone for " - "device %x, sch %x\n", - ioinfo[irq]->devno, irq); -#endif /* CONFIG_DEBUG_CRW */ - CIO_CRW_EVENT( 2, - "Last path gone for " - "device %x, sch %x\n", - ioinfo[irq]->devno, - irq); - - was_oper = ioinfo[irq]->ui.flags.oper; - - ioinfo[irq]->ui.flags.oper = 0; - - if (was_oper && ioinfo[irq]->ui.flags.ready) { - - not_oper_handler_func_t nopfunc = - ioinfo[irq]->nopfunc; -#ifdef CONFIG_PROC_FS - if (cio_proc_devinfo) - cio_procfs_device_remove - (ioinfo[irq]->devno); -#endif /* CONFIG_PROC_FS */ - free_irq(irq, - ioinfo[irq]->irq_desc.dev_id); - if (nopfunc) - nopfunc(irq, - DEVSTAT_DEVICE_GONE); - } - - } else if (ioinfo[irq]->ui.flags.ready) { - /* - * Re-do path verification for the chpid in question - * FIXME: is this neccessary? - */ - mask = 0x80 >> j; + ioinfo[irq]->opm &= ~mask; + + if (out) + break; - if (!s390_DevicePathVerification(irq,mask)) { -#ifdef CONFIG_DEBUG_CRW - printk( KERN_DEBUG - "DevicePathVerification " - "successful for" - " Subchannel %x, " - "chpid %x\n", - irq, chpid); -#endif /* CONFIG_DEBUG_CRW */ - CIO_CRW_EVENT( 2, - "DevicePathVerification " - "successful for" - " Subchannel %x, " - "chpid %x\n", - irq, chpid); - } - } - - j=8; - s390irq_spin_unlock(irq); + /* + * Always schedule the path verification, even if opm=0. + * Reason: We can't rely on stsch() to return latest&greatest + * values, if a device selection hasn't been performed, and + * we might miss a path we didn't get a mchk for.
+ */ + if (ioinfo[irq]->ui.flags.ready) + s390_schedule_path_verification(irq); + else { + ioinfo[irq]->ui.flags.noio = 0; + ioinfo[irq]->ui.flags.killio = 0; } - + break; } } } @@ -7163,13 +7313,11 @@ char dbf_txt[15]; int irq = 0; - int ccode; __u32 fla_mask = 0xffff; int chp; - int mask, mask2; - int ret; + int mask; - sprintf(dbf_txt, "accp%x", chpid); + sprintf(dbf_txt, "accpr%x", chpid); CIO_TRACE_EVENT( 2, dbf_txt); if (info != CHSC_SEI_ACC_CHPID) { sprintf(dbf_txt, "fla%x", fla); @@ -7204,8 +7352,13 @@ return; } - if (!test_bit(chpid, &chpids_logical)) + if (!test_bit(chpid, &chpids_logical)) { +#ifdef CONFIG_DEBUG_CHSC + printk(KERN_DEBUG"chpid %x is logically offline, " + "skipping res acc processing\n", chpid); +#endif /* CONFIG_DEBUG_CHSC */ return; /* no need to do the rest */ + } switch (info) { case CHSC_SEI_ACC_CHPID: /* @@ -7216,62 +7369,13 @@ printk( KERN_DEBUG "Looking at chpid %x...\n", chpid); #endif /* CONFIG_DEBUG_CHSC */ - for (irq=0; irq<=__MAX_SUBCHANNELS; irq++) { + for (irq=0; irq<__MAX_SUBCHANNELS; irq++) { if((ioinfo[irq] != INVALID_STORAGE_AREA) - && (!ioinfo[irq]->st)) { - - /* - * Send nops down each path to find out - * which path is there - */ + && (ioinfo[irq]->st != 0)) + continue; - for (chp=0;chp<=7;chp++) { - mask = 0x80 >> chp; - - /* - * check if chpid is in information - * updated by ssd - */ - if ((ioinfo[irq]->ssd_info.valid) && - (ioinfo[irq]->ssd_info.chpid[chp] - == chpid)) - ret = s390_send_nop (irq, mask); - } - - ccode = stsch(irq, &ioinfo[irq]->schib); - if (!ccode) { - - ioinfo[irq]->opm = - ioinfo[irq]->schib.pmcw.pim & - ioinfo[irq]->schib.pmcw.pam & - ioinfo[irq]->schib.pmcw.pom; - - if (ioinfo[irq]->opm) { - for (chp=0;chp<=7;chp++) { - mask = 0x80 >> chp; - if (ioinfo[irq]->opm - & mask) { - if (!test_bit - (ioinfo[irq]-> - schib.pmcw.chpid[chp], - &chpids_logical)) { - /* disable using this path */ - ioinfo[irq]->opm - &= ~mask; - } - } - } - } - - if ((ioinfo[irq]->ui.flags.ready) - && (chpid & ioinfo[irq]->opm)) - s390_DevicePathVerification(irq, chpid); - - } else { - ioinfo[irq]->ui.flags.oper = 0; - } - } else if (ioinfo[irq] == INVALID_STORAGE_AREA) { + if (ioinfo[irq] == INVALID_STORAGE_AREA) { /* * We don't know the device yet, but since a path * may be available now to the device we'll have @@ -7291,9 +7395,41 @@ highest_subchannel = irq; if (valret == 0) s390_device_recognition_irq(irq); + continue; } - } + for (chp=0;chp<=7;chp++) { + mask = 0x80 >> chp; + + /* + * check if chpid is in information + * updated by ssd + */ + if ((!ioinfo[irq]->ssd_info.valid) || + (ioinfo[irq]->ssd_info.chpid[chp] != chpid)) + continue; + + /* Tell the device driver not to disturb us. */ + if (ioinfo[irq]->ui.flags.ready && + ioinfo[irq]->schib.pmcw.pim != 0x80 && + ioinfo[irq]->nopfunc) + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC); + + ioinfo[irq]->ui.flags.noio = 1; + + /* Do we still expect an interrupt for outstanding io? */ + if (ioinfo[irq]->ui.flags.busy) + /* Wait for interrupt. 
*/ + break; + + if (ioinfo[irq]->ui.flags.ready) { + s390_schedule_path_verification(irq); + } else + ioinfo[irq]->ui.flags.noio = 0; + + break; + } + } break; case CHSC_SEI_ACC_LINKADDR: /* @@ -7313,61 +7449,16 @@ chpid, fla); #endif /* CONFIG_DEBUG_CHSC */ - for (irq=0; irq<=__MAX_SUBCHANNELS; irq++) { - + for (irq=0; irq<__MAX_SUBCHANNELS; irq++) { + int j; /* * Walk through all subchannels and * look if our chpid and our (masked) link * address are in somewhere * Do a stsch for the found subchannels and * perform path grouping - */ - if ((ioinfo[irq] != INVALID_STORAGE_AREA) - && (!ioinfo[irq]->st)) { - int j; - - /* Update our ssd_info */ - if (chsc_get_sch_desc_irq(irq)) - break; - - for (j=0;j<8;j++) { - if ((ioinfo[irq]->ssd_info.chpid[j] == chpid) && - ((ioinfo[irq]->ssd_info.fla[j]&fla_mask) == fla)) { - - mask2 = 0x80 >> j; - ret = s390_send_nop (irq, mask2); - - ccode = stsch(irq,&ioinfo[irq]->schib); - if (!ccode) { - ioinfo[irq]->opm = - ioinfo[irq]->schib.pmcw.pim & - ioinfo[irq]->schib.pmcw.pam & - ioinfo[irq]->schib.pmcw.pom; - - if (ioinfo[irq]->opm) { - for (chp=0;chp<=7;chp++) { - mask = 0x80 >> chp; - if (ioinfo[irq]->opm - & mask) { - if (!test_bit - (ioinfo[irq]-> - schib.pmcw.chpid[chp], - &chpids_logical)) { - /* disable using this path */ - ioinfo[irq]->opm - &= ~mask; - } - } - } - } - - if (ioinfo[irq]->ui.flags.ready) - s390_DevicePathVerification(irq, chpid); - } - break; - } - } - } else if (ioinfo[irq] == INVALID_STORAGE_AREA) { + */ + if (ioinfo[irq] == INVALID_STORAGE_AREA) { /* The full program again (see above), grr... */ int valret = 0; @@ -7380,10 +7471,43 @@ highest_subchannel = irq; if (valret == 0) s390_device_recognition_irq(irq); + continue; + } + if (ioinfo[irq]->st != 0) + continue; + + /* Update our ssd_info */ + if (chsc_get_sch_desc_irq(irq)) + break; + + for (j=0;j<8;j++) { + if ((ioinfo[irq]->ssd_info.chpid[j] != chpid) || + ((ioinfo[irq]->ssd_info.fla[j]&fla_mask) != fla)) + continue; + + /* Tell the device driver not to disturb us. */ + if (ioinfo[irq]->ui.flags.ready && + ioinfo[irq]->schib.pmcw.pim != 0x80 && + ioinfo[irq]->nopfunc) + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC); + + ioinfo[irq]->ui.flags.noio = 1; + + /* Do we still expect an interrupt for outstanding io? */ + if (ioinfo[irq]->ui.flags.busy) + /* Wait for interrupt. 
*/ + break; + + if (ioinfo[irq]->ui.flags.ready) { + s390_schedule_path_verification(irq); + } else + ioinfo[irq]->ui.flags.noio = 0; + break; } + break; + } - break; default: BUG(); @@ -7394,7 +7518,7 @@ s390_process_css( void ) { - int ccode; + int ccode, do_sei; CIO_TRACE_EVENT( 2, "prcss"); @@ -7414,18 +7538,26 @@ return; } - /* - * build the chsc request block for store event information - * and do the call - */ - memset(chsc_area_sei,0,sizeof(chsc_area_t)); - chsc_area_sei->request_block.command_code1=0x0010; - chsc_area_sei->request_block.command_code2=0x000E; + do_sei = 1; - ccode = chsc(chsc_area_sei); + while (do_sei) { + + do_sei = 0; + /* + * build the chsc request block for store event information + * and do the call + */ + memset(chsc_area_sei,0,sizeof(chsc_area_t)); + chsc_area_sei->request_block.command_code1=0x0010; + chsc_area_sei->request_block.command_code2=0x000E; + + ccode = chsc(chsc_area_sei); + + + if (ccode) + break; - if (!ccode) { /* for debug purposes, check for problems */ if (chsc_area_sei->response_block.response_code == 0x0003) { #ifdef CONFIG_DEBUG_CHSC @@ -7435,8 +7567,9 @@ CIO_CRW_EVENT( 2, "s390_process_css: " "error in chsc request block!\n"); - - } else if (chsc_area_sei->response_block.response_code == 0x0005) { + break; + } + if (chsc_area_sei->response_block.response_code == 0x0005) { #ifdef CONFIG_DEBUG_CHSC printk( KERN_WARNING "s390_process_css: no event information stored\n"); @@ -7444,8 +7577,9 @@ CIO_CRW_EVENT( 2, "s390_process_css: " "no event information stored\n"); - - } else if (chsc_area_sei->response_block.response_code == 0x0002) { + break; + } + if (chsc_area_sei->response_block.response_code == 0x0002) { #ifdef CONFIG_DEBUG_CHSC printk( KERN_WARNING "s390_process_css: invalid command!\n"); @@ -7453,157 +7587,350 @@ CIO_CRW_EVENT( 2, "s390_process_css: " "invalid command!\n"); + break; + } + if (chsc_area_sei->response_block.response_code != 0x0001) { +#ifdef CONFIG_DEBUG_CHSC + printk( KERN_WARNING + "s390_process_css: unknown response code %d\n", + chsc_area_sei->response_block.response_code); +#endif /* CONFIG_DEBUG_CHSC */ + CIO_CRW_EVENT( 2, + "s390_process_css: unknown response " + "code %d\n", + chsc_area_sei->response_block.response_code); + break; + } + /* everything ok */ +#ifdef CONFIG_DEBUG_CHSC + printk( KERN_DEBUG + "s390_process_css: " + "event information successfully stored\n"); +#endif /* CONFIG_DEBUG_CHSC */ + CIO_CRW_EVENT( 4, + "s390_process_css: " + "event information successfully stored\n"); + + /* Check if there is more event information pending. */ + if (chsc_area_sei->response_block.response_block_data. + sei_res.flags & 0x80) { +#ifdef CONFIG_DEBUG_CHSC + printk(KERN_INFO"s390_process_css: further event " + "information pending...\n"); +#endif /* CONFIG_DEBUG_CHSC */ + CIO_CRW_EVENT( 2, "further event information pending\n"); - } else if (chsc_area_sei->response_block.response_code == 0x0001) { - /* everything ok */ + do_sei = 1; + } + + /* Check if we might have lost some information. */ + if (chsc_area_sei->response_block.response_block_data. + sei_res.flags & 0x40) { #ifdef CONFIG_DEBUG_CHSC - printk( KERN_DEBUG + printk(KERN_ERR"s390_process_css: Event information has " + "been lost due to overflow!\n"); +#endif /* CONFIG_DEBUG_CHSC */ + CIO_CRW_EVENT( 2, "Event information has " + "been lost due to overflow!\n"); + } + + if (chsc_area_sei->response_block. 
+ response_block_data.sei_res.rs != 4) { +#ifdef CONFIG_DEBUG_CHSC + printk( KERN_ERR "s390_process_css: " - "event information successfully stored\n"); + "reporting source (%04X) isn't a chpid!\n", + chsc_area_sei->response_block. + response_block_data.sei_res.rsid); #endif /* CONFIG_DEBUG_CHSC */ - CIO_CRW_EVENT( 4, + CIO_CRW_EVENT( 2, "s390_process_css: " - "event information successfully stored\n"); + "reporting source (%04X) isn't a chpid!\n", + chsc_area_sei->response_block. + response_block_data.sei_res.rsid); + continue; + } - if (chsc_area_sei->response_block. - response_block_data.sei_res.rs != 4) { + /* which kind of information was stored? */ + switch (chsc_area_sei->response_block. + response_block_data.sei_res.cc) { + case 1: /* link incident*/ #ifdef CONFIG_DEBUG_CHSC - printk( KERN_ERR - "s390_process_css: " - "reporting source (%04X) isn't a chpid!" - "Aborting processing of machine check...\n", - chsc_area_sei->response_block. - response_block_data.sei_res.rsid); + printk( KERN_DEBUG + "s390_process_css: " + "channel subsystem reports link incident," + " source is chpid %x\n", + chsc_area_sei->response_block. + response_block_data.sei_res.rsid); #endif /* CONFIG_DEBUG_CHSC */ - CIO_CRW_EVENT( 2, - "s390_process_css: " - "reporting source (%04X) isn't a chpid!" - "Aborting processing of machine check...\n", - chsc_area_sei->response_block. - response_block_data.sei_res.rsid); - return; - } + CIO_CRW_EVENT( 4, + "s390_process_css: " + "channel subsystem reports " + "link incident, " + "source is chpid %x\n", + chsc_area_sei->response_block. + response_block_data.sei_res.rsid); + + s390_do_chpid_processing(chsc_area_sei->response_block. + response_block_data.sei_res.rsid); + + break; - /* which kind of information was stored? */ - switch (chsc_area_sei->response_block. - response_block_data.sei_res.cc) { - case 1: /* link incident*/ + case 2: /* i/o resource accessibiliy */ #ifdef CONFIG_DEBUG_CHSC - printk( KERN_DEBUG - "s390_process_css: " - "channel subsystem reports link incident," - " source is chpid %x\n", - chsc_area_sei->response_block. - response_block_data.sei_res.rsid); + printk( KERN_DEBUG + "s390_process_css: channel subsystem " + "reports some I/O devices " + "may have become accessible\n"); #endif /* CONFIG_DEBUG_CHSC */ - CIO_CRW_EVENT( 4, - "s390_process_css: " - "channel subsystem reports " - "link incident, " - "source is chpid %x\n", - chsc_area_sei->response_block. - response_block_data.sei_res.rsid); - - s390_do_chpid_processing(chsc_area_sei->response_block. - response_block_data.sei_res.rsid); - - break; - - case 2: /* i/o resource accessibiliy */ + CIO_CRW_EVENT( 4, + "s390_process_css: " + "channel subsystem reports " + "some I/O devices " + "may have become accessible\n"); #ifdef CONFIG_DEBUG_CHSC - printk( KERN_DEBUG - "s390_process_css: channel subsystem " - "reports some I/O devices " - "may have become accessable\n"); + printk( KERN_DEBUG + "Data received after sei: \n"); + printk( KERN_DEBUG + "Validity flags: %x\n", + chsc_area_sei->response_block. + response_block_data.sei_res.vf); #endif /* CONFIG_DEBUG_CHSC */ - CIO_CRW_EVENT( 4, - "s390_process_css: " - "channel subsystem reports " - "some I/O devices " - "may have become accessable\n"); + if ((chsc_area_sei->response_block. + response_block_data.sei_res.vf&0x80) + == 0) { #ifdef CONFIG_DEBUG_CHSC - printk( KERN_DEBUG - "Data received after sei: \n"); - printk( KERN_DEBUG - "Validity flags: %x\n", - chsc_area_sei->response_block. 
- response_block_data.sei_res.vf); + printk( KERN_DEBUG "chpid: %x\n", + chsc_area_sei->response_block. + response_block_data.sei_res.rsid); #endif /* CONFIG_DEBUG_CHSC */ - if ((chsc_area_sei->response_block. - response_block_data.sei_res.vf&0x80) - == 0) { + s390_do_res_acc_processing + (chsc_area_sei->response_block. + response_block_data.sei_res.rsid, + 0, + CHSC_SEI_ACC_CHPID); + } else if ((chsc_area_sei->response_block. + response_block_data.sei_res.vf&0xc0) + == 0x80) { #ifdef CONFIG_DEBUG_CHSC - printk( KERN_DEBUG "chpid: %x\n", - chsc_area_sei->response_block. - response_block_data.sei_res.rsid); + printk( KERN_DEBUG + "chpid: %x link addr: %x\n", + chsc_area_sei->response_block. + response_block_data.sei_res.rsid, + chsc_area_sei->response_block. + response_block_data.sei_res.fla); #endif /* CONFIG_DEBUG_CHSC */ - s390_do_res_acc_processing - (chsc_area_sei->response_block. - response_block_data.sei_res.rsid, - 0, - CHSC_SEI_ACC_CHPID); - } else if ((chsc_area_sei->response_block. - response_block_data.sei_res.vf&0xc0) - == 0x80) { + s390_do_res_acc_processing + (chsc_area_sei->response_block. + response_block_data.sei_res.rsid, + chsc_area_sei->response_block. + response_block_data.sei_res.fla, + CHSC_SEI_ACC_LINKADDR); + } else if ((chsc_area_sei->response_block. + response_block_data.sei_res.vf & 0xc0) + == 0xc0) { #ifdef CONFIG_DEBUG_CHSC - printk( KERN_DEBUG - "chpid: %x link addr: %x\n", - chsc_area_sei->response_block. - response_block_data.sei_res.rsid, - chsc_area_sei->response_block. - response_block_data.sei_res.fla); + printk( KERN_DEBUG + "chpid: %x " + "full link addr: %x\n", + chsc_area_sei->response_block. + response_block_data.sei_res.rsid, + chsc_area_sei->response_block. + response_block_data.sei_res.fla); #endif /* CONFIG_DEBUG_CHSC */ - s390_do_res_acc_processing - (chsc_area_sei->response_block. - response_block_data.sei_res.rsid, - chsc_area_sei->response_block. - response_block_data.sei_res.fla, - CHSC_SEI_ACC_LINKADDR); - } else if ((chsc_area_sei->response_block. - response_block_data.sei_res.vf & 0xc0) - == 0xc0) { + s390_do_res_acc_processing + (chsc_area_sei->response_block. + response_block_data.sei_res.rsid, + chsc_area_sei->response_block. + response_block_data.sei_res.fla, + CHSC_SEI_ACC_FULLLINKADDR); + } #ifdef CONFIG_DEBUG_CHSC - printk( KERN_DEBUG - "chpid: %x " - "full link addr: %x\n", - chsc_area_sei->response_block. - response_block_data.sei_res.rsid, - chsc_area_sei->response_block. - response_block_data.sei_res.fla); + printk( KERN_DEBUG "\n"); #endif /* CONFIG_DEBUG_CHSC */ - s390_do_res_acc_processing - (chsc_area_sei->response_block. - response_block_data.sei_res.rsid, - chsc_area_sei->response_block. - response_block_data.sei_res.fla, - CHSC_SEI_ACC_FULLLINKADDR); - } + + break; + + default: /* other stuff */ #ifdef CONFIG_DEBUG_CHSC - printk( KERN_DEBUG "\n"); + printk( KERN_DEBUG + "s390_process_css: event %d\n", + chsc_area_sei->response_block. + response_block_data.sei_res.cc); #endif /* CONFIG_DEBUG_CHSC */ + CIO_CRW_EVENT( 4, + "s390_process_css: event %d\n", + chsc_area_sei->response_block. 
+ response_block_data.sei_res.cc); + + break; + + } + } + + spin_unlock(&chsc_lock_sei); +} +#endif +static void +__process_chp_gone(int irq, int chpid) +{ + schib_t *schib = &ioinfo[irq]->schib; + int i; + + for (i=0;i<8;i++) { + int mask = 0x80>>i; + int out = 0; + int err = 0; + + if (schib->pmcw.chpid[i] != chpid) + continue; + + if (stsch(irq, schib) != 0) { + ioinfo[irq]->ui.flags.oper = 0; + if (ioinfo[irq]->nopfunc) + ioinfo[irq]->nopfunc(irq, DEVSTAT_DEVICE_GONE); + break; + } + + s390irq_spin_lock(irq); + + ioinfo[irq]->ui.flags.noio = 1; + + /* Do we still expect an interrupt for outstanding io? */ + if (ioinfo[irq]->ui.flags.busy) { + int rck = __check_for_io_and_kill(irq, mask, 1); + if (rck & CIO_PATHGONE_WAIT4INT) + out=1; + if (rck & CIO_PATHGONE_IOERR) + err=1; + if (rck & CIO_PATHGONE_DEVGONE) break; + } + + s390irq_spin_unlock(irq); - default: /* other stuff */ -#ifdef CONFIG_DEBUG_CHSC - printk( KERN_DEBUG - "s390_process_css: event %d\n", - chsc_area_sei->response_block. - response_block_data.sei_res.cc); -#endif /* CONFIG_DEBUG_CHSC */ - CIO_CRW_EVENT( 4, - "s390_process_css: event %d\n", - chsc_area_sei->response_block. - response_block_data.sei_res.cc); + /* Tell the device driver not to disturb us. */ + if (ioinfo[irq]->ui.flags.ready && + schib->pmcw.pim != 0x80 && + ioinfo[irq]->nopfunc) { + if (err) + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC_ERR); + else + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC); + } + + if (out) + break; + + if (ioinfo[irq]->ui.flags.ready) { + s390_schedule_path_verification(irq); + } else { + ioinfo[irq]->ui.flags.noio = 0; + ioinfo[irq]->ui.flags.killio = 0; + } + break; + } - break; +} - } +static void +__process_chp_come(int irq, int chpid) +{ + schib_t *schib = &ioinfo[irq]->schib; + int i; + + for (i=0;i<8;i++) { + + if (schib->pmcw.chpid[i] != chpid) + continue; + + /* Tell the device driver not to disturb us. */ + if (ioinfo[irq]->ui.flags.ready && + schib->pmcw.pim != 0x80 && + ioinfo[irq]->nopfunc) + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC); + + ioinfo[irq]->ui.flags.noio = 1; + + /* Do we still expect an interrupt for outstanding io? */ + if (ioinfo[irq]->ui.flags.busy) + /* Wait for interrupt. */ + break; + + if (ioinfo[irq]->ui.flags.ready) + s390_schedule_path_verification(irq); + else + ioinfo[irq]->ui.flags.noio = 0; + + break; + } +} + +static void +s390_process_chp_source(int chpid, int onoff) +{ + int irq; + int ret; + char dbf_txt[15]; + + sprintf(dbf_txt, "prchp%x", chpid); + CIO_TRACE_EVENT(2, dbf_txt); + +#ifdef CONFIG_CHSC + if (onoff == 0) { + clear_bit(chpid, &chpids); + } else { + set_bit(chpid, &chpids); + set_bit(chpid, &chpids_known); + } +#endif /* CONFIG_CHSC */ + + if (onoff == 0) { + for (irq=0;irq<=highest_subchannel;irq++) { + + if ((ioinfo[irq] == INVALID_STORAGE_AREA) + || (ioinfo[irq]->st != 0)) + continue; + + __process_chp_gone(irq, chpid); } + return; } - spin_unlock(&chsc_lock_sei); + + for (irq=0;irq<__MAX_SUBCHANNELS;irq++) { + + if (ioinfo[irq] == INVALID_STORAGE_AREA) { + ret = s390_validate_subchannel(irq,0); + if (ret == 0) { + if (irq > highest_subchannel) + highest_subchannel = irq; +#ifdef CONFIG_DEBUG_CRW + printk(KERN_DEBUG"process_chp_source: Found " + "device on irq %x\n", irq); +#endif /* CONFIG_DEBUG_CRW */ + CIO_CRW_EVENT(4, "Found device on irq %x\n", + irq); + s390_device_recognition_irq(irq); + } + } else if (ioinfo[irq]->st == 0) { + ret = stsch(irq, &ioinfo[irq]->schib); + if (ret != 0) + ret = -ENXIO; + } else + continue; + + if (ret == -ENXIO) + /* We're through. 
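The rework of s390_process_css() earlier in this hunk turns the single store-event-information call into a drain loop: bit 0x80 in the response flags means the channel subsystem still has events queued, so the CHSC must be reissued until the flag drops (bit 0x40 additionally warns that events were lost to overflow). Its skeleton, with the per-event dispatch elided:

	int do_sei = 1;

	while (do_sei) {
		do_sei = 0;

		/* Build and issue the CHSC store-event-information request. */
		memset(chsc_area_sei, 0, sizeof(chsc_area_t));
		chsc_area_sei->request_block.command_code1 = 0x0010;
		chsc_area_sei->request_block.command_code2 = 0x000E;

		if (chsc(chsc_area_sei))
			break;
		if (chsc_area_sei->response_block.response_code != 0x0001)
			break;	/* error, or no event information stored */

		/* ... dispatch on sei_res.cc as in the code above ... */

		/* More event information pending? Fetch that too. */
		if (chsc_area_sei->response_block.response_block_data.
		    sei_res.flags & 0x80)
			do_sei = 1;
	}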
*/ + return; + + if (ret != 0) + continue; + + __process_chp_come(irq, chpid); + } + } -#endif /* * s390_do_crw_pending @@ -7657,7 +7984,24 @@ printk (KERN_NOTICE "do_crw_pending : source is " "channel path %02X\n", chpid); #endif - CIO_CRW_EVENT(2, "source is channel path %02X\n"); + CIO_CRW_EVENT(2, "source is channel path %02X\n", + chpid); + switch (pcrwe->crw.erc) { + case CRW_ERC_IPARM: /* Path has come. */ + s390_process_chp_source(chpid, 1); + break; + case CRW_ERC_PERRI: /* Path has gone. */ + s390_process_chp_source(chpid, 0); + break; + default: +#ifdef CONFIG_DEBUG_CRW + printk(KERN_WARNING"do_crw_pending: don't " + "know how to handle erc=%x\n", + pcrwe->crw.erc); +#endif /* CONFIG_DEBUG_CRW */ + CIO_CRW_EVENT(0, "don't know how to handle " + "erc=%x\n", pcrwe->crw.erc); + } break; case CRW_RSC_CONFIG: @@ -7850,6 +8194,84 @@ } } +static void +__vary_chpid_offline(int irq, int chpid) +{ + schib_t *schib = &ioinfo[irq]->schib; + int i; + + for (i=0;i<8;i++) { + int mask = 0x80>>i; + int out = 0; + unsigned long flags; + + if (ioinfo[irq]->ssd_info.chpid[i] != chpid) + continue; + + s390irq_spin_lock_irqsave(irq, flags); + + ioinfo[irq]->ui.flags.noio = 1; + + /* Hmm, the path is not really gone... */ + if (ioinfo[irq]->ui.flags.busy) { + if (__check_for_io_and_kill(irq, mask, 0) != 0) + out=1; + } + + s390irq_spin_unlock_irqrestore(irq, flags); + + /* Tell the device driver not to disturb us. */ + if (ioinfo[irq]->ui.flags.ready && + schib->pmcw.pim != 0x80 && + ioinfo[irq]->nopfunc) + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC); + + if (out) + break; + + if (ioinfo[irq]->ui.flags.ready) + s390_schedule_path_verification(irq); + else + ioinfo[irq]->ui.flags.noio = 0; + + break; + } + +} + +static void +__vary_chpid_online(int irq, int chpid) +{ + schib_t *schib = &ioinfo[irq]->schib; + int i; + + for (i=0;i<8;i++) { + + if (schib->pmcw.chpid[i] != chpid) + continue; + + /* Tell the device driver not to disturb us. */ + if (ioinfo[irq]->ui.flags.ready && + schib->pmcw.pim != 0x80 && + ioinfo[irq]->nopfunc) + ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC); + + ioinfo[irq]->ui.flags.noio = 1; + + /* Do we still expect an interrupt for outstanding io? */ + if (ioinfo[irq]->ui.flags.busy) + /* Wait for interrupt. */ + break; + + if (ioinfo[irq]->ui.flags.ready) + s390_schedule_path_verification(irq); + else + ioinfo[irq]->ui.flags.noio = 0; + + break; + } +} + /* * Function: s390_vary_chpid @@ -7899,34 +8321,20 @@ for (irq=0;irq<=highest_subchannel;irq++) { - /* - * We don't need to adjust the opm, as this will be done in - * DevicePathVerification... 
- */ - if (ioinfo[irq] == INVALID_STORAGE_AREA) continue; if (ioinfo[irq]->st) continue; + + if (!ioinfo[irq]->ssd_info.valid) + continue; + + if (on) + __vary_chpid_online(irq, chpid); + else + __vary_chpid_offline(irq, chpid); - if (ioinfo[irq]->ssd_info.valid) { - if ((ioinfo[irq]->ssd_info.chpid[0] == chpid) || - (ioinfo[irq]->ssd_info.chpid[1] == chpid) || - (ioinfo[irq]->ssd_info.chpid[2] == chpid) || - (ioinfo[irq]->ssd_info.chpid[3] == chpid) || - (ioinfo[irq]->ssd_info.chpid[4] == chpid) || - (ioinfo[irq]->ssd_info.chpid[5] == chpid) || - (ioinfo[irq]->ssd_info.chpid[6] == chpid) || - (ioinfo[irq]->ssd_info.chpid[7] == chpid)) { -#ifdef CONFIG_DEBUG_CHSC - printk(KERN_DEBUG "Calling " - "DevicePathVerification for irq %d\n", - irq); -#endif /* CONFIG_DEBUG_CHSC */ - s390_DevicePathVerification(irq, 0); - } - } } return 0; @@ -8624,9 +9032,6 @@ #ifdef CONFIG_DEBUG_IO printk (KERN_DEBUG "/proc/cio_ignore: '%s'\n", buffer); #endif /* CONFIG_DEBUG_IO */ - if (cio_debug_initialized) - debug_sprintf_event (cio_debug_msg_id, 2, - "/proc/cio_ignore: '%s'\n", buffer); blacklist_parse_proc_parameters (buffer); @@ -8795,19 +9200,28 @@ chsc_get_sch_descriptions(); if (!cio_chsc_desc_avail) { len += sprintf(info->data+len, "no info available\n"); - } else { - for (i=0;idata+len, + "%02X n/a\n", + i); + else if (test_bit(i, &chpids_logical)) len += sprintf(info->data+len, "%02X online\n", i); - else if (test_bit(i, &chpids_known)) + else len += sprintf(info->data+len, - "%02X logically offline\n", + "%02X logically " + "offline\n", i); } + } + cont: info->len = len; } } @@ -8940,6 +9354,7 @@ EXPORT_SYMBOL (do_IO); EXPORT_SYMBOL (resume_IO); EXPORT_SYMBOL (ioinfo); +EXPORT_SYMBOL (diag210); EXPORT_SYMBOL (get_dev_info_by_irq); EXPORT_SYMBOL (get_dev_info_by_devno); EXPORT_SYMBOL (get_irq_by_devno); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/s390/s390mach.c linux.21rc5-ac2/drivers/s390/s390mach.c --- linux.21rc5/drivers/s390/s390mach.c 2003-05-28 15:08:05.000000000 +0100 +++ linux.21rc5-ac2/drivers/s390/s390mach.c 2003-05-28 15:13:38.000000000 +0100 @@ -121,7 +121,7 @@ #if 1 ctl_set_bit( 14, 28 ); // enable channel report MCH #endif -#ifdef CONFIG_MACHCK_WARNING +#ifdef CONFIG_MACHCHK_WARNING ctl_set_bit( 14, 24); /* enable warning MCH */ #endif @@ -152,7 +152,6 @@ return; } - /* * s390_do_machine_check * @@ -327,6 +326,16 @@ } /* endif */ #endif +#ifdef CONFIG_MACHCHK_WARNING + if ( pmache->mcic.mcc.mcd.w ) + { + ctrl_alt_del(); // shutdown NOW! 
+#ifdef S390_MACHCHK_DEBUG + printk( KERN_DEBUG "mach_handler : kill -SIGPWR init\n"); +#endif + } /* endif */ +#endif + s390_enqueue_free_mchchk( pmache ); } else @@ -429,7 +438,6 @@ return; } - /* * s390_enqueue_free_mchchk * @@ -655,4 +663,3 @@ return ( 1 ); } #endif - diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sbus/char/aurora.c linux.21rc5-ac2/drivers/sbus/char/aurora.c --- linux.21rc5/drivers/sbus/char/aurora.c 2003-05-28 15:07:58.000000000 +0100 +++ linux.21rc5-ac2/drivers/sbus/char/aurora.c 2003-04-22 16:44:37.000000000 +0100 @@ -1504,7 +1504,7 @@ } bp = port_Board(port); - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_DEBUG "aurora%d: aurora_close: bad port count; " "tty->count is 1, port count is %d\n", board_No(bp), port->count); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sbus/char/sab82532.c linux.21rc5-ac2/drivers/sbus/char/sab82532.c --- linux.21rc5/drivers/sbus/char/sab82532.c 2003-05-28 15:07:58.000000000 +0100 +++ linux.21rc5-ac2/drivers/sbus/char/sab82532.c 2003-04-22 16:44:37.000000000 +0100 @@ -1610,7 +1610,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("sab82532_close ttys%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sbus/char/su.c linux.21rc5-ac2/drivers/sbus/char/su.c --- linux.21rc5/drivers/sbus/char/su.c 2003-05-28 15:07:58.000000000 +0100 +++ linux.21rc5-ac2/drivers/sbus/char/su.c 2003-04-22 16:44:37.000000000 +0100 @@ -39,7 +39,7 @@ do { \ printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ kdevname(tty->device), (info->flags), serial_refcount, \ - info->count,tty->count,s); \ + info->count,atomic_read(&tty->count),s); \ } while (0) #else #define DBG_CNT(s) @@ -1756,7 +1756,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("su_close ttys%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sbus/char/zs.c linux.21rc5-ac2/drivers/sbus/char/zs.c --- linux.21rc5/drivers/sbus/char/zs.c 2003-05-28 15:07:58.000000000 +0100 +++ linux.21rc5-ac2/drivers/sbus/char/zs.c 2003-04-22 16:44:37.000000000 +0100 @@ -1547,7 +1547,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("zs_close tty-%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/aachba.c linux.21rc5-ac2/drivers/scsi/aacraid/aachba.c --- linux.21rc5/drivers/scsi/aacraid/aachba.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/aachba.c 2003-05-28 18:09:01.000000000 +0100 @@ -23,6 +23,7 @@ */ #include +#include #include #include #include @@ -224,6 +225,15 @@ static char *aac_get_status_string(u32 status); #endif +/* + * Non dasd selection is handled entirely in aachba now + */ + +MODULE_PARM(nondasd, "i"); +MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on"); + +static int nondasd = -1; + /** * aac_get_containers - list containers * @common: adapter to probe @@ -261,13 +271,14 @@ 1, 1, NULL, NULL); if (status < 0 ) { - printk(KERN_WARNING "ProbeContainers: SendFIB failed.\n"); + printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n"); break; } dresp = (struct aac_mount *)fib_data(fibptr); if ((le32_to_cpu(dresp->status) == ST_OK) && - (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { + (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && + (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { fsa_dev_ptr->valid[index] = 1; fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol); fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity); @@ -520,22 +531,54 @@ dev->name, dev->id, dev->adapter_info.serial[0], dev->adapter_info.serial[1]); - dev->pae_support = 0; + dev->nondasd_support = 0; - if( BITS_PER_LONG >= 64 && - (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ - printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id); + dev->raid_scsi_mode = 0; + if(dev->adapter_info.options & AAC_OPT_NONDASD){ + dev->nondasd_support = 1; + } + + /* + * If the firmware supports ROMB RAID/SCSI mode and we are currently + * in RAID/SCSI mode, set the flag. For now if in this mode we will + * force nondasd support on. If we decide to allow the non-dasd flag + * additional changes will have to be made to support + * RAID/SCSI. The function aac_scsi_cmd in this module will have to be + * changed to support the new dev->raid_scsi_mode flag instead of + * leeching off of the dev->nondasd_support flag. Also in linit.c the + * function aac_detect will have to be modified where it sets up the + * max number of channels based on the aac->nondasd_support flag only.
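The new nondasd parameter defaults to -1, meaning 'decide from the firmware option bits'; 0 or 1 force the behaviour either way, which the probe code just below applies as dev->nondasd_support = (nondasd != 0). A usage sketch, assuming the driver is built modular under its usual name:

	# force scanning for non-disk devices behind the adapter
	modprobe aacraid nondasd=1

	# or persistently, via /etc/modules.conf
	options aacraid nondasd=1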
+ */ + if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) + && (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) + { + dev->nondasd_support = 1; + dev->raid_scsi_mode = 1; + } + if (dev->raid_scsi_mode != 0) + printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",dev->name, dev->id); + + if (nondasd != -1) + dev->nondasd_support = (nondasd!=0); + + if(dev->nondasd_support != 0) + printk(KERN_INFO "%s%d: Non-DASD support enabled\n",dev->name, dev->id); + + dev->pae_support = 0; + if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ dev->pae_support = 1; } /* TODO - dmb temporary until fw can set this bit */ dev->pae_support = (BITS_PER_LONG >= 64); - if(dev->pae_support != 0) { + if(dev->pae_support != 0) + { printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id); + pci_set_dma_mask(dev->pdev, (dma_addr_t)0xFFFFFFFFFFFFFFFFULL); } - if(dev->adapter_info.options & AAC_OPT_NONDASD){ - dev->nondasd_support = 1; - } + fib_complete(fibptr); + fib_free(fibptr); + return rcode; } @@ -554,7 +597,7 @@ cid =TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun); lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; - dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies)); + dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies)); if (fibptr == NULL) BUG(); @@ -599,7 +642,7 @@ cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun); lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; - dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies)); + dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies)); if (fibptr == NULL) BUG(); @@ -765,7 +808,7 @@ lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; } - dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %lu, t = %ld.\n", smp_processor_id(), lba, jiffies)); + dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies)); /* * Allocate and initialize a Fib then setup a BlockWrite command */ @@ -1261,17 +1304,29 @@ /* * Next check the srb status */ - switch(le32_to_cpu(srbreply->srb_status)){ + switch( (le32_to_cpu(srbreply->srb_status))&0x3f){ case SRB_STATUS_ERROR_RECOVERY: case SRB_STATUS_PENDING: case SRB_STATUS_SUCCESS: if(scsicmd->cmnd[0] == INQUIRY ){ u8 b; + u8 b1; /* We can't expose disk devices because we can't tell whether they - * are the raw container drives or stand alone drives + * are the raw container drives or stand alone drives. If they have + * the removable bit set then we should expose them though. 
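The INQUIRY filtering this comment introduces (the code follows below) keys off two bytes of standard INQUIRY data: the peripheral device type in the low five bits of byte 0, and the removable-medium bit (0x80) in byte 1. Pulled out as a predicate, the rule looks roughly like this (function name invented; TYPE_* constants are the stock 2.4 scsi.h ones):

	static int
	expose_physical_device(u8 *inq, int raid_scsi_mode, int channel)
	{
		u8 type = inq[0] & 0x1f;	/* peripheral device type */
		u8 rmb  = inq[1] & 0x80;	/* removable medium bit */

		if (type == TYPE_TAPE || type == TYPE_WORM || type == TYPE_ROM ||
		    type == TYPE_MOD || type == TYPE_MEDIUM_CHANGER)
			return 1;	/* can never be a hidden container */
		if (type == TYPE_DISK && rmb)
			return 1;	/* removable disks are safe to expose */
		if (raid_scsi_mode && channel == 2)
			return 1;	/* ROMB RAID/SCSI mode: channel 2 is real SCSI */
		return 0;	/* fixed disk: may be a raw container, hide it */
	}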
*/ - b = *(u8*)scsicmd->buffer; - if( (b & 0x0f) == TYPE_DISK ){ + b = (*(u8*)scsicmd->buffer)&0x1f; + b1 = ((u8*)scsicmd->buffer)[1]; + if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER + || (b==TYPE_DISK && (b1&0x80)) ){ + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + /* + * We will allow disk devices if in RAID/SCSI mode and + * the channel is 2 + */ + } else if((dev->raid_scsi_mode)&&(scsicmd->channel == 2)){ + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + } else { scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; } } else { @@ -1293,6 +1348,28 @@ } scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; break; + case INQUIRY: { + u8 b; + u8 b1; + /* We can't expose disk devices because we can't tell whether they + * are the raw container drives or stand alone drives + */ + b = (*(u8*)scsicmd->buffer)&0x1f; + b1 = ((u8*)scsicmd->buffer)[1]; + if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER + || (b==TYPE_DISK && (b1&0x80)) ){ + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + /* + * We will allow disk devices if in RAID/SCSI mode and + * the channel is 2 + */ + } else if((dev->raid_scsi_mode)&&(scsicmd->channel == 2)){ + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + } else { + scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; + } + break; + } default: scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; break; @@ -1348,13 +1425,14 @@ case SRB_STATUS_DOMAIN_VALIDATION_FAIL: default: #ifdef AAC_DETAILED_STATUS_INFO - printk("aacraid: SRB ERROR (%s)\n",aac_get_status_string(le32_to_cpu(srbreply->srb_status))); + printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",le32_to_cpu(srbreply->srb_status&0x3f),aac_get_status_string(le32_to_cpu(srbreply->srb_status)), scsicmd->cmnd[0], le32_to_cpu(srbreply->scsi_status) ); #endif scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; break; } if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition int len; + scsicmd->result |= CHECK_CONDITION; len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
sizeof(scsicmd->sense_buffer):srbreply->sense_data_size; printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", le32_to_cpu(srbreply->status), len); @@ -1387,6 +1465,7 @@ struct aac_srb *srbcmd; u16 fibsize; u32 flag; + u32 timeout; if( scsicmd->target > 15 || scsicmd->lun > 7) { scsicmd->result = DID_NO_CONNECT << 16; @@ -1428,7 +1507,11 @@ srbcmd->target = cpu_to_le32(scsicmd->target); srbcmd->lun = cpu_to_le32(scsicmd->lun); srbcmd->flags = cpu_to_le32(flag); - srbcmd->timeout = cpu_to_le32(0); // timeout not used + timeout = (scsicmd->timeout-jiffies)/HZ; + if(timeout == 0){ + timeout = 1; + } + srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len); @@ -1533,6 +1616,7 @@ psg->count = cpu_to_le32(1); psg->sg[0].addr = cpu_to_le32(addr); psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen); + /* Cast to pointer from integer of different size */ scsicmd->SCp.ptr = (void *)addr; byte_count = scsicmd->request_bufflen; } @@ -1594,6 +1678,7 @@ psg->sg[0].addr[1] = (u32)(le_addr>>32); psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff); psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen); + /* Cast to pointer from integer of different size */ scsicmd->SCp.ptr = (void *)addr; byte_count = scsicmd->request_bufflen; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/aacraid.h linux.21rc5-ac2/drivers/scsi/aacraid/aacraid.h --- linux.21rc5/drivers/scsi/aacraid/aacraid.h 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/aacraid.h 2003-05-28 20:53:10.000000000 +0100 @@ -1,5 +1,7 @@ -#define dprintk(x) -/*#define dprintk(x) printk x */ +//#define dprintk(x) printk x +#if (!defined(dprintk)) +# define dprintk(x) +#endif /*------------------------------------------------------------------------------ * D E F I N E S @@ -8,12 +10,13 @@ #define MAXIMUM_NUM_CONTAINERS 31 #define MAXIMUM_NUM_ADAPTERS 8 -#define AAC_NUM_FIB 578 -#define AAC_NUM_IO_FIB 512 +#define AAC_NUM_FIB 578 +//#define AAC_NUM_IO_FIB 512 +#define AAC_NUM_IO_FIB 100 -#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1) +#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1) //#define AAC_MAX_TARGET (16) -#define AAC_MAX_LUN (8) +#define AAC_MAX_LUN (8) /* * These macros convert from physical channels to virtual channels @@ -269,11 +272,11 @@ u32 _ReceiverTimeStart; // Timestamp for receipt of fib u32 _ReceiverTimeDone; // Timestamp for completion of fib } _s; - struct list_head _FibLinks; // Used to link Adapter Initiated Fibs on the host +// struct aac_list_head _FibLinks; // Used to link Adapter Initiated Fibs on the host } _u; }; -#define FibLinks _u._FibLinks +//#define FibLinks _u._FibLinks #define FIB_DATA_SIZE_IN_BYTES (512 - sizeof(struct aac_fibhdr)) @@ -629,7 +632,7 @@ struct semaphore wait_sem; // this is used to wait for the next fib to arrive. int wait; // Set to true when thread is in WaitForSingleObject unsigned long count; // total number of FIBs on FibList - struct list_head fibs; + struct list_head fib_list; // this holds fibs which should be 32 bit addresses }; struct fsa_scsi_hba { @@ -639,7 +642,7 @@ u8 ro[MAXIMUM_NUM_CONTAINERS]; u8 locked[MAXIMUM_NUM_CONTAINERS]; u8 deleted[MAXIMUM_NUM_CONTAINERS]; - u32 devno[MAXIMUM_NUM_CONTAINERS]; + s32 devno[MAXIMUM_NUM_CONTAINERS]; }; struct fib { @@ -650,7 +653,6 @@ * The Adapter that this I/O is destined for. 
*/ struct aac_dev *dev; - u64 logicaladdr; /* 64 bit */ /* * This is the event the sendfib routine will wait on if the * caller did not pass one and this is synch io. @@ -667,9 +669,14 @@ * Outstanding I/O queue. */ struct list_head queue; - + /* + * And for the internal issue/reply queues (we may be able + * to merge these two) + */ + struct list_head fiblink; void *data; - struct hw_fib *fib; /* Actual shared object */ + struct hw_fib *hw_fib; /* Actual shared object */ + dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ }; /* @@ -696,6 +703,7 @@ u32 biosrev; u32 biosbuild; u32 cluster; + u32 clusterchannelmask; u32 serial[2]; u32 battery; u32 options; @@ -720,19 +728,22 @@ /* * Supported Options */ -#define AAC_OPT_SNAPSHOT cpu_to_le32(1) -#define AAC_OPT_CLUSTERS cpu_to_le32(1<<1) -#define AAC_OPT_WRITE_CACHE cpu_to_le32(1<<2) -#define AAC_OPT_64BIT_DATA cpu_to_le32(1<<3) -#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4) -#define AAC_OPT_RAID50 cpu_to_le32(1<<5) -#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6) -#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7) -#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8) -#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9) -#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10) -#define AAC_OPT_ALARM cpu_to_le32(1<<11) -#define AAC_OPT_NONDASD cpu_to_le32(1<<12) +#define AAC_OPT_SNAPSHOT cpu_to_le32(1) +#define AAC_OPT_CLUSTERS cpu_to_le32(1<<1) +#define AAC_OPT_WRITE_CACHE cpu_to_le32(1<<2) +#define AAC_OPT_64BIT_DATA cpu_to_le32(1<<3) +#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4) +#define AAC_OPT_RAID50 cpu_to_le32(1<<5) +#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6) +#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7) +#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8) +#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9) +#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10) +#define AAC_OPT_ALARM cpu_to_le32(1<<11) +#define AAC_OPT_NONDASD cpu_to_le32(1<<12) +#define AAC_OPT_SCSI_MANAGED cpu_to_le32(1<<13) +#define AAC_OPT_RAID_SCSI_MODE cpu_to_le32(1<<14) +#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<15) struct aac_dev { @@ -746,13 +757,12 @@ */ dma_addr_t hw_fib_pa; struct hw_fib *hw_fib_va; -#if BITS_PER_LONG >= 64 ulong fib_base_va; -#endif /* * Fib Headers */ - struct fib fibs[AAC_NUM_FIB]; + struct fib *fibs; + struct fib *free_fib; struct fib *timeout_fib; spinlock_t fib_lock; @@ -781,7 +791,7 @@ struct Scsi_Host *scsi_host_ptr; struct fsa_scsi_hba fsa_dev; - int thread_pid; + pid_t thread_pid; int cardtype; /* @@ -804,8 +814,15 @@ */ u8 nondasd_support; u8 pae_support; + u8 raid_scsi_mode; }; +#define AllocateAndMapFibSpace(dev, MapFibContext) \ + dev->a_ops.AllocateAndMapFibSpace(dev, MapFibContext) + +#define UnmapAndFreeFibSpace(dev, MapFibContext) \ + dev->a_ops.UnmapAndFreeFibSpace(dev, MapFibContext) + #define aac_adapter_interrupt(dev) \ dev->a_ops.adapter_interrupt(dev) @@ -1142,7 +1159,9 @@ u32 altoid; // != oid <==> snapshot or broken mirror exists }; -#define FSCS_READONLY 0x0002 /* possible result of broken mirror */ +#define FSCS_NOTCLEAN 0x0001 /* fsck is neccessary before mounting */ +#define FSCS_READONLY 0x0002 /* possible result of broken mirror */ +#define FSCS_HIDDEN 0x0004 /* should be ignored - set during a clear */ struct aac_query_mount { u32 command; @@ -1326,9 +1345,12 @@ */ #define AifCmdEventNotify 1 /* Notify of event */ +#define AifEnContainerChange 4 /* Container configuration change */ #define AifCmdJobProgress 2 /* Progress report */ #define AifCmdAPIReport 3 /* Report from other user of API */ 
#define AifCmdDriverNotify 4 /* Notify host driver of event */ +#define AifDenMorphComplete 200 /* A morph operation completed */ +#define AifDenVolumeExtendComplete 201 /* A volume expand operation completed */ #define AifReqJobList 100 /* Gets back complete job list */ #define AifReqJobsForCtr 101 /* Gets back jobs for specific container */ #define AifReqJobsForScsi 102 /* Gets back jobs for specific SCSI device */ @@ -1353,16 +1375,6 @@ u8 data[1]; /* Undefined length (from kernel viewpoint) */ }; -static inline u32 fib2addr(struct hw_fib *hw) -{ - return (u32)hw; -} - -static inline struct hw_fib *addr2fib(u32 addr) -{ - return (struct hw_fib *)addr; -} - const char *aac_driverinfo(struct Scsi_Host *); struct fib *fib_alloc(struct aac_dev *dev); int fib_setup(struct aac_dev *dev); @@ -1376,7 +1388,7 @@ int aac_consumer_avail(struct aac_dev * dev, struct aac_queue * q); void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); int fib_complete(struct fib * context); -#define fib_data(fibctx) ((void *)(fibctx)->fib->data) +#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) int aac_detach(struct aac_dev *dev); struct aac_dev *aac_init_adapter(struct aac_dev *dev); int aac_get_containers(struct aac_dev *dev); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/commctrl.c linux.21rc5-ac2/drivers/scsi/aacraid/commctrl.c --- linux.21rc5/drivers/scsi/aacraid/commctrl.c 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/commctrl.c 2003-05-28 20:54:15.000000000 +0100 @@ -63,7 +63,7 @@ if(fibptr == NULL) return -ENOMEM; - kfib = fibptr->fib; + kfib = fibptr->hw_fib; /* * First copy in the header so that we can check the size field. */ @@ -150,7 +150,7 @@ * the list to 0. */ fibctx->count = 0; - INIT_LIST_HEAD(&fibctx->fibs); + INIT_LIST_HEAD(&fibctx->fib_list); fibctx->jiffies = jiffies/HZ; /* * Now add this context onto the adapter's @@ -181,7 +181,7 @@ { struct fib_ioctl f; struct aac_fib_context *fibctx, *aifcp; - struct hw_fib * fib; + struct fib * fib; int status; struct list_head * entry; int found; @@ -211,12 +211,16 @@ } entry = entry->next; } - if (found == 0) + if (found == 0) { + dprintk ((KERN_INFO "Fib not found\n")); return -EINVAL; + } if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || - (fibctx->size != sizeof(struct aac_fib_context))) + (fibctx->size != sizeof(struct aac_fib_context))) { + dprintk ((KERN_INFO "Fib Context corrupt?\n")); return -EINVAL; + } status = 0; spin_lock_irqsave(&dev->fib_lock, flags); /* @@ -224,27 +228,28 @@ * -EAGAIN */ return_fib: - if (!list_empty(&fibctx->fibs)) { + if (!list_empty(&fibctx->fib_list)) { struct list_head * entry; /* * Pull the next fib from the fibs */ - entry = fibctx->fibs.next; + entry = fibctx->fib_list.next; list_del(entry); - fib = list_entry(entry, struct hw_fib, header.FibLinks); + fib = list_entry(entry, struct fib, fiblink); fibctx->count--; spin_unlock_irqrestore(&dev->fib_lock, flags); - if (copy_to_user(f.fib, fib, sizeof(struct hw_fib))) { + if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) { + kfree(fib->hw_fib); kfree(fib); return -EFAULT; } /* * Free the space occupied by this copy of the fib. 
*/ + kfree(fib->hw_fib); kfree(fib); status = 0; - fibctx->jiffies = jiffies/HZ; } else { spin_unlock_irqrestore(&dev->fib_lock, flags); if (f.wait) { @@ -259,28 +264,30 @@ status = -EAGAIN; } } + fibctx->jiffies = jiffies/HZ; return status; } int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx) { - struct hw_fib *fib; + struct fib *fib; /* * First free any FIBs that have not been consumed. */ - while (!list_empty(&fibctx->fibs)) { + while (!list_empty(&fibctx->fib_list)) { struct list_head * entry; /* * Pull the next fib from the fibs */ - entry = fibctx->fibs.next; + entry = fibctx->fib_list.next; list_del(entry); - fib = list_entry(entry, struct hw_fib, header.FibLinks); + fib = list_entry(entry, struct fib, fiblink); fibctx->count--; /* * Free the space occupied by this copy of the fib. */ + kfree(fib->hw_fib); kfree(fib); } /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/comminit.c linux.21rc5-ac2/drivers/scsi/aacraid/comminit.c --- linux.21rc5/drivers/scsi/aacraid/comminit.c 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/comminit.c 2003-05-28 20:54:30.000000000 +0100 @@ -39,6 +39,7 @@ #include #include #include +#include #include #include "scsi.h" #include "hosts.h" @@ -58,7 +59,6 @@ struct aac_init *init; dma_addr_t phys; - /* FIXME: Adaptec add 128 bytes to this value - WHY ?? */ size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz; base = pci_alloc_consistent(dev->pdev, size, &phys); @@ -74,14 +74,6 @@ dev->init = (struct aac_init *)(base + fibsize); dev->init_pa = phys + fibsize; - /* - * Cache the upper bits of the virtual mapping for 64bit boxes - * FIXME: this crap should be rewritten - */ -#if BITS_PER_LONG >= 64 - dev->fib_base_va = ((ulong)base & 0xffffffff00000000); -#endif - init = dev->init; init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); @@ -92,16 +84,20 @@ * Adapter Fibs are the first thing allocated so that they * start page aligned */ - init->AdapterFibsVirtualAddress = cpu_to_le32((u32)base); - init->AdapterFibsPhysicalAddress = cpu_to_le32(phys); + dev->fib_base_va = (ulong)base; + + /* We submit the physical address for AIF tags to limit to 32 bits */ + init->AdapterFibsVirtualAddress = cpu_to_le32((u32)phys); + init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys); init->AdapterFibsSize = cpu_to_le32(fibsize); init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib)); + init->HostPhysMemPages = cpu_to_le32(num_physpages); // number of 4k pages of host physical memory /* * Increment the base address by the amount already used */ base = base + fibsize + sizeof(struct aac_init); - phys = phys + fibsize + sizeof(struct aac_init); + phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init)); /* * Align the beginning of Headers to commalign */ @@ -111,8 +107,8 @@ /* * Fill in addresses of the Comm Area Headers and Queues */ - *commaddr = (unsigned long *)base; - init->CommHeaderAddress = cpu_to_le32(phys); + *commaddr = base; + init->CommHeaderAddress = cpu_to_le32((u32)phys); /* * Increment the base address by the size of the CommArea */ @@ -140,8 +136,8 @@ q->lock = &q->lockdata; q->headers.producer = mem; q->headers.consumer = mem+1; - *q->headers.producer = cpu_to_le32(qsize); - *q->headers.consumer = cpu_to_le32(qsize); + *(q->headers.producer) = cpu_to_le32(qsize); + *(q->headers.consumer) = cpu_to_le32(qsize); q->entries = qsize; } @@ -246,9 +242,9 @@ if (!aac_alloc_comm(dev, (void * 
*)&headers, size, QUEUE_ALIGNMENT)) return -ENOMEM; - queues = (struct aac_entry *)((unsigned char *)headers + hdrsize); + queues = (struct aac_entry *)(((ulong)headers) + hdrsize); - /* Adapter to Host normal proirity Command queue */ + /* Adapter to Host normal priority Command queue */ comm->queue[HostNormCmdQueue].base = queues; aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES); queues += HOST_NORM_CMD_ENTRIES; @@ -320,14 +316,18 @@ } memset(dev->queues, 0, sizeof(struct aac_queue_block)); - if (aac_comm_init(dev)<0) + if (aac_comm_init(dev)<0){ + kfree(dev->queues); return NULL; + } /* * Initialize the list of fibs */ - if(fib_setup(dev)<0) + if(fib_setup(dev)<0){ + kfree(dev->queues); return NULL; - + } + INIT_LIST_HEAD(&dev->fib_list); init_completion(&dev->aif_completion); /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/commsup.c linux.21rc5-ac2/drivers/scsi/aacraid/commsup.c --- linux.21rc5/drivers/scsi/aacraid/commsup.c 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/commsup.c 2003-05-28 20:55:58.000000000 +0100 @@ -42,6 +42,7 @@ #include #include #include +#include #include "scsi.h" #include "hosts.h" @@ -86,32 +87,32 @@ int fib_setup(struct aac_dev * dev) { struct fib *fibptr; - struct hw_fib *fib; - dma_addr_t fibpa; + struct hw_fib *hw_fib_va; + dma_addr_t hw_fib_pa; int i; if(fib_map_alloc(dev)<0) return -ENOMEM; - fib = dev->hw_fib_va; - fibpa = dev->hw_fib_pa; - memset(fib, 0, sizeof(struct hw_fib) * AAC_NUM_FIB); + hw_fib_va = dev->hw_fib_va; + hw_fib_pa = dev->hw_fib_pa; + memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB); /* * Initialise the fibs */ for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++) { fibptr->dev = dev; - fibptr->fib = fib; - fibptr->data = (void *) fibptr->fib->data; + fibptr->hw_fib = hw_fib_va; + fibptr->data = (void *) fibptr->hw_fib->data; fibptr->next = fibptr+1; /* Forward chain the fibs */ init_MUTEX_LOCKED(&fibptr->event_wait); spin_lock_init(&fibptr->event_lock); - fib->header.XferState = cpu_to_le32(0xffffffff); - fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib)); - fibptr->logicaladdr = (unsigned long) fibpa; - fib = (struct hw_fib *)((unsigned char *)fib + sizeof(struct hw_fib)); - fibpa = fibpa + sizeof(struct hw_fib); + hw_fib_va->header.XferState = cpu_to_le32(0xffffffff); + hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib)); + fibptr->hw_fib_pa = hw_fib_pa; + hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib)); + hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib); } /* * Add the fib chain to the free list @@ -152,7 +153,7 @@ * Null out fields that depend on being zero at the start of * each I/O */ - fibptr->fib->header.XferState = cpu_to_le32(0); + fibptr->hw_fib->header.XferState = cpu_to_le32(0); fibptr->callback = NULL; fibptr->callback_data = NULL; @@ -178,9 +179,9 @@ fibptr->next = fibptr->dev->timeout_fib; fibptr->dev->timeout_fib = fibptr; } else { - if (fibptr->fib->header.XferState != 0) { + if (fibptr->hw_fib->header.XferState != 0) { printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", - (void *)fibptr, fibptr->fib->header.XferState); + (void*)fibptr, fibptr->hw_fib->header.XferState); } fibptr->next = fibptr->dev->free_fib; fibptr->dev->free_fib = fibptr; @@ -197,14 +198,14 @@ void fib_init(struct fib *fibptr) { - struct hw_fib *fib = fibptr->fib; + struct hw_fib *hw_fib = fibptr->hw_fib; - 
fib->header.StructType = FIB_MAGIC; - fib->header.Size = cpu_to_le16(sizeof(struct hw_fib)); - fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable); - fib->header.SenderFibAddress = cpu_to_le32(0); - fib->header.ReceiverFibAddress = cpu_to_le32(0); - fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib)); + hw_fib->header.StructType = FIB_MAGIC; + hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib)); + hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable); + hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa); + hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa); + hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib)); } /** @@ -217,10 +218,10 @@ void fib_dealloc(struct fib * fibptr) { - struct hw_fib *fib = fibptr->fib; - if(fib->header.StructType != FIB_MAGIC) + struct hw_fib *hw_fib = fibptr->hw_fib; + if(hw_fib->header.StructType != FIB_MAGIC) BUG(); - fib->header.XferState = cpu_to_le32(0); + hw_fib->header.XferState = cpu_to_le32(0); } /* @@ -257,7 +258,7 @@ q = &dev->queues->queue[qid]; *index = le32_to_cpu(*(q->headers.producer)); - if (*index - 2 == le32_to_cpu(*(q->headers.consumer))) + if ((*index - 2) == le32_to_cpu(*(q->headers.consumer))) *nonotify = 1; if (qid == AdapHighCmdQueue) { @@ -304,7 +305,7 @@ * success. */ -static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * fib, int wait, struct fib * fibptr, unsigned long *nonotify) +static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify) { struct aac_entry * entry = NULL; int map = 0; @@ -322,7 +323,7 @@ /* * Setup queue entry with a command, status and fib mapped */ - entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size)); + entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); map = 1; } else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue) @@ -334,9 +335,10 @@ /* * Setup queue entry with command, status and fib mapped */ - entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size)); - entry->addr = cpu_to_le32(fib->header.SenderFibAddress); /* Restore adapters pointer to the FIB */ - fib->header.ReceiverFibAddress = fib->header.SenderFibAddress; /* Let the adapter now where to find its data */ + entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); + entry->addr = hw_fib->header.SenderFibAddress; + /* Restore adapters pointer to the FIB */ + hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter know where to find its data */ map = 0; } /* @@ -344,7 +346,7 @@ * in the queue entry. */ if (map) - entry->addr = cpu_to_le32((unsigned long)(fibptr->logicaladdr)); + entry->addr = fibptr->hw_fib_pa; return 0; } @@ -415,11 +417,11 @@ u32 qid; struct aac_dev * dev = fibptr->dev; unsigned long nointr = 0; - struct hw_fib * fib = fibptr->fib; + struct hw_fib * hw_fib = fibptr->hw_fib; struct aac_queue * q; unsigned long flags = 0; - if (!(le32_to_cpu(fib->header.XferState) & HostOwned)) + if (!(le32_to_cpu(hw_fib->header.XferState) & HostOwned)) return -EBUSY; /* * There are 5 cases with the wait and response requested flags. 
@@ -435,19 +437,22 @@ if (wait && !reply) { return -EINVAL; } else if (!wait && reply) { - fib->header.XferState |= cpu_to_le32(Async | ResponseExpected); + hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected); FIB_COUNTER_INCREMENT(aac_config.AsyncSent); } else if (!wait && !reply) { - fib->header.XferState |= cpu_to_le32(NoResponseExpected); + hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected); FIB_COUNTER_INCREMENT(aac_config.NoResponseSent); } else if (wait && reply) { - fib->header.XferState |= cpu_to_le32(ResponseExpected); + hw_fib->header.XferState |= cpu_to_le32(ResponseExpected); FIB_COUNTER_INCREMENT(aac_config.NormalSent); } /* * Map the fib into 32bits by using the fib number */ - fib->header.SenderData = fibptr-&dev->fibs[0]; /* for callback */ + +// hw_fib->header.SenderFibAddress = ((u32)(fibptr-dev->fibs)) << 1; + hw_fib->header.SenderFibAddress = cpu_to_le32((u32)(ulong)fibptr->hw_fib_pa); + hw_fib->header.SenderData = (u32)(fibptr - dev->fibs); /* * Set FIB state to indicate where it came from and if we want a * response from the adapter. Also load the command from the @@ -455,15 +460,14 @@ * * Map the hw fib pointer as a 32bit value */ - fib->header.SenderFibAddress = fib2addr(fib); - fib->header.Command = cpu_to_le16(command); - fib->header.XferState |= cpu_to_le32(SentFromHost); - fibptr->fib->header.Flags = 0; /* Zero the flags field - its internal only... */ + hw_fib->header.Command = cpu_to_le16(command); + hw_fib->header.XferState |= cpu_to_le32(SentFromHost); + fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/ /* * Set the size of the Fib we want to send to the adapter */ - fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size); - if (le16_to_cpu(fib->header.Size) > le16_to_cpu(fib->header.SenderSize)) { + hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size); + if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) { return -EMSGSIZE; } /* @@ -471,22 +475,25 @@ * the adapter a command is ready. */ if (priority == FsaHigh) { - fib->header.XferState |= cpu_to_le32(HighPriority); + hw_fib->header.XferState |= cpu_to_le32(HighPriority); qid = AdapHighCmdQueue; } else { - fib->header.XferState |= cpu_to_le32(NormalPriority); + hw_fib->header.XferState |= cpu_to_le32(NormalPriority); qid = AdapNormCmdQueue; } q = &dev->queues->queue[qid]; if(wait) spin_lock_irqsave(&fibptr->event_lock, flags); - if(aac_queue_get( dev, &index, qid, fib, 1, fibptr, &nointr)<0) + if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0) return -EWOULDBLOCK; dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index)); dprintk((KERN_DEBUG "Fib contents:.\n")); - dprintk((KERN_DEBUG " Command = %d.\n", fib->header.Command)); - dprintk((KERN_DEBUG " XferState = %x.\n", fib->header.XferState)); + dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command)); + dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState)); + dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib)); + dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); + dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); /* * Fill in the Callback and CallbackContext if we are not * going to wait. 
@@ -500,6 +507,7 @@ q->numpending++; fibptr->done = 0; + fibptr->flags = 0; if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0) return -EWOULDBLOCK; @@ -543,8 +551,7 @@ { u32 index; int status; - - if (*q->headers.producer == *q->headers.consumer) { + if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) { status = 0; } else { /* @@ -564,7 +571,7 @@ int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q) { - return (*q->headers.producer != *q->headers.consumer); + return (le32_to_cpu(*q->headers.producer) != le32_to_cpu(*q->headers.consumer)); } @@ -583,7 +590,7 @@ int wasfull = 0; u32 notify; - if (*q->headers.producer+1 == *q->headers.consumer) + if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer)) wasfull = 1; if (le32_to_cpu(*q->headers.consumer) >= q->entries) @@ -625,16 +632,15 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size) { - struct hw_fib * fib = fibptr->fib; + struct hw_fib * hw_fib = fibptr->hw_fib; struct aac_dev * dev = fibptr->dev; unsigned long nointr = 0; - - if (le32_to_cpu(fib->header.XferState) == 0) + if (le32_to_cpu(hw_fib->header.XferState) == 0) return 0; /* * If we plan to do anything check the structure type first. */ - if ( fib->header.StructType != FIB_MAGIC ) { + if ( hw_fib->header.StructType != FIB_MAGIC ) { return -EINVAL; } /* @@ -644,34 +650,34 @@ * and want to send a response back to the adapter. This will * send the completed cdb to the adapter. */ - if (fib->header.XferState & cpu_to_le32(SentFromAdapter)) { - fib->header.XferState |= cpu_to_le32(HostProcessed); - if (fib->header.XferState & cpu_to_le32(HighPriority)) { + if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { + hw_fib->header.XferState |= cpu_to_le32(HostProcessed); + if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) { u32 index; if (size) { size += sizeof(struct aac_fibhdr); - if (size > le16_to_cpu(fib->header.SenderSize)) + if (size > le16_to_cpu(hw_fib->header.SenderSize)) return -EMSGSIZE; - fib->header.Size = cpu_to_le16(size); + hw_fib->header.Size = cpu_to_le16(size); } - if(aac_queue_get(dev, &index, AdapHighRespQueue, fib, 1, NULL, &nointr) < 0) { + if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) { return -EWOULDBLOCK; } if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) { } } - else if (fib->header.XferState & NormalPriority) + else if (hw_fib->header.XferState & NormalPriority) { u32 index; if (size) { size += sizeof(struct aac_fibhdr); - if (size > le16_to_cpu(fib->header.SenderSize)) + if (size > le16_to_cpu(hw_fib->header.SenderSize)) return -EMSGSIZE; - fib->header.Size = cpu_to_le16(size); + hw_fib->header.Size = cpu_to_le16(size); } - if (aac_queue_get(dev, &index, AdapNormRespQueue, fib, 1, NULL, &nointr) < 0) + if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0) return -EWOULDBLOCK; if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) @@ -696,19 +702,19 @@ int fib_complete(struct fib * fibptr) { - struct hw_fib * fib = fibptr->fib; + struct hw_fib * hw_fib = fibptr->hw_fib; /* * Check for a fib which has already been completed */ - if (fib->header.XferState == cpu_to_le32(0)) + if (hw_fib->header.XferState == cpu_to_le32(0)) return 0; /* * If we plan to do anything check the structure type first. 
*/ - if (fib->header.StructType != FIB_MAGIC) + if (hw_fib->header.StructType != FIB_MAGIC) return -EINVAL; /* * This block completes a cdb which originated on the host and we @@ -716,19 +722,19 @@ * command is complete that we had sent to the adapter and this * cdb could be reused. */ - if((fib->header.XferState & cpu_to_le32(SentFromHost)) && - (fib->header.XferState & cpu_to_le32(AdapterProcessed))) + if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && + (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) { fib_dealloc(fibptr); } - else if(fib->header.XferState & cpu_to_le32(SentFromHost)) + else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost)) { /* * This handles the case when the host has aborted the I/O * to the adapter because the adapter is not responding */ fib_dealloc(fibptr); - } else if(fib->header.XferState & cpu_to_le32(HostOwned)) { + } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) { fib_dealloc(fibptr); } else { BUG(); @@ -776,16 +782,121 @@ * dispatches it to the appropriate routine for handling. */ +#define CONTAINER_TO_BUS(cont) (0) +#define CONTAINER_TO_TARGET(cont) ((cont)) +#define CONTAINER_TO_LUN(cont) (0) + static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) { - struct hw_fib * fib = fibptr->fib; - /* - * Set the status of this FIB to be Invalid parameter. - * - * *(u32 *)fib->data = ST_INVAL; - */ - *(u32 *)fib->data = cpu_to_le32(ST_OK); - fib_adapter_complete(fibptr, sizeof(u32)); + struct hw_fib * hw_fib = fibptr->hw_fib; + struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data; + int busy; + u32 container; + + /* Sniff for container changes */ + dprintk ((KERN_INFO "AifCmdDriverNotify=%x\n", le32_to_cpu(*(u32 *)aifcmd->data))); + switch (le32_to_cpu(*(u32 *)aifcmd->data)) { + case AifDenMorphComplete: + case AifDenVolumeExtendComplete: + case AifEnContainerChange: /* Not really a driver notify Event */ + + busy = 0; + container = le32_to_cpu(((u32 *)aifcmd->data)[1]); + dprintk ((KERN_INFO "container=%d(%d,%d,%d,%d) ", + container, + (dev && dev->scsi_host_ptr) + ? dev->scsi_host_ptr->host_no + : -1, + CONTAINER_TO_BUS(container), + CONTAINER_TO_TARGET(container), + CONTAINER_TO_LUN(container))); + + /* + * Find the Scsi_Device associated with the SCSI address, + * and mark it as changed, invalidating the cache. This deals + * with changes to existing device IDs. + */ + + if ((dev != (struct aac_dev *)NULL) + && (dev->scsi_host_ptr != (struct Scsi_Host *)NULL)) { + Scsi_Device * device; + + for (device = dev->scsi_host_ptr->host_queue; + device != (Scsi_Device *)NULL; + device = device->next) { + dprintk((KERN_INFO + "aifd: device (%d,%d,%d,%d)?\n", + dev->scsi_host_ptr->host_no, + device->channel, + device->id, + device->lun)); + if ((device->channel == CONTAINER_TO_BUS(container)) + && (device->id == CONTAINER_TO_TARGET(container)) + && (device->lun == CONTAINER_TO_LUN(container))) { + busy |= (device->access_count != 0); + if (busy == 0) { + device->removable = TRUE; + } + } + } + } + dprintk (("busy=%d\n", busy)); + + /* + * if (busy == 0) { + * scan_scsis(dev->scsi_host_ptr, 1, + * CONTAINER_TO_BUS(container), + * CONTAINER_TO_TARGET(container), + * CONTAINER_TO_LUN(container)); + * } + * is not exported as accessible, so we need to go around it + * another way. So, we look for the "proc/scsi/scsi" entry in + * the proc filesystem (using proc_scsi as a shortcut) and send + * it a message. This deals with new devices that have + * appeared. 
If the device has gone offline, scan_scsis will + * also discover this, but we do not want the device to + * go away. We need to check the access_count for the + * device since we do not want the devices to go away. + */ + if ((busy == 0) + && (proc_scsi != (struct proc_dir_entry *)NULL)) { + struct proc_dir_entry * entry; + + dprintk((KERN_INFO "proc_scsi=%p ", proc_scsi)); + for (entry = proc_scsi->subdir; + entry != (struct proc_dir_entry *)NULL; + entry = entry->next) { + dprintk(("\"%.*s\"[%d]=%x ", entry->namelen, + entry->name, entry->namelen, entry->low_ino)); + if ((entry->low_ino != 0) + && (entry->namelen == 4) + && (memcmp ("scsi", entry->name, 4) == 0)) { + dprintk(("%p->write_proc=%p ", entry, entry->write_proc)); + if (entry->write_proc != (int (*)(struct file *, const char *, unsigned long, void *))NULL) { + char buffer[80]; + int length; + + sprintf (buffer, + "scsi add-single-device %d %d %d %d\n", + dev->scsi_host_ptr->host_no, + CONTAINER_TO_BUS(container), + CONTAINER_TO_TARGET(container), + CONTAINER_TO_LUN(container)); + length = strlen (buffer); + dprintk((KERN_INFO + "echo %.*s > /proc/scsi/scsi\n", + length-1, + buffer)); + length = entry->write_proc( + NULL, buffer, length, NULL); + dprintk((KERN_INFO + "returns %d\n", length)); + } + break; + } + } + } + } } /** @@ -800,8 +911,8 @@ int aac_command_thread(struct aac_dev * dev) { - struct hw_fib *fib, *newfib; - struct fib fibptr; /* for error logging */ + struct hw_fib *hw_fib, *hw_newfib; + struct fib *fib, *newfib; struct aac_queue_block *queues = dev->queues; struct aac_fib_context *fibctx; unsigned long flags; @@ -822,9 +933,9 @@ * Let the DPC know it has a place to send the AIF's to. */ dev->aif_thread = 1; - memset(&fibptr, 0, sizeof(struct fib)); add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); set_current_state(TASK_INTERRUPTIBLE); + dprintk ((KERN_INFO "aac_command_thread start\n")); while(1) { spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); @@ -833,37 +944,47 @@ struct aac_aifcmd * aifcmd; set_current_state(TASK_RUNNING); - + entry = queues->queue[HostNormCmdQueue].cmdq.next; list_del(entry); - + spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); - fib = list_entry(entry, struct hw_fib, header.FibLinks); + fib = list_entry(entry, struct fib, fiblink); /* * We will process the FIB here or pass it to a * worker thread that is TBD. We really can't * do anything at this point since we don't have * anything defined for this thread to do. */ - memset(&fibptr, 0, sizeof(struct fib)); - fibptr.type = FSAFS_NTC_FIB_CONTEXT; - fibptr.size = sizeof( struct fib ); - fibptr.fib = fib; - fibptr.data = fib->data; - fibptr.dev = dev; + hw_fib = fib->hw_fib; + + memset(fib, 0, sizeof(struct fib)); + fib->type = FSAFS_NTC_FIB_CONTEXT; + fib->size = sizeof( struct fib ); + fib->hw_fib = hw_fib; + fib->data = hw_fib->data; + fib->dev = dev; /* * We only handle AifRequest fibs from the adapter. */ - aifcmd = (struct aac_aifcmd *) fib->data; - if (aifcmd->command == le16_to_cpu(AifCmdDriverNotify)) { - aac_handle_aif(dev, &fibptr); + aifcmd = (struct aac_aifcmd *) hw_fib->data; + if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { + /* Handle Driver Notify Events */ + aac_handle_aif(dev, fib); + *(u32 *)hw_fib->data = cpu_to_le32(ST_OK); + fib_adapter_complete(fib, sizeof(u32)); } else { + struct list_head *entry; /* The u32 here is important and intended. 
We are using 32bit wrapping time to fit the adapter field */ u32 time_now, time_last; unsigned long flagv; + /* Sniff events */ + if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)) + aac_handle_aif(dev, fib); + time_now = jiffies/HZ; spin_lock_irqsave(&dev->fib_lock, flagv); @@ -885,6 +1006,11 @@ */ if (fibctx->count > 20) { + /* + * It's *not* jiffies folks, + * but jiffies / HZ, so do not + * panic ... + */ time_last = fibctx->jiffies; /* * Has it been > 2 minutes @@ -901,17 +1027,21 @@ * Warning: no sleep allowed while * holding spinlock */ - newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); - if (newfib) { + hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); + newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC); + if (newfib && hw_newfib) { /* * Make the copy of the FIB + * FIXME: check if we need to fix other fields up */ - memcpy(newfib, fib, sizeof(struct hw_fib)); + memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib)); + memcpy(newfib, fib, sizeof(struct fib)); + newfib->hw_fib = hw_newfib; /* * Put the FIB onto the * fibctx's fibs */ - list_add_tail(&newfib->header.FibLinks, &fibctx->fibs); + list_add_tail(&newfib->fiblink, &fibctx->fib_list); fibctx->count++; /* * Set the event to wake up the @@ -920,17 +1050,22 @@ up(&fibctx->wait_sem); } else { printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); + if(newfib) + kfree(newfib); + if(hw_newfib) + kfree(hw_newfib); } entry = entry->next; } /* * Set the status of this FIB */ - *(u32 *)fib->data = cpu_to_le32(ST_OK); - fib_adapter_complete(&fibptr, sizeof(u32)); + *(u32 *)hw_fib->data = cpu_to_le32(ST_OK); + fib_adapter_complete(fib, sizeof(u32)); spin_unlock_irqrestore(&dev->fib_lock, flagv); } spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); + kfree(fib); } /* * There are no more AIF's diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/dpcsup.c linux.21rc5-ac2/drivers/scsi/aacraid/dpcsup.c --- linux.21rc5/drivers/scsi/aacraid/dpcsup.c 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/dpcsup.c 2003-05-28 20:56:26.000000000 +0100 @@ -74,12 +74,14 @@ */ while(aac_consumer_get(dev, q, &entry)) { - int fast; + u32 fast ; + fast = (entry->addr & cpu_to_le32(0x01)); +// fib = &dev->fibs[(entry->addr >> 1)]; +// hwfib = fib->hw_fib; + hwfib = bus_to_virt(le32_to_cpu(entry->addr & cpu_to_le32(~0x01))); + fib = &dev->fibs[hwfib->header.SenderData]; - fast = (int) (entry->addr & 0x01); - hwfib = addr2fib(entry->addr & ~0x01); aac_consumer_free(dev, q, HostNormRespQueue); - fib = &dev->fibs[hwfib->header.SenderData]; /* * Remove this fib from the Outstanding I/O queue. * But only if it has not already been timed out. @@ -169,30 +171,51 @@ * up the waiters until there are no more QEs. We then return * back to the system. */ + dprintk((KERN_INFO + "dev=%p, dev->comm_phys=%x, dev->comm_addr=%p, dev->comm_size=%u\n", + dev, (u32)dev->comm_phys, dev->comm_addr, (unsigned)dev->comm_size)); + while(aac_consumer_get(dev, q, &entry)) { - struct hw_fib * fib; - fib = addr2fib(entry->addr); + struct fib fibctx; + struct fib *fib = &fibctx; + u32 hw_fib_pa = le32_to_cpu(entry->addr & cpu_to_le32(~0x01)); + struct hw_fib * hw_fib_va = ((dev->comm_phys <= hw_fib_pa) + && (hw_fib_pa < (dev->comm_phys + dev->comm_size))) + ? 
dev->comm_addr + (hw_fib_pa - dev->comm_phys) + : /* inconceivable */ bus_to_virt(hw_fib_pa); + dprintk((KERN_INFO "hw_fib_pa=%x hw_fib_va=%p\n", hw_fib_pa, hw_fib_va)); - if (dev->aif_thread) { - list_add_tail(&fib->header.FibLinks, &q->cmdq); + /* + * Allocate a FIB at all costs. For non queued stuff + * we can just use the stack so we are happy. We need + * a fib object in order to manage the linked lists + */ + if (dev->aif_thread) + if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC))==NULL) + fib = &fibctx; + + memset(fib, 0, sizeof(struct fib)); + INIT_LIST_HEAD(&fib->fiblink); + fib->type = FSAFS_NTC_FIB_CONTEXT; + fib->size = sizeof(struct fib); + fib->hw_fib = hw_fib_va; + fib->data = hw_fib_va->data; + fib->dev = dev; + + if (dev->aif_thread && fib != &fibctx) + { + list_add_tail(&fib->fiblink, &q->cmdq); aac_consumer_free(dev, q, HostNormCmdQueue); wake_up_interruptible(&q->cmdready); } else { - struct fib fibctx; aac_consumer_free(dev, q, HostNormCmdQueue); spin_unlock_irqrestore(q->lock, flags); - memset(&fibctx, 0, sizeof(struct fib)); - fibctx.type = FSAFS_NTC_FIB_CONTEXT; - fibctx.size = sizeof(struct fib); - fibctx.fib = fib; - fibctx.data = fib->data; - fibctx.dev = dev; /* * Set the status of this FIB */ - *(u32 *)fib->data = cpu_to_le32(ST_OK); - fib_adapter_complete(&fibctx, sizeof(u32)); + *(u32 *)hw_fib_va->data = cpu_to_le32(ST_OK); + fib_adapter_complete(fib, sizeof(u32)); spin_lock_irqsave(q->lock, flags); } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/linit.c linux.21rc5-ac2/drivers/scsi/aacraid/linit.c --- linux.21rc5/drivers/scsi/aacraid/linit.c 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/linit.c 2003-05-28 17:56:47.000000000 +0100 @@ -35,8 +35,8 @@ * */ -#define AAC_DRIVER_VERSION "0.9.9ac6-TEST" -#define AAC_DRIVER_BUILD_DATE __DATE__ +#define AAC_DRIVER_VERSION "1.1.2" +#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__ #include #include @@ -59,12 +59,12 @@ #define AAC_DRIVERNAME "aacraid" MODULE_AUTHOR("Red Hat Inc and Adaptec"); -MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, PERC 320/DC, Adaptec 2120S, 2200S, 5400S, and HP NetRAID-4M devices. http://domsch.com/linux/ or http://linux.adaptec.com"); +MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, Adaptec Advanced Raid Products, and HP NetRAID-4M devices. http://domsch.com/linux/ or http://linux.adaptec.com"); MODULE_LICENSE("GPL"); -MODULE_PARM(nondasd, "i"); -MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on"); +MODULE_PARM(paemode, "i"); +MODULE_PARM_DESC(paemode, "Control whether dma addressing is using PAE. 
0=off, 1=on"); -static int nondasd=-1; +static int paemode = -1; struct aac_dev *aac_devices[MAXIMUM_NUM_ADAPTERS]; @@ -81,25 +81,41 @@ */ static struct aac_driver_ident aac_drivers[] = { - { 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 2/Si */ - { 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */ - { 0x1028, 0x0003, 0x1028, 0x0003, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Si */ - { 0x1028, 0x0004, 0x1028, 0x00d0, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Si */ - { 0x1028, 0x0002, 0x1028, 0x00d1, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */ - { 0x1028, 0x0002, 0x1028, 0x00d9, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */ - { 0x1028, 0x000a, 0x1028, 0x0106, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */ - { 0x1028, 0x000a, 0x1028, 0x011b, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */ - { 0x1028, 0x000a, 0x1028, 0x0121, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di */ - { 0x9005, 0x0283, 0x9005, 0x0283, aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2 }, /* catapult*/ - { 0x9005, 0x0284, 0x9005, 0x0284, aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2 }, /* tomcat*/ - { 0x9005, 0x0285, 0x9005, 0x0286, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1 }, /* Adaptec 2120S (Crusader)*/ - { 0x9005, 0x0285, 0x9005, 0x0285, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2 }, /* Adaptec 2200S (Vulcan)*/ - { 0x9005, 0x0285, 0x9005, 0x0287, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2 }, /* Adaptec 2200S (Vulcan-2m)*/ - { 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* Dell PERC 320/DC */ + { 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 2/Si (Iguana/PERC2Si) */ + { 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di (Opal/PERC3Di) */ + { 0x1028, 0x0003, 0x1028, 0x0003, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Si (SlimFast/PERC3Si */ + { 0x1028, 0x0004, 0x1028, 0x00d0, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */ + { 0x1028, 0x0002, 0x1028, 0x00d1, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di (Viper/PERC3DiV) */ + { 0x1028, 0x0002, 0x1028, 0x00d9, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di (Lexus/PERC3DiL) */ + { 0x1028, 0x000a, 0x1028, 0x0106, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */ + { 0x1028, 0x000a, 0x1028, 0x011b, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di (Dagger/PERC3DiD) */ + { 0x1028, 0x000a, 0x1028, 0x0121, aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2 }, /* PERC 3/Di (Boxster/PERC3DiB) */ + { 0x9005, 0x0283, 0x9005, 0x0283, aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2 }, /* catapult */ + { 0x9005, 0x0284, 0x9005, 0x0284, aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2 }, /* tomcat */ + { 0x9005, 0x0285, 0x9005, 0x0286, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1 }, /* Adaptec 2120S (Crusader) */ + { 0x9005, 0x0285, 0x9005, 0x0285, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2 }, /* Adaptec 2200S (Vulcan) */ + { 0x9005, 0x0285, 0x9005, 0x0287, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2 }, /* Adaptec 2200S (Vulcan-2m) */ + { 0x9005, 0x0285, 0x17aa, 0x0286, aac_rx_init, 
"aacraid", "Legend ", "Legend S220 ", 1 }, /* Legend S220 (Legend Crusader) */ + { 0x9005, 0x0285, 0x17aa, 0x0287, aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2 }, /* Legend S230 (Legend Vulcan) */ + + { 0x9005, 0x0285, 0x9005, 0x0288, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */ + { 0x9005, 0x0285, 0x9005, 0x0289, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */ + { 0x9005, 0x0285, 0x9005, 0x028a, aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020S PCI-X ", 2 }, /* ASR-2020S PCI-X ZCR (Skyhawk) */ + { 0x9005, 0x0285, 0x9005, 0x028b, aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020S PCI-X ", 2 }, /* ASR-2020S SO-DIMM PCI-X ZCR (Terminator) */ + { 0x9005, 0x0285, 0x9005, 0x0290, aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 2 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */ + { 0x9005, 0x0285, 0x1028, 0x0291, aac_rx_init, "aacraid", "DELL ", "CERC SATA RAID 2 ", 2 }, /* CERC SATA RAID 2 PCI SATA 8ch (DellCorsair) */ + { 0x9005, 0x0285, 0x9005, 0x0292, aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 2 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */ + { 0x9005, 0x0285, 0x9005, 0x0293, aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA ", 2 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */ + { 0x9005, 0x0285, 0x9005, 0x0294, aac_rx_init, "aacraid", "ADAPTEC ", "SO-DIMM SATA ZCR ", 2 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */ + /* ServeRAID */ +/* { 0x9005, 0x0250, 0x1014, 0x0279, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec ", 2 }, */ /* (Marco) */ +/* { 0x9005, 0x0250, 0x1014, 0x028c, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec ", 2 }, */ /* (Sebring)*/ + + { 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2 }, /* Perc 320/DC*/ { 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4 }, /* Adaptec 5400S (Mustang)*/ { 0x1011, 0x0046, 0x9005, 0x0364, aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4 }, /* Adaptec 5400S (Mustang)*/ { 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4 }, /* Dell PERC2 "Quad Channel" */ - { 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid", "HP ", "NetRAID-4M ", 4 } /* HP NetRAID-4M */ + { 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4 } /* HP NetRAID-4M */ }; #define NUM_AACTYPES (sizeof(aac_drivers) / sizeof(struct aac_driver_ident)) @@ -159,10 +175,11 @@ struct fsa_scsi_hba *fsa_dev_ptr; char *name = NULL; - printk(KERN_INFO "Red Hat/Adaptec aacraid driver, %s\n", AAC_DRIVER_BUILD_DATE); + printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n", AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE); /* setting up the proc directory structure */ template->proc_name = "aacraid"; + spin_unlock_irq(&io_request_lock); for( index = 0; index != num_aacdrivers; index++ ) { @@ -207,9 +224,6 @@ * These three parameters can be used to allow for wide SCSI * and for host adapters that support multiple buses. 
*/ - host_ptr->max_id = 17; - host_ptr->max_lun = 8; - host_ptr->max_channel = 1; host_ptr->irq = dev->irq; /* Adapter IRQ number */ /* host_ptr->base = ( char * )(dev->resource[0].start & ~0xff); */ host_ptr->base = dev->resource[0].start; @@ -236,9 +250,13 @@ /* attach a pointer back to Scsi_Host */ aac->scsi_host_ptr = host_ptr; aac->pdev = dev; - aac->cardtype = index; aac->name = aac->scsi_host_ptr->hostt->name; aac->id = aac->scsi_host_ptr->unique_id; + aac->cardtype = index; + + aac->fibs = (struct fib*) kmalloc(sizeof(struct fib)*AAC_NUM_FIB, GFP_KERNEL); + spin_lock_init(&aac->fib_lock); + /* Initialize the ordinal number of the device to -1 */ fsa_dev_ptr = &(aac->fsa_dev); for( container = 0; container < MAXIMUM_NUM_CONTAINERS; container++ ) @@ -255,15 +273,6 @@ } dprintk((KERN_DEBUG "%s:%d device initialization successful.\n", name, host_ptr->unique_id)); aac_get_adapter_info(aac); - if(nondasd != -1) - { - /* someone told us how to set this on the cmdline */ - aac->nondasd_support = (nondasd!=0); - } - if(aac->nondasd_support != 0){ - printk(KERN_INFO "%s%d: Non-DASD support enabled\n", aac->name, aac->id); - } - dprintk((KERN_DEBUG "%s:%d options flag %04x.\n",name, host_ptr->unique_id,aac->adapter_info.options)); if(aac->nondasd_support == 1) { /* @@ -280,21 +289,11 @@ aac_devices[aac_count-1] = aac; /* - * dmb - we may need to move these 3 parms somewhere else once + * dmb - we may need to move the setting of these parms somewhere else once * we get a fib that can report the actual numbers */ host_ptr->max_id = AAC_MAX_TARGET; host_ptr->max_lun = AAC_MAX_LUN; - - /* - * If we are PAE capable then our future DMA mappings - * (for read/write commands) are 64bit clean and don't - * need bouncing. This assumes we do no other 32bit only - * allocations (eg fib table expands) after this point. - */ - - if(aac->pae_support) - pci_set_dma_mask(dev, 0xFFFFFFFFFFFFFFFFUL); } } @@ -302,6 +301,7 @@ if((aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops))<0) printk(KERN_WARNING "aacraid: unable to register \"aac\" device.\n"); } + spin_lock_irq(&io_request_lock); template->present = aac_count; /* # of cards of this type found */ return aac_count; @@ -738,7 +738,10 @@ if(write || offset > 0) return 0; *start_ptr = proc_buffer; - return sprintf(proc_buffer, "%s %d\n", "Raid Controller, scsi hba number", host_no); + return sprintf(proc_buffer, + "Adaptec Raid Controller %s %s, scsi hba number %d\n", + AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE, + host_no); } EXPORT_NO_SYMBOLS; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/README linux.21rc5-ac2/drivers/scsi/aacraid/README --- linux.21rc5/drivers/scsi/aacraid/README 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/README 2003-05-28 17:08:04.000000000 +0100 @@ -18,6 +18,12 @@ ADAPTEC 2120S ADAPTEC 2200S ADAPTEC 5400S + Legend S220 + Legend S230 + Adaptec 3230S + Adaptec 3240S + ASR-2020S PCI-X + AAR-2410SA SATA People ------------------------- @@ -28,15 +34,22 @@ added new ioctls, changed scsi interface to use new error handler, increased the number of fibs and outstanding commands to a container) + (fixed 64bit and 64G memory model, changed confusing naming convention + where fibs that go to the hardware are consistently called hw_fibs and + not just fibs like the name of the driver tracking structure) +Mark Salyzyn Fixed panic issues and added some new product ids for upcoming hbas. 
+ Original Driver ------------------------- Adaptec Unix OEM Product Group Mailing List ------------------------- -None currently. Also note this is very different to Brian's original driver +linux-aacraid-devel@dell.com (Interested parties troll here) +http://mbserver.adaptec.com/ (Currently more Community Support than Devel Support) +Also note this is very different to Brian's original driver so don't expect him to support it. -Adaptec does support this driver. Contact either tech support or deanna bonds. +Adaptec does support this driver. Contact either tech support or Mark Salyzyn. Original by Brian Boerner February 2001 Rewritten by Alan Cox, November 2001 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/rx.c linux.21rc5-ac2/drivers/scsi/aacraid/rx.c --- linux.21rc5/drivers/scsi/aacraid/rx.c 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/rx.c 2003-05-12 16:51:41.000000000 +0100 @@ -400,6 +400,11 @@ * Start any kernel threads needed */ dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0); + if(dev->thread_pid < 0) + { + printk(KERN_ERR "aacraid: Unable to create rx thread.\n"); + return -1; + } /* * Tell the adapter that all is configured, and it can start * accepting requests diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/sa.c linux.21rc5-ac2/drivers/scsi/aacraid/sa.c --- linux.21rc5/drivers/scsi/aacraid/sa.c 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/sa.c 2003-05-28 17:40:22.000000000 +0100 @@ -349,7 +349,7 @@ * Wait for the adapter to be up and running. Wait up to 3 minutes. */ while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) { - if (time_after(start+180*HZ, jiffies)) { + if (time_after(jiffies, start+180*HZ)) { status = sa_readl(dev, Mailbox7) >> 16; printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %d.\n", name, instance, le32_to_cpu(status)); return -1; @@ -384,6 +384,11 @@ * Start any kernel threads needed */ dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0); + if (dev->thread_pid < 0) { + printk(KERN_ERR "aacraid: Unable to create command thread.\n"); + return -1; + } + /* * Tell the adapter that all is configured, and it can start * accepting requests diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/aacraid/TODO linux.21rc5-ac2/drivers/scsi/aacraid/TODO --- linux.21rc5/drivers/scsi/aacraid/TODO 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/aacraid/TODO 2003-05-28 17:08:10.000000000 +0100 @@ -2,3 +2,4 @@ o More testing o Feature request: display the firmware/bios/etc revisions in the /proc info +o 2.5.0 and beyond. 
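Editorial note between the aacraid and megaraid portions of this patch: every aacraid hunk above serves one refactor - the driver tracking structure (struct fib) no longer doubles as the object handed to the adapter (struct hw_fib). A minimal sketch of the resulting send/complete flow, using only field names taken from the hunks above (illustration, not code from the patch):

	struct fib *fibptr = fib_alloc(dev);
	struct hw_fib *hw_fib = fibptr->hw_fib;	/* actual shared object */

	/* The adapter is handed a 32-bit DMA address, never a kernel pointer */
	hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	/* ... plus an index into dev->fibs so completions can find the owner */
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);

	/* In the response DPC (dpcsup.c above) the owner is recovered by index */
	struct fib *owner = &dev->fibs[hw_fib->header.SenderData];

This is also why the fib2addr()/addr2fib() casts could be deleted: no kernel virtual address round-trips through the hardware any more.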
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/Config.in linux.21rc5-ac2/drivers/scsi/Config.in --- linux.21rc5/drivers/scsi/Config.in 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/Config.in 2003-05-12 16:30:48.000000000 +0100 @@ -65,7 +65,10 @@ dep_tristate 'AdvanSys SCSI support' CONFIG_SCSI_ADVANSYS $CONFIG_SCSI dep_tristate 'Always IN2000 SCSI support' CONFIG_SCSI_IN2000 $CONFIG_SCSI dep_tristate 'AM53/79C974 PCI SCSI support' CONFIG_SCSI_AM53C974 $CONFIG_SCSI $CONFIG_PCI -dep_tristate 'AMI MegaRAID support' CONFIG_SCSI_MEGARAID $CONFIG_SCSI +dep_tristate 'AMI MegaRAID support (old driver)' CONFIG_SCSI_MEGARAID $CONFIG_SCSI +if [ "$CONFIG_SCSI_MEGARAID" != "y" ]; then + dep_tristate 'AMI MegaRAID support (new driver)' CONFIG_SCSI_MEGARAID2 $CONFIG_SCSI +fi dep_tristate 'BusLogic SCSI support' CONFIG_SCSI_BUSLOGIC $CONFIG_SCSI if [ "$CONFIG_SCSI_BUSLOGIC" != "n" ]; then diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/Makefile linux.21rc5-ac2/drivers/scsi/Makefile --- linux.21rc5/drivers/scsi/Makefile 2003-05-28 15:08:59.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/Makefile 2003-05-09 18:19:17.000000000 +0100 @@ -110,6 +110,7 @@ obj-$(CONFIG_SCSI_DC390T) += tmscsim.o obj-$(CONFIG_SCSI_AM53C974) += AM53C974.o obj-$(CONFIG_SCSI_MEGARAID) += megaraid.o +obj-$(CONFIG_SCSI_MEGARAID2) += megaraid2.o obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_SUNESP) += esp.o obj-$(CONFIG_SCSI_GDTH) += gdth.o diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/megaraid2.c linux.21rc5-ac2/drivers/scsi/megaraid2.c --- linux.21rc5/drivers/scsi/megaraid2.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/megaraid2.c 2003-05-03 19:30:21.000000000 +0100 @@ -0,0 +1,5573 @@ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2002 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Copyright (c) 2002 Red Hat, Inc. All rights reserved. + * - fixes + * - speed-ups (list handling fixes, issued_list, optimizations.) + * - lots of cleanups. + * + * Version : v2.00.5 (Apr 24, 2003) - Atul Mukker + * + * Description: Linux device driver for LSI Logic MegaRAID controller + * + * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493 + * 518, 520, 531, 532 + * + * This driver is supported by LSI Logic, with assistance from Red Hat, Dell, + * and others. Please send updates to the public mailing list + * linux-megaraid-devel@dell.com, and subscribe to and read archives of this + * list at http://lists.us.dell.com/. + * + * For history of changes, see ChangeLog.megaraid. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sd.h" +#include "scsi.h" +#include "hosts.h" + +#include "megaraid2.h" + +MODULE_AUTHOR ("LSI Logic Corporation"); +MODULE_DESCRIPTION ("LSI Logic MegaRAID driver"); +MODULE_LICENSE ("GPL"); + +static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN; +MODULE_PARM(max_cmd_per_lun, "i"); +MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)"); + +static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO; +MODULE_PARM(max_sectors_per_io, "h"); +MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)"); + + +static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT; +MODULE_PARM(max_mbox_busy_wait, "h"); +MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)"); + +#define RDINDOOR(adapter) readl((adapter)->base + 0x20) +#define RDOUTDOOR(adapter) readl((adapter)->base + 0x2C) +#define WRINDOOR(adapter,value) writel(value, (adapter)->base + 0x20) +#define WROUTDOOR(adapter,value) writel(value, (adapter)->base + 0x2C) + +/* + * Global variables + */ + +static int hba_count; +static adapter_t *hba_soft_state[MAX_CONTROLLERS]; +static struct proc_dir_entry *mega_proc_dir_entry; + +static struct notifier_block mega_notifier = { + .notifier_call = megaraid_reboot_notify +}; + +/* For controller re-ordering */ +static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; + +/* + * The File Operations structure for the serial/ioctl interface of the driver + */ +static struct file_operations megadev_fops = { + .ioctl = megadev_ioctl, + .open = megadev_open, + .release = megadev_close, + .owner = THIS_MODULE, +}; + +/* + * Array to structures for storing the information about the controllers. This + * information is sent to the user level applications, when they do an ioctl + * for this information. + */ +static struct mcontroller mcontroller[MAX_CONTROLLERS]; + +/* The current driver version */ +static u32 driver_ver = 0x02000000; + +/* major number used by the device for character interface */ +static int major; + +#define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01) + + +/* + * Debug variable to print some diagnostic messages + */ +static int trace_level; + +/* + * megaraid_validate_parms() + * + * Validate that any module parms passed in + * have proper values. + */ +static void +megaraid_validate_parms(void) +{ + if( (max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN) ) + max_cmd_per_lun = MAX_CMD_PER_LUN; + if( max_mbox_busy_wait > MBOX_BUSY_WAIT ) + max_mbox_busy_wait = MBOX_BUSY_WAIT; +} + + +/** + * megaraid_detect() + * @host_template - Our soft state maintained by mid-layer + * + * the detect entry point for the mid-layer. + * We scan the PCI bus for our controllers and start them. + * + * Note: PCI_DEVICE_ID_PERC4_DI below represents the PERC4/Di class of + * products. All of them share the same vendor id, device id, and subsystem + * vendor id but different subsystem ids. As of now, driver does not use the + * subsystem id. 
+ */ +static int +megaraid_detect(Scsi_Host_Template *host_template) +{ + int i; + u16 dev_sw_table[] = { /* Table of all supported + vendor/device ids */ + + PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DISCOVERY, + PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4_DI, + PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4_QC_VERDE, + PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID, + PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2, + PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_AMI_MEGARAID3 }; + + + printk(KERN_NOTICE "megaraid: " MEGARAID_VERSION); + + megaraid_validate_parms(); + + /* + * Scan the PCI bus for all our devices. + */ + for( i = 0; i < sizeof(dev_sw_table)/sizeof(u16); i += 2 ) { + + mega_find_card(host_template, dev_sw_table[i], + dev_sw_table[i+1]); + } + + if(hba_count) { + /* + * re-order hosts so that one with bootable logical drive + * comes first + */ + mega_reorder_hosts(); + +#ifdef CONFIG_PROC_FS + mega_proc_dir_entry = proc_mkdir("megaraid", &proc_root); + + if(!mega_proc_dir_entry) { + printk(KERN_WARNING + "megaraid: failed to create megaraid root\n"); + } + else { + for(i = 0; i < hba_count; i++) { + mega_create_proc_entry(i, mega_proc_dir_entry); + } + } +#endif + + /* + * Register the driver as a character device, for applications + * to access it for ioctls. + * First argument (major) to register_chrdev implies a dynamic + * major number allocation. + */ + major = register_chrdev(0, "megadev", &megadev_fops); + + /* + * Register the Shutdown Notification hook in kernel + */ + if(register_reboot_notifier(&mega_notifier)) { + printk(KERN_WARNING + "MegaRAID Shutdown routine not registered!!\n"); + } + + } + + return hba_count; +} + + + +/** + * mega_find_card() - find and start this controller + * @host_template - Our soft state maintained by mid-layer + * @pci_vendor - pci vendor id for this controller + * @pci_device - pci device id for this controller + * + * Scans the PCI bus for this vendor and device id combination, sets up the + * resources, registers ourselves as a SCSI HBA driver, and sets up all + * parameters for our soft state. + * + * This routine also checks for some buggy firmware and adjusts the flags + * accordingly. 
+ */ +static void +mega_find_card(Scsi_Host_Template *host_template, u16 pci_vendor, + u16 pci_device) +{ + struct Scsi_Host *host = NULL; + adapter_t *adapter = NULL; + u32 magic64; + unsigned long mega_baseport; + u16 subsysid, subsysvid; + u8 pci_bus; + u8 pci_dev_func; + u8 irq; + struct pci_dev *pdev = NULL; + u8 did_ioremap_f = 0; + u8 did_req_region_f = 0; + u8 did_scsi_reg_f = 0; + u8 got_ipdev_f = 0; + u8 alloc_int_buf_f = 0; + u8 alloc_scb_f = 0; + u8 got_irq_f = 0; + u8 did_setup_mbox_f = 0; + unsigned long tbase; + unsigned long flag = 0; + int i, j; + + while((pdev = pci_find_device(pci_vendor, pci_device, pdev))) { + + if(pci_enable_device (pdev)) continue; + + pci_bus = pdev->bus->number; + pci_dev_func = pdev->devfn; + + /* + * For these vendor and device ids, signature offsets are not + * valid and 64 bit is implicit + */ + if( (pci_vendor == PCI_VENDOR_ID_DELL && + pci_device == PCI_DEVICE_ID_PERC4_DI) || + (pci_vendor == PCI_VENDOR_ID_LSI_LOGIC && + pci_device == PCI_DEVICE_ID_PERC4_QC_VERDE) ) { + + flag |= BOARD_64BIT; + } + else { + pci_read_config_dword(pdev, PCI_CONF_AMISIG64, + &magic64); + + if (magic64 == HBA_SIGNATURE_64BIT) + flag |= BOARD_64BIT; + } + + subsysvid = pdev->subsystem_vendor; + subsysid = pdev->subsystem_device; + + /* + * If we do not find the valid subsys vendor id, refuse to + * load the driver. This is part of PCI200X compliance + * We load the driver if subsysvid is 0. + */ + if( subsysvid && (subsysvid != AMI_SUBSYS_VID) && + (subsysvid != DELL_SUBSYS_VID) && + (subsysvid != HP_SUBSYS_VID) && + (subsysvid != LSI_SUBSYS_VID) ) continue; + + + printk(KERN_NOTICE "megaraid: found 0x%4.04x:0x%4.04x:bus %d:", + pci_vendor, pci_device, pci_bus); + + printk("slot %d:func %d\n", + PCI_SLOT(pci_dev_func), PCI_FUNC(pci_dev_func)); + + /* Read the base port and IRQ from PCI */ + mega_baseport = pci_resource_start(pdev, 0); + irq = pdev->irq; + + tbase = mega_baseport; + + if( pci_resource_flags(pdev, 0) & IORESOURCE_MEM ) { + + if( check_mem_region(mega_baseport, 128) ) { + printk(KERN_WARNING + "megaraid: mem region busy!\n"); + continue; + } + request_mem_region(mega_baseport, 128, + "MegaRAID: LSI Logic Corporation."); + + mega_baseport = + (unsigned long)ioremap(mega_baseport, 128); + + if( !mega_baseport ) { + printk(KERN_WARNING + "megaraid: could not map hba memory\n"); + + release_mem_region(tbase, 128); + + continue; + } + + flag |= BOARD_MEMMAP; + + did_ioremap_f = 1; + } + else { + mega_baseport += 0x10; + + if( !request_region(mega_baseport, 16, "megaraid") ) + goto fail_attach; + + flag |= BOARD_IOMAP; + + did_req_region_f = 1; + } + + /* Initialize SCSI Host structure */ + host = scsi_register(host_template, sizeof(adapter_t)); + + if(!host) goto fail_attach; + + did_scsi_reg_f = 1; + + scsi_set_pci_device(host, pdev); + + adapter = (adapter_t *)host->hostdata; + memset(adapter, 0, sizeof(adapter_t)); + + /* + * Allocate a pci device structure for allocations done + * internally - all of which would be in memory <4GB + */ + adapter->ipdev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL); + + if( adapter->ipdev == NULL ) goto fail_attach; + + got_ipdev_f = 1; + + memcpy(adapter->ipdev, pdev, sizeof(struct pci_dev)); + + if( pci_set_dma_mask(adapter->ipdev, 0xffffffff) != 0 ) + goto fail_attach; + + printk(KERN_NOTICE + "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", + host->host_no, mega_baseport, irq); + + adapter->base = mega_baseport; + + /* Copy resource info into structure */ + INIT_LIST_HEAD(&adapter->free_list); + 
INIT_LIST_HEAD(&adapter->pending_list); + + adapter->flag = flag; + spin_lock_init(&adapter->lock); + + host->cmd_per_lun = max_cmd_per_lun; + host->max_sectors = max_sectors_per_io; + + adapter->dev = pdev; + adapter->host = host; + + adapter->host->irq = irq; + + if( flag & BOARD_MEMMAP ) { + adapter->host->base = tbase; + } + else { + adapter->host->io_port = tbase; + adapter->host->n_io_port = 16; + } + + adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; + + /* + * Allocate buffer to issue internal commands. + */ + adapter->mega_buffer = pci_alloc_consistent(adapter->dev, + MEGA_BUFFER_SIZE, &adapter->buf_dma_handle); + + if( !adapter->mega_buffer ) { + printk(KERN_WARNING "megaraid: out of RAM.\n"); + goto fail_attach; + } + alloc_int_buf_f = 1; + + adapter->scb_list = kmalloc(sizeof(scb_t)*MAX_COMMANDS, + GFP_KERNEL); + + if(!adapter->scb_list) { + printk(KERN_WARNING "megaraid: out of RAM.\n"); + goto fail_attach; + } + + alloc_scb_f = 1; + + /* Request our IRQ */ + if( adapter->flag & BOARD_MEMMAP ) { + if(request_irq(irq, megaraid_isr_memmapped, SA_SHIRQ, + "megaraid", adapter)) { + printk(KERN_WARNING + "megaraid: Couldn't register IRQ %d!\n", + irq); + goto fail_attach; + } + } + else { + if(request_irq(irq, megaraid_isr_iomapped, SA_SHIRQ, + "megaraid", adapter)) { + printk(KERN_WARNING + "megaraid: Couldn't register IRQ %d!\n", + irq); + goto fail_attach; + } + } + got_irq_f = 1; + + if( mega_setup_mailbox(adapter) != 0 ) + goto fail_attach; + + did_setup_mbox_f = 1; + + if( mega_query_adapter(adapter) != 0 ) + goto fail_attach; + + /* + * Have checks for some buggy f/w + */ + if((subsysid == 0x1111) && (subsysvid == 0x1111)) { + /* + * Which firmware + */ + if (!strcmp(adapter->fw_version, "3.00") || + !strcmp(adapter->fw_version, "3.01")) { + + printk( KERN_WARNING + "megaraid: Your card is a Dell PERC " + "2/SC RAID controller with " + "firmware\nmegaraid: 3.00 or 3.01. " + "This driver is known to have " + "corruption issues\nmegaraid: with " + "those firmware versions on this " + "specific card. In order\nmegaraid: " + "to protect your data, please upgrade " + "your firmware to version\nmegaraid: " + "3.10 or later, available from the " + "Dell Technical Support web\n" + "megaraid: site at\nhttp://support." + "dell.com/us/en/filelib/download/" + "index.asp?fileid=2940\n" + ); + } + } + + /* + * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with + * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit + * support, since this firmware cannot handle 64 bit + * addressing + */ + + if((subsysvid == HP_SUBSYS_VID) && + ((subsysid == 0x60E7)||(subsysid == 0x60E8))) { + + /* + * which firmware + */ + if( !strcmp(adapter->fw_version, "H01.07") || + !strcmp(adapter->fw_version, "H01.08") || + !strcmp(adapter->fw_version, "H01.09") ) { + + printk(KERN_WARNING + "megaraid: Firmware H.01.07, " + "H.01.08, and H.01.09 on 1M/2M " + "controllers\n" + "megaraid: do not support 64 bit " + "addressing.\nmegaraid: DISABLING " + "64 bit support.\n"); + adapter->flag &= ~BOARD_64BIT; + } + } + + + if(mega_is_bios_enabled(adapter)) { + mega_hbas[hba_count].is_bios_enabled = 1; + } + mega_hbas[hba_count].hostdata_addr = adapter; + + /* + * Find out which channel is raid and which is scsi. This is + * for ROMB support. + */ + mega_enum_raid_scsi(adapter); + + /* + * Find out if a logical drive is set as the boot drive. If + * there is one, will make that as the first logical drive. + * ROMB: Do we have to boot from a physical drive. 
Then all
+	 * the physical drives would appear before the logical disks.
+	 * Else, all the physical drives would be exported to the mid
+	 * layer after logical drives.
+	 */
+	mega_get_boot_drv(adapter);
+
+	if( ! adapter->boot_pdrv_enabled ) {
+		for( i = 0; i < NVIRT_CHAN; i++ )
+			adapter->logdrv_chan[i] = 1;
+
+		for( i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++ )
+			adapter->logdrv_chan[i] = 0;
+
+		adapter->mega_ch_class <<= NVIRT_CHAN;
+	}
+	else {
+		j = adapter->product_info.nchannels;
+		for( i = 0; i < j; i++ )
+			adapter->logdrv_chan[i] = 0;
+
+		for( i = j; i < NVIRT_CHAN + j; i++ )
+			adapter->logdrv_chan[i] = 1;
+	}
+
+
+	/*
+	 * Do we support random deletion and addition of logical
+	 * drives
+	 */
+	adapter->read_ldidmap = 0;	/* set it after first logdrv
+					   delete cmd */
+	adapter->support_random_del = mega_support_random_del(adapter);
+
+	/* Initialize SCBs */
+	if(mega_init_scb(adapter)) {
+		goto fail_attach;
+	}
+
+	/*
+	 * Reset the pending commands counter
+	 */
+	atomic_set(&adapter->pend_cmds, 0);
+
+	/*
+	 * Reset the adapter quiescent flag
+	 */
+	atomic_set(&adapter->quiescent, 0);
+
+	hba_soft_state[hba_count] = adapter;
+
+	/*
+	 * Fill in the structure which needs to be passed back to the
+	 * application when it does an ioctl() for controller related
+	 * information.
+	 */
+	i = hba_count;
+
+	mcontroller[i].base = mega_baseport;
+	mcontroller[i].irq = irq;
+	mcontroller[i].numldrv = adapter->numldrv;
+	mcontroller[i].pcibus = pci_bus;
+	mcontroller[i].pcidev = pci_device;
+	mcontroller[i].pcifun = PCI_FUNC (pci_dev_func);
+	mcontroller[i].pciid = -1;
+	mcontroller[i].pcivendor = pci_vendor;
+	mcontroller[i].pcislot = PCI_SLOT (pci_dev_func);
+	mcontroller[i].uid = (pci_bus << 8) | pci_dev_func;
+
+
+	/* Set the Mode of addressing to 64 bit if we can */
+	if((adapter->flag & BOARD_64BIT)&&(sizeof(dma_addr_t) == 8)) {
+		pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
+		adapter->has_64bit_addr = 1;
+	}
+	else  {
+		pci_set_dma_mask(pdev, 0xffffffff);
+		adapter->has_64bit_addr = 0;
+	}
+
+	init_MUTEX(&adapter->int_mtx);
+	init_waitqueue_head(&adapter->int_waitq);
+
+	adapter->this_id = DEFAULT_INITIATOR_ID;
+	adapter->host->this_id = DEFAULT_INITIATOR_ID;
+
+#if MEGA_HAVE_CLUSTERING
+	/*
+	 * Is cluster support enabled on this controller
+	 * Note: In a cluster the HBAs ( the initiators ) will have
+	 * different target IDs and we cannot assume it to be 7.
Call + * to mega_support_cluster() will get the target ids also if + * the cluster support is available + */ + adapter->has_cluster = mega_support_cluster(adapter); + + if( adapter->has_cluster ) { + printk(KERN_NOTICE + "megaraid: Cluster driver, initiator id:%d\n", + adapter->this_id); + } +#endif + + hba_count++; + continue; + +fail_attach: + if( did_setup_mbox_f ) { + pci_free_consistent(adapter->dev, sizeof(mbox64_t), + (void *)adapter->una_mbox64, + adapter->una_mbox64_dma); + } + + if( got_irq_f ) { + irq_disable(adapter); + free_irq(adapter->host->irq, adapter); + } + + if( alloc_scb_f ) { + kfree(adapter->scb_list); + } + + if( alloc_int_buf_f ) { + pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE, + (void *)adapter->mega_buffer, + adapter->buf_dma_handle); + } + + if( got_ipdev_f ) kfree(adapter->ipdev); + + if( did_scsi_reg_f ) scsi_unregister(host); + + if( did_ioremap_f ) { + iounmap((void *)mega_baseport); + release_mem_region(tbase, 128); + } + + if( did_req_region_f ) + release_region(mega_baseport, 16); + } + + return; +} + + +/** + * mega_setup_mailbox() + * @adapter - pointer to our soft state + * + * Allocates a 8 byte aligned memory for the handshake mailbox. + */ +static int +mega_setup_mailbox(adapter_t *adapter) +{ + unsigned long align; + + adapter->una_mbox64 = pci_alloc_consistent(adapter->dev, + sizeof(mbox64_t), &adapter->una_mbox64_dma); + + if( !adapter->una_mbox64 ) return -1; + + adapter->mbox = &adapter->una_mbox64->mbox; + + adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) & + (~0UL ^ 0xFUL)); + + adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8); + + align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox); + + adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align; + + /* + * Register the mailbox if the controller is an io-mapped controller + */ + if( adapter->flag & BOARD_IOMAP ) { + + outb_p(adapter->mbox_dma & 0xFF, + adapter->host->io_port + MBOX_PORT0); + + outb_p((adapter->mbox_dma >> 8) & 0xFF, + adapter->host->io_port + MBOX_PORT1); + + outb_p((adapter->mbox_dma >> 16) & 0xFF, + adapter->host->io_port + MBOX_PORT2); + + outb_p((adapter->mbox_dma >> 24) & 0xFF, + adapter->host->io_port + MBOX_PORT3); + + outb_p(ENABLE_MBOX_BYTE, + adapter->host->io_port + ENABLE_MBOX_REGION); + + irq_ack(adapter); + + irq_enable(adapter); + } + + return 0; +} + + +/* + * mega_query_adapter() + * @adapter - pointer to our soft state + * + * Issue the adapter inquiry commands to the controller and find out + * information and parameter about the devices attached + */ +static int +mega_query_adapter(adapter_t *adapter) +{ + dma_addr_t prod_info_dma_handle; + mega_inquiry3 *inquiry3; + u8 raw_mbox[16]; + mbox_t *mbox; + int retval; + + /* Initialize adapter inquiry mailbox */ + + mbox = (mbox_t *)raw_mbox; + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + memset(mbox, 0, 16); + + /* + * Try to issue Inquiry3 command + * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and + * update enquiry3 structure + */ + mbox->xferaddr = (u32)adapter->buf_dma_handle; + + inquiry3 = (mega_inquiry3 *)adapter->mega_buffer; + + raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ + raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */ + raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 
0x02 */ + + /* Issue a blocking command to the card */ + if ((retval = issue_scb_block(adapter, raw_mbox))) { + /* the adapter does not support 40ld */ + + mraid_ext_inquiry *ext_inq; + mraid_inquiry *inq; + dma_addr_t dma_handle; + + ext_inq = pci_alloc_consistent(adapter->dev, + sizeof(mraid_ext_inquiry), &dma_handle); + + if( ext_inq == NULL ) return -1; + + inq = &ext_inq->raid_inq; + + mbox->xferaddr = (u32)dma_handle; + + /*issue old 0x04 command to adapter */ + mbox->cmd = MEGA_MBOXCMD_ADPEXTINQ; + + issue_scb_block(adapter, raw_mbox); + + /* + * update Enquiry3 and ProductInfo structures with + * mraid_inquiry structure + */ + mega_8_to_40ld(inq, inquiry3, + (mega_product_info *)&adapter->product_info); + + pci_free_consistent(adapter->dev, sizeof(mraid_ext_inquiry), + ext_inq, dma_handle); + + } else { /*adapter supports 40ld */ + adapter->flag |= BOARD_40LD; + + /* + * get product_info, which is static information and will be + * unchanged + */ + prod_info_dma_handle = pci_map_single(adapter->dev, (void *) + &adapter->product_info, + sizeof(mega_product_info), PCI_DMA_FROMDEVICE); + + mbox->xferaddr = prod_info_dma_handle; + + raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ + raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */ + + if ((retval = issue_scb_block(adapter, raw_mbox))) + printk(KERN_WARNING + "megaraid: Product_info cmd failed with error: %d\n", + retval); + + pci_dma_sync_single(adapter->dev, prod_info_dma_handle, + sizeof(mega_product_info), + PCI_DMA_FROMDEVICE); + + pci_unmap_single(adapter->dev, prod_info_dma_handle, + sizeof(mega_product_info), PCI_DMA_FROMDEVICE); + } + + + /* + * kernel scans the channels from 0 to <= max_channel + */ + adapter->host->max_channel = + adapter->product_info.nchannels + NVIRT_CHAN -1; + + adapter->host->max_id = 16; /* max targets per channel */ + + adapter->host->max_lun = 7; /* Upto 7 luns for non disk devices */ + + adapter->host->cmd_per_lun = max_cmd_per_lun; + + adapter->numldrv = inquiry3->num_ldrv; + + adapter->max_cmds = adapter->product_info.max_commands; + + if(adapter->max_cmds > MAX_COMMANDS) + adapter->max_cmds = MAX_COMMANDS; + + adapter->host->can_queue = adapter->max_cmds - 1; + + /* + * Get the maximum number of scatter-gather elements supported by this + * firmware + */ + mega_get_max_sgl(adapter); + + adapter->host->sg_tablesize = adapter->sglen; + + + /* use HP firmware and bios version encoding */ + if (adapter->product_info.subsysvid == HP_SUBSYS_VID) { + sprintf (adapter->fw_version, "%c%d%d.%d%d", + adapter->product_info.fw_version[2], + adapter->product_info.fw_version[1] >> 8, + adapter->product_info.fw_version[1] & 0x0f, + adapter->product_info.fw_version[0] >> 8, + adapter->product_info.fw_version[0] & 0x0f); + sprintf (adapter->bios_version, "%c%d%d.%d%d", + adapter->product_info.bios_version[2], + adapter->product_info.bios_version[1] >> 8, + adapter->product_info.bios_version[1] & 0x0f, + adapter->product_info.bios_version[0] >> 8, + adapter->product_info.bios_version[0] & 0x0f); + } else { + memcpy(adapter->fw_version, + (char *)adapter->product_info.fw_version, 4); + adapter->fw_version[4] = 0; + + memcpy(adapter->bios_version, + (char *)adapter->product_info.bios_version, 4); + + adapter->bios_version[4] = 0; + } + + printk(KERN_NOTICE "megaraid: [%s:%s] detected %d logical drives.\n", + adapter->fw_version, adapter->bios_version, adapter->numldrv); + + /* + * Do we support extended (>10 bytes) cdbs + */ + adapter->support_ext_cdb = mega_support_ext_cdb(adapter); + if 
(adapter->support_ext_cdb)
+		printk(KERN_NOTICE "megaraid: supports extended CDBs.\n");
+
+
+	return 0;
+}
+
+
+/*
+ * megaraid_queue()
+ * @scmd - Issue this scsi command
+ * @done - the callback hook into the scsi mid-layer
+ *
+ * The command queuing entry point for the mid-layer.
+ */
+static int
+megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *))
+{
+	adapter_t	*adapter;
+	scb_t	*scb;
+	int	busy=0;
+
+	adapter = (adapter_t *)scmd->host->hostdata;
+
+	scmd->scsi_done = done;
+
+
+	/*
+	 * Allocate and build a SCB request
+	 * busy flag will be set if mega_build_cmd() could not allocate
+	 * an scb. We will return non-zero status in that case.
+	 * NOTE: scb can be null even though certain commands completed
+	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we return
+	 * 0 in that case.
+	 */
+
+	scb = mega_build_cmd(adapter, scmd, &busy);
+
+	if(scb) {
+		scb->state |= SCB_PENDQ;
+		list_add_tail(&scb->list, &adapter->pending_list);
+
+		/*
+		 * Check if the HBA is in quiescent state, e.g., during a
+		 * delete logical drive operation. If it is, don't run
+		 * the pending_list.
+		 */
+		if(atomic_read(&adapter->quiescent) == 0) {
+			mega_runpendq(adapter);
+		}
+		return 0;
+	}
+
+	return busy;
+}
+
+
+/**
+ * mega_build_cmd()
+ * @adapter - pointer to our soft state
+ * @cmd - Prepare using this scsi command
+ * @busy - busy flag if no resources
+ *
+ * Prepares a command and scatter gather list for the controller. This routine
+ * also finds out if the command is intended for a logical drive or a
+ * physical device and prepares the controller command accordingly.
+ *
+ * We also re-order the logical drives and physical devices based on their
+ * boot settings.
+ */
+static scb_t *
+mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
+{
+	mega_ext_passthru	*epthru;
+	mega_passthru	*pthru;
+	scb_t	*scb;
+	mbox_t	*mbox;
+	long	seg;
+	char	islogical;
+	int	max_ldrv_num;
+	int	channel = 0;
+	int	target = 0;
+	int	ldrv_num = 0;	/* logical drive number */
+
+
+	/*
+	 * filter the internal and ioctl commands
+	 */
+	if((cmd->cmnd[0] == MEGA_INTERNAL_CMD)) {
+		return cmd->buffer;
+	}
+
+
+	/*
+	 * We know what channels our logical drives are on - mega_find_card()
+	 */
+	islogical = adapter->logdrv_chan[cmd->channel];
+
+	/*
+	 * The theory: If a physical drive is chosen for boot, all the
+	 * physical devices are exported before the logical drives, otherwise
+	 * physical devices are pushed after logical drives, in which case the
+	 * kernel sees the physical devices on a virtual channel, which is
+	 * then converted to the actual channel on the HBA.
+	 */
+	if( adapter->boot_pdrv_enabled ) {
+		if( islogical ) {
+			/* logical channel */
+			channel = cmd->channel -
+				adapter->product_info.nchannels;
+		}
+		else {
+			channel = cmd->channel; /* this is physical channel */
+			target = cmd->target;
+
+			/*
+			 * If we boot from a physical disk, that disk must be
+			 * exposed to the mid-layer first. If both channels
+			 * are SCSI, booting from the second channel is not
+			 * allowed.
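+			 * To achieve this, target id 0 and the boot drive's
+			 * target id are swapped on the boot channel.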
+ */ + if( target == 0 ) { + target = adapter->boot_pdrv_tgt; + } + else if( target == adapter->boot_pdrv_tgt ) { + target = 0; + } + } + } + else { + if( islogical ) { + channel = cmd->channel; /* this is the logical channel + */ + } + else { + channel = cmd->channel - NVIRT_CHAN; /* physical + channel */ + target = cmd->target; + } + } + + + if(islogical) { + + /* have just LUN 0 for each target on virtual channels */ + if (cmd->lun) { + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + + ldrv_num = mega_get_ldrv_num(adapter, cmd, channel); + + + max_ldrv_num = (adapter->flag & BOARD_40LD) ? + MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD; + + /* + * max_ldrv_num increases by 0x80 if some logical drive was + * deleted. + */ + if(adapter->read_ldidmap) + max_ldrv_num += 0x80; + + if(ldrv_num > max_ldrv_num ) { + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + + } + else { + if( cmd->lun > 7) { + /* + * Do not support lun >7 for physically accessed + * devices + */ + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + } + + /* + * + * Logical drive commands + * + */ + if(islogical) { + switch (cmd->cmnd[0]) { + case TEST_UNIT_READY: + memset(cmd->request_buffer, 0, cmd->request_bufflen); + +#if MEGA_HAVE_CLUSTERING + /* + * Do we support clustering and is the support enabled + * If no, return success always + */ + if( !adapter->has_cluster ) { + cmd->result = (DID_OK << 16); + cmd->scsi_done(cmd); + return NULL; + } + + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + + scb->raw_mbox[0] = MEGA_CLUSTER_CMD; + scb->raw_mbox[2] = MEGA_RESERVATION_STATUS; + scb->raw_mbox[3] = ldrv_num; + + scb->dma_direction = PCI_DMA_NONE; + + return scb; +#else + cmd->result = (DID_OK << 16); + cmd->scsi_done(cmd); + return NULL; +#endif + + case MODE_SENSE: + memset(cmd->request_buffer, 0, cmd->cmnd[4]); + cmd->result = (DID_OK << 16); + cmd->scsi_done(cmd); + return NULL; + + case READ_CAPACITY: + case INQUIRY: + + if(!(adapter->flag & (1L << cmd->channel))) { + + printk(KERN_NOTICE + "scsi%d: scanning scsi channel %d ", + adapter->host->host_no, + cmd->channel); + printk("for logical drives.\n"); + + adapter->flag |= (1L << cmd->channel); + } + + /* Allocate a SCB and initialize passthru */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + pthru = scb->pthru; + + mbox = (mbox_t *)scb->raw_mbox; + memset(mbox, 0, sizeof(scb->raw_mbox)); + memset(pthru, 0, sizeof(mega_passthru)); + + pthru->timeout = 0; + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 1; + pthru->logdrv = ldrv_num; + pthru->cdblen = cmd->cmd_len; + memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); + + if( adapter->has_64bit_addr ) { + mbox->cmd = MEGA_MBOXCMD_PASSTHRU64; + } + else { + mbox->cmd = MEGA_MBOXCMD_PASSTHRU; + } + + scb->dma_direction = PCI_DMA_FROMDEVICE; + + pthru->numsgelements = mega_build_sglist(adapter, scb, + &pthru->dataxferaddr, &pthru->dataxferlen); + + mbox->xferaddr = scb->pthru_dma_addr; + + return scb; + + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + + /* Allocate a SCB and initialize mailbox */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + mbox = (mbox_t *)scb->raw_mbox; 
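+			/*
+			 * The 6-, 10- and 12-byte CDB variants handled below
+			 * carry the LBA and sector count in different fields.
+			 */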
+ + memset(mbox, 0, sizeof(scb->raw_mbox)); + mbox->logdrv = ldrv_num; + + /* + * A little hack: 2nd bit is zero for all scsi read + * commands and is set for all scsi write commands + */ + if( adapter->has_64bit_addr ) { + mbox->cmd = (*cmd->cmnd & 0x02) ? + MEGA_MBOXCMD_LWRITE64: + MEGA_MBOXCMD_LREAD64 ; + } + else { + mbox->cmd = (*cmd->cmnd & 0x02) ? + MEGA_MBOXCMD_LWRITE: + MEGA_MBOXCMD_LREAD ; + } + + /* + * 6-byte READ(0x08) or WRITE(0x0A) cdb + */ + if( cmd->cmd_len == 6 ) { + mbox->numsectors = (u32) cmd->cmnd[4]; + mbox->lba = + ((u32)cmd->cmnd[1] << 16) | + ((u32)cmd->cmnd[2] << 8) | + (u32)cmd->cmnd[3]; + + mbox->lba &= 0x1FFFFF; + +#if MEGA_HAVE_STATS + /* + * Take modulo 0x80, since the logical drive + * number increases by 0x80 when a logical + * drive was deleted + */ + if (*cmd->cmnd == READ_6) { + adapter->nreads[ldrv_num%0x80]++; + adapter->nreadblocks[ldrv_num%0x80] += + mbox->numsectors; + } else { + adapter->nwrites[ldrv_num%0x80]++; + adapter->nwriteblocks[ldrv_num%0x80] += + mbox->numsectors; + } +#endif + } + + /* + * 10-byte READ(0x28) or WRITE(0x2A) cdb + */ + if( cmd->cmd_len == 10 ) { + mbox->numsectors = + (u32)cmd->cmnd[8] | + ((u32)cmd->cmnd[7] << 8); + mbox->lba = + ((u32)cmd->cmnd[2] << 24) | + ((u32)cmd->cmnd[3] << 16) | + ((u32)cmd->cmnd[4] << 8) | + (u32)cmd->cmnd[5]; + +#if MEGA_HAVE_STATS + if (*cmd->cmnd == READ_10) { + adapter->nreads[ldrv_num%0x80]++; + adapter->nreadblocks[ldrv_num%0x80] += + mbox->numsectors; + } else { + adapter->nwrites[ldrv_num%0x80]++; + adapter->nwriteblocks[ldrv_num%0x80] += + mbox->numsectors; + } +#endif + } + + /* + * 12-byte READ(0xA8) or WRITE(0xAA) cdb + */ + if( cmd->cmd_len == 12 ) { + mbox->lba = + ((u32)cmd->cmnd[2] << 24) | + ((u32)cmd->cmnd[3] << 16) | + ((u32)cmd->cmnd[4] << 8) | + (u32)cmd->cmnd[5]; + + mbox->numsectors = + ((u32)cmd->cmnd[6] << 24) | + ((u32)cmd->cmnd[7] << 16) | + ((u32)cmd->cmnd[8] << 8) | + (u32)cmd->cmnd[9]; + +#if MEGA_HAVE_STATS + if (*cmd->cmnd == READ_12) { + adapter->nreads[ldrv_num%0x80]++; + adapter->nreadblocks[ldrv_num%0x80] += + mbox->numsectors; + } else { + adapter->nwrites[ldrv_num%0x80]++; + adapter->nwriteblocks[ldrv_num%0x80] += + mbox->numsectors; + } +#endif + } + + /* + * If it is a read command + */ + if( (*cmd->cmnd & 0x0F) == 0x08 ) { + scb->dma_direction = PCI_DMA_FROMDEVICE; + } + else { + scb->dma_direction = PCI_DMA_TODEVICE; + } + + /* Calculate Scatter-Gather info */ + mbox->numsgelements = mega_build_sglist(adapter, scb, + (u32 *)&mbox->xferaddr, (u32 *)&seg); + + return scb; + +#if MEGA_HAVE_CLUSTERING + case RESERVE: /* Fall through */ + case RELEASE: + + /* + * Do we support clustering and is the support enabled + */ + if( ! adapter->has_cluster ) { + + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + + /* Allocate a SCB and initialize mailbox */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + + scb->raw_mbox[0] = MEGA_CLUSTER_CMD; + scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ? 
+ MEGA_RESERVE_LD : MEGA_RELEASE_LD; + + scb->raw_mbox[3] = ldrv_num; + + scb->dma_direction = PCI_DMA_NONE; + + return scb; +#endif + + default: + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + } + + /* + * Passthru drive commands + */ + else { + /* Allocate a SCB and initialize passthru */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + + mbox = (mbox_t *)scb->raw_mbox; + memset(mbox, 0, sizeof(scb->raw_mbox)); + + if( adapter->support_ext_cdb ) { + + epthru = mega_prepare_extpassthru(adapter, scb, cmd, + channel, target); + + mbox->cmd = MEGA_MBOXCMD_EXTPTHRU; + + mbox->xferaddr = scb->epthru_dma_addr; + + } + else { + + pthru = mega_prepare_passthru(adapter, scb, cmd, + channel, target); + + /* Initialize mailbox */ + if( adapter->has_64bit_addr ) { + mbox->cmd = MEGA_MBOXCMD_PASSTHRU64; + } + else { + mbox->cmd = MEGA_MBOXCMD_PASSTHRU; + } + + mbox->xferaddr = scb->pthru_dma_addr; + + } + return scb; + } + return NULL; +} + + +/** + * mega_prepare_passthru() + * @adapter - pointer to our soft state + * @scb - our scsi control block + * @cmd - scsi command from the mid-layer + * @channel - actual channel on the controller + * @target - actual id on the controller. + * + * prepare a command for the scsi physical devices. + */ +static mega_passthru * +mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd, + int channel, int target) +{ + mega_passthru *pthru; + + pthru = scb->pthru; + memset(pthru, 0, sizeof (mega_passthru)); + + /* 0=6sec/1=60sec/2=10min/3=3hrs */ + pthru->timeout = 2; + + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 0; + + pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; + + pthru->target = (adapter->flag & BOARD_40LD) ? + (channel << 4) | target : target; + + pthru->cdblen = cmd->cmd_len; + pthru->logdrv = cmd->lun; + + memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); + + /* Not sure about the direction */ + scb->dma_direction = PCI_DMA_BIDIRECTIONAL; + + /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */ + switch (cmd->cmnd[0]) { + case INQUIRY: + case READ_CAPACITY: + if(!(adapter->flag & (1L << cmd->channel))) { + + printk(KERN_NOTICE + "scsi%d: scanning scsi channel %d [P%d] ", + adapter->host->host_no, + cmd->channel, channel); + printk("for physical devices.\n"); + + adapter->flag |= (1L << cmd->channel); + } + /* Fall through */ + default: + pthru->numsgelements = mega_build_sglist(adapter, scb, + &pthru->dataxferaddr, &pthru->dataxferlen); + break; + } + return pthru; +} + + +/** + * mega_prepare_extpassthru() + * @adapter - pointer to our soft state + * @scb - our scsi control block + * @cmd - scsi command from the mid-layer + * @channel - actual channel on the controller + * @target - actual id on the controller. + * + * prepare a command for the scsi physical devices. This rountine prepares + * commands for devices which can take extended CDBs (>10 bytes) + */ +static mega_ext_passthru * +mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd, + int channel, int target) +{ + mega_ext_passthru *epthru; + + epthru = scb->epthru; + memset(epthru, 0, sizeof(mega_ext_passthru)); + + /* 0=6sec/1=60sec/2=10min/3=3hrs */ + epthru->timeout = 2; + + epthru->ars = 1; + epthru->reqsenselen = 14; + epthru->islogical = 0; + + epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; + epthru->target = (adapter->flag & BOARD_40LD) ? 
+ (channel << 4) | target : target; + + epthru->cdblen = cmd->cmd_len; + epthru->logdrv = cmd->lun; + + memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len); + + /* Not sure about the direction */ + scb->dma_direction = PCI_DMA_BIDIRECTIONAL; + + switch(cmd->cmnd[0]) { + case INQUIRY: + case READ_CAPACITY: + if(!(adapter->flag & (1L << cmd->channel))) { + + printk(KERN_NOTICE + "scsi%d: scanning scsi channel %d [P%d] ", + adapter->host->host_no, + cmd->channel, channel); + printk("for physical devices.\n"); + + adapter->flag |= (1L << cmd->channel); + } + /* Fall through */ + default: + epthru->numsgelements = mega_build_sglist(adapter, scb, + &epthru->dataxferaddr, &epthru->dataxferlen); + break; + } + + return epthru; +} + + +/** + * mega_allocate_scb() + * @adapter - pointer to our soft state + * @cmd - scsi command from the mid-layer + * + * Allocate a SCB structure. This is the central structure for controller + * commands. + */ +static inline scb_t * +mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd) +{ + struct list_head *head = &adapter->free_list; + scb_t *scb; + + /* Unlink command from Free List */ + if( !list_empty(head) ) { + + scb = list_entry(head->next, scb_t, list); + + list_del_init(head->next); + + scb->state = SCB_ACTIVE; + scb->cmd = cmd; + scb->dma_type = MEGA_DMA_TYPE_NONE; + + return scb; + } + + return NULL; +} + + +/** + * mega_runpendq() + * @adapter - pointer to our soft state + * + * Runs through the list of pending requests. + */ +static inline void +mega_runpendq(adapter_t *adapter) +{ + if(!list_empty(&adapter->pending_list)) + __mega_runpendq(adapter); +} + +static void +__mega_runpendq(adapter_t *adapter) +{ + scb_t *scb; + struct list_head *pos, *next; + + /* Issue any pending commands to the card */ + list_for_each_safe(pos, next, &adapter->pending_list) { + + scb = list_entry(pos, scb_t, list); + + if( !(scb->state & SCB_ISSUED) ) { + + if( issue_scb(adapter, scb) != 0 ) + return; + } + } + + return; +} + + +/** + * issue_scb() + * @adapter - pointer to our soft state + * @scb - scsi control block + * + * Post a command to the card if the mailbox is available, otherwise return + * busy. We also take the scb from the pending list if the mailbox is + * available. 
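+ * The mailbox busy bit is polled for up to max_mbox_busy_wait
+ * microseconds; if it stays busy, the scb is left on the pending list
+ * to be retried on a later run.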
+ */ +static inline int +issue_scb(adapter_t *adapter, scb_t *scb) +{ + volatile mbox64_t *mbox64 = adapter->mbox64; + volatile mbox_t *mbox = adapter->mbox; + unsigned int i = 0; + + if(unlikely(mbox->busy)) { + do { + udelay(1); + i++; + } while( mbox->busy && (i < max_mbox_busy_wait) ); + + if(mbox->busy) return -1; + } + + /* Copy mailbox data into host structure */ + memcpy((char *)mbox, (char *)scb->raw_mbox, 16); + + mbox->cmdid = scb->idx; /* Set cmdid */ + mbox->busy = 1; /* Set busy */ + + + /* + * Increment the pending queue counter + */ + atomic_inc(&adapter->pend_cmds); + + switch (mbox->cmd) { + case MEGA_MBOXCMD_LREAD64: + case MEGA_MBOXCMD_LWRITE64: + case MEGA_MBOXCMD_PASSTHRU64: + case MEGA_MBOXCMD_EXTPTHRU: + mbox64->xfer_segment_lo = mbox->xferaddr; + mbox64->xfer_segment_hi = 0; + mbox->xferaddr = 0xFFFFFFFF; + break; + default: + mbox64->xfer_segment_lo = 0; + mbox64->xfer_segment_hi = 0; + } + + /* + * post the command + */ + scb->state |= SCB_ISSUED; + + if( likely(adapter->flag & BOARD_MEMMAP) ) { + mbox->poll = 0; + mbox->ack = 0; + WRINDOOR(adapter, adapter->mbox_dma | 0x1); + } + else { + irq_enable(adapter); + issue_command(adapter); + } + + return 0; +} + + +/** + * issue_scb_block() + * @adapter - pointer to our soft state + * @raw_mbox - the mailbox + * + * Issue a scb in synchronous and non-interrupt mode + */ +static int +issue_scb_block(adapter_t *adapter, u_char *raw_mbox) +{ + volatile mbox64_t *mbox64 = adapter->mbox64; + volatile mbox_t *mbox = adapter->mbox; + u8 byte; + + raw_mbox[0x1] = 0xFE; /* Set cmdid */ + raw_mbox[0xF] = 1; /* Set busy */ + + /* Wait until mailbox is free */ + if(mega_busywait_mbox (adapter)) + goto bug_blocked_mailbox; + + /* Copy mailbox data into host structure */ + memcpy((char *) mbox, raw_mbox, 16); + + switch (raw_mbox[0]) { + case MEGA_MBOXCMD_LREAD64: + case MEGA_MBOXCMD_LWRITE64: + case MEGA_MBOXCMD_PASSTHRU64: + case MEGA_MBOXCMD_EXTPTHRU: + mbox64->xfer_segment_lo = mbox->xferaddr; + mbox64->xfer_segment_hi = 0; + mbox->xferaddr = 0xFFFFFFFF; + break; + default: + mbox64->xfer_segment_lo = 0; + mbox64->xfer_segment_hi = 0; + } + + if( likely(adapter->flag & BOARD_MEMMAP) ) { + mbox->poll = 0; + mbox->ack = 0; + mbox->numstatus = 0xFF; + mbox->status = 0xFF; + WRINDOOR(adapter, adapter->mbox_dma | 0x1); + + while((volatile u8)mbox->numstatus == 0xFF) + cpu_relax(); + + mbox->numstatus = 0xFF; + + while( (volatile u8)mbox->poll != 0x77 ) + cpu_relax(); + + mbox->poll = 0; + mbox->ack = 0x77; + + WRINDOOR(adapter, adapter->mbox_dma | 0x2); + + while(RDINDOOR(adapter) & 0x2) + cpu_relax(); + } + else { + irq_disable(adapter); + issue_command(adapter); + + while (!((byte = irq_state(adapter)) & INTR_VALID)) + cpu_relax(); + + set_irq_state(adapter, byte); + irq_enable(adapter); + irq_ack(adapter); + } + + return mbox->status; + +bug_blocked_mailbox: + printk(KERN_WARNING "megaraid: Blocked mailbox......!!\n"); + udelay (1000); + return -1; +} + + +/** + * megaraid_isr_iomapped() + * @irq - irq + * @devp - pointer to our soft state + * @regs - unused + * + * Interrupt service routine for io-mapped controllers. + * Find out if our device is interrupting. If yes, acknowledge the interrupt + * and service the completed commands. 
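+ * The adapter lock is held across the acknowledge sequence and the
+ * pending-queue run.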
+ */
+static void
+megaraid_isr_iomapped(int irq, void *devp, struct pt_regs *regs)
+{
+	adapter_t	*adapter = devp;
+	unsigned long	flags;
+
+
+	spin_lock_irqsave(&adapter->lock, flags);
+
+	megaraid_iombox_ack_sequence(adapter);
+
+	/* Loop through any pending requests */
+	if( atomic_read(&adapter->quiescent ) == 0) {
+		mega_runpendq(adapter);
+	}
+
+	spin_unlock_irqrestore(&adapter->lock, flags);
+
+	return;
+}
+
+
+/**
+ * megaraid_iombox_ack_sequence - interrupt ack sequence for IO mapped HBAs
+ * @adapter - controller's soft state
+ *
+ * Interrupt acknowledgement sequence for IO mapped HBAs
+ */
+static inline void
+megaraid_iombox_ack_sequence(adapter_t *adapter)
+{
+	u8	status;
+	u8	nstatus;
+	u8	completed[MAX_FIRMWARE_STATUS];
+	u8	byte;
+
+
+	/*
+	 * Loop until the F/W has no more completed commands for us.
+	 */
+	do {
+		/* Check if a valid interrupt is pending */
+		byte = irq_state(adapter);
+		if( (byte & VALID_INTR_BYTE) == 0 ) {
+			return;
+		}
+		set_irq_state(adapter, byte);
+
+		while((nstatus = (volatile u8)adapter->mbox->numstatus)
+				== 0xFF)
+			cpu_relax();
+		adapter->mbox->numstatus = 0xFF;
+
+		status = adapter->mbox->status;
+
+		/*
+		 * decrement the pending queue counter
+		 */
+		atomic_sub(nstatus, &adapter->pend_cmds);
+
+		memcpy(completed, (void *)adapter->mbox->completed, nstatus);
+
+		/* Acknowledge interrupt */
+		irq_ack(adapter);
+
+		mega_cmd_done(adapter, completed, nstatus, status);
+
+	} while(1);
+}
+
+
+/**
+ * megaraid_isr_memmapped()
+ * @irq - irq
+ * @devp - pointer to our soft state
+ * @regs - unused
+ *
+ * Interrupt service routine for memory-mapped controllers.
+ * Find out if our device is interrupting. If yes, acknowledge the interrupt
+ * and service the completed commands.
+ */
+static void
+megaraid_isr_memmapped(int irq, void *devp, struct pt_regs *regs)
+{
+	adapter_t	*adapter = devp;
+	unsigned long	flags;
+
+
+	spin_lock_irqsave(&adapter->lock, flags);
+
+	megaraid_memmbox_ack_sequence(adapter);
+
+	/* Loop through any pending requests */
+	if(atomic_read(&adapter->quiescent) == 0) {
+		mega_runpendq(adapter);
+	}
+
+	spin_unlock_irqrestore(&adapter->lock, flags);
+
+	return;
+}
+
+
+/**
+ * megaraid_memmbox_ack_sequence - interrupt ack sequence for memory mapped HBAs
+ * @adapter - controller's soft state
+ *
+ * Interrupt acknowledgement sequence for memory mapped HBAs
+ */
+static inline void
+megaraid_memmbox_ack_sequence(adapter_t *adapter)
+{
+	u8	status;
+	u32	dword = 0;
+	u8	nstatus;
+	u8	completed[MAX_FIRMWARE_STATUS];
+
+
+	/*
+	 * Loop until the F/W has no more completed commands for us.
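+	 * Each iteration drains up to MAX_FIRMWARE_STATUS completed command
+	 * ids from the mailbox.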
+ */
+	do {
+		/* Check if a valid interrupt is pending */
+		dword = RDOUTDOOR(adapter);
+		if( dword != 0x10001234 ) {
+			/*
+			 * No more pending commands
+			 */
+			return;
+		}
+		WROUTDOOR(adapter, 0x10001234);
+
+		while((nstatus = adapter->mbox->numstatus) == 0xFF) {
+			cpu_relax();
+		}
+		adapter->mbox->numstatus = 0xFF;
+
+		status = adapter->mbox->status;
+
+		/*
+		 * decrement the pending queue counter
+		 */
+		atomic_sub(nstatus, &adapter->pend_cmds);
+
+		memcpy(completed, (void *)adapter->mbox->completed, nstatus);
+
+		/* Acknowledge interrupt */
+		WRINDOOR(adapter, 0x2);
+
+		while( RDINDOOR(adapter) & 0x02 ) cpu_relax();
+
+		mega_cmd_done(adapter, completed, nstatus, status);
+
+	} while(1);
+}
+
+
+/**
+ * mega_cmd_done()
+ * @adapter - pointer to our soft state
+ * @completed - array of ids of completed commands
+ * @nstatus - number of completed commands
+ * @status - status of the last command completed
+ *
+ * Complete the commands and call the scsi mid-layer callback hooks.
+ */
+static inline void
+mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
+{
+	mega_ext_passthru	*epthru = NULL;
+	struct scatterlist	*sgl;
+	Scsi_Cmnd	*cmd = NULL;
+	mega_passthru	*pthru = NULL;
+	mbox_t	*mbox = NULL;
+	u8	c;
+	scb_t	*scb;
+	int	cmdid;
+	int	i;
+
+	/*
+	 * for all the commands completed, call the mid-layer callback routine
+	 * and free the scb.
+	 */
+	for( i = 0; i < nstatus; i++ ) {
+
+		cmdid = completed[i];
+
+		if( cmdid == CMDID_INT_CMDS ) { /* internal command */
+			scb = &adapter->int_scb;
+			cmd = scb->cmd;
+			mbox = (mbox_t *)scb->raw_mbox;
+
+			/*
+			 * The internal command interface does not use the
+			 * extended or 64-bit passthru structures.
+			 */
+			pthru = scb->pthru;
+
+		}
+		else {
+			scb = &adapter->scb_list[cmdid];
+			cmd = scb->cmd;
+			pthru = scb->pthru;
+			epthru = scb->epthru;
+			mbox = (mbox_t *)scb->raw_mbox;
+
+			/*
+			 * Make sure f/w has completed a valid command
+			 */
+			if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
+				printk(KERN_CRIT
+					"megaraid: invalid command ");
+				printk("Id %d, scb->state:%x, scsi cmd:%p\n",
+					cmdid, scb->state, scb->cmd);
+
+				continue;
+			}
+
+			/*
+			 * Was an abort issued for this command
+			 */
+			if( scb->state & SCB_ABORT ) {
+
+				printk(KERN_NOTICE
+				"megaraid: aborted cmd %lx[%x] complete.\n",
+					scb->cmd->serial_number, scb->idx);
+
+				cmd->result = (DID_ABORT << 16);
+
+				mega_free_scb(adapter, scb);
+
+				cmd->scsi_done(cmd);
+
+				continue;
+			}
+
+			/*
+			 * Was a reset issued for this command
+			 */
+			if( scb->state & SCB_RESET ) {
+
+				printk(KERN_WARNING
+				"megaraid: reset cmd %lx[%x] complete.\n",
+					scb->cmd->serial_number, scb->idx);
+
+				scb->cmd->result = (DID_RESET << 16);
+
+				mega_free_scb (adapter, scb);
+
+				cmd->scsi_done(cmd);
+
+				continue;
+			}
+
+#if MEGA_HAVE_STATS
+		{
+
+			int	islogical = adapter->logdrv_chan[cmd->channel];
+			int	logdrv = mbox->logdrv;
+
+			/*
+			 * Maintain an error counter for the logical drive.
+ * Some application like SNMP agent need such + * statistics + */ + if( status && islogical && (cmd->cmnd[0] == READ_6 || + cmd->cmnd[0] == READ_10 || + cmd->cmnd[0] == READ_12)) { + /* + * Logical drive number increases by 0x80 when + * a logical drive is deleted + */ + adapter->rd_errors[logdrv%0x80]++; + } + + if( status && islogical && (cmd->cmnd[0] == WRITE_6 || + cmd->cmnd[0] == WRITE_10 || + cmd->cmnd[0] == WRITE_12)) { + /* + * Logical drive number increases by 0x80 when + * a logical drive is deleted + */ + adapter->wr_errors[logdrv%0x80]++; + } + + } +#endif + } + + /* + * Do not return the presence of hard disk on the channel so, + * inquiry sent, and returned data==hard disk or removable + * hard disk and not logical, request should return failure! - + * PJ + */ + if(cmd->cmnd[0] == INQUIRY) { + int islogical = adapter->logdrv_chan[cmd->channel]; + + if(!islogical) { + if( cmd->use_sg ) { + sgl = (struct scatterlist *) + cmd->request_buffer; + c = *(u8 *)sgl[0].address; + } + else { + c = *(u8 *)cmd->request_buffer; + } + + if(IS_RAID_CH(adapter, cmd->channel) && + ((c & 0x1F ) == TYPE_DISK)) { + status = 0xF0; + } + } + } + + /* clear result; otherwise, success returns corrupt value */ + cmd->result = 0; + + /* Convert MegaRAID status to Linux error code */ + switch (status) { + case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */ + cmd->result |= (DID_OK << 16); + break; + + case 0x02: /* ERROR_ABORTED, i.e. + SCSI_STATUS_CHECK_CONDITION */ + + /* set sense_buffer and result fields */ + if( mbox->cmd == MEGA_MBOXCMD_PASSTHRU || + mbox->cmd == MEGA_MBOXCMD_PASSTHRU64 ) { + + memcpy(cmd->sense_buffer, pthru->reqsensearea, + 14); + + cmd->result = (DRIVER_SENSE << 24) | + (DID_OK << 16) | + (CHECK_CONDITION << 1); + } + else { + if (mbox->cmd == MEGA_MBOXCMD_EXTPTHRU) { + + memcpy(cmd->sense_buffer, + epthru->reqsensearea, 14); + + cmd->result = (DRIVER_SENSE << 24) | + (DID_OK << 16) | + (CHECK_CONDITION << 1); + } else { + cmd->sense_buffer[0] = 0x70; + cmd->sense_buffer[2] = ABORTED_COMMAND; + cmd->result |= (CHECK_CONDITION << 1); + } + } + break; + + case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e. + SCSI_STATUS_BUSY */ + cmd->result |= (DID_BUS_BUSY << 16) | status; + break; + + default: +#if MEGA_HAVE_CLUSTERING + /* + * If TEST_UNIT_READY fails, we know + * MEGA_RESERVATION_STATUS failed + */ + if( cmd->cmnd[0] == TEST_UNIT_READY ) { + cmd->result |= (DID_ERROR << 16) | + (RESERVATION_CONFLICT << 1); + } + else + /* + * Error code returned is 1 if Reserve or Release + * failed or the input parameter is invalid + */ + if( status == 1 && + (cmd->cmnd[0] == RESERVE || + cmd->cmnd[0] == RELEASE) ) { + + cmd->result |= (DID_ERROR << 16) | + (RESERVATION_CONFLICT << 1); + } + else +#endif + cmd->result |= (DID_BAD_TARGET << 16)|status; + } + + /* + * Only free SCBs for the commands coming down from the + * mid-layer, not for which were issued internally + * + * For internal command, restore the status returned by the + * firmware so that user can interpret it. + */ + if( cmdid == CMDID_INT_CMDS ) { /* internal command */ + cmd->result = status; + + /* + * Remove the internal command from the pending list + */ + list_del_init(&scb->list); + scb->state = SCB_FREE; + } + else { + mega_free_scb(adapter, scb); + } + + /* + * Call the mid-layer callback for this command + */ + cmd->scsi_done(cmd); + } +} + + +/* + * Free a SCB structure + * Note: We assume the scsi commands associated with this scb is not free yet. 
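+ * (mega_cmd_done(), for example, frees the scb first and only then calls
+ * the mid-layer completion callback)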
+ */ +static void +mega_free_scb(adapter_t *adapter, scb_t *scb) +{ + switch( scb->dma_type ) { + + case MEGA_DMA_TYPE_NONE: + break; + + case MEGA_BULK_DATA: + pci_unmap_page(adapter->host->pci_dev, scb->dma_h_bulkdata, + scb->cmd->request_bufflen, scb->dma_direction); + + if( scb->dma_direction == PCI_DMA_FROMDEVICE ) { + pci_dma_sync_single(adapter->host->pci_dev, + scb->dma_h_bulkdata, + scb->cmd->request_bufflen, + PCI_DMA_FROMDEVICE); + } + + break; + + case MEGA_SGLIST: + pci_unmap_sg(adapter->host->pci_dev, scb->cmd->request_buffer, + scb->cmd->use_sg, scb->dma_direction); + + if( scb->dma_direction == PCI_DMA_FROMDEVICE ) { + pci_dma_sync_sg(adapter->host->pci_dev, + scb->cmd->request_buffer, + scb->cmd->use_sg, PCI_DMA_FROMDEVICE); + } + + break; + + default: + break; + } + + /* + * Remove from the pending list + */ + list_del_init(&scb->list); + + /* Link the scb back into free list */ + scb->state = SCB_FREE; + scb->cmd = NULL; + + list_add(&scb->list, &adapter->free_list); +} + + +/* + * Wait until the controller's mailbox is available + */ +static inline int +mega_busywait_mbox (adapter_t *adapter) +{ + if (adapter->mbox->busy) + return __mega_busywait_mbox(adapter); + return 0; +} + +static int +__mega_busywait_mbox (adapter_t *adapter) +{ + volatile mbox_t *mbox = adapter->mbox; + long counter; + + for (counter = 0; counter < 10000; counter++) { + if (!mbox->busy) + return 0; + udelay(100); yield(); + } + return -1; /* give up after 1 second */ +} + +/* + * Copies data to SGLIST + * Note: For 64 bit cards, we need a minimum of one SG element for read/write + */ +static int +mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) +{ + struct scatterlist *sgl; + struct page *page; + unsigned long offset; + Scsi_Cmnd *cmd; + int sgcnt; + int idx; + + cmd = scb->cmd; + + /* Scatter-gather not used */ + if( !cmd->use_sg ) { + + page = virt_to_page(cmd->request_buffer); + + offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK); + + scb->dma_h_bulkdata = pci_map_page(adapter->host->pci_dev, + page, offset, + cmd->request_bufflen, + scb->dma_direction); + scb->dma_type = MEGA_BULK_DATA; + + /* + * We need to handle special 64-bit commands that need a + * minimum of 1 SG + */ + if( adapter->has_64bit_addr ) { + scb->sgl64[0].address = scb->dma_h_bulkdata; + scb->sgl64[0].length = cmd->request_bufflen; + *buf = (u32)scb->sgl_dma_addr; + *len = (u32)cmd->request_bufflen; + return 1; + } + else { + *buf = (u32)scb->dma_h_bulkdata; + *len = (u32)cmd->request_bufflen; + } + + if( scb->dma_direction == PCI_DMA_TODEVICE ) { + pci_dma_sync_single(adapter->host->pci_dev, + scb->dma_h_bulkdata, + cmd->request_bufflen, + PCI_DMA_TODEVICE); + } + + return 0; + } + + sgl = (struct scatterlist *)cmd->request_buffer; + + /* + * Copy Scatter-Gather list info into controller structure. 
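+	 * pci_map_sg() may have merged adjacent entries, so we walk the
+	 * count it returns rather than cmd->use_sg.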
+ * + * The number of sg elements returned must not exceed our limit + */ + sgcnt = pci_map_sg(adapter->host->pci_dev, sgl, cmd->use_sg, + scb->dma_direction); + + scb->dma_type = MEGA_SGLIST; + + if( sgcnt > adapter->sglen ) BUG(); + + for( idx = 0; idx < sgcnt; idx++, sgl++ ) { + + if( adapter->has_64bit_addr ) { + scb->sgl64[idx].address = sg_dma_address(sgl); + scb->sgl64[idx].length = sg_dma_len(sgl); + } + else { + scb->sgl[idx].address = sg_dma_address(sgl); + scb->sgl[idx].length = sg_dma_len(sgl); + } + } + + /* Reset pointer and length fields */ + *buf = scb->sgl_dma_addr; + + /* + * For passthru command, dataxferlen must be set, even for commands + * with a sg list + */ + *len = (u32)cmd->request_bufflen; + + if( scb->dma_direction == PCI_DMA_TODEVICE ) { + pci_dma_sync_sg(adapter->host->pci_dev, sgl, cmd->use_sg, + PCI_DMA_TODEVICE); + } + + /* Return count of SG requests */ + return sgcnt; +} + + +/* + * mega_8_to_40ld() + * + * takes all info in AdapterInquiry structure and puts it into ProductInfo and + * Enquiry3 structures for later use + */ +static void +mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3, + mega_product_info *product_info) +{ + int i; + + product_info->max_commands = inquiry->adapter_info.max_commands; + enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate; + product_info->nchannels = inquiry->adapter_info.nchannels; + + for (i = 0; i < 4; i++) { + product_info->fw_version[i] = + inquiry->adapter_info.fw_version[i]; + + product_info->bios_version[i] = + inquiry->adapter_info.bios_version[i]; + } + enquiry3->cache_flush_interval = + inquiry->adapter_info.cache_flush_interval; + + product_info->dram_size = inquiry->adapter_info.dram_size; + + enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv; + + for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) { + enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i]; + enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i]; + enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i]; + } + + for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++) + enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i]; +} + + +/* + * Release the controller's resources + */ +static int +megaraid_release(struct Scsi_Host *host) +{ + adapter_t *adapter; + mbox_t *mbox; + u_char raw_mbox[16]; + char buf[12] = { 0 }; + + adapter = (adapter_t *)host->hostdata; + mbox = (mbox_t *)raw_mbox; + + printk(KERN_NOTICE "megaraid: being unloaded..."); + + /* Flush adapter cache */ + memset(mbox, 0, 16); + raw_mbox[0] = FLUSH_ADAPTER; + + irq_disable(adapter); + free_irq(adapter->host->irq, adapter); + + /* Issue a blocking (interrupts disabled) command to the card */ + issue_scb_block(adapter, raw_mbox); + + /* Flush disks cache */ + memset(mbox, 0, 16); + raw_mbox[0] = FLUSH_SYSTEM; + + /* Issue a blocking (interrupts disabled) command to the card */ + issue_scb_block(adapter, raw_mbox); + + + /* Free our resources */ + if( adapter->flag & BOARD_MEMMAP ) { + iounmap((void *)adapter->base); + release_mem_region(adapter->host->base, 128); + } + else { + release_region(adapter->base, 16); + } + + mega_free_sgl(adapter); + +#ifdef CONFIG_PROC_FS + if( adapter->controller_proc_dir_entry ) { + remove_proc_entry("stat", adapter->controller_proc_dir_entry); + remove_proc_entry("config", + adapter->controller_proc_dir_entry); + remove_proc_entry("mailbox", + adapter->controller_proc_dir_entry); +#if MEGA_HAVE_ENH_PROC + remove_proc_entry("rebuild-rate", + adapter->controller_proc_dir_entry); + remove_proc_entry("battery-status", + 
adapter->controller_proc_dir_entry);
+
+		remove_proc_entry("diskdrives-ch0",
+				adapter->controller_proc_dir_entry);
+		remove_proc_entry("diskdrives-ch1",
+				adapter->controller_proc_dir_entry);
+		remove_proc_entry("diskdrives-ch2",
+				adapter->controller_proc_dir_entry);
+		remove_proc_entry("diskdrives-ch3",
+				adapter->controller_proc_dir_entry);
+
+		remove_proc_entry("raiddrives-0-9",
+				adapter->controller_proc_dir_entry);
+		remove_proc_entry("raiddrives-10-19",
+				adapter->controller_proc_dir_entry);
+		remove_proc_entry("raiddrives-20-29",
+				adapter->controller_proc_dir_entry);
+		remove_proc_entry("raiddrives-30-39",
+				adapter->controller_proc_dir_entry);
+#endif
+
+		sprintf(buf, "hba%d", adapter->host->host_no);
+		remove_proc_entry(buf, mega_proc_dir_entry);
+	}
+#endif
+
+	pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
+			adapter->mega_buffer, adapter->buf_dma_handle);
+	kfree(adapter->scb_list);
+	pci_free_consistent(adapter->dev, sizeof(mbox64_t),
+			(void *)adapter->una_mbox64, adapter->una_mbox64_dma);
+
+	kfree(adapter->ipdev);
+
+	hba_count--;
+
+	if( hba_count == 0 ) {
+
+		/*
+		 * Unregister the character device interface to the driver.
+		 */
+		unregister_chrdev(major, "megadev");
+
+		unregister_reboot_notifier(&mega_notifier);
+
+#ifdef CONFIG_PROC_FS
+		if( adapter->controller_proc_dir_entry ) {
+			remove_proc_entry ("megaraid", &proc_root);
+		}
+#endif
+
+	}
+
+	/*
+	 * Release the controller memory. A word of warning: this frees
+	 * hostdata and that includes adapter-> so be careful what you
+	 * dereference beyond this point
+	 */
+	scsi_unregister(host);
+
+
+	printk("ok.\n");
+
+	return 0;
+}
+
+static inline void
+mega_free_sgl(adapter_t *adapter)
+{
+	scb_t	*scb;
+	int	i;
+
+	for(i = 0; i < adapter->max_cmds; i++) {
+
+		scb = &adapter->scb_list[i];
+
+		if( scb->sgl64 ) {
+			pci_free_consistent(adapter->dev,
+				sizeof(mega_sgl64) * adapter->sglen,
+				scb->sgl64,
+				scb->sgl_dma_addr);
+
+			scb->sgl64 = NULL;
+		}
+
+		if( scb->pthru ) {
+			pci_free_consistent(adapter->dev, sizeof(mega_passthru),
+				scb->pthru, scb->pthru_dma_addr);
+
+			scb->pthru = NULL;
+		}
+
+		if( scb->epthru ) {
+			pci_free_consistent(adapter->dev,
+				sizeof(mega_ext_passthru),
+				scb->epthru, scb->epthru_dma_addr);
+
+			scb->epthru = NULL;
+		}
+
+	}
+}
+
+
+/*
+ * Get information about the card/driver
+ */
+const char *
+megaraid_info(struct Scsi_Host *host)
+{
+	static char	buffer[512];
+	adapter_t	*adapter;
+
+	adapter = (adapter_t *)host->hostdata;
+
+	sprintf (buffer,
+		"LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
+		adapter->fw_version, adapter->product_info.max_commands,
+		adapter->host->max_id, adapter->host->max_channel,
+		adapter->host->max_lun);
+	return buffer;
+}
+
+/* shouldn't be used, but included for completeness */
+static int
+megaraid_command (Scsi_Cmnd *cmd)
+{
+	printk(KERN_WARNING
+	"megaraid critical error: synchronous interface is not implemented.\n");
+
+	cmd->result = (DID_ERROR << 16);
+	cmd->scsi_done(cmd);
+
+	return 1;
+}
+
+
+/**
+ * megaraid_abort - abort the scsi command
+ * @scp - command to be aborted
+ *
+ * Abort a previous SCSI request. Only commands on the pending list can be
+ * aborted. All the commands issued to the F/W must complete.
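+ * Completions are reaped by running the interrupt acknowledge sequence
+ * by hand, since interrupts are disabled here.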
+ */
+static int
+megaraid_abort(Scsi_Cmnd *scp)
+{
+	adapter_t	*adapter;
+	struct list_head	*pos, *next;
+	scb_t	*scb;
+	long	iter;
+	int	rval = SUCCESS;
+
+	adapter = (adapter_t *)scp->host->hostdata;
+
+	ASSERT( spin_is_locked(&adapter->lock) );
+
+	printk("megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n",
+		scp->serial_number, scp->cmnd[0], scp->channel, scp->target,
+		scp->lun);
+
+
+	list_for_each_safe( pos, next, &adapter->pending_list ) {
+
+		scb = list_entry(pos, scb_t, list);
+
+		if( scb->cmd == scp ) { /* Found command */
+
+			scb->state |= SCB_ABORT;
+
+			/*
+			 * Check if this command was never issued. If this is
+			 * the case, take it off from the pending list and
+			 * complete.
+			 */
+			if( !(scb->state & SCB_ISSUED) ) {
+
+				printk(KERN_WARNING
+				"megaraid: %ld:%d, driver owner.\n",
+					scp->serial_number, scb->idx);
+
+				scp->result = (DID_ABORT << 16);
+
+				mega_free_scb(adapter, scb);
+
+				scp->scsi_done(scp);
+
+				break;
+			}
+		}
+	}
+
+	/*
+	 * By this time, either all commands are completed or aborted by
+	 * mid-layer. Do not return until all the commands are actually
+	 * completed by the firmware
+	 */
+	iter = 0;
+	while( !list_empty(&adapter->pending_list) ) {
+
+		/*
+		 * Perform the ack sequence, since interrupts are not
+		 * available right now!
+		 */
+		if( adapter->flag & BOARD_MEMMAP ) {
+			megaraid_memmbox_ack_sequence(adapter);
+		}
+		else {
+			megaraid_iombox_ack_sequence(adapter);
+		}
+
+		/*
+		 * print a message once every second only
+		 */
+		if( !(iter % 1000) ) {
+			printk(
+			"megaraid: Waiting for %d commands to flush: iter:%ld\n",
+				atomic_read(&adapter->pend_cmds), iter);
+		}
+
+		if( iter++ < MBOX_ABORT_SLEEP*1000 ) {
+			mdelay(1);
+		}
+		else {
+			printk(KERN_WARNING
+				"megaraid: critical hardware error!\n");
+
+			rval = FAILED;
+
+			break;
+		}
+	}
+
+	if( rval == SUCCESS ) {
+		printk(KERN_INFO
+			"megaraid: abort sequence successfully complete.\n");
+	}
+
+	return rval;
+}
+
+
+static int
+megaraid_reset(Scsi_Cmnd *cmd)
+{
+	adapter_t	*adapter;
+	megacmd_t	mc;
+	long	iter;
+	int	rval = SUCCESS;
+
+	adapter = (adapter_t *)cmd->host->hostdata;
+
+	ASSERT( spin_is_locked(&adapter->lock) );
+
+	printk("megaraid: reset-%ld cmd=%x <c=%d t=%d l=%d>\n",
+		cmd->serial_number, cmd->cmnd[0], cmd->channel, cmd->target,
+		cmd->lun);
+
+
+#if MEGA_HAVE_CLUSTERING
+	mc.cmd = MEGA_CLUSTER_CMD;
+	mc.opcode = MEGA_RESET_RESERVATIONS;
+
+	spin_unlock_irq(&adapter->lock);
+	if( mega_internal_command(adapter, LOCK_INT, &mc, NULL) != 0 ) {
+		printk(KERN_WARNING
+			"megaraid: reservation reset failed.\n");
+	}
+	else {
+		printk(KERN_INFO "megaraid: reservation reset.\n");
+	}
+	spin_lock_irq(&adapter->lock);
+#endif
+
+	/*
+	 * Do not return until all the commands are actually completed by the
+	 * firmware
+	 */
+	iter = 0;
+	while( !list_empty(&adapter->pending_list) ) {
+
+		/*
+		 * Perform the ack sequence, since interrupts are not
+		 * available right now!
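+		 * (this is the same drain loop as in megaraid_abort() above)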
+ */
+		if( adapter->flag & BOARD_MEMMAP ) {
+			megaraid_memmbox_ack_sequence(adapter);
+		}
+		else {
+			megaraid_iombox_ack_sequence(adapter);
+		}
+
+		/*
+		 * print a message once every second only
+		 */
+		if( !(iter % 1000) ) {
+			printk(
+			"megaraid: Waiting for %d commands to flush: iter:%ld\n",
+				atomic_read(&adapter->pend_cmds), iter);
+		}
+
+		if( iter++ < MBOX_RESET_SLEEP*1000 ) {
+			mdelay(1);
+		}
+		else {
+			printk(KERN_WARNING
+				"megaraid: critical hardware error!\n");
+
+			rval = FAILED;
+
+			break;
+		}
+	}
+
+	if( rval == SUCCESS ) {
+		printk(KERN_INFO
+			"megaraid: reset sequence successfully complete.\n");
+	}
+
+	return rval;
+}
+
+
+#ifdef CONFIG_PROC_FS
+/* Following code handles /proc fs */
+
+#define CREATE_READ_PROC(string, func)	create_proc_read_entry(string,	\
+					S_IRUSR | S_IFREG,		\
+					controller_proc_dir_entry,	\
+					func, adapter)
+
+/**
+ * mega_create_proc_entry()
+ * @index - index in soft state array
+ * @parent - parent node for this /proc entry
+ *
+ * Creates /proc entries for our controllers.
+ */
+static void
+mega_create_proc_entry(int index, struct proc_dir_entry *parent)
+{
+	struct proc_dir_entry	*controller_proc_dir_entry = NULL;
+	u8	string[64] = { 0 };
+	adapter_t	*adapter = hba_soft_state[index];
+
+	sprintf(string, "hba%d", adapter->host->host_no);
+
+	controller_proc_dir_entry =
+		adapter->controller_proc_dir_entry = proc_mkdir(string, parent);
+
+	if(!controller_proc_dir_entry) {
+		printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n");
+		return;
+	}
+	adapter->proc_read = CREATE_READ_PROC("config", proc_read_config);
+	adapter->proc_stat = CREATE_READ_PROC("stat", proc_read_stat);
+	adapter->proc_mbox = CREATE_READ_PROC("mailbox", proc_read_mbox);
+#if MEGA_HAVE_ENH_PROC
+	adapter->proc_rr = CREATE_READ_PROC("rebuild-rate", proc_rebuild_rate);
+	adapter->proc_battery = CREATE_READ_PROC("battery-status",
+			proc_battery);
+
+	/*
+	 * Display each physical drive on its channel
+	 */
+	adapter->proc_pdrvstat[0] = CREATE_READ_PROC("diskdrives-ch0",
+					proc_pdrv_ch0);
+	adapter->proc_pdrvstat[1] = CREATE_READ_PROC("diskdrives-ch1",
+					proc_pdrv_ch1);
+	adapter->proc_pdrvstat[2] = CREATE_READ_PROC("diskdrives-ch2",
+					proc_pdrv_ch2);
+	adapter->proc_pdrvstat[3] = CREATE_READ_PROC("diskdrives-ch3",
+					proc_pdrv_ch3);
+
+	/*
+	 * Display a set of up to 10 logical drives through each of the
+	 * following /proc entries
+	 */
+	adapter->proc_rdrvstat[0] = CREATE_READ_PROC("raiddrives-0-9",
+					proc_rdrv_10);
+	adapter->proc_rdrvstat[1] = CREATE_READ_PROC("raiddrives-10-19",
+					proc_rdrv_20);
+	adapter->proc_rdrvstat[2] = CREATE_READ_PROC("raiddrives-20-29",
+					proc_rdrv_30);
+	adapter->proc_rdrvstat[3] = CREATE_READ_PROC("raiddrives-30-39",
+					proc_rdrv_40);
+#endif
+}
+
+
+/**
+ * proc_read_config()
+ * @page - buffer to write the data in
+ * @start - where the actual data has been written in page
+ * @offset - same meaning as the read system call
+ * @count - same meaning as the read system call
+ * @eof - set if no more data needs to be returned
+ * @data - pointer to our soft state
+ *
+ * Display configuration information about the controller.
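+ * This covers the controller type, the addressing mode in use, queue
+ * depths and the current module parameters.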
+ */
+static int
+proc_read_config(char *page, char **start, off_t offset, int count, int *eof,
+		void *data)
+{
+
+	adapter_t *adapter = (adapter_t *)data;
+	int len = 0;
+
+	len += sprintf(page+len, "%s", MEGARAID_VERSION);
+
+	if(adapter->product_info.product_name[0])
+		len += sprintf(page+len, "%s\n",
+				adapter->product_info.product_name);
+
+	len += sprintf(page+len, "Controller Type: ");
+
+	if( adapter->flag & BOARD_MEMMAP ) {
+		len += sprintf(page+len,
+			"438/466/467/471/493/518/520/531/532\n");
+	}
+	else {
+		len += sprintf(page+len,
+			"418/428/434\n");
+	}
+
+	if(adapter->flag & BOARD_40LD) {
+		len += sprintf(page+len,
+			"Controller Supports 40 Logical Drives\n");
+	}
+
+	if(adapter->flag & BOARD_64BIT) {
+		len += sprintf(page+len,
+			"Controller capable of 64-bit memory addressing\n");
+	}
+	if( adapter->has_64bit_addr ) {
+		len += sprintf(page+len,
+			"Controller using 64-bit memory addressing\n");
+	}
+	else {
+		len += sprintf(page+len,
+			"Controller is not using 64-bit memory addressing\n");
+	}
+
+	len += sprintf(page+len, "Base = %08lx, Irq = %d, ", adapter->base,
+			adapter->host->irq);
+
+	len += sprintf(page+len, "Logical Drives = %d, Channels = %d\n",
+			adapter->numldrv, adapter->product_info.nchannels);
+
+	len += sprintf(page+len, "Version = %s:%s, DRAM = %dMb\n",
+			adapter->fw_version, adapter->bios_version,
+			adapter->product_info.dram_size);
+
+	len += sprintf(page+len,
+		"Controller Queue Depth = %d, Driver Queue Depth = %d\n",
+		adapter->product_info.max_commands, adapter->max_cmds);
+
+	len += sprintf(page+len, "support_ext_cdb = %d\n",
+			adapter->support_ext_cdb);
+	len += sprintf(page+len, "support_random_del = %d\n",
+			adapter->support_random_del);
+	len += sprintf(page+len, "boot_ldrv_enabled = %d\n",
+			adapter->boot_ldrv_enabled);
+	len += sprintf(page+len, "boot_ldrv = %d\n",
+			adapter->boot_ldrv);
+	len += sprintf(page+len, "boot_pdrv_enabled = %d\n",
+			adapter->boot_pdrv_enabled);
+	len += sprintf(page+len, "boot_pdrv_ch = %d\n",
+			adapter->boot_pdrv_ch);
+	len += sprintf(page+len, "boot_pdrv_tgt = %d\n",
+			adapter->boot_pdrv_tgt);
+	len += sprintf(page+len, "quiescent = %d\n",
+			atomic_read(&adapter->quiescent));
+	len += sprintf(page+len, "has_cluster = %d\n",
+			adapter->has_cluster);
+
+	len += sprintf(page+len, "\nModule Parameters:\n");
+	len += sprintf(page+len, "max_cmd_per_lun = %d\n",
+			max_cmd_per_lun);
+	len += sprintf(page+len, "max_sectors_per_io = %d\n",
+			max_sectors_per_io);
+
+	*eof = 1;
+
+	return len;
+}
+
+
+
+/**
+ * proc_read_stat()
+ * @page - buffer to write the data in
+ * @start - where the actual data has been written in page
+ * @offset - same meaning as the read system call
+ * @count - same meaning as the read system call
+ * @eof - set if no more data needs to be returned
+ * @data - pointer to our soft state
+ *
+ * Display statistical information about the I/O activity.
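+ * Per-logical-drive I/O and error counters are shown only when the
+ * driver is compiled with MEGA_HAVE_STATS.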
+ */ +static int +proc_read_stat(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter; + int len; + int i; + + i = 0; /* avoid compilation warnings */ + len = 0; + adapter = (adapter_t *)data; + + len = sprintf(page, "Statistical Information for this controller\n"); + len += sprintf(page+len, "pend_cmds = %d\n", + atomic_read(&adapter->pend_cmds)); +#if MEGA_HAVE_STATS + for(i = 0; i < adapter->numldrv; i++) { + len += sprintf(page+len, "Logical Drive %d:\n", i); + + len += sprintf(page+len, + "\tReads Issued = %lu, Writes Issued = %lu\n", + adapter->nreads[i], adapter->nwrites[i]); + + len += sprintf(page+len, + "\tSectors Read = %lu, Sectors Written = %lu\n", + adapter->nreadblocks[i], adapter->nwriteblocks[i]); + + len += sprintf(page+len, + "\tRead errors = %lu, Write errors = %lu\n\n", + adapter->rd_errors[i], adapter->wr_errors[i]); + } +#else + len += sprintf(page+len, + "IO and error counters not compiled in driver.\n"); +#endif + + *eof = 1; + + return len; +} + + +/** + * proc_read_mbox() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display mailbox information for the last command issued. This information + * is good for debugging. + */ +static int +proc_read_mbox(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + + adapter_t *adapter = (adapter_t *)data; + volatile mbox_t *mbox = adapter->mbox; + int len = 0; + + len = sprintf(page, "Contents of Mail Box Structure\n"); + len += sprintf(page+len, " Fw Command = 0x%02x\n", mbox->cmd); + len += sprintf(page+len, " Cmd Sequence = 0x%02x\n", mbox->cmdid); + len += sprintf(page+len, " No of Sectors= %04d\n", mbox->numsectors); + len += sprintf(page+len, " LBA = 0x%02x\n", mbox->lba); + len += sprintf(page+len, " DTA = 0x%08x\n", mbox->xferaddr); + len += sprintf(page+len, " Logical Drive= 0x%02x\n", mbox->logdrv); + len += sprintf(page+len, " No of SG Elmt= 0x%02x\n", + mbox->numsgelements); + len += sprintf(page+len, " Busy = %01x\n", mbox->busy); + len += sprintf(page+len, " Status = 0x%02x\n", mbox->status); + + *eof = 1; + + return len; +} + + +/** + * proc_rebuild_rate() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display current rebuild rate + */ +static int +proc_rebuild_rate(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + dma_addr_t dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + int len = 0; + + pdev = adapter->ipdev; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { + *eof = 1; + return len; + } + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + + len = sprintf(page, "Adapter inquiry failed.\n"); + + printk(KERN_WARNING "megaraid: inquiry failed.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + *eof = 1; + + return len; + } + + if( adapter->flag & BOARD_40LD ) { + len = sprintf(page, "Rebuild Rate: [%d%%]\n", + ((mega_inquiry3 *)inquiry)->rebuild_rate); + } + else { + len = sprintf(page, "Rebuild Rate: [%d%%]\n", + ((mraid_ext_inquiry 
*) + inquiry)->raid_inq.adapter_info.rebuild_rate); + } + + + mega_free_inquiry(inquiry, dma_handle, pdev); + + *eof = 1; + + return len; +} + + +/** + * proc_battery() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the battery module on the controller. + */ +static int +proc_battery(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + dma_addr_t dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + u8 battery_status = 0; + char str[256]; + int len = 0; + + pdev = adapter->ipdev; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { + *eof = 1; + return len; + } + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + + len = sprintf(page, "Adapter inquiry failed.\n"); + + printk(KERN_WARNING "megaraid: inquiry failed.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + *eof = 1; + + return len; + } + + if( adapter->flag & BOARD_40LD ) { + battery_status = ((mega_inquiry3 *)inquiry)->battery_status; + } + else { + battery_status = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.adapter_info.battery_status; + } + + /* + * Decode the battery status + */ + sprintf(str, "Battery Status:[%d]", battery_status); + + if(battery_status == MEGA_BATT_CHARGE_DONE) + strcat(str, " Charge Done"); + + if(battery_status & MEGA_BATT_MODULE_MISSING) + strcat(str, " Module Missing"); + + if(battery_status & MEGA_BATT_LOW_VOLTAGE) + strcat(str, " Low Voltage"); + + if(battery_status & MEGA_BATT_TEMP_HIGH) + strcat(str, " Temperature High"); + + if(battery_status & MEGA_BATT_PACK_MISSING) + strcat(str, " Pack Missing"); + + if(battery_status & MEGA_BATT_CHARGE_INPROG) + strcat(str, " Charge In-progress"); + + if(battery_status & MEGA_BATT_CHARGE_FAIL) + strcat(str, " Charge Fail"); + + if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) + strcat(str, " Cycles Exceeded"); + + len = sprintf(page, "%s\n", str); + + + mega_free_inquiry(inquiry, dma_handle, pdev); + + *eof = 1; + + return len; +} + + +/** + * proc_pdrv_ch0() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the physical drives on physical channel 0. + */ +static int +proc_pdrv_ch0(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_pdrv(adapter, page, 0)); +} + + +/** + * proc_pdrv_ch1() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the physical drives on physical channel 1. 
+ */ +static int +proc_pdrv_ch1(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_pdrv(adapter, page, 1)); +} + + +/** + * proc_pdrv_ch2() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the physical drives on physical channel 2. + */ +static int +proc_pdrv_ch2(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_pdrv(adapter, page, 2)); +} + + +/** + * proc_pdrv_ch3() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the physical drives on physical channel 3. + */ +static int +proc_pdrv_ch3(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_pdrv(adapter, page, 3)); +} + + +/** + * proc_pdrv() + * @page - buffer to write the data in + * @adapter - pointer to our soft state + * + * Display information about the physical drives. + */ +static int +proc_pdrv(adapter_t *adapter, char *page, int channel) +{ + dma_addr_t dma_handle; + char *scsi_inq; + dma_addr_t scsi_inq_dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + u8 *pdrv_state; + u8 state; + int tgt; + int max_channels; + int len = 0; + char str[80]; + int i; + + pdev = adapter->ipdev; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { + return len; + } + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + + len = sprintf(page, "Adapter inquiry failed.\n"); + + printk(KERN_WARNING "megaraid: inquiry failed.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + return len; + } + + + scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle); + + if( scsi_inq == NULL ) { + len = sprintf(page, "memory not available for scsi inq.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + return len; + } + + if( adapter->flag & BOARD_40LD ) { + pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; + } + else { + pdrv_state = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.pdrv_info.pdrv_state; + } + + max_channels = adapter->product_info.nchannels; + + if( channel >= max_channels ) return 0; + + for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { + + i = channel*16 + tgt; + + state = *(pdrv_state + i); + + switch( state & 0x0F ) { + + case PDRV_ONLINE: + sprintf(str, + "Channel:%2d Id:%2d State: Online", + channel, tgt); + break; + + case PDRV_FAILED: + sprintf(str, + "Channel:%2d Id:%2d State: Failed", + channel, tgt); + break; + + case PDRV_RBLD: + sprintf(str, + "Channel:%2d Id:%2d State: Rebuild", + channel, tgt); + break; + + case PDRV_HOTSPARE: + sprintf(str, + "Channel:%2d Id:%2d State: Hot spare", + channel, tgt); + break; + + default: + sprintf(str, + "Channel:%2d Id:%2d State: Un-configured", + channel, tgt); + break; + + } + + /* + * This interface displays inquiries for disk drives + * only. 
Inquiries for logical drives and non-disk
+		 * devices are available through /proc/scsi/scsi
+		 */
+		memset(scsi_inq, 0, 256);
+		if( mega_internal_dev_inquiry(adapter, channel, tgt,
+				scsi_inq_dma_handle) ||
+				(scsi_inq[0] & 0x1F) != TYPE_DISK ) {
+			continue;
+		}
+
+		/*
+		 * Check for overflow. We print less than 240
+		 * characters for inquiry
+		 */
+		if( (len + 240) >= PAGE_SIZE ) break;
+
+		len += sprintf(page+len, "%s.\n", str);
+
+		len += mega_print_inquiry(page+len, scsi_inq);
+	}
+
+	pci_free_consistent(pdev, 256, scsi_inq, scsi_inq_dma_handle);
+
+	mega_free_inquiry(inquiry, dma_handle, pdev);
+
+	return len;
+}
+
+
+/*
+ * Display scsi inquiry
+ */
+static int
+mega_print_inquiry(char *page, char *scsi_inq)
+{
+	int	len = 0;
+	int	i;
+
+	len = sprintf(page, " Vendor: ");
+	for( i = 8; i < 16; i++ ) {
+		len += sprintf(page+len, "%c", scsi_inq[i]);
+	}
+
+	len += sprintf(page+len, " Model: ");
+
+	for( i = 16; i < 32; i++ ) {
+		len += sprintf(page+len, "%c", scsi_inq[i]);
+	}
+
+	len += sprintf(page+len, " Rev: ");
+
+	for( i = 32; i < 36; i++ ) {
+		len += sprintf(page+len, "%c", scsi_inq[i]);
+	}
+
+	len += sprintf(page+len, "\n");
+
+	i = scsi_inq[0] & 0x1f;
+
+	len += sprintf(page+len, " Type: %s ",
+		i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
+		"Unknown ");
+
+	len += sprintf(page+len,
+	" ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
+
+	if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
+		len += sprintf(page+len, " CCS\n");
+	else
+		len += sprintf(page+len, "\n");
+
+	return len;
+}
+
+
+/**
+ * proc_rdrv_10()
+ * @page - buffer to write the data in
+ * @start - where the actual data has been written in page
+ * @offset - same meaning as the read system call
+ * @count - same meaning as the read system call
+ * @eof - set if no more data needs to be returned
+ * @data - pointer to our soft state
+ *
+ * Display real time information about the logical drives 0 through 9.
+ */
+static int
+proc_rdrv_10(char *page, char **start, off_t offset, int count, int *eof,
+		void *data)
+{
+	adapter_t *adapter = (adapter_t *)data;
+
+	*eof = 1;
+
+	return (proc_rdrv(adapter, page, 0, 9));
+}
+
+
+/**
+ * proc_rdrv_20()
+ * @page - buffer to write the data in
+ * @start - where the actual data has been written in page
+ * @offset - same meaning as the read system call
+ * @count - same meaning as the read system call
+ * @eof - set if no more data needs to be returned
+ * @data - pointer to our soft state
+ *
+ * Display real time information about the logical drives 10 through 19.
+ */
+static int
+proc_rdrv_20(char *page, char **start, off_t offset, int count, int *eof,
+		void *data)
+{
+	adapter_t *adapter = (adapter_t *)data;
+
+	*eof = 1;
+
+	return (proc_rdrv(adapter, page, 10, 19));
+}
+
+
+/**
+ * proc_rdrv_30()
+ * @page - buffer to write the data in
+ * @start - where the actual data has been written in page
+ * @offset - same meaning as the read system call
+ * @count - same meaning as the read system call
+ * @eof - set if no more data needs to be returned
+ * @data - pointer to our soft state
+ *
+ * Display real time information about the logical drives 20 through 29.
+ */
+static int
+proc_rdrv_30(char *page, char **start, off_t offset, int count, int *eof,
+		void *data)
+{
+	adapter_t *adapter = (adapter_t *)data;
+
+	*eof = 1;
+
+	return (proc_rdrv(adapter, page, 20, 29));
+}
+
+
+/**
+ * proc_rdrv_40()
+ * @page - buffer to write the data in
+ * @start - where the actual data has been written in page
+ * @offset - same meaning as the read system call
+ * @count - same meaning as the read system call
+ * @eof - set if no more data needs to be returned
+ * @data - pointer to our soft state
+ *
+ * Display real time information about the logical drives 30 through 39.
+ */
+static int
+proc_rdrv_40(char *page, char **start, off_t offset, int count, int *eof,
+		void *data)
+{
+	adapter_t *adapter = (adapter_t *)data;
+
+	*eof = 1;
+
+	return (proc_rdrv(adapter, page, 30, 39));
+}
+
+
+/**
+ * proc_rdrv()
+ * @page - buffer to write the data in
+ * @adapter - pointer to our soft state
+ * @start - starting logical drive to display
+ * @end - ending logical drive to display
+ *
+ * We do not print the inquiry information since it is already available
+ * through the /proc/scsi/scsi interface
+ */
+static int
+proc_rdrv(adapter_t *adapter, char *page, int start, int end )
+{
+	dma_addr_t	dma_handle;
+	logdrv_param	*lparam;
+	megacmd_t	mc;
+	char	*disk_array;
+	dma_addr_t	disk_array_dma_handle;
+	caddr_t	inquiry;
+	struct pci_dev	*pdev;
+	u8	*rdrv_state;
+	int	num_ldrv;
+	u32	array_sz;
+	int	len = 0;
+	int	i;
+
+	pdev = adapter->ipdev;
+
+	if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) {
+		return len;
+	}
+
+	if( mega_adapinq(adapter, dma_handle) != 0 ) {
+
+		len = sprintf(page, "Adapter inquiry failed.\n");
+
+		printk(KERN_WARNING "megaraid: inquiry failed.\n");
+
+		mega_free_inquiry(inquiry, dma_handle, pdev);
+
+		return len;
+	}
+
+	memset(&mc, 0, sizeof(megacmd_t));
+
+	if( adapter->flag & BOARD_40LD ) {
+		array_sz = sizeof(disk_array_40ld);
+
+		rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state;
+
+		num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv;
+	}
+	else {
+		array_sz = sizeof(disk_array_8ld);
+
+		rdrv_state = ((mraid_ext_inquiry *)inquiry)->
+				raid_inq.logdrv_info.ldrv_state;
+
+		num_ldrv = ((mraid_ext_inquiry *)inquiry)->
+				raid_inq.logdrv_info.num_ldrv;
+	}
+
+	disk_array = pci_alloc_consistent(pdev, array_sz,
+			&disk_array_dma_handle);
+
+	if( disk_array == NULL ) {
+		len = sprintf(page, "memory not available.\n");
+
+		mega_free_inquiry(inquiry, dma_handle, pdev);
+
+		return len;
+	}
+
+	mc.xferaddr = (u32)disk_array_dma_handle;
+
+	if( adapter->flag & BOARD_40LD ) {
+		mc.cmd = FC_NEW_CONFIG;
+		mc.opcode = OP_DCMD_READ_CONFIG;
+
+		if( mega_internal_command(adapter, LOCK_INT, &mc, NULL) ) {
+
+			len = sprintf(page, "40LD read config failed.\n");
+
+			mega_free_inquiry(inquiry, dma_handle, pdev);
+
+			pci_free_consistent(pdev, array_sz, disk_array,
+					disk_array_dma_handle);
+
+			return len;
+		}
+
+	}
+	else {
+		mc.cmd = NEW_READ_CONFIG_8LD;
+
+		if( mega_internal_command(adapter, LOCK_INT, &mc, NULL) ) {
+
+			mc.cmd = READ_CONFIG_8LD;
+
+			if( mega_internal_command(adapter, LOCK_INT, &mc,
+					NULL) ){
+
+				len = sprintf(page,
+					"8LD read config failed.\n");
+
+				mega_free_inquiry(inquiry, dma_handle, pdev);
+
+				pci_free_consistent(pdev, array_sz,
+						disk_array,
+						disk_array_dma_handle);
+
+				return len;
+			}
+		}
+	}
+
+	for( i = start; i < ( (end+1 < num_ldrv) ?
end+1 : num_ldrv ); i++ ) { + + if( adapter->flag & BOARD_40LD ) { + lparam = + &((disk_array_40ld *)disk_array)->ldrv[i].lparam; + } + else { + lparam = + &((disk_array_8ld *)disk_array)->ldrv[i].lparam; + } + + /* + * Check for overflow. We print less than 240 characters for + * information about each logical drive. + */ + if( (len + 240) >= PAGE_SIZE ) break; + + len += sprintf(page+len, "Logical drive:%2d:, ", i); + + switch( rdrv_state[i] & 0x0F ) { + case RDRV_OFFLINE: + len += sprintf(page+len, "state: offline"); + break; + + case RDRV_DEGRADED: + len += sprintf(page+len, "state: degraded"); + break; + + case RDRV_OPTIMAL: + len += sprintf(page+len, "state: optimal"); + break; + + case RDRV_DELETED: + len += sprintf(page+len, "state: deleted"); + break; + + default: + len += sprintf(page+len, "state: unknown"); + break; + } + + /* + * Check if check consistency or initialization is going on + * for this logical drive. + */ + if( (rdrv_state[i] & 0xF0) == 0x20 ) { + len += sprintf(page+len, + ", check-consistency in progress"); + } + else if( (rdrv_state[i] & 0xF0) == 0x10 ) { + len += sprintf(page+len, + ", initialization in progress"); + } + + len += sprintf(page+len, "\n"); + + len += sprintf(page+len, "Span depth:%3d, ", + lparam->span_depth); + + len += sprintf(page+len, "RAID level:%3d, ", + lparam->level); + + len += sprintf(page+len, "Stripe size:%3d, ", + lparam->stripe_sz ? lparam->stripe_sz/2: 128); + + len += sprintf(page+len, "Row size:%3d\n", + lparam->row_size); + + + len += sprintf(page+len, "Read Policy: "); + + switch(lparam->read_ahead) { + + case NO_READ_AHEAD: + len += sprintf(page+len, "No read ahead, "); + break; + + case READ_AHEAD: + len += sprintf(page+len, "Read ahead, "); + break; + + case ADAP_READ_AHEAD: + len += sprintf(page+len, "Adaptive, "); + break; + + } + + len += sprintf(page+len, "Write Policy: "); + + switch(lparam->write_mode) { + + case WRMODE_WRITE_THRU: + len += sprintf(page+len, "Write thru, "); + break; + + case WRMODE_WRITE_BACK: + len += sprintf(page+len, "Write back, "); + break; + } + + len += sprintf(page+len, "Cache Policy: "); + + switch(lparam->direct_io) { + + case CACHED_IO: + len += sprintf(page+len, "Cached IO\n\n"); + break; + + case DIRECT_IO: + len += sprintf(page+len, "Direct IO\n\n"); + break; + } + } + + mega_free_inquiry(inquiry, dma_handle, pdev); + + pci_free_consistent(pdev, array_sz, disk_array, + disk_array_dma_handle); + + return len; +} + +#endif + + +/** + * megaraid_biosparam() + * @disk + * @dev + * @geom + * + * Return the disk geometry for a particular disk + * Input: + * Disk *disk - Disk geometry + * kdev_t dev - Device node + * int *geom - Returns geometry fields + * geom[0] = heads + * geom[1] = sectors + * geom[2] = cylinders + */ +static int +megaraid_biosparam(Disk *disk, kdev_t dev, int *geom) +{ + int heads, sectors, cylinders; + adapter_t *adapter; + + /* Get pointer to host config structure */ + adapter = (adapter_t *)disk->device->host->hostdata; + + if (IS_RAID_CH(adapter, disk->device->channel)) { + /* Default heads (64) & sectors (32) */ + heads = 64; + sectors = 32; + cylinders = disk->capacity / (heads * sectors); + + /* + * Handle extended translation size for logical drives + * > 1Gb + */ + if (disk->capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = disk->capacity / (heads * sectors); + } + + /* return result */ + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + } + else { + if( !mega_partsize(disk, dev, geom) ) + return 0; + + printk(KERN_WARNING + 
"megaraid: invalid partition on this disk on channel %d\n", + disk->device->channel); + + /* Default heads (64) & sectors (32) */ + heads = 64; + sectors = 32; + cylinders = disk->capacity / (heads * sectors); + + /* Handle extended translation size for logical drives > 1Gb */ + if (disk->capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = disk->capacity / (heads * sectors); + } + + /* return result */ + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + } + + return 0; +} + +/* + * mega_partsize() + * @disk + * @geom + * + * Purpose : to determine the BIOS mapping used to create the partition + * table, storing the results (cyls, hds, and secs) in geom + * + * Note: Code is picked from scsicam.h + * + * Returns : -1 on failure, 0 on success. + */ +static int +mega_partsize(Disk *disk, kdev_t dev, int *geom) +{ + struct buffer_head *bh; + struct partition *p, *largest = NULL; + int i, largest_cyl; + int heads, cyls, sectors; + int capacity = disk->capacity; + + int ma = MAJOR(dev); + int mi = (MINOR(dev) & ~0xf); + + int block = 1024; + + if (blksize_size[ma]) + block = blksize_size[ma][mi]; + + if (!(bh = bread(MKDEV(ma,mi), 0, block))) + return -1; + + if (*(unsigned short *)(bh->b_data + 510) == 0xAA55 ) { + + for (largest_cyl = -1, + p = (struct partition *)(0x1BE + bh->b_data), i = 0; + i < 4; ++i, ++p) { + + if (!p->sys_ind) continue; + + cyls = p->end_cyl + ((p->end_sector & 0xc0) << 2); + + if (cyls >= largest_cyl) { + largest_cyl = cyls; + largest = p; + } + } + } + + if (largest) { + heads = largest->end_head + 1; + sectors = largest->end_sector & 0x3f; + + if (!heads || !sectors) { + brelse(bh); + return -1; + } + + cyls = capacity/(heads * sectors); + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cyls; + + brelse(bh); + return 0; + } + + brelse(bh); + return -1; +} + + +/** + * megaraid_reboot_notify() + * @this - unused + * @code - shutdown code + * @unused - unused + * + * This routine will be called when the use has done a forced shutdown on the + * system. Flush the Adapter and disks cache. + */ +static int +megaraid_reboot_notify (struct notifier_block *this, unsigned long code, + void *unused) +{ + adapter_t *adapter; + struct Scsi_Host *host; + u8 raw_mbox[16]; + mbox_t *mbox; + int i; + + /* + * Flush the controller's cache irrespective of the codes coming down. + * SYS_DOWN, SYS_HALT, SYS_RESTART, SYS_POWER_OFF + */ + for( i = 0; i < hba_count; i++ ) { + printk(KERN_INFO "megaraid: flushing adapter %d..", i); + host = hba_soft_state[i]->host; + + adapter = (adapter_t *)host->hostdata; + mbox = (mbox_t *)raw_mbox; + + /* Flush adapter cache */ + memset(mbox, 0, 16); + raw_mbox[0] = FLUSH_ADAPTER; + + irq_disable(adapter); + free_irq(adapter->host->irq, adapter); + + /* + * Issue a blocking (interrupts disabled) command to + * the card + */ + issue_scb_block(adapter, raw_mbox); + + /* Flush disks cache */ + memset(mbox, 0, 16); + raw_mbox[0] = FLUSH_SYSTEM; + + issue_scb_block(adapter, raw_mbox); + + printk("Done.\n"); + + if( atomic_read(&adapter->pend_cmds) > 0 ) { + printk(KERN_WARNING "megaraid: pending commands!!\n"); + } + } + + /* + * Have a delibrate delay to make sure all the caches are + * actually flushed. 
+ */ + printk(KERN_INFO "megaraid: cache flush delay: "); + for( i = 9; i >= 0; i-- ) { + printk("\b\b\b[%d]", i); + mdelay(1000); + } + printk("\b\b\b[done]\n"); + mdelay(1000); + + return NOTIFY_DONE; +} + +/** + * mega_init_scb() + * @adapter - pointer to our soft state + * + * Allocate memory for the various pointers in the scb structures: + * scatter-gather list pointer, passthru and extended passthru structure + * pointers. + */ +static int +mega_init_scb(adapter_t *adapter) +{ + scb_t *scb; + int i; + + for( i = 0; i < adapter->max_cmds; i++ ) { + + scb = &adapter->scb_list[i]; + + scb->sgl64 = NULL; + scb->sgl = NULL; + scb->pthru = NULL; + scb->epthru = NULL; + } + + for( i = 0; i < adapter->max_cmds; i++ ) { + + scb = &adapter->scb_list[i]; + + scb->idx = i; + + scb->sgl64 = pci_alloc_consistent(adapter->dev, + sizeof(mega_sgl64) * adapter->sglen, + &scb->sgl_dma_addr); + + scb->sgl = (mega_sglist *)scb->sgl64; + + if( !scb->sgl ) { + printk(KERN_WARNING "RAID: Can't allocate sglist.\n"); + mega_free_sgl(adapter); + return -1; + } + + scb->pthru = pci_alloc_consistent(adapter->dev, + sizeof(mega_passthru), + &scb->pthru_dma_addr); + + if( !scb->pthru ) { + printk(KERN_WARNING "RAID: Can't allocate passthru.\n"); + mega_free_sgl(adapter); + return -1; + } + + scb->epthru = pci_alloc_consistent(adapter->dev, + sizeof(mega_ext_passthru), + &scb->epthru_dma_addr); + + if( !scb->epthru ) { + printk(KERN_WARNING + "Can't allocate extended passthru.\n"); + mega_free_sgl(adapter); + return -1; + } + + + scb->dma_type = MEGA_DMA_TYPE_NONE; + + /* + * Link to free list + * lock not required since we are loading the driver, so no + * commands possible right now. + */ + scb->state = SCB_FREE; + scb->cmd = NULL; + list_add(&scb->list, &adapter->free_list); + } + + return 0; +} + + +/** + * megadev_open() + * @inode - unused + * @filep - unused + * + * Routines for the character/ioctl interface to the driver. Find out if this + * is a valid open. If yes, increment the module use count so that it cannot + * be unloaded. + */ +static int +megadev_open (struct inode *inode, struct file *filep) +{ + /* + * Only allow superuser to access private ioctl interface + */ + if( !capable(CAP_SYS_ADMIN) ) return -EACCES; + + MOD_INC_USE_COUNT; + return 0; +} + + +/** + * megadev_ioctl() + * @inode - Our device inode + * @filep - unused + * @cmd - ioctl command + * @arg - user buffer + * + * ioctl entry point for our private ioctl interface. We move the data in from + * the user space, prepare the command (if necessary, convert the old MIMD + * ioctl to new ioctl command), and issue a synchronous command to the + * controller. + */ +static int +megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, + unsigned long arg) +{ + adapter_t *adapter; + nitioctl_t uioc; + int adapno; + int rval; + mega_passthru *upthru; /* user address for passthru */ + mega_passthru *pthru; /* copy user passthru here */ + dma_addr_t pthru_dma_hndl; + void *data = NULL; /* data to be transferred */ + dma_addr_t data_dma_hndl; /* dma handle for data xfer area */ + megacmd_t mc; + megastat_t *ustats; + int num_ldrv; + u32 uxferaddr = 0; + struct pci_dev *pdev; + + ustats = NULL; /* avoid compilation warnings */ + num_ldrv = 0; + + /* + * Make sure only USCSICMD are issued through this interface. + * MIMD application would still fire different command. + */ + if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) { + return -EINVAL; + } + + /* + * Check and convert a possible MIMD command to NIT command. 
+	 * mega_m_to_n() copies the data from the user space, so we do not
+	 * have to do it here.
+	 * NOTE: We will need some user address to copy out the data,
+	 * therefore the interface layer will also provide us with the
+	 * required user addresses.
+	 */
+	memset(&uioc, 0, sizeof(nitioctl_t));
+	if( (rval = mega_m_to_n( (void *)arg, &uioc)) != 0 )
+		return rval;
+
+
+	switch( uioc.opcode ) {
+
+	case GET_DRIVER_VER:
+		if( put_user(driver_ver, (u32 *)uioc.uioc_uaddr) )
+			return (-EFAULT);
+
+		break;
+
+	case GET_N_ADAP:
+		if( put_user(hba_count, (u32 *)uioc.uioc_uaddr) )
+			return (-EFAULT);
+
+		/*
+		 * Shucks. MIMD interface returns a positive value for number
+		 * of adapters. TODO: Change it to return 0 when there is no
+		 * application using the MIMD interface.
+		 */
+		return hba_count;
+
+	case GET_ADAP_INFO:
+
+		/*
+		 * Which adapter
+		 */
+		if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
+			return (-ENODEV);
+
+		if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno,
+				sizeof(struct mcontroller)) )
+			return (-EFAULT);
+		break;
+
+#if MEGA_HAVE_STATS
+
+	case GET_STATS:
+		/*
+		 * Which adapter
+		 */
+		if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
+			return (-ENODEV);
+
+		adapter = hba_soft_state[adapno];
+
+		ustats = (megastat_t *)uioc.uioc_uaddr;
+
+		if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) )
+			return (-EFAULT);
+
+		/*
+		 * Check for the validity of the logical drive number
+		 */
+		if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL;
+
+		if( copy_to_user(ustats->nreads, adapter->nreads,
+				num_ldrv*sizeof(u32)) )
+			return -EFAULT;
+
+		if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks,
+				num_ldrv*sizeof(u32)) )
+			return -EFAULT;
+
+		if( copy_to_user(ustats->nwrites, adapter->nwrites,
+				num_ldrv*sizeof(u32)) )
+			return -EFAULT;
+
+		if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks,
+				num_ldrv*sizeof(u32)) )
+			return -EFAULT;
+
+		if( copy_to_user(ustats->rd_errors, adapter->rd_errors,
+				num_ldrv*sizeof(u32)) )
+			return -EFAULT;
+
+		if( copy_to_user(ustats->wr_errors, adapter->wr_errors,
+				num_ldrv*sizeof(u32)) )
+			return -EFAULT;
+
+		return 0;
+
+#endif
+	case MBOX_CMD:
+
+		/*
+		 * Which adapter
+		 */
+		if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
+			return (-ENODEV);
+
+		adapter = hba_soft_state[adapno];
+
+		/*
+		 * Deletion of logical drive is a special case. The adapter
+		 * should be quiescent before this command is issued.
+		 */
+		if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV &&
+				uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) {
+
+			/*
+			 * Do we support this feature
+			 */
+			if( !adapter->support_random_del ) {
+				printk(KERN_WARNING "megaraid: logdrv ");
+				printk("delete on non-supporting F/W.\n");
+
+				return (-EINVAL);
+			}
+
+			rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] );
+
+			if( rval == 0 ) {
+				memset(&mc, 0, sizeof(megacmd_t));
+
+				mc.status = rval;
+
+				rval = mega_n_to_m((void *)arg, &mc);
+			}
+
+			return rval;
+		}
+		/*
+		 * This interface only supports the regular passthru commands.
+ * Reject extended passthru and 64-bit passthru + */ + if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || + uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { + + printk(KERN_WARNING "megaraid: rejected passthru.\n"); + + return (-EINVAL); + } + + /* + * For all internal commands, the buffer must be allocated in + * <4GB address range + */ + pdev = adapter->ipdev; + + /* Is it a passthru command or a DCMD */ + if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { + /* Passthru commands */ + + pthru = pci_alloc_consistent(pdev, + sizeof(mega_passthru), + &pthru_dma_hndl); + + if( pthru == NULL ) { + return (-ENOMEM); + } + + /* + * The user passthru structure + */ + upthru = (mega_passthru *)MBOX(uioc)->xferaddr; + + /* + * Copy in the user passthru here. + */ + if( copy_from_user(pthru, (char *)upthru, + sizeof(mega_passthru)) ) { + + pci_free_consistent(pdev, + sizeof(mega_passthru), pthru, + pthru_dma_hndl); + + return (-EFAULT); + } + + /* + * Is there a data transfer + */ + if( pthru->dataxferlen ) { + data = pci_alloc_consistent(pdev, + pthru->dataxferlen, + &data_dma_hndl); + + if( data == NULL ) { + pci_free_consistent(pdev, + sizeof(mega_passthru), + pthru, + pthru_dma_hndl); + + return (-ENOMEM); + } + + /* + * Save the user address and point the kernel + * address at just allocated memory + */ + uxferaddr = pthru->dataxferaddr; + pthru->dataxferaddr = data_dma_hndl; + } + + + /* + * Is data coming down-stream + */ + if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { + /* + * Get the user data + */ + if( copy_from_user(data, (char *)uxferaddr, + pthru->dataxferlen) ) { + rval = (-EFAULT); + goto freemem_and_return; + } + } + + memset(&mc, 0, sizeof(megacmd_t)); + + mc.cmd = MEGA_MBOXCMD_PASSTHRU; + mc.xferaddr = (u32)pthru_dma_hndl; + + /* + * Issue the command + */ + mega_internal_command(adapter, LOCK_INT, &mc, pthru); + + rval = mega_n_to_m((void *)arg, &mc); + + if( rval ) goto freemem_and_return; + + + /* + * Is data going up-stream + */ + if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { + if( copy_to_user((char *)uxferaddr, data, + pthru->dataxferlen) ) { + rval = (-EFAULT); + } + } + + /* + * Send the request sense data also, irrespective of + * whether the user has asked for it or not. 
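+			 * The 14 bytes copied match the reqsenselen the
+			 * driver itself uses when it builds passthru commands
+			 * (see mega_internal_dev_inquiry() below). Note that
+			 * the return value of this copy_to_user() is not
+			 * checked.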
+			 */
+			copy_to_user(upthru->reqsensearea,
+					pthru->reqsensearea, 14);
+
+freemem_and_return:
+			if( pthru->dataxferlen ) {
+				pci_free_consistent(pdev,
+						pthru->dataxferlen, data,
+						data_dma_hndl);
+			}
+
+			pci_free_consistent(pdev, sizeof(mega_passthru),
+					pthru, pthru_dma_hndl);
+
+			return rval;
+		}
+		else {
+			/* DCMD commands */
+
+			/*
+			 * Is there a data transfer
+			 */
+			if( uioc.xferlen ) {
+				data = pci_alloc_consistent(pdev,
+						uioc.xferlen, &data_dma_hndl);
+
+				if( data == NULL ) {
+					return (-ENOMEM);
+				}
+
+				uxferaddr = MBOX(uioc)->xferaddr;
+			}
+
+			/*
+			 * Is data coming down-stream
+			 */
+			if( uioc.xferlen && (uioc.flags & UIOC_WR) ) {
+				/*
+				 * Get the user data
+				 */
+				if( copy_from_user(data, (char *)uxferaddr,
+						uioc.xferlen) ) {
+
+					pci_free_consistent(pdev,
+							uioc.xferlen,
+							data, data_dma_hndl);
+
+					return (-EFAULT);
+				}
+			}
+
+			memcpy(&mc, MBOX(uioc), sizeof(megacmd_t));
+
+			mc.xferaddr = (u32)data_dma_hndl;
+
+			/*
+			 * Issue the command
+			 */
+			mega_internal_command(adapter, LOCK_INT, &mc, NULL);
+
+			rval = mega_n_to_m((void *)arg, &mc);
+
+			if( rval ) {
+				if( uioc.xferlen ) {
+					pci_free_consistent(pdev,
+							uioc.xferlen, data,
+							data_dma_hndl);
+				}
+
+				return rval;
+			}
+
+			/*
+			 * Is data going up-stream
+			 */
+			if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
+				if( copy_to_user((char *)uxferaddr, data,
+						uioc.xferlen) ) {
+
+					rval = (-EFAULT);
+				}
+			}
+
+			if( uioc.xferlen ) {
+				pci_free_consistent(pdev,
+						uioc.xferlen, data,
+						data_dma_hndl);
+			}
+
+			return rval;
+		}
+
+	default:
+		return (-EINVAL);
+	}
+
+	return 0;
+}
+
+/**
+ * mega_m_to_n()
+ * @arg - user address
+ * @uioc - new ioctl structure
+ *
+ * A thin layer that converts the older MIMD interface ioctl structure to
+ * the newer NIT ioctl structure
+ */
+static int
+mega_m_to_n(void *arg, nitioctl_t *uioc)
+{
+	struct uioctl_t	uioc_mimd;
+	char	signature[8] = {0};
+	u8	opcode;
+	u8	subopcode;
+
+
+	/*
+	 * check if the application conforms to NIT. We do not have to do
+	 * much in that case.
+	 * We exploit the fact that the signature is stored in the very
+	 * beginning of the structure.
+	 */
+
+	if( copy_from_user(signature, (char *)arg, 7) )
+		return (-EFAULT);
+
+	if( memcmp(signature, "MEGANIT", 7) == 0 ) {
+
+		/*
+		 * NOTE: The nit ioctl is still under flux because of
+		 * change of mailbox definition, in HPE. No applications yet
+		 * use this interface and let's not have applications use this
+		 * interface till the new specifications are in place.
+		 */
+		return -EINVAL;
+#if 0
+		if( copy_from_user(uioc, (char *)arg, sizeof(nitioctl_t)) )
+			return (-EFAULT);
+		return 0;
+#endif
+	}
+
+	/*
+	 * Else assume we have a mimd uioctl_t as arg; convert it to
+	 * nitioctl_t.
+	 *
+	 * Get the user ioctl structure
+	 */
+	if( copy_from_user(&uioc_mimd, (char *)arg, sizeof(struct uioctl_t)) )
+		return (-EFAULT);
+
+
+	/*
+	 * Get the opcode and subopcode for the commands
+	 */
+	opcode = uioc_mimd.ui.fcs.opcode;
+	subopcode = uioc_mimd.ui.fcs.subopcode;
+
+	switch (opcode) {
+	case 0x82:
+
+		switch (subopcode) {
+
+		case MEGAIOC_QDRVRVER:	/* Query driver version */
+			uioc->opcode = GET_DRIVER_VER;
+			uioc->uioc_uaddr = uioc_mimd.data;
+			break;
+
+		case MEGAIOC_QNADAP:	/* Get # of adapters */
+			uioc->opcode = GET_N_ADAP;
+			uioc->uioc_uaddr = uioc_mimd.data;
+			break;
+
+		case MEGAIOC_QADAPINFO:	/* Get adapter information */
+			uioc->opcode = GET_ADAP_INFO;
+			uioc->adapno = uioc_mimd.ui.fcs.adapno;
+			uioc->uioc_uaddr = uioc_mimd.data;
+			break;
+
+		default:
+			return(-EINVAL);
+		}
+
+		break;
+
+
+	case 0x81:
+
+		uioc->opcode = MBOX_CMD;
+		uioc->adapno = uioc_mimd.ui.fcs.adapno;
+
+		memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
+
+		uioc->xferlen = uioc_mimd.ui.fcs.length;
+
+		if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
+		if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
+
+		break;
+
+	case 0x80:
+
+		uioc->opcode = MBOX_CMD;
+		uioc->adapno = uioc_mimd.ui.fcs.adapno;
+
+		memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
+
+		/*
+		 * Use the bigger of the input and output data lengths as
+		 * xferlen
+		 */
+		uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ?
+			uioc_mimd.outlen : uioc_mimd.inlen;
+
+		if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
+		if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
+
+		break;
+
+	default:
+		return (-EINVAL);
+
+	}
+
+	return 0;
+}
+
+/*
+ * mega_n_to_m()
+ * @arg - user address
+ * @mc - mailbox command
+ *
+ * Updates the status information to the application, depending on whether
+ * the application conforms to the older MIMD ioctl interface or the newer
+ * NIT ioctl interface
+ */
+static int
+mega_n_to_m(void *arg, megacmd_t *mc)
+{
+	nitioctl_t	*uiocp;
+	megacmd_t	*umc;
+	mega_passthru	*upthru;
+	struct uioctl_t	*uioc_mimd;
+	char	signature[8] = {0};
+
+	/*
+	 * check if the application conforms to NIT.
+	 */
+	if( copy_from_user(signature, (char *)arg, 7) )
+		return -EFAULT;
+
+	if( memcmp(signature, "MEGANIT", 7) == 0 ) {
+
+		uiocp = (nitioctl_t *)arg;
+
+		if( put_user(mc->status, (u8 *)&MBOX_P(uiocp)->status) )
+			return (-EFAULT);
+
+		if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
+
+			umc = MBOX_P(uiocp);
+
+			upthru = (mega_passthru *)umc->xferaddr;
+
+			if( put_user(mc->status, (u8 *)&upthru->scsistatus) )
+				return (-EFAULT);
+		}
+	}
+	else {
+		uioc_mimd = (struct uioctl_t *)arg;
+
+		if( put_user(mc->status, (u8 *)&uioc_mimd->mbox[17]) )
+			return (-EFAULT);
+
+		if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
+
+			umc = (megacmd_t *)uioc_mimd->mbox;
+
+			upthru = (mega_passthru *)umc->xferaddr;
+
+			if( put_user(mc->status, (u8 *)&upthru->scsistatus) )
+				return (-EFAULT);
+		}
+	}
+
+	return 0;
+}
+
+
+static int
+megadev_close (struct inode *inode, struct file *filep)
+{
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+
+/*
+ * MEGARAID 'FW' commands.
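+ *
+ * These helpers all follow one pattern (a sketch inferred from the code
+ * below, not from a firmware specification): byte 0 of the raw mailbox
+ * carries the main opcode, byte 2 an optional sub-opcode, and the mailbox
+ * xferaddr field points at a DMA buffer for the reply:
+ *
+ *	u8 raw_mbox[16];
+ *	mbox_t *mbox = (mbox_t *)raw_mbox;
+ *
+ *	memset(raw_mbox, 0, sizeof(raw_mbox));
+ *	raw_mbox[0] = FC_DEL_LOGDRV;		// main opcode
+ *	raw_mbox[2] = OP_SUP_DEL_LOGDRV;	// sub-opcode
+ *	mbox->xferaddr = (u32)adapter->buf_dma_handle;
+ *
+ *	if( issue_scb_block(adapter, raw_mbox) == 0 ) {
+ *		// reply, if any, is in adapter->mega_buffer
+ *	}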
+ */
+
+/**
+ * mega_is_bios_enabled()
+ * @adapter - pointer to our soft state
+ *
+ * issue command to find out if the BIOS is enabled for this controller
+ */
+static int
+mega_is_bios_enabled(adapter_t *adapter)
+{
+	unsigned char	raw_mbox[16];
+	mbox_t	*mbox;
+	int	ret;
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset(raw_mbox, 0, sizeof(raw_mbox));
+
+	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+	mbox->xferaddr = (u32)adapter->buf_dma_handle;
+
+	raw_mbox[0] = IS_BIOS_ENABLED;
+	raw_mbox[2] = GET_BIOS;
+
+
+	ret = issue_scb_block(adapter, raw_mbox);
+
+	return *(char *)adapter->mega_buffer;
+}
+
+
+/**
+ * mega_enum_raid_scsi()
+ * @adapter - pointer to our soft state
+ *
+ * Find out what channels are RAID/SCSI. This information is used to
+ * differentiate the virtual channels and physical channels and to support
+ * the ROMB feature and non-disk devices.
+ */
+static void
+mega_enum_raid_scsi(adapter_t *adapter)
+{
+	unsigned char	raw_mbox[16];
+	mbox_t	*mbox;
+	int	i;
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset(raw_mbox, 0, sizeof(raw_mbox));
+
+	/*
+	 * issue command to find out what channels are raid/scsi
+	 */
+	raw_mbox[0] = CHNL_CLASS;
+	raw_mbox[2] = GET_CHNL_CLASS;
+
+	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+	mbox->xferaddr = (u32)adapter->buf_dma_handle;
+
+	/*
+	 * Non-ROMB firmware fails this command, so all channels
+	 * must be shown as RAID
+	 */
+	adapter->mega_ch_class = 0xFF;
+
+	if(!issue_scb_block(adapter, raw_mbox)) {
+		adapter->mega_ch_class = *((char *)adapter->mega_buffer);
+
+	}
+
+	for( i = 0; i < adapter->product_info.nchannels; i++ ) {
+		if( (adapter->mega_ch_class >> i) & 0x01 ) {
+			printk(KERN_INFO "megaraid: channel[%d] is raid.\n",
+				i);
+		}
+		else {
+			printk(KERN_INFO "megaraid: channel[%d] is scsi.\n",
+				i);
+		}
+	}
+
+	return;
+}
+
+
+/**
+ * mega_get_boot_drv()
+ * @adapter - pointer to our soft state
+ *
+ * Find out which device is the boot device. Note, any logical drive or any
+ * physical device (e.g., a CDROM) can be designated as a boot device.
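+ *
+ * The private BIOS data block is trusted only if its checksum field holds
+ * the two's complement of the sum of its first 14 bytes, roughly (a sketch
+ * mirroring the loop in the function body; "bytes" stands for the buffer
+ * itself):
+ *
+ *	u16 sum = 0;
+ *	for( i = 0; i < 14; i++ )
+ *		sum += bytes[i];
+ *	valid = (prv_bios_data->cksum == (u16)(0 - sum));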
+ */
+static void
+mega_get_boot_drv(adapter_t *adapter)
+{
+	struct private_bios_data	*prv_bios_data;
+	unsigned char	raw_mbox[16];
+	mbox_t	*mbox;
+	u16	cksum = 0;
+	u8	*cksum_p;
+	u8	boot_pdrv;
+	int	i;
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset(mbox, 0, sizeof(raw_mbox));
+
+	raw_mbox[0] = BIOS_PVT_DATA;
+	raw_mbox[2] = GET_BIOS_PVT_DATA;
+
+	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+	mbox->xferaddr = (u32)adapter->buf_dma_handle;
+
+	adapter->boot_ldrv_enabled = 0;
+	adapter->boot_ldrv = 0;
+
+	adapter->boot_pdrv_enabled = 0;
+	adapter->boot_pdrv_ch = 0;
+	adapter->boot_pdrv_tgt = 0;
+
+	if(issue_scb_block(adapter, raw_mbox) == 0) {
+		prv_bios_data =
+			(struct private_bios_data *)adapter->mega_buffer;
+
+		cksum = 0;
+		cksum_p = (u8 *)prv_bios_data;
+		for (i = 0; i < 14; i++ ) {
+			cksum += (u16)(*cksum_p++);
+		}
+
+		if (prv_bios_data->cksum == (u16)(0-cksum) ) {
+
+			/*
+			 * If MSB is set, a physical drive is set as boot
+			 * device
+			 */
+			if( prv_bios_data->boot_drv & 0x80 ) {
+				adapter->boot_pdrv_enabled = 1;
+				boot_pdrv = prv_bios_data->boot_drv & 0x7F;
+				adapter->boot_pdrv_ch = boot_pdrv / 16;
+				adapter->boot_pdrv_tgt = boot_pdrv % 16;
+			}
+			else {
+				adapter->boot_ldrv_enabled = 1;
+				adapter->boot_ldrv = prv_bios_data->boot_drv;
+			}
+		}
+	}
+
+}
+
+/**
+ * mega_support_random_del()
+ * @adapter - pointer to our soft state
+ *
+ * Find out if this controller supports random deletion and addition of
+ * logical drives
+ */
+static int
+mega_support_random_del(adapter_t *adapter)
+{
+	unsigned char	raw_mbox[16];
+	mbox_t	*mbox;
+	int	rval;
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset(raw_mbox, 0, sizeof(raw_mbox));
+
+	/*
+	 * issue command
+	 */
+	raw_mbox[0] = FC_DEL_LOGDRV;
+	raw_mbox[2] = OP_SUP_DEL_LOGDRV;
+
+	rval = issue_scb_block(adapter, raw_mbox);
+
+	return !rval;
+}
+
+
+/**
+ * mega_support_ext_cdb()
+ * @adapter - pointer to our soft state
+ *
+ * Find out if this firmware supports cdblen > 10
+ */
+static int
+mega_support_ext_cdb(adapter_t *adapter)
+{
+	unsigned char	raw_mbox[16];
+	mbox_t	*mbox;
+	int	rval;
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset(raw_mbox, 0, sizeof(raw_mbox));
+	/*
+	 * issue command to find out if controller supports extended CDBs.
+	 */
+	raw_mbox[0] = 0xA4;
+	raw_mbox[2] = 0x16;
+
+	rval = issue_scb_block(adapter, raw_mbox);
+
+	return !rval;
+}
+
+
+/**
+ * mega_del_logdrv()
+ * @adapter - pointer to our soft state
+ * @logdrv - logical drive to be deleted
+ *
+ * Delete the specified logical drive. It is the responsibility of the user
+ * app to let the OS know about this operation.
+ */
+static int
+mega_del_logdrv(adapter_t *adapter, int logdrv)
+{
+	DECLARE_WAIT_QUEUE_HEAD(wq);
+	unsigned long	flags;
+	scb_t	*scb;
+	int	rval;
+
+	ASSERT( !spin_is_locked(&adapter->lock) );
+
+	/*
+	 * Stop sending commands to the controller, queue them internally.
+	 * When deletion is complete, ISR will flush the queue.
+	 */
+	atomic_set(&adapter->quiescent, 1);
+
+	/*
+	 * Wait till all the issued commands are complete and there are no
+	 * commands in the pending queue
+	 */
+	while( atomic_read(&adapter->pend_cmds) > 0 ) {
+
+		sleep_on_timeout( &wq, 1*HZ );	/* sleep for 1s */
+	}
+
+	rval = mega_do_del_logdrv(adapter, logdrv);
+
+
+	spin_lock_irqsave(&adapter->lock, flags);
+
+	/*
+	 * If the delete operation was successful, add 0x80 to the logical
+	 * drive ids for commands in the pending queue.
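+	 * The firmware switches to a 0x80-biased logical drive numbering
+	 * once the first drive has been deleted (read_ldidmap is set in
+	 * mega_do_del_logdrv() below), so commands still queued with an
+	 * old id must be re-based before they are re-issued.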
+	 */
+	if (adapter->read_ldidmap) {
+		struct list_head *pos;
+		list_for_each(pos, &adapter->pending_list) {
+			scb = list_entry(pos, scb_t, list);
+			if (((mbox_t *)scb->raw_mbox)->logdrv < 0x80 )
+				((mbox_t *)scb->raw_mbox)->logdrv += 0x80;
+		}
+	}
+
+	atomic_set(&adapter->quiescent, 0);
+
+	mega_runpendq(adapter);
+
+	spin_unlock_irqrestore(&adapter->lock, flags);
+
+	return rval;
+}
+
+
+static int
+mega_do_del_logdrv(adapter_t *adapter, int logdrv)
+{
+	int	rval;
+	u8	raw_mbox[16];
+
+	/* do not send stale stack bytes to the firmware */
+	memset(raw_mbox, 0, sizeof(raw_mbox));
+
+	raw_mbox[0] = FC_DEL_LOGDRV;
+	raw_mbox[2] = OP_DEL_LOGDRV;
+	raw_mbox[3] = logdrv;
+
+	/* Issue a blocking command to the card */
+	rval = issue_scb_block(adapter, raw_mbox);
+
+	/* log this event */
+	if(rval) {
+		printk(KERN_WARNING "megaraid: Delete LD-%d failed.\n",
+				logdrv);
+		return rval;
+	}
+
+	/*
+	 * After deleting the first logical drive, the logical drives must be
+	 * addressed by adding 0x80 to the logical drive id.
+	 */
+	adapter->read_ldidmap = 1;
+
+	return rval;
+}
+
+
+/**
+ * mega_get_max_sgl()
+ * @adapter - pointer to our soft state
+ *
+ * Find out the maximum number of scatter-gather elements supported by this
+ * version of the firmware
+ */
+static void
+mega_get_max_sgl(adapter_t *adapter)
+{
+	unsigned char	raw_mbox[16];
+	mbox_t	*mbox;
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset(mbox, 0, sizeof(raw_mbox));
+
+	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+	mbox->xferaddr = (u32)adapter->buf_dma_handle;
+
+	raw_mbox[0] = MAIN_MISC_OPCODE;
+	raw_mbox[2] = GET_MAX_SG_SUPPORT;
+
+
+	if( issue_scb_block(adapter, raw_mbox) ) {
+		/*
+		 * f/w does not support this command. Choose the default value
+		 */
+		adapter->sglen = MIN_SGLIST;
+	}
+	else {
+		adapter->sglen = *((char *)adapter->mega_buffer);
+
+		/*
+		 * Make sure this is not more than the resources we are
+		 * planning to allocate
+		 */
+		if ( adapter->sglen > MAX_SGLIST )
+			adapter->sglen = MAX_SGLIST;
+	}
+
+	return;
+}
+
+
+/**
+ * mega_support_cluster()
+ * @adapter - pointer to our soft state
+ *
+ * Find out if this firmware supports cluster calls.
+ */
+static int
+mega_support_cluster(adapter_t *adapter)
+{
+	unsigned char	raw_mbox[16];
+	mbox_t	*mbox;
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset(mbox, 0, sizeof(raw_mbox));
+
+	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+	mbox->xferaddr = (u32)adapter->buf_dma_handle;
+
+	/*
+	 * Try to get the initiator id. This command will succeed only if
+	 * clustering is available on this HBA.
+	 */
+	raw_mbox[0] = MEGA_GET_TARGET_ID;
+
+	if( issue_scb_block(adapter, raw_mbox) == 0 ) {
+
+		/*
+		 * Cluster support available. Get the initiator target id.
+		 * Tell our id to mid-layer too.
+		 */
+		adapter->this_id = *(u32 *)adapter->mega_buffer;
+		adapter->host->this_id = adapter->this_id;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+
+
+/**
+ * mega_get_ldrv_num()
+ * @adapter - pointer to our soft state
+ * @cmd - scsi mid layer command
+ * @channel - channel on the controller
+ *
+ * Calculate the logical drive number based on the information in the scsi
+ * command and the channel number.
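+ *
+ * As a worked example (before the boot-drive and post-delete adjustments
+ * applied in the body): with the default initiator id 7, channel 2 /
+ * target 3 maps to (2 * 15) + 3 = 33, while channel 2 / target 9 is first
+ * decremented past the initiator id and maps to (2 * 15) + 8 = 38.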
+ */
+static inline int
+mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel)
+{
+	int	tgt;
+	int	ldrv_num;
+
+	tgt = cmd->target;
+
+	if ( tgt > adapter->this_id )
+		tgt--;	/* we do not get inquiries for the initiator id */
+
+	ldrv_num = (channel * 15) + tgt;
+
+
+	/*
+	 * If we have a logical drive with boot enabled, project it first
+	 */
+	if( adapter->boot_ldrv_enabled ) {
+		if( ldrv_num == 0 ) {
+			ldrv_num = adapter->boot_ldrv;
+		}
+		else {
+			if( ldrv_num <= adapter->boot_ldrv ) {
+				ldrv_num--;
+			}
+		}
+	}
+
+	/*
+	 * If the "delete logical drive" feature is enabled on this
+	 * controller, do this only if at least one delete logical drive
+	 * operation was done.
+	 *
+	 * Also, after a logical drive deletion, the value returned should
+	 * be 0x80+logical drive id instead of the logical drive number.
+	 *
+	 * This is valid only for I/O commands.
+	 */
+
+	if (adapter->support_random_del && adapter->read_ldidmap )
+		switch (cmd->cmnd[0]) {
+		case READ_6:	/* fall through */
+		case WRITE_6:	/* fall through */
+		case READ_10:	/* fall through */
+		case WRITE_10:
+			ldrv_num += 0x80;
+		}
+
+	return ldrv_num;
+}
+
+
+/**
+ * mega_reorder_hosts()
+ *
+ * Hack: reorder the scsi hosts in mid-layer so that the controller with the
+ * boot device on it appears first in the list.
+ */
+static void
+mega_reorder_hosts(void)
+{
+	struct Scsi_Host *shpnt;
+	struct Scsi_Host *shone;
+	struct Scsi_Host *shtwo;
+	adapter_t *boot_host;
+	int i;
+
+	/*
+	 * Find the (first) host which has its BIOS enabled
+	 */
+	boot_host = NULL;
+	for (i = 0; i < MAX_CONTROLLERS; i++) {
+		if (mega_hbas[i].is_bios_enabled) {
+			boot_host = mega_hbas[i].hostdata_addr;
+			break;
+		}
+	}
+
+	if (!boot_host) {
+		printk(KERN_NOTICE "megaraid: no BIOS enabled.\n");
+		return;
+	}
+
+	/*
+	 * Traverse through the list of SCSI hosts for our HBA locations
+	 */
+	shone = shtwo = NULL;
+	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+		/* Is it one of ours? */
+		for (i = 0; i < MAX_CONTROLLERS; i++) {
+			if ((adapter_t *) shpnt->hostdata ==
+				mega_hbas[i].hostdata_addr) {
+				/* Does this one have BIOS enabled? */
+				if (mega_hbas[i].hostdata_addr == boot_host) {
+
+					/* Are we first? */
+					if (!shtwo)	/* Yes! */
+						return;
+					else	/* :-( */
+						shone = shpnt;
+				} else {
+					if (!shtwo) {
+						/* were we here before? xchng
+						 * first */
+						shtwo = shpnt;
+					}
+				}
+				break;
+			}
+		}
+		/*
+		 * Have we got the boot host and one which does not have the
+		 * BIOS enabled.
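+		 * (shone tracks the list node of the boot HBA, shtwo the
+		 * first of our HBAs seen before it; once both are known the
+		 * two nodes are exchanged by mega_swap_hosts() below.)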
+		 */
+		if (shone && shtwo)
+			break;
+	}
+	if (shone && shtwo) {
+		mega_swap_hosts (shone, shtwo);
+	}
+
+	return;
+}
+
+
+static void
+mega_swap_hosts (struct Scsi_Host *shone, struct Scsi_Host *shtwo)
+{
+	struct Scsi_Host *prevtoshtwo;
+	struct Scsi_Host *prevtoshone;
+	struct Scsi_Host *save = NULL;
+
+	/* Are these two nodes adjacent? */
+	if (shtwo->next == shone) {
+
+		if (shtwo == scsi_hostlist && !shone->next) {
+
+			/* just two nodes */
+			scsi_hostlist = shone;
+			shone->next = shtwo;
+			shtwo->next = NULL;
+		} else if (shtwo == scsi_hostlist) {
+			/* first two nodes of the list */
+
+			scsi_hostlist = shone;
+			shtwo->next = shone->next;
+			scsi_hostlist->next = shtwo;
+		} else if (!shone->next) {
+			/* last two nodes of the list */
+
+			prevtoshtwo = scsi_hostlist;
+
+			while (prevtoshtwo->next != shtwo)
+				prevtoshtwo = prevtoshtwo->next;
+
+			prevtoshtwo->next = shone;
+			shone->next = shtwo;
+			shtwo->next = NULL;
+		} else {
+			prevtoshtwo = scsi_hostlist;
+
+			while (prevtoshtwo->next != shtwo)
+				prevtoshtwo = prevtoshtwo->next;
+
+			prevtoshtwo->next = shone;
+			shtwo->next = shone->next;
+			shone->next = shtwo;
+		}
+
+	} else if (shtwo == scsi_hostlist && !shone->next) {
+		/* shtwo at head, shone at tail, not adjacent */
+
+		prevtoshone = scsi_hostlist;
+
+		while (prevtoshone->next != shone)
+			prevtoshone = prevtoshone->next;
+
+		scsi_hostlist = shone;
+		shone->next = shtwo->next;
+		prevtoshone->next = shtwo;
+		shtwo->next = NULL;
+	} else if (shtwo == scsi_hostlist && shone->next) {
+		/* shtwo at head, shone is not at tail */
+
+		prevtoshone = scsi_hostlist;
+		while (prevtoshone->next != shone)
+			prevtoshone = prevtoshone->next;
+
+		scsi_hostlist = shone;
+		prevtoshone->next = shtwo;
+		save = shtwo->next;
+		shtwo->next = shone->next;
+		shone->next = save;
+	} else if (!shone->next) {
+		/* shtwo not at head, shone at tail */
+
+		prevtoshtwo = scsi_hostlist;
+		prevtoshone = scsi_hostlist;
+
+		while (prevtoshtwo->next != shtwo)
+			prevtoshtwo = prevtoshtwo->next;
+		while (prevtoshone->next != shone)
+			prevtoshone = prevtoshone->next;
+
+		prevtoshtwo->next = shone;
+		shone->next = shtwo->next;
+		prevtoshone->next = shtwo;
+		shtwo->next = NULL;
+
+	} else {
+		prevtoshtwo = scsi_hostlist;
+		prevtoshone = scsi_hostlist;
+		save = NULL;
+
+		while (prevtoshtwo->next != shtwo)
+			prevtoshtwo = prevtoshtwo->next;
+		while (prevtoshone->next != shone)
+			prevtoshone = prevtoshone->next;
+
+		prevtoshtwo->next = shone;
+		save = shone->next;
+		shone->next = shtwo->next;
+		prevtoshone->next = shtwo;
+		shtwo->next = save;
+	}
+	return;
+}
+
+
+
+/**
+ * mega_adapinq()
+ * @adapter - pointer to our soft state
+ * @dma_handle - DMA address of the buffer
+ *
+ * Issue internal commands while interrupts are available.
+ * We only issue direct mailbox commands from within the driver. The ioctl()
+ * interface using these routines can issue passthru commands.
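+ *
+ * Depending on BOARD_40LD the reply is decoded as either a mega_inquiry3
+ * (FC_NEW_CONFIG/NC_SUBOP_ENQUIRY3 with ENQ3_GET_SOLICITED_FULL) or an
+ * mraid_ext_inquiry (MEGA_MBOXCMD_ADPEXTINQ); the single buffer handed
+ * out by mega_allocate_inquiry() below is used for both layouts.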
+ */ +static int +mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) +{ + megacmd_t mc; + + memset(&mc, 0, sizeof(megacmd_t)); + + if( adapter->flag & BOARD_40LD ) { + mc.cmd = FC_NEW_CONFIG; + mc.opcode = NC_SUBOP_ENQUIRY3; + mc.subopcode = ENQ3_GET_SOLICITED_FULL; + } + else { + mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; + } + + mc.xferaddr = (u32)dma_handle; + + if ( mega_internal_command(adapter, LOCK_INT, &mc, NULL) != 0 ) { + return -1; + } + + return 0; +} + + +/** + * mega_allocate_inquiry() + * @dma_handle - handle returned for dma address + * @pdev - handle to pci device + * + * allocates memory for inquiry structure + */ +static inline caddr_t +mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev) +{ + return pci_alloc_consistent(pdev, sizeof(mega_inquiry3), dma_handle); +} + + +static inline void +mega_free_inquiry(caddr_t inquiry, dma_addr_t dma_handle, struct pci_dev *pdev) +{ + pci_free_consistent(pdev, sizeof(mega_inquiry3), inquiry, dma_handle); +} + + +/** mega_internal_dev_inquiry() + * @adapter - pointer to our soft state + * @ch - channel for this device + * @tgt - ID of this device + * @buf_dma_handle - DMA address of the buffer + * + * Issue the scsi inquiry for the specified device. + */ +static int +mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, + dma_addr_t buf_dma_handle) +{ + mega_passthru *pthru; + dma_addr_t pthru_dma_handle; + megacmd_t mc; + int rval; + struct pci_dev *pdev; + + + /* + * For all internal commands, the buffer must be allocated in <4GB + * address range + */ + pdev = adapter->ipdev; + + pthru = pci_alloc_consistent(pdev, sizeof(mega_passthru), + &pthru_dma_handle); + + if( pthru == NULL ) { + return -1; + } + + pthru->timeout = 2; + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 0; + + pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; + + pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; + + pthru->cdblen = 6; + + pthru->cdb[0] = INQUIRY; + pthru->cdb[1] = 0; + pthru->cdb[2] = 0; + pthru->cdb[3] = 0; + pthru->cdb[4] = 255; + pthru->cdb[5] = 0; + + + pthru->dataxferaddr = (u32)buf_dma_handle; + pthru->dataxferlen = 256; + + memset(&mc, 0, sizeof(megacmd_t)); + + mc.cmd = MEGA_MBOXCMD_PASSTHRU; + mc.xferaddr = (u32)pthru_dma_handle; + + rval = mega_internal_command(adapter, LOCK_INT, &mc, pthru); + + pci_free_consistent(pdev, sizeof(mega_passthru), pthru, + pthru_dma_handle); + + return rval; +} + + +/** + * mega_internal_command() + * @adapter - pointer to our soft state + * @ls - the scope of the exclusion lock. + * @mc - the mailbox command + * @pthru - Passthru structure for DCDB commands + * + * Issue the internal commands in interrupt mode. + * The last argument is the address of the passthru structure if the command + * to be fired is a passthru command + * + * lockscope specifies whether the caller has already acquired the lock. Of + * course, the caller must know which lock we are talking about. + * + * Note: parameter 'pthru' is null for non-passthru commands. + */ +static int +mega_internal_command(adapter_t *adapter, lockscope_t ls, megacmd_t *mc, + mega_passthru *pthru ) +{ + Scsi_Cmnd *scmd; + unsigned long flags = 0; + scb_t *scb; + int rval; + + /* + * The internal commands share one command id and hence are + * serialized. This is so because we want to reserve maximum number of + * available command ids for the I/O commands. 
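+	 * The int_mtx semaphore taken below enforces that, so the single
+	 * int_scb/int_scmd pair in the adapter can be reused safely.
+	 * scmd->state doubles as the completion flag: it is cleared here
+	 * and set to 1 by mega_internal_done(), which is what the
+	 * wait_event() further down waits for.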
+	 */
+	down(&adapter->int_mtx);
+
+	scb = &adapter->int_scb;
+	memset(scb, 0, sizeof(scb_t));
+
+	scmd = &adapter->int_scmd;
+	memset(scmd, 0, sizeof(Scsi_Cmnd));
+
+	scmd->host = adapter->host;
+	scmd->buffer = (void *)scb;
+	scmd->cmnd[0] = MEGA_INTERNAL_CMD;
+
+	scb->state |= SCB_ACTIVE;
+	scb->cmd = scmd;
+
+	memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
+
+	/*
+	 * Is it a passthru command
+	 */
+	if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
+
+		scb->pthru = pthru;
+	}
+
+	scb->idx = CMDID_INT_CMDS;
+
+	scmd->state = 0;
+
+	/*
+	 * Get the lock only if the caller has not acquired it already
+	 */
+	if( ls == LOCK_INT ) spin_lock_irqsave(&adapter->lock, flags);
+
+	megaraid_queue(scmd, mega_internal_done);
+
+	if( ls == LOCK_INT ) spin_unlock_irqrestore(&adapter->lock, flags);
+
+	/*
+	 * Wait till this command finishes. Do not use
+	 * wait_event_interruptible(). It causes a panic if CTRL-C is hit
+	 * when dumping, e.g., physical disk information through the /proc
+	 * interface.
+	 * Catching the return value should solve the issue but for now keep
+	 * the call non-interruptible.
+	 */
+#if 0
+	wait_event_interruptible(adapter->int_waitq, scmd->state);
+#endif
+	wait_event(adapter->int_waitq, scmd->state);
+
+	rval = scmd->result;
+	mc->status = scmd->result;
+
+	/*
+	 * Print a debug message for all failed commands. Applications can
+	 * use this information.
+	 */
+	if( scmd->result && trace_level ) {
+		printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
+			mc->cmd, mc->opcode, mc->subopcode, scmd->result);
+	}
+
+	up(&adapter->int_mtx);
+
+	return rval;
+}
+
+
+/**
+ * mega_internal_done()
+ * @scmd - internal scsi command
+ *
+ * Callback routine for internal commands.
+ */
+static void
+mega_internal_done(Scsi_Cmnd *scmd)
+{
+	adapter_t	*adapter;
+
+	adapter = (adapter_t *)scmd->host->hostdata;
+
+	scmd->state = 1;	/* wake up the thread waiting for this command */
+
+	/*
+	 * See comment in mega_internal_command() routine for
+	 * wait_event_interruptible()
+	 */
+#if 0
+	wake_up_interruptible(&adapter->int_waitq);
+#endif
+	wake_up(&adapter->int_waitq);
+
+}
+
+static Scsi_Host_Template driver_template = MEGARAID;
+
+#include "scsi_module.c"
+
+/* vi: set ts=8 sw=8 tw=78: */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/megaraid2.h linux.21rc5-ac2/drivers/scsi/megaraid2.h
--- linux.21rc5/drivers/scsi/megaraid2.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/drivers/scsi/megaraid2.h	2003-05-28 21:48:47.000000000 +0100
@@ -0,0 +1,1121 @@
+#ifndef __MEGARAID_H__
+#define __MEGARAID_H__
+
+#include
+#include
+
+
+#define MEGARAID_VERSION	\
+	"v2.00.5 (Release Date: Thu Apr 24 14:06:55 EDT 2003)\n"
+
+/*
+ * Driver features - change the values to enable or disable features in the
+ * driver.
+ */
+
+/*
+ * Command coalescing - This feature allows the driver to be able to combine
+ * two or more commands and issue them as one command in order to boost I/O
+ * performance. Useful if the nature of the I/O is sequential. It is not very
+ * useful for random-natured I/Os.
+ */
+#define MEGA_HAVE_COALESCING	0
+
+/*
+ * Clustering support - Set this flag if you are planning to use the
+ * clustering services provided by the megaraid controllers and planning to
+ * setup a cluster
+ */
+#define MEGA_HAVE_CLUSTERING	1
+
+/*
+ * Driver statistics - Set this flag if you are interested in statistics
+ * about the number of I/Os completed on each logical drive and how many
+ * interrupts were
If enabled, this information is available through /proc + * interface and through the private ioctl. Setting this flag has a + * performance penalty. + */ +#define MEGA_HAVE_STATS 0 + +/* + * Enhanced /proc interface - This feature will allow you to have a more + * detailed /proc interface for megaraid driver. E.g., a real time update of + * the status of the logical drives, battery status, physical drives etc. + */ +#define MEGA_HAVE_ENH_PROC 1 + +#define MAX_DEV_TYPE 32 + +#ifndef PCI_VENDOR_ID_LSI_LOGIC +#define PCI_VENDOR_ID_LSI_LOGIC 0x1000 +#endif + +#ifndef PCI_VENDOR_ID_AMI +#define PCI_VENDOR_ID_AMI 0x101E +#endif + +#ifndef PCI_VENDOR_ID_DELL +#define PCI_VENDOR_ID_DELL 0x1028 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef PCI_DEVICE_ID_AMI_MEGARAID +#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010 +#endif + +#ifndef PCI_DEVICE_ID_AMI_MEGARAID2 +#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060 +#endif + +#ifndef PCI_DEVICE_ID_AMI_MEGARAID3 +#define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960 +#endif + +#define PCI_DEVICE_ID_DISCOVERY 0x000E +#define PCI_DEVICE_ID_PERC4_DI 0x000F +#define PCI_DEVICE_ID_PERC4_QC_VERDE 0x0407 + +/* Sub-System Vendor IDs */ +#define AMI_SUBSYS_VID 0x101E +#define DELL_SUBSYS_VID 0x1028 +#define HP_SUBSYS_VID 0x103C +#define LSI_SUBSYS_VID 0x1000 + +#define HBA_SIGNATURE 0x3344 +#define HBA_SIGNATURE_471 0xCCCC +#define HBA_SIGNATURE_64BIT 0x0299 + +#define MBOX_BUSY_WAIT 10 /* wait for up to 10 usec for + mailbox to be free */ +#define DEFAULT_INITIATOR_ID 7 + +#define MAX_SGLIST 64 /* max supported in f/w */ +#define MIN_SGLIST 26 /* guaranteed to support these many */ +#define MAX_COMMANDS 126 +#define CMDID_INT_CMDS MAX_COMMANDS+1 /* make sure CMDID_INT_CMDS + is less than max commands + supported by any f/w */ + +#define MAX_CDB_LEN 10 +#define MAX_EXT_CDB_LEN 16 /* we support cdb length up to 16 */ + +#define DEF_CMD_PER_LUN 63 +#define MAX_CMD_PER_LUN MAX_COMMANDS +#define MAX_FIRMWARE_STATUS 46 +#define MAX_XFER_PER_CMD (64*1024) +#define MAX_SECTORS_PER_IO 128 + +#define MAX_LOGICAL_DRIVES_40LD 40 +#define FC_MAX_PHYSICAL_DEVICES 256 +#define MAX_LOGICAL_DRIVES_8LD 8 +#define MAX_CHANNELS 5 +#define MAX_TARGET 15 +#define MAX_PHYSICAL_DRIVES MAX_CHANNELS*MAX_TARGET +#define MAX_ROW_SIZE_40LD 32 +#define MAX_ROW_SIZE_8LD 8 +#define MAX_SPAN_DEPTH 8 + +#define NVIRT_CHAN 4 /* # of virtual channels to represent + up to 60 logical drives */ + +#define MEGARAID \ +{ \ + .name = "MegaRAID", \ + .proc_name = "megaraid", \ + .detect = megaraid_detect, \ + .release = megaraid_release, \ + .info = megaraid_info, \ + .command = megaraid_command, \ + .queuecommand = megaraid_queue, \ + .bios_param = megaraid_biosparam, \ + .max_sectors = MAX_SECTORS_PER_IO, \ + .can_queue = MAX_COMMANDS, \ + .this_id = DEFAULT_INITIATOR_ID, \ + .sg_tablesize = MAX_SGLIST, \ + .cmd_per_lun = DEF_CMD_PER_LUN, \ + .present = 0, \ + .unchecked_isa_dma = 0, \ + .use_clustering = ENABLE_CLUSTERING, \ + .use_new_eh_code = 1, \ + .eh_abort_handler = megaraid_abort, \ + .eh_device_reset_handler = megaraid_reset, \ + .eh_bus_reset_handler = megaraid_reset, \ + .eh_host_reset_handler = megaraid_reset, \ + .highmem_io = 1 \ +} + + + +typedef struct { + /* 0x0 */ u8 cmd; + /* 0x1 */ u8 cmdid; + /* 0x2 */ u16 numsectors; + /* 0x4 */ u32 lba; + /* 0x8 */ u32 xferaddr; + /* 0xC */ u8 logdrv; + /* 0xD */ u8 numsgelements; + /* 0xE */ u8 resvd; + /* 0xF */ volatile u8 busy; + /* 0x10 */ volatile u8 numstatus; + /* 0x11 */ volatile u8 status; + /* 0x12 */ volatile 
u8 completed[MAX_FIRMWARE_STATUS]; + volatile u8 poll; + volatile u8 ack; +} __attribute__ ((packed)) mbox_t; + +typedef struct { + u32 xfer_segment_lo; + u32 xfer_segment_hi; + mbox_t mbox; +} __attribute__ ((packed)) mbox64_t; + + +/* + * Passthru definitions + */ +#define MAX_REQ_SENSE_LEN 0x20 + +typedef struct { + u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */ + u8 ars:1; + u8 reserved:3; + u8 islogical:1; + u8 logdrv; /* if islogical == 1 */ + u8 channel; /* if islogical == 0 */ + u8 target; /* if islogical == 0 */ + u8 queuetag; /* unused */ + u8 queueaction; /* unused */ + u8 cdb[MAX_CDB_LEN]; + u8 cdblen; + u8 reqsenselen; + u8 reqsensearea[MAX_REQ_SENSE_LEN]; + u8 numsgelements; + u8 scsistatus; + u32 dataxferaddr; + u32 dataxferlen; +} __attribute__ ((packed)) mega_passthru; + + +/* + * Extended passthru: support CDB > 10 bytes + */ +typedef struct { + u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */ + u8 ars:1; + u8 rsvd1:1; + u8 cd_rom:1; + u8 rsvd2:1; + u8 islogical:1; + u8 logdrv; /* if islogical == 1 */ + u8 channel; /* if islogical == 0 */ + u8 target; /* if islogical == 0 */ + u8 queuetag; /* unused */ + u8 queueaction; /* unused */ + u8 cdblen; + u8 rsvd3; + u8 cdb[MAX_EXT_CDB_LEN]; + u8 numsgelements; + u8 status; + u8 reqsenselen; + u8 reqsensearea[MAX_REQ_SENSE_LEN]; + u8 rsvd4; + u32 dataxferaddr; + u32 dataxferlen; +} __attribute__ ((packed)) mega_ext_passthru; + +typedef struct { + u64 address; + u32 length; +} __attribute__ ((packed)) mega_sgl64; + +typedef struct { + u32 address; + u32 length; +} __attribute__ ((packed)) mega_sglist; + + +/* Queued command data */ +typedef struct { + int idx; + u32 state; + struct list_head list; + u8 raw_mbox[66]; + u32 dma_type; + u32 dma_direction; + + Scsi_Cmnd *cmd; + dma_addr_t dma_h_bulkdata; + dma_addr_t dma_h_sgdata; + + mega_sglist *sgl; + mega_sgl64 *sgl64; + dma_addr_t sgl_dma_addr; + + mega_passthru *pthru; + dma_addr_t pthru_dma_addr; + mega_ext_passthru *epthru; + dma_addr_t epthru_dma_addr; +} scb_t; + +/* + * Flags to follow the scb as it transitions between various stages + */ +#define SCB_FREE 0x0000 /* on the free list */ +#define SCB_ACTIVE 0x0001 /* off the free list */ +#define SCB_PENDQ 0x0002 /* on the pending queue */ +#define SCB_ISSUED 0x0004 /* issued - owner f/w */ +#define SCB_ABORT 0x0008 /* Got an abort for this one */ +#define SCB_RESET 0x0010 /* Got a reset for this one */ + +/* + * Utilities declare this strcture size as 1024 bytes. So more fields can + * be added in future. + */ +typedef struct { + u32 data_size; /* current size in bytes (not including resvd) */ + + u32 config_signature; + /* Current value is 0x00282008 + * 0x28=MAX_LOGICAL_DRIVES, + * 0x20=Number of stripes and + * 0x08=Number of spans */ + + u8 fw_version[16]; /* printable ASCI string */ + u8 bios_version[16]; /* printable ASCI string */ + u8 product_name[80]; /* printable ASCI string */ + + u8 max_commands; /* Max. 
concurrent commands supported */ + u8 nchannels; /* Number of SCSI Channels detected */ + u8 fc_loop_present; /* Number of Fibre Loops detected */ + u8 mem_type; /* EDO, FPM, SDRAM etc */ + + u32 signature; + u16 dram_size; /* In terms of MB */ + u16 subsysid; + + u16 subsysvid; + u8 notify_counters; + u8 pad1k[889]; /* 135 + 889 resvd = 1024 total size */ +} __attribute__ ((packed)) mega_product_info; + +struct notify { + u32 global_counter; /* Any change increments this counter */ + + u8 param_counter; /* Indicates any params changed */ + u8 param_id; /* Param modified - defined below */ + u16 param_val; /* New val of last param modified */ + + u8 write_config_counter; /* write config occurred */ + u8 write_config_rsvd[3]; + + u8 ldrv_op_counter; /* Indicates ldrv op started/completed */ + u8 ldrv_opid; /* ldrv num */ + u8 ldrv_opcmd; /* ldrv operation - defined below */ + u8 ldrv_opstatus; /* status of the operation */ + + u8 ldrv_state_counter; /* Indicates change of ldrv state */ + u8 ldrv_state_id; /* ldrv num */ + u8 ldrv_state_new; /* New state */ + u8 ldrv_state_old; /* old state */ + + u8 pdrv_state_counter; /* Indicates change of ldrv state */ + u8 pdrv_state_id; /* pdrv id */ + u8 pdrv_state_new; /* New state */ + u8 pdrv_state_old; /* old state */ + + u8 pdrv_fmt_counter; /* Indicates pdrv format started/over */ + u8 pdrv_fmt_id; /* pdrv id */ + u8 pdrv_fmt_val; /* format started/over */ + u8 pdrv_fmt_rsvd; + + u8 targ_xfer_counter; /* Indicates SCSI-2 Xfer rate change */ + u8 targ_xfer_id; /* pdrv Id */ + u8 targ_xfer_val; /* new Xfer params of last pdrv */ + u8 targ_xfer_rsvd; + + u8 fcloop_id_chg_counter; /* Indicates loopid changed */ + u8 fcloopid_pdrvid; /* pdrv id */ + u8 fcloop_id0; /* loopid on fc loop 0 */ + u8 fcloop_id1; /* loopid on fc loop 1 */ + + u8 fcloop_state_counter; /* Indicates loop state changed */ + u8 fcloop_state0; /* state of fc loop 0 */ + u8 fcloop_state1; /* state of fc loop 1 */ + u8 fcloop_state_rsvd; +} __attribute__ ((packed)); + +#define MAX_NOTIFY_SIZE 0x80 +#define CUR_NOTIFY_SIZE sizeof(struct notify) + +typedef struct { + u32 data_size; /* current size in bytes (not including resvd) */ + + struct notify notify; + + u8 notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE]; + + u8 rebuild_rate; /* Rebuild rate (0% - 100%) */ + u8 cache_flush_interval; /* In terms of Seconds */ + u8 sense_alert; + u8 drive_insert_count; /* drive insertion count */ + + u8 battery_status; + u8 num_ldrv; /* No. of Log Drives configured */ + u8 recon_state[MAX_LOGICAL_DRIVES_40LD / 8]; /* State of + reconstruct */ + u16 ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8]; /* logdrv + Status */ + + u32 ldrv_size[MAX_LOGICAL_DRIVES_40LD];/* Size of each log drv */ + u8 ldrv_prop[MAX_LOGICAL_DRIVES_40LD]; + u8 ldrv_state[MAX_LOGICAL_DRIVES_40LD];/* State of log drives */ + u8 pdrv_state[FC_MAX_PHYSICAL_DEVICES];/* State of phys drvs. 
*/
+	u16	pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16];
+
+	u8	targ_xfer[80];	/* phys device transfer rate */
+	u8	pad1k[263];	/* 761 + 263 reserved = 1024 bytes total size */
+} __attribute__ ((packed)) mega_inquiry3;
+
+
+/* Structures */
+typedef struct {
+	u8	max_commands;		/* Max concurrent commands supported */
+	u8	rebuild_rate;		/* Rebuild rate - 0% thru 100% */
+	u8	max_targ_per_chan;	/* Max targ per channel */
+	u8	nchannels;		/* Number of channels on HBA */
+	u8	fw_version[4];		/* Firmware version */
+	u16	age_of_flash;		/* Number of times FW has been flashed */
+	u8	chip_set_value;		/* Contents of 0xC0000832 */
+	u8	dram_size;		/* In MB */
+	u8	cache_flush_interval;	/* in seconds */
+	u8	bios_version[4];
+	u8	board_type;
+	u8	sense_alert;
+	u8	write_config_count;	/* Increase with every configuration
+					   change */
+	u8	drive_inserted_count;	/* Increase with every drive inserted */
+	u8	inserted_drive;		/* Channel:Id of inserted drive */
+	u8	battery_status;		/*
+					 * BIT 0: battery module missing
+					 * BIT 1: VBAD
+					 * BIT 2: temperature high
+					 * BIT 3: battery pack missing
+					 * BIT 4,5:
+					 *   00 - charge complete
+					 *   01 - fast charge in progress
+					 *   10 - fast charge fail
+					 *   11 - undefined
+					 * BIT 6: counter > 1000
+					 * BIT 7: undefined
+					 */
+	u8	dec_fault_bus_info;
+} __attribute__ ((packed)) mega_adp_info;
+
+
+typedef struct {
+	u8	num_ldrv;	/* Number of logical drives configured */
+	u8	rsvd[3];
+	u32	ldrv_size[MAX_LOGICAL_DRIVES_8LD];
+	u8	ldrv_prop[MAX_LOGICAL_DRIVES_8LD];
+	u8	ldrv_state[MAX_LOGICAL_DRIVES_8LD];
+} __attribute__ ((packed)) mega_ldrv_info;
+
+typedef struct {
+	u8	pdrv_state[MAX_PHYSICAL_DRIVES];
+	u8	rsvd;
+} __attribute__ ((packed)) mega_pdrv_info;
+
+/* RAID inquiry: Mailbox command 0x05 */
+typedef struct {
+	mega_adp_info	adapter_info;
+	mega_ldrv_info	logdrv_info;
+	mega_pdrv_info	pdrv_info;
+} __attribute__ ((packed)) mraid_inquiry;
+
+
+/* RAID extended inquiry: Mailbox command 0x04 */
+typedef struct {
+	mraid_inquiry	raid_inq;
+	u16	phys_drv_format[MAX_CHANNELS];
+	u8	stack_attn;
+	u8	modem_status;
+	u8	rsvd[2];
+} __attribute__ ((packed)) mraid_ext_inquiry;
+
+
+typedef struct {
+	u8	channel;
+	u8	target;
+}__attribute__ ((packed)) adp_device;
+
+typedef struct {
+	u32	start_blk;	/* starting block */
+	u32	num_blks;	/* # of blocks */
+	adp_device	device[MAX_ROW_SIZE_40LD];
+}__attribute__ ((packed)) adp_span_40ld;
+
+typedef struct {
+	u32	start_blk;	/* starting block */
+	u32	num_blks;	/* # of blocks */
+	adp_device	device[MAX_ROW_SIZE_8LD];
+}__attribute__ ((packed)) adp_span_8ld;
+
+typedef struct {
+	u8	span_depth;	/* Total # of spans */
+	u8	level;		/* RAID level */
+	u8	read_ahead;	/* read ahead, no read ahead, adaptive read
+				   ahead */
+	u8	stripe_sz;	/* Encoded stripe size */
+	u8	status;		/* Status of the logical drive */
+	u8	write_mode;	/* write mode, write_through/write_back */
+	u8	direct_io;	/* direct io or through cache */
+	u8	row_size;	/* Number of stripes in a row */
+} __attribute__ ((packed)) logdrv_param;
+
+typedef struct {
+	logdrv_param	lparam;
+	adp_span_40ld	span[MAX_SPAN_DEPTH];
+}__attribute__ ((packed)) logdrv_40ld;
+
+typedef struct {
+	logdrv_param	lparam;
+	adp_span_8ld	span[MAX_SPAN_DEPTH];
+}__attribute__ ((packed)) logdrv_8ld;
+
+typedef struct {
+	u8	type;		/* Type of the device */
+	u8	cur_status;	/* current status of the device */
+	u8	tag_depth;	/* Level of tagging */
+	u8	sync_neg;	/* sync negotiation - ENABLE or DISABLE */
+	u32	size;		/* configurable size in terms of 512 byte
+				   blocks */
+}__attribute__ ((packed)) phys_drv;
+
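Editor's note: the battery_status byte documented in mega_adp_info above maps bit-for-bit onto the MEGA_BATT_* masks defined further down in this header. A minimal decoding sketch follows; mega_print_battery_status is illustrative only and not part of the driver.

/* Illustrative decode of mega_adp_info.battery_status using the
 * MEGA_BATT_* masks defined later in this header. */
static void
mega_print_battery_status(u8 status)
{
	if( status & MEGA_BATT_MODULE_MISSING )
		printk(KERN_INFO "megaraid: battery module missing\n");
	if( status & MEGA_BATT_LOW_VOLTAGE )
		printk(KERN_INFO "megaraid: battery voltage low\n");
	if( status & MEGA_BATT_TEMP_HIGH )
		printk(KERN_INFO "megaraid: battery temperature high\n");
	if( status & MEGA_BATT_PACK_MISSING )
		printk(KERN_INFO "megaraid: battery pack missing\n");

	switch( status & MEGA_BATT_CHARGE_MASK ) {
	case MEGA_BATT_CHARGE_DONE:
		printk(KERN_INFO "megaraid: charge complete\n");
		break;
	case MEGA_BATT_CHARGE_INPROG:
		printk(KERN_INFO "megaraid: fast charge in progress\n");
		break;
	case MEGA_BATT_CHARGE_FAIL:
		printk(KERN_INFO "megaraid: fast charge failed\n");
		break;
	}

	if( status & MEGA_BATT_CYCLES_EXCEEDED )
		printk(KERN_INFO "megaraid: charge cycle counter > 1000\n");
}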
+typedef struct {
+	u8	nlog_drives;	/* number of logical drives */
+	u8	resvd[3];
+	logdrv_40ld	ldrv[MAX_LOGICAL_DRIVES_40LD];
+	phys_drv	pdrv[MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_40ld;
+
+typedef struct {
+	u8	nlog_drives;	/* number of logical drives */
+	u8	resvd[3];
+	logdrv_8ld	ldrv[MAX_LOGICAL_DRIVES_8LD];
+	phys_drv	pdrv[MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_8ld;
+
+
+/*
+ * User ioctl structure.
+ * This structure will be used for Traditional Method ioctl interface
+ * commands (0x80), Alternate Buffer Method (0x81) ioctl commands and the
+ * Driver ioctls.
+ * The Driver ioctl interface handles the commands at the driver level,
+ * without being sent to the card.
+ */
+/* system call imposed limit. Change accordingly */
+#define IOCTL_MAX_DATALEN	4096
+
+struct uioctl_t {
+	u32	inlen;
+	u32	outlen;
+	union {
+		u8	fca[16];
+		struct {
+			u8	opcode;
+			u8	subopcode;
+			u16	adapno;
+#if BITS_PER_LONG == 32
+			u8	*buffer;
+			u8	pad[4];
+#endif
+#if BITS_PER_LONG == 64
+			u8	*buffer;
+#endif
+			u32	length;
+		} __attribute__ ((packed)) fcs;
+	} __attribute__ ((packed)) ui;
+	u8	mbox[18];	/* 16 bytes + 2 status bytes */
+	mega_passthru	pthru;
+#if BITS_PER_LONG == 32
+	char	*data;		/* buffer <= 4096 for 0x80 commands */
+	char	pad[4];
+#endif
+#if BITS_PER_LONG == 64
+	char	*data;
+#endif
+} __attribute__ ((packed));
+
+/*
+ * struct mcontroller is used to pass information about the controllers in
+ * the system. It is up to the application how to use the information. We
+ * are passing as much info about the cards as possible and useful. Before
+ * issuing the call to find information about the cards, the application
+ * needs to issue an ioctl first to find out the number of controllers in
+ * the system.
+ */
+#define MAX_CONTROLLERS	32
+
+struct mcontroller {
+	u64	base;
+	u8	irq;
+	u8	numldrv;
+	u8	pcibus;
+	u16	pcidev;
+	u8	pcifun;
+	u16	pciid;
+	u16	pcivendor;
+	u8	pcislot;
+	u32	uid;
+};
+
+/*
+ * mailbox structure used for internal commands
+ */
+typedef struct {
+	u8	cmd;
+	u8	cmdid;
+	u8	opcode;
+	u8	subopcode;
+	u32	lba;
+	u32	xferaddr;
+	u8	logdrv;
+	u8	rsvd[3];
+	u8	numstatus;
+	u8	status;
+} __attribute__ ((packed)) megacmd_t;
+
+/*
+ * Defines for Driver IOCTL interface
+ */
+#define MEGAIOC_MAGIC	'm'
+
+#define MEGAIOC_QNADAP		'm'	/* Query # of adapters */
+#define MEGAIOC_QDRVRVER	'e'	/* Query driver version */
+#define MEGAIOC_QADAPINFO	'g'	/* Query adapter information */
+#define MKADAP(adapno)	(MEGAIOC_MAGIC << 8 | (adapno) )
+#define GETADAP(mkadap)	( (mkadap) ^ MEGAIOC_MAGIC << 8 )
+
+/*
+ * Definition for the new ioctl interface (NIT)
+ */
+
+/*
+ * Vendor specific Group-7 commands
+ */
+#define VENDOR_SPECIFIC_COMMANDS	0xE0
+#define MEGA_INTERNAL_CMD		VENDOR_SPECIFIC_COMMANDS + 0x01
+
+/*
+ * The ioctl command. No other command shall be used for this interface
+ */
+#define USCSICMD	VENDOR_SPECIFIC_COMMANDS
+
+/*
+ * Data direction flags
+ */
+#define UIOC_RD		0x00001
+#define UIOC_WR		0x00002
+
+/*
+ * ioctl opcodes
+ */
+#define MBOX_CMD	0x00000	/* DCMD or passthru command */
+#define GET_DRIVER_VER	0x10000	/* Get driver version */
+#define GET_N_ADAP	0x20000	/* Get number of adapters */
+#define GET_ADAP_INFO	0x30000	/* Get information about an adapter */
+#define GET_CAP		0x40000	/* Get ioctl capabilities */
+#define GET_STATS	0x50000	/* Get statistics, including error info */
+
+
+/*
+ * The ioctl structure.
+ * MBOX macro converts a nitioctl_t structure to megacmd_t pointer and + * MBOX_P macro converts a nitioctl_t pointer to megacmd_t pointer. + */ +typedef struct { + char signature[8]; /* Must contain "MEGANIT" */ + u32 opcode; /* opcode for the command */ + u32 adapno; /* adapter number */ + union { + u8 __raw_mbox[18]; + caddr_t __uaddr; /* xferaddr for non-mbox cmds */ + }__ua; + +#define uioc_rmbox __ua.__raw_mbox +#define MBOX(uioc) ((megacmd_t *)&((uioc).__ua.__raw_mbox[0])) +#define MBOX_P(uioc) ((megacmd_t *)&((uioc)->__ua.__raw_mbox[0])) +#define uioc_uaddr __ua.__uaddr + + u32 xferlen; /* xferlen for DCMD and non-mbox + commands */ + u32 flags; /* data direction flags */ +}nitioctl_t; + + +/* + * I/O statistics for some applications like SNMP agent. The caller must + * provide the number of logical drives for which status should be reported. + */ +typedef struct { + int num_ldrv; /* Number for logical drives for which the + status should be reported. */ + u32 nreads[MAX_LOGICAL_DRIVES_40LD]; /* number of reads for + each logical drive */ + u32 nreadblocks[MAX_LOGICAL_DRIVES_40LD]; /* number of blocks + read for each logical + drive */ + u32 nwrites[MAX_LOGICAL_DRIVES_40LD]; /* number of writes + for each logical + drive */ + u32 nwriteblocks[MAX_LOGICAL_DRIVES_40LD]; /* number of blocks + writes for each + logical drive */ + u32 rd_errors[MAX_LOGICAL_DRIVES_40LD]; /* number of read + errors for each + logical drive */ + u32 wr_errors[MAX_LOGICAL_DRIVES_40LD]; /* number of write + errors for each + logical drive */ +}megastat_t; + + +struct private_bios_data { + u8 geometry:4; /* + * bits 0-3 - BIOS geometry + * 0x0001 - 1GB + * 0x0010 - 2GB + * 0x1000 - 8GB + * Others values are invalid + */ + u8 unused:4; /* bits 4-7 are unused */ + u8 boot_drv; /* + * logical drive set as boot drive + * 0..7 - for 8LD cards + * 0..39 - for 40LD cards + */ + u8 rsvd[12]; + u16 cksum; /* 0-(sum of first 13 bytes of this structure) */ +} __attribute__ ((packed)); + + + + +/* + * Mailbox and firmware commands and subopcodes used in this driver. 
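Editor's note on private_bios_data above: the cksum comment says "sum of first 13 bytes", but the fields preceding cksum (one byte of geometry/unused bits, boot_drv, and rsvd[12]) total 14 bytes. The sketch below sidesteps that ambiguity by summing everything before cksum; mega_bios_data_valid is illustrative, not a driver function.

#include <linux/stddef.h>	/* offsetof */

/* Illustrative check only: cksum holds 0 - (sum of the leading bytes),
 * so the bytes before cksum plus cksum itself should sum to zero. */
static int
mega_bios_data_valid(struct private_bios_data *prv)
{
	u16	sum = 0;
	u8	*p = (u8 *)prv;
	int	i;

	for( i = 0; i < offsetof(struct private_bios_data, cksum); i++ )
		sum += p[i];

	return (u16)(sum + prv->cksum) == 0;
}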
+ */ + +#define MEGA_MBOXCMD_LREAD 0x01 +#define MEGA_MBOXCMD_LWRITE 0x02 +#define MEGA_MBOXCMD_PASSTHRU 0x03 +#define MEGA_MBOXCMD_ADPEXTINQ 0x04 +#define MEGA_MBOXCMD_ADAPTERINQ 0x05 +#define MEGA_MBOXCMD_LREAD64 0xA7 +#define MEGA_MBOXCMD_LWRITE64 0xA8 +#define MEGA_MBOXCMD_PASSTHRU64 0xC3 +#define MEGA_MBOXCMD_EXTPTHRU 0xE3 + +#define MAIN_MISC_OPCODE 0xA4 /* f/w misc opcode */ +#define GET_MAX_SG_SUPPORT 0x01 /* get max sg len supported by f/w */ + +#define FC_NEW_CONFIG 0xA1 +#define NC_SUBOP_PRODUCT_INFO 0x0E +#define NC_SUBOP_ENQUIRY3 0x0F +#define ENQ3_GET_SOLICITED_FULL 0x02 +#define OP_DCMD_READ_CONFIG 0x04 +#define NEW_READ_CONFIG_8LD 0x67 +#define READ_CONFIG_8LD 0x07 +#define FLUSH_ADAPTER 0x0A +#define FLUSH_SYSTEM 0xFE + +/* + * Command for random deletion of logical drives + */ +#define FC_DEL_LOGDRV 0xA4 /* f/w command */ +#define OP_SUP_DEL_LOGDRV 0x2A /* is feature supported */ +#define OP_GET_LDID_MAP 0x18 /* get ldid and logdrv number map */ +#define OP_DEL_LOGDRV 0x1C /* delete logical drive */ + +/* + * BIOS commands + */ +#define IS_BIOS_ENABLED 0x62 +#define GET_BIOS 0x01 +#define CHNL_CLASS 0xA9 +#define GET_CHNL_CLASS 0x00 +#define SET_CHNL_CLASS 0x01 +#define CH_RAID 0x01 +#define CH_SCSI 0x00 +#define BIOS_PVT_DATA 0x40 +#define GET_BIOS_PVT_DATA 0x00 + + +/* + * Commands to support clustering + */ +#define MEGA_GET_TARGET_ID 0x7D +#define MEGA_CLUSTER_OP 0x70 +#define MEGA_GET_CLUSTER_MODE 0x02 +#define MEGA_CLUSTER_CMD 0x6E +#define MEGA_RESERVE_LD 0x01 +#define MEGA_RELEASE_LD 0x02 +#define MEGA_RESET_RESERVATIONS 0x03 +#define MEGA_RESERVATION_STATUS 0x04 +#define MEGA_RESERVE_PD 0x05 +#define MEGA_RELEASE_PD 0x06 + + +/* + * Module battery status + */ +#define MEGA_BATT_MODULE_MISSING 0x01 +#define MEGA_BATT_LOW_VOLTAGE 0x02 +#define MEGA_BATT_TEMP_HIGH 0x04 +#define MEGA_BATT_PACK_MISSING 0x08 +#define MEGA_BATT_CHARGE_MASK 0x30 +#define MEGA_BATT_CHARGE_DONE 0x00 +#define MEGA_BATT_CHARGE_INPROG 0x10 +#define MEGA_BATT_CHARGE_FAIL 0x20 +#define MEGA_BATT_CYCLES_EXCEEDED 0x40 + +/* + * Physical drive states. + */ +#define PDRV_UNCNF 0 +#define PDRV_ONLINE 3 +#define PDRV_FAILED 4 +#define PDRV_RBLD 5 +#define PDRV_HOTSPARE 6 + + +/* + * Raid logical drive states. 
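Editor's note on the command set above: opcodes such as FLUSH_ADAPTER are the kind of mailbox commands typically issued through the serialized internal-command path shown earlier in megaraid2.c. A minimal sketch, assuming FLUSH_ADAPTER is issued as a plain mailbox command with no data transfer; mega_flush_cache is illustrative, not the driver's own helper.

/* Illustrative only: flush the adapter cache via mega_internal_command(). */
static int
mega_flush_cache(adapter_t *adapter)
{
	megacmd_t	mc;

	memset(&mc, 0, sizeof(megacmd_t));
	mc.cmd = FLUSH_ADAPTER;

	/* LOCK_INT: the caller has not taken adapter->lock itself */
	return mega_internal_command(adapter, LOCK_INT, &mc, NULL);
}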
+ */ +#define RDRV_OFFLINE 0 +#define RDRV_DEGRADED 1 +#define RDRV_OPTIMAL 2 +#define RDRV_DELETED 3 + +/* + * Read, write and cache policies + */ +#define NO_READ_AHEAD 0 +#define READ_AHEAD 1 +#define ADAP_READ_AHEAD 2 +#define WRMODE_WRITE_THRU 0 +#define WRMODE_WRITE_BACK 1 +#define CACHED_IO 0 +#define DIRECT_IO 1 + + +/* + * Each controller's soft state + */ +typedef struct { + int this_id; /* our id, may set to different than 7 if + clustering is available */ + u32 flag; + + unsigned long base; + + /* mbox64 with mbox not aligned on 16-byte boundry */ + mbox64_t *una_mbox64; + dma_addr_t una_mbox64_dma; + + volatile mbox64_t *mbox64;/* ptr to 64-bit mailbox */ + volatile mbox_t *mbox; /* ptr to standard mailbox */ + dma_addr_t mbox_dma; + + struct pci_dev *dev; + struct pci_dev *ipdev; /* for internal allocation */ + + struct list_head free_list; + struct list_head pending_list; + + struct Scsi_Host *host; + +#define MEGA_BUFFER_SIZE (2*1024) + u8 *mega_buffer; + dma_addr_t buf_dma_handle; + + mega_product_info product_info; + + u8 max_cmds; + scb_t *scb_list; + + atomic_t pend_cmds; /* maintain a counter for pending + commands in firmware */ + +#if MEGA_HAVE_STATS + u32 nreads[MAX_LOGICAL_DRIVES_40LD]; + u32 nreadblocks[MAX_LOGICAL_DRIVES_40LD]; + u32 nwrites[MAX_LOGICAL_DRIVES_40LD]; + u32 nwriteblocks[MAX_LOGICAL_DRIVES_40LD]; + u32 rd_errors[MAX_LOGICAL_DRIVES_40LD]; + u32 wr_errors[MAX_LOGICAL_DRIVES_40LD]; +#endif + + /* Host adapter parameters */ + u8 numldrv; + u8 fw_version[7]; + u8 bios_version[7]; + +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *controller_proc_dir_entry; + struct proc_dir_entry *proc_read; + struct proc_dir_entry *proc_stat; + struct proc_dir_entry *proc_mbox; + +#if MEGA_HAVE_ENH_PROC + struct proc_dir_entry *proc_rr; + struct proc_dir_entry *proc_battery; +#define MAX_PROC_CHANNELS 4 + struct proc_dir_entry *proc_pdrvstat[MAX_PROC_CHANNELS]; + struct proc_dir_entry *proc_rdrvstat[MAX_PROC_CHANNELS]; +#endif + +#endif + + int has_64bit_addr; /* are we using 64-bit addressing */ + int support_ext_cdb; + int boot_ldrv_enabled; + int boot_ldrv; + int boot_pdrv_enabled; /* boot from physical drive */ + int boot_pdrv_ch; /* boot physical drive channel */ + int boot_pdrv_tgt; /* boot physical drive target */ + + + int support_random_del; /* Do we support random deletion of + logdrvs */ + int read_ldidmap; /* set after logical drive deltion. The + logical drive number must be read from the + map */ + atomic_t quiescent; /* a stage reached when delete logical + drive needs to be done. Stop + sending requests to the hba till + delete operation is completed */ + spinlock_t lock; + + u8 logdrv_chan[MAX_CHANNELS+NVIRT_CHAN]; /* logical drive are on + what channels. */ + int mega_ch_class; + + u8 sglen; /* f/w supported scatter-gather list length */ + + scb_t int_scb; + Scsi_Cmnd int_scmd; + struct semaphore int_mtx; /* To synchronize the internal + commands */ + wait_queue_head_t int_waitq; /* wait queue for internal + cmds */ + + int has_cluster; /* cluster support on this HBA */ +}adapter_t; + + +struct mega_hbas { + int is_bios_enabled; + adapter_t *hostdata_addr; +}; + + +/* + * For state flag. Do not use LSB(8 bits) which are + * reserved for storing info about channels. 
+ */ +#define IN_ABORT 0x80000000L +#define IN_RESET 0x40000000L +#define BOARD_MEMMAP 0x20000000L +#define BOARD_IOMAP 0x10000000L +#define BOARD_40LD 0x08000000L +#define BOARD_64BIT 0x04000000L + +#define INTR_VALID 0x40 + +#define PCI_CONF_AMISIG 0xa0 +#define PCI_CONF_AMISIG64 0xa4 + + +#define MEGA_DMA_TYPE_NONE 0xFFFF +#define MEGA_BULK_DATA 0x0001 +#define MEGA_SGLIST 0x0002 + +/* + * lockscope definitions, callers can specify the lock scope with this data + * type. LOCK_INT would mean the caller has not acquired the lock before + * making the call and LOCK_EXT would mean otherwise. + */ +typedef enum { LOCK_INT, LOCK_EXT } lockscope_t; + +/* + * Parameters for the io-mapped controllers + */ + +/* I/O Port offsets */ +#define CMD_PORT 0x00 +#define ACK_PORT 0x00 +#define TOGGLE_PORT 0x01 +#define INTR_PORT 0x0a + +#define MBOX_BUSY_PORT 0x00 +#define MBOX_PORT0 0x04 +#define MBOX_PORT1 0x05 +#define MBOX_PORT2 0x06 +#define MBOX_PORT3 0x07 +#define ENABLE_MBOX_REGION 0x0B + +/* I/O Port Values */ +#define ISSUE_BYTE 0x10 +#define ACK_BYTE 0x08 +#define ENABLE_INTR_BYTE 0xc0 +#define DISABLE_INTR_BYTE 0x00 +#define VALID_INTR_BYTE 0x40 +#define MBOX_BUSY_BYTE 0x10 +#define ENABLE_MBOX_BYTE 0x00 + + +/* Setup some port macros here */ +#define issue_command(adapter) \ + outb_p(ISSUE_BYTE, (adapter)->base + CMD_PORT) + +#define irq_state(adapter) inb_p((adapter)->base + INTR_PORT) + +#define set_irq_state(adapter, value) \ + outb_p((value), (adapter)->base + INTR_PORT) + +#define irq_ack(adapter) \ + outb_p(ACK_BYTE, (adapter)->base + ACK_PORT) + +#define irq_enable(adapter) \ + outb_p(ENABLE_INTR_BYTE, (adapter)->base + TOGGLE_PORT) + +#define irq_disable(adapter) \ + outb_p(DISABLE_INTR_BYTE, (adapter)->base + TOGGLE_PORT) + + +/* + * This is our SYSDEP area. All kernel specific detail should be placed here - + * as much as possible + */ + +/* + * End of SYSDEP area + */ + +/* + * ASSERT macro for megaraid. 
This should panic but printk should do for now + */ +#ifdef DEBUG +#define ASSERT( expression ) \ + if( !(expression) ) { \ + panic("assertion failed: %s, file: %s, line: %d\n", \ + #expression, __FILE__, __LINE__); \ + } +#else +#define ASSERT(expression) +#endif + +#define MBOX_ABORT_SLEEP 60 +#define MBOX_RESET_SLEEP 30 + +const char *megaraid_info (struct Scsi_Host *); + +static int megaraid_detect(Scsi_Host_Template *); +static void mega_find_card(Scsi_Host_Template *, u16, u16); +static int mega_query_adapter(adapter_t *); +static inline int issue_scb(adapter_t *, scb_t *); +static int mega_setup_mailbox(adapter_t *); + +static int megaraid_queue (Scsi_Cmnd *, void (*)(Scsi_Cmnd *)); +static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *); +static inline scb_t *mega_allocate_scb(adapter_t *, Scsi_Cmnd *); +static void __mega_runpendq(adapter_t *); +static inline void mega_runpendq(adapter_t *); +static int issue_scb_block(adapter_t *, u_char *); + +static void megaraid_isr_memmapped(int, void *, struct pt_regs *); +static inline void megaraid_memmbox_ack_sequence(adapter_t *); +static void megaraid_isr_iomapped(int, void *, struct pt_regs *); +static inline void megaraid_iombox_ack_sequence(adapter_t *); + +static void mega_free_scb(adapter_t *, scb_t *); + +static int megaraid_release (struct Scsi_Host *); +static int megaraid_command (Scsi_Cmnd *); +static int megaraid_abort(Scsi_Cmnd *); +static int megaraid_reset(Scsi_Cmnd *); +static int megaraid_biosparam (Disk *, kdev_t, int *); +static int mega_print_inquiry(char *, char *); + +static int mega_build_sglist (adapter_t *adapter, scb_t *scb, + u32 *buffer, u32 *length); +static inline int mega_busywait_mbox (adapter_t *); +static int __mega_busywait_mbox (adapter_t *); +static inline void mega_cmd_done(adapter_t *, u8 [], int, int); +static inline void mega_free_sgl (adapter_t *adapter); +static void mega_8_to_40ld (mraid_inquiry *inquiry, + mega_inquiry3 *enquiry3, mega_product_info *); + +static int megaraid_reboot_notify (struct notifier_block *, + unsigned long, void *); +static int megadev_open (struct inode *, struct file *); +static int megadev_ioctl (struct inode *, struct file *, unsigned int, + unsigned long); +static int mega_m_to_n(void *, nitioctl_t *); +static int mega_n_to_m(void *, megacmd_t *); +static int megadev_close (struct inode *, struct file *); + +static int mega_init_scb (adapter_t *); + +static int mega_is_bios_enabled (adapter_t *); +static void mega_reorder_hosts (void); +static void mega_swap_hosts (struct Scsi_Host *, struct Scsi_Host *); + +#ifdef CONFIG_PROC_FS +static void mega_create_proc_entry(int, struct proc_dir_entry *); +static int proc_read_config(char *, char **, off_t, int, int *, void *); +static int proc_read_stat(char *, char **, off_t, int, int *, void *); +static int proc_read_mbox(char *, char **, off_t, int, int *, void *); +static int proc_rebuild_rate(char *, char **, off_t, int, int *, void *); +static int proc_battery(char *, char **, off_t, int, int *, void *); +static int proc_pdrv_ch0(char *, char **, off_t, int, int *, void *); +static int proc_pdrv_ch1(char *, char **, off_t, int, int *, void *); +static int proc_pdrv_ch2(char *, char **, off_t, int, int *, void *); +static int proc_pdrv_ch3(char *, char **, off_t, int, int *, void *); +static int proc_pdrv(adapter_t *, char *, int); +static int proc_rdrv_10(char *, char **, off_t, int, int *, void *); +static int proc_rdrv_20(char *, char **, off_t, int, int *, void *); +static int proc_rdrv_30(char *, char **, 
off_t, int, int *, void *); +static int proc_rdrv_40(char *, char **, off_t, int, int *, void *); +static int proc_rdrv(adapter_t *, char *, int, int); +#endif + +static int mega_adapinq(adapter_t *, dma_addr_t); +static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t); +static inline caddr_t mega_allocate_inquiry(dma_addr_t *, struct pci_dev *); +static inline void mega_free_inquiry(caddr_t, dma_addr_t, struct pci_dev *); + +static int mega_support_ext_cdb(adapter_t *); +static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *, + Scsi_Cmnd *, int, int); +static mega_ext_passthru* mega_prepare_extpassthru(adapter_t *, + scb_t *, Scsi_Cmnd *, int, int); +static void mega_enum_raid_scsi(adapter_t *); +static int mega_partsize(Disk *, kdev_t, int *); +static void mega_get_boot_drv(adapter_t *); +static inline int mega_get_ldrv_num(adapter_t *, Scsi_Cmnd *, int); +static int mega_support_random_del(adapter_t *); +static int mega_del_logdrv(adapter_t *, int); +static int mega_do_del_logdrv(adapter_t *, int); +static void mega_get_max_sgl(adapter_t *); +static int mega_internal_command(adapter_t *, lockscope_t, megacmd_t *, + mega_passthru *); +static void mega_internal_done(Scsi_Cmnd *); +static int mega_support_cluster(adapter_t *); +#endif + +/* vi: set ts=8 sw=8 tw=78: */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/README.st linux.21rc5-ac2/drivers/scsi/README.st --- linux.21rc5/drivers/scsi/README.st 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/README.st 2003-04-22 16:44:37.000000000 +0100 @@ -1,8 +1,8 @@ This file contains brief information about the SCSI tape driver. The driver is currently maintained by Kai Mäkisara (email -Kai.Makisara@metla.fi) +Kai.Makisara@kolumbus.fi) -Last modified: Tue Apr 16 22:32:10 2002 by makisara +Last modified: Sun Apr 6 22:44:55 2003 by makisara BASICS @@ -112,7 +112,7 @@ Asynchronous writing. Writing the buffer contents to the tape is started and the write call returns immediately. The status is checked -at the next tape operation. +at the next tape operation. Applies only to variable block mode. Buffered writes and asynchronous writes may in some rare cases cause problems in multivolume operations if there is not enough space on the @@ -147,11 +147,6 @@ supports scatter/gather (the allocation is not limited to "DMA memory" and the buffer can be composed of several fragments). -The threshold for triggering asynchronous write in fixed block mode -is defined by ST_WRITE_THRESHOLD. This may be optimized for each -use pattern. The default triggers asynchronous write after three -default sized writes (10 kB) from tar. - Scatter/gather buffers (buffers that consist of chunks non-contiguous in the physical memory) are used if contiguous buffers can't be allocated. To support all SCSI adapters (including those not @@ -184,7 +179,6 @@ are configurable when the driver is loaded as a module. 
The keywords are: buffer_kbs=xxx the buffer size in kilobytes is set to xxx -write_threshold_kbs=xxx the write threshold in kilobytes set to xxx max_buffers=xxx the maximum number of tape buffer set to xxx max_sg_segs=xxx the maximum number of scatter/gather segments @@ -213,7 +207,7 @@ where aa is the buffer size in 1024 byte units - bb is the write threshold in 1024 byte units + bb is the write threshold in 1024 byte units (not used any more) cc is the maximum number of tape buffers to allocate (the number of buffers is bounded also by the number of drives detected) dd is the maximum number of scatter/gather segments @@ -321,9 +315,6 @@ MT_ST_SETBOOLEANS MT_ST_CLEARBOOLEANS Sets or clears the option bits. - MT_ST_WRITE_THRESHOLD - Sets the write threshold for this device to kilobytes - specified by the lowest bits. MT_ST_DEF_BLKSIZE Defines the default block size set automatically. Value 0xffffff means that the default is not used any more. diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/scsi.c linux.21rc5-ac2/drivers/scsi/scsi.c --- linux.21rc5/drivers/scsi/scsi.c 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/scsi.c 2003-04-22 16:44:37.000000000 +0100 @@ -352,8 +352,9 @@ int interruptable) { struct Scsi_Host *host; - Scsi_Cmnd *SCpnt = NULL; + Scsi_Cmnd *SCpnt; Scsi_Device *SDpnt; + struct list_head *lp; unsigned long flags; if (!device) @@ -364,7 +365,6 @@ spin_lock_irqsave(&device_request_lock, flags); while (1 == 1) { - SCpnt = NULL; if (!device->device_blocked) { if (device->single_lun) { /* @@ -404,26 +404,21 @@ * If asked to wait, we need to wait, otherwise * return NULL. */ - SCpnt = NULL; goto busy; } } /* - * Now we can check for a free command block for this device. + * Is there a free command block for this device? */ - for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) { - if (SCpnt->request.rq_status == RQ_INACTIVE) - break; - } + if (!list_empty(&device->sdev_free_q)) + goto found; } + /* - * If we couldn't find a free command block, and we have been + * Couldn't find a free command block, and we have been * asked to wait, then do so. */ - if (SCpnt) { - break; - } - busy: +busy: /* * If we have been asked to wait for a free block, then * wait here. @@ -475,12 +470,20 @@ return NULL; } } + continue; } else { spin_unlock_irqrestore(&device_request_lock, flags); return NULL; } } +found: + lp = device->sdev_free_q.next; + list_del(lp); + SCpnt = list_entry(lp, Scsi_Cmnd, sc_list); + if (SCpnt->request.rq_status != RQ_INACTIVE) + BUG(); + SCpnt->request.rq_status = RQ_SCSI_BUSY; SCpnt->request.waiting = NULL; /* And no one is waiting for this * to complete */ @@ -526,6 +529,9 @@ SDpnt = SCpnt->device; + /* command is now free - add to list */ + list_add(&SCpnt->sc_list, &SDpnt->sdev_free_q); + SCpnt->request.rq_status = RQ_INACTIVE; SCpnt->state = SCSI_STATE_UNUSED; SCpnt->owner = SCSI_OWNER_NOBODY; @@ -1333,14 +1339,10 @@ */ int scsi_retry_command(Scsi_Cmnd * SCpnt) { - memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, - sizeof(SCpnt->data_cmnd)); - SCpnt->request_buffer = SCpnt->buffer; - SCpnt->request_bufflen = SCpnt->bufflen; - SCpnt->use_sg = SCpnt->old_use_sg; - SCpnt->cmd_len = SCpnt->old_cmd_len; - SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; - SCpnt->underflow = SCpnt->old_underflow; + /* + * Restore the SCSI command state. 
+ */ + scsi_setup_cmd_retry(SCpnt); /* * Zero the sense information from the last time we tried @@ -1448,6 +1450,7 @@ spin_lock_irqsave(&device_request_lock, flags); for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) { SDpnt->device_queue = SCnext = SCpnt->next; + list_del(&SCpnt->sc_list); kfree((char *) SCpnt); } SDpnt->has_cmdblocks = 0; @@ -1484,6 +1487,7 @@ SDpnt->queue_depth = 1; /* live to fight another day */ } SDpnt->device_queue = NULL; + INIT_LIST_HEAD(&SDpnt->sdev_free_q); for (j = 0; j < SDpnt->queue_depth; j++) { SCpnt = (Scsi_Cmnd *) @@ -1513,6 +1517,7 @@ SDpnt->device_queue = SCpnt; SCpnt->state = SCSI_STATE_UNUSED; SCpnt->owner = SCSI_OWNER_NOBODY; + list_add(&SCpnt->sc_list, &SDpnt->sdev_free_q); } if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */ printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n", diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/scsi_error.c linux.21rc5-ac2/drivers/scsi/scsi_error.c --- linux.21rc5/drivers/scsi/scsi_error.c 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/scsi_error.c 2003-04-22 19:07:25.000000000 +0100 @@ -385,16 +385,10 @@ */ STATIC int scsi_eh_retry_command(Scsi_Cmnd * SCpnt) { - memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, - sizeof(SCpnt->data_cmnd)); - SCpnt->request_buffer = SCpnt->buffer; - SCpnt->request_bufflen = SCpnt->bufflen; - SCpnt->use_sg = SCpnt->old_use_sg; - SCpnt->cmd_len = SCpnt->old_cmd_len; - SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; - SCpnt->underflow = SCpnt->old_underflow; - - scsi_send_eh_cmnd(SCpnt, SCpnt->timeout_per_command); + do { + scsi_setup_cmd_retry(SCpnt); + scsi_send_eh_cmnd(SCpnt, SCpnt->timeout_per_command); + } while (SCpnt->eh_state == NEEDS_RETRY); /* * Hey, we are done. Let's look to see what happened. @@ -425,12 +419,6 @@ ASSERT_LOCK(&io_request_lock, 0); - memcpy((void *) SCpnt->cmnd, (void *) generic_sense, - sizeof(generic_sense)); - - if (SCpnt->device->scsi_level <= SCSI_2) - SCpnt->cmnd[1] = SCpnt->lun << 5; - scsi_result = (!SCpnt->host->hostt->unchecked_isa_dma) ? &scsi_result0[0] : kmalloc(512, GFP_ATOMIC | GFP_DMA); @@ -438,24 +426,40 @@ printk("cannot allocate scsi_result in scsi_request_sense.\n"); return FAILED; } - /* - * Zero the sense buffer. Some host adapters automatically always request - * sense, so it is not a good idea that SCpnt->request_buffer and - * SCpnt->sense_buffer point to the same address (DB). - * 0 is not a valid sense code. - */ - memset((void *) SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); - memset((void *) scsi_result, 0, 256); saved_result = SCpnt->result; - SCpnt->request_buffer = scsi_result; - SCpnt->request_bufflen = 256; - SCpnt->use_sg = 0; - SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); - SCpnt->sc_data_direction = SCSI_DATA_READ; - SCpnt->underflow = 0; - scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + do { + memcpy((void *) SCpnt->cmnd, (void *) generic_sense, + sizeof(generic_sense)); + + if (SCpnt->device->scsi_level <= SCSI_2) + SCpnt->cmnd[1] = SCpnt->lun << 5; + + /* + * Zero the sense buffer. Some host adapters automatically + * always request sense, so it is not a good idea that + * SCpnt->request_buffer and SCpnt->sense_buffer point to + * the same address (DB). 0 is not a valid sense code. 
+ */ + memset((void *) SCpnt->sense_buffer, 0, + sizeof(SCpnt->sense_buffer)); + memset((void *) scsi_result, 0, 256); + + SCpnt->request_buffer = scsi_result; + SCpnt->request_bufflen = 256; + SCpnt->use_sg = 0; + SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); + SCpnt->sc_data_direction = SCSI_DATA_READ; + SCpnt->underflow = 0; + + scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + /* + * If the SCSI device responded with "logical unit + * is in process of becoming ready", we need to + * retry this command. + */ + } while (SCpnt->eh_state == NEEDS_RETRY); /* Last chance to have valid sense data */ if (!scsi_sense_valid(SCpnt)) @@ -479,6 +483,7 @@ SCpnt->cmd_len = SCpnt->old_cmd_len; SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; SCpnt->underflow = SCpnt->old_underflow; + memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); /* * Hey, we are done. Let's look to see what happened. @@ -497,26 +502,34 @@ static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; - memcpy((void *) SCpnt->cmnd, (void *) tur_command, - sizeof(tur_command)); + do { + memcpy((void *) SCpnt->cmnd, (void *) tur_command, + sizeof(tur_command)); - if (SCpnt->device->scsi_level <= SCSI_2) - SCpnt->cmnd[1] = SCpnt->lun << 5; + if (SCpnt->device->scsi_level <= SCSI_2) + SCpnt->cmnd[1] = SCpnt->lun << 5; - /* - * Zero the sense buffer. The SCSI spec mandates that any - * untransferred sense data should be interpreted as being zero. - */ - memset((void *) SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); + /* + * Zero the sense buffer. The SCSI spec mandates that any + * untransferred sense data should be interpreted as being zero. + */ + memset((void *) SCpnt->sense_buffer, 0, + sizeof(SCpnt->sense_buffer)); - SCpnt->request_buffer = NULL; - SCpnt->request_bufflen = 0; - SCpnt->use_sg = 0; - SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); - SCpnt->underflow = 0; - SCpnt->sc_data_direction = SCSI_DATA_NONE; + SCpnt->request_buffer = NULL; + SCpnt->request_bufflen = 0; + SCpnt->use_sg = 0; + SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); + SCpnt->underflow = 0; + SCpnt->sc_data_direction = SCSI_DATA_NONE; - scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + /* + * If the SCSI device responded with "logical unit + * is in process of becoming ready", we need to + * retry this command. + */ + } while (SCpnt->eh_state == NEEDS_RETRY); /* * When we eventually call scsi_finish, we really wish to complete @@ -589,7 +602,6 @@ host = SCpnt->host; - retry: /* * We will use a queued command if possible, otherwise we will emulate the * queuing and calling of completion function ourselves. 
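Editor's note: the scsi_error.c hunks above all apply one idiom. Instead of a goto-based retry buried inside scsi_send_eh_cmnd(), each caller now rebuilds the command state itself and reissues while the completion path reports NEEDS_RETRY (the "logical unit is in process of becoming ready" case). Condensed from the scsi_eh_retry_command() hunk above:

	do {
		/* restore cmnd/buffer/use_sg etc. saved at submission */
		scsi_setup_cmd_retry(SCpnt);
		scsi_send_eh_cmnd(SCpnt, SCpnt->timeout_per_command);
	} while (SCpnt->eh_state == NEEDS_RETRY);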
@@ -672,14 +684,13 @@ SCSI_LOG_ERROR_RECOVERY(3, printk("scsi_send_eh_cmnd: scsi_eh_completed_normally %x\n", ret)); switch (ret) { - case SUCCESS: - SCpnt->eh_state = SUCCESS; - break; - case NEEDS_RETRY: - goto retry; - case FAILED: default: - SCpnt->eh_state = FAILED; + ret = FAILED; + /*FALLTHROUGH*/ + case FAILED: + case NEEDS_RETRY: + case SUCCESS: + SCpnt->eh_state = ret; break; } } else { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/scsi.h linux.21rc5-ac2/drivers/scsi/scsi.h --- linux.21rc5/drivers/scsi/scsi.h 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/scsi.h 2003-05-29 01:44:06.000000000 +0100 @@ -465,6 +465,7 @@ int sectors); extern struct Scsi_Device_Template *scsi_get_request_dev(struct request *); extern int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt); +extern void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt); extern int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int); extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, int block_sectors); @@ -558,6 +559,7 @@ int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize new request */ Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */ + struct list_head sdev_free_q; /* list of free cmds */ /* public: */ unsigned int id, lun, channel; @@ -775,6 +777,8 @@ * received on original command * (auto-sense) */ + struct list_head sc_list; /* Inactive cmd list linkage, guarded + * by device_request_lock. */ unsigned flags; /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/scsi_ioctl.c linux.21rc5-ac2/drivers/scsi/scsi_ioctl.c --- linux.21rc5/drivers/scsi/scsi_ioctl.c 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/scsi_ioctl.c 2003-04-22 16:44:37.000000000 +0100 @@ -279,6 +279,7 @@ cmd[1] = (cmd[1] & 0x1f) | (dev->lun << 5); switch (opcode) { + case SEND_DIAGNOSTIC: case FORMAT_UNIT: timeout = FORMAT_UNIT_TIMEOUT; retries = 1; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/scsi_lib.c linux.21rc5-ac2/drivers/scsi/scsi_lib.c --- linux.21rc5/drivers/scsi/scsi_lib.c 2003-05-28 15:09:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/scsi_lib.c 2003-04-22 16:44:37.000000000 +0100 @@ -208,6 +208,30 @@ } /* + * Function: scsi_setup_cmd_retry() + * + * Purpose: Restore the command state for a retry + * + * Arguments: SCpnt - command to be restored + * + * Returns: Nothing + * + * Notes: Immediately prior to retrying a command, we need + * to restore certain fields that we saved above. + */ +void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt) +{ + memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, + sizeof(SCpnt->data_cmnd)); + SCpnt->request_buffer = SCpnt->buffer; + SCpnt->request_bufflen = SCpnt->bufflen; + SCpnt->use_sg = SCpnt->old_use_sg; + SCpnt->cmd_len = SCpnt->old_cmd_len; + SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; + SCpnt->underflow = SCpnt->old_underflow; +} + +/* * Function: scsi_queue_next_request() * * Purpose: Handle post-processing of completed commands. 
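Editor's note: scsi_setup_cmd_retry(), added in the scsi_lib.c hunk above and declared in scsi.h, centralizes the field-restore sequence that scsi.c and scsi_error.c previously open-coded. A caller about to resubmit a command uses it roughly as scsi_retry_command() now does (sketch only; the surrounding retry logic is omitted):

	/* Restore the command to its as-first-queued state, then drop
	 * any stale sense data before resubmitting. */
	scsi_setup_cmd_retry(SCpnt);
	memset((void *) SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));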
@@ -722,7 +746,7 @@ printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ", SCpnt->host->host_no, (int) SCpnt->channel, (int) SCpnt->target, (int) SCpnt->lun); - print_command(SCpnt->cmnd); + print_command(SCpnt->data_cmnd); print_sense("sd", SCpnt); SCpnt = scsi_end_request(SCpnt, 0, block_sectors); return; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/scsi_scan.c linux.21rc5-ac2/drivers/scsi/scsi_scan.c --- linux.21rc5/drivers/scsi/scsi_scan.c 2003-05-28 15:09:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/scsi_scan.c 2003-04-22 16:44:37.000000000 +0100 @@ -608,6 +608,7 @@ } else { /* assume no peripheral if any other sort of error */ scsi_release_request(SRpnt); + scsi_release_commandblocks(SDpnt); return 0; } } @@ -631,6 +632,7 @@ */ if ((scsi_result[0] >> 5) == 3) { scsi_release_request(SRpnt); + scsi_release_commandblocks(SDpnt); return 0; /* assume no peripheral if any sort of error */ } /* The Toshiba ROM was "gender-changed" here as an inline hack. diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/sd.c linux.21rc5-ac2/drivers/scsi/sd.c --- linux.21rc5/drivers/scsi/sd.c 2003-05-28 15:09:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/sd.c 2003-05-12 16:39:27.000000000 +0100 @@ -852,10 +852,13 @@ break; } - /* Look for non-removable devices that return NOT_READY. + /* Look for non-removable devices that return NOT_READY, + * and don't require manual intervention. * Issue command to spin up drive for these cases. */ if (the_result && !rscsi_disks[i].device->removable && - SRpnt->sr_sense_buffer[2] == NOT_READY) { + SRpnt->sr_sense_buffer[2] == NOT_READY && + ! ( SRpnt->sr_sense_buffer[12] == 0x04 && /* ASC */ + SRpnt->sr_sense_buffer[13] == 0x03 )) { /* ASCQ */ unsigned long time1; if (!spintime) { printk("%s: Spinning up disk...", nbuff); @@ -1013,7 +1016,7 @@ } printk("SCSI device %s: " - "%u %d-byte hdwr sectors (%u MB)\n", + "%u %d-byte hdwr sectors (%d MB)\n", nbuff, rscsi_disks[i].capacity, hard_sector, (sz - sz/625 + 974)/1950); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/sd.h linux.21rc5-ac2/drivers/scsi/sd.h --- linux.21rc5/drivers/scsi/sd.h 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/sd.h 2003-05-29 01:44:06.000000000 +0100 @@ -24,13 +24,13 @@ #endif typedef struct scsi_disk { - unsigned capacity; /* size in blocks */ Scsi_Device *device; - unsigned char ready; /* flag ready for FLOPTICAL */ - unsigned char write_prot; /* flag write_protect for rmvable dev */ - unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */ - unsigned char sector_bit_shift; /* power of 2 sectors per FS block */ + unsigned capacity; /* size in blocks */ + unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */ + unsigned char sector_bit_shift; /* power of 2 sectors per FS block */ unsigned has_part_table:1; /* has partition table */ + unsigned ready:1; /* flag ready for FLOPTICAL */ + unsigned write_prot:1; /* flag write_protect for rmvable dev */ } Scsi_Disk; extern int revalidate_scsidisk(kdev_t dev, int maxusage); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/sim710_d.h linux.21rc5-ac2/drivers/scsi/sim710_d.h --- linux.21rc5/drivers/scsi/sim710_d.h 2003-05-28 15:07:53.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/sim710_d.h 2003-04-22 16:44:37.000000000 +0100 @@ -18,15 +18,12 @@ ABSOLUTE reselected_identify = 0 ABSOLUTE msgin_buf = 0 +ABSOLUTE msg_reject 
= 0 +ABSOLUTE test1_src = 0 +ABSOLUTE test1_dst = 0 -ABSOLUTE int_bad_extmsg1a = 0xab930000 -ABSOLUTE int_bad_extmsg1b = 0xab930001 -ABSOLUTE int_bad_extmsg2a = 0xab930002 -ABSOLUTE int_bad_extmsg2b = 0xab930003 -ABSOLUTE int_bad_extmsg3a = 0xab930004 -ABSOLUTE int_bad_extmsg3b = 0xab930005 ABSOLUTE int_bad_msg1 = 0xab930006 ABSOLUTE int_bad_msg2 = 0xab930007 ABSOLUTE int_bad_msg3 = 0xab930008 @@ -50,7 +47,7 @@ ABSOLUTE int_disc2 = 0xab93001a ABSOLUTE int_disc3 = 0xab93001b ABSOLUTE int_not_rej = 0xab93001c - +ABSOLUTE int_test1 = 0xab93001d @@ -65,6 +62,9 @@ +ABSOLUTE did_reject = 0x01 + + @@ -74,1641 +74,1709 @@ at 0x00000000 : */ 0x60000200,0x00000000, /* - MOVE SCRATCH0 & 0 TO SCRATCH0 - -at 0x00000002 : */ 0x7c340000,0x00000000, -/* ; Enable selection timer MOVE CTEST7 & 0xef TO CTEST7 -at 0x00000004 : */ 0x7c1bef00,0x00000000, +at 0x00000002 : */ 0x7c1bef00,0x00000000, /* SELECT ATN FROM dsa_select, reselect -at 0x00000006 : */ 0x43000000,0x00000c48, +at 0x00000004 : */ 0x43000000,0x00000cd0, /* JUMP get_status, WHEN STATUS -at 0x00000008 : */ 0x830b0000,0x000000a0, +at 0x00000006 : */ 0x830b0000,0x00000098, /* ; Disable selection timer MOVE CTEST7 | 0x10 TO CTEST7 -at 0x0000000a : */ 0x7a1b1000,0x00000000, +at 0x00000008 : */ 0x7a1b1000,0x00000000, /* MOVE SCRATCH0 | had_select TO SCRATCH0 -at 0x0000000c : */ 0x7a340100,0x00000000, +at 0x0000000a : */ 0x7a340100,0x00000000, /* INT int_sel_no_ident, IF NOT MSG_OUT -at 0x0000000e : */ 0x9e020000,0xab930013, +at 0x0000000c : */ 0x9e020000,0xab930013, /* MOVE SCRATCH0 | had_msgout TO SCRATCH0 -at 0x00000010 : */ 0x7a340200,0x00000000, +at 0x0000000e : */ 0x7a340200,0x00000000, /* MOVE FROM dsa_msgout, when MSG_OUT -at 0x00000012 : */ 0x1e000000,0x00000008, +at 0x00000010 : */ 0x1e000000,0x00000008, /* ENTRY done_ident done_ident: JUMP get_status, IF STATUS -at 0x00000014 : */ 0x830a0000,0x000000a0, +at 0x00000012 : */ 0x830a0000,0x00000098, /* redo_msgin1: JUMP get_msgin1, WHEN MSG_IN -at 0x00000016 : */ 0x870b0000,0x00000920, +at 0x00000014 : */ 0x870b0000,0x00000918, /* INT int_sel_not_cmd, IF NOT CMD -at 0x00000018 : */ 0x9a020000,0xab930014, +at 0x00000016 : */ 0x9a020000,0xab930014, /* ENTRY resume_cmd resume_cmd: MOVE SCRATCH0 | had_cmdout TO SCRATCH0 -at 0x0000001a : */ 0x7a340400,0x00000000, +at 0x00000018 : */ 0x7a340400,0x00000000, /* MOVE FROM dsa_cmnd, WHEN CMD -at 0x0000001c : */ 0x1a000000,0x00000010, +at 0x0000001a : */ 0x1a000000,0x00000010, /* ENTRY resume_pmm resume_pmm: redo_msgin2: JUMP get_msgin2, WHEN MSG_IN -at 0x0000001e : */ 0x870b0000,0x00000a20, +at 0x0000001c : */ 0x870b0000,0x00000a48, /* JUMP get_status, IF STATUS -at 0x00000020 : */ 0x830a0000,0x000000a0, +at 0x0000001e : */ 0x830a0000,0x00000098, /* JUMP input_data, IF DATA_IN -at 0x00000022 : */ 0x810a0000,0x000000e0, +at 0x00000020 : */ 0x810a0000,0x000000d8, /* JUMP output_data, IF DATA_OUT -at 0x00000024 : */ 0x800a0000,0x000004f8, +at 0x00000022 : */ 0x800a0000,0x000004f0, /* INT int_cmd_bad_phase -at 0x00000026 : */ 0x98080000,0xab930009, +at 0x00000024 : */ 0x98080000,0xab930009, /* get_status: ; Disable selection timer MOVE CTEST7 | 0x10 TO CTEST7 -at 0x00000028 : */ 0x7a1b1000,0x00000000, +at 0x00000026 : */ 0x7a1b1000,0x00000000, /* MOVE FROM dsa_status, WHEN STATUS -at 0x0000002a : */ 0x1b000000,0x00000018, +at 0x00000028 : */ 0x1b000000,0x00000018, /* INT int_status_not_msgin, WHEN NOT MSG_IN -at 0x0000002c : */ 0x9f030000,0xab930015, +at 0x0000002a : */ 0x9f030000,0xab930015, /* MOVE FROM dsa_msgin, WHEN MSG_IN -at 0x0000002e : */ 
0x1f000000,0x00000020, +at 0x0000002c : */ 0x1f000000,0x00000020, /* INT int_not_cmd_complete, IF NOT 0x00 -at 0x00000030 : */ 0x98040000,0xab930012, +at 0x0000002e : */ 0x98040000,0xab930012, /* CLEAR ACK -at 0x00000032 : */ 0x60000040,0x00000000, +at 0x00000030 : */ 0x60000040,0x00000000, /* ENTRY wait_disc_complete wait_disc_complete: WAIT DISCONNECT -at 0x00000034 : */ 0x48000000,0x00000000, +at 0x00000032 : */ 0x48000000,0x00000000, /* INT int_cmd_complete -at 0x00000036 : */ 0x98080000,0xab93000a, +at 0x00000034 : */ 0x98080000,0xab93000a, /* input_data: MOVE SCRATCH0 | had_datain TO SCRATCH0 -at 0x00000038 : */ 0x7a340800,0x00000000, +at 0x00000036 : */ 0x7a340800,0x00000000, /* ENTRY patch_input_data patch_input_data: JUMP 0 -at 0x0000003a : */ 0x80080000,0x00000000, +at 0x00000038 : */ 0x80080000,0x00000000, /* MOVE FROM dsa_datain+0x0000, WHEN DATA_IN -at 0x0000003c : */ 0x19000000,0x00000028, +at 0x0000003a : */ 0x19000000,0x00000028, /* MOVE FROM dsa_datain+0x0008, WHEN DATA_IN -at 0x0000003e : */ 0x19000000,0x00000030, +at 0x0000003c : */ 0x19000000,0x00000030, /* MOVE FROM dsa_datain+0x0010, WHEN DATA_IN -at 0x00000040 : */ 0x19000000,0x00000038, +at 0x0000003e : */ 0x19000000,0x00000038, /* MOVE FROM dsa_datain+0x0018, WHEN DATA_IN -at 0x00000042 : */ 0x19000000,0x00000040, +at 0x00000040 : */ 0x19000000,0x00000040, /* MOVE FROM dsa_datain+0x0020, WHEN DATA_IN -at 0x00000044 : */ 0x19000000,0x00000048, +at 0x00000042 : */ 0x19000000,0x00000048, /* MOVE FROM dsa_datain+0x0028, WHEN DATA_IN -at 0x00000046 : */ 0x19000000,0x00000050, +at 0x00000044 : */ 0x19000000,0x00000050, /* MOVE FROM dsa_datain+0x0030, WHEN DATA_IN -at 0x00000048 : */ 0x19000000,0x00000058, +at 0x00000046 : */ 0x19000000,0x00000058, /* MOVE FROM dsa_datain+0x0038, WHEN DATA_IN -at 0x0000004a : */ 0x19000000,0x00000060, +at 0x00000048 : */ 0x19000000,0x00000060, /* MOVE FROM dsa_datain+0x0040, WHEN DATA_IN -at 0x0000004c : */ 0x19000000,0x00000068, +at 0x0000004a : */ 0x19000000,0x00000068, /* MOVE FROM dsa_datain+0x0048, WHEN DATA_IN -at 0x0000004e : */ 0x19000000,0x00000070, +at 0x0000004c : */ 0x19000000,0x00000070, /* MOVE FROM dsa_datain+0x0050, WHEN DATA_IN -at 0x00000050 : */ 0x19000000,0x00000078, +at 0x0000004e : */ 0x19000000,0x00000078, /* MOVE FROM dsa_datain+0x0058, WHEN DATA_IN -at 0x00000052 : */ 0x19000000,0x00000080, +at 0x00000050 : */ 0x19000000,0x00000080, /* MOVE FROM dsa_datain+0x0060, WHEN DATA_IN -at 0x00000054 : */ 0x19000000,0x00000088, +at 0x00000052 : */ 0x19000000,0x00000088, /* MOVE FROM dsa_datain+0x0068, WHEN DATA_IN -at 0x00000056 : */ 0x19000000,0x00000090, +at 0x00000054 : */ 0x19000000,0x00000090, /* MOVE FROM dsa_datain+0x0070, WHEN DATA_IN -at 0x00000058 : */ 0x19000000,0x00000098, +at 0x00000056 : */ 0x19000000,0x00000098, /* MOVE FROM dsa_datain+0x0078, WHEN DATA_IN -at 0x0000005a : */ 0x19000000,0x000000a0, +at 0x00000058 : */ 0x19000000,0x000000a0, /* MOVE FROM dsa_datain+0x0080, WHEN DATA_IN -at 0x0000005c : */ 0x19000000,0x000000a8, +at 0x0000005a : */ 0x19000000,0x000000a8, /* MOVE FROM dsa_datain+0x0088, WHEN DATA_IN -at 0x0000005e : */ 0x19000000,0x000000b0, +at 0x0000005c : */ 0x19000000,0x000000b0, /* MOVE FROM dsa_datain+0x0090, WHEN DATA_IN -at 0x00000060 : */ 0x19000000,0x000000b8, +at 0x0000005e : */ 0x19000000,0x000000b8, /* MOVE FROM dsa_datain+0x0098, WHEN DATA_IN -at 0x00000062 : */ 0x19000000,0x000000c0, +at 0x00000060 : */ 0x19000000,0x000000c0, /* MOVE FROM dsa_datain+0x00a0, WHEN DATA_IN -at 0x00000064 : */ 0x19000000,0x000000c8, +at 0x00000062 : 
*/ 0x19000000,0x000000c8, /* MOVE FROM dsa_datain+0x00a8, WHEN DATA_IN -at 0x00000066 : */ 0x19000000,0x000000d0, +at 0x00000064 : */ 0x19000000,0x000000d0, /* MOVE FROM dsa_datain+0x00b0, WHEN DATA_IN -at 0x00000068 : */ 0x19000000,0x000000d8, +at 0x00000066 : */ 0x19000000,0x000000d8, /* MOVE FROM dsa_datain+0x00b8, WHEN DATA_IN -at 0x0000006a : */ 0x19000000,0x000000e0, +at 0x00000068 : */ 0x19000000,0x000000e0, /* MOVE FROM dsa_datain+0x00c0, WHEN DATA_IN -at 0x0000006c : */ 0x19000000,0x000000e8, +at 0x0000006a : */ 0x19000000,0x000000e8, /* MOVE FROM dsa_datain+0x00c8, WHEN DATA_IN -at 0x0000006e : */ 0x19000000,0x000000f0, +at 0x0000006c : */ 0x19000000,0x000000f0, /* MOVE FROM dsa_datain+0x00d0, WHEN DATA_IN -at 0x00000070 : */ 0x19000000,0x000000f8, +at 0x0000006e : */ 0x19000000,0x000000f8, /* MOVE FROM dsa_datain+0x00d8, WHEN DATA_IN -at 0x00000072 : */ 0x19000000,0x00000100, +at 0x00000070 : */ 0x19000000,0x00000100, /* MOVE FROM dsa_datain+0x00e0, WHEN DATA_IN -at 0x00000074 : */ 0x19000000,0x00000108, +at 0x00000072 : */ 0x19000000,0x00000108, /* MOVE FROM dsa_datain+0x00e8, WHEN DATA_IN -at 0x00000076 : */ 0x19000000,0x00000110, +at 0x00000074 : */ 0x19000000,0x00000110, /* MOVE FROM dsa_datain+0x00f0, WHEN DATA_IN -at 0x00000078 : */ 0x19000000,0x00000118, +at 0x00000076 : */ 0x19000000,0x00000118, /* MOVE FROM dsa_datain+0x00f8, WHEN DATA_IN -at 0x0000007a : */ 0x19000000,0x00000120, +at 0x00000078 : */ 0x19000000,0x00000120, /* MOVE FROM dsa_datain+0x0100, WHEN DATA_IN -at 0x0000007c : */ 0x19000000,0x00000128, +at 0x0000007a : */ 0x19000000,0x00000128, /* MOVE FROM dsa_datain+0x0108, WHEN DATA_IN -at 0x0000007e : */ 0x19000000,0x00000130, +at 0x0000007c : */ 0x19000000,0x00000130, /* MOVE FROM dsa_datain+0x0110, WHEN DATA_IN -at 0x00000080 : */ 0x19000000,0x00000138, +at 0x0000007e : */ 0x19000000,0x00000138, /* MOVE FROM dsa_datain+0x0118, WHEN DATA_IN -at 0x00000082 : */ 0x19000000,0x00000140, +at 0x00000080 : */ 0x19000000,0x00000140, /* MOVE FROM dsa_datain+0x0120, WHEN DATA_IN -at 0x00000084 : */ 0x19000000,0x00000148, +at 0x00000082 : */ 0x19000000,0x00000148, /* MOVE FROM dsa_datain+0x0128, WHEN DATA_IN -at 0x00000086 : */ 0x19000000,0x00000150, +at 0x00000084 : */ 0x19000000,0x00000150, /* MOVE FROM dsa_datain+0x0130, WHEN DATA_IN -at 0x00000088 : */ 0x19000000,0x00000158, +at 0x00000086 : */ 0x19000000,0x00000158, /* MOVE FROM dsa_datain+0x0138, WHEN DATA_IN -at 0x0000008a : */ 0x19000000,0x00000160, +at 0x00000088 : */ 0x19000000,0x00000160, /* MOVE FROM dsa_datain+0x0140, WHEN DATA_IN -at 0x0000008c : */ 0x19000000,0x00000168, +at 0x0000008a : */ 0x19000000,0x00000168, /* MOVE FROM dsa_datain+0x0148, WHEN DATA_IN -at 0x0000008e : */ 0x19000000,0x00000170, +at 0x0000008c : */ 0x19000000,0x00000170, /* MOVE FROM dsa_datain+0x0150, WHEN DATA_IN -at 0x00000090 : */ 0x19000000,0x00000178, +at 0x0000008e : */ 0x19000000,0x00000178, /* MOVE FROM dsa_datain+0x0158, WHEN DATA_IN -at 0x00000092 : */ 0x19000000,0x00000180, +at 0x00000090 : */ 0x19000000,0x00000180, /* MOVE FROM dsa_datain+0x0160, WHEN DATA_IN -at 0x00000094 : */ 0x19000000,0x00000188, +at 0x00000092 : */ 0x19000000,0x00000188, /* MOVE FROM dsa_datain+0x0168, WHEN DATA_IN -at 0x00000096 : */ 0x19000000,0x00000190, +at 0x00000094 : */ 0x19000000,0x00000190, /* MOVE FROM dsa_datain+0x0170, WHEN DATA_IN -at 0x00000098 : */ 0x19000000,0x00000198, +at 0x00000096 : */ 0x19000000,0x00000198, /* MOVE FROM dsa_datain+0x0178, WHEN DATA_IN -at 0x0000009a : */ 0x19000000,0x000001a0, +at 0x00000098 : */ 
0x19000000,0x000001a0, /* MOVE FROM dsa_datain+0x0180, WHEN DATA_IN -at 0x0000009c : */ 0x19000000,0x000001a8, +at 0x0000009a : */ 0x19000000,0x000001a8, /* MOVE FROM dsa_datain+0x0188, WHEN DATA_IN -at 0x0000009e : */ 0x19000000,0x000001b0, +at 0x0000009c : */ 0x19000000,0x000001b0, /* MOVE FROM dsa_datain+0x0190, WHEN DATA_IN -at 0x000000a0 : */ 0x19000000,0x000001b8, +at 0x0000009e : */ 0x19000000,0x000001b8, /* MOVE FROM dsa_datain+0x0198, WHEN DATA_IN -at 0x000000a2 : */ 0x19000000,0x000001c0, +at 0x000000a0 : */ 0x19000000,0x000001c0, /* MOVE FROM dsa_datain+0x01a0, WHEN DATA_IN -at 0x000000a4 : */ 0x19000000,0x000001c8, +at 0x000000a2 : */ 0x19000000,0x000001c8, /* MOVE FROM dsa_datain+0x01a8, WHEN DATA_IN -at 0x000000a6 : */ 0x19000000,0x000001d0, +at 0x000000a4 : */ 0x19000000,0x000001d0, /* MOVE FROM dsa_datain+0x01b0, WHEN DATA_IN -at 0x000000a8 : */ 0x19000000,0x000001d8, +at 0x000000a6 : */ 0x19000000,0x000001d8, /* MOVE FROM dsa_datain+0x01b8, WHEN DATA_IN -at 0x000000aa : */ 0x19000000,0x000001e0, +at 0x000000a8 : */ 0x19000000,0x000001e0, /* MOVE FROM dsa_datain+0x01c0, WHEN DATA_IN -at 0x000000ac : */ 0x19000000,0x000001e8, +at 0x000000aa : */ 0x19000000,0x000001e8, /* MOVE FROM dsa_datain+0x01c8, WHEN DATA_IN -at 0x000000ae : */ 0x19000000,0x000001f0, +at 0x000000ac : */ 0x19000000,0x000001f0, /* MOVE FROM dsa_datain+0x01d0, WHEN DATA_IN -at 0x000000b0 : */ 0x19000000,0x000001f8, +at 0x000000ae : */ 0x19000000,0x000001f8, /* MOVE FROM dsa_datain+0x01d8, WHEN DATA_IN -at 0x000000b2 : */ 0x19000000,0x00000200, +at 0x000000b0 : */ 0x19000000,0x00000200, /* MOVE FROM dsa_datain+0x01e0, WHEN DATA_IN -at 0x000000b4 : */ 0x19000000,0x00000208, +at 0x000000b2 : */ 0x19000000,0x00000208, /* MOVE FROM dsa_datain+0x01e8, WHEN DATA_IN -at 0x000000b6 : */ 0x19000000,0x00000210, +at 0x000000b4 : */ 0x19000000,0x00000210, /* MOVE FROM dsa_datain+0x01f0, WHEN DATA_IN -at 0x000000b8 : */ 0x19000000,0x00000218, +at 0x000000b6 : */ 0x19000000,0x00000218, /* MOVE FROM dsa_datain+0x01f8, WHEN DATA_IN -at 0x000000ba : */ 0x19000000,0x00000220, +at 0x000000b8 : */ 0x19000000,0x00000220, /* MOVE FROM dsa_datain+0x0200, WHEN DATA_IN -at 0x000000bc : */ 0x19000000,0x00000228, +at 0x000000ba : */ 0x19000000,0x00000228, /* MOVE FROM dsa_datain+0x0208, WHEN DATA_IN -at 0x000000be : */ 0x19000000,0x00000230, +at 0x000000bc : */ 0x19000000,0x00000230, /* MOVE FROM dsa_datain+0x0210, WHEN DATA_IN -at 0x000000c0 : */ 0x19000000,0x00000238, +at 0x000000be : */ 0x19000000,0x00000238, /* MOVE FROM dsa_datain+0x0218, WHEN DATA_IN -at 0x000000c2 : */ 0x19000000,0x00000240, +at 0x000000c0 : */ 0x19000000,0x00000240, /* MOVE FROM dsa_datain+0x0220, WHEN DATA_IN -at 0x000000c4 : */ 0x19000000,0x00000248, +at 0x000000c2 : */ 0x19000000,0x00000248, /* MOVE FROM dsa_datain+0x0228, WHEN DATA_IN -at 0x000000c6 : */ 0x19000000,0x00000250, +at 0x000000c4 : */ 0x19000000,0x00000250, /* MOVE FROM dsa_datain+0x0230, WHEN DATA_IN -at 0x000000c8 : */ 0x19000000,0x00000258, +at 0x000000c6 : */ 0x19000000,0x00000258, /* MOVE FROM dsa_datain+0x0238, WHEN DATA_IN -at 0x000000ca : */ 0x19000000,0x00000260, +at 0x000000c8 : */ 0x19000000,0x00000260, /* MOVE FROM dsa_datain+0x0240, WHEN DATA_IN -at 0x000000cc : */ 0x19000000,0x00000268, +at 0x000000ca : */ 0x19000000,0x00000268, /* MOVE FROM dsa_datain+0x0248, WHEN DATA_IN -at 0x000000ce : */ 0x19000000,0x00000270, +at 0x000000cc : */ 0x19000000,0x00000270, /* MOVE FROM dsa_datain+0x0250, WHEN DATA_IN -at 0x000000d0 : */ 0x19000000,0x00000278, +at 0x000000ce : */ 
0x19000000,0x00000278, /* MOVE FROM dsa_datain+0x0258, WHEN DATA_IN -at 0x000000d2 : */ 0x19000000,0x00000280, +at 0x000000d0 : */ 0x19000000,0x00000280, /* MOVE FROM dsa_datain+0x0260, WHEN DATA_IN -at 0x000000d4 : */ 0x19000000,0x00000288, +at 0x000000d2 : */ 0x19000000,0x00000288, /* MOVE FROM dsa_datain+0x0268, WHEN DATA_IN -at 0x000000d6 : */ 0x19000000,0x00000290, +at 0x000000d4 : */ 0x19000000,0x00000290, /* MOVE FROM dsa_datain+0x0270, WHEN DATA_IN -at 0x000000d8 : */ 0x19000000,0x00000298, +at 0x000000d6 : */ 0x19000000,0x00000298, /* MOVE FROM dsa_datain+0x0278, WHEN DATA_IN -at 0x000000da : */ 0x19000000,0x000002a0, +at 0x000000d8 : */ 0x19000000,0x000002a0, /* MOVE FROM dsa_datain+0x0280, WHEN DATA_IN -at 0x000000dc : */ 0x19000000,0x000002a8, +at 0x000000da : */ 0x19000000,0x000002a8, /* MOVE FROM dsa_datain+0x0288, WHEN DATA_IN -at 0x000000de : */ 0x19000000,0x000002b0, +at 0x000000dc : */ 0x19000000,0x000002b0, /* MOVE FROM dsa_datain+0x0290, WHEN DATA_IN -at 0x000000e0 : */ 0x19000000,0x000002b8, +at 0x000000de : */ 0x19000000,0x000002b8, /* MOVE FROM dsa_datain+0x0298, WHEN DATA_IN -at 0x000000e2 : */ 0x19000000,0x000002c0, +at 0x000000e0 : */ 0x19000000,0x000002c0, /* MOVE FROM dsa_datain+0x02a0, WHEN DATA_IN -at 0x000000e4 : */ 0x19000000,0x000002c8, +at 0x000000e2 : */ 0x19000000,0x000002c8, /* MOVE FROM dsa_datain+0x02a8, WHEN DATA_IN -at 0x000000e6 : */ 0x19000000,0x000002d0, +at 0x000000e4 : */ 0x19000000,0x000002d0, /* MOVE FROM dsa_datain+0x02b0, WHEN DATA_IN -at 0x000000e8 : */ 0x19000000,0x000002d8, +at 0x000000e6 : */ 0x19000000,0x000002d8, /* MOVE FROM dsa_datain+0x02b8, WHEN DATA_IN -at 0x000000ea : */ 0x19000000,0x000002e0, +at 0x000000e8 : */ 0x19000000,0x000002e0, /* MOVE FROM dsa_datain+0x02c0, WHEN DATA_IN -at 0x000000ec : */ 0x19000000,0x000002e8, +at 0x000000ea : */ 0x19000000,0x000002e8, /* MOVE FROM dsa_datain+0x02c8, WHEN DATA_IN -at 0x000000ee : */ 0x19000000,0x000002f0, +at 0x000000ec : */ 0x19000000,0x000002f0, /* MOVE FROM dsa_datain+0x02d0, WHEN DATA_IN -at 0x000000f0 : */ 0x19000000,0x000002f8, +at 0x000000ee : */ 0x19000000,0x000002f8, /* MOVE FROM dsa_datain+0x02d8, WHEN DATA_IN -at 0x000000f2 : */ 0x19000000,0x00000300, +at 0x000000f0 : */ 0x19000000,0x00000300, /* MOVE FROM dsa_datain+0x02e0, WHEN DATA_IN -at 0x000000f4 : */ 0x19000000,0x00000308, +at 0x000000f2 : */ 0x19000000,0x00000308, /* MOVE FROM dsa_datain+0x02e8, WHEN DATA_IN -at 0x000000f6 : */ 0x19000000,0x00000310, +at 0x000000f4 : */ 0x19000000,0x00000310, /* MOVE FROM dsa_datain+0x02f0, WHEN DATA_IN -at 0x000000f8 : */ 0x19000000,0x00000318, +at 0x000000f6 : */ 0x19000000,0x00000318, /* MOVE FROM dsa_datain+0x02f8, WHEN DATA_IN -at 0x000000fa : */ 0x19000000,0x00000320, +at 0x000000f8 : */ 0x19000000,0x00000320, /* MOVE FROM dsa_datain+0x0300, WHEN DATA_IN -at 0x000000fc : */ 0x19000000,0x00000328, +at 0x000000fa : */ 0x19000000,0x00000328, /* MOVE FROM dsa_datain+0x0308, WHEN DATA_IN -at 0x000000fe : */ 0x19000000,0x00000330, +at 0x000000fc : */ 0x19000000,0x00000330, /* MOVE FROM dsa_datain+0x0310, WHEN DATA_IN -at 0x00000100 : */ 0x19000000,0x00000338, +at 0x000000fe : */ 0x19000000,0x00000338, /* MOVE FROM dsa_datain+0x0318, WHEN DATA_IN -at 0x00000102 : */ 0x19000000,0x00000340, +at 0x00000100 : */ 0x19000000,0x00000340, /* MOVE FROM dsa_datain+0x0320, WHEN DATA_IN -at 0x00000104 : */ 0x19000000,0x00000348, +at 0x00000102 : */ 0x19000000,0x00000348, /* MOVE FROM dsa_datain+0x0328, WHEN DATA_IN -at 0x00000106 : */ 0x19000000,0x00000350, +at 0x00000104 : */ 
0x19000000,0x00000350, /* MOVE FROM dsa_datain+0x0330, WHEN DATA_IN -at 0x00000108 : */ 0x19000000,0x00000358, +at 0x00000106 : */ 0x19000000,0x00000358, /* MOVE FROM dsa_datain+0x0338, WHEN DATA_IN -at 0x0000010a : */ 0x19000000,0x00000360, +at 0x00000108 : */ 0x19000000,0x00000360, /* MOVE FROM dsa_datain+0x0340, WHEN DATA_IN -at 0x0000010c : */ 0x19000000,0x00000368, +at 0x0000010a : */ 0x19000000,0x00000368, /* MOVE FROM dsa_datain+0x0348, WHEN DATA_IN -at 0x0000010e : */ 0x19000000,0x00000370, +at 0x0000010c : */ 0x19000000,0x00000370, /* MOVE FROM dsa_datain+0x0350, WHEN DATA_IN -at 0x00000110 : */ 0x19000000,0x00000378, +at 0x0000010e : */ 0x19000000,0x00000378, /* MOVE FROM dsa_datain+0x0358, WHEN DATA_IN -at 0x00000112 : */ 0x19000000,0x00000380, +at 0x00000110 : */ 0x19000000,0x00000380, /* MOVE FROM dsa_datain+0x0360, WHEN DATA_IN -at 0x00000114 : */ 0x19000000,0x00000388, +at 0x00000112 : */ 0x19000000,0x00000388, /* MOVE FROM dsa_datain+0x0368, WHEN DATA_IN -at 0x00000116 : */ 0x19000000,0x00000390, +at 0x00000114 : */ 0x19000000,0x00000390, /* MOVE FROM dsa_datain+0x0370, WHEN DATA_IN -at 0x00000118 : */ 0x19000000,0x00000398, +at 0x00000116 : */ 0x19000000,0x00000398, /* MOVE FROM dsa_datain+0x0378, WHEN DATA_IN -at 0x0000011a : */ 0x19000000,0x000003a0, +at 0x00000118 : */ 0x19000000,0x000003a0, /* MOVE FROM dsa_datain+0x0380, WHEN DATA_IN -at 0x0000011c : */ 0x19000000,0x000003a8, +at 0x0000011a : */ 0x19000000,0x000003a8, /* MOVE FROM dsa_datain+0x0388, WHEN DATA_IN -at 0x0000011e : */ 0x19000000,0x000003b0, +at 0x0000011c : */ 0x19000000,0x000003b0, /* MOVE FROM dsa_datain+0x0390, WHEN DATA_IN -at 0x00000120 : */ 0x19000000,0x000003b8, +at 0x0000011e : */ 0x19000000,0x000003b8, /* MOVE FROM dsa_datain+0x0398, WHEN DATA_IN -at 0x00000122 : */ 0x19000000,0x000003c0, +at 0x00000120 : */ 0x19000000,0x000003c0, /* MOVE FROM dsa_datain+0x03a0, WHEN DATA_IN -at 0x00000124 : */ 0x19000000,0x000003c8, +at 0x00000122 : */ 0x19000000,0x000003c8, /* MOVE FROM dsa_datain+0x03a8, WHEN DATA_IN -at 0x00000126 : */ 0x19000000,0x000003d0, +at 0x00000124 : */ 0x19000000,0x000003d0, /* MOVE FROM dsa_datain+0x03b0, WHEN DATA_IN -at 0x00000128 : */ 0x19000000,0x000003d8, +at 0x00000126 : */ 0x19000000,0x000003d8, /* MOVE FROM dsa_datain+0x03b8, WHEN DATA_IN -at 0x0000012a : */ 0x19000000,0x000003e0, +at 0x00000128 : */ 0x19000000,0x000003e0, /* MOVE FROM dsa_datain+0x03c0, WHEN DATA_IN -at 0x0000012c : */ 0x19000000,0x000003e8, +at 0x0000012a : */ 0x19000000,0x000003e8, /* MOVE FROM dsa_datain+0x03c8, WHEN DATA_IN -at 0x0000012e : */ 0x19000000,0x000003f0, +at 0x0000012c : */ 0x19000000,0x000003f0, /* MOVE FROM dsa_datain+0x03d0, WHEN DATA_IN -at 0x00000130 : */ 0x19000000,0x000003f8, +at 0x0000012e : */ 0x19000000,0x000003f8, /* MOVE FROM dsa_datain+0x03d8, WHEN DATA_IN -at 0x00000132 : */ 0x19000000,0x00000400, +at 0x00000130 : */ 0x19000000,0x00000400, /* MOVE FROM dsa_datain+0x03e0, WHEN DATA_IN -at 0x00000134 : */ 0x19000000,0x00000408, +at 0x00000132 : */ 0x19000000,0x00000408, /* MOVE FROM dsa_datain+0x03e8, WHEN DATA_IN -at 0x00000136 : */ 0x19000000,0x00000410, +at 0x00000134 : */ 0x19000000,0x00000410, /* MOVE FROM dsa_datain+0x03f0, WHEN DATA_IN -at 0x00000138 : */ 0x19000000,0x00000418, +at 0x00000136 : */ 0x19000000,0x00000418, /* MOVE FROM dsa_datain+0x03f8, WHEN DATA_IN -at 0x0000013a : */ 0x19000000,0x00000420, +at 0x00000138 : */ 0x19000000,0x00000420, /* JUMP end_data_trans -at 0x0000013c : */ 0x80080000,0x00000908, +at 0x0000013a : */ 0x80080000,0x00000900, /* output_data: 
MOVE SCRATCH0 | had_dataout TO SCRATCH0 -at 0x0000013e : */ 0x7a341000,0x00000000, +at 0x0000013c : */ 0x7a341000,0x00000000, /* ENTRY patch_output_data patch_output_data: JUMP 0 -at 0x00000140 : */ 0x80080000,0x00000000, +at 0x0000013e : */ 0x80080000,0x00000000, /* MOVE FROM dsa_dataout+0x0000, WHEN DATA_OUT -at 0x00000142 : */ 0x18000000,0x00000428, +at 0x00000140 : */ 0x18000000,0x00000428, /* MOVE FROM dsa_dataout+0x0008, WHEN DATA_OUT -at 0x00000144 : */ 0x18000000,0x00000430, +at 0x00000142 : */ 0x18000000,0x00000430, /* MOVE FROM dsa_dataout+0x0010, WHEN DATA_OUT -at 0x00000146 : */ 0x18000000,0x00000438, +at 0x00000144 : */ 0x18000000,0x00000438, /* MOVE FROM dsa_dataout+0x0018, WHEN DATA_OUT -at 0x00000148 : */ 0x18000000,0x00000440, +at 0x00000146 : */ 0x18000000,0x00000440, /* MOVE FROM dsa_dataout+0x0020, WHEN DATA_OUT -at 0x0000014a : */ 0x18000000,0x00000448, +at 0x00000148 : */ 0x18000000,0x00000448, /* MOVE FROM dsa_dataout+0x0028, WHEN DATA_OUT -at 0x0000014c : */ 0x18000000,0x00000450, +at 0x0000014a : */ 0x18000000,0x00000450, /* MOVE FROM dsa_dataout+0x0030, WHEN DATA_OUT -at 0x0000014e : */ 0x18000000,0x00000458, +at 0x0000014c : */ 0x18000000,0x00000458, /* MOVE FROM dsa_dataout+0x0038, WHEN DATA_OUT -at 0x00000150 : */ 0x18000000,0x00000460, +at 0x0000014e : */ 0x18000000,0x00000460, /* MOVE FROM dsa_dataout+0x0040, WHEN DATA_OUT -at 0x00000152 : */ 0x18000000,0x00000468, +at 0x00000150 : */ 0x18000000,0x00000468, /* MOVE FROM dsa_dataout+0x0048, WHEN DATA_OUT -at 0x00000154 : */ 0x18000000,0x00000470, +at 0x00000152 : */ 0x18000000,0x00000470, /* MOVE FROM dsa_dataout+0x0050, WHEN DATA_OUT -at 0x00000156 : */ 0x18000000,0x00000478, +at 0x00000154 : */ 0x18000000,0x00000478, /* MOVE FROM dsa_dataout+0x0058, WHEN DATA_OUT -at 0x00000158 : */ 0x18000000,0x00000480, +at 0x00000156 : */ 0x18000000,0x00000480, /* MOVE FROM dsa_dataout+0x0060, WHEN DATA_OUT -at 0x0000015a : */ 0x18000000,0x00000488, +at 0x00000158 : */ 0x18000000,0x00000488, /* MOVE FROM dsa_dataout+0x0068, WHEN DATA_OUT -at 0x0000015c : */ 0x18000000,0x00000490, +at 0x0000015a : */ 0x18000000,0x00000490, /* MOVE FROM dsa_dataout+0x0070, WHEN DATA_OUT -at 0x0000015e : */ 0x18000000,0x00000498, +at 0x0000015c : */ 0x18000000,0x00000498, /* MOVE FROM dsa_dataout+0x0078, WHEN DATA_OUT -at 0x00000160 : */ 0x18000000,0x000004a0, +at 0x0000015e : */ 0x18000000,0x000004a0, /* MOVE FROM dsa_dataout+0x0080, WHEN DATA_OUT -at 0x00000162 : */ 0x18000000,0x000004a8, +at 0x00000160 : */ 0x18000000,0x000004a8, /* MOVE FROM dsa_dataout+0x0088, WHEN DATA_OUT -at 0x00000164 : */ 0x18000000,0x000004b0, +at 0x00000162 : */ 0x18000000,0x000004b0, /* MOVE FROM dsa_dataout+0x0090, WHEN DATA_OUT -at 0x00000166 : */ 0x18000000,0x000004b8, +at 0x00000164 : */ 0x18000000,0x000004b8, /* MOVE FROM dsa_dataout+0x0098, WHEN DATA_OUT -at 0x00000168 : */ 0x18000000,0x000004c0, +at 0x00000166 : */ 0x18000000,0x000004c0, /* MOVE FROM dsa_dataout+0x00a0, WHEN DATA_OUT -at 0x0000016a : */ 0x18000000,0x000004c8, +at 0x00000168 : */ 0x18000000,0x000004c8, /* MOVE FROM dsa_dataout+0x00a8, WHEN DATA_OUT -at 0x0000016c : */ 0x18000000,0x000004d0, +at 0x0000016a : */ 0x18000000,0x000004d0, /* MOVE FROM dsa_dataout+0x00b0, WHEN DATA_OUT -at 0x0000016e : */ 0x18000000,0x000004d8, +at 0x0000016c : */ 0x18000000,0x000004d8, /* MOVE FROM dsa_dataout+0x00b8, WHEN DATA_OUT -at 0x00000170 : */ 0x18000000,0x000004e0, +at 0x0000016e : */ 0x18000000,0x000004e0, /* MOVE FROM dsa_dataout+0x00c0, WHEN DATA_OUT -at 0x00000172 : */ 0x18000000,0x000004e8, +at 
0x00000170 : */ 0x18000000,0x000004e8, /* MOVE FROM dsa_dataout+0x00c8, WHEN DATA_OUT -at 0x00000174 : */ 0x18000000,0x000004f0, +at 0x00000172 : */ 0x18000000,0x000004f0, /* MOVE FROM dsa_dataout+0x00d0, WHEN DATA_OUT -at 0x00000176 : */ 0x18000000,0x000004f8, +at 0x00000174 : */ 0x18000000,0x000004f8, /* MOVE FROM dsa_dataout+0x00d8, WHEN DATA_OUT -at 0x00000178 : */ 0x18000000,0x00000500, +at 0x00000176 : */ 0x18000000,0x00000500, /* MOVE FROM dsa_dataout+0x00e0, WHEN DATA_OUT -at 0x0000017a : */ 0x18000000,0x00000508, +at 0x00000178 : */ 0x18000000,0x00000508, /* MOVE FROM dsa_dataout+0x00e8, WHEN DATA_OUT -at 0x0000017c : */ 0x18000000,0x00000510, +at 0x0000017a : */ 0x18000000,0x00000510, /* MOVE FROM dsa_dataout+0x00f0, WHEN DATA_OUT -at 0x0000017e : */ 0x18000000,0x00000518, +at 0x0000017c : */ 0x18000000,0x00000518, /* MOVE FROM dsa_dataout+0x00f8, WHEN DATA_OUT -at 0x00000180 : */ 0x18000000,0x00000520, +at 0x0000017e : */ 0x18000000,0x00000520, /* MOVE FROM dsa_dataout+0x0100, WHEN DATA_OUT -at 0x00000182 : */ 0x18000000,0x00000528, +at 0x00000180 : */ 0x18000000,0x00000528, /* MOVE FROM dsa_dataout+0x0108, WHEN DATA_OUT -at 0x00000184 : */ 0x18000000,0x00000530, +at 0x00000182 : */ 0x18000000,0x00000530, /* MOVE FROM dsa_dataout+0x0110, WHEN DATA_OUT -at 0x00000186 : */ 0x18000000,0x00000538, +at 0x00000184 : */ 0x18000000,0x00000538, /* MOVE FROM dsa_dataout+0x0118, WHEN DATA_OUT -at 0x00000188 : */ 0x18000000,0x00000540, +at 0x00000186 : */ 0x18000000,0x00000540, /* MOVE FROM dsa_dataout+0x0120, WHEN DATA_OUT -at 0x0000018a : */ 0x18000000,0x00000548, +at 0x00000188 : */ 0x18000000,0x00000548, /* MOVE FROM dsa_dataout+0x0128, WHEN DATA_OUT -at 0x0000018c : */ 0x18000000,0x00000550, +at 0x0000018a : */ 0x18000000,0x00000550, /* MOVE FROM dsa_dataout+0x0130, WHEN DATA_OUT -at 0x0000018e : */ 0x18000000,0x00000558, +at 0x0000018c : */ 0x18000000,0x00000558, /* MOVE FROM dsa_dataout+0x0138, WHEN DATA_OUT -at 0x00000190 : */ 0x18000000,0x00000560, +at 0x0000018e : */ 0x18000000,0x00000560, /* MOVE FROM dsa_dataout+0x0140, WHEN DATA_OUT -at 0x00000192 : */ 0x18000000,0x00000568, +at 0x00000190 : */ 0x18000000,0x00000568, /* MOVE FROM dsa_dataout+0x0148, WHEN DATA_OUT -at 0x00000194 : */ 0x18000000,0x00000570, +at 0x00000192 : */ 0x18000000,0x00000570, /* MOVE FROM dsa_dataout+0x0150, WHEN DATA_OUT -at 0x00000196 : */ 0x18000000,0x00000578, +at 0x00000194 : */ 0x18000000,0x00000578, /* MOVE FROM dsa_dataout+0x0158, WHEN DATA_OUT -at 0x00000198 : */ 0x18000000,0x00000580, +at 0x00000196 : */ 0x18000000,0x00000580, /* MOVE FROM dsa_dataout+0x0160, WHEN DATA_OUT -at 0x0000019a : */ 0x18000000,0x00000588, +at 0x00000198 : */ 0x18000000,0x00000588, /* MOVE FROM dsa_dataout+0x0168, WHEN DATA_OUT -at 0x0000019c : */ 0x18000000,0x00000590, +at 0x0000019a : */ 0x18000000,0x00000590, /* MOVE FROM dsa_dataout+0x0170, WHEN DATA_OUT -at 0x0000019e : */ 0x18000000,0x00000598, +at 0x0000019c : */ 0x18000000,0x00000598, /* MOVE FROM dsa_dataout+0x0178, WHEN DATA_OUT -at 0x000001a0 : */ 0x18000000,0x000005a0, +at 0x0000019e : */ 0x18000000,0x000005a0, /* MOVE FROM dsa_dataout+0x0180, WHEN DATA_OUT -at 0x000001a2 : */ 0x18000000,0x000005a8, +at 0x000001a0 : */ 0x18000000,0x000005a8, /* MOVE FROM dsa_dataout+0x0188, WHEN DATA_OUT -at 0x000001a4 : */ 0x18000000,0x000005b0, +at 0x000001a2 : */ 0x18000000,0x000005b0, /* MOVE FROM dsa_dataout+0x0190, WHEN DATA_OUT -at 0x000001a6 : */ 0x18000000,0x000005b8, +at 0x000001a4 : */ 0x18000000,0x000005b8, /* MOVE FROM dsa_dataout+0x0198, WHEN DATA_OUT -at 
0x000001a8 : */ 0x18000000,0x000005c0, +at 0x000001a6 : */ 0x18000000,0x000005c0, /* MOVE FROM dsa_dataout+0x01a0, WHEN DATA_OUT -at 0x000001aa : */ 0x18000000,0x000005c8, +at 0x000001a8 : */ 0x18000000,0x000005c8, /* MOVE FROM dsa_dataout+0x01a8, WHEN DATA_OUT -at 0x000001ac : */ 0x18000000,0x000005d0, +at 0x000001aa : */ 0x18000000,0x000005d0, /* MOVE FROM dsa_dataout+0x01b0, WHEN DATA_OUT -at 0x000001ae : */ 0x18000000,0x000005d8, +at 0x000001ac : */ 0x18000000,0x000005d8, /* MOVE FROM dsa_dataout+0x01b8, WHEN DATA_OUT -at 0x000001b0 : */ 0x18000000,0x000005e0, +at 0x000001ae : */ 0x18000000,0x000005e0, /* MOVE FROM dsa_dataout+0x01c0, WHEN DATA_OUT -at 0x000001b2 : */ 0x18000000,0x000005e8, +at 0x000001b0 : */ 0x18000000,0x000005e8, /* MOVE FROM dsa_dataout+0x01c8, WHEN DATA_OUT -at 0x000001b4 : */ 0x18000000,0x000005f0, +at 0x000001b2 : */ 0x18000000,0x000005f0, /* MOVE FROM dsa_dataout+0x01d0, WHEN DATA_OUT -at 0x000001b6 : */ 0x18000000,0x000005f8, +at 0x000001b4 : */ 0x18000000,0x000005f8, /* MOVE FROM dsa_dataout+0x01d8, WHEN DATA_OUT -at 0x000001b8 : */ 0x18000000,0x00000600, +at 0x000001b6 : */ 0x18000000,0x00000600, /* MOVE FROM dsa_dataout+0x01e0, WHEN DATA_OUT -at 0x000001ba : */ 0x18000000,0x00000608, +at 0x000001b8 : */ 0x18000000,0x00000608, /* MOVE FROM dsa_dataout+0x01e8, WHEN DATA_OUT -at 0x000001bc : */ 0x18000000,0x00000610, +at 0x000001ba : */ 0x18000000,0x00000610, /* MOVE FROM dsa_dataout+0x01f0, WHEN DATA_OUT -at 0x000001be : */ 0x18000000,0x00000618, +at 0x000001bc : */ 0x18000000,0x00000618, /* MOVE FROM dsa_dataout+0x01f8, WHEN DATA_OUT -at 0x000001c0 : */ 0x18000000,0x00000620, +at 0x000001be : */ 0x18000000,0x00000620, /* MOVE FROM dsa_dataout+0x0200, WHEN DATA_OUT -at 0x000001c2 : */ 0x18000000,0x00000628, +at 0x000001c0 : */ 0x18000000,0x00000628, /* MOVE FROM dsa_dataout+0x0208, WHEN DATA_OUT -at 0x000001c4 : */ 0x18000000,0x00000630, +at 0x000001c2 : */ 0x18000000,0x00000630, /* MOVE FROM dsa_dataout+0x0210, WHEN DATA_OUT -at 0x000001c6 : */ 0x18000000,0x00000638, +at 0x000001c4 : */ 0x18000000,0x00000638, /* MOVE FROM dsa_dataout+0x0218, WHEN DATA_OUT -at 0x000001c8 : */ 0x18000000,0x00000640, +at 0x000001c6 : */ 0x18000000,0x00000640, /* MOVE FROM dsa_dataout+0x0220, WHEN DATA_OUT -at 0x000001ca : */ 0x18000000,0x00000648, +at 0x000001c8 : */ 0x18000000,0x00000648, /* MOVE FROM dsa_dataout+0x0228, WHEN DATA_OUT -at 0x000001cc : */ 0x18000000,0x00000650, +at 0x000001ca : */ 0x18000000,0x00000650, /* MOVE FROM dsa_dataout+0x0230, WHEN DATA_OUT -at 0x000001ce : */ 0x18000000,0x00000658, +at 0x000001cc : */ 0x18000000,0x00000658, /* MOVE FROM dsa_dataout+0x0238, WHEN DATA_OUT -at 0x000001d0 : */ 0x18000000,0x00000660, +at 0x000001ce : */ 0x18000000,0x00000660, /* MOVE FROM dsa_dataout+0x0240, WHEN DATA_OUT -at 0x000001d2 : */ 0x18000000,0x00000668, +at 0x000001d0 : */ 0x18000000,0x00000668, /* MOVE FROM dsa_dataout+0x0248, WHEN DATA_OUT -at 0x000001d4 : */ 0x18000000,0x00000670, +at 0x000001d2 : */ 0x18000000,0x00000670, /* MOVE FROM dsa_dataout+0x0250, WHEN DATA_OUT -at 0x000001d6 : */ 0x18000000,0x00000678, +at 0x000001d4 : */ 0x18000000,0x00000678, /* MOVE FROM dsa_dataout+0x0258, WHEN DATA_OUT -at 0x000001d8 : */ 0x18000000,0x00000680, +at 0x000001d6 : */ 0x18000000,0x00000680, /* MOVE FROM dsa_dataout+0x0260, WHEN DATA_OUT -at 0x000001da : */ 0x18000000,0x00000688, +at 0x000001d8 : */ 0x18000000,0x00000688, /* MOVE FROM dsa_dataout+0x0268, WHEN DATA_OUT -at 0x000001dc : */ 0x18000000,0x00000690, +at 0x000001da : */ 0x18000000,0x00000690, /* MOVE FROM 
dsa_dataout+0x0270, WHEN DATA_OUT -at 0x000001de : */ 0x18000000,0x00000698, +at 0x000001dc : */ 0x18000000,0x00000698, /* MOVE FROM dsa_dataout+0x0278, WHEN DATA_OUT -at 0x000001e0 : */ 0x18000000,0x000006a0, +at 0x000001de : */ 0x18000000,0x000006a0, /* MOVE FROM dsa_dataout+0x0280, WHEN DATA_OUT -at 0x000001e2 : */ 0x18000000,0x000006a8, +at 0x000001e0 : */ 0x18000000,0x000006a8, /* MOVE FROM dsa_dataout+0x0288, WHEN DATA_OUT -at 0x000001e4 : */ 0x18000000,0x000006b0, +at 0x000001e2 : */ 0x18000000,0x000006b0, /* MOVE FROM dsa_dataout+0x0290, WHEN DATA_OUT -at 0x000001e6 : */ 0x18000000,0x000006b8, +at 0x000001e4 : */ 0x18000000,0x000006b8, /* MOVE FROM dsa_dataout+0x0298, WHEN DATA_OUT -at 0x000001e8 : */ 0x18000000,0x000006c0, +at 0x000001e6 : */ 0x18000000,0x000006c0, /* MOVE FROM dsa_dataout+0x02a0, WHEN DATA_OUT -at 0x000001ea : */ 0x18000000,0x000006c8, +at 0x000001e8 : */ 0x18000000,0x000006c8, /* MOVE FROM dsa_dataout+0x02a8, WHEN DATA_OUT -at 0x000001ec : */ 0x18000000,0x000006d0, +at 0x000001ea : */ 0x18000000,0x000006d0, /* MOVE FROM dsa_dataout+0x02b0, WHEN DATA_OUT -at 0x000001ee : */ 0x18000000,0x000006d8, +at 0x000001ec : */ 0x18000000,0x000006d8, /* MOVE FROM dsa_dataout+0x02b8, WHEN DATA_OUT -at 0x000001f0 : */ 0x18000000,0x000006e0, +at 0x000001ee : */ 0x18000000,0x000006e0, /* MOVE FROM dsa_dataout+0x02c0, WHEN DATA_OUT -at 0x000001f2 : */ 0x18000000,0x000006e8, +at 0x000001f0 : */ 0x18000000,0x000006e8, /* MOVE FROM dsa_dataout+0x02c8, WHEN DATA_OUT -at 0x000001f4 : */ 0x18000000,0x000006f0, +at 0x000001f2 : */ 0x18000000,0x000006f0, /* MOVE FROM dsa_dataout+0x02d0, WHEN DATA_OUT -at 0x000001f6 : */ 0x18000000,0x000006f8, +at 0x000001f4 : */ 0x18000000,0x000006f8, /* MOVE FROM dsa_dataout+0x02d8, WHEN DATA_OUT -at 0x000001f8 : */ 0x18000000,0x00000700, +at 0x000001f6 : */ 0x18000000,0x00000700, /* MOVE FROM dsa_dataout+0x02e0, WHEN DATA_OUT -at 0x000001fa : */ 0x18000000,0x00000708, +at 0x000001f8 : */ 0x18000000,0x00000708, /* MOVE FROM dsa_dataout+0x02e8, WHEN DATA_OUT -at 0x000001fc : */ 0x18000000,0x00000710, +at 0x000001fa : */ 0x18000000,0x00000710, /* MOVE FROM dsa_dataout+0x02f0, WHEN DATA_OUT -at 0x000001fe : */ 0x18000000,0x00000718, +at 0x000001fc : */ 0x18000000,0x00000718, /* MOVE FROM dsa_dataout+0x02f8, WHEN DATA_OUT -at 0x00000200 : */ 0x18000000,0x00000720, +at 0x000001fe : */ 0x18000000,0x00000720, /* MOVE FROM dsa_dataout+0x0300, WHEN DATA_OUT -at 0x00000202 : */ 0x18000000,0x00000728, +at 0x00000200 : */ 0x18000000,0x00000728, /* MOVE FROM dsa_dataout+0x0308, WHEN DATA_OUT -at 0x00000204 : */ 0x18000000,0x00000730, +at 0x00000202 : */ 0x18000000,0x00000730, /* MOVE FROM dsa_dataout+0x0310, WHEN DATA_OUT -at 0x00000206 : */ 0x18000000,0x00000738, +at 0x00000204 : */ 0x18000000,0x00000738, /* MOVE FROM dsa_dataout+0x0318, WHEN DATA_OUT -at 0x00000208 : */ 0x18000000,0x00000740, +at 0x00000206 : */ 0x18000000,0x00000740, /* MOVE FROM dsa_dataout+0x0320, WHEN DATA_OUT -at 0x0000020a : */ 0x18000000,0x00000748, +at 0x00000208 : */ 0x18000000,0x00000748, /* MOVE FROM dsa_dataout+0x0328, WHEN DATA_OUT -at 0x0000020c : */ 0x18000000,0x00000750, +at 0x0000020a : */ 0x18000000,0x00000750, /* MOVE FROM dsa_dataout+0x0330, WHEN DATA_OUT -at 0x0000020e : */ 0x18000000,0x00000758, +at 0x0000020c : */ 0x18000000,0x00000758, /* MOVE FROM dsa_dataout+0x0338, WHEN DATA_OUT -at 0x00000210 : */ 0x18000000,0x00000760, +at 0x0000020e : */ 0x18000000,0x00000760, /* MOVE FROM dsa_dataout+0x0340, WHEN DATA_OUT -at 0x00000212 : */ 0x18000000,0x00000768, +at 0x00000210 : */ 
0x18000000,0x00000768, /* MOVE FROM dsa_dataout+0x0348, WHEN DATA_OUT -at 0x00000214 : */ 0x18000000,0x00000770, +at 0x00000212 : */ 0x18000000,0x00000770, /* MOVE FROM dsa_dataout+0x0350, WHEN DATA_OUT -at 0x00000216 : */ 0x18000000,0x00000778, +at 0x00000214 : */ 0x18000000,0x00000778, /* MOVE FROM dsa_dataout+0x0358, WHEN DATA_OUT -at 0x00000218 : */ 0x18000000,0x00000780, +at 0x00000216 : */ 0x18000000,0x00000780, /* MOVE FROM dsa_dataout+0x0360, WHEN DATA_OUT -at 0x0000021a : */ 0x18000000,0x00000788, +at 0x00000218 : */ 0x18000000,0x00000788, /* MOVE FROM dsa_dataout+0x0368, WHEN DATA_OUT -at 0x0000021c : */ 0x18000000,0x00000790, +at 0x0000021a : */ 0x18000000,0x00000790, /* MOVE FROM dsa_dataout+0x0370, WHEN DATA_OUT -at 0x0000021e : */ 0x18000000,0x00000798, +at 0x0000021c : */ 0x18000000,0x00000798, /* MOVE FROM dsa_dataout+0x0378, WHEN DATA_OUT -at 0x00000220 : */ 0x18000000,0x000007a0, +at 0x0000021e : */ 0x18000000,0x000007a0, /* MOVE FROM dsa_dataout+0x0380, WHEN DATA_OUT -at 0x00000222 : */ 0x18000000,0x000007a8, +at 0x00000220 : */ 0x18000000,0x000007a8, /* MOVE FROM dsa_dataout+0x0388, WHEN DATA_OUT -at 0x00000224 : */ 0x18000000,0x000007b0, +at 0x00000222 : */ 0x18000000,0x000007b0, /* MOVE FROM dsa_dataout+0x0390, WHEN DATA_OUT -at 0x00000226 : */ 0x18000000,0x000007b8, +at 0x00000224 : */ 0x18000000,0x000007b8, /* MOVE FROM dsa_dataout+0x0398, WHEN DATA_OUT -at 0x00000228 : */ 0x18000000,0x000007c0, +at 0x00000226 : */ 0x18000000,0x000007c0, /* MOVE FROM dsa_dataout+0x03a0, WHEN DATA_OUT -at 0x0000022a : */ 0x18000000,0x000007c8, +at 0x00000228 : */ 0x18000000,0x000007c8, /* MOVE FROM dsa_dataout+0x03a8, WHEN DATA_OUT -at 0x0000022c : */ 0x18000000,0x000007d0, +at 0x0000022a : */ 0x18000000,0x000007d0, /* MOVE FROM dsa_dataout+0x03b0, WHEN DATA_OUT -at 0x0000022e : */ 0x18000000,0x000007d8, +at 0x0000022c : */ 0x18000000,0x000007d8, /* MOVE FROM dsa_dataout+0x03b8, WHEN DATA_OUT -at 0x00000230 : */ 0x18000000,0x000007e0, +at 0x0000022e : */ 0x18000000,0x000007e0, /* MOVE FROM dsa_dataout+0x03c0, WHEN DATA_OUT -at 0x00000232 : */ 0x18000000,0x000007e8, +at 0x00000230 : */ 0x18000000,0x000007e8, /* MOVE FROM dsa_dataout+0x03c8, WHEN DATA_OUT -at 0x00000234 : */ 0x18000000,0x000007f0, +at 0x00000232 : */ 0x18000000,0x000007f0, /* MOVE FROM dsa_dataout+0x03d0, WHEN DATA_OUT -at 0x00000236 : */ 0x18000000,0x000007f8, +at 0x00000234 : */ 0x18000000,0x000007f8, /* MOVE FROM dsa_dataout+0x03d8, WHEN DATA_OUT -at 0x00000238 : */ 0x18000000,0x00000800, +at 0x00000236 : */ 0x18000000,0x00000800, /* MOVE FROM dsa_dataout+0x03e0, WHEN DATA_OUT -at 0x0000023a : */ 0x18000000,0x00000808, +at 0x00000238 : */ 0x18000000,0x00000808, /* MOVE FROM dsa_dataout+0x03e8, WHEN DATA_OUT -at 0x0000023c : */ 0x18000000,0x00000810, +at 0x0000023a : */ 0x18000000,0x00000810, /* MOVE FROM dsa_dataout+0x03f0, WHEN DATA_OUT -at 0x0000023e : */ 0x18000000,0x00000818, +at 0x0000023c : */ 0x18000000,0x00000818, /* MOVE FROM dsa_dataout+0x03f8, WHEN DATA_OUT -at 0x00000240 : */ 0x18000000,0x00000820, +at 0x0000023e : */ 0x18000000,0x00000820, /* ENTRY end_data_trans end_data_trans: redo_msgin3: JUMP get_status, WHEN STATUS -at 0x00000242 : */ 0x830b0000,0x000000a0, +at 0x00000240 : */ 0x830b0000,0x00000098, /* JUMP get_msgin3, WHEN MSG_IN -at 0x00000244 : */ 0x870b0000,0x00000b20, +at 0x00000242 : */ 0x870b0000,0x00000b78, /* INT int_data_bad_phase -at 0x00000246 : */ 0x98080000,0xab93000b, +at 0x00000244 : */ 0x98080000,0xab93000b, /* get_msgin1: MOVE SCRATCH0 | had_msgin TO SCRATCH0 -at 0x00000248 : */ 
0x7a344000,0x00000000, +at 0x00000246 : */ 0x7a344000,0x00000000, /* MOVE 1, msgin_buf, WHEN MSG_IN -at 0x0000024a : */ 0x0f000001,0x00000000, +at 0x00000248 : */ 0x0f000001,0x00000000, /* JUMP ext_msg1, IF 0x01 ; Extended Message -at 0x0000024c : */ 0x800c0001,0x00000968, +at 0x0000024a : */ 0x800c0001,0x00000960, /* JUMP ignore_msg1, IF 0x02 ; Save Data Pointers -at 0x0000024e : */ 0x800c0002,0x00000958, +at 0x0000024c : */ 0x800c0002,0x00000950, /* JUMP ignore_msg1, IF 0x03 ; Save Restore Pointers -at 0x00000250 : */ 0x800c0003,0x00000958, +at 0x0000024e : */ 0x800c0003,0x00000950, /* JUMP disc1, IF 0x04 ; Disconnect -at 0x00000252 : */ 0x800c0004,0x000009c8, +at 0x00000250 : */ 0x800c0004,0x000009f0, /* INT int_bad_msg1 -at 0x00000254 : */ 0x98080000,0xab930006, +at 0x00000252 : */ 0x98080000,0xab930006, /* ignore_msg1: CLEAR ACK -at 0x00000256 : */ 0x60000040,0x00000000, +at 0x00000254 : */ 0x60000040,0x00000000, /* JUMP redo_msgin1 -at 0x00000258 : */ 0x80080000,0x00000058, +at 0x00000256 : */ 0x80080000,0x00000050, /* ext_msg1: MOVE SCRATCH0 | had_extmsg TO SCRATCH0 -at 0x0000025a : */ 0x7a348000,0x00000000, +at 0x00000258 : */ 0x7a348000,0x00000000, /* CLEAR ACK -at 0x0000025c : */ 0x60000040,0x00000000, +at 0x0000025a : */ 0x60000040,0x00000000, /* MOVE 1, msgin_buf + 1, WHEN MSG_IN -at 0x0000025e : */ 0x0f000001,0x00000001, +at 0x0000025c : */ 0x0f000001,0x00000001, /* - JUMP ext_msg1a, IF 0x03 + JUMP reject_msg1, IF NOT 0x03 ; Only handle SDTR -at 0x00000260 : */ 0x800c0003,0x00000990, +at 0x0000025e : */ 0x80040003,0x000009b0, /* - INT int_bad_extmsg1a + CLEAR ACK -at 0x00000262 : */ 0x98080000,0xab930000, +at 0x00000260 : */ 0x60000040,0x00000000, +/* + MOVE 1, msgin_buf + 2, WHEN MSG_IN + +at 0x00000262 : */ 0x0f000001,0x00000002, +/* + JUMP reject_msg1, IF NOT 0x01 ; Only handle SDTR + +at 0x00000264 : */ 0x80040001,0x000009b0, /* -ext_msg1a: CLEAR ACK -at 0x00000264 : */ 0x60000040,0x00000000, +at 0x00000266 : */ 0x60000040,0x00000000, /* - MOVE 1, msgin_buf + 2, WHEN MSG_IN + MOVE 2, msgin_buf + 3, WHEN MSG_IN -at 0x00000266 : */ 0x0f000001,0x00000002, +at 0x00000268 : */ 0x0f000002,0x00000003, /* - JUMP ext_msg1b, IF 0x01 ; Must be SDTR + INT int_msg_sdtr1 + +at 0x0000026a : */ 0x98080000,0xab93000c, +/* +reject_msg1: + MOVE SCRATCH1 | did_reject TO SCRATCH1 -at 0x00000268 : */ 0x800c0001,0x000009b0, +at 0x0000026c : */ 0x7a350100,0x00000000, /* - INT int_bad_extmsg1b + SET ATN -at 0x0000026a : */ 0x98080000,0xab930001, +at 0x0000026e : */ 0x58000008,0x00000000, /* -ext_msg1b: CLEAR ACK -at 0x0000026c : */ 0x60000040,0x00000000, +at 0x00000270 : */ 0x60000040,0x00000000, /* - MOVE 2, msgin_buf + 3, WHEN MSG_IN + JUMP reject_msg1a, WHEN NOT MSG_IN -at 0x0000026e : */ 0x0f000002,0x00000003, +at 0x00000272 : */ 0x87030000,0x000009e0, /* - INT int_msg_sdtr1 + MOVE 1, msgin_buf + 7, WHEN MSG_IN -at 0x00000270 : */ 0x98080000,0xab93000c, +at 0x00000274 : */ 0x0f000001,0x00000007, +/* + JUMP reject_msg1 + +at 0x00000276 : */ 0x80080000,0x000009b0, +/* +reject_msg1a: + MOVE 1, msg_reject, WHEN MSG_OUT + +at 0x00000278 : */ 0x0e000001,0x00000000, +/* + JUMP redo_msgin1 + +at 0x0000027a : */ 0x80080000,0x00000050, /* disc1: CLEAR ACK -at 0x00000272 : */ 0x60000040,0x00000000, +at 0x0000027c : */ 0x60000040,0x00000000, /* ENTRY wait_disc1 wait_disc1: WAIT DISCONNECT -at 0x00000274 : */ 0x48000000,0x00000000, +at 0x0000027e : */ 0x48000000,0x00000000, /* INT int_disc1 -at 0x00000276 : */ 0x98080000,0xab930019, +at 0x00000280 : */ 0x98080000,0xab930019, /* ENTRY resume_msgin1a 
resume_msgin1a: CLEAR ACK -at 0x00000278 : */ 0x60000040,0x00000000, +at 0x00000282 : */ 0x60000040,0x00000000, /* JUMP redo_msgin1 -at 0x0000027a : */ 0x80080000,0x00000058, +at 0x00000284 : */ 0x80080000,0x00000050, /* ENTRY resume_msgin1b resume_msgin1b: SET ATN -at 0x0000027c : */ 0x58000008,0x00000000, +at 0x00000286 : */ 0x58000008,0x00000000, /* CLEAR ACK -at 0x0000027e : */ 0x60000040,0x00000000, +at 0x00000288 : */ 0x60000040,0x00000000, /* INT int_no_msgout1, WHEN NOT MSG_OUT -at 0x00000280 : */ 0x9e030000,0xab93000f, +at 0x0000028a : */ 0x9e030000,0xab93000f, /* MOVE SCRATCH0 | had_msgout TO SCRATCH0 -at 0x00000282 : */ 0x7a340200,0x00000000, +at 0x0000028c : */ 0x7a340200,0x00000000, /* MOVE FROM dsa_msgout, when MSG_OUT -at 0x00000284 : */ 0x1e000000,0x00000008, +at 0x0000028e : */ 0x1e000000,0x00000008, /* JUMP redo_msgin1 -at 0x00000286 : */ 0x80080000,0x00000058, +at 0x00000290 : */ 0x80080000,0x00000050, /* get_msgin2: MOVE SCRATCH0 | had_msgin TO SCRATCH0 -at 0x00000288 : */ 0x7a344000,0x00000000, +at 0x00000292 : */ 0x7a344000,0x00000000, /* MOVE 1, msgin_buf, WHEN MSG_IN -at 0x0000028a : */ 0x0f000001,0x00000000, +at 0x00000294 : */ 0x0f000001,0x00000000, /* JUMP ext_msg2, IF 0x01 ; Extended Message -at 0x0000028c : */ 0x800c0001,0x00000a68, +at 0x00000296 : */ 0x800c0001,0x00000a90, /* JUMP ignore_msg2, IF 0x02 ; Save Data Pointers -at 0x0000028e : */ 0x800c0002,0x00000a58, +at 0x00000298 : */ 0x800c0002,0x00000a80, /* JUMP ignore_msg2, IF 0x03 ; Save Restore Pointers -at 0x00000290 : */ 0x800c0003,0x00000a58, +at 0x0000029a : */ 0x800c0003,0x00000a80, /* JUMP disc2, IF 0x04 ; Disconnect -at 0x00000292 : */ 0x800c0004,0x00000ac8, +at 0x0000029c : */ 0x800c0004,0x00000b20, /* INT int_bad_msg2 -at 0x00000294 : */ 0x98080000,0xab930007, +at 0x0000029e : */ 0x98080000,0xab930007, /* ignore_msg2: CLEAR ACK -at 0x00000296 : */ 0x60000040,0x00000000, +at 0x000002a0 : */ 0x60000040,0x00000000, /* JUMP redo_msgin2 -at 0x00000298 : */ 0x80080000,0x00000078, +at 0x000002a2 : */ 0x80080000,0x00000070, /* ext_msg2: MOVE SCRATCH0 | had_extmsg TO SCRATCH0 -at 0x0000029a : */ 0x7a348000,0x00000000, +at 0x000002a4 : */ 0x7a348000,0x00000000, /* CLEAR ACK -at 0x0000029c : */ 0x60000040,0x00000000, +at 0x000002a6 : */ 0x60000040,0x00000000, /* MOVE 1, msgin_buf + 1, WHEN MSG_IN -at 0x0000029e : */ 0x0f000001,0x00000001, +at 0x000002a8 : */ 0x0f000001,0x00000001, +/* + JUMP reject_msg2, IF NOT 0x03 ; Only handle SDTR + +at 0x000002aa : */ 0x80040003,0x00000ae0, +/* + CLEAR ACK + +at 0x000002ac : */ 0x60000040,0x00000000, /* - JUMP ext_msg2a, IF 0x03 + MOVE 1, msgin_buf + 2, WHEN MSG_IN -at 0x000002a0 : */ 0x800c0003,0x00000a90, +at 0x000002ae : */ 0x0f000001,0x00000002, /* - INT int_bad_extmsg2a + JUMP reject_msg2, IF NOT 0x01 ; Only handle SDTR -at 0x000002a2 : */ 0x98080000,0xab930002, +at 0x000002b0 : */ 0x80040001,0x00000ae0, /* -ext_msg2a: CLEAR ACK -at 0x000002a4 : */ 0x60000040,0x00000000, +at 0x000002b2 : */ 0x60000040,0x00000000, /* - MOVE 1, msgin_buf + 2, WHEN MSG_IN + MOVE 2, msgin_buf + 3, WHEN MSG_IN + +at 0x000002b4 : */ 0x0f000002,0x00000003, +/* + INT int_msg_sdtr2 -at 0x000002a6 : */ 0x0f000001,0x00000002, +at 0x000002b6 : */ 0x98080000,0xab93000d, /* - JUMP ext_msg2b, IF 0x01 ; Must be SDTR +reject_msg2: + MOVE SCRATCH1 | did_reject TO SCRATCH1 -at 0x000002a8 : */ 0x800c0001,0x00000ab0, +at 0x000002b8 : */ 0x7a350100,0x00000000, /* - INT int_bad_extmsg2b + SET ATN -at 0x000002aa : */ 0x98080000,0xab930003, +at 0x000002ba : */ 0x58000008,0x00000000, /* -ext_msg2b: CLEAR 
ACK -at 0x000002ac : */ 0x60000040,0x00000000, +at 0x000002bc : */ 0x60000040,0x00000000, /* - MOVE 2, msgin_buf + 3, WHEN MSG_IN + JUMP reject_msg2a, WHEN NOT MSG_IN -at 0x000002ae : */ 0x0f000002,0x00000003, +at 0x000002be : */ 0x87030000,0x00000b10, /* - INT int_msg_sdtr2 + MOVE 1, msgin_buf + 7, WHEN MSG_IN + +at 0x000002c0 : */ 0x0f000001,0x00000007, +/* + JUMP reject_msg2 + +at 0x000002c2 : */ 0x80080000,0x00000ae0, +/* +reject_msg2a: + MOVE 1, msg_reject, WHEN MSG_OUT + +at 0x000002c4 : */ 0x0e000001,0x00000000, +/* + JUMP redo_msgin2 -at 0x000002b0 : */ 0x98080000,0xab93000d, +at 0x000002c6 : */ 0x80080000,0x00000070, /* disc2: CLEAR ACK -at 0x000002b2 : */ 0x60000040,0x00000000, +at 0x000002c8 : */ 0x60000040,0x00000000, /* ENTRY wait_disc2 wait_disc2: WAIT DISCONNECT -at 0x000002b4 : */ 0x48000000,0x00000000, +at 0x000002ca : */ 0x48000000,0x00000000, /* INT int_disc2 -at 0x000002b6 : */ 0x98080000,0xab93001a, +at 0x000002cc : */ 0x98080000,0xab93001a, /* ENTRY resume_msgin2a resume_msgin2a: CLEAR ACK -at 0x000002b8 : */ 0x60000040,0x00000000, +at 0x000002ce : */ 0x60000040,0x00000000, /* JUMP redo_msgin2 -at 0x000002ba : */ 0x80080000,0x00000078, +at 0x000002d0 : */ 0x80080000,0x00000070, /* ENTRY resume_msgin2b resume_msgin2b: SET ATN -at 0x000002bc : */ 0x58000008,0x00000000, +at 0x000002d2 : */ 0x58000008,0x00000000, /* CLEAR ACK -at 0x000002be : */ 0x60000040,0x00000000, +at 0x000002d4 : */ 0x60000040,0x00000000, /* INT int_no_msgout2, WHEN NOT MSG_OUT -at 0x000002c0 : */ 0x9e030000,0xab930010, +at 0x000002d6 : */ 0x9e030000,0xab930010, /* MOVE SCRATCH0 | had_msgout TO SCRATCH0 -at 0x000002c2 : */ 0x7a340200,0x00000000, +at 0x000002d8 : */ 0x7a340200,0x00000000, /* MOVE FROM dsa_msgout, when MSG_OUT -at 0x000002c4 : */ 0x1e000000,0x00000008, +at 0x000002da : */ 0x1e000000,0x00000008, /* JUMP redo_msgin2 -at 0x000002c6 : */ 0x80080000,0x00000078, +at 0x000002dc : */ 0x80080000,0x00000070, /* get_msgin3: MOVE SCRATCH0 | had_msgin TO SCRATCH0 -at 0x000002c8 : */ 0x7a344000,0x00000000, +at 0x000002de : */ 0x7a344000,0x00000000, /* MOVE 1, msgin_buf, WHEN MSG_IN -at 0x000002ca : */ 0x0f000001,0x00000000, +at 0x000002e0 : */ 0x0f000001,0x00000000, /* JUMP ext_msg3, IF 0x01 ; Extended Message -at 0x000002cc : */ 0x800c0001,0x00000b68, +at 0x000002e2 : */ 0x800c0001,0x00000bc0, /* JUMP ignore_msg3, IF 0x02 ; Save Data Pointers -at 0x000002ce : */ 0x800c0002,0x00000b58, +at 0x000002e4 : */ 0x800c0002,0x00000bb0, /* JUMP ignore_msg3, IF 0x03 ; Save Restore Pointers -at 0x000002d0 : */ 0x800c0003,0x00000b58, +at 0x000002e6 : */ 0x800c0003,0x00000bb0, /* JUMP disc3, IF 0x04 ; Disconnect -at 0x000002d2 : */ 0x800c0004,0x00000bc8, +at 0x000002e8 : */ 0x800c0004,0x00000c50, /* INT int_bad_msg3 -at 0x000002d4 : */ 0x98080000,0xab930008, +at 0x000002ea : */ 0x98080000,0xab930008, /* ignore_msg3: CLEAR ACK -at 0x000002d6 : */ 0x60000040,0x00000000, +at 0x000002ec : */ 0x60000040,0x00000000, /* JUMP redo_msgin3 -at 0x000002d8 : */ 0x80080000,0x00000908, +at 0x000002ee : */ 0x80080000,0x00000900, /* ext_msg3: MOVE SCRATCH0 | had_extmsg TO SCRATCH0 -at 0x000002da : */ 0x7a348000,0x00000000, +at 0x000002f0 : */ 0x7a348000,0x00000000, /* CLEAR ACK -at 0x000002dc : */ 0x60000040,0x00000000, +at 0x000002f2 : */ 0x60000040,0x00000000, /* MOVE 1, msgin_buf + 1, WHEN MSG_IN -at 0x000002de : */ 0x0f000001,0x00000001, +at 0x000002f4 : */ 0x0f000001,0x00000001, /* - JUMP ext_msg3a, IF 0x03 + JUMP reject_msg3, IF NOT 0x03 ; Only handle SDTR -at 0x000002e0 : */ 0x800c0003,0x00000b90, +at 0x000002f6 : */ 
0x80040003,0x00000c10, /* - INT int_bad_extmsg3a + CLEAR ACK -at 0x000002e2 : */ 0x98080000,0xab930004, +at 0x000002f8 : */ 0x60000040,0x00000000, +/* + MOVE 1, msgin_buf + 2, WHEN MSG_IN + +at 0x000002fa : */ 0x0f000001,0x00000002, +/* + JUMP reject_msg3, IF NOT 0x01 ; Only handle SDTR + +at 0x000002fc : */ 0x80040001,0x00000c10, /* -ext_msg3a: CLEAR ACK -at 0x000002e4 : */ 0x60000040,0x00000000, +at 0x000002fe : */ 0x60000040,0x00000000, /* - MOVE 1, msgin_buf + 2, WHEN MSG_IN + MOVE 2, msgin_buf + 3, WHEN MSG_IN + +at 0x00000300 : */ 0x0f000002,0x00000003, +/* + INT int_msg_sdtr3 -at 0x000002e6 : */ 0x0f000001,0x00000002, +at 0x00000302 : */ 0x98080000,0xab93000e, /* - JUMP ext_msg3b, IF 0x01 ; Must be SDTR +reject_msg3: + MOVE SCRATCH1 | did_reject TO SCRATCH1 -at 0x000002e8 : */ 0x800c0001,0x00000bb0, +at 0x00000304 : */ 0x7a350100,0x00000000, /* - INT int_bad_extmsg3b + SET ATN -at 0x000002ea : */ 0x98080000,0xab930005, +at 0x00000306 : */ 0x58000008,0x00000000, /* -ext_msg3b: CLEAR ACK -at 0x000002ec : */ 0x60000040,0x00000000, +at 0x00000308 : */ 0x60000040,0x00000000, /* - MOVE 2, msgin_buf + 3, WHEN MSG_IN + JUMP reject_msg3a, WHEN NOT MSG_IN -at 0x000002ee : */ 0x0f000002,0x00000003, +at 0x0000030a : */ 0x87030000,0x00000c40, /* - INT int_msg_sdtr3 + MOVE 1, msgin_buf + 7, WHEN MSG_IN + +at 0x0000030c : */ 0x0f000001,0x00000007, +/* + JUMP reject_msg3 + +at 0x0000030e : */ 0x80080000,0x00000c10, +/* +reject_msg3a: + MOVE 1, msg_reject, WHEN MSG_OUT -at 0x000002f0 : */ 0x98080000,0xab93000e, +at 0x00000310 : */ 0x0e000001,0x00000000, +/* + JUMP redo_msgin3 + +at 0x00000312 : */ 0x80080000,0x00000900, /* disc3: CLEAR ACK -at 0x000002f2 : */ 0x60000040,0x00000000, +at 0x00000314 : */ 0x60000040,0x00000000, /* ENTRY wait_disc3 wait_disc3: WAIT DISCONNECT -at 0x000002f4 : */ 0x48000000,0x00000000, +at 0x00000316 : */ 0x48000000,0x00000000, /* INT int_disc3 -at 0x000002f6 : */ 0x98080000,0xab93001b, +at 0x00000318 : */ 0x98080000,0xab93001b, /* ENTRY resume_msgin3a resume_msgin3a: CLEAR ACK -at 0x000002f8 : */ 0x60000040,0x00000000, +at 0x0000031a : */ 0x60000040,0x00000000, /* JUMP redo_msgin3 -at 0x000002fa : */ 0x80080000,0x00000908, +at 0x0000031c : */ 0x80080000,0x00000900, /* ENTRY resume_msgin3b resume_msgin3b: SET ATN -at 0x000002fc : */ 0x58000008,0x00000000, +at 0x0000031e : */ 0x58000008,0x00000000, /* CLEAR ACK -at 0x000002fe : */ 0x60000040,0x00000000, +at 0x00000320 : */ 0x60000040,0x00000000, /* INT int_no_msgout3, WHEN NOT MSG_OUT -at 0x00000300 : */ 0x9e030000,0xab930011, +at 0x00000322 : */ 0x9e030000,0xab930011, /* MOVE SCRATCH0 | had_msgout TO SCRATCH0 -at 0x00000302 : */ 0x7a340200,0x00000000, +at 0x00000324 : */ 0x7a340200,0x00000000, /* MOVE FROM dsa_msgout, when MSG_OUT -at 0x00000304 : */ 0x1e000000,0x00000008, +at 0x00000326 : */ 0x1e000000,0x00000008, /* JUMP redo_msgin3 -at 0x00000306 : */ 0x80080000,0x00000908, +at 0x00000328 : */ 0x80080000,0x00000900, /* ENTRY resume_rej_ident resume_rej_ident: CLEAR ATN -at 0x00000308 : */ 0x60000008,0x00000000, +at 0x0000032a : */ 0x60000008,0x00000000, /* MOVE 1, msgin_buf, WHEN MSG_IN -at 0x0000030a : */ 0x0f000001,0x00000000, +at 0x0000032c : */ 0x0f000001,0x00000000, /* INT int_not_rej, IF NOT 0x07 ; Reject -at 0x0000030c : */ 0x98040007,0xab93001c, +at 0x0000032e : */ 0x98040007,0xab93001c, /* CLEAR ACK -at 0x0000030e : */ 0x60000040,0x00000000, +at 0x00000330 : */ 0x60000040,0x00000000, /* JUMP done_ident -at 0x00000310 : */ 0x80080000,0x00000050, +at 0x00000332 : */ 0x80080000,0x00000048, /* ENTRY reselect @@ 
-1716,73 +1784,92 @@ ; Disable selection timer MOVE CTEST7 | 0x10 TO CTEST7 -at 0x00000312 : */ 0x7a1b1000,0x00000000, +at 0x00000334 : */ 0x7a1b1000,0x00000000, /* WAIT RESELECT resel_err -at 0x00000314 : */ 0x50000000,0x00000c70, +at 0x00000336 : */ 0x50000000,0x00000cf8, /* INT int_resel_not_msgin, WHEN NOT MSG_IN -at 0x00000316 : */ 0x9f030000,0xab930016, +at 0x00000338 : */ 0x9f030000,0xab930016, /* MOVE 1, reselected_identify, WHEN MSG_IN -at 0x00000318 : */ 0x0f000001,0x00000000, +at 0x0000033a : */ 0x0f000001,0x00000000, /* INT int_reselected -at 0x0000031a : */ 0x98080000,0xab930017, +at 0x0000033c : */ 0x98080000,0xab930017, /* resel_err: MOVE CTEST2 & 0x40 TO SFBR -at 0x0000031c : */ 0x74164000,0x00000000, +at 0x0000033e : */ 0x74164000,0x00000000, /* JUMP selected, IF 0x00 -at 0x0000031e : */ 0x800c0000,0x00000cb0, +at 0x00000340 : */ 0x800c0000,0x00000d38, /* MOVE SFBR & 0 TO SFBR -at 0x00000320 : */ 0x7c080000,0x00000000, +at 0x00000342 : */ 0x7c080000,0x00000000, /* ENTRY patch_new_dsa patch_new_dsa: MOVE SFBR | 0x11 TO DSA0 -at 0x00000322 : */ 0x6a101100,0x00000000, +at 0x00000344 : */ 0x6a101100,0x00000000, /* MOVE SFBR | 0x22 TO DSA1 -at 0x00000324 : */ 0x6a112200,0x00000000, +at 0x00000346 : */ 0x6a112200,0x00000000, /* MOVE SFBR | 0x33 TO DSA2 -at 0x00000326 : */ 0x6a123300,0x00000000, +at 0x00000348 : */ 0x6a123300,0x00000000, /* MOVE SFBR | 0x44 TO DSA3 -at 0x00000328 : */ 0x6a134400,0x00000000, +at 0x0000034a : */ 0x6a134400,0x00000000, /* JUMP do_select -at 0x0000032a : */ 0x80080000,0x00000000, +at 0x0000034c : */ 0x80080000,0x00000000, /* selected: INT int_selected -at 0x0000032c : */ 0x98080000,0xab930018, +at 0x0000034e : */ 0x98080000,0xab930018, +/* + +ENTRY test1 +test1: + MOVE MEMORY 4, test1_src, test1_dst + +at 0x00000350 : */ 0xc0000004,0x00000000,0x00000000, +/* + INT int_test1 + +at 0x00000353 : */ 0x98080000,0xab93001d, +}; + +#define A_did_reject 0x00000001 +static u32 A_did_reject_used[] __attribute((unused)) = { + 0x0000026c, + 0x000002b8, + 0x00000304, }; #define A_dsa_cmnd 0x00000010 static u32 A_dsa_cmnd_used[] __attribute((unused)) = { - 0x0000001d, + 0x0000001b, }; #define A_dsa_datain 0x00000028 static u32 A_dsa_datain_used[] __attribute((unused)) = { + 0x0000003b, 0x0000003d, 0x0000003f, 0x00000041, @@ -1910,11 +1997,11 @@ 0x00000135, 0x00000137, 0x00000139, - 0x0000013b, }; #define A_dsa_dataout 0x00000428 static u32 A_dsa_dataout_used[] __attribute((unused)) = { + 0x00000141, 0x00000143, 0x00000145, 0x00000147, @@ -2042,25 +2129,24 @@ 0x0000023b, 0x0000023d, 0x0000023f, - 0x00000241, }; #define A_dsa_msgin 0x00000020 static u32 A_dsa_msgin_used[] __attribute((unused)) = { - 0x0000002f, + 0x0000002d, }; #define A_dsa_msgout 0x00000008 static u32 A_dsa_msgout_used[] __attribute((unused)) = { - 0x00000013, - 0x00000285, - 0x000002c5, - 0x00000305, + 0x00000011, + 0x0000028f, + 0x000002db, + 0x00000327, }; #define A_dsa_select 0x00000000 static u32 A_dsa_select_used[] __attribute((unused)) = { - 0x00000006, + 0x00000004, }; #define A_dsa_size 0x00000828 @@ -2069,285 +2155,290 @@ #define A_dsa_status 0x00000018 static u32 A_dsa_status_used[] __attribute((unused)) = { - 0x0000002b, + 0x00000029, }; #define A_had_cmdout 0x00000004 static u32 A_had_cmdout_used[] __attribute((unused)) = { - 0x0000001a, + 0x00000018, }; #define A_had_datain 0x00000008 static u32 A_had_datain_used[] __attribute((unused)) = { - 0x00000038, + 0x00000036, }; #define A_had_dataout 0x00000010 static u32 A_had_dataout_used[] __attribute((unused)) = { - 0x0000013e, + 
0x0000013c, }; #define A_had_extmsg 0x00000080 static u32 A_had_extmsg_used[] __attribute((unused)) = { - 0x0000025a, - 0x0000029a, - 0x000002da, + 0x00000258, + 0x000002a4, + 0x000002f0, }; #define A_had_msgin 0x00000040 static u32 A_had_msgin_used[] __attribute((unused)) = { - 0x00000248, - 0x00000288, - 0x000002c8, + 0x00000246, + 0x00000292, + 0x000002de, }; #define A_had_msgout 0x00000002 static u32 A_had_msgout_used[] __attribute((unused)) = { - 0x00000010, - 0x00000282, - 0x000002c2, - 0x00000302, + 0x0000000e, + 0x0000028c, + 0x000002d8, + 0x00000324, }; #define A_had_select 0x00000001 static u32 A_had_select_used[] __attribute((unused)) = { - 0x0000000c, + 0x0000000a, }; #define A_had_status 0x00000020 static u32 A_had_status_used[] __attribute((unused)) = { }; -#define A_int_bad_extmsg1a 0xab930000 -static u32 A_int_bad_extmsg1a_used[] __attribute((unused)) = { - 0x00000263, -}; - -#define A_int_bad_extmsg1b 0xab930001 -static u32 A_int_bad_extmsg1b_used[] __attribute((unused)) = { - 0x0000026b, -}; - -#define A_int_bad_extmsg2a 0xab930002 -static u32 A_int_bad_extmsg2a_used[] __attribute((unused)) = { - 0x000002a3, -}; - -#define A_int_bad_extmsg2b 0xab930003 -static u32 A_int_bad_extmsg2b_used[] __attribute((unused)) = { - 0x000002ab, -}; - -#define A_int_bad_extmsg3a 0xab930004 -static u32 A_int_bad_extmsg3a_used[] __attribute((unused)) = { - 0x000002e3, -}; - -#define A_int_bad_extmsg3b 0xab930005 -static u32 A_int_bad_extmsg3b_used[] __attribute((unused)) = { - 0x000002eb, -}; - #define A_int_bad_msg1 0xab930006 static u32 A_int_bad_msg1_used[] __attribute((unused)) = { - 0x00000255, + 0x00000253, }; #define A_int_bad_msg2 0xab930007 static u32 A_int_bad_msg2_used[] __attribute((unused)) = { - 0x00000295, + 0x0000029f, }; #define A_int_bad_msg3 0xab930008 static u32 A_int_bad_msg3_used[] __attribute((unused)) = { - 0x000002d5, + 0x000002eb, }; #define A_int_cmd_bad_phase 0xab930009 static u32 A_int_cmd_bad_phase_used[] __attribute((unused)) = { - 0x00000027, + 0x00000025, }; #define A_int_cmd_complete 0xab93000a static u32 A_int_cmd_complete_used[] __attribute((unused)) = { - 0x00000037, + 0x00000035, }; #define A_int_data_bad_phase 0xab93000b static u32 A_int_data_bad_phase_used[] __attribute((unused)) = { - 0x00000247, + 0x00000245, }; #define A_int_disc1 0xab930019 static u32 A_int_disc1_used[] __attribute((unused)) = { - 0x00000277, + 0x00000281, }; #define A_int_disc2 0xab93001a static u32 A_int_disc2_used[] __attribute((unused)) = { - 0x000002b7, + 0x000002cd, }; #define A_int_disc3 0xab93001b static u32 A_int_disc3_used[] __attribute((unused)) = { - 0x000002f7, + 0x00000319, }; #define A_int_msg_sdtr1 0xab93000c static u32 A_int_msg_sdtr1_used[] __attribute((unused)) = { - 0x00000271, + 0x0000026b, }; #define A_int_msg_sdtr2 0xab93000d static u32 A_int_msg_sdtr2_used[] __attribute((unused)) = { - 0x000002b1, + 0x000002b7, }; #define A_int_msg_sdtr3 0xab93000e static u32 A_int_msg_sdtr3_used[] __attribute((unused)) = { - 0x000002f1, + 0x00000303, }; #define A_int_no_msgout1 0xab93000f static u32 A_int_no_msgout1_used[] __attribute((unused)) = { - 0x00000281, + 0x0000028b, }; #define A_int_no_msgout2 0xab930010 static u32 A_int_no_msgout2_used[] __attribute((unused)) = { - 0x000002c1, + 0x000002d7, }; #define A_int_no_msgout3 0xab930011 static u32 A_int_no_msgout3_used[] __attribute((unused)) = { - 0x00000301, + 0x00000323, }; #define A_int_not_cmd_complete 0xab930012 static u32 A_int_not_cmd_complete_used[] __attribute((unused)) = { - 0x00000031, + 0x0000002f, }; 
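The hunks above rework the 53c710 SCRIPTS message-in paths. The old microcode raised int_bad_extmsg1a/1b (and the 2/3 variants) on any extended message other than a three-byte SDTR, forcing a host interrupt for every oddball message; the new code keeps the decision on the script engine. Only SDTR reaches the host, via int_msg_sdtr1/2/3, while anything else takes the new reject_msg* path: it sets the did_reject flag in SCRATCH1, raises ATN, drains any remaining message-in bytes, and answers with the msg_reject byte during message-out. The new test1 entry (a four-byte MOVE MEMORY followed by INT int_test1) and the reject paths account for INSTRUCTIONS growing from 407 to 426 and PATCHES from 42 to 51 below. As an orientation aid only, the C sketch that follows restates the decision tree; classify_msgin and the ACT_* names are invented for illustration, and the real logic runs as SCRIPTS microcode, not host C.

/* Illustrative sketch (not driver code) of the message-in decision
 * tree implemented by the rewritten SCRIPTS paths get_msgin1/2/3. */
enum msg_action {
        ACT_IGNORE,     /* CLEAR ACK and resume (pointer messages)     */
        ACT_DISCONNECT, /* disc*: WAIT DISCONNECT, then INT int_disc*  */
        ACT_SDTR,       /* INT int_msg_sdtr*: host handles negotiation */
        ACT_REJECT,     /* reject_msg*: SET ATN, send MESSAGE REJECT   */
        ACT_BAD         /* INT int_bad_msg*: unexpected message        */
};

enum msg_action classify_msgin(const unsigned char *msgin_buf)
{
        switch (msgin_buf[0]) {
        case 0x02:              /* Save Data Pointers */
        case 0x03:              /* Restore Pointers   */
                return ACT_IGNORE;
        case 0x04:              /* Disconnect         */
                return ACT_DISCONNECT;
        case 0x01:              /* Extended message   */
                /* Old script: INT int_bad_extmsg* on anything unexpected.
                 * New script: pass only SDTR (length 3, code 1) to the
                 * host and reject everything else in-band. */
                if (msgin_buf[1] == 0x03 && msgin_buf[2] == 0x01)
                        return ACT_SDTR;
                return ACT_REJECT;
        default:
                return ACT_BAD;
        }
}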
#define A_int_not_rej 0xab93001c static u32 A_int_not_rej_used[] __attribute((unused)) = { - 0x0000030d, + 0x0000032f, }; #define A_int_resel_not_msgin 0xab930016 static u32 A_int_resel_not_msgin_used[] __attribute((unused)) = { - 0x00000317, + 0x00000339, }; #define A_int_reselected 0xab930017 static u32 A_int_reselected_used[] __attribute((unused)) = { - 0x0000031b, + 0x0000033d, }; #define A_int_sel_no_ident 0xab930013 static u32 A_int_sel_no_ident_used[] __attribute((unused)) = { - 0x0000000f, + 0x0000000d, }; #define A_int_sel_not_cmd 0xab930014 static u32 A_int_sel_not_cmd_used[] __attribute((unused)) = { - 0x00000019, + 0x00000017, }; #define A_int_selected 0xab930018 static u32 A_int_selected_used[] __attribute((unused)) = { - 0x0000032d, + 0x0000034f, }; #define A_int_status_not_msgin 0xab930015 static u32 A_int_status_not_msgin_used[] __attribute((unused)) = { - 0x0000002d, + 0x0000002b, +}; + +#define A_int_test1 0xab93001d +static u32 A_int_test1_used[] __attribute((unused)) = { + 0x00000354, +}; + +#define A_msg_reject 0x00000000 +static u32 A_msg_reject_used[] __attribute((unused)) = { + 0x00000279, + 0x000002c5, + 0x00000311, }; #define A_msgin_buf 0x00000000 static u32 A_msgin_buf_used[] __attribute((unused)) = { - 0x0000024b, - 0x0000025f, - 0x00000267, - 0x0000026f, - 0x0000028b, - 0x0000029f, - 0x000002a7, + 0x00000249, + 0x0000025d, + 0x00000263, + 0x00000269, + 0x00000275, + 0x00000295, + 0x000002a9, 0x000002af, - 0x000002cb, - 0x000002df, - 0x000002e7, - 0x000002ef, - 0x0000030b, + 0x000002b5, + 0x000002c1, + 0x000002e1, + 0x000002f5, + 0x000002fb, + 0x00000301, + 0x0000030d, + 0x0000032d, }; #define A_reselected_identify 0x00000000 static u32 A_reselected_identify_used[] __attribute((unused)) = { - 0x00000319, + 0x0000033b, +}; + +#define A_test1_dst 0x00000000 +static u32 A_test1_dst_used[] __attribute((unused)) = { + 0x00000352, +}; + +#define A_test1_src 0x00000000 +static u32 A_test1_src_used[] __attribute((unused)) = { + 0x00000351, }; #define Ent_do_select 0x00000000 -#define Ent_done_ident 0x00000050 -#define Ent_end_data_trans 0x00000908 -#define Ent_patch_input_data 0x000000e8 -#define Ent_patch_new_dsa 0x00000c88 -#define Ent_patch_output_data 0x00000500 -#define Ent_reselect 0x00000c48 -#define Ent_resume_cmd 0x00000068 -#define Ent_resume_msgin1a 0x000009e0 -#define Ent_resume_msgin1b 0x000009f0 -#define Ent_resume_msgin2a 0x00000ae0 -#define Ent_resume_msgin2b 0x00000af0 -#define Ent_resume_msgin3a 0x00000be0 -#define Ent_resume_msgin3b 0x00000bf0 -#define Ent_resume_pmm 0x00000078 -#define Ent_resume_rej_ident 0x00000c20 -#define Ent_wait_disc1 0x000009d0 -#define Ent_wait_disc2 0x00000ad0 -#define Ent_wait_disc3 0x00000bd0 -#define Ent_wait_disc_complete 0x000000d0 +#define Ent_done_ident 0x00000048 +#define Ent_end_data_trans 0x00000900 +#define Ent_patch_input_data 0x000000e0 +#define Ent_patch_new_dsa 0x00000d10 +#define Ent_patch_output_data 0x000004f8 +#define Ent_reselect 0x00000cd0 +#define Ent_resume_cmd 0x00000060 +#define Ent_resume_msgin1a 0x00000a08 +#define Ent_resume_msgin1b 0x00000a18 +#define Ent_resume_msgin2a 0x00000b38 +#define Ent_resume_msgin2b 0x00000b48 +#define Ent_resume_msgin3a 0x00000c68 +#define Ent_resume_msgin3b 0x00000c78 +#define Ent_resume_pmm 0x00000070 +#define Ent_resume_rej_ident 0x00000ca8 +#define Ent_test1 0x00000d40 +#define Ent_wait_disc1 0x000009f8 +#define Ent_wait_disc2 0x00000b28 +#define Ent_wait_disc3 0x00000c58 +#define Ent_wait_disc_complete 0x000000c8 static u32 LABELPATCHES[] __attribute((unused)) = { 
+	0x00000005,
 	0x00000007,
-	0x00000009,
+	0x00000013,
 	0x00000015,
-	0x00000017,
+	0x0000001d,
 	0x0000001f,
 	0x00000021,
 	0x00000023,
-	0x00000025,
-	0x0000013d,
+	0x0000013b,
+	0x00000241,
 	0x00000243,
-	0x00000245,
+	0x0000024b,
 	0x0000024d,
 	0x0000024f,
 	0x00000251,
-	0x00000253,
-	0x00000259,
-	0x00000261,
-	0x00000269,
+	0x00000257,
+	0x0000025f,
+	0x00000265,
+	0x00000273,
+	0x00000277,
 	0x0000027b,
-	0x00000287,
-	0x0000028d,
-	0x0000028f,
+	0x00000285,
 	0x00000291,
-	0x00000293,
+	0x00000297,
 	0x00000299,
-	0x000002a1,
-	0x000002a9,
-	0x000002bb,
+	0x0000029b,
+	0x0000029d,
+	0x000002a3,
+	0x000002ab,
+	0x000002b1,
+	0x000002bf,
+	0x000002c3,
 	0x000002c7,
-	0x000002cd,
-	0x000002cf,
 	0x000002d1,
-	0x000002d3,
-	0x000002d9,
-	0x000002e1,
+	0x000002dd,
+	0x000002e3,
+	0x000002e5,
+	0x000002e7,
 	0x000002e9,
-	0x000002fb,
-	0x00000307,
-	0x00000311,
-	0x00000315,
-	0x0000031f,
-	0x0000032b,
+	0x000002ef,
+	0x000002f7,
+	0x000002fd,
+	0x0000030b,
+	0x0000030f,
+	0x00000313,
+	0x0000031d,
+	0x00000329,
+	0x00000333,
+	0x00000337,
+	0x00000341,
+	0x0000034d,
 };
 
 static struct {
@@ -2356,6 +2447,6 @@
 } EXTERNAL_PATCHES[] __attribute((unused)) = {
 };
 
-static u32 INSTRUCTIONS __attribute((unused)) = 407;
-static u32 PATCHES __attribute((unused)) = 42;
+static u32 INSTRUCTIONS __attribute((unused)) = 426;
+static u32 PATCHES __attribute((unused)) = 51;
 static u32 EXTERNAL_PATCHES_LEN __attribute((unused)) = 0;
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/st.c linux.21rc5-ac2/drivers/scsi/st.c
--- linux.21rc5/drivers/scsi/st.c	2003-05-28 15:07:52.000000000 +0100
+++ linux.21rc5-ac2/drivers/scsi/st.c	2003-04-22 16:44:37.000000000 +0100
@@ -9,10 +9,10 @@
    Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
    Michael Schaefer, J"org Weule, and Eric Youngdale.
 
-   Copyright 1992 - 2002 Kai Makisara
-   email Kai.Makisara@metla.fi
+   Copyright 1992 - 2003 Kai Makisara
+   email Kai.Makisara@kolumbus.fi
 
-   Last modified: Mon Aug 5 22:54:13 2002 by makisara
+   Last modified: Sun Apr 6 22:44:42 2003 by makisara
    Some small formal changes - aeb, 950809
 
    Last modified: 18-JAN-1998 Richard Gooch Devfs support
@@ -21,7 +21,7 @@
    error handling will be discarded.
 */
 
-static char *verstr = "20020805";
+static char *verstr = "20030406";
 
 #include
 
@@ -72,7 +72,6 @@
 #include "constants.h"
 
 static int buffer_kbs;
-static int write_threshold_kbs;
 static int max_buffers = (-1);
 static int max_sg_segs;
 static int blocking_open = ST_BLOCKING_OPEN;
@@ -84,8 +83,6 @@
 MODULE_PARM(buffer_kbs, "i");
 MODULE_PARM_DESC(buffer_kbs, "Default driver buffer size (KB; 32)");
-MODULE_PARM(write_threshold_kbs, "i");
-MODULE_PARM_DESC(write_threshold_kbs, "Asynchronous write threshold (KB; 30)");
 MODULE_PARM(max_buffers, "i");
 MODULE_PARM_DESC(max_buffers, "Maximum number of buffer allocated at initialisation (4)");
 MODULE_PARM(max_sg_segs, "i");
@@ -103,8 +100,8 @@
 	{
 		"buffer_kbs", &buffer_kbs
 	},
-	{
-		"write_threshold_kbs", &write_threshold_kbs
+	{ /* Retained for compatibility */
+		"write_threshold_kbs", NULL
 	},
 	{
 		"max_buffers", &max_buffers
@@ -122,7 +119,6 @@
 /* The default definitions have been moved to st_options.h */
 
 #define ST_BUFFER_SIZE (ST_BUFFER_BLOCKS * ST_KILOBYTE)
-#define ST_WRITE_THRESHOLD (ST_WRITE_THRESHOLD_BLOCKS * ST_KILOBYTE)
 
 /* The buffer size should fit into the 24 bits for length in the
    6-byte SCSI read and write commands. */
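The 24-bit limit this comment refers to is also why the write path later in this patch loads the transfer length into bytes 2..4 of the 6-byte WRITE CDB (cmd[2] = residual >> 16 and so on in the hunks below). A minimal sketch of that encoding, with a hypothetical helper name:

/* Hypothetical helper: encode a 24-bit block or byte count into the
 * length field of a 6-byte SCSI CDB, as the write path does inline. */
static void set_cdb6_length(unsigned char *cmd, unsigned int len)
{
        cmd[2] = (len >> 16) & 0xff;
        cmd[3] = (len >> 8) & 0xff;
        cmd[4] = len & 0xff;
}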
@@ -153,7 +149,6 @@
 static int st_nbr_buffers;
 static ST_buffer **st_buffers = NULL;
 static int st_buffer_size = ST_BUFFER_SIZE;
-static int st_write_threshold = ST_WRITE_THRESHOLD;
 static int st_max_buffers = ST_MAX_BUFFERS;
 static int st_max_sg_segs = ST_MAX_SG;
 
@@ -167,6 +162,7 @@
 static int set_sg_lengths(ST_buffer *, unsigned int);
 static int append_to_buffer(const char *, ST_buffer *, int);
 static int from_buffer(ST_buffer *, char *, int);
+static void move_buffer_data(ST_buffer *, int);
 
 static int st_init(void);
 static int st_attach(Scsi_Device *);
@@ -1196,6 +1192,7 @@
 	ssize_t total;
 	ssize_t i, do_count, blks, transfer;
 	ssize_t retval = 0;
+	int residual, retry_eot = 0, scode;
 	int write_threshold;
 	int doing_write = 0;
 	unsigned char cmd[MAX_COMMAND_SIZE];
@@ -1367,7 +1364,7 @@
 		write_threshold = 1;
 	} else
 		write_threshold = (STp->buffer)->buffer_blocks * STp->block_size;
-	if (!STm->do_async_writes)
+	if (!STm->do_async_writes || STp->block_size > 0)
 		write_threshold--;
 
 	total = count;
@@ -1381,7 +1378,7 @@
 	b_point = buf;
 	while ((STp->block_size == 0 && !STm->do_async_writes && count > 0) ||
 	       (STp->block_size != 0 &&
-		(STp->buffer)->buffer_bytes + count > write_threshold)) {
+		(STp->buffer)->buffer_bytes + count > write_threshold && !retry_eot)) {
 		doing_write = 1;
 		if (STp->block_size == 0)
 			do_count = count;
@@ -1398,6 +1395,7 @@
 			goto out;
 		}
 
+	retry_write:
 		if (STp->block_size == 0)
 			blks = transfer = do_count;
 		else {
@@ -1420,46 +1418,66 @@
 			DEBC(printk(ST_DEB_MSG "st%d: Error on write:\n", dev));
 			if ((SRpnt->sr_sense_buffer[0] & 0x70) == 0x70 &&
 			    (SRpnt->sr_sense_buffer[2] & 0x40)) {
+				scode = SRpnt->sr_sense_buffer[2] & 0x0f;
 				if ((SRpnt->sr_sense_buffer[0] & 0x80) != 0)
-					transfer = (SRpnt->sr_sense_buffer[3] << 24) |
+					residual = (SRpnt->sr_sense_buffer[3] << 24) |
 					    (SRpnt->sr_sense_buffer[4] << 16) |
 					    (SRpnt->sr_sense_buffer[5] << 8) |
 					    SRpnt->sr_sense_buffer[6];
 				else if (STp->block_size == 0 &&
-					 (SRpnt->sr_sense_buffer[2] & 0x0f) ==
-					 VOLUME_OVERFLOW)
-					transfer = do_count;
+					 scode == VOLUME_OVERFLOW)
+					residual = do_count;
 				else
-					transfer = 0;
+					residual = 0;
 				if (STp->block_size != 0)
-					transfer *= STp->block_size;
-				if (transfer <= do_count) {
-					filp->f_pos += do_count - transfer;
-					count -= do_count - transfer;
+					residual *= STp->block_size;
+				if (residual <= do_count) {
+					/* Within the data in this write() */
+					filp->f_pos += do_count - residual;
+					count -= do_count - residual;
 					if (STps->drv_block >= 0) {
 						if (STp->block_size == 0 &&
-						    transfer < do_count)
+						    residual < do_count)
 							STps->drv_block++;
 						else if (STp->block_size != 0)
 							STps->drv_block +=
-								(do_count - transfer) /
+								(transfer - residual) /
 								STp->block_size;
 					}
 					STps->eof = ST_EOM_OK;
 					retval = (-ENOSPC); /* EOM within current request */
 					DEBC(printk(ST_DEB_MSG
						    "st%d: EOM with %d bytes unwritten.\n",
-						    dev, transfer));
+						    dev, count));
 				} else {
-					STps->eof = ST_EOM_ERROR;
-					STps->drv_block = (-1); /* Too cautious? */
*/ - retval = (-EIO); /* EOM for old data */ - DEBC(printk(ST_DEB_MSG - "st%d: EOM with lost data.\n", - dev)); + /* EOT in within data buffered earlier */ + if (!retry_eot && (SRpnt->sr_sense_buffer[0] & 1) == 0 && + (scode == NO_SENSE || scode == RECOVERED_ERROR)) { + move_buffer_data(STp->buffer, transfer - residual); + retry_eot = TRUE; + if (STps->drv_block >= 0) { + STps->drv_block += (transfer - residual) / + STp->block_size; + } + STps->eof = ST_EOM_OK; + DEBC(printk(ST_DEB_MSG + "st%d: Retry write of %d bytes at EOM.\n", + dev, do_count)); + goto retry_write; + } + else { + /* Either error within data buffered by driver or failed retry */ + STps->eof = ST_EOM_ERROR; + STps->drv_block = (-1); /* Too cautious? */ + retval = (-EIO); /* EOM for old data */ + DEBC(printk(ST_DEB_MSG + "st%d: EOM with lost data.\n", + dev)); + } } } else { STps->drv_block = (-1); /* Too cautious? */ + retry_eot = FALSE; retval = (-EIO); } @@ -1483,7 +1501,7 @@ (STp->buffer)->buffer_bytes = 0; STp->dirty = 0; } - if (count != 0) { + if (count != 0 && !retry_eot) { STp->dirty = 1; i = append_to_buffer(b_point, STp->buffer, count); if (i) { @@ -1499,26 +1517,14 @@ goto out; } - if (STm->do_async_writes && - (((STp->buffer)->buffer_bytes >= STp->write_threshold && - (STp->buffer)->buffer_bytes >= STp->block_size) || - STp->block_size == 0)) { + if (STm->do_async_writes && STp->block_size == 0) { /* Schedule an asynchronous write */ - if (STp->block_size == 0) - (STp->buffer)->writing = (STp->buffer)->buffer_bytes; - else - (STp->buffer)->writing = ((STp->buffer)->buffer_bytes / - STp->block_size) * STp->block_size; - STp->dirty = !((STp->buffer)->writing == - (STp->buffer)->buffer_bytes); - - if (STp->block_size == 0) - blks = (STp->buffer)->writing; - else - blks = (STp->buffer)->writing / STp->block_size; - cmd[2] = blks >> 16; - cmd[3] = blks >> 8; - cmd[4] = blks; + (STp->buffer)->writing = (STp->buffer)->buffer_bytes; + STp->dirty = FALSE; + residual = (STp->buffer)->writing; + cmd[2] = residual >> 16; + cmd[3] = residual >> 8; + cmd[4] = residual; DEB( STp->write_pending = 1; ) SRpnt = st_do_scsi(SRpnt, STp, cmd, (STp->buffer)->writing, @@ -1532,9 +1538,9 @@ } STps->at_sm &= (total == 0); - if (total > 0) + if (total > 0 && !retry_eot) STps->eof = ST_NOEOF; - retval = total; + retval = total - count; out: if (SRpnt != NULL) @@ -1625,9 +1631,14 @@ if (SRpnt->sr_sense_buffer[2] & 0x20) { /* ILI */ if (STp->block_size == 0) { - if (transfer < 0) { + if (transfer <= 0) { + if (transfer < 0) + printk(KERN_NOTICE + "st%d: Failed to read %d byte block with %d byte read.\n", + dev, bytes - transfer, bytes); if (STps->drv_block >= 0) STps->drv_block += 1; + (STp->buffer)->buffer_bytes = 0; return (-ENOMEM); } (STp->buffer)->buffer_bytes = bytes - transfer; @@ -1694,6 +1705,9 @@ } else /* Some other extended sense code */ retval = (-EIO); } + + if ((STp->buffer)->buffer_bytes < 0) /* Caused by bogus sense data */ + (STp->buffer)->buffer_bytes = 0; } /* End of extended sense test */ else { /* Non-extended sense */ @@ -2004,16 +2018,7 @@ debugging = value; ) st_log_options(STp, STm, dev); } else if (code == MT_ST_WRITE_THRESHOLD) { - value = (options & ~MT_ST_OPTIONS) * ST_KILOBYTE; - if (value < 1 || value > st_buffer_size) { - printk(KERN_WARNING - "st%d: Write threshold %d too small or too large.\n", - dev, value); - return (-EIO); - } - STp->write_threshold = value; - printk(KERN_INFO "st%d: Write threshold set to %d bytes.\n", - dev, value); + /* Retained for compatibility */ } else if (code == 
MT_ST_DEF_BLKSIZE) { value = (options & ~MT_ST_OPTIONS); if (value == ~MT_ST_OPTIONS) { @@ -2155,7 +2160,7 @@ /* Send the mode page in the tape buffer to the drive. Assumes that the mode data in the buffer is correctly formatted. */ -static int write_mode_page(Scsi_Tape *STp, int page) +static int write_mode_page(Scsi_Tape *STp, int page, int slow) { int pgo; unsigned char cmd[MAX_COMMAND_SIZE]; @@ -2174,7 +2179,7 @@ (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR; SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_WRITE, - STp->timeout, 0, TRUE); + (slow ? STp->long_timeout : STp->timeout), 0, TRUE); if (SRpnt == NULL) return (STp->buffer)->syscall_result; @@ -2241,7 +2246,7 @@ b_data[mpoffs + CP_OFF_C_ALGO] = 0; /* no compression */ } - retval = write_mode_page(STp, COMPRESSION_PAGE); + retval = write_mode_page(STp, COMPRESSION_PAGE, FALSE); if (retval) { DEBC(printk(ST_DEB_MSG "st%d: Compression change failed.\n", dev)); return (-EIO); @@ -3055,7 +3060,7 @@ bp[pgo + PP_OFF_RESERVED] = 0; bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | PP_MSK_PSUM_MB; - result = write_mode_page(STp, PART_PAGE); + result = write_mode_page(STp, PART_PAGE, TRUE); if (result) { printk(KERN_INFO "st%d: Partitioning of tape failed.\n", dev); result = (-EIO); @@ -3644,6 +3649,50 @@ } +/* Move data towards start of buffer */ +static void move_buffer_data(ST_buffer * st_bp, int offset) +{ + int src_seg, dst_seg, src_offset = 0, dst_offset; + int count, total; + + if (offset == 0) + return; + + total=st_bp->buffer_bytes - offset; + for (src_seg=0; src_seg < st_bp->sg_segs; src_seg++) { + src_offset = offset; + if (src_offset < st_bp->sg_lengths[src_seg]) + break; + offset -= st_bp->sg_lengths[src_seg]; + } + if (src_seg == st_bp->sg_segs) { /* Should never happen */ + printk(KERN_WARNING "st: zap_buffer offset overflow.\n"); + return; + } + + st_bp->buffer_bytes = st_bp->read_pointer = total; + for (dst_seg=dst_offset=0; total > 0; ) { + count = min(st_bp->sg_lengths[dst_seg] - dst_offset, + st_bp->sg_lengths[src_seg] - src_offset); + memmove(st_bp->sg[dst_seg].address + dst_offset, + st_bp->sg[src_seg].address + src_offset, count); + printk("st: move (%d,%d) -> (%d,%d) count %d\n", + src_seg, src_offset, dst_seg, dst_offset, count); + src_offset += count; + if (src_offset >= st_bp->sg_lengths[src_seg]) { + src_seg++; + src_offset = 0; + } + dst_offset += count; + if (dst_offset >= st_bp->sg_lengths[dst_seg]) { + dst_seg++; + dst_offset = 0; + } + total -= count; + } +} + + /* Set the scatter/gather list length fields to sum up to the transfer length. Return the number of segments being used. 
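move_buffer_data() above walks source and destination cursors across the scatter/gather segments; over a single contiguous buffer the same compaction collapses to one memmove(), which may be an easier mental model when reading the s/g version (a sketch, not in the patch):

        static void move_flat_buffer(unsigned char *buf, int *bytes, int offset)
        {
                if (offset <= 0 || offset > *bytes)
                        return;                 /* nothing to slide */
                memmove(buf, buf + offset, *bytes - offset);
                *bytes -= offset;               /* kept data now at front */
        }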
*/ static int set_sg_lengths(ST_buffer *st_bp, unsigned int length) @@ -3668,15 +3717,6 @@ { if (buffer_kbs > 0) st_buffer_size = buffer_kbs * ST_KILOBYTE; - if (write_threshold_kbs > 0) - st_write_threshold = write_threshold_kbs * ST_KILOBYTE; - else if (buffer_kbs > 0) - st_write_threshold = st_buffer_size - 2048; - if (st_write_threshold > st_buffer_size) { - st_write_threshold = st_buffer_size; - printk(KERN_WARNING "st: write_threshold limited to %d bytes.\n", - st_write_threshold); - } if (max_buffers >= 0) st_max_buffers = max_buffers; if (max_sg_segs >= ST_FIRST_SG) @@ -3695,13 +3735,15 @@ if (ints[0] > 0) { for (i = 0; i < ints[0] && i < ARRAY_SIZE(parms); i++) - *parms[i].val = ints[i + 1]; + if (parms[i].val) + *parms[i].val = ints[i + 1]; } else { while (stp != NULL) { for (i = 0; i < ARRAY_SIZE(parms); i++) { len = strlen(parms[i].name); if (!strncmp(stp, parms[i].name, len) && - (*(stp + len) == ':' || *(stp + len) == '=')) { + (*(stp + len) == ':' || *(stp + len) == '=') && + parms[i].val) { *parms[i].val = simple_strtoul(stp + len + 1, NULL, 0); break; @@ -3863,7 +3905,6 @@ tpnt->fast_mteom = ST_FAST_MTEOM; tpnt->scsi2_logical = ST_SCSI2LOGICAL; tpnt->immediate = ST_NOWAIT; - tpnt->write_threshold = st_write_threshold; tpnt->default_drvbuffer = 0xff; /* No forced buffering */ tpnt->partition = 0; tpnt->new_partition = 0; @@ -3941,8 +3982,8 @@ return 0; printk(KERN_INFO - "st: Version %s, bufsize %d, wrt %d, max init. bufs %d, s/g segs %d\n", - verstr, st_buffer_size, st_write_threshold, st_max_buffers, st_max_sg_segs); + "st: Version %s, bufsize %d, max init. bufs %d, s/g segs %d\n", + verstr, st_buffer_size, st_max_buffers, st_max_sg_segs); write_lock_irqsave(&st_dev_arr_lock, flags); if (!st_registered) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/st.h linux.21rc5-ac2/drivers/scsi/st.h --- linux.21rc5/drivers/scsi/st.h 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/st.h 2003-05-29 01:44:07.000000000 +0100 @@ -86,7 +86,6 @@ unsigned char use_pf; /* Set Page Format bit in all mode selects? */ unsigned char c_algo; /* compression algorithm */ int tape_type; - int write_threshold; int timeout; /* timeout for normal commands */ int long_timeout; /* timeout for commands known to take long time */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/scsi/st_options.h linux.21rc5-ac2/drivers/scsi/st_options.h --- linux.21rc5/drivers/scsi/st_options.h 2003-05-28 15:07:52.000000000 +0100 +++ linux.21rc5-ac2/drivers/scsi/st_options.h 2003-04-22 16:44:37.000000000 +0100 @@ -1,9 +1,9 @@ /* The compile-time configurable defaults for the Linux SCSI tape driver. - Copyright 1995-2002 Kai Makisara. + Copyright 1995-2003 Kai Makisara. - Last modified: Wed May 1 11:06:47 2002 by makisara + Last modified: Sun Apr 6 22:45:15 2003 by makisara */ #ifndef _ST_OPTIONS_H @@ -33,11 +33,6 @@ /* The tape driver buffer size in kilobytes. Must be non-zero. */ #define ST_BUFFER_BLOCKS 32 -/* The number of kilobytes of data in the buffer that triggers an - asynchronous write in fixed block mode. See also ST_ASYNC_WRITES - below. */ -#define ST_WRITE_THRESHOLD_BLOCKS 30 - /* The maximum number of tape buffers the driver tries to allocate at driver initialisation. The number is also constrained by the number of drives detected. 
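Both parsers above (the comma-separated numeric form and the name:value form) now skip table entries whose value pointer is NULL; that is how the removed write_threshold_kbs option stays accepted on existing command lines while doing nothing. Condensed to its essentials, with illustrative names:

        struct st_parm { const char *name; int *val; };

        static void st_apply_parm(const struct st_parm *p, const char *arg)
        {
                if (p->val)                     /* NULL => compatibility stub */
                        *p->val = simple_strtoul(arg, NULL, 0);
        }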
If more buffers are needed, they are allocated diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sgi/char/sgiserial.c linux.21rc5-ac2/drivers/sgi/char/sgiserial.c --- linux.21rc5/drivers/sgi/char/sgiserial.c 2003-05-28 15:08:01.000000000 +0100 +++ linux.21rc5-ac2/drivers/sgi/char/sgiserial.c 2003-04-22 16:44:37.000000000 +0100 @@ -1489,7 +1489,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sound/ac97_codec.c linux.21rc5-ac2/drivers/sound/ac97_codec.c --- linux.21rc5/drivers/sound/ac97_codec.c 2003-05-28 15:09:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/sound/ac97_codec.c 2003-05-28 15:26:32.000000000 +0100 @@ -31,6 +31,10 @@ ************************************************************************** * * History + * May 02, 2003 Liam Girdwood + * Removed non existant WM9700 + * Added support for WM9705, WM9708, WM9709, WM9710, WM9711 + * WM9712 and WM9717 * Mar 28, 2002 Randolph Bentson * corrections to support WM9707 in ViewPad 1000 * v0.4 Mar 15 2000 Ollie Lho @@ -63,9 +67,10 @@ static int ac97_init_mixer(struct ac97_codec *codec); -static int wolfson_init00(struct ac97_codec * codec); static int wolfson_init03(struct ac97_codec * codec); static int wolfson_init04(struct ac97_codec * codec); +static int wolfson_init05(struct ac97_codec * codec); +static int wolfson_init11(struct ac97_codec * codec); static int tritech_init(struct ac97_codec * codec); static int tritech_maestro_init(struct ac97_codec * codec); static int sigmatel_9708_init(struct ac97_codec *codec); @@ -96,9 +101,10 @@ static struct ac97_ops null_ops = { NULL, NULL, NULL }; static struct ac97_ops default_ops = { NULL, eapd_control, NULL }; -static struct ac97_ops wolfson_ops00 = { wolfson_init00, NULL, NULL }; static struct ac97_ops wolfson_ops03 = { wolfson_init03, NULL, NULL }; static struct ac97_ops wolfson_ops04 = { wolfson_init04, NULL, NULL }; +static struct ac97_ops wolfson_ops05 = { wolfson_init05, NULL, NULL }; +static struct ac97_ops wolfson_ops11 = { wolfson_init11, NULL, NULL }; static struct ac97_ops tritech_ops = { tritech_init, NULL, NULL }; static struct ac97_ops tritech_m_ops = { tritech_maestro_init, NULL, NULL }; static struct ac97_ops sigmatel_9708_ops = { sigmatel_9708_init, NULL, NULL }; @@ -126,6 +132,7 @@ {0x414B4D00, "Asahi Kasei AK4540", &null_ops}, {0x414B4D01, "Asahi Kasei AK4542", &null_ops}, {0x414B4D02, "Asahi Kasei AK4543", &null_ops}, + {0x414C4326, "ALC100P", &null_ops}, {0x414C4710, "ALC200/200P", &null_ops}, {0x414C4720, "ALC650", &null_ops}, {0x434D4941, "CMedia", &cmedia_ops, AC97_NO_PCM_VOLUME }, @@ -154,9 +161,11 @@ {0x54524106, "TriTech TR28026", &null_ops}, {0x54524108, "TriTech TR28028", &tritech_ops}, {0x54524123, "TriTech TR A5", &null_ops}, - {0x574D4C00, "Wolfson WM9700A", &wolfson_ops00}, - {0x574D4C03, "Wolfson WM9703/WM9707", &wolfson_ops03}, + {0x574D4C03, "Wolfson WM9703/07/08/17", &wolfson_ops03}, {0x574D4C04, "Wolfson WM9704M/WM9704Q", &wolfson_ops04}, + {0x574D4C05, "Wolfson WM9705/WM9710", &wolfson_ops05}, + {0x574D4C09, "Wolfson WM9709", &null_ops}, + {0x574D4C12, "Wolfson WM9711/9712", &wolfson_ops11}, {0x83847600, "SigmaTel STAC????", &null_ops}, {0x83847604, "SigmaTel 
STAC9701/3/4/5", &null_ops}, {0x83847605, "SigmaTel STAC9704", &null_ops}, @@ -805,8 +814,6 @@ if (!(cap & 0x10)) codec->supported_mixers &= ~SOUND_MASK_ALTPCM; - if(codec->flags & AC97_NO_PCM_VOLUME) - codec->supported_mixers &= ~SOUND_MASK_PCM; /* detect bit resolution */ codec->codec_write(codec, AC97_MASTER_VOL_STEREO, 0x2020); @@ -836,8 +843,15 @@ ac97_set_mixer(codec, md->mixer, md->value); } + /* + * Volume is MUTE only on this device. We have to initialise + * it but its useless beyond that. + */ if(codec->flags & AC97_NO_PCM_VOLUME) + { + codec->supported_mixers &= ~SOUND_MASK_PCM; printk(KERN_WARNING "AC97 codec does not have proper volume support.\n"); + } return 1; } @@ -933,53 +947,52 @@ return 0; } -static int wolfson_init00(struct ac97_codec * codec) -{ - /* This initialization is suspect, but not known to be wrong. - It was copied from the initialization for the WM9704Q, but - that same sequence is known to fail for the WM9707. Thus - this warning may help someone with hardware to test - this code. */ - codec->codec_write(codec, 0x72, 0x0808); - codec->codec_write(codec, 0x74, 0x0808); - - // patch for DVD noise - codec->codec_write(codec, 0x5a, 0x0200); - - // init vol as PCM vol - codec->codec_write(codec, 0x70, - codec->codec_read(codec, AC97_PCMOUT_VOL)); - - codec->codec_write(codec, AC97_SURROUND_MASTER, 0x0000); - return 0; -} - +#define AC97_WM97XX_FMIXER_VOL 0x72 +#define AC97_WM97XX_RMIXER_VOL 0x74 +#define AC97_WM97XX_TEST 0x5a +#define AC97_WM9704_RPCM_VOL 0x70 +#define AC97_WM9711_OUT3VOL 0x16 static int wolfson_init03(struct ac97_codec * codec) { /* this is known to work for the ViewSonic ViewPad 1000 */ - codec->codec_write(codec, 0x72, 0x0808); - codec->codec_write(codec, 0x20, 0x8000); + codec->codec_write(codec, AC97_WM97XX_FMIXER_VOL, 0x0808); + codec->codec_write(codec, AC97_GENERAL_PURPOSE, 0x8000); return 0; } - static int wolfson_init04(struct ac97_codec * codec) { - codec->codec_write(codec, 0x72, 0x0808); - codec->codec_write(codec, 0x74, 0x0808); + codec->codec_write(codec, AC97_WM97XX_FMIXER_VOL, 0x0808); + codec->codec_write(codec, AC97_WM97XX_RMIXER_VOL, 0x0808); // patch for DVD noise - codec->codec_write(codec, 0x5a, 0x0200); + codec->codec_write(codec, AC97_WM97XX_TEST, 0x0200); // init vol as PCM vol - codec->codec_write(codec, 0x70, + codec->codec_write(codec, AC97_WM9704_RPCM_VOL, codec->codec_read(codec, AC97_PCMOUT_VOL)); + /* set rear surround volume */ codec->codec_write(codec, AC97_SURROUND_MASTER, 0x0000); return 0; } +/* WM9705, WM9710 */ +static int wolfson_init05(struct ac97_codec * codec) +{ + /* set front mixer volume */ + codec->codec_write(codec, AC97_WM97XX_FMIXER_VOL, 0x0808); + return 0; +} + +/* WM9711, WM9712 */ +static int wolfson_init11(struct ac97_codec * codec) +{ + /* set out3 volume */ + codec->codec_write(codec, AC97_WM9711_OUT3VOL, 0x0808); + return 0; +} static int tritech_init(struct ac97_codec * codec) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sound/cmpci.c linux.21rc5-ac2/drivers/sound/cmpci.c --- linux.21rc5/drivers/sound/cmpci.c 2003-05-28 15:09:01.000000000 +0100 +++ linux.21rc5-ac2/drivers/sound/cmpci.c 2003-05-29 02:06:23.000000000 +0100 @@ -580,15 +580,17 @@ spin_unlock_irqrestore(&s->lock, flags); } -static void trans_ac3(struct cm_state *s, void *dest, const char *source, int size) +static int trans_ac3(struct cm_state *s, void *dest, const char *source, int size) { int i = size / 2; unsigned long data; unsigned long *dst = (unsigned long *) dest; unsigned 
short *src = (unsigned short *)source; + int err; do { - __get_user(data, src); + if ((err = __get_user(data, src))) + return err; src++; data <<= 12; // ok for 16-bit data if (s->spdif_counter == 2 || s->spdif_counter == 3) @@ -606,6 +608,8 @@ if (s->spdif_counter == 384) s->spdif_counter = 0; } while (--i); + + return 0; } static void set_adc_rate_unlocked(struct cm_state *s, unsigned rate) @@ -1655,13 +1659,16 @@ continue; } if (s->status & DO_AC3_SW) { + int err; + // clip exceeded data, caught by 033 and 037 if (swptr + 2 * cnt > s->dma_dac.dmasize) cnt = (s->dma_dac.dmasize - swptr) / 2; - trans_ac3(s, s->dma_dac.rawbuf + swptr, buffer, cnt); + if ((err = trans_ac3(s, s->dma_dac.rawbuf + swptr, buffer, cnt))) + return err; swptr = (swptr + 2 * cnt) % s->dma_dac.dmasize; } else if (s->status & DO_DUAL_DAC) { - int i; + int i, err; unsigned long *src, *dst0, *dst1; src = (unsigned long *) buffer; @@ -1669,8 +1676,10 @@ dst1 = (unsigned long *) (s->dma_adc.rawbuf + swptr); // copy left/right sample at one time for (i = 0; i <= cnt / 4; i++) { - *dst0++ = *src++; - *dst1++ = *src++; + if ((err = __get_user(*dst0++, src++))) + return err; + if ((err = __get_user(*dst1++, src++))) + return err; } swptr = (swptr + cnt) % s->dma_dac.dmasize; } else { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sound/cs46xx.c linux.21rc5-ac2/drivers/sound/cs46xx.c --- linux.21rc5/drivers/sound/cs46xx.c 2003-05-28 15:09:01.000000000 +0100 +++ linux.21rc5-ac2/drivers/sound/cs46xx.c 2003-05-28 15:38:20.000000000 +0100 @@ -947,8 +947,8 @@ struct InitStruct { - u32 long off; - u32 long val; + u32 off; + u32 val; } InitArray[] = { {0x00000040, 0x3fc0000f}, {0x0000004c, 0x04800000}, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sound/i810_audio.c linux.21rc5-ac2/drivers/sound/i810_audio.c --- linux.21rc5/drivers/sound/i810_audio.c 2003-05-28 15:09:01.000000000 +0100 +++ linux.21rc5-ac2/drivers/sound/i810_audio.c 2003-05-09 18:19:35.000000000 +0100 @@ -118,6 +118,9 @@ #ifndef PCI_DEVICE_ID_INTEL_ICH4 #define PCI_DEVICE_ID_INTEL_ICH4 0x24c5 #endif +#ifndef PCI_DEVICE_ID_INTEL_ICH5 +#define PCI_DEVICE_ID_INTEL_ICH5 0x24d5 +#endif #ifndef PCI_DEVICE_ID_INTEL_440MX #define PCI_DEVICE_ID_INTEL_440MX 0x7195 #endif @@ -273,6 +276,7 @@ INTELICH2, INTELICH3, INTELICH4, + INTELICH5, SI7012, NVIDIA_NFORCE, AMD768, @@ -286,6 +290,7 @@ "Intel ICH2", "Intel ICH3", "Intel ICH4", + "Intel ICH5", "SiS 7012", "NVIDIA nForce Audio", "AMD 768", @@ -304,7 +309,8 @@ { 1, 0x0000 }, /* INTEL440MX */ { 1, 0x0000 }, /* INTELICH2 */ { 2, 0x0000 }, /* INTELICH3 */ - { 3, 0x0003 }, /* INTELICH4 */ + { 3, 0x0003 }, /* INTELICH4 */ + { 3, 0x0003 }, /* INTELICH5 */ /*@FIXME to be verified*/ { 2, 0x0000 }, /* SI7012 */ /*@FIXME to be verified*/ { 2, 0x0000 }, /* NVIDIA_NFORCE */ /*@FIXME to be verified*/ { 2, 0x0000 }, /* AMD768 */ @@ -324,6 +330,8 @@ PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH3}, {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH4}, + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH5, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH5}, {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SI7012}, {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO, @@ -2802,7 +2810,7 @@ */ /* see i810_ac97_init for the next 7 lines (jsaw) */ inw(card->ac97base); - if ((card->pci_id == PCI_DEVICE_ID_INTEL_ICH4) + if ((card->pci_id == PCI_DEVICE_ID_INTEL_ICH4 || card->pci_id == PCI_DEVICE_ID_INTEL_ICH5) && (card->use_mmio)) { 
primary_codec_id = (int) readl(card->iobase_mmio + SDM) & 0x3; printk(KERN_INFO "i810_audio: Primary codec has ID %d\n", @@ -2872,7 +2880,7 @@ possible IO channels. Bit 0:1 of SDM then holds the last codec ID spoken to. */ - if ((card->pci_id == PCI_DEVICE_ID_INTEL_ICH4) + if ((card->pci_id == PCI_DEVICE_ID_INTEL_ICH4 || card->pci_id == PCI_DEVICE_ID_INTEL_ICH5) && (card->use_mmio)) { ac97_id = (int) readl(card->iobase_mmio + SDM) & 0x3; printk(KERN_INFO "i810_audio: Connection %d with codec id %d\n", diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/sound/vidc.c linux.21rc5-ac2/drivers/sound/vidc.c --- linux.21rc5/drivers/sound/vidc.c 2003-05-28 15:07:56.000000000 +0100 +++ linux.21rc5-ac2/drivers/sound/vidc.c 2003-04-22 16:44:37.000000000 +0100 @@ -224,9 +224,11 @@ newsize = 208; if (newsize > 4096) newsize = 4096; - for (new2size = 128; new2size < newsize; new2size <<= 1); - if (new2size - newsize > newsize - (new2size >> 1)) - new2size >>= 1; + for (new2size = 128; new2size < newsize; new2size <<= 1) + /* loop */; + + if (new2size - newsize > newsize - (new2size >> 1)) + new2size >>= 1; if (new2size > 4096) { printk(KERN_ERR "VIDC: error: dma buffer (%d) %d > 4K\n", newsize, new2size); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/tc/zs.c linux.21rc5-ac2/drivers/tc/zs.c --- linux.21rc5/drivers/tc/zs.c 2003-05-28 15:08:02.000000000 +0100 +++ linux.21rc5-ac2/drivers/tc/zs.c 2003-04-22 16:44:37.000000000 +0100 @@ -1374,7 +1374,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttyS%02d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/catc.c linux.21rc5-ac2/drivers/usb/catc.c --- linux.21rc5/drivers/usb/catc.c 2003-05-28 15:08:02.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/catc.c 2003-05-20 20:21:28.000000000 +0100 @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -596,20 +597,9 @@ * Receive modes. Broadcast, Multicast, Promisc. */ -static inline u32 ether_crc_le(int cnt, unsigned char *addr) -{ - unsigned int crc = 0xffffffff; - u8 byte, idx, bit; - - for (idx = 0; idx < cnt; idx++) - for (byte = *addr++, bit = 0; bit < 8; bit++, byte >>= 1) - crc = (crc >> 1) ^ (((crc ^ byte) & 1) ? 
0xedb88320U : 0); - return crc; -} - static void catc_multicast(unsigned char *addr, u8 *multicast) { - unsigned int crc = ether_crc_le(6, addr); + u32 crc = ether_crc_le(6, addr); multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/devio.c linux.21rc5-ac2/drivers/usb/devio.c --- linux.21rc5/drivers/usb/devio.c 2003-05-28 15:09:02.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/devio.c 2003-04-22 16:44:37.000000000 +0100 @@ -43,7 +43,7 @@ #include #include #include - +#include struct async { struct list_head asynclist; @@ -1078,6 +1078,8 @@ int size; void *buf = 0; int retval = 0; + struct usb_interface *ifp = 0; + struct usb_driver *driver = 0; /* get input parameters and alloc buffer */ if (copy_from_user(&ctrl, (void *) arg, sizeof (ctrl))) @@ -1095,32 +1097,55 @@ } } - /* ioctl to device */ - if (ctrl.ifno < 0) { - switch (ctrl.ioctl_code) { - /* access/release token for issuing control messages - * ask a particular driver to bind/unbind, ... etc - */ - } - retval = -ENOSYS; - - /* ioctl to the driver which has claimed a given interface */ - } else { - struct usb_interface *ifp = 0; - if (!ps->dev) - retval = -ENODEV; - else if (ctrl.ifno >= ps->dev->actconfig->bNumInterfaces) + if (!ps->dev) + retval = -ENODEV; + else if (!(ifp = usb_ifnum_to_if (ps->dev, ctrl.ifno))) + retval = -EINVAL; + else switch (ctrl.ioctl_code) { + + /* disconnect kernel driver from interface, leaving it unbound */ + case USBDEVFS_DISCONNECT: + driver = ifp->driver; + if (driver) { + down (&driver->serialize); + dbg ("disconnect '%s' from dev %d interface %d", + driver->name, ps->dev->devnum, ctrl.ifno); + driver->disconnect (ps->dev, ifp->private_data); + usb_driver_release_interface (driver, ifp); + up (&driver->serialize); + } else retval = -EINVAL; - else { - if (!(ifp = usb_ifnum_to_if (ps->dev, ctrl.ifno))) - retval = -EINVAL; - else if (ifp->driver == 0 || ifp->driver->ioctl == 0) - retval = -ENOSYS; - } - if (retval == 0) + break; + + /* let kernel drivers try to (re)bind to the interface */ + case USBDEVFS_CONNECT: + usb_find_interface_driver_for_ifnum (ps->dev, ctrl.ifno); + break; + + /* talk directly to the interface's driver */ + default: + lock_kernel(); /* against module unload */ + driver = ifp->driver; + if (driver == 0 || driver->ioctl == 0) { + unlock_kernel(); + retval = -ENOSYS; + } else { + if (ifp->driver->owner) { + __MOD_INC_USE_COUNT(ifp->driver->owner); + unlock_kernel(); + } /* ifno might usefully be passed ... */ - retval = ifp->driver->ioctl (ps->dev, ctrl.ioctl_code, buf); + retval = driver->ioctl (ps->dev, ctrl.ioctl_code, buf); /* size = min_t(int, size, retval)? 
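The default branch of the rewritten ioctl dispatch above guards against module unload by pinning the interface driver's owner before letting go of the big kernel lock. Stripped of the error paths, the sequence is:

        lock_kernel();                          /* against module unload */
        if (driver->owner) {
                __MOD_INC_USE_COUNT(driver->owner);
                unlock_kernel();                /* refcount now pins the module */
        }
        retval = driver->ioctl(ps->dev, ctrl.ioctl_code, buf);
        if (driver->owner)
                __MOD_DEC_USE_COUNT(driver->owner);
        else
                unlock_kernel();                /* BKL held across the call instead */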
*/ + if (ifp->driver->owner) { + __MOD_DEC_USE_COUNT(ifp->driver->owner); + } else { + unlock_kernel(); + } + } + + if (retval == -ENOIOCTLCMD) + retval = -ENOTTY; } /* cleanup and return */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/hid-core.c linux.21rc5-ac2/drivers/usb/hid-core.c --- linux.21rc5/drivers/usb/hid-core.c 2003-05-28 15:09:02.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/hid-core.c 2003-04-22 16:44:37.000000000 +0100 @@ -1099,6 +1099,9 @@ #define USB_VENDOR_ID_TANGTOP 0x0d3d #define USB_DEVICE_ID_TANGTOP_USBPS2 0x0001 +#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f +#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 + #define USB_VENDOR_ID_OKI 0x070a #define USB_VENDOR_ID_OKI_MULITI 0x0007 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/host/usb-ohci.c linux.21rc5-ac2/drivers/usb/host/usb-ohci.c --- linux.21rc5/drivers/usb/host/usb-ohci.c 2003-05-28 15:09:02.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/host/usb-ohci.c 2003-05-12 16:35:58.000000000 +0100 @@ -490,12 +490,17 @@ usb_pipeout (urb->pipe) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); - urb->complete (urb); + if (urb->interval) { + urb->complete (urb); - /* implicitly requeued */ - urb->actual_length = 0; - urb->status = -EINPROGRESS; - td_submit_urb (urb); + /* implicitly requeued */ + urb->actual_length = 0; + urb->status = -EINPROGRESS; + td_submit_urb (urb); + } else { + urb_rm_priv(urb); + urb->complete (urb); + } break; case PIPE_ISOCHRONOUS: diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/host/usb-uhci.c linux.21rc5-ac2/drivers/usb/host/usb-uhci.c --- linux.21rc5/drivers/usb/host/usb-uhci.c 2003-05-28 15:09:02.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/host/usb-uhci.c 2003-05-28 15:41:32.000000000 +0100 @@ -2430,9 +2430,9 @@ _static int process_interrupt (uhci_t *s, struct urb *urb) { - int i, ret = -EINPROGRESS; + int ret = -EINPROGRESS; urb_priv_t *urb_priv = urb->hcpriv; - struct list_head *p = urb_priv->desc_list.next; + struct list_head *p; uhci_desc_t *desc = list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list); int actual_length; @@ -2440,8 +2440,8 @@ //dbg("urb contains interrupt request"); - for (i = 0; p != &urb_priv->desc_list; p = p->next, i++) // Maybe we allow more than one TD later ;-) - { + // Maybe we allow more than one TD later ;-) + while ((p = urb_priv->desc_list.next) != &urb_priv->desc_list) { desc = list_entry (p, uhci_desc_t, desc_list); if (is_td_active(desc)) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/Makefile.lib linux.21rc5-ac2/drivers/usb/Makefile.lib --- linux.21rc5/drivers/usb/Makefile.lib 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/Makefile.lib 2003-05-20 20:21:28.000000000 +0100 @@ -0,0 +1,3 @@ +obj-$(CONFIG_USB_USBNET) += crc32.o +obj-$(CONFIG_USB_CATC) += crc32.o + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/rtl8150.c linux.21rc5-ac2/drivers/usb/rtl8150.c --- linux.21rc5/drivers/usb/rtl8150.c 2003-05-28 15:09:02.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/rtl8150.c 2003-05-12 16:55:40.000000000 +0100 @@ -155,7 +155,7 @@ clear_bit(RX_REG_SET, &dev->flags); } -static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) +static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size) { int ret; @@ -426,7 +426,8 @@ if (rtl8150_reset(dev)) { warn("%s - device reset failed", __FUNCTION__); } - dev->rx_creg = 
rcr = 0x9e; /* bit7=1 attach Rx info at the end */ + rcr = 0x9e; /* bit7=1 attach Rx info at the end */ + dev->rx_creg = cpu_to_le16(rcr); tcr = 0xd8; cr = 0x0c; set_registers(dev, RCR, 1, &rcr); @@ -471,18 +472,18 @@ dev = netdev->priv; netif_stop_queue(netdev); if (netdev->flags & IFF_PROMISC) { - dev->rx_creg |= 0x0001; + dev->rx_creg |= cpu_to_le16(0x0001); info("%s: promiscuous mode", netdev->name); } else if ((netdev->mc_count > multicast_filter_limit) || (netdev->flags & IFF_ALLMULTI)) { - dev->rx_creg &= 0xfffe; - dev->rx_creg |= 0x0002; + dev->rx_creg &= cpu_to_le16(0xfffe); + dev->rx_creg |= cpu_to_le16(0x0002); info("%s: allmulti set", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ - dev->rx_creg &= 0x00fc; + dev->rx_creg &= cpu_to_le16(0x00fc); } - async_set_registers(dev, RCR, 2, &dev->rx_creg); + async_set_registers(dev, RCR, 2); netif_wake_queue(netdev); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/storage/unusual_devs.h linux.21rc5-ac2/drivers/usb/storage/unusual_devs.h --- linux.21rc5/drivers/usb/storage/unusual_devs.h 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/storage/unusual_devs.h 2003-05-28 21:48:47.000000000 +0100 @@ -90,6 +90,13 @@ "Nex II Digital", US_SC_SCSI, US_PR_BULK, NULL, US_FL_START_STOP), +/* Reported by Thomas Rabe */ +UNUSUAL_DEV( 0x0461, 0x0822, 0x0000, 0x9999, + "Vivitar", + "Vivicam 3610", + US_SC_SCSI, US_PR_BULK, NULL, + US_FL_START_STOP | US_FL_FIX_INQUIRY | US_FL_MODE_XLATE), + /* Reported by Paul Stewart * This entry is needed because the device reports Sub=ff */ UNUSUAL_DEV( 0x04a4, 0x0004, 0x0001, 0x0001, @@ -229,6 +236,13 @@ US_SC_SCSI, US_PR_CB, NULL, US_FL_SINGLE_LUN | US_FL_START_STOP | US_FL_MODE_XLATE ), +/* This entry is needed because the device reports Sub=ff */ +UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0432, + "Sony", + "DSC-F707/U10/U20", + US_SC_SCSI, US_PR_CB, NULL, + US_FL_SINGLE_LUN | US_FL_START_STOP | US_FL_MODE_XLATE ), + /* Reported by wim@geeks.nl */ UNUSUAL_DEV( 0x054c, 0x0025, 0x0100, 0x0100, "Sony", @@ -267,6 +281,12 @@ "PEG Mass Storage", US_SC_8070, US_PR_CBI, NULL, US_FL_FIX_INQUIRY ), + +UNUSUAL_DEV( 0x054c, 0x0058, 0x0000, 0x9999, + "Sony", + "PEG-N760C Mass Storage", + US_SC_8070, US_PR_CBI, NULL, + US_FL_FIX_INQUIRY ), UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299, "Y-E Data", @@ -364,6 +384,12 @@ US_SC_SCSI, US_PR_BULK, NULL, US_FL_FIX_INQUIRY ), +UNUSUAL_DEV( 0x0636, 0x0003, 0x0000, 0x9999, + "Vivitar", + "Vivicam 35Xx", + US_SC_SCSI, US_PR_BULK, NULL, + US_FL_START_STOP | US_FL_FIX_INQUIRY | US_FL_MODE_XLATE), + UNUSUAL_DEV( 0x0644, 0x0000, 0x0100, 0x0100, "TEAC", "Floppy Drive", @@ -435,6 +461,12 @@ US_FL_SINGLE_LUN | US_FL_START_STOP ), #endif +UNUSUAL_DEV( 0x0784, 0x1688, 0x0000, 0x9999, + "Vivitar", + "Vivicam 36xx", + US_SC_SCSI, US_PR_BULK, NULL, + US_FL_START_STOP | US_FL_FIX_INQUIRY | US_FL_MODE_XLATE), + #ifdef CONFIG_USB_STORAGE_FREECOM UNUSUAL_DEV( 0x07ab, 0xfc01, 0x0000, 0x9999, "Freecom", @@ -518,6 +550,19 @@ US_FL_MODE_XLATE ), #endif +/* Datafab KECF-USB Ver A /Jenoptik Jenreader + * Note: there seem to be two versions of the KECF-USB device. + * The other version (see below) needs only US_FL_INQUIRY, + * whereas this version will not mount without also having + * US_FL_START_STOP. 
+ * Submitted by Chris Clayton (chris@theclaytons.freeserve.co.uk) + */ +UNUSUAL_DEV( 0x07c4, 0xb000, 0x0000, 0xffff, + "Datafab", + "KECF-USB Ver A", + US_SC_SCSI, US_PR_BULK, NULL, + US_FL_FIX_INQUIRY | US_FL_START_STOP ), + /* Datafab KECF-USB / Sagatek DCS-CF / Simpletech Flashlink UCF-100 * Only revision 1.13 tested (same for all of the above devices, * based on the Datafab DF-UG-07 chip). Needed for US_FL_FIX_INQUIRY. @@ -594,3 +639,31 @@ US_SC_SCSI, US_PR_SDDR55, NULL, US_FL_SINGLE_LUN), #endif + +/* + * Panasonic/OEMs compact USB CDROMs status + * KXL-840(CD-ROM11): usb_stor_Bulk_max_lun() is danger, need US_FL_SINGLE_LUN + * KXL-RW11(CDRRW02): usb_stor_Bulk_max_lun() is danger, need US_FL_SINGLE_LUN + * KXL-RW20(CDRRW03): original IClass is 0xFF, use US_PR_CB and need init reset + * KXL-RW21(CDRRW06): original IClass is 0xFF, use US_PR_CB and need init reset + * KXL-RW31(CDRRW05): work fine with current code + * KXL-RW32(CDRRW09): work fine with current code + * Checked: Sun Feb 9 JST 2003 Go Taniguchi + */ +UNUSUAL_DEV( 0x04da, 0x0d01, 0x0000, 0xffff, + "MATSHITA", + "CD-ROM11", + US_SC_8020, US_PR_BULK, NULL, US_FL_SINGLE_LUN), +UNUSUAL_DEV( 0x04da, 0x0d02, 0x0000, 0xffff, + "MATSHITA", + "CDRRW02", + US_SC_8020, US_PR_BULK, NULL, US_FL_SINGLE_LUN), +UNUSUAL_DEV( 0x04da, 0x0d03, 0x0000, 0xffff, + "MATSHITA", + "CDRRW03", + US_SC_8020, US_PR_CB, NULL, US_FL_INIT_RESET), +UNUSUAL_DEV( 0x04da, 0x0d06, 0x0000, 0xffff, + "MATSHITA", + "CDRRW06", + US_SC_8020, US_PR_CB, NULL, US_FL_INIT_RESET), + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/storage/usb.c linux.21rc5-ac2/drivers/usb/storage/usb.c --- linux.21rc5/drivers/usb/storage/usb.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/storage/usb.c 2003-04-22 16:44:37.000000000 +0100 @@ -848,7 +848,8 @@ ss->transport_name = "Bulk"; ss->transport = usb_stor_Bulk_transport; ss->transport_reset = usb_stor_Bulk_reset; - ss->max_lun = usb_stor_Bulk_max_lun(ss); + if (!(ss->flags & US_FL_SINGLE_LUN)) + ss->max_lun = usb_stor_Bulk_max_lun(ss); break; #ifdef CONFIG_USB_STORAGE_HP8200e @@ -1027,6 +1028,11 @@ /* now register - our detect function will be called */ ss->htmplt.module = THIS_MODULE; + + /* some device need reset process */ + if (ss->flags & US_FL_INIT_RESET) + ss->transport_reset(ss); + scsi_register_module(MODULE_SCSI_HA, &(ss->htmplt)); /* lock access to the data structures */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/storage/usb.h linux.21rc5-ac2/drivers/usb/storage/usb.h --- linux.21rc5/drivers/usb/storage/usb.h 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/storage/usb.h 2003-05-29 01:44:07.000000000 +0100 @@ -103,6 +103,7 @@ #define US_FL_IGNORE_SER 0x00000010 /* Ignore the serial number given */ #define US_FL_SCM_MULT_TARG 0x00000020 /* supports multiple targets */ #define US_FL_FIX_INQUIRY 0x00000040 /* INQUIRY response needs fixing */ +#define US_FL_INIT_RESET 0x00000080 /* reset process when initialize */ #define USB_STOR_STRING_LEN 32 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/usb.c linux.21rc5-ac2/drivers/usb/usb.c --- linux.21rc5/drivers/usb/usb.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/usb.c 2003-04-22 17:05:08.000000000 +0100 @@ -164,6 +164,26 @@ } } +/* + * usb_ifnum_to_ifpos - convert the interface _number_ (as in interface.bInterfaceNumber) + * to the interface _position_ (as in dev->actconfig->interface + position) + 
* @dev: the device to use + * @ifnum: the interface number (bInterfaceNumber); not interface position + * + * Note that the number is the same as the position for all interfaces _except_ + * devices with interfaces not sequentially numbered (e.g., 0, 2, 3, etc). + */ +int usb_ifnum_to_ifpos(struct usb_device *dev, unsigned ifnum) +{ + int i; + + for (i = 0; i < dev->actconfig->bNumInterfaces; i++) + if (dev->actconfig->interface[i].altsetting[0].bInterfaceNumber == ifnum) + return i; + + return -EINVAL; +} + /** * usb_deregister - unregister a USB driver * @driver: USB operations of the driver to unregister @@ -546,7 +566,6 @@ iface->private_data = NULL; } - /** * usb_match_id - find first usb_device_id matching device or interface * @dev: the device whose descriptors are considered when matching @@ -759,6 +778,23 @@ return -1; } +/* + * usb_find_interface_driver_for_ifnum - convert ifnum to ifpos via + * usb_ifnum_to_ifpos and call usb_find_interface_driver(). + * @dev: the device to use + * @ifnum: the interface number (bInterfaceNumber); not interface position! + * + * Note usb_find_interface_driver's ifnum parameter is actually interface position. + */ +int usb_find_interface_driver_for_ifnum(struct usb_device *dev, unsigned int ifnum) +{ + int ifpos = usb_ifnum_to_ifpos(dev, ifnum); + + if (0 > ifpos) + return -EINVAL; + + return usb_find_interface_driver(dev, ifpos); +} #ifdef CONFIG_HOTPLUG @@ -2384,6 +2420,7 @@ * into the kernel, and other device drivers are built as modules, * then these symbols need to be exported for the modules to use. */ +EXPORT_SYMBOL(usb_ifnum_to_ifpos); EXPORT_SYMBOL(usb_ifnum_to_if); EXPORT_SYMBOL(usb_epnum_to_ep_desc); @@ -2398,6 +2435,7 @@ EXPORT_SYMBOL(usb_free_dev); EXPORT_SYMBOL(usb_inc_dev_use); +EXPORT_SYMBOL(usb_find_interface_driver_for_ifnum); EXPORT_SYMBOL(usb_driver_claim_interface); EXPORT_SYMBOL(usb_interface_claimed); EXPORT_SYMBOL(usb_driver_release_interface); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/usb/usbnet.c linux.21rc5-ac2/drivers/usb/usbnet.c --- linux.21rc5/drivers/usb/usbnet.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/usb/usbnet.c 2003-05-20 20:21:28.000000000 +0100 @@ -1306,6 +1306,8 @@ #ifdef CONFIG_USB_ZAURUS +#include + /*------------------------------------------------------------------------- * * Zaurus is also a SA-1110 based PDA, but one using a different driver @@ -1318,57 +1320,6 @@ * *-------------------------------------------------------------------------*/ -// NOTE: so far on 2.4 isn't for "bulk data" like this. -// On 2.5 crc32_le() handles this for us. 
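A hypothetical caller of usb_ifnum_to_ifpos() above, showing why the extra translation step matters on devices whose interface numbers are not 0, 1, 2, ... in order:

        static struct usb_interface *find_ifp(struct usb_device *dev,
                                              unsigned ifnum)
        {
                int ifpos = usb_ifnum_to_ifpos(dev, ifnum);

                if (ifpos < 0)
                        return NULL;            /* no interface with that number */
                return &dev->actconfig->interface[ifpos];
        }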
- -static const u32 crc32_table [256] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, - 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, - 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, - 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, - 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, - 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, - 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, - 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, - 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, - 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, - 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, - 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, - 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, - 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, - 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, - 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, - 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, - 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, - 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, - 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, - 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, - 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, - 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, - 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, - 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d -}; - -#define CRC32_INITFCS 0xffffffff // Initial FCS value -#define CRC32_FCS(fcs, c) (((fcs) >> 8) ^ crc32_table[((fcs) ^ (c)) & 0xff]) - -/* fcs_compute32 - memcpy and calculate fcs - * Perform a memcpy and calculate fcs using ppp 32bit CRC algorithm. 
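With the private table and fcs_compute32() gone, the Zaurus transmit fixup below uses the kernel's shared little-endian CRC from lib/crc32 (which Makefile.lib above now links in for CONFIG_USB_USBNET). Since CRC32_INITFCS was ~0, the removed helper reduces to a one-liner:

        u32 fcs;

        /* old: fcs = fcs_compute32(skb->data, skb->len, CRC32_INITFCS); */
        fcs = crc32_le(~0, skb->data, skb->len);
        fcs = ~fcs;                             /* PPP-style final complement */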
- */ -static inline u32 fcs_compute32 (unsigned char *sp, int len, u32 fcs) -{ - for (;len-- > 0; fcs = CRC32_FCS (fcs, *sp++)) - continue; - return fcs; -} - static struct sk_buff * zaurus_tx_fixup (struct usbnet *dev, struct sk_buff *skb, int flags) { @@ -1387,7 +1338,7 @@ if (skb) { u32 fcs; done: - fcs = fcs_compute32 (skb->data, skb->len, CRC32_INITFCS); + fcs = crc32_le (~0, skb->data, skb->len); fcs = ~fcs; *skb_put (skb, 1) = fcs & 0xff; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/video/aty/atyfb_base.c linux.21rc5-ac2/drivers/video/aty/atyfb_base.c --- linux.21rc5/drivers/video/aty/atyfb_base.c 2003-05-28 15:08:01.000000000 +0100 +++ linux.21rc5-ac2/drivers/video/aty/atyfb_base.c 2003-04-22 16:44:37.000000000 +0100 @@ -360,6 +360,7 @@ /* 3D RAGE Mobility */ { 0x4c4d, 0x4c4d, 0x00, 0x00, m64n_mob_p, 230, 50, M64F_GT | M64F_INTEGRATED | M64F_RESET_3D | M64F_GTB_DSP | M64F_MOBIL_BUS }, + { 0x4c52, 0x4c52, 0x00, 0x00, m64n_mob_p, 230, 40, M64F_GT | M64F_INTEGRATED | M64F_RESET_3D | M64F_GTB_DSP | M64F_MOBIL_BUS | M64F_MAGIC_POSTDIV | M64F_SDRAM_MAGIC_PLL | M64F_XL_DLL }, { 0x4c4e, 0x4c4e, 0x00, 0x00, m64n_mob_a, 230, 50, M64F_GT | M64F_INTEGRATED | M64F_RESET_3D | M64F_GTB_DSP | M64F_MOBIL_BUS }, #endif /* CONFIG_FB_ATY_CT */ }; @@ -438,7 +439,7 @@ #endif /* defined(CONFIG_PPC) */ -#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_PMAC_BACKLIGHT) +#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_CT_VAIO_LCD) static void aty_st_lcd(int index, u32 val, const struct fb_info_aty *info) { unsigned long temp; @@ -460,7 +461,7 @@ /* read the register value */ return aty_ld_le32(LCD_DATA, info); } -#endif /* CONFIG_PMAC_PBOOK || CONFIG_PMAC_BACKLIGHT */ +#endif /* CONFIG_PMAC_PBOOK || CONFIG_PMAC_BACKLIGHT || CONFIG_FB_ATY_CT_VAIO_LCD */ /* ------------------------------------------------------------------------- */ @@ -1772,6 +1773,9 @@ #if defined(CONFIG_PPC) int sense; #endif +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + u32 pm, hs; +#endif u8 pll_ref_div; info->aty_cmap_regs = (struct aty_cmap_regs *)(info->ati_regbase+0xc0); @@ -2089,6 +2093,35 @@ var = default_var; #endif /* !__sparc__ */ #endif /* !CONFIG_PPC */ +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + /* Power Management */ + pm=aty_ld_lcd(POWER_MANAGEMENT, info); + pm=(pm & ~PWR_MGT_MODE_MASK) | PWR_MGT_MODE_PCI; + pm|=PWR_MGT_ON; + aty_st_lcd(POWER_MANAGEMENT, pm, info); + udelay(10); + + /* OVR_WID_LEFT_RIGHT */ + hs=aty_ld_le32(OVR_WID_LEFT_RIGHT,info); + hs= 0x00000000; + aty_st_le32(OVR_WID_LEFT_RIGHT, hs, info); + udelay(10); + + /* CONFIG_PANEL */ + hs=aty_ld_lcd(CONFIG_PANEL,info); + hs|=DONT_SHADOW_HEND ; + aty_st_lcd(CONFIG_PANEL, hs, info); + udelay(10); + +#if defined(DEBUG) + printk("LCD_INDEX CONFIG_PANEL LCD_GEN_CTRL POWER_MANAGEMENT\n" + "%08x %08x %08x %08x\n", + aty_ld_le32(LCD_INDEX, info), + aty_ld_lcd(CONFIG_PANEL, info), + aty_ld_lcd(LCD_GEN_CTRL, info), + aty_ld_lcd(POWER_MANAGEMENT, info), +#endif /* DEBUG */ +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ #endif /* !MODULE */ if (noaccel) var.accel_flags &= ~FB_ACCELF_TEXT; @@ -2714,6 +2747,23 @@ /* * Blank the display. 
*/ +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) +static int set_backlight_enable(int on, struct fb_info_aty *info) +{ + unsigned int reg = aty_ld_lcd(POWER_MANAGEMENT, info); + if(on) { + reg=(reg & ~SUSPEND_NOW) | PWR_BLON; + } else { + reg=(reg & ~PWR_BLON) | SUSPEND_NOW; + } + aty_st_lcd(POWER_MANAGEMENT, reg, info); + udelay(10); +#ifdef DEBUG + printk(KERN_INFO "set_backlight_enable(%i): %08x\n", on, aty_ld_lcd(POWER_MANAGEMENT, info) ); +#endif + return 0; +} +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ static void atyfbcon_blank(int blank, struct fb_info *fb) { @@ -2725,6 +2775,9 @@ set_backlight_enable(0); #endif /* CONFIG_PMAC_BACKLIGHT */ +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + set_backlight_enable(!blank, info); +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ gen_cntl = aty_ld_8(CRTC_GEN_CNTL, info); if (blank > 0) switch (blank-1) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/video/aty/mach64_ct.c linux.21rc5-ac2/drivers/video/aty/mach64_ct.c --- linux.21rc5/drivers/video/aty/mach64_ct.c 2003-05-28 15:08:01.000000000 +0100 +++ linux.21rc5-ac2/drivers/video/aty/mach64_ct.c 2003-04-22 16:44:37.000000000 +0100 @@ -178,11 +178,14 @@ } pll->pll_gen_cntl |= mpostdiv<<4; /* mclk */ - if (M64_HAS(MAGIC_POSTDIV)) - pll->pll_ext_cntl = 0; - else +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) pll->pll_ext_cntl = mpostdiv; /* xclk == mclk */ - +#else + if ( M64_HAS(MAGIC_POSTDIV) ) + pll->pll_ext_cntl = 0; + else + pll->pll_ext_cntl = mpostdiv; /* xclk == mclk */ +#endif switch (pll->vclk_post_div_real) { case 2: vpostdiv = 1; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/video/aty/mach64.h linux.21rc5-ac2/drivers/video/aty/mach64.h --- linux.21rc5/drivers/video/aty/mach64.h 2003-05-28 15:08:01.000000000 +0100 +++ linux.21rc5-ac2/drivers/video/aty/mach64.h 2003-04-22 16:44:37.000000000 +0100 @@ -1148,6 +1148,8 @@ #define APC_LUT_MN 0x39 #define APC_LUT_OP 0x3A +/* Values in CONFIG_PANEL */ +#define DONT_SHADOW_HEND 0x00004000 /* Values in LCD_MISC_CNTL */ #define BIAS_MOD_LEVEL_MASK 0x0000ff00 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/video/Config.in linux.21rc5-ac2/drivers/video/Config.in --- linux.21rc5/drivers/video/Config.in 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/video/Config.in 2003-05-09 18:22:28.000000000 +0100 @@ -144,14 +144,20 @@ if [ "$CONFIG_FB_ATY" != "n" ]; then bool ' Mach64 GX support (EXPERIMENTAL)' CONFIG_FB_ATY_GX bool ' Mach64 CT/VT/GT/LT (incl. 
3D RAGE) support' CONFIG_FB_ATY_CT + if [ "$CONFIG_FB_ATY_CT" = "y" ]; then + bool ' Sony Vaio C1VE 1024x480 LCD support' CONFIG_FB_ATY_CT_VAIO_LCD + fi fi tristate ' ATI Radeon display support (EXPERIMENTAL)' CONFIG_FB_RADEON +# if [ "$CONFIG_FB_RADEON" = "y" ]; then +# bool ' Sony Vaio C1MV 1280x600 LCD support' CONFIG_FB_RADEON_VAIO_LCD +# fi tristate ' ATI Rage128 display support (EXPERIMENTAL)' CONFIG_FB_ATY128 tristate ' Intel 830M/845G/852GM/855GM/865G display support (EXPERIMENTAL)' CONFIG_FB_INTEL tristate ' SIS acceleration (EXPERIMENTAL)' CONFIG_FB_SIS if [ "$CONFIG_FB_SIS" != "n" ]; then - bool ' SIS 630/540/730 support' CONFIG_FB_SIS_300 - bool ' SIS 315H/315 support' CONFIG_FB_SIS_315 + bool ' SIS 300/305/540/630/730 support' CONFIG_FB_SIS_300 + bool ' SIS 315/650/M650/651/740/Xabre support' CONFIG_FB_SIS_315 fi tristate ' NeoMagic display support (EXPERIMENTAL)' CONFIG_FB_NEOMAGIC tristate ' 3Dfx Banshee/Voodoo3 display support (EXPERIMENTAL)' CONFIG_FB_3DFX @@ -294,6 +300,7 @@ "$CONFIG_FB_PM3" = "y" -o "$CONFIG_FB_TRIDENT" = "y" -o \ "$CONFIG_FB_P9100" = "y" -o "$CONFIG_FB_ATY128" = "y" -o \ "$CONFIG_FB_RIVA" = "y" -o "$CONFIG_FB_RADEON" = "y" -o \ + "$CONFIG_FB_INTEL" = "y" -o \ "$CONFIG_FB_SGIVW" = "y" -o "$CONFIG_FB_CYBER2000" = "y" -o \ "$CONFIG_FB_SA1100" = "y" -o "$CONFIG_FB_3DFX" = "y" -o \ "$CONFIG_FB_PMAG_BA" = "y" -o "$CONFIG_FB_PMAGB_B" = "y" -o \ @@ -314,12 +321,13 @@ "$CONFIG_FB_VALKYRIE" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \ "$CONFIG_FB_IGA" = "m" -o "$CONFIG_FB_MATROX" = "m" -o \ "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_PM2" = "m" -o \ - "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "y" -o \ + "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "m" -o \ "$CONFIG_FB_P9100" = "m" -o "$CONFIG_FB_ATY128" = "m" -o \ "$CONFIG_FB_RIVA" = "m" -o "$CONFIG_FB_3DFX" = "m" -o \ "$CONFIG_FB_SGIVW" = "m" -o "$CONFIG_FB_CYBER2000" = "m" -o \ "$CONFIG_FB_PMAG_BA" = "m" -o "$CONFIG_FB_PMAGB_B" = "m" -o \ "$CONFIG_FB_MAXINE" = "m" -o "$CONFIG_FB_RADEON" = "m" -o \ + "$CONFIG_FB_INTEL" = "m" -o \ "$CONFIG_FB_SA1100" = "m" -o "$CONFIG_FB_SIS" = "m" -o \ "$CONFIG_FB_TX3912" = "m" -o "$CONFIG_FB_NEOMAGIC" = "m" -o \ "$CONFIG_FB_STI" = "m" -o "$CONFIG_FB_INTEL" = "m" ]; then @@ -330,6 +338,7 @@ "$CONFIG_FB_MAC" = "y" -o "$CONFIG_FB_VESA" = "y" -o \ "$CONFIG_FB_VIRTUAL" = "y" -o "$CONFIG_FB_TBOX" = "y" -o \ "$CONFIG_FB_Q40" = "y" -o "$CONFIG_FB_RADEON" = "y" -o \ + "$CONFIG_FB_INTEL" = "y" -o \ "$CONFIG_FB_CONTROL" = "y" -o "$CONFIG_FB_CLGEN" = "y" -o \ "$CONFIG_FB_VIRGE" = "y" -o "$CONFIG_FB_CYBER" = "y" -o \ "$CONFIG_FB_VALKYRIE" = "y" -o "$CONFIG_FB_PLATINUM" = "y" -o \ @@ -352,10 +361,11 @@ "$CONFIG_FB_VALKYRIE" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \ "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_MATROX" = "m" -o \ "$CONFIG_FB_PM2" = "m" -o "$CONFIG_FB_SGIVW" = "m" -o \ - "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "y" -o \ + "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "m" -o \ "$CONFIG_FB_RIVA" = "m" -o "$CONFIG_FB_ATY128" = "m" -o \ "$CONFIG_FB_CYBER2000" = "m" -o "$CONFIG_FB_SIS" = "m" -o \ "$CONFIG_FB_SA1100" = "m" -o "$CONFIG_FB_RADEON" = "m" -o \ + "$CONFIG_FB_INTEL" = "m" -o \ "$CONFIG_FB_PVR2" = "m" -o "$CONFIG_FB_VOODOO1" = "m" -o \ "$CONFIG_FB_NEOMAGIC" = "m" -o "$CONFIG_FB_INTEL" = "m" ]; then define_tristate CONFIG_FBCON_CFB16 m @@ -387,6 +397,7 @@ "$CONFIG_FB_RIVA" = "y" -o "$CONFIG_FB_ATY128" = "y" -o \ "$CONFIG_FB_FM2" = "y" -o "$CONFIG_FB_SGIVW" = "y" -o \ "$CONFIG_FB_RADEON" = "y" -o "$CONFIG_FB_PVR2" = "y" -o \ + "$CONFIG_FB_INTEL" = "y" -o \ 
"$CONFIG_FB_3DFX" = "y" -o "$CONFIG_FB_SIS" = "y" -o \ "$CONFIG_FB_VOODOO1" = "y" -o "$CONFIG_FB_CYBER2000" = "y" -o \ "$CONFIG_FB_STI" = "y" -o "$CONFIG_FB_INTEL" = "y" ]; then @@ -397,13 +408,13 @@ "$CONFIG_FB_CONTROL" = "m" -o "$CONFIG_FB_CLGEN" = "m" -o \ "$CONFIG_FB_TGA" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \ "$CONFIG_FB_MATROX" = "m" -o "$CONFIG_FB_PM2" = "m" -o \ - "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "y" -o \ + "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "m" -o \ "$CONFIG_FB_RIVA" = "m" -o "$CONFIG_FB_ATY128" = "m" -o \ "$CONFIG_FB_3DFX" = "m" -o "$CONFIG_FB_RADEON" = "m" -o \ + "$CONFIG_FB_INTEL" = "m" -o \ "$CONFIG_FB_SGIVW" = "m" -o "$CONFIG_FB_SIS" = "m" -o \ "$CONFIG_FB_PVR2" = "m" -o "$CONFIG_FB_VOODOO1" = "m" -o \ - "$CONFIG_FB_CYBER2000" = "m" -o "$CONFIG_FB_STI" = "y" -o \ - "$CONFIG_FB_INTEL" = "m" ]; then + "$CONFIG_FB_CYBER2000" = "m" -o "$CONFIG_FB_STI" = "m" ]; then define_tristate CONFIG_FBCON_CFB32 m fi fi @@ -450,9 +461,9 @@ define_tristate CONFIG_FBCON_HGA m fi fi - if [ "$CONFIG_FB_STI" = "y" ]; then - define_tristate CONFIG_FBCON_STI y - fi + fi + if [ "$CONFIG_FB_STI" = "y" ]; then + define_tristate CONFIG_FBCON_STI y fi bool ' Support only 8 pixels wide fonts' CONFIG_FBCON_FONTWIDTH8_ONLY if [ "$CONFIG_SPARC32" = "y" -o "$CONFIG_SPARC64" = "y" ]; then diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/video/modedb.c linux.21rc5-ac2/drivers/video/modedb.c --- linux.21rc5/drivers/video/modedb.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/video/modedb.c 2003-04-22 17:04:40.000000000 +0100 @@ -43,6 +43,20 @@ #define DEFAULT_MODEDB_INDEX 0 static struct fb_videomode modedb[] __initdata = { +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + { + /* 1024x480 @ 65 Hz */ + NULL, 65, 1024, 480, 25203, 24, 24, 1, 17, 144, 4, + 0, FB_VMODE_NONINTERLACED + }, +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ +#if defined(CONFIG_FB_RADEON_VAIO_LCD) + { + /* 1280x600 @ 72 Hz, 45.288 kHz hsync */ + NULL, 72, 1280, 600, 13940, 24, 24, 23, 1, 256, 5, + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + }, +#endif /* CONFIG_FB_RADEON_VAIO_LCD */ { /* 640x400 @ 70 Hz, 31.5 kHz hsync */ NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/drivers/video/vesafb.c linux.21rc5-ac2/drivers/video/vesafb.c --- linux.21rc5/drivers/video/vesafb.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/drivers/video/vesafb.c 2003-05-20 20:27:48.000000000 +0100 @@ -94,6 +94,7 @@ static int inverse = 0; static int mtrr = 0; +static int vram __initdata = 0; /* needed for vram boot option */ static int currcon = 0; static int pmi_setpal = 0; /* pmi for palette changes ??? */ @@ -479,6 +480,10 @@ pmi_setpal=1; else if (! strcmp(this_opt, "mtrr")) mtrr=1; + /* checks for vram boot option */ + else if (! strncmp(this_opt, "vram:", 5)) + vram = simple_strtoul(this_opt+5, NULL, 0); + else if (!strncmp(this_opt, "font:", 5)) strcpy(fb_info.fontname, this_opt+5); } @@ -520,7 +525,14 @@ video_width = screen_info.lfb_width; video_height = screen_info.lfb_height; video_linelength = screen_info.lfb_linelength; - video_size = screen_info.lfb_width * screen_info.lfb_height * video_bpp; + video_size = screen_info.lfb_width * screen_info.lfb_height * video_bpp / 8; + + /* FIXME: Should we clip against declared size for banked devices ? 
*/ + + /* sets video_size according to vram boot option */ + if (vram && vram * 1024 * 1024 > video_size) + video_size = vram * 1024 * 1024; + video_visual = (video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/binfmt_elf.c linux.21rc5-ac2/fs/binfmt_elf.c --- linux.21rc5/fs/binfmt_elf.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/binfmt_elf.c 2003-05-12 16:55:19.000000000 +0100 @@ -375,7 +375,6 @@ unsigned long text_data, elf_entry = ~0UL; char * addr; loff_t offset; - int retval; current->mm->end_code = interp_ex->a_text; text_data = interp_ex->a_text + interp_ex->a_data; @@ -397,11 +396,9 @@ } do_brk(0, text_data); - retval = -ENOEXEC; if (!interpreter->f_op || !interpreter->f_op->read) goto out; - retval = interpreter->f_op->read(interpreter, addr, text_data, &offset); - if (retval < 0) + if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0) goto out; flush_icache_range((unsigned long)addr, (unsigned long)addr + text_data); @@ -607,7 +604,7 @@ retval = setup_arg_pages(bprm); if (retval < 0) { send_sig(SIGKILL, current, 0); - return retval; + goto out_free_dentry; } current->mm->start_stack = bprm->p; @@ -711,7 +708,8 @@ printk(KERN_ERR "Unable to load interpreter\n"); kfree(elf_phdata); send_sig(SIGSEGV, current, 0); - return 0; + retval = -ENOEXEC; /* Nobody gets to see this, but.. */ + goto out; } } @@ -1143,7 +1141,7 @@ psinfo.pr_state = i; psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i]; psinfo.pr_zomb = psinfo.pr_sname == 'Z'; - psinfo.pr_nice = current->nice; + psinfo.pr_nice = task_nice(current); psinfo.pr_flag = current->flags; psinfo.pr_uid = NEW_TO_OLD_UID(current->uid); psinfo.pr_gid = NEW_TO_OLD_GID(current->gid); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/block_dev.c linux.21rc5-ac2/fs/block_dev.c --- linux.21rc5/fs/block_dev.c 2003-05-28 15:07:33.000000000 +0100 +++ linux.21rc5-ac2/fs/block_dev.c 2003-04-22 16:44:38.000000000 +0100 @@ -131,8 +131,9 @@ return 0; } -static int blkdev_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize) +static int blkdev_direct_IO(int rw, struct file * filp, struct kiobuf * iobuf, unsigned long blocknr, int blocksize) { + struct inode * inode = filp->f_dentry->d_inode->i_mapping->host; return generic_direct_IO(rw, inode, iobuf, blocknr, blocksize, blkdev_get_block); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/buffer.c linux.21rc5-ac2/fs/buffer.c --- linux.21rc5/fs/buffer.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/buffer.c 2003-05-27 14:06:12.000000000 +0100 @@ -123,6 +123,36 @@ int bdflush_min[N_PARAM] = { 0, 1, 0, 0, 0, 1*HZ, 0, 0, 0}; int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,10000*HZ, 10000*HZ, 100, 100, 0}; +static inline int write_buffer_delay(struct buffer_head *bh) +{ + struct page *page = bh->b_page; + + if (!TryLockPage(page)) { + spin_unlock(&lru_list_lock); + unlock_buffer(bh); + page->mapping->a_ops->writepage(page); + return 1; + } + + return 0; +} + +static inline void write_buffer(struct buffer_head *bh) +{ + if (buffer_delay(bh)) { + struct page *page = bh->b_page; + + lock_page(page); + if (buffer_delay(bh)) { + page->mapping->a_ops->writepage(page); + return; + } + unlock_page(page); + } + + ll_rw_block(WRITE, 1, &bh); +} + void unlock_buffer(struct buffer_head *bh) { clear_bit(BH_Wait_IO, &bh->b_state); @@ -213,7 +243,13 @@ continue; if 
(test_and_set_bit(BH_Lock, &bh->b_state)) continue; - if (atomic_set_buffer_clean(bh)) { + if (buffer_delay(bh)) { + if (write_buffer_delay(bh)) { + if (count) + write_locked_buffers(array, count); + return -EAGAIN; + } + } else if (atomic_set_buffer_clean(bh)) { __refile_buffer(bh); get_bh(bh); array[count++] = bh; @@ -323,7 +359,7 @@ lock_kernel(); sync_inodes_sb(sb); - DQUOT_SYNC(dev); + DQUOT_SYNC_SB(sb); lock_super(sb); if (sb->s_dirt && sb->s_op && sb->s_op->write_super) sb->s_op->write_super(sb); @@ -347,7 +383,7 @@ lock_kernel(); sync_inodes(dev); - DQUOT_SYNC(dev); + DQUOT_SYNC_DEV(dev); sync_supers(dev, 1); unlock_kernel(); @@ -735,9 +771,10 @@ bh->b_list = BUF_CLEAN; bh->b_end_io = handler; bh->b_private = private; + bh->b_journal_head = NULL; } -static void end_buffer_io_async(struct buffer_head * bh, int uptodate) +void end_buffer_io_async(struct buffer_head * bh, int uptodate) { static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED; unsigned long flags; @@ -796,6 +833,7 @@ spin_unlock_irqrestore(&page_uptodate_lock, flags); return; } +EXPORT_SYMBOL(end_buffer_io_async); inline void set_buffer_async_io(struct buffer_head *bh) { @@ -852,7 +890,7 @@ * a noop) */ wait_on_buffer(bh); - ll_rw_block(WRITE, 1, &bh); + write_buffer(bh); brelse(bh); spin_lock(&lru_list_lock); } @@ -1298,13 +1336,14 @@ */ static void discard_buffer(struct buffer_head * bh) { - if (buffer_mapped(bh)) { + if (buffer_mapped(bh) || buffer_delay(bh)) { mark_buffer_clean(bh); lock_buffer(bh); clear_bit(BH_Uptodate, &bh->b_state); clear_bit(BH_Mapped, &bh->b_state); clear_bit(BH_Req, &bh->b_state); clear_bit(BH_New, &bh->b_state); + clear_bit(BH_Delay, &bh->b_state); remove_from_queues(bh); unlock_buffer(bh); } @@ -1592,7 +1631,7 @@ set_bit(BH_Uptodate, &bh->b_state); continue; } - if (!buffer_uptodate(bh) && + if (!buffer_uptodate(bh) && !buffer_delay(bh) && (block_start < from || block_end > to)) { ll_rw_block(READ, 1, &bh); *wait_bh++=bh; @@ -1987,7 +2026,7 @@ if (Page_Uptodate(page)) set_bit(BH_Uptodate, &bh->b_state); - if (!buffer_uptodate(bh)) { + if (!buffer_uptodate(bh) && !buffer_delay(bh)) { err = -EIO; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); @@ -2702,7 +2741,7 @@ { #ifdef CONFIG_SMP struct buffer_head * bh; - int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0; + int delalloc = 0, found = 0, locked = 0, dirty = 0, used = 0, lastused = 0; int nlist; static char *buf_types[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY", }; #endif @@ -2717,7 +2756,7 @@ if (!spin_trylock(&lru_list_lock)) return; for(nlist = 0; nlist < NR_LIST; nlist++) { - found = locked = dirty = used = lastused = 0; + delalloc = found = locked = dirty = used = lastused = 0; bh = lru_list[nlist]; if(!bh) continue; @@ -2727,6 +2766,8 @@ locked++; if (buffer_dirty(bh)) dirty++; + if (buffer_delay(bh)) + delalloc++; if (atomic_read(&bh->b_count)) used++, lastused = found; bh = bh->b_next_free; @@ -2737,10 +2778,10 @@ printk("%9s: BUG -> found %d, reported %d\n", buf_types[nlist], found, tmp); } - printk("%9s: %d buffers, %lu kbyte, %d used (last=%d), " - "%d locked, %d dirty\n", + printk("%7s: %d buffers, %lu kbyte, %d used (last=%d), " + "%d locked, %d dirty %d delay\n", buf_types[nlist], found, size_buffers_type[nlist]>>10, - used, lastused, locked, dirty); + used, lastused, locked, dirty, delalloc); } spin_unlock(&lru_list_lock); #endif @@ -2785,7 +2826,7 @@ hash_table = (struct buffer_head **) __get_free_pages(GFP_ATOMIC, order); } while (hash_table == NULL && --order > 0); - printk("Buffer-cache hash table entries: %d 
(order: %d, %ld bytes)\n", + printk(KERN_INFO "Buffer cache hash table entries: %d (order: %d, %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); if (!hash_table) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/Config.in linux.21rc5-ac2/fs/Config.in --- linux.21rc5/fs/Config.in 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/Config.in 2003-04-28 17:52:08.000000000 +0100 @@ -5,6 +5,14 @@ comment 'File systems' bool 'Quota support' CONFIG_QUOTA +dep_tristate ' Old quota format support' CONFIG_QFMT_V1 $CONFIG_QUOTA +dep_tristate ' VFS v0 quota format support' CONFIG_QFMT_V2 $CONFIG_QUOTA +dep_mbool ' Compatible quota interfaces' CONFIG_QIFACE_COMPAT $CONFIG_QUOTA +if [ "$CONFIG_QUOTA" = "y" -a "$CONFIG_QIFACE_COMPAT" = "y" ]; then + choice ' Compatible quota interfaces' \ + "Original CONFIG_QIFACE_V1 \ + VFSv0 CONFIG_QIFACE_V2" Original +fi tristate 'Kernel automounter support' CONFIG_AUTOFS_FS tristate 'Kernel automounter version 4 support (also supports v3)' CONFIG_AUTOFS4_FS @@ -93,6 +101,10 @@ tristate 'UFS file system support (read only)' CONFIG_UFS_FS dep_mbool ' UFS file system write support (DANGEROUS)' CONFIG_UFS_FS_WRITE $CONFIG_UFS_FS $CONFIG_EXPERIMENTAL +tristate 'XFS filesystem support' CONFIG_XFS_FS +dep_mbool ' Realtime support (EXPERIMENTAL)' CONFIG_XFS_RT $CONFIG_XFS_FS $CONFIG_EXPERIMENTAL +dep_mbool ' Quota support' CONFIG_XFS_QUOTA $CONFIG_XFS_FS + if [ "$CONFIG_NET" = "y" ]; then mainmenu_option next_comment @@ -102,6 +114,7 @@ dep_tristate 'InterMezzo file system support (replicating fs) (EXPERIMENTAL)' CONFIG_INTERMEZZO_FS $CONFIG_INET $CONFIG_EXPERIMENTAL dep_tristate 'NFS file system support' CONFIG_NFS_FS $CONFIG_INET dep_mbool ' Provide NFSv3 client support' CONFIG_NFS_V3 $CONFIG_NFS_FS + dep_mbool ' Allow direct I/O on NFS files (EXPERIMENTAL)' CONFIG_NFS_DIRECTIO $CONFIG_NFS_FS $CONFIG_EXPERIMENTAL dep_bool ' Root file system on NFS' CONFIG_ROOT_NFS $CONFIG_NFS_FS $CONFIG_IP_PNP dep_tristate 'NFS server support' CONFIG_NFSD $CONFIG_INET diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/dquot.c linux.21rc5-ac2/fs/dquot.c --- linux.21rc5/fs/dquot.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/dquot.c 2003-04-22 18:35:15.000000000 +0100 @@ -35,7 +35,7 @@ * Jan Kara, , sponsored by SuSE CR, 10-11/99 * * Used struct list_head instead of own list struct - * Invalidation of dquots with dq_count > 0 no longer possible + * Invalidation of referenced dquots is no longer possible * Improved free_dquots list management * Quota and i_blocks are now updated in one place to avoid races * Warnings are now delayed so we won't block in critical section @@ -45,6 +45,10 @@ * Added dynamic quota structure allocation * Jan Kara 12/2000 * + * Rewritten quota interface. Implemented new quota format and + * formats registering. 
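
The changelog above ends at the new format-registering interface; together with the CONFIG_QFMT_V1/CONFIG_QFMT_V2 options added to fs/Config.in, quota formats are now pluggable. As a rough sketch of the driver side (the example_* names are hypothetical, the hook bodies are omitted, and only the qf_* fields and the hook signatures actually exercised by the dquot.c hunks below are assumed):

	/* Hypothetical out-of-tree quota format driver, 2.4 style. */
	#include <linux/module.h>
	#include <linux/quota.h>

	static int example_check_quota_file(struct super_block *sb, int type);
	static int example_read_file_info(struct super_block *sb, int type);
	static int example_write_file_info(struct super_block *sb, int type);
	static int example_free_file_info(struct super_block *sb, int type);
	static int example_read_dqblk(struct dquot *dquot);
	static int example_commit_dqblk(struct dquot *dquot);

	static struct quota_format_ops example_qfmt_ops = {
		check_quota_file:	example_check_quota_file,
		read_file_info:		example_read_file_info,
		write_file_info:	example_write_file_info,
		free_file_info:		example_free_file_info,
		read_dqblk:		example_read_dqblk,	/* fills dquot->dq_dqb */
		commit_dqblk:		example_commit_dqblk,	/* writes dquot->dq_dqb back */
	};

	static struct quota_format_type example_format = {
		qf_fmt_id:	QFMT_VFS_V0,	/* a real driver would claim its own id */
		qf_ops:		&example_qfmt_ops,
		qf_owner:	THIS_MODULE,
	};

	static int __init example_qfmt_init(void)
	{
		return register_quota_format(&example_format);
	}

	static void __exit example_qfmt_exit(void)
	{
		unregister_quota_format(&example_format);
	}

	module_init(example_qfmt_init);
	module_exit(example_qfmt_exit);
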
+ * Jan Kara, , 2001,2002 + * * (C) Copyright 1994 - 1997 Marco van Wieringen */ @@ -59,20 +63,53 @@ #include #include #include +#include #include #include +#include +#include #include -#define __DQUOT_VERSION__ "dquot_6.4.0" +static char *quotatypes[] = INITQFNAMES; +static struct quota_format_type *quota_formats; /* List of registered formats */ -int nr_dquots, nr_free_dquots; +int register_quota_format(struct quota_format_type *fmt) +{ + lock_kernel(); + fmt->qf_next = quota_formats; + quota_formats = fmt; + unlock_kernel(); + return 0; +} -static char *quotatypes[] = INITQFNAMES; +void unregister_quota_format(struct quota_format_type *fmt) +{ + struct quota_format_type **actqf; -static inline struct quota_mount_options *sb_dqopt(struct super_block *sb) + lock_kernel(); + for (actqf = "a_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); + if (*actqf) + *actqf = (*actqf)->qf_next; + unlock_kernel(); +} + +static struct quota_format_type *find_quota_format(int id) { - return &sb->s_dquot; + struct quota_format_type *actqf; + + lock_kernel(); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + if (actqf && !try_inc_mod_count(actqf->qf_owner)) + actqf = NULL; + unlock_kernel(); + return actqf; +} + +static void put_quota_format(struct quota_format_type *fmt) +{ + if (fmt->qf_owner) + __MOD_DEC_USE_COUNT(fmt->qf_owner); } /* @@ -85,11 +122,11 @@ * list is used for the sync and invalidate operations, which must look * at every dquot. * - * Unused dquots (dq_count == 0) are added to the free_dquots list when - * freed, and this list is searched whenever we need an available dquot. - * Dquots are removed from the list as soon as they are used again, and - * nr_free_dquots gives the number of dquots on the list. When dquot is - * invalidated it's completely released from memory. + * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, + * and this list is searched whenever we need an available dquot. Dquots are + * removed from the list as soon as they are used again, and + * dqstats.free_dquots gives the number of dquots on the list. When + * dquot is invalidated it's completely released from memory. * * Dquots with a specific identity (device, type and id) are placed on * one of the dquot_hash[] hash chains. 
The provides an efficient search @@ -115,35 +152,39 @@ static LIST_HEAD(free_dquots); static struct list_head dquot_hash[NR_DQHASH]; -static struct dqstats dqstats; +struct dqstats dqstats; static void dqput(struct dquot *); static struct dquot *dqduplicate(struct dquot *); -static inline char is_enabled(struct quota_mount_options *dqopt, short type) +static inline void get_dquot_ref(struct dquot *dquot) { - switch (type) { - case USRQUOTA: - return((dqopt->flags & DQUOT_USR_ENABLED) != 0); - case GRPQUOTA: - return((dqopt->flags & DQUOT_GRP_ENABLED) != 0); - } - return(0); + dquot->dq_count++; +} + +static inline void put_dquot_ref(struct dquot *dquot) +{ + dquot->dq_count--; +} + +static inline void get_dquot_dup_ref(struct dquot *dquot) +{ + dquot->dq_dup_ref++; } -static inline char sb_has_quota_enabled(struct super_block *sb, short type) +static inline void put_dquot_dup_ref(struct dquot *dquot) { - return is_enabled(sb_dqopt(sb), type); + dquot->dq_dup_ref--; } -static inline int const hashfn(kdev_t dev, unsigned int id, short type) +static inline int const hashfn(struct super_block *sb, unsigned int id, int type) { - return((HASHDEV(dev) ^ id) * (MAXQUOTAS - type)) % NR_DQHASH; + return((((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type)) % NR_DQHASH; } static inline void insert_dquot_hash(struct dquot *dquot) { - struct list_head *head = dquot_hash + hashfn(dquot->dq_dev, dquot->dq_id, dquot->dq_type); + struct list_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); list_add(&dquot->dq_hash, head); } @@ -153,14 +194,14 @@ INIT_LIST_HEAD(&dquot->dq_hash); } -static inline struct dquot *find_dquot(unsigned int hashent, kdev_t dev, unsigned int id, short type) +static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) { struct list_head *head; struct dquot *dquot; for (head = dquot_hash[hashent].next; head != dquot_hash+hashent; head = head->next) { dquot = list_entry(head, struct dquot, dq_hash); - if (dquot->dq_dev == dev && dquot->dq_id == id && dquot->dq_type == type) + if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) return dquot; } return NODQUOT; @@ -170,14 +211,14 @@ static inline void put_dquot_head(struct dquot *dquot) { list_add(&dquot->dq_free, &free_dquots); - nr_free_dquots++; + dqstats.free_dquots++; } /* Add a dquot to the tail of the free list */ static inline void put_dquot_last(struct dquot *dquot) { list_add(&dquot->dq_free, free_dquots.prev); - nr_free_dquots++; + dqstats.free_dquots++; } /* Move dquot to the head of free list (it must be already on it) */ @@ -193,7 +234,7 @@ return; list_del(&dquot->dq_free); INIT_LIST_HEAD(&dquot->dq_free); - nr_free_dquots--; + dqstats.free_dquots--; } static inline void put_inuse(struct dquot *dquot) @@ -201,12 +242,12 @@ /* We add to the back of inuse list so we don't have to restart * when traversing this list and we block */ list_add(&dquot->dq_inuse, inuse_list.prev); - nr_dquots++; + dqstats.allocated_dquots++; } static inline void remove_inuse(struct dquot *dquot) { - nr_dquots--; + dqstats.allocated_dquots--; list_del(&dquot->dq_inuse); } @@ -243,6 +284,7 @@ wake_up(&dquot->dq_wait_lock); } +/* Wait for dquot to be unused */ static void __wait_dquot_unused(struct dquot *dquot) { DECLARE_WAITQUEUE(wait, current); @@ -258,85 +300,56 @@ current->state = TASK_RUNNING; } -/* - * We don't have to be afraid of deadlocks as we never have quotas on quota files... 
- */ -static void write_dquot(struct dquot *dquot) +/* Wait for all duplicated dquot references to be dropped */ +static void __wait_dup_drop(struct dquot *dquot) { - short type = dquot->dq_type; - struct file *filp; - mm_segment_t fs; - loff_t offset; - ssize_t ret; - struct semaphore *sem = &dquot->dq_sb->s_dquot.dqio_sem; - struct dqblk dqbuf; - - down(sem); - filp = dquot->dq_sb->s_dquot.files[type]; - offset = dqoff(dquot->dq_id); - fs = get_fs(); - set_fs(KERNEL_DS); + DECLARE_WAITQUEUE(wait, current); - /* - * Note: clear the DQ_MOD flag unconditionally, - * so we don't loop forever on failure. - */ - memcpy(&dqbuf, &dquot->dq_dqb, sizeof(struct dqblk)); - dquot->dq_flags &= ~DQ_MOD; - ret = 0; - if (filp) - ret = filp->f_op->write(filp, (char *)&dqbuf, - sizeof(struct dqblk), &offset); - if (ret != sizeof(struct dqblk)) - printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", - kdevname(dquot->dq_dev)); - - set_fs(fs); - up(sem); - dqstats.writes++; -} - -static void read_dquot(struct dquot *dquot) -{ - short type = dquot->dq_type; - struct file *filp; - mm_segment_t fs; - loff_t offset; + add_wait_queue(&dquot->dq_wait_free, &wait); +repeat: + set_current_state(TASK_UNINTERRUPTIBLE); + if (dquot->dq_dup_ref) { + schedule(); + goto repeat; + } + remove_wait_queue(&dquot->dq_wait_free, &wait); + current->state = TASK_RUNNING; +} - filp = dquot->dq_sb->s_dquot.files[type]; - if (filp == (struct file *)NULL) - return; +static int read_dqblk(struct dquot *dquot) +{ + int ret; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); lock_dquot(dquot); - if (!dquot->dq_sb) /* Invalidated quota? */ - goto out_lock; - /* Now we are sure filp is valid - the dquot isn't invalidated */ - down(&dquot->dq_sb->s_dquot.dqio_sem); - offset = dqoff(dquot->dq_id); - fs = get_fs(); - set_fs(KERNEL_DS); - filp->f_op->read(filp, (char *)&dquot->dq_dqb, sizeof(struct dqblk), &offset); - up(&dquot->dq_sb->s_dquot.dqio_sem); - set_fs(fs); - - if (dquot->dq_bhardlimit == 0 && dquot->dq_bsoftlimit == 0 && - dquot->dq_ihardlimit == 0 && dquot->dq_isoftlimit == 0) - dquot->dq_flags |= DQ_FAKE; - dqstats.reads++; -out_lock: + down(&dqopt->dqio_sem); + ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); + up(&dqopt->dqio_sem); unlock_dquot(dquot); + return ret; +} + +static int commit_dqblk(struct dquot *dquot) +{ + int ret; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + + down(&dqopt->dqio_sem); + ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); + up(&dqopt->dqio_sem); + return ret; } /* Invalidate all dquots on the list, wait for all users. Note that this function is called * after quota is disabled so no new quota might be created. As we only insert to the end of * inuse list, we don't have to restart searching... 
*/ -static void invalidate_dquots(struct super_block *sb, short type) +static void invalidate_dquots(struct super_block *sb, int type) { struct dquot *dquot; struct list_head *head; restart: - for (head = inuse_list.next; head != &inuse_list; head = head->next) { + list_for_each(head, &inuse_list) { dquot = list_entry(head, struct dquot, dq_inuse); if (dquot->dq_sb != sb) continue; @@ -359,37 +372,107 @@ } } -int sync_dquots(kdev_t dev, short type) +static int vfs_quota_sync(struct super_block *sb, int type) { struct list_head *head; struct dquot *dquot; + struct quota_info *dqopt = sb_dqopt(sb); + int cnt; - lock_kernel(); restart: - for (head = inuse_list.next; head != &inuse_list; head = head->next) { + list_for_each(head, &inuse_list) { dquot = list_entry(head, struct dquot, dq_inuse); - if (dev && dquot->dq_dev != dev) + if (sb && dquot->dq_sb != sb) continue; if (type != -1 && dquot->dq_type != type) continue; if (!dquot->dq_sb) /* Invalidated? */ continue; - if (!(dquot->dq_flags & (DQ_MOD | DQ_LOCKED))) + if (!dquot_dirty(dquot) && !(dquot->dq_flags & DQ_LOCKED)) continue; - /* Raise use count so quota won't be invalidated. We can't use dqduplicate() as it does too many tests */ - dquot->dq_count++; + /* Get reference to quota so it won't be invalidated. get_dquot_ref() + * is enough since if dquot is locked/modified it can't be + * on the free list */ + get_dquot_ref(dquot); if (dquot->dq_flags & DQ_LOCKED) wait_on_dquot(dquot); - if (dquot->dq_flags & DQ_MOD) - write_dquot(dquot); + if (dquot_dirty(dquot)) + sb->dq_op->sync_dquot(dquot); dqput(dquot); goto restart; } + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt)) + dqopt->info[cnt].dqi_flags &= ~DQF_ANY_DQUOT_DIRTY; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt) && info_dirty(&dqopt->info[cnt])) + dqopt->ops[cnt]->write_file_info(sb, cnt); dqstats.syncs++; - unlock_kernel(); + return 0; } +static struct super_block *get_super_to_sync(int type) +{ + struct list_head *head; + int cnt, dirty; + +restart: + spin_lock(&sb_lock); + list_for_each(head, &super_blocks) { + struct super_block *sb = list_entry(head, struct super_block, s_list); + + for (cnt = 0, dirty = 0; cnt < MAXQUOTAS; cnt++) + if ((type == cnt || type == -1) && sb_has_quota_enabled(sb, cnt) + && sb_dqopt(sb)->info[cnt].dqi_flags & DQF_ANY_DQUOT_DIRTY) + dirty = 1; + if (!dirty) + continue; + sb->s_count++; + spin_unlock(&sb_lock); + down_read(&sb->s_umount); + if (!sb->s_root) { + drop_super(sb); + goto restart; + } + return sb; + } + spin_unlock(&sb_lock); + return NULL; +} + +void sync_dquots_dev(kdev_t dev, int type) +{ + struct super_block *sb; + + if (dev) { + if ((sb = get_super(dev))) { + lock_kernel(); + if (sb->s_qcop->quota_sync) + sb->s_qcop->quota_sync(sb, type); + unlock_kernel(); + drop_super(sb); + } + } + else { + while ((sb = get_super_to_sync(type))) { + lock_kernel(); + if (sb->s_qcop->quota_sync) + sb->s_qcop->quota_sync(sb, type); + unlock_kernel(); + drop_super(sb); + } + } +} + +void sync_dquots_sb(struct super_block *sb, int type) +{ + lock_kernel(); + if (sb->s_qcop->quota_sync) + sb->s_qcop->quota_sync(sb, type); + unlock_kernel(); +} + /* Free unused dquots from cache */ static void prune_dqcache(int count) { @@ -408,19 +491,38 @@ } } +/* + * This is called from kswapd when we think we need some + * more memory, but aren't really sure how much. So we + * carefully try to free a _bit_ of our dqcache, but not + * too much. 
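
sync_dquots_dev() and sync_dquots_sb() above are the two entry points that replace the old sync_dquots(); they are what the DQUOT_SYNC_DEV()/DQUOT_SYNC_SB() calls in the fs/buffer.c hunks earlier resolve to. Schematically (hypothetical caller; dev == 0 means every superblock with dirty quotas, type == -1 means both quota types):

	static void example_sync_paths(struct super_block *sb)
	{
		sync_dquots_sb(sb, -1);	/* one filesystem, e.g. at umount */
		sync_dquots_dev(0, -1);	/* everything, e.g. from sys_sync() */
	}
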
+ * + * Priority: + * 1 - very urgent: shrink everything + * ... + * 6 - base-level: try to shrink a bit. + */ + int shrink_dqcache_memory(int priority, unsigned int gfp_mask) { + int count = 0; + lock_kernel(); - prune_dqcache(nr_free_dquots / (priority + 1)); + count = dqstats.free_dquots / priority; + prune_dqcache(count); unlock_kernel(); return kmem_cache_shrink(dquot_cachep); } -/* NOTE: If you change this function please check whether dqput_blocks() works right... */ +/* + * Put reference to dquot + * NOTE: If you change this function please check whether dqput_blocks() works right... + */ static void dqput(struct dquot *dquot) { if (!dquot) return; +#ifdef __DQUOT_PARANOIA if (!dquot->dq_count) { printk("VFS: dqput: trying to free free dquot\n"); printk("VFS: device %s, dquot of %s %d\n", @@ -428,33 +530,38 @@ dquot->dq_id); return; } +#endif dqstats.drops++; we_slept: + if (dquot->dq_dup_ref && dquot->dq_count - dquot->dq_dup_ref <= 1) { /* Last unduplicated reference? */ + __wait_dup_drop(dquot); + goto we_slept; + } if (dquot->dq_count > 1) { /* We have more than one user... We can simply decrement use count */ - dquot->dq_count--; + put_dquot_ref(dquot); return; } - if (dquot->dq_flags & DQ_MOD) { - write_dquot(dquot); + if (dquot_dirty(dquot)) { + commit_dqblk(dquot); goto we_slept; } /* sanity check */ if (!list_empty(&dquot->dq_free)) { printk(KERN_ERR "dqput: dquot already on free list??\n"); - dquot->dq_count--; /* J.K. Just decrementing use count seems safer... */ + put_dquot_ref(dquot); return; } - dquot->dq_count--; + put_dquot_ref(dquot); /* If dquot is going to be invalidated invalidate_dquots() is going to free it so */ if (!(dquot->dq_flags & DQ_INVAL)) put_dquot_last(dquot); /* Place at end of LRU free queue */ wake_up(&dquot->dq_wait_free); } -static struct dquot *get_empty_dquot(void) +static struct dquot *get_empty_dquot(struct super_block *sb, int type) { struct dquot *dquot; @@ -468,6 +575,9 @@ INIT_LIST_HEAD(&dquot->dq_free); INIT_LIST_HEAD(&dquot->dq_inuse); INIT_LIST_HEAD(&dquot->dq_hash); + dquot->dq_sb = sb; + dquot->dq_dev = sb->s_dev; + dquot->dq_type = type; dquot->dq_count = 1; /* all dquots go on the inuse_list */ put_inuse(dquot); @@ -475,11 +585,11 @@ return dquot; } -static struct dquot *dqget(struct super_block *sb, unsigned int id, short type) +static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) { - unsigned int hashent = hashfn(sb->s_dev, id, type); + unsigned int hashent = hashfn(sb, id, type); struct dquot *dquot, *empty = NODQUOT; - struct quota_mount_options *dqopt = sb_dqopt(sb); + struct quota_info *dqopt = sb_dqopt(sb); we_slept: if (!is_enabled(dqopt, type)) { @@ -488,23 +598,21 @@ return NODQUOT; } - if ((dquot = find_dquot(hashent, sb->s_dev, id, type)) == NODQUOT) { + if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { if (empty == NODQUOT) { - if ((empty = get_empty_dquot()) == NODQUOT) + if ((empty = get_empty_dquot(sb, type)) == NODQUOT) schedule(); /* Try to wait for a moment... 
*/ goto we_slept; } dquot = empty; - dquot->dq_id = id; - dquot->dq_type = type; - dquot->dq_dev = sb->s_dev; - dquot->dq_sb = sb; + dquot->dq_id = id; /* hash it first so it can be found */ insert_dquot_hash(dquot); - read_dquot(dquot); + read_dqblk(dquot); } else { - if (!dquot->dq_count++) + if (!dquot->dq_count) remove_free_dquot(dquot); + get_dquot_ref(dquot); dqstats.cache_hits++; wait_on_dquot(dquot); if (empty) @@ -516,30 +624,47 @@ dqput(dquot); return NODQUOT; } - dquot->dq_referenced++; + ++dquot->dq_referenced; dqstats.lookups++; return dquot; } +/* Duplicate reference to dquot got from inode */ static struct dquot *dqduplicate(struct dquot *dquot) { if (dquot == NODQUOT) return NODQUOT; - dquot->dq_count++; + get_dquot_ref(dquot); if (!dquot->dq_sb) { printk(KERN_ERR "VFS: dqduplicate(): Invalidated quota to be duplicated!\n"); - dquot->dq_count--; + put_dquot_ref(dquot); return NODQUOT; } if (dquot->dq_flags & DQ_LOCKED) printk(KERN_ERR "VFS: dqduplicate(): Locked quota to be duplicated!\n"); + get_dquot_dup_ref(dquot); dquot->dq_referenced++; dqstats.lookups++; + return dquot; } -static int dqinit_needed(struct inode *inode, short type) +/* Put duplicated reference */ +static void dqputduplicate(struct dquot *dquot) +{ + if (!dquot->dq_dup_ref) { + printk(KERN_ERR "VFS: dqputduplicate(): Duplicated dquot put without duplicate reference.\n"); + return; + } + put_dquot_dup_ref(dquot); + if (!dquot->dq_dup_ref) + wake_up(&dquot->dq_wait_free); + put_dquot_ref(dquot); + dqstats.drops++; +} + +static int dqinit_needed(struct inode *inode, int type) { int cnt; @@ -553,16 +678,13 @@ return 0; } -static void add_dquot_ref(struct super_block *sb, short type) +static void add_dquot_ref(struct super_block *sb, int type) { struct list_head *p; - if (!sb->dq_op) - return; /* nothing to do */ - restart: file_list_lock(); - for (p = sb->s_files.next; p != &sb->s_files; p = p->next) { + list_for_each(p, &sb->s_files) { struct file *filp = list_entry(p, struct file, f_list); struct inode *inode = filp->f_dentry->d_inode; if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) { @@ -582,13 +704,15 @@ /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ static inline int dqput_blocks(struct dquot *dquot) { - if (dquot->dq_count == 1) + if (dquot->dq_dup_ref && dquot->dq_count - dquot->dq_dup_ref <= 1) + return 1; + if (dquot->dq_count <= 1 && dquot->dq_flags & DQ_MOD) return 1; return 0; } /* Remove references to dquots from inode - add dquot to list for freeing if needed */ -int remove_inode_dquot_ref(struct inode *inode, short type, struct list_head *tofree_head) +int remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head) { struct dquot *dquot = inode->i_dquot[type]; int cnt; @@ -635,38 +759,38 @@ static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number) { - dquot->dq_curinodes += number; - dquot->dq_flags |= DQ_MOD; + dquot->dq_dqb.dqb_curinodes += number; + mark_dquot_dirty(dquot); } -static inline void dquot_incr_blocks(struct dquot *dquot, unsigned long number) +static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) { - dquot->dq_curblocks += number; - dquot->dq_flags |= DQ_MOD; + dquot->dq_dqb.dqb_curspace += number; + mark_dquot_dirty(dquot); } static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number) { - if (dquot->dq_curinodes > number) - dquot->dq_curinodes -= number; + if (dquot->dq_dqb.dqb_curinodes > number) + dquot->dq_dqb.dqb_curinodes -= number; 
else - dquot->dq_curinodes = 0; - if (dquot->dq_curinodes < dquot->dq_isoftlimit) - dquot->dq_itime = (time_t) 0; + dquot->dq_dqb.dqb_curinodes = 0; + if (dquot->dq_dqb.dqb_curinodes < dquot->dq_dqb.dqb_isoftlimit) + dquot->dq_dqb.dqb_itime = (time_t) 0; dquot->dq_flags &= ~DQ_INODES; - dquot->dq_flags |= DQ_MOD; + mark_dquot_dirty(dquot); } -static inline void dquot_decr_blocks(struct dquot *dquot, unsigned long number) +static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) { - if (dquot->dq_curblocks > number) - dquot->dq_curblocks -= number; + if (dquot->dq_dqb.dqb_curspace > number) + dquot->dq_dqb.dqb_curspace -= number; else - dquot->dq_curblocks = 0; - if (dquot->dq_curblocks < dquot->dq_bsoftlimit) - dquot->dq_btime = (time_t) 0; + dquot->dq_dqb.dqb_curspace = 0; + if (toqb(dquot->dq_dqb.dqb_curspace) < dquot->dq_dqb.dqb_bsoftlimit) + dquot->dq_dqb.dqb_btime = (time_t) 0; dquot->dq_flags &= ~DQ_BLKS; - dquot->dq_flags |= DQ_MOD; + mark_dquot_dirty(dquot); } static inline int need_print_warning(struct dquot *dquot, int flag) @@ -739,7 +863,10 @@ static inline char ignore_hardlimit(struct dquot *dquot) { - return capable(CAP_SYS_RESOURCE) && !dquot->dq_sb->s_dquot.rsquash[dquot->dq_type]; + struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; + + return capable(CAP_SYS_RESOURCE) && + (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); } static int check_idq(struct dquot *dquot, ulong inodes, char *warntype) @@ -748,60 +875,60 @@ if (inodes <= 0 || dquot->dq_flags & DQ_FAKE) return QUOTA_OK; - if (dquot->dq_ihardlimit && - (dquot->dq_curinodes + inodes) > dquot->dq_ihardlimit && + if (dquot->dq_dqb.dqb_ihardlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && !ignore_hardlimit(dquot)) { *warntype = IHARDWARN; return NO_QUOTA; } - if (dquot->dq_isoftlimit && - (dquot->dq_curinodes + inodes) > dquot->dq_isoftlimit && - dquot->dq_itime && CURRENT_TIME >= dquot->dq_itime && + if (dquot->dq_dqb.dqb_isoftlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime && CURRENT_TIME >= dquot->dq_dqb.dqb_itime && !ignore_hardlimit(dquot)) { *warntype = ISOFTLONGWARN; return NO_QUOTA; } - if (dquot->dq_isoftlimit && - (dquot->dq_curinodes + inodes) > dquot->dq_isoftlimit && - dquot->dq_itime == 0) { + if (dquot->dq_dqb.dqb_isoftlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime == 0) { *warntype = ISOFTWARN; - dquot->dq_itime = CURRENT_TIME + dquot->dq_sb->s_dquot.inode_expire[dquot->dq_type]; + dquot->dq_dqb.dqb_itime = CURRENT_TIME + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; } return QUOTA_OK; } -static int check_bdq(struct dquot *dquot, ulong blocks, char prealloc, char *warntype) +static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) { *warntype = 0; - if (blocks <= 0 || dquot->dq_flags & DQ_FAKE) + if (space <= 0 || dquot->dq_flags & DQ_FAKE) return QUOTA_OK; - if (dquot->dq_bhardlimit && - (dquot->dq_curblocks + blocks) > dquot->dq_bhardlimit && + if (dquot->dq_dqb.dqb_bhardlimit && + toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bhardlimit && !ignore_hardlimit(dquot)) { if (!prealloc) *warntype = BHARDWARN; return NO_QUOTA; } - if (dquot->dq_bsoftlimit && - (dquot->dq_curblocks + blocks) > dquot->dq_bsoftlimit && - dquot->dq_btime && CURRENT_TIME >= dquot->dq_btime && + if (dquot->dq_dqb.dqb_bsoftlimit && + 
toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_btime && CURRENT_TIME >= dquot->dq_dqb.dqb_btime && !ignore_hardlimit(dquot)) { if (!prealloc) *warntype = BSOFTLONGWARN; return NO_QUOTA; } - if (dquot->dq_bsoftlimit && - (dquot->dq_curblocks + blocks) > dquot->dq_bsoftlimit && - dquot->dq_btime == 0) { + if (dquot->dq_dqb.dqb_bsoftlimit && + toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_btime == 0) { if (!prealloc) { *warntype = BSOFTWARN; - dquot->dq_btime = CURRENT_TIME + dquot->dq_sb->s_dquot.block_expire[dquot->dq_type]; + dquot->dq_dqb.dqb_btime = CURRENT_TIME + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; } else /* @@ -815,148 +942,15 @@ } /* - * Initialize a dquot-struct with new quota info. This is used by the - * system call interface functions. - */ -static int set_dqblk(struct super_block *sb, int id, short type, int flags, struct dqblk *dqblk) -{ - struct dquot *dquot; - int error = -EFAULT; - struct dqblk dq_dqblk; - - if (copy_from_user(&dq_dqblk, dqblk, sizeof(struct dqblk))) - return error; - - if (sb && (dquot = dqget(sb, id, type)) != NODQUOT) { - /* We can't block while changing quota structure... */ - if (id > 0 && ((flags & SET_QUOTA) || (flags & SET_QLIMIT))) { - dquot->dq_bhardlimit = dq_dqblk.dqb_bhardlimit; - dquot->dq_bsoftlimit = dq_dqblk.dqb_bsoftlimit; - dquot->dq_ihardlimit = dq_dqblk.dqb_ihardlimit; - dquot->dq_isoftlimit = dq_dqblk.dqb_isoftlimit; - } - - if ((flags & SET_QUOTA) || (flags & SET_USE)) { - if (dquot->dq_isoftlimit && - dquot->dq_curinodes < dquot->dq_isoftlimit && - dq_dqblk.dqb_curinodes >= dquot->dq_isoftlimit) - dquot->dq_itime = CURRENT_TIME + dquot->dq_sb->s_dquot.inode_expire[type]; - dquot->dq_curinodes = dq_dqblk.dqb_curinodes; - if (dquot->dq_curinodes < dquot->dq_isoftlimit) - dquot->dq_flags &= ~DQ_INODES; - if (dquot->dq_bsoftlimit && - dquot->dq_curblocks < dquot->dq_bsoftlimit && - dq_dqblk.dqb_curblocks >= dquot->dq_bsoftlimit) - dquot->dq_btime = CURRENT_TIME + dquot->dq_sb->s_dquot.block_expire[type]; - dquot->dq_curblocks = dq_dqblk.dqb_curblocks; - if (dquot->dq_curblocks < dquot->dq_bsoftlimit) - dquot->dq_flags &= ~DQ_BLKS; - } - - if (id == 0) { - dquot->dq_sb->s_dquot.block_expire[type] = dquot->dq_btime = dq_dqblk.dqb_btime; - dquot->dq_sb->s_dquot.inode_expire[type] = dquot->dq_itime = dq_dqblk.dqb_itime; - } - - if (dq_dqblk.dqb_bhardlimit == 0 && dq_dqblk.dqb_bsoftlimit == 0 && - dq_dqblk.dqb_ihardlimit == 0 && dq_dqblk.dqb_isoftlimit == 0) - dquot->dq_flags |= DQ_FAKE; - else - dquot->dq_flags &= ~DQ_FAKE; - - dquot->dq_flags |= DQ_MOD; - dqput(dquot); - } - return 0; -} - -static int get_quota(struct super_block *sb, int id, short type, struct dqblk *dqblk) -{ - struct dquot *dquot; - struct dqblk data; - int error = -ESRCH; - - if (!sb || !sb_has_quota_enabled(sb, type)) - goto out; - dquot = dqget(sb, id, type); - if (dquot == NODQUOT) - goto out; - - memcpy(&data, &dquot->dq_dqb, sizeof(struct dqblk)); /* We copy data to preserve them from changing */ - dqput(dquot); - error = -EFAULT; - if (dqblk && !copy_to_user(dqblk, &data, sizeof(struct dqblk))) - error = 0; -out: - return error; -} - -static int get_stats(caddr_t addr) -{ - int error = -EFAULT; - struct dqstats stats; - - dqstats.allocated_dquots = nr_dquots; - dqstats.free_dquots = nr_free_dquots; - - /* make a copy, in case we page-fault in user space */ - memcpy(&stats, &dqstats, sizeof(struct dqstats)); - if (!copy_to_user(addr, &stats, 
sizeof(struct dqstats))) - error = 0; - return error; -} - -static int quota_root_squash(struct super_block *sb, short type, int *addr) -{ - int new_value, error; - - if (!sb) - return(-ENODEV); - - error = -EFAULT; - if (!copy_from_user(&new_value, addr, sizeof(int))) { - sb_dqopt(sb)->rsquash[type] = new_value; - error = 0; - } - return error; -} - -#if 0 /* We are not going to support filesystems without i_blocks... */ -/* - * This is a simple algorithm that calculates the size of a file in blocks. - * This is only used on filesystems that do not have an i_blocks count. - */ -static u_long isize_to_blocks(loff_t isize, size_t blksize_bits) -{ - u_long blocks; - u_long indirect; - - if (!blksize_bits) - blksize_bits = BLOCK_SIZE_BITS; - blocks = (isize >> blksize_bits) + ((isize & ~((1 << blksize_bits)-1)) ? 1 : 0); - if (blocks > 10) { - indirect = ((blocks - 11) >> 8) + 1; /* single indirect blocks */ - if (blocks > (10 + 256)) { - indirect += ((blocks - 267) >> 16) + 1; /* double indirect blocks */ - if (blocks > (10 + 256 + (256 << 8))) - indirect++; /* triple indirect blocks */ - } - blocks += indirect; - } - return blocks; -} -#endif - -/* * Externally referenced functions through dquot_operations in inode. * * Note: this is a blocking operation. */ -void dquot_initialize(struct inode *inode, short type) +void dquot_initialize(struct inode *inode, int type) { struct dquot *dquot[MAXQUOTAS]; unsigned int id = 0; - short cnt; + int cnt; if (IS_NOQUOTA(inode)) return; @@ -1002,7 +996,7 @@ void dquot_drop(struct inode *inode) { struct dquot *dquot; - short cnt; + int cnt; inode->i_flags &= ~S_QUOTA; for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -1017,7 +1011,7 @@ /* * This operation can block, but only after everything is updated */ -int dquot_alloc_block(struct inode *inode, unsigned long number, char warn) +int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) { int cnt, ret = NO_QUOTA; struct dquot *dquot[MAXQUOTAS]; @@ -1038,16 +1032,16 @@ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (dquot[cnt] == NODQUOT) continue; - dquot_incr_blocks(dquot[cnt], number); + dquot_incr_space(dquot[cnt], number); } - inode->i_blocks += number << (BLOCK_SIZE_BITS - 9); + inode_add_bytes(inode, number); /* NOBLOCK End */ ret = QUOTA_OK; warn_put_all: flush_warnings(dquot, warntype); for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (dquot[cnt] != NODQUOT) - dqput(dquot[cnt]); + dqputduplicate(dquot[cnt]); return ret; } @@ -1084,16 +1078,16 @@ flush_warnings(dquot, warntype); for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (dquot[cnt] != NODQUOT) - dqput(dquot[cnt]); + dqputduplicate(dquot[cnt]); return ret; } /* * This is a non-blocking operation. 
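
To make the check_idq()/check_bdq() logic above concrete: for a user with dqb_bsoftlimit = 100 and dqb_bhardlimit = 200 (in 1K quota blocks, via toqb()) and a dqi_bgrace of 7 days, an allocation that crosses 100 blocks still succeeds but sets dqb_btime = CURRENT_TIME + 7 days and issues BSOFTWARN; once that grace time has expired, further allocations over the soft limit fail with BSOFTLONGWARN; and any allocation that would cross 200 blocks fails immediately with BHARDWARN, unless the caller passes the ignore_hardlimit() test.
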
*/ -void dquot_free_block(struct inode *inode, unsigned long number) +void dquot_free_space(struct inode *inode, qsize_t number) { - unsigned short cnt; + unsigned int cnt; struct dquot *dquot; /* NOBLOCK Start */ @@ -1101,10 +1095,10 @@ dquot = dqduplicate(inode->i_dquot[cnt]); if (dquot == NODQUOT) continue; - dquot_decr_blocks(dquot, number); - dqput(dquot); + dquot_decr_space(dquot, number); + dqputduplicate(dquot); } - inode->i_blocks -= number << (BLOCK_SIZE_BITS - 9); + inode_sub_bytes(inode, number); /* NOBLOCK End */ } @@ -1113,7 +1107,7 @@ */ void dquot_free_inode(const struct inode *inode, unsigned long number) { - unsigned short cnt; + unsigned int cnt; struct dquot *dquot; /* NOBLOCK Start */ @@ -1122,7 +1116,7 @@ if (dquot == NODQUOT) continue; dquot_decr_inodes(dquot, number); - dqput(dquot); + dqputduplicate(dquot); } /* NOBLOCK End */ } @@ -1134,7 +1128,7 @@ */ int dquot_transfer(struct inode *inode, struct iattr *iattr) { - unsigned long blocks; + qsize_t space; struct dquot *transfer_from[MAXQUOTAS]; struct dquot *transfer_to[MAXQUOTAS]; int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid, @@ -1164,7 +1158,7 @@ } } /* NOBLOCK START: From now on we shouldn't block */ - blocks = (inode->i_blocks >> 1); + space = inode_get_bytes(inode); /* Build the transfer_from list and check the limits */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { /* The second test can fail when quotaoff is in progress... */ @@ -1174,7 +1168,7 @@ if (transfer_from[cnt] == NODQUOT) /* Can happen on quotafiles (quota isn't initialized on them)... */ continue; if (check_idq(transfer_to[cnt], 1, warntype+cnt) == NO_QUOTA || - check_bdq(transfer_to[cnt], blocks, 0, warntype+cnt) == NO_QUOTA) + check_bdq(transfer_to[cnt], space, 0, warntype+cnt) == NO_QUOTA) goto warn_put_all; } @@ -1189,10 +1183,10 @@ continue; dquot_decr_inodes(transfer_from[cnt], 1); - dquot_decr_blocks(transfer_from[cnt], blocks); + dquot_decr_space(transfer_from[cnt], space); dquot_incr_inodes(transfer_to[cnt], 1); - dquot_incr_blocks(transfer_to[cnt], blocks); + dquot_incr_space(transfer_to[cnt], space); if (inode->i_dquot[cnt] == NODQUOT) BUG(); @@ -1208,39 +1202,36 @@ warn_put_all: flush_warnings(transfer_to, warntype); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + /* First we must put duplicate - otherwise we might deadlock */ + if (transfer_from[cnt] != NODQUOT) + dqputduplicate(transfer_from[cnt]); if (transfer_to[cnt] != NODQUOT) dqput(transfer_to[cnt]); - if (transfer_from[cnt] != NODQUOT) - dqput(transfer_from[cnt]); } return ret; } -static int __init dquot_init(void) -{ - int i; - - for (i = 0; i < NR_DQHASH; i++) - INIT_LIST_HEAD(dquot_hash + i); - printk(KERN_NOTICE "VFS: Diskquotas version %s initialized\n", __DQUOT_VERSION__); - return 0; -} -__initcall(dquot_init); - /* * Definitions of diskquota operations. 
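
Filesystems reach the entry points above through sb->dq_op (collected into the dquot_operations table just below), normally behind the DQUOT_* wrappers, which are not shown in this section. Under the new byte-based accounting a block allocation looks schematically like this (hypothetical caller; NO_QUOTA/QUOTA_OK are the return codes used above):

	static int example_alloc_one_block(struct inode *inode, int blocksize)
	{
		/* third argument: warn the user if the allocation is denied */
		if (inode->i_sb->dq_op->alloc_space(inode, blocksize, 1) == NO_QUOTA)
			return -EDQUOT;
		/* ... allocate the block; if that fails, undo the charge with
		 * inode->i_sb->dq_op->free_space(inode, blocksize); ... */
		return 0;
	}
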
*/ struct dquot_operations dquot_operations = { - dquot_initialize, /* mandatory */ - dquot_drop, /* mandatory */ - dquot_alloc_block, - dquot_alloc_inode, - dquot_free_block, - dquot_free_inode, - dquot_transfer + initialize: dquot_initialize, /* mandatory */ + drop: dquot_drop, /* mandatory */ + alloc_space: dquot_alloc_space, + alloc_inode: dquot_alloc_inode, + free_space: dquot_free_space, + free_inode: dquot_free_inode, + transfer: dquot_transfer, + sync_dquot: commit_dqblk }; -static inline void set_enable_flags(struct quota_mount_options *dqopt, short type) +/* Function used by filesystems for initializing the dquot_operations structure */ +void init_dquot_operations(struct dquot_operations *fsdqops) +{ + memcpy(fsdqops, &dquot_operations, sizeof(dquot_operations)); +} + +static inline void set_enable_flags(struct quota_info *dqopt, int type) { switch (type) { case USRQUOTA: @@ -1252,7 +1243,7 @@ } } -static inline void reset_enable_flags(struct quota_mount_options *dqopt, short type) +static inline void reset_enable_flags(struct quota_info *dqopt, int type) { switch (type) { case USRQUOTA: @@ -1265,16 +1256,15 @@ } /* Function in inode.c - remove pointers to dquots in icache */ -extern void remove_dquot_ref(struct super_block *, short); +extern void remove_dquot_ref(struct super_block *, int); /* * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) */ -int quota_off(struct super_block *sb, short type) +int vfs_quota_off(struct super_block *sb, int type) { - struct file *filp; - short cnt; - struct quota_mount_options *dqopt = sb_dqopt(sb); + int cnt; + struct quota_info *dqopt = sb_dqopt(sb); lock_kernel(); if (!sb) @@ -1292,51 +1282,48 @@ /* Note: these are blocking operations */ remove_dquot_ref(sb, cnt); invalidate_dquots(sb, cnt); + if (info_dirty(&dqopt->info[cnt])) + dqopt->ops[cnt]->write_file_info(sb, cnt); + if (dqopt->ops[cnt]->free_file_info) + dqopt->ops[cnt]->free_file_info(sb, cnt); + put_quota_format(dqopt->info[cnt].dqi_format); - filp = dqopt->files[cnt]; + fput(dqopt->files[cnt]); dqopt->files[cnt] = (struct file *)NULL; - dqopt->inode_expire[cnt] = 0; - dqopt->block_expire[cnt] = 0; - fput(filp); - } + dqopt->info[cnt].dqi_flags = 0; + dqopt->info[cnt].dqi_igrace = 0; + dqopt->info[cnt].dqi_bgrace = 0; + dqopt->ops[cnt] = NULL; + } up(&dqopt->dqoff_sem); out: unlock_kernel(); return 0; } -static inline int check_quotafile_size(loff_t size) -{ - ulong blocks = size >> BLOCK_SIZE_BITS; - size_t off = size & (BLOCK_SIZE - 1); - - return !(((blocks % sizeof(struct dqblk)) * BLOCK_SIZE + off % sizeof(struct dqblk)) % sizeof(struct dqblk)); -} - -static int quota_on(struct super_block *sb, short type, char *path) +int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path) { - struct file *f; + struct file *f = NULL; struct inode *inode; - struct dquot *dquot; - struct quota_mount_options *dqopt = sb_dqopt(sb); - char *tmp; + struct quota_info *dqopt = sb_dqopt(sb); + struct quota_format_type *fmt = find_quota_format(format_id); int error; - if (is_enabled(dqopt, type)) - return -EBUSY; + if (!fmt) + return -ESRCH; + if (is_enabled(dqopt, type)) { + error = -EBUSY; + goto out_fmt; + } down(&dqopt->dqoff_sem); - tmp = getname(path); - error = PTR_ERR(tmp); - if (IS_ERR(tmp)) - goto out_lock; - f = filp_open(tmp, O_RDWR, 0600); - putname(tmp); + f = filp_open(path, O_RDWR, 0600); error = PTR_ERR(f); if (IS_ERR(f)) goto out_lock; + dqopt->files[type] = f; error = -EIO; if (!f->f_op || !f->f_op->read || !f->f_op->write) goto out_f; 
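
vfs_quota_on() now takes a format id along with the path. The new sys_quotactl() that dispatches into these methods is not part of this hunk; from user space, turning quotas on is expected to look roughly like this (the paths are illustrative and the header carrying the new ABI is an assumption):

	#include <sys/types.h>
	#include <sys/quota.h>	/* QCMD, Q_QUOTAON; assumed to carry the new ABI */

	int example_quotaon(void)
	{
		return quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/hda1",
				QFMT_VFS_V0, (caddr_t) "/aquota.user");
	}
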
@@ -1345,134 +1332,198 @@ if (!S_ISREG(inode->i_mode)) goto out_f; error = -EINVAL; - if (inode->i_size == 0 || !check_quotafile_size(inode->i_size)) + if (!fmt->qf_ops->check_quota_file(sb, type)) goto out_f; /* We don't want quota on quota files */ dquot_drop(inode); inode->i_flags |= S_NOQUOTA; - dqopt->files[type] = f; - sb->dq_op = &dquot_operations; + dqopt->ops[type] = fmt->qf_ops; + dqopt->info[type].dqi_format = fmt; + if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) + goto out_f; set_enable_flags(dqopt, type); - dquot = dqget(sb, 0, type); - dqopt->inode_expire[type] = (dquot != NODQUOT) ? dquot->dq_itime : MAX_IQ_TIME; - dqopt->block_expire[type] = (dquot != NODQUOT) ? dquot->dq_btime : MAX_DQ_TIME; - dqput(dquot); - add_dquot_ref(sb, type); up(&dqopt->dqoff_sem); return 0; out_f: - filp_close(f, NULL); + if (f) + filp_close(f, NULL); + dqopt->files[type] = NULL; out_lock: up(&dqopt->dqoff_sem); +out_fmt: + put_quota_format(fmt); return error; } -/* - * This is the system call interface. This communicates with - * the user-level programs. Currently this only supports diskquota - * calls. Maybe we need to add the process quotas etc. in the future, - * but we probably should use rlimits for that. - */ -asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr) +/* Generic routine for getting common part of quota structure */ +static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) { - int cmds = 0, type = 0, flags = 0; - kdev_t dev; - struct super_block *sb = NULL; - int ret = -EINVAL; + struct mem_dqblk *dm = &dquot->dq_dqb; - lock_kernel(); - cmds = cmd >> SUBCMDSHIFT; - type = cmd & SUBCMDMASK; + di->dqb_bhardlimit = dm->dqb_bhardlimit; + di->dqb_bsoftlimit = dm->dqb_bsoftlimit; + di->dqb_curspace = dm->dqb_curspace; + di->dqb_ihardlimit = dm->dqb_ihardlimit; + di->dqb_isoftlimit = dm->dqb_isoftlimit; + di->dqb_curinodes = dm->dqb_curinodes; + di->dqb_btime = dm->dqb_btime; + di->dqb_itime = dm->dqb_itime; + di->dqb_valid = QIF_ALL; +} - if ((u_int) type >= MAXQUOTAS) - goto out; - if (id & ~0xFFFF) - goto out; +int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +{ + struct dquot *dquot = dqget(sb, id, type); - ret = -EPERM; - switch (cmds) { - case Q_SYNC: - case Q_GETSTATS: - break; - case Q_GETQUOTA: - if (((type == USRQUOTA && current->euid != id) || - (type == GRPQUOTA && !in_egroup_p(id))) && - !capable(CAP_SYS_ADMIN)) - goto out; - break; - default: - if (!capable(CAP_SYS_ADMIN)) - goto out; - } - - ret = -EINVAL; - dev = NODEV; - if (special != NULL || (cmds != Q_SYNC && cmds != Q_GETSTATS)) { - mode_t mode; - struct nameidata nd; - - ret = user_path_walk(special, &nd); - if (ret) - goto out; - - dev = nd.dentry->d_inode->i_rdev; - mode = nd.dentry->d_inode->i_mode; - path_release(&nd); - - ret = -ENOTBLK; - if (!S_ISBLK(mode)) - goto out; - ret = -ENODEV; - sb = get_super(dev); - if (!sb) - goto out; - } - - ret = -EINVAL; - switch (cmds) { - case Q_QUOTAON: - ret = quota_on(sb, type, (char *) addr); - goto out; - case Q_QUOTAOFF: - ret = quota_off(sb, type); - goto out; - case Q_GETQUOTA: - ret = get_quota(sb, id, type, (struct dqblk *) addr); - goto out; - case Q_SETQUOTA: - flags |= SET_QUOTA; - break; - case Q_SETUSE: - flags |= SET_USE; - break; - case Q_SETQLIM: - flags |= SET_QLIMIT; - break; - case Q_SYNC: - ret = sync_dquots(dev, type); - goto out; - case Q_GETSTATS: - ret = get_stats(addr); - goto out; - case Q_RSQUASH: - ret = quota_root_squash(sb, type, (int *) addr); - goto out; 
- default: - goto out; - } - - ret = -NODEV; - if (sb && sb_has_quota_enabled(sb, type)) - ret = set_dqblk(sb, id, type, flags, (struct dqblk *) addr); -out: - if (sb) - drop_super(sb); - unlock_kernel(); - return ret; + if (!dquot) + return -EINVAL; + do_get_dqblk(dquot, di); + dqput(dquot); + return 0; } + +/* Generic routine for setting common part of quota structure */ +static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) +{ + struct mem_dqblk *dm = &dquot->dq_dqb; + int check_blim = 0, check_ilim = 0; + + if (di->dqb_valid & QIF_SPACE) { + dm->dqb_curspace = di->dqb_curspace; + check_blim = 1; + } + if (di->dqb_valid & QIF_BLIMITS) { + dm->dqb_bsoftlimit = di->dqb_bsoftlimit; + dm->dqb_bhardlimit = di->dqb_bhardlimit; + check_blim = 1; + } + if (di->dqb_valid & QIF_INODES) { + dm->dqb_curinodes = di->dqb_curinodes; + check_ilim = 1; + } + if (di->dqb_valid & QIF_ILIMITS) { + dm->dqb_isoftlimit = di->dqb_isoftlimit; + dm->dqb_ihardlimit = di->dqb_ihardlimit; + check_ilim = 1; + } + if (di->dqb_valid & QIF_BTIME) + dm->dqb_btime = di->dqb_btime; + if (di->dqb_valid & QIF_ITIME) + dm->dqb_itime = di->dqb_itime; + + if (check_blim) { + if (!dm->dqb_bsoftlimit || toqb(dm->dqb_curspace) < dm->dqb_bsoftlimit) { + dm->dqb_btime = 0; + dquot->dq_flags &= ~DQ_BLKS; + } + else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ + dm->dqb_btime = CURRENT_TIME + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; + } + if (check_ilim) { + if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { + dm->dqb_itime = 0; + dquot->dq_flags &= ~DQ_INODES; + } + else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ + dm->dqb_itime = CURRENT_TIME + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; + } + if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) + dquot->dq_flags &= ~DQ_FAKE; + else + dquot->dq_flags |= DQ_FAKE; + dquot->dq_flags |= DQ_MOD; +} + +int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +{ + struct dquot *dquot = dqget(sb, id, type); + + if (!dquot) + return -EINVAL; + do_set_dqblk(dquot, di); + dqput(dquot); + return 0; +} + +/* Generic routine for getting common part of quota file information */ +int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +{ + struct mem_dqinfo *mi = sb_dqopt(sb)->info + type; + + ii->dqi_bgrace = mi->dqi_bgrace; + ii->dqi_igrace = mi->dqi_igrace; + ii->dqi_flags = mi->dqi_flags & DQF_MASK; + ii->dqi_valid = IIF_ALL; + return 0; +} + +/* Generic routine for setting common part of quota file information */ +int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +{ + struct mem_dqinfo *mi = sb_dqopt(sb)->info + type; + + if (ii->dqi_valid & IIF_BGRACE) + mi->dqi_bgrace = ii->dqi_bgrace; + if (ii->dqi_valid & IIF_IGRACE) + mi->dqi_igrace = ii->dqi_igrace; + if (ii->dqi_valid & IIF_FLAGS) + mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); + mark_info_dirty(mi); + return 0; +} + +struct quotactl_ops vfs_quotactl_ops = { + quota_on: vfs_quota_on, + quota_off: vfs_quota_off, + quota_sync: vfs_quota_sync, + get_info: vfs_get_dqinfo, + set_info: vfs_set_dqinfo, + get_dqblk: vfs_get_dqblk, + set_dqblk: vfs_set_dqblk +}; + +static ctl_table fs_dqstats_table[] = { + {FS_DQ_LOOKUPS, "lookups", &dqstats.lookups, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_DROPS, "drops", &dqstats.drops, sizeof(int), 0444, NULL, 
&proc_dointvec}, + {FS_DQ_READS, "reads", &dqstats.reads, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_WRITES, "writes", &dqstats.writes, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_CACHE_HITS, "cache_hits", &dqstats.cache_hits, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_ALLOCATED, "allocated_dquots", &dqstats.allocated_dquots, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_FREE, "free_dquots", &dqstats.free_dquots, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_SYNCS, "syncs", &dqstats.syncs, sizeof(int), 0444, NULL, &proc_dointvec}, + {}, +}; + +static ctl_table fs_table[] = { + {FS_DQSTATS, "quota", NULL, 0, 0555, fs_dqstats_table}, + {}, +}; + +static ctl_table sys_table[] = { + {CTL_FS, "fs", NULL, 0, 0555, fs_table}, + {}, +}; + +static int __init dquot_init(void) +{ + int i; + + register_sysctl_table(sys_table, 0); + for (i = 0; i < NR_DQHASH; i++) + INIT_LIST_HEAD(dquot_hash + i); + printk(KERN_NOTICE "VFS: Disk quotas v%s\n", __DQUOT_VERSION__); + + return 0; +} +__initcall(dquot_init); + +EXPORT_SYMBOL(register_quota_format); +EXPORT_SYMBOL(unregister_quota_format); +EXPORT_SYMBOL(dqstats); +EXPORT_SYMBOL(init_dquot_operations); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/exec.c linux.21rc5-ac2/fs/exec.c --- linux.21rc5/fs/exec.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/exec.c 2003-05-22 23:35:50.000000000 +0100 @@ -50,6 +50,7 @@ int core_uses_pid; char core_pattern[65] = "core"; +int core_setuid_ok = 0; /* The maximal length of core_pattern is also specified in sysctl.c */ static struct linux_binfmt *formats; @@ -333,6 +334,12 @@ if (!mpnt) return -ENOMEM; + if (!vm_enough_memory((STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))>>PAGE_SHIFT)) + { + kmem_cache_free(vm_area_cachep, mpnt); + return -ENOMEM; + } + down_write(¤t->mm->mmap_sem); { mpnt->vm_mm = current->mm; @@ -1082,13 +1089,18 @@ struct file * file; struct inode * inode; int retval = 0; + int fsuid = current->fsuid; lock_kernel(); binfmt = current->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; if (!is_dumpable(current)) - goto fail; + { + if(!core_setuid_ok || !current->task_dumpable) + goto fail; + current->fsuid = 0; + } current->mm->dumpable = 0; if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump) goto fail; @@ -1117,6 +1129,8 @@ close_fail: filp_close(file, NULL); fail: + if (fsuid != current->fsuid) + current->fsuid = fsuid; unlock_kernel(); return retval; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/ext2/balloc.c linux.21rc5-ac2/fs/ext2/balloc.c --- linux.21rc5/fs/ext2/balloc.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/ext2/balloc.c 2003-04-22 17:06:33.000000000 +0100 @@ -520,7 +520,7 @@ in_range (tmp, le32_to_cpu(gdp->bg_inode_table), EXT2_SB(sb)->s_itb_per_group)) { ext2_error (sb, "ext2_new_block", - "Allocating block in system zone - block = %lu", + "Allocating block in system zone - block = %u", tmp); ext2_set_bit(j, bh->b_data); DQUOT_FREE_BLOCK(inode, 1); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/ext2/inode.c linux.21rc5-ac2/fs/ext2/inode.c --- linux.21rc5/fs/ext2/inode.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/ext2/inode.c 2003-04-22 16:44:38.000000000 +0100 @@ -592,8 +592,9 @@ { return generic_block_bmap(mapping,block,ext2_get_block); } -static int ext2_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize) +static int ext2_direct_IO(int rw, struct 
file * filp, struct kiobuf * iobuf, unsigned long blocknr, int blocksize) { + struct inode * inode = filp->f_dentry->d_inode->i_mapping->host; return generic_direct_IO(rw, inode, iobuf, blocknr, blocksize, ext2_get_block); } struct address_space_operations ext2_aops = { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/ext3/super.c linux.21rc5-ac2/fs/ext3/super.c --- linux.21rc5/fs/ext3/super.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/ext3/super.c 2003-05-20 20:17:48.000000000 +0100 @@ -448,6 +448,9 @@ return; } +static struct dquot_operations ext3_qops; +static int (*old_sync_dquot)(struct dquot *dquot); + static struct super_operations ext3_sops = { read_inode: ext3_read_inode, /* BKL held */ write_inode: ext3_write_inode, /* BKL not held. Don't need */ @@ -1128,6 +1131,7 @@ * set up enough so that it can read an inode */ sb->s_op = &ext3_sops; + sb->dq_op = &ext3_qops; INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ sb->s_root = 0; @@ -1758,10 +1762,65 @@ return 0; } +/* Helper function for writing quotas on sync - we need to start a transaction before the quota file + * is locked for write. Otherwise there are possible deadlocks: + * Process 1 Process 2 + * ext3_create() quota_sync() + * journal_start() write_dquot() + * DQUOT_INIT() down(dqio_sem) + * down(dqio_sem) journal_start() + * + */ + +#ifdef CONFIG_QUOTA + +/* Blocks: (2 data blocks) * (3 indirect + 1 descriptor + 1 bitmap) + superblock */ +#define EXT3_OLD_QFMT_BLOCKS 11 +/* Blocks: quota info + (4 pointer blocks + 1 entry block) * (3 indirect + 1 descriptor + 1 bitmap) + superblock */ +#define EXT3_V0_QFMT_BLOCKS 27 + +static int ext3_sync_dquot(struct dquot *dquot) +{ + int nblocks, ret; + handle_t *handle; + struct quota_info *dqops = sb_dqopt(dquot->dq_sb); + struct inode *qinode; + + switch (dqops->info[dquot->dq_type].dqi_format->qf_fmt_id) { + case QFMT_VFS_OLD: + nblocks = EXT3_OLD_QFMT_BLOCKS; + break; + case QFMT_VFS_V0: + nblocks = EXT3_V0_QFMT_BLOCKS; + break; + default: + nblocks = EXT3_MAX_TRANS_DATA; + } + lock_kernel(); + qinode = dqops->files[dquot->dq_type]->f_dentry->d_inode; + handle = ext3_journal_start(qinode, nblocks); + if (IS_ERR(handle)) { + unlock_kernel(); + return PTR_ERR(handle); + } + unlock_kernel(); + ret = old_sync_dquot(dquot); + lock_kernel(); + ret = ext3_journal_stop(handle, qinode); + unlock_kernel(); + return ret; +} +#endif + static DECLARE_FSTYPE_DEV(ext3_fs_type, "ext3", ext3_read_super); static int __init init_ext3_fs(void) { +#ifdef CONFIG_QUOTA + init_dquot_operations(&ext3_qops); + old_sync_dquot = ext3_qops.sync_dquot; + ext3_qops.sync_dquot = ext3_sync_dquot; +#endif return register_filesystem(&ext3_fs_type); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/fat/inode.c linux.21rc5-ac2/fs/fat/inode.c --- linux.21rc5/fs/fat/inode.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/fat/inode.c 2003-04-23 14:32:48.000000000 +0100 @@ -637,6 +637,7 @@ sbi->cluster_bits = ffs(logical_sector_size * sbi->cluster_size) - 1; sbi->fats = b->fats; sbi->fat_start = CF_LE_W(b->reserved); + sbi->prev_free = 0; if (!b->fat_length && b->fat32_length) { struct fat_boot_fsinfo *fsinfo; struct buffer_head *fsinfo_bh; @@ -675,6 +676,7 @@ sbi->fsinfo_sector); } else { sbi->free_clusters = CF_LE_L(fsinfo->free_clusters); + sbi->prev_free = CF_LE_L(fsinfo->next_cluster); } if (fsinfo_block != 0) @@ -754,7 +756,6 @@ sb->s_magic = MSDOS_SUPER_MAGIC; /* set up enough so that it can read an inode */
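
The two transaction-credit macros in the ext3 hunk above follow directly from the comments beside them: each block actually written may cost up to three indirect blocks plus a group descriptor and a block bitmap, so

	EXT3_OLD_QFMT_BLOCKS: 2 * (3 + 1 + 1) + 1 = 11   (two data blocks at five credits each, plus the superblock)
	EXT3_V0_QFMT_BLOCKS:  1 + (4 + 1) * (3 + 1 + 1) + 1 = 27   (quota info block, five tree blocks at five credits each, plus the superblock)

The default case falls back to EXT3_MAX_TRANS_DATA for formats the filesystem does not know about.
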
init_MUTEX(&sbi->fat_lock); - sbi->prev_free = 0; cp = opts.codepage ? opts.codepage : 437; sprintf(buf, "cp%d", cp); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/fat/misc.c linux.21rc5-ac2/fs/fat/misc.c --- linux.21rc5/fs/fat/misc.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/fat/misc.c 2003-04-23 14:32:48.000000000 +0100 @@ -108,6 +108,7 @@ return; } fsinfo->free_clusters = CF_LE_L(MSDOS_SB(sb)->free_clusters); + fsinfo->next_cluster = CF_LE_L(MSDOS_SB(sb)->prev_free); fat_mark_buffer_dirty(sb, bh); fat_brelse(sb, bh); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/inode.c linux.21rc5-ac2/fs/inode.c --- linux.21rc5/fs/inode.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/inode.c 2003-04-28 17:50:04.000000000 +0100 @@ -832,6 +832,20 @@ return inode; } +void unlock_new_inode(struct inode *inode) +{ + /* + * This is special! We do not need the spinlock + * when clearing I_LOCK, because we're guaranteed + * that nobody else tries to do anything about the + * state of the inode when it is locked, as we + * just created it (so there can be no old holders + * that haven't tested I_LOCK). + */ + inode->i_state &= ~(I_LOCK|I_NEW); + wake_up(&inode->i_wait); +} + /* * This is called without the inode lock held.. Be careful. * @@ -854,31 +868,13 @@ list_add(&inode->i_list, &inode_in_use); list_add(&inode->i_hash, head); inode->i_ino = ino; - inode->i_state = I_LOCK; + inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); - /* reiserfs specific hack right here. We don't - ** want this to last, and are looking for VFS changes - ** that will allow us to get rid of it. - ** -- mason@suse.com - */ - if (sb->s_op->read_inode2) { - sb->s_op->read_inode2(inode, opaque) ; - } else { - sb->s_op->read_inode(inode); - } - /* - * This is special! We do not need the spinlock - * when clearing I_LOCK, because we're guaranteed - * that nobody else tries to do anything about the - * state of the inode when it is locked, as we - * just created it (so there can be no old holders - * that haven't tested I_LOCK). 
+ * Return the locked inode with I_NEW set, the + * caller is responsible for filling in the contents */ - inode->i_state &= ~I_LOCK; - wake_up(&inode->i_wait); - return inode; } @@ -958,8 +954,7 @@ return inode; } - -struct inode *iget4(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque) +struct inode *iget4_locked(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque) { struct list_head * head = inode_hashtable + hash(sb,ino); struct inode * inode; @@ -1201,9 +1196,9 @@ /* Functions back in dquot.c */ void put_dquot_list(struct list_head *); -int remove_inode_dquot_ref(struct inode *, short, struct list_head *); +int remove_inode_dquot_ref(struct inode *, int, struct list_head *); -void remove_dquot_ref(struct super_block *sb, short type) +void remove_dquot_ref(struct super_block *sb, int type) { struct inode *inode; struct list_head *act_head; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/iobuf.c linux.21rc5-ac2/fs/iobuf.c --- linux.21rc5/fs/iobuf.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/iobuf.c 2003-04-22 16:44:38.000000000 +0100 @@ -8,8 +8,6 @@ #include #include -#include - static kmem_cache_t *kiobuf_cachep; @@ -27,6 +25,8 @@ static int kiobuf_init(struct kiobuf *iobuf) { + int retval; + init_waitqueue_head(&iobuf->wait_queue); iobuf->array_len = 0; iobuf->nr_pages = 0; @@ -35,7 +35,16 @@ iobuf->blocks = NULL; atomic_set(&iobuf->io_count, 0); iobuf->end_io = NULL; - return expand_kiobuf(iobuf, KIO_STATIC_PAGES); + iobuf->initialized = 0; + retval = expand_kiobuf(iobuf, KIO_STATIC_PAGES); + if (retval) return retval; + retval = alloc_kiobuf_bhs(iobuf); + if (retval) { + kfree(iobuf->maplist); + return retval; + } + iobuf->initialized = 1; + return 0; } int alloc_kiobuf_bhs(struct kiobuf * kiobuf) @@ -89,6 +98,21 @@ } } +void kiobuf_ctor(void * objp, kmem_cache_t * cachep, unsigned long flag) +{ + struct kiobuf * iobuf = (struct kiobuf *) objp; + kiobuf_init(iobuf); +} + +void kiobuf_dtor(void * objp, kmem_cache_t * cachep, unsigned long flag) +{ + struct kiobuf * iobuf = (struct kiobuf *) objp; + if (iobuf->initialized) { + kfree(iobuf->maplist); + free_kiobuf_bhs(iobuf); + } +} + int alloc_kiovec(int nr, struct kiobuf **bufp) { int i; @@ -98,10 +122,11 @@ iobuf = kmem_cache_alloc(kiobuf_cachep, GFP_KERNEL); if (unlikely(!iobuf)) goto nomem; - if (unlikely(kiobuf_init(iobuf))) - goto nomem2; - if (unlikely(alloc_kiobuf_bhs(iobuf))) - goto nomem2; + if (unlikely(!iobuf->initialized)) { + /* try again to complete previously failed ctor */ + if (unlikely(kiobuf_init(iobuf))) + goto nomem2; + } bufp[i] = iobuf; } @@ -121,11 +146,10 @@ for (i = 0; i < nr; i++) { iobuf = bufp[i]; - if (iobuf->locked) - unlock_kiovec(1, &iobuf); - kfree(iobuf->maplist); - free_kiobuf_bhs(iobuf); - kmem_cache_free(kiobuf_cachep, bufp[i]); + init_waitqueue_head(&iobuf->wait_queue); + iobuf->io_count.counter = 0; + iobuf->end_io = NULL; + kmem_cache_free(kiobuf_cachep, iobuf); } } @@ -180,7 +204,7 @@ void __init iobuf_cache_init(void) { kiobuf_cachep = kmem_cache_create("kiobuf", sizeof(struct kiobuf), - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + 0, SLAB_HWCACHE_ALIGN, kiobuf_ctor, kiobuf_dtor); if (!kiobuf_cachep) panic("Cannot create kiobuf SLAB cache"); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/ioctl.c linux.21rc5-ac2/fs/ioctl.c --- linux.21rc5/fs/ioctl.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/ioctl.c 2003-04-22 16:44:38.000000000 +0100 @@ -101,6 
+101,16 @@ filp->f_flags &= ~FASYNC; break; + case FIOQSIZE: + if (S_ISDIR(filp->f_dentry->d_inode->i_mode) || + S_ISREG(filp->f_dentry->d_inode->i_mode) || + S_ISLNK(filp->f_dentry->d_inode->i_mode)) { + loff_t res = inode_get_bytes(filp->f_dentry->d_inode); + error = copy_to_user((loff_t *)arg, &res, sizeof(res)) ? -EFAULT : 0; + } + else + error = -ENOTTY; + break; default: error = -ENOTTY; if (S_ISREG(filp->f_dentry->d_inode->i_mode)) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jbd/journal.c linux.21rc5-ac2/fs/jbd/journal.c --- linux.21rc5/fs/jbd/journal.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/jbd/journal.c 2003-04-22 16:44:38.000000000 +0100 @@ -1802,9 +1802,9 @@ if (buffer_jbd(bh)) { /* Someone did it for us! */ - J_ASSERT_BH(bh, bh->b_private != NULL); + J_ASSERT_BH(bh, bh->b_journal_head != NULL); journal_free_journal_head(jh); - jh = bh->b_private; + jh = bh->b_journal_head; } else { /* * We actually don't need jh_splice_lock when @@ -1812,7 +1812,7 @@ */ spin_lock(&jh_splice_lock); set_bit(BH_JBD, &bh->b_state); - bh->b_private = jh; + bh->b_journal_head = jh; jh->b_bh = bh; atomic_inc(&bh->b_count); spin_unlock(&jh_splice_lock); @@ -1821,7 +1821,7 @@ } jh->b_jcount++; spin_unlock(&journal_datalist_lock); - return bh->b_private; + return bh->b_journal_head; } /* @@ -1854,7 +1854,7 @@ J_ASSERT_BH(bh, jh2bh(jh) == bh); BUFFER_TRACE(bh, "remove journal_head"); spin_lock(&jh_splice_lock); - bh->b_private = NULL; + bh->b_journal_head = NULL; jh->b_bh = NULL; /* debug, really */ clear_bit(BH_JBD, &bh->b_state); __brelse(bh); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jbd/transaction.c linux.21rc5-ac2/fs/jbd/transaction.c --- linux.21rc5/fs/jbd/transaction.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/jbd/transaction.c 2003-04-22 16:44:38.000000000 +0100 @@ -1440,6 +1440,7 @@ if (handle->h_sync) { do { old_handle_count = transaction->t_handle_count; + set_current_state(TASK_RUNNING); yield(); } while (old_handle_count != transaction->t_handle_count); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/background.c linux.21rc5-ac2/fs/jffs2/background.c --- linux.21rc5/fs/jffs2/background.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/background.c 2003-04-22 16:44:38.000000000 +0100 @@ -106,9 +106,6 @@ sprintf(current->comm, "jffs2_gcd_mtd%d", c->mtd->index); - /* FIXME in the 2.2 backport */ - current->nice = 10; - for (;;) { spin_lock_irq(¤t->sigmask_lock); siginitsetinv (¤t->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT)); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/crc32.c linux.21rc5-ac2/fs/jffs2/crc32.c --- linux.21rc5/fs/jffs2/crc32.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/crc32.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,97 +0,0 @@ -/* - * COPYRIGHT (C) 1986 Gary S. Brown. You may use this program, or - * code or tables extracted from it, as desired without restriction. - * - * First, the polynomial itself and its table of feedback terms. The - * polynomial is - * X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0 - * - * Note that we take it "backwards" and put the highest-order term in - * the lowest-order bit. The X^32 term is "implied"; the LSB is the - * X^31 term, etc. 
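[The FIOQSIZE hunk at the top of this chunk gives userspace a way to ask how much space an object actually consumes, via inode_get_bytes(), for regular files, directories and symlinks, returning -ENOTTY for anything else. Assuming the FIOQSIZE constant is exported by the patched headers, an application would use it like this (a sketch; error handling abbreviated):

#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	long long size;			/* matches the kernel's loff_t */
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	if (ioctl(fd, FIOQSIZE, &size) < 0) {
		perror("FIOQSIZE");
		return 1;
	}
	printf("%s occupies %lld bytes\n", argv[1], size);
	close(fd);
	return 0;
}]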
The X^0 term (usually shown as "+1") results in - * the MSB being 1 - * - * Note that the usual hardware shift register implementation, which - * is what we're using (we're merely optimizing it by doing eight-bit - * chunks at a time) shifts bits into the lowest-order term. In our - * implementation, that means shifting towards the right. Why do we - * do it this way? Because the calculated CRC must be transmitted in - * order from highest-order term to lowest-order term. UARTs transmit - * characters in order from LSB to MSB. By storing the CRC this way - * we hand it to the UART in the order low-byte to high-byte; the UART - * sends each low-bit to hight-bit; and the result is transmission bit - * by bit from highest- to lowest-order term without requiring any bit - * shuffling on our part. Reception works similarly - * - * The feedback terms table consists of 256, 32-bit entries. Notes - * - * The table can be generated at runtime if desired; code to do so - * is shown later. It might not be obvious, but the feedback - * terms simply represent the results of eight shift/xor opera - * tions for all combinations of data and CRC register values - * - * The values must be right-shifted by eight bits by the "updcrc - * logic; the shift must be unsigned (bring in zeroes). On some - * hardware you could probably optimize the shift in assembler by - * using byte-swap instructions - * polynomial $edb88320 - */ - -/* $Id: crc32.c,v 1.3 2001/02/07 16:45:32 dwmw2 Exp $ */ - -#include "crc32.h" - -const __u32 crc32_table[256] = { - 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, - 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, - 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, - 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, - 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, - 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, - 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, - 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, - 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, - 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, - 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, - 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, - 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, - 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, - 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, - 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, - 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, - 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, - 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, - 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, - 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, - 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, - 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, - 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, - 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, - 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, - 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, - 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, - 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, - 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 
0x806567cbL, - 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, - 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, - 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, - 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, - 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, - 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, - 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, - 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, - 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, - 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, - 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, - 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, - 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, - 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, - 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, - 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, - 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, - 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, - 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, - 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, - 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, - 0x2d02ef8dL -}; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/crc32.h linux.21rc5-ac2/fs/jffs2/crc32.h --- linux.21rc5/fs/jffs2/crc32.h 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/crc32.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -#ifndef CRC32_H -#define CRC32_H - -/* $Id: crc32.h,v 1.3 2001/02/26 14:44:37 dwmw2 Exp $ */ - -#include - -extern const __u32 crc32_table[256]; - -/* Return a 32-bit CRC of the contents of the buffer. 
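[The crc32.c/crc32.h deletions here retire JFFS2's private copy of the classic Gary S. Brown CRC table; the hunks that follow switch every JFFS2 source file to the shared <linux/crc32.h>, and a later fs/Makefile.lib hunk links the common crc32.o in whenever JFFS2 or EFI partition support is configured. Call sites are untouched by the conversion, which implies the shared routine keeps the seed/buffer/length convention of the deleted inline; a sketch of a call under that assumption:

#include <linux/crc32.h>

/* same calling convention as the removed static inline:
 * running CRC in, buffer and length, updated CRC out */
__u32 crc = crc32(0, buf, len);]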
*/ - -static inline __u32 -crc32(__u32 val, const void *ss, int len) -{ - const unsigned char *s = ss; - while (--len >= 0) - val = crc32_table[(val ^ *s++) & 0xff] ^ (val >> 8); - return val; -} - -#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/dir.c linux.21rc5-ac2/fs/jffs2/dir.c --- linux.21rc5/fs/jffs2/dir.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/dir.c 2003-05-20 20:21:28.000000000 +0100 @@ -43,7 +43,7 @@ #include #include #include "nodelist.h" -#include "crc32.h" +#include static int jffs2_readdir (struct file *, void *, filldir_t); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/erase.c linux.21rc5-ac2/fs/jffs2/erase.c --- linux.21rc5/fs/jffs2/erase.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/erase.c 2003-05-20 20:21:28.000000000 +0100 @@ -40,7 +40,7 @@ #include #include #include "nodelist.h" -#include "crc32.h" +#include struct erase_priv_struct { struct jffs2_eraseblock *jeb; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/file.c linux.21rc5-ac2/fs/jffs2/file.c --- linux.21rc5/fs/jffs2/file.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/file.c 2003-05-20 20:21:28.000000000 +0100 @@ -42,7 +42,7 @@ #include #include #include "nodelist.h" -#include "crc32.h" +#include extern int generic_file_open(struct inode *, struct file *) __attribute__((weak)); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) __attribute__((weak)); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/gc.c linux.21rc5-ac2/fs/jffs2/gc.c --- linux.21rc5/fs/jffs2/gc.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/gc.c 2003-05-20 20:21:28.000000000 +0100 @@ -43,7 +43,7 @@ #include #include #include "nodelist.h" -#include "crc32.h" +#include static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct inode *inode, struct jffs2_full_dnode *fd); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/Makefile linux.21rc5-ac2/fs/jffs2/Makefile --- linux.21rc5/fs/jffs2/Makefile 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/Makefile 2003-05-20 20:21:28.000000000 +0100 @@ -12,7 +12,7 @@ COMPR_OBJS := compr.o compr_rubin.o compr_rtime.o pushpull.o \ compr_zlib.o -JFFS2_OBJS := crc32.o dir.o file.o ioctl.o nodelist.o malloc.o \ +JFFS2_OBJS := dir.o file.o ioctl.o nodelist.o malloc.o \ read.o nodemgmt.o readinode.o super.o write.o scan.o gc.o \ symlink.o build.o erase.o background.o diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/read.c linux.21rc5-ac2/fs/jffs2/read.c --- linux.21rc5/fs/jffs2/read.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/read.c 2003-05-20 20:21:28.000000000 +0100 @@ -40,7 +40,7 @@ #include #include #include "nodelist.h" -#include "crc32.h" +#include int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_full_dnode *fd, unsigned char *buf, int ofs, int len) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/readinode.c linux.21rc5-ac2/fs/jffs2/readinode.c --- linux.21rc5/fs/jffs2/readinode.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/readinode.c 2003-05-20 20:21:28.000000000 +0100 @@ -44,7 +44,7 @@ #include #include #include "nodelist.h" -#include "crc32.h" +#include D1(void jffs2_print_frag_list(struct jffs2_inode_info *f) diff 
--exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/scan.c linux.21rc5-ac2/fs/jffs2/scan.c --- linux.21rc5/fs/jffs2/scan.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/scan.c 2003-05-20 20:21:28.000000000 +0100 @@ -40,7 +40,7 @@ #include #include #include "nodelist.h" -#include "crc32.h" +#include #define DIRTY_SPACE(x) do { typeof(x) _x = (x); \ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jffs2/write.c linux.21rc5-ac2/fs/jffs2/write.c --- linux.21rc5/fs/jffs2/write.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/jffs2/write.c 2003-05-20 20:21:28.000000000 +0100 @@ -40,7 +40,7 @@ #include #include #include "nodelist.h" -#include "crc32.h" +#include /* jffs2_new_inode: allocate a new inode and inocache, add it to the hash, fill in the raw_inode while you're at it. */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/jfs/inode.c linux.21rc5-ac2/fs/jfs/inode.c --- linux.21rc5/fs/jfs/inode.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/jfs/inode.c 2003-04-22 16:44:38.000000000 +0100 @@ -327,9 +327,10 @@ return generic_block_bmap(mapping, block, jfs_get_block); } -static int jfs_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf, +static int jfs_direct_IO(int rw, struct file *filp, struct kiobuf *iobuf, unsigned long blocknr, int blocksize) { + struct inode * inode = filp->f_dentry->d_inode->i_mapping->host; return generic_direct_IO(rw, inode, iobuf, blocknr, blocksize, jfs_get_block); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/lockd/svclock.c linux.21rc5-ac2/fs/lockd/svclock.c --- linux.21rc5/fs/lockd/svclock.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/lockd/svclock.c 2003-04-22 16:44:38.000000000 +0100 @@ -62,8 +62,8 @@ nlmsvc_remove_block(block); bp = &nlm_blocked; if (when != NLM_NEVER) { - if ((when += jiffies) == NLM_NEVER) - when ++; + if ((when += jiffies) > NLM_NEVER) + when = NLM_NEVER; while ((b = *bp) && time_before_eq(b->b_when,when)) bp = &b->b_next; } else @@ -176,8 +176,14 @@ struct nlm_rqst *call; /* Create host handle for callback */ + /* We must up the semaphore in case the host lookup does + * garbage collection (which calls nlmsvc_traverse_blocks), + * but this shouldn't be a problem because nlmsvc_lock has + * to retry the lock after this anyway */ + up(&file->f_sema); host = nlmclnt_lookup_host(&rqstp->rq_addr, rqstp->rq_prot, rqstp->rq_vers); + down(&file->f_sema); if (host == NULL) return NULL; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/lockd/svcproc.c linux.21rc5-ac2/fs/lockd/svcproc.c --- linux.21rc5/fs/lockd/svcproc.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/lockd/svcproc.c 2003-04-22 16:44:38.000000000 +0100 @@ -345,6 +345,15 @@ return stat; } +static int +nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, + void *resp) +{ + dprintk("lockd: GRANTED_RES called\n"); + nlmsvc_grant_reply(&argp->cookie, argp->status); + return 0; +} + /* * SHARE: create a DOS share or alter existing share. 
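[The lockd/svclock.c hunk above breaks a recursion deadlock: nlmclnt_lookup_host() may garbage-collect hosts, and that GC calls back into nlmsvc_traverse_blocks(), which needs the file->f_sema the caller is already holding. The fix is the classic drop-and-reacquire shape, safe here because (as the comment notes) nlmsvc_lock() has to retry afterwards anyway. The pattern in isolation, restated as a sketch rather than new code in the patch:

	up(&file->f_sema);	/* let host GC re-enter lockd safely */
	host = nlmclnt_lookup_host(&rqstp->rq_addr, rqstp->rq_prot, rqstp->rq_vers);
	down(&file->f_sema);	/* reacquired: anything f_sema guarded may have
				 * changed, so the caller must revalidate/retry */]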
*/ @@ -545,14 +554,12 @@ #define nlmsvc_decode_lockres nlmsvc_decode_void #define nlmsvc_decode_unlockres nlmsvc_decode_void #define nlmsvc_decode_cancelres nlmsvc_decode_void -#define nlmsvc_decode_grantedres nlmsvc_decode_void #define nlmsvc_proc_none nlmsvc_proc_null #define nlmsvc_proc_test_res nlmsvc_proc_null #define nlmsvc_proc_lock_res nlmsvc_proc_null #define nlmsvc_proc_cancel_res nlmsvc_proc_null #define nlmsvc_proc_unlock_res nlmsvc_proc_null -#define nlmsvc_proc_granted_res nlmsvc_proc_null struct nlm_void { int dummy; }; @@ -589,7 +596,7 @@ PROC(lock_res, lockres, norep, res, void, 1), PROC(cancel_res, cancelres, norep, res, void, 1), PROC(unlock_res, unlockres, norep, res, void, 1), - PROC(granted_res, grantedres, norep, res, void, 1), + PROC(granted_res, res, norep, res, void, 1), /* statd callback */ PROC(sm_notify, reboot, void, reboot, void, 1), PROC(none, void, void, void, void, 1), diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/locks.c linux.21rc5-ac2/fs/locks.c --- linux.21rc5/fs/locks.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/locks.c 2003-04-22 16:44:38.000000000 +0100 @@ -935,8 +935,11 @@ goto next_lock; /* If the next lock in the list has entirely bigger * addresses than the new one, insert the lock here. + * + * be careful if fl_end == OFFSET_MAX --okir */ - if (fl->fl_start > caller->fl_end + 1) + if (fl->fl_start > caller->fl_end + 1 + && caller->fl_end != OFFSET_MAX) break; /* If we come here, the new and old lock are of the diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/Makefile linux.21rc5-ac2/fs/Makefile --- linux.21rc5/fs/Makefile 2003-05-28 15:07:33.000000000 +0100 +++ linux.21rc5-ac2/fs/Makefile 2003-04-28 17:52:08.000000000 +0100 @@ -7,20 +7,19 @@ O_TARGET := fs.o -export-objs := filesystems.o open.o dcache.o buffer.o -mod-subdirs := nls +export-objs := filesystems.o open.o dcache.o buffer.o dquot.o +mod-subdirs := nls xfs obj-y := open.o read_write.o devices.o file_table.o buffer.o \ super.o block_dev.o char_dev.o stat.o exec.o pipe.o namei.o \ fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \ dcache.o inode.o attr.o bad_inode.o file.o iobuf.o dnotify.o \ - filesystems.o namespace.o seq_file.o xattr.o + filesystems.o namespace.o seq_file.o quota.o xattr.o -ifeq ($(CONFIG_QUOTA),y) -obj-y += dquot.o -else -obj-y += noquot.o -endif + +obj-$(CONFIG_QUOTA) += dquot.o +obj-$(CONFIG_QFMT_V1) += quota_v1.o +obj-$(CONFIG_QFMT_V2) += quota_v2.o subdir-$(CONFIG_PROC_FS) += proc subdir-y += partitions @@ -68,9 +67,11 @@ subdir-$(CONFIG_SUN_OPENPROMFS) += openpromfs subdir-$(CONFIG_BEFS_FS) += befs subdir-$(CONFIG_JFS_FS) += jfs +subdir-$(CONFIG_XFS_FS) += xfs obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o +obj-$(CONFIG_BINFMT_SOM) += binfmt_som.o obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/Makefile.lib linux.21rc5-ac2/fs/Makefile.lib --- linux.21rc5/fs/Makefile.lib 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/Makefile.lib 2003-05-20 20:21:28.000000000 +0100 @@ -0,0 +1,2 @@ +obj-$(CONFIG_JFFS2_FS) += crc32.o +obj-$(CONFIG_EFI_PARTITION) += crc32.o diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/dir.c linux.21rc5-ac2/fs/nfs/dir.c --- linux.21rc5/fs/nfs/dir.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/dir.c 2003-04-25 13:41:18.000000000 +0100 @@ -36,6 +36,8 @@ static int nfs_readdir(struct file *, void *, 
filldir_t); static struct dentry *nfs_lookup(struct inode *, struct dentry *); +static int nfs_cached_lookup(struct inode *, struct dentry *, + struct nfs_fh *, struct nfs_fattr *); static int nfs_create(struct inode *, struct dentry *, int); static int nfs_mkdir(struct inode *, struct dentry *, int); static int nfs_rmdir(struct inode *, struct dentry *); @@ -109,13 +111,15 @@ error = NFS_PROTO(inode)->readdir(inode, cred, desc->entry->cookie, page, NFS_SERVER(inode)->dtsize, desc->plus); /* We requested READDIRPLUS, but the server doesn't grok it */ - if (desc->plus && error == -ENOTSUPP) { - NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS; - desc->plus = 0; - goto again; - } - if (error < 0) + if (error < 0) { + if (error == -ENOTSUPP && desc->plus) { + NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS; + NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS; + desc->plus = 0; + goto again; + } goto error; + } SetPageUptodate(page); /* Ensure consistent page alignment of the data. * Note: assumes we have exclusive access to this mapping either @@ -194,8 +198,7 @@ dfprintk(VFS, "NFS: find_dirent_page() searching directory page %ld\n", desc->page_index); - desc->plus = NFS_USE_READDIRPLUS(inode); - page = read_cache_page(&inode->i_data, desc->page_index, + page = read_cache_page(inode->i_mapping, desc->page_index, (filler_t *)nfs_readdir_filler, desc); if (IS_ERR(page)) { status = PTR_ERR(page); @@ -246,6 +249,24 @@ return res; } +static unsigned int nfs_type2dtype[] = { + DT_UNKNOWN, + DT_REG, + DT_DIR, + DT_BLK, + DT_CHR, + DT_LNK, + DT_SOCK, + DT_UNKNOWN, + DT_FIFO +}; + +static inline +unsigned int nfs_type_to_d_type(enum nfs_ftype type) +{ + return nfs_type2dtype[type]; +} + /* * Once we've found the start of the dirent within a page: fill 'er up... */ @@ -262,11 +283,17 @@ dfprintk(VFS, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n", (long long)desc->target); for(;;) { + unsigned d_type = DT_UNKNOWN; /* Note: entry->prev_cookie contains the cookie for * retrieving the current dirent on the server */ fileid = nfs_fileid_to_ino_t(entry->ino); + + /* Use readdirplus info */ + if (desc->plus && (entry->fattr->valid & NFS_ATTR_FATTR)) + d_type = nfs_type_to_d_type(entry->fattr->type); + res = filldir(dirent, entry->name, entry->len, - entry->prev_cookie, fileid, DT_UNKNOWN); + entry->prev_cookie, fileid, d_type); if (res < 0) break; file->f_pos = desc->target = entry->cookie; @@ -333,7 +360,8 @@ /* Reset read descriptor so it searches the page cache from * the start upon the next call to readdir_search_pagecache() */ desc->page_index = 0; - memset(desc->entry, 0, sizeof(*desc->entry)); + desc->entry->cookie = desc->entry->prev_cookie = 0; + desc->entry->eof = 0; out: dfprintk(VFS, "NFS: uncached_readdir() returns %d\n", status); return status; @@ -352,9 +380,11 @@ nfs_readdir_descriptor_t my_desc, *desc = &my_desc; struct nfs_entry my_entry; + struct nfs_fh fh; + struct nfs_fattr fattr; long res; - res = nfs_revalidate(dentry); + res = nfs_revalidate_inode(NFS_SERVER(inode), inode); if (res < 0) return res; @@ -365,12 +395,16 @@ * itself. 
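[Because the readdir descriptor above now carries READDIRPLUS attributes (desc->plus plus a per-entry fattr), nfs_do_filldir() can report a real d_type instead of DT_UNKNOWN whenever the server supplied one. The user-visible effect is that readdir() consumers on NFS can filter by type without issuing a stat() per entry; a small userspace illustration (d_type is the BSD extension glibc exposes in struct dirent):

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *dir = opendir(".");
	struct dirent *de;

	while (dir && (de = readdir(dir)) != NULL)
		if (de->d_type != DT_UNKNOWN)	/* was always DT_UNKNOWN on NFS */
			printf("%-20s type %d known without stat()\n",
			       de->d_name, de->d_type);
	if (dir)
		closedir(dir);
	return 0;
}]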
*/ memset(desc, 0, sizeof(*desc)); - memset(&my_entry, 0, sizeof(my_entry)); - desc->file = filp; desc->target = filp->f_pos; - desc->entry = &my_entry; desc->decode = NFS_PROTO(inode)->decode_dirent; + desc->plus = NFS_USE_READDIRPLUS(inode); + + my_entry.cookie = my_entry.prev_cookie = 0; + my_entry.eof = 0; + my_entry.fh = &fh; + my_entry.fattr = &fattr; + desc->entry = &my_entry; while(!desc->entry->eof) { res = readdir_search_pagecache(desc); @@ -434,16 +468,9 @@ } static inline -int nfs_lookup_verify_inode(struct inode *inode, int flags) +int nfs_lookup_verify_inode(struct inode *inode) { - struct nfs_server *server = NFS_SERVER(inode); - /* - * If we're interested in close-to-open cache consistency, - * then we revalidate the inode upon lookup. - */ - if (!(server->flags & NFS_MOUNT_NOCTO) && !(flags & LOOKUP_CONTINUE)) - NFS_CACHEINV(inode); - return nfs_revalidate_inode(server, inode); + return nfs_revalidate_inode(NFS_SERVER(inode), inode); } /* @@ -497,11 +524,20 @@ /* Force a full look up iff the parent directory has changed */ if (nfs_check_verifier(dir, dentry)) { - if (nfs_lookup_verify_inode(inode, flags)) + if (nfs_lookup_verify_inode(inode)) goto out_bad; goto out_valid; } + error = nfs_cached_lookup(dir, dentry, &fhandle, &fattr); + if (!error) { + if (memcmp(NFS_FH(inode), &fhandle, sizeof(struct nfs_fh))!= 0) + goto out_bad; + if (nfs_lookup_verify_inode(inode)) + goto out_bad; + goto out_valid_renew; + } + if (NFS_STALE(inode)) goto out_bad; @@ -513,6 +549,7 @@ if ((error = nfs_refresh_inode(inode, &fattr)) != 0) goto out_bad; + out_valid_renew: nfs_renew_times(dentry); out_valid: unlock_kernel(); @@ -588,6 +625,18 @@ error = -ENOMEM; dentry->d_op = &nfs_dentry_operations; + error = nfs_cached_lookup(dir, dentry, &fhandle, &fattr); + if (!error) { + error = -EACCES; + inode = nfs_fhget(dentry, &fhandle, &fattr); + if (inode) { + d_add(dentry, inode); + nfs_renew_times(dentry); + error = 0; + } + goto out; + } + error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr); inode = NULL; if (error == -ENOENT) @@ -606,6 +655,79 @@ return ERR_PTR(error); } +static inline +int find_dirent_name(nfs_readdir_descriptor_t *desc, struct page *page, struct dentry *dentry) +{ + struct nfs_entry *entry = desc->entry; + int status; + + while((status = dir_decode(desc)) == 0) { + if (entry->len != dentry->d_name.len) + continue; + if (memcmp(entry->name, dentry->d_name.name, entry->len)) + continue; + if (!(entry->fattr->valid & NFS_ATTR_FATTR)) + continue; + break; + } + return status; +} + +/* + * Use the cached Readdirplus results in order to avoid a LOOKUP call + * whenever we believe that the parent directory has not changed. + * + * We assume that any file creation/rename changes the directory mtime. + * As this results in a page cache invalidation whenever it occurs, + * we don't require any other tests for cache coherency. 
+ */ +static +int nfs_cached_lookup(struct inode *dir, struct dentry *dentry, + struct nfs_fh *fh, struct nfs_fattr *fattr) +{ + nfs_readdir_descriptor_t desc; + struct nfs_server *server; + struct nfs_entry entry; + struct page *page; + unsigned long timestamp = NFS_MTIME_UPDATE(dir); + int res; + + if (!NFS_USE_READDIRPLUS(dir)) + return -ENOENT; + server = NFS_SERVER(dir); + if (server->flags & NFS_MOUNT_NOAC) + return -ENOENT; + nfs_revalidate_inode(server, dir); + + entry.fh = fh; + entry.fattr = fattr; + + desc.decode = NFS_PROTO(dir)->decode_dirent; + desc.entry = &entry; + desc.page_index = 0; + desc.plus = 1; + + for(;(page = find_get_page(dir->i_mapping, desc.page_index)); desc.page_index++) { + + res = -EIO; + if (Page_Uptodate(page)) { + desc.ptr = kmap(page); + res = find_dirent_name(&desc, page, dentry); + kunmap(page); + } + page_cache_release(page); + + if (res == 0) + goto out_found; + if (res != -EAGAIN) + break; + } + return -ENOENT; + out_found: + fattr->timestamp = timestamp; + return 0; +} + /* * Code common to create, mkdir, and mknod. */ @@ -613,7 +735,7 @@ struct nfs_fattr *fattr) { struct inode *inode; - int error = -EACCES; + int error = 0; if (fhandle->size == 0 || !(fattr->valid & NFS_ATTR_FATTR)) { struct inode *dir = dentry->d_parent->d_inode; @@ -625,9 +747,12 @@ if (inode) { d_instantiate(dentry, inode); nfs_renew_times(dentry); - error = 0; + } else { + error = -ENOMEM; + goto out_err; } return error; + out_err: d_drop(dentry); return error; @@ -1082,34 +1207,62 @@ int nfs_permission(struct inode *inode, int mask) { - int error = vfs_permission(inode, mask); - - if (!NFS_PROTO(inode)->access) - goto out; - - if (error == -EROFS) - goto out; - - /* - * Trust UNIX mode bits except: - * - * 1) When override capabilities may have been invoked - * 2) When root squashing may be involved - * 3) When ACLs may overturn a negative answer */ - if (!capable(CAP_DAC_OVERRIDE) && !capable(CAP_DAC_READ_SEARCH) - && (current->fsuid != 0) && (current->fsgid != 0) - && error != -EACCES) - goto out; + struct nfs_access_cache *cache = &NFS_I(inode)->cache_access; + struct rpc_cred *cred; + int mode = inode->i_mode; + int error; - error = NFS_PROTO(inode)->access(inode, mask, 0); + if (mask & MAY_WRITE) { + /* + * + * Nobody gets write access to a read-only fs. + * + */ + if (IS_RDONLY(inode) && + (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) + return -EROFS; - if (error == -EACCES && NFS_CLIENT(inode)->cl_droppriv && - current->uid != 0 && current->gid != 0 && - (current->fsuid != current->uid || current->fsgid != current->gid)) - error = NFS_PROTO(inode)->access(inode, mask, 1); + /* + * + * Nobody gets write access to an immutable file. + * + */ + if (IS_IMMUTABLE(inode)) + return -EACCES; + } - out: - return error; + if (!NFS_PROTO(inode)->access) + goto out_notsup; + cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0); + if (cache->cred == cred + && time_before(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode))) { + if (!cache->err) { + /* Is the mask a subset of an accepted mask? */ + if ((cache->mask & mask) == mask) + goto out_cached; + } else { + /* ...or is it a superset of a rejected mask? 
*/ + if ((cache->mask & mask) == cache->mask) + goto out_cached; + } + } + error = NFS_PROTO(inode)->access(inode, cred, mask); + if (!error || error == -EACCES) { + cache->jiffies = jiffies; + if (cache->cred) + put_rpccred(cache->cred); + cache->cred = cred; + cache->mask = mask; + cache->err = error; + return error; + } + put_rpccred(cred); +out_notsup: + nfs_revalidate_inode(NFS_SERVER(inode), inode); + return vfs_permission(inode, mask); +out_cached: + put_rpccred(cred); + return cache->err; } /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/direct.c linux.21rc5-ac2/fs/nfs/direct.c --- linux.21rc5/fs/nfs/direct.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/direct.c 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,374 @@ +/* + * linux/fs/nfs/direct.c + * + * High-performance direct I/O for the NFS client + * + * When an application requests uncached I/O, all read and write requests + * are made directly to the server; data stored or fetched via these + * requests is not cached in the Linux page cache. The client does not + * correct unaligned requests from applications. All requested bytes are + * held on permanent storage before a direct write system call returns to + * an application. Applications that manage their own data caching, such + * as databases, make very good use of direct I/O on local file systems. + * + * Solaris implements an uncached I/O facility called directio() that + * is used for backups and sequential I/O to very large files. Solaris + * also supports uncaching whole NFS partitions with "-o forcedirectio," + * an undocumented mount option. + * + * Note that I/O to read in executables (e.g. kernel_read) cannot use + * direct (kiobuf) reads because there is no vma backing the passed-in + * data buffer. + * + * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust. + * + * Initial implementation: 12/2001 by Chuck Lever + * + * TODO: + * + * 1. Use concurrent asynchronous network requests rather than + * serialized synchronous network requests for normal (non-sync) + * direct I/O. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define NFSDBG_FACILITY (NFSDBG_PAGECACHE | NFSDBG_VFS) +#define VERF_SIZE (2 * sizeof(__u32)) + +static /* inline */ int +nfs_direct_read_rpc(struct file *file, struct nfs_readargs *arg) +{ + int result; + struct inode * inode = file->f_dentry->d_inode; + struct nfs_fattr fattr; + struct rpc_message msg; + struct nfs_readres res = { &fattr, arg->count, 0 }; + +#ifdef CONFIG_NFS_V3 + msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ? + NFS3PROC_READ : NFSPROC_READ; +#else + msg.rpc_proc = NFSPROC_READ; +#endif + msg.rpc_argp = arg; + msg.rpc_resp = &res; + + lock_kernel(); + msg.rpc_cred = nfs_file_cred(file); + fattr.valid = 0; + result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + nfs_refresh_inode(inode, &fattr); + unlock_kernel(); + + return result; +} + +static /* inline */ int +nfs_direct_write_rpc(struct file *file, struct nfs_writeargs *arg, + struct nfs_writeverf *verf) +{ + int result; + struct inode *inode = file->f_dentry->d_inode; + struct nfs_fattr fattr; + struct rpc_message msg; + struct nfs_writeres res = { &fattr, verf, 0 }; + +#ifdef CONFIG_NFS_V3 + msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ? 
+ NFS3PROC_WRITE : NFSPROC_WRITE; +#else + msg.rpc_proc = NFSPROC_WRITE; +#endif + msg.rpc_argp = arg; + msg.rpc_resp = &res; + + lock_kernel(); + msg.rpc_cred = get_rpccred(nfs_file_cred(file)); + fattr.valid = 0; + result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + nfs_write_attributes(inode, &fattr); + put_rpccred(msg.rpc_cred); + unlock_kernel(); + +#ifdef CONFIG_NFS_V3 + if (NFS_PROTO(inode)->version == 3) { + if (result > 0) { + if ((arg->stable == NFS_FILE_SYNC) && + (verf->committed != NFS_FILE_SYNC)) { + printk(KERN_ERR __FUNCTION__ + ": server didn't sync stable write request\n"); + return -EIO; + } + + if (result != arg->count) { + printk(KERN_INFO __FUNCTION__ + ": short write, count=%u, result=%d\n", + arg->count, result); + } + } + return result; + } else { +#endif + verf->committed = NFS_FILE_SYNC; /* NFSv2 always syncs data */ + if (result == 0) + return arg->count; + return result; +#ifdef CONFIG_NFS_V3 + } +#endif +} + +#ifdef CONFIG_NFS_V3 +static /* inline */ int +nfs_direct_commit_rpc(struct inode *inode, loff_t offset, size_t count, + struct nfs_writeverf *verf) +{ + int result; + struct nfs_fattr fattr; + struct nfs_writeargs arg = { NFS_FH(inode), offset, count, 0, 0, + NULL }; + struct nfs_writeres res = { &fattr, verf, 0 }; + struct rpc_message msg = { NFS3PROC_COMMIT, &arg, &res, NULL }; + + fattr.valid = 0; + + lock_kernel(); + result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + nfs_write_attributes(inode, &fattr); + unlock_kernel(); + + return result; +} +#else +static inline int +nfs_direct_commit_rpc(struct inode *inode, loff_t offset, size_t count, + struct nfs_writeverf *verf) +{ + return 0; +} +#endif + +/* + * Walk through the iobuf and create an iovec for each "rsize" bytes. + */ +static int +nfs_direct_read(struct file *file, struct kiobuf *iobuf, loff_t offset, + size_t count) +{ + int curpage, total; + struct inode *inode = file->f_dentry->d_inode; + int rsize = NFS_SERVER(inode)->rsize; + struct page **src = iobuf->maplist, + **end = iobuf->maplist + iobuf->nr_pages; + struct page *pages[NFS_READ_MAXIOV]; + struct nfs_readargs args = { NFS_FH(inode), + offset, + 0, + iobuf->offset, + pages }; + + total = 0; + curpage = 0; + while (count) { + int request, result; + struct page **dst = pages; + + request = count; + if (count > rsize) + request = rsize; + args.count = request; + + do { + if (!*src) + return -EFAULT; + + *dst++ = *src; + /* zero after the first iov */ + if (request < PAGE_SIZE) + break; + request -= PAGE_SIZE; + src++; + } while (request != 0 && src != end); + + result = nfs_direct_read_rpc(file, &args); + + if (result < 0) { + if (result == -EISDIR) + total = -EINVAL; + else + total = result; + break; + } + + total += result; + if (result < args.count) /* NFSv2ism */ + break; + count -= result; + args.offset += result; + args.pgbase += result; + args.pgbase &= ~PAGE_MASK; + }; + return total; +} + +/* + * Walk through the iobuf and create an iovec for each "wsize" bytes. + * If only one network write is necessary, or if the O_SYNC flag or + * 'sync' mount option are present, or if this is a V2 inode, use + * FILE_SYNC. Otherwise, use UNSTABLE and finish with a COMMIT. + * + * The mechanics of this function are much the same as nfs_direct_read, + * with the added complexity of committing unstable writes. 
+ */ +static int +nfs_direct_write(struct file *file, struct kiobuf *iobuf, + loff_t offset, size_t count) +{ + int curpage, total; + int need_commit = 0; + loff_t save_offset = offset; + struct inode *inode = file->f_dentry->d_inode; + int wsize = NFS_SERVER(inode)->wsize; + struct nfs_writeverf first_verf, ret_verf; + struct page *pages[NFS_WRITE_MAXIOV]; + struct nfs_writeargs args = { NFS_FH(inode), 0, 0, NFS_FILE_SYNC, 0, pages }; + +#ifdef CONFIG_NFS_V3 + if ((NFS_PROTO(inode)->version == 3) && (count > wsize) && + (!IS_SYNC(inode))) + args.stable = NFS_UNSTABLE; +#endif + +retry: + total = 0; + curpage = 0; + while (count) { + int request, result; + struct page **dest = pages; + + request = count; + if (count > wsize) + request = wsize; + args.count = request; + args.offset = offset; + args.pgbase = (iobuf->offset + total) & ~PAGE_MASK; + + do { + struct page *page = iobuf->maplist[curpage]; + + if (!page) + return -EFAULT; + + *dest++ = page; + /* zero after the first iov */ + if (request > PAGE_SIZE) + break; + request -= PAGE_SIZE; + curpage++; + } while (request != 0 && curpage < iobuf->nr_pages); + + result = nfs_direct_write_rpc(file, &args, &ret_verf); + + if (result < 0) { + total = result; + break; + } + + if (!total) + memcpy(&first_verf.verifier, &ret_verf.verifier, + VERF_SIZE); + if (ret_verf.committed != NFS_FILE_SYNC) { + need_commit = 1; + if (memcmp(&first_verf.verifier, &ret_verf.verifier, + VERF_SIZE)) + goto print_retry; + } + + total += result; + count -= result; + offset += result; + }; + + /* + * Commit data written so far, even in the event of an error + */ + if (need_commit) { + if (nfs_direct_commit_rpc(inode, save_offset, + iobuf->length - count, &ret_verf)) + goto print_retry; + if (memcmp(&first_verf.verifier, &ret_verf.verifier, + VERF_SIZE)) + goto print_retry; + } + + return total; + +print_retry: + printk(KERN_INFO __FUNCTION__ + ": detected server restart; retrying with FILE_SYNC\n"); + args.stable = NFS_FILE_SYNC; + offset = save_offset; + count = iobuf->length; + goto retry; +} + +/* + * Read or write data, moving the data directly to/from the + * application's buffer without caching in the page cache. + * + * Rules for direct I/O + * + * 1. block size = 512 bytes or more + * 2. file byte offset is block aligned + * 3. byte count is a multiple of block size + * 4. user buffer is not aligned + * 5. user buffer is faulted in and pinned + * + * These are verified before we get here. 
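[Those five rules mean nfs_direct_IO() never has to split or pad a request itself; the generic direct-I/O path has already checked them by the time this function runs. Note rule 4: unlike block-device O_DIRECT, the user buffer itself need not be specially aligned, and rule 5 (fault-in and pinning) is handled by the kernel's kiobuf mapping. A userspace sketch satisfying the offset/length rules, with an arbitrary 4096-byte block size and a hypothetical path:

#define _GNU_SOURCE			/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char *buf = malloc(4096);	/* rule 4: buffer alignment not required */
	int fd = open("/mnt/nfs/bigfile", O_RDONLY | O_DIRECT);

	if (fd < 0 || !buf)
		return 1;
	/* offset 0 is block aligned (rule 2) and 4096 is a multiple of a
	 * >=512-byte block size (rules 1 and 3) */
	if (read(fd, buf, 4096) < 0)
		return 1;
	close(fd);
	free(buf);
	return 0;
}]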
+ */ +int +nfs_direct_IO(int rw, struct file *file, struct kiobuf *iobuf, + unsigned long blocknr, int blocksize) +{ + int result = -EINVAL; + size_t count = iobuf->length; + struct dentry *dentry = file->f_dentry; + struct inode *inode = dentry->d_inode; + loff_t offset = blocknr << inode->i_blkbits; + + switch (rw) { + case READ: + dfprintk(VFS, + "NFS: direct_IO(READ) (%s/%s) off/cnt(%Lu/%d)\n", + dentry->d_parent->d_name.name, + dentry->d_name.name, offset, count); + + result = nfs_direct_read(file, iobuf, offset, count); + break; + case WRITE: + dfprintk(VFS, + "NFS: direct_IO(WRITE) (%s/%s) off/cnt(%Lu/%d)\n", + dentry->d_parent->d_name.name, + dentry->d_name.name, offset, count); + + result = nfs_direct_write(file, iobuf, offset, count); + break; + default: + break; + } + + dfprintk(VFS, "NFS: direct_IO result = %d\n", result); + return result; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/file.c linux.21rc5-ac2/fs/nfs/file.c --- linux.21rc5/fs/nfs/file.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/file.c 2003-04-22 16:44:38.000000000 +0100 @@ -16,6 +16,7 @@ * nfs regular file handling functions */ +#include #include #include #include @@ -200,6 +201,9 @@ sync_page: nfs_sync_page, writepage: nfs_writepage, prepare_write: nfs_prepare_write, +#ifdef CONFIG_NFS_DIRECTIO + direct_IO: nfs_direct_IO, +#endif commit_write: nfs_commit_write }; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/inode.c linux.21rc5-ac2/fs/nfs/inode.c --- linux.21rc5/fs/nfs/inode.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/inode.c 2003-05-01 14:17:44.000000000 +0100 @@ -146,10 +146,14 @@ static void nfs_clear_inode(struct inode *inode) { - struct rpc_cred *cred = NFS_I(inode)->mm_cred; + struct nfs_inode_info *nfsi = NFS_I(inode); + struct rpc_cred *cred = nfsi->mm_cred; if (cred) put_rpccred(cred); + cred = nfsi->cache_access.cred; + if (cred) + put_rpccred(cred); } void @@ -251,6 +255,72 @@ } /* + * Set up the NFS superblock private area using probed values + */ +static int +nfs_setup_superblock(struct super_block *sb, struct nfs_fh *rootfh) +{ + struct nfs_server *server = &sb->u.nfs_sb.s_server; + struct nfs_fattr fattr; + struct nfs_fsinfo fsinfo = { &fattr, }; + struct nfs_pathconf pathinfo = { &fattr, }; + int maxlen, res; + + res = server->rpc_ops->fsinfo(server, rootfh, &fsinfo); + if (res < 0) + return res; + + /* Work out a lot of parameters */ + if (!server->rsize) + server->rsize = nfs_block_size(fsinfo.rtpref, NULL); + if (!server->wsize) + server->wsize = nfs_block_size(fsinfo.wtpref, NULL); + + /* NFSv3: we don't have bsize, but rather rtmult and wtmult... 
*/ + if (!fsinfo.wtmult) + fsinfo.wtmult = 512; + sb->s_blocksize = nfs_block_bits(fsinfo.wtmult, &sb->s_blocksize_bits); + + if (server->rsize > fsinfo.rtmax) + server->rsize = fsinfo.rtmax; + if (server->wsize > fsinfo.wtmax) + server->wsize = fsinfo.wtmax; + + server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + if (server->rpages > NFS_READ_MAXIOV) { + server->rpages = NFS_READ_MAXIOV; + server->rsize = server->rpages << PAGE_CACHE_SHIFT; + } + + server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + if (server->wpages > NFS_WRITE_MAXIOV) { + server->wpages = NFS_WRITE_MAXIOV; + server->wsize = server->wpages << PAGE_CACHE_SHIFT; + } + + server->dtsize = nfs_block_size(fsinfo.dtpref, NULL); + if (server->dtsize > PAGE_CACHE_SIZE) + server->dtsize = PAGE_CACHE_SIZE; + if (server->dtsize > server->rsize) + server->dtsize = server->rsize; + + maxlen = (server->rpc_ops->version == 2) ? NFS2_MAXNAMLEN : NFS3_MAXNAMLEN; + if (!server->namelen) { + res = server->rpc_ops->pathconf(server, rootfh, &pathinfo); + if (!res) + server->namelen = pathinfo.name_max; + } + if (!server->namelen || server->namelen > maxlen) + server->namelen = maxlen; + + sb->s_maxbytes = fsinfo.maxfilesize; + if (sb->s_maxbytes > MAX_LFS_FILESIZE) + sb->s_maxbytes = MAX_LFS_FILESIZE; + + return 0; +} + +/* * The way this works is that the mount process passes a structure * in the data argument which contains the server's IP address * and the root file handle obtained from the server's mount @@ -268,8 +338,7 @@ unsigned int authflavor; struct sockaddr_in srvaddr; struct rpc_timeout timeparms; - struct nfs_fsinfo fsinfo; - int tcp, version, maxlen; + int tcp, version; memset(&sb->u.nfs_sb, 0, sizeof(sb->u.nfs_sb)); if (!data) @@ -298,11 +367,11 @@ sb->s_magic = NFS_SUPER_MAGIC; sb->s_op = &nfs_sops; - sb->s_blocksize_bits = 0; - sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits); server = &sb->u.nfs_sb.s_server; - server->rsize = nfs_block_size(data->rsize, NULL); - server->wsize = nfs_block_size(data->wsize, NULL); + if (data->rsize) + server->rsize = nfs_block_size(data->rsize, NULL); + if (data->wsize) + server->wsize = nfs_block_size(data->wsize, NULL); server->flags = data->flags & NFS_MOUNT_FLAGMASK; if (data->flags & NFS_MOUNT_NOAC) { @@ -326,12 +395,14 @@ INIT_LIST_HEAD(&server->lru_busy); nfsv3_try_again: + server->caps = 0; /* Check NFS protocol revision and initialize RPC op vector * and file handle pool. */ if (data->flags & NFS_MOUNT_VER3) { #ifdef CONFIG_NFS_V3 server->rpc_ops = &nfs_v3_clientops; version = 3; + server->caps |= NFS_CAP_READDIRPLUS; if (data->version < 4) { printk(KERN_NOTICE "NFS: NFSv3 not supported by mount program.\n"); goto out_unlock; @@ -409,62 +480,11 @@ sb->s_root->d_op = &nfs_dentry_operations; /* Get some general file system info */ - if (server->rpc_ops->statfs(server, root, &fsinfo) >= 0) { - if (server->namelen == 0) - server->namelen = fsinfo.namelen; - } else { + if (nfs_setup_superblock(sb, root) < 0) { printk(KERN_NOTICE "NFS: cannot retrieve file system info.\n"); goto out_no_root; - } - - /* Work out a lot of parameters */ - if (data->rsize == 0) - server->rsize = nfs_block_size(fsinfo.rtpref, NULL); - if (data->wsize == 0) - server->wsize = nfs_block_size(fsinfo.wtpref, NULL); - /* NFSv3: we don't have bsize, but rather rtmult and wtmult... */ - if (!fsinfo.bsize) - fsinfo.bsize = (fsinfo.rtmult>fsinfo.wtmult) ? 
fsinfo.rtmult : fsinfo.wtmult; - /* Also make sure we don't go below rsize/wsize since - * RPC calls are expensive */ - if (fsinfo.bsize < server->rsize) - fsinfo.bsize = server->rsize; - if (fsinfo.bsize < server->wsize) - fsinfo.bsize = server->wsize; - - if (data->bsize == 0) - sb->s_blocksize = nfs_block_bits(fsinfo.bsize, &sb->s_blocksize_bits); - if (server->rsize > fsinfo.rtmax) - server->rsize = fsinfo.rtmax; - if (server->wsize > fsinfo.wtmax) - server->wsize = fsinfo.wtmax; - - server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - if (server->rpages > NFS_READ_MAXIOV) { - server->rpages = NFS_READ_MAXIOV; - server->rsize = server->rpages << PAGE_CACHE_SHIFT; } - server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - if (server->wpages > NFS_WRITE_MAXIOV) { - server->wpages = NFS_WRITE_MAXIOV; - server->wsize = server->wpages << PAGE_CACHE_SHIFT; - } - - server->dtsize = nfs_block_size(fsinfo.dtpref, NULL); - if (server->dtsize > PAGE_CACHE_SIZE) - server->dtsize = PAGE_CACHE_SIZE; - if (server->dtsize > server->rsize) - server->dtsize = server->rsize; - - maxlen = (version == 2) ? NFS2_MAXNAMLEN : NFS3_MAXNAMLEN; - - if (server->namelen == 0 || server->namelen > maxlen) - server->namelen = maxlen; - - sb->s_maxbytes = fsinfo.maxfilesize; - if (sb->s_maxbytes > MAX_LFS_FILESIZE) - sb->s_maxbytes = MAX_LFS_FILESIZE; /* Fire up the writeback cache */ if (nfs_reqlist_alloc(server) < 0) { @@ -526,7 +546,8 @@ struct nfs_server *server = &sb->u.nfs_sb.s_server; unsigned char blockbits; unsigned long blockres; - struct nfs_fsinfo res; + struct nfs_fattr attr; + struct nfs_fsstat res = { &attr, }; int error; error = server->rpc_ops->statfs(server, NFS_FH(sb->s_root->d_inode), &res); @@ -534,18 +555,15 @@ if (error < 0) goto out_err; - if (res.bsize == 0) - res.bsize = sb->s_blocksize; - buf->f_bsize = nfs_block_bits(res.bsize, &blockbits); + buf->f_bsize = sb->s_blocksize; + blockbits = sb->s_blocksize_bits; blockres = (1 << blockbits) - 1; buf->f_blocks = (res.tbytes + blockres) >> blockbits; buf->f_bfree = (res.fbytes + blockres) >> blockbits; buf->f_bavail = (res.abytes + blockres) >> blockbits; buf->f_files = res.tfiles; buf->f_ffree = res.afiles; - if (res.namelen == 0 || res.namelen > server->namelen) - res.namelen = server->namelen; - buf->f_namelen = res.namelen; + buf->f_namelen = server->namelen; return 0; out_err: printk("nfs_statfs: statfs error = %d\n", -error); @@ -623,36 +641,35 @@ nfs_zap_caches(inode); } +/* Don't use READDIRPLUS on directories that we believe are too large */ +#define NFS_LIMIT_READDIRPLUS (8*PAGE_SIZE) + /* * Fill in inode information from the fattr. */ static void nfs_fill_inode(struct inode *inode, struct nfs_fh *fh, struct nfs_fattr *fattr) { - /* - * Check whether the mode has been set, as we only want to - * do this once. (We don't allow inodes to change types.) + NFS_FILEID(inode) = fattr->fileid; + inode->i_mode = fattr->mode; + /* Why so? Because we want revalidate for devices/FIFOs, and + * that's precisely what we have in nfs_file_inode_operations. */ - if (inode->i_mode == 0) { - NFS_FILEID(inode) = fattr->fileid; - inode->i_mode = fattr->mode; - /* Why so? Because we want revalidate for devices/FIFOs, and - * that's precisely what we have in nfs_file_inode_operations. 
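[nfs_setup_superblock() above negotiates rsize/wsize down so a single request always fits the fixed iovec arrays: the page count is the byte size rounded up, and if it exceeds NFS_READ_MAXIOV/NFS_WRITE_MAXIOV the byte size is recomputed from the clamped page count. A worked instance of that arithmetic with 4096-byte pages (the MAXIOV value of 8 is purely illustrative):

/* rsize = 65536, PAGE_CACHE_SIZE = 4096, PAGE_CACHE_SHIFT = 12 */
rpages = (65536 + 4096 - 1) >> 12;	/* = 16 pages */
if (rpages > 8) {			/* illustrative NFS_READ_MAXIOV */
	rpages = 8;
	rsize = rpages << 12;		/* renegotiated down to 32768 */
}]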
- */ - inode->i_op = &nfs_file_inode_operations; - if (S_ISREG(inode->i_mode)) { - inode->i_fop = &nfs_file_operations; - inode->i_data.a_ops = &nfs_file_aops; - } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = &nfs_dir_inode_operations; - inode->i_fop = &nfs_dir_operations; - } else if (S_ISLNK(inode->i_mode)) - inode->i_op = &nfs_symlink_inode_operations; - else - init_special_inode(inode, inode->i_mode, fattr->rdev); - memcpy(&inode->u.nfs_i.fh, fh, sizeof(inode->u.nfs_i.fh)); - } - nfs_refresh_inode(inode, fattr); + inode->i_op = &nfs_file_inode_operations; + if (S_ISREG(inode->i_mode)) { + inode->i_fop = &nfs_file_operations; + inode->i_data.a_ops = &nfs_file_aops; + } else if (S_ISDIR(inode->i_mode)) { + inode->i_op = &nfs_dir_inode_operations; + inode->i_fop = &nfs_dir_operations; + if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS) + && fattr->size <= NFS_LIMIT_READDIRPLUS) + NFS_FLAGS(inode) |= NFS_INO_ADVISE_RDPLUS; + } else if (S_ISLNK(inode->i_mode)) + inode->i_op = &nfs_symlink_inode_operations; + else + init_special_inode(inode, inode->i_mode, fattr->rdev); + memcpy(&inode->u.nfs_i.fh, fh, sizeof(inode->u.nfs_i.fh)); } struct nfs_find_desc { @@ -727,7 +744,14 @@ if (!(inode = iget4(sb, ino, nfs_find_actor, &desc))) goto out_no_inode; - nfs_fill_inode(inode, fh, fattr); + /* + * Check whether the mode has been set, as we only want to + * do this once. (We don't allow inodes to change types.) + */ + if (inode->i_mode == 0) + nfs_fill_inode(inode, fh, fattr); + + nfs_refresh_inode(inode, fattr); dprintk("NFS: __nfs_fhget(%x/%Ld ct=%d)\n", inode->i_dev, (long long)NFS_FILEID(inode), atomic_read(&inode->i_count)); @@ -740,12 +764,37 @@ goto out; } +#define IS_TRUNC_DOWN(_inode, _attr) \ + (_attr->ia_valid & ATTR_SIZE && _attr->ia_size < _inode->i_size) +static inline void +nfs_inode_flush_on(struct inode *inode) +{ + atomic_inc(&(NFS_I(inode)->flushers)); + lock_kernel(); + NFS_FLAGS(inode) |= NFS_INO_FLUSH; + unlock_kernel(); + return; +} + +static inline void +nfs_inode_flush_off(struct inode *inode) +{ + atomic_dec(&(NFS_I(inode)->flushers)); + if (atomic_read(&(NFS_I(inode)->flushers)) == 0) { + lock_kernel(); + NFS_FLAGS(inode) &= ~NFS_INO_FLUSH; + wake_up(&inode->i_wait); + unlock_kernel(); + } + return; +} + int nfs_notify_change(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; struct nfs_fattr fattr; - int error; + int error, flusher=0; /* * Make sure the inode is up-to-date. @@ -761,11 +810,30 @@ if (!S_ISREG(inode->i_mode)) attr->ia_valid &= ~ATTR_SIZE; - filemap_fdatasync(inode->i_mapping); - error = nfs_wb_all(inode); - filemap_fdatawait(inode->i_mapping); - if (error) - goto out; + /* + * If the file is going to be truncated down + * make sure all of the mmapped pages get flushed + * by telling nfs_writepage to flush them synchronously. + * If they are flushed asynchronously and the file size + * changes (again) before they are flushed, data corruption + * will occur. + * XXX: It would be nice if there was a filemap_ api + * that would tell how many (if any) dirty mmapped pages there + * are. That way I wouldn't have to take the lock_kernel() when + * it's not necessary.
+ */ + if (IS_TRUNC_DOWN(inode, attr)) { + flusher = 1; + nfs_inode_flush_on(inode); + } + + do { + filemap_fdatasync(inode->i_mapping); + error = nfs_wb_all(inode); + filemap_fdatawait(inode->i_mapping); + if (error) + goto out; + } while (flusher && NFS_I(inode)->npages); error = NFS_PROTO(inode)->setattr(inode, &fattr, attr); if (error) @@ -795,6 +863,9 @@ NFS_CACHEINV(inode); error = nfs_refresh_inode(inode, &fattr); out: + if (flusher) { + nfs_inode_flush_off(inode); + } return error; } @@ -850,15 +921,23 @@ { struct rpc_auth *auth; struct rpc_cred *cred; + int err = 0; lock_kernel(); + /* Ensure that we revalidate the data cache */ + if (! (NFS_SERVER(inode)->flags & NFS_MOUNT_NOCTO)) { + err = __nfs_revalidate_inode(NFS_SERVER(inode),inode); + if (err) + goto out; + } auth = NFS_CLIENT(inode)->cl_auth; cred = rpcauth_lookupcred(auth, 0); filp->private_data = cred; if (filp->f_mode & FMODE_WRITE) nfs_set_mmcred(inode, cred); +out: unlock_kernel(); - return 0; + return err; } int nfs_release(struct inode *inode, struct file *filp) @@ -993,6 +1072,9 @@ goto out_err; } + /* Throw out obsolete READDIRPLUS attributes */ + if (time_before(fattr->timestamp, NFS_READTIME(inode))) + return 0; /* * Make sure the inode's type hasn't changed. */ @@ -1011,7 +1093,7 @@ /* * Update the read time so we don't revalidate too often. */ - NFS_READTIME(inode) = jiffies; + NFS_READTIME(inode) = fattr->timestamp; /* * Note: NFS_CACHE_ISIZE(inode) reflects the state of the cache. @@ -1060,7 +1142,8 @@ inode->i_atime = new_atime; if (NFS_CACHE_MTIME(inode) != new_mtime) { - NFS_MTIME_UPDATE(inode) = jiffies; + if (invalid) + NFS_MTIME_UPDATE(inode) = fattr->timestamp; NFS_CACHE_MTIME(inode) = new_mtime; inode->i_mtime = nfs_time_to_secs(new_mtime); } @@ -1068,6 +1151,16 @@ NFS_CACHE_ISIZE(inode) = new_size; inode->i_size = new_isize; + if (inode->i_mode != fattr->mode || + inode->i_uid != fattr->uid || + inode->i_gid != fattr->gid) { + struct rpc_cred **cred = &NFS_I(inode)->cache_access.cred; + if (*cred) { + put_rpccred(*cred); + *cred = NULL; + } + } + inode->i_mode = fattr->mode; inode->i_nlink = fattr->nlink; inode->i_uid = fattr->uid; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/Makefile linux.21rc5-ac2/fs/nfs/Makefile --- linux.21rc5/fs/nfs/Makefile 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/Makefile 2003-04-22 16:44:38.000000000 +0100 @@ -14,6 +14,7 @@ obj-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o obj-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o +obj-$(CONFIG_NFS_DIRECTIO) += direct.o obj-m := $(O_TARGET) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/nfs2xdr.c linux.21rc5-ac2/fs/nfs/nfs2xdr.c --- linux.21rc5/fs/nfs/nfs2xdr.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/nfs2xdr.c 2003-05-01 14:16:14.000000000 +0100 @@ -118,6 +118,7 @@ fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO; fattr->rdev = 0; } + fattr->timestamp = jiffies; return p; } @@ -395,7 +396,7 @@ int hdrlen, recvd; int status, nr; unsigned int len, pglen; - u32 *end, *entry; + u32 *end, *entry, *kaddr; if ((status = ntohl(*p++))) return -nfs_stat_to_errno(status); @@ -415,7 +416,7 @@ if (pglen > recvd) pglen = recvd; page = rcvbuf->pages; - p = kmap(*page); + kaddr = p = kmap_atomic(*page, KM_USER0); entry = p; end = (u32 *)((char *)p + pglen); for (nr = 0; *p++; nr++) { @@ -436,7 +437,7 @@ if (!nr && (entry[0] != 0 || entry[1] == 0)) goto short_pkt; out: - kunmap(*page); + kunmap_atomic(kaddr, KM_USER0); return nr; short_pkt: 
entry[0] = entry[1] = 0; @@ -447,8 +448,8 @@ } goto out; err_unmap: - kunmap(*page); - return -errno_NFSERR_IO; + nr = -errno_NFSERR_IO; + goto out; } u32 * @@ -568,7 +569,7 @@ xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); } - strlen = (u32*)kmap(rcvbuf->pages[0]); + strlen = (u32*)kmap_atomic(rcvbuf->pages[0], KM_USER0); /* Convert length of symlink */ len = ntohl(*strlen); if (len > rcvbuf->page_len) @@ -577,7 +578,7 @@ /* NULL terminate the string we got */ string = (char *)(strlen + 1); string[len] = 0; - kunmap(rcvbuf->pages[0]); + kunmap_atomic(strlen, KM_USER0); return 0; } @@ -595,36 +596,18 @@ * Decode STATFS reply */ static int -nfs_xdr_statfsres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res) +nfs_xdr_statfsres(struct rpc_rqst *req, u32 *p, struct nfs2_statfs *res) { int status; - u32 xfer_size; if ((status = ntohl(*p++))) return -nfs_stat_to_errno(status); - /* For NFSv2, we more or less have to guess the preferred - * read/write/readdir sizes from the single 'transfer size' - * value. - */ - xfer_size = ntohl(*p++); /* tsize */ - res->rtmax = 8 * 1024; - res->rtpref = xfer_size; - res->rtmult = xfer_size; - res->wtmax = 8 * 1024; - res->wtpref = xfer_size; - res->wtmult = xfer_size; - res->dtpref = PAGE_CACHE_SIZE; - res->maxfilesize = 0x7FFFFFFF; /* just a guess */ + res->tsize = ntohl(*p++); res->bsize = ntohl(*p++); - - res->tbytes = ntohl(*p++) * res->bsize; - res->fbytes = ntohl(*p++) * res->bsize; - res->abytes = ntohl(*p++) * res->bsize; - res->tfiles = 0; - res->ffiles = 0; - res->afiles = 0; - res->namelen = 0; + res->blocks = ntohl(*p++); + res->bfree = ntohl(*p++); + res->bavail = ntohl(*p++); return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/nfs3proc.c linux.21rc5-ac2/fs/nfs/nfs3proc.c --- linux.21rc5/fs/nfs/nfs3proc.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/nfs3proc.c 2003-05-01 14:16:14.000000000 +0100 @@ -117,12 +117,13 @@ } static int -nfs3_proc_access(struct inode *inode, int mode, int ruid) +nfs3_proc_access(struct inode *inode, struct rpc_cred *cred, int mode) { struct nfs_fattr fattr; struct nfs3_accessargs arg = { NFS_FH(inode), 0 }; struct nfs3_accessres res = { &fattr, 0 }; - int status, flags; + struct rpc_message msg = { NFS3PROC_ACCESS, &arg, &res, cred }; + int status; dprintk("NFS call access\n"); fattr.valid = 0; @@ -140,8 +141,7 @@ if (mode & MAY_EXEC) arg.access |= NFS3_ACCESS_EXECUTE; } - flags = (ruid) ? 
RPC_CALL_REALUID : 0; - status = rpc_call(NFS_CLIENT(inode), NFS3PROC_ACCESS, &arg, &res, flags); + status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs_refresh_inode(inode, &fattr); dprintk("NFS reply access\n"); @@ -483,24 +483,42 @@ return status; } -/* - * This is a combo call of fsstat and fsinfo - */ static int nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, - struct nfs_fsinfo *info) + struct nfs_fsstat *stat) { int status; - dprintk("NFS call fsstat\n"); - memset((char *)info, 0, sizeof(*info)); - status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, info, 0); - if (status < 0) - goto error; + stat->fattr->valid = 0; + dprintk("NFS call statfs\n"); + status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, stat, 0); + dprintk("NFS reply statfs: %d\n", status); + return status; +} + +static int +nfs3_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_fsinfo *info) +{ + int status; + + info->fattr->valid = 0; + dprintk("NFS call fsinfo\n"); status = rpc_call(server->client, NFS3PROC_FSINFO, fhandle, info, 0); + dprintk("NFS reply fsinfo: %d\n", status); + return status; +} -error: - dprintk("NFS reply statfs: %d\n", status); +static int +nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_pathconf *info) +{ + int status; + + info->fattr->valid = 0; + dprintk("NFS call pathconf\n"); + status = rpc_call(server->client, NFS3PROC_PATHCONF, fhandle, info, 0); + dprintk("NFS reply pathconf: %d\n", status); return status; } @@ -529,5 +547,7 @@ nfs3_proc_readdir, nfs3_proc_mknod, nfs3_proc_statfs, + nfs3_proc_fsinfo, + nfs3_proc_pathconf, nfs3_decode_dirent, }; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/nfs3xdr.c linux.21rc5-ac2/fs/nfs/nfs3xdr.c --- linux.21rc5/fs/nfs/nfs3xdr.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/nfs3xdr.c 2003-05-01 14:16:14.000000000 +0100 @@ -181,6 +181,7 @@ /* Update the mode bits */ fattr->valid |= (NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3); + fattr->timestamp = jiffies; return p; } @@ -506,7 +507,7 @@ int hdrlen, recvd; int status, nr; unsigned int len, pglen; - u32 *entry, *end; + u32 *entry, *end, *kaddr; status = ntohl(*p++); /* Decode post_op_attrs */ @@ -536,7 +537,7 @@ if (pglen > recvd) pglen = recvd; page = rcvbuf->pages; - p = kmap(*page); + kaddr = p = kmap_atomic(*page, KM_USER0); end = (u32 *)((char *)p + pglen); entry = p; for (nr = 0; *p++; nr++) { @@ -581,7 +582,7 @@ if (!nr && (entry[0] != 0 || entry[1] == 0)) goto short_pkt; out: - kunmap(*page); + kunmap_atomic(kaddr, KM_USER0); return nr; short_pkt: entry[0] = entry[1] = 0; @@ -592,8 +593,8 @@ } goto out; err_unmap: - kunmap(*page); - return -errno_NFSERR_IO; + nr = -errno_NFSERR_IO; + goto out; } u32 * @@ -616,21 +617,19 @@ p = xdr_decode_hyper(p, &entry->cookie); if (plus) { - p = xdr_decode_post_op_attr(p, &entry->fattr); + entry->fattr->valid = 0; + p = xdr_decode_post_op_attr(p, entry->fattr); /* In fact, a post_op_fh3: */ if (*p++) { - p = xdr_decode_fhandle(p, &entry->fh); + p = xdr_decode_fhandle(p, entry->fh); /* Ugh -- server reply was truncated */ if (p == NULL) { dprintk("NFS: FH truncated\n"); *entry = old; return ERR_PTR(-EAGAIN); } - } else { - /* If we don't get a file handle, the attrs - * aren't worth a lot. 
*/ - entry->fattr.valid = 0; - } + } else + memset((u8*)(entry->fh), 0, sizeof(*entry->fh)); } entry->eof = !p[0] && p[1]; @@ -766,7 +765,7 @@ xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); } - strlen = (u32*)kmap(rcvbuf->pages[0]); + strlen = (u32*)kmap_atomic(rcvbuf->pages[0], KM_USER0); /* Convert length of symlink */ len = ntohl(*strlen); if (len > rcvbuf->page_len) @@ -775,7 +774,7 @@ /* NULL terminate the string we got */ string = (char *)(strlen + 1); string[len] = 0; - kunmap(rcvbuf->pages[0]); + kunmap_atomic(strlen, KM_USER0); return 0; } @@ -913,14 +912,13 @@ * Decode FSSTAT reply */ static int -nfs3_xdr_fsstatres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res) +nfs3_xdr_fsstatres(struct rpc_rqst *req, u32 *p, struct nfs_fsstat *res) { - struct nfs_fattr dummy; int status; status = ntohl(*p++); - p = xdr_decode_post_op_attr(p, &dummy); + p = xdr_decode_post_op_attr(p, res->fattr); if (status != 0) return -nfs_stat_to_errno(status); @@ -930,8 +928,7 @@ p = xdr_decode_hyper(p, &res->tfiles); p = xdr_decode_hyper(p, &res->ffiles); p = xdr_decode_hyper(p, &res->afiles); - - /* ignore invarsec */ + res->invarsec = ntohl(*p++); return 0; } @@ -941,12 +938,11 @@ static int nfs3_xdr_fsinfores(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res) { - struct nfs_fattr dummy; int status; status = ntohl(*p++); - p = xdr_decode_post_op_attr(p, &dummy); + p = xdr_decode_post_op_attr(p, res->fattr); if (status != 0) return -nfs_stat_to_errno(status); @@ -958,8 +954,8 @@ res->wtmult = ntohl(*p++); res->dtpref = ntohl(*p++); p = xdr_decode_hyper(p, &res->maxfilesize); - - /* ignore time_delta and properties */ + p = xdr_decode_time3(p, &res->time_delta); + res->properties = ntohl(*p++); return 0; } @@ -967,20 +963,21 @@ * Decode PATHCONF reply */ static int -nfs3_xdr_pathconfres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res) +nfs3_xdr_pathconfres(struct rpc_rqst *req, u32 *p, struct nfs_pathconf *res) { - struct nfs_fattr dummy; int status; status = ntohl(*p++); - p = xdr_decode_post_op_attr(p, &dummy); + p = xdr_decode_post_op_attr(p, res->fattr); if (status != 0) return -nfs_stat_to_errno(status); res->linkmax = ntohl(*p++); - res->namelen = ntohl(*p++); - - /* ignore remaining fields */ + res->name_max = ntohl(*p++); + res->no_trunc = ntohl(*p++) != 0; + res->chown_restricted = ntohl(*p++) != 0; + res->case_insensitive = ntohl(*p++) != 0; + res->case_preserving = ntohl(*p++) != 0; return 0; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/proc.c linux.21rc5-ac2/fs/nfs/proc.c --- linux.21rc5/fs/nfs/proc.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/proc.c 2003-05-01 14:16:14.000000000 +0100 @@ -351,17 +351,62 @@ static int nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, - struct nfs_fsinfo *info) + struct nfs_fsstat *stat) { int status; + struct nfs2_statfs fsinfo; - dprintk("NFS call statfs\n"); - memset((char *)info, 0, sizeof(*info)); - status = rpc_call(server->client, NFSPROC_STATFS, fhandle, info, 0); + stat->fattr->valid = 0; + dprintk("NFS call statfs\n"); + status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0); dprintk("NFS reply statfs: %d\n", status); + if (status) + goto out; + stat->tbytes = (u64)fsinfo.blocks * fsinfo.bsize; + stat->fbytes = (u64)fsinfo.bfree * fsinfo.bsize; + stat->abytes = (u64)fsinfo.bavail * fsinfo.bsize; + stat->tfiles = 0; + stat->ffiles = 0; + stat->afiles = 0; + stat->invarsec = 0; + out: return status; } +static int +nfs_proc_fsinfo(struct nfs_server 
*server, struct nfs_fh *fhandle, + struct nfs_fsinfo *info) +{ + int status; + struct nfs2_statfs fsinfo; + + info->fattr->valid = 0; + dprintk("NFS call fsinfo\n"); + status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0); + dprintk("NFS reply fsinfo: %d\n", status); + if (status) + goto out; + info->rtmax = NFS_MAXDATA; + info->rtpref = fsinfo.tsize; + info->rtmult = fsinfo.bsize; + info->wtmax = NFS_MAXDATA; + info->wtpref = fsinfo.tsize; + info->wtmult = fsinfo.bsize; + info->dtpref = fsinfo.tsize; + info->maxfilesize = 0x7FFFFFFF; + info->time_delta = 0; + info->properties = 0x1b; + out: + return status; +} + +static int +nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_pathconf *info) +{ + return -ENOTSUPP; +} + extern u32 * nfs_decode_dirent(u32 *, struct nfs_entry *, int); struct nfs_rpc_ops nfs_v2_clientops = { @@ -387,5 +432,7 @@ nfs_proc_readdir, nfs_proc_mknod, nfs_proc_statfs, + nfs_proc_fsinfo, + nfs_proc_pathconf, nfs_decode_dirent, }; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/read.c linux.21rc5-ac2/fs/nfs/read.c --- linux.21rc5/fs/nfs/read.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/read.c 2003-04-25 13:39:35.000000000 +0100 @@ -135,9 +135,9 @@ } while (count); if (count) { - char *kaddr = kmap(page); + char *kaddr = kmap_atomic(page, KM_USER0); memset(kaddr + offset, 0, count); - kunmap(page); + kunmap_atomic(kaddr, KM_USER0); } flush_dcache_page(page); SetPageUptodate(page); @@ -420,9 +420,9 @@ if (task->tk_status >= 0) { if (count < PAGE_CACHE_SIZE) { - char *p = kmap(page); + char *p = kmap_atomic(page, KM_USER0); memset(p + count, 0, PAGE_CACHE_SIZE - count); - kunmap(page); + kunmap_atomic(p, KM_USER0); count = 0; } else count -= PAGE_CACHE_SIZE; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/nfs/write.c linux.21rc5-ac2/fs/nfs/write.c --- linux.21rc5/fs/nfs/write.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/nfs/write.c 2003-04-25 13:41:02.000000000 +0100 @@ -123,23 +123,6 @@ } /* - * This function will be used to simulate weak cache consistency - * under NFSv2 when the NFSv3 attribute patch is included. - * For the moment, we just call nfs_refresh_inode(). - */ -static __inline__ int -nfs_write_attributes(struct inode *inode, struct nfs_fattr *fattr) -{ - if ((fattr->valid & NFS_ATTR_FATTR) && !(fattr->valid & NFS_ATTR_WCC)) { - fattr->pre_size = NFS_CACHE_ISIZE(inode); - fattr->pre_mtime = NFS_CACHE_MTIME(inode); - fattr->pre_ctime = NFS_CACHE_CTIME(inode); - fattr->valid |= NFS_ATTR_WCC; - } - return nfs_refresh_inode(inode, fattr); -} - -/* * Write a page synchronously. * Offset is the data offset within the page. 
*/ @@ -242,7 +225,7 @@ struct inode *inode = page->mapping->host; unsigned long end_index; unsigned offset = PAGE_CACHE_SIZE; - int err; + int err, is_sync; end_index = inode->i_size >> PAGE_CACHE_SHIFT; @@ -261,7 +244,8 @@ goto out; do_it: lock_kernel(); - if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !IS_SYNC(inode)) { + is_sync = (IS_SYNC(inode) || NFS_FLUSH(inode)); + if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !is_sync) { err = nfs_writepage_async(NULL, inode, page, 0, offset); if (err >= 0) err = 0; @@ -749,15 +733,18 @@ static void nfs_strategy(struct inode *inode) { - unsigned int dirty, wpages; + unsigned int dirty, wpages, flush; dirty = inode->u.nfs_i.ndirty; wpages = NFS_SERVER(inode)->wpages; + flush = NFS_FLUSH(inode); #ifdef CONFIG_NFS_V3 if (NFS_PROTO(inode)->version == 2) { if (dirty >= NFS_STRATEGY_PAGES * wpages) nfs_flush_file(inode, NULL, 0, 0, 0); - } else if (dirty >= wpages) + } else if (dirty >= wpages) { + nfs_flush_file(inode, NULL, 0, 0, 0); + } else if (dirty && flush) nfs_flush_file(inode, NULL, 0, 0, 0); #else if (dirty >= NFS_STRATEGY_PAGES * wpages) @@ -812,8 +799,15 @@ * If wsize is smaller than page size, update and write * page synchronously. */ - if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE || IS_SYNC(inode)) - return nfs_writepage_sync(file, inode, page, offset, count); + if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE || IS_SYNC(inode)) { + status = nfs_writepage_sync(file, inode, page, offset, count); + if (status > 0) { + if (offset == 0 && status == PAGE_CACHE_SIZE) + SetPageUptodate(page); + return 0; + } + return status; + } /* * Try to find an NFS request corresponding to this page diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/noquot.c linux.21rc5-ac2/fs/noquot.c --- linux.21rc5/fs/noquot.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/noquot.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -/* noquot.c: Quota stubs necessary for when quotas are not - * compiled into the kernel. - */ - -#include -#include -#include - -int nr_dquots, nr_free_dquots; -int max_dquots; - -asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr) -{ - return(-ENOSYS); -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/partitions/efi.c linux.21rc5-ac2/fs/partitions/efi.c --- linux.21rc5/fs/partitions/efi.c 2003-05-28 15:07:34.000000000 +0100 +++ linux.21rc5-ac2/fs/partitions/efi.c 2003-05-20 20:21:28.000000000 +0100 @@ -99,6 +99,7 @@ #include #include #include +#include <linux/crc32.h> #include #include #include "check.h" @@ -138,73 +139,6 @@ } __setup("gpt", force_gpt_fn); - -/* - * There are multiple 16-bit CRC polynomials in common use, but this is - * *the* standard CRC-32 polynomial, first popularized by Ethernet. - * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0 - */ -#define CRCPOLY_LE 0xedb88320 -/* How many bits at a time to use. Requires a table of 4<<CRC_LE_BITS bytes. */ -#define CRC_LE_BITS 8 - -static u32 *crc32table_le; - -/** - * crc32init_le(): allocate and initialize LE table data - */ -static int __init crc32init_le(void) -{ - unsigned i, j; - u32 crc = 1; - - crc32table_le = kmalloc((1 << CRC_LE_BITS) * sizeof(u32), GFP_KERNEL); - if (!crc32table_le) - return 1; - crc32table_le[0] = 0; - - for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { - crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); - for (j = 0; j < 1 << CRC_LE_BITS; j += 2 * i) - crc32table_le[i + j] = crc ^ crc32table_le[j]; - } - return 0; -} - -/** - * crc32cleanup_le(): free LE table data - */ -static void __exit crc32cleanup_le(void) -{ - if (crc32table_le) kfree(crc32table_le); - crc32table_le = NULL; -} - -__initcall(crc32init_le); -__exitcall(crc32cleanup_le); - -/** - * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 - * @crc - seed value for computation.
~0 for Ethernet, sometimes 0 for - * other uses, or the previous crc32 value if computing incrementally. - * @p - pointer to buffer over which CRC is run - * @len - length of buffer @p - * - */ -static u32 crc32_le(u32 crc, unsigned char const *p, size_t len) -{ - while (len--) { - crc = (crc >> 8) ^ crc32table_le[(crc ^ *p++) & 255]; - } - return crc; -} - - /** * efi_crc32() - EFI version of crc32 function * @buf: buffer to calculate crc32 of @@ -220,7 +154,7 @@ static inline u32 efi_crc32(const void *buf, unsigned long len) { - return (crc32_le(~0L, buf, len) ^ ~0L); + return (crc32(~0L, buf, len) ^ ~0L); } /** diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/proc/array.c linux.21rc5-ac2/fs/proc/array.c --- linux.21rc5/fs/proc/array.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/proc/array.c 2003-05-09 18:23:54.000000000 +0100 @@ -338,16 +338,15 @@ /* scale priority and nice values from timeslices to -20..20 */ /* to make it look like a "normal" Unix priority/nice value */ - priority = task->counter; - priority = 20 - (priority * 10 + DEF_COUNTER / 2) / DEF_COUNTER; - nice = task->nice; + priority = task_prio(task); + nice = task_nice(task); read_lock(&tasklist_lock); ppid = task->pid ? task->p_opptr->pid : 0; read_unlock(&tasklist_lock); res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \ -%lu %lu %lu %lu %lu %lu %lu %lu %d %d\n", +%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", task->pid, task->comm, state, @@ -390,7 +389,9 @@ task->nswap, task->cnswap, task->exit_signal, - task->processor); + task_cpu(task), + task->rt_priority, + task->policy); if(mm) mmput(mm); return res; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/proc/proc_misc.c linux.21rc5-ac2/fs/proc/proc_misc.c --- linux.21rc5/fs/proc/proc_misc.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/proc/proc_misc.c 2003-05-28 15:39:23.000000000 +0100 @@ -107,11 +107,11 @@ a = avenrun[0] + (FIXED_1/200); b = avenrun[1] + (FIXED_1/200); c = avenrun[2] + (FIXED_1/200); - len = sprintf(page,"%d.%02d %d.%02d %d.%02d %d/%d %d\n", + len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n", LOAD_INT(a), LOAD_FRAC(a), LOAD_INT(b), LOAD_FRAC(b), LOAD_INT(c), LOAD_FRAC(c), - nr_running, nr_threads, last_pid); + nr_running(), nr_threads, last_pid); return proc_calc_metrics(page, start, off, count, eof, len); } @@ -123,7 +123,7 @@ int len; uptime = jiffies; - idle = init_tasks[0]->times.tms_utime + init_tasks[0]->times.tms_stime; + idle = init_task.times.tms_utime + init_task.times.tms_stime; /* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but that would overflow about every five days at HZ == 100. @@ -156,7 +156,11 @@ struct sysinfo i; int len; int pg_size ; + int committed; + /* FIXME: needs to be in headers */ + extern atomic_t vm_committed_space; + /* * display in kilobytes. 
*/ @@ -165,6 +169,7 @@ si_meminfo(&i); si_swapinfo(&i); pg_size = atomic_read(&page_cache_size) - i.bufferram ; + committed = atomic_read(&vm_committed_space); len = sprintf(page, " total: used: free: shared: buffers: cached:\n" "Mem: %8Lu %8Lu %8Lu %8Lu %8Lu %8Lu\n" @@ -371,11 +376,11 @@ } } - proc_sprintf(page, &off, &len, - "\nctxt %u\n" + len += sprintf(page + len, + "\nctxt %lu\n" "btime %lu\n" "processes %lu\n", - kstat.context_swtch, + nr_context_switches(), xtime.tv_sec - jif / HZ, total_forks); @@ -423,9 +428,9 @@ int count, int *eof, void *data) { extern char saved_command_line[]; - int len; + int len = 0; - len = snprintf(page, count, "%s\n", saved_command_line); + proc_sprintf(page, &off, &len, "%s\n", saved_command_line); return proc_calc_metrics(page, start, off, count, eof, len); } @@ -495,7 +500,8 @@ buf++; p++; count--; read++; } pnt = (char *)prof_buffer + p - sizeof(unsigned int); - copy_to_user(buf,(void *)pnt,count); + if (copy_to_user(buf,(void *)pnt,count)) + return -EFAULT; read += count; *ppos += read; return read; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/quota.c linux.21rc5-ac2/fs/quota.c --- linux.21rc5/fs/quota.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/quota.c 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,667 @@ +/* + * Quota code necessary even when VFS quota support is not compiled + * into the kernel. The interesting stuff is over in dquot.c, here + * we have symbols for initial quotactl(2) handling, the sysctl(2) + * variables, etc - things needed even when quota support disabled. + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_QIFACE_COMPAT +#include +#endif + + +int nr_dquots, nr_free_dquots; + +/* Check validity of quotactl */ +static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + if (type >= MAXQUOTAS) + return -EINVAL; + if (!sb && cmd != Q_SYNC) + return -ENODEV; + /* Is operation supported? */ + if (sb && !sb->s_qcop) + return -ENOSYS; + + switch (cmd) { + case Q_GETFMT: + break; + case Q_QUOTAON: + if (!sb->s_qcop->quota_on) + return -ENOSYS; + break; + case Q_QUOTAOFF: + if (!sb->s_qcop->quota_off) + return -ENOSYS; + break; + case Q_SETINFO: + if (!sb->s_qcop->set_info) + return -ENOSYS; + break; + case Q_GETINFO: + if (!sb->s_qcop->get_info) + return -ENOSYS; + break; + case Q_SETQUOTA: + if (!sb->s_qcop->set_dqblk) + return -ENOSYS; + break; + case Q_GETQUOTA: + if (!sb->s_qcop->get_dqblk) + return -ENOSYS; + break; + case Q_SYNC: + if (sb && !sb->s_qcop->quota_sync) + return -ENOSYS; + break; + case Q_XQUOTAON: + case Q_XQUOTAOFF: + case Q_XQUOTARM: + if (!sb->s_qcop->set_xstate) + return -ENOSYS; + break; + case Q_XGETQSTAT: + if (!sb->s_qcop->get_xstate) + return -ENOSYS; + break; + case Q_XSETQLIM: + if (!sb->s_qcop->set_xquota) + return -ENOSYS; + break; + case Q_XGETQUOTA: + if (!sb->s_qcop->get_xquota) + return -ENOSYS; + break; + default: + return -EINVAL; + } + + /* Is quota turned on for commands which need it? 
*/ + switch (cmd) { + case Q_GETFMT: + case Q_GETINFO: + case Q_QUOTAOFF: + case Q_SETINFO: + case Q_SETQUOTA: + case Q_GETQUOTA: + if (!sb_has_quota_enabled(sb, type)) + return -ESRCH; + } + /* Check privileges */ + if (cmd == Q_GETQUOTA || cmd == Q_XGETQUOTA) { + if (((type == USRQUOTA && current->euid != id) || + (type == GRPQUOTA && !in_egroup_p(id))) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + } + else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO && cmd != Q_XGETQSTAT) + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return 0; +} + +/* Resolve device pathname to superblock */ +static struct super_block *resolve_dev(const char *path) +{ + int ret; + mode_t mode; + struct nameidata nd; + kdev_t dev; + struct super_block *sb; + + ret = user_path_walk(path, &nd); + if (ret) + goto out; + + dev = nd.dentry->d_inode->i_rdev; + mode = nd.dentry->d_inode->i_mode; + path_release(&nd); + + ret = -ENOTBLK; + if (!S_ISBLK(mode)) + goto out; + ret = -ENODEV; + sb = get_super(dev); + if (!sb) + goto out; + return sb; +out: + return ERR_PTR(ret); +} + +/* Copy parameters and call proper function */ +static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, caddr_t addr) +{ + int ret; + + switch (cmd) { + case Q_QUOTAON: { + char *pathname; + + if (IS_ERR(pathname = getname(addr))) + return PTR_ERR(pathname); + ret = sb->s_qcop->quota_on(sb, type, id, pathname); + putname(pathname); + return ret; + } + case Q_QUOTAOFF: + return sb->s_qcop->quota_off(sb, type); + + case Q_GETFMT: { + __u32 fmt; + + fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; + if (copy_to_user(addr, &fmt, sizeof(fmt))) + return -EFAULT; + return 0; + } + case Q_GETINFO: { + struct if_dqinfo info; + + if ((ret = sb->s_qcop->get_info(sb, type, &info))) + return ret; + if (copy_to_user(addr, &info, sizeof(info))) + return -EFAULT; + return 0; + } + case Q_SETINFO: { + struct if_dqinfo info; + + if (copy_from_user(&info, addr, sizeof(info))) + return -EFAULT; + return sb->s_qcop->set_info(sb, type, &info); + } + case Q_GETQUOTA: { + struct if_dqblk idq; + + if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) + return ret; + if (copy_to_user(addr, &idq, sizeof(idq))) + return -EFAULT; + return 0; + } + case Q_SETQUOTA: { + struct if_dqblk idq; + + if (copy_from_user(&idq, addr, sizeof(idq))) + return -EFAULT; + return sb->s_qcop->set_dqblk(sb, type, id, &idq); + } + case Q_SYNC: + if (sb) + return sb->s_qcop->quota_sync(sb, type); + sync_dquots_dev(NODEV, type); + return 0; + + case Q_XQUOTAON: + case Q_XQUOTAOFF: + case Q_XQUOTARM: { + __u32 flags; + + if (copy_from_user(&flags, addr, sizeof(flags))) + return -EFAULT; + return sb->s_qcop->set_xstate(sb, flags, cmd); + } + case Q_XGETQSTAT: { + struct fs_quota_stat fqs; + + if ((ret = sb->s_qcop->get_xstate(sb, &fqs))) + return ret; + if (copy_to_user(addr, &fqs, sizeof(fqs))) + return -EFAULT; + return 0; + } + case Q_XSETQLIM: { + struct fs_disk_quota fdq; + + if (copy_from_user(&fdq, addr, sizeof(fdq))) + return -EFAULT; + return sb->s_qcop->set_xquota(sb, type, id, &fdq); + } + case Q_XGETQUOTA: { + struct fs_disk_quota fdq; + + if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) + return ret; + if (copy_to_user(addr, &fdq, sizeof(fdq))) + return -EFAULT; + return 0; + } + /* We never reach here unless validity check is broken */ + default: + BUG(); + } + return 0; +} + +#ifdef CONFIG_QIFACE_COMPAT +static int check_compat_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + if (type >= MAXQUOTAS) + return 
-EINVAL; + /* Is operation supported? */ + /* sb==NULL for GETSTATS calls */ + if (sb && !sb->s_qcop) + return -ENOSYS; + + switch (cmd) { + case Q_COMP_QUOTAON: + if (!sb->s_qcop->quota_on) + return -ENOSYS; + break; + case Q_COMP_QUOTAOFF: + if (!sb->s_qcop->quota_off) + return -ENOSYS; + break; + case Q_COMP_SYNC: + if (sb && !sb->s_qcop->quota_sync) + return -ENOSYS; + break; +#ifdef CONFIG_QIFACE_V2 + case Q_V2_SETFLAGS: + case Q_V2_SETGRACE: + case Q_V2_SETINFO: + if (!sb->s_qcop->set_info) + return -ENOSYS; + break; + case Q_V2_GETINFO: + if (!sb->s_qcop->get_info) + return -ENOSYS; + break; + case Q_V2_SETQLIM: + case Q_V2_SETUSE: + case Q_V2_SETQUOTA: + if (!sb->s_qcop->set_dqblk) + return -ENOSYS; + break; + case Q_V2_GETQUOTA: + if (!sb->s_qcop->get_dqblk) + return -ENOSYS; + break; + case Q_V2_GETSTATS: + return 0; /* GETSTATS need no other checks */ +#endif +#ifdef CONFIG_QIFACE_V1 + case Q_V1_SETQLIM: + case Q_V1_SETUSE: + case Q_V1_SETQUOTA: + if (!sb->s_qcop->set_dqblk) + return -ENOSYS; + break; + case Q_V1_GETQUOTA: + if (!sb->s_qcop->get_dqblk) + return -ENOSYS; + break; + case Q_V1_RSQUASH: + if (!sb->s_qcop->set_info) + return -ENOSYS; + break; + case Q_V1_GETSTATS: + return 0; /* GETSTATS need no other checks */ +#endif + default: + return -EINVAL; + } + + /* Is quota turned on for commands which need it? */ + switch (cmd) { + case Q_V2_SETFLAGS: + case Q_V2_SETGRACE: + case Q_V2_SETINFO: + case Q_V2_GETINFO: + case Q_COMP_QUOTAOFF: + case Q_V1_RSQUASH: + case Q_V1_SETQUOTA: + case Q_V1_SETQLIM: + case Q_V1_SETUSE: + case Q_V2_SETQUOTA: + /* Q_V2_SETQLIM: collision with Q_V1_SETQLIM */ + case Q_V2_SETUSE: + case Q_V1_GETQUOTA: + case Q_V2_GETQUOTA: + if (!sb_has_quota_enabled(sb, type)) + return -ESRCH; + } +#ifdef CONFIG_QIFACE_V1 + if (cmd != Q_COMP_QUOTAON && cmd != Q_COMP_QUOTAOFF && cmd != Q_COMP_SYNC && sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id != QFMT_VFS_OLD) +#else + if (cmd != Q_COMP_QUOTAON && cmd != Q_COMP_QUOTAOFF && cmd != Q_COMP_SYNC && sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id != QFMT_VFS_V0) +#endif + return -ESRCH; + + /* Check privileges */ + if (cmd == Q_V1_GETQUOTA || cmd == Q_V2_GETQUOTA) { + if (((type == USRQUOTA && current->euid != id) || + (type == GRPQUOTA && !in_egroup_p(id))) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + } + else if (cmd != Q_V1_GETSTATS && cmd != Q_V2_GETSTATS && cmd != Q_V2_GETINFO && cmd != Q_COMP_SYNC) + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return 0; +} + +#ifdef CONFIG_QIFACE_V1 +static int v1_set_rsquash(struct super_block *sb, int type, int flag) +{ + struct if_dqinfo info; + + info.dqi_valid = IIF_FLAGS; + info.dqi_flags = flag ? 
V1_DQF_RSQUASH : 0; + return sb->s_qcop->set_info(sb, type, &info); +} + +static int v1_get_dqblk(struct super_block *sb, int type, qid_t id, struct v1c_mem_dqblk *mdq) +{ + struct if_dqblk idq; + int ret; + + if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)) < 0) + return ret; + mdq->dqb_ihardlimit = idq.dqb_ihardlimit; + mdq->dqb_isoftlimit = idq.dqb_isoftlimit; + mdq->dqb_curinodes = idq.dqb_curinodes; + mdq->dqb_bhardlimit = idq.dqb_bhardlimit; + mdq->dqb_bsoftlimit = idq.dqb_bsoftlimit; + mdq->dqb_curblocks = toqb(idq.dqb_curspace); + mdq->dqb_itime = idq.dqb_itime; + mdq->dqb_btime = idq.dqb_btime; + if (id == 0) { /* Times for id 0 are in fact grace times */ + struct if_dqinfo info; + + if ((ret = sb->s_qcop->get_info(sb, type, &info)) < 0) + return ret; + mdq->dqb_btime = info.dqi_bgrace; + mdq->dqb_itime = info.dqi_igrace; + } + return 0; +} + +static int v1_set_dqblk(struct super_block *sb, int type, int cmd, qid_t id, struct v1c_mem_dqblk *mdq) +{ + struct if_dqblk idq; + int ret; + + idq.dqb_valid = 0; + if (cmd == Q_V1_SETQUOTA || cmd == Q_V1_SETQLIM) { + idq.dqb_ihardlimit = mdq->dqb_ihardlimit; + idq.dqb_isoftlimit = mdq->dqb_isoftlimit; + idq.dqb_bhardlimit = mdq->dqb_bhardlimit; + idq.dqb_bsoftlimit = mdq->dqb_bsoftlimit; + idq.dqb_valid |= QIF_LIMITS; + } + if (cmd == Q_V1_SETQUOTA || cmd == Q_V1_SETUSE) { + idq.dqb_curinodes = mdq->dqb_curinodes; + idq.dqb_curspace = ((qsize_t)mdq->dqb_curblocks) << QUOTABLOCK_BITS; + idq.dqb_valid |= QIF_USAGE; + } + ret = sb->s_qcop->set_dqblk(sb, type, id, &idq); + if (!ret && id == 0 && cmd == Q_V1_SETQUOTA) { /* Times for id 0 are in fact grace times */ + struct if_dqinfo info; + + info.dqi_bgrace = mdq->dqb_btime; + info.dqi_igrace = mdq->dqb_itime; + info.dqi_valid = IIF_BGRACE | IIF_IGRACE; + ret = sb->s_qcop->set_info(sb, type, &info); + } + return ret; +} + +static void v1_get_stats(struct v1c_dqstats *dst) +{ + memcpy(dst, &dqstats, sizeof(dqstats)); +} +#endif + +#ifdef CONFIG_QIFACE_V2 +static int v2_get_info(struct super_block *sb, int type, struct v2c_mem_dqinfo *oinfo) +{ + struct if_dqinfo info; + int ret; + + if ((ret = sb->s_qcop->get_info(sb, type, &info)) < 0) + return ret; + oinfo->dqi_bgrace = info.dqi_bgrace; + oinfo->dqi_igrace = info.dqi_igrace; + oinfo->dqi_flags = info.dqi_flags; + oinfo->dqi_blocks = sb_dqopt(sb)->info[type].u.v2_i.dqi_blocks; + oinfo->dqi_free_blk = sb_dqopt(sb)->info[type].u.v2_i.dqi_free_blk; + oinfo->dqi_free_entry = sb_dqopt(sb)->info[type].u.v2_i.dqi_free_entry; + return 0; +} + +static int v2_set_info(struct super_block *sb, int type, int cmd, struct v2c_mem_dqinfo *oinfo) +{ + struct if_dqinfo info; + + info.dqi_valid = 0; + if (cmd == Q_V2_SETGRACE || cmd == Q_V2_SETINFO) { + info.dqi_bgrace = oinfo->dqi_bgrace; + info.dqi_igrace = oinfo->dqi_igrace; + info.dqi_valid |= IIF_BGRACE | IIF_IGRACE; + } + if (cmd == Q_V2_SETFLAGS || cmd == Q_V2_SETINFO) { + info.dqi_flags = oinfo->dqi_flags; + info.dqi_valid |= IIF_FLAGS; + } + /* We don't simulate deadly effects of setting other parameters ;-) */ + return sb->s_qcop->set_info(sb, type, &info); +} + +static int v2_get_dqblk(struct super_block *sb, int type, qid_t id, struct v2c_mem_dqblk *mdq) +{ + struct if_dqblk idq; + int ret; + + if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)) < 0) + return ret; + mdq->dqb_ihardlimit = idq.dqb_ihardlimit; + mdq->dqb_isoftlimit = idq.dqb_isoftlimit; + mdq->dqb_curinodes = idq.dqb_curinodes; + mdq->dqb_bhardlimit = idq.dqb_bhardlimit; + mdq->dqb_bsoftlimit = idq.dqb_bsoftlimit; + 
mdq->dqb_curspace = idq.dqb_curspace; + mdq->dqb_itime = idq.dqb_itime; + mdq->dqb_btime = idq.dqb_btime; + return 0; +} + +static int v2_set_dqblk(struct super_block *sb, int type, int cmd, qid_t id, struct v2c_mem_dqblk *mdq) +{ + struct if_dqblk idq; + + idq.dqb_valid = 0; + if (cmd == Q_V2_SETQUOTA || cmd == Q_V2_SETQLIM) { + idq.dqb_ihardlimit = mdq->dqb_ihardlimit; + idq.dqb_isoftlimit = mdq->dqb_isoftlimit; + idq.dqb_bhardlimit = mdq->dqb_bhardlimit; + idq.dqb_bsoftlimit = mdq->dqb_bsoftlimit; + idq.dqb_valid |= QIF_LIMITS; + } + if (cmd == Q_V2_SETQUOTA || cmd == Q_V2_SETUSE) { + idq.dqb_curinodes = mdq->dqb_curinodes; + idq.dqb_curspace = mdq->dqb_curspace; + idq.dqb_valid |= QIF_USAGE; + } + return sb->s_qcop->set_dqblk(sb, type, id, &idq); +} + +static void v2_get_stats(struct v2c_dqstats *dst) +{ + memcpy(dst, &dqstats, sizeof(dqstats)); + dst->version = __DQUOT_NUM_VERSION__; +} +#endif + +/* Handle requests to old interface */ +static int do_compat_quotactl(struct super_block *sb, int type, int cmd, qid_t id, caddr_t addr) +{ + int ret; + + switch (cmd) { + case Q_COMP_QUOTAON: { + char *pathname; + + if (IS_ERR(pathname = getname(addr))) + return PTR_ERR(pathname); +#ifdef CONFIG_QIFACE_V1 + ret = sb->s_qcop->quota_on(sb, type, QFMT_VFS_OLD, pathname); +#else + ret = sb->s_qcop->quota_on(sb, type, QFMT_VFS_V0, pathname); +#endif + putname(pathname); + return ret; + } + case Q_COMP_QUOTAOFF: + return sb->s_qcop->quota_off(sb, type); + case Q_COMP_SYNC: + if (sb) + return sb->s_qcop->quota_sync(sb, type); + sync_dquots_dev(NODEV, type); + return 0; +#ifdef CONFIG_QIFACE_V1 + case Q_V1_RSQUASH: { + int flag; + + if (copy_from_user(&flag, addr, sizeof(flag))) + return -EFAULT; + return v1_set_rsquash(sb, type, flag); + } + case Q_V1_GETQUOTA: { + struct v1c_mem_dqblk mdq; + + if ((ret = v1_get_dqblk(sb, type, id, &mdq))) + return ret; + if (copy_to_user(addr, &mdq, sizeof(mdq))) + return -EFAULT; + return 0; + } + case Q_V1_SETQLIM: + case Q_V1_SETUSE: + case Q_V1_SETQUOTA: { + struct v1c_mem_dqblk mdq; + + if (copy_from_user(&mdq, addr, sizeof(mdq))) + return -EFAULT; + return v1_set_dqblk(sb, type, cmd, id, &mdq); + } + case Q_V1_GETSTATS: { + struct v1c_dqstats dst; + + v1_get_stats(&dst); + if (copy_to_user(addr, &dst, sizeof(dst))) + return -EFAULT; + return 0; + } +#endif +#ifdef CONFIG_QIFACE_V2 + case Q_V2_GETINFO: { + struct v2c_mem_dqinfo info; + + if ((ret = v2_get_info(sb, type, &info))) + return ret; + if (copy_to_user(addr, &info, sizeof(info))) + return -EFAULT; + return 0; + } + case Q_V2_SETFLAGS: + case Q_V2_SETGRACE: + case Q_V2_SETINFO: { + struct v2c_mem_dqinfo info; + + if (copy_from_user(&info, addr, sizeof(info))) + return -EFAULT; + + return v2_set_info(sb, type, cmd, &info); + } + case Q_V2_GETQUOTA: { + struct v2c_mem_dqblk mdq; + + if ((ret = v2_get_dqblk(sb, type, id, &mdq))) + return ret; + if (copy_to_user(addr, &mdq, sizeof(mdq))) + return -EFAULT; + return 0; + } + case Q_V2_SETUSE: + case Q_V2_SETQLIM: + case Q_V2_SETQUOTA: { + struct v2c_mem_dqblk mdq; + + if (copy_from_user(&mdq, addr, sizeof(mdq))) + return -EFAULT; + return v2_set_dqblk(sb, type, cmd, id, &mdq); + } + case Q_V2_GETSTATS: { + struct v2c_dqstats dst; + + v2_get_stats(&dst); + if (copy_to_user(addr, &dst, sizeof(dst))) + return -EFAULT; + return 0; + } +#endif + } + BUG(); + return 0; +} +#endif + +/* Macros for short-circuiting the compatibility tests */ +#define NEW_COMMAND(c) ((c) & (0x80 << 16)) +#define XQM_COMMAND(c) (((c) & ('X' << 8)) == ('X' << 8)) + +/* + * This is 
the system call interface. This communicates with + * the user-level programs. Currently this only supports diskquota + * calls. Maybe we need to add the process quotas etc. in the future, + * but we probably should use rlimits for that. + */ +asmlinkage long sys_quotactl(unsigned int cmd, const char *special, qid_t id, caddr_t addr) +{ + uint cmds, type; + struct super_block *sb = NULL; + int ret = -EINVAL; + + lock_kernel(); + cmds = cmd >> SUBCMDSHIFT; + type = cmd & SUBCMDMASK; + +#ifdef CONFIG_QIFACE_COMPAT + if (cmds != Q_V1_GETSTATS && cmds != Q_V2_GETSTATS && ((cmds != Q_COMP_SYNC && cmds != Q_SYNC) || special)) + if (IS_ERR(sb = resolve_dev(special))) { + ret = PTR_ERR(sb); + sb = NULL; + goto out; + } + if (!NEW_COMMAND(cmds) && !XQM_COMMAND(cmds)) { + if ((ret = check_compat_quotactl_valid(sb, type, cmds, id)) < 0) + goto out; + ret = do_compat_quotactl(sb, type, cmds, id, addr); + goto out; + } +#else + if (cmds != Q_SYNC || special) + if (IS_ERR(sb = resolve_dev(special))) { + ret = PTR_ERR(sb); + sb = NULL; + goto out; + } +#endif + if ((ret = check_quotactl_valid(sb, type, cmds, id)) < 0) + goto out; + ret = do_quotactl(sb, type, cmds, id, addr); +out: + if (sb) + drop_super(sb); + unlock_kernel(); + return ret; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/quota_v1.c linux.21rc5-ac2/fs/quota_v1.c --- linux.21rc5/fs/quota_v1.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/quota_v1.c 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,239 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d) +{ + m->dqb_ihardlimit = d->dqb_ihardlimit; + m->dqb_isoftlimit = d->dqb_isoftlimit; + m->dqb_curinodes = d->dqb_curinodes; + m->dqb_bhardlimit = d->dqb_bhardlimit; + m->dqb_bsoftlimit = d->dqb_bsoftlimit; + m->dqb_curspace = d->dqb_curblocks << QUOTABLOCK_BITS; + m->dqb_itime = d->dqb_itime; + m->dqb_btime = d->dqb_btime; +} + +static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m) +{ + d->dqb_ihardlimit = m->dqb_ihardlimit; + d->dqb_isoftlimit = m->dqb_isoftlimit; + d->dqb_curinodes = m->dqb_curinodes; + d->dqb_bhardlimit = m->dqb_bhardlimit; + d->dqb_bsoftlimit = m->dqb_bsoftlimit; + d->dqb_curblocks = toqb(m->dqb_curspace); + d->dqb_itime = m->dqb_itime; + d->dqb_btime = m->dqb_btime; +} + +static int v1_read_dqblk(struct dquot *dquot) +{ + int type = dquot->dq_type; + struct file *filp; + mm_segment_t fs; + loff_t offset; + struct v1_disk_dqblk dqblk; + + filp = sb_dqopt(dquot->dq_sb)->files[type]; + if (filp == (struct file *)NULL) + return -EINVAL; + + /* Now we are sure filp is valid */ + offset = v1_dqoff(dquot->dq_id); + fs = get_fs(); + set_fs(KERNEL_DS); + filp->f_op->read(filp, (char *)&dqblk, sizeof(struct v1_disk_dqblk), &offset); + set_fs(fs); + + v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); + if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && + dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) + dquot->dq_flags |= DQ_FAKE; + dqstats.reads++; + return 0; +} + +static int v1_commit_dqblk(struct dquot *dquot) +{ + short type = dquot->dq_type; + struct file *filp; + mm_segment_t fs; + loff_t offset; + ssize_t ret; + struct v1_disk_dqblk dqblk; + + filp = sb_dqopt(dquot->dq_sb)->files[type]; + offset = v1_dqoff(dquot->dq_id); + fs = get_fs(); + set_fs(KERNEL_DS); + + /* + * Note: clear the DQ_MOD flag unconditionally, + * so we don't loop 
forever on failure. + */ + v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); + dquot->dq_flags &= ~DQ_MOD; + if (dquot->dq_id == 0) { + dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; + dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; + } + ret = 0; + if (filp) + ret = filp->f_op->write(filp, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), &offset); + if (ret != sizeof(struct v1_disk_dqblk)) { + printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", + kdevname(dquot->dq_dev)); + if (ret >= 0) + ret = -EIO; + goto out; + } + ret = 0; + +out: + set_fs(fs); + dqstats.writes++; + return ret; +} + +/* Magics of new quota format */ +#define V2_INITQMAGICS {\ + 0xd9c01f11, /* USRQUOTA */\ + 0xd9c01927 /* GRPQUOTA */\ +} + +/* Header of new quota format */ +struct v2_disk_dqheader { + __u32 dqh_magic; /* Magic number identifying file */ + __u32 dqh_version; /* File version */ +}; + +static int v1_check_quota_file(struct super_block *sb, int type) +{ + struct file *f = sb_dqopt(sb)->files[type]; + struct inode *inode = f->f_dentry->d_inode; + ulong blocks; + size_t off; + struct v2_disk_dqheader dqhead; + mm_segment_t fs; + ssize_t size; + loff_t offset = 0; + static const uint quota_magics[] = V2_INITQMAGICS; + + if (!inode->i_size) + return 0; + blocks = inode->i_size >> BLOCK_SIZE_BITS; + off = inode->i_size & (BLOCK_SIZE - 1); + if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) + return 0; + /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ + fs = get_fs(); + set_fs(KERNEL_DS); + size = f->f_op->read(f, (char *)&dqhead, sizeof(struct v2_disk_dqheader), &offset); + set_fs(fs); + if (size != sizeof(struct v2_disk_dqheader)) + return 1; /* Probably not new format */ + if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) + return 1; /* Definitely not new format */ + printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", kdevname(sb->s_dev)); + return 0; /* Seems like a new format file -> refuse it */ +} + +static int v1_read_file_info(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + mm_segment_t fs; + loff_t offset; + struct file *filp = dqopt->files[type]; + struct v1_disk_dqblk dqblk; + int ret; + + down(&dqopt->dqio_sem); + offset = v1_dqoff(0); + fs = get_fs(); + set_fs(KERNEL_DS); + if ((ret = filp->f_op->read(filp, (char *)&dqblk, sizeof(struct v1_disk_dqblk), &offset)) != sizeof(struct v1_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + goto out; + } + ret = 0; + dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; + dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? 
dqblk.dqb_btime : MAX_DQ_TIME; +out: + up(&dqopt->dqio_sem); + set_fs(fs); + return ret; +} + +static int v1_write_file_info(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + mm_segment_t fs; + struct file *filp = dqopt->files[type]; + struct v1_disk_dqblk dqblk; + loff_t offset; + int ret; + + down(&dqopt->dqio_sem); + dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; + offset = v1_dqoff(0); + fs = get_fs(); + set_fs(KERNEL_DS); + if ((ret = filp->f_op->read(filp, (char *)&dqblk, sizeof(struct v1_disk_dqblk), &offset)) != sizeof(struct v1_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + goto out; + } + dqblk.dqb_itime = dqopt->info[type].dqi_igrace; + dqblk.dqb_btime = dqopt->info[type].dqi_bgrace; + offset = v1_dqoff(0); + ret = filp->f_op->write(filp, (char *)&dqblk, sizeof(struct v1_disk_dqblk), &offset); + if (ret == sizeof(struct v1_disk_dqblk)) + ret = 0; + else if (ret > 0) + ret = -EIO; +out: + up(&dqopt->dqio_sem); + set_fs(fs); + return ret; +} + +static struct quota_format_ops v1_format_ops = { + check_quota_file: v1_check_quota_file, + read_file_info: v1_read_file_info, + write_file_info: v1_write_file_info, + free_file_info: NULL, + read_dqblk: v1_read_dqblk, + commit_dqblk: v1_commit_dqblk, +}; + +static struct quota_format_type v1_quota_format = { + qf_fmt_id: QFMT_VFS_OLD, + qf_ops: &v1_format_ops, + qf_owner: THIS_MODULE +}; + +static int __init init_v1_quota_format(void) +{ + return register_quota_format(&v1_quota_format); +} + +static void __exit exit_v1_quota_format(void) +{ + unregister_quota_format(&v1_quota_format); +} + +EXPORT_NO_SYMBOLS; + +module_init(init_v1_quota_format); +module_exit(exit_v1_quota_format); + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/quota_v2.c linux.21rc5-ac2/fs/quota_v2.c --- linux.21rc5/fs/quota_v2.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/quota_v2.c 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,690 @@ +/* + * vfsv0 quota IO operations on file + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define __QUOTA_V2_PARANOIA + +typedef char *dqbuf_t; + +#define GETIDINDEX(id, depth) (((id) >> ((V2_DQTREEDEPTH-(depth)-1)*8)) & 0xff) +#define GETENTRIES(buf) ((struct v2_disk_dqblk *)(((char *)buf)+sizeof(struct v2_disk_dqdbheader))) + +/* Check whether given file is really vfsv0 quotafile */ +static int v2_check_quota_file(struct super_block *sb, int type) +{ + struct v2_disk_dqheader dqhead; + struct file *f = sb_dqopt(sb)->files[type]; + mm_segment_t fs; + ssize_t size; + loff_t offset = 0; + static const uint quota_magics[] = V2_INITQMAGICS; + static const uint quota_versions[] = V2_INITQVERSIONS; + + fs = get_fs(); + set_fs(KERNEL_DS); + size = f->f_op->read(f, (char *)&dqhead, sizeof(struct v2_disk_dqheader), &offset); + set_fs(fs); + if (size != sizeof(struct v2_disk_dqheader)) + return 0; + if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || + le32_to_cpu(dqhead.dqh_version) != quota_versions[type]) + return 0; + return 1; +} + +/* Read information header from quota file */ +static int v2_read_file_info(struct super_block *sb, int type) +{ + mm_segment_t fs; + struct v2_disk_dqinfo dinfo; + struct mem_dqinfo *info = sb_dqopt(sb)->info+type; + struct file *f = sb_dqopt(sb)->files[type]; + ssize_t size; + loff_t offset = V2_DQINFOOFF; + + fs = get_fs(); + set_fs(KERNEL_DS); + size = f->f_op->read(f, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), &offset); + set_fs(fs); + if (size != 
sizeof(struct v2_disk_dqinfo)) { + printk(KERN_WARNING "Can't read info structure on device %s.\n", + kdevname(f->f_dentry->d_sb->s_dev)); + return -1; + } + info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); + info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); + info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); + info->u.v2_i.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); + info->u.v2_i.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); + info->u.v2_i.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); + return 0; +} + +/* Write information header to quota file */ +static int v2_write_file_info(struct super_block *sb, int type) +{ + mm_segment_t fs; + struct v2_disk_dqinfo dinfo; + struct mem_dqinfo *info = sb_dqopt(sb)->info+type; + struct file *f = sb_dqopt(sb)->files[type]; + ssize_t size; + loff_t offset = V2_DQINFOOFF; + + info->dqi_flags &= ~DQF_INFO_DIRTY; + dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); + dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); + dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); + dinfo.dqi_blocks = cpu_to_le32(info->u.v2_i.dqi_blocks); + dinfo.dqi_free_blk = cpu_to_le32(info->u.v2_i.dqi_free_blk); + dinfo.dqi_free_entry = cpu_to_le32(info->u.v2_i.dqi_free_entry); + fs = get_fs(); + set_fs(KERNEL_DS); + size = f->f_op->write(f, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), &offset); + set_fs(fs); + if (size != sizeof(struct v2_disk_dqinfo)) { + printk(KERN_WARNING "Can't write info structure on device %s.\n", + kdevname(f->f_dentry->d_sb->s_dev)); + return -1; + } + return 0; +} + +static void disk2memdqb(struct mem_dqblk *m, struct v2_disk_dqblk *d) +{ + m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); + m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); + m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); + m->dqb_itime = le64_to_cpu(d->dqb_itime); + m->dqb_bhardlimit = le32_to_cpu(d->dqb_bhardlimit); + m->dqb_bsoftlimit = le32_to_cpu(d->dqb_bsoftlimit); + m->dqb_curspace = le64_to_cpu(d->dqb_curspace); + m->dqb_btime = le64_to_cpu(d->dqb_btime); +} + +static void mem2diskdqb(struct v2_disk_dqblk *d, struct mem_dqblk *m, qid_t id) +{ + d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); + d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); + d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); + d->dqb_itime = cpu_to_le64(m->dqb_itime); + d->dqb_bhardlimit = cpu_to_le32(m->dqb_bhardlimit); + d->dqb_bsoftlimit = cpu_to_le32(m->dqb_bsoftlimit); + d->dqb_curspace = cpu_to_le64(m->dqb_curspace); + d->dqb_btime = cpu_to_le64(m->dqb_btime); + d->dqb_id = cpu_to_le32(id); +} + +static dqbuf_t getdqbuf(void) +{ + dqbuf_t buf = kmalloc(V2_DQBLKSIZE, GFP_KERNEL); + if (!buf) + printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); + return buf; +} + +static inline void freedqbuf(dqbuf_t buf) +{ + kfree(buf); +} + +static ssize_t read_blk(struct file *filp, uint blk, dqbuf_t buf) +{ + mm_segment_t fs; + ssize_t ret; + loff_t offset = blk<<V2_DQBLKSIZE_BITS; + + fs = get_fs(); + set_fs(KERNEL_DS); + ret = filp->f_op->read(filp, (char *)buf, V2_DQBLKSIZE, &offset); + set_fs(fs); + return ret; +} + +static ssize_t write_blk(struct file *filp, uint blk, dqbuf_t buf) +{ + mm_segment_t fs; + ssize_t ret; + loff_t offset = blk<<V2_DQBLKSIZE_BITS; + + fs = get_fs(); + set_fs(KERNEL_DS); + ret = filp->f_op->write(filp, (char *)buf, V2_DQBLKSIZE, &offset); + set_fs(fs); + return ret; + +} + +/* Remove empty block from list and return it */ +static int get_free_dqblk(struct file *filp, struct mem_dqinfo *info) +{ + dqbuf_t buf = getdqbuf(); + struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; + int ret, blk; + + if (!buf) + return -ENOMEM; + if (info->u.v2_i.dqi_free_blk) { + blk =
info->u.v2_i.dqi_free_blk; + if ((ret = read_blk(filp, blk, buf)) < 0) + goto out_buf; + info->u.v2_i.dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); + } + else { + memset(buf, 0, V2_DQBLKSIZE); + if ((ret = write_blk(filp, info->u.v2_i.dqi_blocks, buf)) < 0) /* Assure block allocation... */ + goto out_buf; + blk = info->u.v2_i.dqi_blocks++; + } + mark_info_dirty(info); + ret = blk; +out_buf: + freedqbuf(buf); + return ret; +} + +/* Insert empty block to the list */ +static int put_free_dqblk(struct file *filp, struct mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; + int err; + + dh->dqdh_next_free = cpu_to_le32(info->u.v2_i.dqi_free_blk); + dh->dqdh_prev_free = cpu_to_le32(0); + dh->dqdh_entries = cpu_to_le16(0); + info->u.v2_i.dqi_free_blk = blk; + mark_info_dirty(info); + if ((err = write_blk(filp, blk, buf)) < 0) /* Some strange block. We had better leave it... */ + return err; + return 0; +} + +/* Remove given block from the list of blocks with free entries */ +static int remove_free_dqentry(struct file *filp, struct mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(); + struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; + uint nextblk = le32_to_cpu(dh->dqdh_next_free), prevblk = le32_to_cpu(dh->dqdh_prev_free); + int err; + + if (!tmpbuf) + return -ENOMEM; + if (nextblk) { + if ((err = read_blk(filp, nextblk, tmpbuf)) < 0) + goto out_buf; + ((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = dh->dqdh_prev_free; + if ((err = write_blk(filp, nextblk, tmpbuf)) < 0) + goto out_buf; + } + if (prevblk) { + if ((err = read_blk(filp, prevblk, tmpbuf)) < 0) + goto out_buf; + ((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_next_free = dh->dqdh_next_free; + if ((err = write_blk(filp, prevblk, tmpbuf)) < 0) + goto out_buf; + } + else { + info->u.v2_i.dqi_free_entry = nextblk; + mark_info_dirty(info); + } + freedqbuf(tmpbuf); + dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); + if (write_blk(filp, blk, buf) < 0) /* No matter whether write succeeds block is out of list */ + printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Insert given block to the beginning of list with free entries */ +static int insert_free_dqentry(struct file *filp, struct mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(); + struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; + int err; + + if (!tmpbuf) + return -ENOMEM; + dh->dqdh_next_free = cpu_to_le32(info->u.v2_i.dqi_free_entry); + dh->dqdh_prev_free = cpu_to_le32(0); + if ((err = write_blk(filp, blk, buf)) < 0) + goto out_buf; + if (info->u.v2_i.dqi_free_entry) { + if ((err = read_blk(filp, info->u.v2_i.dqi_free_entry, tmpbuf)) < 0) + goto out_buf; + ((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = cpu_to_le32(blk); + if ((err = write_blk(filp, info->u.v2_i.dqi_free_entry, tmpbuf)) < 0) + goto out_buf; + } + freedqbuf(tmpbuf); + info->u.v2_i.dqi_free_entry = blk; + mark_info_dirty(info); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Find space for dquot */ +static uint find_free_dqentry(struct dquot *dquot, int *err) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + struct mem_dqinfo *info = sb_dqopt(dquot->dq_sb)->info+dquot->dq_type; + uint blk, i; + struct v2_disk_dqdbheader *dh; + struct v2_disk_dqblk *ddquot; + struct v2_disk_dqblk fakedquot; + dqbuf_t buf; + + *err = 0; + if (!(buf 
= getdqbuf())) { + *err = -ENOMEM; + return 0; + } + dh = (struct v2_disk_dqdbheader *)buf; + ddquot = GETENTRIES(buf); + if (info->u.v2_i.dqi_free_entry) { + blk = info->u.v2_i.dqi_free_entry; + if ((*err = read_blk(filp, blk, buf)) < 0) + goto out_buf; + } + else { + blk = get_free_dqblk(filp, info); + if ((int)blk < 0) { + *err = blk; + return 0; + } + memset(buf, 0, V2_DQBLKSIZE); + info->u.v2_i.dqi_free_entry = blk; /* This is enough as block is already zeroed and entry list is empty... */ + mark_info_dirty(info); + } + if (le16_to_cpu(dh->dqdh_entries)+1 >= V2_DQSTRINBLK) /* Block will be full? */ + if ((*err = remove_free_dqentry(filp, info, buf, blk)) < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't remove block (%u) from entry free list.\n", blk); + goto out_buf; + } + dh->dqdh_entries = cpu_to_le16(le16_to_cpu(dh->dqdh_entries)+1); + memset(&fakedquot, 0, sizeof(struct v2_disk_dqblk)); + /* Find free structure in block */ + for (i = 0; i < V2_DQSTRINBLK && memcmp(&fakedquot, ddquot+i, sizeof(struct v2_disk_dqblk)); i++); +#ifdef __QUOTA_V2_PARANOIA + if (i == V2_DQSTRINBLK) { + printk(KERN_ERR "VFS: find_free_dqentry(): Data block full but it shouldn't.\n"); + *err = -EIO; + goto out_buf; + } +#endif + if ((*err = write_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota data block %u.\n", blk); + goto out_buf; + } + dquot->dq_off = (blk<<V2_DQBLKSIZE_BITS)+sizeof(struct v2_disk_dqdbheader)+i*sizeof(struct v2_disk_dqblk); + freedqbuf(buf); + return blk; +out_buf: + freedqbuf(buf); + return 0; +} + +/* Insert reference to structure into the trie */ +static int do_insert_tree(struct dquot *dquot, uint *treeblk, int depth) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + struct mem_dqinfo *info = sb_dqopt(dquot->dq_sb)->info + dquot->dq_type; + dqbuf_t buf; + int ret = 0, newson = 0, newact = 0; + u32 *ref; + uint newblk; + + if (!(buf = getdqbuf())) + return -ENOMEM; + if (!*treeblk) { + ret = get_free_dqblk(filp, info); + if (ret < 0) + goto out_buf; + *treeblk = ret; + memset(buf, 0, V2_DQBLKSIZE); + newact = 1; + } + else { + if ((ret = read_blk(filp, *treeblk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read tree quota block %u.\n", *treeblk); + goto out_buf; + } + } + ref = (u32 *)buf; + newblk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]); + if (!newblk) + newson = 1; + if (depth == V2_DQTREEDEPTH-1) { +#ifdef __QUOTA_V2_PARANOIA + if (newblk) { + printk(KERN_ERR "VFS: Inserting already present quota entry (block %u).\n", ref[GETIDINDEX(dquot->dq_id, depth)]); + ret = -EIO; + goto out_buf; + } +#endif + newblk = find_free_dqentry(dquot, &ret); + } + else + ret = do_insert_tree(dquot, &newblk, depth+1); + if (newson && ret >= 0) { + ref[GETIDINDEX(dquot->dq_id, depth)] = cpu_to_le32(newblk); + ret = write_blk(filp, *treeblk, buf); + } + else if (newact && ret < 0) + put_free_dqblk(filp, info, buf, *treeblk); +out_buf: + freedqbuf(buf); + return ret; +} + +/* Wrapper for inserting quota structure into tree */ +static inline int dq_insert_tree(struct dquot *dquot) +{ + int tmp = V2_DQTREEOFF; + return do_insert_tree(dquot, &tmp, 0); +} + +/* + * We don't have to be afraid of deadlocks as we never have quotas on quota files...
+ */ +static int v2_write_dquot(struct dquot *dquot) +{ + int type = dquot->dq_type; + struct file *filp; + mm_segment_t fs; + loff_t offset; + ssize_t ret; + struct v2_disk_dqblk ddquot; + + if (!dquot->dq_off) + if ((ret = dq_insert_tree(dquot)) < 0) { + printk(KERN_ERR "VFS: Error %d occured while creating quota.\n", ret); + return ret; + } + filp = sb_dqopt(dquot->dq_sb)->files[type]; + offset = dquot->dq_off; + mem2diskdqb(&ddquot, &dquot->dq_dqb, dquot->dq_id); + fs = get_fs(); + set_fs(KERNEL_DS); + ret = filp->f_op->write(filp, (char *)&ddquot, sizeof(struct v2_disk_dqblk), &offset); + set_fs(fs); + if (ret != sizeof(struct v2_disk_dqblk)) { + printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", kdevname(dquot->dq_dev)); + if (ret >= 0) + ret = -ENOSPC; + } + else + ret = 0; + dqstats.writes++; + return ret; +} + +/* Free dquot entry in data block */ +static int free_dqentry(struct dquot *dquot, uint blk) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + struct mem_dqinfo *info = sb_dqopt(dquot->dq_sb)->info + dquot->dq_type; + struct v2_disk_dqdbheader *dh; + dqbuf_t buf = getdqbuf(); + int ret = 0; + + if (!buf) + return -ENOMEM; + if (dquot->dq_off >> V2_DQBLKSIZE_BITS != blk) { + printk(KERN_ERR "VFS: Quota structure has offset to other block (%u) than it should (%u).\n", blk, (uint)(dquot->dq_off >> V2_DQBLKSIZE_BITS)); + goto out_buf; + } + if ((ret = read_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); + goto out_buf; + } + dh = (struct v2_disk_dqdbheader *)buf; + dh->dqdh_entries = cpu_to_le16(le16_to_cpu(dh->dqdh_entries)-1); + if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */ + if ((ret = remove_free_dqentry(filp, info, buf, blk)) < 0 || + (ret = put_free_dqblk(filp, info, buf, blk)) < 0) { + printk(KERN_ERR "VFS: Can't move quota data block (%u) to free list.\n", blk); + goto out_buf; + } + } + else { + memset(buf+(dquot->dq_off & ((1 << V2_DQBLKSIZE_BITS)-1)), 0, sizeof(struct v2_disk_dqblk)); + if (le16_to_cpu(dh->dqdh_entries) == V2_DQSTRINBLK-1) { + /* Insert will write block itself */ + if ((ret = insert_free_dqentry(filp, info, buf, blk)) < 0) { + printk(KERN_ERR "VFS: Can't insert quota data block (%u) to free entry list.\n", blk); + goto out_buf; + } + } + else + if ((ret = write_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't write quota data block %u\n", blk); + goto out_buf; + } + } + dquot->dq_off = 0; /* Quota is now unattached */ +out_buf: + freedqbuf(buf); + return ret; +} + +/* Remove reference to dquot from tree */ +static int remove_tree(struct dquot *dquot, uint *blk, int depth) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + struct mem_dqinfo *info = sb_dqopt(dquot->dq_sb)->info + dquot->dq_type; + dqbuf_t buf = getdqbuf(); + int ret = 0; + uint newblk; + u32 *ref = (u32 *)buf; + + if (!buf) + return -ENOMEM; + if ((ret = read_blk(filp, *blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); + goto out_buf; + } + newblk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]); + if (depth == V2_DQTREEDEPTH-1) { + ret = free_dqentry(dquot, newblk); + newblk = 0; + } + else + ret = remove_tree(dquot, &newblk, depth+1); + if (ret >= 0 && !newblk) { + int i; + ref[GETIDINDEX(dquot->dq_id, depth)] = cpu_to_le32(0); + for (i = 0; i < V2_DQBLKSIZE && !buf[i]; i++); /* Block got empty? 
*/ + if (i == V2_DQBLKSIZE) { + put_free_dqblk(filp, info, buf, *blk); + *blk = 0; + } + else + if ((ret = write_blk(filp, *blk, buf)) < 0) + printk(KERN_ERR "VFS: Can't write quota tree block %u.\n", *blk); + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Delete dquot from tree */ +static int v2_delete_dquot(struct dquot *dquot) +{ + uint tmp = V2_DQTREEOFF; + + if (!dquot->dq_off) /* Even not allocated? */ + return 0; + return remove_tree(dquot, &tmp, 0); +} + +/* Find entry in block */ +static loff_t find_block_dqentry(struct dquot *dquot, uint blk) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + dqbuf_t buf = getdqbuf(); + loff_t ret = 0; + int i; + struct v2_disk_dqblk *ddquot = GETENTRIES(buf); + + if (!buf) + return -ENOMEM; + if ((ret = read_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + if (dquot->dq_id) + for (i = 0; i < V2_DQSTRINBLK && le32_to_cpu(ddquot[i].dqb_id) != dquot->dq_id; i++); + else { /* ID 0 as a bit more complicated searching... */ + struct v2_disk_dqblk fakedquot; + + memset(&fakedquot, 0, sizeof(struct v2_disk_dqblk)); + for (i = 0; i < V2_DQSTRINBLK; i++) + if (!le32_to_cpu(ddquot[i].dqb_id) && memcmp(&fakedquot, ddquot+i, sizeof(struct v2_disk_dqblk))) + break; + } + if (i == V2_DQSTRINBLK) { + printk(KERN_ERR "VFS: Quota for id %u referenced but not present.\n", dquot->dq_id); + ret = -EIO; + goto out_buf; + } + else + ret = (blk << V2_DQBLKSIZE_BITS) + sizeof(struct v2_disk_dqdbheader) + i * sizeof(struct v2_disk_dqblk); +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree */ +static loff_t find_tree_dqentry(struct dquot *dquot, uint blk, int depth) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + dqbuf_t buf = getdqbuf(); + loff_t ret = 0; + u32 *ref = (u32 *)buf; + + if (!buf) + return -ENOMEM; + if ((ret = read_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + ret = 0; + blk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]); + if (!blk) /* No reference? */ + goto out_buf; + if (depth < V2_DQTREEDEPTH-1) + ret = find_tree_dqentry(dquot, blk, depth+1); + else + ret = find_block_dqentry(dquot, blk); +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree - wrapper function */ +static inline loff_t find_dqentry(struct dquot *dquot) +{ + return find_tree_dqentry(dquot, V2_DQTREEOFF, 0); +} + +static int v2_read_dquot(struct dquot *dquot) +{ + int type = dquot->dq_type; + struct file *filp; + mm_segment_t fs; + loff_t offset; + struct v2_disk_dqblk ddquot; + int ret = 0; + + filp = sb_dqopt(dquot->dq_sb)->files[type]; + +#ifdef __QUOTA_V2_PARANOIA + if (!filp || !dquot->dq_sb) { /* Invalidated quota? */ + printk(KERN_ERR "VFS: Quota invalidated while reading!\n"); + return -EIO; + } +#endif + offset = find_dqentry(dquot); + if (offset <= 0) { /* Entry not present? 
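(A return of 0 from find_dqentry() simply means the id has no entry on disk yet: the in-core dquot is marked DQ_FAKE with zeroed usage and limits, and the read still succeeds. Only a negative offset is a genuine lookup error, and it is passed through as the return value after the same fake setup.)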
*/ + if (offset < 0) + printk(KERN_ERR "VFS: Can't read quota structure for id %u.\n", dquot->dq_id); + dquot->dq_off = 0; + dquot->dq_flags |= DQ_FAKE; + memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); + ret = offset; + } + else { + dquot->dq_off = offset; + fs = get_fs(); + set_fs(KERNEL_DS); + if ((ret = filp->f_op->read(filp, (char *)&ddquot, sizeof(struct v2_disk_dqblk), &offset)) != sizeof(struct v2_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + printk(KERN_ERR "VFS: Error while reading quota structure for id %u.\n", dquot->dq_id); + memset(&ddquot, 0, sizeof(struct v2_disk_dqblk)); + } + else + ret = 0; + set_fs(fs); + disk2memdqb(&dquot->dq_dqb, &ddquot); + } + dqstats.reads++; + return ret; +} + +/* Commit changes of dquot to disk - it might also mean deleting it when quota became fake one and user has no blocks... */ +static int v2_commit_dquot(struct dquot *dquot) +{ + /* We clear the flag everytime so we don't loop when there was an IO error... */ + dquot->dq_flags &= ~DQ_MOD; + if (dquot->dq_flags & DQ_FAKE && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) + return v2_delete_dquot(dquot); + else + return v2_write_dquot(dquot); +} + +static struct quota_format_ops v2_format_ops = { + check_quota_file: v2_check_quota_file, + read_file_info: v2_read_file_info, + write_file_info: v2_write_file_info, + free_file_info: NULL, + read_dqblk: v2_read_dquot, + commit_dqblk: v2_commit_dquot, +}; + +static struct quota_format_type v2_quota_format = { + qf_fmt_id: QFMT_VFS_V0, + qf_ops: &v2_format_ops, + qf_owner: THIS_MODULE +}; + +static int __init init_v2_quota_format(void) +{ + return register_quota_format(&v2_quota_format); +} + +static void __exit exit_v2_quota_format(void) +{ + unregister_quota_format(&v2_quota_format); +} + +EXPORT_NO_SYMBOLS; + +module_init(init_v2_quota_format); +module_exit(exit_v2_quota_format); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/reiserfs/buffer2.c linux.21rc5-ac2/fs/reiserfs/buffer2.c --- linux.21rc5/fs/reiserfs/buffer2.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/reiserfs/buffer2.c 2003-04-22 16:44:38.000000000 +0100 @@ -51,11 +51,11 @@ struct buffer_head * reiserfs_bread (struct super_block *super, int n_block, int n_size) { struct buffer_head *result; - PROC_EXP( unsigned int ctx_switches = kstat.context_swtch ); + PROC_EXP( unsigned int ctx_switches = nr_context_switches(); ); result = bread (super -> s_dev, n_block, n_size); PROC_INFO_INC( super, breads ); - PROC_EXP( if( kstat.context_swtch != ctx_switches ) + PROC_EXP( if( nr_context_switches() != ctx_switches ) PROC_INFO_INC( super, bread_miss ) ); return result; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/reiserfs/inode.c linux.21rc5-ac2/fs/reiserfs/inode.c --- linux.21rc5/fs/reiserfs/inode.c 2003-05-28 15:09:03.000000000 +0100 +++ linux.21rc5-ac2/fs/reiserfs/inode.c 2003-05-27 14:20:35.000000000 +0100 @@ -2181,10 +2181,11 @@ } } -static int reiserfs_direct_io(int rw, struct inode *inode, +static int reiserfs_direct_io(int rw, struct file *filp, struct kiobuf *iobuf, unsigned long blocknr, int blocksize) { + struct inode * inode = filp->f_dentry->d_inode->i_mapping->host; lock_kernel(); reiserfs_commit_for_tail(inode); unlock_kernel(); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/reiserfs/prints.c linux.21rc5-ac2/fs/reiserfs/prints.c --- linux.21rc5/fs/reiserfs/prints.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/fs/reiserfs/prints.c 2003-05-20 
20:20:26.000000000 +0100 @@ -159,7 +159,7 @@ *skip = 0; - while ((k = strstr (k, "%")) != NULL) + while ((k = strchr (k, '%')) != NULL) { if (k[1] == 'k' || k[1] == 'K' || k[1] == 'h' || k[1] == 't' || k[1] == 'z' || k[1] == 'b' || k[1] == 'y' || k[1] == 'a' ) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/seq_file.c linux.21rc5-ac2/fs/seq_file.c --- linux.21rc5/fs/seq_file.c 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/fs/seq_file.c 2003-04-22 18:27:29.000000000 +0100 @@ -1,7 +1,7 @@ /* * linux/fs/seq_file.c * - * helper functions for making syntetic files from sequences of records. + * helper functions for making synthetic files from sequences of records. * initial implementation -- AV, Oct 2001. */ @@ -10,6 +10,7 @@ #include #include +#include /** * seq_open - initialize sequential file @@ -214,7 +215,7 @@ while ((retval=traverse(m, offset)) == -EAGAIN) ; if (retval) { - /* with extreme perjudice... */ + /* with extreme prejudice... */ file->f_pos = 0; m->index = 0; m->count = 0; @@ -249,7 +250,7 @@ * @s: string * @esc: set of characters that need escaping * - * Puts string into buffer, replacing each occurence of character from + * Puts string into buffer, replacing each occurrence of character from * @esc with usual octal escape. Returns 0 in case of success, -1 - in * case of overflow. */ @@ -295,3 +296,45 @@ m->count = m->size; return -1; } + +static void *single_start(struct seq_file *p, loff_t *pos) +{ + return NULL + (*pos == 0); +} + +static void *single_next(struct seq_file *p, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void single_stop(struct seq_file *p, void *v) +{ +} + +int single_open(struct file *file, int (*show)(struct seq_file *, void*), void *data) +{ + struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL); + int res = -ENOMEM; + + if (op) { + op->start = single_start; + op->next = single_next; + op->stop = single_stop; + op->show = show; + res = seq_open(file, op); + if (!res) + ((struct seq_file *)file->private_data)->private = data; + else + kfree(op); + } + return res; +} + +int single_release(struct inode *inode, struct file *file) +{ + struct seq_operations *op = ((struct seq_file *)file->private_data)->op; + int res = seq_release(inode, file); + kfree(op); + return res; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/super.c linux.21rc5-ac2/fs/super.c --- linux.21rc5/fs/super.c 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/fs/super.c 2003-04-22 16:44:38.000000000 +0100 @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -280,6 +281,8 @@ sema_init(&s->s_dquot.dqio_sem, 1); sema_init(&s->s_dquot.dqoff_sem, 1); s->s_maxbytes = MAX_NON_LFS; + s->dq_op = sb_dquot_ops; + s->s_qcop = sb_quotactl_ops; s->s_op = &empty_sops; } return s; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/Makefile linux.21rc5-ac2/fs/xfs/linux/Makefile --- linux.21rc5/fs/xfs/linux/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/Makefile 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,63 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. +# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# +# Makefile for XFS on Linux. + +EXTRA_CFLAGS += -I.. -funsigned-char + +ifeq ($(CONFIG_XFS_DEBUG),y) + EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG -DXFSDEBUG +endif + +O_TARGET := linux_xfs.o +ifneq ($(MAKECMDGOALS),modules_install) + obj-m := $(O_TARGET) +endif + +export-objs := xfs_globals.o + +obj-$(CONFIG_PROC_FS) += xfs_stats.o +obj-$(CONFIG_SYSCTL) += xfs_sysctl.o +obj-y += xfs_aops.o \ + xfs_behavior.o \ + xfs_file.o \ + xfs_fs_subr.o \ + xfs_globals.o \ + xfs_ioctl.o \ + xfs_iomap.o \ + xfs_iops.o \ + xfs_lrw.o \ + xfs_syncd.o \ + xfs_super.o \ + xfs_vfs.o \ + xfs_vnode.o + +include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_aops.c linux.21rc5-ac2/fs/xfs/linux/xfs_aops.c --- linux.21rc5/fs/xfs/linux/xfs_aops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_aops.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,1214 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_trans.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_error.h" +#include "xfs_rw.h" +#include + +STATIC void convert_page(struct inode *, struct page *, + page_buf_bmap_t *, void *, int, int); + +void +linvfs_unwritten_done( + struct buffer_head *bh, + int uptodate) +{ + page_buf_t *pb = (page_buf_t *)bh->b_private; + + ASSERT(buffer_unwritten(bh)); + bh->b_end_io = NULL; + clear_bit(BH_Unwritten, &bh->b_state); + if (!uptodate) + pagebuf_ioerror(pb, -EIO); + if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { + pagebuf_iodone(pb, 1, 1); + } + end_buffer_io_async(bh, uptodate); +} + +/* + * Issue transactions to convert a buffer range from unwritten + * to written extents. + */ +STATIC void +linvfs_unwritten_conv( + xfs_buf_t *bp) +{ + vnode_t *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *); + int error; + + if (atomic_read(&bp->pb_hold) < 1) + BUG(); + + VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp), + BMAP_UNWRITTEN, NULL, NULL, error); + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + XFS_BUF_UNDATAIO(bp); + pagebuf_iodone(bp, 0, 0); +} + +STATIC int +map_blocks( + struct inode *inode, + loff_t offset, + ssize_t count, + page_buf_bmap_t *pbmapp, + int flags) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error, nmaps = 1; + + if (((flags & (BMAP_DIRECT|BMAP_SYNC)) == BMAP_DIRECT) && + (offset >= inode->i_size)) + count = max_t(ssize_t, count, XFS_WRITE_IO_LOG); +retry: + VOP_BMAP(vp, offset, count, flags, pbmapp, &nmaps, error); + if ((error == EAGAIN) || (error == EIO)) + return -error; + if (unlikely((flags & (BMAP_WRITE|BMAP_DIRECT)) == + (BMAP_WRITE|BMAP_DIRECT) && nmaps && + (pbmapp->pbm_flags & PBMF_DELAY))) { + flags = BMAP_ALLOCATE; + goto retry; + } + if (flags & (BMAP_WRITE|BMAP_ALLOCATE)) { + VMODIFY(vp); + } + return -error; +} + +/* + * match_offset_to_mapping + * Finds the corresponding mapping in block @map array of the + * given @offset within a @page. 
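(Note that the page index is widened to a 64-bit loff_t before being shifted by PAGE_CACHE_SHIFT, so the absolute file offset is computed correctly beyond 4GB; the mapping is returned only if that offset falls inside the half-open range [pbm_offset, pbm_offset + pbm_bsize).)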
+ */ +STATIC page_buf_bmap_t * +match_offset_to_mapping( + struct page *page, + page_buf_bmap_t *map, + unsigned long offset) +{ + loff_t full_offset; /* offset from start of file */ + + ASSERT(offset < PAGE_CACHE_SIZE); + + full_offset = page->index; /* NB: using 64bit number */ + full_offset <<= PAGE_CACHE_SHIFT; /* offset from file start */ + full_offset += offset; /* offset from page start */ + + if (full_offset < map->pbm_offset) + return NULL; + if (map->pbm_offset + map->pbm_bsize > full_offset) + return map; + return NULL; +} + +STATIC void +map_buffer_at_offset( + struct page *page, + struct buffer_head *bh, + unsigned long offset, + int block_bits, + page_buf_bmap_t *mp) +{ + page_buf_daddr_t bn; + loff_t delta; + int sector_shift; + + ASSERT(!(mp->pbm_flags & PBMF_HOLE)); + ASSERT(!(mp->pbm_flags & PBMF_DELAY)); + ASSERT(mp->pbm_bn != PAGE_BUF_DADDR_NULL); + + delta = page->index; + delta <<= PAGE_CACHE_SHIFT; + delta += offset; + delta -= mp->pbm_offset; + delta >>= block_bits; + + sector_shift = block_bits - BBSHIFT; + bn = mp->pbm_bn >> sector_shift; + bn += delta; + ASSERT((bn << sector_shift) >= mp->pbm_bn); + + lock_buffer(bh); + bh->b_blocknr = bn; + bh->b_dev = mp->pbm_target->pbr_kdev; + set_bit(BH_Mapped, &bh->b_state); + clear_bit(BH_Delay, &bh->b_state); +} + +/* + * Look for a page at index which is unlocked and contains our + * unwritten extent flagged buffers at its head. Returns page + * locked and with an extra reference count, and length of the + * unwritten extent component on this page that we can write, + * in units of filesystem blocks. + */ +STATIC struct page * +probe_unwritten_page( + struct address_space *mapping, + unsigned long index, + page_buf_bmap_t *mp, + page_buf_t *pb, + unsigned long max_offset, + unsigned long *fsbs) +{ + struct page *page; + + page = find_trylock_page(mapping, index); + if (!page) + return 0; + + if (page->mapping && page_has_buffers(page)) { + struct buffer_head *bh, *head; + unsigned long p_offset = 0; + + *fsbs = 0; + bh = head = page_buffers(page); + do { + if (!buffer_unwritten(bh)) + break; + if (!match_offset_to_mapping(page, mp, p_offset)) + break; + if (p_offset >= max_offset) + break; + set_buffer_unwritten_io(bh); + bh->b_private = pb; + p_offset += bh->b_size; + (*fsbs)++; + } while ((bh = bh->b_this_page) != head); + + if (p_offset) + return page; + } + + unlock_page(page); + return NULL; +} + +/* + * Look for a page at index which is unlocked and not mapped + * yet - clustering for mmap write case. 
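(The return value is the number of bytes of uptodate-but-unmapped buffers found at the head of the page, PAGE_CACHE_SIZE for a dirty page with no buffers at all, or 0 when the page cannot be trylocked or does not qualify. probe_unmapped_cluster() below sums these across consecutive pages so that a single large allocation can cover the whole dirty run.)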
+ */ +STATIC unsigned int +probe_unmapped_page( + struct address_space *mapping, + unsigned long index, + unsigned int pg_offset) +{ + struct page *page; + int ret = 0; + + page = find_trylock_page(mapping, index); + if (!page) + return 0; + + if (page->mapping && PageDirty(page)) { + if (page_has_buffers(page)) { + struct buffer_head *bh, *head; + + bh = head = page_buffers(page); + do { + if (buffer_mapped(bh) || !buffer_uptodate(bh)) + break; + ret += bh->b_size; + if (ret >= pg_offset) + break; + } while ((bh = bh->b_this_page) != head); + } else + ret = PAGE_CACHE_SIZE; + } + + unlock_page(page); + return ret; +} + +STATIC unsigned int +probe_unmapped_cluster( + struct inode *inode, + struct page *startpage, + struct buffer_head *bh, + struct buffer_head *head) +{ + unsigned long tindex, tlast; + unsigned int len, total = 0; + struct address_space *mapping = inode->i_mapping; + + /* First sum forwards in this page */ + do { + if (buffer_mapped(bh)) + break; + total += bh->b_size; + } while ((bh = bh->b_this_page) != head); + + /* If we reached the end of the page, sum forwards in + * following pages. + */ + if (bh == head) { + tlast = inode->i_size >> PAGE_CACHE_SHIFT; + /* Prune this back to avoid pathological behavior */ + tlast = min(tlast, startpage->index + 64); + for (tindex = startpage->index + 1; tindex < tlast; tindex++) { + len = probe_unmapped_page(mapping, tindex, + PAGE_CACHE_SIZE); + if (!len) + break; + total += len; + } + if ((tindex == tlast) && (inode->i_size & ~PAGE_CACHE_MASK)) { + len = probe_unmapped_page(mapping, tindex, + inode->i_size & ~PAGE_CACHE_MASK); + total += len; + } + } + return total; +} + +/* + * Probe for a given page (index) in the inode and test if it is delayed + * and without unwritten buffers. Returns page locked and with an extra + * reference count. + */ +STATIC struct page * +probe_delalloc_page( + struct inode *inode, + unsigned long index) +{ + struct page *page; + + page = find_trylock_page(inode->i_mapping, index); + if (!page) + return NULL; + + if (page->mapping && page_has_buffers(page)) { + struct buffer_head *bh, *head; + int acceptable = 0; + + bh = head = page_buffers(page); + do { + if (buffer_unwritten(bh)) { + acceptable = 0; + break; + } else if (buffer_delay(bh)) { + acceptable = 1; + } + } while ((bh = bh->b_this_page) != head); + + if (acceptable) + return page; + } + + unlock_page(page); + return NULL; +} + +STATIC int +map_unwritten( + struct inode *inode, + struct page *start_page, + struct buffer_head *head, + struct buffer_head *curr, + unsigned long p_offset, + int block_bits, + page_buf_bmap_t *mp, + int all_bh) +{ + struct buffer_head *bh = curr; + page_buf_bmap_t *tmp; + page_buf_t *pb; + loff_t offset, size; + unsigned long nblocks = 0; + + offset = start_page->index; + offset <<= PAGE_CACHE_SHIFT; + offset += p_offset; + + pb = pagebuf_lookup(mp->pbm_target, + mp->pbm_offset, mp->pbm_bsize, 0); + if (!pb) + return -ENOMEM; + + /* Set the count to 1 initially, this will stop an I/O + * completion callout which happens before we have started + * all the I/O from calling pagebuf_iodone too early. 
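The initial count of one is the usual bias trick for batched I/O completion: the submitter keeps one artificial reference while it is still queueing pieces, so the counter cannot drop to zero (and fire pagebuf_iodone()) until the matching atomic_dec_and_test() at the end of this function releases the bias. A tiny single-threaded user-space illustration of the pattern follows; the names are invented, and the kernel code uses atomic_t because completions run concurrently.

#include <stdio.h>

static int remaining;

static void all_done(void)
{
	printf("whole I/O complete\n");
}

/* Runs once per finished piece; in the kernel this would be the
 * (possibly interrupt-context) completion callback. */
static void piece_done(void)
{
	if (--remaining == 0)
		all_done();
}

int main(void)
{
	int i, npieces = 4;

	remaining = 1;			/* the bias: count can't reach 0 yet */
	for (i = 0; i < npieces; i++) {
		remaining++;		/* one reference per piece in flight */
		piece_done();		/* "I/O" completes immediately here */
	}
	if (--remaining == 0)		/* drop the bias; whoever brings the */
		all_done();		/* count to zero signals completion   */
	return 0;
}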
+ */ + atomic_set(&pb->pb_io_remaining, 1); + + /* First map forwards in the page consecutive buffers + * covering this unwritten extent + */ + do { + if (!buffer_unwritten(bh)) + break; + tmp = match_offset_to_mapping(start_page, mp, p_offset); + if (!tmp) + break; + map_buffer_at_offset(start_page, bh, p_offset, block_bits, mp); + set_buffer_unwritten_io(bh); + bh->b_private = pb; + p_offset += bh->b_size; + nblocks++; + } while ((bh = bh->b_this_page) != head); + + if (unlikely(nblocks == 0)) { + printk("XFS: bad unwritten extent map: bh=0x%p, mp=0x%p\n", + curr, mp); + BUG(); + } + + atomic_add(nblocks, &pb->pb_io_remaining); + + /* If we reached the end of the page, map forwards in any + * following pages which are also covered by this extent. + */ + if (bh == head) { + struct address_space *mapping = inode->i_mapping; + unsigned long tindex, tlast, bs; + struct page *page; + + tlast = inode->i_size >> PAGE_CACHE_SHIFT; + tlast = min(tlast, start_page->index + pb->pb_page_count - 1); + for (tindex = start_page->index + 1; tindex < tlast; tindex++) { + page = probe_unwritten_page(mapping, tindex, mp, pb, + PAGE_CACHE_SIZE, &bs); + if (!page) + break; + nblocks += bs; + atomic_add(bs, &pb->pb_io_remaining); + convert_page(inode, page, mp, pb, 1, all_bh); + } + + if ((tindex == tlast) && (inode->i_size & ~PAGE_CACHE_MASK)) { + page = probe_unwritten_page(mapping, tindex, mp, pb, + inode->i_size & ~PAGE_CACHE_MASK, &bs); + if (page) { + nblocks += bs; + atomic_add(bs, &pb->pb_io_remaining); + convert_page(inode, page, + mp, pb, 1, all_bh); + } + } + } + + size = nblocks; /* NB: using 64bit number here */ + size <<= block_bits; /* convert fsb's to byte range */ + + XFS_BUF_DATAIO(pb); + XFS_BUF_ASYNC(pb); + XFS_BUF_SET_SIZE(pb, size); + XFS_BUF_SET_OFFSET(pb, offset); + XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode)); + XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_conv); + + if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { + pagebuf_iodone(pb, 1, 1); + } + + return 0; +} + +STATIC void +submit_page( + struct page *page, + struct buffer_head *bh_arr[], + int cnt) +{ + struct buffer_head *bh; + int i; + + if (cnt) { + for (i = 0; i < cnt; i++) { + bh = bh_arr[i]; + set_buffer_async_io(bh); + if (buffer_unwritten(bh)) + set_buffer_unwritten_io(bh); + set_bit(BH_Uptodate, &bh->b_state); + clear_bit(BH_Dirty, &bh->b_state); + } + + for (i = 0; i < cnt; i++) + submit_bh(WRITE, bh_arr[i]); + } else + unlock_page(page); +} + +/* + * Allocate & map buffers for page given the extent map. Write it out. + * except for the original page of a writepage, this is called on + * delalloc/unwritten pages only, for the original page it is possible + * that the page has no mapping at all. 
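(With startio set, buffers inside the valid range are gathered into bh_arr and handed to submit_page() for immediate writeout; without it they are merely mapped, unlocked and marked dirty, leaving the actual I/O to a later flush.)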
+ */ +STATIC void +convert_page( + struct inode *inode, + struct page *page, + page_buf_bmap_t *maps, + void *private, + int startio, + int all_bh) +{ + struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head; + page_buf_bmap_t *mp = maps, *tmp; + unsigned long end, offset, end_index; + int i = 0, index = 0; + int bbits = inode->i_blkbits; + + end_index = inode->i_size >> PAGE_CACHE_SHIFT; + if (page->index < end_index) { + end = PAGE_CACHE_SIZE; + } else { + end = inode->i_size & (PAGE_CACHE_SIZE-1); + } + bh = head = page_buffers(page); + do { + offset = i << bbits; + if (!(Page_Uptodate(page) || buffer_uptodate(bh))) + continue; + if (buffer_mapped(bh) && !buffer_delay(bh) && all_bh) { + if (startio && (offset < end)) { + lock_buffer(bh); + bh_arr[index++] = bh; + } + continue; + } + tmp = match_offset_to_mapping(page, mp, offset); + if (!tmp) + continue; + ASSERT(!(tmp->pbm_flags & PBMF_HOLE)); + ASSERT(!(tmp->pbm_flags & PBMF_DELAY)); + + /* If this is a new unwritten extent buffer (i.e. one + * that we haven't passed in private data for, we must + * now map this buffer too. + */ + if (buffer_unwritten(bh) && !bh->b_end_io) { + ASSERT(tmp->pbm_flags & PBMF_UNWRITTEN); + map_unwritten(inode, page, head, bh, + offset, bbits, tmp, all_bh); + } else { + map_buffer_at_offset(page, bh, offset, bbits, tmp); + if (buffer_unwritten(bh)) { + set_buffer_unwritten_io(bh); + bh->b_private = private; + ASSERT(private); + } + } + if (startio && (offset < end)) { + bh_arr[index++] = bh; + } else { + unlock_buffer(bh); + mark_buffer_dirty(bh); + } + } while (i++, (bh = bh->b_this_page) != head); + + submit_page(page, bh_arr, index); +} + +/* + * Convert & write out a cluster of pages in the same extent as defined + * by mp and following the start page. + */ +STATIC void +cluster_write( + struct inode *inode, + unsigned long tindex, + page_buf_bmap_t *mp, + int startio, + int all_bh) +{ + unsigned long tlast; + struct page *page; + + tlast = (mp->pbm_offset + mp->pbm_bsize) >> PAGE_CACHE_SHIFT; + for (; tindex < tlast; tindex++) { + page = probe_delalloc_page(inode, tindex); + if (!page) + break; + convert_page(inode, page, mp, NULL, startio, all_bh); + } +} + +/* + * Calling this without startio set means we are being asked to make a dirty + * page ready for freeing it's buffers. When called with startio set then + * we are coming from writepage. + * + * When called with startio set it is important that we write the WHOLE + * page if possible. + * The bh->b_state's cannot know if any of the blocks or which block for + * that matter are dirty due to mmap writes, and therefore bh uptodate is + * only vaild if the page itself isn't completely uptodate. Some layers + * may clear the page dirty flag prior to calling write page, under the + * assumption the entire page will be written out; by not writing out the + * whole page the page can be reused before all valid dirty data is + * written out. Note: in the case of a page that has been dirty'd by + * mapwrite and but partially setup by block_prepare_write the + * bh->b_states's will not agree and only ones setup by BPW/BCW will have + * valid state, thus the whole page must be written out thing. 
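(The loop below distinguishes three buffer states: unwritten-extent buffers are wired to a pagebuf via map_unwritten() so that completion can convert the extent to written; delayed-allocate buffers get real blocks through a BMAP_ALLOCATE mapping; and uptodate-but-unmapped buffers, the mmap write case, are allocated in one go after sizing the run with probe_unmapped_cluster().)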
+ */ + +STATIC int +page_state_convert( + struct page *page, + int startio, + int unmapped) /* also implies page uptodate */ +{ + struct inode *inode = page->mapping->host; + struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head; + page_buf_bmap_t *mp, map; + unsigned long p_offset = 0, end_index; + loff_t offset, end_offset; + int len, err, i, cnt = 0; + int flags = startio ? 0 : BMAP_TRYLOCK; + int page_dirty = 1; + + + /* Are we off the end of the file ? */ + end_index = inode->i_size >> PAGE_CACHE_SHIFT; + if (page->index >= end_index) { + unsigned remaining = inode->i_size & (PAGE_CACHE_SIZE-1); + if ((page->index >= end_index+1) || !remaining) { + return -EIO; + } + } + + offset = (loff_t)page->index << PAGE_CACHE_SHIFT; + end_offset = offset + PAGE_CACHE_SIZE; + if (end_offset > inode->i_size) + end_offset = inode->i_size; + + bh = head = page_buffers(page); + mp = NULL; + + len = bh->b_size; + do { + if (!(Page_Uptodate(page) || buffer_uptodate(bh)) && !startio) { + goto next_bh; + } + + if (mp) { + mp = match_offset_to_mapping(page, &map, p_offset); + } + + /* + * First case, map an unwritten extent and prepare for + * extent state conversion transaction on completion. + */ + if (buffer_unwritten(bh)) { + if (!mp) { + err = map_blocks(inode, offset, len, &map, + BMAP_READ|BMAP_IGNSTATE); + if (err) { + goto error; + } + mp = match_offset_to_mapping(page, &map, + p_offset); + } + if (mp) { + if (!bh->b_end_io) { + err = map_unwritten(inode, page, + head, bh, p_offset, + inode->i_blkbits, + mp, unmapped); + if (err) { + goto error; + } + } + if (startio) { + bh_arr[cnt++] = bh; + } else { + unlock_buffer(bh); + mark_buffer_dirty(bh); + } + page_dirty = 0; + } + /* + * Second case, allocate space for a delalloc buffer. + * We can return EAGAIN here in the release page case. + */ + } else if (buffer_delay(bh)) { + if (!mp) { + err = map_blocks(inode, offset, len, &map, + BMAP_ALLOCATE | flags); + if (err) { + goto error; + } + mp = match_offset_to_mapping(page, &map, + p_offset); + } + if (mp) { + map_buffer_at_offset(page, bh, p_offset, + inode->i_blkbits, mp); + if (startio) { + bh_arr[cnt++] = bh; + } else { + unlock_buffer(bh); + mark_buffer_dirty(bh); + } + page_dirty = 0; + } + } else if ((buffer_uptodate(bh) || Page_Uptodate(page)) && + (unmapped || startio)) { + + if (!buffer_mapped(bh)) { + int size; + + /* + * Getting here implies an unmapped buffer + * was found, and we are in a path where we + * need to write the whole page out. + */ + if (!mp) { + size = probe_unmapped_cluster( + inode, page, bh, head); + err = map_blocks(inode, offset, + size, &map, + BMAP_WRITE | BMAP_MMAP); + if (err) { + goto error; + } + mp = match_offset_to_mapping(page, &map, + p_offset); + } + if (mp) { + map_buffer_at_offset(page, + bh, p_offset, + inode->i_blkbits, mp); + if (startio) { + bh_arr[cnt++] = bh; + } else { + unlock_buffer(bh); + mark_buffer_dirty(bh); + } + page_dirty = 0; + } + } else if (startio) { + if (buffer_uptodate(bh) && + !test_and_set_bit(BH_Lock, &bh->b_state)) { + bh_arr[cnt++] = bh; + page_dirty = 0; + } + } + } + +next_bh: + offset += len; + p_offset += len; + bh = bh->b_this_page; + } while (offset < end_offset); + + if (startio) + submit_page(page, bh_arr, cnt); + + if (mp) + cluster_write(inode, page->index + 1, mp, startio, unmapped); + + return page_dirty; + +error: + for (i = 0; i < cnt; i++) { + unlock_buffer(bh_arr[i]); + } + + /* + * If it's delalloc and we have nowhere to put it, + * throw it away, unless the lower layers told + * us to try again. 
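(The -EAGAIN case comes back from the non-blocking BMAP_TRYLOCK allocation used on the startio == 0, i.e. releasepage, path; the page is then left dirty and uptodate so that a later writepage can redo the work instead of losing data.)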
+ */ + if (err != -EAGAIN) { + if (!unmapped) { + block_flushpage(page, 0); + } + ClearPageUptodate(page); + } + return err; +} + +STATIC int +linvfs_get_block_core( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + int create, + int direct, + bmapi_flags_t flags) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + page_buf_bmap_t pbmap; + int retpbbm = 1; + int error; + ssize_t size; + loff_t offset = (loff_t)iblock << inode->i_blkbits; + + /* If we are doing writes at the end of the file, + * allocate in chunks + */ + if (create && (offset >= inode->i_size) /* && !(flags & BMAP_SYNC) */) + size = 1 << XFS_WRITE_IO_LOG; + else + size = 1 << inode->i_blkbits; + + VOP_BMAP(vp, offset, size, + create ? flags : BMAP_READ, &pbmap, &retpbbm, error); + if (error) + return -error; + + if (retpbbm == 0) + return 0; + + if (pbmap.pbm_bn != PAGE_BUF_DADDR_NULL) { + page_buf_daddr_t bn; + loff_t delta; + + /* For unwritten extents do not report a disk address on + * the read case. + */ + if (create || ((pbmap.pbm_flags & PBMF_UNWRITTEN) == 0)) { + delta = offset - pbmap.pbm_offset; + delta >>= inode->i_blkbits; + + bn = pbmap.pbm_bn >> (inode->i_blkbits - BBSHIFT); + bn += delta; + + bh_result->b_blocknr = bn; + set_bit(BH_Mapped, &bh_result->b_state); + } + if (pbmap.pbm_flags & PBMF_UNWRITTEN) { + if (create) + set_bit(BH_Mapped, &bh_result->b_state); + set_bit(BH_Unwritten, &bh_result->b_state); + set_bit(BH_Delay, &bh_result->b_state); + } + } + + /* If this is a realtime file, data might be on a new device */ + bh_result->b_dev = pbmap.pbm_target->pbr_kdev; + + /* If we previously allocated a block out beyond eof and + * we are now coming back to use it then we will need to + * flag it as new even if it has a disk address. + */ + if (create && + ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) || + (offset >= inode->i_size))) { + set_bit(BH_New, &bh_result->b_state); + } + + if (pbmap.pbm_flags & PBMF_DELAY) { + if (unlikely(direct)) + BUG(); + if (create) { + set_bit(BH_Mapped, &bh_result->b_state); + } + set_bit(BH_Delay, &bh_result->b_state); + } + + return 0; +} + +int +linvfs_get_block( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + int create) +{ + return linvfs_get_block_core(inode, iblock, bh_result, + create, 0, BMAP_WRITE); +} + +STATIC int +linvfs_get_block_sync( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + int create) +{ + return linvfs_get_block_core(inode, iblock, bh_result, + create, 0, BMAP_SYNC|BMAP_WRITE); +} + +STATIC int +linvfs_get_block_direct( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + int create) +{ + return linvfs_get_block_core(inode, iblock, bh_result, + create, 1, BMAP_WRITE|BMAP_DIRECT); +} + +STATIC int +linvfs_bmap( + struct address_space *mapping, + long block) +{ + struct inode *inode = (struct inode *)mapping->host; + vnode_t *vp = LINVFS_GET_VP(inode); + int error; + + /* block - Linux disk blocks 512b */ + /* bmap input offset - bytes 1b */ + /* bmap output bn - XFS BBs 512b */ + /* bmap output delta - bytes 1b */ + + vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address); + + VOP_RWLOCK(vp, VRWLOCK_READ); + VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error); + VOP_RWUNLOCK(vp, VRWLOCK_READ); + return generic_block_bmap(mapping, block, linvfs_get_block_direct); +} + +STATIC int +linvfs_readpage( + struct file *unused, + struct page *page) +{ + return block_read_full_page(page, linvfs_get_block); +} + +STATIC void 
+count_page_state( + struct page *page, + int *delalloc, + int *unmapped, + int *unwritten) +{ + struct buffer_head *bh, *head; + + *delalloc = *unmapped = *unwritten = 0; + + bh = head = page_buffers(page); + do { + if (buffer_uptodate(bh) && !buffer_mapped(bh)) + (*unmapped) = 1; + else if (buffer_unwritten(bh)) + (*unwritten) = 1; + else if (buffer_delay(bh)) + (*delalloc) = 1; + } while ((bh = bh->b_this_page) != head); +} + + +/* + * writepage: Called from one of two places: + * + * 1. we are flushing a delalloc buffer head. + * + * 2. we are writing out a dirty page. Typically the page dirty + * state is cleared before we get here. In this case is it + * conceivable we have no buffer heads. + * + * For delalloc space on the page we need to allocate space and + * flush it. For unmapped buffer heads on the page we should + * allocate space if the page is uptodate. For any other dirty + * buffer heads on the page we should flush them. + * + * If we detect that a transaction would be required to flush + * the page, we have to check the process flags first, if we + * are already in a transaction or disk I/O during allocations + * is off, we need to fail the writepage and redirty the page. + * We also need to set PF_NOIO ourselves. + */ + +STATIC int +linvfs_writepage( + struct page *page) +{ + int error; + int need_trans; + int delalloc, unmapped, unwritten; + struct inode *inode = page->mapping->host; + + /* + * We need a transaction if: + * 1. There are delalloc buffers on the page + * 2. The page is upto date and we have unmapped buffers + * 3. The page is upto date and we have no buffers + * 4. There are unwritten buffers on the page + */ + + if (!page_has_buffers(page)) { + unmapped = 1; + need_trans = 1; + } else { + count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!Page_Uptodate(page)) + unmapped = 0; + need_trans = delalloc + unmapped + unwritten; + } + + /* + * If we need a transaction and the process flags say + * we are already in a transaction, or no IO is allowed + * then mark the page dirty again and leave the page + * as is. + */ + + if ((current->flags & (PF_FSTRANS|PF_NOIO)) && need_trans) + goto out_fail; + + /* + * Delay hooking up buffer heads until we have + * made our go/no-go decision. + */ + if (!page_has_buffers(page)) + create_empty_buffers(page, inode->i_dev, 1 << inode->i_blkbits); + + /* + * Convert delayed allocate, unwritten or unmapped space + * to real space and flush out to disk. + */ + if (need_trans) + current->flags |= PF_NOIO; + error = page_state_convert(page, 1, unmapped); + if (need_trans) + current->flags &= ~PF_NOIO; + if (error == -EAGAIN) + goto out_fail; + + if (unlikely(error < 0)) { + unlock_page(page); + return error; + } + + return 0; + +out_fail: + SetPageDirty(page); + unlock_page(page); + return 0; +} + +/* + * Called to move a page into cleanable state - and from there + * to be released. Possibly the page is already clean. We always + * have buffer heads in this call. + * + * Returns 0 if the page is ok to release, 1 otherwise. + * + * Possible scenarios are: + * + * 1. We are being called to release a page which has been written + * to via regular I/O. buffer heads will be dirty and possibly + * delalloc. If no delalloc buffer heads in this case then we + * can just return zero. + * + * 2. 
We are called to release a page which has been written via + * mmap, all we need to do is ensure there is no delalloc + * state in the buffer heads, if not we can let the caller + * free them and we should come back later via writepage. + */ +STATIC int +linvfs_release_page( + struct page *page, + int gfp_mask) +{ + int delalloc, unmapped, unwritten; + + count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!delalloc && !unwritten) + return 1; + + if (!(gfp_mask & __GFP_FS)) + return 0; + + /* + * Convert delalloc space to real space, do not flush the + * data out to disk, that will be done by the caller. + * Never need to allocate space here - we will always + * come back to writepage in that case. + */ + return (page_state_convert(page, 0, 0) == 0) ? 1 : 0; +} + +STATIC int +linvfs_prepare_write( + struct file *file, + struct page *page, + unsigned int from, + unsigned int to) +{ + if (file && (file->f_flags & O_SYNC)) { + return block_prepare_write(page, from, to, + linvfs_get_block_sync); + } else { + return block_prepare_write(page, from, to, + linvfs_get_block); + } +} + +/* + * Initiate I/O on a kiobuf of user memory + */ +STATIC int +linvfs_direct_IO( + int rw, + struct file *file, + struct kiobuf *iobuf, + unsigned long blocknr, + int blocksize) +{ + page_buf_t *pb; + page_buf_bmap_t map; + int error = 0; + int pb_flags, map_flags, pg_index = 0; + size_t length, total; + loff_t offset; + size_t map_size, size; + struct inode *inode = file->f_dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + struct page **maplist = iobuf->maplist; + size_t page_offset = iobuf->offset; + + total = length = iobuf->length; + offset = blocknr; + offset <<= inode->i_blkbits; + + map_flags = (rw ? BMAP_WRITE : BMAP_READ) | BMAP_DIRECT; + pb_flags = (rw ? PBF_WRITE : PBF_READ) | PBF_FORCEIO; + while (length) { + error = map_blocks(inode, offset, length, &map, map_flags); + if (error) + break; + BUG_ON(map.pbm_flags & PBMF_DELAY); + + map_size = map.pbm_bsize - map.pbm_delta; + size = min(map_size, length); + + if ((map.pbm_flags & PBMF_HOLE) || + ((map.pbm_flags & PBMF_UNWRITTEN) && rw == READ)) { + size_t zero_len = size; + + if (rw == WRITE) + break; + + /* Need to zero it all */ + while (zero_len) { + struct page *page; + size_t pg_len; + + pg_len = min((size_t) + (PAGE_CACHE_SIZE - page_offset), + zero_len); + + page = maplist[pg_index]; + + memset(kmap(page) + page_offset, 0, pg_len); + flush_dcache_page(page); + kunmap(page); + + zero_len -= pg_len; + if ((pg_len + page_offset) == PAGE_CACHE_SIZE) { + pg_index++; + page_offset = 0; + } else { + page_offset = (page_offset + pg_len) & + ~PAGE_CACHE_MASK; + } + } + } else { + int pg_count; + + pg_count = (size + page_offset + PAGE_CACHE_SIZE - 1) + >> PAGE_CACHE_SHIFT; + if ((pb = pagebuf_lookup(map.pbm_target, offset, + size, pb_flags)) == NULL) { + error = -ENOMEM; + break; + } + /* Need to hook up pagebuf to kiobuf pages */ + pb->pb_pages = &maplist[pg_index]; + pb->pb_offset = page_offset; + pb->pb_page_count = pg_count; + pb->pb_bn = map.pbm_bn + (map.pbm_delta >> BBSHIFT); + + XFS_BUF_DATAIO(pb); + if (map.pbm_flags & PBMF_UNWRITTEN) { + XFS_BUF_SET_FSPRIVATE(pb, vp); + XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_conv); + } + + error = pagebuf_iostart(pb, pb_flags); + pagebuf_rele(pb); + + if (error) { + if (error > 0) + error = -error; + break; + } + + page_offset = (page_offset + size) & ~PAGE_CACHE_MASK; + if (page_offset) + pg_count--; + pg_index += pg_count; + } + + offset += size; + length -= size; + } + + return (error ? 
error : (int)(total - length)); +} + +struct address_space_operations linvfs_aops = { + .readpage = linvfs_readpage, + .writepage = linvfs_writepage, + .sync_page = block_sync_page, + .releasepage = linvfs_release_page, + .prepare_write = linvfs_prepare_write, + .commit_write = generic_commit_write, + .bmap = linvfs_bmap, + .direct_IO = linvfs_direct_IO, +}; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_behavior.c linux.21rc5-ac2/fs/xfs/linux/xfs_behavior.c --- linux.21rc5/fs/xfs/linux/xfs_behavior.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_behavior.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + * + */ +#include "xfs.h" + +/* + * Source file used to associate/disassociate behaviors with virtualized + * objects. See xfs_behavior.h for more information about behaviors, etc. + * + * The implementation is split between functions in this file and macros + * in xfs_behavior.h. + */ + +/* + * Insert a new behavior descriptor into a behavior chain. + * + * The behavior chain is ordered based on the 'position' number which + * lives in the first field of the ops vector (higher numbers first). + * + * Attemps to insert duplicate ops result in an EINVAL return code. + * Otherwise, return 0 to indicate success. + */ +int +bhv_insert(bhv_head_t *bhp, bhv_desc_t *bdp) +{ + bhv_desc_t *curdesc, *prev; + int position; + + /* + * Validate the position value of the new behavior. + */ + position = BHV_POSITION(bdp); + ASSERT(position >= BHV_POSITION_BASE && position <= BHV_POSITION_TOP); + + /* + * Find location to insert behavior. Check for duplicates. + */ + prev = NULL; + for (curdesc = bhp->bh_first; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + /* Check for duplication. 
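(The ops vector doubles as the behavior's identity, its first field being the bhv_identity_t that fixes the chain position, so two descriptors sharing one ops vector in the same chain would be ambiguous; duplicates are therefore rejected with EINVAL rather than silently reordered.)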
*/ + if (curdesc->bd_ops == bdp->bd_ops) { + ASSERT(0); + return EINVAL; + } + + /* Find correct position */ + if (position >= BHV_POSITION(curdesc)) { + ASSERT(position != BHV_POSITION(curdesc)); + break; /* found it */ + } + + prev = curdesc; + } + + if (prev == NULL) { + /* insert at front of chain */ + bdp->bd_next = bhp->bh_first; + bhp->bh_first = bdp; + } else { + /* insert after prev */ + bdp->bd_next = prev->bd_next; + prev->bd_next = bdp; + } + + return 0; +} + +/* + * Remove a behavior descriptor from a position in a behavior chain; + * the postition is guaranteed not to be the first position. + * Should only be called by the bhv_remove() macro. + */ +void +bhv_remove_not_first(bhv_head_t *bhp, bhv_desc_t *bdp) +{ + bhv_desc_t *curdesc, *prev; + + ASSERT(bhp->bh_first != NULL); + ASSERT(bhp->bh_first->bd_next != NULL); + + prev = bhp->bh_first; + for (curdesc = bhp->bh_first->bd_next; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + if (curdesc == bdp) + break; /* found it */ + prev = curdesc; + } + + ASSERT(curdesc == bdp); + prev->bd_next = bdp->bd_next; /* remove from after prev */ +} + +/* + * Look for a specific ops vector on the specified behavior chain. + * Return the associated behavior descriptor. Or NULL, if not found. + */ +bhv_desc_t * +bhv_lookup(bhv_head_t *bhp, void *ops) +{ + bhv_desc_t *curdesc; + + for (curdesc = bhp->bh_first; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + if (curdesc->bd_ops == ops) + return curdesc; + } + + return NULL; +} + +/* + * Looks for the first behavior within a specified range of positions. + * Return the associated behavior descriptor. Or NULL, if none found. + */ +bhv_desc_t * +bhv_lookup_range(bhv_head_t *bhp, int low, int high) +{ + bhv_desc_t *curdesc; + + for (curdesc = bhp->bh_first; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + int position = BHV_POSITION(curdesc); + + if (position <= high) { + if (position >= low) + return curdesc; + return NULL; + } + } + + return NULL; +} + +/* + * Return the base behavior in the chain, or NULL if the chain + * is empty. + * + * The caller has not read locked the behavior chain, so acquire the + * lock before traversing the chain. + */ +bhv_desc_t * +bhv_base(bhv_head_t *bhp) +{ + bhv_desc_t *curdesc; + + for (curdesc = bhp->bh_first; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + if (curdesc->bd_next == NULL) { + return curdesc; + } + } + + return NULL; +} + +void +bhv_head_init( + bhv_head_t *bhp, + char *name) +{ + bhp->bh_first = NULL; +} + +void +bhv_insert_initial( + bhv_head_t *bhp, + bhv_desc_t *bdp) +{ + ASSERT(bhp->bh_first == NULL); + (bhp)->bh_first = bdp; +} + +void +bhv_head_destroy( + bhv_head_t *bhp) +{ + ASSERT(bhp->bh_first == NULL); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_behavior.h linux.21rc5-ac2/fs/xfs/linux/xfs_behavior.h --- linux.21rc5/fs/xfs/linux/xfs_behavior.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_behavior.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BEHAVIOR_H__ +#define __XFS_BEHAVIOR_H__ + +/* + * Header file used to associate behaviors with virtualized objects. + * + * A virtualized object is an internal, virtualized representation of + * OS entities such as persistent files, processes, or sockets. Examples + * of virtualized objects include vnodes, vprocs, and vsockets. Often + * a virtualized object is referred to simply as an "object." + * + * A behavior is essentially an implementation layer associated with + * an object. Multiple behaviors for an object are chained together, + * the order of chaining determining the order of invocation. Each + * behavior of a given object implements the same set of interfaces + * (e.g., the VOP interfaces). + * + * Behaviors may be dynamically inserted into an object's behavior chain, + * such that the addition is transparent to consumers that already have + * references to the object. Typically, a given behavior will be inserted + * at a particular location in the behavior chain. Insertion of new + * behaviors is synchronized with operations-in-progress (oip's) so that + * the oip's always see a consistent view of the chain. + * + * The term "interposition" is used to refer to the act of inserting + * a behavior such that it interposes on (i.e., is inserted in front + * of) a particular other behavior. A key example of this is when a + * system implementing distributed single system image wishes to + * interpose a distribution layer (providing distributed coherency) + * in front of an object that is otherwise only accessed locally. + * + * Note that the traditional vnode/inode combination is simply a virtualized + * object that has exactly one associated behavior. + * + * Behavior synchronization is logic which is necessary, under certain + * circumstances, to ensure that there is no conflict between ongoing operations + * traversing the behavior chain and those dynamically modifying the + * behavior chain. Because behavior synchronization adds extra overhead + * to virtual operation invocation, we want to restrict, as much as + * we can, the requirement for this extra code, to those situations + * in which it is truly necessary. + * + * Behavior synchronization is needed whenever there's at least one class + * of object in the system for which: + * 1) multiple behaviors for a given object are supported, + * -- AND -- + * 2a) insertion of a new behavior can happen dynamically at any time during + * the life of an active object, + * -- AND -- + * 3a) insertion of a new behavior needs to synchronize with existing + * ops-in-progress. 
+ * -- OR -- + * 3b) multiple different behaviors can be dynamically inserted at + * any time during the life of an active object + * -- OR -- + * 3c) removal of a behavior can occur at any time during the life of + * an active object. + * -- OR -- + * 2b) removal of a behavior can occur at any time during the life of an + * active object + * + */ + +struct bhv_head_lock; + +/* + * Behavior head. Head of the chain of behaviors. + * Contained within each virtualized object data structure. + */ +typedef struct bhv_head { + struct bhv_desc *bh_first; /* first behavior in chain */ + struct bhv_head_lock *bh_lockp; /* pointer to lock info struct */ +} bhv_head_t; + +/* + * Behavior descriptor. Descriptor associated with each behavior. + * Contained within the behavior's private data structure. + */ +typedef struct bhv_desc { + void *bd_pdata; /* private data for this behavior */ + void *bd_vobj; /* virtual object associated with */ + void *bd_ops; /* ops for this behavior */ + struct bhv_desc *bd_next; /* next behavior in chain */ +} bhv_desc_t; + +/* + * Behavior identity field. A behavior's identity determines the position + * where it lives within a behavior chain, and it's always the first field + * of the behavior's ops vector. The optional id field further identifies the + * subsystem responsible for the behavior. + */ +typedef struct bhv_identity { + __u16 bi_id; /* owning subsystem id */ + __u16 bi_position; /* position in chain */ +} bhv_identity_t; + +typedef bhv_identity_t bhv_position_t; + +#define BHV_IDENTITY_INIT(id,pos) {id, pos} +#define BHV_IDENTITY_INIT_POSITION(pos) BHV_IDENTITY_INIT(0, pos) + +/* + * Define boundaries of position values. + */ +#define BHV_POSITION_INVALID 0 /* invalid position number */ +#define BHV_POSITION_BASE 1 /* base (last) implementation layer */ +#define BHV_POSITION_TOP 63 /* top (first) implementation layer */ + +/* + * Plumbing macros. + */ +#define BHV_HEAD_FIRST(bhp) (ASSERT((bhp)->bh_first), (bhp)->bh_first) +#define BHV_NEXT(bdp) (ASSERT((bdp)->bd_next), (bdp)->bd_next) +#define BHV_NEXTNULL(bdp) ((bdp)->bd_next) +#define BHV_VOBJ(bdp) (ASSERT((bdp)->bd_vobj), (bdp)->bd_vobj) +#define BHV_VOBJNULL(bdp) ((bdp)->bd_vobj) +#define BHV_PDATA(bdp) (bdp)->bd_pdata +#define BHV_OPS(bdp) (bdp)->bd_ops +#define BHV_IDENTITY(bdp) ((bhv_identity_t *)(bdp)->bd_ops) +#define BHV_POSITION(bdp) (BHV_IDENTITY(bdp)->bi_position) + +extern void bhv_head_init(bhv_head_t *, char *); +extern void bhv_head_destroy(bhv_head_t *); +extern int bhv_insert(bhv_head_t *, bhv_desc_t *); +extern void bhv_insert_initial(bhv_head_t *, bhv_desc_t *); + +/* + * Initialize a new behavior descriptor. + * Arguments: + * bdp - pointer to behavior descriptor + * pdata - pointer to behavior's private data + * vobj - pointer to associated virtual object + * ops - pointer to ops for this behavior + */ +#define bhv_desc_init(bdp, pdata, vobj, ops) \ + { \ + (bdp)->bd_pdata = pdata; \ + (bdp)->bd_vobj = vobj; \ + (bdp)->bd_ops = ops; \ + (bdp)->bd_next = NULL; \ + } + +/* + * Remove a behavior descriptor from a behavior chain. + */ +#define bhv_remove(bhp, bdp) \ + { \ + if ((bhp)->bh_first == (bdp)) { \ + /* \ + * Remove from front of chain. \ + * Atomic wrt oip's. \ + */ \ + (bhp)->bh_first = (bdp)->bd_next; \ + } else { \ + /* remove from non-front of chain */ \ + bhv_remove_not_first(bhp, bdp); \ + } \ + (bdp)->bd_vobj = NULL; \ + } + +/* + * Behavior module prototypes. 
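Taken together, a hypothetical layer would embed a bhv_desc_t in its private per-object state and splice itself into an object's chain roughly as in the sketch below. Everything except the bhv_* calls themselves (the my_* names and the shape of the ops vector) is invented for illustration, and kernel context (<linux/slab.h>) is assumed:

/* layer-private data, one instance per virtualized object */
struct my_layer {
	bhv_desc_t	ml_bhv;		/* must outlive its place in the chain */
	/* ... layer state ... */
};

static int
my_attach(bhv_head_t *bhp, void *vobj, void *my_ops)
{
	struct my_layer *ml = kmalloc(sizeof(*ml), GFP_KERNEL);

	if (!ml)
		return ENOMEM;	/* positive errno, matching bhv_insert() */
	/* the first field of *my_ops must be a bhv_identity_t giving
	 * the position at which this layer sits in the chain */
	bhv_desc_init(&ml->ml_bhv, ml, vobj, my_ops);
	return bhv_insert(bhp, &ml->ml_bhv);	/* 0, or EINVAL on a dup */
}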
+ */ +extern void bhv_remove_not_first(bhv_head_t *bhp, bhv_desc_t *bdp); +extern bhv_desc_t * bhv_lookup(bhv_head_t *bhp, void *ops); +extern bhv_desc_t * bhv_lookup_range(bhv_head_t *bhp, int low, int high); +extern bhv_desc_t * bhv_base(bhv_head_t *bhp); + +/* No bhv locking on Linux */ +#define bhv_lookup_unlocked bhv_lookup +#define bhv_base_unlocked bhv_base + +#endif /* __XFS_BEHAVIOR_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_cred.h linux.21rc5-ac2/fs/xfs/linux/xfs_cred.h --- linux.21rc5/fs/xfs/linux/xfs_cred.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_cred.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_CRED_H__ +#define __XFS_CRED_H__ + +/* + * Credentials + */ +typedef struct cred { + /* EMPTY */ +} cred_t; + +extern struct cred *sys_cred; + +/* this is a hack.. (assums sys_cred is the only cred_t in the system) */ +static __inline int capable_cred(cred_t *cr, int cid) +{ + return (cr == sys_cred) ? 1 : capable(cid); +} + +#endif /* __XFS_CRED_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_file.c linux.21rc5-ac2/fs/xfs/linux/xfs_file.c --- linux.21rc5/fs/xfs/linux/xfs_file.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_file.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_trans.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_error.h" +#include "xfs_rw.h" +#include + +#include +#include /* for PROT_WRITE */ + +static struct vm_operations_struct linvfs_file_vm_ops; + + +STATIC ssize_t +linvfs_read( + struct file *filp, + char *buf, + size_t size, + loff_t *offset) +{ + vnode_t *vp = LINVFS_GET_VP(filp->f_dentry->d_inode); + int error; + + ASSERT(vp); + VOP_READ(vp, filp, buf, size, offset, 0, NULL, error); + return error; +} + + +STATIC ssize_t +linvfs_write( + struct file *file, + const char *buf, + size_t count, + loff_t *ppos) +{ + struct inode *inode = file->f_dentry->d_inode; + loff_t pos; + vnode_t *vp; + int error; /* Use negative errors in this f'n */ + + if ((ssize_t) count < 0) + return -EINVAL; + + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + + pos = *ppos; + if (pos < 0) + return -EINVAL; + + error = file->f_error; + if (error) { + file->f_error = 0; + return error; + } + + vp = LINVFS_GET_VP(inode); + ASSERT(vp); + + /* We allow multiple direct writers in, there is no + * potential call to vmtruncate in that path. + */ + if (!(file->f_flags & O_DIRECT)) + down(&inode->i_sem); + + VOP_WRITE(vp, file, buf, count, &pos, 0, NULL, error); + *ppos = pos; + + if (!(file->f_flags & O_DIRECT)) + up(&inode->i_sem); + return error; +} + + +STATIC int +linvfs_open( + struct inode *inode, + struct file *filp) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error; + + if (!(filp->f_flags & O_LARGEFILE) && inode->i_size > MAX_NON_LFS) + return -EFBIG; + + ASSERT(vp); + VOP_OPEN(vp, NULL, error); + return -error; +} + + +STATIC int +linvfs_release( + struct inode *inode, + struct file *filp) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error = 0; + + if (vp) + VOP_RELEASE(vp, error); + return -error; +} + + +STATIC int +linvfs_fsync( + struct file *filp, + struct dentry *dentry, + int datasync) +{ + struct inode *inode = dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + int error; + int flags = FSYNC_WAIT; + + if (datasync) + flags |= FSYNC_DATA; + + ASSERT(vp); + VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error); + return -error; +} + +/* + * linvfs_readdir maps to VOP_READDIR(). + * We need to build a uio, cred, ... 
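+ *
+ * Roughly: a single kernel-space iovec wraps a scratch buffer,
+ * VOP_READDIR() fills that buffer with xfs_dirent records, and each
+ * record is replayed through the VFS filldir() callback until the
+ * buffer or the caller's dirent space is exhausted.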
+ */ + +#define nextdp(dp) ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen)) + +STATIC int +linvfs_readdir( + struct file *filp, + void *dirent, + filldir_t filldir) +{ + int error = 0; + vnode_t *vp; + uio_t uio; + iovec_t iov; + int eof = 0; + caddr_t read_buf; + int namelen, size = 0; + size_t rlen = PAGE_CACHE_SIZE; + xfs_off_t start_offset, curr_offset; + xfs_dirent_t *dbp = NULL; + + vp = LINVFS_GET_VP(filp->f_dentry->d_inode); + ASSERT(vp); + + /* Try fairly hard to get memory */ + do { + if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL))) + break; + rlen >>= 1; + } while (rlen >= 1024); + + if (read_buf == NULL) + return -ENOMEM; + + uio.uio_iov = &iov; + uio.uio_fmode = filp->f_mode; + uio.uio_segflg = UIO_SYSSPACE; + curr_offset = uio.uio_offset = filp->f_pos; + + while (!eof) { + uio.uio_resid = iov.iov_len = rlen; + iov.iov_base = read_buf; + uio.uio_iovcnt = 1; + + start_offset = uio.uio_offset; + + VOP_READDIR(vp, &uio, NULL, &eof, error); + if ((uio.uio_offset == start_offset) || error) { + size = 0; + break; + } + + size = rlen - uio.uio_resid; + dbp = (xfs_dirent_t *)read_buf; + while (size > 0) { + namelen = strlen(dbp->d_name); + + if (filldir(dirent, dbp->d_name, namelen, + (loff_t) curr_offset, + (ino_t) dbp->d_ino, + DT_UNKNOWN)) { + goto done; + } + size -= dbp->d_reclen; + curr_offset = (loff_t)dbp->d_off & 0x7fffffff; + dbp = nextdp(dbp); + } + } +done: + if (!error) { + if (size == 0) + filp->f_pos = uio.uio_offset & 0x7fffffff; + else if (dbp) + filp->f_pos = curr_offset; + } + + kfree(read_buf); + return -error; +} + +STATIC int +linvfs_file_mmap( + struct file *filp, + struct vm_area_struct *vma) +{ + struct inode *ip = filp->f_dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(ip); + vattr_t va = { .va_mask = XFS_AT_UPDATIME }; + int error; + + if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) { + xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); + + error = -XFS_SEND_MMAP(mp, vma, 0); + if (error) + return error; + } + + vma->vm_ops = &linvfs_file_vm_ops; + + VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error); + return 0; +} + + +STATIC int +linvfs_ioctl( + struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int error; + vnode_t *vp = LINVFS_GET_VP(inode); + + ASSERT(vp); + VOP_IOCTL(vp, inode, filp, cmd, arg, error); + VMODIFY(vp); + + /* NOTE: some of the ioctl's return positive #'s as a + * byte count indicating success, such as + * readlink_by_handle. So we don't "sign flip" + * like most other routines. This means true + * errors need to be returned as a negative value. 
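+ * (XFS_IOC_READLINK_BY_HANDLE, for instance, returns the number
+ * of link bytes copied out on success.)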
+ */ + return error; +} + +#ifdef HAVE_VMOP_MPROTECT +STATIC int +linvfs_mprotect( + struct vm_area_struct *vma, + unsigned int newflags) +{ + vnode_t *vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode); + int error = 0; + + if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) { + if ((vma->vm_flags & VM_MAYSHARE) && + (newflags & PROT_WRITE) && !(vma->vm_flags & PROT_WRITE)) { + xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); + + error = XFS_SEND_MMAP(mp, vma, VM_WRITE); + } + } + return error; +} +#endif /* HAVE_VMOP_MPROTECT */ + + +struct file_operations linvfs_file_operations = { + .llseek = generic_file_llseek, + .read = linvfs_read, + .write = linvfs_write, + .ioctl = linvfs_ioctl, + .mmap = linvfs_file_mmap, + .open = linvfs_open, + .release = linvfs_release, + .fsync = linvfs_fsync, +}; + +struct file_operations linvfs_dir_operations = { + .read = generic_read_dir, + .readdir = linvfs_readdir, + .ioctl = linvfs_ioctl, + .fsync = linvfs_fsync, +}; + +static struct vm_operations_struct linvfs_file_vm_ops = { + .nopage = filemap_nopage, +#ifdef HAVE_VMOP_MPROTECT + .mprotect = linvfs_mprotect, +#endif +}; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_fs_subr.c linux.21rc5-ac2/fs/xfs/linux/xfs_fs_subr.c --- linux.21rc5/fs/xfs/linux/xfs_fs_subr.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_fs_subr.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +/* + * Stub for no-op vnode operations that return error status. + */ +int +fs_noerr() +{ + return 0; +} + +/* + * Operation unsupported under this file system. + */ +int +fs_nosys() +{ + return ENOSYS; +} + +/* + * Stub for inactive, strategy, and read/write lock/unlock. Does nothing. + */ +/* ARGSUSED */ +void +fs_noval() +{ +} + +/* + * vnode pcache layer for vnode_tosspages. 
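+ * Discards cached pages from 'first' onward without writing them
+ * back; contrast fs_flushinval_pages() below, which pushes dirty
+ * data to disk before invalidating.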
+ * 'last' parameter unused but left in for IRIX compatibility + */ +void +fs_tosspages( + bhv_desc_t *bdp, + xfs_off_t first, + xfs_off_t last, + int fiopt) +{ + vnode_t *vp = BHV_TO_VNODE(bdp); + struct inode *ip = LINVFS_GET_IP(vp); + + if (VN_CACHED(vp)) + truncate_inode_pages(ip->i_mapping, first); +} + + +/* + * vnode pcache layer for vnode_flushinval_pages. + * 'last' parameter unused but left in for IRIX compatibility + */ +void +fs_flushinval_pages( + bhv_desc_t *bdp, + xfs_off_t first, + xfs_off_t last, + int fiopt) +{ + vnode_t *vp = BHV_TO_VNODE(bdp); + struct inode *ip = LINVFS_GET_IP(vp); + + if (VN_CACHED(vp)) { + filemap_fdatasync(ip->i_mapping); + fsync_inode_data_buffers(ip); + filemap_fdatawait(ip->i_mapping); + + truncate_inode_pages(ip->i_mapping, first); + } +} + +/* + * vnode pcache layer for vnode_flush_pages. + * 'last' parameter unused but left in for IRIX compatibility + */ +int +fs_flush_pages( + bhv_desc_t *bdp, + xfs_off_t first, + xfs_off_t last, + uint64_t flags, + int fiopt) +{ + vnode_t *vp = BHV_TO_VNODE(bdp); + struct inode *ip = LINVFS_GET_IP(vp); + + if (VN_CACHED(vp)) { + filemap_fdatasync(ip->i_mapping); + fsync_inode_data_buffers(ip); + filemap_fdatawait(ip->i_mapping); + } + + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_fs_subr.h linux.21rc5-ac2/fs/xfs/linux/xfs_fs_subr.h --- linux.21rc5/fs/xfs/linux/xfs_fs_subr.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_fs_subr.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUBR_H__ +#define __XFS_SUBR_H__ + +/* + * Utilities shared among file system implementations. 
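+ *
+ * fs_noerr(), fs_nosys() and fs_noval() are stubs intended to fill
+ * otherwise-unimplemented slots in an ops vector; following the XFS
+ * convention, fs_nosys() reports ENOSYS as a positive errno.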
+ */ + +struct cred; + +extern int fs_noerr(void); +extern int fs_nosys(void); +extern int fs_nodev(void); +extern void fs_noval(void); +extern void fs_tosspages(bhv_desc_t *, xfs_off_t, xfs_off_t, int); +extern void fs_flushinval_pages(bhv_desc_t *, xfs_off_t, xfs_off_t, int); +extern int fs_flush_pages(bhv_desc_t *, xfs_off_t, xfs_off_t, uint64_t, int); + +#endif /* __XFS_FS_SUBR_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_globals.c linux.21rc5-ac2/fs/xfs/linux/xfs_globals.c --- linux.21rc5/fs/xfs/linux/xfs_globals.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_globals.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * This file contains globals needed by XFS that were normally defined + * somewhere else in IRIX. + */ + +#include "xfs.h" +#include "xfs_bmap_btree.h" +#include "xfs_bit.h" + +/* + * System memory size - used to scale certain data structures in XFS. + */ +unsigned long xfs_physmem; + +/* + * Tunable XFS parameters. xfs_params is required even when CONFIG_SYSCTL=n, + * other XFS code uses these values. + */ +xfs_param_t xfs_params = { 128, 32, 0, 1, 0, 0, 0, 3, 30 * HZ }; + +/* + * Global system credential structure. + */ +cred_t sys_cred_val, *sys_cred = &sys_cred_val; + +/* Export XFS symbols used by xfsidbg */ +EXPORT_SYMBOL(xfs_next_bit); +EXPORT_SYMBOL(xfs_contig_bits); +EXPORT_SYMBOL(xfs_bmbt_get_all); +#if ARCH_CONVERT != ARCH_NOCONVERT +EXPORT_SYMBOL(xfs_bmbt_disk_get_all); +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_globals.h linux.21rc5-ac2/fs/xfs/linux/xfs_globals.h --- linux.21rc5/fs/xfs/linux/xfs_globals.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_globals.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_GLOBALS_H__ +#define __XFS_GLOBALS_H__ + +/* + * This file declares globals needed by XFS that were normally defined + * somewhere else in IRIX. + */ + +extern uint64_t xfs_panic_mask; /* set to cause more panics */ +extern unsigned long xfs_physmem; +extern struct cred *sys_cred; + +#endif /* __XFS_GLOBALS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_ioctl.c linux.21rc5-ac2/fs/xfs/linux/xfs_ioctl.c --- linux.21rc5/fs/xfs/linux/xfs_ioctl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_ioctl.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,1117 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" +#include "xfs_dfrag.h" +#include "xfs_fsops.h" + +#include +#include + + +/* + * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to + * a file or fs handle. + * + * XFS_IOC_PATH_TO_FSHANDLE + * returns fs handle for a mount point or path within that mount point + * XFS_IOC_FD_TO_HANDLE + * returns full handle for a FD opened in user space + * XFS_IOC_PATH_TO_HANDLE + * returns full handle for a path + */ +STATIC int +xfs_find_handle( + unsigned int cmd, + unsigned long arg) +{ + int hsize; + xfs_handle_t handle; + xfs_fsop_handlereq_t hreq; + struct inode *inode; + struct vnode *vp; + + if (copy_from_user(&hreq, (xfs_fsop_handlereq_t *)arg, sizeof(hreq))) + return -XFS_ERROR(EFAULT); + + memset((char *)&handle, 0, sizeof(handle)); + + switch (cmd) { + case XFS_IOC_PATH_TO_FSHANDLE: + case XFS_IOC_PATH_TO_HANDLE: { + struct nameidata nd; + int error; + + error = user_path_walk_link(hreq.path, &nd); + if (error) + return error; + + ASSERT(nd.dentry); + ASSERT(nd.dentry->d_inode); + inode = igrab(nd.dentry->d_inode); + path_release(&nd); + break; + } + + case XFS_IOC_FD_TO_HANDLE: { + struct file *file; + + file = fget(hreq.fd); + if (!file) + return -EBADF; + + ASSERT(file->f_dentry); + ASSERT(file->f_dentry->d_inode); + inode = igrab(file->f_dentry->d_inode); + fput(file); + break; + } + + default: + ASSERT(0); + return -XFS_ERROR(EINVAL); + } + + if (inode->i_sb->s_magic != XFS_SB_MAGIC) { + /* we're not in XFS anymore, Toto */ + iput(inode); + return -XFS_ERROR(EINVAL); + } + + /* we need the vnode */ + vp = LINVFS_GET_VP(inode); + if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { + iput(inode); + return -XFS_ERROR(EBADF); + } + + /* now we can grab the fsid */ + memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); + hsize = sizeof(xfs_fsid_t); + + if (cmd != XFS_IOC_PATH_TO_FSHANDLE) { + xfs_inode_t *ip; + bhv_desc_t *bhv; + int lock_mode; + + /* need to get access to the xfs_inode to read the generation */ + bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops); + ASSERT(bhv); + ip = XFS_BHVTOI(bhv); + ASSERT(ip); + lock_mode = xfs_ilock_map_shared(ip); + + /* fill in fid section of handle from inode */ + handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) - + sizeof(handle.ha_fid.xfs_fid_len); + handle.ha_fid.xfs_fid_pad = 0; + handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen; + handle.ha_fid.xfs_fid_ino = ip->i_ino; + + xfs_iunlock_map_shared(ip, lock_mode); + + hsize = XFS_HSIZE(handle); + } + 
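+ /*
+ * hsize is now either sizeof(xfs_fsid_t), for the PATH_TO_FSHANDLE
+ * case, or the full fsid+fid size from XFS_HSIZE() for the two
+ * *_TO_HANDLE cases.
+ */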
+ /* now copy our handle into the user buffer & write out the size */ + if (copy_to_user((xfs_handle_t *)hreq.ohandle, &handle, hsize) || + copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) { + iput(inode); + return -XFS_ERROR(EFAULT); + } + + iput(inode); + return 0; +} + + +/* + * Convert userspace handle data into vnode (and inode). + * We [ab]use the fact that all the fsop_handlereq ioctl calls + * have a data structure argument whose first component is always + * a xfs_fsop_handlereq_t, so we can cast to and from this type. + * This allows us to optimise the copy_from_user calls and gives + * a handy, shared routine. + * + * If no error, caller must always VN_RELE the returned vp. + */ +STATIC int +xfs_vget_fsop_handlereq( + xfs_mount_t *mp, + struct inode *parinode, /* parent inode pointer */ + int cap, /* capability level for op */ + unsigned long arg, /* userspace data pointer */ + unsigned long size, /* size of expected struct */ + /* output arguments */ + xfs_fsop_handlereq_t *hreq, + vnode_t **vp, + struct inode **inode) +{ + void *hanp; + size_t hlen; + xfs_fid_t *xfid; + xfs_handle_t *handlep; + xfs_handle_t handle; + xfs_inode_t *ip; + struct inode *inodep; + vnode_t *vpp; + __u32 igen; + ino_t ino; + int error; + + if (!capable(cap)) + return XFS_ERROR(EPERM); + + /* + * Only allow handle opens under a directory. + */ + if (!S_ISDIR(parinode->i_mode)) + return XFS_ERROR(ENOTDIR); + + /* + * Copy the handle down from the user and validate + * that it looks to be in the correct format. + */ + if (copy_from_user(hreq, (struct xfs_fsop_handlereq *)arg, size)) + return XFS_ERROR(EFAULT); + + hanp = hreq->ihandle; + hlen = hreq->ihandlen; + handlep = &handle; + + if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) + return XFS_ERROR(EINVAL); + if (copy_from_user(handlep, hanp, hlen)) + return XFS_ERROR(EFAULT); + if (hlen < sizeof(*handlep)) + memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen); + if (hlen > sizeof(handlep->ha_fsid)) { + if (handlep->ha_fid.xfs_fid_len != + (hlen - sizeof(handlep->ha_fsid) + - sizeof(handlep->ha_fid.xfs_fid_len)) + || handlep->ha_fid.xfs_fid_pad) + return XFS_ERROR(EINVAL); + } + + /* + * Crack the handle, obtain the inode # & generation # + */ + xfid = (struct xfs_fid *)&handlep->ha_fid; + if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) { + ino = xfid->xfs_fid_ino; + igen = xfid->xfs_fid_gen; + } else { + return XFS_ERROR(EINVAL); + } + + /* + * Get the XFS inode, building a vnode to go with it. + */ + error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0); + if (error) + return error; + if (ip == NULL) + return XFS_ERROR(EIO); + if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) { + xfs_iput_new(ip, XFS_ILOCK_SHARED); + return XFS_ERROR(ENOENT); + } + + vpp = XFS_ITOV(ip); + inodep = LINVFS_GET_IP(vpp); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + *vp = vpp; + *inode = inodep; + return 0; +} + +STATIC int +xfs_open_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + int new_fd; + int permflag; + struct file *filp; + struct inode *inode; + struct dentry *dentry; + vnode_t *vp; + xfs_fsop_handlereq_t hreq; + struct list_head *lp; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg, + sizeof(xfs_fsop_handlereq_t), + &hreq, &vp, &inode); + if (error) + return -error; + + /* Restrict xfs_open_by_handle to directories & regular files. 
*/ + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { + iput(inode); + return -XFS_ERROR(EINVAL); + } + +#if BITS_PER_LONG != 32 + hreq.oflags |= O_LARGEFILE; +#endif + /* Put open permission in namei format. */ + permflag = hreq.oflags; + if ((permflag+1) & O_ACCMODE) + permflag++; + if (permflag & O_TRUNC) + permflag |= 2; + + /* Can't write directories. */ + if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) { + iput(inode); + return -XFS_ERROR(EISDIR); + } + + if ((new_fd = get_unused_fd()) < 0) { + iput(inode); + return new_fd; + } + + /* Now to find a dentry. If possible, get a well-connected one. */ + spin_lock(&dcache_lock); + for (lp = inode->i_dentry.next; lp != &inode->i_dentry ; lp=lp->next) { + dentry = list_entry(lp, struct dentry, d_alias); + if (! (dentry->d_flags & DCACHE_NFSD_DISCONNECTED)) { + dget_locked(dentry); + dentry->d_vfs_flags |= DCACHE_REFERENCED; + spin_unlock(&dcache_lock); + iput(inode); + goto found; + } + } + spin_unlock(&dcache_lock); + + /* ELSE didn't find dentry. Create anonymous dcache entry. */ + dentry = d_alloc_root(inode); + if (dentry == NULL) { + iput(inode); + put_unused_fd(new_fd); + return -XFS_ERROR(ENOMEM); + } + + /* Keep nfsd happy. */ + dentry->d_flags |= DCACHE_NFSD_DISCONNECTED; + + found: + /* Ensure umount returns EBUSY on umounts while this file is open. */ + mntget(parfilp->f_vfsmnt); + + /* Create file pointer. */ + filp = dentry_open(dentry, parfilp->f_vfsmnt, hreq.oflags); + if (IS_ERR(filp)) { + put_unused_fd(new_fd); + return -XFS_ERROR(-PTR_ERR(filp)); + } + filp->f_mode |= FINVIS; + + fd_install(new_fd, filp); + return new_fd; +} + +STATIC int +xfs_readlink_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + struct iovec aiov; + struct uio auio; + struct inode *inode; + xfs_fsop_handlereq_t hreq; + vnode_t *vp; + __u32 olen; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg, + sizeof(xfs_fsop_handlereq_t), + &hreq, &vp, &inode); + if (error) + return -error; + + /* Restrict this handle operation to symlinks only. 
*/ + if (vp->v_type != VLNK) { + VN_RELE(vp); + return -XFS_ERROR(EINVAL); + } + + if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) { + VN_RELE(vp); + return -XFS_ERROR(EFAULT); + } + aiov.iov_len = olen; + aiov.iov_base = hreq.ohandle; + + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_fmode = FINVIS; + auio.uio_offset = 0; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_resid = olen; + + VOP_READLINK(vp, &auio, NULL, error); + + VN_RELE(vp); + return (olen - auio.uio_resid); +} + +STATIC int +xfs_fssetdm_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + struct fsdmidata fsd; + xfs_fsop_setdm_handlereq_t dmhreq; + struct inode *inode; + bhv_desc_t *bdp; + vnode_t *vp; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_MKNOD, arg, + sizeof(xfs_fsop_setdm_handlereq_t), + (xfs_fsop_handlereq_t *)&dmhreq, + &vp, &inode); + if (error) + return -error; + + if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { + VN_RELE(vp); + return -XFS_ERROR(EFAULT); + } + + bdp = bhv_base_unlocked(VN_BHV_HEAD(vp)); + error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate, NULL); + + VN_RELE(vp); + if (error) + return -error; + return 0; +} + +STATIC int +xfs_attrlist_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + attrlist_cursor_kern_t *cursor; + xfs_fsop_attrlist_handlereq_t al_hreq; + struct inode *inode; + vnode_t *vp; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg, + sizeof(xfs_fsop_attrlist_handlereq_t), + (xfs_fsop_handlereq_t *)&al_hreq, + &vp, &inode); + if (error) + return -error; + + cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; + VOP_ATTR_LIST(vp, al_hreq.buffer, al_hreq.buflen, al_hreq.flags, + cursor, NULL, error); + VN_RELE(vp); + if (error) + return -error; + return 0; +} + +STATIC int +xfs_attrmulti_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + xfs_attr_multiop_t *ops; + xfs_fsop_attrmulti_handlereq_t am_hreq; + struct inode *inode; + vnode_t *vp; + int i, size; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg, + sizeof(xfs_fsop_attrmulti_handlereq_t), + (xfs_fsop_handlereq_t *)&am_hreq, + &vp, &inode); + if (error) + return -error; + + size = am_hreq.opcount * sizeof(attr_multiop_t); + ops = (xfs_attr_multiop_t *)kmalloc(size, GFP_KERNEL); + if (!ops) { + VN_RELE(vp); + return -XFS_ERROR(ENOMEM); + } + + if (copy_from_user(ops, am_hreq.ops, size)) { + kfree(ops); + VN_RELE(vp); + return -XFS_ERROR(EFAULT); + } + + for (i = 0; i < am_hreq.opcount; i++) { + switch(ops[i].am_opcode) { + case ATTR_OP_GET: + VOP_ATTR_GET(vp,ops[i].am_attrname, ops[i].am_attrvalue, + &ops[i].am_length, ops[i].am_flags, + NULL, ops[i].am_error); + break; + case ATTR_OP_SET: + VOP_ATTR_SET(vp,ops[i].am_attrname, ops[i].am_attrvalue, + ops[i].am_length, ops[i].am_flags, + NULL, ops[i].am_error); + break; + case ATTR_OP_REMOVE: + VOP_ATTR_REMOVE(vp, ops[i].am_attrname, ops[i].am_flags, + NULL, ops[i].am_error); + break; + default: + ops[i].am_error = EINVAL; + } + } + + if (copy_to_user(am_hreq.ops, ops, size)) + error = -XFS_ERROR(EFAULT); + + kfree(ops); + VN_RELE(vp); + return error; +} + +/* prototypes for a few of the stack-hungry cases that have + * their own functions. 
Functions are defined after their use + * so gcc doesn't get fancy and inline them with -03 */ + +STATIC int +xfs_ioc_space( + bhv_desc_t *bdp, + vnode_t *vp, + struct file *filp, + unsigned int cmd, + unsigned long arg); + +STATIC int +xfs_ioc_bulkstat( + xfs_mount_t *mp, + unsigned int cmd, + unsigned long arg); + +STATIC int +xfs_ioc_fsgeometry_v1( + xfs_mount_t *mp, + unsigned long arg); + +STATIC int +xfs_ioc_fsgeometry( + xfs_mount_t *mp, + unsigned long arg); + +STATIC int +xfs_ioc_xattr( + vnode_t *vp, + struct file *filp, + unsigned int cmd, + unsigned long arg); + +STATIC int +xfs_ioc_getbmap( + bhv_desc_t *bdp, + struct file *filp, + unsigned int cmd, + unsigned long arg); + +STATIC int +xfs_ioc_getbmapx( + bhv_desc_t *bdp, + unsigned long arg); + +int +xfs_ioctl( + bhv_desc_t *bdp, + struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int error; + vnode_t *vp; + xfs_inode_t *ip; + xfs_mount_t *mp; + + vp = LINVFS_GET_VP(inode); + + vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + switch (cmd) { + + case XFS_IOC_ALLOCSP: + case XFS_IOC_FREESP: + case XFS_IOC_RESVSP: + case XFS_IOC_UNRESVSP: + case XFS_IOC_ALLOCSP64: + case XFS_IOC_FREESP64: + case XFS_IOC_RESVSP64: + case XFS_IOC_UNRESVSP64: + /* + * Only allow the sys admin to reserve space unless + * unwritten extents are enabled. + */ + if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + + return xfs_ioc_space(bdp, vp, filp, cmd, arg); + + case XFS_IOC_DIOINFO: { + struct dioattr da; + + da.d_miniosz = mp->m_sb.sb_blocksize; + da.d_mem = mp->m_sb.sb_blocksize; + + /* + * this only really needs to be BBSIZE. + * it is set to the file system block size to + * avoid having to do block zeroing on short writes. 
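+ * As a worked example: with 4K filesystem blocks, d_mem and
+ * d_miniosz come back as 4096, while d_maxiosz is the kiobuf
+ * atomic-I/O limit (KIO_MAX_ATOMIC_IO, in units of KiB) rounded
+ * down to a whole number of blocks.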
+ */ + da.d_maxiosz = XFS_FSB_TO_B(mp, + XFS_B_TO_FSBT(mp, KIO_MAX_ATOMIC_IO << 10)); + + if (copy_to_user((struct dioattr *)arg, &da, sizeof(da))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_FSBULKSTAT_SINGLE: + case XFS_IOC_FSBULKSTAT: + case XFS_IOC_FSINUMBERS: + return xfs_ioc_bulkstat(mp, cmd, arg); + + case XFS_IOC_FSGEOMETRY_V1: + return xfs_ioc_fsgeometry_v1(mp, arg); + + case XFS_IOC_FSGEOMETRY: + return xfs_ioc_fsgeometry(mp, arg); + + case XFS_IOC_FSGETXATTR: + case XFS_IOC_FSSETXATTR: + case XFS_IOC_FSGETXATTRA: + return xfs_ioc_xattr(vp, filp, cmd, arg); + + case XFS_IOC_FSSETDM: { + struct fsdmidata dmi; + + if (copy_from_user(&dmi, (struct fsdmidata *)arg, sizeof(dmi))) + return -XFS_ERROR(EFAULT); + + error = xfs_set_dmattrs(bdp, dmi.fsd_dmevmask, dmi.fsd_dmstate, + NULL); + if (error) + return -error; + return 0; + } + + case XFS_IOC_GETBMAP: + case XFS_IOC_GETBMAPA: + return xfs_ioc_getbmap(bdp, filp, cmd, arg); + + case XFS_IOC_GETBMAPX: + return xfs_ioc_getbmapx(bdp, arg); + + case XFS_IOC_FD_TO_HANDLE: + case XFS_IOC_PATH_TO_HANDLE: + case XFS_IOC_PATH_TO_FSHANDLE: + return xfs_find_handle(cmd, arg); + + case XFS_IOC_OPEN_BY_HANDLE: + return xfs_open_by_handle(mp, arg, filp, inode); + + case XFS_IOC_FSSETDM_BY_HANDLE: + return xfs_fssetdm_by_handle(mp, arg, filp, inode); + + case XFS_IOC_READLINK_BY_HANDLE: + return xfs_readlink_by_handle(mp, arg, filp, inode); + + case XFS_IOC_ATTRLIST_BY_HANDLE: + return xfs_attrlist_by_handle(mp, arg, filp, inode); + + case XFS_IOC_ATTRMULTI_BY_HANDLE: + return xfs_attrmulti_by_handle(mp, arg, filp, inode); + + case XFS_IOC_SWAPEXT: { + error = xfs_swapext((struct xfs_swapext *)arg); + if (error) + return -error; + return 0; + } + + case XFS_IOC_FSCOUNTS: { + xfs_fsop_counts_t out; + + error = xfs_fs_counts(mp, &out); + if (error) + return -error; + + if (copy_to_user((char *)arg, &out, sizeof(out))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_SET_RESBLKS: { + xfs_fsop_resblks_t inout; + __uint64_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&inout, (char *)arg, sizeof(inout))) + return -XFS_ERROR(EFAULT); + + /* input parameter is passed in resblks field of structure */ + in = inout.resblks; + error = xfs_reserve_blocks(mp, &in, &inout); + + if (copy_to_user((char *)arg, &inout, sizeof(inout))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_GET_RESBLKS: { + xfs_fsop_resblks_t out; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + error = xfs_reserve_blocks(mp, NULL, &out); + if (error) + return -error; + + if (copy_to_user((char *)arg, &out, sizeof(out))) + return -XFS_ERROR(EFAULT); + + return 0; + } + + case XFS_IOC_FSGROWFSDATA: { + xfs_growfs_data_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&in, (char *)arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_growfs_data(mp, &in); + if (error) + return -error; + return 0; + } + + case XFS_IOC_FSGROWFSLOG: { + xfs_growfs_log_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&in, (char *)arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_growfs_log(mp, &in); + if (error) + return -error; + return 0; + } + + case XFS_IOC_FSGROWFSRT: { + xfs_growfs_rt_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&in, (char *)arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_growfs_rt(mp, &in); + if (error) + return -error; + return 0; + } + + case XFS_IOC_FREEZE: + if 
(!capable(CAP_SYS_ADMIN)) + return -EPERM; + xfs_fs_freeze(mp); + return 0; + + case XFS_IOC_THAW: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + xfs_fs_thaw(mp); + return 0; + + case XFS_IOC_ERROR_INJECTION: { + xfs_error_injection_t in; + + if (copy_from_user(&in, (char *)arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_errortag_add(in.errtag, mp); + if (error) + return -error; + return 0; + } + + case XFS_IOC_ERROR_CLEARALL: + error = xfs_errortag_clearall(mp); + return -error; + + default: + return -ENOTTY; + } +} + +STATIC int +xfs_ioc_space( + bhv_desc_t *bdp, + vnode_t *vp, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + xfs_flock64_t bf; + int attr_flags = 0; + int error; + + if (filp->f_flags & O_RDONLY) + return -XFS_ERROR(EBADF); + + if (vp->v_type != VREG) + return -XFS_ERROR(EINVAL); + + if (copy_from_user(&bf, (xfs_flock64_t *)arg, sizeof(bf))) + return -XFS_ERROR(EFAULT); + + if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) + attr_flags |= ATTR_NONBLOCK; + if (filp->f_mode & FINVIS) + attr_flags |= ATTR_DMI; + + error = xfs_change_file_space(bdp, cmd, &bf, filp->f_pos, + NULL, attr_flags); + return -error; +} + +STATIC int +xfs_ioc_bulkstat( + xfs_mount_t *mp, + unsigned int cmd, + unsigned long arg) +{ + xfs_fsop_bulkreq_t bulkreq; + int count; /* # of records returned */ + xfs_ino_t inlast; /* last inode number */ + int done; + int error; + + /* done = 1 if there are more stats to get and if bulkstat */ + /* should be called again (unused here, but used in dmapi) */ + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -XFS_ERROR(EIO); + + if (copy_from_user(&bulkreq, (xfs_fsop_bulkreq_t *)arg, + sizeof(xfs_fsop_bulkreq_t))) + return -XFS_ERROR(EFAULT); + + if (copy_from_user(&inlast, (__s64 *)bulkreq.lastip, + sizeof(__s64))) + return -XFS_ERROR(EFAULT); + + if ((count = bulkreq.icount) <= 0) + return -XFS_ERROR(EINVAL); + + if (cmd == XFS_IOC_FSINUMBERS) + error = xfs_inumbers(mp, NULL, &inlast, &count, + bulkreq.ubuffer); + else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) + error = xfs_bulkstat_single(mp, &inlast, + bulkreq.ubuffer, &done); + else { /* XFS_IOC_FSBULKSTAT */ + if (count == 1 && inlast != 0) { + inlast++; + error = xfs_bulkstat_single(mp, &inlast, + bulkreq.ubuffer, &done); + } else { + error = xfs_bulkstat(mp, NULL, &inlast, &count, + (bulkstat_one_pf)xfs_bulkstat_one, + sizeof(xfs_bstat_t), bulkreq.ubuffer, + BULKSTAT_FG_QUICK, &done); + } + } + + if (error) + return -error; + + if (bulkreq.ocount != NULL) { + if (copy_to_user((xfs_ino_t *)bulkreq.lastip, &inlast, + sizeof(xfs_ino_t))) + return -XFS_ERROR(EFAULT); + + if (copy_to_user((__s32 *)bulkreq.ocount, &count, + sizeof(count))) + return -XFS_ERROR(EFAULT); + } + + return 0; +} + +STATIC int +xfs_ioc_fsgeometry_v1( + xfs_mount_t *mp, + unsigned long arg) +{ + xfs_fsop_geom_v1_t fsgeo; + int error; + + error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); + if (error) + return -error; + + if (copy_to_user((xfs_fsop_geom_t *)arg, &fsgeo, sizeof(fsgeo))) + return -XFS_ERROR(EFAULT); + return 0; +} + +STATIC int +xfs_ioc_fsgeometry( + xfs_mount_t *mp, + unsigned long arg) +{ + xfs_fsop_geom_t fsgeo; + int error; + + error = xfs_fs_geometry(mp, &fsgeo, 4); + if (error) + return -error; + + if (copy_to_user((xfs_fsop_geom_t *)arg, &fsgeo, sizeof(fsgeo))) + return -XFS_ERROR(EFAULT); + return 0; +} + +STATIC int +xfs_ioc_xattr( + vnode_t *vp, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + struct fsxattr fa; + vattr_t 
va; + int error; + + switch (cmd) { + case XFS_IOC_FSGETXATTR: { + va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return -error; + + fa.fsx_xflags = va.va_xflags; + fa.fsx_extsize = va.va_extsize; + fa.fsx_nextents = va.va_nextents; + + if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_FSSETXATTR: { + int attr_flags = 0; + + if (copy_from_user(&fa, (struct fsxattr *)arg, sizeof(fa))) + return -XFS_ERROR(EFAULT); + + va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE; + va.va_xflags = fa.fsx_xflags; + va.va_extsize = fa.fsx_extsize; + + if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) + attr_flags |= ATTR_NONBLOCK; + + VOP_SETATTR(vp, &va, attr_flags, NULL, error); + return -error; + } + + case XFS_IOC_FSGETXATTRA: { + + va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_ANEXTENTS; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return -error; + + fa.fsx_xflags = va.va_xflags; + fa.fsx_extsize = va.va_extsize; + fa.fsx_nextents = va.va_anextents; + + if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa))) + return -XFS_ERROR(EFAULT); + return 0; + } + + default: + return -ENOTTY; + + } +} + +STATIC int +xfs_ioc_getbmap( + bhv_desc_t *bdp, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + struct getbmap bm; + int iflags; + int error; + + if (copy_from_user(&bm, (struct getbmap *)arg, sizeof(bm))) + return -XFS_ERROR(EFAULT); + + if (bm.bmv_count < 2) + return -XFS_ERROR(EINVAL); + + iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0); + if (filp->f_mode & FINVIS) + iflags |= BMV_IF_NO_DMAPI_READ; + + error = xfs_getbmap(bdp, &bm, (struct getbmap *)arg+1, iflags); + if (error) + return -error; + + if (copy_to_user((struct getbmap *)arg, &bm, sizeof(bm))) + return -XFS_ERROR(EFAULT); + return 0; +} + +STATIC int +xfs_ioc_getbmapx( + bhv_desc_t *bdp, + unsigned long arg) +{ + struct getbmapx bmx; + struct getbmap bm; + int iflags; + int error; + + if (copy_from_user(&bmx, (struct getbmapx *)arg, sizeof(bmx))) + return -XFS_ERROR(EFAULT); + + if (bmx.bmv_count < 2) + return -XFS_ERROR(EINVAL); + + /* + * Map input getbmapx structure to a getbmap + * structure for xfs_getbmap. + */ + GETBMAP_CONVERT(bmx, bm); + + iflags = bmx.bmv_iflags; + + if (iflags & (~BMV_IF_VALID)) + return -XFS_ERROR(EINVAL); + + iflags |= BMV_IF_EXTENDED; + + error = xfs_getbmap(bdp, &bm, (struct getbmapx *)arg+1, iflags); + if (error) + return -error; + + GETBMAP_CONVERT(bm, bmx); + + if (copy_to_user((struct getbmapx *)arg, &bmx, sizeof(bmx))) + return -XFS_ERROR(EFAULT); + + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_iomap.c linux.21rc5-ac2/fs/xfs/linux/xfs_iomap.c --- linux.21rc5/fs/xfs/linux/xfs_iomap.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_iomap.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,784 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_space.h" +#include "xfs_utils.h" + +#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ + << mp->m_writeio_log) +#define XFS_STRAT_WRITE_IMAPS 2 +#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP + +STATIC int +_xfs_imap_to_bmap( + xfs_iocore_t *io, + xfs_off_t offset, + xfs_bmbt_irec_t *imap, + page_buf_bmap_t *pbmapp, + int imaps, /* Number of imap entries */ + int pbmaps) /* Number of pbmap entries */ +{ + xfs_mount_t *mp; + xfs_fsize_t nisize; + int pbm; + xfs_fsblock_t start_block; + + mp = io->io_mount; + nisize = XFS_SIZE(mp, io); + if (io->io_new_size > nisize) + nisize = io->io_new_size; + + for (pbm = 0; imaps && pbm < pbmaps; imaps--, pbmapp++, imap++, pbm++) { + pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ? 
+ mp->m_rtdev_targp : mp->m_ddev_targp; + pbmapp->pbm_offset = XFS_FSB_TO_B(mp, imap->br_startoff); + pbmapp->pbm_delta = offset - pbmapp->pbm_offset; + pbmapp->pbm_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount); + pbmapp->pbm_flags = 0; + + start_block = imap->br_startblock; + if (start_block == HOLESTARTBLOCK) { + pbmapp->pbm_bn = PAGE_BUF_DADDR_NULL; + pbmapp->pbm_flags = PBMF_HOLE; + } else if (start_block == DELAYSTARTBLOCK) { + pbmapp->pbm_bn = PAGE_BUF_DADDR_NULL; + pbmapp->pbm_flags = PBMF_DELAY; + } else { + pbmapp->pbm_bn = XFS_FSB_TO_DB_IO(io, start_block); + if (ISUNWRITTEN(imap)) + pbmapp->pbm_flags |= PBMF_UNWRITTEN; + } + + if ((pbmapp->pbm_offset + pbmapp->pbm_bsize) >= nisize) { + pbmapp->pbm_flags |= PBMF_EOF; + } + + offset += pbmapp->pbm_bsize - pbmapp->pbm_delta; + } + return pbm; /* Return the number filled */ +} + +int +xfs_iomap( + xfs_iocore_t *io, + xfs_off_t offset, + ssize_t count, + int flags, + page_buf_bmap_t *pbmapp, + int *npbmaps) +{ + xfs_mount_t *mp = io->io_mount; + xfs_fileoff_t offset_fsb, end_fsb; + int error = 0; + int lockmode = 0; + xfs_bmbt_irec_t imap; + int nimaps = 1; + int bmap_flags = 0; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + switch (flags & + (BMAP_READ|BMAP_WRITE|BMAP_ALLOCATE|BMAP_UNWRITTEN)) { + case BMAP_READ: + lockmode = XFS_LCK_MAP_SHARED(mp, io); + bmap_flags = XFS_BMAPI_ENTIRE; + if (flags & BMAP_IGNSTATE) + bmap_flags |= XFS_BMAPI_IGSTATE; + break; + case PBF_WRITE: + lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR; + bmap_flags = 0; + XFS_ILOCK(mp, io, lockmode); + break; + case BMAP_ALLOCATE: + lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD; + bmap_flags = XFS_BMAPI_ENTIRE; + /* Attempt non-blocking lock */ + if (flags & BMAP_TRYLOCK) { + if (!XFS_ILOCK_NOWAIT(mp, io, lockmode)) + return XFS_ERROR(EAGAIN); + } else { + XFS_ILOCK(mp, io, lockmode); + } + break; + case BMAP_UNWRITTEN: + goto phase2; + default: + BUG(); + } + + offset_fsb = XFS_B_TO_FSBT(mp, offset); + end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); + + error = XFS_BMAPI(mp, NULL, io, offset_fsb, + (xfs_filblks_t)(end_fsb - offset_fsb) , + bmap_flags, NULL, 0, &imap, + &nimaps, NULL); + + if (error) + goto out; + +phase2: + switch (flags & (BMAP_WRITE|BMAP_ALLOCATE|BMAP_UNWRITTEN)) { + case BMAP_WRITE: + /* If we found an extent, return it */ + if (nimaps && (imap.br_startblock != HOLESTARTBLOCK)) + break; + + if (flags & (BMAP_DIRECT|BMAP_MMAP)) { + error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset, + count, flags, &imap, &nimaps, nimaps); + } else { + error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, + flags, &imap, &nimaps); + } + break; + case BMAP_ALLOCATE: + /* If we found an extent, return it */ + XFS_IUNLOCK(mp, io, lockmode); + lockmode = 0; + + if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) + break; + + error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps); + break; + case BMAP_UNWRITTEN: + lockmode = 0; + error = XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count); + nimaps = 0; + break; + } + + if (nimaps) { + *npbmaps = _xfs_imap_to_bmap(io, offset, &imap, + pbmapp, nimaps, *npbmaps); + } else if (npbmaps) { + *npbmaps = 0; + } + +out: + if (lockmode) + XFS_IUNLOCK(mp, io, lockmode); + return XFS_ERROR(error); +} + +STATIC int +xfs_flush_space( + xfs_inode_t *ip, + int *fsynced, + int *ioflags) +{ + vnode_t *vp = XFS_ITOV(ip); + + switch (*fsynced) { + case 0: + if (ip->i_delayed_blks) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + fsync_inode_data_buffers(LINVFS_GET_IP(vp)); + xfs_ilock(ip, XFS_ILOCK_EXCL); + *fsynced = 1; + } 
else { + *ioflags |= BMAP_SYNC; + *fsynced = 2; + } + return 0; + case 1: + *fsynced = 2; + *ioflags |= BMAP_SYNC; + return 0; + case 2: + xfs_iunlock(ip, XFS_ILOCK_EXCL); + fsync_no_super(LINVFS_GET_IP(vp)->i_dev); + xfs_log_force(ip->i_mount, (xfs_lsn_t)0, + XFS_LOG_FORCE|XFS_LOG_SYNC); + xfs_ilock(ip, XFS_ILOCK_EXCL); + *fsynced = 3; + return 0; + } + return 1; +} + +int +xfs_iomap_write_direct( + xfs_inode_t *ip, + loff_t offset, + size_t count, + int flags, + xfs_bmbt_irec_t *ret_imap, + int *nmaps, + int found) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_iocore_t *io = &ip->i_iocore; + xfs_fileoff_t offset_fsb; + xfs_fileoff_t last_fsb; + xfs_filblks_t count_fsb; + xfs_fsize_t isize; + xfs_fsblock_t firstfsb; + int nimaps, maps; + int error; + int bmapi_flag; + int rt; + xfs_trans_t *tp; + xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp; + xfs_bmap_free_t free_list; + int aeof; + xfs_filblks_t datablocks; + int committed; + int numrtextents; + uint resblks; + + /* + * Make sure that the dquots are there. This doesn't hold + * the ilock across a disk read. + */ + + error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED); + if (error) + return XFS_ERROR(error); + + maps = min(XFS_WRITE_IMAPS, *nmaps); + nimaps = maps; + + isize = ip->i_d.di_size; + aeof = (offset + count) > isize; + + if (io->io_new_size > isize) + isize = io->io_new_size; + + offset_fsb = XFS_B_TO_FSBT(mp, offset); + last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); + count_fsb = last_fsb - offset_fsb; + if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) { + xfs_fileoff_t map_last_fsb; + + map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff; + + if (map_last_fsb < last_fsb) { + last_fsb = map_last_fsb; + count_fsb = last_fsb - offset_fsb; + } + ASSERT(count_fsb > 0); + } + + /* + * determine if reserving space on + * the data or realtime partition. + */ + if ((rt = XFS_IS_REALTIME_INODE(ip))) { + int sbrtextsize, iprtextsize; + + sbrtextsize = mp->m_sb.sb_rextsize; + iprtextsize = + ip->i_d.di_extsize ? ip->i_d.di_extsize : sbrtextsize; + numrtextents = (count_fsb + iprtextsize - 1); + do_div(numrtextents, sbrtextsize); + datablocks = 0; + } else { + datablocks = count_fsb; + numrtextents = 0; + } + + /* + * allocate and setup the transaction + */ + xfs_iunlock(ip, XFS_ILOCK_EXCL); + tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); + + resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks); + + error = xfs_trans_reserve(tp, resblks, + XFS_WRITE_LOG_RES(mp), numrtextents, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + + /* + * check for running out of space + */ + if (error) + /* + * Free the transaction structure. + */ + xfs_trans_cancel(tp, 0); + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + if (error) + goto error_out; /* Don't return in above if .. 
trans .., + need lock to return */ + + if (XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, resblks)) { + error = (EDQUOT); + goto error1; + } + nimaps = 1; + + bmapi_flag = XFS_BMAPI_WRITE; + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + if (!(flags & BMAP_MMAP) && (offset < ip->i_d.di_size || rt)) + bmapi_flag |= XFS_BMAPI_PREALLOC; + + /* + * issue the bmapi() call to allocate the blocks + */ + XFS_BMAP_INIT(&free_list, &firstfsb); + imapp = &imap[0]; + error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, + bmapi_flag, &firstfsb, 0, imapp, &nimaps, &free_list); + if (error) { + goto error0; + } + + /* + * complete the transaction + */ + + error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); + if (error) { + goto error0; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (error) { + goto error_out; + } + + /* copy any maps to caller's array and return any error. */ + if (nimaps == 0) { + error = (ENOSPC); + goto error_out; + } + + *ret_imap = imap[0]; + *nmaps = 1; + return 0; + + error0: /* Cancel bmap, unlock inode, and cancel trans */ + xfs_bmap_cancel(&free_list); + + error1: /* Just cancel transaction */ + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); + *nmaps = 0; /* nothing set-up here */ + +error_out: + return XFS_ERROR(error); +} + +int +xfs_iomap_write_delay( + xfs_inode_t *ip, + loff_t offset, + size_t count, + int ioflag, + xfs_bmbt_irec_t *ret_imap, + int *nmaps) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_iocore_t *io = &ip->i_iocore; + xfs_fileoff_t offset_fsb; + xfs_fileoff_t last_fsb; + xfs_fsize_t isize; + xfs_fsblock_t firstblock; + int nimaps; + int error; + xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; + int aeof; + int fsynced = 0; + + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); + + /* + * Make sure that the dquots are there. This doesn't hold + * the ilock across a disk read. + */ + + error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); + if (error) + return XFS_ERROR(error); + +retry: + isize = ip->i_d.di_size; + if (io->io_new_size > isize) { + isize = io->io_new_size; + } + + aeof = 0; + offset_fsb = XFS_B_TO_FSBT(mp, offset); + last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); + /* + * If the caller is doing a write at the end of the file, + * then extend the allocation (and the buffer used for the write) + * out to the file system's write iosize. We clean up any extra + * space left over when the file is closed in xfs_inactive(). + * + * We don't bother with this for sync writes, because we need + * to minimize the amount we write for good performance. 
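+ *
+ * Concretely: the delayed allocation is rounded out to the end of
+ * the m_writeio_blocks granule containing the last byte written,
+ * so a run of small appends coalesces into a few large allocations
+ * instead of one tiny allocation per write.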
+ */ + if (!(ioflag & BMAP_SYNC) && ((offset + count) > ip->i_d.di_size)) { + xfs_off_t aligned_offset; + unsigned int iosize; + xfs_fileoff_t ioalign; + + iosize = mp->m_writeio_blocks; + aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); + ioalign = XFS_B_TO_FSBT(mp, aligned_offset); + last_fsb = ioalign + iosize; + aeof = 1; + } + + nimaps = XFS_WRITE_IMAPS; + firstblock = NULLFSBLOCK; + + /* + * roundup the allocation request to m_dalign boundary if file size + * is greater that 512K and we are allocating past the allocation eof + */ + if (mp->m_dalign && (isize >= mp->m_dalign) && aeof) { + int eof; + xfs_fileoff_t new_last_fsb; + new_last_fsb = roundup_64(last_fsb, mp->m_dalign); + error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); + if (error) { + return error; + } + if (eof) { + last_fsb = new_last_fsb; + } + } + + error = xfs_bmapi(NULL, ip, offset_fsb, + (xfs_filblks_t)(last_fsb - offset_fsb), + XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | + XFS_BMAPI_ENTIRE, &firstblock, 1, imap, + &nimaps, NULL); + /* + * This can be EDQUOT, if nimaps == 0 + */ + if (error && (error != ENOSPC)) { + return XFS_ERROR(error); + } + /* + * If bmapi returned us nothing, and if we didn't get back EDQUOT, + * then we must have run out of space. + */ + + if (nimaps == 0) { + if (xfs_flush_space(ip, &fsynced, &ioflag)) + return XFS_ERROR(ENOSPC); + + error = 0; + goto retry; + } + + *ret_imap = imap[0]; + *nmaps = 1; + return 0; +} + +/* + * Pass in a delayed allocate extent, convert it to real extents; + * return to the caller the extent we create which maps on top of + * the originating callers request. + * + * Called without a lock on the inode. + */ +int +xfs_iomap_write_allocate( + xfs_inode_t *ip, + xfs_bmbt_irec_t *map, + int *retmap) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_fileoff_t offset_fsb, last_block; + xfs_fileoff_t end_fsb, map_start_fsb; + xfs_fsblock_t first_block; + xfs_bmap_free_t free_list; + xfs_filblks_t count_fsb; + xfs_bmbt_irec_t imap[XFS_STRAT_WRITE_IMAPS]; + xfs_trans_t *tp; + int i, nimaps, committed; + int error = 0; + int nres; + + *retmap = 0; + + /* + * Make sure that the dquots are there. + */ + + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return XFS_ERROR(error); + + offset_fsb = map->br_startoff; + count_fsb = map->br_blockcount; + map_start_fsb = offset_fsb; + + XFS_STATS_ADD(xfsstats.xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); + + while (count_fsb != 0) { + /* + * Set up a transaction with which to allocate the + * backing store for the file. Do allocations in a + * loop until we get some space in the range we are + * interested in. The other space that might be allocated + * is in the delayed allocation extent on which we sit + * but before our buffer starts. 
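+ * Each pass converts some leading portion of that extent; the loop
+ * advances past any converted pieces that precede the request until
+ * the newly real range overlaps the caller's map.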
+		 */
+
+		nimaps = 0;
+		while (nimaps == 0) {
+			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
+			error = xfs_trans_reserve(tp, nres,
+					XFS_WRITE_LOG_RES(mp),
+					0, XFS_TRANS_PERM_LOG_RES,
+					XFS_WRITE_LOG_COUNT);
+
+			if (error == ENOSPC) {
+				error = xfs_trans_reserve(tp, 0,
+						XFS_WRITE_LOG_RES(mp),
+						0,
+						XFS_TRANS_PERM_LOG_RES,
+						XFS_WRITE_LOG_COUNT);
+			}
+			if (error) {
+				xfs_trans_cancel(tp, 0);
+				return XFS_ERROR(error);
+			}
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(tp, ip);
+
+			XFS_BMAP_INIT(&free_list, &first_block);
+
+			nimaps = XFS_STRAT_WRITE_IMAPS;
+			/*
+			 * Ensure we don't go beyond eof - it is possible
+			 * the extents changed since we did the read call,
+			 * as we dropped the ilock in the interim.
+			 */
+
+			end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);
+			xfs_bmap_last_offset(NULL, ip, &last_block,
+					XFS_DATA_FORK);
+			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
+			if ((map_start_fsb + count_fsb) > last_block) {
+				count_fsb = last_block - map_start_fsb;
+				if (count_fsb == 0) {
+					error = EAGAIN;
+					goto trans_cancel;
+				}
+			}
+
+			/* Go get the actual blocks */
+			error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
+					XFS_BMAPI_WRITE, &first_block, 1,
+					imap, &nimaps, &free_list);
+
+			if (error)
+				goto trans_cancel;
+
+			error = xfs_bmap_finish(&tp, &free_list,
+					first_block, &committed);
+
+			if (error)
+				goto trans_cancel;
+
+			error = xfs_trans_commit(tp,
+					XFS_TRANS_RELEASE_LOG_RES, NULL);
+
+			if (error)
+				goto error0;
+
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		}
+
+		/*
+		 * See if we were able to allocate an extent that
+		 * covers at least part of the caller's request.
+		 */
+
+		for (i = 0; i < nimaps; i++) {
+			if ((map->br_startoff >= imap[i].br_startoff) &&
+			    (map->br_startoff < (imap[i].br_startoff +
+						 imap[i].br_blockcount))) {
+				*map = imap[i];
+				*retmap = 1;
+				XFS_STATS_INC(xfsstats.xs_xstrat_quick);
+				return 0;
+			}
+			count_fsb -= imap[i].br_blockcount;
+		}
+
+		/*
+		 * So far we have not mapped the requested part of the
+		 * file, just surrounding data; try again.
+		 */
+		nimaps--;
+		offset_fsb = imap[nimaps].br_startoff +
+			     imap[nimaps].br_blockcount;
+		map_start_fsb = offset_fsb;
+	}
+
+trans_cancel:
+	xfs_bmap_cancel(&free_list);
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error0:
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return XFS_ERROR(error);
+}
+
+int
+xfs_iomap_write_unwritten(
+	xfs_inode_t	*ip,
+	loff_t		offset,
+	size_t		count)
+{
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_trans_t	*tp;
+	xfs_fileoff_t	offset_fsb;
+	xfs_filblks_t	count_fsb;
+	xfs_filblks_t	numblks_fsb;
+	xfs_bmbt_irec_t	imap;
+	int		committed;
+	int		error;
+	int		nres;
+	int		nimaps;
+	xfs_fsblock_t	firstfsb;
+	xfs_bmap_free_t	free_list;
+
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	count_fsb = XFS_B_TO_FSB(mp, count);
+
+	do {
+		nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+
+		/*
+		 * Set up a transaction to convert the range of extents
+		 * from unwritten to real.  Do allocations in a loop until
+		 * we have covered the range passed in.
+		 */
+
+		tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+		error = xfs_trans_reserve(tp, nres,
+				XFS_WRITE_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES,
+				XFS_WRITE_LOG_COUNT);
+		if (error) {
+			xfs_trans_cancel(tp, 0);
+			goto error0;
+		}
+
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+
+		/*
+		 * Modify the unwritten extent state of the buffer.
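The conversion loop that this sits in has a simple shape: one transaction per chunk, advancing by however many blocks each xfs_bmapi() call actually converted. A schematic, with convert_chunk() as an illustrative stand-in:

	/* Convert an unwritten range to written, one chunk per pass. */
	extern unsigned long convert_chunk(unsigned long off, unsigned long len);

	static void convert_range(unsigned long offset_fsb, unsigned long count_fsb)
	{
		while (count_fsb > 0) {
			unsigned long done = convert_chunk(offset_fsb, count_fsb);
			if (done == 0)
				break;		/* would loop forever */
			offset_fsb += done;
			count_fsb -= done;
		}
	}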
+ */ + XFS_BMAP_INIT(&free_list, &firstfsb); + nimaps = 1; + error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, + XFS_BMAPI_WRITE, &firstfsb, + 1, &imap, &nimaps, &free_list); + if (error) + goto error_on_bmapi_transaction; + + error = xfs_bmap_finish(&(tp), &(free_list), + firstfsb, &committed); + if (error) + goto error_on_bmapi_transaction; + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + if (error) + goto error0; + + if ((numblks_fsb = imap.br_blockcount) == 0) { + /* + * The numblks_fsb value should always get + * smaller, otherwise the loop is stuck. + */ + ASSERT(imap.br_blockcount); + break; + } + offset_fsb += numblks_fsb; + count_fsb -= numblks_fsb; + } while (count_fsb > 0); + + return 0; + +error_on_bmapi_transaction: + xfs_bmap_cancel(&free_list); + xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); + xfs_iunlock(ip, XFS_ILOCK_EXCL); +error0: + return XFS_ERROR(error); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_iops.c linux.21rc5-ac2/fs/xfs/linux/xfs_iops.c --- linux.21rc5/fs/xfs/linux/xfs_iops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_iops.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,832 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" + +#include + + +/* + * Pull the link count and size up from the xfs inode to the linux inode + */ +STATIC void +validate_fields( + struct inode *ip) +{ + vnode_t *vp = LINVFS_GET_VP(ip); + vattr_t va; + int error; + + va.va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS; + VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error); + ip->i_nlink = va.va_nlink; + ip->i_size = va.va_size; + ip->i_blocks = va.va_nblocks; +} + +#ifdef CONFIG_XFS_POSIX_ACL +/* + * Determine whether a process has a valid fs_struct (kernel daemons + * like knfsd don't have an fs_struct). + */ +STATIC int inline +has_fs_struct(struct task_struct *task) +{ + return (task->fs != init_task.fs); +} +#endif + +STATIC int +linvfs_mknod( + struct inode *dir, + struct dentry *dentry, + int mode, + int rdev) +{ + struct inode *ip; + vattr_t va; + vnode_t *vp = NULL, *dvp = LINVFS_GET_VP(dir); + xattr_exists_t test_default_acl = _ACL_DEFAULT_EXISTS; + int have_default_acl = 0; + int error = EINVAL; + + if (test_default_acl) + have_default_acl = test_default_acl(dvp); + +#ifdef CONFIG_XFS_POSIX_ACL + /* + * Conditionally compiled so that the ACL base kernel changes can be + * split out into separate patches - remove this once MS_POSIXACL is + * accepted, or some other way to implement this exists. 
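The umask handling this comment describes follows the usual POSIX ACL rule: the process umask is only applied when the parent directory carries no default ACL, because the default ACL is what filters the mode otherwise. A user-space sketch with illustrative values:

	#include <assert.h>

	static int effective_mode(int mode, int umask, int have_default_acl)
	{
		return have_default_acl ? mode : (mode & ~umask);
	}

	int main(void)
	{
		assert(effective_mode(0666, 022, 0) == 0644); /* umask applies   */
		assert(effective_mode(0666, 022, 1) == 0666); /* ACL will filter */
		return 0;
	}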
+ */ + if (IS_POSIXACL(dir) && !have_default_acl && has_fs_struct(current)) + mode &= ~current->fs->umask; +#endif + + memset(&va, 0, sizeof(va)); + va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + va.va_type = IFTOVT(mode); + va.va_mode = mode; + + switch (mode & S_IFMT) { + case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: + va.va_rdev = XFS_MKDEV(MAJOR(rdev), MINOR(rdev)); + va.va_mask |= XFS_AT_RDEV; + /*FALLTHROUGH*/ + case S_IFREG: + VOP_CREATE(dvp, dentry, &va, &vp, NULL, error); + break; + case S_IFDIR: + VOP_MKDIR(dvp, dentry, &va, &vp, NULL, error); + break; + default: + error = EINVAL; + break; + } + + if (!error) { + ASSERT(vp); + ip = LINVFS_GET_IP(vp); + if (!ip) { + VN_RELE(vp); + return -ENOMEM; + } + + if (S_ISCHR(mode) || S_ISBLK(mode)) + ip->i_rdev = to_kdev_t(rdev); + validate_fields(dir); + d_instantiate(dentry, ip); + } + + if (!error && have_default_acl) { + _ACL_DECL (pdacl); + + if (!_ACL_ALLOC(pdacl)) { + error = -ENOMEM; + } else { + if (_ACL_GET_DEFAULT(dvp, pdacl)) + error = _ACL_INHERIT(vp, &va, pdacl); + VMODIFY(vp); + _ACL_FREE(pdacl); + } + } + return -error; +} + +STATIC int +linvfs_create( + struct inode *dir, + struct dentry *dentry, + int mode) +{ + return linvfs_mknod(dir, dentry, mode, 0); +} + +STATIC int +linvfs_mkdir( + struct inode *dir, + struct dentry *dentry, + int mode) +{ + return linvfs_mknod(dir, dentry, mode|S_IFDIR, 0); +} + +STATIC struct dentry * +linvfs_lookup( + struct inode *dir, + struct dentry *dentry) +{ + struct inode *ip = NULL; + vnode_t *vp, *cvp = NULL; + int error; + + if (dentry->d_name.len >= MAXNAMELEN) + return ERR_PTR(-ENAMETOOLONG); + + vp = LINVFS_GET_VP(dir); + VOP_LOOKUP(vp, dentry, &cvp, 0, NULL, NULL, error); + if (!error) { + ASSERT(cvp); + ip = LINVFS_GET_IP(cvp); + if (!ip) { + VN_RELE(cvp); + return ERR_PTR(-EACCES); + } + } + if (error && (error != ENOENT)) + return ERR_PTR(-error); + d_add(dentry, ip); /* Negative entry goes in if ip is NULL */ + return NULL; +} + +STATIC int +linvfs_link( + struct dentry *old_dentry, + struct inode *dir, + struct dentry *dentry) +{ + struct inode *ip; /* inode of guy being linked to */ + vnode_t *tdvp; /* target directory for new name/link */ + vnode_t *vp; /* vp of name being linked */ + int error; + + ip = old_dentry->d_inode; /* inode being linked to */ + if (S_ISDIR(ip->i_mode)) + return -EPERM; + + tdvp = LINVFS_GET_VP(dir); + vp = LINVFS_GET_VP(ip); + + VOP_LINK(tdvp, vp, dentry, NULL, error); + if (!error) { + VMODIFY(tdvp); + VN_HOLD(vp); + validate_fields(ip); + d_instantiate(dentry, ip); + } + return -error; +} + +STATIC int +linvfs_unlink( + struct inode *dir, + struct dentry *dentry) +{ + struct inode *inode; + vnode_t *dvp; /* directory containing name to remove */ + int error; + + inode = dentry->d_inode; + dvp = LINVFS_GET_VP(dir); + + VOP_REMOVE(dvp, dentry, NULL, error); + if (!error) { + validate_fields(dir); /* For size only */ + validate_fields(inode); + } + + return -error; +} + +STATIC int +linvfs_symlink( + struct inode *dir, + struct dentry *dentry, + const char *symname) +{ + struct inode *ip; + vattr_t va; + vnode_t *dvp; /* directory containing name to remove */ + vnode_t *cvp; /* used to lookup symlink to put in dentry */ + int error; + + dvp = LINVFS_GET_VP(dir); + cvp = NULL; + + memset(&va, 0, sizeof(va)); + va.va_type = VLNK; + va.va_mode = irix_symlink_mode ? 
0777 & ~current->fs->umask : S_IRWXUGO; + va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + + error = 0; + VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error); + if (!error && cvp) { + ASSERT(cvp->v_type == VLNK); + ip = LINVFS_GET_IP(cvp); + d_instantiate(dentry, ip); + validate_fields(dir); + validate_fields(ip); /* size needs update */ + } + return -error; +} + +STATIC int +linvfs_rmdir( + struct inode *dir, + struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + vnode_t *dvp = LINVFS_GET_VP(dir); + int error; + + VOP_RMDIR(dvp, dentry, NULL, error); + if (!error) { + validate_fields(inode); + validate_fields(dir); + } + return -error; +} + +STATIC int +linvfs_rename( + struct inode *odir, + struct dentry *odentry, + struct inode *ndir, + struct dentry *ndentry) +{ + struct inode *new_inode = ndentry->d_inode; + vnode_t *fvp; /* from directory */ + vnode_t *tvp; /* target directory */ + int error; + + fvp = LINVFS_GET_VP(odir); + tvp = LINVFS_GET_VP(ndir); + + VOP_RENAME(fvp, odentry, tvp, ndentry, NULL, error); + if (error) + return -error; + + if (new_inode) + validate_fields(new_inode); + + validate_fields(odir); + if (ndir != odir) + validate_fields(ndir); + return 0; +} + +STATIC int +linvfs_readlink( + struct dentry *dentry, + char *buf, + int size) +{ + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + uio_t uio; + iovec_t iov; + int error; + + iov.iov_base = buf; + iov.iov_len = size; + + uio.uio_iov = &iov; + uio.uio_offset = 0; + uio.uio_segflg = UIO_USERSPACE; + uio.uio_resid = size; + uio.uio_iovcnt = 1; + + VOP_READLINK(vp, &uio, NULL, error); + if (error) + return -error; + + return (size - uio.uio_resid); +} + +/* + * careful here - this function can get called recursively, so + * we need to be very careful about how much stack we use. + * uio is kmalloced for this reason... 
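The pattern being described, keeping a recursion-prone path off the stack by heap-allocating its buffers, can be sketched like this; read_link_target() is a stand-in for VOP_READLINK and not a real call:

	#include <stdlib.h>

	#define SKETCH_MAXNAMELEN 1024	/* mirrors MAXNAMELEN here */

	extern long read_link_target(char *buf, size_t len);

	static char *get_link_heap(void)
	{
		char *link = malloc(SKETCH_MAXNAMELEN + 1);
		long n;

		if (!link)
			return NULL;
		n = read_link_target(link, SKETCH_MAXNAMELEN);
		if (n < 0) {
			free(link);
			return NULL;
		}
		link[n] = '\0';	/* the read does not NUL-terminate */
		return link;	/* caller frees */
	}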
+ */ +STATIC int +linvfs_follow_link( + struct dentry *dentry, + struct nameidata *nd) +{ + vnode_t *vp; + uio_t *uio; + iovec_t iov; + int error; + char *link; + + ASSERT(dentry); + ASSERT(nd); + + link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL); + if (!link) + return -ENOMEM; + + uio = (uio_t *)kmalloc(sizeof(uio_t), GFP_KERNEL); + if (!uio) { + kfree(link); + return -ENOMEM; + } + + vp = LINVFS_GET_VP(dentry->d_inode); + + iov.iov_base = link; + iov.iov_len = MAXNAMELEN; + + uio->uio_iov = &iov; + uio->uio_offset = 0; + uio->uio_segflg = UIO_SYSSPACE; + uio->uio_resid = MAXNAMELEN; + uio->uio_fmode = 0; + uio->uio_iovcnt = 1; + + VOP_READLINK(vp, uio, NULL, error); + if (error) { + kfree(uio); + kfree(link); + return -error; + } + + link[MAXNAMELEN - uio->uio_resid] = '\0'; + kfree(uio); + + /* vfs_follow_link returns (-) errors */ + error = vfs_follow_link(nd, link); + kfree(link); + return error; +} + +STATIC int +linvfs_permission( + struct inode *inode, + int mode) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error; + + mode <<= 6; /* convert from linux to vnode access bits */ + VOP_ACCESS(vp, mode, NULL, error); + return -error; +} + +STATIC int +linvfs_revalidate( + struct dentry *dentry) +{ + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + + if (unlikely(vp->v_flag & VMODIFIED)) + return vn_revalidate(vp); + return 0; +} + +STATIC int +linvfs_setattr( + struct dentry *dentry, + struct iattr *attr) +{ + struct inode *inode = dentry->d_inode; + unsigned int ia_valid = attr->ia_valid; + vnode_t *vp = LINVFS_GET_VP(inode); + vattr_t vattr; + int flags = 0; + int error; + + memset(&vattr, 0, sizeof(vattr_t)); + if (ia_valid & ATTR_UID) { + vattr.va_mask |= XFS_AT_UID; + vattr.va_uid = attr->ia_uid; + } + if (ia_valid & ATTR_GID) { + vattr.va_mask |= XFS_AT_GID; + vattr.va_gid = attr->ia_gid; + } + if (ia_valid & ATTR_SIZE) { + vattr.va_mask |= XFS_AT_SIZE; + vattr.va_size = attr->ia_size; + } + if (ia_valid & ATTR_ATIME) { + vattr.va_mask |= XFS_AT_ATIME; + vattr.va_atime.tv_sec = attr->ia_atime; + vattr.va_atime.tv_nsec = 0; + } + if (ia_valid & ATTR_MTIME) { + vattr.va_mask |= XFS_AT_MTIME; + vattr.va_mtime.tv_sec = attr->ia_mtime; + vattr.va_mtime.tv_nsec = 0; + } + if (ia_valid & ATTR_CTIME) { + vattr.va_mask |= XFS_AT_CTIME; + vattr.va_ctime.tv_sec = attr->ia_ctime; + vattr.va_ctime.tv_nsec = 0; + } + if (ia_valid & ATTR_MODE) { + vattr.va_mask |= XFS_AT_MODE; + vattr.va_mode = attr->ia_mode; + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) + inode->i_mode &= ~S_ISGID; + } + + if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) + flags = ATTR_UTIME; + + VOP_SETATTR(vp, &vattr, flags, NULL, error); + if (error) + return(-error); /* Positive error up from XFS */ + if (ia_valid & ATTR_SIZE) { + error = vmtruncate(inode, attr->ia_size); + } + + if (!error) { + vn_revalidate(vp); + } + return error; +} + +STATIC void +linvfs_truncate( + struct inode *inode) +{ + block_truncate_page(inode->i_mapping, inode->i_size, linvfs_get_block); +} + + + +/* + * Extended attributes interfaces + */ + +#define SYSTEM_NAME "system." /* VFS shared names/values */ +#define ROOT_NAME "trusted." /* root's own names/values */ +#define USER_NAME "user." 
/* user's own names/values */ +STATIC xattr_namespace_t xfs_namespace_array[] = { + { .name= SYSTEM_NAME, .namelen= sizeof(SYSTEM_NAME)-1,.exists= NULL }, + { .name= ROOT_NAME, .namelen= sizeof(ROOT_NAME)-1, .exists= NULL }, + { .name= USER_NAME, .namelen= sizeof(USER_NAME)-1, .exists= NULL }, + { .name= NULL } +}; +xattr_namespace_t *xfs_namespaces = &xfs_namespace_array[0]; + +#define POSIXACL_ACCESS "posix_acl_access" +#define POSIXACL_ACCESS_SIZE (sizeof(POSIXACL_ACCESS)-1) +#define POSIXACL_DEFAULT "posix_acl_default" +#define POSIXACL_DEFAULT_SIZE (sizeof(POSIXACL_DEFAULT)-1) +#define POSIXCAP "posix_capabilities" +#define POSIXCAP_SIZE (sizeof(POSIXCAP)-1) +#define POSIXMAC "posix_mac" +#define POSIXMAC_SIZE (sizeof(POSIXMAC)-1) +STATIC xattr_namespace_t sys_namespace_array[] = { + { .name= POSIXACL_ACCESS, + .namelen= POSIXACL_ACCESS_SIZE, .exists= _ACL_ACCESS_EXISTS }, + { .name= POSIXACL_DEFAULT, + .namelen= POSIXACL_DEFAULT_SIZE, .exists= _ACL_DEFAULT_EXISTS }, + { .name= POSIXCAP, + .namelen= POSIXCAP_SIZE, .exists= _CAP_EXISTS }, + { .name= POSIXMAC, + .namelen= POSIXMAC_SIZE, .exists= _MAC_EXISTS }, + { .name= NULL } +}; + +/* + * Some checks to prevent people abusing EAs to get over quota: + * - Don't allow modifying user EAs on devices/symlinks; + * - Don't allow modifying user EAs if sticky bit set; + */ +STATIC int +capable_user_xattr( + struct inode *inode) +{ + if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) && + !capable(CAP_SYS_ADMIN)) + return 0; + if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) && + (current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) + return 0; + return 1; +} + +STATIC int +linvfs_setxattr( + struct dentry *dentry, + const char *name, + void *data, + size_t size, + int flags) +{ + struct inode *inode = dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + char *p = (char *)name; + int xflags = 0; + int error; + + if (strncmp(name, xfs_namespaces[SYSTEM_NAMES].name, + xfs_namespaces[SYSTEM_NAMES].namelen) == 0) { + error = -EINVAL; + if (flags & XATTR_CREATE) + return error; + error = -EOPNOTSUPP; + p += xfs_namespaces[SYSTEM_NAMES].namelen; + if (strcmp(p, POSIXACL_ACCESS) == 0) + error = xfs_acl_vset(vp, (void *) data, size, + _ACL_TYPE_ACCESS); + else if (strcmp(p, POSIXACL_DEFAULT) == 0) + error = xfs_acl_vset(vp, (void *) data, size, + _ACL_TYPE_DEFAULT); + else if (strcmp(p, POSIXCAP) == 0) + error = xfs_cap_vset(vp, (void *) data, size); + if (!error) + error = vn_revalidate(vp); + return error; + } + + /* Convert Linux syscall to XFS internal ATTR flags */ + if (flags & XATTR_CREATE) + xflags |= ATTR_CREATE; + if (flags & XATTR_REPLACE) + xflags |= ATTR_REPLACE; + + if (strncmp(name, xfs_namespaces[ROOT_NAMES].name, + xfs_namespaces[ROOT_NAMES].namelen) == 0) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + xflags |= ATTR_ROOT; + p += xfs_namespaces[ROOT_NAMES].namelen; + VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error); + return -error; + } + if (strncmp(name, xfs_namespaces[USER_NAMES].name, + xfs_namespaces[USER_NAMES].namelen) == 0) { + if (!capable_user_xattr(inode)) + return -EPERM; + p += xfs_namespaces[USER_NAMES].namelen; + VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error); + return -error; + } + return -EOPNOTSUPP; +} + +STATIC ssize_t +linvfs_getxattr( + struct dentry *dentry, + const char *name, + void *data, + size_t size) +{ + struct inode *inode = dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + char *p = (char *)name; + int xflags = 0; + ssize_t error; + + if 
(strncmp(name, xfs_namespaces[SYSTEM_NAMES].name, + xfs_namespaces[SYSTEM_NAMES].namelen) == 0) { + error = -EOPNOTSUPP; + p += xfs_namespaces[SYSTEM_NAMES].namelen; + if (strcmp(p, POSIXACL_ACCESS) == 0) + error = xfs_acl_vget(vp, data, size, _ACL_TYPE_ACCESS); + else if (strcmp(p, POSIXACL_DEFAULT) == 0) + error = xfs_acl_vget(vp, data, size, _ACL_TYPE_DEFAULT); + else if (strcmp(p, POSIXCAP) == 0) + error = xfs_cap_vget(vp, data, size); + return error; + } + + /* Convert Linux syscall to XFS internal ATTR flags */ + if (!size) { + xflags |= ATTR_KERNOVAL; + data = NULL; + } + + if (strncmp(name, xfs_namespaces[ROOT_NAMES].name, + xfs_namespaces[ROOT_NAMES].namelen) == 0) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + xflags |= ATTR_ROOT; + p += xfs_namespaces[ROOT_NAMES].namelen; + VOP_ATTR_GET(vp, p, data, (int *)&size, xflags, NULL, error); + if (!error) + error = -size; + return -error; + } + if (strncmp(name, xfs_namespaces[USER_NAMES].name, + xfs_namespaces[USER_NAMES].namelen) == 0) { + p += xfs_namespaces[USER_NAMES].namelen; + if (!capable_user_xattr(inode)) + return -EPERM; + VOP_ATTR_GET(vp, p, data, (int *)&size, xflags, NULL, error); + if (!error) + error = -size; + return -error; + } + return -EOPNOTSUPP; +} + + +STATIC ssize_t +linvfs_listxattr( + struct dentry *dentry, + char *data, + size_t size) +{ + attrlist_cursor_kern_t cursor; + xattr_namespace_t *sys; + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + char *k = data; + int xflags = ATTR_KERNAMELS; + int result = 0; + ssize_t error; + + if (!size) + xflags |= ATTR_KERNOVAL; + if (capable(CAP_SYS_ADMIN)) + xflags |= ATTR_KERNFULLS; + + memset(&cursor, 0, sizeof(cursor)); + VOP_ATTR_LIST(vp, data, size, xflags, &cursor, NULL, error); + if (error > 0) + return -error; + result += -error; + + k += result; /* advance start of our buffer */ + for (sys = &sys_namespace_array[0]; sys->name != NULL; sys++) { + if (sys->exists == NULL || !sys->exists(vp)) + continue; + result += xfs_namespaces[SYSTEM_NAMES].namelen; + result += sys->namelen + 1; + if (size) { + if (result > size) + return -ERANGE; + strcpy(k, xfs_namespaces[SYSTEM_NAMES].name); + k += xfs_namespaces[SYSTEM_NAMES].namelen; + strcpy(k, sys->name); + k += sys->namelen + 1; + } + } + return result; +} + +STATIC int +linvfs_removexattr( + struct dentry *dentry, + const char *name) +{ + struct inode *inode = dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + char *p = (char *)name; + int xflags = 0; + int error; + + if (strncmp(name, xfs_namespaces[SYSTEM_NAMES].name, + xfs_namespaces[SYSTEM_NAMES].namelen) == 0) { + error = -EOPNOTSUPP; + p += xfs_namespaces[SYSTEM_NAMES].namelen; + if (strcmp(p, POSIXACL_ACCESS) == 0) + error = xfs_acl_vremove(vp, _ACL_TYPE_ACCESS); + else if (strcmp(p, POSIXACL_DEFAULT) == 0) + error = xfs_acl_vremove(vp, _ACL_TYPE_DEFAULT); + else if (strcmp(p, POSIXCAP) == 0) + error = xfs_cap_vremove(vp); + return error; + } + + if (strncmp(name, xfs_namespaces[ROOT_NAMES].name, + xfs_namespaces[ROOT_NAMES].namelen) == 0) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + xflags |= ATTR_ROOT; + p += xfs_namespaces[ROOT_NAMES].namelen; + VOP_ATTR_REMOVE(vp, p, xflags, NULL, error); + return -error; + } + if (strncmp(name, xfs_namespaces[USER_NAMES].name, + xfs_namespaces[USER_NAMES].namelen) == 0) { + p += xfs_namespaces[USER_NAMES].namelen; + if (!capable_user_xattr(inode)) + return -EPERM; + VOP_ATTR_REMOVE(vp, p, xflags, NULL, error); + return -error; + } + return -EOPNOTSUPP; +} + + +struct inode_operations 
linvfs_file_inode_operations = +{ + .permission = linvfs_permission, + .truncate = linvfs_truncate, + .revalidate = linvfs_revalidate, + .setattr = linvfs_setattr, + .setxattr = linvfs_setxattr, + .getxattr = linvfs_getxattr, + .listxattr = linvfs_listxattr, + .removexattr = linvfs_removexattr, +}; + +struct inode_operations linvfs_dir_inode_operations = +{ + .create = linvfs_create, + .lookup = linvfs_lookup, + .link = linvfs_link, + .unlink = linvfs_unlink, + .symlink = linvfs_symlink, + .mkdir = linvfs_mkdir, + .rmdir = linvfs_rmdir, + .mknod = linvfs_mknod, + .rename = linvfs_rename, + .permission = linvfs_permission, + .revalidate = linvfs_revalidate, + .setattr = linvfs_setattr, + .setxattr = linvfs_setxattr, + .getxattr = linvfs_getxattr, + .listxattr = linvfs_listxattr, + .removexattr = linvfs_removexattr, +}; + +struct inode_operations linvfs_symlink_inode_operations = +{ + .readlink = linvfs_readlink, + .follow_link = linvfs_follow_link, + .permission = linvfs_permission, + .revalidate = linvfs_revalidate, + .setattr = linvfs_setattr, + .setxattr = linvfs_setxattr, + .getxattr = linvfs_getxattr, + .listxattr = linvfs_listxattr, + .removexattr = linvfs_removexattr, +}; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_iops.h linux.21rc5-ac2/fs/xfs/linux/xfs_iops.h --- linux.21rc5/fs/xfs/linux/xfs_iops.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_iops.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_IOPS_H__ +#define __XFS_IOPS_H__ + +/* + * Extended system attributes. + * So far only POSIX ACLs are supported, but this will need to + * grow in time (capabilities, mandatory access control, etc). 
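The namespace table declared below drives a simple prefix dispatch in the xattr entry points above: find the namespace whose prefix starts the attribute name, then hand the remainder to the matching backend. A user-space sketch of that dispatch (the table contents mirror the prefixes used here):

	#include <string.h>

	struct ns { const char *name; unsigned int namelen; };

	static const struct ns spaces[] = {
		{ "system.",  7 },
		{ "trusted.", 8 },
		{ "user.",    5 },
		{ 0, 0 }
	};

	static const char *split_xattr(const char *name, int *ns_index)
	{
		int i;

		for (i = 0; spaces[i].name; i++) {
			if (strncmp(name, spaces[i].name, spaces[i].namelen) == 0) {
				*ns_index = i;
				return name + spaces[i].namelen; /* bare name */
			}
		}
		return NULL;	/* EOPNOTSUPP in the real code */
	}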
+ */ +#define XFS_SYSTEM_NAMESPACE SYSTEM_POSIXACL + +/* + * Define a table of the namespaces XFS supports + */ +typedef int (*xattr_exists_t)(vnode_t *); + +typedef struct xattr_namespace { + char *name; + unsigned int namelen; + xattr_exists_t exists; +} xattr_namespace_t; + +#define SYSTEM_NAMES 0 +#define ROOT_NAMES 1 +#define USER_NAMES 2 +extern struct xattr_namespace *xfs_namespaces; + + +extern struct inode_operations linvfs_file_inode_operations; +extern struct inode_operations linvfs_dir_inode_operations; +extern struct inode_operations linvfs_symlink_inode_operations; + +extern struct file_operations linvfs_file_operations; +extern struct file_operations linvfs_dir_operations; + +extern struct address_space_operations linvfs_aops; + +extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int); +extern void linvfs_unwritten_done(struct buffer_head *, int); + +#endif /* __XFS_IOPS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_linux.h linux.21rc5-ac2/fs/xfs/linux/xfs_linux.h --- linux.21rc5/fs/xfs/linux/xfs_linux.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_linux.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_LINUX__ +#define __XFS_LINUX__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifndef HAVE_SECTOR_T +typedef long sector_t; /* offset- or number- of disk blocks */ +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef STATIC +#define STATIC static +#endif + +#ifndef EVMS_MAJOR +#define EVMS_MAJOR 117 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,9) +#define page_buffers(page) ((page)->buffers) +#define page_has_buffers(page) ((page)->buffers) +#endif + +/* + * State flag for unwritten extent buffers. + * + * We need to be able to distinguish between these and delayed + * allocate buffers within XFS. 
The generic IO path code does + * not need to distinguish - we use the BH_Delay flag for both + * delalloc and these ondisk-uninitialised buffers. + */ +#define BH_Unwritten BH_PrivateStart +#define buffer_unwritten(bh) __buffer_state(bh, Unwritten) +static inline void set_buffer_unwritten_io(struct buffer_head *bh) +{ + bh->b_end_io = linvfs_unwritten_done; +} + +#define restricted_chown xfs_params.restrict_chown +#define irix_sgid_inherit xfs_params.sgid_inherit +#define irix_symlink_mode xfs_params.symlink_mode +#define xfs_panic_mask xfs_params.panic_mask +#define xfs_error_level xfs_params.error_level + +#define NBPP PAGE_SIZE +#define DPPSHFT (PAGE_SHIFT - 9) +#define NDPP (1 << (PAGE_SHIFT - 9)) +#define dtop(DD) (((DD) + NDPP - 1) >> DPPSHFT) +#define dtopt(DD) ((DD) >> DPPSHFT) +#define dpoff(DD) ((DD) & (NDPP-1)) + +#define NBBY 8 /* number of bits per byte */ +#define NBPC PAGE_SIZE /* Number of bytes per click */ +#define BPCSHIFT PAGE_SHIFT /* LOG2(NBPC) if exact */ + +/* + * Size of block device i/o is parameterized here. + * Currently the system supports page-sized i/o. + */ +#define BLKDEV_IOSHIFT BPCSHIFT +#define BLKDEV_IOSIZE (1<>BPCSHIFT) +#define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT) +#define btoc64(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) +#define btoct64(x) ((__uint64_t)(x)>>BPCSHIFT) +#define io_btoc(x) (((__psunsigned_t)(x)+(IO_NBPC-1))>>IO_BPCSHIFT) +#define io_btoct(x) ((__psunsigned_t)(x)>>IO_BPCSHIFT) + +/* off_t bytes to clicks */ +#define offtoc(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) +#define offtoct(x) ((xfs_off_t)(x)>>BPCSHIFT) + +/* clicks to off_t bytes */ +#define ctooff(x) ((xfs_off_t)(x)<>BPCSHIFT) +#define ctob64(x) ((__uint64_t)(x)<>BPCSHIFT) + +#ifndef CELL_CAPABLE +#define FSC_NOTIFY_NAME_CHANGED(vp) +#endif + +#ifndef ENOATTR +#define ENOATTR ENODATA /* Attribute not found */ +#endif + +/* Note: EWRONGFS never visible outside the kernel */ +#define EWRONGFS EINVAL /* Mount with wrong filesystem type */ + +/* + * XXX EFSCORRUPTED needs a real value in errno.h. asm-i386/errno.h won't + * return codes out of its known range in errno. + * XXX Also note: needs to be < 1000 and fairly unique on Linux (mustn't + * conflict with any code we use already or any code a driver may use) + * XXX Some options (currently we do #2): + * 1/ New error code ["Filesystem is corrupted", _after_ glibc updated] + * 2/ 990 ["Unknown error 990"] + * 3/ EUCLEAN ["Structure needs cleaning"] + * 4/ Convert EFSCORRUPTED to EIO [just prior to return into userspace] + */ +#define EFSCORRUPTED 990 /* Filesystem is corrupted */ + +#define SYNCHRONIZE() barrier() +#define __return_address __builtin_return_address(0) + +/* + * IRIX (BSD) quotactl makes use of separate commands for user/group, + * whereas on Linux the syscall encodes this information into the cmd + * field (see the QCMD macro in quota.h). These macros help keep the + * code portable - they are not visible from the syscall interface. + */ +#define Q_XSETGQLIM XQM_CMD(0x8) /* set groups disk limits */ +#define Q_XGETGQUOTA XQM_CMD(0x9) /* get groups disk limits */ + +/* IRIX uses a dynamic sizing algorithm (ndquot = 200 + numprocs*2) */ +/* we may well need to fine-tune this if it ever becomes an issue. */ +#define DQUOT_MAX_HEURISTIC 1024 /* NR_DQUOTS */ +#define ndquot DQUOT_MAX_HEURISTIC + +/* IRIX uses the current size of the name cache to guess a good value */ +/* - this isn't the same but is a good enough starting point for now. 
*/ +#define DQUOT_HASH_HEURISTIC files_stat.nr_files + +/* IRIX inodes maintain the project ID also, zero this field on Linux */ +#define DEFAULT_PROJID 0 +#define dfltprid DEFAULT_PROJID + +#define MAXPATHLEN 1024 + +#define FINVIS 0x0100 /* don't update timestamps - XFS */ + +#define MIN(a,b) (min(a,b)) +#define MAX(a,b) (max(a,b)) +#define howmany(x, y) (((x)+((y)-1))/(y)) +#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) + +/* Move the kernel do_div definition off to one side */ + +#if defined __i386__ +/* For ia32 we need to pull some tricks to get past various versions + * of the compiler which do not like us using do_div in the middle + * of large functions. + */ +static inline __u32 xfs_do_div(void *a, __u32 b, int n) +{ + __u32 mod; + + switch (n) { + case 4: + mod = *(__u32 *)a % b; + *(__u32 *)a = *(__u32 *)a / b; + return mod; + case 8: + { + unsigned long __upper, __low, __high, __mod; + __u64 c = *(__u64 *)a; + __upper = __high = c >> 32; + __low = c; + if (__high) { + __upper = __high % (b); + __high = __high / (b); + } + asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper)); + asm("":"=A" (c):"a" (__low),"d" (__high)); + *(__u64 *)a = c; + return __mod; + } + } + + /* NOTREACHED */ + return 0; +} + +/* Side effect free 64 bit mod operation */ +static inline __u32 xfs_do_mod(void *a, __u32 b, int n) +{ + switch (n) { + case 4: + return *(__u32 *)a % b; + case 8: + { + unsigned long __upper, __low, __high, __mod; + __u64 c = *(__u64 *)a; + __upper = __high = c >> 32; + __low = c; + if (__high) { + __upper = __high % (b); + __high = __high / (b); + } + asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper)); + asm("":"=A" (c):"a" (__low),"d" (__high)); + return __mod; + } + } + + /* NOTREACHED */ + return 0; +} +#else +static inline __u32 xfs_do_div(void *a, __u32 b, int n) +{ + __u32 mod; + + switch (n) { + case 4: + mod = *(__u32 *)a % b; + *(__u32 *)a = *(__u32 *)a / b; + return mod; + case 8: + mod = do_div(*(__u64 *)a, b); + return mod; + } + + /* NOTREACHED */ + return 0; +} + +/* Side effect free 64 bit mod operation */ +static inline __u32 xfs_do_mod(void *a, __u32 b, int n) +{ + switch (n) { + case 4: + return *(__u32 *)a % b; + case 8: + { + __u64 c = *(__u64 *)a; + return do_div(c, b); + } + } + + /* NOTREACHED */ + return 0; +} +#endif + +#undef do_div +#define do_div(a, b) xfs_do_div(&(a), (b), sizeof(a)) +#define do_mod(a, b) xfs_do_mod(&(a), (b), sizeof(a)) + +static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y) +{ + x += y - 1; + do_div(x, y); + return(x * y); +} + +#endif /* __XFS_LINUX__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_lrw.c linux.21rc5-ac2/fs/xfs/linux/xfs_lrw.c --- linux.21rc5/fs/xfs/linux/xfs_lrw.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_lrw.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,837 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +/* + * fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff) + * + */ + +#include "xfs.h" + +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_inode_item.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" + +#include + + +/* + * xfs_iozero + * + * xfs_iozero clears the specified range of buffer supplied, + * and marks all the affected blocks as valid and modified. If + * an affected block is not allocated, it will be allocated. If + * an affected block is not completely overwritten, and is not + * valid before the operation, it will be read from disk before + * being partially zeroed. + */ +STATIC int +xfs_iozero( + struct inode *ip, /* inode */ + loff_t pos, /* offset in file */ + size_t count, /* size of data to zero */ + loff_t end_size) /* max file size to set */ +{ + unsigned bytes; + struct page *page; + struct address_space *mapping; + char *kaddr; + int status; + + mapping = ip->i_mapping; + do { + unsigned long index, offset; + + offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ + index = pos >> PAGE_CACHE_SHIFT; + bytes = PAGE_CACHE_SIZE - offset; + if (bytes > count) + bytes = count; + + status = -ENOMEM; + page = grab_cache_page(mapping, index); + if (!page) + break; + + kaddr = kmap(page); + status = mapping->a_ops->prepare_write(NULL, page, offset, + offset + bytes); + if (status) { + goto unlock; + } + + memset((void *) (kaddr + offset), 0, bytes); + flush_dcache_page(page); + status = mapping->a_ops->commit_write(NULL, page, offset, + offset + bytes); + if (!status) { + pos += bytes; + count -= bytes; + if (pos > ip->i_size) + ip->i_size = pos < end_size ? 
pos : end_size; + } + +unlock: + kunmap(page); + unlock_page(page); + page_cache_release(page); + if (status) + break; + } while (count); + + return (-status); +} + +ssize_t /* bytes read, or (-) error */ +xfs_read( + bhv_desc_t *bdp, + struct file *file, + char *buf, + size_t size, + loff_t *offset, + int ioflags, + cred_t *credp) +{ + ssize_t ret; + xfs_fsize_t n; + xfs_inode_t *ip; + xfs_mount_t *mp; + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + XFS_STATS_INC(xfsstats.xs_read_calls); + + if (file->f_flags & O_DIRECT) { + if (((__psint_t)buf & BBMASK) || + (*offset & mp->m_blockmask) || + (size & mp->m_blockmask)) { + if (*offset == ip->i_d.di_size) { + return (0); + } + return -XFS_ERROR(EINVAL); + } + } + + + n = XFS_MAX_FILE_OFFSET - *offset; + if ((n <= 0) || (size == 0)) + return 0; + + if (n < size) + size = n; + + if (XFS_FORCED_SHUTDOWN(mp)) { + return -EIO; + } + + if (!(ioflags & IO_ISLOCKED)) + xfs_ilock(ip, XFS_IOLOCK_SHARED); + + if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) && + !(file->f_mode & FINVIS)) { + int error; + vrwlock_t locktype = VRWLOCK_READ; + + error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offset, size, + FILP_DELAY_FLAG(file), &locktype); + if (error) { + if (!(ioflags & IO_ISLOCKED)) + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + return -error; + } + } + + ret = generic_file_read(file, buf, size, offset); + + if (!(ioflags & IO_ISLOCKED)) + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + + XFS_STATS_ADD(xfsstats.xs_read_bytes, ret); + + if (unlikely(file->f_mode & FINVIS)) { + /* generic_file_read updates the atime but we need to + * undo that because this I/O was supposed to be invisible. + */ + struct inode *inode = LINVFS_GET_IP(BHV_TO_VNODE(bdp)); + inode->i_atime = ip->i_d.di_atime.t_sec; + } + else { + xfs_ichgtime(ip, XFS_ICHGTIME_ACC); + } + + return ret; +} + +/* + * This routine is called to handle zeroing any space in the last + * block of the file that is beyond the EOF. We do this since the + * size is being increased without writing anything to that block + * and we don't want anyone to read the garbage on the disk. + */ +STATIC int /* error (positive) */ +xfs_zero_last_block( + struct inode *ip, + xfs_iocore_t *io, + xfs_off_t offset, + xfs_fsize_t isize, + xfs_fsize_t end_size) +{ + xfs_fileoff_t last_fsb; + xfs_mount_t *mp; + int nimaps; + int zero_offset; + int zero_len; + int isize_fsb_offset; + int error = 0; + xfs_bmbt_irec_t imap; + loff_t loff; + size_t lsize; + + ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0); + ASSERT(offset > isize); + + mp = io->io_mount; + + isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize); + if (isize_fsb_offset == 0) { + /* + * There are no extra bytes in the last block on disk to + * zero, so return. + */ + return 0; + } + + last_fsb = XFS_B_TO_FSBT(mp, isize); + nimaps = 1; + error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap, + &nimaps, NULL); + if (error) { + return error; + } + ASSERT(nimaps > 0); + /* + * If the block underlying isize is just a hole, then there + * is nothing to zero. + */ + if (imap.br_startblock == HOLESTARTBLOCK) { + return 0; + } + /* + * Zero the part of the last block beyond the EOF, and write it + * out sync. We need to drop the ilock while we do this so we + * don't deadlock when the buffer cache calls back to us. 
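The arithmetic this comment leads up to is small enough to check in isolation: find the byte offset of EOF within its block and zero from there to the end of the block. A user-space sketch with a 4k block size:

	#include <assert.h>
	#include <stdint.h>

	static void last_block_zero_range(uint64_t isize, unsigned blocksize,
					  uint64_t *zero_off, unsigned *zero_len)
	{
		unsigned off_in_block = isize & (blocksize - 1);

		*zero_len = off_in_block ? blocksize - off_in_block : 0;
		*zero_off = isize;
	}

	int main(void)
	{
		uint64_t off; unsigned len;

		/* With isize 6000, bytes 6000..8191 of the file get zeroed. */
		last_block_zero_range(6000, 4096, &off, &len);
		assert(off == 6000 && len == 2192);
		return 0;
	}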
+ */ + XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD); + loff = XFS_FSB_TO_B(mp, last_fsb); + lsize = XFS_FSB_TO_B(mp, 1); + + zero_offset = isize_fsb_offset; + zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset; + + error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size); + + XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + ASSERT(error >= 0); + return error; +} + +/* + * Zero any on disk space between the current EOF and the new, + * larger EOF. This handles the normal case of zeroing the remainder + * of the last block in the file and the unusual case of zeroing blocks + * out beyond the size of the file. This second case only happens + * with fixed size extents and when the system crashes before the inode + * size was updated but after blocks were allocated. If fill is set, + * then any holes in the range are filled and zeroed. If not, the holes + * are left alone as holes. + */ + +int /* error (positive) */ +xfs_zero_eof( + vnode_t *vp, + xfs_iocore_t *io, + xfs_off_t offset, /* starting I/O offset */ + xfs_fsize_t isize, /* current inode size */ + xfs_fsize_t end_size) /* terminal inode size */ +{ + struct inode *ip = LINVFS_GET_IP(vp); + xfs_fileoff_t start_zero_fsb; + xfs_fileoff_t end_zero_fsb; + xfs_fileoff_t prev_zero_fsb; + xfs_fileoff_t zero_count_fsb; + xfs_fileoff_t last_fsb; + xfs_extlen_t buf_len_fsb; + xfs_extlen_t prev_zero_count; + xfs_mount_t *mp; + int nimaps; + int error = 0; + xfs_bmbt_irec_t imap; + loff_t loff; + size_t lsize; + + ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); + ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); + + mp = io->io_mount; + + /* + * First handle zeroing the block on which isize resides. + * We only zero a part of that block so it is handled specially. + */ + error = xfs_zero_last_block(ip, io, offset, isize, end_size); + if (error) { + ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); + ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); + return error; + } + + /* + * Calculate the range between the new size and the old + * where blocks needing to be zeroed may exist. To get the + * block where the last byte in the file currently resides, + * we need to subtract one from the size and truncate back + * to a block boundary. We subtract 1 in case the size is + * exactly on a block boundary. + */ + last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; + start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); + end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); + ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); + if (last_fsb == end_zero_fsb) { + /* + * The size was only incremented on its last block. + * We took care of that above, so just return. + */ + return 0; + } + + ASSERT(start_zero_fsb <= end_zero_fsb); + prev_zero_fsb = NULLFILEOFF; + prev_zero_count = 0; + while (start_zero_fsb <= end_zero_fsb) { + nimaps = 1; + zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; + error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb, + 0, NULL, 0, &imap, &nimaps, NULL); + if (error) { + ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); + ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); + return error; + } + ASSERT(nimaps > 0); + + if (imap.br_startblock == HOLESTARTBLOCK) { + /* + * This loop handles initializing pages that were + * partially initialized by the code below this + * loop. It basically zeroes the part of the page + * that sits on a hole and sets the page as P_HOLE + * and calls remapf if it is a mapped file. 
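The hole handling around here reduces to a cursor that jumps straight past holes (nothing on disk to zero) and writes zeroes only over real extents. A schematic, with lookup_extent() and zero_blocks() as stand-ins for the XFS_BMAPI and xfs_iozero steps:

	struct ext { unsigned long start, len; int is_hole; };

	extern int lookup_extent(unsigned long fsb, unsigned long count,
				 struct ext *e);
	extern int zero_blocks(unsigned long fsb, unsigned long count);

	static int zero_range_sketch(unsigned long start, unsigned long end)
	{
		struct ext e;

		while (start <= end) {
			if (lookup_extent(start, end - start + 1, &e))
				return -1;
			if (e.is_hole) {
				start = e.start + e.len; /* nothing to zero */
				continue;
			}
			if (zero_blocks(e.start, e.len))
				return -1;
			start = e.start + e.len;
		}
		return 0;
	}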
+			 */
+			prev_zero_fsb = NULLFILEOFF;
+			prev_zero_count = 0;
+			start_zero_fsb = imap.br_startoff +
+					 imap.br_blockcount;
+			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
+			continue;
+		}
+
+		/*
+		 * There are blocks in the range requested.
+		 * Zero them a single write at a time.  We actually
+		 * don't zero the entire range returned if it is
+		 * too big and simply loop around to get the rest.
+		 * That is not the most efficient thing to do, but it
+		 * is simple and this path should not be exercised often.
+		 */
+		buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
+					      mp->m_writeio_blocks << 8);
+		/*
+		 * Drop the inode lock while we're doing the I/O.
+		 * We'll still have the iolock to protect us.
+		 */
+		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+
+		loff = XFS_FSB_TO_B(mp, start_zero_fsb);
+		lsize = XFS_FSB_TO_B(mp, buf_len_fsb);
+
+		error = xfs_iozero(ip, loff, lsize, end_size);
+
+		if (error) {
+			goto out_lock;
+		}
+
+		prev_zero_fsb = start_zero_fsb;
+		prev_zero_count = buf_len_fsb;
+		start_zero_fsb = imap.br_startoff + buf_len_fsb;
+		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
+
+		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+	}
+
+	return 0;
+
+out_lock:
+
+	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+	ASSERT(error >= 0);
+	return error;
+}
+
+ssize_t				/* bytes written, or (-) error */
+xfs_write(
+	bhv_desc_t	*bdp,
+	struct file	*file,
+	const char	*buf,
+	size_t		size,
+	loff_t		*offset,
+	int		ioflags,
+	cred_t		*credp)
+{
+	xfs_inode_t	*xip;
+	xfs_mount_t	*mp;
+	ssize_t		ret;
+	int		error = 0;
+	xfs_fsize_t	isize, new_size;
+	xfs_fsize_t	n, limit = XFS_MAX_FILE_OFFSET;
+	xfs_iocore_t	*io;
+	vnode_t		*vp;
+	int		iolock;
+	int		direct = file->f_flags & O_DIRECT;
+	int		eventsent = 0;
+	vrwlock_t	locktype;
+
+	XFS_STATS_INC(xfsstats.xs_write_calls);
+
+	vp = BHV_TO_VNODE(bdp);
+	xip = XFS_BHVTOI(bdp);
+
+	if (size == 0)
+		return 0;
+
+	io = &(xip->i_iocore);
+	mp = io->io_mount;
+
+	xfs_check_frozen(mp, bdp, XFS_FREEZE_WRITE);
+
+	if (XFS_FORCED_SHUTDOWN(xip->i_mount)) {
+		return -EIO;
+	}
+
+	if (direct) {
+		if (((__psint_t)buf & BBMASK) ||
+		    (*offset & mp->m_blockmask) ||
+		    (size & mp->m_blockmask)) {
+			return XFS_ERROR(-EINVAL);
+		}
+		iolock = XFS_IOLOCK_SHARED;
+		locktype = VRWLOCK_WRITE_DIRECT;
+	} else {
+		if (io->io_flags & XFS_IOCORE_RT)
+			return XFS_ERROR(-EINVAL);
+		iolock = XFS_IOLOCK_EXCL;
+		locktype = VRWLOCK_WRITE;
+	}
+
+	if (ioflags & IO_ISLOCKED)
+		iolock = 0;
+
+	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
+
+	isize = xip->i_d.di_size;
+
+	if (file->f_flags & O_APPEND)
+		*offset = isize;
+
+start:
+	n = limit - *offset;
+	if (n <= 0) {
+		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
+		return -EFBIG;
+	}
+	if (n < size)
+		size = n;
+
+	new_size = *offset + size;
+	if (new_size > isize) {
+		io->io_new_size = new_size;
+	}
+
+	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
+	    !(file->f_mode & FINVIS) && !eventsent)) {
+		loff_t	savedsize = *offset;
+
+		xfs_iunlock(xip, XFS_ILOCK_EXCL);
+		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, bdp,
+				      *offset, size,
+				      FILP_DELAY_FLAG(file), &locktype);
+		if (error) {
+			if (iolock)
+				xfs_iunlock(xip, iolock);
+			return -error;
+		}
+		xfs_ilock(xip, XFS_ILOCK_EXCL);
+		eventsent = 1;
+
+		/*
+		 * The iolock was dropped and reacquired in XFS_SEND_DATA
+		 * so we have to recheck the size when appending.
+		 * We will only "goto start;" once, since having sent the
+		 * event prevents another call to XFS_SEND_DATA, which is
+		 * what allows the size to change in the first place.
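The recheck described here is an instance of a general rule: state sampled before a lock is dropped must be re-sampled once the lock is retaken. A pseudo-kernel sketch of the O_APPEND case; lock_inode(), current_size() and send_dmapi_event() are stand-ins:

	extern void lock_inode(void), unlock_inode(void);
	extern long long current_size(void);
	extern int send_dmapi_event(void);

	static long long append_offset_sketch(void)
	{
		long long offset;

		lock_inode();
		offset = current_size();	/* O_APPEND: write at EOF */
		unlock_inode();
		send_dmapi_event();		/* may sleep; size can move */
		lock_inode();
		if (current_size() != offset)	/* somebody grew the file */
			offset = current_size();
		return offset;			/* still holding the lock */
	}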
+ */ + if ((file->f_flags & O_APPEND) && + savedsize != xip->i_d.di_size) { + *offset = isize = xip->i_d.di_size; + goto start; + } + } + + /* + * If the offset is beyond the size of the file, we have a couple + * of things to do. First, if there is already space allocated + * we need to either create holes or zero the disk or ... + * + * If there is a page where the previous size lands, we need + * to zero it out up to the new size. + */ + + if (!direct && (*offset > isize && isize)) { + error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, *offset, + isize, *offset + size); + if (error) { + xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); + return(-error); + } + } + xfs_iunlock(xip, XFS_ILOCK_EXCL); + + /* + * If we're writing the file then make sure to clear the + * setuid and setgid bits if the process is not being run + * by root. This keeps people from modifying setuid and + * setgid binaries. + */ + + if (((xip->i_d.di_mode & ISUID) || + ((xip->i_d.di_mode & (ISGID | (IEXEC >> 3))) == + (ISGID | (IEXEC >> 3)))) && + !capable(CAP_FSETID)) { + error = xfs_write_clear_setuid(xip); + if (error) { + xfs_iunlock(xip, iolock); + return -error; + } + } + +retry: + if (direct) { + xfs_inval_cached_pages(vp, &xip->i_iocore, *offset, 1, 1); + } + + ret = generic_file_write_nolock(file, buf, size, offset); + + if (unlikely(file->f_mode & FINVIS)) { + /* generic_file_write updates the mtime/ctime but we need + * to undo that because this I/O was supposed to be + * invisible. + */ + struct inode *inode = LINVFS_GET_IP(vp); + inode->i_mtime = xip->i_d.di_mtime.t_sec; + inode->i_ctime = xip->i_d.di_ctime.t_sec; + } + else { + xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + } + + if ((ret == -ENOSPC) && + DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) && + !(file->f_mode & FINVIS)) { + + xfs_rwunlock(bdp, locktype); + error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, bdp, + DM_RIGHT_NULL, bdp, DM_RIGHT_NULL, NULL, NULL, + 0, 0, 0); /* Delay flag intentionally unused */ + if (error) + return -error; + xfs_rwlock(bdp, locktype); + *offset = xip->i_d.di_size; + goto retry; + + } + + if (ret <= 0) { + xfs_rwunlock(bdp, locktype); + return ret; + } + + XFS_STATS_ADD(xfsstats.xs_write_bytes, ret); + + if (*offset > xip->i_d.di_size) { + xfs_ilock(xip, XFS_ILOCK_EXCL); + if (*offset > xip->i_d.di_size) { + struct inode *inode = LINVFS_GET_IP(vp); + + inode->i_size = xip->i_d.di_size = *offset; + xip->i_update_core = 1; + xip->i_update_size = 1; + } + xfs_iunlock(xip, XFS_ILOCK_EXCL); + } + + /* Handle various SYNC-type writes */ + if ((file->f_flags & O_SYNC) || IS_SYNC(file->f_dentry->d_inode)) { + + /* + * If we're treating this as O_DSYNC and we have not updated the + * size, force the log. + */ + + if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) + && !(xip->i_update_size)) { + /* + * If an allocation transaction occurred + * without extending the size, then we have to force + * the log up the proper point to ensure that the + * allocation is permanent. We can't count on + * the fact that buffered writes lock out direct I/O + * writes - the direct I/O write could have extended + * the size nontransactionally, then finished before + * we started. xfs_write_file will think that the file + * didn't grow but the update isn't safe unless the + * size change is logged. + * + * Force the log if we've committed a transaction + * against the inode or if someone else has and + * the commit record hasn't gone to disk (e.g. + * the inode is pinned). 
This guarantees that
+			 * all changes affecting the inode are permanent
+			 * when we return.
+			 */
+
+			xfs_inode_log_item_t	*iip;
+			xfs_lsn_t		lsn;
+
+			iip = xip->i_itemp;
+			if (iip && iip->ili_last_lsn) {
+				lsn = iip->ili_last_lsn;
+				xfs_log_force(mp, lsn,
+					      XFS_LOG_FORCE | XFS_LOG_SYNC);
+			} else if (xfs_ipincount(xip) > 0) {
+				xfs_log_force(mp, (xfs_lsn_t)0,
+					      XFS_LOG_FORCE | XFS_LOG_SYNC);
+			}
+
+		} else {
+			xfs_trans_t	*tp;
+
+			/*
+			 * O_SYNC or O_DSYNC _with_ a size update are handled
+			 * the same way.
+			 *
+			 * If the write was synchronous then we need to make
+			 * sure that the inode modification time is permanent.
+			 * We'll have updated the timestamp above, so here
+			 * we use a synchronous transaction to log the inode.
+			 * It's not fast, but it's necessary.
+			 *
+			 * If this is a dsync write and the size got changed
+			 * non-transactionally, then we need to ensure that
+			 * the size change gets logged in a synchronous
+			 * transaction.
+			 */
+
+			tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
+			if ((error = xfs_trans_reserve(tp, 0,
+						      XFS_SWRITE_LOG_RES(mp),
+						      0, 0, 0))) {
+				/* Transaction reserve failed */
+				xfs_trans_cancel(tp, 0);
+			} else {
+				/* Transaction reserve successful */
+				xfs_ilock(xip, XFS_ILOCK_EXCL);
+				xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
+				xfs_trans_ihold(tp, xip);
+				xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
+				xfs_trans_set_sync(tp);
+				error = xfs_trans_commit(tp, 0, (xfs_lsn_t)0);
+				xfs_iunlock(xip, XFS_ILOCK_EXCL);
+			}
+		}
+	} /* (ioflags & O_SYNC) */
+
+	/*
+	 * If we are coming from an nfsd thread then insert into the
+	 * reference cache.
+	 */
+
+	if (!strcmp(current->comm, "nfsd"))
+		xfs_refcache_insert(xip);
+
+	/* Drop lock this way - the old refcache release is in here */
+	if (iolock)
+		xfs_rwunlock(bdp, locktype);
+
+	return(ret);
+}
+
+/*
+ * All xfs metadata buffers except log state machine buffers
+ * get this attached as their b_bdstrat callback function.
+ * This is so that we can catch a buffer
+ * after prematurely unpinning it to forcibly shut down the filesystem.
+ */
+int
+xfs_bdstrat_cb(struct xfs_buf *bp)
+{
+	xfs_mount_t	*mp;
+
+	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
+	if (!XFS_FORCED_SHUTDOWN(mp)) {
+		pagebuf_iorequest(bp);
+		return 0;
+	} else {
+		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
+		/*
+		 * Metadata write that didn't get logged but
+		 * written delayed anyway.  These aren't associated
+		 * with a transaction, and can be ignored.
+		 */
+		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
+		    (XFS_BUF_ISREAD(bp)) == 0)
+			return (xfs_bioerror_relse(bp));
+		else
+			return (xfs_bioerror(bp));
+	}
+}
+
+int
+xfs_bmap(bhv_desc_t	*bdp,
+	xfs_off_t	offset,
+	ssize_t		count,
+	int		flags,
+	page_buf_bmap_t	*pbmapp,
+	int		*npbmaps)
+{
+	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
+	xfs_iocore_t	*io = &ip->i_iocore;
+
+	ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
+	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
+	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
+
+	return xfs_iomap(io, offset, count, flags, pbmapp, npbmaps);
+}
+
+/*
+ * Wrapper around bdstrat so that we can stop data
+ * from going to disk in case we are shutting down the filesystem.
+ * Typically user data goes through this path; one of the exceptions
+ * is the superblock.
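Both xfs_bdstrat_cb() above and the wrapper this comment introduces use the same pattern: a single choke point that checks for a forced shutdown before any buffer reaches the device, so a dying filesystem stops emitting I/O. A schematic, with stand-in names for the pagebuf calls:

	struct buf;

	extern int fs_is_shut_down(void);
	extern void submit_io(struct buf *bp);
	extern int fail_buffer(struct buf *bp);

	static int bdstrat_sketch(struct buf *bp)
	{
		if (fs_is_shut_down())
			return fail_buffer(bp);	/* error the buffer, no I/O */
		submit_io(bp);
		return 0;
	}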
+ */ +int +xfsbdstrat( + struct xfs_mount *mp, + struct xfs_buf *bp) +{ + ASSERT(mp); + if (!XFS_FORCED_SHUTDOWN(mp)) { + /* Grio redirection would go here + * if (XFS_BUF_IS_GRIO(bp)) { + */ + + pagebuf_iorequest(bp); + return 0; + } + + xfs_buftrace("XFSBDSTRAT IOERROR", bp); + return (xfs_bioerror_relse(bp)); +} + + +void +XFS_bflush(xfs_buftarg_t *target) +{ + pagebuf_delwri_flush(target, PBDF_WAIT, NULL); +} + +/* + * If the underlying (log or data) device is readonly, there are some + * operations that cannot proceed. + */ +int +xfs_dev_is_read_only(xfs_mount_t *mp, char *message) +{ + if (is_read_only(mp->m_ddev_targp->pbr_kdev) || + is_read_only(mp->m_logdev_targp->pbr_kdev) || + (mp->m_rtdev_targp && is_read_only(mp->m_rtdev_targp->pbr_kdev))) { + cmn_err(CE_NOTE, + "XFS: %s required on read-only device.", message); + cmn_err(CE_NOTE, + "XFS: write access unavailable, cannot proceed."); + return EROFS; + } + + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_lrw.h linux.21rc5-ac2/fs/xfs/linux/xfs_lrw.h --- linux.21rc5/fs/xfs/linux/xfs_lrw.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_lrw.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_LRW_H__ +#define __XFS_LRW_H__ + +struct vnode; +struct bhv_desc; +struct xfs_mount; +struct xfs_iocore; +struct xfs_inode; +struct xfs_bmbt_irec; +struct page_buf_s; +struct page_buf_bmap_s; + +#define XFS_IOMAP_READ_ENTER 3 +/* + * Maximum count of bmaps used by read and write paths. 
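+ * (Callers pass at most this many page_buf_bmap_t entries, via the
+ * *npbmaps in/out argument, to xfs_bmap()/xfs_iomap() declared below.)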
+ */ +#define XFS_MAX_RW_NBMAPS 4 + +extern int xfs_bmap(struct bhv_desc *, xfs_off_t, ssize_t, int, + struct page_buf_bmap_s *, int *); +extern int xfsbdstrat(struct xfs_mount *, struct page_buf_s *); +extern int xfs_bdstrat_cb(struct page_buf_s *); + +extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t, + xfs_fsize_t, xfs_fsize_t); +extern ssize_t xfs_read(struct bhv_desc *, struct file *, char *, + size_t, loff_t *, int, struct cred *); +extern ssize_t xfs_write(struct bhv_desc *, struct file *, const char *, + size_t, loff_t *, int, struct cred *); + +extern int xfs_iomap(struct xfs_iocore *, xfs_off_t, ssize_t, int, + struct page_buf_bmap_s *, int *); +extern int xfs_iomap_write_direct(struct xfs_inode *, loff_t, size_t, + int, struct xfs_bmbt_irec *, int *, int); +extern int xfs_iomap_write_delay(struct xfs_inode *, loff_t, size_t, + int, struct xfs_bmbt_irec *, int *); +extern int xfs_iomap_write_allocate(struct xfs_inode *, + struct xfs_bmbt_irec *, int *); +extern int xfs_iomap_write_unwritten(struct xfs_inode *, loff_t, size_t); + +extern int xfs_dev_is_read_only(struct xfs_mount *, char *); + +#define XFS_FSB_TO_DB_IO(io,fsb) \ + (((io)->io_flags & XFS_IOCORE_RT) ? \ + XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \ + XFS_FSB_TO_DADDR((io)->io_mount, (fsb))) + +#endif /* __XFS_LRW_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_stats.c linux.21rc5-ac2/fs/xfs/linux/xfs_stats.c --- linux.21rc5/fs/xfs/linux/xfs_stats.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_stats.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include <linux/proc_fs.h>
+
+struct xfsstats xfsstats;
+
+STATIC int
+xfs_read_xfsstats(
+	char		*buffer,
+	char		**start,
+	off_t		offset,
+	int		count,
+	int		*eof,
+	void		*data)
+{
+	int		i, j, len;
+	static struct xstats_entry {
+		char	*desc;
+		int	endpoint;
+	} xstats[] = {
+		{ "extent_alloc",	XFSSTAT_END_EXTENT_ALLOC	},
+		{ "abt",		XFSSTAT_END_ALLOC_BTREE		},
+		{ "blk_map",		XFSSTAT_END_BLOCK_MAPPING	},
+		{ "bmbt",		XFSSTAT_END_BLOCK_MAP_BTREE	},
+		{ "dir",		XFSSTAT_END_DIRECTORY_OPS	},
+		{ "trans",		XFSSTAT_END_TRANSACTIONS	},
+		{ "ig",			XFSSTAT_END_INODE_OPS		},
+		{ "log",		XFSSTAT_END_LOG_OPS		},
+		{ "push_ail",		XFSSTAT_END_TAIL_PUSHING	},
+		{ "xstrat",		XFSSTAT_END_WRITE_CONVERT	},
+		{ "rw",			XFSSTAT_END_READ_WRITE_OPS	},
+		{ "attr",		XFSSTAT_END_ATTRIBUTE_OPS	},
+		{ "icluster",		XFSSTAT_END_INODE_CLUSTER	},
+		{ "vnodes",		XFSSTAT_END_VNODE_OPS		},
+	};
+
+	for (i=j=len = 0; i < sizeof(xstats)/sizeof(struct xstats_entry); i++) {
+		len += sprintf(buffer + len, xstats[i].desc);
+		/* inner loop does each group */
+		while (j < xstats[i].endpoint) {
+			len += sprintf(buffer + len, " %u",
+				*(((__u32*)&xfsstats) + j));
+			j++;
+		}
+		buffer[len++] = '\n';
+	}
+	/* extra precision counters */
+	len += sprintf(buffer + len, "xpc %Lu %Lu %Lu\n",
+			xfsstats.xs_xstrat_bytes,
+			xfsstats.xs_write_bytes,
+			xfsstats.xs_read_bytes);
+
+	if (offset >= len) {
+		*start = buffer;
+		*eof = 1;
+		return 0;
+	}
+	*start = buffer + offset;
+	if ((len -= offset) > count)
+		return count;
+	*eof = 1;
+
+	return len;
+}
+
+void
+xfs_init_procfs(void)
+{
+	if (!proc_mkdir("fs/xfs", 0))
+		return;
+	create_proc_read_entry("fs/xfs/stat", 0, 0, xfs_read_xfsstats, NULL);
+}
+
+void
+xfs_cleanup_procfs(void)
+{
+	remove_proc_entry("fs/xfs/stat", NULL);
+	remove_proc_entry("fs/xfs", NULL);
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_stats.h linux.21rc5-ac2/fs/xfs/linux/xfs_stats.h
--- linux.21rc5/fs/xfs/linux/xfs_stats.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/linux/xfs_stats.h	2003-05-28 21:48:47.000000000 +0100
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_STATS_H__ +#define __XFS_STATS_H__ + + +#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF) + +/* + * XFS global statistics + */ +struct xfsstats { +# define XFSSTAT_END_EXTENT_ALLOC 4 + __uint32_t xs_allocx; + __uint32_t xs_allocb; + __uint32_t xs_freex; + __uint32_t xs_freeb; +# define XFSSTAT_END_ALLOC_BTREE (XFSSTAT_END_EXTENT_ALLOC+4) + __uint32_t xs_abt_lookup; + __uint32_t xs_abt_compare; + __uint32_t xs_abt_insrec; + __uint32_t xs_abt_delrec; +# define XFSSTAT_END_BLOCK_MAPPING (XFSSTAT_END_ALLOC_BTREE+7) + __uint32_t xs_blk_mapr; + __uint32_t xs_blk_mapw; + __uint32_t xs_blk_unmap; + __uint32_t xs_add_exlist; + __uint32_t xs_del_exlist; + __uint32_t xs_look_exlist; + __uint32_t xs_cmp_exlist; +# define XFSSTAT_END_BLOCK_MAP_BTREE (XFSSTAT_END_BLOCK_MAPPING+4) + __uint32_t xs_bmbt_lookup; + __uint32_t xs_bmbt_compare; + __uint32_t xs_bmbt_insrec; + __uint32_t xs_bmbt_delrec; +# define XFSSTAT_END_DIRECTORY_OPS (XFSSTAT_END_BLOCK_MAP_BTREE+4) + __uint32_t xs_dir_lookup; + __uint32_t xs_dir_create; + __uint32_t xs_dir_remove; + __uint32_t xs_dir_getdents; +# define XFSSTAT_END_TRANSACTIONS (XFSSTAT_END_DIRECTORY_OPS+3) + __uint32_t xs_trans_sync; + __uint32_t xs_trans_async; + __uint32_t xs_trans_empty; +# define XFSSTAT_END_INODE_OPS (XFSSTAT_END_TRANSACTIONS+7) + __uint32_t xs_ig_attempts; + __uint32_t xs_ig_found; + __uint32_t xs_ig_frecycle; + __uint32_t xs_ig_missed; + __uint32_t xs_ig_dup; + __uint32_t xs_ig_reclaims; + __uint32_t xs_ig_attrchg; +# define XFSSTAT_END_LOG_OPS (XFSSTAT_END_INODE_OPS+5) + __uint32_t xs_log_writes; + __uint32_t xs_log_blocks; + __uint32_t xs_log_noiclogs; + __uint32_t xs_log_force; + __uint32_t xs_log_force_sleep; +# define XFSSTAT_END_TAIL_PUSHING (XFSSTAT_END_LOG_OPS+10) + __uint32_t xs_try_logspace; + __uint32_t xs_sleep_logspace; + __uint32_t xs_push_ail; + __uint32_t xs_push_ail_success; + __uint32_t xs_push_ail_pushbuf; + __uint32_t xs_push_ail_pinned; + __uint32_t xs_push_ail_locked; + __uint32_t xs_push_ail_flushing; + __uint32_t xs_push_ail_restarts; + __uint32_t xs_push_ail_flush; +# define XFSSTAT_END_WRITE_CONVERT (XFSSTAT_END_TAIL_PUSHING+2) + __uint32_t xs_xstrat_quick; + __uint32_t xs_xstrat_split; +# define XFSSTAT_END_READ_WRITE_OPS (XFSSTAT_END_WRITE_CONVERT+2) + __uint32_t xs_write_calls; + __uint32_t xs_read_calls; +# define XFSSTAT_END_ATTRIBUTE_OPS (XFSSTAT_END_READ_WRITE_OPS+4) + __uint32_t xs_attr_get; + __uint32_t xs_attr_set; + __uint32_t xs_attr_remove; + __uint32_t xs_attr_list; +# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_ATTRIBUTE_OPS+3) + __uint32_t xs_iflush_count; + __uint32_t xs_icluster_flushcnt; + __uint32_t xs_icluster_flushinode; +# define XFSSTAT_END_VNODE_OPS (XFSSTAT_END_INODE_CLUSTER+8) + __uint32_t vn_active; /* # vnodes not on free lists */ + __uint32_t vn_alloc; /* # times vn_alloc called */ + __uint32_t vn_get; /* # times vn_get called */ + __uint32_t vn_hold; /* # times vn_hold called */ + __uint32_t vn_rele; /* # times vn_rele called */ + __uint32_t vn_reclaim; /* # times vn_reclaim called */ + __uint32_t vn_remove; /* # times vn_remove called */ + __uint32_t vn_free; /* # times vn_free called */ +/* Extra precision counters */ + __uint64_t xs_xstrat_bytes; + __uint64_t xs_write_bytes; + __uint64_t xs_read_bytes; +}; + 
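+/*
+ * Layout note: each XFSSTAT_END_* value above is the running count of
+ * __uint32_t fields up to the end of its group. xfs_read_xfsstats()
+ * (xfs_stats.c) walks this struct as a flat __u32 array against those
+ * endpoints, emitting one "name c1 c2 ..." line per group plus a final
+ * "xpc" line for the 64-bit counters, so new counters must be added
+ * within their group and the endpoints adjusted to match.
+ */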
+extern struct xfsstats xfsstats; + +# define XFS_STATS_INC(count) ( (count)++ ) +# define XFS_STATS_DEC(count) ( (count)-- ) +# define XFS_STATS_ADD(count, inc) ( (count) += (inc) ) + +extern void xfs_init_procfs(void); +extern void xfs_cleanup_procfs(void); + + +#else /* !CONFIG_PROC_FS */ + +# define XFS_STATS_INC(count) +# define XFS_STATS_DEC(count) +# define XFS_STATS_ADD(count, inc) + +static __inline void xfs_init_procfs(void) { }; +static __inline void xfs_cleanup_procfs(void) { }; + +#endif /* !CONFIG_PROC_FS */ + +#endif /* __XFS_STATS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_super.c linux.21rc5-ac2/fs/xfs/linux/xfs_super.c --- linux.21rc5/fs/xfs/linux/xfs_super.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_super.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,845 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_clnt.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+#include "xfs_version.h"
+
+#include <linux/init.h>
+#include <linux/mount.h>
+
+STATIC struct quotactl_ops linvfs_qops;
+STATIC struct super_operations linvfs_sops;
+STATIC kmem_cache_t * linvfs_inode_cachep;
+
+STATIC struct xfs_mount_args *
+args_allocate(
+	struct super_block	*sb)
+{
+	struct xfs_mount_args	*args;
+
+	args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
+	args->logbufs = args->logbufsize = -1;
+	strncpy(args->fsname, bdevname(sb->s_dev), MAXNAMELEN);
+
+	/* Copy the already-parsed mount(2) flags we're interested in */
+	if (sb->s_flags & MS_NOATIME)
+		args->flags |= XFSMNT_NOATIME;
+
+	/* Default to 32 bit inodes on Linux all the time */
+	args->flags |= XFSMNT_32BITINODES;
+
+	return args;
+}
+
+STATIC __inline__ void
+xfs_set_inodeops(
+	struct inode		*inode)
+{
+	vnode_t			*vp = LINVFS_GET_VP(inode);
+
+	if (vp->v_type == VNON) {
+		remove_inode_hash(inode);
+		make_bad_inode(inode);
+	} else if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &linvfs_file_inode_operations;
+		inode->i_fop = &linvfs_file_operations;
+		inode->i_mapping->a_ops = &linvfs_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &linvfs_dir_inode_operations;
+		inode->i_fop = &linvfs_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		inode->i_op = &linvfs_symlink_inode_operations;
+		if (inode->i_blocks)
+			inode->i_mapping->a_ops = &linvfs_aops;
+	} else {
+		inode->i_op = &linvfs_file_inode_operations;
+		init_special_inode(inode, inode->i_mode,
+				   kdev_t_to_nr(inode->i_rdev));
+	}
+}
+
+STATIC __inline__ void
+xfs_revalidate_inode(
+	xfs_mount_t		*mp,
+	vnode_t			*vp,
+	xfs_inode_t		*ip)
+{
+	struct inode		*inode = LINVFS_GET_IP(vp);
+
+	inode->i_mode = (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type);
+	inode->i_nlink = ip->i_d.di_nlink;
+	inode->i_uid = ip->i_d.di_uid;
+	inode->i_gid = ip->i_d.di_gid;
+	if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
+		inode->i_rdev = NODEV;
+	} else {
+		xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
+		inode->i_rdev = XFS_DEV_TO_KDEVT(dev);
+	}
+	inode->i_blksize = PAGE_CACHE_SIZE;
+	inode->i_generation = ip->i_d.di_gen;
+	inode->i_size = ip->i_d.di_size;
+	inode->i_blocks =
+		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
+	inode->i_atime = ip->i_d.di_atime.t_sec;
+	inode->i_mtime = ip->i_d.di_mtime.t_sec;
+	inode->i_ctime = ip->i_d.di_ctime.t_sec;
+
+	vp->v_flag &= ~VMODIFIED;
+}
+
+void
+xfs_initialize_vnode(
+	bhv_desc_t		*bdp,
+	vnode_t			*vp,
+	bhv_desc_t		*inode_bhv,
+	int			unlock)
+{
+	xfs_inode_t		*ip = XFS_BHVTOI(inode_bhv);
+	struct
inode *inode = LINVFS_GET_IP(vp); + + if (!inode_bhv->bd_vobj) { + vp->v_vfsp = bhvtovfs(bdp); + bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops); + bhv_insert(VN_BHV_HEAD(vp), inode_bhv); + } + + vp->v_type = IFTOVT(ip->i_d.di_mode); + + /* Have we been called during the new inode create process, + * in which case we are too early to fill in the Linux inode. + */ + if (vp->v_type == VNON) + return; + + xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); + + /* For new inodes we need to set the ops vectors, + * and unlock the inode. + */ + if (unlock && (inode->i_state & I_NEW)) { + xfs_set_inodeops(inode); + unlock_new_inode(inode); + } +} + +struct inode * +xfs_get_inode( + bhv_desc_t *bdp, + xfs_ino_t ino, + int flags) +{ + struct vfs *vfsp = bhvtovfs(bdp); + + return iget_locked(vfsp->vfs_super, ino); +} + +/*ARGSUSED*/ +int +xfs_blkdev_get( + xfs_mount_t *mp, + const char *name, + struct block_device **bdevp) +{ + struct nameidata nd; + int error; + + error = path_lookup(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd); + if (error) { + printk("XFS: Invalid device [%s], error=%d\n", name, error); + return -error; + } + + /* I think we actually want bd_acquire here.. --hch */ + *bdevp = bdget(kdev_t_to_nr(nd.dentry->d_inode->i_rdev)); + if (*bdevp) + error = blkdev_get(*bdevp, FMODE_READ|FMODE_WRITE, 0, BDEV_FS); + else + error = -ENOMEM; + + path_release(&nd); + return -error; +} + +void +xfs_blkdev_put( + struct block_device *bdev) +{ + if (bdev) + blkdev_put(bdev, BDEV_FS); +} + +void +xfs_free_buftarg( + xfs_buftarg_t *btp) +{ + pagebuf_delwri_flush(btp, PBDF_WAIT, NULL); + kmem_free(btp, sizeof(*btp)); +} + +void +xfs_relse_buftarg( + xfs_buftarg_t *btp) +{ + destroy_buffers(btp->pbr_kdev); + truncate_inode_pages(btp->pbr_mapping, 0LL); +} + +unsigned int +xfs_getsize_buftarg( + xfs_buftarg_t *btp) +{ + return block_size(btp->pbr_kdev); +} + +void +xfs_setsize_buftarg( + xfs_buftarg_t *btp, + unsigned int blocksize, + unsigned int sectorsize) +{ + btp->pbr_bsize = blocksize; + btp->pbr_sshift = ffs(sectorsize) - 1; + btp->pbr_smask = sectorsize - 1; + + if (set_blocksize(btp->pbr_kdev, sectorsize)) { + printk(KERN_WARNING + "XFS: Cannot set_blocksize to %u on device 0x%x\n", + sectorsize, kdev_t_to_nr(btp->pbr_kdev)); + } +} + +xfs_buftarg_t * +xfs_alloc_buftarg( + struct block_device *bdev) +{ + xfs_buftarg_t *btp; + + btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); + + btp->pbr_dev = bdev->bd_dev; + btp->pbr_kdev = to_kdev_t(btp->pbr_dev); + btp->pbr_bdev = bdev; + btp->pbr_mapping = bdev->bd_inode->i_mapping; + xfs_setsize_buftarg(btp, PAGE_CACHE_SIZE, + get_hardsect_size(btp->pbr_kdev)); + + switch (MAJOR(btp->pbr_dev)) { + case MD_MAJOR: + case EVMS_MAJOR: + btp->pbr_flags = PBR_ALIGNED_ONLY; + break; + case LVM_BLK_MAJOR: + btp->pbr_flags = PBR_SECTOR_ONLY; + break; + } + + return btp; +} + +STATIC __inline__ unsigned int gfp_mask(void) +{ + /* If we're not in a transaction, FS activity is ok */ + if (current->flags & PF_FSTRANS) return GFP_NOFS; + return GFP_KERNEL; +} + +STATIC struct inode * +linvfs_alloc_inode( + struct super_block *sb) +{ + vnode_t *vp; + + vp = (vnode_t *)kmem_cache_alloc(linvfs_inode_cachep, gfp_mask()); + if (!vp) + return NULL; + return LINVFS_GET_IP(vp); +} + +STATIC void +linvfs_destroy_inode( + struct inode *inode) +{ + kmem_cache_free(linvfs_inode_cachep, LINVFS_GET_VP(inode)); +} + +#define VNODE_SIZE \ + (sizeof(vnode_t) - sizeof(struct inode) + offsetof(struct inode, u)) + +STATIC void +init_once( + void *data, + kmem_cache_t *cachep, + unsigned long flags) +{ 
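+	/*
+	 * Slab constructor: the SLAB_CTOR_CONSTRUCTOR check below makes
+	 * this run only when a fresh object is added to the cache, not on
+	 * every allocation, so only one-time initialisation belongs here.
+	 */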
+ vnode_t *vp = (vnode_t *)data; + + if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == + SLAB_CTOR_CONSTRUCTOR) { + struct inode *inode = LINVFS_GET_IP(vp); + + memset(vp, 0, VNODE_SIZE); + init_waitqueue_head(&inode->i_wait); + INIT_LIST_HEAD(&inode->i_hash); + INIT_LIST_HEAD(&inode->i_data.clean_pages); + INIT_LIST_HEAD(&inode->i_data.dirty_pages); + INIT_LIST_HEAD(&inode->i_data.locked_pages); + INIT_LIST_HEAD(&inode->i_dentry); + INIT_LIST_HEAD(&inode->i_dirty_buffers); + INIT_LIST_HEAD(&inode->i_dirty_data_buffers); + INIT_LIST_HEAD(&inode->i_devices); + sema_init(&inode->i_sem, 1); + sema_init(&inode->i_zombie, 1); + spin_lock_init(&inode->i_data.i_shared_lock); + } +} + +STATIC int +init_inodecache( void ) +{ + linvfs_inode_cachep = kmem_cache_create("linvfs_icache", + VNODE_SIZE, 0, SLAB_HWCACHE_ALIGN, + init_once, NULL); + + if (linvfs_inode_cachep == NULL) + return -ENOMEM; + return 0; +} + +STATIC void +destroy_inodecache( void ) +{ + if (kmem_cache_destroy(linvfs_inode_cachep)) + printk(KERN_WARNING "%s: cache still in use!\n", __FUNCTION__); +} + +/* + * Attempt to flush the inode, this will actually fail + * if the inode is pinned, but we dirty the inode again + * at the point when it is unpinned after a log write, + * since this is when the inode itself becomes flushable. + */ +STATIC void +linvfs_write_inode( + struct inode *inode, + int sync) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error, flags = FLUSH_INODE; + + if (vp) { + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + if (sync) + flags |= FLUSH_SYNC; + VOP_IFLUSH(vp, flags, error); + } +} + +STATIC void +linvfs_clear_inode( + struct inode *inode) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + + if (vp) { + vn_rele(vp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + /* + * Do all our cleanup, and remove this vnode. 
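+	 * (its inode reference was already dropped via vn_rele() above).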
+ */ + vn_remove(vp); + } +} + +STATIC void +linvfs_put_super( + struct super_block *sb) +{ + vfs_t *vfsp = LINVFS_GET_VFS(sb); + int error; + + linvfs_stop_syncd(vfsp); + VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error); + if (error == 0) { + VFS_UNMOUNT(vfsp, 0, NULL, error); + } + if (error) { + printk("XFS unmount got error %d\n", error); + printk("%s: vfsp/0x%p left dangling!\n", __FUNCTION__, vfsp); + return; + } + + vfs_deallocate(vfsp); +} + +STATIC void +linvfs_write_super( + struct super_block *sb) +{ + vfs_t *vfsp = LINVFS_GET_VFS(sb); + int error; + + if (sb->s_flags & MS_RDONLY) { + sb->s_dirt = 0; /* paranoia */ + return; + } + /* Push the log and superblock a little */ + VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error); + sb->s_dirt = 0; +} + +STATIC int +linvfs_statfs( + struct super_block *sb, + struct statfs *statp) +{ + vfs_t *vfsp = LINVFS_GET_VFS(sb); + int error; + + VFS_STATVFS(vfsp, statp, NULL, error); + return error; +} + +STATIC int +linvfs_remount( + struct super_block *sb, + int *flags, + char *options) +{ + vfs_t *vfsp = LINVFS_GET_VFS(sb); + struct xfs_mount_args *args = args_allocate(sb); + int error; + + VFS_PARSEARGS(vfsp, options, args, 1, error); + if (error) + goto out; + + VFS_MNTUPDATE(vfsp, flags, args, error); + +out: + kmem_free(args, sizeof(*args)); + return error; +} + +STATIC void +linvfs_freeze_fs( + struct super_block *sb) +{ + vfs_t *vfsp; + vnode_t *vp; + int error; + + vfsp = LINVFS_GET_VFS(sb); + if (sb->s_flags & MS_RDONLY) + return; + VFS_ROOT(vfsp, &vp, error); + VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, XFS_IOC_FREEZE, 0, error); + VN_RELE(vp); +} + +STATIC void +linvfs_unfreeze_fs( + struct super_block *sb) +{ + vfs_t *vfsp; + vnode_t *vp; + int error; + + vfsp = LINVFS_GET_VFS(sb); + VFS_ROOT(vfsp, &vp, error); + VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, XFS_IOC_THAW, 0, error); + VN_RELE(vp); +} + +STATIC int +linvfs_dentry_to_fh( + struct dentry *dentry, + __u32 *data, + int *lenp, + int need_parent) +{ + struct inode *inode = dentry->d_inode ; + vnode_t *vp = LINVFS_GET_VP(inode); + int maxlen = *lenp; + xfs_fid2_t fid; + int error; + + if (maxlen < 3) + return 255 ; + + VOP_FID2(vp, (struct fid *)&fid, error); + data[0] = (__u32)fid.fid_ino; /* 32 bits of inode is OK */ + data[1] = fid.fid_gen; + + *lenp = 2 ; + if (maxlen < 4 || ! need_parent) + return 2 ; + + inode = dentry->d_parent->d_inode ; + vp = LINVFS_GET_VP(inode); + + VOP_FID2(vp, (struct fid *)&fid, error); + data[2] = (__u32)fid.fid_ino; /* 32 bits of inode is OK */ + *lenp = 3 ; + if (maxlen < 4) + return 3 ; + data[3] = fid.fid_gen; + *lenp = 4 ; + return 4 ; +} + +STATIC struct dentry * +linvfs_fh_to_dentry( + struct super_block *sb, + __u32 *data, + int len, + int fhtype, + int parent) +{ + vnode_t *vp; + struct inode *inode = NULL; + struct list_head *lp; + struct dentry *result; + xfs_fid2_t xfid; + vfs_t *vfsp = LINVFS_GET_VFS(sb); + int error; + + xfid.fid_len = sizeof(xfs_fid2_t) - sizeof(xfid.fid_len); + xfid.fid_pad = 0; + + if (!parent) { + xfid.fid_gen = data[1]; + xfid.fid_ino = (__u64)data[0]; + } else { + if (fhtype == 4) + xfid.fid_gen = data[3]; + else + xfid.fid_gen = 0; + xfid.fid_ino = (__u64)data[2]; + } + + VFS_VGET(vfsp, &vp, (fid_t *)&xfid, error); + if (error || vp == NULL) + return ERR_PTR(-ESTALE) ; + + inode = LINVFS_GET_IP(vp); + spin_lock(&dcache_lock); + for (lp = inode->i_dentry.next; lp != &inode->i_dentry ; lp=lp->next) { + result = list_entry(lp,struct dentry, d_alias); + if (! 
(result->d_flags & DCACHE_NFSD_DISCONNECTED)) { + dget_locked(result); + result->d_vfs_flags |= DCACHE_REFERENCED; + spin_unlock(&dcache_lock); + iput(inode); + return result; + } + } + spin_unlock(&dcache_lock); + result = d_alloc_root(inode); + if (result == NULL) { + iput(inode); + return ERR_PTR(-ENOMEM); + } + result->d_flags |= DCACHE_NFSD_DISCONNECTED; + return result; +} + +STATIC int +linvfs_show_options( + struct seq_file *m, + struct vfsmount *mnt) +{ + struct vfs *vfsp = LINVFS_GET_VFS(mnt->mnt_sb); + int error; + + VFS_SHOWARGS(vfsp, m, error); + return error; +} + +STATIC int +linvfs_getxstate( + struct super_block *sb, + struct fs_quota_stat *fqs) +{ + struct vfs *vfsp = LINVFS_GET_VFS(sb); + int error; + + VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error); + return -error; +} + +STATIC int +linvfs_setxstate( + struct super_block *sb, + unsigned int flags, + int op) +{ + struct vfs *vfsp = LINVFS_GET_VFS(sb); + int error; + + VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error); + return -error; +} + +STATIC int +linvfs_getxquota( + struct super_block *sb, + int type, + qid_t id, + struct fs_disk_quota *fdq) +{ + struct vfs *vfsp = LINVFS_GET_VFS(sb); + int error, getmode; + + getmode = (type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETQUOTA; + VFS_QUOTACTL(vfsp, getmode, id, (caddr_t)fdq, error); + return -error; +} + +STATIC int +linvfs_setxquota( + struct super_block *sb, + int type, + qid_t id, + struct fs_disk_quota *fdq) +{ + struct vfs *vfsp = LINVFS_GET_VFS(sb); + int error, setmode; + + setmode = (type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETQLIM; + VFS_QUOTACTL(vfsp, setmode, id, (caddr_t)fdq, error); + return -error; +} + +STATIC struct super_block * +linvfs_read_super( + struct super_block *sb, + void *data, + int silent) +{ + vnode_t *rootvp; + struct vfs *vfsp = vfs_allocate(); + struct xfs_mount_args *args = args_allocate(sb); + struct statfs statvfs; + int error; + + vfsp->vfs_super = sb; + LINVFS_SET_VFS(sb, vfsp); + if (sb->s_flags & MS_RDONLY) + vfsp->vfs_flag |= VFS_RDONLY; + bhv_insert_all_vfsops(vfsp); + + VFS_PARSEARGS(vfsp, (char *)data, args, 0, error); + if (error) { + bhv_remove_all_vfsops(vfsp, 1); + goto fail_vfsop; + } + + sb_min_blocksize(sb, BBSIZE); + sb->s_maxbytes = XFS_MAX_FILE_OFFSET; + sb->s_qcop = &linvfs_qops; + sb->s_op = &linvfs_sops; + + VFS_MOUNT(vfsp, args, NULL, error); + if (error) { + bhv_remove_all_vfsops(vfsp, 1); + goto fail_vfsop; + } + + VFS_STATVFS(vfsp, &statvfs, NULL, error); + if (error) + goto fail_unmount; + + sb->s_dirt = 1; + sb->s_magic = XFS_SB_MAGIC; + sb->s_blocksize = statvfs.f_bsize; + sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1; + set_posix_acl_flag(sb); + + VFS_ROOT(vfsp, &rootvp, error); + if (error) + goto fail_unmount; + + sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp)); + if (!sb->s_root) + goto fail_vnrele; + if (is_bad_inode(sb->s_root->d_inode)) + goto fail_vnrele; + if (linvfs_start_syncd(vfsp)) + goto fail_vnrele; + vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address); + + kmem_free(args, sizeof(*args)); + return sb; + +fail_vnrele: + if (sb->s_root) { + dput(sb->s_root); + sb->s_root = NULL; + } else { + VN_RELE(rootvp); + } + +fail_unmount: + VFS_UNMOUNT(vfsp, 0, NULL, error); + +fail_vfsop: + vfs_deallocate(vfsp); + kmem_free(args, sizeof(*args)); + return NULL; +} + + +STATIC struct super_operations linvfs_sops = { + .alloc_inode = linvfs_alloc_inode, + .destroy_inode = linvfs_destroy_inode, + .write_inode = linvfs_write_inode, + .clear_inode = linvfs_clear_inode, + .put_super = 
linvfs_put_super, + .write_super = linvfs_write_super, + .write_super_lockfs = linvfs_freeze_fs, + .unlockfs = linvfs_unfreeze_fs, + .statfs = linvfs_statfs, + .remount_fs = linvfs_remount, + .fh_to_dentry = linvfs_fh_to_dentry, + .dentry_to_fh = linvfs_dentry_to_fh, + .show_options = linvfs_show_options, +}; + +STATIC struct quotactl_ops linvfs_qops = { + .get_xstate = linvfs_getxstate, + .set_xstate = linvfs_setxstate, + .get_xquota = linvfs_getxquota, + .set_xquota = linvfs_setxquota, +}; + +STATIC struct file_system_type xfs_fs_type = { + .owner = THIS_MODULE, + .name = "xfs", + .read_super = linvfs_read_super, + .fs_flags = FS_REQUIRES_DEV, +}; + + +STATIC int __init +init_xfs_fs( void ) +{ + int error; + struct sysinfo si; + static char message[] __initdata = KERN_INFO \ + XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n"; + + printk(message); + + si_meminfo(&si); + xfs_physmem = si.totalram; + + error = init_inodecache(); + if (error < 0) + goto undo_inodecache; + + error = pagebuf_init(); + if (error < 0) + goto undo_pagebuf; + + vn_init(); + xfs_init(); + uuid_init(); + vfs_initdmapi(); + vfs_initquota(); + + error = register_filesystem(&xfs_fs_type); + if (error) + goto undo_register; + return 0; + +undo_register: + pagebuf_terminate(); + +undo_pagebuf: + destroy_inodecache(); + +undo_inodecache: + return error; +} + +STATIC void __exit +exit_xfs_fs( void ) +{ + unregister_filesystem(&xfs_fs_type); + xfs_cleanup(); + vfs_exitquota(); + vfs_exitdmapi(); + pagebuf_terminate(); + destroy_inodecache(); +} + +module_init(init_xfs_fs); +module_exit(exit_xfs_fs); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled"); +MODULE_LICENSE("GPL"); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_super.h linux.21rc5-ac2/fs/xfs/linux/xfs_super.h --- linux.21rc5/fs/xfs/linux/xfs_super.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_super.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPER_H__ +#define __XFS_SUPER_H__ + +#ifdef CONFIG_XFS_DMAPI +# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops) +# define vfs_initdmapi() dmapi_init() +# define vfs_exitdmapi() dmapi_uninit() +#else +# define vfs_insertdmapi(vfs) do { } while (0) +# define vfs_initdmapi() do { } while (0) +# define vfs_exitdmapi() do { } while (0) +#endif + +#ifdef CONFIG_XFS_QUOTA +# define vfs_insertquota(vfs) vfs_insertops(vfsp, &xfs_qmops) +# define vfs_initquota() xfs_qm_init() +# define vfs_exitquota() xfs_qm_exit() +#else +# define vfs_insertquota(vfs) do { } while (0) +# define vfs_initquota() do { } while (0) +# define vfs_exitquota() do { } while (0) +#endif + +#ifdef CONFIG_XFS_POSIX_ACL +# define XFS_ACL_STRING "ACLs, " +# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL) +#else +# define XFS_ACL_STRING +# define set_posix_acl_flag(sb) do { } while (0) +#endif + +#ifdef CONFIG_XFS_RT +# define XFS_REALTIME_STRING "realtime, " +#else +# define XFS_REALTIME_STRING +#endif + +#ifdef CONFIG_XFS_VNODE_TRACING +# define XFS_VNTRACE_STRING "VN-trace, " +#else +# define XFS_VNTRACE_STRING +#endif + +#ifdef XFSDEBUG +# define XFS_DBG_STRING "debug" +#else +# define XFS_DBG_STRING "no debug" +#endif + +#define XFS_BUILD_OPTIONS XFS_ACL_STRING \ + XFS_REALTIME_STRING \ + XFS_VNTRACE_STRING \ + XFS_DBG_STRING /* DBG must be last */ + +#define LINVFS_GET_VFS(s) \ + (vfs_t *)((s)->u.generic_sbp) +#define LINVFS_SET_VFS(s, vfsp) \ + ((s)->u.generic_sbp = vfsp) + +struct xfs_mount; +struct pb_target; +struct block_device; + +extern struct inode *xfs_get_inode(bhv_desc_t *, xfs_ino_t, int); +extern void xfs_initialize_vnode(bhv_desc_t *, vnode_t *, bhv_desc_t *, int); + +extern int xfs_blkdev_get(struct xfs_mount *, const char *, + struct block_device **); +extern void xfs_blkdev_put(struct block_device *); + +extern struct pb_target *xfs_alloc_buftarg(struct block_device *); +extern void xfs_relse_buftarg(struct pb_target *); +extern void xfs_free_buftarg(struct pb_target *); + +extern void xfs_setsize_buftarg(struct pb_target *, unsigned int, unsigned int); +extern unsigned int xfs_getsize_buftarg(struct pb_target *); +extern int linvfs_start_syncd(vfs_t *); +extern void linvfs_stop_syncd(vfs_t *); + +#endif /* __XFS_SUPER_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_syncd.c linux.21rc5-ac2/fs/xfs/linux/xfs_syncd.c --- linux.21rc5/fs/xfs/linux/xfs_syncd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_syncd.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. 
Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <xfs.h>
+
+static void sync_timeout(unsigned long __data)
+{
+	struct task_struct * p = (struct task_struct *) __data;
+
+	wake_up_process(p);
+}
+
+#define SYNCD_FLAGS	(SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR|SYNC_REFCACHE)
+
+int syncd(void *arg)
+{
+	vfs_t			*vfsp = (vfs_t *) arg;
+	int			error;
+	struct timer_list	timer;
+
+	daemonize();
+	reparent_to_init();
+	spin_lock_irq(&current->sigmask_lock);
+	sigfillset(&current->blocked);
+	recalc_sigpending(current);
+	spin_unlock_irq(&current->sigmask_lock);
+
+	sprintf(current->comm, "xfs_syncd");
+
+	vfsp->vfs_sync_task = current;
+	wmb();
+	wake_up(&vfsp->vfs_wait_sync_task);
+
+	init_timer(&timer);
+	timer.data = (unsigned long)current;
+	timer.function = sync_timeout;
+
+	do {
+		mod_timer(&timer, jiffies + xfs_params.sync_interval);
+		interruptible_sleep_on(&vfsp->vfs_sync);
+
+		if (!(vfsp->vfs_flag & VFS_RDONLY))
+			VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
+	} while (!(vfsp->vfs_flag & VFS_UMOUNT));
+
+	del_timer_sync(&timer);
+	vfsp->vfs_sync_task = NULL;
+	wmb();
+	wake_up(&vfsp->vfs_wait_sync_task);
+	return 0;
+}
+
+int
+linvfs_start_syncd(vfs_t *vfsp)
+{
+	int pid;
+
+	pid = kernel_thread(syncd, (void *) vfsp,
+			    CLONE_VM | CLONE_FS | CLONE_FILES);
+	if (pid < 0)
+		return pid;
+	wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
+	return 0;
+}
+
+void
+linvfs_stop_syncd(vfs_t *vfsp)
+{
+	vfsp->vfs_flag |= VFS_UMOUNT;
+	wmb();
+
+	wake_up(&vfsp->vfs_sync);
+	wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_sysctl.c linux.21rc5-ac2/fs/xfs/linux/xfs_sysctl.c
--- linux.21rc5/fs/xfs/linux/xfs_sysctl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/linux/xfs_sysctl.c	2003-05-07 15:41:09.000000000 +0100
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_rw.h"
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+
+
+STATIC ulong xfs_min[XFS_PARAM] = { \
+	0, 0, 0, 0, 0, 0, 0, 0, HZ };
+STATIC ulong xfs_max[XFS_PARAM] = { \
+	XFS_REFCACHE_SIZE_MAX, XFS_REFCACHE_SIZE_MAX,
+	1, 1, 1, 1, 127, 11, HZ * 60 };
+
+static struct ctl_table_header *xfs_table_header;
+
+
+/* Custom proc handlers */
+
+STATIC int
+xfs_refcache_resize_proc_handler(
+	ctl_table	*ctl,
+	int		write,
+	struct file	*filp,
+	void		*buffer,
+	size_t		*lenp)
+{
+	int ret, *valp = ctl->data;
+	int xfs_refcache_new_size;
+	int xfs_refcache_old_size = *valp;
+
+	ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
+	xfs_refcache_new_size = *valp;
+
+	if (!ret && write && xfs_refcache_new_size != xfs_refcache_old_size) {
+		xfs_refcache_resize(xfs_refcache_new_size);
+		/* Don't purge more than size of the cache */
+		if (xfs_refcache_new_size < xfs_params.refcache_purge)
+			xfs_params.refcache_purge = xfs_refcache_new_size;
+	}
+
+	return ret;
+}
+
+STATIC int
+xfs_stats_clear_proc_handler(
+	ctl_table	*ctl,
+	int		write,
+	struct file	*filp,
+	void		*buffer,
+	size_t		*lenp)
+{
+	int		ret, *valp = ctl->data;
+	__uint32_t	vn_active;
+
+	ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
+
+	if (!ret && write && *valp) {
+		printk("XFS Clearing xfsstats\n");
+		/* save vn_active, it's a universal truth!
*/ + vn_active = xfsstats.vn_active; + memset(&xfsstats, 0, sizeof(xfsstats)); + xfsstats.vn_active = vn_active; + xfs_params.stats_clear = 0; + } + + return ret; +} + +STATIC ctl_table xfs_table[] = { + {XFS_REFCACHE_SIZE, "refcache_size", &xfs_params.refcache_size, + sizeof(ulong), 0644, NULL, &xfs_refcache_resize_proc_handler, + &sysctl_intvec, NULL, &xfs_min[0], &xfs_max[0]}, + + {XFS_REFCACHE_PURGE, "refcache_purge", &xfs_params.refcache_purge, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[1], &xfs_params.refcache_size}, + + {XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear, + sizeof(ulong), 0644, NULL, &xfs_stats_clear_proc_handler, + &sysctl_intvec, NULL, &xfs_min[2], &xfs_max[2]}, + + {XFS_RESTRICT_CHOWN, "restrict_chown", &xfs_params.restrict_chown, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[3], &xfs_max[3]}, + + {XFS_SGID_INHERIT, "irix_sgid_inherit", &xfs_params.sgid_inherit, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[4], &xfs_max[4]}, + + {XFS_SYMLINK_MODE, "irix_symlink_mode", &xfs_params.symlink_mode, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[5], &xfs_max[5]}, + + {XFS_PANIC_MASK, "panic_mask", &xfs_params.panic_mask, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[6], &xfs_max[6]}, + + {XFS_ERRLEVEL, "error_level", &xfs_params.error_level, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[7], &xfs_max[7]}, + + {XFS_SYNC_INTERVAL, "sync_interval", &xfs_params.sync_interval, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[8], &xfs_max[8]}, + + {0} +}; + +STATIC ctl_table xfs_dir_table[] = { + {FS_XFS, "xfs", NULL, 0, 0555, xfs_table}, + {0} +}; + +STATIC ctl_table xfs_root_table[] = { + {CTL_FS, "fs", NULL, 0, 0555, xfs_dir_table}, + {0} +}; + +void +xfs_sysctl_register(void) +{ + xfs_table_header = register_sysctl_table(xfs_root_table, 1); +} + +void +xfs_sysctl_unregister(void) +{ + if (xfs_table_header) + unregister_sysctl_table(xfs_table_header); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_sysctl.h linux.21rc5-ac2/fs/xfs/linux/xfs_sysctl.h --- linux.21rc5/fs/xfs/linux/xfs_sysctl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_sysctl.h 2003-05-28 21:48:48.000000000 +0100 @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#ifndef __XFS_SYSCTL_H__
+#define __XFS_SYSCTL_H__
+
+#include <linux/sysctl.h>
+
+/*
+ * Tunable xfs parameters
+ */
+
+#define XFS_PARAM	(sizeof(struct xfs_param) / sizeof(ulong))
+
+typedef struct xfs_param {
+	ulong	refcache_size;	/* Size of NFS reference cache.		*/
+	ulong	refcache_purge;	/* # of entries to purge each time.	*/
+	ulong	stats_clear;	/* Reset all XFS statistics to zero.	*/
+	ulong	restrict_chown;	/* Root/non-root can give away files.	*/
+	ulong	sgid_inherit;	/* Inherit ISGID bit if process' GID is	*/
+				/* not a member of the parent dir GID.	*/
+	ulong	symlink_mode;	/* Symlink creat mode affected by umask. */
+	ulong	panic_mask;	/* bitmask to specify panics on errors.	*/
+	ulong	error_level;	/* Degree of reporting for internal probs */
+	ulong	sync_interval;	/* time between sync calls		*/
+} xfs_param_t;
+
+/*
+ * xfs_error_level:
+ *
+ * How much error reporting will be done when internal problems are
+ * encountered. These problems normally return an EFSCORRUPTED to their
+ * caller, with no other information reported.
+ *
+ * 0	No error reports
+ * 1	Report EFSCORRUPTED errors that will cause a filesystem shutdown
+ * 5	Report all EFSCORRUPTED errors (all of the above errors, plus any
+ *	additional errors that are known to not cause shutdowns)
+ *
+ * xfs_panic_mask bit 0x8 turns the error reports into panics
+ */
+
+enum {
+	XFS_REFCACHE_SIZE = 1,
+	XFS_REFCACHE_PURGE = 2,
+	XFS_STATS_CLEAR = 3,
+	XFS_RESTRICT_CHOWN = 4,
+	XFS_SGID_INHERIT = 5,
+	XFS_SYMLINK_MODE = 6,
+	XFS_PANIC_MASK = 7,
+	XFS_ERRLEVEL = 8,
+	XFS_SYNC_INTERVAL = 9,
+};
+
+extern xfs_param_t	xfs_params;
+
+#ifdef CONFIG_SYSCTL
+extern void xfs_sysctl_register(void);
+extern void xfs_sysctl_unregister(void);
+#else
+# define xfs_sysctl_register()		do { } while (0)
+# define xfs_sysctl_unregister()	do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
+#endif /* __XFS_SYSCTL_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_version.h linux.21rc5-ac2/fs/xfs/linux/xfs_version.h
--- linux.21rc5/fs/xfs/linux/xfs_version.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/linux/xfs_version.h	2003-05-07 15:41:09.000000000 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Dummy file that can contain a timestamp to put into the + * XFS init string, to help users keep track of what they're + * running + */ + +#ifndef __XFS_VERSION_H__ +#define __XFS_VERSION_H__ + +#define XFS_VERSION_STRING "SGI XFS" + +#endif /* __XFS_VERSION_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_vfs.c linux.21rc5-ac2/fs/xfs/linux/xfs_vfs.c --- linux.21rc5/fs/xfs/linux/xfs_vfs.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_vfs.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_macros.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_clnt.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_imap.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_quota.h" + +int +vfs_mount( + struct bhv_desc *bdp, + struct xfs_mount_args *args, + struct cred *cr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_mount) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_mount)(next, args, cr)); +} + +int +vfs_parseargs( + struct bhv_desc *bdp, + char *s, + struct xfs_mount_args *args, + int f) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! 
(bhvtovfsops(next))->vfs_parseargs) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_parseargs)(next, s, args, f)); +} + +int +vfs_showargs( + struct bhv_desc *bdp, + struct seq_file *m) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_showargs) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_showargs)(next, m)); +} + +int +vfs_unmount( + struct bhv_desc *bdp, + int fl, + struct cred *cr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_unmount) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_unmount)(next, fl, cr)); +} + +int +vfs_mntupdate( + struct bhv_desc *bdp, + int *fl, + struct xfs_mount_args *args) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_mntupdate) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_mntupdate)(next, fl, args)); +} + + +int +vfs_root( + struct bhv_desc *bdp, + struct vnode **vpp) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_root) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_root)(next, vpp)); +} + +int +vfs_statvfs( + struct bhv_desc *bdp, + struct statfs *sp, + struct vnode *vp) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_statvfs) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_statvfs)(next, sp, vp)); +} + +int +vfs_sync( + struct bhv_desc *bdp, + int fl, + struct cred *cr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_sync) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_sync)(next, fl, cr)); +} + +int +vfs_vget( + struct bhv_desc *bdp, + struct vnode **vpp, + struct fid *fidp) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_vget) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_vget)(next, vpp, fidp)); +} + +int +vfs_dmapiops( + struct bhv_desc *bdp, + caddr_t addr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_dmapiops) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_dmapiops)(next, addr)); +} + +int +vfs_quotactl( + struct bhv_desc *bdp, + int cmd, + int id, + caddr_t addr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_quotactl) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_quotactl)(next, cmd, id, addr)); +} + +struct inode * +vfs_get_inode( + struct bhv_desc *bdp, + xfs_ino_t ino, + int fl) +{ + struct bhv_desc *next = bdp; + + while (! (bhvtovfsops(next))->vfs_get_inode) + next = BHV_NEXTNULL(next); + return ((*bhvtovfsops(next)->vfs_get_inode)(next, ino, fl)); +} + +void +vfs_init_vnode( + struct bhv_desc *bdp, + struct vnode *vp, + struct bhv_desc *bp, + int unlock) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_init_vnode) + next = BHV_NEXT(next); + ((*bhvtovfsops(next)->vfs_init_vnode)(next, vp, bp, unlock)); +} + +void +vfs_force_shutdown( + struct bhv_desc *bdp, + int fl, + char *file, + int line) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! 
(bhvtovfsops(next))->vfs_force_shutdown) + next = BHV_NEXT(next); + ((*bhvtovfsops(next)->vfs_force_shutdown)(next, fl, file, line)); +} + +vfs_t * +vfs_allocate( void ) +{ + struct vfs *vfsp; + + vfsp = kmem_zalloc(sizeof(vfs_t), KM_SLEEP); + bhv_head_init(VFS_BHVHEAD(vfsp), "vfs"); + init_waitqueue_head(&vfsp->vfs_wait_sync_task); + init_waitqueue_head(&vfsp->vfs_sync); + return vfsp; +} + +void +vfs_deallocate( + struct vfs *vfsp) +{ + bhv_head_destroy(VFS_BHVHEAD(vfsp)); + kmem_free(vfsp, sizeof(vfs_t)); +} + +void +vfs_insertops( + struct vfs *vfsp, + struct bhv_vfsops *vfsops) +{ + struct bhv_desc *bdp; + + bdp = kmem_alloc(sizeof(struct bhv_desc), KM_SLEEP); + bhv_desc_init(bdp, NULL, vfsp, vfsops); + bhv_insert(&vfsp->vfs_bh, bdp); +} + +void +vfs_insertbhv( + struct vfs *vfsp, + struct bhv_desc *bdp, + struct vfsops *vfsops, + void *mount) +{ + bhv_desc_init(bdp, mount, vfsp, vfsops); + bhv_insert_initial(&vfsp->vfs_bh, bdp); +} + +void +bhv_remove_vfsops( + struct vfs *vfsp, + int pos) +{ + struct bhv_desc *bhv; + + bhv = bhv_lookup_range(&vfsp->vfs_bh, pos, pos); + if (!bhv) + return; + bhv_remove(&vfsp->vfs_bh, bhv); + kmem_free(bhv, sizeof(*bhv)); +} + +void +bhv_remove_all_vfsops( + struct vfs *vfsp, + int freebase) +{ + struct xfs_mount *mp; + + bhv_remove_vfsops(vfsp, VFS_POSITION_QM); + bhv_remove_vfsops(vfsp, VFS_POSITION_DM); + if (!freebase) + return; + mp = XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfsp), &xfs_vfsops)); + VFS_REMOVEBHV(vfsp, &mp->m_bhv); + xfs_mount_free(mp, 0); +} + +void +bhv_insert_all_vfsops( + struct vfs *vfsp) +{ + struct xfs_mount *mp; + + mp = xfs_mount_init(); + vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp); + vfs_insertdmapi(vfsp); + vfs_insertquota(vfsp); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_vfs.h linux.21rc5-ac2/fs/xfs/linux/xfs_vfs.h --- linux.21rc5/fs/xfs/linux/xfs_vfs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_vfs.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_VFS_H__ +#define __XFS_VFS_H__ + +#include + +struct fid; +struct cred; +struct vnode; +struct statfs; +struct seq_file; +struct super_block; +struct xfs_mount_args; + +typedef struct vfs { + u_int vfs_flag; /* flags */ + fsid_t vfs_fsid; /* file system ID */ + fsid_t *vfs_altfsid; /* An ID fixed for life of FS */ + bhv_head_t vfs_bh; /* head of vfs behavior chain */ + struct super_block *vfs_super; /* Linux superblock structure */ + struct task_struct *vfs_sync_task; + wait_queue_head_t vfs_sync; + wait_queue_head_t vfs_wait_sync_task; +} vfs_t; + +#define vfs_fbhv vfs_bh.bh_first /* 1st on vfs behavior chain */ + +#define bhvtovfs(bdp) ( (struct vfs *)BHV_VOBJ(bdp) ) +#define bhvtovfsops(bdp) ( (struct vfsops *)BHV_OPS(bdp) ) +#define VFS_BHVHEAD(vfs) ( &(vfs)->vfs_bh ) +#define VFS_REMOVEBHV(vfs, bdp) ( bhv_remove(VFS_BHVHEAD(vfs), bdp) ) + +#define VFS_POSITION_BASE BHV_POSITION_BASE /* chain bottom */ +#define VFS_POSITION_TOP BHV_POSITION_TOP /* chain top */ +#define VFS_POSITION_INVALID BHV_POSITION_INVALID /* invalid pos. num */ + +typedef enum { + VFS_BHV_UNKNOWN, /* not specified */ + VFS_BHV_XFS, /* xfs */ + VFS_BHV_DM, /* data migration */ + VFS_BHV_QM, /* quota manager */ + VFS_BHV_IO, /* IO path */ + VFS_BHV_END /* housekeeping end-of-range */ +} vfs_bhv_t; + +#define VFS_POSITION_XFS (BHV_POSITION_BASE) +#define VFS_POSITION_DM (VFS_POSITION_BASE+10) +#define VFS_POSITION_QM (VFS_POSITION_BASE+20) +#define VFS_POSITION_IO (VFS_POSITION_BASE+30) + +#define VFS_RDONLY 0x0001 /* read-only vfs */ +#define VFS_GRPID 0x0002 /* group-ID assigned from directory */ +#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */ +#define VFS_UMOUNT 0x0008 /* unmount in progress */ +#define VFS_END 0x0008 /* max flag */ + +#define SYNC_ATTR 0x0001 /* sync attributes */ +#define SYNC_CLOSE 0x0002 /* close file system down */ +#define SYNC_DELWRI 0x0004 /* look at delayed writes */ +#define SYNC_WAIT 0x0008 /* wait for i/o to complete */ +#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. 
superblocks) */
+#define SYNC_BDFLUSH 0x0010 /* BDFLUSH is calling -- don't block */
+#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */
+
+
+#define IGET_NOALLOC 0x0001 /* vfs_get_inode may return NULL */
+
+typedef int (*vfs_mount_t)(bhv_desc_t *,
+ struct xfs_mount_args *, struct cred *);
+typedef int (*vfs_parseargs_t)(bhv_desc_t *, char *,
+ struct xfs_mount_args *, int);
+typedef int (*vfs_showargs_t)(bhv_desc_t *, struct seq_file *);
+typedef int (*vfs_unmount_t)(bhv_desc_t *, int, struct cred *);
+typedef int (*vfs_mntupdate_t)(bhv_desc_t *, int *,
+ struct xfs_mount_args *);
+typedef int (*vfs_root_t)(bhv_desc_t *, struct vnode **);
+typedef int (*vfs_statvfs_t)(bhv_desc_t *, struct statfs *, struct vnode *);
+typedef int (*vfs_sync_t)(bhv_desc_t *, int, struct cred *);
+typedef int (*vfs_vget_t)(bhv_desc_t *, struct vnode **, struct fid *);
+typedef int (*vfs_dmapiops_t)(bhv_desc_t *, caddr_t);
+typedef int (*vfs_quotactl_t)(bhv_desc_t *, int, int, caddr_t);
+typedef void (*vfs_init_vnode_t)(bhv_desc_t *,
+ struct vnode *, bhv_desc_t *, int);
+typedef void (*vfs_force_shutdown_t)(bhv_desc_t *, int, char *, int);
+typedef struct inode * (*vfs_get_inode_t)(bhv_desc_t *, xfs_ino_t, int);
+
+typedef struct vfsops {
+ bhv_position_t vf_position; /* behavior chain position */
+ vfs_mount_t vfs_mount; /* mount file system */
+ vfs_parseargs_t vfs_parseargs; /* parse mount options */
+ vfs_showargs_t vfs_showargs; /* unparse mount options */
+ vfs_unmount_t vfs_unmount; /* unmount file system */
+ vfs_mntupdate_t vfs_mntupdate; /* update file system options */
+ vfs_root_t vfs_root; /* get root vnode */
+ vfs_statvfs_t vfs_statvfs; /* file system statistics */
+ vfs_sync_t vfs_sync; /* flush files */
+ vfs_vget_t vfs_vget; /* get vnode from fid */
+ vfs_dmapiops_t vfs_dmapiops; /* data migration */
+ vfs_quotactl_t vfs_quotactl; /* disk quota */
+ vfs_get_inode_t vfs_get_inode; /* bhv specific iget */
+ vfs_init_vnode_t vfs_init_vnode; /* initialize a new vnode */
+ vfs_force_shutdown_t vfs_force_shutdown; /* crash and burn */
+} vfsops_t;
+
+/*
+ * VFS's. Operates on vfs structure pointers (starts at bhv head).
+ */
+#define VHEAD(v) ((v)->vfs_fbhv)
+#define VFS_MOUNT(v, ma,cr, rv) ((rv) = vfs_mount(VHEAD(v), ma,cr))
+#define VFS_PARSEARGS(v, o,ma,f, rv) ((rv) = vfs_parseargs(VHEAD(v), o,ma,f))
+#define VFS_SHOWARGS(v, m, rv) ((rv) = vfs_showargs(VHEAD(v), m))
+#define VFS_UNMOUNT(v, f, cr, rv) ((rv) = vfs_unmount(VHEAD(v), f,cr))
+#define VFS_MNTUPDATE(v, fl, args, rv) ((rv) = vfs_mntupdate(VHEAD(v), fl, args))
+#define VFS_ROOT(v, vpp, rv) ((rv) = vfs_root(VHEAD(v), vpp))
+#define VFS_STATVFS(v, sp,vp, rv) ((rv) = vfs_statvfs(VHEAD(v), sp,vp))
+#define VFS_SYNC(v, flag,cr, rv) ((rv) = vfs_sync(VHEAD(v), flag,cr))
+#define VFS_VGET(v, vpp,fidp, rv) ((rv) = vfs_vget(VHEAD(v), vpp,fidp))
+#define VFS_DMAPIOPS(v, p, rv) ((rv) = vfs_dmapiops(VHEAD(v), p))
+#define VFS_QUOTACTL(v, c,id,p, rv) ((rv) = vfs_quotactl(VHEAD(v), c,id,p))
+#define VFS_GET_INODE(v, ino, fl) ( vfs_get_inode(VHEAD(v), ino,fl) )
+#define VFS_INIT_VNODE(v, vp,b,ul) ( vfs_init_vnode(VHEAD(v), vp,b,ul) )
+#define VFS_FORCE_SHUTDOWN(v, fl,f,l) ( vfs_force_shutdown(VHEAD(v), fl,f,l) )
+
+/*
+ * PVFS's. Operates on behavior descriptor pointers.
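All of the vfs_* dispatchers in xfs_vfs.c above share one shape: start at the given behavior descriptor, walk inward with BHV_NEXT() until a link whose ops vector implements the operation is found, then call through that link; the VFS_*() macros wrap the walk in the rv-assignment calling convention. A minimal stand-alone sketch of that chain walk, assuming hypothetical stand-in types (struct bhv and struct ops below are illustrative, not the patch's bhv_desc_t/vfsops_t):

    #include <assert.h>
    #include <stdio.h>

    struct bhv;

    /* Illustrative operation vector: a NULL slot means "not intercepted". */
    struct ops {
        int (*op_mount)(struct bhv *bdp, const char *args);
    };

    /* Illustrative behavior-chain link (bhv_desc_t in the patch). */
    struct bhv {
        struct bhv *bd_next;    /* next (inner) behavior in the chain */
        struct ops *bd_ops;     /* this behavior's operation vector */
    };

    /* Same shape as vfs_mount()/vfs_unmount()/... above: walk inward
     * until some behavior implements the op, then tail-call it. */
    static int chain_mount(struct bhv *bdp, const char *args)
    {
        struct bhv *next = bdp;

        assert(next != NULL);
        while (next->bd_ops->op_mount == NULL)
            next = next->bd_next;       /* BHV_NEXT(next) in the patch */
        return next->bd_ops->op_mount(next, args);
    }

    static int base_mount(struct bhv *bdp, const char *args)
    {
        (void)bdp;
        printf("base filesystem mounts: %s\n", args);
        return 0;
    }

    int main(void)
    {
        struct ops quota_ops = { .op_mount = NULL };        /* interposer */
        struct ops base_ops  = { .op_mount = base_mount };  /* chain bottom */
        struct bhv base  = { .bd_next = NULL,  .bd_ops = &base_ops };
        struct bhv quota = { .bd_next = &base, .bd_ops = &quota_ops };

        return chain_mount(&quota, "logdev=/dev/foo");      /* falls through */
    }

The point of the NULL-slot convention is that an interposed behavior (quota, DMAPI) fills in only the operations it wants to intercept; everything else falls through to the base filesystem at the bottom of the chain.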
+ */ +#define PVFS_MOUNT(b, ma,cr, rv) ((rv) = vfs_mount(b, ma,cr)) +#define PVFS_PARSEARGS(b, o,ma,f, rv) ((rv) = vfs_parseargs(b, o,ma,f)) +#define PVFS_SHOWARGS(b, m, rv) ((rv) = vfs_showargs(b, m)) +#define PVFS_UNMOUNT(b, f,cr, rv) ((rv) = vfs_unmount(b, f,cr)) +#define PVFS_MNTUPDATE(b, fl, args, rv) ((rv) = vfs_mntupdate(b, fl, args)) +#define PVFS_ROOT(b, vpp, rv) ((rv) = vfs_root(b, vpp)) +#define PVFS_STATVFS(b, sp,vp, rv) ((rv) = vfs_statvfs(b, sp,vp)) +#define PVFS_SYNC(b, flag,cr, rv) ((rv) = vfs_sync(b, flag,cr)) +#define PVFS_VGET(b, vpp,fidp, rv) ((rv) = vfs_vget(b, vpp,fidp)) +#define PVFS_DMAPIOPS(b, p, rv) ((rv) = vfs_dmapiops(b, p)) +#define PVFS_QUOTACTL(b, c,id,p, rv) ((rv) = vfs_quotactl(b, c,id,p)) +#define PVFS_GET_INODE(b, ino,fl) ( vfs_get_inode(b, ino,fl) ) +#define PVFS_INIT_VNODE(b, vp,b2,ul) ( vfs_init_vnode(b, vp,b2,ul) ) +#define PVFS_FORCE_SHUTDOWN(b, fl,f,l) ( vfs_force_shutdown(b, fl,f,l) ) + +extern int vfs_mount(bhv_desc_t *, struct xfs_mount_args *, struct cred *); +extern int vfs_parseargs(bhv_desc_t *, char *, struct xfs_mount_args *, int); +extern int vfs_showargs(bhv_desc_t *, struct seq_file *); +extern int vfs_unmount(bhv_desc_t *, int, struct cred *); +extern int vfs_mntupdate(bhv_desc_t *, int *, struct xfs_mount_args *); +extern int vfs_root(bhv_desc_t *, struct vnode **); +extern int vfs_statvfs(bhv_desc_t *, struct statfs *, struct vnode *); +extern int vfs_sync(bhv_desc_t *, int, struct cred *); +extern int vfs_vget(bhv_desc_t *, struct vnode **, struct fid *); +extern int vfs_dmapiops(bhv_desc_t *, caddr_t); +extern int vfs_quotactl(bhv_desc_t *, int, int, caddr_t); +extern struct inode *vfs_get_inode(bhv_desc_t *, xfs_ino_t, int); +extern void vfs_init_vnode(bhv_desc_t *, struct vnode *, bhv_desc_t *, int); +extern void vfs_force_shutdown(bhv_desc_t *, int, char *, int); + +typedef struct bhv_vfsops { + struct vfsops bhv_common; + void * bhv_custom; +} bhv_vfsops_t; + +#define vfs_bhv_lookup(v, id) ( bhv_lookup_range(&(v)->vfs_bh, (id), (id)) ) +#define vfs_bhv_custom(b) ( ((bhv_vfsops_t *)BHV_OPS(b))->bhv_custom ) +#define vfs_bhv_set_custom(b,o) ( (b)->bhv_custom = (void *)(o)) +#define vfs_bhv_clr_custom(b) ( (b)->bhv_custom = NULL ) + +extern vfs_t *vfs_allocate(void); +extern void vfs_deallocate(vfs_t *); +extern void vfs_insertops(vfs_t *, bhv_vfsops_t *); +extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *); + +extern void bhv_insert_all_vfsops(struct vfs *); +extern void bhv_remove_all_vfsops(struct vfs *, int); +extern void bhv_remove_vfsops(struct vfs *, int); + +#endif /* __XFS_VFS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_vnode.c linux.21rc5-ac2/fs/xfs/linux/xfs_vnode.c --- linux.21rc5/fs/xfs/linux/xfs_vnode.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/linux/xfs_vnode.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + + +uint64_t vn_generation; /* vnode generation number */ +spinlock_t vnumber_lock = SPIN_LOCK_UNLOCKED; + +/* + * Dedicated vnode inactive/reclaim sync semaphores. + * Prime number of hash buckets since address is used as the key. + */ +#define NVSYNC 37 +#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC]) +sv_t vsync[NVSYNC]; + +/* + * Translate stat(2) file types to vnode types and vice versa. + * Aware of numeric order of S_IFMT and vnode type values. + */ +enum vtype iftovt_tab[] = { + VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, + VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON +}; + +u_short vttoif_tab[] = { + 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK +}; + + +void +vn_init(void) +{ + register sv_t *svp; + register int i; + + for (svp = vsync, i = 0; i < NVSYNC; i++, svp++) + init_sv(svp, SV_DEFAULT, "vsy", i); +} + +/* + * Clean a vnode of filesystem-specific data and prepare it for reuse. + */ +STATIC int +vn_reclaim( + struct vnode *vp) +{ + int error; + + XFS_STATS_INC(xfsstats.vn_reclaim); + vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address); + + /* + * Only make the VOP_RECLAIM call if there are behaviors + * to call. + */ + if (vp->v_fbhv) { + VOP_RECLAIM(vp, error); + if (error) + return -error; + } + ASSERT(vp->v_fbhv == NULL); + + VN_LOCK(vp); + vp->v_flag &= (VRECLM|VWAIT); + VN_UNLOCK(vp, 0); + + vp->v_type = VNON; + vp->v_fbhv = NULL; + +#ifdef CONFIG_XFS_VNODE_TRACING + ktrace_free(vp->v_trace); + vp->v_trace = NULL; +#endif + + return 0; +} + +STATIC void +vn_wakeup( + struct vnode *vp) +{ + VN_LOCK(vp); + if (vp->v_flag & VWAIT) + sv_broadcast(vptosync(vp)); + vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED); + VN_UNLOCK(vp, 0); +} + +int +vn_wait( + struct vnode *vp) +{ + VN_LOCK(vp); + if (vp->v_flag & (VINACT | VRECLM)) { + vp->v_flag |= VWAIT; + sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0); + return 1; + } + VN_UNLOCK(vp, 0); + return 0; +} + +struct vnode * +vn_initialize( + struct inode *inode) +{ + struct vnode *vp = LINVFS_GET_VP(inode); + + XFS_STATS_INC(xfsstats.vn_active); + XFS_STATS_INC(xfsstats.vn_alloc); + + vp->v_flag = VMODIFIED; + spinlock_init(&vp->v_lock, "v_lock"); + + spin_lock(&vnumber_lock); + if (!++vn_generation) /* v_number shouldn't be zero */ + vn_generation++; + vp->v_number = vn_generation; + spin_unlock(&vnumber_lock); + + ASSERT(VN_CACHED(vp) == 0); + + /* Initialize the first behavior and the behavior chain head. 
*/ + vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode"); + +#ifdef CONFIG_XFS_VNODE_TRACING + vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); +#endif /* CONFIG_XFS_VNODE_TRACING */ + + vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address); + return vp; +} + +/* + * Get a reference on a vnode. + */ +vnode_t * +vn_get( + struct vnode *vp, + vmap_t *vmap) +{ + struct inode *inode; + + XFS_STATS_INC(xfsstats.vn_get); + inode = LINVFS_GET_IP(vp); + if (inode->i_state & I_FREEING) + return NULL; + + inode = VFS_GET_INODE(vmap->v_vfsp, vmap->v_ino, IGET_NOALLOC); + if (!inode) /* Inode not present */ + return NULL; + + /* We do not want to create new inodes via vn_get, + * returning NULL here is OK. + */ + if (inode->i_state & I_NEW) { + remove_inode_hash(inode); + make_bad_inode(inode); + unlock_new_inode(inode); + iput(inode); + return NULL; + } + + vn_trace_exit(vp, "vn_get", (inst_t *)__return_address); + + return vp; +} + +/* + * Revalidate the Linux inode from the vnode. + */ +int +vn_revalidate( + struct vnode *vp) +{ + struct inode *inode; + vattr_t va; + int error; + + vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address); + ASSERT(vp->v_fbhv != NULL); + + va.va_mask = XFS_AT_STAT|XFS_AT_GENCOUNT; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (!error) { + inode = LINVFS_GET_IP(vp); + inode->i_mode = VTTOIF(va.va_type) | va.va_mode; + inode->i_nlink = va.va_nlink; + inode->i_uid = va.va_uid; + inode->i_gid = va.va_gid; + inode->i_size = va.va_size; + inode->i_blocks = va.va_nblocks; + inode->i_mtime = va.va_mtime.tv_sec; + inode->i_ctime = va.va_ctime.tv_sec; + inode->i_atime = va.va_atime.tv_sec; + VUNMODIFY(vp); + } + return -error; +} + +/* + * purge a vnode from the cache + * At this point the vnode is guaranteed to have no references (vn_count == 0) + * The caller has to make sure that there are no ways someone could + * get a handle (via vn_get) on the vnode (usually done via a mount/vfs lock). + */ +void +vn_purge( + struct vnode *vp, + vmap_t *vmap) +{ + vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address); + +again: + /* + * Check whether vp has already been reclaimed since our caller + * sampled its version while holding a filesystem cache lock that + * its VOP_RECLAIM function acquires. + */ + VN_LOCK(vp); + if (vp->v_number != vmap->v_number) { + VN_UNLOCK(vp, 0); + return; + } + + /* + * If vp is being reclaimed or inactivated, wait until it is inert, + * then proceed. Can't assume that vnode is actually reclaimed + * just because the reclaimed flag is asserted -- a vn_alloc + * reclaim can fail. + */ + if (vp->v_flag & (VINACT | VRECLM)) { + ASSERT(vn_count(vp) == 0); + vp->v_flag |= VWAIT; + sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0); + goto again; + } + + /* + * Another process could have raced in and gotten this vnode... + */ + if (vn_count(vp) > 0) { + VN_UNLOCK(vp, 0); + return; + } + + XFS_STATS_DEC(xfsstats.vn_active); + vp->v_flag |= VRECLM; + VN_UNLOCK(vp, 0); + + /* + * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells + * vp's filesystem to flush and invalidate all cached resources. + * When vn_reclaim returns, vp should have no private data, + * either in a system cache or attached to v_data. + */ + if (vn_reclaim(vp) != 0) + panic("vn_purge: cannot reclaim"); + + /* + * Wakeup anyone waiting for vp to be reclaimed. + */ + vn_wakeup(vp); +} + +/* + * Add a reference to a referenced vnode. 
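vn_purge() above relies on a generation-number handshake: the caller samples (vfsp, v_number, ino) into a vmap_t while a filesystem lock pins the vnode, drops that lock, and vn_purge re-takes VN_LOCK and compares v_number to detect that the slot was recycled in the meantime. A toy model of that sample-then-revalidate idiom, with every name below illustrative rather than the patch's:

    #include <assert.h>
    #include <stddef.h>

    /* A slot's generation is bumped whenever it is reused, so a stale
     * (pointer, generation) sample can be detected after re-locking. */
    struct slot {
        unsigned long gen;      /* like vnode_t.v_number */
        int           payload;
    };

    struct snap {
        struct slot  *s;
        unsigned long gen;      /* sampled while the slot was pinned */
    };

    static struct slot *revalidate(struct snap *sn)
    {
        /* in the real code, VN_LOCK(vp) is taken before this compare */
        return (sn->s->gen == sn->gen) ? sn->s : NULL;
    }

    int main(void)
    {
        struct slot v = { .gen = 7, .payload = 42 };
        struct snap sn = { .s = &v, .gen = v.gen }; /* sample under "lock" */

        assert(revalidate(&sn) == &v);   /* untouched: still valid */
        v.gen++;                         /* slot recycled for a new identity */
        assert(revalidate(&sn) == NULL); /* stale sample correctly rejected */
        return 0;
    }

The same trick appears again in the vmap_t/vn_get() pairing declared in xfs_vnode.h below.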
+ */
+struct vnode *
+vn_hold(
+ struct vnode *vp)
+{
+ struct inode *inode;
+
+ XFS_STATS_INC(xfsstats.vn_hold);
+
+ VN_LOCK(vp);
+ inode = igrab(LINVFS_GET_IP(vp));
+ ASSERT(inode);
+ VN_UNLOCK(vp, 0);
+
+ return vp;
+}
+
+/*
+ * Call VOP_INACTIVE on last reference.
+ */
+void
+vn_rele(
+ struct vnode *vp)
+{
+ int vcnt;
+ int cache;
+
+ XFS_STATS_INC(xfsstats.vn_rele);
+
+ VN_LOCK(vp);
+
+ vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
+ vcnt = vn_count(vp);
+
+ /*
+ * Since we always get called from put_inode we know
+ * that i_count won't be decremented after we
+ * return.
+ */
+ if (!vcnt) {
+ /*
+ * As soon as we turn this on, no one can find us in vn_get
+ * until we turn off VINACT or VRECLM
+ */
+ vp->v_flag |= VINACT;
+ VN_UNLOCK(vp, 0);
+
+ /*
+ * Do not make the VOP_INACTIVE call if there
+ * are no behaviors attached to the vnode to call.
+ */
+ if (vp->v_fbhv)
+ VOP_INACTIVE(vp, NULL, cache);
+
+ VN_LOCK(vp);
+ if (vp->v_flag & VWAIT)
+ sv_broadcast(vptosync(vp));
+
+ vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
+ }
+
+ VN_UNLOCK(vp, 0);
+
+ vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
+}
+
+/*
+ * Finish the removal of a vnode.
+ */
+void
+vn_remove(
+ struct vnode *vp)
+{
+ vmap_t vmap;
+
+ /* Make sure we don't do this to the same vnode twice */
+ if (!(vp->v_fbhv))
+ return;
+
+ XFS_STATS_INC(xfsstats.vn_remove);
+ vn_trace_exit(vp, "vn_remove", (inst_t *)__return_address);
+
+ /*
+ * After the following purge the vnode
+ * will no longer exist.
+ */
+ VMAP(vp, vmap);
+ vn_purge(vp, &vmap);
+}
+
+
+#ifdef CONFIG_XFS_VNODE_TRACING
+
+#define KTRACE_ENTER(vp, vk, s, line, ra) \
+ ktrace_enter( (vp)->v_trace, \
+/* 0 */ (void *)(__psint_t)(vk), \
+/* 1 */ (void *)(s), \
+/* 2 */ (void *)(__psint_t) line, \
+/* 3 */ (void *)(vn_count(vp)), \
+/* 4 */ (void *)(ra), \
+/* 5 */ (void *)(__psunsigned_t)(vp)->v_flag, \
+/* 6 */ (void *)(__psint_t)smp_processor_id(), \
+/* 7 */ (void *)(__psint_t)(current->pid), \
+/* 8 */ (void *)__return_address, \
+/* 9 */ 0, 0, 0, 0, 0, 0, 0)
+
+/*
+ * Vnode tracing code.
+ */
+void
+vn_trace_entry(vnode_t *vp, char *func, inst_t *ra)
+{
+ KTRACE_ENTER(vp, VNODE_KTRACE_ENTRY, func, 0, ra);
+}
+
+void
+vn_trace_exit(vnode_t *vp, char *func, inst_t *ra)
+{
+ KTRACE_ENTER(vp, VNODE_KTRACE_EXIT, func, 0, ra);
+}
+
+void
+vn_trace_hold(vnode_t *vp, char *file, int line, inst_t *ra)
+{
+ KTRACE_ENTER(vp, VNODE_KTRACE_HOLD, file, line, ra);
+}
+
+void
+vn_trace_ref(vnode_t *vp, char *file, int line, inst_t *ra)
+{
+ KTRACE_ENTER(vp, VNODE_KTRACE_REF, file, line, ra);
+}
+
+void
+vn_trace_rele(vnode_t *vp, char *file, int line, inst_t *ra)
+{
+ KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra);
+}
+#endif /* CONFIG_XFS_VNODE_TRACING */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/linux/xfs_vnode.h linux.21rc5-ac2/fs/xfs/linux/xfs_vnode.h
--- linux.21rc5/fs/xfs/linux/xfs_vnode.h 1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/linux/xfs_vnode.h 2003-05-07 15:41:09.000000000 +0100
@@ -0,0 +1,622 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_VNODE_H__ +#define __XFS_VNODE_H__ + +struct uio; +struct file; +struct vattr; +struct page_buf_bmap_s; +struct attrlist_cursor_kern; + +/* + * Vnode types (unrelated to on-disk inodes). VNON means no type. + */ +typedef enum vtype { + VNON = 0, + VREG = 1, + VDIR = 2, + VBLK = 3, + VCHR = 4, + VLNK = 5, + VFIFO = 6, + VBAD = 7, + VSOCK = 8 +} vtype_t; + +typedef xfs_ino_t vnumber_t; +typedef struct dentry vname_t; +typedef bhv_head_t vn_bhv_head_t; + +/* + * MP locking protocols: + * v_flag, v_vfsp VN_LOCK/VN_UNLOCK + * v_type read-only or fs-dependent + */ +typedef struct vnode { + __u32 v_flag; /* vnode flags (see below) */ + enum vtype v_type; /* vnode type */ + struct vfs *v_vfsp; /* ptr to containing VFS */ + vnumber_t v_number; /* in-core vnode number */ + vn_bhv_head_t v_bh; /* behavior head */ + spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */ + struct inode v_inode; /* Linux inode */ +#ifdef CONFIG_XFS_VNODE_TRACING + struct ktrace *v_trace; /* trace header structure */ +#endif +} vnode_t; + +#define v_fbhv v_bh.bh_first /* first behavior */ +#define v_fops v_bh.bh_first->bd_ops /* first behavior ops */ + +#define VNODE_POSITION_BASE BHV_POSITION_BASE /* chain bottom */ +#define VNODE_POSITION_TOP BHV_POSITION_TOP /* chain top */ +#define VNODE_POSITION_INVALID BHV_POSITION_INVALID /* invalid pos. num */ + +typedef enum { + VN_BHV_UNKNOWN, /* not specified */ + VN_BHV_XFS, /* xfs */ + VN_BHV_DM, /* data migration */ + VN_BHV_QM, /* quota manager */ + VN_BHV_IO, /* IO path */ + VN_BHV_END /* housekeeping end-of-range */ +} vn_bhv_t; + +#define VNODE_POSITION_XFS (VNODE_POSITION_BASE) +#define VNODE_POSITION_DM (VNODE_POSITION_BASE+10) +#define VNODE_POSITION_QM (VNODE_POSITION_BASE+20) +#define VNODE_POSITION_IO (VNODE_POSITION_BASE+30) + +/* + * Macros for dealing with the behavior descriptor inside of the vnode. + */ +#define BHV_TO_VNODE(bdp) ((vnode_t *)BHV_VOBJ(bdp)) +#define BHV_TO_VNODE_NULL(bdp) ((vnode_t *)BHV_VOBJNULL(bdp)) + +#define VN_BHV_HEAD(vp) ((bhv_head_t *)(&((vp)->v_bh))) +#define vn_bhv_head_init(bhp,name) bhv_head_init(bhp,name) +#define vn_bhv_remove(bhp,bdp) bhv_remove(bhp,bdp) +#define vn_bhv_lookup(bhp,ops) bhv_lookup(bhp,ops) +#define vn_bhv_lookup_unlocked(bhp,ops) bhv_lookup_unlocked(bhp,ops) + +/* + * Vnode to Linux inode mapping. + */ +#define LINVFS_GET_VP(inode) ((vnode_t *)list_entry(inode, vnode_t, v_inode)) +#define LINVFS_GET_IP(vp) (&(vp)->v_inode) + +/* + * Conversion between vnode types/modes and encoded type/mode as + * seen by stat(2) and mknod(2). 
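Those conversion tables work because S_IFMT occupies bits 12-15 of a mode, so (mode & S_IFMT) >> 12 indexes a 16-entry table: S_IFDIR is 0040000 octal, (S_IFDIR & S_IFMT) >> 12 is 4, and iftovt_tab[4] is VDIR. A self-contained user-space check of the round trip, duplicating the two tables from xfs_vnode.c above (a sketch for illustration, not patch code):

    #include <assert.h>
    #include <sys/stat.h>

    enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VFIFO, VBAD, VSOCK };

    /* Same layout as iftovt_tab[] / vttoif_tab[] in xfs_vnode.c. */
    static const enum vtype iftovt[16] = {
        VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
        VREG, VNON,  VLNK, VNON, VSOCK, VNON, VNON, VNON
    };
    static const unsigned short vttoif[9] = {
        0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK
    };

    int main(void)
    {
        mode_t m = S_IFDIR | 0755;

        assert(iftovt[(m & S_IFMT) >> 12] == VDIR);   /* IFTOVT(m) */
        assert((vttoif[VDIR] | (m & ~S_IFMT)) == m);  /* MAKEIMODE(VDIR, 0755) */
        return 0;
    }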
+ */ +extern enum vtype iftovt_tab[]; +extern ushort vttoif_tab[]; +#define IFTOVT(M) (iftovt_tab[((M) & S_IFMT) >> 12]) +#define VTTOIF(T) (vttoif_tab[(int)(T)]) +#define MAKEIMODE(T, M) (VTTOIF(T) | ((M) & ~S_IFMT)) + +/* + * Vnode flags. + */ +#define VINACT 0x1 /* vnode is being inactivated */ +#define VRECLM 0x2 /* vnode is being reclaimed */ +#define VWAIT 0x4 /* waiting for VINACT/VRECLM to end */ +#define VMODIFIED 0x8 /* XFS inode state possibly differs */ + /* to the Linux inode state. */ + +typedef enum vrwlock { VRWLOCK_NONE, VRWLOCK_READ, + VRWLOCK_WRITE, VRWLOCK_WRITE_DIRECT, + VRWLOCK_TRY_READ, VRWLOCK_TRY_WRITE } vrwlock_t; + +/* + * Return values for VOP_INACTIVE. A return value of + * VN_INACTIVE_NOCACHE implies that the file system behavior + * has disassociated its state and bhv_desc_t from the vnode. + */ +#define VN_INACTIVE_CACHE 0 +#define VN_INACTIVE_NOCACHE 1 + +/* + * Values for the cmd code given to VOP_VNODE_CHANGE. + */ +typedef enum vchange { + VCHANGE_FLAGS_FRLOCKS = 0, + VCHANGE_FLAGS_ENF_LOCKING = 1, + VCHANGE_FLAGS_TRUNCATED = 2, + VCHANGE_FLAGS_PAGE_DIRTY = 3, + VCHANGE_FLAGS_IOEXCL_COUNT = 4 +} vchange_t; + + +typedef int (*vop_open_t)(bhv_desc_t *, struct cred *); +typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct file *, char *, + size_t, loff_t *, int, struct cred *); +typedef ssize_t (*vop_write_t)(bhv_desc_t *, struct file *, const char *, + size_t, loff_t *, int, struct cred *); +typedef int (*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *, + unsigned int, unsigned long); +typedef int (*vop_getattr_t)(bhv_desc_t *, struct vattr *, int, + struct cred *); +typedef int (*vop_setattr_t)(bhv_desc_t *, struct vattr *, int, + struct cred *); +typedef int (*vop_access_t)(bhv_desc_t *, int, struct cred *); +typedef int (*vop_lookup_t)(bhv_desc_t *, vname_t *, vnode_t **, + int, vnode_t *, struct cred *); +typedef int (*vop_create_t)(bhv_desc_t *, vname_t *, struct vattr *, + vnode_t **, struct cred *); +typedef int (*vop_remove_t)(bhv_desc_t *, vname_t *, struct cred *); +typedef int (*vop_link_t)(bhv_desc_t *, vnode_t *, vname_t *, + struct cred *); +typedef int (*vop_rename_t)(bhv_desc_t *, vname_t *, vnode_t *, vname_t *, + struct cred *); +typedef int (*vop_mkdir_t)(bhv_desc_t *, vname_t *, struct vattr *, + vnode_t **, struct cred *); +typedef int (*vop_rmdir_t)(bhv_desc_t *, vname_t *, struct cred *); +typedef int (*vop_readdir_t)(bhv_desc_t *, struct uio *, struct cred *, + int *); +typedef int (*vop_symlink_t)(bhv_desc_t *, vname_t *, struct vattr *, + char *, vnode_t **, struct cred *); +typedef int (*vop_readlink_t)(bhv_desc_t *, struct uio *, struct cred *); +typedef int (*vop_fsync_t)(bhv_desc_t *, int, struct cred *, + xfs_off_t, xfs_off_t); +typedef int (*vop_inactive_t)(bhv_desc_t *, struct cred *); +typedef int (*vop_fid2_t)(bhv_desc_t *, struct fid *); +typedef int (*vop_release_t)(bhv_desc_t *); +typedef int (*vop_rwlock_t)(bhv_desc_t *, vrwlock_t); +typedef void (*vop_rwunlock_t)(bhv_desc_t *, vrwlock_t); +typedef int (*vop_frlock_t)(bhv_desc_t *, int, struct file_lock *,int, + xfs_off_t, struct cred *); +typedef int (*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int, + struct page_buf_bmap_s *, int *); +typedef int (*vop_reclaim_t)(bhv_desc_t *); +typedef int (*vop_attr_get_t)(bhv_desc_t *, char *, char *, int *, int, + struct cred *); +typedef int (*vop_attr_set_t)(bhv_desc_t *, char *, char *, int, int, + struct cred *); +typedef int (*vop_attr_remove_t)(bhv_desc_t *, char *, int, struct cred *); +typedef int 
(*vop_attr_list_t)(bhv_desc_t *, char *, int, int, + struct attrlist_cursor_kern *, struct cred *); +typedef void (*vop_link_removed_t)(bhv_desc_t *, vnode_t *, int); +typedef void (*vop_vnode_change_t)(bhv_desc_t *, vchange_t, __psint_t); +typedef void (*vop_ptossvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int); +typedef void (*vop_pflushinvalvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int); +typedef int (*vop_pflushvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, + uint64_t, int); +typedef int (*vop_iflush_t)(bhv_desc_t *, int); + + +typedef struct vnodeops { + bhv_position_t vn_position; /* position within behavior chain */ + vop_open_t vop_open; + vop_read_t vop_read; + vop_write_t vop_write; + vop_ioctl_t vop_ioctl; + vop_getattr_t vop_getattr; + vop_setattr_t vop_setattr; + vop_access_t vop_access; + vop_lookup_t vop_lookup; + vop_create_t vop_create; + vop_remove_t vop_remove; + vop_link_t vop_link; + vop_rename_t vop_rename; + vop_mkdir_t vop_mkdir; + vop_rmdir_t vop_rmdir; + vop_readdir_t vop_readdir; + vop_symlink_t vop_symlink; + vop_readlink_t vop_readlink; + vop_fsync_t vop_fsync; + vop_inactive_t vop_inactive; + vop_fid2_t vop_fid2; + vop_rwlock_t vop_rwlock; + vop_rwunlock_t vop_rwunlock; + vop_frlock_t vop_frlock; + vop_bmap_t vop_bmap; + vop_reclaim_t vop_reclaim; + vop_attr_get_t vop_attr_get; + vop_attr_set_t vop_attr_set; + vop_attr_remove_t vop_attr_remove; + vop_attr_list_t vop_attr_list; + vop_link_removed_t vop_link_removed; + vop_vnode_change_t vop_vnode_change; + vop_ptossvp_t vop_tosspages; + vop_pflushinvalvp_t vop_flushinval_pages; + vop_pflushvp_t vop_flush_pages; + vop_release_t vop_release; + vop_iflush_t vop_iflush; +} vnodeops_t; + +/* + * VOP's. + */ +#define _VOP_(op, vp) (*((vnodeops_t *)(vp)->v_fops)->op) + +#define VOP_READ(vp,file,buf,size,offset,ioflags,cr,rv) \ + rv = _VOP_(vop_read, vp)((vp)->v_fbhv,file,buf,size,offset,ioflags,cr) +#define VOP_WRITE(vp,file,buf,size,offset,ioflags,cr,rv) \ + rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,buf,size,offset,ioflags,cr) +#define VOP_BMAP(vp,of,sz,rw,b,n,rv) \ + rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n) +#define VOP_OPEN(vp, cr, rv) \ + rv = _VOP_(vop_open, vp)((vp)->v_fbhv, cr) +#define VOP_GETATTR(vp, vap, f, cr, rv) \ + rv = _VOP_(vop_getattr, vp)((vp)->v_fbhv, vap, f, cr) +#define VOP_SETATTR(vp, vap, f, cr, rv) \ + rv = _VOP_(vop_setattr, vp)((vp)->v_fbhv, vap, f, cr) +#define VOP_ACCESS(vp, mode, cr, rv) \ + rv = _VOP_(vop_access, vp)((vp)->v_fbhv, mode, cr) +#define VOP_LOOKUP(vp,d,vpp,f,rdir,cr,rv) \ + rv = _VOP_(vop_lookup, vp)((vp)->v_fbhv,d,vpp,f,rdir,cr) +#define VOP_CREATE(dvp,d,vap,vpp,cr,rv) \ + rv = _VOP_(vop_create, dvp)((dvp)->v_fbhv,d,vap,vpp,cr) +#define VOP_REMOVE(dvp,d,cr,rv) \ + rv = _VOP_(vop_remove, dvp)((dvp)->v_fbhv,d,cr) +#define VOP_LINK(tdvp,fvp,d,cr,rv) \ + rv = _VOP_(vop_link, tdvp)((tdvp)->v_fbhv,fvp,d,cr) +#define VOP_RENAME(fvp,fnm,tdvp,tnm,cr,rv) \ + rv = _VOP_(vop_rename, fvp)((fvp)->v_fbhv,fnm,tdvp,tnm,cr) +#define VOP_MKDIR(dp,d,vap,vpp,cr,rv) \ + rv = _VOP_(vop_mkdir, dp)((dp)->v_fbhv,d,vap,vpp,cr) +#define VOP_RMDIR(dp,d,cr,rv) \ + rv = _VOP_(vop_rmdir, dp)((dp)->v_fbhv,d,cr) +#define VOP_READDIR(vp,uiop,cr,eofp,rv) \ + rv = _VOP_(vop_readdir, vp)((vp)->v_fbhv,uiop,cr,eofp) +#define VOP_SYMLINK(dvp,d,vap,tnm,vpp,cr,rv) \ + rv = _VOP_(vop_symlink, dvp) ((dvp)->v_fbhv,d,vap,tnm,vpp,cr) +#define VOP_READLINK(vp,uiop,cr,rv) \ + rv = _VOP_(vop_readlink, vp)((vp)->v_fbhv,uiop,cr) +#define VOP_FSYNC(vp,f,cr,b,e,rv) \ + rv = _VOP_(vop_fsync, vp)((vp)->v_fbhv,f,cr,b,e) 
+#define VOP_INACTIVE(vp, cr, rv) \ + rv = _VOP_(vop_inactive, vp)((vp)->v_fbhv, cr) +#define VOP_RELEASE(vp, rv) \ + rv = _VOP_(vop_release, vp)((vp)->v_fbhv) +#define VOP_FID2(vp, fidp, rv) \ + rv = _VOP_(vop_fid2, vp)((vp)->v_fbhv, fidp) +#define VOP_RWLOCK(vp,i) \ + (void)_VOP_(vop_rwlock, vp)((vp)->v_fbhv, i) +#define VOP_RWLOCK_TRY(vp,i) \ + _VOP_(vop_rwlock, vp)((vp)->v_fbhv, i) +#define VOP_RWUNLOCK(vp,i) \ + (void)_VOP_(vop_rwunlock, vp)((vp)->v_fbhv, i) +#define VOP_FRLOCK(vp,c,fl,flags,offset,fr,rv) \ + rv = _VOP_(vop_frlock, vp)((vp)->v_fbhv,c,fl,flags,offset,fr) +#define VOP_RECLAIM(vp, rv) \ + rv = _VOP_(vop_reclaim, vp)((vp)->v_fbhv) +#define VOP_ATTR_GET(vp, name, val, vallenp, fl, cred, rv) \ + rv = _VOP_(vop_attr_get, vp)((vp)->v_fbhv,name,val,vallenp,fl,cred) +#define VOP_ATTR_SET(vp, name, val, vallen, fl, cred, rv) \ + rv = _VOP_(vop_attr_set, vp)((vp)->v_fbhv,name,val,vallen,fl,cred) +#define VOP_ATTR_REMOVE(vp, name, flags, cred, rv) \ + rv = _VOP_(vop_attr_remove, vp)((vp)->v_fbhv,name,flags,cred) +#define VOP_ATTR_LIST(vp, buf, buflen, fl, cursor, cred, rv) \ + rv = _VOP_(vop_attr_list, vp)((vp)->v_fbhv,buf,buflen,fl,cursor,cred) +#define VOP_LINK_REMOVED(vp, dvp, linkzero) \ + (void)_VOP_(vop_link_removed, vp)((vp)->v_fbhv, dvp, linkzero) +#define VOP_VNODE_CHANGE(vp, cmd, val) \ + (void)_VOP_(vop_vnode_change, vp)((vp)->v_fbhv,cmd,val) +/* + * These are page cache functions that now go thru VOPs. + * 'last' parameter is unused and left in for IRIX compatibility + */ +#define VOP_TOSS_PAGES(vp, first, last, fiopt) \ + _VOP_(vop_tosspages, vp)((vp)->v_fbhv,first, last, fiopt) +/* + * 'last' parameter is unused and left in for IRIX compatibility + */ +#define VOP_FLUSHINVAL_PAGES(vp, first, last, fiopt) \ + _VOP_(vop_flushinval_pages, vp)((vp)->v_fbhv,first,last,fiopt) +/* + * 'last' parameter is unused and left in for IRIX compatibility + */ +#define VOP_FLUSH_PAGES(vp, first, last, flags, fiopt, rv) \ + rv = _VOP_(vop_flush_pages, vp)((vp)->v_fbhv,first,last,flags,fiopt) +#define VOP_IOCTL(vp, inode, filp, cmd, arg, rv) \ + rv = _VOP_(vop_ioctl, vp)((vp)->v_fbhv,inode,filp,cmd,arg) +#define VOP_IFLUSH(vp, flags, rv) \ + rv = _VOP_(vop_iflush, vp)((vp)->v_fbhv, flags) + +/* + * Flags for read/write calls - same values as IRIX + */ + +#define IO_NFS 0x00100 +#define IO_ISLOCKED 0x00800 +#define IO_NFS3 0x02000 + +/* + * Flags for VOP_IFLUSH call + */ + +#define FLUSH_SYNC 1 /* wait for flush to complete */ +#define FLUSH_INODE 2 /* flush the inode itself */ +#define FLUSH_LOG 4 /* force the last log entry for + * this inode out to disk */ + +/* + * Flush/Invalidate options for VOP_TOSS_PAGES, VOP_FLUSHINVAL_PAGES and + * VOP_FLUSH_PAGES. + */ +#define FI_NONE 0 /* none */ +#define FI_REMAPF 1 /* Do a remapf prior to the operation */ +#define FI_REMAPF_LOCKED 2 /* Do a remapf prior to the operation. + Prevent VM access to the pages until + the operation completes. */ + +/* + * Vnode attributes. va_mask indicates those attributes the caller + * wants to set (setattr) or extract (getattr). 
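Two conventions meet here: each VOP_*() macro expands to a statement that assigns the operation's result into the caller's rv variable, and getattr-style calls are driven by a request bitmask the caller loads before the call (vn_revalidate() in xfs_vnode.c above does exactly this with va_mask = XFS_AT_STAT|XFS_AT_GENCOUNT). A miniature of both conventions, with all names below hypothetical:

    #include <assert.h>

    #define AT_MODE 0x1
    #define AT_SIZE 0x2

    struct attrs { int mask; unsigned mode; long long size; };
    struct obj   { int (*getattr)(struct obj *, struct attrs *); };

    /* Statement macro in the VOP_GETATTR(vp, vap, f, cr, rv) style:
     * it evaluates to an assignment into the caller's rv variable. */
    #define OBJ_GETATTR(o, a, rv)  ((rv) = (o)->getattr((o), (a)))

    static int demo_getattr(struct obj *o, struct attrs *a)
    {
        (void)o;
        /* fill only the fields the caller asked for via the mask */
        if (a->mask & AT_MODE) a->mode = 0644;
        if (a->mask & AT_SIZE) a->size = 4096;
        return 0;
    }

    int main(void)
    {
        struct obj   o = { .getattr = demo_getattr };
        struct attrs a = { .mask = AT_MODE | AT_SIZE };
        int error;

        OBJ_GETATTR(&o, &a, error);   /* same shape as VOP_GETATTR(...) */
        assert(error == 0 && a.size == 4096);
        return 0;
    }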
+ */ +typedef struct vattr { + int va_mask; /* bit-mask of attributes */ + vtype_t va_type; /* vnode type (for create) */ + mode_t va_mode; /* file access mode */ + uid_t va_uid; /* owner user id */ + gid_t va_gid; /* owner group id */ + xfs_dev_t va_fsid; /* file system id (dev for now) */ + xfs_ino_t va_nodeid; /* node id */ + nlink_t va_nlink; /* number of references to file */ + xfs_off_t va_size; /* file size in bytes */ + timespec_t va_atime; /* time of last access */ + timespec_t va_mtime; /* time of last modification */ + timespec_t va_ctime; /* time file ``created'' */ + xfs_dev_t va_rdev; /* device the file represents */ + u_long va_blksize; /* fundamental block size */ + __int64_t va_nblocks; /* # of blocks allocated */ + u_long va_vcode; /* version code */ + u_long va_xflags; /* random extended file flags */ + u_long va_extsize; /* file extent size */ + u_long va_nextents; /* number of extents in file */ + u_long va_anextents; /* number of attr extents in file */ + int va_projid; /* project id */ + u_int va_gencount; /* object generation count */ +} vattr_t; + +/* + * setattr or getattr attributes + */ +#define XFS_AT_TYPE 0x00000001 +#define XFS_AT_MODE 0x00000002 +#define XFS_AT_UID 0x00000004 +#define XFS_AT_GID 0x00000008 +#define XFS_AT_FSID 0x00000010 +#define XFS_AT_NODEID 0x00000020 +#define XFS_AT_NLINK 0x00000040 +#define XFS_AT_SIZE 0x00000080 +#define XFS_AT_ATIME 0x00000100 +#define XFS_AT_MTIME 0x00000200 +#define XFS_AT_CTIME 0x00000400 +#define XFS_AT_RDEV 0x00000800 +#define XFS_AT_BLKSIZE 0x00001000 +#define XFS_AT_NBLOCKS 0x00002000 +#define XFS_AT_VCODE 0x00004000 +#define XFS_AT_MAC 0x00008000 +#define XFS_AT_UPDATIME 0x00010000 +#define XFS_AT_UPDMTIME 0x00020000 +#define XFS_AT_UPDCTIME 0x00040000 +#define XFS_AT_ACL 0x00080000 +#define XFS_AT_CAP 0x00100000 +#define XFS_AT_INF 0x00200000 +#define XFS_AT_XFLAGS 0x00400000 +#define XFS_AT_EXTSIZE 0x00800000 +#define XFS_AT_NEXTENTS 0x01000000 +#define XFS_AT_ANEXTENTS 0x02000000 +#define XFS_AT_PROJID 0x04000000 +#define XFS_AT_SIZE_NOPERM 0x08000000 +#define XFS_AT_GENCOUNT 0x10000000 + +#define XFS_AT_ALL (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\ + XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\ + XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\ + XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|XFS_AT_MAC|\ + XFS_AT_ACL|XFS_AT_CAP|XFS_AT_INF|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|\ + XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_PROJID|XFS_AT_GENCOUNT) + +#define XFS_AT_STAT (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\ + XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\ + XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\ + XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_PROJID) + +#define XFS_AT_TIMES (XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME) + +#define XFS_AT_UPDTIMES (XFS_AT_UPDATIME|XFS_AT_UPDMTIME|XFS_AT_UPDCTIME) + +#define XFS_AT_NOSET (XFS_AT_NLINK|XFS_AT_RDEV|XFS_AT_FSID|XFS_AT_NODEID|\ + XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\ + XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT) + +#define VREAD 00400 +#define VWRITE 00200 +#define VEXEC 00100 +#define VSGID 02000 /* set group id on execution */ +#define MODEMASK 07777 /* mode bits plus permission bits */ + +/* + * Check whether mandatory file locking is enabled. 
+ */ +#define MANDLOCK(vp, mode) \ + ((vp)->v_type == VREG && ((mode) & (VSGID|(VEXEC>>3))) == VSGID) + +extern void vn_init(void); +extern int vn_wait(struct vnode *); +extern vnode_t *vn_initialize(struct inode *); + +/* + * Acquiring and invalidating vnodes: + * + * if (vn_get(vp, version, 0)) + * ...; + * vn_purge(vp, version); + * + * vn_get and vn_purge must be called with vmap_t arguments, sampled + * while a lock that the vnode's VOP_RECLAIM function acquires is + * held, to ensure that the vnode sampled with the lock held isn't + * recycled (VOP_RECLAIMed) or deallocated between the release of the lock + * and the subsequent vn_get or vn_purge. + */ + +/* + * vnode_map structures _must_ match vn_epoch and vnode structure sizes. + */ +typedef struct vnode_map { + vfs_t *v_vfsp; + vnumber_t v_number; /* in-core vnode number */ + xfs_ino_t v_ino; /* inode # */ +} vmap_t; + +#define VMAP(vp, vmap) {(vmap).v_vfsp = (vp)->v_vfsp, \ + (vmap).v_number = (vp)->v_number, \ + (vmap).v_ino = (vp)->v_inode.i_ino; } + +extern void vn_purge(struct vnode *, vmap_t *); +extern vnode_t *vn_get(struct vnode *, vmap_t *); +extern int vn_revalidate(struct vnode *); +extern void vn_remove(struct vnode *); + +static inline int vn_count(struct vnode *vp) +{ + return atomic_read(&LINVFS_GET_IP(vp)->i_count); +} + +/* + * Vnode reference counting functions (and macros for compatibility). + */ +extern vnode_t *vn_hold(struct vnode *); +extern void vn_rele(struct vnode *); + +#if defined(CONFIG_XFS_VNODE_TRACING) + +#define VN_HOLD(vp) \ + ((void)vn_hold(vp), \ + vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address)) +#define VN_RELE(vp) \ + (vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \ + iput(LINVFS_GET_IP(vp))) + +#else /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */ + +#define VN_HOLD(vp) ((void)vn_hold(vp)) +#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp))) + +#endif /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */ + +/* + * Vname handling macros. + */ +#define VNAME(dentry) ((char *) (dentry)->d_name.name) +#define VNAMELEN(dentry) ((dentry)->d_name.len) +#define VNAME_TO_VNODE(dentry) (LINVFS_GET_VP((dentry)->d_inode)) + +/* + * Vnode spinlock manipulation. + */ +#define VN_LOCK(vp) mutex_spinlock(&(vp)->v_lock) +#define VN_UNLOCK(vp, s) mutex_spinunlock(&(vp)->v_lock, s) +#define VN_FLAGSET(vp,b) vn_flagset(vp,b) +#define VN_FLAGCLR(vp,b) vn_flagclr(vp,b) + +static __inline__ void vn_flagset(struct vnode *vp, uint flag) +{ + spin_lock(&vp->v_lock); + vp->v_flag |= flag; + spin_unlock(&vp->v_lock); +} + +static __inline__ void vn_flagclr(struct vnode *vp, uint flag) +{ + spin_lock(&vp->v_lock); + vp->v_flag &= ~flag; + spin_unlock(&vp->v_lock); +} + +/* + * Some useful predicates. + */ +#define VN_MAPPED(vp) ((LINVFS_GET_IP(vp)->i_mapping->i_mmap != NULL) || \ + (LINVFS_GET_IP(vp)->i_mapping->i_mmap_shared != NULL)) +#define VN_CACHED(vp) (LINVFS_GET_IP(vp)->i_mapping->nrpages) +#define VN_DIRTY(vp) (!list_empty(&(LINVFS_GET_IP(vp)->i_dirty_data_buffers))) +#define VMODIFY(vp) VN_FLAGSET(vp, VMODIFIED) +#define VUNMODIFY(vp) VN_FLAGCLR(vp, VMODIFIED) + +/* + * Flags to VOP_SETATTR/VOP_GETATTR. 
+ */ +#define ATTR_UTIME 0x01 /* non-default utime(2) request */ +#define ATTR_EXEC 0x02 /* invocation from exec(2) */ +#define ATTR_COMM 0x04 /* yield common vp attributes */ +#define ATTR_DMI 0x08 /* invocation from a DMI function */ +#define ATTR_LAZY 0x80 /* set/get attributes lazily */ +#define ATTR_NONBLOCK 0x100 /* return EAGAIN if operation would block */ +#define ATTR_NOLOCK 0x200 /* Don't grab any conflicting locks */ +#define ATTR_NOSIZETOK 0x400 /* Don't get the DVN_SIZE_READ token */ + +/* + * Flags to VOP_FSYNC and VOP_RECLAIM. + */ +#define FSYNC_NOWAIT 0 /* asynchronous flush */ +#define FSYNC_WAIT 0x1 /* synchronous fsync or forced reclaim */ +#define FSYNC_INVAL 0x2 /* flush and invalidate cached data */ +#define FSYNC_DATA 0x4 /* synchronous fsync of data only */ + +#if (defined(CONFIG_XFS_VNODE_TRACING)) + +#define VNODE_TRACE_SIZE 16 /* number of trace entries */ + +/* + * Tracing entries. + */ +#define VNODE_KTRACE_ENTRY 1 +#define VNODE_KTRACE_EXIT 2 +#define VNODE_KTRACE_HOLD 3 +#define VNODE_KTRACE_REF 4 +#define VNODE_KTRACE_RELE 5 + +extern void vn_trace_entry(struct vnode *, char *, inst_t *); +extern void vn_trace_exit(struct vnode *, char *, inst_t *); +extern void vn_trace_hold(struct vnode *, char *, int, inst_t *); +extern void vn_trace_ref(struct vnode *, char *, int, inst_t *); +extern void vn_trace_rele(struct vnode *, char *, int, inst_t *); +#define VN_TRACE(vp) \ + vn_trace_ref(vp, __FILE__, __LINE__, (inst_t *)__return_address) + +#else /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */ + +#define vn_trace_entry(a,b,c) +#define vn_trace_exit(a,b,c) +#define vn_trace_hold(a,b,c,d) +#define vn_trace_ref(a,b,c,d) +#define vn_trace_rele(a,b,c,d) +#define VN_TRACE(vp) + +#endif /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */ + +#endif /* __XFS_VNODE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/Makefile linux.21rc5-ac2/fs/xfs/Makefile --- linux.21rc5/fs/xfs/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/Makefile 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,146 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. +# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# + +EXTRA_CFLAGS += -I. 
-funsigned-char + +ifeq ($(CONFIG_XFS_DEBUG),y) + EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG -DXFSDEBUG +endif +ifeq ($(CONFIG_PAGEBUF_DEBUG),y) + EXTRA_CFLAGS += -DPAGEBUF_TRACE +endif + +# fs/Makefile enters fs/xfs twice if CONFIG_XFS_FS is y, once for kernel and +# once for modules. This is necessary because xfsidbg can be built as a module +# even if xfs is in kernel. Alas the shorthand form +# O_TARGET := xfs.o +# obj-m := $(O_TARGET) +# fails when the makefile is run more than once, code gets compiled as both +# kernel and as module, which one gets linked depends on the phase of the moon. +# I just love these layer violations where a makefile behaves differently +# depending on changes to its parent. Work around by only setting obj-m when +# xfs is selected as a module. Keith Owens. + +O_TARGET := xfs.o +ifeq ($(CONFIG_XFS_FS),m) + obj-m := $(O_TARGET) +endif + +obj-$(CONFIG_XFS_RT) += xfs_rtalloc.o +obj-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o +obj-$(CONFIG_XFS_POSIX_CAP) += xfs_cap.o +obj-$(CONFIG_XFS_POSIX_MAC) += xfs_mac.o + +obj-y += xfs_alloc.o \ + xfs_alloc_btree.o \ + xfs_attr.o \ + xfs_attr_fetch.o \ + xfs_attr_leaf.o \ + xfs_bit.o \ + xfs_bmap.o \ + xfs_bmap_btree.o \ + xfs_btree.o \ + xfs_buf_item.o \ + xfs_da_btree.o \ + xfs_dir.o \ + xfs_dir2.o \ + xfs_dir2_block.o \ + xfs_dir2_data.o \ + xfs_dir2_leaf.o \ + xfs_dir2_node.o \ + xfs_dir2_sf.o \ + xfs_dir2_trace.o \ + xfs_dir_leaf.o \ + xfs_error.o \ + xfs_extfree_item.o \ + xfs_fsops.o \ + xfs_ialloc.o \ + xfs_ialloc_btree.o \ + xfs_iget.o \ + xfs_inode.o \ + xfs_inode_item.o \ + xfs_iocore.o \ + xfs_itable.o \ + xfs_dfrag.o \ + xfs_log.o \ + xfs_log_recover.o \ + xfs_macros.o \ + xfs_mount.o \ + xfs_rename.o \ + xfs_trans.o \ + xfs_trans_ail.o \ + xfs_trans_buf.o \ + xfs_trans_extfree.o \ + xfs_trans_inode.o \ + xfs_trans_item.o \ + xfs_utils.o \ + xfs_vfsops.o \ + xfs_vnodeops.o \ + xfs_rw.o + +# Objects not built in this directory +obj-y += pagebuf/pagebuf.o \ + linux/linux_xfs.o \ + support/support_xfs.o + +subdir-$(CONFIG_XFS_FS) += pagebuf linux support + +ifeq ($(CONFIG_XFS_QUOTA),y) + subdir-$(CONFIG_XFS_FS) += quota + obj-y += quota/xfs_quota.o +endif + +# Quota and DMAPI stubs +obj-y += xfs_dmops.o \ + xfs_qmops.o + +# If both xfs and kdb modules are built in then xfsidbg is built in. If xfs is +# a module and kdb modules are being compiled then xfsidbg must be a module, to +# follow xfs. If xfs is built in then xfsidbg tracks the kdb module state. +# This must come after the main xfs code so xfs initialises before xfsidbg. +# KAO +ifneq ($(CONFIG_KDB_MODULES),) + ifeq ($(CONFIG_XFS_FS),y) + obj-$(CONFIG_KDB_MODULES) += xfsidbg.o + else + obj-$(CONFIG_XFS_FS) += xfsidbg.o + endif +endif + +CFLAGS_xfsidbg.o += -I $(TOPDIR)/arch/$(ARCH)/kdb + +include $(TOPDIR)/Rules.make + +# This is really nasty, but Rules.make was never designed for multi directory +# modules. Keith Owens. + +xfs.o: $(patsubst %,_modsubdir_%,$(subdir-m)) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/pagebuf/Makefile linux.21rc5-ac2/fs/xfs/pagebuf/Makefile --- linux.21rc5/fs/xfs/pagebuf/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/pagebuf/Makefile 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,50 @@ +# +# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. +# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# +# Makefile for the linux pagebuf routines. +# + +ifeq ($(CONFIG_PAGEBUF_DEBUG),y) + EXTRA_CFLAGS += -g -DDEBUG -DSTATIC="" -DPAGEBUF_TRACE +endif +EXTRA_CFLAGS += -I.. + +O_TARGET := pagebuf.o + +ifneq ($(MAKECMDGOALS),modules_install) + obj-m := $(O_TARGET) +endif + +export-objs += page_buf.o +obj-y += page_buf.o \ + page_buf_locking.o + +include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/pagebuf/page_buf.c linux.21rc5-ac2/fs/xfs/pagebuf/page_buf.c --- linux.21rc5/fs/xfs/pagebuf/page_buf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/pagebuf/page_buf.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,2421 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * page_buf.c + * + * The page_buf module provides an abstract buffer cache model on top of + * the Linux page cache. Cached metadata blocks for a file system are + * hashed to the inode for the block device. The page_buf module + * assembles buffer (page_buf_t) objects on demand to aggregate such + * cached pages for I/O. 
+ *
+ *
+ * Written by Steve Lord, Jim Mostek, Russell Cattelan
+ * and Rajagopal Ananthanarayanan ("ananth") at SGI.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include "page_buf_internal.h"
+
+#define NBBY 8
+#define BBSHIFT 9
+#define BN_ALIGN_MASK ((1 << (PAGE_CACHE_SHIFT - BBSHIFT)) - 1)
+
+#ifndef GFP_READAHEAD
+#define GFP_READAHEAD 0
+#endif
+
+/*
+ * A backport of the 2.5 scheduler is used by many vendors of 2.4-based
+ * distributions.
+ * We can only guess its presence by the lack of the SCHED_YIELD flag.
+ * If the heuristic doesn't work, change this define by hand.
+ */
+#ifndef SCHED_YIELD
+#define __HAVE_NEW_SCHEDULER 1
+#endif
+
+/*
+ * cpumask_t is used for supporting NR_CPUS > BITS_PER_LONG.
+ * If support for this is present, migrate_to_cpu exists and provides
+ * a wrapper around the set_cpus_allowed routine.
+ */
+#ifdef copy_cpumask
+#define __HAVE_CPUMASK_T 1
+#endif
+
+#ifndef __HAVE_CPUMASK_T
+# ifndef __HAVE_NEW_SCHEDULER
+# define migrate_to_cpu(cpu) \
+ do { current->cpus_allowed = 1UL << (cpu); } while (0)
+# else
+# define migrate_to_cpu(cpu) \
+ set_cpus_allowed(current, 1UL << (cpu))
+# endif
+#endif
+
+/*
+ * Debug code
+ */
+
+#ifdef PAGEBUF_TRACE
+static spinlock_t pb_trace_lock = SPIN_LOCK_UNLOCKED;
+struct pagebuf_trace_buf pb_trace;
+EXPORT_SYMBOL(pb_trace);
+EXPORT_SYMBOL(pb_trace_func);
+#define CIRC_INC(i) (((i) + 1) & (PB_TRACE_BUFSIZE - 1))
+
+void
+pb_trace_func(
+ page_buf_t *pb,
+ int event,
+ void *misc,
+ void *ra)
+{
+ int j;
+ unsigned long flags;
+
+ if (!pb_params.p_un.debug) return;
+
+ if (ra == NULL) ra = (void *)__builtin_return_address(0);
+
+ spin_lock_irqsave(&pb_trace_lock, flags);
+ j = pb_trace.start;
+ pb_trace.start = CIRC_INC(j);
+ spin_unlock_irqrestore(&pb_trace_lock, flags);
+
+ pb_trace.buf[j].pb = (unsigned long) pb;
+ pb_trace.buf[j].event = event;
+ pb_trace.buf[j].flags = pb->pb_flags;
+ pb_trace.buf[j].hold = pb->pb_hold.counter;
+ pb_trace.buf[j].lock_value = pb->pb_sema.count.counter;
+ pb_trace.buf[j].task = (void *)current;
+ pb_trace.buf[j].misc = misc;
+ pb_trace.buf[j].ra = ra;
+ pb_trace.buf[j].offset = pb->pb_file_offset;
+ pb_trace.buf[j].size = pb->pb_buffer_length;
+}
+#endif /* PAGEBUF_TRACE */
+
+/*
+ * File wide globals
+ */
+
+STATIC kmem_cache_t *pagebuf_cache;
+
+#define MAX_IO_DAEMONS NR_CPUS
+#define CPU_TO_DAEMON(cpu) (cpu)
+STATIC int pb_logio_daemons[MAX_IO_DAEMONS];
+STATIC struct list_head pagebuf_logiodone_tq[MAX_IO_DAEMONS];
+STATIC wait_queue_head_t pagebuf_logiodone_wait[MAX_IO_DAEMONS];
+STATIC int pb_dataio_daemons[MAX_IO_DAEMONS];
+STATIC struct list_head pagebuf_dataiodone_tq[MAX_IO_DAEMONS];
+STATIC wait_queue_head_t pagebuf_dataiodone_wait[MAX_IO_DAEMONS];
+
+/*
+ * For pre-allocated buffer head pool
+ */
+
+#define NR_RESERVED_BH 64
+static wait_queue_head_t pb_resv_bh_wait;
+static spinlock_t pb_resv_bh_lock = SPIN_LOCK_UNLOCKED;
+struct buffer_head *pb_resv_bh = NULL; /* list of bh */
+int pb_resv_bh_cnt = 0; /* # of bh available */
+
+STATIC void pagebuf_daemon_wakeup(int);
+STATIC void _pagebuf_ioapply(page_buf_t *);
+STATIC void pagebuf_delwri_queue(page_buf_t *, int);
+
+/*
+ * Pagebuf module configuration parameters, exported via
+ * /proc/sys/vm/pagebuf
+ */
+
+unsigned long pagebuf_min[P_PARAM] = { HZ/2, 1*HZ, 0, 0 };
+unsigned long pagebuf_max[P_PARAM] = { HZ*30, HZ*300, 1, 1 };
+
+pagebuf_param_t pb_params = {{ HZ, 15 * HZ, 0, 0 }};
+
+/*
+ * Pagebuf statistics variables
+ */
+
+struct pbstats pbstats;
+
+/*
+ * Pagebuf allocation / freeing.
+ */
+
+#define pb_to_gfp(flags) \
+ (((flags) & PBF_READ_AHEAD) ? GFP_READAHEAD : \
+ ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL)
+
+#define pagebuf_allocate(flags) \
+ kmem_cache_alloc(pagebuf_cache, pb_to_gfp(flags))
+#define pagebuf_deallocate(pb) \
+ kmem_cache_free(pagebuf_cache, (pb));
+
+/*
+ * Pagebuf hashing
+ */
+
+#define NBITS 8
+#define NHASH (1<<NBITS)
+
+typedef struct {
+ struct list_head pb_hash;
+ int pb_count;
+ spinlock_t pb_hash_lock;
+} pb_hash_t;
+
+STATIC pb_hash_t pbhash[NHASH];
+#define pb_hash(pb) &pbhash[pb->pb_hash_index]
+
+STATIC int
+_bhash(
+ dev_t dev,
+ loff_t base)
+{
+ int bit, hval;
+
+ base >>= 9;
+ /*
+ * dev_t is 16 bits, loff_t is always 64 bits
+ */
+ base ^= dev;
+ for (bit = hval = 0; base != 0 && bit < sizeof(base) * 8; bit += NBITS) {
+ hval ^= (int)base & (NHASH-1);
+ base >>= NBITS;
+ }
+ return hval;
+}
+
+/*
+ * Mapping of multi-page buffers into contiguous virtual space
+ */
+
+STATIC void *pagebuf_mapout_locked(page_buf_t *);
+
+STATIC spinlock_t as_lock = SPIN_LOCK_UNLOCKED;
+typedef struct a_list {
+ void *vm_addr;
+ struct a_list *next;
+} a_list_t;
+STATIC a_list_t *as_free_head;
+STATIC int as_list_len;
+
+
+/*
+ * Try to batch vunmaps because they are costly.
+ */
+STATIC void
+free_address(
+ void *addr)
+{
+ a_list_t *aentry;
+
+ aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC);
+ if (aentry) {
+ spin_lock(&as_lock);
+ aentry->next = as_free_head;
+ aentry->vm_addr = addr;
+ as_free_head = aentry;
+ as_list_len++;
+ spin_unlock(&as_lock);
+ } else {
+ vunmap(addr);
+ }
+}
+
+STATIC void
+purge_addresses(void)
+{
+ a_list_t *aentry, *old;
+
+ if (as_free_head == NULL)
+ return;
+
+ spin_lock(&as_lock);
+ aentry = as_free_head;
+ as_free_head = NULL;
+ as_list_len = 0;
+ spin_unlock(&as_lock);
+
+ while ((old = aentry) != NULL) {
+ vunmap(aentry->vm_addr);
+ aentry = aentry->next;
+ kfree(old);
+ }
+}
+
+/*
+ * Locking model:
+ *
+ * Buffers associated with inodes for which buffer locking
+ * is not enabled are not protected by semaphores, and are
+ * assumed to be exclusively owned by the caller. There is a
+ * spinlock in the buffer, for use by the caller when concurrent
+ * access is possible.
+ */
+
+/*
+ * Internal pagebuf object manipulation
+ */
+
+STATIC void
+_pagebuf_initialize(
+ page_buf_t *pb,
+ pb_target_t *target,
+ loff_t range_base,
+ size_t range_length,
+ page_buf_flags_t flags)
+{
+ /*
+ * We don't want certain flags to appear in pb->pb_flags.
+ */
+ flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
+
+ memset(pb, 0, sizeof(page_buf_t));
+ atomic_set(&pb->pb_hold, 1);
+ init_MUTEX_LOCKED(&pb->pb_iodonesema);
+ INIT_LIST_HEAD(&pb->pb_list);
+ INIT_LIST_HEAD(&pb->pb_hash_list);
+ init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
+ PB_SET_OWNER(pb);
+ pb->pb_target = target;
+ pb->pb_file_offset = range_base;
+ /*
+ * Set buffer_length and count_desired to the same value initially.
+ * IO routines should use count_desired, which will be the same in
+ * most cases but may be reset (e.g. XFS recovery).
+ */
+ pb->pb_buffer_length = pb->pb_count_desired = range_length;
+ pb->pb_flags = flags | PBF_NONE;
+ pb->pb_bn = PAGE_BUF_DADDR_NULL;
+ atomic_set(&pb->pb_pin_count, 0);
+ init_waitqueue_head(&pb->pb_waiters);
+
+ PB_STATS_INC(pbstats.pb_create);
+ PB_TRACE(pb, PB_TRACE_REC(get), target);
+}
+
+/*
+ * Allocate a page array capable of holding a specified number
+ * of pages, and point the page buf at it.
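_bhash() above is a plain XOR fold: shift the byte offset down past the 512-byte sector bits, mix in the device, then XOR successive NBITS-wide slices of the key into an NBITS-wide bucket index, so high key bits still influence bucket choice. The same algorithm as a stand-alone program, with generic integer types assumed (dev_t is 16 bits here, as the comment in the patch notes):

    #include <stdio.h>
    #include <stdint.h>

    #define NBITS 8
    #define NHASH (1 << NBITS)

    /* Same folding scheme as _bhash(): shift out sector bits, mix in
     * the device, then XOR successive NBITS-wide slices of the key. */
    static int bhash(uint16_t dev, uint64_t base)
    {
        int bit, hval;

        base >>= 9;                 /* 512-byte sectors -> block number */
        base ^= dev;
        for (bit = hval = 0; base != 0 && bit < (int)sizeof(base) * 8;
             bit += NBITS) {
            hval ^= (int)base & (NHASH - 1);
            base >>= NBITS;
        }
        return hval;
    }

    int main(void)
    {
        /* Nearby offsets land in different buckets, and bits above
         * 32 still change the result thanks to the fold. */
        printf("%d %d %d\n",
               bhash(3, 0x1000), bhash(3, 0x2000),
               bhash(3, 0x100000001000ULL));
        return 0;
    }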
+ */
+STATIC int
+_pagebuf_get_pages(
+ page_buf_t *pb,
+ int page_count,
+ page_buf_flags_t flags)
+{
+ int gpf_mask = pb_to_gfp(flags);
+
+ /* Make sure that we have a page list */
+ if (pb->pb_pages == NULL) {
+ pb->pb_offset = page_buf_poff(pb->pb_file_offset);
+ pb->pb_page_count = page_count;
+ if (page_count <= PB_PAGES) {
+ pb->pb_pages = pb->pb_page_array;
+ } else {
+ pb->pb_pages = kmalloc(sizeof(struct page *) *
+ page_count, gpf_mask);
+ if (pb->pb_pages == NULL)
+ return -ENOMEM;
+ }
+ memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
+ }
+ return 0;
+}
+
+/*
+ * Walk a pagebuf releasing all the pages contained within it.
+ */
+STATIC inline void
+_pagebuf_freepages(
+ page_buf_t *pb)
+{
+ int buf_index;
+
+ for (buf_index = 0; buf_index < pb->pb_page_count; buf_index++) {
+ struct page *page = pb->pb_pages[buf_index];
+
+ if (page) {
+ pb->pb_pages[buf_index] = NULL;
+ page_cache_release(page);
+ }
+ }
+
+ if (pb->pb_pages != pb->pb_page_array)
+ kfree(pb->pb_pages);
+}
+
+/*
+ * _pagebuf_free_object
+ *
+ * _pagebuf_free_object releases the contents of the specified buffer.
+ * The modification state of any associated pages is left unchanged.
+ */
+void
+_pagebuf_free_object(
+ pb_hash_t *hash, /* hash bucket for buffer */
+ page_buf_t *pb) /* buffer to deallocate */
+{
+ page_buf_flags_t pb_flags = pb->pb_flags;
+
+ PB_TRACE(pb, PB_TRACE_REC(free_obj), 0);
+ pb->pb_flags |= PBF_FREED;
+
+ if (hash) {
+ if (!list_empty(&pb->pb_hash_list)) {
+ hash->pb_count--;
+ list_del_init(&pb->pb_hash_list);
+ }
+ spin_unlock(&hash->pb_hash_lock);
+ }
+
+ if (!(pb_flags & PBF_FREED)) {
+ /* release any virtual mapping */ ;
+ if (pb->pb_flags & _PBF_ADDR_ALLOCATED) {
+ void *vaddr = pagebuf_mapout_locked(pb);
+ if (vaddr) {
+ free_address(vaddr);
+ }
+ }
+
+ if (pb->pb_flags & _PBF_MEM_ALLOCATED) {
+ if (pb->pb_pages) {
+ /* release the pages in the address list */
+ if (pb->pb_pages[0] &&
+ PageSlab(pb->pb_pages[0])) {
+ /*
+ * This came from the slab
+ * allocator, free it as such
+ */
+ kfree(pb->pb_addr);
+ } else {
+ _pagebuf_freepages(pb);
+ }
+
+ pb->pb_pages = NULL;
+ }
+ pb->pb_flags &= ~_PBF_MEM_ALLOCATED;
+ }
+ }
+
+ pagebuf_deallocate(pb);
+}
+
+/*
+ * _pagebuf_lookup_pages
+ *
+ * _pagebuf_lookup_pages finds all pages which match the buffer
+ * in question and the range of file offsets supplied,
+ * and builds the page list for the buffer, if the
+ * page list is not already formed or if not all of the pages are
+ * already in the list. Invalid pages (pages which have not yet been
+ * read in from disk) are assigned for any pages which are not found.
+ */
+STATIC int
+_pagebuf_lookup_pages(
+ page_buf_t *pb,
+ struct address_space *aspace,
+ page_buf_flags_t flags)
+{
+ loff_t next_buffer_offset;
+ unsigned long page_count, pi, index;
+ struct page *page;
+ int gfp_mask, retry_count = 5, rval = 0;
+ int all_mapped, good_pages;
+ size_t blocksize;
+
+ /* For pagebufs where we want to map an address, do not use
+ * highmem pages - so that we do not need to use kmap resources
+ * to access the data.
+ *
+ * For pages where the caller has indicated there may be resource
+ * contention (e.g. called from a transaction) do not flush
+ * delalloc pages to obtain memory.
+ */ + + if (flags & PBF_READ_AHEAD) { + gfp_mask = GFP_READAHEAD; + retry_count = 0; + } else if (flags & PBF_DONT_BLOCK) { + gfp_mask = GFP_NOFS; + } else if (flags & PBF_MAPPABLE) { + gfp_mask = GFP_KERNEL; + } else { + gfp_mask = GFP_HIGHUSER; + } + + next_buffer_offset = pb->pb_file_offset + pb->pb_buffer_length; + + good_pages = page_count = (page_buf_btoc(next_buffer_offset) - + page_buf_btoct(pb->pb_file_offset)); + + if (pb->pb_flags & _PBF_ALL_PAGES_MAPPED) { + /* Bring pages forward in cache */ + for (pi = 0; pi < page_count; pi++) { + mark_page_accessed(pb->pb_pages[pi]); + } + if ((flags & PBF_MAPPED) && !(pb->pb_flags & PBF_MAPPED)) { + all_mapped = 1; + goto mapit; + } + return 0; + } + + /* Ensure pb_pages field has been initialised */ + rval = _pagebuf_get_pages(pb, page_count, flags); + if (rval) + return rval; + + rval = pi = 0; + blocksize = pb->pb_target->pbr_bsize; + + /* Enter the pages in the page list */ + index = (pb->pb_file_offset - pb->pb_offset) >> PAGE_CACHE_SHIFT; + for (all_mapped = 1; pi < page_count; pi++, index++) { + if (pb->pb_pages[pi] == 0) { + retry: + page = find_or_create_page(aspace, index, gfp_mask); + if (!page) { + if (--retry_count > 0) { + PB_STATS_INC(pbstats.pb_page_retries); + pagebuf_daemon_wakeup(1); + current->state = TASK_UNINTERRUPTIBLE; + schedule_timeout(10); + goto retry; + } + rval = -ENOMEM; + all_mapped = 0; + continue; + } + PB_STATS_INC(pbstats.pb_page_found); + mark_page_accessed(page); + pb->pb_pages[pi] = page; + } else { + page = pb->pb_pages[pi]; + lock_page(page); + } + + /* If we need to do I/O on a page record the fact */ + if (!Page_Uptodate(page)) { + good_pages--; + if ((blocksize == PAGE_CACHE_SIZE) && + (flags & PBF_READ)) + pb->pb_locked = 1; + } + } + + if (!pb->pb_locked) { + for (pi = 0; pi < page_count; pi++) { + if (pb->pb_pages[pi]) + unlock_page(pb->pb_pages[pi]); + } + } + +mapit: + pb->pb_flags |= _PBF_MEM_ALLOCATED; + if (all_mapped) { + pb->pb_flags |= _PBF_ALL_PAGES_MAPPED; + + /* A single page buffer is always mappable */ + if (page_count == 1) { + pb->pb_addr = (caddr_t) + page_address(pb->pb_pages[0]) + pb->pb_offset; + pb->pb_flags |= PBF_MAPPED; + } else if (flags & PBF_MAPPED) { + if (as_list_len > 64) + purge_addresses(); + pb->pb_addr = vmap(pb->pb_pages, page_count, + VM_ALLOC, PAGE_KERNEL); + if (pb->pb_addr == NULL) + return -ENOMEM; + pb->pb_addr += pb->pb_offset; + pb->pb_flags |= PBF_MAPPED | _PBF_ADDR_ALLOCATED; + } + } + /* If some pages were found with data in them + * we are not in PBF_NONE state. + */ + if (good_pages != 0) { + pb->pb_flags &= ~(PBF_NONE); + if (good_pages != page_count) { + pb->pb_flags |= PBF_PARTIAL; + } + } + + PB_TRACE(pb, PB_TRACE_REC(look_pg), good_pages); + + return rval; +} + + +/* + * Pre-allocation of a pool of buffer heads for use in + * low-memory situations. + */ + +/* + * _pagebuf_prealloc_bh + * + * Pre-allocate a pool of "count" buffer heads at startup. + * Puts them on a list at "pb_resv_bh" + * Returns number of bh actually allocated to pool. + */ +STATIC int +_pagebuf_prealloc_bh( + int count) +{ + struct buffer_head *bh; + int i; + + for (i = 0; i < count; i++) { + bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL); + if (!bh) + break; + bh->b_pprev = &pb_resv_bh; + bh->b_next = pb_resv_bh; + pb_resv_bh = bh; + pb_resv_bh_cnt++; + } + return i; +} + +/* + * _pagebuf_get_prealloc_bh + * + * Get one buffer head from our pre-allocated pool. + * If pool is empty, sleep 'til one comes back in. + * Returns aforementioned buffer head. 
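+ * The intended calling pattern, as used later in _pagebuf_page_io(),
+ * is to try the slab allocator first and only dip into the reserved
+ * pool when that fails:
+ *
+ *	bh = kmem_cache_alloc(bh_cachep, SLAB_NOFS);
+ *	if (!bh)
+ *		bh = _pagebuf_get_prealloc_bh();	// may sleep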
+ */ +STATIC struct buffer_head * +_pagebuf_get_prealloc_bh(void) +{ + unsigned long flags; + struct buffer_head *bh; + DECLARE_WAITQUEUE (wait, current); + + spin_lock_irqsave(&pb_resv_bh_lock, flags); + + if (pb_resv_bh_cnt < 1) { + add_wait_queue(&pb_resv_bh_wait, &wait); + do { + set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock_irqrestore(&pb_resv_bh_lock, flags); + pagebuf_run_queues(NULL); + schedule(); + spin_lock_irqsave(&pb_resv_bh_lock, flags); + } while (pb_resv_bh_cnt < 1); + __set_current_state(TASK_RUNNING); + remove_wait_queue(&pb_resv_bh_wait, &wait); + } + + BUG_ON(pb_resv_bh_cnt < 1); + BUG_ON(!pb_resv_bh); + + bh = pb_resv_bh; + pb_resv_bh = bh->b_next; + pb_resv_bh_cnt--; + + spin_unlock_irqrestore(&pb_resv_bh_lock, flags); + return bh; +} + +/* + * _pagebuf_free_bh + * + * Take care of buffer heads that we're finished with. + * Call this instead of just kmem_cache_free(bh_cachep, bh) + * when you're done with a bh. + * + * If our pre-allocated pool is full, just free the buffer head. + * Otherwise, put it back in the pool, and wake up anybody + * waiting for one. + */ +STATIC inline void +_pagebuf_free_bh( + struct buffer_head *bh) +{ + unsigned long flags; + int free; + + if (! (free = pb_resv_bh_cnt >= NR_RESERVED_BH)) { + spin_lock_irqsave(&pb_resv_bh_lock, flags); + + if (! (free = pb_resv_bh_cnt >= NR_RESERVED_BH)) { + bh->b_pprev = &pb_resv_bh; + bh->b_next = pb_resv_bh; + pb_resv_bh = bh; + pb_resv_bh_cnt++; + + if (waitqueue_active(&pb_resv_bh_wait)) { + wake_up(&pb_resv_bh_wait); + } + } + + spin_unlock_irqrestore(&pb_resv_bh_lock, flags); + } + if (free) { + kmem_cache_free(bh_cachep, bh); + } +} + +/* + * Finding and Reading Buffers + */ + +/* + * _pagebuf_find + * + * Looks up, and creates if absent, a lockable buffer for + * a given range of an inode. The buffer is returned + * locked. If other overlapping buffers exist, they are + * released before the new buffer is created and locked, + * which may imply that this call will block until those buffers + * are unlocked. No I/O is implied by this call. 
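+ * A typical non-blocking probe via the public wrapper looks like
+ * this (illustrative only); a NULL return means "not cached, or
+ * locked by another thread":
+ *
+ *	pb = pagebuf_find(target, blkno, nblks, PBF_TRYLOCK);
+ *	if (pb == NULL)
+ *		return;		// back off, try again later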
+ */ +STATIC page_buf_t * +_pagebuf_find( /* find buffer for block */ + pb_target_t *target,/* target for block */ + loff_t ioff, /* starting offset of range */ + size_t isize, /* length of range */ + page_buf_flags_t flags, /* PBF_TRYLOCK */ + page_buf_t *new_pb)/* newly allocated buffer */ +{ + loff_t range_base; + size_t range_length; + int hval; + pb_hash_t *h; + struct list_head *p; + page_buf_t *pb; + int not_locked; + + range_base = (ioff << BBSHIFT); + range_length = (isize << BBSHIFT); + + /* Ensure we never do IOs smaller than the sector size */ + BUG_ON(range_length < (1 << target->pbr_sshift)); + + /* Ensure we never do IOs that are not sector aligned */ + BUG_ON(range_base & (loff_t)target->pbr_smask); + + hval = _bhash(target->pbr_bdev->bd_dev, range_base); + h = &pbhash[hval]; + + spin_lock(&h->pb_hash_lock); + list_for_each(p, &h->pb_hash) { + pb = list_entry(p, page_buf_t, pb_hash_list); + + if ((target == pb->pb_target) && + (pb->pb_file_offset == range_base) && + (pb->pb_buffer_length == range_length)) { + if (pb->pb_flags & PBF_FREED) + break; + /* If we look at something bring it to the + * front of the list for next time + */ + list_del(&pb->pb_hash_list); + list_add(&pb->pb_hash_list, &h->pb_hash); + goto found; + } + } + + /* No match found */ + if (new_pb) { + _pagebuf_initialize(new_pb, target, range_base, + range_length, flags | _PBF_LOCKABLE); + new_pb->pb_hash_index = hval; + h->pb_count++; + list_add(&new_pb->pb_hash_list, &h->pb_hash); + } else { + PB_STATS_INC(pbstats.pb_miss_locked); + } + + spin_unlock(&h->pb_hash_lock); + return (new_pb); + +found: + atomic_inc(&pb->pb_hold); + spin_unlock(&h->pb_hash_lock); + + /* Attempt to get the semaphore without sleeping, + * if this does not work then we need to drop the + * spinlock and do a hard attempt on the semaphore. + */ + not_locked = down_trylock(&pb->pb_sema); + if (not_locked) { + if (!(flags & PBF_TRYLOCK)) { + /* wait for buffer ownership */ + PB_TRACE(pb, PB_TRACE_REC(get_lk), 0); + pagebuf_lock(pb); + PB_STATS_INC(pbstats.pb_get_locked_waited); + } else { + /* We asked for a trylock and failed, no need + * to look at file offset and length here, we + * know that this pagebuf at least overlaps our + * pagebuf and is locked, therefore our buffer + * either does not exist, or is this buffer + */ + + pagebuf_rele(pb); + PB_STATS_INC(pbstats.pb_busy_locked); + return (NULL); + } + } else { + /* trylock worked */ + PB_SET_OWNER(pb); + } + + if (pb->pb_flags & PBF_STALE) + pb->pb_flags &= PBF_MAPPABLE | \ + PBF_MAPPED | \ + _PBF_LOCKABLE | \ + _PBF_ALL_PAGES_MAPPED | \ + _PBF_ADDR_ALLOCATED | \ + _PBF_MEM_ALLOCATED; + PB_TRACE(pb, PB_TRACE_REC(got_lk), 0); + PB_STATS_INC(pbstats.pb_get_locked); + return (pb); +} + + +/* + * pagebuf_find + * + * pagebuf_find returns a buffer matching the specified range of + * data for the specified target, if any of the relevant blocks + * are in memory. The buffer may have unallocated holes, if + * some, but not all, of the blocks are in memory. Even where + * pages are present in the buffer, not all of every page may be + * valid. + */ +page_buf_t * +pagebuf_find( /* find buffer for block */ + /* if the block is in memory */ + pb_target_t *target,/* target for block */ + loff_t ioff, /* starting offset of range */ + size_t isize, /* length of range */ + page_buf_flags_t flags) /* PBF_TRYLOCK */ +{ + return _pagebuf_find(target, ioff, isize, flags, NULL); +} + +/* + * pagebuf_get + * + * pagebuf_get assembles a buffer covering the specified range. 
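+ * (For instance, illustrative only: a caller wanting 4K of metadata
+ * at 512-byte block number 128 could do
+ *
+ *	pb = pagebuf_get(target, 128, 8, PBF_LOCK | PBF_READ | PBF_MAPPABLE);
+ *	if (pb)
+ *		data = pagebuf_offset(pb, 0);
+ *
+ * since ioff and isize are expressed in BBSHIFT-sized basic blocks.)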
+ * Some or all of the blocks in the range may be valid. Storage + * in memory for all portions of the buffer will be allocated, + * although backing storage may not be. If PBF_READ is set in + * flags, pagebuf_iostart is called also. + */ +page_buf_t * +pagebuf_get( /* allocate a buffer */ + pb_target_t *target,/* target for buffer */ + loff_t ioff, /* starting offset of range */ + size_t isize, /* length of range */ + page_buf_flags_t flags) /* PBF_TRYLOCK */ +{ + page_buf_t *pb, *new_pb; + int error; + + new_pb = pagebuf_allocate(flags); + if (unlikely(!new_pb)) + return (NULL); + + pb = _pagebuf_find(target, ioff, isize, flags, new_pb); + if (pb != new_pb) { + pagebuf_deallocate(new_pb); + if (unlikely(!pb)) + return (NULL); + } + + PB_STATS_INC(pbstats.pb_get); + + /* fill in any missing pages */ + error = _pagebuf_lookup_pages(pb, pb->pb_target->pbr_mapping, flags); + if (unlikely(error)) { + pagebuf_free(pb); + return (NULL); + } + + /* + * Always fill in the block number now, the mapped cases can do + * their own overlay of this later. + */ + pb->pb_bn = ioff; + pb->pb_count_desired = pb->pb_buffer_length; + + if (flags & PBF_READ) { + if (PBF_NOT_DONE(pb)) { + PB_TRACE(pb, PB_TRACE_REC(get_read), flags); + PB_STATS_INC(pbstats.pb_get_read); + pagebuf_iostart(pb, flags); + } else if (flags & PBF_ASYNC) { + /* + * Read ahead call which is already satisfied, + * drop the buffer + */ + if (flags & (PBF_LOCK | PBF_TRYLOCK)) + pagebuf_unlock(pb); + pagebuf_rele(pb); + return NULL; + } else { + /* We do not want read in the flags */ + pb->pb_flags &= ~PBF_READ; + } + } + + PB_TRACE(pb, PB_TRACE_REC(get_obj), flags); + return (pb); +} + +/* + * Create a skeletal pagebuf (no pages associated with it). + */ +page_buf_t * +pagebuf_lookup( + struct pb_target *target, + loff_t ioff, + size_t isize, + page_buf_flags_t flags) +{ + page_buf_t *pb; + + flags |= _PBF_PRIVATE_BH; + pb = pagebuf_allocate(flags); + if (pb) { + _pagebuf_initialize(pb, target, ioff, isize, flags); + } + return pb; +} + +/* + * If we are not low on memory then do the readahead in a deadlock + * safe manner. 
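+ * A caller typically issues this speculatively and later picks the
+ * data up with a blocking read, e.g. (illustrative only):
+ *
+ *	pagebuf_readahead(target, next_blk, nblks, 0);
+ *	...
+ *	pb = pagebuf_get(target, next_blk, nblks, PBF_LOCK | PBF_READ);
+ *
+ * If the readahead already completed, the second call finds the
+ * pages up to date and issues no further I/O.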
+ */ +void +pagebuf_readahead( + pb_target_t *target, + loff_t ioff, + size_t isize, + page_buf_flags_t flags) +{ + flags |= (PBF_TRYLOCK|PBF_READ|PBF_ASYNC|PBF_MAPPABLE|PBF_READ_AHEAD); + pagebuf_get(target, ioff, isize, flags); +} + +page_buf_t * +pagebuf_get_empty( + pb_target_t *target) +{ + page_buf_t *pb; + + pb = pagebuf_allocate(_PBF_LOCKABLE); + if (pb) + _pagebuf_initialize(pb, target, 0, 0, _PBF_LOCKABLE); + return pb; +} + +static inline struct page * +mem_to_page( + void *addr) +{ + if (((unsigned long)addr < VMALLOC_START) || + ((unsigned long)addr >= VMALLOC_END)) { + return virt_to_page(addr); + } else { + return vmalloc_to_page(addr); + } +} + +int +pagebuf_associate_memory( + page_buf_t *pb, + void *mem, + size_t len) +{ + int rval; + int i = 0; + size_t ptr; + size_t end, end_cur; + off_t offset; + int page_count; + + page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT; + offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK); + if (offset && (len > PAGE_CACHE_SIZE)) + page_count++; + + /* Free any previous set of page pointers */ + if (pb->pb_pages && (pb->pb_pages != pb->pb_page_array)) { + kfree(pb->pb_pages); + } + pb->pb_pages = NULL; + pb->pb_addr = mem; + + rval = _pagebuf_get_pages(pb, page_count, 0); + if (rval) + return rval; + + pb->pb_offset = offset; + ptr = (size_t) mem & PAGE_CACHE_MASK; + end = PAGE_CACHE_ALIGN((size_t) mem + len); + end_cur = end; + /* set up first page */ + pb->pb_pages[0] = mem_to_page(mem); + + ptr += PAGE_CACHE_SIZE; + pb->pb_page_count = ++i; + while (ptr < end) { + pb->pb_pages[i] = mem_to_page((void *)ptr); + pb->pb_page_count = ++i; + ptr += PAGE_CACHE_SIZE; + } + pb->pb_locked = 0; + + pb->pb_count_desired = pb->pb_buffer_length = len; + pb->pb_flags |= PBF_MAPPED | _PBF_PRIVATE_BH; + + return 0; +} + +page_buf_t * +pagebuf_get_no_daddr( + size_t len, + pb_target_t *target) +{ + int rval; + void *rmem = NULL; + page_buf_flags_t flags = _PBF_LOCKABLE | PBF_FORCEIO; + page_buf_t *pb; + size_t tlen = 0; + + if (len > 0x20000) + return(NULL); + + pb = pagebuf_allocate(flags); + if (!pb) + return NULL; + + _pagebuf_initialize(pb, target, 0, len, flags); + + do { + if (tlen == 0) { + tlen = len; /* first time */ + } else { + kfree(rmem); /* free the mem from the previous try */ + tlen <<= 1; /* double the size and try again */ + } + if ((rmem = kmalloc(tlen, GFP_KERNEL)) == 0) { + pagebuf_free(pb); + return NULL; + } + } while ((size_t)rmem != ((size_t)rmem & ~target->pbr_smask)); + + if ((rval = pagebuf_associate_memory(pb, rmem, len)) != 0) { + kfree(rmem); + pagebuf_free(pb); + return NULL; + } + /* otherwise pagebuf_free just ignores it */ + pb->pb_flags |= _PBF_MEM_ALLOCATED; + PB_CLEAR_OWNER(pb); + up(&pb->pb_sema); /* Return unlocked pagebuf */ + + PB_TRACE(pb, PB_TRACE_REC(no_daddr), rmem); + + return pb; +} + + +/* + * pagebuf_hold + * + * Increment reference count on buffer, to hold the buffer concurrently + * with another thread which may release (free) the buffer asynchronously. + * + * Must hold the buffer already to call this function. + */ +void +pagebuf_hold( + page_buf_t *pb) +{ + atomic_inc(&pb->pb_hold); + PB_TRACE(pb, PB_TRACE_REC(hold), 0); +} + +/* + * pagebuf_free + * + * pagebuf_free releases the specified buffer. The modification + * state of any associated pages is left unchanged. 
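+ * Note the division of labour with pagebuf_rele() below: ordinary
+ * users drop their reference with pagebuf_rele(), and only the last
+ * release frees the object, e.g. (illustrative only):
+ *
+ *	pb = pagebuf_get(target, blkno, nblks, PBF_LOCK);
+ *	...use the buffer...
+ *	pagebuf_unlock(pb);
+ *	pagebuf_rele(pb);	// frees pb here if this was the last
+ *				// hold and it is not delayed-write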
+ */
+void
+pagebuf_free(
+ page_buf_t *pb)
+{
+ if (pb->pb_flags & _PBF_LOCKABLE) {
+ pb_hash_t *h = pb_hash(pb);
+
+ spin_lock(&h->pb_hash_lock);
+ _pagebuf_free_object(h, pb);
+ } else {
+ _pagebuf_free_object(NULL, pb);
+ }
+}
+
+/*
+ * pagebuf_rele
+ *
+ * pagebuf_rele releases a hold on the specified buffer. If the
+ * hold count is 1, pagebuf_rele calls pagebuf_free.
+ */
+void
+pagebuf_rele(
+ page_buf_t *pb)
+{
+ pb_hash_t *h;
+
+ PB_TRACE(pb, PB_TRACE_REC(rele), pb->pb_relse);
+ if (pb->pb_flags & _PBF_LOCKABLE) {
+ h = pb_hash(pb);
+ spin_lock(&h->pb_hash_lock);
+ } else {
+ h = NULL;
+ }
+
+ if (atomic_dec_and_test(&pb->pb_hold)) {
+ int do_free = 1;
+
+ if (pb->pb_relse) {
+ atomic_inc(&pb->pb_hold);
+ if (h)
+ spin_unlock(&h->pb_hash_lock);
+ (*(pb->pb_relse)) (pb);
+ do_free = 0;
+ }
+ if (pb->pb_flags & PBF_DELWRI) {
+ pb->pb_flags |= PBF_ASYNC;
+ atomic_inc(&pb->pb_hold);
+ if (h && do_free)
+ spin_unlock(&h->pb_hash_lock);
+ pagebuf_delwri_queue(pb, 0);
+ do_free = 0;
+ } else if (pb->pb_flags & PBF_FS_MANAGED) {
+ if (h)
+ spin_unlock(&h->pb_hash_lock);
+ do_free = 0;
+ }
+
+ if (do_free) {
+ _pagebuf_free_object(h, pb);
+ }
+ } else if (h) {
+ spin_unlock(&h->pb_hash_lock);
+ }
+}
+
+
+/*
+ * Pinning Buffer Storage in Memory
+ */
+
+/*
+ * pagebuf_pin
+ *
+ * pagebuf_pin locks all of the memory represented by a buffer in
+ * memory. Multiple calls to pagebuf_pin and pagebuf_unpin, for
+ * the same or different buffers affecting a given page, will
+ * properly count the number of outstanding "pin" requests. The
+ * buffer may be released after the pagebuf_pin and a different
+ * buffer used when calling pagebuf_unpin, if desired.
+ * pagebuf_pin should be used by the file system when it wants to be
+ * assured that no attempt will be made to force the affected
+ * memory to disk. It does not assure that a given logical page
+ * will not be moved to a different physical page.
+ */
+void
+pagebuf_pin(
+ page_buf_t *pb)
+{
+ atomic_inc(&pb->pb_pin_count);
+ PB_TRACE(pb, PB_TRACE_REC(pin), pb->pb_pin_count.counter);
+}
+
+/*
+ * pagebuf_unpin
+ *
+ * pagebuf_unpin reverses the locking of memory performed by
+ * pagebuf_pin. Note that both functions affect the logical
+ * pages associated with the buffer, not the buffer itself.
+ */
+void
+pagebuf_unpin(
+ page_buf_t *pb)
+{
+ if (atomic_dec_and_test(&pb->pb_pin_count)) {
+ wake_up_all(&pb->pb_waiters);
+ }
+ PB_TRACE(pb, PB_TRACE_REC(unpin), pb->pb_pin_count.counter);
+}
+
+int
+pagebuf_ispin(
+ page_buf_t *pb)
+{
+ return atomic_read(&pb->pb_pin_count);
+}
+
+/*
+ * pagebuf_wait_unpin
+ *
+ * pagebuf_wait_unpin waits until all of the memory associated
+ * with the buffer is no longer locked in memory. It returns
+ * immediately if none of the affected pages are locked.
+ */
+static inline void
+_pagebuf_wait_unpin(
+ page_buf_t *pb)
+{
+ DECLARE_WAITQUEUE (wait, current);
+
+ if (atomic_read(&pb->pb_pin_count) == 0)
+ return;
+
+ add_wait_queue(&pb->pb_waiters, &wait);
+ for (;;) {
+ current->state = TASK_UNINTERRUPTIBLE;
+ if (atomic_read(&pb->pb_pin_count) == 0) {
+ break;
+ }
+ pagebuf_run_queues(pb);
+ schedule();
+ }
+ remove_wait_queue(&pb->pb_waiters, &wait);
+ current->state = TASK_RUNNING;
+}
+
+
+/*
+ * Buffer Utility Routines
+ */
+
+/*
+ * pagebuf_iodone
+ *
+ * pagebuf_iodone marks a buffer for which I/O is in progress
+ * done with respect to that I/O. The pb_iodone routine, if
+ * present, will be called as a side-effect.
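+ * A completion path might look like this (illustrative only;
+ * my_iodone is a hypothetical callback, not part of this patch):
+ *
+ *	static void my_iodone(page_buf_t *pb)
+ *	{
+ *		if (pagebuf_geterror(pb))
+ *			...handle the failed I/O...
+ *		pagebuf_rele(pb);
+ *	}
+ *	...
+ *	pb->pb_iodone = my_iodone;
+ *	...
+ *	pagebuf_iodone(pb, 0, 1);	// run callback from a daemon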
+ */
+void
+pagebuf_iodone_sched(
+ void *v)
+{
+ page_buf_t *pb = (page_buf_t *)v;
+
+ if (pb->pb_iodone) {
+ (*(pb->pb_iodone)) (pb);
+ return;
+ }
+
+ if (pb->pb_flags & PBF_ASYNC) {
+ if ((pb->pb_flags & _PBF_LOCKABLE) && !pb->pb_relse)
+ pagebuf_unlock(pb);
+ pagebuf_rele(pb);
+ }
+}
+
+void
+pagebuf_iodone(
+ page_buf_t *pb,
+ int dataio,
+ int schedule)
+{
+ pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
+ if (pb->pb_error == 0) {
+ pb->pb_flags &= ~(PBF_PARTIAL | PBF_NONE);
+ }
+
+ PB_TRACE(pb, PB_TRACE_REC(done), pb->pb_iodone);
+
+ if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
+ if (schedule) {
+ int daemon = CPU_TO_DAEMON(smp_processor_id());
+
+ INIT_TQUEUE(&pb->pb_iodone_sched,
+ pagebuf_iodone_sched, (void *)pb);
+ queue_task(&pb->pb_iodone_sched, dataio ?
+ &pagebuf_dataiodone_tq[daemon] :
+ &pagebuf_logiodone_tq[daemon]);
+ wake_up(dataio ?
+ &pagebuf_dataiodone_wait[daemon] :
+ &pagebuf_logiodone_wait[daemon]);
+ } else {
+ pagebuf_iodone_sched(pb);
+ }
+ } else {
+ up(&pb->pb_iodonesema);
+ }
+}
+
+/*
+ * pagebuf_ioerror
+ *
+ * pagebuf_ioerror sets the error code for a buffer.
+ */
+void
+pagebuf_ioerror( /* mark/clear buffer error flag */
+ page_buf_t *pb, /* buffer to mark */
+ unsigned int error) /* error to store (0 if none) */
+{
+ pb->pb_error = error;
+ PB_TRACE(pb, PB_TRACE_REC(ioerror), error);
+}
+
+/*
+ * pagebuf_iostart
+ *
+ * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
+ * If necessary, it will arrange for any disk space allocation required,
+ * and it will break up the request if the block mappings require it.
+ * The pb_iodone routine in the buffer supplied will only be called
+ * when all of the subsidiary I/O requests, if any, have been completed.
+ * pagebuf_iostart calls the pagebuf_ioinitiate routine or
+ * pagebuf_iorequest, if the former routine is not defined, to start
+ * the I/O on a given low-level request.
+ */
+int
+pagebuf_iostart( /* start I/O on a buffer */
+ page_buf_t *pb, /* buffer to start */
+ page_buf_flags_t flags) /* PBF_LOCK, PBF_ASYNC, PBF_READ, */
+ /* PBF_WRITE, PBF_DELWRI, */
+ /* PBF_SYNC, PBF_DONT_BLOCK */
+{
+ int status = 0;
+
+ PB_TRACE(pb, PB_TRACE_REC(iostart), flags);
+
+ if (flags & PBF_DELWRI) {
+ pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
+ pb->pb_flags |= flags &
+ (PBF_DELWRI | PBF_ASYNC | PBF_SYNC);
+ pagebuf_delwri_queue(pb, 1);
+ return status;
+ }
+
+ pb->pb_flags &=
+ ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI|PBF_READ_AHEAD);
+ pb->pb_flags |= flags &
+ (PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_READ_AHEAD);
+
+ BUG_ON(pb->pb_bn == PAGE_BUF_DADDR_NULL);
+
+ /* For writes we call an internal function which checks for a
+ * filesystem-specific callout function and executes it.
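+ * (__pagebuf_iorequest() dispatches to pb->pb_strat when one is set.)
+ * A filesystem can interpose on writes like this (illustrative only;
+ * my_strat is a hypothetical callout, not part of this patch):
+ *
+ *	static int my_strat(page_buf_t *pb)
+ *	{
+ *		...filesystem-specific work, e.g. write logging...
+ *		return pagebuf_iorequest(pb);
+ *	}
+ *	...
+ *	pb->pb_strat = my_strat;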
+ */ + if (flags & PBF_WRITE) { + status = __pagebuf_iorequest(pb); + } else { + status = pagebuf_iorequest(pb); + } + + /* Wait for I/O if we are not an async request */ + if ((status == 0) && (flags & PBF_ASYNC) == 0) { + status = pagebuf_iowait(pb); + } + + return status; +} + + +/* + * Helper routines for pagebuf_iorequest (pagebuf I/O completion) + */ + +STATIC __inline__ int +_pagebuf_iolocked( + page_buf_t *pb) +{ + ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE)); + if (pb->pb_flags & PBF_READ) + return pb->pb_locked; + return ((pb->pb_flags & _PBF_LOCKABLE) == 0); +} + +STATIC void +_pagebuf_iodone( + page_buf_t *pb, + int fullpage, + int schedule) +{ + if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { + struct page *page; + int i; + + for (i = 0; i < pb->pb_page_count; i++) { + page = pb->pb_pages[i]; + if (fullpage && !PageError(page)) + SetPageUptodate(page); + if (_pagebuf_iolocked(pb)) + unlock_page(page); + } + pb->pb_locked = 0; + pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), schedule); + } +} + +STATIC void +_end_io_pagebuf( + struct buffer_head *bh, + int uptodate, + int fullpage) +{ + struct page *page = bh->b_page; + page_buf_t *pb = (page_buf_t *)bh->b_private; + + mark_buffer_uptodate(bh, uptodate); + put_bh(bh); + + if (!uptodate) { + SetPageError(page); + pb->pb_error = EIO; + } + + if (fullpage) { + unlock_buffer(bh); + _pagebuf_free_bh(bh); + } else { + static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED; + struct buffer_head *bp; + unsigned long flags; + + spin_lock_irqsave(&page_uptodate_lock, flags); + clear_bit(BH_Async, &bh->b_state); + unlock_buffer(bh); + for (bp = bh->b_this_page; bp != bh; bp = bp->b_this_page) { + if (buffer_locked(bp)) { + if (buffer_async(bp)) + break; + } else if (!buffer_uptodate(bp)) + break; + } + spin_unlock_irqrestore(&page_uptodate_lock, flags); + if (bp == bh && !PageError(page)) + SetPageUptodate(page); + } + + _pagebuf_iodone(pb, fullpage, 1); +} + +STATIC void +_pagebuf_end_io_complete_pages( + struct buffer_head *bh, + int uptodate) +{ + _end_io_pagebuf(bh, uptodate, 1); +} + +STATIC void +_pagebuf_end_io_partial_pages( + struct buffer_head *bh, + int uptodate) +{ + _end_io_pagebuf(bh, uptodate, 0); +} + + +/* + * Initiate I/O on part of a page we are interested in + */ +STATIC void +_pagebuf_page_io( + struct page *page, /* Page structure we are dealing with */ + pb_target_t *pbr, /* device parameters (bsz, ssz, dev) */ + page_buf_t *pb, /* pagebuf holding it, can be NULL */ + page_buf_daddr_t bn, /* starting block number */ + size_t pg_offset, /* starting offset in page */ + size_t pg_length, /* count of data to process */ + int locking, /* page locking in use */ + int rw, /* read/write operation */ + int flush) +{ + size_t sector; + size_t blk_length = 0; + struct buffer_head *bh, *head, *bufferlist[MAX_BUF_PER_PAGE]; + int sector_shift = pbr->pbr_sshift; + int i = 0, cnt = 0; + int public_bh = 0; + int multi_ok; + + if ((pbr->pbr_bsize < PAGE_CACHE_SIZE) && + !(pb->pb_flags & _PBF_PRIVATE_BH)) { + int cache_ok; + + cache_ok = !((pb->pb_flags & PBF_FORCEIO) || (rw == WRITE)); + public_bh = multi_ok = 1; + + if (!page_has_buffers(page)) { + if (!locking) { + lock_page(page); + if (!page_has_buffers(page)) + create_empty_buffers(page, + pbr->pbr_kdev, + 1 << sector_shift); + unlock_page(page); + } else { + create_empty_buffers(page, pbr->pbr_kdev, + 1 << sector_shift); + } + } + + /* Find buffer_heads belonging to just this pagebuf */ + bh = head = page_buffers(page); + do { + if (buffer_uptodate(bh) && 
cache_ok) + continue; + blk_length = i << sector_shift; + if (blk_length < pg_offset) + continue; + if (blk_length >= pg_offset + pg_length) + break; + + lock_buffer(bh); + get_bh(bh); + bh->b_size = 1 << sector_shift; + bh->b_blocknr = bn + (i - (pg_offset >> sector_shift)); + bufferlist[cnt++] = bh; + } while (i++, (bh = bh->b_this_page) != head); + + goto request; + } + + /* Calculate the block offsets and length we will be using */ + if (pg_offset) { + size_t block_offset; + + block_offset = pg_offset >> sector_shift; + block_offset = pg_offset - (block_offset << sector_shift); + blk_length = (pg_length + block_offset + pbr->pbr_smask) >> + sector_shift; + } else { + blk_length = (pg_length + pbr->pbr_smask) >> sector_shift; + } + + /* This will attempt to make a request bigger than the sector + * size if we are well aligned. + */ + switch (pb->pb_target->pbr_flags) { + case 0: + sector = blk_length << sector_shift; + blk_length = 1; + break; + case PBR_ALIGNED_ONLY: + if ((pg_offset == 0) && (pg_length == PAGE_CACHE_SIZE) && + (((unsigned int) bn) & BN_ALIGN_MASK) == 0) { + sector = blk_length << sector_shift; + blk_length = 1; + break; + } + case PBR_SECTOR_ONLY: + /* Fallthrough, same as default */ + default: + sector = 1 << sector_shift; + } + + /* If we are doing I/O larger than the bh->b_size field then + * we need to split this request up. + */ + while (sector > ((1UL << NBBY * sizeof(bh->b_size)) - 1)) { + sector >>= 1; + blk_length++; + } + + multi_ok = (blk_length != 1); + + for (; blk_length > 0; blk_length--, pg_offset += sector) { + bh = kmem_cache_alloc(bh_cachep, SLAB_NOFS); + if (!bh) + bh = _pagebuf_get_prealloc_bh(); + memset(bh, 0, sizeof(*bh)); + bh->b_size = sector; + bh->b_blocknr = bn++; + bh->b_dev = pbr->pbr_kdev; + set_bit(BH_Lock, &bh->b_state); + set_bh_page(bh, page, pg_offset); + init_waitqueue_head(&bh->b_wait); + atomic_set(&bh->b_count, 1); + bufferlist[cnt++] = bh; + } + +request: + if (cnt) { + void (*callback)(struct buffer_head *, int); + + callback = (multi_ok && public_bh) ? 
+ _pagebuf_end_io_partial_pages : + _pagebuf_end_io_complete_pages; + + /* Account for additional buffers in progress */ + atomic_add(cnt, &pb->pb_io_remaining); + +#ifdef RQ_WRITE_ORDERED + if (flush) + set_bit(BH_Ordered_Flush, &bufferlist[cnt-1]->b_state); +#endif + + for (i = 0; i < cnt; i++) { + bh = bufferlist[i]; + init_buffer(bh, callback, pb); + bh->b_rdev = bh->b_dev; + bh->b_rsector = bh->b_blocknr; + set_bit(BH_Mapped, &bh->b_state); + set_bit(BH_Async, &bh->b_state); + set_bit(BH_Req, &bh->b_state); + if (rw == WRITE) + set_bit(BH_Uptodate, &bh->b_state); + generic_make_request(rw, bh); + } + } else { + if (locking) + unlock_page(page); + } +} + +STATIC void +_pagebuf_page_apply( + page_buf_t *pb, + loff_t offset, + struct page *page, + size_t pg_offset, + size_t pg_length, + int last) +{ + page_buf_daddr_t bn = pb->pb_bn; + pb_target_t *pbr = pb->pb_target; + loff_t pb_offset; + int locking; + + ASSERT(page); + ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE)); + + if ((pbr->pbr_bsize == PAGE_CACHE_SIZE) && + (pb->pb_buffer_length < PAGE_CACHE_SIZE) && + (pb->pb_flags & PBF_READ) && pb->pb_locked) { + bn -= (pb->pb_offset >> pbr->pbr_sshift); + pg_offset = 0; + pg_length = PAGE_CACHE_SIZE; + } else { + pb_offset = offset - pb->pb_file_offset; + if (pb_offset) { + bn += (pb_offset + pbr->pbr_smask) >> pbr->pbr_sshift; + } + } + + locking = _pagebuf_iolocked(pb); + if (pb->pb_flags & PBF_WRITE) { + if (locking && (pb->pb_locked == 0)) + lock_page(page); + _pagebuf_page_io(page, pbr, pb, bn, + pg_offset, pg_length, locking, WRITE, + last && (pb->pb_flags & PBF_FLUSH)); + } else { + _pagebuf_page_io(page, pbr, pb, bn, + pg_offset, pg_length, locking, READ, 0); + } +} + +/* + * pagebuf_iorequest + * + * pagebuf_iorequest is the core I/O request routine. + * It assumes that the buffer is well-formed and + * mapped and ready for physical I/O, unlike + * pagebuf_iostart() and pagebuf_iophysio(). Those + * routines call the pagebuf_ioinitiate routine to start I/O, + * if it is present, or else call pagebuf_iorequest() + * directly if the pagebuf_ioinitiate routine is not present. + * + * This function will be responsible for ensuring access to the + * pages is restricted whilst I/O is in progress - for locking + * pagebufs the pagebuf lock is the mediator, for non-locking + * pagebufs the pages will be locked. In the locking case we + * need to use the pagebuf lock as multiple meta-data buffers + * will reference the same page. + */ +int +pagebuf_iorequest( /* start real I/O */ + page_buf_t *pb) /* buffer to convey to device */ +{ + PB_TRACE(pb, PB_TRACE_REC(ioreq), 0); + + if (pb->pb_flags & PBF_DELWRI) { + pagebuf_delwri_queue(pb, 1); + return 0; + } + + if (pb->pb_flags & PBF_WRITE) { + _pagebuf_wait_unpin(pb); + } + + /* Set the count to 1 initially, this will stop an I/O + * completion callout which happens before we have started + * all the I/O from calling pagebuf_iodone too early. + */ + atomic_set(&pb->pb_io_remaining, 1); + _pagebuf_ioapply(pb); + _pagebuf_iodone(pb, 0, 0); + return 0; +} + +/* + * pagebuf_iowait + * + * pagebuf_iowait waits for I/O to complete on the buffer supplied. + * It returns immediately if no I/O is pending. In any case, it returns + * the error code, if any, or 0 if there is no error. 
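+ * A synchronous request driven by hand (illustrative only) follows
+ * the same pattern pagebuf_iostart() uses internally:
+ *
+ *	status = pagebuf_iorequest(pb);
+ *	if (status == 0 && !(pb->pb_flags & PBF_ASYNC))
+ *		status = pagebuf_iowait(pb);	// sleeps on pb_iodonesema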
+ */ +int +pagebuf_iowait( + page_buf_t *pb) +{ + PB_TRACE(pb, PB_TRACE_REC(iowait), 0); + pagebuf_run_queues(pb); + down(&pb->pb_iodonesema); + PB_TRACE(pb, PB_TRACE_REC(iowaited), (int)pb->pb_error); + return pb->pb_error; +} + +STATIC void * +pagebuf_mapout_locked( + page_buf_t *pb) +{ + void *old_addr = NULL; + + if (pb->pb_flags & PBF_MAPPED) { + if (pb->pb_flags & _PBF_ADDR_ALLOCATED) + old_addr = pb->pb_addr - pb->pb_offset; + pb->pb_addr = NULL; + pb->pb_flags &= ~(PBF_MAPPED | _PBF_ADDR_ALLOCATED); + } + + return old_addr; /* Caller must free the address space, + * we are under a spin lock, probably + * not safe to do vfree here + */ +} + +caddr_t +pagebuf_offset( + page_buf_t *pb, + size_t offset) +{ + struct page *page; + + offset += pb->pb_offset; + + page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT]; + return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1)); +} + +/* + * pagebuf_iomove + * + * Move data into or out of a buffer. + */ +void +pagebuf_iomove( + page_buf_t *pb, /* buffer to process */ + size_t boff, /* starting buffer offset */ + size_t bsize, /* length to copy */ + caddr_t data, /* data address */ + page_buf_rw_t mode) /* read/write flag */ +{ + size_t bend, cpoff, csize; + struct page *page; + + bend = boff + bsize; + while (boff < bend) { + page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)]; + cpoff = page_buf_poff(boff + pb->pb_offset); + csize = min_t(size_t, + PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff); + + ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); + + switch (mode) { + case PBRW_ZERO: + memset(page_address(page) + cpoff, 0, csize); + break; + case PBRW_READ: + memcpy(data, page_address(page) + cpoff, csize); + break; + case PBRW_WRITE: + memcpy(page_address(page) + cpoff, data, csize); + } + + boff += csize; + data += csize; + } +} + +/* + * _pagebuf_ioapply + * + * Applies _pagebuf_page_apply to each page of the page_buf_t. 
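+ * As a worked example (illustrative only): with 4K pages,
+ * pb_offset = 512 and pb_count_desired = 9728, the loop below makes
+ * three calls to _pagebuf_page_apply:
+ *
+ *	page 0: page_offset = 512, len = 3584
+ *	page 1: page_offset = 0, len = 4096
+ *	page 2: page_offset = 0, len = 2048 (last == 1)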
+ */
+STATIC void
+_pagebuf_ioapply( /* apply function to pages */
+ page_buf_t *pb) /* buffer to examine */
+{
+ int buf_index;
+ loff_t buffer_offset = pb->pb_file_offset;
+ size_t buffer_len = pb->pb_count_desired;
+ size_t page_offset, len;
+ size_t cur_offset, cur_len;
+
+ pagebuf_hold(pb);
+
+ cur_offset = pb->pb_offset;
+ cur_len = buffer_len;
+
+ for (buf_index = 0; buf_index < pb->pb_page_count; buf_index++) {
+ if (cur_len == 0)
+ break;
+ if (cur_offset >= PAGE_CACHE_SIZE) {
+ cur_offset -= PAGE_CACHE_SIZE;
+ continue;
+ }
+
+ page_offset = cur_offset;
+ cur_offset = 0;
+
+ len = PAGE_CACHE_SIZE - page_offset;
+ if (len > cur_len)
+ len = cur_len;
+ cur_len -= len;
+
+ _pagebuf_page_apply(pb, buffer_offset,
+ pb->pb_pages[buf_index], page_offset, len,
+ buf_index+1 == pb->pb_page_count);
+ buffer_offset += len;
+ buffer_len -= len;
+ }
+
+ pagebuf_rele(pb);
+}
+
+
+/*
+ * Pagebuf delayed write buffer handling
+ */
+
+STATIC int pbd_active = 1;
+LIST_HEAD(pbd_delwrite_queue);
+STATIC spinlock_t pbd_delwrite_lock = SPIN_LOCK_UNLOCKED;
+
+STATIC void
+pagebuf_delwri_queue(
+ page_buf_t *pb,
+ int unlock)
+{
+ PB_TRACE(pb, PB_TRACE_REC(delwri_q), unlock);
+ spin_lock(&pbd_delwrite_lock);
+ /* If already in the queue, dequeue and place at tail */
+ if (!list_empty(&pb->pb_list)) {
+ if (unlock) {
+ atomic_dec(&pb->pb_hold);
+ }
+ list_del(&pb->pb_list);
+ }
+
+ list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
+ pb->pb_flushtime = jiffies + pb_params.p_un.age_buffer;
+ spin_unlock(&pbd_delwrite_lock);
+
+ if (unlock && (pb->pb_flags & _PBF_LOCKABLE)) {
+ pagebuf_unlock(pb);
+ }
+}
+
+void
+pagebuf_delwri_dequeue(
+ page_buf_t *pb)
+{
+ PB_TRACE(pb, PB_TRACE_REC(delwri_uq), 0);
+ spin_lock(&pbd_delwrite_lock);
+ list_del_init(&pb->pb_list);
+ pb->pb_flags &= ~PBF_DELWRI;
+ spin_unlock(&pbd_delwrite_lock);
+}
+
+
+/*
+ * The pagebuf iodone daemons
+ */
+
+STATIC int
+pagebuf_iodone_daemon(
+ void *__bind_cpu,
+ const char *name,
+ int pagebuf_daemons[],
+ struct list_head pagebuf_iodone_tq[],
+ wait_queue_head_t pagebuf_iodone_wait[])
+{
+ int bind_cpu, cpu;
+ DECLARE_WAITQUEUE (wait, current);
+
+ bind_cpu = (int) (long)__bind_cpu;
+ cpu = CPU_TO_DAEMON(cpu_logical_map(bind_cpu));
+
+ /* Set up the thread */
+ daemonize();
+
+ /* Avoid signals */
+ spin_lock_irq(&current->sigmask_lock);
+ sigfillset(&current->blocked);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ /* Migrate to the right CPU */
+ migrate_to_cpu(cpu);
+#ifdef __HAVE_NEW_SCHEDULER
+ if (smp_processor_id() != cpu)
+ BUG();
+#else
+ while (smp_processor_id() != cpu)
+ schedule();
+#endif
+
+ sprintf(current->comm, "%s/%d", name, bind_cpu);
+ INIT_LIST_HEAD(&pagebuf_iodone_tq[cpu]);
+ init_waitqueue_head(&pagebuf_iodone_wait[cpu]);
+ __set_current_state(TASK_INTERRUPTIBLE);
+ mb();
+
+ pagebuf_daemons[cpu] = 1;
+
+ for (;;) {
+ add_wait_queue(&pagebuf_iodone_wait[cpu], &wait);
+
+ if (TQ_ACTIVE(pagebuf_iodone_tq[cpu]))
+ __set_task_state(current, TASK_RUNNING);
+ schedule();
+ remove_wait_queue(&pagebuf_iodone_wait[cpu], &wait);
+ run_task_queue(&pagebuf_iodone_tq[cpu]);
+ if (pagebuf_daemons[cpu] == 0)
+ break;
+ __set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ pagebuf_daemons[cpu] = -1;
+ wake_up_interruptible(&pagebuf_iodone_wait[cpu]);
+ return 0;
+}
+
+STATIC void
+pagebuf_runall_queues(
+ struct list_head pagebuf_iodone_tq[])
+{
+ int pcpu, cpu;
+
+ for (cpu = 0; cpu < min(smp_num_cpus, MAX_IO_DAEMONS); cpu++) {
+ pcpu = CPU_TO_DAEMON(cpu_logical_map(cpu));
+
+ run_task_queue(&pagebuf_iodone_tq[pcpu]);
+ }
+}
+
+STATIC int
+pagebuf_logiodone_daemon(
+ void *__bind_cpu)
+{
+ return pagebuf_iodone_daemon(__bind_cpu, "xfslogd", pb_logio_daemons,
+ pagebuf_logiodone_tq, pagebuf_logiodone_wait);
+}
+
+STATIC int
+pagebuf_dataiodone_daemon(
+ void *__bind_cpu)
+{
+ return pagebuf_iodone_daemon(__bind_cpu, "xfsdatad", pb_dataio_daemons,
+ pagebuf_dataiodone_tq, pagebuf_dataiodone_wait);
+}
+
+
+/* Defines for pagebuf daemon */
+DECLARE_WAIT_QUEUE_HEAD(pbd_waitq);
+STATIC int force_flush;
+
+STATIC void
+pagebuf_daemon_wakeup(
+ int flag)
+{
+ force_flush = flag;
+ if (waitqueue_active(&pbd_waitq)) {
+ wake_up_interruptible(&pbd_waitq);
+ }
+}
+
+typedef void (*timeout_fn)(unsigned long);
+
+STATIC int
+pagebuf_daemon(
+ void *data)
+{
+ int count;
+ page_buf_t *pb;
+ struct list_head *curr, *next, tmp;
+ struct timer_list pb_daemon_timer =
+ { {NULL, NULL}, 0, 0, (timeout_fn)pagebuf_daemon_wakeup };
+
+ /* Set up the thread */
+ daemonize();
+
+ /* Avoid signals */
+ spin_lock_irq(&current->sigmask_lock);
+ sigfillset(&current->blocked);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ strcpy(current->comm, "pagebufd");
+ current->flags |= PF_MEMALLOC;
+
+ INIT_LIST_HEAD(&tmp);
+ do {
+ if (pbd_active == 1) {
+ mod_timer(&pb_daemon_timer,
+ jiffies + pb_params.p_un.flush_interval);
+ interruptible_sleep_on(&pbd_waitq);
+ }
+
+ if (pbd_active == 0) {
+ del_timer_sync(&pb_daemon_timer);
+ }
+
+ spin_lock(&pbd_delwrite_lock);
+
+ count = 0;
+ list_for_each_safe(curr, next, &pbd_delwrite_queue) {
+ pb = list_entry(curr, page_buf_t, pb_list);
+
+ PB_TRACE(pb, PB_TRACE_REC(walkq1), pagebuf_ispin(pb));
+
+ if ((pb->pb_flags & PBF_DELWRI) && !pagebuf_ispin(pb) &&
+ (((pb->pb_flags & _PBF_LOCKABLE) == 0) ||
+ !pagebuf_cond_lock(pb))) {
+
+ if (!force_flush &&
+ time_before(jiffies, pb->pb_flushtime)) {
+ pagebuf_unlock(pb);
+ break;
+ }
+
+ list_del(&pb->pb_list);
+ list_add(&pb->pb_list, &tmp);
+
+ count++;
+ }
+ }
+
+ spin_unlock(&pbd_delwrite_lock);
+ while (!list_empty(&tmp)) {
+ pb = list_entry(tmp.next, page_buf_t, pb_list);
+ list_del_init(&pb->pb_list);
+ pb->pb_flags &= ~PBF_DELWRI;
+ pb->pb_flags |= PBF_WRITE;
+
+ __pagebuf_iorequest(pb);
+ }
+
+ if (as_list_len > 0)
+ purge_addresses();
+ if (count)
+ pagebuf_run_queues(NULL);
+
+ force_flush = 0;
+ } while (pbd_active == 1);
+
+ pbd_active = -1;
+ wake_up_interruptible(&pbd_waitq);
+
+ return 0;
+}
+
+void
+pagebuf_delwri_flush(
+ pb_target_t *target,
+ u_long flags,
+ int *pinptr)
+{
+ page_buf_t *pb;
+ struct list_head *curr, *next, tmp;
+ int pincount = 0;
+ int flush_cnt = 0;
+
+ spin_lock(&pbd_delwrite_lock);
+ INIT_LIST_HEAD(&tmp);
+
+ pagebuf_runall_queues(pagebuf_dataiodone_tq);
+
+ list_for_each_safe(curr, next, &pbd_delwrite_queue) {
+ pb = list_entry(curr, page_buf_t, pb_list);
+
+ /*
+ * Skip other targets, markers and in progress buffers
+ */
+
+ if ((pb->pb_flags == 0) || (pb->pb_target != target) ||
+ !(pb->pb_flags & PBF_DELWRI)) {
+ continue;
+ }
+
+ PB_TRACE(pb, PB_TRACE_REC(walkq2), pagebuf_ispin(pb));
+ if (pagebuf_ispin(pb)) {
+ pincount++;
+ continue;
+ }
+
+ if (flags & PBDF_TRYLOCK) {
+ if (!pagebuf_cond_lock(pb)) {
+ pincount++;
+ continue;
+ }
+ }
+
+ list_del_init(&pb->pb_list);
+ if (flags & PBDF_WAIT) {
+ list_add(&pb->pb_list, &tmp);
+ pb->pb_flags &= ~PBF_ASYNC;
+ }
+
+ spin_unlock(&pbd_delwrite_lock);
+
+ if ((flags & PBDF_TRYLOCK) == 0) {
+ pagebuf_lock(pb);
+ }
+
+ pb->pb_flags &= ~PBF_DELWRI;
+ pb->pb_flags |= PBF_WRITE;
+
+ __pagebuf_iorequest(pb);
+ if (++flush_cnt > 32) {
+
pagebuf_run_queues(NULL); + flush_cnt = 0; + } + + spin_lock(&pbd_delwrite_lock); + } + + spin_unlock(&pbd_delwrite_lock); + + pagebuf_run_queues(NULL); + + if (pinptr) + *pinptr = pincount; + + if ((flags & PBDF_WAIT) == 0) + return; + + while (!list_empty(&tmp)) { + pb = list_entry(tmp.next, page_buf_t, pb_list); + + list_del_init(&pb->pb_list); + pagebuf_iowait(pb); + if (!pb->pb_relse) + pagebuf_unlock(pb); + pagebuf_rele(pb); + } +} + +STATIC int +pagebuf_daemon_start(void) +{ + int cpu, pcpu; + + kernel_thread(pagebuf_daemon, NULL, CLONE_FS|CLONE_FILES|CLONE_VM); + + for (cpu = 0; cpu < min(smp_num_cpus, MAX_IO_DAEMONS); cpu++) { + pcpu = CPU_TO_DAEMON(cpu_logical_map(cpu)); + + if (kernel_thread(pagebuf_logiodone_daemon, + (void *)(long) cpu, + CLONE_FS|CLONE_FILES|CLONE_VM) < 0) { + printk("pagebuf_logiodone daemon failed to start\n"); + } else { + while (!pb_logio_daemons[pcpu]) + yield(); + } + } + for (cpu = 0; cpu < min(smp_num_cpus, MAX_IO_DAEMONS); cpu++) { + pcpu = CPU_TO_DAEMON(cpu_logical_map(cpu)); + + if (kernel_thread(pagebuf_dataiodone_daemon, + (void *)(long) cpu, + CLONE_FS|CLONE_FILES|CLONE_VM) < 0) { + printk("pagebuf_dataiodone daemon failed to start\n"); + } else { + while (!pb_dataio_daemons[pcpu]) + yield(); + } + } + return 0; +} + +/* + * pagebuf_daemon_stop + * + * Note: do not mark as __exit, it is called from pagebuf_terminate. + */ +STATIC void +pagebuf_daemon_stop(void) +{ + int cpu, pcpu; + + pbd_active = 0; + + wake_up_interruptible(&pbd_waitq); + wait_event_interruptible(pbd_waitq, pbd_active); + + for (pcpu = 0; pcpu < min(smp_num_cpus, MAX_IO_DAEMONS); pcpu++) { + cpu = CPU_TO_DAEMON(cpu_logical_map(pcpu)); + + pb_logio_daemons[cpu] = 0; + wake_up(&pagebuf_logiodone_wait[cpu]); + wait_event_interruptible(pagebuf_logiodone_wait[cpu], + pb_logio_daemons[cpu] == -1); + + pb_dataio_daemons[cpu] = 0; + wake_up(&pagebuf_dataiodone_wait[cpu]); + wait_event_interruptible(pagebuf_dataiodone_wait[cpu], + pb_dataio_daemons[cpu] == -1); + } +} + + +/* + * Pagebuf sysctl interface + */ + +STATIC int +pb_stats_clear_handler( + ctl_table *ctl, + int write, + struct file *filp, + void *buffer, + size_t *lenp) +{ + int ret; + int *valp = ctl->data; + + ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp); + + if (!ret && write && *valp) { + printk("XFS Clearing pbstats\n"); + memset(&pbstats, 0, sizeof(pbstats)); + pb_params.p_un.stats_clear = 0; + } + + return ret; +} + +STATIC struct ctl_table_header *pagebuf_table_header; + +STATIC ctl_table pagebuf_table[] = { + {PB_FLUSH_INT, "flush_int", &pb_params.data[0], + sizeof(ulong), 0644, NULL, &proc_doulongvec_ms_jiffies_minmax, + &sysctl_intvec, NULL, &pagebuf_min[0], &pagebuf_max[0]}, + + {PB_FLUSH_AGE, "flush_age", &pb_params.data[1], + sizeof(ulong), 0644, NULL, &proc_doulongvec_ms_jiffies_minmax, + &sysctl_intvec, NULL, &pagebuf_min[1], &pagebuf_max[1]}, + + {PB_STATS_CLEAR, "stats_clear", &pb_params.data[2], + sizeof(ulong), 0644, NULL, &pb_stats_clear_handler, + &sysctl_intvec, NULL, &pagebuf_min[2], &pagebuf_max[2]}, + +#ifdef PAGEBUF_TRACE + {PB_DEBUG, "debug", &pb_params.data[3], + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &pagebuf_min[3], &pagebuf_max[3]}, +#endif + {0} +}; + +STATIC ctl_table pagebuf_dir_table[] = { + {VM_PAGEBUF, "pagebuf", NULL, 0, 0555, pagebuf_table}, + {0} +}; + +STATIC ctl_table pagebuf_root_table[] = { + {CTL_VM, "vm", NULL, 0, 0555, pagebuf_dir_table}, + {0} +}; + +#ifdef CONFIG_PROC_FS +STATIC int +pagebuf_readstats( + char *buffer, + char 
**start, + off_t offset, + int count, + int *eof, + void *data) +{ + int i, len; + + len = 0; + len += sprintf(buffer + len, "pagebuf"); + for (i = 0; i < sizeof(pbstats) / sizeof(u_int32_t); i++) { + len += sprintf(buffer + len, " %u", + *(((u_int32_t*)&pbstats) + i)); + } + buffer[len++] = '\n'; + + if (offset >= len) { + *start = buffer; + *eof = 1; + return 0; + } + *start = buffer + offset; + if ((len -= offset) > count) + return count; + *eof = 1; + + return len; +} +#endif /* CONFIG_PROC_FS */ + +STATIC void +pagebuf_shaker(void) +{ + pagebuf_daemon_wakeup(1); +} + + +/* + * Initialization and Termination + */ + +int __init +pagebuf_init(void) +{ + int i; + + pagebuf_table_header = register_sysctl_table(pagebuf_root_table, 1); + +#ifdef CONFIG_PROC_FS + if (proc_mkdir("fs/pagebuf", 0)) + create_proc_read_entry( + "fs/pagebuf/stat", 0, 0, pagebuf_readstats, NULL); +#endif + + pagebuf_cache = kmem_cache_create("page_buf_t", sizeof(page_buf_t), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (pagebuf_cache == NULL) { + printk("pagebuf: couldn't init pagebuf cache\n"); + pagebuf_terminate(); + return -ENOMEM; + } + + if (_pagebuf_prealloc_bh(NR_RESERVED_BH) < NR_RESERVED_BH) { + printk("pagebuf: couldn't pre-allocate %d buffer heads\n", + NR_RESERVED_BH); + pagebuf_terminate(); + return -ENOMEM; + } + + init_waitqueue_head(&pb_resv_bh_wait); + + for (i = 0; i < NHASH; i++) { + spin_lock_init(&pbhash[i].pb_hash_lock); + INIT_LIST_HEAD(&pbhash[i].pb_hash); + } + +#ifdef PAGEBUF_TRACE + pb_trace.buf = (pagebuf_trace_t *)kmalloc( + PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t), GFP_KERNEL); + memset(pb_trace.buf, 0, PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t)); + pb_trace.start = 0; + pb_trace.end = PB_TRACE_BUFSIZE - 1; +#endif + + pagebuf_daemon_start(); + kmem_shake_register(pagebuf_shaker); + return 0; +} + +/* + * pagebuf_terminate. + * + * Note: do not mark as __exit, this is also called from the __init code. + */ +void +pagebuf_terminate(void) +{ + pagebuf_daemon_stop(); + + kmem_cache_destroy(pagebuf_cache); + kmem_shake_deregister(pagebuf_shaker); + + unregister_sysctl_table(pagebuf_table_header); +#ifdef CONFIG_PROC_FS + remove_proc_entry("fs/pagebuf/stat", NULL); + remove_proc_entry("fs/pagebuf", NULL); +#endif +} + + +/* + * Module management (for kernel debugger module) + */ +EXPORT_SYMBOL(pagebuf_offset); +EXPORT_SYMBOL(pbd_delwrite_queue); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/pagebuf/page_buf.h linux.21rc5-ac2/fs/xfs/pagebuf/page_buf.h --- linux.21rc5/fs/xfs/pagebuf/page_buf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/pagebuf/page_buf.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Written by Steve Lord, Jim Mostek, Russell Cattelan at SGI + */ + +#ifndef __PAGE_BUF_H__ +#define __PAGE_BUF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * Turn this on to get pagebuf lock ownership +#define PAGEBUF_LOCK_TRACKING +*/ + +/* + * Base types + */ + +/* daddr must be signed since -1 is used for bmaps that are not yet allocated */ +typedef loff_t page_buf_daddr_t; + +#define PAGE_BUF_DADDR_NULL ((page_buf_daddr_t) (-1LL)) + +typedef size_t page_buf_dsize_t; /* size of buffer in blocks */ + +#define page_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE) +#define page_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) +#define page_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT) +#define page_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK) + +typedef enum page_buf_rw_e { + PBRW_READ = 1, /* transfer into target memory */ + PBRW_WRITE = 2, /* transfer from target memory */ + PBRW_ZERO = 3 /* Zero target memory */ +} page_buf_rw_t; + +typedef enum { /* pbm_flags values */ + PBMF_EOF = 0x01, /* mapping contains EOF */ + PBMF_HOLE = 0x02, /* mapping covers a hole */ + PBMF_DELAY = 0x04, /* mapping covers delalloc region */ + PBMF_UNWRITTEN = 0x20 /* mapping covers allocated */ + /* but uninitialized file data */ +} bmap_flags_t; + +typedef enum { + /* base extent manipulation calls */ + BMAP_READ = (1 << 0), /* read extents */ + BMAP_WRITE = (1 << 1), /* create extents */ + BMAP_ALLOCATE = (1 << 2), /* delayed allocate to real extents */ + BMAP_UNWRITTEN = (1 << 3), /* unwritten extents to real extents */ + /* modifiers */ + BMAP_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ + BMAP_DIRECT = (1 << 5), /* direct instead of buffered write */ + BMAP_MMAP = (1 << 6), /* allocate for mmap write */ + BMAP_SYNC = (1 << 7), /* sync write */ + BMAP_TRYLOCK = (1 << 8), /* non-blocking request */ +} bmapi_flags_t; + +typedef enum page_buf_flags_e { /* pb_flags values */ + PBF_READ = (1 << 0), /* buffer intended for reading from device */ + PBF_WRITE = (1 << 1), /* buffer intended for writing to device */ + PBF_MAPPED = (1 << 2), /* buffer mapped (pb_addr valid) */ + PBF_PARTIAL = (1 << 3), /* buffer partially read */ + PBF_ASYNC = (1 << 4), /* initiator will not wait for completion */ + PBF_NONE = (1 << 5), /* buffer not read at all */ + PBF_DELWRI = (1 << 6), /* buffer has dirty pages */ + PBF_FREED = (1 << 7), /* buffer has been freed and is invalid */ + PBF_SYNC = (1 << 8), /* force updates to disk */ + PBF_MAPPABLE = (1 << 9),/* use directly-addressable pages */ + PBF_STALE = (1 << 10), /* buffer has been staled, do not find it */ + PBF_FS_MANAGED = (1 << 11), /* filesystem controls freeing memory */ + PBF_FS_DATAIOD = (1 << 12), /* schedule IO completion on fs datad */ + + /* flags used only as arguments to access routines */ + PBF_LOCK = (1 << 13), /* lock requested */ + PBF_TRYLOCK = (1 << 14), /* lock 
requested, but do not wait */
+ PBF_DONT_BLOCK = (1 << 15), /* do not block in current thread */
+
+ /* flags used only internally */
+ _PBF_LOCKABLE = (1 << 16), /* page_buf_t may be locked */
+ _PBF_PRIVATE_BH = (1 << 17), /* do not use public buffer heads */
+ _PBF_ALL_PAGES_MAPPED = (1 << 18), /* all pages in range mapped */
+ _PBF_ADDR_ALLOCATED = (1 << 19), /* pb_addr space was allocated */
+ _PBF_MEM_ALLOCATED = (1 << 20), /* pb_mem+underlying pages alloc'd */
+
+ PBF_FORCEIO = (1 << 21),
+ PBF_FLUSH = (1 << 22), /* flush disk write cache */
+ PBF_READ_AHEAD = (1 << 23),
+
+} page_buf_flags_t;
+
+#define PBF_UPDATE (PBF_READ | PBF_WRITE)
+#define PBF_NOT_DONE(pb) (((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) != 0)
+#define PBF_DONE(pb) (((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) == 0)
+
+#define PBR_SECTOR_ONLY 1 /* only use sector size buffer heads */
+#define PBR_ALIGNED_ONLY 2 /* only use aligned I/O */
+
+typedef struct pb_target {
+ int pbr_flags;
+ dev_t pbr_dev;
+ kdev_t pbr_kdev;
+ struct block_device *pbr_bdev;
+ struct address_space *pbr_mapping;
+ unsigned int pbr_bsize;
+ unsigned int pbr_sshift;
+ size_t pbr_smask;
+} pb_target_t;
+
+/*
+ * page_buf_bmap_t: File system I/O map
+ *
+ * The pbm_bn, pbm_offset and pbm_length fields are expressed in disk blocks.
+ * The pbm_length field specifies the size of the underlying backing store
+ * for the particular mapping.
+ *
+ * The pbm_bsize, pbm_size and pbm_delta fields are in bytes and indicate
+ * the size of the mapping, the number of bytes that are valid to access
+ * (read or write), and the offset into the mapping, given the offset
+ * supplied to the file I/O map routine. pbm_delta is the offset of the
+ * desired data from the beginning of the mapping.
+ *
+ * When a request is made to read beyond the logical end of the object,
+ * pbm_size may be set to 0, but pbm_offset and pbm_length should be set to
+ * the actual amount of underlying storage that has been allocated, if any.
+ */
+
+typedef struct page_buf_bmap_s {
+ page_buf_daddr_t pbm_bn; /* block number in file system */
+ pb_target_t *pbm_target; /* device to do I/O to */
+ loff_t pbm_offset; /* byte offset of mapping in file */
+ size_t pbm_delta; /* offset of request into bmap */
+ size_t pbm_bsize; /* size of this mapping in bytes */
+ bmap_flags_t pbm_flags; /* options flags for mapping */
+} page_buf_bmap_t;
+
+typedef page_buf_bmap_t pb_bmap_t;
+
+
+/*
+ * page_buf_t: Buffer structure for page cache-based buffers
+ *
+ * This buffer structure is used by the page cache buffer management routines
+ * to refer to an assembly of pages forming a logical buffer. The actual
+ * I/O is performed with buffer_head or bio structures, as required by
+ * drivers which do not understand this structure. The buffer structure is
+ * used on a temporary basis only, and discarded when released.
+ *
+ * The real data storage is recorded in the page cache. Metadata is
+ * hashed to the inode for the block device on which the file system resides.
+ * File data is hashed to the inode for the file. Pages which are only
+ * partially filled with data have bits set in their block_map entry
+ * to indicate which disk blocks in the page are not valid.
+ */
+
+struct page_buf_s;
+typedef void (*page_buf_iodone_t)(struct page_buf_s *);
+ /* call-back function on I/O completion */
+typedef void (*page_buf_relse_t)(struct page_buf_s *);
+ /* call-back function on last release */
+typedef int (*page_buf_bdstrat_t)(struct page_buf_s *);
+
+#define PB_PAGES 4
+
+typedef struct page_buf_s {
+ struct semaphore pb_sema; /* semaphore for lockables */
+ unsigned long pb_flushtime; /* time to flush pagebuf */
+ atomic_t pb_pin_count; /* pin count */
+ wait_queue_head_t pb_waiters; /* unpin waiters */
+ struct list_head pb_list;
+ page_buf_flags_t pb_flags; /* status flags */
+ struct list_head pb_hash_list;
+ struct pb_target *pb_target; /* logical object */
+ atomic_t pb_hold; /* reference count */
+ page_buf_daddr_t pb_bn; /* block number for I/O */
+ loff_t pb_file_offset; /* offset in file */
+ size_t pb_buffer_length; /* size of buffer in bytes */
+ size_t pb_count_desired; /* desired transfer size */
+ void *pb_addr; /* virtual address of buffer */
+ struct tq_struct pb_iodone_sched;
+ atomic_t pb_io_remaining;/* #outstanding I/O requests */
+ page_buf_iodone_t pb_iodone; /* I/O completion function */
+ page_buf_relse_t pb_relse; /* releasing function */
+ page_buf_bdstrat_t pb_strat; /* pre-write function */
+ struct semaphore pb_iodonesema; /* Semaphore for I/O waiters */
+ void *pb_fspriv;
+ void *pb_fspriv2;
+ void *pb_fspriv3;
+ unsigned short pb_error; /* error code on I/O */
+ unsigned short pb_page_count; /* size of page array */
+ unsigned short pb_offset; /* page offset in first page */
+ unsigned char pb_locked; /* page array is locked */
+ unsigned char pb_hash_index; /* hash table index */
+ struct page **pb_pages; /* array of page pointers */
+ struct page *pb_page_array[PB_PAGES]; /* inline pages */
+#ifdef PAGEBUF_LOCK_TRACKING
+ int pb_last_holder;
+#endif
+} page_buf_t;
+
+
+/*
+ * page_buf module entry points
+ */
+
+/* Finding and Reading Buffers */
+
+extern page_buf_t *pagebuf_find( /* find buffer for block if */
+ /* the block is in memory */
+ struct pb_target *, /* inode for block */
+ loff_t, /* starting offset of range */
+ size_t, /* length of range */
+ page_buf_flags_t); /* PBF_LOCK */
+
+extern page_buf_t *pagebuf_get( /* allocate a buffer */
+ struct pb_target *, /* inode for buffer */
+ loff_t, /* starting offset of range */
+ size_t, /* length of range */
+ page_buf_flags_t); /* PBF_LOCK, PBF_READ, */
+ /* PBF_ASYNC */
+
+extern page_buf_t *pagebuf_lookup(
+ struct pb_target *,
+ loff_t, /* starting offset of range */
+ size_t, /* length of range */
+ page_buf_flags_t); /* PBF_READ, PBF_WRITE, */
+ /* PBF_FORCEIO, _PBF_LOCKABLE */
+
+extern page_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */
+ /* no memory or disk address */
+ struct pb_target *); /* mount point "fake" inode */
+
+extern page_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct */
+ /* without disk address */
+ size_t len,
+ struct pb_target *); /* mount point "fake" inode */
+
+extern int pagebuf_associate_memory(
+ page_buf_t *,
+ void *,
+ size_t);
+
+
+extern void pagebuf_hold( /* increment reference count */
+ page_buf_t *); /* buffer to hold */
+
+extern void pagebuf_readahead( /* read ahead into cache */
+ struct pb_target *, /* target for buffer (or NULL) */
+ loff_t, /* starting offset of range */
+ size_t, /* length of range */
+ page_buf_flags_t); /* additional read flags */
+
+/* Writing and Releasing Buffers */
+
+extern void pagebuf_free( /* deallocate a buffer */
+ page_buf_t *); /* buffer to deallocate */
+
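+/*
+ * Typical usage of these entry points (illustrative only, not part
+ * of this patch): read a block, modify its contents, then hand it
+ * to the delayed write queue:
+ *
+ *	pb = pagebuf_get(target, blkno, nblks,
+ *			 PBF_LOCK | PBF_READ | PBF_MAPPABLE);
+ *	if (pb == NULL)
+ *		return -ENOMEM;
+ *	...modify the data at pb->pb_addr...
+ *	pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC);	// unlocks pb
+ *	pagebuf_rele(pb);	// delwri queue keeps its own hold
+ */
+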
+extern void pagebuf_rele( /* release hold on a buffer */ + page_buf_t *); /* buffer to release */ + +/* Locking and Unlocking Buffers */ + +extern int pagebuf_cond_lock( /* lock buffer, if not locked */ + /* (returns -EBUSY if locked) */ + page_buf_t *); /* buffer to lock */ + +extern int pagebuf_lock_value( /* return count on lock */ + page_buf_t *); /* buffer to check */ + +extern int pagebuf_lock( /* lock buffer */ + page_buf_t *); /* buffer to lock */ + +extern void pagebuf_unlock( /* unlock buffer */ + page_buf_t *); /* buffer to unlock */ + +/* Buffer Utility Routines */ +static inline int pagebuf_geterror(page_buf_t *pb) +{ + return (pb ? pb->pb_error : ENOMEM); +} + +extern void pagebuf_iodone( /* mark buffer I/O complete */ + page_buf_t *, /* buffer to mark */ + int, /* use data/log helper thread. */ + int); /* run completion locally, or in + * a helper thread. */ + +extern void pagebuf_ioerror( /* mark buffer in error (or not) */ + page_buf_t *, /* buffer to mark */ + unsigned int); /* error to store (0 if none) */ + +extern int pagebuf_iostart( /* start I/O on a buffer */ + page_buf_t *, /* buffer to start */ + page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC, */ + /* PBF_READ, PBF_WRITE, */ + /* PBF_DELWRI, PBF_SYNC */ + +extern int pagebuf_iorequest( /* start real I/O */ + page_buf_t *); /* buffer to convey to device */ + + /* + * pagebuf_iorequest is the core I/O request routine. + * It assumes that the buffer is well-formed and + * mapped and ready for physical I/O, unlike + * pagebuf_iostart() and pagebuf_iophysio(). Those + * routines call the inode pagebuf_ioinitiate routine to start I/O, + * if it is present, or else call pagebuf_iorequest() + * directly if the inode pagebuf_ioinitiate routine is not present. + */ + +extern int pagebuf_iowait( /* wait for buffer I/O done */ + page_buf_t *); /* buffer to wait on */ + +extern caddr_t pagebuf_offset(page_buf_t *, size_t); + +extern void pagebuf_iomove( /* move data in/out of pagebuf */ + page_buf_t *, /* buffer to manipulate */ + size_t, /* starting buffer offset */ + size_t, /* length in buffer */ + caddr_t, /* data pointer */ + page_buf_rw_t); /* direction */ + +/* Pinning Buffer Storage in Memory */ + +extern void pagebuf_pin( /* pin buffer in memory */ + page_buf_t *); /* buffer to pin */ + +extern void pagebuf_unpin( /* unpin buffered data */ + page_buf_t *); /* buffer to unpin */ + +extern int pagebuf_ispin( page_buf_t *); /* check if pagebuf is pinned */ + +/* Reading and writing pages */ + +extern void pagebuf_delwri_dequeue(page_buf_t *); + +#define PBDF_WAIT 0x01 +#define PBDF_TRYLOCK 0x02 +extern void pagebuf_delwri_flush( + struct pb_target *, + unsigned long, + int *); + +extern int pagebuf_init(void); +extern void pagebuf_terminate(void); + +static __inline__ int __pagebuf_iorequest(page_buf_t *pb) +{ + if (pb->pb_strat) + return pb->pb_strat(pb); + return pagebuf_iorequest(pb); +} + +static __inline__ void pagebuf_run_queues(page_buf_t *pb) +{ + if (!pb || atomic_read(&pb->pb_io_remaining)) + run_task_queue(&tq_disk); +} + +#endif /* __PAGE_BUF_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/pagebuf/page_buf_internal.h linux.21rc5-ac2/fs/xfs/pagebuf/page_buf_internal.h --- linux.21rc5/fs/xfs/pagebuf/page_buf_internal.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/pagebuf/page_buf_internal.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Written by Steve Lord at SGI + */ + +#ifndef __PAGE_BUF_PRIVATE_H__ +#define __PAGE_BUF_PRIVATE_H__ + +#include "page_buf.h" + +#define _PAGE_BUF_INTERNAL_ +#define PB_DEFINE_TRACES +#include "page_buf_trace.h" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,9) +#define page_buffers(page) ((page)->buffers) +#define page_has_buffers(page) ((page)->buffers) +#endif + +#ifdef PAGEBUF_LOCK_TRACKING +#define PB_SET_OWNER(pb) (pb->pb_last_holder = current->pid) +#define PB_CLEAR_OWNER(pb) (pb->pb_last_holder = -1) +#define PB_GET_OWNER(pb) (pb->pb_last_holder) +#else +#define PB_SET_OWNER(pb) +#define PB_CLEAR_OWNER(pb) +#define PB_GET_OWNER(pb) +#endif /* PAGEBUF_LOCK_TRACKING */ + +/* Tracing utilities for pagebuf */ +typedef struct { + int event; + unsigned long pb; + page_buf_flags_t flags; + unsigned short hold; + unsigned short lock_value; + void *task; + void *misc; + void *ra; + loff_t offset; + size_t size; +} pagebuf_trace_t; + +struct pagebuf_trace_buf { + pagebuf_trace_t *buf; + volatile int start; + volatile int end; +}; + +#define PB_TRACE_BUFSIZE 1024 +#define CIRC_INC(i) (((i) + 1) & (PB_TRACE_BUFSIZE - 1)) + +/* + * Tunable pagebuf parameters + */ + +#define P_PARAM 4 + +typedef union pagebuf_param { + struct { + ulong flush_interval; /* interval between runs of the + * delwri flush daemon. */ + ulong age_buffer; /* time for buffer to age before + * we flush it. 
*/ + ulong debug; /* debug tracing on or off */ + ulong stats_clear; /* clear the pagebuf stats */ + } p_un; + ulong data[P_PARAM]; +} pagebuf_param_t; + +enum { + PB_FLUSH_INT = 1, + PB_FLUSH_AGE = 2, + PB_STATS_CLEAR = 3, + PB_DEBUG = 4 +}; + +extern pagebuf_param_t pb_params; + +/* + * Pagebuf statistics + */ + +struct pbstats { + u_int32_t pb_get; + u_int32_t pb_create; + u_int32_t pb_get_locked; + u_int32_t pb_get_locked_waited; + u_int32_t pb_busy_locked; + u_int32_t pb_miss_locked; + u_int32_t pb_page_retries; + u_int32_t pb_page_found; + u_int32_t pb_get_read; +}; + +extern struct pbstats pbstats; + +#define PB_STATS_INC(count) ( count ++ ) + +#ifndef STATIC +# define STATIC static +#endif + +#endif /* __PAGE_BUF_PRIVATE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/pagebuf/page_buf_locking.c linux.21rc5-ac2/fs/xfs/pagebuf/page_buf_locking.c --- linux.21rc5/fs/xfs/pagebuf/page_buf_locking.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/pagebuf/page_buf_locking.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * page_buf_locking.c + * + * The page_buf module provides an abstract buffer cache model on top of + * the Linux page cache. Cached metadata blocks for a file system are + * hashed to the inode for the block device. The page_buf module + * assembles buffer (page_buf_t) objects on demand to aggregate such + * cached pages for I/O. The page_buf_locking module adds support for + * locking such page buffers. + * + * Written by Steve Lord at SGI + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "page_buf_internal.h" + +/* + * pagebuf_cond_lock + * + * pagebuf_cond_lock locks a buffer object, if it is not already locked. + * Note that this in no way + * locks the underlying pages, so it is only useful for synchronizing + * concurrent use of page buffer objects, not for synchronizing independent + * access to the underlying pages. 
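+ *
+ * For example, a caller that must not block could try (sketch only;
+ * pb is assumed to be a held, lockable pagebuf):
+ *
+ *	if (pagebuf_cond_lock(pb) == 0) {
+ *		... modify the buffer ...
+ *		pagebuf_unlock(pb);
+ *	}
+ *	(a -EBUSY return means another thread holds the lock)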
+ */
+int
+pagebuf_cond_lock( /* lock buffer, if not locked */
+ /* (returns -EBUSY if locked) */
+ page_buf_t *pb)
+{
+ int locked;
+
+ ASSERT(pb->pb_flags & _PBF_LOCKABLE);
+
+ locked = down_trylock(&pb->pb_sema) == 0;
+ if (locked) {
+ PB_SET_OWNER(pb);
+ }
+
+ PB_TRACE(pb, PB_TRACE_REC(condlck), locked);
+
+ return(locked ? 0 : -EBUSY);
+}
+
+/*
+ * pagebuf_lock_value
+ *
+ * Return lock value for a pagebuf
+ */
+int
+pagebuf_lock_value(
+ page_buf_t *pb)
+{
+ ASSERT(pb->pb_flags & _PBF_LOCKABLE);
+ return(atomic_read(&pb->pb_sema.count));
+}
+
+/*
+ * pagebuf_lock
+ *
+ * pagebuf_lock locks a buffer object. Note that this in no way
+ * locks the underlying pages, so it is only useful for synchronizing
+ * concurrent use of page buffer objects, not for synchronizing independent
+ * access to the underlying pages.
+ */
+int
+pagebuf_lock(
+ page_buf_t *pb)
+{
+ ASSERT(pb->pb_flags & _PBF_LOCKABLE);
+
+ PB_TRACE(pb, PB_TRACE_REC(lock), 0);
+ pagebuf_run_queues(pb);
+ down(&pb->pb_sema);
+ PB_SET_OWNER(pb);
+ PB_TRACE(pb, PB_TRACE_REC(locked), 0);
+ return 0;
+}
+
+/*
+ * pagebuf_unlock
+ *
+ * pagebuf_unlock releases the lock on the buffer object created by
+ * pagebuf_lock or pagebuf_cond_lock (not any
+ * pinning of underlying pages created by pagebuf_pin).
+ */
+void
+pagebuf_unlock( /* unlock buffer */
+ page_buf_t *pb) /* buffer to unlock */
+{
+ ASSERT(pb->pb_flags & _PBF_LOCKABLE);
+ PB_CLEAR_OWNER(pb);
+ up(&pb->pb_sema);
+ PB_TRACE(pb, PB_TRACE_REC(unlock), 0);
+} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/pagebuf/page_buf_trace.h linux.21rc5-ac2/fs/xfs/pagebuf/page_buf_trace.h --- linux.21rc5/fs/xfs/pagebuf/page_buf_trace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/pagebuf/page_buf_trace.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,95 @@ +/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#ifndef __PAGEBUF_TRACE__ +#define __PAGEBUF_TRACE__ + +#ifdef PB_DEFINE_TRACES +#define PB_TRACE_START typedef enum { +#define PB_TRACE_REC(x) pb_trace_point_##x +#define PB_TRACE_END } pb_trace_var_t; +#else +#define PB_TRACE_START static char *event_names[] = { +#define PB_TRACE_REC(x) #x +#define PB_TRACE_END }; +#endif + +PB_TRACE_START +PB_TRACE_REC(get), +PB_TRACE_REC(get_obj), +PB_TRACE_REC(free_obj), +PB_TRACE_REC(look_pg), +PB_TRACE_REC(get_read), +PB_TRACE_REC(no_daddr), +PB_TRACE_REC(hold), +PB_TRACE_REC(rele), +PB_TRACE_REC(done), +PB_TRACE_REC(ioerror), +PB_TRACE_REC(iostart), +PB_TRACE_REC(end_io), +PB_TRACE_REC(do_io), +PB_TRACE_REC(ioreq), +PB_TRACE_REC(iowait), +PB_TRACE_REC(iowaited), +PB_TRACE_REC(free_lk), +PB_TRACE_REC(freed_l), +PB_TRACE_REC(cmp), +PB_TRACE_REC(get_lk), +PB_TRACE_REC(got_lk), +PB_TRACE_REC(skip), +PB_TRACE_REC(lock), +PB_TRACE_REC(locked), +PB_TRACE_REC(unlock), +PB_TRACE_REC(avl_ret), +PB_TRACE_REC(condlck), +PB_TRACE_REC(avl_ins), +PB_TRACE_REC(walkq1), +PB_TRACE_REC(walkq2), +PB_TRACE_REC(walkq3), +PB_TRACE_REC(delwri_q), +PB_TRACE_REC(delwri_uq), +PB_TRACE_REC(pin), +PB_TRACE_REC(unpin), +PB_TRACE_REC(file_write), +PB_TRACE_REC(external), +PB_TRACE_END + +extern void pb_trace_func(page_buf_t *, int, void *, void *); +#ifdef PAGEBUF_TRACE +# define PB_TRACE(pb, event, misc) \ + pb_trace_func(pb, event, (void *) misc, \ + (void *)__builtin_return_address(0)) +#else +# define PB_TRACE(pb, event, misc) do { } while (0) +#endif + +#endif /* __PAGEBUF_TRACE__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/Makefile linux.21rc5-ac2/fs/xfs/quota/Makefile --- linux.21rc5/fs/xfs/quota/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/Makefile 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,56 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# + +EXTRA_CFLAGS += -I $(TOPDIR)/fs/xfs + +ifeq ($(CONFIG_XFS_DEBUG),y) + EXTRA_CFLAGS += -g -DDEBUG -DXFSDEBUG + #EXTRA_CFLAGS += -DQUOTADEBUG +endif + +O_TARGET := xfs_quota.o +ifneq ($(MAKECMDGOALS),modules_install) + obj-m := $(O_TARGET) +endif + +obj-$(CONFIG_PROC_FS) += xfs_qm_stats.o + +obj-y += xfs_dquot.o \ + xfs_dquot_item.o \ + xfs_trans_dquot.o \ + xfs_qm_syscalls.o \ + xfs_qm_bhv.o \ + xfs_qm.o + +export-objs += xfs_qm.o + +include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_dquot.c linux.21rc5-ac2/fs/xfs/quota/xfs_dquot.c --- linux.21rc5/fs/xfs/quota/xfs_dquot.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_dquot.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,1588 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_space.h"
+#include "xfs_trans_priv.h"
+
+#include "xfs_qm.h"
+
+
+/*
+ LOCK ORDER
+
+ inode lock (ilock)
+ dquot hash-chain lock (hashlock)
+ xqm dquot freelist lock (freelistlock)
+ mount's dquot list lock (mplistlock)
+ user dquot lock - lock ordering among dquots is based on the uid or gid
+ group dquot lock - similar to udquots. Between the two dquots, the udquot
+ has to be locked first.
+ pin lock - the dquot lock must be held to take this lock.
+ flush lock - ditto.
+*/
+
+STATIC void xfs_qm_dqflush_done(xfs_buf_t *, xfs_dq_logitem_t *);
+
+#ifdef DEBUG
+dev_t xfs_dqerror_dev = 0;
+int xfs_do_dqerror = 0;
+int xfs_dqreq_num = 0;
+int xfs_dqerror_mod = 33;
+#endif
+
+/*
+ * Allocate and initialize a dquot. We don't always allocate fresh memory;
+ * we try to reclaim a free dquot if the number of incore dquots is above
+ * a threshold.
+ * The only field inside the core that gets initialized at this point
+ * is the d_id field. The idea is to fill in the entire q_core
+ * when we read in the on disk dquot.
+ */
+xfs_dquot_t *
+xfs_qm_dqinit(
+ xfs_mount_t *mp,
+ xfs_dqid_t id,
+ uint type)
+{
+ xfs_dquot_t *dqp;
+ boolean_t brandnewdquot;
+
+ brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
+ dqp->dq_flags = type;
+ INT_SET(dqp->q_core.d_id, ARCH_CONVERT, id);
+ dqp->q_mount = mp;
+
+ /*
+ * No need to re-initialize these if this is a reclaimed dquot.
+ */
+ if (brandnewdquot) {
+ dqp->dq_flnext = dqp->dq_flprev = dqp;
+ mutex_init(&dqp->q_qlock, MUTEX_DEFAULT, "xdq");
+ initnsema(&dqp->q_flock, 1, "fdq");
+ sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
+
+#ifdef DQUOT_TRACING
+ dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
+ xfs_dqtrace_entry(dqp, "DQINIT");
+#endif
+ } else {
+ /*
+ * Only the q_core portion was zeroed in dqreclaim_one().
+ * So, we need to reset others.
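+ * (The locks, the freelist links and the trace buffer that the
+ * brand-new branch above sets up are deliberately reused as-is.)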
+ */ + dqp->q_nrefs = 0; + dqp->q_blkno = 0; + dqp->MPL_NEXT = dqp->HL_NEXT = NULL; + dqp->HL_PREVP = dqp->MPL_PREVP = NULL; + dqp->q_bufoffset = 0; + dqp->q_fileoffset = 0; + dqp->q_transp = NULL; + dqp->q_gdquot = NULL; + dqp->q_res_bcount = 0; + dqp->q_res_icount = 0; + dqp->q_res_rtbcount = 0; + dqp->q_pincount = 0; + dqp->q_hash = 0; + ASSERT(dqp->dq_flnext == dqp->dq_flprev); + +#ifdef DQUOT_TRACING + ASSERT(dqp->q_trace); + xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT"); +#endif + } + + /* + * log item gets initialized later + */ + return (dqp); +} + +/* + * This is called to free all the memory associated with a dquot + */ +void +xfs_qm_dqdestroy( + xfs_dquot_t *dqp) +{ + ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp)); + + mutex_destroy(&dqp->q_qlock); + freesema(&dqp->q_flock); + sv_destroy(&dqp->q_pinwait); + +#ifdef DQUOT_TRACING + if (dqp->q_trace) + ktrace_free(dqp->q_trace); + dqp->q_trace = NULL; +#endif + kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); + atomic_dec(&xfs_Gqm->qm_totaldquots); +} + +/* + * This is what a 'fresh' dquot inside a dquot chunk looks like on disk. + */ +STATIC void +xfs_qm_dqinit_core( + xfs_dqid_t id, + uint type, + xfs_dqblk_t *d) +{ + /* + * Caller has zero'd the entire dquot 'chunk' already. + */ + INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC); + INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION); + INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id); + INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type); +} + + +#ifdef DQUOT_TRACING +/* + * Dquot tracing for debugging. + */ +/* ARGSUSED */ +void +xfs_dqtrace_entry__( + xfs_dquot_t *dqp, + char *func, + void *retaddr, + xfs_inode_t *ip) +{ + xfs_dquot_t *udqp = NULL; + int ino; + + ASSERT(dqp->q_trace); + if (ip) { + ino = ip->i_ino; + udqp = ip->i_udquot; + } + ktrace_enter(dqp->q_trace, + (void *)(__psint_t)DQUOT_KTRACE_ENTRY, + (void *)func, + (void *)(__psint_t)dqp->q_nrefs, + (void *)(__psint_t)dqp->dq_flags, + (void *)(__psint_t)dqp->q_res_bcount, + (void *)(__psint_t)INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_id, ARCH_CONVERT), /* 11 */ + (void *)(__psint_t)current_pid(), + (void *)(__psint_t)ino, + (void *)(__psint_t)retaddr, + (void *)(__psint_t)udqp); + return; +} +#endif + + +/* + * Check the limits and timers of a dquot and start or reset timers + * if necessary. + * This gets called even when quota enforcement is OFF, which makes our + * life a little less complicated. (We just don't reject any quota + * reservations in that case, when enforcement is off). + * We also return 0 as the values of the timers in Q_GETQUOTA calls, when + * enforcement's off. + * In contrast, warnings are a little different in that they don't + * 'automatically' get started when limits get exceeded. + */ +void +xfs_qm_adjust_dqtimers( + xfs_mount_t *mp, + xfs_disk_dquot_t *d) +{ + /* + * The dquot had better be locked. We are modifying it here. + */ + + /* + * root's limits are not real limits. 
+ */ + if (INT_ISZERO(d->d_id, ARCH_CONVERT)) + return; + +#ifdef QUOTADEBUG + if (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)) + ASSERT(INT_GET(d->d_blk_softlimit, ARCH_CONVERT) <= INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)); + if (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)) + ASSERT(INT_GET(d->d_ino_softlimit, ARCH_CONVERT) <= INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)); +#endif + if (INT_ISZERO(d->d_btimer, ARCH_CONVERT)) { + if ((INT_GET(d->d_blk_softlimit, ARCH_CONVERT) && + (INT_GET(d->d_bcount, ARCH_CONVERT) >= INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) || + (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT) && + (INT_GET(d->d_bcount, ARCH_CONVERT) >= INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) { + INT_SET(d->d_btimer, ARCH_CONVERT, CURRENT_TIME + XFS_QI_BTIMELIMIT(mp)); + } + } else { + if ((INT_ISZERO(d->d_blk_softlimit, ARCH_CONVERT) || + (INT_GET(d->d_bcount, ARCH_CONVERT) < INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) && + (INT_ISZERO(d->d_blk_hardlimit, ARCH_CONVERT) || + (INT_GET(d->d_bcount, ARCH_CONVERT) < INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) { + INT_ZERO(d->d_btimer, ARCH_CONVERT); + } + } + + if (INT_ISZERO(d->d_itimer, ARCH_CONVERT)) { + if ((INT_GET(d->d_ino_softlimit, ARCH_CONVERT) && + (INT_GET(d->d_icount, ARCH_CONVERT) >= INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) || + (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT) && + (INT_GET(d->d_icount, ARCH_CONVERT) >= INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) { + INT_SET(d->d_itimer, ARCH_CONVERT, CURRENT_TIME + XFS_QI_ITIMELIMIT(mp)); + } + } else { + if ((INT_ISZERO(d->d_ino_softlimit, ARCH_CONVERT) || + (INT_GET(d->d_icount, ARCH_CONVERT) < INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) && + (INT_ISZERO(d->d_ino_hardlimit, ARCH_CONVERT) || + (INT_GET(d->d_icount, ARCH_CONVERT) < INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) { + INT_ZERO(d->d_itimer, ARCH_CONVERT); + } + } +} + +/* + * Increment or reset warnings of a given dquot. + */ +int +xfs_qm_dqwarn( + xfs_disk_dquot_t *d, + uint flags) +{ + int warned; + + /* + * root's limits are not real limits. 
+ */ + if (INT_ISZERO(d->d_id, ARCH_CONVERT)) + return (0); + + warned = 0; + if (INT_GET(d->d_blk_softlimit, ARCH_CONVERT) && + (INT_GET(d->d_bcount, ARCH_CONVERT) >= + INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) { + if (flags & XFS_QMOPT_DOWARN) { + INT_MOD(d->d_bwarns, ARCH_CONVERT, +1); + warned++; + } + } else { + if (INT_ISZERO(d->d_blk_softlimit, ARCH_CONVERT) || + (INT_GET(d->d_bcount, ARCH_CONVERT) < + INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) { + INT_ZERO(d->d_bwarns, ARCH_CONVERT); + } + } + + if (INT_GET(d->d_ino_softlimit, ARCH_CONVERT) > 0 && + (INT_GET(d->d_icount, ARCH_CONVERT) >= + INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) { + if (flags & XFS_QMOPT_DOWARN) { + INT_MOD(d->d_iwarns, ARCH_CONVERT, +1); + warned++; + } + } else { + if ((INT_ISZERO(d->d_ino_softlimit, ARCH_CONVERT)) || + (INT_GET(d->d_icount, ARCH_CONVERT) < + INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) { + INT_ZERO(d->d_iwarns, ARCH_CONVERT); + } + } +#ifdef QUOTADEBUG + if (INT_GET(d->d_iwarns, ARCH_CONVERT)) + cmn_err(CE_DEBUG, + "--------@@Inode warnings running : %Lu >= %Lu", + INT_GET(d->d_icount, ARCH_CONVERT), + INT_GET(d->d_ino_softlimit, ARCH_CONVERT)); + if (INT_GET(d->d_bwarns, ARCH_CONVERT)) + cmn_err(CE_DEBUG, + "--------@@Blks warnings running : %Lu >= %Lu", + INT_GET(d->d_bcount, ARCH_CONVERT), + INT_GET(d->d_blk_softlimit, ARCH_CONVERT)); +#endif + return (warned); +} + + +/* + * initialize a buffer full of dquots and log the whole thing + */ +STATIC void +xfs_qm_init_dquot_blk( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + xfs_buf_t *bp) +{ + xfs_dqblk_t *d; + int curid, i; + + ASSERT(tp); + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + + d = (xfs_dqblk_t *)XFS_BUF_PTR(bp); + + /* + * ID of the first dquot in the block - id's are zero based. + */ + curid = id - (id % XFS_QM_DQPERBLK(mp)); + ASSERT(curid >= 0); + memset(d, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp))); + for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++) + xfs_qm_dqinit_core(curid, type, d); + xfs_trans_dquot_buf(tp, bp, + type & XFS_DQ_USER ? + XFS_BLI_UDQUOT_BUF : + XFS_BLI_GDQUOT_BUF); + xfs_trans_log_buf(tp, bp, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1); +} + + + +/* + * Allocate a block and fill it with dquots. + * This is called when the bmapi finds a hole. + */ +STATIC int +xfs_qm_dqalloc( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_dquot_t *dqp, + xfs_inode_t *quotip, + xfs_fileoff_t offset_fsb, + xfs_buf_t **O_bpp) +{ + xfs_fsblock_t firstblock; + xfs_bmap_free_t flist; + xfs_bmbt_irec_t map; + int nmaps, error, committed; + xfs_buf_t *bp; + + ASSERT(tp != NULL); + xfs_dqtrace_entry(dqp, "DQALLOC"); + + /* + * Initialize the bmap freelist prior to calling bmapi code. + */ + XFS_BMAP_INIT(&flist, &firstblock); + xfs_ilock(quotip, XFS_ILOCK_EXCL); + /* + * Return if this type of quotas is turned off while we didn't + * have an inode lock + */ + if (XFS_IS_THIS_QUOTA_OFF(dqp)) { + xfs_iunlock(quotip, XFS_ILOCK_EXCL); + return (ESRCH); + } + + /* + * xfs_trans_commit normally decrements the vnode ref count + * when it unlocks the inode. Since we want to keep the quota + * inode around, we bump the vnode ref count now. 
+ */ + VN_HOLD(XFS_ITOV(quotip)); + + xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); + nmaps = 1; + if ((error = xfs_bmapi(tp, quotip, + offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB, + XFS_BMAPI_METADATA | XFS_BMAPI_WRITE, + &firstblock, + XFS_QM_DQALLOC_SPACE_RES(mp), + &map, &nmaps, &flist))) { + goto error0; + } + ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); + ASSERT(nmaps == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + + /* + * Keep track of the blkno to save a lookup later + */ + dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); + + /* now we can just get the buffer (there's nothing to read yet) */ + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, + dqp->q_blkno, + XFS_QI_DQCHUNKLEN(mp), + 0); + if (!bp || (error = XFS_BUF_GETERROR(bp))) + goto error1; + /* + * Make a chunk of dquots out of this buffer and log + * the entire thing. + */ + xfs_qm_init_dquot_blk(tp, mp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT), + dqp->dq_flags & (XFS_DQ_USER|XFS_DQ_GROUP), + bp); + + if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed))) { + goto error1; + } + + *O_bpp = bp; + return 0; + + error1: + xfs_bmap_cancel(&flist); + error0: + xfs_iunlock(quotip, XFS_ILOCK_EXCL); + + return (error); +} + +/* + * Maps a dquot to the buffer containing its on-disk version. + * This returns a ptr to the buffer containing the on-disk dquot + * in the bpp param, and a ptr to the on-disk dquot within that buffer + */ +STATIC int +xfs_qm_dqtobp( + xfs_trans_t *tp, + xfs_dquot_t *dqp, + xfs_disk_dquot_t **O_ddpp, + xfs_buf_t **O_bpp, + uint flags) +{ + xfs_bmbt_irec_t map; + int nmaps, error; + xfs_buf_t *bp; + xfs_inode_t *quotip; + xfs_mount_t *mp; + xfs_disk_dquot_t *ddq; + xfs_dqid_t id; + boolean_t newdquot; + + mp = dqp->q_mount; + id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT); + nmaps = 1; + newdquot = B_FALSE; + + /* + * If we don't know where the dquot lives, find out. + */ + if (dqp->q_blkno == (xfs_daddr_t) 0) { + /* We use the id as an index */ + dqp->q_fileoffset = (xfs_fileoff_t) ((uint)id / + XFS_QM_DQPERBLK(mp)); + nmaps = 1; + quotip = XFS_DQ_TO_QIP(dqp); + xfs_ilock(quotip, XFS_ILOCK_SHARED); + /* + * Return if this type of quotas is turned off while we didn't + * have an inode lock + */ + if (XFS_IS_THIS_QUOTA_OFF(dqp)) { + xfs_iunlock(quotip, XFS_ILOCK_SHARED); + return (ESRCH); + } + /* + * Find the block map; no allocations yet + */ + error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, + XFS_DQUOT_CLUSTER_SIZE_FSB, + XFS_BMAPI_METADATA, + NULL, 0, &map, &nmaps, NULL); + + xfs_iunlock(quotip, XFS_ILOCK_SHARED); + if (error) + return (error); + ASSERT(nmaps == 1); + ASSERT(map.br_blockcount == 1); + + /* + * offset of dquot in the (fixed sized) dquot chunk. + */ + dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) * + sizeof(xfs_dqblk_t); + if (map.br_startblock == HOLESTARTBLOCK) { + /* + * We don't allocate unless we're asked to + */ + if (!(flags & XFS_QMOPT_DQALLOC)) + return (ENOENT); + + ASSERT(tp); + if ((error = xfs_qm_dqalloc(tp, mp, dqp, quotip, + dqp->q_fileoffset, &bp))) + return (error); + newdquot = B_TRUE; + } else { + /* + * store the blkno etc so that we don't have to do the + * mapping all the time + */ + dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); + } + } + ASSERT(dqp->q_blkno != DELAYSTARTBLOCK); + ASSERT(dqp->q_blkno != HOLESTARTBLOCK); + + /* + * Read in the buffer, unless we've just done the allocation + * (in which case we already have the buf). + */ + if (! 
newdquot) { + xfs_dqtrace_entry(dqp, "DQTOBP READBUF"); + if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, + dqp->q_blkno, + XFS_QI_DQCHUNKLEN(mp), + 0, &bp))) { + return (error); + } + if (error || !bp) + return XFS_ERROR(error); + } + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + + /* + * calculate the location of the dquot inside the buffer. + */ + ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset); + + /* + * A simple sanity check in case we got a corrupted dquot... + */ + if (xfs_qm_dqcheck(ddq, id, + dqp->dq_flags & (XFS_DQ_USER|XFS_DQ_GROUP), + flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN), + "dqtobp")) { + if (!(flags & XFS_QMOPT_DQREPAIR)) { + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EIO); + } + XFS_BUF_BUSY(bp); /* We dirtied this */ + } + + *O_bpp = bp; + *O_ddpp = ddq; + + return (0); +} + + +/* + * Read in the ondisk dquot using dqtobp() then copy it to an incore version, + * and release the buffer immediately. + * + */ +/* ARGSUSED */ +STATIC int +xfs_qm_dqread( + xfs_trans_t *tp, + xfs_dqid_t id, + xfs_dquot_t *dqp, /* dquot to get filled in */ + uint flags) +{ + xfs_disk_dquot_t *ddqp; + xfs_buf_t *bp; + int error; + + /* + * get a pointer to the on-disk dquot and the buffer containing it + * dqp already knows its own type (GROUP/USER). + */ + xfs_dqtrace_entry(dqp, "DQREAD"); + if ((error = xfs_qm_dqtobp(tp, dqp, &ddqp, &bp, flags))) { + return (error); + } + + /* copy everything from disk dquot to the incore dquot */ + memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t)); + ASSERT(INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id); + xfs_qm_dquot_logitem_init(dqp); + + /* + * Reservation counters are defined as reservation plus current usage + * to avoid having to add everytime. + */ + dqp->q_res_bcount = INT_GET(ddqp->d_bcount, ARCH_CONVERT); + dqp->q_res_icount = INT_GET(ddqp->d_icount, ARCH_CONVERT); + dqp->q_res_rtbcount = INT_GET(ddqp->d_rtbcount, ARCH_CONVERT); + + /* Mark the buf so that this will stay incore a little longer */ + XFS_BUF_SET_VTYPE_REF(bp, B_FS_DQUOT, XFS_DQUOT_REF); + + /* + * We got the buffer with a xfs_trans_read_buf() (in dqtobp()) + * So we need to release with xfs_trans_brelse(). + * The strategy here is identical to that of inodes; we lock + * the dquot in xfs_qm_dqget() before making it accessible to + * others. This is because dquots, like inodes, need a good level of + * concurrency, and we don't want to take locks on the entire buffers + * for dquot accesses. + * Note also that the dquot buffer may even be dirty at this point, if + * this particular dquot was repaired. We still aren't afraid to + * brelse it because we have the changes incore. + */ + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + xfs_trans_brelse(tp, bp); + + return (error); +} + + +/* + * allocate an incore dquot from the kernel heap, + * and fill its core with quota information kept on disk. + * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk + * if it wasn't already allocated. 
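+ *
+ * Note that only the DQALLOC case needs a transaction (set up below
+ * with a permanent log reservation), since the read may then turn
+ * into allocating the disk block that holds the dquot; plain reads
+ * run with no transaction at all.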
+ */ +STATIC int +xfs_qm_idtodq( + xfs_mount_t *mp, + xfs_dqid_t id, /* gid or uid, depending on type */ + uint type, /* UDQUOT or GDQUOT */ + uint flags, /* DQALLOC, DQREPAIR */ + xfs_dquot_t **O_dqpp)/* OUT : incore dquot, not locked */ +{ + xfs_dquot_t *dqp; + int error; + xfs_trans_t *tp; + int cancelflags=0; + + dqp = xfs_qm_dqinit(mp, id, type); + tp = NULL; + if (flags & XFS_QMOPT_DQALLOC) { + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC); + if ((error = xfs_trans_reserve(tp, + XFS_QM_DQALLOC_SPACE_RES(mp), + XFS_WRITE_LOG_RES(mp) + + BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1 + + 128, + 0, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT))) { + cancelflags = 0; + goto error0; + } + cancelflags = XFS_TRANS_RELEASE_LOG_RES; + } + + /* + * Read it from disk; xfs_dqread() takes care of + * all the necessary initialization of dquot's fields (locks, etc) + */ + if ((error = xfs_qm_dqread(tp, id, dqp, flags))) { + /* + * This can happen if quotas got turned off (ESRCH), + * or if the dquot didn't exist on disk and we ask to + * allocate (ENOENT). + */ + xfs_dqtrace_entry(dqp, "DQREAD FAIL"); + cancelflags |= XFS_TRANS_ABORT; + goto error0; + } + if (tp) { + if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, + NULL))) + goto error1; + } + + *O_dqpp = dqp; + ASSERT(! XFS_DQ_IS_LOCKED(dqp)); + return (0); + + error0: + ASSERT(error); + if (tp) + xfs_trans_cancel(tp, cancelflags); + error1: + xfs_qm_dqdestroy(dqp); + *O_dqpp = NULL; + return (error); +} + +/* + * Lookup a dquot in the incore dquot hashtable. We keep two separate + * hashtables for user and group dquots; and, these are global tables + * inside the XQM, not per-filesystem tables. + * The hash chain must be locked by caller, and it is left locked + * on return. Returning dquot is locked. + */ +STATIC int +xfs_qm_dqlookup( + xfs_mount_t *mp, + xfs_dqid_t id, + xfs_dqhash_t *qh, + xfs_dquot_t **O_dqpp) +{ + xfs_dquot_t *dqp; + uint flist_locked; + xfs_dquot_t *d; + + ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); + + flist_locked = B_FALSE; + + /* + * Traverse the hashchain looking for a match + */ + for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) { + /* + * We already have the hashlock. We don't need the + * dqlock to look at the id field of the dquot, since the + * id can't be modified without the hashlock anyway. + */ + if (INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id && dqp->q_mount == mp) { + xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP"); + /* + * All in core dquots must be on the dqlist of mp + */ + ASSERT(dqp->MPL_PREVP != NULL); + + xfs_dqlock(dqp); + if (dqp->q_nrefs == 0) { + ASSERT (XFS_DQ_IS_ON_FREELIST(dqp)); + if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) { + xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT"); + + /* + * We may have raced with dqreclaim_one() + * (and lost). So, flag that we don't + * want the dquot to be reclaimed. 
+ */ + dqp->dq_flags |= XFS_DQ_WANT; + xfs_dqunlock(dqp); + xfs_qm_freelist_lock(xfs_Gqm); + xfs_dqlock(dqp); + dqp->dq_flags &= ~(XFS_DQ_WANT); + } + flist_locked = B_TRUE; + } + + /* + * id couldn't have changed; we had the hashlock all + * along + */ + ASSERT(INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id); + + if (flist_locked) { + if (dqp->q_nrefs != 0) { + xfs_qm_freelist_unlock(xfs_Gqm); + flist_locked = B_FALSE; + } else { + /* + * take it off the freelist + */ + xfs_dqtrace_entry(dqp, + "DQLOOKUP: TAKEOFF FL"); + XQM_FREELIST_REMOVE(dqp); + /* xfs_qm_freelist_print(&(xfs_Gqm-> + qm_dqfreelist), + "after removal"); */ + } + } + + /* + * grab a reference + */ + XFS_DQHOLD(dqp); + + if (flist_locked) + xfs_qm_freelist_unlock(xfs_Gqm); + /* + * move the dquot to the front of the hashchain + */ + ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); + if (dqp->HL_PREVP != &qh->qh_next) { + xfs_dqtrace_entry(dqp, + "DQLOOKUP: HASH MOVETOFRONT"); + if ((d = dqp->HL_NEXT)) + d->HL_PREVP = dqp->HL_PREVP; + *(dqp->HL_PREVP) = d; + d = qh->qh_next; + d->HL_PREVP = &dqp->HL_NEXT; + dqp->HL_NEXT = d; + dqp->HL_PREVP = &qh->qh_next; + qh->qh_next = dqp; + } + xfs_dqtrace_entry(dqp, "LOOKUP END"); + *O_dqpp = dqp; + ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); + return (0); + } + } + + *O_dqpp = NULL; + ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); + return (1); +} + +/* + * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a + * a locked dquot, doing an allocation (if requested) as needed. + * When both an inode and an id are given, the inode's id takes precedence. + * That is, if the id changes while we don't hold the ilock inside this + * function, the new dquot is returned, not necessarily the one requested + * in the id argument. + */ +int +xfs_qm_dqget( + xfs_mount_t *mp, + xfs_inode_t *ip, /* locked inode (optional) */ + xfs_dqid_t id, /* gid or uid, depending on type */ + uint type, /* UDQUOT or GDQUOT */ + uint flags, /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */ + xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */ +{ + xfs_dquot_t *dqp; + xfs_dqhash_t *h; + uint version; + int error; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) || + (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) { + return (ESRCH); + } + h = XFS_DQ_HASH(mp, id, type); + +#ifdef DEBUG + if (xfs_do_dqerror) { + if ((xfs_dqerror_dev == mp->m_dev) && + (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { + cmn_err(CE_DEBUG, "Returning error in dqget"); + return (EIO); + } + } +#endif + + again: + +#ifdef DEBUG + ASSERT(type == XFS_DQ_USER || type == XFS_DQ_GROUP); + if (ip) { + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + if (type == XFS_DQ_USER) + ASSERT(ip->i_udquot == NULL); + else + ASSERT(ip->i_gdquot == NULL); + } +#endif + XFS_DQ_HASH_LOCK(h); + + /* + * Look in the cache (hashtable). + * The chain is kept locked during lookup. + */ + if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) { + XQM_STATS_INC(xqmstats.xs_qm_dqcachehits); + /* + * The dquot was found, moved to the front of the chain, + * taken off the freelist if it was on it, and locked + * at this point. Just unlock the hashchain and return. + */ + ASSERT(*O_dqpp); + ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp)); + XFS_DQ_HASH_UNLOCK(h); + xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)"); + return (0); /* success */ + } + XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses); + + /* + * Dquot cache miss. We don't want to keep the inode lock across + * a (potential) disk read. Also we don't want to deal with the lock + * ordering between quotainode and this inode. 
OTOH, dropping the inode + * lock here means dealing with a chown that can happen before + * we re-acquire the lock. + */ + if (ip) + xfs_iunlock(ip, XFS_ILOCK_EXCL); + /* + * Save the hashchain version stamp, and unlock the chain, so that + * we don't keep the lock across a disk read + */ + version = h->qh_version; + XFS_DQ_HASH_UNLOCK(h); + + /* + * Allocate the dquot on the kernel heap, and read the ondisk + * portion off the disk. Also, do all the necessary initialization + * This can return ENOENT if dquot didn't exist on disk and we didn't + * ask it to allocate; ESRCH if quotas got turned off suddenly. + */ + if ((error = xfs_qm_idtodq(mp, id, type, + flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR| + XFS_QMOPT_DOWARN), + &dqp))) { + if (ip) + xfs_ilock(ip, XFS_ILOCK_EXCL); + return (error); + } + + /* + * See if this is mount code calling to look at the overall quota limits + * which are stored in the id == 0 user or group's dquot. + * Since we may not have done a quotacheck by this point, just return + * the dquot without attaching it to any hashtables, lists, etc, or even + * taking a reference. + * The caller must dqdestroy this once done. + */ + if (flags & XFS_QMOPT_DQSUSER) { + ASSERT(id == 0); + ASSERT(! ip); + goto dqret; + } + + /* + * Dquot lock comes after hashlock in the lock ordering + */ + ASSERT(! XFS_DQ_IS_LOCKED(dqp)); + if (ip) { + xfs_ilock(ip, XFS_ILOCK_EXCL); + if (! XFS_IS_DQTYPE_ON(mp, type)) { + /* inode stays locked on return */ + xfs_qm_dqdestroy(dqp); + return XFS_ERROR(ESRCH); + } + /* + * A dquot could be attached to this inode by now, since + * we had dropped the ilock. + */ + if (type == XFS_DQ_USER) { + if (ip->i_udquot) { + xfs_qm_dqdestroy(dqp); + dqp = ip->i_udquot; + xfs_dqlock(dqp); + goto dqret; + } + } else { + if (ip->i_gdquot) { + xfs_qm_dqdestroy(dqp); + dqp = ip->i_gdquot; + xfs_dqlock(dqp); + goto dqret; + } + } + } + + /* + * Hashlock comes after ilock in lock order + */ + XFS_DQ_HASH_LOCK(h); + if (version != h->qh_version) { + xfs_dquot_t *tmpdqp; + /* + * Now, see if somebody else put the dquot in the + * hashtable before us. This can happen because we didn't + * keep the hashchain lock. We don't have to worry about + * lock order between the two dquots here since dqp isn't + * on any findable lists yet. + */ + if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) { + /* + * Duplicate found. Just throw away the new dquot + * and start over. + */ + xfs_qm_dqput(tmpdqp); + XFS_DQ_HASH_UNLOCK(h); + xfs_qm_dqdestroy(dqp); + XQM_STATS_INC(xqmstats.xs_qm_dquot_dups); + goto again; + } + } + + /* + * Put the dquot at the beginning of the hash-chain and mp's list + * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock .. + */ + ASSERT(XFS_DQ_IS_HASH_LOCKED(h)); + dqp->q_hash = h; + XQM_HASHLIST_INSERT(h, dqp); + + /* + * Attach this dquot to this filesystem's list of all dquots, + * kept inside the mount structure in m_quotainfo field + */ + xfs_qm_mplist_lock(mp); + + /* + * We return a locked dquot to the caller, with a reference taken + */ + xfs_dqlock(dqp); + dqp->q_nrefs = 1; + + XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp); + + xfs_qm_mplist_unlock(mp); + XFS_DQ_HASH_UNLOCK(h); + dqret: + ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip)); + xfs_dqtrace_entry(dqp, "DQGET DONE"); + *O_dqpp = dqp; + return (0); +} + + +/* + * Release a reference to the dquot (decrement ref-count) + * and unlock it. If there is a group quota attached to this + * dquot, carefully release that too without tripping over + * deadlocks'n'stuff. 
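+ *
+ * A typical caller pairs it with xfs_qm_dqget(); minimal sketch,
+ * error handling trimmed (mp, id and dqp stand in for the caller's
+ * context):
+ *
+ *	error = xfs_qm_dqget(mp, NULL, id, XFS_DQ_USER, 0, &dqp);
+ *	if (!error) {
+ *		... dqp is returned locked, holding one reference ...
+ *		xfs_qm_dqput(dqp);
+ *	}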
+ */ +void +xfs_qm_dqput( + xfs_dquot_t *dqp) +{ + xfs_dquot_t *gdqp; + + ASSERT(dqp->q_nrefs > 0); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + xfs_dqtrace_entry(dqp, "DQPUT"); + + if (dqp->q_nrefs != 1) { + dqp->q_nrefs--; + xfs_dqunlock(dqp); + return; + } + + /* + * drop the dqlock and acquire the freelist and dqlock + * in the right order; but try to get it out-of-order first + */ + if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) { + xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT"); + xfs_dqunlock(dqp); + xfs_qm_freelist_lock(xfs_Gqm); + xfs_dqlock(dqp); + } + + while (1) { + gdqp = NULL; + + /* We can't depend on nrefs being == 1 here */ + if (--dqp->q_nrefs == 0) { + xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST"); + /* + * insert at end of the freelist. + */ + XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp); + + /* + * If we just added a udquot to the freelist, then + * we want to release the gdquot reference that + * it (probably) has. Otherwise it'll keep the + * gdquot from getting reclaimed. + */ + if ((gdqp = dqp->q_gdquot)) { + /* + * Avoid a recursive dqput call + */ + xfs_dqlock(gdqp); + dqp->q_gdquot = NULL; + } + + /* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist), + "@@@@@++ Free list (after append) @@@@@+"); + */ + } + xfs_dqunlock(dqp); + + /* + * If we had a group quota inside the user quota as a hint, + * release it now. + */ + if (! gdqp) + break; + dqp = gdqp; + } + xfs_qm_freelist_unlock(xfs_Gqm); +} + +/* + * Release a dquot. Flush it if dirty, then dqput() it. + * dquot must not be locked. + */ +void +xfs_qm_dqrele( + xfs_dquot_t *dqp) +{ + ASSERT(dqp); + xfs_dqtrace_entry(dqp, "DQRELE"); + + xfs_dqlock(dqp); + /* + * We don't care to flush it if the dquot is dirty here. + * That will create stutters that we want to avoid. + * Instead we do a delayed write when we try to reclaim + * a dirty dquot. Also xfs_sync will take part of the burden... + */ + xfs_qm_dqput(dqp); +} + + +/* + * Write a modified dquot to disk. + * The dquot must be locked and the flush lock too taken by caller. + * The flush lock will not be unlocked until the dquot reaches the disk, + * but the dquot is free to be unlocked and modified by the caller + * in the interim. Dquot is still locked on return. This behavior is + * identical to that of inodes. + */ +int +xfs_qm_dqflush( + xfs_dquot_t *dqp, + uint flags) +{ + xfs_mount_t *mp; + xfs_buf_t *bp; + xfs_disk_dquot_t *ddqp; + int error; + SPLDECL(s); + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); + xfs_dqtrace_entry(dqp, "DQFLUSH"); + + /* + * If not dirty, nada. + */ + if (!XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqfunlock(dqp); + return (0); + } + + /* + * Cant flush a pinned dquot. Wait for it. + */ + xfs_qm_dqunpin_wait(dqp); + + /* + * This may have been unpinned because the filesystem is shutting + * down forcibly. If that's the case we must not write this dquot + * to disk, because the log record didn't make it to disk! + */ + if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) { + dqp->dq_flags &= ~(XFS_DQ_DIRTY); + xfs_dqfunlock(dqp); + return XFS_ERROR(EIO); + } + + /* + * Get the buffer containing the on-disk dquot + * We don't need a transaction envelope because we know that the + * the ondisk-dquot has already been allocated for. 
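+ * (The call below passes a NULL transaction and no XFS_QMOPT_DQALLOC,
+ * so xfs_qm_dqtobp() can only map an existing block, never allocate.)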
+ */ + if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) { + xfs_dqtrace_entry(dqp, "DQTOBP FAIL"); + ASSERT(error != ENOENT); + /* + * Quotas could have gotten turned off (ESRCH) + */ + xfs_dqfunlock(dqp); + return (error); + } + + if (xfs_qm_dqcheck(&dqp->q_core, INT_GET(ddqp->d_id, ARCH_CONVERT), 0, XFS_QMOPT_DOWARN, + "dqflush (incore copy)")) { + xfs_force_shutdown(dqp->q_mount, XFS_CORRUPT_INCORE); + return XFS_ERROR(EIO); + } + + /* This is the only portion of data that needs to persist */ + memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t)); + + /* + * Clear the dirty field and remember the flush lsn for later use. + */ + dqp->dq_flags &= ~(XFS_DQ_DIRTY); + mp = dqp->q_mount; + + /* lsn is 64 bits */ + AIL_LOCK(mp, s); + dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn; + AIL_UNLOCK(mp, s); + + /* + * Attach an iodone routine so that we can remove this dquot from the + * AIL and release the flush lock once the dquot is synced to disk. + */ + xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *)) + xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item)); + /* + * If the buffer is pinned then push on the log so we won't + * get stuck waiting in the write for too long. + */ + if (XFS_BUF_ISPINNED(bp)) { + xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE"); + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + } + + if (flags & XFS_QMOPT_DELWRI) { + xfs_bdwrite(mp, bp); + } else if (flags & XFS_QMOPT_ASYNC) { + xfs_bawrite(mp, bp); + } else { + error = xfs_bwrite(mp, bp); + } + xfs_dqtrace_entry(dqp, "DQFLUSH END"); + /* + * dqp is still locked, but caller is free to unlock it now. + */ + return (error); + +} + +/* + * This is the dquot flushing I/O completion routine. It is called + * from interrupt level when the buffer containing the dquot is + * flushed to disk. It is responsible for removing the dquot logitem + * from the AIL if it has not been re-logged, and unlocking the dquot's + * flush lock. This behavior is very similar to that of inodes.. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_dqflush_done( + xfs_buf_t *bp, + xfs_dq_logitem_t *qip) +{ + xfs_dquot_t *dqp; + SPLDECL(s); + + dqp = qip->qli_dquot; + + /* + * We only want to pull the item from the AIL if its + * location in the log has not changed since we started the flush. + * Thus, we only bother if the dquot's lsn has + * not changed. First we check the lsn outside the lock + * since it's cheaper, and then we recheck while + * holding the lock before removing the dquot from the AIL. + */ + if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && + qip->qli_item.li_lsn == qip->qli_flush_lsn) { + + AIL_LOCK(dqp->q_mount, s); + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + if (qip->qli_item.li_lsn == qip->qli_flush_lsn) + xfs_trans_delete_ail(dqp->q_mount, + (xfs_log_item_t*)qip, s); + else + AIL_UNLOCK(dqp->q_mount, s); + } + + /* + * Release the dq's flush lock since we're done with it. 
+ */ + xfs_dqfunlock(dqp); +} + + +int +xfs_qm_dqflock_nowait( + xfs_dquot_t *dqp) +{ + int locked; + + locked = cpsema(&((dqp)->q_flock)); + + /* XXX ifdef these out */ + if (locked) + (dqp)->dq_flags |= XFS_DQ_FLOCKED; + return (locked); +} + + +int +xfs_qm_dqlock_nowait( + xfs_dquot_t *dqp) +{ + return (mutex_trylock(&((dqp)->q_qlock))); +} + +void +xfs_dqlock( + xfs_dquot_t *dqp) +{ + mutex_lock(&(dqp->q_qlock), PINOD); +} + +void +xfs_dqunlock( + xfs_dquot_t *dqp) +{ + mutex_unlock(&(dqp->q_qlock)); + if (dqp->q_logitem.qli_dquot == dqp) { + /* Once was dqp->q_mount, but might just have been cleared */ + xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_mountp, + (xfs_log_item_t*)&(dqp->q_logitem)); + } +} + + +void +xfs_dqunlock_nonotify( + xfs_dquot_t *dqp) +{ + mutex_unlock(&(dqp->q_qlock)); +} + +void +xfs_dqlock2( + xfs_dquot_t *d1, + xfs_dquot_t *d2) +{ + if (d1 && d2) { + ASSERT(d1 != d2); + if (INT_GET(d1->q_core.d_id, ARCH_CONVERT) > INT_GET(d2->q_core.d_id, ARCH_CONVERT)) { + xfs_dqlock(d2); + xfs_dqlock(d1); + } else { + xfs_dqlock(d1); + xfs_dqlock(d2); + } + } else { + if (d1) { + xfs_dqlock(d1); + } else if (d2) { + xfs_dqlock(d2); + } + } +} + + +/* + * Take a dquot out of the mount's dqlist as well as the hashlist. + * This is called via unmount as well as quotaoff, and the purge + * will always succeed unless there are soft (temp) references + * outstanding. + * + * This returns 0 if it was purged, 1 if it wasn't. It's not an error code + * that we're returning! XXXsup - not cool. + */ +/* ARGSUSED */ +int +xfs_qm_dqpurge( + xfs_dquot_t *dqp, + uint flags) +{ + xfs_dqhash_t *thishash; + xfs_mount_t *mp; + + mp = dqp->q_mount; + + ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); + ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash)); + + xfs_dqlock(dqp); + /* + * We really can't afford to purge a dquot that is + * referenced, because these are hard refs. + * It shouldn't happen in general because we went thru _all_ inodes in + * dqrele_all_inodes before calling this and didn't let the mountlock go. + * However it is possible that we have dquots with temporary + * references that are not attached to an inode. e.g. see xfs_setattr(). + */ + if (dqp->q_nrefs != 0) { + xfs_dqunlock(dqp); + XFS_DQ_HASH_UNLOCK(dqp->q_hash); + return (1); + } + + ASSERT(XFS_DQ_IS_ON_FREELIST(dqp)); + + /* + * If we're turning off quotas, we have to make sure that, for + * example, we don't delete quota disk blocks while dquots are + * in the process of getting written to those disk blocks. + * This dquot might well be on AIL, and we can't leave it there + * if we're turning off quotas. Basically, we need this flush + * lock, and are willing to block on it. + */ + if (! xfs_qm_dqflock_nowait(dqp)) { + /* + * Block on the flush lock after nudging dquot buffer, + * if it is incore. + */ + xfs_qm_dqflock_pushbuf_wait(dqp); + } + + /* + * XXXIf we're turning this type of quotas off, we don't care + * about the dirty metadata sitting in this dquot. OTOH, if + * we're unmounting, we do care, so we flush it and wait. + */ + if (XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY"); + /* dqflush unlocks dqflock */ + /* + * Given that dqpurge is a very rare occurrence, it is OK + * that we're holding the hashlist and mplist locks + * across the disk write. But, ... XXXsup + * + * We don't care about getting disk errors here. We need + * to purge this dquot anyway, so we go ahead regardless. 
+ */
+ (void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
+ xfs_dqflock(dqp);
+ }
+ ASSERT(dqp->q_pincount == 0);
+ ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
+ !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
+
+ thishash = dqp->q_hash;
+ XQM_HASHLIST_REMOVE(thishash, dqp);
+ XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
+ /*
+ * XXX Move this to the front of the freelist, if we can get the
+ * freelist lock.
+ */
+ ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
+
+ dqp->q_mount = NULL;
+ dqp->q_hash = NULL;
+ dqp->dq_flags = XFS_DQ_INACTIVE;
+ memset(&dqp->q_core, 0, sizeof(dqp->q_core));
+ xfs_dqfunlock(dqp);
+ xfs_dqunlock(dqp);
+ XFS_DQ_HASH_UNLOCK(thishash);
+ return (0);
+}
+
+
+#ifdef QUOTADEBUG
+void
+xfs_qm_dqprint(xfs_dquot_t *dqp)
+{
+ cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------");
+ cmn_err(CE_DEBUG, "---- dquotID = %d",
+ (int)INT_GET(dqp->q_core.d_id, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "---- type = %s",
+ XFS_QM_ISUDQ(dqp) ? "USR" : "GRP");
+ cmn_err(CE_DEBUG, "---- fs = 0x%p", dqp->q_mount);
+ cmn_err(CE_DEBUG, "---- blkno = 0x%x", (int) dqp->q_blkno);
+ cmn_err(CE_DEBUG, "---- boffset = 0x%x", (int) dqp->q_bufoffset);
+ cmn_err(CE_DEBUG, "---- blkhlimit = %Lu (0x%x)",
+ INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT),
+ (int) INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "---- blkslimit = %Lu (0x%x)",
+ INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT),
+ (int)INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "---- inohlimit = %Lu (0x%x)",
+ INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT),
+ (int)INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "---- inoslimit = %Lu (0x%x)",
+ INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT),
+ (int)INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)",
+ INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT),
+ (int)INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)",
+ INT_GET(dqp->q_core.d_icount, ARCH_CONVERT),
+ (int)INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "---- btimer = %d",
+ (int)INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "---- itimer = %d",
+ (int)INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "---------------------------");
+}
+#endif
+
+/*
+ * Give the buffer a little push if it is incore and
+ * wait on the flush lock.
+ */
+void
+xfs_qm_dqflock_pushbuf_wait(
+ xfs_dquot_t *dqp)
+{
+ xfs_buf_t *bp;
+
+ /*
+ * Check to see if the dquot has been flushed delayed
+ * write. If so, grab its buffer and send it
+ * out immediately. We'll be able to acquire
+ * the flush lock when the I/O completes.
+ */
+ bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
+ XFS_QI_DQCHUNKLEN(dqp->q_mount),
+ XFS_INCORE_TRYLOCK);
+ if (bp != NULL) {
+ if (XFS_BUF_ISDELAYWRITE(bp)) {
+ if (XFS_BUF_ISPINNED(bp)) {
+ xfs_log_force(dqp->q_mount,
+ (xfs_lsn_t)0,
+ XFS_LOG_FORCE);
+ }
+ xfs_bawrite(dqp->q_mount, bp);
+ } else {
+ xfs_buf_relse(bp);
+ }
+ }
+ xfs_dqflock(dqp);
+} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_dquot.h linux.21rc5-ac2/fs/xfs/quota/xfs_dquot.h --- linux.21rc5/fs/xfs/quota/xfs_dquot.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_dquot.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,220 @@ +/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DQUOT_H__ +#define __XFS_DQUOT_H__ + +/* + * Dquots are structures that hold quota information about a user or a group, + * much like inodes are for files. In fact, dquots share many characteristics + * with inodes. However, dquots can also be a centralized resource, relative + * to a collection of inodes. In this respect, dquots share some characteristics + * of the superblock. + * XFS dquots exploit both those in its algorithms. They make every attempt + * to not be a bottleneck when quotas are on and have minimal impact, if any, + * when quotas are off. + */ + +/* + * The hash chain headers (hash buckets) + */ +typedef struct xfs_dqhash { + struct xfs_dquot *qh_next; + mutex_t qh_lock; + uint qh_version; /* ever increasing version */ + uint qh_nelems; /* number of dquots on the list */ +} xfs_dqhash_t; + +typedef struct xfs_dqlink { + struct xfs_dquot *ql_next; /* forward link */ + struct xfs_dquot **ql_prevp; /* pointer to prev ql_next */ +} xfs_dqlink_t; + +struct xfs_mount; +struct xfs_trans; + +/* + * This is the marker which is designed to occupy the first few + * bytes of the xfs_dquot_t structure. Even inside this, the freelist pointers + * must come first. + * This serves as the marker ("sentinel") when we have to restart list + * iterations because of locking considerations. 
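+ * A list walker can, for instance, park a marker after the current
+ * dquot before dropping the list lock and pick up from the marker
+ * once the lock is re-taken; only the links and dqm_flags of such
+ * a marker are ever looked at.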
+ */ +typedef struct xfs_dqmarker { + struct xfs_dquot*dqm_flnext; /* link to freelist: must be first */ + struct xfs_dquot*dqm_flprev; + xfs_dqlink_t dqm_mplist; /* link to mount's list of dquots */ + xfs_dqlink_t dqm_hashlist; /* link to the hash chain */ + uint dqm_flags; /* various flags (XFS_DQ_*) */ +} xfs_dqmarker_t; + +/* + * The incore dquot structure + */ +typedef struct xfs_dquot { + xfs_dqmarker_t q_lists; /* list ptrs, q_flags (marker) */ + xfs_dqhash_t *q_hash; /* the hashchain header */ + struct xfs_mount*q_mount; /* filesystem this relates to */ + struct xfs_trans*q_transp; /* trans this belongs to currently */ + uint q_nrefs; /* # active refs from inodes */ + xfs_daddr_t q_blkno; /* blkno of dquot buffer */ + int q_bufoffset; /* off of dq in buffer (# dquots) */ + xfs_fileoff_t q_fileoffset; /* offset in quotas file */ + + struct xfs_dquot*q_gdquot; /* group dquot, hint only */ + xfs_disk_dquot_t q_core; /* actual usage & quotas */ + xfs_dq_logitem_t q_logitem; /* dquot log item */ + xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */ + xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */ + xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */ + mutex_t q_qlock; /* quota lock */ + sema_t q_flock; /* flush lock */ + uint q_pincount; /* pin count for this dquot */ + sv_t q_pinwait; /* sync var for pinning */ +#ifdef DQUOT_TRACING + struct ktrace *q_trace; /* trace header structure */ +#endif +} xfs_dquot_t; + + +#define dq_flnext q_lists.dqm_flnext +#define dq_flprev q_lists.dqm_flprev +#define dq_mplist q_lists.dqm_mplist +#define dq_hashlist q_lists.dqm_hashlist +#define dq_flags q_lists.dqm_flags + +#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++) + +/* + * Quota Accounting flags + */ +#define XFS_ALL_QUOTA_ACCT (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT) +#define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_GQUOTA_ENFD) +#define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_GQUOTA_CHKD) +#define XFS_ALL_QUOTA_ACTV (XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE) +#define XFS_ALL_QUOTA_ACCT_ENFD (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ + XFS_GQUOTA_ACCT|XFS_GQUOTA_ENFD) + +#define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT) +#define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT) +#define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT) + +/* + * Quota Limit Enforcement flags + */ +#define XFS_IS_QUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ENFD) +#define XFS_IS_UQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_UQUOTA_ENFD) +#define XFS_IS_GQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_GQUOTA_ENFD) + +#ifdef DEBUG +static inline int +XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp) +{ + if (mutex_trylock(&dqp->q_qlock)) { + mutex_unlock(&dqp->q_qlock); + return 0; + } + return 1; +} +#endif + + +/* + * The following three routines simply manage the q_flock + * semaphore embedded in the dquot. This semaphore synchronizes + * processes attempting to flush the in-core dquot back to disk. 
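XFS_DQ_IS_LOCKED above answers "is this mutex held?" by trylocking it: success means the lock was free (so it is dropped again at once), failure means somebody holds it. The same debug-only trick in plain pthreads, as a hedged sketch:

#include <pthread.h>

/* Debug heuristic in the spirit of XFS_DQ_IS_LOCKED: fine for ASSERTs,
 * never for locking decisions, since a failed trylock cannot say *who*
 * holds the mutex. */
static int debug_mutex_is_locked(pthread_mutex_t *m)
{
	if (pthread_mutex_trylock(m) == 0) {
		pthread_mutex_unlock(m);
		return 0;		/* it was free */
	}
	return 1;			/* somebody holds it */
}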
+ */ +#define xfs_dqflock(dqp) { psema(&((dqp)->q_flock), PINOD | PRECALC);\ + (dqp)->dq_flags |= XFS_DQ_FLOCKED; } +#define xfs_dqfunlock(dqp) { ASSERT(valusema(&((dqp)->q_flock)) <= 0); \ + vsema(&((dqp)->q_flock)); \ + (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); } + +#define XFS_DQ_PINLOCK(dqp) mutex_spinlock( \ + &(XFS_DQ_TO_QINF(dqp)->qi_pinlock)) +#define XFS_DQ_PINUNLOCK(dqp, s) mutex_spinunlock( \ + &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s) + +#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (valusema(&((dqp)->q_flock)) <= 0) +#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) +#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) +#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) +#define XFS_DQ_TO_QINF(dqp) ((dqp)->q_mount->m_quotainfo) +#define XFS_DQ_TO_QIP(dqp) (XFS_QM_ISUDQ(dqp) ? \ + XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \ + XFS_DQ_TO_QINF(dqp)->qi_gquotaip) + +#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \ + (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ + (XFS_IS_GQUOTA_ON((d)->q_mount)))) +#ifdef DQUOT_TRACING +/* + * Dquot Tracing stuff. + */ +#define DQUOT_TRACE_SIZE 64 +#define DQUOT_KTRACE_ENTRY 1 + +#define xfs_dqtrace_entry_ino(a,b,ip) \ +xfs_dqtrace_entry__((a), (b), (void*)__return_address, (ip)) +#define xfs_dqtrace_entry(a,b) \ +xfs_dqtrace_entry__((a), (b), (void*)__return_address, NULL) +extern void xfs_dqtrace_entry__(xfs_dquot_t *dqp, char *func, + void *, xfs_inode_t *); +#else +#define xfs_dqtrace_entry(a,b) +#define xfs_dqtrace_entry_ino(a,b,ip) +#endif +#ifdef QUOTADEBUG +extern void xfs_qm_dqprint(xfs_dquot_t *); +#else +#define xfs_qm_dqprint(a) +#endif + +extern void xfs_qm_dqdestroy(xfs_dquot_t *); +extern int xfs_qm_dqflush(xfs_dquot_t *, uint); +extern int xfs_qm_dqpurge(xfs_dquot_t *, uint); +extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); +extern int xfs_qm_dqlock_nowait(xfs_dquot_t *); +extern int xfs_qm_dqflock_nowait(xfs_dquot_t *); +extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp); +extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, + xfs_disk_dquot_t *); +extern int xfs_qm_dqwarn(xfs_disk_dquot_t *, uint); +extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, + xfs_dqid_t, uint, uint, xfs_dquot_t **); +extern void xfs_qm_dqput(xfs_dquot_t *); +extern void xfs_qm_dqrele(xfs_dquot_t *); +extern void xfs_dqlock(xfs_dquot_t *); +extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *); +extern void xfs_dqunlock(xfs_dquot_t *); +extern void xfs_dqunlock_nonotify(xfs_dquot_t *); + +#endif /* __XFS_DQUOT_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_dquot_item.c linux.21rc5-ac2/fs/xfs/quota/xfs_dquot_item.c --- linux.21rc5/fs/xfs/quota/xfs_dquot_item.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_dquot_item.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,715 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. 
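The xfs_dqflock/xfs_dqfunlock macros above implement the flush lock as a counting semaphore initialised to 1, with valusema(...) <= 0 serving as the "currently held" test. A rough POSIX analogue, assuming Linux semantics where sem_getvalue() reports 0 while the semaphore is held (the names and the flag bit are illustrative only):

#include <semaphore.h>
#include <assert.h>

#define DQ_FLOCKED	0x1	/* stands in for XFS_DQ_FLOCKED */

static void dq_flock(sem_t *flock, unsigned int *flags)
{
	sem_wait(flock);		/* psema() analogue */
	*flags |= DQ_FLOCKED;
}

static void dq_funlock(sem_t *flock, unsigned int *flags)
{
	int val;

	sem_getvalue(flock, &val);	/* valusema() analogue */
	assert(val <= 0);		/* must be held to unlock */
	*flags &= ~DQ_FLOCKED;
	sem_post(flock);		/* vsema() analogue */
}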
Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_priv.h" + +#include "xfs_qm.h" + + +/* + * returns the number of iovecs needed to log the given dquot item. + */ +/* ARGSUSED */ +STATIC uint +xfs_qm_dquot_logitem_size( + xfs_dq_logitem_t *logitem) +{ + /* + * we need only two iovecs, one for the format, one for the real thing + */ + return (2); +} + +/* + * fills in the vector of log iovecs for the given dquot log item. + */ +STATIC void +xfs_qm_dquot_logitem_format( + xfs_dq_logitem_t *logitem, + xfs_log_iovec_t *logvec) +{ + ASSERT(logitem); + ASSERT(logitem->qli_dquot); + + logvec->i_addr = (xfs_caddr_t)&logitem->qli_format; + logvec->i_len = sizeof(xfs_dq_logformat_t); + logvec++; + logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core; + logvec->i_len = sizeof(xfs_disk_dquot_t); + + ASSERT(2 == logitem->qli_item.li_desc->lid_size); + logitem->qli_format.qlf_size = 2; + +} + +/* + * Increment the pin count of the given dquot. + * This value is protected by pinlock spinlock in the xQM structure. + */ +STATIC void +xfs_qm_dquot_logitem_pin( + xfs_dq_logitem_t *logitem) +{ + unsigned long s; + xfs_dquot_t *dqp; + + dqp = logitem->qli_dquot; + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + s = XFS_DQ_PINLOCK(dqp); + dqp->q_pincount++; + XFS_DQ_PINUNLOCK(dqp, s); +} + +/* + * Decrement the pin count of the given dquot, and wake up + * anyone in xfs_dqwait_unpin() if the count goes to 0. The + * dquot must have been previously pinned with a call to xfs_dqpin(). 
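The pin/unpin pair above is a counter under a spinlock plus a broadcast when the count reaches zero, which is what xfs_qm_dqunpin_wait() sleeps on. A condition-variable sketch of the same protocol, with invented names:

#include <pthread.h>

struct pin_state {
	pthread_mutex_t lock;		/* qi_pinlock analogue */
	pthread_cond_t	unpinned;	/* q_pinwait analogue */
	unsigned int	pincount;	/* q_pincount analogue */
};

static void item_pin(struct pin_state *p)
{
	pthread_mutex_lock(&p->lock);
	p->pincount++;
	pthread_mutex_unlock(&p->lock);
}

static void item_unpin(struct pin_state *p)
{
	pthread_mutex_lock(&p->lock);
	if (--p->pincount == 0)
		pthread_cond_broadcast(&p->unpinned);	/* sv_broadcast */
	pthread_mutex_unlock(&p->lock);
}

/* xfs_qm_dqunpin_wait analogue: block until no pins remain. */
static void item_unpin_wait(struct pin_state *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->pincount != 0)
		pthread_cond_wait(&p->unpinned, &p->lock);
	pthread_mutex_unlock(&p->lock);
}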
+ */ +/* ARGSUSED */ +STATIC void +xfs_qm_dquot_logitem_unpin( + xfs_dq_logitem_t *logitem, + int stale) +{ + unsigned long s; + xfs_dquot_t *dqp; + + dqp = logitem->qli_dquot; + ASSERT(dqp->q_pincount > 0); + s = XFS_DQ_PINLOCK(dqp); + dqp->q_pincount--; + if (dqp->q_pincount == 0) { + sv_broadcast(&dqp->q_pinwait); + } + XFS_DQ_PINUNLOCK(dqp, s); +} + +/* ARGSUSED */ +STATIC void +xfs_qm_dquot_logitem_unpin_remove( + xfs_dq_logitem_t *logitem, + xfs_trans_t *tp) +{ + xfs_qm_dquot_logitem_unpin(logitem, 0); +} + +/* + * Given the logitem, this writes the corresponding dquot entry to disk + * asynchronously. This is called with the dquot entry securely locked; + * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot + * at the end. + */ +STATIC void +xfs_qm_dquot_logitem_push( + xfs_dq_logitem_t *logitem) +{ + xfs_dquot_t *dqp; + + dqp = logitem->qli_dquot; + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); + + /* + * Since we were able to lock the dquot's flush lock and + * we found it on the AIL, the dquot must be dirty. This + * is because the dquot is removed from the AIL while still + * holding the flush lock in xfs_dqflush_done(). Thus, if + * we found it in the AIL and were able to obtain the flush + * lock without sleeping, then there must not have been + * anyone in the process of flushing the dquot. + */ + xfs_qm_dqflush(dqp, XFS_B_DELWRI); + xfs_dqunlock(dqp); +} + +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_qm_dquot_logitem_committed( + xfs_dq_logitem_t *l, + xfs_lsn_t lsn) +{ + /* + * We always re-log the entire dquot when it becomes dirty, + * so, the latest copy _is_ the only one that matters. + */ + return (lsn); +} + + +/* + * This is called to wait for the given dquot to be unpinned. + * Most of these pin/unpin routines are plagiarized from inode code. + */ +void +xfs_qm_dqunpin_wait( + xfs_dquot_t *dqp) +{ + SPLDECL(s); + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + if (dqp->q_pincount == 0) { + return; + } + + /* + * Give the log a push so we don't wait here too long. + */ + xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE); + s = XFS_DQ_PINLOCK(dqp); + if (dqp->q_pincount == 0) { + XFS_DQ_PINUNLOCK(dqp, s); + return; + } + sv_wait(&(dqp->q_pinwait), PINOD, + &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s); +} + +/* + * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that + * the dquot is locked by us, but the flush lock isn't. So, here we are + * going to see if the relevant dquot buffer is incore, waiting on DELWRI. + * If so, we want to push it out to help us take this item off the AIL as soon + * as possible. + * + * We must not be holding the AIL_LOCK at this point. Calling incore() to + * search the buffercache can be a time consuming thing, and AIL_LOCK is a + * spinlock. + */ +STATIC void +xfs_qm_dquot_logitem_pushbuf( + xfs_dq_logitem_t *qip) +{ + xfs_dquot_t *dqp; + xfs_mount_t *mp; + xfs_buf_t *bp; + uint dopush; + + dqp = qip->qli_dquot; + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + + /* + * The qli_pushbuf_flag keeps others from + * trying to duplicate our effort. + */ + ASSERT(qip->qli_pushbuf_flag != 0); + ASSERT(qip->qli_push_owner == get_thread_id()); + + /* + * If flushlock isn't locked anymore, chances are that the + * inode flush completed and the inode was taken off the AIL. + * So, just get out. 
+ */ + if ((valusema(&(dqp->q_flock)) > 0) || + ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) { + qip->qli_pushbuf_flag = 0; + xfs_dqunlock(dqp); + return; + } + mp = dqp->q_mount; + bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno, + XFS_QI_DQCHUNKLEN(mp), + XFS_INCORE_TRYLOCK); + if (bp != NULL) { + if (XFS_BUF_ISDELAYWRITE(bp)) { + dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && + (valusema(&(dqp->q_flock)) <= 0)); + qip->qli_pushbuf_flag = 0; + xfs_dqunlock(dqp); + + if (XFS_BUF_ISPINNED(bp)) { + xfs_log_force(mp, (xfs_lsn_t)0, + XFS_LOG_FORCE); + } + if (dopush) { +#ifdef XFSRACEDEBUG + delay_for_intr(); + delay(300); +#endif + xfs_bawrite(mp, bp); + } else { + xfs_buf_relse(bp); + } + } else { + qip->qli_pushbuf_flag = 0; + xfs_dqunlock(dqp); + xfs_buf_relse(bp); + } + return; + } + + qip->qli_pushbuf_flag = 0; + xfs_dqunlock(dqp); +} + +/* + * This is called to attempt to lock the dquot associated with this + * dquot log item. Don't sleep on the dquot lock or the flush lock. + * If the flush lock is already held, indicating that the dquot has + * been or is in the process of being flushed, then see if we can + * find the dquot's buffer in the buffer cache without sleeping. If + * we can and it is marked delayed write, then we want to send it out. + * We delay doing so until the push routine, though, to avoid sleeping + * in any device strategy routines. + */ +STATIC uint +xfs_qm_dquot_logitem_trylock( + xfs_dq_logitem_t *qip) +{ + xfs_dquot_t *dqp; + uint retval; + + dqp = qip->qli_dquot; + if (dqp->q_pincount > 0) + return (XFS_ITEM_PINNED); + + if (! xfs_qm_dqlock_nowait(dqp)) + return (XFS_ITEM_LOCKED); + + retval = XFS_ITEM_SUCCESS; + if (! xfs_qm_dqflock_nowait(dqp)) { + /* + * The dquot is already being flushed. It may have been + * flushed delayed write, however, and we don't want to + * get stuck waiting for that to complete. So, we want to check + * to see if we can lock the dquot's buffer without sleeping. + * If we can and it is marked for delayed write, then we + * hold it and send it out from the push routine. We don't + * want to do that now since we might sleep in the device + * strategy routine. We also don't want to grab the buffer lock + * here because we'd like not to call into the buffer cache + * while holding the AIL_LOCK. + * Make sure to only return PUSHBUF if we set pushbuf_flag + * ourselves. If someone else is doing it then we don't + * want to go to the push routine and duplicate their efforts. + */ + if (qip->qli_pushbuf_flag == 0) { + qip->qli_pushbuf_flag = 1; + ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno); +#ifdef DEBUG + qip->qli_push_owner = get_thread_id(); +#endif + /* + * The dquot is left locked. + */ + retval = XFS_ITEM_PUSHBUF; + } else { + retval = XFS_ITEM_FLUSHING; + xfs_dqunlock_nonotify(dqp); + } + } + + ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL); + return (retval); +} + + +/* + * Unlock the dquot associated with the log item. + * Clear the fields of the dquot and dquot log item that + * are specific to the current transaction. If the + * hold flags is set, do not unlock the dquot. + */ +STATIC void +xfs_qm_dquot_logitem_unlock( + xfs_dq_logitem_t *ql) +{ + xfs_dquot_t *dqp; + + ASSERT(ql != NULL); + dqp = ql->qli_dquot; + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + + /* + * Clear the transaction pointer in the dquot + */ + dqp->q_transp = NULL; + + /* + * dquots are never 'held' from getting unlocked at the end of + * a transaction. 
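xfs_qm_dquot_logitem_trylock() above compresses the whole AIL-push decision into one return code. The sketch below restates that state machine with stubbed lock primitives; the enum values mirror the XFS_ITEM_* outcomes described in the comments, and everything else is invented for illustration:

/* Stub state and primitives standing in for the real dquot. */
struct dq_demo {
	unsigned int	pincount;	/* q_pincount analogue */
	int		pushbuf_flag;	/* qli_pushbuf_flag analogue */
	int		locked;
	int		flush_locked;
};

static int dq_lock_nowait(struct dq_demo *d)
{
	if (d->locked)
		return 0;
	d->locked = 1;
	return 1;
}

static int dq_flock_nowait(struct dq_demo *d)
{
	if (d->flush_locked)
		return 0;
	d->flush_locked = 1;
	return 1;
}

static void dq_unlock(struct dq_demo *d)
{
	d->locked = 0;
}

enum item_trylock {
	ITEM_SUCCESS,	/* locked and flush-locked: caller may flush */
	ITEM_PINNED,	/* still pinned by the log: force the log first */
	ITEM_LOCKED,	/* dquot lock contended: skip this item */
	ITEM_PUSHBUF,	/* flush in flight: we now own the buffer push */
	ITEM_FLUSHING,	/* flush in flight: someone else owns the push */
};

static enum item_trylock dq_trylock(struct dq_demo *dqp)
{
	if (dqp->pincount > 0)
		return ITEM_PINNED;
	if (!dq_lock_nowait(dqp))
		return ITEM_LOCKED;
	if (dq_flock_nowait(dqp))
		return ITEM_SUCCESS;
	if (!dqp->pushbuf_flag) {
		/* Claim pushbuf duty; the dquot stays locked. */
		dqp->pushbuf_flag = 1;
		return ITEM_PUSHBUF;
	}
	/* The real code uses a no-notify unlock here. */
	dq_unlock(dqp);
	return ITEM_FLUSHING;
}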
Their locking and unlocking is hidden inside the + * transaction layer, within trans_commit. Hence, no LI_HOLD flag + * for the logitem. + */ + xfs_dqunlock(dqp); +} + + +/* + * The transaction with the dquot locked has aborted. The dquot + * must not be dirty within the transaction. We simply unlock just + * as if the transaction had been cancelled. + */ +STATIC void +xfs_qm_dquot_logitem_abort( + xfs_dq_logitem_t *ql) +{ + xfs_qm_dquot_logitem_unlock(ql); +} + +/* + * this needs to stamp an lsn into the dquot, I think. + * rpc's that look at user dquot's would then have to + * push on the dependency recorded in the dquot + */ +/* ARGSUSED */ +STATIC void +xfs_qm_dquot_logitem_committing( + xfs_dq_logitem_t *l, + xfs_lsn_t lsn) +{ + return; +} + + +/* + * This is the ops vector for dquots + */ +struct xfs_item_ops xfs_dquot_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_qm_dquot_logitem_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int)) + xfs_qm_dquot_logitem_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) + xfs_qm_dquot_logitem_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*)) + xfs_qm_dquot_logitem_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_dquot_logitem_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_abort, + .iop_pushbuf = (void(*)(xfs_log_item_t*)) + xfs_qm_dquot_logitem_pushbuf, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_dquot_logitem_committing +}; + +/* + * Initialize the dquot log item for a newly allocated dquot. + * The dquot isn't locked at this point, but it isn't on any of the lists + * either, so we don't care. + */ +void +xfs_qm_dquot_logitem_init( + struct xfs_dquot *dqp) +{ + xfs_dq_logitem_t *lp; + lp = &dqp->q_logitem; + + lp->qli_item.li_type = XFS_LI_DQUOT; + lp->qli_item.li_ops = &xfs_dquot_item_ops; + lp->qli_item.li_mountp = dqp->q_mount; + lp->qli_dquot = dqp; + lp->qli_format.qlf_type = XFS_LI_DQUOT; + lp->qli_format.qlf_id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT); + lp->qli_format.qlf_blkno = dqp->q_blkno; + lp->qli_format.qlf_len = 1; + /* + * This is just the offset of this dquot within its buffer + * (which is currently 1 FSB and probably won't change). + * Hence 32 bits for this offset should be just fine. + * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t)) + * here, and recompute it at recovery time. + */ + lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset; +} + +/*------------------ QUOTAOFF LOG ITEMS -------------------*/ + +/* + * This returns the number of iovecs needed to log the given quotaoff item. + * We only need 1 iovec for an quotaoff item. It just logs the + * quotaoff_log_format structure. + */ +/*ARGSUSED*/ +STATIC uint +xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf) +{ + return (1); +} + +/* + * This is called to fill in the vector of log iovecs for the + * given quotaoff log item. We use only 1 iovec, and we point that + * at the quotaoff_log_format structure embedded in the quotaoff item. + * It is at this point that we assert that all of the extent + * slots in the quotaoff item have been filled. 
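The xfs_item_ops table above is the classic C "ops vector" (vtable) idiom: one static struct of function pointers per item type, shared by every instance and dispatched through blindly by generic code. A stripped-down model of the pattern:

#include <stdio.h>

struct log_item;

struct item_ops {
	unsigned (*iop_size)(struct log_item *);
	void	 (*iop_push)(struct log_item *);
};

struct log_item {
	const struct item_ops *li_ops;	/* set once at init time */
	int li_type;
};

static unsigned demo_size(struct log_item *li)
{
	(void)li;
	return 2;			/* e.g. format + payload iovecs */
}

static void demo_push(struct log_item *li)
{
	printf("pushing item of type %d\n", li->li_type);
}

static const struct item_ops demo_ops = {
	.iop_size = demo_size,
	.iop_push = demo_push,
};

/* Generic code dispatches through the vector, never on the type. */
static void dispatch(struct log_item *li)
{
	printf("item %d needs %u iovecs\n", li->li_type,
	       li->li_ops->iop_size(li));
	li->li_ops->iop_push(li);
}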
+ */ +STATIC void +xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf, + xfs_log_iovec_t *log_vector) +{ + ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF); + + log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format); + log_vector->i_len = sizeof(xfs_qoff_logitem_t); + qf->qql_format.qf_size = 1; +} + + +/* + * Pinning has no meaning for an quotaoff item, so just return. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf) +{ + return; +} + + +/* + * Since pinning has no meaning for an quotaoff item, unpinning does + * not either. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale) +{ + return; +} + +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp) +{ + return; +} + +/* + * Quotaoff items have no locking, so just return success. + */ +/*ARGSUSED*/ +STATIC uint +xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf) +{ + return XFS_ITEM_LOCKED; +} + +/* + * Quotaoff items have no locking or pushing, so return failure + * so that the caller doesn't bother with us. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf) +{ + return; +} + +/* + * The quotaoff-start-item is logged only once and cannot be moved in the log, + * so simply return the lsn at which it's been logged. + */ +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn) +{ + return (lsn); +} + +/* + * The transaction of which this QUOTAOFF is a part has been aborted. + * Just clean up after ourselves. + * Shouldn't this never happen in the case of qoffend logitems? XXX + */ +STATIC void +xfs_qm_qoff_logitem_abort(xfs_qoff_logitem_t *qf) +{ + kmem_free(qf, sizeof(xfs_qoff_logitem_t)); +} + +/* + * There isn't much you can do to push on an quotaoff item. It is simply + * stuck waiting for the log to be flushed to disk. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf) +{ + return; +} + + +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_qm_qoffend_logitem_committed( + xfs_qoff_logitem_t *qfe, + xfs_lsn_t lsn) +{ + xfs_qoff_logitem_t *qfs; + SPLDECL(s); + + qfs = qfe->qql_start_lip; + AIL_LOCK(qfs->qql_item.li_mountp,s); + /* + * Delete the qoff-start logitem from the AIL. + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs, s); + kmem_free(qfs, sizeof(xfs_qoff_logitem_t)); + kmem_free(qfe, sizeof(xfs_qoff_logitem_t)); + return (xfs_lsn_t)-1; +} + +/* + * XXX rcc - don't know quite what to do with this. I think we can + * just ignore it. The only time that isn't the case is if we allow + * the client to somehow see that quotas have been turned off in which + * we can't allow that to get back until the quotaoff hits the disk. + * So how would that happen? Also, do we need different routines for + * quotaoff start and quotaoff end? I suspect the answer is yes but + * to be sure, I need to look at the recovery code and see how quota off + * recovery is handled (do we roll forward or back or do something else). + * If we roll forwards or backwards, then we need two separate routines, + * one that does nothing and one that stamps in the lsn that matters + * (truly makes the quotaoff irrevocable). If we do something else, + * then maybe we don't need two. 
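The start/end pairing above has a simple shape once isolated: the quotaoff-start item lingers in the AIL until the matching end item commits, and the end item's committed callback then deletes and frees both. A hedged miniature of that ownership handoff (no real AIL, all names invented):

#include <stdlib.h>

struct qoff_demo {
	struct qoff_demo *start;	/* qql_start_lip analogue; NULL on a start item */
	int in_ail;
};

/* Mirrors the shape of xfs_qm_qoff_logitem_init(): an end item is
 * created holding a pointer to its start item. */
static struct qoff_demo *qoff_init(struct qoff_demo *start)
{
	struct qoff_demo *qf = calloc(1, sizeof(*qf));

	if (qf)
		qf->start = start;
	return qf;
}

/* Mirrors xfs_qm_qoffend_logitem_committed(): only once the end item
 * commits may the start item leave the AIL, and then both die.
 * Assumes `end` really is an end item, so end->start is non-NULL. */
static void qoffend_committed(struct qoff_demo *end)
{
	end->start->in_ail = 0;		/* xfs_trans_delete_ail analogue */
	free(end->start);
	free(end);
}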
+ */ +/* ARGSUSED */ +STATIC void +xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn) +{ + return; +} + +/* ARGSUSED */ +STATIC void +xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn) +{ + return; +} + +struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_qm_qoff_logitem_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, + .iop_unpin = (void(*)(xfs_log_item_t* ,int)) + xfs_qm_qoff_logitem_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) + xfs_qm_qoff_logitem_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_qoffend_logitem_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_qoffend_logitem_committing +}; + +/* + * This is the ops vector shared by all quotaoff-start log items. + */ +struct xfs_item_ops xfs_qm_qoff_logitem_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_qm_qoff_logitem_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int)) + xfs_qm_qoff_logitem_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) + xfs_qm_qoff_logitem_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_qoff_logitem_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_qoff_logitem_committing +}; + +/* + * Allocate and initialize an quotaoff item of the correct quota type(s). + */ +xfs_qoff_logitem_t * +xfs_qm_qoff_logitem_init( + struct xfs_mount *mp, + xfs_qoff_logitem_t *start, + uint flags) +{ + xfs_qoff_logitem_t *qf; + + qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP); + + qf->qql_item.li_type = XFS_LI_QUOTAOFF; + if (start) + qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops; + else + qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops; + qf->qql_item.li_mountp = mp; + qf->qql_format.qf_type = XFS_LI_QUOTAOFF; + qf->qql_format.qf_flags = flags; + qf->qql_start_lip = start; + return (qf); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_dquot_item.h linux.21rc5-ac2/fs/xfs/quota/xfs_dquot_item.h --- linux.21rc5/fs/xfs/quota/xfs_dquot_item.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_dquot_item.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DQUOT_ITEM_H__ +#define __XFS_DQUOT_ITEM_H__ + +struct xfs_dquot; +struct xfs_trans; +struct xfs_mount; +struct xfs_qoff_logitem; + +typedef struct xfs_dq_logitem { + xfs_log_item_t qli_item; /* common portion */ + struct xfs_dquot *qli_dquot; /* dquot ptr */ + xfs_lsn_t qli_flush_lsn; /* lsn at last flush */ + unsigned short qli_pushbuf_flag; /* 1 bit used in push_ail */ +#ifdef DEBUG + uint64_t qli_push_owner; +#endif + xfs_dq_logformat_t qli_format; /* logged structure */ +} xfs_dq_logitem_t; + +typedef struct xfs_qoff_logitem { + xfs_log_item_t qql_item; /* common portion */ + struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */ + xfs_qoff_logformat_t qql_format; /* logged structure */ +} xfs_qoff_logitem_t; + + +extern void xfs_qm_dquot_logitem_init(struct xfs_dquot *); +extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *, + struct xfs_qoff_logitem *, uint); +extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *, + struct xfs_qoff_logitem *, uint); +extern void xfs_trans_log_quotaoff_item(struct xfs_trans *, + struct xfs_qoff_logitem *); + +#endif /* __XFS_DQUOT_ITEM_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_qm_bhv.c linux.21rc5-ac2/fs/xfs/quota/xfs_qm_bhv.c --- linux.21rc5/fs/xfs/quota/xfs_qm_bhv.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_qm_bhv.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
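The qli_item/qql_item members above sit first in their structures deliberately: the generic log code passes xfs_log_item_t pointers around, and the ops vectors in this patch cast them straight back to the specific item type, which is only well defined because the common part is the first member. A tiny model of the convention:

#include <stdio.h>

struct log_item {			/* generic part, always first */
	int li_type;
};

struct dq_log_item {
	struct log_item qli_item;	/* must be the first member */
	int		qli_private;
};

/* Generic code sees only struct log_item; specific code casts back.
 * C guarantees that a pointer to a struct and a pointer to its first
 * member compare equal, so this round-trip is well defined. */
static void handle_item(struct log_item *li)
{
	if (li->li_type == 1) {
		struct dq_log_item *qip = (struct dq_log_item *)li;
		printf("dq item, private=%d\n", qip->qli_private);
	}
}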
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_clnt.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" + +#include "xfs_qm.h" + +#define MNTOPT_QUOTA "quota" /* disk quotas (user) */ +#define MNTOPT_NOQUOTA "noquota" /* no quotas */ +#define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ +#define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ +#define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ +#define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ +#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ +#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ +#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ + +STATIC int +xfs_qm_parseargs( + struct bhv_desc *bhv, + char *options, + struct xfs_mount_args *args, + int update) +{ + size_t length; + char *local_options = options; + char *this_char; + int error; + int referenced = update; + + while ((this_char = strsep(&local_options, ",")) != NULL) { + length = strlen(this_char); + if (local_options) + length++; + + if (!strcmp(this_char, MNTOPT_NOQUOTA)) { + args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA); + args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA); + referenced = update; + } else if (!strcmp(this_char, MNTOPT_QUOTA) || + !strcmp(this_char, MNTOPT_UQUOTA) || + !strcmp(this_char, MNTOPT_USRQUOTA)) { + args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF; + referenced = 1; + } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || + !strcmp(this_char, MNTOPT_UQUOTANOENF)) { + args->flags |= XFSMNT_UQUOTA; + args->flags &= ~XFSMNT_UQUOTAENF; + referenced = 1; + } else if (!strcmp(this_char, MNTOPT_GQUOTA) || + !strcmp(this_char, MNTOPT_GRPQUOTA)) { + args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF; + referenced = 1; + } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { + args->flags |= XFSMNT_GQUOTA; + args->flags &= ~XFSMNT_GQUOTAENF; + referenced = 1; + } else { + if (local_options) + *(local_options-1) = ','; + continue; + } + + while (length--) + *this_char++ = ','; + } + + PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error); + if (!error && !referenced) + bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_QM); + return error; +} + +STATIC int +xfs_qm_showargs( + struct bhv_desc *bhv, + struct seq_file *m) +{ + struct vfs *vfsp = bhvtovfs(bhv); + struct xfs_mount *mp = 
XFS_VFSTOM(vfsp); + int error; + + if (mp->m_qflags & XFS_UQUOTA_ACCT) { + (mp->m_qflags & XFS_UQUOTA_ENFD) ? + seq_puts(m, "," MNTOPT_USRQUOTA) : + seq_puts(m, "," MNTOPT_UQUOTANOENF); + } + + if (mp->m_qflags & XFS_GQUOTA_ACCT) { + (mp->m_qflags & XFS_GQUOTA_ENFD) ? + seq_puts(m, "," MNTOPT_GRPQUOTA) : + seq_puts(m, "," MNTOPT_GQUOTANOENF); + } + + if (!(mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT))) + seq_puts(m, "," MNTOPT_NOQUOTA); + + PVFS_SHOWARGS(BHV_NEXT(bhv), m, error); + return error; +} + +STATIC int +xfs_qm_mount( + struct bhv_desc *bhv, + struct xfs_mount_args *args, + struct cred *cr) +{ + struct vfs *vfsp = bhvtovfs(bhv); + struct xfs_mount *mp = XFS_VFSTOM(vfsp); + int error; + + if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA)) + xfs_qm_mount_quotainit(mp, args->flags); + PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error); + return error; +} + +STATIC int +xfs_qm_syncall( + struct bhv_desc *bhv, + int flags, + cred_t *credp) +{ + struct vfs *vfsp = bhvtovfs(bhv); + struct xfs_mount *mp = XFS_VFSTOM(vfsp); + int error; + + /* + * Get the Quota Manager to flush the dquots. + */ + if (XFS_IS_QUOTA_ON(mp)) { + if ((error = xfs_qm_sync(mp, flags))) { + /* + * If we got an IO error, we will be shutting down. + * So, there's nothing more for us to do here. + */ + ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp)); + if (XFS_FORCED_SHUTDOWN(mp)) { + return XFS_ERROR(error); + } + } + } + PVFS_SYNC(BHV_NEXT(bhv), flags, credp, error); + return error; +} + +/* + * When xfsquotas isn't installed and the superblock had quotas, we need to + * clear the quotaflags from superblock. + */ +STATIC void +xfs_mount_reset_sbqflags( + xfs_mount_t *mp) +{ + xfs_trans_t *tp; + unsigned long s; + + mp->m_qflags = 0; + /* + * It is OK to look at sb_qflags here in mount path, + * without SB_LOCK. + */ + if (mp->m_sb.sb_qflags == 0) + return; + s = XFS_SB_LOCK(mp); + mp->m_sb.sb_qflags = 0; + XFS_SB_UNLOCK(mp, s); + + /* + * if the fs is readonly, let the incore superblock run + * with quotas off but don't flush the update out to disk + */ + if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY) + return; +#ifdef QUOTADEBUG + xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes"); +#endif + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); + if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, + XFS_DEFAULT_LOG_COUNT)) { + xfs_trans_cancel(tp, 0); + return; + } + xfs_mod_sb(tp, XFS_SB_QFLAGS); + xfs_trans_commit(tp, 0, NULL); +} + +STATIC int +xfs_qm_newmount( + xfs_mount_t *mp, + uint *needquotamount, + uint *quotaflags) +{ + uint quotaondisk; + uint uquotaondisk = 0, gquotaondisk = 0; + + *quotaflags = 0; + *needquotamount = B_FALSE; + + quotaondisk = XFS_SB_VERSION_HASQUOTA(&mp->m_sb) && + mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT); + + if (quotaondisk) { + uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT; + gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT; + } + + /* + * If the device itself is read-only, we can't allow + * the user to change the state of quota on the mount - + * this would generate a transaction on the ro device, + * which would lead to an I/O error and shutdown + */ + + if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) || + (!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) || + (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) || + (!gquotaondisk && XFS_IS_GQUOTA_ON(mp))) && + xfs_dev_is_read_only(mp, "changing quota state")) { + cmn_err(CE_WARN, + "XFS: please mount with%s%s%s.", + (!quotaondisk ? "out quota" : ""), + (uquotaondisk ? " usrquota" : ""), + (gquotaondisk ? 
" grpquota" : "")); + return XFS_ERROR(EPERM); + } + + if (XFS_IS_QUOTA_ON(mp) || quotaondisk) { + /* + * Call mount_quotas at this point only if we won't have to do + * a quotacheck. + */ + if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) { + /* + * If the xfs quota code isn't installed, + * we have to reset the quotachk'd bit. + * If an error occured, qm_mount_quotas code + * has already disabled quotas. So, just finish + * mounting, and get on with the boring life + * without disk quotas. + */ + if (xfs_qm_mount_quotas(mp)) + xfs_mount_reset_sbqflags(mp); + } else { + /* + * Clear the quota flags, but remember them. This + * is so that the quota code doesn't get invoked + * before we're ready. This can happen when an + * inode goes inactive and wants to free blocks, + * or via xfs_log_mount_finish. + */ + *needquotamount = B_TRUE; + *quotaflags = mp->m_qflags; + mp->m_qflags = 0; + } + } + + return 0; +} + +STATIC int +xfs_qm_endmount( + xfs_mount_t *mp, + uint needquotamount, + uint quotaflags) +{ + if (needquotamount) { + ASSERT(mp->m_qflags == 0); + mp->m_qflags = quotaflags; + if (xfs_qm_mount_quotas(mp)) + xfs_mount_reset_sbqflags(mp); + } + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + if (! (XFS_IS_QUOTA_ON(mp))) + xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on"); + else + xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on"); +#endif + +#ifdef QUOTADEBUG + if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp)) + cmn_err(CE_WARN, "XFS: mount internalqcheck failed"); +#endif + + return 0; +} + +STATIC void +xfs_qm_dqrele_null( + xfs_dquot_t *dq) +{ + /* + * Called from XFS, where we always check first for a NULL dquot. + */ + if (!dq) + return; + xfs_qm_dqrele(dq); +} + + +struct xfs_qmops xfs_qmcore_xfs = { + .xfs_qminit = xfs_qm_newmount, + .xfs_qmdone = xfs_qm_unmount_quotadestroy, + .xfs_qmmount = xfs_qm_endmount, + .xfs_qmunmount = xfs_qm_unmount_quotas, + .xfs_dqrele = xfs_qm_dqrele_null, + .xfs_dqattach = xfs_qm_dqattach, + .xfs_dqdetach = xfs_qm_dqdetach, + .xfs_dqpurgeall = xfs_qm_dqpurge_all, + .xfs_dqvopalloc = xfs_qm_vop_dqalloc, + .xfs_dqvopcreate = xfs_qm_vop_dqattach_and_dqmod_newinode, + .xfs_dqvoprename = xfs_qm_vop_rename_dqattach, + .xfs_dqvopchown = xfs_qm_vop_chown, + .xfs_dqvopchownresv = xfs_qm_vop_chown_reserve, + .xfs_dqtrxops = &xfs_trans_dquot_ops, +}; + +struct bhv_vfsops xfs_qmops = { { + BHV_IDENTITY_INIT(VFS_BHV_QM, VFS_POSITION_QM), + .vfs_parseargs = xfs_qm_parseargs, + .vfs_showargs = xfs_qm_showargs, + .vfs_mount = xfs_qm_mount, + .vfs_sync = xfs_qm_syncall, + .vfs_quotactl = xfs_qm_quotactl, }, +}; + + +void __init +xfs_qm_init(void) +{ + static char message[] __initdata = + KERN_INFO "SGI XFS Quota Management subsystem\n"; + + printk(message); + mutex_init(&xfs_Gqm_lock, MUTEX_DEFAULT, "xfs_qmlock"); + vfs_bhv_set_custom(&xfs_qmops, &xfs_qmcore_xfs); + xfs_qm_init_procfs(); +} + +void __exit +xfs_qm_exit(void) +{ + vfs_bhv_clr_custom(&xfs_qmops); + xfs_qm_cleanup_procfs(); + if (qm_dqzone) + kmem_cache_destroy(qm_dqzone); + if (qm_dqtrxzone) + kmem_cache_destroy(qm_dqtrxzone); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_qm.c linux.21rc5-ac2/fs/xfs/quota/xfs_qm.c --- linux.21rc5/fs/xfs/quota/xfs_qm.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_qm.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,2831 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_clnt.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_space.h" +#include "xfs_utils.h" + +#include "xfs_qm.h" + +/* + * The global quota manager. There is only one of these for the entire + * system, _not_ one per file system. XQM keeps track of the overall + * quota functionality, including maintaining the freelist and hash + * tables of dquots. + */ +mutex_t xfs_Gqm_lock; +struct xfs_qm *xfs_Gqm; +EXPORT_SYMBOL(xfs_Gqm); /* used by xfsidbg */ + +kmem_zone_t *qm_dqzone; +kmem_zone_t *qm_dqtrxzone; + +STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); +STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); +STATIC int xfs_qm_quotacheck(xfs_mount_t *); + +STATIC int xfs_qm_init_quotainos(xfs_mount_t *); +STATIC void xfs_qm_shake(void); + +#ifdef DEBUG +extern mutex_t qcheck_lock; +#endif + +#ifdef QUOTADEBUG +#define XQM_LIST_PRINT(l, NXT, title) \ +{ \ + xfs_dquot_t *dqp; int i = 0; \ + cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \ + for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \ + cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " \ + "bcnt = %d, icnt = %d, refs = %d", \ + ++i, (int) INT_GET(dqp->q_core.d_id, ARCH_CONVERT), \ + DQFLAGTO_TYPESTR(dqp), \ + (int) INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), \ + (int) INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), \ + (int) dqp->q_nrefs); } \ +} +#else +#define XQM_LIST_PRINT(l, NXT, title) do { } while (0) +#endif + +/* + * Initialize the XQM structure. 
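The twin definitions of XQM_LIST_PRINT above show the standard trick for statement-like macros: the no-op flavour expands to do { } while (0) rather than to nothing, so both flavours behave as exactly one statement. A minimal demonstration of why that matters:

#include <stdio.h>

#ifdef LIST_DEBUG
#define LIST_PRINT(msg)	do { printf("debug: %s\n", (msg)); } while (0)
#else
#define LIST_PRINT(msg)	do { } while (0)	/* still one statement */
#endif

static void demo(int cond)
{
	/* If LIST_PRINT expanded to a bare { ... } block, the ';' after
	 * the macro call would terminate the if, and this else would
	 * fail to parse. do { } while (0) swallows the semicolon. */
	if (cond)
		LIST_PRINT("condition held");
	else
		printf("condition failed\n");
}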
+ * Note that there is not one quota manager per file system. + */ +STATIC struct xfs_qm * +xfs_Gqm_init(void) +{ + xfs_qm_t *xqm; + int hsize, i; + + xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); + ASSERT(xqm); + + /* + * Initialize the dquot hash tables. + */ + hsize = (DQUOT_HASH_HEURISTIC < XFS_QM_NCSIZE_THRESHOLD) ? + XFS_QM_HASHSIZE_LOW : XFS_QM_HASHSIZE_HIGH; + xqm->qm_dqhashmask = hsize - 1; + + xqm->qm_usr_dqhtable = (xfs_dqhash_t *)kmem_zalloc(hsize * + sizeof(xfs_dqhash_t), + KM_SLEEP); + xqm->qm_grp_dqhtable = (xfs_dqhash_t *)kmem_zalloc(hsize * + sizeof(xfs_dqhash_t), + KM_SLEEP); + ASSERT(xqm->qm_usr_dqhtable != NULL); + ASSERT(xqm->qm_grp_dqhtable != NULL); + + for (i = 0; i < hsize; i++) { + xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i); + xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i); + } + + /* + * Freelist of all dquots of all file systems + */ + xfs_qm_freelist_init(&(xqm->qm_dqfreelist)); + + /* + * dquot zone. we register our own low-memory callback. + */ + if (!qm_dqzone) { + xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t), + "xfs_dquots"); + qm_dqzone = xqm->qm_dqzone; + } else + xqm->qm_dqzone = qm_dqzone; + + kmem_shake_register(xfs_qm_shake); + + /* + * The t_dqinfo portion of transactions. + */ + if (!qm_dqtrxzone) { + xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t), + "xfs_dqtrx"); + qm_dqtrxzone = xqm->qm_dqtrxzone; + } else + xqm->qm_dqtrxzone = qm_dqtrxzone; + + atomic_set(&xqm->qm_totaldquots, 0); + xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO; + xqm->qm_nrefs = 0; +#ifdef DEBUG + mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk"); +#endif + return xqm; +} + +/* + * Destroy the global quota manager when its reference count goes to zero. + */ +void +xfs_qm_destroy( + struct xfs_qm *xqm) +{ + int hsize, i; + + ASSERT(xqm != NULL); + ASSERT(xqm->qm_nrefs == 0); + kmem_shake_deregister(xfs_qm_shake); + hsize = xqm->qm_dqhashmask + 1; + for (i = 0; i < hsize; i++) { + xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); + xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i])); + } + kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t)); + kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t)); + xqm->qm_usr_dqhtable = NULL; + xqm->qm_grp_dqhtable = NULL; + xqm->qm_dqhashmask = 0; + xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist)); +#ifdef DEBUG + mutex_destroy(&qcheck_lock); +#endif + kmem_free(xqm, sizeof(xfs_qm_t)); +} + +/* + * Called at mount time to let XQM know that another file system is + * starting quotas. This isn't crucial information as the individual mount + * structures are pretty independent, but it helps the XQM keep a + * global view of what's going on. + */ +/* ARGSUSED */ +STATIC int +xfs_qm_hold_quotafs_ref( + struct xfs_mount *mp) +{ + /* + * Need to lock the xfs_Gqm structure for things like this. For example, + * the structure could disappear between the entry to this routine and + * a HOLD operation if not locked. + */ + XFS_QM_LOCK(xfs_Gqm); + + if (xfs_Gqm == NULL) { + if ((xfs_Gqm = xfs_Gqm_init()) == NULL) { + return (XFS_ERROR(EINVAL)); + } + } + /* + * We can keep a list of all filesystems with quotas mounted for + * debugging and statistical purposes, but ... + * Just take a reference and get out. + */ + XFS_QM_HOLD(xfs_Gqm); + XFS_QM_UNLOCK(xfs_Gqm); + + return 0; +} + + +/* + * Release the reference that a filesystem took at mount time, + * so that we know when we need to destroy the entire quota manager. 
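xfs_Gqm_init() above sizes the dquot hash tables to a power of two precisely so that qm_dqhashmask = hsize - 1 turns bucket selection into a mask. The arithmetic in isolation (the mixing function below is an arbitrary stand-in, not XFS's real hash):

#include <stdint.h>

#define HASH_SIZE 512u			/* must be a power of two */
#define HASH_MASK (HASH_SIZE - 1)	/* qm_dqhashmask analogue */

/* Illustrative mixing of (id, type) into a bucket index. */
static unsigned int dq_hash_bucket(uint32_t id, uint32_t type)
{
	uint32_t h = id * 2654435761u ^ type;	/* multiplicative mix */

	return h & HASH_MASK;	/* same as h % HASH_SIZE, but cheaper */
}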
+ */ +/* ARGSUSED */ +STATIC void +xfs_qm_rele_quotafs_ref( + struct xfs_mount *mp) +{ + xfs_dquot_t *dqp, *nextdqp; + + ASSERT(xfs_Gqm); + ASSERT(xfs_Gqm->qm_nrefs > 0); + + /* + * Go thru the freelist and destroy all inactive dquots. + */ + xfs_qm_freelist_lock(xfs_Gqm); + + for (dqp = xfs_Gqm->qm_dqfreelist.qh_next; + dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) { + xfs_dqlock(dqp); + nextdqp = dqp->dq_flnext; + if (dqp->dq_flags & XFS_DQ_INACTIVE) { + ASSERT(dqp->q_mount == NULL); + ASSERT(! XFS_DQ_IS_DIRTY(dqp)); + ASSERT(dqp->HL_PREVP == NULL); + ASSERT(dqp->MPL_PREVP == NULL); + XQM_FREELIST_REMOVE(dqp); + xfs_dqunlock(dqp); + xfs_qm_dqdestroy(dqp); + } else { + xfs_dqunlock(dqp); + } + dqp = nextdqp; + } + xfs_qm_freelist_unlock(xfs_Gqm); + + /* + * Destroy the entire XQM. If somebody mounts with quotaon, this'll + * be restarted. + */ + XFS_QM_LOCK(xfs_Gqm); + XFS_QM_RELE(xfs_Gqm); + if (xfs_Gqm->qm_nrefs == 0) { + xfs_qm_destroy(xfs_Gqm); + xfs_Gqm = NULL; + } + XFS_QM_UNLOCK(xfs_Gqm); +} + +/* + * This is called at mount time from xfs_mountfs to initialize the quotainfo + * structure and start the global quotamanager (xfs_Gqm) if it hasn't done + * so already. Note that the superblock has not been read in yet. + */ +void +xfs_qm_mount_quotainit( + xfs_mount_t *mp, + uint flags) +{ + /* + * User or group quotas has to be on. + */ + ASSERT(flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA)); + + /* + * Initialize the flags in the mount structure. From this point + * onwards we look at m_qflags to figure out if quotas's ON/OFF, etc. + * Note that we enforce nothing if accounting is off. + * ie. XFSMNT_*QUOTA must be ON for XFSMNT_*QUOTAENF. + * It isn't necessary to take the quotaoff lock to do this; this is + * called from mount. + */ + if (flags & XFSMNT_UQUOTA) { + mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); + if (flags & XFSMNT_UQUOTAENF) + mp->m_qflags |= XFS_UQUOTA_ENFD; + } + if (flags & XFSMNT_GQUOTA) { + mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); + if (flags & XFSMNT_GQUOTAENF) + mp->m_qflags |= XFS_GQUOTA_ENFD; + } +} + +/* + * Just destroy the quotainfo structure. + */ +void +xfs_qm_unmount_quotadestroy( + xfs_mount_t *mp) +{ + if (mp->m_quotainfo) + xfs_qm_destroy_quotainfo(mp); +} + + +/* + * This is called from xfs_mountfs to start quotas and initialize all + * necessary data structures like quotainfo. This is also responsible for + * running a quotacheck as necessary. We are guaranteed that the superblock + * is consistently read in at this point. + */ +int +xfs_qm_mount_quotas( + xfs_mount_t *mp) +{ + unsigned long s; + int error = 0; + uint sbf; + + /* + * If a file system had quotas running earlier, but decided to + * mount without -o quota/uquota/gquota options, revoke the + * quotachecked license, and bail out. + */ + if (! XFS_IS_QUOTA_ON(mp) && + (mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT))) { + mp->m_qflags = 0; + goto write_changes; + } + + /* + * If quotas on realtime volumes is not supported, we disable + * quotas immediately. + */ + if (mp->m_sb.sb_rextents) { + cmn_err(CE_NOTE, + "Cannot turn on quotas for realtime filesystem %s", + mp->m_fsname); + mp->m_qflags = 0; + goto write_changes; + } + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + cmn_err(CE_NOTE, "Attempting to turn on disk quotas."); +#endif + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + /* + * Allocate the quotainfo structure inside the mount struct, and + * create quotainode(s), and change/rev superblock if necessary. 
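xfs_qm_hold_quotafs_ref() and xfs_qm_rele_quotafs_ref() here form a refcounted lazy singleton: the first mount creates the global manager, later mounts just take references, and the last release tears everything down. The skeleton of that pattern in portable C (invented names, freelist sweep omitted):

#include <pthread.h>
#include <stdlib.h>

struct qm {
	unsigned nrefs;		/* qm_nrefs analogue */
	/* ... hash tables, freelist ... */
};

static pthread_mutex_t qm_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qm *qm_instance;		/* xfs_Gqm analogue */

/* hold: create on first use, then just bump the reference count. */
static struct qm *qm_hold(void)
{
	pthread_mutex_lock(&qm_lock);
	if (!qm_instance)
		qm_instance = calloc(1, sizeof(*qm_instance));
	if (qm_instance)
		qm_instance->nrefs++;
	pthread_mutex_unlock(&qm_lock);
	return qm_instance;
}

/* rele: the last unmount destroys the whole manager. */
static void qm_rele(void)
{
	pthread_mutex_lock(&qm_lock);
	if (qm_instance && --qm_instance->nrefs == 0) {
		free(qm_instance);
		qm_instance = NULL;
	}
	pthread_mutex_unlock(&qm_lock);
}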
+ */ + if ((error = xfs_qm_init_quotainfo(mp))) { + /* + * We must turn off quotas. + */ + ASSERT(mp->m_quotainfo == NULL); + mp->m_qflags = 0; + goto write_changes; + } + /* + * If any of the quotas are not consistent, do a quotacheck. + */ + if (XFS_QM_NEED_QUOTACHECK(mp)) { +#ifdef DEBUG + cmn_err(CE_NOTE, "Doing a quotacheck. Please wait."); +#endif + if ((error = xfs_qm_quotacheck(mp))) { + cmn_err(CE_WARN, "Quotacheck unsuccessful (Error %d): " + "Disabling quotas.", + error); + /* + * We must turn off quotas. + */ + ASSERT(mp->m_quotainfo != NULL); + ASSERT(xfs_Gqm != NULL); + xfs_qm_destroy_quotainfo(mp); + mp->m_qflags = 0; + goto write_changes; + } +#ifdef DEBUG + cmn_err(CE_NOTE, "Done quotacheck."); +#endif + } + write_changes: + /* + * We actually don't have to acquire the SB_LOCK at all. + * This can only be called from mount, and that's single threaded. XXX + */ + s = XFS_SB_LOCK(mp); + sbf = mp->m_sb.sb_qflags; + mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; + XFS_SB_UNLOCK(mp, s); + + if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { + if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { + /* + * We could only have been turning quotas off. + * We aren't in very good shape actually because + * the incore structures are convinced that quotas are + * off, but the on disk superblock doesn't know that ! + */ + ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); + xfs_fs_cmn_err(CE_ALERT, mp, + "XFS mount_quotas: Superblock update failed!"); + } + } + + if (error) { + xfs_fs_cmn_err(CE_WARN, mp, + "Failed to initialize disk quotas."); + } + return XFS_ERROR(error); +} + +/* + * Called from the vfsops layer. + */ +int +xfs_qm_unmount_quotas( + xfs_mount_t *mp) +{ + xfs_inode_t *uqp, *gqp; + int error = 0; + + /* + * Release the dquots that root inode, et al might be holding, + * before we flush quotas and blow away the quotainfo structure. + */ + ASSERT(mp->m_rootip); + xfs_qm_dqdetach(mp->m_rootip); + if (mp->m_rbmip) + xfs_qm_dqdetach(mp->m_rbmip); + if (mp->m_rsumip) + xfs_qm_dqdetach(mp->m_rsumip); + + /* + * Flush out the quota inodes. + */ + uqp = gqp = NULL; + if (mp->m_quotainfo) { + if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) { + xfs_ilock(uqp, XFS_ILOCK_EXCL); + xfs_iflock(uqp); + error = xfs_iflush(uqp, XFS_IFLUSH_SYNC); + xfs_iunlock(uqp, XFS_ILOCK_EXCL); + if (unlikely(error == EFSCORRUPTED)) { + XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)", + XFS_ERRLEVEL_LOW, mp); + goto out; + } + } + if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) { + xfs_ilock(gqp, XFS_ILOCK_EXCL); + xfs_iflock(gqp); + error = xfs_iflush(gqp, XFS_IFLUSH_SYNC); + xfs_iunlock(gqp, XFS_ILOCK_EXCL); + if (unlikely(error == EFSCORRUPTED)) { + XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)", + XFS_ERRLEVEL_LOW, mp); + goto out; + } + } + } + if (uqp) { + XFS_PURGE_INODE(uqp); + mp->m_quotainfo->qi_uquotaip = NULL; + } + if (gqp) { + XFS_PURGE_INODE(gqp); + mp->m_quotainfo->qi_gquotaip = NULL; + } +out: + return XFS_ERROR(error); +} + +/* + * Flush all dquots of the given file system to disk. The dquots are + * _not_ purged from memory here, just their data written to disk. + */ +int +xfs_qm_dqflush_all( + xfs_mount_t *mp, + int flags) +{ + int recl; + xfs_dquot_t *dqp; + int niters; + int error; + + if (mp->m_quotainfo == NULL) + return (0); + niters = 0; +again: + xfs_qm_mplist_lock(mp); + FOREACH_DQUOT_IN_MP(dqp, mp) { + xfs_dqlock(dqp); + if (! 
XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqunlock(dqp); + continue; + } + xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY"); + /* XXX a sentinel would be better */ + recl = XFS_QI_MPLRECLAIMS(mp); + if (! xfs_qm_dqflock_nowait(dqp)) { + /* + * If we can't grab the flush lock then check + * to see if the dquot has been flushed delayed + * write. If so, grab its buffer and send it + * out immediately. We'll be able to acquire + * the flush lock when the I/O completes. + */ + xfs_qm_dqflock_pushbuf_wait(dqp); + } + /* + * Let go of the mplist lock. We don't want to hold it + * across a disk write. + */ + xfs_qm_mplist_unlock(mp); + error = xfs_qm_dqflush(dqp, flags); + xfs_dqunlock(dqp); + if (error) + return (error); + + xfs_qm_mplist_lock(mp); + if (recl != XFS_QI_MPLRECLAIMS(mp)) { + xfs_qm_mplist_unlock(mp); + /* XXX restart limit */ + goto again; + } + } + + xfs_qm_mplist_unlock(mp); + /* return ! busy */ + return (0); +} +/* + * Release the group dquot pointers the user dquots may be + * carrying around as a hint. mplist is locked on entry and exit. + */ +STATIC void +xfs_qm_detach_gdquots( + xfs_mount_t *mp) +{ + xfs_dquot_t *dqp, *gdqp; + int nrecl; + + again: + ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); + dqp = XFS_QI_MPLNEXT(mp); + while (dqp) { + xfs_dqlock(dqp); + if ((gdqp = dqp->q_gdquot)) { + xfs_dqlock(gdqp); + dqp->q_gdquot = NULL; + } + xfs_dqunlock(dqp); + + if (gdqp) { + /* + * Can't hold the mplist lock across a dqput. + * XXXmust convert to marker based iterations here. + */ + nrecl = XFS_QI_MPLRECLAIMS(mp); + xfs_qm_mplist_unlock(mp); + xfs_qm_dqput(gdqp); + + xfs_qm_mplist_lock(mp); + if (nrecl != XFS_QI_MPLRECLAIMS(mp)) + goto again; + } + dqp = dqp->MPL_NEXT; + } +} + +/* + * Go through all the incore dquots of this file system and take them + * off the mplist and hashlist, if the dquot type matches the dqtype + * parameter. This is used when turning off quota accounting for + * users and/or groups, as well as when the filesystem is unmounting. + */ +STATIC int +xfs_qm_dqpurge_int( + xfs_mount_t *mp, + uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/GQUOTA */ +{ + xfs_dquot_t *dqp; + uint dqtype; + int nrecl; + xfs_dquot_t *nextdqp; + int nmisses; + + if (mp->m_quotainfo == NULL) + return (0); + + dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; + dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0; + + xfs_qm_mplist_lock(mp); + + /* + * In the first pass through all incore dquots of this filesystem, + * we release the group dquot pointers the user dquots may be + * carrying around as a hint. We need to do this irrespective of + * what's being turned off. + */ + xfs_qm_detach_gdquots(mp); + + again: + nmisses = 0; + ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); + /* + * Try to get rid of all of the unwanted dquots. The idea is to + * get them off mplist and hashlist, but leave them on freelist. + */ + dqp = XFS_QI_MPLNEXT(mp); + while (dqp) { + /* + * It's OK to look at the type without taking dqlock here. + * We're holding the mplist lock here, and that's needed for + * a dqreclaim. + */ + if ((dqp->dq_flags & dqtype) == 0) { + dqp = dqp->MPL_NEXT; + continue; + } + + if (! xfs_qm_dqhashlock_nowait(dqp)) { + nrecl = XFS_QI_MPLRECLAIMS(mp); + xfs_qm_mplist_unlock(mp); + XFS_DQ_HASH_LOCK(dqp->q_hash); + xfs_qm_mplist_lock(mp); + + /* + * XXXTheoretically, we can get into a very long + * ping pong game here. + * No one can be adding dquots to the mplist at + * this point, but somebody might be taking things off. 
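The recl = XFS_QI_MPLRECLAIMS(mp) snapshots above are a generation counter: record it, drop the list lock to do blocking work, retake the lock, and restart the walk if the counter moved, because the node being held may have been unlinked meanwhile. The same pattern in miniature (assuming, as the XFS comment does, that only removals invalidate the cursor; all names invented):

#include <pthread.h>

struct node {
	struct node *next;
	int dirty;
};

struct guarded_list {
	pthread_mutex_t lock;
	unsigned long	generation;	/* bumped on every removal */
	struct node    *head;
};

/* Hypothetical blocking flush, done without holding the list lock. */
static void flush_node(struct node *n)
{
	n->dirty = 0;
}

static void flush_all(struct guarded_list *l)
{
	struct node *n;
	unsigned long gen;

again:
	pthread_mutex_lock(&l->lock);
	for (n = l->head; n; n = n->next) {
		if (!n->dirty)
			continue;
		gen = l->generation;	/* snapshot before unlocking */
		pthread_mutex_unlock(&l->lock);
		flush_node(n);		/* may sleep; list may change */
		pthread_mutex_lock(&l->lock);
		if (gen != l->generation) {
			/* Entries were removed: n may be stale. */
			pthread_mutex_unlock(&l->lock);
			goto again;
		}
	}
	pthread_mutex_unlock(&l->lock);
}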
+ */ + if (nrecl != XFS_QI_MPLRECLAIMS(mp)) { + XFS_DQ_HASH_UNLOCK(dqp->q_hash); + goto again; + } + } + + /* + * Take the dquot off the mplist and hashlist. It may remain on + * freelist in INACTIVE state. + */ + nextdqp = dqp->MPL_NEXT; + nmisses += xfs_qm_dqpurge(dqp, flags); + dqp = nextdqp; + } + xfs_qm_mplist_unlock(mp); + return nmisses; +} + +int +xfs_qm_dqpurge_all( + xfs_mount_t *mp, + uint flags) +{ + int ndquots; + + /* + * Purge the dquot cache. + * None of the dquots should really be busy at this point. + */ + if (mp->m_quotainfo) { + while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) { + delay(ndquots * 10); + } + } + return 0; +} + +STATIC int +xfs_qm_dqattach_one( + xfs_inode_t *ip, + xfs_dqid_t id, + uint type, + uint doalloc, + uint dolock, + xfs_dquot_t *udqhint, /* hint */ + xfs_dquot_t **IO_idqpp) +{ + xfs_dquot_t *dqp; + int error; + + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + error = 0; + /* + * See if we already have it in the inode itself. IO_idqpp is + * &i_udquot or &i_gdquot. This made the code look weird, but + * made the logic a lot simpler. + */ + if ((dqp = *IO_idqpp)) { + if (dolock) + xfs_dqlock(dqp); + xfs_dqtrace_entry(dqp, "DQATTACH: found in ip"); + goto done; + } + + /* + * udqhint is the i_udquot field in inode, and is non-NULL only + * when the type arg is XFS_DQ_GROUP. Its purpose is to save a + * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside + * the user dquot. + */ + ASSERT(!udqhint || type == XFS_DQ_GROUP); + if (udqhint && !dolock) + xfs_dqlock(udqhint); + + /* + * No need to take dqlock to look at the id. + * The ID can't change until it gets reclaimed, and it won't + * be reclaimed as long as we have a ref from inode and we hold + * the ilock. + */ + if (udqhint && + (dqp = udqhint->q_gdquot) && + (INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id)) { + ASSERT(XFS_DQ_IS_LOCKED(udqhint)); + xfs_dqlock(dqp); + XFS_DQHOLD(dqp); + ASSERT(*IO_idqpp == NULL); + *IO_idqpp = dqp; + if (!dolock) { + xfs_dqunlock(dqp); + xfs_dqunlock(udqhint); + } + goto done; + } + /* + * We can't hold a dquot lock when we call the dqget code. + * We'll deadlock in no time, because of (not conforming to) + * lock ordering - the inodelock comes before any dquot lock, + * and we may drop and reacquire the ilock in xfs_qm_dqget(). + */ + if (udqhint) + xfs_dqunlock(udqhint); + /* + * Find the dquot from somewhere. This bumps the + * reference count of dquot and returns it locked. + * This can return ENOENT if dquot didn't exist on + * disk and we didn't ask it to allocate; + * ESRCH if quotas got turned off suddenly. + */ + if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type, + doalloc|XFS_QMOPT_DOWARN, &dqp))) { + if (udqhint && dolock) + xfs_dqlock(udqhint); + goto done; + } + + xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget"); + /* + * dqget may have dropped and re-acquired the ilock, but it guarantees + * that the dquot returned is the one that should go in the inode. + */ + *IO_idqpp = dqp; + ASSERT(dqp); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + if (! dolock) { + xfs_dqunlock(dqp); + ASSERT(!udqhint || !XFS_DQ_IS_LOCKED(udqhint)); + goto done; + } + if (! udqhint) + goto done; + + ASSERT(udqhint); + ASSERT(dolock); + ASSERT(! XFS_DQ_IS_LOCKED(udqhint)); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + if (! xfs_qm_dqlock_nowait(udqhint)) { + xfs_dqunlock(dqp); + xfs_dqlock(udqhint); + xfs_dqlock(dqp); + } + done: +#ifdef QUOTADEBUG + if (udqhint) { + if (dolock) + ASSERT(XFS_DQ_IS_LOCKED(udqhint)); + else + ASSERT(! XFS_DQ_IS_LOCKED(udqhint)); + } + if (! 
error) {
+ if (dolock)
+ ASSERT(XFS_DQ_IS_LOCKED(dqp));
+ else
+ ASSERT(! XFS_DQ_IS_LOCKED(dqp));
+ }
+#endif
+ return (error);
+}
+
+
+/*
+ * Given a udquot and gdquot, attach a ptr to the group dquot in the
+ * udquot as a hint for future lookups. The idea sounds simple, but the
+ * execution isn't, because the udquot might have a group dquot attached
+ * already and getting rid of that gets us into lock ordering constraints.
+ * The process is further complicated by the fact that the dquots may or may not
+ * be locked on entry.
+ */
+STATIC void
+xfs_qm_dqattach_grouphint(
+ xfs_dquot_t *udq,
+ xfs_dquot_t *gdq,
+ uint locked)
+{
+ xfs_dquot_t *tmp;
+
+#ifdef QUOTADEBUG
+ if (locked) {
+ ASSERT(XFS_DQ_IS_LOCKED(udq));
+ ASSERT(XFS_DQ_IS_LOCKED(gdq));
+ } else {
+ ASSERT(! XFS_DQ_IS_LOCKED(udq));
+ ASSERT(! XFS_DQ_IS_LOCKED(gdq));
+ }
+#endif
+ if (! locked)
+ xfs_dqlock(udq);
+
+ if ((tmp = udq->q_gdquot)) {
+ if (tmp == gdq) {
+ if (! locked)
+ xfs_dqunlock(udq);
+ return;
+ }
+
+ udq->q_gdquot = NULL;
+ /*
+ * We can't keep any dqlocks when calling dqrele,
+ * because the freelist lock comes before dqlocks.
+ */
+ xfs_dqunlock(udq);
+ if (locked)
+ xfs_dqunlock(gdq);
+ /*
+ * we took a hard reference once upon a time in dqget,
+ * so give it back when the udquot no longer points at it.
+ * dqput() does the unlocking of the dquot.
+ */
+ xfs_qm_dqrele(tmp);
+
+ ASSERT(! XFS_DQ_IS_LOCKED(udq));
+ ASSERT(! XFS_DQ_IS_LOCKED(gdq));
+ xfs_dqlock(udq);
+ xfs_dqlock(gdq);
+
+ } else {
+ ASSERT(XFS_DQ_IS_LOCKED(udq));
+ if (! locked) {
+ ASSERT(! XFS_DQ_IS_LOCKED(gdq));
+ xfs_dqlock(gdq);
+ }
+ }
+
+ ASSERT(XFS_DQ_IS_LOCKED(udq));
+ ASSERT(XFS_DQ_IS_LOCKED(gdq));
+ /*
+ * Somebody could have attached a gdquot here,
+ * when we dropped the uqlock. If so, just do nothing.
+ */
+ if (udq->q_gdquot == NULL) {
+ XFS_DQHOLD(gdq);
+ udq->q_gdquot = gdq;
+ }
+ if (! locked) {
+ xfs_dqunlock(gdq);
+ xfs_dqunlock(udq);
+ }
+}
+
+
+/*
+ * Given a locked inode, attach dquot(s) to it, taking UQUOTAON / GQUOTAON
+ * into account.
+ * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
+ * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
+ * much made this code a complete mess, but it has been pretty useful.
+ * If XFS_QMOPT_ILOCKED, then the inode passed in is already locked EXCL.
+ * The inode may get unlocked and relocked in here, and the caller must deal with
+ * the consequences.
+ */
+int
+xfs_qm_dqattach(
+ xfs_inode_t *ip,
+ uint flags)
+{
+ xfs_mount_t *mp = ip->i_mount;
+ uint nquotas = 0;
+ int error = 0;
+
+ if ((! XFS_IS_QUOTA_ON(mp)) ||
+ (! XFS_NOT_DQATTACHED(mp, ip)) ||
+ (ip->i_ino == mp->m_sb.sb_uquotino) ||
+ (ip->i_ino == mp->m_sb.sb_gquotino))
+ return (0);
+
+ ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
+ XFS_ISLOCKED_INODE_EXCL(ip));
+
+ if (! (flags & XFS_QMOPT_ILOCKED))
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ if (XFS_IS_UQUOTA_ON(mp)) {
+ error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
+ flags & XFS_QMOPT_DQALLOC,
+ flags & XFS_QMOPT_DQLOCK,
+ NULL, &ip->i_udquot);
+ if (error)
+ goto done;
+ nquotas++;
+ }
+ ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ if (XFS_IS_GQUOTA_ON(mp)) {
+ error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
+ flags & XFS_QMOPT_DQALLOC,
+ flags & XFS_QMOPT_DQLOCK,
+ ip->i_udquot, &ip->i_gdquot);
+ /*
+ * Don't worry about the udquot that we may have
+ * attached above. It'll get detached, if not already.
+ */
+ if (error)
+ goto done;
+ nquotas++;
+ }
+
+ /*
+ * Attach this group quota to the user quota as a hint.
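+ * That is, on success the hint is wired up so that
+ *
+ *    ip->i_udquot->q_gdquot == ip->i_gdquot
+ *
+ * and a later xfs_qm_dqattach_one() for the group can take the gdquot
+ * straight from the cached hint instead of going back to xfs_qm_dqget().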
+ * This WON'T, in general, result in a thrash. + */ + if (nquotas == 2) { + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(ip->i_udquot); + ASSERT(ip->i_gdquot); + + /* + * We may or may not have the i_udquot locked at this point, + * but this check is OK since we don't depend on the i_gdquot to + * be accurate 100% all the time. It is just a hint, and this + * will succeed in general. + */ + if (ip->i_udquot->q_gdquot == ip->i_gdquot) + goto done; + /* + * Attach i_gdquot to the gdquot hint inside the i_udquot. + */ + xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot, + flags & XFS_QMOPT_DQLOCK); + } + + done: + +#ifdef QUOTADEBUG + if (! error) { + if (ip->i_udquot) { + if (flags & XFS_QMOPT_DQLOCK) + ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot)); + else + ASSERT(! XFS_DQ_IS_LOCKED(ip->i_udquot)); + } + if (ip->i_gdquot) { + if (flags & XFS_QMOPT_DQLOCK) + ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot)); + else + ASSERT(! XFS_DQ_IS_LOCKED(ip->i_gdquot)); + } + if (XFS_IS_UQUOTA_ON(mp)) + ASSERT(ip->i_udquot); + if (XFS_IS_GQUOTA_ON(mp)) + ASSERT(ip->i_gdquot); + } +#endif + + if (! (flags & XFS_QMOPT_ILOCKED)) + xfs_iunlock(ip, XFS_ILOCK_EXCL); + +#ifdef QUOTADEBUG + else + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); +#endif + return (error); +} + +/* + * Release dquots (and their references) if any. + * The inode should be locked EXCL except when this's called by + * xfs_ireclaim. + */ +void +xfs_qm_dqdetach( + xfs_inode_t *ip) +{ + if (!(ip->i_udquot || ip->i_gdquot)) + return; + + ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino); + ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino); + if (ip->i_udquot) + xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip); + if (ip->i_udquot) { + xfs_qm_dqrele(ip->i_udquot); + ip->i_udquot = NULL; + } + if (ip->i_gdquot) { + xfs_qm_dqrele(ip->i_gdquot); + ip->i_gdquot = NULL; + } +} + +/* + * This is called by VFS_SYNC and flags arg determines the caller, + * and its motives, as done in xfs_sync. + * + * vfs_sync: SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31 + * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI 0x25 + * umountroot : SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA + */ + +int +xfs_qm_sync( + xfs_mount_t *mp, + short flags) +{ + int recl, restarts; + xfs_dquot_t *dqp; + uint flush_flags; + boolean_t nowait; + int error; + + restarts = 0; + /* + * We won't block unless we are asked to. + */ + nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0); + + again: + xfs_qm_mplist_lock(mp); + /* + * dqpurge_all() also takes the mplist lock and iterate thru all dquots + * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared + * when we have the mplist lock, we know that dquots will be consistent + * as long as we have it locked. + */ + if (! XFS_IS_QUOTA_ON(mp)) { + xfs_qm_mplist_unlock(mp); + return (0); + } + FOREACH_DQUOT_IN_MP(dqp, mp) { + /* + * If this is vfs_sync calling, then skip the dquots that + * don't 'seem' to be dirty. ie. don't acquire dqlock. + * This is very similar to what xfs_sync does with inodes. + */ + if (flags & SYNC_BDFLUSH) { + if (! XFS_DQ_IS_DIRTY(dqp)) + continue; + } + + if (nowait) { + /* + * Try to acquire the dquot lock. We are NOT out of + * lock order, but we just don't want to wait for this + * lock, unless somebody wanted us to. + */ + if (! xfs_qm_dqlock_nowait(dqp)) + continue; + } else { + xfs_dqlock(dqp); + } + + /* + * Now, find out for sure if this dquot is dirty or not. + */ + if (! 
XFS_DQ_IS_DIRTY(dqp)) {
+ xfs_dqunlock(dqp);
+ continue;
+ }
+
+ /* XXX a sentinel would be better */
+ recl = XFS_QI_MPLRECLAIMS(mp);
+ if (! xfs_qm_dqflock_nowait(dqp)) {
+ if (nowait) {
+ xfs_dqunlock(dqp);
+ continue;
+ }
+ /*
+ * If we can't grab the flush lock, and the caller
+ * really wanted us to give this our best shot,
+ * try to give the buffer a push before we wait
+ * on the flush lock. At this point, we know that
+ * even though the dquot is being flushed,
+ * it has (new) dirty data.
+ */
+ xfs_qm_dqflock_pushbuf_wait(dqp);
+ }
+ /*
+ * Let go of the mplist lock. We don't want to hold it
+ * across a disk write.
+ */
+ flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
+ xfs_qm_mplist_unlock(mp);
+ xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
+ error = xfs_qm_dqflush(dqp, flush_flags);
+ xfs_dqunlock(dqp);
+ if (error && XFS_FORCED_SHUTDOWN(mp))
+ return(0); /* Need to prevent umount failure */
+ else if (error)
+ return (error);
+
+ xfs_qm_mplist_lock(mp);
+ if (recl != XFS_QI_MPLRECLAIMS(mp)) {
+ if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
+ break;
+
+ xfs_qm_mplist_unlock(mp);
+ goto again;
+ }
+ }
+
+ xfs_qm_mplist_unlock(mp);
+ return (0);
+}
+
+
+/*
+ * This initializes all the quota information that's kept in the
+ * mount structure.
+ */
+int
+xfs_qm_init_quotainfo(
+ xfs_mount_t *mp)
+{
+ xfs_quotainfo_t *qinf;
+ int error;
+ xfs_dquot_t *dqp;
+
+ ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+
+ /*
+ * Tell XQM that we exist as soon as possible.
+ */
+ if ((error = xfs_qm_hold_quotafs_ref(mp))) {
+ return (error);
+ }
+
+ qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
+
+ /*
+ * See if quotainodes are setup, and if not, allocate them,
+ * and change the superblock accordingly.
+ */
+ if ((error = xfs_qm_init_quotainos(mp))) {
+ kmem_free(qinf, sizeof(xfs_quotainfo_t));
+ mp->m_quotainfo = NULL;
+ return (error);
+ }
+
+ spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin");
+ xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
+ qinf->qi_dqreclaims = 0;
+
+ /* mutex used to serialize quotaoffs */
+ mutex_init(&qinf->qi_quotaofflock, MUTEX_DEFAULT, "qoff");
+
+ /* Precalc some constants */
+ qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
+ ASSERT(qinf->qi_dqchunklen);
+ qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
+ do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
+
+ mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
+
+ /*
+ * We try to get the limits from the superuser's limits fields.
+ * This is quite hacky, but it is standard quota practice.
+ * We look at the USR dquot with id == 0 first, but if user quotas
+ * are not enabled we fall back to the GRP dquot with id == 0.
+ * We don't really care to keep separate default limits for user
+ * and group quotas, at least not at this point.
+ */
+ error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
+ (XFS_IS_UQUOTA_RUNNING(mp)) ?
+ XFS_DQ_USER : XFS_DQ_GROUP,
+ XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
+ &dqp);
+ if (! error) {
+ /*
+ * The warnings and timers set the grace period given to
+ * a user or group before he or she can no longer perform
+ * any writes. If it is zero, a default is used.
+ */
+ qinf->qi_btimelimit = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT) ?
+ INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT) : XFS_QM_BTIMELIMIT;
+ qinf->qi_itimelimit = INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT) ?
+ INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT) : XFS_QM_ITIMELIMIT;
+ qinf->qi_rtbtimelimit = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT) ?
+ INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT) : XFS_QM_RTBTIMELIMIT; + qinf->qi_bwarnlimit = INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) ? + INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) : XFS_QM_BWARNLIMIT; + qinf->qi_iwarnlimit = INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) ? + INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) : XFS_QM_IWARNLIMIT; + + /* + * We sent the XFS_QMOPT_DQSUSER flag to dqget because + * we don't want this dquot cached. We haven't done a + * quotacheck yet, and quotacheck doesn't like incore dquots. + */ + xfs_qm_dqdestroy(dqp); + } else { + qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; + qinf->qi_itimelimit = XFS_QM_ITIMELIMIT; + qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT; + qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT; + qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT; + } + + return (0); +} + + +/* + * Gets called when unmounting a filesystem or when all quotas get + * turned off. + * This purges the quota inodes, destroys locks and frees itself. + */ +void +xfs_qm_destroy_quotainfo( + xfs_mount_t *mp) +{ + xfs_quotainfo_t *qi; + + qi = mp->m_quotainfo; + ASSERT(qi != NULL); + ASSERT(xfs_Gqm != NULL); + + /* + * Release the reference that XQM kept, so that we know + * when the XQM structure should be freed. We cannot assume + * that xfs_Gqm is non-null after this point. + */ + xfs_qm_rele_quotafs_ref(mp); + + spinlock_destroy(&qi->qi_pinlock); + xfs_qm_list_destroy(&qi->qi_dqlist); + + if (qi->qi_uquotaip) { + XFS_PURGE_INODE(qi->qi_uquotaip); + qi->qi_uquotaip = NULL; /* paranoia */ + } + if (qi->qi_gquotaip) { + XFS_PURGE_INODE(qi->qi_gquotaip); + qi->qi_gquotaip = NULL; + } + mutex_destroy(&qi->qi_quotaofflock); + kmem_free(qi, sizeof(xfs_quotainfo_t)); + mp->m_quotainfo = NULL; +} + + + +/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */ + +/* ARGSUSED */ +STATIC void +xfs_qm_list_init( + xfs_dqlist_t *list, + char *str, + int n) +{ + mutex_init(&list->qh_lock, MUTEX_DEFAULT, str); + list->qh_next = NULL; + list->qh_version = 0; + list->qh_nelems = 0; +} + +STATIC void +xfs_qm_list_destroy( + xfs_dqlist_t *list) +{ + mutex_destroy(&(list->qh_lock)); +} + + +/* + * Stripped down version of dqattach. This doesn't attach, or even look at the + * dquots attached to the inode. The rationale is that there won't be any + * attached at the time this is called from quotacheck. + */ +STATIC int +xfs_qm_dqget_noattach( + xfs_inode_t *ip, + xfs_dquot_t **O_udqpp, + xfs_dquot_t **O_gdqpp) +{ + int error; + xfs_mount_t *mp; + xfs_dquot_t *udqp, *gdqp; + + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + mp = ip->i_mount; + udqp = NULL; + gdqp = NULL; + + if (XFS_IS_UQUOTA_ON(mp)) { + ASSERT(ip->i_udquot == NULL); + /* + * We want the dquot allocated if it doesn't exist. + */ + if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER, + XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, + &udqp))) { + /* + * Shouldn't be able to turn off quotas here. + */ + ASSERT(error != ESRCH); + ASSERT(error != ENOENT); + return (error); + } + ASSERT(udqp); + } + + if (XFS_IS_GQUOTA_ON(mp)) { + ASSERT(ip->i_gdquot == NULL); + if (udqp) + xfs_dqunlock(udqp); + if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_gid, XFS_DQ_GROUP, + XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN, + &gdqp))) { + if (udqp) + xfs_qm_dqrele(udqp); + ASSERT(error != ESRCH); + ASSERT(error != ENOENT); + return (error); + } + ASSERT(gdqp); + + /* Reacquire the locks in the right order */ + if (udqp) { + if (! 
xfs_qm_dqlock_nowait(udqp)) { + xfs_dqunlock(gdqp); + xfs_dqlock(udqp); + xfs_dqlock(gdqp); + } + } + } + + *O_udqpp = udqp; + *O_gdqpp = gdqp; + +#ifdef QUOTADEBUG + if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp)); + if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp)); +#endif + return (0); +} + +/* + * Create an inode and return with a reference already taken, but unlocked + * This is how we create quota inodes + */ +STATIC int +xfs_qm_qino_alloc( + xfs_mount_t *mp, + xfs_inode_t **ip, + __int64_t sbfields, + uint flags) +{ + xfs_trans_t *tp; + int error; + unsigned long s; + cred_t zerocr; + int committed; + + tp = xfs_trans_alloc(mp,XFS_TRANS_QM_QINOCREATE); + if ((error = xfs_trans_reserve(tp, + XFS_QM_QINOCREATE_SPACE_RES(mp), + XFS_CREATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_CREATE_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + memset(&zerocr, 0, sizeof(zerocr)); + + if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, IFREG, 1, 0, + &zerocr, 0, 1, ip, &committed))) { + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT); + return (error); + } + + /* + * Keep an extra reference to this quota inode. This inode is + * locked exclusively and joined to the transaction already. + */ + ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip)); + VN_HOLD(XFS_ITOV((*ip))); + + /* + * Make the changes in the superblock, and log those too. + * sbfields arg may contain fields other than *QUOTINO; + * VERSIONNUM for example. + */ + s = XFS_SB_LOCK(mp); + if (flags & XFS_QMOPT_SBVERSION) { +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + unsigned oldv = mp->m_sb.sb_versionnum; +#endif + ASSERT(!XFS_SB_VERSION_HASQUOTA(&mp->m_sb)); + ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | + XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) == + (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | + XFS_SB_GQUOTINO | XFS_SB_QFLAGS)); + + XFS_SB_VERSION_ADDQUOTA(&mp->m_sb); + mp->m_sb.sb_uquotino = NULLFSINO; + mp->m_sb.sb_gquotino = NULLFSINO; + + /* qflags will get updated _after_ quotacheck */ + mp->m_sb.sb_qflags = 0; +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + cmn_err(CE_NOTE, + "Old superblock version %x, converting to %x.", + oldv, mp->m_sb.sb_versionnum); +#endif + } + if (flags & XFS_QMOPT_UQUOTA) + mp->m_sb.sb_uquotino = (*ip)->i_ino; + else + mp->m_sb.sb_gquotino = (*ip)->i_ino; + XFS_SB_UNLOCK(mp, s); + xfs_mod_sb(tp, sbfields); + + if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, + NULL))) { + xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!"); + return (error); + } + return (0); +} + + +STATIC int +xfs_qm_reset_dqcounts( + xfs_mount_t *mp, + xfs_buf_t *bp, + xfs_dqid_t id, + uint type) +{ + xfs_disk_dquot_t *ddq; + int j; + + xfs_buftrace("RESET DQUOTS", bp); + /* + * Reset all counters and timers. They'll be + * started afresh by xfs_qm_quotacheck. + */ +#ifdef DEBUG + j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); + do_div(j, sizeof(xfs_dqblk_t)); + ASSERT(XFS_QM_DQPERBLK(mp) == j); +#endif + ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp); + for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) { + /* + * Do a sanity check, and if needed, repair the dqblk. Don't + * output any warnings because it's perfectly possible to + * find unitialized dquot blks. See comment in xfs_qm_dqcheck. 
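+ * Each chunk holds XFS_QM_DQPERBLK(mp) consecutive dquots, so slot j
+ * in the buffer must carry id (id + j); that is the identity the
+ * repair pass below verifies before the counters are zeroed.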
+ */ + (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR, + "xfs_quotacheck"); + INT_SET(ddq->d_bcount, ARCH_CONVERT, 0ULL); + INT_SET(ddq->d_icount, ARCH_CONVERT, 0ULL); + INT_SET(ddq->d_rtbcount, ARCH_CONVERT, 0ULL); + INT_SET(ddq->d_btimer, ARCH_CONVERT, (time_t)0); + INT_SET(ddq->d_itimer, ARCH_CONVERT, (time_t)0); + INT_SET(ddq->d_bwarns, ARCH_CONVERT, 0UL); + INT_SET(ddq->d_iwarns, ARCH_CONVERT, 0UL); + ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); + } + + return (0); +} + +STATIC int +xfs_qm_dqiter_bufs( + xfs_mount_t *mp, + xfs_dqid_t firstid, + xfs_fsblock_t bno, + xfs_filblks_t blkcnt, + uint flags) +{ + xfs_buf_t *bp; + int error; + int notcommitted; + int incr; + + ASSERT(blkcnt > 0); + notcommitted = 0; + incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ? + XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt; + error = 0; + + /* + * Blkcnt arg can be a very big number, and might even be + * larger than the log itself. So, we have to break it up into + * manageable-sized transactions. + * Note that we don't start a permanent transaction here; we might + * not be able to get a log reservation for the whole thing up front, + * and we don't really care to either, because we just discard + * everything if we were to crash in the middle of this loop. + */ + while (blkcnt--) { + error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, bno), + (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp); + if (error) + break; + + (void) xfs_qm_reset_dqcounts(mp, bp, firstid, + flags & XFS_QMOPT_UQUOTA ? + XFS_DQ_USER : XFS_DQ_GROUP); + xfs_bdwrite(mp, bp); + /* + * goto the next block. + */ + bno++; + firstid += XFS_QM_DQPERBLK(mp); + } + return (error); +} + +/* + * Iterate over all allocated USR/GRP dquots in the system, calling a + * caller supplied function for every chunk of dquots that we find. + */ +STATIC int +xfs_qm_dqiterate( + xfs_mount_t *mp, + xfs_inode_t *qip, + uint flags) +{ + xfs_bmbt_irec_t *map; + int i, nmaps; /* number of map entries */ + int error; /* return value */ + xfs_fileoff_t lblkno; + xfs_filblks_t maxlblkcnt; + xfs_dqid_t firstid; + xfs_fsblock_t rablkno; + xfs_filblks_t rablkcnt; + + error = 0; + /* + * This looks racey, but we can't keep an inode lock across a + * trans_reserve. But, this gets called during quotacheck, and that + * happens only at mount time which is single threaded. + */ + if (qip->i_d.di_nblocks == 0) + return (0); + + map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); + + lblkno = 0; + maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAX_FILE_OFFSET); + do { + nmaps = XFS_DQITER_MAP_SIZE; + /* + * We aren't changing the inode itself. Just changing + * some of its data. No new blocks are added here, and + * the inode is never added to the transaction. + */ + xfs_ilock(qip, XFS_ILOCK_SHARED); + error = xfs_bmapi(NULL, qip, lblkno, + maxlblkcnt - lblkno, + XFS_BMAPI_METADATA, + NULL, + 0, map, &nmaps, NULL); + xfs_iunlock(qip, XFS_ILOCK_SHARED); + if (error) + break; + + ASSERT(nmaps <= XFS_DQITER_MAP_SIZE); + for (i = 0; i < nmaps; i++) { + ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); + ASSERT(map[i].br_blockcount); + + + lblkno += map[i].br_blockcount; + + if (map[i].br_startblock == HOLESTARTBLOCK) + continue; + + firstid = (xfs_dqid_t) map[i].br_startoff * + XFS_QM_DQPERBLK(mp); + /* + * Do a read-ahead on the next extent. 
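+ * (One xfs_baread() is issued per fs block of the following mapping,
+ * so those buffers are already on their way in from disk by the time
+ * xfs_qm_dqiter_bufs() reads them synchronously.)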
+ */ + if ((i+1 < nmaps) && + (map[i+1].br_startblock != HOLESTARTBLOCK)) { + rablkcnt = map[i+1].br_blockcount; + rablkno = map[i+1].br_startblock; + while (rablkcnt--) { + xfs_baread(mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, rablkno), + (int)XFS_QI_DQCHUNKLEN(mp)); + rablkno++; + } + } + /* + * Iterate thru all the blks in the extent and + * reset the counters of all the dquots inside them. + */ + if ((error = xfs_qm_dqiter_bufs(mp, + firstid, + map[i].br_startblock, + map[i].br_blockcount, + flags))) { + break; + } + } + + if (error) + break; + } while (nmaps > 0); + + kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map)); + + return (error); +} + +/* + * Called by dqusage_adjust in doing a quotacheck. + * Given the inode, and a dquot (either USR or GRP, doesn't matter), + * this updates its incore copy as well as the buffer copy. This is + * so that once the quotacheck is done, we can just log all the buffers, + * as opposed to logging numerous updates to individual dquots. + */ +STATIC void +xfs_qm_quotacheck_dqadjust( + xfs_dquot_t *dqp, + xfs_qcnt_t nblks, + xfs_qcnt_t rtblks) +{ + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + xfs_dqtrace_entry(dqp, "QCHECK DQADJUST"); + /* + * Adjust the inode count and the block count to reflect this inode's + * resource usage. + */ + INT_MOD(dqp->q_core.d_icount, ARCH_CONVERT, +1); + dqp->q_res_icount++; + if (nblks) { + INT_MOD(dqp->q_core.d_bcount, ARCH_CONVERT, nblks); + dqp->q_res_bcount += nblks; + } + if (rtblks) { + INT_MOD(dqp->q_core.d_rtbcount, ARCH_CONVERT, rtblks); + dqp->q_res_rtbcount += rtblks; + } + + /* + * Adjust the timers since we just changed usages + */ + if (! XFS_IS_SUSER_DQUOT(dqp)) + xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core); + + dqp->dq_flags |= XFS_DQ_DIRTY; +} + +STATIC int +xfs_qm_get_rtblks( + xfs_inode_t *ip, + xfs_qcnt_t *O_rtblks) +{ + xfs_filblks_t rtblks; /* total rt blks */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extent entries */ + xfs_bmbt_rec_t *base; /* base of extent array */ + xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ + int error; + + ASSERT(XFS_IS_REALTIME_INODE(ip)); + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + if (!(ifp->if_flags & XFS_IFEXTENTS)) { + if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) + return (error); + } + rtblks = 0; + nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; + for (ep = base; ep < &base[nextents]; ep++) + rtblks += xfs_bmbt_get_blockcount(ep); + *O_rtblks = (xfs_qcnt_t)rtblks; + return (0); +} + +/* + * callback routine supplied to bulkstat(). Given an inumber, find its + * dquots and update them to account for resources taken by that inode. + */ +/* ARGSUSED */ +STATIC int +xfs_qm_dqusage_adjust( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer - NULL */ + xfs_ino_t ino, /* inode number to get data for */ + void *buffer, /* not used */ + xfs_daddr_t bno, /* starting block of inode cluster */ + void *dip, /* on-disk inode pointer (not used) */ + int *res) /* result code value */ +{ + xfs_inode_t *ip; + xfs_dquot_t *udqp, *gdqp; + xfs_qcnt_t nblks, rtblks; + int error; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + /* + * rootino must have its resources accounted for, not so with the quota + * inodes. + */ + if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { + *res = BULKSTAT_RV_NOTHING; + return XFS_ERROR(EINVAL); + } + + /* + * We don't _need_ to take the ilock EXCL. 
However, the xfs_qm_dqget + * interface expects the inode to be exclusively locked because that's + * the case in all other instances. It's OK that we do this because + * quotacheck is done only at mount time. + */ + if ((error = xfs_iget(mp, tp, ino, XFS_ILOCK_EXCL, &ip, bno))) { + *res = BULKSTAT_RV_NOTHING; + return (error); + } + + if (ip->i_d.di_mode == 0) { + xfs_iput_new(ip, XFS_ILOCK_EXCL); + *res = BULKSTAT_RV_NOTHING; + return XFS_ERROR(ENOENT); + } + + /* + * Obtain the locked dquots. In case of an error (eg. allocation + * fails for ENOSPC), we return the negative of the error number + * to bulkstat, so that it can get propagated to quotacheck() and + * making us disable quotas for the file system. + */ + if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) { + xfs_iput(ip, XFS_ILOCK_EXCL); + *res = BULKSTAT_RV_GIVEUP; + return (error); + } + + rtblks = 0; + if (! XFS_IS_REALTIME_INODE(ip)) { + nblks = (xfs_qcnt_t)ip->i_d.di_nblocks; + } else { + /* + * Walk thru the extent list and count the realtime blocks. + */ + if ((error = xfs_qm_get_rtblks(ip, &rtblks))) { + xfs_iput(ip, XFS_ILOCK_EXCL); + if (udqp) + xfs_qm_dqput(udqp); + if (gdqp) + xfs_qm_dqput(gdqp); + *res = BULKSTAT_RV_GIVEUP; + return (error); + } + nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; + } + ASSERT(ip->i_delayed_blks == 0); + + /* + * We can't release the inode while holding its dquot locks. + * The inode can go into inactive and might try to acquire the dquotlocks. + * So, just unlock here and do a vn_rele at the end. + */ + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + /* + * Add the (disk blocks and inode) resources occupied by this + * inode to its dquots. We do this adjustment in the incore dquot, + * and also copy the changes to its buffer. + * We don't care about putting these changes in a transaction + * envelope because if we crash in the middle of a 'quotacheck' + * we have to start from the beginning anyway. + * Once we're done, we'll log all the dquot bufs. + * + * The *QUOTA_ON checks below may look pretty racey, but quotachecks + * and quotaoffs don't race. (Quotachecks happen at mount time only). + */ + if (XFS_IS_UQUOTA_ON(mp)) { + ASSERT(udqp); + xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks); + xfs_qm_dqput(udqp); + } + if (XFS_IS_GQUOTA_ON(mp)) { + ASSERT(gdqp); + xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks); + xfs_qm_dqput(gdqp); + } + /* + * Now release the inode. This will send it to 'inactive', and + * possibly even free blocks. + */ + VN_RELE(XFS_ITOV(ip)); + + /* + * Goto next inode. + */ + *res = BULKSTAT_RV_DIDONE; + return (0); +} + +/* + * Walk thru all the filesystem inodes and construct a consistent view + * of the disk quota world. + */ +STATIC int +xfs_qm_quotacheck( + xfs_mount_t *mp) +{ + int done, count, error; + xfs_ino_t lastino; + size_t structsz; + xfs_inode_t *uip, *gip; + uint flags; + + count = INT_MAX; + structsz = 1; + lastino = 0; + flags = 0; + + ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp)); + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + /* + * There should be no cached dquots. The (simplistic) quotacheck + * algorithm doesn't like that. + */ + ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0); + + cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname); + + /* + * First we go thru all the dquots on disk, USR and GRP, and reset + * their counters to zero. We need a clean slate. + * We don't log our changes till later. 
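+ * In outline, the whole pass below is:
+ *
+ *    xfs_qm_dqiterate(mp, uip/gip, ...)        zero the on-disk dquots
+ *    xfs_bulkstat(xfs_qm_dqusage_adjust)       recount every inode
+ *    xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI)  incore dquots -> buffers
+ *    XFS_bflush(mp->m_ddev_targp)              make it stable on disk
+ *
+ * and only then do the XFS_*QUOTA_CHKD flags go into m_qflags.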
+ */ + if ((uip = XFS_QI_UQIP(mp))) { + if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA))) + goto error_return; + flags |= XFS_UQUOTA_CHKD; + } + + if ((gip = XFS_QI_GQIP(mp))) { + if ((error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA))) + goto error_return; + flags |= XFS_GQUOTA_CHKD; + } + + do { + /* + * Iterate thru all the inodes in the file system, + * adjusting the corresponding dquot counters in core. + */ + if ((error = xfs_bulkstat(mp, NULL, &lastino, &count, + xfs_qm_dqusage_adjust, + structsz, NULL, + BULKSTAT_FG_IGET|BULKSTAT_FG_VFSLOCKED, + &done))) + break; + + } while (! done); + + /* + * We can get this error if we couldn't do a dquot allocation inside + * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the + * dirty dquots that might be cached, we just want to get rid of them + * and turn quotaoff. The dquots won't be attached to any of the inodes + * at this point (because we intentionally didn't in dqget_noattach). + */ + if (error) { + xfs_qm_dqpurge_all(mp, + XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA| + XFS_QMOPT_QUOTAOFF); + goto error_return; + } + /* + * We've made all the changes that we need to make incore. + * Now flush_them down to disk buffers. + */ + xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI); + + /* + * We didn't log anything, because if we crashed, we'll have to + * start the quotacheck from scratch anyway. However, we must make + * sure that our dquot changes are secure before we put the + * quotacheck'd stamp on the superblock. So, here we do a synchronous + * flush. + */ + XFS_bflush(mp->m_ddev_targp); + + /* + * If one type of quotas is off, then it will lose its + * quotachecked status, since we won't be doing accounting for + * that type anymore. + */ + mp->m_qflags &= ~(XFS_GQUOTA_CHKD | XFS_UQUOTA_CHKD); + mp->m_qflags |= flags; + + XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++"); + + error_return: + cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname); + return (error); +} + +/* + * This is called after the superblock has been read in and we're ready to + * iget the quota inodes. + */ +STATIC int +xfs_qm_init_quotainos( + xfs_mount_t *mp) +{ + xfs_inode_t *uip, *gip; + int error; + __int64_t sbflags; + uint flags; + + ASSERT(mp->m_quotainfo); + uip = gip = NULL; + sbflags = 0; + flags = 0; + + /* + * Get the uquota and gquota inodes + */ + if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) { + if (XFS_IS_UQUOTA_ON(mp) && + mp->m_sb.sb_uquotino != NULLFSINO) { + ASSERT(mp->m_sb.sb_uquotino > 0); + if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, + 0, &uip, 0))) + return XFS_ERROR(error); + } + if (XFS_IS_GQUOTA_ON(mp) && + mp->m_sb.sb_gquotino != NULLFSINO) { + ASSERT(mp->m_sb.sb_gquotino > 0); + if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, + 0, &gip, 0))) { + if (uip) + VN_RELE(XFS_ITOV(uip)); + return XFS_ERROR(error); + } + } + } else { + flags |= XFS_QMOPT_SBVERSION; + sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | + XFS_SB_GQUOTINO | XFS_SB_QFLAGS); + } + + /* + * Create the two inodes, if they don't exist already. The changes + * made above will get added to a transaction and logged in one of + * the qino_alloc calls below. If the device is readonly, + * temporarily switch to read-write to do this. 
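+ * (XFS_QMOPT_SBVERSION is passed in only while the superblock still
+ * lacks the quota version bit, and is cleared again after the first
+ * xfs_qm_qino_alloc() below, so the version conversion happens once.)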
+ */ + if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { + if ((error = xfs_qm_qino_alloc(mp, &uip, + sbflags | XFS_SB_UQUOTINO, + flags | XFS_QMOPT_UQUOTA))) + return XFS_ERROR(error); + + flags &= ~XFS_QMOPT_SBVERSION; + } + if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) { + if ((error = xfs_qm_qino_alloc(mp, &gip, + sbflags | XFS_SB_GQUOTINO, + flags | XFS_QMOPT_GQUOTA))) { + if (uip) + VN_RELE(XFS_ITOV(uip)); + + return XFS_ERROR(error); + } + } + + XFS_QI_UQIP(mp) = uip; + XFS_QI_GQIP(mp) = gip; + + return (0); +} + + +/* + * Traverse the freelist of dquots and attempt to reclaim a maximum of + * 'howmany' dquots. This operation races with dqlookup(), and attempts to + * favor the lookup function ... + * XXXsup merge this with qm_reclaim_one(). + */ +STATIC int +xfs_qm_shake_freelist( + int howmany) +{ + int nreclaimed; + xfs_dqhash_t *hash; + xfs_dquot_t *dqp, *nextdqp; + int restarts; + int nflushes; + + if (howmany <= 0) + return (0); + + nreclaimed = 0; + restarts = 0; + nflushes = 0; + +#ifdef QUOTADEBUG + cmn_err(CE_DEBUG, "Shake free 0x%x", howmany); +#endif + /* lock order is : hashchainlock, freelistlock, mplistlock */ + tryagain: + xfs_qm_freelist_lock(xfs_Gqm); + + for (dqp = xfs_Gqm->qm_dqfreelist.qh_next; + ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) && + nreclaimed < howmany); ) { + xfs_dqlock(dqp); + + /* + * We are racing with dqlookup here. Naturally we don't + * want to reclaim a dquot that lookup wants. + */ + if (dqp->dq_flags & XFS_DQ_WANT) { + xfs_dqunlock(dqp); + xfs_qm_freelist_unlock(xfs_Gqm); + if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) + return (nreclaimed != howmany); + XQM_STATS_INC(xqmstats.xs_qm_dqwants); + goto tryagain; + } + + /* + * If the dquot is inactive, we are assured that it is + * not on the mplist or the hashlist, and that makes our + * life easier. + */ + if (dqp->dq_flags & XFS_DQ_INACTIVE) { + ASSERT(dqp->q_mount == NULL); + ASSERT(! XFS_DQ_IS_DIRTY(dqp)); + ASSERT(dqp->HL_PREVP == NULL); + ASSERT(dqp->MPL_PREVP == NULL); + XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims); + nextdqp = dqp->dq_flnext; + goto off_freelist; + } + + ASSERT(dqp->MPL_PREVP); + /* + * Try to grab the flush lock. If this dquot is in the process of + * getting flushed to disk, we don't want to reclaim it. + */ + if (! xfs_qm_dqflock_nowait(dqp)) { + xfs_dqunlock(dqp); + dqp = dqp->dq_flnext; + continue; + } + + /* + * We have the flush lock so we know that this is not in the + * process of being flushed. So, if this is dirty, flush it + * DELWRI so that we don't get a freelist infested with + * dirty dquots. + */ + if (XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY"); + /* + * We flush it delayed write, so don't bother + * releasing the mplock. + */ + (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); + xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ + dqp = dqp->dq_flnext; + continue; + } + /* + * We're trying to get the hashlock out of order. This races + * with dqlookup; so, we giveup and goto the next dquot if + * we couldn't get the hashlock. This way, we won't starve + * a dqlookup process that holds the hashlock that is + * waiting for the freelist lock. + */ + if (! xfs_qm_dqhashlock_nowait(dqp)) { + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + dqp = dqp->dq_flnext; + continue; + } + /* + * This races with dquot allocation code as well as dqflush_all + * and reclaim code. So, if we failed to grab the mplist lock, + * giveup everything and start over. + */ + hash = dqp->q_hash; + ASSERT(hash); + if (! 
xfs_qm_mplist_nowait(dqp->q_mount)) { + /* XXX put a sentinel so that we can come back here */ + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + XFS_DQ_HASH_UNLOCK(hash); + xfs_qm_freelist_unlock(xfs_Gqm); + if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) + return (nreclaimed != howmany); + goto tryagain; + } + xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING"); +#ifdef QUOTADEBUG + cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n", + dqp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT)); +#endif + ASSERT(dqp->q_nrefs == 0); + nextdqp = dqp->dq_flnext; + XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp); + XQM_HASHLIST_REMOVE(hash, dqp); + xfs_dqfunlock(dqp); + xfs_qm_mplist_unlock(dqp->q_mount); + XFS_DQ_HASH_UNLOCK(hash); + + off_freelist: + XQM_FREELIST_REMOVE(dqp); + xfs_dqunlock(dqp); + nreclaimed++; + XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims); + xfs_qm_dqdestroy(dqp); + dqp = nextdqp; + } + xfs_qm_freelist_unlock(xfs_Gqm); + return (nreclaimed != howmany); +} + + +/* + * The shake manager routine called by shaked() when memory is + * running low. + */ +/* ARGSUSED */ +STATIC void +xfs_qm_shake(void) +{ + int ndqused, nfree, n; + + if (!xfs_Gqm) + return; + + nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */ + /* incore dquots in all f/s's */ + ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree; + + ASSERT(ndqused >= 0); + + if (nfree <= ndqused && nfree < ndquot) + return; + + ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ + n = nfree - ndqused - ndquot; /* # over target */ + + (void) xfs_qm_shake_freelist(MAX(nfree, n)); +} + + +/* + * Just pop the least recently used dquot off the freelist and + * recycle it. The returned dquot is locked. + */ +STATIC xfs_dquot_t * +xfs_qm_dqreclaim_one(void) +{ + xfs_dquot_t *dqpout; + xfs_dquot_t *dqp; + int restarts; + int nflushes; + + restarts = 0; + dqpout = NULL; + nflushes = 0; + + /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */ + startagain: + xfs_qm_freelist_lock(xfs_Gqm); + + FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) { + xfs_dqlock(dqp); + + /* + * We are racing with dqlookup here. Naturally we don't + * want to reclaim a dquot that lookup wants. We release the + * freelist lock and start over, so that lookup will grab + * both the dquot and the freelistlock. + */ + if (dqp->dq_flags & XFS_DQ_WANT) { + ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE)); + xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT"); + xfs_dqunlock(dqp); + xfs_qm_freelist_unlock(xfs_Gqm); + if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) + return (NULL); + XQM_STATS_INC(xqmstats.xs_qm_dqwants); + goto startagain; + } + + /* + * If the dquot is inactive, we are assured that it is + * not on the mplist or the hashlist, and that makes our + * life easier. + */ + if (dqp->dq_flags & XFS_DQ_INACTIVE) { + ASSERT(dqp->q_mount == NULL); + ASSERT(! XFS_DQ_IS_DIRTY(dqp)); + ASSERT(dqp->HL_PREVP == NULL); + ASSERT(dqp->MPL_PREVP == NULL); + XQM_FREELIST_REMOVE(dqp); + xfs_dqunlock(dqp); + dqpout = dqp; + XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims); + break; + } + + ASSERT(dqp->q_hash); + ASSERT(dqp->MPL_PREVP); + + /* + * Try to grab the flush lock. If this dquot is in the process of + * getting flushed to disk, we don't want to reclaim it. + */ + if (! xfs_qm_dqflock_nowait(dqp)) { + xfs_dqunlock(dqp); + continue; + } + + /* + * We have the flush lock so we know that this is not in the + * process of being flushed. So, if this is dirty, flush it + * DELWRI so that we don't get a freelist infested with + * dirty dquots. 
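+ * (A DELWRI flush merely queues the buffer for a delayed write; the
+ * dquot stays on the freelist and is skipped this pass, to be
+ * reclaimed once the write has made it clean.)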
+ */ + if (XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY"); + /* + * We flush it delayed write, so don't bother + * releasing the freelist lock. + */ + (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); + xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ + continue; + } + + if (! xfs_qm_mplist_nowait(dqp->q_mount)) { + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + continue; + } + + if (! xfs_qm_dqhashlock_nowait(dqp)) + goto mplistunlock; + + ASSERT(dqp->q_nrefs == 0); + xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING"); + XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp); + XQM_HASHLIST_REMOVE(dqp->q_hash, dqp); + XQM_FREELIST_REMOVE(dqp); + dqpout = dqp; + XFS_DQ_HASH_UNLOCK(dqp->q_hash); + mplistunlock: + xfs_qm_mplist_unlock(dqp->q_mount); + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + if (dqpout) + break; + } + + xfs_qm_freelist_unlock(xfs_Gqm); + return (dqpout); +} + + +/*------------------------------------------------------------------*/ + +/* + * Return a new incore dquot. Depending on the number of + * dquots in the system, we either allocate a new one on the kernel heap, + * or reclaim a free one. + * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed + * to reclaim an existing one from the freelist. + */ +boolean_t +xfs_qm_dqalloc_incore( + xfs_dquot_t **O_dqpp) +{ + xfs_dquot_t *dqp; + + /* + * Check against high water mark to see if we want to pop + * a nincompoop dquot off the freelist. + */ + if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) { + /* + * Try to recycle a dquot from the freelist. + */ + if ((dqp = xfs_qm_dqreclaim_one())) { + XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); + /* + * Just zero the core here. The rest will get + * reinitialized by caller. XXX we shouldn't even + * do this zero ... + */ + memset(&dqp->q_core, 0, sizeof(dqp->q_core)); + *O_dqpp = dqp; + return (B_FALSE); + } + XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); + } + + /* + * Allocate a brand new dquot on the kernel heap and return it + * to the caller to initialize. + */ + ASSERT(xfs_Gqm->qm_dqzone != NULL); + *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); + atomic_inc(&xfs_Gqm->qm_totaldquots); + + return (B_TRUE); +} + + +/* + * Start a transaction and write the incore superblock changes to + * disk. flags parameter indicates which fields have changed. + */ +int +xfs_qm_write_sb_changes( + xfs_mount_t *mp, + __int64_t flags) +{ + xfs_trans_t *tp; + int error; + +#ifdef QUOTADEBUG + cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname); +#endif + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); + if ((error = xfs_trans_reserve(tp, 0, + mp->m_sb.sb_sectsize + 128, 0, + 0, + XFS_DEFAULT_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + + xfs_mod_sb(tp, flags); + (void) xfs_trans_commit(tp, 0, NULL); + + return (0); +} + + +/* --------------- utility functions for vnodeops ---------------- */ + + +/* + * Given an inode, a uid and gid (from cred_t) make sure that we have + * allocated relevant dquot(s) on disk, and that we won't exceed inode + * quotas by creating this file. + * This also attaches dquot(s) to the given inode after locking it, + * and returns the dquots corresponding to the uid and/or gid. 
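+ * A create-path caller would use it roughly like this (sketch only;
+ * the exact option mask is the caller's business):
+ *
+ *    error = xfs_qm_vop_dqalloc(mp, dp, current->fsuid,
+ *                    current->fsgid,
+ *                    XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA |
+ *                    XFS_QMOPT_INHERIT, &udqp, &gdqp);
+ *    (create the inode inside a transaction)
+ *    xfs_qm_vop_dqattach_and_dqmod_newinode(tp, ip, udqp, gdqp);
+ *    (after the commit)
+ *    xfs_qm_dqrele(udqp);
+ *    xfs_qm_dqrele(gdqp);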
+ * + * in : inode (unlocked) + * out : udquot, gdquot with references taken and unlocked + */ +int +xfs_qm_vop_dqalloc( + xfs_mount_t *mp, + xfs_inode_t *ip, + uid_t uid, + gid_t gid, + uint flags, + xfs_dquot_t **O_udqpp, + xfs_dquot_t **O_gdqpp) +{ + int error; + xfs_dquot_t *uq, *gq; + uint lockflags; + + if (!XFS_IS_QUOTA_ON(mp)) + return 0; + + lockflags = XFS_ILOCK_EXCL; + xfs_ilock(ip, lockflags); + + if ((flags & XFS_QMOPT_INHERIT) && + XFS_INHERIT_GID(ip, XFS_MTOVFS(mp))) + gid = ip->i_d.di_gid; + + /* + * Attach the dquot(s) to this inode, doing a dquot allocation + * if necessary. The dquot(s) will not be locked. + */ + if (XFS_NOT_DQATTACHED(mp, ip)) { + if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC | + XFS_QMOPT_ILOCKED))) { + xfs_iunlock(ip, lockflags); + return (error); + } + } + + uq = gq = NULL; + if ((flags & XFS_QMOPT_UQUOTA) && + XFS_IS_UQUOTA_ON(mp)) { + if (ip->i_d.di_uid != uid) { + /* + * What we need is the dquot that has this uid, and + * if we send the inode to dqget, the uid of the inode + * takes priority over what's sent in the uid argument. + * We must unlock inode here before calling dqget if + * we're not sending the inode, because otherwise + * we'll deadlock by doing trans_reserve while + * holding ilock. + */ + xfs_iunlock(ip, lockflags); + if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid, + XFS_DQ_USER, + XFS_QMOPT_DQALLOC | + XFS_QMOPT_DOWARN, + &uq))) { + ASSERT(error != ENOENT); + return (error); + } + /* + * Get the ilock in the right order. + */ + xfs_dqunlock(uq); + lockflags = XFS_ILOCK_SHARED; + xfs_ilock(ip, lockflags); + } else { + /* + * Take an extra reference, because we'll return + * this to caller + */ + ASSERT(ip->i_udquot); + uq = ip->i_udquot; + xfs_dqlock(uq); + XFS_DQHOLD(uq); + xfs_dqunlock(uq); + } + } + if ((flags & XFS_QMOPT_GQUOTA) && + XFS_IS_GQUOTA_ON(mp)) { + if (ip->i_d.di_gid != gid) { + xfs_iunlock(ip, lockflags); + if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid, + XFS_DQ_GROUP, + XFS_QMOPT_DQALLOC | + XFS_QMOPT_DOWARN, + &gq))) { + if (uq) + xfs_qm_dqrele(uq); + ASSERT(error != ENOENT); + return (error); + } + xfs_dqunlock(gq); + lockflags = XFS_ILOCK_SHARED; + xfs_ilock(ip, lockflags); + } else { + ASSERT(ip->i_gdquot); + gq = ip->i_gdquot; + xfs_dqlock(gq); + XFS_DQHOLD(gq); + xfs_dqunlock(gq); + } + } + if (uq) + xfs_dqtrace_entry_ino(uq, "DQALLOC", ip); + + xfs_iunlock(ip, lockflags); + if (O_udqpp) + *O_udqpp = uq; + else if (uq) + xfs_qm_dqrele(uq); + if (O_gdqpp) + *O_gdqpp = gq; + else if (gq) + xfs_qm_dqrele(gq); + return (0); +} + +/* + * Actually transfer ownership, and do dquot modifications. + * These were already reserved. + */ +xfs_dquot_t * +xfs_qm_vop_chown( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dquot_t **IO_olddq, + xfs_dquot_t *newdq) +{ + xfs_dquot_t *prevdq; + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); + + /* old dquot */ + prevdq = *IO_olddq; + ASSERT(prevdq); + ASSERT(prevdq != newdq); + + xfs_trans_mod_dquot(tp, prevdq, + XFS_TRANS_DQ_BCOUNT, + -(ip->i_d.di_nblocks)); + xfs_trans_mod_dquot(tp, prevdq, + XFS_TRANS_DQ_ICOUNT, + -1); + + /* the sparkling new dquot */ + xfs_trans_mod_dquot(tp, newdq, + XFS_TRANS_DQ_BCOUNT, + ip->i_d.di_nblocks); + xfs_trans_mod_dquot(tp, newdq, + XFS_TRANS_DQ_ICOUNT, + 1); + + /* + * Take an extra reference, because the inode + * is going to keep this dquot pointer even + * after the trans_commit. 
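+ * The old dquot is handed back as the return value; the caller is
+ * expected to xfs_qm_dqrele() it once the transaction has committed.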
+ */ + xfs_dqlock(newdq); + XFS_DQHOLD(newdq); + xfs_dqunlock(newdq); + *IO_olddq = newdq; + + return (prevdq); +} + +/* + * Quota reservations for setattr(AT_UID|AT_GID). + */ +int +xfs_qm_vop_chown_reserve( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dquot_t *udqp, + xfs_dquot_t *gdqp, + uint flags) +{ + int error; + xfs_mount_t *mp; + uint delblks; + xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; + + ASSERT(XFS_ISLOCKED_INODE(ip)); + mp = ip->i_mount; + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + delblks = ip->i_delayed_blks; + delblksudq = delblksgdq = unresudq = unresgdq = NULL; + + if (XFS_IS_UQUOTA_ON(mp) && udqp && + ip->i_d.di_uid != (uid_t)INT_GET(udqp->q_core.d_id, ARCH_CONVERT)) { + delblksudq = udqp; + /* + * If there are delayed allocation blocks, then we have to + * unreserve those from the old dquot, and add them to the + * new dquot. + */ + if (delblks) { + ASSERT(ip->i_udquot); + unresudq = ip->i_udquot; + } + } + if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp && + ip->i_d.di_gid != INT_GET(gdqp->q_core.d_id, ARCH_CONVERT)) { + delblksgdq = gdqp; + if (delblks) { + ASSERT(ip->i_gdquot); + unresgdq = ip->i_gdquot; + } + } + + if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, + delblksudq, delblksgdq, ip->i_d.di_nblocks, 1, + flags | XFS_QMOPT_RES_REGBLKS))) + return (error); + + /* + * Do the delayed blks reservations/unreservations now. Since, these + * are done without the help of a transaction, if a reservation fails + * its previous reservations won't be automatically undone by trans + * code. So, we have to do it manually here. + */ + if (delblks) { + /* + * Do the reservations first. Unreservation can't fail. + */ + ASSERT(delblksudq || delblksgdq); + ASSERT(unresudq || unresgdq); + if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, + delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0, + flags | XFS_QMOPT_RES_REGBLKS))) + return (error); + xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, + unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0, + XFS_QMOPT_RES_REGBLKS); + } + + return (0); +} + +int +xfs_qm_vop_rename_dqattach( + xfs_inode_t **i_tab) +{ + xfs_inode_t *ip; + int i; + int error; + + ip = i_tab[0]; + + if (! XFS_IS_QUOTA_ON(ip->i_mount)) + return (0); + + if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { + error = xfs_qm_dqattach(ip, 0); + if (error) + return (error); + } + for (i = 1; (i < 4 && i_tab[i]); i++) { + /* + * Watch out for duplicate entries in the table. 
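+ * (i_tab can name the same inode more than once; comparing each
+ * entry with the previous one below is what skips the repeat attach.)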
+ */ + if ((ip = i_tab[i]) != i_tab[i-1]) { + if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { + error = xfs_qm_dqattach(ip, 0); + if (error) + return (error); + } + } + } + return (0); +} + +void +xfs_qm_vop_dqattach_and_dqmod_newinode( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dquot_t *udqp, + xfs_dquot_t *gdqp) +{ + if (!XFS_IS_QUOTA_ON(tp->t_mountp)) + return; + + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp)); + + if (udqp) { + xfs_dqlock(udqp); + XFS_DQHOLD(udqp); + xfs_dqunlock(udqp); + ASSERT(ip->i_udquot == NULL); + ip->i_udquot = udqp; + ASSERT(ip->i_d.di_uid == INT_GET(udqp->q_core.d_id, ARCH_CONVERT)); + xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); + } + if (gdqp) { + xfs_dqlock(gdqp); + XFS_DQHOLD(gdqp); + xfs_dqunlock(gdqp); + ASSERT(ip->i_gdquot == NULL); + ip->i_gdquot = gdqp; + ASSERT(ip->i_d.di_gid == INT_GET(gdqp->q_core.d_id, ARCH_CONVERT)); + xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); + } +} + +/* ------------- list stuff -----------------*/ +void +xfs_qm_freelist_init(xfs_frlist_t *ql) +{ + ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql; + mutex_init(&ql->qh_lock, MUTEX_DEFAULT, "dqf"); + ql->qh_version = 0; + ql->qh_nelems = 0; +} + +void +xfs_qm_freelist_destroy(xfs_frlist_t *ql) +{ + xfs_dquot_t *dqp, *nextdqp; + + mutex_lock(&ql->qh_lock, PINOD); + for (dqp = ql->qh_next; + dqp != (xfs_dquot_t *)ql; ) { + xfs_dqlock(dqp); + nextdqp = dqp->dq_flnext; +#ifdef QUOTADEBUG + cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp); +#endif + XQM_FREELIST_REMOVE(dqp); + xfs_dqunlock(dqp); + xfs_qm_dqdestroy(dqp); + dqp = nextdqp; + } + /* + * Don't bother about unlocking. + */ + mutex_destroy(&ql->qh_lock); + + ASSERT(ql->qh_nelems == 0); +} + +void +xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq) +{ + dq->dq_flnext = ql->qh_next; + dq->dq_flprev = (xfs_dquot_t *)ql; + ql->qh_next = dq; + dq->dq_flnext->dq_flprev = dq; + xfs_Gqm->qm_dqfreelist.qh_nelems++; + xfs_Gqm->qm_dqfreelist.qh_version++; +} + +void +xfs_qm_freelist_unlink(xfs_dquot_t *dq) +{ + xfs_dquot_t *next = dq->dq_flnext; + xfs_dquot_t *prev = dq->dq_flprev; + + next->dq_flprev = prev; + prev->dq_flnext = next; + dq->dq_flnext = dq->dq_flprev = dq; + xfs_Gqm->qm_dqfreelist.qh_nelems--; + xfs_Gqm->qm_dqfreelist.qh_version++; +} + +void +xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq) +{ + xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq); +} + +int +xfs_qm_dqhashlock_nowait( + xfs_dquot_t *dqp) +{ + int locked; + + locked = mutex_trylock(&((dqp)->q_hash->qh_lock)); + return (locked); +} + +int +xfs_qm_freelist_lock_nowait( + xfs_qm_t *xqm) +{ + int locked; + + locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock)); + return (locked); +} + +int +xfs_qm_mplist_nowait( + xfs_mount_t *mp) +{ + int locked; + + ASSERT(mp->m_quotainfo); + locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp))); + return (locked); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_qm.h linux.21rc5-ac2/fs/xfs/quota/xfs_qm.h --- linux.21rc5/fs/xfs/quota/xfs_qm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_qm.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_QM_H__ +#define __XFS_QM_H__ + +#include "xfs_dquot_item.h" +#include "xfs_dquot.h" +#include "xfs_quota_priv.h" +#include "xfs_qm_stats.h" + +struct xfs_qm; +struct xfs_inode; + +extern mutex_t xfs_Gqm_lock; +extern struct xfs_qm *xfs_Gqm; +extern kmem_zone_t *qm_dqzone; +extern kmem_zone_t *qm_dqtrxzone; + +/* + * Used in xfs_qm_sync called by xfs_sync to count the max times that it can + * iterate over the mountpt's dquot list in one call. + */ +#define XFS_QM_SYNC_MAX_RESTARTS 7 + +/* + * Ditto, for xfs_qm_dqreclaim_one. + */ +#define XFS_QM_RECLAIM_MAX_RESTARTS 4 + +/* + * Ideal ratio of free to in use dquots. Quota manager makes an attempt + * to keep this balance. + */ +#define XFS_QM_DQFREE_RATIO 2 + +/* + * Dquot hashtable constants/threshold values. + */ +#define XFS_QM_NCSIZE_THRESHOLD 5000 +#define XFS_QM_HASHSIZE_LOW 32 +#define XFS_QM_HASHSIZE_HIGH 64 + +/* + * We output a cmn_err when quotachecking a quota file with more than + * this many fsbs. + */ +#define XFS_QM_BIG_QCHECK_NBLKS 500 + +/* + * This defines the unit of allocation of dquots. + * Currently, it is just one file system block, and a 4K blk contains 30 + * (136 * 30 = 4080) dquots. It's probably not worth trying to make + * this more dynamic. + * XXXsup However, if this number is changed, we have to make sure that we don't + * implicitly assume that we do allocations in chunks of a single filesystem + * block in the dquot/xqm code. + */ +#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 +/* + * When doing a quotacheck, we log dquot clusters of this many FSBs at most + * in a single transaction. We don't want to ask for too huge a log reservation. + */ +#define XFS_QM_MAX_DQCLUSTER_LOGSZ 3 + +typedef xfs_dqhash_t xfs_dqlist_t; +/* + * The freelist head. The first two fields match the first two in the + * xfs_dquot_t structure (in xfs_dqmarker_t) + */ +typedef struct xfs_frlist { + struct xfs_dquot *qh_next; + struct xfs_dquot *qh_prev; + mutex_t qh_lock; + uint qh_version; + uint qh_nelems; +} xfs_frlist_t; + +/* + * Quota Manager (global) structure. Lives only in core. 
+ */ +typedef struct xfs_qm { + xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */ + xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */ + uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */ + xfs_frlist_t qm_dqfreelist; /* freelist of dquots */ + atomic_t qm_totaldquots; /* total incore dquots */ + uint qm_nrefs; /* file systems with quota on */ + int qm_dqfree_ratio;/* ratio of free to inuse dquots */ + kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ + kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ +} xfs_qm_t; + +/* + * Various quota information for individual filesystems. + * The mount structure keeps a pointer to this. + */ +typedef struct xfs_quotainfo { + xfs_inode_t *qi_uquotaip; /* user quota inode */ + xfs_inode_t *qi_gquotaip; /* group quota inode */ + lock_t qi_pinlock; /* dquot pinning mutex */ + xfs_dqlist_t qi_dqlist; /* all dquots in filesys */ + int qi_dqreclaims; /* a change here indicates + a removal in the dqlist */ + time_t qi_btimelimit; /* limit for blks timer */ + time_t qi_itimelimit; /* limit for inodes timer */ + time_t qi_rtbtimelimit;/* limit for rt blks timer */ + xfs_qwarncnt_t qi_bwarnlimit; /* limit for num warnings */ + xfs_qwarncnt_t qi_iwarnlimit; /* limit for num warnings */ + mutex_t qi_quotaofflock;/* to serialize quotaoff */ + /* Some useful precalculated constants */ + xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */ + uint qi_dqperchunk; /* # ondisk dqs in above chunk */ +} xfs_quotainfo_t; + + +extern xfs_dqtrxops_t xfs_trans_dquot_ops; + +extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long); +extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *, + xfs_dquot_t *, xfs_dquot_t *, long, long, uint); +extern void xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *); +extern void xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *); + +/* + * We keep the usr and grp dquots separately so that locking will be easier + * to do at commit time. All transactions that we know of at this point + * affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value. + */ +#define XFS_QM_TRANS_MAXDQS 2 +typedef struct xfs_dquot_acct { + xfs_dqtrx_t dqa_usrdquots[XFS_QM_TRANS_MAXDQS]; + xfs_dqtrx_t dqa_grpdquots[XFS_QM_TRANS_MAXDQS]; +} xfs_dquot_acct_t; + +/* + * Users are allowed to have a usage exceeding their softlimit for + * a period this long. 
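+ *
+ * (Editor's illustration of the grace-period rule above; not part of
+ *  the patch, names hypothetical. Over the soft limit the user is
+ *  refused only once the timer expires; the hard limit always binds:)
+ */
+#if 0	/* illustration only -- not compiled */
+static int over_quota(xfs_qcnt_t usage, xfs_qcnt_t soft, xfs_qcnt_t hard,
+		      time_t timer, time_t now)
+{
+	if (hard && usage >= hard)
+		return 1;			/* hard limit: immediate */
+	if (soft && usage >= soft)
+		return timer && now > timer;	/* soft: after grace only */
+	return 0;
+}
+#endif
+/* (end of illustration)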
+ */ +#define XFS_QM_BTIMELIMIT DQ_BTIMELIMIT +#define XFS_QM_RTBTIMELIMIT DQ_BTIMELIMIT +#define XFS_QM_ITIMELIMIT DQ_FTIMELIMIT + +#define XFS_QM_BWARNLIMIT 5 +#define XFS_QM_IWARNLIMIT 5 + +#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock, PINOD)) +#define XFS_QM_UNLOCK(xqm) (mutex_unlock(&xqm##_lock)) +#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++) +#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--) + +extern int xfs_qm_init_quotainfo(xfs_mount_t *); +extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); +extern int xfs_qm_mount_quotas(xfs_mount_t *); +extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint); +extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); +extern int xfs_qm_unmount_quotas(xfs_mount_t *); +extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); +extern int xfs_qm_sync(xfs_mount_t *, short); + +/* dquot stuff */ +extern void xfs_qm_dqunlink(xfs_dquot_t *); +extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **); +extern int xfs_qm_dqattach(xfs_inode_t *, uint); +extern void xfs_qm_dqdetach(xfs_inode_t *); +extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); +extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); + +/* vop stuff */ +extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *, + uid_t, gid_t, uint, + xfs_dquot_t **, xfs_dquot_t **); +extern void xfs_qm_vop_dqattach_and_dqmod_newinode( + xfs_trans_t *, xfs_inode_t *, + xfs_dquot_t *, xfs_dquot_t *); +extern int xfs_qm_vop_rename_dqattach(xfs_inode_t **); +extern xfs_dquot_t * xfs_qm_vop_chown(xfs_trans_t *, xfs_inode_t *, + xfs_dquot_t **, xfs_dquot_t *); +extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *, + xfs_dquot_t *, xfs_dquot_t *, uint); + +/* list stuff */ +extern void xfs_qm_freelist_init(xfs_frlist_t *); +extern void xfs_qm_freelist_destroy(xfs_frlist_t *); +extern void xfs_qm_freelist_insert(xfs_frlist_t *, xfs_dquot_t *); +extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *); +extern void xfs_qm_freelist_unlink(xfs_dquot_t *); +extern int xfs_qm_freelist_lock_nowait(xfs_qm_t *); +extern int xfs_qm_mplist_nowait(xfs_mount_t *); +extern int xfs_qm_dqhashlock_nowait(xfs_dquot_t *); + +/* system call interface */ +extern int xfs_qm_quotactl(bhv_desc_t *, int, int, xfs_caddr_t); + +#ifdef DEBUG +extern int xfs_qm_internalqcheck(xfs_mount_t *); +#else +#define xfs_qm_internalqcheck(mp) (0) +#endif + +#endif /* __XFS_QM_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_qm_stats.c linux.21rc5-ac2/fs/xfs/quota/xfs_qm_stats.c --- linux.21rc5/fs/xfs/quota/xfs_qm_stats.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_qm_stats.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" + +#include "xfs_qm.h" + +struct xqmstats xqmstats; + +STATIC int +xfs_qm_read_xfsquota( + char *buffer, + char **start, + off_t offset, + int count, + int *eof, + void *data) +{ + int len; + + /* maximum; incore; ratio free to inuse; freelist */ + len = sprintf(buffer, "%d\t%d\t%d\t%u\n", + ndquot, + xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, + xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0, + xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0); + + if (offset >= len) { + *start = buffer; + *eof = 1; + return 0; + } + *start = buffer + offset; + if ((len -= offset) > count) + return count; + *eof = 1; + + return len; +} + +STATIC int +xfs_qm_read_stats( + char *buffer, + char **start, + off_t offset, + int count, + int *eof, + void *data) +{ + int len; + + /* quota performance statistics */ + len = sprintf(buffer, "qm %u %u %u %u %u %u %u %u\n", + xqmstats.xs_qm_dqreclaims, + xqmstats.xs_qm_dqreclaim_misses, + xqmstats.xs_qm_dquot_dups, + xqmstats.xs_qm_dqcachemisses, + xqmstats.xs_qm_dqcachehits, + xqmstats.xs_qm_dqwants, + xqmstats.xs_qm_dqshake_reclaims, + xqmstats.xs_qm_dqinact_reclaims); + + if (offset >= len) { + *start = buffer; + *eof = 1; + return 0; + } + *start = buffer + offset; + if ((len -= offset) > count) + return count; + *eof = 1; + + return len; +} + +void +xfs_qm_init_procfs(void) +{ + create_proc_read_entry("fs/xfs/xqmstat", 0, 0, xfs_qm_read_stats, NULL); + create_proc_read_entry("fs/xfs/xqm", 0, 0, xfs_qm_read_xfsquota, NULL); +} + +void +xfs_qm_cleanup_procfs(void) +{ + remove_proc_entry("fs/xfs/xqm", NULL); + remove_proc_entry("fs/xfs/xqmstat", NULL); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_qm_stats.h linux.21rc5-ac2/fs/xfs/quota/xfs_qm_stats.h --- linux.21rc5/fs/xfs/quota/xfs_qm_stats.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_qm_stats.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_QM_STATS_H__ +#define __XFS_QM_STATS_H__ + + +#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF) + +/* + * XQM global statistics + */ +struct xqmstats { + __uint32_t xs_qm_dqreclaims; + __uint32_t xs_qm_dqreclaim_misses; + __uint32_t xs_qm_dquot_dups; + __uint32_t xs_qm_dqcachemisses; + __uint32_t xs_qm_dqcachehits; + __uint32_t xs_qm_dqwants; + __uint32_t xs_qm_dqshake_reclaims; + __uint32_t xs_qm_dqinact_reclaims; +}; + +extern struct xqmstats xqmstats; + +# define XQM_STATS_INC(count) ( (count)++ ) + +extern void xfs_qm_init_procfs(void); +extern void xfs_qm_cleanup_procfs(void); + +#else + +# define XQM_STATS_INC(count) do { } while (0) + +static __inline void xfs_qm_init_procfs(void) { }; +static __inline void xfs_qm_cleanup_procfs(void) { }; + +#endif + +#endif /* __XFS_QM_STATS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_qm_syscalls.c linux.21rc5-ac2/fs/xfs/quota/xfs_qm_syscalls.c --- linux.21rc5/fs/xfs/quota/xfs_qm_syscalls.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_qm_syscalls.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,1444 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" + +#include "xfs_qm.h" + +#ifdef DEBUG +# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args) +#else +# define qdprintk(s, args...) do { } while (0) +#endif + +STATIC int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint); +STATIC int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint, + fs_disk_quota_t *); +STATIC int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *); +STATIC int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint, + fs_disk_quota_t *); +STATIC int xfs_qm_scall_quotaon(xfs_mount_t *, uint); +STATIC int xfs_qm_scall_quotaoff(xfs_mount_t *, uint, boolean_t); +STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); +STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, + uint); +STATIC uint xfs_qm_import_flags(uint); +STATIC uint xfs_qm_export_flags(uint); +STATIC uint xfs_qm_import_qtype_flags(uint); +STATIC uint xfs_qm_export_qtype_flags(uint); +STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *, + fs_disk_quota_t *); + + +/* + * The main distribution switch of all XFS quotactl system calls. + */ +int +xfs_qm_quotactl( + struct bhv_desc *bdp, + int cmd, + int id, + xfs_caddr_t addr) +{ + xfs_mount_t *mp; + int error; + struct vfs *vfsp; + + vfsp = bhvtovfs(bdp); + mp = XFS_VFSTOM(vfsp); + + if (addr == NULL && cmd != Q_SYNC) + return XFS_ERROR(EINVAL); + if (id < 0 && cmd != Q_SYNC) + return XFS_ERROR(EINVAL); + + /* + * The following commands are valid even when quotaoff. + */ + switch (cmd) { + /* + * truncate quota files. quota must be off. + */ + case Q_XQUOTARM: + if (XFS_IS_QUOTA_ON(mp) || addr == NULL) + return XFS_ERROR(EINVAL); + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + return (xfs_qm_scall_trunc_qfiles(mp, + xfs_qm_import_qtype_flags(*(uint *)addr))); + /* + * Get quota status information. + */ + case Q_XGETQSTAT: + return (xfs_qm_scall_getqstat(mp, (fs_quota_stat_t *)addr)); + + /* + * QUOTAON for root f/s and quota enforcement on others.. + * Quota accounting for non-root f/s's must be turned on + * at mount time. + */ + case Q_XQUOTAON: + if (addr == NULL) + return XFS_ERROR(EINVAL); + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + return (xfs_qm_scall_quotaon(mp, + xfs_qm_import_flags(*(uint *)addr))); + case Q_XQUOTAOFF: + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + break; + + default: + break; + } + + if (! 
XFS_IS_QUOTA_ON(mp)) + return XFS_ERROR(ESRCH); + + switch (cmd) { + case Q_XQUOTAOFF: + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + error = xfs_qm_scall_quotaoff(mp, + xfs_qm_import_flags(*(uint *)addr), + B_FALSE); + break; + + /* + * Defaults to XFS_GETUQUOTA. + */ + case Q_XGETQUOTA: + error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_USER, + (fs_disk_quota_t *)addr); + break; + /* + * Set limits, both hard and soft. Defaults to Q_SETUQLIM. + */ + case Q_XSETQLIM: + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_USER, + (fs_disk_quota_t *)addr); + break; + + case Q_XSETGQLIM: + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_GROUP, + (fs_disk_quota_t *)addr); + break; + + + case Q_XGETGQUOTA: + error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP, + (fs_disk_quota_t *)addr); + break; + + /* + * Quotas are entirely undefined after quotaoff in XFS quotas. + * For instance, there's no way to set limits when quotaoff. + */ + + default: + error = XFS_ERROR(EINVAL); + break; + } + + return (error); +} + +/* + * Turn off quota accounting and/or enforcement for all udquots and/or + * gdquots. Called only at unmount time. + * + * This assumes that there are no dquots of this file system cached + * incore, and modifies the ondisk dquot directly. Therefore, for example, + * it is an error to call this twice, without purging the cache. + */ +STATIC int +xfs_qm_scall_quotaoff( + xfs_mount_t *mp, + uint flags, + boolean_t force) +{ + uint dqtype; + unsigned long s; + int error; + uint inactivate_flags; + xfs_qoff_logitem_t *qoffstart; + int nculprits; + + if (!force && !capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + /* + * No file system can have quotas enabled on disk but not in core. + * Note that quota utilities (like quotaoff) _expect_ + * errno == EEXIST here. + */ + if ((mp->m_qflags & flags) == 0) + return XFS_ERROR(EEXIST); + error = 0; + + flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); + + /* + * We don't want to deal with two quotaoffs messing up each other, + * so we're going to serialize it. quotaoff isn't exactly a performance + * critical thing. + * If quotaoff, then we must be dealing with the root filesystem. + */ + ASSERT(mp->m_quotainfo); + if (mp->m_quotainfo) + mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD); + + ASSERT(mp->m_quotainfo); + + /* + * If we're just turning off quota enforcement, change mp and go. + */ + if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { + mp->m_qflags &= ~(flags); + + s = XFS_SB_LOCK(mp); + mp->m_sb.sb_qflags = mp->m_qflags; + XFS_SB_UNLOCK(mp, s); + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + + /* XXX what to do if error ? Revert back to old vals incore ? */ + error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); + return (error); + } + + dqtype = 0; + inactivate_flags = 0; + /* + * If accounting is off, we must turn enforcement off, clear the + * quota 'CHKD' certificate to make it known that we have to + * do a quotacheck the next time this quota is turned on. + */ + if (flags & XFS_UQUOTA_ACCT) { + dqtype |= XFS_QMOPT_UQUOTA; + flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD); + inactivate_flags |= XFS_UQUOTA_ACTIVE; + } + if (flags & XFS_GQUOTA_ACCT) { + dqtype |= XFS_QMOPT_GQUOTA; + flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD); + inactivate_flags |= XFS_GQUOTA_ACTIVE; + } + + /* + * Nothing to do? Don't complain. This happens when we're just + * turning off quota enforcement. 
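+ *
+ * (Editor's note on the flag widening earlier in this function; not
+ *  part of the patch. Turning off accounting also turns off
+ *  enforcement and revokes the CHKD certificate, forcing a quotacheck
+ *  at the next quotaon -- a hypothetical helper capturing just that:)
+ */
+#if 0	/* illustration only -- not compiled */
+static uint quotaoff_widen_flags(uint flags)
+{
+	if (flags & XFS_UQUOTA_ACCT)
+		flags |= XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD;
+	if (flags & XFS_GQUOTA_ACCT)
+		flags |= XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD;
+	return flags;
+}
+#endif
+/* (end of illustration)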
+ */ + if ((mp->m_qflags & flags) == 0) { + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + return (0); + } + + /* + * Write the LI_QUOTAOFF log record, and do SB changes atomically, + * and synchronously. + */ + xfs_qm_log_quotaoff(mp, &qoffstart, flags); + + /* + * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct + * to take care of the race between dqget and quotaoff. We don't take + * any special locks to reset these bits. All processes need to check + * these bits *after* taking inode lock(s) to see if the particular + * quota type is in the process of being turned off. If *ACTIVE, it is + * guaranteed that all dquot structures and all quotainode ptrs will all + * stay valid as long as that inode is kept locked. + * + * There is no turning back after this. + */ + mp->m_qflags &= ~inactivate_flags; + + /* + * Give back all the dquot reference(s) held by inodes. + * Here we go thru every single incore inode in this file system, and + * do a dqrele on the i_udquot/i_gdquot that it may have. + * Essentially, as long as somebody has an inode locked, this guarantees + * that quotas will not be turned off. This is handy because in a + * transaction once we lock the inode(s) and check for quotaon, we can + * depend on the quota inodes (and other things) being valid as long as + * we keep the lock(s). + */ + xfs_qm_dqrele_all_inodes(mp, flags); + + /* + * Next we make the changes in the quota flag in the mount struct. + * This isn't protected by a particular lock directly, because we + * don't want to take a mrlock everytime we depend on quotas being on. + */ + mp->m_qflags &= ~(flags); + + /* + * Go through all the dquots of this file system and purge them, + * according to what was turned off. We may not be able to get rid + * of all dquots, because dquots can have temporary references that + * are not attached to inodes. eg. xfs_setattr, xfs_create. + * So, if we couldn't purge all the dquots from the filesystem, + * we can't get rid of the incore data structures. + */ + while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF))) + delay(10 * nculprits); + + /* + * Transactions that had started before ACTIVE state bit was cleared + * could have logged many dquots, so they'd have higher LSNs than + * the first QUOTAOFF log record does. If we happen to crash when + * the tail of the log has gone past the QUOTAOFF record, but + * before the last dquot modification, those dquots __will__ + * recover, and that's not good. + * + * So, we have QUOTAOFF start and end logitems; the start + * logitem won't get overwritten until the end logitem appears... + */ + xfs_qm_log_quotaoff_end(mp, qoffstart, flags); + + /* + * If quotas is completely disabled, close shop. + */ + if ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_ALL) { + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + xfs_qm_destroy_quotainfo(mp); + return (0); + } + + /* + * Release our quotainode references, and vn_purge them, + * if we don't need them anymore. 
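+ *
+ * (Editor's sketch of the ordering argument above; not part of the
+ *  patch:
+ *
+ *	[QUOTAOFF start] ... [dquot mods by stragglers] ... [QUOTAOFF end]
+ *
+ *  the end item is written only after the last dquot purge, so the
+ *  start item stays live in the log while any dquot changes that must
+ *  not be replayed are still ahead of it.)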
+ */ + if ((dqtype & XFS_QMOPT_UQUOTA) && XFS_QI_UQIP(mp)) { + XFS_PURGE_INODE(XFS_QI_UQIP(mp)); + XFS_QI_UQIP(mp) = NULL; + } + if ((dqtype & XFS_QMOPT_GQUOTA) && XFS_QI_GQIP(mp)) { + XFS_PURGE_INODE(XFS_QI_GQIP(mp)); + XFS_QI_GQIP(mp) = NULL; + } + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + + return (error); +} + +STATIC int +xfs_qm_scall_trunc_qfiles( + xfs_mount_t *mp, + uint flags) +{ + int error; + xfs_inode_t *qip; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + error = 0; + if (!XFS_SB_VERSION_HASQUOTA(&mp->m_sb) || flags == 0) { + qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); + return XFS_ERROR(EINVAL); + } + + if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { + error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, &qip, 0); + if (! error) { + (void) xfs_truncate_file(mp, qip); + VN_RELE(XFS_ITOV(qip)); + } + } + + if ((flags & XFS_DQ_GROUP) && mp->m_sb.sb_gquotino != NULLFSINO) { + error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, &qip, 0); + if (! error) { + (void) xfs_truncate_file(mp, qip); + VN_RELE(XFS_ITOV(qip)); + } + } + + return (error); +} + + +/* + * Switch on (a given) quota enforcement for a filesystem. This takes + * effect immediately. + * (Switching on quota accounting must be done at mount time.) + */ +STATIC int +xfs_qm_scall_quotaon( + xfs_mount_t *mp, + uint flags) +{ + int error; + unsigned long s; + uint qf; + uint accflags; + __int64_t sbflags; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + + flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); + /* + * Switching on quota accounting must be done at mount time. + */ + accflags = flags & XFS_ALL_QUOTA_ACCT; + flags &= ~(XFS_ALL_QUOTA_ACCT); + + sbflags = 0; + + if (flags == 0) { + qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags); + return XFS_ERROR(EINVAL); + } + + /* No fs can turn on quotas with a delayed effect */ + ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0); + + /* + * Can't enforce without accounting. We check the superblock + * qflags here instead of m_qflags because rootfs can have + * quota acct on ondisk without m_qflags' knowing. + */ + if (((flags & XFS_UQUOTA_ACCT) == 0 && + (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && + (flags & XFS_UQUOTA_ENFD)) + || + ((flags & XFS_GQUOTA_ACCT) == 0 && + (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && + (flags & XFS_GQUOTA_ENFD))) { + qdprintk("Can't enforce without acct, flags=%x sbflags=%x\n", + flags, mp->m_sb.sb_qflags); + return XFS_ERROR(EINVAL); + } + /* + * If everything's upto-date incore, then don't waste time. + */ + if ((mp->m_qflags & flags) == flags) + return XFS_ERROR(EEXIST); + + /* + * Change sb_qflags on disk but not incore mp->qflags + * if this is the root filesystem. + */ + s = XFS_SB_LOCK(mp); + qf = mp->m_sb.sb_qflags; + mp->m_sb.sb_qflags = qf | flags; + XFS_SB_UNLOCK(mp, s); + + /* + * There's nothing to change if it's the same. + */ + if ((qf & flags) == flags && sbflags == 0) + return XFS_ERROR(EEXIST); + sbflags |= XFS_SB_QFLAGS; + + if ((error = xfs_qm_write_sb_changes(mp, sbflags))) + return (error); + /* + * If we aren't trying to switch on quota enforcement, we are done. + */ + if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) != + (mp->m_qflags & XFS_UQUOTA_ACCT)) || + (flags & XFS_ALL_QUOTA_ENFD) == 0) + return (0); + + if (! XFS_IS_QUOTA_RUNNING(mp)) + return XFS_ERROR(ESRCH); + + /* + * Switch on quota enforcement in core. 
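+ *
+ * (Editor's illustration of the validation near the top of this
+ *  function; not part of the patch, helper name hypothetical.
+ *  Enforcement may only be requested for a quota type whose accounting
+ *  is on, either in the request itself or already on disk:)
+ */
+#if 0	/* illustration only -- not compiled */
+static int enfd_without_acct(uint flags, uint sbqflags)
+{
+	if ((flags & XFS_UQUOTA_ENFD) &&
+	    !((flags | sbqflags) & XFS_UQUOTA_ACCT))
+		return 1;	/* user enforcement, no user accounting */
+	if ((flags & XFS_GQUOTA_ENFD) &&
+	    !((flags | sbqflags) & XFS_GQUOTA_ACCT))
+		return 1;	/* group enforcement, no group accounting */
+	return 0;
+}
+#endif
+/* (end of illustration)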
+ */ + mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD); + mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + + return (0); +} + + + +/* + * Return quota status information, such as uquota-off, enforcements, etc. + */ +STATIC int +xfs_qm_scall_getqstat( + xfs_mount_t *mp, + fs_quota_stat_t *out) +{ + xfs_inode_t *uip, *gip; + boolean_t tempuqip, tempgqip; + + uip = gip = NULL; + tempuqip = tempgqip = B_FALSE; + memset(out, 0, sizeof(fs_quota_stat_t)); + + out->qs_version = FS_QSTAT_VERSION; + if (! XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) { + out->qs_uquota.qfs_ino = NULLFSINO; + out->qs_gquota.qfs_ino = NULLFSINO; + return (0); + } + out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & + (XFS_ALL_QUOTA_ACCT| + XFS_ALL_QUOTA_ENFD)); + out->qs_pad = 0; + out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; + out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; + + if (mp->m_quotainfo) { + uip = mp->m_quotainfo->qi_uquotaip; + gip = mp->m_quotainfo->qi_gquotaip; + } + if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { + if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, &uip, 0) == 0) + tempuqip = B_TRUE; + } + if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { + if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, &gip, 0) == 0) + tempgqip = B_TRUE; + } + if (uip) { + out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; + out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; + if (tempuqip) + VN_RELE(XFS_ITOV(uip)); + } + if (gip) { + out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; + out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; + if (tempgqip) + VN_RELE(XFS_ITOV(gip)); + } + if (mp->m_quotainfo) { + out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp); + out->qs_btimelimit = XFS_QI_BTIMELIMIT(mp); + out->qs_itimelimit = XFS_QI_ITIMELIMIT(mp); + out->qs_rtbtimelimit = XFS_QI_RTBTIMELIMIT(mp); + out->qs_bwarnlimit = XFS_QI_BWARNLIMIT(mp); + out->qs_iwarnlimit = XFS_QI_IWARNLIMIT(mp); + } + return (0); +} + +/* + * Adjust quota limits, and start/stop timers accordingly. + */ +STATIC int +xfs_qm_scall_setqlim( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + fs_disk_quota_t *newlim) +{ + xfs_disk_dquot_t *ddq; + xfs_dquot_t *dqp; + xfs_trans_t *tp; + int error; + xfs_qcnt_t hard, soft; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + + if ((newlim->d_fieldmask & (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK)) == 0) + return (0); + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); + if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128, + 0, 0, XFS_DEFAULT_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + + /* + * We don't want to race with a quotaoff so take the quotaoff lock. + * (We don't hold an inode lock, so there's nothing else to stop + * a quotaoff from happening). (XXXThis doesn't currently happen + * because we take the vfslock before calling xfs_qm_sysent). + */ + mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD); + + /* + * Get the dquot (locked), and join it to the transaction. + * Allocate the dquot if this doesn't exist. + */ + if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) { + xfs_trans_cancel(tp, XFS_TRANS_ABORT); + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + ASSERT(error != ENOENT); + return (error); + } + xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET"); + xfs_trans_dqjoin(tp, dqp); + ddq = &dqp->q_core; + + /* + * Make sure that hardlimits are >= soft limits before changing. + */ + hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? 
+ (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : + INT_GET(ddq->d_blk_hardlimit, ARCH_CONVERT); + soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? + (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : + INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT); + if (hard == 0 || hard >= soft) { + INT_SET(ddq->d_blk_hardlimit, ARCH_CONVERT, hard); + INT_SET(ddq->d_blk_softlimit, ARCH_CONVERT, soft); + } + else { + qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft); + } + hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? + (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : + INT_GET(ddq->d_rtb_hardlimit, ARCH_CONVERT); + soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? + (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : + INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT); + if (hard == 0 || hard >= soft) { + INT_SET(ddq->d_rtb_hardlimit, ARCH_CONVERT, hard); + INT_SET(ddq->d_rtb_softlimit, ARCH_CONVERT, soft); + } + else + qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft); + + hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? + (xfs_qcnt_t) newlim->d_ino_hardlimit : + INT_GET(ddq->d_ino_hardlimit, ARCH_CONVERT); + soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? + (xfs_qcnt_t) newlim->d_ino_softlimit : + INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT); + if (hard == 0 || hard >= soft) { + INT_SET(ddq->d_ino_hardlimit, ARCH_CONVERT, hard); + INT_SET(ddq->d_ino_softlimit, ARCH_CONVERT, soft); + } + else + qdprintk("ihard %Ld < isoft %Ld\n", hard, soft); + + if (id == 0) { + /* + * Timelimits for the super user set the relative time + * the other users can be over quota for this file system. + * If it is zero a default is used. + */ + if (newlim->d_fieldmask & FS_DQ_BTIMER) { + mp->m_quotainfo->qi_btimelimit = newlim->d_btimer; + INT_SET(dqp->q_core.d_btimer, ARCH_CONVERT, newlim->d_btimer); + } + if (newlim->d_fieldmask & FS_DQ_ITIMER) { + mp->m_quotainfo->qi_itimelimit = newlim->d_itimer; + INT_SET(dqp->q_core.d_itimer, ARCH_CONVERT, newlim->d_itimer); + } + if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { + mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer; + INT_SET(dqp->q_core.d_rtbtimer, ARCH_CONVERT, newlim->d_rtbtimer); + } + } else /* if (XFS_IS_QUOTA_ENFORCED(mp)) */ { + /* + * If the user is now over quota, start the timelimit. + * The user will not be 'warned'. + * Note that we keep the timers ticking, whether enforcement + * is on or off. We don't really want to bother with iterating + * over all ondisk dquots and turning the timers on/off. + */ + xfs_qm_adjust_dqtimers(mp, ddq); + } + dqp->dq_flags |= XFS_DQ_DIRTY; + xfs_trans_log_dquot(tp, dqp); + + xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT"); + xfs_trans_commit(tp, 0, NULL); + xfs_qm_dqprint(dqp); + xfs_qm_dqrele(dqp); + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + + return (0); +} + +STATIC int +xfs_qm_scall_getquota( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + fs_disk_quota_t *out) +{ + xfs_dquot_t *dqp; + int error; + + /* + * Try to get the dquot. We don't want it allocated on disk, so + * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't + * exist, we'll get ENOENT back. + */ + if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) { + return (error); + } + + xfs_dqtrace_entry(dqp, "Q_GETQUOTA SUCCESS"); + /* + * If everything's NULL, this dquot doesn't quite exist as far as + * our utility programs are concerned. 
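+ *
+ * (Editor's worked example for the limit checks in xfs_qm_scall_setqlim
+ *  above; not part of the patch. A new pair is stored only when the
+ *  hard limit is 0, meaning "no limit", or >= the soft limit:
+ *  hard=100/soft=80 is accepted, while hard=50/soft=80 is rejected
+ *  with a qdprintk and the old on-disk values are kept.)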
+ */ + if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { + xfs_qm_dqput(dqp); + return XFS_ERROR(ENOENT); + } + /* xfs_qm_dqprint(dqp); */ + /* + * Convert the disk dquot to the exportable format + */ + xfs_qm_export_dquot(mp, &dqp->q_core, out); + xfs_qm_dqput(dqp); + return (error ? XFS_ERROR(EFAULT) : 0); +} + + +STATIC int +xfs_qm_log_quotaoff_end( + xfs_mount_t *mp, + xfs_qoff_logitem_t *startqoff, + uint flags) +{ + xfs_trans_t *tp; + int error; + xfs_qoff_logitem_t *qoffi; + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END); + + if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2, + 0, 0, XFS_DEFAULT_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + + qoffi = xfs_trans_get_qoff_item(tp, startqoff, + flags & XFS_ALL_QUOTA_ACCT); + xfs_trans_log_quotaoff_item(tp, qoffi); + + /* + * We have to make sure that the transaction is secure on disk before we + * return and actually stop quota accounting. So, make it synchronous. + * We don't care about quotoff's performance. + */ + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0, NULL); + return (error); +} + + +STATIC int +xfs_qm_log_quotaoff( + xfs_mount_t *mp, + xfs_qoff_logitem_t **qoffstartp, + uint flags) +{ + xfs_trans_t *tp; + int error; + unsigned long s; + xfs_qoff_logitem_t *qoffi=NULL; + uint oldsbqflag=0; + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF); + if ((error = xfs_trans_reserve(tp, 0, + sizeof(xfs_qoff_logitem_t) * 2 + + mp->m_sb.sb_sectsize + 128, + 0, + 0, + XFS_DEFAULT_LOG_COUNT))) { + goto error0; + } + + qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); + xfs_trans_log_quotaoff_item(tp, qoffi); + + s = XFS_SB_LOCK(mp); + oldsbqflag = mp->m_sb.sb_qflags; + mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; + XFS_SB_UNLOCK(mp, s); + + xfs_mod_sb(tp, XFS_SB_QFLAGS); + + /* + * We have to make sure that the transaction is secure on disk before we + * return and actually stop quota accounting. So, make it synchronous. + * We don't care about quotoff's performance. + */ + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0, NULL); + +error0: + if (error) { + xfs_trans_cancel(tp, 0); + /* + * No one else is modifying sb_qflags, so this is OK. + * We still hold the quotaofflock. + */ + s = XFS_SB_LOCK(mp); + mp->m_sb.sb_qflags = oldsbqflag; + XFS_SB_UNLOCK(mp, s); + } + *qoffstartp = qoffi; + return (error); +} + + +/* + * Translate an internal style on-disk-dquot to the exportable format. + * The main differences are that the counters/limits are all in Basic + * Blocks (BBs) instead of the internal FSBs, and all on-disk data has + * to be converted to the native endianness. 
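+ *
+ * (Editor's worked example; not part of the patch. A basic block is
+ *  512 bytes, so on a filesystem with 4096-byte blocks one FSB equals
+ *  8 BBs: a d_blk_hardlimit of 100 FSBs is exported as 800 BBs by
+ *  XFS_FSB_TO_BB.)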
+ */ +STATIC void +xfs_qm_export_dquot( + xfs_mount_t *mp, + xfs_disk_dquot_t *src, + struct fs_disk_quota *dst) +{ + memset(dst, 0, sizeof(*dst)); + dst->d_version = FS_DQUOT_VERSION; /* different from src->d_version */ + dst->d_flags = + xfs_qm_export_qtype_flags(INT_GET(src->d_flags, ARCH_CONVERT)); + dst->d_id = INT_GET(src->d_id, ARCH_CONVERT); + dst->d_blk_hardlimit = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_blk_hardlimit, ARCH_CONVERT)); + dst->d_blk_softlimit = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_blk_softlimit, ARCH_CONVERT)); + dst->d_ino_hardlimit = (__uint64_t) + INT_GET(src->d_ino_hardlimit, ARCH_CONVERT); + dst->d_ino_softlimit = (__uint64_t) + INT_GET(src->d_ino_softlimit, ARCH_CONVERT); + dst->d_bcount = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_bcount, ARCH_CONVERT)); + dst->d_icount = (__uint64_t) INT_GET(src->d_icount, ARCH_CONVERT); + dst->d_btimer = (__uint32_t) INT_GET(src->d_btimer, ARCH_CONVERT); + dst->d_itimer = (__uint32_t) INT_GET(src->d_itimer, ARCH_CONVERT); + dst->d_iwarns = INT_GET(src->d_iwarns, ARCH_CONVERT); + dst->d_bwarns = INT_GET(src->d_bwarns, ARCH_CONVERT); + + dst->d_rtb_hardlimit = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_rtb_hardlimit, ARCH_CONVERT)); + dst->d_rtb_softlimit = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_rtb_softlimit, ARCH_CONVERT)); + dst->d_rtbcount = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_rtbcount, ARCH_CONVERT)); + dst->d_rtbtimer = (__uint32_t) INT_GET(src->d_rtbtimer, ARCH_CONVERT); + dst->d_rtbwarns = INT_GET(src->d_rtbwarns, ARCH_CONVERT); + + /* + * Internally, we don't reset all the timers when quota enforcement + * gets turned off. No need to confuse the userlevel code, + * so return zeroes in that case. + */ + if (! XFS_IS_QUOTA_ENFORCED(mp)) { + dst->d_btimer = 0; + dst->d_itimer = 0; + dst->d_rtbtimer = 0; + } + +#ifdef DEBUG + if (XFS_IS_QUOTA_ENFORCED(mp) && dst->d_id != 0) { + if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) && + (dst->d_blk_softlimit > 0)) { + ASSERT(dst->d_btimer != 0); + } + if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) && + (dst->d_ino_softlimit > 0)) { + ASSERT(dst->d_itimer != 0); + } + } +#endif +} + +STATIC uint +xfs_qm_import_qtype_flags( + uint uflags) +{ + /* + * Can't be both at the same time. + */ + if (((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) == + (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) || + ((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) == 0)) + return (0); + + return (uflags & XFS_USER_QUOTA) ? + XFS_DQ_USER : XFS_DQ_GROUP; +} + +STATIC uint +xfs_qm_export_qtype_flags( + uint flags) +{ + /* + * Can't be both at the same time. + */ + ASSERT((flags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) != + (XFS_GROUP_QUOTA | XFS_USER_QUOTA)); + ASSERT((flags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) != 0); + + return (flags & XFS_DQ_USER) ? 
+ XFS_USER_QUOTA : XFS_GROUP_QUOTA; +} + +STATIC uint +xfs_qm_import_flags( + uint uflags) +{ + uint flags = 0; + + if (uflags & XFS_QUOTA_UDQ_ACCT) + flags |= XFS_UQUOTA_ACCT; + if (uflags & XFS_QUOTA_GDQ_ACCT) + flags |= XFS_GQUOTA_ACCT; + if (uflags & XFS_QUOTA_UDQ_ENFD) + flags |= XFS_UQUOTA_ENFD; + if (uflags & XFS_QUOTA_GDQ_ENFD) + flags |= XFS_GQUOTA_ENFD; + return (flags); +} + + +STATIC uint +xfs_qm_export_flags( + uint flags) +{ + uint uflags; + + uflags = 0; + if (flags & XFS_UQUOTA_ACCT) + uflags |= XFS_QUOTA_UDQ_ACCT; + if (flags & XFS_GQUOTA_ACCT) + uflags |= XFS_QUOTA_GDQ_ACCT; + if (flags & XFS_UQUOTA_ENFD) + uflags |= XFS_QUOTA_UDQ_ENFD; + if (flags & XFS_GQUOTA_ENFD) + uflags |= XFS_QUOTA_GDQ_ENFD; + return (uflags); +} + + +/* + * Go thru all the inodes in the file system, releasing their dquots. + * Note that the mount structure gets modified to indicate that quotas are off + * AFTER this, in the case of quotaoff. This also gets called from + * xfs_rootumount. + */ +void +xfs_qm_dqrele_all_inodes( + struct xfs_mount *mp, + uint flags) +{ + vmap_t vmap; + xfs_inode_t *ip, *topino; + uint ireclaims; + vnode_t *vp; + boolean_t vnode_refd; + + ASSERT(mp->m_quotainfo); + +again: + XFS_MOUNT_ILOCK(mp); + ip = mp->m_inodes; + if (ip == NULL) { + XFS_MOUNT_IUNLOCK(mp); + return; + } + do { + /* Skip markers inserted by xfs_sync */ + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + /* Root inode, rbmip and rsumip have associated blocks */ + if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { + ASSERT(ip->i_udquot == NULL); + ASSERT(ip->i_gdquot == NULL); + ip = ip->i_mnext; + continue; + } + vp = XFS_ITOV_NULL(ip); + if (!vp) { + ASSERT(ip->i_udquot == NULL); + ASSERT(ip->i_gdquot == NULL); + ip = ip->i_mnext; + continue; + } + vnode_refd = B_FALSE; + if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { + /* + * Sample vp mapping while holding the mplock, lest + * we come across a non-existent vnode. + */ + VMAP(vp, vmap); + ireclaims = mp->m_ireclaims; + topino = mp->m_inodes; + XFS_MOUNT_IUNLOCK(mp); + + /* XXX restart limit ? */ + if ( ! (vp = vn_get(vp, &vmap))) + goto again; + xfs_ilock(ip, XFS_ILOCK_EXCL); + vnode_refd = B_TRUE; + } else { + ireclaims = mp->m_ireclaims; + topino = mp->m_inodes; + XFS_MOUNT_IUNLOCK(mp); + } + + /* + * We don't keep the mountlock across the dqrele() call, + * since it can take a while.. + */ + if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { + xfs_qm_dqrele(ip->i_udquot); + ip->i_udquot = NULL; + } + if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) { + xfs_qm_dqrele(ip->i_gdquot); + ip->i_gdquot = NULL; + } + xfs_iunlock(ip, XFS_ILOCK_EXCL); + /* + * Wait until we've dropped the ilock and mountlock to + * do the vn_rele. Or be condemned to an eternity in the + * inactive code in hell. + */ + if (vnode_refd) + VN_RELE(vp); + XFS_MOUNT_ILOCK(mp); + /* + * If an inode was inserted or removed, we gotta + * start over again. + */ + if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) { + /* XXX use a sentinel */ + XFS_MOUNT_IUNLOCK(mp); + goto again; + } + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + + XFS_MOUNT_IUNLOCK(mp); +} + +/*------------------------------------------------------------------------*/ +#ifdef DEBUG +/* + * This contains all the test functions for XFS disk quotas. + * Currently it does a quota accounting check. ie. it walks through + * all inodes in the file system, calculating the dquot accounting fields, + * and prints out any inconsistencies. 
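+ *
+ * (Editor's aside on xfs_qm_dqrele_all_inodes above; not part of the
+ *  patch. The walk cannot hold the mount ilock across xfs_qm_dqrele,
+ *  so it snapshots m_inodes and m_ireclaims, drops the lock to work,
+ *  and restarts from the top if either changed -- an optimistic scan
+ *  with retry on list mutation:
+ *
+ *	again:
+ *		lock; remember list head and reclaim count;
+ *		unlock; release the inode's dquots; relock;
+ *		if head or reclaim count changed: unlock, goto again;
+ *
+ *  correct but potentially quadratic under heavy churn, hence the
+ *  "XXX use a sentinel" note in the code.)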
+ */ +xfs_dqhash_t *qmtest_udqtab; +xfs_dqhash_t *qmtest_gdqtab; +int qmtest_hashmask; +int qmtest_nfails; +mutex_t qcheck_lock; + +#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \ + (__psunsigned_t)(id)) & \ + (qmtest_hashmask - 1)) + +#define DQTEST_HASH(mp, id, type) ((type & XFS_DQ_USER) ? \ + (qmtest_udqtab + \ + DQTEST_HASHVAL(mp, id)) : \ + (qmtest_gdqtab + \ + DQTEST_HASHVAL(mp, id))) + +#define DQTEST_LIST_PRINT(l, NXT, title) \ +{ \ + xfs_dqtest_t *dqp; int i = 0;\ + cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \ + for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \ + dqp = (xfs_dqtest_t *)dqp->NXT) { \ + cmn_err(CE_DEBUG, " %d\. \"%d (%s)\" bcnt = %d, icnt = %d", \ + ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \ + dqp->d_bcount, dqp->d_icount); } \ +} + +typedef struct dqtest { + xfs_dqmarker_t q_lists; + xfs_dqhash_t *q_hash; /* the hashchain header */ + xfs_mount_t *q_mount; /* filesystem this relates to */ + xfs_dqid_t d_id; /* user id or group id */ + xfs_qcnt_t d_bcount; /* # disk blocks owned by the user */ + xfs_qcnt_t d_icount; /* # inodes owned by the user */ +} xfs_dqtest_t; + +STATIC void +xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp) +{ + xfs_dquot_t *d; + if (((d) = (h)->qh_next)) + (d)->HL_PREVP = &((dqp)->HL_NEXT); + (dqp)->HL_NEXT = d; + (dqp)->HL_PREVP = &((h)->qh_next); + (h)->qh_next = (xfs_dquot_t *)dqp; + (h)->qh_version++; + (h)->qh_nelems++; +} +STATIC void +xfs_qm_dqtest_print( + xfs_dqtest_t *d) +{ + cmn_err(CE_DEBUG, "-----------DQTEST DQUOT----------------"); + cmn_err(CE_DEBUG, "---- dquot ID = %d", d->d_id); + cmn_err(CE_DEBUG, "---- type = %s", XFS_QM_ISUDQ(d)? "USR" : "GRP"); + cmn_err(CE_DEBUG, "---- fs = 0x%p", d->q_mount); + cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)", + d->d_bcount, (int)d->d_bcount); + cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)", + d->d_icount, (int)d->d_icount); + cmn_err(CE_DEBUG, "---------------------------"); +} + +STATIC void +xfs_qm_dqtest_failed( + xfs_dqtest_t *d, + xfs_dquot_t *dqp, + char *reason, + xfs_qcnt_t a, + xfs_qcnt_t b, + int error) +{ + qmtest_nfails++; + if (error) + cmn_err(CE_DEBUG, "quotacheck failed id=%d, err=%d\nreason: %s", + INT_GET(d->d_id, ARCH_CONVERT), error, reason); + else + cmn_err(CE_DEBUG, "quotacheck failed id=%d (%s) [%d != %d]", + INT_GET(d->d_id, ARCH_CONVERT), reason, (int)a, (int)b); + xfs_qm_dqtest_print(d); + if (dqp) + xfs_qm_dqprint(dqp); +} + +STATIC int +xfs_dqtest_cmp2( + xfs_dqtest_t *d, + xfs_dquot_t *dqp) +{ + int err = 0; + if (INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) != d->d_icount) { + xfs_qm_dqtest_failed(d, dqp, "icount mismatch", + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), + d->d_icount, 0); + err++; + } + if (INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT) != d->d_bcount) { + xfs_qm_dqtest_failed(d, dqp, "bcount mismatch", + INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), + d->d_bcount, 0); + err++; + } + if (INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT) && + INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT) >= + INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT)) { + if (INT_ISZERO(dqp->q_core.d_btimer, ARCH_CONVERT) && + !INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT)) { + cmn_err(CE_DEBUG, + "%d [%s] [0x%p] BLK TIMER NOT STARTED", + d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount); + err++; + } + } + if (INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) && + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= + INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT)) { + if (INT_ISZERO(dqp->q_core.d_itimer, ARCH_CONVERT) && + 
!INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT)) { + cmn_err(CE_DEBUG, + "%d [%s] [0x%p] INO TIMER NOT STARTED", + d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount); + err++; + } + } +#ifdef QUOTADEBUG + if (!err) { + cmn_err(CE_DEBUG, "%d [%s] [0x%p] qchecked", + d->d_id, XFS_QM_ISUDQ(d) ? "USR" : "GRP", d->q_mount); + } +#endif + return (err); +} + +STATIC void +xfs_dqtest_cmp( + xfs_dqtest_t *d) +{ + xfs_dquot_t *dqp; + int error; + + /* xfs_qm_dqtest_print(d); */ + if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0, + &dqp))) { + xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error); + return; + } + xfs_dqtest_cmp2(d, dqp); + xfs_qm_dqput(dqp); +} + +STATIC int +xfs_qm_internalqcheck_dqget( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + xfs_dqtest_t **O_dq) +{ + xfs_dqtest_t *d; + xfs_dqhash_t *h; + + h = DQTEST_HASH(mp, id, type); + for (d = (xfs_dqtest_t *) h->qh_next; d != NULL; + d = (xfs_dqtest_t *) d->HL_NEXT) { + /* DQTEST_LIST_PRINT(h, HL_NEXT, "@@@@@ dqtestlist @@@@@"); */ + if (d->d_id == id && mp == d->q_mount) { + *O_dq = d; + return (0); + } + } + d = kmem_zalloc(sizeof(xfs_dqtest_t), KM_SLEEP); + d->dq_flags = type; + d->d_id = id; + d->q_mount = mp; + d->q_hash = h; + xfs_qm_hashinsert(h, d); + *O_dq = d; + return (0); +} + +STATIC void +xfs_qm_internalqcheck_get_dquots( + xfs_mount_t *mp, + xfs_dqid_t uid, + xfs_dqid_t gid, + xfs_dqtest_t **ud, + xfs_dqtest_t **gd) +{ + if (XFS_IS_UQUOTA_ON(mp)) + xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud); + if (XFS_IS_GQUOTA_ON(mp)) + xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd); +} + + +STATIC void +xfs_qm_internalqcheck_dqadjust( + xfs_inode_t *ip, + xfs_dqtest_t *d) +{ + d->d_icount++; + d->d_bcount += (xfs_qcnt_t)ip->i_d.di_nblocks; +} + +STATIC int +xfs_qm_internalqcheck_adjust( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t ino, /* inode number to get data for */ + void *buffer, /* not used */ + xfs_daddr_t bno, /* starting block of inode cluster */ + void *dip, /* not used */ + int *res) /* bulkstat result code */ +{ + xfs_inode_t *ip; + xfs_dqtest_t *ud, *gd; + uint lock_flags; + boolean_t ipreleased; + int error; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { + *res = BULKSTAT_RV_NOTHING; + qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n", + (unsigned long long) ino, + (unsigned long long) mp->m_sb.sb_uquotino, + (unsigned long long) mp->m_sb.sb_gquotino); + return XFS_ERROR(EINVAL); + } + ipreleased = B_FALSE; + again: + lock_flags = XFS_ILOCK_SHARED; + if ((error = xfs_iget(mp, tp, ino, lock_flags, &ip, bno))) { + *res = BULKSTAT_RV_NOTHING; + return (error); + } + + if (ip->i_d.di_mode == 0) { + xfs_iput_new(ip, lock_flags); + *res = BULKSTAT_RV_NOTHING; + return XFS_ERROR(ENOENT); + } + + /* + * This inode can have blocks after eof which can get released + * when we send it to inactive. Since we don't check the dquot + * until the after all our calculations are done, we must get rid + * of those now. + */ + if (! 
ipreleased) { + xfs_iput(ip, lock_flags); + ipreleased = B_TRUE; + goto again; + } + xfs_qm_internalqcheck_get_dquots(mp, + (xfs_dqid_t) ip->i_d.di_uid, + (xfs_dqid_t) ip->i_d.di_gid, + &ud, &gd); + if (XFS_IS_UQUOTA_ON(mp)) { + ASSERT(ud); + xfs_qm_internalqcheck_dqadjust(ip, ud); + } + if (XFS_IS_GQUOTA_ON(mp)) { + ASSERT(gd); + xfs_qm_internalqcheck_dqadjust(ip, gd); + } + xfs_iput(ip, lock_flags); + *res = BULKSTAT_RV_DIDONE; + return (0); +} + + +/* PRIVATE, debugging */ +int +xfs_qm_internalqcheck( + xfs_mount_t *mp) +{ + xfs_ino_t lastino; + int done, count; + int i; + xfs_dqtest_t *d, *e; + xfs_dqhash_t *h1; + int error; + + lastino = 0; + qmtest_hashmask = 32; + count = 5; + done = 0; + qmtest_nfails = 0; + + if (! XFS_IS_QUOTA_ON(mp)) + return XFS_ERROR(ESRCH); + + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); + XFS_bflush(mp->m_ddev_targp); + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); + XFS_bflush(mp->m_ddev_targp); + + mutex_lock(&qcheck_lock, PINOD); + /* There should be absolutely no quota activity while this + is going on. */ + qmtest_udqtab = kmem_zalloc(qmtest_hashmask * + sizeof(xfs_dqhash_t), KM_SLEEP); + qmtest_gdqtab = kmem_zalloc(qmtest_hashmask * + sizeof(xfs_dqhash_t), KM_SLEEP); + do { + /* + * Iterate thru all the inodes in the file system, + * adjusting the corresponding dquot counters + */ + if ((error = xfs_bulkstat(mp, NULL, &lastino, &count, + xfs_qm_internalqcheck_adjust, + 0, NULL, BULKSTAT_FG_IGET, &done))) { + break; + } + } while (! done); + if (error) { + cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); + } + cmn_err(CE_DEBUG, "Checking results against system dquots"); + for (i = 0; i < qmtest_hashmask; i++) { + h1 = &qmtest_udqtab[i]; + for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { + xfs_dqtest_cmp(d); + e = (xfs_dqtest_t *) d->HL_NEXT; + kmem_free(d, sizeof(xfs_dqtest_t)); + d = e; + } + h1 = &qmtest_gdqtab[i]; + for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { + xfs_dqtest_cmp(d); + e = (xfs_dqtest_t *) d->HL_NEXT; + kmem_free(d, sizeof(xfs_dqtest_t)); + d = e; + } + } + + if (qmtest_nfails) { + cmn_err(CE_DEBUG, "******** quotacheck failed ********"); + cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails); + } else { + cmn_err(CE_DEBUG, "******** quotacheck successful! ********"); + } + kmem_free(qmtest_udqtab, qmtest_hashmask * sizeof(xfs_dqhash_t)); + kmem_free(qmtest_gdqtab, qmtest_hashmask * sizeof(xfs_dqhash_t)); + mutex_unlock(&qcheck_lock); + return (qmtest_nfails); +} + +#endif /* DEBUG */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_quota_priv.h linux.21rc5-ac2/fs/xfs/quota/xfs_quota_priv.h --- linux.21rc5/fs/xfs/quota/xfs_quota_priv.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_quota_priv.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. 
Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_QUOTA_PRIV_H__ +#define __XFS_QUOTA_PRIV_H__ + +/* + * Number of bmaps that we ask from bmapi when doing a quotacheck. + * We make this restriction to keep the memory usage to a minimum. + */ +#define XFS_DQITER_MAP_SIZE 10 + +/* Number of dquots that fit in to a dquot block */ +#define XFS_QM_DQPERBLK(mp) ((mp)->m_quotainfo->qi_dqperchunk) + +#define XFS_ISLOCKED_INODE(ip) (ismrlocked(&(ip)->i_lock, \ + MR_UPDATE | MR_ACCESS) != 0) +#define XFS_ISLOCKED_INODE_EXCL(ip) (ismrlocked(&(ip)->i_lock, \ + MR_UPDATE) != 0) + +#define XFS_DQ_IS_ADDEDTO_TRX(t, d) ((d)->q_transp == (t)) + +#define XFS_QI_MPLRECLAIMS(mp) ((mp)->m_quotainfo->qi_dqreclaims) +#define XFS_QI_UQIP(mp) ((mp)->m_quotainfo->qi_uquotaip) +#define XFS_QI_GQIP(mp) ((mp)->m_quotainfo->qi_gquotaip) +#define XFS_QI_DQCHUNKLEN(mp) ((mp)->m_quotainfo->qi_dqchunklen) +#define XFS_QI_BTIMELIMIT(mp) ((mp)->m_quotainfo->qi_btimelimit) +#define XFS_QI_RTBTIMELIMIT(mp) ((mp)->m_quotainfo->qi_rtbtimelimit) +#define XFS_QI_ITIMELIMIT(mp) ((mp)->m_quotainfo->qi_itimelimit) +#define XFS_QI_BWARNLIMIT(mp) ((mp)->m_quotainfo->qi_bwarnlimit) +#define XFS_QI_IWARNLIMIT(mp) ((mp)->m_quotainfo->qi_iwarnlimit) +#define XFS_QI_QOFFLOCK(mp) ((mp)->m_quotainfo->qi_quotaofflock) + +#define XFS_QI_MPL_LIST(mp) ((mp)->m_quotainfo->qi_dqlist) +#define XFS_QI_MPLLOCK(mp) ((mp)->m_quotainfo->qi_dqlist.qh_lock) +#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next) +#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems) + +#define XQMLCK(h) (mutex_lock(&((h)->qh_lock), PINOD)) +#define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock))) +#ifdef DEBUG +struct xfs_dqhash; +static inline int XQMISLCKD(struct xfs_dqhash *h) +{ + if (mutex_trylock(&h->qh_lock)) { + mutex_unlock(&h->qh_lock); + return 0; + } + return 1; +} +#endif + +#define XFS_DQ_HASH_LOCK(h) XQMLCK(h) +#define XFS_DQ_HASH_UNLOCK(h) XQMUNLCK(h) +#define XFS_DQ_IS_HASH_LOCKED(h) XQMISLCKD(h) + +#define xfs_qm_mplist_lock(mp) XQMLCK(&(XFS_QI_MPL_LIST(mp))) +#define xfs_qm_mplist_unlock(mp) XQMUNLCK(&(XFS_QI_MPL_LIST(mp))) +#define XFS_QM_IS_MPLIST_LOCKED(mp) XQMISLCKD(&(XFS_QI_MPL_LIST(mp))) + +#define xfs_qm_freelist_lock(qm) XQMLCK(&((qm)->qm_dqfreelist)) +#define xfs_qm_freelist_unlock(qm) XQMUNLCK(&((qm)->qm_dqfreelist)) +#define XFS_QM_IS_FREELIST_LOCKED(qm) XQMISLCKD(&((qm)->qm_dqfreelist)) + +/* + * Hash into a bucket in the dquot hash table, based on . + */ +#define XFS_DQ_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \ + (__psunsigned_t)(id)) & \ + (xfs_Gqm->qm_dqhashmask - 1)) +#define XFS_DQ_HASH(mp, id, type) (type == XFS_DQ_USER ? \ + (xfs_Gqm->qm_usr_dqhtable + \ + XFS_DQ_HASHVAL(mp, id)) : \ + (xfs_Gqm->qm_grp_dqhtable + \ + XFS_DQ_HASHVAL(mp, id))) +#define XFS_IS_DQTYPE_ON(mp, type) (type == XFS_DQ_USER ? 
\ + XFS_IS_UQUOTA_ON(mp):XFS_IS_GQUOTA_ON(mp)) +#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \ + INT_ISZERO(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_blk_softlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_rtb_hardlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_bcount, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_rtbcount, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_icount, ARCH_CONVERT)) + +#define HL_PREVP dq_hashlist.ql_prevp +#define HL_NEXT dq_hashlist.ql_next +#define MPL_PREVP dq_mplist.ql_prevp +#define MPL_NEXT dq_mplist.ql_next + + +#define _LIST_REMOVE(h, dqp, PVP, NXT) \ + { \ + xfs_dquot_t *d; \ + if (((d) = (dqp)->NXT)) \ + (d)->PVP = (dqp)->PVP; \ + *((dqp)->PVP) = d; \ + (dqp)->NXT = NULL; \ + (dqp)->PVP = NULL; \ + (h)->qh_version++; \ + (h)->qh_nelems--; \ + } + +#define _LIST_INSERT(h, dqp, PVP, NXT) \ + { \ + xfs_dquot_t *d; \ + if (((d) = (h)->qh_next)) \ + (d)->PVP = &((dqp)->NXT); \ + (dqp)->NXT = d; \ + (dqp)->PVP = &((h)->qh_next); \ + (h)->qh_next = dqp; \ + (h)->qh_version++; \ + (h)->qh_nelems++; \ + } + +#define FOREACH_DQUOT_IN_MP(dqp, mp) \ + for ((dqp) = XFS_QI_MPLNEXT(mp); (dqp) != NULL; (dqp) = (dqp)->MPL_NEXT) + +#define FOREACH_DQUOT_IN_FREELIST(dqp, qlist) \ +for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \ + (dqp) = (dqp)->dq_flnext) + +#define XQM_HASHLIST_INSERT(h, dqp) \ + _LIST_INSERT(h, dqp, HL_PREVP, HL_NEXT) + +#define XQM_FREELIST_INSERT(h, dqp) \ + xfs_qm_freelist_append(h, dqp) + +#define XQM_MPLIST_INSERT(h, dqp) \ + _LIST_INSERT(h, dqp, MPL_PREVP, MPL_NEXT) + +#define XQM_HASHLIST_REMOVE(h, dqp) \ + _LIST_REMOVE(h, dqp, HL_PREVP, HL_NEXT) +#define XQM_FREELIST_REMOVE(dqp) \ + xfs_qm_freelist_unlink(dqp) +#define XQM_MPLIST_REMOVE(h, dqp) \ + { _LIST_REMOVE(h, dqp, MPL_PREVP, MPL_NEXT); \ + XFS_QI_MPLRECLAIMS((dqp)->q_mount)++; } + +#define XFS_DQ_IS_LOGITEM_INITD(dqp) ((dqp)->q_logitem.qli_dquot == (dqp)) + +#define XFS_QM_DQP_TO_DQACCT(tp, dqp) (XFS_QM_ISUDQ(dqp) ? \ + (tp)->t_dqinfo->dqa_usrdquots : \ + (tp)->t_dqinfo->dqa_grpdquots) +#define XFS_IS_SUSER_DQUOT(dqp) \ + (INT_ISZERO((dqp)->q_core.d_id, ARCH_CONVERT)) + +#define XFS_PURGE_INODE(ip) \ + { \ + vmap_t dqvmap; \ + vnode_t *dqvp; \ + dqvp = XFS_ITOV(ip); \ + VMAP(dqvp, dqvmap); \ + VN_RELE(dqvp); \ + } + +#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \ + (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : "???")) +#define DQFLAGTO_DIRTYSTR(d) (XFS_DQ_IS_DIRTY(d) ? "DIRTY" : "NOTDIRTY") + +#endif /* __XFS_QUOTA_PRIV_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/quota/xfs_trans_dquot.c linux.21rc5-ac2/fs/xfs/quota/xfs_trans_dquot.c --- linux.21rc5/fs/xfs/quota/xfs_trans_dquot.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/quota/xfs_trans_dquot.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,929 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_priv.h"
+
+#include "xfs_qm.h"
+
+STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
+
+/*
+ * Add the locked dquot to the transaction.
+ * The dquot must be locked, and it cannot be associated with any
+ * transaction.
+ */
+void
+xfs_trans_dqjoin(
+	xfs_trans_t	*tp,
+	xfs_dquot_t	*dqp)
+{
+	xfs_dq_logitem_t    *lp;
+
+	ASSERT(! XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));
+	lp = &dqp->q_logitem;
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp));
+
+	/*
+	 * Initialize q_transp so we can later determine if this dquot is
+	 * associated with this transaction.
+	 */
+	dqp->q_transp = tp;
+}
+
+
+/*
+ * This is called to mark the dquot as needing
+ * to be logged when the transaction is committed.  The dquot must
+ * already be associated with the given transaction.
+ * Note that it marks the entire transaction as dirty.  In the ordinary
+ * case, this gets called via xfs_trans_commit, after the transaction
+ * is already dirty.  However, there's nothing to stop this from getting
+ * called directly, as done by xfs_qm_scall_setqlim.  Hence, the TRANS_DIRTY
+ * flag.
+ */ +void +xfs_trans_log_dquot( + xfs_trans_t *tp, + xfs_dquot_t *dqp) +{ + xfs_log_item_desc_t *lidp; + + ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp)); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem)); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; +} + +/* + * Carry forward whatever is left of the quota blk reservation to + * the spanky new transaction + */ +STATIC void +xfs_trans_dup_dqinfo( + xfs_trans_t *otp, + xfs_trans_t *ntp) +{ + xfs_dqtrx_t *oq, *nq; + int i,j; + xfs_dqtrx_t *oqa, *nqa; + + if (!otp->t_dqinfo) + return; + + xfs_trans_alloc_dqinfo(ntp); + oqa = otp->t_dqinfo->dqa_usrdquots; + nqa = ntp->t_dqinfo->dqa_usrdquots; + + /* + * Because the quota blk reservation is carried forward, + * it is also necessary to carry forward the DQ_DIRTY flag. + */ + if(otp->t_flags & XFS_TRANS_DQ_DIRTY) + ntp->t_flags |= XFS_TRANS_DQ_DIRTY; + + for (j = 0; j < 2; j++) { + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + if (oqa[i].qt_dquot == NULL) + break; + oq = &oqa[i]; + nq = &nqa[i]; + + nq->qt_dquot = oq->qt_dquot; + nq->qt_bcount_delta = nq->qt_icount_delta = 0; + nq->qt_rtbcount_delta = 0; + + /* + * Transfer whatever is left of the reservations. + */ + nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used; + oq->qt_blk_res = oq->qt_blk_res_used; + + nq->qt_rtblk_res = oq->qt_rtblk_res - + oq->qt_rtblk_res_used; + oq->qt_rtblk_res = oq->qt_rtblk_res_used; + + nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used; + oq->qt_ino_res = oq->qt_ino_res_used; + + } + oqa = otp->t_dqinfo->dqa_grpdquots; + nqa = ntp->t_dqinfo->dqa_grpdquots; + } +} + +/* + * Wrap around mod_dquot to account for both user and group quotas. + */ +void +xfs_trans_mod_dquot_byino( + xfs_trans_t *tp, + xfs_inode_t *ip, + uint field, + long delta) +{ + xfs_mount_t *mp; + + ASSERT(tp); + mp = tp->t_mountp; + + if (!XFS_IS_QUOTA_ON(mp) || + ip->i_ino == mp->m_sb.sb_uquotino || + ip->i_ino == mp->m_sb.sb_gquotino) + return; + + if (tp->t_dqinfo == NULL) + xfs_trans_alloc_dqinfo(tp); + + if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) { + (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta); + } + if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) { + (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta); + } +} + +STATIC xfs_dqtrx_t * +xfs_trans_get_dqtrx( + xfs_trans_t *tp, + xfs_dquot_t *dqp) +{ + int i; + xfs_dqtrx_t *qa; + + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + qa = XFS_QM_DQP_TO_DQACCT(tp, dqp); + + if (qa[i].qt_dquot == NULL || + qa[i].qt_dquot == dqp) { + return (&qa[i]); + } + } + + return (NULL); +} + +/* + * Make the changes in the transaction structure. + * The moral equivalent to xfs_trans_mod_sb(). + * We don't touch any fields in the dquot, so we don't care + * if it's locked or not (most of the time it won't be). + */ +void +xfs_trans_mod_dquot( + xfs_trans_t *tp, + xfs_dquot_t *dqp, + uint field, + long delta) +{ + xfs_dqtrx_t *qtrx; + + ASSERT(tp); + qtrx = NULL; + + if (tp->t_dqinfo == NULL) + xfs_trans_alloc_dqinfo(tp); + /* + * Find either the first free slot or the slot that belongs + * to this dquot. + */ + qtrx = xfs_trans_get_dqtrx(tp, dqp); + ASSERT(qtrx); + if (qtrx->qt_dquot == NULL) + qtrx->qt_dquot = dqp; + + switch (field) { + + /* + * regular disk blk reservation + */ + case XFS_TRANS_DQ_RES_BLKS: + qtrx->qt_blk_res += (ulong)delta; + break; + + /* + * inode reservation + */ + case XFS_TRANS_DQ_RES_INOS: + qtrx->qt_ino_res += (ulong)delta; + break; + + /* + * disk blocks used. 
+ */ + case XFS_TRANS_DQ_BCOUNT: + if (qtrx->qt_blk_res && delta > 0) { + qtrx->qt_blk_res_used += (ulong)delta; + ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used); + } + qtrx->qt_bcount_delta += delta; + break; + + case XFS_TRANS_DQ_DELBCOUNT: + qtrx->qt_delbcnt_delta += delta; + break; + + /* + * Inode Count + */ + case XFS_TRANS_DQ_ICOUNT: + if (qtrx->qt_ino_res && delta > 0) { + qtrx->qt_ino_res_used += (ulong)delta; + ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used); + } + qtrx->qt_icount_delta += delta; + break; + + /* + * rtblk reservation + */ + case XFS_TRANS_DQ_RES_RTBLKS: + qtrx->qt_rtblk_res += (ulong)delta; + break; + + /* + * rtblk count + */ + case XFS_TRANS_DQ_RTBCOUNT: + if (qtrx->qt_rtblk_res && delta > 0) { + qtrx->qt_rtblk_res_used += (ulong)delta; + ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used); + } + qtrx->qt_rtbcount_delta += delta; + break; + + case XFS_TRANS_DQ_DELRTBCOUNT: + qtrx->qt_delrtb_delta += delta; + break; + + default: + ASSERT(0); + } + tp->t_flags |= XFS_TRANS_DQ_DIRTY; +} + + +/* + * Given an array of dqtrx structures, lock all the dquots associated + * and join them to the transaction, provided they have been modified. + * We know that the highest number of dquots (of one type - usr OR grp), + * involved in a transaction is 2 and that both usr and grp combined - 3. + * So, we don't attempt to make this very generic. + */ +STATIC void +xfs_trans_dqlockedjoin( + xfs_trans_t *tp, + xfs_dqtrx_t *q) +{ + ASSERT(q[0].qt_dquot != NULL); + if (q[1].qt_dquot == NULL) { + xfs_dqlock(q[0].qt_dquot); + xfs_trans_dqjoin(tp, q[0].qt_dquot); + } else { + ASSERT(XFS_QM_TRANS_MAXDQS == 2); + xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot); + xfs_trans_dqjoin(tp, q[0].qt_dquot); + xfs_trans_dqjoin(tp, q[1].qt_dquot); + } +} + + +/* + * Called by xfs_trans_commit() and similar in spirit to + * xfs_trans_apply_sb_deltas(). + * Go thru all the dquots belonging to this transaction and modify the + * INCORE dquot to reflect the actual usages. + * Unreserve just the reservations done by this transaction. + * dquot is still left locked at exit. + */ +void +xfs_trans_apply_dquot_deltas( + xfs_trans_t *tp) +{ + int i, j; + xfs_dquot_t *dqp; + xfs_dqtrx_t *qtrx, *qa; + xfs_disk_dquot_t *d; + long totalbdelta; + long totalrtbdelta; + + if (! (tp->t_flags & XFS_TRANS_DQ_DIRTY)) + return; + + ASSERT(tp->t_dqinfo); + qa = tp->t_dqinfo->dqa_usrdquots; + for (j = 0; j < 2; j++) { + if (qa[0].qt_dquot == NULL) { + qa = tp->t_dqinfo->dqa_grpdquots; + continue; + } + + /* + * Lock all of the dquots and join them to the transaction. + */ + xfs_trans_dqlockedjoin(tp, qa); + + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + qtrx = &qa[i]; + /* + * The array of dquots is filled + * sequentially, not sparsely. + */ + if ((dqp = qtrx->qt_dquot) == NULL) + break; + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp)); + + /* + * adjust the actual number of blocks used + */ + d = &dqp->q_core; + + /* + * The issue here is - sometimes we don't make a blkquota + * reservation intentionally to be fair to users + * (when the amount is small). On the other hand, + * delayed allocs do make reservations, but that's + * outside of a transaction, so we have no + * idea how much was really reserved. + * So, here we've accumulated delayed allocation blks and + * non-delay blks. The assumption is that the + * delayed ones are always reserved (outside of a + * transaction), and the others may or may not have + * quota reservations. 
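xfs_trans_dqlockedjoin() above may need to lock two dquots at once via xfs_dqlock2(), whose body is not part of this hunk; the standard way such a helper avoids an AB/BA deadlock is to take the pair in one fixed global order. A small pthread sketch of that idea (illustrative only, not the kernel implementation):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Acquire two mutexes in address order so that any two threads
 * locking the same pair always agree on which comes first. */
static void lock_pair(pthread_mutex_t *x, pthread_mutex_t *y)
{
	if ((uintptr_t)x > (uintptr_t)y) {
		pthread_mutex_t *t = x; x = y; y = t;
	}
	pthread_mutex_lock(x);
	if (y != x)
		pthread_mutex_lock(y);
}

static void unlock_pair(pthread_mutex_t *x, pthread_mutex_t *y)
{
	pthread_mutex_unlock(x);
	if (y != x)
		pthread_mutex_unlock(y);
}

int main(void)
{
	lock_pair(&lock_a, &lock_b);
	puts("both locks held");
	unlock_pair(&lock_a, &lock_b);
	return 0;
}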
+ */ + totalbdelta = qtrx->qt_bcount_delta + + qtrx->qt_delbcnt_delta; + totalrtbdelta = qtrx->qt_rtbcount_delta + + qtrx->qt_delrtb_delta; +#ifdef QUOTADEBUG + if (totalbdelta < 0) + ASSERT(INT_GET(d->d_bcount, ARCH_CONVERT) >= + (xfs_qcnt_t) -totalbdelta); + + if (totalrtbdelta < 0) + ASSERT(INT_GET(d->d_rtbcount, ARCH_CONVERT) >= + (xfs_qcnt_t) -totalrtbdelta); + + if (qtrx->qt_icount_delta < 0) + ASSERT(INT_GET(d->d_icount, ARCH_CONVERT) >= + (xfs_qcnt_t) -qtrx->qt_icount_delta); +#endif + if (totalbdelta) + INT_MOD(d->d_bcount, ARCH_CONVERT, (xfs_qcnt_t)totalbdelta); + + if (qtrx->qt_icount_delta) + INT_MOD(d->d_icount, ARCH_CONVERT, (xfs_qcnt_t)qtrx->qt_icount_delta); + + if (totalrtbdelta) + INT_MOD(d->d_rtbcount, ARCH_CONVERT, (xfs_qcnt_t)totalrtbdelta); + + /* + * Start/reset the timer(s) if needed. + */ + xfs_qm_adjust_dqtimers(tp->t_mountp, d); + + dqp->dq_flags |= XFS_DQ_DIRTY; + /* + * add this to the list of items to get logged + */ + xfs_trans_log_dquot(tp, dqp); + /* + * Take off what's left of the original reservation. + * In case of delayed allocations, there's no + * reservation that a transaction structure knows of. + */ + if (qtrx->qt_blk_res != 0) { + if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) { + if (qtrx->qt_blk_res > + qtrx->qt_blk_res_used) + dqp->q_res_bcount -= (xfs_qcnt_t) + (qtrx->qt_blk_res - + qtrx->qt_blk_res_used); + else + dqp->q_res_bcount -= (xfs_qcnt_t) + (qtrx->qt_blk_res_used - + qtrx->qt_blk_res); + } + } else { + /* + * These blks were never reserved, either inside + * a transaction or outside one (in a delayed + * allocation). Also, this isn't always a + * negative number since we sometimes + * deliberately skip quota reservations. + */ + if (qtrx->qt_bcount_delta) { + dqp->q_res_bcount += + (xfs_qcnt_t)qtrx->qt_bcount_delta; + } + } + /* + * Adjust the RT reservation. + */ + if (qtrx->qt_rtblk_res != 0) { + if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) { + if (qtrx->qt_rtblk_res > + qtrx->qt_rtblk_res_used) + dqp->q_res_rtbcount -= (xfs_qcnt_t) + (qtrx->qt_rtblk_res - + qtrx->qt_rtblk_res_used); + else + dqp->q_res_rtbcount -= (xfs_qcnt_t) + (qtrx->qt_rtblk_res_used - + qtrx->qt_rtblk_res); + } + } else { + if (qtrx->qt_rtbcount_delta) + dqp->q_res_rtbcount += + (xfs_qcnt_t)qtrx->qt_rtbcount_delta; + } + + /* + * Adjust the inode reservation. + */ + if (qtrx->qt_ino_res != 0) { + ASSERT(qtrx->qt_ino_res >= + qtrx->qt_ino_res_used); + if (qtrx->qt_ino_res > qtrx->qt_ino_res_used) + dqp->q_res_icount -= (xfs_qcnt_t) + (qtrx->qt_ino_res - + qtrx->qt_ino_res_used); + } else { + if (qtrx->qt_icount_delta) + dqp->q_res_icount += + (xfs_qcnt_t)qtrx->qt_icount_delta; + } + + +#ifdef QUOTADEBUG + if (qtrx->qt_rtblk_res != 0) + cmn_err(CE_DEBUG, "RT res %d for 0x%p\n", + (int) qtrx->qt_rtblk_res, dqp); +#endif + ASSERT(dqp->q_res_bcount >= + INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT)); + ASSERT(dqp->q_res_icount >= + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT)); + ASSERT(dqp->q_res_rtbcount >= + INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT)); + } + /* + * Do the group quotas next + */ + qa = tp->t_dqinfo->dqa_grpdquots; + } +} + +/* + * Release the reservations, and adjust the dquots accordingly. + * This is called only when the transaction is being aborted. If by + * any chance we have done dquot modifications incore (ie. deltas) already, + * we simply throw those away, since that's the expected behavior + * when a transaction is curtailed without a commit. 
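The commit and abort paths above both preserve the invariant that an incore reserved counter (q_res_bcount and friends) equals actual usage plus outstanding reservations, which is what the ASSERTs at the end of the loop check. A toy model of that bookkeeping, using hypothetical names, may make the delta handling easier to follow:

#include <assert.h>
#include <stdio.h>

struct dq {
	long	used;	/* like d_bcount: blocks actually allocated */
	long	res;	/* like q_res_bcount: used + reservations */
};

static void reserve(struct dq *q, long n)
{
	q->res += n;
}

/* Commit: turn part of the reservation into usage, give back the rest. */
static void commit(struct dq *q, long resv, long alloced)
{
	q->used += alloced;
	q->res -= resv - alloced;
	assert(q->res >= q->used);
}

/* Abort: throw the whole reservation away, usage is untouched. */
static void abort_tx(struct dq *q, long resv)
{
	q->res -= resv;
	assert(q->res >= q->used);
}

int main(void)
{
	struct dq q = { 0, 0 };

	reserve(&q, 8);
	commit(&q, 8, 5);	/* allocated 5 of the 8 reserved */
	printf("used=%ld res=%ld\n", q.used, q.res);	/* used=5 res=5 */
	reserve(&q, 4);
	abort_tx(&q, 4);	/* back to used=5 res=5 */
	printf("used=%ld res=%ld\n", q.used, q.res);
	return 0;
}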
+ */ +STATIC void +xfs_trans_unreserve_and_mod_dquots( + xfs_trans_t *tp) +{ + int i, j; + xfs_dquot_t *dqp; + xfs_dqtrx_t *qtrx, *qa; + boolean_t locked; + + if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY)) + return; + + qa = tp->t_dqinfo->dqa_usrdquots; + + for (j = 0; j < 2; j++) { + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + qtrx = &qa[i]; + /* + * We assume that the array of dquots is filled + * sequentially, not sparsely. + */ + if ((dqp = qtrx->qt_dquot) == NULL) + break; + /* + * Unreserve the original reservation. We don't care + * about the number of blocks used field, or deltas. + * Also we don't bother to zero the fields. + */ + locked = B_FALSE; + if (qtrx->qt_blk_res) { + xfs_dqlock(dqp); + locked = B_TRUE; + dqp->q_res_bcount -= + (xfs_qcnt_t)qtrx->qt_blk_res; + } + if (qtrx->qt_ino_res) { + if (!locked) { + xfs_dqlock(dqp); + locked = B_TRUE; + } + dqp->q_res_icount -= + (xfs_qcnt_t)qtrx->qt_ino_res; + } + + if (qtrx->qt_rtblk_res) { + if (!locked) { + xfs_dqlock(dqp); + locked = B_TRUE; + } + dqp->q_res_rtbcount -= + (xfs_qcnt_t)qtrx->qt_rtblk_res; + } + if (locked) + xfs_dqunlock(dqp); + + } + qa = tp->t_dqinfo->dqa_grpdquots; + } +} + +/* + * This reserves disk blocks and inodes against a dquot. + * Flags indicate if the dquot is to be locked here and also + * if the blk reservation is for RT or regular blocks. + * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check. + * Returns EDQUOT if quota is exceeded. + */ +STATIC int +xfs_trans_dqresv( + xfs_trans_t *tp, + xfs_dquot_t *dqp, + long nblks, + long ninos, + uint flags) +{ + int error; + xfs_qcnt_t hardlimit; + xfs_qcnt_t softlimit; + time_t btimer; + xfs_qcnt_t *resbcountp; + + if (! (flags & XFS_QMOPT_DQLOCK)) { + xfs_dqlock(dqp); + } + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + if (flags & XFS_TRANS_DQ_RES_BLKS) { + hardlimit = INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT); + softlimit = INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT); + btimer = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT); + resbcountp = &dqp->q_res_bcount; + } else { + ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS); + hardlimit = INT_GET(dqp->q_core.d_rtb_hardlimit, ARCH_CONVERT); + softlimit = INT_GET(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT); + btimer = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT); + resbcountp = &dqp->q_res_rtbcount; + } + error = 0; + + if ((flags & XFS_QMOPT_FORCE_RES) == 0 && + !INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT) && + XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) { +#ifdef QUOTADEBUG + cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld" + " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit); +#endif + if (nblks > 0) { + /* + * dquot is locked already. See if we'd go over the + * hardlimit or exceed the timelimit if we allocate + * nblks. 
+ */ + if (hardlimit > 0ULL && + (hardlimit <= nblks + *resbcountp)) { + error = EDQUOT; + goto error_return; + } + + if (softlimit > 0ULL && + (softlimit <= nblks + *resbcountp)) { + /* + * If timer or warnings has expired, + * return EDQUOT + */ + if ((btimer != 0 && CURRENT_TIME > btimer) || + (!INT_ISZERO(dqp->q_core.d_bwarns, ARCH_CONVERT) && + INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) >= + XFS_QI_BWARNLIMIT(dqp->q_mount))) { + error = EDQUOT; + goto error_return; + } + } + } + if (ninos > 0) { + if (INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT) > 0ULL && + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= + INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT)) { + error = EDQUOT; + goto error_return; + } else if (INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) > 0ULL && + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= + INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT)) { + /* + * If timer or warnings has expired, + * return EDQUOT + */ + if ((!INT_ISZERO(dqp->q_core.d_itimer, ARCH_CONVERT) && + CURRENT_TIME > INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT)) || + (!INT_ISZERO(dqp->q_core.d_iwarns, ARCH_CONVERT) && + INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) >= + XFS_QI_IWARNLIMIT(dqp->q_mount))) { + error = EDQUOT; + goto error_return; + } + } + } + } + + /* + * Change the reservation, but not the actual usage. + * Note that q_res_bcount = q_core.d_bcount + resv + */ + (*resbcountp) += (xfs_qcnt_t)nblks; + if (ninos != 0) + dqp->q_res_icount += (xfs_qcnt_t)ninos; + + /* + * note the reservation amt in the trans struct too, + * so that the transaction knows how much was reserved by + * it against this particular dquot. + * We don't do this when we are reserving for a delayed allocation, + * because we don't have the luxury of a transaction envelope then. + */ + if (tp) { + ASSERT(tp->t_dqinfo); + ASSERT(flags & XFS_QMOPT_RESBLK_MASK); + if (nblks != 0) + xfs_trans_mod_dquot(tp, dqp, + flags & XFS_QMOPT_RESBLK_MASK, + nblks); + if (ninos != 0) + xfs_trans_mod_dquot(tp, dqp, + XFS_TRANS_DQ_RES_INOS, + ninos); + } + ASSERT(dqp->q_res_bcount >= INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT)); + ASSERT(dqp->q_res_rtbcount >= INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT)); + ASSERT(dqp->q_res_icount >= INT_GET(dqp->q_core.d_icount, ARCH_CONVERT)); + +error_return: + if (! (flags & XFS_QMOPT_DQLOCK)) { + xfs_dqunlock(dqp); + } + return (error); +} + + +/* + * Given a dquot(s), make disk block and/or inode reservations against them. + * The fact that this does the reservation against both the usr and + * grp quotas is important, because this follows a both-or-nothing + * approach. + * + * flags = XFS_QMOPT_DQLOCK indicate if dquot(s) need to be locked. + * XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown. + * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks + * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks + * dquots are unlocked on return, if they were not locked by caller. + */ +int +xfs_trans_reserve_quota_bydquots( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_dquot_t *udqp, + xfs_dquot_t *gdqp, + long nblks, + long ninos, + uint flags) +{ + int resvd; + + if (! 
XFS_IS_QUOTA_ON(mp)) + return (0); + + if (tp && tp->t_dqinfo == NULL) + xfs_trans_alloc_dqinfo(tp); + + ASSERT(flags & XFS_QMOPT_RESBLK_MASK); + resvd = 0; + + if (udqp) { + if (xfs_trans_dqresv(tp, udqp, nblks, ninos, flags)) + return (EDQUOT); + resvd = 1; + } + + if (gdqp) { + if (xfs_trans_dqresv(tp, gdqp, nblks, ninos, flags)) { + /* + * can't do it, so backout previous reservation + */ + if (resvd) { + xfs_trans_dqresv(tp, udqp, -nblks, -ninos, + flags); + } + return (EDQUOT); + } + } + + /* + * Didnt change anything critical, so, no need to log + */ + return (0); +} + + +/* + * Lock the dquot and change the reservation if we can. + * This doesn't change the actual usage, just the reservation. + * The inode sent in is locked. + * + * Returns 0 on success, EDQUOT or other errors otherwise + */ +STATIC int +xfs_trans_reserve_quota_nblks( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_inode_t *ip, + long nblks, + long ninos, + uint type) +{ + int error; + + if (!XFS_IS_QUOTA_ON(mp)) + return (0); + + ASSERT(ip->i_ino != mp->m_sb.sb_uquotino); + ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); + +#ifdef QUOTADEBUG + if (ip->i_udquot) + ASSERT(! XFS_DQ_IS_LOCKED(ip->i_udquot)); + if (ip->i_gdquot) + ASSERT(! XFS_DQ_IS_LOCKED(ip->i_gdquot)); +#endif + + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); + ASSERT((type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_RTBLKS || + (type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_BLKS); + + /* + * Reserve nblks against these dquots, with trans as the mediator. + */ + error = xfs_trans_reserve_quota_bydquots(tp, mp, + ip->i_udquot, ip->i_gdquot, + nblks, ninos, + type); + return (error); +} + +/* + * This routine is called to allocate a quotaoff log item. + */ +xfs_qoff_logitem_t * +xfs_trans_get_qoff_item( + xfs_trans_t *tp, + xfs_qoff_logitem_t *startqoff, + uint flags) +{ + xfs_qoff_logitem_t *q; + + ASSERT(tp != NULL); + + q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags); + ASSERT(q != NULL); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)q); + + return (q); +} + + +/* + * This is called to mark the quotaoff logitem as needing + * to be logged when the transaction is committed. The logitem must + * already be associated with the given transaction. 
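xfs_trans_reserve_quota_bydquots() above is deliberately both-or-nothing: if the group dquot rejects the request, the user reservation just made is backed out before returning EDQUOT. A compact userspace sketch of that shape, with the limit logic reduced to the hard-limit comparison (all names here are illustrative):

#include <errno.h>
#include <stdio.h>

struct quota {
	long	hard;		/* hard limit, 0 means none */
	long	reserved;	/* usage plus outstanding reservations */
};

/* Fail once the new total would reach the hard limit, mirroring the
 * "hardlimit <= nblks + *resbcountp" test above. */
static int qreserve(struct quota *q, long nblks)
{
	if (q->hard > 0 && q->reserved + nblks >= q->hard)
		return EDQUOT;
	q->reserved += nblks;
	return 0;
}

/* Both-or-nothing across two quotas, with rollback on the second. */
static int qreserve_both(struct quota *uq, struct quota *gq, long nblks)
{
	int error;

	if ((error = qreserve(uq, nblks)) != 0)
		return error;
	if ((error = qreserve(gq, nblks)) != 0) {
		qreserve(uq, -nblks);	/* back out the first one */
		return error;
	}
	return 0;
}

int main(void)
{
	struct quota uq = { 100, 0 }, gq = { 10, 0 };

	printf("error=%d, user reservation left=%ld\n",
	       qreserve_both(&uq, &gq, 50), uq.reserved);
	/* group quota refuses, user side is rolled back to 0 */
	return 0;
}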
+ */ +void +xfs_trans_log_quotaoff_item( + xfs_trans_t *tp, + xfs_qoff_logitem_t *qlp) +{ + xfs_log_item_desc_t *lidp; + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; +} + +STATIC void +xfs_trans_alloc_dqinfo( + xfs_trans_t *tp) +{ + (tp)->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP); +} + +STATIC void +xfs_trans_free_dqinfo( + xfs_trans_t *tp) +{ + if (!tp->t_dqinfo) + return; + kmem_zone_free(xfs_Gqm->qm_dqtrxzone, (tp)->t_dqinfo); + (tp)->t_dqinfo = NULL; +} + +xfs_dqtrxops_t xfs_trans_dquot_ops = { + .qo_dup_dqinfo = xfs_trans_dup_dqinfo, + .qo_free_dqinfo = xfs_trans_free_dqinfo, + .qo_mod_dquot_byino = xfs_trans_mod_dquot_byino, + .qo_apply_dquot_deltas = xfs_trans_apply_dquot_deltas, + .qo_reserve_quota_nblks = xfs_trans_reserve_quota_nblks, + .qo_reserve_quota_bydquots = xfs_trans_reserve_quota_bydquots, + .qo_unreserve_and_mod_dquots = xfs_trans_unreserve_and_mod_dquots, +}; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/debug.c linux.21rc5-ac2/fs/xfs/support/debug.c --- linux.21rc5/fs/xfs/support/debug.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/debug.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "debug.h" + +#include +#include +#include + +int doass = 1; +static char message[256]; /* keep it off the stack */ +static spinlock_t xfs_err_lock = SPIN_LOCK_UNLOCKED; + +/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */ +#define XFS_MAX_ERR_LEVEL 7 +#define XFS_ERR_MASK ((1 << 3) - 1) +static char *err_level[XFS_MAX_ERR_LEVEL+1] = + {KERN_EMERG, KERN_ALERT, KERN_CRIT, + KERN_ERR, KERN_WARNING, KERN_NOTICE, + KERN_INFO, KERN_DEBUG}; + +void +assfail(char *a, char *f, int l) +{ + printk("XFS assertion failed: %s, file: %s, line: %d\n", a, f, l); + BUG(); +} + +#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM)) + +unsigned long +random(void) +{ + static unsigned long RandomValue = 1; + /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ + register long rv = RandomValue; + register long lo; + register long hi; + + hi = rv / 127773; + lo = rv % 127773; + rv = 16807 * lo - 2836 * hi; + if( rv <= 0 ) rv += 2147483647; + return( RandomValue = rv ); +} + +int +get_thread_id(void) +{ + return current->pid; +} + +#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */ + +void +cmn_err(register int level, char *fmt, ...) +{ + char *fp = fmt; + int len; + ulong flags; + va_list ap; + + level &= XFS_ERR_MASK; + if (level > XFS_MAX_ERR_LEVEL) + level = XFS_MAX_ERR_LEVEL; + spin_lock_irqsave(&xfs_err_lock,flags); + va_start(ap, fmt); + if (*fmt == '!') fp++; + len = vsprintf(message, fp, ap); + if (message[len-1] != '\n') + strcat(message, "\n"); + printk("%s%s", err_level[level], message); + va_end(ap); + spin_unlock_irqrestore(&xfs_err_lock,flags); + + if (level == CE_PANIC) + BUG(); +} + + +void +icmn_err(register int level, char *fmt, va_list ap) +{ + ulong flags; + int len; + + level &= XFS_ERR_MASK; + if(level > XFS_MAX_ERR_LEVEL) + level = XFS_MAX_ERR_LEVEL; + spin_lock_irqsave(&xfs_err_lock,flags); + len = vsprintf(message, fmt, ap); + if (message[len-1] != '\n') + strcat(message, "\n"); + spin_unlock_irqrestore(&xfs_err_lock,flags); + printk("%s%s", err_level[level], message); + if (level == CE_PANIC) + BUG(); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/debug.h linux.21rc5-ac2/fs/xfs/support/debug.h --- linux.21rc5/fs/xfs/support/debug.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/debug.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
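random() above is a Park-Miller (Lehmer) generator: rv' = 16807 * rv mod (2^31 - 1), computed in 32-bit arithmetic via Schrage's decomposition, where 127773 = m / a and 2836 = m mod a. The standalone sketch below re-derives the constants and checks the 32-bit trick against plain 64-bit arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define A 16807
#define M 2147483647	/* 2^31 - 1, prime */
#define Q 127773	/* M / A */
#define R 2836		/* M % A */

/* One Schrage step: (A * s) % M without overflowing 32 bits. */
static uint32_t lehmer_next(uint32_t s)
{
	int32_t hi = (int32_t)(s / Q);
	int32_t lo = (int32_t)(s % Q);
	int32_t t = A * lo - R * hi;	/* stays within (-M, M) */

	return (uint32_t)(t > 0 ? t : t + M);
}

int main(void)
{
	uint64_t ref = 1;	/* same seed as RandomValue in debug.c */
	uint32_t s = 1;
	int i;

	for (i = 0; i < 1000000; i++) {
		ref = (A * ref) % M;	/* obvious version, needs 64 bits */
		s = lehmer_next(s);
		assert(s == ref);
	}
	printf("streams agree after 10^6 steps: %u\n", s);
	return 0;
}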
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_DEBUG_H__ +#define __XFS_SUPPORT_DEBUG_H__ + +#include + +#define CE_DEBUG 7 /* debug */ +#define CE_CONT 6 /* continuation */ +#define CE_NOTE 5 /* notice */ +#define CE_WARN 4 /* warning */ +#define CE_ALERT 1 /* alert */ +#define CE_PANIC 0 /* panic */ + +extern void icmn_err(int, char *, va_list); +extern void cmn_err(int, char *, ...); + +#ifdef DEBUG +# ifdef lint +# define ASSERT(EX) ((void)0) /* avoid "constant in conditional" babble */ +# else +# define ASSERT(EX) ((!doass||(EX))?((void)0):assfail(#EX, __FILE__, __LINE__)) +# endif /* lint */ +#else +# define ASSERT(x) ((void)0) +#endif + +extern int doass; /* dynamically turn off asserts */ +extern void assfail(char *, char *, int); +#ifdef DEBUG +extern unsigned long random(void); +extern int get_thread_id(void); +#endif + +#define ASSERT_ALWAYS(EX) ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__)) +#define debug_stop_all_cpus(param) /* param is "cpumask_t *" */ + +#endif /* __XFS_SUPPORT_DEBUG_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/kmem.c linux.21rc5-ac2/fs/xfs/support/kmem.c --- linux.21rc5/fs/xfs/support/kmem.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/kmem.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
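Unlike the libc assert(), the ASSERT above consults the global doass, so checking can be switched off at run time without rebuilding. A standalone sketch of that pattern (userspace stand-ins for assfail and doass):

#include <stdio.h>
#include <stdlib.h>

static int doass = 1;	/* dynamically turn off asserts */

static void assfail(const char *a, const char *f, int l)
{
	fprintf(stderr, "assertion failed: %s, file: %s, line: %d\n", a, f, l);
	abort();
}

#define ASSERT(EX) \
	((!doass || (EX)) ? (void)0 : assfail(#EX, __FILE__, __LINE__))

int main(void)
{
	ASSERT(1 + 1 == 2);	/* checked and passes */
	doass = 0;
	ASSERT(0);		/* would abort, but checking is now off */
	puts("survived a disabled assert");
	return 0;
}

Note the short-circuit order: with doass clear, EX is never evaluated, so a disabled ASSERT costs one global load and cannot trip on side effects.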
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include +#include +#include +#include + +#include "time.h" +#include "kmem.h" + +#define DEF_PRIORITY (6) +#define MAX_SLAB_SIZE 0x10000 + +static __inline unsigned int flag_convert(int flags) +{ +#if DEBUG + if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS))) { + printk(KERN_WARNING + "XFS: memory allocation with wrong flags (%x)\n", flags); + BUG(); + } +#endif + + if (flags & KM_NOSLEEP) + return GFP_ATOMIC; + /* If we're in a transaction, FS activity is not ok */ + else if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS)) + return GFP_NOFS; + else + return GFP_KERNEL; +} + +#define MAX_SHAKE 8 + +static kmem_shake_func_t shake_list[MAX_SHAKE]; +static DECLARE_MUTEX(shake_sem); + +void kmem_shake_register(kmem_shake_func_t sfunc) +{ + int i; + + down(&shake_sem); + for (i = 0; i < MAX_SHAKE; i++) { + if (shake_list[i] == NULL) { + shake_list[i] = sfunc; + break; + } + } + if (i == MAX_SHAKE) + BUG(); + up(&shake_sem); +} + +void kmem_shake_deregister(kmem_shake_func_t sfunc) +{ + int i; + + down(&shake_sem); + for (i = 0; i < MAX_SHAKE; i++) { + if (shake_list[i] == sfunc) + break; + } + if (i == MAX_SHAKE) + BUG(); + for (; i < MAX_SHAKE - 1; i++) { + shake_list[i] = shake_list[i+1]; + } + shake_list[i] = NULL; + up(&shake_sem); +} + +static __inline__ void kmem_shake(void) +{ + int i; + + down(&shake_sem); + for (i = 0; i < MAX_SHAKE && shake_list[i]; i++) + (*shake_list[i])(); + up(&shake_sem); + delay(10); +} + +void * +kmem_alloc(size_t size, int flags) +{ + int shrink = DEF_PRIORITY; /* # times to try to shrink cache */ + void *rval; + +repeat: + if (MAX_SLAB_SIZE < size) { + /* Avoid doing filesystem sensitive stuff to get this */ + rval = __vmalloc(size, flag_convert(flags), PAGE_KERNEL); + } else { + rval = kmalloc(size, flag_convert(flags)); + } + + if (rval || (flags & KM_NOSLEEP)) + return rval; + + /* + * KM_SLEEP callers don't expect a failure + */ + if (shrink) { + kmem_shake(); + + shrink--; + goto repeat; + } + + rval = __vmalloc(size, flag_convert(flags), PAGE_KERNEL); + if (!rval && (flags & KM_SLEEP)) + panic("kmem_alloc: NULL memory on KM_SLEEP request!"); + + return rval; +} + +void * +kmem_zalloc(size_t size, int flags) +{ + void *ptr; + + ptr = kmem_alloc(size, flags); + + if (ptr) + memset((char *)ptr, 0, (int)size); + + return (ptr); +} + +void +kmem_free(void *ptr, size_t size) +{ + if (((unsigned long)ptr < VMALLOC_START) || + ((unsigned long)ptr >= VMALLOC_END)) { + kfree(ptr); + } else { + vfree(ptr); + } +} + +void * +kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags) +{ + void *new; + + new = kmem_alloc(newsize, flags); + if (ptr) { + if (new) + memcpy(new, ptr, + ((oldsize < newsize) ? 
oldsize : newsize)); + kmem_free(ptr, oldsize); + } + + return new; +} + +kmem_zone_t * +kmem_zone_init(int size, char *zone_name) +{ + return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL); +} + +void * +kmem_zone_alloc(kmem_zone_t *zone, int flags) +{ + int shrink = DEF_PRIORITY; /* # times to try to shrink cache */ + void *ptr = NULL; + +repeat: + ptr = kmem_cache_alloc(zone, flag_convert(flags)); + + if (ptr || (flags & KM_NOSLEEP)) + return ptr; + + /* + * KM_SLEEP callers don't expect a failure + */ + if (shrink) { + kmem_shake(); + + shrink--; + goto repeat; + } + + if (flags & KM_SLEEP) + panic("kmem_zone_alloc: NULL memory on KM_SLEEP request!"); + + return NULL; +} + +void * +kmem_zone_zalloc(kmem_zone_t *zone, int flags) +{ + int shrink = DEF_PRIORITY; /* # times to try to shrink cache */ + void *ptr = NULL; + +repeat: + ptr = kmem_cache_alloc(zone, flag_convert(flags)); + + if (ptr) { + memset(ptr, 0, kmem_cache_size(zone)); + return ptr; + } + + if (flags & KM_NOSLEEP) + return ptr; + + /* + * KM_SLEEP callers don't expect a failure + */ + if (shrink) { + kmem_shake(); + + shrink--; + goto repeat; + } + + if (flags & KM_SLEEP) + panic("kmem_zone_zalloc: NULL memory on KM_SLEEP request!"); + + return NULL; +} + +void +kmem_zone_free(kmem_zone_t *zone, void *ptr) +{ + kmem_cache_free(zone, ptr); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/kmem.h linux.21rc5-ac2/fs/xfs/support/kmem.h --- linux.21rc5/fs/xfs/support/kmem.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/kmem.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_KMEM_H__ +#define __XFS_SUPPORT_KMEM_H__ + +#include + +/* + * memory management routines + */ +#define KM_SLEEP 0x0001 +#define KM_NOSLEEP 0x0002 +#define KM_NOFS 0x0004 + +#define kmem_zone kmem_cache_s +#define kmem_zone_t kmem_cache_t + +extern kmem_zone_t *kmem_zone_init(int, char *); +extern void *kmem_zone_zalloc(kmem_zone_t *, int); +extern void *kmem_zone_alloc(kmem_zone_t *, int); +extern void kmem_zone_free(kmem_zone_t *, void *); + +extern void *kmem_alloc(size_t, int); +extern void *kmem_realloc(void *, size_t, size_t, int); +extern void *kmem_zalloc(size_t, int); +extern void kmem_free(void *, size_t); + +typedef void (*kmem_shake_func_t)(void); + +extern void kmem_shake_register(kmem_shake_func_t); +extern void kmem_shake_deregister(kmem_shake_func_t); + +#endif /* __XFS_SUPPORT_KMEM_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/ktrace.c linux.21rc5-ac2/fs/xfs/support/ktrace.c --- linux.21rc5/fs/xfs/support/ktrace.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/ktrace.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,378 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
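The kmem_shake_register()/kmem_shake_deregister() interface declared above (and implemented earlier in kmem.c) lets caches volunteer memory back before a failed allocation is retried. A userspace sketch of the registry-plus-retry shape, with locking omitted for brevity (the kernel version serializes the table with a semaphore):

#include <stdio.h>
#include <stdlib.h>

#define MAX_SHAKE 8
typedef void (*shake_func_t)(void);
static shake_func_t shake_list[MAX_SHAKE];

static void shake_register(shake_func_t f)
{
	int i;

	for (i = 0; i < MAX_SHAKE; i++) {
		if (shake_list[i] == NULL) {
			shake_list[i] = f;
			return;
		}
	}
	abort();	/* table full: treated as a bug, as in the original */
}

static void shake_all(void)
{
	int i;

	for (i = 0; i < MAX_SHAKE && shake_list[i]; i++)
		shake_list[i]();
}

/* Retry a failed allocation a few times, shaking the caches between
 * attempts -- the same loop shape as kmem_alloc() above. */
static void *alloc_retry(size_t size, int tries)
{
	void *p;

	while ((p = malloc(size)) == NULL && tries-- > 0)
		shake_all();
	return p;
}

static void my_shaker(void)
{
	puts("cache asked to release memory");
}

int main(void)
{
	shake_register(my_shaker);
	free(alloc_retry(64, 3));	/* tiny request, succeeds first try */
	return 0;
}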
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include +#include + +#include +#include "kmem.h" +#include "spin.h" +#include "debug.h" +#include "ktrace.h" + +#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING)) + +static kmem_zone_t *ktrace_hdr_zone; +static kmem_zone_t *ktrace_ent_zone; +static int ktrace_zentries; + +void +ktrace_init(int zentries) +{ + ktrace_zentries = zentries; + + ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t), + "ktrace_hdr"); + ASSERT(ktrace_hdr_zone); + + ktrace_ent_zone = kmem_zone_init(ktrace_zentries + * sizeof(ktrace_entry_t), + "ktrace_ent"); + ASSERT(ktrace_ent_zone); +} + +void +ktrace_uninit(void) +{ + kmem_cache_destroy(ktrace_hdr_zone); + kmem_cache_destroy(ktrace_ent_zone); +} + +/* + * ktrace_alloc() + * + * Allocate a ktrace header and enough buffering for the given + * number of entries. + */ +ktrace_t * +ktrace_alloc(int nentries, int sleep) +{ + ktrace_t *ktp; + ktrace_entry_t *ktep; + + ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep); + + if (ktp == (ktrace_t*)NULL) { + /* + * KM_SLEEP callers don't expect failure. + */ + if (sleep & KM_SLEEP) + panic("ktrace_alloc: NULL memory on KM_SLEEP request!"); + + return NULL; + } + + /* + * Special treatment for buffers with the ktrace_zentries entries + */ + if (nentries == ktrace_zentries) { + ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone, + sleep); + } else { + ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)), + sleep); + } + + if (ktep == NULL) { + /* + * KM_SLEEP callers don't expect failure. + */ + if (sleep & KM_SLEEP) + panic("ktrace_alloc: NULL memory on KM_SLEEP request!"); + + kmem_free(ktp, sizeof(*ktp)); + + return NULL; + } + + spinlock_init(&(ktp->kt_lock), "kt_lock"); + + ktp->kt_entries = ktep; + ktp->kt_nentries = nentries; + ktp->kt_index = 0; + ktp->kt_rollover = 0; + + return ktp; +} + + +/* + * ktrace_free() + * + * Free up the ktrace header and buffer. It is up to the caller + * to ensure that no-one is referencing it. + */ +void +ktrace_free(ktrace_t *ktp) +{ + int entries_size; + + if (ktp == (ktrace_t *)NULL) + return; + + spinlock_destroy(&ktp->kt_lock); + + /* + * Special treatment for the Vnode trace buffer. + */ + if (ktp->kt_nentries == ktrace_zentries) { + kmem_zone_free(ktrace_ent_zone, ktp->kt_entries); + } else { + entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t)); + + kmem_free(ktp->kt_entries, entries_size); + } + + kmem_zone_free(ktrace_hdr_zone, ktp); +} + + +/* + * Enter the given values into the "next" entry in the trace buffer. + * kt_index is always the index of the next entry to be filled. + */ +void +ktrace_enter( + ktrace_t *ktp, + void *val0, + void *val1, + void *val2, + void *val3, + void *val4, + void *val5, + void *val6, + void *val7, + void *val8, + void *val9, + void *val10, + void *val11, + void *val12, + void *val13, + void *val14, + void *val15) +{ + static lock_t wrap_lock = SPIN_LOCK_UNLOCKED; + int index; + ktrace_entry_t *ktep; + + ASSERT(ktp != NULL); + + /* + * Grab an entry by pushing the index up to the next one. 
+ */ + spin_lock(&wrap_lock); + index = ktp->kt_index; + if (++ktp->kt_index == ktp->kt_nentries) + ktp->kt_index = 0; + spin_unlock(&wrap_lock); + + if (!ktp->kt_rollover && index == ktp->kt_nentries - 1) + ktp->kt_rollover = 1; + + ASSERT((index >= 0) && (index < ktp->kt_nentries)); + + ktep = &(ktp->kt_entries[index]); + + ktep->val[0] = val0; + ktep->val[1] = val1; + ktep->val[2] = val2; + ktep->val[3] = val3; + ktep->val[4] = val4; + ktep->val[5] = val5; + ktep->val[6] = val6; + ktep->val[7] = val7; + ktep->val[8] = val8; + ktep->val[9] = val9; + ktep->val[10] = val10; + ktep->val[11] = val11; + ktep->val[12] = val12; + ktep->val[13] = val13; + ktep->val[14] = val14; + ktep->val[15] = val15; +} + +/* + * Return the number of entries in the trace buffer. + */ +int +ktrace_nentries( + ktrace_t *ktp) +{ + if (ktp == NULL) { + return 0; + } + + return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index); +} + + +/* + * ktrace_first() + * + * This is used to find the start of the trace buffer. + * In conjunction with ktrace_next() it can be used to + * iterate through the entire trace buffer. This code does + * not do any locking because it is assumed that it is called + * from the debugger. + * + * The caller must pass in a pointer to a ktrace_snap + * structure in which we will keep some state used to + * iterate through the buffer. This state must not touched + * by any code outside of this module. + */ +ktrace_entry_t * +ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp) +{ + ktrace_entry_t *ktep; + int index; + int nentries; + + if (ktp->kt_rollover) + index = ktp->kt_index; + else + index = 0; + + ktsp->ks_start = index; + ktep = &(ktp->kt_entries[index]); + + nentries = ktrace_nentries(ktp); + index++; + if (index < nentries) { + ktsp->ks_index = index; + } else { + ktsp->ks_index = 0; + if (index > nentries) + ktep = NULL; + } + return ktep; +} + + +/* + * ktrace_next() + * + * This is used to iterate through the entries of the given + * trace buffer. The caller must pass in the ktrace_snap_t + * structure initialized by ktrace_first(). The return value + * will be either a pointer to the next ktrace_entry or NULL + * if all of the entries have been traversed. + */ +ktrace_entry_t * +ktrace_next( + ktrace_t *ktp, + ktrace_snap_t *ktsp) +{ + int index; + ktrace_entry_t *ktep; + + index = ktsp->ks_index; + if (index == ktsp->ks_start) { + ktep = NULL; + } else { + ktep = &ktp->kt_entries[index]; + } + + index++; + if (index == ktrace_nentries(ktp)) { + ktsp->ks_index = 0; + } else { + ktsp->ks_index = index; + } + + return ktep; +} + +#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING)) +EXPORT_SYMBOL(ktrace_first); +EXPORT_SYMBOL(ktrace_next); +#endif + +/* + * ktrace_skip() + * + * Skip the next "count" entries and return the entry after that. + * Return NULL if this causes us to iterate past the beginning again. + */ + +ktrace_entry_t * +ktrace_skip( + ktrace_t *ktp, + int count, + ktrace_snap_t *ktsp) +{ + int index; + int new_index; + ktrace_entry_t *ktep; + int nentries = ktrace_nentries(ktp); + + index = ktsp->ks_index; + new_index = index + count; + while (new_index >= nentries) { + new_index -= nentries; + } + if (index == ktsp->ks_start) { + /* + * We've iterated around to the start, so we're done. + */ + ktep = NULL; + } else if ((new_index < index) && (index < ktsp->ks_index)) { + /* + * We've skipped past the start again, so we're done. 
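The ktrace routines above manage a fixed-size ring: kt_index always names the next slot to fill, and kt_rollover records whether the buffer has ever wrapped, which together determine both the oldest entry and how many entries exist. A minimal standalone version of that arithmetic:

#include <stdio.h>

#define NENT 4

static int entries[NENT];
static int kt_index;	/* next slot to fill */
static int kt_rollover;	/* set once the buffer has wrapped */

static void trace(int val)
{
	entries[kt_index] = val;
	if (++kt_index == NENT) {
		kt_index = 0;
		kt_rollover = 1;
	}
}

int main(void)
{
	int i, start, n;

	for (i = 1; i <= 6; i++)
		trace(i);

	start = kt_rollover ? kt_index : 0;	/* oldest entry */
	n = kt_rollover ? NENT : kt_index;	/* entries present */
	for (i = 0; i < n; i++)
		printf("%d ", entries[(start + i) % NENT]);
	printf("\n");	/* prints "3 4 5 6": the NENT most recent values */
	return 0;
}

This is the same start/count computation that ktrace_first() and ktrace_nentries() perform, minus the snapshot structure and the wrap_lock the kernel needs for concurrent writers.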
+ */ + ktep = NULL; + ktsp->ks_index = ktsp->ks_start; + } else { + ktep = &(ktp->kt_entries[new_index]); + new_index++; + if (new_index == nentries) { + ktsp->ks_index = 0; + } else { + ktsp->ks_index = new_index; + } + } + return ktep; +} + +#else + +ktrace_t * +ktrace_alloc(int nentries, int sleep) +{ + /* + * KM_SLEEP callers don't expect failure. + */ + if (sleep & KM_SLEEP) + panic("ktrace_alloc: NULL memory on KM_SLEEP request!"); + + return NULL; +} +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/ktrace.h linux.21rc5-ac2/fs/xfs/support/ktrace.h --- linux.21rc5/fs/xfs/support/ktrace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/ktrace.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_KTRACE_H__ +#define __XFS_SUPPORT_KTRACE_H__ + + +/* + * Trace buffer entry structure. + */ +typedef struct ktrace_entry { + void *val[16]; +} ktrace_entry_t; + +/* + * Trace buffer header structure. + */ +typedef struct ktrace { + lock_t kt_lock; /* mutex to guard counters */ + int kt_nentries; /* number of entries in trace buf */ + int kt_index; /* current index in entries */ + int kt_rollover; + ktrace_entry_t *kt_entries; /* buffer of entries */ +} ktrace_t; + +/* + * Trace buffer snapshot structure. + */ +typedef struct ktrace_snap { + int ks_start; /* kt_index at time of snap */ + int ks_index; /* current index */ +} ktrace_snap_t; + +/* + * Exported interfaces. 
+ */ +extern ktrace_t *ktrace_alloc(int, int); + +#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING)) + +extern void ktrace_init(int zentries); +extern void ktrace_uninit(void); + +extern void ktrace_free(ktrace_t *); + +extern void ktrace_enter( + ktrace_t *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *); + +extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *); +extern int ktrace_nentries(ktrace_t *); +extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *); +extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *); + +#else + +#define ktrace_free(ktp) +#define ktrace_enter(ktp,v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15) + +#endif + +#endif /* __XFS_SUPPORT_KTRACE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/Makefile linux.21rc5-ac2/fs/xfs/support/Makefile --- linux.21rc5/fs/xfs/support/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/Makefile 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,54 @@ +# +# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. +# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# + +EXTRA_CFLAGS += -I.. + +ifeq ($(CONFIG_XFS_DEBUG),y) + EXTRA_CFLAGS += -DDEBUG +endif + +O_TARGET := support_xfs.o +ifneq ($(MAKECMDGOALS),modules_install) + obj-m := $(O_TARGET) +endif + +export-objs := ktrace.o + +obj-y := debug.o \ + kmem.o \ + ktrace.o \ + move.o \ + mrlock.o \ + qsort.o \ + uuid.o + +include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/move.c linux.21rc5-ac2/fs/xfs/support/move.c --- linux.21rc5/fs/xfs/support/move.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/move.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include +#include +#include + +#include +#include "debug.h" +#include "move.h" + +/* + * Move "n" bytes at byte address "cp"; "rw" indicates the direction + * of the move, and the I/O parameters are provided in "uio", which is + * update to reflect the data which was moved. Returns 0 on success or + * a non-zero errno on failure. + */ +int +uiomove(void *cp, size_t n, enum uio_rw rw, struct uio *uio) +{ + register struct iovec *iov; + u_int cnt; + int error; + + while (n > 0 && uio->uio_resid) { + iov = uio->uio_iov; + cnt = (u_int)iov->iov_len; + if (cnt == 0) { + uio->uio_iov++; + uio->uio_iovcnt--; + continue; + } + if (cnt > n) + cnt = (u_int)n; + switch (uio->uio_segflg) { + case UIO_USERSPACE: + if (rw == UIO_READ) + error = copy_to_user(iov->iov_base, cp, cnt); + else + error = copy_from_user(cp, iov->iov_base, cnt); + if (error) + return EFAULT; + break; + + + case UIO_SYSSPACE: + if (rw == UIO_READ) + memcpy(iov->iov_base, cp, cnt); + else + memcpy(cp, iov->iov_base, cnt); + break; + + default: + ASSERT(0); + break; + } + iov->iov_base = (void *)((char *)iov->iov_base + cnt); + iov->iov_len -= cnt; + uio->uio_resid -= cnt; + uio->uio_offset += cnt; + cp = (void *)((char *)cp + cnt); + n -= cnt; + } + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/move.h linux.21rc5-ac2/fs/xfs/support/move.h --- linux.21rc5/fs/xfs/support/move.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/move.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
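uiomove() above walks an iovec array, consuming each vector in turn while keeping uio_resid, uio_offset and the vector bookkeeping in step. The userspace sketch below performs the kernel-to-user direction with plain memcpy; uiomove_out and the cut-down struct myuio are hypothetical stand-ins, not the patch's types:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

struct myuio {
	struct iovec	*uio_iov;
	int		uio_iovcnt;
	size_t		uio_resid;
};

static int uiomove_out(const char *cp, size_t n, struct myuio *uio)
{
	while (n > 0 && uio->uio_resid) {
		struct iovec *iov = uio->uio_iov;
		size_t cnt = iov->iov_len;

		if (cnt == 0) {		/* this vector is spent: advance */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		memcpy(iov->iov_base, cp, cnt);
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		cp += cnt;
		n -= cnt;
	}
	return 0;
}

int main(void)
{
	char a[4] = "", b[8] = "";
	struct iovec iov[2] = { { a, 3 }, { b, 7 } };
	struct myuio uio = { iov, 2, 10 };

	uiomove_out("hello, uio", 10, &uio);
	printf("%.3s|%.7s\n", a, b);	/* prints "hel|lo, uio" */
	return 0;
}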
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#ifndef __XFS_SUPPORT_MOVE_H__ +#define __XFS_SUPPORT_MOVE_H__ + +#include +#include + +typedef struct iovec iovec_t; + +typedef struct uio { + iovec_t *uio_iov; /* pointer to array of iovecs */ + int uio_iovcnt; /* number of iovecs */ + int uio_fmode; /* file mode flags */ + xfs_off_t uio_offset; /* file offset */ + short uio_segflg; /* address space (kernel or user) */ + ssize_t uio_resid; /* residual count */ +} uio_t; + +/* + * I/O direction. + */ +typedef enum uio_rw { UIO_READ, UIO_WRITE } uio_rw_t; + +/* + * Segment flag values. + */ +typedef enum uio_seg { + UIO_USERSPACE, /* uio_iov describes user space */ + UIO_SYSSPACE, /* uio_iov describes system space */ +} uio_seg_t; + + +extern int uiomove (void *, size_t, uio_rw_t, uio_t *); + +#endif /* __XFS_SUPPORT_MOVE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/mrlock.c linux.21rc5-ac2/fs/xfs/support/mrlock.c --- linux.21rc5/fs/xfs/support/mrlock.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/mrlock.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include +#include +#include +#include +#include + +#include "mrlock.h" + + +#if USE_RW_WAIT_QUEUE_SPINLOCK +# define wq_write_lock write_lock +#else +# define wq_write_lock spin_lock +#endif + +/* + * We don't seem to need lock_type (only one supported), name, or + * sequence. But, XFS will pass it so let's leave them here for now. 
+ */ +/* ARGSUSED */ +void +mrlock_init(mrlock_t *mrp, int lock_type, char *name, long sequence) +{ + mrp->mr_count = 0; + mrp->mr_reads_waiting = 0; + mrp->mr_writes_waiting = 0; + init_waitqueue_head(&mrp->mr_readerq); + init_waitqueue_head(&mrp->mr_writerq); + mrp->mr_lock = SPIN_LOCK_UNLOCKED; +} + +/* + * Macros to lock/unlock the mrlock_t. + */ + +#define MRLOCK(m) spin_lock(&(m)->mr_lock); +#define MRUNLOCK(m) spin_unlock(&(m)->mr_lock); + + +/* + * lock_wait should never be called in an interrupt thread. + * + * mrlocks can sleep (i.e. call schedule) and so they can't ever + * be called from an interrupt thread. + * + * threads that wake-up should also never be invoked from interrupt threads. + * + * But, waitqueue_lock is locked from interrupt threads - and we are + * called with interrupts disabled, so it is all OK. + */ + +/* ARGSUSED */ +void +lock_wait(wait_queue_head_t *q, spinlock_t *lock, int rw) +{ + DECLARE_WAITQUEUE( wait, current ); + + __set_current_state(TASK_UNINTERRUPTIBLE); + + wq_write_lock(&q->lock); + if (rw) { + __add_wait_queue_tail(q, &wait); + } else { + __add_wait_queue(q, &wait); + } + + wq_write_unlock(&q->lock); + spin_unlock(lock); + + schedule(); + + wq_write_lock(&q->lock); + __remove_wait_queue(q, &wait); + wq_write_unlock(&q->lock); + + spin_lock(lock); + + /* return with lock held */ +} + +/* ARGSUSED */ +void +mrfree(mrlock_t *mrp) +{ +} + +/* ARGSUSED */ +void +mrlock(mrlock_t *mrp, int type, int flags) +{ + if (type == MR_ACCESS) + mraccess(mrp); + else + mrupdate(mrp); +} + +/* ARGSUSED */ +void +mraccessf(mrlock_t *mrp, int flags) +{ + MRLOCK(mrp); + if(mrp->mr_writes_waiting > 0) { + mrp->mr_reads_waiting++; + lock_wait(&mrp->mr_readerq, &mrp->mr_lock, 0); + mrp->mr_reads_waiting--; + } + while (mrp->mr_count < 0) { + mrp->mr_reads_waiting++; + lock_wait(&mrp->mr_readerq, &mrp->mr_lock, 0); + mrp->mr_reads_waiting--; + } + mrp->mr_count++; + MRUNLOCK(mrp); +} + +/* ARGSUSED */ +void +mrupdatef(mrlock_t *mrp, int flags) +{ + MRLOCK(mrp); + while(mrp->mr_count) { + mrp->mr_writes_waiting++; + lock_wait(&mrp->mr_writerq, &mrp->mr_lock, 1); + mrp->mr_writes_waiting--; + } + + mrp->mr_count = -1; /* writer on it */ + MRUNLOCK(mrp); +} + +int +mrtryaccess(mrlock_t *mrp) +{ + MRLOCK(mrp); + /* + * If anyone is waiting for update access or the lock is held for update + * fail the request. + */ + if(mrp->mr_writes_waiting > 0 || mrp->mr_count < 0) { + MRUNLOCK(mrp); + return 0; + } + mrp->mr_count++; + MRUNLOCK(mrp); + return 1; +} + +int +mrtrypromote(mrlock_t *mrp) +{ + MRLOCK(mrp); + + if(mrp->mr_count == 1) { /* We are the only thread with the lock */ + mrp->mr_count = -1; /* writer on it */ + MRUNLOCK(mrp); + return 1; + } + + MRUNLOCK(mrp); + return 0; +} + +int +mrtryupdate(mrlock_t *mrp) +{ + MRLOCK(mrp); + + if(mrp->mr_count) { + MRUNLOCK(mrp); + return 0; + } + + mrp->mr_count = -1; /* writer on it */ + MRUNLOCK(mrp); + return 1; +} + +static __inline__ void mrwake(mrlock_t *mrp) +{ + /* + * First, if the count is now 0, we need to wake-up anyone waiting. 
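+	 * Writers are preferred: a waiting writer is woken before any
+	 * waiting readers get a look in. E.g. if mr_count has just
+	 * dropped to 0 with one writer and two readers queued, only
+	 * mr_writerq is woken below.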
+ */ + if (!mrp->mr_count) { + if (mrp->mr_writes_waiting) { /* Wake-up first writer waiting */ + wake_up(&mrp->mr_writerq); + } else if (mrp->mr_reads_waiting) { /* Wakeup any readers waiting */ + wake_up(&mrp->mr_readerq); + } + } +} + +void +mraccunlock(mrlock_t *mrp) +{ + MRLOCK(mrp); + mrp->mr_count--; + mrwake(mrp); + MRUNLOCK(mrp); +} + +void +mrunlock(mrlock_t *mrp) +{ + MRLOCK(mrp); + if (mrp->mr_count < 0) { + mrp->mr_count = 0; + } else { + mrp->mr_count--; + } + mrwake(mrp); + MRUNLOCK(mrp); +} + +int +ismrlocked(mrlock_t *mrp, int type) /* No need to lock since info can change */ +{ + if (type == MR_ACCESS) + return (mrp->mr_count > 0); /* Read lock */ + else if (type == MR_UPDATE) + return (mrp->mr_count < 0); /* Write lock */ + else if (type == (MR_UPDATE | MR_ACCESS)) + return (mrp->mr_count); /* Any type of lock held */ + else /* Any waiters */ + return (mrp->mr_reads_waiting | mrp->mr_writes_waiting); +} + +/* + * Demote from update to access. We better be the only thread with the + * lock in update mode so it should be easy to set to 1. + * Wake-up any readers waiting. + */ + +void +mrdemote(mrlock_t *mrp) +{ + MRLOCK(mrp); + mrp->mr_count = 1; + if (mrp->mr_reads_waiting) { /* Wakeup all readers waiting */ + wake_up(&mrp->mr_readerq); + } + MRUNLOCK(mrp); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/mrlock.h linux.21rc5-ac2/fs/xfs/support/mrlock.h --- linux.21rc5/fs/xfs/support/mrlock.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/mrlock.h 2003-05-28 21:48:48.000000000 +0100 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_MRLOCK_H__ +#define __XFS_SUPPORT_MRLOCK_H__ + +#include +#include +#include +#include +#include + +/* + * Implement mrlocks on Linux that work for XFS. + * + * These are sleep locks and not spinlocks. If one wants read/write spinlocks, + * use read_lock, write_lock, ... see spinlock.h. 
+ */ + +typedef struct mrlock_s { + int mr_count; + unsigned short mr_reads_waiting; + unsigned short mr_writes_waiting; + wait_queue_head_t mr_readerq; + wait_queue_head_t mr_writerq; + spinlock_t mr_lock; +} mrlock_t; + +#define MR_ACCESS 1 +#define MR_UPDATE 2 + +#define MRLOCK_BARRIER 0x1 +#define MRLOCK_ALLOW_EQUAL_PRI 0x8 + +/* + * mraccessf/mrupdatef take flags to be passed in while sleeping; + * only PLTWAIT is currently supported. + */ + +extern void mraccessf(mrlock_t *, int); +extern void mrupdatef(mrlock_t *, int); +extern void mrlock(mrlock_t *, int, int); +extern void mrunlock(mrlock_t *); +extern void mraccunlock(mrlock_t *); +extern int mrtryupdate(mrlock_t *); +extern int mrtryaccess(mrlock_t *); +extern int mrtrypromote(mrlock_t *); +extern void mrdemote(mrlock_t *); + +extern int ismrlocked(mrlock_t *, int); +extern void mrlock_init(mrlock_t *, int type, char *name, long sequence); +extern void mrfree(mrlock_t *); + +#define mrinit(mrp, name) mrlock_init(mrp, MRLOCK_BARRIER, name, -1) +#define mraccess(mrp) mraccessf(mrp, 0) /* grab for READ/ACCESS */ +#define mrupdate(mrp) mrupdatef(mrp, 0) /* grab for WRITE/UPDATE */ +#define mrislocked_access(mrp) ((mrp)->mr_count > 0) +#define mrislocked_update(mrp) ((mrp)->mr_count < 0) + +#endif /* __XFS_SUPPORT_MRLOCK_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/mutex.h linux.21rc5-ac2/fs/xfs/support/mutex.h --- linux.21rc5/fs/xfs/support/mutex.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/mutex.h 2003-05-28 21:48:48.000000000 +0100 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * Portions Copyright (c) 2002 Christoph Hellwig. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_MUTEX_H__ +#define __XFS_SUPPORT_MUTEX_H__ + +#include +#include + +/* + * Map the mutex'es from IRIX to Linux semaphores. + * + * Destroy just simply initializes to -99 which should block all other + * callers. + */ +#define MUTEX_DEFAULT 0x0 +typedef struct semaphore mutex_t; + +#define mutex_init(lock, type, name) sema_init(lock, 1) +#define mutex_destroy(lock) sema_init(lock, -99) +#define mutex_lock(lock, num) down(lock) +#define mutex_trylock(lock) (down_trylock(lock) ? 
0 : 1) +#define mutex_unlock(lock) up(lock) + +#endif /* __XFS_SUPPORT_MUTEX_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/qsort.c linux.21rc5-ac2/fs/xfs/support/qsort.c --- linux.21rc5/fs/xfs/support/qsort.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/qsort.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,243 @@ +/* Copyright (C) 1991, 1992, 1996, 1997, 1999 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Written by Douglas C. Schmidt (schmidt@ics.uci.edu). + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +/* If you consider tuning this algorithm, you should consult first: + Engineering a sort function; Jon Bentley and M. Douglas McIlroy; + Software - Practice and Experience; Vol. 23 (11), 1249-1265, 1993. */ + +#include +#include + +/* Byte-wise swap two items of size SIZE. */ +#define SWAP(a, b, size) \ + do \ + { \ + register size_t __size = (size); \ + register char *__a = (a), *__b = (b); \ + do \ + { \ + char __tmp = *__a; \ + *__a++ = *__b; \ + *__b++ = __tmp; \ + } while (--__size > 0); \ + } while (0) + +/* Discontinue quicksort algorithm when partition gets below this size. + This particular magic number was chosen to work best on a Sun 4/260. */ +#define MAX_THRESH 4 + +/* Stack node declarations used to store unfulfilled partition obligations. */ +typedef struct + { + char *lo; + char *hi; + } stack_node; + +/* The next 4 #defines implement a very fast in-line stack abstraction. */ +/* The stack needs log (total_elements) entries (we could even subtract + log(MAX_THRESH)). Since total_elements has type size_t, we get as + upper bound for log (total_elements): + bits per byte (CHAR_BIT) * sizeof(size_t). */ +#define STACK_SIZE (8 * sizeof(unsigned long int)) +#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top)) +#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi))) +#define STACK_NOT_EMPTY (stack < top) + + +/* Order size using quicksort. This implementation incorporates + four optimizations discussed in Sedgewick: + + 1. Non-recursive, using an explicit stack of pointer that store the + next array partition to sort. To save time, this maximum amount + of space required to store an array of SIZE_MAX is allocated on the + stack. Assuming a 32-bit (64 bit) integer for size_t, this needs + only 32 * sizeof(stack_node) == 256 bytes (for 64 bit: 1024 bytes). + Pretty cheap, actually. + + 2. Chose the pivot element using a median-of-three decision tree. + This reduces the probability of selecting a bad pivot value and + eliminates certain extraneous comparisons. + + 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving + insertion sort to order the MAX_THRESH items within each partition. 
+ This is a big win, since insertion sort is faster for small, mostly + sorted array segments. + + 4. The larger of the two sub-partitions is always pushed onto the + stack first, with the algorithm then concentrating on the + smaller partition. This *guarantees* no more than log (total_elems) + stack size is needed (actually O(1) in this case)! */ + +void +qsort (void *const pbase, size_t total_elems, size_t size, + int (*cmp)(const void *, const void *)) +{ + register char *base_ptr = (char *) pbase; + + const size_t max_thresh = MAX_THRESH * size; + + if (total_elems == 0) + /* Avoid lossage with unsigned arithmetic below. */ + return; + + if (total_elems > MAX_THRESH) + { + char *lo = base_ptr; + char *hi = &lo[size * (total_elems - 1)]; + stack_node stack[STACK_SIZE]; + stack_node *top = stack + 1; + + while (STACK_NOT_EMPTY) + { + char *left_ptr; + char *right_ptr; + + /* Select median value from among LO, MID, and HI. Rearrange + LO and HI so the three values are sorted. This lowers the + probability of picking a pathological pivot value and + skips a comparison for both the LEFT_PTR and RIGHT_PTR in + the while loops. */ + + char *mid = lo + size * ((hi - lo) / size >> 1); + + if ((*cmp) ((void *) mid, (void *) lo) < 0) + SWAP (mid, lo, size); + if ((*cmp) ((void *) hi, (void *) mid) < 0) + SWAP (mid, hi, size); + else + goto jump_over; + if ((*cmp) ((void *) mid, (void *) lo) < 0) + SWAP (mid, lo, size); + jump_over:; + + left_ptr = lo + size; + right_ptr = hi - size; + + /* Here's the famous ``collapse the walls'' section of quicksort. + Gotta like those tight inner loops! They are the main reason + that this algorithm runs much faster than others. */ + do + { + while ((*cmp) ((void *) left_ptr, (void *) mid) < 0) + left_ptr += size; + + while ((*cmp) ((void *) mid, (void *) right_ptr) < 0) + right_ptr -= size; + + if (left_ptr < right_ptr) + { + SWAP (left_ptr, right_ptr, size); + if (mid == left_ptr) + mid = right_ptr; + else if (mid == right_ptr) + mid = left_ptr; + left_ptr += size; + right_ptr -= size; + } + else if (left_ptr == right_ptr) + { + left_ptr += size; + right_ptr -= size; + break; + } + } + while (left_ptr <= right_ptr); + + /* Set up pointers for next iteration. First determine whether + left and right partitions are below the threshold size. If so, + ignore one or both. Otherwise, push the larger partition's + bounds on the stack and continue sorting the smaller one. */ + + if ((size_t) (right_ptr - lo) <= max_thresh) + { + if ((size_t) (hi - left_ptr) <= max_thresh) + /* Ignore both small partitions. */ + POP (lo, hi); + else + /* Ignore small left partition. */ + lo = left_ptr; + } + else if ((size_t) (hi - left_ptr) <= max_thresh) + /* Ignore small right partition. */ + hi = right_ptr; + else if ((right_ptr - lo) > (hi - left_ptr)) + { + /* Push larger left partition indices. */ + PUSH (lo, right_ptr); + lo = left_ptr; + } + else + { + /* Push larger right partition indices. */ + PUSH (left_ptr, hi); + hi = right_ptr; + } + } + } + + /* Once the BASE_PTR array is partially sorted by quicksort the rest + is completely sorted using insertion sort, since this is efficient + for partitions below MAX_THRESH size. BASE_PTR points to the beginning + of the array to sort, and END_PTR points at the very last element in + the array (*not* one beyond it!). 
*/ + { + char *const end_ptr = &base_ptr[size * (total_elems - 1)]; + char *tmp_ptr = base_ptr; + char *thresh = min(end_ptr, base_ptr + max_thresh); + register char *run_ptr; + + /* Find smallest element in first threshold and place it at the + array's beginning. This is the smallest array element, + and the operation speeds up insertion sort's inner loop. */ + + for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size) + if ((*cmp) ((void *) run_ptr, (void *) tmp_ptr) < 0) + tmp_ptr = run_ptr; + + if (tmp_ptr != base_ptr) + SWAP (tmp_ptr, base_ptr, size); + + /* Insertion sort, running from left-hand-side up to right-hand-side. */ + + run_ptr = base_ptr + size; + while ((run_ptr += size) <= end_ptr) + { + tmp_ptr = run_ptr - size; + while ((*cmp) ((void *) run_ptr, (void *) tmp_ptr) < 0) + tmp_ptr -= size; + + tmp_ptr += size; + if (tmp_ptr != run_ptr) + { + char *trav; + + trav = run_ptr + size; + while (--trav >= run_ptr) + { + char c = *trav; + char *hi, *lo; + + for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo) + *hi = *lo; + *hi = c; + } + } + } + } +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/qsort.h linux.21rc5-ac2/fs/xfs/support/qsort.h --- linux.21rc5/fs/xfs/support/qsort.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/qsort.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#ifndef QSORT_H +#define QSORT_H + +extern void qsort (void *const pbase, + size_t total_elems, + size_t size, + int (*cmp)(const void *, const void *)); + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/sema.h linux.21rc5-ac2/fs/xfs/support/sema.h --- linux.21rc5/fs/xfs/support/sema.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/sema.h 2003-05-28 21:48:48.000000000 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_SEMA_H__
+#define __XFS_SUPPORT_SEMA_H__
+
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * sema_t structure just maps to struct semaphore in Linux kernel.
+ */
+
+typedef struct semaphore sema_t;
+
+#define init_sema(sp, val, c, d) sema_init(sp, val)
+#define initsema(sp, val) sema_init(sp, val)
+#define initnsema(sp, val, name) sema_init(sp, val)
+#define psema(sp, b) down(sp)
+#define vsema(sp) up(sp)
+#define valusema(sp) (atomic_read(&(sp)->count))
+#define freesema(sema)
+
+/*
+ * Map cpsema (try to get the sema) to down_trylock. We need to switch
+ * the return values since cpsema returns 1 (acquired) 0 (failed) and
+ * down_trylock returns the reverse 0 (acquired) 1 (failed).
+ */
+
+#define cpsema(sp) (down_trylock(sp) ? 0 : 1)
+
+/*
+ * Didn't do cvsema(sp). Not sure how to map this to up/down/...
+ * It does a vsema if the value is < 0, otherwise nothing.
+ */
+
+#endif /* __XFS_SUPPORT_SEMA_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/spin.h linux.21rc5-ac2/fs/xfs/support/spin.h
--- linux.21rc5/fs/xfs/support/spin.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/support/spin.h	2003-05-29 01:44:06.000000000 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Portions Copyright (c) 2002 Christoph Hellwig. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_SPIN_H__ +#define __XFS_SUPPORT_SPIN_H__ + +#include /* preempt needs this */ +#include + +/* + * Map lock_t from IRIX to Linux spinlocks. + * + * Note that linux turns on/off spinlocks depending on CONFIG_SMP. + * We don't need to worry about SMP or not here. + */ + +typedef spinlock_t lock_t; + +#define spinlock_init(lock, name) spin_lock_init(lock) +#define spinlock_destroy(lock) + +static inline unsigned long mutex_spinlock(lock_t *lock) +{ + spin_lock(lock); + return 0; +} + +/*ARGSUSED*/ +static inline void mutex_spinunlock(lock_t *lock, unsigned long s) +{ + spin_unlock(lock); +} + +static inline void nested_spinlock(lock_t *lock) +{ + spin_lock(lock); +} + +static inline void nested_spinunlock(lock_t *lock) +{ + spin_unlock(lock); +} + +#endif /* __XFS_SUPPORT_SPIN_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/sv.h linux.21rc5-ac2/fs/xfs/support/sv.h --- linux.21rc5/fs/xfs/support/sv.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/sv.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * Portions Copyright (c) 2002 Christoph Hellwig. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_SV_H__ +#define __XFS_SUPPORT_SV_H__ + +#include +#include +#include + +/* + * Synchronisation variables. 
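+ *
+ * These pair a wait queue with a caller-held spinlock, in the style
+ * of condition variables. A hedged usage sketch (names illustrative,
+ * not part of this patch):
+ *
+ *	spin_lock(&lock);
+ *	while (!condition) {
+ *		sv_wait(&sv, 0, &lock, 0);
+ *		spin_lock(&lock);
+ *	}
+ *	...
+ *	spin_unlock(&lock);
+ *
+ * Note that _sv_wait() below drops the spinlock before sleeping and
+ * does not retake it, so the caller must reacquire the lock after
+ * every wait, as the loop above does.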
+ * + * (Parameters "pri", "svf" and "rts" are not implemented) + */ + +typedef struct sv_s { + wait_queue_head_t waiters; +} sv_t; + +#define SV_FIFO 0x0 /* sv_t is FIFO type */ +#define SV_LIFO 0x2 /* sv_t is LIFO type */ +#define SV_PRIO 0x4 /* sv_t is PRIO type */ +#define SV_KEYED 0x6 /* sv_t is KEYED type */ +#define SV_DEFAULT SV_FIFO + + +static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state, + unsigned long timeout) +{ + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue_exclusive(&sv->waiters, &wait); + __set_current_state(state); + spin_unlock(lock); + + schedule_timeout(timeout); + + remove_wait_queue(&sv->waiters, &wait); +} + +#define init_sv(sv,type,name,flag) \ + init_waitqueue_head(&(sv)->waiters) +#define sv_init(sv,flag,name) \ + init_waitqueue_head(&(sv)->waiters) +#define sv_destroy(sv) \ + /*NOTHING*/ +#define sv_wait(sv, pri, lock, s) \ + _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) +#define sv_wait_sig(sv, pri, lock, s) \ + _sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) +#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \ + _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts)) +#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \ + _sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts)) +#define sv_signal(sv) \ + wake_up(&(sv)->waiters) +#define sv_broadcast(sv) \ + wake_up_all(&(sv)->waiters) + +#endif /* __XFS_SUPPORT_SV_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/time.h linux.21rc5-ac2/fs/xfs/support/time.h --- linux.21rc5/fs/xfs/support/time.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/time.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_TIME_H__
+#define __XFS_SUPPORT_TIME_H__
+
+#include
+#include
+
+typedef struct timespec timespec_t;
+
+static inline void delay(long ticks)
+{
+	current->state = TASK_UNINTERRUPTIBLE;
+	schedule_timeout(ticks);
+}
+
+static inline void nanotime(struct timespec *tvp)
+{
+	struct timeval tv;
+
+	do_gettimeofday(&tv);
+	tvp->tv_sec = tv.tv_sec;
+	tvp->tv_nsec = tv.tv_usec * 1000;
+}
+
+#endif /* __XFS_SUPPORT_TIME_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/uuid.c linux.21rc5-ac2/fs/xfs/support/uuid.c
--- linux.21rc5/fs/xfs/support/uuid.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/support/uuid.c	2003-05-07 15:41:09.000000000 +0100
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include
+#include
+#include
+#include "time.h"
+#include "uuid.h"
+#include "kmem.h"
+#include "debug.h"
+#include "mutex.h"
+
+static mutex_t uuid_monitor;
+static int uuid_table_size;
+static uuid_t *uuid_table;
+
+void
+uuid_init(void)
+{
+	mutex_init(&uuid_monitor, MUTEX_DEFAULT, "uuid_monitor");
+}
+
+/*
+ * uuid_getnodeuniq - obtain the node unique fields of a UUID.
+ *
+ * This is not in any way a standard or condoned UUID function;
+ * it is just something that's needed for user-level file handles.
+ */
+void
+uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
+{
+	char *uu = (char *)uuid;
+
+	/* on IRIX, this function assumes big-endian fields within
+	 * the uuid, so we use INT_GET to get the same result on
+	 * little-endian systems
+	 */
+
+	fsid[0] = (INT_GET(*(u_int16_t*)(uu+8), ARCH_CONVERT) << 16) +
+		  INT_GET(*(u_int16_t*)(uu+4), ARCH_CONVERT);
+	fsid[1] = INT_GET(*(u_int32_t*)(uu ), ARCH_CONVERT);
+}
+
+void
+uuid_create_nil(uuid_t *uuid)
+{
+	memset(uuid, 0, sizeof(*uuid));
+}
+
+int
+uuid_is_nil(uuid_t *uuid)
+{
+	int i;
+	char *cp = (char *)uuid;
+
+	if (uuid == NULL)
+		return 0;
+	/* implied check of version number here...
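+	 * (a nil UUID is all-zero bytes, so scanning the whole
+	 * structure below covers the embedded version field as well)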
*/ + for (i = 0; i < sizeof *uuid; i++) + if (*cp++) return 0; /* not nil */ + return 1; /* is nil */ +} + +int +uuid_equal(uuid_t *uuid1, uuid_t *uuid2) +{ + return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1; +} + +/* + * Given a 128-bit uuid, return a 64-bit value by adding the top and bottom + * 64-bit words. NOTE: This function can not be changed EVER. Although + * brain-dead, some applications depend on this 64-bit value remaining + * persistent. Specifically, DMI vendors store the value as a persistent + * filehandle. + */ +__uint64_t +uuid_hash64(uuid_t *uuid) +{ + __uint64_t *sp = (__uint64_t *)uuid; + + return sp[0] + sp[1]; +} + +int +uuid_table_insert(uuid_t *uuid) +{ + int i, hole; + + mutex_lock(&uuid_monitor, PVFS); + for (i = 0, hole = -1; i < uuid_table_size; i++) { + if (uuid_is_nil(&uuid_table[i])) { + hole = i; + continue; + } + if (uuid_equal(uuid, &uuid_table[i])) { + mutex_unlock(&uuid_monitor); + return 0; + } + } + if (hole < 0) { + uuid_table = kmem_realloc(uuid_table, + (uuid_table_size + 1) * sizeof(*uuid_table), + uuid_table_size * sizeof(*uuid_table), + KM_SLEEP); + hole = uuid_table_size++; + } + uuid_table[hole] = *uuid; + mutex_unlock(&uuid_monitor); + return 1; +} + +void +uuid_table_remove(uuid_t *uuid) +{ + int i; + + mutex_lock(&uuid_monitor, PVFS); + for (i = 0; i < uuid_table_size; i++) { + if (uuid_is_nil(&uuid_table[i])) + continue; + if (!uuid_equal(uuid, &uuid_table[i])) + continue; + uuid_create_nil(&uuid_table[i]); + break; + } + ASSERT(i < uuid_table_size); + mutex_unlock(&uuid_monitor); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/support/uuid.h linux.21rc5-ac2/fs/xfs/support/uuid.h --- linux.21rc5/fs/xfs/support/uuid.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/support/uuid.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_UUID_H__ +#define __XFS_SUPPORT_UUID_H__ + +typedef struct { + unsigned char __u_bits[16]; +} uuid_t; + +void uuid_init(void); +void uuid_create_nil(uuid_t *uuid); +int uuid_is_nil(uuid_t *uuid); +int uuid_equal(uuid_t *uuid1, uuid_t *uuid2); +void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]); +__uint64_t uuid_hash64(uuid_t *uuid); +int uuid_table_insert(uuid_t *uuid); +void uuid_table_remove(uuid_t *uuid); + +#endif /* __XFS_SUPPORT_UUID_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_acl.c linux.21rc5-ac2/fs/xfs/xfs_acl.c --- linux.21rc5/fs/xfs/xfs_acl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_acl.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,975 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_inum.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_acl.h" +#include "xfs_mac.h" +#include "xfs_attr.h" + +#include + +STATIC int xfs_acl_setmode(vnode_t *, xfs_acl_t *, int *); +STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *); +STATIC void xfs_acl_get_endian(xfs_acl_t *); +STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *); +STATIC int xfs_acl_invalid(xfs_acl_t *); +STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *); +STATIC void xfs_acl_get_attr(vnode_t *, xfs_acl_t *, int, int, int *); +STATIC void xfs_acl_set_attr(vnode_t *, xfs_acl_t *, int, int *); +STATIC int xfs_acl_allow_set(vnode_t *, int); + +kmem_zone_t *xfs_acl_zone; + + +/* + * Test for existence of access ACL attribute as efficiently as possible. 
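+ * "Efficiently" means the attribute value is never copied out:
+ * ATTR_KERNOVAL with a NULL buffer turns the underlying VOP_ATTR_GET
+ * in xfs_acl_get_attr() into a pure existence probe, so only the
+ * resulting error code matters.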
+ */ +int +xfs_acl_vhasacl_access( + vnode_t *vp) +{ + int error; + + xfs_acl_get_attr(vp, NULL, _ACL_TYPE_ACCESS, ATTR_KERNOVAL, &error); + return (error == 0); +} + +/* + * Test for existence of default ACL attribute as efficiently as possible. + */ +int +xfs_acl_vhasacl_default( + vnode_t *vp) +{ + int error; + + if (vp->v_type != VDIR) + return 0; + xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error); + return (error == 0); +} + +/* + * Convert from extended attribute representation to in-memory for XFS. + */ +STATIC int +posix_acl_xattr_to_xfs( + posix_acl_xattr_header *src, + size_t size, + xfs_acl_t *dest) +{ + posix_acl_xattr_entry *src_entry; + xfs_acl_entry_t *dest_entry; + int n; + + if (!src || !dest) + return EINVAL; + + if (size < sizeof(posix_acl_xattr_header)) + return EINVAL; + + if (src->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION)) + return EINVAL; + + memset(dest, 0, sizeof(xfs_acl_t)); + dest->acl_cnt = posix_acl_xattr_count(size); + if (dest->acl_cnt < 0 || dest->acl_cnt > XFS_ACL_MAX_ENTRIES) + return EINVAL; + + /* + * acl_set_file(3) may request that we set default ACLs with + * zero length -- defend (gracefully) against that here. + */ + if (!dest->acl_cnt) + return 0; + + src_entry = (posix_acl_xattr_entry *)((char *)src + sizeof(*src)); + dest_entry = &dest->acl_entry[0]; + + for (n = 0; n < dest->acl_cnt; n++, src_entry++, dest_entry++) { + dest_entry->ae_perm = le16_to_cpu(src_entry->e_perm); + if (_ACL_PERM_INVALID(dest_entry->ae_perm)) + return EINVAL; + dest_entry->ae_tag = le16_to_cpu(src_entry->e_tag); + switch(dest_entry->ae_tag) { + case ACL_USER: + case ACL_GROUP: + dest_entry->ae_id = le32_to_cpu(src_entry->e_id); + break; + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: + case ACL_MASK: + case ACL_OTHER: + dest_entry->ae_id = ACL_UNDEFINED_ID; + break; + default: + return EINVAL; + } + } + if (xfs_acl_invalid(dest)) + return EINVAL; + + return 0; +} + +/* + * Comparison function called from qsort(). + * Primary key is ae_tag, secondary key is ae_id. + */ +STATIC int +xfs_acl_entry_compare( + const void *va, + const void *vb) +{ + xfs_acl_entry_t *a = (xfs_acl_entry_t *)va, + *b = (xfs_acl_entry_t *)vb; + + if (a->ae_tag == b->ae_tag) + return (a->ae_id - b->ae_id); + return (a->ae_tag - b->ae_tag); +} + +/* + * Convert from in-memory XFS to extended attribute representation. 
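+ * Entries are written out sorted by (tag, id); the qsort() call
+ * below, with xfs_acl_entry_compare() as the comparator, establishes
+ * that ordering first. Returns the byte size of the resulting xattr
+ * image, or a negative errno (-ERANGE when "size" is too small,
+ * -EINVAL for a malformed entry).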
+ */ +STATIC int +posix_acl_xfs_to_xattr( + xfs_acl_t *src, + posix_acl_xattr_header *dest, + size_t size) +{ + int n; + size_t new_size = posix_acl_xattr_size(src->acl_cnt); + posix_acl_xattr_entry *dest_entry; + xfs_acl_entry_t *src_entry; + + if (size < new_size) + return -ERANGE; + + /* Need to sort src XFS ACL by */ + qsort(src->acl_entry, src->acl_cnt, sizeof(src->acl_entry[0]), + xfs_acl_entry_compare); + + dest->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION); + dest_entry = &dest->a_entries[0]; + src_entry = &src->acl_entry[0]; + for (n = 0; n < src->acl_cnt; n++, dest_entry++, src_entry++) { + dest_entry->e_perm = cpu_to_le16(src_entry->ae_perm); + if (_ACL_PERM_INVALID(src_entry->ae_perm)) + return -EINVAL; + dest_entry->e_tag = cpu_to_le16(src_entry->ae_tag); + switch (src_entry->ae_tag) { + case ACL_USER: + case ACL_GROUP: + dest_entry->e_id = cpu_to_le32(src_entry->ae_id); + break; + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: + case ACL_MASK: + case ACL_OTHER: + dest_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID); + break; + default: + return -EINVAL; + } + } + return new_size; +} + +int +xfs_acl_vget( + vnode_t *vp, + void *acl, + size_t size, + int kind) +{ + int error; + xfs_acl_t *xfs_acl = NULL; + posix_acl_xattr_header *ext_acl = acl; + int flags = 0; + + VN_HOLD(vp); + if ((error = _MAC_VACCESS(vp, NULL, VREAD))) + goto out; + if(size) { + if (!(_ACL_ALLOC(xfs_acl))) { + error = ENOMEM; + goto out; + } + memset(xfs_acl, 0, sizeof(xfs_acl_t)); + } else + flags = ATTR_KERNOVAL; + + xfs_acl_get_attr(vp, xfs_acl, kind, flags, &error); + if (error) + goto out; + + if (!size) { + error = -posix_acl_xattr_size(XFS_ACL_MAX_ENTRIES); + } else { + if (xfs_acl_invalid(xfs_acl)) { + error = EINVAL; + goto out; + } + if (kind == _ACL_TYPE_ACCESS) { + vattr_t va; + + va.va_mask = XFS_AT_MODE; + VOP_GETATTR(vp, &va, 0, sys_cred, error); + if (error) + goto out; + xfs_acl_sync_mode(va.va_mode, xfs_acl); + } + error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size); + } +out: + VN_RELE(vp); + if(xfs_acl) + _ACL_FREE(xfs_acl); + return -error; +} + +int +xfs_acl_vremove( + vnode_t *vp, + int kind) +{ + int error; + + VN_HOLD(vp); + error = xfs_acl_allow_set(vp, kind); + if (!error) { + VOP_ATTR_REMOVE(vp, kind == _ACL_TYPE_DEFAULT? + SGI_ACL_DEFAULT: SGI_ACL_FILE, + ATTR_ROOT, sys_cred, error); + if (error == ENOATTR) + error = 0; /* 'scool */ + } + VN_RELE(vp); + return -error; +} + +int +xfs_acl_vset( + vnode_t *vp, + void *acl, + size_t size, + int kind) +{ + posix_acl_xattr_header *ext_acl = acl; + xfs_acl_t *xfs_acl; + int error; + int basicperms = 0; /* more than std unix perms? */ + + if (!acl) + return -EINVAL; + + if (!(_ACL_ALLOC(xfs_acl))) + return -ENOMEM; + + error = posix_acl_xattr_to_xfs(ext_acl, size, xfs_acl); + if (error) { + _ACL_FREE(xfs_acl); + return -error; + } + if (!xfs_acl->acl_cnt) { + _ACL_FREE(xfs_acl); + return 0; + } + + VN_HOLD(vp); + error = xfs_acl_allow_set(vp, kind); + if (error) + goto out; + + /* Incoming ACL exists, set file mode based on its value */ + if (kind == _ACL_TYPE_ACCESS) + xfs_acl_setmode(vp, xfs_acl, &basicperms); + + /* + * If we have more than std unix permissions, set up the actual attr. + * Otherwise, delete any existing attr. This prevents us from + * having actual attrs for permissions that can be stored in the + * standard permission bits. 
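+	 * For example, an ACL carrying only u::rw-, g::r-- and o::r--
+	 * collapses to mode 0644: the mode bits are updated and any
+	 * on-disk SGI_ACL_FILE attribute is removed rather than stored.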
+ */ + if (!basicperms) { + xfs_acl_set_attr(vp, xfs_acl, kind, &error); + } else { + xfs_acl_vremove(vp, _ACL_TYPE_ACCESS); + } + + +out: + VN_RELE(vp); + _ACL_FREE(xfs_acl); + return -error; +} + +int +xfs_acl_iaccess( + xfs_inode_t *ip, + mode_t mode, + cred_t *cr) +{ + xfs_acl_t *acl; + int error; + + if (!(_ACL_ALLOC(acl))) + return -1; + + /* If the file has no ACL return -1. */ + if (xfs_attr_fetch(ip, SGI_ACL_FILE, (char *)acl, sizeof(xfs_acl_t))) { + _ACL_FREE(acl); + return -1; + } + xfs_acl_get_endian(acl); + + /* If the file has an empty ACL return -1. */ + if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) { + _ACL_FREE(acl); + return -1; + } + + /* Synchronize ACL with mode bits */ + xfs_acl_sync_mode(ip->i_d.di_mode, acl); + + error = xfs_acl_access(ip->i_d.di_uid, ip->i_d.di_gid, acl, mode, cr); + _ACL_FREE(acl); + return error; +} + +STATIC int +xfs_acl_allow_set( + vnode_t *vp, + int kind) +{ + vattr_t va; + int error; + + if (kind == _ACL_TYPE_DEFAULT && vp->v_type != VDIR) + return ENOTDIR; + if (vp->v_vfsp->vfs_flag & VFS_RDONLY) + return EROFS; + if ((error = _MAC_VACCESS(vp, NULL, VWRITE))) + return error; + va.va_mask = XFS_AT_UID; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return error; + if (va.va_uid != current->fsuid && !capable(CAP_FOWNER)) + return EPERM; + return error; +} + +/* + * Look for any effective exec access, to allow CAP_DAC_OVERRIDE for exec. + * Ignore checking for exec in USER_OBJ when there is no mask, because + * in this "minimal acl" case we don't have any actual acls, and we + * won't even be here. + */ +STATIC int +xfs_acl_find_any_exec( + xfs_acl_t *fap) +{ + int i; + int masked_aces = 0; + int mask = 0; + + for (i = 0; i < fap->acl_cnt; i++) { + if (fap->acl_entry[i].ae_perm & ACL_EXECUTE) { + if (fap->acl_entry[i].ae_tag & (ACL_USER_OBJ|ACL_OTHER)) + return 1; + + if (fap->acl_entry[i].ae_tag == ACL_MASK) + mask = fap->acl_entry[i].ae_perm; + else + masked_aces |= fap->acl_entry[i].ae_perm; + + if ((mask & masked_aces) & ACL_EXECUTE) + return 1; + } + } + + return 0; +} + +/* + * The access control process to determine the access permission: + * if uid == file owner id, use the file owner bits. + * if gid == file owner group id, use the file group bits. + * scan ACL for a maching user or group, and use matched entry + * permission. Use total permissions of all matching group entries, + * until all acl entries are exhausted. The final permission produced + * by matching acl entry or entries needs to be & with group permission. + * if not owner, owning group, or matching entry in ACL, use file + * other bits. Don't allow CAP_DAC_OVERRIDE on exec access unless + * there is some effective exec access somewhere. + */ +STATIC int +xfs_acl_capability_check( + mode_t mode, + cred_t *cr, + xfs_acl_t *fap) +{ + if ((mode & ACL_READ) && !capable_cred(cr, CAP_DAC_READ_SEARCH)) + return EACCES; + if ((mode & ACL_WRITE) && !capable_cred(cr, CAP_DAC_OVERRIDE)) + return EACCES; + if ((mode & ACL_EXECUTE) && + (!capable_cred(cr, CAP_DAC_OVERRIDE) || + !xfs_acl_find_any_exec(fap))) { + return EACCES; + } + + return 0; +} + +/* + * Note: cr is only used here for the capability check if the ACL test fails. + * It is not used to find out the credentials uid or groups etc, as was + * done in IRIX. It is assumed that the uid and groups for the current + * thread are taken from "current" instead of the cr parameter. 
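+ *
+ * A worked example with hypothetical values: for a read request on a
+ * file whose ACL is
+ *
+ *	u::rw-  u:200:r--  g::---  m::r--  o::---
+ *
+ * a caller with fsuid 200 matches the ACL_USER entry, and the result
+ * is then filtered through the ACL_MASK entry, so read is granted.
+ * The file owner matches ACL_USER_OBJ and is granted read/write with
+ * no mask filtering at all.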
+ */ +STATIC int +xfs_acl_access( + uid_t fuid, + gid_t fgid, + xfs_acl_t *fap, + mode_t md, + cred_t *cr) +{ + xfs_acl_entry_t matched; + int i, allows; + int maskallows = -1; /* true, but not 1, either */ + int seen_userobj = 0; + + matched.ae_tag = 0; /* Invalid type */ + md >>= 6; /* Normalize the bits for comparison */ + + for (i = 0; i < fap->acl_cnt; i++) { + /* + * Break out if we've got a user_obj entry or + * a user entry and the mask (and have processed USER_OBJ) + */ + if (matched.ae_tag == ACL_USER_OBJ) + break; + if (matched.ae_tag == ACL_USER) { + if (maskallows != -1 && seen_userobj) + break; + if (fap->acl_entry[i].ae_tag != ACL_MASK && + fap->acl_entry[i].ae_tag != ACL_USER_OBJ) + continue; + } + /* True if this entry allows the requested access */ + allows = ((fap->acl_entry[i].ae_perm & md) == md); + + switch (fap->acl_entry[i].ae_tag) { + case ACL_USER_OBJ: + seen_userobj = 1; + if (fuid != current->fsuid) + continue; + matched.ae_tag = ACL_USER_OBJ; + matched.ae_perm = allows; + break; + case ACL_USER: + if (fap->acl_entry[i].ae_id != current->fsuid) + continue; + matched.ae_tag = ACL_USER; + matched.ae_perm = allows; + break; + case ACL_GROUP_OBJ: + if ((matched.ae_tag == ACL_GROUP_OBJ || + matched.ae_tag == ACL_GROUP) && !allows) + continue; + if (!in_group_p(fgid)) + continue; + matched.ae_tag = ACL_GROUP_OBJ; + matched.ae_perm = allows; + break; + case ACL_GROUP: + if ((matched.ae_tag == ACL_GROUP_OBJ || + matched.ae_tag == ACL_GROUP) && !allows) + continue; + if (!in_group_p(fap->acl_entry[i].ae_id)) + continue; + matched.ae_tag = ACL_GROUP; + matched.ae_perm = allows; + break; + case ACL_MASK: + maskallows = allows; + break; + case ACL_OTHER: + if (matched.ae_tag != 0) + continue; + matched.ae_tag = ACL_OTHER; + matched.ae_perm = allows; + break; + } + } + /* + * First possibility is that no matched entry allows access. + * The capability to override DAC may exist, so check for it. + */ + switch (matched.ae_tag) { + case ACL_OTHER: + case ACL_USER_OBJ: + if (matched.ae_perm) + return 0; + break; + case ACL_USER: + case ACL_GROUP_OBJ: + case ACL_GROUP: + if (maskallows && matched.ae_perm) + return 0; + break; + case 0: + break; + } + + return xfs_acl_capability_check(md, cr, fap); +} + +/* + * ACL validity checker. + * This acl validation routine checks each ACL entry read in makes sense. + */ +STATIC int +xfs_acl_invalid( + xfs_acl_t *aclp) +{ + xfs_acl_entry_t *entry, *e; + int user = 0, group = 0, other = 0, mask = 0; + int mask_required = 0; + int i, j; + + if (!aclp) + goto acl_invalid; + + if (aclp->acl_cnt > XFS_ACL_MAX_ENTRIES) + goto acl_invalid; + + for (i = 0; i < aclp->acl_cnt; i++) { + entry = &aclp->acl_entry[i]; + switch (entry->ae_tag) { + case ACL_USER_OBJ: + if (user++) + goto acl_invalid; + break; + case ACL_GROUP_OBJ: + if (group++) + goto acl_invalid; + break; + case ACL_OTHER: + if (other++) + goto acl_invalid; + break; + case ACL_USER: + case ACL_GROUP: + for (j = i + 1; j < aclp->acl_cnt; j++) { + e = &aclp->acl_entry[j]; + if (e->ae_id == entry->ae_id && + e->ae_tag == entry->ae_tag) + goto acl_invalid; + } + mask_required++; + break; + case ACL_MASK: + if (mask++) + goto acl_invalid; + break; + default: + goto acl_invalid; + } + } + if (!user || !group || !other || (mask_required && !mask)) + goto acl_invalid; + else + return 0; +acl_invalid: + return EINVAL; +} + +/* + * Do ACL endian conversion. 
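+ * The attribute value arrives in on-disk byte order (big-endian, as
+ * assumed throughout XFS metadata); INT_SET with ARCH_CONVERT
+ * converts acl_cnt and every entry field in place, and is a no-op on
+ * big-endian hosts.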
+ */ +STATIC void +xfs_acl_get_endian( + xfs_acl_t *aclp) +{ + xfs_acl_entry_t *ace, *end; + + INT_SET(aclp->acl_cnt, ARCH_CONVERT, aclp->acl_cnt); + end = &aclp->acl_entry[0]+aclp->acl_cnt; + for (ace = &aclp->acl_entry[0]; ace < end; ace++) { + INT_SET(ace->ae_tag, ARCH_CONVERT, ace->ae_tag); + INT_SET(ace->ae_id, ARCH_CONVERT, ace->ae_id); + INT_SET(ace->ae_perm, ARCH_CONVERT, ace->ae_perm); + } +} + +/* + * Get the ACL from the EA and do endian conversion. + */ +STATIC void +xfs_acl_get_attr( + vnode_t *vp, + xfs_acl_t *aclp, + int kind, + int flags, + int *error) +{ + int len = sizeof(xfs_acl_t); + + ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1); + flags |= ATTR_ROOT; + VOP_ATTR_GET(vp, + kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE : SGI_ACL_DEFAULT, + (char *)aclp, &len, flags, sys_cred, *error); + if (*error || (flags & ATTR_KERNOVAL)) + return; + xfs_acl_get_endian(aclp); +} + +/* + * Set the EA with the ACL and do endian conversion. + */ +STATIC void +xfs_acl_set_attr( + vnode_t *vp, + xfs_acl_t *aclp, + int kind, + int *error) +{ + xfs_acl_entry_t *ace, *newace, *end; + xfs_acl_t *newacl; + int len; + + if (!(_ACL_ALLOC(newacl))) { + *error = ENOMEM; + return; + } + + len = sizeof(xfs_acl_t) - + (sizeof(xfs_acl_entry_t) * (XFS_ACL_MAX_ENTRIES - aclp->acl_cnt)); + end = &aclp->acl_entry[0]+aclp->acl_cnt; + for (ace = &aclp->acl_entry[0], newace = &newacl->acl_entry[0]; + ace < end; + ace++, newace++) { + INT_SET(newace->ae_tag, ARCH_CONVERT, ace->ae_tag); + INT_SET(newace->ae_id, ARCH_CONVERT, ace->ae_id); + INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm); + } + INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt); + VOP_ATTR_SET(vp, + kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE: SGI_ACL_DEFAULT, + (char *)newacl, len, ATTR_ROOT, sys_cred, *error); + _ACL_FREE(newacl); +} + +int +xfs_acl_vtoacl( + vnode_t *vp, + xfs_acl_t *access_acl, + xfs_acl_t *default_acl) +{ + vattr_t va; + int error = 0; + + if (access_acl) { + /* + * Get the Access ACL and the mode. If either cannot + * be obtained for some reason, invalidate the access ACL. + */ + xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error); + if (!error) { + /* Got the ACL, need the mode... */ + va.va_mask = XFS_AT_MODE; + VOP_GETATTR(vp, &va, 0, sys_cred, error); + } + + if (error) + access_acl->acl_cnt = XFS_ACL_NOT_PRESENT; + else /* We have a good ACL and the file mode, synchronize. */ + xfs_acl_sync_mode(va.va_mode, access_acl); + } + + if (default_acl) { + xfs_acl_get_attr(vp, default_acl, _ACL_TYPE_DEFAULT, 0, &error); + if (error) + default_acl->acl_cnt = XFS_ACL_NOT_PRESENT; + } + return error; +} + +/* + * This function retrieves the parent directory's acl, processes it + * and lets the child inherit the acl(s) that it should. + */ +int +xfs_acl_inherit( + vnode_t *vp, + vattr_t *vap, + xfs_acl_t *pdaclp) +{ + xfs_acl_t *cacl; + int error = 0; + int basicperms = 0; + + /* + * If the parent does not have a default ACL, or it's an + * invalid ACL, we're done. + */ + if (!vp) + return 0; + if (!pdaclp || xfs_acl_invalid(pdaclp)) + return 0; + + /* + * Copy the default ACL of the containing directory to + * the access ACL of the new file and use the mode that + * was passed in to set up the correct initial values for + * the u::,g::[m::], and o:: entries. This is what makes + * umask() "work" with ACL's. 
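+	 *
+	 * For example (hypothetical numbers): a parent default ACL of
+	 * u::rwx g::rwx o::rwx combined with a create mode of 0640
+	 * leaves the child's access ACL as u::rw- g::r-- o::---, just
+	 * as a umask would have trimmed plain mode bits.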
+ */ + + if (!(_ACL_ALLOC(cacl))) + return ENOMEM; + + memcpy(cacl, pdaclp, sizeof(xfs_acl_t)); + xfs_acl_filter_mode(vap->va_mode, cacl); + xfs_acl_setmode(vp, cacl, &basicperms); + + /* + * Set the Default and Access ACL on the file. The mode is already + * set on the file, so we don't need to worry about that. + * + * If the new file is a directory, its default ACL is a copy of + * the containing directory's default ACL. + */ + if (vp->v_type == VDIR) + xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error); + if (!error && !basicperms) + xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error); + _ACL_FREE(cacl); + return error; +} + +/* + * Set up the correct mode on the file based on the supplied ACL. This + * makes sure that the mode on the file reflects the state of the + * u::,g::[m::], and o:: entries in the ACL. Since the mode is where + * the ACL is going to get the permissions for these entries, we must + * synchronize the mode whenever we set the ACL on a file. + */ +STATIC int +xfs_acl_setmode( + vnode_t *vp, + xfs_acl_t *acl, + int *basicperms) +{ + vattr_t va; + xfs_acl_entry_t *ap; + xfs_acl_entry_t *gap = NULL; + int i, error, nomask = 1; + + *basicperms = 1; + + if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) + return 0; + + /* + * Copy the u::, g::, o::, and m:: bits from the ACL into the + * mode. The m:: bits take precedence over the g:: bits. + */ + va.va_mask = XFS_AT_MODE; + VOP_GETATTR(vp, &va, 0, sys_cred, error); + if (error) + return error; + + va.va_mask = XFS_AT_MODE; + va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); + ap = acl->acl_entry; + for (i = 0; i < acl->acl_cnt; ++i) { + switch (ap->ae_tag) { + case ACL_USER_OBJ: + va.va_mode |= ap->ae_perm << 6; + break; + case ACL_GROUP_OBJ: + gap = ap; + break; + case ACL_MASK: /* more than just standard modes */ + nomask = 0; + va.va_mode |= ap->ae_perm << 3; + *basicperms = 0; + break; + case ACL_OTHER: + va.va_mode |= ap->ae_perm; + break; + default: /* more than just standard modes */ + *basicperms = 0; + break; + } + ap++; + } + + /* Set the group bits from ACL_GROUP_OBJ if there's no ACL_MASK */ + if (gap && nomask) + va.va_mode |= gap->ae_perm << 3; + + VOP_SETATTR(vp, &va, 0, sys_cred, error); + return error; +} + +/* + * The permissions for the special ACL entries (u::, g::[m::], o::) are + * actually stored in the file mode (if there is both a group and a mask, + * the group is stored in the ACL entry and the mask is stored on the file). + * This allows the mode to remain automatically in sync with the ACL without + * the need for a call-back to the ACL system at every point where the mode + * could change. This function takes the permissions from the specified mode + * and places it in the supplied ACL. + * + * This implementation draws its validity from the fact that, when the ACL + * was assigned, the mode was copied from the ACL. + * If the mode did not change, therefore, the mode remains exactly what was + * taken from the special ACL entries at assignment. + * If a subsequent chmod() was done, the POSIX spec says that the change in + * mode must cause an update to the ACL seen at user level and used for + * access checks. Before and after a mode change, therefore, the file mode + * most accurately reflects what the special ACL entries should permit/deny. + * + * CAVEAT: If someone sets the SGI_ACL_FILE attribute directly, + * the existing mode bits will override whatever is in the + * ACL. 
Similarly, if there is a pre-existing ACL that was + * never in sync with its mode (owing to a bug in 6.5 and + * before), it will now magically (or mystically) be + * synchronized. This could cause slight astonishment, but + * it is better than inconsistent permissions. + * + * The supplied ACL is a template that may contain any combination + * of special entries. These are treated as placeholders when we fill + * out the ACL. This routine does not add or remove special entries, it + * simply unites each special entry with its associated set of permissions. + */ +STATIC void +xfs_acl_sync_mode( + mode_t mode, + xfs_acl_t *acl) +{ + int i, nomask = 1; + xfs_acl_entry_t *ap; + xfs_acl_entry_t *gap = NULL; + + /* + * Set ACL entries. POSIX1003.1eD16 requires that the MASK + * be set instead of the GROUP entry, if there is a MASK. + */ + for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) { + switch (ap->ae_tag) { + case ACL_USER_OBJ: + ap->ae_perm = (mode >> 6) & 0x7; + break; + case ACL_GROUP_OBJ: + gap = ap; + break; + case ACL_MASK: + nomask = 0; + ap->ae_perm = (mode >> 3) & 0x7; + break; + case ACL_OTHER: + ap->ae_perm = mode & 0x7; + break; + default: + break; + } + } + /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */ + if (gap && nomask) + gap->ae_perm = (mode >> 3) & 0x7; +} + +/* + * When inheriting an Access ACL from a directory Default ACL, + * the ACL bits are set to the intersection of the ACL default + * permission bits and the file permission bits in mode. If there + * are no permission bits on the file then we must not give them + * the ACL. This is what makes umask() work with ACLs. + */ +STATIC void +xfs_acl_filter_mode( + mode_t mode, + xfs_acl_t *acl) +{ + int i, nomask = 1; + xfs_acl_entry_t *ap; + xfs_acl_entry_t *gap = NULL; + + /* + * Set ACL entries. POSIX1003.1eD16 requires that the MASK + * be merged with GROUP entry, if there is a MASK. + */ + for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) { + switch (ap->ae_tag) { + case ACL_USER_OBJ: + ap->ae_perm &= (mode >> 6) & 0x7; + break; + case ACL_GROUP_OBJ: + gap = ap; + break; + case ACL_MASK: + nomask = 0; + ap->ae_perm &= (mode >> 3) & 0x7; + break; + case ACL_OTHER: + ap->ae_perm &= mode & 0x7; + break; + default: + break; + } + } + /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */ + if (gap && nomask) + gap->ae_perm &= (mode >> 3) & 0x7; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_acl.h linux.21rc5-ac2/fs/xfs/xfs_acl.h --- linux.21rc5/fs/xfs/xfs_acl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_acl.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file.
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ACL_H__ +#define __XFS_ACL_H__ + +/* + * Access Control Lists + */ +typedef __uint16_t xfs_acl_perm_t; +typedef __int32_t xfs_acl_type_t; +typedef __int32_t xfs_acl_tag_t; +typedef __int32_t xfs_acl_id_t; + +#define XFS_ACL_MAX_ENTRIES 25 +#define XFS_ACL_NOT_PRESENT (-1) + +typedef struct xfs_acl_entry { + xfs_acl_tag_t ae_tag; + xfs_acl_id_t ae_id; + xfs_acl_perm_t ae_perm; +} xfs_acl_entry_t; + +typedef struct xfs_acl { + __int32_t acl_cnt; + xfs_acl_entry_t acl_entry[XFS_ACL_MAX_ENTRIES]; +} xfs_acl_t; + +/* On-disk XFS extended attribute names */ +#define SGI_ACL_FILE "SGI_ACL_FILE" +#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT" +#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1) +#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) + + +#ifdef __KERNEL__ + +#ifdef CONFIG_XFS_POSIX_ACL + +struct vattr; +struct vnode; +struct xfs_inode; + +extern int xfs_acl_inherit(struct vnode *, struct vattr *, xfs_acl_t *); +extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *); +extern int xfs_acl_get(struct vnode *, xfs_acl_t *, xfs_acl_t *); +extern int xfs_acl_set(struct vnode *, xfs_acl_t *, xfs_acl_t *); +extern int xfs_acl_vtoacl(struct vnode *, xfs_acl_t *, xfs_acl_t *); +extern int xfs_acl_vhasacl_access(struct vnode *); +extern int xfs_acl_vhasacl_default(struct vnode *); +extern int xfs_acl_vset(struct vnode *, void *, size_t, int); +extern int xfs_acl_vget(struct vnode *, void *, size_t, int); +extern int xfs_acl_vremove(struct vnode *vp, int); + +extern struct kmem_zone *xfs_acl_zone; + +#define _ACL_TYPE_ACCESS 1 +#define _ACL_TYPE_DEFAULT 2 +#define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE)) + +#define _ACL_DECL(a) xfs_acl_t *(a) = NULL +#define _ACL_ALLOC(a) ((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP)) +#define _ACL_FREE(a) ((a)? kmem_zone_free(xfs_acl_zone, (a)) : 0) +#define _ACL_ZONE_INIT(z,name) ((z) = kmem_zone_init(sizeof(xfs_acl_t), name)) +#define _ACL_ZONE_DESTROY(z) (kmem_cache_destroy(z)) +#define _ACL_INHERIT(c,v,d) (xfs_acl_inherit(c,v,d)) +#define _ACL_GET_ACCESS(pv,pa) (xfs_acl_vtoacl(pv,pa,NULL) == 0) +#define _ACL_GET_DEFAULT(pv,pd) (xfs_acl_vtoacl(pv,NULL,pd) == 0) +#define _ACL_ACCESS_EXISTS xfs_acl_vhasacl_access +#define _ACL_DEFAULT_EXISTS xfs_acl_vhasacl_default +#define _ACL_XFS_IACCESS(i,m,c) (XFS_IFORK_Q(i) ? 
xfs_acl_iaccess(i,m,c) : -1) + +#else +#define xfs_acl_vset(v,p,sz,t) (-EOPNOTSUPP) +#define xfs_acl_vget(v,p,sz,t) (-EOPNOTSUPP) +#define xfs_acl_vremove(v,t) (-EOPNOTSUPP) +#define _ACL_DECL(a) ((void)0) +#define _ACL_ALLOC(a) (1) /* successfully allocate nothing */ +#define _ACL_FREE(a) ((void)0) +#define _ACL_ZONE_INIT(z,name) ((void)0) +#define _ACL_ZONE_DESTROY(z) ((void)0) +#define _ACL_INHERIT(c,v,d) (0) +#define _ACL_GET_ACCESS(pv,pa) (0) +#define _ACL_GET_DEFAULT(pv,pd) (0) +#define _ACL_ACCESS_EXISTS (NULL) +#define _ACL_DEFAULT_EXISTS (NULL) +#define _ACL_XFS_IACCESS(i,m,c) (-1) +#endif + +#endif /* __KERNEL__ */ + +#endif /* __XFS_ACL_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_ag.h linux.21rc5-ac2/fs/xfs/xfs_ag.h --- linux.21rc5/fs/xfs/xfs_ag.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_ag.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_AG_H__ +#define __XFS_AG_H__ + +/* + * Allocation group header + * This is divided into three structures, placed in sequential 512-byte + * buffers after a copy of the superblock (also in a 512-byte buffer). + */ + +struct xfs_buf; +struct xfs_mount; +struct xfs_trans; + +#define XFS_AGF_MAGIC 0x58414746 /* 'XAGF' */ +#define XFS_AGI_MAGIC 0x58414749 /* 'XAGI' */ +#define XFS_AGF_VERSION 1 +#define XFS_AGI_VERSION 1 +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGF_GOOD_VERSION) +int xfs_agf_good_version(unsigned v); +#define XFS_AGF_GOOD_VERSION(v) xfs_agf_good_version(v) +#else +#define XFS_AGF_GOOD_VERSION(v) ((v) == XFS_AGF_VERSION) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGI_GOOD_VERSION) +int xfs_agi_good_version(unsigned v); +#define XFS_AGI_GOOD_VERSION(v) xfs_agi_good_version(v) +#else +#define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION) +#endif + +/* + * Btree number 0 is bno, 1 is cnt. This value gives the size of the + * arrays below. + */ +#define XFS_BTNUM_AGF ((int)XFS_BTNUM_CNTi + 1) + +/* + * The second word of agf_levels in the first a.g. overlaps the EFS + * superblock's magic number. 
Since the magic numbers valid for EFS + * are > 64k, our value cannot be confused for an EFS superblock's. + */ + +typedef struct xfs_agf +{ + /* + * Common allocation group header information + */ + __uint32_t agf_magicnum; /* magic number == XFS_AGF_MAGIC */ + __uint32_t agf_versionnum; /* header version == XFS_AGF_VERSION */ + xfs_agnumber_t agf_seqno; /* sequence # starting from 0 */ + xfs_agblock_t agf_length; /* size in blocks of a.g. */ + /* + * Freespace information + */ + xfs_agblock_t agf_roots[XFS_BTNUM_AGF]; /* root blocks */ + __uint32_t agf_spare0; /* spare field */ + __uint32_t agf_levels[XFS_BTNUM_AGF]; /* btree levels */ + __uint32_t agf_spare1; /* spare field */ + __uint32_t agf_flfirst; /* first freelist block's index */ + __uint32_t agf_fllast; /* last freelist block's index */ + __uint32_t agf_flcount; /* count of blocks in freelist */ + xfs_extlen_t agf_freeblks; /* total free blocks */ + xfs_extlen_t agf_longest; /* longest free space */ +} xfs_agf_t; + +#define XFS_AGF_MAGICNUM 0x00000001 +#define XFS_AGF_VERSIONNUM 0x00000002 +#define XFS_AGF_SEQNO 0x00000004 +#define XFS_AGF_LENGTH 0x00000008 +#define XFS_AGF_ROOTS 0x00000010 +#define XFS_AGF_LEVELS 0x00000020 +#define XFS_AGF_FLFIRST 0x00000040 +#define XFS_AGF_FLLAST 0x00000080 +#define XFS_AGF_FLCOUNT 0x00000100 +#define XFS_AGF_FREEBLKS 0x00000200 +#define XFS_AGF_LONGEST 0x00000400 +#define XFS_AGF_NUM_BITS 11 +#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) + +/* disk block (xfs_daddr_t) in the AG */ +#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGF_BLOCK) +xfs_agblock_t xfs_agf_block(struct xfs_mount *mp); +#define XFS_AGF_BLOCK(mp) xfs_agf_block(mp) +#else +#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) +#endif + +/* + * Size of the unlinked inode hash table in the agi. + */ +#define XFS_AGI_UNLINKED_BUCKETS 64 + +typedef struct xfs_agi +{ + /* + * Common allocation group header information + */ + __uint32_t agi_magicnum; /* magic number == XFS_AGI_MAGIC */ + __uint32_t agi_versionnum; /* header version == XFS_AGI_VERSION */ + xfs_agnumber_t agi_seqno; /* sequence # starting from 0 */ + xfs_agblock_t agi_length; /* size in blocks of a.g. */ + /* + * Inode information + * Inodes are mapped by interpreting the inode number, so no + * mapping data is needed here. + */ + xfs_agino_t agi_count; /* count of allocated inodes */ + xfs_agblock_t agi_root; /* root of inode btree */ + __uint32_t agi_level; /* levels in inode btree */ + xfs_agino_t agi_freecount; /* number of free inodes */ + xfs_agino_t agi_newino; /* new inode just allocated */ + xfs_agino_t agi_dirino; /* last directory inode chunk */ + /* + * Hash table of inodes which have been unlinked but are + * still being referenced. 
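+ * Each bucket heads an on-disk list of inodes that were unlinked while
+ * still in use (an inode hashes to a bucket by its a.g.-relative inode
+ * number); log recovery walks these lists to finish freeing inodes
+ * orphaned by a crash.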
+ */ + xfs_agino_t agi_unlinked[XFS_AGI_UNLINKED_BUCKETS]; +} xfs_agi_t; + +#define XFS_AGI_MAGICNUM 0x00000001 +#define XFS_AGI_VERSIONNUM 0x00000002 +#define XFS_AGI_SEQNO 0x00000004 +#define XFS_AGI_LENGTH 0x00000008 +#define XFS_AGI_COUNT 0x00000010 +#define XFS_AGI_ROOT 0x00000020 +#define XFS_AGI_LEVEL 0x00000040 +#define XFS_AGI_FREECOUNT 0x00000080 +#define XFS_AGI_NEWINO 0x00000100 +#define XFS_AGI_DIRINO 0x00000200 +#define XFS_AGI_UNLINKED 0x00000400 +#define XFS_AGI_NUM_BITS 11 +#define XFS_AGI_ALL_BITS ((1 << XFS_AGI_NUM_BITS) - 1) + +/* disk block (xfs_daddr_t) in the AG */ +#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log)) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGI_BLOCK) +xfs_agblock_t xfs_agi_block(struct xfs_mount *mp); +#define XFS_AGI_BLOCK(mp) xfs_agi_block(mp) +#else +#define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp)) +#endif + +/* + * The third a.g. block contains the a.g. freelist, an array + * of block pointers to blocks owned by the allocation btree code. + */ +#define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log)) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGFL_BLOCK) +xfs_agblock_t xfs_agfl_block(struct xfs_mount *mp); +#define XFS_AGFL_BLOCK(mp) xfs_agfl_block(mp) +#else +#define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp)) +#endif +#define XFS_AGFL_SIZE(mp) ((mp)->m_sb.sb_sectsize / sizeof(xfs_agblock_t)) + +/* -- nathans TODO ... use of BBSIZE here - should be sector size -- */ +typedef struct xfs_agfl { + xfs_agblock_t agfl_bno[BBSIZE/sizeof(xfs_agblock_t)]; +} xfs_agfl_t; + +/* + * Busy block/extent entry. Used in perag to mark blocks that have been freed + * but whose transactions aren't committed to disk yet. + */ +typedef struct xfs_perag_busy { + xfs_agblock_t busy_start; + xfs_extlen_t busy_length; + struct xfs_trans *busy_tp; /* transaction that did the free */ +} xfs_perag_busy_t; + +/* + * Per-ag incore structure, copies of information in agf and agi, + * to improve the performance of allocation group selection. 
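+ * Keeping these copies incore lets the allocator compare free space and
+ * free inode counts across all allocation groups when choosing where to
+ * allocate, without re-reading each AGF and AGI buffer from disk.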
+ * + * pick sizes which fit in allocation buckets well + */ +#if (BITS_PER_LONG == 32) +#define XFS_PAGB_NUM_SLOTS 84 +#elif (BITS_PER_LONG == 64) +#define XFS_PAGB_NUM_SLOTS 128 +#endif + +typedef struct xfs_perag +{ + char pagf_init; /* this agf's entry is initialized */ + char pagi_init; /* this agi's entry is initialized */ + char pagf_metadata; /* the agf is preferred to be metadata */ + char pagi_inodeok; /* The agi is ok for inodes */ + __uint8_t pagf_levels[XFS_BTNUM_AGF]; + /* # of levels in bno & cnt btree */ + __uint32_t pagf_flcount; /* count of blocks in freelist */ + xfs_extlen_t pagf_freeblks; /* total free blocks */ + xfs_extlen_t pagf_longest; /* longest free space */ + xfs_agino_t pagi_freecount; /* number of free inodes */ +#ifdef __KERNEL__ + lock_t pagb_lock; /* lock for pagb_list */ +#endif + int pagb_count; /* pagb slots in use */ + xfs_perag_busy_t *pagb_list; /* unstable blocks */ +} xfs_perag_t; + +#define XFS_AG_MIN_BYTES (1LL << 24) /* 16 MB */ +#define XFS_AG_BEST_BYTES (1LL << 30) /* 1 GB */ +#define XFS_AG_MAX_BYTES (1LL << 32) /* 4 GB */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_MIN_BLOCKS) +xfs_extlen_t xfs_ag_min_blocks(int bl); +#define XFS_AG_MIN_BLOCKS(bl) xfs_ag_min_blocks(bl) +#else +#define XFS_AG_MIN_BLOCKS(bl) ((xfs_extlen_t)(XFS_AG_MIN_BYTES >> bl)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_BEST_BLOCKS) +xfs_extlen_t xfs_ag_best_blocks(int bl, xfs_drfsbno_t blks); +#define XFS_AG_BEST_BLOCKS(bl,blks) xfs_ag_best_blocks(bl,blks) +#else +/*--#define XFS_AG_BEST_BLOCKS(bl) ((xfs_extlen_t)(XFS_AG_BEST_BYTES >> bl))*/ +/* + * Best is XFS_AG_BEST_BLOCKS at and below 64 Gigabyte filesystems, and + * XFS_AG_MAX_BLOCKS above 64 Gigabytes. + */ +#define XFS_AG_BEST_BLOCKS(bl,blks) \ + ((xfs_extlen_t)((1LL << (36 - bl)) >= blks) ?
\ + ((xfs_extlen_t)(XFS_AG_BEST_BYTES >> bl)) : \ + XFS_AG_MAX_BLOCKS(bl)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_MAX_BLOCKS) +xfs_extlen_t xfs_ag_max_blocks(int bl); +#define XFS_AG_MAX_BLOCKS(bl) xfs_ag_max_blocks(bl) +#else +#define XFS_AG_MAX_BLOCKS(bl) ((xfs_extlen_t)(XFS_AG_MAX_BYTES >> bl)) +#endif + +#define XFS_MAX_AGNUMBER ((xfs_agnumber_t)(NULLAGNUMBER - 1)) + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_MAXLEVELS) +int xfs_ag_maxlevels(struct xfs_mount *mp); +#define XFS_AG_MAXLEVELS(mp) xfs_ag_maxlevels(mp) +#else +#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MIN_FREELIST) +int xfs_min_freelist(xfs_agf_t *a, struct xfs_mount *mp); +#define XFS_MIN_FREELIST(a,mp) xfs_min_freelist(a,mp) +#else +#define XFS_MIN_FREELIST(a,mp) \ + XFS_MIN_FREELIST_RAW( \ + INT_GET((a)->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT), \ + INT_GET((a)->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT), mp) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MIN_FREELIST_PAG) +int xfs_min_freelist_pag(xfs_perag_t *pag, struct xfs_mount *mp); +#define XFS_MIN_FREELIST_PAG(pag,mp) xfs_min_freelist_pag(pag,mp) +#else +#define XFS_MIN_FREELIST_PAG(pag,mp) \ + XFS_MIN_FREELIST_RAW((uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \ + (uint_t)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MIN_FREELIST_RAW) +int xfs_min_freelist_raw(int bl, int cl, struct xfs_mount *mp); +#define XFS_MIN_FREELIST_RAW(bl,cl,mp) xfs_min_freelist_raw(bl,cl,mp) +#else +#define XFS_MIN_FREELIST_RAW(bl,cl,mp) \ + (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + \ + MIN(cl + 1, XFS_AG_MAXLEVELS(mp))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGB_TO_FSB) +xfs_fsblock_t xfs_agb_to_fsb(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t agbno); +#define XFS_AGB_TO_FSB(mp,agno,agbno) xfs_agb_to_fsb(mp,agno,agbno) +#else +#define XFS_AGB_TO_FSB(mp,agno,agbno) \ + (((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_AGNO) +xfs_agnumber_t xfs_fsb_to_agno(struct xfs_mount *mp, xfs_fsblock_t fsbno); +#define XFS_FSB_TO_AGNO(mp,fsbno) xfs_fsb_to_agno(mp,fsbno) +#else +#define XFS_FSB_TO_AGNO(mp,fsbno) \ + ((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_AGBNO) +xfs_agblock_t xfs_fsb_to_agbno(struct xfs_mount *mp, xfs_fsblock_t fsbno); +#define XFS_FSB_TO_AGBNO(mp,fsbno) xfs_fsb_to_agbno(mp,fsbno) +#else +#define XFS_FSB_TO_AGBNO(mp,fsbno) \ + ((xfs_agblock_t)((fsbno) & XFS_MASK32LO((mp)->m_sb.sb_agblklog))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGB_TO_DADDR) +xfs_daddr_t xfs_agb_to_daddr(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t agbno); +#define XFS_AGB_TO_DADDR(mp,agno,agbno) xfs_agb_to_daddr(mp,agno,agbno) +#else +#define XFS_AGB_TO_DADDR(mp,agno,agbno) \ + ((xfs_daddr_t)(XFS_FSB_TO_BB(mp, \ + (xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))) +#endif +/* + * XFS_DADDR_TO_AGNO and XFS_DADDR_TO_AGBNO moved to xfs_mount.h + * to avoid header file ordering change + */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_DADDR) +xfs_daddr_t xfs_ag_daddr(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_daddr_t d); +#define XFS_AG_DADDR(mp,agno,d) xfs_ag_daddr(mp,agno,d) +#else +#define XFS_AG_DADDR(mp,agno,d) (XFS_AGB_TO_DADDR(mp, agno, 0) + (d)) +#endif + +#if XFS_WANT_FUNCS || 
(XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_AGF) +xfs_agf_t *xfs_buf_to_agf(struct xfs_buf *bp); +#define XFS_BUF_TO_AGF(bp) xfs_buf_to_agf(bp) +#else +#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)XFS_BUF_PTR(bp)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_AGI) +xfs_agi_t *xfs_buf_to_agi(struct xfs_buf *bp); +#define XFS_BUF_TO_AGI(bp) xfs_buf_to_agi(bp) +#else +#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)XFS_BUF_PTR(bp)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_AGFL) +xfs_agfl_t *xfs_buf_to_agfl(struct xfs_buf *bp); +#define XFS_BUF_TO_AGFL(bp) xfs_buf_to_agfl(bp) +#else +#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)XFS_BUF_PTR(bp)) +#endif + +/* + * For checking for bad ranges of xfs_daddr_t's, covering multiple + * allocation groups or a single xfs_daddr_t that's a superblock copy. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_CHECK_DADDR) +void xfs_ag_check_daddr(struct xfs_mount *mp, xfs_daddr_t d, xfs_extlen_t len); +#define XFS_AG_CHECK_DADDR(mp,d,len) xfs_ag_check_daddr(mp,d,len) +#else +#define XFS_AG_CHECK_DADDR(mp,d,len) \ + ((len) == 1 ? \ + ASSERT((d) == XFS_SB_DADDR || \ + XFS_DADDR_TO_AGBNO(mp, d) != XFS_SB_DADDR) : \ + ASSERT(XFS_DADDR_TO_AGNO(mp, d) == \ + XFS_DADDR_TO_AGNO(mp, (d) + (len) - 1))) +#endif + +#endif /* __XFS_AG_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_alloc_btree.c linux.21rc5-ac2/fs/xfs/xfs_alloc_btree.c --- linux.21rc5/fs/xfs/xfs_alloc_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_alloc_btree.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,2204 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Free space allocation for XFS. + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_error.h" + +/* + * Prototypes for internal functions. 
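+ * These single-level routines serve both freespace btrees (by-bno and
+ * by-cnt, selected by cur->bc_btnum); the external insert/delete/lookup
+ * entry points drive them one btree level at a time.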
+ */ + +STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int); +STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC int xfs_alloc_lshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *); +STATIC int xfs_alloc_rshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *, + xfs_alloc_key_t *, xfs_btree_cur_t **, int *); +STATIC int xfs_alloc_updkey(xfs_btree_cur_t *, xfs_alloc_key_t *, int); + +/* + * Internal functions. + */ + +/* + * Single level of the xfs_alloc_delete record deletion routine. + * Delete record pointed to by cur/level. + * Remove the record from its block then rebalance the tree. + * Return 0 for error, 1 for done, 2 to go on to the next level. + */ +STATIC int /* error */ +xfs_alloc_delrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level removing record from */ + int *stat) /* fail/done/go-on */ +{ + xfs_agf_t *agf; /* allocation group freelist header */ + xfs_alloc_block_t *block; /* btree block record/key lives in */ + xfs_agblock_t bno; /* btree block number */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ + int i; /* loop index */ + xfs_alloc_key_t key; /* kp points here if block is level 0 */ + xfs_agblock_t lbno; /* left block's block number */ + xfs_buf_t *lbp; /* left block's buffer pointer */ + xfs_alloc_block_t *left; /* left btree block */ + xfs_alloc_key_t *lkp=NULL; /* left block key pointer */ + xfs_alloc_ptr_t *lpp=NULL; /* left block address pointer */ + int lrecs=0; /* number of records in left block */ + xfs_alloc_rec_t *lrp; /* left block record pointer */ + xfs_mount_t *mp; /* mount structure */ + int ptr; /* index in btree block for this rec */ + xfs_agblock_t rbno; /* right block's block number */ + xfs_buf_t *rbp; /* right block's buffer pointer */ + xfs_alloc_block_t *right; /* right btree block */ + xfs_alloc_key_t *rkp; /* right block key pointer */ + xfs_alloc_ptr_t *rpp; /* right block address pointer */ + int rrecs=0; /* number of records in right block */ + xfs_alloc_rec_t *rrp; /* right block record pointer */ + xfs_btree_cur_t *tcur; /* temporary btree cursor */ + + /* + * Get the index of the entry being deleted, check for nothing there. + */ + ptr = cur->bc_ptrs[level]; + if (ptr == 0) { + *stat = 0; + return 0; + } + /* + * Get the buffer & block containing the record or key/ptr. + */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; +#endif + /* + * Fail if we're off the end of the block. + */ + if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + *stat = 0; + return 0; + } + XFS_STATS_INC(xfsstats.xs_abt_delrec); + /* + * It's a nonleaf. Excise the key and ptr being deleted, by + * sliding the entries past them down one. + * Log the changed areas of the block. 
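+ * E.g. with bb_numrecs == 4 and ptr == 2, the memmove()s below slide
+ * entries 3..4 down one slot each, and bb_numrecs then drops to 3.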
+ */ + if (level > 0) { + lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); + lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); +#ifdef DEBUG + for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + memmove(&lkp[ptr - 1], &lkp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lkp)); /* INT_: mem copy */ + memmove(&lpp[ptr - 1], &lpp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lpp)); /* INT_: mem copy */ + xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + } + } + /* + * It's a leaf. Excise the record being deleted, by sliding the + * entries past it down one. Log the changed areas of the block. + */ + else { + lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + memmove(&lrp[ptr - 1], &lrp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lrp)); + xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + } + /* + * If it's the first record in the block, we'll need a key + * structure to pass up to the next level (updkey). + */ + if (ptr == 1) { + key.ar_startblock = lrp->ar_startblock; /* INT_: direct copy */ + key.ar_blockcount = lrp->ar_blockcount; /* INT_: direct copy */ + lkp = &key; + } + } + /* + * Decrement and log the number of entries in the block. + */ + INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1); + xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); + /* + * See if the longest free extent in the allocation group was + * changed by this operation. True if it's the by-size btree, and + * this is the leaf level, and there is no right sibling block, + * and this was the last record. + */ + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + mp = cur->bc_mp; + + if (level == 0 && + cur->bc_btnum == XFS_BTNUM_CNT && + INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && + ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + ASSERT(ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT) + 1); + /* + * There are still records in the block. Grab the size + * from the last one. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + rrp = XFS_ALLOC_REC_ADDR(block, INT_GET(block->bb_numrecs, ARCH_CONVERT), cur); + INT_COPY(agf->agf_longest, rrp->ar_blockcount, ARCH_CONVERT); + } + /* + * No free extents left. + */ + else + INT_ZERO(agf->agf_longest, ARCH_CONVERT); + mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest = + INT_GET(agf->agf_longest, ARCH_CONVERT); + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_LONGEST); + } + /* + * Is this the root level? If so, we're almost done. + */ + if (level == cur->bc_nlevels - 1) { + /* + * If this is the root level, + * and there's only one entry left, + * and it's NOT the leaf level, + * then we can get rid of this level. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) { + /* + * lpp is still set to the first pointer in the block. + * Make it the new root of the btree. + */ + bno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); + INT_COPY(agf->agf_roots[cur->bc_btnum], *lpp, ARCH_CONVERT); + INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, -1); + mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_levels[cur->bc_btnum]--; + /* + * Put this buffer/block on the ag's freelist. 
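+ * Freed btree blocks go to the a.g. freelist (AGFL) rather than back
+ * into the freespace btrees themselves, so shrinking a btree never
+ * recurses into the very btrees being modified.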
+ */ + if ((error = xfs_alloc_put_freelist(cur->bc_tp, + cur->bc_private.a.agbp, NULL, bno))) + return error; + /* + * Since blocks move to the free list without the + * coordination used in xfs_bmap_finish, we can't allow + * block to be available for reallocation and + * non-transaction writing (user data) until we know + * that the transaction that moved it to the free list + * is permanently on disk. We track the blocks by + * declaring these blocks as "busy"; the busy list is + * maintained on a per-ag basis and each transaction + * records which entries should be removed when the + * iclog commits to disk. If a busy block is + * allocated, the iclog is pushed up to the LSN + * that freed the block. + */ + xfs_alloc_mark_busy(cur->bc_tp, + INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); + + xfs_trans_agbtree_delta(cur->bc_tp, -1); + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_ROOTS | XFS_AGF_LEVELS); + /* + * Update the cursor so there's one fewer level. + */ + xfs_btree_setbuf(cur, level, 0); + cur->bc_nlevels--; + } else if (level > 0 && + (error = xfs_alloc_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * If we deleted the leftmost entry in the block, update the + * key values above us in the tree. + */ + if (ptr == 1 && (error = xfs_alloc_updkey(cur, lkp, level + 1))) + return error; + /* + * If the number of records remaining in the block is at least + * the minimum, we're done. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { + if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * Otherwise, we have to move some records around to keep the + * tree balanced. Look at the left and right sibling blocks to + * see if we can re-balance by moving only one record. + */ + rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); + bno = NULLAGBLOCK; + ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); + /* + * Duplicate the cursor so our btree manipulations here won't + * disrupt the next level up. + */ + if ((error = xfs_btree_dup_cursor(cur, &tcur))) + return error; + /* + * If there's a right sibling, see if it's ok to shift an entry + * out of it. + */ + if (rbno != NULLAGBLOCK) { + /* + * Move the temp cursor to the last entry in the next block. + * Actually any entry but the first would suffice. + */ + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_increment(tcur, level, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Grab a pointer to the block. + */ + rbp = tcur->bc_bufs[level]; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + goto error0; +#endif + /* + * Grab the current block number, for future use. + */ + bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + /* + * If right block is full enough so that removing one entry + * won't make it too empty, and left-shifting an entry out + * of right to us works, we're done. 
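+ * (That is, the shift only proceeds if right would still hold at least
+ * XFS_ALLOC_BLOCK_MINRECS entries after giving one up, so no block is
+ * ever left below the minimum fill.)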
+ */ + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_ALLOC_BLOCK_MINRECS(level, cur)) { + if ((error = xfs_alloc_lshift(tcur, level, &i))) + goto error0; + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_ALLOC_BLOCK_MINRECS(level, cur)); + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + if (level > 0 && + (error = xfs_alloc_decrement(cur, level, + &i))) + return error; + *stat = 1; + return 0; + } + } + /* + * Otherwise, grab the number of records in right for + * future reference, and fix up the temp cursor to point + * to our block again (last record). + */ + rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + if (lbno != NULLAGBLOCK) { + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_decrement(tcur, level, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + } + /* + * If there's a left sibling, see if it's ok to shift an entry + * out of it. + */ + if (lbno != NULLAGBLOCK) { + /* + * Move the temp cursor to the first entry in the + * previous block. + */ + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_decrement(tcur, level, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + xfs_btree_firstrec(tcur, level); + /* + * Grab a pointer to the block. + */ + lbp = tcur->bc_bufs[level]; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + goto error0; +#endif + /* + * Grab the current block number, for future use. + */ + bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + /* + * If left block is full enough so that removing one entry + * won't make it too empty, and right-shifting an entry out + * of left to us works, we're done. + */ + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_ALLOC_BLOCK_MINRECS(level, cur)) { + if ((error = xfs_alloc_rshift(tcur, level, &i))) + goto error0; + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_ALLOC_BLOCK_MINRECS(level, cur)); + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + if (level == 0) + cur->bc_ptrs[0]++; + *stat = 1; + return 0; + } + } + /* + * Otherwise, grab the number of records in right for + * future reference. + */ + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * Delete the temp cursor, we're done with it. + */ + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + /* + * If here, we need to do a join to keep the tree balanced. + */ + ASSERT(bno != NULLAGBLOCK); + /* + * See if we can join with the left neighbor block. + */ + if (lbno != NULLAGBLOCK && + lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + /* + * Set "right" to be the starting block, + * "left" to be the left neighbor. + */ + rbno = bno; + right = block; + rbp = bp; + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, lbno, 0, &lbp, + XFS_ALLOC_BTREE_REF))) + return error; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; + } + /* + * If that won't work, see if we can join with the right neighbor block. + */ + else if (rbno != NULLAGBLOCK && + rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + /* + * Set "left" to be the starting block, + * "right" to be the right neighbor. 
+ */ + lbno = bno; + left = block; + lbp = bp; + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, rbno, 0, &rbp, + XFS_ALLOC_BTREE_REF))) + return error; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; + } + /* + * Otherwise, we can't fix the imbalance. + * Just return. This is probably a logic error, but it's not fatal. + */ + else { + if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * We're now going to join "left" and "right" by moving all the stuff + * in "right" to "left" and deleting "right". + */ + if (level > 0) { + /* + * It's a non-leaf. Move keys and pointers. + */ + lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); + rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */ + memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */ + xfs_alloc_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_alloc_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } else { + /* + * It's a leaf. Move records. + */ + lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); + memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp)); + xfs_alloc_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } + /* + * If we joined with the left neighbor, set the buffer in the + * cursor to the left block, and fix up the index. + */ + if (bp != lbp) { + xfs_btree_setbuf(cur, level, lbp); + cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * If we joined with the right neighbor and there's a level above + * us, increment the cursor at that level. + */ + else if (level + 1 < cur->bc_nlevels && + (error = xfs_alloc_increment(cur, level + 1, &i))) + return error; + /* + * Fix up the number of records in the surviving block. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + /* + * Fix up the right block pointer in the surviving block, and log it. + */ + left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ + xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + /* + * If there is a right sibling now, make it point to the + * remaining block. 
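+ * Blocks on each level are chained into a doubly linked list through
+ * bb_leftsib/bb_rightsib, so the old right block's right neighbor must
+ * now point back at the surviving left block.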
+ */ + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_alloc_block_t *rrblock; + xfs_buf_t *rrbp; + + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, + &rrbp, XFS_ALLOC_BTREE_REF))) + return error; + rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); + if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) + return error; + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); + xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); + } + /* + * Free the deleting block by putting it on the freelist. + */ + if ((error = xfs_alloc_put_freelist(cur->bc_tp, cur->bc_private.a.agbp, + NULL, rbno))) + return error; + /* + * Since blocks move to the free list without the coordination + * used in xfs_bmap_finish, we can't allow block to be available + * for reallocation and non-transaction writing (user data) + * until we know that the transaction that moved it to the free + * list is permanently on disk. We track the blocks by declaring + * these blocks as "busy"; the busy list is maintained on a + * per-ag basis and each transaction records which entries + * should be removed when the iclog commits to disk. If a + * busy block is allocated, the iclog is pushed up to the + * LSN that freed the block. + */ + xfs_alloc_mark_busy(cur->bc_tp, + INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); + + xfs_trans_agbtree_delta(cur->bc_tp, -1); + /* + * Adjust the current level's cursor so that we're left referring + * to the right node, after we're done. + * If this leaves the ptr value 0 our caller will fix it up. + */ + if (level > 0) + cur->bc_ptrs[level]--; + /* + * Return value means the next level up has something to do. + */ + *stat = 2; + return 0; + +error0: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} + +/* + * Insert one record/level. Return information to the caller + * allowing the next level up to proceed if necessary. + */ +STATIC int /* error */ +xfs_alloc_insrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to insert record at */ + xfs_agblock_t *bnop, /* i/o: block number inserted */ + xfs_alloc_rec_t *recp, /* i/o: record data inserted */ + xfs_btree_cur_t **curp, /* output: new cursor replacing cur */ + int *stat) /* output: success/failure */ +{ + xfs_agf_t *agf; /* allocation group freelist header */ + xfs_alloc_block_t *block; /* btree block record/key lives in */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ + int i; /* loop index */ + xfs_alloc_key_t key; /* key value being inserted */ + xfs_alloc_key_t *kp; /* pointer to btree keys */ + xfs_agblock_t nbno; /* block number of allocated block */ + xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */ + xfs_alloc_key_t nkey; /* new key value, from split */ + xfs_alloc_rec_t nrec; /* new record value, for caller */ + int optr; /* old ptr value */ + xfs_alloc_ptr_t *pp; /* pointer to btree addresses */ + int ptr; /* index in btree block for this rec */ + xfs_alloc_rec_t *rp; /* pointer to btree records */ + + ASSERT(INT_GET(recp->ar_blockcount, ARCH_CONVERT) > 0); + /* + * If we made it to the root level, allocate a new root block + * and we're done. + */ + if (level >= cur->bc_nlevels) { + XFS_STATS_INC(xfsstats.xs_abt_insrec); + if ((error = xfs_alloc_newroot(cur, &i))) + return error; + *bnop = NULLAGBLOCK; + *stat = i; + return 0; + } + /* + * Make a key out of the record data to be inserted, and save it. 
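+ * A key is just the searchable prefix of a record: ar_startblock and
+ * ar_blockcount, which is all the interior levels need to route a
+ * lookup.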
+ */ + key.ar_startblock = recp->ar_startblock; /* INT_: direct copy */ + key.ar_blockcount = recp->ar_blockcount; /* INT_: direct copy */ + optr = ptr = cur->bc_ptrs[level]; + /* + * If we're off the left edge, return failure. + */ + if (ptr == 0) { + *stat = 0; + return 0; + } + XFS_STATS_INC(xfsstats.xs_abt_insrec); + /* + * Get pointers to the btree buffer and block. + */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; + /* + * Check that the new entry is being inserted in the right place. + */ + if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (level == 0) { + rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); + xfs_btree_check_rec(cur->bc_btnum, recp, rp); + } else { + kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur); + xfs_btree_check_key(cur->bc_btnum, &key, kp); + } + } +#endif + nbno = NULLAGBLOCK; + ncur = (xfs_btree_cur_t *)0; + /* + * If the block is full, we can't insert the new entry until we + * make the block un-full. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + /* + * First, try shifting an entry to the right neighbor. + */ + if ((error = xfs_alloc_rshift(cur, level, &i))) + return error; + if (i) { + /* nothing */ + } + /* + * Next, try shifting an entry to the left neighbor. + */ + else { + if ((error = xfs_alloc_lshift(cur, level, &i))) + return error; + if (i) + optr = ptr = cur->bc_ptrs[level]; + else { + /* + * Next, try splitting the current block in + * half. If this works we have to re-set our + * variables because we could be in a + * different block now. + */ + if ((error = xfs_alloc_split(cur, level, &nbno, + &nkey, &ncur, &i))) + return error; + if (i) { + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = + xfs_btree_check_sblock(cur, + block, level, bp))) + return error; +#endif + ptr = cur->bc_ptrs[level]; + nrec.ar_startblock = nkey.ar_startblock; /* INT_: direct copy */ + nrec.ar_blockcount = nkey.ar_blockcount; /* INT_: direct copy */ + } + /* + * Otherwise the insert fails. + */ + else { + *stat = 0; + return 0; + } + } + } + } + /* + * At this point we know there's room for our new entry in the block + * we're pointing at. + */ + if (level > 0) { + /* + * It's a non-leaf entry. Make a hole for the new data + * in the key and ptr regions of the block. + */ + kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); + pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); +#ifdef DEBUG + for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) + return error; + } +#endif + memmove(&kp[ptr], &kp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); /* INT_: copy */ + memmove(&pp[ptr], &pp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); /* INT_: copy */ +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, *bnop, level))) + return error; +#endif + /* + * Now stuff the new data in, bump numrecs and log the new data. 
+ */ + kp[ptr - 1] = key; + INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); + INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); + xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); +#ifdef DEBUG + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) + xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, + kp + ptr); +#endif + } else { + /* + * It's a leaf entry. Make a hole for the new record. + */ + rp = XFS_ALLOC_REC_ADDR(block, 1, cur); + memmove(&rp[ptr], &rp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp)); + /* + * Now stuff the new record in, bump numrecs + * and log the new data. + */ + rp[ptr - 1] = *recp; /* INT_: struct copy */ + INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); + xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); +#ifdef DEBUG + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) + xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, + rp + ptr); +#endif + } + /* + * Log the new number of records in the btree header. + */ + xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); + /* + * If we inserted at the start of a block, update the parents' keys. + */ + if (optr == 1 && (error = xfs_alloc_updkey(cur, &key, level + 1))) + return error; + /* + * Look to see if the longest extent in the allocation group + * needs to be updated. + */ + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + if (level == 0 && + cur->bc_btnum == XFS_BTNUM_CNT && + INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && + INT_GET(recp->ar_blockcount, ARCH_CONVERT) > INT_GET(agf->agf_longest, ARCH_CONVERT)) { + /* + * If this is a leaf in the by-size btree and there + * is no right sibling block and this block is bigger + * than the previous longest block, update it. + */ + INT_COPY(agf->agf_longest, recp->ar_blockcount, ARCH_CONVERT); + cur->bc_mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest + = INT_GET(recp->ar_blockcount, ARCH_CONVERT); + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_LONGEST); + } + /* + * Return the new block number, if any. + * If there is one, give back a record value and a cursor too. + */ + *bnop = nbno; + if (nbno != NULLAGBLOCK) { + *recp = nrec; /* INT_: struct copy */ + *curp = ncur; /* INT_: struct copy */ + } + *stat = 1; + return 0; +} + +/* + * Log header fields from a btree block. + */ +STATIC void +xfs_alloc_log_block( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* buffer containing btree block */ + int fields) /* mask of fields: XFS_BB_... */ +{ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + static const short offsets[] = { /* table of offsets */ + offsetof(xfs_alloc_block_t, bb_magic), + offsetof(xfs_alloc_block_t, bb_level), + offsetof(xfs_alloc_block_t, bb_numrecs), + offsetof(xfs_alloc_block_t, bb_leftsib), + offsetof(xfs_alloc_block_t, bb_rightsib), + sizeof(xfs_alloc_block_t) + }; + + xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last); + xfs_trans_log_buf(tp, bp, first, last); +} + +/* + * Log keys from a btree block (nonleaf). 
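+ * As in xfs_alloc_log_block() above, the index range is converted to
+ * first/last byte offsets so that xfs_trans_log_buf() logs only the
+ * bytes actually dirtied.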
+ */ +STATIC void +xfs_alloc_log_keys( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int kfirst, /* index of first key to log */ + int klast) /* index of last key to log */ +{ + xfs_alloc_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + xfs_alloc_key_t *kp; /* key pointer in btree block */ + int last; /* last byte offset logged */ + + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Log block pointer fields from a btree block (nonleaf). + */ +STATIC void +xfs_alloc_log_ptrs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int pfirst, /* index of first pointer to log */ + int plast) /* index of last pointer to log */ +{ + xfs_alloc_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + xfs_alloc_ptr_t *pp; /* block-pointer pointer in btree blk */ + + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Log records from a btree block (leaf). + */ +STATIC void +xfs_alloc_log_recs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int rfirst, /* index of first record to log */ + int rlast) /* index of last record to log */ +{ + xfs_alloc_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + xfs_alloc_rec_t *rp; /* record pointer for btree block */ + + + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + rp = XFS_ALLOC_REC_ADDR(block, 1, cur); +#ifdef DEBUG + { + xfs_agf_t *agf; + xfs_alloc_rec_t *p; + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) + ASSERT(INT_GET(p->ar_startblock, ARCH_CONVERT) + INT_GET(p->ar_blockcount, ARCH_CONVERT) <= + INT_GET(agf->agf_length, ARCH_CONVERT)); + } +#endif + first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Lookup the record. The cursor is made to point to it, based on dir. + * Return 0 if can't find any such record, 1 for success. + */ +STATIC int /* error */ +xfs_alloc_lookup( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_lookup_t dir, /* <=, ==, or >= */ + int *stat) /* success/failure */ +{ + xfs_agblock_t agbno; /* a.g. relative btree block number */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_alloc_block_t *block=NULL; /* current btree block */ + int diff; /* difference for the current key */ + int error; /* error return value */ + int keyno=0; /* current key number */ + int level; /* level in the btree */ + xfs_mount_t *mp; /* file system mount point */ + + XFS_STATS_INC(xfsstats.xs_abt_lookup); + /* + * Get the allocation group header, and the root block number. + */ + mp = cur->bc_mp; + + { + xfs_agf_t *agf; /* a.g. 
freespace header */ + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + agno = INT_GET(agf->agf_seqno, ARCH_CONVERT); + agbno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); + } + /* + * Iterate over each level in the btree, starting at the root. + * For each level above the leaves, find the key we need, based + * on the lookup record, then follow the corresponding block + * pointer down to the next level. + */ + for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { + xfs_buf_t *bp; /* buffer pointer for btree block */ + xfs_daddr_t d; /* disk address of btree block */ + + /* + * Get the disk address we're looking for. + */ + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + /* + * If the old buffer at this level is for a different block, + * throw it away, otherwise just use it. + */ + bp = cur->bc_bufs[level]; + if (bp && XFS_BUF_ADDR(bp) != d) + bp = (xfs_buf_t *)0; + if (!bp) { + /* + * Need to get a new buffer. Read it, then + * set it in the cursor, releasing the old one. + */ + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, agno, + agbno, 0, &bp, XFS_ALLOC_BTREE_REF))) + return error; + xfs_btree_setbuf(cur, level, bp); + /* + * Point to the btree block, now that we have the buffer + */ + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, level, + bp))) + return error; + } else + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + /* + * If we already had a key match at a higher level, we know + * we need to use the first entry in this block. + */ + if (diff == 0) + keyno = 1; + /* + * Otherwise we need to search this block. Do a binary search. + */ + else { + int high; /* high entry number */ + xfs_alloc_key_t *kkbase=NULL;/* base of keys in block */ + xfs_alloc_rec_t *krbase=NULL;/* base of records in block */ + int low; /* low entry number */ + + /* + * Get a pointer to keys or records. + */ + if (level > 0) + kkbase = XFS_ALLOC_KEY_ADDR(block, 1, cur); + else + krbase = XFS_ALLOC_REC_ADDR(block, 1, cur); + /* + * Set low and high entry numbers, 1-based. + */ + low = 1; + if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { + /* + * If the block is empty, the tree must + * be an empty leaf. + */ + ASSERT(level == 0 && cur->bc_nlevels == 1); + cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; + *stat = 0; + return 0; + } + /* + * Binary search the block. + */ + while (low <= high) { + xfs_extlen_t blockcount; /* key value */ + xfs_agblock_t startblock; /* key value */ + + XFS_STATS_INC(xfsstats.xs_abt_compare); + /* + * keyno is average of low and high. + */ + keyno = (low + high) >> 1; + /* + * Get startblock & blockcount. + */ + if (level > 0) { + xfs_alloc_key_t *kkp; + + kkp = kkbase + keyno - 1; + startblock = INT_GET(kkp->ar_startblock, ARCH_CONVERT); + blockcount = INT_GET(kkp->ar_blockcount, ARCH_CONVERT); + } else { + xfs_alloc_rec_t *krp; + + krp = krbase + keyno - 1; + startblock = INT_GET(krp->ar_startblock, ARCH_CONVERT); + blockcount = INT_GET(krp->ar_blockcount, ARCH_CONVERT); + } + /* + * Compute difference to get next direction. + */ + if (cur->bc_btnum == XFS_BTNUM_BNO) + diff = (int)startblock - + (int)cur->bc_rec.a.ar_startblock; + else if (!(diff = (int)blockcount - + (int)cur->bc_rec.a.ar_blockcount)) + diff = (int)startblock - + (int)cur->bc_rec.a.ar_startblock; + /* + * Less than, move right. + */ + if (diff < 0) + low = keyno + 1; + /* + * Greater than, move left. + */ + else if (diff > 0) + high = keyno - 1; + /* + * Equal, we're done. 
+ */ + else + break; + } + } + /* + * If there are more levels, set up for the next level + * by getting the block number and filling in the cursor. + */ + if (level > 0) { + /* + * If we moved left, need the previous key number, + * unless there isn't one. + */ + if (diff > 0 && --keyno < 1) + keyno = 1; + agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, agbno, level))) + return error; +#endif + cur->bc_ptrs[level] = keyno; + } + } + /* + * Done with the search. + * See if we need to adjust the results. + */ + if (dir != XFS_LOOKUP_LE && diff < 0) { + keyno++; + /* + * If ge search and we went off the end of the block, but it's + * not the last block, we're in the wrong block. + */ + if (dir == XFS_LOOKUP_GE && + keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && + INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + int i; + + cur->bc_ptrs[0] = keyno; + if ((error = xfs_alloc_increment(cur, 0, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + *stat = 1; + return 0; + } + } + else if (dir == XFS_LOOKUP_LE && diff > 0) + keyno--; + cur->bc_ptrs[0] = keyno; + /* + * Return if we succeeded or not. + */ + if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) + *stat = 0; + else + *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); + return 0; +} + +/* + * Move 1 record left from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_alloc_lshift( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to shift record on */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef DEBUG + int i; /* loop index */ +#endif + xfs_alloc_key_t key; /* key value for leaf level upward */ + xfs_buf_t *lbp; /* buffer for left neighbor block */ + xfs_alloc_block_t *left; /* left neighbor btree block */ + int nrec; /* new number of left block entries */ + xfs_buf_t *rbp; /* buffer for right (current) block */ + xfs_alloc_block_t *right; /* right (current) btree block */ + xfs_alloc_key_t *rkp=NULL; /* key pointer for right block */ + xfs_alloc_ptr_t *rpp=NULL; /* address pointer for right block */ + xfs_alloc_rec_t *rrp=NULL; /* record pointer for right block */ + + /* + * Set up variables for this block as "right". + */ + rbp = cur->bc_bufs[level]; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; +#endif + /* + * If we've got no left sibling then we can't shift an entry left. + */ + if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + if (cur->bc_ptrs[level] <= 1) { + *stat = 0; + return 0; + } + /* + * Set up the left neighbor as "left". + */ + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, + XFS_ALLOC_BTREE_REF))) + return error; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; + /* + * If it's full, it can't take another entry. + */ + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + *stat = 0; + return 0; + } + nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; + /* + * If non-leaf, copy a key and a ptr to the left block. 
+ */ + if (level > 0) { + xfs_alloc_key_t *lkp; /* key pointer for left block */ + xfs_alloc_ptr_t *lpp; /* address pointer for left block */ + + lkp = XFS_ALLOC_KEY_ADDR(left, nrec, cur); + rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); + *lkp = *rkp; + xfs_alloc_log_keys(cur, lbp, nrec, nrec); + lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); + rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) + return error; +#endif + *lpp = *rpp; /* INT_: copy */ + xfs_alloc_log_ptrs(cur, lbp, nrec, nrec); + xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp); + } + /* + * If leaf, copy a record to the left block. + */ + else { + xfs_alloc_rec_t *lrp; /* record pointer for left block */ + + lrp = XFS_ALLOC_REC_ADDR(left, nrec, cur); + rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); + *lrp = *rrp; + xfs_alloc_log_recs(cur, lbp, nrec, nrec); + xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); + } + /* + * Bump and log left's numrecs, decrement and log right's numrecs. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); + xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); + INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); + xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); + /* + * Slide the contents of right down one entry. + */ + if (level > 0) { +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), + level))) + return error; + } +#endif + memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } else { + memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ + key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ + rkp = &key; + } + /* + * Update the parent key values of right. + */ + if ((error = xfs_alloc_updkey(cur, rkp, level + 1))) + return error; + /* + * Slide the cursor value left one. + */ + cur->bc_ptrs[level]--; + *stat = 1; + return 0; +} + +/* + * Allocate a new root block, fill it in. + */ +STATIC int /* error */ +xfs_alloc_newroot( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + xfs_agblock_t lbno; /* left block number */ + xfs_buf_t *lbp; /* left btree buffer */ + xfs_alloc_block_t *left; /* left btree block */ + xfs_mount_t *mp; /* mount structure */ + xfs_agblock_t nbno; /* new block number */ + xfs_buf_t *nbp; /* new (root) buffer */ + xfs_alloc_block_t *new; /* new (root) btree block */ + int nptr; /* new value for key index, 1 or 2 */ + xfs_agblock_t rbno; /* right block number */ + xfs_buf_t *rbp; /* right btree buffer */ + xfs_alloc_block_t *right; /* right btree block */ + + mp = cur->bc_mp; + + ASSERT(cur->bc_nlevels < XFS_AG_MAXLEVELS(mp)); + /* + * Get a buffer from the freelist blocks, for the new root. + */ + if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp, + &nbno))) + return error; + /* + * None available, we fail. 
+ */ + if (nbno == NULLAGBLOCK) { + *stat = 0; + return 0; + } + xfs_trans_agbtree_delta(cur->bc_tp, 1); + nbp = xfs_btree_get_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, nbno, + 0); + new = XFS_BUF_TO_ALLOC_BLOCK(nbp); + /* + * Set the root data in the a.g. freespace structure. + */ + { + xfs_agf_t *agf; /* a.g. freespace header */ + xfs_agnumber_t seqno; + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + INT_SET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT, nbno); + INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, 1); + seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); + mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_ROOTS | XFS_AGF_LEVELS); + } + /* + * At the previous root level there are now two blocks: the old + * root, and the new block generated when it was split. + * We don't know which one the cursor is pointing at, so we + * set up variables "left" and "right" for each case. + */ + lbp = cur->bc_bufs[cur->bc_nlevels - 1]; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) + return error; +#endif + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + /* + * Our block is left, pick up the right block. + */ + lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); + rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, rbno, 0, &rbp, + XFS_ALLOC_BTREE_REF))) + return error; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, + cur->bc_nlevels - 1, rbp))) + return error; + nptr = 1; + } else { + /* + * Our block is right, pick up the left block. + */ + rbp = lbp; + right = left; + rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); + lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, lbno, 0, &lbp, + XFS_ALLOC_BTREE_REF))) + return error; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, + cur->bc_nlevels - 1, lbp))) + return error; + nptr = 2; + } + /* + * Fill in the new block's btree header and log it. + */ + INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); + INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels); + INT_SET(new->bb_numrecs, ARCH_CONVERT, 2); + INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); + ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); + /* + * Fill in the key data in the new root. + */ + { + xfs_alloc_key_t *kp; /* btree key pointer */ + + kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); + if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) { + kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ + kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ + } else { + xfs_alloc_rec_t *rp; /* btree record pointer */ + + rp = XFS_ALLOC_REC_ADDR(left, 1, cur); + kp[0].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ + kp[0].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ + rp = XFS_ALLOC_REC_ADDR(right, 1, cur); + kp[1].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ + kp[1].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ + } + } + xfs_alloc_log_keys(cur, nbp, 1, 2); + /* + * Fill in the pointer data in the new root. 
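+ *
+ * Illustrative aside (editor's sketch, not part of the original
+ * patch; in-core fields are hypothetical): once filled in, the new
+ * root is a two-entry interior block, conceptually
+ *
+ *	new->level  = old_level + 1;
+ *	new->nrecs  = 2;
+ *	new->key[0] = first_key(left);   new->ptr[0] = lbno;
+ *	new->key[1] = first_key(right);  new->ptr[1] = rbno;
+ *
+ * so the tree grows by exactly one level at the top.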
+ */ + { + xfs_alloc_ptr_t *pp; /* btree address pointer */ + + pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); + INT_SET(pp[0], ARCH_CONVERT, lbno); + INT_SET(pp[1], ARCH_CONVERT, rbno); + } + xfs_alloc_log_ptrs(cur, nbp, 1, 2); + /* + * Fix up the cursor. + */ + xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); + cur->bc_ptrs[cur->bc_nlevels] = nptr; + cur->bc_nlevels++; + *stat = 1; + return 0; +} + +/* + * Move 1 record right from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_alloc_rshift( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to shift record on */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* loop index */ + xfs_alloc_key_t key; /* key value for leaf level upward */ + xfs_buf_t *lbp; /* buffer for left (current) block */ + xfs_alloc_block_t *left; /* left (current) btree block */ + xfs_buf_t *rbp; /* buffer for right neighbor block */ + xfs_alloc_block_t *right; /* right neighbor btree block */ + xfs_alloc_key_t *rkp; /* key pointer for right block */ + xfs_btree_cur_t *tcur; /* temporary cursor */ + + /* + * Set up variables for this block as "left". + */ + lbp = cur->bc_bufs[level]; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; +#endif + /* + * If we've got no right sibling then we can't shift an entry right. + */ + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { + *stat = 0; + return 0; + } + /* + * Set up the right neighbor as "right". + */ + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp, + XFS_ALLOC_BTREE_REF))) + return error; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; + /* + * If it's full, it can't take another entry. + */ + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + *stat = 0; + return 0; + } + /* + * Make a hole at the start of the right neighbor block, then + * copy the last left block entry to the hole. 
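+ *
+ * Illustrative aside (editor's sketch, not part of the original
+ * patch; in-core arrays are hypothetical): this is the mirror
+ * image of xfs_alloc_lshift() above,
+ *
+ *	memmove(&right->entry[1], &right->entry[0],
+ *		right->nrecs * sizeof(right->entry[0]));
+ *	right->entry[0] = left->entry[left->nrecs - 1];
+ *	right->nrecs++;
+ *	left->nrecs--;
+ *
+ * plus a parent-key update for the right block, done below with a
+ * temporary duplicate cursor.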
+ */ + if (level > 0) { + xfs_alloc_key_t *lkp; /* key pointer for left block */ + xfs_alloc_ptr_t *lpp; /* address pointer for left block */ + xfs_alloc_ptr_t *rpp; /* address pointer for right block */ + + lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); + rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) + return error; +#endif + *rkp = *lkp; /* INT_: copy */ + *rpp = *lpp; /* INT_: copy */ + xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); + } else { + xfs_alloc_rec_t *lrp; /* record pointer for left block */ + xfs_alloc_rec_t *rrp; /* record pointer for right block */ + + lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); + memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + *rrp = *lrp; + xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ + key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ + rkp = &key; + xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); + } + /* + * Decrement and log left's numrecs, bump and log right's numrecs. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); + xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); + /* + * Using a temporary cursor, update the parent key values of the + * block on the right. + */ + if ((error = xfs_btree_dup_cursor(cur, &tcur))) + return error; + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_increment(tcur, level, &i)) || + (error = xfs_alloc_updkey(tcur, rkp, level + 1))) + goto error0; + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + *stat = 1; + return 0; +error0: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} + +/* + * Split cur/level block in half. + * Return new block number and its first record (to be inserted into parent). + */ +STATIC int /* error */ +xfs_alloc_split( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to split */ + xfs_agblock_t *bnop, /* output: block number allocated */ + xfs_alloc_key_t *keyp, /* output: first key of new block */ + xfs_btree_cur_t **curp, /* output: new cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* loop index/record number */ + xfs_agblock_t lbno; /* left (current) block number */ + xfs_buf_t *lbp; /* buffer for left block */ + xfs_alloc_block_t *left; /* left (current) btree block */ + xfs_agblock_t rbno; /* right (new) block number */ + xfs_buf_t *rbp; /* buffer for right block */ + xfs_alloc_block_t *right; /* right (new) btree block */ + + /* + * Allocate the new block from the freelist. 
+ * If we can't do it, we're toast. Give up. + */ + if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp, + &rbno))) + return error; + if (rbno == NULLAGBLOCK) { + *stat = 0; + return 0; + } + xfs_trans_agbtree_delta(cur->bc_tp, 1); + rbp = xfs_btree_get_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno, + rbno, 0); + /* + * Set up the new block as "right". + */ + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); + /* + * "Left" is the current (according to the cursor) block. + */ + lbp = cur->bc_bufs[level]; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; +#endif + /* + * Fill in the btree header for the new block. + */ + INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); + right->bb_level = left->bb_level; /* INT_: direct copy */ + INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); + /* + * Make sure that if there's an odd number of entries now, that + * each new block will have the same number of entries. + */ + if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && + cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; + /* + * For non-leaf blocks, copy keys and addresses over to the new block. + */ + if (level > 0) { + xfs_alloc_key_t *lkp; /* left btree key pointer */ + xfs_alloc_ptr_t *lpp; /* left btree address pointer */ + xfs_alloc_key_t *rkp; /* right btree key pointer */ + xfs_alloc_ptr_t *rpp; /* right btree address pointer */ + + lkp = XFS_ALLOC_KEY_ADDR(left, i, cur); + lpp = XFS_ALLOC_PTR_ADDR(left, i, cur); + rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); + rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */ + memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); /* INT_: copy */ + xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + *keyp = *rkp; + } + /* + * For leaf blocks, copy records over to the new block. + */ + else { + xfs_alloc_rec_t *lrp; /* left btree record pointer */ + xfs_alloc_rec_t *rrp; /* right btree record pointer */ + + lrp = XFS_ALLOC_REC_ADDR(left, i, cur); + rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); + memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + keyp->ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ + keyp->ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ + } + /* + * Find the left block number by looking in the buffer. + * Adjust numrecs, sibling pointers. 
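+ *
+ * Illustrative aside (editor's sketch, not part of the original
+ * patch): the sibling pointers form a doubly linked list of the
+ * blocks at each level, so hooking in the new right block is a
+ * standard list splice,
+ *
+ *	right->rightsib = left->rightsib;
+ *	right->leftsib  = lbno;
+ *	left->rightsib  = rbno;
+ *	if (right->rightsib != NULLAGBLOCK)
+ *		old right neighbor's leftsib = rbno;
+ *
+ * which is what the logged updates below perform.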
+ */ + lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); + right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ + INT_SET(left->bb_rightsib, ARCH_CONVERT, rbno); + INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); + xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); + xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + /* + * If there's a block to the new block's right, make that block + * point back to right instead of to left. + */ + if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_alloc_block_t *rrblock; /* rr btree block */ + xfs_buf_t *rrbp; /* buffer for rrblock */ + + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, + &rrbp, XFS_ALLOC_BTREE_REF))) + return error; + rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); + if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) + return error; + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, rbno); + xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); + } + /* + * If the cursor is really in the right block, move it there. + * If it's just pointing past the last entry in left, then we'll + * insert there, so don't change anything in that case. + */ + if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { + xfs_btree_setbuf(cur, level, rbp); + cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * If there are more levels, we'll need another cursor which refers to + * the right block, no matter where this cursor was. + */ + if (level + 1 < cur->bc_nlevels) { + if ((error = xfs_btree_dup_cursor(cur, curp))) + return error; + (*curp)->bc_ptrs[level + 1]++; + } + *bnop = rbno; + *stat = 1; + return 0; +} + +/* + * Update keys at all levels from here to the root along the cursor's path. + */ +STATIC int /* error */ +xfs_alloc_updkey( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_alloc_key_t *keyp, /* new key value to update to */ + int level) /* starting level for update */ +{ + int ptr; /* index of key in block */ + + /* + * Go up the tree from this level toward the root. + * At each level, update the key value to the value input. + * Stop when we reach a level where the cursor isn't pointing + * at the first entry in the block. + */ + for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { + xfs_alloc_block_t *block; /* btree block */ + xfs_buf_t *bp; /* buffer for block */ +#ifdef DEBUG + int error; /* error return value */ +#endif + xfs_alloc_key_t *kp; /* ptr to btree block keys */ + + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; +#endif + ptr = cur->bc_ptrs[level]; + kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur); + *kp = *keyp; + xfs_alloc_log_keys(cur, bp, ptr, ptr); + } + return 0; +} + +/* + * Externally visible routines. + */ + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_alloc_decrement( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat) /* success/failure */ +{ + xfs_alloc_block_t *block; /* btree block */ + int error; /* error return value */ + int lev; /* btree level */ + + ASSERT(level < cur->bc_nlevels); + /* + * Read-ahead to the left at this level. 
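+ *
+ * Illustrative aside (editor's sketch, not part of the original
+ * patch): crossing a block boundary is "walk up until we can
+ * step, then walk back down", roughly
+ *
+ *	if (--ptrs[level] > 0)
+ *		return;                 (still inside this block)
+ *	find the lowest lev where --ptrs[lev] stays above 0;
+ *	for (; lev > level; lev--)
+ *		ptrs[lev - 1] = numrecs of the child just entered;
+ *
+ * i.e. descend along the rightmost edge of the subtree to the
+ * left; xfs_alloc_increment() below is the mirror image, resetting
+ * the lower levels to 1 instead.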
+ */ + xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); + /* + * Decrement the ptr at this level. If we're still in the block + * then we're done. + */ + if (--cur->bc_ptrs[level] > 0) { + *stat = 1; + return 0; + } + /* + * Get a pointer to the btree block. + */ + block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[level]); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, + cur->bc_bufs[level]))) + return error; +#endif + /* + * If we just went off the left edge of the tree, return failure. + */ + if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * March up the tree decrementing pointers. + * Stop when we don't go off the left edge of a block. + */ + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + if (--cur->bc_ptrs[lev] > 0) + break; + /* + * Read-ahead the left block, we're going to read it + * in the next loop. + */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); + } + /* + * If we went off the root then we are seriously confused. + */ + ASSERT(lev < cur->bc_nlevels); + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. + */ + for (block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); lev > level; ) { + xfs_agblock_t agbno; /* block number of btree block */ + xfs_buf_t *bp; /* buffer pointer for block */ + + agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, agbno, 0, &bp, + XFS_ALLOC_BTREE_REF))) + return error; + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; + cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); + } + *stat = 1; + return 0; +} + +/* + * Delete the record pointed to by cur. + * The cursor refers to the place where the record was (could be inserted) + * when the operation returns. + */ +int /* error */ +xfs_alloc_delete( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* result code */ + int level; /* btree level */ + + /* + * Go up the tree, starting at leaf level. + * If 2 is returned then a join was done; go to the next level. + * Otherwise we are done. + */ + for (level = 0, i = 2; i == 2; level++) { + if ((error = xfs_alloc_delrec(cur, level, &i))) + return error; + } + if (i == 0) { + for (level = 1; level < cur->bc_nlevels; level++) { + if (cur->bc_ptrs[level] == 0) { + if ((error = xfs_alloc_decrement(cur, level, &i))) + return error; + break; + } + } + } + *stat = i; + return 0; +} + +/* + * Get the data from the pointed-to record. + */ +int /* error */ +xfs_alloc_get_rec( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t *bno, /* output: starting block of extent */ + xfs_extlen_t *len, /* output: length of extent */ + int *stat) /* output: success/failure */ +{ + xfs_alloc_block_t *block; /* btree block */ +#ifdef DEBUG + int error; /* error return value */ +#endif + int ptr; /* record number */ + + ptr = cur->bc_ptrs[0]; + block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0]))) + return error; +#endif + /* + * Off the right end or left end, return failure. + */ + if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { + *stat = 0; + return 0; + } + /* + * Point to the record and extract its data. 
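+ *
+ * Illustrative aside (editor's sketch, not part of the original
+ * patch; be32_fetch is a hypothetical name): INT_GET with
+ * ARCH_CONVERT is the disk-to-host conversion used throughout;
+ * these fields are big-endian on disk, so on a little-endian host
+ * a 32-bit fetch behaves like
+ *
+ *	static inline __uint32_t be32_fetch(const unsigned char *c)
+ *	{
+ *		return (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3];
+ *	}
+ *
+ * and is a plain load on big-endian hosts.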
+ */ + { + xfs_alloc_rec_t *rec; /* record data */ + + rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); + *bno = INT_GET(rec->ar_startblock, ARCH_CONVERT); + *len = INT_GET(rec->ar_blockcount, ARCH_CONVERT); + } + *stat = 1; + return 0; +} + +/* + * Increment cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_alloc_increment( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat) /* success/failure */ +{ + xfs_alloc_block_t *block; /* btree block */ + xfs_buf_t *bp; /* tree block buffer */ + int error; /* error return value */ + int lev; /* btree level */ + + ASSERT(level < cur->bc_nlevels); + /* + * Read-ahead to the right at this level. + */ + xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); + /* + * Get a pointer to the btree block. + */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; +#endif + /* + * Increment the ptr at this level. If we're still in the block + * then we're done. + */ + if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + *stat = 1; + return 0; + } + /* + * If we just went off the right edge of the tree, return failure. + */ + if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * March up the tree incrementing pointers. + * Stop when we don't go off the right edge of a block. + */ + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + bp = cur->bc_bufs[lev]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; +#endif + if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) + break; + /* + * Read-ahead the right block, we're going to read it + * in the next loop. + */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); + } + /* + * If we went off the root then we are seriously confused. + */ + ASSERT(lev < cur->bc_nlevels); + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. + */ + for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_ALLOC_BLOCK(bp); + lev > level; ) { + xfs_agblock_t agbno; /* block number of btree block */ + + agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, agbno, 0, &bp, + XFS_ALLOC_BTREE_REF))) + return error; + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; + cur->bc_ptrs[lev] = 1; + } + *stat = 1; + return 0; +} + +/* + * Insert the current record at the point referenced by cur. + * The cursor may be inconsistent on return if splits have been done. 
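+ *
+ * Illustrative aside (editor's sketch, not part of the original
+ * patch): the do/while below is classic bottom-up B-tree
+ * insertion, in outline
+ *
+ *	level = 0;
+ *	do {
+ *		split = insert the record at level, then level++;
+ *	} while (split);
+ *
+ * Each split hands its new sibling's first record up one level via
+ * nrec/nbno, and a split that reaches the root is absorbed by
+ * xfs_alloc_newroot().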
+ */ +int /* error */ +xfs_alloc_insert( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* result value, 0 for failure */ + int level; /* current level number in btree */ + xfs_agblock_t nbno; /* new block number (split result) */ + xfs_btree_cur_t *ncur; /* new cursor (split result) */ + xfs_alloc_rec_t nrec; /* record being inserted this level */ + xfs_btree_cur_t *pcur; /* previous level's cursor */ + + level = 0; + nbno = NULLAGBLOCK; + INT_SET(nrec.ar_startblock, ARCH_CONVERT, cur->bc_rec.a.ar_startblock); + INT_SET(nrec.ar_blockcount, ARCH_CONVERT, cur->bc_rec.a.ar_blockcount); + ncur = (xfs_btree_cur_t *)0; + pcur = cur; + /* + * Loop going up the tree, starting at the leaf level. + * Stop when we don't get a split block, that must mean that + * the insert is finished with this level. + */ + do { + /* + * Insert nrec/nbno into this level of the tree. + * Note if we fail, nbno will be null. + */ + if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur, + &i))) { + if (pcur != cur) + xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); + return error; + } + /* + * See if the cursor we just used is trash. + * Can't trash the caller's cursor, but otherwise we should + * if ncur is a new cursor or we're about to be done. + */ + if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) { + cur->bc_nlevels = pcur->bc_nlevels; + xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); + } + /* + * If we got a new cursor, switch to it. + */ + if (ncur) { + pcur = ncur; + ncur = (xfs_btree_cur_t *)0; + } + } while (nbno != NULLAGBLOCK); + *stat = i; + return 0; +} + +/* + * Lookup the record equal to [bno, len] in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_eq( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* success/failure */ +{ + cur->bc_rec.a.ar_startblock = bno; + cur->bc_rec.a.ar_blockcount = len; + return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, stat); +} + +/* + * Lookup the first record greater than or equal to [bno, len] + * in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_ge( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* success/failure */ +{ + cur->bc_rec.a.ar_startblock = bno; + cur->bc_rec.a.ar_blockcount = len; + return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, stat); +} + +/* + * Lookup the first record less than or equal to [bno, len] + * in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_le( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* success/failure */ +{ + cur->bc_rec.a.ar_startblock = bno; + cur->bc_rec.a.ar_blockcount = len; + return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, stat); +} + +/* + * Update the record referred to by cur, to the value given by [bno, len]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +int /* error */ +xfs_alloc_update( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len) /* length of extent */ +{ + xfs_alloc_block_t *block; /* btree block to update */ + int error; /* error return value */ + int ptr; /* current record number (updating) */ + + ASSERT(len > 0); + /* + * Pick up the a.g. freelist struct and the current block. 
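+ *
+ * Illustrative aside (editor's sketch, not part of the original
+ * patch): the update itself is a simple overwrite; the two side
+ * effects handled below are, in outline,
+ *
+ *	rec[ptr] = [bno, len];
+ *	if (cnt tree, last record of the last leaf)
+ *		agf_longest = len;      (cached longest free extent)
+ *	if (ptr == 1)
+ *		push the new first key up to the parents;
+ *
+ * all of it transaction-logged alongside the record.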
+ */ + block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0]))) + return error; +#endif + /* + * Get the address of the rec to be updated. + */ + ptr = cur->bc_ptrs[0]; + { + xfs_alloc_rec_t *rp; /* pointer to updated record */ + + rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); + /* + * Fill in the new contents and log them. + */ + INT_SET(rp->ar_startblock, ARCH_CONVERT, bno); + INT_SET(rp->ar_blockcount, ARCH_CONVERT, len); + xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); + } + /* + * If it's the by-size btree and it's the last leaf block and + * it's the last record... then update the size of the longest + * extent in the a.g., which we cache in the a.g. freelist header. + */ + if (cur->bc_btnum == XFS_BTNUM_CNT && + INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && + ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + xfs_agf_t *agf; /* a.g. freespace header */ + xfs_agnumber_t seqno; + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); + cur->bc_mp->m_perag[seqno].pagf_longest = len; + INT_SET(agf->agf_longest, ARCH_CONVERT, len); + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_LONGEST); + } + /* + * Updating first record in leaf. Pass new key value up to our parent. + */ + if (ptr == 1) { + xfs_alloc_key_t key; /* key containing [bno, len] */ + + INT_SET(key.ar_startblock, ARCH_CONVERT, bno); + INT_SET(key.ar_blockcount, ARCH_CONVERT, len); + if ((error = xfs_alloc_updkey(cur, &key, 1))) + return error; + } + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_alloc_btree.h linux.21rc5-ac2/fs/xfs/xfs_alloc_btree.h --- linux.21rc5/fs/xfs/xfs_alloc_btree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_alloc_btree.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ALLOC_BTREE_H__ +#define __XFS_ALLOC_BTREE_H__ + +/* + * Freespace on-disk structures + */ + +struct xfs_buf; +struct xfs_btree_cur; +struct xfs_btree_sblock; +struct xfs_mount; + +/* + * There are two on-disk btrees, one sorted by blockno and one sorted + * by blockcount and blockno. All blocks look the same to make the code + * simpler; if we have time later, we'll make the optimizations. + */ +#define XFS_ABTB_MAGIC 0x41425442 /* 'ABTB' for bno tree */ +#define XFS_ABTC_MAGIC 0x41425443 /* 'ABTC' for cnt tree */ + +/* + * Data record/key structure + */ +typedef struct xfs_alloc_rec +{ + xfs_agblock_t ar_startblock; /* starting block number */ + xfs_extlen_t ar_blockcount; /* count of free blocks */ +} xfs_alloc_rec_t, xfs_alloc_key_t; + +typedef xfs_agblock_t xfs_alloc_ptr_t; /* btree pointer type */ + /* btree block header type */ +typedef struct xfs_btree_sblock xfs_alloc_block_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_ALLOC_BLOCK) +xfs_alloc_block_t *xfs_buf_to_alloc_block(struct xfs_buf *bp); +#define XFS_BUF_TO_ALLOC_BLOCK(bp) xfs_buf_to_alloc_block(bp) +#else +#define XFS_BUF_TO_ALLOC_BLOCK(bp) ((xfs_alloc_block_t *)(XFS_BUF_PTR(bp))) +#endif + +/* + * Real block structures have a size equal to the disk block size. + */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_BLOCK_SIZE) +int xfs_alloc_block_size(int lev, struct xfs_btree_cur *cur); +#define XFS_ALLOC_BLOCK_SIZE(lev,cur) xfs_alloc_block_size(lev,cur) +#else +#define XFS_ALLOC_BLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_BLOCK_MAXRECS) +int xfs_alloc_block_maxrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_ALLOC_BLOCK_MAXRECS(lev,cur) xfs_alloc_block_maxrecs(lev,cur) +#else +#define XFS_ALLOC_BLOCK_MAXRECS(lev,cur) \ + ((cur)->bc_mp->m_alloc_mxr[lev != 0]) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_BLOCK_MINRECS) +int xfs_alloc_block_minrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_ALLOC_BLOCK_MINRECS(lev,cur) xfs_alloc_block_minrecs(lev,cur) +#else +#define XFS_ALLOC_BLOCK_MINRECS(lev,cur) \ + ((cur)->bc_mp->m_alloc_mnr[lev != 0]) +#endif + +/* + * Minimum and maximum blocksize and sectorsize. + * The blocksize upper limit is pretty much arbitrary. + * The sectorsize upper limit is due to sizeof(sb_sectsize). + */ +#define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */ +#define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */ +#define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG) +#define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG) +#define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */ +#define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */ +#define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG) +#define XFS_MAX_SECTORSIZE (1 << XFS_MAX_SECTORSIZE_LOG) + +/* + * Block numbers in the AG: + * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3. 
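+ *
+ * Illustrative aside (editor's note, not part of the original
+ * patch): the macros below chain off one another, so the
+ * arithmetic works out as
+ *
+ *	XFS_BNO_BLOCK(mp) == XFS_AGFL_BLOCK(mp) + 1
+ *	XFS_CNT_BLOCK(mp) == XFS_BNO_BLOCK(mp) + 1
+ *
+ * giving the fixed AG blocks where mkfs places the two freespace
+ * btree roots; at run time the live root locations are read from
+ * agf_roots[], as xfs_alloc_lookup() does.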
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BNO_BLOCK) +xfs_agblock_t xfs_bno_block(struct xfs_mount *mp); +#define XFS_BNO_BLOCK(mp) xfs_bno_block(mp) +#else +#define XFS_BNO_BLOCK(mp) ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CNT_BLOCK) +xfs_agblock_t xfs_cnt_block(struct xfs_mount *mp); +#define XFS_CNT_BLOCK(mp) xfs_cnt_block(mp) +#else +#define XFS_CNT_BLOCK(mp) ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1)) +#endif + +/* + * Record, key, and pointer address macros for btree blocks. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_REC_ADDR) +xfs_alloc_rec_t *xfs_alloc_rec_addr(xfs_alloc_block_t *bb, int i, + struct xfs_btree_cur *cur); +#define XFS_ALLOC_REC_ADDR(bb,i,cur) xfs_alloc_rec_addr(bb,i,cur) +#else +#define XFS_ALLOC_REC_ADDR(bb,i,cur) \ + XFS_BTREE_REC_ADDR(XFS_ALLOC_BLOCK_SIZE(0,cur), xfs_alloc, bb, i, \ + XFS_ALLOC_BLOCK_MAXRECS(0, cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_KEY_ADDR) +xfs_alloc_key_t *xfs_alloc_key_addr(xfs_alloc_block_t *bb, int i, + struct xfs_btree_cur *cur); +#define XFS_ALLOC_KEY_ADDR(bb,i,cur) xfs_alloc_key_addr(bb,i,cur) +#else +#define XFS_ALLOC_KEY_ADDR(bb,i,cur) \ + XFS_BTREE_KEY_ADDR(XFS_ALLOC_BLOCK_SIZE(1,cur), xfs_alloc, bb, i, \ + XFS_ALLOC_BLOCK_MAXRECS(1, cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_PTR_ADDR) +xfs_alloc_ptr_t *xfs_alloc_ptr_addr(xfs_alloc_block_t *bb, int i, + struct xfs_btree_cur *cur); +#define XFS_ALLOC_PTR_ADDR(bb,i,cur) xfs_alloc_ptr_addr(bb,i,cur) +#else +#define XFS_ALLOC_PTR_ADDR(bb,i,cur) \ + XFS_BTREE_PTR_ADDR(XFS_ALLOC_BLOCK_SIZE(1,cur), xfs_alloc, bb, i, \ + XFS_ALLOC_BLOCK_MAXRECS(1, cur)) +#endif + +/* + * Prototypes for externally visible routines. + */ + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_alloc_decrement( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat); /* success/failure */ + +/* + * Delete the record pointed to by cur. + * The cursor refers to the place where the record was (could be inserted) + * when the operation returns. + */ +int /* error */ +xfs_alloc_delete( + struct xfs_btree_cur *cur, /* btree cursor */ + int *stat); /* success/failure */ + +/* + * Get the data from the pointed-to record. + */ +int /* error */ +xfs_alloc_get_rec( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t *bno, /* output: starting block of extent */ + xfs_extlen_t *len, /* output: length of extent */ + int *stat); /* output: success/failure */ + +/* + * Increment cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_alloc_increment( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat); /* success/failure */ + +/* + * Insert the current record at the point referenced by cur. + * The cursor may be inconsistent on return if splits have been done. + */ +int /* error */ +xfs_alloc_insert( + struct xfs_btree_cur *cur, /* btree cursor */ + int *stat); /* success/failure */ + +/* + * Lookup the record equal to [bno, len] in the btree given by cur. 
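+ *
+ * Illustrative aside (editor's sketch, not part of the original
+ * patch; fbno/flen/i are hypothetical caller locals): a typical
+ * caller pairs a lookup with xfs_alloc_get_rec() and checks both
+ * the error and the stat result,
+ *
+ *	if ((error = xfs_alloc_lookup_ge(cur, bno, len, &i)))
+ *		return error;
+ *	if (i)
+ *		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
+ *
+ * where stat == 0 means "no such record", which is not an error.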
+ */ +int /* error */ +xfs_alloc_lookup_eq( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat); /* success/failure */ + +/* + * Lookup the first record greater than or equal to [bno, len] + * in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_ge( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat); /* success/failure */ + +/* + * Lookup the first record less than or equal to [bno, len] + * in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_le( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat); /* success/failure */ + +/* + * Update the record referred to by cur, to the value given by [bno, len]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +int /* error */ +xfs_alloc_update( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len); /* length of extent */ + +#endif /* __XFS_ALLOC_BTREE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_alloc.c linux.21rc5-ac2/fs/xfs/xfs_alloc.c --- linux.21rc5/fs/xfs/xfs_alloc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_alloc.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,2626 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Free space allocation for XFS. + */ +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_bit.h" +#include "xfs_error.h" + + +#if defined(DEBUG) +/* + * Allocation tracing. + */ +ktrace_t *xfs_alloc_trace_buf; +#endif + +#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? 
((b) - (a)) : ((a) - (b))) + +#define XFSA_FIXUP_BNO_OK 1 +#define XFSA_FIXUP_CNT_OK 2 + +int +xfs_alloc_search_busy(xfs_trans_t *tp, + xfs_agnumber_t agno, + xfs_agblock_t bno, + xfs_extlen_t len); + +#if defined(XFS_ALLOC_TRACE) +#define TRACE_ALLOC(s,a) \ + xfs_alloc_trace_alloc(fname, s, a, __LINE__) +#define TRACE_FREE(s,a,b,x,f) \ + xfs_alloc_trace_free(fname, s, mp, a, b, x, f, __LINE__) +#define TRACE_MODAGF(s,a,f) \ + xfs_alloc_trace_modagf(fname, s, mp, a, f, __LINE__) +#define TRACE_BUSY(fname,s,ag,agb,l,sl,tp) \ + xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__) +#define TRACE_UNBUSY(fname,s,ag,sl,tp) \ + xfs_alloc_trace_busy(fname, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__) +#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp) \ + xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__) + + +#else +#define TRACE_ALLOC(s,a) +#define TRACE_FREE(s,a,b,x,f) +#define TRACE_MODAGF(s,a,f) +#define TRACE_BUSY(s,a,ag,agb,l,sl,tp) +#define TRACE_UNBUSY(fname,s,ag,sl,tp) +#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp) +#endif /* XFS_ALLOC_TRACE */ + +/* + * Prototypes for per-ag allocation routines + */ + +STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *); +STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *); +STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *); +STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *, + xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *); + +/* + * Internal functions. + */ + +/* + * Compute aligned version of the found extent. + * Takes alignment and min length into account. + */ +STATIC int /* success (>= minlen) */ +xfs_alloc_compute_aligned( + xfs_agblock_t foundbno, /* starting block in found extent */ + xfs_extlen_t foundlen, /* length in found extent */ + xfs_extlen_t alignment, /* alignment for allocation */ + xfs_extlen_t minlen, /* minimum length for allocation */ + xfs_agblock_t *resbno, /* result block number */ + xfs_extlen_t *reslen) /* result length */ +{ + xfs_agblock_t bno; + xfs_extlen_t diff; + xfs_extlen_t len; + + if (alignment > 1 && foundlen >= minlen) { + bno = roundup(foundbno, alignment); + diff = bno - foundbno; + len = diff >= foundlen ? 0 : foundlen - diff; + } else { + bno = foundbno; + len = foundlen; + } + *resbno = bno; + *reslen = len; + return len >= minlen; +} + +/* + * Compute best start block and diff for "near" allocations. + * freelen >= wantlen already checked by caller. 
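+ *
+ * Illustrative worked example (editor's addition, not part of the
+ * original patch) for xfs_alloc_compute_aligned() above: a found
+ * extent of 10 blocks at bno 13 with alignment 4 rounds up to
+ * bno 16, losing diff = 3 blocks, so the usable piece is 7 blocks
+ * at bno 16 and the function succeeds only if 7 >= minlen.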
+ */ +STATIC xfs_extlen_t /* difference value (absolute) */ +xfs_alloc_compute_diff( + xfs_agblock_t wantbno, /* target starting block */ + xfs_extlen_t wantlen, /* target length */ + xfs_extlen_t alignment, /* target alignment */ + xfs_agblock_t freebno, /* freespace's starting block */ + xfs_extlen_t freelen, /* freespace's length */ + xfs_agblock_t *newbnop) /* result: best start block from free */ +{ + xfs_agblock_t freeend; /* end of freespace extent */ + xfs_agblock_t newbno1; /* return block number */ + xfs_agblock_t newbno2; /* other new block number */ + xfs_extlen_t newlen1=0; /* length with newbno1 */ + xfs_extlen_t newlen2=0; /* length with newbno2 */ + xfs_agblock_t wantend; /* end of target extent */ + + ASSERT(freelen >= wantlen); + freeend = freebno + freelen; + wantend = wantbno + wantlen; + if (freebno >= wantbno) { + if ((newbno1 = roundup(freebno, alignment)) >= freeend) + newbno1 = NULLAGBLOCK; + } else if (freeend >= wantend && alignment > 1) { + newbno1 = roundup(wantbno, alignment); + newbno2 = newbno1 - alignment; + if (newbno1 >= freeend) + newbno1 = NULLAGBLOCK; + else + newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1); + if (newbno2 < freebno) + newbno2 = NULLAGBLOCK; + else + newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2); + if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) { + if (newlen1 < newlen2 || + (newlen1 == newlen2 && + XFS_ABSDIFF(newbno1, wantbno) > + XFS_ABSDIFF(newbno2, wantbno))) + newbno1 = newbno2; + } else if (newbno2 != NULLAGBLOCK) + newbno1 = newbno2; + } else if (freeend >= wantend) { + newbno1 = wantbno; + } else if (alignment > 1) { + newbno1 = roundup(freeend - wantlen, alignment); + if (newbno1 > freeend - wantlen && + newbno1 - alignment >= freebno) + newbno1 -= alignment; + else if (newbno1 >= freeend) + newbno1 = NULLAGBLOCK; + } else + newbno1 = freeend - wantlen; + *newbnop = newbno1; + return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno); +} + +/* + * Fix up the length, based on mod and prod. + * len should be k * prod + mod for some k. + * If len is too small it is returned unchanged. + * If len hits maxlen it is left alone. + */ +STATIC void +xfs_alloc_fix_len( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_extlen_t k; + xfs_extlen_t rlen; + + ASSERT(args->mod < args->prod); + rlen = args->len; + ASSERT(rlen >= args->minlen); + ASSERT(rlen <= args->maxlen); + if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen || + (args->mod == 0 && rlen < args->prod)) + return; + k = rlen % args->prod; + if (k == args->mod) + return; + if (k > args->mod) { + if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen) + return; + } else { + if ((int)(rlen = rlen - args->prod - (args->mod - k)) < + (int)args->minlen) + return; + } + ASSERT(rlen >= args->minlen); + ASSERT(rlen <= args->maxlen); + args->len = rlen; +} + +/* + * Fix up length if there is too little space left in the a.g. + * Return 1 if ok, 0 if too little, should give up. + */ +STATIC int +xfs_alloc_fix_minleft( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_agf_t *agf; /* a.g. 
freelist header */ + int diff; /* free space difference */ + + if (args->minleft == 0) + return 1; + agf = XFS_BUF_TO_AGF(args->agbp); + diff = INT_GET(agf->agf_freeblks, ARCH_CONVERT) + + INT_GET(agf->agf_flcount, ARCH_CONVERT) + - args->len - args->minleft; + if (diff >= 0) + return 1; + args->len += diff; /* shrink the allocated space */ + if (args->len >= args->minlen) + return 1; + args->agbno = NULLAGBLOCK; + return 0; +} + +/* + * Update the two btrees, logically removing from freespace the extent + * starting at rbno, rlen blocks. The extent is contained within the + * actual (current) free extent fbno for flen blocks. + * Flags are passed in indicating whether the cursors are set to the + * relevant records. + */ +STATIC int /* error code */ +xfs_alloc_fixup_trees( + xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */ + xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */ + xfs_agblock_t fbno, /* starting block of free extent */ + xfs_extlen_t flen, /* length of free extent */ + xfs_agblock_t rbno, /* starting block of returned extent */ + xfs_extlen_t rlen, /* length of returned extent */ + int flags) /* flags, XFSA_FIXUP_... */ +{ + int error; /* error code */ + int i; /* operation results */ + xfs_agblock_t nfbno1; /* first new free startblock */ + xfs_agblock_t nfbno2; /* second new free startblock */ + xfs_extlen_t nflen1=0; /* first new free length */ + xfs_extlen_t nflen2=0; /* second new free length */ + + /* + * Look up the record in the by-size tree if necessary. + */ + if (flags & XFSA_FIXUP_CNT_OK) { +#ifdef DEBUG + if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN( + i == 1 && nfbno1 == fbno && nflen1 == flen); +#endif + } else { + if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + /* + * Look up the record in the by-block tree if necessary. + */ + if (flags & XFSA_FIXUP_BNO_OK) { +#ifdef DEBUG + if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN( + i == 1 && nfbno1 == fbno && nflen1 == flen); +#endif + } else { + if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } +#ifdef DEBUG + { + xfs_alloc_block_t *bnoblock; + xfs_alloc_block_t *cntblock; + + if (bno_cur->bc_nlevels == 1 && + cnt_cur->bc_nlevels == 1) { + bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]); + cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]); + XFS_WANT_CORRUPTED_RETURN( + INT_GET(bnoblock->bb_numrecs, ARCH_CONVERT) == INT_GET(cntblock->bb_numrecs, ARCH_CONVERT)); + } + } +#endif + /* + * Deal with all four cases: the allocated record is contained + * within the freespace record, so we can have new freespace + * at either (or both) end, or no freespace remaining. + */ + if (rbno == fbno && rlen == flen) + nfbno1 = nfbno2 = NULLAGBLOCK; + else if (rbno == fbno) { + nfbno1 = rbno + rlen; + nflen1 = flen - rlen; + nfbno2 = NULLAGBLOCK; + } else if (rbno + rlen == fbno + flen) { + nfbno1 = fbno; + nflen1 = flen - rlen; + nfbno2 = NULLAGBLOCK; + } else { + nfbno1 = fbno; + nflen1 = rbno - fbno; + nfbno2 = rbno + rlen; + nflen2 = (fbno + flen) - nfbno2; + } + /* + * Delete the entry from the by-size btree. + */ + if ((error = xfs_alloc_delete(cnt_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + /* + * Add new by-size btree entry(s). 
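+ *
+ * Illustrative aside (editor's note, not part of the original
+ * patch): the four cases computed above cover carving [rbno, rlen]
+ * out of [fbno, flen]:
+ *
+ *	exact match             no freespace left
+ *	rbno == fbno            one leftover piece on the right
+ *	right ends coincide     one leftover piece on the left
+ *	strictly inside         two leftover pieces, one per side
+ *
+ * The code below re-inserts whichever of nfbno1/nfbno2 survive
+ * into both btrees.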
+ */ + if (nfbno1 != NULLAGBLOCK) { + if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 0); + if ((error = xfs_alloc_insert(cnt_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + if (nfbno2 != NULLAGBLOCK) { + if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 0); + if ((error = xfs_alloc_insert(cnt_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + /* + * Fix up the by-block btree entry(s). + */ + if (nfbno1 == NULLAGBLOCK) { + /* + * No remaining freespace, just delete the by-block tree entry. + */ + if ((error = xfs_alloc_delete(bno_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } else { + /* + * Update the by-block entry to start later|be shorter. + */ + if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1))) + return error; + } + if (nfbno2 != NULLAGBLOCK) { + /* + * 2 resulting free entries, need to add one. + */ + if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 0); + if ((error = xfs_alloc_insert(bno_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + return 0; +} + +/* + * Read in the allocation group free block array. + */ +STATIC int /* error */ +xfs_alloc_read_agfl( + xfs_mount_t *mp, /* mount point structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_buf_t **bpp) /* buffer for the ag free block array */ +{ + xfs_buf_t *bp; /* return value */ + int error; + + ASSERT(agno != NULLAGNUMBER); + error = xfs_trans_read_buf( + mp, tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (error) + return error; + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF); + *bpp = bp; + return 0; +} + +#if defined(XFS_ALLOC_TRACE) +/* + * Add an allocation trace entry for an alloc call. + */ +STATIC void +xfs_alloc_trace_alloc( + char *name, /* function tag string */ + char *str, /* additional string */ + xfs_alloc_arg_t *args, /* allocation argument structure */ + int line) /* source line number */ +{ + ktrace_enter(xfs_alloc_trace_buf, + (void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)), + (void *)name, + (void *)str, + (void *)args->mp, + (void *)(__psunsigned_t)args->agno, + (void *)(__psunsigned_t)args->agbno, + (void *)(__psunsigned_t)args->minlen, + (void *)(__psunsigned_t)args->maxlen, + (void *)(__psunsigned_t)args->mod, + (void *)(__psunsigned_t)args->prod, + (void *)(__psunsigned_t)args->minleft, + (void *)(__psunsigned_t)args->total, + (void *)(__psunsigned_t)args->alignment, + (void *)(__psunsigned_t)args->len, + (void *)((((__psint_t)args->type) << 16) | + (__psint_t)args->otype), + (void *)(__psint_t)((args->wasdel << 3) | + (args->wasfromfl << 2) | + (args->isfl << 1) | + (args->userdata << 0))); +} + +/* + * Add an allocation trace entry for a free call. + */ +STATIC void +xfs_alloc_trace_free( + char *name, /* function tag string */ + char *str, /* additional string */ + xfs_mount_t *mp, /* file system mount point */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* a.g. 
relative block number */ + xfs_extlen_t len, /* length of extent */ + int isfl, /* set if is freelist allocation/free */ + int line) /* source line number */ +{ + ktrace_enter(xfs_alloc_trace_buf, + (void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)), + (void *)name, + (void *)str, + (void *)mp, + (void *)(__psunsigned_t)agno, + (void *)(__psunsigned_t)agbno, + (void *)(__psunsigned_t)len, + (void *)(__psint_t)isfl, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add an allocation trace entry for modifying an agf. + */ +STATIC void +xfs_alloc_trace_modagf( + char *name, /* function tag string */ + char *str, /* additional string */ + xfs_mount_t *mp, /* file system mount point */ + xfs_agf_t *agf, /* new agf value */ + int flags, /* logging flags for agf */ + int line) /* source line number */ +{ + ktrace_enter(xfs_alloc_trace_buf, + (void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)), + (void *)name, + (void *)str, + (void *)mp, + (void *)(__psint_t)flags, + (void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO], + ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT], + ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO], + ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT], + ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_freeblks, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_longest, ARCH_CONVERT)); +} + +STATIC void +xfs_alloc_trace_busy( + char *name, /* function tag string */ + char *str, /* additional string */ + xfs_mount_t *mp, /* file system mount point */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* a.g. relative block number */ + xfs_extlen_t len, /* length of extent */ + int slot, /* perag Busy slot */ + xfs_trans_t *tp, + int trtype, /* type: add, delete, search */ + int line) /* source line number */ +{ + ktrace_enter(xfs_alloc_trace_buf, + (void *)(__psint_t)(trtype | (line << 16)), + (void *)name, + (void *)str, + (void *)mp, + (void *)(__psunsigned_t)agno, + (void *)(__psunsigned_t)agbno, + (void *)(__psunsigned_t)len, + (void *)(__psint_t)slot, + (void *)tp, + NULL, NULL, NULL, NULL, NULL, NULL, NULL); +} +#endif /* XFS_ALLOC_TRACE */ + +/* + * Allocation group level functions. + */ + +/* + * Allocate a variable extent in the allocation group agno. + * Type and bno are used to determine where in the allocation group the + * extent will start. + * Extent's length (returned in *len) will be between minlen and maxlen, + * and of the form k * prod + mod unless there's nothing that large. + * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. + */ +STATIC int /* error */ +xfs_alloc_ag_vextent( + xfs_alloc_arg_t *args) /* argument structure for allocation */ +{ + int error=0; +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent"; +#endif + + ASSERT(args->minlen > 0); + ASSERT(args->maxlen > 0); + ASSERT(args->minlen <= args->maxlen); + ASSERT(args->mod < args->prod); + ASSERT(args->alignment > 0); + /* + * Branch to correct routine based on the type.
+ */ + args->wasfromfl = 0; + switch (args->type) { + case XFS_ALLOCTYPE_THIS_AG: + error = xfs_alloc_ag_vextent_size(args); + break; + case XFS_ALLOCTYPE_NEAR_BNO: + error = xfs_alloc_ag_vextent_near(args); + break; + case XFS_ALLOCTYPE_THIS_BNO: + error = xfs_alloc_ag_vextent_exact(args); + break; + default: + ASSERT(0); + /* NOTREACHED */ + } + if (error) + return error; + /* + * If the allocation worked, need to change the agf structure + * (and log it), and the superblock. + */ + if (args->agbno != NULLAGBLOCK) { + xfs_agf_t *agf; /* allocation group freelist header */ +#ifdef XFS_ALLOC_TRACE + xfs_mount_t *mp = args->mp; +#endif + long slen = (long)args->len; + + ASSERT(args->len >= args->minlen && args->len <= args->maxlen); + ASSERT(!(args->wasfromfl) || !args->isfl); + ASSERT(args->agbno % args->alignment == 0); + if (!(args->wasfromfl)) { + + agf = XFS_BUF_TO_AGF(args->agbp); + INT_MOD(agf->agf_freeblks, ARCH_CONVERT, -(args->len)); + xfs_trans_agblocks_delta(args->tp, + -((long)(args->len))); + args->pag->pagf_freeblks -= args->len; + ASSERT(INT_GET(agf->agf_freeblks, ARCH_CONVERT) + <= INT_GET(agf->agf_length, ARCH_CONVERT)); + TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); + xfs_alloc_log_agf(args->tp, args->agbp, + XFS_AGF_FREEBLKS); + /* search the busylist for these blocks */ + xfs_alloc_search_busy(args->tp, args->agno, + args->agbno, args->len); + } + if (!args->isfl) + xfs_trans_mod_sb(args->tp, + args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS : + XFS_TRANS_SB_FDBLOCKS, -slen); + XFS_STATS_INC(xfsstats.xs_allocx); + XFS_STATS_ADD(xfsstats.xs_allocb, args->len); + } + return 0; +} + +/* + * Allocate a variable extent at exactly agno/bno. + * Extent's length (returned in *len) will be between minlen and maxlen, + * and of the form k * prod + mod unless there's nothing that large. + * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it. + */ +STATIC int /* error */ +xfs_alloc_ag_vextent_exact( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */ + xfs_btree_cur_t *cnt_cur;/* by count btree cursor */ + xfs_agblock_t end; /* end of allocated extent */ + int error; + xfs_agblock_t fbno; /* start block of found extent */ + xfs_agblock_t fend; /* end block of found extent */ + xfs_extlen_t flen; /* length of found extent */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent_exact"; +#endif + int i; /* success/failure of operation */ + xfs_agblock_t maxend; /* end of maximal extent */ + xfs_agblock_t minend; /* end of minimal extent */ + xfs_extlen_t rlen; /* length of returned extent */ + + ASSERT(args->alignment == 1); + /* + * Allocate/initialize a cursor for the by-number freespace btree. + */ + bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_BNO, 0, 0); + /* + * Lookup bno and minlen in the btree (minlen is irrelevant, really). + * Look for the closest free block <= bno, it must contain bno + * if any free block does. + */ + if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i))) + goto error0; + if (!i) { + /* + * Didn't find it, return null. + */ + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + args->agbno = NULLAGBLOCK; + return 0; + } + /* + * Grab the freespace record. 
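+ * (The record comes back as a (start, length) pair: fbno is the
+ * first block of the free extent and flen its length, so the
+ * extent covers [fbno, fbno + flen).)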
+ */ + if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + ASSERT(fbno <= args->agbno); + minend = args->agbno + args->minlen; + maxend = args->agbno + args->maxlen; + fend = fbno + flen; + /* + * Give up if the freespace isn't long enough for the minimum request. + */ + if (fend < minend) { + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + args->agbno = NULLAGBLOCK; + return 0; + } + /* + * End of extent will be smaller of the freespace end and the + * maximal requested end. + */ + end = XFS_AGBLOCK_MIN(fend, maxend); + /* + * Fix the length according to mod and prod if given. + */ + args->len = end - args->agbno; + xfs_alloc_fix_len(args); + if (!xfs_alloc_fix_minleft(args)) { + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + return 0; + } + rlen = args->len; + ASSERT(args->agbno + rlen <= fend); + end = args->agbno + rlen; + /* + * We are allocating agbno for rlen [agbno .. end] + * Allocate/initialize a cursor for the by-size btree. + */ + cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_CNT, 0, 0); + ASSERT(args->agbno + args->len <= + INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT)); + if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, + args->agbno, args->len, XFSA_FIXUP_BNO_OK))) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); + goto error0; + } + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + TRACE_ALLOC("normal", args); + args->wasfromfl = 0; + return 0; + +error0: + xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); + TRACE_ALLOC("error", args); + return error; +} + +/* + * Allocate a variable extent near bno in the allocation group agno. + * Extent's length (returned in len) will be between minlen and maxlen, + * and of the form k * prod + mod unless there's nothing that large. + * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. + */ +STATIC int /* error */ +xfs_alloc_ag_vextent_near( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */ + xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */ + xfs_btree_cur_t *cnt_cur; /* cursor for count btree */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent_near"; +#endif + xfs_agblock_t gtbno; /* start bno of right side entry */ + xfs_agblock_t gtbnoa; /* aligned ... */ + xfs_extlen_t gtdiff; /* difference to right side entry */ + xfs_extlen_t gtlen; /* length of right side entry */ + xfs_extlen_t gtlena; /* aligned ... */ + xfs_agblock_t gtnew; /* useful start bno of right side */ + int error; /* error code */ + int i; /* result code, temporary */ + int j; /* result code, temporary */ + xfs_agblock_t ltbno; /* start bno of left side entry */ + xfs_agblock_t ltbnoa; /* aligned ... */ + xfs_extlen_t ltdiff; /* difference to left side entry */ + /*REFERENCED*/ + xfs_agblock_t ltend; /* end bno of left side entry */ + xfs_extlen_t ltlen; /* length of left side entry */ + xfs_extlen_t ltlena; /* aligned ... */ + xfs_agblock_t ltnew; /* useful start bno of left side */ + xfs_extlen_t rlen; /* length of returned extent */ +#if defined(DEBUG) && defined(__KERNEL__) + /* + * Randomly don't execute the first algorithm. 
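+ * (dofirst is one random bit, so on DEBUG kernels roughly half of
+ * the calls skip the last-block scan and exercise the left/right
+ * search instead; this is purely a coverage aid.)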
+ */ + static int seed; /* randomizing seed value */ + int dofirst; /* set to do first algorithm */ + timespec_t now; /* current time */ + + if (!seed) { + nanotime(&now); + seed = (int)now.tv_sec ^ (int)now.tv_nsec; + } + dofirst = random() & 1; +#endif + /* + * Get a cursor for the by-size btree. + */ + cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_CNT, 0, 0); + ltlen = 0; + bno_cur_lt = bno_cur_gt = NULL; + /* + * See if there are any free extents as big as maxlen. + */ + if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i))) + goto error0; + /* + * If none, then pick up the last entry in the tree unless the + * tree is empty. + */ + if (!i) { + if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, <bno, + <len, &i))) + goto error0; + if (i == 0 || ltlen == 0) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + return 0; + } + ASSERT(i == 1); + } + args->wasfromfl = 0; + /* + * First algorithm. + * If the requested extent is large wrt the freespaces available + * in this a.g., then the cursor will be pointing to a btree entry + * near the right edge of the tree. If it's in the last btree leaf + * block, then we just examine all the entries in that block + * that are big enough, and pick the best one. + * This is written as a while loop so we can break out of it, + * but we never loop back to the top. + */ + while (xfs_btree_islastblock(cnt_cur, 0)) { + xfs_extlen_t bdiff; + int besti=0; + xfs_extlen_t blen=0; + xfs_agblock_t bnew=0; + +#if defined(DEBUG) && defined(__KERNEL__) + if (!dofirst) + break; +#endif + /* + * Start from the entry that lookup found, sequence through + * all larger free blocks. If we're actually pointing at a + * record smaller than maxlen, go to the start of this block, + * and skip all those smaller than minlen. + */ + if (ltlen || args->alignment > 1) { + cnt_cur->bc_ptrs[0] = 1; + do { + if ((error = xfs_alloc_get_rec(cnt_cur, <bno, + <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (ltlen >= args->minlen) + break; + if ((error = xfs_alloc_increment(cnt_cur, 0, &i))) + goto error0; + } while (i); + ASSERT(ltlen >= args->minlen); + if (!i) + break; + } + i = cnt_cur->bc_ptrs[0]; + for (j = 1, blen = 0, bdiff = 0; + !error && j && (blen < args->maxlen || bdiff > 0); + error = xfs_alloc_increment(cnt_cur, 0, &j)) { + /* + * For each entry, decide if it's better than + * the previous best entry. + */ + if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (!xfs_alloc_compute_aligned(ltbno, ltlen, + args->alignment, args->minlen, + <bnoa, <lena)) + continue; + args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); + xfs_alloc_fix_len(args); + ASSERT(args->len >= args->minlen); + if (args->len < blen) + continue; + ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, + args->alignment, ltbno, ltlen, <new); + if (ltnew != NULLAGBLOCK && + (args->len > blen || ltdiff < bdiff)) { + bdiff = ltdiff; + bnew = ltnew; + blen = args->len; + besti = cnt_cur->bc_ptrs[0]; + } + } + /* + * It didn't work. We COULD be in a case where + * there's a good record somewhere, so try again. + */ + if (blen == 0) + break; + /* + * Point at the best entry, and retrieve it again. 
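+ * (besti is the by-size leaf slot saved while scanning; restoring
+ * bc_ptrs[0] repositions the cursor so xfs_alloc_get_rec() can
+ * re-read the winning (ltbno, ltlen) record.)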
+ */ + cnt_cur->bc_ptrs[0] = besti; + if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + ltend = ltbno + ltlen; + ASSERT(ltend <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT)); + args->len = blen; + if (!xfs_alloc_fix_minleft(args)) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + TRACE_ALLOC("nominleft", args); + return 0; + } + blen = args->len; + /* + * We are allocating starting at bnew for blen blocks. + */ + args->agbno = bnew; + ASSERT(bnew >= ltbno); + ASSERT(bnew + blen <= ltend); + /* + * Set up a cursor for the by-bno tree. + */ + bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, + args->agbp, args->agno, XFS_BTNUM_BNO, 0, 0); + /* + * Fix up the btree entries. + */ + if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, + ltlen, bnew, blen, XFSA_FIXUP_CNT_OK))) + goto error0; + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); + TRACE_ALLOC("first", args); + return 0; + } + /* + * Second algorithm. + * Search in the by-bno tree to the left and to the right + * simultaneously, until in each case we find a space big enough, + * or run into the edge of the tree. When we run into the edge, + * we deallocate that cursor. + * If both searches succeed, we compare the two spaces and pick + * the better one. + * With alignment, it's possible for both to fail; the upper + * level algorithm that picks allocation groups for allocations + * is not supposed to do this. + */ + /* + * Allocate and initialize the cursor for the leftward search. + */ + bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_BNO, 0, 0); + /* + * Lookup <= bno to find the leftward search's starting point. + */ + if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i))) + goto error0; + if (!i) { + /* + * Didn't find anything; use this cursor for the rightward + * search. + */ + bno_cur_gt = bno_cur_lt; + bno_cur_lt = 0; + } + /* + * Found something. Duplicate the cursor for the rightward search. + */ + else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt))) + goto error0; + /* + * Increment the cursor, so we will point at the entry just right + * of the leftward entry if any, or to the leftmost entry. + */ + if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i))) + goto error0; + if (!i) { + /* + * It failed, there are no rightward entries. + */ + xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + /* + * Loop going left with the leftward cursor, right with the + * rightward cursor, until either both directions give up or + * we find an entry at least as big as minlen. 
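+ * (A cursor that walks off its end of the tree is deleted and
+ * NULLed out below, which is what eventually terminates the loop
+ * when nothing suitable exists on either side.)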
+ */ + do { + if (bno_cur_lt) { + if ((error = xfs_alloc_get_rec(bno_cur_lt, <bno, <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (xfs_alloc_compute_aligned(ltbno, ltlen, + args->alignment, args->minlen, + <bnoa, <lena)) + break; + if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i))) + goto error0; + if (!i) { + xfs_btree_del_cursor(bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + } + } + if (bno_cur_gt) { + if ((error = xfs_alloc_get_rec(bno_cur_gt, >bno, >len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (xfs_alloc_compute_aligned(gtbno, gtlen, + args->alignment, args->minlen, + >bnoa, >lena)) + break; + if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i))) + goto error0; + if (!i) { + xfs_btree_del_cursor(bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + } + } while (bno_cur_lt || bno_cur_gt); + /* + * Got both cursors still active, need to find better entry. + */ + if (bno_cur_lt && bno_cur_gt) { + /* + * Left side is long enough, look for a right side entry. + */ + if (ltlena >= args->minlen) { + /* + * Fix up the length. + */ + args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); + xfs_alloc_fix_len(args); + rlen = args->len; + ltdiff = xfs_alloc_compute_diff(args->agbno, rlen, + args->alignment, ltbno, ltlen, <new); + /* + * Not perfect. + */ + if (ltdiff) { + /* + * Look until we find a better one, run out of + * space, or run off the end. + */ + while (bno_cur_lt && bno_cur_gt) { + if ((error = xfs_alloc_get_rec( + bno_cur_gt, >bno, + >len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + xfs_alloc_compute_aligned(gtbno, gtlen, + args->alignment, args->minlen, + >bnoa, >lena); + /* + * The left one is clearly better. + */ + if (gtbnoa >= args->agbno + ltdiff) { + xfs_btree_del_cursor( + bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + break; + } + /* + * If we reach a big enough entry, + * compare the two and pick the best. + */ + if (gtlena >= args->minlen) { + args->len = + XFS_EXTLEN_MIN(gtlena, + args->maxlen); + xfs_alloc_fix_len(args); + rlen = args->len; + gtdiff = xfs_alloc_compute_diff( + args->agbno, rlen, + args->alignment, + gtbno, gtlen, >new); + /* + * Right side is better. + */ + if (gtdiff < ltdiff) { + xfs_btree_del_cursor( + bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + } + /* + * Left side is better. + */ + else { + xfs_btree_del_cursor( + bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + break; + } + /* + * Fell off the right end. + */ + if ((error = xfs_alloc_increment( + bno_cur_gt, 0, &i))) + goto error0; + if (!i) { + xfs_btree_del_cursor( + bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + break; + } + } + } + /* + * The left side is perfect, trash the right side. + */ + else { + xfs_btree_del_cursor(bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + } + /* + * It's the right side that was found first, look left. + */ + else { + /* + * Fix up the length. + */ + args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen); + xfs_alloc_fix_len(args); + rlen = args->len; + gtdiff = xfs_alloc_compute_diff(args->agbno, rlen, + args->alignment, gtbno, gtlen, >new); + /* + * Right side entry isn't perfect. + */ + if (gtdiff) { + /* + * Look until we find a better one, run out of + * space, or run off the end. 
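+ * (This leftward scan mirrors the rightward scan above with the
+ * comparisons reversed; it stops as soon as a candidate can no
+ * longer beat the gtdiff already in hand.)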
+ */ + while (bno_cur_lt && bno_cur_gt) { + if ((error = xfs_alloc_get_rec( + bno_cur_lt, <bno, + <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + xfs_alloc_compute_aligned(ltbno, ltlen, + args->alignment, args->minlen, + <bnoa, <lena); + /* + * The right one is clearly better. + */ + if (ltbnoa <= args->agbno - gtdiff) { + xfs_btree_del_cursor( + bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + break; + } + /* + * If we reach a big enough entry, + * compare the two and pick the best. + */ + if (ltlena >= args->minlen) { + args->len = XFS_EXTLEN_MIN( + ltlena, args->maxlen); + xfs_alloc_fix_len(args); + rlen = args->len; + ltdiff = xfs_alloc_compute_diff( + args->agbno, rlen, + args->alignment, + ltbno, ltlen, <new); + /* + * Left side is better. + */ + if (ltdiff < gtdiff) { + xfs_btree_del_cursor( + bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + /* + * Right side is better. + */ + else { + xfs_btree_del_cursor( + bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + } + break; + } + /* + * Fell off the left end. + */ + if ((error = xfs_alloc_decrement( + bno_cur_lt, 0, &i))) + goto error0; + if (!i) { + xfs_btree_del_cursor(bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + break; + } + } + } + /* + * The right side is perfect, trash the left side. + */ + else { + xfs_btree_del_cursor(bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + } + } + } + /* + * If we couldn't get anything, give up. + */ + if (bno_cur_lt == NULL && bno_cur_gt == NULL) { + TRACE_ALLOC("neither", args); + args->agbno = NULLAGBLOCK; + return 0; + } + /* + * At this point we have selected a freespace entry, either to the + * left or to the right. If it's on the right, copy all the + * useful variables to the "left" set so we only have one + * copy of this code. + */ + if (bno_cur_gt) { + bno_cur_lt = bno_cur_gt; + bno_cur_gt = NULL; + ltbno = gtbno; + ltbnoa = gtbnoa; + ltlen = gtlen; + ltlena = gtlena; + j = 1; + } else + j = 0; + /* + * Fix up the length and compute the useful address. + */ + ltend = ltbno + ltlen; + args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); + xfs_alloc_fix_len(args); + if (!xfs_alloc_fix_minleft(args)) { + TRACE_ALLOC("nominleft", args); + xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + return 0; + } + rlen = args->len; + (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno, + ltlen, <new); + ASSERT(ltnew >= ltbno); + ASSERT(ltnew + rlen <= ltend); + ASSERT(ltnew + rlen <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT)); + args->agbno = ltnew; + if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, + ltnew, rlen, XFSA_FIXUP_BNO_OK))) + goto error0; + TRACE_ALLOC(j ? "gt" : "lt", args); + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); + return 0; + + error0: + TRACE_ALLOC("error", args); + if (cnt_cur != NULL) + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); + if (bno_cur_lt != NULL) + xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR); + if (bno_cur_gt != NULL) + xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR); + return error; +} + +/* + * Allocate a variable extent anywhere in the allocation group agno. + * Extent's length (returned in len) will be between minlen and maxlen, + * and of the form k * prod + mod unless there's nothing that large. + * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. 
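+ * (Worked example with illustrative numbers, not from the code:
+ * with prod == 4 and mod == 1, a candidate of 11 usable blocks is
+ * trimmed to len == 9, since 9 == 2*4 + 1 is the largest value of
+ * that form which fits.)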
+ */ +STATIC int /* error */ +xfs_alloc_ag_vextent_size( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_btree_cur_t *bno_cur; /* cursor for bno btree */ + xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */ + int error; /* error result */ + xfs_agblock_t fbno; /* start of found freespace */ + xfs_extlen_t flen; /* length of found freespace */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent_size"; +#endif + int i; /* temp status variable */ + xfs_agblock_t rbno; /* returned block number */ + xfs_extlen_t rlen; /* length of returned extent */ + + /* + * Allocate and initialize a cursor for the by-size btree. + */ + cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_CNT, 0, 0); + bno_cur = NULL; + /* + * Look for an entry >= maxlen+alignment-1 blocks. + */ + if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, + args->maxlen + args->alignment - 1, &i))) + goto error0; + /* + * If none, then pick up the last entry in the tree unless the + * tree is empty. + */ + if (!i) { + if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno, + &flen, &i))) + goto error0; + if (i == 0 || flen == 0) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + TRACE_ALLOC("noentry", args); + return 0; + } + ASSERT(i == 1); + } + /* + * There's a freespace as big as maxlen+alignment-1, get it. + */ + else { + if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + /* + * In the first case above, we got the last entry in the + * by-size btree. Now we check to see if the space hits maxlen + * once aligned; if not, we search left for something better. + * This can't happen in the second case above. + */ + xfs_alloc_compute_aligned(fbno, flen, args->alignment, args->minlen, + &rbno, &rlen); + rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); + XFS_WANT_CORRUPTED_GOTO(rlen == 0 || + (rlen <= flen && rbno + rlen <= fbno + flen), error0); + if (rlen < args->maxlen) { + xfs_agblock_t bestfbno; + xfs_extlen_t bestflen; + xfs_agblock_t bestrbno; + xfs_extlen_t bestrlen; + + bestrlen = rlen; + bestrbno = rbno; + bestflen = flen; + bestfbno = fbno; + for (;;) { + if ((error = xfs_alloc_decrement(cnt_cur, 0, &i))) + goto error0; + if (i == 0) + break; + if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, + &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (flen < bestrlen) + break; + xfs_alloc_compute_aligned(fbno, flen, args->alignment, + args->minlen, &rbno, &rlen); + rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); + XFS_WANT_CORRUPTED_GOTO(rlen == 0 || + (rlen <= flen && rbno + rlen <= fbno + flen), + error0); + if (rlen > bestrlen) { + bestrlen = rlen; + bestrbno = rbno; + bestflen = flen; + bestfbno = fbno; + if (rlen == args->maxlen) + break; + } + } + if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen, + &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + rlen = bestrlen; + rbno = bestrbno; + flen = bestflen; + fbno = bestfbno; + } + args->wasfromfl = 0; + /* + * Fix up the length. + */ + args->len = rlen; + xfs_alloc_fix_len(args); + if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + TRACE_ALLOC("nominleft", args); + args->agbno = NULLAGBLOCK; + return 0; + } + rlen = args->len; + XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0); + /* + * Allocate and initialize a cursor for the by-block tree. 
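+ * (Both btrees describe the same freespace, so the chosen extent
+ * must be removed from each in step; xfs_alloc_fixup_trees() below
+ * takes both cursors and does exactly that.)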
+ */ + bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_BNO, 0, 0); + if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, + rbno, rlen, XFSA_FIXUP_CNT_OK))) + goto error0; + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + cnt_cur = bno_cur = NULL; + args->len = rlen; + args->agbno = rbno; + XFS_WANT_CORRUPTED_GOTO( + args->agbno + args->len <= + INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT), + error0); + TRACE_ALLOC("normal", args); + return 0; + +error0: + TRACE_ALLOC("error", args); + if (cnt_cur) + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); + if (bno_cur) + xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Deal with the case where only small freespaces remain. + * Either return the contents of the last freespace record, + * or allocate space from the freelist if there is nothing in the tree. + */ +STATIC int /* error */ +xfs_alloc_ag_vextent_small( + xfs_alloc_arg_t *args, /* allocation argument structure */ + xfs_btree_cur_t *ccur, /* by-size cursor */ + xfs_agblock_t *fbnop, /* result block number */ + xfs_extlen_t *flenp, /* result length */ + int *stat) /* status: 0-freelist, 1-normal/none */ +{ + int error; + xfs_agblock_t fbno; + xfs_extlen_t flen; +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent_small"; +#endif + int i; + + if ((error = xfs_alloc_decrement(ccur, 0, &i))) + goto error0; + if (i) { + if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + /* + * Nothing in the btree, try the freelist. Make sure + * to respect minleft even when pulling from the + * freelist. + */ + else if (args->minlen == 1 && args->alignment == 1 && !args->isfl && + (INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_flcount, + ARCH_CONVERT) > args->minleft)) { + if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno))) + goto error0; + if (fbno != NULLAGBLOCK) { + if (args->userdata) { + xfs_buf_t *bp; + + bp = xfs_btree_get_bufs(args->mp, args->tp, + args->agno, fbno, 0); + xfs_trans_binval(args->tp, bp); + } + args->len = 1; + args->agbno = fbno; + XFS_WANT_CORRUPTED_GOTO( + args->agbno + args->len <= + INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT), + error0); + args->wasfromfl = 1; + TRACE_ALLOC("freelist", args); + *stat = 0; + return 0; + } + /* + * Nothing in the freelist. + */ + else + flen = 0; + } + /* + * Can't allocate from the freelist for some reason. + */ + else + flen = 0; + /* + * Can't do the allocation, give up. + */ + if (flen < args->minlen) { + args->agbno = NULLAGBLOCK; + TRACE_ALLOC("notenough", args); + flen = 0; + } + *fbnop = fbno; + *flenp = flen; + *stat = 1; + TRACE_ALLOC("normal", args); + return 0; + +error0: + TRACE_ALLOC("error", args); + return error; +} + +/* + * Free the extent starting at agno/bno for length. + */ +STATIC int /* error */ +xfs_free_ag_extent( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* buffer for a.g. 
freelist header */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t bno, /* starting block number */ + xfs_extlen_t len, /* length of extent */ + int isfl) /* set if is freelist blocks - no sb acctg */ +{ + xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */ + xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */ + int error; /* error return value */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_free_ag_extent"; +#endif + xfs_agblock_t gtbno; /* start of right neighbor block */ + xfs_extlen_t gtlen; /* length of right neighbor block */ + int haveleft; /* have a left neighbor block */ + int haveright; /* have a right neighbor block */ + int i; /* temp, result code */ + xfs_agblock_t ltbno; /* start of left neighbor block */ + xfs_extlen_t ltlen; /* length of left neighbor block */ + xfs_mount_t *mp; /* mount point struct for filesystem */ + xfs_agblock_t nbno; /* new starting block of freespace */ + xfs_extlen_t nlen; /* new length of freespace */ + + mp = tp->t_mountp; + /* + * Allocate and initialize a cursor for the by-block btree. + */ + bno_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO, 0, + 0); + cnt_cur = NULL; + /* + * Look for a neighboring block on the left (lower block numbers) + * that is contiguous with this space. + */ + if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft))) + goto error0; + if (haveleft) { + /* + * There is a block to our left. + */ + if ((error = xfs_alloc_get_rec(bno_cur, <bno, <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * It's not contiguous, though. + */ + if (ltbno + ltlen < bno) + haveleft = 0; + else { + /* + * If this failure happens the request to free this + * space was invalid, it's (partly) already free. + * Very bad. + */ + XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0); + } + } + /* + * Look for a neighboring block on the right (higher block numbers) + * that is contiguous with this space. + */ + if ((error = xfs_alloc_increment(bno_cur, 0, &haveright))) + goto error0; + if (haveright) { + /* + * There is a block to our right. + */ + if ((error = xfs_alloc_get_rec(bno_cur, >bno, >len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * It's not contiguous, though. + */ + if (bno + len < gtbno) + haveright = 0; + else { + /* + * If this failure happens the request to free this + * space was invalid, it's (partly) already free. + * Very bad. + */ + XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0); + } + } + /* + * Now allocate and initialize a cursor for the by-size tree. + */ + cnt_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT, 0, + 0); + /* + * Have both left and right contiguous neighbors. + * Merge all three into a single free block. + */ + if (haveleft && haveright) { + /* + * Delete the old by-size entry on the left. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_delete(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Delete the old by-size entry on the right. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_delete(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Delete the old by-block entry for the right block. 
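+ * (Only the right by-block record is deleted; the left record is
+ * kept and rewritten below to cover all three ranges,
+ * ltlen + len + gtlen, saving a delete/insert pair.)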
+ */ + if ((error = xfs_alloc_delete(bno_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Move the by-block cursor back to the left neighbor. + */ + if ((error = xfs_alloc_decrement(bno_cur, 0, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); +#ifdef DEBUG + /* + * Check that this is the right record: delete didn't + * mangle the cursor. + */ + { + xfs_agblock_t xxbno; + xfs_extlen_t xxlen; + + if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen, + &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO( + i == 1 && xxbno == ltbno && xxlen == ltlen, + error0); + } +#endif + /* + * Update remaining by-block entry to the new, joined block. + */ + nbno = ltbno; + nlen = len + ltlen + gtlen; + if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) + goto error0; + } + /* + * Have only a left contiguous neighbor. + * Merge it together with the new freespace. + */ + else if (haveleft) { + /* + * Delete the old by-size entry on the left. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_delete(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Back up the by-block cursor to the left neighbor, and + * update its length. + */ + if ((error = xfs_alloc_decrement(bno_cur, 0, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + nbno = ltbno; + nlen = len + ltlen; + if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) + goto error0; + } + /* + * Have only a right contiguous neighbor. + * Merge it together with the new freespace. + */ + else if (haveright) { + /* + * Delete the old by-size entry on the right. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_delete(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Update the starting block and length of the right + * neighbor in the by-block tree. + */ + nbno = bno; + nlen = len + gtlen; + if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) + goto error0; + } + /* + * No contiguous neighbors. + * Insert the new freespace into the by-block tree. + */ + else { + nbno = bno; + nlen = len; + if ((error = xfs_alloc_insert(bno_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + bno_cur = NULL; + /* + * In all cases we need to insert the new freespace in the by-size tree. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 0, error0); + if ((error = xfs_alloc_insert(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + cnt_cur = NULL; + /* + * Update the freespace totals in the ag and superblock. + */ + { + xfs_agf_t *agf; + xfs_perag_t *pag; /* per allocation group data */ + + agf = XFS_BUF_TO_AGF(agbp); + pag = &mp->m_perag[agno]; + INT_MOD(agf->agf_freeblks, ARCH_CONVERT, len); + xfs_trans_agblocks_delta(tp, len); + pag->pagf_freeblks += len; + XFS_WANT_CORRUPTED_GOTO( + INT_GET(agf->agf_freeblks, ARCH_CONVERT) + <= INT_GET(agf->agf_length, ARCH_CONVERT), + error0); + TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); + if (!isfl) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len); + XFS_STATS_INC(xfsstats.xs_freex); + XFS_STATS_ADD(xfsstats.xs_freeb, len); + } + TRACE_FREE(haveleft ? 
+ (haveright ? "both" : "left") :
+ (haveright ? "right" : "none"),
+ agno, bno, len, isfl);
+
+ /*
+ * Since blocks move to the free list without the coordination
+ * used in xfs_bmap_finish, we can't allow the block to be available
+ * for reallocation and non-transaction writing (user data)
+ * until we know that the transaction that moved it to the free
+ * list is permanently on disk. We track the blocks by declaring
+ * these blocks as "busy"; the busy list is maintained on a per-ag
+ * basis and each transaction records which entries should be removed
+ * when the iclog commits to disk. If a busy block is allocated,
+ * the iclog is pushed up to the LSN that freed the block.
+ */
+ xfs_alloc_mark_busy(tp, agno, bno, len);
+ return 0;
+
+ error0:
+ TRACE_FREE("error", agno, bno, len, isfl);
+ if (bno_cur)
+ xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
+ if (cnt_cur)
+ xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
+ return error;
+}
+
+/*
+ * Visible (exported) allocation/free functions.
+ * Some of these are used just by xfs_alloc_btree.c and this file.
+ */
+
+/*
+ * Compute and fill in value of m_ag_maxlevels.
+ */
+void
+xfs_alloc_compute_maxlevels(
+ xfs_mount_t *mp) /* file system mount structure */
+{
+ int level;
+ uint maxblocks;
+ uint maxleafents;
+ int minleafrecs;
+ int minnoderecs;
+
+ maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
+ minleafrecs = mp->m_alloc_mnr[0];
+ minnoderecs = mp->m_alloc_mnr[1];
+ maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
+ for (level = 1; maxblocks > 1; level++)
+ maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
+ mp->m_ag_maxlevels = level;
+}
+
+/*
+ * Decide whether to use this allocation group for this allocation.
+ * If so, fix up the btree freelist's size.
+ */
+STATIC int /* error */
+xfs_alloc_fix_freelist(
+ xfs_alloc_arg_t *args, /* allocation argument structure */
+ int flags) /* XFS_ALLOC_FLAG_... */
+{
+ xfs_buf_t *agbp; /* agf buffer pointer */
+ xfs_agf_t *agf; /* a.g. freespace structure pointer */
+ xfs_buf_t *agflbp;/* agfl buffer pointer */
+ xfs_agblock_t bno; /* freelist block */
+ xfs_extlen_t delta; /* new blocks needed in freelist */
+ int error; /* error result code */
+ xfs_extlen_t longest;/* longest extent in allocation group */
+ xfs_mount_t *mp; /* file system mount point structure */
+ xfs_extlen_t need; /* total blocks needed in freelist */
+ xfs_perag_t *pag; /* per-ag information structure */
+ xfs_alloc_arg_t targs; /* local allocation arguments */
+ xfs_trans_t *tp; /* transaction pointer */
+
+ mp = args->mp;
+
+ pag = args->pag;
+ tp = args->tp;
+ if (!pag->pagf_init) {
+ if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
+ &agbp)))
+ return error;
+ if (!pag->pagf_init) {
+ args->agbp = NULL;
+ return 0;
+ }
+ } else
+ agbp = NULL;
+
+ /* If this is a metadata-preferred pag and we are user data,
+ * then try somewhere else, unless we are being asked to
+ * try harder at this point.
+ */
+ if (pag->pagf_metadata && args->userdata && flags) {
+ args->agbp = NULL;
+ return 0;
+ }
+
+ need = XFS_MIN_FREELIST_PAG(pag, mp);
+ delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
+ /*
+ * If it looks like there isn't a long enough extent, or enough
+ * total blocks, reject it.
+ */
+ longest = (pag->pagf_longest > delta) ?
+ (pag->pagf_longest - delta) : + (pag->pagf_flcount > 0 || pag->pagf_longest > 0); + if (args->minlen + args->alignment + args->minalignslop - 1 > longest || + (args->minleft && + (int)(pag->pagf_freeblks + pag->pagf_flcount - + need - args->total) < + (int)args->minleft)) { + if (agbp) + xfs_trans_brelse(tp, agbp); + args->agbp = NULL; + return 0; + } + /* + * Get the a.g. freespace buffer. + * Can fail if we're not blocking on locks, and it's held. + */ + if (agbp == NULL) { + if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags, + &agbp))) + return error; + if (agbp == NULL) { + args->agbp = NULL; + return 0; + } + } + /* + * Figure out how many blocks we should have in the freelist. + */ + agf = XFS_BUF_TO_AGF(agbp); + need = XFS_MIN_FREELIST(agf, mp); + delta = need > INT_GET(agf->agf_flcount, ARCH_CONVERT) ? + (need - INT_GET(agf->agf_flcount, ARCH_CONVERT)) : 0; + /* + * If there isn't enough total or single-extent, reject it. + */ + longest = INT_GET(agf->agf_longest, ARCH_CONVERT); + longest = (longest > delta) ? (longest - delta) : + (INT_GET(agf->agf_flcount, ARCH_CONVERT) > 0 || longest > 0); + if (args->minlen + args->alignment + args->minalignslop - 1 > longest || + (args->minleft && + (int)(INT_GET(agf->agf_freeblks, ARCH_CONVERT) + + INT_GET(agf->agf_flcount, ARCH_CONVERT) - need - args->total) < + (int)args->minleft)) { + xfs_trans_brelse(tp, agbp); + args->agbp = NULL; + return 0; + } + /* + * Make the freelist shorter if it's too long. + */ + while (INT_GET(agf->agf_flcount, ARCH_CONVERT) > need) { + xfs_buf_t *bp; + + if ((error = xfs_alloc_get_freelist(tp, agbp, &bno))) + return error; + if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1))) + return error; + bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); + xfs_trans_binval(tp, bp); + } + /* + * Initialize the args structure. + */ + targs.tp = tp; + targs.mp = mp; + targs.agbp = agbp; + targs.agno = args->agno; + targs.mod = targs.minleft = targs.wasdel = targs.userdata = + targs.minalignslop = 0; + targs.alignment = targs.minlen = targs.prod = targs.isfl = 1; + targs.type = XFS_ALLOCTYPE_THIS_AG; + targs.pag = pag; + if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp))) + return error; + /* + * Make the freelist longer if it's too short. + */ + while (INT_GET(agf->agf_flcount, ARCH_CONVERT) < need) { + targs.agbno = 0; + targs.maxlen = need - INT_GET(agf->agf_flcount, ARCH_CONVERT); + /* + * Allocate as many blocks as possible at once. + */ + if ((error = xfs_alloc_ag_vextent(&targs))) + return error; + /* + * Stop if we run out. Won't happen if callers are obeying + * the restrictions correctly. Can happen for free calls + * on a completely full ag. + */ + if (targs.agbno == NULLAGBLOCK) + break; + /* + * Put each allocated block on the list. + */ + for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) { + if ((error = xfs_alloc_put_freelist(tp, agbp, agflbp, + bno))) + return error; + } + } + args->agbp = agbp; + return 0; +} + +/* + * Get a block from the freelist. + * Returns with the buffer for the block gotten. + */ +int /* error */ +xfs_alloc_get_freelist( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* buffer containing the agf structure */ + xfs_agblock_t *bnop) /* block address retrieved from freelist */ +{ + xfs_agf_t *agf; /* a.g. freespace structure */ + xfs_agfl_t *agfl; /* a.g. freelist structure */ + xfs_buf_t *agflbp;/* buffer for a.g. 
freelist structure */ + xfs_agblock_t bno; /* block number returned */ + int error; +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_get_freelist"; +#endif + xfs_mount_t *mp; /* mount structure */ + xfs_perag_t *pag; /* per allocation group data */ + + agf = XFS_BUF_TO_AGF(agbp); + /* + * Freelist is empty, give up. + */ + if (INT_ISZERO(agf->agf_flcount, ARCH_CONVERT)) { + *bnop = NULLAGBLOCK; + return 0; + } + /* + * Read the array of free blocks. + */ + mp = tp->t_mountp; + if ((error = xfs_alloc_read_agfl(mp, tp, + INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp))) + return error; + agfl = XFS_BUF_TO_AGFL(agflbp); + /* + * Get the block number and update the data structures. + */ + bno = INT_GET(agfl->agfl_bno[INT_GET(agf->agf_flfirst, ARCH_CONVERT)], ARCH_CONVERT); + INT_MOD(agf->agf_flfirst, ARCH_CONVERT, 1); + xfs_trans_brelse(tp, agflbp); + if (INT_GET(agf->agf_flfirst, ARCH_CONVERT) == XFS_AGFL_SIZE(mp)) + INT_ZERO(agf->agf_flfirst, ARCH_CONVERT); + pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)]; + INT_MOD(agf->agf_flcount, ARCH_CONVERT, -1); + xfs_trans_agflist_delta(tp, -1); + pag->pagf_flcount--; + TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT); + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT); + *bnop = bno; + + /* + * As blocks are freed, they are added to the per-ag busy list + * and remain there until the freeing transaction is committed to + * disk. Now that we have allocated blocks, this list must be + * searched to see if a block is being reused. If one is, then + * the freeing transaction must be pushed to disk NOW by forcing + * to disk all iclogs up that transaction's LSN. + */ + xfs_alloc_search_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); + return 0; +} + +/* + * Log the given fields from the agf structure. + */ +void +xfs_alloc_log_agf( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* buffer for a.g. freelist header */ + int fields) /* mask of fields to be logged (XFS_AGF_...) */ +{ + int first; /* first byte offset */ + int last; /* last byte offset */ + static const short offsets[] = { + offsetof(xfs_agf_t, agf_magicnum), + offsetof(xfs_agf_t, agf_versionnum), + offsetof(xfs_agf_t, agf_seqno), + offsetof(xfs_agf_t, agf_length), + offsetof(xfs_agf_t, agf_roots[0]), + offsetof(xfs_agf_t, agf_levels[0]), + offsetof(xfs_agf_t, agf_flfirst), + offsetof(xfs_agf_t, agf_fllast), + offsetof(xfs_agf_t, agf_flcount), + offsetof(xfs_agf_t, agf_freeblks), + offsetof(xfs_agf_t, agf_longest), + sizeof(xfs_agf_t) + }; + + xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); + xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); +} + +/* + * Interface for inode allocation to force the pag data to be initialized. + */ +int /* error */ +xfs_alloc_pagf_init( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + int flags) /* XFS_ALLOC_FLAGS_... */ +{ + xfs_buf_t *bp; + int error; + + if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp))) + return error; + if (bp) + xfs_trans_brelse(tp, bp); + return 0; +} + +/* + * Put the block on the freelist for the allocation group. + */ +int /* error */ +xfs_alloc_put_freelist( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* buffer for a.g. freelist header */ + xfs_buf_t *agflbp,/* buffer for a.g. free block array */ + xfs_agblock_t bno) /* block being freed */ +{ + xfs_agf_t *agf; /* a.g. 
freespace structure */ + xfs_agfl_t *agfl; /* a.g. free block array */ + xfs_agblock_t *blockp;/* pointer to array entry */ + int error; +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_put_freelist"; +#endif + xfs_mount_t *mp; /* mount structure */ + xfs_perag_t *pag; /* per allocation group data */ + + agf = XFS_BUF_TO_AGF(agbp); + mp = tp->t_mountp; + + if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp, + INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp))) + return error; + agfl = XFS_BUF_TO_AGFL(agflbp); + INT_MOD(agf->agf_fllast, ARCH_CONVERT, 1); + if (INT_GET(agf->agf_fllast, ARCH_CONVERT) == XFS_AGFL_SIZE(mp)) + INT_ZERO(agf->agf_fllast, ARCH_CONVERT); + pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)]; + INT_MOD(agf->agf_flcount, ARCH_CONVERT, 1); + xfs_trans_agflist_delta(tp, 1); + pag->pagf_flcount++; + ASSERT(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp)); + blockp = &agfl->agfl_bno[INT_GET(agf->agf_fllast, ARCH_CONVERT)]; + INT_SET(*blockp, ARCH_CONVERT, bno); + TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); + xfs_trans_log_buf(tp, agflbp, + (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl), + (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl + + sizeof(xfs_agblock_t) - 1)); + return 0; +} + +/* + * Read in the allocation group header (free/alloc section). + */ +int /* error */ +xfs_alloc_read_agf( + xfs_mount_t *mp, /* mount point structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + int flags, /* XFS_ALLOC_FLAG_... */ + xfs_buf_t **bpp) /* buffer for the ag freelist header */ +{ + xfs_agf_t *agf; /* ag freelist header */ + int agf_ok; /* set if agf is consistent */ + xfs_buf_t *bp; /* return value */ + xfs_perag_t *pag; /* per allocation group data */ + int error; + + ASSERT(agno != NULLAGNUMBER); + error = xfs_trans_read_buf( + mp, tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), + (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0U, + &bp); + if (error) + return error; + ASSERT(!bp || !XFS_BUF_GETERROR(bp)); + if (!bp) { + *bpp = NULL; + return 0; + } + /* + * Validate the magic number of the agf block. 
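+ * (The agf_ok expression below checks more than the magic: the
+ * version number, that freeblks fits within the a.g. length, and
+ * that the freelist indices and count fit in XFS_AGFL_SIZE.)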
+ */ + agf = XFS_BUF_TO_AGF(bp); + agf_ok = + INT_GET(agf->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC && + XFS_AGF_GOOD_VERSION( + INT_GET(agf->agf_versionnum, ARCH_CONVERT)) && + INT_GET(agf->agf_freeblks, ARCH_CONVERT) <= + INT_GET(agf->agf_length, ARCH_CONVERT) && + INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) && + INT_GET(agf->agf_fllast, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) && + INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp); + if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF, + XFS_RANDOM_ALLOC_READ_AGF))) { + XFS_CORRUPTION_ERROR("xfs_alloc_read_agf", + XFS_ERRLEVEL_LOW, mp, agf); + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); + } + pag = &mp->m_perag[agno]; + if (!pag->pagf_init) { + pag->pagf_freeblks = INT_GET(agf->agf_freeblks, ARCH_CONVERT); + pag->pagf_flcount = INT_GET(agf->agf_flcount, ARCH_CONVERT); + pag->pagf_longest = INT_GET(agf->agf_longest, ARCH_CONVERT); + pag->pagf_levels[XFS_BTNUM_BNOi] = + INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT); + pag->pagf_levels[XFS_BTNUM_CNTi] = + INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT); + spinlock_init(&pag->pagb_lock, "xfspagb"); + pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS * + sizeof(xfs_perag_busy_t), KM_SLEEP); + pag->pagf_init = 1; + } +#ifdef DEBUG + else if (!XFS_FORCED_SHUTDOWN(mp)) { + ASSERT(pag->pagf_freeblks == INT_GET(agf->agf_freeblks, ARCH_CONVERT)); + ASSERT(pag->pagf_flcount == INT_GET(agf->agf_flcount, ARCH_CONVERT)); + ASSERT(pag->pagf_longest == INT_GET(agf->agf_longest, ARCH_CONVERT)); + ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] == + INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT)); + ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] == + INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT)); + } +#endif + XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF); + *bpp = bp; + return 0; +} + +/* + * Allocate an extent (variable-size). + * Depending on the allocation type, we either look in a single allocation + * group or loop over the allocation groups to find the result. + */ +int /* error */ +xfs_alloc_vextent( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_agblock_t agsize; /* allocation group size */ + int error; + int flags; /* XFS_ALLOC_FLAG_... locking flags */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_vextent"; +#endif + xfs_extlen_t minleft;/* minimum left value, temp copy */ + xfs_mount_t *mp; /* mount structure pointer */ + xfs_agnumber_t sagno; /* starting allocation group number */ + xfs_alloctype_t type; /* input allocation type */ + int bump_rotor = 0; + int no_min = 0; + + mp = args->mp; + type = args->otype = args->type; + args->agbno = NULLAGBLOCK; + /* + * Just fix this up, for the case where the last a.g. is shorter + * (or there's only one a.g.) and the caller couldn't easily figure + * that out (xfs_bmap_alloc). 
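+ * (Concretely: maxlen is clamped to sb_agblocks below, since a
+ * single extent can never span allocation groups.)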
+ */ + agsize = mp->m_sb.sb_agblocks; + if (args->maxlen > agsize) + args->maxlen = agsize; + if (args->alignment == 0) + args->alignment = 1; + ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount); + ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize); + ASSERT(args->minlen <= args->maxlen); + ASSERT(args->minlen <= agsize); + ASSERT(args->mod < args->prod); + if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount || + XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize || + args->minlen > args->maxlen || args->minlen > agsize || + args->mod >= args->prod) { + args->fsbno = NULLFSBLOCK; + TRACE_ALLOC("badargs", args); + return 0; + } + minleft = args->minleft; + + switch (type) { + case XFS_ALLOCTYPE_THIS_AG: + case XFS_ALLOCTYPE_NEAR_BNO: + case XFS_ALLOCTYPE_THIS_BNO: + /* + * These three force us into a single a.g. + */ + args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); + down_read(&mp->m_peraglock); + args->pag = &mp->m_perag[args->agno]; + args->minleft = 0; + error = xfs_alloc_fix_freelist(args, 0); + args->minleft = minleft; + if (error) { + TRACE_ALLOC("nofix", args); + goto error0; + } + if (!args->agbp) { + up_read(&mp->m_peraglock); + TRACE_ALLOC("noagbp", args); + break; + } + args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); + if ((error = xfs_alloc_ag_vextent(args))) + goto error0; + up_read(&mp->m_peraglock); + break; + case XFS_ALLOCTYPE_START_BNO: + /* + * Try near allocation first, then anywhere-in-ag after + * the first a.g. fails. + */ + if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) && + (mp->m_flags & XFS_MOUNT_32BITINODES)) { + args->fsbno = XFS_AGB_TO_FSB(mp, mp->m_agfrotor, 0); + bump_rotor = 1; + } + args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); + args->type = XFS_ALLOCTYPE_NEAR_BNO; + /* FALLTHROUGH */ + case XFS_ALLOCTYPE_ANY_AG: + case XFS_ALLOCTYPE_START_AG: + case XFS_ALLOCTYPE_FIRST_AG: + /* + * Rotate through the allocation groups looking for a winner. + */ + if (type == XFS_ALLOCTYPE_ANY_AG) { + /* + * Start with the last place we left off. + */ + args->agno = sagno = mp->m_agfrotor; + args->type = XFS_ALLOCTYPE_THIS_AG; + flags = XFS_ALLOC_FLAG_TRYLOCK; + } else if (type == XFS_ALLOCTYPE_FIRST_AG) { + /* + * Start with allocation group given by bno. + */ + args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); + args->type = XFS_ALLOCTYPE_THIS_AG; + sagno = 0; + flags = 0; + } else { + if (type == XFS_ALLOCTYPE_START_AG) + args->type = XFS_ALLOCTYPE_THIS_AG; + /* + * Start with the given allocation group. + */ + args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno); + flags = XFS_ALLOC_FLAG_TRYLOCK; + } + /* + * Loop over allocation groups twice; first time with + * trylock set, second time without. + */ + down_read(&mp->m_peraglock); + for (;;) { + args->pag = &mp->m_perag[args->agno]; + if (no_min) args->minleft = 0; + error = xfs_alloc_fix_freelist(args, flags); + args->minleft = minleft; + if (error) { + TRACE_ALLOC("nofix", args); + goto error0; + } + /* + * If we get a buffer back then the allocation will fly. + */ + if (args->agbp) { + if ((error = xfs_alloc_ag_vextent(args))) + goto error0; + break; + } + TRACE_ALLOC("loopfailed", args); + /* + * Didn't work, figure out the next iteration. + */ + if (args->agno == sagno && + type == XFS_ALLOCTYPE_START_BNO) + args->type = XFS_ALLOCTYPE_THIS_AG; + if (++(args->agno) == mp->m_sb.sb_agcount) + args->agno = 0; + /* + * Reached the starting a.g., must either be done + * or switch to non-trylock mode. 
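+ * (The fallback ladder here: a trylock pass over all a.g.s, then
+ * a blocking pass with flags == 0, then a final pass with minleft
+ * ignored (no_min == 1), before giving up with NULLAGBLOCK.)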
+ */
+ if (args->agno == sagno) {
+ if (no_min == 1) {
+ args->agbno = NULLAGBLOCK;
+ TRACE_ALLOC("allfailed", args);
+ break;
+ }
+ if (flags == 0) {
+ no_min = 1;
+ } else {
+ flags = 0;
+ if (type == XFS_ALLOCTYPE_START_BNO) {
+ args->agbno = XFS_FSB_TO_AGBNO(mp,
+ args->fsbno);
+ args->type = XFS_ALLOCTYPE_NEAR_BNO;
+ }
+ }
+ }
+ }
+ up_read(&mp->m_peraglock);
+ if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG))
+ mp->m_agfrotor = (args->agno + 1) % mp->m_sb.sb_agcount;
+ break;
+ default:
+ ASSERT(0);
+ /* NOTREACHED */
+ }
+ if (args->agbno == NULLAGBLOCK)
+ args->fsbno = NULLFSBLOCK;
+ else {
+ args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
+#ifdef DEBUG
+ ASSERT(args->len >= args->minlen);
+ ASSERT(args->len <= args->maxlen);
+ ASSERT(args->agbno % args->alignment == 0);
+ XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
+ args->len);
+#endif
+ }
+ return 0;
+error0:
+ up_read(&mp->m_peraglock);
+ return error;
+}
+
+/*
+ * Free an extent.
+ * Just break up the extent address and hand off to xfs_free_ag_extent
+ * after fixing up the freelist.
+ */
+int /* error */
+xfs_free_extent(
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_fsblock_t bno, /* starting block number of extent */
+ xfs_extlen_t len) /* length of extent */
+{
+#ifdef DEBUG
+ xfs_agf_t *agf; /* a.g. freespace header */
+#endif
+ xfs_alloc_arg_t args; /* allocation argument structure */
+ int error;
+
+ ASSERT(len != 0);
+ args.tp = tp;
+ args.mp = tp->t_mountp;
+ args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
+ ASSERT(args.agno < args.mp->m_sb.sb_agcount);
+ args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
+ args.alignment = 1;
+ args.minlen = args.minleft = args.minalignslop = 0;
+ down_read(&args.mp->m_peraglock);
+ args.pag = &args.mp->m_perag[args.agno];
+ if ((error = xfs_alloc_fix_freelist(&args, 0)))
+ goto error0;
+#ifdef DEBUG
+ ASSERT(args.agbp != NULL);
+ agf = XFS_BUF_TO_AGF(args.agbp);
+ ASSERT(args.agbno + len <= INT_GET(agf->agf_length, ARCH_CONVERT));
+#endif
+ error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno,
+ len, 0);
+error0:
+ up_read(&args.mp->m_peraglock);
+ return error;
+}
+
+
+/*
+ * AG Busy list management
+ * The busy list contains block ranges that have been freed but whose
+ * transactions have not yet hit disk. If any block listed in a busy
+ * list is reused, the transaction that freed it must be forced to disk
+ * before continuing to use the block.
+ *
+ * xfs_alloc_mark_busy - add to the per-ag busy list
+ * xfs_alloc_clear_busy - remove an item from the per-ag busy list
+ */
+void
+xfs_alloc_mark_busy(xfs_trans_t *tp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len)
+{
+ xfs_mount_t *mp;
+ xfs_perag_busy_t *bsy;
+ int n;
+ SPLDECL(s);
+
+ mp = tp->t_mountp;
+ s = mutex_spinlock(&mp->m_perag[agno].pagb_lock);
+
+ /* search pagb_list for an open slot */
+ for (bsy = mp->m_perag[agno].pagb_list, n = 0;
+ n < XFS_PAGB_NUM_SLOTS;
+ bsy++, n++) {
+ if (bsy->busy_tp == NULL) {
+ break;
+ }
+ }
+
+ if (n < XFS_PAGB_NUM_SLOTS) {
+ bsy = &mp->m_perag[agno].pagb_list[n];
+ mp->m_perag[agno].pagb_count++;
+ TRACE_BUSY("xfs_alloc_mark_busy", "got", agno, bno, len, n, tp);
+ bsy->busy_start = bno;
+ bsy->busy_length = len;
+ bsy->busy_tp = tp;
+ xfs_trans_add_busy(tp, agno, n);
+ } else {
+ TRACE_BUSY("xfs_alloc_mark_busy", "FULL", agno, bno, len, -1, tp);
+ /*
+ * The busy list is full!
Since it is now not possible to + * track the free block, make this a synchronous transaction + * to insure that the block is not reused before this + * transaction commits. + */ + xfs_trans_set_sync(tp); + } + + mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); +} + +void +xfs_alloc_clear_busy(xfs_trans_t *tp, + xfs_agnumber_t agno, + int idx) +{ + xfs_mount_t *mp; + xfs_perag_busy_t *list; + SPLDECL(s); + + mp = tp->t_mountp; + + s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); + list = mp->m_perag[agno].pagb_list; + + ASSERT(idx < XFS_PAGB_NUM_SLOTS); + if (list[idx].busy_tp == tp) { + TRACE_UNBUSY("xfs_alloc_clear_busy", "found", agno, idx, tp); + list[idx].busy_tp = NULL; + mp->m_perag[agno].pagb_count--; + } else { + TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp); + } + + mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); +} + + +/* + * returns non-zero if any of (agno,bno):len is in a busy list + */ +int +xfs_alloc_search_busy(xfs_trans_t *tp, + xfs_agnumber_t agno, + xfs_agblock_t bno, + xfs_extlen_t len) +{ + xfs_mount_t *mp; + xfs_perag_busy_t *bsy; + int n; + xfs_agblock_t uend, bend; + xfs_lsn_t lsn; + int cnt; + SPLDECL(s); + + mp = tp->t_mountp; + + s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); + cnt = mp->m_perag[agno].pagb_count; + + uend = bno + len - 1; + + /* search pagb_list for this slot, skipping open slots */ + for (bsy = mp->m_perag[agno].pagb_list, n = 0; + cnt; bsy++, n++) { + + /* + * (start1,length1) within (start2, length2) + */ + if (bsy->busy_tp != NULL) { + bend = bsy->busy_start + bsy->busy_length - 1; + if ((bno > bend) || + (uend < bsy->busy_start)) { + cnt--; + } else { + TRACE_BUSYSEARCH("xfs_alloc_search_busy", + "found1", agno, bno, len, n, + tp); + break; + } + } + } + + /* + * If a block was found, force the log through the LSN of the + * transaction that freed the block + */ + if (cnt) { + TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, n, tp); + lsn = bsy->busy_tp->t_commit_lsn; + mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); + xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); + } else { + TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, n, tp); + n = -1; + mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); + } + + return n; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_alloc.h linux.21rc5-ac2/fs/xfs/xfs_alloc.h --- linux.21rc5/fs/xfs/xfs_alloc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_alloc.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_alloc.h linux.21rc5-ac2/fs/xfs/xfs_alloc.h
--- linux.21rc5/fs/xfs/xfs_alloc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_alloc.h	2003-05-07 15:41:09.000000000 +0100
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ALLOC_H__
+#define __XFS_ALLOC_H__
+
+struct xfs_buf;
+struct xfs_mount;
+struct xfs_perag;
+struct xfs_trans;
+
+/*
+ * Freespace allocation types.  Argument to xfs_alloc_[v]extent.
+ */
+typedef enum xfs_alloctype
+{
+	XFS_ALLOCTYPE_ANY_AG,		/* allocate anywhere, use rotor */
+	XFS_ALLOCTYPE_FIRST_AG,		/* ... start at ag 0 */
+	XFS_ALLOCTYPE_START_AG,		/* anywhere, start in this a.g. */
+	XFS_ALLOCTYPE_THIS_AG,		/* anywhere in this a.g. */
+	XFS_ALLOCTYPE_START_BNO,	/* near this block else anywhere */
+	XFS_ALLOCTYPE_NEAR_BNO,		/* in this a.g. and near this block */
+	XFS_ALLOCTYPE_THIS_BNO		/* at exactly this block */
+} xfs_alloctype_t;
+
+/*
+ * Flags for xfs_alloc_fix_freelist.
+ */
+#define	XFS_ALLOC_FLAG_TRYLOCK	0x00000001	/* use trylock for buffer locking */
+
+/*
+ * Argument structure for xfs_alloc routines.
+ * This is turned into a structure to avoid having 20 arguments passed
+ * down several levels of the stack.
+ */
+typedef struct xfs_alloc_arg {
+	struct xfs_trans *tp;		/* transaction pointer */
+	struct xfs_mount *mp;		/* file system mount point */
+	struct xfs_buf	*agbp;		/* buffer for a.g. freelist header */
+	struct xfs_perag *pag;		/* per-ag struct for this agno */
+	xfs_fsblock_t	fsbno;		/* file system block number */
+	xfs_agnumber_t	agno;		/* allocation group number */
+	xfs_agblock_t	agbno;		/* allocation group-relative block # */
+	xfs_extlen_t	minlen;		/* minimum size of extent */
+	xfs_extlen_t	maxlen;		/* maximum size of extent */
+	xfs_extlen_t	mod;		/* mod value for extent size */
+	xfs_extlen_t	prod;		/* prod value for extent size */
+	xfs_extlen_t	minleft;	/* min blocks must be left after us */
+	xfs_extlen_t	total;		/* total blocks needed in xaction */
+	xfs_extlen_t	alignment;	/* align answer to multiple of this */
+	xfs_extlen_t	minalignslop;	/* slop for minlen+alignment calcs */
+	xfs_extlen_t	len;		/* output: actual size of extent */
+	xfs_alloctype_t	type;		/* allocation type XFS_ALLOCTYPE_... */
+	xfs_alloctype_t	otype;		/* original allocation type */
+	char		wasdel;		/* set if allocation was prev delayed */
+	char		wasfromfl;	/* set if allocation is from freelist */
+	char		isfl;		/* set if is freelist blocks - !actg */
+	char		userdata;	/* set if this is user data */
+} xfs_alloc_arg_t;
+
+/*
+ * Defines for userdata
+ */
+#define XFS_ALLOC_USERDATA		1	/* allocation is for user data */
+#define XFS_ALLOC_INITIAL_USER_DATA	2	/* special case start of file */
+
+
+#ifdef __KERNEL__
+
+/*
+ * Types for alloc tracing.
+ */
+#define	XFS_ALLOC_KTRACE_ALLOC	1
+#define	XFS_ALLOC_KTRACE_FREE	2
+#define	XFS_ALLOC_KTRACE_MODAGF	3
+#define	XFS_ALLOC_KTRACE_BUSY	4
+#define	XFS_ALLOC_KTRACE_UNBUSY	5
+#define	XFS_ALLOC_KTRACE_BUSYSEARCH	6
+
+
+/*
+ * Allocation tracing buffer size.
+ */
+#define	XFS_ALLOC_TRACE_SIZE	4096
+
+#ifdef XFS_ALL_TRACE
+#define	XFS_ALLOC_TRACE
+#endif
+
+#if !defined(DEBUG)
+#undef XFS_ALLOC_TRACE
+#endif
+
+/*
+ * Prototypes for visible xfs_alloc.c routines
+ */
+
+/*
+ * Compute and fill in value of m_ag_maxlevels.
+ */
+void
+xfs_alloc_compute_maxlevels(
+	struct xfs_mount	*mp);	/* file system mount structure */
+
+/*
+ * Get a block from the freelist.
+ * Returns with the buffer for the block gotten.
+ */
+int				/* error */
+xfs_alloc_get_freelist(
+	struct xfs_trans *tp,	/* transaction pointer */
+	struct xfs_buf	*agbp,	/* buffer containing the agf structure */
+	xfs_agblock_t	*bnop);	/* block address retrieved from freelist */
+
+/*
+ * Log the given fields from the agf structure.
+ */
+void
+xfs_alloc_log_agf(
+	struct xfs_trans *tp,	/* transaction pointer */
+	struct xfs_buf	*bp,	/* buffer for a.g. freelist header */
+	int		fields);/* mask of fields to be logged (XFS_AGF_...) */
+
+/*
+ * Interface for inode allocation to force the pag data to be initialized.
+ */
+int				/* error */
+xfs_alloc_pagf_init(
+	struct xfs_mount *mp,	/* file system mount structure */
+	struct xfs_trans *tp,	/* transaction pointer */
+	xfs_agnumber_t	agno,	/* allocation group number */
+	int		flags);	/* XFS_ALLOC_FLAGS_... */
+
+/*
+ * Put the block on the freelist for the allocation group.
+ */
+int				/* error */
+xfs_alloc_put_freelist(
+	struct xfs_trans *tp,	/* transaction pointer */
+	struct xfs_buf	*agbp,	/* buffer for a.g. freelist header */
+	struct xfs_buf	*agflbp,/* buffer for a.g. free block array */
+	xfs_agblock_t	bno);	/* block being freed */
+
+/*
+ * Read in the allocation group header (free/alloc section).
+ */
+int				/* error */
+xfs_alloc_read_agf(
+	struct xfs_mount *mp,	/* mount point structure */
+	struct xfs_trans *tp,	/* transaction pointer */
+	xfs_agnumber_t	agno,	/* allocation group number */
+	int		flags,	/* XFS_ALLOC_FLAG_... */
+	struct xfs_buf	**bpp);	/* buffer for the ag freelist header */
+
+/*
+ * Allocate an extent (variable-size).
+ */
+int				/* error */
+xfs_alloc_vextent(
+	xfs_alloc_arg_t	*args);	/* allocation argument structure */
+
+/*
+ * Free an extent.
+ */
+int				/* error */
+xfs_free_extent(
+	struct xfs_trans *tp,	/* transaction pointer */
+	xfs_fsblock_t	bno,	/* starting block number of extent */
+	xfs_extlen_t	len);	/* length of extent */
+
+void
+xfs_alloc_mark_busy(xfs_trans_t *tp,
+		xfs_agnumber_t agno,
+		xfs_agblock_t bno,
+		xfs_extlen_t len);
+
+void
+xfs_alloc_clear_busy(xfs_trans_t *tp,
+		xfs_agnumber_t ag,
+		int idx);
+
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_ALLOC_H__ */
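xfs_free_extent() earlier in this patch shows how callers fill this argument block for a free; the allocation side is symmetric. A hedged sketch of a near-block allocation (the helper and its field choices are illustrative; real callers derive total/minleft from their transaction reservation):

/*
 * Illustrative only: ask for 1..maxlen blocks near a hint, modeled on
 * the argument conventions visible in xfs_free_extent() above.
 */
static int
example_alloc_near(xfs_trans_t *tp, xfs_fsblock_t hint_fsbno,
		   xfs_extlen_t maxlen, xfs_fsblock_t *fsbnop)
{
	xfs_alloc_arg_t	args;
	int		error;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.fsbno = hint_fsbno;		/* hint for START_BNO */
	args.type = XFS_ALLOCTYPE_START_BNO;	/* near this block else anywhere */
	args.minlen = 1;
	args.maxlen = maxlen;
	args.alignment = 1;
	args.total = maxlen;			/* blocks needed in xaction */

	if ((error = xfs_alloc_vextent(&args)))
		return error;
	*fsbnop = args.fsbno;		/* NULLFSBLOCK if no space was found */
	return 0;
}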
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_arch.h linux.21rc5-ac2/fs/xfs/xfs_arch.h
--- linux.21rc5/fs/xfs/xfs_arch.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_arch.h	2003-05-28 21:48:47.000000000 +0100
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ARCH_H__
+#define __XFS_ARCH_H__
+
+#ifndef XFS_BIG_FILESYSTEMS
+#error XFS_BIG_FILESYSTEMS must be defined true or false
+#endif
+
+#ifdef __KERNEL__
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+#define __BYTE_ORDER	__LITTLE_ENDIAN
+#endif
+#ifdef __BIG_ENDIAN
+#define __BYTE_ORDER	__BIG_ENDIAN
+#endif
+
+#endif	/* __KERNEL__ */
+
+/* do we need conversion? */
+
+#define ARCH_NOCONVERT 1
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define	ARCH_CONVERT	0
+#else
+#define	ARCH_CONVERT	ARCH_NOCONVERT
+#endif
+
+/* generic swapping macros */
+
+#ifndef HAVE_SWABMACROS
+#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var))))
+#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var))))
+#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var))))
+#endif
+
+#define INT_SWAP(type, var) \
+	((sizeof(type) == 8) ? INT_SWAP64(type,var) : \
+	((sizeof(type) == 4) ? INT_SWAP32(type,var) : \
+	((sizeof(type) == 2) ? INT_SWAP16(type,var) : \
+	(var))))
+
+#define INT_SWAP_UNALIGNED_32(from,to) \
+    { \
+	((__u8*)(to))[0] = ((__u8*)(from))[3]; \
+	((__u8*)(to))[1] = ((__u8*)(from))[2]; \
+	((__u8*)(to))[2] = ((__u8*)(from))[1]; \
+	((__u8*)(to))[3] = ((__u8*)(from))[0]; \
+    }
+
+#define INT_SWAP_UNALIGNED_64(from,to) \
+    { \
+	INT_SWAP_UNALIGNED_32( ((__u8*)(from)) + 4, ((__u8*)(to))); \
+	INT_SWAP_UNALIGNED_32( ((__u8*)(from)), ((__u8*)(to)) + 4); \
+    }
+
+/*
+ * get and set integers from potentially unaligned locations
+ */
+
+#define INT_GET_UNALIGNED_16_LE(pointer) \
+   ((__u16)((((__u8*)(pointer))[0]     ) | (((__u8*)(pointer))[1] << 8 )))
+#define INT_GET_UNALIGNED_16_BE(pointer) \
+   ((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1])))
+#define INT_SET_UNALIGNED_16_LE(pointer,value) \
+    { \
+	((__u8*)(pointer))[0] = (((value)     ) & 0xff); \
+	((__u8*)(pointer))[1] = (((value) >> 8) & 0xff); \
+    }
+#define INT_SET_UNALIGNED_16_BE(pointer,value) \
+    { \
+	((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \
+	((__u8*)(pointer))[1] = (((value)     ) & 0xff); \
+    }
+
+#define INT_GET_UNALIGNED_32_LE(pointer) \
+   ((__u32)((((__u8*)(pointer))[0]      ) | (((__u8*)(pointer))[1] << 8 ) \
+	   |(((__u8*)(pointer))[2] << 16) | (((__u8*)(pointer))[3] << 24)))
+#define INT_GET_UNALIGNED_32_BE(pointer) \
+   ((__u32)((((__u8*)(pointer))[0] << 24) | (((__u8*)(pointer))[1] << 16) \
+	   |(((__u8*)(pointer))[2] << 8)  | (((__u8*)(pointer))[3]      )))
+
+#define INT_GET_UNALIGNED_64_LE(pointer) \
+   (((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer))+4)) << 32 ) \
+   |((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer))  ))       ))
+#define INT_GET_UNALIGNED_64_BE(pointer) \
+   (((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer))  )) << 32 ) \
+   |((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer))+4))       ))
+
+/*
+ * now pick the right ones for our MACHINE ARCHITECTURE
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define INT_GET_UNALIGNED_16(pointer)		INT_GET_UNALIGNED_16_LE(pointer)
+#define INT_SET_UNALIGNED_16(pointer,value)	INT_SET_UNALIGNED_16_LE(pointer,value)
+#define INT_GET_UNALIGNED_32(pointer)		INT_GET_UNALIGNED_32_LE(pointer)
+#define INT_GET_UNALIGNED_64(pointer)		INT_GET_UNALIGNED_64_LE(pointer)
+#else
+#define INT_GET_UNALIGNED_16(pointer)		INT_GET_UNALIGNED_16_BE(pointer)
+#define INT_SET_UNALIGNED_16(pointer,value)	INT_SET_UNALIGNED_16_BE(pointer,value)
+#define INT_GET_UNALIGNED_32(pointer)		INT_GET_UNALIGNED_32_BE(pointer)
+#define INT_GET_UNALIGNED_64(pointer)		INT_GET_UNALIGNED_64_BE(pointer)
+#endif
+
+/* define generic INT_ macros */
+
+#define INT_GET(reference,arch) \
+    (((arch) == ARCH_NOCONVERT) \
+	? \
+	    (reference) \
+	: \
+	    INT_SWAP((reference),(reference)) \
+    )
+
+/* does not return a value */
+#define INT_SET(reference,arch,valueref) \
+    (__builtin_constant_p(valueref) ? \
+	(void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \
+	(void)( \
+	    ((reference) = (valueref)), \
+	    ( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \
+	) \
+    )
+
+/* does not return a value */
+#define INT_MOD_EXPR(reference,arch,code) \
+    (void)(((arch) == ARCH_NOCONVERT) \
+	? \
+	    ((reference) code) \
+	: \
+	    ( \
+		(reference) = INT_GET((reference),arch) , \
+		((reference) code), \
+		INT_SET(reference, arch, reference) \
+	    ) \
+    )
+
+/* does not return a value */
+#define INT_MOD(reference,arch,delta) \
+    (void)( \
+	INT_MOD_EXPR(reference,arch,+=(delta)) \
+    )
+
+/*
+ * INT_COPY - copy a value between two locations with the
+ *	      _same architecture_ but _potentially different sizes_
+ *
+ *	    if the types of the two parameters are equal or they are
+ *		in native architecture, a simple copy is done
+ *
+ *	    otherwise, architecture conversions are done
+ *
+ */
+
+/* does not return a value */
+#define INT_COPY(dst,src,arch) \
+    (void)( \
+	((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \
+	    ? \
+		((dst) = (src)) \
+	    : \
+		INT_SET(dst, arch, INT_GET(src, arch)) \
+    )
+
+/*
+ * INT_XLATE - copy a value in either direction between two locations
+ *	       with different architectures
+ *
+ *		    dir < 0	- copy from memory to buffer (native to arch)
+ *		    dir > 0	- copy from buffer to memory (arch to native)
+ */
+
+/* does not return a value */
+#define INT_XLATE(buf,mem,dir,arch) {\
+    ASSERT(dir); \
+    if (dir>0) { \
+	(mem)=INT_GET(buf, arch); \
+    } else { \
+	INT_SET(buf, arch, mem); \
+    } \
+}
+
+#define INT_ISZERO(reference,arch) \
+    ((reference) == 0)
+
+#define INT_ZERO(reference,arch) \
+    ((reference) = 0)
+
+#define INT_GET_UNALIGNED_16_ARCH(pointer,arch) \
+    ( ((arch) == ARCH_NOCONVERT) \
+	? \
+	    (INT_GET_UNALIGNED_16(pointer)) \
+	: \
+	    (INT_GET_UNALIGNED_16_BE(pointer)) \
+    )
+#define INT_SET_UNALIGNED_16_ARCH(pointer,value,arch) \
+    if ((arch) == ARCH_NOCONVERT) { \
+	INT_SET_UNALIGNED_16(pointer,value); \
+    } else { \
+	INT_SET_UNALIGNED_16_BE(pointer,value); \
+    }
+
+#define DIRINO4_GET_ARCH(pointer,arch) \
+    ( ((arch) == ARCH_NOCONVERT) \
+	? \
+	    (INT_GET_UNALIGNED_32(pointer)) \
+	: \
+	    (INT_GET_UNALIGNED_32_BE(pointer)) \
+    )
+
+#if XFS_BIG_FILESYSTEMS
+#define DIRINO_GET_ARCH(pointer,arch) \
+    ( ((arch) == ARCH_NOCONVERT) \
+	? \
+	    (INT_GET_UNALIGNED_64(pointer)) \
+	: \
+	    (INT_GET_UNALIGNED_64_BE(pointer)) \
+    )
+#else
+/* MACHINE ARCHITECTURE dependent */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define DIRINO_GET_ARCH(pointer,arch) \
+    DIRINO4_GET_ARCH((((__u8*)pointer)+4),arch)
+#else
+#define DIRINO_GET_ARCH(pointer,arch) \
+    DIRINO4_GET_ARCH(pointer,arch)
+#endif
+#endif
+
+#define DIRINO_COPY_ARCH(from,to,arch) \
+    if ((arch) == ARCH_NOCONVERT) { \
+	memcpy(to,from,sizeof(xfs_ino_t)); \
+    } else { \
+	INT_SWAP_UNALIGNED_64(from,to); \
+    }
+#define DIRINO4_COPY_ARCH(from,to,arch) \
+    if ((arch) == ARCH_NOCONVERT) { \
+	memcpy(to,(((__u8*)from+4)),sizeof(xfs_dir2_ino4_t)); \
+    } else { \
+	INT_SWAP_UNALIGNED_32(from,to); \
+    }
+
+#endif	/* __XFS_ARCH_H__ */
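These INT_ macros are the access idiom for every on-disk field in this patch; compare INT_GET(agf->agf_length, ARCH_CONVERT) in xfs_alloc.c above. A small sketch, assuming the usual XFS types are in scope (the function itself is illustrative):

/*
 * On big-endian hosts ARCH_CONVERT == ARCH_NOCONVERT and these collapse
 * to plain accesses; on little-endian hosts they byte-swap the
 * big-endian on-disk value, as described above.
 */
static void
example_endian_access(xfs_agf_t *agf)
{
	xfs_agblock_t	length;

	length = INT_GET(agf->agf_length, ARCH_CONVERT);	/* disk -> cpu */
	INT_SET(agf->agf_length, ARCH_CONVERT, length);	/* cpu -> disk */
}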
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_attr.c linux.21rc5-ac2/fs/xfs/xfs_attr.c
--- linux.21rc5/fs/xfs/xfs_attr.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_attr.c	2003-05-07 15:41:09.000000000 +0100
@@ -0,0 +1,2336 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+#include "xfs_quota.h"
+#include "xfs_rw.h"
+#include "xfs_trans_space.h"
+
+/*
+ * xfs_attr.c
+ *
+ * Provide the external interfaces to manage attribute lists.
+ */
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
+
+/*
+ * Internal routines when attribute list fits inside the inode.
+ */
+STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
+
+/*
+ * Internal routines when attribute list is one block.
+ */
+STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args);
+STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
+STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context);
+
+/*
+ * Internal routines when attribute list is more than one block.
+ */
+STATIC int xfs_attr_node_addname(xfs_da_args_t *args);
+STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
+STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context);
+STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
+STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
+
+/*
+ * Routines to manipulate out-of-line attribute values.
+ */
+STATIC int xfs_attr_rmtval_get(xfs_da_args_t *args);
+STATIC int xfs_attr_rmtval_set(xfs_da_args_t *args);
+STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
+
+#define ATTR_RMTVALUE_MAPSIZE	1	/* # of map entries at once */
+#define ATTR_RMTVALUE_TRANSBLKS	8	/* max # of blks in a transaction */
+
+#if defined(DEBUG)
+ktrace_t *xfs_attr_trace_buf;
+#endif
+
+
+
+/*========================================================================
+ * Overall external interface routines.
+ *========================================================================*/
+
+/*ARGSUSED*/
+int								/* error */
+xfs_attr_get(bhv_desc_t *bdp, char *name, char *value, int *valuelenp,
+	     int flags, struct cred *cred)
+{
+	xfs_da_args_t	args;
+	int		error;
+	int		namelen;
+	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
+
+	if (!name)
+		return EINVAL;
+	ASSERT(MAXNAMELEN-1 <= 0xff);	/* length is stored in uint8 */
+	namelen = strlen(name);
+	if (namelen >= MAXNAMELEN)
+		return EFAULT;		/* match IRIX behaviour */
+	XFS_STATS_INC(xfsstats.xs_attr_get);
+
+	if (XFS_IFORK_Q(ip) == 0)
+		return ENOATTR;
+
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return (EIO);
+
+	/*
+	 * Do we answer them, or ignore them?
+	 */
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	if ((error = xfs_iaccess(XFS_BHVTOI(bdp), IREAD, cred))) {
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+		return(XFS_ERROR(error));
+	}
+
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	memset((char *)&args, 0, sizeof(args));
+	args.name = name;
+	args.namelen = namelen;
+	args.value = value;
+	args.valuelen = *valuelenp;
+	args.flags = flags;
+	args.hashval = xfs_da_hashname(args.name, args.namelen);
+	args.dp = ip;
+	args.whichfork = XFS_ATTR_FORK;
+	args.trans = NULL;
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (XFS_IFORK_Q(ip) == 0 ||
+	    (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     ip->i_d.di_anextents == 0)) {
+		error = XFS_ERROR(ENOATTR);
+	} else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		error = xfs_attr_shortform_getvalue(&args);
+	} else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_get(&args);
+	} else {
+		error = xfs_attr_node_get(&args);
+	}
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	/*
+	 * Return the number of bytes in the value to the caller.
+	 */
+	*valuelenp = args.valuelen;
+
+	if (error == EEXIST)
+		error = 0;
+	return(error);
+}
+
+/*ARGSUSED*/
+int								/* error */
+xfs_attr_set(bhv_desc_t *bdp, char *name, char *value, int valuelen, int flags,
+	     struct cred *cred)
+{
+	xfs_da_args_t	args;
+	xfs_inode_t	*dp;
+	xfs_fsblock_t	firstblock;
+	xfs_bmap_free_t	flist;
+	int		error, err2, committed;
+	int		local, size;
+	uint		nblks;
+	xfs_mount_t	*mp;
+	int		rsvd = (flags & ATTR_ROOT) != 0;
+	int		namelen;
+
+	ASSERT(MAXNAMELEN-1 <= 0xff);	/* length is stored in uint8 */
+	namelen = strlen(name);
+	if (namelen >= MAXNAMELEN)
+		return EFAULT;		/* match IRIX behaviour */
+
+	XFS_STATS_INC(xfsstats.xs_attr_set);
+	/*
+	 * Do we answer them, or ignore them?
+	 */
+	dp = XFS_BHVTOI(bdp);
+	mp = dp->i_mount;
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return (EIO);
+
+	xfs_ilock(dp, XFS_ILOCK_SHARED);
+	if ((error = xfs_iaccess(dp, IWRITE, cred))) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return(XFS_ERROR(error));
+	}
+	xfs_iunlock(dp, XFS_ILOCK_SHARED);
+
+	/*
+	 * Attach the dquots to the inode.
+	 */
+	if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
+		return (error);
+
+	/*
+	 * If the inode doesn't have an attribute fork, add one.
+	 * (inode must not be locked when we call this routine)
+	 */
+	if (XFS_IFORK_Q(dp) == 0) {
+		error = xfs_bmap_add_attrfork(dp, rsvd);
+		if (error)
+			return(error);
+	}
+
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	memset((char *)&args, 0, sizeof(args));
+	args.name = name;
+	args.namelen = namelen;
+	args.value = value;
+	args.valuelen = valuelen;
+	args.flags = flags;
+	args.hashval = xfs_da_hashname(args.name, args.namelen);
+	args.dp = dp;
+	args.firstblock = &firstblock;
+	args.flist = &flist;
+	args.whichfork = XFS_ATTR_FORK;
+	args.oknoent = 1;
+
+	/* Determine space new attribute will use, and if it will be inline
+	 * or out of line.
+	 */
+	size = xfs_attr_leaf_newentsize(&args, mp->m_sb.sb_blocksize, &local);
+
+	nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
+	if (local) {
+		if (size > (mp->m_sb.sb_blocksize >> 1)) {
+			/* Double split possible */
+			nblks <<= 1;
+		}
+	} else {
+		uint	dblocks = XFS_B_TO_FSB(mp, valuelen);
+		/* Out of line attribute, cannot double split, but make
+		 * room for the attribute value itself.
+		 */
+		nblks += dblocks;
+		nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
+	}
+
+	/* Size is now blocks for attribute data */
+	args.total = nblks;
+
+	/*
+	 * Start our first transaction of the day.
+	 *
+	 * All future transactions during this code must be "chained" off
+	 * this one via the trans_dup() call.  All transactions will contain
+	 * the inode, and the inode will always be marked with trans_ihold().
+	 * Since the inode will be locked in all transactions, we must log
+	 * the inode in every transaction to let it float upward through
+	 * the log.
+	 */
+	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_SET);
+
+	/*
+	 * Root fork attributes can use reserved data blocks for this
+	 * operation if necessary
+	 */
+
+	if (rsvd)
+		args.trans->t_flags |= XFS_TRANS_RESERVE;
+
+	if ((error = xfs_trans_reserve(args.trans, (uint) nblks,
+				      XFS_ATTRSET_LOG_RES(mp, nblks),
+				      0, XFS_TRANS_PERM_LOG_RES,
+				      XFS_ATTRSET_LOG_COUNT))) {
+		xfs_trans_cancel(args.trans, 0);
+		return(error);
+	}
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+	error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0,
+			 rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
+				XFS_QMOPT_RES_REGBLKS);
+	if (error) {
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+		xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);
+		return (error);
+	}
+
+	xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(args.trans, dp);
+
+	/*
+	 * If the attribute list is non-existent or a shortform list,
+	 * upgrade it to a single-leaf-block attribute list.
+	 */
+	if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
+	    ((dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) &&
+	     (dp->i_d.di_anextents == 0))) {
+
+		/*
+		 * Build initial attribute list (if required).
+		 */
+		if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
+			(void)xfs_attr_shortform_create(&args);
+
+		/*
+		 * Try to add the attr to the attribute list in
+		 * the inode.
+		 */
+		error = xfs_attr_shortform_addname(&args);
+		if (error != ENOSPC) {
+			/*
+			 * Commit the shortform mods, and we're done.
+			 * NOTE: this is also the error path (EEXIST, etc).
+			 */
+			ASSERT(args.trans != NULL);
+
+			/*
+			 * If this is a synchronous mount, make sure that
+			 * the transaction goes to disk before returning
+			 * to the user.
+			 */
+			if (mp->m_flags & XFS_MOUNT_WSYNC) {
+				xfs_trans_set_sync(args.trans);
+			}
+			err2 = xfs_trans_commit(args.trans,
+						 XFS_TRANS_RELEASE_LOG_RES,
+						 NULL);
+			xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+			/*
+			 * Hit the inode change time.
+			 */
+			if (!error && (flags & ATTR_KERNOTIME) == 0) {
+				xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
+			}
+			return(error == 0 ? err2 : error);
+		}
+
+		/*
+		 * It won't fit in the shortform, transform to a leaf block.
+		 * GROT: another possible req'mt for a double-split btree op.
+		 */
+		XFS_BMAP_INIT(args.flist, args.firstblock);
+		error = xfs_attr_shortform_to_leaf(&args);
+		if (!error) {
+			error = xfs_bmap_finish(&args.trans, args.flist,
+						*args.firstblock, &committed);
+		}
+		if (error) {
+			ASSERT(committed);
+			args.trans = NULL;
+			xfs_bmap_cancel(&flist);
+			goto out;
+		}
+
+		/*
+		 * bmap_finish() may have committed the last trans and started
+		 * a new one.  We need the inode to be in all transactions.
+		 */
+		if (committed) {
+			xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(args.trans, dp);
+		}
+
+		/*
+		 * Commit the leaf transformation.  We'll need another (linked)
+		 * transaction to add the new attribute to the leaf.
+		 */
+		if ((error = xfs_attr_rolltrans(&args.trans, dp)))
+			goto out;
+
+	}
+
+	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_addname(&args);
+	} else {
+		error = xfs_attr_node_addname(&args);
+	}
+	if (error) {
+		goto out;
+	}
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * transaction goes to disk before returning to the user.
+	 */
+	if (mp->m_flags & XFS_MOUNT_WSYNC) {
+		xfs_trans_set_sync(args.trans);
+	}
+
+	/*
+	 * Commit the last in the sequence of transactions.
+	 */
+	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
+	error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES,
+				 NULL);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * Hit the inode change time.
+	 */
+	if (!error && (flags & ATTR_KERNOTIME) == 0) {
+		xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
+	}
+
+	return(error);
+
+out:
+	if (args.trans)
+		xfs_trans_cancel(args.trans,
+			XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	return(error);
+}
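The "chained off this one via the trans_dup() call" rule in the comments above governs everything that follows in this file. One link of the chain looks like the sketch below (hedged: it assumes, as the callers here do, that xfs_attr_rolltrans() carries the held, locked inode over to the duplicated transaction):

/*
 * Sketch of one step in the permanent-transaction chain used by
 * xfs_attr_set()/xfs_attr_remove() above.
 */
static int
example_roll_step(xfs_trans_t **transp, xfs_inode_t *dp)
{
	int error;

	/* ... one transaction's worth of attribute changes ... */

	xfs_trans_log_inode(*transp, dp, XFS_ILOG_CORE); /* float the inode */
	if ((error = xfs_attr_rolltrans(transp, dp)))	 /* commit + dup */
		return error;
	/* *transp is now the next transaction; dp is still locked and held */
	return 0;
}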
+
+/*
+ * Generic handler routine to remove a name from an attribute list.
+ * Transitions attribute list from Btree to shortform as necessary.
+ */
+/*ARGSUSED*/
+int								/* error */
+xfs_attr_remove(bhv_desc_t *bdp, char *name, int flags, struct cred *cred)
+{
+	xfs_da_args_t	args;
+	xfs_inode_t	*dp;
+	xfs_fsblock_t	firstblock;
+	xfs_bmap_free_t	flist;
+	int		error;
+	xfs_mount_t	*mp;
+	int		namelen;
+
+	ASSERT(MAXNAMELEN-1 <= 0xff);	/* length is stored in uint8 */
+	namelen = strlen(name);
+	if (namelen >= MAXNAMELEN)
+		return EFAULT;		/* match IRIX behaviour */
+
+	XFS_STATS_INC(xfsstats.xs_attr_remove);
+
+	/*
+	 * Do we answer them, or ignore them?
+	 */
+	dp = XFS_BHVTOI(bdp);
+	mp = dp->i_mount;
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return (EIO);
+
+	xfs_ilock(dp, XFS_ILOCK_SHARED);
+	if ((error = xfs_iaccess(dp, IWRITE, cred))) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return(XFS_ERROR(error));
+	} else if (XFS_IFORK_Q(dp) == 0 ||
+		   (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+		    dp->i_d.di_anextents == 0)) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return(XFS_ERROR(ENOATTR));
+	}
+	xfs_iunlock(dp, XFS_ILOCK_SHARED);
+
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	memset((char *)&args, 0, sizeof(args));
+	args.name = name;
+	args.namelen = namelen;
+	args.flags = flags;
+	args.hashval = xfs_da_hashname(args.name, args.namelen);
+	args.dp = dp;
+	args.firstblock = &firstblock;
+	args.flist = &flist;
+	args.total = 0;
+	args.whichfork = XFS_ATTR_FORK;
+
+	/*
+	 * Attach the dquots to the inode.
+	 */
+	if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
+		return (error);
+
+	/*
+	 * Start our first transaction of the day.
+	 *
+	 * All future transactions during this code must be "chained" off
+	 * this one via the trans_dup() call.  All transactions will contain
+	 * the inode, and the inode will always be marked with trans_ihold().
+	 * Since the inode will be locked in all transactions, we must log
+	 * the inode in every transaction to let it float upward through
+	 * the log.
+	 */
+	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM);
+
+	/*
+	 * Root fork attributes can use reserved data blocks for this
+	 * operation if necessary
+	 */
+
+	if (flags & ATTR_ROOT)
+		args.trans->t_flags |= XFS_TRANS_RESERVE;
+
+	if ((error = xfs_trans_reserve(args.trans,
+				      XFS_ATTRRM_SPACE_RES(mp),
+				      XFS_ATTRRM_LOG_RES(mp),
+				      0, XFS_TRANS_PERM_LOG_RES,
+				      XFS_ATTRRM_LOG_COUNT))) {
+		xfs_trans_cancel(args.trans, 0);
+		return(error);
+
+	}
+
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+	/*
+	 * No need to make quota reservations here.  We expect to release some
+	 * blocks, not allocate, in the common case.
+	 */
+	xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(args.trans, dp);
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (XFS_IFORK_Q(dp) == 0 ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+		error = XFS_ERROR(ENOATTR);
+		goto out;
+	}
+	if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
+		error = xfs_attr_shortform_remove(&args);
+		if (error) {
+			goto out;
+		}
+	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_removename(&args);
+	} else {
+		error = xfs_attr_node_removename(&args);
+	}
+	if (error) {
+		goto out;
+	}
+
+	/*
+	 * If this is a synchronous mount, make sure that the
+	 * transaction goes to disk before returning to the user.
+	 */
+	if (mp->m_flags & XFS_MOUNT_WSYNC) {
+		xfs_trans_set_sync(args.trans);
+	}
+
+	/*
+	 * Commit the last in the sequence of transactions.
+	 */
+	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
+	error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES,
+				 NULL);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * Hit the inode change time.
+	 */
+	if (!error && (flags & ATTR_KERNOTIME) == 0) {
+		xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
+	}
+
+	return(error);
+
+out:
+	if (args.trans)
+		xfs_trans_cancel(args.trans,
+			XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	return(error);
+}
+
+/*
+ * Generate a list of extended attribute names and optionally
+ * also value lengths.  Positive return value follows the XFS
+ * convention of being an error, zero or negative return code
+ * is the length of the buffer returned (negated), indicating
+ * success.
+ */
+int
+xfs_attr_list(bhv_desc_t *bdp, char *buffer, int bufsize, int flags,
+		      attrlist_cursor_kern_t *cursor, struct cred *cred)
+{
+	xfs_attr_list_context_t context;
+	xfs_inode_t	*dp;
+	int		error;
+
+	XFS_STATS_INC(xfsstats.xs_attr_list);
+
+	/*
+	 * Validate the cursor.
+	 */
+	if (cursor->pad1 || cursor->pad2)
+		return(XFS_ERROR(EINVAL));
+	if ((cursor->initted == 0) &&
+	    (cursor->hashval || cursor->blkno || cursor->offset))
+		return(XFS_ERROR(EINVAL));
+
+	/*
+	 * Check for a properly aligned buffer.
+	 */
+	if (((long)buffer) & (sizeof(int)-1))
+		return(XFS_ERROR(EFAULT));
+	if (flags & ATTR_KERNOVAL)
+		bufsize = 0;
+
+	/*
+	 * Initialize the output buffer.
+	 */
+	context.dp = dp = XFS_BHVTOI(bdp);
+	context.cursor = cursor;
+	context.count = 0;
+	context.dupcnt = 0;
+	context.resynch = 1;
+	context.flags = flags;
+	if (!(flags & ATTR_KERNAMELS)) {
+		context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */
+		context.firstu = context.bufsize;
+		context.alist = (attrlist_t *)buffer;
+		context.alist->al_count = 0;
+		context.alist->al_more = 0;
+		context.alist->al_offset[0] = context.bufsize;
+	}
+	else {
+		context.bufsize = bufsize;
+		context.firstu = context.bufsize;
+		context.alist = (attrlist_t *)buffer;
+	}
+
+	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+		return (EIO);
+	/*
+	 * Do they have permission?
+	 */
+	xfs_ilock(dp, XFS_ILOCK_SHARED);
+	if ((error = xfs_iaccess(dp, IREAD, cred))) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return(XFS_ERROR(error));
+	}
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	xfs_attr_trace_l_c("syscall start", &context);
+	if (XFS_IFORK_Q(dp) == 0 ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+		error = 0;
+	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		error = xfs_attr_shortform_list(&context);
+	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+		error = xfs_attr_leaf_list(&context);
+	} else {
+		error = xfs_attr_node_list(&context);
+	}
+	xfs_iunlock(dp, XFS_ILOCK_SHARED);
+	xfs_attr_trace_l_c("syscall end", &context);
+
+	if (!(context.flags & (ATTR_KERNOVAL|ATTR_KERNAMELS))) {
+		ASSERT(error >= 0);
+	}
+	else {	/* must return negated buffer size or the error */
+		if (context.count < 0)
+			error = XFS_ERROR(ERANGE);
+		else
+			error = -context.count;
+	}
+
+	return(error);
+}
+
+int								/* error */
+xfs_attr_inactive(xfs_inode_t *dp)
+{
+	xfs_trans_t	*trans;
+	xfs_mount_t	*mp;
+	int		error;
+
+	mp = dp->i_mount;
+	ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
+
+	/* XXXsup - why on earth are we taking ILOCK_EXCL here??? */
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+	if ((XFS_IFORK_Q(dp) == 0) ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+		return(0);
+	}
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * Start our first transaction of the day.
+	 *
+	 * All future transactions during this code must be "chained" off
+	 * this one via the trans_dup() call.  All transactions will contain
+	 * the inode, and the inode will always be marked with trans_ihold().
+	 * Since the inode will be locked in all transactions, we must log
+	 * the inode in every transaction to let it float upward through
+	 * the log.
+	 */
+	trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
+	if ((error = xfs_trans_reserve(trans, 0, XFS_ATTRINVAL_LOG_RES(mp), 0,
+				      XFS_TRANS_PERM_LOG_RES,
+				      XFS_ATTRINVAL_LOG_COUNT))) {
+		xfs_trans_cancel(trans, 0);
+		return(error);
+	}
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * No need to make quota reservations here.  We expect to release some
+	 * blocks, not allocate, in the common case.
+	 */
+	xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(trans, dp);
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if ((XFS_IFORK_Q(dp) == 0) ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
+	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+	     dp->i_d.di_anextents == 0)) {
+		error = 0;
+		goto out;
+	}
+	error = xfs_attr_root_inactive(&trans, dp);
+	if (error)
+		goto out;
+	/*
+	 * Signal synchronous inactive transactions unless this is a
+	 * synchronous mount filesystem, in which case we know that we're
+	 * here because we've been called out of xfs_inactive, which means
+	 * that the last reference is gone and the unlink transaction has
+	 * already hit the disk, so async inactive transactions are safe.
+	 */
+	if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK,
+				(!(mp->m_flags & XFS_MOUNT_WSYNC)
+				 ? 1 : 0))))
+		goto out;
+
+	/*
+	 * Commit the last in the sequence of transactions.
+	 */
+	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
+	error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES,
+				 NULL);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+	return(error);
+
+out:
+	xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	return(error);
+}
+
+
+
+/*========================================================================
+ * External routines when attribute list is inside the inode
+ *========================================================================*/
+
+/*
+ * Add a name to the shortform attribute list structure
+ * This is the external routine.
+ */
+STATIC int
+xfs_attr_shortform_addname(xfs_da_args_t *args)
+{
+	int newsize, retval;
+
+	retval = xfs_attr_shortform_lookup(args);
+	if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
+		return(retval);
+	} else if (retval == EEXIST) {
+		if (args->flags & ATTR_CREATE)
+			return(retval);
+		retval = xfs_attr_shortform_remove(args);
+		ASSERT(retval == 0);
+	}
+
+	newsize = XFS_ATTR_SF_TOTSIZE(args->dp);
+	newsize += XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
+	if ((newsize <= XFS_IFORK_ASIZE(args->dp)) &&
+	    (args->namelen < XFS_ATTR_SF_ENTSIZE_MAX) &&
+	    (args->valuelen < XFS_ATTR_SF_ENTSIZE_MAX)) {
+		retval = xfs_attr_shortform_add(args);
+		ASSERT(retval == 0);
+	} else {
+		return(XFS_ERROR(ENOSPC));
+	}
+	return(0);
+}
+
+
+/*========================================================================
+ * External routines when attribute list is one block
+ *========================================================================*/
+
+/*
+ * Add a name to the leaf attribute list structure
+ *
+ * This leaf block cannot have a "remote" value, we only call this routine
+ * if bmap_one_block() says there is only one block (ie: no remote blks).
+ */
+int
+xfs_attr_leaf_addname(xfs_da_args_t *args)
+{
+	xfs_inode_t *dp;
+	xfs_dabuf_t *bp;
+	int retval, error, committed;
+
+	/*
+	 * Read the (only) block in the attribute list in.
+	 */
+	dp = args->dp;
+	args->blkno = 0;
+	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
+					     XFS_ATTR_FORK);
+	if (error)
+		return(error);
+	ASSERT(bp != NULL);
+
+	/*
+	 * Look up the given attribute in the leaf block.  Figure out if
+	 * the given flags produce an error or call for an atomic rename.
+	 */
+	retval = xfs_attr_leaf_lookup_int(bp, args);
+	if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
+		xfs_da_brelse(args->trans, bp);
+		return(retval);
+	} else if (retval == EEXIST) {
+		if (args->flags & ATTR_CREATE) {	/* pure create op */
+			xfs_da_brelse(args->trans, bp);
+			return(retval);
+		}
+		args->rename = 1;			/* an atomic rename */
+		args->blkno2 = args->blkno;		/* set 2nd entry info*/
+		args->index2 = args->index;
+		args->rmtblkno2 = args->rmtblkno;
+		args->rmtblkcnt2 = args->rmtblkcnt;
+	}
+
+	/*
+	 * Add the attribute to the leaf block, transitioning to a Btree
+	 * if required.
+	 */
+	retval = xfs_attr_leaf_add(bp, args);
+	xfs_da_buf_done(bp);
+	if (retval == ENOSPC) {
+		/*
+		 * Promote the attribute list to the Btree format, then
+		 * Commit that transaction so that the node_addname() call
+		 * can manage its own transactions.
+		 */
+		XFS_BMAP_INIT(args->flist, args->firstblock);
+		error = xfs_attr_leaf_to_node(args);
+		if (!error) {
+			error = xfs_bmap_finish(&args->trans, args->flist,
+						*args->firstblock, &committed);
+		}
+		if (error) {
+			ASSERT(committed);
+			args->trans = NULL;
+			xfs_bmap_cancel(args->flist);
+			return(error);
+		}
+
+		/*
+		 * bmap_finish() may have committed the last trans and started
+		 * a new one.  We need the inode to be in all transactions.
+		 */
+		if (committed) {
+			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(args->trans, dp);
+		}
+
+		/*
+		 * Commit the current trans (including the inode) and start
+		 * a new one.
+		 */
+		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+			return (error);
+
+		/*
+		 * Fob the whole rest of the problem off on the Btree code.
+		 */
+		error = xfs_attr_node_addname(args);
+		return(error);
+	}
+
+	/*
+	 * Commit the transaction that added the attr name so that
+	 * later routines can manage their own transactions.
+	 */
+	if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		return (error);
+
+	/*
+	 * If there was an out-of-line value, allocate the blocks we
+	 * identified for its storage and copy the value.  This is done
+	 * after we create the attribute so that we don't overflow the
+	 * maximum size of a transaction and/or hit a deadlock.
+	 */
+	if (args->rmtblkno > 0) {
+		error = xfs_attr_rmtval_set(args);
+		if (error)
+			return(error);
+	}
+
+	/*
+	 * If this is an atomic rename operation, we must "flip" the
+	 * incomplete flags on the "new" and "old" attribute/value pairs
+	 * so that one disappears and one appears atomically.  Then we
+	 * must remove the "old" attribute/value pair.
+	 */
+	if (args->rename) {
+		/*
+		 * In a separate transaction, set the incomplete flag on the
+		 * "old" attr and clear the incomplete flag on the "new" attr.
+		 */
+		error = xfs_attr_leaf_flipflags(args);
+		if (error)
+			return(error);
+
+		/*
+		 * Dismantle the "old" attribute/value pair by removing
+		 * a "remote" value (if it exists).
+		 */
+		args->index = args->index2;
+		args->blkno = args->blkno2;
+		args->rmtblkno = args->rmtblkno2;
+		args->rmtblkcnt = args->rmtblkcnt2;
+		if (args->rmtblkno) {
+			error = xfs_attr_rmtval_remove(args);
+			if (error)
+				return(error);
+		}
+
+		/*
+		 * Read in the block containing the "old" attr, then
+		 * remove the "old" attr from that block (neat, huh!)
+		 */
+		error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1,
+						     &bp, XFS_ATTR_FORK);
+		if (error)
+			return(error);
+		ASSERT(bp != NULL);
+		(void)xfs_attr_leaf_remove(bp, args);
+
+		/*
+		 * If the result is small enough, shrink it all into the inode.
+		 */
+		if (xfs_attr_shortform_allfit(bp, dp)) {
+			XFS_BMAP_INIT(args->flist, args->firstblock);
+			error = xfs_attr_leaf_to_shortform(bp, args);
+			/* bp is gone due to xfs_da_shrink_inode */
+			if (!error) {
+				error = xfs_bmap_finish(&args->trans,
+							args->flist,
+							*args->firstblock,
+							&committed);
+			}
+			if (error) {
+				ASSERT(committed);
+				args->trans = NULL;
+				xfs_bmap_cancel(args->flist);
+				return(error);
+			}
+
+			/*
+			 * bmap_finish() may have committed the last trans
+			 * and started a new one.  We need the inode to be
+			 * in all transactions.
+			 */
+			if (committed) {
+				xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
+				xfs_trans_ihold(args->trans, dp);
+			}
+		} else
+			xfs_da_buf_done(bp);
+
+		/*
+		 * Commit the remove and start the next trans in series.
+		 */
+		error = xfs_attr_rolltrans(&args->trans, dp);
+
+	} else if (args->rmtblkno > 0) {
+		/*
+		 * Added a "remote" value, just clear the incomplete flag.
+		 */
+		error = xfs_attr_leaf_clearflag(args);
+	}
+	return(error);
+}
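Since xfs_attr_node_addname() below repeats it at Btree scale, the atomic-rename ("flip") sequence that xfs_attr_leaf_addname() just implemented is worth restating in one place (an outline of the code above, not new mechanism):

/*
 * Atomic replace of an existing attribute, as sequenced above:
 *
 *  1. add the "new" entry, flagged INCOMPLETE	(xfs_attr_leaf_add)
 *  2. write its remote value blocks, if any	(xfs_attr_rmtval_set)
 *  3. flip the INCOMPLETE flags so the old entry vanishes and the
 *     new one appears in a single transaction	(xfs_attr_leaf_flipflags)
 *  4. free the old remote value blocks		(xfs_attr_rmtval_remove)
 *  5. remove the old entry			(xfs_attr_leaf_remove)
 *
 * A crash before step 3 leaves only the "old" attribute visible, and a
 * crash after it only the "new" one, so lookups never see both or neither.
 */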
+ */ + if (xfs_attr_shortform_allfit(bp, dp)) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_attr_leaf_to_shortform(bp, args); + /* bp is gone due to xfs_da_shrink_inode */ + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + return(error); + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + } else + xfs_da_buf_done(bp); + return(0); +} + +/* + * Look up a name in a leaf attribute list structure. + * + * This leaf block cannot have a "remote" value, we only call this routine + * if bmap_one_block() says there is only one block (ie: no remote blks). + */ +int +xfs_attr_leaf_get(xfs_da_args_t *args) +{ + xfs_dabuf_t *bp; + int error; + + args->blkno = 0; + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + + error = xfs_attr_leaf_lookup_int(bp, args); + if (error != EEXIST) { + xfs_da_brelse(args->trans, bp); + return(error); + } + error = xfs_attr_leaf_getvalue(bp, args); + xfs_da_brelse(args->trans, bp); + if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) { + error = xfs_attr_rmtval_get(args); + } + return(error); +} + +/* + * Copy out attribute entries for attr_list(), for leaf attribute lists. + */ +STATIC int +xfs_attr_leaf_list(xfs_attr_list_context_t *context) +{ + xfs_attr_leafblock_t *leaf; + int error; + xfs_dabuf_t *bp; + + context->cursor->blkno = 0; + error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + leaf = bp->data; + if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + != XFS_ATTR_LEAF_MAGIC)) { + XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW, + context->dp->i_mount, leaf); + xfs_da_brelse(NULL, bp); + return(XFS_ERROR(EFSCORRUPTED)); + } + + (void)xfs_attr_leaf_list_int(bp, context); + xfs_da_brelse(NULL, bp); + return(0); +} + + +/*======================================================================== + * External routines when attribute list size > XFS_LBSIZE(mp). + *========================================================================*/ + +/* + * Add a name to a Btree-format attribute list. + * + * This will involve walking down the Btree, and may involve splitting + * leaf nodes and even splitting intermediate nodes up to and including + * the root node (a special case of an intermediate node). + * + * "Remote" attribute values confuse the issue and atomic rename operations + * add a whole extra layer of confusion on top of that. + */ +STATIC int +xfs_attr_node_addname(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + xfs_inode_t *dp; + xfs_mount_t *mp; + int committed, retval, error; + + /* + * Fill in bucket of arguments/results/context to carry around. + */ + dp = args->dp; + mp = dp->i_mount; +restart: + state = xfs_da_state_alloc(); + state->args = args; + state->mp = mp; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_attr_node_ents; + + /* + * Search to see if name already exists, and get back a pointer + * to where it should go. 
+ */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) + goto out; + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { + goto out; + } else if (retval == EEXIST) { + if (args->flags & ATTR_CREATE) + goto out; + args->rename = 1; /* atomic rename op */ + args->blkno2 = args->blkno; /* set 2nd entry info*/ + args->index2 = args->index; + args->rmtblkno2 = args->rmtblkno; + args->rmtblkcnt2 = args->rmtblkcnt; + args->rmtblkno = 0; + args->rmtblkcnt = 0; + } + + retval = xfs_attr_leaf_add(blk->bp, state->args); + if (retval == ENOSPC) { + if (state->path.active == 1) { + /* + * Its really a single leaf node, but it had + * out-of-line values so it looked like it *might* + * have been a b-tree. + */ + xfs_da_state_free(state); + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_attr_leaf_to_node(args); + if (!error) { + error = xfs_bmap_finish(&args->trans, + args->flist, + *args->firstblock, + &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans + * and started a new one. We need the inode to be + * in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + + /* + * Commit the node conversion and start the next + * trans in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + goto out; + + goto restart; + } + + /* + * Split as many Btree elements as required. + * This code tracks the new and old attr's location + * in the index/blkno/rmtblkno/rmtblkcnt fields and + * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields. + */ + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_da_split(state); + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + } else { + /* + * Addition succeeded, update Btree hashvals. + */ + xfs_da_fixhashpath(state, &state->path); + } + + /* + * Kill the state structure, we're done with it and need to + * allow the buffers to come back later. + */ + xfs_da_state_free(state); + state = NULL; + + /* + * Commit the leaf addition or btree split and start the next + * trans in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + goto out; + + /* + * If there was an out-of-line value, allocate the blocks we + * identified for its storage and copy the value. This is done + * after we create the attribute so that we don't overflow the + * maximum size of a transaction and/or hit a deadlock. + */ + if (args->rmtblkno > 0) { + error = xfs_attr_rmtval_set(args); + if (error) + return(error); + } + + /* + * If this is an atomic rename operation, we must "flip" the + * incomplete flags on the "new" and "old" attribute/value pairs + * so that one disappears and one appears atomically. Then we + * must remove the "old" attribute/value pair. + */ + if (args->rename) { + /* + * In a separate transaction, set the incomplete flag on the + * "old" attr and clear the incomplete flag on the "new" attr. 
+ */ + error = xfs_attr_leaf_flipflags(args); + if (error) + goto out; + + /* + * Dismantle the "old" attribute/value pair by removing + * a "remote" value (if it exists). + */ + args->index = args->index2; + args->blkno = args->blkno2; + args->rmtblkno = args->rmtblkno2; + args->rmtblkcnt = args->rmtblkcnt2; + if (args->rmtblkno) { + error = xfs_attr_rmtval_remove(args); + if (error) + return(error); + } + + /* + * Re-find the "old" attribute entry after any split ops. + * The INCOMPLETE flag means that we will find the "old" + * attr, not the "new" one. + */ + args->flags |= XFS_ATTR_INCOMPLETE; + state = xfs_da_state_alloc(); + state->args = args; + state->mp = mp; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_attr_node_ents; + state->inleaf = 0; + error = xfs_da_node_lookup_int(state, &retval); + if (error) + goto out; + + /* + * Remove the name and update the hashvals in the tree. + */ + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + error = xfs_attr_leaf_remove(blk->bp, args); + xfs_da_fixhashpath(state, &state->path); + + /* + * Check to see if the tree needs to be collapsed. + */ + if (retval && (state->path.active > 1)) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_da_join(state); + if (!error) { + error = xfs_bmap_finish(&args->trans, + args->flist, + *args->firstblock, + &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans + * and started a new one. We need the inode to be + * in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + } + + /* + * Commit and start the next trans in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + goto out; + + } else if (args->rmtblkno > 0) { + /* + * Added a "remote" value, just clear the incomplete flag. + */ + error = xfs_attr_leaf_clearflag(args); + if (error) + goto out; + } + retval = error = 0; + +out: + if (state) + xfs_da_state_free(state); + if (error) + return(error); + return(retval); +} + +/* + * Remove a name from a B-tree attribute list. + * + * This will involve walking down the Btree, and may involve joining + * leaf nodes and even joining intermediate nodes up to and including + * the root node (a special case of an intermediate node). + */ +STATIC int +xfs_attr_node_removename(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + xfs_inode_t *dp; + xfs_dabuf_t *bp; + int retval, error, committed; + + /* + * Tie a string around our finger to remind us where we are. + */ + dp = args->dp; + state = xfs_da_state_alloc(); + state->args = args; + state->mp = dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_attr_node_ents; + + /* + * Search to see if name exists, and get back a pointer to it. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error || (retval != EEXIST)) { + if (error == 0) + error = retval; + goto out; + } + + /* + * If there is an out-of-line value, de-allocate the blocks. + * This is done before we remove the attribute so that we don't + * overflow the maximum size of a transaction and/or hit a deadlock. 
+ */ + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->bp != NULL); + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + if (args->rmtblkno > 0) { + /* + * Fill in disk block numbers in the state structure + * so that we can get the buffers back after we commit + * several transactions in the following calls. + */ + error = xfs_attr_fillstate(state); + if (error) + goto out; + + /* + * Mark the attribute as INCOMPLETE, then bunmapi() the + * remote value. + */ + error = xfs_attr_leaf_setflag(args); + if (error) + goto out; + error = xfs_attr_rmtval_remove(args); + if (error) + goto out; + + /* + * Refill the state structure with buffers, the prior calls + * released our buffers. + */ + error = xfs_attr_refillstate(state); + if (error) + goto out; + } + + /* + * Remove the name and update the hashvals in the tree. + */ + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + retval = xfs_attr_leaf_remove(blk->bp, args); + xfs_da_fixhashpath(state, &state->path); + + /* + * Check to see if the tree needs to be collapsed. + */ + if (retval && (state->path.active > 1)) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_da_join(state); + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + + /* + * Commit the Btree join operation and start a new trans. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + goto out; + } + + /* + * If the result is small enough, push it all into the inode. + */ + if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { + /* + * Have to get rid of the copy of this dabuf in the state. + */ + ASSERT(state->path.active == 1); + ASSERT(state->path.blk[0].bp); + xfs_da_buf_done(state->path.blk[0].bp); + state->path.blk[0].bp = NULL; + + error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_ATTR_FORK); + if (error) + goto out; + ASSERT(INT_GET(((xfs_attr_leafblock_t *) + bp->data)->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + + if (xfs_attr_shortform_allfit(bp, dp)) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_attr_leaf_to_shortform(bp, args); + /* bp is gone due to xfs_da_shrink_inode */ + if (!error) { + error = xfs_bmap_finish(&args->trans, + args->flist, + *args->firstblock, + &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans + * and started a new one. We need the inode to be + * in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + } else + xfs_da_brelse(args->trans, bp); + } + error = 0; + +out: + xfs_da_state_free(state); + return(error); +} + +/* + * Fill in the disk block numbers in the state structure for the buffers + * that are attached to the state structure. + * This is done so that we can quickly reattach ourselves to those buffers + * after some set of transaction commit's has released these buffers. 
+ */
+STATIC int
+xfs_attr_fillstate(xfs_da_state_t *state)
+{
+	xfs_da_state_path_t *path;
+	xfs_da_state_blk_t *blk;
+	int level;
+
+	/*
+	 * Roll down the "path" in the state structure, storing the on-disk
+	 * block number for those buffers in the "path".
+	 */
+	path = &state->path;
+	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
+	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
+		if (blk->bp) {
+			blk->disk_blkno = xfs_da_blkno(blk->bp);
+			xfs_da_buf_done(blk->bp);
+			blk->bp = NULL;
+		} else {
+			blk->disk_blkno = 0;
+		}
+	}
+
+	/*
+	 * Roll down the "altpath" in the state structure, storing the on-disk
+	 * block number for those buffers in the "altpath".
+	 */
+	path = &state->altpath;
+	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
+	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
+		if (blk->bp) {
+			blk->disk_blkno = xfs_da_blkno(blk->bp);
+			xfs_da_buf_done(blk->bp);
+			blk->bp = NULL;
+		} else {
+			blk->disk_blkno = 0;
+		}
+	}
+
+	return(0);
+}
+
+/*
+ * Reattach the buffers to the state structure based on the disk block
+ * numbers stored in the state structure.
+ * This is done after some set of transaction commits has released those
+ * buffers from our grip.
+ */
+STATIC int
+xfs_attr_refillstate(xfs_da_state_t *state)
+{
+	xfs_da_state_path_t *path;
+	xfs_da_state_blk_t *blk;
+	int level, error;
+
+	/*
+	 * Roll down the "path" in the state structure, reading back the
+	 * buffers at the on-disk block numbers stored in the "path".
+	 */
+	path = &state->path;
+	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
+	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
+		if (blk->disk_blkno) {
+			error = xfs_da_read_buf(state->args->trans,
+						state->args->dp,
+						blk->blkno, blk->disk_blkno,
+						&blk->bp, XFS_ATTR_FORK);
+			if (error)
+				return(error);
+		} else {
+			blk->bp = NULL;
+		}
+	}
+
+	/*
+	 * Roll down the "altpath" in the state structure, reading back the
+	 * buffers at the on-disk block numbers stored in the "altpath".
+	 */
+	path = &state->altpath;
+	ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
+	for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
+		if (blk->disk_blkno) {
+			error = xfs_da_read_buf(state->args->trans,
+						state->args->dp,
+						blk->blkno, blk->disk_blkno,
+						&blk->bp, XFS_ATTR_FORK);
+			if (error)
+				return(error);
+		} else {
+			blk->bp = NULL;
+		}
+	}
+
+	return(0);
+}
+
+/*
+ * Look up a name in a node attribute list.
+ *
+ * This routine gets called for any attribute fork that has more than one
+ * block, i.e. both true Btree attr lists and single-leaf-blocks with
+ * "remote" values taking up more blocks.
+ */
+int
+xfs_attr_node_get(xfs_da_args_t *args)
+{
+	xfs_da_state_t *state;
+	xfs_da_state_blk_t *blk;
+	int error, retval;
+	int i;
+
+	state = xfs_da_state_alloc();
+	state->args = args;
+	state->mp = args->dp->i_mount;
+	state->blocksize = state->mp->m_sb.sb_blocksize;
+	state->node_ents = state->mp->m_attr_node_ents;
+
+	/*
+	 * Search to see if name exists, and get back a pointer to it.
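+	 * A successful lookup returns EEXIST and leaves the leaf block
+	 * holding the entry at the bottom of state->path.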
+ */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) { + retval = error; + } else if (retval == EEXIST) { + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->bp != NULL); + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + + /* + * Get the value, local or "remote" + */ + retval = xfs_attr_leaf_getvalue(blk->bp, args); + if (!retval && (args->rmtblkno > 0) + && !(args->flags & ATTR_KERNOVAL)) { + retval = xfs_attr_rmtval_get(args); + } + } + + /* + * If not in a transaction, we have to release all the buffers. + */ + for (i = 0; i < state->path.active; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + + xfs_da_state_free(state); + return(retval); +} + +STATIC int /* error */ +xfs_attr_node_list(xfs_attr_list_context_t *context) +{ + attrlist_cursor_kern_t *cursor; + xfs_attr_leafblock_t *leaf; + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + int error, i; + xfs_dabuf_t *bp; + + cursor = context->cursor; + cursor->initted = 1; + + /* + * Do all sorts of validation on the passed-in cursor structure. + * If anything is amiss, ignore the cursor and look up the hashval + * starting from the btree root. + */ + bp = NULL; + if (cursor->blkno > 0) { + error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1, + &bp, XFS_ATTR_FORK); + if ((error != 0) && (error != EFSCORRUPTED)) + return(error); + if (bp) { + node = bp->data; + switch (INT_GET(node->hdr.info.magic, ARCH_CONVERT)) { + case XFS_DA_NODE_MAGIC: + xfs_attr_trace_l_cn("wrong blk", context, node); + xfs_da_brelse(NULL, bp); + bp = NULL; + break; + case XFS_ATTR_LEAF_MAGIC: + leaf = bp->data; + if (cursor->hashval > + INT_GET(leaf->entries[ + INT_GET(leaf->hdr.count, + ARCH_CONVERT)-1].hashval, + ARCH_CONVERT)) { + xfs_attr_trace_l_cl("wrong blk", + context, leaf); + xfs_da_brelse(NULL, bp); + bp = NULL; + } else if (cursor->hashval <= + INT_GET(leaf->entries[0].hashval, + ARCH_CONVERT)) { + xfs_attr_trace_l_cl("maybe wrong blk", + context, leaf); + xfs_da_brelse(NULL, bp); + bp = NULL; + } + break; + default: + xfs_attr_trace_l_c("wrong blk - ??", context); + xfs_da_brelse(NULL, bp); + bp = NULL; + } + } + } + + /* + * We did not find what we expected given the cursor's contents, + * so we start from the top and work down based on the hash value. + * Note that start of node block is same as start of leaf block. 
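+	 * Block 0 is always the root, whatever format the tree is in,
+	 * so the loop below simply descends by hashval until it
+	 * reaches a leaf block.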
+ */ + if (bp == NULL) { + cursor->blkno = 0; + for (;;) { + error = xfs_da_read_buf(NULL, context->dp, + cursor->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) + return(error); + if (unlikely(bp == NULL)) { + XFS_ERROR_REPORT("xfs_attr_node_list(2)", + XFS_ERRLEVEL_LOW, + context->dp->i_mount); + return(XFS_ERROR(EFSCORRUPTED)); + } + node = bp->data; + if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC) + break; + if (unlikely(INT_GET(node->hdr.info.magic, ARCH_CONVERT) + != XFS_DA_NODE_MAGIC)) { + XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", + XFS_ERRLEVEL_LOW, + context->dp->i_mount, + node); + xfs_da_brelse(NULL, bp); + return(XFS_ERROR(EFSCORRUPTED)); + } + btree = node->btree; + for (i = 0; + i < INT_GET(node->hdr.count, ARCH_CONVERT); + btree++, i++) { + if (cursor->hashval + <= INT_GET(btree->hashval, + ARCH_CONVERT)) { + cursor->blkno = INT_GET(btree->before, ARCH_CONVERT); + xfs_attr_trace_l_cb("descending", + context, btree); + break; + } + } + if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { + xfs_da_brelse(NULL, bp); + return(0); + } + xfs_da_brelse(NULL, bp); + } + } + ASSERT(bp != NULL); + + /* + * Roll upward through the blocks, processing each leaf block in + * order. As long as there is space in the result buffer, keep + * adding the information. + */ + for (;;) { + leaf = bp->data; + if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + != XFS_ATTR_LEAF_MAGIC)) { + XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)", + XFS_ERRLEVEL_LOW, + context->dp->i_mount, leaf); + xfs_da_brelse(NULL, bp); + return(XFS_ERROR(EFSCORRUPTED)); + } + error = xfs_attr_leaf_list_int(bp, context); + if (error || (INT_ISZERO(leaf->hdr.info.forw, ARCH_CONVERT))) + break; /* not really an error, buffer full or EOF */ + cursor->blkno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT); + xfs_da_brelse(NULL, bp); + error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1, + &bp, XFS_ATTR_FORK); + if (error) + return(error); + if (unlikely((bp == NULL))) { + XFS_ERROR_REPORT("xfs_attr_node_list(5)", + XFS_ERRLEVEL_LOW, + context->dp->i_mount); + return(XFS_ERROR(EFSCORRUPTED)); + } + } + xfs_da_brelse(NULL, bp); + return(0); +} + + +/*======================================================================== + * External routines for manipulating out-of-line attribute values. + *========================================================================*/ + +/* + * Read the value associated with an attribute from the out-of-line buffer + * that we stored it in. 
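+ *
+ * The value may span several extents, so we map up to
+ * ATTR_RMTVALUE_MAPSIZE extents at a time with xfs_bmapi() and copy
+ * each mapped buffer into the caller's buffer in turn.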
+ */ +STATIC int +xfs_attr_rmtval_get(xfs_da_args_t *args) +{ + xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE]; + xfs_mount_t *mp; + xfs_daddr_t dblkno; + xfs_caddr_t dst; + xfs_buf_t *bp; + int nmap, error, tmp, valuelen, blkcnt, i; + xfs_dablk_t lblkno; + + ASSERT(!(args->flags & ATTR_KERNOVAL)); + + mp = args->dp->i_mount; + dst = args->value; + valuelen = args->valuelen; + lblkno = args->rmtblkno; + while (valuelen > 0) { + nmap = ATTR_RMTVALUE_MAPSIZE; + error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno, + args->rmtblkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, + NULL, 0, map, &nmap, NULL); + if (error) + return(error); + ASSERT(nmap >= 1); + + for (i = 0; (i < nmap) && (valuelen > 0); i++) { + ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) && + (map[i].br_startblock != HOLESTARTBLOCK)); + dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); + blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); + error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno, + blkcnt, XFS_BUF_LOCK, &bp); + if (error) + return(error); + + tmp = (valuelen < XFS_BUF_SIZE(bp)) + ? valuelen : XFS_BUF_SIZE(bp); + xfs_biomove(bp, 0, tmp, dst, XFS_B_READ); + xfs_buf_relse(bp); + dst += tmp; + valuelen -= tmp; + + lblkno += map[i].br_blockcount; + } + } + ASSERT(valuelen == 0); + return(0); +} + +/* + * Write the value associated with an attribute into the out-of-line buffer + * that we have defined for it. + */ +STATIC int +xfs_attr_rmtval_set(xfs_da_args_t *args) +{ + xfs_mount_t *mp; + xfs_fileoff_t lfileoff; + xfs_inode_t *dp; + xfs_bmbt_irec_t map; + xfs_daddr_t dblkno; + xfs_caddr_t src; + xfs_buf_t *bp; + xfs_dablk_t lblkno; + int blkcnt, valuelen, nmap, error, tmp, committed; + + dp = args->dp; + mp = dp->i_mount; + src = args->value; + + /* + * Find a "hole" in the attribute address space large enough for + * us to drop the new attribute's value into. + */ + blkcnt = XFS_B_TO_FSB(mp, args->valuelen); + lfileoff = 0; + error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, + XFS_ATTR_FORK); + if (error) { + return(error); + } + args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff; + args->rmtblkcnt = blkcnt; + + /* + * Roll through the "value", allocating blocks on disk as required. + */ + while (blkcnt > 0) { + /* + * Allocate a single extent, up to the size of the value. + */ + XFS_BMAP_INIT(args->flist, args->firstblock); + nmap = 1; + error = xfs_bmapi(args->trans, dp, (xfs_fileoff_t)lblkno, + blkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA | + XFS_BMAPI_WRITE, + args->firstblock, args->total, &map, &nmap, + args->flist); + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + return(error); + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + + ASSERT(nmap == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + lblkno += map.br_blockcount; + blkcnt -= map.br_blockcount; + + /* + * Start the next trans in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + return (error); + } + + /* + * Roll through the "value", copying the attribute value to the + * already-allocated blocks. 
Blocks are written synchronously + * so that we can know they are all on disk before we turn off + * the INCOMPLETE flag. + */ + lblkno = args->rmtblkno; + valuelen = args->valuelen; + while (valuelen > 0) { + /* + * Try to remember where we decided to put the value. + */ + XFS_BMAP_INIT(args->flist, args->firstblock); + nmap = 1; + error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno, + args->rmtblkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, + args->firstblock, 0, &map, &nmap, NULL); + if (error) { + return(error); + } + ASSERT(nmap == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + + dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), + blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); + + bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, + blkcnt, XFS_BUF_LOCK); + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + + tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : + XFS_BUF_SIZE(bp); + xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE); + if (tmp < XFS_BUF_SIZE(bp)) + xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp); + if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */ + return (error); + } + src += tmp; + valuelen -= tmp; + + lblkno += map.br_blockcount; + } + ASSERT(valuelen == 0); + return(0); +} + +/* + * Remove the value associated with an attribute by deleting the + * out-of-line buffer that it is stored on. + */ +STATIC int +xfs_attr_rmtval_remove(xfs_da_args_t *args) +{ + xfs_mount_t *mp; + xfs_bmbt_irec_t map; + xfs_buf_t *bp; + xfs_daddr_t dblkno; + xfs_dablk_t lblkno; + int valuelen, blkcnt, nmap, error, done, committed; + + mp = args->dp->i_mount; + + /* + * Roll through the "value", invalidating the attribute value's + * blocks. + */ + lblkno = args->rmtblkno; + valuelen = args->rmtblkcnt; + while (valuelen > 0) { + /* + * Try to remember where we decided to put the value. + */ + XFS_BMAP_INIT(args->flist, args->firstblock); + nmap = 1; + error = xfs_bmapi(NULL, args->dp, (xfs_fileoff_t)lblkno, + args->rmtblkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, + args->firstblock, 0, &map, &nmap, + args->flist); + if (error) { + return(error); + } + ASSERT(nmap == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + + dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), + blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); + + /* + * If the "remote" value is in the cache, remove it. + */ + /* bp = incore(mp->m_dev, dblkno, blkcnt, 1); */ + bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, 1); + if (bp) { + XFS_BUF_STALE(bp); + XFS_BUF_UNDELAYWRITE(bp); + xfs_buf_relse(bp); + bp = NULL; + } + + valuelen -= map.br_blockcount; + + lblkno += map.br_blockcount; + } + + /* + * Keep de-allocating extents until the remote-value region is gone. + */ + lblkno = args->rmtblkno; + blkcnt = args->rmtblkcnt; + done = 0; + while (!done) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, + 1, args->firstblock, args->flist, &done); + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + return(error); + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. 
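+	 * When that happens, re-attach the inode to the new
+	 * transaction before the next trip around the bunmapi loop.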
+ */ + if (committed) { + xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, args->dp); + } + + /* + * Close out trans and start the next one in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, args->dp))) + return (error); + } + return(0); +} + +#if defined(XFS_ATTR_TRACE) +/* + * Add a trace buffer entry for an attr_list context structure. + */ +void +xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context) +{ + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, + (__psunsigned_t)context->dp, + (__psunsigned_t)context->cursor->hashval, + (__psunsigned_t)context->cursor->blkno, + (__psunsigned_t)context->cursor->offset, + (__psunsigned_t)context->alist, + (__psunsigned_t)context->bufsize, + (__psunsigned_t)context->count, + (__psunsigned_t)context->firstu, + (__psunsigned_t) + (context->count > 0) + ? (ATTR_ENTRY(context->alist, + context->count-1)->a_valuelen) + : 0, + (__psunsigned_t)context->dupcnt, + (__psunsigned_t)context->flags, + (__psunsigned_t)NULL, + (__psunsigned_t)NULL, + (__psunsigned_t)NULL); +} + +/* + * Add a trace buffer entry for a context structure and a Btree node. + */ +void +xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, + struct xfs_da_intnode *node) +{ + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, + (__psunsigned_t)context->dp, + (__psunsigned_t)context->cursor->hashval, + (__psunsigned_t)context->cursor->blkno, + (__psunsigned_t)context->cursor->offset, + (__psunsigned_t)context->alist, + (__psunsigned_t)context->bufsize, + (__psunsigned_t)context->count, + (__psunsigned_t)context->firstu, + (__psunsigned_t) + (context->count > 0) + ? (ATTR_ENTRY(context->alist, + context->count-1)->a_valuelen) + : 0, + (__psunsigned_t)context->dupcnt, + (__psunsigned_t)context->flags, + (__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); +} + +/* + * Add a trace buffer entry for a context structure and a Btree element. + */ +void +xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, + struct xfs_da_node_entry *btree) +{ + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, + (__psunsigned_t)context->dp, + (__psunsigned_t)context->cursor->hashval, + (__psunsigned_t)context->cursor->blkno, + (__psunsigned_t)context->cursor->offset, + (__psunsigned_t)context->alist, + (__psunsigned_t)context->bufsize, + (__psunsigned_t)context->count, + (__psunsigned_t)context->firstu, + (__psunsigned_t) + (context->count > 0) + ? (ATTR_ENTRY(context->alist, + context->count-1)->a_valuelen) + : 0, + (__psunsigned_t)context->dupcnt, + (__psunsigned_t)context->flags, + (__psunsigned_t)INT_GET(btree->hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(btree->before, ARCH_CONVERT), + (__psunsigned_t)NULL); +} + +/* + * Add a trace buffer entry for a context structure and a leaf block. + */ +void +xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, + struct xfs_attr_leafblock *leaf) +{ + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, + (__psunsigned_t)context->dp, + (__psunsigned_t)context->cursor->hashval, + (__psunsigned_t)context->cursor->blkno, + (__psunsigned_t)context->cursor->offset, + (__psunsigned_t)context->alist, + (__psunsigned_t)context->bufsize, + (__psunsigned_t)context->count, + (__psunsigned_t)context->firstu, + (__psunsigned_t) + (context->count > 0) + ? 
(ATTR_ENTRY(context->alist, + context->count-1)->a_valuelen) + : 0, + (__psunsigned_t)context->dupcnt, + (__psunsigned_t)context->flags, + (__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); +} + +/* + * Add a trace buffer entry for the arguments given to the routine, + * generic form. + */ +void +xfs_attr_trace_enter(int type, char *where, + __psunsigned_t a2, __psunsigned_t a3, + __psunsigned_t a4, __psunsigned_t a5, + __psunsigned_t a6, __psunsigned_t a7, + __psunsigned_t a8, __psunsigned_t a9, + __psunsigned_t a10, __psunsigned_t a11, + __psunsigned_t a12, __psunsigned_t a13, + __psunsigned_t a14, __psunsigned_t a15) +{ + ASSERT(xfs_attr_trace_buf); + ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type), + (void *)where, + (void *)a2, (void *)a3, (void *)a4, + (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10, + (void *)a11, (void *)a12, (void *)a13, + (void *)a14, (void *)a15); +} +#endif /* XFS_ATTR_TRACE */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_attr_fetch.c linux.21rc5-ac2/fs/xfs/xfs_attr_fetch.c --- linux.21rc5/fs/xfs/xfs_attr_fetch.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_attr_fetch.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_itable.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" + +int +xfs_attr_fetch(xfs_inode_t *ip, char *name, char *value, int valuelen) +{ + xfs_da_args_t args; + int error; + + if (XFS_IFORK_Q(ip) == 0) + return ENOATTR; + /* + * Do the argument setup for the xfs_attr routines. + */ + memset((char *)&args, 0, sizeof(args)); + args.dp = ip; + args.flags = ATTR_ROOT; + args.whichfork = XFS_ATTR_FORK; + args.name = name; + args.namelen = strlen(name); + args.value = value; + args.valuelen = valuelen; + args.hashval = xfs_da_hashname(args.name, args.namelen); + args.oknoent = 1; + + /* + * Decide on what work routines to call based on the inode size. + */ + if (args.dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) + error = xfs_attr_shortform_getvalue(&args); + else if (xfs_bmap_one_block(args.dp, XFS_ATTR_FORK)) + error = xfs_attr_leaf_get(&args); + else + error = xfs_attr_node_get(&args); + + if (error == EEXIST) + error = 0; + + return(error); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_attr.h linux.21rc5-ac2/fs/xfs/xfs_attr.h --- linux.21rc5/fs/xfs/xfs_attr.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_attr.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ATTR_H__ +#define __XFS_ATTR_H__ + +/* + * xfs_attr.h + * + * Large attribute lists are structured around Btrees where all the data + * elements are in the leaf nodes. Attribute names are hashed into an int, + * then that int is used as the index into the Btree. Since the hashval + * of an attribute name may not be unique, we may have duplicate keys. + * The internal links in the Btree are logical block offsets into the file. + * + * Small attribute lists use a different format and are packed as tightly + * as possible so as to fit into the literal area of the inode. + */ + +#ifdef XFS_ALL_TRACE +#define XFS_ATTR_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_ATTR_TRACE +#endif + + +/*======================================================================== + * External interfaces + *========================================================================*/ + +#define ATTR_ROOT 0x0002 /* use attrs in root namespace, not user */ +#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */ +#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */ +#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */ +#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */ +#define ATTR_KERNAMELS 0x4000 /* [kernel] list attr names (simple list) */ +#define ATTR_KERNFULLS 0x8000 /* [kernel] full attr list, ie. root+user */ + +/* + * The maximum size (into the kernel or returned from the kernel) of an + * attribute value or the buffer used for an attr_list() call. Larger + * sizes will result in an ERANGE return code. + */ +#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */ + +/* + * Define how lists of attribute names are returned to the user from + * the attr_list() call. A large, 32bit aligned, buffer is passed in + * along with its size. We put an array of offsets at the top that each + * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom. + */ +typedef struct attrlist { + __s32 al_count; /* number of entries in attrlist */ + __s32 al_more; /* T/F: more attrs (do call again) */ + __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */ +} attrlist_t; + +/* + * Show the interesting info about one attribute. This is what the + * al_offset[i] entry points to. + */ +typedef struct attrlist_ent { /* data from attr_list() */ + __u32 a_valuelen; /* number bytes in value of attr */ + char a_name[1]; /* attr name (NULL terminated) */ +} attrlist_ent_t; + +/* + * Given a pointer to the (char*) buffer containing the attr_list() result, + * and an index, return a pointer to the indicated attribute in the buffer. + */ +#define ATTR_ENTRY(buffer, index) \ + ((attrlist_ent_t *) \ + &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ]) + +/* + * Multi-attribute operation vector. + */ +typedef struct attr_multiop { + int am_opcode; /* operation to perform (ATTR_OP_GET, etc.) 
*/ + int am_error; /* [out arg] result of this sub-op (an errno) */ + char *am_attrname; /* attribute name to work with */ + char *am_attrvalue; /* [in/out arg] attribute value (raw bytes) */ + int am_length; /* [in/out arg] length of value */ + int am_flags; /* bitwise OR of attr API flags defined above */ +} attr_multiop_t; + +#define ATTR_OP_GET 1 /* return the indicated attr's value */ +#define ATTR_OP_SET 2 /* set/create the indicated attr/value pair */ +#define ATTR_OP_REMOVE 3 /* remove the indicated attr */ + +/* + * Kernel-internal version of the attrlist cursor. + */ +typedef struct attrlist_cursor_kern { + __u32 hashval; /* hash value of next entry to add */ + __u32 blkno; /* block containing entry (suggestion) */ + __u32 offset; /* offset in list of equal-hashvals */ + __u16 pad1; /* padding to match user-level */ + __u8 pad2; /* padding to match user-level */ + __u8 initted; /* T/F: cursor has been initialized */ +} attrlist_cursor_kern_t; + + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +struct cred; +struct vnode; +struct xfs_inode; +struct attrlist_cursor_kern; +struct xfs_ext_attr; +struct xfs_da_args; + +/* + * Overall external interface routines. + */ +int xfs_attr_get(bhv_desc_t *, char *, char *, int *, int, struct cred *); +int xfs_attr_set(bhv_desc_t *, char *, char *, int, int, struct cred *); +int xfs_attr_remove(bhv_desc_t *, char *, int, struct cred *); +int xfs_attr_list(bhv_desc_t *, char *, int, int, + struct attrlist_cursor_kern *, struct cred *); +int xfs_attr_inactive(struct xfs_inode *dp); + +int xfs_attr_node_get(struct xfs_da_args *); +int xfs_attr_leaf_get(struct xfs_da_args *); +int xfs_attr_shortform_getvalue(struct xfs_da_args *); +int xfs_attr_fetch(struct xfs_inode *, char *, char *, int); + +#endif /* __XFS_ATTR_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_attr_leaf.c linux.21rc5-ac2/fs/xfs/xfs_attr_leaf.c --- linux.21rc5/fs/xfs/xfs_attr_leaf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_attr_leaf.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,3004 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +/* + * xfs_attr_leaf.c + * + * GROT: figure out how to recover gracefully when bmap returns ENOSPC. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "xfs_error.h" +#include "xfs_bit.h" + +/* + * xfs_attr_leaf.c + * + * Routines to implement leaf blocks of attributes as Btrees of hashed names. + */ + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Routines used for growing the Btree. + */ +STATIC int xfs_attr_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args, + int freemap_index); +STATIC void xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer); +STATIC void xfs_attr_leaf_rebalance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2); +STATIC int xfs_attr_leaf_figure_balance(xfs_da_state_t *state, + xfs_da_state_blk_t *leaf_blk_1, + xfs_da_state_blk_t *leaf_blk_2, + int *number_entries_in_blk1, + int *number_usedbytes_in_blk1); + +/* + * Utility routines. + */ +STATIC void xfs_attr_leaf_moveents(xfs_attr_leafblock_t *src_leaf, + int src_start, + xfs_attr_leafblock_t *dst_leaf, + int dst_start, int move_count, + xfs_mount_t *mp); + + +/*======================================================================== + * External routines when dirsize < XFS_LITINO(mp). + *========================================================================*/ + +/* + * Create the initial contents of a shortform attribute list. + */ +int +xfs_attr_shortform_create(xfs_da_args_t *args) +{ + xfs_attr_sf_hdr_t *hdr; + xfs_inode_t *dp; + xfs_ifork_t *ifp; + + dp = args->dp; + ASSERT(dp != NULL); + ifp = dp->i_afp; + ASSERT(ifp != NULL); + ASSERT(ifp->if_bytes == 0); + if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) { + ifp->if_flags &= ~XFS_IFEXTENTS; /* just in case */ + dp->i_d.di_aformat = XFS_DINODE_FMT_LOCAL; + ifp->if_flags |= XFS_IFINLINE; + } else { + ASSERT(ifp->if_flags & XFS_IFINLINE); + } + xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); + hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; + INT_ZERO(hdr->count, ARCH_CONVERT); + INT_SET(hdr->totsize, ARCH_CONVERT, sizeof(*hdr)); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); + return(0); +} + +/* + * Add a name/value pair to the shortform attribute list. + * Overflow from the inode has already been checked for. 
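+ * A scan of the existing entries rejects duplicates with EEXIST;
+ * otherwise the inline fork is grown and the new name/value pair
+ * is appended after the last entry.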
+ */ +int +xfs_attr_shortform_add(xfs_da_args_t *args) +{ + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + int i, offset, size; + xfs_inode_t *dp; + xfs_ifork_t *ifp; + + dp = args->dp; + ifp = dp->i_afp; + ASSERT(ifp->if_flags & XFS_IFINLINE); + sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { + if (sfe->namelen != args->namelen) + continue; + if (memcmp(args->name, sfe->nameval, args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0)) + continue; + return(XFS_ERROR(EEXIST)); + } + + offset = (char *)sfe - (char *)sf; + size = XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen); + xfs_idata_realloc(dp, size, XFS_ATTR_FORK); + sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; + sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset); + + sfe->namelen = args->namelen; + INT_SET(sfe->valuelen, ARCH_CONVERT, args->valuelen); + sfe->flags = (args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0; + memcpy(sfe->nameval, args->name, args->namelen); + memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen); + INT_MOD(sf->hdr.count, ARCH_CONVERT, 1); + INT_MOD(sf->hdr.totsize, ARCH_CONVERT, size); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); + + return(0); +} + +/* + * Remove a name from the shortform attribute list structure. + */ +int +xfs_attr_shortform_remove(xfs_da_args_t *args) +{ + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + int base, size=0, end, totsize, i; + xfs_inode_t *dp; + + /* + * Remove the attribute. + */ + dp = args->dp; + base = sizeof(xfs_attr_sf_hdr_t); + sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe), + base += size, i++) { + size = XFS_ATTR_SF_ENTSIZE(sfe); + if (sfe->namelen != args->namelen) + continue; + if (memcmp(sfe->nameval, args->name, args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0)) + continue; + break; + } + if (i == INT_GET(sf->hdr.count, ARCH_CONVERT)) + return(XFS_ERROR(ENOATTR)); + + end = base + size; + totsize = INT_GET(sf->hdr.totsize, ARCH_CONVERT); + if (end != totsize) { + memmove(&((char *)sf)[base], &((char *)sf)[end], + totsize - end); + } + INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); + INT_MOD(sf->hdr.totsize, ARCH_CONVERT, -size); + xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); + + return(0); +} + +/* + * Look up a name in a shortform attribute list structure. + */ +/*ARGSUSED*/ +int +xfs_attr_shortform_lookup(xfs_da_args_t *args) +{ + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + int i; + xfs_ifork_t *ifp; + + ifp = args->dp->i_afp; + ASSERT(ifp->if_flags & XFS_IFINLINE); + sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { + if (sfe->namelen != args->namelen) + continue; + if (memcmp(args->name, sfe->nameval, args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0)) + continue; + return(XFS_ERROR(EEXIST)); + } + return(XFS_ERROR(ENOATTR)); +} + +/* + * Look up a name in a shortform attribute list structure. 
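+ * Unlike the plain lookup above, this copies the value out as well,
+ * or just its length when ATTR_KERNOVAL is set.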
+ */ +/*ARGSUSED*/ +int +xfs_attr_shortform_getvalue(xfs_da_args_t *args) +{ + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + int i; + + ASSERT(args->dp->i_d.di_aformat == XFS_IFINLINE); + sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data; + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { + if (sfe->namelen != args->namelen) + continue; + if (memcmp(args->name, sfe->nameval, args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0)) + continue; + if (args->flags & ATTR_KERNOVAL) { + args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + return(XFS_ERROR(EEXIST)); + } + if (args->valuelen < INT_GET(sfe->valuelen, ARCH_CONVERT)) { + args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + return(XFS_ERROR(ERANGE)); + } + args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + memcpy(args->value, &sfe->nameval[args->namelen], + args->valuelen); + return(XFS_ERROR(EEXIST)); + } + return(XFS_ERROR(ENOATTR)); +} + +/* + * Convert from using the shortform to the leaf. + */ +int +xfs_attr_shortform_to_leaf(xfs_da_args_t *args) +{ + xfs_inode_t *dp; + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + xfs_da_args_t nargs; + char *tmpbuffer; + int error, i, size; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + xfs_ifork_t *ifp; + + dp = args->dp; + ifp = dp->i_afp; + sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; + size = INT_GET(sf->hdr.totsize, ARCH_CONVERT); + tmpbuffer = kmem_alloc(size, KM_SLEEP); + ASSERT(tmpbuffer != NULL); + memcpy(tmpbuffer, ifp->if_u1.if_data, size); + sf = (xfs_attr_shortform_t *)tmpbuffer; + + xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); + bp = NULL; + error = xfs_da_grow_inode(args, &blkno); + if (error) { + /* + * If we hit an IO error middle of the transaction inside + * grow_inode(), we may have inconsistent data. Bail out. + */ + if (error == EIO) + goto out; + xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ + memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ + goto out; + } + + ASSERT(blkno == 0); + error = xfs_attr_leaf_create(args, blkno, &bp); + if (error) { + error = xfs_da_shrink_inode(args, 0, bp); + bp = NULL; + if (error) + goto out; + xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ + memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ + goto out; + } + + memset((char *)&nargs, 0, sizeof(nargs)); + nargs.dp = dp; + nargs.firstblock = args->firstblock; + nargs.flist = args->flist; + nargs.total = args->total; + nargs.whichfork = XFS_ATTR_FORK; + nargs.trans = args->trans; + nargs.oknoent = 1; + + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + nargs.name = (char *)sfe->nameval; + nargs.namelen = sfe->namelen; + nargs.value = (char *)&sfe->nameval[nargs.namelen]; + nargs.valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + nargs.hashval = xfs_da_hashname((char *)sfe->nameval, + sfe->namelen); + nargs.flags = (sfe->flags & XFS_ATTR_ROOT) ? 
				ATTR_ROOT : 0;
+		error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */
+		ASSERT(error == ENOATTR);
+		error = xfs_attr_leaf_add(bp, &nargs);
+		ASSERT(error != ENOSPC);
+		if (error)
+			goto out;
+		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+	}
+	error = 0;
+
+out:
+	if(bp)
+		xfs_da_buf_done(bp);
+	kmem_free(tmpbuffer, size);
+	return(error);
+}
+
+STATIC int
+xfs_attr_shortform_compare(const void *a, const void *b)
+{
+	xfs_attr_sf_sort_t *sa, *sb;
+
+	sa = (xfs_attr_sf_sort_t *)a;
+	sb = (xfs_attr_sf_sort_t *)b;
+	if (INT_GET(sa->hash, ARCH_CONVERT)
+				< INT_GET(sb->hash, ARCH_CONVERT)) {
+		return(-1);
+	} else if (INT_GET(sa->hash, ARCH_CONVERT)
+				> INT_GET(sb->hash, ARCH_CONVERT)) {
+		return(1);
+	} else {
+		return(sa->entno - sb->entno);
+	}
+}
+
+/*
+ * Copy out entries of shortform attribute lists for attr_list().
+ * Shortform attribute lists are not stored in hashval sorted order.
+ * If the output buffer is not large enough to hold them all, then
+ * we have to calculate each entry's hashval and sort them before
+ * we can begin returning them to the user.
+ */
+/*ARGSUSED*/
+int
+xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+{
+	attrlist_cursor_kern_t *cursor;
+	xfs_attr_sf_sort_t *sbuf, *sbp;
+	xfs_attr_shortform_t *sf;
+	xfs_attr_sf_entry_t *sfe;
+	xfs_inode_t *dp;
+	int sbsize, nsbuf, count, i;
+
+	ASSERT(context != NULL);
+	dp = context->dp;
+	ASSERT(dp != NULL);
+	ASSERT(dp->i_afp != NULL);
+	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
+	ASSERT(sf != NULL);
+	if (INT_ISZERO(sf->hdr.count, ARCH_CONVERT))
+		return(0);
+	cursor = context->cursor;
+	ASSERT(cursor != NULL);
+
+	xfs_attr_trace_l_c("sf start", context);
+
+	/*
+	 * If the buffer is large enough, do not bother with sorting.
+	 * Note the generous fudge factor of 16 overhead bytes per entry.
+	 */
+	if ((dp->i_afp->if_bytes + INT_GET(sf->hdr.count, ARCH_CONVERT) * 16)
+							< context->bufsize) {
+		for (i = 0, sfe = &sf->list[0];
+				i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
+			int ns = (sfe->flags & XFS_ATTR_ROOT)?
+						ROOT_NAMES : USER_NAMES;
+			if (((context->flags & ATTR_ROOT) != 0) !=
+			    ((sfe->flags & XFS_ATTR_ROOT) != 0) &&
+			    !(context->flags & ATTR_KERNFULLS)) {
+				sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+				continue;
+			}
+			if (context->flags & ATTR_KERNOVAL) {
+				ASSERT(context->flags & ATTR_KERNAMELS);
+				context->count += xfs_namespaces[ns].namelen
+				    + INT_GET(sfe->namelen, ARCH_CONVERT) + 1;
+			}
+			else {
+				if (xfs_attr_put_listent(context, ns,
+						   (char *)sfe->nameval,
+						   (int)sfe->namelen,
+						   (int)INT_GET(sfe->valuelen,
+								ARCH_CONVERT)))
+					break;
+			}
+			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+		}
+		xfs_attr_trace_l_c("sf big-gulp", context);
+		return(0);
+	}
+
+	/*
+	 * It didn't all fit, so we have to sort everything on hashval.
+	 */
+	sbsize = INT_GET(sf->hdr.count, ARCH_CONVERT) * sizeof(*sbuf);
+	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
+
+	/*
+	 * Scan the attribute list for the rest of the entries, storing
+	 * the relevant info from only those that match into a buffer.
+ */ + nsbuf = 0; + for (i = 0, sfe = &sf->list[0]; + i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + if (unlikely( + ((char *)sfe < (char *)sf) || + ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)) || + (sfe->namelen >= MAXNAMELEN))) { + XFS_CORRUPTION_ERROR("xfs_attr_shortform_list", + XFS_ERRLEVEL_LOW, + context->dp->i_mount, sfe); + xfs_attr_trace_l_c("sf corrupted", context); + kmem_free(sbuf, sbsize); + return XFS_ERROR(EFSCORRUPTED); + } + if (((context->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0) && + !(context->flags & ATTR_KERNFULLS)) { + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + continue; + } + sbp->entno = i; + INT_SET(sbp->hash, ARCH_CONVERT, + xfs_da_hashname((char *)sfe->nameval, sfe->namelen)); + sbp->name = (char *)sfe->nameval; + sbp->namelen = sfe->namelen; + /* These are bytes, and both on-disk, don't endian-flip */ + sbp->valuelen = sfe->valuelen; + sbp->flags = sfe->flags; + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + sbp++; + nsbuf++; + } + + /* + * Sort the entries on hash then entno. + */ + qsort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare); + + /* + * Re-find our place IN THE SORTED LIST. + */ + count = 0; + cursor->initted = 1; + cursor->blkno = 0; + for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { + if (INT_GET(sbp->hash, ARCH_CONVERT) == cursor->hashval) { + if (cursor->offset == count) { + break; + } + count++; + } else if (INT_GET(sbp->hash, ARCH_CONVERT) > cursor->hashval) { + break; + } + } + if (i == nsbuf) { + kmem_free(sbuf, sbsize); + xfs_attr_trace_l_c("blk end", context); + return(0); + } + + /* + * Loop putting entries into the user buffer. + */ + for ( ; i < nsbuf; i++, sbp++) { + int ns = (sbp->flags & XFS_ATTR_ROOT)? ROOT_NAMES:USER_NAMES; + if (cursor->hashval != INT_GET(sbp->hash, ARCH_CONVERT)) { + cursor->hashval = INT_GET(sbp->hash, ARCH_CONVERT); + cursor->offset = 0; + } + if (context->flags & ATTR_KERNOVAL) { + ASSERT(context->flags & ATTR_KERNAMELS); + context->count += xfs_namespaces[ns].namelen + + sbp->namelen + 1; + } + else { + if (xfs_attr_put_listent(context, ns, + sbp->name, sbp->namelen, + INT_GET(sbp->valuelen, ARCH_CONVERT))) + break; + } + cursor->offset++; + } + + kmem_free(sbuf, sbsize); + xfs_attr_trace_l_c("sf E-O-F", context); + return(0); +} + +/* + * Check a leaf attribute block to see if all the entries would fit into + * a shortform attribute list. 
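+ * Only "local" entries can be counted; a single "remote" value rules
+ * the conversion out, and INCOMPLETE entries are ignored since they
+ * would not be copied anyway.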
+ */ +int +xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + int bytes, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + + entry = &leaf->entries[0]; + bytes = sizeof(struct xfs_attr_sf_hdr); + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if (entry->flags & XFS_ATTR_INCOMPLETE) + continue; /* don't copy partial entries */ + if (!(entry->flags & XFS_ATTR_LOCAL)) + return(0); + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); + if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) + return(0); + if (INT_GET(name_loc->valuelen, ARCH_CONVERT) >= XFS_ATTR_SF_ENTSIZE_MAX) + return(0); + bytes += sizeof(struct xfs_attr_sf_entry)-1 + + name_loc->namelen + + INT_GET(name_loc->valuelen, ARCH_CONVERT); + } + return( bytes < XFS_IFORK_ASIZE(dp) ); +} + +/* + * Convert a leaf attribute list to shortform attribute list + */ +int +xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_da_args_t nargs; + xfs_inode_t *dp; + char *tmpbuffer; + int error, i; + + dp = args->dp; + tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP); + ASSERT(tmpbuffer != NULL); + + ASSERT(bp != NULL); + memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); + leaf = (xfs_attr_leafblock_t *)tmpbuffer; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); + + /* + * Clean out the prior contents of the attribute list. + */ + error = xfs_da_shrink_inode(args, 0, bp); + if (error) + goto out; + error = xfs_attr_shortform_create(args); + if (error) + goto out; + + /* + * Copy the attributes + */ + memset((char *)&nargs, 0, sizeof(nargs)); + nargs.dp = dp; + nargs.firstblock = args->firstblock; + nargs.flist = args->flist; + nargs.total = args->total; + nargs.whichfork = XFS_ATTR_FORK; + nargs.trans = args->trans; + nargs.oknoent = 1; + entry = &leaf->entries[0]; + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if (entry->flags & XFS_ATTR_INCOMPLETE) + continue; /* don't copy partial entries */ + if (INT_ISZERO(entry->nameidx, ARCH_CONVERT)) + continue; + ASSERT(entry->flags & XFS_ATTR_LOCAL); + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); + nargs.name = (char *)name_loc->nameval; + nargs.namelen = name_loc->namelen; + nargs.value = (char *)&name_loc->nameval[nargs.namelen]; + nargs.valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); + nargs.hashval = INT_GET(entry->hashval, ARCH_CONVERT); + nargs.flags = (entry->flags & XFS_ATTR_ROOT) ? ATTR_ROOT : 0; + xfs_attr_shortform_add(&nargs); + } + error = 0; + +out: + kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount)); + return(error); +} + +/* + * Convert from using a single leaf to a root node and a leaf. 
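+ * The existing leaf at block 0 is copied out to a newly allocated
+ * block, and block 0 is rebuilt as a one-entry node pointing at it.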
+ */ +int +xfs_attr_leaf_to_node(xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_da_intnode_t *node; + xfs_inode_t *dp; + xfs_dabuf_t *bp1, *bp2; + xfs_dablk_t blkno; + int error; + + dp = args->dp; + bp1 = bp2 = NULL; + error = xfs_da_grow_inode(args, &blkno); + if (error) + goto out; + error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1, + XFS_ATTR_FORK); + if (error) + goto out; + ASSERT(bp1 != NULL); + bp2 = NULL; + error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp2, + XFS_ATTR_FORK); + if (error) + goto out; + ASSERT(bp2 != NULL); + memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount)); + xfs_da_buf_done(bp1); + bp1 = NULL; + xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1); + + /* + * Set up the new root node. + */ + error = xfs_da_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK); + if (error) + goto out; + node = bp1->data; + leaf = bp2->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + /* both on-disk, don't endian-flip twice */ + node->btree[0].hashval = + leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval; + INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); + INT_SET(node->hdr.count, ARCH_CONVERT, 1); + xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1); + error = 0; +out: + if (bp1) + xfs_da_buf_done(bp1); + if (bp2) + xfs_da_buf_done(bp2); + return(error); +} + + +/*======================================================================== + * Routines used for growing the Btree. + *========================================================================*/ + +/* + * Create the initial contents of a leaf attribute list + * or a leaf in a node attribute list. + */ +int +xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_hdr_t *hdr; + xfs_inode_t *dp; + xfs_dabuf_t *bp; + int error; + + dp = args->dp; + ASSERT(dp != NULL); + error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + leaf = bp->data; + memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); + hdr = &leaf->hdr; + INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_ATTR_LEAF_MAGIC); + INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); + if (INT_ISZERO(hdr->firstused, ARCH_CONVERT)) { + INT_SET(hdr->firstused, ARCH_CONVERT, + XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN); + } + + INT_SET(hdr->freemap[0].base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_hdr_t)); + INT_SET(hdr->freemap[0].size, ARCH_CONVERT, + INT_GET(hdr->firstused, ARCH_CONVERT) + - INT_GET(hdr->freemap[0].base, + ARCH_CONVERT)); + + xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); + + *bpp = bp; + return(0); +} + +/* + * Split the leaf node, rebalance, then add the new entry. + */ +int +xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, + xfs_da_state_blk_t *newblk) +{ + xfs_dablk_t blkno; + int error; + + /* + * Allocate space for a new leaf node. + */ + ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC); + error = xfs_da_grow_inode(state->args, &blkno); + if (error) + return(error); + error = xfs_attr_leaf_create(state->args, blkno, &newblk->bp); + if (error) + return(error); + newblk->blkno = blkno; + newblk->magic = XFS_ATTR_LEAF_MAGIC; + + /* + * Rebalance the entries across the two leaves. + * NOTE: rebalance() currently depends on the 2nd block being empty. 
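+	 * That is, the new (right-hand) block always receives the
+	 * upper half of the old block's entries.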
+ */ + xfs_attr_leaf_rebalance(state, oldblk, newblk); + error = xfs_da_blk_link(state, oldblk, newblk); + if (error) + return(error); + + /* + * Save info on "old" attribute for "atomic rename" ops, leaf_add() + * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the + * "new" attrs info. Will need the "old" info to remove it later. + * + * Insert the "new" entry in the correct block. + */ + if (state->inleaf) + error = xfs_attr_leaf_add(oldblk->bp, state->args); + else + error = xfs_attr_leaf_add(newblk->bp, state->args); + + /* + * Update last hashval in each block since we added the name. + */ + oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL); + newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL); + return(error); +} + +/* + * Add a name to the leaf attribute list structure. + */ +int +xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_hdr_t *hdr; + xfs_attr_leaf_map_t *map; + int tablesize, entsize, sum, tmp, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT((args->index >= 0) + && (args->index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); + hdr = &leaf->hdr; + entsize = xfs_attr_leaf_newentsize(args, + args->trans->t_mountp->m_sb.sb_blocksize, NULL); + + /* + * Search through freemap for first-fit on new name length. + * (may need to figure in size of entry struct too) + */ + tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) + * sizeof(xfs_attr_leaf_entry_t) + + sizeof(xfs_attr_leaf_hdr_t); + map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1]; + for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) { + if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) { + sum += INT_GET(map->size, ARCH_CONVERT); + continue; + } + if (INT_ISZERO(map->size, ARCH_CONVERT)) + continue; /* no space in this map */ + tmp = entsize; + if (INT_GET(map->base, ARCH_CONVERT) + < INT_GET(hdr->firstused, ARCH_CONVERT)) + tmp += sizeof(xfs_attr_leaf_entry_t); + if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { + tmp = xfs_attr_leaf_add_work(bp, args, i); + return(tmp); + } + sum += INT_GET(map->size, ARCH_CONVERT); + } + + /* + * If there are no holes in the address space of the block, + * and we don't have enough freespace, then compaction will do us + * no good and we should just give up. + */ + if (!hdr->holes && (sum < entsize)) + return(XFS_ERROR(ENOSPC)); + + /* + * Compact the entries to coalesce free space. + * This may change the hdr->count via dropping INCOMPLETE entries. + */ + xfs_attr_leaf_compact(args->trans, bp); + + /* + * After compaction, the block is guaranteed to have only one + * free region, in freemap[0]. If it is not big enough, give up. + */ + if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) + < (entsize + sizeof(xfs_attr_leaf_entry_t))) + return(XFS_ERROR(ENOSPC)); + + return(xfs_attr_leaf_add_work(bp, args, 0)); +} + +/* + * Add a name to a leaf attribute list structure. 
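+ * "mapindex" identifies the freemap region, already chosen by the
+ * caller, that is large enough to hold the new name/value pair.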
+ */ +STATIC int +xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_hdr_t *hdr; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_attr_leaf_map_t *map; + xfs_mount_t *mp; + int tmp, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + hdr = &leaf->hdr; + ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE)); + ASSERT((args->index >= 0) + && (args->index <= INT_GET(hdr->count, ARCH_CONVERT))); + + /* + * Force open some space in the entry array and fill it in. + */ + entry = &leaf->entries[args->index]; + if (args->index < INT_GET(hdr->count, ARCH_CONVERT)) { + tmp = INT_GET(hdr->count, ARCH_CONVERT) - args->index; + tmp *= sizeof(xfs_attr_leaf_entry_t); + memmove((char *)(entry+1), (char *)entry, tmp); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); + } + INT_MOD(hdr->count, ARCH_CONVERT, 1); + + /* + * Allocate space for the new string (at the end of the run). + */ + map = &hdr->freemap[mapindex]; + mp = args->trans->t_mountp; + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT((INT_GET(map->base, ARCH_CONVERT) & 0x3) == 0); + ASSERT(INT_GET(map->size, ARCH_CONVERT) + >= xfs_attr_leaf_newentsize(args, + mp->m_sb.sb_blocksize, NULL)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT((INT_GET(map->size, ARCH_CONVERT) & 0x3) == 0); + INT_MOD(map->size, ARCH_CONVERT, + -xfs_attr_leaf_newentsize(args, mp->m_sb.sb_blocksize, &tmp)); + INT_SET(entry->nameidx, ARCH_CONVERT, + INT_GET(map->base, ARCH_CONVERT) + + INT_GET(map->size, ARCH_CONVERT)); + INT_SET(entry->hashval, ARCH_CONVERT, args->hashval); + entry->flags = tmp ? XFS_ATTR_LOCAL : 0; + entry->flags |= (args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0; + if (args->rename) { + entry->flags |= XFS_ATTR_INCOMPLETE; + if ((args->blkno2 == args->blkno) && + (args->index2 <= args->index)) { + args->index2++; + } + } + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); + ASSERT((args->index == 0) || (INT_GET(entry->hashval, ARCH_CONVERT) + >= INT_GET((entry-1)->hashval, + ARCH_CONVERT))); + ASSERT((args->index == INT_GET(hdr->count, ARCH_CONVERT)-1) || + (INT_GET(entry->hashval, ARCH_CONVERT) + <= (INT_GET((entry+1)->hashval, ARCH_CONVERT)))); + + /* + * Copy the attribute name and value into the new space. + * + * For "remote" attribute values, simply note that we need to + * allocate space for the "remote" value. We can't actually + * allocate the extents in this transaction, and we can't decide + * which blocks they should be as we might allocate more blocks + * as part of this transaction (a split operation for example). 
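+	 * Instead, the entry is marked INCOMPLETE and the shape of the
+	 * future value is recorded in rmtblkno/rmtblkcnt, to be filled
+	 * in by xfs_attr_rmtval_set() in a later transaction.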
+	 */
+	if (entry->flags & XFS_ATTR_LOCAL) {
+		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+		name_loc->namelen = args->namelen;
+		INT_SET(name_loc->valuelen, ARCH_CONVERT, args->valuelen);
+		memcpy((char *)name_loc->nameval, args->name, args->namelen);
+		memcpy((char *)&name_loc->nameval[args->namelen], args->value,
+				   INT_GET(name_loc->valuelen, ARCH_CONVERT));
+	} else {
+		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		name_rmt->namelen = args->namelen;
+		memcpy((char *)name_rmt->name, args->name, args->namelen);
+		entry->flags |= XFS_ATTR_INCOMPLETE;
+		/* just in case */
+		INT_ZERO(name_rmt->valuelen, ARCH_CONVERT);
+		INT_ZERO(name_rmt->valueblk, ARCH_CONVERT);
+		args->rmtblkno = 1;
+		args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
+	}
+	xfs_da_log_buf(args->trans, bp,
+	     XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
+				   xfs_attr_leaf_entsize(leaf, args->index)));
+
+	/*
+	 * Update the control info for this leaf node
+	 */
+	if (INT_GET(entry->nameidx, ARCH_CONVERT)
+				< INT_GET(hdr->firstused, ARCH_CONVERT)) {
+		/* both on-disk, don't endian-flip twice */
+		hdr->firstused = entry->nameidx;
+	}
+	ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT)
+	       >= ((INT_GET(hdr->count, ARCH_CONVERT)
+			  * sizeof(*entry))+sizeof(*hdr)));
+	tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1)
+					* sizeof(xfs_attr_leaf_entry_t)
+					+ sizeof(xfs_attr_leaf_hdr_t);
+	map = &hdr->freemap[0];
+	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
+		if (INT_GET(map->base, ARCH_CONVERT) == tmp) {
+			INT_MOD(map->base, ARCH_CONVERT,
+					sizeof(xfs_attr_leaf_entry_t));
+			INT_MOD(map->size, ARCH_CONVERT,
+					-sizeof(xfs_attr_leaf_entry_t));
+		}
+	}
+	INT_MOD(hdr->usedbytes, ARCH_CONVERT,
+				xfs_attr_leaf_entsize(leaf, args->index));
+	xfs_da_log_buf(args->trans, bp,
+		XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
+	return(0);
+}
+
+/*
+ * Garbage collect a leaf attribute list block by copying it to a new buffer.
+ */
+STATIC void
+xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
+{
+	xfs_attr_leafblock_t *leaf_s, *leaf_d;
+	xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
+	xfs_mount_t *mp;
+	char *tmpbuffer;
+
+	mp = trans->t_mountp;
+	tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
+	ASSERT(tmpbuffer != NULL);
+	memcpy(tmpbuffer, bp->data, XFS_LBSIZE(mp));
+	memset(bp->data, 0, XFS_LBSIZE(mp));
+
+	/*
+	 * Copy basic information
+	 */
+	leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
+	leaf_d = bp->data;
+	hdr_s = &leaf_s->hdr;
+	hdr_d = &leaf_d->hdr;
+	hdr_d->info = hdr_s->info;	/* struct copy */
+	INT_SET(hdr_d->firstused, ARCH_CONVERT, XFS_LBSIZE(mp));
+	/* handle truncation gracefully */
+	if (INT_ISZERO(hdr_d->firstused, ARCH_CONVERT)) {
+		INT_SET(hdr_d->firstused, ARCH_CONVERT,
+				XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN);
+	}
+	INT_ZERO(hdr_d->usedbytes, ARCH_CONVERT);
+	INT_ZERO(hdr_d->count, ARCH_CONVERT);
+	hdr_d->holes = 0;
+	INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT,
+					sizeof(xfs_attr_leaf_hdr_t));
+	INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT,
+				INT_GET(hdr_d->firstused, ARCH_CONVERT)
+			      - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
+
+	/*
+	 * Copy all entries in the same (sorted) order,
+	 * but allocate name/value pairs packed and in sequence.
+	 */
+	xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0,
+				(int)INT_GET(hdr_s->count, ARCH_CONVERT), mp);
+
+	xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
+
+	kmem_free(tmpbuffer, XFS_LBSIZE(mp));
+}
+
+/*
+ * Redistribute the attribute list entries between two leaf nodes,
+ * taking into account the size of the new entry.
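+ *
+ * This is the balancing step of the leaf split sequence above:
+ * xfs_attr_leaf_figure_balance() picks the split point and
+ * xfs_attr_leaf_moveents() does the actual copying.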
+ * + * NOTE: if new block is empty, then it will get the upper half of the + * old block. At present, all (one) callers pass in an empty second block. + * + * This code adjusts the args->index/blkno and args->index2/blkno2 fields + * to match what it is doing in splitting the attribute leaf block. Those + * values are used in "atomic rename" operations on attributes. Note that + * the "new" and "old" values can end up in different blocks. + */ +STATIC void +xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2) +{ + xfs_da_args_t *args; + xfs_da_state_blk_t *tmp_blk; + xfs_attr_leafblock_t *leaf1, *leaf2; + xfs_attr_leaf_hdr_t *hdr1, *hdr2; + int count, totallen, max, space, swap; + + /* + * Set up environment. + */ + ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC); + ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC); + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + args = state->args; + + /* + * Check ordering of blocks, reverse if it makes things simpler. + * + * NOTE: Given that all (current) callers pass in an empty + * second block, this code should never set "swap". + */ + swap = 0; + if (xfs_attr_leaf_order(blk1->bp, blk2->bp)) { + tmp_blk = blk1; + blk1 = blk2; + blk2 = tmp_blk; + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + swap = 1; + } + hdr1 = &leaf1->hdr; + hdr2 = &leaf2->hdr; + + /* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. Then get + * the direction to copy and the number of elements to move. + * + * "inleaf" is true if the new entry should be inserted into blk1. + * If "swap" is also true, then reverse the sense of "inleaf". + */ + state->inleaf = xfs_attr_leaf_figure_balance(state, blk1, blk2, + &count, &totallen); + if (swap) + state->inleaf = !state->inleaf; + + /* + * Move any entries required from leaf to leaf: + */ + if (count < INT_GET(hdr1->count, ARCH_CONVERT)) { + /* + * Figure the total bytes to be added to the destination leaf. + */ + /* number entries being moved */ + count = INT_GET(hdr1->count, ARCH_CONVERT) - count; + space = INT_GET(hdr1->usedbytes, ARCH_CONVERT) - totallen; + space += count * sizeof(xfs_attr_leaf_entry_t); + + /* + * leaf2 is the destination, compact it if it looks tight. + */ + max = INT_GET(hdr2->firstused, ARCH_CONVERT) + - sizeof(xfs_attr_leaf_hdr_t); + max -= INT_GET(hdr2->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t); + if (space > max) { + xfs_attr_leaf_compact(args->trans, blk2->bp); + } + + /* + * Move high entries from leaf1 to low end of leaf2. + */ + xfs_attr_leaf_moveents(leaf1, + INT_GET(hdr1->count, ARCH_CONVERT)-count, + leaf2, 0, count, state->mp); + + xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); + xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); + } else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) { + /* + * I assert that since all callers pass in an empty + * second buffer, this code should never execute. + */ + + /* + * Figure the total bytes to be added to the destination leaf. + */ + /* number entries being moved */ + count -= INT_GET(hdr1->count, ARCH_CONVERT); + space = totallen - INT_GET(hdr1->usedbytes, ARCH_CONVERT); + space += count * sizeof(xfs_attr_leaf_entry_t); + + /* + * leaf1 is the destination, compact it if it looks tight. 
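+		 * ("Tight" here, as in the mirror case above, means the
+		 * contiguous gap between the entry table and firstused
+		 * is smaller than the bytes being moved in; compacting
+		 * coalesces the scattered free space into that gap.)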
+ */ + max = INT_GET(hdr1->firstused, ARCH_CONVERT) + - sizeof(xfs_attr_leaf_hdr_t); + max -= INT_GET(hdr1->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t); + if (space > max) { + xfs_attr_leaf_compact(args->trans, blk1->bp); + } + + /* + * Move low entries from leaf2 to high end of leaf1. + */ + xfs_attr_leaf_moveents(leaf2, 0, leaf1, + (int)INT_GET(hdr1->count, ARCH_CONVERT), count, + state->mp); + + xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); + xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); + } + + /* + * Copy out last hashval in each block for B-tree code. + */ + blk1->hashval = + INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT); + blk2->hashval = + INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT); + + /* + * Adjust the expected index for insertion. + * NOTE: this code depends on the (current) situation that the + * second block was originally empty. + * + * If the insertion point moved to the 2nd block, we must adjust + * the index. We must also track the entry just following the + * new entry for use in an "atomic rename" operation, that entry + * is always the "old" entry and the "new" entry is what we are + * inserting. The index/blkno fields refer to the "old" entry, + * while the index2/blkno2 fields refer to the "new" entry. + */ + if (blk1->index > INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { + ASSERT(state->inleaf == 0); + blk2->index = blk1->index + - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + args->index = args->index2 = blk2->index; + args->blkno = args->blkno2 = blk2->blkno; + } else if (blk1->index == INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { + if (state->inleaf) { + args->index = blk1->index; + args->blkno = blk1->blkno; + args->index2 = 0; + args->blkno2 = blk2->blkno; + } else { + blk2->index = blk1->index + - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + args->index = args->index2 = blk2->index; + args->blkno = args->blkno2 = blk2->blkno; + } + } else { + ASSERT(state->inleaf == 1); + args->index = args->index2 = blk1->index; + args->blkno = args->blkno2 = blk1->blkno; + } +} + +/* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. + * GROT: Is this really necessary? With other than a 512 byte blocksize, + * GROT: there will always be enough room in either block for a new entry. + * GROT: Do a double-split for this case? + */ +STATIC int +xfs_attr_leaf_figure_balance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2, + int *countarg, int *usedbytesarg) +{ + xfs_attr_leafblock_t *leaf1, *leaf2; + xfs_attr_leaf_hdr_t *hdr1, *hdr2; + xfs_attr_leaf_entry_t *entry; + int count, max, index, totallen, half; + int lastdelta, foundit, tmp; + + /* + * Set up environment. + */ + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + hdr1 = &leaf1->hdr; + hdr2 = &leaf2->hdr; + foundit = 0; + totallen = 0; + + /* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. 
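+	 *
+	 * The target is (roughly) half of the combined usage:
+	 *	half = ((max+1) entry slots + usedbytes of both blocks
+	 *		+ the new entry's name/value size) / 2
+	 * which is what the computation below evaluates.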
+	 */
+	max = INT_GET(hdr1->count, ARCH_CONVERT)
+			+ INT_GET(hdr2->count, ARCH_CONVERT);
+	half  = (max+1) * sizeof(*entry);
+	half += INT_GET(hdr1->usedbytes, ARCH_CONVERT)
+			+ INT_GET(hdr2->usedbytes, ARCH_CONVERT)
+			+ xfs_attr_leaf_newentsize(state->args,
+						   state->blocksize, NULL);
+	half /= 2;
+	lastdelta = state->blocksize;
+	entry = &leaf1->entries[0];
+	for (count = index = 0; count < max; entry++, index++, count++) {
+
+#define XFS_ATTR_ABS(A)	(((A) < 0) ? -(A) : (A))
+		/*
+		 * The new entry is in the first block, account for it.
+		 */
+		if (count == blk1->index) {
+			tmp = totallen + sizeof(*entry) +
+				xfs_attr_leaf_newentsize(state->args,
+							 state->blocksize,
+							 NULL);
+			if (XFS_ATTR_ABS(half - tmp) > lastdelta)
+				break;
+			lastdelta = XFS_ATTR_ABS(half - tmp);
+			totallen = tmp;
+			foundit = 1;
+		}
+
+		/*
+		 * Wrap around into the second block if necessary.
+		 */
+		if (count == INT_GET(hdr1->count, ARCH_CONVERT)) {
+			leaf1 = leaf2;
+			entry = &leaf1->entries[0];
+			index = 0;
+		}
+
+		/*
+		 * Figure out if next leaf entry would be too much.
+		 */
+		tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
+									index);
+		if (XFS_ATTR_ABS(half - tmp) > lastdelta)
+			break;
+		lastdelta = XFS_ATTR_ABS(half - tmp);
+		totallen = tmp;
+#undef XFS_ATTR_ABS
+	}
+
+	/*
+	 * Calculate the number of usedbytes that will end up in the lower
+	 * block.  If the new entry is not in the lower block, fix up the
+	 * count.
+	 */
+	totallen -= count * sizeof(*entry);
+	if (foundit) {
+		totallen -= sizeof(*entry) +
+				xfs_attr_leaf_newentsize(state->args,
+							 state->blocksize,
+							 NULL);
+	}
+
+	*countarg = count;
+	*usedbytesarg = totallen;
+	return(foundit);
+}
+
+/*========================================================================
+ * Routines used for shrinking the Btree.
+ *========================================================================*/
+
+/*
+ * Check a leaf block and its neighbors to see if the block should be
+ * collapsed into one or the other neighbor.  Always keep the block
+ * with the smaller block number.
+ * If the current block is over 50% full, don't try to join it, return 0.
+ * If the block is empty, fill in the state structure and return 2.
+ * If it can be collapsed, fill in the state structure and return 1.
+ * If nothing can be done, return 0.
+ *
+ * GROT: allow for INCOMPLETE entries in calculation.
+ */
+int
+xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
+{
+	xfs_attr_leafblock_t *leaf;
+	xfs_da_state_blk_t *blk;
+	xfs_da_blkinfo_t *info;
+	int count, bytes, forward, error, retval, i;
+	xfs_dablk_t blkno;
+	xfs_dabuf_t *bp;
+
+	/*
+	 * Check for the degenerate case of the block being over 50% full.
+	 * If so, it's not worth even looking to see if we might be able
+	 * to coalesce with a sibling.
+	 */
+	blk = &state->path.blk[ state->path.active-1 ];
+	info = blk->bp->data;
+	ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC);
+	leaf = (xfs_attr_leafblock_t *)info;
+	count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+	bytes = sizeof(xfs_attr_leaf_hdr_t) +
+		count * sizeof(xfs_attr_leaf_entry_t) +
+		INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT);
+	if (bytes > (state->blocksize >> 1)) {
+		*action = 0;	/* blk over 50%, don't try to join */
+		return(0);
+	}
+
+	/*
+	 * Check for the degenerate case of the block being empty.
+	 * If the block is empty, we'll simply delete it, no need to
+	 * coalesce it with a sibling block.  We choose (arbitrarily)
+	 * to merge with the forward block unless it is NULL.
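+	 * (As implemented below, this is the *action == 2 case: nothing
+	 * needs to be copied, the caller can then simply unlink and free
+	 * the empty block.)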
+ */ + if (count == 0) { + /* + * Make altpath point to the block we want to keep and + * path point to the block we want to drop (this one). + */ + forward = (!INT_ISZERO(info->forw, ARCH_CONVERT)); + memcpy(&state->altpath, &state->path, sizeof(state->path)); + error = xfs_da_path_shift(state, &state->altpath, forward, + 0, &retval); + if (error) + return(error); + if (retval) { + *action = 0; + } else { + *action = 2; + } + return(0); + } + + /* + * Examine each sibling block to see if we can coalesce with + * at least 25% free space to spare. We need to figure out + * whether to merge with the forward or the backward block. + * We prefer coalescing with the lower numbered sibling so as + * to shrink an attribute list over time. + */ + /* start with smaller blk num */ + forward = (INT_GET(info->forw, ARCH_CONVERT) + < INT_GET(info->back, ARCH_CONVERT)); + for (i = 0; i < 2; forward = !forward, i++) { + if (forward) + blkno = INT_GET(info->forw, ARCH_CONVERT); + else + blkno = INT_GET(info->back, ARCH_CONVERT); + if (blkno == 0) + continue; + error = xfs_da_read_buf(state->args->trans, state->args->dp, + blkno, -1, &bp, XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + + leaf = (xfs_attr_leafblock_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes = state->blocksize - (state->blocksize>>2); + bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + count += INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + bytes -= count * sizeof(xfs_attr_leaf_entry_t); + bytes -= sizeof(xfs_attr_leaf_hdr_t); + xfs_da_brelse(state->args->trans, bp); + if (bytes >= 0) + break; /* fits with at least 25% to spare */ + } + if (i >= 2) { + *action = 0; + return(0); + } + + /* + * Make altpath point to the block we want to keep (the lower + * numbered block) and path point to the block we want to drop. + */ + memcpy(&state->altpath, &state->path, sizeof(state->path)); + if (blkno < blk->blkno) { + error = xfs_da_path_shift(state, &state->altpath, forward, + 0, &retval); + } else { + error = xfs_da_path_shift(state, &state->path, forward, + 0, &retval); + } + if (error) + return(error); + if (retval) { + *action = 0; + } else { + *action = 1; + } + return(0); +} + +/* + * Remove a name from the leaf attribute list structure. + * + * Return 1 if leaf is less than 37% full, 0 if >= 37% full. + * If two leaves are 37% full, when combined they will leave 25% free. 
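+ * (Arithmetic: two leaves at just under 37% full combine into just
+ * under 74% of one block, preserving the 25%-plus free space that
+ * xfs_attr_leaf_toosmall() wants to see after a join.)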
+ */ +int +xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_hdr_t *hdr; + xfs_attr_leaf_map_t *map; + xfs_attr_leaf_entry_t *entry; + int before, after, smallest, entsize; + int tablesize, tmp, i; + xfs_mount_t *mp; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + hdr = &leaf->hdr; + mp = args->trans->t_mountp; + ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) + && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); + ASSERT((args->index >= 0) + && (args->index < INT_GET(hdr->count, ARCH_CONVERT))); + ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) + >= ((INT_GET(hdr->count, ARCH_CONVERT) + * sizeof(*entry))+sizeof(*hdr))); + entry = &leaf->entries[args->index]; + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) + >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp)); + + /* + * Scan through free region table: + * check for adjacency of free'd entry with an existing one, + * find smallest free region in case we need to replace it, + * adjust any map that borders the entry table, + */ + tablesize = INT_GET(hdr->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t) + + sizeof(xfs_attr_leaf_hdr_t); + map = &hdr->freemap[0]; + tmp = INT_GET(map->size, ARCH_CONVERT); + before = after = -1; + smallest = XFS_ATTR_LEAF_MAPSIZE - 1; + entsize = xfs_attr_leaf_entsize(leaf, args->index); + for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + if (INT_GET(map->base, ARCH_CONVERT) == tablesize) { + INT_MOD(map->base, ARCH_CONVERT, + -sizeof(xfs_attr_leaf_entry_t)); + INT_MOD(map->size, ARCH_CONVERT, + sizeof(xfs_attr_leaf_entry_t)); + } + + if ((INT_GET(map->base, ARCH_CONVERT) + + INT_GET(map->size, ARCH_CONVERT)) + == INT_GET(entry->nameidx, ARCH_CONVERT)) { + before = i; + } else if (INT_GET(map->base, ARCH_CONVERT) + == (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) { + after = i; + } else if (INT_GET(map->size, ARCH_CONVERT) < tmp) { + tmp = INT_GET(map->size, ARCH_CONVERT); + smallest = i; + } + } + + /* + * Coalesce adjacent freemap regions, + * or replace the smallest region. + */ + if ((before >= 0) || (after >= 0)) { + if ((before >= 0) && (after >= 0)) { + map = &hdr->freemap[before]; + INT_MOD(map->size, ARCH_CONVERT, entsize); + INT_MOD(map->size, ARCH_CONVERT, + INT_GET(hdr->freemap[after].size, + ARCH_CONVERT)); + INT_ZERO(hdr->freemap[after].base, ARCH_CONVERT); + INT_ZERO(hdr->freemap[after].size, ARCH_CONVERT); + } else if (before >= 0) { + map = &hdr->freemap[before]; + INT_MOD(map->size, ARCH_CONVERT, entsize); + } else { + map = &hdr->freemap[after]; + /* both on-disk, don't endian flip twice */ + map->base = entry->nameidx; + INT_MOD(map->size, ARCH_CONVERT, entsize); + } + } else { + /* + * Replace smallest region (if it is smaller than free'd entry) + */ + map = &hdr->freemap[smallest]; + if (INT_GET(map->size, ARCH_CONVERT) < entsize) { + INT_SET(map->base, ARCH_CONVERT, + INT_GET(entry->nameidx, ARCH_CONVERT)); + INT_SET(map->size, ARCH_CONVERT, entsize); + } + } + + /* + * Did we remove the first entry? + */ + if (INT_GET(entry->nameidx, ARCH_CONVERT) + == INT_GET(hdr->firstused, ARCH_CONVERT)) + smallest = 1; + else + smallest = 0; + + /* + * Compress the remaining entries and zero out the removed stuff. 
+ */ + memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize); + INT_MOD(hdr->usedbytes, ARCH_CONVERT, -entsize); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), + entsize)); + + tmp = (INT_GET(hdr->count, ARCH_CONVERT) - args->index) + * sizeof(xfs_attr_leaf_entry_t); + memmove((char *)entry, (char *)(entry+1), tmp); + INT_MOD(hdr->count, ARCH_CONVERT, -1); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); + entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)]; + memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t)); + + /* + * If we removed the first entry, re-find the first used byte + * in the name area. Note that if the entry was the "firstused", + * then we don't have a "hole" in our block resulting from + * removing the name. + */ + if (smallest) { + tmp = XFS_LBSIZE(mp); + entry = &leaf->entries[0]; + for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; + i >= 0; entry++, i--) { + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) + >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) + < XFS_LBSIZE(mp)); + if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp) + tmp = INT_GET(entry->nameidx, ARCH_CONVERT); + } + INT_SET(hdr->firstused, ARCH_CONVERT, tmp); + if (INT_ISZERO(hdr->firstused, ARCH_CONVERT)) { + INT_SET(hdr->firstused, ARCH_CONVERT, + tmp - XFS_ATTR_LEAF_NAME_ALIGN); + } + } else { + hdr->holes = 1; /* mark as needing compaction */ + } + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); + + /* + * Check if leaf is less than 50% full, caller may want to + * "join" the leaf with a sibling if so. + */ + tmp = sizeof(xfs_attr_leaf_hdr_t); + tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t); + tmp += INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */ +} + +/* + * Move all the attribute list entries from drop_leaf into save_leaf. + */ +void +xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk) +{ + xfs_attr_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf; + xfs_attr_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr; + xfs_mount_t *mp; + char *tmpbuffer; + + /* + * Set up environment. + */ + mp = state->mp; + ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC); + ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC); + drop_leaf = drop_blk->bp->data; + save_leaf = save_blk->bp->data; + ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + drop_hdr = &drop_leaf->hdr; + save_hdr = &save_leaf->hdr; + + /* + * Save last hashval from dying block for later Btree fixup. + */ + drop_blk->hashval = + INT_GET(drop_leaf->entries[INT_GET(drop_leaf->hdr.count, + ARCH_CONVERT)-1].hashval, + ARCH_CONVERT); + + /* + * Check if we need a temp buffer, or can we do it in place. + * Note that we don't check "leaf" for holes because we will + * always be dropping it, toosmall() decided that for us already. + */ + if (save_hdr->holes == 0) { + /* + * dest leaf has no holes, so we add there. May need + * to make some room in the entry array. 
+ */ + if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { + xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0, + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + } else { + xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, + INT_GET(save_hdr->count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), + mp); + } + } else { + /* + * Destination has holes, so we make a temporary copy + * of the leaf and add them both to that. + */ + tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP); + ASSERT(tmpbuffer != NULL); + memset(tmpbuffer, 0, state->blocksize); + tmp_leaf = (xfs_attr_leafblock_t *)tmpbuffer; + tmp_hdr = &tmp_leaf->hdr; + tmp_hdr->info = save_hdr->info; /* struct copy */ + INT_ZERO(tmp_hdr->count, ARCH_CONVERT); + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize); + if (INT_ISZERO(tmp_hdr->firstused, ARCH_CONVERT)) { + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, + state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN); + } + INT_ZERO(tmp_hdr->usedbytes, ARCH_CONVERT); + if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { + xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0, + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), + mp); + xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, + INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(save_hdr->count, ARCH_CONVERT), + mp); + } else { + xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0, + (int)INT_GET(save_hdr->count, ARCH_CONVERT), + mp); + xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, + INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), + mp); + } + memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize); + kmem_free(tmpbuffer, state->blocksize); + } + + xfs_da_log_buf(state->args->trans, save_blk->bp, 0, + state->blocksize - 1); + + /* + * Copy out last hashval in each block for B-tree code. + */ + save_blk->hashval = + INT_GET(save_leaf->entries[INT_GET(save_leaf->hdr.count, + ARCH_CONVERT)-1].hashval, + ARCH_CONVERT); +} + +/*======================================================================== + * Routines used for finding things in the Btree. + *========================================================================*/ + +/* + * Look up a name in a leaf attribute list structure. + * This is the internal routine, it uses the caller's buffer. + * + * Note that duplicate keys are allowed, but only check within the + * current leaf node. The Btree code must check in adjacent leaf nodes. + * + * Return in args->index the index into the entry[] array of either + * the found entry, or where the entry should have been (insert before + * that entry). + * + * Don't change the args->value unless we find the attribute. + */ +int +xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + int probe, span; + xfs_dahash_t hashval; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) + < (XFS_LBSIZE(args->dp->i_mount)/8)); + + /* + * Binary search. 
(note: small blocks will skip this loop) + */ + hashval = args->hashval; + probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2; + for (entry = &leaf->entries[probe]; span > 4; + entry = &leaf->entries[probe]) { + span /= 2; + if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval) + probe += span; + else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval) + probe -= span; + else + break; + } + ASSERT((probe >= 0) && \ + ((INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)))); + ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT) + == hashval)); + + /* + * Since we may have duplicate hashval's, find the first matching + * hashval in the leaf. + */ + while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT) + >= hashval)) { + entry--; + probe--; + } + while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + && (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) { + entry++; + probe++; + } + if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT)) + || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) { + args->index = probe; + return(XFS_ERROR(ENOATTR)); + } + + /* + * Duplicate keys may be present, so search all of them for a match. + */ + for ( ; (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + && (INT_GET(entry->hashval, ARCH_CONVERT) == hashval); + entry++, probe++) { +/* + * GROT: Add code to remove incomplete entries. + */ + /* + * If we are looking for INCOMPLETE entries, show only those. + * If we are looking for complete entries, show only those. + */ + if ((args->flags & XFS_ATTR_INCOMPLETE) != + (entry->flags & XFS_ATTR_INCOMPLETE)) { + continue; + } + if (entry->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, probe); + if (name_loc->namelen != args->namelen) + continue; + if (memcmp(args->name, (char *)name_loc->nameval, + args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((entry->flags & XFS_ATTR_ROOT) != 0)) + continue; + args->index = probe; + return(XFS_ERROR(EEXIST)); + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, probe); + if (name_rmt->namelen != args->namelen) + continue; + if (memcmp(args->name, (char *)name_rmt->name, + args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((entry->flags & XFS_ATTR_ROOT) != 0)) + continue; + args->index = probe; + args->rmtblkno + = INT_GET(name_rmt->valueblk, ARCH_CONVERT); + args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, + INT_GET(name_rmt->valuelen, + ARCH_CONVERT)); + return(XFS_ERROR(EEXIST)); + } + } + args->index = probe; + return(XFS_ERROR(ENOATTR)); +} + +/* + * Get the value associated with an attribute name from a leaf attribute + * list structure. 
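+ *
+ * As implemented below: with ATTR_KERNOVAL set, only the length is
+ * returned in args->valuelen; if the caller's buffer is too small the
+ * needed length is returned along with ERANGE.  For remote values only
+ * the rmtblkno/rmtblkcnt bookkeeping is filled in here, since the value
+ * bytes themselves live outside this block.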
+ */ +int +xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + int valuelen; + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) + < (XFS_LBSIZE(args->dp->i_mount)/8)); + ASSERT(args->index < ((int)INT_GET(leaf->hdr.count, ARCH_CONVERT))); + + entry = &leaf->entries[args->index]; + if (entry->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); + ASSERT(name_loc->namelen == args->namelen); + ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0); + valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); + if (args->flags & ATTR_KERNOVAL) { + args->valuelen = valuelen; + return(0); + } + if (args->valuelen < valuelen) { + args->valuelen = valuelen; + return(XFS_ERROR(ERANGE)); + } + args->valuelen = valuelen; + memcpy(args->value, &name_loc->nameval[args->namelen], valuelen); + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); + ASSERT(name_rmt->namelen == args->namelen); + ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); + valuelen = INT_GET(name_rmt->valuelen, ARCH_CONVERT); + args->rmtblkno = INT_GET(name_rmt->valueblk, ARCH_CONVERT); + args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen); + if (args->flags & ATTR_KERNOVAL) { + args->valuelen = valuelen; + return(0); + } + if (args->valuelen < valuelen) { + args->valuelen = valuelen; + return(XFS_ERROR(ERANGE)); + } + args->valuelen = valuelen; + } + return(0); +} + +/*======================================================================== + * Utility routines. + *========================================================================*/ + +/* + * Move the indicated entries from one leaf to another. + * NOTE: this routine modifies both source and destination leaves. + */ +/*ARGSUSED*/ +STATIC void +xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, + xfs_attr_leafblock_t *leaf_d, int start_d, + int count, xfs_mount_t *mp) +{ + xfs_attr_leaf_hdr_t *hdr_s, *hdr_d; + xfs_attr_leaf_entry_t *entry_s, *entry_d; + int desti, tmp, i; + + /* + * Check for nothing to do. + */ + if (count == 0) + return; + + /* + * Set up environment. + */ + ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + hdr_s = &leaf_s->hdr; + hdr_d = &leaf_d->hdr; + ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) + && (INT_GET(hdr_s->count, ARCH_CONVERT) + < (XFS_LBSIZE(mp)/8))); + ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_s->count, ARCH_CONVERT) + * sizeof(*entry_s))+sizeof(*hdr_s))); + ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)); + ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_d->count, ARCH_CONVERT) + * sizeof(*entry_d))+sizeof(*hdr_d))); + + ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT)); + ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT)); + ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT)); + + /* + * Move the entries in the destination leaf up to make a hole? 
+	 */
+	if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) {
+		tmp  = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d;
+		tmp *= sizeof(xfs_attr_leaf_entry_t);
+		entry_s = &leaf_d->entries[start_d];
+		entry_d = &leaf_d->entries[start_d + count];
+		memmove((char *)entry_d, (char *)entry_s, tmp);
+	}
+
+	/*
+	 * Copy all entries in the same (sorted) order,
+	 * but allocate attribute info packed and in sequence.
+	 */
+	entry_s = &leaf_s->entries[start_s];
+	entry_d = &leaf_d->entries[start_d];
+	desti = start_d;
+	for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
+		ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT)
+				>= INT_GET(hdr_s->firstused, ARCH_CONVERT));
+		tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
+#ifdef GROT
+		/*
+		 * Code to drop INCOMPLETE entries.  Difficult to use as we
+		 * may also need to change the insertion index.  Code turned
+		 * off for 6.2, should be revisited later.
+		 */
+		if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
+			memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
+			INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp);
+			INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
+			entry_d--;	/* to compensate for ++ in loop hdr */
+			desti--;
+			if ((start_s + i) < offset)
+				result++;	/* insertion index adjustment */
+		} else {
+#endif /* GROT */
+			INT_MOD(hdr_d->firstused, ARCH_CONVERT, -tmp);
+			/* both on-disk, don't endian flip twice */
+			entry_d->hashval = entry_s->hashval;
+			/* both on-disk, don't endian flip twice */
+			entry_d->nameidx = hdr_d->firstused;
+			entry_d->flags = entry_s->flags;
+			ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp
+							<= XFS_LBSIZE(mp));
+			memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti),
+				XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp);
+			ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp
+							<= XFS_LBSIZE(mp));
+			memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
+			INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp);
+			INT_MOD(hdr_d->usedbytes, ARCH_CONVERT, tmp);
+			INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
+			INT_MOD(hdr_d->count, ARCH_CONVERT, 1);
+			tmp = INT_GET(hdr_d->count, ARCH_CONVERT)
+						* sizeof(xfs_attr_leaf_entry_t)
+						+ sizeof(xfs_attr_leaf_hdr_t);
+			ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp);
+#ifdef GROT
+		}
+#endif /* GROT */
+	}
+
+	/*
+	 * Zero out the entries we just copied.
+	 */
+	if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) {
+		tmp = count * sizeof(xfs_attr_leaf_entry_t);
+		entry_s = &leaf_s->entries[start_s];
+		ASSERT(((char *)entry_s + tmp) <=
+		       ((char *)leaf_s + XFS_LBSIZE(mp)));
+		memset((char *)entry_s, 0, tmp);
+	} else {
+		/*
+		 * Move the remaining entries down to fill the hole,
+		 * then zero the entries at the top.
+ */ + tmp = INT_GET(hdr_s->count, ARCH_CONVERT) - count; + tmp *= sizeof(xfs_attr_leaf_entry_t); + entry_s = &leaf_s->entries[start_s + count]; + entry_d = &leaf_s->entries[start_s]; + memmove((char *)entry_d, (char *)entry_s, tmp); + + tmp = count * sizeof(xfs_attr_leaf_entry_t); + entry_s = &leaf_s->entries[INT_GET(hdr_s->count, + ARCH_CONVERT)]; + ASSERT(((char *)entry_s + tmp) <= + ((char *)leaf_s + XFS_LBSIZE(mp))); + memset((char *)entry_s, 0, tmp); + } + + /* + * Fill in the freemap information + */ + INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_hdr_t)); + INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT, + INT_GET(hdr_d->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t)); + INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, + INT_GET(hdr_d->firstused, ARCH_CONVERT) + - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); + INT_ZERO(hdr_d->freemap[1].base, ARCH_CONVERT); + INT_ZERO(hdr_d->freemap[2].base, ARCH_CONVERT); + INT_ZERO(hdr_d->freemap[1].size, ARCH_CONVERT); + INT_ZERO(hdr_d->freemap[2].size, ARCH_CONVERT); + hdr_s->holes = 1; /* leaf may not be compact */ +} + +/* + * Compare two leaf blocks "order". + * Return 0 unless leaf2 should go before leaf1. + */ +int +xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) +{ + xfs_attr_leafblock_t *leaf1, *leaf2; + + leaf1 = leaf1_bp->data; + leaf2 = leaf2_bp->data; + ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC) && + (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC)); + if ( (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) + && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) + && ( (INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) + || (INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT))) ) { + return(1); + } + return(0); +} + +/* + * Pick up the last hashvalue from a leaf block. + */ +xfs_dahash_t +xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count) +{ + xfs_attr_leafblock_t *leaf; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + if (count) + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + if (INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + return(0); + return(INT_GET(leaf->entries[INT_GET(leaf->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); +} + +/* + * Calculate the number of bytes used to store the indicated attribute + * (whether local or remote only calculate bytes in this block). + */ +int +xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index) +{ + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + int size; + + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + if (leaf->entries[index].flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index); + size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen, + INT_GET(name_loc->valuelen, + ARCH_CONVERT)); + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index); + size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen); + } + return(size); +} + +/* + * Calculate the number of bytes that would be required to store the new + * attribute (whether local or remote only calculate bytes in this block). + * This routine decides as a side effect whether the attribute will be + * a "local" or a "remote" attribute. 
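+ *
+ * The rule, as coded below: if the name and value fit together in a
+ * local entry smaller than XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(blocksize),
+ * the attribute is stored locally; otherwise only the name lives in
+ * the leaf and the value goes out to "remote" blocks.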
+ */ +int +xfs_attr_leaf_newentsize(xfs_da_args_t *args, int blocksize, int *local) +{ + int size; + + size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(args->namelen, args->valuelen); + if (size < XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(blocksize)) { + if (local) { + *local = 1; + } + } else { + size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(args->namelen); + if (local) { + *local = 0; + } + } + return(size); +} + +/* + * Copy out attribute list entries for attr_list(), for leaf attribute lists. + */ +int +xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) +{ + attrlist_cursor_kern_t *cursor; + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + int retval, i; + + ASSERT(bp != NULL); + leaf = bp->data; + cursor = context->cursor; + cursor->initted = 1; + + xfs_attr_trace_l_cl("blk start", context, leaf); + + /* + * Re-find our place in the leaf block if this is a new syscall. + */ + if (context->resynch) { + entry = &leaf->entries[0]; + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); + entry++, i++) { + if (INT_GET(entry->hashval, ARCH_CONVERT) + == cursor->hashval) { + if (cursor->offset == context->dupcnt) { + context->dupcnt = 0; + break; + } + context->dupcnt++; + } else if (INT_GET(entry->hashval, ARCH_CONVERT) + > cursor->hashval) { + context->dupcnt = 0; + break; + } + } + if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { + xfs_attr_trace_l_c("not found", context); + return(0); + } + } else { + entry = &leaf->entries[0]; + i = 0; + } + context->resynch = 0; + + /* + * We have found our place, start copying out the new attributes. + */ + retval = 0; + for ( ; (i < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + && (retval == 0); entry++, i++) { + int ns = (entry->flags & XFS_ATTR_ROOT)? ROOT_NAMES:USER_NAMES; + + if (INT_GET(entry->hashval, ARCH_CONVERT) != cursor->hashval) { + cursor->hashval = INT_GET(entry->hashval, ARCH_CONVERT); + cursor->offset = 0; + } + + if (entry->flags & XFS_ATTR_INCOMPLETE) + continue; /* skip incomplete entries */ + if (((context->flags & ATTR_ROOT) != 0) != + ((entry->flags & XFS_ATTR_ROOT) != 0) && + !(context->flags & ATTR_KERNFULLS)) + continue; /* skip non-matching entries */ + + if (entry->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); + if (context->flags & ATTR_KERNOVAL) { + ASSERT(context->flags & ATTR_KERNAMELS); + context->count += xfs_namespaces[ns].namelen + + (int)name_loc->namelen + 1; + } else { + retval = xfs_attr_put_listent(context, ns, + (char *)name_loc->nameval, + (int)name_loc->namelen, + (int)INT_GET(name_loc->valuelen, + ARCH_CONVERT)); + } + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); + if (context->flags & ATTR_KERNOVAL) { + ASSERT(context->flags & ATTR_KERNAMELS); + context->count += xfs_namespaces[ns].namelen + + (int)name_rmt->namelen + 1; + } else { + retval = xfs_attr_put_listent(context, ns, + (char *)name_rmt->name, + (int)name_rmt->namelen, + (int)INT_GET(name_rmt->valuelen, + ARCH_CONVERT)); + } + } + if (retval == 0) { + cursor->offset++; + } + } + xfs_attr_trace_l_cl("blk end", context, leaf); + return(retval); +} + +#define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \ + (((struct attrlist_ent *) 0)->a_name - (char *) 0) +#define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \ + ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \ + & ~(sizeof(u_int32_t)-1)) + +/* + * Format an attribute and copy it out to the user's buffer. 
+ * Take care to check values and protect against them changing later, + * we may be reading them directly out of a user buffer. + */ +/*ARGSUSED*/ +int +xfs_attr_put_listent(xfs_attr_list_context_t *context, + int ns, char *name, int namelen, int valuelen) +{ + attrlist_ent_t *aep; + int arraytop; + + ASSERT(!(context->flags & ATTR_KERNOVAL)); + if (context->flags & ATTR_KERNAMELS) { + char *offset; + xattr_namespace_t *nsp; + + ASSERT(context->count >= 0); + + nsp = &xfs_namespaces[ns]; + arraytop = context->count + nsp->namelen + namelen+1; + if (arraytop > context->firstu) { + context->count = -1; /* insufficient space */ + return(1); + } + offset = (char *)context->alist + context->count; + strncpy(offset, nsp->name, nsp->namelen); /* namespace */ + offset += nsp->namelen; + strncpy(offset, name, namelen); /* real name */ + offset += namelen; + *offset = '\0'; + context->count += nsp->namelen + namelen + 1; + return(0); + } + + ASSERT(context->count >= 0); + ASSERT(context->count < (ATTR_MAX_VALUELEN/8)); + ASSERT(context->firstu >= sizeof(*context->alist)); + ASSERT(context->firstu <= context->bufsize); + + arraytop = sizeof(*context->alist) + + context->count * sizeof(context->alist->al_offset[0]); + context->firstu -= ATTR_ENTSIZE(namelen); + if (context->firstu < arraytop) { + xfs_attr_trace_l_c("buffer full", context); + context->alist->al_more = 1; + return(1); + } + + aep = (attrlist_ent_t *)&(((char *)context->alist)[ context->firstu ]); + aep->a_valuelen = valuelen; + memcpy(aep->a_name, name, namelen); + aep->a_name[ namelen ] = 0; + context->alist->al_offset[ context->count++ ] = context->firstu; + context->alist->al_count = context->count; + xfs_attr_trace_l_c("add", context); + return(0); +} + +/*======================================================================== + * Manage the INCOMPLETE flag in a leaf entry + *========================================================================*/ + +/* + * Clear the INCOMPLETE flag on an entry in a leaf block. + */ +int +xfs_attr_leaf_clearflag(xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_dabuf_t *bp; + int error; +#ifdef DEBUG + xfs_attr_leaf_name_local_t *name_loc; + int namelen; + char *name; +#endif /* DEBUG */ + + /* + * Set up the operation. 
+ */ + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) { + return(error); + } + ASSERT(bp != NULL); + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); + ASSERT(args->index >= 0); + entry = &leaf->entries[ args->index ]; + ASSERT(entry->flags & XFS_ATTR_INCOMPLETE); + +#ifdef DEBUG + if (entry->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); + namelen = name_loc->namelen; + name = (char *)name_loc->nameval; + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); + namelen = name_rmt->namelen; + name = (char *)name_rmt->name; + } + ASSERT(INT_GET(entry->hashval, ARCH_CONVERT) == args->hashval); + ASSERT(namelen == args->namelen); + ASSERT(memcmp(name, args->name, namelen) == 0); +#endif /* DEBUG */ + + entry->flags &= ~XFS_ATTR_INCOMPLETE; + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); + + if (args->rmtblkno) { + ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); + INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); + INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); + } + xfs_da_buf_done(bp); + + /* + * Commit the flag value change and start the next trans in series. + */ + error = xfs_attr_rolltrans(&args->trans, args->dp); + + return(error); +} + +/* + * Set the INCOMPLETE flag on an entry in a leaf block. + */ +int +xfs_attr_leaf_setflag(xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_dabuf_t *bp; + int error; + + /* + * Set up the operation. + */ + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) { + return(error); + } + ASSERT(bp != NULL); + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); + ASSERT(args->index >= 0); + entry = &leaf->entries[ args->index ]; + + ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0); + entry->flags |= XFS_ATTR_INCOMPLETE; + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); + if ((entry->flags & XFS_ATTR_LOCAL) == 0) { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); + INT_ZERO(name_rmt->valueblk, ARCH_CONVERT); + INT_ZERO(name_rmt->valuelen, ARCH_CONVERT); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); + } + xfs_da_buf_done(bp); + + /* + * Commit the flag value change and start the next trans in series. + */ + error = xfs_attr_rolltrans(&args->trans, args->dp); + + return(error); +} + +/* + * In a single transaction, clear the INCOMPLETE flag on the leaf entry + * given by args->blkno/index and set the INCOMPLETE flag on the leaf + * entry given by args->blkno2/index2. + * + * Note that they could be in different blocks, or in the same block. 
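+ *
+ * Doing both flag changes in one transaction is what makes the rename
+ * atomic: whatever happens, a crash can never leave both entries
+ * looking complete, or neither.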
+ */ +int +xfs_attr_leaf_flipflags(xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf1, *leaf2; + xfs_attr_leaf_entry_t *entry1, *entry2; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_dabuf_t *bp1, *bp2; + int error; +#ifdef DEBUG + xfs_attr_leaf_name_local_t *name_loc; + int namelen1, namelen2; + char *name1, *name2; +#endif /* DEBUG */ + + /* + * Read the block containing the "old" attr + */ + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp1, + XFS_ATTR_FORK); + if (error) { + return(error); + } + ASSERT(bp1 != NULL); + + /* + * Read the block containing the "new" attr, if it is different + */ + if (args->blkno2 != args->blkno) { + error = xfs_da_read_buf(args->trans, args->dp, args->blkno2, + -1, &bp2, XFS_ATTR_FORK); + if (error) { + return(error); + } + ASSERT(bp2 != NULL); + } else { + bp2 = bp1; + } + + leaf1 = bp1->data; + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < INT_GET(leaf1->hdr.count, ARCH_CONVERT)); + ASSERT(args->index >= 0); + entry1 = &leaf1->entries[ args->index ]; + + leaf2 = bp2->data; + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index2 < INT_GET(leaf2->hdr.count, ARCH_CONVERT)); + ASSERT(args->index2 >= 0); + entry2 = &leaf2->entries[ args->index2 ]; + +#ifdef DEBUG + if (entry1->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf1, args->index); + namelen1 = name_loc->namelen; + name1 = (char *)name_loc->nameval; + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); + namelen1 = name_rmt->namelen; + name1 = (char *)name_rmt->name; + } + if (entry2->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf2, args->index2); + namelen2 = name_loc->namelen; + name2 = (char *)name_loc->nameval; + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2); + namelen2 = name_rmt->namelen; + name2 = (char *)name_rmt->name; + } + ASSERT(INT_GET(entry1->hashval, ARCH_CONVERT) == INT_GET(entry2->hashval, ARCH_CONVERT)); + ASSERT(namelen1 == namelen2); + ASSERT(memcmp(name1, name2, namelen1) == 0); +#endif /* DEBUG */ + + ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE); + ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0); + + entry1->flags &= ~XFS_ATTR_INCOMPLETE; + xfs_da_log_buf(args->trans, bp1, + XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1))); + if (args->rmtblkno) { + ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); + INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); + INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); + xfs_da_log_buf(args->trans, bp1, + XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt))); + } + + entry2->flags |= XFS_ATTR_INCOMPLETE; + xfs_da_log_buf(args->trans, bp2, + XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2))); + if ((entry2->flags & XFS_ATTR_LOCAL) == 0) { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2); + INT_ZERO(name_rmt->valueblk, ARCH_CONVERT); + INT_ZERO(name_rmt->valuelen, ARCH_CONVERT); + xfs_da_log_buf(args->trans, bp2, + XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt))); + } + xfs_da_buf_done(bp1); + if (bp1 != bp2) + xfs_da_buf_done(bp2); + + /* + * Commit the flag value change and start the next trans in series. 
+ */ + error = xfs_attr_rolltrans(&args->trans, args->dp); + + return(error); +} + +/*======================================================================== + * Indiscriminately delete the entire attribute fork + *========================================================================*/ + +/* + * Recurse (gasp!) through the attribute nodes until we find leaves. + * We're doing a depth-first traversal in order to invalidate everything. + */ +int +xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp) +{ + xfs_da_blkinfo_t *info; + xfs_daddr_t blkno; + xfs_dabuf_t *bp; + int error; + + /* + * Read block 0 to see what we have to work with. + * We only get here if we have extents, since we remove + * the extents in reverse order the extent containing + * block 0 must still be there. + */ + error = xfs_da_read_buf(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK); + if (error) + return(error); + blkno = xfs_da_blkno(bp); + + /* + * Invalidate the tree, even if the "tree" is only a single leaf block. + * This is a depth-first traversal! + */ + info = bp->data; + if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + error = xfs_attr_node_inactive(trans, dp, bp, 1); + } else if (INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + error = xfs_attr_leaf_inactive(trans, dp, bp); + } else { + error = XFS_ERROR(EIO); + xfs_da_brelse(*trans, bp); + } + if (error) + return(error); + + /* + * Invalidate the incore copy of the root block. + */ + error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK); + if (error) + return(error); + xfs_da_binval(*trans, bp); /* remove from cache */ + /* + * Commit the invalidate and start the next transaction. + */ + error = xfs_attr_rolltrans(trans, dp); + + return (error); +} + +/* + * Recurse (gasp!) through the attribute nodes until we find leaves. + * We're doing a depth-first traversal in order to invalidate everything. + */ +int +xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, + int level) +{ + xfs_da_blkinfo_t *info; + xfs_da_intnode_t *node; + xfs_dablk_t child_fsb; + xfs_daddr_t parent_blkno, child_blkno; + int error, count, i; + xfs_dabuf_t *child_bp; + + /* + * Since this code is recursive (gasp!) we must protect ourselves. + */ + if (level > XFS_DA_NODE_MAXDEPTH) { + xfs_da_brelse(*trans, bp); /* no locks for later trans */ + return(XFS_ERROR(EIO)); + } + + node = bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) + == XFS_DA_NODE_MAGIC); + parent_blkno = xfs_da_blkno(bp); /* save for re-read later */ + count = INT_GET(node->hdr.count, ARCH_CONVERT); + if (!count) { + xfs_da_brelse(*trans, bp); + return(0); + } + child_fsb = INT_GET(node->btree[0].before, ARCH_CONVERT); + xfs_da_brelse(*trans, bp); /* no locks for later trans */ + + /* + * If this is the node level just above the leaves, simply loop + * over the leaves removing all of them. If this is higher up + * in the tree, recurse downward. + */ + for (i = 0; i < count; i++) { + /* + * Read the subsidiary block to see what we have to work with. + * Don't do this in a transaction. This is a depth-first + * traversal of the tree so we may deal with many blocks + * before we come back to this one. + */ + error = xfs_da_read_buf(*trans, dp, child_fsb, -2, &child_bp, + XFS_ATTR_FORK); + if (error) + return(error); + if (child_bp) { + /* save for re-read later */ + child_blkno = xfs_da_blkno(child_bp); + + /* + * Invalidate the subtree, however we have to. 
+ */ + info = child_bp->data; + if (INT_GET(info->magic, ARCH_CONVERT) + == XFS_DA_NODE_MAGIC) { + error = xfs_attr_node_inactive(trans, dp, + child_bp, level+1); + } else if (INT_GET(info->magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC) { + error = xfs_attr_leaf_inactive(trans, dp, + child_bp); + } else { + error = XFS_ERROR(EIO); + xfs_da_brelse(*trans, child_bp); + } + if (error) + return(error); + + /* + * Remove the subsidiary block from the cache + * and from the log. + */ + error = xfs_da_get_buf(*trans, dp, 0, child_blkno, + &child_bp, XFS_ATTR_FORK); + if (error) + return(error); + xfs_da_binval(*trans, child_bp); + } + + /* + * If we're not done, re-read the parent to get the next + * child block number. + */ + if ((i+1) < count) { + error = xfs_da_read_buf(*trans, dp, 0, parent_blkno, + &bp, XFS_ATTR_FORK); + if (error) + return(error); + child_fsb = INT_GET(node->btree[i+1].before, ARCH_CONVERT); + xfs_da_brelse(*trans, bp); + } + /* + * Atomically commit the whole invalidate stuff. + */ + if ((error = xfs_attr_rolltrans(trans, dp))) + return (error); + } + + return(0); +} + +/* + * Invalidate all of the "remote" value regions pointed to by a particular + * leaf block. + * Note that we must release the lock on the buffer so that we are not + * caught holding something that the logging code wants to flush to disk. + */ +int +xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_attr_inactive_list_t *list, *lp; + int error, count, size, tmp, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + + /* + * Count the number of "remote" value extents. + */ + count = 0; + entry = &leaf->entries[0]; + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if ( INT_GET(entry->nameidx, ARCH_CONVERT) + && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); + if (!INT_ISZERO(name_rmt->valueblk, ARCH_CONVERT)) + count++; + } + } + + /* + * If there are no "remote" values, we're done. + */ + if (count == 0) { + xfs_da_brelse(*trans, bp); + return(0); + } + + /* + * Allocate storage for a list of all the "remote" value extents. + */ + size = count * sizeof(xfs_attr_inactive_list_t); + list = (xfs_attr_inactive_list_t *)kmem_alloc(size, KM_SLEEP); + + /* + * Identify each of the "remote" value extents. + */ + lp = list; + entry = &leaf->entries[0]; + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if ( INT_GET(entry->nameidx, ARCH_CONVERT) + && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); + if (!INT_ISZERO(name_rmt->valueblk, ARCH_CONVERT)) { + /* both on-disk, don't endian flip twice */ + lp->valueblk = name_rmt->valueblk; + INT_SET(lp->valuelen, ARCH_CONVERT, + XFS_B_TO_FSB(dp->i_mount, + INT_GET(name_rmt->valuelen, + ARCH_CONVERT))); + lp++; + } + } + } + xfs_da_brelse(*trans, bp); /* unlock for trans. in freextent() */ + + /* + * Invalidate each of the "remote" value extents. 
+	 */
+	error = 0;
+	for (lp = list, i = 0; i < count; i++, lp++) {
+		tmp = xfs_attr_leaf_freextent(trans, dp,
+				INT_GET(lp->valueblk,
+						ARCH_CONVERT),
+				INT_GET(lp->valuelen,
+						ARCH_CONVERT));
+		if (error == 0)
+			error = tmp;	/* save only the 1st errno */
+	}
+
+	kmem_free((xfs_caddr_t)list, size);
+	return(error);
+}
+
+/*
+ * Look at all the extents for this logical region,
+ * invalidate any buffers that are incore/in transactions.
+ */
+int
+xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
+				    xfs_dablk_t blkno, int blkcnt)
+{
+	xfs_bmbt_irec_t map;
+	xfs_dablk_t tblkno;
+	int tblkcnt, dblkcnt, nmap, error;
+	xfs_daddr_t dblkno;
+	xfs_buf_t *bp;
+
+	/*
+	 * Roll through the "value", invalidating the attribute value's
+	 * blocks.
+	 */
+	tblkno = blkno;
+	tblkcnt = blkcnt;
+	while (tblkcnt > 0) {
+		/*
+		 * Try to remember where we decided to put the value.
+		 */
+		nmap = 1;
+		error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt,
+					XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
+					NULL, 0, &map, &nmap, NULL);
+		if (error) {
+			return(error);
+		}
+		ASSERT(nmap == 1);
+		ASSERT(map.br_startblock != DELAYSTARTBLOCK);
+
+		/*
+		 * If it's a hole, these are already unmapped
+		 * so there's nothing to invalidate.
+		 */
+		if (map.br_startblock != HOLESTARTBLOCK) {
+
+			dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
+						  map.br_startblock);
+			dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
+						map.br_blockcount);
+			bp = xfs_trans_get_buf(*trans,
+					dp->i_mount->m_ddev_targp,
+					dblkno, dblkcnt, XFS_BUF_LOCK);
+			xfs_trans_binval(*trans, bp);
+			/*
+			 * Roll to next transaction.
+			 */
+			if ((error = xfs_attr_rolltrans(trans, dp)))
+				return (error);
+		}
+
+		tblkno += map.br_blockcount;
+		tblkcnt -= map.br_blockcount;
+	}
+
+	return(0);
+}
+
+
+/*
+ * Roll from one trans in the sequence of PERMANENT transactions to the next.
+ */
+int
+xfs_attr_rolltrans(xfs_trans_t **transp, xfs_inode_t *dp)
+{
+	xfs_trans_t *trans;
+	unsigned int logres, count;
+	int error;
+
+	/*
+	 * Ensure that the inode is always logged.
+	 */
+	trans = *transp;
+	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
+
+	/*
+	 * Copy the critical parameters from one trans to the next.
+	 */
+	logres = trans->t_log_res;
+	count = trans->t_log_count;
+	*transp = xfs_trans_dup(trans);
+
+	/*
+	 * Commit the current transaction.
+	 * If this commit failed, then it'd just unlock those items that
+	 * are not marked ihold.  That also means that a filesystem shutdown
+	 * is in progress.  The caller takes the responsibility to cancel
+	 * the duplicate transaction that gets returned.
+	 */
+	if ((error = xfs_trans_commit(trans, 0, NULL)))
+		return (error);
+
+	trans = *transp;
+
+	/*
+	 * Reserve space in the log for the next transaction.
+	 * This also pushes items in the "AIL", the list of logged items,
+	 * out to disk if they are taking up space at the tail of the log
+	 * that we want to use.  This requires that either nothing be locked
+	 * across this call, or that anything that is locked be logged in
+	 * the prior and the next transactions.
+	 */
+	error = xfs_trans_reserve(trans, 0, logres, 0,
+				  XFS_TRANS_PERM_LOG_RES, count);
+	/*
+	 * Ensure that the inode is in the new transaction and locked.
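+	 * Callers drive a whole sequence of such rolls; the usual shape
+	 * (an illustrative sketch only, where "work on one block" stands
+	 * for whatever the caller invalidates per step) is:
+	 *
+	 *	while (blocks remain) {
+	 *		... work on one block under *transp ...
+	 *		if ((error = xfs_attr_rolltrans(transp, dp)))
+	 *			return (error);
+	 *	}
+	 *
+	 * as xfs_attr_node_inactive() and xfs_attr_leaf_freextent() do above.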
+	 */
+	if (!error) {
+		xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
+		xfs_trans_ihold(trans, dp);
+	}
+	return (error);
+
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_attr_leaf.h linux.21rc5-ac2/fs/xfs/xfs_attr_leaf.h
--- linux.21rc5/fs/xfs/xfs_attr_leaf.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_attr_leaf.h	2003-05-07 15:41:09.000000000 +0100
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2000, 2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ATTR_LEAF_H__
+#define	__XFS_ATTR_LEAF_H__
+
+/*
+ * Attribute storage layout, internal structure, access macros, etc.
+ *
+ * Attribute lists are structured around Btrees where all the data
+ * elements are in the leaf nodes.  Attribute names are hashed into an int,
+ * then that int is used as the index into the Btree.  Since the hashval
+ * of an attribute name may not be unique, we may have duplicate keys.  The
+ * internal links in the Btree are logical block offsets into the file.
+ */
+
+struct attrlist;
+struct attrlist_cursor_kern;
+struct xfs_dabuf;
+struct xfs_da_args;
+struct xfs_da_state;
+struct xfs_da_state_blk;
+struct xfs_inode;
+struct xfs_trans;
+
+/*========================================================================
+ * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
+ *========================================================================*/
+
+/*
+ * This is the structure of the leaf nodes in the Btree.
+ *
+ * Struct leaf_entry's are packed from the top.  Name/values grow from the
+ * bottom but are not packed.  The freemap contains run-length-encoded entries
+ * for the free bytes after the leaf_entry's, but only the N largest such,
+ * smaller runs are dropped.  When the freemap doesn't show enough space
+ * for an allocation, we compact the name/value area and try again.  If we
+ * still don't have enough space, then we have to split the block.  The
+ * name/value structs (both local and remote versions) must be 32bit aligned.
+ *
+ * Since we have duplicate hash keys, for each key that matches, compare
+ * the actual name string.  The root and intermediate node search always
+ * takes the first-in-the-block key match found, so we should only have
+ * to work "forw"ard.
+ * If none matches, continue with the "forw"ard leaf
+ * nodes until the hash key changes or the attribute name is found.
+ *
+ * We store the fact that an attribute is a ROOT versus USER attribute in
+ * the leaf_entry.  The namespaces are independent only because we also look
+ * at the root/user bit when we are looking for a matching attribute name.
+ *
+ * We also store an "incomplete" bit in the leaf_entry.  It shows that an
+ * attribute is in the middle of being created and should not be shown to
+ * the user if we crash during the time that the bit is set.  We clear the
+ * bit when we have finished setting up the attribute.  We do this because
+ * we cannot create some large attributes inside a single transaction, and we
+ * need some indication that we weren't finished if we crash in the middle.
+ */
+#define	XFS_ATTR_LEAF_MAPSIZE	3	/* how many freespace slots */
+
+typedef struct xfs_attr_leafblock {
+	struct xfs_attr_leaf_hdr {	/* constant-structure header block */
+		xfs_da_blkinfo_t info;	/* block type, links, etc. */
+		__uint16_t count;	/* count of active leaf_entry's */
+		__uint16_t usedbytes;	/* num bytes of names/values stored */
+		__uint16_t firstused;	/* first used byte in name area */
+		__uint8_t holes;	/* != 0 if blk needs compaction */
+		__uint8_t pad1;
+		struct xfs_attr_leaf_map {	/* RLE map of free bytes */
+			__uint16_t base;	/* base of free region */
+			__uint16_t size;	/* length of free region */
+		} freemap[XFS_ATTR_LEAF_MAPSIZE];	/* N largest free regions */
+	} hdr;
+	struct xfs_attr_leaf_entry {	/* sorted on key, not name */
+		xfs_dahash_t hashval;	/* hash value of name */
+		__uint16_t nameidx;	/* index into buffer of name/value */
+		__uint8_t flags;	/* LOCAL, ROOT and INCOMPLETE flags */
+		__uint8_t pad2;		/* unused pad byte */
+	} entries[1];			/* variable sized array */
+	struct xfs_attr_leaf_name_local {
+		__uint16_t valuelen;	/* number of bytes in value */
+		__uint8_t namelen;	/* length of name bytes */
+		__uint8_t nameval[1];	/* name/value bytes */
+	} namelist;			/* grows from bottom of buf */
+	struct xfs_attr_leaf_name_remote {
+		xfs_dablk_t valueblk;	/* block number of value bytes */
+		__uint32_t valuelen;	/* number of bytes in value */
+		__uint8_t namelen;	/* length of name bytes */
+		__uint8_t name[1];	/* name bytes */
+	} valuelist;			/* grows from bottom of buf */
+} xfs_attr_leafblock_t;
+typedef struct xfs_attr_leaf_hdr xfs_attr_leaf_hdr_t;
+typedef struct xfs_attr_leaf_map xfs_attr_leaf_map_t;
+typedef struct xfs_attr_leaf_entry xfs_attr_leaf_entry_t;
+typedef struct xfs_attr_leaf_name_local xfs_attr_leaf_name_local_t;
+typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t;
+
+/*
+ * Flags used in the leaf_entry[i].flags field.
+ * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
+ * on the system call, they are "or"ed together for various operations.
+ */
+#define	XFS_ATTR_LOCAL_BIT	0	/* attr is stored locally */
+#define	XFS_ATTR_ROOT_BIT	1	/* limit access to attr to userid 0 */
+#define	XFS_ATTR_INCOMPLETE_BIT	7	/* attr in middle of create/delete */
+#define XFS_ATTR_LOCAL		(1 << XFS_ATTR_LOCAL_BIT)
+#define XFS_ATTR_ROOT		(1 << XFS_ATTR_ROOT_BIT)
+#define XFS_ATTR_INCOMPLETE	(1 << XFS_ATTR_INCOMPLETE_BIT)
+
+/*
+ * Alignment for namelist and valuelist entries (since they are mixed
+ * there can be only one alignment value)
+ */
+#define	XFS_ATTR_LEAF_NAME_ALIGN	((uint)sizeof(xfs_dablk_t))
+
+/*
+ * Cast typed pointers for "local" and "remote" name/value structs.
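+ * For example (illustrative use only, given a leaf block pointer
+ * "leafp" and an entry index "i"):
+ *
+ *	name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leafp, i);
+ *	name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leafp, i);
+ *
+ * Both simply offset entries[i].nameidx bytes from the start of the
+ * block and cast the result; callers check the XFS_ATTR_LOCAL flag to
+ * know which cast is meaningful.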
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_NAME_REMOTE) +xfs_attr_leaf_name_remote_t * +xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx); +#define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx) \ + xfs_attr_leaf_name_remote(leafp,idx) +#else +#define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx) /* remote name struct ptr */ \ + ((xfs_attr_leaf_name_remote_t *) \ + &((char *)(leafp))[ INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT) ]) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_NAME_LOCAL) +xfs_attr_leaf_name_local_t * +xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx); +#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \ + xfs_attr_leaf_name_local(leafp,idx) +#else +#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) /* local name struct ptr */ \ + ((xfs_attr_leaf_name_local_t *) \ + &((char *)(leafp))[ INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT) ]) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_NAME) +char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx); +#define XFS_ATTR_LEAF_NAME(leafp,idx) xfs_attr_leaf_name(leafp,idx) +#else +#define XFS_ATTR_LEAF_NAME(leafp,idx) /* generic name struct ptr */ \ + (&((char *)(leafp))[ INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT) ]) +#endif + +/* + * Calculate total bytes used (including trailing pad for alignment) for + * a "local" name/value structure, a "remote" name/value structure, and + * a pointer which might be either. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_ENTSIZE_REMOTE) +int xfs_attr_leaf_entsize_remote(int nlen); +#define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen) \ + xfs_attr_leaf_entsize_remote(nlen) +#else +#define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen) /* space for remote struct */ \ + (((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \ + XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_ENTSIZE_LOCAL) +int xfs_attr_leaf_entsize_local(int nlen, int vlen); +#define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen) \ + xfs_attr_leaf_entsize_local(nlen,vlen) +#else +#define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen) /* space for local struct */ \ + (((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) + \ + XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX) +int xfs_attr_leaf_entsize_local_max(int bsize); +#define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize) \ + xfs_attr_leaf_entsize_local_max(bsize) +#else +#define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize) /* max local struct size */ \ + (((bsize) >> 1) + ((bsize) >> 2)) +#endif + + +/*======================================================================== + * Structure used to pass context around among the routines. + *========================================================================*/ + +typedef struct xfs_attr_list_context { + struct xfs_inode *dp; /* inode */ + struct attrlist_cursor_kern *cursor;/* position in list */ + struct attrlist *alist; /* output buffer */ + int count; /* num used entries */ + int dupcnt; /* count dup hashvals seen */ + int bufsize;/* total buffer size */ + int firstu; /* first used byte in buffer */ + int flags; /* from VOP call */ + int resynch;/* T/F: resynch with cursor */ +} xfs_attr_list_context_t; + +/* + * Used to keep a list of "remote value" extents when unlinking an inode. 
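+ * xfs_attr_leaf_inactive() builds an array of these, one per remote
+ * value in the leaf, and then hands each element to
+ * xfs_attr_leaf_freextent() for invalidation.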
+ */ +typedef struct xfs_attr_inactive_list { + xfs_dablk_t valueblk; /* block number of value bytes */ + int valuelen; /* number of bytes in value */ +} xfs_attr_inactive_list_t; + + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Internal routines when dirsize < XFS_LITINO(mp). + */ +int xfs_attr_shortform_create(struct xfs_da_args *args); +int xfs_attr_shortform_add(struct xfs_da_args *add); +int xfs_attr_shortform_lookup(struct xfs_da_args *args); +int xfs_attr_shortform_getvalue(struct xfs_da_args *args); +int xfs_attr_shortform_to_leaf(struct xfs_da_args *args); +int xfs_attr_shortform_remove(struct xfs_da_args *remove); +int xfs_attr_shortform_list(struct xfs_attr_list_context *context); +int xfs_attr_shortform_replace(struct xfs_da_args *args); +int xfs_attr_shortform_allfit(struct xfs_dabuf *bp, struct xfs_inode *dp); + +/* + * Internal routines when dirsize == XFS_LBSIZE(mp). + */ +int xfs_attr_leaf_to_node(struct xfs_da_args *args); +int xfs_attr_leaf_to_shortform(struct xfs_dabuf *bp, + struct xfs_da_args *args); +int xfs_attr_leaf_clearflag(struct xfs_da_args *args); +int xfs_attr_leaf_setflag(struct xfs_da_args *args); +int xfs_attr_leaf_flipflags(xfs_da_args_t *args); + +/* + * Routines used for growing the Btree. + */ +int xfs_attr_leaf_create(struct xfs_da_args *args, xfs_dablk_t which_block, + struct xfs_dabuf **bpp); +int xfs_attr_leaf_split(struct xfs_da_state *state, + struct xfs_da_state_blk *oldblk, + struct xfs_da_state_blk *newblk); +int xfs_attr_leaf_lookup_int(struct xfs_dabuf *leaf, + struct xfs_da_args *args); +int xfs_attr_leaf_getvalue(struct xfs_dabuf *bp, struct xfs_da_args *args); +int xfs_attr_leaf_add(struct xfs_dabuf *leaf_buffer, + struct xfs_da_args *args); +int xfs_attr_leaf_remove(struct xfs_dabuf *leaf_buffer, + struct xfs_da_args *args); +int xfs_attr_leaf_list_int(struct xfs_dabuf *bp, + struct xfs_attr_list_context *context); + +/* + * Routines used for shrinking the Btree. + */ +int xfs_attr_leaf_toosmall(struct xfs_da_state *state, int *retval); +void xfs_attr_leaf_unbalance(struct xfs_da_state *state, + struct xfs_da_state_blk *drop_blk, + struct xfs_da_state_blk *save_blk); +int xfs_attr_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp); +int xfs_attr_node_inactive(struct xfs_trans **trans, struct xfs_inode *dp, + struct xfs_dabuf *bp, int level); +int xfs_attr_leaf_inactive(struct xfs_trans **trans, struct xfs_inode *dp, + struct xfs_dabuf *bp); +int xfs_attr_leaf_freextent(struct xfs_trans **trans, struct xfs_inode *dp, + xfs_dablk_t blkno, int blkcnt); + +/* + * Utility routines. 
+ */ +xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_dabuf *bp, int *count); +int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp, + struct xfs_dabuf *leaf2_bp); +int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int blocksize, + int *local); +int xfs_attr_leaf_entsize(struct xfs_attr_leafblock *leaf, int index); +int xfs_attr_put_listent(struct xfs_attr_list_context *context, + int ns, char *name, int namelen, int valuelen); +int xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp); + +#endif /* __XFS_ATTR_LEAF_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_attr_sf.h linux.21rc5-ac2/fs/xfs/xfs_attr_sf.h --- linux.21rc5/fs/xfs/xfs_attr_sf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_attr_sf.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ATTR_SF_H__ +#define __XFS_ATTR_SF_H__ + +/* + * Attribute storage when stored inside the inode. + * + * Small attribute lists are packed as tightly as possible so as + * to fit into the literal area of the inode. + */ + +struct xfs_inode; + +/* + * Entries are packed toward the top as tight as possible. + */ +typedef struct xfs_attr_shortform { + struct xfs_attr_sf_hdr { /* constant-structure header block */ + __uint16_t totsize; /* total bytes in shortform list */ + __uint8_t count; /* count of active entries */ + } hdr; + struct xfs_attr_sf_entry { + __uint8_t namelen; /* actual length of name (no NULL) */ + __uint8_t valuelen; /* actual length of value (no NULL) */ + __uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */ + __uint8_t nameval[1]; /* name & value bytes concatenated */ + } list[1]; /* variable sized array */ +} xfs_attr_shortform_t; +typedef struct xfs_attr_sf_hdr xfs_attr_sf_hdr_t; +typedef struct xfs_attr_sf_entry xfs_attr_sf_entry_t; + +/* + * We generate this then sort it, attr_list() must return things in hash-order. 
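+ * Each element carries the entry's hash value so that the incore array
+ * can be sorted into the same hash order a leaf block would impose.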
+ */ +typedef struct xfs_attr_sf_sort { + __uint8_t entno; /* entry number in original list */ + __uint8_t namelen; /* length of name value (no null) */ + __uint8_t valuelen; /* length of value */ + __uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */ + xfs_dahash_t hash; /* this entry's hash value */ + char *name; /* name value, pointer into buffer */ +} xfs_attr_sf_sort_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_ENTSIZE_BYNAME) +int xfs_attr_sf_entsize_byname(int nlen, int vlen); +#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) \ + xfs_attr_sf_entsize_byname(nlen,vlen) +#else +#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) /* space name/value uses */ \ + ((int)sizeof(xfs_attr_sf_entry_t)-1 + (nlen)+(vlen)) +#endif +#define XFS_ATTR_SF_ENTSIZE_MAX /* max space for name&value */ \ + ((1 << (NBBY*(int)sizeof(__uint8_t))) - 1) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_ENTSIZE) +int xfs_attr_sf_entsize(xfs_attr_sf_entry_t *sfep); +#define XFS_ATTR_SF_ENTSIZE(sfep) xfs_attr_sf_entsize(sfep) +#else +#define XFS_ATTR_SF_ENTSIZE(sfep) /* space an entry uses */ \ + ((int)sizeof(xfs_attr_sf_entry_t)-1 + (sfep)->namelen+(sfep)->valuelen) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_NEXTENTRY) +xfs_attr_sf_entry_t *xfs_attr_sf_nextentry(xfs_attr_sf_entry_t *sfep); +#define XFS_ATTR_SF_NEXTENTRY(sfep) xfs_attr_sf_nextentry(sfep) +#else +#define XFS_ATTR_SF_NEXTENTRY(sfep) /* next entry in struct */ \ + ((xfs_attr_sf_entry_t *) \ + ((char *)(sfep) + XFS_ATTR_SF_ENTSIZE(sfep))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_TOTSIZE) +int xfs_attr_sf_totsize(struct xfs_inode *dp); +#define XFS_ATTR_SF_TOTSIZE(dp) xfs_attr_sf_totsize(dp) +#else +#define XFS_ATTR_SF_TOTSIZE(dp) /* total space in use */ \ + (INT_GET(((xfs_attr_shortform_t *)((dp)->i_afp->if_u1.if_data))->hdr.totsize, ARCH_CONVERT)) +#endif + +#ifdef XFS_ALL_TRACE +#define XFS_ATTR_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_ATTR_TRACE +#endif + +/* + * Kernel tracing support for attribute lists + */ +struct xfs_attr_list_context; +struct xfs_da_intnode; +struct xfs_da_node_entry; +struct xfs_attr_leafblock; + +#define XFS_ATTR_TRACE_SIZE 4096 /* size of global trace buffer */ + +/* + * Trace record types. 
+ */ +#define XFS_ATTR_KTRACE_L_C 1 /* context */ +#define XFS_ATTR_KTRACE_L_CN 2 /* context, node */ +#define XFS_ATTR_KTRACE_L_CB 3 /* context, btree */ +#define XFS_ATTR_KTRACE_L_CL 4 /* context, leaf */ + +#if defined(XFS_ATTR_TRACE) + +void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context); +void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, + struct xfs_da_intnode *node); +void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, + struct xfs_da_node_entry *btree); +void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, + struct xfs_attr_leafblock *leaf); +void xfs_attr_trace_enter(int type, char *where, + __psunsigned_t a2, __psunsigned_t a3, + __psunsigned_t a4, __psunsigned_t a5, + __psunsigned_t a6, __psunsigned_t a7, + __psunsigned_t a8, __psunsigned_t a9, + __psunsigned_t a10, __psunsigned_t a11, + __psunsigned_t a12, __psunsigned_t a13, + __psunsigned_t a14, __psunsigned_t a15); +#else +#define xfs_attr_trace_l_c(w,c) +#define xfs_attr_trace_l_cn(w,c,n) +#define xfs_attr_trace_l_cb(w,c,b) +#define xfs_attr_trace_l_cl(w,c,l) +#endif /* XFS_ATTR_TRACE */ + +#endif /* __XFS_ATTR_SF_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_bit.c linux.21rc5-ac2/fs/xfs/xfs_bit.c --- linux.21rc5/fs/xfs/xfs_bit.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_bit.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * XFS bit manipulation routines, used in non-realtime code. + */ + +#include "xfs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" + + +#ifndef HAVE_ARCH_HIGHBIT +/* + * Index of high bit number in byte, -1 for none set, 0..7 otherwise. + */ +const char xfs_highbit[256] = { + -1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */ + 3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */ + 4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */ + 4, 4, 4, 4, 4, 4, 4, 4, /* 18 .. 1f */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 20 .. 27 */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 28 .. 2f */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 30 .. 37 */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 38 .. 3f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 40 .. 
47 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 48 .. 4f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 50 .. 57 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 58 .. 5f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 60 .. 67 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 68 .. 6f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 70 .. 77 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 78 .. 7f */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 80 .. 87 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 88 .. 8f */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 90 .. 97 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 98 .. 9f */ + 7, 7, 7, 7, 7, 7, 7, 7, /* a0 .. a7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* a8 .. af */ + 7, 7, 7, 7, 7, 7, 7, 7, /* b0 .. b7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* b8 .. bf */ + 7, 7, 7, 7, 7, 7, 7, 7, /* c0 .. c7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* c8 .. cf */ + 7, 7, 7, 7, 7, 7, 7, 7, /* d0 .. d7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* d8 .. df */ + 7, 7, 7, 7, 7, 7, 7, 7, /* e0 .. e7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* e8 .. ef */ + 7, 7, 7, 7, 7, 7, 7, 7, /* f0 .. f7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* f8 .. ff */ +}; +#endif + +/* + * Count of bits set in byte, 0..8. + */ +static const char xfs_countbit[256] = { + 0, 1, 1, 2, 1, 2, 2, 3, /* 00 .. 07 */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 08 .. 0f */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 10 .. 17 */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 18 .. 1f */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 20 .. 27 */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 28 .. 2f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 30 .. 37 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 38 .. 3f */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 40 .. 47 */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 48 .. 4f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 50 .. 57 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 58 .. 5f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 60 .. 67 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 68 .. 6f */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 70 .. 77 */ + 4, 5, 5, 6, 5, 6, 6, 7, /* 78 .. 7f */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 80 .. 87 */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 88 .. 8f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 90 .. 97 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 98 .. 9f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* a0 .. a7 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* a8 .. af */ + 3, 4, 4, 5, 4, 5, 5, 6, /* b0 .. b7 */ + 4, 5, 5, 6, 5, 6, 6, 7, /* b8 .. bf */ + 2, 3, 3, 4, 3, 4, 4, 5, /* c0 .. c7 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* c8 .. cf */ + 3, 4, 4, 5, 4, 5, 5, 6, /* d0 .. d7 */ + 4, 5, 5, 6, 5, 6, 6, 7, /* d8 .. df */ + 3, 4, 4, 5, 4, 5, 5, 6, /* e0 .. e7 */ + 4, 5, 5, 6, 5, 6, 6, 7, /* e8 .. ef */ + 4, 5, 5, 6, 5, 6, 6, 7, /* f0 .. f7 */ + 5, 6, 6, 7, 6, 7, 7, 8, /* f8 .. ff */ +}; + +/* + * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set. + */ +int inline +xfs_highbit32( + __uint32_t v) +{ +#ifdef HAVE_ARCH_HIGHBIT + return highbit32(v); +#else + int i; + + if (v & 0xffff0000) + if (v & 0xff000000) + i = 24; + else + i = 16; + else if (v & 0x0000ffff) + if (v & 0x0000ff00) + i = 8; + else + i = 0; + else + return -1; + return i + xfs_highbit[(v >> i) & 0xff]; +#endif +} + +/* + * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set. + */ +int +xfs_lowbit64( + __uint64_t v) +{ + int n; + n = ffs((unsigned)v); + if (n == 0) { + n = ffs(v >> 32); + if (n >= 0) + n+=32; + } + return n-1; +} + +/* + * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set. + */ +int +xfs_highbit64( + __uint64_t v) +{ + __uint32_t h = v >> 32; + if (h) + return xfs_highbit32(h) + 32; + return xfs_highbit32((__u32)v); +} + + +/* + * Count the number of bits set in the bitmap starting with bit + * start_bit. Size is the size of the bitmap in words. + * + * Do the counting by mapping a byte value to the number of set + * bits for that value using the xfs_countbit array, i.e. 
+ * xfs_countbit[0] == 0, xfs_countbit[1] == 1, xfs_countbit[2] == 1, + * xfs_countbit[3] == 2, etc. + */ +int +xfs_count_bits(uint *map, uint size, uint start_bit) +{ + register int bits; + register unsigned char *bytep; + register unsigned char *end_map; + int byte_bit; + + bits = 0; + end_map = (char*)(map + size); + bytep = (char*)(map + (start_bit & ~0x7)); + byte_bit = start_bit & 0x7; + + /* + * If the caller fell off the end of the map, return 0. + */ + if (bytep >= end_map) { + return (0); + } + + /* + * If start_bit is not byte aligned, then process the + * first byte separately. + */ + if (byte_bit != 0) { + /* + * Shift off the bits we don't want to look at, + * before indexing into xfs_countbit. + */ + bits += xfs_countbit[(*bytep >> byte_bit)]; + bytep++; + } + + /* + * Count the bits in each byte until the end of the bitmap. + */ + while (bytep < end_map) { + bits += xfs_countbit[*bytep]; + bytep++; + } + + return (bits); +} + +/* + * Count the number of contiguous bits set in the bitmap starting with bit + * start_bit. Size is the size of the bitmap in words. + */ +int +xfs_contig_bits(uint *map, uint size, uint start_bit) +{ +#if BITS_PER_LONG == 32 + return find_next_zero_bit(map,size*sizeof(uint)*8,start_bit) - start_bit; +#else + /* + * The first argument to find_next_zero_bit needs to be aligned, + * but this is coming from the xfs_buf_log_format_t on-disk + * struct, which can't be padded or otherwise modified w/o breaking + * on-disk compatibility... so create a temporary, aligned + * variable, copy over the bitmap, and send that to find_next_zero_bit + * This only happens in recovery, so it's ugly but not too bad. + */ + void * addr; + int bit; + size_t bitmap_size = size * sizeof(uint); + + addr = (void *)kmem_alloc(bitmap_size, KM_SLEEP); + memcpy(addr, map, size * sizeof(uint)); + + bit = find_next_zero_bit(addr,size*sizeof(uint)*8,start_bit) - start_bit; + + kmem_free(addr, bitmap_size); + + return bit; +#endif +} + +/* + * This takes the bit number to start looking from and + * returns the next set bit from there. It returns -1 + * if there are no more bits set or the start bit is + * beyond the end of the bitmap. + * + * Size is the number of words, not bytes, in the bitmap. + */ +int xfs_next_bit(uint *map, uint size, uint start_bit) +{ + uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT); + uint result = start_bit & ~(NBWORD - 1); + uint tmp; + + size <<= BIT_TO_WORD_SHIFT; + + if (start_bit >= size) + return -1; + size -= result; + start_bit &= (NBWORD - 1); + if (start_bit) { + tmp = *p++; + /* set to zero first offset bits */ + tmp &= (~0U << start_bit); + if (size < NBWORD) + goto found_first; + if (tmp != 0U) + goto found_middle; + size -= NBWORD; + result += NBWORD; + } + while (size >= NBWORD) { + if ((tmp = *p++) != 0U) + goto found_middle; + result += NBWORD; + size -= NBWORD; + } + if (!size) + return -1; + tmp = *p; +found_first: +found_middle: + return result + ffs(tmp) - 1; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_bit.h linux.21rc5-ac2/fs/xfs/xfs_bit.h --- linux.21rc5/fs/xfs/xfs_bit.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_bit.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BIT_H__ +#define __XFS_BIT_H__ + +/* + * XFS bit manipulation routines. + */ + +/* + * masks with n high/low bits set, 32-bit values & 64-bit values + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK32HI) +__uint32_t xfs_mask32hi(int n); +#define XFS_MASK32HI(n) xfs_mask32hi(n) +#else +#define XFS_MASK32HI(n) ((__uint32_t)-1 << (32 - (n))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK64HI) +__uint64_t xfs_mask64hi(int n); +#define XFS_MASK64HI(n) xfs_mask64hi(n) +#else +#define XFS_MASK64HI(n) ((__uint64_t)-1 << (64 - (n))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK32LO) +__uint32_t xfs_mask32lo(int n); +#define XFS_MASK32LO(n) xfs_mask32lo(n) +#else +#define XFS_MASK32LO(n) (((__uint32_t)1 << (n)) - 1) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK64LO) +__uint64_t xfs_mask64lo(int n); +#define XFS_MASK64LO(n) xfs_mask64lo(n) +#else +#define XFS_MASK64LO(n) (((__uint64_t)1 << (n)) - 1) +#endif + +/* Get high bit set out of 32-bit argument, -1 if none set */ +extern int xfs_highbit32(__uint32_t v); + +/* Get low bit set out of 64-bit argument, -1 if none set */ +extern int xfs_lowbit64(__uint64_t v); + +/* Get high bit set out of 64-bit argument, -1 if none set */ +extern int xfs_highbit64(__uint64_t); + +/* Count set bits in map starting with start_bit */ +extern int xfs_count_bits(uint *map, uint size, uint start_bit); + +/* Count continuous one bits in map starting with start_bit */ +extern int xfs_contig_bits(uint *map, uint size, uint start_bit); + +/* Find next set bit in map */ +extern int xfs_next_bit(uint *map, uint size, uint start_bit); + +#endif /* __XFS_BIT_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_bmap_btree.c linux.21rc5-ac2/fs/xfs/xfs_bmap_btree.c --- linux.21rc5/fs/xfs/xfs_bmap_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_bmap_btree.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,2815 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_itable.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_bit.h" +#include "xfs_bmap.h" +#include "xfs_error.h" +#include "xfs_quota.h" + +#ifdef DEBUG +ktrace_t *xfs_bmbt_trace_buf; +#endif + +/* + * Prototypes for internal btree functions. + */ + + +STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *); +STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *, + xfs_bmbt_key_t *, xfs_btree_cur_t **, int *); +STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int); + + +#if defined(XFS_BMBT_TRACE) +/* + * Add a trace buffer entry for the arguments given to the routine, + * generic form. 
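+ * The first word of each trace entry packs the record type, the fork
+ * and the source line as (type | whichfork << 8 | line << 16); the
+ * wrappers below split 64-bit values into two 32-bit halves.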
+ */ +STATIC void +xfs_bmbt_trace_enter( + char *func, + xfs_btree_cur_t *cur, + char *s, + int type, + int line, + __psunsigned_t a0, + __psunsigned_t a1, + __psunsigned_t a2, + __psunsigned_t a3, + __psunsigned_t a4, + __psunsigned_t a5, + __psunsigned_t a6, + __psunsigned_t a7, + __psunsigned_t a8, + __psunsigned_t a9, + __psunsigned_t a10) +{ + xfs_inode_t *ip; + int whichfork; + + ip = cur->bc_private.b.ip; + whichfork = cur->bc_private.b.whichfork; + ktrace_enter(xfs_bmbt_trace_buf, + (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), + (void *)func, (void *)s, (void *)ip, (void *)cur, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10); + ASSERT(ip->i_btrace); + ktrace_enter(ip->i_btrace, + (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), + (void *)func, (void *)s, (void *)ip, (void *)cur, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10); +} +/* + * Add a trace buffer entry for arguments, for a buffer & 1 integer arg. + */ +STATIC void +xfs_bmbt_trace_argbi( + char *func, + xfs_btree_cur_t *cur, + xfs_buf_t *b, + int i, + int line) +{ + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBI, line, + (__psunsigned_t)b, i, 0, 0, + 0, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for a buffer & 2 integer args. + */ +STATIC void +xfs_bmbt_trace_argbii( + char *func, + xfs_btree_cur_t *cur, + xfs_buf_t *b, + int i0, + int i1, + int line) +{ + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBII, line, + (__psunsigned_t)b, i0, i1, 0, + 0, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for 3 block-length args + * and an integer arg. + */ +STATIC void +xfs_bmbt_trace_argfffi( + char *func, + xfs_btree_cur_t *cur, + xfs_dfiloff_t o, + xfs_dfsbno_t b, + xfs_dfilblks_t i, + int j, + int line) +{ + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGFFFI, line, + o >> 32, (int)o, b >> 32, (int)b, + i >> 32, (int)i, (int)j, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for one integer arg. + */ +STATIC void +xfs_bmbt_trace_argi( + char *func, + xfs_btree_cur_t *cur, + int i, + int line) +{ + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGI, line, + i, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for int, fsblock, key. + */ +STATIC void +xfs_bmbt_trace_argifk( + char *func, + xfs_btree_cur_t *cur, + int i, + xfs_fsblock_t f, + xfs_bmbt_key_t *k, + int line) +{ + xfs_dfsbno_t d; + xfs_dfiloff_t o; + + d = (xfs_dfsbno_t)f; + o = INT_GET(k->br_startoff, ARCH_CONVERT); + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line, + i, d >> 32, (int)d, o >> 32, + (int)o, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for int, fsblock, rec. 
+ */ +STATIC void +xfs_bmbt_trace_argifr( + char *func, + xfs_btree_cur_t *cur, + int i, + xfs_fsblock_t f, + xfs_bmbt_rec_t *r, + int line) +{ + xfs_dfsbno_t b; + xfs_dfilblks_t c; + xfs_dfsbno_t d; + xfs_dfiloff_t o; + xfs_bmbt_irec_t s; + + d = (xfs_dfsbno_t)f; + xfs_bmbt_disk_get_all(r, &s); + o = (xfs_dfiloff_t)s.br_startoff; + b = (xfs_dfsbno_t)s.br_startblock; + c = s.br_blockcount; + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFR, line, + i, d >> 32, (int)d, o >> 32, + (int)o, b >> 32, (int)b, c >> 32, + (int)c, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for int, key. + */ +STATIC void +xfs_bmbt_trace_argik( + char *func, + xfs_btree_cur_t *cur, + int i, + xfs_bmbt_key_t *k, + int line) +{ + xfs_dfiloff_t o; + + o = INT_GET(k->br_startoff, ARCH_CONVERT); + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line, + i, o >> 32, (int)o, 0, + 0, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for the cursor/operation. + */ +STATIC void +xfs_bmbt_trace_cursor( + char *func, + xfs_btree_cur_t *cur, + char *s, + int line) +{ + xfs_bmbt_rec_t r; + + xfs_bmbt_set_all(&r, &cur->bc_rec.b); + xfs_bmbt_trace_enter(func, cur, s, XFS_BMBT_KTRACE_CUR, line, + (cur->bc_nlevels << 24) | (cur->bc_private.b.flags << 16) | + cur->bc_private.b.allocated, + INT_GET(r.l0, ARCH_CONVERT) >> 32, (int)INT_GET(r.l0, ARCH_CONVERT), INT_GET(r.l1, ARCH_CONVERT) >> 32, (int)INT_GET(r.l1, ARCH_CONVERT), + (unsigned long)cur->bc_bufs[0], (unsigned long)cur->bc_bufs[1], + (unsigned long)cur->bc_bufs[2], (unsigned long)cur->bc_bufs[3], + (cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1], + (cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]); +} + +#define XFS_BMBT_TRACE_ARGBI(c,b,i) \ + xfs_bmbt_trace_argbi(fname, c, b, i, __LINE__) +#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \ + xfs_bmbt_trace_argbii(fname, c, b, i, j, __LINE__) +#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \ + xfs_bmbt_trace_argfffi(fname, c, o, b, i, j, __LINE__) +#define XFS_BMBT_TRACE_ARGI(c,i) \ + xfs_bmbt_trace_argi(fname, c, i, __LINE__) +#define XFS_BMBT_TRACE_ARGIFK(c,i,f,k) \ + xfs_bmbt_trace_argifk(fname, c, i, f, k, __LINE__) +#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ + xfs_bmbt_trace_argifr(fname, c, i, f, r, __LINE__) +#define XFS_BMBT_TRACE_ARGIK(c,i,k) \ + xfs_bmbt_trace_argik(fname, c, i, k, __LINE__) +#define XFS_BMBT_TRACE_CURSOR(c,s) \ + xfs_bmbt_trace_cursor(fname, c, s, __LINE__) +static char ARGS[] = "args"; +static char ENTRY[] = "entry"; +static char ERROR[] = "error"; +#undef EXIT +static char EXIT[] = "exit"; +#else +#define XFS_BMBT_TRACE_ARGBI(c,b,i) +#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) +#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) +#define XFS_BMBT_TRACE_ARGI(c,i) +#define XFS_BMBT_TRACE_ARGIFK(c,i,f,k) +#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) +#define XFS_BMBT_TRACE_ARGIK(c,i,k) +#define XFS_BMBT_TRACE_CURSOR(c,s) +#endif /* XFS_BMBT_TRACE */ + + +/* + * Internal functions. + */ + +/* + * Delete record pointed to by cur/level. 
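+ * If removing the record leaves the block under-full, first try to
+ * borrow a record from a sibling (xfs_bmbt_lshift/xfs_bmbt_rshift);
+ * failing that, merge with a sibling block.  *stat is 0 for no-op,
+ * 1 when the delete is complete at this level, and 2 when the caller
+ * must go on to delete the now-empty block from the next level up.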
+ */ +STATIC int /* error */ +xfs_bmbt_delrec( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + xfs_bmbt_block_t *block; /* bmap btree block */ + xfs_fsblock_t bno; /* fs-relative block number */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_delrec"; +#endif + int i; /* loop counter */ + int j; /* temp state */ + xfs_bmbt_key_t key; /* bmap btree key */ + xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */ + xfs_fsblock_t lbno; /* left sibling block number */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_bmbt_block_t *left; /* left btree block */ + xfs_bmbt_key_t *lkp; /* left btree key */ + xfs_bmbt_ptr_t *lpp; /* left address pointer */ + int lrecs=0; /* left record count */ + xfs_bmbt_rec_t *lrp; /* left record pointer */ + xfs_mount_t *mp; /* file system mount point */ + xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ + int ptr; /* key/record index */ + xfs_fsblock_t rbno; /* right sibling block number */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_bmbt_block_t *right; /* right btree block */ + xfs_bmbt_key_t *rkp; /* right btree key */ + xfs_bmbt_rec_t *rp; /* pointer to bmap btree rec */ + xfs_bmbt_ptr_t *rpp; /* right address pointer */ + xfs_bmbt_block_t *rrblock; /* right-right btree block */ + xfs_buf_t *rrbp; /* right-right buffer pointer */ + int rrecs=0; /* right record count */ + xfs_bmbt_rec_t *rrp; /* right record pointer */ + xfs_btree_cur_t *tcur; /* temporary btree cursor */ + int numrecs; /* temporary numrec count */ + int numlrecs, numrrecs; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + ptr = cur->bc_ptrs[level]; + tcur = (xfs_btree_cur_t *)0; + if (ptr == 0) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + block = xfs_bmbt_get_block(cur, level, &bp); + numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } +#endif + if (ptr > numrecs) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + XFS_STATS_INC(xfsstats.xs_bmbt_delrec); + if (level > 0) { + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); +#ifdef DEBUG + for (i = ptr; i < numrecs; i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + } +#endif + if (ptr < numrecs) { + memmove(&kp[ptr - 1], &kp[ptr], + (numrecs - ptr) * sizeof(*kp)); + memmove(&pp[ptr - 1], &pp[ptr], /* INT_: direct copy */ + (numrecs - ptr) * sizeof(*pp)); + xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1); + xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1); + } + } else { + rp = XFS_BMAP_REC_IADDR(block, 1, cur); + if (ptr < numrecs) { + memmove(&rp[ptr - 1], &rp[ptr], + (numrecs - ptr) * sizeof(*rp)); + xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1); + } + if (ptr == 1) { + INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(rp)); + kp = &key; + } + } + numrecs--; + INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); + /* + * We're at the root level. + * First, shrink the root block in-memory. + * Try to get rid of the next level down. + * If we can't then there's nothing left to do. 
+ */ + if (level == cur->bc_nlevels - 1) { + xfs_iroot_realloc(cur->bc_private.b.ip, -1, + cur->bc_private.b.whichfork); + if ((error = xfs_bmbt_killroot(cur))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + if (ptr == 1 && (error = xfs_bmbt_updkey(cur, kp, level + 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) { + if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); + /* + * One child of root, need to get a chance to copy its contents + * into the root and delete it. Can't go up to next level, + * there's nothing to delete there. + */ + if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK && + level == cur->bc_nlevels - 2) { + if ((error = xfs_bmbt_killroot(cur))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK); + if ((error = xfs_btree_dup_cursor(cur, &tcur))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + bno = NULLFSBLOCK; + if (rbno != NULLFSBLOCK) { + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_bmbt_increment(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + rbp = tcur->bc_bufs[level]; + right = XFS_BUF_TO_BMBT_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } +#endif + bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_BMAP_BLOCK_IMINRECS(level, cur)) { + if ((error = xfs_bmbt_lshift(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_BMAP_BLOCK_IMINRECS(level, tcur)); + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + tcur = NULL; + if (level > 0) { + if ((error = xfs_bmbt_decrement(cur, + level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, + ERROR); + goto error0; + } + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + } + rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + if (lbno != NULLFSBLOCK) { + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_bmbt_decrement(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + } + if (lbno != NULLFSBLOCK) { + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * decrement to last in block + */ + if ((error = xfs_bmbt_decrement(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + lbp = tcur->bc_bufs[level]; + left = XFS_BUF_TO_BMBT_BLOCK(lbp); +#ifdef DEBUG + if ((error = 
xfs_btree_check_lblock(cur, left, level, lbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } +#endif + bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_BMAP_BLOCK_IMINRECS(level, cur)) { + if ((error = xfs_bmbt_rshift(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_BMAP_BLOCK_IMINRECS(level, tcur)); + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + tcur = NULL; + if (level == 0) + cur->bc_ptrs[0]++; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + } + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + tcur = NULL; + mp = cur->bc_mp; + ASSERT(bno != NULLFSBLOCK); + if (lbno != NULLFSBLOCK && + lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + rbno = bno; + right = block; + rbp = bp; + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + left = XFS_BUF_TO_BMBT_BLOCK(lbp); + if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + } else if (rbno != NULLFSBLOCK && + rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + lbno = bno; + left = block; + lbp = bp; + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + right = XFS_BUF_TO_BMBT_BLOCK(rbp); + if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + } else { + if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + numlrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + numrrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + if (level > 0) { + lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur); + lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur); + rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); + rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < numrrecs; i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + } +#endif + memcpy(lkp, rkp, numrrecs * sizeof(*lkp)); + memcpy(lpp, rpp, numrrecs * sizeof(*lpp)); + xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs); + xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); + } else { + lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur); + rrp = XFS_BMAP_REC_IADDR(right, 1, cur); + memcpy(lrp, rrp, numrrecs * sizeof(*lrp)); + xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); + } + INT_MOD(left->bb_numrecs, ARCH_CONVERT, numrrecs); + left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ + xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS); + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, + INT_GET(left->bb_rightsib, ARCH_CONVERT), + 0, &rrbp, XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp); + if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) { + XFS_BMBT_TRACE_CURSOR(cur, 
ERROR); + goto error0; + } + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); + xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); + } + xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1, + cur->bc_private.b.flist, mp); + cur->bc_private.b.ip->i_d.di_nblocks--; + xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE); + XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip, + XFS_TRANS_DQ_BCOUNT, -1L); + xfs_trans_binval(cur->bc_tp, rbp); + if (bp != lbp) { + cur->bc_bufs[level] = lbp; + cur->bc_ptrs[level] += lrecs; + cur->bc_ra[level] = 0; + } else if ((error = xfs_bmbt_increment(cur, level + 1, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (level > 0) + cur->bc_ptrs[level]--; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 2; + return 0; + +error0: + if (tcur) + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} + +#ifdef XFSDEBUG +/* + * Get the data from the pointed-to record. + */ +int +xfs_bmbt_get_rec( + xfs_btree_cur_t *cur, + xfs_fileoff_t *off, + xfs_fsblock_t *bno, + xfs_filblks_t *len, + xfs_exntst_t *state, + int *stat) +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; +#ifdef DEBUG + int error; +#endif + int ptr; + xfs_bmbt_rec_t *rp; + + block = xfs_bmbt_get_block(cur, 0, &bp); + ptr = cur->bc_ptrs[0]; +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) + return error; +#endif + if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { + *stat = 0; + return 0; + } + rp = XFS_BMAP_REC_IADDR(block, ptr, cur); + *off = xfs_bmbt_disk_get_startoff(rp); + *bno = xfs_bmbt_disk_get_startblock(rp); + *len = xfs_bmbt_disk_get_blockcount(rp); + *state = xfs_bmbt_disk_get_state(rp); + *stat = 1; + return 0; +} +#endif + +/* + * Insert one record/level. Return information to the caller + * allowing the next level up to proceed if necessary. 
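+ * On a split, the new block number, a record keyed for the new block
+ * and a second cursor are handed back through *bnop, *recp and *curp
+ * so the caller can insert them at the next level up.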
+ */ +STATIC int /* error */ +xfs_bmbt_insrec( + xfs_btree_cur_t *cur, + int level, + xfs_fsblock_t *bnop, + xfs_bmbt_rec_t *recp, + xfs_btree_cur_t **curp, + int *stat) /* no-go/done/continue */ +{ + xfs_bmbt_block_t *block; /* bmap btree block */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_insrec"; +#endif + int i; /* loop index */ + xfs_bmbt_key_t key; /* bmap btree key */ + xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */ + int logflags; /* inode logging flags */ + xfs_fsblock_t nbno; /* new block number */ + struct xfs_btree_cur *ncur; /* new btree cursor */ + xfs_bmbt_key_t nkey; /* new btree key value */ + xfs_bmbt_rec_t nrec; /* new record count */ + int optr; /* old key/record index */ + xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ + int ptr; /* key/record index */ + xfs_bmbt_rec_t *rp=NULL; /* pointer to bmap btree rec */ + int numrecs; + + ASSERT(level < cur->bc_nlevels); + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp); + ncur = (xfs_btree_cur_t *)0; + INT_SET(key.br_startoff, ARCH_CONVERT, + xfs_bmbt_disk_get_startoff(recp)); + optr = ptr = cur->bc_ptrs[level]; + if (ptr == 0) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + XFS_STATS_INC(xfsstats.xs_bmbt_insrec); + block = xfs_bmbt_get_block(cur, level, &bp); + numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (ptr <= numrecs) { + if (level == 0) { + rp = XFS_BMAP_REC_IADDR(block, ptr, cur); + xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp); + } else { + kp = XFS_BMAP_KEY_IADDR(block, ptr, cur); + xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp); + } + } +#endif + nbno = NULLFSBLOCK; + if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) { + /* + * A root block, that can be made bigger. 
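+			 * (Only the incore root can grow in place;
+			 * any other full block must first shift
+			 * records to a sibling or be split, as in the
+			 * branches below.)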
+ */ + xfs_iroot_realloc(cur->bc_private.b.ip, 1, + cur->bc_private.b.whichfork); + block = xfs_bmbt_get_block(cur, level, &bp); + } else if (level == cur->bc_nlevels - 1) { + if ((error = xfs_bmbt_newroot(cur, &logflags, stat)) || + *stat == 0) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, + logflags); + block = xfs_bmbt_get_block(cur, level, &bp); + } else { + if ((error = xfs_bmbt_rshift(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (i) { + /* nothing */ + } else { + if ((error = xfs_bmbt_lshift(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (i) { + optr = ptr = cur->bc_ptrs[level]; + } else { + if ((error = xfs_bmbt_split(cur, level, + &nbno, &nkey, &ncur, + &i))) { + XFS_BMBT_TRACE_CURSOR(cur, + ERROR); + return error; + } + if (i) { + block = xfs_bmbt_get_block( + cur, level, &bp); +#ifdef DEBUG + if ((error = + xfs_btree_check_lblock(cur, + block, level, bp))) { + XFS_BMBT_TRACE_CURSOR( + cur, ERROR); + return error; + } +#endif + ptr = cur->bc_ptrs[level]; + xfs_bmbt_disk_set_allf(&nrec, + nkey.br_startoff, 0, 0, + XFS_EXT_NORM); + } else { + XFS_BMBT_TRACE_CURSOR(cur, + EXIT); + *stat = 0; + return 0; + } + } + } + } + } + numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + if (level > 0) { + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); +#ifdef DEBUG + for (i = numrecs; i >= ptr; i--) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), + level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memmove(&kp[ptr], &kp[ptr - 1], + (numrecs - ptr + 1) * sizeof(*kp)); + memmove(&pp[ptr], &pp[ptr - 1], /* INT_: direct copy */ + (numrecs - ptr + 1) * sizeof(*pp)); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)*bnop, + level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + kp[ptr - 1] = key; + INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); + numrecs++; + INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + xfs_bmbt_log_keys(cur, bp, ptr, numrecs); + xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs); + } else { + rp = XFS_BMAP_REC_IADDR(block, 1, cur); + memmove(&rp[ptr], &rp[ptr - 1], + (numrecs - ptr + 1) * sizeof(*rp)); + rp[ptr - 1] = *recp; + numrecs++; + INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + xfs_bmbt_log_recs(cur, bp, ptr, numrecs); + } + xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); +#ifdef DEBUG + if (ptr < numrecs) { + if (level == 0) + xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1, + rp + ptr); + else + xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1, + kp + ptr); + } +#endif + if (optr == 1 && (error = xfs_bmbt_updkey(cur, &key, level + 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + *bnop = nbno; + if (nbno != NULLFSBLOCK) { + *recp = nrec; + *curp = ncur; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + +STATIC int +xfs_bmbt_killroot( + xfs_btree_cur_t *cur) +{ + xfs_bmbt_block_t *block; + xfs_bmbt_block_t *cblock; + xfs_buf_t *cbp; + xfs_bmbt_key_t *ckp; + xfs_bmbt_ptr_t *cpp; +#ifdef DEBUG + int error; +#endif +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_killroot"; +#endif + int i; + xfs_bmbt_key_t *kp; + xfs_inode_t *ip; + xfs_ifork_t *ifp; + int level; + xfs_bmbt_ptr_t *pp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + level = cur->bc_nlevels - 1; + ASSERT(level >= 1); + /* + * Don't deal with the root block needs to be a leaf case. 
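+ * (That is, the case where level == 1 and the root's only
+ * child is a leaf.)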
+ * We're just going to turn the thing back into extents anyway. + */ + if (level == 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; + } + block = xfs_bmbt_get_block(cur, level, &cbp); + /* + * Give up if the root has multiple children. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) != 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; + } + /* + * Only do this if the next level will fit. + * Then the data must be copied up to the inode, + * instead of freeing the root you free the next level. + */ + cbp = cur->bc_bufs[level - 1]; + cblock = XFS_BUF_TO_BMBT_BLOCK(cbp); + if (INT_GET(cblock->bb_numrecs, ARCH_CONVERT) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; + } + ASSERT(INT_GET(cblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO); + ASSERT(INT_GET(cblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO); + ip = cur->bc_private.b.ip; + ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork); + ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) == + XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes)); + i = (int)(INT_GET(cblock->bb_numrecs, ARCH_CONVERT) - XFS_BMAP_BLOCK_IMAXRECS(level, cur)); + if (i) { + xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork); + block = ifp->if_broot; + } + INT_MOD(block->bb_numrecs, ARCH_CONVERT, i); + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) == INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); + memcpy(kp, ckp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*kp)); + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); + cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(cpp[i], ARCH_CONVERT), level - 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memcpy(pp, cpp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*pp)); + xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1, + cur->bc_private.b.flist, cur->bc_mp); + ip->i_d.di_nblocks--; + XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip, + XFS_TRANS_DQ_BCOUNT, -1L); + xfs_trans_binval(cur->bc_tp, cbp); + cur->bc_bufs[level - 1] = NULL; + INT_MOD(block->bb_level, ARCH_CONVERT, -1); + xfs_trans_log_inode(cur->bc_tp, ip, + XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); + cur->bc_nlevels--; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; +} + +/* + * Log key values from the btree block. + */ +STATIC void +xfs_bmbt_log_keys( + xfs_btree_cur_t *cur, + xfs_buf_t *bp, + int kfirst, + int klast) +{ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_log_keys"; +#endif + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast); + tp = cur->bc_tp; + if (bp) { + xfs_bmbt_block_t *block; + int first; + xfs_bmbt_key_t *kp; + int last; + + block = XFS_BUF_TO_BMBT_BLOCK(bp); + kp = XFS_BMAP_KEY_DADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(tp, bp, first, last); + } else { + xfs_inode_t *ip; + + ip = cur->bc_private.b.ip; + xfs_trans_log_inode(tp, ip, + XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); +} + +/* + * Log pointer values from the btree block. 
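+ * A null buffer pointer means the block is the inode-resident root,
+ * in which case the inode fork is logged instead.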
+ */ +STATIC void +xfs_bmbt_log_ptrs( + xfs_btree_cur_t *cur, + xfs_buf_t *bp, + int pfirst, + int plast) +{ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_log_ptrs"; +#endif + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast); + tp = cur->bc_tp; + if (bp) { + xfs_bmbt_block_t *block; + int first; + int last; + xfs_bmbt_ptr_t *pp; + + block = XFS_BUF_TO_BMBT_BLOCK(bp); + pp = XFS_BMAP_PTR_DADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(tp, bp, first, last); + } else { + xfs_inode_t *ip; + + ip = cur->bc_private.b.ip; + xfs_trans_log_inode(tp, ip, + XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); +} + +/* + * Lookup the record. The cursor is made to point to it, based on dir. + */ +STATIC int /* error */ +xfs_bmbt_lookup( + xfs_btree_cur_t *cur, + xfs_lookup_t dir, + int *stat) /* success/failure */ +{ + xfs_bmbt_block_t *block=NULL; + xfs_buf_t *bp; + xfs_daddr_t d; + xfs_sfiloff_t diff; + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_lookup"; +#endif + xfs_fsblock_t fsbno=0; + int high; + int i; + int keyno=0; + xfs_bmbt_key_t *kkbase=NULL; + xfs_bmbt_key_t *kkp; + xfs_bmbt_rec_t *krbase=NULL; + xfs_bmbt_rec_t *krp; + int level; + int low; + xfs_mount_t *mp; + xfs_bmbt_ptr_t *pp; + xfs_bmbt_irec_t *rp; + xfs_fileoff_t startoff; + xfs_trans_t *tp; + + XFS_STATS_INC(xfsstats.xs_bmbt_lookup); + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, (int)dir); + tp = cur->bc_tp; + mp = cur->bc_mp; + rp = &cur->bc_rec.b; + for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { + if (level < cur->bc_nlevels - 1) { + d = XFS_FSB_TO_DADDR(mp, fsbno); + bp = cur->bc_bufs[level]; + if (bp && XFS_BUF_ADDR(bp) != d) + bp = (xfs_buf_t *)0; + if (!bp) { + if ((error = xfs_btree_read_bufl(mp, tp, fsbno, + 0, &bp, XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + xfs_btree_setbuf(cur, level, bp); + block = XFS_BUF_TO_BMBT_BLOCK(bp); + if ((error = xfs_btree_check_lblock(cur, block, + level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } else + block = XFS_BUF_TO_BMBT_BLOCK(bp); + } else + block = xfs_bmbt_get_block(cur, level, &bp); + if (diff == 0) + keyno = 1; + else { + if (level > 0) + kkbase = XFS_BMAP_KEY_IADDR(block, 1, cur); + else + krbase = XFS_BMAP_REC_IADDR(block, 1, cur); + low = 1; + if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { + ASSERT(level == 0); + cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + while (low <= high) { + XFS_STATS_INC(xfsstats.xs_bmbt_compare); + keyno = (low + high) >> 1; + if (level > 0) { + kkp = kkbase + keyno - 1; + startoff = INT_GET(kkp->br_startoff, ARCH_CONVERT); + } else { + krp = krbase + keyno - 1; + startoff = xfs_bmbt_disk_get_startoff(krp); + } + diff = (xfs_sfiloff_t) + (startoff - rp->br_startoff); + if (diff < 0) + low = keyno + 1; + else if (diff > 0) + high = keyno - 1; + else + break; + } + } + if (level > 0) { + if (diff > 0 && --keyno < 1) + keyno = 1; + pp = XFS_BMAP_PTR_IADDR(block, keyno, cur); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + fsbno = INT_GET(*pp, ARCH_CONVERT); + cur->bc_ptrs[level] = keyno; + } + } + 
if (dir != XFS_LOOKUP_LE && diff < 0) { + keyno++; + /* + * If ge search and we went off the end of the block, but it's + * not the last block, we're in the wrong block. + */ + if (dir == XFS_LOOKUP_GE && keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && + INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + cur->bc_ptrs[0] = keyno; + if ((error = xfs_bmbt_increment(cur, 0, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + } + else if (dir == XFS_LOOKUP_LE && diff > 0) + keyno--; + cur->bc_ptrs[0] = keyno; + if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + } else { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); + } + return 0; +} + +/* + * Move 1 record left from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_bmbt_lshift( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_lshift"; +#endif +#ifdef DEBUG + int i; /* loop counter */ +#endif + xfs_bmbt_key_t key; /* bmap btree key */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_bmbt_block_t *left; /* left btree block */ + xfs_bmbt_key_t *lkp=NULL; /* left btree key */ + xfs_bmbt_ptr_t *lpp; /* left address pointer */ + int lrecs; /* left record count */ + xfs_bmbt_rec_t *lrp=NULL; /* left record pointer */ + xfs_mount_t *mp; /* file system mount point */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_bmbt_block_t *right; /* right btree block */ + xfs_bmbt_key_t *rkp=NULL; /* right btree key */ + xfs_bmbt_ptr_t *rpp=NULL; /* right address pointer */ + xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */ + int rrecs; /* right record count */ + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + if (level == cur->bc_nlevels - 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + rbp = cur->bc_bufs[level]; + right = XFS_BUF_TO_BMBT_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + if (cur->bc_ptrs[level] <= 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + mp = cur->bc_mp; + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, + &lbp, XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + left = XFS_BUF_TO_BMBT_BLOCK(lbp); + if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; + if (level > 0) { + lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur); + rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); + *lkp = *rkp; + xfs_bmbt_log_keys(cur, lbp, lrecs, lrecs); + lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur); + rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + *lpp = *rpp; /* 
INT_: direct copy */ + xfs_bmbt_log_ptrs(cur, lbp, lrecs, lrecs); + } else { + lrp = XFS_BMAP_REC_IADDR(left, lrecs, cur); + rrp = XFS_BMAP_REC_IADDR(right, 1, cur); + *lrp = *rrp; + xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs); + } + INT_SET(left->bb_numrecs, ARCH_CONVERT, lrecs); + xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); +#ifdef DEBUG + if (level > 0) + xfs_btree_check_key(XFS_BTNUM_BMAP, lkp - 1, lkp); + else + xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp); +#endif + rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; + INT_SET(right->bb_numrecs, ARCH_CONVERT, rrecs); + xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS); + if (level > 0) { +#ifdef DEBUG + for (i = 0; i < rrecs; i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), + level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memmove(rkp, rkp + 1, rrecs * sizeof(*rkp)); + memmove(rpp, rpp + 1, rrecs * sizeof(*rpp)); + xfs_bmbt_log_keys(cur, rbp, 1, rrecs); + xfs_bmbt_log_ptrs(cur, rbp, 1, rrecs); + } else { + memmove(rrp, rrp + 1, rrecs * sizeof(*rrp)); + xfs_bmbt_log_recs(cur, rbp, 1, rrecs); + INT_SET(key.br_startoff, ARCH_CONVERT, + xfs_bmbt_disk_get_startoff(rrp)); + rkp = &key; + } + if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + cur->bc_ptrs[level]--; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + +/* + * Move 1 record right from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_bmbt_rshift( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_rshift"; +#endif + int i; /* loop counter */ + xfs_bmbt_key_t key; /* bmap btree key */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_bmbt_block_t *left; /* left btree block */ + xfs_bmbt_key_t *lkp; /* left btree key */ + xfs_bmbt_ptr_t *lpp; /* left address pointer */ + xfs_bmbt_rec_t *lrp; /* left record pointer */ + xfs_mount_t *mp; /* file system mount point */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_bmbt_block_t *right; /* right btree block */ + xfs_bmbt_key_t *rkp; /* right btree key */ + xfs_bmbt_ptr_t *rpp; /* right address pointer */ + xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */ + struct xfs_btree_cur *tcur; /* temporary btree cursor */ + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + if (level == cur->bc_nlevels - 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + lbp = cur->bc_bufs[level]; + left = XFS_BUF_TO_BMBT_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + mp = cur->bc_mp; + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, + &rbp, XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + right = XFS_BUF_TO_BMBT_BLOCK(rbp); + if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + 
XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + if (level > 0) { + lkp = XFS_BMAP_KEY_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lpp = XFS_BMAP_PTR_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); + rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); +#ifdef DEBUG + for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + *rkp = *lkp; + *rpp = *lpp; /* INT_: direct copy */ + xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + } else { + lrp = XFS_BMAP_REC_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rrp = XFS_BMAP_REC_IADDR(right, 1, cur); + memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + *rrp = *lrp; + xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + INT_SET(key.br_startoff, ARCH_CONVERT, + xfs_bmbt_disk_get_startoff(rrp)); + rkp = &key; + } + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); + xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); +#ifdef DEBUG + if (level > 0) + xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1); + else + xfs_btree_check_rec(XFS_BTNUM_BMAP, rrp, rrp + 1); +#endif + xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS); + if ((error = xfs_btree_dup_cursor(cur, &tcur))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_bmbt_increment(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(tcur, ERROR); + goto error1; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_bmbt_updkey(tcur, rkp, level + 1))) { + XFS_BMBT_TRACE_CURSOR(tcur, ERROR); + goto error1; + } + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +error0: + XFS_BMBT_TRACE_CURSOR(cur, ERROR); +error1: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} + +/* + * Determine the extent state. + */ +/* ARGSUSED */ +STATIC xfs_exntst_t +xfs_extent_state( + xfs_filblks_t blks, + int extent_flag) +{ + if (extent_flag) { + ASSERT(blks != 0); /* saved for DMIG */ + return XFS_EXT_UNWRITTEN; + } + return XFS_EXT_NORM; +} + + +/* + * Split cur/level block in half. + * Return new block number and its first record (to be inserted into parent). 
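+ * Roughly half of the records move into the new right sibling; with
+ * an odd count, which half gets the extra record depends on where the
+ * cursor currently points.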
+ */ +STATIC int /* error */ +xfs_bmbt_split( + xfs_btree_cur_t *cur, + int level, + xfs_fsblock_t *bnop, + xfs_bmbt_key_t *keyp, + xfs_btree_cur_t **curp, + int *stat) /* success/failure */ +{ + xfs_alloc_arg_t args; /* block allocation args */ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_split"; +#endif + int i; /* loop counter */ + xfs_fsblock_t lbno; /* left sibling block number */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_bmbt_block_t *left; /* left btree block */ + xfs_bmbt_key_t *lkp; /* left btree key */ + xfs_bmbt_ptr_t *lpp; /* left address pointer */ + xfs_bmbt_rec_t *lrp; /* left record pointer */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_bmbt_block_t *right; /* right btree block */ + xfs_bmbt_key_t *rkp; /* right btree key */ + xfs_bmbt_ptr_t *rpp; /* right address pointer */ + xfs_bmbt_block_t *rrblock; /* right-right btree block */ + xfs_buf_t *rrbp; /* right-right buffer pointer */ + xfs_bmbt_rec_t *rrp; /* right record pointer */ + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, keyp); + args.tp = cur->bc_tp; + args.mp = cur->bc_mp; + lbp = cur->bc_bufs[level]; + lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp)); + left = XFS_BUF_TO_BMBT_BLOCK(lbp); + args.fsbno = cur->bc_private.b.firstblock; + if (args.fsbno == NULLFSBLOCK) { + args.fsbno = lbno; + args.type = XFS_ALLOCTYPE_START_BNO; + } else if (cur->bc_private.b.flist->xbf_low) + args.type = XFS_ALLOCTYPE_FIRST_AG; + else + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.mod = args.minleft = args.alignment = args.total = args.isfl = + args.userdata = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; + if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return XFS_ERROR(ENOSPC); + } + if ((error = xfs_alloc_vextent(&args))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (args.fsbno == NULLFSBLOCK) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + cur->bc_private.b.firstblock = args.fsbno; + cur->bc_private.b.allocated++; + cur->bc_private.b.ip->i_d.di_nblocks++; + xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE); + XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip, + XFS_TRANS_DQ_BCOUNT, 1L); + rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0); + right = XFS_BUF_TO_BMBT_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, left, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + INT_SET(right->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); + right->bb_level = left->bb_level; /* INT_: direct copy */ + INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); + if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && + cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; + if (level > 0) { + lkp = XFS_BMAP_KEY_IADDR(left, i, cur); + lpp = XFS_BMAP_PTR_IADDR(left, i, cur); + rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); + rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return 
error; + } + } +#endif + memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + keyp->br_startoff = INT_GET(rkp->br_startoff, ARCH_CONVERT); + } else { + lrp = XFS_BMAP_REC_IADDR(left, i, cur); + rrp = XFS_BMAP_REC_IADDR(right, 1, cur); + memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + keyp->br_startoff = xfs_bmbt_disk_get_startoff(rrp); + } + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); + right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ + INT_SET(left->bb_rightsib, ARCH_CONVERT, args.fsbno); + INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); + xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS); + xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + if ((error = xfs_btree_read_bufl(args.mp, args.tp, + INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp); + if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.fsbno); + xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); + } + if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { + xfs_btree_setbuf(cur, level, rbp); + cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + if (level + 1 < cur->bc_nlevels) { + if ((error = xfs_btree_dup_cursor(cur, curp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + (*curp)->bc_ptrs[level + 1]++; + } + *bnop = args.fsbno; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + + +/* + * Update keys for the record. + */ +STATIC int +xfs_bmbt_updkey( + xfs_btree_cur_t *cur, + xfs_bmbt_key_t *keyp, /* on-disk format */ + int level) +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; +#ifdef DEBUG + int error; +#endif +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_updkey"; +#endif + xfs_bmbt_key_t *kp; + int ptr; + + ASSERT(level >= 1); + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGIK(cur, level, keyp); + for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { + block = xfs_bmbt_get_block(cur, level, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + ptr = cur->bc_ptrs[level]; + kp = XFS_BMAP_KEY_IADDR(block, ptr, cur); + *kp = *keyp; + xfs_bmbt_log_keys(cur, bp, ptr, ptr); + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; +} + +/* + * Convert on-disk form of btree root to in-memory form. 
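+ * The incore root uses the full long-block header, so fill in the
+ * magic number and null sibling pointers before copying the keys and
+ * pointers across.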
+ */ +void +xfs_bmdr_to_bmbt( + xfs_bmdr_block_t *dblock, + int dblocklen, + xfs_bmbt_block_t *rblock, + int rblocklen) +{ + int dmxr; + xfs_bmbt_key_t *fkp; + xfs_bmbt_ptr_t *fpp; + xfs_bmbt_key_t *tkp; + xfs_bmbt_ptr_t *tpp; + + INT_SET(rblock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); + rblock->bb_level = dblock->bb_level; /* both in on-disk format */ + ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0); + rblock->bb_numrecs = dblock->bb_numrecs;/* both in on-disk format */ + INT_SET(rblock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); + INT_SET(rblock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); + dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); + fkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); + tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); + fpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); + tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); + dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT); + memcpy(tkp, fkp, sizeof(*fkp) * dmxr); + memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ +} + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_bmbt_decrement( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_decrement"; +#endif + xfs_fsblock_t fsbno; + int lev; + xfs_mount_t *mp; + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + ASSERT(level < cur->bc_nlevels); + if (level < cur->bc_nlevels - 1) + xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); + if (--cur->bc_ptrs[level] > 0) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + block = xfs_bmbt_get_block(cur, level, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + if (--cur->bc_ptrs[lev] > 0) + break; + if (lev < cur->bc_nlevels - 1) + xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); + } + if (lev == cur->bc_nlevels) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + tp = cur->bc_tp; + mp = cur->bc_mp; + for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) { + fsbno = INT_GET(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_BMBT_BLOCK(bp); + if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + +/* + * Delete the record pointed to by cur. 
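+ * xfs_bmbt_delrec sets *stat to 2 when the delete has to continue at
+ * the next level up, so loop over the levels until it reports either
+ * done (1) or nothing to do (0).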
+ */ +int /* error */ +xfs_bmbt_delete( + xfs_btree_cur_t *cur, + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_delete"; +#endif + int i; + int level; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + for (level = 0, i = 2; i == 2; level++) { + if ((error = xfs_bmbt_delrec(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } + if (i == 0) { + for (level = 1; level < cur->bc_nlevels; level++) { + if (cur->bc_ptrs[level] == 0) { + if ((error = xfs_bmbt_decrement(cur, level, + &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + break; + } + } + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = i; + return 0; +} + +/* + * Convert a compressed bmap extent record to an uncompressed form. + * This code must be in sync with the routines xfs_bmbt_get_startoff, + * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state. + */ + +STATIC __inline__ void +__xfs_bmbt_get_all( + __uint64_t l0, + __uint64_t l1, + xfs_bmbt_irec_t *s) +{ + int ext_flag; + xfs_exntst_t st; + + ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN)); + s->br_startoff = ((xfs_fileoff_t)l0 & + XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; +#if XFS_BIG_FILESYSTEMS + s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) | + (((xfs_fsblock_t)l1) >> 21); +#else +#ifdef DEBUG + { + xfs_dfsbno_t b; + + b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) | + (((xfs_dfsbno_t)l1) >> 21); + ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); + s->br_startblock = (xfs_fsblock_t)b; + } +#else /* !DEBUG */ + s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21); +#endif /* DEBUG */ +#endif /* XFS_BIG_FILESYSTEMS */ + s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21)); + /* This is xfs_extent_state() in-line */ + if (ext_flag) { + ASSERT(s->br_blockcount != 0); /* saved for DMIG */ + st = XFS_EXT_UNWRITTEN; + } else + st = XFS_EXT_NORM; + s->br_state = st; +} + +void +xfs_bmbt_get_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s) +{ + __xfs_bmbt_get_all(r->l0, r->l1, s); +} + +/* + * Get the block pointer for the given level of the cursor. + * Fill in the buffer pointer, if applicable. + */ +xfs_bmbt_block_t * +xfs_bmbt_get_block( + xfs_btree_cur_t *cur, + int level, + xfs_buf_t **bpp) +{ + xfs_ifork_t *ifp; + xfs_bmbt_block_t *rval; + + if (level < cur->bc_nlevels - 1) { + *bpp = cur->bc_bufs[level]; + rval = XFS_BUF_TO_BMBT_BLOCK(*bpp); + } else { + *bpp = 0; + ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, + cur->bc_private.b.whichfork); + rval = ifp->if_broot; + } + return rval; +} + +/* + * Extract the blockcount field from an in memory bmap extent record. + */ +xfs_filblks_t +xfs_bmbt_get_blockcount( + xfs_bmbt_rec_t *r) +{ + return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21)); +} + +/* + * Extract the startblock field from an in memory bmap extent record. + */ +xfs_fsblock_t +xfs_bmbt_get_startblock( + xfs_bmbt_rec_t *r) +{ +#if XFS_BIG_FILESYSTEMS + return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) | + (((xfs_fsblock_t)r->l1) >> 21); +#else +#ifdef DEBUG + xfs_dfsbno_t b; + + b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) | + (((xfs_dfsbno_t)r->l1) >> 21); + ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); + return (xfs_fsblock_t)b; +#else /* !DEBUG */ + return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21); +#endif /* DEBUG */ +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Extract the startoff field from an in memory bmap extent record. 
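+ * (The startoff occupies bits 9-62 of l0; the top bit is the extent
+ * flag and the low nine bits belong to startblock.)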
+ */ +xfs_fileoff_t +xfs_bmbt_get_startoff( + xfs_bmbt_rec_t *r) +{ + return ((xfs_fileoff_t)r->l0 & + XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; +} + +xfs_exntst_t +xfs_bmbt_get_state( + xfs_bmbt_rec_t *r) +{ + int ext_flag; + + ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN)); + return xfs_extent_state(xfs_bmbt_get_blockcount(r), + ext_flag); +} + +#if ARCH_CONVERT != ARCH_NOCONVERT +/* Endian flipping versions of the bmbt extraction functions */ +void +xfs_bmbt_disk_get_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s) +{ + __uint64_t l0, l1; + + l0 = INT_GET(r->l0, ARCH_CONVERT); + l1 = INT_GET(r->l1, ARCH_CONVERT); + + __xfs_bmbt_get_all(l0, l1, s); +} + +/* + * Extract the blockcount field from an on disk bmap extent record. + */ +xfs_filblks_t +xfs_bmbt_disk_get_blockcount( + xfs_bmbt_rec_t *r) +{ + return (xfs_filblks_t)(INT_GET(r->l1, ARCH_CONVERT) & XFS_MASK64LO(21)); +} + +/* + * Extract the startblock field from an on disk bmap extent record. + */ +xfs_fsblock_t +xfs_bmbt_disk_get_startblock( + xfs_bmbt_rec_t *r) +{ +#if XFS_BIG_FILESYSTEMS + return (((xfs_fsblock_t)INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(9)) << 43) | + (((xfs_fsblock_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21); +#else +#ifdef DEBUG + xfs_dfsbno_t b; + + b = (((xfs_dfsbno_t)INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(9)) << 43) | + (((xfs_dfsbno_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21); + ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); + return (xfs_fsblock_t)b; +#else /* !DEBUG */ + return (xfs_fsblock_t)(((xfs_dfsbno_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21); +#endif /* DEBUG */ +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Extract the startoff field from a disk format bmap extent record. + */ +xfs_fileoff_t +xfs_bmbt_disk_get_startoff( + xfs_bmbt_rec_t *r) +{ + return ((xfs_fileoff_t)INT_GET(r->l0, ARCH_CONVERT) & + XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; +} + +xfs_exntst_t +xfs_bmbt_disk_get_state( + xfs_bmbt_rec_t *r) +{ + int ext_flag; + + ext_flag = (int)((INT_GET(r->l0, ARCH_CONVERT)) >> (64 - BMBT_EXNTFLAG_BITLEN)); + return xfs_extent_state(xfs_bmbt_disk_get_blockcount(r), + ext_flag); +} +#endif + + +/* + * Increment cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. 
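+ * If the current block is exhausted, walk up until a level with more
+ * entries remains, then back down along the leftmost child path.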
+ */ +int /* error */ +xfs_bmbt_increment( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_increment"; +#endif + xfs_fsblock_t fsbno; + int lev; + xfs_mount_t *mp; + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + ASSERT(level < cur->bc_nlevels); + if (level < cur->bc_nlevels - 1) + xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); + block = xfs_bmbt_get_block(cur, level, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + block = xfs_bmbt_get_block(cur, lev, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) + break; + if (lev < cur->bc_nlevels - 1) + xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); + } + if (lev == cur->bc_nlevels) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + tp = cur->bc_tp; + mp = cur->bc_mp; + for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) { + fsbno = INT_GET(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_BMBT_BLOCK(bp); + if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + cur->bc_ptrs[lev] = 1; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + +/* + * Insert the current record at the point referenced by cur. 
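+ * Splits propagate upward: each xfs_bmbt_insrec call may hand back a
+ * new block and cursor for the level above, and the loop repeats
+ * until an insert completes without splitting.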
+ */ +int /* error */ +xfs_bmbt_insert( + xfs_btree_cur_t *cur, + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_insert"; +#endif + int i; + int level; + xfs_fsblock_t nbno; + xfs_btree_cur_t *ncur; + xfs_bmbt_rec_t nrec; + xfs_btree_cur_t *pcur; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + level = 0; + nbno = NULLFSBLOCK; + xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b); + ncur = (xfs_btree_cur_t *)0; + pcur = cur; + do { + if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur, + &i))) { + if (pcur != cur) + xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) { + cur->bc_nlevels = pcur->bc_nlevels; + cur->bc_private.b.allocated += + pcur->bc_private.b.allocated; + pcur->bc_private.b.allocated = 0; + ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) || + (cur->bc_private.b.ip->i_d.di_flags & + XFS_DIFLAG_REALTIME)); + cur->bc_private.b.firstblock = + pcur->bc_private.b.firstblock; + ASSERT(cur->bc_private.b.flist == + pcur->bc_private.b.flist); + xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); + } + if (ncur) { + pcur = ncur; + ncur = (xfs_btree_cur_t *)0; + } + } while (nbno != NULLFSBLOCK); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = i; + return 0; +error0: + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; +} + +/* + * Log fields from the btree block header. + */ +void +xfs_bmbt_log_block( + xfs_btree_cur_t *cur, + xfs_buf_t *bp, + int fields) +{ + int first; +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_log_block"; +#endif + int last; + xfs_trans_t *tp; + static const short offsets[] = { + offsetof(xfs_bmbt_block_t, bb_magic), + offsetof(xfs_bmbt_block_t, bb_level), + offsetof(xfs_bmbt_block_t, bb_numrecs), + offsetof(xfs_bmbt_block_t, bb_leftsib), + offsetof(xfs_bmbt_block_t, bb_rightsib), + sizeof(xfs_bmbt_block_t) + }; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGBI(cur, bp, fields); + tp = cur->bc_tp; + if (bp) { + xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, + &last); + xfs_trans_log_buf(tp, bp, first, last); + } else + xfs_trans_log_inode(tp, cur->bc_private.b.ip, + XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); +} + +/* + * Log record values from the btree block. 
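+ * Works out the byte range covered by records rfirst through rlast
+ * and logs just that part of the buffer.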
+ */ +void +xfs_bmbt_log_recs( + xfs_btree_cur_t *cur, + xfs_buf_t *bp, + int rfirst, + int rlast) +{ + xfs_bmbt_block_t *block; + int first; +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_log_recs"; +#endif + int last; + xfs_bmbt_rec_t *rp; + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast); + ASSERT(bp); + tp = cur->bc_tp; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + rp = XFS_BMAP_REC_DADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(tp, bp, first, last); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); +} + +int /* error */ +xfs_bmbt_lookup_eq( + xfs_btree_cur_t *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + int *stat) /* success/failure */ +{ + cur->bc_rec.b.br_startoff = off; + cur->bc_rec.b.br_startblock = bno; + cur->bc_rec.b.br_blockcount = len; + return xfs_bmbt_lookup(cur, XFS_LOOKUP_EQ, stat); +} + +int /* error */ +xfs_bmbt_lookup_ge( + xfs_btree_cur_t *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + int *stat) /* success/failure */ +{ + cur->bc_rec.b.br_startoff = off; + cur->bc_rec.b.br_startblock = bno; + cur->bc_rec.b.br_blockcount = len; + return xfs_bmbt_lookup(cur, XFS_LOOKUP_GE, stat); +} + +int /* error */ +xfs_bmbt_lookup_le( + xfs_btree_cur_t *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + int *stat) /* success/failure */ +{ + cur->bc_rec.b.br_startoff = off; + cur->bc_rec.b.br_startblock = bno; + cur->bc_rec.b.br_blockcount = len; + return xfs_bmbt_lookup(cur, XFS_LOOKUP_LE, stat); +} + +/* + * Give the bmap btree a new root block. Copy the old broot contents + * down into a real block and make the broot point to it. + */ +int /* error */ +xfs_bmbt_newroot( + xfs_btree_cur_t *cur, /* btree cursor */ + int *logflags, /* logging flags for inode */ + int *stat) /* return status - 0 fail */ +{ + xfs_alloc_arg_t args; /* allocation arguments */ + xfs_bmbt_block_t *block; /* bmap btree block */ + xfs_buf_t *bp; /* buffer for block */ + xfs_bmbt_block_t *cblock; /* child btree block */ + xfs_bmbt_key_t *ckp; /* child key pointer */ + xfs_bmbt_ptr_t *cpp; /* child ptr pointer */ + int error; /* error return code */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_newroot"; +#endif +#ifdef DEBUG + int i; /* loop counter */ +#endif + xfs_bmbt_key_t *kp; /* pointer to bmap btree key */ + int level; /* btree level */ + xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + level = cur->bc_nlevels - 1; + block = xfs_bmbt_get_block(cur, level, &bp); + /* + * Copy the root into a real block. 
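+ * Allocate a new block, move the old root's contents into it, and
+ * leave the inode-resident root one level higher with a single
+ * entry pointing at the new child.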
+ */ + args.mp = cur->bc_mp; + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); + args.tp = cur->bc_tp; + args.fsbno = cur->bc_private.b.firstblock; + args.mod = args.minleft = args.alignment = args.total = args.isfl = + args.userdata = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; + if (args.fsbno == NULLFSBLOCK) { +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + args.fsbno = INT_GET(*pp, ARCH_CONVERT); + args.type = XFS_ALLOCTYPE_START_BNO; + } else if (args.wasdel) + args.type = XFS_ALLOCTYPE_FIRST_AG; + else + args.type = XFS_ALLOCTYPE_NEAR_BNO; + if ((error = xfs_alloc_vextent(&args))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (args.fsbno == NULLFSBLOCK) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + cur->bc_private.b.firstblock = args.fsbno; + cur->bc_private.b.allocated++; + cur->bc_private.b.ip->i_d.di_nblocks++; + XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip, + XFS_TRANS_DQ_BCOUNT, 1L); + bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0); + cblock = XFS_BUF_TO_BMBT_BLOCK(bp); + *cblock = *block; + INT_MOD(block->bb_level, ARCH_CONVERT, +1); + INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); + cur->bc_nlevels++; + cur->bc_ptrs[level + 1] = 1; + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); + memcpy(ckp, kp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*kp)); + cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memcpy(cpp, pp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*pp)); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)args.fsbno, + level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + INT_SET(*pp, ARCH_CONVERT, args.fsbno); + xfs_iroot_realloc(cur->bc_private.b.ip, 1 - INT_GET(cblock->bb_numrecs, ARCH_CONVERT), + cur->bc_private.b.whichfork); + xfs_btree_setbuf(cur, level, bp); + /* + * Do all this logging at the end so that + * the root is at the right level. + */ + xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS); + xfs_bmbt_log_keys(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); + xfs_bmbt_log_ptrs(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *logflags |= + XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork); + *stat = 1; + return 0; +} + +/* + * Set all the fields in a bmap extent record from the uncompressed form. + */ +void +xfs_bmbt_set_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s) +{ + int extent_flag; + + ASSERT((s->br_state == XFS_EXT_NORM) || + (s->br_state == XFS_EXT_UNWRITTEN)); + extent_flag = (s->br_state == XFS_EXT_NORM) ? 
0 : 1; + ASSERT((s->br_startoff & XFS_MASK64HI(9)) == 0); + ASSERT((s->br_blockcount & XFS_MASK64HI(43)) == 0); +#if XFS_BIG_FILESYSTEMS + ASSERT((s->br_startblock & XFS_MASK64HI(12)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9) | + ((xfs_bmbt_rec_base_t)s->br_startblock >> 43); + r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(s->br_startblock)) { + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9) | + (xfs_bmbt_rec_base_t)XFS_MASK64LO(9); + r->l1 = XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } else { + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9); + r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Set all the fields in a bmap extent record from the arguments. + */ +void +xfs_bmbt_set_allf( + xfs_bmbt_rec_t *r, + xfs_fileoff_t o, + xfs_fsblock_t b, + xfs_filblks_t c, + xfs_exntst_t v) +{ + int extent_flag; + + ASSERT((v == XFS_EXT_NORM) || (v == XFS_EXT_UNWRITTEN)); + extent_flag = (v == XFS_EXT_NORM) ? 0 : 1; + ASSERT((o & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0); + ASSERT((c & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); +#if XFS_BIG_FILESYSTEMS + ASSERT((b & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9) | + ((xfs_bmbt_rec_base_t)b >> 43); + r->l1 = ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(b)) { + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9) | + (xfs_bmbt_rec_base_t)XFS_MASK64LO(9); + r->l1 = XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } else { + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9); + r->l1 = ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} + +#if ARCH_CONVERT != ARCH_NOCONVERT +/* + * Set all the fields in a bmap extent record from the uncompressed form. + */ +void +xfs_bmbt_disk_set_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s) +{ + int extent_flag; + + ASSERT((s->br_state == XFS_EXT_NORM) || + (s->br_state == XFS_EXT_UNWRITTEN)); + extent_flag = (s->br_state == XFS_EXT_NORM) ? 
0 : 1; + ASSERT((s->br_startoff & XFS_MASK64HI(9)) == 0); + ASSERT((s->br_blockcount & XFS_MASK64HI(43)) == 0); +#if XFS_BIG_FILESYSTEMS + ASSERT((s->br_startblock & XFS_MASK64HI(12)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9) | + ((xfs_bmbt_rec_base_t)s->br_startblock >> 43)); + INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(s->br_startblock)) { + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9) | + (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)); + INT_SET(r->l1, ARCH_CONVERT, XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); + } else { + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9)); + INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Set all the fields in a disk format bmap extent record from the arguments. + */ +void +xfs_bmbt_disk_set_allf( + xfs_bmbt_rec_t *r, + xfs_fileoff_t o, + xfs_fsblock_t b, + xfs_filblks_t c, + xfs_exntst_t v) +{ + int extent_flag; + + ASSERT((v == XFS_EXT_NORM) || (v == XFS_EXT_UNWRITTEN)); + extent_flag = (v == XFS_EXT_NORM) ? 0 : 1; + ASSERT((o & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0); + ASSERT((c & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); +#if XFS_BIG_FILESYSTEMS + ASSERT((b & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9) | + ((xfs_bmbt_rec_base_t)b >> 43)); + INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(b)) { + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9) | + (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)); + INT_SET(r->l1, ARCH_CONVERT, XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); + } else { + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9)); + INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} +#endif + +/* + * Set the blockcount field in a bmap extent record. + */ +void +xfs_bmbt_set_blockcount( + xfs_bmbt_rec_t *r, + xfs_filblks_t v) +{ + ASSERT((v & XFS_MASK64HI(43)) == 0); + r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) | + (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21)); +} + +/* + * Set the startblock field in a bmap extent record. 
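+ * On big filesystems the 52-bit startblock straddles both words: the
+ * high nine bits occupy the low bits of l0 and the remaining 43 bits
+ * sit in l1 above the blockcount.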
+ */ +void +xfs_bmbt_set_startblock( + xfs_bmbt_rec_t *r, + xfs_fsblock_t v) +{ +#if XFS_BIG_FILESYSTEMS + ASSERT((v & XFS_MASK64HI(12)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) | + (xfs_bmbt_rec_base_t)(v >> 43); + r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) | + (xfs_bmbt_rec_base_t)(v << 21); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(v)) { + r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9); + r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)v << 21) | + (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } else { + r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9); + r->l1 = ((xfs_bmbt_rec_base_t)v << 21) | + (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Set the startoff field in a bmap extent record. + */ +void +xfs_bmbt_set_startoff( + xfs_bmbt_rec_t *r, + xfs_fileoff_t v) +{ + ASSERT((v & XFS_MASK64HI(9)) == 0); + r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) | + ((xfs_bmbt_rec_base_t)v << 9) | + (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)); +} + +/* + * Set the extent state field in a bmap extent record. + */ +void +xfs_bmbt_set_state( + xfs_bmbt_rec_t *r, + xfs_exntst_t v) +{ + ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN); + if (v == XFS_EXT_NORM) + r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN); + else + r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN); +} + +/* + * Convert in-memory form of btree root to on-disk form. + */ +void +xfs_bmbt_to_bmdr( + xfs_bmbt_block_t *rblock, + int rblocklen, + xfs_bmdr_block_t *dblock, + int dblocklen) +{ + int dmxr; + xfs_bmbt_key_t *fkp; + xfs_bmbt_ptr_t *fpp; + xfs_bmbt_key_t *tkp; + xfs_bmbt_ptr_t *tpp; + + ASSERT(INT_GET(rblock->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC); + ASSERT(INT_GET(rblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO); + ASSERT(INT_GET(rblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO); + ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0); + dblock->bb_level = rblock->bb_level; /* both in on-disk format */ + dblock->bb_numrecs = rblock->bb_numrecs;/* both in on-disk format */ + dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); + fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); + tkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); + fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); + tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); + dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT); + memcpy(tkp, fkp, sizeof(*fkp) * dmxr); + memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ +} + +/* + * Update the record to the passed values. 
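+ * The leaf record is rewritten in place; if it is the first record
+ * in its block, the key at the next level up is updated as well.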
+ */ +int +xfs_bmbt_update( + xfs_btree_cur_t *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + xfs_exntst_t state) +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; + int error; +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_update"; +#endif + xfs_bmbt_key_t key; + int ptr; + xfs_bmbt_rec_t *rp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGFFFI(cur, (xfs_dfiloff_t)off, (xfs_dfsbno_t)bno, + (xfs_dfilblks_t)len, (int)state); + block = xfs_bmbt_get_block(cur, 0, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + ptr = cur->bc_ptrs[0]; + rp = XFS_BMAP_REC_IADDR(block, ptr, cur); + xfs_bmbt_disk_set_allf(rp, off, bno, len, state); + xfs_bmbt_log_recs(cur, bp, ptr, ptr); + if (ptr > 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; + } + INT_SET(key.br_startoff, ARCH_CONVERT, off); + if ((error = xfs_bmbt_updkey(cur, &key, 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; +} + +/* + * Check an extent list, which has just been read, for + * any bit in the extent flag field. ASSERT on debug + * kernels, as this condition should not occur. + * Return an error condition (1) if any flags found, + * otherwise return 0. + */ + +int +xfs_check_nostate_extents( + xfs_bmbt_rec_t *ep, + xfs_extnum_t num) +{ + for (; num > 0; num--, ep++) { + if ((ep->l0 >> + (64 - BMBT_EXNTFLAG_BITLEN)) != 0) { + ASSERT(0); + return 1; + } + } + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_bmap_btree.h linux.21rc5-ac2/fs/xfs/xfs_bmap_btree.h --- linux.21rc5/fs/xfs/xfs_bmap_btree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_bmap_btree.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,713 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BMAP_BTREE_H__ +#define __XFS_BMAP_BTREE_H__ + +#define XFS_BMAP_MAGIC 0x424d4150 /* 'BMAP' */ + +struct xfs_btree_cur; +struct xfs_btree_lblock; +struct xfs_mount; +struct xfs_inode; + +/* + * Bmap root header, on-disk form only. 
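+ * Holds just the level and record count; a root has no siblings, so
+ * null sibling pointers are reconstructed by xfs_bmdr_to_bmbt.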
+ */ +typedef struct xfs_bmdr_block +{ + __uint16_t bb_level; /* 0 is a leaf */ + __uint16_t bb_numrecs; /* current # of data records */ +} xfs_bmdr_block_t; + +/* + * Bmap btree record and extent descriptor. + * For 32-bit kernels, + * l0:31 is an extent flag (value 1 indicates non-normal). + * l0:0-30 and l1:9-31 are startoff. + * l1:0-8, l2:0-31, and l3:21-31 are startblock. + * l3:0-20 are blockcount. + * For 64-bit kernels, + * l0:63 is an extent flag (value 1 indicates non-normal). + * l0:9-62 are startoff. + * l0:0-8 and l1:21-63 are startblock. + * l1:0-20 are blockcount. + */ + +#if __BYTE_ORDER == __LITTLE_ENDIAN + +#define BMBT_TOTAL_BITLEN 128 /* 128 bits, 16 bytes */ +#define BMBT_EXNTFLAG_BITOFF 0 +#define BMBT_EXNTFLAG_BITLEN 1 +#define BMBT_STARTOFF_BITOFF (BMBT_EXNTFLAG_BITOFF + BMBT_EXNTFLAG_BITLEN) +#define BMBT_STARTOFF_BITLEN 54 +#define BMBT_STARTBLOCK_BITOFF (BMBT_STARTOFF_BITOFF + BMBT_STARTOFF_BITLEN) +#define BMBT_STARTBLOCK_BITLEN 52 +#define BMBT_BLOCKCOUNT_BITOFF \ + (BMBT_STARTBLOCK_BITOFF + BMBT_STARTBLOCK_BITLEN) +#define BMBT_BLOCKCOUNT_BITLEN (BMBT_TOTAL_BITLEN - BMBT_BLOCKCOUNT_BITOFF) + +#else + +#define BMBT_TOTAL_BITLEN 128 /* 128 bits, 16 bytes */ +#define BMBT_EXNTFLAG_BITOFF 63 +#define BMBT_EXNTFLAG_BITLEN 1 +#define BMBT_STARTOFF_BITOFF (BMBT_EXNTFLAG_BITOFF - BMBT_STARTOFF_BITLEN) +#define BMBT_STARTOFF_BITLEN 54 +#define BMBT_STARTBLOCK_BITOFF 85 /* 128 - 43 (other 9 is in first word) */ +#define BMBT_STARTBLOCK_BITLEN 52 +#define BMBT_BLOCKCOUNT_BITOFF 64 /* Start of second 64 bit container */ +#define BMBT_BLOCKCOUNT_BITLEN 21 + +#endif + + +#define BMBT_USE_64 1 + +typedef struct xfs_bmbt_rec_32 +{ + __uint32_t l0, l1, l2, l3; +} xfs_bmbt_rec_32_t; +typedef struct xfs_bmbt_rec_64 +{ + __uint64_t l0, l1; +} xfs_bmbt_rec_64_t; + +typedef __uint64_t xfs_bmbt_rec_base_t; /* use this for casts */ +typedef xfs_bmbt_rec_64_t xfs_bmbt_rec_t, xfs_bmdr_rec_t; + +/* + * Values and macros for delayed-allocation startblock fields. + */ +#define STARTBLOCKVALBITS 17 +#define STARTBLOCKMASKBITS (15 + XFS_BIG_FILESYSTEMS * 20) +#define DSTARTBLOCKMASKBITS (15 + 20) +#define STARTBLOCKMASK \ + (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) +#define DSTARTBLOCKMASK \ + (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_ISNULLSTARTBLOCK) +int isnullstartblock(xfs_fsblock_t x); +#define ISNULLSTARTBLOCK(x) isnullstartblock(x) +#else +#define ISNULLSTARTBLOCK(x) (((x) & STARTBLOCKMASK) == STARTBLOCKMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_ISNULLDSTARTBLOCK) +int isnulldstartblock(xfs_dfsbno_t x); +#define ISNULLDSTARTBLOCK(x) isnulldstartblock(x) +#else +#define ISNULLDSTARTBLOCK(x) (((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_NULLSTARTBLOCK) +xfs_fsblock_t nullstartblock(int k); +#define NULLSTARTBLOCK(k) nullstartblock(k) +#else +#define NULLSTARTBLOCK(k) \ + ((ASSERT(k < (1 << STARTBLOCKVALBITS))), (STARTBLOCKMASK | (k))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_STARTBLOCKVAL) +xfs_filblks_t startblockval(xfs_fsblock_t x); +#define STARTBLOCKVAL(x) startblockval(x) +#else +#define STARTBLOCKVAL(x) ((xfs_filblks_t)((x) & ~STARTBLOCKMASK)) +#endif + +/* + * Possible extent formats. + */ +typedef enum { + XFS_EXTFMT_NOSTATE = 0, + XFS_EXTFMT_HASSTATE +} xfs_exntfmt_t; + +/* + * Possible extent states. 
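+ *
+ * Editorial note: XFS_EXT_UNWRITTEN is only meaningful on filesystems
+ * whose superblock carries the extent-flag feature bit; a hedged check
+ * using the macros below would be
+ *
+ *	if (XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_HASSTATE)
+ *		... unwritten extents may appear in this inode ...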
+ */ +typedef enum { + XFS_EXT_NORM, XFS_EXT_UNWRITTEN, + XFS_EXT_DMAPI_OFFLINE +} xfs_exntst_t; + +/* + * Extent state and extent format macros. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXTFMT_INODE ) +xfs_exntfmt_t xfs_extfmt_inode(struct xfs_inode *ip); +#define XFS_EXTFMT_INODE(x) xfs_extfmt_inode(x) +#else +#define XFS_EXTFMT_INODE(x) \ + (XFS_SB_VERSION_HASEXTFLGBIT(&((x)->i_mount->m_sb)) ? \ + XFS_EXTFMT_HASSTATE : XFS_EXTFMT_NOSTATE) +#endif +#define ISUNWRITTEN(x) ((x)->br_state == XFS_EXT_UNWRITTEN) + +/* + * Incore version of above. + */ +typedef struct xfs_bmbt_irec +{ + xfs_fileoff_t br_startoff; /* starting file offset */ + xfs_fsblock_t br_startblock; /* starting block number */ + xfs_filblks_t br_blockcount; /* number of blocks */ + xfs_exntst_t br_state; /* extent state */ +} xfs_bmbt_irec_t; + +/* + * Key structure for non-leaf levels of the tree. + */ +typedef struct xfs_bmbt_key +{ + xfs_dfiloff_t br_startoff; /* starting file offset */ +} xfs_bmbt_key_t, xfs_bmdr_key_t; + +typedef xfs_dfsbno_t xfs_bmbt_ptr_t, xfs_bmdr_ptr_t; /* btree pointer type */ + /* btree block header type */ +typedef struct xfs_btree_lblock xfs_bmbt_block_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_BMBT_BLOCK) +xfs_bmbt_block_t *xfs_buf_to_bmbt_block(struct xfs_buf *bp); +#define XFS_BUF_TO_BMBT_BLOCK(bp) xfs_buf_to_bmbt_block(bp) +#else +#define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)(XFS_BUF_PTR(bp))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_RBLOCK_DSIZE) +int xfs_bmap_rblock_dsize(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_RBLOCK_DSIZE(lev,cur) xfs_bmap_rblock_dsize(lev,cur) +#else +#define XFS_BMAP_RBLOCK_DSIZE(lev,cur) ((cur)->bc_private.b.forksize) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_RBLOCK_ISIZE) +int xfs_bmap_rblock_isize(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_RBLOCK_ISIZE(lev,cur) xfs_bmap_rblock_isize(lev,cur) +#else +#define XFS_BMAP_RBLOCK_ISIZE(lev,cur) \ + ((int)XFS_IFORK_PTR((cur)->bc_private.b.ip, \ + (cur)->bc_private.b.whichfork)->if_broot_bytes) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_IBLOCK_SIZE) +int xfs_bmap_iblock_size(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_IBLOCK_SIZE(lev,cur) xfs_bmap_iblock_size(lev,cur) +#else +#define XFS_BMAP_IBLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_DSIZE) +int xfs_bmap_block_dsize(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_DSIZE(lev,cur) xfs_bmap_block_dsize(lev,cur) +#else +#define XFS_BMAP_BLOCK_DSIZE(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BMAP_RBLOCK_DSIZE(lev,cur) : \ + XFS_BMAP_IBLOCK_SIZE(lev,cur)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_ISIZE) +int xfs_bmap_block_isize(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_ISIZE(lev,cur) xfs_bmap_block_isize(lev,cur) +#else +#define XFS_BMAP_BLOCK_ISIZE(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BMAP_RBLOCK_ISIZE(lev,cur) : \ + XFS_BMAP_IBLOCK_SIZE(lev,cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_DMAXRECS) +int xfs_bmap_block_dmaxrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) xfs_bmap_block_dmaxrecs(lev,cur) +#else +#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? 
\ + XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \ + xfs_bmdr, (lev) == 0) : \ + ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_IMAXRECS) +int xfs_bmap_block_imaxrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur) xfs_bmap_block_imaxrecs(lev,cur) +#else +#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur), \ + xfs_bmbt, (lev) == 0) : \ + ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_DMINRECS) +int xfs_bmap_block_dminrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_DMINRECS(lev,cur) xfs_bmap_block_dminrecs(lev,cur) +#else +#define XFS_BMAP_BLOCK_DMINRECS(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \ + xfs_bmdr, (lev) == 0) : \ + ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_IMINRECS) +int xfs_bmap_block_iminrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_IMINRECS(lev,cur) xfs_bmap_block_iminrecs(lev,cur) +#else +#define XFS_BMAP_BLOCK_IMINRECS(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur), \ + xfs_bmbt, (lev) == 0) : \ + ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_REC_DADDR) +xfs_bmbt_rec_t * +xfs_bmap_rec_daddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_REC_DADDR(bb,i,cur) xfs_bmap_rec_daddr(bb,i,cur) +#else +#define XFS_BMAP_REC_DADDR(bb,i,cur) \ + XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_DSIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_REC_IADDR) +xfs_bmbt_rec_t * +xfs_bmap_rec_iaddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_REC_IADDR(bb,i,cur) xfs_bmap_rec_iaddr(bb,i,cur) +#else +#define XFS_BMAP_REC_IADDR(bb,i,cur) \ + XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_ISIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_KEY_DADDR) +xfs_bmbt_key_t * +xfs_bmap_key_daddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_KEY_DADDR(bb,i,cur) xfs_bmap_key_daddr(bb,i,cur) +#else +#define XFS_BMAP_KEY_DADDR(bb,i,cur) \ + XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_DSIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_KEY_IADDR) +xfs_bmbt_key_t * +xfs_bmap_key_iaddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_KEY_IADDR(bb,i,cur) xfs_bmap_key_iaddr(bb,i,cur) +#else +#define XFS_BMAP_KEY_IADDR(bb,i,cur) \ + XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_ISIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_PTR_DADDR) +xfs_bmbt_ptr_t * +xfs_bmap_ptr_daddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define 
XFS_BMAP_PTR_DADDR(bb,i,cur) xfs_bmap_ptr_daddr(bb,i,cur) +#else +#define XFS_BMAP_PTR_DADDR(bb,i,cur) \ + XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_DSIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_PTR_IADDR) +xfs_bmbt_ptr_t * +xfs_bmap_ptr_iaddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_PTR_IADDR(bb,i,cur) xfs_bmap_ptr_iaddr(bb,i,cur) +#else +#define XFS_BMAP_PTR_IADDR(bb,i,cur) \ + XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_ISIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif + +/* + * These are to be used when we know the size of the block and + * we don't have a cursor. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_REC_ADDR) +xfs_bmbt_rec_t *xfs_bmap_broot_rec_addr(xfs_bmbt_block_t *bb, int i, int sz); +#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) xfs_bmap_broot_rec_addr(bb,i,sz) +#else +#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) \ + XFS_BTREE_REC_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_KEY_ADDR) +xfs_bmbt_key_t *xfs_bmap_broot_key_addr(xfs_bmbt_block_t *bb, int i, int sz); +#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) xfs_bmap_broot_key_addr(bb,i,sz) +#else +#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) \ + XFS_BTREE_KEY_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_PTR_ADDR) +xfs_bmbt_ptr_t *xfs_bmap_broot_ptr_addr(xfs_bmbt_block_t *bb, int i, int sz); +#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) xfs_bmap_broot_ptr_addr(bb,i,sz) +#else +#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \ + XFS_BTREE_PTR_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_NUMRECS) +int xfs_bmap_broot_numrecs(xfs_bmdr_block_t *bb); +#define XFS_BMAP_BROOT_NUMRECS(bb) xfs_bmap_broot_numrecs(bb) +#else +#define XFS_BMAP_BROOT_NUMRECS(bb) (INT_GET((bb)->bb_numrecs, ARCH_CONVERT)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_MAXRECS) +int xfs_bmap_broot_maxrecs(int sz); +#define XFS_BMAP_BROOT_MAXRECS(sz) xfs_bmap_broot_maxrecs(sz) +#else +#define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_SPACE_CALC) +int xfs_bmap_broot_space_calc(int nrecs); +#define XFS_BMAP_BROOT_SPACE_CALC(nrecs) xfs_bmap_broot_space_calc(nrecs) +#else +#define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \ + ((int)(sizeof(xfs_bmbt_block_t) + \ + ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_SPACE) +int xfs_bmap_broot_space(xfs_bmdr_block_t *bb); +#define XFS_BMAP_BROOT_SPACE(bb) xfs_bmap_broot_space(bb) +#else +#define XFS_BMAP_BROOT_SPACE(bb) \ + XFS_BMAP_BROOT_SPACE_CALC(INT_GET((bb)->bb_numrecs, ARCH_CONVERT)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMDR_SPACE_CALC) +int xfs_bmdr_space_calc(int nrecs); +#define XFS_BMDR_SPACE_CALC(nrecs) xfs_bmdr_space_calc(nrecs) +#else +#define XFS_BMDR_SPACE_CALC(nrecs) \ + ((int)(sizeof(xfs_bmdr_block_t) + \ + ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))) +#endif + +/* + * Maximum number of bmap btree levels. 
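+ *
+ * Editorial usage sketch: the second argument selects the fork, e.g.
+ *
+ *	levels = XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
+ *
+ * which simply reads the value precomputed in mp->m_bm_maxlevels[] at
+ * mount time.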
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BM_MAXLEVELS)
+int xfs_bm_maxlevels(struct xfs_mount *mp, int w);
+#define XFS_BM_MAXLEVELS(mp,w) xfs_bm_maxlevels(mp,w)
+#else
+#define XFS_BM_MAXLEVELS(mp,w) ((mp)->m_bm_maxlevels[w])
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_SANITY_CHECK)
+int xfs_bmap_sanity_check(struct xfs_mount *mp, xfs_bmbt_block_t *bb,
+ int level);
+#define XFS_BMAP_SANITY_CHECK(mp,bb,level) \
+ xfs_bmap_sanity_check(mp,bb,level)
+#else
+#define XFS_BMAP_SANITY_CHECK(mp,bb,level) \
+ (INT_GET((bb)->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC && \
+ INT_GET((bb)->bb_level, ARCH_CONVERT) == level && \
+ INT_GET((bb)->bb_numrecs, ARCH_CONVERT) > 0 && \
+ INT_GET((bb)->bb_numrecs, ARCH_CONVERT) <= (mp)->m_bmap_dmxr[(level) != 0])
+#endif
+
+/*
+ * Trace buffer entry types.
+ */
+#define XFS_BMBT_KTRACE_ARGBI 1
+#define XFS_BMBT_KTRACE_ARGBII 2
+#define XFS_BMBT_KTRACE_ARGFFFI 3
+#define XFS_BMBT_KTRACE_ARGI 4
+#define XFS_BMBT_KTRACE_ARGIFK 5
+#define XFS_BMBT_KTRACE_ARGIFR 6
+#define XFS_BMBT_KTRACE_ARGIK 7
+#define XFS_BMBT_KTRACE_CUR 8
+
+#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */
+#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */
+
+#if defined(XFS_ALL_TRACE)
+#define XFS_BMBT_TRACE
+#endif
+
+#if !defined(DEBUG)
+#undef XFS_BMBT_TRACE
+#endif
+
+
+#ifdef __KERNEL__
+
+/*
+ * Prototypes for xfs_bmap.c to call.
+ */
+
+void
+xfs_bmdr_to_bmbt(
+ xfs_bmdr_block_t *,
+ int,
+ xfs_bmbt_block_t *,
+ int);
+
+int
+xfs_bmbt_decrement(
+ struct xfs_btree_cur *,
+ int,
+ int *);
+
+int
+xfs_bmbt_delete(
+ struct xfs_btree_cur *,
+ int *);
+
+void
+xfs_bmbt_get_all(
+ xfs_bmbt_rec_t *r,
+ xfs_bmbt_irec_t *s);
+
+xfs_bmbt_block_t *
+xfs_bmbt_get_block(
+ struct xfs_btree_cur *cur,
+ int level,
+ struct xfs_buf **bpp);
+
+xfs_filblks_t
+xfs_bmbt_get_blockcount(
+ xfs_bmbt_rec_t *r);
+
+xfs_fsblock_t
+xfs_bmbt_get_startblock(
+ xfs_bmbt_rec_t *r);
+
+xfs_fileoff_t
+xfs_bmbt_get_startoff(
+ xfs_bmbt_rec_t *r);
+
+xfs_exntst_t
+xfs_bmbt_get_state(
+ xfs_bmbt_rec_t *r);
+
+#if ARCH_CONVERT != ARCH_NOCONVERT
+void
+xfs_bmbt_disk_get_all(
+ xfs_bmbt_rec_t *r,
+ xfs_bmbt_irec_t *s);
+
+xfs_exntst_t
+xfs_bmbt_disk_get_state(
+ xfs_bmbt_rec_t *r);
+
+xfs_filblks_t
+xfs_bmbt_disk_get_blockcount(
+ xfs_bmbt_rec_t *r);
+
+xfs_fsblock_t
+xfs_bmbt_disk_get_startblock(
+ xfs_bmbt_rec_t *r);
+
+xfs_fileoff_t
+xfs_bmbt_disk_get_startoff(
+ xfs_bmbt_rec_t *r);
+
+#else
+#define xfs_bmbt_disk_get_all(r, s) \
+ xfs_bmbt_get_all(r, s)
+#define xfs_bmbt_disk_get_state(r) \
+ xfs_bmbt_get_state(r)
+#define xfs_bmbt_disk_get_blockcount(r) \
+ xfs_bmbt_get_blockcount(r)
+#define xfs_bmbt_disk_get_startblock(r) \
+ xfs_bmbt_get_startblock(r)
+#define xfs_bmbt_disk_get_startoff(r) \
+ xfs_bmbt_get_startoff(r)
+#endif
+
+int
+xfs_bmbt_increment(
+ struct xfs_btree_cur *,
+ int,
+ int *);
+
+int
+xfs_bmbt_insert(
+ struct xfs_btree_cur *,
+ int *);
+
+int
+xfs_bmbt_insert_many(
+ struct xfs_btree_cur *,
+ int,
+ xfs_bmbt_rec_t *,
+ int *);
+
+void
+xfs_bmbt_log_block(
+ struct xfs_btree_cur *,
+ struct xfs_buf *,
+ int);
+
+void
+xfs_bmbt_log_recs(
+ struct xfs_btree_cur *,
+ struct xfs_buf *,
+ int,
+ int);
+
+int
+xfs_bmbt_lookup_eq(
+ struct xfs_btree_cur *,
+ xfs_fileoff_t,
+ xfs_fsblock_t,
+ xfs_filblks_t,
+ int *);
+
+int
+xfs_bmbt_lookup_ge(
+ struct xfs_btree_cur *,
+ xfs_fileoff_t,
+ xfs_fsblock_t,
+ xfs_filblks_t,
+ int *);
+
+int
+xfs_bmbt_lookup_le(
+ struct xfs_btree_cur *,
+ xfs_fileoff_t,
+ xfs_fsblock_t,
+ 
xfs_filblks_t, + int *); + +/* + * Give the bmap btree a new root block. Copy the old broot contents + * down into a real block and make the broot point to it. + */ +int /* error */ +xfs_bmbt_newroot( + struct xfs_btree_cur *cur, /* btree cursor */ + int *logflags, /* logging flags for inode */ + int *stat); /* return status - 0 fail */ + +void +xfs_bmbt_set_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s); + +void +xfs_bmbt_set_allf( + xfs_bmbt_rec_t *r, + xfs_fileoff_t o, + xfs_fsblock_t b, + xfs_filblks_t c, + xfs_exntst_t v); + +void +xfs_bmbt_set_blockcount( + xfs_bmbt_rec_t *r, + xfs_filblks_t v); + +void +xfs_bmbt_set_startblock( + xfs_bmbt_rec_t *r, + xfs_fsblock_t v); + +void +xfs_bmbt_set_startoff( + xfs_bmbt_rec_t *r, + xfs_fileoff_t v); + +void +xfs_bmbt_set_state( + xfs_bmbt_rec_t *r, + xfs_exntst_t v); + +#if ARCH_CONVERT != ARCH_NOCONVERT +void +xfs_bmbt_disk_set_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s); + +void +xfs_bmbt_disk_set_allf( + xfs_bmbt_rec_t *r, + xfs_fileoff_t o, + xfs_fsblock_t b, + xfs_filblks_t c, + xfs_exntst_t v); +#else +#define xfs_bmbt_disk_set_all(r, s) \ + xfs_bmbt_set_all(r, s) +#define xfs_bmbt_disk_set_allf(r, o, b, c, v) \ + xfs_bmbt_set_allf(r, o, b, c, v) +#endif + +void +xfs_bmbt_to_bmdr( + xfs_bmbt_block_t *, + int, + xfs_bmdr_block_t *, + int); + +int +xfs_bmbt_update( + struct xfs_btree_cur *, + xfs_fileoff_t, + xfs_fsblock_t, + xfs_filblks_t, + xfs_exntst_t); + +#ifdef XFSDEBUG +/* + * Get the data from the pointed-to record. + */ +int +xfs_bmbt_get_rec( + struct xfs_btree_cur *, + xfs_fileoff_t *, + xfs_fsblock_t *, + xfs_filblks_t *, + xfs_exntst_t *, + int *); +#endif + + +/* + * Search an extent list for the extent which includes block + * bno. + */ +xfs_bmbt_rec_t * +xfs_bmap_do_search_extents( + xfs_bmbt_rec_t *, + xfs_extnum_t, + xfs_extnum_t, + xfs_fileoff_t, + int *, + xfs_extnum_t *, + xfs_bmbt_irec_t *, + xfs_bmbt_irec_t *); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_BMAP_BTREE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_bmap.c linux.21rc5-ac2/fs/xfs/xfs_bmap.c --- linux.21rc5/fs/xfs/xfs_bmap.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_bmap.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,6247 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_dmapi.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_itable.h" +#include "xfs_extfree_item.h" +#include "xfs_alloc.h" +#include "xfs_bmap.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_bit.h" +#include "xfs_rw.h" +#include "xfs_quota.h" +#include "xfs_trans_space.h" +#include "xfs_buf_item.h" + +#ifdef DEBUG +ktrace_t *xfs_bmap_trace_buf; +#endif + +#ifdef XFSDEBUG +STATIC void +xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork); +#endif + +kmem_zone_t *xfs_bmap_free_item_zone; + +/* + * Prototypes for internal bmap routines. + */ + + +/* + * Called from xfs_bmap_add_attrfork to handle extents format files. + */ +STATIC int /* error */ +xfs_bmap_add_attrfork_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags); /* inode logging flags */ + +/* + * Called from xfs_bmap_add_attrfork to handle local format files. + */ +STATIC int /* error */ +xfs_bmap_add_attrfork_local( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags); /* inode logging flags */ + +/* + * Called by xfs_bmapi to update extent list structure and the btree + * after allocating space (or doing a delayed allocation). + */ +STATIC int /* error */ +xfs_bmap_add_extent( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_fsblock_t *first, /* pointer to firstblock variable */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + int *logflagsp, /* inode logging flags */ + int whichfork, /* data or attr fork */ + int rsvd); /* OK to allocate reserved blocks */ + +/* + * Called by xfs_bmap_add_extent to handle cases converting a delayed + * allocation to a real allocation. 
+ */ +STATIC int /* error */ +xfs_bmap_add_extent_delay_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ + xfs_fsblock_t *first, /* pointer to firstblock variable */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + int *logflagsp, /* inode logging flags */ + int rsvd); /* OK to allocate reserved blocks */ + +/* + * Called by xfs_bmap_add_extent to handle cases converting a hole + * to a delayed allocation. + */ +STATIC int /* error */ +xfs_bmap_add_extent_hole_delay( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp,/* inode logging flags */ + int rsvd); /* OK to allocate reserved blocks */ + +/* + * Called by xfs_bmap_add_extent to handle cases converting a hole + * to a real allocation. + */ +STATIC int /* error */ +xfs_bmap_add_extent_hole_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp, /* inode logging flags */ + int whichfork); /* data or attr fork */ + +/* + * Called by xfs_bmap_add_extent to handle cases converting an unwritten + * allocation to a real allocation or vice versa. + */ +STATIC int /* error */ +xfs_bmap_add_extent_unwritten_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp); /* inode logging flags */ + +/* + * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. + * It figures out where to ask the underlying allocator to put the new extent. + */ +STATIC int /* error */ +xfs_bmap_alloc( + xfs_bmalloca_t *ap); /* bmap alloc argument struct */ + +/* + * Transform a btree format file with only one leaf node, where the + * extents list will fit in the inode, into an extents format file. + * Since the extent list is already in-core, all we have to do is + * give up the space for the btree root and pitch the leaf block. + */ +STATIC int /* error */ +xfs_bmap_btree_to_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_btree_cur_t *cur, /* btree cursor */ + int *logflagsp, /* inode logging flags */ + int whichfork); /* data or attr fork */ + +#ifdef XFSDEBUG +/* + * Check that the extents list for the inode ip is in the right order. + */ +STATIC void +xfs_bmap_check_extents( + xfs_inode_t *ip, /* incore inode pointer */ + int whichfork); /* data or attr fork */ +#else +#define xfs_bmap_check_extents(ip,w) +#endif + +/* + * Called by xfs_bmapi to update extent list structure and the btree + * after removing space (or undoing a delayed allocation). 
+ */
+STATIC int /* error */
+xfs_bmap_del_extent(
+ xfs_inode_t *ip, /* incore inode pointer */
+ xfs_trans_t *tp, /* current trans pointer */
+ xfs_extnum_t idx, /* extent number to update/insert */
+ xfs_bmap_free_t *flist, /* list of extents to be freed */
+ xfs_btree_cur_t *cur, /* if null, not a btree */
+ xfs_bmbt_irec_t *new, /* new data to put in extent list */
+ int *logflagsp,/* inode logging flags */
+ int whichfork, /* data or attr fork */
+ int rsvd); /* OK to allocate reserved blocks */
+
+/*
+ * Remove the entry "free" from the free item list. Prev points to the
+ * previous entry, unless "free" is the head of the list.
+ */
+STATIC void
+xfs_bmap_del_free(
+ xfs_bmap_free_t *flist, /* free item list header */
+ xfs_bmap_free_item_t *prev, /* previous item on list, if any */
+ xfs_bmap_free_item_t *free); /* list item to be freed */
+
+/*
+ * Remove count entries from the extents array for inode "ip", starting
+ * at index "idx". Copies the remaining items down over the deleted ones,
+ * and gives back the excess memory.
+ */
+STATIC void
+xfs_bmap_delete_exlist(
+ xfs_inode_t *ip, /* incore inode pointer */
+ xfs_extnum_t idx, /* starting delete index */
+ xfs_extnum_t count, /* count of items to delete */
+ int whichfork); /* data or attr fork */
+
+/*
+ * Convert an extents-format file into a btree-format file.
+ * The new file will have a root block (in the inode) and a single child block.
+ */
+STATIC int /* error */
+xfs_bmap_extents_to_btree(
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_inode_t *ip, /* incore inode pointer */
+ xfs_fsblock_t *firstblock, /* first-block-allocated */
+ xfs_bmap_free_t *flist, /* blocks freed in xaction */
+ xfs_btree_cur_t **curp, /* cursor returned to caller */
+ int wasdel, /* converting a delayed alloc */
+ int *logflagsp, /* inode logging flags */
+ int whichfork); /* data or attr fork */
+
+/*
+ * Insert new item(s) in the extent list for inode "ip".
+ * Count new items are inserted at offset idx.
+ */
+STATIC void
+xfs_bmap_insert_exlist(
+ xfs_inode_t *ip, /* incore inode pointer */
+ xfs_extnum_t idx, /* starting index of new items */
+ xfs_extnum_t count, /* number of inserted items */
+ xfs_bmbt_irec_t *new, /* items to insert */
+ int whichfork); /* data or attr fork */
+
+/*
+ * Convert a local file to an extents file.
+ * This code is sort of bogus, since the file data needs to get
+ * logged so it won't be lost. The bmap-level manipulations are ok, though.
+ */
+STATIC int /* error */
+xfs_bmap_local_to_extents(
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_inode_t *ip, /* incore inode pointer */
+ xfs_fsblock_t *firstblock, /* first block allocated in xaction */
+ xfs_extlen_t total, /* total blocks needed by transaction */
+ int *logflagsp, /* inode logging flags */
+ int whichfork); /* data or attr fork */
+
+/*
+ * Search the extents list for the inode, for the extent containing bno.
+ * If bno lies in a hole, point to the next entry. If bno lies past eof,
+ * *eofp will be set, and *prevp will contain the last entry (null if none).
+ * Else, *lastxp will be set to the index of the found
+ * entry; *gotp will contain the entry. 
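+ *
+ * Editorial usage sketch, with hypothetical caller-side names:
+ *
+ *	int		eof;
+ *	xfs_extnum_t	lastx;
+ *	xfs_bmbt_irec_t	got, prev;
+ *
+ *	ep = xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof,
+ *		&lastx, &got, &prev);
+ *	if (!eof && got.br_startoff <= bno)
+ *		... bno falls inside the extent described by got ...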
+ */ +STATIC xfs_bmbt_rec_t * /* pointer to found extent entry */ +xfs_bmap_search_extents( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fileoff_t bno, /* block number searched for */ + int whichfork, /* data or attr fork */ + int *eofp, /* out: end of file found */ + xfs_extnum_t *lastxp, /* out: last extent index */ + xfs_bmbt_irec_t *gotp, /* out: extent entry found */ + xfs_bmbt_irec_t *prevp); /* out: previous extent entry found */ + +#ifdef XFS_BMAP_TRACE +/* + * Add a bmap trace buffer entry. Base routine for the others. + */ +STATIC void +xfs_bmap_trace_addentry( + int opcode, /* operation */ + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(ies) */ + xfs_extnum_t cnt, /* count of entries, 1 or 2 */ + xfs_bmbt_rec_t *r1, /* first record */ + xfs_bmbt_rec_t *r2, /* second record or null */ + int whichfork); /* data or attr fork */ + +/* + * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. + */ +STATIC void +xfs_bmap_trace_delete( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(entries) deleted */ + xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */ + int whichfork); /* data or attr fork */ + +/* + * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or + * reading in the extents list from the disk (in the btree). + */ +STATIC void +xfs_bmap_trace_insert( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(entries) inserted */ + xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */ + xfs_bmbt_irec_t *r1, /* inserted record 1 */ + xfs_bmbt_irec_t *r2, /* inserted record 2 or null */ + int whichfork); /* data or attr fork */ + +/* + * Add bmap trace entry after updating an extent list entry in place. + */ +STATIC void +xfs_bmap_trace_post_update( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry updated */ + int whichfork); /* data or attr fork */ + +/* + * Add bmap trace entry prior to updating an extent list entry in place. + */ +STATIC void +xfs_bmap_trace_pre_update( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry to be updated */ + int whichfork); /* data or attr fork */ + +#else +#define xfs_bmap_trace_delete(f,d,ip,i,c,w) +#define xfs_bmap_trace_insert(f,d,ip,i,c,r1,r2,w) +#define xfs_bmap_trace_post_update(f,d,ip,i,w) +#define xfs_bmap_trace_pre_update(f,d,ip,i,w) +#endif /* XFS_BMAP_TRACE */ + +/* + * Compute the worst-case number of indirect blocks that will be used + * for ip's delayed extent of length "len". + */ +STATIC xfs_filblks_t +xfs_bmap_worst_indlen( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_filblks_t len); /* delayed extent length */ + +#ifdef DEBUG +/* + * Perform various validation checks on the values being returned + * from xfs_bmapi(). 
+ */ +STATIC void +xfs_bmap_validate_ret( + xfs_fileoff_t bno, + xfs_filblks_t len, + int flags, + xfs_bmbt_irec_t *mval, + int nmap, + int ret_nmap); +#else +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) +#endif /* DEBUG */ + +#if defined(DEBUG) && defined(XFS_RW_TRACE) +STATIC void +xfs_bunmap_trace( + xfs_inode_t *ip, + xfs_fileoff_t bno, + xfs_filblks_t len, + int flags, + inst_t *ra); +#else +#define xfs_bunmap_trace(ip, bno, len, flags, ra) +#endif /* DEBUG && XFS_RW_TRACE */ + +STATIC int +xfs_bmap_count_tree( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_fsblock_t blockno, + int levelin, + int *count); + +STATIC int +xfs_bmap_count_leaves( + xfs_bmbt_rec_t *frp, + int numrecs, + int *count); + +/* + * Bmap internal routines. + */ + +/* + * Called from xfs_bmap_add_attrfork to handle btree format files. + */ +STATIC int /* error */ +xfs_bmap_add_attrfork_btree( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags) /* inode logging flags */ +{ + xfs_btree_cur_t *cur; /* btree cursor */ + int error; /* error return value */ + xfs_mount_t *mp; /* file system mount struct */ + int stat; /* newroot status */ + + mp = ip->i_mount; + if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip)) + *flags |= XFS_ILOG_DBROOT; + else { + cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip, + XFS_DATA_FORK); + cur->bc_private.b.flist = flist; + cur->bc_private.b.firstblock = *firstblock; + if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) + goto error0; + ASSERT(stat == 1); /* must be at least one entry */ + if ((error = xfs_bmbt_newroot(cur, flags, &stat))) + goto error0; + if (stat == 0) { + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + return XFS_ERROR(ENOSPC); + } + *firstblock = cur->bc_private.b.firstblock; + cur->bc_private.b.allocated = 0; + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + } + return 0; +error0: + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Called from xfs_bmap_add_attrfork to handle extents format files. + */ +STATIC int /* error */ +xfs_bmap_add_attrfork_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags) /* inode logging flags */ +{ + xfs_btree_cur_t *cur; /* bmap btree cursor */ + int error; /* error return value */ + + if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip)) + return 0; + cur = NULL; + error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0, + flags, XFS_DATA_FORK); + if (cur) { + cur->bc_private.b.allocated = 0; + xfs_btree_del_cursor(cur, + error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + } + return error; +} + +/* + * Called from xfs_bmap_add_attrfork to handle local format files. 
+ */ +STATIC int /* error */ +xfs_bmap_add_attrfork_local( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags) /* inode logging flags */ +{ + xfs_da_args_t dargs; /* args for dir/attr code */ + int error; /* error return value */ + xfs_mount_t *mp; /* mount structure pointer */ + + if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) + return 0; + if ((ip->i_d.di_mode & IFMT) == IFDIR) { + mp = ip->i_mount; + memset(&dargs, 0, sizeof(dargs)); + dargs.dp = ip; + dargs.firstblock = firstblock; + dargs.flist = flist; + dargs.total = mp->m_dirblkfsbs; + dargs.whichfork = XFS_DATA_FORK; + dargs.trans = tp; + error = XFS_DIR_SHORTFORM_TO_SINGLE(mp, &dargs); + } else + error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags, + XFS_DATA_FORK); + return error; +} + +/* + * Called by xfs_bmapi to update extent list structure and the btree + * after allocating space (or doing a delayed allocation). + */ +STATIC int /* error */ +xfs_bmap_add_extent( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_fsblock_t *first, /* pointer to firstblock variable */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + int *logflagsp, /* inode logging flags */ + int whichfork, /* data or attr fork */ + int rsvd) /* OK to use reserved data blocks */ +{ + xfs_btree_cur_t *cur; /* btree cursor or null */ + xfs_filblks_t da_new; /* new count del alloc blocks used */ + xfs_filblks_t da_old; /* old count del alloc blocks used */ + int error; /* error return value */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent"; +#endif + xfs_ifork_t *ifp; /* inode fork ptr */ + int logflags; /* returned value */ + xfs_extnum_t nextents; /* number of extents in file now */ + + XFS_STATS_INC(xfsstats.xs_add_exlist); + cur = *curp; + ifp = XFS_IFORK_PTR(ip, whichfork); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(idx <= nextents); + da_old = da_new = 0; + error = 0; + /* + * This is the first extent added to a new/empty file. + * Special case this one, so other routines get to assume there are + * already extents in the list. + */ + if (nextents == 0) { + xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new, + NULL, whichfork); + xfs_bmap_insert_exlist(ip, 0, 1, new, whichfork); + ASSERT(cur == NULL); + ifp->if_lastex = 0; + if (!ISNULLSTARTBLOCK(new->br_startblock)) { + XFS_IFORK_NEXT_SET(ip, whichfork, 1); + logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); + } else + logflags = 0; + } + /* + * Any kind of new delayed allocation goes here. + */ + else if (ISNULLSTARTBLOCK(new->br_startblock)) { + if (cur) + ASSERT((cur->bc_private.b.flags & + XFS_BTCUR_BPRV_WASDEL) == 0); + if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, cur, new, + &logflags, rsvd))) + goto done; + } + /* + * Real allocation off the end of the file. + */ + else if (idx == nextents) { + if (cur) + ASSERT((cur->bc_private.b.flags & + XFS_BTCUR_BPRV_WASDEL) == 0); + if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new, + &logflags, whichfork))) + goto done; + } else { + xfs_bmbt_irec_t prev; /* old extent at offset idx */ + + /* + * Get the record referred to by idx. 
+ */ + xfs_bmbt_get_all(&ifp->if_u1.if_extents[idx], &prev); + /* + * If it's a real allocation record, and the new allocation ends + * after the start of the referred to record, then we're filling + * in a delayed or unwritten allocation with a real one, or + * converting real back to unwritten. + */ + if (!ISNULLSTARTBLOCK(new->br_startblock) && + new->br_startoff + new->br_blockcount > prev.br_startoff) { + if (prev.br_state != XFS_EXT_UNWRITTEN && + ISNULLSTARTBLOCK(prev.br_startblock)) { + da_old = STARTBLOCKVAL(prev.br_startblock); + if (cur) + ASSERT(cur->bc_private.b.flags & + XFS_BTCUR_BPRV_WASDEL); + if ((error = xfs_bmap_add_extent_delay_real(ip, + idx, &cur, new, &da_new, first, flist, + &logflags, rsvd))) + goto done; + } else if (new->br_state == XFS_EXT_NORM) { + ASSERT(new->br_state == XFS_EXT_NORM); + if ((error = xfs_bmap_add_extent_unwritten_real( + ip, idx, &cur, new, &logflags))) + goto done; + } else { + ASSERT(new->br_state == XFS_EXT_UNWRITTEN); + if ((error = xfs_bmap_add_extent_unwritten_real( + ip, idx, &cur, new, &logflags))) + goto done; + } + ASSERT(*curp == cur || *curp == NULL); + } + /* + * Otherwise we're filling in a hole with an allocation. + */ + else { + if (cur) + ASSERT((cur->bc_private.b.flags & + XFS_BTCUR_BPRV_WASDEL) == 0); + if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, + new, &logflags, whichfork))) + goto done; + } + } + + ASSERT(*curp == cur || *curp == NULL); + /* + * Convert to a btree if necessary. + */ + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) { + int tmp_logflags; /* partial log flag return val */ + + ASSERT(cur == NULL); + error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first, + flist, &cur, da_old > 0, &tmp_logflags, whichfork); + logflags |= tmp_logflags; + if (error) + goto done; + } + /* + * Adjust for changes in reserved delayed indirect blocks. + * Nothing to do for disk quotas here. + */ + if (da_old || da_new) { + xfs_filblks_t nblks; + + nblks = da_new; + if (cur) + nblks += cur->bc_private.b.allocated; + ASSERT(nblks <= da_old); + if (nblks < da_old) + xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, + (int)(da_old - nblks), rsvd); + } + /* + * Clear out the allocated field, done with it now in any case. + */ + if (cur) { + cur->bc_private.b.allocated = 0; + *curp = cur; + } +done: +#ifdef XFSDEBUG + if (!error) + xfs_bmap_check_leaf_extents(*curp, ip, whichfork); +#endif + *logflagsp = logflags; + return error; +} + +/* + * Called by xfs_bmap_add_extent to handle cases converting a delayed + * allocation to a real allocation. 
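+ *
+ * Editorial note: the body encodes the neighbor analysis in four state
+ * bits: LEFT_FILLING/RIGHT_FILLING say whether the new extent reaches
+ * the left/right edge of the delayed extent PREV, and LEFT_CONTIG/
+ * RIGHT_CONTIG say whether it can merge with the adjacent real extent.
+ * The switch below then dispatches on the combination, e.g.
+ *
+ *	case MASK2(LEFT_FILLING, RIGHT_FILLING):
+ *		new replaces all of PREV and merges with neither neighbor.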
+ */ +STATIC int /* error */ +xfs_bmap_add_extent_delay_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ + xfs_fsblock_t *first, /* pointer to firstblock variable */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + int *logflagsp, /* inode logging flags */ + int rsvd) /* OK to use reserved data block allocation */ +{ + xfs_bmbt_rec_t *base; /* base of extent entry list */ + xfs_btree_cur_t *cur; /* btree cursor */ + int diff; /* temp value */ + xfs_bmbt_rec_t *ep; /* extent entry for idx */ + int error; /* error return value */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent_delay_real"; +#endif + int i; /* temp state */ + xfs_fileoff_t new_endoff; /* end offset of new entry */ + xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ + /* left is 0, right is 1, prev is 2 */ + int rval=0; /* return value (logging flags) */ + int state = 0;/* state bits, accessed thru macros */ + xfs_filblks_t temp; /* value for dnew calculations */ + xfs_filblks_t temp2; /* value for dnew calculations */ + int tmp_rval; /* partial logging flags */ + enum { /* bit number definitions for state */ + LEFT_CONTIG, RIGHT_CONTIG, + LEFT_FILLING, RIGHT_FILLING, + LEFT_DELAY, RIGHT_DELAY, + LEFT_VALID, RIGHT_VALID + }; + +#define LEFT r[0] +#define RIGHT r[1] +#define PREV r[2] +#define MASK(b) (1 << (b)) +#define MASK2(a,b) (MASK(a) | MASK(b)) +#define MASK3(a,b,c) (MASK2(a,b) | MASK(c)) +#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d)) +#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b))) +#define STATE_TEST(b) (state & MASK(b)) +#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \ + ((state &= ~MASK(b)), 0)) +#define SWITCH_STATE \ + (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG)) + + /* + * Set up a bunch of variables to make the tests simpler. + */ + cur = *curp; + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + xfs_bmbt_get_all(ep, &PREV); + new_endoff = new->br_startoff + new->br_blockcount; + ASSERT(PREV.br_startoff <= new->br_startoff); + ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); + /* + * Set flags determining what part of the previous delayed allocation + * extent is being replaced by a real allocation. + */ + STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); + STATE_SET(RIGHT_FILLING, + PREV.br_startoff + PREV.br_blockcount == new_endoff); + /* + * Check and set flags if this segment has a left neighbor. + * Don't set contiguous if the combined extent would be too large. + */ + if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { + xfs_bmbt_get_all(ep - 1, &LEFT); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); + } + STATE_SET(LEFT_CONTIG, + STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && + LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && + LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && + LEFT.br_state == new->br_state && + LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); + /* + * Check and set flags if this segment has a right neighbor. + * Don't set contiguous if the combined extent would be too large. + * Also check for all-three-contiguous being too large. 
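+ * Editorial note: MAXEXTLEN caps the blockcount of a single record,
+ * so a three-way merge is only allowed when
+ *
+ *	LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+ *		<= MAXEXTLEN
+ *
+ * which is the final clause of STATE_SET(RIGHT_CONTIG, ...) below.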
+ */ + if (STATE_SET_TEST(RIGHT_VALID, + idx < + ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { + xfs_bmbt_get_all(ep + 1, &RIGHT); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); + } + STATE_SET(RIGHT_CONTIG, + STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && + new_endoff == RIGHT.br_startoff && + new->br_startblock + new->br_blockcount == + RIGHT.br_startblock && + new->br_state == RIGHT.br_state && + new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && + ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) != + MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) || + LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount + <= MAXEXTLEN)); + error = 0; + /* + * Switch out based on the FILLING and CONTIG state bits. + */ + switch (SWITCH_STATE) { + + case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + /* + * Filling in all of a previously delayed allocation extent. + * The left and right neighbors are both contiguous with new. + */ + xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + PREV.br_blockcount + + RIGHT.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + ip->i_d.di_nextents--; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, + RIGHT.br_startblock, + RIGHT.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + + PREV.br_blockcount + + RIGHT.br_blockcount, LEFT.br_state))) + goto done; + } + *dnew = 0; + break; + + case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG): + /* + * Filling in all of a previously delayed allocation extent. + * The left neighbor is contiguous, the right is not. + */ + xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + PREV.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff, + LEFT.br_startblock, LEFT.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + + PREV.br_blockcount, LEFT.br_state))) + goto done; + } + *dnew = 0; + break; + + case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG): + /* + * Filling in all of a previously delayed allocation extent. + * The right neighbor is contiguous, the left is not. 
+ */ + xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_startblock(ep, new->br_startblock); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount + RIGHT.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, + RIGHT.br_startblock, + RIGHT.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, PREV.br_startoff, + new->br_startblock, + PREV.br_blockcount + + RIGHT.br_blockcount, PREV.br_state))) + goto done; + } + *dnew = 0; + break; + + case MASK2(LEFT_FILLING, RIGHT_FILLING): + /* + * Filling in all of a previously delayed allocation extent. + * Neither the left nor right neighbors are contiguous with + * the new one. + */ + xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_startblock(ep, new->br_startblock); + xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + *dnew = 0; + break; + + case MASK2(LEFT_FILLING, LEFT_CONTIG): + /* + * Filling in the first part of a previous delayed allocation. + * The left neighbor is contiguous. + */ + xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + new->br_blockcount); + xfs_bmbt_set_startoff(ep, + PREV.br_startoff + new->br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1, + XFS_DATA_FORK); + temp = PREV.br_blockcount - new->br_blockcount; + xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, temp); + ip->i_df.if_lastex = idx - 1; + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff, + LEFT.br_startblock, LEFT.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + + new->br_blockcount, + LEFT.br_state))) + goto done; + } + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + STARTBLOCKVAL(PREV.br_startblock)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx, + XFS_DATA_FORK); + *dnew = temp; + break; + + case MASK(LEFT_FILLING): + /* + * Filling in the first part of a previous delayed allocation. + * The left neighbor is not contiguous. 
+ */ + xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_startoff(ep, new_endoff); + temp = PREV.br_blockcount - new->br_blockcount; + xfs_bmbt_set_blockcount(ep, temp); + xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_nextents > ip->i_df.if_ext_max) { + error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + first, flist, &cur, 1, &tmp_rval, + XFS_DATA_FORK); + rval |= tmp_rval; + if (error) + goto done; + } + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + STARTBLOCKVAL(PREV.br_startblock) - + (cur ? cur->bc_private.b.allocated : 0)); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx + 1]; + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1, + XFS_DATA_FORK); + *dnew = temp; + break; + + case MASK2(RIGHT_FILLING, RIGHT_CONTIG): + /* + * Filling in the last part of a previous delayed allocation. + * The right neighbor is contiguous with the new allocation. + */ + temp = PREV.br_blockcount - new->br_blockcount; + xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx, + XFS_DATA_FORK); + xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, temp); + xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, + new->br_blockcount + RIGHT.br_blockcount, + RIGHT.br_state); + xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, + RIGHT.br_startblock, + RIGHT.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, new->br_startoff, + new->br_startblock, + new->br_blockcount + + RIGHT.br_blockcount, + RIGHT.br_state))) + goto done; + } + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + STARTBLOCKVAL(PREV.br_startblock)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx, + XFS_DATA_FORK); + *dnew = temp; + break; + + case MASK(RIGHT_FILLING): + /* + * Filling in the last part of a previous delayed allocation. + * The right neighbor is not contiguous. 
+ */ + temp = PREV.br_blockcount - new->br_blockcount; + xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, temp); + xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, + new, NULL, XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_nextents > ip->i_df.if_ext_max) { + error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + first, flist, &cur, 1, &tmp_rval, + XFS_DATA_FORK); + rval |= tmp_rval; + if (error) + goto done; + } + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + STARTBLOCKVAL(PREV.br_startblock) - + (cur ? cur->bc_private.b.allocated : 0)); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); + *dnew = temp; + break; + + case 0: + /* + * Filling in the middle part of a previous delayed allocation. + * Contiguity is impossible here. + * This case is avoided almost all the time. + */ + temp = new->br_startoff - PREV.br_startoff; + xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, temp); + r[0] = *new; + r[1].br_startoff = new_endoff; + temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; + r[1].br_blockcount = temp2; + xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_nextents > ip->i_df.if_ext_max) { + error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + first, flist, &cur, 1, &tmp_rval, + XFS_DATA_FORK); + rval |= tmp_rval; + if (error) + goto done; + } + temp = xfs_bmap_worst_indlen(ip, temp); + temp2 = xfs_bmap_worst_indlen(ip, temp2); + diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) - + (cur ? cur->bc_private.b.allocated : 0)); + if (diff > 0 && + xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -diff, rsvd)) { + /* + * Ick gross gag me with a spoon. + */ + ASSERT(0); /* want to see if this ever happens! 
*/ + while (diff > 0) { + if (temp) { + temp--; + diff--; + if (!diff || + !xfs_mod_incore_sb(ip->i_mount, + XFS_SBS_FDBLOCKS, -diff, rsvd)) + break; + } + if (temp2) { + temp2--; + diff--; + if (!diff || + !xfs_mod_incore_sb(ip->i_mount, + XFS_SBS_FDBLOCKS, -diff, rsvd)) + break; + } + } + } + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK); + xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2, + XFS_DATA_FORK); + xfs_bmbt_set_startblock(ep + 2, NULLSTARTBLOCK((int)temp2)); + xfs_bmap_trace_post_update(fname, "0", ip, idx + 2, + XFS_DATA_FORK); + *dnew = temp + temp2; + break; + + case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + case MASK2(LEFT_FILLING, RIGHT_CONTIG): + case MASK2(RIGHT_FILLING, LEFT_CONTIG): + case MASK2(LEFT_CONTIG, RIGHT_CONTIG): + case MASK(LEFT_CONTIG): + case MASK(RIGHT_CONTIG): + /* + * These cases are all impossible. + */ + ASSERT(0); + } + *curp = cur; +done: + *logflagsp = rval; + return error; +#undef LEFT +#undef RIGHT +#undef PREV +#undef MASK +#undef MASK2 +#undef MASK3 +#undef MASK4 +#undef STATE_SET +#undef STATE_TEST +#undef STATE_SET_TEST +#undef SWITCH_STATE +} + +/* + * Called by xfs_bmap_add_extent to handle cases converting an unwritten + * allocation to a real allocation or vice versa. + */ +STATIC int /* error */ +xfs_bmap_add_extent_unwritten_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp) /* inode logging flags */ +{ + xfs_bmbt_rec_t *base; /* base of extent entry list */ + xfs_btree_cur_t *cur; /* btree cursor */ + xfs_bmbt_rec_t *ep; /* extent entry for idx */ + int error; /* error return value */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent_unwritten_real"; +#endif + int i; /* temp state */ + xfs_fileoff_t new_endoff; /* end offset of new entry */ + xfs_exntst_t newext; /* new extent state */ + xfs_exntst_t oldext; /* old extent state */ + xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ + /* left is 0, right is 1, prev is 2 */ + int rval=0; /* return value (logging flags) */ + int state = 0;/* state bits, accessed thru macros */ + enum { /* bit number definitions for state */ + LEFT_CONTIG, RIGHT_CONTIG, + LEFT_FILLING, RIGHT_FILLING, + LEFT_DELAY, RIGHT_DELAY, + LEFT_VALID, RIGHT_VALID + }; + +#define LEFT r[0] +#define RIGHT r[1] +#define PREV r[2] +#define MASK(b) (1 << (b)) +#define MASK2(a,b) (MASK(a) | MASK(b)) +#define MASK3(a,b,c) (MASK2(a,b) | MASK(c)) +#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d)) +#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b))) +#define STATE_TEST(b) (state & MASK(b)) +#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \ + ((state &= ~MASK(b)), 0)) +#define SWITCH_STATE \ + (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG)) + + /* + * Set up a bunch of variables to make the tests simpler. + */ + error = 0; + cur = *curp; + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + xfs_bmbt_get_all(ep, &PREV); + newext = new->br_state; + oldext = (newext == XFS_EXT_UNWRITTEN) ? 
+ XFS_EXT_NORM : XFS_EXT_UNWRITTEN; + ASSERT(PREV.br_state == oldext); + new_endoff = new->br_startoff + new->br_blockcount; + ASSERT(PREV.br_startoff <= new->br_startoff); + ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); + /* + * Set flags determining what part of the previous oldext allocation + * extent is being replaced by a newext allocation. + */ + STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); + STATE_SET(RIGHT_FILLING, + PREV.br_startoff + PREV.br_blockcount == new_endoff); + /* + * Check and set flags if this segment has a left neighbor. + * Don't set contiguous if the combined extent would be too large. + */ + if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { + xfs_bmbt_get_all(ep - 1, &LEFT); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); + } + STATE_SET(LEFT_CONTIG, + STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && + LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && + LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && + LEFT.br_state == newext && + LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); + /* + * Check and set flags if this segment has a right neighbor. + * Don't set contiguous if the combined extent would be too large. + * Also check for all-three-contiguous being too large. + */ + if (STATE_SET_TEST(RIGHT_VALID, + idx < + ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { + xfs_bmbt_get_all(ep + 1, &RIGHT); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); + } + STATE_SET(RIGHT_CONTIG, + STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && + new_endoff == RIGHT.br_startoff && + new->br_startblock + new->br_blockcount == + RIGHT.br_startblock && + newext == RIGHT.br_state && + new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && + ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) != + MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) || + LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount + <= MAXEXTLEN)); + /* + * Switch out based on the FILLING and CONTIG state bits. + */ + switch (SWITCH_STATE) { + + case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + /* + * Setting all of a previous oldext extent to newext. + * The left and right neighbors are both contiguous with new. 
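+ * For example, LEFT/PREV/RIGHT covering [0,5)/[5,8)/[8,12) collapse
+ * into one newext extent over [0,12): LEFT absorbs both block
+ * counts and the entries at idx and idx + 1 are deleted.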
+ */ + xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + PREV.br_blockcount + + RIGHT.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + ip->i_d.di_nextents -= 2; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, + RIGHT.br_startblock, + RIGHT.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + PREV.br_blockcount + + RIGHT.br_blockcount, LEFT.br_state))) + goto done; + } + break; + + case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG): + /* + * Setting all of a previous oldext extent to newext. + * The left neighbor is contiguous, the right is not. + */ + xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + PREV.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + ip->i_d.di_nextents--; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, PREV.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + PREV.br_blockcount, + LEFT.br_state))) + goto done; + } + break; + + case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG): + /* + * Setting all of a previous oldext extent to newext. + * The right neighbor is contiguous, the left is not. 
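+ * PREV absorbs RIGHT and flips to newext in place, so only the
+ * entry at idx + 1 needs to be deleted.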
+ */
+ xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
+ XFS_DATA_FORK);
+ xfs_bmbt_set_blockcount(ep,
+ PREV.br_blockcount + RIGHT.br_blockcount);
+ xfs_bmbt_set_state(ep, newext);
+ xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
+ XFS_DATA_FORK);
+ ip->i_df.if_lastex = idx;
+ xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
+ XFS_DATA_FORK);
+ xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK);
+ ip->i_d.di_nextents--;
+ if (cur == NULL)
+ rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
+ else {
+ rval = XFS_ILOG_CORE;
+ if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+ RIGHT.br_startblock,
+ RIGHT.br_blockcount, &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_delete(cur, &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_update(cur, new->br_startoff,
+ new->br_startblock,
+ new->br_blockcount + RIGHT.br_blockcount,
+ newext)))
+ goto done;
+ }
+ break;
+
+ case MASK2(LEFT_FILLING, RIGHT_FILLING):
+ /*
+ * Setting all of a previous oldext extent to newext.
+ * Neither the left nor right neighbors are contiguous with
+ * the new one.
+ */
+ xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
+ XFS_DATA_FORK);
+ xfs_bmbt_set_state(ep, newext);
+ xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
+ XFS_DATA_FORK);
+ ip->i_df.if_lastex = idx;
+ if (cur == NULL)
+ rval = XFS_ILOG_DEXT;
+ else {
+ rval = 0;
+ if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+ new->br_startblock, new->br_blockcount,
+ &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_update(cur, new->br_startoff,
+ new->br_startblock, new->br_blockcount,
+ newext)))
+ goto done;
+ }
+ break;
+
+ case MASK2(LEFT_FILLING, LEFT_CONTIG):
+ /*
+ * Setting the first part of a previous oldext extent to newext.
+ * The left neighbor is contiguous.
+ */
+ xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
+ XFS_DATA_FORK);
+ xfs_bmbt_set_blockcount(ep - 1,
+ LEFT.br_blockcount + new->br_blockcount);
+ xfs_bmbt_set_startoff(ep,
+ PREV.br_startoff + new->br_blockcount);
+ xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
+ XFS_DATA_FORK);
+ xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
+ XFS_DATA_FORK);
+ xfs_bmbt_set_startblock(ep,
+ new->br_startblock + new->br_blockcount);
+ xfs_bmbt_set_blockcount(ep,
+ PREV.br_blockcount - new->br_blockcount);
+ xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
+ XFS_DATA_FORK);
+ ip->i_df.if_lastex = idx - 1;
+ if (cur == NULL)
+ rval = XFS_ILOG_DEXT;
+ else {
+ rval = 0;
+ if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
+ PREV.br_startblock, PREV.br_blockcount,
+ &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_update(cur,
+ PREV.br_startoff + new->br_blockcount,
+ PREV.br_startblock + new->br_blockcount,
+ PREV.br_blockcount - new->br_blockcount,
+ oldext)))
+ goto done;
+ if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ goto done;
+ if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+ LEFT.br_startblock,
+ LEFT.br_blockcount + new->br_blockcount,
+ LEFT.br_state)))
+ goto done;
+ }
+ break;
+
+ case MASK(LEFT_FILLING):
+ /*
+ * Setting the first part of a previous oldext extent to newext.
+ * The left neighbor is not contiguous.
+ */ + xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK); + ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); + xfs_bmbt_set_startoff(ep, new_endoff); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount - new->br_blockcount); + xfs_bmbt_set_startblock(ep, + new->br_startblock + new->br_blockcount); + xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK); + xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, PREV.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, + PREV.br_startoff + new->br_blockcount, + PREV.br_startblock + new->br_blockcount, + PREV.br_blockcount - new->br_blockcount, + oldext))) + goto done; + cur->bc_rec.b = *new; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + break; + + case MASK2(RIGHT_FILLING, RIGHT_CONTIG): + /* + * Setting the last part of a previous oldext extent to newext. + * The right neighbor is contiguous with the new allocation. + */ + xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx, + XFS_DATA_FORK); + xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount - new->br_blockcount); + xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, + new->br_blockcount + RIGHT.br_blockcount, newext); + xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, + PREV.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, PREV.br_startoff, + PREV.br_startblock, + PREV.br_blockcount - new->br_blockcount, + oldext))) + goto done; + if ((error = xfs_bmbt_increment(cur, 0, &i))) + goto done; + if ((error = xfs_bmbt_update(cur, new->br_startoff, + new->br_startblock, + new->br_blockcount + RIGHT.br_blockcount, + newext))) + goto done; + } + break; + + case MASK(RIGHT_FILLING): + /* + * Setting the last part of a previous oldext extent to newext. + * The right neighbor is not contiguous. 
+ */ + xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount - new->br_blockcount); + xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); + xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, + new, NULL, XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, PREV.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, PREV.br_startoff, + PREV.br_startblock, + PREV.br_blockcount - new->br_blockcount, + oldext))) + goto done; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + break; + + case 0: + /* + * Setting the middle part of a previous oldext extent to + * newext. Contiguity is impossible here. + * One extent becomes three extents. + */ + xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, + new->br_startoff - PREV.br_startoff); + xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK); + r[0] = *new; + r[1].br_startoff = new_endoff; + r[1].br_blockcount = + PREV.br_startoff + PREV.br_blockcount - new_endoff; + r[1].br_startblock = new->br_startblock + new->br_blockcount; + r[1].br_state = oldext; + xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + ip->i_d.di_nextents += 2; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, PREV.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + /* new right extent - oldext */ + if ((error = xfs_bmbt_update(cur, r[1].br_startoff, + r[1].br_startblock, r[1].br_blockcount, + r[1].br_state))) + goto done; + /* new left extent - oldext */ + PREV.br_blockcount = + new->br_startoff - PREV.br_startoff; + cur->bc_rec.b = PREV; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_increment(cur, 0, &i))) + goto done; + ASSERT(i == 1); + /* new middle extent - newext */ + cur->bc_rec.b = *new; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + break; + + case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + case MASK2(LEFT_FILLING, RIGHT_CONTIG): + case MASK2(RIGHT_FILLING, LEFT_CONTIG): + case MASK2(LEFT_CONTIG, RIGHT_CONTIG): + case MASK(LEFT_CONTIG): + case MASK(RIGHT_CONTIG): + /* + * These cases are all impossible. + */ + ASSERT(0); + } + *curp = cur; +done: + *logflagsp = rval; + return error; +#undef LEFT +#undef RIGHT +#undef PREV +#undef MASK +#undef MASK2 +#undef MASK3 +#undef MASK4 +#undef STATE_SET +#undef STATE_TEST +#undef STATE_SET_TEST +#undef SWITCH_STATE +} + +/* + * Called by xfs_bmap_add_extent to handle cases converting a hole + * to a delayed allocation. 
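+ * A delayed extent owns no disk blocks yet: its startblock field
+ * holds NULLSTARTBLOCK(n), where n is the worst-case count of
+ * indirect (bmap btree) blocks reserved for it. Merging neighbors
+ * below therefore recomputes that reservation for the combined
+ * length and gives any surplus back to the free-block count.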
+ */ +/*ARGSUSED*/ +STATIC int /* error */ +xfs_bmap_add_extent_hole_delay( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp, /* inode logging flags */ + int rsvd) /* OK to allocate reserved blocks */ +{ + xfs_bmbt_rec_t *base; /* base of extent entry list */ + xfs_bmbt_rec_t *ep; /* extent list entry for idx */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent_hole_delay"; +#endif + xfs_bmbt_irec_t left; /* left neighbor extent entry */ + xfs_filblks_t newlen=0; /* new indirect size */ + xfs_filblks_t oldlen=0; /* old indirect size */ + xfs_bmbt_irec_t right; /* right neighbor extent entry */ + int state; /* state bits, accessed thru macros */ + xfs_filblks_t temp; /* temp for indirect calculations */ + enum { /* bit number definitions for state */ + LEFT_CONTIG, RIGHT_CONTIG, + LEFT_DELAY, RIGHT_DELAY, + LEFT_VALID, RIGHT_VALID + }; + +#define MASK(b) (1 << (b)) +#define MASK2(a,b) (MASK(a) | MASK(b)) +#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b))) +#define STATE_TEST(b) (state & MASK(b)) +#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \ + ((state &= ~MASK(b)), 0)) +#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG)) + + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + state = 0; + ASSERT(ISNULLSTARTBLOCK(new->br_startblock)); + /* + * Check and set flags if this segment has a left neighbor + */ + if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { + xfs_bmbt_get_all(ep - 1, &left); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); + } + /* + * Check and set flags if the current (right) segment exists. + * If it doesn't exist, we're converting the hole at end-of-file. + */ + if (STATE_SET_TEST(RIGHT_VALID, + idx < + ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { + xfs_bmbt_get_all(ep, &right); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock)); + } + /* + * Set contiguity flags on the left and right neighbors. + * Don't let extents get too large, even if the pieces are contiguous. + */ + STATE_SET(LEFT_CONTIG, + STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) && + left.br_startoff + left.br_blockcount == new->br_startoff && + left.br_blockcount + new->br_blockcount <= MAXEXTLEN); + STATE_SET(RIGHT_CONTIG, + STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) && + new->br_startoff + new->br_blockcount == right.br_startoff && + new->br_blockcount + right.br_blockcount <= MAXEXTLEN && + (!STATE_TEST(LEFT_CONTIG) || + (left.br_blockcount + new->br_blockcount + + right.br_blockcount <= MAXEXTLEN))); + /* + * Switch out based on the contiguity flags. + */ + switch (SWITCH_STATE) { + + case MASK2(LEFT_CONTIG, RIGHT_CONTIG): + /* + * New allocation is contiguous with delayed allocations + * on the left and on the right. + * Merge all three into a single extent list entry. 
+ */ + temp = left.br_blockcount + new->br_blockcount + + right.br_blockcount; + xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, temp); + oldlen = STARTBLOCKVAL(left.br_startblock) + + STARTBLOCKVAL(new->br_startblock) + + STARTBLOCKVAL(right.br_startblock); + newlen = xfs_bmap_worst_indlen(ip, temp); + xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); + xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + break; + + case MASK(LEFT_CONTIG): + /* + * New allocation is contiguous with a delayed allocation + * on the left. + * Merge the new allocation with the left neighbor. + */ + temp = left.br_blockcount + new->br_blockcount; + xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, temp); + oldlen = STARTBLOCKVAL(left.br_startblock) + + STARTBLOCKVAL(new->br_startblock); + newlen = xfs_bmap_worst_indlen(ip, temp); + xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); + xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + break; + + case MASK(RIGHT_CONTIG): + /* + * New allocation is contiguous with a delayed allocation + * on the right. + * Merge the new allocation with the right neighbor. + */ + xfs_bmap_trace_pre_update(fname, "RC", ip, idx, XFS_DATA_FORK); + temp = new->br_blockcount + right.br_blockcount; + oldlen = STARTBLOCKVAL(new->br_startblock) + + STARTBLOCKVAL(right.br_startblock); + newlen = xfs_bmap_worst_indlen(ip, temp); + xfs_bmbt_set_allf(ep, new->br_startoff, + NULLSTARTBLOCK((int)newlen), temp, right.br_state); + xfs_bmap_trace_post_update(fname, "RC", ip, idx, XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + break; + + case 0: + /* + * New allocation is not contiguous with another + * delayed allocation. + * Insert a new entry. + */ + oldlen = newlen = 0; + xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + break; + } + if (oldlen != newlen) { + ASSERT(oldlen > newlen); + xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, + (int)(oldlen - newlen), rsvd); + /* + * Nothing to do for disk quota accounting here. + */ + } + *logflagsp = 0; + return 0; +#undef MASK +#undef MASK2 +#undef STATE_SET +#undef STATE_TEST +#undef STATE_SET_TEST +#undef SWITCH_STATE +} + +/* + * Called by xfs_bmap_add_extent to handle cases converting a hole + * to a real allocation. + */ +STATIC int /* error */ +xfs_bmap_add_extent_hole_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp, /* inode logging flags */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *ep; /* pointer to extent entry ins. 
point */ + int error; /* error return value */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent_hole_real"; +#endif + int i; /* temp state */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_irec_t left; /* left neighbor extent entry */ + xfs_bmbt_irec_t right; /* right neighbor extent entry */ + int state; /* state bits, accessed thru macros */ + enum { /* bit number definitions for state */ + LEFT_CONTIG, RIGHT_CONTIG, + LEFT_DELAY, RIGHT_DELAY, + LEFT_VALID, RIGHT_VALID + }; + +#define MASK(b) (1 << (b)) +#define MASK2(a,b) (MASK(a) | MASK(b)) +#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b))) +#define STATE_TEST(b) (state & MASK(b)) +#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \ + ((state &= ~MASK(b)), 0)) +#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG)) + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); + ep = &ifp->if_u1.if_extents[idx]; + state = 0; + /* + * Check and set flags if this segment has a left neighbor. + */ + if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { + xfs_bmbt_get_all(ep - 1, &left); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); + } + /* + * Check and set flags if this segment has a current value. + * Not true if we're inserting into the "hole" at eof. + */ + if (STATE_SET_TEST(RIGHT_VALID, + idx < + ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { + xfs_bmbt_get_all(ep, &right); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock)); + } + /* + * We're inserting a real allocation between "left" and "right". + * Set the contiguity flags. Don't let extents get too large. + */ + STATE_SET(LEFT_CONTIG, + STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && + left.br_startoff + left.br_blockcount == new->br_startoff && + left.br_startblock + left.br_blockcount == new->br_startblock && + left.br_state == new->br_state && + left.br_blockcount + new->br_blockcount <= MAXEXTLEN); + STATE_SET(RIGHT_CONTIG, + STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && + new->br_startoff + new->br_blockcount == right.br_startoff && + new->br_startblock + new->br_blockcount == + right.br_startblock && + new->br_state == right.br_state && + new->br_blockcount + right.br_blockcount <= MAXEXTLEN && + (!STATE_TEST(LEFT_CONTIG) || + left.br_blockcount + new->br_blockcount + + right.br_blockcount <= MAXEXTLEN)); + + /* + * Select which case we're in here, and implement it. + */ + switch (SWITCH_STATE) { + + case MASK2(LEFT_CONTIG, RIGHT_CONTIG): + /* + * New allocation is contiguous with real allocations on the + * left and on the right. + * Merge all three into a single extent list entry. 
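+ * LEFT absorbs both new and RIGHT and the entry at idx (RIGHT) is
+ * deleted; since new was never a separate entry, the fork's extent
+ * count drops by just one.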
+ */ + xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, + whichfork); + xfs_bmbt_set_blockcount(ep - 1, + left.br_blockcount + new->br_blockcount + + right.br_blockcount); + xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, + whichfork); + xfs_bmap_trace_delete(fname, "LC|RC", ip, + idx, 1, whichfork); + xfs_bmap_delete_exlist(ip, idx, 1, whichfork); + ifp->if_lastex = idx - 1; + XFS_IFORK_NEXT_SET(ip, whichfork, + XFS_IFORK_NEXTENTS(ip, whichfork) - 1); + if (cur == NULL) { + *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); + return 0; + } + *logflagsp = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff, + right.br_startblock, right.br_blockcount, &i))) + return error; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + return error; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + return error; + ASSERT(i == 1); + error = xfs_bmbt_update(cur, left.br_startoff, + left.br_startblock, + left.br_blockcount + new->br_blockcount + + right.br_blockcount, left.br_state); + return error; + + case MASK(LEFT_CONTIG): + /* + * New allocation is contiguous with a real allocation + * on the left. + * Merge the new allocation with the left neighbor. + */ + xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork); + xfs_bmbt_set_blockcount(ep - 1, + left.br_blockcount + new->br_blockcount); + xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork); + ifp->if_lastex = idx - 1; + if (cur == NULL) { + *logflagsp = XFS_ILOG_FEXT(whichfork); + return 0; + } + *logflagsp = 0; + if ((error = xfs_bmbt_lookup_eq(cur, left.br_startoff, + left.br_startblock, left.br_blockcount, &i))) + return error; + ASSERT(i == 1); + error = xfs_bmbt_update(cur, left.br_startoff, + left.br_startblock, + left.br_blockcount + new->br_blockcount, + left.br_state); + return error; + + case MASK(RIGHT_CONTIG): + /* + * New allocation is contiguous with a real allocation + * on the right. + * Merge the new allocation with the right neighbor. + */ + xfs_bmap_trace_pre_update(fname, "RC", ip, idx, whichfork); + xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock, + new->br_blockcount + right.br_blockcount, + right.br_state); + xfs_bmap_trace_post_update(fname, "RC", ip, idx, whichfork); + ifp->if_lastex = idx; + if (cur == NULL) { + *logflagsp = XFS_ILOG_FEXT(whichfork); + return 0; + } + *logflagsp = 0; + if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff, + right.br_startblock, right.br_blockcount, &i))) + return error; + ASSERT(i == 1); + error = xfs_bmbt_update(cur, new->br_startoff, + new->br_startblock, + new->br_blockcount + right.br_blockcount, + right.br_state); + return error; + + case 0: + /* + * New allocation is not contiguous with another + * real allocation. + * Insert a new entry. 
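+ * In the btree case the record is looked up first to position the
+ * cursor (expecting i == 0, i.e. not found) and then inserted.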
+ */
+ xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
+ whichfork);
+ xfs_bmap_insert_exlist(ip, idx, 1, new, whichfork);
+ ifp->if_lastex = idx;
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+ if (cur == NULL) {
+ *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+ return 0;
+ }
+ *logflagsp = XFS_ILOG_CORE;
+ if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+ new->br_startblock, new->br_blockcount, &i)))
+ return error;
+ ASSERT(i == 0);
+ cur->bc_rec.b.br_state = new->br_state;
+ if ((error = xfs_bmbt_insert(cur, &i)))
+ return error;
+ ASSERT(i == 1);
+ return 0;
+ }
+#undef MASK
+#undef MASK2
+#undef STATE_SET
+#undef STATE_TEST
+#undef STATE_SET_TEST
+#undef SWITCH_STATE
+ /* NOTREACHED */
+ ASSERT(0);
+ return 0; /* keep gcc quiet */
+}
+
+#define XFS_ALLOC_GAP_UNITS 4
+
+/*
+ * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
+ * It figures out where to ask the underlying allocator to put the new extent.
+ */
+STATIC int /* error */
+xfs_bmap_alloc(
+ xfs_bmalloca_t *ap) /* bmap alloc argument struct */
+{
+ xfs_fsblock_t adjust; /* adjustment to block numbers */
+ xfs_alloctype_t atype=0; /* type for allocation routines */
+ int error; /* error return value */
+ xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
+ xfs_mount_t *mp; /* mount point structure */
+ int nullfb; /* true if ap->firstblock isn't set */
+ int rt; /* true if inode is realtime */
+#ifdef __KERNEL__
+ xfs_extlen_t prod=0; /* product factor for allocators */
+ xfs_extlen_t ralen=0; /* realtime allocation length */
+#endif
+
+#define ISLEGAL(x,y) \
+ (rt ? \
+ (x) < mp->m_sb.sb_rblocks : \
+ XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
+ XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
+ XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
+
+ /*
+ * Set up variables.
+ */
+ mp = ap->ip->i_mount;
+ nullfb = ap->firstblock == NULLFSBLOCK;
+ rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
+ fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
+#ifdef __KERNEL__
+ if (rt) {
+ xfs_extlen_t extsz; /* file extent size for rt */
+ xfs_fileoff_t nexto; /* next file offset */
+ xfs_extlen_t orig_alen; /* original ap->alen */
+ xfs_fileoff_t orig_end; /* original off+len */
+ xfs_fileoff_t orig_off; /* original ap->off */
+ xfs_extlen_t mod_off; /* modulus calculations */
+ xfs_fileoff_t prevo; /* previous file offset */
+ xfs_rtblock_t rtx; /* realtime extent number */
+ xfs_extlen_t temp; /* temp for rt calculations */
+
+ /*
+ * Set prod to match the realtime extent size.
+ */
+ if (!(extsz = ap->ip->i_d.di_extsize))
+ extsz = mp->m_sb.sb_rextsize;
+ prod = extsz / mp->m_sb.sb_rextsize;
+ orig_off = ap->off;
+ orig_alen = ap->alen;
+ orig_end = orig_off + orig_alen;
+ /*
+ * If the file offset is unaligned vs. the extent size
+ * we need to align it. This will be possible unless
+ * the file was previously written with a kernel that didn't
+ * perform this alignment.
+ */
+ mod_off = do_mod(orig_off, extsz);
+ if (mod_off) {
+ ap->alen += mod_off;
+ ap->off -= mod_off;
+ }
+ /*
+ * Same adjustment for the end of the requested area.
+ */
+ if ((temp = (ap->alen % extsz)))
+ ap->alen += extsz - temp;
+ /*
+ * If the previous block overlaps with this proposed allocation
+ * then move the start forward without adjusting the length.
+ */
+ prevo =
+ ap->prevp->br_startoff == NULLFILEOFF ?
+ 0 :
+ (ap->prevp->br_startoff +
+ ap->prevp->br_blockcount);
+ if (ap->off != orig_off && ap->off < prevo)
+ ap->off = prevo;
+ /*
+ * If the next block overlaps with this proposed allocation
+ * then move the start back without adjusting the length,
+ * but not before offset 0.
+ * This may of course make the start overlap the previous block,
+ * and if we hit the offset 0 limit then the next block
+ * can still overlap too.
+ */
+ nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ?
+ NULLFILEOFF : ap->gotp->br_startoff;
+ if (!ap->eof &&
+ ap->off + ap->alen != orig_end &&
+ ap->off + ap->alen > nexto)
+ ap->off = nexto > ap->alen ? nexto - ap->alen : 0;
+ /*
+ * If we're now overlapping the next or previous extent that
+ * means we can't fit an extsz piece in this hole. Just move
+ * the start forward to the first legal spot and set
+ * the length so we hit the end.
+ */
+ if ((ap->off != orig_off && ap->off < prevo) ||
+ (ap->off + ap->alen != orig_end &&
+ ap->off + ap->alen > nexto)) {
+ ap->off = prevo;
+ ap->alen = nexto - prevo;
+ }
+ /*
+ * If the result isn't a multiple of rtextents we need to
+ * remove blocks until it is.
+ */
+ if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) {
+ /*
+ * We're not covering the original request, or
+ * we won't be able to once we fix the length.
+ */
+ if (orig_off < ap->off ||
+ orig_end > ap->off + ap->alen ||
+ ap->alen - temp < orig_alen)
+ return XFS_ERROR(EINVAL);
+ /*
+ * Try to fix it by moving the start up.
+ */
+ if (ap->off + temp <= orig_off) {
+ ap->alen -= temp;
+ ap->off += temp;
+ }
+ /*
+ * Try to fix it by moving the end in.
+ */
+ else if (ap->off + ap->alen - temp >= orig_end)
+ ap->alen -= temp;
+ /*
+ * Set the start to the minimum then trim the length.
+ */
+ else {
+ ap->alen -= orig_off - ap->off;
+ ap->off = orig_off;
+ ap->alen -= ap->alen % mp->m_sb.sb_rextsize;
+ }
+ /*
+ * Result doesn't cover the request, fail it.
+ */
+ if (orig_off < ap->off || orig_end > ap->off + ap->alen)
+ return XFS_ERROR(EINVAL);
+ }
+ ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
+ /*
+ * If the offset & length are not perfectly aligned
+ * then kill prod, it will just get us in trouble.
+ */
+ if (do_mod(ap->off, extsz) || ap->alen % extsz)
+ prod = 1;
+ /*
+ * Set ralen to be the actual requested length in rtextents.
+ */
+ ralen = ap->alen / mp->m_sb.sb_rextsize;
+ /*
+ * If the old value was close enough to MAXEXTLEN that
+ * we rounded up to it, cut it back so it's legal again.
+ * Note that if it's a really large request (bigger than
+ * MAXEXTLEN), we don't hear about that number, and can't
+ * adjust the starting point to match it.
+ */
+ if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
+ ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
+ /*
+ * If it's an allocation to an empty file at offset 0,
+ * pick an extent that will space things out in the rt area.
+ */
+ if (ap->eof && ap->off == 0) {
+ error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
+ if (error)
+ return error;
+ ap->rval = rtx * mp->m_sb.sb_rextsize;
+ } else
+ ap->rval = 0;
+ }
+#else
+ if (rt)
+ ap->rval = 0;
+#endif /* __KERNEL__ */
+ else if (nullfb)
+ ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
+ else
+ ap->rval = ap->firstblock;
+ /*
+ * If allocating at eof, and there's a previous real block,
+ * try to use its last block as our starting point.
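+ * For example, if the previous extent ends at disk block 1000 and
+ * three file blocks of hole separate it from ap->off, ask for
+ * block 1003 so the file stays roughly sequential on disk.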
+ */ + if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF && + !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && + ISLEGAL(ap->prevp->br_startblock + ap->prevp->br_blockcount, + ap->prevp->br_startblock)) { + ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount; + /* + * Adjust for the gap between prevp and us. + */ + adjust = ap->off - + (ap->prevp->br_startoff + ap->prevp->br_blockcount); + if (adjust && + ISLEGAL(ap->rval + adjust, ap->prevp->br_startblock)) + ap->rval += adjust; + } + /* + * If not at eof, then compare the two neighbor blocks. + * Figure out whether either one gives us a good starting point, + * and pick the better one. + */ + else if (!ap->eof) { + xfs_fsblock_t gotbno; /* right side block number */ + xfs_fsblock_t gotdiff=0; /* right side difference */ + xfs_fsblock_t prevbno; /* left side block number */ + xfs_fsblock_t prevdiff=0; /* left side difference */ + + /* + * If there's a previous (left) block, select a requested + * start block based on it. + */ + if (ap->prevp->br_startoff != NULLFILEOFF && + !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && + (prevbno = ap->prevp->br_startblock + + ap->prevp->br_blockcount) && + ISLEGAL(prevbno, ap->prevp->br_startblock)) { + /* + * Calculate gap to end of previous block. + */ + adjust = prevdiff = ap->off - + (ap->prevp->br_startoff + + ap->prevp->br_blockcount); + /* + * Figure the startblock based on the previous block's + * end and the gap size. + * Heuristic! + * If the gap is large relative to the piece we're + * allocating, or using it gives us an illegal block + * number, then just use the end of the previous block. + */ + if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen && + ISLEGAL(prevbno + prevdiff, + ap->prevp->br_startblock)) + prevbno += adjust; + else + prevdiff += adjust; + /* + * If the firstblock forbids it, can't use it, + * must use default. + */ + if (!rt && !nullfb && + XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) + prevbno = NULLFSBLOCK; + } + /* + * No previous block or can't follow it, just default. + */ + else + prevbno = NULLFSBLOCK; + /* + * If there's a following (right) block, select a requested + * start block based on it. + */ + if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) { + /* + * Calculate gap to start of next block. + */ + adjust = gotdiff = ap->gotp->br_startoff - ap->off; + /* + * Figure the startblock based on the next block's + * start and the gap size. + */ + gotbno = ap->gotp->br_startblock; + /* + * Heuristic! + * If the gap is large relative to the piece we're + * allocating, or using it gives us an illegal block + * number, then just use the start of the next block + * offset by our length. + */ + if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen && + ISLEGAL(gotbno - gotdiff, gotbno)) + gotbno -= adjust; + else if (ISLEGAL(gotbno - ap->alen, gotbno)) { + gotbno -= ap->alen; + gotdiff += adjust - ap->alen; + } else + gotdiff += adjust; + /* + * If the firstblock forbids it, can't use it, + * must use default. + */ + if (!rt && !nullfb && + XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) + gotbno = NULLFSBLOCK; + } + /* + * No next block, just default. + */ + else + gotbno = NULLFSBLOCK; + /* + * If both valid, pick the better one, else the only good + * one, else ap->rval is already set (to 0 or the inode block). + */ + if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) + ap->rval = prevdiff <= gotdiff ? 
prevbno : gotbno; + else if (prevbno != NULLFSBLOCK) + ap->rval = prevbno; + else if (gotbno != NULLFSBLOCK) + ap->rval = gotbno; + } + /* + * If allowed, use ap->rval; otherwise must use firstblock since + * it's in the right allocation group. + */ + if (nullfb || rt || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno) + ; + else + ap->rval = ap->firstblock; + /* + * Realtime allocation, done through xfs_rtallocate_extent. + */ + if (rt) { +#ifndef __KERNEL__ + ASSERT(0); +#else + xfs_rtblock_t rtb; + + atype = ap->rval == 0 ? + XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; + do_div(ap->rval, mp->m_sb.sb_rextsize); + rtb = ap->rval; + ap->alen = ralen; + if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen, + &ralen, atype, ap->wasdel, prod, &rtb))) + return error; + if (rtb == NULLFSBLOCK && prod > 1 && + (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, + ap->alen, &ralen, atype, + ap->wasdel, 1, &rtb))) + return error; + ap->rval = rtb; + if (ap->rval != NULLFSBLOCK) { + ap->rval *= mp->m_sb.sb_rextsize; + ralen *= mp->m_sb.sb_rextsize; + ap->alen = ralen; + ap->ip->i_d.di_nblocks += ralen; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= ralen; + /* + * Adjust the disk quota also. This was reserved + * earlier. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : + XFS_TRANS_DQ_RTBCOUNT, + (long) ralen); + } else + ap->alen = 0; +#endif /* __KERNEL__ */ + } + /* + * Normal allocation, done through xfs_alloc_vextent. + */ + else { + xfs_agnumber_t ag; + xfs_alloc_arg_t args; + xfs_extlen_t blen; + xfs_extlen_t delta; + int isaligned; + xfs_extlen_t longest; + xfs_extlen_t need; + xfs_extlen_t nextminlen=0; + int notinit; + xfs_perag_t *pag; + xfs_agnumber_t startag; + int tryagain; + + tryagain = isaligned = 0; + args.tp = ap->tp; + args.mp = mp; + args.fsbno = ap->rval; + args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks); + blen = 0; + if (nullfb) { + args.type = XFS_ALLOCTYPE_START_BNO; + args.total = ap->total; + /* + * Find the longest available space. + * We're going to try for the whole allocation at once. + */ + startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); + notinit = 0; + down_read(&mp->m_peraglock); + while (blen < ap->alen) { + pag = &mp->m_perag[ag]; + if (!pag->pagf_init && + (error = xfs_alloc_pagf_init(mp, args.tp, + ag, XFS_ALLOC_FLAG_TRYLOCK))) { + up_read(&mp->m_peraglock); + return error; + } + /* + * See xfs_alloc_fix_freelist... + */ + if (pag->pagf_init) { + need = XFS_MIN_FREELIST_PAG(pag, mp); + delta = need > pag->pagf_flcount ? + need - pag->pagf_flcount : 0; + longest = (pag->pagf_longest > delta) ? + (pag->pagf_longest - delta) : + (pag->pagf_flcount > 0 || + pag->pagf_longest > 0); + if (blen < longest) + blen = longest; + } else + notinit = 1; + if (++ag == mp->m_sb.sb_agcount) + ag = 0; + if (ag == startag) + break; + } + up_read(&mp->m_peraglock); + /* + * Since the above loop did a BUF_TRYLOCK, it is + * possible that there is space for this request. + */ + if (notinit || blen < ap->minlen) + args.minlen = ap->minlen; + /* + * If the best seen length is less than the request + * length, use the best as the minimum. + */ + else if (blen < ap->alen) + args.minlen = blen; + /* + * Otherwise we've seen an extent as big as alen, + * use that as the minimum. 
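+ * In effect args.minlen becomes blen clamped to the range
+ * [ap->minlen, ap->alen], falling back to ap->minlen when some
+ * AG headers were still uninitialized.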
+ */ + else + args.minlen = ap->alen; + } else if (ap->low) { + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = args.minlen = ap->minlen; + } else { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.total = ap->total; + args.minlen = ap->minlen; + } + if (ap->ip->i_d.di_extsize) { + args.prod = ap->ip->i_d.di_extsize; + if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); + } else if (mp->m_sb.sb_blocksize >= NBPP) { + args.prod = 1; + args.mod = 0; + } else { + args.prod = NBPP >> mp->m_sb.sb_blocklog; + if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); + } + /* + * If we are not low on available data blocks, and the + * underlying logical volume manager is a stripe, and + * the file offset is zero then try to allocate data + * blocks on stripe unit boundary. + * NOTE: ap->aeof is only set if the allocation length + * is >= the stripe unit and the allocation offset is + * at the end of file. + */ + if (!ap->low && ap->aeof) { + if (!ap->off) { + args.alignment = mp->m_dalign; + atype = args.type; + isaligned = 1; + /* + * Adjust for alignment + */ + if (blen > args.alignment && blen <= ap->alen) + args.minlen = blen - args.alignment; + args.minalignslop = 0; + } else { + /* + * First try an exact bno allocation. + * If it fails then do a near or start bno + * allocation with alignment turned on. + */ + atype = args.type; + tryagain = 1; + args.type = XFS_ALLOCTYPE_THIS_BNO; + args.alignment = 1; + /* + * Compute the minlen+alignment for the + * next case. Set slop so that the value + * of minlen+alignment+slop doesn't go up + * between the calls. + */ + if (blen > mp->m_dalign && blen <= ap->alen) + nextminlen = blen - mp->m_dalign; + else + nextminlen = args.minlen; + if (nextminlen + mp->m_dalign > args.minlen + 1) + args.minalignslop = + nextminlen + mp->m_dalign - + args.minlen - 1; + else + args.minalignslop = 0; + } + } else { + args.alignment = 1; + args.minalignslop = 0; + } + args.minleft = ap->minleft; + args.wasdel = ap->wasdel; + args.isfl = 0; + args.userdata = ap->userdata; + if ((error = xfs_alloc_vextent(&args))) + return error; + if (tryagain && args.fsbno == NULLFSBLOCK) { + /* + * Exact allocation failed. Now try with alignment + * turned on. + */ + args.type = atype; + args.fsbno = ap->rval; + args.alignment = mp->m_dalign; + args.minlen = nextminlen; + args.minalignslop = 0; + isaligned = 1; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (isaligned && args.fsbno == NULLFSBLOCK) { + /* + * allocation failed, so turn off alignment and + * try again. 
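+ * This is one step in the fallback chain: exact bno, then
+ * aligned, then unaligned, then a smaller minlen, and finally
+ * any AG at all with ap->low set.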
+ */ + args.type = atype; + args.fsbno = ap->rval; + args.alignment = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (args.fsbno == NULLFSBLOCK && nullfb && + args.minlen > ap->minlen) { + args.minlen = ap->minlen; + args.type = XFS_ALLOCTYPE_START_BNO; + args.fsbno = ap->rval; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (args.fsbno == NULLFSBLOCK && nullfb) { + args.fsbno = 0; + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = ap->minlen; + args.minleft = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + ap->low = 1; + } + if (args.fsbno != NULLFSBLOCK) { + ap->firstblock = ap->rval = args.fsbno; + ASSERT(nullfb || fb_agno == args.agno || + (ap->low && fb_agno < args.agno)); + ap->alen = args.len; + ap->ip->i_d.di_nblocks += args.len; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= args.len; + /* + * Adjust the disk quota also. This was reserved + * earlier. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : + XFS_TRANS_DQ_BCOUNT, + (long) args.len); + } else { + ap->rval = NULLFSBLOCK; + ap->alen = 0; + } + } + return 0; +#undef ISLEGAL +} + +/* + * Transform a btree format file with only one leaf node, where the + * extents list will fit in the inode, into an extents format file. + * Since the extent list is already in-core, all we have to do is + * give up the space for the btree root and pitch the leaf block. + */ +STATIC int /* error */ +xfs_bmap_btree_to_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_btree_cur_t *cur, /* btree cursor */ + int *logflagsp, /* inode logging flags */ + int whichfork) /* data or attr fork */ +{ + /* REFERENCED */ + xfs_bmbt_block_t *cblock;/* child btree block */ + xfs_fsblock_t cbno; /* child block number */ + xfs_buf_t *cbp; /* child block's buffer */ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork data */ + xfs_mount_t *mp; /* mount point structure */ + xfs_bmbt_ptr_t *pp; /* ptr to block address */ + xfs_bmbt_block_t *rblock;/* root btree block */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); + rblock = ifp->if_broot; + ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) == 1); + ASSERT(INT_GET(rblock->bb_numrecs, ARCH_CONVERT) == 1); + ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1); + mp = ip->i_mount; + pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes); + *logflagsp = 0; +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), 1))) + return error; +#endif + cbno = INT_GET(*pp, ARCH_CONVERT); + if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, + XFS_BMAP_BTREE_REF))) + return error; + cblock = XFS_BUF_TO_BMBT_BLOCK(cbp); + if ((error = xfs_btree_check_lblock(cur, cblock, 0, cbp))) + return error; + xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp); + ip->i_d.di_nblocks--; + XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); + xfs_trans_binval(tp, cbp); + if (cur->bc_bufs[0] == cbp) + cur->bc_bufs[0] = NULL; + xfs_iroot_realloc(ip, -1, whichfork); + ASSERT(ifp->if_broot == NULL); + ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); + *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); + return 0; +} + +/* + * Called by xfs_bmapi to update extent list structure and the btree + * after removing 
space (or undoing a delayed allocation). + */ +STATIC int /* error */ +xfs_bmap_del_extent( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_trans_t *tp, /* current transaction pointer */ + xfs_extnum_t idx, /* extent number to update/delete */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *del, /* data to remove from extent list */ + int *logflagsp, /* inode logging flags */ + int whichfork, /* data or attr fork */ + int rsvd) /* OK to allocate reserved blocks */ +{ + xfs_filblks_t da_new; /* new delay-alloc indirect blocks */ + xfs_filblks_t da_old; /* old delay-alloc indirect blocks */ + xfs_fsblock_t del_endblock=0; /* first block past del */ + xfs_fileoff_t del_endoff; /* first offset past del */ + int delay; /* current block is delayed allocated */ + int do_fx; /* free extent at end of routine */ + xfs_bmbt_rec_t *ep; /* current extent entry pointer */ + int error; /* error return value */ + int flags; /* inode logging flags */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_del_extent"; +#endif + xfs_bmbt_irec_t got; /* current extent entry */ + xfs_fileoff_t got_endoff; /* first offset past got */ + int i; /* temp state */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_mount_t *mp; /* mount structure */ + xfs_filblks_t nblks; /* quota/sb block count */ + xfs_bmbt_irec_t new; /* new record to be inserted */ + /* REFERENCED */ + xfs_extnum_t nextents; /* number of extents in list */ + uint qfield; /* quota field to update */ + xfs_filblks_t temp; /* for indirect length calculations */ + xfs_filblks_t temp2; /* for indirect length calculations */ + + XFS_STATS_INC(xfsstats.xs_del_exlist); + mp = ip->i_mount; + ifp = XFS_IFORK_PTR(ip, whichfork); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(idx >= 0 && idx < nextents); + ASSERT(del->br_blockcount > 0); + ep = &ifp->if_u1.if_extents[idx]; + xfs_bmbt_get_all(ep, &got); + ASSERT(got.br_startoff <= del->br_startoff); + del_endoff = del->br_startoff + del->br_blockcount; + got_endoff = got.br_startoff + got.br_blockcount; + ASSERT(got_endoff >= del_endoff); + delay = ISNULLSTARTBLOCK(got.br_startblock); + ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay); + flags = 0; + qfield = 0; + error = 0; + /* + * If deleting a real allocation, must free up the disk space. + */ + if (!delay) { + flags = XFS_ILOG_CORE; + /* + * Realtime allocation. Free it and record di_nblocks update. + */ + if (whichfork == XFS_DATA_FORK && + (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { + xfs_fsblock_t bno; + xfs_filblks_t len; + + ASSERT(do_mod(del->br_blockcount, + mp->m_sb.sb_rextsize) == 0); + ASSERT(do_mod(del->br_startblock, + mp->m_sb.sb_rextsize) == 0); + bno = del->br_startblock; + len = del->br_blockcount; + do_div(bno, mp->m_sb.sb_rextsize); + do_div(len, mp->m_sb.sb_rextsize); + if ((error = xfs_rtfree_extent(ip->i_transp, bno, + (xfs_extlen_t)len))) + goto done; + do_fx = 0; + nblks = len * mp->m_sb.sb_rextsize; + qfield = XFS_TRANS_DQ_RTBCOUNT; + } + /* + * Ordinary allocation. + */ + else { + do_fx = 1; + nblks = del->br_blockcount; + qfield = XFS_TRANS_DQ_BCOUNT; + } + /* + * Set up del_endblock and cur for later. 
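+ * The lookup leaves the cursor positioned at the record being
+ * changed, so the cases below can update or delete it directly.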
+ */ + del_endblock = del->br_startblock + del->br_blockcount; + if (cur) { + if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff, + got.br_startblock, got.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + } + da_old = da_new = 0; + } else { + da_old = STARTBLOCKVAL(got.br_startblock); + da_new = 0; + nblks = 0; + do_fx = 0; + } + /* + * Set flag value to use in switch statement. + * Left-contig is 2, right-contig is 1. + */ + switch (((got.br_startoff == del->br_startoff) << 1) | + (got_endoff == del_endoff)) { + case 3: + /* + * Matches the whole extent. Delete the entry. + */ + xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork); + xfs_bmap_delete_exlist(ip, idx, 1, whichfork); + ifp->if_lastex = idx; + if (delay) + break; + XFS_IFORK_NEXT_SET(ip, whichfork, + XFS_IFORK_NEXTENTS(ip, whichfork) - 1); + flags |= XFS_ILOG_CORE; + if (!cur) { + flags |= XFS_ILOG_FEXT(whichfork); + break; + } + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + break; + + case 2: + /* + * Deleting the first part of the extent. + */ + xfs_bmap_trace_pre_update(fname, "2", ip, idx, whichfork); + xfs_bmbt_set_startoff(ep, del_endoff); + temp = got.br_blockcount - del->br_blockcount; + xfs_bmbt_set_blockcount(ep, temp); + ifp->if_lastex = idx; + if (delay) { + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + da_old); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "2", ip, idx, + whichfork); + da_new = temp; + break; + } + xfs_bmbt_set_startblock(ep, del_endblock); + xfs_bmap_trace_post_update(fname, "2", ip, idx, whichfork); + if (!cur) { + flags |= XFS_ILOG_FEXT(whichfork); + break; + } + if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, + got.br_blockcount - del->br_blockcount, + got.br_state))) + goto done; + break; + + case 1: + /* + * Deleting the last part of the extent. + */ + temp = got.br_blockcount - del->br_blockcount; + xfs_bmap_trace_pre_update(fname, "1", ip, idx, whichfork); + xfs_bmbt_set_blockcount(ep, temp); + ifp->if_lastex = idx; + if (delay) { + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + da_old); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "1", ip, idx, + whichfork); + da_new = temp; + break; + } + xfs_bmap_trace_post_update(fname, "1", ip, idx, whichfork); + if (!cur) { + flags |= XFS_ILOG_FEXT(whichfork); + break; + } + if ((error = xfs_bmbt_update(cur, got.br_startoff, + got.br_startblock, + got.br_blockcount - del->br_blockcount, + got.br_state))) + goto done; + break; + + case 0: + /* + * Deleting the middle of the extent. + */ + temp = del->br_startoff - got.br_startoff; + xfs_bmap_trace_pre_update(fname, "0", ip, idx, whichfork); + xfs_bmbt_set_blockcount(ep, temp); + new.br_startoff = del_endoff; + temp2 = got_endoff - del_endoff; + new.br_blockcount = temp2; + new.br_state = got.br_state; + if (!delay) { + new.br_startblock = del_endblock; + flags |= XFS_ILOG_CORE; + if (cur) { + if ((error = xfs_bmbt_update(cur, + got.br_startoff, + got.br_startblock, temp, + got.br_state))) + goto done; + if ((error = xfs_bmbt_increment(cur, 0, &i))) + goto done; + cur->bc_rec.b = new; + error = xfs_bmbt_insert(cur, &i); + if (error && error != ENOSPC) + goto done; + /* + * If get no-space back from btree insert, + * it tried a split, and we have a zero + * block reservation. + * Fix up our state and return the error. + */ + if (error == ENOSPC) { + /* + * Reset the cursor, don't trust + * it after any insert operation. 
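+ * Even a failed split may have shuffled records around, so
+ * redo the lookup before putting the original values back.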
+ */ + if ((error = xfs_bmbt_lookup_eq(cur, + got.br_startoff, + got.br_startblock, + temp, &i))) + goto done; + ASSERT(i == 1); + /* + * Update the btree record back + * to the original value. + */ + if ((error = xfs_bmbt_update(cur, + got.br_startoff, + got.br_startblock, + got.br_blockcount, + got.br_state))) + goto done; + /* + * Reset the extent record back + * to the original value. + */ + xfs_bmbt_set_blockcount(ep, + got.br_blockcount); + flags = 0; + error = XFS_ERROR(ENOSPC); + goto done; + } + ASSERT(i == 1); + } else + flags |= XFS_ILOG_FEXT(whichfork); + XFS_IFORK_NEXT_SET(ip, whichfork, + XFS_IFORK_NEXTENTS(ip, whichfork) + 1); + } else { + ASSERT(whichfork == XFS_DATA_FORK); + temp = xfs_bmap_worst_indlen(ip, temp); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + temp2 = xfs_bmap_worst_indlen(ip, temp2); + new.br_startblock = NULLSTARTBLOCK((int)temp2); + da_new = temp + temp2; + while (da_new > da_old) { + if (temp) { + temp--; + da_new--; + xfs_bmbt_set_startblock(ep, + NULLSTARTBLOCK((int)temp)); + } + if (da_new == da_old) + break; + if (temp2) { + temp2--; + da_new--; + new.br_startblock = + NULLSTARTBLOCK((int)temp2); + } + } + } + xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork); + xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL, + whichfork); + xfs_bmap_insert_exlist(ip, idx + 1, 1, &new, whichfork); + ifp->if_lastex = idx + 1; + break; + } + /* + * If we need to, add to list of extents to delete. + */ + if (do_fx) + xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist, + mp); + /* + * Adjust inode # blocks in the file. + */ + if (nblks) + ip->i_d.di_nblocks -= nblks; + /* + * Adjust quota data. + */ + if (qfield) + XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks); + + /* + * Account for change in delayed indirect blocks. + * Nothing to do for disk quota accounting here. + */ + ASSERT(da_old >= da_new); + if (da_old > da_new) + xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int)(da_old - da_new), + rsvd); +done: + *logflagsp = flags; + return error; +} + +/* + * Remove the entry "free" from the free item list. Prev points to the + * previous entry, unless "free" is the head of the list. + */ +STATIC void +xfs_bmap_del_free( + xfs_bmap_free_t *flist, /* free item list header */ + xfs_bmap_free_item_t *prev, /* previous item on list, if any */ + xfs_bmap_free_item_t *free) /* list item to be freed */ +{ + if (prev) + prev->xbfi_next = free->xbfi_next; + else + flist->xbf_first = free->xbfi_next; + flist->xbf_count--; + kmem_zone_free(xfs_bmap_free_item_zone, free); +} + +/* + * Remove count entries from the extents array for inode "ip", starting + * at index "idx". Copies the remaining items down over the deleted ones, + * and gives back the excess memory. + */ +STATIC void +xfs_bmap_delete_exlist( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* starting delete index */ + xfs_extnum_t count, /* count of items to delete */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extent list */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extents in list after */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + base = ifp->if_u1.if_extents; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count; + memmove(&base[idx], &base[idx + count], + (nextents - idx) * sizeof(*base)); + xfs_iext_realloc(ip, -count, whichfork); +} + +/* + * Convert an extents-format file into a btree-format file. 
+ * The new file will have a root block (in the inode) and a single child block. + */ +STATIC int /* error */ +xfs_bmap_extents_to_btree( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first-block-allocated */ + xfs_bmap_free_t *flist, /* blocks freed in xaction */ + xfs_btree_cur_t **curp, /* cursor returned to caller */ + int wasdel, /* converting a delayed alloc */ + int *logflagsp, /* inode logging flags */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_block_t *ablock; /* allocated (child) bt block */ + xfs_buf_t *abp; /* buffer for ablock */ + xfs_alloc_arg_t args; /* allocation arguments */ + xfs_bmbt_rec_t *arp; /* child record pointer */ + xfs_bmbt_block_t *block; /* btree root block */ + xfs_btree_cur_t *cur; /* bmap btree cursor */ + xfs_bmbt_rec_t *ep; /* extent list pointer */ + int error; /* error return value */ + xfs_extnum_t i, cnt; /* extent list index */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_key_t *kp; /* root block key pointer */ + xfs_mount_t *mp; /* mount structure */ + xfs_extnum_t nextents; /* extent list size */ + xfs_bmbt_ptr_t *pp; /* root block address pointer */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS); + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + /* + * Make space in the inode incore. + */ + xfs_iroot_realloc(ip, 1, whichfork); + ifp->if_flags |= XFS_IFBROOT; + /* + * Fill in the root. + */ + block = ifp->if_broot; + INT_SET(block->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); + INT_SET(block->bb_level, ARCH_CONVERT, 1); + INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); + INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); + INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); + /* + * Need a cursor. Can't allocate until bb_level is filled in. + */ + mp = ip->i_mount; + cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip, + whichfork); + cur->bc_private.b.firstblock = *firstblock; + cur->bc_private.b.flist = flist; + cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; + /* + * Convert to a btree with two levels, one record in root. + */ + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE); + args.tp = tp; + args.mp = mp; + if (*firstblock == NULLFSBLOCK) { + args.type = XFS_ALLOCTYPE_START_BNO; + args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); + } else if (flist->xbf_low) { + args.type = XFS_ALLOCTYPE_START_BNO; + args.fsbno = *firstblock; + } else { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.fsbno = *firstblock; + } + args.minlen = args.maxlen = args.prod = 1; + args.total = args.minleft = args.alignment = args.mod = args.isfl = + args.minalignslop = 0; + args.wasdel = wasdel; + *logflagsp = 0; + if ((error = xfs_alloc_vextent(&args))) { + xfs_iroot_realloc(ip, -1, whichfork); + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; + } + /* + * Allocation can't fail, the space was reserved. + */ + ASSERT(args.fsbno != NULLFSBLOCK); + ASSERT(*firstblock == NULLFSBLOCK || + args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) || + (flist->xbf_low && + args.agno > XFS_FSB_TO_AGNO(mp, *firstblock))); + *firstblock = cur->bc_private.b.firstblock = args.fsbno; + cur->bc_private.b.allocated++; + ip->i_d.di_nblocks++; + XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); + abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0); + /* + * Fill in the child block. 
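+ * Only real extents are copied: delayed-allocation entries
+ * (ISNULLSTARTBLOCK) own no disk blocks yet and have no
+ * on-disk form, so cnt tracks what was actually copied and
+ * is checked against the fork's extent count afterwards.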
+ */ + ablock = XFS_BUF_TO_BMBT_BLOCK(abp); + INT_SET(ablock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); + INT_ZERO(ablock->bb_level, ARCH_CONVERT); + INT_SET(ablock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); + INT_SET(ablock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); + arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) { + if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) { + arp->l0 = INT_GET(ep->l0, ARCH_CONVERT); + arp->l1 = INT_GET(ep->l1, ARCH_CONVERT); + arp++; cnt++; + } + } + INT_SET(ablock->bb_numrecs, ARCH_CONVERT, cnt); + ASSERT(INT_GET(ablock->bb_numrecs, ARCH_CONVERT) == XFS_IFORK_NEXTENTS(ip, whichfork)); + /* + * Fill in the root key and pointer. + */ + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); + INT_SET(kp->br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(arp)); + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); + INT_SET(*pp, ARCH_CONVERT, args.fsbno); + /* + * Do all this logging at the end so that + * the root is at the right level. + */ + xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS); + xfs_bmbt_log_recs(cur, abp, 1, INT_GET(ablock->bb_numrecs, ARCH_CONVERT)); + ASSERT(*curp == NULL); + *curp = cur; + *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork); + return 0; +} + +/* + * Insert new item(s) in the extent list for inode "ip". + * Count new items are inserted at offset idx. + */ +STATIC void +xfs_bmap_insert_exlist( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* starting index of new items */ + xfs_extnum_t count, /* number of inserted items */ + xfs_bmbt_irec_t *new, /* items to insert */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* extent list base */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* extent list size */ + xfs_extnum_t to; /* extent list index */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + xfs_iext_realloc(ip, count, whichfork); + base = ifp->if_u1.if_extents; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + memmove(&base[idx + count], &base[idx], + (nextents - (idx + count)) * sizeof(*base)); + for (to = idx; to < idx + count; to++, new++) + xfs_bmbt_set_all(&base[to], new); +} + +/* + * Convert a local file to an extents file. + * This code is out of bounds for data forks of regular files, + * since the file data needs to get logged so things will stay consistent. + * (The bmap-level manipulations are ok, though). + */ +STATIC int /* error */ +xfs_bmap_local_to_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated in xaction */ + xfs_extlen_t total, /* total blocks needed by transaction */ + int *logflagsp, /* inode logging flags */ + int whichfork) /* data or attr fork */ +{ + int error; /* error return value */ + int flags; /* logging flags returned */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_local_to_extents"; +#endif + xfs_ifork_t *ifp; /* inode fork pointer */ + + /* + * We don't want to deal with the case of keeping inode data inline yet. + * So sending the data fork of a regular inode is illegal. 
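+ * Typically this is a shortform directory or an attribute
+ * fork being pushed out to its first real block, whose new
+ * contents can be logged through the buffer (the memcpy and
+ * xfs_trans_log_buf pair below).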
+ */ + ASSERT(!((ip->i_d.di_mode & IFMT) == IFREG && + whichfork == XFS_DATA_FORK)); + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); + flags = 0; + error = 0; + if (ifp->if_bytes) { + xfs_alloc_arg_t args; /* allocation arguments */ + xfs_buf_t *bp; /* buffer for extent list block */ + xfs_bmbt_rec_t *ep; /* extent list pointer */ + + args.tp = tp; + args.mp = ip->i_mount; + ASSERT(ifp->if_flags & XFS_IFINLINE); + /* + * Allocate a block. We know we need only one, since the + * file currently fits in an inode. + */ + if (*firstblock == NULLFSBLOCK) { + args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino); + args.type = XFS_ALLOCTYPE_START_BNO; + } else { + args.fsbno = *firstblock; + args.type = XFS_ALLOCTYPE_NEAR_BNO; + } + args.total = total; + args.mod = args.minleft = args.alignment = args.wasdel = + args.isfl = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + if ((error = xfs_alloc_vextent(&args))) + goto done; + /* + * Can't fail, the space was reserved. + */ + ASSERT(args.fsbno != NULLFSBLOCK); + ASSERT(args.len == 1); + *firstblock = args.fsbno; + bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); + memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data, + ifp->if_bytes); + xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); + xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); + xfs_iext_realloc(ip, 1, whichfork); + ep = ifp->if_u1.if_extents; + xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); + xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork); + XFS_IFORK_NEXT_SET(ip, whichfork, 1); + ip->i_d.di_nblocks = 1; + XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip, + XFS_TRANS_DQ_BCOUNT, 1L); + flags |= XFS_ILOG_FEXT(whichfork); + } else + ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); + ifp->if_flags &= ~XFS_IFINLINE; + ifp->if_flags |= XFS_IFEXTENTS; + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); + flags |= XFS_ILOG_CORE; +done: + *logflagsp = flags; + return error; +} + +xfs_bmbt_rec_t * /* pointer to found extent entry */ +xfs_bmap_do_search_extents( + xfs_bmbt_rec_t *base, /* base of extent list */ + xfs_extnum_t lastx, /* last extent index used */ + xfs_extnum_t nextents, /* extent list size */ + xfs_fileoff_t bno, /* block number searched for */ + int *eofp, /* out: end of file found */ + xfs_extnum_t *lastxp, /* out: last extent index */ + xfs_bmbt_irec_t *gotp, /* out: extent entry found */ + xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ +{ + xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + xfs_bmbt_irec_t got; /* extent list entry, decoded */ + int high; /* high index of binary search */ + int low; /* low index of binary search */ + + if (lastx != NULLEXTNUM && lastx < nextents) + ep = base + lastx; + else + ep = NULL; + prevp->br_startoff = NULLFILEOFF; + if (ep && bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep)) && + bno < got.br_startoff + + (got.br_blockcount = xfs_bmbt_get_blockcount(ep))) + *eofp = 0; + else if (ep && lastx < nextents - 1 && + bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep + 1)) && + bno < got.br_startoff + + (got.br_blockcount = xfs_bmbt_get_blockcount(ep + 1))) { + lastx++; + ep++; + *eofp = 0; + } else if (nextents == 0) + *eofp = 1; + else if (bno == 0 && + (got.br_startoff = xfs_bmbt_get_startoff(base)) == 0) { + ep = base; + lastx = 0; + got.br_blockcount = xfs_bmbt_get_blockcount(ep); + *eofp = 0; + } else { + /* binary search the extents array */ + low = 0; + high = nextents - 1; + while (low <= high) { + 
XFS_STATS_INC(xfsstats.xs_cmp_exlist); + lastx = (low + high) >> 1; + ep = base + lastx; + got.br_startoff = xfs_bmbt_get_startoff(ep); + got.br_blockcount = xfs_bmbt_get_blockcount(ep); + if (bno < got.br_startoff) + high = lastx - 1; + else if (bno >= got.br_startoff + got.br_blockcount) + low = lastx + 1; + else { + got.br_startblock = xfs_bmbt_get_startblock(ep); + got.br_state = xfs_bmbt_get_state(ep); + *eofp = 0; + *lastxp = lastx; + *gotp = got; + return ep; + } + } + if (bno >= got.br_startoff + got.br_blockcount) { + lastx++; + if (lastx == nextents) { + *eofp = 1; + got.br_startblock = xfs_bmbt_get_startblock(ep); + got.br_state = xfs_bmbt_get_state(ep); + *prevp = got; + ep = NULL; + } else { + *eofp = 0; + xfs_bmbt_get_all(ep, prevp); + ep++; + got.br_startoff = xfs_bmbt_get_startoff(ep); + got.br_blockcount = xfs_bmbt_get_blockcount(ep); + } + } else { + *eofp = 0; + if (ep > base) + xfs_bmbt_get_all(ep - 1, prevp); + } + } + if (ep) { + got.br_startblock = xfs_bmbt_get_startblock(ep); + got.br_state = xfs_bmbt_get_state(ep); + } + *lastxp = lastx; + *gotp = got; + return ep; +} + +/* + * Search the extents list for the inode, for the extent containing bno. + * If bno lies in a hole, point to the next entry. If bno lies past eof, + * *eofp will be set, and *prevp will contain the last entry (null if none). + * Else, *lastxp will be set to the index of the found + * entry; *gotp will contain the entry. + */ +STATIC xfs_bmbt_rec_t * /* pointer to found extent entry */ +xfs_bmap_search_extents( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fileoff_t bno, /* block number searched for */ + int whichfork, /* data or attr fork */ + int *eofp, /* out: end of file found */ + xfs_extnum_t *lastxp, /* out: last extent index */ + xfs_bmbt_irec_t *gotp, /* out: extent entry found */ + xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ +{ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_rec_t *base; /* base of extent list */ + xfs_extnum_t lastx; /* last extent index used */ + xfs_extnum_t nextents; /* extent list size */ + + XFS_STATS_INC(xfsstats.xs_look_exlist); + ifp = XFS_IFORK_PTR(ip, whichfork); + lastx = ifp->if_lastex; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; + + return xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp, + lastxp, gotp, prevp); +} + + +#ifdef XFS_BMAP_TRACE +/* + * Add a bmap trace buffer entry. Base routine for the others. 
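+ * Each entry packs the opcode and fork into one word and
+ * splits the inode number and both 64-bit words of each
+ * bmbt record into 32-bit halves, since ktrace slots only
+ * hold pointer-sized values.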
+ */ +STATIC void +xfs_bmap_trace_addentry( + int opcode, /* operation */ + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(ies) */ + xfs_extnum_t cnt, /* count of entries, 1 or 2 */ + xfs_bmbt_rec_t *r1, /* first record */ + xfs_bmbt_rec_t *r2, /* second record or null */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t tr2; + + ASSERT(cnt == 1 || cnt == 2); + ASSERT(r1 != NULL); + if (cnt == 1) { + ASSERT(r2 == NULL); + r2 = &tr2; + memset(&tr2, 0, sizeof(tr2)); + } else + ASSERT(r2 != NULL); + ktrace_enter(xfs_bmap_trace_buf, + (void *)(__psint_t)(opcode | (whichfork << 16)), + (void *)fname, (void *)desc, (void *)ip, + (void *)(__psint_t)idx, + (void *)(__psint_t)cnt, + (void *)(__psunsigned_t)(ip->i_ino >> 32), + (void *)(__psunsigned_t)(unsigned)ip->i_ino, + (void *)(__psunsigned_t)(INT_GET(r1->l0, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l0, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r1->l1, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l1, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r2->l0, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l0, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r2->l1, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l1, ARCH_CONVERT)) + ); + ASSERT(ip->i_xtrace); + ktrace_enter(ip->i_xtrace, + (void *)(__psint_t)(opcode | (whichfork << 16)), + (void *)fname, (void *)desc, (void *)ip, + (void *)(__psint_t)idx, + (void *)(__psint_t)cnt, + (void *)(__psunsigned_t)(ip->i_ino >> 32), + (void *)(__psunsigned_t)(unsigned)ip->i_ino, + (void *)(__psunsigned_t)(INT_GET(r1->l0, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l0, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r1->l1, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l1, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r2->l0, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l0, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r2->l1, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l1, ARCH_CONVERT)) + ); +} + +/* + * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. + */ +STATIC void +xfs_bmap_trace_delete( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(entries) deleted */ + xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */ + int whichfork) /* data or attr fork */ +{ + xfs_ifork_t *ifp; /* inode fork pointer */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx, + cnt, &ifp->if_u1.if_extents[idx], + cnt == 2 ? &ifp->if_u1.if_extents[idx + 1] : NULL, + whichfork); +} + +/* + * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or + * reading in the extents list from the disk (in the btree). 
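+ * The incore irecs are packed to on-disk record format with
+ * xfs_bmbt_set_all first, so insert entries have the same
+ * shape as delete/update entries taken straight from the
+ * extent list.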
+ */ +STATIC void +xfs_bmap_trace_insert( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(entries) inserted */ + xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */ + xfs_bmbt_irec_t *r1, /* inserted record 1 */ + xfs_bmbt_irec_t *r2, /* inserted record 2 or null */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t tr1; /* compressed record 1 */ + xfs_bmbt_rec_t tr2; /* compressed record 2 if needed */ + + xfs_bmbt_set_all(&tr1, r1); + if (cnt == 2) { + ASSERT(r2 != NULL); + xfs_bmbt_set_all(&tr2, r2); + } else { + ASSERT(cnt == 1); + ASSERT(r2 == NULL); + } + xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx, + cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork); +} + +/* + * Add bmap trace entry after updating an extent list entry in place. + */ +STATIC void +xfs_bmap_trace_post_update( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry updated */ + int whichfork) /* data or attr fork */ +{ + xfs_ifork_t *ifp; /* inode fork pointer */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx, + 1, &ifp->if_u1.if_extents[idx], NULL, whichfork); +} + +/* + * Add bmap trace entry prior to updating an extent list entry in place. + */ +STATIC void +xfs_bmap_trace_pre_update( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry to be updated */ + int whichfork) /* data or attr fork */ +{ + xfs_ifork_t *ifp; /* inode fork pointer */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1, + &ifp->if_u1.if_extents[idx], NULL, whichfork); +} +#endif /* XFS_BMAP_TRACE */ + +/* + * Compute the worst-case number of indirect blocks that will be used + * for ip's delayed extent of length "len". + */ +STATIC xfs_filblks_t +xfs_bmap_worst_indlen( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_filblks_t len) /* delayed extent length */ +{ + int level; /* btree level number */ + int maxrecs; /* maximum record count at this level */ + xfs_mount_t *mp; /* mount structure */ + xfs_filblks_t rval; /* return value */ + + mp = ip->i_mount; + maxrecs = mp->m_bmap_dmxr[0]; + for (level = 0, rval = 0; + level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); + level++) { + len += maxrecs - 1; + do_div(len, maxrecs); + rval += len; + if (len == 1) + return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - + level - 1; + if (level == 0) + maxrecs = mp->m_bmap_dmxr[1]; + } + return rval; +} + +#if defined(DEBUG) && defined(XFS_RW_TRACE) +STATIC void +xfs_bunmap_trace( + xfs_inode_t *ip, + xfs_fileoff_t bno, + xfs_filblks_t len, + int flags, + inst_t *ra) +{ + if (ip->i_rwtrace == NULL) + return; + ktrace_enter(ip->i_rwtrace, + (void *)(__psint_t)XFS_BUNMAPI, + (void *)ip, + (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff), + (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff), + (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff), + (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff), + (void *)(__psint_t)len, + (void *)(__psint_t)flags, + (void *)(__psint_t)private.p_cpuid, + (void *)ra, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0); +} +#endif + +/* + * Convert inode from non-attributed to attributed. 
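+ * The sequence: reserve a permanent transaction plus quota,
+ * re-check XFS_IFORK_Q under the inode lock, derive
+ * di_forkoff from the data fork format, allocate i_afp,
+ * convert the data fork bmap root if it no longer fits,
+ * set the superblock ATTR feature bit if needed, then
+ * finish the free list and commit.  di_forkoff is in
+ * 8-byte units: a UUID-format inode, for instance, gets
+ * roundup(sizeof(uuid_t), 8) >> 3 == 2, placing the
+ * attribute fork 16 bytes into the inode's data area.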
+ * Must not be in a transaction, ip must not be locked. + */ +int /* error code */ +xfs_bmap_add_attrfork( + xfs_inode_t *ip, /* incore inode pointer */ + int rsvd) /* OK to allocated reserved blocks in trans */ +{ + int blks; /* space reservation */ + int committed; /* xaction was committed */ + int error; /* error return value */ + xfs_fsblock_t firstblock; /* 1st block/ag allocated */ + xfs_bmap_free_t flist; /* freed extent list */ + int logflags; /* logging flags */ + xfs_mount_t *mp; /* mount structure */ + unsigned long s; /* spinlock spl value */ + xfs_trans_t *tp; /* transaction pointer */ + + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t)); + if (XFS_IFORK_Q(ip)) + return 0; + mp = ip->i_mount; + ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); + tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK); + blks = XFS_ADDAFORK_SPACE_RES(mp); + if (rsvd) + tp->t_flags |= XFS_TRANS_RESERVE; + if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT))) + goto error0; + xfs_ilock(ip, XFS_ILOCK_EXCL); + error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ? + XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : + XFS_QMOPT_RES_REGBLKS); + if (error) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES); + return error; + } + if (XFS_IFORK_Q(ip)) + goto error1; + if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { + /* + * For inodes coming from pre-6.2 filesystems. + */ + ASSERT(ip->i_d.di_aformat == 0); + ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; + } + ASSERT(ip->i_d.di_anextents == 0); + VN_HOLD(XFS_ITOV(ip)); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + switch (ip->i_d.di_format) { + case XFS_DINODE_FMT_DEV: + ip->i_d.di_forkoff = roundup(sizeof(dev_t), 8) >> 3; + break; + case XFS_DINODE_FMT_UUID: + ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3; + break; + case XFS_DINODE_FMT_LOCAL: + case XFS_DINODE_FMT_EXTENTS: + case XFS_DINODE_FMT_BTREE: + ip->i_d.di_forkoff = mp->m_attroffset >> 3; + break; + default: + ASSERT(0); + error = XFS_ERROR(EINVAL); + goto error1; + } + ip->i_df.if_ext_max = + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(ip->i_afp == NULL); + ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); + ip->i_afp->if_ext_max = + XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + ip->i_afp->if_flags = XFS_IFEXTENTS; + logflags = 0; + XFS_BMAP_INIT(&flist, &firstblock); + switch (ip->i_d.di_format) { + case XFS_DINODE_FMT_LOCAL: + error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist, + &logflags); + break; + case XFS_DINODE_FMT_EXTENTS: + error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock, + &flist, &logflags); + break; + case XFS_DINODE_FMT_BTREE: + error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist, + &logflags); + break; + default: + error = 0; + break; + } + if (logflags) + xfs_trans_log_inode(tp, ip, logflags); + if (error) + goto error2; + if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) { + s = XFS_SB_LOCK(mp); + if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) { + XFS_SB_VERSION_ADDATTR(&mp->m_sb); + XFS_SB_UNLOCK(mp, s); + xfs_mod_sb(tp, XFS_SB_VERSIONNUM); + } else + XFS_SB_UNLOCK(mp, s); + } + if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed))) + goto error2; + error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES, NULL); + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t)); + return error; +error2: + 
xfs_bmap_cancel(&flist); +error1: + ASSERT(ismrlocked(&ip->i_lock,MR_UPDATE)); + xfs_iunlock(ip, XFS_ILOCK_EXCL); +error0: + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t)); + return error; +} + +/* + * Add the extent to the list of extents to be free at transaction end. + * The list is maintained sorted (by block number). + */ +/* ARGSUSED */ +void +xfs_bmap_add_free( + xfs_fsblock_t bno, /* fs block number of extent */ + xfs_filblks_t len, /* length of extent */ + xfs_bmap_free_t *flist, /* list of extents */ + xfs_mount_t *mp) /* mount point structure */ +{ + xfs_bmap_free_item_t *cur; /* current (next) element */ + xfs_bmap_free_item_t *new; /* new element */ + xfs_bmap_free_item_t *prev; /* previous element */ +#ifdef DEBUG + xfs_agnumber_t agno; + xfs_agblock_t agbno; + + ASSERT(bno != NULLFSBLOCK); + ASSERT(len > 0); + ASSERT(len <= MAXEXTLEN); + ASSERT(!ISNULLSTARTBLOCK(bno)); + agno = XFS_FSB_TO_AGNO(mp, bno); + agbno = XFS_FSB_TO_AGBNO(mp, bno); + ASSERT(agno < mp->m_sb.sb_agcount); + ASSERT(agbno < mp->m_sb.sb_agblocks); + ASSERT(len < mp->m_sb.sb_agblocks); + ASSERT(agbno + len <= mp->m_sb.sb_agblocks); +#endif + ASSERT(xfs_bmap_free_item_zone != NULL); + new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP); + new->xbfi_startblock = bno; + new->xbfi_blockcount = (xfs_extlen_t)len; + for (prev = NULL, cur = flist->xbf_first; + cur != NULL; + prev = cur, cur = cur->xbfi_next) { + if (cur->xbfi_startblock >= bno) + break; + } + if (prev) + prev->xbfi_next = new; + else + flist->xbf_first = new; + new->xbfi_next = cur; + flist->xbf_count++; +} + +/* + * Compute and fill in the value of the maximum depth of a bmap btree + * in this filesystem. Done once, during mount. + */ +void +xfs_bmap_compute_maxlevels( + xfs_mount_t *mp, /* file system mount structure */ + int whichfork) /* data or attr fork */ +{ + int level; /* btree level */ + uint maxblocks; /* max blocks at this level */ + uint maxleafents; /* max leaf entries possible */ + int maxrootrecs; /* max records in root block */ + int minleafrecs; /* min records in leaf block */ + int minnoderecs; /* min records in node block */ + int sz; /* root block size */ + + /* + * The maximum number of extents in a file, hence the maximum + * number of leaf entries, is controlled by the type of di_nextents + * (a signed 32-bit number, xfs_extnum_t), or by di_anextents + * (a signed 16-bit number, xfs_aextnum_t). + */ + maxleafents = (whichfork == XFS_DATA_FORK) ? MAXEXTNUM : MAXAEXTNUM; + minleafrecs = mp->m_bmap_dmnr[0]; + minnoderecs = mp->m_bmap_dmnr[1]; + sz = (whichfork == XFS_DATA_FORK) ? + mp->m_attroffset : + mp->m_sb.sb_inodesize - mp->m_attroffset; + maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0); + maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; + for (level = 1; maxblocks > 1; level++) { + if (maxblocks <= maxrootrecs) + maxblocks = 1; + else + maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; + } + mp->m_bm_maxlevels[whichfork] = level; +} + +/* + * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi + * caller. Frees all the extents that need freeing, which must be done + * last due to locking considerations. We never free any extents in + * the first transaction. This is to allow the caller to make the first + * transaction a synchronous one so that the pointers to the data being + * broken in this transaction will be permanent before the data is actually + * freed. 
This is necessary to prevent blocks from being reallocated + * and written to before the free and reallocation are actually permanent. + * We do not just make the first transaction synchronous here, because + * there are more efficient ways to gain the same protection in some cases + * (see the file truncation code). + * + * Return 1 if the given transaction was committed and a new one + * started, and 0 otherwise in the committed parameter. + */ +/*ARGSUSED*/ +int /* error */ +xfs_bmap_finish( + xfs_trans_t **tp, /* transaction pointer addr */ + xfs_bmap_free_t *flist, /* i/o: list extents to free */ + xfs_fsblock_t firstblock, /* controlled ag for allocs */ + int *committed) /* xact committed or not */ +{ + xfs_efd_log_item_t *efd; /* extent free data */ + xfs_efi_log_item_t *efi; /* extent free intention */ + int error; /* error return value */ + xfs_bmap_free_item_t *free; /* free extent list item */ + unsigned int logres; /* new log reservation */ + unsigned int logcount; /* new log count */ + xfs_mount_t *mp; /* filesystem mount structure */ + xfs_bmap_free_item_t *next; /* next item on free list */ + xfs_trans_t *ntp; /* new transaction pointer */ + + ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); + if (flist->xbf_count == 0) { + *committed = 0; + return 0; + } + ntp = *tp; + efi = xfs_trans_get_efi(ntp, flist->xbf_count); + for (free = flist->xbf_first; free; free = free->xbfi_next) + xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock, + free->xbfi_blockcount); + logres = ntp->t_log_res; + logcount = ntp->t_log_count; + ntp = xfs_trans_dup(*tp); + error = xfs_trans_commit(*tp, 0, NULL); + *tp = ntp; + *committed = 1; + /* + * We have a new transaction, so we should return committed=1, + * even though we're returning an error. + */ + if (error) { + return error; + } + if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES, + logcount))) + return error; + efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count); + for (free = flist->xbf_first; free != NULL; free = next) { + next = free->xbfi_next; + if ((error = xfs_free_extent(ntp, free->xbfi_startblock, + free->xbfi_blockcount))) { + /* + * The bmap free list will be cleaned up at a + * higher level. The EFI will be canceled when + * this transaction is aborted. + * Need to force shutdown here to make sure it + * happens, since this transaction may not be + * dirty yet. + */ + mp = ntp->t_mountp; + if (!XFS_FORCED_SHUTDOWN(mp)) + xfs_force_shutdown(mp, + (error == EFSCORRUPTED) ? + XFS_CORRUPT_INCORE : + XFS_METADATA_IO_ERROR); + return error; + } + xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock, + free->xbfi_blockcount); + xfs_bmap_del_free(flist, NULL, free); + } + return 0; +} + +/* + * Free up any items left in the list. + */ +void +xfs_bmap_cancel( + xfs_bmap_free_t *flist) /* list of bmap_free_items */ +{ + xfs_bmap_free_item_t *free; /* free list item */ + xfs_bmap_free_item_t *next; + + if (flist->xbf_count == 0) + return; + ASSERT(flist->xbf_first != NULL); + for (free = flist->xbf_first; free; free = next) { + next = free->xbfi_next; + xfs_bmap_del_free(flist, NULL, free); + } + ASSERT(flist->xbf_count == 0); +} + +/* + * Returns EINVAL if the specified file is not swappable. 
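+ * Swappable means fully allocated: no holes and no unwritten
+ * extents from offset zero up to the file size, so swap I/O
+ * never needs to start a transaction.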
+ */ +int /* error */ +xfs_bmap_check_swappable( + xfs_inode_t *ip) /* incore inode */ +{ + xfs_bmbt_rec_t *base; /* base of extent array */ + xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ + xfs_fileoff_t end_fsb; /* last block of file within size */ + xfs_bmbt_irec_t ext; /* extent list entry, decoded */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_fileoff_t lastaddr; /* last block number seen */ + xfs_extnum_t nextents; /* number of extent entries */ + int retval = 0; /* return value */ + + xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + + /* + * Check for a zero length file. + */ + if (ip->i_d.di_size == 0) + goto check_done; + + ASSERT(XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) == XFS_DINODE_FMT_BTREE || + XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS); + + ifp = &ip->i_df; + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (retval = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) + goto check_done; + /* + * Scan extents until the file size is reached. Look for + * holes or unwritten extents, since I/O to these would cause + * a transaction. + */ + end_fsb = XFS_B_TO_FSB(ip->i_mount, ip->i_d.di_size); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; + for (lastaddr = 0, ep = base; ep < &base[nextents]; ep++) { + xfs_bmbt_get_all(ep, &ext); + if (lastaddr < ext.br_startoff || + ext.br_state != XFS_EXT_NORM) { + goto error_done; + } + if (end_fsb <= (lastaddr = ext.br_startoff + + ext.br_blockcount)) + goto check_done; + } +error_done: + retval = XFS_ERROR(EINVAL); + + +check_done: + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + return retval; +} + +/* + * Returns the file-relative block number of the first unused block(s) + * in the file with at least "len" logically contiguous blocks free. + * This is the lowest-address hole if the file has holes, else the first block + * past the end of file. + * Return 0 if the file is currently local (in-inode). + */ +int /* error */ +xfs_bmap_first_unused( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + xfs_extlen_t len, /* size of hole to find */ + xfs_fileoff_t *first_unused, /* unused block */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extent array */ + xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_fileoff_t lastaddr; /* last block number seen */ + xfs_fileoff_t lowest; /* lowest useful block */ + xfs_fileoff_t max; /* starting useful block */ + xfs_fileoff_t off; /* offset for this block */ + xfs_extnum_t nextents; /* number of extent entries */ + + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE || + XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS || + XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { + *first_unused = 0; + return 0; + } + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + lowest = *first_unused; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; + for (lastaddr = 0, max = lowest, ep = base; + ep < &base[nextents]; + ep++) { + off = xfs_bmbt_get_startoff(ep); + /* + * See if the hole before this extent will work. 
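+ * E.g. len == 3, lowest == 0, extents covering [0,5) and
+ * [10,20): at the second extent off == 10 and max == 5,
+ * both tests pass, and the hole at block 5 is returned.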
+ */ + if (off >= lowest + len && off - max >= len) { + *first_unused = max; + return 0; + } + lastaddr = off + xfs_bmbt_get_blockcount(ep); + max = XFS_FILEOFF_MAX(lastaddr, lowest); + } + *first_unused = max; + return 0; +} + +/* + * Returns the file-relative block number of the last block + 1 before + * last_block (input value) in the file. + * This is not based on i_size, it is based on the extent list. + * Returns 0 for local files, as they do not have an extent list. + */ +int /* error */ +xfs_bmap_last_before( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + xfs_fileoff_t *last_block, /* last block */ + int whichfork) /* data or attr fork */ +{ + xfs_fileoff_t bno; /* input file offset */ + int eof; /* hit end of file */ + xfs_bmbt_rec_t *ep; /* pointer to last extent */ + int error; /* error return value */ + xfs_bmbt_irec_t got; /* current extent value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t lastx; /* last extent used */ + xfs_bmbt_irec_t prev; /* previous extent value */ + + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) + return XFS_ERROR(EIO); + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { + *last_block = 0; + return 0; + } + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + bno = *last_block - 1; + ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, + &prev); + if (eof || xfs_bmbt_get_startoff(ep) > bno) { + if (prev.br_startoff == NULLFILEOFF) + *last_block = 0; + else + *last_block = prev.br_startoff + prev.br_blockcount; + } + /* + * Otherwise *last_block is already the right answer. + */ + return 0; +} + +/* + * Returns the file-relative block number of the first block past eof in + * the file. This is not based on i_size, it is based on the extent list. + * Returns 0 for local files, as they do not have an extent list. + */ +int /* error */ +xfs_bmap_last_offset( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + xfs_fileoff_t *last_block, /* last block */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extent array */ + xfs_bmbt_rec_t *ep; /* pointer to last extent */ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extent entries */ + + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) + return XFS_ERROR(EIO); + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { + *last_block = 0; + return 0; + } + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (!nextents) { + *last_block = 0; + return 0; + } + base = &ifp->if_u1.if_extents[0]; + ASSERT(base != NULL); + ep = &base[nextents - 1]; + *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep); + return 0; +} + +/* + * Returns whether the selected fork of the inode has exactly one + * block or not. For the data fork we check this matches di_size, + * implying the file's range is 0..bsize-1. 
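+ * Non-DEBUG builds answer the data-fork case from di_size
+ * alone; the extent examination below then only runs for the
+ * attribute fork (DEBUG builds use it as a cross-check).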
+ */ +int /* 1=>1 block, 0=>otherwise */ +xfs_bmap_one_block( + xfs_inode_t *ip, /* incore inode */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *ep; /* ptr to fork's extent */ + xfs_ifork_t *ifp; /* inode fork pointer */ + int rval; /* return value */ + xfs_bmbt_irec_t s; /* internal version of extent */ + +#ifndef DEBUG + if (whichfork == XFS_DATA_FORK) + return ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize; +#endif /* !DEBUG */ + if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1) + return 0; + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) + return 0; + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + ep = ifp->if_u1.if_extents; + xfs_bmbt_get_all(ep, &s); + rval = s.br_startoff == 0 && s.br_blockcount == 1; + if (rval && whichfork == XFS_DATA_FORK) + ASSERT(ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize); + return rval; +} + +/* + * Read in the extents to if_extents. + * All inode fields are set up by caller, we just traverse the btree + * and copy the records in. If the file system cannot contain unwritten + * extents, the records are checked for no "state" flags. + */ +int /* error */ +xfs_bmap_read_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_block_t *block; /* current btree block */ + xfs_fsblock_t bno; /* block # of "block" */ + xfs_buf_t *bp; /* buffer for "block" */ + int error; /* error return value */ + xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_read_extents"; +#endif + xfs_extnum_t i, j; /* index into the extents list */ + xfs_ifork_t *ifp; /* fork structure */ + int level; /* btree level, for checking */ + xfs_mount_t *mp; /* file system mount structure */ + xfs_bmbt_ptr_t *pp; /* pointer to block address */ + /* REFERENCED */ + xfs_extnum_t room; /* number of entries there's room for */ + xfs_bmbt_rec_t *trp; /* target record pointer */ + + bno = NULLFSBLOCK; + mp = ip->i_mount; + ifp = XFS_IFORK_PTR(ip, whichfork); + exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE : + XFS_EXTFMT_INODE(ip); + block = ifp->if_broot; + /* + * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. + */ + ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); + level = INT_GET(block->bb_level, ARCH_CONVERT); + pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); + ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); + ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); + ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); + bno = INT_GET(*pp, ARCH_CONVERT); + /* + * Go down the tree until leaf level is reached, following the first + * pointer (leftmost) at each level. + */ + while (level-- > 0) { + if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, + XFS_BMAP_BTREE_REF))) + return error; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + XFS_WANT_CORRUPTED_GOTO( + XFS_BMAP_SANITY_CHECK(mp, block, level), + error0); + if (level == 0) + break; + pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, + 1, mp->m_bmap_dmxr[1]); + XFS_WANT_CORRUPTED_GOTO( + XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)), + error0); + bno = INT_GET(*pp, ARCH_CONVERT); + xfs_trans_brelse(tp, bp); + } + /* + * Here with bp and block set to the leftmost leaf node in the tree. + */ + room = ifp->if_bytes / (uint)sizeof(*trp); + trp = ifp->if_u1.if_extents; + i = 0; + /* + * Loop over all leaf nodes. 
Copy information to the extent list. + */ + for (;;) { + xfs_bmbt_rec_t *frp, *temp; + xfs_fsblock_t nextbno; + xfs_extnum_t num_recs; + + + num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + if (unlikely(i + num_recs > room)) { + ASSERT(i + num_recs <= room); + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt dinode %Lu, (btree extents). Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino); + XFS_ERROR_REPORT("xfs_bmap_read_extents(1)", + XFS_ERRLEVEL_LOW, + ip->i_mount); + goto error0; + } + XFS_WANT_CORRUPTED_GOTO( + XFS_BMAP_SANITY_CHECK(mp, block, 0), + error0); + /* + * Read-ahead the next leaf block, if any. + */ + nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + if (nextbno != NULLFSBLOCK) + xfs_btree_reada_bufl(mp, nextbno, 1); + /* + * Copy records into the extent list. + */ + frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, + block, 1, mp->m_bmap_dmxr[0]); + temp = trp; + for (j = 0; j < num_recs; j++, frp++, trp++) { + trp->l0 = INT_GET(frp->l0, ARCH_CONVERT); + trp->l1 = INT_GET(frp->l1, ARCH_CONVERT); + } + if (exntf == XFS_EXTFMT_NOSTATE) { + /* + * Check all attribute bmap btree records and + * any "older" data bmap btree records for a + * set bit in the "extent flag" position. + */ + if (unlikely(xfs_check_nostate_extents(temp, num_recs))) { + XFS_ERROR_REPORT("xfs_bmap_read_extents(2)", + XFS_ERRLEVEL_LOW, + ip->i_mount); + goto error0; + } + } + i += num_recs; + xfs_trans_brelse(tp, bp); + bno = nextbno; + /* + * If we've reached the end, stop. + */ + if (bno == NULLFSBLOCK) + break; + if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, + XFS_BMAP_BTREE_REF))) + return error; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + } + ASSERT(i == ifp->if_bytes / (uint)sizeof(*trp)); + ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); + xfs_bmap_trace_exlist(fname, ip, i, whichfork); + return 0; +error0: + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); +} + +#ifdef XFS_BMAP_TRACE +/* + * Add bmap trace insert entries for all the contents of the extent list. + */ +void +xfs_bmap_trace_exlist( + char *fname, /* function name */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t cnt, /* count of entries in the list */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extent list */ + xfs_bmbt_rec_t *ep; /* current entry in extent list */ + xfs_extnum_t idx; /* extent list entry number */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_irec_t s; /* extent list record */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(cnt == ifp->if_bytes / (uint)sizeof(*base)); + base = ifp->if_u1.if_extents; + for (idx = 0, ep = base; idx < cnt; idx++, ep++) { + xfs_bmbt_get_all(ep, &s); + xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL, + whichfork); + } +} +#endif + +#ifdef DEBUG +/* + * Validate that the bmbt_irecs being returned from bmapi are valid + * given the callers original parameters. Specifically check the + * ranges of the returned irecs to ensure that they only extent beyond + * the given parameters if the XFS_BMAPI_ENTIRE flag was set. 
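+ * With XFS_BMAPI_ENTIRE a mapping may start before bno or
+ * run past bno + len so long as it overlaps the request;
+ * in either mode successive mappings must be contiguous in
+ * file offset.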
+ */ +STATIC void +xfs_bmap_validate_ret( + xfs_fileoff_t bno, + xfs_filblks_t len, + int flags, + xfs_bmbt_irec_t *mval, + int nmap, + int ret_nmap) +{ + int i; /* index to map values */ + + ASSERT(ret_nmap <= nmap); + + for (i = 0; i < ret_nmap; i++) { + ASSERT(mval[i].br_blockcount > 0); + if (!(flags & XFS_BMAPI_ENTIRE)) { + ASSERT(mval[i].br_startoff >= bno); + ASSERT(mval[i].br_blockcount <= len); + ASSERT(mval[i].br_startoff + mval[i].br_blockcount <= + bno + len); + } else { + ASSERT(mval[i].br_startoff < bno + len); + ASSERT(mval[i].br_startoff + mval[i].br_blockcount > + bno); + } + ASSERT(i == 0 || + mval[i - 1].br_startoff + mval[i - 1].br_blockcount == + mval[i].br_startoff); + if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY)) + ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK && + mval[i].br_startblock != HOLESTARTBLOCK); + ASSERT(mval[i].br_state == XFS_EXT_NORM || + mval[i].br_state == XFS_EXT_UNWRITTEN); + } +} +#endif /* DEBUG */ + + +/* + * Map file blocks to filesystem blocks. + * File range is given by the bno/len pair. + * Adds blocks to file if a write ("flags & XFS_BMAPI_WRITE" set) + * into a hole or past eof. + * Only allocates blocks from a single allocation group, + * to avoid locking problems. + * The returned value in "firstblock" from the first call in a transaction + * must be remembered and presented to subsequent calls in "firstblock". + * An upper bound for the number of blocks to be allocated is supplied to + * the first call in "total"; if no allocation group has that many free + * blocks then the call will fail (return NULLFSBLOCK in "firstblock"). + */ +int /* error */ +xfs_bmapi( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + xfs_fileoff_t bno, /* starting file offs. mapped */ + xfs_filblks_t len, /* length to map in file */ + int flags, /* XFS_BMAPI_... */ + xfs_fsblock_t *firstblock, /* first allocated block + controls a.g. 
for allocs */ + xfs_extlen_t total, /* total blocks needed */ + xfs_bmbt_irec_t *mval, /* output: map values */ + int *nmap, /* i/o: mval size/count */ + xfs_bmap_free_t *flist) /* i/o: list extents to free */ +{ + xfs_fsblock_t abno; /* allocated block number */ + xfs_extlen_t alen; /* allocated extent length */ + xfs_fileoff_t aoff; /* allocated file offset */ + xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */ + char contig; /* allocation must be one extent */ + xfs_btree_cur_t *cur; /* bmap btree cursor */ + char delay; /* this request is for delayed alloc */ + xfs_fileoff_t end; /* end of mapped file region */ + int eof; /* we've hit the end of extent list */ + xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + int error; /* error return */ + char exact; /* don't do all of wasdelayed extent */ + xfs_bmbt_irec_t got; /* current extent list record */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extlen_t indlen; /* indirect blocks length */ + char inhole; /* current location is hole in file */ + xfs_extnum_t lastx; /* last useful extent number */ + int logflags; /* flags for transaction logging */ + xfs_extlen_t minleft; /* min blocks left after allocation */ + xfs_extlen_t minlen; /* min allocation size */ + xfs_mount_t *mp; /* xfs mount structure */ + int n; /* current extent index */ + int nallocs; /* number of extents alloc\'d */ + xfs_extnum_t nextents; /* number of extents in file */ + xfs_fileoff_t obno; /* old block number (offset) */ + xfs_bmbt_irec_t prev; /* previous extent list record */ + char stateless; /* ignore state flag set */ + int tmp_logflags; /* temp flags holder */ + char trim; /* output trimmed to match range */ + char userdata; /* allocating non-metadata */ + char wasdelay; /* old extent was delayed */ + int whichfork; /* data or attr fork */ + char wr; /* this is a write request */ + char rsvd; /* OK to allocate reserved blocks */ +#ifdef DEBUG + xfs_fileoff_t orig_bno; /* original block number value */ + int orig_flags; /* original flags arg value */ + xfs_filblks_t orig_len; /* original value of len arg */ + xfs_bmbt_irec_t *orig_mval; /* original value of mval */ + int orig_nmap; /* original value of *nmap */ + + orig_bno = bno; + orig_len = len; + orig_flags = flags; + orig_mval = mval; + orig_nmap = *nmap; +#endif + ASSERT(*nmap >= 1); + ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE)); + whichfork = (flags & XFS_BMAPI_ATTRFORK) ? + XFS_ATTR_FORK : XFS_DATA_FORK; + mp = ip->i_mount; + if (unlikely(XFS_TEST_ERROR( + (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL), + mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { + XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + if ((wr = (flags & XFS_BMAPI_WRITE)) != 0) + XFS_STATS_INC(xfsstats.xs_blk_mapw); + else + XFS_STATS_INC(xfsstats.xs_blk_mapr); + delay = (flags & XFS_BMAPI_DELAY) != 0; + trim = (flags & XFS_BMAPI_ENTIRE) == 0; + userdata = (flags & XFS_BMAPI_METADATA) == 0; + exact = (flags & XFS_BMAPI_EXACT) != 0; + rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; + contig = (flags & XFS_BMAPI_CONTIG) != 0; + /* + * stateless is used to combine extents which + * differ only due to the state of the extents. 
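+ * ("State" is the written/unwritten distinction,
+ * XFS_EXT_NORM vs. XFS_EXT_UNWRITTEN.)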
+ * This technique is used from xfs_getbmap() + * when the caller does not wish to see the + * separation (which is the default). + * + * This technique is also used when writing a + * buffer which has been partially written, + * (usually by being flushed during a chunkread), + * to ensure one write takes place. This also + * prevents a change in the xfs inode extents at + * this time, intentionally. This change occurs + * on completion of the write operation, in + * xfs_strat_comp(), where the xfs_bmapi() call + * is transactioned, and the extents combined. + */ + stateless = (flags & XFS_BMAPI_IGSTATE) != 0; + if (stateless && wr) /* if writing unwritten space, no */ + wr = 0; /* allocations are allowed */ + ASSERT(wr || !delay); + logflags = 0; + nallocs = 0; + cur = NULL; + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { + ASSERT(wr && tp); + if ((error = xfs_bmap_local_to_extents(tp, ip, + firstblock, total, &logflags, whichfork))) + goto error0; + } + if (wr && *firstblock == NULLFSBLOCK) { + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) + minleft = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1; + else + minleft = 1; + } else + minleft = 0; + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + goto error0; + ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, + &prev); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + n = 0; + end = bno + len; + obno = bno; + bma.ip = NULL; + while (bno < end && n < *nmap) { + /* + * Reading past eof, act as though there's a hole + * up to end. + */ + if (eof && !wr) + got.br_startoff = end; + inhole = eof || got.br_startoff > bno; + wasdelay = wr && !inhole && !delay && + ISNULLSTARTBLOCK(got.br_startblock); + /* + * First, deal with the hole before the allocated space + * that we found, if any. + */ + if (wr && (inhole || wasdelay)) { + /* + * For the wasdelay case, we could also just + * allocate the stuff asked for in this bmap call + * but that wouldn't be as good. + */ + if (wasdelay && !exact) { + alen = (xfs_extlen_t)got.br_blockcount; + aoff = got.br_startoff; + if (lastx != NULLEXTNUM && lastx) { + ep = &ifp->if_u1.if_extents[lastx - 1]; + xfs_bmbt_get_all(ep, &prev); + } + } else if (wasdelay) { + alen = (xfs_extlen_t) + XFS_FILBLKS_MIN(len, + (got.br_startoff + + got.br_blockcount) - bno); + aoff = bno; + } else { + alen = (xfs_extlen_t) + XFS_FILBLKS_MIN(len, MAXEXTLEN); + if (!eof) + alen = (xfs_extlen_t) + XFS_FILBLKS_MIN(alen, + got.br_startoff - bno); + aoff = bno; + } + minlen = contig ? alen : 1; + if (delay) { + indlen = (xfs_extlen_t) + xfs_bmap_worst_indlen(ip, alen); + ASSERT(indlen > 0); + /* + * Make a transaction-less quota reservation for + * delayed allocation blocks. This number gets + * adjusted later. + * We return EDQUOT if we haven't allocated + * blks already inside this loop; + */ + if (XFS_TRANS_RESERVE_BLKQUOTA( + mp, NULL, ip, (long)alen)) { + if (n == 0) { + *nmap = 0; + ASSERT(cur == NULL); + return XFS_ERROR(EDQUOT); + } + break; + } + if (xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, + -(alen + indlen), rsvd)) { + XFS_TRANS_UNRESERVE_BLKQUOTA( + mp, NULL, ip, (long)alen); + break; + } + ip->i_delayed_blks += alen; + abno = NULLSTARTBLOCK(indlen); + } else { + /* + * If first time, allocate and fill in + * once-only bma fields. 
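+ * bma is the xfs_bmalloca_t argument block
+ * handed to xfs_bmap_alloc(); these fields
+ * hold for the whole call, while the
+ * per-extent fields are refreshed below.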
+ */ + if (bma.ip == NULL) { + bma.tp = tp; + bma.ip = ip; + bma.prevp = &prev; + bma.gotp = &got; + bma.total = total; + bma.userdata = 0; + } + /* Indicate if this is the first user data + * in the file, or just any user data. + */ + if (userdata) { + bma.userdata = (aoff == 0) ? + XFS_ALLOC_INITIAL_USER_DATA : + XFS_ALLOC_USERDATA; + } + /* + * Fill in changeable bma fields. + */ + bma.eof = eof; + bma.firstblock = *firstblock; + bma.alen = alen; + bma.off = aoff; + bma.wasdel = wasdelay; + bma.minlen = minlen; + bma.low = flist->xbf_low; + bma.minleft = minleft; + /* + * Only want to do the alignment at the + * eof if it is userdata and allocation length + * is larger than a stripe unit. + */ + if (mp->m_dalign && alen >= mp->m_dalign && + userdata && whichfork == XFS_DATA_FORK) { + if ((error = xfs_bmap_isaeof(ip, aoff, + whichfork, &bma.aeof))) + goto error0; + } else + bma.aeof = 0; + /* + * Call allocator. + */ + if ((error = xfs_bmap_alloc(&bma))) + goto error0; + /* + * Copy out result fields. + */ + abno = bma.rval; + if ((flist->xbf_low = bma.low)) + minleft = 0; + alen = bma.alen; + aoff = bma.off; + ASSERT(*firstblock == NULLFSBLOCK || + XFS_FSB_TO_AGNO(mp, *firstblock) == + XFS_FSB_TO_AGNO(mp, bma.firstblock) || + (flist->xbf_low && + XFS_FSB_TO_AGNO(mp, *firstblock) < + XFS_FSB_TO_AGNO(mp, bma.firstblock))); + *firstblock = bma.firstblock; + if (cur) + cur->bc_private.b.firstblock = + *firstblock; + if (abno == NULLFSBLOCK) + break; + if ((ifp->if_flags & XFS_IFBROOT) && !cur) { + cur = xfs_btree_init_cursor(mp, + tp, NULL, 0, XFS_BTNUM_BMAP, + ip, whichfork); + cur->bc_private.b.firstblock = + *firstblock; + cur->bc_private.b.flist = flist; + } + /* + * Bump the number of extents we've allocated + * in this call. + */ + nallocs++; + } + if (cur) + cur->bc_private.b.flags = + wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0; + got.br_startoff = aoff; + got.br_startblock = abno; + got.br_blockcount = alen; + got.br_state = XFS_EXT_NORM; /* assume normal */ + /* + * Determine state of extent, and the filesystem. + * A wasdelay extent has been initialized, so + * shouldn't be flagged as unwritten. + */ + if (wr && XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { + if (!wasdelay && (flags & XFS_BMAPI_PREALLOC)) + got.br_state = XFS_EXT_UNWRITTEN; + } + error = xfs_bmap_add_extent(ip, lastx, &cur, &got, + firstblock, flist, &tmp_logflags, whichfork, + rsvd); + logflags |= tmp_logflags; + if (error) + goto error0; + lastx = ifp->if_lastex; + ep = &ifp->if_u1.if_extents[lastx]; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + xfs_bmbt_get_all(ep, &got); + ASSERT(got.br_startoff <= aoff); + ASSERT(got.br_startoff + got.br_blockcount >= + aoff + alen); +#ifdef DEBUG + if (delay) { + ASSERT(ISNULLSTARTBLOCK(got.br_startblock)); + ASSERT(STARTBLOCKVAL(got.br_startblock) > 0); + } + ASSERT(got.br_state == XFS_EXT_NORM || + got.br_state == XFS_EXT_UNWRITTEN); +#endif + /* + * Fall down into the found allocated space case. + */ + } else if (inhole) { + /* + * Reading in a hole. + */ + mval->br_startoff = bno; + mval->br_startblock = HOLESTARTBLOCK; + mval->br_blockcount = + XFS_FILBLKS_MIN(len, got.br_startoff - bno); + mval->br_state = XFS_EXT_NORM; + bno += mval->br_blockcount; + len -= mval->br_blockcount; + mval++; + n++; + continue; + } + /* + * Then deal with the allocated space we found. 
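+ * If trim is set (XFS_BMAPI_ENTIRE clear), the mapping is
+ * clipped to the caller's original [obno, end) range;
+ * otherwise the extent is returned whole.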
+ */ + ASSERT(ep != NULL); + if (trim && (got.br_startoff + got.br_blockcount > obno)) { + if (obno > bno) + bno = obno; + ASSERT((bno >= obno) || (n == 0)); + ASSERT(bno < end); + mval->br_startoff = bno; + if (ISNULLSTARTBLOCK(got.br_startblock)) { + ASSERT(!wr || delay); + mval->br_startblock = DELAYSTARTBLOCK; + } else + mval->br_startblock = + got.br_startblock + + (bno - got.br_startoff); + /* + * Return the minimum of what we got and what we + * asked for for the length. We can use the len + * variable here because it is modified below + * and we could have been there before coming + * here if the first part of the allocation + * didn't overlap what was asked for. + */ + mval->br_blockcount = + XFS_FILBLKS_MIN(end - bno, got.br_blockcount - + (bno - got.br_startoff)); + mval->br_state = got.br_state; + ASSERT(mval->br_blockcount <= len); + } else { + *mval = got; + if (ISNULLSTARTBLOCK(mval->br_startblock)) { + ASSERT(!wr || delay); + mval->br_startblock = DELAYSTARTBLOCK; + } + } + + /* + * Check if writing previously allocated but + * unwritten extents. + */ + if (wr && mval->br_state == XFS_EXT_UNWRITTEN && + ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) { + /* + * Modify (by adding) the state flag, if writing. + */ + ASSERT(mval->br_blockcount <= len); + if ((ifp->if_flags & XFS_IFBROOT) && !cur) { + cur = xfs_btree_init_cursor(mp, + tp, NULL, 0, XFS_BTNUM_BMAP, + ip, whichfork); + cur->bc_private.b.firstblock = + *firstblock; + cur->bc_private.b.flist = flist; + } + mval->br_state = XFS_EXT_NORM; + error = xfs_bmap_add_extent(ip, lastx, &cur, mval, + firstblock, flist, &tmp_logflags, whichfork, + rsvd); + logflags |= tmp_logflags; + if (error) + goto error0; + lastx = ifp->if_lastex; + ep = &ifp->if_u1.if_extents[lastx]; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + xfs_bmbt_get_all(ep, &got); + /* + * We may have combined previously unwritten + * space with written space, so generate + * another request. + */ + if (mval->br_blockcount < len) + continue; + } + + ASSERT(!trim || + ((mval->br_startoff + mval->br_blockcount) <= end)); + ASSERT(!trim || (mval->br_blockcount <= len) || + (mval->br_startoff < obno)); + bno = mval->br_startoff + mval->br_blockcount; + len = end - bno; + if (n > 0 && mval->br_startoff == mval[-1].br_startoff) { + ASSERT(mval->br_startblock == mval[-1].br_startblock); + ASSERT(mval->br_blockcount > mval[-1].br_blockcount); + ASSERT(mval->br_state == mval[-1].br_state); + mval[-1].br_blockcount = mval->br_blockcount; + mval[-1].br_state = mval->br_state; + } else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK && + mval[-1].br_startblock != DELAYSTARTBLOCK && + mval[-1].br_startblock != HOLESTARTBLOCK && + mval->br_startblock == + mval[-1].br_startblock + mval[-1].br_blockcount && + (stateless || mval[-1].br_state == mval->br_state)) { + ASSERT(mval->br_startoff == + mval[-1].br_startoff + mval[-1].br_blockcount); + mval[-1].br_blockcount += mval->br_blockcount; + } else if (n > 0 && + mval->br_startblock == DELAYSTARTBLOCK && + mval[-1].br_startblock == DELAYSTARTBLOCK && + mval->br_startoff == + mval[-1].br_startoff + mval[-1].br_blockcount) { + mval[-1].br_blockcount += mval->br_blockcount; + mval[-1].br_state = mval->br_state; + } else if (!((n == 0) && + ((mval->br_startoff + mval->br_blockcount) <= + obno))) { + mval++; + n++; + } + /* + * If we're done, stop now. Stop when we've allocated + * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise + * the transaction may get too big. 
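+ * (For writes, *nmap was asserted to be at most
+ * XFS_BMAP_MAX_NMAP on entry, so the nallocs test
+ * below enforces exactly that cap.)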
+ */ + if (bno >= end || n >= *nmap || nallocs >= *nmap) + break; + /* + * Else go on to the next record. + */ + ep++; + lastx++; + if (lastx >= nextents) { + eof = 1; + prev = got; + } else + xfs_bmbt_get_all(ep, &got); + } + ifp->if_lastex = lastx; + *nmap = n; + /* + * Transform from btree to extents, give it cur. + */ + if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && + XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) { + ASSERT(wr && cur); + error = xfs_bmap_btree_to_extents(tp, ip, cur, + &tmp_logflags, whichfork); + logflags |= tmp_logflags; + if (error) + goto error0; + } + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || + XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max); + error = 0; + +error0: + /* + * Log everything. Do this after conversion, there's no point in + * logging the extent list if we've converted to btree format. + */ + if ((logflags & XFS_ILOG_FEXT(whichfork)) && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) + logflags &= ~XFS_ILOG_FEXT(whichfork); + else if ((logflags & XFS_ILOG_FBROOT(whichfork)) && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) + logflags &= ~XFS_ILOG_FBROOT(whichfork); + /* + * Log whatever the flags say, even if error. Otherwise we might miss + * detecting a case where the data is changed, there's an error, + * and it's not logged so we don't shutdown when we should. + */ + if (logflags) { + ASSERT(tp && wr); + xfs_trans_log_inode(tp, ip, logflags); + } + if (cur) { + if (!error) { + ASSERT(*firstblock == NULLFSBLOCK || + XFS_FSB_TO_AGNO(mp, *firstblock) == + XFS_FSB_TO_AGNO(mp, + cur->bc_private.b.firstblock) || + (flist->xbf_low && + XFS_FSB_TO_AGNO(mp, *firstblock) < + XFS_FSB_TO_AGNO(mp, + cur->bc_private.b.firstblock))); + *firstblock = cur->bc_private.b.firstblock; + } + xfs_btree_del_cursor(cur, + error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + } + if (!error) + xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, + orig_nmap, *nmap); + return error; +} + +/* + * Map file blocks to filesystem blocks, simple version. + * One block (extent) only, read-only. + * For flags, only the XFS_BMAPI_ATTRFORK flag is examined. + * For the other flag values, the effect is as if XFS_BMAPI_METADATA + * was set and all the others were clear. + */ +int /* error */ +xfs_bmapi_single( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + int whichfork, /* data or attr fork */ + xfs_fsblock_t *fsb, /* output: mapped block */ + xfs_fileoff_t bno) /* starting file offs. 
mapped */ +{ + int eof; /* we've hit the end of extent list */ + int error; /* error return */ + xfs_bmbt_irec_t got; /* current extent list record */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t lastx; /* last useful extent number */ + xfs_bmbt_irec_t prev; /* previous extent list record */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + if (unlikely( + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) { + XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return XFS_ERROR(EIO); + XFS_STATS_INC(xfsstats.xs_blk_mapr); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + (void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, + &prev); + /* + * Reading past eof, act as though there's a hole + * up to end. + */ + if (eof || got.br_startoff > bno) { + *fsb = NULLFSBLOCK; + return 0; + } + ASSERT(!ISNULLSTARTBLOCK(got.br_startblock)); + ASSERT(bno < got.br_startoff + got.br_blockcount); + *fsb = got.br_startblock + (bno - got.br_startoff); + ifp->if_lastex = lastx; + return 0; +} + +/* + * Unmap (remove) blocks from a file. + * If nexts is nonzero then the number of extents to remove is limited to + * that value. If not all extents in the block range can be removed then + * *done is set. + */ +int /* error */ +xfs_bunmapi( + xfs_trans_t *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t bno, /* starting offset to unmap */ + xfs_filblks_t len, /* length to unmap in file */ + int flags, /* misc flags */ + xfs_extnum_t nexts, /* number of extents max */ + xfs_fsblock_t *firstblock, /* first allocated block + controls a.g. for allocs */ + xfs_bmap_free_t *flist, /* i/o: list extents to free */ + int *done) /* set if not done yet */ +{ + xfs_btree_cur_t *cur; /* bmap btree cursor */ + xfs_bmbt_irec_t del; /* extent being deleted */ + int eof; /* is deleting at eof */ + xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + int error; /* error return value */ + xfs_extnum_t extno; /* extent number in list */ + xfs_bmbt_irec_t got; /* current extent list entry */ + xfs_ifork_t *ifp; /* inode fork pointer */ + int isrt; /* freeing in rt area */ + xfs_extnum_t lastx; /* last extent index used */ + int logflags; /* transaction logging flags */ + xfs_extlen_t mod; /* rt extent offset */ + xfs_mount_t *mp; /* mount structure */ + xfs_extnum_t nextents; /* size of extent list */ + xfs_bmbt_irec_t prev; /* previous extent list entry */ + xfs_fileoff_t start; /* first file offset deleted */ + int tmp_logflags; /* partial logging flags */ + int wasdel; /* was a delayed alloc extent */ + int whichfork; /* data or attribute fork */ + int rsvd; /* OK to allocate reserved blocks */ + xfs_fsblock_t sum; + + xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address); + whichfork = (flags & XFS_BMAPI_ATTRFORK) ? 
+ XFS_ATTR_FORK : XFS_DATA_FORK; + ifp = XFS_IFORK_PTR(ip, whichfork); + if (unlikely( + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { + XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + mp = ip->i_mount; + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; + ASSERT(len > 0); + ASSERT(nexts >= 0); + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (nextents == 0) { + *done = 1; + return 0; + } + XFS_STATS_INC(xfsstats.xs_blk_unmap); + isrt = (whichfork == XFS_DATA_FORK) && + (ip->i_d.di_flags & XFS_DIFLAG_REALTIME); + start = bno; + bno = start + len - 1; + ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, + &prev); + /* + * Check to see if the given block number is past the end of the + * file, back up to the last block if so... + */ + if (eof) { + ep = &ifp->if_u1.if_extents[--lastx]; + xfs_bmbt_get_all(ep, &got); + bno = got.br_startoff + got.br_blockcount - 1; + } + logflags = 0; + if (ifp->if_flags & XFS_IFBROOT) { + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); + cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip, + whichfork); + cur->bc_private.b.firstblock = *firstblock; + cur->bc_private.b.flist = flist; + cur->bc_private.b.flags = 0; + } else + cur = NULL; + extno = 0; + while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 && + (nexts == 0 || extno < nexts)) { + /* + * Is the found extent after a hole in which bno lives? + * Just back up to the previous extent, if so. + */ + if (got.br_startoff > bno) { + if (--lastx < 0) + break; + ep--; + xfs_bmbt_get_all(ep, &got); + } + /* + * Is the last block of this extent before the range + * we're supposed to delete? If so, we're done. + */ + bno = XFS_FILEOFF_MIN(bno, + got.br_startoff + got.br_blockcount - 1); + if (bno < start) + break; + /* + * Then deal with the (possibly delayed) allocated space + * we found. + */ + ASSERT(ep != NULL); + del = got; + wasdel = ISNULLSTARTBLOCK(del.br_startblock); + if (got.br_startoff < start) { + del.br_startoff = start; + del.br_blockcount -= start - got.br_startoff; + if (!wasdel) + del.br_startblock += start - got.br_startoff; + } + if (del.br_startoff + del.br_blockcount > bno + 1) + del.br_blockcount = bno + 1 - del.br_startoff; + sum = del.br_startblock + del.br_blockcount; + if (isrt && + (mod = do_mod(sum, mp->m_sb.sb_rextsize))) { + /* + * Realtime extent not lined up at the end. + * The extent could have been split into written + * and unwritten pieces, or we could just be + * unmapping part of it. But we can't really + * get rid of part of a realtime extent. + */ + if (del.br_state == XFS_EXT_UNWRITTEN || + !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { + /* + * This piece is unwritten, or we're not + * using unwritten extents. Skip over it. + */ + ASSERT(bno >= mod); + bno -= mod > del.br_blockcount ? + del.br_blockcount : mod; + if (bno < got.br_startoff) { + if (--lastx >= 0) + xfs_bmbt_get_all(--ep, &got); + } + continue; + } + /* + * It's written, turn it unwritten. + * This is better than zeroing it. 
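+			 * (An unwritten extent reads back as zeroes without
+			 * touching the disk; zeroing would mean writing every
+			 * block of the extent right now.)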
+ */ + ASSERT(del.br_state == XFS_EXT_NORM); + ASSERT(xfs_trans_get_block_res(tp) > 0); + /* + * If this spans a realtime extent boundary, + * chop it back to the start of the one we end at. + */ + if (del.br_blockcount > mod) { + del.br_startoff += del.br_blockcount - mod; + del.br_startblock += del.br_blockcount - mod; + del.br_blockcount = mod; + } + del.br_state = XFS_EXT_UNWRITTEN; + error = xfs_bmap_add_extent(ip, lastx, &cur, &del, + firstblock, flist, &logflags, XFS_DATA_FORK, 0); + if (error) + goto error0; + goto nodelete; + } + if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) { + /* + * Realtime extent is lined up at the end but not + * at the front. We'll get rid of full extents if + * we can. + */ + mod = mp->m_sb.sb_rextsize - mod; + if (del.br_blockcount > mod) { + del.br_blockcount -= mod; + del.br_startoff += mod; + del.br_startblock += mod; + } else if ((del.br_startoff == start && + (del.br_state == XFS_EXT_UNWRITTEN || + xfs_trans_get_block_res(tp) == 0)) || + !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { + /* + * Can't make it unwritten. There isn't + * a full extent here so just skip it. + */ + ASSERT(bno >= del.br_blockcount); + bno -= del.br_blockcount; + if (bno < got.br_startoff) { + if (--lastx >= 0) + xfs_bmbt_get_all(--ep, &got); + } + continue; + } else if (del.br_state == XFS_EXT_UNWRITTEN) { + /* + * This one is already unwritten. + * It must have a written left neighbor. + * Unwrite the killed part of that one and + * try again. + */ + ASSERT(lastx > 0); + xfs_bmbt_get_all(ep - 1, &prev); + ASSERT(prev.br_state == XFS_EXT_NORM); + ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock)); + ASSERT(del.br_startblock == + prev.br_startblock + prev.br_blockcount); + if (prev.br_startoff < start) { + mod = start - prev.br_startoff; + prev.br_blockcount -= mod; + prev.br_startblock += mod; + prev.br_startoff = start; + } + prev.br_state = XFS_EXT_UNWRITTEN; + error = xfs_bmap_add_extent(ip, lastx - 1, &cur, + &prev, firstblock, flist, &logflags, + XFS_DATA_FORK, 0); + if (error) + goto error0; + goto nodelete; + } else { + ASSERT(del.br_state == XFS_EXT_NORM); + del.br_state = XFS_EXT_UNWRITTEN; + error = xfs_bmap_add_extent(ip, lastx, &cur, + &del, firstblock, flist, &logflags, + XFS_DATA_FORK, 0); + if (error) + goto error0; + goto nodelete; + } + } + if (wasdel) { + ASSERT(STARTBLOCKVAL(del.br_startblock) > 0); + xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, + (int)del.br_blockcount, rsvd); + /* Unreserve our quota space */ + XFS_TRANS_RESERVE_QUOTA_NBLKS( + mp, NULL, ip, -((long)del.br_blockcount), 0, + isrt ? XFS_QMOPT_RES_RTBLKS : + XFS_QMOPT_RES_REGBLKS); + ip->i_delayed_blks -= del.br_blockcount; + if (cur) + cur->bc_private.b.flags |= + XFS_BTCUR_BPRV_WASDEL; + } else if (cur) + cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL; + /* + * If it's the case where the directory code is running + * with no block reservation, and the deleted block is in + * the middle of its extent, and the resulting insert + * of an extent would cause transformation to btree format, + * then reject it. The calling code will then swap + * blocks around instead. + * We have to do this now, rather than waiting for the + * conversion to btree format, since the transaction + * will be dirty. 
+ */ + if (!wasdel && xfs_trans_get_block_res(tp) == 0 && + XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max && + del.br_startoff > got.br_startoff && + del.br_startoff + del.br_blockcount < + got.br_startoff + got.br_blockcount) { + error = XFS_ERROR(ENOSPC); + goto error0; + } + error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del, + &tmp_logflags, whichfork, rsvd); + logflags |= tmp_logflags; + if (error) + goto error0; + bno = del.br_startoff - 1; +nodelete: + lastx = ifp->if_lastex; + /* + * If not done go on to the next (previous) record. + * Reset ep in case the extents array was re-alloced. + */ + ep = &ifp->if_u1.if_extents[lastx]; + if (bno != (xfs_fileoff_t)-1 && bno >= start) { + if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) || + xfs_bmbt_get_startoff(ep) > bno) { + lastx--; + ep--; + } + if (lastx >= 0) + xfs_bmbt_get_all(ep, &got); + extno++; + } + } + ifp->if_lastex = lastx; + *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0; + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + /* + * Convert to a btree if necessary. + */ + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) { + ASSERT(cur == NULL); + error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, + &cur, 0, &tmp_logflags, whichfork); + logflags |= tmp_logflags; + if (error) + goto error0; + } + /* + * transform from btree to extents, give it cur + */ + else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && + XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) { + ASSERT(cur != NULL); + error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, + whichfork); + logflags |= tmp_logflags; + if (error) + goto error0; + } + /* + * transform from extents to local? + */ + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + error = 0; +error0: + /* + * Log everything. Do this after conversion, there's no point in + * logging the extent list if we've converted to btree format. + */ + if ((logflags & XFS_ILOG_FEXT(whichfork)) && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) + logflags &= ~XFS_ILOG_FEXT(whichfork); + else if ((logflags & XFS_ILOG_FBROOT(whichfork)) && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) + logflags &= ~XFS_ILOG_FBROOT(whichfork); + /* + * Log inode even in the error case, if the transaction + * is dirty we'll need to shut down the filesystem. + */ + if (logflags) + xfs_trans_log_inode(tp, ip, logflags); + if (cur) { + if (!error) { + *firstblock = cur->bc_private.b.firstblock; + cur->bc_private.b.allocated = 0; + } + xfs_btree_del_cursor(cur, + error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + } + return error; +} + +/* + * Fcntl interface to xfs_bmapi. 
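+ * Reached from user space via the XFS_IOC_GETBMAP ioctl (or its
+ * extended getbmapx variant, selected here by BMV_IF_EXTENDED).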
+ */ +int /* error code */ +xfs_getbmap( + bhv_desc_t *bdp, /* XFS behavior descriptor*/ + struct getbmap *bmv, /* user bmap structure */ + void *ap, /* pointer to user's array */ + int interface) /* interface flags */ +{ + __int64_t bmvend; /* last block requested */ + int error; /* return value */ + __int64_t fixlen; /* length for -1 case */ + int i; /* extent number */ + xfs_inode_t *ip; /* xfs incore inode pointer */ + vnode_t *vp; /* corresponding vnode */ + int lock; /* lock state */ + xfs_bmbt_irec_t *map; /* buffer for user's data */ + xfs_mount_t *mp; /* file system mount point */ + int nex; /* # of user extents can do */ + int nexleft; /* # of user extents left */ + int subnex; /* # of bmapi's can do */ + int nmap; /* number of map entries */ + struct getbmap out; /* output structure */ + int whichfork; /* data or attr fork */ + int prealloced; /* this is a file with + * preallocated data space */ + int sh_unwritten; /* true, if unwritten */ + /* extents listed separately */ + int bmapi_flags; /* flags for xfs_bmapi */ + __int32_t oflags; /* getbmapx bmv_oflags field */ + + vp = BHV_TO_VNODE(bdp); + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; + sh_unwritten = (interface & BMV_IF_PREALLOC) != 0; + + /* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not + * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ + * bit is set for the file, generate a read event in order + * that the DMAPI application may do its thing before we return + * the extents. Usually this means restoring user file data to + * regions of the file that look like holes. + * + * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify + * BMV_IF_NO_DMAPI_READ so that read events are generated. + * If this were not true, callers of ioctl( XFS_IOC_GETBMAP ) + * could misinterpret holes in a DMAPI file as true holes, + * when in fact they may represent offline user data. 
+ */ + if ( (interface & BMV_IF_NO_DMAPI_READ) == 0 + && DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) + && whichfork == XFS_DATA_FORK) { + + error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, 0, 0, 0, NULL); + if (error) + return XFS_ERROR(error); + } + + if (whichfork == XFS_ATTR_FORK) { + if (XFS_IFORK_Q(ip)) { + if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE && + ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) + return XFS_ERROR(EINVAL); + } else if (unlikely( + ip->i_d.di_aformat != 0 && + ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) { + XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + } else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_format != XFS_DINODE_FMT_BTREE && + ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) + return XFS_ERROR(EINVAL); + if (whichfork == XFS_DATA_FORK) { + if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) { + prealloced = 1; + fixlen = XFS_MAX_FILE_OFFSET; + } else { + prealloced = 0; + fixlen = ip->i_d.di_size; + } + } else { + prealloced = 0; + fixlen = 1LL << 32; + } + + if (bmv->bmv_length == -1) { + fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen)); + bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset), + (__int64_t)0); + } else if (bmv->bmv_length < 0) + return XFS_ERROR(EINVAL); + if (bmv->bmv_length == 0) { + bmv->bmv_entries = 0; + return 0; + } + nex = bmv->bmv_count - 1; + if (nex <= 0) + return XFS_ERROR(EINVAL); + bmvend = bmv->bmv_offset + bmv->bmv_length; + + xfs_ilock(ip, XFS_IOLOCK_SHARED); + + if (whichfork == XFS_DATA_FORK && ip->i_delayed_blks) { + /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */ + VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error); + } + + ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0); + + lock = xfs_ilock_map_shared(ip); + + /* + * Don't let nex be bigger than the number of extents + * we can have assuming alternating holes and real extents. + */ + if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) + nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; + + bmapi_flags = XFS_BMAPI_AFLAG(whichfork) | + ((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE); + + /* + * Allocate enough space to handle "subnex" maps at a time. + */ + subnex = 16; + map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP); + + bmv->bmv_entries = 0; + + if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0) { + error = 0; + goto unlock_and_return; + } + + nexleft = nex; + + do { + nmap = (nexleft > subnex) ? subnex : nexleft; + error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), + XFS_BB_TO_FSB(mp, bmv->bmv_length), + bmapi_flags, NULL, 0, map, &nmap, NULL); + if (error) + goto unlock_and_return; + ASSERT(nmap <= subnex); + + for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) { + nexleft--; + oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ? + BMV_OF_PREALLOC : 0; + out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff); + out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount); + ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); + if (prealloced && + map[i].br_startblock == HOLESTARTBLOCK && + out.bmv_offset + out.bmv_length == bmvend) { + /* + * came to hole at end of file + */ + goto unlock_and_return; + } else { + out.bmv_block = + (map[i].br_startblock == HOLESTARTBLOCK) ? + -1 : + XFS_FSB_TO_DB(ip, map[i].br_startblock); + + /* return either getbmap/getbmapx structure. 
*/ + if (interface & BMV_IF_EXTENDED) { + struct getbmapx outx; + + GETBMAP_CONVERT(out,outx); + outx.bmv_oflags = oflags; + outx.bmv_unused1 = outx.bmv_unused2 = 0; + if (copy_to_user(ap, &outx, + sizeof(outx))) { + error = XFS_ERROR(EFAULT); + goto unlock_and_return; + } + } else { + if (copy_to_user(ap, &out, + sizeof(out))) { + error = XFS_ERROR(EFAULT); + goto unlock_and_return; + } + } + bmv->bmv_offset = + out.bmv_offset + out.bmv_length; + bmv->bmv_length = MAX((__int64_t)0, + (__int64_t)(bmvend - bmv->bmv_offset)); + bmv->bmv_entries++; + ap = (interface & BMV_IF_EXTENDED) ? + (void *)((struct getbmapx *)ap + 1) : + (void *)((struct getbmap *)ap + 1); + } + } + } while (nmap && nexleft && bmv->bmv_length); + +unlock_and_return: + xfs_iunlock_map_shared(ip, lock); + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + + kmem_free(map, subnex * sizeof(*map)); + + return error; +} + +/* + * Check the last inode extent to determine whether this allocation will result + * in blocks being allocated at the end of the file. When we allocate new data + * blocks at the end of the file which do not start at the previous data block, + * we will try to align the new blocks at stripe unit boundaries. + */ +int /* error */ +xfs_bmap_isaeof( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fileoff_t off, /* file offset in fsblocks */ + int whichfork, /* data or attribute fork */ + char *aeof) /* return value */ +{ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ + xfs_extnum_t nextents; /* size of extent list */ + xfs_bmbt_irec_t s; /* expanded extent list entry */ + + ASSERT(whichfork == XFS_DATA_FORK); + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(NULL, ip, whichfork))) + return error; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (nextents == 0) { + *aeof = 1; + return 0; + } + /* + * Go to the last extent + */ + lastrec = &ifp->if_u1.if_extents[nextents - 1]; + xfs_bmbt_get_all(lastrec, &s); + /* + * Check we are allocating in the last extent (for delayed allocations) + * or past the last extent for non-delayed allocations. + */ + *aeof = (off >= s.br_startoff && + off < s.br_startoff + s.br_blockcount && + ISNULLSTARTBLOCK(s.br_startblock)) || + off >= s.br_startoff + s.br_blockcount; + return 0; +} + +/* + * Check if the endoff is outside the last extent. If so the caller will grow + * the allocation to a stripe unit boundary. 
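+ *
+ * A call such as xfs_bmap_eof(ip, endoff, XFS_DATA_FORK, &eof)
+ * sets *eof when endoff lies beyond the last extent; only the data
+ * fork is supported (see the ASSERT below).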
+ */ +int /* error */ +xfs_bmap_eof( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fileoff_t endoff, /* file offset in fsblocks */ + int whichfork, /* data or attribute fork */ + int *eof) /* result value */ +{ + xfs_fsblock_t blockcount; /* extent block count */ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ + xfs_extnum_t nextents; /* size of extent list */ + xfs_fileoff_t startoff; /* extent starting file offset */ + + ASSERT(whichfork == XFS_DATA_FORK); + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(NULL, ip, whichfork))) + return error; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (nextents == 0) { + *eof = 1; + return 0; + } + /* + * Go to the last extent + */ + lastrec = &ifp->if_u1.if_extents[nextents - 1]; + startoff = xfs_bmbt_get_startoff(lastrec); + blockcount = xfs_bmbt_get_blockcount(lastrec); + *eof = endoff >= startoff + blockcount; + return 0; +} + +#ifdef XFSDEBUG +/* + * Check that the extents list for the inode ip is in the right order. + */ +STATIC void +xfs_bmap_check_extents( + xfs_inode_t *ip, /* incore inode pointer */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extents list */ + xfs_bmbt_rec_t *ep; /* current extent entry */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extents in list */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + base = ifp->if_u1.if_extents; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + for (ep = base; ep < &base[nextents - 1]; ep++) { + xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, + (void *)(ep + 1)); + } +} + +STATIC +xfs_buf_t * +xfs_bmap_get_bp( + xfs_btree_cur_t *cur, + xfs_fsblock_t bno) +{ + int i; + xfs_buf_t *bp; + + if (!cur) + return(NULL); + + bp = NULL; + for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) { + bp = cur->bc_bufs[i]; + if (!bp) break; + if (XFS_BUF_ADDR(bp) == bno) + break; /* Found it */ + } + if (i == XFS_BTREE_MAXLEVELS) + bp = NULL; + + if (!bp) { /* Chase down all the log items to see if the bp is there */ + xfs_log_item_chunk_t *licp; + xfs_trans_t *tp; + + tp = cur->bc_tp; + licp = &tp->t_items; + while (!bp && licp != NULL) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { + licp = licp->lic_next; + continue; + } + for (i = 0; i < licp->lic_unused; i++) { + xfs_log_item_desc_t *lidp; + xfs_log_item_t *lip; + xfs_buf_log_item_t *bip; + xfs_buf_t *lbp; + + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lidp = XFS_LIC_SLOT(licp, i); + lip = lidp->lid_item; + if (lip->li_type != XFS_LI_BUF) + continue; + + bip = (xfs_buf_log_item_t *)lip; + lbp = bip->bli_buf; + + if (XFS_BUF_ADDR(lbp) == bno) { + bp = lbp; + break; /* Found it */ + } + } + licp = licp->lic_next; + } + } + return(bp); +} + +void +xfs_check_block( + xfs_bmbt_block_t *block, + xfs_mount_t *mp, + int root, + short sz) +{ + int i, j, dmxr; + xfs_bmbt_ptr_t *pp, *thispa; /* pointer to block address */ + xfs_bmbt_key_t *prevp, *keyp; + + ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); + + prevp = NULL; + for( i = 1; i <= INT_GET(block->bb_numrecs, ARCH_CONVERT);i++) { + dmxr = mp->m_bmap_dmxr[0]; + + if (root) { + keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz); + } else { + keyp = XFS_BTREE_KEY_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, i, dmxr); + } + + if (prevp) { + xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp); + } + prevp = keyp; + + /* + * Compare the 
block numbers to see if there are dups. + */ + + if (root) { + pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz); + } else { + pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, i, dmxr); + } + for (j = i+1; j <= INT_GET(block->bb_numrecs, ARCH_CONVERT); j++) { + if (root) { + thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz); + } else { + thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, j, dmxr); + } + if (INT_GET(*thispa, ARCH_CONVERT) == + INT_GET(*pp, ARCH_CONVERT)) { + cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld", + __FUNCTION__, j, i, + INT_GET(*thispa, ARCH_CONVERT)); + panic("%s: ptrs are equal in node\n", + __FUNCTION__); + } + } + } +} + +/* + * Check that the extents for the inode ip are in the right order in all + * btree leaves. + */ + +STATIC void +xfs_bmap_check_leaf_extents( + xfs_btree_cur_t *cur, /* btree cursor or null */ + xfs_inode_t *ip, /* incore inode pointer */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_block_t *block; /* current btree block */ + xfs_fsblock_t bno; /* block # of "block" */ + xfs_buf_t *bp; /* buffer for "block" */ + int error; /* error return value */ + xfs_extnum_t i=0; /* index into the extents list */ + xfs_ifork_t *ifp; /* fork structure */ + int level; /* btree level, for checking */ + xfs_mount_t *mp; /* file system mount structure */ + xfs_bmbt_ptr_t *pp; /* pointer to block address */ + xfs_bmbt_rec_t *ep, *lastp; /* extent pointers in block entry */ + int bp_release = 0; + + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) { + return; + } + + bno = NULLFSBLOCK; + mp = ip->i_mount; + ifp = XFS_IFORK_PTR(ip, whichfork); + block = ifp->if_broot; + /* + * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. + */ + ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); + level = INT_GET(block->bb_level, ARCH_CONVERT); + xfs_check_block(block, mp, 1, ifp->if_broot_bytes); + pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); + ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); + ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); + ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); + bno = INT_GET(*pp, ARCH_CONVERT); + /* + * Go down the tree until leaf level is reached, following the first + * pointer (leftmost) at each level. + */ + while (level-- > 0) { + /* See if buf is in cur first */ + bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); + if (bp) { + bp_release = 0; + } else { + bp_release = 1; + } + if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, + XFS_BMAP_BTREE_REF))) + goto error_norelse; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + XFS_WANT_CORRUPTED_GOTO( + XFS_BMAP_SANITY_CHECK(mp, block, level), + error0); + if (level == 0) + break; + + /* + * Check this block for basic sanity (increasing keys and + * no duplicate blocks). + */ + + xfs_check_block(block, mp, 0, 0); + pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, + 1, mp->m_bmap_dmxr[1]); + XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)), error0); + bno = INT_GET(*pp, ARCH_CONVERT); + if (bp_release) { + bp_release = 0; + xfs_trans_brelse(NULL, bp); + } + } + + /* + * Here with bp and block set to the leftmost leaf node in the tree. + */ + i = 0; + + /* + * Loop over all leaf nodes checking that all extents are in the right order. 
+ */
+	lastp = NULL;
+	for (;;) {
+		xfs_bmbt_rec_t	*frp;
+		xfs_fsblock_t	nextbno;
+		xfs_extnum_t	num_recs;
+
+
+		num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+
+		/*
+		 * Read-ahead the next leaf block, if any.
+		 */
+
+		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+
+		/*
+		 * Check all the extents to make sure they are OK.
+		 * If we had a previous block, the last entry should
+		 * conform with the first entry in this one.
+		 */
+
+		frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
+			block, 1, mp->m_bmap_dmxr[0]);
+
+		for (ep = frp; ep < frp + (num_recs - 1); ep++) {
+			if (lastp) {
+				xfs_btree_check_rec(XFS_BTNUM_BMAP,
+					(void *)lastp, (void *)ep);
+			}
+			xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
+				(void *)(ep + 1));
+		}
+		lastp = frp + num_recs - 1; /* For the next iteration */
+
+		i += num_recs;
+		if (bp_release) {
+			bp_release = 0;
+			xfs_trans_brelse(NULL, bp);
+		}
+		bno = nextbno;
+		/*
+		 * If we've reached the end, stop.
+		 */
+		if (bno == NULLFSBLOCK)
+			break;
+
+		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
+		if (bp) {
+			bp_release = 0;
+		} else {
+			bp_release = 1;
+		}
+		if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
+				XFS_BMAP_BTREE_REF)))
+			goto error_norelse;
+		block = XFS_BUF_TO_BMBT_BLOCK(bp);
+	}
+	if (bp_release) {
+		bp_release = 0;
+		xfs_trans_brelse(NULL, bp);
+	}
+	return;
+
+error0:
+	cmn_err(CE_WARN, "%s: at error0", __FUNCTION__);
+	if (bp_release)
+		xfs_trans_brelse(NULL, bp);
+error_norelse:
+	cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
+		__FUNCTION__, i);
+	panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__);
+	return;
+}
+#endif
+
+/*
+ * Count fsblocks of the given fork.
+ */
+int						/* error */
+xfs_bmap_count_blocks(
+	xfs_trans_t		*tp,		/* transaction pointer */
+	xfs_inode_t		*ip,		/* incore inode */
+	int			whichfork,	/* data or attr fork */
+	int			*count)		/* out: count of blocks */
+{
+	xfs_bmbt_block_t	*block;	/* current btree block */
+	xfs_fsblock_t		bno;	/* block # of "block" */
+	xfs_ifork_t		*ifp;	/* fork structure */
+	int			level;	/* btree level, for checking */
+	xfs_mount_t		*mp;	/* file system mount structure */
+	xfs_bmbt_ptr_t		*pp;	/* pointer to block address */
+
+	bno = NULLFSBLOCK;
+	mp = ip->i_mount;
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
+		if (unlikely(xfs_bmap_count_leaves(ifp->if_u1.if_extents,
+			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
+			count) < 0)) {
+			XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
+					 XFS_ERRLEVEL_LOW, mp);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		return 0;
+	}
+
+	/*
+	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+	 */
+	block = ifp->if_broot;
+	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
+	level = INT_GET(block->bb_level, ARCH_CONVERT);
+	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
+	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
+	ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
+	ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
+	bno = INT_GET(*pp, ARCH_CONVERT);
+
+	if (unlikely(xfs_bmap_count_tree(mp, tp, bno, level, count) < 0)) {
+		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
+				 mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	return 0;
+}
+
+/*
+ * Recursively walks each level of a btree
+ * to count total fsblocks in use.
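+ * Note that *count is accumulated into, not zeroed, here: each btree
+ * block visited adds one, and each leaf record adds its blockcount
+ * (via xfs_bmap_count_leaves below).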
+ */
+int					/* error */
+xfs_bmap_count_tree(
+	xfs_mount_t	*mp,		/* file system mount point */
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_fsblock_t	blockno,	/* file system block number */
+	int		levelin,	/* level in btree */
+	int		*count)		/* Count of blocks */
+{
+	int			error;
+	xfs_buf_t		*bp, *nbp;
+	int			level = levelin;
+	xfs_bmbt_ptr_t		*pp;
+	xfs_fsblock_t		bno = blockno;
+	xfs_fsblock_t		nextbno;
+	xfs_bmbt_block_t	*block, *nextblock;
+	int			numrecs;
+	xfs_bmbt_rec_t		*frp;
+
+	if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
+		return error;
+	*count += 1;
+	block = XFS_BUF_TO_BMBT_BLOCK(bp);
+
+	if (--level) {
+		/* Not at node above leaves, count this level of nodes */
+		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+		while (nextbno != NULLFSBLOCK) {
+			if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
+				0, &nbp, XFS_BMAP_BTREE_REF)))
+				return error;
+			*count += 1;
+			nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp);
+			nextbno = INT_GET(nextblock->bb_rightsib, ARCH_CONVERT);
+			xfs_trans_brelse(tp, nbp);
+		}
+
+		/* Dive to the next level */
+		pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
+			xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
+		bno = INT_GET(*pp, ARCH_CONVERT);
+		if (unlikely((error =
+			xfs_bmap_count_tree(mp, tp, bno, level, count)) < 0)) {
+			xfs_trans_brelse(tp, bp);
+			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
+					 XFS_ERRLEVEL_LOW, mp);
+			return XFS_ERROR(EFSCORRUPTED);
+		}
+		xfs_trans_brelse(tp, bp);
+	} else {
+		/* count all level 1 nodes and their leaves */
+		for (;;) {
+			nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+			numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+			frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
+				xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]);
+			if (unlikely(xfs_bmap_count_leaves(frp, numrecs, count) < 0)) {
+				xfs_trans_brelse(tp, bp);
+				XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
+					XFS_ERRLEVEL_LOW, mp);
+				return XFS_ERROR(EFSCORRUPTED);
+			}
+			xfs_trans_brelse(tp, bp);
+			if (nextbno == NULLFSBLOCK)
+				break;
+			bno = nextbno;
+			if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+				XFS_BMAP_BTREE_REF)))
+				return error;
+			*count += 1;
+			block = XFS_BUF_TO_BMBT_BLOCK(bp);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Count leaf blocks given a pointer to an extent list.
+ */
+int
+xfs_bmap_count_leaves(
+	xfs_bmbt_rec_t		*frp,
+	int			numrecs,
+	int			*count)
+{
+	int		b;
+
+	for ( b = 1; b <= numrecs; b++, frp++)
+		*count += xfs_bmbt_disk_get_blockcount(frp);
+	return 0;
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_bmap.h linux.21rc5-ac2/fs/xfs/xfs_bmap.h
--- linux.21rc5/fs/xfs/xfs_bmap.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_bmap.h	2003-05-07 15:41:09.000000000 +0100
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BMAP_H__ +#define __XFS_BMAP_H__ + +struct getbmap; +struct xfs_bmbt_irec; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * List of extents to be free "later". + * The list is kept sorted on xbf_startblock. + */ +typedef struct xfs_bmap_free_item +{ + xfs_fsblock_t xbfi_startblock;/* starting fs block number */ + xfs_extlen_t xbfi_blockcount;/* number of blocks in extent */ + struct xfs_bmap_free_item *xbfi_next; /* link to next entry */ +} xfs_bmap_free_item_t; + +/* + * Header for free extent list. + */ +typedef struct xfs_bmap_free +{ + xfs_bmap_free_item_t *xbf_first; /* list of to-be-free extents */ + int xbf_count; /* count of items on list */ + int xbf_low; /* kludge: alloc in low mode */ +} xfs_bmap_free_t; + +#define XFS_BMAP_MAX_NMAP 4 + +/* + * Flags for xfs_bmapi + */ +#define XFS_BMAPI_WRITE 0x001 /* write operation: allocate space */ +#define XFS_BMAPI_DELAY 0x002 /* delayed write operation */ +#define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */ +#define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */ +#define XFS_BMAPI_EXACT 0x010 /* allocate only to spec'd bounds */ +#define XFS_BMAPI_ATTRFORK 0x020 /* use attribute fork not data */ +#define XFS_BMAPI_ASYNC 0x040 /* bunmapi xactions can be async */ +#define XFS_BMAPI_RSVBLOCKS 0x080 /* OK to alloc. reserved data blocks */ +#define XFS_BMAPI_PREALLOC 0x100 /* preallocation op: unwritten space */ +#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */ + /* combine contig. space */ +#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAPI_AFLAG) +int xfs_bmapi_aflag(int w); +#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w) +#else +#define XFS_BMAPI_AFLAG(w) ((w) == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0) +#endif + +/* + * Special values for xfs_bmbt_irec_t br_startblock field. + */ +#define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL) +#define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL) + +/* + * Trace operations for bmap extent tracing + */ +#define XFS_BMAP_KTRACE_DELETE 1 +#define XFS_BMAP_KTRACE_INSERT 2 +#define XFS_BMAP_KTRACE_PRE_UP 3 +#define XFS_BMAP_KTRACE_POST_UP 4 + +#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */ +#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */ + +#if defined(XFS_ALL_TRACE) +#define XFS_BMAP_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_BMAP_TRACE +#endif + + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_INIT) +void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp); +#define XFS_BMAP_INIT(flp,fbp) xfs_bmap_init(flp,fbp) +#else +#define XFS_BMAP_INIT(flp,fbp) \ + ((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \ + (flp)->xbf_low = 0, *(fbp) = NULLFSBLOCK) +#endif + +/* + * Argument structure for xfs_bmap_alloc. 
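+ * xfs_bmapi fills these fields in before each call to xfs_bmap_alloc
+ * and copies rval, alen, off and low back out into its own state
+ * afterwards (see the bma setup in xfs_bmap.c above).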
+ */
+typedef struct xfs_bmalloca {
+	xfs_fsblock_t		firstblock; /* i/o first block allocated */
+	xfs_fsblock_t		rval;	/* starting block of new extent */
+	xfs_fileoff_t		off;	/* offset in file filling in */
+	struct xfs_trans	*tp;	/* transaction pointer */
+	struct xfs_inode	*ip;	/* incore inode pointer */
+	struct xfs_bmbt_irec	*prevp;	/* extent before the new one */
+	struct xfs_bmbt_irec	*gotp;	/* extent after, or delayed */
+	xfs_extlen_t		alen;	/* i/o length asked/allocated */
+	xfs_extlen_t		total;	/* total blocks needed for xaction */
+	xfs_extlen_t		minlen;	/* minimum allocation size (blocks) */
+	xfs_extlen_t		minleft; /* amount must be left after alloc */
+	char			eof;	/* set if allocating past last extent */
+	char			wasdel;	/* replacing a delayed allocation */
+	char			userdata;/* set if is user data */
+	char			low;	/* low on space, using seq'l ags */
+	char			aeof;	/* allocated space at eof */
+} xfs_bmalloca_t;
+
+#ifdef __KERNEL__
+/*
+ * Convert inode from non-attributed to attributed.
+ * Must not be in a transaction, ip must not be locked.
+ */
+int					/* error code */
+xfs_bmap_add_attrfork(
+	struct xfs_inode	*ip,	/* incore inode pointer */
+	int			rsvd);	/* flag for reserved block allocation */
+
+/*
+ * Add the extent to the list of extents to be free at transaction end.
+ * The list is maintained sorted (by block number).
+ */
+void
+xfs_bmap_add_free(
+	xfs_fsblock_t		bno,	/* fs block number of extent */
+	xfs_filblks_t		len,	/* length of extent */
+	xfs_bmap_free_t		*flist,	/* list of extents */
+	struct xfs_mount	*mp);	/* mount point structure */
+
+/*
+ * Routine to clean up the free list data structure when
+ * an error occurs during a transaction.
+ */
+void
+xfs_bmap_cancel(
+	xfs_bmap_free_t		*flist);	/* free list to clean up */
+
+/*
+ * Routine to check if a specified inode is swap capable.
+ */
+int
+xfs_bmap_check_swappable(
+	struct xfs_inode	*ip);	/* incore inode */
+
+/*
+ * Compute and fill in the value of the maximum depth of a bmap btree
+ * in this filesystem. Done once, during mount.
+ */
+void
+xfs_bmap_compute_maxlevels(
+	struct xfs_mount	*mp,	/* file system mount structure */
+	int			whichfork);	/* data or attr fork */
+
+/*
+ * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
+ * caller. Frees all the extents that need freeing, which must be done
+ * last due to locking considerations.
+ *
+ * Return 1 if the given transaction was committed and a new one allocated,
+ * and 0 otherwise.
+ */
+int						/* error */
+xfs_bmap_finish(
+	struct xfs_trans	**tp,		/* transaction pointer addr */
+	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
+	xfs_fsblock_t		firstblock,	/* controlled a.g. for allocs */
+	int			*committed);	/* xact committed or not */
+
+/*
+ * Returns the file-relative block number of the first unused block in the file.
+ * This is the lowest-address hole if the file has holes, else the first block
+ * past the end of file.
+ */
+int						/* error */
+xfs_bmap_first_unused(
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	xfs_extlen_t		len,		/* size of hole to find */
+	xfs_fileoff_t		*unused,	/* unused block num */
+	int			whichfork);	/* data or attr fork */
+
+/*
+ * Returns the file-relative block number of the last block + 1 before
+ * last_block (input value) in the file.
+ * This is not based on i_size, it is based on the extent list.
+ * Returns 0 for local files, as they do not have an extent list. 
+ */ +int /* error */ +xfs_bmap_last_before( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t *last_block, /* last block */ + int whichfork); /* data or attr fork */ + +/* + * Returns the file-relative block number of the first block past eof in + * the file. This is not based on i_size, it is based on the extent list. + * Returns 0 for local files, as they do not have an extent list. + */ +int /* error */ +xfs_bmap_last_offset( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t *unused, /* last block num */ + int whichfork); /* data or attr fork */ + +/* + * Returns whether the selected fork of the inode has exactly one + * block or not. For the data fork we check this matches di_size, + * implying the file's range is 0..bsize-1. + */ +int +xfs_bmap_one_block( + struct xfs_inode *ip, /* incore inode */ + int whichfork); /* data or attr fork */ + +/* + * Read in the extents to iu_extents. + * All inode fields are set up by caller, we just traverse the btree + * and copy the records in. + */ +int /* error */ +xfs_bmap_read_extents( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + int whichfork); /* data or attr fork */ + +#if defined(XFS_BMAP_TRACE) +/* + * Add bmap trace insert entries for all the contents of the extent list. + */ +void +xfs_bmap_trace_exlist( + char *fname, /* function name */ + struct xfs_inode *ip, /* incore inode pointer */ + xfs_extnum_t cnt, /* count of entries in list */ + int whichfork); /* data or attr fork */ +#else +#define xfs_bmap_trace_exlist(f,ip,c,w) +#endif + +/* + * Map file blocks to filesystem blocks. + * File range is given by the bno/len pair. + * Adds blocks to file if a write ("flags & XFS_BMAPI_WRITE" set) + * into a hole or past eof. + * Only allocates blocks from a single allocation group, + * to avoid locking problems. + * The returned value in "firstblock" from the first call in a transaction + * must be remembered and presented to subsequent calls in "firstblock". + * An upper bound for the number of blocks to be allocated is supplied to + * the first call in "total"; if no allocation group has that many free + * blocks then the call will fail (return NULLFSBLOCK in "firstblock"). + */ +int /* error */ +xfs_bmapi( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t bno, /* starting file offs. mapped */ + xfs_filblks_t len, /* length to map in file */ + int flags, /* XFS_BMAPI_... */ + xfs_fsblock_t *firstblock, /* first allocated block + controls a.g. for allocs */ + xfs_extlen_t total, /* total blocks needed */ + struct xfs_bmbt_irec *mval, /* output: map values */ + int *nmap, /* i/o: mval size/count */ + xfs_bmap_free_t *flist); /* i/o: list extents to free */ + +/* + * Map file blocks to filesystem blocks, simple version. + * One block only, read-only. + * For flags, only the XFS_BMAPI_ATTRFORK flag is examined. + * For the other flag values, the effect is as if XFS_BMAPI_METADATA + * was set and all the others were clear. + */ +int /* error */ +xfs_bmapi_single( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + int whichfork, /* data or attr fork */ + xfs_fsblock_t *fsb, /* output: mapped block */ + xfs_fileoff_t bno); /* starting file offs. mapped */ + +/* + * Unmap (remove) blocks from a file. 
+ * If nexts is nonzero then the number of extents to remove is limited to + * that value. If not all extents in the block range can be removed then + * *done is set. + */ +int /* error */ +xfs_bunmapi( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t bno, /* starting offset to unmap */ + xfs_filblks_t len, /* length to unmap in file */ + int flags, /* XFS_BMAPI_... */ + xfs_extnum_t nexts, /* number of extents max */ + xfs_fsblock_t *firstblock, /* first allocated block + controls a.g. for allocs */ + xfs_bmap_free_t *flist, /* i/o: list extents to free */ + int *done); /* set if not done yet */ + +/* + * Fcntl interface to xfs_bmapi. + */ +int /* error code */ +xfs_getbmap( + bhv_desc_t *bdp, /* XFS behavior descriptor*/ + struct getbmap *bmv, /* user bmap structure */ + void *ap, /* pointer to user's array */ + int iflags); /* interface flags */ + +/* + * Check the last inode extent to determine whether this allocation will result + * in blocks being allocated at the end of the file. When we allocate new data + * blocks at the end of the file which do not start at the previous data block, + * we will try to align the new blocks at stripe unit boundaries. + */ +int +xfs_bmap_isaeof( + struct xfs_inode *ip, + xfs_fileoff_t off, + int whichfork, + char *aeof); + +/* + * Check if the endoff is outside the last extent. If so the caller will grow + * the allocation to a stripe unit boundary + */ +int +xfs_bmap_eof( + struct xfs_inode *ip, + xfs_fileoff_t endoff, + int whichfork, + int *eof); + +/* + * Count fsblocks of the given fork. + */ +int +xfs_bmap_count_blocks( + xfs_trans_t *tp, + xfs_inode_t *ip, + int whichfork, + int *count); + +/* + * Check an extent list, which has just been read, for + * any bit in the extent flag field. + */ +int +xfs_check_nostate_extents( + xfs_bmbt_rec_t *ep, + xfs_extnum_t num); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_BMAP_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_btree.c linux.21rc5-ac2/fs/xfs/xfs_btree.c --- linux.21rc5/fs/xfs/xfs_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_btree.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,949 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * This file contains common code for the space manager's btree implementations. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bit.h" +#include "xfs_error.h" + +/* + * Cursor allocation zone. + */ +kmem_zone_t *xfs_btree_cur_zone; + +/* + * Btree magic numbers. + */ +const __uint32_t xfs_magics[XFS_BTNUM_MAX] = +{ + XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC +}; + +/* + * Prototypes for internal routines. + */ + +/* + * Checking routine: return maxrecs for the block. + */ +STATIC int /* number of records fitting in block */ +xfs_btree_maxrecs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_block_t *block);/* generic btree block pointer */ + +/* + * Internal routines. + */ + +/* + * Checking routine: return maxrecs for the block. + */ +STATIC int /* number of records fitting in block */ +xfs_btree_maxrecs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_block_t *block) /* generic btree block pointer */ +{ + switch (cur->bc_btnum) { + case XFS_BTNUM_BNO: + case XFS_BTNUM_CNT: + return (int)XFS_ALLOC_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); + case XFS_BTNUM_BMAP: + return (int)XFS_BMAP_BLOCK_IMAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); + case XFS_BTNUM_INO: + return (int)XFS_INOBT_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); + default: + ASSERT(0); + return 0; + } +} + +/* + * External routines. + */ + +#ifdef DEBUG +/* + * Debug routine: check that block header is ok. + */ +void +xfs_btree_check_block( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_block_t *block, /* generic btree block pointer */ + int level, /* level of the btree block */ + xfs_buf_t *bp) /* buffer containing block, if any */ +{ + if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) + xfs_btree_check_lblock(cur, (xfs_btree_lblock_t *)block, level, + bp); + else + xfs_btree_check_sblock(cur, (xfs_btree_sblock_t *)block, level, + bp); +} + +/* + * Debug routine: check that keys are in the right order. 
+ */ +void +xfs_btree_check_key( + xfs_btnum_t btnum, /* btree identifier */ + void *ak1, /* pointer to left (lower) key */ + void *ak2) /* pointer to right (higher) key */ +{ + switch (btnum) { + case XFS_BTNUM_BNO: { + xfs_alloc_key_t *k1; + xfs_alloc_key_t *k2; + + k1 = ak1; + k2 = ak2; + ASSERT(INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT)); + break; + } + case XFS_BTNUM_CNT: { + xfs_alloc_key_t *k1; + xfs_alloc_key_t *k2; + + k1 = ak1; + k2 = ak2; + ASSERT(INT_GET(k1->ar_blockcount, ARCH_CONVERT) < INT_GET(k2->ar_blockcount, ARCH_CONVERT) || + (INT_GET(k1->ar_blockcount, ARCH_CONVERT) == INT_GET(k2->ar_blockcount, ARCH_CONVERT) && + INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT))); + break; + } + case XFS_BTNUM_BMAP: { + xfs_bmbt_key_t *k1; + xfs_bmbt_key_t *k2; + + k1 = ak1; + k2 = ak2; + ASSERT(INT_GET(k1->br_startoff, ARCH_CONVERT) < INT_GET(k2->br_startoff, ARCH_CONVERT)); + break; + } + case XFS_BTNUM_INO: { + xfs_inobt_key_t *k1; + xfs_inobt_key_t *k2; + + k1 = ak1; + k2 = ak2; + ASSERT(INT_GET(k1->ir_startino, ARCH_CONVERT) < INT_GET(k2->ir_startino, ARCH_CONVERT)); + break; + } + default: + ASSERT(0); + } +} +#endif /* DEBUG */ + +/* + * Checking routine: check that long form block header is ok. + */ +/* ARGSUSED */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lblock( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_lblock_t *block, /* btree long form block pointer */ + int level, /* level of the btree block */ + xfs_buf_t *bp) /* buffer for block, if any */ +{ + int lblock_ok; /* block passes checks */ + xfs_mount_t *mp; /* file system mount point */ + + mp = cur->bc_mp; + lblock_ok = + INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] && + INT_GET(block->bb_level, ARCH_CONVERT) == level && + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && + !INT_ISZERO(block->bb_leftsib, ARCH_CONVERT) && + (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_leftsib, ARCH_CONVERT))) && + !INT_ISZERO(block->bb_rightsib, ARCH_CONVERT) && + (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_rightsib, ARCH_CONVERT))); + if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK, + XFS_RANDOM_BTREE_CHECK_LBLOCK))) { + if (bp) + xfs_buftrace("LBTREE ERROR", bp); + XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW, + mp); + return XFS_ERROR(EFSCORRUPTED); + } + return 0; +} + +/* + * Checking routine: check that (long) pointer is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lptr( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_dfsbno_t ptr, /* btree block disk address */ + int level) /* btree block level */ +{ + xfs_mount_t *mp; /* file system mount point */ + + mp = cur->bc_mp; + XFS_WANT_CORRUPTED_RETURN( + level > 0 && + ptr != NULLDFSBNO && + XFS_FSB_SANITY_CHECK(mp, ptr)); + return 0; +} + +#ifdef DEBUG +/* + * Debug routine: check that records are in the right order. 
+ */ +void +xfs_btree_check_rec( + xfs_btnum_t btnum, /* btree identifier */ + void *ar1, /* pointer to left (lower) record */ + void *ar2) /* pointer to right (higher) record */ +{ + switch (btnum) { + case XFS_BTNUM_BNO: { + xfs_alloc_rec_t *r1; + xfs_alloc_rec_t *r2; + + r1 = ar1; + r2 = ar2; + ASSERT(INT_GET(r1->ar_startblock, ARCH_CONVERT) + INT_GET(r1->ar_blockcount, ARCH_CONVERT) <= + INT_GET(r2->ar_startblock, ARCH_CONVERT)); + break; + } + case XFS_BTNUM_CNT: { + xfs_alloc_rec_t *r1; + xfs_alloc_rec_t *r2; + + r1 = ar1; + r2 = ar2; + ASSERT(INT_GET(r1->ar_blockcount, ARCH_CONVERT) < INT_GET(r2->ar_blockcount, ARCH_CONVERT) || + (INT_GET(r1->ar_blockcount, ARCH_CONVERT) == INT_GET(r2->ar_blockcount, ARCH_CONVERT) && + INT_GET(r1->ar_startblock, ARCH_CONVERT) < INT_GET(r2->ar_startblock, ARCH_CONVERT))); + break; + } + case XFS_BTNUM_BMAP: { + xfs_bmbt_rec_t *r1; + xfs_bmbt_rec_t *r2; + + r1 = ar1; + r2 = ar2; + ASSERT(xfs_bmbt_disk_get_startoff(r1) + + xfs_bmbt_disk_get_blockcount(r1) <= + xfs_bmbt_disk_get_startoff(r2)); + break; + } + case XFS_BTNUM_INO: { + xfs_inobt_rec_t *r1; + xfs_inobt_rec_t *r2; + + r1 = ar1; + r2 = ar2; + ASSERT(INT_GET(r1->ir_startino, ARCH_CONVERT) + XFS_INODES_PER_CHUNK <= + INT_GET(r2->ir_startino, ARCH_CONVERT)); + break; + } + default: + ASSERT(0); + } +} +#endif /* DEBUG */ + +/* + * Checking routine: check that block header is ok. + */ +/* ARGSUSED */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_sblock( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_sblock_t *block, /* btree short form block pointer */ + int level, /* level of the btree block */ + xfs_buf_t *bp) /* buffer containing block */ +{ + xfs_buf_t *agbp; /* buffer for ag. freespace struct */ + xfs_agf_t *agf; /* ag. freespace structure */ + xfs_agblock_t agflen; /* native ag. freespace length */ + int sblock_ok; /* block passes checks */ + + agbp = cur->bc_private.a.agbp; + agf = XFS_BUF_TO_AGF(agbp); + agflen = INT_GET(agf->agf_length, ARCH_CONVERT); + sblock_ok = + INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] && + INT_GET(block->bb_level, ARCH_CONVERT) == level && + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && + (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK || + INT_GET(block->bb_leftsib, ARCH_CONVERT) < agflen) && + !INT_ISZERO(block->bb_leftsib, ARCH_CONVERT) && + (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK || + INT_GET(block->bb_rightsib, ARCH_CONVERT) < agflen) && + !INT_ISZERO(block->bb_rightsib, ARCH_CONVERT); + if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp, + XFS_ERRTAG_BTREE_CHECK_SBLOCK, + XFS_RANDOM_BTREE_CHECK_SBLOCK))) { + if (bp) + xfs_buftrace("SBTREE ERROR", bp); + XFS_ERROR_REPORT("xfs_btree_check_sblock", XFS_ERRLEVEL_LOW, + cur->bc_mp); + return XFS_ERROR(EFSCORRUPTED); + } + return 0; +} + +/* + * Checking routine: check that (short) pointer is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_sptr( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t ptr, /* btree block disk address */ + int level) /* btree block level */ +{ + xfs_buf_t *agbp; /* buffer for ag. freespace struct */ + xfs_agf_t *agf; /* ag. freespace structure */ + + agbp = cur->bc_private.a.agbp; + agf = XFS_BUF_TO_AGF(agbp); + XFS_WANT_CORRUPTED_RETURN( + level > 0 && + ptr != NULLAGBLOCK && ptr != 0 && + ptr < INT_GET(agf->agf_length, ARCH_CONVERT)); + return 0; +} + +/* + * Delete the btree cursor. 
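+ * Callers pass XFS_BTREE_NOERROR on the normal path, or
+ * XFS_BTREE_ERROR when unwinding, e.g.:
+ *	xfs_btree_del_cursor(cur,
+ *		error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);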
+ */ +void +xfs_btree_del_cursor( + xfs_btree_cur_t *cur, /* btree cursor */ + int error) /* del because of error */ +{ + int i; /* btree level */ + + /* + * Clear the buffer pointers, and release the buffers. + * If we're doing this in the face of an error, we + * need to make sure to inspect all of the entries + * in the bc_bufs array for buffers to be unlocked. + * This is because some of the btree code works from + * level n down to 0, and if we get an error along + * the way we won't have initialized all the entries + * down to 0. + */ + for (i = 0; i < cur->bc_nlevels; i++) { + if (cur->bc_bufs[i]) + xfs_btree_setbuf(cur, i, NULL); + else if (!error) + break; + } + /* + * Can't free a bmap cursor without having dealt with the + * allocated indirect blocks' accounting. + */ + ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || + cur->bc_private.b.allocated == 0); + /* + * Free the cursor. + */ + kmem_zone_free(xfs_btree_cur_zone, cur); +} + +/* + * Duplicate the btree cursor. + * Allocate a new one, copy the record, re-get the buffers. + */ +int /* error */ +xfs_btree_dup_cursor( + xfs_btree_cur_t *cur, /* input cursor */ + xfs_btree_cur_t **ncur) /* output cursor */ +{ + xfs_buf_t *bp; /* btree block's buffer pointer */ + int error; /* error return value */ + int i; /* level number of btree block */ + xfs_mount_t *mp; /* mount structure for filesystem */ + xfs_btree_cur_t *new; /* new cursor value */ + xfs_trans_t *tp; /* transaction pointer, can be NULL */ + + tp = cur->bc_tp; + mp = cur->bc_mp; + /* + * Allocate a new cursor like the old one. + */ + new = xfs_btree_init_cursor(mp, tp, cur->bc_private.a.agbp, + cur->bc_private.a.agno, cur->bc_btnum, cur->bc_private.b.ip, + cur->bc_private.b.whichfork); + /* + * Copy the record currently in the cursor. + */ + new->bc_rec = cur->bc_rec; + /* + * For each level current, re-get the buffer and copy the ptr value. + */ + for (i = 0; i < new->bc_nlevels; i++) { + new->bc_ptrs[i] = cur->bc_ptrs[i]; + new->bc_ra[i] = cur->bc_ra[i]; + if ((bp = cur->bc_bufs[i])) { + if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, + XFS_BUF_ADDR(bp), mp->m_bsize, 0, &bp))) { + xfs_btree_del_cursor(new, error); + *ncur = NULL; + return error; + } + new->bc_bufs[i] = bp; + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + } else + new->bc_bufs[i] = NULL; + } + /* + * For bmap btrees, copy the firstblock, flist, and flags values, + * since init cursor doesn't get them. + */ + if (new->bc_btnum == XFS_BTNUM_BMAP) { + new->bc_private.b.firstblock = cur->bc_private.b.firstblock; + new->bc_private.b.flist = cur->bc_private.b.flist; + new->bc_private.b.flags = cur->bc_private.b.flags; + } + *ncur = new; + return 0; +} + +/* + * Change the cursor to point to the first record at the given level. + * Other levels are unaffected. + */ +int /* success=1, failure=0 */ +xfs_btree_firstrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level) /* level to change */ +{ + xfs_btree_block_t *block; /* generic btree block pointer */ + xfs_buf_t *bp; /* buffer containing block */ + + /* + * Get the block pointer for this level. + */ + block = xfs_btree_get_block(cur, level, &bp); + xfs_btree_check_block(cur, block, level, bp); + /* + * It's empty, there is no such record. + */ + if (INT_ISZERO(block->bb_h.bb_numrecs, ARCH_CONVERT)) + return 0; + /* + * Set the ptr value to 1, that's the first record/key. + */ + cur->bc_ptrs[level] = 1; + return 1; +} + +/* + * Retrieve the block pointer from the cursor at the given level. + * This may be a bmap btree root or from a buffer. 
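+ * For the root level of a bmap btree the block is the incore root in
+ * the inode fork (if_broot), not a buffer, so *bpp is set to NULL.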
+ */ +xfs_btree_block_t * /* generic btree block pointer */ +xfs_btree_get_block( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree */ + xfs_buf_t **bpp) /* buffer containing the block */ +{ + xfs_btree_block_t *block; /* return value */ + xfs_buf_t *bp; /* return buffer */ + xfs_ifork_t *ifp; /* inode fork pointer */ + int whichfork; /* data or attr fork */ + + if (cur->bc_btnum == XFS_BTNUM_BMAP && level == cur->bc_nlevels - 1) { + whichfork = cur->bc_private.b.whichfork; + ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, whichfork); + block = (xfs_btree_block_t *)ifp->if_broot; + bp = NULL; + } else { + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_BLOCK(bp); + } + ASSERT(block != NULL); + *bpp = bp; + return block; +} + +/* + * Get a buffer for the block, return it with no data read. + * Long-form addressing. + */ +xfs_buf_t * /* buffer for fsbno */ +xfs_btree_get_bufl( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_fsblock_t fsbno, /* file system block number */ + uint lock) /* lock flags for get_buf */ +{ + xfs_buf_t *bp; /* buffer pointer (return value) */ + xfs_daddr_t d; /* real disk block address */ + + ASSERT(fsbno != NULLFSBLOCK); + d = XFS_FSB_TO_DADDR(mp, fsbno); + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + return bp; +} + +/* + * Get a buffer for the block, return it with no data read. + * Short-form addressing. + */ +xfs_buf_t * /* buffer for agno/agbno */ +xfs_btree_get_bufs( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + uint lock) /* lock flags for get_buf */ +{ + xfs_buf_t *bp; /* buffer pointer (return value) */ + xfs_daddr_t d; /* real disk block address */ + + ASSERT(agno != NULLAGNUMBER); + ASSERT(agbno != NULLAGBLOCK); + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + return bp; +} + +/* + * Allocate a new btree cursor. + * The cursor is either for allocation (A) or bmap (B) or inodes (I). + */ +xfs_btree_cur_t * /* new btree cursor */ +xfs_btree_init_cursor( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* (A only) buffer for agf structure */ + /* (I only) buffer for agi structure */ + xfs_agnumber_t agno, /* (AI only) allocation group number */ + xfs_btnum_t btnum, /* btree identifier */ + xfs_inode_t *ip, /* (B only) inode owning the btree */ + int whichfork) /* (B only) data or attr fork */ +{ + xfs_agf_t *agf; /* (A) allocation group freespace */ + xfs_agi_t *agi; /* (I) allocation group inodespace */ + xfs_btree_cur_t *cur; /* return value */ + xfs_ifork_t *ifp; /* (I) inode fork pointer */ + int nlevels=0; /* number of levels in the btree */ + + ASSERT(xfs_btree_cur_zone != NULL); + /* + * Allocate a new cursor. + */ + cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); + /* + * Deduce the number of btree levels from the arguments. 
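+ * BNO/CNT come from agf_levels[], BMAP is the root block's bb_level
+ * plus one (levels are counted from zero at the leaves), and INO
+ * comes from agi_level.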
+ */ + switch (btnum) { + case XFS_BTNUM_BNO: + case XFS_BTNUM_CNT: + agf = XFS_BUF_TO_AGF(agbp); + nlevels = INT_GET(agf->agf_levels[btnum], ARCH_CONVERT); + break; + case XFS_BTNUM_BMAP: + ifp = XFS_IFORK_PTR(ip, whichfork); + nlevels = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1; + break; + case XFS_BTNUM_INO: + agi = XFS_BUF_TO_AGI(agbp); + nlevels = INT_GET(agi->agi_level, ARCH_CONVERT); + break; + default: + ASSERT(0); + } + /* + * Fill in the common fields. + */ + cur->bc_tp = tp; + cur->bc_mp = mp; + cur->bc_nlevels = nlevels; + cur->bc_btnum = btnum; + cur->bc_blocklog = mp->m_sb.sb_blocklog; + /* + * Fill in private fields. + */ + switch (btnum) { + case XFS_BTNUM_BNO: + case XFS_BTNUM_CNT: + /* + * Allocation btree fields. + */ + cur->bc_private.a.agbp = agbp; + cur->bc_private.a.agno = agno; + break; + case XFS_BTNUM_BMAP: + /* + * Bmap btree fields. + */ + cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork); + cur->bc_private.b.ip = ip; + cur->bc_private.b.firstblock = NULLFSBLOCK; + cur->bc_private.b.flist = NULL; + cur->bc_private.b.allocated = 0; + cur->bc_private.b.flags = 0; + cur->bc_private.b.whichfork = whichfork; + break; + case XFS_BTNUM_INO: + /* + * Inode allocation btree fields. + */ + cur->bc_private.i.agbp = agbp; + cur->bc_private.i.agno = agno; + break; + default: + ASSERT(0); + } + return cur; +} + +/* + * Check for the cursor referring to the last block at the given level. + */ +int /* 1=is last block, 0=not last block */ +xfs_btree_islastblock( + xfs_btree_cur_t *cur, /* btree cursor */ + int level) /* level to check */ +{ + xfs_btree_block_t *block; /* generic btree block pointer */ + xfs_buf_t *bp; /* buffer containing block */ + + block = xfs_btree_get_block(cur, level, &bp); + xfs_btree_check_block(cur, block, level, bp); + if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) + return INT_GET(block->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO; + else + return INT_GET(block->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK; +} + +/* + * Change the cursor to point to the last record in the current block + * at the given level. Other levels are unaffected. + */ +int /* success=1, failure=0 */ +xfs_btree_lastrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level) /* level to change */ +{ + xfs_btree_block_t *block; /* generic btree block pointer */ + xfs_buf_t *bp; /* buffer containing block */ + + /* + * Get the block pointer for this level. + */ + block = xfs_btree_get_block(cur, level, &bp); + xfs_btree_check_block(cur, block, level, bp); + /* + * It's empty, there is no such record. + */ + if (INT_ISZERO(block->bb_h.bb_numrecs, ARCH_CONVERT)) + return 0; + /* + * Set the ptr value to numrecs, that's the last record/key. + */ + cur->bc_ptrs[level] = INT_GET(block->bb_h.bb_numrecs, ARCH_CONVERT); + return 1; +} + +/* + * Compute first and last byte offsets for the fields given. + * Interprets the offsets table, which contains struct field offsets. + */ +void +xfs_btree_offsets( + __int64_t fields, /* bitmask of fields */ + const short *offsets, /* table of field offsets */ + int nbits, /* number of bits to inspect */ + int *first, /* output: first byte offset */ + int *last) /* output: last byte offset */ +{ + int i; /* current bit number */ + __int64_t imask; /* mask for current bit number */ + + ASSERT(fields != 0); + /* + * Find the lowest bit, so the first byte offset. + */ + for (i = 0, imask = 1LL; ; i++, imask <<= 1) { + if (imask & fields) { + *first = offsets[i]; + break; + } + } + /* + * Find the highest bit, so the last byte offset. 
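+ * offsets[i + 1] is the start of the next field, so the last byte of
+ * field i is offsets[i + 1] - 1; the table must therefore contain
+ * nbits + 1 entries, the final one marking the end of the last field.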
+ */ + for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) { + if (imask & fields) { + *last = offsets[i + 1] - 1; + break; + } + } +} + +/* + * Get a buffer for the block, return it read in. + * Long-form addressing. + */ +int /* error */ +xfs_btree_read_bufl( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_fsblock_t fsbno, /* file system block number */ + uint lock, /* lock flags for read_buf */ + xfs_buf_t **bpp, /* buffer for fsbno */ + int refval) /* ref count value for buffer */ +{ + xfs_buf_t *bp; /* return value */ + xfs_daddr_t d; /* real disk block address */ + int error; + + ASSERT(fsbno != NULLFSBLOCK); + d = XFS_FSB_TO_DADDR(mp, fsbno); + if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, + mp->m_bsize, lock, &bp))) { + return error; + } + ASSERT(!bp || !XFS_BUF_GETERROR(bp)); + if (bp != NULL) { + XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); + } + *bpp = bp; + return 0; +} + +/* + * Get a buffer for the block, return it read in. + * Short-form addressing. + */ +int /* error */ +xfs_btree_read_bufs( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + uint lock, /* lock flags for read_buf */ + xfs_buf_t **bpp, /* buffer for agno/agbno */ + int refval) /* ref count value for buffer */ +{ + xfs_buf_t *bp; /* return value */ + xfs_daddr_t d; /* real disk block address */ + int error; + + ASSERT(agno != NULLAGNUMBER); + ASSERT(agbno != NULLAGBLOCK); + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, + mp->m_bsize, lock, &bp))) { + return error; + } + ASSERT(!bp || !XFS_BUF_GETERROR(bp)); + if (bp != NULL) { + switch (refval) { + case XFS_ALLOC_BTREE_REF: + XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); + break; + case XFS_INO_BTREE_REF: + XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, refval); + break; + } + } + *bpp = bp; + return 0; +} + +/* + * Read-ahead the block, don't wait for it, don't return a buffer. + * Long-form addressing. + */ +/* ARGSUSED */ +void +xfs_btree_reada_bufl( + xfs_mount_t *mp, /* file system mount point */ + xfs_fsblock_t fsbno, /* file system block number */ + xfs_extlen_t count) /* count of filesystem blocks */ +{ + xfs_daddr_t d; + + ASSERT(fsbno != NULLFSBLOCK); + d = XFS_FSB_TO_DADDR(mp, fsbno); + xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); +} + +/* + * Read-ahead the block, don't wait for it, don't return a buffer. + * Short-form addressing. + */ +/* ARGSUSED */ +void +xfs_btree_reada_bufs( + xfs_mount_t *mp, /* file system mount point */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + xfs_extlen_t count) /* count of filesystem blocks */ +{ + xfs_daddr_t d; + + ASSERT(agno != NULLAGNUMBER); + ASSERT(agbno != NULLAGBLOCK); + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); +} + +/* + * Read-ahead btree blocks, at the given level. + * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. 
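+ * Returns the number of sibling blocks for which readahead was
+ * issued; sibling pointers that are null (NULLAGBLOCK/NULLDFSBNO)
+ * are skipped.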
+ */ +int +xfs_btree_readahead_core( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + int lr) /* left/right bits */ +{ + xfs_alloc_block_t *a; + xfs_bmbt_block_t *b; + xfs_inobt_block_t *i; + int rval = 0; + + ASSERT(cur->bc_bufs[lev] != NULL); + cur->bc_ra[lev] |= lr; + switch (cur->bc_btnum) { + case XFS_BTNUM_BNO: + case XFS_BTNUM_CNT: + a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); + if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(a->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, + INT_GET(a->bb_leftsib, ARCH_CONVERT), 1); + rval++; + } + if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(a->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, + INT_GET(a->bb_rightsib, ARCH_CONVERT), 1); + rval++; + } + break; + case XFS_BTNUM_BMAP: + b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]); + if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(b->bb_leftsib, ARCH_CONVERT) != NULLDFSBNO) { + xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_leftsib, ARCH_CONVERT), 1); + rval++; + } + if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(b->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_rightsib, ARCH_CONVERT), 1); + rval++; + } + break; + case XFS_BTNUM_INO: + i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); + if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(i->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, + INT_GET(i->bb_leftsib, ARCH_CONVERT), 1); + rval++; + } + if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(i->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, + INT_GET(i->bb_rightsib, ARCH_CONVERT), 1); + rval++; + } + break; + default: + ASSERT(0); + } + return rval; +} + +/* + * Set the buffer for level "lev" in the cursor to bp, releasing + * any previous buffer. + */ +void +xfs_btree_setbuf( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + xfs_buf_t *bp) /* new buffer to set */ +{ + xfs_btree_block_t *b; /* btree block */ + xfs_buf_t *obp; /* old buffer pointer */ + + obp = cur->bc_bufs[lev]; + if (obp) + xfs_trans_brelse(cur->bc_tp, obp); + cur->bc_bufs[lev] = bp; + cur->bc_ra[lev] = 0; + if (!bp) + return; + b = XFS_BUF_TO_BLOCK(bp); + if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) { + if (INT_GET(b->bb_u.l.bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) + cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; + if (INT_GET(b->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) + cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; + } else { + if (INT_GET(b->bb_u.s.bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) + cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; + if (INT_GET(b->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) + cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; + } +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_btree.h linux.21rc5-ac2/fs/xfs/xfs_btree.h --- linux.21rc5/fs/xfs/xfs_btree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_btree.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,592 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BTREE_H__ +#define __XFS_BTREE_H__ + +struct xfs_buf; +struct xfs_bmap_free; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * This nonsense is to make -wlint happy. + */ +#define XFS_LOOKUP_EQ ((xfs_lookup_t)XFS_LOOKUP_EQi) +#define XFS_LOOKUP_LE ((xfs_lookup_t)XFS_LOOKUP_LEi) +#define XFS_LOOKUP_GE ((xfs_lookup_t)XFS_LOOKUP_GEi) + +#define XFS_BTNUM_BNO ((xfs_btnum_t)XFS_BTNUM_BNOi) +#define XFS_BTNUM_CNT ((xfs_btnum_t)XFS_BTNUM_CNTi) +#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi) +#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi) + +/* + * Short form header: space allocation btrees. + */ +typedef struct xfs_btree_sblock +{ + __uint32_t bb_magic; /* magic number for block type */ + __uint16_t bb_level; /* 0 is a leaf */ + __uint16_t bb_numrecs; /* current # of data records */ + xfs_agblock_t bb_leftsib; /* left sibling block or NULLAGBLOCK */ + xfs_agblock_t bb_rightsib; /* right sibling block or NULLAGBLOCK */ +} xfs_btree_sblock_t; + +/* + * Long form header: bmap btrees. + */ +typedef struct xfs_btree_lblock +{ + __uint32_t bb_magic; /* magic number for block type */ + __uint16_t bb_level; /* 0 is a leaf */ + __uint16_t bb_numrecs; /* current # of data records */ + xfs_dfsbno_t bb_leftsib; /* left sibling block or NULLDFSBNO */ + xfs_dfsbno_t bb_rightsib; /* right sibling block or NULLDFSBNO */ +} xfs_btree_lblock_t; + +/* + * Combined header and structure, used by common code. + */ +typedef struct xfs_btree_hdr +{ + __uint32_t bb_magic; /* magic number for block type */ + __uint16_t bb_level; /* 0 is a leaf */ + __uint16_t bb_numrecs; /* current # of data records */ +} xfs_btree_hdr_t; + +typedef struct xfs_btree_block +{ + xfs_btree_hdr_t bb_h; /* header */ + union { + struct { + xfs_agblock_t bb_leftsib; + xfs_agblock_t bb_rightsib; + } s; /* short form pointers */ + struct { + xfs_dfsbno_t bb_leftsib; + xfs_dfsbno_t bb_rightsib; + } l; /* long form pointers */ + } bb_u; /* rest */ +} xfs_btree_block_t; + +/* + * For logging record fields. + */ +#define XFS_BB_MAGIC 0x01 +#define XFS_BB_LEVEL 0x02 +#define XFS_BB_NUMRECS 0x04 +#define XFS_BB_LEFTSIB 0x08 +#define XFS_BB_RIGHTSIB 0x10 +#define XFS_BB_NUM_BITS 5 +#define XFS_BB_ALL_BITS ((1 << XFS_BB_NUM_BITS) - 1) + +/* + * Boolean to select which form of xfs_btree_block_t.bb_u to use. 
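+ * Only the bmap btree uses long pointers (filesystem-wide
+ * xfs_dfsbno_t); the per-AG btrees use short, AG-relative
+ * xfs_agblock_t pointers.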
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BTREE_LONG_PTRS) +int xfs_btree_long_ptrs(xfs_btnum_t btnum); +#define XFS_BTREE_LONG_PTRS(btnum) ((btnum) == XFS_BTNUM_BMAP) +#else +#define XFS_BTREE_LONG_PTRS(btnum) ((btnum) == XFS_BTNUM_BMAP) +#endif + +/* + * Magic numbers for btree blocks. + */ +extern const __uint32_t xfs_magics[]; + +/* + * Maximum and minimum records in a btree block. + * Given block size, type prefix, and leaf flag (0 or 1). + * The divisor below is equivalent to lf ? (e1) : (e2) but that produces + * compiler warnings. + */ +#define XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) \ + ((int)(((bsz) - (uint)sizeof(t ## _block_t)) / \ + (((lf) * (uint)sizeof(t ## _rec_t)) + \ + ((1 - (lf)) * \ + ((uint)sizeof(t ## _key_t) + (uint)sizeof(t ## _ptr_t)))))) +#define XFS_BTREE_BLOCK_MINRECS(bsz,t,lf) \ + (XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) / 2) + +/* + * Record, key, and pointer address calculation macros. + * Given block size, type prefix, block pointer, and index of requested entry + * (first entry numbered 1). + */ +#define XFS_BTREE_REC_ADDR(bsz,t,bb,i,mxr) \ + ((t ## _rec_t *)((char *)(bb) + sizeof(t ## _block_t) + \ + ((i) - 1) * sizeof(t ## _rec_t))) +#define XFS_BTREE_KEY_ADDR(bsz,t,bb,i,mxr) \ + ((t ## _key_t *)((char *)(bb) + sizeof(t ## _block_t) + \ + ((i) - 1) * sizeof(t ## _key_t))) +#define XFS_BTREE_PTR_ADDR(bsz,t,bb,i,mxr) \ + ((t ## _ptr_t *)((char *)(bb) + sizeof(t ## _block_t) + \ + (mxr) * sizeof(t ## _key_t) + ((i) - 1) * sizeof(t ## _ptr_t))) + +#define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */ + +/* + * Btree cursor structure. + * This collects all information needed by the btree code in one place. + */ +typedef struct xfs_btree_cur +{ + struct xfs_trans *bc_tp; /* transaction we're in, if any */ + struct xfs_mount *bc_mp; /* file system mount struct */ + union { + xfs_alloc_rec_t a; + xfs_bmbt_irec_t b; + xfs_inobt_rec_t i; + } bc_rec; /* current insert/search record value */ + struct xfs_buf *bc_bufs[XFS_BTREE_MAXLEVELS]; /* buf ptr per level */ + int bc_ptrs[XFS_BTREE_MAXLEVELS]; /* key/record # */ + __uint8_t bc_ra[XFS_BTREE_MAXLEVELS]; /* readahead bits */ +#define XFS_BTCUR_LEFTRA 1 /* left sibling has been read-ahead */ +#define XFS_BTCUR_RIGHTRA 2 /* right sibling has been read-ahead */ + __uint8_t bc_nlevels; /* number of levels in the tree */ + __uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */ + xfs_btnum_t bc_btnum; /* identifies which btree type */ + union { + struct { /* needed for BNO, CNT */ + struct xfs_buf *agbp; /* agf buffer pointer */ + xfs_agnumber_t agno; /* ag number */ + } a; + struct { /* needed for BMAP */ + struct xfs_inode *ip; /* pointer to our inode */ + struct xfs_bmap_free *flist; /* list to free after */ + xfs_fsblock_t firstblock; /* 1st blk allocated */ + int allocated; /* count of alloced */ + short forksize; /* fork's inode space */ + char whichfork; /* data or attr fork */ + char flags; /* flags */ +#define XFS_BTCUR_BPRV_WASDEL 1 /* was delayed */ + } b; + struct { /* needed for INO */ + struct xfs_buf *agbp; /* agi buffer pointer */ + xfs_agnumber_t agno; /* ag number */ + } i; + } bc_private; /* per-btree type data */ +} xfs_btree_cur_t; + +#define XFS_BTREE_NOERROR 0 +#define XFS_BTREE_ERROR 1 + +/* + * Convert from buffer to btree block header. 
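+ * Each macro casts the buffer's data pointer (XFS_BUF_PTR) to the
+ * corresponding block header type.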
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_BLOCK) +xfs_btree_block_t *xfs_buf_to_block(struct xfs_buf *bp); +#define XFS_BUF_TO_BLOCK(bp) xfs_buf_to_block(bp) +#else +#define XFS_BUF_TO_BLOCK(bp) ((xfs_btree_block_t *)(XFS_BUF_PTR(bp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_LBLOCK) +xfs_btree_lblock_t *xfs_buf_to_lblock(struct xfs_buf *bp); +#define XFS_BUF_TO_LBLOCK(bp) xfs_buf_to_lblock(bp) +#else +#define XFS_BUF_TO_LBLOCK(bp) ((xfs_btree_lblock_t *)(XFS_BUF_PTR(bp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBLOCK) +xfs_btree_sblock_t *xfs_buf_to_sblock(struct xfs_buf *bp); +#define XFS_BUF_TO_SBLOCK(bp) xfs_buf_to_sblock(bp) +#else +#define XFS_BUF_TO_SBLOCK(bp) ((xfs_btree_sblock_t *)(XFS_BUF_PTR(bp))) +#endif + +#ifdef __KERNEL__ + +#ifdef DEBUG +/* + * Debug routine: check that block header is ok. + */ +void +xfs_btree_check_block( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_block_t *block, /* generic btree block pointer */ + int level, /* level of the btree block */ + struct xfs_buf *bp); /* buffer containing block, if any */ + +/* + * Debug routine: check that keys are in the right order. + */ +void +xfs_btree_check_key( + xfs_btnum_t btnum, /* btree identifier */ + void *ak1, /* pointer to left (lower) key */ + void *ak2); /* pointer to right (higher) key */ + +/* + * Debug routine: check that records are in the right order. + */ +void +xfs_btree_check_rec( + xfs_btnum_t btnum, /* btree identifier */ + void *ar1, /* pointer to left (lower) record */ + void *ar2); /* pointer to right (higher) record */ +#else +#define xfs_btree_check_block(a,b,c,d) +#define xfs_btree_check_key(a,b,c) +#define xfs_btree_check_rec(a,b,c) +#endif /* DEBUG */ + +/* + * Checking routine: check that long form block header is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lblock( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_lblock_t *block, /* btree long form block pointer */ + int level, /* level of the btree block */ + struct xfs_buf *bp); /* buffer containing block, if any */ + +/* + * Checking routine: check that (long) pointer is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lptr( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_dfsbno_t ptr, /* btree block disk address */ + int level); /* btree block level */ + +/* + * Checking routine: check that short form block header is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_sblock( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_sblock_t *block, /* btree short form block pointer */ + int level, /* level of the btree block */ + struct xfs_buf *bp); /* buffer containing block */ + +/* + * Checking routine: check that (short) pointer is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_sptr( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t ptr, /* btree block disk address */ + int level); /* btree block level */ + +/* + * Delete the btree cursor. + */ +void +xfs_btree_del_cursor( + xfs_btree_cur_t *cur, /* btree cursor */ + int error); /* del because of error */ + +/* + * Duplicate the btree cursor. + * Allocate a new one, copy the record, re-get the buffers. + */ +int /* error */ +xfs_btree_dup_cursor( + xfs_btree_cur_t *cur, /* input cursor */ + xfs_btree_cur_t **ncur);/* output cursor */ + +/* + * Change the cursor to point to the first record in the current block + * at the given level. Other levels are unaffected. 
+ */ +int /* success=1, failure=0 */ +xfs_btree_firstrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level); /* level to change */ + +/* + * Retrieve the block pointer from the cursor at the given level. + * This may be a bmap btree root or from a buffer. + */ +xfs_btree_block_t * /* generic btree block pointer */ +xfs_btree_get_block( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree */ + struct xfs_buf **bpp); /* buffer containing the block */ + +/* + * Get a buffer for the block, return it with no data read. + * Long-form addressing. + */ +struct xfs_buf * /* buffer for fsbno */ +xfs_btree_get_bufl( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_fsblock_t fsbno, /* file system block number */ + uint lock); /* lock flags for get_buf */ + +/* + * Get a buffer for the block, return it with no data read. + * Short-form addressing. + */ +struct xfs_buf * /* buffer for agno/agbno */ +xfs_btree_get_bufs( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + uint lock); /* lock flags for get_buf */ + +/* + * Allocate a new btree cursor. + * The cursor is either for allocation (A) or bmap (B). + */ +xfs_btree_cur_t * /* new btree cursor */ +xfs_btree_init_cursor( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_buf *agbp, /* (A only) buffer for agf structure */ + xfs_agnumber_t agno, /* (A only) allocation group number */ + xfs_btnum_t btnum, /* btree identifier */ + struct xfs_inode *ip, /* (B only) inode owning the btree */ + int whichfork); /* (B only) data/attr fork */ + +/* + * Check for the cursor referring to the last block at the given level. + */ +int /* 1=is last block, 0=not last block */ +xfs_btree_islastblock( + xfs_btree_cur_t *cur, /* btree cursor */ + int level); /* level to check */ + +/* + * Change the cursor to point to the last record in the current block + * at the given level. Other levels are unaffected. + */ +int /* success=1, failure=0 */ +xfs_btree_lastrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level); /* level to change */ + +/* + * Compute first and last byte offsets for the fields given. + * Interprets the offsets table, which contains struct field offsets. + */ +void +xfs_btree_offsets( + __int64_t fields, /* bitmask of fields */ + const short *offsets,/* table of field offsets */ + int nbits, /* number of bits to inspect */ + int *first, /* output: first byte offset */ + int *last); /* output: last byte offset */ + +/* + * Get a buffer for the block, return it read in. + * Long-form addressing. + */ +int /* error */ +xfs_btree_read_bufl( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_fsblock_t fsbno, /* file system block number */ + uint lock, /* lock flags for read_buf */ + struct xfs_buf **bpp, /* buffer for fsbno */ + int refval);/* ref count value for buffer */ + +/* + * Get a buffer for the block, return it read in. + * Short-form addressing. 
+ */ +int /* error */ +xfs_btree_read_bufs( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + uint lock, /* lock flags for read_buf */ + struct xfs_buf **bpp, /* buffer for agno/agbno */ + int refval);/* ref count value for buffer */ + +/* + * Read-ahead the block, don't wait for it, don't return a buffer. + * Long-form addressing. + */ +void /* error */ +xfs_btree_reada_bufl( + struct xfs_mount *mp, /* file system mount point */ + xfs_fsblock_t fsbno, /* file system block number */ + xfs_extlen_t count); /* count of filesystem blocks */ + +/* + * Read-ahead the block, don't wait for it, don't return a buffer. + * Short-form addressing. + */ +void /* error */ +xfs_btree_reada_bufs( + struct xfs_mount *mp, /* file system mount point */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + xfs_extlen_t count); /* count of filesystem blocks */ + +/* + * Read-ahead btree blocks, at the given level. + * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. + */ +int /* readahead block count */ +xfs_btree_readahead_core( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + int lr); /* left/right bits */ + +static inline int /* readahead block count */ +xfs_btree_readahead( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + int lr) /* left/right bits */ +{ + if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev]) + return 0; + + return xfs_btree_readahead_core(cur, lev, lr); +} + + +/* + * Set the buffer for level "lev" in the cursor to bp, releasing + * any previous buffer. + */ +void +xfs_btree_setbuf( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + struct xfs_buf *bp); /* new buffer to set */ + +#endif /* __KERNEL__ */ + + +/* + * Min and max functions for extlen, agblock, fileoff, and filblks types. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXTLEN_MIN) +xfs_extlen_t xfs_extlen_min(xfs_extlen_t a, xfs_extlen_t b); +#define XFS_EXTLEN_MIN(a,b) xfs_extlen_min(a,b) +#else +#define XFS_EXTLEN_MIN(a,b) \ + ((xfs_extlen_t)(a) < (xfs_extlen_t)(b) ? \ + (xfs_extlen_t)(a) : (xfs_extlen_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXTLEN_MAX) +xfs_extlen_t xfs_extlen_max(xfs_extlen_t a, xfs_extlen_t b); +#define XFS_EXTLEN_MAX(a,b) xfs_extlen_max(a,b) +#else +#define XFS_EXTLEN_MAX(a,b) \ + ((xfs_extlen_t)(a) > (xfs_extlen_t)(b) ? \ + (xfs_extlen_t)(a) : (xfs_extlen_t)(b)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGBLOCK_MIN) +xfs_agblock_t xfs_agblock_min(xfs_agblock_t a, xfs_agblock_t b); +#define XFS_AGBLOCK_MIN(a,b) xfs_agblock_min(a,b) +#else +#define XFS_AGBLOCK_MIN(a,b) \ + ((xfs_agblock_t)(a) < (xfs_agblock_t)(b) ? \ + (xfs_agblock_t)(a) : (xfs_agblock_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGBLOCK_MAX) +xfs_agblock_t xfs_agblock_max(xfs_agblock_t a, xfs_agblock_t b); +#define XFS_AGBLOCK_MAX(a,b) xfs_agblock_max(a,b) +#else +#define XFS_AGBLOCK_MAX(a,b) \ + ((xfs_agblock_t)(a) > (xfs_agblock_t)(b) ? \ + (xfs_agblock_t)(a) : (xfs_agblock_t)(b)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILEOFF_MIN) +xfs_fileoff_t xfs_fileoff_min(xfs_fileoff_t a, xfs_fileoff_t b); +#define XFS_FILEOFF_MIN(a,b) xfs_fileoff_min(a,b) +#else +#define XFS_FILEOFF_MIN(a,b) \ + ((xfs_fileoff_t)(a) < (xfs_fileoff_t)(b) ? 
\ + (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILEOFF_MAX) +xfs_fileoff_t xfs_fileoff_max(xfs_fileoff_t a, xfs_fileoff_t b); +#define XFS_FILEOFF_MAX(a,b) xfs_fileoff_max(a,b) +#else +#define XFS_FILEOFF_MAX(a,b) \ + ((xfs_fileoff_t)(a) > (xfs_fileoff_t)(b) ? \ + (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILBLKS_MIN) +xfs_filblks_t xfs_filblks_min(xfs_filblks_t a, xfs_filblks_t b); +#define XFS_FILBLKS_MIN(a,b) xfs_filblks_min(a,b) +#else +#define XFS_FILBLKS_MIN(a,b) \ + ((xfs_filblks_t)(a) < (xfs_filblks_t)(b) ? \ + (xfs_filblks_t)(a) : (xfs_filblks_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILBLKS_MAX) +xfs_filblks_t xfs_filblks_max(xfs_filblks_t a, xfs_filblks_t b); +#define XFS_FILBLKS_MAX(a,b) xfs_filblks_max(a,b) +#else +#define XFS_FILBLKS_MAX(a,b) \ + ((xfs_filblks_t)(a) > (xfs_filblks_t)(b) ? \ + (xfs_filblks_t)(a) : (xfs_filblks_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_SANITY_CHECK) +int xfs_fsb_sanity_check(struct xfs_mount *mp, xfs_fsblock_t fsb); +#define XFS_FSB_SANITY_CHECK(mp,fsb) xfs_fsb_sanity_check(mp,fsb) +#else +#define XFS_FSB_SANITY_CHECK(mp,fsb) \ + (XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \ + XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks) +#endif + +/* + * Macros to set EFSCORRUPTED & return/branch. + */ +#define XFS_WANT_CORRUPTED_GOTO(x,l) \ + { \ + int fs_is_ok = (x); \ + ASSERT(fs_is_ok); \ + if (unlikely(!fs_is_ok)) { \ + XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \ + XFS_ERRLEVEL_LOW, NULL); \ + error = XFS_ERROR(EFSCORRUPTED); \ + goto l; \ + } \ + } + +#define XFS_WANT_CORRUPTED_RETURN(x) \ + { \ + int fs_is_ok = (x); \ + ASSERT(fs_is_ok); \ + if (unlikely(!fs_is_ok)) { \ + XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \ + XFS_ERRLEVEL_LOW, NULL); \ + return XFS_ERROR(EFSCORRUPTED); \ + } \ + } + +#endif /* __XFS_BTREE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_buf.h linux.21rc5-ac2/fs/xfs/xfs_buf.h --- linux.21rc5/fs/xfs/xfs_buf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_buf.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BUF_H__ +#define __XFS_BUF_H__ + +/* These are just for xfs_syncsub... it sets an internal variable + * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t + */ +#define XFS_B_ASYNC PBF_ASYNC +#define XFS_B_DELWRI PBF_DELWRI +#define XFS_B_READ PBF_READ +#define XFS_B_WRITE PBF_WRITE +#define XFS_B_STALE PBF_STALE + +#define XFS_BUF_TRYLOCK PBF_TRYLOCK +#define XFS_INCORE_TRYLOCK PBF_TRYLOCK +#define XFS_BUF_LOCK PBF_LOCK +#define XFS_BUF_MAPPED PBF_MAPPED + +#define BUF_BUSY PBF_DONT_BLOCK + +#define XFS_BUF_BFLAGS(x) ((x)->pb_flags) +#define XFS_BUF_ZEROFLAGS(x) \ + ((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_DELWRI)) + +#define XFS_BUF_STALE(x) ((x)->pb_flags |= XFS_B_STALE) +#define XFS_BUF_UNSTALE(x) ((x)->pb_flags &= ~XFS_B_STALE) +#define XFS_BUF_ISSTALE(x) ((x)->pb_flags & XFS_B_STALE) +#define XFS_BUF_SUPER_STALE(x) do { \ + XFS_BUF_STALE(x); \ + xfs_buf_undelay(x); \ + XFS_BUF_DONE(x); \ + } while (0) + +#define XFS_BUF_MANAGE PBF_FS_MANAGED +#define XFS_BUF_UNMANAGE(x) ((x)->pb_flags &= ~PBF_FS_MANAGED) + +static inline void xfs_buf_undelay(page_buf_t *pb) +{ + if (pb->pb_flags & PBF_DELWRI) { + if (pb->pb_list.next != &pb->pb_list) { + pagebuf_delwri_dequeue(pb); + pagebuf_rele(pb); + } else { + pb->pb_flags &= ~PBF_DELWRI; + } + } +} + +#define XFS_BUF_DELAYWRITE(x) ((x)->pb_flags |= PBF_DELWRI) +#define XFS_BUF_UNDELAYWRITE(x) xfs_buf_undelay(x) +#define XFS_BUF_ISDELAYWRITE(x) ((x)->pb_flags & PBF_DELWRI) + +#define XFS_BUF_ERROR(x,no) pagebuf_ioerror(x,no) +#define XFS_BUF_GETERROR(x) pagebuf_geterror(x) +#define XFS_BUF_ISERROR(x) (pagebuf_geterror(x)?1:0) + +#define XFS_BUF_DONE(x) ((x)->pb_flags &= ~(PBF_PARTIAL|PBF_NONE)) +#define XFS_BUF_UNDONE(x) ((x)->pb_flags |= PBF_PARTIAL|PBF_NONE) +#define XFS_BUF_ISDONE(x) (!(PBF_NOT_DONE(x))) + +#define XFS_BUF_BUSY(x) ((x)->pb_flags |= PBF_FORCEIO) +#define XFS_BUF_UNBUSY(x) ((x)->pb_flags &= ~PBF_FORCEIO) +#define XFS_BUF_ISBUSY(x) (1) + +#define XFS_BUF_ASYNC(x) ((x)->pb_flags |= PBF_ASYNC) +#define XFS_BUF_UNASYNC(x) ((x)->pb_flags &= ~PBF_ASYNC) +#define XFS_BUF_ISASYNC(x) ((x)->pb_flags & PBF_ASYNC) + +#define XFS_BUF_FLUSH(x) ((x)->pb_flags |= PBF_FLUSH) +#define XFS_BUF_UNFLUSH(x) ((x)->pb_flags &= ~PBF_FLUSH) +#define XFS_BUF_ISFLUSH(x) ((x)->pb_flags & PBF_FLUSH) + +#define XFS_BUF_SHUT(x) printk("XFS_BUF_SHUT not implemented yet\n") +#define XFS_BUF_UNSHUT(x) printk("XFS_BUF_UNSHUT not implemented yet\n") +#define XFS_BUF_ISSHUT(x) (0) + +#define XFS_BUF_HOLD(x) pagebuf_hold(x) +#define XFS_BUF_READ(x) ((x)->pb_flags |= PBF_READ) +#define XFS_BUF_UNREAD(x) ((x)->pb_flags &= ~PBF_READ) +#define XFS_BUF_ISREAD(x) ((x)->pb_flags & PBF_READ) + +#define XFS_BUF_WRITE(x) ((x)->pb_flags |= PBF_WRITE) +#define XFS_BUF_UNWRITE(x) ((x)->pb_flags &= ~PBF_WRITE) +#define XFS_BUF_ISWRITE(x) ((x)->pb_flags & PBF_WRITE) + +#define XFS_BUF_ISUNINITIAL(x) (0) +#define XFS_BUF_UNUNINITIAL(x) (0) + +#define XFS_BUF_BP_ISMAPPED(bp) 1 + +typedef struct page_buf_s xfs_buf_t; +#define xfs_buf page_buf_s + +typedef struct pb_target xfs_buftarg_t; +#define xfs_buftarg pb_target + +#define XFS_BUF_DATAIO(x) ((x)->pb_flags |= PBF_FS_DATAIOD) +#define XFS_BUF_UNDATAIO(x) ((x)->pb_flags &= ~PBF_FS_DATAIOD) + +#define 
XFS_BUF_IODONE_FUNC(buf) (buf)->pb_iodone +#define XFS_BUF_SET_IODONE_FUNC(buf, func) \ + (buf)->pb_iodone = (func) +#define XFS_BUF_CLR_IODONE_FUNC(buf) \ + (buf)->pb_iodone = NULL +#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func) \ + (buf)->pb_strat = (func) +#define XFS_BUF_CLR_BDSTRAT_FUNC(buf) \ + (buf)->pb_strat = NULL + +#define XFS_BUF_FSPRIVATE(buf, type) \ + ((type)(buf)->pb_fspriv) +#define XFS_BUF_SET_FSPRIVATE(buf, value) \ + (buf)->pb_fspriv = (void *)(value) +#define XFS_BUF_FSPRIVATE2(buf, type) \ + ((type)(buf)->pb_fspriv2) +#define XFS_BUF_SET_FSPRIVATE2(buf, value) \ + (buf)->pb_fspriv2 = (void *)(value) +#define XFS_BUF_FSPRIVATE3(buf, type) \ + ((type)(buf)->pb_fspriv3) +#define XFS_BUF_SET_FSPRIVATE3(buf, value) \ + (buf)->pb_fspriv3 = (void *)(value) +#define XFS_BUF_SET_START(buf) + +#define XFS_BUF_SET_BRELSE_FUNC(buf, value) \ + (buf)->pb_relse = (value) + +#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr) + +extern inline xfs_caddr_t xfs_buf_offset(page_buf_t *bp, size_t offset) +{ + if (bp->pb_flags & PBF_MAPPED) + return XFS_BUF_PTR(bp) + offset; + return (xfs_caddr_t) pagebuf_offset(bp, offset); +} + +#define XFS_BUF_SET_PTR(bp, val, count) \ + pagebuf_associate_memory(bp, val, count) +#define XFS_BUF_ADDR(bp) ((bp)->pb_bn) +#define XFS_BUF_SET_ADDR(bp, blk) \ + ((bp)->pb_bn = (page_buf_daddr_t)(blk)) +#define XFS_BUF_OFFSET(bp) ((bp)->pb_file_offset) +#define XFS_BUF_SET_OFFSET(bp, off) \ + ((bp)->pb_file_offset = (off)) +#define XFS_BUF_COUNT(bp) ((bp)->pb_count_desired) +#define XFS_BUF_SET_COUNT(bp, cnt) \ + ((bp)->pb_count_desired = (cnt)) +#define XFS_BUF_SIZE(bp) ((bp)->pb_buffer_length) +#define XFS_BUF_SET_SIZE(bp, cnt) \ + ((bp)->pb_buffer_length = (cnt)) +#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) +#define XFS_BUF_SET_VTYPE(bp, type) +#define XFS_BUF_SET_REF(bp, ref) + +#define XFS_BUF_ISPINNED(bp) pagebuf_ispin(bp) + +#define XFS_BUF_VALUSEMA(bp) pagebuf_lock_value(bp) +#define XFS_BUF_CPSEMA(bp) (pagebuf_cond_lock(bp) == 0) +#define XFS_BUF_VSEMA(bp) pagebuf_unlock(bp) +#define XFS_BUF_PSEMA(bp,x) pagebuf_lock(bp) +#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema); + +/* setup the buffer target from a buftarg structure */ +#define XFS_BUF_SET_TARGET(bp, target) \ + (bp)->pb_target = (target) + +#define XFS_BUF_TARGET(bp) ((bp)->pb_target) +#define XFS_BUF_TARGET_DEV(bp) ((bp)->pb_target->pbr_dev) +#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) +#define XFS_BUF_SET_VTYPE(bp, type) +#define XFS_BUF_SET_REF(bp, ref) + +#define xfs_buf_read(target, blkno, len, flags) \ + pagebuf_get((target), (blkno), (len), \ + PBF_LOCK | PBF_READ | PBF_MAPPED | PBF_MAPPABLE) +#define xfs_buf_get(target, blkno, len, flags) \ + pagebuf_get((target), (blkno), (len), \ + PBF_LOCK | PBF_MAPPED | PBF_MAPPABLE) + +#define xfs_buf_read_flags(target, blkno, len, flags) \ + pagebuf_get((target), (blkno), (len), \ + PBF_READ | PBF_MAPPABLE | flags) +#define xfs_buf_get_flags(target, blkno, len, flags) \ + pagebuf_get((target), (blkno), (len), \ + PBF_MAPPABLE | flags) + +static inline int xfs_bawrite(void *mp, page_buf_t *bp) +{ + int ret; + + bp->pb_fspriv3 = mp; + bp->pb_strat = xfs_bdstrat_cb; + xfs_buf_undelay(bp); + if ((ret = pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC)) == 0) + pagebuf_run_queues(bp); + return ret; +} + +static inline void xfs_buf_relse(page_buf_t *bp) +{ + if ((bp->pb_flags & _PBF_LOCKABLE) && !bp->pb_relse) + pagebuf_unlock(bp); + + pagebuf_rele(bp); +} + + +#define xfs_bpin(bp) pagebuf_pin(bp) +#define xfs_bunpin(bp) pagebuf_unpin(bp) + +#ifdef 
PAGEBUF_TRACE +# define PB_DEFINE_TRACES +# include +# define xfs_buftrace(id, bp) PB_TRACE(bp, PB_TRACE_REC(external), (void *)id) +#else +# define xfs_buftrace(id, bp) do { } while (0) +#endif + + +#define xfs_biodone(pb) \ + pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), 0) + +#define xfs_incore(buftarg,blkno,len,lockit) \ + pagebuf_find(buftarg, blkno ,len, lockit) + + +#define xfs_biomove(pb, off, len, data, rw) \ + pagebuf_iomove((pb), (off), (len), (data), \ + ((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ) + +#define xfs_biozero(pb, off, len) \ + pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO) + + +static inline int XFS_bwrite(page_buf_t *pb) +{ + int sync = (pb->pb_flags & PBF_ASYNC) == 0; + int error; + + pb->pb_flags |= PBF_SYNC; + + xfs_buf_undelay(pb); + + __pagebuf_iorequest(pb); + + if (sync) { + error = pagebuf_iowait(pb); + xfs_buf_relse(pb); + } else { + pagebuf_run_queues(pb); + error = 0; + } + + return error; +} + + +#define XFS_bdwrite(pb) \ + pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC) + +static inline int xfs_bdwrite(void *mp, page_buf_t *bp) +{ + bp->pb_strat = xfs_bdstrat_cb; + bp->pb_fspriv3 = mp; + + return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC); +} + +#define XFS_bdstrat(bp) pagebuf_iorequest(bp) + +#define xfs_iowait(pb) pagebuf_iowait(pb) + + +/* + * Go through all incore buffers, and release buffers + * if they belong to the given device. This is used in + * filesystem error handling to preserve the consistency + * of its metadata. + */ + +extern void XFS_bflush(xfs_buftarg_t *); +#define xfs_binval(buftarg) XFS_bflush(buftarg) + +#define xfs_incore_relse(buftarg,delwri_only,wait) \ + xfs_relse_buftarg(buftarg) + +#define xfs_baread(target, rablkno, ralen) \ + pagebuf_readahead((target), (rablkno), \ + (ralen), PBF_DONT_BLOCK) + +#define XFS_getrbuf(sleep,mp) \ + pagebuf_get_empty((mp)->m_ddev_targp) +#define XFS_ngetrbuf(len,mp) \ + pagebuf_get_no_daddr(len,(mp)->m_ddev_targp) +#define XFS_freerbuf(bp) pagebuf_free(bp) +#define XFS_nfreerbuf(bp) pagebuf_free(bp) + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_buf_item.c linux.21rc5-ac2/fs/xfs/xfs_buf_item.c --- linux.21rc5/fs/xfs/xfs_buf_item.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_buf_item.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,1218 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * This file contains the implementation of the xfs_buf_log_item. + * It contains the item operations used to manipulate the buf log + * items as well as utility routines used by the buffer specific + * transaction routines. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_rw.h" +#include "xfs_bit.h" +#include "xfs_error.h" + + +#define ROUNDUPNBWORD(x) (((x) + (NBWORD - 1)) & ~(NBWORD - 1)) + +kmem_zone_t *xfs_buf_item_zone; + +#ifdef XFS_TRANS_DEBUG +/* + * This function uses an alternate strategy for tracking the bytes + * that the user requests to be logged. This can then be used + * in conjunction with the bli_orig array in the buf log item to + * catch bugs in our callers' code. + * + * We also double check the bits set in xfs_buf_item_log using a + * simple algorithm to check that every byte is accounted for. + */ +STATIC void +xfs_buf_item_log_debug( + xfs_buf_log_item_t *bip, + uint first, + uint last) +{ + uint x; + uint byte; + uint nbytes; + uint chunk_num; + uint word_num; + uint bit_num; + uint bit_set; + uint *wordp; + + ASSERT(bip->bli_logged != NULL); + byte = first; + nbytes = last - first + 1; + bfset(bip->bli_logged, first, nbytes); + for (x = 0; x < nbytes; x++) { + chunk_num = byte >> XFS_BLI_SHIFT; + word_num = chunk_num >> BIT_TO_WORD_SHIFT; + bit_num = chunk_num & (NBWORD - 1); + wordp = &(bip->bli_format.blf_data_map[word_num]); + bit_set = *wordp & (1 << bit_num); + ASSERT(bit_set); + byte++; + } +} + +/* + * This function is called when we flush something into a buffer without + * logging it. This happens for things like inodes which are logged + * separately from the buffer. + */ +void +xfs_buf_item_flush_log_debug( + xfs_buf_t *bp, + uint first, + uint last) +{ + xfs_buf_log_item_t *bip; + uint nbytes; + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) { + return; + } + + ASSERT(bip->bli_logged != NULL); + nbytes = last - first + 1; + bfset(bip->bli_logged, first, nbytes); +} + +/* + * This function is called to verify that our caller's have logged + * all the bytes that they changed. + * + * It does this by comparing the original copy of the buffer stored in + * the buf log item's bli_orig array to the current copy of the buffer + * and ensuring that all bytes which miscompare are set in the bli_logged + * array of the buf log item. 
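+ * A byte that miscompares without its bli_logged bit set causes an
+ * immediate panic via cmn_err(CE_PANIC).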
+ */ +STATIC void +xfs_buf_item_log_check( + xfs_buf_log_item_t *bip) +{ + char *orig; + char *buffer; + int x; + xfs_buf_t *bp; + + ASSERT(bip->bli_orig != NULL); + ASSERT(bip->bli_logged != NULL); + + bp = bip->bli_buf; + ASSERT(XFS_BUF_COUNT(bp) > 0); + ASSERT(XFS_BUF_PTR(bp) != NULL); + orig = bip->bli_orig; + buffer = XFS_BUF_PTR(bp); + for (x = 0; x < XFS_BUF_COUNT(bp); x++) { + if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) + cmn_err(CE_PANIC, + "xfs_buf_item_log_check bip %x buffer %x orig %x index %d", + bip, bp, orig, x); + } +} +#else +#define xfs_buf_item_log_debug(x,y,z) +#define xfs_buf_item_log_check(x) +#endif + +STATIC void xfs_buf_error_relse(xfs_buf_t *bp); + +/* + * This returns the number of log iovecs needed to log the + * given buf log item. + * + * It calculates this as 1 iovec for the buf log format structure + * and 1 for each stretch of non-contiguous chunks to be logged. + * Contiguous chunks are logged in a single iovec. + * + * If the XFS_BLI_STALE flag has been set, then log nothing. + */ +uint +xfs_buf_item_size( + xfs_buf_log_item_t *bip) +{ + uint nvecs; + int next_bit; + int last_bit; + xfs_buf_t *bp; + + ASSERT(atomic_read(&bip->bli_refcount) > 0); + if (bip->bli_flags & XFS_BLI_STALE) { + /* + * The buffer is stale, so all we need to log + * is the buf log format structure with the + * cancel flag in it. + */ + xfs_buf_item_trace("SIZE STALE", bip); + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + return 1; + } + + bp = bip->bli_buf; + ASSERT(bip->bli_flags & XFS_BLI_LOGGED); + nvecs = 1; + last_bit = xfs_next_bit(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, 0); + ASSERT(last_bit != -1); + nvecs++; + while (last_bit != -1) { + /* + * This takes the bit number to start looking from and + * returns the next set bit from there. It returns -1 + * if there are no more bits set or the start bit is + * beyond the end of the bitmap. + */ + next_bit = xfs_next_bit(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, + last_bit + 1); + /* + * If we run out of bits, leave the loop, + * else if we find a new set of bits bump the number of vecs, + * else keep scanning the current set of bits. + */ + if (next_bit == -1) { + last_bit = -1; + } else if (next_bit != last_bit + 1) { + last_bit = next_bit; + nvecs++; + } else if (xfs_buf_offset(bp, next_bit * XFS_BLI_CHUNK) != + (xfs_buf_offset(bp, last_bit * XFS_BLI_CHUNK) + + XFS_BLI_CHUNK)) { + last_bit = next_bit; + nvecs++; + } else { + last_bit++; + } + } + + xfs_buf_item_trace("SIZE NORM", bip); + return nvecs; +} + +/* + * This is called to fill in the vector of log iovecs for the + * given log buf item. It fills the first entry with a buf log + * format structure, and the rest point to contiguous chunks + * within the buffer. + */ +void +xfs_buf_item_format( + xfs_buf_log_item_t *bip, + xfs_log_iovec_t *log_vector) +{ + uint base_size; + uint nvecs; + xfs_log_iovec_t *vecp; + xfs_buf_t *bp; + int first_bit; + int last_bit; + int next_bit; + uint nbits; + uint buffer_offset; + + ASSERT(atomic_read(&bip->bli_refcount) > 0); + ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || + (bip->bli_flags & XFS_BLI_STALE)); + bp = bip->bli_buf; + ASSERT(XFS_BUF_BP_ISMAPPED(bp)); + vecp = log_vector; + + /* + * The size of the base structure is the size of the + * declared structure plus the space for the extra words + * of the bitmap. We subtract one from the map size, because + * the first element of the bitmap is accounted for in the + * size of the base structure. 
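+ * (blf_map_size words in total, one of which is inside
+ * sizeof(xfs_buf_log_format_t).)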
+ */ + base_size = + (uint)(sizeof(xfs_buf_log_format_t) + + ((bip->bli_format.blf_map_size - 1) * sizeof(uint))); + vecp->i_addr = (xfs_caddr_t)&bip->bli_format; + vecp->i_len = base_size; + vecp++; + nvecs = 1; + + if (bip->bli_flags & XFS_BLI_STALE) { + /* + * The buffer is stale, so all we need to log + * is the buf log format structure with the + * cancel flag in it. + */ + xfs_buf_item_trace("FORMAT STALE", bip); + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + bip->bli_format.blf_size = nvecs; + return; + } + + /* + * Fill in an iovec for each set of contiguous chunks. + */ + first_bit = xfs_next_bit(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, 0); + ASSERT(first_bit != -1); + last_bit = first_bit; + nbits = 1; + for (;;) { + /* + * This takes the bit number to start looking from and + * returns the next set bit from there. It returns -1 + * if there are no more bits set or the start bit is + * beyond the end of the bitmap. + */ + next_bit = xfs_next_bit(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, + (uint)last_bit + 1); + /* + * If we run out of bits fill in the last iovec and get + * out of the loop. + * Else if we start a new set of bits then fill in the + * iovec for the series we were looking at and start + * counting the bits in the new one. + * Else we're still in the same set of bits so just + * keep counting and scanning. + */ + if (next_bit == -1) { + buffer_offset = first_bit * XFS_BLI_CHUNK; + vecp->i_addr = xfs_buf_offset(bp, buffer_offset); + vecp->i_len = nbits * XFS_BLI_CHUNK; + nvecs++; + break; + } else if (next_bit != last_bit + 1) { + buffer_offset = first_bit * XFS_BLI_CHUNK; + vecp->i_addr = xfs_buf_offset(bp, buffer_offset); + vecp->i_len = nbits * XFS_BLI_CHUNK; + nvecs++; + vecp++; + first_bit = next_bit; + last_bit = next_bit; + nbits = 1; + } else if (xfs_buf_offset(bp, next_bit << XFS_BLI_SHIFT) != + (xfs_buf_offset(bp, last_bit << XFS_BLI_SHIFT) + + XFS_BLI_CHUNK)) { + buffer_offset = first_bit * XFS_BLI_CHUNK; + vecp->i_addr = xfs_buf_offset(bp, buffer_offset); + vecp->i_len = nbits * XFS_BLI_CHUNK; +/* You would think we need to bump the nvecs here too, but we do not + * this number is used by recovery, and it gets confused by the boundary + * split here + * nvecs++; + */ + vecp++; + first_bit = next_bit; + last_bit = next_bit; + nbits = 1; + } else { + last_bit++; + nbits++; + } + } + bip->bli_format.blf_size = nvecs; + + /* + * Check to make sure everything is consistent. + */ + xfs_buf_item_trace("FORMAT NORM", bip); + xfs_buf_item_log_check(bip); +} + +/* + * This is called to pin the buffer associated with the buf log + * item in memory so it cannot be written out. Simply call bpin() + * on the buffer to do this. + */ +void +xfs_buf_item_pin( + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + + bp = bip->bli_buf; + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || + (bip->bli_flags & XFS_BLI_STALE)); + xfs_buf_item_trace("PIN", bip); + xfs_buftrace("XFS_PIN", bp); + xfs_bpin(bp); +} + + +/* + * This is called to unpin the buffer associated with the buf log + * item which was previously pinned with a call to xfs_buf_item_pin(). + * Just call bunpin() on the buffer to do this. + * + * Also drop the reference to the buf item for the current transaction. + * If the XFS_BLI_STALE flag is set and we are the last reference, + * then free up the buf log item and unlock the buffer. 
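+ * In the stale case the log item is also removed from the AIL (if it
+ * is there) before the buf log item and the buffer are released.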
+ */ +void +xfs_buf_item_unpin( + xfs_buf_log_item_t *bip, + int stale) +{ + xfs_mount_t *mp; + xfs_buf_t *bp; + int freed; + SPLDECL(s); + + bp = bip->bli_buf; + ASSERT(bp != NULL); + ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + xfs_buf_item_trace("UNPIN", bip); + xfs_buftrace("XFS_UNPIN", bp); + + freed = atomic_dec_and_test(&bip->bli_refcount); + mp = bip->bli_item.li_mountp; + xfs_bunpin(bp); + if (freed && stale) { + ASSERT(bip->bli_flags & XFS_BLI_STALE); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); + ASSERT(XFS_BUF_ISSTALE(bp)); +/** + ASSERT(bp->b_pincount == 0); +**/ + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + xfs_buf_item_trace("UNPIN STALE", bip); + xfs_buftrace("XFS_UNPIN STALE", bp); + AIL_LOCK(mp,s); + /* + * If we get called here because of an IO error, we may + * or may not have the item on the AIL. xfs_trans_delete_ail() + * will take care of that situation. + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s); + xfs_buf_item_relse(bp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL); + xfs_buf_relse(bp); + } +} + +/* + * this is called from uncommit in the forced-shutdown path. + * we need to check to see if the reference count on the log item + * is going to drop to zero. If so, unpin will free the log item + * so we need to free the item's descriptor (that points to the item) + * in the transaction. + */ +void +xfs_buf_item_unpin_remove( + xfs_buf_log_item_t *bip, + xfs_trans_t *tp) +{ + xfs_buf_t *bp; + xfs_log_item_desc_t *lidp; + int stale = 0; + + bp = bip->bli_buf; + /* + * will xfs_buf_item_unpin() call xfs_buf_item_relse()? + */ + if ((atomic_read(&bip->bli_refcount) == 1) && + (bip->bli_flags & XFS_BLI_STALE)) { + ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0); + xfs_buf_item_trace("UNPIN REMOVE", bip); + xfs_buftrace("XFS_UNPIN_REMOVE", bp); + /* + * yes -- clear the xaction descriptor in-use flag + * and free the chunk if required. We can safely + * do some work here and then call buf_item_unpin + * to do the rest because if the if is true, then + * we are holding the buffer locked so no one else + * will be able to bump up the refcount. + */ + lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) bip); + stale = lidp->lid_flags & XFS_LID_BUF_STALE; + xfs_trans_free_item(tp, lidp); + /* + * Since the transaction no longer refers to the buffer, + * the buffer should no longer refer to the transaction. + */ + XFS_BUF_SET_FSPRIVATE2(bp, NULL); + } + + xfs_buf_item_unpin(bip, stale); + + return; +} + +/* + * This is called to attempt to lock the buffer associated with this + * buf log item. Don't sleep on the buffer lock. If we can't get + * the lock right away, return 0. If we can get the lock, pull the + * buffer from the free list, mark it busy, and return 1. + */ +uint +xfs_buf_item_trylock( + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + + bp = bip->bli_buf; + + if (XFS_BUF_ISPINNED(bp)) { + return XFS_ITEM_PINNED; + } + + if (!XFS_BUF_CPSEMA(bp)) { + return XFS_ITEM_LOCKED; + } + + /* + * Remove the buffer from the free list. Only do this + * if it's on the free list. Private buffers like the + * superblock buffer are not. + */ + XFS_BUF_HOLD(bp); + + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + xfs_buf_item_trace("TRYLOCK SUCCESS", bip); + return XFS_ITEM_SUCCESS; +} + +/* + * Release the buffer associated with the buf log item. 
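The trylock above encodes a strict precedence: pinned beats locked beats success, and nothing ever sleeps, which is what lets the caller decide between forcing the log and skipping the item. A minimal pthread analogue of that ordering (struct sbuf and its pin_count field are assumptions of the sketch, not xfs_buf_t fields):

#include <pthread.h>

enum { ITEM_SUCCESS, ITEM_PINNED, ITEM_LOCKED };

struct sbuf {
        pthread_mutex_t lock;
        int             pin_count;      /* > 0: writeback must wait */
};

/* never sleeps; reports PINNED without even touching the lock */
static int sbuf_trylock(struct sbuf *bp)
{
        if (bp->pin_count > 0)
                return ITEM_PINNED;
        if (pthread_mutex_trylock(&bp->lock) != 0)
                return ITEM_LOCKED;
        return ITEM_SUCCESS;
}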
+ * If there is no dirty logged data associated with the + * buffer recorded in the buf log item, then free the + * buf log item and remove the reference to it in the + * buffer. + * + * This call ignores the recursion count. It is only called + * when the buffer should REALLY be unlocked, regardless + * of the recursion count. + * + * If the XFS_BLI_HOLD flag is set in the buf log item, then + * free the log item if necessary but do not unlock the buffer. + * This is for support of xfs_trans_bhold(). Make sure the + * XFS_BLI_HOLD field is cleared if we don't free the item. + */ +void +xfs_buf_item_unlock( + xfs_buf_log_item_t *bip) +{ + int aborted; + xfs_buf_t *bp; + uint hold; + + bp = bip->bli_buf; + xfs_buftrace("XFS_UNLOCK", bp); + + /* + * Clear the buffer's association with this transaction. + */ + XFS_BUF_SET_FSPRIVATE2(bp, NULL); + + /* + * If this is a transaction abort, don't return early. + * Instead, allow the brelse to happen. + * Normally it would be done for stale (cancelled) buffers + * at unpin time, but we'll never go through the pin/unpin + * cycle if we abort inside commit. + */ + aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0; + + /* + * If the buf item is marked stale, then don't do anything. + * We'll unlock the buffer and free the buf item when the + * buffer is unpinned for the last time. + */ + if (bip->bli_flags & XFS_BLI_STALE) { + bip->bli_flags &= ~XFS_BLI_LOGGED; + xfs_buf_item_trace("UNLOCK STALE", bip); + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + if (!aborted) + return; + } + + /* + * Drop the transaction's reference to the log item if + * it was not logged as part of the transaction. Otherwise + * we'll drop the reference in xfs_buf_item_unpin() when + * the transaction is really through with the buffer. + */ + if (!(bip->bli_flags & XFS_BLI_LOGGED)) { + atomic_dec(&bip->bli_refcount); + } else { + /* + * Clear the logged flag since this is per + * transaction state. + */ + bip->bli_flags &= ~XFS_BLI_LOGGED; + } + + /* + * Before possibly freeing the buf item, determine if we should + * release the buffer at the end of this routine. + */ + hold = bip->bli_flags & XFS_BLI_HOLD; + xfs_buf_item_trace("UNLOCK", bip); + + /* + * If the buf item isn't tracking any data, free it. + * Otherwise, if XFS_BLI_HOLD is set clear it. + */ + if (xfs_count_bits(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, 0) == 0) { + xfs_buf_item_relse(bp); + } else if (hold) { + bip->bli_flags &= ~XFS_BLI_HOLD; + } + + /* + * Release the buffer if XFS_BLI_HOLD was not set. + */ + if (!hold) { + xfs_buf_relse(bp); + } +} + +/* + * This is called to find out where the oldest active copy of the + * buf log item in the on disk log resides now that the last log + * write of it completed at the given lsn. + * We always re-log all the dirty data in a buffer, so usually the + * latest copy in the on disk log is the only one that matters. For + * those cases we simply return the given lsn. + * + * The one exception to this is for buffers full of newly allocated + * inodes. These buffers are only relogged with the XFS_BLI_INODE_BUF + * flag set, indicating that only the di_next_unlinked fields from the + * inodes in the buffers will be replayed during recovery. If the + * original newly allocated inode images have not yet been flushed + * when the buffer is so relogged, then we need to make sure that we + * keep the old images in the 'active' portion of the log. 
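That rule collapses to a two-branch decision in the committed handler, shown below in reduced form (the type and flag names are invented; only the shape matches xfs_buf_item_committed()):

typedef long long lsn_t;

#define LI_INODE_ALLOC  0x1     /* stand-in for XFS_BLI_INODE_ALLOC_BUF */

struct litem {
        unsigned int    flags;
        lsn_t           lsn;    /* LSN when first logged, 0 if never */
};

static lsn_t item_committed(struct litem *lip, lsn_t commit_lsn)
{
        /* keep the log tail behind the original inode images */
        if ((lip->flags & LI_INODE_ALLOC) && lip->lsn != 0)
                return lip->lsn;
        return commit_lsn;
}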
We do this + * by returning the original lsn of that transaction here rather than + * the current one. + */ +xfs_lsn_t +xfs_buf_item_committed( + xfs_buf_log_item_t *bip, + xfs_lsn_t lsn) +{ + xfs_buf_item_trace("COMMITTED", bip); + if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && + (bip->bli_item.li_lsn != 0)) { + return bip->bli_item.li_lsn; + } + return (lsn); +} + +/* + * This is called when the transaction holding the buffer is aborted. + * Just behave as if the transaction had been cancelled. If we're shutting down + * and have aborted this transaction, we'll trap this buffer when it tries to + * get written out. + */ +void +xfs_buf_item_abort( + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + + bp = bip->bli_buf; + xfs_buftrace("XFS_ABORT", bp); + XFS_BUF_SUPER_STALE(bp); + xfs_buf_item_unlock(bip); + return; +} + +/* + * This is called to asynchronously write the buffer associated with this + * buf log item out to disk. The buffer will already have been locked by + * a successful call to xfs_buf_item_trylock(). If the buffer still has + * B_DELWRI set, then get it going out to disk with a call to bawrite(). + * If not, then just release the buffer. + */ +void +xfs_buf_item_push( + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + xfs_buf_item_trace("PUSH", bip); + + bp = bip->bli_buf; + + if (XFS_BUF_ISDELAYWRITE(bp)) { + xfs_bawrite(bip->bli_item.li_mountp, bp); + } else { + xfs_buf_relse(bp); + } +} + +/* ARGSUSED */ +void +xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn) +{ +} + +/* + * This is the ops vector shared by all buf log items. + */ +struct xfs_item_ops xfs_buf_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_buf_item_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_buf_item_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *)) + xfs_buf_item_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_buf_item_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_buf_item_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_buf_item_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_buf_item_committing +}; + + +/* + * Allocate a new buf log item to go with the given buffer. + * Set the buffer's b_fsprivate field to point to the new + * buf log item. If there are other item's attached to the + * buffer (see xfs_buf_attach_iodone() below), then put the + * buf log item at the front. + */ +void +xfs_buf_item_init( + xfs_buf_t *bp, + xfs_mount_t *mp) +{ + xfs_log_item_t *lip; + xfs_buf_log_item_t *bip; + int chunks; + int map_size; + + /* + * Check to see if there is already a buf log item for + * this buffer. If there is, it is guaranteed to be + * the first. If we do already have one, there is + * nothing to do here so return. + */ + if (XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *) != mp) + XFS_BUF_SET_FSPRIVATE3(bp, mp); + XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb); + if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { + lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + if (lip->li_type == XFS_LI_BUF) { + return; + } + } + + /* + * chunks is the number of XFS_BLI_CHUNK size pieces + * the buffer can be divided into. Make sure not to + * truncate any pieces. 
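The chunk arithmetic right below is the usual add-divisor-minus-one-then-shift round-up; with XFS_BLI_CHUNK = 128 (so XFS_BLI_SHIFT = 7), a 300-byte buffer needs three chunks, not two. A quick standalone check:

#include <stdio.h>

int main(void)
{
        unsigned int chunk = 128, shift = 7;    /* XFS_BLI_CHUNK/SHIFT */
        unsigned int count = 300;               /* example buffer length */
        unsigned int chunks = (count + (chunk - 1)) >> shift;

        printf("%u bytes -> %u chunks\n", count, chunks);       /* 3 */
        return 0;
}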
map_size is the size of the + * bitmap needed to describe the chunks of the buffer. + */ + chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLI_CHUNK - 1)) >> XFS_BLI_SHIFT); + map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT); + + bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone, + KM_SLEEP); + bip->bli_item.li_type = XFS_LI_BUF; + bip->bli_item.li_ops = &xfs_buf_item_ops; + bip->bli_item.li_mountp = mp; + bip->bli_buf = bp; + bip->bli_format.blf_type = XFS_LI_BUF; + bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp); + bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp)); + bip->bli_format.blf_map_size = map_size; +#ifdef XFS_BLI_TRACE + bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP); +#endif + +#ifdef XFS_TRANS_DEBUG + /* + * Allocate the arrays for tracking what needs to be logged + * and what our callers request to be logged. bli_orig + * holds a copy of the original, clean buffer for comparison + * against, and bli_logged keeps a 1 bit flag per byte in + * the buffer to indicate which bytes the callers have asked + * to have logged. + */ + bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP); + memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp)); + bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP); +#endif + + /* + * Put the buf item into the list of items attached to the + * buffer at the front. + */ + if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { + bip->bli_item.li_bio_list = + XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + } + XFS_BUF_SET_FSPRIVATE(bp, bip); +} + + +/* + * Mark bytes first through last inclusive as dirty in the buf + * item's bitmap. + */ +void +xfs_buf_item_log( + xfs_buf_log_item_t *bip, + uint first, + uint last) +{ + uint first_bit; + uint last_bit; + uint bits_to_set; + uint bits_set; + uint word_num; + uint *wordp; + uint bit; + uint end_bit; + uint mask; + + /* + * Mark the item as having some dirty data for + * quick reference in xfs_buf_item_dirty. + */ + bip->bli_flags |= XFS_BLI_DIRTY; + + /* + * Convert byte offsets to bit numbers. + */ + first_bit = first >> XFS_BLI_SHIFT; + last_bit = last >> XFS_BLI_SHIFT; + + /* + * Calculate the total number of bits to be set. + */ + bits_to_set = last_bit - first_bit + 1; + + /* + * Get a pointer to the first word in the bitmap + * to set a bit in. + */ + word_num = first_bit >> BIT_TO_WORD_SHIFT; + wordp = &(bip->bli_format.blf_data_map[word_num]); + + /* + * Calculate the starting bit in the first word. + */ + bit = first_bit & (uint)(NBWORD - 1); + + /* + * First set any bits in the first word of our range. + * If it starts at bit 0 of the word, it will be + * set below rather than here. That is what the variable + * bit tells us. The variable bits_set tracks the number + * of bits that have been set so far. End_bit is the number + * of the last bit to be set in this word plus one. + */ + if (bit) { + end_bit = MIN(bit + bits_to_set, (uint)NBWORD); + mask = ((1 << (end_bit - bit)) - 1) << bit; + *wordp |= mask; + wordp++; + bits_set = end_bit - bit; + } else { + bits_set = 0; + } + + /* + * Now set bits a whole word at a time that are between + * first_bit and last_bit. + */ + while ((bits_to_set - bits_set) >= NBWORD) { + *wordp |= 0xffffffff; + bits_set += NBWORD; + wordp++; + } + + /* + * Finally, set any bits left to be set in one last partial word. 
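The three phases just described (partial leading word, run of whole words, partial trailing word) can be lifted out of xfs_buf_item_log() and exercised on their own. A simplified standalone version under the same 32-bits-per-word assumption, with no bounds checking:

#define NBW 32  /* bits per bitmap word, as NBWORD in this file */

static void set_bits(unsigned int *map, unsigned int first,
                     unsigned int count)
{
        unsigned int *wordp = &map[first / NBW];
        unsigned int bit = first % NBW;

        if (bit) {                      /* leading partial word */
                unsigned int n = count < NBW - bit ? count : NBW - bit;

                *wordp++ |= ((1U << n) - 1) << bit;
                count -= n;
        }
        while (count >= NBW) {          /* whole words at a time */
                *wordp++ = 0xffffffff;
                count -= NBW;
        }
        if (count)                      /* trailing partial word */
                *wordp |= (1U << count) - 1;
}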
+ */ + end_bit = bits_to_set - bits_set; + if (end_bit) { + mask = (1 << end_bit) - 1; + *wordp |= mask; + } + + xfs_buf_item_log_debug(bip, first, last); +} + + +/* + * Return 1 if the buffer has some data that has been logged (at any + * point, not just the current transaction) and 0 if not. + */ +uint +xfs_buf_item_dirty( + xfs_buf_log_item_t *bip) +{ + return (bip->bli_flags & XFS_BLI_DIRTY); +} + +/* + * This is called when the buf log item is no longer needed. It should + * free the buf log item associated with the given buffer and clear + * the buffer's pointer to the buf log item. If there are no more + * items in the list, clear the b_iodone field of the buffer (see + * xfs_buf_attach_iodone() below). + */ +void +xfs_buf_item_relse( + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + xfs_buftrace("XFS_RELSE", bp); + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list); + if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) && + (XFS_BUF_IODONE_FUNC(bp) != NULL)) { + ASSERT((XFS_BUF_ISUNINITIAL(bp)) == 0); + XFS_BUF_CLR_IODONE_FUNC(bp); + } + +#ifdef XFS_TRANS_DEBUG + kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp)); + bip->bli_orig = NULL; + kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY); + bip->bli_logged = NULL; +#endif /* XFS_TRANS_DEBUG */ + +#ifdef XFS_BLI_TRACE + ktrace_free(bip->bli_trace); +#endif + kmem_zone_free(xfs_buf_item_zone, bip); +} + + +/* + * Add the given log item with its callback to the list of callbacks + * to be called when the buffer's I/O completes. If it is not set + * already, set the buffer's b_iodone() routine to be + * xfs_buf_iodone_callbacks() and link the log item into the list of + * items rooted at b_fsprivate. Items are always added as the second + * entry in the list if there is a first, because the buf item code + * assumes that the buf log item is first. + */ +void +xfs_buf_attach_iodone( + xfs_buf_t *bp, + void (*cb)(xfs_buf_t *, xfs_log_item_t *), + xfs_log_item_t *lip) +{ + xfs_log_item_t *head_lip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + + lip->li_cb = cb; + if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { + head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + lip->li_bio_list = head_lip->li_bio_list; + head_lip->li_bio_list = lip; + } else { + XFS_BUF_SET_FSPRIVATE(bp, lip); + } + + ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) || + (XFS_BUF_IODONE_FUNC(bp) == NULL)); + XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); +} + +STATIC void +xfs_buf_do_callbacks( + xfs_buf_t *bp, + xfs_log_item_t *lip) +{ + xfs_log_item_t *nlip; + + while (lip != NULL) { + nlip = lip->li_bio_list; + ASSERT(lip->li_cb != NULL); + /* + * Clear the next pointer so we don't have any + * confusion if the item is added to another buf. + * Don't touch the log item after calling its + * callback, because it could have freed itself. + */ + lip->li_bio_list = NULL; + lip->li_cb(bp, lip); + lip = nlip; + } +} + +/* + * This is the iodone() function for buffers which have had callbacks + * attached to them by xfs_buf_attach_iodone(). It should remove each + * log item from the buffer's list and call the callback of each in turn. + * When done, the buffer's fsprivate field is set to NULL and the buffer + * is unlocked with a call to iodone(). 
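One detail of that walk is worth isolating: xfs_buf_do_callbacks() must read the next pointer before invoking a callback, because the callback is allowed to free its item. The pattern, reduced to placeholder types:

struct xbuf;    /* opaque buffer type for the sketch */

struct cb_item {
        struct cb_item  *next;
        void            (*cb)(struct xbuf *, struct cb_item *);
};

static void run_callbacks(struct xbuf *bp, struct cb_item *lip)
{
        while (lip != NULL) {
                struct cb_item *nlip = lip->next;       /* save first */

                lip->next = NULL;       /* item may be re-attached later */
                lip->cb(bp, lip);       /* may free lip */
                lip = nlip;             /* taken before the call: safe */
        }
}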
+ */ +void +xfs_buf_iodone_callbacks( + xfs_buf_t *bp) +{ + xfs_log_item_t *lip; + static ulong lasttime; + static dev_t lastdev; + xfs_mount_t *mp; + + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + + if (XFS_BUF_GETERROR(bp) != 0) { + /* + * If we've already decided to shut down the filesystem + * because of IO errors, there's no point in giving this + * a retry. + */ + mp = lip->li_mountp; + if (XFS_FORCED_SHUTDOWN(mp)) { + ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); + XFS_BUF_SUPER_STALE(bp); + xfs_buftrace("BUF_IODONE_CB", bp); + xfs_buf_do_callbacks(bp, lip); + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + + /* + * XFS_SHUT flag gets set when we go through the + * entire buffer cache and deliberately start + * throwing away delayed write buffers. + * Since there's no biowait done on those, + * we should just brelse them. + */ + if (XFS_BUF_ISSHUT(bp)) { + XFS_BUF_UNSHUT(bp); + xfs_buf_relse(bp); + } else { + xfs_biodone(bp); + } + + return; + } + + if ((XFS_BUF_TARGET_DEV(bp) != lastdev) || + (time_after(jiffies, (lasttime + 5*HZ)))) { + lasttime = jiffies; + prdev("XFS write error in file system meta-data " + "block 0x%Lx in %s", + XFS_BUF_TARGET_DEV(bp), + XFS_BUF_ADDR(bp), mp->m_fsname); + } + lastdev = XFS_BUF_TARGET_DEV(bp); + + if (XFS_BUF_ISASYNC(bp)) { + /* + * If the write was asynchronous then no one will be + * looking for the error. Clear the error state + * and write the buffer out again as a delayed write. + * + * XXXsup This is OK, so long as we catch these + * before we start the umount; we don't want these + * DELWRI metadata bufs to be hanging around. + */ + XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */ + + if (!(XFS_BUF_ISSTALE(bp))) { + XFS_BUF_DELAYWRITE(bp); + XFS_BUF_DONE(bp); + XFS_BUF_SET_START(bp); + } + ASSERT(XFS_BUF_IODONE_FUNC(bp)); + xfs_buftrace("BUF_IODONE ASYNC", bp); + xfs_buf_relse(bp); + } else { + /* + * If the write of the buffer was not asynchronous, + * then we want to make sure to return the error + * to the caller of bwrite(). Because of this we + * cannot clear the B_ERROR state at this point. + * Instead we install a callback function that + * will be called when the buffer is released, and + * that routine will clear the error state and + * set the buffer to be written out again after + * some delay. + */ + /* We actually overwrite the existing brelse + function at times, but we're going to be shutting down + anyway. */ + XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse); + XFS_BUF_DONE(bp); + XFS_BUF_V_IODONESEMA(bp); + } + return; + } +#ifdef XFSERRORDEBUG + xfs_buftrace("XFS BUFCB NOERR", bp); +#endif + xfs_buf_do_callbacks(bp, lip); + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + xfs_biodone(bp); +} + +/* + * This is a callback routine attached to a buffer which gets an error + * when being written out synchronously. + */ +STATIC void +xfs_buf_error_relse( + xfs_buf_t *bp) +{ + xfs_log_item_t *lip; + xfs_mount_t *mp; + + lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + mp = (xfs_mount_t *)lip->li_mountp; + ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); + + XFS_BUF_STALE(bp); + XFS_BUF_DONE(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_ERROR(bp,0); + xfs_buftrace("BUF_ERROR_RELSE", bp); + if (! XFS_FORCED_SHUTDOWN(mp)) + xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR); + /* + * We have to unpin the pinned buffers, so do the + * callbacks.
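The error handling above splits on XFS_BUF_ISASYNC(): an asynchronous writer has nobody waiting, so the error is cleared and the buffer requeued as a delayed write, while a synchronous writer must see the error, so the retry is deferred to release time through the brelse hook. The decision in outline, with invented flag and field names:

#define B_ASYNC         0x1
#define B_DELWRI        0x2

struct wbuf {
        int     flags;
        int     error;
        void    (*relse_fn)(struct wbuf *);
};

static void error_relse(struct wbuf *bp)
{
        bp->error = 0;                  /* caller has seen it by now */
        bp->flags |= B_DELWRI;          /* schedule another attempt */
}

static void handle_write_error(struct wbuf *bp)
{
        if (bp->flags & B_ASYNC) {
                bp->error = 0;          /* nobody is waiting for it */
                bp->flags |= B_DELWRI;  /* retry as a delayed write */
        } else {
                bp->relse_fn = error_relse;     /* report first, retry later */
        }
}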
+ */ + xfs_buf_do_callbacks(bp, lip); + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + XFS_BUF_SET_BRELSE_FUNC(bp,NULL); + xfs_buf_relse(bp); +} + + +/* + * This is the iodone() function for buffers which have been + * logged. It is called when they are eventually flushed out. + * It should remove the buf item from the AIL, and free the buf item. + * It is called by xfs_buf_iodone_callbacks() above which will take + * care of cleaning up the buffer itself. + */ +/* ARGSUSED */ +void +xfs_buf_iodone( + xfs_buf_t *bp, + xfs_buf_log_item_t *bip) +{ + struct xfs_mount *mp; + SPLDECL(s); + + ASSERT(bip->bli_buf == bp); + + mp = bip->bli_item.li_mountp; + + /* + * If we are forcibly shutting down, this may well be + * off the AIL already. That's because we simulate the + * log-committed callbacks to unpin these buffers. Or we may never + * have put this item on the AIL because the transaction was + * aborted forcibly. xfs_trans_delete_ail() takes care of these. + * + * Either way, the AIL is useless if we're forcing a shutdown. + */ + AIL_LOCK(mp,s); + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s); + +#ifdef XFS_TRANS_DEBUG + kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp)); + bip->bli_orig = NULL; + kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY); + bip->bli_logged = NULL; +#endif /* XFS_TRANS_DEBUG */ + +#ifdef XFS_BLI_TRACE + ktrace_free(bip->bli_trace); +#endif + kmem_zone_free(xfs_buf_item_zone, bip); +} + +#if defined(XFS_BLI_TRACE) +void +xfs_buf_item_trace( + char *id, + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + ASSERT(bip->bli_trace != NULL); + + bp = bip->bli_buf; + ktrace_enter(bip->bli_trace, + (void *)id, + (void *)bip->bli_buf, + (void *)((unsigned long)bip->bli_flags), + (void *)((unsigned long)bip->bli_recur), + (void *)((unsigned long)atomic_read(&bip->bli_refcount)), + (void *)XFS_BUF_ADDR(bp), + (void *)((unsigned long)XFS_BUF_COUNT(bp)), + (void *)((unsigned long)(0xFFFFFFFF & (XFS_BFLAGS(bp) >> 32))), + (void *)((unsigned long)(0xFFFFFFFF & XFS_BFLAGS(bp))), + XFS_BUF_FSPRIVATE(bp, void *), + XFS_BUF_FSPRIVATE2(bp, void *), + (void *)((unsigned long)bp->b_pincount), + (void *)XFS_BUF_IODONE_FUNC(bp), + (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))), + (void *)bip->bli_item.li_desc, + (void *)((unsigned long)bip->bli_item.li_flags)); +} +#endif /* XFS_BLI_TRACE */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_buf_item.h linux.21rc5-ac2/fs/xfs/xfs_buf_item.h --- linux.21rc5/fs/xfs/xfs_buf_item.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_buf_item.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file.
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BUF_ITEM_H__ +#define __XFS_BUF_ITEM_H__ + +/* + * This is the structure used to lay out a buf log item in the + * log. The data map describes which 128 byte chunks of the buffer + * have been logged. This structure works only on buffers that + * reside up to the first TB in the filesystem. These buffers are + * generated only by pre-6.2 systems and are known as XFS_LI_6_1_BUF. + */ +typedef struct xfs_buf_log_format_v1 { + unsigned short blf_type; /* buf log item type indicator */ + unsigned short blf_size; /* size of this item */ + __int32_t blf_blkno; /* starting blkno of this buf */ + ushort blf_flags; /* misc state */ + ushort blf_len; /* number of blocks in this buf */ + unsigned int blf_map_size; /* size of data bitmap in words */ + unsigned int blf_data_map[1];/* variable size bitmap of */ + /* regions of buffer in this item */ +} xfs_buf_log_format_v1_t; + +/* + * This is a form of the above structure with a 64 bit blkno field. + * For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything. + */ +typedef struct xfs_buf_log_format_t { + unsigned short blf_type; /* buf log item type indicator */ + unsigned short blf_size; /* size of this item */ + ushort blf_flags; /* misc state */ + ushort blf_len; /* number of blocks in this buf */ + __int64_t blf_blkno; /* starting blkno of this buf */ + unsigned int blf_map_size; /* size of data bitmap in words */ + unsigned int blf_data_map[1];/* variable size bitmap of */ + /* regions of buffer in this item */ +} xfs_buf_log_format_t; + +/* + * This flag indicates that the buffer contains on disk inodes + * and requires special recovery handling. + */ +#define XFS_BLI_INODE_BUF 0x1 +/* + * This flag indicates that the buffer should not be replayed + * during recovery because its blocks are being freed. + */ +#define XFS_BLI_CANCEL 0x2 +/* + * This flag indicates that the buffer contains on disk + * user or group dquots and may require special recovery handling. + */ +#define XFS_BLI_UDQUOT_BUF 0x4 +/* #define XFS_BLI_PDQUOT_BUF 0x8 */ +#define XFS_BLI_GDQUOT_BUF 0x10 + +#define XFS_BLI_CHUNK 128 +#define XFS_BLI_SHIFT 7 +#define BIT_TO_WORD_SHIFT 5 +#define NBWORD (NBBY * sizeof(unsigned int)) + +/* + * buf log item flags + */ +#define XFS_BLI_HOLD 0x01 +#define XFS_BLI_DIRTY 0x02 +#define XFS_BLI_STALE 0x04 +#define XFS_BLI_LOGGED 0x08 +#define XFS_BLI_INODE_ALLOC_BUF 0x10 + + +#ifdef __KERNEL__ + +struct xfs_buf; +struct ktrace; +struct xfs_mount; + +/* + * This is the in core log item structure used to track information + * needed to log buffers. It tracks how many times the lock has been + * locked, and which 128 byte chunks of the buffer are dirty. 
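The constants above are mutually constrained: XFS_BLI_CHUNK == 1 << XFS_BLI_SHIFT and, for 32-bit ints, NBWORD == 1 << BIT_TO_WORD_SHIFT, which is what lets the bitmap code replace divisions with shifts. A compile-and-run sanity check:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int nbword = 8 * sizeof(unsigned int); /* NBBY * wordsize */

        assert((1u << 7) == 128);       /* chunk size == 1 << shift */
        assert((1u << 5) == nbword);    /* bits per word == 1 << 5 */
        printf("byte 300 -> chunk %u -> bitmap word %u\n",
               300u >> 7, (300u >> 7) >> 5);    /* chunk 2, word 0 */
        return 0;
}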
+ */ +typedef struct xfs_buf_log_item { + xfs_log_item_t bli_item; /* common item structure */ + struct xfs_buf *bli_buf; /* real buffer pointer */ + unsigned int bli_flags; /* misc flags */ + unsigned int bli_recur; /* lock recursion count */ + atomic_t bli_refcount; /* cnt of tp refs */ +#ifdef DEBUG + struct ktrace *bli_trace; /* event trace buf */ +#endif +#ifdef XFS_TRANS_DEBUG + char *bli_orig; /* original buffer copy */ + char *bli_logged; /* bytes logged (bitmap) */ +#endif + xfs_buf_log_format_t bli_format; /* in-log header */ +} xfs_buf_log_item_t; + +/* + * This structure is used during recovery to record the buf log + * items which have been canceled and should not be replayed. + */ +typedef struct xfs_buf_cancel { + xfs_daddr_t bc_blkno; + uint bc_len; + int bc_refcount; + struct xfs_buf_cancel *bc_next; +} xfs_buf_cancel_t; + +#define XFS_BLI_TRACE_SIZE 32 + + +#if defined(XFS_ALL_TRACE) +#define XFS_BLI_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_BLI_TRACE +#endif + +#if defined(XFS_BLI_TRACE) +void xfs_buf_item_trace(char *, xfs_buf_log_item_t *); +#else +#define xfs_buf_item_trace(id, bip) +#endif + +void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *); +void xfs_buf_item_relse(struct xfs_buf *); +void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint); +uint xfs_buf_item_dirty(xfs_buf_log_item_t *); +void xfs_buf_attach_iodone(struct xfs_buf *, + void(*)(struct xfs_buf *, xfs_log_item_t *), + xfs_log_item_t *); +void xfs_buf_iodone_callbacks(struct xfs_buf *); +void xfs_buf_iodone(struct xfs_buf *, xfs_buf_log_item_t *); + +#ifdef XFS_TRANS_DEBUG +void +xfs_buf_item_flush_log_debug( + struct xfs_buf *bp, + uint first, + uint last); +#else +#define xfs_buf_item_flush_log_debug(bp, first, last) +#endif + +#endif /* __KERNEL__ */ + +#endif /* __XFS_BUF_ITEM_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_cap.c linux.21rc5-ac2/fs/xfs/xfs_cap.c --- linux.21rc5/fs/xfs/xfs_cap.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_cap.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +STATIC int xfs_cap_allow_set(vnode_t *); + + +/* + * Test for existence of capability attribute as efficiently as possible. + */ +int +xfs_cap_vhascap( + vnode_t *vp) +{ + int error; + int len = sizeof(xfs_cap_set_t); + int flags = ATTR_KERNOVAL|ATTR_ROOT; + + VOP_ATTR_GET(vp, SGI_CAP_LINUX, NULL, &len, flags, sys_cred, error); + return (error == 0); +} + +/* + * Convert from extended attribute representation to in-memory for XFS. + */ +STATIC int +posix_cap_xattr_to_xfs( + posix_cap_xattr *src, + size_t size, + xfs_cap_set_t *dest) +{ + if (!src || !dest) + return EINVAL; + + if (src->c_version != cpu_to_le32(POSIX_CAP_XATTR_VERSION)) + return EINVAL; + if (src->c_abiversion != cpu_to_le32(_LINUX_CAPABILITY_VERSION)) + return EINVAL; + + if (size < sizeof(posix_cap_xattr)) + return EINVAL; + + ASSERT(sizeof(dest->cap_effective) == sizeof(src->c_effective)); + + dest->cap_effective = src->c_effective; + dest->cap_permitted = src->c_permitted; + dest->cap_inheritable = src->c_inheritable; + + return 0; +} + +/* + * Convert from in-memory XFS to extended attribute representation. + */ +STATIC int +posix_cap_xfs_to_xattr( + xfs_cap_set_t *src, + posix_cap_xattr *xattr_cap, + size_t size) +{ + size_t new_size = posix_cap_xattr_size(); + + if (size < new_size) + return -ERANGE; + + ASSERT(sizeof(xattr_cap->c_effective) == sizeof(src->cap_effective)); + + xattr_cap->c_version = cpu_to_le32(POSIX_CAP_XATTR_VERSION); + xattr_cap->c_abiversion = cpu_to_le32(_LINUX_CAPABILITY_VERSION); + xattr_cap->c_effective = src->cap_effective; + xattr_cap->c_permitted = src->cap_permitted; + xattr_cap->c_inheritable= src->cap_inheritable; + + return new_size; +} + +int +xfs_cap_vget( + vnode_t *vp, + void *cap, + size_t size) +{ + int error; + int len = sizeof(xfs_cap_set_t); + int flags = ATTR_ROOT; + xfs_cap_set_t xfs_cap = { 0 }; + posix_cap_xattr *xattr_cap = cap; + char *data = (char *)&xfs_cap; + + VN_HOLD(vp); + if ((error = _MAC_VACCESS(vp, NULL, VREAD))) + goto out; + + if (!size) { + flags |= ATTR_KERNOVAL; + data = NULL; + } + VOP_ATTR_GET(vp, SGI_CAP_LINUX, data, &len, flags, sys_cred, error); + if (error) + goto out; + ASSERT(len == sizeof(xfs_cap_set_t)); + + error = (size)? 
-posix_cap_xfs_to_xattr(&xfs_cap, xattr_cap, size) : + -posix_cap_xattr_size(); +out: + VN_RELE(vp); + return -error; +} + +int +xfs_cap_vremove( + vnode_t *vp) +{ + int error; + + VN_HOLD(vp); + error = xfs_cap_allow_set(vp); + if (!error) { + VOP_ATTR_REMOVE(vp, SGI_CAP_LINUX, ATTR_ROOT, sys_cred, error); + if (error == ENOATTR) + error = 0; /* 'scool */ + } + VN_RELE(vp); + return -error; +} + +int +xfs_cap_vset( + vnode_t *vp, + void *cap, + size_t size) +{ + posix_cap_xattr *xattr_cap = cap; + xfs_cap_set_t xfs_cap; + int error; + + if (!cap) + return -EINVAL; + + error = posix_cap_xattr_to_xfs(xattr_cap, size, &xfs_cap); + if (error) + return -error; + + VN_HOLD(vp); + error = xfs_cap_allow_set(vp); + if (error) + goto out; + + VOP_ATTR_SET(vp, SGI_CAP_LINUX, (char *)&xfs_cap, + sizeof(xfs_cap_set_t), ATTR_ROOT, sys_cred, error); +out: + VN_RELE(vp); + return -error; +} + +STATIC int +xfs_cap_allow_set( + vnode_t *vp) +{ + vattr_t va; + int error; + + if (vp->v_vfsp->vfs_flag & VFS_RDONLY) + return EROFS; + if ((error = _MAC_VACCESS(vp, NULL, VWRITE))) + return error; + va.va_mask = XFS_AT_UID; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return error; + if (va.va_uid != current->fsuid && !capable(CAP_FOWNER)) + return EPERM; + return error; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_cap.h linux.21rc5-ac2/fs/xfs/xfs_cap.h --- linux.21rc5/fs/xfs/xfs_cap.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_cap.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
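xfs_cap_vget() above follows the standard xattr size-probe protocol: size == 0 asks only for the required length (hence ATTR_KERNOVAL and a NULL data pointer), while a nonzero size performs the copy and fails with ERANGE if the buffer is too small. The skeleton of that protocol, with an invented helper and constant:

#include <errno.h>
#include <string.h>

#define CAP_NEEDED 24   /* pretend length of the serialized cap set */

static int cap_get(void *buf, size_t size)
{
        if (size == 0)
                return CAP_NEEDED;      /* probe: report required length */
        if (size < CAP_NEEDED)
                return -ERANGE;         /* caller's buffer is too small */
        memset(buf, 0, CAP_NEEDED);     /* stands in for the real copy-out */
        return CAP_NEEDED;
}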
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_CAP_H__ +#define __XFS_CAP_H__ + +/* + * Capabilities + */ +typedef __uint64_t xfs_cap_value_t; + +typedef struct xfs_cap_set { + xfs_cap_value_t cap_effective; /* use in capability checks */ + xfs_cap_value_t cap_permitted; /* combined with file attrs */ + xfs_cap_value_t cap_inheritable;/* pass through exec */ +} xfs_cap_set_t; + +/* On-disk XFS extended attribute names */ +#define SGI_CAP_FILE "SGI_CAP_FILE" +#define SGI_CAP_FILE_SIZE (sizeof(SGI_CAP_FILE)-1) +#define SGI_CAP_LINUX "SGI_CAP_LINUX" +#define SGI_CAP_LINUX_SIZE (sizeof(SGI_CAP_LINUX)-1) + +/* + * For Linux, we take the bitfields directly from capability.h + * and no longer attempt to keep this attribute on-disk compatible + * with IRIX. Since this attribute is only set on executables, + * it just doesn't make much sense to try. We do use a different + * named attribute though, to avoid confusion. + */ + +#ifdef __KERNEL__ + +#ifdef CONFIG_FS_POSIX_CAP + +#include + +struct vnode; + +extern int xfs_cap_vhascap(struct vnode *); +extern int xfs_cap_vset(struct vnode *, void *, size_t); +extern int xfs_cap_vget(struct vnode *, void *, size_t); +extern int xfs_cap_vremove(struct vnode *vp); + +#define _CAP_EXISTS xfs_cap_vhascap + +#else +#define xfs_cap_vset(v,p,sz) (-EOPNOTSUPP) +#define xfs_cap_vget(v,p,sz) (-EOPNOTSUPP) +#define xfs_cap_vremove(v) (-EOPNOTSUPP) +#define _CAP_EXISTS (NULL) +#endif + +#endif /* __KERNEL__ */ + +#endif /* __XFS_CAP_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_clnt.h linux.21rc5-ac2/fs/xfs/xfs_clnt.h --- linux.21rc5/fs/xfs/xfs_clnt.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_clnt.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_CLNT_H__ +#define __XFS_CLNT_H__ + +/* + * XFS arguments structure, constructed from the arguments we + * are passed via the mount system call.
+ * + * NOTE: The mount system call is handled differently between + * Linux and IRIX. In IRIX we worked work with a binary data + * structure coming in across the syscall interface from user + * space (the mount userspace knows about each filesystem type + * and the set of valid options for it, and converts the users + * argument string into a binary structure _before_ making the + * system call), and the ABI issues that this implies. + * + * In Linux, we are passed a comma separated set of options; + * ie. a NULL terminated string of characters. Userspace mount + * code does not have any knowledge of mount options expected by + * each filesystem type and so each filesystem parses its mount + * options in kernel space. + * + * For the Linux port, we kept this structure pretty much intact + * and use it internally (because the existing code groks it). + */ +struct xfs_mount_args { + int flags; /* flags -> see XFSMNT_... macros below */ + int logbufs; /* Number of log buffers, -1 to default */ + int logbufsize; /* Size of log buffers, -1 to default */ + char fsname[MAXNAMELEN]; /* data device name */ + char rtname[MAXNAMELEN]; /* realtime device filename */ + char logname[MAXNAMELEN]; /* journal device filename */ + char mtpt[MAXNAMELEN]; /* filesystem mount point */ + int sunit; /* stripe unit (BBs) */ + int swidth; /* stripe width (BBs), multiple of sunit */ + uchar_t iosizelog; /* log2 of the preferred I/O size */ +}; + +/* + * XFS mount option flags + */ +#define XFSMNT_CHKLOG 0x00000001 /* check log */ +#define XFSMNT_WSYNC 0x00000002 /* safe mode nfs mount + * compatible */ +#define XFSMNT_INO64 0x00000004 /* move inode numbers up + * past 2^32 */ +#define XFSMNT_UQUOTA 0x00000008 /* user quota accounting */ +#define XFSMNT_PQUOTA 0x00000010 /* IRIX prj quota accounting */ +#define XFSMNT_UQUOTAENF 0x00000020 /* user quota limit + * enforcement */ +#define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit + * enforcement */ +#define XFSMNT_NOATIME 0x00000100 /* don't modify access + * times on reads */ +#define XFSMNT_NOALIGN 0x00000200 /* don't allocate at + * stripe boundaries*/ +#define XFSMNT_RETERR 0x00000400 /* return error to user */ +#define XFSMNT_NORECOVERY 0x00000800 /* no recovery, implies + * read-only mount */ +#define XFSMNT_SHARED 0x00001000 /* shared XFS mount */ +#define XFSMNT_IOSIZE 0x00002000 /* optimize for I/O size */ +#define XFSMNT_OSYNCISOSYNC 0x00004000 /* o_sync is REALLY o_sync */ + /* (osyncisdsync is now default) */ +#define XFSMNT_32BITINODES 0x00200000 /* restrict inodes to 32 + * bits of address space */ +#define XFSMNT_GQUOTA 0x00400000 /* group quota accounting */ +#define XFSMNT_GQUOTAENF 0x00800000 /* group quota limit + * enforcement */ +#define XFSMNT_NOUUID 0x01000000 /* Ignore fs uuid */ +#define XFSMNT_DMAPI 0x02000000 /* enable dmapi/xdsm */ +#define XFSMNT_NOLOGFLUSH 0x04000000 /* Don't flush for log blocks */ + +#endif /* __XFS_CLNT_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_da_btree.c linux.21rc5-ac2/fs/xfs/xfs_da_btree.c --- linux.21rc5/fs/xfs/xfs_da_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_da_btree.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,2658 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
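The NOTE above is why struct xfs_mount_args exists on Linux at all: the kernel, not the mount tool, turns the comma-separated option string into the binary structure. A rough user-space illustration of that style of tokenizing, using glibc's strsep() (the option names are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char opts[] = "logbufs=8,noalign,wsync";        /* example string */
        char *rest = opts, *opt;

        while ((opt = strsep(&rest, ",")) != NULL) {
                char *val = strchr(opt, '=');

                if (val)
                        *val++ = '\0';  /* split "name=value" in place */
                printf("option \"%s\"%s%s\n", opt,
                       val ? " = " : "", val ? val : "");
        }
        return 0;
}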
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "xfs_dir_leaf.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_node.h" +#include "xfs_error.h" +#include "xfs_bit.h" + +#if defined(XFSDEBUG) && defined(CONFIG_KDB) +#undef xfs_buftrace +#define xfs_buftrace(A,B) \ + printk(" xfs_buftrace : %s (0x%p)\n", A, B); \ + BUG(); +#endif + +/* + * xfs_da_btree.c + * + * Routines to implement directories as Btrees of hashed names. + */ + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Routines used for growing the Btree. + */ +STATIC int xfs_da_root_split(xfs_da_state_t *state, + xfs_da_state_blk_t *existing_root, + xfs_da_state_blk_t *new_child); +STATIC int xfs_da_node_split(xfs_da_state_t *state, + xfs_da_state_blk_t *existing_blk, + xfs_da_state_blk_t *split_blk, + xfs_da_state_blk_t *blk_to_add, + int treelevel, + int *result); +STATIC void xfs_da_node_rebalance(xfs_da_state_t *state, + xfs_da_state_blk_t *node_blk_1, + xfs_da_state_blk_t *node_blk_2); +STATIC void xfs_da_node_add(xfs_da_state_t *state, + xfs_da_state_blk_t *old_node_blk, + xfs_da_state_blk_t *new_node_blk); + +/* + * Routines used for shrinking the Btree. + */ +STATIC int xfs_da_root_join(xfs_da_state_t *state, + xfs_da_state_blk_t *root_blk); +STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval); +STATIC void xfs_da_node_remove(xfs_da_state_t *state, + xfs_da_state_blk_t *drop_blk); +STATIC void xfs_da_node_unbalance(xfs_da_state_t *state, + xfs_da_state_blk_t *src_node_blk, + xfs_da_state_blk_t *dst_node_blk); + +/* + * Utility routines. 
+ */ +STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count); +STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp); +STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra); + + +/*======================================================================== + * Routines used for growing the Btree. + *========================================================================*/ + +/* + * Create the initial contents of an intermediate node. + */ +int +xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, + xfs_dabuf_t **bpp, int whichfork) +{ + xfs_da_intnode_t *node; + xfs_dabuf_t *bp; + int error; + xfs_trans_t *tp; + + tp = args->trans; + error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + node = bp->data; + INT_ZERO(node->hdr.info.forw, ARCH_CONVERT); + INT_ZERO(node->hdr.info.back, ARCH_CONVERT); + INT_SET(node->hdr.info.magic, ARCH_CONVERT, XFS_DA_NODE_MAGIC); + INT_ZERO(node->hdr.info.pad, ARCH_CONVERT); + INT_ZERO(node->hdr.count, ARCH_CONVERT); + INT_SET(node->hdr.level, ARCH_CONVERT, level); + + xfs_da_log_buf(tp, bp, + XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); + + *bpp = bp; + return(0); +} + +/* + * Split a leaf node, rebalance, then possibly split + * intermediate nodes, rebalance, etc. + */ +int /* error */ +xfs_da_split(xfs_da_state_t *state) +{ + xfs_da_state_blk_t *oldblk, *newblk, *addblk; + xfs_da_intnode_t *node; + xfs_dabuf_t *bp; + int max, action, error, i; + + /* + * Walk back up the tree splitting/inserting/adjusting as necessary. + * If we need to insert and there isn't room, split the node, then + * decide which fragment to insert the new block from below into. + * Note that we may split the root this way, but we need more fixup. + */ + max = state->path.active - 1; + ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH)); + ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC || + state->path.blk[max].magic == XFS_DIRX_LEAF_MAGIC(state->mp)); + + addblk = &state->path.blk[max]; /* initial dummy value */ + for (i = max; (i >= 0) && addblk; state->path.active--, i--) { + oldblk = &state->path.blk[i]; + newblk = &state->altpath.blk[i]; + + /* + * If a leaf node then + * Allocate a new leaf node, then rebalance across them. + * else if an intermediate node then + * We split on the last layer, must we split the node? + */ + switch (oldblk->magic) { + case XFS_ATTR_LEAF_MAGIC: +#ifndef __KERNEL__ + return(ENOTTY); +#else + error = xfs_attr_leaf_split(state, oldblk, newblk); + if ((error != 0) && (error != ENOSPC)) { + return(error); /* GROT: attr is inconsistent */ + } + if (!error) { + addblk = newblk; + break; + } + /* + * Entry wouldn't fit, split the leaf again. + */ + state->extravalid = 1; + if (state->inleaf) { + state->extraafter = 0; /* before newblk */ + error = xfs_attr_leaf_split(state, oldblk, + &state->extrablk); + } else { + state->extraafter = 1; /* after newblk */ + error = xfs_attr_leaf_split(state, newblk, + &state->extrablk); + } + if (error) + return(error); /* GROT: attr inconsistent */ + addblk = newblk; + break; +#endif + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + error = xfs_dir_leaf_split(state, oldblk, newblk); + if ((error != 0) && (error != ENOSPC)) { + return(error); /* GROT: dir is inconsistent */ + } + if (!error) { + addblk = newblk; + break; + } + /* + * Entry wouldn't fit, split the leaf again. 
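Nearly every on-disk field access in this file goes through INT_GET/INT_SET with ARCH_CONVERT, so btree blocks keep one fixed byte order on every host CPU. A minimal analogue of what such macros must accomplish, written as explicit big-endian helpers (these are not the XFS macros, just the idea):

#include <stdint.h>
#include <stdio.h>

static void be16_set(uint8_t *p, uint16_t v)
{
        p[0] = (uint8_t)(v >> 8);       /* most significant byte first */
        p[1] = (uint8_t)(v & 0xff);
}

static uint16_t be16_get(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        uint8_t disk[2];

        be16_set(disk, 0xfebe);                 /* an arbitrary 16-bit value */
        printf("0x%04x\n", be16_get(disk));     /* same on any host */
        return 0;
}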
+ */ + state->extravalid = 1; + if (state->inleaf) { + state->extraafter = 0; /* before newblk */ + error = xfs_dir_leaf_split(state, oldblk, + &state->extrablk); + if (error) + return(error); /* GROT: dir incon. */ + addblk = newblk; + } else { + state->extraafter = 1; /* after newblk */ + error = xfs_dir_leaf_split(state, newblk, + &state->extrablk); + if (error) + return(error); /* GROT: dir incon. */ + addblk = newblk; + } + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + error = xfs_dir2_leafn_split(state, oldblk, newblk); + if (error) + return error; + addblk = newblk; + break; + case XFS_DA_NODE_MAGIC: + error = xfs_da_node_split(state, oldblk, newblk, addblk, + max - i, &action); + xfs_da_buf_done(addblk->bp); + addblk->bp = NULL; + if (error) + return(error); /* GROT: dir is inconsistent */ + /* + * Record the newly split block for the next time thru? + */ + if (action) + addblk = newblk; + else + addblk = NULL; + break; + } + + /* + * Update the btree to show the new hashval for this child. + */ + xfs_da_fixhashpath(state, &state->path); + /* + * If we won't need this block again, it's getting dropped + * from the active path by the loop control, so we need + * to mark it done now. + */ + if (i > 0 || !addblk) + xfs_da_buf_done(oldblk->bp); + } + if (!addblk) + return(0); + + /* + * Split the root node. + */ + ASSERT(state->path.active == 0); + oldblk = &state->path.blk[0]; + error = xfs_da_root_split(state, oldblk, addblk); + if (error) { + xfs_da_buf_done(oldblk->bp); + xfs_da_buf_done(addblk->bp); + addblk->bp = NULL; + return(error); /* GROT: dir is inconsistent */ + } + + /* + * Update pointers to the node which used to be block 0 and + * just got bumped because of the addition of a new root node. + * There might be three blocks involved if a double split occurred, + * and the original block 0 could be at any position in the list. + */ + + node = oldblk->bp->data; + if (!INT_ISZERO(node->hdr.info.forw, ARCH_CONVERT)) { + if (INT_GET(node->hdr.info.forw, ARCH_CONVERT) == addblk->blkno) { + bp = addblk->bp; + } else { + ASSERT(state->extravalid); + bp = state->extrablk.bp; + } + node = bp->data; + INT_SET(node->hdr.info.back, ARCH_CONVERT, oldblk->blkno); + xfs_da_log_buf(state->args->trans, bp, + XFS_DA_LOGRANGE(node, &node->hdr.info, + sizeof(node->hdr.info))); + } + node = oldblk->bp->data; + if (INT_GET(node->hdr.info.back, ARCH_CONVERT)) { + if (INT_GET(node->hdr.info.back, ARCH_CONVERT) == addblk->blkno) { + bp = addblk->bp; + } else { + ASSERT(state->extravalid); + bp = state->extrablk.bp; + } + node = bp->data; + INT_SET(node->hdr.info.forw, ARCH_CONVERT, oldblk->blkno); + xfs_da_log_buf(state->args->trans, bp, + XFS_DA_LOGRANGE(node, &node->hdr.info, + sizeof(node->hdr.info))); + } + xfs_da_buf_done(oldblk->bp); + xfs_da_buf_done(addblk->bp); + addblk->bp = NULL; + return(0); +} + +/* + * Split the root. We have to create a new root and point to the two + * parts (the split old root) that we just created. Copy block zero to + * the EOF, extending the inode in process. + */ +STATIC int /* error */ +xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2) +{ + xfs_da_intnode_t *node, *oldroot; + xfs_da_args_t *args; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + int error, size; + xfs_inode_t *dp; + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_dir2_leaf_t *leaf; + + /* + * Copy the existing (incorrect) block from the root node position + * to a free space somewhere. 
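Because the root must stay at a fixed block, the split works by evacuation, exactly as the comment says: copy the old root out to a fresh block, then rebuild the fixed block as a two-entry node over the two halves. In miniature, with simplified in-memory structures and no buffers or logging:

struct dnode {
        int     level;                  /* 0 would be a leaf */
        int     count;                  /* entries in use */
        struct dent {
                unsigned int    hash;
                long            before; /* child block number */
        } ent[64];
};

/* the root stays put; its old contents move to 'child' at child_blk */
static void root_split(struct dnode *root, struct dnode *child,
                       long child_blk, long sib_blk, unsigned int sib_hash)
{
        *child = *root;                 /* evacuate the old root */
        root->level = child->level + 1;
        root->count = 2;
        root->ent[0].hash = child->ent[child->count - 1].hash;
        root->ent[0].before = child_blk;
        root->ent[1].hash = sib_hash;
        root->ent[1].before = sib_blk;
}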
+ */ + args = state->args; + ASSERT(args != NULL); + error = xfs_da_grow_inode(args, &blkno); + if (error) + return(error); + dp = args->dp; + tp = args->trans; + mp = state->mp; + error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + node = bp->data; + oldroot = blk1->bp->data; + if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + size = (int)((char *)&oldroot->btree[INT_GET(oldroot->hdr.count, ARCH_CONVERT)] - + (char *)oldroot); + } else { + ASSERT(XFS_DIR_IS_V2(mp)); + ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + leaf = (xfs_dir2_leaf_t *)oldroot; + size = (int)((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] - + (char *)leaf); + } + memcpy(node, oldroot, size); + xfs_da_log_buf(tp, bp, 0, size - 1); + xfs_da_buf_done(blk1->bp); + blk1->bp = bp; + blk1->blkno = blkno; + + /* + * Set up the new root node. + */ + error = xfs_da_node_create(args, + args->whichfork == XFS_DATA_FORK && + XFS_DIR_IS_V2(mp) ? mp->m_dirleafblk : 0, + INT_GET(node->hdr.level, ARCH_CONVERT) + 1, &bp, args->whichfork); + if (error) + return(error); + node = bp->data; + INT_SET(node->btree[0].hashval, ARCH_CONVERT, blk1->hashval); + INT_SET(node->btree[0].before, ARCH_CONVERT, blk1->blkno); + INT_SET(node->btree[1].hashval, ARCH_CONVERT, blk2->hashval); + INT_SET(node->btree[1].before, ARCH_CONVERT, blk2->blkno); + INT_SET(node->hdr.count, ARCH_CONVERT, 2); + +#ifdef DEBUG + if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + ASSERT(blk1->blkno >= mp->m_dirleafblk && + blk1->blkno < mp->m_dirfreeblk); + ASSERT(blk2->blkno >= mp->m_dirleafblk && + blk2->blkno < mp->m_dirfreeblk); + } +#endif + + /* Header is already logged by xfs_da_node_create */ + xfs_da_log_buf(tp, bp, + XFS_DA_LOGRANGE(node, node->btree, + sizeof(xfs_da_node_entry_t) * 2)); + xfs_da_buf_done(bp); + + return(0); +} + +/* + * Split the node, rebalance, then add the new entry. + */ +STATIC int /* error */ +xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, + xfs_da_state_blk_t *newblk, + xfs_da_state_blk_t *addblk, + int treelevel, int *result) +{ + xfs_da_intnode_t *node; + xfs_dablk_t blkno; + int newcount, error; + int useextra; + + node = oldblk->bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + + /* + * With V2 the extra block is data or freespace. + */ + useextra = state->extravalid && XFS_DIR_IS_V1(state->mp); + newcount = 1 + useextra; + /* + * Do we have to split the node? + */ + if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > state->node_ents) { + /* + * Allocate a new node, add to the doubly linked chain of + * nodes, then move some of our excess entries into it. + */ + error = xfs_da_grow_inode(state->args, &blkno); + if (error) + return(error); /* GROT: dir is inconsistent */ + + error = xfs_da_node_create(state->args, blkno, treelevel, + &newblk->bp, state->args->whichfork); + if (error) + return(error); /* GROT: dir is inconsistent */ + newblk->blkno = blkno; + newblk->magic = XFS_DA_NODE_MAGIC; + xfs_da_node_rebalance(state, oldblk, newblk); + error = xfs_da_blk_link(state, oldblk, newblk); + if (error) + return(error); + *result = 1; + } else { + *result = 0; + } + + /* + * Insert the new entry(s) into the correct block + * (updating last hashval in the process). 
+ * + * xfs_da_node_add() inserts BEFORE the given index, + * and as a result of using node_lookup_int() we always + * point to a valid entry (not after one), but a split + * operation always results in a new block whose hashvals + * FOLLOW the current block. + * + * If we had double-split op below us, then add the extra block too. + */ + node = oldblk->bp->data; + if (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)) { + oldblk->index++; + xfs_da_node_add(state, oldblk, addblk); + if (useextra) { + if (state->extraafter) + oldblk->index++; + xfs_da_node_add(state, oldblk, &state->extrablk); + state->extravalid = 0; + } + } else { + newblk->index++; + xfs_da_node_add(state, newblk, addblk); + if (useextra) { + if (state->extraafter) + newblk->index++; + xfs_da_node_add(state, newblk, &state->extrablk); + state->extravalid = 0; + } + } + + return(0); +} + +/* + * Balance the btree elements between two intermediate nodes, + * usually one full and one empty. + * + * NOTE: if blk2 is empty, then it will get the upper half of blk1. + */ +STATIC void +xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2) +{ + xfs_da_intnode_t *node1, *node2, *tmpnode; + xfs_da_node_entry_t *btree_s, *btree_d; + int count, tmp; + xfs_trans_t *tp; + + node1 = blk1->bp->data; + node2 = blk2->bp->data; + /* + * Figure out how many entries need to move, and in which direction. + * Swap the nodes around if that makes it simpler. + */ + if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && + ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + tmpnode = node1; + node1 = node2; + node2 = tmpnode; + } + ASSERT(INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + count = (INT_GET(node1->hdr.count, ARCH_CONVERT) - INT_GET(node2->hdr.count, ARCH_CONVERT)) / 2; + if (count == 0) + return; + tp = state->args->trans; + /* + * Two cases: high-to-low and low-to-high. + */ + if (count > 0) { + /* + * Move elements in node2 up to make a hole. + */ + if ((tmp = INT_GET(node2->hdr.count, ARCH_CONVERT)) > 0) { + tmp *= (uint)sizeof(xfs_da_node_entry_t); + btree_s = &node2->btree[0]; + btree_d = &node2->btree[count]; + memmove(btree_d, btree_s, tmp); + } + + /* + * Move the req'd B-tree elements from high in node1 to + * low in node2. + */ + INT_MOD(node2->hdr.count, ARCH_CONVERT, count); + tmp = count * (uint)sizeof(xfs_da_node_entry_t); + btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count]; + btree_d = &node2->btree[0]; + memcpy(btree_d, btree_s, tmp); + INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count)); + + } else { + /* + * Move the req'd B-tree elements from low in node2 to + * high in node1. + */ + count = -count; + tmp = count * (uint)sizeof(xfs_da_node_entry_t); + btree_s = &node2->btree[0]; + btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)]; + memcpy(btree_d, btree_s, tmp); + INT_MOD(node1->hdr.count, ARCH_CONVERT, count); + xfs_da_log_buf(tp, blk1->bp, + XFS_DA_LOGRANGE(node1, btree_d, tmp)); + + /* + * Move elements in node2 down to fill the hole. 
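Stripped of logging and byte-order conversion, the rebalance moves half of the count difference from the fuller node to the emptier one while preserving hash order; in the first case above, the tail of the fuller low-hash node becomes the head of its sibling. A plain-array version of that first case:

#include <string.h>

/* n1 holds the lower hashvals and the surplus; its tail moves to
 * the front of n2 so global ordering is preserved. */
static void rebalance(unsigned int *n1, int *c1, unsigned int *n2, int *c2)
{
        int move = (*c1 - *c2) / 2;

        if (move <= 0)
                return;
        memmove(n2 + move, n2, *c2 * sizeof(*n2));        /* open a hole */
        memcpy(n2, n1 + *c1 - move, move * sizeof(*n2));  /* copy tail in */
        *c1 -= move;
        *c2 += move;
}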
+ */ + tmp = INT_GET(node2->hdr.count, ARCH_CONVERT) - count; + tmp *= (uint)sizeof(xfs_da_node_entry_t); + btree_s = &node2->btree[count]; + btree_d = &node2->btree[0]; + memmove(btree_d, btree_s, tmp); + INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count)); + } + + /* + * Log header of node 1 and all current bits of node 2. + */ + xfs_da_log_buf(tp, blk1->bp, + XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr))); + xfs_da_log_buf(tp, blk2->bp, + XFS_DA_LOGRANGE(node2, &node2->hdr, + sizeof(node2->hdr) + + sizeof(node2->btree[0]) * INT_GET(node2->hdr.count, ARCH_CONVERT))); + + /* + * Record the last hashval from each block for upward propagation. + * (note: don't use the swapped node pointers) + */ + node1 = blk1->bp->data; + node2 = blk2->bp->data; + blk1->hashval = INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk2->hashval = INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + + /* + * Adjust the expected index for insertion. + */ + if (blk1->index >= INT_GET(node1->hdr.count, ARCH_CONVERT)) { + blk2->index = blk1->index - INT_GET(node1->hdr.count, ARCH_CONVERT); + blk1->index = INT_GET(node1->hdr.count, ARCH_CONVERT) + 1; /* make it invalid */ + } +} + +/* + * Add a new entry to an intermediate node. + */ +STATIC void +xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, + xfs_da_state_blk_t *newblk) +{ + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + int tmp; + xfs_mount_t *mp; + + node = oldblk->bp->data; + mp = state->mp; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT((oldblk->index >= 0) && (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT))); + ASSERT(newblk->blkno != 0); + if (state->args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) + ASSERT(newblk->blkno >= mp->m_dirleafblk && + newblk->blkno < mp->m_dirfreeblk); + + /* + * We may need to make some room before we insert the new node. + */ + tmp = 0; + btree = &node->btree[ oldblk->index ]; + if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) { + tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree); + memmove(btree + 1, btree, tmp); + } + INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval); + INT_SET(btree->before, ARCH_CONVERT, newblk->blkno); + xfs_da_log_buf(state->args->trans, oldblk->bp, + XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); + INT_MOD(node->hdr.count, ARCH_CONVERT, +1); + xfs_da_log_buf(state->args->trans, oldblk->bp, + XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); + + /* + * Copy the last hash value from the oldblk to propagate upwards. + */ + oldblk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); +} + +/*======================================================================== + * Routines used for shrinking the Btree. + *========================================================================*/ + +/* + * Deallocate an empty leaf node, remove it from its parent, + * possibly deallocating that block, etc... 
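+ *
+ * In outline (a summary of the code below): at each level the
+ * "drop" block is coalesced into or emptied against its "save"
+ * neighbour; as soon as a level cannot be freed we return,
+ * otherwise we keep walking toward the root and finally try to
+ * collapse it with xfs_da_root_join().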
+ */ +int +xfs_da_join(xfs_da_state_t *state) +{ + xfs_da_state_blk_t *drop_blk, *save_blk; + int action, error; + + action = 0; + drop_blk = &state->path.blk[ state->path.active-1 ]; + save_blk = &state->altpath.blk[ state->path.active-1 ]; + ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC); + ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC || + drop_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp)); + + /* + * Walk back up the tree joining/deallocating as necessary. + * When we stop dropping blocks, break out. + */ + for ( ; state->path.active >= 2; drop_blk--, save_blk--, + state->path.active--) { + /* + * See if we can combine the block with a neighbor. + * (action == 0) => no options, just leave + * (action == 1) => coalesce, then unlink + * (action == 2) => block empty, unlink it + */ + switch (drop_blk->magic) { + case XFS_ATTR_LEAF_MAGIC: +#ifndef __KERNEL__ + error = ENOTTY; +#else + error = xfs_attr_leaf_toosmall(state, &action); +#endif + if (error) + return(error); + if (action == 0) + return(0); +#ifdef __KERNEL__ + xfs_attr_leaf_unbalance(state, drop_blk, save_blk); +#endif + break; + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + error = xfs_dir_leaf_toosmall(state, &action); + if (error) + return(error); + if (action == 0) + return(0); + xfs_dir_leaf_unbalance(state, drop_blk, save_blk); + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + error = xfs_dir2_leafn_toosmall(state, &action); + if (error) + return error; + if (action == 0) + return 0; + xfs_dir2_leafn_unbalance(state, drop_blk, save_blk); + break; + case XFS_DA_NODE_MAGIC: + /* + * Remove the offending node, fixup hashvals, + * check for a toosmall neighbor. + */ + xfs_da_node_remove(state, drop_blk); + xfs_da_fixhashpath(state, &state->path); + error = xfs_da_node_toosmall(state, &action); + if (error) + return(error); + if (action == 0) + return 0; + xfs_da_node_unbalance(state, drop_blk, save_blk); + break; + } + xfs_da_fixhashpath(state, &state->altpath); + error = xfs_da_blk_unlink(state, drop_blk, save_blk); + xfs_da_state_kill_altpath(state); + if (error) + return(error); + error = xfs_da_shrink_inode(state->args, drop_blk->blkno, + drop_blk->bp); + drop_blk->bp = NULL; + if (error) + return(error); + } + /* + * We joined all the way to the top. If it turns out that + * we only have one entry in the root, make the child block + * the new root. + */ + xfs_da_node_remove(state, drop_blk); + xfs_da_fixhashpath(state, &state->path); + error = xfs_da_root_join(state, &state->path.blk[0]); + return(error); +} + +/* + * We have only one entry in the root. Copy the only remaining child of + * the old root to block 0 as the new root node. + */ +STATIC int +xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) +{ + xfs_da_intnode_t *oldroot; + /* REFERENCED */ + xfs_da_blkinfo_t *blkinfo; + xfs_da_args_t *args; + xfs_dablk_t child; + xfs_dabuf_t *bp; + int error; + + args = state->args; + ASSERT(args != NULL); + ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); + oldroot = root_blk->bp->data; + ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(INT_ISZERO(oldroot->hdr.info.forw, ARCH_CONVERT)); + ASSERT(INT_ISZERO(oldroot->hdr.info.back, ARCH_CONVERT)); + + /* + * If the root has more than one child, then don't do anything. + */ + if (INT_GET(oldroot->hdr.count, ARCH_CONVERT) > 1) + return(0); + + /* + * Read in the (only) child block, then copy those bytes into + * the root block's buffer and free the original child block. 
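+ * The root must keep its block number (0, or m_dirleafblk for
+ * version 2 directories), so the child's bytes are copied over
+ * the root in place rather than re-pointing the root; the tree
+ * height shrinks by one level.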
+ */
+ child = INT_GET(oldroot->btree[ 0 ].before, ARCH_CONVERT);
+ ASSERT(child != 0);
+ error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
+ args->whichfork);
+ if (error)
+ return(error);
+ ASSERT(bp != NULL);
+ blkinfo = bp->data;
+ if (INT_GET(oldroot->hdr.level, ARCH_CONVERT) == 1) {
+ ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+ INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC);
+ } else {
+ ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+ }
+ ASSERT(INT_ISZERO(blkinfo->forw, ARCH_CONVERT));
+ ASSERT(INT_ISZERO(blkinfo->back, ARCH_CONVERT));
+ memcpy(root_blk->bp->data, bp->data, state->blocksize);
+ xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
+ error = xfs_da_shrink_inode(args, child, bp);
+ return(error);
+}
+
+/*
+ * Check a node block and its neighbors to see if the block should be
+ * collapsed into one or the other neighbor. Always keep the block
+ * with the smaller block number.
+ * If the current block is over 50% full, don't try to join it, return 0.
+ * If the block is empty, fill in the state structure and return 2.
+ * If it can be collapsed, fill in the state structure and return 1.
+ * If nothing can be done, return 0.
+ */
+STATIC int
+xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
+{
+ xfs_da_intnode_t *node;
+ xfs_da_state_blk_t *blk;
+ xfs_da_blkinfo_t *info;
+ int count, forward, error, retval, i;
+ xfs_dablk_t blkno;
+ xfs_dabuf_t *bp;
+
+ /*
+ * Check for the degenerate case of the block being over 50% full.
+ * If so, it's not worth even looking to see if we might be able
+ * to coalesce with a sibling.
+ */
+ blk = &state->path.blk[ state->path.active-1 ];
+ info = blk->bp->data;
+ ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+ node = (xfs_da_intnode_t *)info;
+ count = INT_GET(node->hdr.count, ARCH_CONVERT);
+ if (count > (state->node_ents >> 1)) {
+ *action = 0; /* blk over 50%, don't try to join */
+ return(0);
+ }
+
+ /*
+ * Check for the degenerate case of the block being empty.
+ * If the block is empty, we'll simply delete it, no need to
+ * coalesce it with a sibling block. We choose (arbitrarily)
+ * to merge with the forward block unless it is NULL.
+ */
+ if (count == 0) {
+ /*
+ * Make altpath point to the block we want to keep and
+ * path point to the block we want to drop (this one).
+ */
+ forward = (!INT_ISZERO(info->forw, ARCH_CONVERT));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
+ error = xfs_da_path_shift(state, &state->altpath, forward,
+ 0, &retval);
+ if (error)
+ return(error);
+ if (retval) {
+ *action = 0;
+ } else {
+ *action = 2;
+ }
+ return(0);
+ }
+
+ /*
+ * Examine each sibling block to see if we can coalesce with
+ * at least 25% free space to spare. We need to figure out
+ * whether to merge with the forward or the backward block.
+ * We prefer coalescing with the lower numbered sibling so as
+ * to shrink a directory over time. 
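+ *
+ * A worked example (illustrative numbers): with node_ents == 64
+ * the combined target is 64 - 16 = 48 entries, so a block with
+ * 20 entries may join a sibling holding 25 (20 + 25 <= 48) but
+ * not one holding 30.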
+ */ + /* start with smaller blk num */ + forward = (INT_GET(info->forw, ARCH_CONVERT) + < INT_GET(info->back, ARCH_CONVERT)); + for (i = 0; i < 2; forward = !forward, i++) { + if (forward) + blkno = INT_GET(info->forw, ARCH_CONVERT); + else + blkno = INT_GET(info->back, ARCH_CONVERT); + if (blkno == 0) + continue; + error = xfs_da_read_buf(state->args->trans, state->args->dp, + blkno, -1, &bp, state->args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + + node = (xfs_da_intnode_t *)info; + count = state->node_ents; + count -= state->node_ents >> 2; + count -= INT_GET(node->hdr.count, ARCH_CONVERT); + node = bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + count -= INT_GET(node->hdr.count, ARCH_CONVERT); + xfs_da_brelse(state->args->trans, bp); + if (count >= 0) + break; /* fits with at least 25% to spare */ + } + if (i >= 2) { + *action = 0; + return(0); + } + + /* + * Make altpath point to the block we want to keep (the lower + * numbered block) and path point to the block we want to drop. + */ + memcpy(&state->altpath, &state->path, sizeof(state->path)); + if (blkno < blk->blkno) { + error = xfs_da_path_shift(state, &state->altpath, forward, + 0, &retval); + if (error) { + return(error); + } + if (retval) { + *action = 0; + return(0); + } + } else { + error = xfs_da_path_shift(state, &state->path, forward, + 0, &retval); + if (error) { + return(error); + } + if (retval) { + *action = 0; + return(0); + } + } + *action = 1; + return(0); +} + +/* + * Walk back up the tree adjusting hash values as necessary, + * when we stop making changes, return. + */ +void +xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path) +{ + xfs_da_state_blk_t *blk; + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + xfs_dahash_t lasthash=0; + int level, count; + + level = path->active-1; + blk = &path->blk[ level ]; + switch (blk->magic) { +#ifdef __KERNEL__ + case XFS_ATTR_LEAF_MAGIC: + lasthash = xfs_attr_leaf_lasthash(blk->bp, &count); + if (count == 0) + return; + break; +#endif + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + lasthash = xfs_dir_leaf_lasthash(blk->bp, &count); + if (count == 0) + return; + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count); + if (count == 0) + return; + break; + case XFS_DA_NODE_MAGIC: + lasthash = xfs_da_node_lasthash(blk->bp, &count); + if (count == 0) + return; + break; + } + for (blk--, level--; level >= 0; blk--, level--) { + node = blk->bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + btree = &node->btree[ blk->index ]; + if (INT_GET(btree->hashval, ARCH_CONVERT) == lasthash) + break; + blk->hashval = lasthash; + INT_SET(btree->hashval, ARCH_CONVERT, lasthash); + xfs_da_log_buf(state->args->trans, blk->bp, + XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); + + lasthash = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + } +} + +/* + * Remove an entry from an intermediate node. + */ +STATIC void +xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk) +{ + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + int tmp; + + node = drop_blk->bp->data; + ASSERT(drop_blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)); + ASSERT(drop_blk->index >= 0); + + /* + * Copy over the offending entry, or just zero it out. 
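+ * (If the entry is interior, the later entries are shifted down
+ * over it and the now-stale last slot is zeroed; if it is
+ * already the last entry, only the zeroing is needed.)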
+ */ + btree = &node->btree[drop_blk->index]; + if (drop_blk->index < (INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { + tmp = INT_GET(node->hdr.count, ARCH_CONVERT) - drop_blk->index - 1; + tmp *= (uint)sizeof(xfs_da_node_entry_t); + memmove(btree, btree + 1, tmp); + xfs_da_log_buf(state->args->trans, drop_blk->bp, + XFS_DA_LOGRANGE(node, btree, tmp)); + btree = &node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ]; + } + memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); + xfs_da_log_buf(state->args->trans, drop_blk->bp, + XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); + INT_MOD(node->hdr.count, ARCH_CONVERT, -1); + xfs_da_log_buf(state->args->trans, drop_blk->bp, + XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); + + /* + * Copy the last hash value from the block to propagate upwards. + */ + btree--; + drop_blk->hashval = INT_GET(btree->hashval, ARCH_CONVERT); +} + +/* + * Unbalance the btree elements between two intermediate nodes, + * move all Btree elements from one node into another. + */ +STATIC void +xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk) +{ + xfs_da_intnode_t *drop_node, *save_node; + xfs_da_node_entry_t *btree; + int tmp; + xfs_trans_t *tp; + + drop_node = drop_blk->bp->data; + save_node = save_blk->bp->data; + ASSERT(INT_GET(drop_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(save_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + tp = state->args->trans; + + /* + * If the dying block has lower hashvals, then move all the + * elements in the remaining block up to make a hole. + */ + if ((INT_GET(drop_node->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(save_node->btree[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(drop_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT))) + { + btree = &save_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT) ]; + tmp = INT_GET(save_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); + memmove(btree, &save_node->btree[0], tmp); + btree = &save_node->btree[0]; + xfs_da_log_buf(tp, save_blk->bp, + XFS_DA_LOGRANGE(save_node, btree, + (INT_GET(save_node->hdr.count, ARCH_CONVERT) + INT_GET(drop_node->hdr.count, ARCH_CONVERT)) * + sizeof(xfs_da_node_entry_t))); + } else { + btree = &save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT) ]; + xfs_da_log_buf(tp, save_blk->bp, + XFS_DA_LOGRANGE(save_node, btree, + INT_GET(drop_node->hdr.count, ARCH_CONVERT) * + sizeof(xfs_da_node_entry_t))); + } + + /* + * Move all the B-tree elements from drop_blk to save_blk. + */ + tmp = INT_GET(drop_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); + memcpy(btree, &drop_node->btree[0], tmp); + INT_MOD(save_node->hdr.count, ARCH_CONVERT, INT_GET(drop_node->hdr.count, ARCH_CONVERT)); + + xfs_da_log_buf(tp, save_blk->bp, + XFS_DA_LOGRANGE(save_node, &save_node->hdr, + sizeof(save_node->hdr))); + + /* + * Save the last hashval in the remaining block for upward propagation. + */ + save_blk->hashval = INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); +} + +/*======================================================================== + * Routines used for finding things in the Btree. 
+ *========================================================================*/ + +/* + * Walk down the Btree looking for a particular filename, filling + * in the state structure as we go. + * + * We will set the state structure to point to each of the elements + * in each of the nodes where either the hashval is or should be. + * + * We support duplicate hashval's so for each entry in the current + * node that could contain the desired hashval, descend. This is a + * pruned depth-first tree search. + */ +int /* error */ +xfs_da_node_lookup_int(xfs_da_state_t *state, int *result) +{ + xfs_da_state_blk_t *blk; + xfs_da_blkinfo_t *curr; + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + xfs_dablk_t blkno; + int probe, span, max, error, retval; + xfs_dahash_t hashval; + xfs_da_args_t *args; + + args = state->args; + /* + * Descend thru the B-tree searching each level for the right + * node to use, until the right hashval is found. + */ + if (args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(state->mp)) + blkno = state->mp->m_dirleafblk; + else + blkno = 0; + for (blk = &state->path.blk[0], state->path.active = 1; + state->path.active <= XFS_DA_NODE_MAXDEPTH; + blk++, state->path.active++) { + /* + * Read the next node down in the tree. + */ + blk->blkno = blkno; + error = xfs_da_read_buf(state->args->trans, state->args->dp, + blkno, -1, &blk->bp, + state->args->whichfork); + if (error) { + blk->blkno = 0; + state->path.active--; + return(error); + } + ASSERT(blk->bp != NULL); + curr = blk->bp->data; + ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || + INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || + INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + + /* + * Search an intermediate node for a match. + */ + blk->magic = INT_GET(curr->magic, ARCH_CONVERT); + if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + node = blk->bp->data; + blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + + /* + * Binary search. (note: small blocks will skip loop) + */ + max = INT_GET(node->hdr.count, ARCH_CONVERT); + probe = span = max / 2; + hashval = state->args->hashval; + for (btree = &node->btree[probe]; span > 4; + btree = &node->btree[probe]) { + span /= 2; + if (INT_GET(btree->hashval, ARCH_CONVERT) < hashval) + probe += span; + else if (INT_GET(btree->hashval, ARCH_CONVERT) > hashval) + probe -= span; + else + break; + } + ASSERT((probe >= 0) && (probe < max)); + ASSERT((span <= 4) || (INT_GET(btree->hashval, ARCH_CONVERT) == hashval)); + + /* + * Since we may have duplicate hashval's, find the first + * matching hashval in the node. + */ + while ((probe > 0) && (INT_GET(btree->hashval, ARCH_CONVERT) >= hashval)) { + btree--; + probe--; + } + while ((probe < max) && (INT_GET(btree->hashval, ARCH_CONVERT) < hashval)) { + btree++; + probe++; + } + + /* + * Pick the right block to descend on. 
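+ *
+ * A worked example (illustrative hashvals): given entries
+ * { 10, 20, 20, 20, 30 } and a search value of 20, the loops
+ * above leave probe at index 1 (the first 20), so the descent
+ * visits every duplicate in order from there.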
+ */ + if (probe == max) { + blk->index = max-1; + blkno = INT_GET(node->btree[ max-1 ].before, ARCH_CONVERT); + } else { + blk->index = probe; + blkno = INT_GET(btree->before, ARCH_CONVERT); + } + } +#ifdef __KERNEL__ + else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); + break; + } +#endif + else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL); + break; + } + else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL); + break; + } + } + + /* + * A leaf block that ends in the hashval that we are interested in + * (final hashval == search hashval) means that the next block may + * contain more entries with the same hashval, shift upward to the + * next leaf and keep searching. + */ + for (;;) { + if (blk->magic == XFS_DIR_LEAF_MAGIC) { + ASSERT(XFS_DIR_IS_V1(state->mp)); + retval = xfs_dir_leaf_lookup_int(blk->bp, state->args, + &blk->index); + } else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) { + ASSERT(XFS_DIR_IS_V2(state->mp)); + retval = xfs_dir2_leafn_lookup_int(blk->bp, state->args, + &blk->index, state); + } +#ifdef __KERNEL__ + else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { + retval = xfs_attr_leaf_lookup_int(blk->bp, state->args); + blk->index = state->args->index; + state->args->blkno = blk->blkno; + } +#endif + if (((retval == ENOENT) || (retval == ENOATTR)) && + (blk->hashval == state->args->hashval)) { + error = xfs_da_path_shift(state, &state->path, 1, 1, + &retval); + if (error) + return(error); + if (retval == 0) { + continue; + } +#ifdef __KERNEL__ + else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { + /* path_shift() gives ENOENT */ + retval = XFS_ERROR(ENOATTR); + } +#endif + } + break; + } + *result = retval; + return(0); +} + +/*======================================================================== + * Utility routines. + *========================================================================*/ + +/* + * Link a new block into a doubly linked list of blocks (of whatever type). + */ +int /* error */ +xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, + xfs_da_state_blk_t *new_blk) +{ + xfs_da_blkinfo_t *old_info, *new_info, *tmp_info; + xfs_da_args_t *args; + int before=0, error; + xfs_dabuf_t *bp; + + /* + * Set up environment. + */ + args = state->args; + ASSERT(args != NULL); + old_info = old_blk->bp->data; + new_info = new_blk->bp->data; + ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || + old_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || + old_blk->magic == XFS_ATTR_LEAF_MAGIC); + ASSERT(old_blk->magic == INT_GET(old_info->magic, ARCH_CONVERT)); + ASSERT(new_blk->magic == INT_GET(new_info->magic, ARCH_CONVERT)); + ASSERT(old_blk->magic == new_blk->magic); + + switch (old_blk->magic) { +#ifdef __KERNEL__ + case XFS_ATTR_LEAF_MAGIC: + before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp); + break; +#endif + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + before = xfs_dir_leaf_order(old_blk->bp, new_blk->bp); + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp); + break; + case XFS_DA_NODE_MAGIC: + before = xfs_da_node_order(old_blk->bp, new_blk->bp); + break; + } + + /* + * Link blocks in appropriate order. + */ + if (before) { + /* + * Link new block in before existing block. 
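+ * That is: new->forw = old and new->back = old's previous back
+ * pointer; the old back neighbour, if any, is read in so its
+ * forw can be repointed at new, and finally old->back = new.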
+ */ + INT_SET(new_info->forw, ARCH_CONVERT, old_blk->blkno); + new_info->back = old_info->back; /* INT_: direct copy */ + if (INT_GET(old_info->back, ARCH_CONVERT)) { + error = xfs_da_read_buf(args->trans, args->dp, + INT_GET(old_info->back, + ARCH_CONVERT), -1, &bp, + args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + tmp_info = bp->data; + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(old_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == old_blk->blkno); + INT_SET(tmp_info->forw, ARCH_CONVERT, new_blk->blkno); + xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); + xfs_da_buf_done(bp); + } + INT_SET(old_info->back, ARCH_CONVERT, new_blk->blkno); + } else { + /* + * Link new block in after existing block. + */ + new_info->forw = old_info->forw; /* INT_: direct copy */ + INT_SET(new_info->back, ARCH_CONVERT, old_blk->blkno); + if (INT_GET(old_info->forw, ARCH_CONVERT)) { + error = xfs_da_read_buf(args->trans, args->dp, + INT_GET(old_info->forw, ARCH_CONVERT), -1, &bp, + args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + tmp_info = bp->data; + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) + == INT_GET(old_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) + == old_blk->blkno); + INT_SET(tmp_info->back, ARCH_CONVERT, new_blk->blkno); + xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); + xfs_da_buf_done(bp); + } + INT_SET(old_info->forw, ARCH_CONVERT, new_blk->blkno); + } + + xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); + xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); + return(0); +} + +/* + * Compare two intermediate nodes for "order". + */ +STATIC int +xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp) +{ + xfs_da_intnode_t *node1, *node2; + + node1 = node1_bp->data; + node2 = node2_bp->data; + ASSERT((INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) && + (INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC)); + if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && + ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < + INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + return(1); + } + return(0); +} + +/* + * Pick up the last hashvalue from an intermediate node. + */ +STATIC uint +xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count) +{ + xfs_da_intnode_t *node; + + node = bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + if (count) + *count = INT_GET(node->hdr.count, ARCH_CONVERT); + if (INT_ISZERO(node->hdr.count, ARCH_CONVERT)) + return(0); + return(INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); +} + +/* + * Unlink a block from a doubly linked list of blocks. + */ +int /* error */ +xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk) +{ + xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info; + xfs_da_args_t *args; + xfs_dabuf_t *bp; + int error; + + /* + * Set up environment. 
+ */ + args = state->args; + ASSERT(args != NULL); + save_info = save_blk->bp->data; + drop_info = drop_blk->bp->data; + ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || + save_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || + save_blk->magic == XFS_ATTR_LEAF_MAGIC); + ASSERT(save_blk->magic == INT_GET(save_info->magic, ARCH_CONVERT)); + ASSERT(drop_blk->magic == INT_GET(drop_info->magic, ARCH_CONVERT)); + ASSERT(save_blk->magic == drop_blk->magic); + ASSERT((INT_GET(save_info->forw, ARCH_CONVERT) == drop_blk->blkno) || + (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno)); + ASSERT((INT_GET(drop_info->forw, ARCH_CONVERT) == save_blk->blkno) || + (INT_GET(drop_info->back, ARCH_CONVERT) == save_blk->blkno)); + + /* + * Unlink the leaf block from the doubly linked chain of leaves. + */ + if (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno) { + save_info->back = drop_info->back; /* INT_: direct copy */ + if (INT_GET(drop_info->back, ARCH_CONVERT)) { + error = xfs_da_read_buf(args->trans, args->dp, + INT_GET(drop_info->back, + ARCH_CONVERT), -1, &bp, + args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + tmp_info = bp->data; + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(save_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == drop_blk->blkno); + INT_SET(tmp_info->forw, ARCH_CONVERT, save_blk->blkno); + xfs_da_log_buf(args->trans, bp, 0, + sizeof(*tmp_info) - 1); + xfs_da_buf_done(bp); + } + } else { + save_info->forw = drop_info->forw; /* INT_: direct copy */ + if (INT_GET(drop_info->forw, ARCH_CONVERT)) { + error = xfs_da_read_buf(args->trans, args->dp, + INT_GET(drop_info->forw, ARCH_CONVERT), -1, &bp, + args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + tmp_info = bp->data; + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) + == INT_GET(save_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) + == drop_blk->blkno); + INT_SET(tmp_info->back, ARCH_CONVERT, save_blk->blkno); + xfs_da_log_buf(args->trans, bp, 0, + sizeof(*tmp_info) - 1); + xfs_da_buf_done(bp); + } + } + + xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); + return(0); +} + +/* + * Move a path "forward" or "!forward" one block at the current level. + * + * This routine will adjust a "path" to point to the next block + * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the + * Btree, including updating pointers to the intermediate nodes between + * the new bottom and the root. + */ +int /* error */ +xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, + int forward, int release, int *result) +{ + xfs_da_state_blk_t *blk; + xfs_da_blkinfo_t *info; + xfs_da_intnode_t *node; + xfs_da_args_t *args; + xfs_dablk_t blkno=0; + int level, error; + + /* + * Roll up the Btree looking for the first block where our + * current index is not at the edge of the block. Note that + * we skip the bottom layer because we want the sibling block. 
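+ *
+ * For example (an illustrative shape): shifting forward from
+ * the last entry of a leaf climbs until some node still has an
+ * entry to the right of the current index, steps onto it, and
+ * then descends through index 0 at each level back down to the
+ * leaf.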
+ */ + args = state->args; + ASSERT(args != NULL); + ASSERT(path != NULL); + ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); + level = (path->active-1) - 1; /* skip bottom layer in path */ + for (blk = &path->blk[level]; level >= 0; blk--, level--) { + ASSERT(blk->bp != NULL); + node = blk->bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + if (forward && (blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { + blk->index++; + blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + break; + } else if (!forward && (blk->index > 0)) { + blk->index--; + blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + break; + } + } + if (level < 0) { + *result = XFS_ERROR(ENOENT); /* we're out of our tree */ + ASSERT(args->oknoent); + return(0); + } + + /* + * Roll down the edge of the subtree until we reach the + * same depth we were at originally. + */ + for (blk++, level++; level < path->active; blk++, level++) { + /* + * Release the old block. + * (if it's dirty, trans won't actually let go) + */ + if (release) + xfs_da_brelse(args->trans, blk->bp); + + /* + * Read the next child block. + */ + blk->blkno = blkno; + error = xfs_da_read_buf(args->trans, args->dp, blkno, -1, + &blk->bp, args->whichfork); + if (error) + return(error); + ASSERT(blk->bp != NULL); + info = blk->bp->data; + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || + INT_GET(info->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || + INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + blk->magic = INT_GET(info->magic, ARCH_CONVERT); + if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + node = (xfs_da_intnode_t *)info; + blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + if (forward) + blk->index = 0; + else + blk->index = INT_GET(node->hdr.count, ARCH_CONVERT)-1; + blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + } else { + ASSERT(level == path->active-1); + blk->index = 0; + switch(blk->magic) { +#ifdef __KERNEL__ + case XFS_ATTR_LEAF_MAGIC: + blk->hashval = xfs_attr_leaf_lasthash(blk->bp, + NULL); + break; +#endif + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + blk->hashval = xfs_dir_leaf_lasthash(blk->bp, + NULL); + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, + NULL); + break; + default: + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC || + blk->magic == + XFS_DIRX_LEAF_MAGIC(state->mp)); + break; + } + } + } + *result = 0; + return(0); +} + + +/*======================================================================== + * Utility routines. + *========================================================================*/ + +/* + * Implement a simple hash on a character string. + * Rotate the hash value by 7 bits, then XOR each character in. + * This is implemented with some source-level loop unrolling. + */ +xfs_dahash_t +xfs_da_hashname(uchar_t *name, int namelen) +{ + xfs_dahash_t hash; + +#define ROTL(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) +#ifdef SLOWVERSION + /* + * This is the old one-byte-at-a-time version. + */ + for (hash = 0; namelen > 0; namelen--) { + hash = *name++ ^ ROTL(hash, 7); + } + return(hash); +#else + /* + * Do four characters at a time as long as we can. 
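+ *
+ * A worked example (illustrative input): the two-byte name "ab"
+ * skips this loop, and the switch below returns
+ * ('a' << 7) ^ ('b' << 0) ^ ROTL(0, 14) = 0x3080 ^ 0x62 = 0x30e2.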
+ */ + for (hash = 0; namelen >= 4; namelen -= 4, name += 4) { + hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^ + (name[3] << 0) ^ ROTL(hash, 7 * 4); + } + /* + * Now do the rest of the characters. + */ + switch (namelen) { + case 3: + return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^ + ROTL(hash, 7 * 3); + case 2: + return (name[0] << 7) ^ (name[1] << 0) ^ ROTL(hash, 7 * 2); + case 1: + return (name[0] << 0) ^ ROTL(hash, 7 * 1); + case 0: + return hash; + } + /* NOTREACHED */ +#endif +#undef ROTL + return 0; /* keep gcc happy */ +} + +/* + * Add a block to the btree ahead of the file. + * Return the new block number to the caller. + */ +int +xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) +{ + xfs_fileoff_t bno, b; + xfs_bmbt_irec_t map; + xfs_bmbt_irec_t *mapp; + xfs_inode_t *dp; + int nmap, error, w, count, c, got, i, mapi; + xfs_fsize_t size; + xfs_trans_t *tp; + xfs_mount_t *mp; + + dp = args->dp; + mp = dp->i_mount; + w = args->whichfork; + tp = args->trans; + /* + * For new directories adjust the file offset and block count. + */ + if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) { + bno = mp->m_dirleafblk; + count = mp->m_dirblkfsbs; + } else { + bno = 0; + count = 1; + } + /* + * Find a spot in the file space to put the new block. + */ + if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, w))) { + return error; + } + if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) + ASSERT(bno >= mp->m_dirleafblk && bno < mp->m_dirfreeblk); + /* + * Try mapping it in one filesystem block. + */ + nmap = 1; + ASSERT(args->firstblock != NULL); + if ((error = xfs_bmapi(tp, dp, bno, count, + XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA| + XFS_BMAPI_CONTIG, + args->firstblock, args->total, &map, &nmap, + args->flist))) { + return error; + } + ASSERT(nmap <= 1); + if (nmap == 1) { + mapp = ↦ + mapi = 1; + } + /* + * If we didn't get it and the block might work if fragmented, + * try without the CONTIG flag. Loop until we get it all. + */ + else if (nmap == 0 && count > 1) { + mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP); + for (b = bno, mapi = 0; b < bno + count; ) { + nmap = MIN(XFS_BMAP_MAX_NMAP, count); + c = (int)(bno + count - b); + if ((error = xfs_bmapi(tp, dp, b, c, + XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE| + XFS_BMAPI_METADATA, + args->firstblock, args->total, + &mapp[mapi], &nmap, args->flist))) { + kmem_free(mapp, sizeof(*mapp) * count); + return error; + } + if (nmap < 1) + break; + mapi += nmap; + b = mapp[mapi - 1].br_startoff + + mapp[mapi - 1].br_blockcount; + } + } else { + mapi = 0; + mapp = NULL; + } + /* + * Count the blocks we got, make sure it matches the total. + */ + for (i = 0, got = 0; i < mapi; i++) + got += mapp[i].br_blockcount; + if (got != count || mapp[0].br_startoff != bno || + mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != + bno + count) { + if (mapp != &map) + kmem_free(mapp, sizeof(*mapp) * count); + return XFS_ERROR(ENOSPC); + } + if (mapp != &map) + kmem_free(mapp, sizeof(*mapp) * count); + *new_blkno = (xfs_dablk_t)bno; + /* + * For version 1 directories, adjust the file size if it changed. + */ + if (w == XFS_DATA_FORK && XFS_DIR_IS_V1(mp)) { + ASSERT(mapi == 1); + if ((error = xfs_bmap_last_offset(tp, dp, &bno, w))) + return error; + size = XFS_FSB_TO_B(mp, bno); + if (size != dp->i_d.di_size) { + dp->i_d.di_size = size; + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + } + } + return 0; +} + +/* + * Ick. 
We need to always be able to remove a btree block, even + * if there's no space reservation because the filesystem is full. + * This is called if xfs_bunmapi on a btree block fails due to ENOSPC. + * It swaps the target block with the last block in the file. The + * last block in the file can always be removed since it can't cause + * a bmap btree split to do that. + */ +STATIC int +xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, + xfs_dabuf_t **dead_bufp) +{ + xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno; + xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf; + xfs_fileoff_t lastoff; + xfs_inode_t *ip; + xfs_trans_t *tp; + xfs_mount_t *mp; + int error, w, entno, level, dead_level; + xfs_da_blkinfo_t *dead_info, *sib_info; + xfs_da_intnode_t *par_node, *dead_node; + xfs_dir_leafblock_t *dead_leaf; + xfs_dir2_leaf_t *dead_leaf2; + xfs_dahash_t dead_hash; + + dead_buf = *dead_bufp; + dead_blkno = *dead_blknop; + tp = args->trans; + ip = args->dp; + w = args->whichfork; + ASSERT(w == XFS_DATA_FORK); + mp = ip->i_mount; + if (XFS_DIR_IS_V2(mp)) { + lastoff = mp->m_dirfreeblk; + error = xfs_bmap_last_before(tp, ip, &lastoff, w); + } else + error = xfs_bmap_last_offset(tp, ip, &lastoff, w); + if (error) + return error; + if (unlikely(lastoff == 0)) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW, + mp); + return XFS_ERROR(EFSCORRUPTED); + } + /* + * Read the last block in the btree space. + */ + last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs; + if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w))) + return error; + /* + * Copy the last block into the dead buffer and log it. + */ + memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize); + xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1); + dead_info = dead_buf->data; + /* + * Get values from the moved block. + */ + if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + ASSERT(XFS_DIR_IS_V1(mp)); + dead_leaf = (xfs_dir_leafblock_t *)dead_info; + dead_level = 0; + dead_hash = + INT_GET(dead_leaf->entries[INT_GET(dead_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + } else if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + ASSERT(XFS_DIR_IS_V2(mp)); + dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; + dead_level = 0; + dead_hash = INT_GET(dead_leaf2->ents[INT_GET(dead_leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + } else { + ASSERT(INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + dead_node = (xfs_da_intnode_t *)dead_info; + dead_level = INT_GET(dead_node->hdr.level, ARCH_CONVERT); + dead_hash = INT_GET(dead_node->btree[INT_GET(dead_node->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + } + sib_buf = par_buf = NULL; + /* + * If the moved block has a left sibling, fix up the pointers. 
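+ * (The left sibling still names last_blkno as its forw
+ * neighbour; it must now point at dead_blkno, where the moved
+ * block's contents live after the copy above.)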
+ */ + if ((sib_blkno = INT_GET(dead_info->back, ARCH_CONVERT))) { + if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) + goto done; + sib_info = sib_buf->data; + if (unlikely( + INT_GET(sib_info->forw, ARCH_CONVERT) != last_blkno || + INT_GET(sib_info->magic, ARCH_CONVERT) != INT_GET(dead_info->magic, ARCH_CONVERT))) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + INT_SET(sib_info->forw, ARCH_CONVERT, dead_blkno); + xfs_da_log_buf(tp, sib_buf, + XFS_DA_LOGRANGE(sib_info, &sib_info->forw, + sizeof(sib_info->forw))); + xfs_da_buf_done(sib_buf); + sib_buf = NULL; + } + /* + * If the moved block has a right sibling, fix up the pointers. + */ + if ((sib_blkno = INT_GET(dead_info->forw, ARCH_CONVERT))) { + if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) + goto done; + sib_info = sib_buf->data; + if (unlikely( + INT_GET(sib_info->back, ARCH_CONVERT) != last_blkno + || INT_GET(sib_info->magic, ARCH_CONVERT) + != INT_GET(dead_info->magic, ARCH_CONVERT))) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + INT_SET(sib_info->back, ARCH_CONVERT, dead_blkno); + xfs_da_log_buf(tp, sib_buf, + XFS_DA_LOGRANGE(sib_info, &sib_info->back, + sizeof(sib_info->back))); + xfs_da_buf_done(sib_buf); + sib_buf = NULL; + } + par_blkno = XFS_DIR_IS_V1(mp) ? 0 : mp->m_dirleafblk; + level = -1; + /* + * Walk down the tree looking for the parent of the moved block. + */ + for (;;) { + if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) + goto done; + par_node = par_buf->data; + if (unlikely( + INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC || + (level >= 0 && level != INT_GET(par_node->hdr.level, ARCH_CONVERT) + 1))) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + level = INT_GET(par_node->hdr.level, ARCH_CONVERT); + for (entno = 0; + entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && + INT_GET(par_node->btree[entno].hashval, ARCH_CONVERT) < dead_hash; + entno++) + continue; + if (unlikely(entno == INT_GET(par_node->hdr.count, ARCH_CONVERT))) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + par_blkno = INT_GET(par_node->btree[entno].before, ARCH_CONVERT); + if (level == dead_level + 1) + break; + xfs_da_brelse(tp, par_buf); + par_buf = NULL; + } + /* + * We're in the right parent block. + * Look for the right entry. 
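+ * (Scan this node for a "before" pointer equal to last_blkno;
+ * if it is not found here, duplicate hashvals may have pushed
+ * it into a forw sibling at the same level, so follow the
+ * chain.)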
+ */ + for (;;) { + for (; + entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && + INT_GET(par_node->btree[entno].before, ARCH_CONVERT) != last_blkno; + entno++) + continue; + if (entno < INT_GET(par_node->hdr.count, ARCH_CONVERT)) + break; + par_blkno = INT_GET(par_node->hdr.info.forw, ARCH_CONVERT); + xfs_da_brelse(tp, par_buf); + par_buf = NULL; + if (unlikely(par_blkno == 0)) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) + goto done; + par_node = par_buf->data; + if (unlikely( + INT_GET(par_node->hdr.level, ARCH_CONVERT) != level || + INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + entno = 0; + } + /* + * Update the parent entry pointing to the moved block. + */ + INT_SET(par_node->btree[entno].before, ARCH_CONVERT, dead_blkno); + xfs_da_log_buf(tp, par_buf, + XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before, + sizeof(par_node->btree[entno].before))); + xfs_da_buf_done(par_buf); + xfs_da_buf_done(dead_buf); + *dead_blknop = last_blkno; + *dead_bufp = last_buf; + return 0; +done: + if (par_buf) + xfs_da_brelse(tp, par_buf); + if (sib_buf) + xfs_da_brelse(tp, sib_buf); + xfs_da_brelse(tp, last_buf); + return error; +} + +/* + * Remove a btree block from a directory or attribute. + */ +int +xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, + xfs_dabuf_t *dead_buf) +{ + xfs_inode_t *dp; + int done, error, w, count; + xfs_fileoff_t bno; + xfs_fsize_t size; + xfs_trans_t *tp; + xfs_mount_t *mp; + + dp = args->dp; + w = args->whichfork; + tp = args->trans; + mp = dp->i_mount; + if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) + count = mp->m_dirblkfsbs; + else + count = 1; + for (;;) { + /* + * Remove extents. If we get ENOSPC for a dir we have to move + * the last block to the place we want to kill. + */ + if ((error = xfs_bunmapi(tp, dp, dead_blkno, count, + XFS_BMAPI_AFLAG(w)|XFS_BMAPI_METADATA, + 0, args->firstblock, args->flist, + &done)) == ENOSPC) { + if (w != XFS_DATA_FORK) + goto done; + if ((error = xfs_da_swap_lastblock(args, &dead_blkno, + &dead_buf))) + goto done; + } else if (error) + goto done; + else + break; + } + ASSERT(done); + xfs_da_binval(tp, dead_buf); + /* + * Adjust the directory size for version 1. + */ + if (w == XFS_DATA_FORK && XFS_DIR_IS_V1(mp)) { + if ((error = xfs_bmap_last_offset(tp, dp, &bno, w))) + return error; + size = XFS_FSB_TO_B(dp->i_mount, bno); + if (size != dp->i_d.di_size) { + dp->i_d.di_size = size; + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + } + } + return 0; +done: + xfs_da_binval(tp, dead_buf); + return error; +} + +/* + * See if the mapping(s) for this btree block are valid, i.e. + * don't contain holes, are logically contiguous, and cover the whole range. + */ +STATIC int +xfs_da_map_covers_blocks( + int nmap, + xfs_bmbt_irec_t *mapp, + xfs_dablk_t bno, + int count) +{ + int i; + xfs_fileoff_t off; + + for (i = 0, off = bno; i < nmap; i++) { + if (mapp[i].br_startblock == HOLESTARTBLOCK || + mapp[i].br_startblock == DELAYSTARTBLOCK) { + return 0; + } + if (off != mapp[i].br_startoff) { + return 0; + } + off += mapp[i].br_blockcount; + } + return off == bno + count; +} + +/* + * Make a dabuf. + * Used for get_buf, read_buf, read_bufr, and reada_buf. 
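+ * (The "caller" argument selects the behaviour: 0 = get_buf,
+ * 1 = read_buf, 2 = read_bufr when built outside the kernel,
+ * 3 = reada_buf, i.e. readahead only, with no dabuf returned.)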
+ */ +STATIC int +xfs_da_do_buf( + xfs_trans_t *trans, + xfs_inode_t *dp, + xfs_dablk_t bno, + xfs_daddr_t *mappedbnop, + xfs_dabuf_t **bpp, + int whichfork, + int caller, + inst_t *ra) +{ + xfs_buf_t *bp = 0; + xfs_buf_t **bplist; + int error=0; + int i; + xfs_bmbt_irec_t map; + xfs_bmbt_irec_t *mapp; + xfs_daddr_t mappedbno; + xfs_mount_t *mp; + int nbplist=0; + int nfsb; + int nmap; + xfs_dabuf_t *rbp; + + mp = dp->i_mount; + if (whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) + nfsb = mp->m_dirblkfsbs; + else + nfsb = 1; + mappedbno = *mappedbnop; + /* + * Caller doesn't have a mapping. -2 means don't complain + * if we land in a hole. + */ + if (mappedbno == -1 || mappedbno == -2) { + /* + * Optimize the one-block case. + */ + if (nfsb == 1) { + xfs_fsblock_t fsb; + + if ((error = + xfs_bmapi_single(trans, dp, whichfork, &fsb, + (xfs_fileoff_t)bno))) { + return error; + } + mapp = ↦ + if (fsb == NULLFSBLOCK) { + nmap = 0; + } else { + map.br_startblock = fsb; + map.br_startoff = (xfs_fileoff_t)bno; + map.br_blockcount = 1; + nmap = 1; + } + } else { + mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP); + nmap = nfsb; + if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno, + nfsb, + XFS_BMAPI_METADATA | + XFS_BMAPI_AFLAG(whichfork), + NULL, 0, mapp, &nmap, NULL))) + goto exit0; + } + } else { + map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno); + map.br_startoff = (xfs_fileoff_t)bno; + map.br_blockcount = nfsb; + mapp = ↦ + nmap = 1; + } + if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) { + error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED); + if (unlikely(error == EFSCORRUPTED)) { + if (xfs_error_level >= XFS_ERRLEVEL_LOW) { + int i; + cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n", + (long long)bno); + cmn_err(CE_ALERT, "dir: inode %lld\n", + (long long)dp->i_ino); + for (i = 0; i < nmap; i++) { + cmn_err(CE_ALERT, + "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d\n", + i, + mapp[i].br_startoff, + mapp[i].br_startblock, + mapp[i].br_blockcount, + mapp[i].br_state); + } + } + XFS_ERROR_REPORT("xfs_da_do_buf(1)", + XFS_ERRLEVEL_LOW, mp); + } + goto exit0; + } + if (caller != 3 && nmap > 1) { + bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP); + nbplist = 0; + } else + bplist = NULL; + /* + * Turn the mapping(s) into buffer(s). + */ + for (i = 0; i < nmap; i++) { + int nmapped; + + mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock); + if (i == 0) + *mappedbnop = mappedbno; + nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount); + switch (caller) { + case 0: + bp = xfs_trans_get_buf(trans, mp->m_ddev_targp, + mappedbno, nmapped, 0); + error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO); + break; + case 1: +#ifndef __KERNEL__ + case 2: +#endif + bp = NULL; + error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp, + mappedbno, nmapped, 0, &bp); + break; +#ifdef __KERNEL__ + case 3: + xfs_baread(mp->m_ddev_targp, mappedbno, nmapped); + error = 0; + bp = NULL; + break; +#endif + } + if (error) { + if (bp) + xfs_trans_brelse(trans, bp); + goto exit1; + } + if (!bp) + continue; + if (caller == 1) { + if (whichfork == XFS_ATTR_FORK) { + XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE, + XFS_ATTR_BTREE_REF); + } else { + XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE, + XFS_DIR_BTREE_REF); + } + } + if (bplist) { + bplist[nbplist++] = bp; + } + } + /* + * Build a dabuf structure. 
+ */ + if (bplist) { + rbp = xfs_da_buf_make(nbplist, bplist, ra); + } else if (bp) + rbp = xfs_da_buf_make(1, &bp, ra); + else + rbp = NULL; + /* + * For read_buf, check the magic number. + */ + if (caller == 1) { + xfs_dir2_data_t *data; + xfs_dir2_free_t *free; + xfs_da_blkinfo_t *info; + uint magic, magic1; + + info = rbp->data; + data = rbp->data; + free = rbp->data; + magic = INT_GET(info->magic, ARCH_CONVERT); + magic1 = INT_GET(data->hdr.magic, ARCH_CONVERT); + if (unlikely( + XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) && + (magic != XFS_DIR_LEAF_MAGIC) && + (magic != XFS_ATTR_LEAF_MAGIC) && + (magic != XFS_DIR2_LEAF1_MAGIC) && + (magic != XFS_DIR2_LEAFN_MAGIC) && + (magic1 != XFS_DIR2_BLOCK_MAGIC) && + (magic1 != XFS_DIR2_DATA_MAGIC) && + (INT_GET(free->hdr.magic, ARCH_CONVERT) != XFS_DIR2_FREE_MAGIC), + mp, XFS_ERRTAG_DA_READ_BUF, + XFS_RANDOM_DA_READ_BUF))) { + xfs_buftrace("DA READ ERROR", rbp->bps[0]); + XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)", + XFS_ERRLEVEL_LOW, mp, info); + error = XFS_ERROR(EFSCORRUPTED); + xfs_da_brelse(trans, rbp); + nbplist = 0; + goto exit1; + } + } + if (bplist) { + kmem_free(bplist, sizeof(*bplist) * nmap); + } + if (mapp != &map) { + kmem_free(mapp, sizeof(*mapp) * nfsb); + } + if (bpp) + *bpp = rbp; + return 0; +exit1: + if (bplist) { + for (i = 0; i < nbplist; i++) + xfs_trans_brelse(trans, bplist[i]); + kmem_free(bplist, sizeof(*bplist) * nmap); + } +exit0: + if (mapp != &map) + kmem_free(mapp, sizeof(*mapp) * nfsb); + if (bpp) + *bpp = NULL; + return error; +} + +/* + * Get a buffer for the dir/attr block. + */ +int +xfs_da_get_buf( + xfs_trans_t *trans, + xfs_inode_t *dp, + xfs_dablk_t bno, + xfs_daddr_t mappedbno, + xfs_dabuf_t **bpp, + int whichfork) +{ + return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0, + (inst_t *)__return_address); +} + +/* + * Get a buffer for the dir/attr block, fill in the contents. + */ +int +xfs_da_read_buf( + xfs_trans_t *trans, + xfs_inode_t *dp, + xfs_dablk_t bno, + xfs_daddr_t mappedbno, + xfs_dabuf_t **bpp, + int whichfork) +{ + return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1, + (inst_t *)__return_address); +} + +/* + * Readahead the dir/attr block. + */ +xfs_daddr_t +xfs_da_reada_buf( + xfs_trans_t *trans, + xfs_inode_t *dp, + xfs_dablk_t bno, + int whichfork) +{ + xfs_daddr_t rval; + + rval = -1; + if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3, + (inst_t *)__return_address)) + return -1; + else + return rval; +} + +/* + * Calculate the number of bits needed to hold i different values. + */ +uint +xfs_da_log2_roundup(uint i) +{ + uint rval; + + for (rval = 0; rval < NBBY * sizeof(i); rval++) { + if ((1 << rval) >= i) + break; + } + return(rval); +} + +kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */ +kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */ + +/* + * Allocate a dir-state structure. + * We don't put them on the stack since they're large. + */ +xfs_da_state_t * +xfs_da_state_alloc(void) +{ + return kmem_zone_zalloc(xfs_da_state_zone, KM_SLEEP); +} + +/* + * Kill the altpath contents of a da-state structure. + */ +void +xfs_da_state_kill_altpath(xfs_da_state_t *state) +{ + int i; + + for (i = 0; i < state->altpath.active; i++) { + if (state->altpath.blk[i].bp) { + if (state->altpath.blk[i].bp != state->path.blk[i].bp) + xfs_da_buf_done(state->altpath.blk[i].bp); + state->altpath.blk[i].bp = NULL; + } + } + state->altpath.active = 0; +} + +/* + * Free a da-state structure. 
+ */ +void +xfs_da_state_free(xfs_da_state_t *state) +{ + int i; + + xfs_da_state_kill_altpath(state); + for (i = 0; i < state->path.active; i++) { + if (state->path.blk[i].bp) + xfs_da_buf_done(state->path.blk[i].bp); + } + if (state->extravalid && state->extrablk.bp) + xfs_da_buf_done(state->extrablk.bp); +#ifdef DEBUG + memset((char *)state, 0, sizeof(*state)); +#endif /* DEBUG */ + kmem_zone_free(xfs_da_state_zone, state); +} + +#ifdef XFS_DABUF_DEBUG +xfs_dabuf_t *xfs_dabuf_global_list; +lock_t xfs_dabuf_global_lock; +#endif + +/* + * Create a dabuf. + */ +/* ARGSUSED */ +STATIC xfs_dabuf_t * +xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra) +{ + xfs_buf_t *bp; + xfs_dabuf_t *dabuf; + int i; + int off; + + if (nbuf == 1) + dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_SLEEP); + else + dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_SLEEP); + dabuf->dirty = 0; +#ifdef XFS_DABUF_DEBUG + dabuf->ra = ra; + dabuf->dev = XFS_BUF_TARGET_DEV(bps[0]); + dabuf->blkno = XFS_BUF_ADDR(bps[0]); +#endif + if (nbuf == 1) { + dabuf->nbuf = 1; + bp = bps[0]; + dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp)); + dabuf->data = XFS_BUF_PTR(bp); + dabuf->bps[0] = bp; + } else { + dabuf->nbuf = nbuf; + for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) { + dabuf->bps[i] = bp = bps[i]; + dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp)); + } + dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP); + for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) { + bp = bps[i]; + memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp), + XFS_BUF_COUNT(bp)); + } + } +#ifdef XFS_DABUF_DEBUG + { + SPLDECL(s); + xfs_dabuf_t *p; + + s = mutex_spinlock(&xfs_dabuf_global_lock); + for (p = xfs_dabuf_global_list; p; p = p->next) { + ASSERT(p->blkno != dabuf->blkno || + p->dev != dabuf->dev); + } + dabuf->prev = NULL; + if (xfs_dabuf_global_list) + xfs_dabuf_global_list->prev = dabuf; + dabuf->next = xfs_dabuf_global_list; + xfs_dabuf_global_list = dabuf; + mutex_spinunlock(&xfs_dabuf_global_lock, s); + } +#endif + return dabuf; +} + +/* + * Un-dirty a dabuf. + */ +STATIC void +xfs_da_buf_clean(xfs_dabuf_t *dabuf) +{ + xfs_buf_t *bp; + int i; + int off; + + if (dabuf->dirty) { + ASSERT(dabuf->nbuf > 1); + dabuf->dirty = 0; + for (i = off = 0; i < dabuf->nbuf; + i++, off += XFS_BUF_COUNT(bp)) { + bp = dabuf->bps[i]; + memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off, + XFS_BUF_COUNT(bp)); + } + } +} + +/* + * Release a dabuf. + */ +void +xfs_da_buf_done(xfs_dabuf_t *dabuf) +{ + ASSERT(dabuf); + ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); + if (dabuf->dirty) + xfs_da_buf_clean(dabuf); + if (dabuf->nbuf > 1) + kmem_free(dabuf->data, BBTOB(dabuf->bbcount)); +#ifdef XFS_DABUF_DEBUG + { + SPLDECL(s); + + s = mutex_spinlock(&xfs_dabuf_global_lock); + if (dabuf->prev) + dabuf->prev->next = dabuf->next; + else + xfs_dabuf_global_list = dabuf->next; + if (dabuf->next) + dabuf->next->prev = dabuf->prev; + mutex_spinunlock(&xfs_dabuf_global_lock, s); + } + memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf)); +#endif + if (dabuf->nbuf == 1) + kmem_zone_free(xfs_dabuf_zone, dabuf); + else + kmem_free(dabuf, XFS_DA_BUF_SIZE(dabuf->nbuf)); +} + +/* + * Log transaction from a dabuf. 
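+ * (For a multi-buffer dabuf the [first, last] byte range is
+ * clipped against each underlying buffer: e.g. with two 4096
+ * byte buffers, logging 4000..5000 logs bytes 4000..4095 of
+ * the first and 0..904 of the second.)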
+ */
+void
+xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
+{
+ xfs_buf_t *bp;
+ uint f;
+ int i;
+ uint l;
+ int off;
+
+ ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
+ if (dabuf->nbuf == 1) {
+ ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0]));
+ xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
+ return;
+ }
+ dabuf->dirty = 1;
+ ASSERT(first <= last);
+ for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
+ bp = dabuf->bps[i];
+ f = off;
+ l = f + XFS_BUF_COUNT(bp) - 1;
+ if (f < first)
+ f = first;
+ if (l > last)
+ l = last;
+ if (f <= l)
+ xfs_trans_log_buf(tp, bp, f - off, l - off);
+ /*
+ * B_DONE is set by xfs_trans_log_buf.
+ * If we don't set it on a new buffer (get not read)
+ * then if we don't put anything in the buffer it won't
+ * be set, and at commit it is released into the cache,
+ * and then a read will fail.
+ */
+ else if (!(XFS_BUF_ISDONE(bp)))
+ XFS_BUF_DONE(bp);
+ }
+ ASSERT(last < off);
+}
+
+/*
+ * Release dabuf from a transaction.
+ * Have to free up the dabuf before the buffers are released,
+ * since the synchronization on the dabuf is really the lock on the buffer.
+ */
+void
+xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
+{
+ xfs_buf_t *bp;
+ xfs_buf_t **bplist;
+ int i;
+ int nbuf;
+
+ ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
+ if ((nbuf = dabuf->nbuf) == 1) {
+ bplist = &bp;
+ bp = dabuf->bps[0];
+ } else {
+ bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
+ memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
+ }
+ xfs_da_buf_done(dabuf);
+ for (i = 0; i < nbuf; i++)
+ xfs_trans_brelse(tp, bplist[i]);
+ if (bplist != &bp)
+ kmem_free(bplist, nbuf * sizeof(*bplist));
+}
+
+/*
+ * Invalidate dabuf from a transaction.
+ */
+void
+xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
+{
+ xfs_buf_t *bp;
+ xfs_buf_t **bplist;
+ int i;
+ int nbuf;
+
+ ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
+ if ((nbuf = dabuf->nbuf) == 1) {
+ bplist = &bp;
+ bp = dabuf->bps[0];
+ } else {
+ bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
+ memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
+ }
+ xfs_da_buf_done(dabuf);
+ for (i = 0; i < nbuf; i++)
+ xfs_trans_binval(tp, bplist[i]);
+ if (bplist != &bp)
+ kmem_free(bplist, nbuf * sizeof(*bplist));
+}
+
+/*
+ * Get the first daddr from a dabuf.
+ */
+xfs_daddr_t
+xfs_da_blkno(xfs_dabuf_t *dabuf)
+{
+ ASSERT(dabuf->nbuf);
+ ASSERT(dabuf->data);
+ return XFS_BUF_ADDR(dabuf->bps[0]);
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_da_btree.h linux.21rc5-ac2/fs/xfs/xfs_da_btree.h
--- linux.21rc5/fs/xfs/xfs_da_btree.h 1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_da_btree.h 2003-05-07 15:41:09.000000000 +0100
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. 
Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DA_BTREE_H__ +#define __XFS_DA_BTREE_H__ + +struct xfs_buf; +struct xfs_bmap_free; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; +struct zone; + +/*======================================================================== + * Directory Structure when greater than XFS_LBSIZE(mp) bytes. + *========================================================================*/ + +/* + * This structure is common to both leaf nodes and non-leaf nodes in the Btree. + * + * Is is used to manage a doubly linked list of all blocks at the same + * level in the Btree, and to identify which type of block this is. + */ +#define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */ +#define XFS_DIR_LEAF_MAGIC 0xfeeb /* magic number: directory leaf blks */ +#define XFS_ATTR_LEAF_MAGIC 0xfbee /* magic number: attribute leaf blks */ +#define XFS_DIR2_LEAF1_MAGIC 0xd2f1 /* magic number: v2 dirlf single blks */ +#define XFS_DIR2_LEAFN_MAGIC 0xd2ff /* magic number: v2 dirlf multi blks */ + +#define XFS_DIRX_LEAF_MAGIC(mp) \ + (XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC) + +typedef struct xfs_da_blkinfo { + xfs_dablk_t forw; /* previous block in list */ + xfs_dablk_t back; /* following block in list */ + __uint16_t magic; /* validity check on block */ + __uint16_t pad; /* unused */ +} xfs_da_blkinfo_t; + +/* + * This is the structure of the root and intermediate nodes in the Btree. + * The leaf nodes are defined above. + * + * Entries are not packed. + * + * Since we have duplicate keys, use a binary search but always follow + * all match in the block, not just the first match found. + */ +#define XFS_DA_NODE_MAXDEPTH 5 /* max depth of Btree */ + +typedef struct xfs_da_intnode { + struct xfs_da_node_hdr { /* constant-structure header block */ + xfs_da_blkinfo_t info; /* block type, links, etc. */ + __uint16_t count; /* count of active entries */ + __uint16_t level; /* level above leaves (leaf == 0) */ + } hdr; + struct xfs_da_node_entry { + xfs_dahash_t hashval; /* hash value for this descendant */ + xfs_dablk_t before; /* Btree block before this key */ + } btree[1]; /* variable sized array of keys */ +} xfs_da_intnode_t; +typedef struct xfs_da_node_hdr xfs_da_node_hdr_t; +typedef struct xfs_da_node_entry xfs_da_node_entry_t; + +#define XFS_DA_MAXHASH ((xfs_dahash_t)-1) /* largest valid hash value */ + +/* + * Macros used by directory code to interface to the filesystem. 
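The "follow all matches" rule noted above matters because different names
can hash to the same value. A sketch of the resulting search discipline,
over a hypothetical flat array of (hash, value) pairs sorted by hash
(struct ent and first_match are illustrative names only):

struct ent { unsigned hash; int value; };

/* Binary search, then back up so we return the FIRST duplicate;
 * callers walk forward through every entry with the same hash. */
int first_match(const struct ent *e, int n, unsigned want)
{
	int lo = 0, hi = n - 1, mid = 0;

	while (lo <= hi) {
		mid = (lo + hi) / 2;
		if (e[mid].hash == want)
			break;
		if (e[mid].hash < want)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	if (lo > hi)
		return -1;	/* no entry with this hash */
	while (mid > 0 && e[mid - 1].hash == want)
		mid--;		/* back up over duplicates */
	return mid;
}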
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LBSIZE) +int xfs_lbsize(struct xfs_mount *mp); +#define XFS_LBSIZE(mp) xfs_lbsize(mp) +#else +#define XFS_LBSIZE(mp) ((mp)->m_sb.sb_blocksize) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LBLOG) +int xfs_lblog(struct xfs_mount *mp); +#define XFS_LBLOG(mp) xfs_lblog(mp) +#else +#define XFS_LBLOG(mp) ((mp)->m_sb.sb_blocklog) +#endif + +/* + * Macros used by directory code to interface to the kernel + */ + +/* + * Macros used to manipulate directory off_t's + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_MAKE_BNOENTRY) +__uint32_t xfs_da_make_bnoentry(struct xfs_mount *mp, xfs_dablk_t bno, + int entry); +#define XFS_DA_MAKE_BNOENTRY(mp,bno,entry) \ + xfs_da_make_bnoentry(mp,bno,entry) +#else +#define XFS_DA_MAKE_BNOENTRY(mp,bno,entry) \ + (((bno) << (mp)->m_dircook_elog) | (entry)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_MAKE_COOKIE) +xfs_off_t xfs_da_make_cookie(struct xfs_mount *mp, xfs_dablk_t bno, int entry, + xfs_dahash_t hash); +#define XFS_DA_MAKE_COOKIE(mp,bno,entry,hash) \ + xfs_da_make_cookie(mp,bno,entry,hash) +#else +#define XFS_DA_MAKE_COOKIE(mp,bno,entry,hash) \ + (((xfs_off_t)XFS_DA_MAKE_BNOENTRY(mp, bno, entry) << 32) | (hash)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_COOKIE_HASH) +xfs_dahash_t xfs_da_cookie_hash(struct xfs_mount *mp, xfs_off_t cookie); +#define XFS_DA_COOKIE_HASH(mp,cookie) xfs_da_cookie_hash(mp,cookie) +#else +#define XFS_DA_COOKIE_HASH(mp,cookie) ((xfs_dahash_t)(cookie)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_COOKIE_BNO) +xfs_dablk_t xfs_da_cookie_bno(struct xfs_mount *mp, xfs_off_t cookie); +#define XFS_DA_COOKIE_BNO(mp,cookie) xfs_da_cookie_bno(mp,cookie) +#else +#define XFS_DA_COOKIE_BNO(mp,cookie) \ + (((xfs_off_t)(cookie) >> 31) == -1LL ? \ + (xfs_dablk_t)0 : \ + (xfs_dablk_t)((xfs_off_t)(cookie) >> ((mp)->m_dircook_elog + 32))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_COOKIE_ENTRY) +int xfs_da_cookie_entry(struct xfs_mount *mp, xfs_off_t cookie); +#define XFS_DA_COOKIE_ENTRY(mp,cookie) xfs_da_cookie_entry(mp,cookie) +#else +#define XFS_DA_COOKIE_ENTRY(mp,cookie) \ + (((xfs_off_t)(cookie) >> 31) == -1LL ? \ + (xfs_dablk_t)0 : \ + (xfs_dablk_t)(((xfs_off_t)(cookie) >> 32) & \ + ((1 << (mp)->m_dircook_elog) - 1))) +#endif + + +/*======================================================================== + * Btree searching and modification structure definitions. + *========================================================================*/ + +/* + * Structure to ease passing around component names. 
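The cookie macros above pack a directory position into one 64-bit seek
offset: the hash occupies the low 32 bits, and the block/entry pair the
high 32, split at m_dircook_elog bits for the entry index. A minimal
stand-alone rendering (make_cookie and friends are illustrative names;
the real macros additionally special-case an all-ones EOF cookie):

#include <stdint.h>

uint64_t make_cookie(int elog, uint32_t bno, uint32_t entry, uint32_t hash)
{
	uint64_t bnoentry = ((uint64_t)bno << elog) | entry;

	return (bnoentry << 32) | hash;
}

uint32_t cookie_hash(uint64_t cookie)
{
	return (uint32_t)cookie;		/* low 32 bits */
}

uint32_t cookie_entry(int elog, uint64_t cookie)
{
	return (uint32_t)((cookie >> 32) & ((1u << elog) - 1));
}

uint32_t cookie_bno(int elog, uint64_t cookie)
{
	return (uint32_t)(cookie >> (32 + elog));
}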
+ */ +typedef struct xfs_da_args { + uchar_t *name; /* string (maybe not NULL terminated) */ + int namelen; /* length of string (maybe no NULL) */ + uchar_t *value; /* set of bytes (maybe contain NULLs) */ + int valuelen; /* length of value */ + int flags; /* argument flags (eg: ATTR_NOCREATE) */ + xfs_dahash_t hashval; /* hash value of name */ + xfs_ino_t inumber; /* input/output inode number */ + struct xfs_inode *dp; /* directory inode to manipulate */ + xfs_fsblock_t *firstblock; /* ptr to firstblock for bmap calls */ + struct xfs_bmap_free *flist; /* ptr to freelist for bmap_finish */ + struct xfs_trans *trans; /* current trans (changes over time) */ + xfs_extlen_t total; /* total blocks needed, for 1st bmap */ + int whichfork; /* data or attribute fork */ + xfs_dablk_t blkno; /* blkno of attr leaf of interest */ + int index; /* index of attr of interest in blk */ + xfs_dablk_t rmtblkno; /* remote attr value starting blkno */ + int rmtblkcnt; /* remote attr value block count */ + int rename; /* T/F: this is an atomic rename op */ + xfs_dablk_t blkno2; /* blkno of 2nd attr leaf of interest */ + int index2; /* index of 2nd attr in blk */ + xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */ + int rmtblkcnt2; /* remote attr value block count */ + int justcheck; /* check for ok with no space */ + int addname; /* T/F: this is an add operation */ + int oknoent; /* T/F: ok to return ENOENT, else die */ +} xfs_da_args_t; + +/* + * Structure to describe buffer(s) for a block. + * This is needed in the directory version 2 format case, when + * multiple non-contiguous fsblocks might be needed to cover one + * logical directory block. + * If the buffer count is 1 then the data pointer points to the + * same place as the b_addr field for the buffer, else to kmem_alloced memory. + */ +typedef struct xfs_dabuf { + int nbuf; /* number of buffer pointers present */ + short dirty; /* data needs to be copied back */ + short bbcount; /* how large is data in bbs */ + void *data; /* pointer for buffers' data */ +#ifdef XFS_DABUF_DEBUG + inst_t *ra; /* return address of caller to make */ + struct xfs_dabuf *next; /* next in global chain */ + struct xfs_dabuf *prev; /* previous in global chain */ + dev_t dev; /* device for buffer */ + xfs_daddr_t blkno; /* daddr first in bps[0] */ +#endif + struct xfs_buf *bps[1]; /* actually nbuf of these */ +} xfs_dabuf_t; +#define XFS_DA_BUF_SIZE(n) \ + (sizeof(xfs_dabuf_t) + sizeof(struct xfs_buf *) * ((n) - 1)) + +#ifdef XFS_DABUF_DEBUG +extern xfs_dabuf_t *xfs_dabuf_global_list; +#endif + +/* + * Storage for holding state during Btree searches and split/join ops. + * + * Only need space for 5 intermediate nodes. With a minimum of 62-way + * fanout to the Btree, we can support over 900 million directory blocks, + * which is slightly more than enough. 
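The fanout arithmetic checks out: five levels of 62-way fanout give
62^5 = 916,132,832 leaf blocks, i.e. the "over 900 million" above. As for
xfs_dabuf's bps[1] member, XFS_DA_BUF_SIZE() is the classic C trailing-array
trick: declare one element, allocate room for n, index up to n-1. A toy
version (struct vec and vec_alloc are illustrative names):

#include <stdlib.h>

struct vec {
	int n;
	int slot[1];		/* really n slots; see VEC_SIZE() */
};

#define VEC_SIZE(n) (sizeof(struct vec) + sizeof(int) * ((n) - 1))

struct vec *vec_alloc(int n)
{
	struct vec *v = malloc(VEC_SIZE(n));	/* over-allocate */

	if (v)
		v->n = n;
	return v;
}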
+ */ +typedef struct xfs_da_state_blk { + xfs_dabuf_t *bp; /* buffer containing block */ + xfs_dablk_t blkno; /* filesystem blkno of buffer */ + xfs_daddr_t disk_blkno; /* on-disk blkno (in BBs) of buffer */ + int index; /* relevant index into block */ + xfs_dahash_t hashval; /* last hash value in block */ + int magic; /* blk's magic number, ie: blk type */ +} xfs_da_state_blk_t; + +typedef struct xfs_da_state_path { + int active; /* number of active levels */ + xfs_da_state_blk_t blk[XFS_DA_NODE_MAXDEPTH]; +} xfs_da_state_path_t; + +typedef struct xfs_da_state { + xfs_da_args_t *args; /* filename arguments */ + struct xfs_mount *mp; /* filesystem mount point */ + unsigned int blocksize; /* logical block size */ + unsigned int node_ents; /* how many entries in danode */ + xfs_da_state_path_t path; /* search/split paths */ + xfs_da_state_path_t altpath; /* alternate path for join */ + unsigned int inleaf : 1; /* insert into 1->lf, 0->splf */ + unsigned int extravalid : 1; /* T/F: extrablk is in use */ + unsigned int extraafter : 1; /* T/F: extrablk is after new */ + xfs_da_state_blk_t extrablk; /* for double-splits on leafs */ + /* for dirv2 extrablk is data */ +} xfs_da_state_t; + +/* + * Utility macros to aid in logging changed structure fields. + */ +#define XFS_DA_LOGOFF(BASE, ADDR) ((char *)(ADDR) - (char *)(BASE)) +#define XFS_DA_LOGRANGE(BASE, ADDR, SIZE) \ + (uint)(XFS_DA_LOGOFF(BASE, ADDR)), \ + (uint)(XFS_DA_LOGOFF(BASE, ADDR)+(SIZE)-1) + + +#ifdef __KERNEL__ +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Routines used for growing the Btree. + */ +int xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, + xfs_dabuf_t **bpp, int whichfork); +int xfs_da_split(xfs_da_state_t *state); + +/* + * Routines used for shrinking the Btree. + */ +int xfs_da_join(xfs_da_state_t *state); +void xfs_da_fixhashpath(xfs_da_state_t *state, + xfs_da_state_path_t *path_to_to_fix); + +/* + * Routines used for finding things in the Btree. + */ +int xfs_da_node_lookup_int(xfs_da_state_t *state, int *result); +int xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, + int forward, int release, int *result); +/* + * Utility routines. + */ +int xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk); +int xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, + xfs_da_state_blk_t *new_blk); + +/* + * Utility routines. 
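XFS_DA_LOGRANGE deliberately expands to two comma-separated values, so one
macro invocation supplies both the first and last byte offsets that
xfs_da_log_buf() takes. A hedged usage sketch (log_node_header is a
hypothetical caller; the types and functions are the patch's own):

void log_node_header(xfs_trans_t *tp, xfs_dabuf_t *bp)
{
	xfs_da_intnode_t *node = bp->data;

	/* logs bytes [offset of hdr, offset of hdr + sizeof(hdr) - 1] */
	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
}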
+ */ +int xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno); +int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp, + xfs_dablk_t bno, xfs_daddr_t mappedbno, + xfs_dabuf_t **bp, int whichfork); +int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp, + xfs_dablk_t bno, xfs_daddr_t mappedbno, + xfs_dabuf_t **bpp, int whichfork); +xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp, + xfs_dablk_t bno, int whichfork); +int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, + xfs_dabuf_t *dead_buf); + +uint xfs_da_hashname(uchar_t *name_string, int name_length); +uint xfs_da_log2_roundup(uint i); +xfs_da_state_t *xfs_da_state_alloc(void); +void xfs_da_state_free(xfs_da_state_t *state); +void xfs_da_state_kill_altpath(xfs_da_state_t *state); + +void xfs_da_buf_done(xfs_dabuf_t *dabuf); +void xfs_da_log_buf(struct xfs_trans *tp, xfs_dabuf_t *dabuf, uint first, + uint last); +void xfs_da_brelse(struct xfs_trans *tp, xfs_dabuf_t *dabuf); +void xfs_da_binval(struct xfs_trans *tp, xfs_dabuf_t *dabuf); +xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf); + +extern struct kmem_zone *xfs_da_state_zone; +#endif /* __KERNEL__ */ + +#endif /* __XFS_DA_BTREE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dfrag.c linux.21rc5-ac2/fs/xfs/xfs_dfrag.c --- linux.21rc5/fs/xfs/xfs_dfrag.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dfrag.c 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,385 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_ag.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_ialloc.h" +#include "xfs_itable.h" +#include "xfs_dfrag.h" +#include "xfs_error.h" +#include "xfs_mac.h" +#include "xfs_rw.h" + +/* + * Syssgi interface for swapext + */ +int +xfs_swapext( + xfs_swapext_t *sxp) +{ + xfs_swapext_t sx; + xfs_inode_t *ip=NULL, *tip=NULL, *ips[2]; + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_bstat_t *sbp; + struct file *fp = NULL, *tfp = NULL; + vnode_t *vp, *tvp; + bhv_desc_t *bdp, *tbdp; + vn_bhv_head_t *bhp, *tbhp; + uint lock_flags=0; + int ilf_fields, tilf_fields; + int error = 0; + xfs_ifork_t tempif, *ifp, *tifp; + __uint64_t tmp; + int aforkblks = 0; + int taforkblks = 0; + int locked = 0; + + if (copy_from_user(&sx, sxp, sizeof(sx))) + return XFS_ERROR(EFAULT); + + /* Pull information for the target fd */ + if (((fp = fget((int)sx.sx_fdtarget)) == NULL) || + ((vp = LINVFS_GET_VP(fp->f_dentry->d_inode)) == NULL)) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + bhp = VN_BHV_HEAD(vp); + bdp = vn_bhv_lookup(bhp, &xfs_vnodeops); + if (bdp == NULL) { + error = XFS_ERROR(EBADF); + goto error0; + } else { + ip = XFS_BHVTOI(bdp); + } + + if (((tfp = fget((int)sx.sx_fdtmp)) == NULL) || + ((tvp = LINVFS_GET_VP(tfp->f_dentry->d_inode)) == NULL)) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + tbhp = VN_BHV_HEAD(tvp); + tbdp = vn_bhv_lookup(tbhp, &xfs_vnodeops); + if (tbdp == NULL) { + error = XFS_ERROR(EBADF); + goto error0; + } else { + tip = XFS_BHVTOI(tbdp); + } + + if (ip->i_ino == tip->i_ino) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + mp = ip->i_mount; + + sbp = &sx.sx_stat; + + if (XFS_FORCED_SHUTDOWN(mp)) { + error = XFS_ERROR(EIO); + goto error0; + } + + locked = 1; + + /* Lock in i_ino order */ + if (ip->i_ino < tip->i_ino) { + ips[0] = ip; + ips[1] = tip; + } else { + ips[0] = tip; + ips[1] = ip; + } + lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL; + xfs_lock_inodes(ips, 2, 0, lock_flags); + + /* Check permissions */ + if ((error = _MAC_XFS_IACCESS(ip, MACWRITE, NULL))) { + goto error0; + } + if ((error = _MAC_XFS_IACCESS(tip, MACWRITE, NULL))) { + goto error0; + } + if ((current->fsuid != ip->i_d.di_uid) && + (error = xfs_iaccess(ip, IWRITE, NULL)) && + !capable_cred(NULL, CAP_FOWNER)) { + goto error0; + } + if ((current->fsuid != tip->i_d.di_uid) && + (error = xfs_iaccess(tip, IWRITE, NULL)) && + !capable_cred(NULL, CAP_FOWNER)) { + goto error0; + } + + /* Verify both files are either real-time or non-realtime */ + if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != + (tip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + /* Should never get a local format */ + if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || + tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + 
error = XFS_ERROR(EINVAL); + goto error0; + } + + if (VN_CACHED(tvp) != 0) + xfs_inval_cached_pages(XFS_ITOV(tip), &(tip->i_iocore), + (loff_t)0, 0, 0); + + /* Verify O_DIRECT for ftmp */ + if (VN_CACHED(tvp) != 0) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + /* Verify all data are being swapped */ + if (sx.sx_offset != 0 || + sx.sx_length != ip->i_d.di_size || + sx.sx_length != tip->i_d.di_size) { + error = XFS_ERROR(EFAULT); + goto error0; + } + + /* + * If the target has extended attributes, the tmp file + * must also in order to ensure the correct data fork + * format. + */ + if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + /* + * Compare the current change & modify times with that + * passed in. If they differ, we abort this swap. + * This is the mechanism used to ensure the calling + * process that the file was not changed out from + * under it. + */ + if ((sbp->bs_ctime.tv_sec != ip->i_d.di_ctime.t_sec) || + (sbp->bs_ctime.tv_nsec != ip->i_d.di_ctime.t_nsec) || + (sbp->bs_mtime.tv_sec != ip->i_d.di_mtime.t_sec) || + (sbp->bs_mtime.tv_nsec != ip->i_d.di_mtime.t_nsec)) { + error = XFS_ERROR(EBUSY); + goto error0; + } + + /* We need to fail if the file is memory mapped. Once we have tossed + * all existing pages, the page fault will have no option + * but to go to the filesystem for pages. By making the page fault call + * VOP_READ (or write in the case of autogrow) they block on the iolock + * until we have switched the extents. + */ + if (VN_MAPPED(vp)) { + error = XFS_ERROR(EBUSY); + goto error0; + } + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_iunlock(tip, XFS_ILOCK_EXCL); + + /* + * There is a race condition here since we gave up the + * ilock. However, the data fork will not change since + * we have the iolock (locked for truncation too) so we + * are safe. We don't really care if non-io related + * fields change. 
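Earlier in this function the two inodes were deliberately sorted by i_ino
before calling xfs_lock_inodes(): taking both locks in one global order is
what makes two concurrent swaps on the same pair of files deadlock-free.
The same discipline as a user-space toy (struct obj and lock_pair are
illustrative names; a != b is assumed, as the i_ino equality check above
already guarantees here):

#include <pthread.h>

struct obj { unsigned long id; pthread_mutex_t lock; };

void lock_pair(struct obj *a, struct obj *b)
{
	struct obj *first  = (a->id < b->id) ? a : b;
	struct obj *second = (a->id < b->id) ? b : a;

	/* everyone acquires in ascending-id order, so no cycle forms */
	pthread_mutex_lock(&first->lock);
	pthread_mutex_lock(&second->lock);
}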
+ */ + + VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF); + + tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT); + if ((error = xfs_trans_reserve(tp, 0, + XFS_ICHANGE_LOG_RES(mp), 0, + 0, 0))) { + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + xfs_iunlock(tip, XFS_IOLOCK_EXCL); + xfs_trans_cancel(tp, 0); + return error; + } + xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); + + /* + * Count the number of extended attribute blocks + */ + if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) && + (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) { + error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks); + if (error) { + xfs_iunlock(ip, lock_flags); + xfs_iunlock(tip, lock_flags); + xfs_trans_cancel(tp, 0); + return error; + } + } + if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) && + (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) { + error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, + &taforkblks); + if (error) { + xfs_iunlock(ip, lock_flags); + xfs_iunlock(tip, lock_flags); + xfs_trans_cancel(tp, 0); + return error; + } + } + + /* + * Swap the data forks of the inodes + */ + ifp = &ip->i_df; + tifp = &tip->i_df; + tempif = *ifp; /* struct copy */ + *ifp = *tifp; /* struct copy */ + *tifp = tempif; /* struct copy */ + + /* + * Fix the on-disk inode values + */ + tmp = (__uint64_t)ip->i_d.di_nblocks; + ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks; + tip->i_d.di_nblocks = tmp + taforkblks - aforkblks; + + tmp = (__uint64_t) ip->i_d.di_nextents; + ip->i_d.di_nextents = tip->i_d.di_nextents; + tip->i_d.di_nextents = tmp; + + tmp = (__uint64_t) ip->i_d.di_format; + ip->i_d.di_format = tip->i_d.di_format; + tip->i_d.di_format = tmp; + + ilf_fields = XFS_ILOG_CORE; + + switch(ip->i_d.di_format) { + case XFS_DINODE_FMT_EXTENTS: + /* If the extents fit in the inode, fix the + * pointer. Otherwise it's already NULL or + * pointing to the extent. + */ + if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) { + ifp->if_u1.if_extents = + ifp->if_u2.if_inline_ext; + } + ilf_fields |= XFS_ILOG_DEXT; + break; + case XFS_DINODE_FMT_BTREE: + ilf_fields |= XFS_ILOG_DBROOT; + break; + } + + tilf_fields = XFS_ILOG_CORE; + + switch(tip->i_d.di_format) { + case XFS_DINODE_FMT_EXTENTS: + /* If the extents fit in the inode, fix the + * pointer. Otherwise it's already NULL or + * pointing to the extent. + */ + if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) { + tifp->if_u1.if_extents = + tifp->if_u2.if_inline_ext; + } + tilf_fields |= XFS_ILOG_DEXT; + break; + case XFS_DINODE_FMT_BTREE: + tilf_fields |= XFS_ILOG_DBROOT; + break; + } + + /* + * Increment vnode ref counts since xfs_trans_commit & + * xfs_trans_cancel will both unlock the inodes and + * decrement the associated ref counts. + */ + VN_HOLD(vp); + VN_HOLD(tvp); + + xfs_trans_ijoin(tp, ip, lock_flags); + xfs_trans_ijoin(tp, tip, lock_flags); + + xfs_trans_log_inode(tp, ip, ilf_fields); + xfs_trans_log_inode(tp, tip, tilf_fields); + + /* + * If this is a synchronous mount, make sure that the + * transaction goes to disk before returning to the user. 
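The if_extents fixup above is the subtle part of the swap: a fork whose
extents fit inline points into its own inode, so after the three struct
copies that pointer still aims at the old owner. A reduced model of the
hazard and the fix (struct fork and swap_forks are toy names; 4 stands in
for XFS_INLINE_EXTS):

struct fork {
	int next;		/* number of extents */
	long *extp;		/* aims at inline_ext[] when next <= 4 */
	long inline_ext[4];	/* inline storage, moved by struct copy */
};

void swap_forks(struct fork *a, struct fork *b)
{
	struct fork tmp = *a;	/* struct copy */

	*a = *b;		/* struct copy */
	*b = tmp;		/* struct copy */
	/* re-aim inline pointers at their new homes */
	if (a->next <= 4)
		a->extp = a->inline_ext;
	if (b->next <= 4)
		b->extp = b->inline_ext;
}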
+ */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT, NULL); + + fput(fp); + fput(tfp); + + return error; + + error0: + if (locked) { + xfs_iunlock(ip, lock_flags); + xfs_iunlock(tip, lock_flags); + } + + if (fp != NULL) fput(fp); + if (tfp != NULL) fput(tfp); + + return error; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dfrag.h linux.21rc5-ac2/fs/xfs/xfs_dfrag.h --- linux.21rc5/fs/xfs/xfs_dfrag.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dfrag.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DFRAG_H__ +#define __XFS_DFRAG_H__ + +/* + * Structure passed to xfs_swapext + */ + +typedef struct xfs_swapext +{ + __int64_t sx_version; /* version */ + __int64_t sx_fdtarget; /* fd of target file */ + __int64_t sx_fdtmp; /* fd of tmp file */ + xfs_off_t sx_offset; /* offset into file */ + xfs_off_t sx_length; /* leng from offset */ + char sx_pad[16]; /* pad space, unused */ + xfs_bstat_t sx_stat; /* stat of target b4 copy */ +} xfs_swapext_t; + +/* + * Version flag + */ +#define XFS_SX_VERSION 0 + +#ifdef __KERNEL__ +/* + * Prototypes for visible xfs_dfrag.c routines. + */ + +/* + * Syscall interface for xfs_swapext + */ +int xfs_swapext(struct xfs_swapext *sx); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_DFRAG_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dinode.h linux.21rc5-ac2/fs/xfs/xfs_dinode.h --- linux.21rc5/fs/xfs/xfs_dinode.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dinode.h 2003-05-07 15:41:09.000000000 +0100 @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DINODE_H__ +#define __XFS_DINODE_H__ + +struct xfs_buf; +struct xfs_mount; + +#define XFS_DINODE_VERSION_1 1 +#define XFS_DINODE_VERSION_2 2 +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DINODE_GOOD_VERSION) +int xfs_dinode_good_version(int v); +#define XFS_DINODE_GOOD_VERSION(v) xfs_dinode_good_version(v) +#else +#define XFS_DINODE_GOOD_VERSION(v) (((v) == XFS_DINODE_VERSION_1) || \ + ((v) == XFS_DINODE_VERSION_2)) +#endif +#define XFS_DINODE_MAGIC 0x494e /* 'IN' */ + +/* + * Disk inode structure. + * This is just the header; the inode is expanded to fill a variable size + * with the last field expanding. It is split into the core and "other" + * because we only need the core part in the in-core inode. + */ +typedef struct xfs_timestamp { + __int32_t t_sec; /* timestamp seconds */ + __int32_t t_nsec; /* timestamp nanoseconds */ +} xfs_timestamp_t; + +/* + * Note: Coordinate changes to this structure with the XFS_DI_* #defines + * below and the offsets table in xfs_ialloc_log_di(). + */ +typedef struct xfs_dinode_core +{ + __uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */ + __uint16_t di_mode; /* mode and type of file */ + __int8_t di_version; /* inode version */ + __int8_t di_format; /* format of di_c data */ + __uint16_t di_onlink; /* old number of links to file */ + __uint32_t di_uid; /* owner's user id */ + __uint32_t di_gid; /* owner's group id */ + __uint32_t di_nlink; /* number of links to file */ + __uint16_t di_projid; /* owner's project id */ + __uint8_t di_pad[10]; /* unused, zeroed space */ + xfs_timestamp_t di_atime; /* time last accessed */ + xfs_timestamp_t di_mtime; /* time last modified */ + xfs_timestamp_t di_ctime; /* time created/inode modified */ + xfs_fsize_t di_size; /* number of bytes in file */ + xfs_drfsbno_t di_nblocks; /* # of direct & btree blocks used */ + xfs_extlen_t di_extsize; /* basic/minimum extent size for file */ + xfs_extnum_t di_nextents; /* number of extents in data fork */ + xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/ + __uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */ + __int8_t di_aformat; /* format of attr fork's data */ + __uint32_t di_dmevmask; /* DMIG event mask */ + __uint16_t di_dmstate; /* DMIG state info */ + __uint16_t di_flags; /* random flags, XFS_DIFLAG_... */ + __uint32_t di_gen; /* generation number */ +} xfs_dinode_core_t; + +typedef struct xfs_dinode +{ + xfs_dinode_core_t di_core; + /* + * In adding anything between the core and the union, be + * sure to update the macros like XFS_LITINO below and + * XFS_BMAP_RBLOCK_DSIZE in xfs_bmap_btree.h. 
+ */ + xfs_agino_t di_next_unlinked;/* agi unlinked list ptr */ + union { + xfs_bmdr_block_t di_bmbt; /* btree root block */ + xfs_bmbt_rec_32_t di_bmx[1]; /* extent list */ + xfs_dir_shortform_t di_dirsf; /* shortform directory */ + xfs_dir2_sf_t di_dir2sf; /* shortform directory v2 */ + char di_c[1]; /* local contents */ + xfs_dev_t di_dev; /* device for IFCHR/IFBLK */ + uuid_t di_muuid; /* mount point value */ + char di_symlink[1]; /* local symbolic link */ + } di_u; + union { + xfs_bmdr_block_t di_abmbt; /* btree root block */ + xfs_bmbt_rec_32_t di_abmx[1]; /* extent list */ + xfs_attr_shortform_t di_attrsf; /* shortform attribute list */ + } di_a; +} xfs_dinode_t; + +/* + * The 32 bit link count in the inode theoretically maxes out at UINT_MAX. + * Since the pathconf interface is signed, we use 2^31 - 1 instead. + * The old inode format had a 16 bit link count, so its maximum is USHRT_MAX. + */ +#define XFS_MAXLINK ((1U << 31) - 1U) +#define XFS_MAXLINK_1 65535U + +/* + * Bit names for logging disk inodes only + */ +#define XFS_DI_MAGIC 0x0000001 +#define XFS_DI_MODE 0x0000002 +#define XFS_DI_VERSION 0x0000004 +#define XFS_DI_FORMAT 0x0000008 +#define XFS_DI_ONLINK 0x0000010 +#define XFS_DI_UID 0x0000020 +#define XFS_DI_GID 0x0000040 +#define XFS_DI_NLINK 0x0000080 +#define XFS_DI_PROJID 0x0000100 +#define XFS_DI_PAD 0x0000200 +#define XFS_DI_ATIME 0x0000400 +#define XFS_DI_MTIME 0x0000800 +#define XFS_DI_CTIME 0x0001000 +#define XFS_DI_SIZE 0x0002000 +#define XFS_DI_NBLOCKS 0x0004000 +#define XFS_DI_EXTSIZE 0x0008000 +#define XFS_DI_NEXTENTS 0x0010000 +#define XFS_DI_NAEXTENTS 0x0020000 +#define XFS_DI_FORKOFF 0x0040000 +#define XFS_DI_AFORMAT 0x0080000 +#define XFS_DI_DMEVMASK 0x0100000 +#define XFS_DI_DMSTATE 0x0200000 +#define XFS_DI_FLAGS 0x0400000 +#define XFS_DI_GEN 0x0800000 +#define XFS_DI_NEXT_UNLINKED 0x1000000 +#define XFS_DI_U 0x2000000 +#define XFS_DI_A 0x4000000 +#define XFS_DI_NUM_BITS 27 +#define XFS_DI_ALL_BITS ((1 << XFS_DI_NUM_BITS) - 1) +#define XFS_DI_CORE_BITS (XFS_DI_ALL_BITS & ~(XFS_DI_U|XFS_DI_A)) + +/* + * Values for di_format + */ +typedef enum xfs_dinode_fmt +{ + XFS_DINODE_FMT_DEV, /* CHR, BLK: di_dev */ + XFS_DINODE_FMT_LOCAL, /* DIR, REG: di_c */ + /* LNK: di_symlink */ + XFS_DINODE_FMT_EXTENTS, /* DIR, REG, LNK: di_bmx */ + XFS_DINODE_FMT_BTREE, /* DIR, REG, LNK: di_bmbt */ + XFS_DINODE_FMT_UUID /* MNT: di_uuid */ +} xfs_dinode_fmt_t; + +/* + * Inode minimum and maximum sizes. + */ +#define XFS_DINODE_MIN_LOG 8 +#define XFS_DINODE_MAX_LOG 11 +#define XFS_DINODE_MIN_SIZE (1 << XFS_DINODE_MIN_LOG) +#define XFS_DINODE_MAX_SIZE (1 << XFS_DINODE_MAX_LOG) + +/* + * Inode size for given fs. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LITINO) +int xfs_litino(struct xfs_mount *mp); +#define XFS_LITINO(mp) xfs_litino(mp) +#else +#define XFS_LITINO(mp) ((mp)->m_litino) +#endif +#define XFS_BROOT_SIZE_ADJ \ + (sizeof(xfs_bmbt_block_t) - sizeof(xfs_bmdr_block_t)) + +/* + * Fork identifiers. Here so utilities can use them without including + * xfs_inode.h. + */ +#define XFS_DATA_FORK 0 +#define XFS_ATTR_FORK 1 + +/* + * Inode data & attribute fork sizes, per inode. 
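A worked rendering of the split the XFS_CFORK_* macros that follow
implement: di_forkoff is stored in units of 8 bytes, zero means "no
attribute fork", the data fork gets [0, forkoff*8) of the literal area and
the attribute fork the remainder up to XFS_LITINO. Plain-function
equivalents (the cfork_* names are illustrative; litino stands in for
XFS_LITINO(mp)):

int cfork_boff(unsigned char forkoff)
{
	return forkoff << 3;		/* 8-byte units to bytes */
}

int cfork_dsize(unsigned char forkoff, int litino)
{
	return forkoff ? cfork_boff(forkoff) : litino;
}

int cfork_asize(unsigned char forkoff, int litino)
{
	return forkoff ? litino - cfork_boff(forkoff) : 0;
}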
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_Q) +int xfs_cfork_q_arch(xfs_dinode_core_t *dcp, xfs_arch_t arch); +int xfs_cfork_q(xfs_dinode_core_t *dcp); +#define XFS_CFORK_Q_ARCH(dcp,arch) xfs_cfork_q_arch(dcp,arch) +#define XFS_CFORK_Q(dcp) xfs_cfork_q(dcp) +#else +#define XFS_CFORK_Q_ARCH(dcp,arch) (!INT_ISZERO((dcp)->di_forkoff, arch)) +#define XFS_CFORK_Q(dcp) ((dcp)->di_forkoff != 0) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_BOFF) +int xfs_cfork_boff_arch(xfs_dinode_core_t *dcp, xfs_arch_t arch); +int xfs_cfork_boff(xfs_dinode_core_t *dcp); +#define XFS_CFORK_BOFF_ARCH(dcp,arch) xfs_cfork_boff_arch(dcp,arch) +#define XFS_CFORK_BOFF(dcp) xfs_cfork_boff(dcp) +#else +#define XFS_CFORK_BOFF_ARCH(dcp,arch) ((int)(INT_GET((dcp)->di_forkoff, arch) << 3)) +#define XFS_CFORK_BOFF(dcp) ((int)((dcp)->di_forkoff << 3)) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_DSIZE) +int xfs_cfork_dsize_arch(xfs_dinode_core_t *dcp, struct xfs_mount *mp, xfs_arch_t arch); +int xfs_cfork_dsize(xfs_dinode_core_t *dcp, struct xfs_mount *mp); +#define XFS_CFORK_DSIZE_ARCH(dcp,mp,arch) xfs_cfork_dsize_arch(dcp,mp,arch) +#define XFS_CFORK_DSIZE(dcp,mp) xfs_cfork_dsize(dcp,mp) +#else +#define XFS_CFORK_DSIZE_ARCH(dcp,mp,arch) \ + (XFS_CFORK_Q_ARCH(dcp, arch) ? XFS_CFORK_BOFF_ARCH(dcp, arch) : XFS_LITINO(mp)) +#define XFS_CFORK_DSIZE(dcp,mp) \ + (XFS_CFORK_Q(dcp) ? XFS_CFORK_BOFF(dcp) : XFS_LITINO(mp)) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_ASIZE) +int xfs_cfork_asize_arch(xfs_dinode_core_t *dcp, struct xfs_mount *mp, xfs_arch_t arch); +int xfs_cfork_asize(xfs_dinode_core_t *dcp, struct xfs_mount *mp); +#define XFS_CFORK_ASIZE_ARCH(dcp,mp,arch) xfs_cfork_asize_arch(dcp,mp,arch) +#define XFS_CFORK_ASIZE(dcp,mp) xfs_cfork_asize(dcp,mp) +#else +#define XFS_CFORK_ASIZE_ARCH(dcp,mp,arch) \ + (XFS_CFORK_Q_ARCH(dcp, arch) ? XFS_LITINO(mp) - XFS_CFORK_BOFF_ARCH(dcp, arch) : 0) +#define XFS_CFORK_ASIZE(dcp,mp) \ + (XFS_CFORK_Q(dcp) ? XFS_LITINO(mp) - XFS_CFORK_BOFF(dcp) : 0) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_SIZE) +int xfs_cfork_size_arch(xfs_dinode_core_t *dcp, struct xfs_mount *mp, int w, xfs_arch_t arch); +int xfs_cfork_size(xfs_dinode_core_t *dcp, struct xfs_mount *mp, int w); +#define XFS_CFORK_SIZE_ARCH(dcp,mp,w,arch) xfs_cfork_size_arch(dcp,mp,w,arch) +#define XFS_CFORK_SIZE(dcp,mp,w) xfs_cfork_size(dcp,mp,w) +#else +#define XFS_CFORK_SIZE_ARCH(dcp,mp,w,arch) \ + ((w) == XFS_DATA_FORK ? \ + XFS_CFORK_DSIZE_ARCH(dcp, mp, arch) : XFS_CFORK_ASIZE_ARCH(dcp, mp, arch)) +#define XFS_CFORK_SIZE(dcp,mp,w) \ + ((w) == XFS_DATA_FORK ? 
\ + XFS_CFORK_DSIZE(dcp, mp) : XFS_CFORK_ASIZE(dcp, mp)) + +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_DSIZE) +int xfs_dfork_dsize_arch(xfs_dinode_t *dip, struct xfs_mount *mp, xfs_arch_t arch); +int xfs_dfork_dsize(xfs_dinode_t *dip, struct xfs_mount *mp); +#define XFS_DFORK_DSIZE_ARCH(dip,mp,arch) xfs_dfork_dsize_arch(dip,mp,arch) +#define XFS_DFORK_DSIZE(dip,mp) xfs_dfork_dsize(dip,mp) +#else +#define XFS_DFORK_DSIZE_ARCH(dip,mp,arch) XFS_CFORK_DSIZE_ARCH(&(dip)->di_core, mp, arch) +#define XFS_DFORK_DSIZE(dip,mp) XFS_DFORK_DSIZE_ARCH(dip,mp,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_ASIZE) +int xfs_dfork_asize_arch(xfs_dinode_t *dip, struct xfs_mount *mp, xfs_arch_t arch); +int xfs_dfork_asize(xfs_dinode_t *dip, struct xfs_mount *mp); +#define XFS_DFORK_ASIZE_ARCH(dip,mp,arch) xfs_dfork_asize_arch(dip,mp,arch) +#define XFS_DFORK_ASIZE(dip,mp) xfs_dfork_asize(dip,mp) +#else +#define XFS_DFORK_ASIZE_ARCH(dip,mp,arch) XFS_CFORK_ASIZE_ARCH(&(dip)->di_core, mp, arch) +#define XFS_DFORK_ASIZE(dip,mp) XFS_DFORK_ASIZE_ARCH(dip,mp,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_SIZE) +int xfs_dfork_size_arch(xfs_dinode_t *dip, struct xfs_mount *mp, int w, xfs_arch_t arch); +int xfs_dfork_size(xfs_dinode_t *dip, struct xfs_mount *mp, int w); +#define XFS_DFORK_SIZE_ARCH(dip,mp,w,arch) xfs_dfork_size_arch(dip,mp,w,arch) +#define XFS_DFORK_SIZE(dip,mp,w) xfs_dfork_size(dip,mp,w) +#else +#define XFS_DFORK_SIZE_ARCH(dip,mp,w,arch) XFS_CFORK_SIZE_ARCH(&(dip)->di_core, mp, w, arch) +#define XFS_DFORK_SIZE(dip,mp,w) XFS_DFORK_SIZE_ARCH(dip,mp,w,ARCH_NOCONVERT) + +#endif + +/* + * Macros for accessing per-fork disk inode information. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_Q) +int xfs_dfork_q_arch(xfs_dinode_t *dip, xfs_arch_t arch); +int xfs_dfork_q(xfs_dinode_t *dip); +#define XFS_DFORK_Q_ARCH(dip,arch) xfs_dfork_q_arch(dip,arch) +#define XFS_DFORK_Q(dip) xfs_dfork_q(dip) +#else +#define XFS_DFORK_Q_ARCH(dip,arch) XFS_CFORK_Q_ARCH(&(dip)->di_core, arch) +#define XFS_DFORK_Q(dip) XFS_DFORK_Q_ARCH(dip,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_BOFF) +int xfs_dfork_boff_arch(xfs_dinode_t *dip, xfs_arch_t arch); +int xfs_dfork_boff(xfs_dinode_t *dip); +#define XFS_DFORK_BOFF_ARCH(dip,arch) xfs_dfork_boff_arch(dip,arch) +#define XFS_DFORK_BOFF(dip) xfs_dfork_boff(dip) +#else +#define XFS_DFORK_BOFF_ARCH(dip,arch) XFS_CFORK_BOFF_ARCH(&(dip)->di_core, arch) +#define XFS_DFORK_BOFF(dip) XFS_DFORK_BOFF_ARCH(dip,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_DPTR) +char *xfs_dfork_dptr_arch(xfs_dinode_t *dip, xfs_arch_t arch); +char *xfs_dfork_dptr(xfs_dinode_t *dip); +#define XFS_DFORK_DPTR_ARCH(dip,arch) xfs_dfork_dptr_arch(dip,arch) +#define XFS_DFORK_DPTR(dip) xfs_dfork_dptr(dip) +#else +#define XFS_DFORK_DPTR_ARCH(dip,arch) ((dip)->di_u.di_c) +#define XFS_DFORK_DPTR(dip) XFS_DFORK_DPTR_ARCH(dip,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_APTR) +char *xfs_dfork_aptr_arch(xfs_dinode_t *dip, xfs_arch_t arch); +char *xfs_dfork_aptr(xfs_dinode_t *dip); +#define XFS_DFORK_APTR_ARCH(dip,arch) xfs_dfork_aptr_arch(dip,arch) +#define XFS_DFORK_APTR(dip) xfs_dfork_aptr(dip) +#else +#define XFS_DFORK_APTR_ARCH(dip,arch) ((dip)->di_u.di_c + XFS_DFORK_BOFF_ARCH(dip, arch)) +#define XFS_DFORK_APTR(dip) XFS_DFORK_APTR_ARCH(dip,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || 
(XFS_WANT_SPACE && XFSSO_XFS_DFORK_PTR) +char *xfs_dfork_ptr_arch(xfs_dinode_t *dip, int w, xfs_arch_t arch); +char *xfs_dfork_ptr(xfs_dinode_t *dip, int w); +#define XFS_DFORK_PTR_ARCH(dip,w,arch) xfs_dfork_ptr_arch(dip,w,arch) +#define XFS_DFORK_PTR(dip,w) xfs_dfork_ptr(dip,w) +#else +#define XFS_DFORK_PTR_ARCH(dip,w,arch) \ + ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR_ARCH(dip, arch) : XFS_DFORK_APTR_ARCH(dip, arch)) +#define XFS_DFORK_PTR(dip,w) XFS_DFORK_PTR_ARCH(dip,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_FORMAT) +int xfs_cfork_format_arch(xfs_dinode_core_t *dcp, int w, xfs_arch_t arch); +int xfs_cfork_format(xfs_dinode_core_t *dcp, int w); +#define XFS_CFORK_FORMAT_ARCH(dcp,w,arch) xfs_cfork_format_arch(dcp,w,arch) +#define XFS_CFORK_FORMAT(dcp,w) xfs_cfork_format(dcp,w) +#else +#define XFS_CFORK_FORMAT_ARCH(dcp,w,arch) \ + ((w) == XFS_DATA_FORK ? INT_GET((dcp)->di_format, arch) : INT_GET((dcp)->di_aformat, arch)) +#define XFS_CFORK_FORMAT(dcp,w) XFS_CFORK_FORMAT_ARCH(dcp,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_FMT_SET) +void xfs_cfork_fmt_set_arch(xfs_dinode_core_t *dcp, int w, int n, xfs_arch_t arch); +void xfs_cfork_fmt_set(xfs_dinode_core_t *dcp, int w, int n); +#define XFS_CFORK_FMT_SET_ARCH(dcp,w,n,arch) xfs_cfork_fmt_set_arch(dcp,w,n,arch) +#define XFS_CFORK_FMT_SET(dcp,w,n) xfs_cfork_fmt_set(dcp,w,n) +#else +#define XFS_CFORK_FMT_SET_ARCH(dcp,w,n,arch) \ + ((w) == XFS_DATA_FORK ? \ + (INT_SET((dcp)->di_format, arch, (n))) : \ + (INT_SET((dcp)->di_aformat, arch, (n)))) +#define XFS_CFORK_FMT_SET(dcp,w,n) XFS_CFORK_FMT_SET_ARCH(dcp,w,n,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_NEXTENTS) +int xfs_cfork_nextents_arch(xfs_dinode_core_t *dcp, int w, xfs_arch_t arch); +int xfs_cfork_nextents(xfs_dinode_core_t *dcp, int w); +#define XFS_CFORK_NEXTENTS_ARCH(dcp,w,arch) xfs_cfork_nextents_arch(dcp,w,arch) +#define XFS_CFORK_NEXTENTS(dcp,w) xfs_cfork_nextents(dcp,w) +#else +#define XFS_CFORK_NEXTENTS_ARCH(dcp,w,arch) \ + ((w) == XFS_DATA_FORK ? INT_GET((dcp)->di_nextents, arch) : INT_GET((dcp)->di_anextents, arch)) +#define XFS_CFORK_NEXTENTS(dcp,w) XFS_CFORK_NEXTENTS_ARCH(dcp,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_NEXT_SET) +void xfs_cfork_next_set_arch(xfs_dinode_core_t *dcp, int w, int n, xfs_arch_t arch); +void xfs_cfork_next_set(xfs_dinode_core_t *dcp, int w, int n); +#define XFS_CFORK_NEXT_SET_ARCH(dcp,w,n,arch) xfs_cfork_next_set_arch(dcp,w,n,arch) +#define XFS_CFORK_NEXT_SET(dcp,w,n) xfs_cfork_next_set(dcp,w,n) +#else +#define XFS_CFORK_NEXT_SET_ARCH(dcp,w,n,arch) \ + ((w) == XFS_DATA_FORK ? 
\ + (INT_SET((dcp)->di_nextents, arch, (n))) : \ + (INT_SET((dcp)->di_anextents, arch, (n)))) +#define XFS_CFORK_NEXT_SET(dcp,w,n) XFS_CFORK_NEXT_SET_ARCH(dcp,w,n,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_FORMAT) +int xfs_dfork_format_arch(xfs_dinode_t *dip, int w, xfs_arch_t arch); +int xfs_dfork_format(xfs_dinode_t *dip, int w); +#define XFS_DFORK_FORMAT_ARCH(dip,w,arch) xfs_dfork_format_arch(dip,w,arch) +#define XFS_DFORK_FORMAT(dip,w) xfs_dfork_format(dip,w) +#else +#define XFS_DFORK_FORMAT_ARCH(dip,w,arch) XFS_CFORK_FORMAT_ARCH(&(dip)->di_core, w, arch) +#define XFS_DFORK_FORMAT(dip,w) XFS_DFORK_FORMAT_ARCH(dip,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_FMT_SET) +void xfs_dfork_fmt_set_arch(xfs_dinode_t *dip, int w, int n, xfs_arch_t arch); +void xfs_dfork_fmt_set(xfs_dinode_t *dip, int w, int n); +#define XFS_DFORK_FMT_SET_ARCH(dip,w,n,arch) xfs_dfork_fmt_set_arch(dip,w,n,arch) +#define XFS_DFORK_FMT_SET(dip,w,n) xfs_dfork_fmt_set(dip,w,n) +#else +#define XFS_DFORK_FMT_SET_ARCH(dip,w,n,arch) XFS_CFORK_FMT_SET_ARCH(&(dip)->di_core, w, n, arch) +#define XFS_DFORK_FMT_SET(dip,w,n) XFS_DFORK_FMT_SET_ARCH(dip,w,n,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_NEXTENTS) +int xfs_dfork_nextents_arch(xfs_dinode_t *dip, int w, xfs_arch_t arch); +int xfs_dfork_nextents(xfs_dinode_t *dip, int w); +#define XFS_DFORK_NEXTENTS_ARCH(dip,w,arch) xfs_dfork_nextents_arch(dip,w,arch) +#define XFS_DFORK_NEXTENTS(dip,w) xfs_dfork_nextents(dip,w) +#else +#define XFS_DFORK_NEXTENTS_ARCH(dip,w,arch) XFS_CFORK_NEXTENTS_ARCH(&(dip)->di_core, w, arch) +#define XFS_DFORK_NEXTENTS(dip,w) XFS_DFORK_NEXTENTS_ARCH(dip,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_NEXT_SET) +void xfs_dfork_next_set_arch(xfs_dinode_t *dip, int w, int n, xfs_arch_t arch); +void xfs_dfork_next_set(xfs_dinode_t *dip, int w, int n); +#define XFS_DFORK_NEXT_SET_ARCH(dip,w,n,arch) xfs_dfork_next_set_arch(dip,w,n,arch) +#define XFS_DFORK_NEXT_SET(dip,w,n) xfs_dfork_next_set(dip,w,n) +#else +#define XFS_DFORK_NEXT_SET_ARCH(dip,w,n,arch) XFS_CFORK_NEXT_SET_ARCH(&(dip)->di_core, w, n, arch) +#define XFS_DFORK_NEXT_SET(dip,w,n) XFS_DFORK_NEXT_SET_ARCH(dip,w,n,ARCH_NOCONVERT) + +#endif + +/* + * File types (mode field) + */ +#define IFMT 0170000 /* type of file */ +#define IFIFO 0010000 /* named pipe (fifo) */ +#define IFCHR 0020000 /* character special */ +#define IFDIR 0040000 /* directory */ +#define IFBLK 0060000 /* block special */ +#define IFREG 0100000 /* regular */ +#define IFLNK 0120000 /* symbolic link */ +#define IFSOCK 0140000 /* socket */ +#define IFMNT 0160000 /* mount point */ + +/* + * File execution and access modes. + */ +#define ISUID 04000 /* set user id on execution */ +#define ISGID 02000 /* set group id on execution */ +#define ISVTX 01000 /* sticky directory */ +#define IREAD 0400 /* read, write, execute permissions */ +#define IWRITE 0200 +#define IEXEC 0100 + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_DINODE) +xfs_dinode_t *xfs_buf_to_dinode(struct xfs_buf *bp); +#define XFS_BUF_TO_DINODE(bp) xfs_buf_to_dinode(bp) +#else +#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)(XFS_BUF_PTR(bp))) +#endif + +/* + * Values for di_flags + * There should be a one-to-one correspondence between these flags and the + * XFS_XFLAG_s. 
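These flag bits (defined just below) are tested with plain masks; for
example, xfs_swapext() earlier in this patch refuses to swap a realtime
file with a non-realtime one. A one-line check in that spirit
(same_realtime is an illustrative name):

int same_realtime(unsigned short f1, unsigned short f2)
{
	return (f1 & XFS_DIFLAG_REALTIME) == (f2 & XFS_DIFLAG_REALTIME);
}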
+ */ +#define XFS_DIFLAG_REALTIME_BIT 0 /* file's blocks come from rt area */ +#define XFS_DIFLAG_PREALLOC_BIT 1 /* file space has been preallocated */ +#define XFS_DIFLAG_NEWRTBM_BIT 2 /* for rtbitmap inode, new format */ +#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) +#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) +#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) +#define XFS_DIFLAG_ALL \ + (XFS_DIFLAG_REALTIME|XFS_DIFLAG_PREALLOC|XFS_DIFLAG_NEWRTBM) + +#endif /* __XFS_DINODE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_block.c linux.21rc5-ac2/fs/xfs/xfs_dir2_block.c --- linux.21rc5/fs/xfs/xfs_dir2_block.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_block.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1249 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_block.c + * XFS V2 directory implementation, single-block form. + * See xfs_dir2_block.h for the format. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_trace.h" +#include "xfs_error.h" + +/* + * Local function prototypes. + */ +static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, xfs_dabuf_t *bp, int first, + int last); +static void xfs_dir2_block_log_tail(xfs_trans_t *tp, xfs_dabuf_t *bp); +static int xfs_dir2_block_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **bpp, + int *entno); +static int xfs_dir2_block_sort(const void *a, const void *b); + +/* + * Add an entry to a block directory. 
+ */ +int /* error */ +xfs_dir2_block_addname( + xfs_da_args_t *args) /* directory op arguments */ +{ + xfs_dir2_data_free_t *bf; /* bestfree table in block */ + xfs_dir2_block_t *block; /* directory block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* buffer for block */ + xfs_dir2_block_tail_t *btp; /* block tail */ + int compact; /* need to compact leaf ents */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* directory inode */ + xfs_dir2_data_unused_t *dup; /* block unused entry */ + int error; /* error return value */ + xfs_dir2_data_unused_t *enddup=NULL; /* unused at end of data */ + xfs_dahash_t hash; /* hash value of found entry */ + int high; /* high index for binary srch */ + int highstale; /* high stale index */ + int lfloghigh=0; /* last final leaf to log */ + int lfloglow=0; /* first final leaf to log */ + int len; /* length of the new entry */ + int low; /* low index for binary srch */ + int lowstale; /* low stale index */ + int mid=0; /* midpoint for binary srch */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log header */ + int needscan; /* need to rescan freespace */ + xfs_dir2_data_off_t *tagp; /* pointer to tag value */ + xfs_trans_t *tp; /* transaction structure */ + + xfs_dir2_trace_args("block_addname", args); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + /* + * Read the (one and only) directory block into dabuf bp. + */ + if ((error = + xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) { + return error; + } + ASSERT(bp != NULL); + block = bp->data; + /* + * Check the magic number, corrupted if wrong. + */ + if (unlikely(INT_GET(block->hdr.magic, ARCH_CONVERT) + != XFS_DIR2_BLOCK_MAGIC)) { + XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", + XFS_ERRLEVEL_LOW, mp, block); + xfs_da_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); + } + len = XFS_DIR2_DATA_ENTSIZE(args->namelen); + /* + * Set up pointers to parts of the block. + */ + bf = block->hdr.bestfree; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * No stale entries? Need space for entry and new leaf. + */ + if (INT_ISZERO(btp->stale, ARCH_CONVERT)) { + /* + * Tag just before the first leaf entry. + */ + tagp = (xfs_dir2_data_off_t *)blp - 1; + /* + * Data object just before the first leaf entry. + */ + enddup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + /* + * If it's not free then can't do this add without cleaning up: + * the space before the first leaf entry needs to be free so it + * can be expanded to hold the pointer to the new entry. + */ + if (INT_GET(enddup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + dup = enddup = NULL; + /* + * Check out the biggest freespace and see if it's the same one. + */ + else { + dup = (xfs_dir2_data_unused_t *) + ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); + if (dup == enddup) { + /* + * It is the biggest freespace, is it too small + * to hold the new leaf too? + */ + if (INT_GET(dup->length, ARCH_CONVERT) < len + (uint)sizeof(*blp)) { + /* + * Yes, we use the second-largest + * entry instead if it works. + */ + if (INT_GET(bf[1].length, ARCH_CONVERT) >= len) + dup = (xfs_dir2_data_unused_t *) + ((char *)block + + INT_GET(bf[1].offset, ARCH_CONVERT)); + else + dup = NULL; + } + } else { + /* + * Not the same free entry, + * just check its length. 
+ */ + if (INT_GET(dup->length, ARCH_CONVERT) < len) { + dup = NULL; + } + } + } + compact = 0; + } + /* + * If there are stale entries we'll use one for the leaf. + * Is the biggest entry enough to avoid compaction? + */ + else if (INT_GET(bf[0].length, ARCH_CONVERT) >= len) { + dup = (xfs_dir2_data_unused_t *) + ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); + compact = 0; + } + /* + * Will need to compact to make this work. + */ + else { + /* + * Tag just before the first leaf entry. + */ + tagp = (xfs_dir2_data_off_t *)blp - 1; + /* + * Data object just before the first leaf entry. + */ + dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + /* + * If it's not free then the data will go where the + * leaf data starts now, if it works at all. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + if (INT_GET(dup->length, ARCH_CONVERT) + (INT_GET(btp->stale, ARCH_CONVERT) - 1) * + (uint)sizeof(*blp) < len) + dup = NULL; + } else if ((INT_GET(btp->stale, ARCH_CONVERT) - 1) * (uint)sizeof(*blp) < len) + dup = NULL; + else + dup = (xfs_dir2_data_unused_t *)blp; + compact = 1; + } + /* + * If this isn't a real add, we're done with the buffer. + */ + if (args->justcheck) + xfs_da_brelse(tp, bp); + /* + * If we don't have space for the new entry & leaf ... + */ + if (!dup) { + /* + * Not trying to actually do anything, or don't have + * a space reservation: return no-space. + */ + if (args->justcheck || args->total == 0) + return XFS_ERROR(ENOSPC); + /* + * Convert to the next larger format. + * Then add the new entry in that format. + */ + error = xfs_dir2_block_to_leaf(args, bp); + xfs_da_buf_done(bp); + if (error) + return error; + return xfs_dir2_leaf_addname(args); + } + /* + * Just checking, and it would work, so say so. + */ + if (args->justcheck) + return 0; + needlog = needscan = 0; + /* + * If need to compact the leaf entries, do it now. + * Leave the highest-numbered stale entry stale. + * XXX should be the one closest to mid but mid is not yet computed. + */ + if (compact) { + int fromidx; /* source leaf index */ + int toidx; /* target leaf index */ + + for (fromidx = toidx = INT_GET(btp->count, ARCH_CONVERT) - 1, + highstale = lfloghigh = -1; + fromidx >= 0; + fromidx--) { + if (INT_GET(blp[fromidx].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { + if (highstale == -1) + highstale = toidx; + else { + if (lfloghigh == -1) + lfloghigh = toidx; + continue; + } + } + if (fromidx < toidx) + blp[toidx] = blp[fromidx]; + toidx--; + } + lfloglow = toidx + 1 - (INT_GET(btp->stale, ARCH_CONVERT) - 1); + lfloghigh -= INT_GET(btp->stale, ARCH_CONVERT) - 1; + INT_MOD(btp->count, ARCH_CONVERT, -(INT_GET(btp->stale, ARCH_CONVERT) - 1)); + xfs_dir2_data_make_free(tp, bp, + (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), + (xfs_dir2_data_aoff_t)((INT_GET(btp->stale, ARCH_CONVERT) - 1) * sizeof(*blp)), + &needlog, &needscan); + blp += INT_GET(btp->stale, ARCH_CONVERT) - 1; + INT_SET(btp->stale, ARCH_CONVERT, 1); + /* + * If we now need to rebuild the bestfree map, do so. + * This needs to happen before the next call to use_free. + */ + if (needscan) { + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, + &needlog, NULL); + needscan = 0; + } + } + /* + * Set leaf logging boundaries to impossible state. + * For the no-stale case they're set explicitly. 
+ */ + else if (INT_GET(btp->stale, ARCH_CONVERT)) { + lfloglow = INT_GET(btp->count, ARCH_CONVERT); + lfloghigh = -1; + } + /* + * Find the slot that's first lower than our hash value, -1 if none. + */ + for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; low <= high; ) { + mid = (low + high) >> 1; + if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) + break; + if (hash < args->hashval) + low = mid + 1; + else + high = mid - 1; + } + while (mid >= 0 && INT_GET(blp[mid].hashval, ARCH_CONVERT) >= args->hashval) { + mid--; + } + /* + * No stale entries, will use enddup space to hold new leaf. + */ + if (INT_ISZERO(btp->stale, ARCH_CONVERT)) { + /* + * Mark the space needed for the new leaf entry, now in use. + */ + xfs_dir2_data_use_free(tp, bp, enddup, + (xfs_dir2_data_aoff_t) + ((char *)enddup - (char *)block + INT_GET(enddup->length, ARCH_CONVERT) - + sizeof(*blp)), + (xfs_dir2_data_aoff_t)sizeof(*blp), + &needlog, &needscan); + /* + * Update the tail (entry count). + */ + INT_MOD(btp->count, ARCH_CONVERT, +1); + /* + * If we now need to rebuild the bestfree map, do so. + * This needs to happen before the next call to use_free. + */ + if (needscan) { + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, + &needlog, NULL); + needscan = 0; + } + /* + * Adjust pointer to the first leaf entry, we're about to move + * the table up one to open up space for the new leaf entry. + * Then adjust our index to match. + */ + blp--; + mid++; + if (mid) + memmove(blp, &blp[1], mid * sizeof(*blp)); + lfloglow = 0; + lfloghigh = mid; + } + /* + * Use a stale leaf for our new entry. + */ + else { + for (lowstale = mid; + lowstale >= 0 && + INT_GET(blp[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; + lowstale--) + continue; + for (highstale = mid + 1; + highstale < INT_GET(btp->count, ARCH_CONVERT) && + INT_GET(blp[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && + (lowstale < 0 || mid - lowstale > highstale - mid); + highstale++) + continue; + /* + * Move entries toward the low-numbered stale entry. + */ + if (lowstale >= 0 && + (highstale == INT_GET(btp->count, ARCH_CONVERT) || + mid - lowstale <= highstale - mid)) { + if (mid - lowstale) + memmove(&blp[lowstale], &blp[lowstale + 1], + (mid - lowstale) * sizeof(*blp)); + lfloglow = MIN(lowstale, lfloglow); + lfloghigh = MAX(mid, lfloghigh); + } + /* + * Move entries toward the high-numbered stale entry. + */ + else { + ASSERT(highstale < INT_GET(btp->count, ARCH_CONVERT)); + mid++; + if (highstale - mid) + memmove(&blp[mid + 1], &blp[mid], + (highstale - mid) * sizeof(*blp)); + lfloglow = MIN(mid, lfloglow); + lfloghigh = MAX(highstale, lfloghigh); + } + INT_MOD(btp->stale, ARCH_CONVERT, -1); + } + /* + * Point to the new data entry. + */ + dep = (xfs_dir2_data_entry_t *)dup; + /* + * Fill in the leaf entry. + */ + INT_SET(blp[mid].hashval, ARCH_CONVERT, args->hashval); + INT_SET(blp[mid].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); + /* + * Mark space for the data entry used. + */ + xfs_dir2_data_use_free(tp, bp, dup, + (xfs_dir2_data_aoff_t)((char *)dup - (char *)block), + (xfs_dir2_data_aoff_t)len, &needlog, &needscan); + /* + * Create the new data entry. 
+ */ + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + dep->namelen = args->namelen; + memcpy(dep->name, args->name, args->namelen); + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + /* + * Clean up the bestfree array and log the header, tail, and entry. + */ + if (needscan) + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, + NULL); + if (needlog) + xfs_dir2_data_log_header(tp, bp); + xfs_dir2_block_log_tail(tp, bp); + xfs_dir2_data_log_entry(tp, bp, dep); + xfs_dir2_data_check(dp, bp); + xfs_da_buf_done(bp); + return 0; +} + +/* + * Readdir for block directories. + */ +int /* error */ +xfs_dir2_block_getdents( + xfs_trans_t *tp, /* transaction (NULL) */ + xfs_inode_t *dp, /* incore inode */ + uio_t *uio, /* caller's buffer control */ + int *eofp, /* eof reached? (out) */ + xfs_dirent_t *dbp, /* caller's buffer */ + xfs_dir2_put_t put) /* abi's formatting function */ +{ + xfs_dir2_block_t *block; /* directory block structure */ + xfs_dabuf_t *bp; /* buffer for block */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_dir2_data_unused_t *dup; /* block unused entry */ + char *endptr; /* end of the data entries */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_put_args_t p; /* arg package for put rtn */ + char *ptr; /* current data entry */ + int wantoff; /* starting block offset */ + + mp = dp->i_mount; + /* + * If the block number in the offset is out of range, we're done. + */ + if (XFS_DIR2_DATAPTR_TO_DB(mp, uio->uio_offset) > mp->m_dirdatablk) { + *eofp = 1; + return 0; + } + /* + * Can't read the block, give up, else get dabuf in bp. + */ + if ((error = + xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) { + return error; + } + ASSERT(bp != NULL); + /* + * Extract the byte offset we start at from the seek pointer. + * We'll skip entries before this. + */ + wantoff = XFS_DIR2_DATAPTR_TO_OFF(mp, uio->uio_offset); + block = bp->data; + xfs_dir2_data_check(dp, bp); + /* + * Set up values for the loop. + */ + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + ptr = (char *)block->u; + endptr = (char *)XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + p.dbp = dbp; + p.put = put; + p.uio = uio; + /* + * Loop over the data portion of the block. + * Each object is a real entry (dep) or an unused one (dup). + */ + while (ptr < endptr) { + dup = (xfs_dir2_data_unused_t *)ptr; + /* + * Unused, skip it. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ptr += INT_GET(dup->length, ARCH_CONVERT); + continue; + } + + dep = (xfs_dir2_data_entry_t *)ptr; + + /* + * Bump pointer for the next iteration. + */ + ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen); + /* + * The entry is before the desired starting point, skip it. + */ + if ((char *)dep - (char *)block < wantoff) + continue; + /* + * Set up argument structure for put routine. + */ + p.namelen = dep->namelen; + + p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + ptr - (char *)block); +#if XFS_BIG_FILESYSTEMS + p.ino = INT_GET(dep->inumber, ARCH_CONVERT) + mp->m_inoadd; +#else + p.ino = INT_GET(dep->inumber, ARCH_CONVERT); +#endif + p.name = (char *)dep->name; + + /* + * Put the entry in the caller's buffer. + */ + error = p.put(&p); + + /* + * If it didn't fit, set the final offset to here & return. 
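The loop above treats the data area as a stream of self-describing records: an unused record announces itself with a free tag and carries the length to skip, while a live one is sized by its own name length. A toy version of that walk, with a deliberately simplified 1-byte header (the on-disk format uses 16-bit tags and lengths; this is illustrative only):

#include <stdio.h>

#define FREE_TAG 0xff	/* toy stand-in for XFS_DIR2_DATA_FREE_TAG */

/*
 * Walk a packed region of variable-length records.  A record starting
 * with FREE_TAG is a hole whose second byte gives the bytes to skip;
 * anything else is a live record: name length, then the name bytes.
 */
static void walk(const unsigned char *p, const unsigned char *end)
{
	while (p < end) {
		if (p[0] == FREE_TAG) {		/* unused: skip its length */
			p += p[1];
			continue;
		}
		printf("%.*s\n", (int)p[0], (const char *)(p + 1));
		p += 1 + p[0];			/* bump past the live entry */
	}
}

int main(void)
{
	/* "ab", a 4-byte hole, then "xyz" */
	unsigned char buf[] = { 2,'a','b', FREE_TAG,4,0,0, 3,'x','y','z' };

	walk(buf, buf + sizeof(buf));
	return 0;
}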
+ */ + if (!p.done) { + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + (char *)dep - (char *)block); + xfs_da_brelse(tp, bp); + return error; + } + } + + /* + * Reached the end of the block. + * Set the offset to a nonexistent block 1 and return. + */ + *eofp = 1; + + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0); + + xfs_da_brelse(tp, bp); + + return 0; +} + +/* + * Log leaf entries from the block. + */ +static void +xfs_dir2_block_log_leaf( + xfs_trans_t *tp, /* transaction structure */ + xfs_dabuf_t *bp, /* block buffer */ + int first, /* index of first logged leaf */ + int last) /* index of last logged leaf */ +{ + xfs_dir2_block_t *block; /* directory block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_mount_t *mp; /* filesystem mount point */ + + mp = tp->t_mountp; + block = bp->data; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)block), + (uint)((char *)&blp[last + 1] - (char *)block - 1)); +} + +/* + * Log the block tail. + */ +static void +xfs_dir2_block_log_tail( + xfs_trans_t *tp, /* transaction structure */ + xfs_dabuf_t *bp) /* block buffer */ +{ + xfs_dir2_block_t *block; /* directory block structure */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_mount_t *mp; /* filesystem mount point */ + + mp = tp->t_mountp; + block = bp->data; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)block), + (uint)((char *)(btp + 1) - (char *)block - 1)); +} + +/* + * Look up an entry in the block. This is the external routine, + * xfs_dir2_block_lookup_int does the real work. + */ +int /* error */ +xfs_dir2_block_lookup( + xfs_da_args_t *args) /* dir lookup arguments */ +{ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* incore inode */ + int ent; /* entry index */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + + xfs_dir2_trace_args("block_lookup", args); + /* + * Get the buffer, look up the entry. + * If not found (ENOENT) then return, have no buffer. + */ + if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) + return error; + dp = args->dp; + mp = dp->i_mount; + block = bp->data; + xfs_dir2_data_check(dp, bp); + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Get the offset from the leaf entry, to point to the data. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + /* + * Fill in inode number, release the block. + */ + args->inumber = INT_GET(dep->inumber, ARCH_CONVERT); + xfs_da_brelse(args->trans, bp); + return XFS_ERROR(EEXIST); +} + +/* + * Internal block lookup routine. 
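The logging helpers that follow all reduce to the same arithmetic: turn an inclusive index range in an array of fixed-size entries into an inclusive byte range within the enclosing block. A minimal sketch with hypothetical names:

#include <stdio.h>

/*
 * Convert the index range [first, last] in an array of `entsize`-byte
 * entries, starting at byte `arroff` of the block, into the inclusive
 * byte range the log helper hands to the buffer-logging code.
 */
static void log_range(size_t entsize, size_t arroff, int first, int last,
		      size_t *firstb, size_t *lastb)
{
	*firstb = arroff + first * entsize;
	*lastb  = arroff + (last + 1) * entsize - 1;
}

int main(void)
{
	size_t fb, lb;

	log_range(8, 4000, 2, 4, &fb, &lb);
	printf("bytes %zu..%zu\n", fb, lb);	/* 4016..4039 */
	return 0;
}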
+ */ +static int /* error */ +xfs_dir2_block_lookup_int( + xfs_da_args_t *args, /* dir lookup arguments */ + xfs_dabuf_t **bpp, /* returned block buffer */ + int *entno) /* returned entry number */ +{ + xfs_dir2_dataptr_t addr; /* data entry address */ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* incore inode */ + int error; /* error return value */ + xfs_dahash_t hash; /* found hash value */ + int high; /* binary search high index */ + int low; /* binary search low index */ + int mid; /* binary search current idx */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + /* + * Read the buffer, return error if we can't get it. + */ + if ((error = + xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) { + return error; + } + ASSERT(bp != NULL); + block = bp->data; + xfs_dir2_data_check(dp, bp); + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Loop doing a binary search for our hash value. + * Find our entry, ENOENT if it's not there. + */ + for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; ; ) { + ASSERT(low <= high); + mid = (low + high) >> 1; + if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) + break; + if (hash < args->hashval) + low = mid + 1; + else + high = mid - 1; + if (low > high) { + ASSERT(args->oknoent); + xfs_da_brelse(tp, bp); + return XFS_ERROR(ENOENT); + } + } + /* + * Back up to the first one with the right hash value. + */ + while (mid > 0 && INT_GET(blp[mid - 1].hashval, ARCH_CONVERT) == args->hashval) { + mid--; + } + /* + * Now loop forward through all the entries with the + * right hash value looking for our name. + */ + do { + if ((addr = INT_GET(blp[mid].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Get pointer to the entry from the leaf. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr)); + /* + * Compare, if it's right give back buffer & entry number. + */ + if (dep->namelen == args->namelen && + dep->name[0] == args->name[0] && + memcmp(dep->name, args->name, args->namelen) == 0) { + *bpp = bp; + *entno = mid; + return 0; + } + } while (++mid < INT_GET(btp->count, ARCH_CONVERT) && INT_GET(blp[mid].hashval, ARCH_CONVERT) == hash); + /* + * No match, release the buffer and return ENOENT. + */ + ASSERT(args->oknoent); + xfs_da_brelse(tp, bp); + return XFS_ERROR(ENOENT); +} + +/* + * Remove an entry from a block format directory. + * If that makes the block small enough to fit in shortform, transform it. 
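The search above is a generic pattern worth isolating: binary-search a sorted array that may hold duplicate keys, then back up to the first entry with the matching key before scanning forward. A self-contained sketch:

#include <stdio.h>

/* Find the first index whose key equals `key`; -1 if absent. */
static int find_first(const unsigned *keys, int n, unsigned key)
{
	int low = 0, high = n - 1, mid = -1;

	while (low <= high) {
		mid = (low + high) >> 1;
		if (keys[mid] == key)
			break;
		if (keys[mid] < key)
			low = mid + 1;
		else
			high = mid - 1;
	}
	if (low > high)
		return -1;			/* not found */
	/* Back up to the first entry with this key. */
	while (mid > 0 && keys[mid - 1] == key)
		mid--;
	return mid;
}

int main(void)
{
	unsigned keys[] = { 3, 7, 7, 7, 9, 12 };

	printf("%d\n", find_first(keys, 6, 7));	/* prints 1 */
	return 0;
}

The forward scan in the lookup routine then compares names until the hash value changes, precisely because several names can share one hash.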
+ */ +int /* error */ +xfs_dir2_block_removename( + xfs_da_args_t *args) /* directory operation args */ +{ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf pointer */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* incore inode */ + int ent; /* block leaf entry index */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log block header */ + int needscan; /* need to fixup bestfree */ + xfs_dir2_sf_hdr_t sfh; /* shortform header */ + int size; /* shortform size */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("block_removename", args); + /* + * Look up the entry in the block. Gets the buffer and entry index. + * It will always be there, the vnodeops level does a lookup first. + */ + if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { + return error; + } + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + block = bp->data; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Point to the data entry using the leaf entry. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + /* + * Mark the data entry's space free. + */ + needlog = needscan = 0; + xfs_dir2_data_make_free(tp, bp, + (xfs_dir2_data_aoff_t)((char *)dep - (char *)block), + XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); + /* + * Fix up the block tail. + */ + INT_MOD(btp->stale, ARCH_CONVERT, +1); + xfs_dir2_block_log_tail(tp, bp); + /* + * Remove the leaf entry by marking it stale. + */ + INT_SET(blp[ent].address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + xfs_dir2_block_log_leaf(tp, bp, ent, ent); + /* + * Fix up bestfree, log the header if necessary. + */ + if (needscan) + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, + NULL); + if (needlog) + xfs_dir2_data_log_header(tp, bp); + xfs_dir2_data_check(dp, bp); + /* + * See if the size as a shortform is good enough. + */ + if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) > + XFS_IFORK_DSIZE(dp)) { + xfs_da_buf_done(bp); + return 0; + } + /* + * If it works, do the conversion. + */ + return xfs_dir2_block_to_sf(args, bp, size, &sfh); +} + +/* + * Replace an entry in a V2 block directory. + * Change the inode number to the new value. + */ +int /* error */ +xfs_dir2_block_replace( + xfs_da_args_t *args) /* directory operation args */ +{ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* incore inode */ + int ent; /* leaf entry index */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + + xfs_dir2_trace_args("block_replace", args); + /* + * Lookup the entry in the directory. Get buffer and entry index. + * This will always succeed since the caller has already done a lookup. + */ + if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { + return error; + } + dp = args->dp; + mp = dp->i_mount; + block = bp->data; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Point to the data entry we need to change. 
+ */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) != args->inumber); + /* + * Change the inode number to the new value. + */ + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + xfs_dir2_data_log_entry(args->trans, bp, dep); + xfs_dir2_data_check(dp, bp); + xfs_da_buf_done(bp); + return 0; +} + +/* + * Qsort comparison routine for the block leaf entries. + */ +static int /* sort order */ +xfs_dir2_block_sort( + const void *a, /* first leaf entry */ + const void *b) /* second leaf entry */ +{ + const xfs_dir2_leaf_entry_t *la; /* first leaf entry */ + const xfs_dir2_leaf_entry_t *lb; /* second leaf entry */ + + la = a; + lb = b; + return INT_GET(la->hashval, ARCH_CONVERT) < INT_GET(lb->hashval, ARCH_CONVERT) ? -1 : + (INT_GET(la->hashval, ARCH_CONVERT) > INT_GET(lb->hashval, ARCH_CONVERT) ? 1 : 0); +} + +/* + * Convert a V2 leaf directory to a V2 block directory if possible. + */ +int /* error */ +xfs_dir2_leaf_to_block( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *lbp, /* leaf buffer */ + xfs_dabuf_t *dbp) /* data buffer */ +{ + xfs_dir2_data_off_t *bestsp; /* leaf bests table */ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* unused data entry */ + int error; /* error return value */ + int from; /* leaf from index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* file system mount point */ + int needlog; /* need to log data header */ + int needscan; /* need to scan for bestfree */ + xfs_dir2_sf_hdr_t sfh; /* shortform header */ + int size; /* bytes used */ + xfs_dir2_data_off_t *tagp; /* end of entry (tag) */ + int to; /* block/leaf to index */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_bb("leaf_to_block", args, lbp, dbp); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + leaf = lbp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + /* + * If there are data blocks other than the first one, take this + * opportunity to remove trailing empty data blocks that may have + * been left behind during no-space-reservation operations. + * These will show up in the leaf bests table. + */ + while (dp->i_d.di_size > mp->m_dirblksize) { + bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT); + if (INT_GET(bestsp[INT_GET(ltp->bestcount, ARCH_CONVERT) - 1], ARCH_CONVERT) == + mp->m_dirblksize - (uint)sizeof(block->hdr)) { + if ((error = + xfs_dir2_leaf_trim_data(args, lbp, + (xfs_dir2_db_t)(INT_GET(ltp->bestcount, ARCH_CONVERT) - 1)))) + goto out; + } else { + error = 0; + goto out; + } + } + /* + * Read the data block if we don't already have it, give up if it fails. + */ + if (dbp == NULL && + (error = xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &dbp, + XFS_DATA_FORK))) { + goto out; + } + block = dbp->data; + ASSERT(INT_GET(block->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + /* + * Size of the "leaf" area in the block. + */ + size = (uint)sizeof(block->tail) + + (uint)sizeof(*lep) * (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); + /* + * Look at the last data entry. 
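The comparator above avoids the tempting "return a - b" shortcut; with unsigned 32-bit hash values the subtraction can wrap, and truncating to int would misorder entries. A minimal standalone version of the same three-way compare (illustrative struct, not the on-disk layout):

#include <stdio.h>
#include <stdlib.h>

struct leaf_ent {
	unsigned hashval;	/* hash of the name */
	unsigned address;	/* where the entry lives */
};

/* Three-way comparison on hashval, as the block sort routine does. */
static int ent_cmp(const void *a, const void *b)
{
	const struct leaf_ent *la = a, *lb = b;

	return la->hashval < lb->hashval ? -1 :
	       (la->hashval > lb->hashval ? 1 : 0);
}

int main(void)
{
	struct leaf_ent e[] = { { 9, 0 }, { 3, 1 }, { 7, 2 } };
	int i;

	qsort(e, 3, sizeof(e[0]), ent_cmp);
	for (i = 0; i < 3; i++)
		printf("%u ", e[i].hashval);
	printf("\n");		/* 3 7 9 */
	return 0;
}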
+ */ + tagp = (xfs_dir2_data_off_t *)((char *)block + mp->m_dirblksize) - 1; + dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + /* + * If it's not free or is too short we can't do it. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG || INT_GET(dup->length, ARCH_CONVERT) < size) { + error = 0; + goto out; + } + /* + * Start converting it to block form. + */ + INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); + needlog = 1; + needscan = 0; + /* + * Use up the space at the end of the block (blp/btp). + */ + xfs_dir2_data_use_free(tp, dbp, dup, mp->m_dirblksize - size, size, + &needlog, &needscan); + /* + * Initialize the block tail. + */ + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + INT_SET(btp->count, ARCH_CONVERT, INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); + INT_ZERO(btp->stale, ARCH_CONVERT); + xfs_dir2_block_log_tail(tp, dbp); + /* + * Initialize the block leaf area. We compact out stale entries. + */ + lep = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + lep[to++] = leaf->ents[from]; + } + ASSERT(to == INT_GET(btp->count, ARCH_CONVERT)); + xfs_dir2_block_log_leaf(tp, dbp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); + /* + * Scan the bestfree if we need it and log the data block header. + */ + if (needscan) + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, + NULL); + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + /* + * Pitch the old leaf block. + */ + error = xfs_da_shrink_inode(args, mp->m_dirleafblk, lbp); + lbp = NULL; + if (error) { + goto out; + } + /* + * Now see if the resulting block can be shrunken to shortform. + */ + if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) > + XFS_IFORK_DSIZE(dp)) { + error = 0; + goto out; + } + return xfs_dir2_block_to_sf(args, dbp, size, &sfh); +out: + if (lbp) + xfs_da_buf_done(lbp); + if (dbp) + xfs_da_buf_done(dbp); + return error; +} + +/* + * Convert the shortform directory to block form. + */ +int /* error */ +xfs_dir2_sf_to_block( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dir2_db_t blkno; /* dir-relative block # (0) */ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail pointer */ + char *buf; /* sf buffer */ + int buf_len; + xfs_dir2_data_entry_t *dep; /* data entry pointer */ + xfs_inode_t *dp; /* incore directory inode */ + int dummy; /* trash */ + xfs_dir2_data_unused_t *dup; /* unused entry pointer */ + int endoffset; /* end of data objects */ + int error; /* error return value */ + int i; /* index */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log block header */ + int needscan; /* need to scan block freespc */ + int newoffset; /* offset from current entry */ + int offset; /* target block offset */ + xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_data_off_t *tagp; /* end of data entry */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("sf_to_block", args); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Bomb out if the shortform directory is way too short. 
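The tagp trick used above rests on an invariant of the data format: every object ends with a 2-byte tag holding the offset at which that object starts, so the last object in a block can be located by reading the block's final two bytes. A toy demonstration (block size and offsets invented for illustration):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t block[64];
	uint16_t tag, start = 40;	/* pretend an object starts at 40 */

	memset(block, 0, sizeof(block));
	/* The object's last two bytes record its own start offset. */
	memcpy(&block[sizeof(block) - 2], &start, 2);

	/* Later: find the last object from the end of the block. */
	memcpy(&tag, &block[sizeof(block) - 2], 2);
	printf("last object starts at %u\n", (unsigned)tag);	/* 40 */
	return 0;
}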
+ */
+	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
+		ASSERT(XFS_FORCED_SHUTDOWN(mp));
+		return XFS_ERROR(EIO);
+	}
+	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+	ASSERT(dp->i_df.if_u1.if_data != NULL);
+	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+	ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count));
+	/*
+	 * Copy the directory into a temporary buffer.
+	 * Then pitch the incore inode data so we can make extents.
+	 */
+
+	buf_len = dp->i_df.if_bytes;
+	buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);
+
+	memcpy(buf, sfp, dp->i_df.if_bytes);
+	xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
+	dp->i_d.di_size = 0;
+	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+	/*
+	 * Reset pointer - old sfp is gone.
+	 */
+	sfp = (xfs_dir2_sf_t *)buf;
+	/*
+	 * Add block 0 to the inode.
+	 */
+	error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
+	if (error) {
+		kmem_free(buf, buf_len);
+		return error;
+	}
+	/*
+	 * Initialize the data block.
+	 */
+	error = xfs_dir2_data_init(args, blkno, &bp);
+	if (error) {
+		kmem_free(buf, buf_len);
+		return error;
+	}
+	block = bp->data;
+	INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC);
+	/*
+	 * Compute size of block "tail" area.
+	 */
+	i = (uint)sizeof(*btp) +
+		(INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
+	/*
+	 * The whole thing is initialized to free by the init routine.
+	 * Say we're using the leaf and tail area.
+	 */
+	dup = (xfs_dir2_data_unused_t *)block->u;
+	needlog = needscan = 0;
+	xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
+		&needscan);
+	ASSERT(needscan == 0);
+	/*
+	 * Fill in the tail.
+	 */
+	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
+	INT_SET(btp->count, ARCH_CONVERT, INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2);	/* ., .. */
+	INT_ZERO(btp->stale, ARCH_CONVERT);
+	blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT);
+	endoffset = (uint)((char *)blp - (char *)block);
+	/*
+	 * Remove the freespace, we'll manage it.
+	 */
+	xfs_dir2_data_use_free(tp, bp, dup,
+		(xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
+		INT_GET(dup->length, ARCH_CONVERT), &needlog, &needscan);
+	/*
+	 * Create entry for .
+	 */
+	dep = (xfs_dir2_data_entry_t *)
+	      ((char *)block + XFS_DIR2_DATA_DOT_OFFSET);
+	INT_SET(dep->inumber, ARCH_CONVERT, dp->i_ino);
+	dep->namelen = 1;
+	dep->name[0] = '.';
+	tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
+	INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block));
+	xfs_dir2_data_log_entry(tp, bp, dep);
+	INT_SET(blp[0].hashval, ARCH_CONVERT, xfs_dir_hash_dot);
+	INT_SET(blp[0].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block));
+	/*
+	 * Create entry for ..
+	 */
+	dep = (xfs_dir2_data_entry_t *)
+		((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
+	INT_SET(dep->inumber, ARCH_CONVERT, XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT));
+	dep->namelen = 2;
+	dep->name[0] = dep->name[1] = '.';
+	tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
+	INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block));
+	xfs_dir2_data_log_entry(tp, bp, dep);
+	INT_SET(blp[1].hashval, ARCH_CONVERT, xfs_dir_hash_dotdot);
+	INT_SET(blp[1].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block));
+	offset = XFS_DIR2_DATA_FIRST_OFFSET;
+	/*
+	 * Loop over existing entries, stuff them in.
+ */ + if ((i = 0) == INT_GET(sfp->hdr.count, ARCH_CONVERT)) + sfep = NULL; + else + sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + /* + * Need to preserve the existing offset values in the sf directory. + * Insert holes (unused entries) where necessary. + */ + while (offset < endoffset) { + /* + * sfep is null when we reach the end of the list. + */ + if (sfep == NULL) + newoffset = endoffset; + else + newoffset = XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT); + /* + * There should be a hole here, make one. + */ + if (offset < newoffset) { + dup = (xfs_dir2_data_unused_t *) + ((char *)block + offset); + INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(dup->length, ARCH_CONVERT, newoffset - offset); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t) + ((char *)dup - (char *)block)); + xfs_dir2_data_log_unused(tp, bp, dup); + (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block, + dup, &dummy); + offset += INT_GET(dup->length, ARCH_CONVERT); + continue; + } + /* + * Copy a real entry. + */ + dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset); + INT_SET(dep->inumber, ARCH_CONVERT, XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT)); + dep->namelen = sfep->namelen; + memcpy(dep->name, sfep->name, dep->namelen); + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + xfs_dir2_data_log_entry(tp, bp, dep); + INT_SET(blp[2 + i].hashval, ARCH_CONVERT, xfs_da_hashname((char *)sfep->name, sfep->namelen)); + INT_SET(blp[2 + i].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, + (char *)dep - (char *)block)); + offset = (int)((char *)(tagp + 1) - (char *)block); + if (++i == INT_GET(sfp->hdr.count, ARCH_CONVERT)) + sfep = NULL; + else + sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); + } + /* Done with the temporary buffer */ + kmem_free(buf, buf_len); + /* + * Sort the leaf entries by hash value. + */ + qsort(blp, INT_GET(btp->count, ARCH_CONVERT), sizeof(*blp), xfs_dir2_block_sort); + /* + * Log the leaf entry area and tail. + * Already logged the header in data_init, ignore needlog. + */ + ASSERT(needscan == 0); + xfs_dir2_block_log_leaf(tp, bp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); + xfs_dir2_block_log_tail(tp, bp); + xfs_dir2_data_check(dp, bp); + xfs_da_buf_done(bp); + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_block.h linux.21rc5-ac2/fs/xfs/xfs_dir2_block.h --- linux.21rc5/fs/xfs/xfs_dir2_block.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_block.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_BLOCK_H__ +#define __XFS_DIR2_BLOCK_H__ + +/* + * xfs_dir2_block.h + * Directory version 2, single block format structures + */ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_dir2_data_hdr; +struct xfs_dir2_leaf_entry; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * The single block format is as follows: + * xfs_dir2_data_hdr_t structure + * xfs_dir2_data_entry_t and xfs_dir2_data_unused_t structures + * xfs_dir2_leaf_entry_t structures + * xfs_dir2_block_tail_t structure + */ + +#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */ + +typedef struct xfs_dir2_block_tail { + __uint32_t count; /* count of leaf entries */ + __uint32_t stale; /* count of stale lf entries */ +} xfs_dir2_block_tail_t; + +/* + * Generic single-block structure, for xfs_db. + */ +typedef struct xfs_dir2_block { + xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_BLOCK_MAGIC */ + xfs_dir2_data_union_t u[1]; + xfs_dir2_leaf_entry_t leaf[1]; + xfs_dir2_block_tail_t tail; +} xfs_dir2_block_t; + +/* + * Pointer to the leaf header embedded in a data block (1-block format) + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BLOCK_TAIL_P) +xfs_dir2_block_tail_t * +xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block); +#define XFS_DIR2_BLOCK_TAIL_P(mp,block) xfs_dir2_block_tail_p(mp,block) +#else +#define XFS_DIR2_BLOCK_TAIL_P(mp,block) \ + (((xfs_dir2_block_tail_t *)((char *)(block) + (mp)->m_dirblksize)) - 1) +#endif + +/* + * Pointer to the leaf entries embedded in a data block (1-block format) + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BLOCK_LEAF_P) +struct xfs_dir2_leaf_entry *xfs_dir2_block_leaf_p_arch( + xfs_dir2_block_tail_t *btp, xfs_arch_t arch); +#define XFS_DIR2_BLOCK_LEAF_P_ARCH(btp,arch) \ + xfs_dir2_block_leaf_p_arch(btp,arch) +#else +#define XFS_DIR2_BLOCK_LEAF_P_ARCH(btp,arch) \ + (((struct xfs_dir2_leaf_entry *)(btp)) - INT_GET((btp)->count, arch)) +#endif + +/* + * Function declarations. 
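Both macros just defined are forms of end-anchored addressing: the tail occupies the last sizeof(tail) bytes of the block, and the leaf array hangs immediately below it, found by indexing backwards from the tail by its count. A small sketch under assumed sizes (4096-byte block, 8-byte leaf entries; names are illustrative, not the XFS types):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct tail {
	uint32_t count;		/* leaf entries below the tail */
	uint32_t stale;		/* how many of them are dead */
};

/* Tail lives in the last sizeof(struct tail) bytes of the block. */
static struct tail *block_tail(void *block, size_t blksize)
{
	return (struct tail *)((char *)block + blksize) - 1;
}

/* Leaf array sits immediately below the tail, indexed backwards. */
static uint64_t *block_leaf(struct tail *t)
{
	return (uint64_t *)t - t->count;
}

int main(void)
{
	size_t blksize = 4096;
	void *block = calloc(1, blksize);
	struct tail *t = block_tail(block, blksize);

	t->count = 8;
	printf("tail at offset %zu, leaf at offset %zu\n",
	       (size_t)((char *)t - (char *)block),
	       (size_t)((char *)block_leaf(t) - (char *)block));
	free(block);
	return 0;			/* tail at 4088, leaf at 4024 */
}

Anchoring the metadata at the block's end is what lets the data area grow upward and the leaf table grow downward until they meet.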
+ */
+
+extern int
+	xfs_dir2_block_addname(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_block_getdents(struct xfs_trans *tp, struct xfs_inode *dp,
+				struct uio *uio, int *eofp, struct xfs_dirent *dbp,
+				xfs_dir2_put_t put);
+
+extern int
+	xfs_dir2_block_lookup(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_block_removename(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_block_replace(struct xfs_da_args *args);
+
+extern int
+	xfs_dir2_leaf_to_block(struct xfs_da_args *args, struct xfs_dabuf *lbp,
+			       struct xfs_dabuf *dbp);
+
+extern int
+	xfs_dir2_sf_to_block(struct xfs_da_args *args);
+
+#endif	/* __XFS_DIR2_BLOCK_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2.c linux.21rc5-ac2/fs/xfs/xfs_dir2.c
--- linux.21rc5/fs/xfs/xfs_dir2.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_dir2.c	2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * XFS v2 directory implementation.
+ * Top-level and utility routines.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir_leaf.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
+#include "xfs_dir2_node.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dir2_trace.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+
+/*
+ * Declarations for interface routines.
+ */ +static void xfs_dir2_mount(xfs_mount_t *mp); +static int xfs_dir2_isempty(xfs_inode_t *dp); +static int xfs_dir2_init(xfs_trans_t *tp, xfs_inode_t *dp, + xfs_inode_t *pdp); +static int xfs_dir2_createname(xfs_trans_t *tp, xfs_inode_t *dp, + char *name, int namelen, xfs_ino_t inum, + xfs_fsblock_t *first, + xfs_bmap_free_t *flist, xfs_extlen_t total); +static int xfs_dir2_lookup(xfs_trans_t *tp, xfs_inode_t *dp, char *name, + int namelen, xfs_ino_t *inum); +static int xfs_dir2_removename(xfs_trans_t *tp, xfs_inode_t *dp, + char *name, int namelen, xfs_ino_t ino, + xfs_fsblock_t *first, + xfs_bmap_free_t *flist, xfs_extlen_t total); +static int xfs_dir2_getdents(xfs_trans_t *tp, xfs_inode_t *dp, uio_t *uio, + int *eofp); +static int xfs_dir2_replace(xfs_trans_t *tp, xfs_inode_t *dp, char *name, + int namelen, xfs_ino_t inum, + xfs_fsblock_t *first, xfs_bmap_free_t *flist, + xfs_extlen_t total); +static int xfs_dir2_canenter(xfs_trans_t *tp, xfs_inode_t *dp, char *name, + int namelen); +static int xfs_dir2_shortform_validate_ondisk(xfs_mount_t *mp, + xfs_dinode_t *dip); + +/* + * Utility routine declarations. + */ +static int xfs_dir2_put_dirent64_direct(xfs_dir2_put_args_t *pa); +static int xfs_dir2_put_dirent64_uio(xfs_dir2_put_args_t *pa); + +/* + * Directory operations vector. + */ +xfs_dirops_t xfsv2_dirops = { + .xd_mount = xfs_dir2_mount, + .xd_isempty = xfs_dir2_isempty, + .xd_init = xfs_dir2_init, + .xd_createname = xfs_dir2_createname, + .xd_lookup = xfs_dir2_lookup, + .xd_removename = xfs_dir2_removename, + .xd_getdents = xfs_dir2_getdents, + .xd_replace = xfs_dir2_replace, + .xd_canenter = xfs_dir2_canenter, + .xd_shortform_validate_ondisk = xfs_dir2_shortform_validate_ondisk, + .xd_shortform_to_single = xfs_dir2_sf_to_block, +}; + +/* + * Interface routines. + */ + +/* + * Initialize directory-related fields in the mount structure. + */ +static void +xfs_dir2_mount( + xfs_mount_t *mp) /* filesystem mount point */ +{ + mp->m_dirversion = 2; + ASSERT((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) <= + XFS_MAX_BLOCKSIZE); + mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog); + mp->m_dirblkfsbs = 1 << mp->m_sb.sb_dirblklog; + mp->m_dirdatablk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_DATA_FIRSTDB(mp)); + mp->m_dirleafblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_LEAF_FIRSTDB(mp)); + mp->m_dirfreeblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_FREE_FIRSTDB(mp)); + mp->m_attr_node_ents = + (mp->m_sb.sb_blocksize - (uint)sizeof(xfs_da_node_hdr_t)) / + (uint)sizeof(xfs_da_node_entry_t); + mp->m_dir_node_ents = + (mp->m_dirblksize - (uint)sizeof(xfs_da_node_hdr_t)) / + (uint)sizeof(xfs_da_node_entry_t); + mp->m_dir_magicpct = (mp->m_dirblksize * 37) / 100; +} + +/* + * Return 1 if directory contains only "." and "..". + */ +static int /* return code */ +xfs_dir2_isempty( + xfs_inode_t *dp) /* incore inode structure */ +{ + xfs_dir2_sf_t *sfp; /* shortform directory structure */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + /* + * Might happen during shutdown. + */ + if (dp->i_d.di_size == 0) { + return 1; + } + if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp)) + return 0; + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + return INT_ISZERO(sfp->hdr.count, ARCH_CONVERT); +} + +/* + * Initialize a directory with its "." and ".." entries. 
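The xfsv2_dirops table above is classic function-pointer dispatch: the v1 and v2 directory formats coexist behind one interface, with the vector chosen once at mount time and every caller going through it. A minimal sketch of the pattern (toy operations, not the real xd_* signatures):

#include <stdio.h>

/* A directory-operations vector in miniature. */
struct dirops {
	int (*isempty)(int nentries);
	int (*lookup)(const char *name);
};

static int v2_isempty(int nentries)
{
	return nentries == 0;
}

static int v2_lookup(const char *name)
{
	printf("lookup %s\n", name);
	return 0;
}

static const struct dirops v2_dirops = {
	.isempty = v2_isempty,
	.lookup  = v2_lookup,
};

int main(void)
{
	const struct dirops *ops = &v2_dirops;	/* chosen at mount time */

	printf("empty: %d\n", ops->isempty(0));
	ops->lookup("foo");
	return 0;
}

The designated-initializer form mirrors the patch: any operation left unset is NULL, which makes missing entries fail loudly rather than silently fall through to the wrong format's code.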
+ */
+static int				/* error */
+xfs_dir2_init(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	xfs_inode_t	*pdp)		/* incore parent directory inode */
+{
+	xfs_da_args_t	args;		/* operation arguments */
+	int		error;		/* error return value */
+
+	memset((char *)&args, 0, sizeof(args));
+	args.dp = dp;
+	args.trans = tp;
+	ASSERT((dp->i_d.di_mode & IFMT) == IFDIR);
+	if ((error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino))) {
+		return error;
+	}
+	return xfs_dir2_sf_create(&args, pdp->i_ino);
+}
+
+/*
+ * Enter a name in a directory.
+ */
+static int				/* error */
+xfs_dir2_createname(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	char		*name,		/* new entry name */
+	int		namelen,	/* new entry name length */
+	xfs_ino_t	inum,		/* new entry inode number */
+	xfs_fsblock_t	*first,		/* bmap's firstblock */
+	xfs_bmap_free_t	*flist,		/* bmap's freeblock list */
+	xfs_extlen_t	total)		/* bmap's total block count */
+{
+	xfs_da_args_t	args;		/* operation arguments */
+	int		rval;		/* return value */
+	int		v;		/* type-checking value */
+
+	ASSERT((dp->i_d.di_mode & IFMT) == IFDIR);
+	if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) {
+		return rval;
+	}
+	XFS_STATS_INC(xfsstats.xs_dir_create);
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = inum;
+	args.dp = dp;
+	args.firstblock = first;
+	args.flist = flist;
+	args.total = total;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = tp;
+	args.justcheck = 0;
+	args.addname = args.oknoent = 1;
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+		rval = xfs_dir2_sf_addname(&args);
+	else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_block_addname(&args);
+	else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+		return rval;
+	} else if (v)
+		rval = xfs_dir2_leaf_addname(&args);
+	else
+		rval = xfs_dir2_node_addname(&args);
+	return rval;
+}
+
+/*
+ * Lookup a name in a directory, give back the inode number.
+ */
+static int				/* error */
+xfs_dir2_lookup(
+	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_inode_t	*dp,		/* incore directory inode */
+	char		*name,		/* lookup name */
+	int		namelen,	/* lookup name length */
+	xfs_ino_t	*inum)		/* out: inode number */
+{
+	xfs_da_args_t	args;		/* operation arguments */
+	int		rval;		/* return value */
+	int		v;		/* type-checking value */
+
+	ASSERT((dp->i_d.di_mode & IFMT) == IFDIR);
+	XFS_STATS_INC(xfsstats.xs_dir_lookup);
+
+	/*
+	 * Fill in the arg structure for this request.
+	 */
+	args.name = name;
+	args.namelen = namelen;
+	args.hashval = xfs_da_hashname(name, namelen);
+	args.inumber = 0;
+	args.dp = dp;
+	args.firstblock = NULL;
+	args.flist = NULL;
+	args.total = 0;
+	args.whichfork = XFS_DATA_FORK;
+	args.trans = tp;
+	args.justcheck = args.addname = 0;
+	args.oknoent = 1;
+	/*
+	 * Decide on what work routines to call based on the inode size.
+ */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_lookup(&args); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_block_lookup(&args); + else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_leaf_lookup(&args); + else + rval = xfs_dir2_node_lookup(&args); + if (rval == EEXIST) + rval = 0; + if (rval == 0) + *inum = args.inumber; + return rval; +} + +/* + * Remove an entry from a directory. + */ +static int /* error */ +xfs_dir2_removename( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + char *name, /* name of entry to remove */ + int namelen, /* name length of entry to remove */ + xfs_ino_t ino, /* inode number of entry to remove */ + xfs_fsblock_t *first, /* bmap's firstblock */ + xfs_bmap_free_t *flist, /* bmap's freeblock list */ + xfs_extlen_t total) /* bmap's total block count */ +{ + xfs_da_args_t args; /* operation arguments */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + XFS_STATS_INC(xfsstats.xs_dir_remove); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = ino; + args.dp = dp; + args.firstblock = first; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = tp; + args.justcheck = args.addname = args.oknoent = 0; + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_removename(&args); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_block_removename(&args); + else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_leaf_removename(&args); + else + rval = xfs_dir2_node_removename(&args); + return rval; +} + +/* + * Read a directory. + */ +static int /* error */ +xfs_dir2_getdents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + uio_t *uio, /* caller's buffer control */ + int *eofp) /* out: eof reached */ +{ + int alignment; /* alignment required for ABI */ + xfs_dirent_t *dbp; /* malloc'ed buffer */ + xfs_dir2_put_t put; /* entry formatting routine */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + XFS_STATS_INC(xfsstats.xs_dir_getdents); + /* + * If our caller has given us a single contiguous aligned memory buffer, + * just work directly within that buffer. If it's in user memory, + * lock it down first. + */ + alignment = sizeof(xfs_off_t) - 1; + if ((uio->uio_iovcnt == 1) && + (((__psint_t)uio->uio_iov[0].iov_base & alignment) == 0) && + ((uio->uio_iov[0].iov_len & alignment) == 0)) { + dbp = NULL; + put = xfs_dir2_put_dirent64_direct; + } else { + dbp = kmem_alloc(sizeof(*dbp) + MAXNAMELEN, KM_SLEEP); + put = xfs_dir2_put_dirent64_uio; + } + + *eofp = 0; + /* + * Decide on what work routines to call based on the inode size. 
+ */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_getdents(dp, uio, eofp, dbp, put); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + ; + } else if (v) + rval = xfs_dir2_block_getdents(tp, dp, uio, eofp, dbp, put); + else + rval = xfs_dir2_leaf_getdents(tp, dp, uio, eofp, dbp, put); + if (dbp != NULL) + kmem_free(dbp, sizeof(*dbp) + MAXNAMELEN); + return rval; +} + +/* + * Replace the inode number of a directory entry. + */ +static int /* error */ +xfs_dir2_replace( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + char *name, /* name of entry to replace */ + int namelen, /* name length of entry to replace */ + xfs_ino_t inum, /* new inode number */ + xfs_fsblock_t *first, /* bmap's firstblock */ + xfs_bmap_free_t *flist, /* bmap's freeblock list */ + xfs_extlen_t total) /* bmap's total block count */ +{ + xfs_da_args_t args; /* operation arguments */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) { + return rval; + } + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = inum; + args.dp = dp; + args.firstblock = first; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = tp; + args.justcheck = args.addname = args.oknoent = 0; + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_replace(&args); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_block_replace(&args); + else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_leaf_replace(&args); + else + rval = xfs_dir2_node_replace(&args); + return rval; +} + +/* + * See if this entry can be added to the directory without allocating space. + */ +static int /* error */ +xfs_dir2_canenter( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + char *name, /* name of entry to add */ + int namelen) /* name length of entry to add */ +{ + xfs_da_args_t args; /* operation arguments */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = 0; + args.dp = dp; + args.firstblock = NULL; + args.flist = NULL; + args.total = 0; + args.whichfork = XFS_DATA_FORK; + args.trans = tp; + args.justcheck = args.addname = args.oknoent = 1; + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_addname(&args); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_block_addname(&args); + else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_leaf_addname(&args); + else + rval = xfs_dir2_node_addname(&args); + return rval; +} + +/* + * Dummy routine for shortform inode validation. + * Can't really do this. 
+ */
+/* ARGSUSED */
+static int				/* error */
+xfs_dir2_shortform_validate_ondisk(
+	xfs_mount_t	*mp,		/* filesystem mount point */
+	xfs_dinode_t	*dip)		/* ondisk inode */
+{
+	return 0;
+}
+
+/*
+ * Utility routines.
+ */
+
+/*
+ * Add a block to the directory.
+ * This routine is for data and free blocks, not leaf/node blocks
+ * which are handled by xfs_da_grow_inode.
+ */
+int					/* error */
+xfs_dir2_grow_inode(
+	xfs_da_args_t	*args,		/* operation arguments */
+	int		space,		/* v2 dir's space XFS_DIR2_xxx_SPACE */
+	xfs_dir2_db_t	*dbp)		/* out: block number added */
+{
+	xfs_fileoff_t	bno;		/* directory offset of new block */
+	int		count;		/* count of filesystem blocks */
+	xfs_inode_t	*dp;		/* incore directory inode */
+	int		error;		/* error return value */
+	int		got;		/* blocks actually mapped */
+	int		i;		/* temp mapping index */
+	xfs_bmbt_irec_t	map;		/* single structure for bmap */
+	int		mapi;		/* mapping index */
+	xfs_bmbt_irec_t	*mapp;		/* bmap mapping structure(s) */
+	xfs_mount_t	*mp;		/* filesystem mount point */
+	int		nmap;		/* number of bmap entries */
+	xfs_trans_t	*tp;		/* transaction pointer */
+
+	xfs_dir2_trace_args_s("grow_inode", args, space);
+	dp = args->dp;
+	tp = args->trans;
+	mp = dp->i_mount;
+	/*
+	 * Set lowest possible block in the space requested.
+	 */
+	bno = XFS_B_TO_FSBT(mp, space * XFS_DIR2_SPACE_SIZE);
+	count = mp->m_dirblkfsbs;
+	/*
+	 * Find the first hole for our block.
+	 */
+	if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, XFS_DATA_FORK))) {
+		return error;
+	}
+	nmap = 1;
+	ASSERT(args->firstblock != NULL);
+	/*
+	 * Try mapping the new block contiguously (one extent).
+	 */
+	if ((error = xfs_bmapi(tp, dp, bno, count,
+			XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
+			args->firstblock, args->total, &map, &nmap,
+			args->flist))) {
+		return error;
+	}
+	ASSERT(nmap <= 1);
+	/*
+	 * Got it in 1.
+	 */
+	if (nmap == 1) {
+		mapp = &map;
+		mapi = 1;
+	}
+	/*
+	 * Didn't work and this is a multiple-fsb directory block.
+	 * Try again with the contiguous flag turned off.
+	 */
+	else if (nmap == 0 && count > 1) {
+		xfs_fileoff_t	b;	/* current file offset */
+
+		/*
+		 * Space for maximum number of mappings.
+		 */
+		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
+		/*
+		 * Iterate until we get to the end of our block.
+		 */
+		for (b = bno, mapi = 0; b < bno + count; ) {
+			int	c;	/* current fsb count */
+
+			/*
+			 * Can't map more than MAX_NMAP at once.
+			 */
+			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
+			c = (int)(bno + count - b);
+			if ((error = xfs_bmapi(tp, dp, b, c,
+					XFS_BMAPI_WRITE|XFS_BMAPI_METADATA,
+					args->firstblock, args->total,
+					&mapp[mapi], &nmap, args->flist))) {
+				kmem_free(mapp, sizeof(*mapp) * count);
+				return error;
+			}
+			if (nmap < 1)
+				break;
+			/*
+			 * Add this bunch into our table, go to the next offset.
+			 */
+			mapi += nmap;
+			b = mapp[mapi - 1].br_startoff +
+			    mapp[mapi - 1].br_blockcount;
+		}
+	}
+	/*
+	 * Didn't work.
+	 */
+	else {
+		mapi = 0;
+		mapp = NULL;
+	}
+	/*
+	 * See how many fsb's we got.
+	 */
+	for (i = 0, got = 0; i < mapi; i++)
+		got += mapp[i].br_blockcount;
+	/*
+	 * Didn't get enough fsb's, or the first/last block's are wrong.
+	 */
+	if (got != count || mapp[0].br_startoff != bno ||
+	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
+	    bno + count) {
+		if (mapp != &map)
+			kmem_free(mapp, sizeof(*mapp) * count);
+		return XFS_ERROR(ENOSPC);
+	}
+	/*
+	 * Done with the temporary mapping table.
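The acceptance check above ("didn't get enough fsb's, or the first/last blocks are wrong") is compact but worth spelling out: because the mappings come back sorted by file offset and non-overlapping, verifying the total length plus the first and last boundaries is enough to prove the extents tile the requested range with no interior holes. A sketch of just that check, with invented types:

#include <stdio.h>

struct extent {
	unsigned start;		/* first block of the mapping */
	unsigned len;		/* blocks in the mapping */
};

/*
 * Check that sorted, non-overlapping mappings exactly tile
 * [bno, bno + count): right total length, right first block,
 * right last block -- the same three tests grow_inode applies.
 */
static int tiles(const struct extent *m, int n, unsigned bno, unsigned count)
{
	unsigned got = 0;
	int i;

	for (i = 0; i < n; i++)
		got += m[i].len;
	return got == count &&
	       m[0].start == bno &&
	       m[n - 1].start + m[n - 1].len == bno + count;
}

int main(void)
{
	struct extent m[] = { { 16, 3 }, { 19, 5 } };

	printf("%d\n", tiles(m, 2, 16, 8));	/* 1: contiguous cover */
	return 0;
}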
+ */ + if (mapp != &map) + kmem_free(mapp, sizeof(*mapp) * count); + *dbp = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)bno); + /* + * Update file's size if this is the data space and it grew. + */ + if (space == XFS_DIR2_DATA_SPACE) { + xfs_fsize_t size; /* directory file (data) size */ + + size = XFS_FSB_TO_B(mp, bno + count); + if (size > dp->i_d.di_size) { + dp->i_d.di_size = size; + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + } + } + return 0; +} + +/* + * See if the directory is a single-block form directory. + */ +int /* error */ +xfs_dir2_isblock( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + int *vp) /* out: 1 is block, 0 is not block */ +{ + xfs_fileoff_t last; /* last file offset */ + xfs_mount_t *mp; /* filesystem mount point */ + int rval; /* return value */ + + mp = dp->i_mount; + if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK))) { + return rval; + } + rval = XFS_FSB_TO_B(mp, last) == mp->m_dirblksize; + ASSERT(rval == 0 || dp->i_d.di_size == mp->m_dirblksize); + *vp = rval; + return 0; +} + +/* + * See if the directory is a single-leaf form directory. + */ +int /* error */ +xfs_dir2_isleaf( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + int *vp) /* out: 1 is leaf, 0 is not leaf */ +{ + xfs_fileoff_t last; /* last file offset */ + xfs_mount_t *mp; /* filesystem mount point */ + int rval; /* return value */ + + mp = dp->i_mount; + if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK))) { + return rval; + } + *vp = last == mp->m_dirleafblk + (1 << mp->m_sb.sb_dirblklog); + return 0; +} + +/* + * Getdents put routine for 64-bit ABI, direct form. + */ +static int /* error */ +xfs_dir2_put_dirent64_direct( + xfs_dir2_put_args_t *pa) /* argument bundle */ +{ + xfs_dirent_t *idbp; /* dirent pointer */ + iovec_t *iovp; /* io vector */ + int namelen; /* entry name length */ + int reclen; /* entry total length */ + uio_t *uio; /* I/O control */ + + namelen = pa->namelen; + reclen = DIRENTSIZE(namelen); + uio = pa->uio; + /* + * Won't fit in the remaining space. + */ + if (reclen > uio->uio_resid) { + pa->done = 0; + return 0; + } + iovp = uio->uio_iov; + idbp = (xfs_dirent_t *)iovp->iov_base; + iovp->iov_base = (char *)idbp + reclen; + iovp->iov_len -= reclen; + uio->uio_resid -= reclen; + idbp->d_reclen = reclen; + idbp->d_ino = pa->ino; + idbp->d_off = pa->cook; + idbp->d_name[namelen] = '\0'; + pa->done = 1; + memcpy(idbp->d_name, pa->name, namelen); + return 0; +} + +/* + * Getdents put routine for 64-bit ABI, uio form. + */ +static int /* error */ +xfs_dir2_put_dirent64_uio( + xfs_dir2_put_args_t *pa) /* argument bundle */ +{ + xfs_dirent_t *idbp; /* dirent pointer */ + int namelen; /* entry name length */ + int reclen; /* entry total length */ + int rval; /* return value */ + uio_t *uio; /* I/O control */ + + namelen = pa->namelen; + reclen = DIRENTSIZE(namelen); + uio = pa->uio; + /* + * Won't fit in the remaining space. + */ + if (reclen > uio->uio_resid) { + pa->done = 0; + return 0; + } + idbp = pa->dbp; + idbp->d_reclen = reclen; + idbp->d_ino = pa->ino; + idbp->d_off = pa->cook; + idbp->d_name[namelen] = '\0'; + memcpy(idbp->d_name, pa->name, namelen); + rval = uiomove((caddr_t)idbp, reclen, UIO_READ, uio); + pa->done = (rval == 0); + return rval; +} + +/* + * Remove the given block from the directory. + * This routine is used for data and free blocks, leaf/node are done + * by xfs_da_shrink_inode. 
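Both put routines above share one shape: compute the padded record length, refuse the record if it exceeds the space remaining, otherwise write the fixed header, the name, and the terminating NUL, then advance by the padded length. A self-contained sketch with a toy 2-byte header and 8-byte alignment (the real DIRENTSIZE covers a larger fixed header; this encoding is illustrative only):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/*
 * Pack one variable-length record into the caller's buffer, or return
 * 0 if it won't fit so the caller can stop and report its position.
 * Record: 2-byte reclen, the name, a NUL, padded to 8 bytes.
 */
static size_t put_name(char *buf, size_t resid, const char *name)
{
	size_t namelen = strlen(name);
	size_t reclen = (2 + namelen + 1 + 7) & ~(size_t)7;

	if (reclen > resid)
		return 0;			/* won't fit */
	memset(buf, 0, reclen);			/* pad + NUL in one go */
	*(uint16_t *)buf = (uint16_t)reclen;
	memcpy(buf + 2, name, namelen);
	return reclen;
}

int main(void)
{
	char buf[32];
	size_t used = 0;

	used += put_name(buf + used, sizeof(buf) - used, "foo");
	used += put_name(buf + used, sizeof(buf) - used, "longername");
	printf("packed %zu bytes\n", used);	/* 8 + 16 = 24 */
	return 0;
}

The "didn't fit" case maps onto p.done above: the caller records the current cookie in the seek offset so the next getdents call resumes at exactly the entry that was refused.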
+ */ +int +xfs_dir2_shrink_inode( + xfs_da_args_t *args, /* operation arguments */ + xfs_dir2_db_t db, /* directory block number */ + xfs_dabuf_t *bp) /* block's buffer */ +{ + xfs_fileoff_t bno; /* directory file offset */ + xfs_dablk_t da; /* directory file offset */ + int done; /* bunmap is finished */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_db("shrink_inode", args, db, bp); + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + da = XFS_DIR2_DB_TO_DA(mp, db); + /* + * Unmap the fsblock(s). + */ + if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs, + XFS_BMAPI_METADATA, 0, args->firstblock, args->flist, + &done))) { + /* + * ENOSPC actually can happen if we're in a removename with + * no space reservation, and the resulting block removal + * would cause a bmap btree split or conversion from extents + * to btree. This can only happen for un-fragmented + * directory blocks, since you need to be punching out + * the middle of an extent. + * In this case we need to leave the block in the file, + * and not binval it. + * So the block has to be in a consistent empty state + * and appropriately logged. + * We don't free up the buffer, the caller can tell it + * hasn't happened since it got an error back. + */ + return error; + } + ASSERT(done); + /* + * Invalidate the buffer from the transaction. + */ + xfs_da_binval(tp, bp); + /* + * If it's not a data block, we're done. + */ + if (db >= XFS_DIR2_LEAF_FIRSTDB(mp)) + return 0; + /* + * If the block isn't the last one in the directory, we're done. + */ + if (dp->i_d.di_size > XFS_DIR2_DB_OFF_TO_BYTE(mp, db + 1, 0)) + return 0; + bno = da; + if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) { + /* + * This can't really happen unless there's kernel corruption. + */ + return error; + } + if (db == mp->m_dirdatablk) + ASSERT(bno == 0); + else + ASSERT(bno > 0); + /* + * Set the size to the new last block. + */ + dp->i_d.di_size = XFS_FSB_TO_B(mp, bno); + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_data.c linux.21rc5-ac2/fs/xfs/xfs_dir2_data.c --- linux.21rc5/fs/xfs/xfs_dir2_data.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_data.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,855 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_data.c + * Core data block handling routines for XFS V2 directories. + * See xfs_dir2_data.h for data structures. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_error.h" + +#ifdef DEBUG +/* + * Check the consistency of the data block. + * The input can also be a block-format directory. + * Pop an assert if we find anything bad. + */ +void +xfs_dir2_data_check( + xfs_inode_t *dp, /* incore inode pointer */ + xfs_dabuf_t *bp) /* data block's buffer */ +{ + xfs_dir2_dataptr_t addr; /* addr for leaf lookup */ + xfs_dir2_data_free_t *bf; /* bestfree table */ + xfs_dir2_block_tail_t *btp=NULL; /* block tail */ + int count; /* count of entries found */ + xfs_dir2_data_t *d; /* data block pointer */ + xfs_dir2_data_entry_t *dep; /* data entry */ + xfs_dir2_data_free_t *dfp; /* bestfree entry */ + xfs_dir2_data_unused_t *dup; /* unused entry */ + char *endp; /* end of useful data */ + int freeseen; /* mask of bestfrees seen */ + xfs_dahash_t hash; /* hash of current name */ + int i; /* leaf index */ + int lastfree; /* last entry was unused */ + xfs_dir2_leaf_entry_t *lep=NULL; /* block leaf entries */ + xfs_mount_t *mp; /* filesystem mount point */ + char *p; /* current data position */ + int stale; /* count of stale leaves */ + + mp = dp->i_mount; + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + bf = d->hdr.bestfree; + p = (char *)d->u; + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); + lep = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + endp = (char *)lep; + } else + endp = (char *)d + mp->m_dirblksize; + count = lastfree = freeseen = 0; + /* + * Account for zero bestfree entries. + */ + if (INT_ISZERO(bf[0].length, ARCH_CONVERT)) { + ASSERT(INT_ISZERO(bf[0].offset, ARCH_CONVERT)); + freeseen |= 1 << 0; + } + if (INT_ISZERO(bf[1].length, ARCH_CONVERT)) { + ASSERT(INT_ISZERO(bf[1].offset, ARCH_CONVERT)); + freeseen |= 1 << 1; + } + if (INT_ISZERO(bf[2].length, ARCH_CONVERT)) { + ASSERT(INT_ISZERO(bf[2].offset, ARCH_CONVERT)); + freeseen |= 1 << 2; + } + ASSERT(INT_GET(bf[0].length, ARCH_CONVERT) >= INT_GET(bf[1].length, ARCH_CONVERT)); + ASSERT(INT_GET(bf[1].length, ARCH_CONVERT) >= INT_GET(bf[2].length, ARCH_CONVERT)); + /* + * Loop over the data/unused entries. + */ + while (p < endp) { + dup = (xfs_dir2_data_unused_t *)p; + /* + * If it's unused, look for the space in the bestfree table. 
+ * If we find it, account for that, else make sure it + * doesn't need to be there. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ASSERT(lastfree == 0); + ASSERT(INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT) == + (char *)dup - (char *)d); + dfp = xfs_dir2_data_freefind(d, dup); + if (dfp) { + i = (int)(dfp - bf); + ASSERT((freeseen & (1 << i)) == 0); + freeseen |= 1 << i; + } else + ASSERT(INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(bf[2].length, ARCH_CONVERT)); + p += INT_GET(dup->length, ARCH_CONVERT); + lastfree = 1; + continue; + } + /* + * It's a real entry. Validate the fields. + * If this is a block directory then make sure it's + * in the leaf section of the block. + * The linear search is crude but this is DEBUG code. + */ + dep = (xfs_dir2_data_entry_t *)p; + ASSERT(dep->namelen != 0); + ASSERT(xfs_dir_ino_validate(mp, INT_GET(dep->inumber, ARCH_CONVERT)) == 0); + ASSERT(INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT) == + (char *)dep - (char *)d); + count++; + lastfree = 0; + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + (xfs_dir2_data_aoff_t) + ((char *)dep - (char *)d)); + hash = xfs_da_hashname((char *)dep->name, dep->namelen); + for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { + if (INT_GET(lep[i].address, ARCH_CONVERT) == addr && + INT_GET(lep[i].hashval, ARCH_CONVERT) == hash) + break; + } + ASSERT(i < INT_GET(btp->count, ARCH_CONVERT)); + } + p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); + } + /* + * Need to have seen all the entries and all the bestfree slots. + */ + ASSERT(freeseen == 7); + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + for (i = stale = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { + if (INT_GET(lep[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + stale++; + if (i > 0) + ASSERT(INT_GET(lep[i].hashval, ARCH_CONVERT) >= INT_GET(lep[i - 1].hashval, ARCH_CONVERT)); + } + ASSERT(count == INT_GET(btp->count, ARCH_CONVERT) - INT_GET(btp->stale, ARCH_CONVERT)); + ASSERT(stale == INT_GET(btp->stale, ARCH_CONVERT)); + } +} +#endif + +/* + * Given a data block and an unused entry from that block, + * return the bestfree entry if any that corresponds to it. + */ +xfs_dir2_data_free_t * +xfs_dir2_data_freefind( + xfs_dir2_data_t *d, /* data block */ + xfs_dir2_data_unused_t *dup) /* data unused entry */ +{ + xfs_dir2_data_free_t *dfp; /* bestfree entry */ + xfs_dir2_data_aoff_t off; /* offset value needed */ +#if defined(DEBUG) && defined(__KERNEL__) + int matched; /* matched the value */ + int seenzero; /* saw a 0 bestfree entry */ +#endif + + off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)d); +#if defined(DEBUG) && defined(__KERNEL__) + /* + * Validate some consistency in the bestfree table. + * Check order, non-overlapping entries, and if we find the + * one we're looking for it has to be exact. 
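+ * For illustration only (assuming a freshly initialized 4 KB
+ * directory block), a consistent table looks like:
+ *
+ *	bestfree[0] = { offset 16, length 4080 }
+ *	bestfree[1] = { offset 0,  length 0 }
+ *	bestfree[2] = { offset 0,  length 0 }
+ *
+ * Lengths run in descending order and all-zero slots trail the
+ * real ones, which is what the loop below asserts.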
+ */ + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0; + dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; + dfp++) { + if (INT_ISZERO(dfp->offset, ARCH_CONVERT)) { + ASSERT(INT_ISZERO(dfp->length, ARCH_CONVERT)); + seenzero = 1; + continue; + } + ASSERT(seenzero == 0); + if (INT_GET(dfp->offset, ARCH_CONVERT) == off) { + matched = 1; + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(dup->length, ARCH_CONVERT)); + } else if (off < INT_GET(dfp->offset, ARCH_CONVERT)) + ASSERT(off + INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(dfp->offset, ARCH_CONVERT)); + else + ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) + INT_GET(dfp->length, ARCH_CONVERT) <= off); + ASSERT(matched || INT_GET(dfp->length, ARCH_CONVERT) >= INT_GET(dup->length, ARCH_CONVERT)); + if (dfp > &d->hdr.bestfree[0]) + ASSERT(INT_GET(dfp[-1].length, ARCH_CONVERT) >= INT_GET(dfp[0].length, ARCH_CONVERT)); + } +#endif + /* + * If this is smaller than the smallest bestfree entry, + * it can't be there since they're sorted. + */ + if (INT_GET(dup->length, ARCH_CONVERT) < INT_GET(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length, ARCH_CONVERT)) + return NULL; + /* + * Look at the three bestfree entries for our guy. + */ + for (dfp = &d->hdr.bestfree[0]; + dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; + dfp++) { + if (INT_ISZERO(dfp->offset, ARCH_CONVERT)) + return NULL; + if (INT_GET(dfp->offset, ARCH_CONVERT) == off) + return dfp; + } + /* + * Didn't find it. This only happens if there are duplicate lengths. + */ + return NULL; +} + +/* + * Insert an unused-space entry into the bestfree table. + */ +xfs_dir2_data_free_t * /* entry inserted */ +xfs_dir2_data_freeinsert( + xfs_dir2_data_t *d, /* data block pointer */ + xfs_dir2_data_unused_t *dup, /* unused space */ + int *loghead) /* log the data header (out) */ +{ + xfs_dir2_data_free_t *dfp; /* bestfree table pointer */ + xfs_dir2_data_free_t new; /* new bestfree entry */ + +#ifdef __KERNEL__ + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); +#endif + dfp = d->hdr.bestfree; + INT_COPY(new.length, dup->length, ARCH_CONVERT); + INT_SET(new.offset, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dup - (char *)d)); + /* + * Insert at position 0, 1, or 2; or not at all. + */ + if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[0].length, ARCH_CONVERT)) { + dfp[2] = dfp[1]; + dfp[1] = dfp[0]; + dfp[0] = new; + *loghead = 1; + return &dfp[0]; + } + if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[1].length, ARCH_CONVERT)) { + dfp[2] = dfp[1]; + dfp[1] = new; + *loghead = 1; + return &dfp[1]; + } + if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[2].length, ARCH_CONVERT)) { + dfp[2] = new; + *loghead = 1; + return &dfp[2]; + } + return NULL; +} + +/* + * Remove a bestfree entry from the table. + */ +void +xfs_dir2_data_freeremove( + xfs_dir2_data_t *d, /* data block pointer */ + xfs_dir2_data_free_t *dfp, /* bestfree entry pointer */ + int *loghead) /* out: log data header */ +{ +#ifdef __KERNEL__ + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); +#endif + /* + * It's the first entry, slide the next 2 up. 
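+ * Sketch with made-up lengths: removing slot 0 from
+ *	{ 200, 96, 48 }  gives  { 96, 48, 0 }
+ * so the descending-length order survives and the freed third
+ * slot is zeroed at the bottom of this function.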
+ */ + if (dfp == &d->hdr.bestfree[0]) { + d->hdr.bestfree[0] = d->hdr.bestfree[1]; + d->hdr.bestfree[1] = d->hdr.bestfree[2]; + } + /* + * It's the second entry, slide the 3rd entry up. + */ + else if (dfp == &d->hdr.bestfree[1]) + d->hdr.bestfree[1] = d->hdr.bestfree[2]; + /* + * Must be the last entry. + */ + else + ASSERT(dfp == &d->hdr.bestfree[2]); + /* + * Clear the 3rd entry, must be zero now. + */ + INT_ZERO(d->hdr.bestfree[2].length, ARCH_CONVERT); + INT_ZERO(d->hdr.bestfree[2].offset, ARCH_CONVERT); + *loghead = 1; +} + +/* + * Given a data block, reconstruct its bestfree map. + */ +void +xfs_dir2_data_freescan( + xfs_mount_t *mp, /* filesystem mount point */ + xfs_dir2_data_t *d, /* data block pointer */ + int *loghead, /* out: log data header */ + char *aendp) /* in: caller's endp */ +{ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* active data entry */ + xfs_dir2_data_unused_t *dup; /* unused data entry */ + char *endp; /* end of block's data */ + char *p; /* current entry pointer */ + +#ifdef __KERNEL__ + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); +#endif + /* + * Start by clearing the table. + */ + memset(d->hdr.bestfree, 0, sizeof(d->hdr.bestfree)); + *loghead = 1; + /* + * Set up pointers. + */ + p = (char *)d->u; + if (aendp) + endp = aendp; + else if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); + endp = (char *)XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + } else + endp = (char *)d + mp->m_dirblksize; + /* + * Loop over the block's entries. + */ + while (p < endp) { + dup = (xfs_dir2_data_unused_t *)p; + /* + * If it's a free entry, insert it. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ASSERT((char *)dup - (char *)d == + INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT)); + xfs_dir2_data_freeinsert(d, dup, loghead); + p += INT_GET(dup->length, ARCH_CONVERT); + } + /* + * For active entries, check their tags and skip them. + */ + else { + dep = (xfs_dir2_data_entry_t *)p; + ASSERT((char *)dep - (char *)d == + INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT)); + p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); + } + } +} + +/* + * Initialize a data block at the given block number in the directory. + * Give back the buffer for the created block. + */ +int /* error */ +xfs_dir2_data_init( + xfs_da_args_t *args, /* directory operation args */ + xfs_dir2_db_t blkno, /* logical dir block number */ + xfs_dabuf_t **bpp) /* output block buffer */ +{ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_data_t *d; /* pointer to block */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* unused entry pointer */ + int error; /* error return value */ + int i; /* bestfree index */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + int t; /* temp */ + + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + /* + * Get the buffer set up for the block. + */ + error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, blkno), -1, &bp, + XFS_DATA_FORK); + if (error) { + return error; + } + ASSERT(bp != NULL); + /* + * Initialize the header. 
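+ * Resulting layout, sketched for a 4 KB directory block:
+ *
+ *	hdr.magic          = XFS_DIR2_DATA_MAGIC ("XD2D")
+ *	hdr.bestfree[0]    = { offset 16, length 4080 }
+ *	hdr.bestfree[1..2] = { 0, 0 }
+ *	u[0].unused        = free entry spanning bytes 16..4095,
+ *	                     freetag 0xffff, tag word = 16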
+ */ + d = bp->data; + INT_SET(d->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); + INT_SET(d->hdr.bestfree[0].offset, ARCH_CONVERT, (xfs_dir2_data_off_t)sizeof(d->hdr)); + for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) { + INT_ZERO(d->hdr.bestfree[i].length, ARCH_CONVERT); + INT_ZERO(d->hdr.bestfree[i].offset, ARCH_CONVERT); + } + /* + * Set up an unused entry for the block's body. + */ + dup = &d->u[0].unused; + INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + + t=mp->m_dirblksize - (uint)sizeof(d->hdr); + INT_SET(d->hdr.bestfree[0].length, ARCH_CONVERT, t); + INT_SET(dup->length, ARCH_CONVERT, t); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)dup - (char *)d)); + /* + * Log it and return it. + */ + xfs_dir2_data_log_header(tp, bp); + xfs_dir2_data_log_unused(tp, bp, dup); + *bpp = bp; + return 0; +} + +/* + * Log an active data entry from the block. + */ +void +xfs_dir2_data_log_entry( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* block buffer */ + xfs_dir2_data_entry_t *dep) /* data entry pointer */ +{ + xfs_dir2_data_t *d; /* data block pointer */ + + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d), + (uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) - + (char *)d - 1)); +} + +/* + * Log a data block header. + */ +void +xfs_dir2_data_log_header( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp) /* block buffer */ +{ + xfs_dir2_data_t *d; /* data block pointer */ + + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d), + (uint)(sizeof(d->hdr) - 1)); +} + +/* + * Log a data unused entry. + */ +void +xfs_dir2_data_log_unused( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* block buffer */ + xfs_dir2_data_unused_t *dup) /* data unused pointer */ +{ + xfs_dir2_data_t *d; /* data block pointer */ + + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + /* + * Log the first part of the unused entry. + */ + xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)d), + (uint)((char *)&dup->length + sizeof(dup->length) - + 1 - (char *)d)); + /* + * Log the end (tag) of the unused entry. + */ + xfs_da_log_buf(tp, bp, + (uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT) - (char *)d), + (uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT) - (char *)d + + sizeof(xfs_dir2_data_off_t) - 1)); +} + +/* + * Make a byte range in the data block unused. + * Its current contents are unimportant. 
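+ * Four cases, depending on whether the range being freed touches
+ * existing unused entries:
+ *	prev and next free:	merge all three into one entry
+ *	only prev free:		extend the previous entry
+ *	only next free:		new entry absorbs the next one
+ *	neither free:		make a brand-new unused entry
+ * In each case the bestfree table is patched up directly, or
+ * *needscanp is set so the caller rebuilds it with
+ * xfs_dir2_data_freescan().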
+ */ +void +xfs_dir2_data_make_free( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* block buffer */ + xfs_dir2_data_aoff_t offset, /* starting byte offset */ + xfs_dir2_data_aoff_t len, /* length in bytes */ + int *needlogp, /* out: log header */ + int *needscanp) /* out: regen bestfree */ +{ + xfs_dir2_data_t *d; /* data block pointer */ + xfs_dir2_data_free_t *dfp; /* bestfree pointer */ + char *endptr; /* end of data area */ + xfs_mount_t *mp; /* filesystem mount point */ + int needscan; /* need to regen bestfree */ + xfs_dir2_data_unused_t *newdup; /* new unused entry */ + xfs_dir2_data_unused_t *postdup; /* unused entry after us */ + xfs_dir2_data_unused_t *prevdup; /* unused entry before us */ + + mp = tp->t_mountp; + d = bp->data; + /* + * Figure out where the end of the data area is. + */ + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) + endptr = (char *)d + mp->m_dirblksize; + else { + xfs_dir2_block_tail_t *btp; /* block tail */ + + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); + endptr = (char *)XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + } + /* + * If this isn't the start of the block, then back up to + * the previous entry and see if it's free. + */ + if (offset > sizeof(d->hdr)) { + xfs_dir2_data_off_t *tagp; /* tag just before us */ + + tagp = (xfs_dir2_data_off_t *)((char *)d + offset) - 1; + prevdup = (xfs_dir2_data_unused_t *)((char *)d + INT_GET(*tagp, ARCH_CONVERT)); + if (INT_GET(prevdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + prevdup = NULL; + } else + prevdup = NULL; + /* + * If this isn't the end of the block, see if the entry after + * us is free. + */ + if ((char *)d + offset + len < endptr) { + postdup = + (xfs_dir2_data_unused_t *)((char *)d + offset + len); + if (INT_GET(postdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + postdup = NULL; + } else + postdup = NULL; + ASSERT(*needscanp == 0); + needscan = 0; + /* + * Previous and following entries are both free, + * merge everything into a single free entry. + */ + if (prevdup && postdup) { + xfs_dir2_data_free_t *dfp2; /* another bestfree pointer */ + + /* + * See if prevdup and/or postdup are in bestfree table. + */ + dfp = xfs_dir2_data_freefind(d, prevdup); + dfp2 = xfs_dir2_data_freefind(d, postdup); + /* + * We need a rescan unless there are exactly 2 free entries + * namely our two. Then we know what's happening, otherwise + * since the third bestfree is there, there might be more + * entries. + */ + needscan = !INT_ISZERO(d->hdr.bestfree[2].length, ARCH_CONVERT); + /* + * Fix up the new big freespace. + */ + INT_MOD(prevdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(prevdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, prevdup); + if (!needscan) { + /* + * Has to be the case that entries 0 and 1 are + * dfp and dfp2 (don't know which is which), and + * entry 2 is empty. + * Remove entry 1 first then entry 0. + */ + ASSERT(dfp && dfp2); + if (dfp == &d->hdr.bestfree[1]) { + dfp = &d->hdr.bestfree[0]; + ASSERT(dfp2 == dfp); + dfp2 = &d->hdr.bestfree[1]; + } + xfs_dir2_data_freeremove(d, dfp2, needlogp); + xfs_dir2_data_freeremove(d, dfp, needlogp); + /* + * Now insert the new entry. 
+ */ + dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp); + ASSERT(dfp == &d->hdr.bestfree[0]); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(prevdup->length, ARCH_CONVERT)); + ASSERT(INT_ISZERO(dfp[1].length, ARCH_CONVERT)); + ASSERT(INT_ISZERO(dfp[2].length, ARCH_CONVERT)); + } + } + /* + * The entry before us is free, merge with it. + */ + else if (prevdup) { + dfp = xfs_dir2_data_freefind(d, prevdup); + INT_MOD(prevdup->length, ARCH_CONVERT, len); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(prevdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, prevdup); + /* + * If the previous entry was in the table, the new entry + * is longer, so it will be in the table too. Remove + * the old one and add the new one. + */ + if (dfp) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + (void)xfs_dir2_data_freeinsert(d, prevdup, needlogp); + } + /* + * Otherwise we need a scan if the new entry is big enough. + */ + else + needscan = INT_GET(prevdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); + } + /* + * The following entry is free, merge with it. + */ + else if (postdup) { + dfp = xfs_dir2_data_freefind(d, postdup); + newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); + INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + /* + * If the following entry was in the table, the new entry + * is longer, so it will be in the table too. Remove + * the old one and add the new one. + */ + if (dfp) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); + } + /* + * Otherwise we need a scan if the new entry is big enough. + */ + else + needscan = INT_GET(newdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); + } + /* + * Neither neighbor is free. Make a new entry. + */ + else { + newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); + INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup->length, ARCH_CONVERT, len); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); + } + *needscanp = needscan; +} + +/* + * Take a byte range out of an existing unused space and make it un-free. 
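+ * Again four cases, by how the range lines up with the unused
+ * entry containing it:
+ *	front and back match:	the unused entry disappears
+ *	front matches:		the remainder becomes a new entry
+ *	back matches:		the entry is trimmed from its tail
+ *	middle of the entry:	split into two smaller entries
+ * As in xfs_dir2_data_make_free() the bestfree table is fixed up
+ * in place where possible, else *needscanp requests a rescan.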
+ */ +void +xfs_dir2_data_use_free( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* data block buffer */ + xfs_dir2_data_unused_t *dup, /* unused entry */ + xfs_dir2_data_aoff_t offset, /* starting offset to use */ + xfs_dir2_data_aoff_t len, /* length to use */ + int *needlogp, /* out: need to log header */ + int *needscanp) /* out: need regen bestfree */ +{ + xfs_dir2_data_t *d; /* data block */ + xfs_dir2_data_free_t *dfp; /* bestfree pointer */ + int matchback; /* matches end of freespace */ + int matchfront; /* matches start of freespace */ + int needscan; /* need to regen bestfree */ + xfs_dir2_data_unused_t *newdup; /* new unused entry */ + xfs_dir2_data_unused_t *newdup2; /* another new unused entry */ + int oldlen; /* old unused entry's length */ + + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG); + ASSERT(offset >= (char *)dup - (char *)d); + ASSERT(offset + len <= (char *)dup + INT_GET(dup->length, ARCH_CONVERT) - (char *)d); + ASSERT((char *)dup - (char *)d == INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT)); + /* + * Look up the entry in the bestfree table. + */ + dfp = xfs_dir2_data_freefind(d, dup); + oldlen = INT_GET(dup->length, ARCH_CONVERT); + ASSERT(dfp || oldlen <= INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT)); + /* + * Check for alignment with front and back of the entry. + */ + matchfront = (char *)dup - (char *)d == offset; + matchback = (char *)dup + oldlen - (char *)d == offset + len; + ASSERT(*needscanp == 0); + needscan = 0; + /* + * If we matched it exactly we just need to get rid of it from + * the bestfree table. + */ + if (matchfront && matchback) { + if (dfp) { + needscan = !INT_ISZERO(d->hdr.bestfree[2].offset, ARCH_CONVERT); + if (!needscan) + xfs_dir2_data_freeremove(d, dfp, needlogp); + } + } + /* + * We match the first part of the entry. + * Make a new entry with the remaining freespace. + */ + else if (matchfront) { + newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); + INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup->length, ARCH_CONVERT, oldlen - len); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + /* + * If it was in the table, remove it and add the new one. + */ + if (dfp) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); + ASSERT(dfp != NULL); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); + ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); + /* + * If we got inserted at the last slot, + * that means we don't know if there was a better + * choice for the last slot, or not. Rescan. + */ + needscan = dfp == &d->hdr.bestfree[2]; + } + } + /* + * We match the last part of the entry. + * Trim the allocated space off the tail of the entry. + */ + else if (matchback) { + newdup = dup; + INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) + (((char *)d + offset) - (char *)newdup)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + /* + * If it was in the table, remove it and add the new one. 
+ */ + if (dfp) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); + ASSERT(dfp != NULL); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); + ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); + /* + * If we got inserted at the last slot, + * that means we don't know if there was a better + * choice for the last slot, or not. Rescan. + */ + needscan = dfp == &d->hdr.bestfree[2]; + } + } + /* + * Poking out the middle of an entry. + * Make two new entries. + */ + else { + newdup = dup; + INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) + (((char *)d + offset) - (char *)newdup)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len); + INT_SET(newdup2->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup2->length, ARCH_CONVERT, oldlen - len - INT_GET(newdup->length, ARCH_CONVERT)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup2, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup2 - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup2); + /* + * If the old entry was in the table, we need to scan + * if the 3rd entry was valid, since these entries + * are smaller than the old one. + * If we don't need to scan that means there were 1 or 2 + * entries in the table, and removing the old and adding + * the 2 new will work. + */ + if (dfp) { + needscan = !INT_ISZERO(d->hdr.bestfree[2].length, ARCH_CONVERT); + if (!needscan) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + (void)xfs_dir2_data_freeinsert(d, newdup, + needlogp); + (void)xfs_dir2_data_freeinsert(d, newdup2, + needlogp); + } + } + } + *needscanp = needscan; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_data.h linux.21rc5-ac2/fs/xfs/xfs_dir2_data.h --- linux.21rc5/fs/xfs/xfs_dir2_data.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_data.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_DATA_H__ +#define __XFS_DIR2_DATA_H__ + +/* + * Directory format 2, data block structures. + */ + +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_inode; +struct xfs_trans; + +/* + * Constants. + */ +#define XFS_DIR2_DATA_MAGIC 0x58443244 /* XD2D: for multiblock dirs */ +#define XFS_DIR2_DATA_ALIGN_LOG 3 /* i.e., 8 bytes */ +#define XFS_DIR2_DATA_ALIGN (1 << XFS_DIR2_DATA_ALIGN_LOG) +#define XFS_DIR2_DATA_FREE_TAG 0xffff +#define XFS_DIR2_DATA_FD_COUNT 3 + +/* + * Directory address space divided into sections, + * spaces separated by 32gb. + */ +#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG)) +#define XFS_DIR2_DATA_SPACE 0 +#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE) +#define XFS_DIR2_DATA_FIRSTDB(mp) \ + XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATA_OFFSET) + +/* + * Offsets of . and .. in data space (always block 0) + */ +#define XFS_DIR2_DATA_DOT_OFFSET \ + ((xfs_dir2_data_aoff_t)sizeof(xfs_dir2_data_hdr_t)) +#define XFS_DIR2_DATA_DOTDOT_OFFSET \ + (XFS_DIR2_DATA_DOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(1)) +#define XFS_DIR2_DATA_FIRST_OFFSET \ + (XFS_DIR2_DATA_DOTDOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(2)) + +/* + * Structures. + */ + +/* + * Describe a free area in the data block. + * The freespace will be formatted as a xfs_dir2_data_unused_t. + */ +typedef struct xfs_dir2_data_free { + xfs_dir2_data_off_t offset; /* start of freespace */ + xfs_dir2_data_off_t length; /* length of freespace */ +} xfs_dir2_data_free_t; + +/* + * Header for the data blocks. + * Always at the beginning of a directory-sized block. + * The code knows that XFS_DIR2_DATA_FD_COUNT is 3. + */ +typedef struct xfs_dir2_data_hdr { + __uint32_t magic; /* XFS_DIR2_DATA_MAGIC */ + /* or XFS_DIR2_BLOCK_MAGIC */ + xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT]; +} xfs_dir2_data_hdr_t; + +/* + * Active entry in a data block. Aligned to 8 bytes. + * Tag appears as the last 2 bytes. + */ +typedef struct xfs_dir2_data_entry { + xfs_ino_t inumber; /* inode number */ + __uint8_t namelen; /* name length */ + __uint8_t name[1]; /* name bytes, no null */ + /* variable offset */ + xfs_dir2_data_off_t tag; /* starting offset of us */ +} xfs_dir2_data_entry_t; + +/* + * Unused entry in a data block. Aligned to 8 bytes. + * Tag appears as the last 2 bytes. + */ +typedef struct xfs_dir2_data_unused { + __uint16_t freetag; /* XFS_DIR2_DATA_FREE_TAG */ + xfs_dir2_data_off_t length; /* total free length */ + /* variable offset */ + xfs_dir2_data_off_t tag; /* starting offset of us */ +} xfs_dir2_data_unused_t; + +typedef union { + xfs_dir2_data_entry_t entry; + xfs_dir2_data_unused_t unused; +} xfs_dir2_data_union_t; + +/* + * Generic data block structure, for xfs_db. + */ +typedef struct xfs_dir2_data { + xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_DATA_MAGIC */ + xfs_dir2_data_union_t u[1]; +} xfs_dir2_data_t; + +/* + * Macros. + */ + +/* + * Size of a data entry. 
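+ * Worked example, assuming the usual layout (8-byte inumber,
+ * 1-byte namelen, 2-byte tag, so 11 bytes of overhead plus the
+ * name, rounded up to 8):
+ *
+ *	XFS_DIR2_DATA_ENTSIZE(1) = roundup(12, 8) = 16	(".")
+ *	XFS_DIR2_DATA_ENTSIZE(2) = roundup(13, 8) = 16	("..")
+ *	XFS_DIR2_DATA_ENTSIZE(8) = roundup(19, 8) = 24
+ *
+ * which is where the 16/32/48 dot and dotdot offsets above
+ * come from.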
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATA_ENTSIZE) +int xfs_dir2_data_entsize(int n); +#define XFS_DIR2_DATA_ENTSIZE(n) xfs_dir2_data_entsize(n) +#else +#define XFS_DIR2_DATA_ENTSIZE(n) \ + ((int)(roundup(offsetof(xfs_dir2_data_entry_t, name[0]) + (n) + \ + (uint)sizeof(xfs_dir2_data_off_t), XFS_DIR2_DATA_ALIGN))) +#endif + +/* + * Pointer to an entry's tag word. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATA_ENTRY_TAG_P) +xfs_dir2_data_off_t *xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep); +#define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep) +#else +#define XFS_DIR2_DATA_ENTRY_TAG_P(dep) \ + ((xfs_dir2_data_off_t *)\ + ((char *)(dep) + XFS_DIR2_DATA_ENTSIZE((dep)->namelen) - \ + (uint)sizeof(xfs_dir2_data_off_t))) +#endif + +/* + * Pointer to a freespace's tag word. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATA_UNUSED_TAG_P) +xfs_dir2_data_off_t *xfs_dir2_data_unused_tag_p_arch( + xfs_dir2_data_unused_t *dup, xfs_arch_t arch); +#define XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup,arch) \ + xfs_dir2_data_unused_tag_p_arch(dup,arch) +#else +#define XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup,arch) \ + ((xfs_dir2_data_off_t *)\ + ((char *)(dup) + INT_GET((dup)->length, arch) \ + - (uint)sizeof(xfs_dir2_data_off_t))) +#endif + +/* + * Function declarations. + */ + +#ifdef DEBUG +extern void + xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp); +#else +#define xfs_dir2_data_check(dp,bp) +#endif + +extern xfs_dir2_data_free_t * + xfs_dir2_data_freefind(xfs_dir2_data_t *d, + xfs_dir2_data_unused_t *dup); + +extern xfs_dir2_data_free_t * + xfs_dir2_data_freeinsert(xfs_dir2_data_t *d, + xfs_dir2_data_unused_t *dup, int *loghead); + +extern void + xfs_dir2_data_freeremove(xfs_dir2_data_t *d, + xfs_dir2_data_free_t *dfp, int *loghead); + +extern void + xfs_dir2_data_freescan(struct xfs_mount *mp, xfs_dir2_data_t *d, + int *loghead, char *aendp); + +extern int + xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno, + struct xfs_dabuf **bpp); + +extern void + xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp, + xfs_dir2_data_entry_t *dep); + +extern void + xfs_dir2_data_log_header(struct xfs_trans *tp, struct xfs_dabuf *bp); + +extern void + xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp, + xfs_dir2_data_unused_t *dup); + +extern void + xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp, + xfs_dir2_data_aoff_t offset, + xfs_dir2_data_aoff_t len, int *needlogp, + int *needscanp); + +extern void + xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp, + xfs_dir2_data_unused_t *dup, + xfs_dir2_data_aoff_t offset, + xfs_dir2_data_aoff_t len, int *needlogp, + int *needscanp); + +#endif /* __XFS_DIR2_DATA_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2.h linux.21rc5-ac2/fs/xfs/xfs_dir2.h --- linux.21rc5/fs/xfs/xfs_dir2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_H__ +#define __XFS_DIR2_H__ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_dir2_put_args; +struct xfs_inode; +struct xfs_trans; + +/* + * Directory version 2. + * There are 4 possible formats: + * shortform + * single block - data with embedded leaf at the end + * multiple data blocks, single leaf+freeindex block + * data blocks, node&leaf blocks (btree), freeindex blocks + * + * The shortform format is in xfs_dir2_sf.h. + * The single block format is in xfs_dir2_block.h. + * The data block format is in xfs_dir2_data.h. + * The leaf and freeindex block formats are in xfs_dir2_leaf.h. + * Node blocks are the same as the other version, in xfs_da_btree.h. + */ + +/* + * Byte offset in data block and shortform entry. + */ +typedef __uint16_t xfs_dir2_data_off_t; +#define NULLDATAOFF 0xffffU +typedef uint xfs_dir2_data_aoff_t; /* argument form */ + +/* + * Directory block number (logical dirblk in file) + */ +typedef __uint32_t xfs_dir2_db_t; + +/* + * Byte offset in a directory. + */ +typedef xfs_off_t xfs_dir2_off_t; + +/* + * For getdents, argument struct for put routines. + */ +typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa); +typedef struct xfs_dir2_put_args { + xfs_off_t cook; /* cookie of (next) entry */ + xfs_intino_t ino; /* inode number */ + struct xfs_dirent *dbp; /* buffer pointer */ + char *name; /* directory entry name */ + int namelen; /* length of name */ + int done; /* output: set if value was stored */ + xfs_dir2_put_t put; /* put function ptr (i/o) */ + struct uio *uio; /* uio control structure */ +} xfs_dir2_put_args_t; + +#define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2) +extern xfs_dirops_t xfsv2_dirops; + +/* + * Other interfaces used by the rest of the dir v2 code. 
+ */ +extern int + xfs_dir2_grow_inode(struct xfs_da_args *args, int space, + xfs_dir2_db_t *dbp); + +extern int + xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); + +extern int + xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); + +extern int + xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, + struct xfs_dabuf *bp); + +#endif /* __XFS_DIR2_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_leaf.c linux.21rc5-ac2/fs/xfs/xfs_dir2_leaf.c --- linux.21rc5/fs/xfs/xfs_dir2_leaf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_leaf.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1897 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_leaf.c + * XFS directory version 2 implementation - single leaf form + * see xfs_dir2_leaf.h for data structures. + * These directories have multiple XFS_DIR2_DATA blocks and one + * XFS_DIR2_LEAF1 block containing the hash table and freespace map. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_node.h" +#include "xfs_dir2_trace.h" +#include "xfs_error.h" +#include "xfs_bit.h" + +/* + * Local function declarations. + */ +#ifdef DEBUG +static void xfs_dir2_leaf_check(xfs_inode_t *dp, xfs_dabuf_t *bp); +#else +#define xfs_dir2_leaf_check(dp, bp) +#endif +static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **lbpp, + int *indexp, xfs_dabuf_t **dbpp); + +/* + * Convert a block form directory to a leaf form directory. 
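+ * In outline, the steps below are:
+ *	1. grow the inode by one block in the leaf address space
+ *	   (empty until now, so we get XFS_DIR2_LEAF_FIRSTDB)
+ *	2. init the leaf block and copy the block tail's leaf
+ *	   entries into it
+ *	3. free the space the tail occupied in the data block
+ *	4. switch the block's magic to XFS_DIR2_DATA_MAGIC
+ *	5. fill the leaf tail's bests table from bestfree[0]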
+ */ +int /* error */ +xfs_dir2_block_to_leaf( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *dbp) /* input block's buffer */ +{ + xfs_dir2_data_off_t *bestsp; /* leaf's bestsp entries */ + xfs_dablk_t blkno; /* leaf block's bno */ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */ + xfs_dir2_block_tail_t *btp; /* block's tail */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + xfs_dabuf_t *lbp; /* leaf block's buffer */ + xfs_dir2_db_t ldb; /* leaf block's bno */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf's tail */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log block header */ + int needscan; /* need to rescan bestfree */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_b("block_to_leaf", args, dbp); + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + /* + * Add the leaf block to the inode. + * This interface will only put blocks in the leaf/node range. + * Since that's empty now, we'll get the root (block 0 in range). + */ + if ((error = xfs_da_grow_inode(args, &blkno))) { + return error; + } + ldb = XFS_DIR2_DA_TO_DB(mp, blkno); + ASSERT(ldb == XFS_DIR2_LEAF_FIRSTDB(mp)); + /* + * Initialize the leaf block, get a buffer for it. + */ + if ((error = xfs_dir2_leaf_init(args, ldb, &lbp, XFS_DIR2_LEAF1_MAGIC))) { + return error; + } + ASSERT(lbp != NULL); + leaf = lbp->data; + block = dbp->data; + xfs_dir2_data_check(dp, dbp); + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Set the counts in the leaf header. + */ + INT_COPY(leaf->hdr.count, btp->count, ARCH_CONVERT); /* INT_: type change */ + INT_COPY(leaf->hdr.stale, btp->stale, ARCH_CONVERT); /* INT_: type change */ + /* + * Could compact these but I think we always do the conversion + * after squeezing out stale entries. + */ + memcpy(leaf->ents, blp, INT_GET(btp->count, ARCH_CONVERT) * sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, lbp, 0, INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1); + needscan = 0; + needlog = 1; + /* + * Make the space formerly occupied by the leaf entries and block + * tail be free. + */ + xfs_dir2_data_make_free(tp, dbp, + (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), + (xfs_dir2_data_aoff_t)((char *)block + mp->m_dirblksize - + (char *)blp), + &needlog, &needscan); + /* + * Fix up the block header, make it a data block. + */ + INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); + if (needscan) + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, + NULL); + /* + * Set up leaf tail and bests table. + */ + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + INT_SET(ltp->bestcount, ARCH_CONVERT, 1); + bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT); + INT_COPY(bestsp[0], block->hdr.bestfree[0].length, ARCH_CONVERT); + /* + * Log the data header and leaf bests table. + */ + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + xfs_dir2_leaf_check(dp, lbp); + xfs_dir2_data_check(dp, dbp); + xfs_dir2_leaf_log_bests(tp, lbp, 0, 0); + xfs_da_buf_done(lbp); + return 0; +} + +/* + * Add an entry to a leaf form directory. 
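+ * Rough flow (the caller has already checked the name is absent):
+ *	- find the insertion index by hash value, then pick a data
+ *	  block with room, preferring one already holding entries
+ *	  with the same hash
+ *	- if the leaf is full, compact stale entries, or convert to
+ *	  node form and retry via xfs_dir2_node_addname()
+ *	- allocate and init a new data block if none had room
+ *	- carve the entry from the block's best freespace and point
+ *	  a leaf entry (reusing a stale slot if any) at it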
+ */ +int /* error */ +xfs_dir2_leaf_addname( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dir2_data_off_t *bestsp; /* freespace table in leaf */ + int compact; /* need to compact leaves */ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* data unused entry */ + int error; /* error return value */ + int grown; /* allocated new data block */ + int highstale; /* index of next stale leaf */ + int i; /* temporary, index */ + int index; /* leaf table position */ + xfs_dabuf_t *lbp; /* leaf's buffer */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int length; /* length of new entry */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry table pointer */ + int lfloglow; /* low leaf logging index */ + int lfloghigh; /* high leaf logging index */ + int lowstale; /* index of prev stale leaf */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail pointer */ + xfs_mount_t *mp; /* filesystem mount point */ + int needbytes; /* leaf block bytes needed */ + int needlog; /* need to log data header */ + int needscan; /* need to rescan data free */ + xfs_dir2_data_off_t *tagp; /* end of data entry */ + xfs_trans_t *tp; /* transaction pointer */ + xfs_dir2_db_t use_block; /* data block number */ + + xfs_dir2_trace_args("leaf_addname", args); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + /* + * Read the leaf block. + */ + error = xfs_da_read_buf(tp, dp, mp->m_dirleafblk, -1, &lbp, + XFS_DATA_FORK); + if (error) { + return error; + } + ASSERT(lbp != NULL); + /* + * Look up the entry by hash value and name. + * We know it's not there, our caller has already done a lookup. + * So the index is of the entry to insert in front of. + * But if there are dup hash values the index is of the first of those. + */ + index = xfs_dir2_leaf_search_hash(args, lbp); + leaf = lbp->data; + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT); + length = XFS_DIR2_DATA_ENTSIZE(args->namelen); + /* + * See if there are any entries with the same hash value + * and space in their block for the new entry. + * This is good because it puts multiple same-hash value entries + * in a data block, improving the lookup of those entries. + */ + for (use_block = -1, lep = &leaf->ents[index]; + index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + index++, lep++) { + if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + i = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + ASSERT(i < INT_GET(ltp->bestcount, ARCH_CONVERT)); + ASSERT(INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF); + if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { + use_block = i; + break; + } + } + /* + * Didn't find a block yet, linear search all the data blocks. + */ + if (use_block == -1) { + for (i = 0; i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++) { + /* + * Remember a block we see that's missing. + */ + if (INT_GET(bestsp[i], ARCH_CONVERT) == NULLDATAOFF && use_block == -1) + use_block = i; + else if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { + use_block = i; + break; + } + } + } + /* + * How many bytes do we need in the leaf block? + */ + needbytes = + (!INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT) ? 0 : (uint)sizeof(leaf->ents[0])) + + (use_block != -1 ? 
0 : (uint)sizeof(leaf->bests[0])); + /* + * Now kill use_block if it refers to a missing block, so we + * can use it as an indication of allocation needed. + */ + if (use_block != -1 && INT_GET(bestsp[use_block], ARCH_CONVERT) == NULLDATAOFF) + use_block = -1; + /* + * If we don't have enough free bytes but we can make enough + * by compacting out stale entries, we'll do that. + */ + if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < needbytes && + INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1) { + compact = 1; + } + /* + * Otherwise if we don't have enough free bytes we need to + * convert to node form. + */ + else if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < + needbytes) { + /* + * Just checking or no space reservation, give up. + */ + if (args->justcheck || args->total == 0) { + xfs_da_brelse(tp, lbp); + return XFS_ERROR(ENOSPC); + } + /* + * Convert to node form. + */ + error = xfs_dir2_leaf_to_node(args, lbp); + xfs_da_buf_done(lbp); + if (error) + return error; + /* + * Then add the new entry. + */ + return xfs_dir2_node_addname(args); + } + /* + * Otherwise it will fit without compaction. + */ + else + compact = 0; + /* + * If just checking, then it will fit unless we needed to allocate + * a new data block. + */ + if (args->justcheck) { + xfs_da_brelse(tp, lbp); + return use_block == -1 ? XFS_ERROR(ENOSPC) : 0; + } + /* + * If no allocations are allowed, return now before we've + * changed anything. + */ + if (args->total == 0 && use_block == -1) { + xfs_da_brelse(tp, lbp); + return XFS_ERROR(ENOSPC); + } + /* + * Need to compact the leaf entries, removing stale ones. + * Leave one stale entry behind - the one closest to our + * insertion index - and we'll shift that one to our insertion + * point later. + */ + if (compact) { + xfs_dir2_leaf_compact_x1(lbp, &index, &lowstale, &highstale, + &lfloglow, &lfloghigh); + } + /* + * There are stale entries, so we'll need log-low and log-high + * impossibly bad values later. + */ + else if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) { + lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); + lfloghigh = -1; + } + /* + * If there was no data block space found, we need to allocate + * a new one. + */ + if (use_block == -1) { + /* + * Add the new data block. + */ + if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, + &use_block))) { + xfs_da_brelse(tp, lbp); + return error; + } + /* + * Initialize the block. + */ + if ((error = xfs_dir2_data_init(args, use_block, &dbp))) { + xfs_da_brelse(tp, lbp); + return error; + } + /* + * If we're adding a new data block on the end we need to + * extend the bests table. Copy it up one entry. + */ + if (use_block >= INT_GET(ltp->bestcount, ARCH_CONVERT)) { + bestsp--; + memmove(&bestsp[0], &bestsp[1], + INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(bestsp[0])); + INT_MOD(ltp->bestcount, ARCH_CONVERT, +1); + xfs_dir2_leaf_log_tail(tp, lbp); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + } + /* + * If we're filling in a previously empty block just log it. + */ + else + xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); + data = dbp->data; + INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); + grown = 1; + } + /* + * Already had space in some data block. + * Just read that one in. 
+ */ + else { + if ((error = + xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, use_block), + -1, &dbp, XFS_DATA_FORK))) { + xfs_da_brelse(tp, lbp); + return error; + } + data = dbp->data; + grown = 0; + } + xfs_dir2_data_check(dp, dbp); + /* + * Point to the biggest freespace in our data block. + */ + dup = (xfs_dir2_data_unused_t *) + ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); + ASSERT(INT_GET(dup->length, ARCH_CONVERT) >= length); + needscan = needlog = 0; + /* + * Mark the initial part of our freespace in use for the new entry. + */ + xfs_dir2_data_use_free(tp, dbp, dup, + (xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length, + &needlog, &needscan); + /* + * Initialize our new entry (at last). + */ + dep = (xfs_dir2_data_entry_t *)dup; + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + dep->namelen = args->namelen; + memcpy(dep->name, args->name, dep->namelen); + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); + /* + * Need to scan fix up the bestfree table. + */ + if (needscan) + xfs_dir2_data_freescan(mp, data, &needlog, NULL); + /* + * Need to log the data block's header. + */ + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + xfs_dir2_data_log_entry(tp, dbp, dep); + /* + * If the bests table needs to be changed, do it. + * Log the change unless we've already done that. + */ + if (INT_GET(bestsp[use_block], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { + INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); + if (!grown) + xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); + } + /* + * Now we need to make room to insert the leaf entry. + * If there are no stale entries, we just insert a hole at index. + */ + if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) { + /* + * lep is still good as the index leaf entry. + */ + if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + memmove(lep + 1, lep, + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); + /* + * Record low and high logging indices for the leaf. + */ + lfloglow = index; + lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); + } + /* + * There are stale entries. + * We will use one of them for the new entry. + * It's probably not at the right location, so we'll have to + * shift some up or down first. + */ + else { + /* + * If we didn't compact before, we need to find the nearest + * stale entries before and after our insertion point. + */ + if (compact == 0) { + /* + * Find the first stale entry before the insertion + * point, if any. + */ + for (lowstale = index - 1; + lowstale >= 0 && + INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != + XFS_DIR2_NULL_DATAPTR; + lowstale--) + continue; + /* + * Find the next stale entry at or after the insertion + * point, if any. Stop if we go so far that the + * lowstale entry would be better. + */ + for (highstale = index; + highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && + INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != + XFS_DIR2_NULL_DATAPTR && + (lowstale < 0 || + index - lowstale - 1 >= highstale - index); + highstale++) + continue; + } + /* + * If the low one is better, use it. 
+ */
+ if (lowstale >= 0 &&
+ (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) ||
+ index - lowstale - 1 < highstale - index)) {
+ ASSERT(index - lowstale - 1 >= 0);
+ ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) ==
+ XFS_DIR2_NULL_DATAPTR);
+ /*
+ * Copy entries up to cover the stale entry
+ * and make room for the new entry.
+ */
+ if (index - lowstale - 1 > 0)
+ memmove(&leaf->ents[lowstale],
+ &leaf->ents[lowstale + 1],
+ (index - lowstale - 1) * sizeof(*lep));
+ lep = &leaf->ents[index - 1];
+ lfloglow = MIN(lowstale, lfloglow);
+ lfloghigh = MAX(index - 1, lfloghigh);
+ }
+ /*
+ * The high one is better, so use that one.
+ */
+ else {
+ ASSERT(highstale - index >= 0);
+ ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) ==
+ XFS_DIR2_NULL_DATAPTR);
+ /*
+ * Copy entries down to cover the stale entry
+ * and make room for the new entry.
+ */
+ if (highstale - index > 0)
+ memmove(&leaf->ents[index + 1],
+ &leaf->ents[index],
+ (highstale - index) * sizeof(*lep));
+ lep = &leaf->ents[index];
+ lfloglow = MIN(index, lfloglow);
+ lfloghigh = MAX(highstale, lfloghigh);
+ }
+ INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1);
+ }
+ /*
+ * Fill in the new leaf entry.
+ */
+ INT_SET(lep->hashval, ARCH_CONVERT, args->hashval);
+ INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, INT_GET(*tagp, ARCH_CONVERT)));
+ /*
+ * Log the leaf fields and give up the buffers.
+ */
+ xfs_dir2_leaf_log_header(tp, lbp);
+ xfs_dir2_leaf_log_ents(tp, lbp, lfloglow, lfloghigh);
+ xfs_dir2_leaf_check(dp, lbp);
+ xfs_da_buf_done(lbp);
+ xfs_dir2_data_check(dp, dbp);
+ xfs_da_buf_done(dbp);
+ return 0;
+}
+
+#ifdef DEBUG
+/*
+ * Check the internal consistency of a leaf1 block.
+ * Pop an assert if something is wrong.
+ */
+void
+xfs_dir2_leaf_check(
+ xfs_inode_t *dp, /* incore directory inode */
+ xfs_dabuf_t *bp) /* leaf's buffer */
+{
+ int i; /* leaf index */
+ xfs_dir2_leaf_t *leaf; /* leaf structure */
+ xfs_dir2_leaf_tail_t *ltp; /* leaf tail pointer */
+ xfs_mount_t *mp; /* filesystem mount point */
+ int stale; /* count of stale leaves */
+
+ leaf = bp->data;
+ mp = dp->i_mount;
+ ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC);
+ /*
+ * This value is not restrictive enough.
+ * Should factor in the size of the bests table as well.
+ * We can deduce a value for that from di_size.
+ */
+ ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp));
+ ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+ /*
+ * Leaves and bests don't overlap.
+ */
+ ASSERT((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] <=
+ (char *)XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT));
+ /*
+ * Check hash value order, count stale entries.
+ */
+ for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) {
+ if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT))
+ ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <=
+ INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT));
+ if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR)
+ stale++;
+ }
+ ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale);
+}
+#endif /* DEBUG */
+
+/*
+ * Compact out any stale entries in the leaf.
+ * Log the header and changed leaf entries, if any.
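+ * For example, with dots marking stale entries:
+ *
+ *	[ A . B . C ]  count 5, stale 2
+ * becomes
+ *	[ A B C ]      count 3, stale 0
+ *
+ * via the from/to copy loop below; only the entries that actually
+ * moved (B and C here) need to be logged.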
+ */ +void +xfs_dir2_leaf_compact( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *bp) /* leaf buffer */ +{ + int from; /* source leaf index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int loglow; /* first leaf entry to log */ + int to; /* target leaf index */ + + leaf = bp->data; + if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) { + return; + } + /* + * Compress out the stale entries in place. + */ + for (from = to = 0, loglow = -1; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Only actually copy the entries that are different. + */ + if (from > to) { + if (loglow == -1) + loglow = to; + leaf->ents[to] = leaf->ents[from]; + } + to++; + } + /* + * Update and log the header, log the leaf entries. + */ + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == from - to); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(INT_GET(leaf->hdr.stale, ARCH_CONVERT))); + INT_ZERO(leaf->hdr.stale, ARCH_CONVERT); + xfs_dir2_leaf_log_header(args->trans, bp); + if (loglow != -1) + xfs_dir2_leaf_log_ents(args->trans, bp, loglow, to - 1); +} + +/* + * Compact the leaf entries, removing stale ones. + * Leave one stale entry behind - the one closest to our + * insertion index - and the caller will shift that one to our insertion + * point later. + * Return new insertion index, where the remaining stale entry is, + * and leaf logging indices. + */ +void +xfs_dir2_leaf_compact_x1( + xfs_dabuf_t *bp, /* leaf buffer */ + int *indexp, /* insertion index */ + int *lowstalep, /* out: stale entry before us */ + int *highstalep, /* out: stale entry after us */ + int *lowlogp, /* out: low log index */ + int *highlogp) /* out: high log index */ +{ + int from; /* source copy index */ + int highstale; /* stale entry at/after index */ + int index; /* insertion index */ + int keepstale; /* source index of kept stale */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int lowstale; /* stale entry before index */ + int newindex=0; /* new insertion index */ + int to; /* destination copy index */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1); + index = *indexp; + /* + * Find the first stale entry before our index, if any. + */ + for (lowstale = index - 1; + lowstale >= 0 && + INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; + lowstale--) + continue; + /* + * Find the first stale entry at or after our index, if any. + * Stop if the answer would be worse than lowstale. + */ + for (highstale = index; + highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && + INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && + (lowstale < 0 || index - lowstale > highstale - index); + highstale++) + continue; + /* + * Pick the better of lowstale and highstale. + */ + if (lowstale >= 0 && + (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + index - lowstale <= highstale - index)) + keepstale = lowstale; + else + keepstale = highstale; + /* + * Copy the entries in place, removing all the stale entries + * except keepstale. + */ + for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + /* + * Notice the new value of index. + */ + if (index == from) + newindex = to; + if (from != keepstale && + INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { + if (from == to) + *lowlogp = to; + continue; + } + /* + * Record the new keepstale value for the insertion. 
+ */ + if (from == keepstale) + lowstale = highstale = to; + /* + * Copy only the entries that have moved. + */ + if (from > to) + leaf->ents[to] = leaf->ents[from]; + to++; + } + ASSERT(from > to); + /* + * If the insertion point was past the last entry, + * set the new insertion point accordingly. + */ + if (index == from) + newindex = to; + *indexp = newindex; + /* + * Adjust the leaf header values. + */ + INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(from - to)); + INT_SET(leaf->hdr.stale, ARCH_CONVERT, 1); + /* + * Remember the low/high stale value only in the "right" + * direction. + */ + if (lowstale >= newindex) + lowstale = -1; + else + highstale = INT_GET(leaf->hdr.count, ARCH_CONVERT); + *highlogp = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1; + *lowstalep = lowstale; + *highstalep = highstale; +} + +/* + * Getdents (readdir) for leaf and node directories. + * This reads the data blocks only, so is the same for both forms. + */ +int /* error */ +xfs_dir2_leaf_getdents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + uio_t *uio, /* I/O control & vectors */ + int *eofp, /* out: reached end of dir */ + xfs_dirent_t *dbp, /* caller's buffer */ + xfs_dir2_put_t put) /* ABI formatting routine */ +{ + xfs_dabuf_t *bp; /* data block buffer */ + int byteoff; /* offset in current block */ + xfs_dir2_db_t curdb; /* db for current block */ + xfs_dir2_off_t curoff; /* current overall offset */ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_entry_t *dep; /* data entry */ + xfs_dir2_data_unused_t *dup; /* unused entry */ + int eof; /* reached end of directory */ + int error=0; /* error return value */ + int i; /* temporary loop index */ + int j; /* temporary loop index */ + int length; /* temporary length value */ + xfs_bmbt_irec_t *map; /* map vector for blocks */ + xfs_extlen_t map_blocks; /* number of fsbs in map */ + xfs_dablk_t map_off; /* last mapped file offset */ + int map_size; /* total entries in *map */ + int map_valid; /* valid entries in *map */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_off_t newoff; /* new curoff after new blk */ + int nmap; /* mappings to ask xfs_bmapi */ + xfs_dir2_put_args_t p; /* formatting arg bundle */ + char *ptr=NULL; /* pointer to current data */ + int ra_current; /* number of read-ahead blks */ + int ra_index; /* *map index for read-ahead */ + int ra_offset; /* map entry offset for ra */ + int ra_want; /* readahead count wanted */ + + /* + * If the offset is at or past the largest allowed value, + * give up right away, return eof. + */ + if (uio->uio_offset >= XFS_DIR2_MAX_DATAPTR) { + *eofp = 1; + return 0; + } + mp = dp->i_mount; + /* + * Setup formatting arguments. + */ + p.dbp = dbp; + p.put = put; + p.uio = uio; + /* + * Set up to bmap a number of blocks based on the caller's + * buffer size, the directory block size, and the filesystem + * block size. + */ + map_size = + howmany(uio->uio_resid + mp->m_dirblksize, + mp->m_sb.sb_blocksize); + map = kmem_alloc(map_size * sizeof(*map), KM_SLEEP); + map_valid = ra_index = ra_offset = ra_current = map_blocks = 0; + bp = NULL; + eof = 1; + /* + * Inside the loop we keep the main offset value as a byte offset + * in the directory file. + */ + curoff = XFS_DIR2_DATAPTR_TO_BYTE(mp, uio->uio_offset); + /* + * Force this conversion through db so we truncate the offset + * down to get the start of the data block. 
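+ *
+ * For example, assuming 4 KB filesystem blocks and one-block
+ * directory blocks, a curoff of 0x1234 lies in directory block 1,
+ * so map_off comes back as the da block for byte offset 0x1000.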
+ */ + map_off = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, curoff)); + /* + * Loop over directory entries until we reach the end offset. + * Get more blocks and readahead as necessary. + */ + while (curoff < XFS_DIR2_LEAF_OFFSET) { + /* + * If we have no buffer, or we're off the end of the + * current buffer, need to get another one. + */ + if (!bp || ptr >= (char *)bp->data + mp->m_dirblksize) { + /* + * If we have a buffer, we need to release it and + * take it out of the mapping. + */ + if (bp) { + xfs_da_brelse(tp, bp); + bp = NULL; + map_blocks -= mp->m_dirblkfsbs; + /* + * Loop to get rid of the extents for the + * directory block. + */ + for (i = mp->m_dirblkfsbs; i > 0; ) { + j = MIN((int)map->br_blockcount, i); + map->br_blockcount -= j; + map->br_startblock += j; + map->br_startoff += j; + /* + * If mapping is done, pitch it from + * the table. + */ + if (!map->br_blockcount && --map_valid) + memmove(&map[0], &map[1], + sizeof(map[0]) * + map_valid); + i -= j; + } + } + /* + * Recalculate the readahead blocks wanted. + */ + ra_want = howmany(uio->uio_resid + mp->m_dirblksize, + mp->m_sb.sb_blocksize) - 1; + /* + * If we don't have as many as we want, and we haven't + * run out of data blocks, get some more mappings. + */ + if (1 + ra_want > map_blocks && + map_off < + XFS_DIR2_BYTE_TO_DA(mp, XFS_DIR2_LEAF_OFFSET)) { + /* + * Get more bmaps, fill in after the ones + * we already have in the table. + */ + nmap = map_size - map_valid; + error = xfs_bmapi(tp, dp, + map_off, + XFS_DIR2_BYTE_TO_DA(mp, + XFS_DIR2_LEAF_OFFSET) - map_off, + XFS_BMAPI_METADATA, NULL, 0, + &map[map_valid], &nmap, NULL); + /* + * Don't know if we should ignore this or + * try to return an error. + * The trouble with returning errors + * is that readdir will just stop without + * actually passing the error through. + */ + if (error) + break; /* XXX */ + /* + * If we got all the mappings we asked for, + * set the final map offset based on the + * last bmap value received. + * Otherwise, we've reached the end. + */ + if (nmap == map_size - map_valid) + map_off = + map[map_valid + nmap - 1].br_startoff + + map[map_valid + nmap - 1].br_blockcount; + else + map_off = + XFS_DIR2_BYTE_TO_DA(mp, + XFS_DIR2_LEAF_OFFSET); + /* + * Look for holes in the mapping, and + * eliminate them. Count up the valid blocks. + */ + for (i = map_valid; i < map_valid + nmap; ) { + if (map[i].br_startblock == + HOLESTARTBLOCK) { + nmap--; + length = map_valid + nmap - i; + if (length) + memmove(&map[i], + &map[i + 1], + sizeof(map[i]) * + length); + } else { + map_blocks += + map[i].br_blockcount; + i++; + } + } + map_valid += nmap; + } + /* + * No valid mappings, so no more data blocks. + */ + if (!map_valid) { + curoff = XFS_DIR2_DA_TO_BYTE(mp, map_off); + break; + } + /* + * Read the directory block starting at the first + * mapping. + */ + curdb = XFS_DIR2_DA_TO_DB(mp, map->br_startoff); + error = xfs_da_read_buf(tp, dp, map->br_startoff, + map->br_blockcount >= mp->m_dirblkfsbs ? + XFS_FSB_TO_DADDR(mp, map->br_startblock) : + -1, + &bp, XFS_DATA_FORK); + /* + * Should just skip over the data block instead + * of giving up. + */ + if (error) + break; /* XXX */ + /* + * Adjust the current amount of read-ahead: we just + * read a block that was previously ra. + */ + if (ra_current) + ra_current -= mp->m_dirblkfsbs; + /* + * Do we need more readahead? 
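+ * ra_want is recomputed from the caller's remaining buffer space,
+ * and i, ra_current and map_blocks are all counted in filesystem
+ * blocks, which is why the loop below steps by m_dirblkfsbs.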
+ */ + for (ra_index = ra_offset = i = 0; + ra_want > ra_current && i < map_blocks; + i += mp->m_dirblkfsbs) { + ASSERT(ra_index < map_valid); + /* + * Read-ahead a contiguous directory block. + */ + if (i > ra_current && + map[ra_index].br_blockcount >= + mp->m_dirblkfsbs) { + xfs_baread(mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, + map[ra_index].br_startblock + + ra_offset), + (int)BTOBB(mp->m_dirblksize)); + ra_current = i; + } + /* + * Read-ahead a non-contiguous directory block. + * This doesn't use our mapping, but this + * is a very rare case. + */ + else if (i > ra_current) { + (void)xfs_da_reada_buf(tp, dp, + map[ra_index].br_startoff + + ra_offset, XFS_DATA_FORK); + ra_current = i; + } + /* + * Advance offset through the mapping table. + */ + for (j = 0; j < mp->m_dirblkfsbs; j++) { + /* + * The rest of this extent but not + * more than a dir block. + */ + length = MIN(mp->m_dirblkfsbs, + (int)(map[ra_index].br_blockcount - + ra_offset)); + j += length; + ra_offset += length; + /* + * Advance to the next mapping if + * this one is used up. + */ + if (ra_offset == + map[ra_index].br_blockcount) { + ra_offset = 0; + ra_index++; + } + } + } + /* + * Having done a read, we need to set a new offset. + */ + newoff = XFS_DIR2_DB_OFF_TO_BYTE(mp, curdb, 0); + /* + * Start of the current block. + */ + if (curoff < newoff) + curoff = newoff; + /* + * Make sure we're in the right block. + */ + else if (curoff > newoff) + ASSERT(XFS_DIR2_BYTE_TO_DB(mp, curoff) == + curdb); + data = bp->data; + xfs_dir2_data_check(dp, bp); + /* + * Find our position in the block. + */ + ptr = (char *)&data->u; + byteoff = XFS_DIR2_BYTE_TO_OFF(mp, curoff); + /* + * Skip past the header. + */ + if (byteoff == 0) + curoff += (uint)sizeof(data->hdr); + /* + * Skip past entries until we reach our offset. + */ + else { + while ((char *)ptr - (char *)data < byteoff) { + dup = (xfs_dir2_data_unused_t *)ptr; + + if (INT_GET(dup->freetag, ARCH_CONVERT) + == XFS_DIR2_DATA_FREE_TAG) { + + length = INT_GET(dup->length, + ARCH_CONVERT); + ptr += length; + continue; + } + dep = (xfs_dir2_data_entry_t *)ptr; + length = + XFS_DIR2_DATA_ENTSIZE(dep->namelen); + ptr += length; + } + /* + * Now set our real offset. + */ + curoff = + XFS_DIR2_DB_OFF_TO_BYTE(mp, + XFS_DIR2_BYTE_TO_DB(mp, curoff), + (char *)ptr - (char *)data); + if (ptr >= (char *)data + mp->m_dirblksize) { + continue; + } + } + } + /* + * We have a pointer to an entry. + * Is it a live one? + */ + dup = (xfs_dir2_data_unused_t *)ptr; + /* + * No, it's unused, skip over it. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) + == XFS_DIR2_DATA_FREE_TAG) { + length = INT_GET(dup->length, ARCH_CONVERT); + ptr += length; + curoff += length; + continue; + } + + /* + * Copy the entry into the putargs, and try formatting it. + */ + dep = (xfs_dir2_data_entry_t *)ptr; + + p.namelen = dep->namelen; + + length = XFS_DIR2_DATA_ENTSIZE(p.namelen); + + p.cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); + +#if XFS_BIG_FILESYSTEMS + p.ino = INT_GET(dep->inumber, ARCH_CONVERT) + mp->m_inoadd; +#else + p.ino = INT_GET(dep->inumber, ARCH_CONVERT); +#endif + p.name = (char *)dep->name; + + error = p.put(&p); + + /* + * Won't fit. Return to caller. + */ + if (!p.done) { + eof = 0; + break; + } + /* + * Advance to next entry in the block. + */ + ptr += length; + curoff += length; + } + + /* + * All done. Set output offset value to current offset. 
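+ * curoff is clamped to XFS_DIR2_MAX_DATAPTR so the cookie handed
+ * back in uio_offset stays representable; a later call with that
+ * cookie then trips the eof check at the top of this routine.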
+ */ + *eofp = eof; + if (curoff > XFS_DIR2_DATAPTR_TO_BYTE(mp, XFS_DIR2_MAX_DATAPTR)) + uio->uio_offset = XFS_DIR2_MAX_DATAPTR; + else + uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff); + kmem_free(map, map_size * sizeof(*map)); + if (bp) + xfs_da_brelse(tp, bp); + return error; +} + +/* + * Initialize a new leaf block, leaf1 or leafn magic accepted. + */ +int +xfs_dir2_leaf_init( + xfs_da_args_t *args, /* operation arguments */ + xfs_dir2_db_t bno, /* directory block number */ + xfs_dabuf_t **bpp, /* out: leaf buffer */ + int magic) /* magic number for block */ +{ + xfs_dabuf_t *bp; /* leaf buffer */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + ASSERT(dp != NULL); + tp = args->trans; + mp = dp->i_mount; + ASSERT(bno >= XFS_DIR2_LEAF_FIRSTDB(mp) && + bno < XFS_DIR2_FREE_FIRSTDB(mp)); + /* + * Get the buffer for the block. + */ + error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, bno), -1, &bp, + XFS_DATA_FORK); + if (error) { + return error; + } + ASSERT(bp != NULL); + leaf = bp->data; + /* + * Initialize the header. + */ + INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, magic); + INT_ZERO(leaf->hdr.info.forw, ARCH_CONVERT); + INT_ZERO(leaf->hdr.info.back, ARCH_CONVERT); + INT_ZERO(leaf->hdr.count, ARCH_CONVERT); + INT_ZERO(leaf->hdr.stale, ARCH_CONVERT); + xfs_dir2_leaf_log_header(tp, bp); + /* + * If it's a leaf-format directory initialize the tail. + * In this case our caller has the real bests table to copy into + * the block. + */ + if (magic == XFS_DIR2_LEAF1_MAGIC) { + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + INT_ZERO(ltp->bestcount, ARCH_CONVERT); + xfs_dir2_leaf_log_tail(tp, bp); + } + *bpp = bp; + return 0; +} + +/* + * Log the bests entries indicated from a leaf1 block. + */ +void +xfs_dir2_leaf_log_bests( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* leaf buffer */ + int first, /* first entry to log */ + int last) /* last entry to log */ +{ + xfs_dir2_data_off_t *firstb; /* pointer to first entry */ + xfs_dir2_data_off_t *lastb; /* pointer to last entry */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf); + firstb = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT) + first; + lastb = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT) + last; + xfs_da_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf), + (uint)((char *)lastb - (char *)leaf + sizeof(*lastb) - 1)); +} + +/* + * Log the leaf entries indicated from a leaf1 or leafn block. 
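+ *
+ * For example, assuming a 16-byte leaf header and 8-byte leaf
+ * entries, logging entries 2 through 3 dirties bytes 32 through
+ * 47 of the block: from &ents[first] to the last byte of
+ * ents[last].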
+ */ +void +xfs_dir2_leaf_log_ents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* leaf buffer */ + int first, /* first entry to log */ + int last) /* last entry to log */ +{ + xfs_dir2_leaf_entry_t *firstlep; /* pointer to first entry */ + xfs_dir2_leaf_entry_t *lastlep; /* pointer to last entry */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || + INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + firstlep = &leaf->ents[first]; + lastlep = &leaf->ents[last]; + xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf), + (uint)((char *)lastlep - (char *)leaf + sizeof(*lastlep) - 1)); +} + +/* + * Log the header of the leaf1 or leafn block. + */ +void +xfs_dir2_leaf_log_header( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp) /* leaf buffer */ +{ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || + INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf), + (uint)(sizeof(leaf->hdr) - 1)); +} + +/* + * Log the tail of the leaf1 block. + */ +void +xfs_dir2_leaf_log_tail( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp) /* leaf buffer */ +{ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* filesystem mount point */ + + mp = tp->t_mountp; + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), + (uint)(mp->m_dirblksize - 1)); +} + +/* + * Look up the entry referred to by args in the leaf format directory. + * Most of the work is done by the xfs_dir2_leaf_lookup_int routine which + * is also used by the node-format code. + */ +int +xfs_dir2_leaf_lookup( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + int index; /* found entry index */ + xfs_dabuf_t *lbp; /* leaf buffer */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("leaf_lookup", args); + /* + * Look up name in the leaf block, returning both buffers and index. + */ + if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) { + return error; + } + tp = args->trans; + dp = args->dp; + xfs_dir2_leaf_check(dp, lbp); + leaf = lbp->data; + /* + * Get to the leaf entry and contained data entry address. + */ + lep = &leaf->ents[index]; + /* + * Point to the data entry. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)dbp->data + + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); + /* + * Return the found inode number. + */ + args->inumber = INT_GET(dep->inumber, ARCH_CONVERT); + xfs_da_brelse(tp, dbp); + xfs_da_brelse(tp, lbp); + return XFS_ERROR(EEXIST); +} + +/* + * Look up name/hash in the leaf block. + * Fill in indexp with the found index, and dbpp with the data buffer. + * If not found dbpp will be NULL, and ENOENT comes back. + * lbpp will always be filled in with the leaf buffer unless there's an error. 
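+ *
+ * Since different names can share a hash value, the scan may have
+ * to visit several leaf entries, and read more than one data
+ * block, before the name comparison finally matches.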
+ */ +static int /* error */ +xfs_dir2_leaf_lookup_int( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t **lbpp, /* out: leaf buffer */ + int *indexp, /* out: index in leaf block */ + xfs_dabuf_t **dbpp) /* out: data buffer */ +{ + xfs_dir2_db_t curdb; /* current data block number */ + xfs_dabuf_t *dbp; /* data buffer */ + xfs_dir2_data_entry_t *dep; /* data entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + int index; /* index in leaf block */ + xfs_dabuf_t *lbp; /* leaf buffer */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_db_t newdb; /* new data block number */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + /* + * Read the leaf block into the buffer. + */ + if ((error = + xfs_da_read_buf(tp, dp, mp->m_dirleafblk, -1, &lbp, + XFS_DATA_FORK))) { + return error; + } + *lbpp = lbp; + leaf = lbp->data; + xfs_dir2_leaf_check(dp, lbp); + /* + * Look for the first leaf entry with our hash value. + */ + index = xfs_dir2_leaf_search_hash(args, lbp); + /* + * Loop over all the entries with the right hash value + * looking to match the name. + */ + for (lep = &leaf->ents[index], dbp = NULL, curdb = -1; + index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + lep++, index++) { + /* + * Skip over stale leaf entries. + */ + if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Get the new data block number. + */ + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + /* + * If it's not the same as the old data block number, + * need to pitch the old one and read the new one. + */ + if (newdb != curdb) { + if (dbp) + xfs_da_brelse(tp, dbp); + if ((error = + xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, newdb), -1, &dbp, + XFS_DATA_FORK))) { + xfs_da_brelse(tp, lbp); + return error; + } + xfs_dir2_data_check(dp, dbp); + curdb = newdb; + } + /* + * Point to the data entry. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)dbp->data + + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + /* + * If it matches then return it. + */ + if (dep->namelen == args->namelen && + dep->name[0] == args->name[0] && + memcmp(dep->name, args->name, args->namelen) == 0) { + *dbpp = dbp; + *indexp = index; + return 0; + } + } + /* + * No match found, return ENOENT. + */ + ASSERT(args->oknoent); + if (dbp) + xfs_da_brelse(tp, dbp); + xfs_da_brelse(tp, lbp); + return XFS_ERROR(ENOENT); +} + +/* + * Remove an entry from a leaf format directory. 
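+ *
+ * In outline: mark the data entry free, stale the leaf entry by
+ * nulling its address rather than shuffling the array, refresh
+ * the bests table if the block's longest freespace changed, and
+ * if the data block ends up completely empty try to give it back.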
+ */ +int /* error */ +xfs_dir2_leaf_removename( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dir2_data_off_t *bestsp; /* leaf block best freespace */ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_db_t db; /* data block number */ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data entry structure */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + xfs_dir2_db_t i; /* temporary data block # */ + int index; /* index into leaf entries */ + xfs_dabuf_t *lbp; /* leaf buffer */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log data header */ + int needscan; /* need to rescan data frees */ + xfs_dir2_data_off_t oldbest; /* old value of best free */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("leaf_removename", args); + /* + * Lookup the leaf entry, get the leaf and data blocks read in. + */ + if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) { + return error; + } + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + leaf = lbp->data; + data = dbp->data; + xfs_dir2_data_check(dp, dbp); + /* + * Point to the leaf entry, use that to point to the data entry. + */ + lep = &leaf->ents[index]; + db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + dep = (xfs_dir2_data_entry_t *) + ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + needscan = needlog = 0; + oldbest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT); + ASSERT(INT_GET(bestsp[db], ARCH_CONVERT) == oldbest); + /* + * Mark the former data entry unused. + */ + xfs_dir2_data_make_free(tp, dbp, + (xfs_dir2_data_aoff_t)((char *)dep - (char *)data), + XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); + /* + * We just mark the leaf entry stale by putting a null in it. + */ + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); + xfs_dir2_leaf_log_header(tp, lbp); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + xfs_dir2_leaf_log_ents(tp, lbp, index, index); + /* + * Scan the freespace in the data block again if necessary, + * log the data block header if necessary. + */ + if (needscan) + xfs_dir2_data_freescan(mp, data, &needlog, NULL); + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + /* + * If the longest freespace in the data block has changed, + * put the new value in the bests table and log that. + */ + if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) != oldbest) { + INT_COPY(bestsp[db], data->hdr.bestfree[0].length, ARCH_CONVERT); + xfs_dir2_leaf_log_bests(tp, lbp, db, db); + } + xfs_dir2_data_check(dp, dbp); + /* + * If the data block is now empty then get rid of the data block. + */ + if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == + mp->m_dirblksize - (uint)sizeof(data->hdr)) { + ASSERT(db != mp->m_dirdatablk); + if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { + /* + * Nope, can't get rid of it because it caused + * allocation of a bmap btree block to do so. + * Just go on, returning success, leaving the + * empty block in place. 
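+ * A caller with no space reservation shows up as args->total == 0,
+ * which is why only that combination is swallowed below.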
+ */ + if (error == ENOSPC && args->total == 0) { + xfs_da_buf_done(dbp); + error = 0; + } + xfs_dir2_leaf_check(dp, lbp); + xfs_da_buf_done(lbp); + return error; + } + dbp = NULL; + /* + * If this is the last data block then compact the + * bests table by getting rid of entries. + */ + if (db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1) { + /* + * Look for the last active entry (i). + */ + for (i = db - 1; i > 0; i--) { + if (INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF) + break; + } + /* + * Copy the table down so inactive entries at the + * end are removed. + */ + memmove(&bestsp[db - i], bestsp, + (INT_GET(ltp->bestcount, ARCH_CONVERT) - (db - i)) * sizeof(*bestsp)); + INT_MOD(ltp->bestcount, ARCH_CONVERT, -(db - i)); + xfs_dir2_leaf_log_tail(tp, lbp); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + } else + INT_SET(bestsp[db], ARCH_CONVERT, NULLDATAOFF); + } + /* + * If the data block was not the first one, drop it. + */ + else if (db != mp->m_dirdatablk && dbp != NULL) { + xfs_da_buf_done(dbp); + dbp = NULL; + } + xfs_dir2_leaf_check(dp, lbp); + /* + * See if we can convert to block form. + */ + return xfs_dir2_leaf_to_block(args, lbp, dbp); +} + +/* + * Replace the inode number in a leaf format directory entry. + */ +int /* error */ +xfs_dir2_leaf_replace( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + int index; /* index of leaf entry */ + xfs_dabuf_t *lbp; /* leaf buffer */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("leaf_replace", args); + /* + * Look up the entry. + */ + if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) { + return error; + } + dp = args->dp; + leaf = lbp->data; + /* + * Point to the leaf entry, get data address from it. + */ + lep = &leaf->ents[index]; + /* + * Point to the data entry. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)dbp->data + + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); + ASSERT(args->inumber != INT_GET(dep->inumber, ARCH_CONVERT)); + /* + * Put the new inode number in, log it. + */ + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + tp = args->trans; + xfs_dir2_data_log_entry(tp, dbp, dep); + xfs_da_buf_done(dbp); + xfs_dir2_leaf_check(dp, lbp); + xfs_da_brelse(tp, lbp); + return 0; +} + +/* + * Return index in the leaf block (lbp) which is either the first + * one with this hash value, or if there are none, the insert point + * for that hash value. + */ +int /* index value */ +xfs_dir2_leaf_search_hash( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *lbp) /* leaf buffer */ +{ + xfs_dahash_t hash=0; /* hash from this entry */ + xfs_dahash_t hashwant; /* hash value looking for */ + int high; /* high leaf index */ + int low; /* low leaf index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + int mid=0; /* current leaf index */ + + leaf = lbp->data; +#ifndef __KERNEL__ + if (INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + return 0; +#endif + /* + * Note, the table cannot be empty, so we have to go through the loop. + * Binary search the leaf entries looking for our hash value. 
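+ *
+ * For example, given hash values [10, 20, 20, 20, 30] and a
+ * hashwant of 20, the search can stop on any of the three middle
+ * slots; the backup loop below then walks mid down to the first
+ * entry with that hash, so callers always see the leftmost match.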
+ */
+ for (lep = leaf->ents, low = 0, high = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1,
+ hashwant = args->hashval;
+ low <= high; ) {
+ mid = (low + high) >> 1;
+ if ((hash = INT_GET(lep[mid].hashval, ARCH_CONVERT)) == hashwant)
+ break;
+ if (hash < hashwant)
+ low = mid + 1;
+ else
+ high = mid - 1;
+ }
+ /*
+ * Found one, back up through all the equal hash values.
+ */
+ if (hash == hashwant) {
+ while (mid > 0 && INT_GET(lep[mid - 1].hashval, ARCH_CONVERT) == hashwant) {
+ mid--;
+ }
+ }
+ /*
+ * Need to point to an entry higher than ours.
+ */
+ else if (hash < hashwant)
+ mid++;
+ return mid;
+}
+
+/*
+ * Trim off a trailing data block. We know it's empty since the leaf
+ * freespace table says so.
+ */
+int /* error */
+xfs_dir2_leaf_trim_data(
+ xfs_da_args_t *args, /* operation arguments */
+ xfs_dabuf_t *lbp, /* leaf buffer */
+ xfs_dir2_db_t db) /* data block number */
+{
+ xfs_dir2_data_off_t *bestsp; /* leaf bests table */
+#ifdef DEBUG
+ xfs_dir2_data_t *data; /* data block structure */
+#endif
+ xfs_dabuf_t *dbp; /* data block buffer */
+ xfs_inode_t *dp; /* incore directory inode */
+ int error; /* error return value */
+ xfs_dir2_leaf_t *leaf; /* leaf structure */
+ xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
+ xfs_mount_t *mp; /* filesystem mount point */
+ xfs_trans_t *tp; /* transaction pointer */
+
+ dp = args->dp;
+ mp = dp->i_mount;
+ tp = args->trans;
+ /*
+ * Read the offending data block. We need its buffer.
+ */
+ if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, db), -1, &dbp,
+ XFS_DATA_FORK))) {
+ return error;
+ }
+#ifdef DEBUG
+ data = dbp->data;
+ ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC);
+#endif
+ /* XXX: this looks like an error: data is only declared and set
+ * when DEBUG is defined, yet the ASSERTs below dereference it.
+ * RMC 09/08/1999
+ */
+
+ leaf = lbp->data;
+ ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+ ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) ==
+ mp->m_dirblksize - (uint)sizeof(data->hdr));
+ ASSERT(db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1);
+ /*
+ * Get rid of the data block.
+ */
+ if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
+ ASSERT(error != ENOSPC);
+ xfs_da_brelse(tp, dbp);
+ return error;
+ }
+ /*
+ * Eliminate the last bests entry from the table.
+ */
+ bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT);
+ INT_MOD(ltp->bestcount, ARCH_CONVERT, -1);
+ memmove(&bestsp[1], &bestsp[0], INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(*bestsp));
+ xfs_dir2_leaf_log_tail(tp, lbp);
+ xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1);
+ return 0;
+}
+
+/*
+ * Convert node form directory to leaf form directory.
+ * The root of the node form dir needs to already be a LEAFN block.
+ * Just return if we can't do anything.
+ */
+int /* error */
+xfs_dir2_node_to_leaf(
+ xfs_da_state_t *state) /* directory operation state */
+{
+ xfs_da_args_t *args; /* operation arguments */
+ xfs_inode_t *dp; /* incore directory inode */
+ int error; /* error return code */
+ xfs_dabuf_t *fbp; /* buffer for freespace block */
+ xfs_fileoff_t fo; /* freespace file offset */
+ xfs_dir2_free_t *free; /* freespace structure */
+ xfs_dabuf_t *lbp; /* buffer for leaf block */
+ xfs_dir2_leaf_tail_t *ltp; /* tail of leaf structure */
+ xfs_dir2_leaf_t *leaf; /* leaf structure */
+ xfs_mount_t *mp; /* filesystem mount point */
+ int rval; /* successful free trim? */
+ xfs_trans_t *tp; /* transaction pointer */
+
+ /*
+ * There's more than a leaf level in the btree, so there must
+ * be multiple leafn blocks. Give up.
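+ * (state->path.active counts the levels on the lookup path, so
+ * anything above 1 means interior da btree nodes still exist.)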
+ */ + if (state->path.active > 1) + return 0; + args = state->args; + xfs_dir2_trace_args("node_to_leaf", args); + mp = state->mp; + dp = args->dp; + tp = args->trans; + /* + * Get the last offset in the file. + */ + if ((error = xfs_bmap_last_offset(tp, dp, &fo, XFS_DATA_FORK))) { + return error; + } + fo -= mp->m_dirblkfsbs; + /* + * If there are freespace blocks other than the first one, + * take this opportunity to remove trailing empty freespace blocks + * that may have been left behind during no-space-reservation + * operations. + */ + while (fo > mp->m_dirfreeblk) { + if ((error = xfs_dir2_node_trim_free(args, fo, &rval))) { + return error; + } + if (rval) + fo -= mp->m_dirblkfsbs; + else + return 0; + } + /* + * Now find the block just before the freespace block. + */ + if ((error = xfs_bmap_last_before(tp, dp, &fo, XFS_DATA_FORK))) { + return error; + } + /* + * If it's not the single leaf block, give up. + */ + if (XFS_FSB_TO_B(mp, fo) > XFS_DIR2_LEAF_OFFSET + mp->m_dirblksize) + return 0; + lbp = state->path.blk[0].bp; + leaf = lbp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + /* + * Read the freespace block. + */ + if ((error = xfs_da_read_buf(tp, dp, mp->m_dirfreeblk, -1, &fbp, + XFS_DATA_FORK))) { + return error; + } + free = fbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_ISZERO(free->hdr.firstdb, ARCH_CONVERT)); + /* + * Now see if the leafn and free data will fit in a leaf1. + * If not, release the buffer and give up. + */ + if ((uint)sizeof(leaf->hdr) + + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)) * (uint)sizeof(leaf->ents[0]) + + INT_GET(free->hdr.nvalid, ARCH_CONVERT) * (uint)sizeof(leaf->bests[0]) + + (uint)sizeof(leaf->tail) > + mp->m_dirblksize) { + xfs_da_brelse(tp, fbp); + return 0; + } + /* + * If the leaf has any stale entries in it, compress them out. + * The compact routine will log the header. + */ + if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) + xfs_dir2_leaf_compact(args, lbp); + else + xfs_dir2_leaf_log_header(tp, lbp); + INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAF1_MAGIC); + /* + * Set up the leaf tail from the freespace block. + */ + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + INT_COPY(ltp->bestcount, free->hdr.nvalid, ARCH_CONVERT); + /* + * Set up the leaf bests table. + */ + memcpy(XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT), free->bests, + INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(leaf->bests[0])); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + xfs_dir2_leaf_log_tail(tp, lbp); + xfs_dir2_leaf_check(dp, lbp); + /* + * Get rid of the freespace block. + */ + error = xfs_dir2_shrink_inode(args, XFS_DIR2_FREE_FIRSTDB(mp), fbp); + if (error) { + /* + * This can't fail here because it can only happen when + * punching out the middle of an extent, and this is an + * isolated block. + */ + ASSERT(error != ENOSPC); + return error; + } + fbp = NULL; + /* + * Now see if we can convert the single-leaf directory + * down to a block form directory. + * This routine always kills the dabuf for the leaf, so + * eliminate it from the path. 
+ */ + error = xfs_dir2_leaf_to_block(args, lbp, NULL); + state->path.blk[0].bp = NULL; + return error; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_leaf.h linux.21rc5-ac2/fs/xfs/xfs_dir2_leaf.h --- linux.21rc5/fs/xfs/xfs_dir2_leaf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_leaf.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_LEAF_H__ +#define __XFS_DIR2_LEAF_H__ + +/* + * Directory version 2, leaf block structures. + */ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * Constants. + */ + +/* + * Offset of the leaf/node space. First block in this space + * is the btree root. + */ +#define XFS_DIR2_LEAF_SPACE 1 +#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE) +#define XFS_DIR2_LEAF_FIRSTDB(mp) \ + XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_LEAF_OFFSET) + +/* + * Types. + */ + +/* + * Offset in data space of a data entry. + */ +typedef __uint32_t xfs_dir2_dataptr_t; +#define XFS_DIR2_MAX_DATAPTR ((xfs_dir2_dataptr_t)0x7fffffff) +#define XFS_DIR2_NULL_DATAPTR ((xfs_dir2_dataptr_t)0) + +/* + * Structures. + */ + +/* + * Leaf block header. + */ +typedef struct xfs_dir2_leaf_hdr { + xfs_da_blkinfo_t info; /* header for da routines */ + __uint16_t count; /* count of entries */ + __uint16_t stale; /* count of stale entries */ +} xfs_dir2_leaf_hdr_t; + +/* + * Leaf block entry. + */ +typedef struct xfs_dir2_leaf_entry { + xfs_dahash_t hashval; /* hash value of name */ + xfs_dir2_dataptr_t address; /* address of data entry */ +} xfs_dir2_leaf_entry_t; + +/* + * Leaf block tail. + */ +typedef struct xfs_dir2_leaf_tail { + __uint32_t bestcount; +} xfs_dir2_leaf_tail_t; + +/* + * Leaf block. + * bests and tail are at the end of the block for single-leaf only + * (magic = XFS_DIR2_LEAF1_MAGIC not XFS_DIR2_LEAFN_MAGIC). + */ +typedef struct xfs_dir2_leaf { + xfs_dir2_leaf_hdr_t hdr; /* leaf header */ + xfs_dir2_leaf_entry_t ents[1]; /* entries */ + /* ... 
*/ + xfs_dir2_data_off_t bests[1]; /* best free counts */ + xfs_dir2_leaf_tail_t tail; /* leaf tail */ +} xfs_dir2_leaf_t; + +/* + * Macros. + * The DB blocks are logical directory block numbers, not filesystem blocks. + */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_MAX_LEAF_ENTS) +int +xfs_dir2_max_leaf_ents(struct xfs_mount *mp); +#define XFS_DIR2_MAX_LEAF_ENTS(mp) \ + xfs_dir2_max_leaf_ents(mp) +#else +#define XFS_DIR2_MAX_LEAF_ENTS(mp) \ + ((int)(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_leaf_hdr_t)) / \ + (uint)sizeof(xfs_dir2_leaf_entry_t))) +#endif + +/* + * Get address of the bestcount field in the single-leaf block. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_LEAF_TAIL_P) +xfs_dir2_leaf_tail_t * +xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp); +#define XFS_DIR2_LEAF_TAIL_P(mp,lp) \ + xfs_dir2_leaf_tail_p(mp, lp) +#else +#define XFS_DIR2_LEAF_TAIL_P(mp,lp) \ + ((xfs_dir2_leaf_tail_t *)\ + ((char *)(lp) + (mp)->m_dirblksize - \ + (uint)sizeof(xfs_dir2_leaf_tail_t))) +#endif + +/* + * Get address of the bests array in the single-leaf block. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_LEAF_BESTS_P) +xfs_dir2_data_off_t * +xfs_dir2_leaf_bests_p_arch(xfs_dir2_leaf_tail_t *ltp, xfs_arch_t arch); +#define XFS_DIR2_LEAF_BESTS_P_ARCH(ltp,arch) xfs_dir2_leaf_bests_p_arch(ltp,arch) +#else +#define XFS_DIR2_LEAF_BESTS_P_ARCH(ltp,arch) \ + ((xfs_dir2_data_off_t *)(ltp) - INT_GET((ltp)->bestcount, arch)) +#endif + +/* + * Convert dataptr to byte in file space + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATAPTR_TO_BYTE) +xfs_dir2_off_t +xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp); +#define XFS_DIR2_DATAPTR_TO_BYTE(mp,dp) xfs_dir2_dataptr_to_byte(mp, dp) +#else +#define XFS_DIR2_DATAPTR_TO_BYTE(mp,dp) \ + ((xfs_dir2_off_t)(dp) << XFS_DIR2_DATA_ALIGN_LOG) +#endif + +/* + * Convert byte in file space to dataptr. It had better be aligned. 
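+ *
+ * For example, assuming XFS_DIR2_DATA_ALIGN_LOG is 3 (8-byte
+ * alignment of data entries), byte offset 0x28 becomes dataptr 5,
+ * and XFS_DIR2_DATAPTR_TO_BYTE turns 5 back into 0x28.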
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_DATAPTR) +xfs_dir2_dataptr_t +xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by); +#define XFS_DIR2_BYTE_TO_DATAPTR(mp,by) xfs_dir2_byte_to_dataptr(mp,by) +#else +#define XFS_DIR2_BYTE_TO_DATAPTR(mp,by) \ + ((xfs_dir2_dataptr_t)((by) >> XFS_DIR2_DATA_ALIGN_LOG)) +#endif + +/* + * Convert dataptr to a block number + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATAPTR_TO_DB) +xfs_dir2_db_t +xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp); +#define XFS_DIR2_DATAPTR_TO_DB(mp,dp) xfs_dir2_dataptr_to_db(mp, dp) +#else +#define XFS_DIR2_DATAPTR_TO_DB(mp,dp) \ + XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp)) +#endif + +/* + * Convert dataptr to a byte offset in a block + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATAPTR_TO_OFF) +xfs_dir2_data_aoff_t +xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp); +#define XFS_DIR2_DATAPTR_TO_OFF(mp,dp) xfs_dir2_dataptr_to_off(mp, dp) +#else +#define XFS_DIR2_DATAPTR_TO_OFF(mp,dp) \ + XFS_DIR2_BYTE_TO_OFF(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp)) +#endif + +/* + * Convert block and offset to byte in space + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_OFF_TO_BYTE) +xfs_dir2_off_t +xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db, + xfs_dir2_data_aoff_t o); +#define XFS_DIR2_DB_OFF_TO_BYTE(mp,db,o) \ + xfs_dir2_db_off_to_byte(mp, db, o) +#else +#define XFS_DIR2_DB_OFF_TO_BYTE(mp,db,o) \ + (((xfs_dir2_off_t)(db) << \ + ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)) + (o)) +#endif + +/* + * Convert byte in space to (DB) block + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_DB) +xfs_dir2_db_t xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by); +#define XFS_DIR2_BYTE_TO_DB(mp,by) xfs_dir2_byte_to_db(mp, by) +#else +#define XFS_DIR2_BYTE_TO_DB(mp,by) \ + ((xfs_dir2_db_t)((by) >> \ + ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog))) +#endif + +/* + * Convert byte in space to (DA) block + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_DA) +xfs_dablk_t xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by); +#define XFS_DIR2_BYTE_TO_DA(mp,by) xfs_dir2_byte_to_da(mp, by) +#else +#define XFS_DIR2_BYTE_TO_DA(mp,by) \ + XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, by)) +#endif + +/* + * Convert byte in space to offset in a block + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_OFF) +xfs_dir2_data_aoff_t +xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by); +#define XFS_DIR2_BYTE_TO_OFF(mp,by) xfs_dir2_byte_to_off(mp, by) +#else +#define XFS_DIR2_BYTE_TO_OFF(mp,by) \ + ((xfs_dir2_data_aoff_t)((by) & \ + ((1 << ((mp)->m_sb.sb_blocklog + \ + (mp)->m_sb.sb_dirblklog)) - 1))) +#endif + +/* + * Convert block and offset to dataptr + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_OFF_TO_DATAPTR) +xfs_dir2_dataptr_t +xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db, + xfs_dir2_data_aoff_t o); +#define XFS_DIR2_DB_OFF_TO_DATAPTR(mp,db,o) \ + xfs_dir2_db_off_to_dataptr(mp, db, o) +#else +#define XFS_DIR2_DB_OFF_TO_DATAPTR(mp,db,o) \ + XFS_DIR2_BYTE_TO_DATAPTR(mp, XFS_DIR2_DB_OFF_TO_BYTE(mp, db, o)) +#endif + +/* + * Convert block (DB) to block (dablk) + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_TO_DA) +xfs_dablk_t xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db); +#define XFS_DIR2_DB_TO_DA(mp,db) xfs_dir2_db_to_da(mp, db) +#else 
+#define XFS_DIR2_DB_TO_DA(mp,db) \ + ((xfs_dablk_t)((db) << (mp)->m_sb.sb_dirblklog)) +#endif + +/* + * Convert block (dablk) to block (DB) + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DA_TO_DB) +xfs_dir2_db_t xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da); +#define XFS_DIR2_DA_TO_DB(mp,da) xfs_dir2_da_to_db(mp, da) +#else +#define XFS_DIR2_DA_TO_DB(mp,da) \ + ((xfs_dir2_db_t)((da) >> (mp)->m_sb.sb_dirblklog)) +#endif + +/* + * Convert block (dablk) to byte offset in space + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DA_TO_BYTE) +xfs_dir2_off_t xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da); +#define XFS_DIR2_DA_TO_BYTE(mp,da) xfs_dir2_da_to_byte(mp, da) +#else +#define XFS_DIR2_DA_TO_BYTE(mp,da) \ + XFS_DIR2_DB_OFF_TO_BYTE(mp, XFS_DIR2_DA_TO_DB(mp, da), 0) +#endif + +/* + * Function declarations. + */ + +extern int + xfs_dir2_block_to_leaf(struct xfs_da_args *args, struct xfs_dabuf *dbp); + +extern int + xfs_dir2_leaf_addname(struct xfs_da_args *args); + +extern void + xfs_dir2_leaf_compact(struct xfs_da_args *args, struct xfs_dabuf *bp); + +extern void + xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp, + int *lowstalep, int *highstalep, int *lowlogp, + int *highlogp); + +extern int + xfs_dir2_leaf_getdents(struct xfs_trans *tp, struct xfs_inode *dp, + struct uio *uio, int *eofp, struct xfs_dirent *dbp, + xfs_dir2_put_t put); + +extern int + xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno, + struct xfs_dabuf **bpp, int magic); + +extern void + xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp, + int first, int last); + +extern void + xfs_dir2_leaf_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp, + int first, int last); + +extern void + xfs_dir2_leaf_log_header(struct xfs_trans *tp, struct xfs_dabuf *bp); + +extern void + xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_dabuf *bp); + +extern int + xfs_dir2_leaf_lookup(struct xfs_da_args *args); + +extern int + xfs_dir2_leaf_removename(struct xfs_da_args *args); + +extern int + xfs_dir2_leaf_replace(struct xfs_da_args *args); + +extern int + xfs_dir2_leaf_search_hash(struct xfs_da_args *args, + struct xfs_dabuf *lbp); +extern int + xfs_dir2_leaf_trim_data(struct xfs_da_args *args, struct xfs_dabuf *lbp, xfs_dir2_db_t db); + +extern int + xfs_dir2_node_to_leaf(struct xfs_da_state *state); + +#endif /* __XFS_DIR2_LEAF_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_node.c linux.21rc5-ac2/fs/xfs/xfs_dir2_node.c --- linux.21rc5/fs/xfs/xfs_dir2_node.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_node.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,2023 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_node.c + * XFS directory implementation, version 2, node form files + * See data structures in xfs_dir2_node.h and xfs_da_btree.h. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_node.h" +#include "xfs_dir2_trace.h" +#include "xfs_error.h" + +/* + * Function declarations. + */ +static void xfs_dir2_free_log_header(xfs_trans_t *tp, xfs_dabuf_t *bp); +static int xfs_dir2_leafn_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index); +#ifdef DEBUG +static void xfs_dir2_leafn_check(xfs_inode_t *dp, xfs_dabuf_t *bp); +#else +#define xfs_dir2_leafn_check(dp, bp) +#endif +static void xfs_dir2_leafn_moveents(xfs_da_args_t *args, xfs_dabuf_t *bp_s, + int start_s, xfs_dabuf_t *bp_d, int start_d, + int count); +static void xfs_dir2_leafn_rebalance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2); +static int xfs_dir2_leafn_remove(xfs_da_args_t *args, xfs_dabuf_t *bp, + int index, xfs_da_state_blk_t *dblk, + int *rval); +static int xfs_dir2_node_addname_int(xfs_da_args_t *args, + xfs_da_state_blk_t *fblk); + +/* + * Log entries from a freespace block. + */ +void +xfs_dir2_free_log_bests( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* freespace buffer */ + int first, /* first entry to log */ + int last) /* last entry to log */ +{ + xfs_dir2_free_t *free; /* freespace structure */ + + free = bp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + xfs_da_log_buf(tp, bp, + (uint)((char *)&free->bests[first] - (char *)free), + (uint)((char *)&free->bests[last] - (char *)free + + sizeof(free->bests[0]) - 1)); +} + +/* + * Log header from a freespace block. + */ +static void +xfs_dir2_free_log_header( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp) /* freespace buffer */ +{ + xfs_dir2_free_t *free; /* freespace structure */ + + free = bp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free), + (uint)(sizeof(xfs_dir2_free_hdr_t) - 1)); +} + +/* + * Convert a leaf-format directory to a node-format directory. + * We need to change the magic number of the leaf block, and copy + * the freespace table out of the leaf block into its own block. 
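+ *
+ * For example, a hypothetical bests table [2040, NULLDATAOFF, 1000]
+ * is copied with hdr.nvalid = 3 but hdr.nused = 2, since only
+ * entries other than NULLDATAOFF count as live.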
+ */
+int /* error */
+xfs_dir2_leaf_to_node(
+ xfs_da_args_t *args, /* operation arguments */
+ xfs_dabuf_t *lbp) /* leaf buffer */
+{
+ xfs_inode_t *dp; /* incore directory inode */
+ int error; /* error return value */
+ xfs_dabuf_t *fbp; /* freespace buffer */
+ xfs_dir2_db_t fdb; /* freespace block number */
+ xfs_dir2_free_t *free; /* freespace structure */
+ xfs_dir2_data_off_t *from; /* pointer to freespace entry */
+ int i; /* leaf freespace index */
+ xfs_dir2_leaf_t *leaf; /* leaf structure */
+ xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
+ xfs_mount_t *mp; /* filesystem mount point */
+ int n; /* count of live freespc ents */
+ xfs_dir2_data_off_t off; /* freespace entry value */
+ xfs_dir2_data_off_t *to; /* pointer to freespace entry */
+ xfs_trans_t *tp; /* transaction pointer */
+
+ xfs_dir2_trace_args_b("leaf_to_node", args, lbp);
+ dp = args->dp;
+ mp = dp->i_mount;
+ tp = args->trans;
+ /*
+ * Add a freespace block to the directory.
+ */
+ if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_FREE_SPACE, &fdb))) {
+ return error;
+ }
+ ASSERT(fdb == XFS_DIR2_FREE_FIRSTDB(mp));
+ /*
+ * Get the buffer for the new freespace block.
+ */
+ if ((error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb), -1, &fbp,
+ XFS_DATA_FORK))) {
+ return error;
+ }
+ ASSERT(fbp != NULL);
+ free = fbp->data;
+ leaf = lbp->data;
+ ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
+ /*
+ * Initialize the freespace block header.
+ */
+ INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC);
+ INT_ZERO(free->hdr.firstdb, ARCH_CONVERT);
+ ASSERT(INT_GET(ltp->bestcount, ARCH_CONVERT) <= (uint)dp->i_d.di_size / mp->m_dirblksize);
+ INT_COPY(free->hdr.nvalid, ltp->bestcount, ARCH_CONVERT);
+ /*
+ * Copy freespace entries from the leaf block to the new block.
+ * Count active entries.
+ */
+ for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT), to = free->bests;
+ i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++, from++, to++) {
+ if ((off = INT_GET(*from, ARCH_CONVERT)) != NULLDATAOFF)
+ n++;
+ INT_SET(*to, ARCH_CONVERT, off);
+ }
+ INT_SET(free->hdr.nused, ARCH_CONVERT, n);
+ INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAFN_MAGIC);
+ /*
+ * Log everything.
+ */
+ xfs_dir2_leaf_log_header(tp, lbp);
+ xfs_dir2_free_log_header(tp, fbp);
+ xfs_dir2_free_log_bests(tp, fbp, 0, INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1);
+ xfs_da_buf_done(fbp);
+ xfs_dir2_leafn_check(dp, lbp);
+ return 0;
+}
+
+/*
+ * Add a leaf entry to a leaf block in a node-form directory.
+ * The other work necessary is done from the caller.
+ */
+static int /* error */
+xfs_dir2_leafn_add(
+ xfs_dabuf_t *bp, /* leaf buffer */
+ xfs_da_args_t *args, /* operation arguments */
+ int index) /* insertion pt for new entry */
+{
+ int compact; /* compacting stale leaves */
+ xfs_inode_t *dp; /* incore directory inode */
+ int highstale; /* next stale entry */
+ xfs_dir2_leaf_t *leaf; /* leaf structure */
+ xfs_dir2_leaf_entry_t *lep; /* leaf entry */
+ int lfloghigh; /* high leaf entry logging */
+ int lfloglow; /* low leaf entry logging */
+ int lowstale; /* previous stale entry */
+ xfs_mount_t *mp; /* filesystem mount point */
+ xfs_trans_t *tp; /* transaction pointer */
+
+ xfs_dir2_trace_args_sb("leafn_add", args, index, bp);
+ dp = args->dp;
+ mp = dp->i_mount;
+ tp = args->trans;
+ leaf = bp->data;
+ /*
+ * If the block already holds the maximum number of leaf entries,
+ * then with no stale entries the new one won't fit, so the
+ * caller will do a split. If there are stale entries we'll do
+ * a compact.
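+ *
+ * That is: with count == XFS_DIR2_MAX_LEAF_ENTS and stale == 0
+ * this returns ENOSPC; with stale == 1 the single stale slot is
+ * reused in place; with stale > 1 we compact down to one stale
+ * slot first.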
+ */ + if (INT_GET(leaf->hdr.count, ARCH_CONVERT) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { + if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) + return XFS_ERROR(ENOSPC); + compact = INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1; + } else + compact = 0; + ASSERT(index == 0 || INT_GET(leaf->ents[index - 1].hashval, ARCH_CONVERT) <= args->hashval); + ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + INT_GET(leaf->ents[index].hashval, ARCH_CONVERT) >= args->hashval); + + if (args->justcheck) + return 0; + + /* + * Compact out all but one stale leaf entry. Leaves behind + * the entry closest to index. + */ + if (compact) { + xfs_dir2_leaf_compact_x1(bp, &index, &lowstale, &highstale, + &lfloglow, &lfloghigh); + } + /* + * Set impossible logging indices for this case. + */ + else if (!INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) { + lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); + lfloghigh = -1; + } + /* + * No stale entries, just insert a space for the new entry. + */ + if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) { + lep = &leaf->ents[index]; + if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + memmove(lep + 1, lep, + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); + lfloglow = index; + lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); + } + /* + * There are stale entries. We'll use one for the new entry. + */ + else { + /* + * If we didn't do a compact then we need to figure out + * which stale entry will be used. + */ + if (compact == 0) { + /* + * Find first stale entry before our insertion point. + */ + for (lowstale = index - 1; + lowstale >= 0 && + INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != + XFS_DIR2_NULL_DATAPTR; + lowstale--) + continue; + /* + * Find next stale entry after insertion point. + * Stop looking if the answer would be worse than + * lowstale already found. + */ + for (highstale = index; + highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && + INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != + XFS_DIR2_NULL_DATAPTR && + (lowstale < 0 || + index - lowstale - 1 >= highstale - index); + highstale++) + continue; + } + /* + * Using the low stale entry. + * Shift entries up toward the stale slot. + */ + if (lowstale >= 0 && + (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + index - lowstale - 1 < highstale - index)) { + ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == + XFS_DIR2_NULL_DATAPTR); + ASSERT(index - lowstale - 1 >= 0); + if (index - lowstale - 1 > 0) + memmove(&leaf->ents[lowstale], + &leaf->ents[lowstale + 1], + (index - lowstale - 1) * sizeof(*lep)); + lep = &leaf->ents[index - 1]; + lfloglow = MIN(lowstale, lfloglow); + lfloghigh = MAX(index - 1, lfloghigh); + } + /* + * Using the high stale entry. + * Shift entries down toward the stale slot. + */ + else { + ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == + XFS_DIR2_NULL_DATAPTR); + ASSERT(highstale - index >= 0); + if (highstale - index > 0) + memmove(&leaf->ents[index + 1], + &leaf->ents[index], + (highstale - index) * sizeof(*lep)); + lep = &leaf->ents[index]; + lfloglow = MIN(index, lfloglow); + lfloghigh = MAX(highstale, lfloghigh); + } + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); + } + /* + * Insert the new entry, log everything. 
+ */ + INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, args->blkno, args->index)); + xfs_dir2_leaf_log_header(tp, bp); + xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh); + xfs_dir2_leafn_check(dp, bp); + return 0; +} + +#ifdef DEBUG +/* + * Check internal consistency of a leafn block. + */ +void +xfs_dir2_leafn_check( + xfs_inode_t *dp, /* incore directory inode */ + xfs_dabuf_t *bp) /* leaf buffer */ +{ + int i; /* leaf index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_mount_t *mp; /* filesystem mount point */ + int stale; /* count of stale leaves */ + + leaf = bp->data; + mp = dp->i_mount; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); + for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { + if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) { + ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= + INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); + } + if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + stale++; + } + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); +} +#endif /* DEBUG */ + +/* + * Return the last hash value in the leaf. + * Stale entries are ok. + */ +xfs_dahash_t /* hash value */ +xfs_dir2_leafn_lasthash( + xfs_dabuf_t *bp, /* leaf buffer */ + int *count) /* count of entries in leaf */ +{ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + if (count) + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + if (INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + return 0; + return INT_GET(leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); +} + +/* + * Look up a leaf entry in a node-format leaf block. + * If this is an addname then the extrablk in state is a freespace block, + * otherwise it's a data block. + */ +int +xfs_dir2_leafn_lookup_int( + xfs_dabuf_t *bp, /* leaf buffer */ + xfs_da_args_t *args, /* operation arguments */ + int *indexp, /* out: leaf entry index */ + xfs_da_state_t *state) /* state to fill in */ +{ + xfs_dabuf_t *curbp; /* current data/free buffer */ + xfs_dir2_db_t curdb; /* current data block number */ + xfs_dir2_db_t curfdb; /* current free block number */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return value */ + int fi; /* free entry index */ + xfs_dir2_free_t *free=NULL; /* free block structure */ + int index; /* leaf entry index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int length=0; /* length of new data entry */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_db_t newdb; /* new data block number */ + xfs_dir2_db_t newfdb; /* new free block number */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); +#ifdef __KERNEL__ + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) > 0); +#endif + xfs_dir2_leafn_check(dp, bp); + /* + * Look up the hash value in the leaf entries. + */ + index = xfs_dir2_leaf_search_hash(args, bp); + /* + * Do we have a buffer coming in? 
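+ * state->extrablk lets consecutive leafn lookups hand a data or
+ * freespace block buffer from one call to the next instead of
+ * re-reading it.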
+ */ + if (state->extravalid) + curbp = state->extrablk.bp; + else + curbp = NULL; + /* + * For addname, it's a free block buffer, get the block number. + */ + if (args->addname) { + curfdb = curbp ? state->extrablk.blkno : -1; + curdb = -1; + length = XFS_DIR2_DATA_ENTSIZE(args->namelen); + if ((free = (curbp ? curbp->data : NULL))) + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + } + /* + * For others, it's a data block buffer, get the block number. + */ + else { + curfdb = -1; + curdb = curbp ? state->extrablk.blkno : -1; + } + /* + * Loop over leaf entries with the right hash value. + */ + for (lep = &leaf->ents[index]; + index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + lep++, index++) { + /* + * Skip stale leaf entries. + */ + if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Pull the data block number from the entry. + */ + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + /* + * For addname, we're looking for a place to put the new entry. + * We want to use a data block with an entry of equal + * hash value to ours if there is one with room. + */ + if (args->addname) { + /* + * If this block isn't the data block we already have + * in hand, take a look at it. + */ + if (newdb != curdb) { + curdb = newdb; + /* + * Convert the data block to the free block + * holding its freespace information. + */ + newfdb = XFS_DIR2_DB_TO_FDB(mp, newdb); + /* + * If it's not the one we have in hand, + * read it in. + */ + if (newfdb != curfdb) { + /* + * If we had one before, drop it. + */ + if (curbp) + xfs_da_brelse(tp, curbp); + /* + * Read the free block. + */ + if ((error = xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, + newfdb), + -1, &curbp, + XFS_DATA_FORK))) { + return error; + } + curfdb = newfdb; + free = curbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == + XFS_DIR2_FREE_MAGIC); + ASSERT((INT_GET(free->hdr.firstdb, ARCH_CONVERT) % + XFS_DIR2_MAX_FREE_BESTS(mp)) == + 0); + ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) <= curdb); + ASSERT(curdb < + INT_GET(free->hdr.firstdb, ARCH_CONVERT) + + INT_GET(free->hdr.nvalid, ARCH_CONVERT)); + } + /* + * Get the index for our entry. + */ + fi = XFS_DIR2_DB_TO_FDINDEX(mp, curdb); + /* + * If it has room, return it. + */ + if (unlikely(INT_GET(free->bests[fi], ARCH_CONVERT) == NULLDATAOFF)) { + XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + if (INT_GET(free->bests[fi], ARCH_CONVERT) >= length) { + *indexp = index; + state->extravalid = 1; + state->extrablk.bp = curbp; + state->extrablk.blkno = curfdb; + state->extrablk.index = fi; + state->extrablk.magic = + XFS_DIR2_FREE_MAGIC; + ASSERT(args->oknoent); + return XFS_ERROR(ENOENT); + } + } + } + /* + * Not adding a new entry, so we really want to find + * the name given to us. + */ + else { + /* + * If it's a different data block, go get it. + */ + if (newdb != curdb) { + /* + * If we had a block before, drop it. + */ + if (curbp) + xfs_da_brelse(tp, curbp); + /* + * Read the data block. + */ + if ((error = + xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, newdb), -1, + &curbp, XFS_DATA_FORK))) { + return error; + } + xfs_dir2_data_check(dp, curbp); + curdb = newdb; + } + /* + * Point to the data entry. 
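+ *
+ * Aside (notation illustrative only): lep->address is a packed
+ * 32-bit cookie. Data entries are 8-byte aligned, so the low three
+ * offset bits are dropped; with blog standing for the log2 of the
+ * directory block size:
+ *
+ *	dataptr = ((db << blog) | off) >> 3
+ *	db      = dataptr >> (blog - 3)
+ *	off     = (dataptr << 3) & (blocksize - 1)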
+ */ + dep = (xfs_dir2_data_entry_t *) + ((char *)curbp->data + + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + /* + * Compare the entry, return it if it matches. + */ + if (dep->namelen == args->namelen && + dep->name[0] == args->name[0] && + memcmp(dep->name, args->name, args->namelen) == 0) { + args->inumber = INT_GET(dep->inumber, ARCH_CONVERT); + *indexp = index; + state->extravalid = 1; + state->extrablk.bp = curbp; + state->extrablk.blkno = curdb; + state->extrablk.index = + (int)((char *)dep - + (char *)curbp->data); + state->extrablk.magic = XFS_DIR2_DATA_MAGIC; + return XFS_ERROR(EEXIST); + } + } + } + /* + * Didn't find a match. + * If we are holding a buffer, give it back in case our caller + * finds it useful. + */ + if ((state->extravalid = (curbp != NULL))) { + state->extrablk.bp = curbp; + state->extrablk.index = -1; + /* + * For addname, giving back a free block. + */ + if (args->addname) { + state->extrablk.blkno = curfdb; + state->extrablk.magic = XFS_DIR2_FREE_MAGIC; + } + /* + * For other callers, giving back a data block. + */ + else { + state->extrablk.blkno = curdb; + state->extrablk.magic = XFS_DIR2_DATA_MAGIC; + } + } + /* + * Return the final index, that will be the insertion point. + */ + *indexp = index; + ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); + return XFS_ERROR(ENOENT); +} + +/* + * Move count leaf entries from source to destination leaf. + * Log entries and headers. Stale entries are preserved. + */ +static void +xfs_dir2_leafn_moveents( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *bp_s, /* source leaf buffer */ + int start_s, /* source leaf index */ + xfs_dabuf_t *bp_d, /* destination leaf buffer */ + int start_d, /* destination leaf index */ + int count) /* count of leaves to copy */ +{ + xfs_dir2_leaf_t *leaf_d; /* destination leaf structure */ + xfs_dir2_leaf_t *leaf_s; /* source leaf structure */ + int stale; /* count stale leaves copied */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_bibii("leafn_moveents", args, bp_s, start_s, bp_d, + start_d, count); + /* + * Silently return if nothing to do. + */ + if (count == 0) { + return; + } + tp = args->trans; + leaf_s = bp_s->data; + leaf_d = bp_d->data; + /* + * If the destination index is not the end of the current + * destination leaf entries, open up a hole in the destination + * to hold the new entries. + */ + if (start_d < INT_GET(leaf_d->hdr.count, ARCH_CONVERT)) { + memmove(&leaf_d->ents[start_d + count], &leaf_d->ents[start_d], + (INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - start_d) * + sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, bp_d, start_d + count, + count + INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - 1); + } + /* + * If the source has stale leaves, count the ones in the copy range + * so we can update the header correctly. + */ + if (!INT_ISZERO(leaf_s->hdr.stale, ARCH_CONVERT)) { + int i; /* temp leaf index */ + + for (i = start_s, stale = 0; i < start_s + count; i++) { + if (INT_GET(leaf_s->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + stale++; + } + } else + stale = 0; + /* + * Copy the leaf entries from source to destination. + */ + memcpy(&leaf_d->ents[start_d], &leaf_s->ents[start_s], + count * sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, bp_d, start_d, start_d + count - 1); + /* + * If there are source entries after the ones we copied, + * delete the ones we copied by sliding the next ones down. 
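+ *
+ * Aside (counts made up): stale entries travel with the copy, which
+ * is why the copied range was scanned first. If 5 entries move and
+ * 2 of them are stale, the source header ends up with count -= 5
+ * and stale -= 2, the destination with count += 5 and stale += 2,
+ * keeping both blocks' live-entry arithmetic consistent.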
+ */ + if (start_s + count < INT_GET(leaf_s->hdr.count, ARCH_CONVERT)) { + memmove(&leaf_s->ents[start_s], &leaf_s->ents[start_s + count], + count * sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1); + } + /* + * Update the headers and log them. + */ + INT_MOD(leaf_s->hdr.count, ARCH_CONVERT, -(count)); + INT_MOD(leaf_s->hdr.stale, ARCH_CONVERT, -(stale)); + INT_MOD(leaf_d->hdr.count, ARCH_CONVERT, count); + INT_MOD(leaf_d->hdr.stale, ARCH_CONVERT, stale); + xfs_dir2_leaf_log_header(tp, bp_s); + xfs_dir2_leaf_log_header(tp, bp_d); + xfs_dir2_leafn_check(args->dp, bp_s); + xfs_dir2_leafn_check(args->dp, bp_d); +} + +/* + * Determine the sort order of two leaf blocks. + * Returns 1 if both are valid and leaf2 should be before leaf1, else 0. + */ +int /* sort order */ +xfs_dir2_leafn_order( + xfs_dabuf_t *leaf1_bp, /* leaf1 buffer */ + xfs_dabuf_t *leaf2_bp) /* leaf2 buffer */ +{ + xfs_dir2_leaf_t *leaf1; /* leaf1 structure */ + xfs_dir2_leaf_t *leaf2; /* leaf2 structure */ + + leaf1 = leaf1_bp->data; + leaf2 = leaf2_bp->data; + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0 && + INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0 && + (INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT) < INT_GET(leaf1->ents[0].hashval, ARCH_CONVERT) || + INT_GET(leaf2->ents[INT_GET(leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT) < + INT_GET(leaf1->ents[INT_GET(leaf1->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT))) + return 1; + return 0; +} + +/* + * Rebalance leaf entries between two leaf blocks. + * This is actually only called when the second block is new, + * though the code deals with the general case. + * A new entry will be inserted in one of the blocks, and that + * entry is taken into account when balancing. + */ +static void +xfs_dir2_leafn_rebalance( + xfs_da_state_t *state, /* btree cursor */ + xfs_da_state_blk_t *blk1, /* first btree block */ + xfs_da_state_blk_t *blk2) /* second btree block */ +{ + xfs_da_args_t *args; /* operation arguments */ + int count; /* count (& direction) leaves */ + int isleft; /* new goes in left leaf */ + xfs_dir2_leaf_t *leaf1; /* first leaf structure */ + xfs_dir2_leaf_t *leaf2; /* second leaf structure */ + int mid; /* midpoint leaf index */ +#ifdef DEBUG + int oldstale; /* old count of stale leaves */ +#endif + int oldsum; /* old total leaf count */ + int swap; /* swapped leaf blocks */ + + args = state->args; + /* + * If the block order is wrong, swap the arguments. + */ + if ((swap = xfs_dir2_leafn_order(blk1->bp, blk2->bp))) { + xfs_da_state_blk_t *tmp; /* temp for block swap */ + + tmp = blk1; + blk1 = blk2; + blk2 = tmp; + } + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + oldsum = INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT); +#ifdef DEBUG + oldstale = INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT); +#endif + mid = oldsum >> 1; + /* + * If the old leaf count was odd then the new one will be even, + * so we need to divide the new count evenly. 
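+ *
+ * Worked example (counts made up): leaf1 holds 5 entries h0..h4 and
+ * leaf2 holds 2, so oldsum = 7 and mid = 3. Since mid < leaf1's
+ * count, midhash = h3; a new name hashing at or below h3 prefers
+ * the left block (isleft = 1), and count = 5 - 3 + 0 = 2 entries
+ * get moved left-to-right below.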
+ */ + if (oldsum & 1) { + xfs_dahash_t midhash; /* middle entry hash value */ + + if (mid >= INT_GET(leaf1->hdr.count, ARCH_CONVERT)) + midhash = INT_GET(leaf2->ents[mid - INT_GET(leaf1->hdr.count, ARCH_CONVERT)].hashval, ARCH_CONVERT); + else + midhash = INT_GET(leaf1->ents[mid].hashval, ARCH_CONVERT); + isleft = args->hashval <= midhash; + } + /* + * If the old count is even then the new count is odd, so there's + * no preferred side for the new entry. + * Pick the left one. + */ + else + isleft = 1; + /* + * Calculate moved entry count. Positive means left-to-right, + * negative means right-to-left. Then move the entries. + */ + count = INT_GET(leaf1->hdr.count, ARCH_CONVERT) - mid + (isleft == 0); + if (count > 0) + xfs_dir2_leafn_moveents(args, blk1->bp, + INT_GET(leaf1->hdr.count, ARCH_CONVERT) - count, blk2->bp, 0, count); + else if (count < 0) + xfs_dir2_leafn_moveents(args, blk2->bp, 0, blk1->bp, + INT_GET(leaf1->hdr.count, ARCH_CONVERT), count); + ASSERT(INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT) == oldsum); + ASSERT(INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT) == oldstale); + /* + * Mark whether we're inserting into the old or new leaf. + */ + if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) < INT_GET(leaf2->hdr.count, ARCH_CONVERT)) + state->inleaf = swap; + else if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > INT_GET(leaf2->hdr.count, ARCH_CONVERT)) + state->inleaf = !swap; + else + state->inleaf = + swap ^ (args->hashval < INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT)); + /* + * Adjust the expected index for insertion. + */ + if (!state->inleaf) + blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT); +} + +/* + * Remove an entry from a node directory. + * This removes the leaf entry and the data entry, + * and updates the free block if necessary. + */ +static int /* error */ +xfs_dir2_leafn_remove( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *bp, /* leaf buffer */ + int index, /* leaf entry index */ + xfs_da_state_blk_t *dblk, /* data block */ + int *rval) /* resulting block needs join */ +{ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_db_t db; /* data block number */ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + int longest; /* longest data free entry */ + int off; /* data block entry offset */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log data header */ + int needscan; /* need to rescan data frees */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_sb("leafn_remove", args, index, bp); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + /* + * Point to the entry we're removing. + */ + lep = &leaf->ents[index]; + /* + * Extract the data block and offset from the entry. + */ + db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + ASSERT(dblk->blkno == db); + off = XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT)); + ASSERT(dblk->index == off); + /* + * Kill the leaf entry by marking it stale. + * Log the leaf block changes. 
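+ *
+ * Aside: this is tombstone deletion. Rather than memmove()ing the
+ * tail of ents[] on every removal, the slot keeps its hashval but
+ * has its address set to XFS_DIR2_NULL_DATAPTR and hdr.stale bumped;
+ * the array stays hash-sorted and the dead slots are reused or
+ * squeezed out later by the compaction done in xfs_dir2_leafn_add.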
+ */
+ INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1);
+ xfs_dir2_leaf_log_header(tp, bp);
+ INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR);
+ xfs_dir2_leaf_log_ents(tp, bp, index, index);
+ /*
+ * Make the data entry free. Keep track of the longest freespace
+ * in the data block in case it changes.
+ */
+ dbp = dblk->bp;
+ data = dbp->data;
+ dep = (xfs_dir2_data_entry_t *)((char *)data + off);
+ longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT);
+ needlog = needscan = 0;
+ xfs_dir2_data_make_free(tp, dbp, off,
+ XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan);
+ /*
+ * Rescan the data block freespaces for bestfree.
+ * Log the data block header if needed.
+ */
+ if (needscan)
+ xfs_dir2_data_freescan(mp, data, &needlog, NULL);
+ if (needlog)
+ xfs_dir2_data_log_header(tp, dbp);
+ xfs_dir2_data_check(dp, dbp);
+ /*
+ * If the longest data block freespace changes, need to update
+ * the corresponding freeblock entry.
+ */
+ if (longest < INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) {
+ int error; /* error return value */
+ xfs_dabuf_t *fbp; /* freeblock buffer */
+ xfs_dir2_db_t fdb; /* freeblock block number */
+ int findex; /* index in freeblock entries */
+ xfs_dir2_free_t *free; /* freeblock structure */
+ int logfree; /* need to log free entry */
+
+ /*
+ * Convert the data block number to a free block,
+ * read in the free block.
+ */
+ fdb = XFS_DIR2_DB_TO_FDB(mp, db);
+ if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb),
+ -1, &fbp, XFS_DATA_FORK))) {
+ return error;
+ }
+ free = fbp->data;
+ ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) ==
+ XFS_DIR2_MAX_FREE_BESTS(mp) *
+ (fdb - XFS_DIR2_FREE_FIRSTDB(mp)));
+ /*
+ * Calculate which entry we need to fix.
+ */
+ findex = XFS_DIR2_DB_TO_FDINDEX(mp, db);
+ longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT);
+ /*
+ * If the data block is now empty we can get rid of it
+ * (usually).
+ */
+ if (longest == mp->m_dirblksize - (uint)sizeof(data->hdr)) {
+ /*
+ * Try to punch out the data block.
+ */
+ error = xfs_dir2_shrink_inode(args, db, dbp);
+ if (error == 0) {
+ dblk->bp = NULL;
+ data = NULL;
+ }
+ /*
+ * We can get ENOSPC if there's no space reservation.
+ * In this case just drop the buffer and someone else
+ * will eventually get rid of the empty block.
+ */
+ else if (error == ENOSPC && args->total == 0)
+ xfs_da_buf_done(dbp);
+ else
+ return error;
+ }
+ /*
+ * If we got rid of the data block, we can eliminate that entry
+ * in the free block.
+ */
+ if (data == NULL) {
+ /*
+ * One less used entry in the free table.
+ */
+ INT_MOD(free->hdr.nused, ARCH_CONVERT, -1);
+ xfs_dir2_free_log_header(tp, fbp);
+ /*
+ * If this was the last entry in the table, we can
+ * trim the table size back. There might be other
+ * entries at the end referring to non-existent
+ * data blocks, get those too.
+ */
+ if (findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1) {
+ int i; /* free entry index */
+
+ for (i = findex - 1;
+ i >= 0 && INT_GET(free->bests[i], ARCH_CONVERT) == NULLDATAOFF;
+ i--)
+ continue;
+ INT_SET(free->hdr.nvalid, ARCH_CONVERT, i + 1);
+ logfree = 0;
+ }
+ /*
+ * Not the last entry, just punch it out.
+ */
+ else {
+ INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF);
+ logfree = 1;
+ }
+ /*
+ * If there are no useful entries left in the block,
+ * get rid of the block if we can.
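+ *
+ * Aside (slot numbers made up): the nvalid trim just above works
+ * backwards from the dead slot. With nvalid = 8, if bests[5] and
+ * bests[6] are already NULLDATAOFF when slot 7 is invalidated, the
+ * scan stops at live slot 4 and nvalid drops to 5, so later
+ * freespace scans never walk the dead tail.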
+ */
+ if (INT_ISZERO(free->hdr.nused, ARCH_CONVERT)) {
+ error = xfs_dir2_shrink_inode(args, fdb, fbp);
+ if (error == 0) {
+ fbp = NULL;
+ logfree = 0;
+ } else if (error != ENOSPC || args->total != 0)
+ return error;
+ /*
+ * It's possible to get ENOSPC if there is no
+ * space reservation. In this case someone
+ * else will eventually get rid of this block.
+ */
+ }
+ }
+ /*
+ * Data block is not empty, just set the free entry to
+ * the new value.
+ */
+ else {
+ INT_SET(free->bests[findex], ARCH_CONVERT, longest);
+ logfree = 1;
+ }
+ /*
+ * Log the free entry that changed, unless we got rid of it.
+ */
+ if (logfree)
+ xfs_dir2_free_log_bests(tp, fbp, findex, findex);
+ /*
+ * Drop the buffer if we still have it.
+ */
+ if (fbp)
+ xfs_da_buf_done(fbp);
+ }
+ xfs_dir2_leafn_check(dp, bp);
+ /*
+ * Return indication of whether this leaf block is empty enough
+ * to justify trying to join it with a neighbor.
+ */
+ *rval =
+ ((uint)sizeof(leaf->hdr) +
+ (uint)sizeof(leaf->ents[0]) *
+ (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT))) <
+ mp->m_dir_magicpct;
+ return 0;
+}
+
+/*
+ * Split the leaf entries in the old block into old and new blocks.
+ */
+int /* error */
+xfs_dir2_leafn_split(
+ xfs_da_state_t *state, /* btree cursor */
+ xfs_da_state_blk_t *oldblk, /* original block */
+ xfs_da_state_blk_t *newblk) /* newly created block */
+{
+ xfs_da_args_t *args; /* operation arguments */
+ xfs_dablk_t blkno; /* new leaf block number */
+ int error; /* error return value */
+ xfs_mount_t *mp; /* filesystem mount point */
+
+ /*
+ * Allocate space for a new leaf node.
+ */
+ args = state->args;
+ mp = args->dp->i_mount;
+ ASSERT(args != NULL);
+ ASSERT(oldblk->magic == XFS_DIR2_LEAFN_MAGIC);
+ error = xfs_da_grow_inode(args, &blkno);
+ if (error) {
+ return error;
+ }
+ /*
+ * Initialize the new leaf block.
+ */
+ error = xfs_dir2_leaf_init(args, XFS_DIR2_DA_TO_DB(mp, blkno),
+ &newblk->bp, XFS_DIR2_LEAFN_MAGIC);
+ if (error) {
+ return error;
+ }
+ newblk->blkno = blkno;
+ newblk->magic = XFS_DIR2_LEAFN_MAGIC;
+ /*
+ * Rebalance the entries across the two leaves, link the new
+ * block into the leaves.
+ */
+ xfs_dir2_leafn_rebalance(state, oldblk, newblk);
+ error = xfs_da_blk_link(state, oldblk, newblk);
+ if (error) {
+ return error;
+ }
+ /*
+ * Insert the new entry in the correct block.
+ */
+ if (state->inleaf)
+ error = xfs_dir2_leafn_add(oldblk->bp, args, oldblk->index);
+ else
+ error = xfs_dir2_leafn_add(newblk->bp, args, newblk->index);
+ /*
+ * Update last hashval in each block since we added the name.
+ */
+ oldblk->hashval = xfs_dir2_leafn_lasthash(oldblk->bp, NULL);
+ newblk->hashval = xfs_dir2_leafn_lasthash(newblk->bp, NULL);
+ xfs_dir2_leafn_check(args->dp, oldblk->bp);
+ xfs_dir2_leafn_check(args->dp, newblk->bp);
+ return error;
+}
+
+/*
+ * Check a leaf block and its neighbors to see if the block should be
+ * collapsed into one or the other neighbor. Always keep the block
+ * with the smaller block number.
+ * If the current block is over 50% full, don't try to join it, return 0.
+ * If the block is empty, fill in the state structure and return 2.
+ * If it can be collapsed, fill in the state structure and return 1.
+ * If nothing can be done, return 0.
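+ *
+ * Illustrative arithmetic (entry counts made up; the on-disk sizes
+ * are 16 bytes of leaf header and 8 bytes per leaf entry): in a
+ * 4096-byte block the threshold is 2048, so a leaf with 300 live
+ * entries uses 16 + 300 * 8 = 2416 bytes and is left alone, while
+ * one with 200 live entries (1616 bytes) is a join candidate.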
+ */ +int /* error */ +xfs_dir2_leafn_toosmall( + xfs_da_state_t *state, /* btree cursor */ + int *action) /* resulting action to take */ +{ + xfs_da_state_blk_t *blk; /* leaf block */ + xfs_dablk_t blkno; /* leaf block number */ + xfs_dabuf_t *bp; /* leaf buffer */ + int bytes; /* bytes in use */ + int count; /* leaf live entry count */ + int error; /* error return value */ + int forward; /* sibling block direction */ + int i; /* sibling counter */ + xfs_da_blkinfo_t *info; /* leaf block header */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int rval; /* result from path_shift */ + + /* + * Check for the degenerate case of the block being over 50% full. + * If so, it's not worth even looking to see if we might be able + * to coalesce with a sibling. + */ + blk = &state->path.blk[state->path.active - 1]; + info = blk->bp->data; + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + leaf = (xfs_dir2_leaf_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]); + if (bytes > (state->blocksize >> 1)) { + /* + * Blk over 50%, don't try to join. + */ + *action = 0; + return 0; + } + /* + * Check for the degenerate case of the block being empty. + * If the block is empty, we'll simply delete it, no need to + * coalesce it with a sibling block. We choose (arbitrarily) + * to merge with the forward block unless it is NULL. + */ + if (count == 0) { + /* + * Make altpath point to the block we want to keep and + * path point to the block we want to drop (this one). + */ + forward = !INT_ISZERO(info->forw, ARCH_CONVERT); + memcpy(&state->altpath, &state->path, sizeof(state->path)); + error = xfs_da_path_shift(state, &state->altpath, forward, 0, + &rval); + if (error) + return error; + *action = rval ? 2 : 0; + return 0; + } + /* + * Examine each sibling block to see if we can coalesce with + * at least 25% free space to spare. We need to figure out + * whether to merge with the forward or the backward block. + * We prefer coalescing with the lower numbered sibling so as + * to shrink a directory over time. + */ + forward = INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT); + for (i = 0, bp = NULL; i < 2; forward = !forward, i++) { + blkno = forward ?INT_GET( info->forw, ARCH_CONVERT) : INT_GET(info->back, ARCH_CONVERT); + if (blkno == 0) + continue; + /* + * Read the sibling leaf block. + */ + if ((error = + xfs_da_read_buf(state->args->trans, state->args->dp, blkno, + -1, &bp, XFS_DATA_FORK))) { + return error; + } + ASSERT(bp != NULL); + /* + * Count bytes in the two blocks combined. + */ + leaf = (xfs_dir2_leaf_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + bytes = state->blocksize - (state->blocksize >> 2); + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + count += INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + bytes -= count * (uint)sizeof(leaf->ents[0]); + /* + * Fits with at least 25% to spare. + */ + if (bytes >= 0) + break; + xfs_da_brelse(state->args->trans, bp); + } + /* + * Didn't like either block, give up. + */ + if (i >= 2) { + *action = 0; + return 0; + } + /* + * Done with the sibling leaf block here, drop the dabuf + * so path_shift can get it. 
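+ *
+ * Aside (counts made up): the merge test above budgets 75% of a
+ * block. With 4096-byte blocks that is 4096 - 1024 = 3072 bytes;
+ * 180 live entries here plus 150 in the sibling is 330 * 8 = 2640
+ * bytes, which fits, so the loop breaks with that sibling chosen.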
+ */ + xfs_da_buf_done(bp); + /* + * Make altpath point to the block we want to keep (the lower + * numbered block) and path point to the block we want to drop. + */ + memcpy(&state->altpath, &state->path, sizeof(state->path)); + if (blkno < blk->blkno) + error = xfs_da_path_shift(state, &state->altpath, forward, 0, + &rval); + else + error = xfs_da_path_shift(state, &state->path, forward, 0, + &rval); + if (error) { + return error; + } + *action = rval ? 0 : 1; + return 0; +} + +/* + * Move all the leaf entries from drop_blk to save_blk. + * This is done as part of a join operation. + */ +void +xfs_dir2_leafn_unbalance( + xfs_da_state_t *state, /* cursor */ + xfs_da_state_blk_t *drop_blk, /* dead block */ + xfs_da_state_blk_t *save_blk) /* surviving block */ +{ + xfs_da_args_t *args; /* operation arguments */ + xfs_dir2_leaf_t *drop_leaf; /* dead leaf structure */ + xfs_dir2_leaf_t *save_leaf; /* surviving leaf structure */ + + args = state->args; + ASSERT(drop_blk->magic == XFS_DIR2_LEAFN_MAGIC); + ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC); + drop_leaf = drop_blk->bp->data; + save_leaf = save_blk->bp->data; + ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + /* + * If there are any stale leaf entries, take this opportunity + * to purge them. + */ + if (INT_GET(drop_leaf->hdr.stale, ARCH_CONVERT)) + xfs_dir2_leaf_compact(args, drop_blk->bp); + if (INT_GET(save_leaf->hdr.stale, ARCH_CONVERT)) + xfs_dir2_leaf_compact(args, save_blk->bp); + /* + * Move the entries from drop to the appropriate end of save. + */ + drop_blk->hashval = INT_GET(drop_leaf->ents[INT_GET(drop_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp)) + xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, 0, + INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); + else + xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, + INT_GET(save_leaf->hdr.count, ARCH_CONVERT), INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); + save_blk->hashval = INT_GET(save_leaf->ents[INT_GET(save_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + xfs_dir2_leafn_check(args->dp, save_blk->bp); +} + +/* + * Top-level node form directory addname routine. + */ +int /* error */ +xfs_dir2_node_addname( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_da_state_blk_t *blk; /* leaf block for insert */ + int error; /* error return value */ + int rval; /* sub-return value */ + xfs_da_state_t *state; /* btree cursor */ + + xfs_dir2_trace_args("node_addname", args); + /* + * Allocate and initialize the state (btree cursor). + */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_dirblksize; + state->node_ents = state->mp->m_dir_node_ents; + /* + * Look up the name. We're not supposed to find it, but + * this gives us the insertion point. + */ + error = xfs_da_node_lookup_int(state, &rval); + if (error) + rval = error; + if (rval != ENOENT) { + goto done; + } + /* + * Add the data entry to a data block. + * Extravalid is set to a freeblock found by lookup. + */ + rval = xfs_dir2_node_addname_int(args, + state->extravalid ? &state->extrablk : NULL); + if (rval) { + goto done; + } + blk = &state->path.blk[state->path.active - 1]; + ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); + /* + * Add the new leaf entry. 
+ */ + rval = xfs_dir2_leafn_add(blk->bp, args, blk->index); + if (rval == 0) { + /* + * It worked, fix the hash values up the btree. + */ + if (!args->justcheck) + xfs_da_fixhashpath(state, &state->path); + } else { + /* + * It didn't work, we need to split the leaf block. + */ + if (args->total == 0) { + ASSERT(rval == ENOSPC); + goto done; + } + /* + * Split the leaf block and insert the new entry. + */ + rval = xfs_da_split(state); + } +done: + xfs_da_state_free(state); + return rval; +} + +/* + * Add the data entry for a node-format directory name addition. + * The leaf entry is added in xfs_dir2_leafn_add. + * We may enter with a freespace block that the lookup found. + */ +static int /* error */ +xfs_dir2_node_addname_int( + xfs_da_args_t *args, /* operation arguments */ + xfs_da_state_blk_t *fblk) /* optional freespace block */ +{ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_db_t dbno; /* data block number */ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data entry pointer */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* data unused entry pointer */ + int error; /* error return value */ + xfs_dir2_db_t fbno; /* freespace block number */ + xfs_dabuf_t *fbp; /* freespace buffer */ + int findex; /* freespace entry index */ + xfs_dir2_db_t foundbno=0; /* found freespace block no */ + int foundindex=0; /* found freespace entry idx */ + int foundhole; /* found hole in freespace */ + xfs_dir2_free_t *free=NULL; /* freespace block structure */ + xfs_dir2_db_t ifbno; /* initial freespace block no */ + xfs_dir2_db_t lastfbno=0; /* highest freespace block no */ + int length; /* length of the new entry */ + int logfree; /* need to log free entry */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log data header */ + int needscan; /* need to rescan data frees */ + int needfreesp; /* need to allocate freesp blk */ + xfs_dir2_data_off_t *tagp; /* data entry tag pointer */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + length = XFS_DIR2_DATA_ENTSIZE(args->namelen); + foundhole = 0; + /* + * If we came in with a freespace block that means that lookup + * found an entry with our hash value. This is the freespace + * block for that data entry. + */ + if (fblk) { + fbp = fblk->bp; + /* + * Remember initial freespace block number. + */ + ifbno = fblk->blkno; + free = fbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + findex = fblk->index; + /* + * This means the free entry showed that the data block had + * space for our entry, so we remembered it. + * Use that data block. + */ + if (findex >= 0) { + ASSERT(findex < INT_GET(free->hdr.nvalid, ARCH_CONVERT)); + ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF); + ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) >= length); + dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; + } + /* + * The data block looked at didn't have enough room. + * We'll start at the beginning of the freespace entries. + */ + else { + dbno = -1; + findex = 0; + } + } + /* + * Didn't come in with a freespace block, so don't have a data block. + */ + else { + ifbno = dbno = -1; + fbp = NULL; + findex = 0; + } + /* + * If we don't have a data block yet, we're going to scan the + * freespace blocks looking for one. Figure out what the + * highest freespace block number is. 
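+ *
+ * The scan below, reduced to a sketch (illustration only):
+ *
+ *	for (fbno = first candidate; fbno < lastfbno; fbno++) {
+ *		read freespace block fbno (a hole reads back NULL);
+ *		for each valid slot in the block:
+ *			if (bests[slot] != NULLDATAOFF &&
+ *			    bests[slot] >= length)
+ *				use data block firstdb + slot;
+ *	}
+ *
+ * remembering along the way the first empty slot (foundbno and
+ * foundindex) in case no data block has room and a new one must
+ * be placed.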
+ */ + if (dbno == -1) { + xfs_fileoff_t fo; /* freespace block number */ + + if ((error = xfs_bmap_last_offset(tp, dp, &fo, XFS_DATA_FORK))) + return error; + lastfbno = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo); + fbno = ifbno; + foundindex = -1; + } + /* + * While we haven't identified a data block, search the freeblock + * data for a good data block. If we find a null freeblock entry, + * indicating a hole in the data blocks, remember that. + */ + while (dbno == -1) { + /* + * If we don't have a freeblock in hand, get the next one. + */ + if (fbp == NULL) { + /* + * Happens the first time through unless lookup gave + * us a freespace block to start with. + */ + if (++fbno == 0) + fbno = XFS_DIR2_FREE_FIRSTDB(mp); + /* + * If it's ifbno we already looked at it. + */ + if (fbno == ifbno) + fbno++; + /* + * If it's off the end we're done. + */ + if (fbno >= lastfbno) + break; + /* + * Read the block. There can be holes in the + * freespace blocks, so this might not succeed. + * This should be really rare, so there's no reason + * to avoid it. + */ + if ((error = xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp, + XFS_DATA_FORK))) { + return error; + } + if (unlikely(fbp == NULL)) { + foundhole = 1; + continue; + } + free = fbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + findex = 0; + } + /* + * Look at the current free entry. Is it good enough? + */ + if (INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF && + INT_GET(free->bests[findex], ARCH_CONVERT) >= length) + dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; + else { + /* + * If we haven't found an empty entry yet, and this + * one is empty, remember this slot. + */ + if (foundindex == -1 && + INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF && !foundhole) { + foundindex = findex; + foundbno = fbno; + } + /* + * Are we done with the freeblock? + */ + if (++findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { + /* + * If there is space left in this freeblock, + * and we don't have an empty entry yet, + * remember this slot. + */ + if (foundindex == -1 && + findex < XFS_DIR2_MAX_FREE_BESTS(mp) && + !foundhole) { + foundindex = findex; + foundbno = fbno; + } + /* + * Drop the block. + */ + xfs_da_brelse(tp, fbp); + fbp = NULL; + if (fblk && fblk->bp) + fblk->bp = NULL; + } + } + } + /* + * If we don't have a data block, we need to allocate one and make + * the freespace entries refer to it. + */ + if (unlikely(dbno == -1)) { + /* + * Not allowed to allocate, return failure. + */ + if (args->justcheck || args->total == 0) { + /* + * Drop the freespace buffer unless it came from our + * caller. + */ + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + return XFS_ERROR(ENOSPC); + } + /* + * Allocate and initialize the new data block. + */ + if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, + &dbno)) || + (error = xfs_dir2_data_init(args, dbno, &dbp))) { + /* + * Drop the freespace buffer unless it came from our + * caller. + */ + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + return error; + } + /* + * If the freespace entry for this data block is not in the + * freespace block we have in hand, drop the one we have + * and get the right one. 
+ */ + needfreesp = 0; + if (XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno || fbp == NULL) { + if (fbp) + xfs_da_brelse(tp, fbp); + if (fblk && fblk->bp) + fblk->bp = NULL; + fbno = XFS_DIR2_DB_TO_FDB(mp, dbno); + if ((error = xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp, + XFS_DATA_FORK))) { + xfs_da_buf_done(dbp); + return error; + } + + /* + * If there wasn't a freespace block, the read will + * return a NULL fbp. Allocate one later. + */ + + if(unlikely( fbp == NULL )) { + needfreesp = 1; + } else { + free = fbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + } + } + + /* + * If we don't have a data block, and there's no free slot in a + * freeblock, we need to add a new freeblock. + */ + if (unlikely(needfreesp || foundindex == -1)) { + /* + * Add the new freeblock. + */ + if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_FREE_SPACE, + &fbno))) { + return error; + } + + if (XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno) { + cmn_err(CE_ALERT, + "xfs_dir2_node_addname_int: needed block %lld, got %lld\n", + (long long)XFS_DIR2_DB_TO_FDB(mp, dbno), + (long long)fbno); + XFS_ERROR_REPORT("xfs_dir2_node_addname_int", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + /* + * Get a buffer for the new block. + */ + if ((error = xfs_da_get_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, fbno), + -1, &fbp, XFS_DATA_FORK))) { + return error; + } + ASSERT(fbp != NULL); + + /* + * Initialize the new block to be empty, and remember + * its first slot as our empty slot. + */ + free = fbp->data; + INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); + INT_SET(free->hdr.firstdb, ARCH_CONVERT, + (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * + XFS_DIR2_MAX_FREE_BESTS(mp)); + INT_ZERO(free->hdr.nvalid, ARCH_CONVERT); + INT_ZERO(free->hdr.nused, ARCH_CONVERT); + foundindex = 0; + foundbno = fbno; + } + + /* + * Set the freespace block index from the data block number. + */ + findex = XFS_DIR2_DB_TO_FDINDEX(mp, dbno); + /* + * If it's after the end of the current entries in the + * freespace block, extend that table. + */ + if (findex >= INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { + ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp)); + INT_SET(free->hdr.nvalid, ARCH_CONVERT, findex + 1); + /* + * Tag new entry so nused will go up. + */ + INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); + } + /* + * If this entry was for an empty data block + * (this should always be true) then update the header. + */ + if (INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF) { + INT_MOD(free->hdr.nused, ARCH_CONVERT, +1); + xfs_dir2_free_log_header(tp, fbp); + } + /* + * Update the real value in the table. + * We haven't allocated the data entry yet so this will + * change again. + */ + data = dbp->data; + INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); + logfree = 1; + } + /* + * We had a data block so we don't have to make a new one. + */ + else { + /* + * If just checking, we succeeded. + */ + if (args->justcheck) { + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + return 0; + } + /* + * Read the data block in. + */ + if (unlikely( + error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, dbno), + -1, &dbp, XFS_DATA_FORK))) { + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + return error; + } + data = dbp->data; + logfree = 0; + } + ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) >= length); + /* + * Point to the existing unused space. 
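+ *
+ * Aside on sizing (name length made up): a data entry is the 8-byte
+ * inumber, a 1-byte namelen, the name bytes and a 2-byte tag,
+ * rounded up to 8-byte alignment; for an 11-character name,
+ * length = XFS_DIR2_DATA_ENTSIZE(11) = roundup(8 + 1 + 11 + 2, 8)
+ * = 24 bytes, carved here out of the block's largest free region.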
+ */ + dup = (xfs_dir2_data_unused_t *) + ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); + needscan = needlog = 0; + /* + * Mark the first part of the unused space, inuse for us. + */ + xfs_dir2_data_use_free(tp, dbp, dup, + (xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length, + &needlog, &needscan); + /* + * Fill in the new entry and log it. + */ + dep = (xfs_dir2_data_entry_t *)dup; + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + dep->namelen = args->namelen; + memcpy(dep->name, args->name, dep->namelen); + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); + xfs_dir2_data_log_entry(tp, dbp, dep); + /* + * Rescan the block for bestfree if needed. + */ + if (needscan) + xfs_dir2_data_freescan(mp, data, &needlog, NULL); + /* + * Log the data block header if needed. + */ + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + /* + * If the freespace entry is now wrong, update it. + */ + if (INT_GET(free->bests[findex], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { + INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); + logfree = 1; + } + /* + * Log the freespace entry if needed. + */ + if (logfree) + xfs_dir2_free_log_bests(tp, fbp, findex, findex); + /* + * If the caller didn't hand us the freespace block, drop it. + */ + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + /* + * Return the data block and offset in args, then drop the data block. + */ + args->blkno = (xfs_dablk_t)dbno; + args->index = INT_GET(*tagp, ARCH_CONVERT); + xfs_da_buf_done(dbp); + return 0; +} + +/* + * Lookup an entry in a node-format directory. + * All the real work happens in xfs_da_node_lookup_int. + * The only real output is the inode number of the entry. + */ +int /* error */ +xfs_dir2_node_lookup( + xfs_da_args_t *args) /* operation arguments */ +{ + int error; /* error return value */ + int i; /* btree level */ + int rval; /* operation return value */ + xfs_da_state_t *state; /* btree cursor */ + + xfs_dir2_trace_args("node_lookup", args); + /* + * Allocate and initialize the btree cursor. + */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_dirblksize; + state->node_ents = state->mp->m_dir_node_ents; + /* + * Fill in the path to the entry in the cursor. + */ + error = xfs_da_node_lookup_int(state, &rval); + if (error) + rval = error; + /* + * Release the btree blocks and leaf block. + */ + for (i = 0; i < state->path.active; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + /* + * Release the data block if we have it. + */ + if (state->extravalid && state->extrablk.bp) { + xfs_da_brelse(args->trans, state->extrablk.bp); + state->extrablk.bp = NULL; + } + xfs_da_state_free(state); + return rval; +} + +/* + * Remove an entry from a node-format directory. + */ +int /* error */ +xfs_dir2_node_removename( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_da_state_blk_t *blk; /* leaf block */ + int error; /* error return value */ + int rval; /* operation return value */ + xfs_da_state_t *state; /* btree cursor */ + + xfs_dir2_trace_args("node_removename", args); + /* + * Allocate and initialize the btree cursor. 
+ */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_dirblksize; + state->node_ents = state->mp->m_dir_node_ents; + /* + * Look up the entry we're deleting, set up the cursor. + */ + error = xfs_da_node_lookup_int(state, &rval); + if (error) { + rval = error; + } + /* + * Didn't find it, upper layer screwed up. + */ + if (rval != EEXIST) { + xfs_da_state_free(state); + return rval; + } + blk = &state->path.blk[state->path.active - 1]; + ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); + ASSERT(state->extravalid); + /* + * Remove the leaf and data entries. + * Extrablk refers to the data block. + */ + error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, + &state->extrablk, &rval); + if (error) { + return error; + } + /* + * Fix the hash values up the btree. + */ + xfs_da_fixhashpath(state, &state->path); + /* + * If we need to join leaf blocks, do it. + */ + if (rval && state->path.active > 1) + error = xfs_da_join(state); + /* + * If no errors so far, try conversion to leaf format. + */ + if (!error) + error = xfs_dir2_node_to_leaf(state); + xfs_da_state_free(state); + return error; +} + +/* + * Replace an entry's inode number in a node-format directory. + */ +int /* error */ +xfs_dir2_node_replace( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_da_state_blk_t *blk; /* leaf block */ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_entry_t *dep; /* data entry changed */ + int error; /* error return value */ + int i; /* btree level */ + xfs_ino_t inum; /* new inode number */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry being changed */ + int rval; /* internal return value */ + xfs_da_state_t *state; /* btree cursor */ + + xfs_dir2_trace_args("node_replace", args); + /* + * Allocate and initialize the btree cursor. + */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_dirblksize; + state->node_ents = state->mp->m_dir_node_ents; + inum = args->inumber; + /* + * Lookup the entry to change in the btree. + */ + error = xfs_da_node_lookup_int(state, &rval); + if (error) { + rval = error; + } + /* + * It should be found, since the vnodeops layer has looked it up + * and locked it. But paranoia is good. + */ + if (rval == EEXIST) { + /* + * Find the leaf entry. + */ + blk = &state->path.blk[state->path.active - 1]; + ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); + leaf = blk->bp->data; + lep = &leaf->ents[blk->index]; + ASSERT(state->extravalid); + /* + * Point to the data entry. + */ + data = state->extrablk.bp->data; + ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + dep = (xfs_dir2_data_entry_t *) + ((char *)data + + XFS_DIR2_DATAPTR_TO_OFF(state->mp, INT_GET(lep->address, ARCH_CONVERT))); + ASSERT(inum != INT_GET(dep->inumber, ARCH_CONVERT)); + /* + * Fill in the new inode number and log the entry. + */ + INT_SET(dep->inumber, ARCH_CONVERT, inum); + xfs_dir2_data_log_entry(args->trans, state->extrablk.bp, dep); + rval = 0; + } + /* + * Didn't find it, and we're holding a data block. Drop it. + */ + else if (state->extravalid) { + xfs_da_brelse(args->trans, state->extrablk.bp); + state->extrablk.bp = NULL; + } + /* + * Release all the buffers in the cursor. 
+ */ + for (i = 0; i < state->path.active; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + xfs_da_state_free(state); + return rval; +} + +/* + * Trim off a trailing empty freespace block. + * Return (in rvalp) 1 if we did it, 0 if not. + */ +int /* error */ +xfs_dir2_node_trim_free( + xfs_da_args_t *args, /* operation arguments */ + xfs_fileoff_t fo, /* free block number */ + int *rvalp) /* out: did something */ +{ + xfs_dabuf_t *bp; /* freespace buffer */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + xfs_dir2_free_t *free; /* freespace structure */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + /* + * Read the freespace block. + */ + if (unlikely(error = xfs_da_read_buf(tp, dp, (xfs_dablk_t)fo, -2, &bp, + XFS_DATA_FORK))) { + return error; + } + + /* + * There can be holes in freespace. If fo is a hole, there's + * nothing to do. + */ + if (bp == NULL) { + return 0; + } + free = bp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + /* + * If there are used entries, there's nothing to do. + */ + if (INT_GET(free->hdr.nused, ARCH_CONVERT) > 0) { + xfs_da_brelse(tp, bp); + *rvalp = 0; + return 0; + } + /* + * Blow the block away. + */ + if ((error = + xfs_dir2_shrink_inode(args, XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo), + bp))) { + /* + * Can't fail with ENOSPC since that only happens with no + * space reservation, when breaking up an extent into two + * pieces. This is the last block of an extent. + */ + ASSERT(error != ENOSPC); + xfs_da_brelse(tp, bp); + return error; + } + /* + * Return that we succeeded. + */ + *rvalp = 1; + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_node.h linux.21rc5-ac2/fs/xfs/xfs_dir2_node.h --- linux.21rc5/fs/xfs/xfs_dir2_node.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_node.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_NODE_H__ +#define __XFS_DIR2_NODE_H__ + +/* + * Directory version 2, btree node format structures + */ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_da_state; +struct xfs_da_state_blk; +struct xfs_inode; +struct xfs_trans; + +/* + * Constants. + */ + +/* + * Offset of the freespace index. + */ +#define XFS_DIR2_FREE_SPACE 2 +#define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE) +#define XFS_DIR2_FREE_FIRSTDB(mp) \ + XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_FREE_OFFSET) + +#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */ + +/* + * Structures. + */ +typedef struct xfs_dir2_free_hdr { + __uint32_t magic; /* XFS_DIR2_FREE_MAGIC */ + __int32_t firstdb; /* db of first entry */ + __int32_t nvalid; /* count of valid entries */ + __int32_t nused; /* count of used entries */ +} xfs_dir2_free_hdr_t; + +typedef struct xfs_dir2_free { + xfs_dir2_free_hdr_t hdr; /* block header */ + xfs_dir2_data_off_t bests[1]; /* best free counts */ + /* unused entries are -1 */ +} xfs_dir2_free_t; +#define XFS_DIR2_MAX_FREE_BESTS(mp) \ + (((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_free_hdr_t)) / \ + (uint)sizeof(xfs_dir2_data_off_t)) + +/* + * Macros. + */ + +/* + * Convert data space db to the corresponding free db. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_TO_FDB) +xfs_dir2_db_t +xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db); +#define XFS_DIR2_DB_TO_FDB(mp,db) xfs_dir2_db_to_fdb(mp, db) +#else +#define XFS_DIR2_DB_TO_FDB(mp,db) \ + (XFS_DIR2_FREE_FIRSTDB(mp) + (db) / XFS_DIR2_MAX_FREE_BESTS(mp)) +#endif + +/* + * Convert data space db to the corresponding index in a free db. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_TO_FDINDEX) +int +xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db); +#define XFS_DIR2_DB_TO_FDINDEX(mp,db) xfs_dir2_db_to_fdindex(mp, db) +#else +#define XFS_DIR2_DB_TO_FDINDEX(mp,db) ((db) % XFS_DIR2_MAX_FREE_BESTS(mp)) +#endif + +/* + * Functions. 
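+ *
+ * (For scale, with a 4096-byte directory block picked purely for
+ * illustration: XFS_DIR2_MAX_FREE_BESTS = (4096 - 16) / 2 = 2040,
+ * so data block db is tracked by free block
+ * XFS_DIR2_FREE_FIRSTDB(mp) + db / 2040 at slot db % 2040, and the
+ * free blocks themselves live in the third 32GB "space" of the
+ * directory's logical offset range, per XFS_DIR2_FREE_OFFSET.)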
+ */ + +extern void + xfs_dir2_free_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp, + int first, int last); + +extern int + xfs_dir2_leaf_to_node(struct xfs_da_args *args, struct xfs_dabuf *lbp); + +extern xfs_dahash_t + xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count); + +extern int + xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp, + struct xfs_da_args *args, int *indexp, + struct xfs_da_state *state); + +extern int + xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp, + struct xfs_dabuf *leaf2_bp); + +extern int + xfs_dir2_leafn_split(struct xfs_da_state *state, + struct xfs_da_state_blk *oldblk, + struct xfs_da_state_blk *newblk); + +extern int + xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action); + +extern void + xfs_dir2_leafn_unbalance(struct xfs_da_state *state, + struct xfs_da_state_blk *drop_blk, + struct xfs_da_state_blk *save_blk); + +extern int + xfs_dir2_node_addname(struct xfs_da_args *args); + +extern int + xfs_dir2_node_lookup(struct xfs_da_args *args); + +extern int + xfs_dir2_node_removename(struct xfs_da_args *args); + +extern int + xfs_dir2_node_replace(struct xfs_da_args *args); + +extern int + xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo, + int *rvalp); + +#endif /* __XFS_DIR2_NODE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_sf.c linux.21rc5-ac2/fs/xfs/xfs_dir2_sf.c --- linux.21rc5/fs/xfs/xfs_dir2_sf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_sf.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1328 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_sf.c + * Shortform directory implementation for v2 directories. 
+ */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_error.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_trace.h" + +/* + * Prototypes for internal functions. + */ +static void xfs_dir2_sf_addname_easy(xfs_da_args_t *args, + xfs_dir2_sf_entry_t *sfep, + xfs_dir2_data_aoff_t offset, + int new_isize); +static void xfs_dir2_sf_addname_hard(xfs_da_args_t *args, int objchange, + int new_isize); +static int xfs_dir2_sf_addname_pick(xfs_da_args_t *args, int objchange, + xfs_dir2_sf_entry_t **sfepp, + xfs_dir2_data_aoff_t *offsetp); +#ifdef DEBUG +static void xfs_dir2_sf_check(xfs_da_args_t *args); +#else +#define xfs_dir2_sf_check(args) +#endif /* DEBUG */ +#if XFS_BIG_FILESYSTEMS +static void xfs_dir2_sf_toino4(xfs_da_args_t *args); +static void xfs_dir2_sf_toino8(xfs_da_args_t *args); +#endif /* XFS_BIG_FILESYSTEMS */ + +/* + * Given a block directory (dp/block), calculate its size as a shortform (sf) + * directory and a header for the sf directory, if it will fit it the + * space currently present in the inode. If it won't fit, the output + * size is too big (but not accurate). + */ +int /* size for sf form */ +xfs_dir2_block_sfsize( + xfs_inode_t *dp, /* incore inode pointer */ + xfs_dir2_block_t *block, /* block directory data */ + xfs_dir2_sf_hdr_t *sfhp) /* output: header for sf form */ +{ + xfs_dir2_dataptr_t addr; /* data entry address */ + xfs_dir2_leaf_entry_t *blp; /* leaf area of the block */ + xfs_dir2_block_tail_t *btp; /* tail area of the block */ + int count; /* shortform entry count */ + xfs_dir2_data_entry_t *dep; /* data entry in the block */ + int i; /* block entry index */ + int i8count; /* count of big-inode entries */ + int isdot; /* entry is "." */ + int isdotdot; /* entry is ".." */ + xfs_mount_t *mp; /* mount structure pointer */ + int namelen; /* total name bytes */ + xfs_ino_t parent; /* parent inode number */ + int size=0; /* total computed size */ + + mp = dp->i_mount; + + count = i8count = namelen = 0; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + + /* + * Iterate over the block's data entries by using the leaf pointers. + */ + for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { + if ((addr = INT_GET(blp[i].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Calculate the pointer to the entry at hand. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr)); + /* + * Detect . and .., so we can special-case them. + * . is not included in sf directories. + * .. is included by just the parent inode number. + */ + isdot = dep->namelen == 1 && dep->name[0] == '.'; + isdotdot = + dep->namelen == 2 && + dep->name[0] == '.' 
&& dep->name[1] == '.'; +#if XFS_BIG_FILESYSTEMS + if (!isdot) + i8count += INT_GET(dep->inumber, ARCH_CONVERT) > XFS_DIR2_MAX_SHORT_INUM; +#endif + if (!isdot && !isdotdot) { + count++; + namelen += dep->namelen; + } else if (isdotdot) + parent = INT_GET(dep->inumber, ARCH_CONVERT); + /* + * Calculate the new size, see if we should give up yet. + */ + size = XFS_DIR2_SF_HDR_SIZE(i8count) + /* header */ + count + /* namelen */ + count * (uint)sizeof(xfs_dir2_sf_off_t) + /* offset */ + namelen + /* name */ + (i8count ? /* inumber */ + (uint)sizeof(xfs_dir2_ino8_t) * count : + (uint)sizeof(xfs_dir2_ino4_t) * count); + if (size > XFS_IFORK_DSIZE(dp)) + return size; /* size value is a failure */ + } + /* + * Create the output header, if it worked. + */ + sfhp->count = count; + sfhp->i8count = i8count; + XFS_DIR2_SF_PUT_INUMBER_ARCH((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent, ARCH_CONVERT); + return size; +} + +/* + * Convert a block format directory to shortform. + * Caller has already checked that it will fit, and built us a header. + */ +int /* error */ +xfs_dir2_block_to_sf( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *bp, /* block buffer */ + int size, /* shortform directory size */ + xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */ +{ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_block_tail_t *btp; /* block tail pointer */ + xfs_dir2_data_entry_t *dep; /* data entry pointer */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* unused data pointer */ + char *endptr; /* end of data entries */ + int error; /* error return value */ + int logflags; /* inode logging flags */ + xfs_mount_t *mp; /* filesystem mount point */ + char *ptr; /* current data pointer */ + xfs_dir2_sf_entry_t *sfep; /* shortform entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_ino_t temp; + + xfs_dir2_trace_args_sb("block_to_sf", args, size, bp); + dp = args->dp; + mp = dp->i_mount; + + /* + * Make a copy of the block data, so we can shrink the inode + * and add local data. + */ + block = kmem_alloc(mp->m_dirblksize, KM_SLEEP); + memcpy(block, bp->data, mp->m_dirblksize); + logflags = XFS_ILOG_CORE; + if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) { + ASSERT(error != ENOSPC); + goto out; + } + /* + * The buffer is now unconditionally gone, whether + * xfs_dir2_shrink_inode worked or not. + * + * Convert the inode to local format. + */ + dp->i_df.if_flags &= ~XFS_IFEXTENTS; + dp->i_df.if_flags |= XFS_IFINLINE; + dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; + ASSERT(dp->i_df.if_bytes == 0); + xfs_idata_realloc(dp, size, XFS_DATA_FORK); + logflags |= XFS_ILOG_DDATA; + /* + * Copy the header into the newly allocate local space. + */ + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + memcpy(sfp, sfhp, XFS_DIR2_SF_HDR_SIZE(sfhp->i8count)); + dp->i_d.di_size = size; + /* + * Set up to loop over the block's entries. + */ + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + ptr = (char *)block->u; + endptr = (char *)XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + /* + * Loop over the active and unused entries. + * Stop when we reach the leaf/tail portion of the block. + */ + while (ptr < endptr) { + /* + * If it's unused, just skip over it. + */ + dup = (xfs_dir2_data_unused_t *)ptr; + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ptr += INT_GET(dup->length, ARCH_CONVERT); + continue; + } + dep = (xfs_dir2_data_entry_t *)ptr; + /* + * Skip . 
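+ * entirely: shortform directories store "." implicitly.
+ *
+ * (Aside on the packing being built in this loop, per the v2
+ * shortform layout: each copied entry costs 1 byte of namelen,
+ * a 2-byte offset, the name bytes, then the inumber packed as 4
+ * or 8 bytes, so an 8-character name with a small inode number
+ * takes 8 + 3 + 4 = 15 bytes.)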
+ */ + if (dep->namelen == 1 && dep->name[0] == '.') + ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) == dp->i_ino); + /* + * Skip .., but make sure the inode number is right. + */ + else if (dep->namelen == 2 && + dep->name[0] == '.' && dep->name[1] == '.') + ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) == + XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT)); + /* + * Normal entry, copy it into shortform. + */ + else { + sfep->namelen = dep->namelen; + XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep, + (xfs_dir2_data_aoff_t) + ((char *)dep - (char *)block), ARCH_CONVERT); + memcpy(sfep->name, dep->name, dep->namelen); + temp=INT_GET(dep->inumber, ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &temp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); + } + ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen); + } + ASSERT((char *)sfep - (char *)sfp == size); + xfs_dir2_sf_check(args); +out: + xfs_trans_log_inode(args->trans, dp, logflags); + kmem_free(block, mp->m_dirblksize); + return error; +} + +/* + * Add a name to a shortform directory. + * There are two algorithms, "easy" and "hard" which we decide on + * before changing anything. + * Convert to block form if necessary, if the new entry won't fit. + */ +int /* error */ +xfs_dir2_sf_addname( + xfs_da_args_t *args) /* operation arguments */ +{ + int add_entsize; /* size of the new entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return value */ + int incr_isize; /* total change in size */ + int new_isize; /* di_size after adding name */ + int objchange; /* changing to 8-byte inodes */ + xfs_dir2_data_aoff_t offset; /* offset for new entry */ + int old_isize; /* di_size before adding name */ + int pick; /* which algorithm to use */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_entry_t *sfep; /* shortform entry */ + + xfs_dir2_trace_args("sf_addname", args); + ASSERT(xfs_dir2_sf_lookup(args) == ENOENT); + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Make sure the shortform value has some of its header. + */ + if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); + /* + * Compute entry (and change in) size. + */ + add_entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen); + incr_isize = add_entsize; +#if XFS_BIG_FILESYSTEMS + /* + * Do we have to change to 8 byte inodes? + */ + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) { + /* + * Yes, adjust the entry size and the total size. + */ + add_entsize += + (uint)sizeof(xfs_dir2_ino8_t) - + (uint)sizeof(xfs_dir2_ino4_t); + incr_isize += + (sfp->hdr.count + 2) * + ((uint)sizeof(xfs_dir2_ino8_t) - + (uint)sizeof(xfs_dir2_ino4_t)); + objchange = 1; + } else + objchange = 0; +#else + objchange = 0; +#endif + old_isize = (int)dp->i_d.di_size; + new_isize = old_isize + incr_isize; + /* + * Won't fit as shortform any more (due to size), + * or the pick routine says it won't (due to offset values). + */ + if (new_isize > XFS_IFORK_DSIZE(dp) || + (pick = + xfs_dir2_sf_addname_pick(args, objchange, &sfep, &offset)) == 0) { + /* + * Just checking or no space reservation, it doesn't fit. 
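+ * Both cases come down to ENOSPC: a justcheck caller only wants to
+ * know whether the name fits in place, and with no block reservation
+ * (args->total == 0) we are not allowed to grow the directory into
+ * block form.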
+ */ + if (args->justcheck || args->total == 0) + return XFS_ERROR(ENOSPC); + /* + * Convert to block form then add the name. + */ + error = xfs_dir2_sf_to_block(args); + if (error) + return error; + return xfs_dir2_block_addname(args); + } + /* + * Just checking, it fits. + */ + if (args->justcheck) + return 0; + /* + * Do it the easy way - just add it at the end. + */ + if (pick == 1) + xfs_dir2_sf_addname_easy(args, sfep, offset, new_isize); + /* + * Do it the hard way - look for a place to insert the new entry. + * Convert to 8 byte inode numbers first if necessary. + */ + else { + ASSERT(pick == 2); +#if XFS_BIG_FILESYSTEMS + if (objchange) + xfs_dir2_sf_toino8(args); +#endif + xfs_dir2_sf_addname_hard(args, objchange, new_isize); + } + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + return 0; +} + +/* + * Add the new entry the "easy" way. + * This is copying the old directory and adding the new entry at the end. + * Since it's sorted by "offset" we need room after the last offset + * that's already there, and then room to convert to a block directory. + * This is already checked by the pick routine. + */ +static void +xfs_dir2_sf_addname_easy( + xfs_da_args_t *args, /* operation arguments */ + xfs_dir2_sf_entry_t *sfep, /* pointer to new entry */ + xfs_dir2_data_aoff_t offset, /* offset to use for new ent */ + int new_isize) /* new directory size */ +{ + int byteoff; /* byte offset in sf dir */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + dp = args->dp; + + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + byteoff = (int)((char *)sfep - (char *)sfp); + /* + * Grow the in-inode space. + */ + xfs_idata_realloc(dp, XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen), + XFS_DATA_FORK); + /* + * Need to set up again due to realloc of the inode data. + */ + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + byteoff); + /* + * Fill in the new entry. + */ + sfep->namelen = args->namelen; + XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep, offset, ARCH_CONVERT); + memcpy(sfep->name, args->name, sfep->namelen); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + /* + * Update the header and inode. + */ + sfp->hdr.count++; +#if XFS_BIG_FILESYSTEMS + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) + sfp->hdr.i8count++; +#endif + dp->i_d.di_size = new_isize; + xfs_dir2_sf_check(args); +} + +/* + * Add the new entry the "hard" way. + * The caller has already converted to 8 byte inode numbers if necessary, + * in which case we need to leave the i8count at 1. + * Find a hole that the new entry will fit into, and copy + * the first part of the entries, the new entry, and the last part of + * the entries. 
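+ *
+ * Schematically, inserting "new" between e1 and e2:
+ *
+ *   before: [hdr][e0][e1][e2][e3]
+ *   after:  [hdr][e0][e1][new][e2][e3]
+ *
+ * The old image is memcpy'd to a temporary buffer first, because the
+ * inode's data fork is freed and reallocated at the new size.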
+ */
+/* ARGSUSED */
+static void
+xfs_dir2_sf_addname_hard(
+ xfs_da_args_t *args, /* operation arguments */
+ int objchange, /* changing inode number size */
+ int new_isize) /* new directory size */
+{
+ int add_datasize; /* data size needed for new ent */
+ char *buf; /* buffer for old */
+ xfs_inode_t *dp; /* incore directory inode */
+ int eof; /* reached end of old dir */
+ int nbytes; /* temp for byte copies */
+ xfs_dir2_data_aoff_t new_offset; /* next offset value */
+ xfs_dir2_data_aoff_t offset; /* current offset value */
+ int old_isize; /* previous di_size */
+ xfs_dir2_sf_entry_t *oldsfep; /* entry in original dir */
+ xfs_dir2_sf_t *oldsfp; /* original shortform dir */
+ xfs_dir2_sf_entry_t *sfep; /* entry in new dir */
+ xfs_dir2_sf_t *sfp; /* new shortform dir */
+
+ /*
+ * Copy the old directory to a temporary buffer.
+ */
+ dp = args->dp;
+
+ sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ old_isize = (int)dp->i_d.di_size;
+ buf = kmem_alloc(old_isize, KM_SLEEP);
+ oldsfp = (xfs_dir2_sf_t *)buf;
+ memcpy(oldsfp, sfp, old_isize);
+ /*
+ * Loop over the old directory finding the place we're going
+ * to insert the new entry.
+ * If it's going to end up at the end then oldsfep will point there.
+ */
+ for (offset = XFS_DIR2_DATA_FIRST_OFFSET,
+ oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp),
+ add_datasize = XFS_DIR2_DATA_ENTSIZE(args->namelen),
+ eof = (char *)oldsfep == &buf[old_isize];
+ !eof;
+ offset = new_offset + XFS_DIR2_DATA_ENTSIZE(oldsfep->namelen),
+ oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep),
+ eof = (char *)oldsfep == &buf[old_isize]) {
+ new_offset = XFS_DIR2_SF_GET_OFFSET_ARCH(oldsfep, ARCH_CONVERT);
+ if (offset + add_datasize <= new_offset)
+ break;
+ }
+ /*
+ * Get rid of the old directory, then allocate space for
+ * the new one. We do this so xfs_idata_realloc won't copy
+ * the data.
+ */
+ xfs_idata_realloc(dp, -old_isize, XFS_DATA_FORK);
+ xfs_idata_realloc(dp, new_isize, XFS_DATA_FORK);
+ /*
+ * Reset the pointer since the buffer was reallocated.
+ */
+ sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ /*
+ * Copy the first part of the directory, including the header.
+ */
+ nbytes = (int)((char *)oldsfep - (char *)oldsfp);
+ memcpy(sfp, oldsfp, nbytes);
+ sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + nbytes);
+ /*
+ * Fill in the new entry, and update the header counts.
+ */
+ sfep->namelen = args->namelen;
+ XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep, offset, ARCH_CONVERT);
+ memcpy(sfep->name, args->name, sfep->namelen);
+ XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber,
+ XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT);
+ sfp->hdr.count++;
+#if XFS_BIG_FILESYSTEMS
+ if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
+ sfp->hdr.i8count++;
+#endif
+ /*
+ * If there's more left to copy, do that.
+ */
+ if (!eof) {
+ sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
+ memcpy(sfep, oldsfep, old_isize - nbytes);
+ }
+ kmem_free(buf, old_isize);
+ dp->i_d.di_size = new_isize;
+ xfs_dir2_sf_check(args);
+}
+
+/*
+ * Decide if the new entry will fit at all.
+ * If it will fit, pick between adding the new entry to the end (easy)
+ * or somewhere else (hard).
+ * Return 0 (won't fit), 1 (easy), 2 (hard).
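+ *
+ * "Fit" is judged against the block form this directory would become:
+ *
+ *   used = offset +
+ *          (count + 3) * sizeof(xfs_dir2_leaf_entry_t) +
+ *          sizeof(xfs_dir2_block_tail_t);
+ *
+ * The count + 3 leaf entries cover the existing names, the new one,
+ * and the "." and ".." entries that are implicit in shortform but
+ * real data entries in block form.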
+ */ +/*ARGSUSED*/ +static int /* pick result */ +xfs_dir2_sf_addname_pick( + xfs_da_args_t *args, /* operation arguments */ + int objchange, /* inode # size changes */ + xfs_dir2_sf_entry_t **sfepp, /* out(1): new entry ptr */ + xfs_dir2_data_aoff_t *offsetp) /* out(1): new offset */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int holefit; /* found hole it will fit in */ + int i; /* entry number */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_data_aoff_t offset; /* data block offset */ + xfs_dir2_sf_entry_t *sfep; /* shortform entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + int size; /* entry's data size */ + int used; /* data bytes used */ + + dp = args->dp; + mp = dp->i_mount; + + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + size = XFS_DIR2_DATA_ENTSIZE(args->namelen); + offset = XFS_DIR2_DATA_FIRST_OFFSET; + sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + holefit = 0; + /* + * Loop over sf entries. + * Keep track of data offset and whether we've seen a place + * to insert the new entry. + */ + for (i = 0; i < sfp->hdr.count; i++) { + if (!holefit) + holefit = offset + size <= XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT); + offset = XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT) + + XFS_DIR2_DATA_ENTSIZE(sfep->namelen); + sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); + } + /* + * Calculate data bytes used excluding the new entry, if this + * was a data block (block form directory). + */ + used = offset + + (sfp->hdr.count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) + + (uint)sizeof(xfs_dir2_block_tail_t); + /* + * If it won't fit in a block form then we can't insert it, + * we'll go back, convert to block, then try the insert and convert + * to leaf. + */ + if (used + (holefit ? 0 : size) > mp->m_dirblksize) + return 0; + /* + * If changing the inode number size, do it the hard way. + */ +#if XFS_BIG_FILESYSTEMS + if (objchange) { + return 2; + } +#else + ASSERT(objchange == 0); +#endif + /* + * If it won't fit at the end then do it the hard way (use the hole). + */ + if (used + size > mp->m_dirblksize) + return 2; + /* + * Do it the easy way. + */ + *sfepp = sfep; + *offsetp = offset; + return 1; +} + +#ifdef DEBUG +/* + * Check consistency of shortform directory, assert if bad. 
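+ * The invariants: entry offsets increase monotonically, the number of
+ * inode numbers above XFS_DIR2_MAX_SHORT_INUM matches hdr.i8count,
+ * the bytes walked match di_size, and the whole directory would still
+ * fit in one directory block if converted.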
+ */ +static void +xfs_dir2_sf_check( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry number */ + int i8count; /* number of big inode#s */ + xfs_ino_t ino; /* entry inode number */ + int offset; /* data offset */ + xfs_dir2_sf_entry_t *sfep; /* shortform dir entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + dp = args->dp; + + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + offset = XFS_DIR2_DATA_FIRST_OFFSET; + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT); + i8count = ino > XFS_DIR2_MAX_SHORT_INUM; + + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + ASSERT(XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT) >= offset); + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + i8count += ino > XFS_DIR2_MAX_SHORT_INUM; + offset = + XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT) + + XFS_DIR2_DATA_ENTSIZE(sfep->namelen); + } + ASSERT(i8count == sfp->hdr.i8count); +#if !XFS_BIG_FILESYSTEMS + ASSERT(i8count == 0); +#endif + ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size); + ASSERT(offset + + (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + + (uint)sizeof(xfs_dir2_block_tail_t) <= + dp->i_mount->m_dirblksize); +} +#endif /* DEBUG */ + +/* + * Create a new (shortform) directory. + */ +int /* error, always 0 */ +xfs_dir2_sf_create( + xfs_da_args_t *args, /* operation arguments */ + xfs_ino_t pino) /* parent inode number */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int i8count; /* parent inode is an 8-byte number */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + int size; /* directory size */ + + xfs_dir2_trace_args_i("sf_create", args, pino); + dp = args->dp; + + ASSERT(dp != NULL); + ASSERT(dp->i_d.di_size == 0); + /* + * If it's currently a zero-length extent file, + * convert it to local format. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) { + dp->i_df.if_flags &= ~XFS_IFEXTENTS; /* just in case */ + dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE); + dp->i_df.if_flags |= XFS_IFINLINE; + } + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + ASSERT(dp->i_df.if_bytes == 0); + i8count = pino > XFS_DIR2_MAX_SHORT_INUM; + size = XFS_DIR2_SF_HDR_SIZE(i8count); + /* + * Make a buffer for the data. + */ + xfs_idata_realloc(dp, size, XFS_DATA_FORK); + /* + * Fill in the header, + */ + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp->hdr.i8count = i8count; + /* + * Now can put in the inode number, since i8count is set. + */ + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &pino, &sfp->hdr.parent, ARCH_CONVERT); + sfp->hdr.count = 0; + dp->i_d.di_size = size; + xfs_dir2_sf_check(args); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + return 0; +} + +int /* error */ +xfs_dir2_sf_getdents( + xfs_inode_t *dp, /* incore directory inode */ + uio_t *uio, /* caller's buffer control */ + int *eofp, /* eof reached? 
(out) */ + xfs_dirent_t *dbp, /* caller's buffer */ + xfs_dir2_put_t put) /* abi's formatting function */ +{ + int error; /* error return value */ + int i; /* shortform entry number */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_dataptr_t off; /* current entry's offset */ + xfs_dir2_put_args_t p; /* arg package for put rtn */ + xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_off_t dir_offset; + + mp = dp->i_mount; + + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Give up if the directory is way too short. + */ + if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + return XFS_ERROR(EIO); + } + + dir_offset = uio->uio_offset; + + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + + ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); + + /* + * If the block number in the offset is out of range, we're done. + */ + if (XFS_DIR2_DATAPTR_TO_DB(mp, dir_offset) > mp->m_dirdatablk) { + *eofp = 1; + return 0; + } + + /* + * Set up putargs structure. + */ + p.dbp = dbp; + p.put = put; + p.uio = uio; + /* + * Put . entry unless we're starting past it. + */ + if (dir_offset <= + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_DOT_OFFSET)) { + p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, 0, + XFS_DIR2_DATA_DOTDOT_OFFSET); +#if XFS_BIG_FILESYSTEMS + p.ino = dp->i_ino + mp->m_inoadd; +#else + p.ino = dp->i_ino; +#endif + p.name = "."; + p.namelen = 1; + + error = p.put(&p); + + if (!p.done) { + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_DOT_OFFSET); + return error; + } + } + + /* + * Put .. entry unless we're starting past it. + */ + if (dir_offset <= + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_DOTDOT_OFFSET)) { + p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_FIRST_OFFSET); +#if XFS_BIG_FILESYSTEMS + p.ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT) + + mp->m_inoadd; +#else + p.ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT); +#endif + p.name = ".."; + p.namelen = 2; + + error = p.put(&p); + + if (!p.done) { + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_DOTDOT_OFFSET); + return error; + } + } + + /* + * Loop while there are more entries and put'ing works. + */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + + off = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT)); + + if (dir_offset > off) + continue; + + p.namelen = sfep->namelen; + + p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT) + + XFS_DIR2_DATA_ENTSIZE(p.namelen)); + +#if XFS_BIG_FILESYSTEMS + p.ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT) + + mp->m_inoadd; +#else + p.ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); +#endif + p.name = (char *)sfep->name; + + error = p.put(&p); + + if (!p.done) { + uio->uio_offset = off; + return error; + } + } + + /* + * They all fit. + */ + *eofp = 1; + + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0); + + return 0; +} + +/* + * Lookup an entry in a shortform directory. 
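+ * "." and ".." are handled specially since they are not stored as
+ * entries; every other name is found by a simple linear scan, which
+ * is fine for something small enough to live inside the inode.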
+ * Returns EEXIST if found, ENOENT if not found. + */ +int /* error */ +xfs_dir2_sf_lookup( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry index */ + xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + xfs_dir2_trace_args("sf_lookup", args); + xfs_dir2_sf_check(args); + dp = args->dp; + + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Bail out if the directory is way too short. + */ + if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); + /* + * Special case for . + */ + if (args->namelen == 1 && args->name[0] == '.') { + args->inumber = dp->i_ino; + return XFS_ERROR(EEXIST); + } + /* + * Special case for .. + */ + if (args->namelen == 2 && + args->name[0] == '.' && args->name[1] == '.') { + args->inumber = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT); + return XFS_ERROR(EEXIST); + } + /* + * Loop over all the entries trying to match ours. + */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + if (sfep->namelen == args->namelen && + sfep->name[0] == args->name[0] && + memcmp(args->name, sfep->name, args->namelen) == 0) { + args->inumber = + XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + return XFS_ERROR(EEXIST); + } + } + /* + * Didn't find it. + */ + ASSERT(args->oknoent); + return XFS_ERROR(ENOENT); +} + +/* + * Remove an entry from a shortform directory. + */ +int /* error */ +xfs_dir2_sf_removename( + xfs_da_args_t *args) +{ + int byteoff; /* offset of removed entry */ + xfs_inode_t *dp; /* incore directory inode */ + int entsize; /* this entry's size */ + int i; /* shortform entry index */ + int newsize; /* new inode size */ + int oldsize; /* old inode size */ + xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + xfs_dir2_trace_args("sf_removename", args); + dp = args->dp; + + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + oldsize = (int)dp->i_d.di_size; + /* + * Bail out if the directory is way too short. + */ + if (oldsize < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == oldsize); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(oldsize >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); + /* + * Loop over the old directory entries. + * Find the one we're deleting. + */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + if (sfep->namelen == args->namelen && + sfep->name[0] == args->name[0] && + memcmp(sfep->name, args->name, args->namelen) == 0) { + ASSERT(XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT) == + args->inumber); + break; + } + } + /* + * Didn't find it. + */ + if (i == sfp->hdr.count) { + return XFS_ERROR(ENOENT); + } + /* + * Calculate sizes. 
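+ * newsize = oldsize - entsize; everything after the dead entry (when
+ * anything follows it) is slid down over it:
+ *
+ *   memmove((char *)sfp + byteoff,
+ *           (char *)sfp + byteoff + entsize,
+ *           oldsize - (byteoff + entsize));
+ *
+ * and the data fork then shrinks by entsize.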
+ */ + byteoff = (int)((char *)sfep - (char *)sfp); + entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen); + newsize = oldsize - entsize; + /* + * Copy the part if any after the removed entry, sliding it down. + */ + if (byteoff + entsize < oldsize) + memmove((char *)sfp + byteoff, (char *)sfp + byteoff + entsize, + oldsize - (byteoff + entsize)); + /* + * Fix up the header and file size. + */ + sfp->hdr.count--; + dp->i_d.di_size = newsize; + /* + * Reallocate, making it smaller. + */ + xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; +#if XFS_BIG_FILESYSTEMS + /* + * Are we changing inode number size? + */ + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) { + if (sfp->hdr.i8count == 1) + xfs_dir2_sf_toino4(args); + else + sfp->hdr.i8count--; + } +#endif + xfs_dir2_sf_check(args); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + return 0; +} + +/* + * Replace the inode number of an entry in a shortform directory. + */ +int /* error */ +xfs_dir2_sf_replace( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry index */ +#if XFS_BIG_FILESYSTEMS || defined(DEBUG) + xfs_ino_t ino=0; /* entry old inode number */ +#endif +#if XFS_BIG_FILESYSTEMS + int i8elevated; /* sf_toino8 set i8count=1 */ +#endif + xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + xfs_dir2_trace_args("sf_replace", args); + dp = args->dp; + + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Bail out if the shortform directory is way too small. + */ + if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); +#if XFS_BIG_FILESYSTEMS + /* + * New inode number is large, and need to convert to 8-byte inodes. + */ + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) { + int error; /* error return value */ + int newsize; /* new inode size */ + + newsize = + dp->i_df.if_bytes + + (sfp->hdr.count + 1) * + ((uint)sizeof(xfs_dir2_ino8_t) - + (uint)sizeof(xfs_dir2_ino4_t)); + /* + * Won't fit as shortform, convert to block then do replace. + */ + if (newsize > XFS_IFORK_DSIZE(dp)) { + error = xfs_dir2_sf_to_block(args); + if (error) { + return error; + } + return xfs_dir2_block_replace(args); + } + /* + * Still fits, convert to 8-byte now. + */ + xfs_dir2_sf_toino8(args); + i8elevated = 1; + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + } else + i8elevated = 0; +#endif + ASSERT(args->namelen != 1 || args->name[0] != '.'); + /* + * Replace ..'s entry. + */ + if (args->namelen == 2 && + args->name[0] == '.' && args->name[1] == '.') { +#if XFS_BIG_FILESYSTEMS || defined(DEBUG) + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT); + ASSERT(args->inumber != ino); +#endif + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber, &sfp->hdr.parent, ARCH_CONVERT); + } + /* + * Normal entry, look for the name. 
+ */ + else { + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + if (sfep->namelen == args->namelen && + sfep->name[0] == args->name[0] && + memcmp(args->name, sfep->name, args->namelen) == 0) { +#if XFS_BIG_FILESYSTEMS || defined(DEBUG) + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + ASSERT(args->inumber != ino); +#endif + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + break; + } + } + /* + * Didn't find it. + */ + if (i == sfp->hdr.count) { + ASSERT(args->oknoent); +#if XFS_BIG_FILESYSTEMS + if (i8elevated) + xfs_dir2_sf_toino4(args); +#endif + return XFS_ERROR(ENOENT); + } + } +#if XFS_BIG_FILESYSTEMS + /* + * See if the old number was large, the new number is small. + */ + if (ino > XFS_DIR2_MAX_SHORT_INUM && + args->inumber <= XFS_DIR2_MAX_SHORT_INUM) { + /* + * And the old count was one, so need to convert to small. + */ + if (sfp->hdr.i8count == 1) + xfs_dir2_sf_toino4(args); + else + sfp->hdr.i8count--; + } + /* + * See if the old number was small, the new number is large. + */ + if (ino <= XFS_DIR2_MAX_SHORT_INUM && + args->inumber > XFS_DIR2_MAX_SHORT_INUM) { + /* + * add to the i8count unless we just converted to 8-byte + * inodes (which does an implied i8count = 1) + */ + ASSERT(sfp->hdr.i8count != 0); + if (!i8elevated) + sfp->hdr.i8count++; + } +#endif + xfs_dir2_sf_check(args); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); + return 0; +} + +#if XFS_BIG_FILESYSTEMS +/* + * Convert from 8-byte inode numbers to 4-byte inode numbers. + * The last 8-byte inode number is gone, but the count is still 1. + */ +static void +xfs_dir2_sf_toino4( + xfs_da_args_t *args) /* operation arguments */ +{ + char *buf; /* old dir's buffer */ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry index */ + xfs_ino_t ino; /* entry inode number */ + int newsize; /* new inode size */ + xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */ + xfs_dir2_sf_t *oldsfp; /* old sf directory */ + int oldsize; /* old inode size */ + xfs_dir2_sf_entry_t *sfep; /* new sf entry */ + xfs_dir2_sf_t *sfp; /* new sf directory */ + + xfs_dir2_trace_args("sf_toino4", args); + dp = args->dp; + + /* + * Copy the old directory to the buffer. + * Then nuke it from the inode, and add the new buffer to the inode. + * Don't want xfs_idata_realloc copying the data here. + */ + oldsize = dp->i_df.if_bytes; + buf = kmem_alloc(oldsize, KM_SLEEP); + oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(oldsfp->hdr.i8count == 1); + memcpy(buf, oldsfp, oldsize); + /* + * Compute the new inode size. + */ + newsize = + oldsize - + (oldsfp->hdr.count + 1) * + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)); + xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK); + xfs_idata_realloc(dp, newsize, XFS_DATA_FORK); + /* + * Reset our pointers, the data has moved. + */ + oldsfp = (xfs_dir2_sf_t *)buf; + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + /* + * Fill in the new header. + */ + sfp->hdr.count = oldsfp->hdr.count; + sfp->hdr.i8count = 0; + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp, &oldsfp->hdr.parent, ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, &sfp->hdr.parent, ARCH_CONVERT); + /* + * Copy the entries field by field. 
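+ * A single memcpy would not do: each entry's size depends on its
+ * namelen and on the inode number width, and the width is exactly
+ * what is changing here.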
+ */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp), + oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep), + oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) { + sfep->namelen = oldsfep->namelen; + sfep->offset = oldsfep->offset; + memcpy(sfep->name, oldsfep->name, sfep->namelen); + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp, + XFS_DIR2_SF_INUMBERP(oldsfep), ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + } + /* + * Clean up the inode. + */ + kmem_free(buf, oldsize); + dp->i_d.di_size = newsize; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); +} + +/* + * Convert from 4-byte inode numbers to 8-byte inode numbers. + * The new 8-byte inode number is not there yet, we leave with the + * count 1 but no corresponding entry. + */ +static void +xfs_dir2_sf_toino8( + xfs_da_args_t *args) /* operation arguments */ +{ + char *buf; /* old dir's buffer */ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry index */ + xfs_ino_t ino; /* entry inode number */ + int newsize; /* new inode size */ + xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */ + xfs_dir2_sf_t *oldsfp; /* old sf directory */ + int oldsize; /* old inode size */ + xfs_dir2_sf_entry_t *sfep; /* new sf entry */ + xfs_dir2_sf_t *sfp; /* new sf directory */ + + xfs_dir2_trace_args("sf_toino8", args); + dp = args->dp; + + /* + * Copy the old directory to the buffer. + * Then nuke it from the inode, and add the new buffer to the inode. + * Don't want xfs_idata_realloc copying the data here. + */ + oldsize = dp->i_df.if_bytes; + buf = kmem_alloc(oldsize, KM_SLEEP); + oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(oldsfp->hdr.i8count == 0); + memcpy(buf, oldsfp, oldsize); + /* + * Compute the new inode size. + */ + newsize = + oldsize + + (oldsfp->hdr.count + 1) * + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)); + xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK); + xfs_idata_realloc(dp, newsize, XFS_DATA_FORK); + /* + * Reset our pointers, the data has moved. + */ + oldsfp = (xfs_dir2_sf_t *)buf; + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + /* + * Fill in the new header. + */ + sfp->hdr.count = oldsfp->hdr.count; + sfp->hdr.i8count = 1; + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp, &oldsfp->hdr.parent, ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, &sfp->hdr.parent, ARCH_CONVERT); + /* + * Copy the entries field by field. + */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp), + oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep), + oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) { + sfep->namelen = oldsfep->namelen; + sfep->offset = oldsfep->offset; + memcpy(sfep->name, oldsfep->name, sfep->namelen); + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp, + XFS_DIR2_SF_INUMBERP(oldsfep), ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + } + /* + * Clean up the inode. 
+ */ + kmem_free(buf, oldsize); + dp->i_d.di_size = newsize; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); +} +#endif /* XFS_BIG_FILESYSTEMS */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_sf.h linux.21rc5-ac2/fs/xfs/xfs_dir2_sf.h --- linux.21rc5/fs/xfs/xfs_dir2_sf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_sf.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_SF_H__ +#define __XFS_DIR2_SF_H__ + +/* + * Directory layout when stored internal to an inode. + * + * Small directories are packed as tightly as possible so as to + * fit into the literal area of the inode. + */ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_dir2_block; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * Maximum size of a shortform directory. + */ +#define XFS_DIR2_SF_MAX_SIZE \ + (XFS_DINODE_MAX_SIZE - (uint)sizeof(xfs_dinode_core_t) - \ + (uint)sizeof(xfs_agino_t)) + +/* + * Inode number stored as 8 8-bit values. + */ +typedef struct { __uint8_t i[8]; } xfs_dir2_ino8_t; + +#define XFS_DIR2_SF_GET_INO8_ARCH(di,arch) \ + (xfs_ino_t)(DIRINO_GET_ARCH(&di,arch)) +#define XFS_DIR2_SF_GET_INO8(di) \ + XFS_DIR2_SF_GET_INO8_ARCH(di,ARCH_NOCONVERT) + +/* + * Inode number stored as 4 8-bit values. + * Works a lot of the time, when all the inode numbers in a directory + * fit in 32 bits. + */ +typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t; +#define XFS_DIR2_SF_GET_INO4_ARCH(di,arch) \ + (xfs_ino_t)(DIRINO4_GET_ARCH(&di,arch)) +#define XFS_DIR2_SF_GET_INO4(di) \ + XFS_DIR2_SF_GET_INO4_ARCH(di,ARCH_NOCONVERT) + +typedef union { + xfs_dir2_ino8_t i8; + xfs_dir2_ino4_t i4; +} xfs_dir2_inou_t; +#define XFS_DIR2_MAX_SHORT_INUM ((xfs_ino_t)0xffffffffULL) + +/* + * Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t. + * Only need 16 bits, this is the byte offset into the single block form. + */ +typedef struct { __uint8_t i[2]; } xfs_dir2_sf_off_t; + +/* + * The parent directory has a dedicated field, and the self-pointer must + * be calculated on the fly. 
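+ *
+ * Byte for byte, with 4-byte inode numbers, that means:
+ *
+ *   hdr:   count(1) i8count(1) parent(4)
+ *   entry: namelen(1) offset(2) name[namelen] inumber(4)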
+ * + * Entries are packed toward the top as tightly as possible. The header + * and the elements must be memcpy'd out into a work area to get correct + * alignment for the inode number fields. + */ +typedef struct xfs_dir2_sf_hdr { + __uint8_t count; /* count of entries */ + __uint8_t i8count; /* count of 8-byte inode #s */ + xfs_dir2_inou_t parent; /* parent dir inode number */ +} xfs_dir2_sf_hdr_t; + +typedef struct xfs_dir2_sf_entry { + __uint8_t namelen; /* actual name length */ + xfs_dir2_sf_off_t offset; /* saved offset */ + __uint8_t name[1]; /* name, variable size */ + xfs_dir2_inou_t inumber; /* inode number, var. offset */ +} xfs_dir2_sf_entry_t; + +typedef struct xfs_dir2_sf { + xfs_dir2_sf_hdr_t hdr; /* shortform header */ + xfs_dir2_sf_entry_t list[1]; /* shortform entries */ +} xfs_dir2_sf_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_HDR_SIZE) +int xfs_dir2_sf_hdr_size(int i8count); +#define XFS_DIR2_SF_HDR_SIZE(i8count) xfs_dir2_sf_hdr_size(i8count) +#else +#define XFS_DIR2_SF_HDR_SIZE(i8count) \ + ((uint)sizeof(xfs_dir2_sf_hdr_t) - \ + ((i8count) == 0) * \ + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_INUMBERP) +xfs_dir2_inou_t *xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep); +#define XFS_DIR2_SF_INUMBERP(sfep) xfs_dir2_sf_inumberp(sfep) +#else +#define XFS_DIR2_SF_INUMBERP(sfep) \ + ((xfs_dir2_inou_t *)&(sfep)->name[(sfep)->namelen]) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_GET_INUMBER) +xfs_intino_t xfs_dir2_sf_get_inumber_arch(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from, + xfs_arch_t arch); +#define XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, from, arch) \ + xfs_dir2_sf_get_inumber_arch(sfp, from, arch) + +#else +#define XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, from, arch) \ + ((sfp)->hdr.i8count == 0 ? 
\ + (xfs_intino_t)XFS_DIR2_SF_GET_INO4_ARCH(*(from), arch) : \ + (xfs_intino_t)XFS_DIR2_SF_GET_INO8_ARCH(*(from), arch)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_PUT_INUMBER) +void xfs_dir2_sf_put_inumber_arch(xfs_dir2_sf_t *sfp, xfs_ino_t *from, + xfs_dir2_inou_t *to, xfs_arch_t arch); +#define XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp,from,to,arch) \ + xfs_dir2_sf_put_inumber_arch(sfp,from,to,arch) +#else +#define XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp,from,to,arch) \ + if ((sfp)->hdr.i8count == 0) { \ + DIRINO4_COPY_ARCH(from,to,arch); \ + } else { \ + DIRINO_COPY_ARCH(from,to,arch); \ + } +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_GET_OFFSET) +xfs_dir2_data_aoff_t xfs_dir2_sf_get_offset_arch(xfs_dir2_sf_entry_t *sfep, + xfs_arch_t arch); +xfs_dir2_data_aoff_t xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep); +#define XFS_DIR2_SF_GET_OFFSET_ARCH(sfep,arch) \ + xfs_dir2_sf_get_offset_arch(sfep,arch) +#else +#define XFS_DIR2_SF_GET_OFFSET_ARCH(sfep,arch) \ + INT_GET_UNALIGNED_16_ARCH(&(sfep)->offset.i,arch) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_PUT_OFFSET) +void xfs_dir2_sf_put_offset_arch(xfs_dir2_sf_entry_t *sfep, + xfs_dir2_data_aoff_t off, xfs_arch_t arch); +#define XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep,off,arch) \ + xfs_dir2_sf_put_offset_arch(sfep,off,arch) +#else +#define XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep,off,arch) \ + INT_SET_UNALIGNED_16_ARCH(&(sfep)->offset.i,off,arch) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_ENTSIZE_BYNAME) +int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len); +#define XFS_DIR2_SF_ENTSIZE_BYNAME(sfp,len) \ + xfs_dir2_sf_entsize_byname(sfp,len) +#else +#define XFS_DIR2_SF_ENTSIZE_BYNAME(sfp,len) /* space a name uses */ \ + ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \ + ((sfp)->hdr.i8count == 0) * \ + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_ENTSIZE_BYENTRY) +int xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep); +#define XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep) \ + xfs_dir2_sf_entsize_byentry(sfp,sfep) +#else +#define XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep) /* space an entry uses */ \ + ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (sfep)->namelen - \ + ((sfp)->hdr.i8count == 0) * \ + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_FIRSTENTRY) +xfs_dir2_sf_entry_t *xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp); +#define XFS_DIR2_SF_FIRSTENTRY(sfp) xfs_dir2_sf_firstentry(sfp) +#else +#define XFS_DIR2_SF_FIRSTENTRY(sfp) /* first entry in struct */ \ + ((xfs_dir2_sf_entry_t *) \ + ((char *)(sfp) + XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_NEXTENTRY) +xfs_dir2_sf_entry_t *xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, + xfs_dir2_sf_entry_t *sfep); +#define XFS_DIR2_SF_NEXTENTRY(sfp,sfep) xfs_dir2_sf_nextentry(sfp,sfep) +#else +#define XFS_DIR2_SF_NEXTENTRY(sfp,sfep) /* next entry in struct */ \ + ((xfs_dir2_sf_entry_t *) \ + ((char *)(sfep) + XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep))) +#endif + +/* + * Functions. 
+ */ + +extern int + xfs_dir2_block_sfsize(struct xfs_inode *dp, + struct xfs_dir2_block *block, + xfs_dir2_sf_hdr_t *sfhp); + +extern int + xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp, + int size, xfs_dir2_sf_hdr_t *sfhp); + +extern int + xfs_dir2_sf_addname(struct xfs_da_args *args); + +extern int + xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); + +extern int + xfs_dir2_sf_getdents(struct xfs_inode *dp, struct uio *uio, int *eofp, + struct xfs_dirent *dbp, xfs_dir2_put_t put); + +extern int + xfs_dir2_sf_lookup(struct xfs_da_args *args); + +extern int + xfs_dir2_sf_removename(struct xfs_da_args *args); + +extern int + xfs_dir2_sf_replace(struct xfs_da_args *args); + +#endif /* __XFS_DIR2_SF_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_trace.c linux.21rc5-ac2/fs/xfs/xfs_dir2_trace.c --- linux.21rc5/fs/xfs/xfs_dir2_trace.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_trace.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_trace.c + * Tracing for xfs v2 directories. + */ +#include "xfs.h" + +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_da_btree.h" +#include "xfs_dir2_trace.h" + +#ifdef DEBUG +ktrace_t *xfs_dir2_trace_buf; +#endif /* DEBUG */ + +#ifdef XFS_DIR2_TRACE +/* + * Enter something in the trace buffers. 
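+ * Each record fills all 16 ktrace slots: the type, the caller, seven
+ * argument words, the name length, and the leading bytes of the name
+ * packed into the last six slots. The record is logged twice, once
+ * to the global buffer and once to the inode's own buffer.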
+ */ +static void +xfs_dir2_trace_enter( + xfs_inode_t *dp, + int type, + char *where, + char *name, + int namelen, + __psunsigned_t a0, + __psunsigned_t a1, + __psunsigned_t a2, + __psunsigned_t a3, + __psunsigned_t a4, + __psunsigned_t a5, + __psunsigned_t a6) +{ + __psunsigned_t n[6]; + + ASSERT(xfs_dir2_trace_buf); + ASSERT(dp->i_dir_trace); + if (name) + memcpy(n, name, min(sizeof(n), namelen)); + else + memset((char *)n, 0, sizeof(n)); + ktrace_enter(xfs_dir2_trace_buf, + (void *)(__psunsigned_t)type, (void *)where, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, + (void *)(__psunsigned_t)namelen, + (void *)n[0], (void *)n[1], (void *)n[2], + (void *)n[3], (void *)n[4], (void *)n[5]); + ktrace_enter(dp->i_dir_trace, + (void *)(__psunsigned_t)type, (void *)where, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, + (void *)(__psunsigned_t)namelen, + (void *)n[0], (void *)n[1], (void *)n[2], + (void *)n[3], (void *)n[4], (void *)n[5]); +} + +void +xfs_dir2_trace_args( + char *where, + xfs_da_args_t *args) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, 0, 0); +} + +void +xfs_dir2_trace_args_b( + char *where, + xfs_da_args_t *args, + xfs_dabuf_t *bp) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_B, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, + (__psunsigned_t)(bp ? bp->bps[0] : NULL), 0); +} + +void +xfs_dir2_trace_args_bb( + char *where, + xfs_da_args_t *args, + xfs_dabuf_t *lbp, + xfs_dabuf_t *dbp) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BB, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, + (__psunsigned_t)(lbp ? lbp->bps[0] : NULL), + (__psunsigned_t)(dbp ? dbp->bps[0] : NULL)); +} + +void +xfs_dir2_trace_args_bibii( + char *where, + xfs_da_args_t *args, + xfs_dabuf_t *bs, + int ss, + xfs_dabuf_t *bd, + int sd, + int c) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BIBII, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)(bs ? bs->bps[0] : NULL), (__psunsigned_t)ss, + (__psunsigned_t)(bd ? bd->bps[0] : NULL), (__psunsigned_t)sd, + (__psunsigned_t)c); +} + +void +xfs_dir2_trace_args_db( + char *where, + xfs_da_args_t *args, + xfs_dir2_db_t db, + xfs_dabuf_t *bp) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_DB, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, (__psunsigned_t)db, + (__psunsigned_t)(bp ? 
bp->bps[0] : NULL)); +} + +void +xfs_dir2_trace_args_i( + char *where, + xfs_da_args_t *args, + xfs_ino_t i) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_I, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, (__psunsigned_t)i, 0); +} + +void +xfs_dir2_trace_args_s( + char *where, + xfs_da_args_t *args, + int s) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_S, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, (__psunsigned_t)s, 0); +} + +void +xfs_dir2_trace_args_sb( + char *where, + xfs_da_args_t *args, + int s, + xfs_dabuf_t *bp) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_SB, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, (__psunsigned_t)s, + (__psunsigned_t)(bp ? bp->bps[0] : NULL)); +} +#endif /* XFS_DIR2_TRACE */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir2_trace.h linux.21rc5-ac2/fs/xfs/xfs_dir2_trace.h --- linux.21rc5/fs/xfs/xfs_dir2_trace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir2_trace.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_TRACE_H__ +#define __XFS_DIR2_TRACE_H__ + +/* + * Tracing for xfs v2 directories. 
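+ * None of this generates code unless XFS_DIR2_TRACE is defined, which
+ * requires a DEBUG build (XFS_ALL_TRACE switches it on); in all other
+ * builds the xfs_dir2_trace_args*() calls compile away to nothing.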
+ */ + +struct ktrace; +struct xfs_dabuf; +struct xfs_da_args; + +#ifdef XFS_ALL_TRACE +#define XFS_DIR2_TRACE +#endif /* XFS_ALL_TRACE */ + +#if !defined(DEBUG) +#undef XFS_DIR2_TRACE +#endif /* !DEBUG */ + +#define XFS_DIR2_GTRACE_SIZE 4096 /* global buffer */ +#define XFS_DIR2_KTRACE_SIZE 32 /* per-inode buffer */ + +#define XFS_DIR2_KTRACE_ARGS 1 /* args only */ +#define XFS_DIR2_KTRACE_ARGS_B 2 /* args + buffer */ +#define XFS_DIR2_KTRACE_ARGS_BB 3 /* args + 2 buffers */ +#define XFS_DIR2_KTRACE_ARGS_DB 4 /* args, db, buffer */ +#define XFS_DIR2_KTRACE_ARGS_I 5 /* args, inum */ +#define XFS_DIR2_KTRACE_ARGS_S 6 /* args, int */ +#define XFS_DIR2_KTRACE_ARGS_SB 7 /* args, int, buffer */ +#define XFS_DIR2_KTRACE_ARGS_BIBII 8 /* args, buf/int/buf/int/int */ + +#ifdef XFS_DIR2_TRACE + +void xfs_dir2_trace_args(char *where, struct xfs_da_args *args); +void xfs_dir2_trace_args_b(char *where, struct xfs_da_args *args, + struct xfs_dabuf *bp); +void xfs_dir2_trace_args_bb(char *where, struct xfs_da_args *args, + struct xfs_dabuf *lbp, struct xfs_dabuf *dbp); +void xfs_dir2_trace_args_bibii(char *where, struct xfs_da_args *args, + struct xfs_dabuf *bs, int ss, + struct xfs_dabuf *bd, int sd, int c); +void xfs_dir2_trace_args_db(char *where, struct xfs_da_args *args, + xfs_dir2_db_t db, struct xfs_dabuf *bp); +void xfs_dir2_trace_args_i(char *where, struct xfs_da_args *args, xfs_ino_t i); +void xfs_dir2_trace_args_s(char *where, struct xfs_da_args *args, int s); +void xfs_dir2_trace_args_sb(char *where, struct xfs_da_args *args, int s, + struct xfs_dabuf *bp); + +#else /* XFS_DIR2_TRACE */ + +#define xfs_dir2_trace_args(where, args) +#define xfs_dir2_trace_args_b(where, args, bp) +#define xfs_dir2_trace_args_bb(where, args, lbp, dbp) +#define xfs_dir2_trace_args_bibii(where, args, bs, ss, bd, sd, c) +#define xfs_dir2_trace_args_db(where, args, db, bp) +#define xfs_dir2_trace_args_i(where, args, i) +#define xfs_dir2_trace_args_s(where, args, s) +#define xfs_dir2_trace_args_sb(where, args, s, bp) + +#endif /* XFS_DIR2_TRACE */ + +extern struct ktrace *xfs_dir2_trace_buf; + +#endif /* __XFS_DIR2_TRACE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir.c linux.21rc5-ac2/fs/xfs/xfs_dir.c --- linux.21rc5/fs/xfs/xfs_dir.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1210 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_error.h" + +/* + * xfs_dir.c + * + * Provide the external interfaces to manage directories. + */ + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Functions for the dirops interfaces. + */ +static void xfs_dir_mount(struct xfs_mount *mp); + +static int xfs_dir_isempty(struct xfs_inode *dp); + +static int xfs_dir_init(struct xfs_trans *trans, + struct xfs_inode *dir, + struct xfs_inode *parent_dir); + +static int xfs_dir_createname(struct xfs_trans *trans, + struct xfs_inode *dp, + char *name_string, + int name_len, + xfs_ino_t inode_number, + xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, + xfs_extlen_t total); + +static int xfs_dir_lookup(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name_string, + int name_length, + xfs_ino_t *inode_number); + +static int xfs_dir_removename(struct xfs_trans *trans, + struct xfs_inode *dp, + char *name_string, + int name_length, + xfs_ino_t ino, + xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, + xfs_extlen_t total); + +static int xfs_dir_getdents(struct xfs_trans *tp, + struct xfs_inode *dp, + struct uio *uiop, + int *eofp); + +static int xfs_dir_replace(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name_string, + int name_length, + xfs_ino_t inode_number, + xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, + xfs_extlen_t total); + +static int xfs_dir_canenter(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name_string, + int name_length); + +static int xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, + xfs_dinode_t *dip); + +xfs_dirops_t xfsv1_dirops = { + .xd_mount = xfs_dir_mount, + .xd_isempty = xfs_dir_isempty, + .xd_init = xfs_dir_init, + .xd_createname = xfs_dir_createname, + .xd_lookup = xfs_dir_lookup, + .xd_removename = xfs_dir_removename, + .xd_getdents = xfs_dir_getdents, + .xd_replace = xfs_dir_replace, + .xd_canenter = xfs_dir_canenter, + .xd_shortform_validate_ondisk = xfs_dir_shortform_validate_ondisk, + .xd_shortform_to_single = xfs_dir_shortform_to_leaf, +}; + +/* + * Internal routines when dirsize == XFS_LBSIZE(mp). + */ +STATIC int xfs_dir_leaf_lookup(xfs_da_args_t *args); +STATIC int xfs_dir_leaf_removename(xfs_da_args_t *args, int *number_entries, + int *total_namebytes); +STATIC int xfs_dir_leaf_getdents(xfs_trans_t *trans, xfs_inode_t *dp, + uio_t *uio, int *eofp, + xfs_dirent_t *dbp, + xfs_dir_put_t put); +STATIC int xfs_dir_leaf_replace(xfs_da_args_t *args); + +/* + * Internal routines when dirsize > XFS_LBSIZE(mp). 
+ */ +STATIC int xfs_dir_node_addname(xfs_da_args_t *args); +STATIC int xfs_dir_node_lookup(xfs_da_args_t *args); +STATIC int xfs_dir_node_removename(xfs_da_args_t *args); +STATIC int xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, + uio_t *uio, int *eofp, + xfs_dirent_t *dbp, + xfs_dir_put_t put); +STATIC int xfs_dir_node_replace(xfs_da_args_t *args); + +#if defined(DEBUG) +ktrace_t *xfs_dir_trace_buf; +#endif + + +/*======================================================================== + * Overall external interface routines. + *========================================================================*/ + +xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot; + +/* + * One-time startup routine called from xfs_init(). + */ +void +xfs_dir_startup(void) +{ + xfs_dir_hash_dot = xfs_da_hashname(".", 1); + xfs_dir_hash_dotdot = xfs_da_hashname("..", 2); +} + +/* + * Initialize directory-related fields in the mount structure. + */ +static void +xfs_dir_mount(xfs_mount_t *mp) +{ + uint shortcount, leafcount, count; + + mp->m_dirversion = 1; + shortcount = (mp->m_attroffset - (uint)sizeof(xfs_dir_sf_hdr_t)) / + (uint)sizeof(xfs_dir_sf_entry_t); + leafcount = (XFS_LBSIZE(mp) - (uint)sizeof(xfs_dir_leaf_hdr_t)) / + ((uint)sizeof(xfs_dir_leaf_entry_t) + + (uint)sizeof(xfs_dir_leaf_name_t)); + count = shortcount > leafcount ? shortcount : leafcount; + mp->m_dircook_elog = xfs_da_log2_roundup(count + 1); + ASSERT(mp->m_dircook_elog <= mp->m_sb.sb_blocklog); + mp->m_dir_node_ents = mp->m_attr_node_ents = + (XFS_LBSIZE(mp) - (uint)sizeof(xfs_da_node_hdr_t)) / + (uint)sizeof(xfs_da_node_entry_t); + mp->m_dir_magicpct = (XFS_LBSIZE(mp) * 37) / 100; + mp->m_dirblksize = mp->m_sb.sb_blocksize; + mp->m_dirblkfsbs = 1; +} + +/* + * Return 1 if directory contains only "." and "..". + */ +static int +xfs_dir_isempty(xfs_inode_t *dp) +{ + xfs_dir_sf_hdr_t *hdr; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + if (dp->i_d.di_size == 0) + return(1); + if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp)) + return(0); + hdr = (xfs_dir_sf_hdr_t *)dp->i_df.if_u1.if_data; + return(hdr->count == 0); +} + +/* + * Initialize a directory with its "." and ".." entries. + */ +static int +xfs_dir_init(xfs_trans_t *trans, xfs_inode_t *dir, xfs_inode_t *parent_dir) +{ + xfs_da_args_t args; + int error; + + memset((char *)&args, 0, sizeof(args)); + args.dp = dir; + args.trans = trans; + + ASSERT((dir->i_d.di_mode & IFMT) == IFDIR); + if ((error = xfs_dir_ino_validate(trans->t_mountp, parent_dir->i_ino))) + return error; + + return(xfs_dir_shortform_create(&args, parent_dir->i_ino)); +} + +/* + * Generic handler routine to add a name to a directory. + * Transitions directory from shortform to Btree as necessary. + */ +static int /* error */ +xfs_dir_createname(xfs_trans_t *trans, xfs_inode_t *dp, char *name, + int namelen, xfs_ino_t inum, xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, xfs_extlen_t total) +{ + xfs_da_args_t args; + int retval, newsize, done; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + if ((retval = xfs_dir_ino_validate(trans->t_mountp, inum))) + return (retval); + + XFS_STATS_INC(xfsstats.xs_dir_create); + /* + * Fill in the arg structure for this request. 
+ */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = inum; + args.dp = dp; + args.firstblock = firstblock; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = 0; + args.addname = args.oknoent = 1; + + /* + * Decide on what work routines to call based on the inode size. + */ + done = 0; + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + newsize = XFS_DIR_SF_ENTSIZE_BYNAME(args.namelen); + if ((dp->i_d.di_size + newsize) <= XFS_IFORK_DSIZE(dp)) { + retval = xfs_dir_shortform_addname(&args); + done = 1; + } else { + if (total == 0) + return XFS_ERROR(ENOSPC); + retval = xfs_dir_shortform_to_leaf(&args); + done = retval != 0; + } + } + if (!done && xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_addname(&args); + done = retval != ENOSPC; + if (!done) { + if (total == 0) + return XFS_ERROR(ENOSPC); + retval = xfs_dir_leaf_to_node(&args); + done = retval != 0; + } + } + if (!done) { + retval = xfs_dir_node_addname(&args); + } + return(retval); +} + +/* + * Generic handler routine to check if a name can be added to a directory, + * without adding any blocks to the directory. + */ +static int /* error */ +xfs_dir_canenter(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen) +{ + xfs_da_args_t args; + int retval, newsize; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = 0; + args.dp = dp; + args.firstblock = NULL; + args.flist = NULL; + args.total = 0; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = args.addname = args.oknoent = 1; + + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + newsize = XFS_DIR_SF_ENTSIZE_BYNAME(args.namelen); + if ((dp->i_d.di_size + newsize) <= XFS_IFORK_DSIZE(dp)) + retval = 0; + else + retval = XFS_ERROR(ENOSPC); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_addname(&args); + } else { + retval = xfs_dir_node_addname(&args); + } + return(retval); +} + +/* + * Generic handler routine to remove a name from a directory. + * Transitions directory from Btree to shortform as necessary. + */ +static int /* error */ +xfs_dir_removename(xfs_trans_t *trans, xfs_inode_t *dp, char *name, + int namelen, xfs_ino_t ino, xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, xfs_extlen_t total) +{ + xfs_da_args_t args; + int count, totallen, newsize, retval; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + XFS_STATS_INC(xfsstats.xs_dir_remove); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = ino; + args.dp = dp; + args.firstblock = firstblock; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = args.addname = args.oknoent = 0; + + /* + * Decide on what work routines to call based on the inode size. 
+ */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + retval = xfs_dir_shortform_removename(&args); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_removename(&args, &count, &totallen); + if (retval == 0) { + newsize = XFS_DIR_SF_ALLFIT(count, totallen); + if (newsize <= XFS_IFORK_DSIZE(dp)) { + retval = xfs_dir_leaf_to_shortform(&args); + } + } + } else { + retval = xfs_dir_node_removename(&args); + } + return(retval); +} + +static int /* error */ +xfs_dir_lookup(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen, + xfs_ino_t *inum) +{ + xfs_da_args_t args; + int retval; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + XFS_STATS_INC(xfsstats.xs_dir_lookup); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = 0; + args.dp = dp; + args.firstblock = NULL; + args.flist = NULL; + args.total = 0; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = args.addname = 0; + args.oknoent = 1; + + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + retval = xfs_dir_shortform_lookup(&args); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_lookup(&args); + } else { + retval = xfs_dir_node_lookup(&args); + } + if (retval == EEXIST) + retval = 0; + *inum = args.inumber; + return(retval); +} + +/* + * Implement readdir. + */ +static int /* error */ +xfs_dir_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, int *eofp) +{ + xfs_dirent_t *dbp; + int alignment, retval; + xfs_dir_put_t put; + + XFS_STATS_INC(xfsstats.xs_dir_getdents); + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + /* + * If our caller has given us a single contiguous memory buffer, + * just work directly within that buffer. If it's in user memory, + * lock it down first. + */ + alignment = sizeof(xfs_off_t) - 1; + if ((uio->uio_iovcnt == 1) && + (((__psint_t)uio->uio_iov[0].iov_base & alignment) == 0) && + ((uio->uio_iov[0].iov_len & alignment) == 0)) { + dbp = NULL; + put = xfs_dir_put_dirent64_direct; + } else { + dbp = kmem_alloc(sizeof(*dbp) + MAXNAMELEN, KM_SLEEP); + put = xfs_dir_put_dirent64_uio; + } + + /* + * Decide on what work routines to call based on the inode size. + */ + *eofp = 0; + + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + retval = xfs_dir_shortform_getdents(dp, uio, eofp, dbp, put); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_getdents(trans, dp, uio, eofp, dbp, put); + } else { + retval = xfs_dir_node_getdents(trans, dp, uio, eofp, dbp, put); + } + if (dbp != NULL) + kmem_free(dbp, sizeof(*dbp) + MAXNAMELEN); + + return(retval); +} + +static int /* error */ +xfs_dir_replace(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen, + xfs_ino_t inum, xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, xfs_extlen_t total) +{ + xfs_da_args_t args; + int retval; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + if ((retval = xfs_dir_ino_validate(trans->t_mountp, inum))) + return retval; + + /* + * Fill in the arg structure for this request. 
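+ *
+ * Note the internal convention used throughout: the work routines
+ * report "name found" as EEXIST. xfs_dir_lookup() above folds that
+ * back to 0 for its callers, while the remove and replace paths
+ * treat EEXIST as the go-ahead to modify the entry they found.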
+ */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = inum; + args.dp = dp; + args.firstblock = firstblock; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = args.addname = args.oknoent = 0; + + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + retval = xfs_dir_shortform_replace(&args); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_replace(&args); + } else { + retval = xfs_dir_node_replace(&args); + } + + return(retval); +} + +static int +xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, xfs_dinode_t *dp) +{ + xfs_ino_t ino; + int namelen_sum; + int count; + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int i; + + + + if ((INT_GET(dp->di_core.di_mode, ARCH_CONVERT) & IFMT) != IFDIR) { + return 0; + } + if (INT_GET(dp->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_LOCAL) { + return 0; + } + if (INT_GET(dp->di_core.di_size, ARCH_CONVERT) < sizeof(sf->hdr)) { + xfs_fs_cmn_err(CE_WARN, mp, "Invalid shortform size: dp 0x%p", + dp); + return 1; + } + sf = (xfs_dir_shortform_t *)(&dp->di_u.di_dirsf); + ino = XFS_GET_DIR_INO_ARCH(mp, sf->hdr.parent, ARCH_CONVERT); + if (xfs_dir_ino_validate(mp, ino)) + return 1; + + count = sf->hdr.count; + if ((count < 0) || ((count * 10) > XFS_LITINO(mp))) { + xfs_fs_cmn_err(CE_WARN, mp, + "Invalid shortform count: dp 0x%p", dp); + return(1); + } + + if (count == 0) { + return 0; + } + + namelen_sum = 0; + sfe = &sf->list[0]; + for (i = sf->hdr.count - 1; i >= 0; i--) { + ino = XFS_GET_DIR_INO_ARCH(mp, sfe->inumber, ARCH_CONVERT); + xfs_dir_ino_validate(mp, ino); + if (sfe->namelen >= XFS_LITINO(mp)) { + xfs_fs_cmn_err(CE_WARN, mp, + "Invalid shortform namelen: dp 0x%p", dp); + return 1; + } + namelen_sum += sfe->namelen; + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + if (namelen_sum >= XFS_LITINO(mp)) { + xfs_fs_cmn_err(CE_WARN, mp, + "Invalid shortform namelen: dp 0x%p", dp); + return 1; + } + + return 0; +} + +/*======================================================================== + * External routines when dirsize == XFS_LBSIZE(dp->i_mount). + *========================================================================*/ + +/* + * Add a name to the leaf directory structure + * This is the external routine. + */ +int +xfs_dir_leaf_addname(xfs_da_args_t *args) +{ + int index, retval; + xfs_dabuf_t *bp; + + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + + retval = xfs_dir_leaf_lookup_int(bp, args, &index); + if (retval == ENOENT) + retval = xfs_dir_leaf_add(bp, args, index); + xfs_da_buf_done(bp); + return(retval); +} + +/* + * Remove a name from the leaf directory structure + * This is the external routine. 
+ */ +STATIC int +xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen) +{ + xfs_dir_leafblock_t *leaf; + int index, retval; + xfs_dabuf_t *bp; + + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + retval = xfs_dir_leaf_lookup_int(bp, args, &index); + if (retval == EEXIST) { + (void)xfs_dir_leaf_remove(args->trans, bp, index); + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + *totallen = INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); + retval = 0; + } + xfs_da_buf_done(bp); + return(retval); +} + +/* + * Look up a name in a leaf directory structure. + * This is the external routine. + */ +STATIC int +xfs_dir_leaf_lookup(xfs_da_args_t *args) +{ + int index, retval; + xfs_dabuf_t *bp; + + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + retval = xfs_dir_leaf_lookup_int(bp, args, &index); + xfs_da_brelse(args->trans, bp); + return(retval); +} + +/* + * Copy out directory entries for getdents(), for leaf directories. + */ +STATIC int +xfs_dir_leaf_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, + int *eofp, xfs_dirent_t *dbp, xfs_dir_put_t put) +{ + xfs_dabuf_t *bp; + int retval, eob; + + retval = xfs_da_read_buf(dp->i_transp, dp, 0, -1, &bp, XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + retval = xfs_dir_leaf_getdents_int(bp, dp, 0, uio, &eob, dbp, put, -1); + xfs_da_brelse(trans, bp); + *eofp = (eob == 0); + return(retval); +} + +/* + * Look up a name in a leaf directory structure, replace the inode number. + * This is the external routine. + */ +STATIC int +xfs_dir_leaf_replace(xfs_da_args_t *args) +{ + int index, retval; + xfs_dabuf_t *bp; + xfs_ino_t inum; + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + + inum = args->inumber; + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + retval = xfs_dir_leaf_lookup_int(bp, args, &index); + if (retval == EEXIST) { + leaf = bp->data; + entry = &leaf->entries[index]; + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + /* XXX - replace assert? */ + XFS_DIR_SF_PUT_DIRINO_ARCH(&inum, &namest->inumber, ARCH_CONVERT); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber))); + xfs_da_buf_done(bp); + retval = 0; + } else + xfs_da_brelse(args->trans, bp); + return(retval); +} + + +/*======================================================================== + * External routines when dirsize > XFS_LBSIZE(mp). + *========================================================================*/ + +/* + * Add a name to a Btree-format directory. + * + * This will involve walking down the Btree, and may involve splitting + * leaf nodes and even splitting intermediate nodes up to and including + * the root node (a special case of an intermediate node). + */ +STATIC int +xfs_dir_node_addname(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + int retval, error; + + /* + * Fill in bucket of arguments/results/context to carry around. 
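+ *
+ * Schematically, the addname path below is:
+ *
+ *	xfs_da_node_lookup_int()  -- walk down to the target leaf
+ *	xfs_dir_leaf_add()	  -- try the insert there
+ *	on success:  xfs_da_fixhashpath()  -- fix hashvals upward
+ *	on ENOSPC:   xfs_da_split()	   -- split leaf/node blocks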
+ */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_dir_node_ents; + + /* + * Search to see if name already exists, and get back a pointer + * to where it should go. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) + retval = error; + if (retval != ENOENT) + goto error; + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC); + retval = xfs_dir_leaf_add(blk->bp, args, blk->index); + if (retval == 0) { + /* + * Addition succeeded, update Btree hashvals. + */ + if (!args->justcheck) + xfs_da_fixhashpath(state, &state->path); + } else { + /* + * Addition failed, split as many Btree elements as required. + */ + if (args->total == 0) { + ASSERT(retval == ENOSPC); + goto error; + } + retval = xfs_da_split(state); + } +error: + xfs_da_state_free(state); + + return(retval); +} + +/* + * Remove a name from a B-tree directory. + * + * This will involve walking down the Btree, and may involve joining + * leaf nodes and even joining intermediate nodes up to and including + * the root node (a special case of an intermediate node). + */ +STATIC int +xfs_dir_node_removename(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + int retval, error; + + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_dir_node_ents; + + /* + * Search to see if name exists, and get back a pointer to it. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) + retval = error; + if (retval != EEXIST) { + xfs_da_state_free(state); + return(retval); + } + + /* + * Remove the name and update the hashvals in the tree. + */ + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC); + retval = xfs_dir_leaf_remove(args->trans, blk->bp, blk->index); + xfs_da_fixhashpath(state, &state->path); + + /* + * Check to see if the tree needs to be collapsed. + */ + error = 0; + if (retval) { + error = xfs_da_join(state); + } + + xfs_da_state_free(state); + if (error) + return(error); + return(0); +} + +/* + * Look up a filename in a int directory. + * Use an internal routine to actually do all the work. + */ +STATIC int +xfs_dir_node_lookup(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + int retval, error, i; + + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_dir_node_ents; + + /* + * Search to see if name exists, + * and get back a pointer to it. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) { + retval = error; + } + + /* + * If not in a transaction, we have to release all the buffers. + */ + for (i = 0; i < state->path.active; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + + xfs_da_state_free(state); + return(retval); +} + +STATIC int +xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, + int *eofp, xfs_dirent_t *dbp, xfs_dir_put_t put) +{ + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + xfs_dir_leafblock_t *leaf = NULL; + xfs_dablk_t bno, nextbno; + xfs_dahash_t cookhash; + xfs_mount_t *mp; + int error, eob, i; + xfs_dabuf_t *bp; + xfs_daddr_t nextda; + + /* + * Pick up our context. 
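+ *
+ * The getdents "cookie" packs a block number, a duplicate-hash
+ * sequence number and a name hashval into the xfs_off_t offset.
+ * The field widths are set per-mount from m_dircook_elog, so the
+ * picture below is schematic only:
+ *
+ *	[ block number | entry seqno | hashval ]
+ *	decoded via XFS_DA_COOKIE_BNO/_ENTRY/_HASH,
+ *	encoded via XFS_DA_MAKE_COOKIE(mp, bno, entry, hash)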
+ */ + mp = dp->i_mount; + bp = NULL; + bno = XFS_DA_COOKIE_BNO(mp, uio->uio_offset); + cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); + + xfs_dir_trace_g_du("node: start", dp, uio); + + /* + * Re-find our place, even if we're confused about what our place is. + * + * First we check the block number from the magic cookie, it is a + * cache of where we ended last time. If we find a leaf block, and + * the starting hashval in that block is less than our desired + * hashval, then we run with it. + */ + if (bno > 0) { + error = xfs_da_read_buf(trans, dp, bno, -2, &bp, XFS_DATA_FORK); + if ((error != 0) && (error != EFSCORRUPTED)) + return(error); + if (bp) + leaf = bp->data; + if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { + xfs_dir_trace_g_dub("node: block not a leaf", + dp, uio, bno); + xfs_da_brelse(trans, bp); + bp = NULL; + } + if (bp && INT_GET(leaf->entries[0].hashval, ARCH_CONVERT) > cookhash) { + xfs_dir_trace_g_dub("node: leaf hash too large", + dp, uio, bno); + xfs_da_brelse(trans, bp); + bp = NULL; + } + if (bp && + cookhash > INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT)) { + xfs_dir_trace_g_dub("node: leaf hash too small", + dp, uio, bno); + xfs_da_brelse(trans, bp); + bp = NULL; + } + } + + /* + * If we did not find a leaf block from the blockno in the cookie, + * or we there was no blockno in the cookie (eg: first time thru), + * the we start at the top of the Btree and re-find our hashval. + */ + if (bp == NULL) { + xfs_dir_trace_g_du("node: start at root" , dp, uio); + bno = 0; + for (;;) { + error = xfs_da_read_buf(trans, dp, bno, -1, &bp, + XFS_DATA_FORK); + if (error) + return(error); + if (bp == NULL) + return(XFS_ERROR(EFSCORRUPTED)); + node = bp->data; + if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC) + break; + btree = &node->btree[0]; + xfs_dir_trace_g_dun("node: node detail", dp, uio, node); + for (i = 0; i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) { + if (INT_GET(btree->hashval, ARCH_CONVERT) >= cookhash) { + bno = INT_GET(btree->before, ARCH_CONVERT); + break; + } + } + if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { + xfs_da_brelse(trans, bp); + xfs_dir_trace_g_du("node: hash beyond EOF", + dp, uio); + uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0, + XFS_DA_MAXHASH); + *eofp = 1; + return(0); + } + xfs_dir_trace_g_dub("node: going to block", + dp, uio, bno); + xfs_da_brelse(trans, bp); + } + } + ASSERT(cookhash != XFS_DA_MAXHASH); + + /* + * We've dropped down to the (first) leaf block that contains the + * hashval we are interested in. Continue rolling upward thru the + * leaf blocks until we fill up our buffer. 
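+ *
+ * Each pass of the loop below also kicks off readahead on the next
+ * sibling leaf (hdr.info.forw) via xfs_da_reada_buf() before copying
+ * entries out, so the walk along the leaf chain overlaps I/O with
+ * the copy-out work.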
+ */ + for (;;) { + leaf = bp->data; + if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC)) { + xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf); + xfs_da_brelse(trans, bp); + XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)", + XFS_ERRLEVEL_LOW, mp, leaf); + return XFS_ERROR(EFSCORRUPTED); + } + xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf); + if ((nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT))) { + nextda = xfs_da_reada_buf(trans, dp, nextbno, + XFS_DATA_FORK); + } else + nextda = -1; + error = xfs_dir_leaf_getdents_int(bp, dp, bno, uio, &eob, dbp, + put, nextda); + xfs_da_brelse(trans, bp); + bno = nextbno; + if (eob) { + xfs_dir_trace_g_dub("node: E-O-B", dp, uio, bno); + *eofp = 0; + return(error); + } + if (bno == 0) + break; + error = xfs_da_read_buf(trans, dp, bno, nextda, &bp, + XFS_DATA_FORK); + if (error) + return(error); + if (unlikely(bp == NULL)) { + XFS_ERROR_REPORT("xfs_dir_node_getdents(2)", + XFS_ERRLEVEL_LOW, mp); + return(XFS_ERROR(EFSCORRUPTED)); + } + } + *eofp = 1; + xfs_dir_trace_g_du("node: E-O-F", dp, uio); + return(0); +} + +/* + * Look up a filename in an int directory, replace the inode number. + * Use an internal routine to actually do the lookup. + */ +STATIC int +xfs_dir_node_replace(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + xfs_ino_t inum; + int retval, error, i; + xfs_dabuf_t *bp; + + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_dir_node_ents; + inum = args->inumber; + + /* + * Search to see if name exists, + * and get back a pointer to it. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) { + retval = error; + } + + if (retval == EEXIST) { + blk = &state->path.blk[state->path.active - 1]; + ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC); + bp = blk->bp; + leaf = bp->data; + entry = &leaf->entries[blk->index]; + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + /* XXX - replace assert ? */ + XFS_DIR_SF_PUT_DIRINO_ARCH(&inum, &namest->inumber, ARCH_CONVERT); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber))); + xfs_da_buf_done(bp); + blk->bp = NULL; + retval = 0; + } else { + i = state->path.active - 1; + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + for (i = 0; i < state->path.active - 1; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + + xfs_da_state_free(state); + return(retval); +} + +#if defined(XFS_DIR_TRACE) +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_du(char *where, xfs_inode_t *dp, uio_t *uio) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DU, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + NULL, NULL, NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. 
+ */ +void +xfs_dir_trace_g_dub(char *where, xfs_inode_t *dp, uio_t *uio, xfs_dablk_t bno) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUB, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)bno, + NULL, NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio, + xfs_da_intnode_t *node) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)INT_GET(node->hdr.info.forw, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT), + NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio, + xfs_dir_leafblock_t *leaf) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUL, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)INT_GET(leaf->hdr.info.forw, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT), + NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_due(char *where, xfs_inode_t *dp, uio_t *uio, + xfs_dir_leaf_entry_t *entry) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUE, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)INT_GET(entry->hashval, ARCH_CONVERT), + NULL, NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_duc(char *where, xfs_inode_t *dp, uio_t *uio, xfs_off_t cookie) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUC, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)(cookie >> 32), + (__psunsigned_t)(cookie & 0xFFFFFFFF), + NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for the arguments given to the routine, + * generic form. 
+ */ +void +xfs_dir_trace_enter(int type, char *where, + __psunsigned_t a0, __psunsigned_t a1, + __psunsigned_t a2, __psunsigned_t a3, + __psunsigned_t a4, __psunsigned_t a5, + __psunsigned_t a6, __psunsigned_t a7, + __psunsigned_t a8, __psunsigned_t a9, + __psunsigned_t a10, __psunsigned_t a11) +{ + ASSERT(xfs_dir_trace_buf); + ktrace_enter(xfs_dir_trace_buf, (void *)((__psunsigned_t)type), + (void *)where, + (void *)a0, (void *)a1, (void *)a2, + (void *)a3, (void *)a4, (void *)a5, + (void *)a6, (void *)a7, (void *)a8, + (void *)a9, (void *)a10, (void *)a11, + NULL, NULL); +} +#endif /* XFS_DIR_TRACE */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir.h linux.21rc5-ac2/fs/xfs/xfs_dir.h --- linux.21rc5/fs/xfs/xfs_dir.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR_H__ +#define __XFS_DIR_H__ + +/* + * Large directories are structured around Btrees where all the data + * elements are in the leaf nodes. Filenames are hashed into an int, + * then that int is used as the index into the Btree. Since the hashval + * of a filename may not be unique, we may have duplicate keys. The + * internal links in the Btree are logical block offsets into the file. + * + * Small directories use a different format and are packed as tightly + * as possible so as to fit into the literal area of the inode. + */ + +#ifdef XFS_ALL_TRACE +#define XFS_DIR_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_DIR_TRACE +#endif + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +struct uio; +struct xfs_bmap_free; +struct xfs_da_args; +struct xfs_dinode; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * Directory function types. + * Put in structures (xfs_dirops_t) for v1 and v2 directories. 
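+ *
+ * Callers do not invoke the version-1 routines directly; every call
+ * goes through the table chosen at mount time. A usage sketch, using
+ * the macros defined further down:
+ *
+ *	error = XFS_DIR_LOOKUP(mp, tp, dp, name, namelen, &inum);
+ *	...expands to...
+ *	error = mp->m_dirops.xd_lookup(tp, dp, name, namelen, &inum);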
+ */ +typedef void (*xfs_dir_mount_t)(struct xfs_mount *mp); +typedef int (*xfs_dir_isempty_t)(struct xfs_inode *dp); +typedef int (*xfs_dir_init_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + struct xfs_inode *pdp); +typedef int (*xfs_dir_createname_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen, + xfs_ino_t inum, + xfs_fsblock_t *first, + struct xfs_bmap_free *flist, + xfs_extlen_t total); +typedef int (*xfs_dir_lookup_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen, + xfs_ino_t *inum); +typedef int (*xfs_dir_removename_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen, + xfs_ino_t ino, + xfs_fsblock_t *first, + struct xfs_bmap_free *flist, + xfs_extlen_t total); +typedef int (*xfs_dir_getdents_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + struct uio *uio, + int *eofp); +typedef int (*xfs_dir_replace_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen, + xfs_ino_t inum, + xfs_fsblock_t *first, + struct xfs_bmap_free *flist, + xfs_extlen_t total); +typedef int (*xfs_dir_canenter_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen); +typedef int (*xfs_dir_shortform_validate_ondisk_t)(struct xfs_mount *mp, + struct xfs_dinode *dip); +typedef int (*xfs_dir_shortform_to_single_t)(struct xfs_da_args *args); + +typedef struct xfs_dirops { + xfs_dir_mount_t xd_mount; + xfs_dir_isempty_t xd_isempty; + xfs_dir_init_t xd_init; + xfs_dir_createname_t xd_createname; + xfs_dir_lookup_t xd_lookup; + xfs_dir_removename_t xd_removename; + xfs_dir_getdents_t xd_getdents; + xfs_dir_replace_t xd_replace; + xfs_dir_canenter_t xd_canenter; + xfs_dir_shortform_validate_ondisk_t xd_shortform_validate_ondisk; + xfs_dir_shortform_to_single_t xd_shortform_to_single; +} xfs_dirops_t; + +/* + * Overall external interface routines. 
+ */ +void xfs_dir_startup(void); /* called exactly once */ + +#define XFS_DIR_MOUNT(mp) \ + ((mp)->m_dirops.xd_mount(mp)) +#define XFS_DIR_ISEMPTY(mp,dp) \ + ((mp)->m_dirops.xd_isempty(dp)) +#define XFS_DIR_INIT(mp,tp,dp,pdp) \ + ((mp)->m_dirops.xd_init(tp,dp,pdp)) +#define XFS_DIR_CREATENAME(mp,tp,dp,name,namelen,inum,first,flist,total) \ + ((mp)->m_dirops.xd_createname(tp,dp,name,namelen,inum,first,flist,\ + total)) +#define XFS_DIR_LOOKUP(mp,tp,dp,name,namelen,inum) \ + ((mp)->m_dirops.xd_lookup(tp,dp,name,namelen,inum)) +#define XFS_DIR_REMOVENAME(mp,tp,dp,name,namelen,ino,first,flist,total) \ + ((mp)->m_dirops.xd_removename(tp,dp,name,namelen,ino,first,flist,total)) +#define XFS_DIR_GETDENTS(mp,tp,dp,uio,eofp) \ + ((mp)->m_dirops.xd_getdents(tp,dp,uio,eofp)) +#define XFS_DIR_REPLACE(mp,tp,dp,name,namelen,inum,first,flist,total) \ + ((mp)->m_dirops.xd_replace(tp,dp,name,namelen,inum,first,flist,total)) +#define XFS_DIR_CANENTER(mp,tp,dp,name,namelen) \ + ((mp)->m_dirops.xd_canenter(tp,dp,name,namelen)) +#define XFS_DIR_SHORTFORM_VALIDATE_ONDISK(mp,dip) \ + ((mp)->m_dirops.xd_shortform_validate_ondisk(mp,dip)) +#define XFS_DIR_SHORTFORM_TO_SINGLE(mp,args) \ + ((mp)->m_dirops.xd_shortform_to_single(args)) + +#define XFS_DIR_IS_V1(mp) ((mp)->m_dirversion == 1) +extern xfs_dirops_t xfsv1_dirops; + +#endif /* __XFS_DIR_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir_leaf.c linux.21rc5-ac2/fs/xfs/xfs_dir_leaf.c --- linux.21rc5/fs/xfs/xfs_dir_leaf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir_leaf.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,2262 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir_leaf.c + * + * GROT: figure out how to recover gracefully when bmap returns ENOSPC. 
+ */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_error.h" + +/* + * xfs_dir_leaf.c + * + * Routines to implement leaf blocks of directories as Btrees of hashed names. + */ + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Routines used for growing the Btree. + */ +STATIC void xfs_dir_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args, + int insertion_index, + int freemap_index); +STATIC int xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer, + int musthave, int justcheck); +STATIC void xfs_dir_leaf_rebalance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2); +STATIC int xfs_dir_leaf_figure_balance(xfs_da_state_t *state, + xfs_da_state_blk_t *leaf_blk_1, + xfs_da_state_blk_t *leaf_blk_2, + int *number_entries_in_blk1, + int *number_namebytes_in_blk1); + +/* + * Utility routines. + */ +STATIC void xfs_dir_leaf_moveents(xfs_dir_leafblock_t *src_leaf, + int src_start, + xfs_dir_leafblock_t *dst_leaf, + int dst_start, int move_count, + xfs_mount_t *mp); + + +/*======================================================================== + * External routines when dirsize < XFS_IFORK_DSIZE(dp). + *========================================================================*/ + + +/* + * Validate a given inode number. + */ +int +xfs_dir_ino_validate(xfs_mount_t *mp, xfs_ino_t ino) +{ + xfs_agblock_t agblkno; + xfs_agino_t agino; + xfs_agnumber_t agno; + int ino_ok; + int ioff; + + agno = XFS_INO_TO_AGNO(mp, ino); + agblkno = XFS_INO_TO_AGBNO(mp, ino); + ioff = XFS_INO_TO_OFFSET(mp, ino); + agino = XFS_OFFBNO_TO_AGINO(mp, agblkno, ioff); + ino_ok = + agno < mp->m_sb.sb_agcount && + agblkno < mp->m_sb.sb_agblocks && + agblkno != 0 && + ioff < (1 << mp->m_sb.sb_inopblog) && + XFS_AGINO_TO_INO(mp, agno, agino) == ino; + if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE, + XFS_RANDOM_DIR_INO_VALIDATE))) { + xfs_fs_cmn_err(CE_WARN, mp, "Invalid inode number 0x%Lx", + (unsigned long long) ino); + XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + return 0; +} + +/* + * Create the initial contents of a shortform directory. 
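+ *
+ * The shortform image built below lives entirely in the inode
+ * literal area: a header followed by tightly packed variable-length
+ * entries; schematically (exact field layout per xfs_dir_sf.h):
+ *
+ *	xfs_dir_sf_hdr_t   { parent inumber, entry count }
+ *	entry[0..count-1]  { inumber, namelen, name[namelen] }
+ *			   walked with XFS_DIR_SF_NEXTENTRY()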
+ */ +int +xfs_dir_shortform_create(xfs_da_args_t *args, xfs_ino_t parent) +{ + xfs_dir_sf_hdr_t *hdr; + xfs_inode_t *dp; + + dp = args->dp; + ASSERT(dp != NULL); + ASSERT(dp->i_d.di_size == 0); + if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) { + dp->i_df.if_flags &= ~XFS_IFEXTENTS; /* just in case */ + dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE); + dp->i_df.if_flags |= XFS_IFINLINE; + } + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + ASSERT(dp->i_df.if_bytes == 0); + xfs_idata_realloc(dp, sizeof(*hdr), XFS_DATA_FORK); + hdr = (xfs_dir_sf_hdr_t *)dp->i_df.if_u1.if_data; + XFS_DIR_SF_PUT_DIRINO_ARCH(&parent, &hdr->parent, ARCH_CONVERT); + + INT_ZERO(hdr->count, ARCH_CONVERT); + dp->i_d.di_size = sizeof(*hdr); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + return(0); +} + +/* + * Add a name to the shortform directory structure. + * Overflow from the inode has already been checked for. + */ +int +xfs_dir_shortform_addname(xfs_da_args_t *args) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int i, offset, size; + xfs_inode_t *dp; + + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. + */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + sfe = &sf->list[0]; + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + if (sfe->namelen == args->namelen && + args->name[0] == sfe->name[0] && + memcmp(args->name, sfe->name, args->namelen) == 0) + return(XFS_ERROR(EEXIST)); + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + + offset = (int)((char *)sfe - (char *)sf); + size = XFS_DIR_SF_ENTSIZE_BYNAME(args->namelen); + xfs_idata_realloc(dp, size, XFS_DATA_FORK); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + sfe = (xfs_dir_sf_entry_t *)((char *)sf + offset); + + XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &sfe->inumber, ARCH_CONVERT); + sfe->namelen = args->namelen; + memcpy(sfe->name, args->name, sfe->namelen); + INT_MOD(sf->hdr.count, ARCH_CONVERT, +1); + + dp->i_d.di_size += size; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + + return(0); +} + +/* + * Remove a name from the shortform directory structure. + */ +int +xfs_dir_shortform_removename(xfs_da_args_t *args) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int base, size = 0, i; + xfs_inode_t *dp; + + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. 
+ */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + base = sizeof(xfs_dir_sf_hdr_t); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + sfe = &sf->list[0]; + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe); + if (sfe->namelen == args->namelen && + sfe->name[0] == args->name[0] && + memcmp(sfe->name, args->name, args->namelen) == 0) + break; + base += size; + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + if (i < 0) { + ASSERT(args->oknoent); + return(XFS_ERROR(ENOENT)); + } + + if ((base + size) != dp->i_d.di_size) { + memmove(&((char *)sf)[base], &((char *)sf)[base+size], + dp->i_d.di_size - (base+size)); + } + INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); + + xfs_idata_realloc(dp, -size, XFS_DATA_FORK); + dp->i_d.di_size -= size; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + + return(0); +} + +/* + * Look up a name in a shortform directory structure. + */ +int +xfs_dir_shortform_lookup(xfs_da_args_t *args) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int i; + xfs_inode_t *dp; + + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. + */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + if (args->namelen == 2 && + args->name[0] == '.' && args->name[1] == '.') { + XFS_DIR_SF_GET_DIRINO_ARCH(&sf->hdr.parent, &args->inumber, ARCH_CONVERT); + return(XFS_ERROR(EEXIST)); + } + if (args->namelen == 1 && args->name[0] == '.') { + args->inumber = dp->i_ino; + return(XFS_ERROR(EEXIST)); + } + sfe = &sf->list[0]; + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + if (sfe->namelen == args->namelen && + sfe->name[0] == args->name[0] && + memcmp(args->name, sfe->name, args->namelen) == 0) { + XFS_DIR_SF_GET_DIRINO_ARCH(&sfe->inumber, &args->inumber, ARCH_CONVERT); + return(XFS_ERROR(EEXIST)); + } + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + ASSERT(args->oknoent); + return(XFS_ERROR(ENOENT)); +} + +/* + * Convert from using the shortform to the leaf. + */ +int +xfs_dir_shortform_to_leaf(xfs_da_args_t *iargs) +{ + xfs_inode_t *dp; + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + xfs_da_args_t args; + xfs_ino_t inumber; + char *tmpbuffer; + int retval, i, size; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + + dp = iargs->dp; + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. 
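+ *
+ * When the inline data is intact, the conversion below proceeds in
+ * five steps: snapshot the shortform image to a temp buffer, empty
+ * the data fork, grow the inode by one block (block 0), create an
+ * empty leaf there, then re-add ".", ".." and every saved entry
+ * through xfs_dir_leaf_addname().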
+ */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + size = dp->i_df.if_bytes; + tmpbuffer = kmem_alloc(size, KM_SLEEP); + ASSERT(tmpbuffer != NULL); + + memcpy(tmpbuffer, dp->i_df.if_u1.if_data, size); + + sf = (xfs_dir_shortform_t *)tmpbuffer; + XFS_DIR_SF_GET_DIRINO_ARCH(&sf->hdr.parent, &inumber, ARCH_CONVERT); + + xfs_idata_realloc(dp, -size, XFS_DATA_FORK); + dp->i_d.di_size = 0; + xfs_trans_log_inode(iargs->trans, dp, XFS_ILOG_CORE); + retval = xfs_da_grow_inode(iargs, &blkno); + if (retval) + goto out; + + ASSERT(blkno == 0); + retval = xfs_dir_leaf_create(iargs, blkno, &bp); + if (retval) + goto out; + xfs_da_buf_done(bp); + + args.name = "."; + args.namelen = 1; + args.hashval = xfs_dir_hash_dot; + args.inumber = dp->i_ino; + args.dp = dp; + args.firstblock = iargs->firstblock; + args.flist = iargs->flist; + args.total = iargs->total; + args.whichfork = XFS_DATA_FORK; + args.trans = iargs->trans; + args.justcheck = 0; + args.addname = args.oknoent = 1; + retval = xfs_dir_leaf_addname(&args); + if (retval) + goto out; + + args.name = ".."; + args.namelen = 2; + args.hashval = xfs_dir_hash_dotdot; + args.inumber = inumber; + retval = xfs_dir_leaf_addname(&args); + if (retval) + goto out; + + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + args.name = (char *)(sfe->name); + args.namelen = sfe->namelen; + args.hashval = xfs_da_hashname((char *)(sfe->name), + sfe->namelen); + XFS_DIR_SF_GET_DIRINO_ARCH(&sfe->inumber, &args.inumber, ARCH_CONVERT); + retval = xfs_dir_leaf_addname(&args); + if (retval) + goto out; + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + retval = 0; + +out: + kmem_free(tmpbuffer, size); + return(retval); +} + +STATIC int +xfs_dir_shortform_compare(const void *a, const void *b) +{ + xfs_dir_sf_sort_t *sa, *sb; + + sa = (xfs_dir_sf_sort_t *)a; + sb = (xfs_dir_sf_sort_t *)b; + if (sa->hash < sb->hash) + return -1; + else if (sa->hash > sb->hash) + return 1; + else + return sa->entno - sb->entno; +} + +/* + * Copy out directory entries for getdents(), for shortform directories. + */ +/*ARGSUSED*/ +int +xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp, + xfs_dirent_t *dbp, xfs_dir_put_t put) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int retval, i, sbsize, nsbuf, lastresid=0, want_entno; + xfs_mount_t *mp; + xfs_dahash_t cookhash, hash; + xfs_dir_put_args_t p; + xfs_dir_sf_sort_t *sbuf, *sbp; + + mp = dp->i_mount; + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); + want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); + nsbuf = INT_GET(sf->hdr.count, ARCH_CONVERT) + 2; + sbsize = (nsbuf + 1) * sizeof(*sbuf); + sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); + + xfs_dir_trace_g_du("sf: start", dp, uio); + + /* + * Collect all the entries into the buffer. + * Entry 0 is . + */ + sbp->entno = 0; + sbp->seqno = 0; + sbp->hash = xfs_dir_hash_dot; + sbp->ino = dp->i_ino; + sbp->name = "."; + sbp->namelen = 1; + sbp++; + + /* + * Entry 1 is .. + */ + sbp->entno = 1; + sbp->seqno = 0; + sbp->hash = xfs_dir_hash_dotdot; + sbp->ino = XFS_GET_DIR_INO_ARCH(mp, sf->hdr.parent, ARCH_CONVERT); + sbp->name = ".."; + sbp->namelen = 2; + sbp++; + + /* + * Scan the directory data for the rest of the entries. 
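+ *
+ * Duplicate hashvals are disambiguated with sequence numbers: after
+ * the sort below, entries sharing a hashval get seqno 0, 1, 2, ...
+ * and a sentinel with hash XFS_DA_MAXHASH terminates the array, so
+ * a resumed getdents can re-find its place even mid-collision.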
+ */ + for (i = 0, sfe = &sf->list[0]; + i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + + if (unlikely( + ((char *)sfe < (char *)sf) || + ((char *)sfe >= ((char *)sf + dp->i_df.if_bytes)) || + (sfe->namelen >= MAXNAMELEN))) { + xfs_dir_trace_g_du("sf: corrupted", dp, uio); + XFS_CORRUPTION_ERROR("xfs_dir_shortform_getdents", + XFS_ERRLEVEL_LOW, mp, sfe); + kmem_free(sbuf, sbsize); + return XFS_ERROR(EFSCORRUPTED); + } + + sbp->entno = i + 2; + sbp->seqno = 0; + sbp->hash = xfs_da_hashname((char *)sfe->name, sfe->namelen); + sbp->ino = XFS_GET_DIR_INO_ARCH(mp, sfe->inumber, ARCH_CONVERT); + sbp->name = (char *)sfe->name; + sbp->namelen = sfe->namelen; + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + sbp++; + } + + /* + * Sort the entries on hash then entno. + */ + qsort(sbuf, nsbuf, sizeof(*sbuf), xfs_dir_shortform_compare); + /* + * Stuff in last entry. + */ + sbp->entno = nsbuf; + sbp->hash = XFS_DA_MAXHASH; + sbp->seqno = 0; + /* + * Figure out the sequence numbers in case there's a hash duplicate. + */ + for (hash = sbuf->hash, sbp = sbuf + 1; + sbp < &sbuf[nsbuf + 1]; sbp++) { + if (sbp->hash == hash) + sbp->seqno = sbp[-1].seqno + 1; + else + hash = sbp->hash; + } + + /* + * Set up put routine. + */ + p.dbp = dbp; + p.put = put; + p.uio = uio; + + /* + * Find our place. + */ + for (sbp = sbuf; sbp < &sbuf[nsbuf + 1]; sbp++) { + if (sbp->hash > cookhash || + (sbp->hash == cookhash && sbp->seqno >= want_entno)) + break; + } + + /* + * Did we fail to find anything? We stop at the last entry, + * the one we put maxhash into. + */ + if (sbp == &sbuf[nsbuf]) { + kmem_free(sbuf, sbsize); + xfs_dir_trace_g_du("sf: hash beyond end", dp, uio); + uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH); + *eofp = 1; + return 0; + } + + /* + * Loop putting entries into the user buffer. + */ + while (sbp < &sbuf[nsbuf]) { + /* + * Save the first resid in a run of equal-hashval entries + * so that we can back them out if they don't all fit. + */ + if (sbp->seqno == 0 || sbp == sbuf) + lastresid = uio->uio_resid; + /* + * NOTE! Linux "filldir" semantics require that the + * offset "cookie" be for this entry, not the + * next; all the actual shuffling to make it + * "look right" to the user is done in filldir. + */ + XFS_PUT_COOKIE(p.cook, mp, 0, sbp->seqno, sbp->hash); + +#if XFS_BIG_FILESYSTEMS + p.ino = sbp->ino + mp->m_inoadd; +#else + p.ino = sbp->ino; +#endif + p.name = sbp->name; + p.namelen = sbp->namelen; + + retval = p.put(&p); + + if (!p.done) { + uio->uio_offset = + XFS_DA_MAKE_COOKIE(mp, 0, 0, sbp->hash); + kmem_free(sbuf, sbsize); + uio->uio_resid = lastresid; + xfs_dir_trace_g_du("sf: E-O-B", dp, uio); + return retval; + } + + sbp++; + } + + kmem_free(sbuf, sbsize); + + XFS_PUT_COOKIE(p.cook, mp, 0, 0, XFS_DA_MAXHASH); + + uio->uio_offset = p.cook.o; + + *eofp = 1; + + xfs_dir_trace_g_du("sf: E-O-F", dp, uio); + + return 0; +} + +/* + * Look up a name in a shortform directory structure, replace the inode number. + */ +int +xfs_dir_shortform_replace(xfs_da_args_t *args) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + xfs_inode_t *dp; + int i; + + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. 
+ */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + if (args->namelen == 2 && + args->name[0] == '.' && args->name[1] == '.') { + /* XXX - replace assert? */ + XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &sf->hdr.parent, ARCH_CONVERT); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); + return(0); + } + ASSERT(args->namelen != 1 || args->name[0] != '.'); + sfe = &sf->list[0]; + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + if (sfe->namelen == args->namelen && + sfe->name[0] == args->name[0] && + memcmp(args->name, sfe->name, args->namelen) == 0) { + ASSERT(memcmp((char *)&args->inumber, + (char *)&sfe->inumber, sizeof(xfs_ino_t))); + XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &sfe->inumber, ARCH_CONVERT); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); + return(0); + } + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + ASSERT(args->oknoent); + return(XFS_ERROR(ENOENT)); +} + +/* + * Convert a leaf directory to shortform structure + */ +int +xfs_dir_leaf_to_shortform(xfs_da_args_t *iargs) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + xfs_da_args_t args; + xfs_inode_t *dp; + xfs_ino_t parent; + char *tmpbuffer; + int retval, i; + xfs_dabuf_t *bp; + + dp = iargs->dp; + tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP); + ASSERT(tmpbuffer != NULL); + + retval = xfs_da_read_buf(iargs->trans, iargs->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + goto out; + ASSERT(bp != NULL); + memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); + leaf = (xfs_dir_leafblock_t *)tmpbuffer; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); + + /* + * Find and special case the parent inode number + */ + hdr = &leaf->hdr; + entry = &leaf->entries[0]; + for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; i >= 0; entry++, i--) { + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + if ((entry->namelen == 2) && + (namest->name[0] == '.') && + (namest->name[1] == '.')) { + XFS_DIR_SF_GET_DIRINO_ARCH(&namest->inumber, &parent, ARCH_CONVERT); + INT_ZERO(entry->nameidx, ARCH_CONVERT); + } else if ((entry->namelen == 1) && (namest->name[0] == '.')) { + INT_ZERO(entry->nameidx, ARCH_CONVERT); + } + } + retval = xfs_da_shrink_inode(iargs, 0, bp); + if (retval) + goto out; + retval = xfs_dir_shortform_create(iargs, parent); + if (retval) + goto out; + + /* + * Copy the rest of the filenames + */ + entry = &leaf->entries[0]; + args.dp = dp; + args.firstblock = iargs->firstblock; + args.flist = iargs->flist; + args.total = iargs->total; + args.whichfork = XFS_DATA_FORK; + args.trans = iargs->trans; + args.justcheck = 0; + args.addname = args.oknoent = 1; + for (i = 0; i < INT_GET(hdr->count, ARCH_CONVERT); entry++, i++) { + if (INT_ISZERO(entry->nameidx, ARCH_CONVERT)) + continue; + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + args.name = (char *)(namest->name); + args.namelen = entry->namelen; + args.hashval = INT_GET(entry->hashval, ARCH_CONVERT); + XFS_DIR_SF_GET_DIRINO_ARCH(&namest->inumber, &args.inumber, ARCH_CONVERT); + xfs_dir_shortform_addname(&args); + } + +out: + kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount)); + return(retval); +} + +/* + * Convert from 
using a single leaf to a root node and a leaf. + */ +int +xfs_dir_leaf_to_node(xfs_da_args_t *args) +{ + xfs_dir_leafblock_t *leaf; + xfs_da_intnode_t *node; + xfs_inode_t *dp; + xfs_dabuf_t *bp1, *bp2; + xfs_dablk_t blkno; + int retval; + + dp = args->dp; + retval = xfs_da_grow_inode(args, &blkno); + ASSERT(blkno == 1); + if (retval) + return(retval); + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp1 != NULL); + retval = xfs_da_get_buf(args->trans, args->dp, 1, -1, &bp2, + XFS_DATA_FORK); + if (retval) { + xfs_da_buf_done(bp1); + return(retval); + } + ASSERT(bp2 != NULL); + memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount)); + xfs_da_buf_done(bp1); + xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1); + + /* + * Set up the new root node. + */ + retval = xfs_da_node_create(args, 0, 1, &bp1, XFS_DATA_FORK); + if (retval) { + xfs_da_buf_done(bp2); + return(retval); + } + node = bp1->data; + leaf = bp2->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + INT_SET(node->btree[0].hashval, ARCH_CONVERT, INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); + xfs_da_buf_done(bp2); + INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); + INT_SET(node->hdr.count, ARCH_CONVERT, 1); + xfs_da_log_buf(args->trans, bp1, + XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0]))); + xfs_da_buf_done(bp1); + + return(retval); +} + + +/*======================================================================== + * Routines used for growing the Btree. + *========================================================================*/ + +/* + * Create the initial contents of a leaf directory + * or a leaf in a node directory. + */ +int +xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_inode_t *dp; + xfs_dabuf_t *bp; + int retval; + + dp = args->dp; + ASSERT(dp != NULL); + retval = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp, XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + leaf = bp->data; + memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); + hdr = &leaf->hdr; + INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_DIR_LEAF_MAGIC); + INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); + if (INT_ISZERO(hdr->firstused, ARCH_CONVERT)) + INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - 1); + INT_SET(hdr->freemap[0].base, ARCH_CONVERT, sizeof(xfs_dir_leaf_hdr_t)); + INT_SET(hdr->freemap[0].size, ARCH_CONVERT, INT_GET(hdr->firstused, ARCH_CONVERT) - INT_GET(hdr->freemap[0].base, ARCH_CONVERT)); + + xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); + + *bpp = bp; + return(0); +} + +/* + * Split the leaf node, rebalance, then add the new entry. + */ +int +xfs_dir_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, + xfs_da_state_blk_t *newblk) +{ + xfs_dablk_t blkno; + xfs_da_args_t *args; + int error; + + /* + * Allocate space for a new leaf node. + */ + args = state->args; + ASSERT(args != NULL); + ASSERT(oldblk->magic == XFS_DIR_LEAF_MAGIC); + error = xfs_da_grow_inode(args, &blkno); + if (error) + return(error); + error = xfs_dir_leaf_create(args, blkno, &newblk->bp); + if (error) + return(error); + newblk->blkno = blkno; + newblk->magic = XFS_DIR_LEAF_MAGIC; + + /* + * Rebalance the entries across the two leaves. 
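+ *
+ * After rebalancing, xfs_da_blk_link() chains the two leaves as
+ * siblings, and state->inleaf records which block should receive
+ * the new name; the insert below goes to oldblk or newblk
+ * accordingly.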
+ */ + xfs_dir_leaf_rebalance(state, oldblk, newblk); + error = xfs_da_blk_link(state, oldblk, newblk); + if (error) + return(error); + + /* + * Insert the new entry in the correct block. + */ + if (state->inleaf) { + error = xfs_dir_leaf_add(oldblk->bp, args, oldblk->index); + } else { + error = xfs_dir_leaf_add(newblk->bp, args, newblk->index); + } + + /* + * Update last hashval in each block since we added the name. + */ + oldblk->hashval = xfs_dir_leaf_lasthash(oldblk->bp, NULL); + newblk->hashval = xfs_dir_leaf_lasthash(newblk->bp, NULL); + return(error); +} + +/* + * Add a name to the leaf directory structure. + * + * Must take into account fragmented leaves and leaves where spacemap has + * lost some freespace information (ie: holes). + */ +int +xfs_dir_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_dir_leaf_map_t *map; + int tablesize, entsize, sum, i, tmp, error; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT((index >= 0) && (index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); + hdr = &leaf->hdr; + entsize = XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen); + + /* + * Search through freemap for first-fit on new name length. + * (may need to figure in size of entry struct too) + */ + tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) * (uint)sizeof(xfs_dir_leaf_entry_t) + + (uint)sizeof(xfs_dir_leaf_hdr_t); + map = &hdr->freemap[XFS_DIR_LEAF_MAPSIZE-1]; + for (sum = 0, i = XFS_DIR_LEAF_MAPSIZE-1; i >= 0; map--, i--) { + if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) { + sum += INT_GET(map->size, ARCH_CONVERT); + continue; + } + if (INT_ISZERO(map->size, ARCH_CONVERT)) + continue; /* no space in this map */ + tmp = entsize; + if (INT_GET(map->base, ARCH_CONVERT) < INT_GET(hdr->firstused, ARCH_CONVERT)) + tmp += (uint)sizeof(xfs_dir_leaf_entry_t); + if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { + if (!args->justcheck) + xfs_dir_leaf_add_work(bp, args, index, i); + return(0); + } + sum += INT_GET(map->size, ARCH_CONVERT); + } + + /* + * If there are no holes in the address space of the block, + * and we don't have enough freespace, then compaction will do us + * no good and we should just give up. + */ + if (!hdr->holes && (sum < entsize)) + return(XFS_ERROR(ENOSPC)); + + /* + * Compact the entries to coalesce free space. + * Pass the justcheck flag so the checking pass can return + * an error, without changing anything, if it won't fit. + */ + error = xfs_dir_leaf_compact(args->trans, bp, + args->total == 0 ? + entsize + + (uint)sizeof(xfs_dir_leaf_entry_t) : 0, + args->justcheck); + if (error) + return(error); + /* + * After compaction, the block is guaranteed to have only one + * free region, in freemap[0]. If it is not big enough, give up. + */ + if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) < + (entsize + (uint)sizeof(xfs_dir_leaf_entry_t))) + return(XFS_ERROR(ENOSPC)); + + if (!args->justcheck) + xfs_dir_leaf_add_work(bp, args, index, 0); + return(0); +} + +/* + * Add a name to a leaf directory structure. 
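+ *
+ * By the time this worker runs, xfs_dir_leaf_add() has already found
+ * a freemap region large enough for the name; the work left is
+ * mechanical: open a slot in the entry array, carve the name bytes
+ * off the end of freemap[mapindex], and log both pieces.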
+ */ +STATIC void +xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index, + int mapindex) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + xfs_dir_leaf_map_t *map; + /* REFERENCED */ + xfs_mount_t *mp; + int tmp, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + hdr = &leaf->hdr; + ASSERT((mapindex >= 0) && (mapindex < XFS_DIR_LEAF_MAPSIZE)); + ASSERT((index >= 0) && (index <= INT_GET(hdr->count, ARCH_CONVERT))); + + /* + * Force open some space in the entry array and fill it in. + */ + entry = &leaf->entries[index]; + if (index < INT_GET(hdr->count, ARCH_CONVERT)) { + tmp = INT_GET(hdr->count, ARCH_CONVERT) - index; + tmp *= (uint)sizeof(xfs_dir_leaf_entry_t); + memmove(entry + 1, entry, tmp); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry))); + } + INT_MOD(hdr->count, ARCH_CONVERT, +1); + + /* + * Allocate space for the new string (at the end of the run). + */ + map = &hdr->freemap[mapindex]; + mp = args->trans->t_mountp; + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) >= XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + INT_MOD(map->size, ARCH_CONVERT, -(XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen))); + INT_SET(entry->nameidx, ARCH_CONVERT, INT_GET(map->base, ARCH_CONVERT) + INT_GET(map->size, ARCH_CONVERT)); + INT_SET(entry->hashval, ARCH_CONVERT, args->hashval); + entry->namelen = args->namelen; + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); + + /* + * Copy the string and inode number into the new space. + */ + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &namest->inumber, ARCH_CONVERT); + memcpy(namest->name, args->name, args->namelen); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, namest, XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry))); + + /* + * Update the control info for this leaf node + */ + if (INT_GET(entry->nameidx, ARCH_CONVERT) < INT_GET(hdr->firstused, ARCH_CONVERT)) + INT_COPY(hdr->firstused, entry->nameidx, ARCH_CONVERT); + ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) >= ((INT_GET(hdr->count, ARCH_CONVERT)*sizeof(*entry))+sizeof(*hdr))); + tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1) * (uint)sizeof(xfs_dir_leaf_entry_t) + + (uint)sizeof(xfs_dir_leaf_hdr_t); + map = &hdr->freemap[0]; + for (i = 0; i < XFS_DIR_LEAF_MAPSIZE; map++, i++) { + if (INT_GET(map->base, ARCH_CONVERT) == tmp) { + INT_MOD(map->base, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_entry_t)); + INT_MOD(map->size, ARCH_CONVERT, -((uint)sizeof(xfs_dir_leaf_entry_t))); + } + } + INT_MOD(hdr->namebytes, ARCH_CONVERT, args->namelen); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); +} + +/* + * Garbage collect a leaf directory block by copying it to a new buffer. 
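+ *
+ * The block is copied to a scratch buffer, zeroed in place, and then
+ * xfs_dir_leaf_moveents() copies every live entry back with the names
+ * re-packed, leaving all of the free space in freemap[0].  "musthave"
+ * is the contiguous free space the caller needs to see afterwards
+ * (0 if it doesn't care); with "justcheck" set the block is restored
+ * from a second scratch copy so only the return value matters.  For
+ * example, xfs_dir_leaf_add() above asks for
+ * entsize + sizeof(xfs_dir_leaf_entry_t) when args->total == 0.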
+ */
+STATIC int
+xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp, int musthave,
+ int justcheck)
+{
+ xfs_dir_leafblock_t *leaf_s, *leaf_d;
+ xfs_dir_leaf_hdr_t *hdr_s, *hdr_d;
+ xfs_mount_t *mp;
+ char *tmpbuffer;
+ char *tmpbuffer2=NULL;
+ int rval;
+ int lbsize;
+
+ mp = trans->t_mountp;
+ lbsize = XFS_LBSIZE(mp);
+ tmpbuffer = kmem_alloc(lbsize, KM_SLEEP);
+ ASSERT(tmpbuffer != NULL);
+ memcpy(tmpbuffer, bp->data, lbsize);
+
+ /*
+ * Make a second copy in case xfs_dir_leaf_moveents()
+ * below destroys the original.
+ */
+ if (musthave || justcheck) {
+ tmpbuffer2 = kmem_alloc(lbsize, KM_SLEEP);
+ memcpy(tmpbuffer2, bp->data, lbsize);
+ }
+ memset(bp->data, 0, lbsize);
+
+ /*
+ * Copy basic information
+ */
+ leaf_s = (xfs_dir_leafblock_t *)tmpbuffer;
+ leaf_d = bp->data;
+ hdr_s = &leaf_s->hdr;
+ hdr_d = &leaf_d->hdr;
+ hdr_d->info = hdr_s->info; /* struct copy */
+ INT_SET(hdr_d->firstused, ARCH_CONVERT, lbsize);
+ if (INT_ISZERO(hdr_d->firstused, ARCH_CONVERT))
+ INT_SET(hdr_d->firstused, ARCH_CONVERT, lbsize - 1);
+ INT_ZERO(hdr_d->namebytes, ARCH_CONVERT);
+ INT_ZERO(hdr_d->count, ARCH_CONVERT);
+ hdr_d->holes = 0;
+ INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, sizeof(xfs_dir_leaf_hdr_t));
+ INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, INT_GET(hdr_d->firstused, ARCH_CONVERT) - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
+
+ /*
+ * Copy all entries in the same (sorted) order,
+ * but allocate filenames packed and in sequence.
+ * This changes the source (leaf_s) as well.
+ */
+ xfs_dir_leaf_moveents(leaf_s, 0, leaf_d, 0, (int)INT_GET(hdr_s->count, ARCH_CONVERT), mp);
+
+ if (musthave && INT_GET(hdr_d->freemap[0].size, ARCH_CONVERT) < musthave)
+ rval = XFS_ERROR(ENOSPC);
+ else
+ rval = 0;
+
+ if (justcheck || rval == ENOSPC) {
+ ASSERT(tmpbuffer2);
+ memcpy(bp->data, tmpbuffer2, lbsize);
+ } else {
+ xfs_da_log_buf(trans, bp, 0, lbsize - 1);
+ }
+
+ kmem_free(tmpbuffer, lbsize);
+ if (musthave || justcheck)
+ kmem_free(tmpbuffer2, lbsize);
+ return(rval);
+}
+
+/*
+ * Redistribute the directory entries between two leaf nodes,
+ * taking into account the size of the new entry.
+ *
+ * NOTE: if new block is empty, then it will get the upper half of old block.
+ */
+STATIC void
+xfs_dir_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
+ xfs_da_state_blk_t *blk2)
+{
+ xfs_da_state_blk_t *tmp_blk;
+ xfs_dir_leafblock_t *leaf1, *leaf2;
+ xfs_dir_leaf_hdr_t *hdr1, *hdr2;
+ int count, totallen, max, space, swap;
+
+ /*
+ * Set up environment.
+ */
+ ASSERT(blk1->magic == XFS_DIR_LEAF_MAGIC);
+ ASSERT(blk2->magic == XFS_DIR_LEAF_MAGIC);
+ leaf1 = blk1->bp->data;
+ leaf2 = blk2->bp->data;
+ ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+ ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+
+ /*
+ * Check ordering of blocks, reverse if it makes things simpler.
+ */
+ swap = 0;
+ if (xfs_dir_leaf_order(blk1->bp, blk2->bp)) {
+ tmp_blk = blk1;
+ blk1 = blk2;
+ blk2 = tmp_blk;
+ leaf1 = blk1->bp->data;
+ leaf2 = blk2->bp->data;
+ swap = 1;
+ }
+ hdr1 = &leaf1->hdr;
+ hdr2 = &leaf2->hdr;
+
+ /*
+ * Examine entries until we reduce the absolute difference in
+ * byte usage between the two blocks to a minimum. Then get
+ * the direction to copy and the number of elements to move.
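+ *
+ * Illustrative numbers (not from a real filesystem): if leaf1
+ * carries 700 bytes of entries and names and leaf2 carries 300,
+ * xfs_dir_leaf_figure_balance() walks the combined entry list until
+ * the running byte count is as close as possible to half of
+ * (700 + 300 + the new entry's size); everything past that point
+ * is then shifted with xfs_dir_leaf_moveents().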
+ */ + state->inleaf = xfs_dir_leaf_figure_balance(state, blk1, blk2, + &count, &totallen); + if (swap) + state->inleaf = !state->inleaf; + + /* + * Move any entries required from leaf to leaf: + */ + if (count < INT_GET(hdr1->count, ARCH_CONVERT)) { + /* + * Figure the total bytes to be added to the destination leaf. + */ + count = INT_GET(hdr1->count, ARCH_CONVERT) - count; /* number entries being moved */ + space = INT_GET(hdr1->namebytes, ARCH_CONVERT) - totallen; + space += count * ((uint)sizeof(xfs_dir_leaf_name_t)-1); + space += count * (uint)sizeof(xfs_dir_leaf_entry_t); + + /* + * leaf2 is the destination, compact it if it looks tight. + */ + max = INT_GET(hdr2->firstused, ARCH_CONVERT) - (uint)sizeof(xfs_dir_leaf_hdr_t); + max -= INT_GET(hdr2->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t); + if (space > max) { + xfs_dir_leaf_compact(state->args->trans, blk2->bp, + 0, 0); + } + + /* + * Move high entries from leaf1 to low end of leaf2. + */ + xfs_dir_leaf_moveents(leaf1, INT_GET(hdr1->count, ARCH_CONVERT) - count, + leaf2, 0, count, state->mp); + + xfs_da_log_buf(state->args->trans, blk1->bp, 0, + state->blocksize-1); + xfs_da_log_buf(state->args->trans, blk2->bp, 0, + state->blocksize-1); + + } else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) { + /* + * Figure the total bytes to be added to the destination leaf. + */ + count -= INT_GET(hdr1->count, ARCH_CONVERT); /* number entries being moved */ + space = totallen - INT_GET(hdr1->namebytes, ARCH_CONVERT); + space += count * ((uint)sizeof(xfs_dir_leaf_name_t)-1); + space += count * (uint)sizeof(xfs_dir_leaf_entry_t); + + /* + * leaf1 is the destination, compact it if it looks tight. + */ + max = INT_GET(hdr1->firstused, ARCH_CONVERT) - (uint)sizeof(xfs_dir_leaf_hdr_t); + max -= INT_GET(hdr1->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t); + if (space > max) { + xfs_dir_leaf_compact(state->args->trans, blk1->bp, + 0, 0); + } + + /* + * Move low entries from leaf2 to high end of leaf1. + */ + xfs_dir_leaf_moveents(leaf2, 0, leaf1, (int)INT_GET(hdr1->count, ARCH_CONVERT), + count, state->mp); + + xfs_da_log_buf(state->args->trans, blk1->bp, 0, + state->blocksize-1); + xfs_da_log_buf(state->args->trans, blk2->bp, 0, + state->blocksize-1); + } + + /* + * Copy out last hashval in each block for B-tree code. + */ + blk1->hashval = INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk2->hashval = INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + + /* + * Adjust the expected index for insertion. + * GROT: this doesn't work unless blk2 was originally empty. + */ + if (!state->inleaf) { + blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + } +} + +/* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. + * GROT: Is this really necessary? With other than a 512 byte blocksize, + * GROT: there will always be enough room in either block for a new entry. + * GROT: Do a double-split for this case? + */ +STATIC int +xfs_dir_leaf_figure_balance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2, + int *countarg, int *namebytesarg) +{ + xfs_dir_leafblock_t *leaf1, *leaf2; + xfs_dir_leaf_hdr_t *hdr1, *hdr2; + xfs_dir_leaf_entry_t *entry; + int count, max, totallen, half; + int lastdelta, foundit, tmp; + + /* + * Set up environment. 
+ */ + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + hdr1 = &leaf1->hdr; + hdr2 = &leaf2->hdr; + foundit = 0; + totallen = 0; + + /* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. + */ + max = INT_GET(hdr1->count, ARCH_CONVERT) + INT_GET(hdr2->count, ARCH_CONVERT); + half = (max+1) * (uint)(sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1); + half += INT_GET(hdr1->namebytes, ARCH_CONVERT) + INT_GET(hdr2->namebytes, ARCH_CONVERT) + state->args->namelen; + half /= 2; + lastdelta = state->blocksize; + entry = &leaf1->entries[0]; + for (count = 0; count < max; entry++, count++) { + +#define XFS_DIR_ABS(A) (((A) < 0) ? -(A) : (A)) + /* + * The new entry is in the first block, account for it. + */ + if (count == blk1->index) { + tmp = totallen + (uint)sizeof(*entry) + + XFS_DIR_LEAF_ENTSIZE_BYNAME(state->args->namelen); + if (XFS_DIR_ABS(half - tmp) > lastdelta) + break; + lastdelta = XFS_DIR_ABS(half - tmp); + totallen = tmp; + foundit = 1; + } + + /* + * Wrap around into the second block if necessary. + */ + if (count == INT_GET(hdr1->count, ARCH_CONVERT)) { + leaf1 = leaf2; + entry = &leaf1->entries[0]; + } + + /* + * Figure out if next leaf entry would be too much. + */ + tmp = totallen + (uint)sizeof(*entry) + + XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry); + if (XFS_DIR_ABS(half - tmp) > lastdelta) + break; + lastdelta = XFS_DIR_ABS(half - tmp); + totallen = tmp; +#undef XFS_DIR_ABS + } + + /* + * Calculate the number of namebytes that will end up in lower block. + * If new entry not in lower block, fix up the count. + */ + totallen -= + count * (uint)(sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1); + if (foundit) { + totallen -= (sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1) + + state->args->namelen; + } + + *countarg = count; + *namebytesarg = totallen; + return(foundit); +} + +/*======================================================================== + * Routines used for shrinking the Btree. + *========================================================================*/ + +/* + * Check a leaf block and its neighbors to see if the block should be + * collapsed into one or the other neighbor. Always keep the block + * with the smaller block number. + * If the current block is over 50% full, don't try to join it, return 0. + * If the block is empty, fill in the state structure and return 2. + * If it can be collapsed, fill in the state structure and return 1. + * If nothing can be done, return 0. + */ +int +xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) +{ + xfs_dir_leafblock_t *leaf; + xfs_da_state_blk_t *blk; + xfs_da_blkinfo_t *info; + int count, bytes, forward, error, retval, i; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + + /* + * Check for the degenerate case of the block being over 50% full. + * If so, it's not worth even looking to see if we might be able + * to coalesce with a sibling. + */ + blk = &state->path.blk[ state->path.active-1 ]; + info = blk->bp->data; + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + leaf = (xfs_dir_leafblock_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes = (uint)sizeof(xfs_dir_leaf_hdr_t) + + count * (uint)sizeof(xfs_dir_leaf_entry_t) + + count * ((uint)sizeof(xfs_dir_leaf_name_t)-1) + + INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); + if (bytes > (state->blocksize >> 1)) { + *action = 0; /* blk over 50%, don't try to join */ + return(0); + } + + /* + * Check for the degenerate case of the block being empty. 
+ * If the block is empty, we'll simply delete it, no need to
+ * coalesce it with a sibling block. We choose (arbitrarily)
+ * to merge with the forward block unless it is NULL.
+ */
+ if (count == 0) {
+ /*
+ * Make altpath point to the block we want to keep and
+ * path point to the block we want to drop (this one).
+ */
+ forward = !INT_ISZERO(info->forw, ARCH_CONVERT);
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
+ error = xfs_da_path_shift(state, &state->altpath, forward,
+ 0, &retval);
+ if (error)
+ return(error);
+ if (retval) {
+ *action = 0;
+ } else {
+ *action = 2;
+ }
+ return(0);
+ }
+
+ /*
+ * Examine each sibling block to see if we can coalesce with
+ * at least 25% free space to spare. We need to figure out
+ * whether to merge with the forward or the backward block.
+ * We prefer coalescing with the lower numbered sibling so as
+ * to shrink a directory over time.
+ */
+ forward = (INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT)); /* start with smaller blk num */
+ for (i = 0; i < 2; forward = !forward, i++) {
+ if (forward)
+ blkno = INT_GET(info->forw, ARCH_CONVERT);
+ else
+ blkno = INT_GET(info->back, ARCH_CONVERT);
+ if (blkno == 0)
+ continue;
+ error = xfs_da_read_buf(state->args->trans, state->args->dp,
+ blkno, -1, &bp,
+ XFS_DATA_FORK);
+ if (error)
+ return(error);
+ ASSERT(bp != NULL);
+
+ leaf = (xfs_dir_leafblock_t *)info;
+ count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+ bytes = state->blocksize - (state->blocksize>>2);
+ bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
+ leaf = bp->data;
+ ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
+ count += INT_GET(leaf->hdr.count, ARCH_CONVERT);
+ bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
+ bytes -= count * ((uint)sizeof(xfs_dir_leaf_name_t) - 1);
+ bytes -= count * (uint)sizeof(xfs_dir_leaf_entry_t);
+ bytes -= (uint)sizeof(xfs_dir_leaf_hdr_t);
+ if (bytes >= 0)
+ break; /* fits with at least 25% to spare */
+
+ xfs_da_brelse(state->args->trans, bp);
+ }
+ if (i >= 2) {
+ *action = 0;
+ return(0);
+ }
+ xfs_da_buf_done(bp);
+
+ /*
+ * Make altpath point to the block we want to keep (the lower
+ * numbered block) and path point to the block we want to drop.
+ */
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
+ if (blkno < blk->blkno) {
+ error = xfs_da_path_shift(state, &state->altpath, forward,
+ 0, &retval);
+ } else {
+ error = xfs_da_path_shift(state, &state->path, forward,
+ 0, &retval);
+ }
+ if (error)
+ return(error);
+ if (retval) {
+ *action = 0;
+ } else {
+ *action = 1;
+ }
+ return(0);
+}
+
+/*
+ * Remove a name from the leaf directory structure.
+ *
+ * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
+ * If two leaves are 37% full, when combined they will leave 25% free.
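+ * (The arithmetic: two blocks each 37% full merge into one roughly
+ * 74% full, i.e. just over 25% free, matching the "25% to spare"
+ * test in xfs_dir_leaf_toosmall() above.  The 37% cutoff is
+ * precomputed as a byte count in mp->m_dir_magicpct, tested at the
+ * end of this routine.)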
+ */ +int +xfs_dir_leaf_remove(xfs_trans_t *trans, xfs_dabuf_t *bp, int index) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_dir_leaf_map_t *map; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + int before, after, smallest, entsize; + int tablesize, tmp, i; + xfs_mount_t *mp; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + hdr = &leaf->hdr; + mp = trans->t_mountp; + ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); + ASSERT((index >= 0) && (index < INT_GET(hdr->count, ARCH_CONVERT))); + ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) >= ((INT_GET(hdr->count, ARCH_CONVERT)*sizeof(*entry))+sizeof(*hdr))); + entry = &leaf->entries[index]; + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp)); + + /* + * Scan through free region table: + * check for adjacency of free'd entry with an existing one, + * find smallest free region in case we need to replace it, + * adjust any map that borders the entry table, + */ + tablesize = INT_GET(hdr->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t) + + (uint)sizeof(xfs_dir_leaf_hdr_t); + map = &hdr->freemap[0]; + tmp = INT_GET(map->size, ARCH_CONVERT); + before = after = -1; + smallest = XFS_DIR_LEAF_MAPSIZE - 1; + entsize = XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry); + for (i = 0; i < XFS_DIR_LEAF_MAPSIZE; map++, i++) { + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + if (INT_GET(map->base, ARCH_CONVERT) == tablesize) { + INT_MOD(map->base, ARCH_CONVERT, -((uint)sizeof(xfs_dir_leaf_entry_t))); + INT_MOD(map->size, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_entry_t)); + } + + if ((INT_GET(map->base, ARCH_CONVERT) + INT_GET(map->size, ARCH_CONVERT)) == INT_GET(entry->nameidx, ARCH_CONVERT)) { + before = i; + } else if (INT_GET(map->base, ARCH_CONVERT) == (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) { + after = i; + } else if (INT_GET(map->size, ARCH_CONVERT) < tmp) { + tmp = INT_GET(map->size, ARCH_CONVERT); + smallest = i; + } + } + + /* + * Coalesce adjacent freemap regions, + * or replace the smallest region. + */ + if ((before >= 0) || (after >= 0)) { + if ((before >= 0) && (after >= 0)) { + map = &hdr->freemap[before]; + INT_MOD(map->size, ARCH_CONVERT, entsize); + INT_MOD(map->size, ARCH_CONVERT, INT_GET(hdr->freemap[after].size, ARCH_CONVERT)); + INT_ZERO(hdr->freemap[after].base, ARCH_CONVERT); + INT_ZERO(hdr->freemap[after].size, ARCH_CONVERT); + } else if (before >= 0) { + map = &hdr->freemap[before]; + INT_MOD(map->size, ARCH_CONVERT, entsize); + } else { + map = &hdr->freemap[after]; + INT_COPY(map->base, entry->nameidx, ARCH_CONVERT); + INT_MOD(map->size, ARCH_CONVERT, entsize); + } + } else { + /* + * Replace smallest region (if it is smaller than free'd entry) + */ + map = &hdr->freemap[smallest]; + if (INT_GET(map->size, ARCH_CONVERT) < entsize) { + INT_COPY(map->base, entry->nameidx, ARCH_CONVERT); + INT_SET(map->size, ARCH_CONVERT, entsize); + } + } + + /* + * Did we remove the first entry? + */ + if (INT_GET(entry->nameidx, ARCH_CONVERT) == INT_GET(hdr->firstused, ARCH_CONVERT)) + smallest = 1; + else + smallest = 0; + + /* + * Compress the remaining entries and zero out the removed stuff. 
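+ * The entry table is closed up with memmove(); the name bytes are
+ * only zeroed in place, so unless the removed name was the
+ * first-used one this leaves a hole and hdr->holes is set below to
+ * request a later compaction.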
+ */ + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + memset((char *)namest, 0, entsize); + xfs_da_log_buf(trans, bp, XFS_DA_LOGRANGE(leaf, namest, entsize)); + + INT_MOD(hdr->namebytes, ARCH_CONVERT, -(entry->namelen)); + tmp = (INT_GET(hdr->count, ARCH_CONVERT) - index) * (uint)sizeof(xfs_dir_leaf_entry_t); + memmove(entry, entry + 1, tmp); + INT_MOD(hdr->count, ARCH_CONVERT, -1); + xfs_da_log_buf(trans, bp, + XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry))); + entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)]; + memset((char *)entry, 0, sizeof(xfs_dir_leaf_entry_t)); + + /* + * If we removed the first entry, re-find the first used byte + * in the name area. Note that if the entry was the "firstused", + * then we don't have a "hole" in our block resulting from + * removing the name. + */ + if (smallest) { + tmp = XFS_LBSIZE(mp); + entry = &leaf->entries[0]; + for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; i >= 0; entry++, i--) { + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp)); + if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp) + tmp = INT_GET(entry->nameidx, ARCH_CONVERT); + } + INT_SET(hdr->firstused, ARCH_CONVERT, tmp); + if (INT_ISZERO(hdr->firstused, ARCH_CONVERT)) + INT_SET(hdr->firstused, ARCH_CONVERT, tmp - 1); + } else { + hdr->holes = 1; /* mark as needing compaction */ + } + + xfs_da_log_buf(trans, bp, XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); + + /* + * Check if leaf is less than 50% full, caller may want to + * "join" the leaf with a sibling if so. + */ + tmp = (uint)sizeof(xfs_dir_leaf_hdr_t); + tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t); + tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * ((uint)sizeof(xfs_dir_leaf_name_t) - 1); + tmp += INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); + if (tmp < mp->m_dir_magicpct) + return(1); /* leaf is < 37% full */ + return(0); +} + +/* + * Move all the directory entries from drop_leaf into save_leaf. + */ +void +xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk) +{ + xfs_dir_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf; + xfs_dir_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr; + xfs_mount_t *mp; + char *tmpbuffer; + + /* + * Set up environment. + */ + mp = state->mp; + ASSERT(drop_blk->magic == XFS_DIR_LEAF_MAGIC); + ASSERT(save_blk->magic == XFS_DIR_LEAF_MAGIC); + drop_leaf = drop_blk->bp->data; + save_leaf = save_blk->bp->data; + ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + drop_hdr = &drop_leaf->hdr; + save_hdr = &save_leaf->hdr; + + /* + * Save last hashval from dying block for later Btree fixup. + */ + drop_blk->hashval = INT_GET(drop_leaf->entries[ drop_leaf->hdr.count-1 ].hashval, ARCH_CONVERT); + + /* + * Check if we need a temp buffer, or can we do it in place. + * Note that we don't check "leaf" for holes because we will + * always be dropping it, toosmall() decided that for us already. + */ + if (save_hdr->holes == 0) { + /* + * dest leaf has no holes, so we add there. May need + * to make some room in the entry array. 
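+ * Hash order decides which end receives them: if the dying block
+ * sorts first, its entries go in at index 0, otherwise they are
+ * appended after save_hdr->count.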
+ */ + if (xfs_dir_leaf_order(save_blk->bp, drop_blk->bp)) { + xfs_dir_leaf_moveents(drop_leaf, 0, save_leaf, 0, + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + } else { + xfs_dir_leaf_moveents(drop_leaf, 0, + save_leaf, INT_GET(save_hdr->count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + } + } else { + /* + * Destination has holes, so we make a temporary copy + * of the leaf and add them both to that. + */ + tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP); + ASSERT(tmpbuffer != NULL); + memset(tmpbuffer, 0, state->blocksize); + tmp_leaf = (xfs_dir_leafblock_t *)tmpbuffer; + tmp_hdr = &tmp_leaf->hdr; + tmp_hdr->info = save_hdr->info; /* struct copy */ + INT_ZERO(tmp_hdr->count, ARCH_CONVERT); + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize); + if (INT_ISZERO(tmp_hdr->firstused, ARCH_CONVERT)) + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize - 1); + INT_ZERO(tmp_hdr->namebytes, ARCH_CONVERT); + if (xfs_dir_leaf_order(save_blk->bp, drop_blk->bp)) { + xfs_dir_leaf_moveents(drop_leaf, 0, tmp_leaf, 0, + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + xfs_dir_leaf_moveents(save_leaf, 0, + tmp_leaf, INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(save_hdr->count, ARCH_CONVERT), mp); + } else { + xfs_dir_leaf_moveents(save_leaf, 0, tmp_leaf, 0, + (int)INT_GET(save_hdr->count, ARCH_CONVERT), mp); + xfs_dir_leaf_moveents(drop_leaf, 0, + tmp_leaf, INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + } + memcpy(save_leaf, tmp_leaf, state->blocksize); + kmem_free(tmpbuffer, state->blocksize); + } + + xfs_da_log_buf(state->args->trans, save_blk->bp, 0, + state->blocksize - 1); + + /* + * Copy out last hashval in each block for B-tree code. + */ + save_blk->hashval = INT_GET(save_leaf->entries[ INT_GET(save_leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); +} + +/*======================================================================== + * Routines used for finding things in the Btree. + *========================================================================*/ + +/* + * Look up a name in a leaf directory structure. + * This is the internal routine, it uses the caller's buffer. + * + * Note that duplicate keys are allowed, but only check within the + * current leaf node. The Btree code must check in adjacent leaf nodes. + * + * Return in *index the index into the entry[] array of either the found + * entry, or where the entry should have been (insert before that entry). + * + * Don't change the args->inumber unless we find the filename. + */ +int +xfs_dir_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args, int *index) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + int probe, span; + xfs_dahash_t hashval; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8)); + + /* + * Binary search. 
(note: small blocks will skip this loop) + */ + hashval = args->hashval; + probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2; + for (entry = &leaf->entries[probe]; span > 4; + entry = &leaf->entries[probe]) { + span /= 2; + if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval) + probe += span; + else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval) + probe -= span; + else + break; + } + ASSERT((probe >= 0) && \ + ((INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)))); + ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT) == hashval)); + + /* + * Since we may have duplicate hashval's, find the first matching + * hashval in the leaf. + */ + while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT) >= hashval)) { + entry--; + probe--; + } + while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) && (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) { + entry++; + probe++; + } + if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT)) || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) { + *index = probe; + ASSERT(args->oknoent); + return(XFS_ERROR(ENOENT)); + } + + /* + * Duplicate keys may be present, so search all of them for a match. + */ + while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) && (INT_GET(entry->hashval, ARCH_CONVERT) == hashval)) { + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + if (entry->namelen == args->namelen && + namest->name[0] == args->name[0] && + memcmp(args->name, namest->name, args->namelen) == 0) { + XFS_DIR_SF_GET_DIRINO_ARCH(&namest->inumber, &args->inumber, ARCH_CONVERT); + *index = probe; + return(XFS_ERROR(EEXIST)); + } + entry++; + probe++; + } + *index = probe; + ASSERT(probe == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); + return(XFS_ERROR(ENOENT)); +} + +/*======================================================================== + * Utility routines. + *========================================================================*/ + +/* + * Move the indicated entries from one leaf to another. + * NOTE: this routine modifies both source and destination leaves. + */ +/* ARGSUSED */ +STATIC void +xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s, + xfs_dir_leafblock_t *leaf_d, int start_d, + int count, xfs_mount_t *mp) +{ + xfs_dir_leaf_hdr_t *hdr_s, *hdr_d; + xfs_dir_leaf_entry_t *entry_s, *entry_d; + int tmp, i; + + /* + * Check for nothing to do. + */ + if (count == 0) + return; + + /* + * Set up environment. + */ + ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + hdr_s = &leaf_s->hdr; + hdr_d = &leaf_d->hdr; + ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) && (INT_GET(hdr_s->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); + ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_s->count, ARCH_CONVERT)*sizeof(*entry_s))+sizeof(*hdr_s))); + ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)); + ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_d->count, ARCH_CONVERT)*sizeof(*entry_d))+sizeof(*hdr_d))); + + ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT)); + ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT)); + ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT)); + + /* + * Move the entries in the destination leaf up to make a hole? 
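+ * Only needed when inserting into the middle of the table:
+ * entries from start_d onwards are shifted up by "count" slots
+ * so the incoming entries can drop straight into place.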
+ */
+ if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) {
+ tmp = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d;
+ tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
+ entry_s = &leaf_d->entries[start_d];
+ entry_d = &leaf_d->entries[start_d + count];
+ memcpy(entry_d, entry_s, tmp);
+ }
+
+ /*
+ * Copy all entries in the same (sorted) order,
+ * but allocate filenames packed and in sequence.
+ */
+ entry_s = &leaf_s->entries[start_s];
+ entry_d = &leaf_d->entries[start_d];
+ for (i = 0; i < count; entry_s++, entry_d++, i++) {
+ ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) >= INT_GET(hdr_s->firstused, ARCH_CONVERT));
+ ASSERT(entry_s->namelen < MAXNAMELEN);
+ tmp = XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry_s);
+ INT_MOD(hdr_d->firstused, ARCH_CONVERT, -(tmp));
+ entry_d->hashval = entry_s->hashval; /* INT_: direct copy */
+ INT_COPY(entry_d->nameidx, hdr_d->firstused, ARCH_CONVERT);
+ entry_d->namelen = entry_s->namelen;
+ ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp));
+ memcpy(XFS_DIR_LEAF_NAMESTRUCT(leaf_d, INT_GET(entry_d->nameidx, ARCH_CONVERT)),
+ XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)), tmp);
+ ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp));
+ memset((char *)XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)),
+ 0, tmp);
+ INT_MOD(hdr_s->namebytes, ARCH_CONVERT, -(entry_d->namelen));
+ INT_MOD(hdr_d->namebytes, ARCH_CONVERT, entry_d->namelen);
+ INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
+ INT_MOD(hdr_d->count, ARCH_CONVERT, +1);
+ tmp = INT_GET(hdr_d->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t)
+ + (uint)sizeof(xfs_dir_leaf_hdr_t);
+ ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp);
+
+ }
+
+ /*
+ * Zero out the entries we just copied.
+ */
+ if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) {
+ tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t);
+ entry_s = &leaf_s->entries[start_s];
+ ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp));
+ memset((char *)entry_s, 0, tmp);
+ } else {
+ /*
+ * Move the remaining entries down to fill the hole,
+ * then zero the entries at the top.
+ */
+ tmp = INT_GET(hdr_s->count, ARCH_CONVERT) - count;
+ tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
+ entry_s = &leaf_s->entries[start_s + count];
+ entry_d = &leaf_s->entries[start_s];
+ memcpy(entry_d, entry_s, tmp);
+
+ tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t);
+ entry_s = &leaf_s->entries[INT_GET(hdr_s->count, ARCH_CONVERT)];
+ ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp));
+ memset((char *)entry_s, 0, tmp);
+ }
+
+ /*
+ * Fill in the freemap information
+ */
+ INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_hdr_t));
+ INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT, INT_GET(hdr_d->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t));
+ INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, INT_GET(hdr_d->firstused, ARCH_CONVERT) - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
+ INT_SET(hdr_d->freemap[1].base, ARCH_CONVERT, INT_ZERO(hdr_d->freemap[2].base, ARCH_CONVERT));
+ INT_SET(hdr_d->freemap[1].size, ARCH_CONVERT, INT_ZERO(hdr_d->freemap[2].size, ARCH_CONVERT));
+ hdr_s->holes = 1; /* leaf may not be compact */
+}
+
+/*
+ * Compare two leaf blocks "order".
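+ *
+ * Returns 1 when leaf2 sorts before leaf1, i.e. when leaf2's first
+ * or last hashval is below the corresponding hashval of leaf1.  The
+ * rebalance and unbalance paths use this to decide which block
+ * logically comes first.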
+ */ +int +xfs_dir_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) +{ + xfs_dir_leafblock_t *leaf1, *leaf2; + + leaf1 = leaf1_bp->data; + leaf2 = leaf2_bp->data; + ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) && + (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC)); + if ((INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) && + ((INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + return(1); + } + return(0); +} + +/* + * Pick up the last hashvalue from a leaf block. + */ +xfs_dahash_t +xfs_dir_leaf_lasthash(xfs_dabuf_t *bp, int *count) +{ + xfs_dir_leafblock_t *leaf; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + if (count) + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + if (INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + return(0); + return(INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); +} + +/* + * Copy out directory entries for getdents(), for leaf directories. + */ +int +xfs_dir_leaf_getdents_int( + xfs_dabuf_t *bp, + xfs_inode_t *dp, + xfs_dablk_t bno, + uio_t *uio, + int *eobp, + xfs_dirent_t *dbp, + xfs_dir_put_t put, + xfs_daddr_t nextda) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + int entno, want_entno, i, nextentno; + xfs_mount_t *mp; + xfs_dahash_t cookhash; + xfs_dahash_t nexthash = 0; +#if (BITS_PER_LONG == 32) + xfs_dahash_t lasthash = XFS_DA_MAXHASH; +#endif + xfs_dir_put_args_t p; + + mp = dp->i_mount; + leaf = bp->data; + if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { + *eobp = 1; + return(XFS_ERROR(ENOENT)); /* XXX wrong code */ + } + + want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); + + cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); + + xfs_dir_trace_g_dul("leaf: start", dp, uio, leaf); + + /* + * Re-find our place. + */ + for (i = entno = 0, entry = &leaf->entries[0]; + i < INT_GET(leaf->hdr.count, ARCH_CONVERT); + entry++, i++) { + + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, + INT_GET(entry->nameidx, ARCH_CONVERT)); + + if (unlikely( + ((char *)namest < (char *)leaf) || + ((char *)namest >= (char *)leaf + XFS_LBSIZE(mp)) || + (entry->namelen >= MAXNAMELEN))) { + XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(1)", + XFS_ERRLEVEL_LOW, mp, leaf); + xfs_dir_trace_g_du("leaf: corrupted", dp, uio); + return XFS_ERROR(EFSCORRUPTED); + } + if (INT_GET(entry->hashval, ARCH_CONVERT) >= cookhash) { + if ( entno < want_entno + && INT_GET(entry->hashval, ARCH_CONVERT) + == cookhash) { + /* + * Trying to get to a particular offset in a + * run of equal-hashval entries. + */ + entno++; + } else if ( want_entno > 0 + && entno == want_entno + && INT_GET(entry->hashval, ARCH_CONVERT) + == cookhash) { + break; + } else { + entno = 0; + break; + } + } + } + + if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { + xfs_dir_trace_g_du("leaf: hash not found", dp, uio); + if (!INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) + uio->uio_offset = + XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH); + /* + * Don't set uio_offset if there's another block: + * the node code will be setting uio_offset anyway. 
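+ *
+ * (uio_offset is an opaque cookie here: XFS_PUT_COOKIE() packs the
+ * block number, an entry number and the hashval into one xfs_off_t,
+ * and the entry number is what lets a readdir resume inside a run
+ * of names that hash to the same value.)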
+ */
+ *eobp = 0;
+ return(0);
+ }
+ xfs_dir_trace_g_due("leaf: hash found", dp, uio, entry);
+
+ p.dbp = dbp;
+ p.put = put;
+ p.uio = uio;
+
+ /*
+ * We're synchronized, start copying entries out to the user.
+ */
+ for (; entno >= 0 && i < INT_GET(leaf->hdr.count, ARCH_CONVERT);
+ entry++, i++, (entno = nextentno)) {
+ int lastresid=0, retval;
+ xfs_dircook_t lastoffset;
+ xfs_dahash_t thishash;
+
+ /*
+ * Check for a damaged directory leaf block and pick up
+ * the inode number from this entry.
+ */
+ namest = XFS_DIR_LEAF_NAMESTRUCT(leaf,
+ INT_GET(entry->nameidx, ARCH_CONVERT));
+
+ if (unlikely(
+ ((char *)namest < (char *)leaf) ||
+ ((char *)namest >= (char *)leaf + XFS_LBSIZE(mp)) ||
+ (entry->namelen >= MAXNAMELEN))) {
+ XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(2)",
+ XFS_ERRLEVEL_LOW, mp, leaf);
+ xfs_dir_trace_g_du("leaf: corrupted", dp, uio);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
+ thishash = INT_GET(entry->hashval, ARCH_CONVERT);
+
+ /*
+ * NOTE! Linux "filldir" semantics require that the
+ * offset "cookie" be for this entry, not the
+ * next; all the actual shuffling to make it
+ * "look right" to the user is done in filldir.
+ */
+ XFS_PUT_COOKIE(p.cook, mp, bno, entno, thishash);
+
+ xfs_dir_trace_g_duc("leaf: middle cookie ",
+ dp, uio, p.cook.o);
+
+ if (i < (INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1)) {
+ nexthash = INT_GET(entry[1].hashval, ARCH_CONVERT);
+
+ if (nexthash == INT_GET(entry->hashval, ARCH_CONVERT))
+ nextentno = entno + 1;
+ else
+ nextentno = 0;
+
+ } else if (INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) {
+ xfs_dabuf_t *bp2;
+ xfs_dir_leafblock_t *leaf2;
+
+ ASSERT(nextda != -1);
+
+ retval = xfs_da_read_buf(dp->i_transp, dp,
+ INT_GET(leaf->hdr.info.forw,
+ ARCH_CONVERT), nextda,
+ &bp2, XFS_DATA_FORK);
+ if (retval)
+ return(retval);
+
+ ASSERT(bp2 != NULL);
+
+ leaf2 = bp2->data;
+
+ if (unlikely(
+ (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT)
+ != XFS_DIR_LEAF_MAGIC)
+ || (INT_GET(leaf2->hdr.info.back, ARCH_CONVERT)
+ != bno))) { /* GROT */
+ XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)",
+ XFS_ERRLEVEL_LOW, mp,
+ leaf2);
+ xfs_da_brelse(dp->i_transp, bp2);
+
+ return(XFS_ERROR(EFSCORRUPTED));
+ }
+
+ nexthash = INT_GET(leaf2->entries[0].hashval,
+ ARCH_CONVERT);
+ nextentno = -1;
+
+ xfs_da_brelse(dp->i_transp, bp2);
+ xfs_dir_trace_g_duc("leaf: next blk cookie",
+ dp, uio, p.cook.o);
+ } else {
+ nextentno = -1;
+ nexthash = XFS_DA_MAXHASH;
+ }
+
+ /*
+ * Save off the cookie so we can fall back should the
+ * 'put' into the outgoing buffer fail. To handle a run
+ * of equal-hashvals, the off_t structure on 64bit
+ * builds has entno built into the cookie to ID the
+ * entry. On 32bit builds, we only have space for the
+ * hashval so we can't ID specific entries within a group
+ * of same hashval entries. For this, lastoffset is set
+ * to the first in the run of equal hashvals so we don't
+ * include any entries unless we can include all entries
+ * that share the same hashval. Hopefully the buffer
+ * provided is big enough to handle it (see pv763517).
+ */
+#if (BITS_PER_LONG == 32)
+ if (INT_GET(entry->hashval, ARCH_CONVERT) != lasthash) {
+ XFS_PUT_COOKIE(lastoffset, mp, bno, entno, thishash);
+ lastresid = uio->uio_resid;
+ lasthash = thishash;
+ } else {
+ xfs_dir_trace_g_duc("leaf: DUP COOKIES, skipped",
+ dp, uio, p.cook.o);
+ }
+#else
+ XFS_PUT_COOKIE(lastoffset, mp, bno, entno, thishash);
+ lastresid = uio->uio_resid;
+#endif /* BITS_PER_LONG == 32 */
+
+ /*
+ * Put the current entry into the outgoing buffer.
If we fail
+ * then restore the UIO to the first entry in the current
+ * run of equal-hashval entries (probably 1 entry long).
+ */
+#if XFS_BIG_FILESYSTEMS
+ p.ino = XFS_GET_DIR_INO_ARCH(mp, namest->inumber, ARCH_CONVERT) + mp->m_inoadd;
+#else
+ p.ino = XFS_GET_DIR_INO_ARCH(mp, namest->inumber, ARCH_CONVERT);
+#endif
+ p.name = (char *)namest->name;
+ p.namelen = entry->namelen;
+
+ retval = p.put(&p);
+
+ if (!p.done) {
+ uio->uio_offset = lastoffset.o;
+ uio->uio_resid = lastresid;
+
+ *eobp = 1;
+
+ xfs_dir_trace_g_du("leaf: E-O-B", dp, uio);
+
+ return(retval);
+ }
+ }
+
+ XFS_PUT_COOKIE(p.cook, mp, 0, 0, nexthash);
+
+ uio->uio_offset = p.cook.o;
+
+ *eobp = 0;
+
+ xfs_dir_trace_g_du("leaf: E-O-F", dp, uio);
+
+ return(0);
+}
+
+/*
+ * Format a dirent64 structure and copy it out to the user's buffer.
+ */
+int
+xfs_dir_put_dirent64_direct(xfs_dir_put_args_t *pa)
+{
+ iovec_t *iovp;
+ int reclen, namelen;
+ xfs_dirent_t *idbp;
+ uio_t *uio;
+
+ namelen = pa->namelen;
+ reclen = DIRENTSIZE(namelen);
+ uio = pa->uio;
+ if (reclen > uio->uio_resid) {
+ pa->done = 0;
+ return 0;
+ }
+ iovp = uio->uio_iov;
+ idbp = (xfs_dirent_t *)iovp->iov_base;
+ iovp->iov_base = (char *)idbp + reclen;
+ iovp->iov_len -= reclen;
+ uio->uio_resid -= reclen;
+ idbp->d_reclen = reclen;
+ idbp->d_ino = pa->ino;
+ idbp->d_off = pa->cook.o;
+ idbp->d_name[namelen] = '\0';
+ pa->done = 1;
+ memcpy(idbp->d_name, pa->name, namelen);
+ return 0;
+}
+
+/*
+ * Format a dirent64 structure and copy it out to the user's buffer.
+ */
+int
+xfs_dir_put_dirent64_uio(xfs_dir_put_args_t *pa)
+{
+ int retval, reclen, namelen;
+ xfs_dirent_t *idbp;
+ uio_t *uio;
+
+ namelen = pa->namelen;
+ reclen = DIRENTSIZE(namelen);
+ uio = pa->uio;
+ if (reclen > uio->uio_resid) {
+ pa->done = 0;
+ return 0;
+ }
+ idbp = pa->dbp;
+ idbp->d_reclen = reclen;
+ idbp->d_ino = pa->ino;
+ idbp->d_off = pa->cook.o;
+ idbp->d_name[namelen] = '\0';
+ memcpy(idbp->d_name, pa->name, namelen);
+ retval = uiomove((caddr_t)idbp, reclen, UIO_READ, uio);
+ pa->done = (retval == 0);
+ return retval;
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir_leaf.h linux.21rc5-ac2/fs/xfs/xfs_dir_leaf.h
--- linux.21rc5/fs/xfs/xfs_dir_leaf.h 1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_dir_leaf.h 2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR_LEAF_H__ +#define __XFS_DIR_LEAF_H__ + +/* + * Directory layout, internal structure, access macros, etc. + * + * Large directories are structured around Btrees where all the data + * elements are in the leaf nodes. Filenames are hashed into an int, + * then that int is used as the index into the Btree. Since the hashval + * of a filename may not be unique, we may have duplicate keys. The + * internal links in the Btree are logical block offsets into the file. + */ + +struct uio; +struct xfs_bmap_free; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_da_state; +struct xfs_da_state_blk; +struct xfs_dir_put_args; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/*======================================================================== + * Directory Structure when equal to XFS_LBSIZE(mp) bytes. + *========================================================================*/ + +/* + * This is the structure of the leaf nodes in the Btree. + * + * Struct leaf_entry's are packed from the top. Names grow from the bottom + * but are not packed. The freemap contains run-length-encoded entries + * for the free bytes after the leaf_entry's, but only the N largest such, + * smaller runs are dropped. When the freemap doesn't show enough space + * for an allocation, we compact the namelist area and try again. If we + * still don't have enough space, then we have to split the block. + * + * Since we have duplicate hash keys, for each key that matches, compare + * the actual string. The root and intermediate node search always takes + * the first-in-the-block key match found, so we should only have to work + * "forw"ard. If none matches, continue with the "forw"ard leaf nodes + * until the hash key changes or the filename is found. + * + * The parent directory and the self-pointer are explicitly represented + * (ie: there are entries for "." and ".."). + * + * Note that the count being a __uint16_t limits us to something like a + * blocksize of 1.3MB in the face of worst case (short) filenames. + */ +#define XFS_DIR_LEAF_MAPSIZE 3 /* how many freespace slots */ + +typedef struct xfs_dir_leafblock { + struct xfs_dir_leaf_hdr { /* constant-structure header block */ + xfs_da_blkinfo_t info; /* block type, links, etc. 
*/ + __uint16_t count; /* count of active leaf_entry's */ + __uint16_t namebytes; /* num bytes of name strings stored */ + __uint16_t firstused; /* first used byte in name area */ + __uint8_t holes; /* != 0 if blk needs compaction */ + __uint8_t pad1; + struct xfs_dir_leaf_map {/* RLE map of free bytes */ + __uint16_t base; /* base of free region */ + __uint16_t size; /* run length of free region */ + } freemap[XFS_DIR_LEAF_MAPSIZE]; /* N largest free regions */ + } hdr; + struct xfs_dir_leaf_entry { /* sorted on key, not name */ + xfs_dahash_t hashval; /* hash value of name */ + __uint16_t nameidx; /* index into buffer of name */ + __uint8_t namelen; /* length of name string */ + __uint8_t pad2; + } entries[1]; /* var sized array */ + struct xfs_dir_leaf_name { + xfs_dir_ino_t inumber; /* inode number for this key */ + __uint8_t name[1]; /* name string itself */ + } namelist[1]; /* grows from bottom of buf */ +} xfs_dir_leafblock_t; +typedef struct xfs_dir_leaf_hdr xfs_dir_leaf_hdr_t; +typedef struct xfs_dir_leaf_map xfs_dir_leaf_map_t; +typedef struct xfs_dir_leaf_entry xfs_dir_leaf_entry_t; +typedef struct xfs_dir_leaf_name xfs_dir_leaf_name_t; + +/* + * Length of name for which a 512-byte block filesystem + * can get a double split. + */ +#define XFS_DIR_LEAF_CAN_DOUBLE_SPLIT_LEN \ + (512 - (uint)sizeof(xfs_dir_leaf_hdr_t) - \ + (uint)sizeof(xfs_dir_leaf_entry_t) * 2 - \ + (uint)sizeof(xfs_dir_leaf_name_t) * 2 - (MAXNAMELEN - 2) + 1 + 1) + +typedef int (*xfs_dir_put_t)(struct xfs_dir_put_args *pa); + +typedef union { + xfs_off_t o; /* offset (cookie) */ + /* + * Watch the order here (endian-ness dependent). + */ + struct { +#if __BYTE_ORDER == __LITTLE_ENDIAN + xfs_dahash_t h; /* hash value */ + __uint32_t be; /* block and entry */ +#else /* __BYTE_ORDER == __BIG_ENDIAN */ + __uint32_t be; /* block and entry */ + xfs_dahash_t h; /* hash value */ +#endif /* __BYTE_ORDER == __BIG_ENDIAN */ + } s; +} xfs_dircook_t; + +#define XFS_PUT_COOKIE(c,mp,bno,entry,hash) \ + ((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash)) + +#define XFS_GET_DIR_INO_ARCH(mp,di,arch) \ + DIRINO_GET_ARCH(&(di),arch) +#define XFS_GET_DIR_INO(mp,di) \ + XFS_GET_DIR_INO_ARCH(mp,di,ARCH_NOCONVERT) + +typedef struct xfs_dir_put_args +{ + xfs_dircook_t cook; /* cookie of (next) entry */ + xfs_intino_t ino; /* inode number */ + struct xfs_dirent *dbp; /* buffer pointer */ + char *name; /* directory entry name */ + int namelen; /* length of name */ + int done; /* output: set if value was stored */ + xfs_dir_put_t put; /* put function ptr (i/o) */ + struct uio *uio; /* uio control structure */ +} xfs_dir_put_args_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_LEAF_ENTSIZE_BYNAME) +int xfs_dir_leaf_entsize_byname(int len); +#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) xfs_dir_leaf_entsize_byname(len) +#else +#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) /* space a name will use */ \ + ((uint)sizeof(xfs_dir_leaf_name_t)-1 + len) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_LEAF_ENTSIZE_BYENTRY) +int xfs_dir_leaf_entsize_byentry(xfs_dir_leaf_entry_t *entry); +#define XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry) \ + xfs_dir_leaf_entsize_byentry(entry) +#else +#define XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry) /* space an entry will use */ \ + ((uint)sizeof(xfs_dir_leaf_name_t)-1 + (entry)->namelen) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_LEAF_NAMESTRUCT) +xfs_dir_leaf_name_t * +xfs_dir_leaf_namestruct(xfs_dir_leafblock_t *leafp, int offset); +#define 
XFS_DIR_LEAF_NAMESTRUCT(leafp,offset) \ + xfs_dir_leaf_namestruct(leafp,offset) +#else +#define XFS_DIR_LEAF_NAMESTRUCT(leafp,offset) /* point to name struct */ \ + ((xfs_dir_leaf_name_t *)&((char *)(leafp))[offset]) +#endif + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Internal routines when dirsize < XFS_LITINO(mp). + */ +int xfs_dir_shortform_create(struct xfs_da_args *args, xfs_ino_t parent); +int xfs_dir_shortform_addname(struct xfs_da_args *args); +int xfs_dir_shortform_lookup(struct xfs_da_args *args); +int xfs_dir_shortform_to_leaf(struct xfs_da_args *args); +int xfs_dir_shortform_removename(struct xfs_da_args *args); +int xfs_dir_shortform_getdents(struct xfs_inode *dp, struct uio *uio, int *eofp, + struct xfs_dirent *dbp, xfs_dir_put_t put); +int xfs_dir_shortform_replace(struct xfs_da_args *args); + +/* + * Internal routines when dirsize == XFS_LBSIZE(mp). + */ +int xfs_dir_leaf_to_node(struct xfs_da_args *args); +int xfs_dir_leaf_to_shortform(struct xfs_da_args *args); + +/* + * Routines used for growing the Btree. + */ +int xfs_dir_leaf_create(struct xfs_da_args *args, xfs_dablk_t which_block, + struct xfs_dabuf **bpp); +int xfs_dir_leaf_split(struct xfs_da_state *state, + struct xfs_da_state_blk *oldblk, + struct xfs_da_state_blk *newblk); +int xfs_dir_leaf_add(struct xfs_dabuf *leaf_buffer, + struct xfs_da_args *args, int insertion_index); +int xfs_dir_leaf_addname(struct xfs_da_args *args); +int xfs_dir_leaf_lookup_int(struct xfs_dabuf *leaf_buffer, + struct xfs_da_args *args, + int *index_found_at); +int xfs_dir_leaf_remove(struct xfs_trans *trans, + struct xfs_dabuf *leaf_buffer, + int index_to_remove); +int xfs_dir_leaf_getdents_int(struct xfs_dabuf *bp, struct xfs_inode *dp, + xfs_dablk_t bno, struct uio *uio, + int *eobp, struct xfs_dirent *dbp, + xfs_dir_put_t put, xfs_daddr_t nextda); + +/* + * Routines used for shrinking the Btree. + */ +int xfs_dir_leaf_toosmall(struct xfs_da_state *state, int *retval); +void xfs_dir_leaf_unbalance(struct xfs_da_state *state, + struct xfs_da_state_blk *drop_blk, + struct xfs_da_state_blk *save_blk); + +/* + * Utility routines. + */ +uint xfs_dir_leaf_lasthash(struct xfs_dabuf *bp, int *count); +int xfs_dir_leaf_order(struct xfs_dabuf *leaf1_bp, + struct xfs_dabuf *leaf2_bp); +int xfs_dir_put_dirent64_direct(xfs_dir_put_args_t *pa); +int xfs_dir_put_dirent64_uio(xfs_dir_put_args_t *pa); +int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino); + + +/* + * Global data. + */ +extern xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot; + +#endif /* __XFS_DIR_LEAF_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dir_sf.h linux.21rc5-ac2/fs/xfs/xfs_dir_sf.h --- linux.21rc5/fs/xfs/xfs_dir_sf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dir_sf.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_DIR_SF_H__
+#define __XFS_DIR_SF_H__
+
+/*
+ * Directory layout when stored internal to an inode.
+ *
+ * Small directories are packed as tightly as possible so as to
+ * fit into the literal area of the inode.
+ */
+
+typedef struct { __uint8_t i[sizeof(xfs_ino_t)]; } xfs_dir_ino_t;
+
+/*
+ * The parent directory has a dedicated field, and the self-pointer must
+ * be calculated on the fly.
+ *
+ * Entries are packed toward the top as tightly as possible. The header
+ * and the elements must be memcpy'd out into a work area to get correct
+ * alignment for the inode number fields.
+ */
+typedef struct xfs_dir_shortform {
+ struct xfs_dir_sf_hdr { /* constant-structure header block */
+ xfs_dir_ino_t parent; /* parent dir inode number */
+ __uint8_t count; /* count of active entries */
+ } hdr;
+ struct xfs_dir_sf_entry {
+ xfs_dir_ino_t inumber; /* referenced inode number */
+ __uint8_t namelen; /* actual length of name (no NULL) */
+ __uint8_t name[1]; /* name */
+ } list[1]; /* variable sized array */
+} xfs_dir_shortform_t;
+typedef struct xfs_dir_sf_hdr xfs_dir_sf_hdr_t;
+typedef struct xfs_dir_sf_entry xfs_dir_sf_entry_t;
+
+/*
+ * We generate this then sort it, so that readdirs are returned in
+ * hash-order. Else seekdir won't work.
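+ *
+ * Shortform entries are stored in the inode in insertion order, but
+ * readdir cookies are hash-based (see xfs_dircook_t in
+ * xfs_dir_leaf.h), so xfs_dir_shortform_getdents() builds one of
+ * these records per entry and sorts them by hash before copying
+ * anything out; "seqno" orders names that share a hash value.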
+ */ +typedef struct xfs_dir_sf_sort { + __uint8_t entno; /* .=0, ..=1, else entry# + 2 */ + __uint8_t seqno; /* sequence # with same hash value */ + __uint8_t namelen; /* length of name value (no null) */ + xfs_dahash_t hash; /* this entry's hash value */ + xfs_intino_t ino; /* this entry's inode number */ + char *name; /* name value, pointer into buffer */ +} xfs_dir_sf_sort_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_GET_DIRINO) +void xfs_dir_sf_get_dirino_arch(xfs_dir_ino_t *from, xfs_ino_t *to, xfs_arch_t arch); +void xfs_dir_sf_get_dirino(xfs_dir_ino_t *from, xfs_ino_t *to); +#define XFS_DIR_SF_GET_DIRINO_ARCH(from,to,arch) xfs_dir_sf_get_dirino_arch(from, to, arch) +#define XFS_DIR_SF_GET_DIRINO(from,to) xfs_dir_sf_get_dirino(from, to) +#else +#define XFS_DIR_SF_GET_DIRINO_ARCH(from,to,arch) DIRINO_COPY_ARCH(from,to,arch) +#define XFS_DIR_SF_GET_DIRINO(from,to) DIRINO_COPY_ARCH(from,to,ARCH_NOCONVERT) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_PUT_DIRINO) +void xfs_dir_sf_put_dirino_arch(xfs_ino_t *from, xfs_dir_ino_t *to, xfs_arch_t arch); +void xfs_dir_sf_put_dirino(xfs_ino_t *from, xfs_dir_ino_t *to); +#define XFS_DIR_SF_PUT_DIRINO_ARCH(from,to,arch) xfs_dir_sf_put_dirino_arch(from, to, arch) +#define XFS_DIR_SF_PUT_DIRINO(from,to) xfs_dir_sf_put_dirino(from, to) +#else +#define XFS_DIR_SF_PUT_DIRINO_ARCH(from,to,arch) DIRINO_COPY_ARCH(from,to,arch) +#define XFS_DIR_SF_PUT_DIRINO(from,to) DIRINO_COPY_ARCH(from,to,ARCH_NOCONVERT) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_ENTSIZE_BYNAME) +int xfs_dir_sf_entsize_byname(int len); +#define XFS_DIR_SF_ENTSIZE_BYNAME(len) xfs_dir_sf_entsize_byname(len) +#else +#define XFS_DIR_SF_ENTSIZE_BYNAME(len) /* space a name uses */ \ + ((uint)sizeof(xfs_dir_sf_entry_t)-1 + (len)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_ENTSIZE_BYENTRY) +int xfs_dir_sf_entsize_byentry(xfs_dir_sf_entry_t *sfep); +#define XFS_DIR_SF_ENTSIZE_BYENTRY(sfep) xfs_dir_sf_entsize_byentry(sfep) +#else +#define XFS_DIR_SF_ENTSIZE_BYENTRY(sfep) /* space an entry uses */ \ + ((uint)sizeof(xfs_dir_sf_entry_t)-1 + (sfep)->namelen) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_NEXTENTRY) +xfs_dir_sf_entry_t *xfs_dir_sf_nextentry(xfs_dir_sf_entry_t *sfep); +#define XFS_DIR_SF_NEXTENTRY(sfep) xfs_dir_sf_nextentry(sfep) +#else +#define XFS_DIR_SF_NEXTENTRY(sfep) /* next entry in struct */ \ + ((xfs_dir_sf_entry_t *) \ + ((char *)(sfep) + XFS_DIR_SF_ENTSIZE_BYENTRY(sfep))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_ALLFIT) +int xfs_dir_sf_allfit(int count, int totallen); +#define XFS_DIR_SF_ALLFIT(count,totallen) \ + xfs_dir_sf_allfit(count,totallen) +#else +#define XFS_DIR_SF_ALLFIT(count,totallen) /* will all entries fit? */ \ + ((uint)sizeof(xfs_dir_sf_hdr_t) + \ + ((uint)sizeof(xfs_dir_sf_entry_t)-1)*(count) + (totallen)) +#endif + +#ifdef XFS_ALL_TRACE +#define XFS_DIR_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_DIR_TRACE +#endif + +/* + * Kernel tracing support for directories. + */ +struct uio; +struct xfs_inode; +struct xfs_da_intnode; +struct xfs_dinode; +struct xfs_dir_leafblock; +struct xfs_dir_leaf_entry; + +#define XFS_DIR_TRACE_SIZE 4096 /* size of global trace buffer */ + +/* + * Trace record types. 
+ */ +#define XFS_DIR_KTRACE_G_DU 1 /* dp, uio */ +#define XFS_DIR_KTRACE_G_DUB 2 /* dp, uio, bno */ +#define XFS_DIR_KTRACE_G_DUN 3 /* dp, uio, node */ +#define XFS_DIR_KTRACE_G_DUL 4 /* dp, uio, leaf */ +#define XFS_DIR_KTRACE_G_DUE 5 /* dp, uio, leaf entry */ +#define XFS_DIR_KTRACE_G_DUC 6 /* dp, uio, cookie */ + +#if defined(XFS_DIR_TRACE) + +void xfs_dir_trace_g_du(char *where, struct xfs_inode *dp, struct uio *uio); +void xfs_dir_trace_g_dub(char *where, struct xfs_inode *dp, struct uio *uio, + xfs_dablk_t bno); +void xfs_dir_trace_g_dun(char *where, struct xfs_inode *dp, struct uio *uio, + struct xfs_da_intnode *node); +void xfs_dir_trace_g_dul(char *where, struct xfs_inode *dp, struct uio *uio, + struct xfs_dir_leafblock *leaf); +void xfs_dir_trace_g_due(char *where, struct xfs_inode *dp, struct uio *uio, + struct xfs_dir_leaf_entry *entry); +void xfs_dir_trace_g_duc(char *where, struct xfs_inode *dp, struct uio *uio, + xfs_off_t cookie); +void xfs_dir_trace_enter(int type, char *where, + __psunsigned_t a0, __psunsigned_t a1, + __psunsigned_t a2, __psunsigned_t a3, + __psunsigned_t a4, __psunsigned_t a5, + __psunsigned_t a6, __psunsigned_t a7, + __psunsigned_t a8, __psunsigned_t a9, + __psunsigned_t a10, __psunsigned_t a11); +#else +#define xfs_dir_trace_g_du(w,d,u) +#define xfs_dir_trace_g_dub(w,d,u,b) +#define xfs_dir_trace_g_dun(w,d,u,n) +#define xfs_dir_trace_g_dul(w,d,u,l) +#define xfs_dir_trace_g_due(w,d,u,e) +#define xfs_dir_trace_g_duc(w,d,u,c) +#endif /* DEBUG */ + +#endif /* __XFS_DIR_SF_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dmapi.h linux.21rc5-ac2/fs/xfs/xfs_dmapi.h --- linux.21rc5/fs/xfs/xfs_dmapi.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_dmapi.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DMAPI_H__ +#define __XFS_DMAPI_H__ + +/* Values used to define the on-disk version of dm_attrname_t. All + * on-disk attribute names start with the 8-byte string "SGI_DMI_". + * + * In the on-disk inode, DMAPI attribute names consist of the user-provided + * name with the DMATTR_PREFIXSTRING pre-pended. 
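+ * For example, a user-visible attribute named "attr1" (an illustrative
+ * name) is stored on disk as "SGI_DMI_attr1", i.e. with the
+ * DMATTR_PREFIXSTRING prefix.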
This string must NEVER be + * changed. + */ + +#define DMATTR_PREFIXLEN 8 +#define DMATTR_PREFIXSTRING "SGI_DMI_" + +typedef enum { + DM_EVENT_INVALID = -1, + DM_EVENT_CANCEL = 0, /* not supported */ + DM_EVENT_MOUNT = 1, + DM_EVENT_PREUNMOUNT = 2, + DM_EVENT_UNMOUNT = 3, + DM_EVENT_DEBUT = 4, /* not supported */ + DM_EVENT_CREATE = 5, + DM_EVENT_CLOSE = 6, /* not supported */ + DM_EVENT_POSTCREATE = 7, + DM_EVENT_REMOVE = 8, + DM_EVENT_POSTREMOVE = 9, + DM_EVENT_RENAME = 10, + DM_EVENT_POSTRENAME = 11, + DM_EVENT_LINK = 12, + DM_EVENT_POSTLINK = 13, + DM_EVENT_SYMLINK = 14, + DM_EVENT_POSTSYMLINK = 15, + DM_EVENT_READ = 16, + DM_EVENT_WRITE = 17, + DM_EVENT_TRUNCATE = 18, + DM_EVENT_ATTRIBUTE = 19, + DM_EVENT_DESTROY = 20, + DM_EVENT_NOSPACE = 21, + DM_EVENT_USER = 22, + DM_EVENT_MAX = 23 +} dm_eventtype_t; +#define HAVE_DM_EVENTTYPE_T + +typedef enum { + DM_RIGHT_NULL, + DM_RIGHT_SHARED, + DM_RIGHT_EXCL +} dm_right_t; +#define HAVE_DM_RIGHT_T + +/* Defines for determining if an event message should be sent. */ +#define DM_EVENT_ENABLED(vfsp, ip, event) ( \ + unlikely ((vfsp)->vfs_flag & VFS_DMI) && \ + ( ((ip)->i_d.di_dmevmask & (1 << event)) || \ + ((ip)->i_mount->m_dmevmask & (1 << event)) ) \ + ) + +#define DM_EVENT_ENABLED_IO(vfsp, io, event) ( \ + unlikely ((vfsp)->vfs_flag & VFS_DMI) && \ + ( ((io)->io_dmevmask & (1 << event)) || \ + ((io)->io_mount->m_dmevmask & (1 << event)) ) \ + ) + +#define DM_XFS_VALID_FS_EVENTS ( \ + (1 << DM_EVENT_PREUNMOUNT) | \ + (1 << DM_EVENT_UNMOUNT) | \ + (1 << DM_EVENT_NOSPACE) | \ + (1 << DM_EVENT_DEBUT) | \ + (1 << DM_EVENT_CREATE) | \ + (1 << DM_EVENT_POSTCREATE) | \ + (1 << DM_EVENT_REMOVE) | \ + (1 << DM_EVENT_POSTREMOVE) | \ + (1 << DM_EVENT_RENAME) | \ + (1 << DM_EVENT_POSTRENAME) | \ + (1 << DM_EVENT_LINK) | \ + (1 << DM_EVENT_POSTLINK) | \ + (1 << DM_EVENT_SYMLINK) | \ + (1 << DM_EVENT_POSTSYMLINK) | \ + (1 << DM_EVENT_ATTRIBUTE) | \ + (1 << DM_EVENT_DESTROY) ) + +/* Events valid in dm_set_eventlist() when called with a file handle for + a regular file or a symlink. These events are persistent. +*/ + +#define DM_XFS_VALID_FILE_EVENTS ( \ + (1 << DM_EVENT_ATTRIBUTE) | \ + (1 << DM_EVENT_DESTROY) ) + +/* Events valid in dm_set_eventlist() when called with a file handle for + a directory. These events are persistent. +*/ + +#define DM_XFS_VALID_DIRECTORY_EVENTS ( \ + (1 << DM_EVENT_CREATE) | \ + (1 << DM_EVENT_POSTCREATE) | \ + (1 << DM_EVENT_REMOVE) | \ + (1 << DM_EVENT_POSTREMOVE) | \ + (1 << DM_EVENT_RENAME) | \ + (1 << DM_EVENT_POSTRENAME) | \ + (1 << DM_EVENT_LINK) | \ + (1 << DM_EVENT_POSTLINK) | \ + (1 << DM_EVENT_SYMLINK) | \ + (1 << DM_EVENT_POSTSYMLINK) | \ + (1 << DM_EVENT_ATTRIBUTE) | \ + (1 << DM_EVENT_DESTROY) ) + +/* Events supported by the XFS filesystem. */ +#define DM_XFS_SUPPORTED_EVENTS ( \ + (1 << DM_EVENT_MOUNT) | \ + (1 << DM_EVENT_PREUNMOUNT) | \ + (1 << DM_EVENT_UNMOUNT) | \ + (1 << DM_EVENT_NOSPACE) | \ + (1 << DM_EVENT_CREATE) | \ + (1 << DM_EVENT_POSTCREATE) | \ + (1 << DM_EVENT_REMOVE) | \ + (1 << DM_EVENT_POSTREMOVE) | \ + (1 << DM_EVENT_RENAME) | \ + (1 << DM_EVENT_POSTRENAME) | \ + (1 << DM_EVENT_LINK) | \ + (1 << DM_EVENT_POSTLINK) | \ + (1 << DM_EVENT_SYMLINK) | \ + (1 << DM_EVENT_POSTSYMLINK) | \ + (1 << DM_EVENT_READ) | \ + (1 << DM_EVENT_WRITE) | \ + (1 << DM_EVENT_TRUNCATE) | \ + (1 << DM_EVENT_ATTRIBUTE) | \ + (1 << DM_EVENT_DESTROY) ) + + +/* + * Definitions used for the flags field on dm_send_*_event(). 
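+ *
+ * A minimal usage sketch (the event-send routine itself is assumed
+ * here, it is not declared at this point in the header): a caller
+ * honoring O_NONBLOCK computes
+ *
+ *	flags = FILP_DELAY_FLAG(filp);
+ *
+ * so O_NDELAY/O_NONBLOCK opens become DM_FLAGS_NDELAY and the event
+ * code returns EAGAIN after dm_pending() instead of blocking.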
+ */
+
+#define DM_FLAGS_NDELAY		0x001	/* return EAGAIN after dm_pending() */
+#define DM_FLAGS_UNWANTED	0x002	/* event not in fsys dm_eventset_t */
+
+/*
+ * Macros to turn caller specified delay/block flags into
+ * dm_send_xxxx_event flag DM_FLAGS_NDELAY.
+ */
+
+#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
+			DM_FLAGS_NDELAY : 0)
+#define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
+
+
+extern struct bhv_vfsops xfs_dmops;
+
+extern int dmapi_init(void);
+extern void dmapi_uninit(void);
+
+#endif	/* __XFS_DMAPI_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_dmops.c linux.21rc5-ac2/fs/xfs/xfs_dmops.c
--- linux.21rc5/fs/xfs/xfs_dmops.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_dmops.c	2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+
+
+#ifndef CONFIG_XFS_DMAPI
+xfs_dmops_t	xfs_dmcore_xfs = {
+	.xfs_send_data		= (xfs_send_data_t)fs_nosys,
+	.xfs_send_mmap		= (xfs_send_mmap_t)fs_noerr,
+	.xfs_send_destroy	= (xfs_send_destroy_t)fs_nosys,
+	.xfs_send_namesp	= (xfs_send_namesp_t)fs_nosys,
+	.xfs_send_unmount	= (xfs_send_unmount_t)fs_noval,
+};
+#endif	/* CONFIG_XFS_DMAPI */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_error.c linux.21rc5-ac2/fs/xfs/xfs_error.c
--- linux.21rc5/fs/xfs/xfs_error.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_error.c	2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_sb.h" +#include "xfs_trans.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_utils.h" +#include "xfs_error.h" + +#ifdef DEBUG + +int xfs_etrap[XFS_ERROR_NTRAP] = { + 0, +}; + +int +xfs_error_trap(int e) +{ + int i; + + if (!e) + return 0; + for (i = 0; i < XFS_ERROR_NTRAP; i++) { + if (xfs_etrap[i] == 0) + break; + if (e != xfs_etrap[i]) + continue; + cmn_err(CE_NOTE, "xfs_error_trap: error %d", e); + debug_stop_all_cpus((void *)-1LL); + BUG(); + break; + } + return e; +} +#endif + +#if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) + +int xfs_etest[XFS_NUM_INJECT_ERROR]; +int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; +char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR]; + +void +xfs_error_test_init(void) +{ + memset(xfs_etest, 0, sizeof(xfs_etest)); + memset(xfs_etest_fsid, 0, sizeof(xfs_etest_fsid)); + memset(xfs_etest_fsname, 0, sizeof(xfs_etest_fsname)); +} + +int +xfs_error_test(int error_tag, int *fsidp, char *expression, + int line, char *file, unsigned long randfactor) +{ + int i; + int64_t fsid; + + if (random() % randfactor) + return 0; + + memcpy(&fsid, fsidp, sizeof(fsid_t)); + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) { + cmn_err(CE_WARN, + "Injecting error (%s) at file %s, line %d, on filesystem \"%s\"", + expression, file, line, xfs_etest_fsname[i]); + return 1; + } + } + + return 0; +} + +int +xfs_errortag_add(int error_tag, xfs_mount_t *mp) +{ + int i; + int len; + int64_t fsid; + + memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t)); + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) { + cmn_err(CE_WARN, "XFS error tag #%d on", error_tag); + return 0; + } + } + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if (xfs_etest[i] == 0) { + cmn_err(CE_WARN, "Turned on XFS error tag #%d", + error_tag); + xfs_etest[i] = error_tag; + xfs_etest_fsid[i] = fsid; + 
len = strlen(mp->m_fsname); + xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP); + strcpy(xfs_etest_fsname[i], mp->m_fsname); + return 0; + } + } + + cmn_err(CE_WARN, "error tag overflow, too many turned on"); + + return 1; +} + +int +xfs_errortag_clear(int error_tag, xfs_mount_t *mp) +{ + int i; + int64_t fsid; + + memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t)); + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) { + xfs_etest[i] = 0; + xfs_etest_fsid[i] = 0LL; + kmem_free(xfs_etest_fsname[i], + strlen(xfs_etest_fsname[i]) + 1); + xfs_etest_fsname[i] = NULL; + cmn_err(CE_WARN, "Cleared XFS error tag #%d", + error_tag); + return 0; + } + } + + cmn_err(CE_WARN, "XFS error tag %d not on", error_tag); + + return 1; +} + +int +xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud) +{ + int i; + int cleared = 0; + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if ((fsid == 0LL || xfs_etest_fsid[i] == fsid) && + xfs_etest[i] != 0) { + cleared = 1; + cmn_err(CE_WARN, "Clearing XFS error tag #%d", + xfs_etest[i]); + xfs_etest[i] = 0; + xfs_etest_fsid[i] = 0LL; + kmem_free(xfs_etest_fsname[i], + strlen(xfs_etest_fsname[i]) + 1); + xfs_etest_fsname[i] = NULL; + } + } + + if (loud || cleared) + cmn_err(CE_WARN, + "Cleared all XFS error tags for filesystem \"%s\"", + fsname); + + return 0; +} + +int +xfs_errortag_clearall(xfs_mount_t *mp) +{ + int64_t fsid; + + memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t)); + + return xfs_errortag_clearall_umount(fsid, mp->m_fsname, 1); +} +#endif /* DEBUG || INDUCE_IO_ERROR */ + +static void +xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap) +{ + char *newfmt; + int len = 16 + mp->m_fsname_len + strlen(fmt); + + newfmt = kmem_alloc(len, KM_SLEEP); + sprintf(newfmt, "Filesystem \"%s\": %s", mp->m_fsname, fmt); + icmn_err(level, newfmt, ap); + kmem_free(newfmt, len); +} + +void +xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + xfs_fs_vcmn_err(level, mp, fmt, ap); + va_end(ap); +} + +void +xfs_cmn_err(uint64_t panic_tag, int level, xfs_mount_t *mp, char *fmt, ...) +{ + va_list ap; + +#ifdef DEBUG + xfs_panic_mask |= XFS_PTAG_SHUTDOWN_CORRUPT; +#endif + + if (xfs_panic_mask && (xfs_panic_mask & panic_tag) + && (level & CE_ALERT)) { + level &= ~CE_ALERT; + level |= CE_PANIC; + cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG."); + } + va_start(ap, fmt); + xfs_fs_vcmn_err(level, mp, fmt, ap); + va_end(ap); +} + +void +xfs_error_report( + char *tag, + int level, + xfs_mount_t *mp, + char *fname, + int linenum, + inst_t *ra) +{ + if (level <= xfs_error_level) { + if (mp != NULL) { + xfs_cmn_err(XFS_PTAG_ERROR_REPORT, + CE_ALERT, mp, + "XFS internal error %s at line %d of file %s. Caller 0x%p\n", + tag, linenum, fname, ra); + } else { + cmn_err(CE_ALERT, + "XFS internal error %s at line %d of file %s. 
Caller 0x%p\n",
+				tag, linenum, fname, ra);
+		}
+
+		xfs_stack_trace();
+	}
+}
+
+void
+xfs_hex_dump(void *p, int length)
+{
+	__uint8_t *uip = (__uint8_t*)p;
+	int	i;
+	char	sbuf[128], *s;
+
+	s = sbuf;
+	*s = '\0';
+	for (i=0; i<length; i++, uip++) {
+		if ((i % 16) == 0) {
+			if (*s != '\0')
+				cmn_err(CE_DEBUG, "%s\n", sbuf);
+			s = sbuf;
+			sprintf(s, "0x%x: ", i);
+			while (*s != '\0')
+				s++;
+		}
+		sprintf(s, "%02x ", *uip);
+		while (*s != '\0')
+			s++;
+	}
+	cmn_err(CE_DEBUG, "%s\n", sbuf);
+}
+
+void
+xfs_corruption_error(
+	char		*tag,
+	int		level,
+	xfs_mount_t	*mp,
+	void		*p,
+	char		*fname,
+	int		linenum,
+	inst_t		*ra)
+{
+	if (level <= xfs_error_level)
+		xfs_hex_dump(p, 16);
+	xfs_error_report(tag, level, mp, fname, linenum, ra);
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_error.h linux.21rc5-ac2/fs/xfs/xfs_error.h
--- linux.21rc5/fs/xfs/xfs_error.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_error.h	2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_ERROR_H__
+#define __XFS_ERROR_H__
+
+#ifdef DEBUG
+#define XFS_ERROR_NTRAP	10
+extern int	xfs_etrap[XFS_ERROR_NTRAP];
+extern int	xfs_error_trap(int);
+#define XFS_ERROR(e)	xfs_error_trap(e)
+#else
+#define XFS_ERROR(e)	(e)
+#endif
+
+struct xfs_mount;
+
+extern void xfs_error_report(char *tag, int level, struct xfs_mount *mp,
+				char *fname, int linenum, inst_t *ra);
+extern void xfs_corruption_error(char *tag, int level, struct xfs_mount *mp,
+				void *p, char *fname, int linenum, inst_t *ra);
+extern void xfs_hex_dump(void *p, int length);
+extern int xfs_error_level;
+
+static inline void xfs_stack_trace(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20)
+	dump_stack();
+#else
+ #if defined(CONFIG_X86) || defined(CONFIG_X86_64)
+	show_stack(0);
+ #else
+	return;
+ #endif
+#endif
+}
+
+#define XFS_ERROR_REPORT(e, lvl, mp)	\
+	xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address)
+#define XFS_CORRUPTION_ERROR(e, lvl, mp, mem)	\
+	xfs_corruption_error(e, lvl, mp, mem, \
+			     __FILE__, __LINE__, __return_address)
+
+#define XFS_ERRLEVEL_OFF	0
+#define XFS_ERRLEVEL_LOW	1
+#define XFS_ERRLEVEL_HIGH	5
+
+/*
+ * error injection tags - the labels can be anything you want
+ * but each tag should have its own unique number
+ */
+
+#define XFS_ERRTAG_NOERROR				0
+#define XFS_ERRTAG_IFLUSH_1				1
+#define XFS_ERRTAG_IFLUSH_2				2
+#define XFS_ERRTAG_IFLUSH_3				3
+#define XFS_ERRTAG_IFLUSH_4				4
+#define XFS_ERRTAG_IFLUSH_5				5
+#define XFS_ERRTAG_IFLUSH_6				6
+#define XFS_ERRTAG_DA_READ_BUF				7
+#define XFS_ERRTAG_BTREE_CHECK_LBLOCK			8
+#define XFS_ERRTAG_BTREE_CHECK_SBLOCK			9
+#define XFS_ERRTAG_ALLOC_READ_AGF			10
+#define XFS_ERRTAG_IALLOC_READ_AGI			11
+#define XFS_ERRTAG_ITOBP_INOTOBP			12
+#define XFS_ERRTAG_IUNLINK				13
+#define XFS_ERRTAG_IUNLINK_REMOVE			14
+#define XFS_ERRTAG_DIR_INO_VALIDATE			15
+#define XFS_ERRTAG_BULKSTAT_READ_CHUNK			16
+#define XFS_ERRTAG_IODONE_IOERR				17
+#define XFS_ERRTAG_STRATREAD_IOERR			18
+#define XFS_ERRTAG_STRATCMPL_IOERR			19
+#define XFS_ERRTAG_DIOWRITE_IOERR			20
+#define XFS_ERRTAG_BMAPIFORMAT				21
+#define XFS_ERRTAG_MAX					22
+
+/*
+ * Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
+ */
+#define XFS_RANDOM_DEFAULT				100
+#define XFS_RANDOM_IFLUSH_1				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_2				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_3				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_4				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_5				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_6				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_DA_READ_BUF				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_BTREE_CHECK_LBLOCK			(XFS_RANDOM_DEFAULT/4)
+#define XFS_RANDOM_BTREE_CHECK_SBLOCK			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_ALLOC_READ_AGF			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IALLOC_READ_AGI			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_ITOBP_INOTOBP			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IUNLINK				XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IUNLINK_REMOVE			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_DIR_INO_VALIDATE			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_BULKSTAT_READ_CHUNK			XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IODONE_IOERR				(XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_STRATREAD_IOERR			(XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_STRATCMPL_IOERR			(XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_DIOWRITE_IOERR			(XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_BMAPIFORMAT				XFS_RANDOM_DEFAULT
+
+#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
+extern int xfs_error_test(int, int *, char *, int, char *, unsigned long);
+void xfs_error_test_init(void);
+
+#define XFS_NUM_INJECT_ERROR	10
+
+#ifdef __ANSI_CPP__
+#define XFS_TEST_ERROR(expr, mp, tag, rf)	\
+	((expr) || \
+	 xfs_error_test((tag), (mp)->m_fixedfsid, #expr, __LINE__, __FILE__, \
+			(rf)))
+#else
+#define XFS_TEST_ERROR(expr, mp, tag, rf)	\
+	((expr) || \
+	 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
+			(rf)))
+#endif /* __ANSI_CPP__ */
+
+int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
+int xfs_errortag_clear(int error_tag, xfs_mount_t *mp);
+
+int
xfs_errortag_clearall(xfs_mount_t *mp);
+int xfs_errortag_clearall_umount(int64_t fsid, char *fsname,
+				 int loud);
+#else
+#define XFS_TEST_ERROR(expr, mp, tag, rf)	(expr)
+#define xfs_errortag_add(tag, mp)		(ENOSYS)
+#define xfs_errortag_clearall(mp)		(ENOSYS)
+#endif /* (DEBUG || INDUCE_IO_ERROR) */
+
+/*
+ * XFS panic tags -- allow a call to xfs_cmn_err() to be turned into
+ *			a panic by setting xfs_panic_mask in a
+ *			sysctl.  Update xfs_max[XFS_PARAM] if
+ *			more are added.
+ */
+#define XFS_NO_PTAG			0LL
+#define XFS_PTAG_IFLUSH			0x0000000000000001LL
+#define XFS_PTAG_LOGRES			0x0000000000000002LL
+#define XFS_PTAG_AILDELETE		0x0000000000000004LL
+#define XFS_PTAG_ERROR_REPORT		0x0000000000000008LL
+#define XFS_PTAG_SHUTDOWN_CORRUPT	0x0000000000000010LL
+#define XFS_PTAG_SHUTDOWN_IOERROR	0x0000000000000020LL
+#define XFS_PTAG_SHUTDOWN_LOGERROR	0x0000000000000040LL
+
+struct xfs_mount;
+/* PRINTFLIKE4 */
+void xfs_cmn_err(uint64_t panic_tag, int level, struct xfs_mount *mp,
+			char *fmt, ...);
+/* PRINTFLIKE3 */
+void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...);
+
+#endif	/* __XFS_ERROR_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_extfree_item.c linux.21rc5-ac2/fs/xfs/xfs_extfree_item.c
--- linux.21rc5/fs/xfs/xfs_extfree_item.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_extfree_item.c	2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * This file contains the implementation of the xfs_efi_log_item
+ * and xfs_efd_log_item items.
+ */
+
+#include "xfs.h"
+
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_trans_priv.h"
+#include "xfs_extfree_item.h"
+
+
+kmem_zone_t	*xfs_efi_zone;
+kmem_zone_t	*xfs_efd_zone;
+
+STATIC void	xfs_efi_item_unlock(xfs_efi_log_item_t *);
+STATIC void	xfs_efi_item_abort(xfs_efi_log_item_t *);
+STATIC void	xfs_efd_item_abort(xfs_efd_log_item_t *);
+
+
+
+/*
+ * This returns the number of iovecs needed to log the given efi item.
+ * We only need 1 iovec for an efi item. It just logs the efi_log_format + * structure. + */ +/*ARGSUSED*/ +STATIC uint +xfs_efi_item_size(xfs_efi_log_item_t *efip) +{ + return 1; +} + +/* + * This is called to fill in the vector of log iovecs for the + * given efi log item. We use only 1 iovec, and we point that + * at the efi_log_format structure embedded in the efi item. + * It is at this point that we assert that all of the extent + * slots in the efi item have been filled. + */ +STATIC void +xfs_efi_item_format(xfs_efi_log_item_t *efip, + xfs_log_iovec_t *log_vector) +{ + uint size; + + ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents); + + efip->efi_format.efi_type = XFS_LI_EFI; + + size = sizeof(xfs_efi_log_format_t); + size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t); + efip->efi_format.efi_size = 1; + + log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format); + log_vector->i_len = size; + ASSERT(size >= sizeof(xfs_efi_log_format_t)); +} + + +/* + * Pinning has no meaning for an efi item, so just return. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_pin(xfs_efi_log_item_t *efip) +{ + return; +} + + +/* + * While EFIs cannot really be pinned, the unpin operation is the + * last place at which the EFI is manipulated during a transaction. + * Here we coordinate with xfs_efi_cancel() to determine who gets to + * free the EFI. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) +{ + int nexts; + int size; + xfs_mount_t *mp; + SPLDECL(s); + + mp = efip->efi_item.li_mountp; + AIL_LOCK(mp, s); + if (efip->efi_flags & XFS_EFI_CANCELED) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); + + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } else { + efip->efi_flags |= XFS_EFI_COMMITTED; + AIL_UNLOCK(mp, s); + } + + return; +} + +/* + * like unpin only we have to also clear the xaction descriptor + * pointing the log item if we free the item. This routine duplicates + * unpin because efi_flags is protected by the AIL lock. Freeing + * the descriptor and then calling unpin would force us to drop the AIL + * lock which would open up a race condition. + */ +STATIC void +xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) +{ + int nexts; + int size; + xfs_mount_t *mp; + xfs_log_item_desc_t *lidp; + SPLDECL(s); + + mp = efip->efi_item.li_mountp; + AIL_LOCK(mp, s); + if (efip->efi_flags & XFS_EFI_CANCELED) { + /* + * free the xaction descriptor pointing to this item + */ + lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) efip); + xfs_trans_free_item(tp, lidp); + /* + * pull the item off the AIL. + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); + /* + * now free the item itself + */ + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } else { + efip->efi_flags |= XFS_EFI_COMMITTED; + AIL_UNLOCK(mp, s); + } + + return; +} + +/* + * Efi items have no locking or pushing. 
However, since EFIs are + * pulled from the AIL when their corresponding EFDs are committed + * to disk, their situation is very similar to being pinned. Return + * XFS_ITEM_PINNED so that the caller will eventually flush the log. + * This should help in getting the EFI out of the AIL. + */ +/*ARGSUSED*/ +STATIC uint +xfs_efi_item_trylock(xfs_efi_log_item_t *efip) +{ + return XFS_ITEM_PINNED; +} + +/* + * Efi items have no locking, so just return. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_unlock(xfs_efi_log_item_t *efip) +{ + if (efip->efi_item.li_flags & XFS_LI_ABORTED) + xfs_efi_item_abort(efip); + return; +} + +/* + * The EFI is logged only once and cannot be moved in the log, so + * simply return the lsn at which it's been logged. The canceled + * flag is not paid any attention here. Checking for that is delayed + * until the EFI is unpinned. + */ +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn) +{ + return lsn; +} + +/* + * This is called when the transaction logging the EFI is aborted. + * Free up the EFI and return. No need to clean up the slot for + * the item in the transaction. That was done by the unpin code + * which is called prior to this routine in the abort/fs-shutdown path. + */ +STATIC void +xfs_efi_item_abort(xfs_efi_log_item_t *efip) +{ + int nexts; + int size; + + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + return; +} + +/* + * There isn't much you can do to push on an efi item. It is simply + * stuck waiting for all of its corresponding efd items to be + * committed to disk. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_push(xfs_efi_log_item_t *efip) +{ + return; +} + +/* + * The EFI dependency tracking op doesn't do squat. It can't because + * it doesn't know where the free extent is coming from. The dependency + * tracking has to be handled by the "enclosing" metadata object. For + * example, for inodes, the inode is locked throughout the extent freeing + * so the dependency should be recorded there. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_committing(xfs_efi_log_item_t *efip, xfs_lsn_t lsn) +{ + return; +} + +/* + * This is the ops vector shared by all efi log items. + */ +struct xfs_item_ops xfs_efi_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_efi_item_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_efi_item_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_efi_item_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_efi_item_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *)) + xfs_efi_item_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efi_item_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_efi_item_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_efi_item_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_efi_item_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_efi_item_committing +}; + + +/* + * Allocate and initialize an efi item with the given number of extents. 
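+ *
+ * A hedged sketch of the intended pairing (transaction details are
+ * assumed, not shown here): the caller logs an intent first, then
+ * records completion in the follow-on transaction,
+ *
+ *	efip = xfs_efi_init(mp, nextents);
+ *	... log the extents to free, commit ...
+ *	efdp = xfs_efd_init(mp, efip, nextents);
+ *	... free the extents, commit; committing the EFD ultimately
+ *	releases the EFI via xfs_efi_release() ...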
+ */ +xfs_efi_log_item_t * +xfs_efi_init(xfs_mount_t *mp, + uint nextents) + +{ + xfs_efi_log_item_t *efip; + uint size; + + ASSERT(nextents > 0); + if (nextents > XFS_EFI_MAX_FAST_EXTENTS) { + size = (uint)(sizeof(xfs_efi_log_item_t) + + ((nextents - 1) * sizeof(xfs_extent_t))); + efip = (xfs_efi_log_item_t*)kmem_zalloc(size, KM_SLEEP); + } else { + efip = (xfs_efi_log_item_t*)kmem_zone_zalloc(xfs_efi_zone, + KM_SLEEP); + } + + efip->efi_item.li_type = XFS_LI_EFI; + efip->efi_item.li_ops = &xfs_efi_item_ops; + efip->efi_item.li_mountp = mp; + efip->efi_format.efi_nextents = nextents; + efip->efi_format.efi_id = (__psint_t)(void*)efip; + + return (efip); +} + +/* + * This is called by the efd item code below to release references to + * the given efi item. Each efd calls this with the number of + * extents that it has logged, and when the sum of these reaches + * the total number of extents logged by this efi item we can free + * the efi item. + * + * Freeing the efi item requires that we remove it from the AIL. + * We'll use the AIL lock to protect our counters as well as + * the removal from the AIL. + */ +void +xfs_efi_release(xfs_efi_log_item_t *efip, + uint nextents) +{ + xfs_mount_t *mp; + int extents_left; + uint size; + int nexts; + SPLDECL(s); + + mp = efip->efi_item.li_mountp; + ASSERT(efip->efi_next_extent > 0); + ASSERT(efip->efi_flags & XFS_EFI_COMMITTED); + + AIL_LOCK(mp, s); + ASSERT(efip->efi_next_extent >= nextents); + efip->efi_next_extent -= nextents; + extents_left = efip->efi_next_extent; + if (extents_left == 0) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); + } else { + AIL_UNLOCK(mp, s); + } + + if (extents_left == 0) { + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } +} + +/* + * This is called when the transaction that should be committing the + * EFD corresponding to the given EFI is aborted. The committed and + * canceled flags are used to coordinate the freeing of the EFI and + * the references by the transaction that committed it. + */ +STATIC void +xfs_efi_cancel( + xfs_efi_log_item_t *efip) +{ + int nexts; + int size; + xfs_mount_t *mp; + SPLDECL(s); + + mp = efip->efi_item.li_mountp; + AIL_LOCK(mp, s); + if (efip->efi_flags & XFS_EFI_COMMITTED) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); + + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } else { + efip->efi_flags |= XFS_EFI_CANCELED; + AIL_UNLOCK(mp, s); + } + + return; +} + + + + + +/* + * This returns the number of iovecs needed to log the given efd item. + * We only need 1 iovec for an efd item. It just logs the efd_log_format + * structure. + */ +/*ARGSUSED*/ +STATIC uint +xfs_efd_item_size(xfs_efd_log_item_t *efdp) +{ + return 1; +} + +/* + * This is called to fill in the vector of log iovecs for the + * given efd log item. We use only 1 iovec, and we point that + * at the efd_log_format structure embedded in the efd item. + * It is at this point that we assert that all of the extent + * slots in the efd item have been filled. 
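+ *
+ * As a worked example of the sizing below: with efd_nextents == 3 the
+ * iovec length is sizeof(xfs_efd_log_format_t) plus two further
+ * xfs_extent_t slots, since one extent is already embedded in the
+ * format structure.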
+ */ +STATIC void +xfs_efd_item_format(xfs_efd_log_item_t *efdp, + xfs_log_iovec_t *log_vector) +{ + uint size; + + ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents); + + efdp->efd_format.efd_type = XFS_LI_EFD; + + size = sizeof(xfs_efd_log_format_t); + size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t); + efdp->efd_format.efd_size = 1; + + log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format); + log_vector->i_len = size; + ASSERT(size >= sizeof(xfs_efd_log_format_t)); +} + + +/* + * Pinning has no meaning for an efd item, so just return. + */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_pin(xfs_efd_log_item_t *efdp) +{ + return; +} + + +/* + * Since pinning has no meaning for an efd item, unpinning does + * not either. + */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_unpin(xfs_efd_log_item_t *efdp, int stale) +{ + return; +} + +/*ARGSUSED*/ +STATIC void +xfs_efd_item_unpin_remove(xfs_efd_log_item_t *efdp, xfs_trans_t *tp) +{ + return; +} + +/* + * Efd items have no locking, so just return success. + */ +/*ARGSUSED*/ +STATIC uint +xfs_efd_item_trylock(xfs_efd_log_item_t *efdp) +{ + return XFS_ITEM_LOCKED; +} + +/* + * Efd items have no locking or pushing, so return failure + * so that the caller doesn't bother with us. + */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_unlock(xfs_efd_log_item_t *efdp) +{ + if (efdp->efd_item.li_flags & XFS_LI_ABORTED) + xfs_efd_item_abort(efdp); + return; +} + +/* + * When the efd item is committed to disk, all we need to do + * is delete our reference to our partner efi item and then + * free ourselves. Since we're freeing ourselves we must + * return -1 to keep the transaction code from further referencing + * this item. + */ +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn) +{ + uint size; + int nexts; + + /* + * If we got a log I/O error, it's always the case that the LR with the + * EFI got unpinned and freed before the EFD got aborted. + */ + if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0) + xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents); + + nexts = efdp->efd_format.efd_nextents; + if (nexts > XFS_EFD_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efd_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efdp, size); + } else { + kmem_zone_free(xfs_efd_zone, efdp); + } + + return (xfs_lsn_t)-1; +} + +/* + * The transaction of which this EFD is a part has been aborted. + * Inform its companion EFI of this fact and then clean up after + * ourselves. No need to clean up the slot for the item in the + * transaction. That was done by the unpin code which is called + * prior to this routine in the abort/fs-shutdown path. + */ +STATIC void +xfs_efd_item_abort(xfs_efd_log_item_t *efdp) +{ + int nexts; + int size; + + /* + * If we got a log I/O error, it's always the case that the LR with the + * EFI got unpinned and freed before the EFD got aborted. So don't + * reference the EFI at all in that case. + */ + if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0) + xfs_efi_cancel(efdp->efd_efip); + + nexts = efdp->efd_format.efd_nextents; + if (nexts > XFS_EFD_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efd_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efdp, size); + } else { + kmem_zone_free(xfs_efd_zone, efdp); + } + return; +} + +/* + * There isn't much you can do to push on an efd item. It is simply + * stuck waiting for the log to be flushed to disk. 
+ */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_push(xfs_efd_log_item_t *efdp) +{ + return; +} + +/* + * The EFD dependency tracking op doesn't do squat. It can't because + * it doesn't know where the free extent is coming from. The dependency + * tracking has to be handled by the "enclosing" metadata object. For + * example, for inodes, the inode is locked throughout the extent freeing + * so the dependency should be recorded there. + */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_committing(xfs_efd_log_item_t *efip, xfs_lsn_t lsn) +{ + return; +} + +/* + * This is the ops vector shared by all efd log items. + */ +struct xfs_item_ops xfs_efd_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_efd_item_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_efd_item_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_efd_item_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_efd_item_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) + xfs_efd_item_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efd_item_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_efd_item_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_efd_item_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_efd_item_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_efd_item_committing +}; + + +/* + * Allocate and initialize an efd item with the given number of extents. + */ +xfs_efd_log_item_t * +xfs_efd_init(xfs_mount_t *mp, + xfs_efi_log_item_t *efip, + uint nextents) + +{ + xfs_efd_log_item_t *efdp; + uint size; + + ASSERT(nextents > 0); + if (nextents > XFS_EFD_MAX_FAST_EXTENTS) { + size = (uint)(sizeof(xfs_efd_log_item_t) + + ((nextents - 1) * sizeof(xfs_extent_t))); + efdp = (xfs_efd_log_item_t*)kmem_zalloc(size, KM_SLEEP); + } else { + efdp = (xfs_efd_log_item_t*)kmem_zone_zalloc(xfs_efd_zone, + KM_SLEEP); + } + + efdp->efd_item.li_type = XFS_LI_EFD; + efdp->efd_item.li_ops = &xfs_efd_item_ops; + efdp->efd_item.li_mountp = mp; + efdp->efd_efip = efip; + efdp->efd_format.efd_nextents = nextents; + efdp->efd_format.efd_efi_id = efip->efi_format.efi_id; + + return (efdp); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_extfree_item.h linux.21rc5-ac2/fs/xfs/xfs_extfree_item.h --- linux.21rc5/fs/xfs/xfs_extfree_item.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_extfree_item.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_EXTFREE_ITEM_H__ +#define __XFS_EXTFREE_ITEM_H__ + +struct xfs_mount; +struct kmem_zone; + +typedef struct xfs_extent { + xfs_dfsbno_t ext_start; + xfs_extlen_t ext_len; +} xfs_extent_t; + +/* + * This is the structure used to lay out an efi log item in the + * log. The efi_extents field is a variable size array whose + * size is given by efi_nextents. + */ +typedef struct xfs_efi_log_format { + unsigned short efi_type; /* efi log item type */ + unsigned short efi_size; /* size of this item */ + uint efi_nextents; /* # extents to free */ + __uint64_t efi_id; /* efi identifier */ + xfs_extent_t efi_extents[1]; /* array of extents to free */ +} xfs_efi_log_format_t; + +/* + * This is the structure used to lay out an efd log item in the + * log. The efd_extents array is a variable size array whose + * size is given by efd_nextents; + */ +typedef struct xfs_efd_log_format { + unsigned short efd_type; /* efd log item type */ + unsigned short efd_size; /* size of this item */ + uint efd_nextents; /* # of extents freed */ + __uint64_t efd_efi_id; /* id of corresponding efi */ + xfs_extent_t efd_extents[1]; /* array of extents freed */ +} xfs_efd_log_format_t; + + +#ifdef __KERNEL__ + +/* + * Max number of extents in fast allocation path. + */ +#define XFS_EFI_MAX_FAST_EXTENTS 16 + +/* + * Define EFI flags. + */ +#define XFS_EFI_RECOVERED 0x1 +#define XFS_EFI_COMMITTED 0x2 +#define XFS_EFI_CANCELED 0x4 + +/* + * This is the "extent free intention" log item. It is used + * to log the fact that some extents need to be free. It is + * used in conjunction with the "extent free done" log item + * described below. + */ +typedef struct xfs_efi_log_item { + xfs_log_item_t efi_item; + uint efi_flags; /* misc flags */ + uint efi_next_extent; + xfs_efi_log_format_t efi_format; +} xfs_efi_log_item_t; + +/* + * This is the "extent free done" log item. It is used to log + * the fact that some extents earlier mentioned in an efi item + * have been freed. + */ +typedef struct xfs_efd_log_item { + xfs_log_item_t efd_item; + xfs_efi_log_item_t *efd_efip; + uint efd_next_extent; + xfs_efd_log_format_t efd_format; +} xfs_efd_log_item_t; + +/* + * Max number of extents in fast allocation path. + */ +#define XFS_EFD_MAX_FAST_EXTENTS 16 + +extern struct kmem_zone *xfs_efi_zone; +extern struct kmem_zone *xfs_efd_zone; + +xfs_efi_log_item_t *xfs_efi_init(struct xfs_mount *, uint); +xfs_efd_log_item_t *xfs_efd_init(struct xfs_mount *, xfs_efi_log_item_t *, + uint); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_EXTFREE_ITEM_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_fs.h linux.21rc5-ac2/fs/xfs/xfs_fs.h --- linux.21rc5/fs/xfs/xfs_fs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_fs.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,513 @@ +/* + * Copyright (c) 1995-2003 Silicon Graphics, Inc. All Rights Reserved. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_FS_H__
+#define __XFS_FS_H__
+
+/*
+ * SGI's XFS filesystem's major stuff (constants, structures)
+ */
+
+#define XFS_NAME	"xfs"
+
+/*
+ * Direct I/O attribute record used with XFS_IOC_DIOINFO
+ * d_miniosz is the min xfer size, xfer size multiple and file seek offset
+ * alignment.
+ */
+#ifndef HAVE_DIOATTR
+struct dioattr {
+	__u32		d_mem;		/* data buffer memory alignment */
+	__u32		d_miniosz;	/* min xfer size		*/
+	__u32		d_maxiosz;	/* max xfer size		*/
+};
+#endif
+
+/*
+ * Structure for XFS_IOC_FSGETXATTR[A] and XFS_IOC_FSSETXATTR.
+ */
+#ifndef HAVE_FSXATTR
+struct fsxattr {
+	__u32		fsx_xflags;	/* xflags field value (get/set) */
+	__u32		fsx_extsize;	/* extsize field value (get/set)*/
+	__u32		fsx_nextents;	/* nextents field value (get)	*/
+	unsigned char	fsx_pad[16];
+};
+#endif
+
+/*
+ * Flags for the bs_xflags/fsx_xflags field
+ * There should be a one-to-one correspondence between these flags and the
+ * XFS_DIFLAG_s.
+ */
+#define XFS_XFLAG_REALTIME	0x00000001
+#define XFS_XFLAG_PREALLOC	0x00000002
+#define XFS_XFLAG_HASATTR	0x80000000	/* no DIFLAG for this	*/
+#define XFS_XFLAG_ALL		\
+	( XFS_XFLAG_REALTIME|XFS_XFLAG_PREALLOC|XFS_XFLAG_HASATTR )
+
+
+/*
+ * Structure for XFS_IOC_GETBMAP.
+ * On input, fill in bmv_offset and bmv_length of the first structure
+ * to indicate the area of interest in the file, and bmv_count with the
+ * number of array elements given.  The first structure is updated on
+ * return to give the offset and length for the next call.
+ */
+#ifndef HAVE_GETBMAP
+struct getbmap {
+	__s64		bmv_offset;	/* file offset of segment in blocks */
+	__s64		bmv_block;	/* starting block (64-bit daddr_t)  */
+	__s64		bmv_length;	/* length of segment, blocks	    */
+	__s32		bmv_count;	/* # of entries in array incl. 1st  */
+	__s32		bmv_entries;	/* # of entries filled in (output)  */
+};
+#endif
+
+/*
+ * Structure for XFS_IOC_GETBMAPX.	Fields bmv_offset through bmv_entries
+ * are used exactly as in the getbmap structure.  The getbmapx structure
+ * has additional bmv_iflags and bmv_oflags fields.  The bmv_iflags field
+ * is only used for the first structure.  It contains input flags
+ * specifying XFS_IOC_GETBMAPX actions.
The bmv_oflags field is filled
+ * in by the XFS_IOC_GETBMAPX command for each returned structure after
+ * the first.
+ */
+#ifndef HAVE_GETBMAPX
+struct getbmapx {
+	__s64		bmv_offset;	/* file offset of segment in blocks */
+	__s64		bmv_block;	/* starting block (64-bit daddr_t)  */
+	__s64		bmv_length;	/* length of segment, blocks	    */
+	__s32		bmv_count;	/* # of entries in array incl. 1st  */
+	__s32		bmv_entries;	/* # of entries filled in (output). */
+	__s32		bmv_iflags;	/* input flags (1st structure)	    */
+	__s32		bmv_oflags;	/* output flags (after 1st structure)*/
+	__s32		bmv_unused1;	/* future use			    */
+	__s32		bmv_unused2;	/* future use			    */
};
+#endif
+
+/*	bmv_iflags values - set by XFS_IOC_GETBMAPX caller.	*/
+#define BMV_IF_ATTRFORK		0x1	/* return attr fork rather than data */
+#define BMV_IF_NO_DMAPI_READ	0x2	/* Do not generate DMAPI read event  */
+#define BMV_IF_PREALLOC		0x4	/* rtn status BMV_OF_PREALLOC if req */
+#define BMV_IF_VALID	(BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC)
+#ifdef __KERNEL__
+#define BMV_IF_EXTENDED 0x40000000	/* getbmapx if set */
+#endif
+
+/*	bmv_oflags values - returned for each non-header segment */
+#define BMV_OF_PREALLOC		0x1	/* segment = unwritten pre-allocation */
+
+/*	Convert getbmap <-> getbmapx - move fields from p1 to p2. */
+#define GETBMAP_CONVERT(p1,p2) {	\
+	p2.bmv_offset = p1.bmv_offset;	\
+	p2.bmv_block = p1.bmv_block;	\
+	p2.bmv_length = p1.bmv_length;	\
+	p2.bmv_count = p1.bmv_count;	\
+	p2.bmv_entries = p1.bmv_entries; }
+
+
+/*
+ * Structure for XFS_IOC_FSSETDM.
+ * For use by backup and restore programs to set the XFS on-disk inode
+ * fields di_dmevmask and di_dmstate.  These must be set to exactly and
+ * only values previously obtained via xfs_bulkstat!  (Specifically the
+ * xfs_bstat_t fields bs_dmevmask and bs_dmstate.)
+ */
+#ifndef HAVE_FSDMIDATA
+struct fsdmidata {
+	__u32		fsd_dmevmask;	/* corresponds to di_dmevmask	*/
+	__u16		fsd_padding;
+	__u16		fsd_dmstate;	/* corresponds to di_dmstate	*/
+};
+#endif
+
+/*
+ * File segment locking set data type for 64 bit access.
+ * Also used for all the RESV/FREE interfaces.
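+ *
+ * A minimal user-space sketch (illustrative; fd and size are assumed)
+ * of preallocating space via the RESVSP ioctls defined later in this
+ * file:
+ *
+ *	xfs_flock64_t fl = { 0 };
+ *
+ *	fl.l_whence = SEEK_SET;
+ *	fl.l_start = 0;
+ *	fl.l_len = size;	(0 would mean "until end of file")
+ *	ioctl(fd, XFS_IOC_RESVSP64, &fl);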
+ */ +typedef struct xfs_flock64 { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; /* len == 0 means until end of file */ + __s32 l_sysid; + pid_t l_pid; + __s32 l_pad[4]; /* reserve area */ +} xfs_flock64_t; + +/* + * Output for XFS_IOC_FSGEOMETRY_V1 + */ +typedef struct xfs_fsop_geom_v1 { + __u32 blocksize; /* filesystem (data) block size */ + __u32 rtextsize; /* realtime extent size */ + __u32 agblocks; /* fsblocks in an AG */ + __u32 agcount; /* number of allocation groups */ + __u32 logblocks; /* fsblocks in the log */ + __u32 sectsize; /* (data) sector size, bytes */ + __u32 inodesize; /* inode size in bytes */ + __u32 imaxpct; /* max allowed inode space(%) */ + __u64 datablocks; /* fsblocks in data subvolume */ + __u64 rtblocks; /* fsblocks in realtime subvol */ + __u64 rtextents; /* rt extents in realtime subvol*/ + __u64 logstart; /* starting fsblock of the log */ + unsigned char uuid[16]; /* unique id of the filesystem */ + __u32 sunit; /* stripe unit, fsblocks */ + __u32 swidth; /* stripe width, fsblocks */ + __s32 version; /* structure version */ + __u32 flags; /* superblock version flags */ + __u32 logsectsize; /* log sector size, bytes */ + __u32 rtsectsize; /* realtime sector size, bytes */ + __u32 dirblocksize; /* directory block size, bytes */ +} xfs_fsop_geom_v1_t; + +/* + * Output for XFS_IOC_FSGEOMETRY + */ +typedef struct xfs_fsop_geom { + __u32 blocksize; /* filesystem (data) block size */ + __u32 rtextsize; /* realtime extent size */ + __u32 agblocks; /* fsblocks in an AG */ + __u32 agcount; /* number of allocation groups */ + __u32 logblocks; /* fsblocks in the log */ + __u32 sectsize; /* (data) sector size, bytes */ + __u32 inodesize; /* inode size in bytes */ + __u32 imaxpct; /* max allowed inode space(%) */ + __u64 datablocks; /* fsblocks in data subvolume */ + __u64 rtblocks; /* fsblocks in realtime subvol */ + __u64 rtextents; /* rt extents in realtime subvol*/ + __u64 logstart; /* starting fsblock of the log */ + unsigned char uuid[16]; /* unique id of the filesystem */ + __u32 sunit; /* stripe unit, fsblocks */ + __u32 swidth; /* stripe width, fsblocks */ + __s32 version; /* structure version */ + __u32 flags; /* superblock version flags */ + __u32 logsectsize; /* log sector size, bytes */ + __u32 rtsectsize; /* realtime sector size, bytes */ + __u32 dirblocksize; /* directory block size, bytes */ + __u32 logsunit; /* log stripe unit, bytes */ +} xfs_fsop_geom_t; + +/* Output for XFS_FS_COUNTS */ +typedef struct xfs_fsop_counts { + __u64 freedata; /* free data section blocks */ + __u64 freertx; /* free rt extents */ + __u64 freeino; /* free inodes */ + __u64 allocino; /* total allocated inodes */ +} xfs_fsop_counts_t; + +/* Input/Output for XFS_GET_RESBLKS and XFS_SET_RESBLKS */ +typedef struct xfs_fsop_resblks { + __u64 resblks; + __u64 resblks_avail; +} xfs_fsop_resblks_t; + +#define XFS_FSOP_GEOM_VERSION 0 + +#define XFS_FSOP_GEOM_FLAGS_ATTR 0x0001 /* attributes in use */ +#define XFS_FSOP_GEOM_FLAGS_NLINK 0x0002 /* 32-bit nlink values */ +#define XFS_FSOP_GEOM_FLAGS_QUOTA 0x0004 /* quotas enabled */ +#define XFS_FSOP_GEOM_FLAGS_IALIGN 0x0008 /* inode alignment */ +#define XFS_FSOP_GEOM_FLAGS_DALIGN 0x0010 /* large data alignment */ +#define XFS_FSOP_GEOM_FLAGS_SHARED 0x0020 /* read-only shared */ +#define XFS_FSOP_GEOM_FLAGS_EXTFLG 0x0040 /* special extent flag */ +#define XFS_FSOP_GEOM_FLAGS_DIRV2 0x0080 /* directory version 2 */ +#define XFS_FSOP_GEOM_FLAGS_LOGV2 0x0100 /* log format version 2 */ +#define XFS_FSOP_GEOM_FLAGS_SECTOR 
0x0200 /* sector sizes >1BB */ + + +/* + * Minimum and maximum sizes need for growth checks + */ +#define XFS_MIN_AG_BLOCKS 64 +#define XFS_MIN_LOG_BLOCKS 512 +#define XFS_MAX_LOG_BLOCKS (64 * 1024) +#define XFS_MIN_LOG_BYTES (256 * 1024) +#define XFS_MAX_LOG_BYTES (128 * 1024 * 1024) + +/* + * Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT + */ +typedef struct xfs_growfs_data { + __u64 newblocks; /* new data subvol size, fsblocks */ + __u32 imaxpct; /* new inode space percentage limit */ +} xfs_growfs_data_t; + +typedef struct xfs_growfs_log { + __u32 newblocks; /* new log size, fsblocks */ + __u32 isint; /* 1 if new log is internal */ +} xfs_growfs_log_t; + +typedef struct xfs_growfs_rt { + __u64 newblocks; /* new realtime size, fsblocks */ + __u32 extsize; /* new realtime extent size, fsblocks */ +} xfs_growfs_rt_t; + + +/* + * Structures returned from ioctl XFS_IOC_FSBULKSTAT & XFS_IOC_FSBULKSTAT_SINGLE + */ +typedef struct xfs_bstime { + time_t tv_sec; /* seconds */ + __s32 tv_nsec; /* and nanoseconds */ +} xfs_bstime_t; + +typedef struct xfs_bstat { + __u64 bs_ino; /* inode number */ + __u16 bs_mode; /* type and mode */ + __u16 bs_nlink; /* number of links */ + __u32 bs_uid; /* user id */ + __u32 bs_gid; /* group id */ + __u32 bs_rdev; /* device value */ + __s32 bs_blksize; /* block size */ + __s64 bs_size; /* file size */ + xfs_bstime_t bs_atime; /* access time */ + xfs_bstime_t bs_mtime; /* modify time */ + xfs_bstime_t bs_ctime; /* inode change time */ + int64_t bs_blocks; /* number of blocks */ + __u32 bs_xflags; /* extended flags */ + __s32 bs_extsize; /* extent size */ + __s32 bs_extents; /* number of extents */ + __u32 bs_gen; /* generation count */ + __u16 bs_projid; /* project id */ + unsigned char bs_pad[14]; /* pad space, unused */ + __u32 bs_dmevmask; /* DMIG event mask */ + __u16 bs_dmstate; /* DMIG state info */ + __u16 bs_aextents; /* attribute number of extents */ +} xfs_bstat_t; + +/* + * The user-level BulkStat Request interface structure. + */ +typedef struct xfs_fsop_bulkreq { + __u64 *lastip; /* last inode # pointer */ + __s32 icount; /* count of entries in buffer */ + void *ubuffer; /* user buffer for inode desc. */ + __s32 *ocount; /* output count pointer */ +} xfs_fsop_bulkreq_t; + + +/* + * Structures returned from xfs_inumbers routine (XFS_IOC_FSINUMBERS). + */ +typedef struct xfs_inogrp { + __u64 xi_startino; /* starting inode number */ + __s32 xi_alloccount; /* # bits set in allocmask */ + __u64 xi_allocmask; /* mask of allocated inodes */ +} xfs_inogrp_t; + + +/* + * Error injection. + */ +typedef struct xfs_error_injection { + __s32 fd; + __s32 errtag; +} xfs_error_injection_t; + + +/* + * The user-level Handle Request interface structure. 
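+ *
+ * A hedged sketch of converting a path to a handle from user space
+ * (fsfd, hbuf and hlen are assumed):
+ *
+ *	xfs_fsop_handlereq_t hreq = { 0 };
+ *
+ *	hreq.path = "/mnt/xfs/somefile";
+ *	hreq.ohandle = hbuf;
+ *	hreq.ohandlen = &hlen;
+ *	ioctl(fsfd, XFS_IOC_PATH_TO_HANDLE, &hreq);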
+ */
+typedef struct xfs_fsop_handlereq {
+	__u32		fd;		/* fd for FD_TO_HANDLE		*/
+	void		*path;		/* user pathname		*/
+	__u32		oflags;		/* open flags			*/
+	void		*ihandle;	/* user supplied handle		*/
+	__u32		ihandlen;	/* user supplied length		*/
+	void		*ohandle;	/* user buffer for handle	*/
+	__u32		*ohandlen;	/* user buffer length		*/
+} xfs_fsop_handlereq_t;
+
+/*
+ * Compound structures for passing args through Handle Request interfaces
+ * xfs_fssetdm_by_handle, xfs_attrlist_by_handle, xfs_attrmulti_by_handle
+ * - ioctls: XFS_IOC_FSSETDM_BY_HANDLE, XFS_IOC_ATTRLIST_BY_HANDLE, and
+ *	     XFS_IOC_ATTRMULTI_BY_HANDLE
+ */
+
+typedef struct xfs_fsop_setdm_handlereq {
+	struct xfs_fsop_handlereq	hreq;	/* handle interface structure */
+	struct fsdmidata		*data;	/* DMAPI data to set	      */
+} xfs_fsop_setdm_handlereq_t;
+
+typedef struct xfs_attrlist_cursor {
+	__u32		opaque[4];
+} xfs_attrlist_cursor_t;
+
+typedef struct xfs_fsop_attrlist_handlereq {
+	struct xfs_fsop_handlereq	hreq;	/* handle interface structure */
+	struct xfs_attrlist_cursor	pos;	/* opaque cookie, list offset */
+	__u32				flags;	/* flags, use ROOT/USER names */
+	__u32				buflen;	/* length of buffer supplied  */
+	void				*buffer;	/* attrlist data to return    */
+} xfs_fsop_attrlist_handlereq_t;
+
+typedef struct xfs_attr_multiop {
+	__u32		am_opcode;
+	__s32		am_error;
+	void		*am_attrname;
+	void		*am_attrvalue;
+	__u32		am_length;
+	__u32		am_flags;
+} xfs_attr_multiop_t;
+
+typedef struct xfs_fsop_attrmulti_handlereq {
+	struct xfs_fsop_handlereq	hreq;	/* handle interface structure */
+	__u32				opcount;/* count of following multiop */
+	struct xfs_attr_multiop		*ops;	/* attr_multi data to get/set */
+} xfs_fsop_attrmulti_handlereq_t;
+
+/*
+ * File system identifier.  Should be unique (at least per machine).
+ */
+typedef struct {
+	__u32		val[2];		/* file system id type */
+} xfs_fsid_t;
+
+/*
+ * File identifier.  Should be unique per filesystem on a single machine.
+ * This is typically used by a stateless file server in order to generate
+ * "file handles".
+ */
+#ifndef HAVE_FID
+#define MAXFIDSZ	46
+typedef struct fid {
+	__u16		fid_len;		/* length of data in bytes  */
+	unsigned char	fid_data[MAXFIDSZ];	/* data (variable length)   */
+} fid_t;
+#endif
+
+typedef struct xfs_fid {
+	__u16	xfs_fid_len;		/* length of remainder	*/
+	__u16	xfs_fid_pad;
+	__u32	xfs_fid_gen;		/* generation number	*/
+	__u64	xfs_fid_ino;		/* 64 bits inode number */
+} xfs_fid_t;
+
+typedef struct xfs_fid2 {
+	__u16	fid_len;	/* length of remainder	*/
+	__u16	fid_pad;	/* padding, must be zero */
+	__u32	fid_gen;	/* generation number	*/
+	__u64	fid_ino;	/* inode number		*/
+} xfs_fid2_t;
+
+typedef struct xfs_handle {
+	union {
+		__s64	    align;	/* force alignment of ha_fid	 */
+		xfs_fsid_t  _ha_fsid;	/* unique file system identifier */
+	} ha_u;
+	xfs_fid_t	ha_fid;		/* file system specific file ID	 */
+} xfs_handle_t;
+#define ha_fsid ha_u._ha_fsid
+
+#define XFS_HSIZE(handle)	(((char *) &(handle).ha_fid.xfs_fid_pad	 \
+				 - (char *) &(handle))			  \
+				 + (handle).ha_fid.xfs_fid_len)
+
+#define XFS_HANDLE_CMP(h1, h2)	memcmp(h1, h2, sizeof(xfs_handle_t))
+
+#define FSHSIZE		sizeof(fsid_t)
+
+
+/*
+ * ioctl commands that replace IRIX fcntl()'s
+ * For 'documentation' purposes more than anything else,
+ * the "cmd #" field reflects the IRIX fcntl number.
+ */ +#define XFS_IOC_ALLOCSP _IOW ('X', 10, struct xfs_flock64) +#define XFS_IOC_FREESP _IOW ('X', 11, struct xfs_flock64) +#define XFS_IOC_DIOINFO _IOR ('X', 30, struct dioattr) +#define XFS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) +#define XFS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) +#define XFS_IOC_ALLOCSP64 _IOW ('X', 36, struct xfs_flock64) +#define XFS_IOC_FREESP64 _IOW ('X', 37, struct xfs_flock64) +#define XFS_IOC_GETBMAP _IOWR('X', 38, struct getbmap) +#define XFS_IOC_FSSETDM _IOW ('X', 39, struct fsdmidata) +#define XFS_IOC_RESVSP _IOW ('X', 40, struct xfs_flock64) +#define XFS_IOC_UNRESVSP _IOW ('X', 41, struct xfs_flock64) +#define XFS_IOC_RESVSP64 _IOW ('X', 42, struct xfs_flock64) +#define XFS_IOC_UNRESVSP64 _IOW ('X', 43, struct xfs_flock64) +#define XFS_IOC_GETBMAPA _IOWR('X', 44, struct getbmap) +#define XFS_IOC_FSGETXATTRA _IOR ('X', 45, struct fsxattr) +/* XFS_IOC_SETBIOSIZE ---- deprecated 46 */ +/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ +#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) + +/* + * ioctl commands that replace IRIX syssgi()'s + */ +#define XFS_IOC_FSGEOMETRY_V1 _IOR ('X', 100, struct xfs_fsop_geom_v1) +#define XFS_IOC_FSBULKSTAT _IOWR('X', 101, struct xfs_fsop_bulkreq) +#define XFS_IOC_FSBULKSTAT_SINGLE _IOWR('X', 102, struct xfs_fsop_bulkreq) +#define XFS_IOC_FSINUMBERS _IOWR('X', 103, struct xfs_fsop_bulkreq) +#define XFS_IOC_PATH_TO_FSHANDLE _IOWR('X', 104, struct xfs_fsop_handlereq) +#define XFS_IOC_PATH_TO_HANDLE _IOWR('X', 105, struct xfs_fsop_handlereq) +#define XFS_IOC_FD_TO_HANDLE _IOWR('X', 106, struct xfs_fsop_handlereq) +#define XFS_IOC_OPEN_BY_HANDLE _IOWR('X', 107, struct xfs_fsop_handlereq) +#define XFS_IOC_READLINK_BY_HANDLE _IOWR('X', 108, struct xfs_fsop_handlereq) +#define XFS_IOC_SWAPEXT _IOWR('X', 109, struct xfs_swapext) +#define XFS_IOC_FSGROWFSDATA _IOW ('X', 110, struct xfs_growfs_data) +#define XFS_IOC_FSGROWFSLOG _IOW ('X', 111, struct xfs_growfs_log) +#define XFS_IOC_FSGROWFSRT _IOW ('X', 112, struct xfs_growfs_rt) +#define XFS_IOC_FSCOUNTS _IOR ('X', 113, struct xfs_fsop_counts) +#define XFS_IOC_SET_RESBLKS _IOWR('X', 114, struct xfs_fsop_resblks) +#define XFS_IOC_GET_RESBLKS _IOR ('X', 115, struct xfs_fsop_resblks) +#define XFS_IOC_ERROR_INJECTION _IOW ('X', 116, struct xfs_error_injection) +#define XFS_IOC_ERROR_CLEARALL _IOW ('X', 117, struct xfs_error_injection) +/* XFS_IOC_ATTRCTL_BY_HANDLE -- deprecated 118 */ +#define XFS_IOC_FREEZE _IOWR('X', 119, int) +#define XFS_IOC_THAW _IOWR('X', 120, int) +#define XFS_IOC_FSSETDM_BY_HANDLE _IOW ('X', 121, struct xfs_fsop_setdm_handlereq) +#define XFS_IOC_ATTRLIST_BY_HANDLE _IOW ('X', 122, struct xfs_fsop_attrlist_handlereq) +#define XFS_IOC_ATTRMULTI_BY_HANDLE _IOW ('X', 123, struct xfs_fsop_attrmulti_handlereq) +#define XFS_IOC_FSGEOMETRY _IOR ('X', 124, struct xfs_fsop_geom) +/* XFS_IOC_GETFSUUID ---------- deprecated 140 */ + + +#ifndef HAVE_BBMACROS +/* + * Block I/O parameterization. A basic block (BB) is the lowest size of + * filesystem allocation, and must equal 512. Length units given to bio + * routines are in BB's. 
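+ *
+ * (Editorial worked example, assuming the definitions just below: with
+ * BBSHIFT == 9, BTOBB() rounds byte counts up to whole basic blocks
+ * while BTOBBT() truncates, so BTOBB(512) == 1, BTOBB(513) == 2,
+ * BTOBBT(513) == 1, and BBTOB(2) == 1024.)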
+ */
+#define	BBSHIFT		9
+#define	BBSIZE		(1<<BBSHIFT)
+#define	BBMASK		(BBSIZE-1)
+#define	BTOBB(bytes)	(((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)
+#define	BTOBBT(bytes)	((__u64)(bytes) >> BBSHIFT)
+#define	BBTOB(bbs)	((bbs) << BBSHIFT)
+#endif
+
+#endif	/* __XFS_FS_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_fsops.c linux.21rc5-ac2/fs/xfs/xfs_fsops.c
--- linux.21rc5/fs/xfs/xfs_fsops.c 1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_fsops.c 2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,628 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_ag.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_error.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_fsops.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_trans_space.h"
+#include "xfs_rtalloc.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_inode_item.h"
+
+/*
+ * File system operations
+ */
+
+int
+xfs_fs_geometry(
+	xfs_mount_t		*mp,
+	xfs_fsop_geom_t		*geo,
+	int			new_version)
+{
+	geo->blocksize = mp->m_sb.sb_blocksize;
+	geo->rtextsize = mp->m_sb.sb_rextsize;
+	geo->agblocks = mp->m_sb.sb_agblocks;
+	geo->agcount = mp->m_sb.sb_agcount;
+	geo->logblocks = mp->m_sb.sb_logblocks;
+	geo->sectsize = mp->m_sb.sb_sectsize;
+	geo->inodesize = mp->m_sb.sb_inodesize;
+	geo->imaxpct = mp->m_sb.sb_imax_pct;
+	geo->datablocks = mp->m_sb.sb_dblocks;
+	geo->rtblocks = mp->m_sb.sb_rblocks;
+	geo->rtextents = mp->m_sb.sb_rextents;
+	geo->logstart = mp->m_sb.sb_logstart;
+	ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid));
+	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
+	if (new_version >= 2) {
+		geo->sunit = mp->m_sb.sb_unit;
+		geo->swidth = mp->m_sb.sb_width;
+	}
+	if (new_version >= 3) {
+		geo->version = XFS_FSOP_GEOM_VERSION;
+		geo->flags =
+			(XFS_SB_VERSION_HASATTR(&mp->m_sb) ?
+ XFS_FSOP_GEOM_FLAGS_ATTR : 0) | + (XFS_SB_VERSION_HASNLINK(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_NLINK : 0) | + (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_QUOTA : 0) | + (XFS_SB_VERSION_HASALIGN(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_IALIGN : 0) | + (XFS_SB_VERSION_HASDALIGN(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_DALIGN : 0) | + (XFS_SB_VERSION_HASSHARED(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_SHARED : 0) | + (XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) | + (XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) | + (XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_SECTOR : 0); + geo->logsectsize = XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ? + mp->m_sb.sb_logsectsize : BBSIZE; + geo->rtsectsize = mp->m_sb.sb_blocksize; + geo->dirblocksize = mp->m_dirblksize; + } + if (new_version >= 4) { + geo->flags |= + (XFS_SB_VERSION_HASLOGV2(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_LOGV2 : 0); + geo->logsunit = mp->m_sb.sb_logsunit; + } + return 0; +} + +static int +xfs_growfs_data_private( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_growfs_data_t *in) /* growfs data input struct */ +{ + xfs_agf_t *agf; + xfs_agi_t *agi; + xfs_agnumber_t agno; + xfs_extlen_t agsize; + xfs_extlen_t tmpsize; + xfs_alloc_rec_t *arec; + xfs_btree_sblock_t *block; + xfs_buf_t *bp; + int bucket; + int dpct; + int error; + xfs_agnumber_t nagcount; + xfs_rfsblock_t nb, nb_mod; + xfs_rfsblock_t new; + xfs_rfsblock_t nfree; + xfs_agnumber_t oagcount; + int pct; + xfs_sb_t *sbp; + xfs_trans_t *tp; + + nb = in->newblocks; + pct = in->imaxpct; + if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100) + return XFS_ERROR(EINVAL); + dpct = pct - mp->m_sb.sb_imax_pct; + error = xfs_read_buf(mp, mp->m_ddev_targp, + XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (error) + return error; + ASSERT(bp); + xfs_buf_relse(bp); + + new = nb; /* use new as a temporary here */ + nb_mod = do_div(new, mp->m_sb.sb_agblocks); + nagcount = new + (nb_mod != 0); + if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) { + nagcount--; + nb = nagcount * mp->m_sb.sb_agblocks; + if (nb < mp->m_sb.sb_dblocks) + return XFS_ERROR(EINVAL); + } + new = in->newblocks - mp->m_sb.sb_dblocks; + oagcount = mp->m_sb.sb_agcount; + if (nagcount > oagcount) { + down_write(&mp->m_peraglock); + mp->m_perag = kmem_realloc(mp->m_perag, + sizeof(xfs_perag_t) * nagcount, + sizeof(xfs_perag_t) * oagcount, + KM_SLEEP); + memset(&mp->m_perag[oagcount], 0, + (nagcount - oagcount) * sizeof(xfs_perag_t)); + mp->m_flags |= XFS_MOUNT_32BITINODES; + xfs_initialize_perag(mp, nagcount); + up_write(&mp->m_peraglock); + } + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS); + if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp), + XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) { + xfs_trans_cancel(tp, 0); + return error; + } + + nfree = 0; + for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { + /* + * AG freelist header block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0); + agf = XFS_BUF_TO_AGF(bp); + memset(agf, 0, mp->m_sb.sb_sectsize); + INT_SET(agf->agf_magicnum, ARCH_CONVERT, XFS_AGF_MAGIC); + INT_SET(agf->agf_versionnum, ARCH_CONVERT, XFS_AGF_VERSION); + INT_SET(agf->agf_seqno, ARCH_CONVERT, agno); + if (agno == nagcount - 1) + agsize = + nb - + (agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks); + else + agsize = mp->m_sb.sb_agblocks; + INT_SET(agf->agf_length, ARCH_CONVERT, agsize); + INT_SET(agf->agf_roots[XFS_BTNUM_BNOi], 
ARCH_CONVERT, + XFS_BNO_BLOCK(mp)); + INT_SET(agf->agf_roots[XFS_BTNUM_CNTi], ARCH_CONVERT, + XFS_CNT_BLOCK(mp)); + INT_SET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT, 1); + INT_SET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT, 1); + INT_ZERO(agf->agf_flfirst, ARCH_CONVERT); + INT_SET(agf->agf_fllast, ARCH_CONVERT, XFS_AGFL_SIZE(mp) - 1); + INT_ZERO(agf->agf_flcount, ARCH_CONVERT); + tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp); + INT_SET(agf->agf_freeblks, ARCH_CONVERT, tmpsize); + INT_SET(agf->agf_longest, ARCH_CONVERT, tmpsize); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + /* + * AG inode header block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0); + agi = XFS_BUF_TO_AGI(bp); + memset(agi, 0, mp->m_sb.sb_sectsize); + INT_SET(agi->agi_magicnum, ARCH_CONVERT, XFS_AGI_MAGIC); + INT_SET(agi->agi_versionnum, ARCH_CONVERT, XFS_AGI_VERSION); + INT_SET(agi->agi_seqno, ARCH_CONVERT, agno); + INT_SET(agi->agi_length, ARCH_CONVERT, agsize); + INT_ZERO(agi->agi_count, ARCH_CONVERT); + INT_SET(agi->agi_root, ARCH_CONVERT, XFS_IBT_BLOCK(mp)); + INT_SET(agi->agi_level, ARCH_CONVERT, 1); + INT_ZERO(agi->agi_freecount, ARCH_CONVERT); + INT_SET(agi->agi_newino, ARCH_CONVERT, NULLAGINO); + INT_SET(agi->agi_dirino, ARCH_CONVERT, NULLAGINO); + for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) + INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, + NULLAGINO); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + /* + * BNO btree root block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)), + BTOBB(mp->m_sb.sb_blocksize), 0); + block = XFS_BUF_TO_SBLOCK(bp); + memset(block, 0, mp->m_sb.sb_blocksize); + INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTB_MAGIC); + INT_ZERO(block->bb_level, ARCH_CONVERT); + INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); + INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, + block, 1, mp->m_alloc_mxr[0]); + INT_SET(arec->ar_startblock, ARCH_CONVERT, + XFS_PREALLOC_BLOCKS(mp)); + INT_SET(arec->ar_blockcount, ARCH_CONVERT, + agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT)); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + /* + * CNT btree root block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)), + BTOBB(mp->m_sb.sb_blocksize), 0); + block = XFS_BUF_TO_SBLOCK(bp); + memset(block, 0, mp->m_sb.sb_blocksize); + INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTC_MAGIC); + INT_ZERO(block->bb_level, ARCH_CONVERT); + INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); + INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, + block, 1, mp->m_alloc_mxr[0]); + INT_SET(arec->ar_startblock, ARCH_CONVERT, + XFS_PREALLOC_BLOCKS(mp)); + INT_SET(arec->ar_blockcount, ARCH_CONVERT, + agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT)); + nfree += INT_GET(arec->ar_blockcount, ARCH_CONVERT); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + /* + * INO btree root block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)), + BTOBB(mp->m_sb.sb_blocksize), 0); + block = XFS_BUF_TO_SBLOCK(bp); + memset(block, 0, mp->m_sb.sb_blocksize); + INT_SET(block->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC); + 
INT_ZERO(block->bb_level, ARCH_CONVERT); + INT_ZERO(block->bb_numrecs, ARCH_CONVERT); + INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + } + xfs_trans_agblocks_delta(tp, nfree); + /* + * There are new blocks in the old last a.g. + */ + if (new) { + /* + * Change the agi length. + */ + error = xfs_ialloc_read_agi(mp, tp, agno, &bp); + if (error) { + goto error0; + } + ASSERT(bp); + agi = XFS_BUF_TO_AGI(bp); + INT_MOD(agi->agi_length, ARCH_CONVERT, new); + ASSERT(nagcount == oagcount || + INT_GET(agi->agi_length, ARCH_CONVERT) == + mp->m_sb.sb_agblocks); + xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); + /* + * Change agf length. + */ + error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp); + if (error) { + goto error0; + } + ASSERT(bp); + agf = XFS_BUF_TO_AGF(bp); + INT_MOD(agf->agf_length, ARCH_CONVERT, new); + ASSERT(INT_GET(agf->agf_length, ARCH_CONVERT) == + INT_GET(agi->agi_length, ARCH_CONVERT)); + /* + * Free the new space. + */ + error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno, + INT_GET(agf->agf_length, ARCH_CONVERT) - new), new); + if (error) { + goto error0; + } + } + if (nagcount > oagcount) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount); + if (nb > mp->m_sb.sb_dblocks) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, + nb - mp->m_sb.sb_dblocks); + if (nfree) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree); + if (dpct) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct); + error = xfs_trans_commit(tp, 0, NULL); + if (error) { + return error; + } + if (mp->m_sb.sb_imax_pct) { + __uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct; + do_div(icount, 100); + mp->m_maxicount = icount << mp->m_sb.sb_inopblog; + } else + mp->m_maxicount = 0; + for (agno = 1; agno < nagcount; agno++) { + error = xfs_read_buf(mp, mp->m_ddev_targp, + XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (error) { + xfs_fs_cmn_err(CE_WARN, mp, + "error %d reading secondary superblock for ag %d", + error, agno); + break; + } + sbp = XFS_BUF_TO_SBP(bp); + xfs_xlatesb(sbp, &mp->m_sb, -1, ARCH_CONVERT, XFS_SB_ALL_BITS); + /* + * If we get an error writing out the alternate superblocks, + * just issue a warning and continue. The real work is + * already done and committed. + */ + if (!(error = xfs_bwrite(mp, bp))) { + continue; + } else { + xfs_fs_cmn_err(CE_WARN, mp, + "write error %d updating secondary superblock for ag %d", + error, agno); + break; /* no point in continuing */ + } + } + return 0; + + error0: + xfs_trans_cancel(tp, XFS_TRANS_ABORT); + return error; +} + +static int +xfs_growfs_log_private( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_growfs_log_t *in) /* growfs log input struct */ +{ + xfs_extlen_t nb; + + nb = in->newblocks; + if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES)) + return XFS_ERROR(EINVAL); + if (nb == mp->m_sb.sb_logblocks && + in->isint == (mp->m_sb.sb_logstart != 0)) + return XFS_ERROR(EINVAL); + /* + * Moving the log is hard, need new interfaces to sync + * the log first, hold off all activity while moving it. + * Can have shorter or longer log in the same space, + * or transform internal to external log or vice versa. 
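	 *
	 * (Editorial worked example of the size floor above: the minimum
	 * log is the larger of XFS_MIN_LOG_BLOCKS (512 blocks) and
	 * XFS_MIN_LOG_BYTES (256KB) converted to blocks, so with
	 * 4096-byte blocks the 512-block floor dominates at 2MB, while
	 * with 512-byte blocks both limits agree at 512 blocks.)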
+	 */
+	return XFS_ERROR(ENOSYS);
+}
+
+/*
+ * protected versions of growfs function acquire and release locks on the mount
+ * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
+ * XFS_IOC_FSGROWFSRT
+ */
+
+
+int
+xfs_growfs_data(
+	xfs_mount_t		*mp,
+	xfs_growfs_data_t	*in)
+{
+	int error;
+	if (!cpsema(&mp->m_growlock))
+		return XFS_ERROR(EWOULDBLOCK);
+	error = xfs_growfs_data_private(mp, in);
+	vsema(&mp->m_growlock);
+	return error;
+}
+
+int
+xfs_growfs_log(
+	xfs_mount_t		*mp,
+	xfs_growfs_log_t	*in)
+{
+	int error;
+	if (!cpsema(&mp->m_growlock))
+		return XFS_ERROR(EWOULDBLOCK);
+	error = xfs_growfs_log_private(mp, in);
+	vsema(&mp->m_growlock);
+	return error;
+}
+
+/*
+ * exported through ioctl XFS_IOC_FSCOUNTS
+ */
+
+int
+xfs_fs_counts(
+	xfs_mount_t		*mp,
+	xfs_fsop_counts_t	*cnt)
+{
+	unsigned long	s;
+
+	s = XFS_SB_LOCK(mp);
+	cnt->freedata = mp->m_sb.sb_fdblocks;
+	cnt->freertx = mp->m_sb.sb_frextents;
+	cnt->freeino = mp->m_sb.sb_ifree;
+	cnt->allocino = mp->m_sb.sb_icount;
+	XFS_SB_UNLOCK(mp, s);
+	return 0;
+}
+
+/*
+ * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
+ *
+ * xfs_reserve_blocks is called to set m_resblks
+ * in the in-core mount table. The number of unused reserved blocks
+ * is kept in m_resblks_avail.
+ *
+ * Reserve the requested number of blocks if available. Otherwise return
+ * as many as possible to satisfy the request. The actual number
+ * reserved is returned in outval.
+ *
+ * A null inval pointer indicates that only the current reserved blocks
+ * available should be returned; no settings are changed.
+ */
+
+int
+xfs_reserve_blocks(
+	xfs_mount_t		*mp,
+	__uint64_t		*inval,
+	xfs_fsop_resblks_t	*outval)
+{
+	__int64_t		lcounter, delta;
+	__uint64_t		request;
+	unsigned long		s;
+
+	/* If inval is null, report current values and return */
+
+	if (inval == (__uint64_t *)NULL) {
+		outval->resblks = mp->m_resblks;
+		outval->resblks_avail = mp->m_resblks_avail;
+		return(0);
+	}
+
+	request = *inval;
+	s = XFS_SB_LOCK(mp);
+
+	/*
+	 * If our previous reservation was larger than the current value,
+	 * then move any unused blocks back to the free pool.
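+	 *
+	 * (Editorial worked example: with resblks == 1024,
+	 * resblks_avail == 900 and a new request of 100, the shrink path
+	 * below returns 800 blocks to sb_fdblocks and leaves
+	 * resblks == resblks_avail == 100.)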
+ */ + + if (mp->m_resblks > request) { + lcounter = mp->m_resblks_avail - request; + if (lcounter > 0) { /* release unused blocks */ + mp->m_sb.sb_fdblocks += lcounter; + mp->m_resblks_avail -= lcounter; + } + mp->m_resblks = request; + } else { + delta = request - mp->m_resblks; + lcounter = mp->m_sb.sb_fdblocks; + lcounter -= delta; + if (lcounter < 0) { + /* We can't satisfy the request, just get what we can */ + mp->m_resblks += mp->m_sb.sb_fdblocks; + mp->m_resblks_avail += mp->m_sb.sb_fdblocks; + mp->m_sb.sb_fdblocks = 0; + } else { + mp->m_sb.sb_fdblocks = lcounter; + mp->m_resblks = request; + mp->m_resblks_avail += delta; + } + } + + outval->resblks = mp->m_resblks; + outval->resblks_avail = mp->m_resblks_avail; + XFS_SB_UNLOCK(mp, s); + return(0); +} + +void +xfs_fs_log_dummy(xfs_mount_t *mp) +{ + xfs_trans_t *tp; + xfs_inode_t *ip; + + + tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); + atomic_inc(&mp->m_active_trans); + if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) { + xfs_trans_cancel(tp, 0); + return; + } + + ip = mp->m_rootip; + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + xfs_trans_commit(tp, XFS_TRANS_SYNC, NULL); + + xfs_iunlock(ip, XFS_ILOCK_EXCL); +} + +int +xfs_fs_freeze( + xfs_mount_t *mp) +{ + vfs_t *vfsp; + /*REFERENCED*/ + int error; + + vfsp = XFS_MTOVFS(mp); + + /* Stop new writers */ + xfs_start_freeze(mp, XFS_FREEZE_WRITE); + + /* Flush the refcache */ + xfs_refcache_purge_mp(mp); + + /* Flush delalloc and delwri data */ + VFS_SYNC(vfsp, SYNC_DELWRI|SYNC_WAIT, NULL, error); + + /* Pause transaction subsystem */ + xfs_start_freeze(mp, XFS_FREEZE_TRANS); + + /* Flush any remaining inodes into buffers */ + VFS_SYNC(vfsp, SYNC_ATTR|SYNC_WAIT, NULL, error); + + /* Push all buffers out to disk */ + xfs_binval(mp->m_ddev_targp); + if (mp->m_rtdev_targp) { + xfs_binval(mp->m_rtdev_targp); + } + + /* Push the superblock and write an unmount record */ + xfs_log_unmount_write(mp); + xfs_unmountfs_writesb(mp); + + return 0; +} + +int +xfs_fs_thaw( + xfs_mount_t *mp) +{ + xfs_finish_freeze(mp); + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_fsops.h linux.21rc5-ac2/fs/xfs/xfs_fsops.h --- linux.21rc5/fs/xfs/xfs_fsops.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_fsops.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_FSOPS_H__ +#define __XFS_FSOPS_H__ + +int +xfs_fs_geometry( + xfs_mount_t *mp, + xfs_fsop_geom_t *geo, + int new_version); + +int +xfs_growfs_data( + xfs_mount_t *mp, + xfs_growfs_data_t *in); + +int +xfs_growfs_log( + xfs_mount_t *mp, + xfs_growfs_log_t *in); + +int +xfs_fs_counts( + xfs_mount_t *mp, + xfs_fsop_counts_t *cnt); + +int +xfs_reserve_blocks( + xfs_mount_t *mp, + __uint64_t *inval, + xfs_fsop_resblks_t *outval); + +int +xfs_fs_freeze( + xfs_mount_t *mp); + +int +xfs_fs_thaw( + xfs_mount_t *mp); + +#endif /* __XFS_FSOPS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs.h linux.21rc5-ac2/fs/xfs/xfs.h --- linux.21rc5/fs/xfs/xfs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs.h 2003-05-29 01:44:06.000000000 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_H__ +#define __XFS_H__ + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#endif /* __XFS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_ialloc_btree.c linux.21rc5-ac2/fs/xfs/xfs_ialloc_btree.c --- linux.21rc5/fs/xfs/xfs_ialloc_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_ialloc_btree.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,2122 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" + +/* + * Inode allocation management for XFS. + */ + +/* + * Prototypes for internal functions. + */ + +STATIC void xfs_inobt_log_block(xfs_trans_t *, xfs_buf_t *, int); +STATIC void xfs_inobt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC int xfs_inobt_lshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *); +STATIC int xfs_inobt_rshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_inobt_split(xfs_btree_cur_t *, int, xfs_agblock_t *, + xfs_inobt_key_t *, xfs_btree_cur_t **, int *); +STATIC int xfs_inobt_updkey(xfs_btree_cur_t *, xfs_inobt_key_t *, int); + +/* + * Internal functions. + */ + +#ifdef _NOTYET_ +/* + * Single level of the xfs_inobt_delete record deletion routine. + * Delete record pointed to by cur/level. + * Remove the record from its block then rebalance the tree. + * Return 0 for error, 1 for done, 2 to go on to the next level. + */ +STATIC int /* error */ +xfs_inobt_delrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level removing record from */ + int *stat) /* fail/done/go-on */ +{ + xfs_buf_t *agbp; /* buffer for a.g. 
inode header */ + xfs_agnumber_t agfbno; /* agf block of freed btree block */ + xfs_buf_t *agfbp; /* bp of agf block of freed block */ + xfs_agi_t *agi; /* allocation group inode header */ + xfs_inobt_block_t *block; /* btree block record/key lives in */ + xfs_agblock_t bno; /* btree block number */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ + int i; /* loop index */ + xfs_inobt_key_t key; /* kp points here if block is level 0 */ + xfs_inobt_key_t *kp; /* pointer to btree keys */ + xfs_agblock_t lbno; /* left block's block number */ + xfs_buf_t *lbp; /* left block's buffer pointer */ + xfs_inobt_block_t *left; /* left btree block */ + xfs_inobt_key_t *lkp; /* left block key pointer */ + xfs_inobt_ptr_t *lpp; /* left block address pointer */ + int lrecs; /* number of records in left block */ + xfs_inobt_rec_t *lrp; /* left block record pointer */ + xfs_inobt_ptr_t *pp; /* pointer to btree addresses */ + int ptr; /* index in btree block for this rec */ + xfs_agblock_t rbno; /* right block's block number */ + xfs_buf_t *rbp; /* right block's buffer pointer */ + xfs_inobt_block_t *right; /* right btree block */ + xfs_inobt_key_t *rkp; /* right block key pointer */ + xfs_inobt_rec_t *rp; /* pointer to btree records */ + xfs_inobt_ptr_t *rpp; /* right block address pointer */ + int rrecs; /* number of records in right block */ + xfs_inobt_rec_t *rrp; /* right block record pointer */ + xfs_btree_cur_t *tcur; /* temporary btree cursor */ + + + /* + * Get the index of the entry being deleted, check for nothing there. + */ + ptr = cur->bc_ptrs[level]; + if (ptr == 0) { + *stat = 0; + return 0; + } + /* + * Get the buffer & block containing the record or key/ptr. + */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if (error = xfs_btree_check_sblock(cur, block, level, bp)) + return error; +#endif + /* + * Fail if we're off the end of the block. + */ + if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + *stat = 0; + return 0; + } + /* + * It's a nonleaf. Excise the key and ptr being deleted, by + * sliding the entries past them down one. + * Log the changed areas of the block. + */ + if (level > 0) { + kp = XFS_INOBT_KEY_ADDR(block, 1, cur); + pp = XFS_INOBT_PTR_ADDR(block, 1, cur); +#ifdef DEBUG + for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) { + if (error = xfs_btree_check_sptr(cur, INT_GET(pp[i], ARCH_CONVERT), level)) + return error; + } +#endif + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + memmove(&kp[ptr - 1], &kp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*kp)); + memmove(&pp[ptr - 1], &pp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*pp)); + xfs_inobt_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + xfs_inobt_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + } + } + /* + * It's a leaf. Excise the record being deleted, by sliding the + * entries past it down one. Log the changed areas of the block. + */ + else { + rp = XFS_INOBT_REC_ADDR(block, 1, cur); + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + memmove(&rp[ptr - 1], &rp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*rp)); + xfs_inobt_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + } + /* + * If it's the first record in the block, we'll need a key + * structure to pass up to the next level (updkey). 
+ */ + if (ptr == 1) { + INT_COPY(key.ir_startino, rp->ir_startino, ARCH_CONVERT); + kp = &key; + } + } + /* + * Decrement and log the number of entries in the block. + */ + INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1); + xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); + /* + * Is this the root level? If so, we're almost done. + */ + if (level == cur->bc_nlevels - 1) { + /* + * If this is the root level, + * and there's only one entry left, + * and it's NOT the leaf level, + * then we can get rid of this level. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) { + agbp = cur->bc_private.i.agbp; + agi = XFS_BUF_TO_AGI(agbp); + /* + * pp is still set to the first pointer in the block. + * Make it the new root of the btree. + */ + bno = INT_GET(agi->agi_root, ARCH_CONVERT); + INT_COPY(agi->agi_root, *pp, ARCH_CONVERT); + INT_MOD(agi->agi_level, ARCH_CONVERT, -1); + /* + * Free the block. + */ + if (error = xfs_free_extent(cur->bc_tp, bno, 1)) + return error; + xfs_trans_binval(cur->bc_tp, bp); + xfs_ialloc_log_agi(cur->bc_tp, agbp, + XFS_AGI_ROOT | XFS_AGI_LEVEL); + /* + * Update the cursor so there's one fewer level. + */ + cur->bc_bufs[level] = NULL; + cur->bc_nlevels--; + /* + * To ensure that the freed block is not used for + * user data until this transaction is permanent, + * we lock the agf buffer for this ag until the + * transaction record makes it to the on-disk log. + */ + agfbno = XFS_AG_DADDR(cur->bc_mp, + cur->bc_private.i.agno, + XFS_AGF_DADDR(mp)); + if (error = xfs_trans_read_buf(cur->bc_mp, cur->bc_tp, + cur->bc_mp->m_ddev_targp, agfbno, + XFS_FSS_TO_BB(mp, 1), 0, &agfbp)) + return error; + ASSERT(!XFS_BUF_GETERROR(agfbp)); + xfs_trans_bhold_until_committed(cur->bc_tp, agfbp); + } else if (level > 0 && + (error = xfs_inobt_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * If we deleted the leftmost entry in the block, update the + * key values above us in the tree. + */ + if (ptr == 1 && (error = xfs_inobt_updkey(cur, kp, level + 1))) + return error; + /* + * If the number of records remaining in the block is at least + * the minimum, we're done. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_INOBT_BLOCK_MINRECS(level, cur)) { + if (level > 0 && + (error = xfs_inobt_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * Otherwise, we have to move some records around to keep the + * tree balanced. Look at the left and right sibling blocks to + * see if we can re-balance by moving only one record. + */ + rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); + bno = NULLAGBLOCK; + ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); + /* + * Duplicate the cursor so our btree manipulations here won't + * disrupt the next level up. + */ + if (error = xfs_btree_dup_cursor(cur, &tcur)) + return error; + /* + * If there's a right sibling, see if it's ok to shift an entry + * out of it. + */ + if (rbno != NULLAGBLOCK) { + /* + * Move the temp cursor to the last entry in the next block. + * Actually any entry but the first would suffice. + */ + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (error = xfs_inobt_increment(tcur, level, &i)) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Grab a pointer to the block. 
+ */ + rbp = tcur->bc_bufs[level]; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); +#ifdef DEBUG + if (error = xfs_btree_check_sblock(cur, right, level, rbp)) + goto error0; +#endif + /* + * Grab the current block number, for future use. + */ + bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + /* + * If right block is full enough so that removing one entry + * won't make it too empty, and left-shifting an entry out + * of right to us works, we're done. + */ + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_INOBT_BLOCK_MINRECS(level, cur)) { + if (error = xfs_inobt_lshift(tcur, level, &i)) + goto error0; + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_INOBT_BLOCK_MINRECS(level, cur)); + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + if (level > 0 && + (error = xfs_inobt_decrement(cur, level, + &i))) + return error; + *stat = 1; + return 0; + } + } + /* + * Otherwise, grab the number of records in right for + * future reference, and fix up the temp cursor to point + * to our block again (last record). + */ + rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + if (lbno != NULLAGBLOCK) { + xfs_btree_firstrec(tcur, level); + if (error = xfs_inobt_decrement(tcur, level, &i)) + goto error0; + } + } + /* + * If there's a left sibling, see if it's ok to shift an entry + * out of it. + */ + if (lbno != NULLAGBLOCK) { + /* + * Move the temp cursor to the first entry in the + * previous block. + */ + xfs_btree_firstrec(tcur, level); + if (error = xfs_inobt_decrement(tcur, level, &i)) + goto error0; + xfs_btree_firstrec(tcur, level); + /* + * Grab a pointer to the block. + */ + lbp = tcur->bc_bufs[level]; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); +#ifdef DEBUG + if (error = xfs_btree_check_sblock(cur, left, level, lbp)) + goto error0; +#endif + /* + * Grab the current block number, for future use. + */ + bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + /* + * If left block is full enough so that removing one entry + * won't make it too empty, and right-shifting an entry out + * of left to us works, we're done. + */ + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_INOBT_BLOCK_MINRECS(level, cur)) { + if (error = xfs_inobt_rshift(tcur, level, &i)) + goto error0; + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_INOBT_BLOCK_MINRECS(level, cur)); + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + if (level == 0) + cur->bc_ptrs[0]++; + *stat = 1; + return 0; + } + } + /* + * Otherwise, grab the number of records in right for + * future reference. + */ + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * Delete the temp cursor, we're done with it. + */ + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + /* + * If here, we need to do a join to keep the tree balanced. + */ + ASSERT(bno != NULLAGBLOCK); + /* + * See if we can join with the left neighbor block. + */ + if (lbno != NULLAGBLOCK && + lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + /* + * Set "right" to be the starting block, + * "left" to be the left neighbor. + */ + rbno = bno; + right = block; + rbp = bp; + if (error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, lbno, 0, &lbp, + XFS_INO_BTREE_REF)) + return error; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); + if (error = xfs_btree_check_sblock(cur, left, level, lbp)) + return error; + } + /* + * If that won't work, see if we can join with the right neighbor block. 
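+	 *
+	 * (Editorial worked example: either join requires the two blocks'
+	 * record counts to sum to no more than XFS_INOBT_BLOCK_MAXRECS;
+	 * with a hypothetical maxrecs of 16, a block left holding 7
+	 * records can absorb a sibling holding at most 9.)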
+ */ + else if (rbno != NULLAGBLOCK && + rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + /* + * Set "left" to be the starting block, + * "right" to be the right neighbor. + */ + lbno = bno; + left = block; + lbp = bp; + if (error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, rbno, 0, &rbp, + XFS_INO_BTREE_REF)) + return error; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); + if (error = xfs_btree_check_sblock(cur, right, level, rbp)) + return error; + } + /* + * Otherwise, we can't fix the imbalance. + * Just return. This is probably a logic error, but it's not fatal. + */ + else { + if (level > 0 && (error = xfs_inobt_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * We're now going to join "left" and "right" by moving all the stuff + * in "right" to "left" and deleting "right". + */ + if (level > 0) { + /* + * It's a non-leaf. Move keys and pointers. + */ + lkp = XFS_INOBT_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + lpp = XFS_INOBT_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); + rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if (error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)) + return error; + } +#endif + memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); + memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); + xfs_inobt_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_inobt_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } else { + /* + * It's a leaf. Move records. + */ + lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + rrp = XFS_INOBT_REC_ADDR(right, 1, cur); + memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp)); + xfs_inobt_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } + /* + * Fix up the number of records in the surviving block. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + /* + * Fix up the right block pointer in the surviving block, and log it. + */ + INT_COPY(left->bb_rightsib, right->bb_rightsib, ARCH_CONVERT); + xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + /* + * If there is a right sibling now, make it point to the + * remaining block. + */ + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_inobt_block_t *rrblock; + xfs_buf_t *rrbp; + + if (error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, + &rrbp, XFS_INO_BTREE_REF)) + return error; + rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); + if (error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)) + return error; + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); + xfs_inobt_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); + } + /* + * Free the deleting block. 
+ */ + if (error = xfs_free_extent(cur->bc_tp, rbno, 1)) + return error; + xfs_trans_binval(cur->bc_tp, rbp); + /* + * To ensure that the freed block is not used for + * user data until this transaction is permanent, + * we lock the agf buffer for this ag until the + * transaction record makes it to the on-disk log. + */ + agfbno = XFS_AG_DADDR(cur->bc_mp, cur->bc_private.i.agno, + XFS_AGF_DADDR(mp)); + if (error = xfs_trans_read_buf(cur->bc_mp, cur->bc_tp, + cur->bc_mp->m_ddev_targp, agfbno, + XFS_FSS_TO_BB(mp, 1), 0, &agfbp)) + return error; + ASSERT(!XFS_BUF_GETERROR(agfbp)); + xfs_trans_bhold_until_committed(cur->bc_tp, agfbp); + /* + * If we joined with the left neighbor, set the buffer in the + * cursor to the left block, and fix up the index. + */ + if (bp != lbp) { + cur->bc_bufs[level] = lbp; + cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT); + cur->bc_ra[level] = 0; + } + /* + * If we joined with the right neighbor and there's a level above + * us, increment the cursor at that level. + */ + else if (level + 1 < cur->bc_nlevels && + (error = xfs_inobt_increment(cur, level + 1, &i))) { + return error; + } + /* + * Readjust the ptr at this level if it's not a leaf, since it's + * still pointing at the deletion point, which makes the cursor + * inconsistent. If this makes the ptr 0, the caller fixes it up. + * We can't use decrement because it would change the next level up. + */ + if (level > 0) + cur->bc_ptrs[level]--; + /* + * Return value means the next level up has something to do. + */ + *stat = 2; + return 0; + +error0: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} +#endif /* _NOTYET_ */ + +/* + * Insert one record/level. Return information to the caller + * allowing the next level up to proceed if necessary. + */ +STATIC int /* error */ +xfs_inobt_insrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to insert record at */ + xfs_agblock_t *bnop, /* i/o: block number inserted */ + xfs_inobt_rec_t *recp, /* i/o: record data inserted */ + xfs_btree_cur_t **curp, /* output: new cursor replacing cur */ + int *stat) /* success/failure */ +{ + xfs_inobt_block_t *block; /* btree block record/key lives in */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ + int i; /* loop index */ + xfs_inobt_key_t key; /* key value being inserted */ + xfs_inobt_key_t *kp=NULL; /* pointer to btree keys */ + xfs_agblock_t nbno; /* block number of allocated block */ + xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */ + xfs_inobt_key_t nkey; /* new key value, from split */ + xfs_inobt_rec_t nrec; /* new record value, for caller */ + int optr; /* old ptr value */ + xfs_inobt_ptr_t *pp; /* pointer to btree addresses */ + int ptr; /* index in btree block for this rec */ + xfs_inobt_rec_t *rp=NULL; /* pointer to btree records */ + + /* + * If we made it to the root level, allocate a new root block + * and we're done. + */ + if (level >= cur->bc_nlevels) { + error = xfs_inobt_newroot(cur, &i); + *bnop = NULLAGBLOCK; + *stat = i; + return error; + } + /* + * Make a key out of the record data to be inserted, and save it. + */ + key.ir_startino = recp->ir_startino; /* INT_: direct copy */ + optr = ptr = cur->bc_ptrs[level]; + /* + * If we're off the left edge, return failure. + */ + if (ptr == 0) { + *stat = 0; + return 0; + } + /* + * Get pointers to the btree buffer and block. 
+ */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; + /* + * Check that the new entry is being inserted in the right place. + */ + if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (level == 0) { + rp = XFS_INOBT_REC_ADDR(block, ptr, cur); + xfs_btree_check_rec(cur->bc_btnum, recp, rp); + } else { + kp = XFS_INOBT_KEY_ADDR(block, ptr, cur); + xfs_btree_check_key(cur->bc_btnum, &key, kp); + } + } +#endif + nbno = NULLAGBLOCK; + ncur = (xfs_btree_cur_t *)0; + /* + * If the block is full, we can't insert the new entry until we + * make the block un-full. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + /* + * First, try shifting an entry to the right neighbor. + */ + if ((error = xfs_inobt_rshift(cur, level, &i))) + return error; + if (i) { + /* nothing */ + } + /* + * Next, try shifting an entry to the left neighbor. + */ + else { + if ((error = xfs_inobt_lshift(cur, level, &i))) + return error; + if (i) { + optr = ptr = cur->bc_ptrs[level]; + } else { + /* + * Next, try splitting the current block + * in half. If this works we have to + * re-set our variables because + * we could be in a different block now. + */ + if ((error = xfs_inobt_split(cur, level, &nbno, + &nkey, &ncur, &i))) + return error; + if (i) { + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, + block, level, bp))) + return error; +#endif + ptr = cur->bc_ptrs[level]; + nrec.ir_startino = nkey.ir_startino; /* INT_: direct copy */ + } else { + /* + * Otherwise the insert fails. + */ + *stat = 0; + return 0; + } + } + } + } + /* + * At this point we know there's room for our new entry in the block + * we're pointing at. + */ + if (level > 0) { + /* + * It's a non-leaf entry. Make a hole for the new data + * in the key and ptr regions of the block. + */ + kp = XFS_INOBT_KEY_ADDR(block, 1, cur); + pp = XFS_INOBT_PTR_ADDR(block, 1, cur); +#ifdef DEBUG + for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) + return error; + } +#endif + memmove(&kp[ptr], &kp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); + memmove(&pp[ptr], &pp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); + /* + * Now stuff the new data in, bump numrecs and log the new data. + */ +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, *bnop, level))) + return error; +#endif + kp[ptr - 1] = key; /* INT_: struct copy */ + INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); + INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); + xfs_inobt_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + xfs_inobt_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + } else { + /* + * It's a leaf entry. Make a hole for the new record. + */ + rp = XFS_INOBT_REC_ADDR(block, 1, cur); + memmove(&rp[ptr], &rp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp)); + /* + * Now stuff the new record in, bump numrecs + * and log the new data. + */ + rp[ptr - 1] = *recp; /* INT_: struct copy */ + INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); + xfs_inobt_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + } + /* + * Log the new number of records in the btree header. 
+ */ + xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); +#ifdef DEBUG + /* + * Check that the key/record is in the right place, now. + */ + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (level == 0) + xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, + rp + ptr); + else + xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, + kp + ptr); + } +#endif + /* + * If we inserted at the start of a block, update the parents' keys. + */ + if (optr == 1 && (error = xfs_inobt_updkey(cur, &key, level + 1))) + return error; + /* + * Return the new block number, if any. + * If there is one, give back a record value and a cursor too. + */ + *bnop = nbno; + if (nbno != NULLAGBLOCK) { + *recp = nrec; /* INT_: struct copy */ + *curp = ncur; + } + *stat = 1; + return 0; +} + +/* + * Log header fields from a btree block. + */ +STATIC void +xfs_inobt_log_block( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* buffer containing btree block */ + int fields) /* mask of fields: XFS_BB_... */ +{ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + static const short offsets[] = { /* table of offsets */ + offsetof(xfs_inobt_block_t, bb_magic), + offsetof(xfs_inobt_block_t, bb_level), + offsetof(xfs_inobt_block_t, bb_numrecs), + offsetof(xfs_inobt_block_t, bb_leftsib), + offsetof(xfs_inobt_block_t, bb_rightsib), + sizeof(xfs_inobt_block_t) + }; + + xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last); + xfs_trans_log_buf(tp, bp, first, last); +} + +/* + * Log keys from a btree block (nonleaf). + */ +STATIC void +xfs_inobt_log_keys( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int kfirst, /* index of first key to log */ + int klast) /* index of last key to log */ +{ + xfs_inobt_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + xfs_inobt_key_t *kp; /* key pointer in btree block */ + int last; /* last byte offset logged */ + + block = XFS_BUF_TO_INOBT_BLOCK(bp); + kp = XFS_INOBT_KEY_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Log block pointer fields from a btree block (nonleaf). + */ +STATIC void +xfs_inobt_log_ptrs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int pfirst, /* index of first pointer to log */ + int plast) /* index of last pointer to log */ +{ + xfs_inobt_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + xfs_inobt_ptr_t *pp; /* block-pointer pointer in btree blk */ + + block = XFS_BUF_TO_INOBT_BLOCK(bp); + pp = XFS_INOBT_PTR_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Log records from a btree block (leaf). 
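+ *
+ * (Editorial worked example, assuming a 16-byte short-form block
+ * header and 16-byte inobt records: logging records 2..3 dirties
+ * buffer bytes 32..63, since first points at &rp[1] and last at one
+ * byte before &rp[3].)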
+ */ +STATIC void +xfs_inobt_log_recs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int rfirst, /* index of first record to log */ + int rlast) /* index of last record to log */ +{ + xfs_inobt_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + xfs_inobt_rec_t *rp; /* record pointer for btree block */ + + block = XFS_BUF_TO_INOBT_BLOCK(bp); + rp = XFS_INOBT_REC_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Lookup the record. The cursor is made to point to it, based on dir. + * Return 0 if can't find any such record, 1 for success. + */ +STATIC int /* error */ +xfs_inobt_lookup( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_lookup_t dir, /* <=, ==, or >= */ + int *stat) /* success/failure */ +{ + xfs_agblock_t agbno; /* a.g. relative btree block number */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_inobt_block_t *block=NULL; /* current btree block */ + int diff; /* difference for the current key */ + int error; /* error return value */ + int keyno=0; /* current key number */ + int level; /* level in the btree */ + xfs_mount_t *mp; /* file system mount point */ + + /* + * Get the allocation group header, and the root block number. + */ + mp = cur->bc_mp; + { + xfs_agi_t *agi; /* a.g. inode header */ + + agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); + agno = INT_GET(agi->agi_seqno, ARCH_CONVERT); + agbno = INT_GET(agi->agi_root, ARCH_CONVERT); + } + /* + * Iterate over each level in the btree, starting at the root. + * For each level above the leaves, find the key we need, based + * on the lookup record, then follow the corresponding block + * pointer down to the next level. + */ + for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { + xfs_buf_t *bp; /* buffer pointer for btree block */ + xfs_daddr_t d; /* disk address of btree block */ + + /* + * Get the disk address we're looking for. + */ + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + /* + * If the old buffer at this level is for a different block, + * throw it away, otherwise just use it. + */ + bp = cur->bc_bufs[level]; + if (bp && XFS_BUF_ADDR(bp) != d) + bp = (xfs_buf_t *)0; + if (!bp) { + /* + * Need to get a new buffer. Read it, then + * set it in the cursor, releasing the old one. + */ + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + agno, agbno, 0, &bp, XFS_INO_BTREE_REF))) + return error; + xfs_btree_setbuf(cur, level, bp); + /* + * Point to the btree block, now that we have the buffer + */ + block = XFS_BUF_TO_INOBT_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, level, + bp))) + return error; + } else + block = XFS_BUF_TO_INOBT_BLOCK(bp); + /* + * If we already had a key match at a higher level, we know + * we need to use the first entry in this block. + */ + if (diff == 0) + keyno = 1; + /* + * Otherwise we need to search this block. Do a binary search. + */ + else { + int high; /* high entry number */ + xfs_inobt_key_t *kkbase=NULL;/* base of keys in block */ + xfs_inobt_rec_t *krbase=NULL;/* base of records in block */ + int low; /* low entry number */ + + /* + * Get a pointer to keys or records. + */ + if (level > 0) + kkbase = XFS_INOBT_KEY_ADDR(block, 1, cur); + else + krbase = XFS_INOBT_REC_ADDR(block, 1, cur); + /* + * Set low and high entry numbers, 1-based. 
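+			 *
+			 * (Editorial worked example of the search below:
+			 * with numrecs == 7 and a lookup key equal to
+			 * entry 3, keyno probes 4 (too high), then 2
+			 * (too low), then 3, where diff == 0 ends the
+			 * loop.)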
+ */ + low = 1; + if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { + /* + * If the block is empty, the tree must + * be an empty leaf. + */ + ASSERT(level == 0 && cur->bc_nlevels == 1); + cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; + *stat = 0; + return 0; + } + /* + * Binary search the block. + */ + while (low <= high) { + xfs_agino_t startino; /* key value */ + + /* + * keyno is average of low and high. + */ + keyno = (low + high) >> 1; + /* + * Get startino. + */ + if (level > 0) { + xfs_inobt_key_t *kkp; + + kkp = kkbase + keyno - 1; + startino = INT_GET(kkp->ir_startino, ARCH_CONVERT); + } else { + xfs_inobt_rec_t *krp; + + krp = krbase + keyno - 1; + startino = INT_GET(krp->ir_startino, ARCH_CONVERT); + } + /* + * Compute difference to get next direction. + */ + diff = (int)startino - cur->bc_rec.i.ir_startino; + /* + * Less than, move right. + */ + if (diff < 0) + low = keyno + 1; + /* + * Greater than, move left. + */ + else if (diff > 0) + high = keyno - 1; + /* + * Equal, we're done. + */ + else + break; + } + } + /* + * If there are more levels, set up for the next level + * by getting the block number and filling in the cursor. + */ + if (level > 0) { + /* + * If we moved left, need the previous key number, + * unless there isn't one. + */ + if (diff > 0 && --keyno < 1) + keyno = 1; + agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, agbno, level))) + return error; +#endif + cur->bc_ptrs[level] = keyno; + } + } + /* + * Done with the search. + * See if we need to adjust the results. + */ + if (dir != XFS_LOOKUP_LE && diff < 0) { + keyno++; + /* + * If ge search and we went off the end of the block, but it's + * not the last block, we're in the wrong block. + */ + if (dir == XFS_LOOKUP_GE && + keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && + INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + int i; + + cur->bc_ptrs[0] = keyno; + if ((error = xfs_inobt_increment(cur, 0, &i))) + return error; + ASSERT(i == 1); + *stat = 1; + return 0; + } + } + else if (dir == XFS_LOOKUP_LE && diff > 0) + keyno--; + cur->bc_ptrs[0] = keyno; + /* + * Return if we succeeded or not. + */ + if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) + *stat = 0; + else + *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); + return 0; +} + +/* + * Move 1 record left from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_inobt_lshift( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to shift record on */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef DEBUG + int i; /* loop index */ +#endif + xfs_inobt_key_t key; /* key value for leaf level upward */ + xfs_buf_t *lbp; /* buffer for left neighbor block */ + xfs_inobt_block_t *left; /* left neighbor btree block */ + xfs_inobt_key_t *lkp=NULL; /* key pointer for left block */ + xfs_inobt_ptr_t *lpp; /* address pointer for left block */ + xfs_inobt_rec_t *lrp=NULL; /* record pointer for left block */ + int nrec; /* new number of left block entries */ + xfs_buf_t *rbp; /* buffer for right (current) block */ + xfs_inobt_block_t *right; /* right (current) btree block */ + xfs_inobt_key_t *rkp=NULL; /* key pointer for right block */ + xfs_inobt_ptr_t *rpp=NULL; /* address pointer for right block */ + xfs_inobt_rec_t *rrp=NULL; /* record pointer for right block */ + + /* + * Set up variables for this block as "right". 
+ */ + rbp = cur->bc_bufs[level]; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; +#endif + /* + * If we've got no left sibling then we can't shift an entry left. + */ + if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + if (cur->bc_ptrs[level] <= 1) { + *stat = 0; + return 0; + } + /* + * Set up the left neighbor as "left". + */ + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, + XFS_INO_BTREE_REF))) + return error; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; + /* + * If it's full, it can't take another entry. + */ + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + *stat = 0; + return 0; + } + nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; + /* + * If non-leaf, copy a key and a ptr to the left block. + */ + if (level > 0) { + lkp = XFS_INOBT_KEY_ADDR(left, nrec, cur); + rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); + *lkp = *rkp; + xfs_inobt_log_keys(cur, lbp, nrec, nrec); + lpp = XFS_INOBT_PTR_ADDR(left, nrec, cur); + rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) + return error; +#endif + *lpp = *rpp; /* INT_: no-change copy */ + xfs_inobt_log_ptrs(cur, lbp, nrec, nrec); + } + /* + * If leaf, copy a record to the left block. + */ + else { + lrp = XFS_INOBT_REC_ADDR(left, nrec, cur); + rrp = XFS_INOBT_REC_ADDR(right, 1, cur); + *lrp = *rrp; + xfs_inobt_log_recs(cur, lbp, nrec, nrec); + } + /* + * Bump and log left's numrecs, decrement and log right's numrecs. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); + xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); +#ifdef DEBUG + if (level > 0) + xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp); + else + xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); +#endif + INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); + xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); + /* + * Slide the contents of right down one entry. + */ + if (level > 0) { +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), + level))) + return error; + } +#endif + memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } else { + memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ + rkp = &key; + } + /* + * Update the parent key values of right. + */ + if ((error = xfs_inobt_updkey(cur, rkp, level + 1))) + return error; + /* + * Slide the cursor value left one. + */ + cur->bc_ptrs[level]--; + *stat = 1; + return 0; +} + +/* + * Allocate a new root block, fill it in. + */ +STATIC int /* error */ +xfs_inobt_newroot( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + xfs_agi_t *agi; /* a.g. 
inode header */ + xfs_alloc_arg_t args; /* allocation argument structure */ + xfs_inobt_block_t *block; /* one half of the old root block */ + xfs_buf_t *bp; /* buffer containing block */ + int error; /* error return value */ + xfs_inobt_key_t *kp; /* btree key pointer */ + xfs_agblock_t lbno; /* left block number */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_inobt_block_t *left; /* left btree block */ + xfs_buf_t *nbp; /* new (root) buffer */ + xfs_inobt_block_t *new; /* new (root) btree block */ + int nptr; /* new value for key index, 1 or 2 */ + xfs_inobt_ptr_t *pp; /* btree address pointer */ + xfs_agblock_t rbno; /* right block number */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_inobt_block_t *right; /* right btree block */ + xfs_inobt_rec_t *rp; /* btree record pointer */ + + ASSERT(cur->bc_nlevels < XFS_IN_MAXLEVELS(cur->bc_mp)); + + /* + * Get a block & a buffer. + */ + agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); + args.tp = cur->bc_tp; + args.mp = cur->bc_mp; + args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, + INT_GET(agi->agi_root, ARCH_CONVERT)); + args.mod = args.minleft = args.alignment = args.total = args.wasdel = + args.isfl = args.userdata = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + args.type = XFS_ALLOCTYPE_NEAR_BNO; + if ((error = xfs_alloc_vextent(&args))) + return error; + /* + * None available, we fail. + */ + if (args.fsbno == NULLFSBLOCK) { + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + nbp = xfs_btree_get_bufs(args.mp, args.tp, args.agno, args.agbno, 0); + new = XFS_BUF_TO_INOBT_BLOCK(nbp); + /* + * Set the root data in the a.g. inode structure. + */ + INT_SET(agi->agi_root, ARCH_CONVERT, args.agbno); + INT_MOD(agi->agi_level, ARCH_CONVERT, 1); + xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, + XFS_AGI_ROOT | XFS_AGI_LEVEL); + /* + * At the previous root level there are now two blocks: the old + * root, and the new block generated when it was split. + * We don't know which one the cursor is pointing at, so we + * set up variables "left" and "right" for each case. + */ + bp = cur->bc_bufs[cur->bc_nlevels - 1]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, cur->bc_nlevels - 1, bp))) + return error; +#endif + if (INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + /* + * Our block is left, pick up the right block. + */ + lbp = bp; + lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp)); + left = block; + rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, + rbno, 0, &rbp, XFS_INO_BTREE_REF))) + return error; + bp = rbp; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, + cur->bc_nlevels - 1, rbp))) + return error; + nptr = 1; + } else { + /* + * Our block is right, pick up the left block. + */ + rbp = bp; + rbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(rbp)); + right = block; + lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, + lbno, 0, &lbp, XFS_INO_BTREE_REF))) + return error; + bp = lbp; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, + cur->bc_nlevels - 1, lbp))) + return error; + nptr = 2; + } + /* + * Fill in the new block's btree header and log it. 
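As a rough picture (not part of the patch; the node type is a simplification, not the on-disk layout), this is the shape xfs_inobt_newroot is building: the old root and its split sibling become the only two children of a freshly allocated root one level up, whose two keys are the first keys of each child:

#include <stdio.h>

struct node {
	int level;		/* like bb_level */
	int numrecs;		/* like bb_numrecs */
	int keys[4];		/* like ir_startino keys */
	struct node *ptrs[4];
};

int main(void)
{
	struct node oldroot = { 0, 2, { 64, 128 }, { 0 } };
	struct node sibling = { 0, 2, { 192, 256 }, { 0 } };
	struct node newroot = { 1, 2, { 0 }, { 0 } };

	/* The new root's two keys are the first keys of each child. */
	newroot.keys[0] = oldroot.keys[0];
	newroot.keys[1] = sibling.keys[0];
	newroot.ptrs[0] = &oldroot;
	newroot.ptrs[1] = &sibling;

	printf("root level %d keys %d %d\n",
	    newroot.level, newroot.keys[0], newroot.keys[1]);
	return 0;
}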
+ */ + INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); + INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels); + INT_SET(new->bb_numrecs, ARCH_CONVERT, 2); + INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + xfs_inobt_log_block(args.tp, nbp, XFS_BB_ALL_BITS); + ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); + /* + * Fill in the key data in the new root. + */ + kp = XFS_INOBT_KEY_ADDR(new, 1, cur); + if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) { + kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur); /* INT_: struct copy */ + kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur); /* INT_: struct copy */ + } else { + rp = XFS_INOBT_REC_ADDR(left, 1, cur); + INT_COPY(kp[0].ir_startino, rp->ir_startino, ARCH_CONVERT); + rp = XFS_INOBT_REC_ADDR(right, 1, cur); + INT_COPY(kp[1].ir_startino, rp->ir_startino, ARCH_CONVERT); + } + xfs_inobt_log_keys(cur, nbp, 1, 2); + /* + * Fill in the pointer data in the new root. + */ + pp = XFS_INOBT_PTR_ADDR(new, 1, cur); + INT_SET(pp[0], ARCH_CONVERT, lbno); + INT_SET(pp[1], ARCH_CONVERT, rbno); + xfs_inobt_log_ptrs(cur, nbp, 1, 2); + /* + * Fix up the cursor. + */ + xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); + cur->bc_ptrs[cur->bc_nlevels] = nptr; + cur->bc_nlevels++; + *stat = 1; + return 0; +} + +/* + * Move 1 record right from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_inobt_rshift( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to shift record on */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* loop index */ + xfs_inobt_key_t key; /* key value for leaf level upward */ + xfs_buf_t *lbp; /* buffer for left (current) block */ + xfs_inobt_block_t *left; /* left (current) btree block */ + xfs_inobt_key_t *lkp; /* key pointer for left block */ + xfs_inobt_ptr_t *lpp; /* address pointer for left block */ + xfs_inobt_rec_t *lrp; /* record pointer for left block */ + xfs_buf_t *rbp; /* buffer for right neighbor block */ + xfs_inobt_block_t *right; /* right neighbor btree block */ + xfs_inobt_key_t *rkp; /* key pointer for right block */ + xfs_inobt_ptr_t *rpp; /* address pointer for right block */ + xfs_inobt_rec_t *rrp=NULL; /* record pointer for right block */ + xfs_btree_cur_t *tcur; /* temporary cursor */ + + /* + * Set up variables for this block as "left". + */ + lbp = cur->bc_bufs[level]; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; +#endif + /* + * If we've got no right sibling then we can't shift an entry right. + */ + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { + *stat = 0; + return 0; + } + /* + * Set up the right neighbor as "right". + */ + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp, + XFS_INO_BTREE_REF))) + return error; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; + /* + * If it's full, it can't take another entry. 
+ */ + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + *stat = 0; + return 0; + } + /* + * Make a hole at the start of the right neighbor block, then + * copy the last left block entry to the hole. + */ + if (level > 0) { + lkp = XFS_INOBT_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lpp = XFS_INOBT_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); + rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) + return error; +#endif + *rkp = *lkp; /* INT_: no change copy */ + *rpp = *lpp; /* INT_: no change copy */ + xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + } else { + lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rrp = XFS_INOBT_REC_ADDR(right, 1, cur); + memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + *rrp = *lrp; + xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ + rkp = &key; + } + /* + * Decrement and log left's numrecs, bump and log right's numrecs. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); + xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); +#ifdef DEBUG + if (level > 0) + xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); + else + xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); +#endif + xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); + /* + * Using a temporary cursor, update the parent key values of the + * block on the right. + */ + if ((error = xfs_btree_dup_cursor(cur, &tcur))) + return error; + xfs_btree_lastrec(tcur, level); + if ((error = xfs_inobt_increment(tcur, level, &i)) || + (error = xfs_inobt_updkey(tcur, rkp, level + 1))) { + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; + } + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + *stat = 1; + return 0; +} + +/* + * Split cur/level block in half. + * Return new block number and its first record (to be inserted into parent). 
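At leaf level the shift below is just "open a hole at the front of the right block, drop the left neighbor's last record into it". A user-space sketch of that step (not part of the patch; plain int arrays stand in for the btree blocks):

#include <stdio.h>
#include <string.h>

int main(void)
{
	int left[4] = { 10, 20, 30 };
	int right[4] = { 40, 50 };
	int lnum = 3, rnum = 2;

	/* Slide right's records up one entry to open slot 0... */
	memmove(&right[1], &right[0], rnum * sizeof(right[0]));
	/* ...then move left's last record into the hole. */
	right[0] = left[lnum - 1];
	lnum--;		/* like INT_MOD(left->bb_numrecs, ..., -1) */
	rnum++;		/* like INT_MOD(right->bb_numrecs, ..., +1) */

	/* left: 10 20; right: 30 40 50; right's parent key becomes 30 */
	printf("right: %d %d %d\n", right[0], right[1], right[2]);
	return 0;
}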
+ */ +STATIC int /* error */ +xfs_inobt_split( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to split */ + xfs_agblock_t *bnop, /* output: block number allocated */ + xfs_inobt_key_t *keyp, /* output: first key of new block */ + xfs_btree_cur_t **curp, /* output: new cursor */ + int *stat) /* success/failure */ +{ + xfs_alloc_arg_t args; /* allocation argument structure */ + int error; /* error return value */ + int i; /* loop index/record number */ + xfs_agblock_t lbno; /* left (current) block number */ + xfs_buf_t *lbp; /* buffer for left block */ + xfs_inobt_block_t *left; /* left (current) btree block */ + xfs_inobt_key_t *lkp; /* left btree key pointer */ + xfs_inobt_ptr_t *lpp; /* left btree address pointer */ + xfs_inobt_rec_t *lrp; /* left btree record pointer */ + xfs_buf_t *rbp; /* buffer for right block */ + xfs_inobt_block_t *right; /* right (new) btree block */ + xfs_inobt_key_t *rkp; /* right btree key pointer */ + xfs_inobt_ptr_t *rpp; /* right btree address pointer */ + xfs_inobt_rec_t *rrp; /* right btree record pointer */ + + /* + * Set up left block (current one). + */ + lbp = cur->bc_bufs[level]; + args.tp = cur->bc_tp; + args.mp = cur->bc_mp; + lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp)); + /* + * Allocate the new block. + * If we can't do it, we're toast. Give up. + */ + args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, lbno); + args.mod = args.minleft = args.alignment = args.total = args.wasdel = + args.isfl = args.userdata = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + args.type = XFS_ALLOCTYPE_NEAR_BNO; + if ((error = xfs_alloc_vextent(&args))) + return error; + if (args.fsbno == NULLFSBLOCK) { + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + rbp = xfs_btree_get_bufs(args.mp, args.tp, args.agno, args.agbno, 0); + /* + * Set up the new block as "right". + */ + right = XFS_BUF_TO_INOBT_BLOCK(rbp); + /* + * "Left" is the current (according to the cursor) block. + */ + left = XFS_BUF_TO_INOBT_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; +#endif + /* + * Fill in the btree header for the new block. + */ + INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); + right->bb_level = left->bb_level; /* INT_: direct copy */ + INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); + /* + * Make sure that if there's an odd number of entries now, that + * each new block will have the same number of entries. + */ + if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && + cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; + /* + * For non-leaf blocks, copy keys and addresses over to the new block. 
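The split point chosen above is worth a worked example: the new right block takes half the records, and when the count is odd the extra one goes to whichever side the cursor is not about to insert into, so both blocks end up equally full after the pending insert. A sketch (not part of the patch):

#include <stdio.h>

int main(void)
{
	int lrecs = 7;	/* records in the block being split */
	int curptr = 3;	/* cursor index within it, 1-based */
	int rrecs, first;

	rrecs = lrecs / 2;	/* right gets floor(half)... */
	if ((lrecs & 1) && curptr <= rrecs + 1)
		rrecs++;	/* ...plus the odd record if the insert goes left */
	first = lrecs - rrecs + 1;	/* 1-based index of first record moved */

	/* Here rrecs = 4: records 4..7 move right, 1..3 stay, and the
	 * pending insert at curptr = 3 brings the left side back to 4. */
	printf("right gets %d records starting at %d\n", rrecs, first);
	return 0;
}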
+ */ + if (level > 0) { + lkp = XFS_INOBT_KEY_ADDR(left, i, cur); + lpp = XFS_INOBT_PTR_ADDR(left, i, cur); + rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); + rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + *keyp = *rkp; + } + /* + * For leaf blocks, copy records over to the new block. + */ + else { + lrp = XFS_INOBT_REC_ADDR(left, i, cur); + rrp = XFS_INOBT_REC_ADDR(right, 1, cur); + memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + keyp->ir_startino = rrp->ir_startino; /* INT_: direct copy */ + } + /* + * Find the left block number by looking in the buffer. + * Adjust numrecs, sibling pointers. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); + right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ + INT_SET(left->bb_rightsib, ARCH_CONVERT, args.agbno); + INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); + xfs_inobt_log_block(args.tp, rbp, XFS_BB_ALL_BITS); + xfs_inobt_log_block(args.tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + /* + * If there's a block to the new block's right, make that block + * point back to right instead of to left. + */ + if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_inobt_block_t *rrblock; /* rr btree block */ + xfs_buf_t *rrbp; /* buffer for rrblock */ + + if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, + INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp, + XFS_INO_BTREE_REF))) + return error; + rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); + if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) + return error; + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.agbno); + xfs_inobt_log_block(args.tp, rrbp, XFS_BB_LEFTSIB); + } + /* + * If the cursor is really in the right block, move it there. + * If it's just pointing past the last entry in left, then we'll + * insert there, so don't change anything in that case. + */ + if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { + xfs_btree_setbuf(cur, level, rbp); + cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * If there are more levels, we'll need another cursor which refers + * the right block, no matter where this cursor was. + */ + if (level + 1 < cur->bc_nlevels) { + if ((error = xfs_btree_dup_cursor(cur, curp))) + return error; + (*curp)->bc_ptrs[level + 1]++; + } + *bnop = args.agbno; + *stat = 1; + return 0; +} + +/* + * Update keys at all levels from here to the root along the cursor's path. + */ +STATIC int /* error */ +xfs_inobt_updkey( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_inobt_key_t *keyp, /* new key value to update to */ + int level) /* starting level for update */ +{ + int ptr; /* index of key in block */ + + /* + * Go up the tree from this level toward the root. + * At each level, update the key value to the value input. + * Stop when we reach a level where the cursor isn't pointing + * at the first entry in the block. 
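A compact model of that loop (not part of the patch; arrays stand in for the per-level blocks): the parent's key for the cursor's child is rewritten at every level, and the walk stops after the first level where the cursor is not sitting on entry 1:

#include <stdio.h>

#define NLEVELS 3

int main(void)
{
	int keys[NLEVELS][4] = { { 5, 9 }, { 5, 30 }, { 1, 5 } };
	int ptrs[NLEVELS] = { 1, 1, 2 };	/* cursor path, 1-based */
	int newkey = 7, level, ptr;

	for (ptr = 1, level = 1; ptr == 1 && level < NLEVELS; level++) {
		ptr = ptrs[level];
		keys[level][ptr - 1] = newkey;	/* update, continue only if ptr == 1 */
	}
	/* keys[1][0] and keys[2][1] are now 7; the walk stopped at level 2. */
	printf("%d %d\n", keys[1][0], keys[2][1]);
	return 0;
}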
+ */ + for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { + xfs_buf_t *bp; /* buffer for block */ + xfs_inobt_block_t *block; /* btree block */ +#ifdef DEBUG + int error; /* error return value */ +#endif + xfs_inobt_key_t *kp; /* ptr to btree block keys */ + + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; +#endif + ptr = cur->bc_ptrs[level]; + kp = XFS_INOBT_KEY_ADDR(block, ptr, cur); + *kp = *keyp; + xfs_inobt_log_keys(cur, bp, ptr, ptr); + } + return 0; +} + +/* + * Externally visible routines. + */ + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_inobt_decrement( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat) /* success/failure */ +{ + xfs_inobt_block_t *block; /* btree block */ + int error; + int lev; /* btree level */ + + ASSERT(level < cur->bc_nlevels); + /* + * Read-ahead to the left at this level. + */ + xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); + /* + * Decrement the ptr at this level. If we're still in the block + * then we're done. + */ + if (--cur->bc_ptrs[level] > 0) { + *stat = 1; + return 0; + } + /* + * Get a pointer to the btree block. + */ + block = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[level]); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, + cur->bc_bufs[level]))) + return error; +#endif + /* + * If we just went off the left edge of the tree, return failure. + */ + if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * March up the tree decrementing pointers. + * Stop when we don't go off the left edge of a block. + */ + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + if (--cur->bc_ptrs[lev] > 0) + break; + /* + * Read-ahead the left block, we're going to read it + * in the next loop. + */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); + } + /* + * If we went off the root then we are seriously confused. + */ + ASSERT(lev < cur->bc_nlevels); + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. + */ + for (block = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); lev > level; ) { + xfs_agblock_t agbno; /* block number of btree block */ + xfs_buf_t *bp; /* buffer containing btree block */ + + agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, agbno, 0, &bp, + XFS_INO_BTREE_REF))) + return error; + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_INOBT_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; + cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); + } + *stat = 1; + return 0; +} + +#ifdef _NOTYET_ +/* + * Delete the record pointed to by cur. + * The cursor refers to the place where the record was (could be inserted) + * when the operation returns. + */ +int /* error */ +xfs_inobt_delete( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; + int i; /* result code */ + int level; /* btree level */ + + /* + * Go up the tree, starting at leaf level. + * If 2 is returned then a join was done; go to the next level. + * Otherwise we are done. 
+	 */
+	for (level = 0, i = 2; i == 2; level++) {
+		if ((error = xfs_inobt_delrec(cur, level, &i)))
+			return error;
+	}
+	if (i == 0) {
+		for (level = 1; level < cur->bc_nlevels; level++) {
+			if (cur->bc_ptrs[level] == 0) {
+				if ((error = xfs_inobt_decrement(cur, level,
+						&i)))
+					return error;
+				break;
+			}
+		}
+	}
+	*stat = i;
+	return 0;
+}
+#endif	/* _NOTYET_ */
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int					/* error */
+xfs_inobt_get_rec(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	xfs_agino_t	*ino,		/* output: starting inode of chunk */
+	__int32_t	*fcnt,		/* output: number of free inodes */
+	xfs_inofree_t	*free,		/* output: free inode mask */
+	int		*stat,		/* output: success/failure */
+	xfs_arch_t	arch)		/* input: architecture */
+{
+	xfs_inobt_block_t *block;	/* btree block */
+	xfs_buf_t	*bp;		/* buffer containing btree block */
+#ifdef DEBUG
+	int		error;		/* error return value */
+#endif
+	int		ptr;		/* record number */
+	xfs_inobt_rec_t	*rec;		/* record data */
+
+	bp = cur->bc_bufs[0];
+	ptr = cur->bc_ptrs[0];
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, 0, bp)))
+		return error;
+#endif
+	/*
+	 * Off the right end or left end, return failure.
+	 */
+	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * Point to the record and extract its data.
+	 */
+	rec = XFS_INOBT_REC_ADDR(block, ptr, cur);
+	ASSERT(arch == ARCH_NOCONVERT || arch == ARCH_CONVERT);
+	if (arch == ARCH_NOCONVERT) {
+		*ino = INT_GET(rec->ir_startino, ARCH_CONVERT);
+		*fcnt = INT_GET(rec->ir_freecount, ARCH_CONVERT);
+		*free = INT_GET(rec->ir_free, ARCH_CONVERT);
+	} else {
+		INT_COPY(*ino, rec->ir_startino, ARCH_CONVERT);
+		INT_COPY(*fcnt, rec->ir_freecount, ARCH_CONVERT);
+		INT_COPY(*free, rec->ir_free, ARCH_CONVERT);
+	}
+	*stat = 1;
+	return 0;
+}
+
+/*
+ * Increment cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_inobt_increment(
+	xfs_btree_cur_t	*cur,		/* btree cursor */
+	int		level,		/* level in btree, 0 is leaf */
+	int		*stat)		/* success/failure */
+{
+	xfs_inobt_block_t *block;	/* btree block */
+	xfs_buf_t	*bp;		/* buffer containing btree block */
+	int		error;		/* error return value */
+	int		lev;		/* btree level */
+
+	ASSERT(level < cur->bc_nlevels);
+	/*
+	 * Read-ahead to the right at this level.
+	 */
+	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+	/*
+	 * Get a pointer to the btree block.
+	 */
+	bp = cur->bc_bufs[level];
+	block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
+		return error;
+#endif
+	/*
+	 * Increment the ptr at this level.  If we're still in the block
+	 * then we're done.
+	 */
+	if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		*stat = 1;
+		return 0;
+	}
+	/*
+	 * If we just went off the right edge of the tree, return failure.
+	 */
+	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+		*stat = 0;
+		return 0;
+	}
+	/*
+	 * March up the tree incrementing pointers.
+	 * Stop when we don't go off the right edge of a block.
+	 */
+	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+		bp = cur->bc_bufs[lev];
+		block = XFS_BUF_TO_INOBT_BLOCK(bp);
+#ifdef DEBUG
+		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
+			return error;
+#endif
+		if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT))
+			break;
+		/*
+		 * Read-ahead the right block, we're going to read it
+		 * in the next loop.
+ */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); + } + /* + * If we went off the root then we are seriously confused. + */ + ASSERT(lev < cur->bc_nlevels); + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. + */ + for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_INOBT_BLOCK(bp); + lev > level; ) { + xfs_agblock_t agbno; /* block number of btree block */ + + agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, agbno, 0, &bp, + XFS_INO_BTREE_REF))) + return error; + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_INOBT_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; + cur->bc_ptrs[lev] = 1; + } + *stat = 1; + return 0; +} + +/* + * Insert the current record at the point referenced by cur. + * The cursor may be inconsistent on return if splits have been done. + */ +int /* error */ +xfs_inobt_insert( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* result value, 0 for failure */ + int level; /* current level number in btree */ + xfs_agblock_t nbno; /* new block number (split result) */ + xfs_btree_cur_t *ncur; /* new cursor (split result) */ + xfs_inobt_rec_t nrec; /* record being inserted this level */ + xfs_btree_cur_t *pcur; /* previous level's cursor */ + + level = 0; + nbno = NULLAGBLOCK; + INT_SET(nrec.ir_startino, ARCH_CONVERT, cur->bc_rec.i.ir_startino); + INT_SET(nrec.ir_freecount, ARCH_CONVERT, cur->bc_rec.i.ir_freecount); + INT_SET(nrec.ir_free, ARCH_CONVERT, cur->bc_rec.i.ir_free); + ncur = (xfs_btree_cur_t *)0; + pcur = cur; + /* + * Loop going up the tree, starting at the leaf level. + * Stop when we don't get a split block, that must mean that + * the insert is finished with this level. + */ + do { + /* + * Insert nrec/nbno into this level of the tree. + * Note if we fail, nbno will be null. + */ + if ((error = xfs_inobt_insrec(pcur, level++, &nbno, &nrec, &ncur, + &i))) { + if (pcur != cur) + xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); + return error; + } + /* + * See if the cursor we just used is trash. + * Can't trash the caller's cursor, but otherwise we should + * if ncur is a new cursor or we're about to be done. + */ + if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) { + cur->bc_nlevels = pcur->bc_nlevels; + xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); + } + /* + * If we got a new cursor, switch to it. + */ + if (ncur) { + pcur = ncur; + ncur = (xfs_btree_cur_t *)0; + } + } while (nbno != NULLAGBLOCK); + *stat = i; + return 0; +} + +/* + * Lookup the record equal to ino in the btree given by cur. + */ +int /* error */ +xfs_inobt_lookup_eq( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat) /* success/failure */ +{ + cur->bc_rec.i.ir_startino = ino; + cur->bc_rec.i.ir_freecount = fcnt; + cur->bc_rec.i.ir_free = free; + return xfs_inobt_lookup(cur, XFS_LOOKUP_EQ, stat); +} + +/* + * Lookup the first record greater than or equal to ino + * in the btree given by cur. 
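These cursor routines compose into a simple forward scan. A usage sketch in the same style (not part of the patch; the function name is invented, the cursor is assumed to come from xfs_btree_init_cursor, and this is the same lookup_ge/get_rec/increment pattern the DEBUG freecount check in xfs_dialloc uses):

STATIC int				/* error */
xfs_inobt_count_free_example(
	xfs_btree_cur_t	*cur,		/* initialized inobt cursor */
	int		*nfree)		/* output: total free inodes */
{
	int		error;		/* error return value */
	int		i;		/* lookup/record status */
	xfs_agino_t	ino;		/* chunk starting inode */
	__int32_t	fcnt;		/* free inodes in this chunk */
	xfs_inofree_t	free;		/* free mask for this chunk */

	*nfree = 0;
	if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
		return error;
	while (i == 1) {
		if ((error = xfs_inobt_get_rec(cur, &ino, &fcnt, &free,
				&i, ARCH_NOCONVERT)))
			return error;
		if (i != 1)
			break;
		*nfree += fcnt;
		if ((error = xfs_inobt_increment(cur, 0, &i)))
			return error;
	}
	return 0;
}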
+ */ +int /* error */ +xfs_inobt_lookup_ge( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat) /* success/failure */ +{ + cur->bc_rec.i.ir_startino = ino; + cur->bc_rec.i.ir_freecount = fcnt; + cur->bc_rec.i.ir_free = free; + return xfs_inobt_lookup(cur, XFS_LOOKUP_GE, stat); +} + +/* + * Lookup the first record less than or equal to ino + * in the btree given by cur. + */ +int /* error */ +xfs_inobt_lookup_le( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat) /* success/failure */ +{ + cur->bc_rec.i.ir_startino = ino; + cur->bc_rec.i.ir_freecount = fcnt; + cur->bc_rec.i.ir_free = free; + return xfs_inobt_lookup(cur, XFS_LOOKUP_LE, stat); +} + +/* + * Update the record referred to by cur, to the value given + * by [ino, fcnt, free]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +int /* error */ +xfs_inobt_update( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free) /* free inode mask */ +{ + xfs_inobt_block_t *block; /* btree block to update */ + xfs_buf_t *bp; /* buffer containing btree block */ + int error; /* error return value */ + int ptr; /* current record number (updating) */ + xfs_inobt_rec_t *rp; /* pointer to updated record */ + + /* + * Pick up the current block. + */ + bp = cur->bc_bufs[0]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, 0, bp))) + return error; +#endif + /* + * Get the address of the rec to be updated. + */ + ptr = cur->bc_ptrs[0]; + rp = XFS_INOBT_REC_ADDR(block, ptr, cur); + /* + * Fill in the new contents and log them. + */ + INT_SET(rp->ir_startino, ARCH_CONVERT, ino); + INT_SET(rp->ir_freecount, ARCH_CONVERT, fcnt); + INT_SET(rp->ir_free, ARCH_CONVERT, free); + xfs_inobt_log_recs(cur, bp, ptr, ptr); + /* + * Updating first record in leaf. Pass new key value up to our parent. + */ + if (ptr == 1) { + xfs_inobt_key_t key; /* key containing [ino] */ + + INT_SET(key.ir_startino, ARCH_CONVERT, ino); + if ((error = xfs_inobt_updkey(cur, &key, 1))) + return error; + } + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_ialloc_btree.h linux.21rc5-ac2/fs/xfs/xfs_ialloc_btree.h --- linux.21rc5/fs/xfs/xfs_ialloc_btree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_ialloc_btree.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,318 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_IALLOC_BTREE_H__ +#define __XFS_IALLOC_BTREE_H__ + +/* + * Inode map on-disk structures + */ + +struct xfs_buf; +struct xfs_btree_cur; +struct xfs_btree_sblock; +struct xfs_mount; + +/* + * There is a btree for the inode map per allocation group. + */ +#define XFS_IBT_MAGIC 0x49414254 /* 'IABT' */ + +typedef __uint64_t xfs_inofree_t; +#define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t)) +#define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3) +#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1) + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_MASKN) +xfs_inofree_t xfs_inobt_maskn(int i, int n); +#define XFS_INOBT_MASKN(i,n) xfs_inobt_maskn(i,n) +#else +#define XFS_INOBT_MASKN(i,n) \ + ((((n) >= XFS_INODES_PER_CHUNK ? \ + (xfs_inofree_t)0 : ((xfs_inofree_t)1 << (n))) - 1) << (i)) +#endif + +/* + * Data record structure + */ +typedef struct xfs_inobt_rec +{ + xfs_agino_t ir_startino; /* starting inode number */ + __int32_t ir_freecount; /* count of free inodes (set bits) */ + xfs_inofree_t ir_free; /* free inode mask */ +} xfs_inobt_rec_t; + +/* + * Key structure + */ +typedef struct xfs_inobt_key +{ + xfs_agino_t ir_startino; /* starting inode number */ +} xfs_inobt_key_t; + +typedef xfs_agblock_t xfs_inobt_ptr_t; /* btree pointer type */ + /* btree block header type */ +typedef struct xfs_btree_sblock xfs_inobt_block_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_INOBT_BLOCK) +xfs_inobt_block_t *xfs_buf_to_inobt_block(struct xfs_buf *bp); +#define XFS_BUF_TO_INOBT_BLOCK(bp) xfs_buf_to_inobt_block(bp) +#else +#define XFS_BUF_TO_INOBT_BLOCK(bp) ((xfs_inobt_block_t *)(XFS_BUF_PTR(bp))) +#endif + +/* + * Bit manipulations for ir_free. 
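For reference, a user-space sketch (not part of the patch) of the arithmetic behind XFS_INOBT_MASKN above: build a mask of n free-inode bits starting at bit i, with the n >= 64 case saturating to all ones because (0 - 1) wraps in unsigned arithmetic:

#include <stdio.h>

typedef unsigned long long inofree_t;
#define INODES_PER_CHUNK 64

static inofree_t maskn(int i, int n)
{
	return ((n >= INODES_PER_CHUNK ?
	    (inofree_t)0 : ((inofree_t)1 << n)) - 1) << i;
}

int main(void)
{
	printf("%llx\n", maskn(0, 4));	/* f: bits 0..3 */
	printf("%llx\n", maskn(4, 8));	/* ff0: bits 4..11 */
	printf("%llx\n", maskn(0, 64));	/* ffffffffffffffff: whole chunk */
	return 0;
}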
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_MASK) +xfs_inofree_t xfs_inobt_mask(int i); +#define XFS_INOBT_MASK(i) xfs_inobt_mask(i) +#else +#define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_IS_FREE) +int xfs_inobt_is_free(xfs_inobt_rec_t *rp, int i, xfs_arch_t arch); +#define XFS_INOBT_IS_FREE(rp,i,arch) xfs_inobt_is_free(rp,i,arch) +#else +#define XFS_INOBT_IS_FREE(rp,i,arch) ((INT_GET((rp)->ir_free, arch) \ + & XFS_INOBT_MASK(i)) != 0) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_SET_FREE) +void xfs_inobt_set_free(xfs_inobt_rec_t *rp, int i, xfs_arch_t arch); +#define XFS_INOBT_SET_FREE(rp,i,arch) xfs_inobt_set_free(rp,i,arch) +#else +#define XFS_INOBT_SET_FREE(rp,i,arch) (INT_MOD_EXPR((rp)->ir_free, arch, |= XFS_INOBT_MASK(i))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_CLR_FREE) +void xfs_inobt_clr_free(xfs_inobt_rec_t *rp, int i, xfs_arch_t arch); +#define XFS_INOBT_CLR_FREE(rp,i,arch) xfs_inobt_clr_free(rp,i,arch) +#else +#define XFS_INOBT_CLR_FREE(rp,i,arch) (INT_MOD_EXPR((rp)->ir_free, arch, &= ~XFS_INOBT_MASK(i))) +#endif + +/* + * Real block structures have a size equal to the disk block size. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_BLOCK_SIZE) +int xfs_inobt_block_size(int lev, struct xfs_btree_cur *cur); +#define XFS_INOBT_BLOCK_SIZE(lev,cur) xfs_inobt_block_size(lev,cur) +#else +#define XFS_INOBT_BLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_BLOCK_MAXRECS) +int xfs_inobt_block_maxrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_INOBT_BLOCK_MAXRECS(lev,cur) xfs_inobt_block_maxrecs(lev,cur) +#else +#define XFS_INOBT_BLOCK_MAXRECS(lev,cur) \ + ((cur)->bc_mp->m_inobt_mxr[lev != 0]) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_BLOCK_MINRECS) +int xfs_inobt_block_minrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_INOBT_BLOCK_MINRECS(lev,cur) xfs_inobt_block_minrecs(lev,cur) +#else +#define XFS_INOBT_BLOCK_MINRECS(lev,cur) \ + ((cur)->bc_mp->m_inobt_mnr[lev != 0]) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_IS_LAST_REC) +int xfs_inobt_is_last_rec(struct xfs_btree_cur *cur); +#define XFS_INOBT_IS_LAST_REC(cur) xfs_inobt_is_last_rec(cur) +#else +#define XFS_INOBT_IS_LAST_REC(cur) \ + ((cur)->bc_ptrs[0] == \ + INT_GET(XFS_BUF_TO_INOBT_BLOCK((cur)->bc_bufs[0])->bb_numrecs, ARCH_CONVERT)) +#endif + +/* + * Maximum number of inode btree levels. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IN_MAXLEVELS) +int xfs_in_maxlevels(struct xfs_mount *mp); +#define XFS_IN_MAXLEVELS(mp) xfs_in_maxlevels(mp) +#else +#define XFS_IN_MAXLEVELS(mp) ((mp)->m_in_maxlevels) +#endif + +/* + * block numbers in the AG. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IBT_BLOCK) +xfs_agblock_t xfs_ibt_block(struct xfs_mount *mp); +#define XFS_IBT_BLOCK(mp) xfs_ibt_block(mp) +#else +#define XFS_IBT_BLOCK(mp) ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_PREALLOC_BLOCKS) +xfs_agblock_t xfs_prealloc_blocks(struct xfs_mount *mp); +#define XFS_PREALLOC_BLOCKS(mp) xfs_prealloc_blocks(mp) +#else +#define XFS_PREALLOC_BLOCKS(mp) ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1)) +#endif + +/* + * Record, key, and pointer address macros for btree blocks. 
+ */
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_REC_ADDR)
+xfs_inobt_rec_t *
+xfs_inobt_rec_addr(xfs_inobt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define	XFS_INOBT_REC_ADDR(bb,i,cur)	xfs_inobt_rec_addr(bb,i,cur)
+#else
+#define	XFS_INOBT_REC_ADDR(bb,i,cur) \
+	XFS_BTREE_REC_ADDR(XFS_INOBT_BLOCK_SIZE(0,cur), xfs_inobt, bb, i, \
+				XFS_INOBT_BLOCK_MAXRECS(0, cur))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_KEY_ADDR)
+xfs_inobt_key_t *
+xfs_inobt_key_addr(xfs_inobt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define	XFS_INOBT_KEY_ADDR(bb,i,cur)	xfs_inobt_key_addr(bb,i,cur)
+#else
+#define	XFS_INOBT_KEY_ADDR(bb,i,cur) \
+	XFS_BTREE_KEY_ADDR(XFS_INOBT_BLOCK_SIZE(1,cur), xfs_inobt, bb, i, \
+				XFS_INOBT_BLOCK_MAXRECS(1, cur))
+#endif
+
+#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_PTR_ADDR)
+xfs_inobt_ptr_t *
+xfs_inobt_ptr_addr(xfs_inobt_block_t *bb, int i, struct xfs_btree_cur *cur);
+#define	XFS_INOBT_PTR_ADDR(bb,i,cur)	xfs_inobt_ptr_addr(bb,i,cur)
+#else
+#define	XFS_INOBT_PTR_ADDR(bb,i,cur) \
+	XFS_BTREE_PTR_ADDR(XFS_INOBT_BLOCK_SIZE(1,cur), xfs_inobt, bb, i, \
+				XFS_INOBT_BLOCK_MAXRECS(1, cur))
+#endif
+
+/*
+ * Prototypes for externally visible routines.
+ */
+
+/*
+ * Decrement cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_inobt_decrement(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat);	/* success/failure */
+
+#ifdef _NOTYET_
+/*
+ * Delete the record pointed to by cur.
+ * The cursor refers to the place where the record was (could be inserted)
+ * when the operation returns.
+ */
+int					/* error */
+xfs_inobt_delete(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			*stat);	/* success/failure */
+#endif	/* _NOTYET_ */
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int					/* error */
+xfs_inobt_get_rec(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agino_t		*ino,	/* output: starting inode of chunk */
+	__int32_t		*fcnt,	/* output: number of free inodes */
+	xfs_inofree_t		*free,	/* output: free inode mask */
+	int			*stat,	/* output: success/failure */
+	xfs_arch_t		arch);	/* input: architecture */
+
+/*
+ * Increment cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int					/* error */
+xfs_inobt_increment(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			level,	/* level in btree, 0 is leaf */
+	int			*stat);	/* success/failure */
+
+/*
+ * Insert the current record at the point referenced by cur.
+ * The cursor may be inconsistent on return if splits have been done.
+ */
+int					/* error */
+xfs_inobt_insert(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	int			*stat);	/* success/failure */
+
+/*
+ * Lookup the record equal to ino in the btree given by cur.
+ */
+int					/* error */
+xfs_inobt_lookup_eq(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agino_t		ino,	/* starting inode of chunk */
+	__int32_t		fcnt,	/* free inode count */
+	xfs_inofree_t		free,	/* free inode mask */
+	int			*stat);	/* success/failure */
+
+/*
+ * Lookup the first record greater than or equal to ino
+ * in the btree given by cur.
+ */ +int /* error */ +xfs_inobt_lookup_ge( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat); /* success/failure */ + +/* + * Lookup the first record less than or equal to ino + * in the btree given by cur. + */ +int /* error */ +xfs_inobt_lookup_le( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat); /* success/failure */ + +/* + * Update the record referred to by cur, to the value given + * by [ino, fcnt, free]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +int /* error */ +xfs_inobt_update( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free); /* free inode mask */ + +#endif /* __XFS_IALLOC_BTREE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_ialloc.c linux.21rc5-ac2/fs/xfs/xfs_ialloc.c --- linux.21rc5/fs/xfs/xfs_ialloc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_ialloc.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1379 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" + +/* + * Log specified fields for the inode given by bp and off. 
+ */ +STATIC void +xfs_ialloc_log_di( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* inode buffer */ + int off, /* index of inode in buffer */ + int fields) /* bitmask of fields to log */ +{ + int first; /* first byte number */ + int ioffset; /* off in bytes */ + int last; /* last byte number */ + xfs_mount_t *mp; /* mount point structure */ + static const short offsets[] = { /* field offsets */ + /* keep in sync with bits */ + offsetof(xfs_dinode_core_t, di_magic), + offsetof(xfs_dinode_core_t, di_mode), + offsetof(xfs_dinode_core_t, di_version), + offsetof(xfs_dinode_core_t, di_format), + offsetof(xfs_dinode_core_t, di_onlink), + offsetof(xfs_dinode_core_t, di_uid), + offsetof(xfs_dinode_core_t, di_gid), + offsetof(xfs_dinode_core_t, di_nlink), + offsetof(xfs_dinode_core_t, di_projid), + offsetof(xfs_dinode_core_t, di_pad), + offsetof(xfs_dinode_core_t, di_atime), + offsetof(xfs_dinode_core_t, di_mtime), + offsetof(xfs_dinode_core_t, di_ctime), + offsetof(xfs_dinode_core_t, di_size), + offsetof(xfs_dinode_core_t, di_nblocks), + offsetof(xfs_dinode_core_t, di_extsize), + offsetof(xfs_dinode_core_t, di_nextents), + offsetof(xfs_dinode_core_t, di_anextents), + offsetof(xfs_dinode_core_t, di_forkoff), + offsetof(xfs_dinode_core_t, di_aformat), + offsetof(xfs_dinode_core_t, di_dmevmask), + offsetof(xfs_dinode_core_t, di_dmstate), + offsetof(xfs_dinode_core_t, di_flags), + offsetof(xfs_dinode_core_t, di_gen), + offsetof(xfs_dinode_t, di_next_unlinked), + offsetof(xfs_dinode_t, di_u), + offsetof(xfs_dinode_t, di_a), + sizeof(xfs_dinode_t) + }; + + + ASSERT(offsetof(xfs_dinode_t, di_core) == 0); + ASSERT((fields & (XFS_DI_U|XFS_DI_A)) == 0); + mp = tp->t_mountp; + /* + * Get the inode-relative first and last bytes for these fields + */ + xfs_btree_offsets(fields, offsets, XFS_DI_NUM_BITS, &first, &last); + /* + * Convert to buffer offsets and log it. + */ + ioffset = off << mp->m_sb.sb_inodelog; + first += ioffset; + last += ioffset; + xfs_trans_log_buf(tp, bp, first, last); +} + +/* + * Allocation group level functions. + */ + +/* + * Allocate new inodes in the allocation group specified by agbp. + * Return 0 for success, else error code. + */ +STATIC int /* error code or 0 */ +xfs_ialloc_ag_alloc( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* alloc group buffer */ + int *alloc) +{ + xfs_agi_t *agi; /* allocation group header */ + xfs_alloc_arg_t args; /* allocation argument structure */ + int blks_per_cluster; /* fs blocks per inode cluster */ + xfs_btree_cur_t *cur; /* inode btree cursor */ + xfs_daddr_t d; /* disk addr of buffer */ + int error; + xfs_buf_t *fbuf; /* new free inodes' buffer */ + xfs_dinode_t *free; /* new free inode structure */ + int i; /* inode counter */ + int j; /* block counter */ + int nbufs; /* num bufs of new inodes */ + xfs_agino_t newino; /* new first inode's number */ + xfs_agino_t newlen; /* new number of inodes */ + int ninodes; /* num inodes per buf */ + xfs_agino_t thisino; /* current inode number, for loop */ + int version; /* inode version number to use */ + static xfs_timestamp_t ztime; /* zero xfs timestamp */ + int isaligned; /* inode allocation at stripe unit */ + /* boundary */ + xfs_dinode_core_t dic; /* a dinode_core to copy to new */ + /* inodes */ + + args.tp = tp; + args.mp = tp->t_mountp; + + /* + * Locking will ensure that we don't have two callers in here + * at one time. 
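The offsets[] table above drives a neat trick: given a bitmask of dirty fields, the byte range to log runs from the lowest set field's offset to one byte before the offset of the entry after the highest set field (the table is terminated by the struct size for exactly this reason). A standalone sketch of what xfs_btree_offsets computes (not part of the patch; the struct is invented):

#include <stddef.h>
#include <stdio.h>

struct dinode { int magic; short mode; char version; char format; long long size; };

static const short offsets[] = {
	offsetof(struct dinode, magic),
	offsetof(struct dinode, mode),
	offsetof(struct dinode, version),
	offsetof(struct dinode, format),
	offsetof(struct dinode, size),
	sizeof(struct dinode)		/* terminator */
};
#define NBITS 5

int main(void)
{
	int fields = (1 << 1) | (1 << 2);	/* mode and version dirty */
	int low, high;

	for (low = 0; !(fields & (1 << low)); low++)
		;
	for (high = NBITS - 1; !(fields & (1 << high)); high--)
		;
	/* Smallest contiguous byte range covering every dirty field. */
	printf("log bytes %d..%d\n", (int)offsets[low], (int)(offsets[high + 1] - 1));
	return 0;
}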
+ */ + newlen = XFS_IALLOC_INODES(args.mp); + if (args.mp->m_maxicount && + args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) + return XFS_ERROR(ENOSPC); + args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp); + /* + * Set the alignment for the allocation. + * If stripe alignment is turned on then align at stripe unit + * boundary. + * If the cluster size is smaller than a filesystem block + * then we're doing I/O for inodes in filesystem block size pieces, + * so don't need alignment anyway. + */ + isaligned = 0; + if (args.mp->m_sinoalign) { + ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN)); + args.alignment = args.mp->m_dalign; + isaligned = 1; + } else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) && + args.mp->m_sb.sb_inoalignmt >= + XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp))) + args.alignment = args.mp->m_sb.sb_inoalignmt; + else + args.alignment = 1; + agi = XFS_BUF_TO_AGI(agbp); + /* + * Need to figure out where to allocate the inode blocks. + * Ideally they should be spaced out through the a.g. + * For now, just allocate blocks up front. + */ + args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT); + args.fsbno = XFS_AGB_TO_FSB(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT), + args.agbno); + /* + * Allocate a fixed-size extent of inodes. + */ + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.mod = args.total = args.wasdel = args.isfl = args.userdata = + args.minalignslop = 0; + args.prod = 1; + /* + * Allow space for the inode btree to split. + */ + args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1; + if ((error = xfs_alloc_vextent(&args))) + return error; + + /* + * If stripe alignment is turned on, then try again with cluster + * alignment. + */ + if (isaligned && args.fsbno == NULLFSBLOCK) { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT); + args.fsbno = XFS_AGB_TO_FSB(args.mp, + INT_GET(agi->agi_seqno, ARCH_CONVERT), args.agbno); + if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) && + args.mp->m_sb.sb_inoalignmt >= + XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp))) + args.alignment = args.mp->m_sb.sb_inoalignmt; + else + args.alignment = 1; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + + if (args.fsbno == NULLFSBLOCK) { + *alloc = 0; + return 0; + } + ASSERT(args.len == args.minlen); + /* + * Convert the results. + */ + newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0); + /* + * Loop over the new block(s), filling in the inodes. + * For small block sizes, manipulate the inodes in buffers + * which are multiples of the blocks size. + */ + if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) { + blks_per_cluster = 1; + nbufs = (int)args.len; + ninodes = args.mp->m_sb.sb_inopblock; + } else { + blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) / + args.mp->m_sb.sb_blocksize; + nbufs = (int)args.len / blks_per_cluster; + ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock; + } + /* + * Figure out what version number to use in the inodes we create. + * If the superblock version has caught up to the one that supports + * the new inode format, then use the new inode version. Otherwise + * use the old version so that old kernels will continue to be + * able to use the file system. + */ + if (XFS_SB_VERSION_HASNLINK(&args.mp->m_sb)) + version = XFS_DINODE_VERSION_2; + else + version = XFS_DINODE_VERSION_1; + for (j = 0; j < nbufs; j++) { + /* + * Get the block. 
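The buffer sizing a few lines up is simple arithmetic but easy to misread; when a cluster spans several filesystem blocks, inodes are initialized through nbufs buffers of blks_per_cluster blocks each. A sketch with made-up geometry (not part of the patch):

#include <stdio.h>

int main(void)
{
	int blocksize = 4096;		/* sb_blocksize */
	int cluster = 8192;		/* XFS_INODE_CLUSTER_SIZE */
	int inopblock = 16;		/* inodes per block (256-byte inodes) */
	int len = 8;			/* blocks allocated for the chunk */
	int blks_per_cluster, nbufs, ninodes;

	if (blocksize >= cluster) {
		blks_per_cluster = 1;
		nbufs = len;
		ninodes = inopblock;
	} else {
		blks_per_cluster = cluster / blocksize;
		nbufs = len / blks_per_cluster;
		ninodes = blks_per_cluster * inopblock;
	}
	/* Here: 2 blocks per cluster, 4 buffers, 32 inodes per buffer. */
	printf("%d %d %d\n", blks_per_cluster, nbufs, ninodes);
	return 0;
}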
+ */ + d = XFS_AGB_TO_DADDR(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT), + args.agbno + (j * blks_per_cluster)); + fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d, + args.mp->m_bsize * blks_per_cluster, + XFS_BUF_LOCK); + ASSERT(fbuf); + ASSERT(!XFS_BUF_GETERROR(fbuf)); + /* + * Loop over the inodes in this buffer. + */ + INT_SET(dic.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC); + INT_ZERO(dic.di_mode, ARCH_CONVERT); + INT_SET(dic.di_version, ARCH_CONVERT, version); + INT_ZERO(dic.di_format, ARCH_CONVERT); + INT_ZERO(dic.di_onlink, ARCH_CONVERT); + INT_ZERO(dic.di_uid, ARCH_CONVERT); + INT_ZERO(dic.di_gid, ARCH_CONVERT); + INT_ZERO(dic.di_nlink, ARCH_CONVERT); + INT_ZERO(dic.di_projid, ARCH_CONVERT); + memset(&(dic.di_pad[0]), 0, sizeof(dic.di_pad)); + INT_SET(dic.di_atime.t_sec, ARCH_CONVERT, ztime.t_sec); + INT_SET(dic.di_atime.t_nsec, ARCH_CONVERT, ztime.t_nsec); + + INT_SET(dic.di_mtime.t_sec, ARCH_CONVERT, ztime.t_sec); + INT_SET(dic.di_mtime.t_nsec, ARCH_CONVERT, ztime.t_nsec); + + INT_SET(dic.di_ctime.t_sec, ARCH_CONVERT, ztime.t_sec); + INT_SET(dic.di_ctime.t_nsec, ARCH_CONVERT, ztime.t_nsec); + + INT_ZERO(dic.di_size, ARCH_CONVERT); + INT_ZERO(dic.di_nblocks, ARCH_CONVERT); + INT_ZERO(dic.di_extsize, ARCH_CONVERT); + INT_ZERO(dic.di_nextents, ARCH_CONVERT); + INT_ZERO(dic.di_anextents, ARCH_CONVERT); + INT_ZERO(dic.di_forkoff, ARCH_CONVERT); + INT_ZERO(dic.di_aformat, ARCH_CONVERT); + INT_ZERO(dic.di_dmevmask, ARCH_CONVERT); + INT_ZERO(dic.di_dmstate, ARCH_CONVERT); + INT_ZERO(dic.di_flags, ARCH_CONVERT); + INT_ZERO(dic.di_gen, ARCH_CONVERT); + + for (i = 0; i < ninodes; i++) { + free = XFS_MAKE_IPTR(args.mp, fbuf, i); + memcpy(&(free->di_core), &dic, sizeof(xfs_dinode_core_t)); + INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO); + xfs_ialloc_log_di(tp, fbuf, i, + XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED); + } + xfs_trans_inode_alloc_buf(tp, fbuf); + } + INT_MOD(agi->agi_count, ARCH_CONVERT, newlen); + INT_MOD(agi->agi_freecount, ARCH_CONVERT, newlen); + down_read(&args.mp->m_peraglock); + args.mp->m_perag[INT_GET(agi->agi_seqno, ARCH_CONVERT)].pagi_freecount += newlen; + up_read(&args.mp->m_peraglock); + INT_SET(agi->agi_newino, ARCH_CONVERT, newino); + /* + * Insert records describing the new inode chunk into the btree. + */ + cur = xfs_btree_init_cursor(args.mp, tp, agbp, + INT_GET(agi->agi_seqno, ARCH_CONVERT), + XFS_BTNUM_INO, (xfs_inode_t *)0, 0); + for (thisino = newino; + thisino < newino + newlen; + thisino += XFS_INODES_PER_CHUNK) { + if ((error = xfs_inobt_lookup_eq(cur, thisino, + XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) { + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; + } + ASSERT(i == 0); + if ((error = xfs_inobt_insert(cur, &i))) { + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; + } + ASSERT(i == 1); + } + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + /* + * Log allocation group header fields + */ + xfs_ialloc_log_agi(tp, agbp, + XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); + /* + * Modify/log superblock values for inode count and inode free count. 
+ */ + xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); + xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); + *alloc = 1; + return 0; +} + +STATIC __inline xfs_agnumber_t +xfs_ialloc_next_ag( + xfs_mount_t *mp) +{ + xfs_agnumber_t agno; + + spin_lock(&mp->m_agirotor_lock); + agno = mp->m_agirotor; + if (++mp->m_agirotor == mp->m_maxagi) + mp->m_agirotor = 0; + spin_unlock(&mp->m_agirotor_lock); + + return agno; +} + +/* + * Select an allocation group to look for a free inode in, based on the parent + * inode and then mode. Return the allocation group buffer. + */ +STATIC xfs_buf_t * /* allocation group buffer */ +xfs_ialloc_ag_select( + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t parent, /* parent directory inode number */ + mode_t mode, /* bits set to indicate file type */ + int okalloc) /* ok to allocate more space */ +{ + xfs_buf_t *agbp; /* allocation group header buffer */ + xfs_agnumber_t agcount; /* number of ag's in the filesystem */ + xfs_agnumber_t agno; /* current ag number */ + int flags; /* alloc buffer locking flags */ + xfs_extlen_t ineed; /* blocks needed for inode allocation */ + xfs_extlen_t longest = 0; /* longest extent available */ + xfs_mount_t *mp; /* mount point structure */ + int needspace; /* file mode implies space allocated */ + xfs_perag_t *pag; /* per allocation group data */ + xfs_agnumber_t pagno; /* parent (starting) ag number */ + + /* + * Files of these types need at least one block if length > 0 + * (and they won't fit in the inode, but that's hard to figure out). + */ + needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); + mp = tp->t_mountp; + agcount = mp->m_maxagi; + if (S_ISDIR(mode)) + pagno = xfs_ialloc_next_ag(mp); + else { + pagno = XFS_INO_TO_AGNO(mp, parent); + if (pagno >= agcount) + pagno = 0; + } + ASSERT(pagno < agcount); + /* + * Loop through allocation groups, looking for one with a little + * free space in it. Note we don't look for free inodes, exactly. + * Instead, we include whether there is a need to allocate inodes + * to mean that blocks must be allocated for them, + * if none are currently free. + */ + agno = pagno; + flags = XFS_ALLOC_FLAG_TRYLOCK; + down_read(&mp->m_peraglock); + for (;;) { + pag = &mp->m_perag[agno]; + if (!pag->pagi_init) { + if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) { + agbp = NULL; + goto nextag; + } + } else + agbp = NULL; + + if (!pag->pagi_inodeok) { + xfs_ialloc_next_ag(mp); + goto unlock_nextag; + } + + /* + * Is there enough free space for the file plus a block + * of inodes (if we need to allocate some)? + */ + ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp); + if (ineed && !pag->pagf_init) { + if (agbp == NULL && + xfs_ialloc_read_agi(mp, tp, agno, &agbp)) { + agbp = NULL; + goto nextag; + } + (void)xfs_alloc_pagf_init(mp, tp, agno, flags); + } + if (!ineed || pag->pagf_init) { + if (ineed && !(longest = pag->pagf_longest)) + longest = pag->pagf_flcount > 0; + if (!ineed || + (pag->pagf_freeblks >= needspace + ineed && + longest >= ineed && + okalloc)) { + if (agbp == NULL && + xfs_ialloc_read_agi(mp, tp, agno, &agbp)) { + agbp = NULL; + goto nextag; + } + up_read(&mp->m_peraglock); + return agbp; + } + } +unlock_nextag: + if (agbp) + xfs_trans_brelse(tp, agbp); +nextag: + /* + * No point in iterating over the rest, if we're shutting + * down. 
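xfs_ialloc_next_ag above is a classic locked rotor: directories start their allocation group search at successive AGs, so new directories spread across the filesystem. The same shape in user space (not part of the patch; pthreads stands in for the kernel spinlock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rotor_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int rotor;		/* like mp->m_agirotor */
#define MAXAGI 4			/* like mp->m_maxagi */

static unsigned int next_ag(void)
{
	unsigned int agno;

	pthread_mutex_lock(&rotor_lock);
	agno = rotor;
	if (++rotor == MAXAGI)
		rotor = 0;		/* wrap back to AG 0 */
	pthread_mutex_unlock(&rotor_lock);
	return agno;
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("%u ", next_ag());	/* 0 1 2 3 0 1 */
	printf("\n");
	return 0;
}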
+ */ + if (XFS_FORCED_SHUTDOWN(mp)) { + up_read(&mp->m_peraglock); + return (xfs_buf_t *)0; + } + agno++; + if (agno >= agcount) + agno = 0; + if (agno == pagno) { + if (flags == 0) { + up_read(&mp->m_peraglock); + return (xfs_buf_t *)0; + } + flags = 0; + } + } +} + +/* + * Visible inode allocation functions. + */ + +/* + * Allocate an inode on disk. + * Mode is used to tell whether the new inode will need space, and whether + * it is a directory. + * + * The arguments IO_agbp and alloc_done are defined to work within + * the constraint of one allocation per transaction. + * xfs_dialloc() is designed to be called twice if it has to do an + * allocation to make more free inodes. On the first call, + * IO_agbp should be set to NULL. If an inode is available, + * i.e., xfs_dialloc() did not need to do an allocation, an inode + * number is returned. In this case, IO_agbp would be set to the + * current ag_buf and alloc_done set to false. + * If an allocation needed to be done, xfs_dialloc would return + * the current ag_buf in IO_agbp and set alloc_done to true. + * The caller should then commit the current transaction, allocate a new + * transaction, and call xfs_dialloc() again, passing in the previous + * value of IO_agbp. IO_agbp should be held across the transactions. + * Since the agbp is locked across the two calls, the second call is + * guaranteed to have a free inode available. + * + * Once we successfully pick an inode its number is returned and the + * on-disk data structures are updated. The inode itself is not read + * in, since doing so would break ordering constraints with xfs_reclaim. + */ +int +xfs_dialloc( + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t parent, /* parent inode (directory) */ + mode_t mode, /* mode bits for new inode */ + int okalloc, /* ok to allocate more space */ + xfs_buf_t **IO_agbp, /* in/out ag header's buffer */ + boolean_t *alloc_done, /* true if we needed to replenish + inode freelist */ + xfs_ino_t *inop) /* inode number allocated */ +{ + xfs_agnumber_t agcount; /* number of allocation groups */ + xfs_buf_t *agbp; /* allocation group header's buffer */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_agi_t *agi; /* allocation group header structure */ + xfs_btree_cur_t *cur; /* inode allocation btree cursor */ + int error; /* error return value */ + int i; /* result code */ + int ialloced; /* inode allocation status */ + int noroom = 0; /* no space for inode blk allocation */ + xfs_ino_t ino; /* fs-relative inode to be returned */ + /* REFERENCED */ + int j; /* result code */ + xfs_mount_t *mp; /* file system mount structure */ + int offset; /* index of inode in chunk */ + xfs_agino_t pagino; /* parent's a.g. relative inode # */ + xfs_agnumber_t pagno; /* parent's allocation group number */ + xfs_inobt_rec_t rec; /* inode allocation record */ + xfs_agnumber_t tagno; /* testing allocation group number */ + xfs_btree_cur_t *tcur; /* temp cursor */ + xfs_inobt_rec_t trec; /* temp inode allocation record */ + + + if (*IO_agbp == NULL) { + /* + * We do not have an agbp, so select an initial allocation + * group for inode allocation. + */ + agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc); + /* + * Couldn't find an allocation group satisfying the + * criteria, give up. + */ + if (!agbp) { + *inop = NULLFSINO; + return 0; + } + agi = XFS_BUF_TO_AGI(agbp); + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + } else { + /* + * Continue where we left off before. 
In this case, we + * know that the allocation group has free inodes. + */ + agbp = *IO_agbp; + agi = XFS_BUF_TO_AGI(agbp); + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0); + } + mp = tp->t_mountp; + agcount = mp->m_sb.sb_agcount; + agno = INT_GET(agi->agi_seqno, ARCH_CONVERT); + tagno = agno; + pagno = XFS_INO_TO_AGNO(mp, parent); + pagino = XFS_INO_TO_AGINO(mp, parent); + + /* + * If we have already hit the ceiling of inode blocks then clear + * okalloc so we scan all available agi structures for a free + * inode. + */ + + if (mp->m_maxicount && + mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) { + noroom = 1; + okalloc = 0; + } + + /* + * Loop until we find an allocation group that either has free inodes + * or in which we can allocate some inodes. Iterate through the + * allocation groups upward, wrapping at the end. + */ + *alloc_done = B_FALSE; + while (INT_ISZERO(agi->agi_freecount, ARCH_CONVERT)) { + /* + * Don't do anything if we're not supposed to allocate + * any blocks, just go on to the next ag. + */ + if (okalloc) { + /* + * Try to allocate some new inodes in the allocation + * group. + */ + if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) { + xfs_trans_brelse(tp, agbp); + if (error == ENOSPC) { + *inop = NULLFSINO; + return 0; + } else + return error; + } + if (ialloced) { + /* + * We successfully allocated some inodes, return + * the current context to the caller so that it + * can commit the current transaction and call + * us again where we left off. + */ + ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0); + *alloc_done = B_TRUE; + *IO_agbp = agbp; + *inop = NULLFSINO; + return 0; + } + } + /* + * If it failed, give up on this ag. + */ + xfs_trans_brelse(tp, agbp); + /* + * Go on to the next ag: get its ag header. + */ +nextag: + if (++tagno == agcount) + tagno = 0; + if (tagno == agno) { + *inop = NULLFSINO; + return noroom ? ENOSPC : 0; + } + down_read(&mp->m_peraglock); + if (mp->m_perag[tagno].pagi_inodeok == 0) { + up_read(&mp->m_peraglock); + goto nextag; + } + error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp); + up_read(&mp->m_peraglock); + if (error) + goto nextag; + agi = XFS_BUF_TO_AGI(agbp); + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + } + /* + * Here with an allocation group that has a free inode. + * Reset agno since we may have chosen a new ag in the + * loop above. + */ + agno = tagno; + *IO_agbp = NULL; + cur = xfs_btree_init_cursor(mp, tp, agbp, INT_GET(agi->agi_seqno, ARCH_CONVERT), + XFS_BTNUM_INO, (xfs_inode_t *)0, 0); + /* + * If pagino is 0 (this is the root inode allocation) use newino. + * This must work because we've just allocated some. + */ + if (!pagino) + pagino = INT_GET(agi->agi_newino, ARCH_CONVERT); +#ifdef DEBUG + if (cur->bc_nlevels == 1) { + int freecount = 0; + + if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + do { + if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + freecount += rec.ir_freecount; + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error0; + } while (i == 1); + + ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || + XFS_FORCED_SHUTDOWN(mp)); + } +#endif + /* + * If in the same a.g. as the parent, try to get near the parent. 
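
A caller-side sketch of the two-call contract documented above for xfs_dialloc(). It assumes the XFS types and the xfs_dialloc() prototype from this patch; sk_alloc_inode is a hypothetical wrapper, and the commit/restart of the transaction is elided as a comment because the exact transaction calls depend on the caller.

static int sk_alloc_inode(xfs_trans_t **tpp, xfs_ino_t parent, mode_t mode,
			  xfs_ino_t *inop)
{
	xfs_buf_t	*agbp = NULL;	/* must be NULL on the first call */
	boolean_t	alloc_done;
	int		error;

	error = xfs_dialloc(*tpp, parent, mode, 1, &agbp, &alloc_done, inop);
	if (error)
		return error;
	if (alloc_done) {
		/* ... commit *tpp and start a fresh transaction here,
		 * keeping agbp held across the two calls ... */
		error = xfs_dialloc(*tpp, parent, mode, 1, &agbp,
				    &alloc_done, inop);
	}
	return error;
}

Because agbp stays locked between the two calls, the second call is guaranteed to find a free inode, as the comment above states.
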
+ */ + if (pagno == agno) { + if ((error = xfs_inobt_lookup_le(cur, pagino, 0, 0, &i))) + goto error0; + if (i != 0 && + (error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &j, ARCH_NOCONVERT)) == 0 && + j == 1 && + rec.ir_freecount > 0) { + /* + * Found a free inode in the same chunk + * as parent, done. + */ + } + /* + * In the same a.g. as parent, but parent's chunk is full. + */ + else { + int doneleft; /* done, to the left */ + int doneright; /* done, to the right */ + + if (error) + goto error0; + ASSERT(i == 1); + ASSERT(j == 1); + /* + * Duplicate the cursor, search left & right + * simultaneously. + */ + if ((error = xfs_btree_dup_cursor(cur, &tcur))) + goto error0; + /* + * Search left with tcur, back up 1 record. + */ + if ((error = xfs_inobt_decrement(tcur, 0, &i))) + goto error1; + doneleft = !i; + if (!doneleft) { + if ((error = xfs_inobt_get_rec(tcur, + &trec.ir_startino, + &trec.ir_freecount, + &trec.ir_free, &i, ARCH_NOCONVERT))) + goto error1; + XFS_WANT_CORRUPTED_GOTO(i == 1, error1); + } + /* + * Search right with cur, go forward 1 record. + */ + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error1; + doneright = !i; + if (!doneright) { + if ((error = xfs_inobt_get_rec(cur, + &rec.ir_startino, + &rec.ir_freecount, + &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error1; + XFS_WANT_CORRUPTED_GOTO(i == 1, error1); + } + /* + * Loop until we find the closest inode chunk + * with a free one. + */ + while (!doneleft || !doneright) { + int useleft; /* using left inode + chunk this time */ + + /* + * Figure out which block is closer, + * if both are valid. + */ + if (!doneleft && !doneright) + useleft = + pagino - + (trec.ir_startino + + XFS_INODES_PER_CHUNK - 1) < + rec.ir_startino - pagino; + else + useleft = !doneleft; + /* + * If checking the left, does it have + * free inodes? + */ + if (useleft && trec.ir_freecount) { + /* + * Yes, set it up as the chunk to use. + */ + rec = trec; + xfs_btree_del_cursor(cur, + XFS_BTREE_NOERROR); + cur = tcur; + break; + } + /* + * If checking the right, does it have + * free inodes? + */ + if (!useleft && rec.ir_freecount) { + /* + * Yes, it's already set up. + */ + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + break; + } + /* + * If used the left, get another one + * further left. + */ + if (useleft) { + if ((error = xfs_inobt_decrement(tcur, 0, + &i))) + goto error1; + doneleft = !i; + if (!doneleft) { + if ((error = xfs_inobt_get_rec( + tcur, + &trec.ir_startino, + &trec.ir_freecount, + &trec.ir_free, &i, ARCH_NOCONVERT))) + goto error1; + XFS_WANT_CORRUPTED_GOTO(i == 1, + error1); + } + } + /* + * If used the right, get another one + * further right. + */ + else { + if ((error = xfs_inobt_increment(cur, 0, + &i))) + goto error1; + doneright = !i; + if (!doneright) { + if ((error = xfs_inobt_get_rec( + cur, + &rec.ir_startino, + &rec.ir_freecount, + &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error1; + XFS_WANT_CORRUPTED_GOTO(i == 1, + error1); + } + } + } + ASSERT(!doneleft || !doneright); + } + } + /* + * In a different a.g. from the parent. + * See if the most recently allocated block has any free. 
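
The loop above is a two-cursor nearest-neighbour search: starting from the (full) chunk containing the parent, it walks one cursor left and one right, preferring whichever side is closer while both remain, and stops at the first chunk with a free inode. A self-contained sketch over a sorted array, where struct sk_rec and SK_CHUNK stand in for xfs_inobt_rec_t and XFS_INODES_PER_CHUNK:

#define SK_CHUNK	64

struct sk_rec {
	unsigned int	startino;	/* first inode in chunk */
	unsigned int	freecount;	/* free inodes in chunk */
};

/* start = index of the full chunk containing target; returns the index
 * of the closest chunk with a free inode, or -1 if none exists. */
static int sk_nearest_free(const struct sk_rec *recs, int nrecs,
			   int start, unsigned int target)
{
	int left = start - 1, right = start + 1;

	while (left >= 0 || right < nrecs) {
		int useleft;

		/* Pick the closer side when both are still valid. */
		if (left >= 0 && right < nrecs)
			useleft = target - (recs[left].startino + SK_CHUNK - 1)
				  < recs[right].startino - target;
		else
			useleft = left >= 0;
		if (useleft) {
			if (recs[left].freecount)
				return left;
			left--;
		} else {
			if (recs[right].freecount)
				return right;
			right++;
		}
	}
	return -1;
}
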
+ */ + else if (INT_GET(agi->agi_newino, ARCH_CONVERT) != NULLAGINO) { + if ((error = xfs_inobt_lookup_eq(cur, + INT_GET(agi->agi_newino, ARCH_CONVERT), 0, 0, &i))) + goto error0; + if (i == 1 && + (error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &j, ARCH_NOCONVERT)) == 0 && + j == 1 && + rec.ir_freecount > 0) { + /* + * The last chunk allocated in the group still has + * a free inode. + */ + } + /* + * None left in the last group, search the whole a.g. + */ + else { + if (error) + goto error0; + if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) + goto error0; + ASSERT(i == 1); + for (;;) { + if ((error = xfs_inobt_get_rec(cur, + &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, + &i, ARCH_NOCONVERT))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (rec.ir_freecount > 0) + break; + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + } + } + offset = XFS_IALLOC_FIND_FREE(&rec.ir_free); + ASSERT(offset >= 0); + ASSERT(offset < XFS_INODES_PER_CHUNK); + ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % + XFS_INODES_PER_CHUNK) == 0); + ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset); + XFS_INOBT_CLR_FREE(&rec, offset, ARCH_NOCONVERT); + rec.ir_freecount--; + if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, + rec.ir_free))) + goto error0; + INT_MOD(agi->agi_freecount, ARCH_CONVERT, -1); + xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); + down_read(&mp->m_peraglock); + mp->m_perag[tagno].pagi_freecount--; + up_read(&mp->m_peraglock); +#ifdef DEBUG + if (cur->bc_nlevels == 1) { + int freecount = 0; + + if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) + goto error0; + do { + if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + freecount += rec.ir_freecount; + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error0; + } while (i == 1); + ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || + XFS_FORCED_SHUTDOWN(mp)); + } +#endif + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); + *inop = ino; + return 0; +error1: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); +error0: + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Free disk inode. Carefully avoids touching the incore inode, all + * manipulations incore are the caller's responsibility. + * The on-disk inode is not changed by this operation, only the + * btree (free inode mask) is changed. + */ +int +xfs_difree( + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t inode) /* inode to be freed */ +{ + /* REFERENCED */ + xfs_agblock_t agbno; /* block number containing inode */ + xfs_buf_t *agbp; /* buffer containing allocation group header */ + xfs_agino_t agino; /* inode number relative to allocation group */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_agi_t *agi; /* allocation group header */ + xfs_btree_cur_t *cur; /* inode btree cursor */ + int error; /* error return value */ + int i; /* result code */ + xfs_mount_t *mp; /* mount structure for filesystem */ + int off; /* offset of inode in inode chunk */ + xfs_inobt_rec_t rec; /* btree record */ + + mp = tp->t_mountp; + + /* + * Break up inode number into its components. 
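
The split that follows relies on the fact that an XFS inode number packs the AG number, the block within the AG, and the slot within the block into one integer. A standalone sketch with illustrative field widths (the real macros XFS_INO_TO_AGNO and friends derive the widths from the superblock geometry):

#include <stdint.h>

#define SK_INOPBLOG	5	/* log2(inodes per block), example value */
#define SK_AGBLKLOG	16	/* log2(blocks per AG), example value */
#define SK_AGINOLOG	(SK_INOPBLOG + SK_AGBLKLOG)

static inline uint32_t sk_ino_to_agno(uint64_t ino)
{
	return (uint32_t)(ino >> SK_AGINOLOG);
}

static inline uint32_t sk_ino_to_agino(uint64_t ino)
{
	return (uint32_t)(ino & ((1ULL << SK_AGINOLOG) - 1));
}

static inline uint32_t sk_agino_to_agbno(uint32_t agino)
{
	return agino >> SK_INOPBLOG;
}

Recombining the pieces and comparing against the original number, as the code below does with XFS_AGINO_TO_INO(), catches inode numbers with stray high bits.
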
+ */
+ agno = XFS_INO_TO_AGNO(mp, inode);
+ if (agno >= mp->m_sb.sb_agcount) {
+ cmn_err(CE_WARN,
+ "xfs_difree: agno >= mp->m_sb.sb_agcount (%d >= %d) on %s. Returning EINVAL.",
+ agno, mp->m_sb.sb_agcount, mp->m_fsname);
+ ASSERT(0);
+ return XFS_ERROR(EINVAL);
+ }
+ agino = XFS_INO_TO_AGINO(mp, inode);
+ if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
+ cmn_err(CE_WARN,
+ "xfs_difree: inode != XFS_AGINO_TO_INO() (%d != %d) on %s. Returning EINVAL.",
+ inode, XFS_AGINO_TO_INO(mp, agno, agino), mp->m_fsname);
+ ASSERT(0);
+ return XFS_ERROR(EINVAL);
+ }
+ agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+ if (agbno >= mp->m_sb.sb_agblocks) {
+ cmn_err(CE_WARN,
+ "xfs_difree: agbno >= mp->m_sb.sb_agblocks (%d >= %d) on %s. Returning EINVAL.",
+ agbno, mp->m_sb.sb_agblocks, mp->m_fsname);
+ ASSERT(0);
+ return XFS_ERROR(EINVAL);
+ }
+ /*
+ * Get the allocation group header.
+ */
+ down_read(&mp->m_peraglock);
+ error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ up_read(&mp->m_peraglock);
+ if (error) {
+ cmn_err(CE_WARN,
+ "xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s. Returning error.",
+ error, mp->m_fsname);
+ return error;
+ }
+ agi = XFS_BUF_TO_AGI(agbp);
+ ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+ ASSERT(agbno < INT_GET(agi->agi_length, ARCH_CONVERT));
+ /*
+ * Initialize the cursor.
+ */
+ cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO,
+ (xfs_inode_t *)0, 0);
+#ifdef DEBUG
+ if (cur->bc_nlevels == 1) {
+ int freecount = 0;
+
+ if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
+ goto error0;
+ do {
+ if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
+ &rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT)))
+ goto error0;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ freecount += rec.ir_freecount;
+ if ((error = xfs_inobt_increment(cur, 0, &i)))
+ goto error0;
+ } while (i == 1);
+ ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
+ XFS_FORCED_SHUTDOWN(mp));
+ }
+#endif
+ /*
+ * Look for the entry describing this inode.
+ */
+ if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) {
+ cmn_err(CE_WARN,
+ "xfs_difree: xfs_inobt_lookup_le() returned an error %d on %s. Returning error.",
+ error, mp->m_fsname);
+ goto error0;
+ }
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, &rec.ir_freecount,
+ &rec.ir_free, &i, ARCH_NOCONVERT))) {
+ cmn_err(CE_WARN,
+ "xfs_difree: xfs_inobt_get_rec() returned an error %d on %s. Returning error.",
+ error, mp->m_fsname);
+ goto error0;
+ }
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ /*
+ * Get the offset in the inode chunk.
+ */
+ off = agino - rec.ir_startino;
+ ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
+ ASSERT(!XFS_INOBT_IS_FREE(&rec, off, ARCH_NOCONVERT));
+ /*
+ * Mark the inode free & increment the count.
+ */
+ XFS_INOBT_SET_FREE(&rec, off, ARCH_NOCONVERT);
+ rec.ir_freecount++;
+ if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, rec.ir_free))) {
+ cmn_err(CE_WARN,
+ "xfs_difree: xfs_inobt_update() returned an error %d on %s. Returning error.",
+ error, mp->m_fsname);
+ goto error0;
+ }
+ /*
+ * Change the inode free counts and log the ag/sb changes. 
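
The free/allocated state of each inode in a chunk lives in ir_free, a 64-bit mask with one bit per inode: freeing sets the bit (as XFS_INOBT_SET_FREE does above), allocating clears it. A sketch of the bit manipulation with illustrative sk_* names:

#include <stdint.h>

typedef uint64_t sk_inofree_t;

static inline int sk_is_free(sk_inofree_t mask, int off)
{
	return (mask >> off) & 1;	/* 1 = inode off is free */
}

static inline sk_inofree_t sk_set_free(sk_inofree_t mask, int off)
{
	return mask | ((sk_inofree_t)1 << off);	/* free the inode */
}

static inline sk_inofree_t sk_clr_free(sk_inofree_t mask, int off)
{
	return mask & ~((sk_inofree_t)1 << off);	/* allocate it */
}
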
+ */ + INT_MOD(agi->agi_freecount, ARCH_CONVERT, 1); + xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); + down_read(&mp->m_peraglock); + mp->m_perag[agno].pagi_freecount++; + up_read(&mp->m_peraglock); +#ifdef DEBUG + if (cur->bc_nlevels == 1) { + int freecount = 0; + + if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) + goto error0; + do { + if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + freecount += rec.ir_freecount; + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error0; + } while (i == 1); + ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || + XFS_FORCED_SHUTDOWN(mp)); + } +#endif + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); + return 0; + +error0: + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Return the location of the inode in bno/off, for mapping it into a buffer. + */ +/*ARGSUSED*/ +int +xfs_dilocate( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t ino, /* inode to locate */ + xfs_fsblock_t *bno, /* output: block containing inode */ + int *len, /* output: num blocks in inode cluster */ + int *off, /* output: index in block of inode */ + uint flags) /* flags concerning inode lookup */ +{ + xfs_agblock_t agbno; /* block number of inode in the alloc group */ + xfs_buf_t *agbp; /* agi buffer */ + xfs_agino_t agino; /* inode number within alloc group */ + xfs_agnumber_t agno; /* allocation group number */ + int blks_per_cluster; /* num blocks per inode cluster */ + xfs_agblock_t chunk_agbno; /* first block in inode chunk */ + xfs_agino_t chunk_agino; /* first agino in inode chunk */ + __int32_t chunk_cnt; /* count of free inodes in chunk */ + xfs_inofree_t chunk_free; /* mask of free inodes in chunk */ + xfs_agblock_t cluster_agbno; /* first block in inode cluster */ + xfs_btree_cur_t *cur; /* inode btree cursor */ + int error; /* error code */ + int i; /* temp state */ + int offset; /* index of inode in its buffer */ + int offset_agbno; /* blks from chunk start to inode */ + + ASSERT(ino != NULLFSINO); + /* + * Split up the inode number into its parts. 
+ */ + agno = XFS_INO_TO_AGNO(mp, ino); + agino = XFS_INO_TO_AGINO(mp, ino); + agbno = XFS_AGINO_TO_AGBNO(mp, agino); + if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || + ino != XFS_AGINO_TO_INO(mp, agno, agino)) { +#ifdef DEBUG + if (agno >= mp->m_sb.sb_agcount) { + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_dilocate: agno (%d) >= " + "mp->m_sb.sb_agcount (%d)", + agno, mp->m_sb.sb_agcount); + } + if (agbno >= mp->m_sb.sb_agblocks) { + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_dilocate: agbno (0x%llx) >= " + "mp->m_sb.sb_agblocks (0x%lx)", + (unsigned long long) agbno, + (unsigned long) mp->m_sb.sb_agblocks); + } + if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) { + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_dilocate: ino (0x%llx) != " + "XFS_AGINO_TO_INO(mp, agno, agino) " + "(0x%llx)", + ino, XFS_AGINO_TO_INO(mp, agno, agino)); + } +#endif /* DEBUG */ + return XFS_ERROR(EINVAL); + } + if ((mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) || + !(flags & XFS_IMAP_LOOKUP)) { + offset = XFS_INO_TO_OFFSET(mp, ino); + ASSERT(offset < mp->m_sb.sb_inopblock); + *bno = XFS_AGB_TO_FSB(mp, agno, agbno); + *off = offset; + *len = 1; + return 0; + } + blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; + if (*bno != NULLFSBLOCK) { + offset = XFS_INO_TO_OFFSET(mp, ino); + ASSERT(offset < mp->m_sb.sb_inopblock); + cluster_agbno = XFS_FSB_TO_AGBNO(mp, *bno); + *off = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + + offset; + *len = blks_per_cluster; + return 0; + } + if (mp->m_inoalign_mask) { + offset_agbno = agbno & mp->m_inoalign_mask; + chunk_agbno = agbno - offset_agbno; + } else { + down_read(&mp->m_peraglock); + error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); + up_read(&mp->m_peraglock); + if (error) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + "xfs_ialloc_read_agi() returned " + "error %d, agno %d", + error, agno); +#endif /* DEBUG */ + return error; + } + cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO, + (xfs_inode_t *)0, 0); + if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + "xfs_inobt_lookup_le() failed"); +#endif /* DEBUG */ + goto error0; + } + if ((error = xfs_inobt_get_rec(cur, &chunk_agino, &chunk_cnt, + &chunk_free, &i, ARCH_NOCONVERT))) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + "xfs_inobt_get_rec() failed"); +#endif /* DEBUG */ + goto error0; + } + if (i == 0) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + "xfs_inobt_get_rec() failed"); +#endif /* DEBUG */ + error = XFS_ERROR(EINVAL); + } + xfs_trans_brelse(tp, agbp); + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + if (error) + return error; + chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_agino); + offset_agbno = agbno - chunk_agbno; + } + ASSERT(agbno >= chunk_agbno); + cluster_agbno = chunk_agbno + + ((offset_agbno / blks_per_cluster) * blks_per_cluster); + offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + + XFS_INO_TO_OFFSET(mp, ino); + *bno = XFS_AGB_TO_FSB(mp, agno, cluster_agbno); + *off = offset; + *len = blks_per_cluster; + return 0; +error0: + xfs_trans_brelse(tp, agbp); + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Compute and fill in value of m_in_maxlevels. 
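
A worked sketch of the cluster arithmetic at the end of xfs_dilocate() above, with illustrative numbers (4 KiB blocks, 8 KiB clusters, 32 inodes per block; none of these are the real superblock values of any particular filesystem):

#include <assert.h>

static void sk_dilocate_math(void)
{
	int blks_per_cluster = 8192 >> 12;	/* cluster/block = 2 */
	int inopblock = 32;			/* inodes per block */
	int chunk_agbno = 100;			/* chunk starts here */
	int agbno = 103;			/* inode's own block */
	int ino_in_block = 7;			/* slot within block */
	int offset_agbno = agbno - chunk_agbno;	/* = 3 */
	int cluster_agbno, offset;

	/* Round down to the cluster holding the inode: 100 + 2 = 102. */
	cluster_agbno = chunk_agbno +
		(offset_agbno / blks_per_cluster) * blks_per_cluster;
	assert(cluster_agbno == 102);

	/* Index within the cluster buffer: (103 - 102) * 32 + 7 = 39. */
	offset = (agbno - cluster_agbno) * inopblock + ino_in_block;
	assert(offset == 39);
}
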
+ */ +void +xfs_ialloc_compute_maxlevels( + xfs_mount_t *mp) /* file system mount structure */ +{ + int level; + uint maxblocks; + uint maxleafents; + int minleafrecs; + int minnoderecs; + + maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >> + XFS_INODES_PER_CHUNK_LOG; + minleafrecs = mp->m_alloc_mnr[0]; + minnoderecs = mp->m_alloc_mnr[1]; + maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; + for (level = 1; maxblocks > 1; level++) + maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; + mp->m_in_maxlevels = level; +} + +/* + * Log specified fields for the ag hdr (inode section) + */ +void +xfs_ialloc_log_agi( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* allocation group header buffer */ + int fields) /* bitmask of fields to log */ +{ + int first; /* first byte number */ + int last; /* last byte number */ + static const short offsets[] = { /* field starting offsets */ + /* keep in sync with bit definitions */ + offsetof(xfs_agi_t, agi_magicnum), + offsetof(xfs_agi_t, agi_versionnum), + offsetof(xfs_agi_t, agi_seqno), + offsetof(xfs_agi_t, agi_length), + offsetof(xfs_agi_t, agi_count), + offsetof(xfs_agi_t, agi_root), + offsetof(xfs_agi_t, agi_level), + offsetof(xfs_agi_t, agi_freecount), + offsetof(xfs_agi_t, agi_newino), + offsetof(xfs_agi_t, agi_dirino), + offsetof(xfs_agi_t, agi_unlinked), + sizeof(xfs_agi_t) + }; +#ifdef DEBUG + xfs_agi_t *agi; /* allocation group header */ + + agi = XFS_BUF_TO_AGI(bp); + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); +#endif + /* + * Compute byte offsets for the first and last fields. + */ + xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS, &first, &last); + /* + * Log the allocation group inode header buffer. + */ + xfs_trans_log_buf(tp, bp, first, last); +} + +/* + * Read in the allocation group header (inode allocation section) + */ +int +xfs_ialloc_read_agi( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_buf_t **bpp) /* allocation group hdr buf */ +{ + xfs_agi_t *agi; /* allocation group header */ + int agi_ok; /* agi is consistent */ + xfs_buf_t *bp; /* allocation group hdr buf */ + xfs_perag_t *pag; /* per allocation group data */ + int error; + + ASSERT(agno != NULLAGNUMBER); + error = xfs_trans_read_buf( + mp, tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (error) + return error; + ASSERT(bp && !XFS_BUF_GETERROR(bp)); + + /* + * Validate the magic number of the agi block. + */ + agi = XFS_BUF_TO_AGI(bp); + agi_ok = + INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && + XFS_AGI_GOOD_VERSION( + INT_GET(agi->agi_versionnum, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI, + XFS_RANDOM_IALLOC_READ_AGI))) { + XFS_CORRUPTION_ERROR("xfs_ialloc_read_agi", XFS_ERRLEVEL_LOW, + mp, agi); + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); + } + pag = &mp->m_perag[agno]; + if (!pag->pagi_init) { + pag->pagi_freecount = INT_GET(agi->agi_freecount, ARCH_CONVERT); + pag->pagi_init = 1; + } else { + /* + * It's possible for these to be out of sync if + * we are in the middle of a forced shutdown. 
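
The logging scheme in xfs_ialloc_log_agi() above maps each header field to a bit, and a static offsetof() table (with a sizeof sentinel) maps bits to byte offsets; the dirty range then runs from the first logged field's start to the byte just before the field after the last logged one. A sketch of that mapping, assuming this is what xfs_btree_offsets() computes (sk_hdr is a stand-in structure):

#include <stddef.h>

struct sk_hdr { int magic; int version; long count; };

static const size_t sk_offsets[] = {
	offsetof(struct sk_hdr, magic),
	offsetof(struct sk_hdr, version),
	offsetof(struct sk_hdr, count),
	sizeof(struct sk_hdr)		/* sentinel: end of last field */
};

#define SK_NUM_BITS	3

/* fields must have at least one of the low SK_NUM_BITS bits set. */
static void sk_dirty_range(unsigned int fields, size_t *first, size_t *last)
{
	int i, lo = -1, hi = -1;

	for (i = 0; i < SK_NUM_BITS; i++) {
		if (fields & (1u << i)) {
			if (lo < 0)
				lo = i;
			hi = i;
		}
	}
	*first = sk_offsets[lo];
	*last = sk_offsets[hi + 1] - 1;
}
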
+ */ + ASSERT(pag->pagi_freecount == + INT_GET(agi->agi_freecount, ARCH_CONVERT) + || XFS_FORCED_SHUTDOWN(mp)); + } + +#ifdef DEBUG + { + int i; + + for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) + ASSERT(!INT_ISZERO(agi->agi_unlinked[i], ARCH_CONVERT)); + } +#endif + + XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGI, XFS_AGI_REF); + *bpp = bp; + return 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_ialloc.h linux.21rc5-ac2/fs/xfs/xfs_ialloc.h --- linux.21rc5/fs/xfs/xfs_ialloc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_ialloc.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_IALLOC_H__ +#define __XFS_IALLOC_H__ + +struct xfs_buf; +struct xfs_dinode; +struct xfs_mount; +struct xfs_trans; + +/* + * Allocation parameters for inode allocation. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IALLOC_INODES) +int xfs_ialloc_inodes(struct xfs_mount *mp); +#define XFS_IALLOC_INODES(mp) xfs_ialloc_inodes(mp) +#else +#define XFS_IALLOC_INODES(mp) ((mp)->m_ialloc_inos) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IALLOC_BLOCKS) +xfs_extlen_t xfs_ialloc_blocks(struct xfs_mount *mp); +#define XFS_IALLOC_BLOCKS(mp) xfs_ialloc_blocks(mp) +#else +#define XFS_IALLOC_BLOCKS(mp) ((mp)->m_ialloc_blks) +#endif + +/* + * For small block file systems, move inodes in clusters of this size. + * When we don't have a lot of memory, however, we go a bit smaller + * to reduce the number of AGI and ialloc btree blocks we need to keep + * around for xfs_dilocate(). We choose which one to use in + * xfs_mount_int(). + */ +#define XFS_INODE_BIG_CLUSTER_SIZE 8192 +#define XFS_INODE_SMALL_CLUSTER_SIZE 4096 +#define XFS_INODE_CLUSTER_SIZE(mp) (mp)->m_inode_cluster_size + +/* + * Make an inode pointer out of the buffer/offset. 
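
The XFS_MAKE_IPTR macro defined just below scales an inode index by the on-disk inode size (sb_inodelog is log2 of that size) and adds it to the buffer base. A one-function sketch of the same arithmetic, with a hypothetical sk_ name:

#include <stddef.h>

static inline void *sk_make_iptr(void *base, int i, int inodelog)
{
	/* inode i lives at base + (i << log2(inode size)) */
	return (char *)base + ((size_t)i << inodelog);
}
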
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MAKE_IPTR) +struct xfs_dinode *xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o); +#define XFS_MAKE_IPTR(mp,b,o) xfs_make_iptr(mp,b,o) +#else +#define XFS_MAKE_IPTR(mp,b,o) \ + ((xfs_dinode_t *)(xfs_buf_offset(b, (o) << (mp)->m_sb.sb_inodelog))) +#endif + +/* + * Find a free (set) bit in the inode bitmask. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IALLOC_FIND_FREE) +int xfs_ialloc_find_free(xfs_inofree_t *fp); +#define XFS_IALLOC_FIND_FREE(fp) xfs_ialloc_find_free(fp) +#else +#define XFS_IALLOC_FIND_FREE(fp) xfs_lowbit64(*(fp)) +#endif + + +#ifdef __KERNEL__ + +/* + * Prototypes for visible xfs_ialloc.c routines. + */ + +/* + * Allocate an inode on disk. + * Mode is used to tell whether the new inode will need space, and whether + * it is a directory. + * + * To work within the constraint of one allocation per transaction, + * xfs_dialloc() is designed to be called twice if it has to do an + * allocation to make more free inodes. If an inode is + * available without an allocation, agbp would be set to the current + * agbp and alloc_done set to false. + * If an allocation needed to be done, agbp would be set to the + * inode header of the allocation group and alloc_done set to true. + * The caller should then commit the current transaction and allocate a new + * transaction. xfs_dialloc() should then be called again with + * the agbp value returned from the previous call. + * + * Once we successfully pick an inode its number is returned and the + * on-disk data structures are updated. The inode itself is not read + * in, since doing so would break ordering constraints with xfs_reclaim. + * + * *agbp should be set to NULL on the first call, *alloc_done set to FALSE. + */ +int /* error */ +xfs_dialloc( + struct xfs_trans *tp, /* transaction pointer */ + xfs_ino_t parent, /* parent inode (directory) */ + mode_t mode, /* mode bits for new inode */ + int okalloc, /* ok to allocate more space */ + struct xfs_buf **agbp, /* buf for a.g. inode header */ + boolean_t *alloc_done, /* an allocation was done to replenish + the free inodes */ + xfs_ino_t *inop); /* inode number allocated */ + +/* + * Free disk inode. Carefully avoids touching the incore inode, all + * manipulations incore are the caller's responsibility. + * The on-disk inode is not changed by this operation, only the + * btree (free inode mask) is changed. + */ +int /* error */ +xfs_difree( + struct xfs_trans *tp, /* transaction pointer */ + xfs_ino_t inode); /* inode to be freed */ + +/* + * Return the location of the inode in bno/len/off, + * for mapping it into a buffer. + */ +int +xfs_dilocate( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_ino_t ino, /* inode to locate */ + xfs_fsblock_t *bno, /* output: block containing inode */ + int *len, /* output: num blocks in cluster*/ + int *off, /* output: index in block of inode */ + uint flags); /* flags for inode btree lookup */ + +/* + * Compute and fill in value of m_in_maxlevels. 
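
The XFS_IALLOC_FIND_FREE macro above reduces to xfs_lowbit64(): the index of the least significant set bit in the free mask picks the inode to allocate. A portable sketch of that primitive (real implementations use ffs()-style instructions; the loop here keeps it self-contained):

#include <stdint.h>

static int sk_lowbit64(uint64_t mask)
{
	int bit;

	if (mask == 0)
		return -1;	/* no free inode in this chunk */
	for (bit = 0; !(mask & 1); bit++)
		mask >>= 1;
	return bit;
}
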
+ */ +void +xfs_ialloc_compute_maxlevels( + struct xfs_mount *mp); /* file system mount structure */ + +/* + * Log specified fields for the ag hdr (inode section) + */ +void +xfs_ialloc_log_agi( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_buf *bp, /* allocation group header buffer */ + int fields); /* bitmask of fields to log */ + +/* + * Read in the allocation group header (inode allocation section) + */ +int /* error */ +xfs_ialloc_read_agi( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + struct xfs_buf **bpp); /* allocation group hdr buf */ + +#endif /* __KERNEL__ */ + +#endif /* __XFS_IALLOC_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfsidbg.c linux.21rc5-ac2/fs/xfs/xfsidbg.c --- linux.21rc5/fs/xfs/xfsidbg.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfsidbg.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,5468 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "pagebuf/page_buf_internal.h" + +#include +#include +#include +#include +#include + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc.h" +#include "xfs_ag.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_buf_item.h" +#include "xfs_extfree_item.h" +#include "xfs_inode_item.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "xfs_dir_leaf.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_node.h" +#include "xfs_dir2_trace.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" +#include "xfs_rw.h" +#include "xfs_bit.h" +#include "xfs_quota.h" +#include "xfs_log_recover.h" +#include "quota/xfs_qm.h" + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Additional kdb commands for debugging XFS"); +MODULE_LICENSE("GPL"); +EXPORT_NO_SYMBOLS; + +/* + * Command table functions. + */ +static void xfsidbg_xagf(xfs_agf_t *); +static void xfsidbg_xagi(xfs_agi_t *); +static void xfsidbg_xaildump(xfs_mount_t *); +static void xfsidbg_xalloc(xfs_alloc_arg_t *); +#ifdef DEBUG +static void xfsidbg_xalmtrace(xfs_mount_t *); +#endif +static void xfsidbg_xattrcontext(xfs_attr_list_context_t *); +static void xfsidbg_xattrleaf(xfs_attr_leafblock_t *); +static void xfsidbg_xattrsf(xfs_attr_shortform_t *); +static void xfsidbg_xbirec(xfs_bmbt_irec_t *r); +static void xfsidbg_xbmalla(xfs_bmalloca_t *); +static void xfsidbg_xbrec(xfs_bmbt_rec_64_t *); +static void xfsidbg_xbroot(xfs_inode_t *); +static void xfsidbg_xbroota(xfs_inode_t *); +static void xfsidbg_xbtcur(xfs_btree_cur_t *); +static void xfsidbg_xbuf(xfs_buf_t *); +static void xfsidbg_xbuf_real(xfs_buf_t *, int); +static void xfsidbg_xchash(xfs_mount_t *mp); +static void xfsidbg_xchashlist(xfs_chashlist_t *chl); +static void xfsidbg_xdaargs(xfs_da_args_t *); +static void xfsidbg_xdabuf(xfs_dabuf_t *); +static void xfsidbg_xdanode(xfs_da_intnode_t *); +static void xfsidbg_xdastate(xfs_da_state_t *); +static void xfsidbg_xdirleaf(xfs_dir_leafblock_t *); +static void xfsidbg_xdirsf(xfs_dir_shortform_t *); +static void xfsidbg_xdir2free(xfs_dir2_free_t *); +static void xfsidbg_xdir2sf(xfs_dir2_sf_t *); +static void xfsidbg_xexlist(xfs_inode_t *); +static void xfsidbg_xflist(xfs_bmap_free_t *); +static void xfsidbg_xhelp(void); +static void xfsidbg_xiclog(xlog_in_core_t *); +static void xfsidbg_xiclogall(xlog_in_core_t *); +static void xfsidbg_xiclogcb(xlog_in_core_t *); +static void xfsidbg_xihash(xfs_mount_t *mp); +static void xfsidbg_xinodes(xfs_mount_t *); +static void xfsidbg_delayed_blocks(xfs_mount_t *); +static void xfsidbg_xinodes_quiesce(xfs_mount_t *); +static void xfsidbg_xlog(xlog_t *); +static void xfsidbg_xlog_ritem(xlog_recover_item_t *); +static void xfsidbg_xlog_rtrans(xlog_recover_t *); +static void 
xfsidbg_xlog_rtrans_entire(xlog_recover_t *); +static void xfsidbg_xlog_tic(xlog_ticket_t *); +static void xfsidbg_xlogitem(xfs_log_item_t *); +static void xfsidbg_xmount(xfs_mount_t *); +static void xfsidbg_xnode(xfs_inode_t *ip); +static void xfsidbg_xcore(xfs_iocore_t *io); +static void xfsidbg_xperag(xfs_mount_t *); +static void xfsidbg_xqm_diskdq(xfs_disk_dquot_t *); +static void xfsidbg_xqm_dqattached_inos(xfs_mount_t *); +static void xfsidbg_xqm_dquot(xfs_dquot_t *); +static void xfsidbg_xqm_mplist(xfs_mount_t *); +static void xfsidbg_xqm_qinfo(xfs_mount_t *mp); +static void xfsidbg_xqm_tpdqinfo(xfs_trans_t *tp); +static void xfsidbg_xsb(xfs_sb_t *, int convert); +static void xfsidbg_xtp(xfs_trans_t *); +static void xfsidbg_xtrans_res(xfs_mount_t *); +#ifdef CONFIG_XFS_QUOTA +static void xfsidbg_xqm(void); +static void xfsidbg_xqm_htab(void); +static void xfsidbg_xqm_freelist_print(xfs_frlist_t *qlist, char *title); +static void xfsidbg_xqm_freelist(void); +#endif + +/* kdb wrappers */ + +static int kdbm_xfs_xagf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xagf((xfs_agf_t *)addr); + return 0; +} + +static int kdbm_xfs_xagi( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xagi((xfs_agi_t *)addr); + return 0; +} + +static int kdbm_xfs_xaildump( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xaildump((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xalloc( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xalloc((xfs_alloc_arg_t *) addr); + return 0; +} + +#ifdef DEBUG +static int kdbm_xfs_xalmtrace( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xalmtrace((xfs_mount_t *) addr); + return 0; +} +#endif /* DEBUG */ + +static int kdbm_xfs_xattrcontext( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xattrcontext((xfs_attr_list_context_t *) addr); + return 0; +} + +static int kdbm_xfs_xattrleaf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int 
diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xattrleaf((xfs_attr_leafblock_t *) addr); + return 0; +} + +static int kdbm_xfs_xattrsf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xattrsf((xfs_attr_shortform_t *) addr); + return 0; +} + +static int kdbm_xfs_xbirec( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbirec((xfs_bmbt_irec_t *) addr); + return 0; +} + +static int kdbm_xfs_xbmalla( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbmalla((xfs_bmalloca_t *)addr); + return 0; +} + +static int kdbm_xfs_xbrec( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbrec((xfs_bmbt_rec_64_t *) addr); + return 0; +} + +static int kdbm_xfs_xbroot( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbroot((xfs_inode_t *) addr); + return 0; +} + +static int kdbm_xfs_xbroota( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbroota((xfs_inode_t *) addr); + return 0; +} + +static int kdbm_xfs_xbtcur( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbtcur((xfs_btree_cur_t *) addr); + return 0; +} + +static int kdbm_xfs_xbuf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbuf((xfs_buf_t *) addr); + return 0; +} + + +static int kdbm_xfs_xchash( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = 
kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xchash((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xchashlist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xchashlist((xfs_chashlist_t *) addr); + return 0; +} + + +static int kdbm_xfs_xdaargs( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdaargs((xfs_da_args_t *) addr); + return 0; +} + +static int kdbm_xfs_xdabuf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdabuf((xfs_dabuf_t *) addr); + return 0; +} + +static int kdbm_xfs_xdanode( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdanode((xfs_da_intnode_t *) addr); + return 0; +} + +static int kdbm_xfs_xdastate( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdastate((xfs_da_state_t *) addr); + return 0; +} + +static int kdbm_xfs_xdirleaf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdirleaf((xfs_dir_leafblock_t *) addr); + return 0; +} + +static int kdbm_xfs_xdirsf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdirsf((xfs_dir_shortform_t *) addr); + return 0; +} + +static int kdbm_xfs_xdir2free( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdir2free((xfs_dir2_free_t *) addr); + return 0; +} + +static int kdbm_xfs_xdir2sf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, 
&addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdir2sf((xfs_dir2_sf_t *) addr); + return 0; +} + +static int kdbm_xfs_xexlist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xexlist((xfs_inode_t *) addr); + return 0; +} + +static int kdbm_xfs_xflist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xflist((xfs_bmap_free_t *) addr); + return 0; +} + +static int kdbm_xfs_xhelp( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + if (argc != 0) + return KDB_ARGCOUNT; + + xfsidbg_xhelp(); + return 0; +} + +static int kdbm_xfs_xiclog( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xiclog((xlog_in_core_t *) addr); + return 0; +} + +static int kdbm_xfs_xiclogall( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xiclogall((xlog_in_core_t *) addr); + return 0; +} + +static int kdbm_xfs_xiclogcb( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xiclogcb((xlog_in_core_t *) addr); + return 0; +} + +static int kdbm_xfs_xihash( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xihash((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xinodes( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xinodes((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_delayed_blocks( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_delayed_blocks((xfs_mount_t *) addr); + return 0; +} + + +static int kdbm_xfs_xinodes_quiesce( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned 
long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xinodes_quiesce((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog((xlog_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog_ritem( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog_ritem((xlog_recover_item_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog_rtrans( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog_rtrans((xlog_recover_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog_rtrans_entire( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog_rtrans_entire((xlog_recover_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog_tic( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog_tic((xlog_ticket_t *) addr); + return 0; +} + +static int kdbm_xfs_xlogitem( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlogitem((xfs_log_item_t *) addr); + return 0; +} + +static int kdbm_xfs_xmount( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xmount((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xnode( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xnode((xfs_inode_t *) addr); + return 0; +} + +static int kdbm_xfs_xcore( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + 
long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xcore((xfs_iocore_t *) addr); + return 0; +} + +static int kdbm_xfs_xperag( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xperag((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_diskdq( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_diskdq((xfs_disk_dquot_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_dqattached_inos( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_dqattached_inos((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_dquot( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_dquot((xfs_dquot_t *) addr); + return 0; +} + +#ifdef CONFIG_XFS_QUOTA +static int kdbm_xfs_xqm( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + if (argc != 0) + return KDB_ARGCOUNT; + + xfsidbg_xqm(); + return 0; +} + +static int kdbm_xfs_xqm_freelist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + if (argc != 0) + return KDB_ARGCOUNT; + + xfsidbg_xqm_freelist(); + return 0; +} + +static int kdbm_xfs_xqm_htab( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + if (argc != 0) + return KDB_ARGCOUNT; + + xfsidbg_xqm_htab(); + return 0; +} +#endif + +static int kdbm_xfs_xqm_mplist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_mplist((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_qinfo( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_qinfo((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_tpdqinfo( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + 
+	xfsidbg_xqm_tpdqinfo((xfs_trans_t *) addr);
+	return 0;
+}
+
+static int kdbm_xfs_xsb(
+	int argc,
+	const char **argv,
+	const char **envp,
+	struct pt_regs *regs)
+{
+	unsigned long addr;
+	unsigned long convert=0;
+	int nextarg = 1;
+	long offset = 0;
+	int diag;
+
+	if (argc != 1 && argc != 2)
+		return KDB_ARGCOUNT;
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+	if (diag)
+		return diag;
+	if (argc == 2) {
+		/* extra argument - conversion flag */
+		diag = kdbgetaddrarg(argc, argv, &nextarg, &convert, &offset, NULL, regs);
+		if (diag)
+			return diag;
+	}
+
+	xfsidbg_xsb((xfs_sb_t *) addr, (int)convert);
+	return 0;
+}
+
+static int kdbm_xfs_xtp(
+	int argc,
+	const char **argv,
+	const char **envp,
+	struct pt_regs *regs)
+{
+	unsigned long addr;
+	int nextarg = 1;
+	long offset = 0;
+	int diag;
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+	if (diag)
+		return diag;
+
+	xfsidbg_xtp((xfs_trans_t *) addr);
+	return 0;
+}
+
+static int kdbm_xfs_xtrans_res(
+	int argc,
+	const char **argv,
+	const char **envp,
+	struct pt_regs *regs)
+{
+	unsigned long addr;
+	int nextarg = 1;
+	long offset = 0;
+	int diag;
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+	if (diag)
+		return diag;
+
+	xfsidbg_xtrans_res((xfs_mount_t *) addr);
+	return 0;
+}
+
+/*
+ * Vnode descriptor dump.
+ * This table is a string version of all the flags defined in vnode.h.
+ */
+char *tab_vflags[] = {
+	/* local only flags */
+	"VINACT",		/* 0x01 */
+	"VRECLM",		/* 0x02 */
+	"VWAIT",		/* 0x04 */
+	"VMODIFIED",		/* 0x08 */
+	"INVALID0x10",		/* 0x10 */
+	"INVALID0x20",		/* 0x20 */
+	"INVALID0x40",		/* 0x40 */
+	"INVALID0x80",		/* 0x80 */
+	"INVALID0x100",		/* 0x100 */
+	"INVALID0x200",		/* 0x200 */
+	"INVALID0x400",		/* 0x400 */
+	"INVALID0x800",		/* 0x800 */
+	"INVALID0x1000",	/* 0x1000 */
+	"INVALID0x2000",	/* 0x2000 */
+	"INVALID0x4000",	/* 0x4000 */
+	"INVALID0x8000",	/* 0x8000 */
+	"INVALID0x10000",	/* 0x10000 */
+	"INVALID0x20000",	/* 0x20000 */
+	"INVALID0x40000",	/* 0x40000 */
+	"INVALID0x80000",	/* 0x80000 */
+	"VROOT",		/* 0x100000 */
+	"INVALID0x200000",	/* 0x200000 */
+	"INVALID0x400000",	/* 0x400000 */
+	"INVALID0x800000",	/* 0x800000 */
+	"INVALID0x1000000",	/* 0x1000000 */
+	"INVALID0x2000000",	/* 0x2000000 */
+	"VSHARE",		/* 0x4000000 */
+	"INVALID0x8000000",	/* 0x8000000 */
+	"VENF_LOCKING",		/* 0x10000000 */
+	"VOPLOCK",		/* 0x20000000 */
+	"VPURGE",		/* 0x40000000 */
+	"INVALID0x80000000",	/* 0x80000000 */
+	0
+};
+
+
+static char *vnode_type[] = {
+	"VNON", "VREG", "VDIR", "VBLK", "VLNK", "VFIFO", "VBAD", "VSOCK"
+};
+
+static void
+printflags(register uint64_t flags,
+	register char **strings,
+	register char *name)
+{
+	register uint64_t mask = 1;
+
+	if (name)
+		kdb_printf("%s 0x%llx <", name, (unsigned long long)flags);
+
+	while (flags != 0 && *strings) {
+		if (mask & flags) {
+			kdb_printf("%s ", *strings);
+			flags &= ~mask;
+		}
+		mask <<= 1;
+		strings++;
+	}
+
+	if (name)
+		kdb_printf("> ");
+
+	return;
+}
+
+
+static void printvnode(vnode_t *vp, unsigned long addr)
+{
+	bhv_desc_t *bh;
+	kdb_symtab_t symtab;
+
+
+	kdb_printf("vnode: 0x%lx type ", addr);
+	if ((size_t)vp->v_type >= sizeof(vnode_type)/sizeof(vnode_type[0]))
+		kdb_printf("out of range 0x%x", vp->v_type);
+	else
+		kdb_printf("%s", vnode_type[vp->v_type]);
+	kdb_printf(" v_bh %p\n", &vp->v_bh);
+
+	if ((bh = vp->v_bh.bh_first)) {
+		kdb_printf(" v_inode 0x%p v_bh->bh_first 0x%p pobj 0x%p\n",
+			LINVFS_GET_IP((struct vnode *) addr),
+			bh, bh->bd_pdata);
+
+		if (kdbnearsym((unsigned long)bh->bd_ops, &symtab))
+			kdb_printf(" ops %s ", symtab.sym_name);
+		else
+			kdb_printf(" ops %s/0x%p ",
+				"???", (void *)bh->bd_ops);
+	} else {
+		kdb_printf(" v_inode 0x%p v_bh->bh_first = NULLBHV ",
+			LINVFS_GET_IP((struct vnode *) addr));
+	}
+
+	printflags((__psunsigned_t)vp->v_flag, tab_vflags, "flag =");
+	kdb_printf("\n");
+
+#ifdef CONFIG_XFS_VNODE_TRACING
+	kdb_printf(" v_trace 0x%p\n", vp->v_trace);
+#endif	/* CONFIG_XFS_VNODE_TRACING */
+
+	kdb_printf(" v_vfsp 0x%p v_number %Lx\n",
+		vp->v_vfsp, vp->v_number);
+}
+
+static int kdbm_vnode(
+	int argc,
+	const char **argv,
+	const char **envp,
+	struct pt_regs *regs)
+{
+	unsigned long addr;
+	int nextarg = 1;
+	long offset = 0;
+	int diag;
+	vnode_t vp;
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+
+	if (diag)
+		return diag;
+
+	if ((diag = kdb_getarea(vp, addr)))
+		return diag;
+
+	printvnode(&vp, addr);
+
+	return 0;
+}
+
+static void
+print_vfs(vfs_t *vfs, unsigned long addr)
+{
+	kdb_printf("vfsp at 0x%lx", addr);
+	kdb_printf(" vfs_fbhv 0x%p sb 0x%p\n", vfs->vfs_fbhv, vfs->vfs_super);
+}
+
+static int kdbm_vfs(
+	int argc,
+	const char **argv,
+	const char **envp,
+	struct pt_regs *regs)
+{
+	unsigned long addr;
+	int nextarg = 1;
+	long offset = 0;
+	int diag;
+	vfs_t vfs;
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+
+	if (diag)
+		return diag;
+
+	if ((diag = kdb_getarea(vfs, addr)))
+		return diag;
+
+	print_vfs(&vfs, addr);
+
+	return 0;
+}
+
+
+#ifdef CONFIG_XFS_VNODE_TRACING
+/*
+ * Print a vnode trace entry.
+ */
+static int
+vn_trace_pr_entry(ktrace_entry_t *ktep)
+{
+	char funcname[128];
+	kdb_symtab_t symtab;
+
+
+	if ((__psint_t)ktep->val[0] == 0)
+		return 0;
+
+	if (kdbnearsym((unsigned long)ktep->val[8], &symtab)) {
+		unsigned long offval;
+
+		offval = (unsigned long)ktep->val[8] - symtab.sym_start;
+
+		if (offval)
+			sprintf(funcname, "%s+0x%lx", symtab.sym_name, offval);
+		else
+			sprintf(funcname, "%s", symtab.sym_name);
+	} else
+		funcname[0] = '\0';
+
+
+	switch ((__psint_t)ktep->val[0]) {
+	case VNODE_KTRACE_ENTRY:
+		kdb_printf("entry to %s i_count = %d",
+			(char *)ktep->val[1],
+			(__psint_t)ktep->val[3]);
+		break;
+
+	case VNODE_KTRACE_EXIT:
+		kdb_printf("exit from %s i_count = %d",
+			(char *)ktep->val[1],
+			(__psint_t)ktep->val[3]);
+		break;
+
+	case VNODE_KTRACE_HOLD:
+		if ((__psint_t)ktep->val[3] != 1)
+			kdb_printf("hold @%s:%d(%s) i_count %d => %d ",
+				(char *)ktep->val[1],
+				(__psint_t)ktep->val[2],
+				funcname,
+				(__psint_t)ktep->val[3] - 1,
+				(__psint_t)ktep->val[3]);
+		else
+			kdb_printf("get @%s:%d(%s) i_count = %d",
+				(char *)ktep->val[1],
+				(__psint_t)ktep->val[2],
+				funcname,
+				(__psint_t)ktep->val[3]);
+		break;
+
+	case VNODE_KTRACE_REF:
+		kdb_printf("ref @%s:%d(%s) i_count = %d",
+			(char *)ktep->val[1],
+			(__psint_t)ktep->val[2],
+			funcname,
+			(__psint_t)ktep->val[3]);
+		break;
+
+	case VNODE_KTRACE_RELE:
+		if ((__psint_t)ktep->val[3] != 1)
+			kdb_printf("rele @%s:%d(%s) i_count %d => %d ",
+				(char *)ktep->val[1],
+				(__psint_t)ktep->val[2],
+				funcname,
+				(__psint_t)ktep->val[3],
+				(__psint_t)ktep->val[3] - 1);
+		else
+			kdb_printf("free @%s:%d(%s) i_count = %d",
+				(char *)ktep->val[1],
+				(__psint_t)ktep->val[2],
+				funcname,
+				(__psint_t)ktep->val[3]);
+		break;
+
+	default:
+		kdb_printf("unknown vntrace record\n");
+		return 1;
+	}
+
+	kdb_printf("\n");
+
kdb_printf(" cpu = %d pid = %d ", + (__psint_t)ktep->val[6], (pid_t)ktep->val[7]); + + printflags((__psunsigned_t)ktep->val[5], tab_vflags, "flag ="); + + if (kdbnearsym((unsigned int)ktep->val[4], &symtab)) { + unsigned long offval; + + offval = (unsigned int)ktep->val[4] - symtab.sym_start; + + if (offval) + kdb_printf(" ra = %s+0x%lx", symtab.sym_name, offval); + else + kdb_printf(" ra = %s", symtab.sym_name); + } else + kdb_printf(" ra = ?? 0x%p", (void *)ktep->val[4]); + + return 1; +} + + +/* + * Print out the trace buffer attached to the given vnode. + */ +static int kdbm_vntrace( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + int diag; + int nextarg = 1; + long offset = 0; + unsigned long addr; + vnode_t *vp; + ktrace_entry_t *ktep; + ktrace_snap_t kts; + + + if (argc != 1) + return KDB_ARGCOUNT; + + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + + if (diag) + return diag; + + vp = (vnode_t *)addr; + + if (vp->v_trace == NULL) { + kdb_printf("The vnode trace buffer is not initialized\n"); + + return 0; + } + + kdb_printf("vntrace vp 0x%p\n", vp); + + ktep = ktrace_first(vp->v_trace, &kts); + + while (ktep != NULL) { + if (vn_trace_pr_entry(ktep)) + kdb_printf("\n"); + + ktep = ktrace_next(vp->v_trace, &kts); + } + + return 0; +} +/* + * Print out the trace buffer attached to the given vnode. + */ +static int kdbm_vntraceaddr( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + int diag; + int nextarg = 1; + long offset = 0; + unsigned long addr; + struct ktrace *kt; + ktrace_entry_t *ktep; + ktrace_snap_t kts; + + + if (argc != 1) + return KDB_ARGCOUNT; + + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + + if (diag) + return diag; + + kt = (struct ktrace *)addr; + + kdb_printf("vntraceaddr kt 0x%p\n", kt); + + ktep = ktrace_first(kt, &kts); + + while (ktep != NULL) { + if (vn_trace_pr_entry(ktep)) + kdb_printf("\n"); + + ktep = ktrace_next(kt, &kts); + } + + return 0; +} +#endif /* CONFIG_XFS_VNODE_TRACING */ + + +static void printinode(struct inode *ip) +{ + unsigned long addr; + + + if (ip == NULL) + return; + + kdb_printf(" i_ino = %lu i_count = %u i_dev = 0x%x i_size %Ld\n", + ip->i_ino, atomic_read(&ip->i_count), + kdev_t_to_nr(ip->i_dev), ip->i_size); + + kdb_printf( + " i_mode = 0x%x i_nlink = %d i_rdev = 0x%x i_state = 0x%lx\n", + ip->i_mode, ip->i_nlink, + kdev_t_to_nr(ip->i_rdev), ip->i_state); + + kdb_printf(" i_hash.nxt = 0x%p i_hash.prv = 0x%p\n", + ip->i_hash.next, ip->i_hash.prev); + kdb_printf(" i_list.nxt = 0x%p i_list.prv = 0x%p\n", + ip->i_list.next, ip->i_list.prev); + kdb_printf(" i_dentry.nxt = 0x%p i_dentry.prv = 0x%p\n", + ip->i_dentry.next, + ip->i_dentry.prev); + + addr = (unsigned long)ip; + + kdb_printf(" i_sb = 0x%p i_op = 0x%p i_data = 0x%lx nrpages = %lu\n", + ip->i_sb, ip->i_op, + addr + offsetof(struct inode, i_data), + ip->i_data.nrpages); + + kdb_printf(" vnode ptr 0x%p\n", LINVFS_GET_VP(ip)); +} + + +static int kdbm_vn( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + int diag; + int nextarg = 1; +/* char *symname; */ + long offset = 0; + unsigned long addr; + struct inode *ip; +/* bhv_desc_t *bh; */ +#ifdef CONFIG_XFS_VNODE_TRACING + ktrace_entry_t *ktep; + ktrace_snap_t kts; +#endif + vnode_t vp; + + if (argc != 1) + return KDB_ARGCOUNT; + + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + + if (diag) + return diag; + + if ((diag = kdb_getarea(vp, addr))) + return 
+		return diag;
+
+	ip = LINVFS_GET_IP((vnode_t *)addr);
+
+	kdb_printf("--> Inode @ 0x%p\n", ip);
+	printinode(ip);
+
+	kdb_printf("--> Vnode @ 0x%lx\n", addr);
+	printvnode(&vp, addr);
+
+#ifdef CONFIG_XFS_VNODE_TRACING
+
+	kdb_printf("--> Vntrace @ 0x%lx/0x%p\n", addr, vp.v_trace);
+
+	if (vp.v_trace == NULL)
+		return 0;
+
+	ktep = ktrace_first(vp.v_trace, &kts);
+
+	while (ktep != NULL) {
+		if (vn_trace_pr_entry(ktep))
+			kdb_printf("\n");
+
+		ktep = ktrace_next(vp.v_trace, &kts);
+	}
+#endif	/* CONFIG_XFS_VNODE_TRACING */
+
+	return 0;
+}
+
+
+/* pagebuf stuff */
+
+static char *pb_flag_vals[] = {
+/*  0 */ "READ", "WRITE", "MAPPED", "PARTIAL", "ASYNC",
+/*  5 */ "NONE", "DELWRI", "FREED", "SYNC", "MAPPABLE",
+/* 10 */ "STALE", "FS_MANAGED", "FS_DATAIOD", "LOCK", "TRYLOCK",
+/* 15 */ "DONT_BLOCK", "LOCKABLE", "PRIVATE_BH", "ALL_PAGES_MAPPED",
+	 "ADDR_ALLOCATED",
+/* 20 */ "MEM_ALLOCATED", "FORCEIO", "FLUSH", "READ_AHEAD",
+	 NULL };
+
+static char *pbm_flag_vals[] = {
+	"EOF", "HOLE", "DELAY", "INVALID0x08",
+	"INVALID0x10", "UNWRITTEN", "INVALID0x40", "INVALID0x80",
+	NULL };
+
+
+static char *map_flags(unsigned long flags, char *mapping[])
+{
+	static char buffer[256];
+	int index;
+	int offset = 12;
+
+	buffer[0] = '\0';
+
+	for (index = 0; flags && mapping[index]; flags >>= 1, index++) {
+		if (flags & 1) {
+			if ((offset + strlen(mapping[index]) + 1) >= 80) {
+				strcat(buffer, "\n ");
+				offset = 12;
+			} else if (offset > 12) {
+				strcat(buffer, " ");
+				offset++;
+			}
+			strcat(buffer, mapping[index]);
+			offset += strlen(mapping[index]);
+		}
+	}
+
+	return (buffer);
+}
+
+static char *pb_flags(page_buf_flags_t pb_flag)
+{
+	return(map_flags((unsigned long) pb_flag, pb_flag_vals));
+}
+
+static int
+kdbm_pb_flags(int argc, const char **argv, const char **envp, struct pt_regs *regs)
+{
+	unsigned long flags;
+	int diag;
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+
+	diag = kdbgetularg(argv[1], &flags);
+	if (diag)
+		return diag;
+
+	kdb_printf("pb flags 0x%lx = %s\n", flags, pb_flags(flags));
+
+	return 0;
+}
+
+static void
+print_pagebuf(
+	page_buf_t *pb,
+	unsigned long addr)
+{
+	kdb_printf("page_buf_t at 0x%lx\n", addr);
+	kdb_printf(" pb_flags %s\n", pb_flags(pb->pb_flags));
+	kdb_printf(" pb_target 0x%p pb_hold %d pb_next 0x%p pb_prev 0x%p\n",
+		pb->pb_target, pb->pb_hold.counter,
+		list_entry(pb->pb_list.next, page_buf_t, pb_list),
+		list_entry(pb->pb_list.prev, page_buf_t, pb_list));
+	kdb_printf(" pb_hash_index %d pb_hash_next 0x%p pb_hash_prev 0x%p\n",
+		pb->pb_hash_index,
+		list_entry(pb->pb_hash_list.next, page_buf_t, pb_hash_list),
+		list_entry(pb->pb_hash_list.prev, page_buf_t, pb_hash_list));
+	kdb_printf(" pb_file_offset 0x%llx pb_buffer_length 0x%llx pb_addr 0x%p\n",
+		(unsigned long long) pb->pb_file_offset,
+		(unsigned long long) pb->pb_buffer_length,
+		pb->pb_addr);
+	kdb_printf(" pb_bn 0x%Lx pb_count_desired 0x%lx\n",
+		pb->pb_bn,
+		(unsigned long) pb->pb_count_desired);
+	kdb_printf(" pb_flushtime %ld (%ld) pb_io_remaining %d pb_error %u\n",
+		pb->pb_flushtime, pb->pb_flushtime - jiffies,
+		pb->pb_io_remaining.counter, pb->pb_error);
+	kdb_printf(" pb_page_count %u pb_offset 0x%x pb_pages 0x%p\n",
+		pb->pb_page_count, pb->pb_offset,
+		pb->pb_pages);
+#ifdef PAGEBUF_LOCK_TRACKING
+	kdb_printf(" pb_iodonesema (%d,%d) pb_sema (%d,%d) pincount (%d) last holder %d\n",
+		pb->pb_iodonesema.count.counter,
+		pb->pb_iodonesema.sleepers,
+		pb->pb_sema.count.counter, pb->pb_sema.sleepers,
+		pb->pb_pin_count.counter, pb->pb_last_holder);
+#else
+	kdb_printf(" pb_iodonesema (%d,%d) pb_sema (%d,%d) pincount (%d)\n",
+		pb->pb_iodonesema.count.counter,
+		pb->pb_iodonesema.sleepers,
+		pb->pb_sema.count.counter, pb->pb_sema.sleepers,
+		pb->pb_pin_count.counter);
+#endif
+	if (pb->pb_fspriv || pb->pb_fspriv2) {
+		kdb_printf( "pb_fspriv 0x%p pb_fspriv2 0x%p\n",
+			pb->pb_fspriv, pb->pb_fspriv2);
+	}
+}
+
+static int
+kdbm_pb(int argc, const char **argv, const char **envp, struct pt_regs *regs)
+{
+	page_buf_t bp;
+	unsigned long addr;
+	long offset=0;
+	int nextarg;
+	int diag;
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+
+	nextarg = 1;
+	if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs)) ||
+	    (diag = kdb_getarea(bp, addr)))
+		return diag;
+
+	print_pagebuf(&bp, addr);
+
+	return 0;
+}
+
+static int
+kdbm_pbdelay(int argc, const char **argv, const char **envp,
+	struct pt_regs *regs)
+{
+	unsigned long verbose = 0;
+	int count = 0;
+	struct list_head *curr, *next;
+	page_buf_t bp;
+	unsigned long addr;
+	int diag;
+	extern struct list_head pbd_delwrite_queue;
+
+	if (argc > 1)
+		return KDB_ARGCOUNT;
+
+	if (argc == 1) {
+		if ((diag = kdbgetularg(argv[1], &verbose))) {
+			return diag;
+		}
+	}
+
+	if (!verbose) {
+		kdb_printf("index pb pin flushtime\n");
+	}
+
+	list_for_each_safe(curr, next, &pbd_delwrite_queue) {
+		addr = (unsigned long)list_entry(curr, page_buf_t, pb_list);
+		if ((diag = kdb_getarea(bp, addr)))
+			return diag;
+
+		if (verbose) {
+			print_pagebuf(&bp, addr);
+		} else {
+			kdb_printf("%4d 0x%lx %d %ld\n",
+				count++, addr,
+				bp.pb_pin_count.counter,
+				bp.pb_flushtime - jiffies);
+		}
+	}
+
+	return 0;
+}
+
+static int
+kdbm_pbmap(int argc, const char **argv, const char **envp,
+	struct pt_regs *regs)
+{
+	page_buf_bmap_t pbm;
+	unsigned long addr;
+	long offset=0;
+	int nextarg;
+	int diag;
+
+	if (argc != 1)
+		return KDB_ARGCOUNT;
+
+	nextarg = 1;
+	if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs)) ||
+	    (diag = kdb_getarea(pbm, addr)))
+		return diag;
+
+	kdb_printf("page_buf_bmap_t at 0x%lx\n", addr);
+	kdb_printf(" pbm_bn 0x%llx pbm_offset 0x%Lx pbm_delta 0x%lx pbm_bsize 0x%lx\n",
+		(long long) pbm.pbm_bn, pbm.pbm_offset,
+		(unsigned long) pbm.pbm_delta, (unsigned long) pbm.pbm_bsize);
+
+	kdb_printf(" pbm_flags %s\n", map_flags(pbm.pbm_flags, pbm_flag_vals));
+
+	return 0;
+}
+
+#ifdef PAGEBUF_TRACE
+# ifdef __PAGEBUF_TRACE__
+# undef __PAGEBUF_TRACE__
+# undef PB_DEFINE_TRACES
+# undef PB_TRACE_START
+# undef PB_TRACE_REC
+# undef PB_TRACE_END
+# endif
+#include "pagebuf/page_buf_trace.h"
+
+#define EV_SIZE (sizeof(event_names)/sizeof(char *))
+
+void
+pb_trace_core(
+	unsigned long match,
+	char *event_match,
+	unsigned long long offset,
+	long long mask)
+{
+	extern struct pagebuf_trace_buf pb_trace;
+	int i, total, end;
+	pagebuf_trace_t *trace;
+	char *event;
+	char value[10];
+
+	end = pb_trace.start - 1;
+	if (end < 0)
+		end = PB_TRACE_BUFSIZE - 1;
+
+	if (match && (match < PB_TRACE_BUFSIZE)) {
+		for (i = pb_trace.start, total = 0; i != end; i = CIRC_INC(i)) {
+			trace = &pb_trace.buf[i];
+			if (trace->pb == 0)
+				continue;
+			total++;
+		}
+		total = total - match;
+		for (i = pb_trace.start; i != end && total; i = CIRC_INC(i)) {
+			trace = &pb_trace.buf[i];
+			if (trace->pb == 0)
+				continue;
+			total--;
+		}
+		match = 0;
+	} else
+		i = pb_trace.start;
+	for ( ; i != end; i = CIRC_INC(i)) {
+		trace = &pb_trace.buf[i];
+
+		if (offset) {
+			if ((trace->offset & ~mask) != offset)
+				continue;
+		}
+
+		if (trace->pb == 0)
+			continue;
+
+		if ((match != 0) && (trace->pb != match))
+			continue;
+
+		if ((trace->event < EV_SIZE-1) && event_names[trace->event]) {
+			event = event_names[trace->event];
+		} else if (trace->event == EV_SIZE-1) {
+			event = (char *)trace->misc;
+		} else {
+			event = value;
+			sprintf(value, "%8d", trace->event);
+		}
+
+		if (event_match && strcmp(event, event_match)) {
+			continue;
+		}
+
+
+		kdb_printf("pb 0x%lx [%s] (hold %u lock %d) misc 0x%p",
+			trace->pb, event,
+			trace->hold, trace->lock_value,
+			trace->misc);
+		kdb_symbol_print((unsigned long)trace->ra, NULL,
+			KDB_SP_SPACEB|KDB_SP_PAREN|KDB_SP_NEWLINE);
+		kdb_printf(" offset 0x%Lx size 0x%x task 0x%p\n",
+			trace->offset, trace->size, trace->task);
+		kdb_printf(" flags: %s\n",
+			pb_flags(trace->flags));
+	}
+}
+
+
+static int
+kdbm_pbtrace_offset(int argc, const char **argv, const char **envp,
+	struct pt_regs *regs)
+{
+	long mask = 0;
+	unsigned long offset = 0;
+	int diag;
+
+	if (argc > 2)
+		return KDB_ARGCOUNT;
+
+	if (argc > 0) {
+		diag = kdbgetularg(argv[1], &offset);
+		if (diag)
+			return diag;
+	}
+
+	if (argc > 1) {
+		diag = kdbgetularg(argv[2], (unsigned long *)&mask);
+		if (diag)
+			return diag;
+	}
+
+	pb_trace_core(0, NULL, (unsigned long long)offset,
+		(long long)mask);	/* sign extend mask */
+	return 0;
+}
+
+static int
+kdbm_pbtrace(int argc, const char **argv, const char **envp,
+	struct pt_regs *regs)
+{
+	unsigned long addr = 0;
+	int diag, nextarg;
+	long offset = 0;
+	char *event_match = NULL;
+
+	if (argc > 1)
+		return KDB_ARGCOUNT;
+
+	if (argc == 1) {
+		if (isupper(argv[1][0]) || islower(argv[1][0])) {
+			event_match = (char *)argv[1];
+			printk("event match on \"%s\"\n", event_match);
+			argc = 0;
+		} else {
+			nextarg = 1;
+			diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+			if (diag) {
+				printk("failed to parse %s as a number\n",
+					argv[1]);
+				return diag;
+			}
+		}
+	}
+
+	pb_trace_core(addr, event_match, 0LL, 0LL);
+	return 0;
+}
+
+#else	/* PAGEBUF_TRACE */
+static int
+kdbm_pbtrace(int argc, const char **argv, const char **envp,
+	struct pt_regs *regs)
+{
+	kdb_printf("pagebuf tracing not compiled in\n");
+
+	return 0;
+}
+#endif	/* PAGEBUF_TRACE */
+
+static struct xif {
+	char *name;
+	int (*func)(int, const char **, const char **, struct pt_regs *);
+	char *args;
+	char *help;
+} xfsidbg_funcs[] = {
+	{ "vn", kdbm_vn, "", "Dump inode/vnode/trace"},
+	{ "vnode", kdbm_vnode, "", "Dump vnode"},
+	{ "vfs", kdbm_vfs, "", "Dump vfs"},
+#ifdef CONFIG_XFS_VNODE_TRACING
+	{ "vntrace", kdbm_vntrace, "", "Dump vnode Trace"},
+	{ "vntraceaddr", kdbm_vntraceaddr, "", "Dump vnode Trace by Address"},
+#endif	/* CONFIG_XFS_VNODE_TRACING */
+	{ "xagf", kdbm_xfs_xagf, "",
+		"Dump XFS allocation group freespace" },
+	{ "xagi", kdbm_xfs_xagi, "",
+		"Dump XFS allocation group inode" },
+	{ "xail", kdbm_xfs_xaildump, "",
+		"Dump XFS AIL for a mountpoint" },
+	{ "xalloc", kdbm_xfs_xalloc, "",
+		"Dump XFS allocation args structure" },
+#ifdef DEBUG
+	{ "xalmtrc", kdbm_xfs_xalmtrace, "",
+		"Dump XFS alloc mount-point trace" },
+#endif
+	{ "xattrcx", kdbm_xfs_xattrcontext, "",
+		"Dump XFS attr_list context struct"},
+	{ "xattrlf", kdbm_xfs_xattrleaf, "",
+		"Dump XFS attribute leaf block"},
+	{ "xattrsf", kdbm_xfs_xattrsf, "",
+		"Dump XFS attribute shortform"},
+	{ "xbirec", kdbm_xfs_xbirec, "",
+		"Dump XFS bmalloc args structure"},
+	{ "xbrec", kdbm_xfs_xbrec, "",
+		"Dump XFS bmap btree root (data)"},
+	{ "xbroota", kdbm_xfs_xbroota, "",
+		"Dump XFS bmap btree root (attr)"},
+	{ "xbtcur", kdbm_xfs_xbtcur, "",
+		"Dump XFS btree cursor"},
+	{ "xbuf", kdbm_xfs_xbuf, "",
+		"Dump XFS data from a buffer"},
+	{ "xchash", kdbm_xfs_xchash, "",
+ "Dump XFS cluster hash"}, + { "xchlist", kdbm_xfs_xchashlist, "", + "Dump XFS cluster hash list"}, + { "xd2free", kdbm_xfs_xdir2free, "", + "Dump XFS directory v2 freemap"}, + { "xdaargs", kdbm_xfs_xdaargs, "", + "Dump XFS dir/attr args structure"}, + { "xdabuf", kdbm_xfs_xdabuf, "", + "Dump XFS dir/attr buf structure"}, + { "xdanode", kdbm_xfs_xdanode, "", + "Dump XFS dir/attr node block"}, + { "xdastat", kdbm_xfs_xdastate, "", + "Dump XFS dir/attr state_blk struct"}, + { "xdelay", kdbm_xfs_delayed_blocks, "", + "Dump delayed block totals"}, + { "xdirlf", kdbm_xfs_xdirleaf, "", + "Dump XFS directory leaf block"}, + { "xdirsf", kdbm_xfs_xdirsf, "", + "Dump XFS directory shortform"}, + { "xdir2sf", kdbm_xfs_xdir2sf, "", + "Dump XFS directory v2 shortform"}, + { "xdiskdq", kdbm_xfs_xqm_diskdq, "", + "Dump XFS ondisk dquot (quota) struct"}, + { "xdqatt", kdbm_xfs_xqm_dqattached_inos, "", + "All incore inodes with dquots"}, + { "xdqinfo", kdbm_xfs_xqm_tpdqinfo, "", + "Dump dqinfo structure of a trans"}, + { "xdquot", kdbm_xfs_xqm_dquot, "", + "Dump XFS dquot (quota) structure"}, + { "xexlist", kdbm_xfs_xexlist, "", + "Dump XFS bmap extents in inode"}, + { "xflist", kdbm_xfs_xflist, "", + "Dump XFS to-be-freed extent list"}, + { "xhelp", kdbm_xfs_xhelp, "", + "Print idbg-xfs help"}, + { "xicall", kdbm_xfs_xiclogall, "", + "Dump All XFS in-core logs"}, + { "xiclog", kdbm_xfs_xiclog, "", + "Dump XFS in-core log"}, + { "xihash", kdbm_xfs_xihash, "", + "Dump XFS inode hash statistics"}, + { "xinodes", kdbm_xfs_xinodes, "", + "Dump XFS inodes per mount"}, + { "xquiesce",kdbm_xfs_xinodes_quiesce, "", + "Dump non-quiesced XFS inodes per mount"}, + { "xl_rcit", kdbm_xfs_xlog_ritem, "", + "Dump XFS recovery item"}, + { "xl_rctr", kdbm_xfs_xlog_rtrans, "", + "Dump XFS recovery transaction"}, + { "xl_rctr2",kdbm_xfs_xlog_rtrans_entire, "", + "Dump entire recovery transaction"}, + { "xl_tic", kdbm_xfs_xlog_tic, "", + "Dump XFS log ticket"}, + { "xlog", kdbm_xfs_xlog, "", + "Dump XFS log"}, + { "xlogcb", kdbm_xfs_xiclogcb, "", + "Dump XFS in-core log callbacks"}, + { "xlogitm", kdbm_xfs_xlogitem, "", + "Dump XFS log item structure"}, + { "xmount", kdbm_xfs_xmount, "", + "Dump XFS mount structure"}, + { "xnode", kdbm_xfs_xnode, "", + "Dump XFS inode"}, + { "xiocore", kdbm_xfs_xcore, "", + "Dump XFS iocore"}, + { "xperag", kdbm_xfs_xperag, "", + "Dump XFS per-allocation group data"}, + { "xqinfo", kdbm_xfs_xqm_qinfo, "", + "Dump mount->m_quotainfo structure"}, +#ifdef CONFIG_XFS_QUOTA + { "xqm", kdbm_xfs_xqm, "", + "Dump XFS quota manager structure"}, + { "xqmfree", kdbm_xfs_xqm_freelist, "", + "Dump XFS global freelist of dquots"}, + { "xqmhtab", kdbm_xfs_xqm_htab, "", + "Dump XFS hashtable of dquots"}, +#endif /* CONFIG_XFS_QUOTA */ + { "xqmplist",kdbm_xfs_xqm_mplist, "", + "Dump XFS all dquots of a f/s"}, + { "xsb", kdbm_xfs_xsb, " ", + "Dump XFS superblock"}, + { "xtp", kdbm_xfs_xtp, "", + "Dump XFS transaction structure"}, + { "xtrres", kdbm_xfs_xtrans_res, "", + "Dump XFS reservation values"}, + { 0, 0, 0 } +}; + +static int +__init xfsidbg_init(void) +{ + struct xif *p; + + for (p = xfsidbg_funcs; p->name; p++) + kdb_register(p->name, p->func, p->args, p->help, 0); + + kdb_register("pb", kdbm_pb, "", "Display page_buf_t", 0); + kdb_register("pbflags", kdbm_pb_flags, "", + "Display page buf flags", 0); + kdb_register("pbmap", kdbm_pbmap, "", + "Display Bmap", 0); + kdb_register("pbdelay", kdbm_pbdelay, "0|1", + "Display delwri pagebufs", 0); + kdb_register("pbtrace", kdbm_pbtrace, "|", + "page_buf_t 
trace", 0); +#ifdef PAGEBUF_TRACE + kdb_register("pboffset", kdbm_pbtrace_offset, " []", + "page_buf_t trace", 0); +#endif + return 0; +} + +static void +__exit xfsidbg_exit(void) +{ + struct xif *p; + + for (p = xfsidbg_funcs; p->name; p++) + kdb_unregister(p->name); + + kdb_unregister("pb"); + kdb_unregister("pbflags"); + kdb_unregister("pbmap"); + kdb_unregister("pbdelay"); + kdb_unregister("pbtrace"); +#ifdef PAGEBUF_TRACE + kdb_unregister("pboffset"); +#endif + +} + +/* + * Argument to xfs_alloc routines, for allocation type. + */ +static char *xfs_alloctype[] = { + "any_ag", "first_ag", "start_ag", "this_ag", + "start_bno", "near_bno", "this_bno" +}; + + +/* + * Prototypes for static functions. + */ +#ifdef DEBUG +static int xfs_alloc_trace_entry(ktrace_entry_t *ktep); +#endif +static void xfs_broot(xfs_inode_t *ip, xfs_ifork_t *f); +static void xfs_btalloc(xfs_alloc_block_t *bt, int bsz); +static void xfs_btbmap(xfs_bmbt_block_t *bt, int bsz); +static void xfs_btino(xfs_inobt_block_t *bt, int bsz); +static void xfs_buf_item_print(xfs_buf_log_item_t *blip, int summary); +static void xfs_dastate_path(xfs_da_state_path_t *p); +static void xfs_dir2data(void *addr, int size); +static void xfs_dir2leaf(xfs_dir2_leaf_t *leaf, int size); +static void xfs_dquot_item_print(xfs_dq_logitem_t *lip, int summary); +static void xfs_efd_item_print(xfs_efd_log_item_t *efdp, int summary); +static void xfs_efi_item_print(xfs_efi_log_item_t *efip, int summary); +static char *xfs_fmtformat(xfs_dinode_fmt_t f); +static char *xfs_fmtfsblock(xfs_fsblock_t bno, xfs_mount_t *mp); +static char *xfs_fmtino(xfs_ino_t ino, xfs_mount_t *mp); +static char *xfs_fmtlsn(xfs_lsn_t *lsnp); +static char *xfs_fmtmode(int m); +static char *xfs_fmtsize(size_t i); +static char *xfs_fmtuuid(uuid_t *); +static void xfs_inode_item_print(xfs_inode_log_item_t *ilip, int summary); +static void xfs_inodebuf(xfs_buf_t *bp); +static void xfs_prdinode(xfs_dinode_t *di, int coreonly, int convert); +static void xfs_prdinode_core(xfs_dinode_core_t *dip, int convert); +static void xfs_qoff_item_print(xfs_qoff_logitem_t *lip, int summary); +static void xfs_xexlist_fork(xfs_inode_t *ip, int whichfork); +static void xfs_xnode_fork(char *name, xfs_ifork_t *f); + +/* + * Static functions. + */ + +#ifdef DEBUG +/* + * Print xfs alloc trace buffer entry. + */ +static int +xfs_alloc_trace_entry(ktrace_entry_t *ktep) +{ + static char *modagf_flags[] = { + "magicnum", + "versionnum", + "seqno", + "length", + "roots", + "levels", + "flfirst", + "fllast", + "flcount", + "freeblks", + "longest", + NULL + }; + + if (((__psint_t)ktep->val[0] & 0xffff) == 0) + return 0; + switch ((long)ktep->val[0] & 0xffffL) { + case XFS_ALLOC_KTRACE_ALLOC: + kdb_printf("alloc %s[%s %d] mp 0x%p\n", + (char *)ktep->val[1], + ktep->val[2] ? 
+			ktep->val[2] ? (char *)ktep->val[2] : "",
+			(__psint_t)ktep->val[0] >> 16,
+			(xfs_mount_t *)ktep->val[3]);
+		kdb_printf(
+	"agno %d agbno %d minlen %d maxlen %d mod %d prod %d minleft %d\n",
+			(__psunsigned_t)ktep->val[4],
+			(__psunsigned_t)ktep->val[5],
+			(__psunsigned_t)ktep->val[6],
+			(__psunsigned_t)ktep->val[7],
+			(__psunsigned_t)ktep->val[8],
+			(__psunsigned_t)ktep->val[9],
+			(__psunsigned_t)ktep->val[10]);
+		kdb_printf("total %d alignment %d len %d type %s otype %s\n",
+			(__psunsigned_t)ktep->val[11],
+			(__psunsigned_t)ktep->val[12],
+			(__psunsigned_t)ktep->val[13],
+			xfs_alloctype[((__psint_t)ktep->val[14]) >> 16],
+			xfs_alloctype[((__psint_t)ktep->val[14]) & 0xffff]);
+		kdb_printf("wasdel %d wasfromfl %d isfl %d userdata %d\n",
+			((__psint_t)ktep->val[15] & (1 << 3)) != 0,
+			((__psint_t)ktep->val[15] & (1 << 2)) != 0,
+			((__psint_t)ktep->val[15] & (1 << 1)) != 0,
+			((__psint_t)ktep->val[15] & (1 << 0)) != 0);
+		break;
+	case XFS_ALLOC_KTRACE_FREE:
+		kdb_printf("free %s[%s %d] mp 0x%p\n",
+			(char *)ktep->val[1],
+			ktep->val[2] ? (char *)ktep->val[2] : "",
+			(__psint_t)ktep->val[0] >> 16,
+			(xfs_mount_t *)ktep->val[3]);
+		kdb_printf("agno %d agbno %d len %d isfl %d\n",
+			(__psunsigned_t)ktep->val[4],
+			(__psunsigned_t)ktep->val[5],
+			(__psunsigned_t)ktep->val[6],
+			(__psint_t)ktep->val[7]);
+		break;
+	case XFS_ALLOC_KTRACE_MODAGF:
+		kdb_printf("modagf %s[%s %d] mp 0x%p\n",
+			(char *)ktep->val[1],
+			ktep->val[2] ? (char *)ktep->val[2] : "",
+			(__psint_t)ktep->val[0] >> 16,
+			(xfs_mount_t *)ktep->val[3]);
+		printflags((__psint_t)ktep->val[4], modagf_flags, "modified");
+		kdb_printf("seqno %d length %d roots b %d c %d\n",
+			(__psunsigned_t)ktep->val[5],
+			(__psunsigned_t)ktep->val[6],
+			(__psunsigned_t)ktep->val[7],
+			(__psunsigned_t)ktep->val[8]);
+		kdb_printf("levels b %d c %d flfirst %d fllast %d flcount %d\n",
+			(__psunsigned_t)ktep->val[9],
+			(__psunsigned_t)ktep->val[10],
+			(__psunsigned_t)ktep->val[11],
+			(__psunsigned_t)ktep->val[12],
+			(__psunsigned_t)ktep->val[13]);
+		kdb_printf("freeblks %d longest %d\n",
+			(__psunsigned_t)ktep->val[14],
+			(__psunsigned_t)ktep->val[15]);
+		break;
+
+	case XFS_ALLOC_KTRACE_UNBUSY:
+		kdb_printf("unbusy %s [%s %d] mp 0x%p\n",
+			(char *)ktep->val[1],
+			ktep->val[2] ? (char *)ktep->val[2] : "",
+			(__psint_t)ktep->val[0] >> 16,
+			(xfs_mount_t *)ktep->val[3]);
+		kdb_printf(" agno %d slot %d tp 0x%x\n",
+			(__psunsigned_t)ktep->val[4],
+			(__psunsigned_t)ktep->val[7],
+			(__psunsigned_t)ktep->val[8]);
+		break;
+	case XFS_ALLOC_KTRACE_BUSY:
+		kdb_printf("busy %s [%s %d] mp 0x%p\n",
+			(char *)ktep->val[1],
+			ktep->val[2] ? (char *)ktep->val[2] : "",
+			(__psint_t)ktep->val[0] >> 16,
+			(xfs_mount_t *)ktep->val[3]);
+		kdb_printf(" agno %d agbno %d len %d slot %d tp 0x%x\n",
+			(__psunsigned_t)ktep->val[4],
+			(__psunsigned_t)ktep->val[5],
+			(__psunsigned_t)ktep->val[6],
+			(__psunsigned_t)ktep->val[7],
+			(__psunsigned_t)ktep->val[8]);
+		break;
+	case XFS_ALLOC_KTRACE_BUSYSEARCH:
+		kdb_printf("busy-search %s [%s %d] mp 0x%p\n",
+			(char *)ktep->val[1],
+			ktep->val[2] ? (char *)ktep->val[2] : "",
+			(__psint_t)ktep->val[0] >> 16,
+			(xfs_mount_t *)ktep->val[3]);
+		kdb_printf(" agno %d agbno %d len %d slot %d tp 0x%x\n",
+			(__psunsigned_t)ktep->val[4],
+			(__psunsigned_t)ktep->val[5],
+			(__psunsigned_t)ktep->val[6],
+			(__psunsigned_t)ktep->val[7],
+			(__psunsigned_t)ktep->val[8]);
+		break;
+	default:
+		kdb_printf("unknown alloc trace record\n");
+		break;
+	}
+	return 1;
+}
+#endif /* DEBUG */
+
+/*
+ * Print an xfs in-inode bmap btree root.
+ */
+static void
+xfs_broot(xfs_inode_t *ip, xfs_ifork_t *f)
+{
+	xfs_bmbt_block_t *broot;
+	int format;
+	int i;
+	xfs_bmbt_key_t *kp;
+	xfs_bmbt_ptr_t *pp;
+
+	format = f == &ip->i_df ? ip->i_d.di_format : ip->i_d.di_aformat;
+	if ((f->if_flags & XFS_IFBROOT) == 0 ||
+	    format != XFS_DINODE_FMT_BTREE) {
+		kdb_printf("inode 0x%p not btree format\n", ip);
+		return;
+	}
+	broot = f->if_broot;
+	kdb_printf("block @0x%p magic %x level %d numrecs %d\n",
+		broot, INT_GET(broot->bb_magic, ARCH_CONVERT), INT_GET(broot->bb_level, ARCH_CONVERT), INT_GET(broot->bb_numrecs, ARCH_CONVERT));
+	kp = XFS_BMAP_BROOT_KEY_ADDR(broot, 1, f->if_broot_bytes);
+	pp = XFS_BMAP_BROOT_PTR_ADDR(broot, 1, f->if_broot_bytes);
+	for (i = 1; i <= INT_GET(broot->bb_numrecs, ARCH_CONVERT); i++)
+		kdb_printf("\t%d: startoff %Ld ptr %Lx %s\n",
+			i, INT_GET(kp[i - 1].br_startoff, ARCH_CONVERT), INT_GET(pp[i - 1], ARCH_CONVERT),
+			xfs_fmtfsblock(INT_GET(pp[i - 1], ARCH_CONVERT), ip->i_mount));
+}
+
+/*
+ * Print allocation btree block.
+ */
+static void
+xfs_btalloc(xfs_alloc_block_t *bt, int bsz)
+{
+	int i;
+
+	kdb_printf("magic 0x%x level %d numrecs %d leftsib 0x%x rightsib 0x%x\n",
+		INT_GET(bt->bb_magic, ARCH_CONVERT), INT_GET(bt->bb_level, ARCH_CONVERT), INT_GET(bt->bb_numrecs, ARCH_CONVERT),
+		INT_GET(bt->bb_leftsib, ARCH_CONVERT), INT_GET(bt->bb_rightsib, ARCH_CONVERT));
+	if (INT_ISZERO(bt->bb_level, ARCH_CONVERT)) {
+
+		for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) {
+			xfs_alloc_rec_t *r;
+
+			r = XFS_BTREE_REC_ADDR(bsz, xfs_alloc, bt, i, 0);
+			kdb_printf("rec %d startblock 0x%x blockcount %d\n",
+				i, INT_GET(r->ar_startblock, ARCH_CONVERT), INT_GET(r->ar_blockcount, ARCH_CONVERT));
+		}
+	} else {
+		int mxr;
+
+		mxr = XFS_BTREE_BLOCK_MAXRECS(bsz, xfs_alloc, 0);
+		for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) {
+			xfs_alloc_key_t *k;
+			xfs_alloc_ptr_t *p;
+
+			k = XFS_BTREE_KEY_ADDR(bsz, xfs_alloc, bt, i, mxr);
+			p = XFS_BTREE_PTR_ADDR(bsz, xfs_alloc, bt, i, mxr);
+			kdb_printf("key %d startblock 0x%x blockcount %d ptr 0x%x\n",
+				i, INT_GET(k->ar_startblock, ARCH_CONVERT), INT_GET(k->ar_blockcount, ARCH_CONVERT), *p);
+		}
+	}
+}
+
+/*
+ * Print a bmap btree block.
+ */
+static void
+xfs_btbmap(xfs_bmbt_block_t *bt, int bsz)
+{
+	int i;
+
+	kdb_printf("magic 0x%x level %d numrecs %d leftsib %Lx ",
+		INT_GET(bt->bb_magic, ARCH_CONVERT),
+		INT_GET(bt->bb_level, ARCH_CONVERT),
+		INT_GET(bt->bb_numrecs, ARCH_CONVERT),
+		INT_GET(bt->bb_leftsib, ARCH_CONVERT));
+	kdb_printf("rightsib %Lx\n", INT_GET(bt->bb_rightsib, ARCH_CONVERT));
+	if (INT_ISZERO(bt->bb_level, ARCH_CONVERT)) {
+		for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) {
+			xfs_bmbt_rec_t *r;
+			xfs_bmbt_irec_t irec;
+
+			r = (xfs_bmbt_rec_t *)XFS_BTREE_REC_ADDR(bsz,
+				xfs_bmbt, bt, i, 0);
+
+			xfs_bmbt_disk_get_all((xfs_bmbt_rec_t *)r, &irec);
+			kdb_printf("rec %d startoff %Ld startblock %Lx blockcount %Ld flag %d\n",
+				i, irec.br_startoff,
+				(__uint64_t)irec.br_startblock,
+				irec.br_blockcount, irec.br_state);
+		}
+	} else {
+		int mxr;
+
+		mxr = XFS_BTREE_BLOCK_MAXRECS(bsz, xfs_bmbt, 0);
+		for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) {
+			xfs_bmbt_key_t *k;
+			xfs_bmbt_ptr_t *p;
+
+			k = XFS_BTREE_KEY_ADDR(bsz, xfs_bmbt, bt, i, mxr);
+			p = XFS_BTREE_PTR_ADDR(bsz, xfs_bmbt, bt, i, mxr);
+			kdb_printf("key %d startoff %Ld ",
+				i, INT_GET(k->br_startoff, ARCH_CONVERT));
+			kdb_printf("ptr %Lx\n", INT_GET(*p, ARCH_CONVERT));
+		}
+	}
+}
+
+/*
+ * Print an inode btree block.
+ */
+static void
+xfs_btino(xfs_inobt_block_t *bt, int bsz)
+{
+	int i;
+
+	kdb_printf("magic 0x%x level %d numrecs %d leftsib 0x%x rightsib 0x%x\n",
+		INT_GET(bt->bb_magic, ARCH_CONVERT), INT_GET(bt->bb_level, ARCH_CONVERT), INT_GET(bt->bb_numrecs, ARCH_CONVERT),
+		INT_GET(bt->bb_leftsib, ARCH_CONVERT), INT_GET(bt->bb_rightsib, ARCH_CONVERT));
+	if (INT_ISZERO(bt->bb_level, ARCH_CONVERT)) {
+
+		for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) {
+			xfs_inobt_rec_t *r;
+
+			r = XFS_BTREE_REC_ADDR(bsz, xfs_inobt, bt, i, 0);
+			kdb_printf("rec %d startino 0x%x freecount %d, free %Lx\n",
+				i, INT_GET(r->ir_startino, ARCH_CONVERT), INT_GET(r->ir_freecount, ARCH_CONVERT),
+				INT_GET(r->ir_free, ARCH_CONVERT));
+		}
+	} else {
+		int mxr;
+
+		mxr = XFS_BTREE_BLOCK_MAXRECS(bsz, xfs_inobt, 0);
+		for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) {
+			xfs_inobt_key_t *k;
+			xfs_inobt_ptr_t *p;
+
+			k = XFS_BTREE_KEY_ADDR(bsz, xfs_inobt, bt, i, mxr);
+			p = XFS_BTREE_PTR_ADDR(bsz, xfs_inobt, bt, i, mxr);
+			kdb_printf("key %d startino 0x%x ptr 0x%x\n",
+				i, INT_GET(k->ir_startino, ARCH_CONVERT), INT_GET(*p, ARCH_CONVERT));
+		}
+	}
+}
+
+/*
+ * Print a buf log item.
+ */
+static void
+xfs_buf_item_print(xfs_buf_log_item_t *blip, int summary)
+{
+	static char *bli_flags[] = {
+		"hold",		/* 0x1 */
+		"dirty",	/* 0x2 */
+		"stale",	/* 0x4 */
+		"logged",	/* 0x8 */
+		"ialloc",	/* 0x10 */
+		0
+	};
+	static char *blf_flags[] = {
+		"inode",	/* 0x1 */
+		"cancel",	/* 0x2 */
+		0
+	};
+
+	if (summary) {
+		kdb_printf("buf 0x%p blkno 0x%Lx ", blip->bli_buf,
+			blip->bli_format.blf_blkno);
+		printflags(blip->bli_flags, bli_flags, "flags:");
+		kdb_printf("\n ");
+		xfsidbg_xbuf_real(blip->bli_buf, 1);
+		return;
+	}
+	kdb_printf("buf 0x%p recur %d refcount %d flags:",
+		blip->bli_buf, blip->bli_recur,
+		atomic_read(&blip->bli_refcount));
+	printflags(blip->bli_flags, bli_flags, NULL);
+	kdb_printf("\n");
+	kdb_printf("size %d blkno 0x%Lx len 0x%x map size %d map 0x%p\n",
+		blip->bli_format.blf_size, blip->bli_format.blf_blkno,
+		(uint) blip->bli_format.blf_len, blip->bli_format.blf_map_size,
+		&(blip->bli_format.blf_data_map[0]));
+	kdb_printf("blf flags: ");
+	printflags((uint)blip->bli_format.blf_flags, blf_flags, NULL);
+#ifdef XFS_TRANS_DEBUG
+	kdb_printf("orig 0x%x logged 0x%x",
+		blip->bli_orig, blip->bli_logged);
+#endif
+	kdb_printf("\n");
+}
+
+/*
+ * Print an xfs_da_state_path structure.
+ */
+static void
+xfs_dastate_path(xfs_da_state_path_t *p)
+{
+	int i;
+
+	kdb_printf("active %d\n", p->active);
+	for (i = 0; i < XFS_DA_NODE_MAXDEPTH; i++) {
+		kdb_printf(" blk %d bp 0x%p blkno 0x%x",
+			i, p->blk[i].bp, p->blk[i].blkno);
+		kdb_printf(" index %d hashval 0x%x ",
+			p->blk[i].index, (uint_t)p->blk[i].hashval);
+		switch(p->blk[i].magic) {
+		case XFS_DA_NODE_MAGIC:		kdb_printf("NODE\n");	break;
+		case XFS_DIR_LEAF_MAGIC:	kdb_printf("DIR\n");	break;
+		case XFS_ATTR_LEAF_MAGIC:	kdb_printf("ATTR\n");	break;
+		case XFS_DIR2_LEAFN_MAGIC:	kdb_printf("DIR2\n");	break;
+		default:			kdb_printf("type ??\n"); break;
+		}
+	}
+}
+
+
+/*
+ * Print an efd log item.
+ */
+static void
+xfs_efd_item_print(xfs_efd_log_item_t *efdp, int summary)
+{
+	int i;
+	xfs_extent_t *ep;
+
+	if (summary) {
+		kdb_printf("Extent Free Done: ID 0x%Lx nextents %d (at 0x%p)\n",
+			efdp->efd_format.efd_efi_id,
+			efdp->efd_format.efd_nextents, efdp);
+		return;
+	}
+	kdb_printf("size %d nextents %d next extent %d efip 0x%p\n",
+		efdp->efd_format.efd_size, efdp->efd_format.efd_nextents,
+		efdp->efd_next_extent, efdp->efd_efip);
+	kdb_printf("efi_id 0x%Lx\n", efdp->efd_format.efd_efi_id);
+	kdb_printf("efd extents:\n");
+	ep = &(efdp->efd_format.efd_extents[0]);
+	for (i = 0; i < efdp->efd_next_extent; i++, ep++) {
+		kdb_printf(" block %Lx len %d\n",
+			ep->ext_start, ep->ext_len);
+	}
+}
+
+/*
+ * Print an efi log item.
+ */
+static void
+xfs_efi_item_print(xfs_efi_log_item_t *efip, int summary)
+{
+	int i;
+	xfs_extent_t *ep;
+	static char *efi_flags[] = {
+		"recovered",	/* 0x1 */
+		"committed",	/* 0x2 */
+		"cancelled",	/* 0x4 */
+		0,
+	};
+
+	if (summary) {
+		kdb_printf("Extent Free Intention: ID 0x%Lx nextents %d (at 0x%p)\n",
+			efip->efi_format.efi_id,
+			efip->efi_format.efi_nextents, efip);
+		return;
+	}
+	kdb_printf("size %d nextents %d next extent %d\n",
+		efip->efi_format.efi_size, efip->efi_format.efi_nextents,
+		efip->efi_next_extent);
+	kdb_printf("id %Lx", efip->efi_format.efi_id);
+	printflags(efip->efi_flags, efi_flags, "flags :");
+	kdb_printf("\n");
+	kdb_printf("efi extents:\n");
+	ep = &(efip->efi_format.efi_extents[0]);
+	for (i = 0; i < efip->efi_next_extent; i++, ep++) {
+		kdb_printf(" block %Lx len %d\n",
+			ep->ext_start, ep->ext_len);
+	}
+}
+
+/*
+ * Format inode "format" into a static buffer & return it.
+ */
+static char *
+xfs_fmtformat(xfs_dinode_fmt_t f)
+{
+	static char *t[] = {
+		"dev",
+		"local",
+		"extents",
+		"btree",
+		"uuid"
+	};
+
+	return t[f];
+}
+
+/*
+ * Format fsblock number into a static buffer & return it.
+ */
+static char *
+xfs_fmtfsblock(xfs_fsblock_t bno, xfs_mount_t *mp)
+{
+	static char rval[50];
+
+	if (bno == NULLFSBLOCK)
+		sprintf(rval, "NULLFSBLOCK");
+	else if (ISNULLSTARTBLOCK(bno))
+		sprintf(rval, "NULLSTARTBLOCK(%Ld)", STARTBLOCKVAL(bno));
+	else if (mp)
+		sprintf(rval, "%Ld[%x:%x]", (xfs_dfsbno_t)bno,
+			XFS_FSB_TO_AGNO(mp, bno), XFS_FSB_TO_AGBNO(mp, bno));
+	else
+		sprintf(rval, "%Ld", (xfs_dfsbno_t)bno);
+	return rval;
+}
+
+/*
+ * Format inode number into a static buffer & return it.
+ */
+static char *
+xfs_fmtino(xfs_ino_t ino, xfs_mount_t *mp)
+{
+	static char rval[50];
+
+	if (mp)
+		sprintf(rval, "%llu[%x:%x:%x]",
+			(unsigned long long) ino,
+			XFS_INO_TO_AGNO(mp, ino),
+			XFS_INO_TO_AGBNO(mp, ino),
+			XFS_INO_TO_OFFSET(mp, ino));
+	else
+		sprintf(rval, "%llu", (unsigned long long) ino);
+	return rval;
+}
+
+/*
+ * Format an lsn for printing into a static buffer & return it.
+ */
+static char *
+xfs_fmtlsn(xfs_lsn_t *lsnp)
+{
+	uint *wordp;
+	uint *word2p;
+	static char buf[20];
+
+	wordp = (uint *)lsnp;
+	word2p = wordp++;
+	sprintf(buf, "[%u:%u]", *wordp, *word2p);
+
+	return buf;
+}
+
+/*
+ * Format file mode into a static buffer & return it.
+ */
+static char *
+xfs_fmtmode(int m)
+{
+	static char rval[16];
+
+	sprintf(rval, "%c%c%c%c%c%c%c%c%c%c%c%c%c",
+		"?fc?dxb?r?l?S?m?"[(m & IFMT) >> 12],
+		m & ISUID ? 'u' : '-',
+		m & ISGID ? 'g' : '-',
+		m & ISVTX ? 'v' : '-',
+		m & IREAD ? 'r' : '-',
+		m & IWRITE ? 'w' : '-',
+		m & IEXEC ? 'x' : '-',
+		m & (IREAD >> 3) ? 'r' : '-',
+		m & (IWRITE >> 3) ? 'w' : '-',
+		m & (IEXEC >> 3) ? 'x' : '-',
+		m & (IREAD >> 6) ? 'r' : '-',
+		m & (IWRITE >> 6) ? 'w' : '-',
+		m & (IEXEC >> 6) ? 'x' : '-');
+	return rval;
+}
+
+/*
+ * Format a size into a static buffer & return it.
+ */
+static char *
+xfs_fmtsize(size_t i)
+{
+	static char rval[20];
+
+	/* size_t is 32 bits in 32-bit kernel, 64 bits in 64-bit kernel */
+	sprintf(rval, "0x%lx", (unsigned long) i);
+	return rval;
+}
+
+/*
+ * Format a uuid into a static buffer & return it.
+ */
+static char *
+xfs_fmtuuid(uuid_t *uu)
+{
+	static char rval[40];
+	char *o = rval;
+	unsigned char *i = (unsigned char *)uu;
+	int b;
+
+	for (b=0;b<16;b++) {
+		o+=sprintf(o, "%02x", *i++);
+		if (b==3||b==5||b==7||b==9) *o++='-';
+	}
+	*o='\0';
+
+	return rval;
+}
+
+/*
+ * Print an inode log item.
+ */
+static void
+xfs_inode_item_print(xfs_inode_log_item_t *ilip, int summary)
+{
+	static char *ili_flags[] = {
+		"hold",		/* 0x1 */
+		"iolock excl",	/* 0x2 */
+		"iolock shrd",	/* 0x4 */
+		0
+	};
+	static char *ilf_fields[] = {
+		"core",		/* 0x001 */
+		"ddata",	/* 0x002 */
+		"dexts",	/* 0x004 */
+		"dbroot",	/* 0x008 */
+		"dev",		/* 0x010 */
+		"uuid",		/* 0x020 */
+		"adata",	/* 0x040 */
+		"aext",		/* 0x080 */
+		"abroot",	/* 0x100 */
+		0
+	};
+
+	if (summary) {
+		kdb_printf("inode 0x%p logged %d ",
+			ilip->ili_inode, ilip->ili_logged);
+		printflags(ilip->ili_flags, ili_flags, "flags:");
+		printflags(ilip->ili_format.ilf_fields, ilf_fields, "format:");
+		printflags(ilip->ili_last_fields, ilf_fields, "lastfield:");
+		kdb_printf("\n");
+		return;
+	}
+	kdb_printf("inode 0x%p ino 0x%llu pushbuf %d logged %d flags: ",
+		ilip->ili_inode, (unsigned long long) ilip->ili_format.ilf_ino,
+		ilip->ili_pushbuf_flag, ilip->ili_logged);
+	printflags(ilip->ili_flags, ili_flags, NULL);
+	kdb_printf("\n");
+	kdb_printf("ilock recur %d iolock recur %d ext buf 0x%p\n",
+		ilip->ili_ilock_recur, ilip->ili_iolock_recur,
+		ilip->ili_extents_buf);
+#ifdef XFS_TRANS_DEBUG
+	kdb_printf("root bytes %d root orig 0x%x\n",
+		ilip->ili_root_size, ilip->ili_orig_root);
+#endif
+	kdb_printf("size %d ", ilip->ili_format.ilf_size);
+	printflags(ilip->ili_format.ilf_fields, ilf_fields, "fields:");
+	printflags(ilip->ili_last_fields, ilf_fields, " last fields: ");
+	kdb_printf("\n");
+	kdb_printf(" flush lsn %s last lsn %s\n",
+		xfs_fmtlsn(&(ilip->ili_flush_lsn)),
+		xfs_fmtlsn(&(ilip->ili_last_lsn)));
+	kdb_printf("dsize %d, asize %d, rdev 0x%x\n",
+		ilip->ili_format.ilf_dsize,
+		ilip->ili_format.ilf_asize,
+		ilip->ili_format.ilf_u.ilfu_rdev);
+	kdb_printf("blkno 0x%Lx len 0x%x boffset 0x%x\n",
+		ilip->ili_format.ilf_blkno,
+		ilip->ili_format.ilf_len,
+		ilip->ili_format.ilf_boffset);
+}
+
+/*
+ * Print a dquot log item.
+ */
+/* ARGSUSED */
+static void
+xfs_dquot_item_print(xfs_dq_logitem_t *lip, int summary)
+{
+	kdb_printf("dquot 0x%p\n",
+		lip->qli_dquot);
+
+}
+
+/*
+ * Print a quotaoff log item.
+ */
+/* ARGSUSED */
+static void
+xfs_qoff_item_print(xfs_qoff_logitem_t *lip, int summary)
+{
+	kdb_printf("start qoff item 0x%p flags 0x%x\n",
+		lip->qql_start_lip, lip->qql_format.qf_flags);
+
+}
+
+/*
+ * Print buffer full of inodes.
+ */
+static void
+xfs_inodebuf(xfs_buf_t *bp)
+{
+	xfs_dinode_t *di;
+	int n, i;
+
+	n = XFS_BUF_COUNT(bp) >> 8;
+	for (i = 0; i < n; i++) {
+		di = (xfs_dinode_t *)xfs_buf_offset(bp,
+			i * 256);
+		xfs_prdinode(di, 0, ARCH_CONVERT);
+	}
+}
+
+
+/*
+ * Print disk inode.
+ */
+static void
+xfs_prdinode(xfs_dinode_t *di, int coreonly, int convert)
+{
+	xfs_prdinode_core(&di->di_core, convert);
+	if (!coreonly)
+		kdb_printf("next_unlinked 0x%x u@0x%p\n",
+			INT_GET(di->di_next_unlinked, convert),
+			&di->di_u);
+}
+
+/*
+ * Print disk inode core.
+ */
+static void
+xfs_prdinode_core(xfs_dinode_core_t *dip, int convert)
+{
+	static char *diflags[] = {
+		"realtime",	/* XFS_DIFLAG_REALTIME */
+		"prealloc",	/* XFS_DIFLAG_PREALLOC */
+		NULL
+	};
+
+	kdb_printf("magic 0x%x mode 0%o (%s) version 0x%x format 0x%x (%s)\n",
+		INT_GET(dip->di_magic, convert),
+		INT_GET(dip->di_mode, convert),
+		xfs_fmtmode(INT_GET(dip->di_mode, convert)),
+		INT_GET(dip->di_version, convert),
+		INT_GET(dip->di_format, convert),
+		xfs_fmtformat(
+			(xfs_dinode_fmt_t)INT_GET(dip->di_format, convert)));
+	kdb_printf("nlink %d uid %d gid %d projid %d\n",
+		INT_GET(dip->di_nlink, convert),
+		INT_GET(dip->di_uid, convert),
+		INT_GET(dip->di_gid, convert),
+		(uint)INT_GET(dip->di_projid, convert));
+	kdb_printf("atime %u:%u mtime %u:%u ctime %u:%u\n",
+		INT_GET(dip->di_atime.t_sec, convert),
+		INT_GET(dip->di_atime.t_nsec, convert),
+		INT_GET(dip->di_mtime.t_sec, convert),
+		INT_GET(dip->di_mtime.t_nsec, convert),
+		INT_GET(dip->di_ctime.t_sec, convert),
+		INT_GET(dip->di_ctime.t_nsec, convert));
+	kdb_printf("size 0x%Ld ", INT_GET(dip->di_size, convert));
+	kdb_printf("nblocks %Ld extsize 0x%x nextents 0x%x anextents 0x%x\n",
+		INT_GET(dip->di_nblocks, convert),
+		INT_GET(dip->di_extsize, convert),
+		INT_GET(dip->di_nextents, convert),
+		INT_GET(dip->di_anextents, convert));
+	kdb_printf("forkoff %d aformat 0x%x (%s) dmevmask 0x%x dmstate 0x%x ",
+		INT_GET(dip->di_forkoff, convert),
+		INT_GET(dip->di_aformat, convert),
+		xfs_fmtformat(
+			(xfs_dinode_fmt_t)INT_GET(dip->di_aformat, convert)),
+		INT_GET(dip->di_dmevmask, convert),
+		INT_GET(dip->di_dmstate, convert));
+	printflags(INT_GET(dip->di_flags, convert), diflags, "flags");
+	kdb_printf("gen 0x%x\n", INT_GET(dip->di_gen, convert));
+}
+
+/*
+ * Print xfs extent list for a fork.
+ */
+static void
+xfs_xexlist_fork(xfs_inode_t *ip, int whichfork)
+{
+	int nextents, i;
+	xfs_ifork_t *ifp;
+	xfs_bmbt_irec_t irec;
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (ifp->if_flags & XFS_IFEXTENTS) {
+		nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_64_t);
+		kdb_printf("inode 0x%p %cf extents 0x%p nextents 0x%x\n",
+			ip, "da"[whichfork], ifp->if_u1.if_extents, nextents);
+		for (i = 0; i < nextents; i++) {
+			xfs_bmbt_get_all(&ifp->if_u1.if_extents[i], &irec);
+			kdb_printf(
+	"%d: startoff %Ld startblock %s blockcount %Ld flag %d\n",
+				i, irec.br_startoff,
+				xfs_fmtfsblock(irec.br_startblock, ip->i_mount),
+				irec.br_blockcount, irec.br_state);
+		}
+	}
+}
+
+static void
+xfs_xnode_fork(char *name, xfs_ifork_t *f)
+{
+	static char *tab_flags[] = {
+		"inline",	/* XFS_IFINLINE */
+		"extents",	/* XFS_IFEXTENTS */
+		"broot",	/* XFS_IFBROOT */
+		NULL
+	};
+	int *p;
+
+	kdb_printf("%s fork", name);
+	if (f == NULL) {
+		kdb_printf(" empty\n");
+		return;
+	} else
+		kdb_printf("\n");
+	kdb_printf(" bytes %s ", xfs_fmtsize(f->if_bytes));
+	kdb_printf("real_bytes %s lastex 0x%x u1:%s 0x%p\n",
+		xfs_fmtsize(f->if_real_bytes), f->if_lastex,
+		f->if_flags & XFS_IFINLINE ? "data" : "extents",
+		f->if_flags & XFS_IFINLINE ?
+			f->if_u1.if_data :
+			(char *)f->if_u1.if_extents);
+	kdb_printf(" broot 0x%p broot_bytes %s ext_max %d ",
+		f->if_broot, xfs_fmtsize(f->if_broot_bytes), f->if_ext_max);
+	printflags(f->if_flags, tab_flags, "flags");
+	kdb_printf("\n");
+	kdb_printf(" u2");
+	for (p = (int *)&f->if_u2;
+	     p < (int *)((char *)&f->if_u2 + XFS_INLINE_DATA);
+	     p++)
+		kdb_printf(" 0x%x", *p);
+	kdb_printf("\n");
+}
+
+/*
+ * Command-level xfs-idbg functions.
+ */
+
+/*
+ * Print xfs allocation group freespace header.
+ */
+static void
+xfsidbg_xagf(xfs_agf_t *agf)
+{
+	kdb_printf("magicnum 0x%x versionnum 0x%x seqno 0x%x length 0x%x\n",
+		INT_GET(agf->agf_magicnum, ARCH_CONVERT),
+		INT_GET(agf->agf_versionnum, ARCH_CONVERT),
+		INT_GET(agf->agf_seqno, ARCH_CONVERT),
+		INT_GET(agf->agf_length, ARCH_CONVERT));
+	kdb_printf("roots b 0x%x c 0x%x levels b %d c %d\n",
+		INT_GET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT),
+		INT_GET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT),
+		INT_GET(agf->agf_levels[XFS_BTNUM_BNO], ARCH_CONVERT),
+		INT_GET(agf->agf_levels[XFS_BTNUM_CNT], ARCH_CONVERT));
+	kdb_printf("flfirst %d fllast %d flcount %d freeblks %d longest %d\n",
+		INT_GET(agf->agf_flfirst, ARCH_CONVERT),
+		INT_GET(agf->agf_fllast, ARCH_CONVERT),
+		INT_GET(agf->agf_flcount, ARCH_CONVERT),
+		INT_GET(agf->agf_freeblks, ARCH_CONVERT),
+		INT_GET(agf->agf_longest, ARCH_CONVERT));
+}
+
+/*
+ * Print xfs allocation group inode header.
+ */
+static void
+xfsidbg_xagi(xfs_agi_t *agi)
+{
+	int i;
+	int j;
+
+	kdb_printf("magicnum 0x%x versionnum 0x%x seqno 0x%x length 0x%x\n",
+		INT_GET(agi->agi_magicnum, ARCH_CONVERT),
+		INT_GET(agi->agi_versionnum, ARCH_CONVERT),
+		INT_GET(agi->agi_seqno, ARCH_CONVERT),
+		INT_GET(agi->agi_length, ARCH_CONVERT));
+	kdb_printf("count 0x%x root 0x%x level 0x%x\n",
+		INT_GET(agi->agi_count, ARCH_CONVERT),
+		INT_GET(agi->agi_root, ARCH_CONVERT),
+		INT_GET(agi->agi_level, ARCH_CONVERT));
+	kdb_printf("freecount 0x%x newino 0x%x dirino 0x%x\n",
+		INT_GET(agi->agi_freecount, ARCH_CONVERT),
+		INT_GET(agi->agi_newino, ARCH_CONVERT),
+		INT_GET(agi->agi_dirino, ARCH_CONVERT));
+
+	kdb_printf("unlinked buckets\n");
+	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; ) {
+		for (j = 0; j < 4; j++, i++) {
+			kdb_printf("0x%08x ",
+				INT_GET(agi->agi_unlinked[i], ARCH_CONVERT));
+		}
+		kdb_printf("\n");
+	}
+}
+
+
+/*
+ * Print an allocation argument structure for XFS.
+ */
+static void
+xfsidbg_xalloc(xfs_alloc_arg_t *args)
+{
+	kdb_printf("tp 0x%p mp 0x%p agbp 0x%p pag 0x%p fsbno %s\n",
+		args->tp, args->mp, args->agbp, args->pag,
+		xfs_fmtfsblock(args->fsbno, args->mp));
+	kdb_printf("agno 0x%x agbno 0x%x minlen 0x%x maxlen 0x%x mod 0x%x\n",
+		args->agno, args->agbno, args->minlen, args->maxlen, args->mod);
+	kdb_printf("prod 0x%x minleft 0x%x total 0x%x alignment 0x%x\n",
+		args->prod, args->minleft, args->total, args->alignment);
+	kdb_printf("minalignslop 0x%x len 0x%x type %s otype %s wasdel %d\n",
+		args->minalignslop, args->len, xfs_alloctype[args->type],
+		xfs_alloctype[args->otype], args->wasdel);
+	kdb_printf("wasfromfl %d isfl %d userdata %d\n",
+		args->wasfromfl, args->isfl, args->userdata);
+}
+
+#ifdef DEBUG
+/*
+ * Print out all the entries in the alloc trace buf corresponding
+ * to the given mount point.
+ */
+static void
+xfsidbg_xalmtrace(xfs_mount_t *mp)
+{
+	ktrace_entry_t *ktep;
+	ktrace_snap_t kts;
+	extern ktrace_t *xfs_alloc_trace_buf;
+
+	if (xfs_alloc_trace_buf == NULL) {
+		kdb_printf("The xfs alloc trace buffer is not initialized\n");
+		return;
+	}
+
+	ktep = ktrace_first(xfs_alloc_trace_buf, &kts);
+	while (ktep != NULL) {
+		if ((__psint_t)ktep->val[0] && (xfs_mount_t *)ktep->val[3] == mp) {
+			(void)xfs_alloc_trace_entry(ktep);
+			kdb_printf("\n");
+		}
+		ktep = ktrace_next(xfs_alloc_trace_buf, &kts);
+	}
+}
+#endif /* DEBUG */
+
+/*
+ * Print an attr_list() context structure.
+ */
+static void
+xfsidbg_xattrcontext(xfs_attr_list_context_t *context)
+{
+	static char *attr_arg_flags[] = {
+		"DONTFOLLOW",	/* 0x0001 */
+		"?",		/* 0x0002 */
+		"?",		/* 0x0004 */
+		"?",		/* 0x0008 */
+		"CREATE",	/* 0x0010 */
+		"?",		/* 0x0020 */
+		"?",		/* 0x0040 */
+		"?",		/* 0x0080 */
+		"?",		/* 0x0100 */
+		"?",		/* 0x0200 */
+		"?",		/* 0x0400 */
+		"?",		/* 0x0800 */
+		"KERNOTIME",	/* 0x1000 */
+		NULL
+	};
+
+	kdb_printf("dp 0x%p, dupcnt %d, resynch %d",
+		context->dp, context->dupcnt, context->resynch);
+	printflags((__psunsigned_t)context->flags, attr_arg_flags, ", flags");
+	kdb_printf("\ncursor h/b/o 0x%x/0x%x/%d -- p/p/i 0x%x/0x%x/0x%x\n",
+		context->cursor->hashval, context->cursor->blkno,
+		context->cursor->offset, context->cursor->pad1,
+		context->cursor->pad2, context->cursor->initted);
+	kdb_printf("alist 0x%p, bufsize 0x%x, count %d, firstu 0x%x\n",
+		context->alist, context->bufsize, context->count,
+		context->firstu);
+}
+
+/*
+ * Print attribute leaf block.
+ */
+static void
+xfsidbg_xattrleaf(xfs_attr_leafblock_t *leaf)
+{
+	xfs_attr_leaf_hdr_t *h;
+	xfs_da_blkinfo_t *i;
+	xfs_attr_leaf_map_t *m;
+	xfs_attr_leaf_entry_t *e;
+	xfs_attr_leaf_name_local_t *l;
+	xfs_attr_leaf_name_remote_t *r;
+	int j, k;
+
+	h = &leaf->hdr;
+	i = &h->info;
+	kdb_printf("hdr info forw 0x%x back 0x%x magic 0x%x\n",
+		i->forw, i->back, i->magic);
+	kdb_printf("hdr count %d usedbytes %d firstused %d holes %d\n",
+		INT_GET(h->count, ARCH_CONVERT),
+		INT_GET(h->usedbytes, ARCH_CONVERT),
+		INT_GET(h->firstused, ARCH_CONVERT), h->holes);
+	for (j = 0, m = h->freemap; j < XFS_ATTR_LEAF_MAPSIZE; j++, m++) {
+		kdb_printf("hdr freemap %d base %d size %d\n",
+			j, INT_GET(m->base, ARCH_CONVERT),
+			INT_GET(m->size, ARCH_CONVERT));
+	}
+	for (j = 0, e = leaf->entries; j < INT_GET(h->count, ARCH_CONVERT); j++, e++) {
+		kdb_printf("[%2d] hash 0x%x nameidx %d flags 0x%x <",
+			j, INT_GET(e->hashval, ARCH_CONVERT),
+			INT_GET(e->nameidx, ARCH_CONVERT), e->flags);
+		if (e->flags & XFS_ATTR_LOCAL)
+			kdb_printf("LOCAL ");
+		if (e->flags & XFS_ATTR_ROOT)
+			kdb_printf("ROOT ");
+		if (e->flags & XFS_ATTR_INCOMPLETE)
+			kdb_printf("INCOMPLETE ");
+		k = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_INCOMPLETE);
+		if ((e->flags & k) != 0)
+			kdb_printf("0x%x", e->flags & k);
+		kdb_printf(">\n name \"");
+		if (e->flags & XFS_ATTR_LOCAL) {
+			l = XFS_ATTR_LEAF_NAME_LOCAL(leaf, j);
+			for (k = 0; k < l->namelen; k++)
+				kdb_printf("%c", l->nameval[k]);
+			kdb_printf("\"(%d) value \"", l->namelen);
+			for (k = 0; (k < INT_GET(l->valuelen, ARCH_CONVERT)) && (k < 32); k++)
+				kdb_printf("%c", l->nameval[l->namelen + k]);
+			if (k == 32)
+				kdb_printf("...");
+			kdb_printf("\"(%d)\n",
+				INT_GET(l->valuelen, ARCH_CONVERT));
+		} else {
+			r = XFS_ATTR_LEAF_NAME_REMOTE(leaf, j);
+			for (k = 0; k < r->namelen; k++)
+				kdb_printf("%c", r->name[k]);
+			kdb_printf("\"(%d) value blk 0x%x len %d\n",
+				r->namelen,
+				INT_GET(r->valueblk, ARCH_CONVERT),
+				INT_GET(r->valuelen, ARCH_CONVERT));
+		}
+	}
+}
+
+/*
+ * Print a shortform attribute list.
+ */
+static void
+xfsidbg_xattrsf(xfs_attr_shortform_t *s)
+{
+	xfs_attr_sf_hdr_t *sfh;
+	xfs_attr_sf_entry_t *sfe;
+	int i, j;
+
+	sfh = &s->hdr;
+	kdb_printf("hdr count %d\n", INT_GET(sfh->count, ARCH_CONVERT));
+	for (i = 0, sfe = s->list; i < INT_GET(sfh->count, ARCH_CONVERT); i++) {
+		kdb_printf("entry %d namelen %d name \"", i, sfe->namelen);
+		for (j = 0; j < sfe->namelen; j++)
+			kdb_printf("%c", sfe->nameval[j]);
+		kdb_printf("\" valuelen %d value \"", INT_GET(sfe->valuelen, ARCH_CONVERT));
+		for (j = 0; (j < INT_GET(sfe->valuelen, ARCH_CONVERT)) && (j < 32); j++)
+			kdb_printf("%c", sfe->nameval[sfe->namelen + j]);
+		if (j == 32)
+			kdb_printf("...");
+		kdb_printf("\"\n");
+		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+	}
+}
+
+
+/*
+ * Print xfs bmap internal record
+ */
+static void
+xfsidbg_xbirec(xfs_bmbt_irec_t *r)
+{
+	kdb_printf(
+	"startoff %Ld startblock %Lx blockcount %Ld state %Ld\n",
+		(__uint64_t)r->br_startoff,
+		(__uint64_t)r->br_startblock,
+		(__uint64_t)r->br_blockcount,
+		(__uint64_t)r->br_state);
+}
+
+
+/*
+ * Print a bmap alloc argument structure for XFS.
+ */
+static void
+xfsidbg_xbmalla(xfs_bmalloca_t *a)
+{
+	kdb_printf("tp 0x%p ip 0x%p eof %d prevp 0x%p\n",
+		a->tp, a->ip, a->eof, a->prevp);
+	kdb_printf("gotp 0x%p firstblock %s alen %d total %d\n",
+		a->gotp, xfs_fmtfsblock(a->firstblock, a->ip->i_mount),
+		a->alen, a->total);
+	kdb_printf("off %s wasdel %d userdata %d minlen %d\n",
+		xfs_fmtfsblock(a->off, a->ip->i_mount), a->wasdel,
+		a->userdata, a->minlen);
+	kdb_printf("minleft %d low %d rval %s aeof %d\n",
+		a->minleft, a->low, xfs_fmtfsblock(a->rval, a->ip->i_mount),
+		a->aeof);
+}
+
+
+/*
+ * Print xfs bmap record
+ */
+static void
+xfsidbg_xbrec(xfs_bmbt_rec_64_t *r)
+{
+	xfs_bmbt_irec_t irec;
+
+	xfs_bmbt_get_all((xfs_bmbt_rec_t *)r, &irec);
+	kdb_printf("startoff %Ld startblock %Lx blockcount %Ld flag %d\n",
+		irec.br_startoff, (__uint64_t)irec.br_startblock,
+		irec.br_blockcount, irec.br_state);
+}
+
+/*
+ * Print an xfs in-inode bmap btree root (data fork).
+ */
+static void
+xfsidbg_xbroot(xfs_inode_t *ip)
+{
+	xfs_broot(ip, &ip->i_df);
+}
+
+/*
+ * Print an xfs in-inode bmap btree root (attribute fork).
+ */
+static void
+xfsidbg_xbroota(xfs_inode_t *ip)
+{
+	if (ip->i_afp)
+		xfs_broot(ip, ip->i_afp);
+}
+
+/*
+ * Print xfs btree cursor.
+ */
+static void
+xfsidbg_xbtcur(xfs_btree_cur_t *c)
+{
+	int l;
+
+	kdb_printf("tp 0x%p mp 0x%p\n",
+		c->bc_tp,
+		c->bc_mp);
+	if (c->bc_btnum == XFS_BTNUM_BMAP) {
+		kdb_printf("rec.b ");
+		xfsidbg_xbirec(&c->bc_rec.b);
+	} else if (c->bc_btnum == XFS_BTNUM_INO) {
+		kdb_printf("rec.i startino 0x%x freecount 0x%x free %Lx\n",
+			c->bc_rec.i.ir_startino, c->bc_rec.i.ir_freecount,
+			c->bc_rec.i.ir_free);
+	} else {
+		kdb_printf("rec.a startblock 0x%x blockcount 0x%x\n",
+			c->bc_rec.a.ar_startblock,
+			c->bc_rec.a.ar_blockcount);
+	}
+	kdb_printf("bufs");
+	for (l = 0; l < c->bc_nlevels; l++)
+		kdb_printf(" 0x%p", c->bc_bufs[l]);
+	kdb_printf("\n");
+	kdb_printf("ptrs");
+	for (l = 0; l < c->bc_nlevels; l++)
+		kdb_printf(" 0x%x", c->bc_ptrs[l]);
+	kdb_printf(" ra");
+	for (l = 0; l < c->bc_nlevels; l++)
+		kdb_printf(" %d", c->bc_ra[l]);
+	kdb_printf("\n");
+	kdb_printf("nlevels %d btnum %s blocklog %d\n",
+		c->bc_nlevels,
+		c->bc_btnum == XFS_BTNUM_BNO ? "bno" :
+		(c->bc_btnum == XFS_BTNUM_CNT ? "cnt" :
"bmap" : "ino")), + c->bc_blocklog); + if (c->bc_btnum == XFS_BTNUM_BMAP) { + kdb_printf("private forksize 0x%x whichfork %d ip 0x%p flags %d\n", + c->bc_private.b.forksize, + c->bc_private.b.whichfork, + c->bc_private.b.ip, + c->bc_private.b.flags); + kdb_printf("private firstblock %s flist 0x%p allocated 0x%x\n", + xfs_fmtfsblock(c->bc_private.b.firstblock, c->bc_mp), + c->bc_private.b.flist, + c->bc_private.b.allocated); + } else if (c->bc_btnum == XFS_BTNUM_INO) { + kdb_printf("private agbp 0x%p agno 0x%x\n", + c->bc_private.i.agbp, + c->bc_private.i.agno); + } else { + kdb_printf("private agbp 0x%p agno 0x%x\n", + c->bc_private.a.agbp, + c->bc_private.a.agno); + } +} + +/* + * Figure out what kind of xfs block the buffer contains, + * and invoke a print routine. + */ +static void +xfsidbg_xbuf(xfs_buf_t *bp) +{ + xfsidbg_xbuf_real(bp, 0); +} + +/* + * Figure out what kind of xfs block the buffer contains, + * and invoke a print routine (if asked to). + */ +static void +xfsidbg_xbuf_real(xfs_buf_t *bp, int summary) +{ + void *d; + xfs_agf_t *agf; + xfs_agi_t *agi; + xfs_sb_t *sb; + xfs_alloc_block_t *bta; + xfs_bmbt_block_t *btb; + xfs_inobt_block_t *bti; + xfs_attr_leafblock_t *aleaf; + xfs_dir_leafblock_t *dleaf; + xfs_da_intnode_t *node; + xfs_dinode_t *di; + xfs_disk_dquot_t *dqb; + xfs_dir2_block_t *d2block; + xfs_dir2_data_t *d2data; + xfs_dir2_leaf_t *d2leaf; + xfs_dir2_free_t *d2free; + + d = XFS_BUF_PTR(bp); + if (INT_GET((agf = d)->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC) { + if (summary) { + kdb_printf("freespace hdr for AG %d (at 0x%p)\n", + INT_GET(agf->agf_seqno, ARCH_CONVERT), agf); + } else { + kdb_printf("buf 0x%p agf 0x%p\n", bp, agf); + xfsidbg_xagf(agf); + } + } else if (INT_GET((agi = d)->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC) { + if (summary) { + kdb_printf("Inode hdr for AG %d (at 0x%p)\n", + INT_GET(agi->agi_seqno, ARCH_CONVERT), agi); + } else { + kdb_printf("buf 0x%p agi 0x%p\n", bp, agi); + xfsidbg_xagi(agi); + } + } else if (INT_GET((bta = d)->bb_magic, ARCH_CONVERT) == XFS_ABTB_MAGIC) { + if (summary) { + kdb_printf("Alloc BNO Btree blk, level %d (at 0x%p)\n", + INT_GET(bta->bb_level, ARCH_CONVERT), bta); + } else { + kdb_printf("buf 0x%p abtbno 0x%p\n", bp, bta); + xfs_btalloc(bta, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((bta = d)->bb_magic, ARCH_CONVERT) == XFS_ABTC_MAGIC) { + if (summary) { + kdb_printf("Alloc COUNT Btree blk, level %d (at 0x%p)\n", + INT_GET(bta->bb_level, ARCH_CONVERT), bta); + } else { + kdb_printf("buf 0x%p abtcnt 0x%p\n", bp, bta); + xfs_btalloc(bta, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((btb = d)->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC) { + if (summary) { + kdb_printf("Bmap Btree blk, level %d (at 0x%p)\n", + INT_GET(btb->bb_level, ARCH_CONVERT), btb); + } else { + kdb_printf("buf 0x%p bmapbt 0x%p\n", bp, btb); + xfs_btbmap(btb, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((bti = d)->bb_magic, ARCH_CONVERT) == XFS_IBT_MAGIC) { + if (summary) { + kdb_printf("Inode Btree blk, level %d (at 0x%p)\n", + INT_GET(bti->bb_level, ARCH_CONVERT), bti); + } else { + kdb_printf("buf 0x%p inobt 0x%p\n", bp, bti); + xfs_btino(bti, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((aleaf = d)->hdr.info.magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + if (summary) { + kdb_printf("Attr Leaf, 1st hash 0x%x (at 0x%p)\n", + INT_GET(aleaf->entries[0].hashval, ARCH_CONVERT), aleaf); + } else { + kdb_printf("buf 0x%p attr leaf 0x%p\n", bp, aleaf); + xfsidbg_xattrleaf(aleaf); + } + } else if (INT_GET((dleaf = d)->hdr.info.magic, 
ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + if (summary) { + kdb_printf("Dir Leaf, 1st hash 0x%x (at 0x%p)\n", + dleaf->entries[0].hashval, dleaf); + } else { + kdb_printf("buf 0x%p dir leaf 0x%p\n", bp, dleaf); + xfsidbg_xdirleaf(dleaf); + } + } else if (INT_GET((node = d)->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + if (summary) { + kdb_printf("Dir/Attr Node, level %d, 1st hash 0x%x (at 0x%p)\n", + node->hdr.level, node->btree[0].hashval, node); + } else { + kdb_printf("buf 0x%p dir/attr node 0x%p\n", bp, node); + xfsidbg_xdanode(node); + } + } else if (INT_GET((di = d)->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC) { + if (summary) { + kdb_printf("Disk Inode (at 0x%p)\n", di); + } else { + kdb_printf("buf 0x%p dinode 0x%p\n", bp, di); + xfs_inodebuf(bp); + } + } else if (INT_GET((sb = d)->sb_magicnum, ARCH_CONVERT) == XFS_SB_MAGIC) { + if (summary) { + kdb_printf("Superblock (at 0x%p)\n", sb); + } else { + kdb_printf("buf 0x%p sb 0x%p\n", bp, sb); + /* SB in a buffer - we need to convert */ + xfsidbg_xsb(sb, 1); + } + } else if ((dqb = d)->d_magic == XFS_DQUOT_MAGIC) { +#define XFSIDBG_DQTYPESTR(d) \ + ((INT_GET((d)->d_flags, ARCH_CONVERT) & XFS_DQ_USER) ? "USR" : \ + ((INT_GET((d)->d_flags, ARCH_CONVERT) & XFS_DQ_GROUP) ? "GRP" : "???")) + kdb_printf("Quota blk starting ID [%d], type %s at 0x%p\n", + INT_GET(dqb->d_id, ARCH_CONVERT), XFSIDBG_DQTYPESTR(dqb), dqb); + + } else if (INT_GET((d2block = d)->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + if (summary) { + kdb_printf("Dir2 block (at 0x%p)\n", d2block); + } else { + kdb_printf("buf 0x%p dir2 block 0x%p\n", bp, d2block); + xfs_dir2data((void *)d2block, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((d2data = d)->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) { + if (summary) { + kdb_printf("Dir2 data (at 0x%p)\n", d2data); + } else { + kdb_printf("buf 0x%p dir2 data 0x%p\n", bp, d2data); + xfs_dir2data((void *)d2data, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((d2leaf = d)->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC) { + if (summary) { + kdb_printf("Dir2 leaf(1) (at 0x%p)\n", d2leaf); + } else { + kdb_printf("buf 0x%p dir2 leaf 0x%p\n", bp, d2leaf); + xfs_dir2leaf(d2leaf, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET(d2leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + if (summary) { + kdb_printf("Dir2 leaf(n) (at 0x%p)\n", d2leaf); + } else { + kdb_printf("buf 0x%p dir2 leaf 0x%p\n", bp, d2leaf); + xfs_dir2leaf(d2leaf, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((d2free = d)->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC) { + if (summary) { + kdb_printf("Dir2 free (at 0x%p)\n", d2free); + } else { + kdb_printf("buf 0x%p dir2 free 0x%p\n", bp, d2free); + xfsidbg_xdir2free(d2free); + } + } else { + kdb_printf("buf 0x%p unknown 0x%p\n", bp, d); + } +} + + +/* + * Print an xfs_da_args structure. 
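+ *
+ * An xfs_da_args is the common argument block passed through the
+ * directory and attribute btree code.  The dump shows the name and the
+ * first 32 bytes of the value, the operation flags, and the blkno/index
+ * state for both blocks touched by a leaf split (the "leaf:" and
+ * "leaf2:" lines).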
+ */ +static void +xfsidbg_xdaargs(xfs_da_args_t *n) +{ + char *ch; + int i; + + kdb_printf(" name \""); + for (i = 0; i < n->namelen; i++) { + kdb_printf("%c", n->name[i]); + } + kdb_printf("\"(%d) value ", n->namelen); + if (n->value) { + kdb_printf("\""); + ch = n->value; + for (i = 0; (i < n->valuelen) && (i < 32); ch++, i++) { + switch(*ch) { + case '\n': kdb_printf("\n"); break; + case '\b': kdb_printf("\b"); break; + case '\t': kdb_printf("\t"); break; + default: kdb_printf("%c", *ch); break; + } + } + if (i == 32) + kdb_printf("..."); + kdb_printf("\"(%d)\n", n->valuelen); + } else { + kdb_printf("(NULL)(%d)\n", n->valuelen); + } + kdb_printf(" hashval 0x%x whichfork %d flags <", + (uint_t)n->hashval, n->whichfork); + if (n->flags & ATTR_ROOT) + kdb_printf("ROOT "); + if (n->flags & ATTR_CREATE) + kdb_printf("CREATE "); + if (n->flags & ATTR_REPLACE) + kdb_printf("REPLACE "); + if (n->flags & XFS_ATTR_INCOMPLETE) + kdb_printf("INCOMPLETE "); + i = ~(ATTR_ROOT | ATTR_CREATE | ATTR_REPLACE | XFS_ATTR_INCOMPLETE); + if ((n->flags & i) != 0) + kdb_printf("0x%x", n->flags & i); + kdb_printf(">\n"); + kdb_printf(" rename %d justcheck %d addname %d oknoent %d\n", + n->rename, n->justcheck, n->addname, n->oknoent); + kdb_printf(" leaf: blkno %d index %d rmtblkno %d rmtblkcnt %d\n", + n->blkno, n->index, n->rmtblkno, n->rmtblkcnt); + kdb_printf(" leaf2: blkno %d index %d rmtblkno %d rmtblkcnt %d\n", + n->blkno2, n->index2, n->rmtblkno2, n->rmtblkcnt2); + kdb_printf(" inumber %llu dp 0x%p firstblock 0x%p flist 0x%p\n", + (unsigned long long) n->inumber, + n->dp, n->firstblock, n->flist); + kdb_printf(" trans 0x%p total %d\n", + n->trans, n->total); +} + +/* + * Print a da buffer structure. + */ +static void +xfsidbg_xdabuf(xfs_dabuf_t *dabuf) +{ + int i; + + kdb_printf("nbuf %d dirty %d bbcount %d data 0x%p bps", + dabuf->nbuf, dabuf->dirty, dabuf->bbcount, dabuf->data); + for (i = 0; i < dabuf->nbuf; i++) + kdb_printf(" %d:0x%p", i, dabuf->bps[i]); + kdb_printf("\n"); +#ifdef XFS_DABUF_DEBUG + kdb_printf(" ra 0x%x prev 0x%x next 0x%x dev 0x%x blkno 0x%x\n", + dabuf->ra, dabuf->prev, dabuf->next, dabuf->dev, dabuf->blkno); +#endif +} + +/* + * Print a directory/attribute internal node block. + */ +static void +xfsidbg_xdanode(xfs_da_intnode_t *node) +{ + xfs_da_node_hdr_t *h; + xfs_da_blkinfo_t *i; + xfs_da_node_entry_t *e; + int j; + + h = &node->hdr; + i = &h->info; + kdb_printf("hdr info forw 0x%x back 0x%x magic 0x%x\n", + INT_GET(i->forw, ARCH_CONVERT), INT_GET(i->back, ARCH_CONVERT), INT_GET(i->magic, ARCH_CONVERT)); + kdb_printf("hdr count %d level %d\n", + INT_GET(h->count, ARCH_CONVERT), INT_GET(h->level, ARCH_CONVERT)); + for (j = 0, e = node->btree; j < INT_GET(h->count, ARCH_CONVERT); j++, e++) { + kdb_printf("btree %d hashval 0x%x before 0x%x\n", + j, (uint_t)INT_GET(e->hashval, ARCH_CONVERT), INT_GET(e->before, ARCH_CONVERT)); + } +} + +/* + * Print an xfs_da_state_blk structure. 
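+ *
+ * A da state describes one traversal of the directory/attribute btree:
+ * "path" records the route from the root down to the current block,
+ * "altpath" the alternate route used while splitting or joining blocks,
+ * and "extrablk" an extra block left over from such an operation.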
+ */ +static void +xfsidbg_xdastate(xfs_da_state_t *s) +{ + xfs_da_state_blk_t *eblk; + + kdb_printf("args 0x%p mp 0x%p blocksize %u node_ents %u inleaf %u\n", + s->args, s->mp, s->blocksize, s->node_ents, s->inleaf); + if (s->args) + xfsidbg_xdaargs(s->args); + + kdb_printf("path: "); + xfs_dastate_path(&s->path); + + kdb_printf("altpath: "); + xfs_dastate_path(&s->altpath); + + eblk = &s->extrablk; + kdb_printf("extra: valid %d, after %d\n", s->extravalid, s->extraafter); + kdb_printf(" bp 0x%p blkno 0x%x ", eblk->bp, eblk->blkno); + kdb_printf("index %d hashval 0x%x\n", eblk->index, (uint_t)eblk->hashval); +} + +/* + * Print a directory leaf block. + */ +static void +xfsidbg_xdirleaf(xfs_dir_leafblock_t *leaf) +{ + xfs_dir_leaf_hdr_t *h; + xfs_da_blkinfo_t *i; + xfs_dir_leaf_map_t *m; + xfs_dir_leaf_entry_t *e; + xfs_dir_leaf_name_t *n; + int j, k; + xfs_ino_t ino; + + h = &leaf->hdr; + i = &h->info; + kdb_printf("hdr info forw 0x%x back 0x%x magic 0x%x\n", + INT_GET(i->forw, ARCH_CONVERT), INT_GET(i->back, ARCH_CONVERT), INT_GET(i->magic, ARCH_CONVERT)); + kdb_printf("hdr count %d namebytes %d firstused %d holes %d\n", + INT_GET(h->count, ARCH_CONVERT), INT_GET(h->namebytes, ARCH_CONVERT), INT_GET(h->firstused, ARCH_CONVERT), h->holes); + for (j = 0, m = h->freemap; j < XFS_DIR_LEAF_MAPSIZE; j++, m++) { + kdb_printf("hdr freemap %d base %d size %d\n", + j, INT_GET(m->base, ARCH_CONVERT), INT_GET(m->size, ARCH_CONVERT)); + } + for (j = 0, e = leaf->entries; j < INT_GET(h->count, ARCH_CONVERT); j++, e++) { + n = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(e->nameidx, ARCH_CONVERT)); + XFS_DIR_SF_GET_DIRINO_ARCH(&n->inumber, &ino, ARCH_CONVERT); + kdb_printf("leaf %d hashval 0x%x nameidx %d inumber %llu ", + j, (uint_t)INT_GET(e->hashval, ARCH_CONVERT), + INT_GET(e->nameidx, ARCH_CONVERT), + (unsigned long long)ino); + kdb_printf("namelen %d name \"", e->namelen); + for (k = 0; k < e->namelen; k++) + kdb_printf("%c", n->name[k]); + kdb_printf("\"\n"); + } +} + +/* + * Print a directory v2 data block, single or multiple. + */ +static void +xfs_dir2data(void *addr, int size) +{ + xfs_dir2_data_t *db; + xfs_dir2_block_t *bb; + xfs_dir2_data_hdr_t *h; + xfs_dir2_data_free_t *m; + xfs_dir2_data_entry_t *e; + xfs_dir2_data_unused_t *u; + xfs_dir2_leaf_entry_t *l=NULL; + int j, k; + char *p; + char *t; + xfs_dir2_block_tail_t *tail=NULL; + + db = (xfs_dir2_data_t *)addr; + bb = (xfs_dir2_block_t *)addr; + h = &db->hdr; + kdb_printf("hdr magic 0x%x (%s)\nhdr bestfree", INT_GET(h->magic, ARCH_CONVERT), + INT_GET(h->magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ? "DATA" : + (INT_GET(h->magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC ? 
"BLOCK" : "")); + for (j = 0, m = h->bestfree; j < XFS_DIR2_DATA_FD_COUNT; j++, m++) { + kdb_printf(" %d: 0x%x@0x%x", j, INT_GET(m->length, ARCH_CONVERT), INT_GET(m->offset, ARCH_CONVERT)); + } + kdb_printf("\n"); + if (INT_GET(h->magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) + t = (char *)db + size; + else { + /* XFS_DIR2_BLOCK_TAIL_P */ + tail = (xfs_dir2_block_tail_t *) + ((char *)bb + size - sizeof(xfs_dir2_block_tail_t)); + l = XFS_DIR2_BLOCK_LEAF_P_ARCH(tail, ARCH_CONVERT); + t = (char *)l; + } + for (p = (char *)(h + 1); p < t; ) { + u = (xfs_dir2_data_unused_t *)p; + if (u->freetag == XFS_DIR2_DATA_FREE_TAG) { + kdb_printf("0x%lx unused freetag 0x%x length 0x%x tag 0x%x\n", + (unsigned long) (p - (char *)addr), + INT_GET(u->freetag, ARCH_CONVERT), + INT_GET(u->length, ARCH_CONVERT), + INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(u, ARCH_CONVERT), ARCH_CONVERT)); + p += INT_GET(u->length, ARCH_CONVERT); + continue; + } + e = (xfs_dir2_data_entry_t *)p; + kdb_printf("0x%lx entry inumber %llu namelen %d name \"", + (unsigned long) (p - (char *)addr), + (unsigned long long) INT_GET(e->inumber, ARCH_CONVERT), + e->namelen); + for (k = 0; k < e->namelen; k++) + kdb_printf("%c", e->name[k]); + kdb_printf("\" tag 0x%x\n", INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(e), ARCH_CONVERT)); + p += XFS_DIR2_DATA_ENTSIZE(e->namelen); + } + if (INT_GET(h->magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) + return; + for (j = 0; j < INT_GET(tail->count, ARCH_CONVERT); j++, l++) { + kdb_printf("0x%lx leaf %d hashval 0x%x address 0x%x (byte 0x%x)\n", + (unsigned long) ((char *)l - (char *)addr), j, + (uint_t)INT_GET(l->hashval, ARCH_CONVERT), + INT_GET(l->address, ARCH_CONVERT), + /* XFS_DIR2_DATAPTR_TO_BYTE */ + INT_GET(l->address, ARCH_CONVERT) << XFS_DIR2_DATA_ALIGN_LOG); + } + kdb_printf("0x%lx tail count %d\n", + (unsigned long) ((char *)tail - (char *)addr), + INT_GET(tail->count, ARCH_CONVERT)); +} + +static void +xfs_dir2leaf(xfs_dir2_leaf_t *leaf, int size) +{ + xfs_dir2_leaf_hdr_t *h; + xfs_da_blkinfo_t *i; + xfs_dir2_leaf_entry_t *e; + xfs_dir2_data_off_t *b; + xfs_dir2_leaf_tail_t *t; + int j; + + h = &leaf->hdr; + i = &h->info; + e = leaf->ents; + kdb_printf("hdr info forw 0x%x back 0x%x magic 0x%x\n", + INT_GET(i->forw, ARCH_CONVERT), INT_GET(i->back, ARCH_CONVERT), INT_GET(i->magic, ARCH_CONVERT)); + kdb_printf("hdr count %d stale %d\n", INT_GET(h->count, ARCH_CONVERT), INT_GET(h->stale, ARCH_CONVERT)); + for (j = 0; j < INT_GET(h->count, ARCH_CONVERT); j++, e++) { + kdb_printf("0x%lx ent %d hashval 0x%x address 0x%x (byte 0x%x)\n", + (unsigned long) ((char *)e - (char *)leaf), j, + (uint_t)INT_GET(e->hashval, ARCH_CONVERT), + INT_GET(e->address, ARCH_CONVERT), + /* XFS_DIR2_DATAPTR_TO_BYTE */ + INT_GET(e->address, ARCH_CONVERT) << XFS_DIR2_DATA_ALIGN_LOG); + } + if (INT_GET(i->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) + return; + /* XFS_DIR2_LEAF_TAIL_P */ + t = (xfs_dir2_leaf_tail_t *)((char *)leaf + size - sizeof(*t)); + b = XFS_DIR2_LEAF_BESTS_P_ARCH(t, ARCH_CONVERT); + for (j = 0; j < INT_GET(t->bestcount, ARCH_CONVERT); j++, b++) { + kdb_printf("0x%lx best %d 0x%x\n", + (unsigned long) ((char *)b - (char *)leaf), j, + INT_GET(*b, ARCH_CONVERT)); + } + kdb_printf("tail bestcount %d\n", INT_GET(t->bestcount, ARCH_CONVERT)); +} + +/* + * Print a shortform directory. 
+ */ +static void +xfsidbg_xdirsf(xfs_dir_shortform_t *s) +{ + xfs_dir_sf_hdr_t *sfh; + xfs_dir_sf_entry_t *sfe; + xfs_ino_t ino; + int i, j; + + sfh = &s->hdr; + XFS_DIR_SF_GET_DIRINO_ARCH(&sfh->parent, &ino, ARCH_CONVERT); + kdb_printf("hdr parent %llu", (unsigned long long)ino); + kdb_printf(" count %d\n", sfh->count); + for (i = 0, sfe = s->list; i < sfh->count; i++) { + XFS_DIR_SF_GET_DIRINO_ARCH(&sfe->inumber, &ino, ARCH_CONVERT); + kdb_printf("entry %d inumber %llu", i, (unsigned long long)ino); + kdb_printf(" namelen %d name \"", sfe->namelen); + for (j = 0; j < sfe->namelen; j++) + kdb_printf("%c", sfe->name[j]); + kdb_printf("\"\n"); + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } +} + +/* + * Print a shortform v2 directory. + */ +static void +xfsidbg_xdir2sf(xfs_dir2_sf_t *s) +{ + xfs_dir2_sf_hdr_t *sfh; + xfs_dir2_sf_entry_t *sfe; + xfs_ino_t ino; + int i, j; + + sfh = &s->hdr; + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(s, &sfh->parent, ARCH_CONVERT); + kdb_printf("hdr count %d i8count %d parent %llu\n", + sfh->count, sfh->i8count, (unsigned long long) ino); + for (i = 0, sfe = XFS_DIR2_SF_FIRSTENTRY(s); i < sfh->count; i++) { + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(s, XFS_DIR2_SF_INUMBERP(sfe), ARCH_CONVERT); + kdb_printf("entry %d inumber %llu offset 0x%x namelen %d name \"", + i, (unsigned long long) ino, + XFS_DIR2_SF_GET_OFFSET_ARCH(sfe, ARCH_CONVERT), + sfe->namelen); + for (j = 0; j < sfe->namelen; j++) + kdb_printf("%c", sfe->name[j]); + kdb_printf("\"\n"); + sfe = XFS_DIR2_SF_NEXTENTRY(s, sfe); + } +} + +/* + * Print a node-form v2 directory freemap block. + */ +static void +xfsidbg_xdir2free(xfs_dir2_free_t *f) +{ + int i; + + kdb_printf("hdr magic 0x%x firstdb %d nvalid %d nused %d\n", + INT_GET(f->hdr.magic, ARCH_CONVERT), INT_GET(f->hdr.firstdb, ARCH_CONVERT), INT_GET(f->hdr.nvalid, ARCH_CONVERT), INT_GET(f->hdr.nused, ARCH_CONVERT)); + for (i = 0; i < INT_GET(f->hdr.nvalid, ARCH_CONVERT); i++) { + kdb_printf("entry %d db %d count %d\n", + i, i + INT_GET(f->hdr.firstdb, ARCH_CONVERT), INT_GET(f->bests[i], ARCH_CONVERT)); + } +} + + +/* + * Print xfs extent list. + */ +static void +xfsidbg_xexlist(xfs_inode_t *ip) +{ + xfs_xexlist_fork(ip, XFS_DATA_FORK); + if (XFS_IFORK_Q(ip)) + xfs_xexlist_fork(ip, XFS_ATTR_FORK); +} + +/* + * Print an xfs free-extent list. + */ +static void +xfsidbg_xflist(xfs_bmap_free_t *flist) +{ + xfs_bmap_free_item_t *item; + + kdb_printf("flist@0x%p: first 0x%p count %d low %d\n", flist, + flist->xbf_first, flist->xbf_count, flist->xbf_low); + for (item = flist->xbf_first; item; item = item->xbfi_next) { + kdb_printf("item@0x%p: startblock %Lx blockcount %d", item, + (xfs_dfsbno_t)item->xbfi_startblock, + item->xbfi_blockcount); + } +} + +/* + * Print out the help messages for these functions. + */ +static void +xfsidbg_xhelp(void) +{ + struct xif *p; + + for (p = xfsidbg_funcs; p->name; p++) + kdb_printf("%-16s %s %s\n", p->name, p->args, p->help); +} + +/* + * Print out an XFS in-core log structure. 
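+ *
+ * Each xlog_in_core_t is one buffer in the ring of in-core log buffers;
+ * the state flags (ACTIVE, WANT_SYNC, SYNCING, ...) show where it is in
+ * its fill/flush cycle.  xiclogall, below, walks the whole ring starting
+ * from any one of them.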
+ */ +static void +xfsidbg_xiclog(xlog_in_core_t *iclog) +{ + int i; + static char *ic_flags[] = { + "ACTIVE", /* 0x0001 */ + "WANT_SYNC", /* 0x0002 */ + "SYNCING", /* 0X0004 */ + "DONE_SYNC", /* 0X0008 */ + "DO_CALLBACK", /* 0X0010 */ + "CALLBACK", /* 0X0020 */ + "DIRTY", /* 0X0040 */ + "IOERROR", /* 0X0080 */ + "NOTUSED", /* 0X8000 */ + 0 + }; + + kdb_printf("xlog_in_core/header at 0x%p/0x%p\n", + iclog, iclog->hic_data); + kdb_printf("magicno: %x cycle: %d version: %d lsn: 0x%Lx\n", + INT_GET(iclog->ic_header.h_magicno, ARCH_CONVERT), INT_GET(iclog->ic_header.h_cycle, ARCH_CONVERT), + INT_GET(iclog->ic_header.h_version, ARCH_CONVERT), INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)); + kdb_printf("tail_lsn: 0x%Lx len: %d prev_block: %d num_ops: %d\n", + INT_GET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT), INT_GET(iclog->ic_header.h_len, ARCH_CONVERT), + INT_GET(iclog->ic_header.h_prev_block, ARCH_CONVERT), INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT)); + kdb_printf("cycle_data: "); + for (i=0; i<(iclog->ic_size>>BBSHIFT); i++) { + kdb_printf("%x ", INT_GET(iclog->ic_header.h_cycle_data[i], ARCH_CONVERT)); + } + kdb_printf("\n"); + kdb_printf("size: %d\n", INT_GET(iclog->ic_header.h_size, ARCH_CONVERT)); + kdb_printf("\n"); + kdb_printf("--------------------------------------------------\n"); + kdb_printf("data: 0x%p &forcesema: 0x%p next: 0x%p bp: 0x%p\n", + iclog->ic_datap, &iclog->ic_forcesema, iclog->ic_next, + iclog->ic_bp); + kdb_printf("log: 0x%p callb: 0x%p callb_tail: 0x%p roundoff: %d\n", + iclog->ic_log, iclog->ic_callback, iclog->ic_callback_tail, + iclog->ic_roundoff); + kdb_printf("size: %d (OFFSET: %d) refcnt: %d bwritecnt: %d", + iclog->ic_size, iclog->ic_offset, + iclog->ic_refcnt, iclog->ic_bwritecnt); + if (iclog->ic_state & XLOG_STATE_ALL) + printflags(iclog->ic_state, ic_flags, "state:"); + else + kdb_printf("state: ILLEGAL 0x%x", iclog->ic_state); + kdb_printf("\n"); +} /* xfsidbg_xiclog */ + + +/* + * Print all incore logs. + */ +static void +xfsidbg_xiclogall(xlog_in_core_t *iclog) +{ + xlog_in_core_t *first_iclog = iclog; + + do { + xfsidbg_xiclog(iclog); + kdb_printf("=================================================\n"); + iclog = iclog->ic_next; + } while (iclog != first_iclog); +} /* xfsidbg_xiclogall */ + +/* + * Print out the callback structures attached to an iclog. + */ +static void +xfsidbg_xiclogcb(xlog_in_core_t *iclog) +{ + xfs_log_callback_t *cb; + kdb_symtab_t symtab; + + for (cb = iclog->ic_callback; cb != NULL; cb = cb->cb_next) { + + if (kdbnearsym((unsigned long)cb->cb_func, &symtab)) { + unsigned long offval; + + offval = (unsigned long)cb->cb_func - symtab.sym_start; + + if (offval) + kdb_printf("func = %s+0x%lx", + symtab.sym_name, + offval); + else + kdb_printf("func = %s", symtab.sym_name); + } else + kdb_printf("func = ?? 0x%p", (void *)cb->cb_func); + + kdb_printf(" arg 0x%p next 0x%p\n", cb->cb_arg, cb->cb_next); + } +} + + +/* + * Print all of the inodes attached to the given mount structure. 
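+ *
+ * This walks the mount point's circular m_inodes list via i_mnext,
+ * skipping inodes whose i_mount has already been cleared, so the
+ * output can be very long on a busy filesystem.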
+ */ +static void +xfsidbg_xinodes(xfs_mount_t *mp) +{ + xfs_inode_t *ip; + + kdb_printf("xfs_mount at 0x%p\n", mp); + ip = mp->m_inodes; + if (ip != NULL) { + do { + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + kdb_printf("\n"); + xfsidbg_xnode(ip); + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + } + kdb_printf("\nEnd of Inodes\n"); +} + +static void +xfsidbg_delayed_blocks(xfs_mount_t *mp) +{ + xfs_inode_t *ip; + unsigned int total = 0; + unsigned int icount = 0; + + ip = mp->m_inodes; + if (ip != NULL) { + do { + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + if (ip->i_delayed_blks) { + total += ip->i_delayed_blks; + icount++; + } + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + } + kdb_printf("delayed blocks total: %d in %d inodes\n", total, icount); +} + +static void +xfsidbg_xinodes_quiesce(xfs_mount_t *mp) +{ + xfs_inode_t *ip; + + kdb_printf("xfs_mount at 0x%p\n", mp); + ip = mp->m_inodes; + if (ip != NULL) { + do { + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + if (!(ip->i_flags & XFS_IQUIESCE)) { + kdb_printf("ip 0x%p not quiesced\n", ip); + } + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + } + kdb_printf("\nEnd of Inodes\n"); +} + +static char * +xfsidbg_get_cstate(int state) +{ + switch(state) { + case XLOG_STATE_COVER_IDLE: + return("idle"); + case XLOG_STATE_COVER_NEED: + return("need"); + case XLOG_STATE_COVER_DONE: + return("done"); + case XLOG_STATE_COVER_NEED2: + return("need2"); + case XLOG_STATE_COVER_DONE2: + return("done2"); + default: + return("unknown"); + } +} + +/* + * Print out an XFS log structure. + */ +static void +xfsidbg_xlog(xlog_t *log) +{ + int rbytes; + int wbytes; + static char *t_flags[] = { + "CHKSUM_MISMATCH", /* 0x01 */ + "ACTIVE_RECOVERY", /* 0x02 */ + "RECOVERY_NEEDED", /* 0x04 */ + "IO_ERROR", /* 0x08 */ + 0 + }; + + kdb_printf("xlog at 0x%p\n", log); + kdb_printf("&flushsm: 0x%p flushcnt: %d tic_cnt: %d tic_tcnt: %d \n", + &log->l_flushsema, log->l_flushcnt, + log->l_ticket_cnt, log->l_ticket_tcnt); + kdb_printf("freelist: 0x%p tail: 0x%p ICLOG: 0x%p \n", + log->l_freelist, log->l_tail, log->l_iclog); + kdb_printf("&icloglock: 0x%p tail_lsn: %s last_sync_lsn: %s \n", + &log->l_icloglock, xfs_fmtlsn(&log->l_tail_lsn), + xfs_fmtlsn(&log->l_last_sync_lsn)); + kdb_printf("mp: 0x%p xbuf: 0x%p roundoff: %d l_covered_state: %s \n", + log->l_mp, log->l_xbuf, log->l_roundoff, + xfsidbg_get_cstate(log->l_covered_state)); + kdb_printf("flags: "); + printflags(log->l_flags, t_flags,"log"); + kdb_printf(" dev: 0x%x logBBstart: %lld logsize: %d logBBsize: %d\n", + log->l_dev, (long long) log->l_logBBstart, + log->l_logsize,log->l_logBBsize); + kdb_printf("curr_cycle: %d prev_cycle: %d curr_block: %d prev_block: %d\n", + log->l_curr_cycle, log->l_prev_cycle, log->l_curr_block, + log->l_prev_block); + kdb_printf("iclog_bak: 0x%p iclog_size: 0x%x (%d) num iclogs: %d\n", + log->l_iclog_bak, log->l_iclog_size, log->l_iclog_size, + log->l_iclog_bufs); + kdb_printf("l_iclog_hsize %d l_iclog_heads %d\n", + log->l_iclog_hsize, log->l_iclog_heads); + kdb_printf("&grant_lock: 0x%p resHeadQ: 0x%p wrHeadQ: 0x%p\n", + &log->l_grant_lock, log->l_reserve_headq, log->l_write_headq); + kdb_printf("GResCycle: %d GResBytes: %d GWrCycle: %d GWrBytes: %d\n", + log->l_grant_reserve_cycle, log->l_grant_reserve_bytes, + log->l_grant_write_cycle, log->l_grant_write_bytes); + rbytes = log->l_grant_reserve_bytes + log->l_roundoff; + wbytes = log->l_grant_write_bytes + log->l_roundoff; + kdb_printf("GResBlocks: %d 
GResRemain: %d GWrBlocks: %d GWrRemain: %d\n",
+		rbytes / BBSIZE, rbytes % BBSIZE,
+		wbytes / BBSIZE, wbytes % BBSIZE);
+}	/* xfsidbg_xlog */
+
+
+/*
+ * Print out an XFS recovery transaction
+ */
+static void
+xfsidbg_xlog_ritem(xlog_recover_item_t *item)
+{
+	int i = XLOG_MAX_REGIONS_IN_ITEM;
+
+	kdb_printf("(xlog_recover_item 0x%p) ", item);
+	kdb_printf("next: 0x%p prev: 0x%p type: %d cnt: %d ttl: %d\n",
+		item->ri_next, item->ri_prev, ITEM_TYPE(item), item->ri_cnt,
+		item->ri_total);
+	for ( ; i > 0; i--) {
+		if (!item->ri_buf[XLOG_MAX_REGIONS_IN_ITEM-i].i_addr)
+			break;
+		kdb_printf("a: 0x%p l: %d ",
+			item->ri_buf[XLOG_MAX_REGIONS_IN_ITEM-i].i_addr,
+			item->ri_buf[XLOG_MAX_REGIONS_IN_ITEM-i].i_len);
+	}
+	kdb_printf("\n");
+}	/* xfsidbg_xlog_ritem */
+
+/*
+ * Print out an XFS recovery transaction
+ */
+static void
+xfsidbg_xlog_rtrans(xlog_recover_t *trans)
+{
+	xlog_recover_item_t *rip, *first_rip;
+
+	kdb_printf("(xlog_recover 0x%p) ", trans);
+	kdb_printf("tid: %x type: %d items: %d ttid: 0x%x ",
+		trans->r_log_tid, trans->r_theader.th_type,
+		trans->r_theader.th_num_items, trans->r_theader.th_tid);
+	kdb_printf("itemq: 0x%p\n", trans->r_itemq);
+	if (trans->r_itemq) {
+		rip = first_rip = trans->r_itemq;
+		do {
+			kdb_printf("(recovery item: 0x%p) ", rip);
+			kdb_printf("type: %d cnt: %d total: %d\n",
+				ITEM_TYPE(rip), rip->ri_cnt, rip->ri_total);
+			rip = rip->ri_next;
+		} while (rip != first_rip);
+	}
+}	/* xfsidbg_xlog_rtrans */
+
+static void
+xfsidbg_xlog_buf_logitem(xlog_recover_item_t *item)
+{
+	xfs_buf_log_format_t *buf_f;
+	int i, j;
+	int bit;
+	int nbits;
+	unsigned int *data_map;
+	unsigned int map_size;
+	int size;
+
+	buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
+	if (buf_f->blf_flags & XFS_BLI_INODE_BUF) {
+		kdb_printf("\tINODE BUF <blkno=0x%Lx, len=0x%x>\n",
+			buf_f->blf_blkno, buf_f->blf_len);
+	} else if (buf_f->blf_flags & (XFS_BLI_UDQUOT_BUF | XFS_BLI_GDQUOT_BUF)) {
+		kdb_printf("\tDQUOT BUF <blkno=0x%Lx, len=0x%x>\n",
+			buf_f->blf_blkno, buf_f->blf_len);
+	} else {
+		data_map = buf_f->blf_data_map;
+		map_size = buf_f->blf_map_size;
+		kdb_printf("\tREG BUF <blkno=0x%Lx, len=0x%x map 0x%p size %d>\n",
+			buf_f->blf_blkno, buf_f->blf_len, data_map, map_size);
+		bit = 0;
+		i = 1; /* 0 is the buf format structure */
+		while (1) {
+			bit = xfs_next_bit(data_map, map_size, bit);
+			if (bit == -1)
+				break;
+			nbits = xfs_contig_bits(data_map, map_size, bit);
+			size = ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT);
+			kdb_printf("\t\tlogged region: 0x%p size 0x%x\n",
+				item->ri_buf[i].i_addr, size);
+			kdb_printf("\t\t\t\"");
+			for (j=0; j<8 && j<size; j++) {
+				kdb_printf("%02x", ((char *)item->ri_buf[i].i_addr)[j]);
+			}
+			kdb_printf("...\"\n");
+			i++;
+			bit += nbits;
+		}
+
+	}
+}
+
+/*
+ * Print out an ENTIRE XFS recovery transaction
+ */
+static void
+xfsidbg_xlog_rtrans_entire(xlog_recover_t *trans)
+{
+	xlog_recover_item_t *item, *first_rip;
+
+	kdb_printf("(Recovering Xact 0x%p) ", trans);
+	kdb_printf("tid: %x type: %d nitems: %d ttid: 0x%x ",
+		trans->r_log_tid, trans->r_theader.th_type,
+		trans->r_theader.th_num_items, trans->r_theader.th_tid);
+	kdb_printf("itemq: 0x%p\n", trans->r_itemq);
+	if (trans->r_itemq) {
+		item = first_rip = trans->r_itemq;
+		do {
+			/*
+			kdb_printf("(recovery item: 0x%x) ", item);
+			kdb_printf("type: %d cnt: %d total: %d\n",
+				item->ri_type, item->ri_cnt, item->ri_total);
+			*/
+			if ((ITEM_TYPE(item) == XFS_LI_BUF) ||
+			    (ITEM_TYPE(item) == XFS_LI_6_1_BUF) ||
+			    (ITEM_TYPE(item) == XFS_LI_5_3_BUF)) {
+				kdb_printf("BUF:");
+				xfsidbg_xlog_buf_logitem(item);
+			} else if ((ITEM_TYPE(item) == XFS_LI_INODE) ||
+				   (ITEM_TYPE(item) == XFS_LI_6_1_INODE) ||
+				   (ITEM_TYPE(item) == XFS_LI_5_3_INODE)) {
+				kdb_printf("INODE:\n");
+			} else if
(ITEM_TYPE(item) == XFS_LI_EFI) { + kdb_printf("EFI:\n"); + } else if (ITEM_TYPE(item) == XFS_LI_EFD) { + kdb_printf("EFD:\n"); + } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) { + kdb_printf("DQUOT:\n"); + } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) { + kdb_printf("QUOTAOFF:\n"); + } else { + kdb_printf("UNKNOWN LOGITEM 0x%x\n", ITEM_TYPE(item)); + } + item = item->ri_next; + } while (item != first_rip); + } +} /* xfsidbg_xlog_rtrans */ + +/* + * Print out an XFS ticket structure. + */ +static void +xfsidbg_xlog_tic(xlog_ticket_t *tic) +{ + static char *t_flags[] = { + "INIT", /* 0x1 */ + "PERM_RES", /* 0x2 */ + "IN_Q", /* 0x4 */ + 0 + }; + + kdb_printf("xlog_ticket at 0x%p\n", tic); + kdb_printf("next: 0x%p prev: 0x%p tid: 0x%x \n", + tic->t_next, tic->t_prev, tic->t_tid); + kdb_printf("curr_res: %d unit_res: %d ocnt: %d cnt: %d\n", + tic->t_curr_res, tic->t_unit_res, (int)tic->t_ocnt, + (int)tic->t_cnt); + kdb_printf("clientid: %c \n", tic->t_clientid); + printflags(tic->t_flags, t_flags,"ticket"); + kdb_printf("\n"); +} /* xfsidbg_xlog_tic */ + +/* + * Print out a single log item. + */ +static void +xfsidbg_xlogitem(xfs_log_item_t *lip) +{ + xfs_log_item_t *bio_lip; + static char *lid_type[] = { + "???", /* 0 */ + "5-3-buf", /* 1 */ + "5-3-inode", /* 2 */ + "efi", /* 3 */ + "efd", /* 4 */ + "iunlink", /* 5 */ + "6-1-inode", /* 6 */ + "6-1-buf", /* 7 */ + "inode", /* 8 */ + "buf", /* 9 */ + "dquot", /* 10 */ + 0 + }; + static char *li_flags[] = { + "in ail", /* 0x1 */ + 0 + }; + + kdb_printf("type %s mountp 0x%p flags ", + lid_type[lip->li_type - XFS_LI_5_3_BUF + 1], + lip->li_mountp); + printflags((uint)(lip->li_flags), li_flags,"log"); + kdb_printf("\n"); + kdb_printf("ail forw 0x%p ail back 0x%p lsn %s\ndesc %p ops 0x%p", + lip->li_ail.ail_forw, lip->li_ail.ail_back, + xfs_fmtlsn(&(lip->li_lsn)), lip->li_desc, lip->li_ops); + kdb_printf(" iodonefunc &0x%p\n", lip->li_cb); + if (lip->li_type == XFS_LI_BUF) { + bio_lip = lip->li_bio_list; + if (bio_lip != NULL) { + kdb_printf("iodone list:\n"); + } + while (bio_lip != NULL) { + kdb_printf("item 0x%p func 0x%p\n", + bio_lip, bio_lip->li_cb); + bio_lip = bio_lip->li_bio_list; + } + } + switch (lip->li_type) { + case XFS_LI_BUF: + xfs_buf_item_print((xfs_buf_log_item_t *)lip, 0); + break; + case XFS_LI_INODE: + xfs_inode_item_print((xfs_inode_log_item_t *)lip, 0); + break; + case XFS_LI_EFI: + xfs_efi_item_print((xfs_efi_log_item_t *)lip, 0); + break; + case XFS_LI_EFD: + xfs_efd_item_print((xfs_efd_log_item_t *)lip, 0); + break; + case XFS_LI_DQUOT: + xfs_dquot_item_print((xfs_dq_logitem_t *)lip, 0); + break; + case XFS_LI_QUOTAOFF: + xfs_qoff_item_print((xfs_qoff_logitem_t *)lip, 0); + break; + + default: + kdb_printf("Unknown item type %d\n", lip->li_type); + break; + } +} + +/* + * Print out a summary of the AIL hanging off of a mount struct. 
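+ *
+ * The AIL is kept sorted by log sequence number, so items are printed
+ * oldest first; the lsn of the first item is what pins the tail of
+ * the log.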
+ */ +static void +xfsidbg_xaildump(xfs_mount_t *mp) +{ + xfs_log_item_t *lip; + static char *lid_type[] = { + "???", /* 0 */ + "5-3-buf", /* 1 */ + "5-3-inode", /* 2 */ + "efi", /* 3 */ + "efd", /* 4 */ + "iunlink", /* 5 */ + "6-1-inode", /* 6 */ + "6-1-buf", /* 7 */ + "inode", /* 8 */ + "buf", /* 9 */ + "dquot", /* 10 */ + 0 + }; + static char *li_flags[] = { + "in ail", /* 0x1 */ + 0 + }; + int count; + + if ((mp->m_ail.ail_forw == NULL) || + (mp->m_ail.ail_forw == (xfs_log_item_t *)&mp->m_ail)) { + kdb_printf("AIL is empty\n"); + return; + } + kdb_printf("AIL for mp 0x%p, oldest first\n", mp); + lip = (xfs_log_item_t*)mp->m_ail.ail_forw; + for (count = 0; lip; count++) { + kdb_printf("[%d] type %s ", count, + lid_type[lip->li_type - XFS_LI_5_3_BUF + 1]); + printflags((uint)(lip->li_flags), li_flags, "flags:"); + kdb_printf(" lsn %s\n ", xfs_fmtlsn(&(lip->li_lsn))); + switch (lip->li_type) { + case XFS_LI_BUF: + xfs_buf_item_print((xfs_buf_log_item_t *)lip, 1); + break; + case XFS_LI_INODE: + xfs_inode_item_print((xfs_inode_log_item_t *)lip, 1); + break; + case XFS_LI_EFI: + xfs_efi_item_print((xfs_efi_log_item_t *)lip, 1); + break; + case XFS_LI_EFD: + xfs_efd_item_print((xfs_efd_log_item_t *)lip, 1); + break; + case XFS_LI_DQUOT: + xfs_dquot_item_print((xfs_dq_logitem_t *)lip, 1); + break; + case XFS_LI_QUOTAOFF: + xfs_qoff_item_print((xfs_qoff_logitem_t *)lip, 1); + break; + default: + kdb_printf("Unknown item type %d\n", lip->li_type); + break; + } + + if (lip->li_ail.ail_forw == (xfs_log_item_t*)&mp->m_ail) { + lip = NULL; + } else { + lip = lip->li_ail.ail_forw; + } + } +} + +/* + * Print xfs mount structure. + */ +static void +xfsidbg_xmount(xfs_mount_t *mp) +{ + static char *xmount_flags[] = { + "WSYNC", /* 0x0001 */ + "INO64", /* 0x0002 */ + "RQCHK", /* 0x0004 */ + "FSCLEAN", /* 0x0008 */ + "FSSHUTDN", /* 0x0010 */ + "NOATIME", /* 0x0020 */ + "RETERR", /* 0x0040 */ + "NOALIGN", /* 0x0080 */ + "UNSHRD", /* 0x0100 */ + "RGSTRD", /* 0x0200 */ + "NORECVR", /* 0x0400 */ + "SHRD", /* 0x0800 */ + "IOSZ", /* 0x1000 */ + "OSYNC", /* 0x2000 */ + "NOUUID", /* 0x4000 */ + "32BIT", /* 0x8000 */ + "NOLOGFLUSH", /* 0x10000 */ + 0 + }; + + static char *quota_flags[] = { + "UQ", /* 0x0001 */ + "UQE", /* 0x0002 */ + "UQCHKD", /* 0x0004 */ + "PQ", /* 0x0008 (IRIX ondisk) */ + "GQE", /* 0x0010 */ + "GQCHKD", /* 0x0020 */ + "GQ", /* 0x0040 */ + "UQACTV", /* 0x0080 */ + "GQACTV", /* 0x0100 */ + "QMAYBE", /* 0x0200 */ + 0 + }; + + kdb_printf("xfs_mount at 0x%p\n", mp); + kdb_printf("vfsp 0x%p tid 0x%x ail_lock 0x%p &ail 0x%p\n", + XFS_MTOVFS(mp), mp->m_tid, &mp->m_ail_lock, &mp->m_ail); + kdb_printf("ail_gen 0x%x &sb 0x%p\n", + mp->m_ail_gen, &mp->m_sb); + kdb_printf("sb_lock 0x%p sb_bp 0x%p dev 0x%x logdev 0x%x rtdev 0x%x\n", + &mp->m_sb_lock, mp->m_sb_bp, + mp->m_ddev_targp ? mp->m_ddev_targp->pbr_dev : 0, + mp->m_logdev_targp ? mp->m_logdev_targp->pbr_dev : 0, + mp->m_rtdev_targp ? 
mp->m_rtdev_targp->pbr_dev : 0); + kdb_printf("bsize %d agfrotor %d agirotor %d ihash 0x%p ihsize %d\n", + mp->m_bsize, mp->m_agfrotor, mp->m_agirotor, + mp->m_ihash, mp->m_ihsize); + kdb_printf("inodes 0x%p ilock 0x%p ireclaims 0x%x\n", + mp->m_inodes, &mp->m_ilock, mp->m_ireclaims); + kdb_printf("readio_log 0x%x readio_blocks 0x%x ", + mp->m_readio_log, mp->m_readio_blocks); + kdb_printf("writeio_log 0x%x writeio_blocks 0x%x\n", + mp->m_writeio_log, mp->m_writeio_blocks); + kdb_printf("logbufs %d logbsize %d LOG 0x%p\n", mp->m_logbufs, + mp->m_logbsize, mp->m_log); + kdb_printf("rsumlevels 0x%x rsumsize 0x%x rbmip 0x%p rsumip 0x%p\n", + mp->m_rsumlevels, mp->m_rsumsize, mp->m_rbmip, mp->m_rsumip); + kdb_printf("rootip 0x%p\n", mp->m_rootip); + kdb_printf("dircook_elog %d blkbit_log %d blkbb_log %d agno_log %d\n", + mp->m_dircook_elog, mp->m_blkbit_log, mp->m_blkbb_log, + mp->m_agno_log); + kdb_printf("agino_log %d nreadaheads %d inode cluster size %d\n", + mp->m_agino_log, mp->m_nreadaheads, + mp->m_inode_cluster_size); + kdb_printf("blockmask 0x%x blockwsize 0x%x blockwmask 0x%x\n", + mp->m_blockmask, mp->m_blockwsize, mp->m_blockwmask); + kdb_printf("alloc_mxr[lf,nd] %d %d alloc_mnr[lf,nd] %d %d\n", + mp->m_alloc_mxr[0], mp->m_alloc_mxr[1], + mp->m_alloc_mnr[0], mp->m_alloc_mnr[1]); + kdb_printf("bmap_dmxr[lfnr,ndnr] %d %d bmap_dmnr[lfnr,ndnr] %d %d\n", + mp->m_bmap_dmxr[0], mp->m_bmap_dmxr[1], + mp->m_bmap_dmnr[0], mp->m_bmap_dmnr[1]); + kdb_printf("inobt_mxr[lf,nd] %d %d inobt_mnr[lf,nd] %d %d\n", + mp->m_inobt_mxr[0], mp->m_inobt_mxr[1], + mp->m_inobt_mnr[0], mp->m_inobt_mnr[1]); + kdb_printf("ag_maxlevels %d bm_maxlevels[d,a] %d %d in_maxlevels %d\n", + mp->m_ag_maxlevels, mp->m_bm_maxlevels[0], + mp->m_bm_maxlevels[1], mp->m_in_maxlevels); + kdb_printf("perag 0x%p &peraglock 0x%p &growlock 0x%p\n", + mp->m_perag, &mp->m_peraglock, &mp->m_growlock); + printflags(mp->m_flags, xmount_flags,"flags"); + kdb_printf("ialloc_inos %d ialloc_blks %d litino %d\n", + mp->m_ialloc_inos, mp->m_ialloc_blks, mp->m_litino); + kdb_printf("dir_node_ents %u attr_node_ents %u\n", + mp->m_dir_node_ents, mp->m_attr_node_ents); + kdb_printf("attroffset %d maxicount %Ld inoalign_mask %d\n", + mp->m_attroffset, mp->m_maxicount, mp->m_inoalign_mask); + kdb_printf("resblks %Ld resblks_avail %Ld\n", mp->m_resblks, + mp->m_resblks_avail); +#if XFS_BIG_FILESYSTEMS + kdb_printf(" inoadd %llx\n", (unsigned long long) mp->m_inoadd); +#else + kdb_printf("\n"); +#endif + if (mp->m_quotainfo) + kdb_printf("quotainfo 0x%p (uqip = 0x%p, gqip = 0x%p)\n", + mp->m_quotainfo, + mp->m_quotainfo->qi_uquotaip, + mp->m_quotainfo->qi_gquotaip); + else + kdb_printf("quotainfo NULL\n"); + printflags(mp->m_qflags, quota_flags,"quotaflags"); + kdb_printf("\n"); + kdb_printf("dalign %d swidth %d sinoalign %d attr_magicpct %d dir_magicpct %d\n", + mp->m_dalign, mp->m_swidth, mp->m_sinoalign, + mp->m_attr_magicpct, mp->m_dir_magicpct); + kdb_printf("mk_sharedro %d inode_quiesce %d sectbb_log %d\n", + mp->m_mk_sharedro, mp->m_inode_quiesce, mp->m_sectbb_log); + kdb_printf("dirversion %d dirblkfsbs %d &dirops 0x%p\n", + mp->m_dirversion, mp->m_dirblkfsbs, &mp->m_dirops); + kdb_printf("dirblksize %d dirdatablk 0x%Lx dirleafblk 0x%Lx dirfreeblk 0x%Lx\n", + mp->m_dirblksize, + (xfs_dfiloff_t)mp->m_dirdatablk, + (xfs_dfiloff_t)mp->m_dirleafblk, + (xfs_dfiloff_t)mp->m_dirfreeblk); + kdb_printf("chsize %d chash 0x%p\n", + mp->m_chsize, mp->m_chash); + kdb_printf("m_lstripemask %d\n", mp->m_lstripemask); + kdb_printf("m_frozen %d 
m_active_trans %d\n", + mp->m_frozen, mp->m_active_trans.counter); + if (mp->m_fsname != NULL) + kdb_printf("mountpoint \"%s\"\n", mp->m_fsname); + else + kdb_printf("No name!!!\n"); + +} + +static void +xfsidbg_xihash(xfs_mount_t *mp) +{ + xfs_ihash_t *ih; + int i; + int j; + int total; + int numzeros; + xfs_inode_t *ip; + int *hist; + int hist_bytes = mp->m_ihsize * sizeof(int); + int hist2[21]; + + hist = (int *) kmalloc(hist_bytes, GFP_KERNEL); + + if (hist == NULL) { + kdb_printf("xfsidbg_xihash: kmalloc(%d) failed!\n", + hist_bytes); + return; + } + + for (i = 0; i < mp->m_ihsize; i++) { + ih = mp->m_ihash + i; + j = 0; + for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) + j++; + hist[i] = j; + } + + numzeros = total = 0; + + for (i = 0; i < 21; i++) + hist2[i] = 0; + + for (i = 0; i < mp->m_ihsize; i++) { + kdb_printf("%d ", hist[i]); + total += hist[i]; + numzeros += hist[i] == 0 ? 1 : 0; + if (hist[i] > 20) + j = 20; + else + j = hist[i]; + + if (! (j <= 20)) { + kdb_printf("xfsidbg_xihash: (j > 20)/%d @ line # %d\n", + j, __LINE__); + return; + } + + hist2[j]++; + } + + kdb_printf("\n"); + + kdb_printf("total inodes = %d, average length = %d, adjusted average = %d \n", + total, total / mp->m_ihsize, + total / (mp->m_ihsize - numzeros)); + + for (i = 0; i < 21; i++) { + kdb_printf("%d - %d , ", i, hist2[i]); + } + kdb_printf("\n"); + kfree(hist); +} + +/* + * Command to print xfs inodes: kp xnode + */ +static void +xfsidbg_xnode(xfs_inode_t *ip) +{ + static char *tab_flags[] = { + "grio", /* XFS_IGRIO */ + "uiosize", /* XFS_IUIOSZ */ + "quiesce", /* XFS_IQUIESCE */ + "reclaim", /* XFS_IRECLAIM */ + NULL + }; + + kdb_printf("hash 0x%p next 0x%p prevp 0x%p mount 0x%p\n", + ip->i_hash, + ip->i_next, + ip->i_prevp, + ip->i_mount); + kdb_printf("mnext 0x%p mprev 0x%p vnode 0x%p \n", + ip->i_mnext, + ip->i_mprev, + XFS_ITOV_NULL(ip)); + kdb_printf("dev %x ino %s\n", + ip->i_mount->m_dev, + xfs_fmtino(ip->i_ino, ip->i_mount)); + kdb_printf("blkno 0x%llx len 0x%x boffset 0x%x\n", + (long long) ip->i_blkno, + ip->i_len, + ip->i_boffset); + kdb_printf("transp 0x%p &itemp 0x%p\n", + ip->i_transp, + ip->i_itemp); + kdb_printf("&lock 0x%p &iolock 0x%p ", + &ip->i_lock, + &ip->i_iolock); + kdb_printf("&flock 0x%p (%d) pincount 0x%x\n", + &ip->i_flock, valusema(&ip->i_flock), + xfs_ipincount(ip)); + kdb_printf("udquotp 0x%p gdquotp 0x%p\n", + ip->i_udquot, ip->i_gdquot); + kdb_printf("new_size %Ld\n", ip->i_iocore.io_new_size); + printflags((int)ip->i_flags, tab_flags, "flags"); + kdb_printf("\n"); + kdb_printf("update_core %d update size %d\n", + (int)(ip->i_update_core), (int) ip->i_update_size); + kdb_printf("gen 0x%x delayed blks %d", + ip->i_gen, + ip->i_delayed_blks); + kdb_printf("\n"); + kdb_printf("chash 0x%p cnext 0x%p cprev 0x%p\n", + ip->i_chash, + ip->i_cnext, + ip->i_cprev); + xfs_xnode_fork("data", &ip->i_df); + xfs_xnode_fork("attr", ip->i_afp); + kdb_printf("\n"); + xfs_prdinode_core(&ip->i_d, ARCH_NOCONVERT); +} + +static void +xfsidbg_xcore(xfs_iocore_t *io) +{ + kdb_printf("io_obj 0x%p io_flags 0x%x io_mount 0x%p\n", + io->io_obj, io->io_flags, io->io_mount); + kdb_printf("new_size %Lx\n", io->io_new_size); +} + +/* + * Command to print xfs inode cluster hash table: kp xchash + */ +static void +xfsidbg_xchash(xfs_mount_t *mp) +{ + int i; + xfs_chash_t *ch; + + kdb_printf("m_chash 0x%p size %d\n", + mp->m_chash, mp->m_chsize); + for (i = 0; i < mp->m_chsize; i++) { + ch = mp->m_chash + i; + kdb_printf("[%3d] ch 0x%p chashlist 0x%p\n", i, ch, ch->ch_list); + 
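/* dump each inode hashed to this cluster chain */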
xfsidbg_xchashlist(ch->ch_list); + } +} + +/* + * Command to print xfs inode cluster hash list: kp xchashlist + */ +static void +xfsidbg_xchashlist(xfs_chashlist_t *chl) +{ + xfs_inode_t *ip; + + while (chl != NULL) { +#ifdef DEBUG + kdb_printf("hashlist inode 0x%p blkno %Ld buf 0x%p", + chl->chl_ip, chl->chl_blkno, chl->chl_buf); +#else + kdb_printf("hashlist inode 0x%p blkno %lld", + chl->chl_ip, (long long) chl->chl_blkno); +#endif + + kdb_printf("\n"); + + /* print inodes on chashlist */ + ip = chl->chl_ip; + do { + kdb_printf("0x%p ", ip); + ip = ip->i_cnext; + } while (ip != chl->chl_ip); + kdb_printf("\n"); + + chl=chl->chl_next; + } +} + +/* + * Print xfs per-ag data structures for filesystem. + */ +static void +xfsidbg_xperag(xfs_mount_t *mp) +{ + xfs_agnumber_t agno; + xfs_perag_t *pag; + int busy; + + pag = mp->m_perag; + for (agno = 0; agno < mp->m_sb.sb_agcount; agno++, pag++) { + kdb_printf("ag %d f_init %d i_init %d\n", + agno, pag->pagf_init, pag->pagi_init); + if (pag->pagf_init) + kdb_printf( + " f_levels[b,c] %d,%d f_flcount %d f_freeblks %d f_longest %d\n" + " f__metadata %d\n", + pag->pagf_levels[XFS_BTNUM_BNOi], + pag->pagf_levels[XFS_BTNUM_CNTi], + pag->pagf_flcount, pag->pagf_freeblks, + pag->pagf_longest, pag->pagf_metadata); + if (pag->pagi_init) + kdb_printf(" i_freecount %d i_inodeok %d\n", + pag->pagi_freecount, pag->pagi_inodeok); + if (pag->pagf_init) { + for (busy = 0; busy < XFS_PAGB_NUM_SLOTS; busy++) { + if (pag->pagb_list[busy].busy_length != 0) { + kdb_printf( + " %04d: start %d length %d tp 0x%p\n", + busy, + pag->pagb_list[busy].busy_start, + pag->pagb_list[busy].busy_length, + pag->pagb_list[busy].busy_tp); + } + } + } + } +} + +#ifdef CONFIG_XFS_QUOTA +static void +xfsidbg_xqm() +{ + if (xfs_Gqm == NULL) { + kdb_printf("NULL XQM!!\n"); + return; + } + + kdb_printf("usrhtab 0x%p\tgrphtab 0x%p\tndqfree 0x%x\thashmask 0x%x\n", + xfs_Gqm->qm_usr_dqhtable, + xfs_Gqm->qm_grp_dqhtable, + xfs_Gqm->qm_dqfreelist.qh_nelems, + xfs_Gqm->qm_dqhashmask); + kdb_printf("&freelist 0x%p, totaldquots 0x%x nrefs 0x%x\n", + &xfs_Gqm->qm_dqfreelist, + atomic_read(&xfs_Gqm->qm_totaldquots), + xfs_Gqm->qm_nrefs); +} +#endif + +static void +xfsidbg_xqm_diskdq(xfs_disk_dquot_t *d) +{ + kdb_printf("magic 0x%x\tversion 0x%x\tID 0x%x (%d)\t\n", + INT_GET(d->d_magic, ARCH_CONVERT), + INT_GET(d->d_version, ARCH_CONVERT), + INT_GET(d->d_id, ARCH_CONVERT), + INT_GET(d->d_id, ARCH_CONVERT)); + kdb_printf("bhard 0x%llx\tbsoft 0x%llx\tihard 0x%llx\tisoft 0x%llx\n", + (unsigned long long)INT_GET(d->d_blk_hardlimit, ARCH_CONVERT), + (unsigned long long)INT_GET(d->d_blk_softlimit, ARCH_CONVERT), + (unsigned long long)INT_GET(d->d_ino_hardlimit, ARCH_CONVERT), + (unsigned long long)INT_GET(d->d_ino_softlimit, ARCH_CONVERT)); + kdb_printf("bcount 0x%llx icount 0x%llx\n", + (unsigned long long)INT_GET(d->d_bcount, ARCH_CONVERT), + (unsigned long long)INT_GET(d->d_icount, ARCH_CONVERT)); + kdb_printf("btimer 0x%x itimer 0x%x \n", + (int)INT_GET(d->d_btimer, ARCH_CONVERT), + (int)INT_GET(d->d_itimer, ARCH_CONVERT)); +} + +static void +xfsidbg_xqm_dquot(xfs_dquot_t *dqp) +{ + static char *qflags[] = { + "USR", + "GRP", + "LCKD", + "FLKD", + "DIRTY", + "WANT", + "INACT", + "MARKER", + 0 + }; + kdb_printf("mount 0x%p hash 0x%p gdquotp 0x%p HL_next 0x%p HL_prevp 0x%p\n", + dqp->q_mount, + dqp->q_hash, + dqp->q_gdquot, + dqp->HL_NEXT, + dqp->HL_PREVP); + kdb_printf("MPL_next 0x%p MPL_prevp 0x%p FL_next 0x%p FL_prev 0x%p\n", + dqp->MPL_NEXT, + dqp->MPL_PREVP, + dqp->dq_flnext, + dqp->dq_flprev); + 
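+	/* in-core state: refcount and block reservation, then flags,
+	 * buffer location, locks, and finally the on-disk dquot image */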
+ kdb_printf("nrefs 0x%x, res_bcount %d, ", + dqp->q_nrefs, (int) dqp->q_res_bcount); + printflags(dqp->dq_flags, qflags, "flags:"); + kdb_printf("\nblkno 0x%llx\tboffset 0x%x\n", + (unsigned long long) dqp->q_blkno, (int) dqp->q_bufoffset); + kdb_printf("qlock 0x%p flock 0x%p (%s) pincount 0x%x\n", + &dqp->q_qlock, + &dqp->q_flock, + (valusema(&dqp->q_flock) <= 0) ? "LCK" : "UNLKD", + dqp->q_pincount); + kdb_printf("disk-dquot 0x%p\n", &dqp->q_core); + xfsidbg_xqm_diskdq(&dqp->q_core); + +} + + +#define XQMIDBG_LIST_PRINT(l, NXT) \ +{ \ + xfs_dquot_t *dqp;\ + int i = 0; \ + kdb_printf("[#%d dquots]\n", (int) (l)->qh_nelems); \ + for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) {\ + kdb_printf( \ + "\t%d. [0x%p] \"%d (%s)\"\t blks = %d, inos = %d refs = %d\n", \ + ++i, dqp, (int) INT_GET(dqp->q_core.d_id, ARCH_CONVERT), \ + DQFLAGTO_TYPESTR(dqp), \ + (int) INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), \ + (int) INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), \ + (int) dqp->q_nrefs); }\ + kdb_printf("\n"); \ +} + +static void +xfsidbg_xqm_dqattached_inos(xfs_mount_t *mp) +{ + xfs_inode_t *ip; + int n = 0; + + ip = mp->m_inodes; + do { + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + if (ip->i_udquot || ip->i_gdquot) { + n++; + kdb_printf("inode = 0x%p, ino %d: udq 0x%p, gdq 0x%p\n", + ip, (int)ip->i_ino, ip->i_udquot, ip->i_gdquot); + } + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + kdb_printf("\nNumber of inodes with dquots attached: %d\n", n); +} + +#ifdef CONFIG_XFS_QUOTA +static void +xfsidbg_xqm_freelist_print(xfs_frlist_t *qlist, char *title) +{ + xfs_dquot_t *dq; + int i = 0; + kdb_printf("%s (#%d)\n", title, (int) qlist->qh_nelems); + FOREACH_DQUOT_IN_FREELIST(dq, qlist) { + kdb_printf("\t%d.\t\"%d (%s:0x%p)\"\t bcnt = %d, icnt = %d " + "refs = %d\n", + ++i, (int) INT_GET(dq->q_core.d_id, ARCH_CONVERT), + DQFLAGTO_TYPESTR(dq), dq, + (int) INT_GET(dq->q_core.d_bcount, ARCH_CONVERT), + (int) INT_GET(dq->q_core.d_icount, ARCH_CONVERT), + (int) dq->q_nrefs); + } +} + +static void +xfsidbg_xqm_freelist(void) +{ + if (xfs_Gqm) { + xfsidbg_xqm_freelist_print(&(xfs_Gqm->qm_dqfreelist), "Freelist"); + } else + kdb_printf("NULL XQM!!\n"); +} + +static void +xfsidbg_xqm_htab(void) +{ + int i; + xfs_dqhash_t *h; + + if (xfs_Gqm == NULL) { + kdb_printf("NULL XQM!!\n"); + return; + } + for (i = 0; i <= xfs_Gqm->qm_dqhashmask; i++) { + h = &xfs_Gqm->qm_usr_dqhtable[i]; + if (h->qh_next) { + kdb_printf("USR %d: ", i); + XQMIDBG_LIST_PRINT(h, HL_NEXT); + } + } + for (i = 0; i <= xfs_Gqm->qm_dqhashmask; i++) { + h = &xfs_Gqm->qm_grp_dqhtable[i]; + if (h->qh_next) { + kdb_printf("GRP %d: ", i); + XQMIDBG_LIST_PRINT(h, HL_NEXT); + } + } +} +#endif + +static void +xfsidbg_xqm_mplist(xfs_mount_t *mp) +{ + if (mp->m_quotainfo == NULL) { + kdb_printf("NULL quotainfo\n"); + return; + } + + XQMIDBG_LIST_PRINT(&(mp->m_quotainfo->qi_dqlist), MPL_NEXT); + +} + + +static void +xfsidbg_xqm_qinfo(xfs_mount_t *mp) +{ + if (mp == NULL || mp->m_quotainfo == NULL) { + kdb_printf("NULL quotainfo\n"); + return; + } + + kdb_printf("uqip 0x%p, gqip 0x%p, &pinlock 0x%p &dqlist 0x%p\n", + mp->m_quotainfo->qi_uquotaip, + mp->m_quotainfo->qi_gquotaip, + &mp->m_quotainfo->qi_pinlock, + &mp->m_quotainfo->qi_dqlist); + + kdb_printf("nreclaims %d, btmlimit 0x%x, itmlimit 0x%x, RTbtmlim 0x%x\n", + (int)mp->m_quotainfo->qi_dqreclaims, + (int)mp->m_quotainfo->qi_btimelimit, + (int)mp->m_quotainfo->qi_itimelimit, + (int)mp->m_quotainfo->qi_rtbtimelimit); + + kdb_printf("bwarnlim 0x%x, iwarnlim 0x%x, 
&qofflock 0x%p, " + "chunklen 0x%x, dqperchunk 0x%x\n", + (int)mp->m_quotainfo->qi_bwarnlimit, + (int)mp->m_quotainfo->qi_iwarnlimit, + &mp->m_quotainfo->qi_quotaofflock, + (int)mp->m_quotainfo->qi_dqchunklen, + (int)mp->m_quotainfo->qi_dqperchunk); +} + +static void +xfsidbg_xqm_tpdqinfo(xfs_trans_t *tp) +{ + xfs_dqtrx_t *qa, *q; + int i,j; + + kdb_printf("dqinfo 0x%p\n", tp->t_dqinfo); + if (! tp->t_dqinfo) + return; + kdb_printf("USR: \n"); + qa = tp->t_dqinfo->dqa_usrdquots; + for (j = 0; j < 2; j++) { + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + if (qa[i].qt_dquot == NULL) + break; + q = &qa[i]; + kdb_printf( + "\"%d\"[0x%p]: bres %d, bres-used %d, bdelta %d, del-delta %d, icnt-delta %d\n", + (int) q->qt_dquot->q_core.d_id, + q->qt_dquot, + (int) q->qt_blk_res, + (int) q->qt_blk_res_used, + (int) q->qt_bcount_delta, + (int) q->qt_delbcnt_delta, + (int) q->qt_icount_delta); + } + if (j == 0) { + qa = tp->t_dqinfo->dqa_grpdquots; + kdb_printf("GRP: \n"); + } + } + +} + + + +/* + * Print xfs superblock. + */ +static void +xfsidbg_xsb(xfs_sb_t *sbp, int convert) +{ + xfs_arch_t arch=convert?ARCH_CONVERT:ARCH_NOCONVERT; + + kdb_printf(convert?"\n":"\n"); + + kdb_printf("magicnum 0x%x blocksize 0x%x dblocks %Ld rblocks %Ld\n", + INT_GET(sbp->sb_magicnum, arch), INT_GET(sbp->sb_blocksize, arch), + INT_GET(sbp->sb_dblocks, arch), INT_GET(sbp->sb_rblocks, arch)); + kdb_printf("rextents %Ld uuid %s logstart %s\n", + INT_GET(sbp->sb_rextents, arch), + xfs_fmtuuid(&sbp->sb_uuid), + xfs_fmtfsblock(INT_GET(sbp->sb_logstart, arch), NULL)); + kdb_printf("rootino %s ", + xfs_fmtino(INT_GET(sbp->sb_rootino, arch), NULL)); + kdb_printf("rbmino %s ", + xfs_fmtino(INT_GET(sbp->sb_rbmino, arch), NULL)); + kdb_printf("rsumino %s\n", + xfs_fmtino(INT_GET(sbp->sb_rsumino, arch), NULL)); + kdb_printf("rextsize 0x%x agblocks 0x%x agcount 0x%x rbmblocks 0x%x\n", + INT_GET(sbp->sb_rextsize, arch), + INT_GET(sbp->sb_agblocks, arch), + INT_GET(sbp->sb_agcount, arch), + INT_GET(sbp->sb_rbmblocks, arch)); + kdb_printf("logblocks 0x%x versionnum 0x%x sectsize 0x%x inodesize 0x%x\n", + INT_GET(sbp->sb_logblocks, arch), + INT_GET(sbp->sb_versionnum, arch), + INT_GET(sbp->sb_sectsize, arch), + INT_GET(sbp->sb_inodesize, arch)); + kdb_printf("inopblock 0x%x blocklog 0x%x sectlog 0x%x inodelog 0x%x\n", + INT_GET(sbp->sb_inopblock, arch), + INT_GET(sbp->sb_blocklog, arch), + INT_GET(sbp->sb_sectlog, arch), + INT_GET(sbp->sb_inodelog, arch)); + kdb_printf("inopblog %d agblklog %d rextslog %d inprogress %d imax_pct %d\n", + INT_GET(sbp->sb_inopblog, arch), + INT_GET(sbp->sb_agblklog, arch), + INT_GET(sbp->sb_rextslog, arch), + INT_GET(sbp->sb_inprogress, arch), + INT_GET(sbp->sb_imax_pct, arch)); + kdb_printf("icount %Lx ifree %Lx fdblocks %Lx frextents %Lx\n", + INT_GET(sbp->sb_icount, arch), + INT_GET(sbp->sb_ifree, arch), + INT_GET(sbp->sb_fdblocks, arch), + INT_GET(sbp->sb_frextents, arch)); + kdb_printf("uquotino %s ", xfs_fmtino(INT_GET(sbp->sb_uquotino, arch), NULL)); + kdb_printf("gquotino %s ", xfs_fmtino(INT_GET(sbp->sb_gquotino, arch), NULL)); + kdb_printf("qflags 0x%x flags 0x%x shared_vn %d inoaligmt %d\n", + INT_GET(sbp->sb_qflags, arch), INT_GET(sbp->sb_flags, arch), INT_GET(sbp->sb_shared_vn, arch), + INT_GET(sbp->sb_inoalignmt, arch)); + kdb_printf("unit %d width %d dirblklog %d\n", + INT_GET(sbp->sb_unit, arch), INT_GET(sbp->sb_width, arch), INT_GET(sbp->sb_dirblklog, arch)); + kdb_printf("log sunit %d\n", INT_GET(sbp->sb_logsunit, arch)); +} + + +/* + * Print out an XFS transaction structure. 
Print summaries for + * each of the items. + */ +static void +xfsidbg_xtp(xfs_trans_t *tp) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_desc_t *lidp; + xfs_log_busy_chunk_t *lbcp; + int i; + int chunk; + static char *xtp_flags[] = { + "dirty", /* 0x1 */ + "sb_dirty", /* 0x2 */ + "perm_log_res", /* 0x4 */ + "sync", /* 0x08 */ + "dq_dirty", /* 0x10 */ + 0 + }; + static char *lid_flags[] = { + "dirty", /* 0x1 */ + "pinned", /* 0x2 */ + "sync unlock", /* 0x4 */ + "buf stale", /* 0x8 */ + 0 + }; + + kdb_printf("tp 0x%p type ", tp); + switch (tp->t_type) { + case XFS_TRANS_SETATTR_NOT_SIZE: kdb_printf("SETATTR_NOT_SIZE"); break; + case XFS_TRANS_SETATTR_SIZE: kdb_printf("SETATTR_SIZE"); break; + case XFS_TRANS_INACTIVE: kdb_printf("INACTIVE"); break; + case XFS_TRANS_CREATE: kdb_printf("CREATE"); break; + case XFS_TRANS_CREATE_TRUNC: kdb_printf("CREATE_TRUNC"); break; + case XFS_TRANS_TRUNCATE_FILE: kdb_printf("TRUNCATE_FILE"); break; + case XFS_TRANS_REMOVE: kdb_printf("REMOVE"); break; + case XFS_TRANS_LINK: kdb_printf("LINK"); break; + case XFS_TRANS_RENAME: kdb_printf("RENAME"); break; + case XFS_TRANS_MKDIR: kdb_printf("MKDIR"); break; + case XFS_TRANS_RMDIR: kdb_printf("RMDIR"); break; + case XFS_TRANS_SYMLINK: kdb_printf("SYMLINK"); break; + case XFS_TRANS_SET_DMATTRS: kdb_printf("SET_DMATTRS"); break; + case XFS_TRANS_GROWFS: kdb_printf("GROWFS"); break; + case XFS_TRANS_STRAT_WRITE: kdb_printf("STRAT_WRITE"); break; + case XFS_TRANS_DIOSTRAT: kdb_printf("DIOSTRAT"); break; + case XFS_TRANS_WRITE_SYNC: kdb_printf("WRITE_SYNC"); break; + case XFS_TRANS_WRITEID: kdb_printf("WRITEID"); break; + case XFS_TRANS_ADDAFORK: kdb_printf("ADDAFORK"); break; + case XFS_TRANS_ATTRINVAL: kdb_printf("ATTRINVAL"); break; + case XFS_TRANS_ATRUNCATE: kdb_printf("ATRUNCATE"); break; + case XFS_TRANS_ATTR_SET: kdb_printf("ATTR_SET"); break; + case XFS_TRANS_ATTR_RM: kdb_printf("ATTR_RM"); break; + case XFS_TRANS_ATTR_FLAG: kdb_printf("ATTR_FLAG"); break; + case XFS_TRANS_CLEAR_AGI_BUCKET: kdb_printf("CLEAR_AGI_BUCKET"); break; + case XFS_TRANS_QM_SBCHANGE: kdb_printf("QM_SBCHANGE"); break; + case XFS_TRANS_QM_QUOTAOFF: kdb_printf("QM_QUOTAOFF"); break; + case XFS_TRANS_QM_DQALLOC: kdb_printf("QM_DQALLOC"); break; + case XFS_TRANS_QM_SETQLIM: kdb_printf("QM_SETQLIM"); break; + case XFS_TRANS_QM_DQCLUSTER: kdb_printf("QM_DQCLUSTER"); break; + case XFS_TRANS_QM_QINOCREATE: kdb_printf("QM_QINOCREATE"); break; + case XFS_TRANS_QM_QUOTAOFF_END: kdb_printf("QM_QOFF_END"); break; + case XFS_TRANS_SB_UNIT: kdb_printf("SB_UNIT"); break; + case XFS_TRANS_FSYNC_TS: kdb_printf("FSYNC_TS"); break; + case XFS_TRANS_GROWFSRT_ALLOC: kdb_printf("GROWFSRT_ALLOC"); break; + case XFS_TRANS_GROWFSRT_ZERO: kdb_printf("GROWFSRT_ZERO"); break; + case XFS_TRANS_GROWFSRT_FREE: kdb_printf("GROWFSRT_FREE"); break; + + default: kdb_printf("0x%x", tp->t_type); break; + } + kdb_printf(" mount 0x%p\n", tp->t_mountp); + kdb_printf("flags "); + printflags(tp->t_flags, xtp_flags,"xtp"); + kdb_printf("\n"); + kdb_printf("callback 0x%p forw 0x%p back 0x%p\n", + &tp->t_logcb, tp->t_forw, tp->t_back); + kdb_printf("log res %d block res %d block res used %d\n", + tp->t_log_res, tp->t_blk_res, tp->t_blk_res_used); + kdb_printf("rt res %d rt res used %d\n", tp->t_rtx_res, + tp->t_rtx_res_used); + kdb_printf("ticket 0x%lx lsn %s commit_lsn %s\n", + (unsigned long) tp->t_ticket, + xfs_fmtlsn(&tp->t_lsn), + xfs_fmtlsn(&tp->t_commit_lsn)); + kdb_printf("callback 0x%p callarg 0x%p\n", + tp->t_callback, tp->t_callarg); + kdb_printf("icount delta %ld 
ifree delta %ld\n", + tp->t_icount_delta, tp->t_ifree_delta); + kdb_printf("blocks delta %ld res blocks delta %ld\n", + tp->t_fdblocks_delta, tp->t_res_fdblocks_delta); + kdb_printf("rt delta %ld res rt delta %ld\n", + tp->t_frextents_delta, tp->t_res_frextents_delta); + kdb_printf("ag freeblks delta %ld ag flist delta %ld ag btree delta %ld\n", + tp->t_ag_freeblks_delta, tp->t_ag_flist_delta, + tp->t_ag_btree_delta); + kdb_printf("dblocks delta %ld agcount delta %ld imaxpct delta %ld\n", + tp->t_dblocks_delta, tp->t_agcount_delta, tp->t_imaxpct_delta); + kdb_printf("rextsize delta %ld rbmblocks delta %ld\n", + tp->t_rextsize_delta, tp->t_rbmblocks_delta); + kdb_printf("rblocks delta %ld rextents delta %ld rextslog delta %ld\n", + tp->t_rblocks_delta, tp->t_rextents_delta, + tp->t_rextslog_delta); + kdb_printf("dqinfo 0x%p\n", tp->t_dqinfo); + kdb_printf("log items:\n"); + licp = &tp->t_items; + chunk = 0; + while (licp != NULL) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { + licp = licp->lic_next; + chunk++; + continue; + } + for (i = 0; i < licp->lic_unused; i++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lidp = XFS_LIC_SLOT(licp, i); + kdb_printf("\n"); + kdb_printf("chunk %d index %d item 0x%p size %d\n", + chunk, i, lidp->lid_item, lidp->lid_size); + kdb_printf("flags "); + printflags(lidp->lid_flags, lid_flags,"lic"); + kdb_printf("\n"); + xfsidbg_xlogitem(lidp->lid_item); + } + chunk++; + licp = licp->lic_next; + } + + kdb_printf("log busy free %d, list:\n", tp->t_busy_free); + lbcp = &tp->t_busy; + chunk = 0; + while (lbcp != NULL) { + kdb_printf("Chunk %d at 0x%p next 0x%p free 0x%08x unused %d\n", + chunk, lbcp, lbcp->lbc_next, lbcp->lbc_free, + lbcp->lbc_unused); + for (i = 0; i < XFS_LBC_NUM_SLOTS; i++) { + kdb_printf(" %02d: ag %d idx %d\n", + i, + lbcp->lbc_busy[i].lbc_ag, + lbcp->lbc_busy[i].lbc_idx); + } + lbcp = lbcp->lbc_next; + } +} + +static void +xfsidbg_xtrans_res( + xfs_mount_t *mp) +{ + xfs_trans_reservations_t *xtrp; + + xtrp = &mp->m_reservations; + kdb_printf("write: %d\ttruncate: %d\trename: %d\n", + xtrp->tr_write, xtrp->tr_itruncate, xtrp->tr_rename); + kdb_printf("link: %d\tremove: %d\tsymlink: %d\n", + xtrp->tr_link, xtrp->tr_remove, xtrp->tr_symlink); + kdb_printf("create: %d\tmkdir: %d\tifree: %d\n", + xtrp->tr_create, xtrp->tr_mkdir, xtrp->tr_ifree); + kdb_printf("ichange: %d\tgrowdata: %d\tswrite: %d\n", + xtrp->tr_ichange, xtrp->tr_growdata, xtrp->tr_swrite); + kdb_printf("addafork: %d\twriteid: %d\tattrinval: %d\n", + xtrp->tr_addafork, xtrp->tr_writeid, xtrp->tr_attrinval); + kdb_printf("attrset: %d\tattrrm: %d\tclearagi: %d\n", + xtrp->tr_attrset, xtrp->tr_attrrm, xtrp->tr_clearagi); + kdb_printf("growrtalloc: %d\tgrowrtzero: %d\tgrowrtfree: %d\n", + xtrp->tr_growrtalloc, xtrp->tr_growrtzero, xtrp->tr_growrtfree); +} + +module_init(xfsidbg_init) +module_exit(xfsidbg_exit) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_iget.c linux.21rc5-ac2/fs/xfs/xfs_iget.c --- linux.21rc5/fs/xfs/xfs_iget.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_iget.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1009 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_quota.h" +#include "xfs_utils.h" + +/* + * Initialize the inode hash table for the newly mounted file system. + * + * mp -- this is the mount point structure for the file system being + * initialized + */ +void +xfs_ihash_init(xfs_mount_t *mp) +{ + int i; + + mp->m_ihsize = XFS_BUCKETS(mp); + mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize + * sizeof(xfs_ihash_t), KM_SLEEP); + ASSERT(mp->m_ihash != NULL); + for (i = 0; i < mp->m_ihsize; i++) { + rwlock_init(&(mp->m_ihash[i].ih_lock)); + } +} + +/* + * Free up structures allocated by xfs_ihash_init, at unmount time. + */ +void +xfs_ihash_free(xfs_mount_t *mp) +{ + kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t)); + mp->m_ihash = NULL; +} + +/* + * Initialize the inode cluster hash table for the newly mounted file system. + * + * mp -- this is the mount point structure for the file system being + * initialized + */ +void +xfs_chash_init(xfs_mount_t *mp) +{ + int i; + + /* + * m_chash size is based on m_ihash + * with a minimum of 37 entries + */ + mp->m_chsize = (XFS_BUCKETS(mp)) / + (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); + if (mp->m_chsize < 37) { + mp->m_chsize = 37; + } + mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize + * sizeof(xfs_chash_t), + KM_SLEEP); + ASSERT(mp->m_chash != NULL); + + for (i = 0; i < mp->m_chsize; i++) { + spinlock_init(&mp->m_chash[i].ch_lock,"xfshash"); + } +} + +/* + * Free up structures allocated by xfs_chash_init, at unmount time. + */ +void +xfs_chash_free(xfs_mount_t *mp) +{ + int i; + + for (i = 0; i < mp->m_chsize; i++) { + spinlock_destroy(&mp->m_chash[i].ch_lock); + } + + kmem_free(mp->m_chash, mp->m_chsize*sizeof(xfs_chash_t)); + mp->m_chash = NULL; +} + +/* + * Look up an inode by number in the given file system. + * The inode is looked up in the hash table for the file system + * represented by the mount point parameter mp. 
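As a rough user-space model of the per-bucket locking that xfs_ihash_init() sets up above -- one reader/writer lock per hash bucket, so lookups in different buckets never contend -- consider the following sketch. pthread rwlocks stand in for the kernel's rwlock_t, and the bucket count and names are illustrative, not taken from the patch:

#include <pthread.h>
#include <stdlib.h>

struct ihash_bucket {
	pthread_rwlock_t lock;	/* guards this chain only */
	void *chain;		/* head of the inode chain */
};

struct ihash_bucket *
ihash_init(size_t nbuckets)
{
	struct ihash_bucket *h = calloc(nbuckets, sizeof(*h));
	size_t i;

	if (h == NULL)
		return NULL;
	for (i = 0; i < nbuckets; i++)
		pthread_rwlock_init(&h[i].lock, NULL);
	return h;
}

void
ihash_free(struct ihash_bucket *h, size_t nbuckets)
{
	size_t i;

	for (i = 0; i < nbuckets; i++)
		pthread_rwlock_destroy(&h[i].lock);
	free(h);
}

Splitting the lock per bucket is what lets the lookup path take only a read lock on one chain rather than a filesystem-wide lock.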
Each bucket of + * the hash table is guarded by an individual semaphore. + * + * If the inode is found in the hash table, its corresponding vnode + * is obtained with a call to vn_get(). This call takes care of + * coordination with the reclamation of the inode and vnode. Note + * that the vmap structure is filled in while holding the hash lock. + * This gives us the state of the inode/vnode when we found it and + * is used for coordination in vn_get(). + * + * If it is not in core, read it in from the file system's device and + * add the inode into the hash table. + * + * The inode is locked according to the value of the lock_flags parameter. + * This flag parameter indicates how and if the inode's IO lock and inode lock + * should be taken. + * + * mp -- the mount point structure for the current file system. It points + * to the inode hash table. + * tp -- a pointer to the current transaction if there is one. This is + * simply passed through to the xfs_iread() call. + * ino -- the number of the inode desired. This is the unique identifier + * within the file system for the inode being requested. + * lock_flags -- flags indicating how to lock the inode. See the comment + * for xfs_ilock() for a list of valid values. + * bno -- the block number starting the buffer containing the inode, + * if known (as by bulkstat), else 0. + */ +STATIC int +xfs_iget_core( + vnode_t *vp, + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + uint lock_flags, + xfs_inode_t **ipp, + xfs_daddr_t bno) +{ + xfs_ihash_t *ih; + xfs_inode_t *ip; + xfs_inode_t *iq; + vnode_t *inode_vp; + ulong version; + int error; + /* REFERENCED */ + int newnode; + xfs_chash_t *ch; + xfs_chashlist_t *chl, *chlnew; + SPLDECL(s); + + + ih = XFS_IHASH(mp, ino); + +again: + read_lock(&ih->ih_lock); + + for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) { + if (ip->i_ino == ino) { + + inode_vp = XFS_ITOV_NULL(ip); + + if (inode_vp == NULL) { + /* If IRECLAIM is set this inode is + * on its way out of the system, + * we need to pause and try again. + */ + if (ip->i_flags & XFS_IRECLAIM) { + read_unlock(&ih->ih_lock); + delay(1); + XFS_STATS_INC(xfsstats.xs_ig_frecycle); + + goto again; + } + + vn_trace_exit(vp, "xfs_iget.alloc", + (inst_t *)__return_address); + + XFS_STATS_INC(xfsstats.xs_ig_found); + + read_unlock(&ih->ih_lock); + goto finish_inode; + + } else if (vp != inode_vp) { + struct inode *inode = LINVFS_GET_IP(inode_vp); + + /* The inode is being torn down, pause and + * try again. + */ + if (inode->i_state & (I_FREEING | I_CLEAR)) { + read_unlock(&ih->ih_lock); + delay(1); + XFS_STATS_INC(xfsstats.xs_ig_frecycle); + + goto again; + } +/* Chances are the other vnode (the one in the inode) is being torn + * down right now, and we landed on top of it. Question is, what do + * we do? Unhook the old inode and hook up the new one? + */ + cmn_err(CE_PANIC, + "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p", + inode_vp, vp); + } + + read_unlock(&ih->ih_lock); + + XFS_STATS_INC(xfsstats.xs_ig_found); + +finish_inode: + if (lock_flags != 0) { + xfs_ilock(ip, lock_flags); + } + + newnode = (ip->i_d.di_mode == 0); + if (newnode) { + xfs_iocore_inode_reinit(ip); + } + + XFS_MOUNT_ILOCK(mp); + list_del_init(&ip->i_reclaim); + XFS_MOUNT_IUNLOCK(mp); + + vn_trace_exit(vp, "xfs_iget.found", + (inst_t *)__return_address); + goto return_ip; + } + } + + /* + * Inode cache miss: save the hash chain version stamp and unlock + * the chain, so we don't deadlock in vn_alloc. 
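The version-stamp trick just described can be modelled in a few lines: remember the chain's version before dropping the lock, do the slow allocation unlocked, then retake the lock and rescan for a duplicate only if the version moved. This is a minimal sketch with invented names, not the patch's code:

#include <pthread.h>
#include <stddef.h>

struct node {
	unsigned long key;
	struct node *next;
};

struct chain {
	pthread_rwlock_t lock;
	unsigned long version;	/* bumped on every insertion */
	struct node *head;
};

/* Returns 0 if 'n' was inserted, nonzero if another thread inserted
 * 'n->key' while the lock was dropped (caller frees 'n' and retries). */
int
chain_insert_revalidate(struct chain *c, unsigned long saved_version,
			struct node *n)
{
	struct node *q;

	pthread_rwlock_wrlock(&c->lock);
	if (c->version != saved_version) {
		for (q = c->head; q != NULL; q = q->next) {
			if (q->key == n->key) {	/* lost the race */
				pthread_rwlock_unlock(&c->lock);
				return 1;
			}
		}
	}
	n->next = c->head;
	c->head = n;
	c->version++;
	pthread_rwlock_unlock(&c->lock);
	return 0;
}

When the version is unchanged the rescan is skipped entirely, which is the common case.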
+ */ + XFS_STATS_INC(xfsstats.xs_ig_missed); + + version = ih->ih_version; + + read_unlock(&ih->ih_lock); + + /* + * Read the disk inode attributes into a new inode structure and get + * a new vnode for it. This should also initialize i_ino and i_mount. + */ + error = xfs_iread(mp, tp, ino, &ip, bno); + if (error) { + return error; + } + + vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address); + + xfs_inode_lock_init(ip, vp); + xfs_iocore_inode_init(ip); + + if (lock_flags != 0) { + xfs_ilock(ip, lock_flags); + } + + /* + * Put ip on its hash chain, unless someone else hashed a duplicate + * after we released the hash lock. + */ + write_lock(&ih->ih_lock); + + if (ih->ih_version != version) { + for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) { + if (iq->i_ino == ino) { + write_unlock(&ih->ih_lock); + xfs_idestroy(ip); + + XFS_STATS_INC(xfsstats.xs_ig_dup); + goto again; + } + } + } + + /* + * These values _must_ be set before releasing ihlock! + */ + ip->i_hash = ih; + if ((iq = ih->ih_next)) { + iq->i_prevp = &ip->i_next; + } + ip->i_next = iq; + ip->i_prevp = &ih->ih_next; + ih->ih_next = ip; + ip->i_udquot = ip->i_gdquot = NULL; + ih->ih_version++; + + write_unlock(&ih->ih_lock); + + /* + * put ip on its cluster's hash chain + */ + ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL && + ip->i_cnext == NULL); + + chlnew = NULL; + ch = XFS_CHASH(mp, ip->i_blkno); + chlredo: + s = mutex_spinlock(&ch->ch_lock); + for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) { + if (chl->chl_blkno == ip->i_blkno) { + + /* insert this inode into the doubly-linked list + * where chl points */ + if ((iq = chl->chl_ip)) { + ip->i_cprev = iq->i_cprev; + iq->i_cprev->i_cnext = ip; + iq->i_cprev = ip; + ip->i_cnext = iq; + } else { + ip->i_cnext = ip; + ip->i_cprev = ip; + } + chl->chl_ip = ip; + ip->i_chash = chl; + break; + } + } + + /* no hash list found for this block; add a new hash list */ + if (chl == NULL) { + if (chlnew == NULL) { + mutex_spinunlock(&ch->ch_lock, s); + ASSERT(xfs_chashlist_zone != NULL); + chlnew = (xfs_chashlist_t *) + kmem_zone_alloc(xfs_chashlist_zone, + KM_SLEEP); + ASSERT(chlnew != NULL); + goto chlredo; + } else { + ip->i_cnext = ip; + ip->i_cprev = ip; + ip->i_chash = chlnew; + chlnew->chl_ip = ip; + chlnew->chl_blkno = ip->i_blkno; + chlnew->chl_next = ch->ch_list; + ch->ch_list = chlnew; + chlnew = NULL; + } + } else { + if (chlnew != NULL) { + kmem_zone_free(xfs_chashlist_zone, chlnew); + } + } + + mutex_spinunlock(&ch->ch_lock, s); + + + /* + * Link ip to its mount and thread it on the mount's inode list. + */ + XFS_MOUNT_ILOCK(mp); + if ((iq = mp->m_inodes)) { + ASSERT(iq->i_mprev->i_mnext == iq); + ip->i_mprev = iq->i_mprev; + iq->i_mprev->i_mnext = ip; + iq->i_mprev = ip; + ip->i_mnext = iq; + } else { + ip->i_mnext = ip; + ip->i_mprev = ip; + } + mp->m_inodes = ip; + + XFS_MOUNT_IUNLOCK(mp); + + newnode = 1; + + return_ip: + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); + + ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) == + ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0)); + + *ipp = ip; + + /* + * If we have a real type for an on-disk inode, we can set ops(&unlock) + * now. If it's a new inode being created, xfs_ialloc will handle it. + */ + VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1); + + return 0; +} + + +/* + * The 'normal' internal xfs_iget, if needed it will + * 'allocate', or 'get', the vnode. 
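The cluster hash insertion in xfs_iget_core() above maintains a circular, doubly linked ring through i_cnext/i_cprev: a singleton element points at itself, and a new element is spliced in ahead of the current head. In isolation, with simplified names (a model, not the kernel structure):

#include <stddef.h>

struct cnode {
	struct cnode *cnext;
	struct cnode *cprev;
};

void
cluster_insert(struct cnode **headp, struct cnode *ip)
{
	struct cnode *iq = *headp;

	if (iq != NULL) {
		ip->cprev = iq->cprev;
		iq->cprev->cnext = ip;
		iq->cprev = ip;
		ip->cnext = iq;
	} else {
		ip->cnext = ip;	/* singleton ring */
		ip->cprev = ip;
	}
	*headp = ip;
}

A circular ring needs no NULL checks on unlink, which is what keeps the non-singleton removal path in xfs_iextract() to three assignments.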
+ */ +int +xfs_iget( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + uint lock_flags, + xfs_inode_t **ipp, + xfs_daddr_t bno) +{ + struct inode *inode; + vnode_t *vp = NULL; + int error; + +retry: + XFS_STATS_INC(xfsstats.xs_ig_attempts); + + if ((inode = VFS_GET_INODE(XFS_MTOVFS(mp), ino, 0))) { + bhv_desc_t *bdp; + xfs_inode_t *ip; + int newnode; + + vp = LINVFS_GET_VP(inode); + if (inode->i_state & I_NEW) { +inode_allocate: + vn_initialize(inode); + error = xfs_iget_core(vp, mp, tp, ino, + lock_flags, ipp, bno); + if (error) { + remove_inode_hash(inode); + make_bad_inode(inode); + if (inode->i_state & I_NEW) + unlock_new_inode(inode); + iput(inode); + } + } else { + /* These are true if the inode is in inactive or + * reclaim. The linux inode is about to go away, + * wait for that path to finish, and try again. + */ + if (vp->v_flag & (VINACT | VRECLM)) { + vn_wait(vp); + iput(inode); + goto retry; + } + + if (is_bad_inode(inode)) { + iput(inode); + return EIO; + } + + bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops); + if (bdp == NULL) { + XFS_STATS_INC(xfsstats.xs_ig_dup); + goto inode_allocate; + } + ip = XFS_BHVTOI(bdp); + if (lock_flags != 0) + xfs_ilock(ip, lock_flags); + newnode = (ip->i_d.di_mode == 0); + if (newnode) + xfs_iocore_inode_reinit(ip); + XFS_STATS_INC(xfsstats.xs_ig_found); + *ipp = ip; + error = 0; + } + } else + error = ENOMEM; /* If we got no inode we are out of memory */ + + return error; +} + +/* + * Do the setup for the various locks within the incore inode. + */ +void +xfs_inode_lock_init( + xfs_inode_t *ip, + vnode_t *vp) +{ + mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, + "xfsino", (long)vp->v_number); + mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number); +#ifdef NOTYET + mutex_init(&ip->i_range_lock.r_spinlock, MUTEX_SPIN, "xrange"); +#endif /* NOTYET */ + init_waitqueue_head(&ip->i_ipin_wait); + atomic_set(&ip->i_pincount, 0); + init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number); +} + +/* + * Look for the inode corresponding to the given ino in the hash table. + * If it is there and its i_transp pointer matches tp, return it. + * Otherwise, return NULL. + */ +xfs_inode_t * +xfs_inode_incore(xfs_mount_t *mp, + xfs_ino_t ino, + xfs_trans_t *tp) +{ + xfs_ihash_t *ih; + xfs_inode_t *ip; + + ih = XFS_IHASH(mp, ino); + read_lock(&ih->ih_lock); + for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) { + if (ip->i_ino == ino) { + /* + * If we find it and tp matches, return it. + * Otherwise break from the loop and return + * NULL. + */ + if (ip->i_transp == tp) { + read_unlock(&ih->ih_lock); + return (ip); + } + break; + } + } + read_unlock(&ih->ih_lock); + return (NULL); +} + +/* + * Decrement reference count of an inode structure and unlock it. + * + * ip -- the inode being released + * lock_flags -- this parameter indicates the inode's locks to be + * to be released. See the comment on xfs_iunlock() for a list + * of valid values. 
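The retry: loop in xfs_iget() above is an instance of a common pattern: a lookup that finds an object in mid-teardown (VINACT/VRECLM here) waits for the teardown to finish and restarts from scratch rather than trying to resurrect the object. A self-contained toy version, with sched_yield() standing in for vn_wait() and a fabricated lookup table:

#include <stdatomic.h>
#include <sched.h>

enum { OBJ_TEARDOWN = 1 };

struct obj {
	unsigned long key;
	atomic_int flags;
};

static struct obj table[16];	/* toy stand-in for the real cache */

static struct obj *
lookup(unsigned long key)
{
	return &table[key % 16];
}

struct obj *
get_obj(unsigned long key)
{
	struct obj *o;

retry:
	o = lookup(key);
	if (atomic_load(&o->flags) & OBJ_TEARDOWN) {
		sched_yield();	/* stand-in for vn_wait() */
		goto retry;
	}
	return o;
}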
+ */ +void +xfs_iput(xfs_inode_t *ip, + uint lock_flags) +{ + vnode_t *vp = XFS_ITOV(ip); + + vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address); + + xfs_iunlock(ip, lock_flags); + + VN_RELE(vp); +} + +/* + * Special iput for brand-new inodes that are still locked + */ +void +xfs_iput_new(xfs_inode_t *ip, + uint lock_flags) +{ + vnode_t *vp = XFS_ITOV(ip); + struct inode *inode = LINVFS_GET_IP(vp); + + vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address); + + /* We shouldn't get here without this being true, but just in case */ + if (inode->i_state & I_NEW) { + remove_inode_hash(inode); + make_bad_inode(inode); + unlock_new_inode(inode); + } + if (lock_flags) + xfs_iunlock(ip, lock_flags); + VN_RELE(vp); +} + + +/* + * This routine embodies the part of the reclaim code that pulls + * the inode from the inode hash table and the mount structure's + * inode list. + * This should only be called from xfs_reclaim(). + */ +void +xfs_ireclaim(xfs_inode_t *ip) +{ + vnode_t *vp; + + /* + * Remove from old hash list and mount list. + */ + XFS_STATS_INC(xfsstats.xs_ig_reclaims); + + xfs_iextract(ip); + + /* + * Here we do a spurious inode lock in order to coordinate with + * xfs_sync(). This is because xfs_sync() references the inodes + * in the mount list without taking references on the corresponding + * vnodes. We make that OK here by ensuring that we wait until + * the inode is unlocked in xfs_sync() before we go ahead and + * free it. We get both the regular lock and the io lock because + * the xfs_sync() code may need to drop the regular one but will + * still hold the io lock. + */ + xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + + /* + * Release dquots (and their references) if any. An inode may escape + * xfs_inactive and get here via vn_alloc->vn_reclaim path. + */ + XFS_QM_DQDETACH(ip->i_mount, ip); + + /* + * Pull our behavior descriptor from the vnode chain. + */ + vp = XFS_ITOV_NULL(ip); + if (vp) { + vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip)); + } + + /* + * Free all memory associated with the inode. + */ + xfs_idestroy(ip); +} + +/* + * This routine removes an about-to-be-destroyed inode from + * all of the lists in which it is located with the exception + * of the behavior chain. + */ +void +xfs_iextract( + xfs_inode_t *ip) +{ + xfs_ihash_t *ih; + xfs_inode_t *iq; + xfs_mount_t *mp; + xfs_chash_t *ch; + xfs_chashlist_t *chl, *chm; + SPLDECL(s); + + ih = ip->i_hash; + write_lock(&ih->ih_lock); + if ((iq = ip->i_next)) { + iq->i_prevp = ip->i_prevp; + } + *ip->i_prevp = iq; + write_unlock(&ih->ih_lock); + + /* + * Remove from cluster hash list + * 1) delete the chashlist if this is the last inode on the chashlist + * 2) unchain from list of inodes + * 3) point chashlist->chl_ip to 'chl_next' if to this inode. 
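The hash unlink at the top of xfs_iextract() above uses the classic pointer-to-pointer back link: i_prevp addresses whichever 'next' field currently points at this inode (possibly the bucket head itself), so removal needs no list walk and no first-element special case. In isolation, with generic names:

#include <stddef.h>

struct node {
	struct node *next;
	struct node **prevp;	/* &head or &predecessor->next */
};

void
hash_insert(struct node **head, struct node *n)
{
	if ((n->next = *head) != NULL)
		n->next->prevp = &n->next;
	n->prevp = head;
	*head = n;
}

void
hash_remove(struct node *n)
{
	if (n->next != NULL)
		n->next->prevp = n->prevp;
	*n->prevp = n->next;
}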
+ */ + mp = ip->i_mount; + ch = XFS_CHASH(mp, ip->i_blkno); + s = mutex_spinlock(&ch->ch_lock); + + if (ip->i_cnext == ip) { + /* Last inode on chashlist */ + ASSERT(ip->i_cnext == ip && ip->i_cprev == ip); + ASSERT(ip->i_chash != NULL); + chm=NULL; + for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) { + if (chl->chl_blkno == ip->i_blkno) { + if (chm == NULL) { + /* first item on the list */ + ch->ch_list = chl->chl_next; + } else { + chm->chl_next = chl->chl_next; + } + kmem_zone_free(xfs_chashlist_zone, chl); + break; + } else { + ASSERT(chl->chl_ip != ip); + chm = chl; + } + } + ASSERT_ALWAYS(chl != NULL); + } else { + /* delete one inode from a non-empty list */ + iq = ip->i_cnext; + iq->i_cprev = ip->i_cprev; + ip->i_cprev->i_cnext = iq; + if (ip->i_chash->chl_ip == ip) { + ip->i_chash->chl_ip = iq; + } + ip->i_chash = __return_address; + ip->i_cprev = __return_address; + ip->i_cnext = __return_address; + } + mutex_spinunlock(&ch->ch_lock, s); + + /* + * Remove from mount's inode list. + */ + XFS_MOUNT_ILOCK(mp); + ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL)); + iq = ip->i_mnext; + iq->i_mprev = ip->i_mprev; + ip->i_mprev->i_mnext = iq; + + /* + * Fix up the head pointer if it points to the inode being deleted. + */ + if (mp->m_inodes == ip) { + if (ip == iq) { + mp->m_inodes = NULL; + } else { + mp->m_inodes = iq; + } + } + + /* Deal with the deleted inodes list */ + list_del_init(&ip->i_reclaim); + + mp->m_ireclaims++; + XFS_MOUNT_IUNLOCK(mp); +} + +/* + * This is a wrapper routine around the xfs_ilock() routine + * used to centralize some grungy code. It is used in places + * that wish to lock the inode solely for reading the extents. + * The reason these places can't just call xfs_ilock(SHARED) + * is that the inode lock also guards to bringing in of the + * extents from disk for a file in b-tree format. If the inode + * is in b-tree format, then we need to lock the inode exclusively + * until the extents are read in. Locking it exclusively all + * the time would limit our parallelism unnecessarily, though. + * What we do instead is check to see if the extents have been + * read in yet, and only lock the inode exclusively if they + * have not. + * + * The function returns a value which should be given to the + * corresponding xfs_iunlock_map_shared(). This value is + * the mode in which the lock was actually taken. + */ +uint +xfs_ilock_map_shared( + xfs_inode_t *ip) +{ + uint lock_mode; + + if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) && + ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) { + lock_mode = XFS_ILOCK_EXCL; + } else { + lock_mode = XFS_ILOCK_SHARED; + } + + xfs_ilock(ip, lock_mode); + + return lock_mode; +} + +/* + * This is simply the unlock routine to go with xfs_ilock_map_shared(). + * All it does is call xfs_iunlock() with the given lock_mode. + */ +void +xfs_iunlock_map_shared( + xfs_inode_t *ip, + unsigned int lock_mode) +{ + xfs_iunlock(ip, lock_mode); +} + +/* + * The xfs inode contains 2 locks: a multi-reader lock called the + * i_iolock and a multi-reader lock called the i_lock. This routine + * allows either or both of the locks to be obtained. + * + * The 2 locks should always be ordered so that the IO lock is + * obtained first in order to prevent deadlock. + * + * ip -- the inode being locked + * lock_flags -- this parameter indicates the inode's locks + * to be locked. 
It can be: + * XFS_IOLOCK_SHARED, + * XFS_IOLOCK_EXCL, + * XFS_ILOCK_SHARED, + * XFS_ILOCK_EXCL, + * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED, + * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL, + * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED, + * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL + */ +void +xfs_ilock(xfs_inode_t *ip, + uint lock_flags) +{ + /* + * You can't set both SHARED and EXCL for the same lock, + * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, + * and XFS_ILOCK_EXCL are valid values to set in lock_flags. + */ + ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != + (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); + ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != + (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); + ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0); + + if (lock_flags & XFS_IOLOCK_EXCL) { + mrupdate(&ip->i_iolock); + } else if (lock_flags & XFS_IOLOCK_SHARED) { + mraccess(&ip->i_iolock); + } + if (lock_flags & XFS_ILOCK_EXCL) { + mrupdate(&ip->i_lock); + } else if (lock_flags & XFS_ILOCK_SHARED) { + mraccess(&ip->i_lock); + } +#ifdef XFS_ILOCK_TRACE + xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)return_address); +#endif +} + +/* + * This is just like xfs_ilock(), except that the caller + * is guaranteed not to sleep. It returns 1 if it gets + * the requested locks and 0 otherwise. If the IO lock is + * obtained but the inode lock cannot be, then the IO lock + * is dropped before returning. + * + * ip -- the inode being locked + * lock_flags -- this parameter indicates the inode's locks to be + * to be locked. See the comment for xfs_ilock() for a list + * of valid values. + * + */ +int +xfs_ilock_nowait(xfs_inode_t *ip, + uint lock_flags) +{ + int iolocked; + int ilocked; + + /* + * You can't set both SHARED and EXCL for the same lock, + * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, + * and XFS_ILOCK_EXCL are valid values to set in lock_flags. + */ + ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != + (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); + ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != + (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); + ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0); + + iolocked = 0; + if (lock_flags & XFS_IOLOCK_EXCL) { + iolocked = mrtryupdate(&ip->i_iolock); + if (!iolocked) { + return 0; + } + } else if (lock_flags & XFS_IOLOCK_SHARED) { + iolocked = mrtryaccess(&ip->i_iolock); + if (!iolocked) { + return 0; + } + } + if (lock_flags & XFS_ILOCK_EXCL) { + ilocked = mrtryupdate(&ip->i_lock); + if (!ilocked) { + if (iolocked) { + mrunlock(&ip->i_iolock); + } + return 0; + } + } else if (lock_flags & XFS_ILOCK_SHARED) { + ilocked = mrtryaccess(&ip->i_lock); + if (!ilocked) { + if (iolocked) { + mrunlock(&ip->i_iolock); + } + return 0; + } + } +#ifdef XFS_ILOCK_TRACE + xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address); +#endif + return 1; +} + +/* + * xfs_iunlock() is used to drop the inode locks acquired with + * xfs_ilock() and xfs_ilock_nowait(). The caller must pass + * in the flags given to xfs_ilock() or xfs_ilock_nowait() so + * that we know which locks to drop. + * + * ip -- the inode being unlocked + * lock_flags -- this parameter indicates the inode's locks to be + * to be unlocked. See the comment for xfs_ilock() for a list + * of valid values for this parameter. 
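xfs_ilock_nowait() above follows an all-or-nothing trylock discipline: if the inode lock cannot be taken, the already-acquired IO lock is released before failing, so a caller never ends up holding half the pair. The same shape with pthread rwlocks standing in for the kernel mrlocks (illustrative only):

#include <pthread.h>

int
trylock_pair(pthread_rwlock_t *iolock, pthread_rwlock_t *ilock)
{
	if (pthread_rwlock_trywrlock(iolock) != 0)
		return 0;			/* got nothing */
	if (pthread_rwlock_trywrlock(ilock) != 0) {
		pthread_rwlock_unlock(iolock);	/* roll back the IO lock */
		return 0;
	}
	return 1;				/* got both, in order */
}

Attempting the locks in the same order as the blocking path (IO lock first) is what keeps the two paths deadlock-free against each other.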
+ * + */ +void +xfs_iunlock(xfs_inode_t *ip, + uint lock_flags) +{ + /* + * You can't set both SHARED and EXCL for the same lock, + * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, + * and XFS_ILOCK_EXCL are valid values to set in lock_flags. + */ + ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != + (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); + ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != + (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); + ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0); + ASSERT(lock_flags != 0); + + if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) { + ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) || + (ismrlocked(&ip->i_iolock, MR_ACCESS))); + ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) || + (ismrlocked(&ip->i_iolock, MR_UPDATE))); + mrunlock(&ip->i_iolock); + } + + if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) { + ASSERT(!(lock_flags & XFS_ILOCK_SHARED) || + (ismrlocked(&ip->i_lock, MR_ACCESS))); + ASSERT(!(lock_flags & XFS_ILOCK_EXCL) || + (ismrlocked(&ip->i_lock, MR_UPDATE))); + mrunlock(&ip->i_lock); + + /* + * Let the AIL know that this item has been unlocked in case + * it is in the AIL and anyone is waiting on it. Don't do + * this if the caller has asked us not to. + */ + if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) && + ip->i_itemp != NULL) { + xfs_trans_unlocked_item(ip->i_mount, + (xfs_log_item_t*)(ip->i_itemp)); + } + } +#ifdef XFS_ILOCK_TRACE + xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address); +#endif +} + +/* + * give up write locks. the i/o lock cannot be held nested + * if it is being demoted. + */ +void +xfs_ilock_demote(xfs_inode_t *ip, + uint lock_flags) +{ + ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)); + ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0); + + if (lock_flags & XFS_ILOCK_EXCL) { + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + mrdemote(&ip->i_lock); + } + if (lock_flags & XFS_IOLOCK_EXCL) { + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); + mrdemote(&ip->i_iolock); + } +} + +/* + * The following three routines simply manage the i_flock + * semaphore embedded in the inode. This semaphore synchronizes + * processes attempting to flush the in-core inode back to disk. + */ +void +xfs_iflock(xfs_inode_t *ip) +{ + psema(&(ip->i_flock), PINOD|PLTWAIT); +} + +int +xfs_iflock_nowait(xfs_inode_t *ip) +{ + return (cpsema(&(ip->i_flock))); +} + +void +xfs_ifunlock(xfs_inode_t *ip) +{ + ASSERT(valusema(&(ip->i_flock)) <= 0); + vsema(&(ip->i_flock)); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_imap.h linux.21rc5-ac2/fs/xfs/xfs_imap.h --- linux.21rc5/fs/xfs/xfs_imap.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_imap.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_IMAP_H__ +#define __XFS_IMAP_H__ + +/* + * This is the structure passed to xfs_imap() to map + * an inode number to its on disk location. + */ +typedef struct xfs_imap { + xfs_daddr_t im_blkno; /* starting BB of inode chunk */ + uint im_len; /* length in BBs of inode chunk */ + xfs_agblock_t im_agblkno; /* logical block of inode chunk in ag */ + ushort im_ioffset; /* inode offset in block in "inodes" */ + ushort im_boffset; /* inode offset in block in bytes */ +} xfs_imap_t; + +#ifdef __KERNEL__ +struct xfs_mount; +struct xfs_trans; +int xfs_imap(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, + xfs_imap_t *, uint); +#endif + +#endif /* __XFS_IMAP_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_inode.c linux.21rc5-ac2/fs/xfs/xfs_inode.c --- linux.21rc5/fs/xfs/xfs_inode.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_inode.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,3661 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_imap.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_buf_item.h" +#include "xfs_rw.h" +#include "xfs_error.h" +#include "xfs_bit.h" +#include "xfs_utils.h" +#include "xfs_dir2_trace.h" +#include "xfs_quota.h" +#include "xfs_mac.h" +#include "xfs_acl.h" + + +kmem_zone_t *xfs_ifork_zone; +kmem_zone_t *xfs_inode_zone; +kmem_zone_t *xfs_chashlist_zone; + +/* + * Used in xfs_itruncate(). This is the maximum number of extents + * freed from a file in a single transaction. + */ +#define XFS_ITRUNC_MAX_EXTENTS 2 + +STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *); +STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int); +STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int); +STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); + + +#ifdef DEBUG +/* + * Make sure that the extents in the given memory buffer + * are valid. + */ +STATIC void +xfs_validate_extents( + xfs_bmbt_rec_t *ep, + int nrecs, + int disk, + xfs_exntfmt_t fmt) +{ + xfs_bmbt_irec_t irec; + xfs_bmbt_rec_t rec; + int i; + + for (i = 0; i < nrecs; i++) { + rec.l0 = get_unaligned((__uint64_t*)&ep->l0); + rec.l1 = get_unaligned((__uint64_t*)&ep->l1); + if (disk) + xfs_bmbt_disk_get_all(&rec, &irec); + else + xfs_bmbt_get_all(&rec, &irec); + if (fmt == XFS_EXTFMT_NOSTATE) + ASSERT(irec.br_state == XFS_EXT_NORM); + ep++; + } +} +#else /* DEBUG */ +#define xfs_validate_extents(ep, nrecs, disk, fmt) +#endif /* DEBUG */ + +/* + * Check that none of the inode's in the buffer have a next + * unlinked field of 0. + */ +#if defined(DEBUG) +void +xfs_inobp_check( + xfs_mount_t *mp, + xfs_buf_t *bp) +{ + int i; + int j; + xfs_dinode_t *dip; + + j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog; + + for (i = 0; i < j; i++) { + dip = (xfs_dinode_t *)xfs_buf_offset(bp, + i * mp->m_sb.sb_inodesize); + if (INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT)) { + xfs_fs_cmn_err(CE_ALERT, mp, + "Detected a bogus zero next_unlinked field in incore inode buffer 0x%p. 
About to pop an ASSERT.", + bp); + ASSERT(!INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT)); + } + } +} +#endif + +/* + * called from bwrite on xfs inode buffers + */ +void +xfs_inobp_bwcheck(xfs_buf_t *bp) +{ + xfs_mount_t *mp; + int i; + int j; + xfs_dinode_t *dip; + + ASSERT(XFS_BUF_FSPRIVATE3(bp, void *) != NULL); + + mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *); + + + j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog; + + for (i = 0; i < j; i++) { + dip = (xfs_dinode_t *) xfs_buf_offset(bp, + i * mp->m_sb.sb_inodesize); + if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) { + cmn_err(CE_WARN, +"Bad magic # 0x%x in XFS inode buffer 0x%Lx, starting blockno %Ld, offset 0x%x", + INT_GET(dip->di_core.di_magic, ARCH_CONVERT), + (__uint64_t)(__psunsigned_t) bp, + (__int64_t) XFS_BUF_ADDR(bp), + xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); + xfs_fs_cmn_err(CE_WARN, mp, + "corrupt, unmount and run xfs_repair"); + } + if (INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT)) { + cmn_err(CE_WARN, +"Bad next_unlinked field (0) in XFS inode buffer 0x%p, starting blockno %Ld, offset 0x%x", + (__uint64_t)(__psunsigned_t) bp, + (__int64_t) XFS_BUF_ADDR(bp), + xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); + xfs_fs_cmn_err(CE_WARN, mp, + "corrupt, unmount and run xfs_repair"); + } + } + + return; +} + +/* + * This routine is called to map an inode number within a file + * system to the buffer containing the on-disk version of the + * inode. It returns a pointer to the buffer containing the + * on-disk inode in the bpp parameter, and in the dip parameter + * it returns a pointer to the on-disk inode within that buffer. + * + * If a non-zero error is returned, then the contents of bpp and + * dipp are undefined. + * + * Use xfs_imap() to determine the size and location of the + * buffer to read from disk. + */ +int +xfs_inotobp( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + xfs_dinode_t **dipp, + xfs_buf_t **bpp, + int *offset) +{ + int di_ok; + xfs_imap_t imap; + xfs_buf_t *bp; + int error; + xfs_dinode_t *dip; + + /* + * Call the space managment code to find the location of the + * inode on disk. + */ + imap.im_blkno = 0; + error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP); + if (error != 0) { + cmn_err(CE_WARN, + "xfs_inotobp: xfs_imap() returned an " + "error %d on %s. Returning error.", error, mp->m_fsname); + return error; + } + + /* + * If the inode number maps to a block outside the bounds of the + * file system then return NULL rather than calling read_buf + * and panicing when we get an error from the driver. + */ + if ((imap.im_blkno + imap.im_len) > + XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { + cmn_err(CE_WARN, + "xfs_inotobp: inode number (%d + %d) maps to a block outside the bounds " + "of the file system %s. Returning EINVAL.", + imap.im_blkno, imap.im_len,mp->m_fsname); + return XFS_ERROR(EINVAL); + } + + /* + * Read in the buffer. If tp is NULL, xfs_trans_read_buf() will + * default to just a read_buf() call. + */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno, + (int)imap.im_len, XFS_BUF_LOCK, &bp); + + if (error) { + cmn_err(CE_WARN, + "xfs_inotobp: xfs_trans_read_buf() returned an " + "error %d on %s. 
Returning error.", error, mp->m_fsname); + return error; + } + dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0); + di_ok = + INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && + XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, + XFS_RANDOM_ITOBP_INOTOBP))) { + XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip); + xfs_trans_brelse(tp, bp); + cmn_err(CE_WARN, + "xfs_inotobp: XFS_TEST_ERROR() returned an " + "error on %s. Returning EFSCORRUPTED.", mp->m_fsname); + return XFS_ERROR(EFSCORRUPTED); + } + + xfs_inobp_check(mp, bp); + + /* + * Set *dipp to point to the on-disk inode in the buffer. + */ + *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); + *bpp = bp; + *offset = imap.im_boffset; + return 0; +} + + +/* + * This routine is called to map an inode to the buffer containing + * the on-disk version of the inode. It returns a pointer to the + * buffer containing the on-disk inode in the bpp parameter, and in + * the dip parameter it returns a pointer to the on-disk inode within + * that buffer. + * + * If a non-zero error is returned, then the contents of bpp and + * dipp are undefined. + * + * If the inode is new and has not yet been initialized, use xfs_imap() + * to determine the size and location of the buffer to read from disk. + * If the inode has already been mapped to its buffer and read in once, + * then use the mapping information stored in the inode rather than + * calling xfs_imap(). This allows us to avoid the overhead of looking + * at the inode btree for small block file systems (see xfs_dilocate()). + * We can tell whether the inode has been mapped in before by comparing + * its disk block address to 0. Only uninitialized inodes will have + * 0 for the disk block address. + */ +int +xfs_itobp( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dinode_t **dipp, + xfs_buf_t **bpp, + xfs_daddr_t bno) +{ + xfs_buf_t *bp; + int error; + xfs_imap_t imap; +#ifdef __KERNEL__ + int i; + int ni; +#endif + + if (ip->i_blkno == (xfs_daddr_t)0) { + /* + * Call the space management code to find the location of the + * inode on disk. + */ + imap.im_blkno = bno; + error = xfs_imap(mp, tp, ip->i_ino, &imap, XFS_IMAP_LOOKUP); + if (error != 0) { + return error; + } + + /* + * If the inode number maps to a block outside the bounds + * of the file system then return NULL rather than calling + * read_buf and panicing when we get an error from the + * driver. + */ + if ((imap.im_blkno + imap.im_len) > + XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: " + "(imap.im_blkno (0x%llx) " + "+ imap.im_len (0x%llx)) > " + " XFS_FSB_TO_BB(mp, " + "mp->m_sb.sb_dblocks) (0x%llx)", + (unsigned long long) imap.im_blkno, + (unsigned long long) imap.im_len, + XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); +#endif /* DEBUG */ + return XFS_ERROR(EINVAL); + } + + /* + * Fill in the fields in the inode that will be used to + * map the inode to its buffer from now on. + */ + ip->i_blkno = imap.im_blkno; + ip->i_len = imap.im_len; + ip->i_boffset = imap.im_boffset; + } else { + /* + * We've already mapped the inode once, so just use the + * mapping that we saved the first time. + */ + imap.im_blkno = ip->i_blkno; + imap.im_len = ip->i_len; + imap.im_boffset = ip->i_boffset; + } + ASSERT(bno == 0 || bno == imap.im_blkno); + + /* + * Read in the buffer. If tp is NULL, xfs_trans_read_buf() will + * default to just a read_buf() call. 
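The i_blkno test in xfs_itobp() above is simple memoization: a zero disk address means "never mapped", so the expensive lookup runs once and its result is cached in the in-core inode for every later access. Reduced to its shape, with a fabricated geometry in place of xfs_imap():

#include <stdint.h>

struct imap {
	uint64_t blkno;		/* 0 means "not yet mapped" */
	unsigned int len;
	unsigned int boffset;
};

struct cached_inode {
	uint64_t ino;
	struct imap map;
};

/* Toy mapping: derive a location from the inode number. */
static void
slow_map(uint64_t ino, struct imap *m)
{
	m->blkno = 8 + (ino / 64) * 16;	/* fabricated geometry */
	m->len = 16;
	m->boffset = (unsigned int)(ino % 64) * 256;
}

void
inode_to_buf(struct cached_inode *ip, struct imap *out)
{
	if (ip->map.blkno == 0)		/* first use: do the real lookup */
		slow_map(ip->ino, &ip->map);
	*out = ip->map;			/* later uses: cached mapping */
}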
+ */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno, + (int)imap.im_len, XFS_BUF_LOCK, &bp); + + if (error) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: " + "xfs_trans_read_buf() returned error %d, " + "imap.im_blkno 0x%llx, imap.im_len 0x%llx", + error, (unsigned long long) imap.im_blkno, + (unsigned long long) imap.im_len); +#endif /* DEBUG */ + return error; + } +#ifdef __KERNEL__ + /* + * Validate the magic number and version of every inode in the buffer + * (if DEBUG kernel) or the first inode in the buffer, otherwise. + */ +#ifdef DEBUG + ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; +#else + ni = 1; +#endif + for (i = 0; i < ni; i++) { + int di_ok; + xfs_dinode_t *dip; + + dip = (xfs_dinode_t *)xfs_buf_offset(bp, + (i << mp->m_sb.sb_inodelog)); + di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && + XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, + XFS_RANDOM_ITOBP_INOTOBP))) { +#ifdef DEBUG + prdev("bad inode magic/vsn daddr 0x%llx #%d (magic=%x)", + mp->m_dev, (unsigned long long)imap.im_blkno, i, + INT_GET(dip->di_core.di_magic, ARCH_CONVERT)); +#endif + XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_LOW, + mp, dip); + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); + } + } +#endif /* __KERNEL__ */ + + xfs_inobp_check(mp, bp); + + /* + * Mark the buffer as an inode buffer now that it looks good + */ + XFS_BUF_SET_VTYPE(bp, B_FS_INO); + + /* + * Set *dipp to point to the on-disk inode in the buffer. + */ + *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); + *bpp = bp; + return 0; +} + +/* + * Move inode type and inode format specific information from the + * on-disk inode to the in-core inode. For fifos, devs, and sockets + * this means set if_rdev to the proper value. For files, directories, + * and symlinks this means to bring in the in-line data or extent + * pointers. For a file in B-tree format, only the root is immediately + * brought in-core. The rest will be in-lined in if_extents when it + * is first referenced (see xfs_iread_extents()). + */ +STATIC int +xfs_iformat( + xfs_inode_t *ip, + xfs_dinode_t *dip) +{ + xfs_attr_shortform_t *atp; + int size; + int error; + xfs_fsize_t di_size; + ip->i_df.if_ext_max = + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + error = 0; + + if (unlikely( + INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) + + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) > + INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt dinode %Lu, extent total = %d, nblocks = %Lu." + " Unmount and run xfs_repair.", + (unsigned long long)ip->i_ino, + (int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) + + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)), + (unsigned long long) + INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT)); + XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt dinode %Lu, forkoff = 0x%x." 
+ " Unmount and run xfs_repair.", + (unsigned long long)ip->i_ino, + (int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT))); + XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + switch (ip->i_d.di_mode & IFMT) { + case IFIFO: + case IFCHR: + case IFBLK: + case IFSOCK: + if (unlikely(INT_GET(dip->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_DEV)) { + XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + ip->i_d.di_size = 0; + ip->i_df.if_u2.if_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT); + break; + + case IFREG: + case IFLNK: + case IFDIR: + switch (INT_GET(dip->di_core.di_format, ARCH_CONVERT)) { + case XFS_DINODE_FMT_LOCAL: + /* + * no local regular files yet + */ + if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & IFMT) == IFREG)) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode (local format for regular file) %Lu. Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino); + XFS_CORRUPTION_ERROR("xfs_iformat(4)", + XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT); + if (unlikely(di_size > + XFS_DFORK_DSIZE_ARCH(dip, ip->i_mount, ARCH_CONVERT))) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode %Lu (bad size %Ld for local inode). Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino, + (long long) di_size); + XFS_CORRUPTION_ERROR("xfs_iformat(5)", + XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + size = (int)di_size; + error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size); + break; + case XFS_DINODE_FMT_EXTENTS: + error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK); + break; + case XFS_DINODE_FMT_BTREE: + error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK); + break; + default: + XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + break; + + default: + XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + if (error) { + return error; + } + if (!XFS_DFORK_Q_ARCH(dip, ARCH_CONVERT)) + return 0; + ASSERT(ip->i_afp == NULL); + ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); + ip->i_afp->if_ext_max = + XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) { + case XFS_DINODE_FMT_LOCAL: + atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR_ARCH(dip, ARCH_CONVERT); + size = (int)INT_GET(atp->hdr.totsize, ARCH_CONVERT); + error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); + break; + case XFS_DINODE_FMT_EXTENTS: + error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK); + break; + case XFS_DINODE_FMT_BTREE: + error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); + break; + default: + error = XFS_ERROR(EFSCORRUPTED); + break; + } + if (error) { + kmem_zone_free(xfs_ifork_zone, ip->i_afp); + ip->i_afp = NULL; + xfs_idestroy_fork(ip, XFS_DATA_FORK); + } + return error; +} + +/* + * The file is in-lined in the on-disk inode. + * If it fits into if_inline_data, then copy + * it there, otherwise allocate a buffer for it + * and copy the data there. Either way, set + * if_data to point at the data. + * If we allocate a buffer for the data, make + * sure that its size is a multiple of 4 and + * record the real size in i_real_bytes. 
+ */ +STATIC int +xfs_iformat_local( + xfs_inode_t *ip, + xfs_dinode_t *dip, + int whichfork, + int size) +{ + xfs_ifork_t *ifp; + int real_size; + + /* + * If the size is unreasonable, then something + * is wrong and we just bail out rather than crash in + * kmem_alloc() or memcpy() below. + */ + if (unlikely(size > XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT))) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode %Lu (bad size %d for local fork, size = %d). Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino, size, + XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT)); + XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + ifp = XFS_IFORK_PTR(ip, whichfork); + real_size = 0; + if (size == 0) + ifp->if_u1.if_data = NULL; + else if (size <= sizeof(ifp->if_u2.if_inline_data)) + ifp->if_u1.if_data = ifp->if_u2.if_inline_data; + else { + real_size = roundup(size, 4); + ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); + } + ifp->if_bytes = size; + ifp->if_real_bytes = real_size; + if (size) + memcpy(ifp->if_u1.if_data, + XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT), size); + ifp->if_flags &= ~XFS_IFEXTENTS; + ifp->if_flags |= XFS_IFINLINE; + return 0; +} + +/* + * The file consists of a set of extents all + * of which fit into the on-disk inode. + * If there are few enough extents to fit into + * the if_inline_ext, then copy them there. + * Otherwise allocate a buffer for them and copy + * them into it. Either way, set if_extents + * to point at the extents. + */ +STATIC int +xfs_iformat_extents( + xfs_inode_t *ip, + xfs_dinode_t *dip, + int whichfork) +{ + xfs_bmbt_rec_t *ep, *dp; + xfs_ifork_t *ifp; + int nex; + int real_size; + int size; + int i; + + ifp = XFS_IFORK_PTR(ip, whichfork); + nex = XFS_DFORK_NEXTENTS_ARCH(dip, whichfork, ARCH_CONVERT); + size = nex * (uint)sizeof(xfs_bmbt_rec_t); + + /* + * If the number of extents is unreasonable, then something + * is wrong and we just bail out rather than crash in + * kmem_alloc() or memcpy() below. + */ + if (unlikely(size < 0 || size > XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT))) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode %Lu ((a)extents = %d). 
Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino, nex); + XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + real_size = 0; + if (nex == 0) + ifp->if_u1.if_extents = NULL; + else if (nex <= XFS_INLINE_EXTS) + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; + else { + ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); + ASSERT(ifp->if_u1.if_extents != NULL); + real_size = size; + } + ifp->if_bytes = size; + ifp->if_real_bytes = real_size; + if (size) { + dp = (xfs_bmbt_rec_t *) + XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT); + xfs_validate_extents(dp, nex, 1, XFS_EXTFMT_INODE(ip)); + ep = ifp->if_u1.if_extents; + for (i = 0; i < nex; i++, ep++, dp++) { + ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0), + ARCH_CONVERT); + ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1), + ARCH_CONVERT); + } + xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex, + whichfork); + if (whichfork != XFS_DATA_FORK || + XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) + if (unlikely(xfs_check_nostate_extents( + ifp->if_u1.if_extents, nex))) { + XFS_ERROR_REPORT("xfs_iformat_extents(2)", + XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + } + ifp->if_flags |= XFS_IFEXTENTS; + return 0; +} + +/* + * The file has too many extents to fit into + * the inode, so they are in B-tree format. + * Allocate a buffer for the root of the B-tree + * and copy the root into it. The i_extents + * field will remain NULL until all of the + * extents are read in (when they are needed). + */ +STATIC int +xfs_iformat_btree( + xfs_inode_t *ip, + xfs_dinode_t *dip, + int whichfork) +{ + xfs_bmdr_block_t *dfp; + xfs_ifork_t *ifp; + /* REFERENCED */ + int nrecs; + int size; + + ifp = XFS_IFORK_PTR(ip, whichfork); + dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT); + size = XFS_BMAP_BROOT_SPACE(dfp); + nrecs = XFS_BMAP_BROOT_NUMRECS(dfp); + + /* + * blow out if -- fork has less extents than can fit in + * fork (fork shouldn't be a btree format), root btree + * block has more records than can fit into the fork, + * or the number of extents is greater than the number of + * blocks. + */ + if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max + || XFS_BMDR_SPACE_CALC(nrecs) > + XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT) + || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode %Lu (btree). Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino); + XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + + ifp->if_broot_bytes = size; + ifp->if_broot = kmem_alloc(size, KM_SLEEP); + ASSERT(ifp->if_broot != NULL); + /* + * Copy and convert from the on-disk structure + * to the in-memory structure. 
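The get_unaligned()/INT_GET() pairing in the extent copy above exists because the on-disk extent records are big-endian and not necessarily aligned for the host. A portable equivalent of that read, assembling the 64-bit word a byte at a time, which is safe at any alignment and on either host endianness:

#include <stdint.h>

static uint64_t
be64_read_unaligned(const void *p)
{
	const uint8_t *b = p;
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return v;
}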
+ */ + xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT), + ifp->if_broot, size); + ifp->if_flags &= ~XFS_IFEXTENTS; + ifp->if_flags |= XFS_IFBROOT; + + return 0; +} + +/* + * xfs_xlate_dinode_core - translate an xfs_inode_core_t between ondisk + * and native format + * + * buf = on-disk representation + * dip = native representation + * dir = direction - +ve -> disk to native + * -ve -> native to disk + * arch = on-disk architecture + */ +void +xfs_xlate_dinode_core( + xfs_caddr_t buf, + xfs_dinode_core_t *dip, + int dir, + xfs_arch_t arch) +{ + xfs_dinode_core_t *buf_core = (xfs_dinode_core_t *)buf; + xfs_dinode_core_t *mem_core = (xfs_dinode_core_t *)dip; + + ASSERT(dir); + if (arch == ARCH_NOCONVERT) { + if (dir > 0) { + memcpy((xfs_caddr_t)mem_core, (xfs_caddr_t)buf_core, + sizeof(xfs_dinode_core_t)); + } else { + memcpy((xfs_caddr_t)buf_core, (xfs_caddr_t)mem_core, + sizeof(xfs_dinode_core_t)); + } + return; + } + + INT_XLATE(buf_core->di_magic, mem_core->di_magic, dir, arch); + INT_XLATE(buf_core->di_mode, mem_core->di_mode, dir, arch); + INT_XLATE(buf_core->di_version, mem_core->di_version, dir, arch); + INT_XLATE(buf_core->di_format, mem_core->di_format, dir, arch); + INT_XLATE(buf_core->di_onlink, mem_core->di_onlink, dir, arch); + INT_XLATE(buf_core->di_uid, mem_core->di_uid, dir, arch); + INT_XLATE(buf_core->di_gid, mem_core->di_gid, dir, arch); + INT_XLATE(buf_core->di_nlink, mem_core->di_nlink, dir, arch); + INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch); + + if (dir > 0) { + memcpy(mem_core->di_pad, buf_core->di_pad, + sizeof(buf_core->di_pad)); + } else { + memcpy(buf_core->di_pad, mem_core->di_pad, + sizeof(buf_core->di_pad)); + } + + INT_XLATE(buf_core->di_atime.t_sec, mem_core->di_atime.t_sec, + dir, arch); + INT_XLATE(buf_core->di_atime.t_nsec, mem_core->di_atime.t_nsec, + dir, arch); + INT_XLATE(buf_core->di_mtime.t_sec, mem_core->di_mtime.t_sec, + dir, arch); + INT_XLATE(buf_core->di_mtime.t_nsec, mem_core->di_mtime.t_nsec, + dir, arch); + INT_XLATE(buf_core->di_ctime.t_sec, mem_core->di_ctime.t_sec, + dir, arch); + INT_XLATE(buf_core->di_ctime.t_nsec, mem_core->di_ctime.t_nsec, + dir, arch); + INT_XLATE(buf_core->di_size, mem_core->di_size, dir, arch); + INT_XLATE(buf_core->di_nblocks, mem_core->di_nblocks, dir, arch); + INT_XLATE(buf_core->di_extsize, mem_core->di_extsize, dir, arch); + INT_XLATE(buf_core->di_nextents, mem_core->di_nextents, dir, arch); + INT_XLATE(buf_core->di_anextents, mem_core->di_anextents, dir, arch); + INT_XLATE(buf_core->di_forkoff, mem_core->di_forkoff, dir, arch); + INT_XLATE(buf_core->di_aformat, mem_core->di_aformat, dir, arch); + INT_XLATE(buf_core->di_dmevmask, mem_core->di_dmevmask, dir, arch); + INT_XLATE(buf_core->di_dmstate, mem_core->di_dmstate, dir, arch); + INT_XLATE(buf_core->di_flags, mem_core->di_flags, dir, arch); + INT_XLATE(buf_core->di_gen, mem_core->di_gen, dir, arch); +} + +/* + * Given a mount structure and an inode number, return a pointer + * to a newly allocated in-core inode coresponding to the given + * inode number. + * + * Initialize the inode's attributes and extent pointers if it + * already has them (it will not if the inode has no links). 
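xfs_xlate_dinode_core() above drives both copy directions through a single field list via the INT_XLATE macro, so the disk-to-memory and memory-to-disk conversions can never drift apart. A two-field toy with ntohl/htonl as the byte-order converters (XFS's on-disk format is big-endian; the names here are invented):

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl/htonl as the converters */

struct disk_rec { uint32_t magic; uint32_t gen; };
struct mem_rec  { uint32_t magic; uint32_t gen; };

#define XLATE32(buf, mem, dir)			\
	do {					\
		if ((dir) > 0)			\
			(mem) = ntohl(buf);	\
		else				\
			(buf) = htonl(mem);	\
	} while (0)

void
xlate_rec(struct disk_rec *d, struct mem_rec *m, int dir)
{
	XLATE32(d->magic, m->magic, dir);
	XLATE32(d->gen, m->gen, dir);
}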
+ */ +int +xfs_iread( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + xfs_inode_t **ipp, + xfs_daddr_t bno) +{ + xfs_buf_t *bp; + xfs_dinode_t *dip; + xfs_inode_t *ip; + int error; + + ASSERT(xfs_inode_zone != NULL); + + ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP); + ip->i_ino = ino; + ip->i_mount = mp; + + /* + * Get pointer's to the on-disk inode and the buffer containing it. + * If the inode number refers to a block outside the file system + * then xfs_itobp() will return NULL. In this case we should + * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will + * know that this is a new incore inode. + */ + error = xfs_itobp(mp, tp, ip, &dip, &bp, bno); + + if (error != 0) { + kmem_zone_free(xfs_inode_zone, ip); + return error; + } + + /* + * Initialize inode's trace buffers. + * Do this before xfs_iformat in case it adds entries. + */ +#ifdef XFS_BMAP_TRACE + ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_BMBT_TRACE + ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_RW_TRACE + ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_STRAT_TRACE + ip->i_strat_trace = ktrace_alloc(XFS_STRAT_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_ILOCK_TRACE + ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_DIR2_TRACE + ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP); +#endif + + /* + * If we got something that isn't an inode it means someone + * (nfs or dmi) has a stale handle. + */ + if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) { + kmem_zone_free(xfs_inode_zone, ip); + xfs_trans_brelse(tp, bp); +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " + "dip->di_core.di_magic (0x%x) != " + "XFS_DINODE_MAGIC (0x%x)", + INT_GET(dip->di_core.di_magic, ARCH_CONVERT), + XFS_DINODE_MAGIC); +#endif /* DEBUG */ + return XFS_ERROR(EINVAL); + } + + /* + * If the on-disk inode is already linked to a directory + * entry, copy all of the inode into the in-core inode. + * xfs_iformat() handles copying in the inode format + * specific information. + * Otherwise, just get the truly permanent information. + */ + if (!INT_ISZERO(dip->di_core.di_mode, ARCH_CONVERT)) { + xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core, + &(ip->i_d), 1, ARCH_CONVERT); + error = xfs_iformat(ip, dip); + if (error) { + kmem_zone_free(xfs_inode_zone, ip); + xfs_trans_brelse(tp, bp); +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " + "xfs_iformat() returned error %d", + error); +#endif /* DEBUG */ + return error; + } + } else { + ip->i_d.di_magic = INT_GET(dip->di_core.di_magic, ARCH_CONVERT); + ip->i_d.di_version = INT_GET(dip->di_core.di_version, ARCH_CONVERT); + ip->i_d.di_gen = INT_GET(dip->di_core.di_gen, ARCH_CONVERT); + /* + * Make sure to pull in the mode here as well in + * case the inode is released without being used. + * This ensures that xfs_inactive() will see that + * the inode is already free and not try to mess + * with the uninitialized part of it. + */ + ip->i_d.di_mode = 0; + /* + * Initialize the per-fork minima and maxima for a new + * inode here. xfs_iformat will do it for old inodes. + */ + ip->i_df.if_ext_max = + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + } + + INIT_LIST_HEAD(&ip->i_reclaim); + + /* + * The inode format changed when we moved the link count and + * made it 32 bits long. If this is an old format inode, + * convert it in memory to look like a new one. 
If it gets + * flushed to disk we will convert back before flushing or + * logging it. We zero out the new projid field and the old link + * count field. We'll handle clearing the pad field (the remains + * of the old uuid field) when we actually convert the inode to + * the new format. We don't change the version number so that we + * can distinguish this from a real new format inode. + */ + if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { + ip->i_d.di_nlink = ip->i_d.di_onlink; + ip->i_d.di_onlink = 0; + ip->i_d.di_projid = 0; + } + + ip->i_delayed_blks = 0; + + /* + * Mark the buffer containing the inode as something to keep + * around for a while. This helps to keep recently accessed + * meta-data in-core longer. + */ + XFS_BUF_SET_REF(bp, XFS_INO_REF); + + /* + * Use xfs_trans_brelse() to release the buffer containing the + * on-disk inode, because it was acquired with xfs_trans_read_buf() + * in xfs_itobp() above. If tp is NULL, this is just a normal + * brelse(). If we're within a transaction, then xfs_trans_brelse() + * will only release the buffer if it is not dirty within the + * transaction. It will be OK to release the buffer in this case, + * because inodes on disk are never destroyed and we will be + * locking the new in-core inode before putting it in the hash + * table where other processes can find it. Thus we don't have + * to worry about the inode being changed just because we released + * the buffer. + */ + xfs_trans_brelse(tp, bp); + *ipp = ip; + return 0; +} + +/* + * Read in extents from a btree-format inode. + * Allocate and fill in if_extents. Real work is done in xfs_bmap.c. + */ +int +xfs_iread_extents( + xfs_trans_t *tp, + xfs_inode_t *ip, + int whichfork) +{ + int error; + xfs_ifork_t *ifp; + size_t size; + + if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { + XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + size = XFS_IFORK_NEXTENTS(ip, whichfork) * (uint)sizeof(xfs_bmbt_rec_t); + ifp = XFS_IFORK_PTR(ip, whichfork); + /* + * We know that the size is legal (it's checked in iformat_btree) + */ + ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); + ASSERT(ifp->if_u1.if_extents != NULL); + ifp->if_lastex = NULLEXTNUM; + ifp->if_bytes = ifp->if_real_bytes = (int)size; + ifp->if_flags |= XFS_IFEXTENTS; + error = xfs_bmap_read_extents(tp, ip, whichfork); + if (error) { + kmem_free(ifp->if_u1.if_extents, size); + ifp->if_u1.if_extents = NULL; + ifp->if_bytes = ifp->if_real_bytes = 0; + ifp->if_flags &= ~XFS_IFEXTENTS; + return error; + } + xfs_validate_extents((xfs_bmbt_rec_t *)ifp->if_u1.if_extents, + XFS_IFORK_NEXTENTS(ip, whichfork), 0, XFS_EXTFMT_INODE(ip)); + return 0; +} + +/* + * Allocate an inode on disk and return a copy of its in-core version. + * The in-core inode is locked exclusively. Set mode, nlink, and rdev + * appropriately within the inode. The uid and gid for the inode are + * set according to the contents of the given cred structure. + * + * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc() + * has a free inode available, call xfs_iget() + * to obtain the in-core version of the allocated inode. Finally, + * fill in the inode and log its initial contents. In this case, + * ialloc_context would be set to NULL and call_again set to false. + * + * If xfs_dialloc() does not have an available inode, + * it will replenish its supply by doing an allocation. 
Since we can + * only do one allocation within a transaction without deadlocks, we + * must commit the current transaction before returning the inode itself. + * In this case, therefore, we will set call_again to true and return. + * The caller should then commit the current transaction, start a new + * transaction, and call xfs_ialloc() again to actually get the inode. + * + * To ensure that some other process does not grab the inode that + * was allocated during the first call to xfs_ialloc(), this routine + * also returns the [locked] bp pointing to the head of the freelist + * as ialloc_context. The caller should hold this buffer across + * the commit and pass it back into this routine on the second call. + */ +int +xfs_ialloc( + xfs_trans_t *tp, + xfs_inode_t *pip, + mode_t mode, + nlink_t nlink, + xfs_dev_t rdev, + cred_t *cr, + xfs_prid_t prid, + int okalloc, + xfs_buf_t **ialloc_context, + boolean_t *call_again, + xfs_inode_t **ipp) +{ + xfs_ino_t ino; + xfs_inode_t *ip; + vnode_t *vp; + uint flags; + int error; + + /* + * Call the space management code to pick + * the on-disk inode to be allocated. + */ + ASSERT(pip != NULL); + error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc, + ialloc_context, call_again, &ino); + if (error != 0) { + return error; + } + if (*call_again || ino == NULLFSINO) { + *ipp = NULL; + return 0; + } + ASSERT(*ialloc_context == NULL); + + /* + * Get the in-core inode with the lock held exclusively. + * This is because we're setting fields here we need + * to prevent others from looking at until we're done. + */ + error = xfs_trans_iget(tp->t_mountp, tp, ino, XFS_ILOCK_EXCL, &ip); + if (error != 0) { + return error; + } + ASSERT(ip != NULL); + + vp = XFS_ITOV(ip); + vp->v_type = IFTOVT(mode); + ip->i_d.di_mode = (__uint16_t)mode; + ip->i_d.di_onlink = 0; + ip->i_d.di_nlink = nlink; + ASSERT(ip->i_d.di_nlink == nlink); + ip->i_d.di_uid = current->fsuid; + ip->i_d.di_gid = current->fsgid; + ip->i_d.di_projid = prid; + memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); + + /* + * If the superblock version is up to where we support new format + * inodes and this is currently an old format inode, then change + * the inode version number now. This way we only do the conversion + * here rather than here and in the flush/logging code. + */ + if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) && + ip->i_d.di_version == XFS_DINODE_VERSION_1) { + ip->i_d.di_version = XFS_DINODE_VERSION_2; + /* + * We've already zeroed the old link count, the projid field, + * and the pad field. + */ + } + + /* + * Project ids won't be stored on disk if we are using a version 1 inode. + */ + if ( (prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1)) + xfs_bump_ino_vers2(tp, ip); + + if (XFS_INHERIT_GID(pip, vp->v_vfsp)) { + ip->i_d.di_gid = pip->i_d.di_gid; + if ((pip->i_d.di_mode & ISGID) && (mode & IFMT) == IFDIR) { + ip->i_d.di_mode |= ISGID; + } + } + + /* + * If the group ID of the new file does not match the effective group + * ID or one of the supplementary group IDs, the ISGID bit is cleared + * (and only if the irix_sgid_inherit compatibility variable is set). + */ + if ((irix_sgid_inherit) && + (ip->i_d.di_mode & ISGID) && + (!in_group_p((gid_t)ip->i_d.di_gid))) { + ip->i_d.di_mode &= ~ISGID; + } + + ip->i_d.di_size = 0; + ip->i_d.di_nextents = 0; + ASSERT(ip->i_d.di_nblocks == 0); + xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD); + /* + * di_gen will have been taken care of in xfs_iread. 
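+	 * (xfs_iread copies di_gen from the on-disk inode even when the
+	 * inode is free, so the generation number survives reallocation.)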
+ */ + ip->i_d.di_extsize = 0; + ip->i_d.di_dmevmask = 0; + ip->i_d.di_dmstate = 0; + ip->i_d.di_flags = 0; + flags = XFS_ILOG_CORE; + switch (mode & IFMT) { + case IFIFO: + case IFCHR: + case IFBLK: + case IFSOCK: + ip->i_d.di_format = XFS_DINODE_FMT_DEV; + ip->i_df.if_u2.if_rdev = rdev; + ip->i_df.if_flags = 0; + flags |= XFS_ILOG_DEV; + break; + case IFREG: + case IFDIR: + case IFLNK: + ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; + ip->i_df.if_flags = XFS_IFEXTENTS; + ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0; + ip->i_df.if_u1.if_extents = NULL; + break; + default: + ASSERT(0); + } + /* + * Attribute fork settings for new inode. + */ + ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; + ip->i_d.di_anextents = 0; + + /* + * Log the new values stuffed into the inode. + */ + xfs_trans_log_inode(tp, ip, flags); + + /* now that we have a v_type we can set Linux inode ops (& unlock) */ + VFS_INIT_VNODE(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1); + + *ipp = ip; + return 0; +} + +/* + * Check to make sure that there are no blocks allocated to the + * file beyond the size of the file. We don't check this for + * files with fixed size extents or real time extents, but we + * at least do it for regular files. + */ +#ifdef DEBUG +void +xfs_isize_check( + xfs_mount_t *mp, + xfs_inode_t *ip, + xfs_fsize_t isize) +{ + xfs_fileoff_t map_first; + int nimaps; + xfs_bmbt_irec_t imaps[2]; + + if ((ip->i_d.di_mode & IFMT) != IFREG) + return; + + if ( ip->i_d.di_flags & XFS_DIFLAG_REALTIME ) + return; + + nimaps = 2; + map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); + /* + * The filesystem could be shutting down, so bmapi may return + * an error. + */ + if (xfs_bmapi(NULL, ip, map_first, + (XFS_B_TO_FSB(mp, + (xfs_ufsize_t)XFS_MAX_FILE_OFFSET) - + map_first), + XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps, + NULL)) + return; + ASSERT(nimaps == 1); + ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK); +} +#endif /* DEBUG */ + +/* + * Calculate the last possible buffered byte in a file. This must + * include data that was buffered beyond the EOF by the write code. + * This also needs to deal with overflowing the xfs_fsize_t type + * which can happen for sizes near the limit. + * + * We also need to take into account any blocks beyond the EOF. It + * may be the case that they were buffered by a write which failed. + * In that case the pages will still be in memory, but the inode size + * will never have been updated. + */ +xfs_fsize_t +xfs_file_last_byte( + xfs_inode_t *ip) +{ + xfs_mount_t *mp; + xfs_fsize_t last_byte; + xfs_fileoff_t last_block; + xfs_fileoff_t size_last_block; + int error; + + ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS)); + + mp = ip->i_mount; + /* + * Only check for blocks beyond the EOF if the extents have + * been read in. This eliminates the need for the inode lock, + * and it also saves us from looking when it really isn't + * necessary. 
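+	 * (If they have not been read in, we fall back to the on-disk
+	 * size below; the result is then padded by one write buffering
+	 * window, 1 << m_writeio_log bytes, to cover data buffered
+	 * beyond the EOF.)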
+ */ + if (ip->i_df.if_flags & XFS_IFEXTENTS) { + error = xfs_bmap_last_offset(NULL, ip, &last_block, + XFS_DATA_FORK); + if (error) { + last_block = 0; + } + } else { + last_block = 0; + } + size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_d.di_size); + last_block = XFS_FILEOFF_MAX(last_block, size_last_block); + + last_byte = XFS_FSB_TO_B(mp, last_block); + if (last_byte < 0) { + return XFS_MAX_FILE_OFFSET; + } + last_byte += (1 << mp->m_writeio_log); + if (last_byte < 0) { + return XFS_MAX_FILE_OFFSET; + } + return last_byte; +} + +#if defined(XFS_RW_TRACE) +STATIC void +xfs_itrunc_trace( + int tag, + xfs_inode_t *ip, + int flag, + xfs_fsize_t new_size, + xfs_off_t toss_start, + xfs_off_t toss_finish) +{ + if (ip->i_rwtrace == NULL) { + return; + } + + ktrace_enter(ip->i_rwtrace, + (void*)((long)tag), + (void*)ip, + (void*)((ip->i_d.di_size >> 32) & 0xffffffff), + (void*)(ip->i_d.di_size & 0xffffffff), + (void*)((long)flag), + (void*)((new_size >> 32) & 0xffffffff), + (void*)(new_size & 0xffffffff), + (void*)((toss_start >> 32) & 0xffffffff), + (void*)(toss_start & 0xffffffff), + (void*)((toss_finish >> 32) & 0xffffffff), + (void*)(toss_finish & 0xffffffff), + (void*)((long)private.p_cpuid), + (void*)0, + (void*)0, + (void*)0, + (void*)0); +} +#else +#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish) +#endif + +/* + * Start the truncation of the file to new_size. The new size + * must be smaller than the current size. This routine will + * clear the buffer and page caches of file data in the removed + * range, and xfs_itruncate_finish() will remove the underlying + * disk blocks. + * + * The inode must have its I/O lock locked EXCLUSIVELY, and it + * must NOT have the inode lock held at all. This is because we're + * calling into the buffer/page cache code and we can't hold the + * inode lock when we do so. + * + * The flags parameter can have either the value XFS_ITRUNC_DEFINITE + * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used + * in the case that the caller is locking things out of order and + * may not be able to call xfs_itruncate_finish() with the inode lock + * held without dropping the I/O lock. If the caller must drop the + * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start() + * must be called again with all the same restrictions as the initial + * call. + */ +void +xfs_itruncate_start( + xfs_inode_t *ip, + uint flags, + xfs_fsize_t new_size) +{ + xfs_fsize_t last_byte; + xfs_off_t toss_start; + xfs_mount_t *mp; + vnode_t *vp; + + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0); + ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size)); + ASSERT((flags == XFS_ITRUNC_DEFINITE) || + (flags == XFS_ITRUNC_MAYBE)); + + mp = ip->i_mount; + vp = XFS_ITOV(ip); + /* + * Call VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES() to get rid of pages and buffers + * overlapping the region being removed. We have to use + * the less efficient VOP_FLUSHINVAL_PAGES() in the case that the + * caller may not be able to finish the truncate without + * dropping the inode's I/O lock. Make sure + * to catch any pages brought in by buffers overlapping + * the EOF by searching out beyond the isize by our + * block size. We round new_size up to a block boundary + * so that we don't toss things on the same block as + * new_size but before it. + * + * Before calling VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES(), make sure to + * call remapf() over the same region if the file is mapped. 
+ * This frees up mapped file references to the pages in the + * given range and for the VOP_FLUSHINVAL_PAGES() case it ensures + * that we get the latest mapped changes flushed out. + */ + toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); + toss_start = XFS_FSB_TO_B(mp, toss_start); + if (toss_start < 0) { + /* + * The place to start tossing is beyond our maximum + * file size, so there is no way that the data extended + * out there. + */ + return; + } + last_byte = xfs_file_last_byte(ip); + xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start, + last_byte); + if (last_byte > toss_start) { + if (flags & XFS_ITRUNC_DEFINITE) { + VOP_TOSS_PAGES(vp, toss_start, -1, FI_REMAPF_LOCKED); + } else { + VOP_FLUSHINVAL_PAGES(vp, toss_start, -1, FI_REMAPF_LOCKED); + } + } + +#ifdef DEBUG + if (new_size == 0) { + ASSERT(VN_CACHED(vp) == 0); + } +#endif +} + +/* + * Shrink the file to the given new_size. The new + * size must be smaller than the current size. + * This will free up the underlying blocks + * in the removed range after a call to xfs_itruncate_start() + * or xfs_atruncate_start(). + * + * The transaction passed to this routine must have made + * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES. + * This routine may commit the given transaction and + * start new ones, so make sure everything involved in + * the transaction is tidy before calling here. + * Some transaction will be returned to the caller to be + * committed. The incoming transaction must already include + * the inode, and both inode locks must be held exclusively. + * The inode must also be "held" within the transaction. On + * return the inode will be "held" within the returned transaction. + * This routine does NOT require any disk space to be reserved + * for it within the transaction. + * + * The fork parameter must be either xfs_attr_fork or xfs_data_fork, + * and it indicates the fork which is to be truncated. For the + * attribute fork we only support truncation to size 0. + * + * We use the sync parameter to indicate whether or not the first + * transaction we perform might have to be synchronous. For the attr fork, + * it needs to be so if the unlink of the inode is not yet known to be + * permanent in the log. This keeps us from freeing and reusing the + * blocks of the attribute fork before the unlink of the inode becomes + * permanent. + * + * For the data fork, we normally have to run synchronously if we're + * being called out of the inactive path or we're being called + * out of the create path where we're truncating an existing file. + * Either way, the truncate needs to be sync so blocks don't reappear + * in the file with altered data in case of a crash. wsync filesystems + * can run the first case async because anything that shrinks the inode + * has to run sync so by the time we're called here from inactive, the + * inode size is permanently set to 0. + * + * Calls from the truncate path always need to be sync unless we're + * in a wsync filesystem and the file has already been unlinked. + * + * The caller is responsible for correctly setting the sync parameter. + * It gets too hard for us to guess here which path we're being called + * out of just based on inode state. 
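+ *
+ * Schematically, a data fork shrink looks like this (a sketch only;
+ * locking, reservation and error handling elided):
+ *
+ *	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, new_size);
+ *	... allocate tp with a permanent log reservation, then
+ *	    xfs_trans_ijoin() and xfs_trans_ihold() the inode ...
+ *	error = xfs_itruncate_finish(&tp, ip, new_size,
+ *				     XFS_DATA_FORK, sync);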
+ */
+int
+xfs_itruncate_finish(
+	xfs_trans_t	**tp,
+	xfs_inode_t	*ip,
+	xfs_fsize_t	new_size,
+	int		fork,
+	int		sync)
+{
+	xfs_fsblock_t	first_block;
+	xfs_fileoff_t	first_unmap_block;
+	xfs_fileoff_t	last_block;
+	xfs_filblks_t	unmap_len=0;
+	xfs_mount_t	*mp;
+	xfs_trans_t	*ntp;
+	int		done;
+	int		committed;
+	xfs_bmap_free_t	free_list;
+	int		error;
+
+	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
+	ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size));
+	ASSERT(*tp != NULL);
+	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
+	ASSERT(ip->i_transp == *tp);
+	ASSERT(ip->i_itemp != NULL);
+	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
+
+
+	ntp = *tp;
+	mp = (ntp)->t_mountp;
+	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
+
+	/*
+	 * We only support truncating the entire attribute fork.
+	 */
+	if (fork == XFS_ATTR_FORK) {
+		new_size = 0LL;
+	}
+	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
+	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
+	/*
+	 * The first thing we do is set the size to new_size permanently
+	 * on disk.  This way we don't have to worry about anyone ever
+	 * being able to look at the data being freed even in the face
+	 * of a crash.  What we're getting around here is the case where
+	 * we free a block, it is allocated to another file, it is written
+	 * to, and then we crash.  If the new data gets written to the
+	 * file but the log buffers containing the free and reallocation
+	 * don't, then we'd end up with garbage in the blocks being freed.
+	 * As long as we make the new_size permanent before actually
+	 * freeing any blocks it doesn't matter if they get written to.
+	 *
+	 * The callers must signal into us whether or not the size
+	 * setting here must be synchronous.  There are a few cases
+	 * where it doesn't have to be synchronous.  Those cases
+	 * occur if the file is unlinked and we know the unlink is
+	 * permanent or if the blocks being truncated are guaranteed
+	 * to be beyond the inode eof (regardless of the link count)
+	 * and the eof value is permanent.  Both of these cases occur
+	 * only on wsync-mounted filesystems.  In those cases, we're
+	 * guaranteed that no user will ever see the data in the blocks
+	 * that are being truncated so the truncate can run async.
+	 * In the free beyond eof case, the file may wind up with
+	 * more blocks allocated to it than it needs if we crash
+	 * and that won't get fixed until the next time the file
+	 * is re-opened and closed but that's ok as that shouldn't
+	 * be too many blocks.
+	 *
+	 * However, we can't just make all wsync xactions run async
+	 * because there's one call out of the create path that needs
+	 * to run sync where it's truncating an existing file to size
+	 * 0 whose size is > 0.
+	 *
+	 * It's probably possible to come up with a test in this
+	 * routine that would correctly distinguish all the above
+	 * cases from the values of the function parameters and the
+	 * inode state but for sanity's sake, I've decided to let the
+	 * layers above just tell us.  It's simpler to correctly figure
+	 * out in the layer above exactly under what conditions we
+	 * can run async and I think it's easier for others to read and
+	 * follow the logic in case something has to be changed.
+	 * cscope is your friend -- rcc.
+	 *
+	 * The attribute fork is much simpler.
+	 *
+	 * For the attribute fork we allow the caller to tell us whether
+	 * the unlink of the inode that led to this call is yet permanent
+	 * in the on disk log.
If it is not and we will be freeing extents + * in this inode then we make the first transaction synchronous + * to make sure that the unlink is permanent by the time we free + * the blocks. + */ + if (fork == XFS_DATA_FORK) { + if (ip->i_d.di_nextents > 0) { + ip->i_d.di_size = new_size; + xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); + } + } else if (sync) { + ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC)); + if (ip->i_d.di_anextents > 0) + xfs_trans_set_sync(ntp); + } + ASSERT(fork == XFS_DATA_FORK || + (fork == XFS_ATTR_FORK && + ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) || + (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC))))); + + /* + * Since it is possible for space to become allocated beyond + * the end of the file (in a crash where the space is allocated + * but the inode size is not yet updated), simply remove any + * blocks which show up between the new EOF and the maximum + * possible file size. If the first block to be removed is + * beyond the maximum file size (ie it is the same as last_block), + * then there is nothing to do. + */ + last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAX_FILE_OFFSET); + ASSERT(first_unmap_block <= last_block); + done = 0; + if (last_block == first_unmap_block) { + done = 1; + } else { + unmap_len = last_block - first_unmap_block + 1; + } + while (!done) { + /* + * Free up up to XFS_ITRUNC_MAX_EXTENTS. xfs_bunmapi() + * will tell us whether it freed the entire range or + * not. If this is a synchronous mount (wsync), + * then we can tell bunmapi to keep all the + * transactions asynchronous since the unlink + * transaction that made this inode inactive has + * already hit the disk. There's no danger of + * the freed blocks being reused, there being a + * crash, and the reused blocks suddenly reappearing + * in this file with garbage in them once recovery + * runs. + */ + XFS_BMAP_INIT(&free_list, &first_block); + error = xfs_bunmapi(ntp, ip, first_unmap_block, + unmap_len, + XFS_BMAPI_AFLAG(fork) | + (sync ? 0 : XFS_BMAPI_ASYNC), + XFS_ITRUNC_MAX_EXTENTS, + &first_block, &free_list, &done); + if (error) { + /* + * If the bunmapi call encounters an error, + * return to the caller where the transaction + * can be properly aborted. We just need to + * make sure we're not holding any resources + * that we were not when we came in. + */ + xfs_bmap_cancel(&free_list); + return error; + } + + /* + * Duplicate the transaction that has the permanent + * reservation and commit the old transaction. + */ + error = xfs_bmap_finish(tp, &free_list, first_block, + &committed); + ntp = *tp; + if (error) { + /* + * If the bmap finish call encounters an error, + * return to the caller where the transaction + * can be properly aborted. We just need to + * make sure we're not holding any resources + * that we were not when we came in. + * + * Aborting from this point might lose some + * blocks in the file system, but oh well. + */ + xfs_bmap_cancel(&free_list); + if (committed) { + /* + * If the passed in transaction committed + * in xfs_bmap_finish(), then we want to + * add the inode to this one before returning. + * This keeps things simple for the higher + * level code, because it always knows that + * the inode is locked and held in the + * transaction that returns to it whether + * errors occur or not. We don't mark the + * inode dirty so that this transaction can + * be easily aborted if possible. 
+ */ + xfs_trans_ijoin(ntp, ip, + XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(ntp, ip); + } + return error; + } + + if (committed) { + /* + * The first xact was committed, + * so add the inode to the new one. + * Mark it dirty so it will be logged + * and moved forward in the log as + * part of every commit. + */ + xfs_trans_ijoin(ntp, ip, + XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(ntp, ip); + xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); + } + ntp = xfs_trans_dup(ntp); + (void) xfs_trans_commit(*tp, 0, NULL); + *tp = ntp; + error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + /* + * Add the inode being truncated to the next chained + * transaction. + */ + xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(ntp, ip); + if (error) + return (error); + } + /* + * Only update the size in the case of the data fork, but + * always re-log the inode so that our permanent transaction + * can keep on rolling it forward in the log. + */ + if (fork == XFS_DATA_FORK) { + xfs_isize_check(mp, ip, new_size); + ip->i_d.di_size = new_size; + } + xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); + ASSERT((new_size != 0) || + (fork == XFS_ATTR_FORK) || + (ip->i_delayed_blks == 0)); + ASSERT((new_size != 0) || + (fork == XFS_ATTR_FORK) || + (ip->i_d.di_nextents == 0)); + xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); + return 0; +} + + +/* + * xfs_igrow_start + * + * Do the first part of growing a file: zero any data in the last + * block that is beyond the old EOF. We need to do this before + * the inode is joined to the transaction to modify the i_size. + * That way we can drop the inode lock and call into the buffer + * cache to get the buffer mapping the EOF. + */ +int +xfs_igrow_start( + xfs_inode_t *ip, + xfs_fsize_t new_size, + cred_t *credp) +{ + xfs_fsize_t isize; + int error; + + ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); + ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); + ASSERT(new_size > ip->i_d.di_size); + + error = 0; + isize = ip->i_d.di_size; + /* + * Zero any pages that may have been created by + * xfs_write_file() beyond the end of the file + * and any blocks between the old and new file sizes. + */ + error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, isize, + new_size); + return error; +} + +/* + * xfs_igrow_finish + * + * This routine is called to extend the size of a file. + * The inode must have both the iolock and the ilock locked + * for update and it must be a part of the current transaction. + * The xfs_igrow_start() function must have been called previously. + * If the change_flag is not zero, the inode change timestamp will + * be updated. + */ +void +xfs_igrow_finish( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_fsize_t new_size, + int change_flag) +{ + ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); + ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); + ASSERT(ip->i_transp == tp); + ASSERT(new_size > ip->i_d.di_size); + + /* + * Update the file size. Update the inode change timestamp + * if change_flag set. + */ + ip->i_d.di_size = new_size; + if (change_flag) + xfs_ichgtime(ip, XFS_ICHGTIME_CHG); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + +} + + +/* + * This is called when the inode's link count goes to 0. + * We place the on-disk inode on a list in the AGI. It + * will be pulled from this list when the inode is freed. 
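+ *
+ * (The AGI holds XFS_AGI_UNLINKED_BUCKETS list heads; the inode is
+ * hashed to a bucket by agino % XFS_AGI_UNLINKED_BUCKETS and pushed
+ * on the front of a singly linked list threaded through each on-disk
+ * inode's di_next_unlinked field and terminated by NULLAGINO.)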
+ */ +int +xfs_iunlink( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + xfs_mount_t *mp; + xfs_agi_t *agi; + xfs_dinode_t *dip; + xfs_buf_t *agibp; + xfs_buf_t *ibp; + xfs_agnumber_t agno; + xfs_daddr_t agdaddr; + xfs_agino_t agino; + short bucket_index; + int offset; + int error; + int agi_ok; + + ASSERT(ip->i_d.di_nlink == 0); + ASSERT(ip->i_d.di_mode != 0); + ASSERT(ip->i_transp == tp); + + mp = tp->t_mountp; + + agno = XFS_INO_TO_AGNO(mp, ip->i_ino); + agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); + + /* + * Get the agi buffer first. It ensures lock ordering + * on the list. + */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, + XFS_FSS_TO_BB(mp, 1), 0, &agibp); + if (error) { + return error; + } + /* + * Validate the magic number of the agi block. + */ + agi = XFS_BUF_TO_AGI(agibp); + agi_ok = + INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && + XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK, + XFS_RANDOM_IUNLINK))) { + XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi); + xfs_trans_brelse(tp, agibp); + return XFS_ERROR(EFSCORRUPTED); + } + /* + * Get the index into the agi hash table for the + * list this inode will go on. + */ + agino = XFS_INO_TO_AGINO(mp, ip->i_ino); + ASSERT(agino != 0); + bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; + ASSERT(!INT_ISZERO(agi->agi_unlinked[bucket_index], ARCH_CONVERT)); + ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != agino); + + if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO) { + /* + * There is already another inode in the bucket we need + * to add ourselves to. Add us at the front of the list. + * Here we put the head pointer into our next pointer, + * and then we fall through to point the head at us. + */ + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + if (error) { + return error; + } + ASSERT(INT_GET(dip->di_next_unlinked, ARCH_CONVERT) == NULLAGINO); + ASSERT(!INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT)); + /* both on-disk, don't endian flip twice */ + dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; + offset = ip->i_boffset + + offsetof(xfs_dinode_t, di_next_unlinked); + xfs_trans_inode_buf(tp, ibp); + xfs_trans_log_buf(tp, ibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + xfs_inobp_check(mp, ibp); + } + + /* + * Point the bucket head pointer at the inode being inserted. + */ + ASSERT(agino != 0); + INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, agino); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket_index); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + return 0; +} + +/* + * Pull the on-disk inode from the AGI unlinked list. + */ +STATIC int +xfs_iunlink_remove( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + xfs_ino_t next_ino; + xfs_mount_t *mp; + xfs_agi_t *agi; + xfs_dinode_t *dip; + xfs_buf_t *agibp; + xfs_buf_t *ibp; + xfs_agnumber_t agno; + xfs_daddr_t agdaddr; + xfs_agino_t agino; + xfs_agino_t next_agino; + xfs_buf_t *last_ibp; + xfs_dinode_t *last_dip; + short bucket_index; + int offset, last_offset; + int error; + int agi_ok; + + /* + * First pull the on-disk inode from the AGI unlinked list. + */ + mp = tp->t_mountp; + + agno = XFS_INO_TO_AGNO(mp, ip->i_ino); + agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); + + /* + * Get the agi buffer first. It ensures lock ordering + * on the list. 
+ */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, + XFS_FSS_TO_BB(mp, 1), 0, &agibp); + if (error) { + cmn_err(CE_WARN, + "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + return error; + } + /* + * Validate the magic number of the agi block. + */ + agi = XFS_BUF_TO_AGI(agibp); + agi_ok = + INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && + XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE, + XFS_RANDOM_IUNLINK_REMOVE))) { + XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW, + mp, agi); + xfs_trans_brelse(tp, agibp); + cmn_err(CE_WARN, + "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.", + mp->m_fsname); + return XFS_ERROR(EFSCORRUPTED); + } + /* + * Get the index into the agi hash table for the + * list this inode will go on. + */ + agino = XFS_INO_TO_AGINO(mp, ip->i_ino); + ASSERT(agino != 0); + bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; + ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO); + ASSERT(!INT_ISZERO(agi->agi_unlinked[bucket_index], ARCH_CONVERT)); + + if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) == agino) { + /* + * We're at the head of the list. Get the inode's + * on-disk buffer to see if there is anyone after us + * on the list. Only modify our next pointer if it + * is not already NULLAGINO. This saves us the overhead + * of dealing with the buffer when there is no need to + * change it. + */ + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + if (error) { + cmn_err(CE_WARN, + "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + return error; + } + next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT); + ASSERT(next_agino != 0); + if (next_agino != NULLAGINO) { + INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO); + offset = ip->i_boffset + + offsetof(xfs_dinode_t, di_next_unlinked); + xfs_trans_inode_buf(tp, ibp); + xfs_trans_log_buf(tp, ibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + xfs_inobp_check(mp, ibp); + } else { + xfs_trans_brelse(tp, ibp); + } + /* + * Point the bucket head pointer at the next inode. + */ + ASSERT(next_agino != 0); + ASSERT(next_agino != agino); + INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, next_agino); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket_index); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + } else { + /* + * We need to search the list for the inode being freed. + */ + next_agino = INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT); + last_ibp = NULL; + while (next_agino != agino) { + /* + * If the last inode wasn't the one pointing to + * us, then release its buffer since we're not + * going to do anything with it. + */ + if (last_ibp != NULL) { + xfs_trans_brelse(tp, last_ibp); + } + next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); + error = xfs_inotobp(mp, tp, next_ino, &last_dip, + &last_ibp, &last_offset); + if (error) { + cmn_err(CE_WARN, + "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + return error; + } + next_agino = INT_GET(last_dip->di_next_unlinked, ARCH_CONVERT); + ASSERT(next_agino != NULLAGINO); + ASSERT(next_agino != 0); + } + /* + * Now last_ibp points to the buffer previous to us on + * the unlinked list. Pull us from the list. 
+ */
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
+		if (error) {
+			cmn_err(CE_WARN,
+	"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s.  Returning error.",
+				error, mp->m_fsname);
+			return error;
+		}
+		next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
+		ASSERT(next_agino != 0);
+		ASSERT(next_agino != agino);
+		if (next_agino != NULLAGINO) {
+			INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
+			offset = ip->i_boffset +
+				offsetof(xfs_dinode_t, di_next_unlinked);
+			xfs_trans_inode_buf(tp, ibp);
+			xfs_trans_log_buf(tp, ibp, offset,
+					  (offset + sizeof(xfs_agino_t) - 1));
+			xfs_inobp_check(mp, ibp);
+		} else {
+			xfs_trans_brelse(tp, ibp);
+		}
+		/*
+		 * Point the previous inode on the list to the next inode.
+		 */
+		INT_SET(last_dip->di_next_unlinked, ARCH_CONVERT, next_agino);
+		ASSERT(next_agino != 0);
+		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
+		xfs_trans_inode_buf(tp, last_ibp);
+		xfs_trans_log_buf(tp, last_ibp, offset,
+				  (offset + sizeof(xfs_agino_t) - 1));
+		xfs_inobp_check(mp, last_ibp);
+	}
+	return 0;
+}
+
+/*
+ * This is called to return an inode to the inode free list.
+ * The inode should already be truncated to 0 length and have
+ * no pages associated with it.  This routine also assumes that
+ * the inode is already a part of the transaction.
+ *
+ * The on-disk copy of the inode will have been added to the list
+ * of unlinked inodes in the AGI. We need to remove the inode from
+ * that list atomically with respect to freeing it here.
+ */
+int
+xfs_ifree(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip)
+{
+	int		error;
+
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+	ASSERT(ip->i_transp == tp);
+	ASSERT(ip->i_d.di_nlink == 0);
+	ASSERT(ip->i_d.di_nextents == 0);
+	ASSERT(ip->i_d.di_anextents == 0);
+	ASSERT((ip->i_d.di_size == 0) ||
+	       ((ip->i_d.di_mode & IFMT) != IFREG));
+	ASSERT(ip->i_d.di_nblocks == 0);
+
+	/*
+	 * Pull the on-disk inode from the AGI unlinked list.
+	 */
+	error = xfs_iunlink_remove(tp, ip);
+	if (error != 0) {
+		return error;
+	}
+
+	error = xfs_difree(tp, ip->i_ino);
+	if (error != 0) {
+		return error;
+	}
+	ip->i_d.di_mode = 0;		/* mark incore inode as free */
+	ip->i_d.di_flags = 0;
+	ip->i_d.di_dmevmask = 0;
+	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
+	ip->i_df.if_ext_max =
+		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
+	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
+
+	/*
+	 * Bump the generation count so no one will be confused
+	 * by reincarnations of this inode.
+	 */
+	ip->i_d.di_gen++;
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	return 0;
+}
+
+/*
+ * Reallocate the space for if_broot based on the number of records
+ * being added or deleted as indicated in rec_diff.  Move the records
+ * and pointers in if_broot to fit the new size.  When shrinking this
+ * will eliminate holes between the records and pointers created by
+ * the caller.  When growing this will create holes to be filled in
+ * by the caller.
+ *
+ * The caller must not request to add more records than would fit in
+ * the on-disk inode root.  If the if_broot is currently NULL, then
+ * if we are adding records, one will be allocated.  The caller must
+ * also not request that the number of records go below zero, although
+ * it can go to zero.
+ *
+ * ip -- the inode whose if_broot area is changing
+ * rec_diff -- the change in the number of records, positive or negative,
+ *	 requested for the if_broot array.
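+ *
+ * (The incore root block is laid out as header, records, pointers;
+ * the records stay butted up against the header, so growing or
+ * shrinking only has to move the pointer array to its new offset.)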
+ */ +void +xfs_iroot_realloc( + xfs_inode_t *ip, + int rec_diff, + int whichfork) +{ + int cur_max; + xfs_ifork_t *ifp; + xfs_bmbt_block_t *new_broot; + int new_max; + size_t new_size; + char *np; + char *op; + + /* + * Handle the degenerate case quietly. + */ + if (rec_diff == 0) { + return; + } + + ifp = XFS_IFORK_PTR(ip, whichfork); + if (rec_diff > 0) { + /* + * If there wasn't any memory allocated before, just + * allocate it now and get out. + */ + if (ifp->if_broot_bytes == 0) { + new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); + ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size, + KM_SLEEP); + ifp->if_broot_bytes = (int)new_size; + return; + } + + /* + * If there is already an existing if_broot, then we need + * to realloc() it and shift the pointers to their new + * location. The records don't change location because + * they are kept butted up against the btree block header. + */ + cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); + new_max = cur_max + rec_diff; + new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); + ifp->if_broot = (xfs_bmbt_block_t *) + kmem_realloc(ifp->if_broot, + new_size, + (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ + KM_SLEEP); + op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, + ifp->if_broot_bytes); + np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, + (int)new_size); + ifp->if_broot_bytes = (int)new_size; + ASSERT(ifp->if_broot_bytes <= + XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); + memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); + return; + } + + /* + * rec_diff is less than 0. In this case, we are shrinking the + * if_broot buffer. It must already exist. If we go to zero + * records, just get rid of the root and clear the status bit. + */ + ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); + cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); + new_max = cur_max + rec_diff; + ASSERT(new_max >= 0); + if (new_max > 0) + new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); + else + new_size = 0; + if (new_size > 0) { + new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP); + /* + * First copy over the btree block header. + */ + memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t)); + } else { + new_broot = NULL; + ifp->if_flags &= ~XFS_IFBROOT; + } + + /* + * Only copy the records and pointers if there are any. + */ + if (new_max > 0) { + /* + * First copy the records. + */ + op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1, + ifp->if_broot_bytes); + np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1, + (int)new_size); + memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); + + /* + * Then copy the pointers. + */ + op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, + ifp->if_broot_bytes); + np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1, + (int)new_size); + memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); + } + kmem_free(ifp->if_broot, ifp->if_broot_bytes); + ifp->if_broot = new_broot; + ifp->if_broot_bytes = (int)new_size; + ASSERT(ifp->if_broot_bytes <= + XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); + return; +} + + +/* + * This is called when the amount of space needed for if_extents + * is increased or decreased. The change in size is indicated by + * the number of extents that need to be added or deleted in the + * ext_diff parameter. + * + * If the amount of space needed has decreased below the size of the + * inline buffer, then switch to using the inline buffer. 
Otherwise, + * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer + * to what is needed. + * + * ip -- the inode whose if_extents area is changing + * ext_diff -- the change in the number of extents, positive or negative, + * requested for the if_extents array. + */ +void +xfs_iext_realloc( + xfs_inode_t *ip, + int ext_diff, + int whichfork) +{ + int byte_diff; + xfs_ifork_t *ifp; + int new_size; + uint rnew_size; + + if (ext_diff == 0) { + return; + } + + ifp = XFS_IFORK_PTR(ip, whichfork); + byte_diff = ext_diff * (uint)sizeof(xfs_bmbt_rec_t); + new_size = (int)ifp->if_bytes + byte_diff; + ASSERT(new_size >= 0); + + if (new_size == 0) { + if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { + ASSERT(ifp->if_real_bytes != 0); + kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); + } + ifp->if_u1.if_extents = NULL; + rnew_size = 0; + } else if (new_size <= sizeof(ifp->if_u2.if_inline_ext)) { + /* + * If the valid extents can fit in if_inline_ext, + * copy them from the malloc'd vector and free it. + */ + if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { + /* + * For now, empty files are format EXTENTS, + * so the if_extents pointer is null. + */ + if (ifp->if_u1.if_extents) { + memcpy(ifp->if_u2.if_inline_ext, + ifp->if_u1.if_extents, new_size); + kmem_free(ifp->if_u1.if_extents, + ifp->if_real_bytes); + } + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; + } + rnew_size = 0; + } else { + rnew_size = new_size; + if ((rnew_size & (rnew_size - 1)) != 0) + rnew_size = xfs_iroundup(rnew_size); + /* + * Stuck with malloc/realloc. + */ + if (ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_alloc(rnew_size, KM_SLEEP); + memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, + sizeof(ifp->if_u2.if_inline_ext)); + } else if (rnew_size != ifp->if_real_bytes) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_realloc(ifp->if_u1.if_extents, + rnew_size, + ifp->if_real_bytes, + KM_NOFS); + } + } + ifp->if_real_bytes = rnew_size; + ifp->if_bytes = new_size; +} + + +/* + * This is called when the amount of space needed for if_data + * is increased or decreased. The change in size is indicated by + * the number of bytes that need to be added or deleted in the + * byte_diff parameter. + * + * If the amount of space needed has decreased below the size of the + * inline buffer, then switch to using the inline buffer. Otherwise, + * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer + * to what is needed. + * + * ip -- the inode whose if_data area is changing + * byte_diff -- the change in the number of bytes, positive or negative, + * requested for the if_data array. + */ +void +xfs_idata_realloc( + xfs_inode_t *ip, + int byte_diff, + int whichfork) +{ + xfs_ifork_t *ifp; + int new_size; + int real_size; + + if (byte_diff == 0) { + return; + } + + ifp = XFS_IFORK_PTR(ip, whichfork); + new_size = (int)ifp->if_bytes + byte_diff; + ASSERT(new_size >= 0); + + if (new_size == 0) { + if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { + kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); + } + ifp->if_u1.if_data = NULL; + real_size = 0; + } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) { + /* + * If the valid extents/data can fit in if_inline_ext/data, + * copy them from the malloc'd vector and free it. 
+ */ + if (ifp->if_u1.if_data == NULL) { + ifp->if_u1.if_data = ifp->if_u2.if_inline_data; + } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { + ASSERT(ifp->if_real_bytes != 0); + memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data, + new_size); + kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); + ifp->if_u1.if_data = ifp->if_u2.if_inline_data; + } + real_size = 0; + } else { + /* + * Stuck with malloc/realloc. + * For inline data, the underlying buffer must be + * a multiple of 4 bytes in size so that it can be + * logged and stay on word boundaries. We enforce + * that here. + */ + real_size = roundup(new_size, 4); + if (ifp->if_u1.if_data == NULL) { + ASSERT(ifp->if_real_bytes == 0); + ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); + } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { + /* + * Only do the realloc if the underlying size + * is really changing. + */ + if (ifp->if_real_bytes != real_size) { + ifp->if_u1.if_data = + kmem_realloc(ifp->if_u1.if_data, + real_size, + ifp->if_real_bytes, + KM_SLEEP); + } + } else { + ASSERT(ifp->if_real_bytes == 0); + ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); + memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, + ifp->if_bytes); + } + } + ifp->if_real_bytes = real_size; + ifp->if_bytes = new_size; + ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); +} + + + + +/* + * Map inode to disk block and offset. + * + * mp -- the mount point structure for the current file system + * tp -- the current transaction + * ino -- the inode number of the inode to be located + * imap -- this structure is filled in with the information necessary + * to retrieve the given inode from disk + * flags -- flags to pass to xfs_dilocate indicating whether or not + * lookups in the inode btree were OK or not + */ +int +xfs_imap( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + xfs_imap_t *imap, + uint flags) +{ + xfs_fsblock_t fsbno; + int len; + int off; + int error; + + fsbno = imap->im_blkno ? + XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK; + error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags); + if (error != 0) { + return error; + } + imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno); + imap->im_len = XFS_FSB_TO_BB(mp, len); + imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno); + imap->im_ioffset = (ushort)off; + imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog); + return 0; +} + +void +xfs_idestroy_fork( + xfs_inode_t *ip, + int whichfork) +{ + xfs_ifork_t *ifp; + + ifp = XFS_IFORK_PTR(ip, whichfork); + if (ifp->if_broot != NULL) { + kmem_free(ifp->if_broot, ifp->if_broot_bytes); + ifp->if_broot = NULL; + } + + /* + * If the format is local, then we can't have an extents + * array so just look for an inline data array. If we're + * not local then we may or may not have an extents list, + * so check and free it up if we do. 
+ */
+	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
+		    (ifp->if_u1.if_data != NULL)) {
+			ASSERT(ifp->if_real_bytes != 0);
+			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
+			ifp->if_u1.if_data = NULL;
+			ifp->if_real_bytes = 0;
+		}
+	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
+		   (ifp->if_u1.if_extents != NULL) &&
+		   (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)) {
+		ASSERT(ifp->if_real_bytes != 0);
+		kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
+		ifp->if_u1.if_extents = NULL;
+		ifp->if_real_bytes = 0;
+	}
+	ASSERT(ifp->if_u1.if_extents == NULL ||
+	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
+	ASSERT(ifp->if_real_bytes == 0);
+	if (whichfork == XFS_ATTR_FORK) {
+		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+		ip->i_afp = NULL;
+	}
+}
+
+/*
+ * This is called to free all the memory associated with an inode.
+ * It must free the inode itself and any buffers allocated for
+ * if_extents/if_data and if_broot.  It must also free the lock
+ * associated with the inode.
+ */
+void
+xfs_idestroy(
+	xfs_inode_t	*ip)
+{
+
+	switch (ip->i_d.di_mode & IFMT) {
+	case IFREG:
+	case IFDIR:
+	case IFLNK:
+		xfs_idestroy_fork(ip, XFS_DATA_FORK);
+		break;
+	}
+	if (ip->i_afp)
+		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+#ifdef NOTYET
+	if (ip->i_range_lock.r_sleep != NULL) {
+		freesema(ip->i_range_lock.r_sleep);
+		kmem_free(ip->i_range_lock.r_sleep, sizeof(sema_t));
+	}
+#endif /* NOTYET */
+	mrfree(&ip->i_lock);
+	mrfree(&ip->i_iolock);
+#ifdef NOTYET
+	mutex_destroy(&ip->i_range_lock.r_spinlock);
+#endif /* NOTYET */
+	freesema(&ip->i_flock);
+#ifdef XFS_BMAP_TRACE
+	ktrace_free(ip->i_xtrace);
+#endif
+#ifdef XFS_BMBT_TRACE
+	ktrace_free(ip->i_btrace);
+#endif
+#ifdef XFS_RW_TRACE
+	ktrace_free(ip->i_rwtrace);
+#endif
+#ifdef XFS_STRAT_TRACE
+	ktrace_free(ip->i_strat_trace);
+#endif
+#ifdef XFS_ILOCK_TRACE
+	ktrace_free(ip->i_lock_trace);
+#endif
+#ifdef XFS_DIR2_TRACE
+	ktrace_free(ip->i_dir_trace);
+#endif
+	if (ip->i_itemp) {
+		/* XXXdpd should be able to assert this but shutdown
+		 * is leaving the AIL behind. */
+		ASSERT(((ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL) == 0) ||
+		       XFS_FORCED_SHUTDOWN(ip->i_mount));
+		xfs_inode_item_destroy(ip);
+	}
+	kmem_zone_free(xfs_inode_zone, ip);
+}
+
+
+/*
+ * Increment the pin count of the given inode.
+ * This value is protected by ipinlock spinlock in the mount structure.
+ */
+void
+xfs_ipin(
+	xfs_inode_t	*ip)
+{
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+
+	atomic_inc(&ip->i_pincount);
+}
+
+/*
+ * Decrement the pin count of the given inode, and wake up
+ * anyone in xfs_iunpin_wait() if the count goes to 0.  The
+ * inode must have been previously pinned with a call to xfs_ipin().
+ */
+void
+xfs_iunpin(
+	xfs_inode_t	*ip)
+{
+	ASSERT(atomic_read(&ip->i_pincount) > 0);
+
+	if (atomic_dec_and_test(&ip->i_pincount)) {
+		vnode_t	*vp = XFS_ITOV_NULL(ip);
+
+		/* make sync come back and flush this inode */
+		if (vp) {
+			struct inode	*inode = LINVFS_GET_IP(vp);
+
+			mark_inode_dirty_sync(inode);
+		}
+
+		wake_up(&ip->i_ipin_wait);
+	}
+}
+
+/*
+ * This is called to wait for the given inode to be unpinned.
+ * It will sleep until this happens.  The caller must have the
+ * inode locked in at least shared mode so that the inode cannot
+ * be subsequently pinned once someone is waiting for it to be
+ * unpinned.
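+ *
+ * (We push the log up to the inode's last-logged LSN first, since
+ * the unpin is driven by log I/O completion; this keeps the
+ * wait_event() below from blocking longer than necessary.)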
+ */ +void +xfs_iunpin_wait( + xfs_inode_t *ip) +{ + xfs_inode_log_item_t *iip; + xfs_lsn_t lsn; + + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS)); + + if (atomic_read(&ip->i_pincount) == 0) { + return; + } + + iip = ip->i_itemp; + if (iip && iip->ili_last_lsn) { + lsn = iip->ili_last_lsn; + } else { + lsn = (xfs_lsn_t)0; + } + + /* + * Give the log a push so we don't wait here too long. + */ + xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE); + + wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0)); +} + + +/* + * xfs_iextents_copy() + * + * This is called to copy the REAL extents (as opposed to the delayed + * allocation extents) from the inode into the given buffer. It + * returns the number of bytes copied into the buffer. + * + * If there are no delayed allocation extents, then we can just + * memcpy() the extents into the buffer. Otherwise, we need to + * examine each extent in turn and skip those which are delayed. + */ +int +xfs_iextents_copy( + xfs_inode_t *ip, + xfs_bmbt_rec_t *buffer, + int whichfork) +{ + int copied; + xfs_bmbt_rec_t *dest_ep; + xfs_bmbt_rec_t *ep; +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_iextents_copy"; +#endif + int i; + xfs_ifork_t *ifp; + int nrecs; + xfs_fsblock_t start_block; + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); + ASSERT(ifp->if_bytes > 0); + + nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork); + ASSERT(nrecs > 0); + + /* + * There are some delayed allocation extents in the + * inode, so copy the extents one at a time and skip + * the delayed ones. There must be at least one + * non-delayed extent. + */ + ep = ifp->if_u1.if_extents; + dest_ep = buffer; + copied = 0; + for (i = 0; i < nrecs; i++) { + start_block = xfs_bmbt_get_startblock(ep); + if (ISNULLSTARTBLOCK(start_block)) { + /* + * It's a delayed allocation extent, so skip it. + */ + ep++; + continue; + } + + /* Translate to on disk format */ + put_unaligned(INT_GET(ep->l0, ARCH_CONVERT), + (__uint64_t*)&dest_ep->l0); + put_unaligned(INT_GET(ep->l1, ARCH_CONVERT), + (__uint64_t*)&dest_ep->l1); + dest_ep++; + ep++; + copied++; + } + ASSERT(copied != 0); + xfs_validate_extents(buffer, copied, 1, XFS_EXTFMT_INODE(ip)); + + return (copied * (uint)sizeof(xfs_bmbt_rec_t)); +} + +/* + * Each of the following cases stores data into the same region + * of the on-disk inode, so only one of them can be valid at + * any given time. While it is possible to have conflicting formats + * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is + * in EXTENTS format, this can only happen when the fork has + * changed formats after being modified but before being flushed. + * In these cases, the format always takes precedence, because the + * format indicates the current state of the fork. + */ +/*ARGSUSED*/ +STATIC int +xfs_iflush_fork( + xfs_inode_t *ip, + xfs_dinode_t *dip, + xfs_inode_log_item_t *iip, + int whichfork, + xfs_buf_t *bp) +{ + char *cp; + xfs_ifork_t *ifp; + xfs_mount_t *mp; +#ifdef XFS_TRANS_DEBUG + int first; +#endif + static const short brootflag[2] = + { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; + static const short dataflag[2] = + { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; + static const short extflag[2] = + { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; + + if (iip == NULL) + return 0; + ifp = XFS_IFORK_PTR(ip, whichfork); + /* + * This can happen if we gave up in iformat in an error path, + * for the attribute fork. 
+ */ + if (ifp == NULL) { + ASSERT(whichfork == XFS_ATTR_FORK); + return 0; + } + cp = XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT); + mp = ip->i_mount; + switch (XFS_IFORK_FORMAT(ip, whichfork)) { + case XFS_DINODE_FMT_LOCAL: + if ((iip->ili_format.ilf_fields & dataflag[whichfork]) && + (ifp->if_bytes > 0)) { + ASSERT(ifp->if_u1.if_data != NULL); + ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); + memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); + } + if (whichfork == XFS_DATA_FORK) { + if (unlikely(XFS_DIR_SHORTFORM_VALIDATE_ONDISK(mp, dip))) { + XFS_ERROR_REPORT("xfs_iflush_fork", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + } + break; + + case XFS_DINODE_FMT_EXTENTS: + ASSERT((ifp->if_flags & XFS_IFEXTENTS) || + !(iip->ili_format.ilf_fields & extflag[whichfork])); + ASSERT((ifp->if_u1.if_extents != NULL) || (ifp->if_bytes == 0)); + ASSERT((ifp->if_u1.if_extents == NULL) || (ifp->if_bytes > 0)); + if ((iip->ili_format.ilf_fields & extflag[whichfork]) && + (ifp->if_bytes > 0)) { + ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); + (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, + whichfork); + } + break; + + case XFS_DINODE_FMT_BTREE: + if ((iip->ili_format.ilf_fields & brootflag[whichfork]) && + (ifp->if_broot_bytes > 0)) { + ASSERT(ifp->if_broot != NULL); + ASSERT(ifp->if_broot_bytes <= + (XFS_IFORK_SIZE(ip, whichfork) + + XFS_BROOT_SIZE_ADJ)); + xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes, + (xfs_bmdr_block_t *)cp, + XFS_DFORK_SIZE_ARCH(dip, mp, whichfork, ARCH_CONVERT)); + } + break; + + case XFS_DINODE_FMT_DEV: + if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { + ASSERT(whichfork == XFS_DATA_FORK); + INT_SET(dip->di_u.di_dev, ARCH_CONVERT, ip->i_df.if_u2.if_rdev); + } + break; + + case XFS_DINODE_FMT_UUID: + if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { + ASSERT(whichfork == XFS_DATA_FORK); + memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid, + sizeof(uuid_t)); + } + break; + + default: + ASSERT(0); + break; + } + + return 0; +} + +/* + * xfs_iflush() will write a modified inode's changes out to the + * inode's on disk home. The caller must have the inode lock held + * in at least shared mode and the inode flush semaphore must be + * held as well. The inode lock will still be held upon return from + * the call and the caller is free to unlock it. + * The inode flush lock will be unlocked when the inode reaches the disk. + * The flags indicate how the inode's buffer should be written out. + */ +int +xfs_iflush( + xfs_inode_t *ip, + uint flags) +{ + xfs_inode_log_item_t *iip; + xfs_buf_t *bp; + xfs_dinode_t *dip; + xfs_mount_t *mp; + int error; + /* REFERENCED */ + xfs_chash_t *ch; + xfs_inode_t *iq; + int clcount; /* count of inodes clustered */ + int bufwasdelwri; + enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; + SPLDECL(s); + + XFS_STATS_INC(xfsstats.xs_iflush_count); + + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); + ASSERT(valusema(&ip->i_flock) <= 0); + ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || + ip->i_d.di_nextents > ip->i_df.if_ext_max); + + iip = ip->i_itemp; + mp = ip->i_mount; + + /* + * If the inode isn't dirty, then just release the inode + * flush lock and do nothing. + */ + if ((ip->i_update_core == 0) && + ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { + ASSERT((iip != NULL) ? + !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1); + xfs_ifunlock(ip); + return 0; + } + + /* + * We can't flush the inode until it is unpinned, so + * wait for it. 
We know no one new can pin it, because
+ * we are holding the inode lock shared and you need
+ * to hold it exclusively to pin the inode.
+ */
+ xfs_iunpin_wait(ip);
+
+ /*
+ * This may have been unpinned because the filesystem is shutting
+ * down forcibly. If that's the case we must not write this inode
+ * to disk, because the log record didn't make it to disk!
+ */
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ ip->i_update_core = 0;
+ if (iip)
+ iip->ili_format.ilf_fields = 0;
+ xfs_ifunlock(ip);
+ return XFS_ERROR(EIO);
+ }
+
+ /*
+ * Get the buffer containing the on-disk inode.
+ */
+ error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0);
+ if (error != 0) {
+ xfs_ifunlock(ip);
+ return error;
+ }
+
+ /*
+ * Decide how buffer will be flushed out. This is done before
+ * the call to xfs_iflush_int because this field is zeroed by it.
+ */
+ if (iip != NULL && iip->ili_format.ilf_fields != 0) {
+ /*
+ * Flush out the inode buffer according to the directions
+ * of the caller. In the cases where the caller has given
+ * us a choice, choose the non-delwri case. This is because
+ * the inode is in the AIL and we need to get it out soon.
+ */
+ switch (flags) {
+ case XFS_IFLUSH_SYNC:
+ case XFS_IFLUSH_DELWRI_ELSE_SYNC:
+ flags = 0;
+ break;
+ case XFS_IFLUSH_ASYNC:
+ case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
+ flags = INT_ASYNC;
+ break;
+ case XFS_IFLUSH_DELWRI:
+ flags = INT_DELWRI;
+ break;
+ default:
+ ASSERT(0);
+ flags = 0;
+ break;
+ }
+ } else {
+ switch (flags) {
+ case XFS_IFLUSH_DELWRI_ELSE_SYNC:
+ case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
+ case XFS_IFLUSH_DELWRI:
+ flags = INT_DELWRI;
+ break;
+ case XFS_IFLUSH_ASYNC:
+ flags = INT_ASYNC;
+ break;
+ case XFS_IFLUSH_SYNC:
+ flags = 0;
+ break;
+ default:
+ ASSERT(0);
+ flags = 0;
+ break;
+ }
+ }
+
+ /*
+ * First flush out the inode that xfs_iflush was called with.
+ */
+ error = xfs_iflush_int(ip, bp);
+ if (error) {
+ goto corrupt_out;
+ }
+
+ /*
+ * inode clustering:
+ * see if other inodes can be gathered into this write
+ */
+
+#ifdef DEBUG
+ ip->i_chash->chl_buf = bp; /* inode clustering debug */
+#endif
+
+ ch = XFS_CHASH(mp, ip->i_blkno);
+ s = mutex_spinlock(&ch->ch_lock);
+
+ clcount = 0;
+ for (iq = ip->i_cnext; iq != ip; iq = iq->i_cnext) {
+ /*
+ * Do an un-protected check to see if the inode is dirty and
+ * is a candidate for flushing. These checks will be repeated
+ * later after the appropriate locks are acquired.
+ */
+ iip = iq->i_itemp;
+ if ((iq->i_update_core == 0) &&
+ ((iip == NULL) ||
+ !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
+ xfs_ipincount(iq) == 0) {
+ continue;
+ }
+
+ /*
+ * Try to get locks. If any are unavailable,
+ * then this inode cannot be flushed and is skipped.
+ */
+
+ /* get inode locks (just i_lock) */
+ if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
+ /* get inode flush lock */
+ if (xfs_iflock_nowait(iq)) {
+ /* check if pinned */
+ if (xfs_ipincount(iq) == 0) {
+ /* arriving here means that
+ * this inode can be flushed.
+ * first re-check that it's + * dirty + */ + iip = iq->i_itemp; + if ((iq->i_update_core != 0)|| + ((iip != NULL) && + (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { + clcount++; + error = xfs_iflush_int(iq, bp); + if (error) { + xfs_iunlock(iq, + XFS_ILOCK_SHARED); + goto cluster_corrupt_out; + } + } else { + xfs_ifunlock(iq); + } + } else { + xfs_ifunlock(iq); + } + } + xfs_iunlock(iq, XFS_ILOCK_SHARED); + } + } + mutex_spinunlock(&ch->ch_lock, s); + + if (clcount) { + XFS_STATS_INC(xfsstats.xs_icluster_flushcnt); + XFS_STATS_ADD(xfsstats.xs_icluster_flushinode, clcount); + } + + /* + * If the buffer is pinned then push on the log so we won't + * get stuck waiting in the write for too long. + */ + if (XFS_BUF_ISPINNED(bp)){ + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + } + + if (flags & INT_DELWRI) { + xfs_bdwrite(mp, bp); + } else if (flags & INT_ASYNC) { + xfs_bawrite(mp, bp); + } else { + error = xfs_bwrite(mp, bp); + } + return error; + +corrupt_out: + xfs_buf_relse(bp); + xfs_force_shutdown(mp, XFS_CORRUPT_INCORE); + xfs_iflush_abort(ip); + /* + * Unlocks the flush lock + */ + return XFS_ERROR(EFSCORRUPTED); + +cluster_corrupt_out: + /* Corruption detected in the clustering loop. Invalidate the + * inode buffer and shut down the filesystem. + */ + mutex_spinunlock(&ch->ch_lock, s); + + /* + * Clean up the buffer. If it was B_DELWRI, just release it -- + * brelse can handle it with no problems. If not, shut down the + * filesystem before releasing the buffer. + */ + if ((bufwasdelwri= XFS_BUF_ISDELAYWRITE(bp))) { + xfs_buf_relse(bp); + } + + xfs_force_shutdown(mp, XFS_CORRUPT_INCORE); + + if(!bufwasdelwri) { + /* + * Just like incore_relse: if we have b_iodone functions, + * mark the buffer as an error and call them. Otherwise + * mark it as stale and brelse. + */ + if (XFS_BUF_IODONE_FUNC(bp)) { + XFS_BUF_CLR_BDSTRAT_FUNC(bp); + XFS_BUF_UNDONE(bp); + XFS_BUF_STALE(bp); + XFS_BUF_SHUT(bp); + XFS_BUF_ERROR(bp,EIO); + xfs_biodone(bp); + } else { + XFS_BUF_STALE(bp); + xfs_buf_relse(bp); + } + } + + xfs_iflush_abort(iq); + /* + * Unlocks the flush lock + */ + return XFS_ERROR(EFSCORRUPTED); +} + + +STATIC int +xfs_iflush_int( + xfs_inode_t *ip, + xfs_buf_t *bp) +{ + xfs_inode_log_item_t *iip; + xfs_dinode_t *dip; + xfs_mount_t *mp; +#ifdef XFS_TRANS_DEBUG + int first; +#endif + SPLDECL(s); + + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); + ASSERT(valusema(&ip->i_flock) <= 0); + ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || + ip->i_d.di_nextents > ip->i_df.if_ext_max); + + iip = ip->i_itemp; + mp = ip->i_mount; + + + /* + * If the inode isn't dirty, then just release the inode + * flush lock and do nothing. + */ + if ((ip->i_update_core == 0) && + ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { + xfs_ifunlock(ip); + return 0; + } + + /* set *dip = inode's place in the buffer */ + dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset); + + /* + * Clear i_update_core before copying out the data. + * This is for coordination with our timestamp updates + * that don't hold the inode lock. They will always + * update the timestamps BEFORE setting i_update_core, + * so if we clear i_update_core after they set it we + * are guaranteed to see their updates to the timestamps. + * I believe that this depends on strongly ordered memory + * semantics, but we have that. We use the SYNCHRONIZE + * macro to make sure that the compiler does not reorder + * the i_update_core access below the data copy below. 
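+ *
+ * (Editor's note: a user-space analogue of this clear-then-copy
+ * handshake, sketched with C11 atomics standing in for the
+ * SYNCHRONIZE() barrier; names here are hypothetical:
+ *
+ *	-- flusher --
+ *	atomic_store(&update_core, 0);
+ *	copy_core_to_disk_buffer();
+ *
+ *	-- lockless timestamp updater --
+ *	write_timestamps();
+ *	atomic_store(&update_core, 1);
+ *
+ * If the flusher's copy misses a concurrent timestamp write, the
+ * flag is set again afterwards and the inode simply remains dirty
+ * for the next flush.)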
+ */ + ip->i_update_core = 0; + SYNCHRONIZE(); + + if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC, + mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p", + ip->i_ino, (int) INT_GET(dip->di_core.di_magic, ARCH_CONVERT), dip); + goto corrupt_out; + } + if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, + mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x", + ip->i_ino, ip, ip->i_d.di_magic); + goto corrupt_out; + } + if ((ip->i_d.di_mode & IFMT) == IFREG) { + if (XFS_TEST_ERROR( + (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && + (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), + mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: Bad regular inode %Lu, ptr 0x%p", + ip->i_ino, ip); + goto corrupt_out; + } + } else if ((ip->i_d.di_mode & IFMT) == IFDIR) { + if (XFS_TEST_ERROR( + (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && + (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && + (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), + mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: Bad directory inode %Lu, ptr 0x%p", + ip->i_ino, ip); + goto corrupt_out; + } + } + if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > + ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, + XFS_RANDOM_IFLUSH_5)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p", + ip->i_ino, + ip->i_d.di_nextents + ip->i_d.di_anextents, + ip->i_d.di_nblocks, + ip); + goto corrupt_out; + } + if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, + mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p", + ip->i_ino, ip->i_d.di_forkoff, ip); + goto corrupt_out; + } + /* + * Copy the dirty parts of the inode into the on-disk + * inode. We always copy out the core of the inode, + * because if the inode is dirty at all the core must + * be. + */ + xfs_xlate_dinode_core((xfs_caddr_t)&(dip->di_core), &(ip->i_d), + -1, ARCH_CONVERT); + + /* + * If this is really an old format inode and the superblock version + * has not been updated to support only new format inodes, then + * convert back to the old inode format. If the superblock version + * has been updated, then make the conversion permanent. + */ + ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 || + XFS_SB_VERSION_HASNLINK(&mp->m_sb)); + if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { + if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { + /* + * Convert it back. + */ + ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); + INT_SET(dip->di_core.di_onlink, ARCH_CONVERT, ip->i_d.di_nlink); + } else { + /* + * The superblock version has already been bumped, + * so just make the conversion to the new inode + * format permanent. 
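+ *
+ * (Editor's note: the two version-1 branches above reduce to a
+ * small decision table; a hedged sketch using the field names from
+ * the core above:
+ *
+ *	if (di_version == 1 && !sb_has_nlink)
+ *		di_onlink = di_nlink;		-- stay in old format
+ *	else if (di_version == 1)
+ *		{ di_version = 2; di_onlink = 0; }  -- upgrade for good
+ *
+ * where sb_has_nlink stands for XFS_SB_VERSION_HASNLINK().)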
+ */ + ip->i_d.di_version = XFS_DINODE_VERSION_2; + INT_SET(dip->di_core.di_version, ARCH_CONVERT, XFS_DINODE_VERSION_2); + ip->i_d.di_onlink = 0; + INT_ZERO(dip->di_core.di_onlink, ARCH_CONVERT); + memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); + memset(&(dip->di_core.di_pad[0]), 0, + sizeof(dip->di_core.di_pad)); + ASSERT(ip->i_d.di_projid == 0); + } + } + + if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) { + goto corrupt_out; + } + + if (XFS_IFORK_Q(ip)) { + /* + * The only error from xfs_iflush_fork is on the data fork. + */ + (void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); + } + xfs_inobp_check(mp, bp); + + /* + * We've recorded everything logged in the inode, so we'd + * like to clear the ilf_fields bits so we don't log and + * flush things unnecessarily. However, we can't stop + * logging all this information until the data we've copied + * into the disk buffer is written to disk. If we did we might + * overwrite the copy of the inode in the log with all the + * data after re-logging only part of it, and in the face of + * a crash we wouldn't have all the data we need to recover. + * + * What we do is move the bits to the ili_last_fields field. + * When logging the inode, these bits are moved back to the + * ilf_fields field. In the xfs_iflush_done() routine we + * clear ili_last_fields, since we know that the information + * those bits represent is permanently on disk. As long as + * the flush completes before the inode is logged again, then + * both ilf_fields and ili_last_fields will be cleared. + * + * We can play with the ilf_fields bits here, because the inode + * lock must be held exclusively in order to set bits there + * and the flush lock protects the ili_last_fields bits. + * Set ili_logged so the flush done + * routine can tell whether or not to look in the AIL. + * Also, store the current LSN of the inode so that we can tell + * whether the item has moved in the AIL from xfs_iflush_done(). + * In order to read the lsn we need the AIL lock, because + * it is a 64 bit value that cannot be read atomically. + */ + if (iip != NULL && iip->ili_format.ilf_fields != 0) { + iip->ili_last_fields = iip->ili_format.ilf_fields; + iip->ili_format.ilf_fields = 0; + iip->ili_logged = 1; + + ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ + AIL_LOCK(mp,s); + iip->ili_flush_lsn = iip->ili_item.li_lsn; + AIL_UNLOCK(mp, s); + + /* + * Attach the function xfs_iflush_done to the inode's + * buffer. This will remove the inode from the AIL + * and unlock the inode's flush lock when the inode is + * completely written to disk. + */ + xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) + xfs_iflush_done, (xfs_log_item_t *)iip); + + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); + } else { + /* + * We're flushing an inode which is not in the AIL and has + * not been logged but has i_update_core set. For this + * case we can use a B_DELWRI flush and immediately drop + * the inode flush lock because we can avoid the whole + * AIL state thing. It's OK to drop the flush lock now, + * because we've already locked the buffer and to do anything + * you really need both. + */ + if (iip != NULL) { + ASSERT(iip->ili_logged == 0); + ASSERT(iip->ili_last_fields == 0); + ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); + } + xfs_ifunlock(ip); + } + + return 0; + +corrupt_out: + return XFS_ERROR(EFSCORRUPTED); +} + +/* + * Flush all inactive inodes in mp. 
Return true if no user references + * were found, false otherwise. + */ +int +xfs_iflush_all( + xfs_mount_t *mp, + int flag) +{ + int busy; + int done; + int purged; + xfs_inode_t *ip; + vmap_t vmap; + vnode_t *vp; + + busy = done = 0; + while (!done) { + purged = 0; + XFS_MOUNT_ILOCK(mp); + ip = mp->m_inodes; + if (ip == NULL) { + break; + } + do { + /* Make sure we skip markers inserted by sync */ + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + + /* + * It's up to our caller to purge the root + * and quota vnodes later. + */ + vp = XFS_ITOV_NULL(ip); + + if (!vp) { + XFS_MOUNT_IUNLOCK(mp); + xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); + purged = 1; + break; + } + + if (vn_count(vp) != 0) { + if (vn_count(vp) == 1 && + (ip == mp->m_rootip || + (mp->m_quotainfo && + (ip->i_ino == mp->m_sb.sb_uquotino || + ip->i_ino == mp->m_sb.sb_gquotino)))) { + + ip = ip->i_mnext; + continue; + } + if (!(flag & XFS_FLUSH_ALL)) { + ASSERT(0); + busy = 1; + done = 1; + break; + } + /* + * Ignore busy inodes but continue flushing + * others. + */ + ip = ip->i_mnext; + continue; + } + /* + * Sample vp mapping while holding mp locked on MP + * systems, so we don't purge a reclaimed or + * nonexistent vnode. We break from the loop + * since we know that we modify + * it by pulling ourselves from it in xfs_reclaim() + * called via vn_purge() below. Set ip to the next + * entry in the list anyway so we'll know below + * whether we reached the end or not. + */ + VMAP(vp, vmap); + XFS_MOUNT_IUNLOCK(mp); + + vn_purge(vp, &vmap); + + purged = 1; + break; + } while (ip != mp->m_inodes); + /* + * We need to distinguish between when we exit the loop + * after a purge and when we simply hit the end of the + * list. We can't use the (ip == mp->m_inodes) test, + * because when we purge an inode at the start of the list + * the next inode on the list becomes mp->m_inodes. That + * would cause such a test to bail out early. The purged + * variable tells us how we got out of the loop. + */ + if (!purged) { + done = 1; + } + } + XFS_MOUNT_IUNLOCK(mp); + return !busy; +} + + +/* + * xfs_iaccess: check accessibility of inode for mode. + */ +int +xfs_iaccess( + xfs_inode_t *ip, + mode_t mode, + cred_t *cr) +{ + int error; + mode_t orgmode = mode; + struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + + /* + * Verify that the MAC policy allows the requested access. + */ + if ((error = _MAC_XFS_IACCESS(ip, mode, cr))) + return XFS_ERROR(error); + + if (mode & IWRITE) { + umode_t imode = inode->i_mode; + + if (IS_RDONLY(inode) && + (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode))) + return XFS_ERROR(EROFS); + } + + /* + * If there's an Access Control List it's used instead of + * the mode bits. + */ + if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1) + return error ? XFS_ERROR(error) : 0; + + if (current->fsuid != ip->i_d.di_uid) { + mode >>= 3; + if (!in_group_p((gid_t)ip->i_d.di_gid)) + mode >>= 3; + } + + /* + * If the DACs are ok we don't need any capability check. + */ + if ((ip->i_d.di_mode & mode) == mode) + return 0; + /* + * Read/write DACs are always overridable. + * Executable DACs are overridable if at least one exec bit is set. 
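+ *
+ * (Editor's note: the owner/group/other selection earlier in this
+ * function is the classic mode shift: the requested bits start in
+ * the owner position and are shifted down a class at a time. A
+ * stand-alone sketch, names hypothetical:
+ *
+ *	int mode_ok(mode_t file_mode, mode_t want, int owner, int group)
+ *	{
+ *		if (!owner) {
+ *			want >>= 3;		-- try the group bits
+ *			if (!group)
+ *				want >>= 3;	-- fall back to "other"
+ *		}
+ *		return (file_mode & want) == want;
+ *	})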
+ */
+ if ((orgmode & (IREAD|IWRITE)) || (inode->i_mode & S_IXUGO))
+ if (capable_cred(cr, CAP_DAC_OVERRIDE))
+ return 0;
+
+ if ((orgmode == IREAD) ||
+ (((ip->i_d.di_mode & IFMT) == IFDIR) &&
+ (!(orgmode & ~(IWRITE|IEXEC))))) {
+ if (capable_cred(cr, CAP_DAC_READ_SEARCH))
+ return 0;
+#ifdef NOISE
+ cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode);
+#endif /* NOISE */
+ return XFS_ERROR(EACCES);
+ }
+ return XFS_ERROR(EACCES);
+}
+
+/*
+ * Return whether or not it is OK to swap to the given file in the
+ * given range. Return 0 for OK and otherwise return the error.
+ *
+ * It is only OK to swap to a file if it has no holes, and all
+ * extents have been initialized.
+ *
+ * We use the vnode behavior chain prevent and allow primitives
+ * to ensure that the vnode chain stays coherent while we do this.
+ * This allows us to walk the chain down to the bottom where XFS
+ * lives without worrying about it changing out from under us.
+ */
+int
+xfs_swappable(
+ bhv_desc_t *bdp)
+{
+ xfs_inode_t *ip;
+
+ ip = XFS_BHVTOI(bdp);
+ /*
+ * Verify that the file does not have any
+ * holes or unwritten extents.
+ */
+ return xfs_bmap_check_swappable(ip);
+}
+
+/*
+ * xfs_iroundup: round up argument to next power of two
+ */
+uint
+xfs_iroundup(
+ uint v)
+{
+ int i;
+ uint m;
+
+ if ((v & (v - 1)) == 0)
+ return v;
+ ASSERT((v & 0x80000000) == 0);
+ if ((v & (v + 1)) == 0)
+ return v + 1;
+ for (i = 0, m = 1; i < 31; i++, m <<= 1) {
+ if (v & m)
+ continue;
+ v |= m;
+ if ((v & (v + 1)) == 0)
+ return v + 1;
+ }
+ ASSERT(0);
+ return 0;
+}
+
+/*
+ * Change the requested timestamp in the given inode.
+ * We don't lock across timestamp updates, and we don't log them but
+ * we do record the fact that there is dirty information in core.
+ *
+ * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG
+ * with XFS_ICHGTIME_ACC to be sure that access time
+ * update will take. Calling first with XFS_ICHGTIME_ACC
+ * and then XFS_ICHGTIME_MOD may fail to modify the access
+ * timestamp if the filesystem is mounted noatime.
+ */
+void
+xfs_ichgtime(xfs_inode_t *ip,
+ int flags)
+{
+ timespec_t tv;
+ vnode_t *vp = XFS_ITOV(ip);
+ struct inode *inode = LINVFS_GET_IP(vp);
+
+ /*
+ * We're not supposed to change timestamps in readonly-mounted
+ * filesystems. Throw it away if anyone asks us.
+ */
+ if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
+ return;
+
+ /*
+ * Don't update access timestamps on reads if mounted "noatime".
+ * Throw it away if anyone asks us.
+ */
+ if (ip->i_mount->m_flags & XFS_MOUNT_NOATIME &&
+ ((flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG))
+ == XFS_ICHGTIME_ACC))
+ return;
+
+ nanotime(&tv);
+ if (flags & XFS_ICHGTIME_MOD) {
+ inode->i_mtime = ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
+ ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
+ }
+ if (flags & XFS_ICHGTIME_ACC) {
+ inode->i_atime = ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec;
+ ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec;
+ }
+ if (flags & XFS_ICHGTIME_CHG) {
+ inode->i_ctime = ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec;
+ ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec;
+ }
+
+ /*
+ * We update the i_update_core field _after_ changing
+ * the timestamps in order to coordinate properly with
+ * xfs_iflush() so that we don't lose timestamp updates.
+ * This keeps us from having to hold the inode lock
+ * while doing this. We use the SYNCHRONIZE macro to
+ * ensure that the compiler does not reorder the update
+ * of i_update_core above the timestamp updates above.
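+ *
+ * (Editor's note: xfs_iroundup() above finds the next power of two
+ * bit by bit; the usual branch-free formulation is the bit smear
+ * below, equivalent over the 0 < v <= 2^31 range asserted there:
+ *
+ *	uint roundup_pow2(uint v)
+ *	{
+ *		v--;
+ *		v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
+ *		v |= v >> 8;  v |= v >> 16;
+ *		return v + 1;
+ *	}
+ *
+ * Like the original, it returns v unchanged when v is already a
+ * power of two.)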
+ */ + SYNCHRONIZE(); + ip->i_update_core = 1; + if (!(inode->i_state & I_LOCK)) + mark_inode_dirty(inode); +} + +#ifdef XFS_ILOCK_TRACE +void +xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) +{ + ktrace_enter(ip->i_lock_trace, + (void *)ip, + (void *)(__psint_t)lock, /* 1 = LOCK, 3=UNLOCK, etc */ + (void *)(__psint_t)lockflags, /* XFS_ILOCK_EXCL etc */ + (void *)ra, /* caller of ilock */ + (void *)(__psint_t)cpuid(), + (void *)(__psint_t)current_pid(), + 0,0,0,0,0,0,0,0,0,0); + +} +#endif /* ILOCK_TRACE */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_inode.h linux.21rc5-ac2/fs/xfs/xfs_inode.h --- linux.21rc5/fs/xfs/xfs_inode.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_inode.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,556 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_INODE_H__ +#define __XFS_INODE_H__ + +/* + * File incore extent information, present for each of data & attr forks. + */ +#define XFS_INLINE_EXTS 2 +#define XFS_INLINE_DATA 32 +typedef struct xfs_ifork { + int if_bytes; /* bytes in if_u1 */ + int if_real_bytes; /* bytes allocated in if_u1 */ + xfs_bmbt_block_t *if_broot; /* file's incore btree root */ + short if_broot_bytes; /* bytes allocated for root */ + unsigned char if_flags; /* per-fork flags */ + unsigned char if_ext_max; /* max # of extent records */ + xfs_extnum_t if_lastex; /* last if_extents used */ + union { + xfs_bmbt_rec_t *if_extents; /* linear map file exts */ + char *if_data; /* inline file data */ + } if_u1; + union { + xfs_bmbt_rec_t if_inline_ext[XFS_INLINE_EXTS]; + /* very small file extents */ + char if_inline_data[XFS_INLINE_DATA]; + /* very small file data */ + xfs_dev_t if_rdev; /* dev number if special */ + uuid_t if_uuid; /* mount point value */ + } if_u2; +} xfs_ifork_t; + +/* + * Flags for xfs_ichgtime(). + */ +#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */ +#define XFS_ICHGTIME_ACC 0x2 /* data fork access timestamp */ +#define XFS_ICHGTIME_CHG 0x4 /* inode field change timestamp */ + +/* + * Per-fork incore inode flags. 
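+ *
+ * (Editor's note: these flags discriminate the if_u1/if_u2 unions
+ * above; there is no tag field in the fork itself. A hedged access
+ * sketch using the XFS_IF* flags defined just below:
+ *
+ *	if (ifp->if_flags & XFS_IFINLINE)
+ *		data = ifp->if_u1.if_data;	-- short-form data
+ *	else if (ifp->if_flags & XFS_IFEXTENTS)
+ *		recs = ifp->if_u1.if_extents;	-- linear extent array
+ *
+ * Reading the wrong union member for the current state would be a
+ * type-confusion bug, which is why every access site checks the
+ * flags or the fork format first.)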
+ */
+#define XFS_IFINLINE 0x0001 /* Inline data is read in */
+#define XFS_IFEXTENTS 0x0002 /* All extent pointers are read in */
+#define XFS_IFBROOT 0x0004 /* i_broot points to the bmap b-tree root */
+
+/*
+ * Flags for xfs_imap() and xfs_dilocate().
+ */
+#define XFS_IMAP_LOOKUP 0x1
+
+/*
+ * Maximum number of extent pointers in if_u1.if_extents.
+ */
+#define XFS_MAX_INCORE_EXTENTS 32768
+
+
+#ifdef __KERNEL__
+struct bhv_desc;
+struct cred;
+struct ktrace;
+struct vnode;
+struct xfs_buf;
+struct xfs_bmap_free;
+struct xfs_bmbt_irec;
+struct xfs_bmbt_block;
+struct xfs_inode;
+struct xfs_inode_log_item;
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_dquot;
+
+
+/*
+ * This structure is used to communicate which extents of a file
+ * were holes when a write started from xfs_write_file() to
+ * xfs_strat_read(). This is necessary so that we can know which
+ * blocks need to be zeroed when they are read in by xfs_strat_read()
+ * if they weren't allocated when the buffer given to xfs_strat_read()
+ * was mapped.
+ *
+ * We keep a list of these attached to the inode. The list is
+ * protected by the inode lock and the fact that the io lock is
+ * held exclusively by writers.
+ */
+typedef struct xfs_gap {
+ struct xfs_gap *xg_next;
+ xfs_fileoff_t xg_offset_fsb;
+ xfs_extlen_t xg_count_fsb;
+} xfs_gap_t;
+
+typedef struct dm_attrs_s {
+ __uint32_t da_dmevmask; /* DMIG event mask */
+ __uint16_t da_dmstate; /* DMIG state info */
+ __uint16_t da_pad; /* DMIG extra padding */
+} dm_attrs_t;
+
+typedef struct xfs_iocore {
+ void *io_obj; /* pointer to container
+ * inode or dcxvn structure */
+ struct xfs_mount *io_mount; /* fs mount struct ptr */
+#ifdef DEBUG
+ mrlock_t *io_lock; /* inode IO lock */
+ mrlock_t *io_iolock; /* inode IO lock */
+#endif
+
+ /* I/O state */
+ xfs_fsize_t io_new_size; /* sz when write completes */
+
+ /* Miscellaneous state. */
+ unsigned int io_flags; /* IO related flags */
+
+ /* DMAPI state */
+ dm_attrs_t io_dmattrs;
+
+} xfs_iocore_t;
+
+#define io_dmevmask io_dmattrs.da_dmevmask
+#define io_dmstate io_dmattrs.da_dmstate
+
+#define XFS_IO_INODE(io) ((xfs_inode_t *) ((io)->io_obj))
+#define XFS_IO_DCXVN(io) ((dcxvn_t *) ((io)->io_obj))
+
+/*
+ * Flags in the flags field
+ */
+
+#define XFS_IOCORE_RT 0x1
+
+/*
+ * xfs_iocore prototypes
+ */
+
+extern void xfs_iocore_inode_init(struct xfs_inode *);
+extern void xfs_iocore_inode_reinit(struct xfs_inode *);
+
+
+/*
+ * This is the type used in the xfs inode hash table.
+ * An array of these is allocated for each mounted
+ * file system to hash the inodes for that file system.
+ */
+typedef struct xfs_ihash {
+ struct xfs_inode *ih_next;
+ rwlock_t ih_lock;
+ uint ih_version;
+} xfs_ihash_t;
+
+/*
+ * Inode hashing and hash bucket locking.
+ */
+#define XFS_BUCKETS(mp) (37*(mp)->m_sb.sb_agcount-1)
+#define XFS_IHASH(mp,ino) ((mp)->m_ihash + (((uint)ino) % (mp)->m_ihsize))
+
+/*
+ * This is the xfs inode cluster hash. This hash is used by xfs_iflush to
+ * find inodes that share a cluster and can be flushed to disk at the same
+ * time.
+ */
+
+typedef struct xfs_chashlist {
+ struct xfs_chashlist *chl_next;
+ struct xfs_inode *chl_ip;
+ xfs_daddr_t chl_blkno; /* starting block number of
+ * the cluster */
+#ifdef DEBUG
+ struct xfs_buf *chl_buf; /* debug: the inode buffer */
+#endif
+} xfs_chashlist_t;
+
+typedef struct xfs_chash {
+ xfs_chashlist_t *ch_list;
+ lock_t ch_lock;
+} xfs_chash_t;
+
+
+/*
+ * This is the xfs in-core inode structure.
+ * Most of the on-disk inode is embedded in the i_d field.
+ *
+ * The extent pointers/inline file space, however, are managed
+ * separately. The memory for this information is pointed to by
+ * the if_u1 unions depending on the type of the data.
+ * This is used to linearize the array of extents for fast in-core
+ * access. This scheme is used until the file's number of extents
+ * surpasses XFS_MAX_INCORE_EXTENTS, at which point all extent pointers
+ * are accessed through the buffer cache.
+ *
+ * Other state kept in the in-core inode is used for identification,
+ * locking, transactional updating, etc. of the inode.
+ *
+ * Generally, we do not want to hold the i_rlock while holding the
+ * i_ilock. Hierarchy is i_iolock followed by i_rlock.
+ *
+ * xfs_iptr_t contains all the inode fields up to and including the
+ * i_mnext and i_mprev fields; it is used as a marker in the inode
+ * chain off the mount structure by xfs_sync calls.
+ */
+
+typedef struct {
+ struct xfs_ihash *ip_hash; /* pointer to hash header */
+ struct xfs_inode *ip_next; /* inode hash link forw */
+ struct xfs_inode *ip_mnext; /* next inode in mount list */
+ struct xfs_inode *ip_mprev; /* ptr to prev inode */
+ struct xfs_inode **ip_prevp; /* ptr to prev i_next */
+ struct xfs_mount *ip_mount; /* fs mount struct ptr */
+} xfs_iptr_t;
+
+typedef struct xfs_inode {
+ /* Inode linking and identification information. */
+ struct xfs_ihash *i_hash; /* pointer to hash header */
+ struct xfs_inode *i_next; /* inode hash link forw */
+ struct xfs_inode *i_mnext; /* next inode in mount list */
+ struct xfs_inode *i_mprev; /* ptr to prev inode */
+ struct xfs_inode **i_prevp; /* ptr to prev i_next */
+ struct xfs_mount *i_mount; /* fs mount struct ptr */
+ struct list_head i_reclaim; /* reclaim list */
+ struct bhv_desc i_bhv_desc; /* inode behavior descriptor*/
+ struct xfs_dquot *i_udquot; /* user dquot */
+ struct xfs_dquot *i_gdquot; /* group dquot */
+
+ /* Inode location stuff */
+ xfs_ino_t i_ino; /* inode number (agno/agino)*/
+ xfs_daddr_t i_blkno; /* blkno of inode buffer */
+ ushort i_len; /* len of inode buffer */
+ ushort i_boffset; /* off of inode in buffer */
+
+ /* Extent information. */
+ xfs_ifork_t *i_afp; /* attribute fork pointer */
+ xfs_ifork_t i_df; /* data fork */
+
+ /* Transaction and locking information. */
+ struct xfs_trans *i_transp; /* ptr to owning transaction*/
+ struct xfs_inode_log_item *i_itemp; /* logging information */
+ mrlock_t i_lock; /* inode lock */
+ mrlock_t i_iolock; /* inode IO lock */
+ sema_t i_flock; /* inode flush lock */
+ atomic_t i_pincount; /* inode pin count */
+ wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */
+ struct xfs_inode **i_refcache; /* ptr to entry in ref cache */
+ struct xfs_inode *i_release; /* inode to unref */
+
+ /* I/O state */
+ xfs_iocore_t i_iocore; /* I/O core */
+
+ /* Miscellaneous state. */
+ unsigned short i_flags; /* see defined flags below */
+ unsigned char i_update_core; /* timestamps/size is dirty */
+ unsigned char i_update_size; /* di_size field is dirty */
+ unsigned int i_gen; /* generation count */
+ unsigned int i_delayed_blks; /* count of delay alloc blks */
+
+ xfs_dinode_core_t i_d; /* most of ondisk inode */
+ xfs_chashlist_t *i_chash; /* cluster hash list header */
+ struct xfs_inode *i_cnext; /* cluster hash link forward */
+ struct xfs_inode *i_cprev; /* cluster hash link backward */
+
+#ifdef DEBUG
+ /* Trace buffers per inode.
*/ + struct ktrace *i_xtrace; /* inode extent list trace */ + struct ktrace *i_btrace; /* inode bmap btree trace */ + struct ktrace *i_rwtrace; /* inode read/write trace */ + struct ktrace *i_strat_trace; /* inode strat_write trace */ + struct ktrace *i_lock_trace; /* inode lock/unlock trace */ + struct ktrace *i_dir_trace; /* inode directory trace */ +#endif /* DEBUG */ +} xfs_inode_t; + +#endif /* __KERNEL__ */ + + +/* + * Fork handling. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_PTR) +xfs_ifork_t *xfs_ifork_ptr(xfs_inode_t *ip, int w); +#define XFS_IFORK_PTR(ip,w) xfs_ifork_ptr(ip,w) +#else +#define XFS_IFORK_PTR(ip,w) ((w) == XFS_DATA_FORK ? &(ip)->i_df : (ip)->i_afp) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_Q) +int xfs_ifork_q(xfs_inode_t *ip); +#define XFS_IFORK_Q(ip) xfs_ifork_q(ip) +#else +#define XFS_IFORK_Q(ip) XFS_CFORK_Q(&(ip)->i_d) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_DSIZE) +int xfs_ifork_dsize(xfs_inode_t *ip); +#define XFS_IFORK_DSIZE(ip) xfs_ifork_dsize(ip) +#else +#define XFS_IFORK_DSIZE(ip) XFS_CFORK_DSIZE(&ip->i_d, ip->i_mount) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_ASIZE) +int xfs_ifork_asize(xfs_inode_t *ip); +#define XFS_IFORK_ASIZE(ip) xfs_ifork_asize(ip) +#else +#define XFS_IFORK_ASIZE(ip) XFS_CFORK_ASIZE(&ip->i_d, ip->i_mount) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_SIZE) +int xfs_ifork_size(xfs_inode_t *ip, int w); +#define XFS_IFORK_SIZE(ip,w) xfs_ifork_size(ip,w) +#else +#define XFS_IFORK_SIZE(ip,w) XFS_CFORK_SIZE(&ip->i_d, ip->i_mount, w) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_FORMAT) +int xfs_ifork_format(xfs_inode_t *ip, int w); +#define XFS_IFORK_FORMAT(ip,w) xfs_ifork_format(ip,w) +#else +#define XFS_IFORK_FORMAT(ip,w) XFS_CFORK_FORMAT(&ip->i_d, w) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_FMT_SET) +void xfs_ifork_fmt_set(xfs_inode_t *ip, int w, int n); +#define XFS_IFORK_FMT_SET(ip,w,n) xfs_ifork_fmt_set(ip,w,n) +#else +#define XFS_IFORK_FMT_SET(ip,w,n) XFS_CFORK_FMT_SET(&ip->i_d, w, n) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_NEXTENTS) +int xfs_ifork_nextents(xfs_inode_t *ip, int w); +#define XFS_IFORK_NEXTENTS(ip,w) xfs_ifork_nextents(ip,w) +#else +#define XFS_IFORK_NEXTENTS(ip,w) XFS_CFORK_NEXTENTS(&ip->i_d, w) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_NEXT_SET) +void xfs_ifork_next_set(xfs_inode_t *ip, int w, int n); +#define XFS_IFORK_NEXT_SET(ip,w,n) xfs_ifork_next_set(ip,w,n) +#else +#define XFS_IFORK_NEXT_SET(ip,w,n) XFS_CFORK_NEXT_SET(&ip->i_d, w, n) +#endif + + +#ifdef __KERNEL__ + +/* + * In-core inode flags. + */ +#define XFS_IGRIO 0x0001 /* inode used for guaranteed rate i/o */ +#define XFS_IUIOSZ 0x0002 /* inode i/o sizes have been explicitly set */ +#define XFS_IQUIESCE 0x0004 /* we have started quiescing for this inode */ +#define XFS_IRECLAIM 0x0008 /* we have started reclaiming this inode */ + +/* + * Flags for inode locking. 
+ */ +#define XFS_IOLOCK_EXCL 0x001 +#define XFS_IOLOCK_SHARED 0x002 +#define XFS_ILOCK_EXCL 0x004 +#define XFS_ILOCK_SHARED 0x008 +#define XFS_IUNLOCK_NONOTIFY 0x010 +#define XFS_EXTENT_TOKEN_RD 0x040 +#define XFS_SIZE_TOKEN_RD 0x080 +#define XFS_EXTSIZE_RD (XFS_EXTENT_TOKEN_RD|XFS_SIZE_TOKEN_RD) +#define XFS_WILLLEND 0x100 /* Always acquire tokens for lending */ +#define XFS_EXTENT_TOKEN_WR (XFS_EXTENT_TOKEN_RD | XFS_WILLLEND) +#define XFS_SIZE_TOKEN_WR (XFS_SIZE_TOKEN_RD | XFS_WILLLEND) +#define XFS_EXTSIZE_WR (XFS_EXTSIZE_RD | XFS_WILLLEND) + + +#define XFS_LOCK_MASK \ + (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL | \ + XFS_ILOCK_SHARED | XFS_EXTENT_TOKEN_RD | XFS_SIZE_TOKEN_RD | \ + XFS_WILLLEND) + +/* + * Flags for xfs_iflush() + */ +#define XFS_IFLUSH_DELWRI_ELSE_SYNC 1 +#define XFS_IFLUSH_DELWRI_ELSE_ASYNC 2 +#define XFS_IFLUSH_SYNC 3 +#define XFS_IFLUSH_ASYNC 4 +#define XFS_IFLUSH_DELWRI 5 + +/* + * Flags for xfs_iflush_all. + */ +#define XFS_FLUSH_ALL 0x1 + +/* + * Flags for xfs_itruncate_start(). + */ +#define XFS_ITRUNC_DEFINITE 0x1 +#define XFS_ITRUNC_MAYBE 0x2 + +/* + * max file offset is 2^(31+PAGE_SHIFT) - 1 (due to linux page cache) + * + * NOTE: XFS itself can handle 2^63 - 1 (largest positive value of xfs_fsize_t) + * but this is the Linux limit. + */ +#define XFS_MAX_FILE_OFFSET MAX_LFS_FILESIZE + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ITOV) +struct vnode *xfs_itov(xfs_inode_t *ip); +#define XFS_ITOV(ip) xfs_itov(ip) +#else +#define XFS_ITOV(ip) BHV_TO_VNODE(XFS_ITOBHV(ip)) +#endif +#define XFS_ITOV_NULL(ip) BHV_TO_VNODE_NULL(XFS_ITOBHV(ip)) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ITOBHV) +struct bhv_desc *xfs_itobhv(xfs_inode_t *ip); +#define XFS_ITOBHV(ip) xfs_itobhv(ip) +#else +#define XFS_ITOBHV(ip) ((struct bhv_desc *)(&((ip)->i_bhv_desc))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BHVTOI) +xfs_inode_t *xfs_bhvtoi(struct bhv_desc *bhvp); +#define XFS_BHVTOI(bhvp) xfs_bhvtoi(bhvp) +#else +#define XFS_BHVTOI(bhvp) \ + ((xfs_inode_t *)((char *)(bhvp) - \ + (char *)&(((xfs_inode_t *)0)->i_bhv_desc))) +#endif + +#define BHV_IS_XFS(bdp) (BHV_OPS(bdp) == &xfs_vnodeops) + +/* + * Pick the inode cluster hash bucket + * (m_chash is the same size as m_ihash) + */ +#define XFS_CHASH(mp,blk) ((mp)->m_chash + (((uint)blk) % (mp)->m_chsize)) + +/* + * For multiple groups support: if ISGID bit is set in the parent + * directory, group of new file is set to that of the parent, and + * new subdirectory gets ISGID bit from parent. + */ +#define XFS_INHERIT_GID(pip, vfsp) ((pip) != NULL && \ + (((vfsp)->vfs_flag & VFS_GRPID) || ((pip)->i_d.di_mode & ISGID))) + +/* + * xfs_iget.c prototypes. 
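+ *
+ * (Editor's note: XFS_BHVTOI above is the familiar container-of
+ * idiom -- recover the enclosing structure by subtracting the
+ * offset of the embedded member. A generic sketch:
+ *
+ *	#include <stddef.h>
+ *	#define container_of(p, type, member) \
+ *		((type *)((char *)(p) - offsetof(type, member)))
+ *
+ *	xfs_inode_t *ip = container_of(bdp, xfs_inode_t, i_bhv_desc);
+ *
+ * The open-coded cast through a null pointer in XFS_BHVTOI predates
+ * a common offsetof() here and computes the same offset.)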
+ */ +void xfs_ihash_init(struct xfs_mount *); +void xfs_ihash_free(struct xfs_mount *); +void xfs_chash_init(struct xfs_mount *); +void xfs_chash_free(struct xfs_mount *); +xfs_inode_t *xfs_inode_incore(struct xfs_mount *, xfs_ino_t, + struct xfs_trans *); +void xfs_inode_lock_init(xfs_inode_t *, struct vnode *); +int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, + uint, xfs_inode_t **, xfs_daddr_t); +void xfs_iput(xfs_inode_t *, uint); +void xfs_iput_new(xfs_inode_t *, uint); +void xfs_ilock(xfs_inode_t *, uint); +int xfs_ilock_nowait(xfs_inode_t *, uint); +void xfs_iunlock(xfs_inode_t *, uint); +void xfs_ilock_demote(xfs_inode_t *, uint); +void xfs_iflock(xfs_inode_t *); +int xfs_iflock_nowait(xfs_inode_t *); +uint xfs_ilock_map_shared(xfs_inode_t *); +void xfs_iunlock_map_shared(xfs_inode_t *, uint); +void xfs_ifunlock(xfs_inode_t *); +void xfs_ireclaim(xfs_inode_t *); +int xfs_finish_reclaim(xfs_inode_t *, int, int); +int xfs_finish_reclaim_all(struct xfs_mount *, int); + +/* + * xfs_inode.c prototypes. + */ +int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, + xfs_dinode_t **, struct xfs_buf **, int *); +int xfs_itobp(struct xfs_mount *, struct xfs_trans *, + xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **, + xfs_daddr_t); +int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, + xfs_inode_t **, xfs_daddr_t); +int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int); +int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, nlink_t, + xfs_dev_t, struct cred *, xfs_prid_t, int, + struct xfs_buf **, boolean_t *, xfs_inode_t **); +void xfs_xlate_dinode_core(xfs_caddr_t, struct xfs_dinode_core *, int, + xfs_arch_t); +int xfs_ifree(struct xfs_trans *, xfs_inode_t *); +int xfs_atruncate_start(xfs_inode_t *); +void xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t); +int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, + xfs_fsize_t, int, int); +int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); +int xfs_igrow_start(xfs_inode_t *, xfs_fsize_t, struct cred *); +void xfs_igrow_finish(struct xfs_trans *, xfs_inode_t *, + xfs_fsize_t, int); + +void xfs_idestroy_fork(xfs_inode_t *, int); +void xfs_idestroy(xfs_inode_t *); +void xfs_idata_realloc(xfs_inode_t *, int, int); +void xfs_iextract(xfs_inode_t *); +void xfs_iext_realloc(xfs_inode_t *, int, int); +void xfs_iroot_realloc(xfs_inode_t *, int, int); +void xfs_ipin(xfs_inode_t *); +void xfs_iunpin(xfs_inode_t *); +int xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_t *, int); +int xfs_iflush(xfs_inode_t *, uint); +int xfs_iflush_all(struct xfs_mount *, int); +int xfs_iaccess(xfs_inode_t *, mode_t, cred_t *); +uint xfs_iroundup(uint); +void xfs_ichgtime(xfs_inode_t *, int); +xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); +void xfs_lock_inodes(xfs_inode_t **, int, int, uint); + +#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) + +#ifdef DEBUG +void xfs_isize_check(struct xfs_mount *, xfs_inode_t *, xfs_fsize_t); +#else /* DEBUG */ +#define xfs_isize_check(mp, ip, isize) +#endif /* DEBUG */ + +#if defined(DEBUG) +void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); +#else +#define xfs_inobp_check(mp, bp) +#endif /* DEBUG */ + +extern struct kmem_zone *xfs_chashlist_zone; +extern struct kmem_zone *xfs_ifork_zone; +extern struct kmem_zone *xfs_inode_zone; +extern struct kmem_zone *xfs_ili_zone; +extern struct vnodeops xfs_vnodeops; + +#ifdef XFS_ILOCK_TRACE +#define XFS_ILOCK_KTRACE_SIZE 32 +void xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, 
+ inst_t *ra); +#endif + +#endif /* __KERNEL__ */ + +#endif /* __XFS_INODE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_inode_item.c linux.21rc5-ac2/fs/xfs/xfs_inode_item.c --- linux.21rc5/fs/xfs/xfs_inode_item.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_inode_item.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1076 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * This file contains the implementation of the xfs_inode_log_item. + * It contains the item operations used to manipulate the inode log + * items as well as utility routines used by the inode specific + * transaction routines. + */ +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_ag.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_rw.h" + + +kmem_zone_t *xfs_ili_zone; /* inode log item zone */ + +/* + * This returns the number of iovecs needed to log the given inode item. + * + * We need one iovec for the inode log format structure, one for the + * inode core, and possibly one for the inode data/extents/b-tree root + * and one for the inode attribute data/extents/b-tree root. + */ +STATIC uint +xfs_inode_item_size( + xfs_inode_log_item_t *iip) +{ + uint nvecs; + xfs_inode_t *ip; + + ip = iip->ili_inode; + nvecs = 2; + + /* + * Only log the data/extents/b-tree root if there is something + * left to log. 
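+ *
+ * (Editor's note: in outline, the sizing below always reserves two
+ * iovecs and adds at most one per fork payload:
+ *
+ *	nvecs = 2;			-- log format + inode core
+ *	if (data fork payload dirty)	nvecs++;
+ *	if (attr fork payload dirty)	nvecs++;
+ *
+ * with the switch statements also clearing ilf_fields bits that
+ * cannot apply to the fork's current format.)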
+ */ + iip->ili_format.ilf_fields |= XFS_ILOG_CORE; + + switch (ip->i_d.di_format) { + case XFS_DINODE_FMT_EXTENTS: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEV | XFS_ILOG_UUID); + if ((iip->ili_format.ilf_fields & XFS_ILOG_DEXT) && + (ip->i_d.di_nextents > 0) && + (ip->i_df.if_bytes > 0)) { + ASSERT(ip->i_df.if_u1.if_extents != NULL); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_DEXT; + } + break; + + case XFS_DINODE_FMT_BTREE: + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t)); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | + XFS_ILOG_DEV | XFS_ILOG_UUID); + if ((iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) && + (ip->i_df.if_broot_bytes > 0)) { + ASSERT(ip->i_df.if_broot != NULL); + nvecs++; + } else { + ASSERT(!(iip->ili_format.ilf_fields & + XFS_ILOG_DBROOT)); +#ifdef XFS_TRANS_DEBUG + if (iip->ili_root_size > 0) { + ASSERT(iip->ili_root_size == + ip->i_df.if_broot_bytes); + ASSERT(memcmp(iip->ili_orig_root, + ip->i_df.if_broot, + iip->ili_root_size) == 0); + } else { + ASSERT(ip->i_df.if_broot_bytes == 0); + } +#endif + iip->ili_format.ilf_fields &= ~XFS_ILOG_DBROOT; + } + break; + + case XFS_DINODE_FMT_LOCAL: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | + XFS_ILOG_DEV | XFS_ILOG_UUID); + if ((iip->ili_format.ilf_fields & XFS_ILOG_DDATA) && + (ip->i_df.if_bytes > 0)) { + ASSERT(ip->i_df.if_u1.if_data != NULL); + ASSERT(ip->i_d.di_size > 0); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_DDATA; + } + break; + + case XFS_DINODE_FMT_DEV: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEXT | XFS_ILOG_UUID); + break; + + case XFS_DINODE_FMT_UUID: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEXT | XFS_ILOG_DEV); + break; + + default: + ASSERT(0); + break; + } + + /* + * If there are no attributes associated with this file, + * then there cannot be anything more to log. + * Clear all attribute-related log flags. + */ + if (!XFS_IFORK_Q(ip)) { + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT); + return nvecs; + } + + /* + * Log any necessary attribute data. + */ + switch (ip->i_d.di_aformat) { + case XFS_DINODE_FMT_EXTENTS: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT); + if ((iip->ili_format.ilf_fields & XFS_ILOG_AEXT) && + (ip->i_d.di_anextents > 0) && + (ip->i_afp->if_bytes > 0)) { + ASSERT(ip->i_afp->if_u1.if_extents != NULL); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_AEXT; + } + break; + + case XFS_DINODE_FMT_BTREE: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_ADATA | XFS_ILOG_AEXT); + if ((iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) && + (ip->i_afp->if_broot_bytes > 0)) { + ASSERT(ip->i_afp->if_broot != NULL); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_ABROOT; + } + break; + + case XFS_DINODE_FMT_LOCAL: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT); + if ((iip->ili_format.ilf_fields & XFS_ILOG_ADATA) && + (ip->i_afp->if_bytes > 0)) { + ASSERT(ip->i_afp->if_u1.if_data != NULL); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_ADATA; + } + break; + + default: + ASSERT(0); + break; + } + + return nvecs; +} + +/* + * This is called to fill in the vector of log iovecs for the + * given inode log item. 
It fills the first item with an inode
+ * log format structure, the second with the on-disk inode structure,
+ * and a possible third and/or fourth with the inode data/extents/b-tree
+ * root and inode attributes data/extents/b-tree root.
+ */
+STATIC void
+xfs_inode_item_format(
+ xfs_inode_log_item_t *iip,
+ xfs_log_iovec_t *log_vector)
+{
+ uint nvecs;
+ xfs_log_iovec_t *vecp;
+ xfs_inode_t *ip;
+ size_t data_bytes;
+ xfs_bmbt_rec_t *ext_buffer;
+ int nrecs;
+ xfs_mount_t *mp;
+
+ ip = iip->ili_inode;
+ vecp = log_vector;
+
+ vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
+ vecp->i_len = sizeof(xfs_inode_log_format_t);
+ vecp++;
+ nvecs = 1;
+
+ /*
+ * Clear i_update_core if the timestamps (or any other
+ * non-transactional modification) need flushing/logging
+ * and we're about to log them with the rest of the core.
+ *
+ * This is the same logic as xfs_iflush() but this code can't
+ * run at the same time as xfs_iflush because we're in commit
+ * processing here and so we have the inode lock held in
+ * exclusive mode. It doesn't really matter for the timestamps
+ * if both routines were to grab them; that would be ok.
+ *
+ * We clear i_update_core before copying out the data.
+ * This is for coordination with our timestamp updates
+ * that don't hold the inode lock. They will always
+ * update the timestamps BEFORE setting i_update_core,
+ * so if we clear i_update_core after they set it we
+ * are guaranteed to see their updates to the timestamps
+ * here. Likewise, if they set it after we clear it
+ * here, we'll see it either on the next commit of this
+ * inode or the next time the inode gets flushed via
+ * xfs_iflush(). This depends on strongly ordered memory
+ * semantics, but we have that. We use the SYNCHRONIZE
+ * macro to make sure that the compiler does not reorder
+ * the i_update_core access below the data copy below.
+ */
+ if (ip->i_update_core) {
+ ip->i_update_core = 0;
+ SYNCHRONIZE();
+ }
+
+ /*
+ * We don't have to worry about re-ordering here because
+ * the update_size field is protected by the inode lock
+ * and we have that held in exclusive mode.
+ */
+ if (ip->i_update_size)
+ ip->i_update_size = 0;
+
+ vecp->i_addr = (xfs_caddr_t)&ip->i_d;
+ vecp->i_len = sizeof(xfs_dinode_core_t);
+ vecp++;
+ nvecs++;
+ iip->ili_format.ilf_fields |= XFS_ILOG_CORE;
+
+ /*
+ * If this is really an old format inode, then we need to
+ * log it as such. This means that we have to copy the link
+ * count from the new field to the old. We don't have to worry
+ * about the new fields, because nothing trusts them as long as
+ * the old inode version number is there. If the superblock already
+ * has a new version number, then we don't bother converting back.
+ */
+ mp = ip->i_mount;
+ ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
+ XFS_SB_VERSION_HASNLINK(&mp->m_sb));
+ if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
+ if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
+ /*
+ * Convert it back.
+ */
+ ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
+ ip->i_d.di_onlink = ip->i_d.di_nlink;
+ } else {
+ /*
+ * The superblock version has already been bumped,
+ * so just make the conversion to the new inode
+ * format permanent.
+ */
+ ip->i_d.di_version = XFS_DINODE_VERSION_2;
+ ip->i_d.di_onlink = 0;
+ memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
+ }
+ }
+
+ switch (ip->i_d.di_format) {
+ case XFS_DINODE_FMT_EXTENTS:
+ ASSERT(!(iip->ili_format.ilf_fields &
+ (XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
+ XFS_ILOG_DEV | XFS_ILOG_UUID)));
+ if (iip->ili_format.ilf_fields & XFS_ILOG_DEXT) {
+ ASSERT(ip->i_df.if_bytes > 0);
+ ASSERT(ip->i_df.if_u1.if_extents != NULL);
+ ASSERT(ip->i_d.di_nextents > 0);
+ ASSERT(iip->ili_extents_buf == NULL);
+ nrecs = ip->i_df.if_bytes /
+ (uint)sizeof(xfs_bmbt_rec_t);
+ ASSERT(nrecs > 0);
+#if ARCH_CONVERT == ARCH_NOCONVERT
+ if (nrecs == ip->i_d.di_nextents) {
+ /*
+ * There are no delayed allocation
+ * extents, so just point to the
+ * real extents array.
+ */
+ vecp->i_addr =
+ (char *)(ip->i_df.if_u1.if_extents);
+ vecp->i_len = ip->i_df.if_bytes;
+ } else
+#endif
+ {
+ /*
+ * There are delayed allocation extents
+ * in the inode, or we need to convert
+ * the extents to on disk format.
+ * Use xfs_iextents_copy()
+ * to copy only the real extents into
+ * a separate buffer. We'll free the
+ * buffer in the unlock routine.
+ */
+ ext_buffer = kmem_alloc(ip->i_df.if_bytes,
+ KM_SLEEP);
+ iip->ili_extents_buf = ext_buffer;
+ vecp->i_addr = (xfs_caddr_t)ext_buffer;
+ vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
+ XFS_DATA_FORK);
+ }
+ ASSERT(vecp->i_len <= ip->i_df.if_bytes);
+ iip->ili_format.ilf_dsize = vecp->i_len;
+ vecp++;
+ nvecs++;
+ }
+ break;
+
+ case XFS_DINODE_FMT_BTREE:
+ ASSERT(!(iip->ili_format.ilf_fields &
+ (XFS_ILOG_DDATA | XFS_ILOG_DEXT |
+ XFS_ILOG_DEV | XFS_ILOG_UUID)));
+ if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) {
+ ASSERT(ip->i_df.if_broot_bytes > 0);
+ ASSERT(ip->i_df.if_broot != NULL);
+ vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
+ vecp->i_len = ip->i_df.if_broot_bytes;
+ vecp++;
+ nvecs++;
+ iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
+ }
+ break;
+
+ case XFS_DINODE_FMT_LOCAL:
+ ASSERT(!(iip->ili_format.ilf_fields &
+ (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
+ XFS_ILOG_DEV | XFS_ILOG_UUID)));
+ if (iip->ili_format.ilf_fields & XFS_ILOG_DDATA) {
+ ASSERT(ip->i_df.if_bytes > 0);
+ ASSERT(ip->i_df.if_u1.if_data != NULL);
+ ASSERT(ip->i_d.di_size > 0);
+
+ vecp->i_addr = (xfs_caddr_t)ip->i_df.if_u1.if_data;
+ /*
+ * Round i_bytes up to a word boundary.
+ * The underlying memory is guaranteed
+ * to be there by xfs_idata_realloc().
+ */
+ data_bytes = roundup(ip->i_df.if_bytes, 4);
+ ASSERT((ip->i_df.if_real_bytes == 0) ||
+ (ip->i_df.if_real_bytes == data_bytes));
+ vecp->i_len = (int)data_bytes;
+ vecp++;
+ nvecs++;
+ iip->ili_format.ilf_dsize = (unsigned)data_bytes;
+ }
+ break;
+
+ case XFS_DINODE_FMT_DEV:
+ ASSERT(!(iip->ili_format.ilf_fields &
+ (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
+ XFS_ILOG_DDATA | XFS_ILOG_UUID)));
+ if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
+ iip->ili_format.ilf_u.ilfu_rdev =
+ ip->i_df.if_u2.if_rdev;
+ }
+ break;
+
+ case XFS_DINODE_FMT_UUID:
+ ASSERT(!(iip->ili_format.ilf_fields &
+ (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
+ XFS_ILOG_DDATA | XFS_ILOG_DEV)));
+ if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
+ iip->ili_format.ilf_u.ilfu_uuid =
+ ip->i_df.if_u2.if_uuid;
+ }
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ /*
+ * If there are no attributes associated with the file,
+ * then we're done.
+ * Assert that no attribute-related log flags are set.
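+ *
+ * (Editor's note: roundup(if_bytes, 4) in the local-format cases
+ * above is the standard align-up; for a power-of-two boundary it
+ * expands to:
+ *
+ *	data_bytes = (if_bytes + 3) & ~3;
+ *
+ * xfs_idata_realloc() is relied upon to have allocated at least
+ * that much, so logging the padded length never runs past the
+ * buffer.)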
+ */
+ if (!XFS_IFORK_Q(ip)) {
+ ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
+ iip->ili_format.ilf_size = nvecs;
+ ASSERT(!(iip->ili_format.ilf_fields &
+ (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
+ return;
+ }
+
+ switch (ip->i_d.di_aformat) {
+ case XFS_DINODE_FMT_EXTENTS:
+ ASSERT(!(iip->ili_format.ilf_fields &
+ (XFS_ILOG_ADATA | XFS_ILOG_ABROOT)));
+ if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) {
+ ASSERT(ip->i_afp->if_bytes > 0);
+ ASSERT(ip->i_afp->if_u1.if_extents != NULL);
+ ASSERT(ip->i_d.di_anextents > 0);
+#ifdef DEBUG
+ nrecs = ip->i_afp->if_bytes /
+ (uint)sizeof(xfs_bmbt_rec_t);
+#endif
+ ASSERT(nrecs > 0);
+ ASSERT(nrecs == ip->i_d.di_anextents);
+#if ARCH_CONVERT == ARCH_NOCONVERT
+ /*
+ * There are no delayed allocation extents
+ * for attributes, so just point at the array.
+ */
+ vecp->i_addr = (char *)(ip->i_afp->if_u1.if_extents);
+ vecp->i_len = ip->i_afp->if_bytes;
+#else
+ ASSERT(iip->ili_aextents_buf == NULL);
+ /*
+ * Need to endian flip before logging
+ */
+ ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
+ KM_SLEEP);
+ iip->ili_aextents_buf = ext_buffer;
+ vecp->i_addr = (xfs_caddr_t)ext_buffer;
+ vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
+ XFS_ATTR_FORK);
+#endif
+ iip->ili_format.ilf_asize = vecp->i_len;
+ vecp++;
+ nvecs++;
+ }
+ break;
+
+ case XFS_DINODE_FMT_BTREE:
+ ASSERT(!(iip->ili_format.ilf_fields &
+ (XFS_ILOG_ADATA | XFS_ILOG_AEXT)));
+ if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) {
+ ASSERT(ip->i_afp->if_broot_bytes > 0);
+ ASSERT(ip->i_afp->if_broot != NULL);
+ vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
+ vecp->i_len = ip->i_afp->if_broot_bytes;
+ vecp++;
+ nvecs++;
+ iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
+ }
+ break;
+
+ case XFS_DINODE_FMT_LOCAL:
+ ASSERT(!(iip->ili_format.ilf_fields &
+ (XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
+ if (iip->ili_format.ilf_fields & XFS_ILOG_ADATA) {
+ ASSERT(ip->i_afp->if_bytes > 0);
+ ASSERT(ip->i_afp->if_u1.if_data != NULL);
+
+ vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_u1.if_data;
+ /*
+ * Round i_bytes up to a word boundary.
+ * The underlying memory is guaranteed
+ * to be there by xfs_idata_realloc().
+ */
+ data_bytes = roundup(ip->i_afp->if_bytes, 4);
+ ASSERT((ip->i_afp->if_real_bytes == 0) ||
+ (ip->i_afp->if_real_bytes == data_bytes));
+ vecp->i_len = (int)data_bytes;
+ vecp++;
+ nvecs++;
+ iip->ili_format.ilf_asize = (unsigned)data_bytes;
+ }
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
+ iip->ili_format.ilf_size = nvecs;
+}
+
+
+/*
+ * This is called to pin the inode associated with the inode log
+ * item in memory so it cannot be written out. Do this by calling
+ * xfs_ipin() to bump the pin count in the inode while holding the
+ * inode pin lock.
+ */
+STATIC void
+xfs_inode_item_pin(
+ xfs_inode_log_item_t *iip)
+{
+ ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE));
+ xfs_ipin(iip->ili_inode);
+}
+
+
+/*
+ * This is called to unpin the inode associated with the inode log
+ * item which was previously pinned with a call to xfs_inode_item_pin().
+ * Just call xfs_iunpin() on the inode to do this.
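+ *
+ * (Editor's note: the pin count is an atomic_t paired with the
+ * i_ipin_wait queue consumed by xfs_iunpin_wait(). A user-space
+ * analogue of the handshake, all names hypothetical:
+ *
+ *	void pin(obj_t *o)   { atomic_fetch_add(&o->pins, 1); }
+ *	void unpin(obj_t *o) {
+ *		if (atomic_fetch_sub(&o->pins, 1) == 1)
+ *			wake_all(&o->waiters);
+ *	}
+ *	void wait_unpinned(obj_t *o) {
+ *		wait_until(&o->waiters, atomic_load(&o->pins) == 0);
+ *	})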
+ */ +/* ARGSUSED */ +STATIC void +xfs_inode_item_unpin( + xfs_inode_log_item_t *iip, + int stale) +{ + xfs_iunpin(iip->ili_inode); +} + +/* ARGSUSED */ +STATIC void +xfs_inode_item_unpin_remove( + xfs_inode_log_item_t *iip, + xfs_trans_t *tp) +{ + xfs_iunpin(iip->ili_inode); +} + +/* + * This is called to attempt to lock the inode associated with this + * inode log item, in preparation for the push routine which does the actual + * iflush. Don't sleep on the inode lock or the flush lock. + * + * If the flush lock is already held, indicating that the inode has + * been or is in the process of being flushed, then (ideally) we'd like to + * see if the inode's buffer is still incore, and if so give it a nudge. + * We delay doing so until the pushbuf routine, though, to avoid holding + * the AIL lock across a call to the blackhole which is the buffercache. + * Also we don't want to sleep in any device strategy routines, which can happen + * if we do the subsequent bawrite in here. + */ +STATIC uint +xfs_inode_item_trylock( + xfs_inode_log_item_t *iip) +{ + register xfs_inode_t *ip; + + ip = iip->ili_inode; + + if (xfs_ipincount(ip) > 0) { + return XFS_ITEM_PINNED; + } + + if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { + return XFS_ITEM_LOCKED; + } + + if (!xfs_iflock_nowait(ip)) { + /* + * If someone else isn't already trying to push the inode + * buffer, we get to do it. + */ + if (iip->ili_pushbuf_flag == 0) { + iip->ili_pushbuf_flag = 1; +#ifdef DEBUG + iip->ili_push_owner = get_thread_id(); +#endif + /* + * Inode is left locked in shared mode. + * Pushbuf routine gets to unlock it. + */ + return XFS_ITEM_PUSHBUF; + } else { + /* + * We hold the AIL_LOCK, so we must specify the + * NONOTIFY flag so that we won't double trip. + */ + xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY); + return XFS_ITEM_FLUSHING; + } + /* NOTREACHED */ + } +#ifdef DEBUG + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { + ASSERT(iip->ili_format.ilf_fields != 0); + ASSERT(iip->ili_logged == 0); + ASSERT(iip->ili_item.li_flags & XFS_LI_IN_AIL); + } +#endif + return XFS_ITEM_SUCCESS; +} + +/* + * Unlock the inode associated with the inode log item. + * Clear the fields of the inode and inode log item that + * are specific to the current transaction. If the + * hold flags is set, do not unlock the inode. + */ +STATIC void +xfs_inode_item_unlock( + xfs_inode_log_item_t *iip) +{ + uint hold; + uint iolocked; + uint lock_flags; + xfs_inode_t *ip; + + ASSERT(iip != NULL); + ASSERT(iip->ili_inode->i_itemp != NULL); + ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE)); + ASSERT((!(iip->ili_inode->i_itemp->ili_flags & + XFS_ILI_IOLOCKED_EXCL)) || + ismrlocked(&(iip->ili_inode->i_iolock), MR_UPDATE)); + ASSERT((!(iip->ili_inode->i_itemp->ili_flags & + XFS_ILI_IOLOCKED_SHARED)) || + ismrlocked(&(iip->ili_inode->i_iolock), MR_ACCESS)); + /* + * Clear the transaction pointer in the inode. + */ + ip = iip->ili_inode; + ip->i_transp = NULL; + + /* + * If the inode needed a separate buffer with which to log + * its extents, then free it now. 
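+ *
+ * (Editor's note: xfs_inode_item_trylock() above is a non-blocking
+ * decision ladder; stripped of the debug checks it is roughly:
+ *
+ *	if (xfs_ipincount(ip) > 0)	return XFS_ITEM_PINNED;
+ *	if (!ilock_nowait(ip))		return XFS_ITEM_LOCKED;
+ *	if (!iflock_nowait(ip))
+ *		return claimed_pushbuf(ip) ? XFS_ITEM_PUSHBUF
+ *					   : XFS_ITEM_FLUSHING;
+ *	return XFS_ITEM_SUCCESS;
+ *
+ * where claimed_pushbuf() stands for the ili_pushbuf_flag
+ * test-and-set done under the AIL lock.)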
+ */ + if (iip->ili_extents_buf != NULL) { + ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS); + ASSERT(ip->i_d.di_nextents > 0); + ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT); + ASSERT(ip->i_df.if_bytes > 0); + kmem_free(iip->ili_extents_buf, ip->i_df.if_bytes); + iip->ili_extents_buf = NULL; + } + if (iip->ili_aextents_buf != NULL) { + ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS); + ASSERT(ip->i_d.di_anextents > 0); + ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT); + ASSERT(ip->i_afp->if_bytes > 0); + kmem_free(iip->ili_aextents_buf, ip->i_afp->if_bytes); + iip->ili_aextents_buf = NULL; + } + + /* + * Figure out if we should unlock the inode or not. + */ + hold = iip->ili_flags & XFS_ILI_HOLD; + + /* + * Before clearing out the flags, remember whether we + * are holding the inode's IO lock. + */ + iolocked = iip->ili_flags & XFS_ILI_IOLOCKED_ANY; + + /* + * Clear out the fields of the inode log item particular + * to the current transaction. + */ + iip->ili_ilock_recur = 0; + iip->ili_iolock_recur = 0; + iip->ili_flags = 0; + + /* + * Unlock the inode if XFS_ILI_HOLD was not set. + */ + if (!hold) { + lock_flags = XFS_ILOCK_EXCL; + if (iolocked & XFS_ILI_IOLOCKED_EXCL) { + lock_flags |= XFS_IOLOCK_EXCL; + } else if (iolocked & XFS_ILI_IOLOCKED_SHARED) { + lock_flags |= XFS_IOLOCK_SHARED; + } + xfs_iput(iip->ili_inode, lock_flags); + } +} + +/* + * This is called to find out where the oldest active copy of the + * inode log item in the on disk log resides now that the last log + * write of it completed at the given lsn. Since we always re-log + * all dirty data in an inode, the latest copy in the on disk log + * is the only one that matters. Therefore, simply return the + * given lsn. + */ +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_inode_item_committed( + xfs_inode_log_item_t *iip, + xfs_lsn_t lsn) +{ + return (lsn); +} + +/* + * The transaction with the inode locked has aborted. The inode + * must not be dirty within the transaction (unless we're forcibly + * shutting down). We simply unlock just as if the transaction + * had been cancelled. + */ +STATIC void +xfs_inode_item_abort( + xfs_inode_log_item_t *iip) +{ + xfs_inode_item_unlock(iip); + return; +} + + +/* + * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK + * failed to get the inode flush lock but did get the inode locked SHARED. + * Here we're trying to see if the inode buffer is incore, and if so whether it's + * marked delayed write. If that's the case, we'll initiate a bawrite on that + * buffer to expedite the process. + * + * We aren't holding the AIL_LOCK (or the flush lock) when this gets called, + * so it is inherently race-y. + */ +STATIC void +xfs_inode_item_pushbuf( + xfs_inode_log_item_t *iip) +{ + xfs_inode_t *ip; + xfs_mount_t *mp; + xfs_buf_t *bp; + uint dopush; + + ip = iip->ili_inode; + + ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS)); + + /* + * The ili_pushbuf_flag keeps others from + * trying to duplicate our effort. + */ + ASSERT(iip->ili_pushbuf_flag != 0); + ASSERT(iip->ili_push_owner == get_thread_id()); + + /* + * If flushlock isn't locked anymore, chances are that the + * inode flush completed and the inode was taken off the AIL. + * So, just get out. 
+ */
+	if ((valusema(&(ip->i_flock)) > 0)  ||
+	    ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
+		iip->ili_pushbuf_flag = 0;
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+		return;
+	}
+
+	mp = ip->i_mount;
+	bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
+		    iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);
+
+	if (bp != NULL) {
+		if (XFS_BUF_ISDELAYWRITE(bp)) {
+			/*
+			 * We were racing with iflush because we don't hold
+			 * the AIL_LOCK or the flush lock. However, at this
+			 * point, we have the buffer, and we know that it's
+			 * dirty. So, it's possible that iflush raced with us,
+			 * and this item is already taken off the AIL.
+			 * If not, we can flush it async.
+			 */
+			dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
+				  (valusema(&(ip->i_flock)) <= 0));
+			iip->ili_pushbuf_flag = 0;
+			xfs_iunlock(ip, XFS_ILOCK_SHARED);
+			xfs_buftrace("INODE ITEM PUSH", bp);
+			if (XFS_BUF_ISPINNED(bp)) {
+				xfs_log_force(mp, (xfs_lsn_t)0,
+					      XFS_LOG_FORCE);
+			}
+			if (dopush) {
+				xfs_bawrite(mp, bp);
+			} else {
+				xfs_buf_relse(bp);
+			}
+		} else {
+			iip->ili_pushbuf_flag = 0;
+			xfs_iunlock(ip, XFS_ILOCK_SHARED);
+			xfs_buf_relse(bp);
+		}
+		return;
+	}
+	/*
+	 * We have to be careful about resetting pushbuf flag too early (above).
+	 * Even though in theory we can do it as soon as we have the buflock,
+	 * we don't want others to be doing work needlessly. They'll come to
+	 * this function thinking that pushing the buffer is their
+	 * responsibility only to find that the buffer is still locked by
+	 * another doing the same thing.
+	 */
+	iip->ili_pushbuf_flag = 0;
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	return;
+}
+
+
+/*
+ * This is called to asynchronously write the inode associated with this
+ * inode log item out to disk. The inode will already have been locked by
+ * a successful call to xfs_inode_item_trylock().
+ */
+STATIC void
+xfs_inode_item_push(
+	xfs_inode_log_item_t	*iip)
+{
+	xfs_inode_t	*ip;
+
+	ip = iip->ili_inode;
+
+	ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS));
+	ASSERT(valusema(&(ip->i_flock)) <= 0);
+	/*
+	 * Since we were able to lock the inode's flush lock and
+	 * we found it on the AIL, the inode must be dirty.  This
+	 * is because the inode is removed from the AIL while still
+	 * holding the flush lock in xfs_iflush_done().  Thus, if
+	 * we found it in the AIL and were able to obtain the flush
+	 * lock without sleeping, then there must not have been
+	 * anyone in the process of flushing the inode.
+	 */
+	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) ||
+	       iip->ili_format.ilf_fields != 0);
+
+	/*
+	 * Write out the inode.  The completion routine ('iflush_done') will
+	 * pull it from the AIL, mark it clean, and unlock the flush lock.
+	 */
+	(void) xfs_iflush(ip, XFS_IFLUSH_DELWRI);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	return;
+}
+
+/*
+ * XXX rcc - this one really has to do something.  Probably needs
+ * to stamp in a new field in the incore inode.
+ */
+/* ARGSUSED */
+STATIC void
+xfs_inode_item_committing(
+	xfs_inode_log_item_t	*iip,
+	xfs_lsn_t		lsn)
+{
+	iip->ili_last_lsn = lsn;
+	return;
+}
+
+/*
+ * This is the ops vector shared by all inode log items.
+ */ +struct xfs_item_ops xfs_inode_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_inode_item_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_inode_item_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_inode_item_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_inode_item_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) + xfs_inode_item_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_inode_item_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_inode_item_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_inode_item_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_inode_item_abort, + .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_inode_item_pushbuf, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_inode_item_committing +}; + + +/* + * Initialize the inode log item for a newly allocated (in-core) inode. + */ +void +xfs_inode_item_init( + xfs_inode_t *ip, + xfs_mount_t *mp) +{ + xfs_inode_log_item_t *iip; + + ASSERT(ip->i_itemp == NULL); + iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP); + + iip->ili_item.li_type = XFS_LI_INODE; + iip->ili_item.li_ops = &xfs_inode_item_ops; + iip->ili_item.li_mountp = mp; + iip->ili_inode = ip; + + /* + We have zeroed memory. No need ... + iip->ili_extents_buf = NULL; + iip->ili_pushbuf_flag = 0; + */ + + iip->ili_format.ilf_type = XFS_LI_INODE; + iip->ili_format.ilf_ino = ip->i_ino; + iip->ili_format.ilf_blkno = ip->i_blkno; + iip->ili_format.ilf_len = ip->i_len; + iip->ili_format.ilf_boffset = ip->i_boffset; +} + +/* + * Free the inode log item and any memory hanging off of it. + */ +void +xfs_inode_item_destroy( + xfs_inode_t *ip) +{ +#ifdef XFS_TRANS_DEBUG + if (ip->i_itemp->ili_root_size != 0) { + kmem_free(ip->i_itemp->ili_orig_root, + ip->i_itemp->ili_root_size); + } +#endif + kmem_zone_free(xfs_ili_zone, ip->i_itemp); +} + + +/* + * This is the inode flushing I/O completion routine. It is called + * from interrupt level when the buffer containing the inode is + * flushed to disk. It is responsible for removing the inode item + * from the AIL if it has not been re-logged, and unlocking the inode's + * flush lock. + */ +/*ARGSUSED*/ +void +xfs_iflush_done( + xfs_buf_t *bp, + xfs_inode_log_item_t *iip) +{ + xfs_inode_t *ip; + SPLDECL(s); + + ip = iip->ili_inode; + + /* + * We only want to pull the item from the AIL if it is + * actually there and its location in the log has not + * changed since we started the flush. Thus, we only bother + * if the ili_logged flag is set and the inode's lsn has not + * changed. First we check the lsn outside + * the lock since it's cheaper, and then we recheck while + * holding the lock before removing the inode from the AIL. + */ + if (iip->ili_logged && + (iip->ili_item.li_lsn == iip->ili_flush_lsn)) { + AIL_LOCK(ip->i_mount, s); + if (iip->ili_item.li_lsn == iip->ili_flush_lsn) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(ip->i_mount, + (xfs_log_item_t*)iip, s); + } else { + AIL_UNLOCK(ip->i_mount, s); + } + } + + iip->ili_logged = 0; + + /* + * Clear the ili_last_fields bits now that we know that the + * data corresponding to them is safely on disk. + */ + iip->ili_last_fields = 0; + + /* + * Release the inode's flush lock since we're done with it. + */ + xfs_ifunlock(ip); + + return; +} + +/* + * This is the inode flushing abort routine. 
It is called + * from xfs_iflush when the filesystem is shutting down to clean + * up the inode state. + * It is responsible for removing the inode item + * from the AIL if it has not been re-logged, and unlocking the inode's + * flush lock. + */ +void +xfs_iflush_abort( + xfs_inode_t *ip) +{ + xfs_inode_log_item_t *iip; + xfs_mount_t *mp; + SPLDECL(s); + + iip = ip->i_itemp; + mp = ip->i_mount; + if (iip) { + if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { + AIL_LOCK(mp, s); + if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip, + s); + } else + AIL_UNLOCK(mp, s); + } + iip->ili_logged = 0; + /* + * Clear the ili_last_fields bits now that we know that the + * data corresponding to them is safely on disk. + */ + iip->ili_last_fields = 0; + /* + * Clear the inode logging fields so no more flushes are + * attempted. + */ + iip->ili_format.ilf_fields = 0; + } + /* + * Release the inode's flush lock since we're done with it. + */ + xfs_ifunlock(ip); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_inode_item.h linux.21rc5-ac2/fs/xfs/xfs_inode_item.h --- linux.21rc5/fs/xfs/xfs_inode_item.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_inode_item.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_INODE_ITEM_H__ +#define __XFS_INODE_ITEM_H__ + +/* + * This is the structure used to lay out an inode log item in the + * log. The size of the inline data/extents/b-tree root to be logged + * (if any) is indicated in the ilf_dsize field. Changes to this structure + * must be added on to the end. + * + * Convention for naming inode log item versions : The current version + * is always named XFS_LI_INODE. When an inode log item gets superseded, + * add the latest version of IRIX that will generate logs with that item + * to the version name. + * + * -Version 1 of this structure (XFS_LI_5_3_INODE) included up to the first + * union (ilf_u) field. This was released with IRIX 5.3-XFS. + * -Version 2 of this structure (XFS_LI_6_1_INODE) is currently the entire + * structure. 
This was released with IRIX 6.0.1-XFS and IRIX 6.1. + * -Version 3 of this structure (XFS_LI_INODE) is the same as version 2 + * so a new structure definition wasn't necessary. However, we had + * to add a new type because the inode cluster size changed from 4K + * to 8K and the version number had to be rev'ved to keep older kernels + * from trying to recover logs with the 8K buffers in them. The logging + * code can handle recovery on different-sized clusters now so hopefully + * this'll be the last time we need to change the inode log item just + * for a change in the inode cluster size. This new version was + * released with IRIX 6.2. + */ +typedef struct xfs_inode_log_format { + unsigned short ilf_type; /* inode log item type */ + unsigned short ilf_size; /* size of this item */ + uint ilf_fields; /* flags for fields logged */ + ushort ilf_asize; /* size of attr d/ext/root */ + ushort ilf_dsize; /* size of data/ext/root */ + xfs_ino_t ilf_ino; /* inode number */ + union { + xfs_dev_t ilfu_rdev; /* rdev value for dev inode*/ + uuid_t ilfu_uuid; /* mount point value */ + } ilf_u; + __int64_t ilf_blkno; /* blkno of inode buffer */ + int ilf_len; /* len of inode buffer */ + int ilf_boffset; /* off of inode in buffer */ +} xfs_inode_log_format_t; + +/* Initial version shipped with IRIX 5.3-XFS */ +typedef struct xfs_inode_log_format_v1 { + unsigned short ilf_type; /* inode log item type */ + unsigned short ilf_size; /* size of this item */ + uint ilf_fields; /* flags for fields logged */ + uint ilf_dsize; /* size of data/ext/root */ + xfs_ino_t ilf_ino; /* inode number */ + union { + xfs_dev_t ilfu_rdev; /* rdev value for dev inode*/ + uuid_t ilfu_uuid; /* mount point value */ + } ilf_u; +} xfs_inode_log_format_t_v1; + +/* + * Flags for xfs_trans_log_inode flags field. 
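+ *
+ * [Editorial aside, not from the original patch: callers OR the flags
+ * below together to tell the transaction code which parts of the inode
+ * were dirtied.  A minimal sketch of the usual call sequence, assuming
+ * the xfs_trans_ijoin()/xfs_trans_log_inode() interface used elsewhere
+ * in this patch:
+ *
+ *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ *	ip->i_d.di_size = newsize;
+ *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE | XFS_ILOG_DDATA);
+ *
+ * XFS_ILOG_CORE covers the dinode core fields; XFS_ILOG_DDATA adds the
+ * inline data fork.]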
+ */ +#define XFS_ILOG_CORE 0x001 /* log standard inode fields */ +#define XFS_ILOG_DDATA 0x002 /* log i_df.if_data */ +#define XFS_ILOG_DEXT 0x004 /* log i_df.if_extents */ +#define XFS_ILOG_DBROOT 0x008 /* log i_df.i_broot */ +#define XFS_ILOG_DEV 0x010 /* log the dev field */ +#define XFS_ILOG_UUID 0x020 /* log the uuid field */ +#define XFS_ILOG_ADATA 0x040 /* log i_af.if_data */ +#define XFS_ILOG_AEXT 0x080 /* log i_af.if_extents */ +#define XFS_ILOG_ABROOT 0x100 /* log i_af.i_broot */ + +#define XFS_ILOG_NONCORE (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \ + XFS_ILOG_DBROOT | XFS_ILOG_DEV | \ + XFS_ILOG_UUID | XFS_ILOG_ADATA | \ + XFS_ILOG_AEXT | XFS_ILOG_ABROOT) + +#define XFS_ILOG_DFORK (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \ + XFS_ILOG_DBROOT) + +#define XFS_ILOG_AFORK (XFS_ILOG_ADATA | XFS_ILOG_AEXT | \ + XFS_ILOG_ABROOT) + +#define XFS_ILOG_ALL (XFS_ILOG_CORE | XFS_ILOG_DDATA | \ + XFS_ILOG_DEXT | XFS_ILOG_DBROOT | \ + XFS_ILOG_DEV | XFS_ILOG_UUID | \ + XFS_ILOG_ADATA | XFS_ILOG_AEXT | \ + XFS_ILOG_ABROOT) + +#define XFS_ILI_HOLD 0x1 +#define XFS_ILI_IOLOCKED_EXCL 0x2 +#define XFS_ILI_IOLOCKED_SHARED 0x4 + +#define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED) + + +#ifdef __KERNEL__ + +struct xfs_buf; +struct xfs_bmbt_rec_64; +struct xfs_inode; +struct xfs_mount; + + +typedef struct xfs_inode_log_item { + xfs_log_item_t ili_item; /* common portion */ + struct xfs_inode *ili_inode; /* inode ptr */ + xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ + xfs_lsn_t ili_last_lsn; /* lsn at last transaction */ + unsigned short ili_ilock_recur; /* lock recursion count */ + unsigned short ili_iolock_recur; /* lock recursion count */ + unsigned short ili_flags; /* misc flags */ + unsigned short ili_logged; /* flushed logged data */ + unsigned int ili_last_fields; /* fields when flushed */ + struct xfs_bmbt_rec_64 *ili_extents_buf; /* array of logged + data exts */ + struct xfs_bmbt_rec_64 *ili_aextents_buf; /* array of logged + attr exts */ + unsigned int ili_pushbuf_flag; /* one bit used in push_ail */ + +#ifdef DEBUG + uint64_t ili_push_owner; /* one who sets pushbuf_flag + above gets to push the buf */ +#endif +#ifdef XFS_TRANS_DEBUG + int ili_root_size; + char *ili_orig_root; +#endif + xfs_inode_log_format_t ili_format; /* logged structure */ +} xfs_inode_log_item_t; + + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ILOG_FDATA) +int xfs_ilog_fdata(int w); +#define XFS_ILOG_FDATA(w) xfs_ilog_fdata(w) +#else +#define XFS_ILOG_FDATA(w) \ + ((w) == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA) +#endif + +#endif /* __KERNEL__ */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ILOG_FBROOT) +int xfs_ilog_fbroot(int w); +#define XFS_ILOG_FBROOT(w) xfs_ilog_fbroot(w) +#else +#define XFS_ILOG_FBROOT(w) \ + ((w) == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ILOG_FEXT) +int xfs_ilog_fext(int w); +#define XFS_ILOG_FEXT(w) xfs_ilog_fext(w) +#else +#define XFS_ILOG_FEXT(w) \ + ((w) == XFS_DATA_FORK ? 
XFS_ILOG_DEXT : XFS_ILOG_AEXT) +#endif + +#ifdef __KERNEL__ + +void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); +void xfs_inode_item_destroy(struct xfs_inode *); +void xfs_iflush_done(struct xfs_buf *, xfs_inode_log_item_t *); +void xfs_iflush_abort(struct xfs_inode *); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_INODE_ITEM_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_inum.h linux.21rc5-ac2/fs/xfs/xfs_inum.h --- linux.21rc5/fs/xfs/xfs_inum.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_inum.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_INUM_H__ +#define __XFS_INUM_H__ + +/* + * Inode number format: + * low inopblog bits - offset in block + * next agblklog bits - block number in ag + * next agno_log bits - ag number + * high agno_log-agblklog-inopblog bits - 0 + */ + +typedef __uint32_t xfs_agino_t; /* within allocation grp inode number */ + +/* + * Useful inode bits for this kernel. + * Used in some places where having 64-bits in the 32-bit kernels + * costs too much. 
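+ *
+ * [Editorial aside, not from the original patch: as a worked example of
+ * the inode number layout described above, assume a hypothetical
+ * filesystem with sb_inopblog = 6 (64 inodes per block) and
+ * sb_agblklog = 16.  Then for an inode number i:
+ *
+ *	offset = i & ((1 << 6) - 1);		(low 6 bits)
+ *	agbno  = (i >> 6) & ((1 << 16) - 1);	(next 16 bits)
+ *	agno   = i >> (6 + 16);			(remaining bits)
+ *
+ * which is exactly what the XFS_INO_TO_OFFSET, XFS_INO_TO_AGBNO and
+ * XFS_INO_TO_AGNO macros below compute from the superblock fields.]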
+ */ +#if XFS_BIG_FILESYSTEMS +typedef xfs_ino_t xfs_intino_t; +#else +typedef __uint32_t xfs_intino_t; +#endif + +#define NULLFSINO ((xfs_ino_t)-1) +#define NULLAGINO ((xfs_agino_t)-1) + +struct xfs_mount; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_MASK) +__uint32_t xfs_ino_mask(int k); +#define XFS_INO_MASK(k) xfs_ino_mask(k) +#else +#define XFS_INO_MASK(k) ((__uint32_t)((1ULL << (k)) - 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_OFFSET_BITS) +int xfs_ino_offset_bits(struct xfs_mount *mp); +#define XFS_INO_OFFSET_BITS(mp) xfs_ino_offset_bits(mp) +#else +#define XFS_INO_OFFSET_BITS(mp) ((mp)->m_sb.sb_inopblog) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_AGBNO_BITS) +int xfs_ino_agbno_bits(struct xfs_mount *mp); +#define XFS_INO_AGBNO_BITS(mp) xfs_ino_agbno_bits(mp) +#else +#define XFS_INO_AGBNO_BITS(mp) ((mp)->m_sb.sb_agblklog) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_AGINO_BITS) +int xfs_ino_agino_bits(struct xfs_mount *mp); +#define XFS_INO_AGINO_BITS(mp) xfs_ino_agino_bits(mp) +#else +#define XFS_INO_AGINO_BITS(mp) ((mp)->m_agino_log) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_AGNO_BITS) +int xfs_ino_agno_bits(struct xfs_mount *mp); +#define XFS_INO_AGNO_BITS(mp) xfs_ino_agno_bits(mp) +#else +#define XFS_INO_AGNO_BITS(mp) ((mp)->m_agno_log) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_BITS) +int xfs_ino_bits(struct xfs_mount *mp); +#define XFS_INO_BITS(mp) xfs_ino_bits(mp) +#else +#define XFS_INO_BITS(mp) (XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_AGNO) +xfs_agnumber_t xfs_ino_to_agno(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_AGNO(mp,i) xfs_ino_to_agno(mp,i) +#else +#define XFS_INO_TO_AGNO(mp,i) \ + ((xfs_agnumber_t)((i) >> XFS_INO_AGINO_BITS(mp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_AGINO) +xfs_agino_t xfs_ino_to_agino(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_AGINO(mp,i) xfs_ino_to_agino(mp,i) +#else +#define XFS_INO_TO_AGINO(mp,i) \ + ((xfs_agino_t)(i) & XFS_INO_MASK(XFS_INO_AGINO_BITS(mp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_AGBNO) +xfs_agblock_t xfs_ino_to_agbno(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_AGBNO(mp,i) xfs_ino_to_agbno(mp,i) +#else +#define XFS_INO_TO_AGBNO(mp,i) \ + (((xfs_agblock_t)(i) >> XFS_INO_OFFSET_BITS(mp)) & \ + XFS_INO_MASK(XFS_INO_AGBNO_BITS(mp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_OFFSET) +int xfs_ino_to_offset(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_OFFSET(mp,i) xfs_ino_to_offset(mp,i) +#else +#define XFS_INO_TO_OFFSET(mp,i) \ + ((int)(i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_FSB) +xfs_fsblock_t xfs_ino_to_fsb(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_FSB(mp,i) xfs_ino_to_fsb(mp,i) +#else +#define XFS_INO_TO_FSB(mp,i) \ + XFS_AGB_TO_FSB(mp, XFS_INO_TO_AGNO(mp,i), XFS_INO_TO_AGBNO(mp,i)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGINO_TO_INO) +xfs_ino_t +xfs_agino_to_ino(struct xfs_mount *mp, xfs_agnumber_t a, xfs_agino_t i); +#define XFS_AGINO_TO_INO(mp,a,i) xfs_agino_to_ino(mp,a,i) +#else +#define XFS_AGINO_TO_INO(mp,a,i) \ + (((xfs_ino_t)(a) << XFS_INO_AGINO_BITS(mp)) | (i)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGINO_TO_AGBNO) +xfs_agblock_t xfs_agino_to_agbno(struct xfs_mount *mp, 
xfs_agino_t i); +#define XFS_AGINO_TO_AGBNO(mp,i) xfs_agino_to_agbno(mp,i) +#else +#define XFS_AGINO_TO_AGBNO(mp,i) ((i) >> XFS_INO_OFFSET_BITS(mp)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGINO_TO_OFFSET) +int xfs_agino_to_offset(struct xfs_mount *mp, xfs_agino_t i); +#define XFS_AGINO_TO_OFFSET(mp,i) xfs_agino_to_offset(mp,i) +#else +#define XFS_AGINO_TO_OFFSET(mp,i) \ + ((i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_OFFBNO_TO_AGINO) +xfs_agino_t xfs_offbno_to_agino(struct xfs_mount *mp, xfs_agblock_t b, int o); +#define XFS_OFFBNO_TO_AGINO(mp,b,o) xfs_offbno_to_agino(mp,b,o) +#else +#define XFS_OFFBNO_TO_AGINO(mp,b,o) \ + ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o))) +#endif + +#if XFS_BIG_FILESYSTEMS +#define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL)) +#define XFS_INO64_OFFSET ((xfs_ino_t)(1ULL << 32)) +#else +#define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 32) - 1ULL)) +#endif +#define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL)) + +#endif /* __XFS_INUM_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_iocore.c linux.21rc5-ac2/fs/xfs/xfs_iocore.c --- linux.21rc5/fs/xfs/xfs_iocore.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_iocore.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_itable.h" +#include "xfs_btree.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_error.h" +#include "xfs_bit.h" +#include "xfs_rw.h" +#include "xfs_quota.h" +#include "xfs_trans_space.h" +#include "xfs_dmapi.h" + + +STATIC xfs_fsize_t +xfs_size_fn( + xfs_inode_t *ip) +{ + return (ip->i_d.di_size); +} + +STATIC int +xfs_ioinit( + struct vfs *vfsp, + struct xfs_mount_args *mntargs, + int flags) +{ + return xfs_mountfs(vfsp, XFS_VFSTOM(vfsp), + vfsp->vfs_super->s_bdev->bd_dev, flags); +} + +xfs_ioops_t xfs_iocore_xfs = { + .xfs_ioinit = (xfs_ioinit_t) xfs_ioinit, + .xfs_bmapi_func = (xfs_bmapi_t) xfs_bmapi, + .xfs_bmap_eof_func = (xfs_bmap_eof_t) xfs_bmap_eof, + .xfs_iomap_write_direct = + (xfs_iomap_write_direct_t) xfs_iomap_write_direct, + .xfs_iomap_write_delay = + (xfs_iomap_write_delay_t) xfs_iomap_write_delay, + .xfs_iomap_write_allocate = + (xfs_iomap_write_allocate_t) xfs_iomap_write_allocate, + .xfs_iomap_write_unwritten = + (xfs_iomap_write_unwritten_t) xfs_iomap_write_unwritten, + .xfs_ilock = (xfs_lock_t) xfs_ilock, + .xfs_lck_map_shared = (xfs_lck_map_shared_t) xfs_ilock_map_shared, + .xfs_ilock_demote = (xfs_lock_demote_t) xfs_ilock_demote, + .xfs_ilock_nowait = (xfs_lock_nowait_t) xfs_ilock_nowait, + .xfs_unlock = (xfs_unlk_t) xfs_iunlock, + .xfs_size_func = (xfs_size_t) xfs_size_fn, + .xfs_iodone = (xfs_iodone_t) fs_noerr, +}; + +void +xfs_iocore_inode_reinit( + xfs_inode_t *ip) +{ + xfs_iocore_t *io = &ip->i_iocore; + + io->io_flags = 0; + if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) + io->io_flags |= XFS_IOCORE_RT; + io->io_dmevmask = ip->i_d.di_dmevmask; + io->io_dmstate = ip->i_d.di_dmstate; +} + +void +xfs_iocore_inode_init( + xfs_inode_t *ip) +{ + xfs_iocore_t *io = &ip->i_iocore; + xfs_mount_t *mp = ip->i_mount; + + io->io_mount = mp; +#ifdef DEBUG + io->io_lock = &ip->i_lock; + io->io_iolock = &ip->i_iolock; +#endif + + io->io_obj = (void *)ip; + + xfs_iocore_inode_reinit(ip); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_itable.c linux.21rc5-ac2/fs/xfs/xfs_itable.c --- linux.21rc5/fs/xfs/xfs_itable.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_itable.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,793 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_ag.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_ialloc.h" +#include "xfs_itable.h" +#include "xfs_error.h" + +/* + * Return stat information for one inode. + * Return 0 if ok, else errno. + */ +int /* error status */ +xfs_bulkstat_one( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t ino, /* inode number to get data for */ + void *buffer, /* buffer to place output in */ + xfs_daddr_t bno, /* starting bno of inode cluster */ + void *dibuff, /* on-disk inode buffer */ + int *stat) /* BULKSTAT_RV_... */ +{ + xfs_bstat_t *buf; /* return buffer */ + int error; /* error value */ + xfs_dinode_t *dip; /* dinode inode pointer */ + xfs_dinode_core_t *dic; /* dinode core info pointer */ + xfs_inode_t *ip = NULL; /* incore inode pointer */ + xfs_arch_t arch; /* these are set according to */ + __uint16_t di_flags; /* temp */ + + buf = (xfs_bstat_t *)buffer; + dip = (xfs_dinode_t *)dibuff; + + if (! buf || ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino || + (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) && + (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))) { + *stat = BULKSTAT_RV_NOTHING; + return XFS_ERROR(EINVAL); + } + + if (dip == NULL) { + /* We're not being passed a pointer to a dinode. This happens + * if BULKSTAT_FG_IGET is selected. Do the iget. + */ + error = xfs_iget(mp, tp, ino, XFS_ILOCK_SHARED, &ip, bno); + if (error) { + *stat = BULKSTAT_RV_NOTHING; + return error; + } + ASSERT(ip != NULL); + ASSERT(ip->i_blkno != (xfs_daddr_t)0); + if (ip->i_d.di_mode == 0) { + xfs_iput_new(ip, XFS_ILOCK_SHARED); + *stat = BULKSTAT_RV_NOTHING; + return XFS_ERROR(ENOENT); + } + dic = &ip->i_d; + arch = ARCH_NOCONVERT; /* in-core! */ + ASSERT(dic != NULL); + + /* xfs_iget returns the following without needing + * further change. + */ + buf->bs_nlink = dic->di_nlink; + buf->bs_projid = dic->di_projid; + + } else { + dic = &dip->di_core; + ASSERT(dic != NULL); + + /* buffer dinode_core is in on-disk arch */ + arch = ARCH_CONVERT; + + /* + * The inode format changed when we moved the link count and + * made it 32 bits long. 
If this is an old format inode, + * convert it in memory to look like a new one. If it gets + * flushed to disk we will convert back before flushing or + * logging it. We zero out the new projid field and the old link + * count field. We'll handle clearing the pad field (the remains + * of the old uuid field) when we actually convert the inode to + * the new format. We don't change the version number so that we + * can distinguish this from a real new format inode. + */ + if (INT_GET(dic->di_version, arch) == XFS_DINODE_VERSION_1) { + buf->bs_nlink = INT_GET(dic->di_onlink, arch); + buf->bs_projid = 0; + } + else { + buf->bs_nlink = INT_GET(dic->di_nlink, arch); + buf->bs_projid = INT_GET(dic->di_projid, arch); + } + + } + + buf->bs_ino = ino; + buf->bs_mode = INT_GET(dic->di_mode, arch); + buf->bs_uid = INT_GET(dic->di_uid, arch); + buf->bs_gid = INT_GET(dic->di_gid, arch); + buf->bs_size = INT_GET(dic->di_size, arch); + buf->bs_atime.tv_sec = INT_GET(dic->di_atime.t_sec, arch); + buf->bs_atime.tv_nsec = INT_GET(dic->di_atime.t_nsec, arch); + buf->bs_mtime.tv_sec = INT_GET(dic->di_mtime.t_sec, arch); + buf->bs_mtime.tv_nsec = INT_GET(dic->di_mtime.t_nsec, arch); + buf->bs_ctime.tv_sec = INT_GET(dic->di_ctime.t_sec, arch); + buf->bs_ctime.tv_nsec = INT_GET(dic->di_ctime.t_nsec, arch); + /* + * convert di_flags to bs_xflags. + */ + di_flags=INT_GET(dic->di_flags, arch); + + buf->bs_xflags = + ((di_flags & XFS_DIFLAG_REALTIME) ? + XFS_XFLAG_REALTIME : 0) | + ((di_flags & XFS_DIFLAG_PREALLOC) ? + XFS_XFLAG_PREALLOC : 0) | + (XFS_CFORK_Q_ARCH(dic, arch) ? + XFS_XFLAG_HASATTR : 0); + + buf->bs_extsize = INT_GET(dic->di_extsize, arch) << mp->m_sb.sb_blocklog; + buf->bs_extents = INT_GET(dic->di_nextents, arch); + buf->bs_gen = INT_GET(dic->di_gen, arch); + memset(buf->bs_pad, 0, sizeof(buf->bs_pad)); + buf->bs_dmevmask = INT_GET(dic->di_dmevmask, arch); + buf->bs_dmstate = INT_GET(dic->di_dmstate, arch); + buf->bs_aextents = INT_GET(dic->di_anextents, arch); + + switch (INT_GET(dic->di_format, arch)) { + case XFS_DINODE_FMT_DEV: + if ( ip ) { + buf->bs_rdev = ip->i_df.if_u2.if_rdev; + } else { + buf->bs_rdev = INT_GET(dip->di_u.di_dev, arch); + } + + buf->bs_blksize = BLKDEV_IOSIZE; + buf->bs_blocks = 0; + break; + case XFS_DINODE_FMT_LOCAL: + case XFS_DINODE_FMT_UUID: + buf->bs_rdev = 0; + buf->bs_blksize = mp->m_sb.sb_blocksize; + buf->bs_blocks = 0; + break; + case XFS_DINODE_FMT_EXTENTS: + case XFS_DINODE_FMT_BTREE: + buf->bs_rdev = 0; + buf->bs_blksize = mp->m_sb.sb_blocksize; + if ( ip ) { + buf->bs_blocks = INT_GET(dic->di_nblocks, arch) + ip->i_delayed_blks; + } else { + buf->bs_blocks = INT_GET(dic->di_nblocks, arch); + } + break; + } + + if (ip) { + xfs_iput(ip, XFS_ILOCK_SHARED); + } + + *stat = BULKSTAT_RV_DIDONE; + return 0; +} + +/* + * Return stat information in bulk (by-inode) for the filesystem. 
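+ *
+ * [Editorial aside, not from the original patch: a rough sketch of the
+ * structure of this routine, as an aid to reading the loops below:
+ *
+ *	for each allocation group, starting at *lastinop:
+ *		read the AGI header
+ *		walk the inode btree, copying records of chunks that
+ *		    contain allocated inodes into irbuf
+ *		drop the btree and AGI buffers (they cannot be held
+ *		    across iget)
+ *		for each saved chunk record:
+ *			issue readahead for the next chunk's clusters
+ *			call the formatter for each allocated inode
+ *
+ * The names used here are those of the variables in the code below.]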
+ */
+int					/* error status */
+xfs_bulkstat(
+	xfs_mount_t		*mp,	/* mount point for filesystem */
+	xfs_trans_t		*tp,	/* transaction pointer */
+	xfs_ino_t		*lastinop, /* last inode returned */
+	int			*ubcountp, /* size of buffer/count returned */
+	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
+	size_t			statstruct_size, /* sizeof struct filling */
+	xfs_caddr_t		ubuffer, /* buffer with inode stats */
+	int			flags,	/* defined in xfs_itable.h */
+	int			*done)	/* 1 if there're more stats to get */
+{
+	xfs_agblock_t		agbno=0; /* allocation group block number */
+	xfs_buf_t		*agbp;	/* agi header buffer */
+	xfs_agi_t		*agi;	/* agi header data */
+	xfs_agino_t		agino;	/* inode # in allocation group */
+	xfs_agnumber_t		agno;	/* allocation group number */
+	xfs_daddr_t		bno;	/* inode cluster start daddr */
+	int			chunkidx; /* current index into inode chunk */
+	int			clustidx; /* current index into inode cluster */
+	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
+	int			end_of_ag; /* set if we've seen the ag end */
+	int			error;	/* error code */
+	int			fmterror; /* bulkstat formatter result */
+	__int32_t		gcnt;	/* current btree rec's count */
+	xfs_inofree_t		gfree;	/* current btree rec's free mask */
+	xfs_agino_t		gino;	/* current btree rec's start inode */
+	int			i;	/* loop index */
+	int			icount;	/* count of inodes good in irbuf */
+	xfs_ino_t		ino;	/* inode number (filesystem) */
+	xfs_inobt_rec_t		*irbp;	/* current irec buffer pointer */
+	xfs_inobt_rec_t		*irbuf;	/* start of irec buffer */
+	xfs_inobt_rec_t		*irbufend; /* end of good irec buffer entries */
+	xfs_ino_t		lastino=0; /* last inode number returned */
+	int			nbcluster; /* # of blocks in a cluster */
+	int			nicluster; /* # of inodes in a cluster */
+	int			nimask;	/* mask for inode clusters */
+	int			nirbuf;	/* size of irbuf */
+	int			rval;	/* return value error code */
+	int			tmp;	/* result value from btree calls */
+	int			ubcount; /* size of user's buffer */
+	int			ubleft;	/* spaces left in user's buffer */
+	xfs_caddr_t		ubufp;	/* current pointer into user's buffer */
+	xfs_buf_t		*bp;	/* ptr to on-disk inode cluster buf */
+	xfs_dinode_t		*dip;	/* ptr into bp for specific inode */
+	xfs_inode_t		*ip;	/* ptr to in-core inode struct */
+
+	/*
+	 * Get the last inode value, see if there's nothing to do.
+	 */
+	ino = (xfs_ino_t)*lastinop;
+	dip = NULL;
+	agno = XFS_INO_TO_AGNO(mp, ino);
+	agino = XFS_INO_TO_AGINO(mp, ino);
+	if (agno >= mp->m_sb.sb_agcount ||
+	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+		*done = 1;
+		*ubcountp = 0;
+		return 0;
+	}
+	ubcount = ubleft = *ubcountp;
+	*ubcountp = 0;
+	*done = 0;
+	fmterror = 0;
+	ubufp = ubuffer;
+	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
+		mp->m_sb.sb_inopblock :
+		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
+	nimask = ~(nicluster - 1);
+	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
+	/*
+	 * Lock down the user's buffer. If a buffer was not sent, as in the
+	 * case where the disk quota code calls here, we skip this.
+	 */
+#if defined(HAVE_USERACC)
+	if (ubuffer &&
+	    (error = useracc(ubuffer, ubcount * statstruct_size,
+			(B_READ|B_PHYS), NULL))) {
+		return error;
+	}
+#endif
+	/*
+	 * Allocate a page-sized buffer for inode btree records.
+	 * We could try allocating something smaller, but for normal
+	 * calls we'll always (potentially) need the whole page.
+	 */
+	irbuf = kmem_alloc(NBPC, KM_SLEEP);
+	nirbuf = NBPC / sizeof(*irbuf);
+	/*
+	 * Loop over the allocation groups, starting from the last
+	 * inode returned; 0 means start of the allocation group.
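+ *
+ * [Editorial aside, not from the original patch: a worked example of the
+ * cluster arithmetic set up above, assuming hypothetical values of a
+ * 4096 byte block size and 256 byte inodes (sb_inopblock = 16,
+ * sb_inodelog = 8, sb_inopblog = 4) with an 8192 byte inode cluster:
+ *
+ *	nicluster = 8192 >> 8 = 32	inodes per cluster
+ *	nimask    = ~(32 - 1)		masks an inode's cluster base
+ *	nbcluster = 32 >> 4 = 2		blocks per cluster
+ *
+ * With a block size at or above the cluster size, nicluster simply
+ * falls back to sb_inopblock.]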
+ */
+	rval = 0;
+	while (ubleft > 0 && agno < mp->m_sb.sb_agcount) {
+		bp = NULL;
+		down_read(&mp->m_peraglock);
+		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+		up_read(&mp->m_peraglock);
+		if (error) {
+			/*
+			 * Skip this allocation group and go to the next one.
+			 */
+			agno++;
+			agino = 0;
+			continue;
+		}
+		agi = XFS_BUF_TO_AGI(agbp);
+		/*
+		 * Allocate and initialize a btree cursor for ialloc btree.
+		 */
+		cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO,
+			(xfs_inode_t *)0, 0);
+		irbp = irbuf;
+		irbufend = irbuf + nirbuf;
+		end_of_ag = 0;
+		/*
+		 * If we're returning in the middle of an allocation group,
+		 * we need to get the remainder of the chunk we're in.
+		 */
+		if (agino > 0) {
+			/*
+			 * Lookup the inode chunk that this inode lives in.
+			 */
+			error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp);
+			if (!error &&	/* no I/O error */
+			    tmp &&	/* lookup succeeded */
+			    /* got the record, should always work */
+			    !(error = xfs_inobt_get_rec(cur, &gino, &gcnt,
+				    &gfree, &i, ARCH_NOCONVERT)) &&
+			    i == 1 &&
+			    /* this is the right chunk */
+			    agino < gino + XFS_INODES_PER_CHUNK &&
+			    /* lastino was not last in chunk */
+			    (chunkidx = agino - gino + 1) <
+				    XFS_INODES_PER_CHUNK &&
+			    /* there are some left allocated */
+			    XFS_INOBT_MASKN(chunkidx,
+				    XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) {
+				/*
+				 * Grab the chunk record.  Mark all the
+				 * uninteresting inodes (because they're
+				 * before our start point) free.
+				 */
+				for (i = 0; i < chunkidx; i++) {
+					if (XFS_INOBT_MASK(i) & ~gfree)
+						gcnt++;
+				}
+				gfree |= XFS_INOBT_MASKN(0, chunkidx);
+				INT_SET(irbp->ir_startino, ARCH_CONVERT, gino);
+				INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt);
+				INT_SET(irbp->ir_free, ARCH_CONVERT, gfree);
+				irbp++;
+				agino = gino + XFS_INODES_PER_CHUNK;
+				icount = XFS_INODES_PER_CHUNK - gcnt;
+			} else {
+				/*
+				 * If any of those tests failed, bump the
+				 * inode number (just in case).
+				 */
+				agino++;
+				icount = 0;
+			}
+			/*
+			 * In any case, increment to the next record.
+			 */
+			if (!error)
+				error = xfs_inobt_increment(cur, 0, &tmp);
+		} else {
+			/*
+			 * Start of ag.  Lookup the first inode chunk.
+			 */
+			error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp);
+			icount = 0;
+		}
+		/*
+		 * Loop through inode btree records in this ag,
+		 * until we run out of inodes or space in the buffer.
+		 */
+		while (irbp < irbufend && icount < ubcount) {
+			/*
+			 * Loop as long as we're unable to read the
+			 * inode btree.
+			 */
+			while (error) {
+				agino += XFS_INODES_PER_CHUNK;
+				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
+				    INT_GET(agi->agi_length, ARCH_CONVERT))
+					break;
+				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
+					&tmp);
+			}
+			/*
+			 * If we ran off the end of the ag either with an
+			 * error, or the normal way, set end and stop
+			 * collecting.
+			 */
+			if (error ||
+			    (error = xfs_inobt_get_rec(cur, &gino, &gcnt,
+				    &gfree, &i, ARCH_NOCONVERT)) ||
+			    i == 0) {
+				end_of_ag = 1;
+				break;
+			}
+			/*
+			 * If this chunk has any allocated inodes, save it.
+			 */
+			if (gcnt < XFS_INODES_PER_CHUNK) {
+				INT_SET(irbp->ir_startino, ARCH_CONVERT, gino);
+				INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt);
+				INT_SET(irbp->ir_free, ARCH_CONVERT, gfree);
+				irbp++;
+				icount += XFS_INODES_PER_CHUNK - gcnt;
+			}
+			/*
+			 * Set agino to after this chunk and bump the cursor.
+			 */
+			agino = gino + XFS_INODES_PER_CHUNK;
+			error = xfs_inobt_increment(cur, 0, &tmp);
+		}
+		/*
+		 * Drop the btree buffers and the agi buffer.
+		 * We can't hold any of the locks these represent
+		 * when calling iget.
+ */
+		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+		xfs_trans_brelse(tp, agbp);
+		/*
+		 * Now format all the good inodes into the user's buffer.
+		 */
+		irbufend = irbp;
+		for (irbp = irbuf; irbp < irbufend && ubleft > 0; irbp++) {
+			/*
+			 * Read-ahead the next chunk's worth of inodes.
+			 */
+			if (&irbp[1] < irbufend) {
+				/*
+				 * Loop over all clusters in the next chunk.
+				 * Do a readahead if there are any allocated
+				 * inodes in that cluster.
+				 */
+				for (agbno = XFS_AGINO_TO_AGBNO(mp,
+						INT_GET(irbp[1].ir_startino, ARCH_CONVERT)),
+				     chunkidx = 0;
+				     chunkidx < XFS_INODES_PER_CHUNK;
+				     chunkidx += nicluster,
+				     agbno += nbcluster) {
+					if (XFS_INOBT_MASKN(chunkidx,
+							    nicluster) &
+					    ~(INT_GET(irbp[1].ir_free, ARCH_CONVERT)))
+						xfs_btree_reada_bufs(mp, agno,
+							agbno, nbcluster);
+				}
+			}
+			/*
+			 * Now process this chunk of inodes.
+			 */
+			for (agino = INT_GET(irbp->ir_startino, ARCH_CONVERT), chunkidx = 0, clustidx = 0;
+			     ubleft > 0 &&
+				INT_GET(irbp->ir_freecount, ARCH_CONVERT) < XFS_INODES_PER_CHUNK;
+			     chunkidx++, clustidx++, agino++) {
+				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
+				/*
+				 * Recompute agbno if this is the
+				 * first inode of the cluster.
+				 *
+				 * Careful with clustidx.   There can be
+				 * multiple clusters per chunk, a single
+				 * cluster per chunk or a cluster that has
+				 * inodes represented from several different
+				 * chunks (if blocksize is large).
+				 *
+				 * Because of this, the starting clustidx is
+				 * initialized to zero in this loop but must
+				 * later be reset after reading in the cluster
+				 * buffer.
+				 */
+				if ((chunkidx & (nicluster - 1)) == 0) {
+					agbno = XFS_AGINO_TO_AGBNO(mp,
+							INT_GET(irbp->ir_startino, ARCH_CONVERT)) +
+						((chunkidx & nimask) >>
+						 mp->m_sb.sb_inopblog);
+
+					if (flags & BULKSTAT_FG_QUICK) {
+						ino = XFS_AGINO_TO_INO(mp, agno,
+								       agino);
+						bno = XFS_AGB_TO_DADDR(mp, agno,
+								       agbno);
+
+						/*
+						 * Get the inode cluster buffer
+						 */
+						ASSERT(xfs_inode_zone != NULL);
+						ip = kmem_zone_zalloc(xfs_inode_zone,
+								      KM_SLEEP);
+						ip->i_ino = ino;
+						ip->i_mount = mp;
+						if (bp)
+							xfs_trans_brelse(tp, bp);
+						error = xfs_itobp(mp, tp, ip,
+								  &dip, &bp, bno);
+						if (!error)
+							clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
+						kmem_zone_free(xfs_inode_zone, ip);
+						if (XFS_TEST_ERROR(error != 0,
+								   mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
+								   XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
+							bp = NULL;
+							break;
+						}
+					}
+				}
+				/*
+				 * Skip if this inode is free.
+				 */
+				if (XFS_INOBT_MASK(chunkidx) & INT_GET(irbp->ir_free, ARCH_CONVERT))
+					continue;
+				/*
+				 * Count used inodes as free so we can tell
+				 * when the chunk is used up.
+				 */
+				INT_MOD(irbp->ir_freecount, ARCH_CONVERT, +1);
+				ino = XFS_AGINO_TO_INO(mp, agno, agino);
+				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
+				if (flags & BULKSTAT_FG_QUICK) {
+					dip = (xfs_dinode_t *)xfs_buf_offset(bp,
+					      (clustidx << mp->m_sb.sb_inodelog));
+
+					if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT)
+						    != XFS_DINODE_MAGIC
+					    || !XFS_DINODE_GOOD_VERSION(
+						    INT_GET(dip->di_core.di_version, ARCH_CONVERT)))
+						continue;
+				}
+
+				/*
+				 * Get the inode and fill in a single buffer.
+				 * BULKSTAT_FG_QUICK uses dip to fill it in.
+				 * BULKSTAT_FG_IGET uses igets.
+				 * See: xfs_bulkstat_one & dm_bulkstat_one.
+				 * This is also used to count inodes/blks, etc
+				 * in xfs_qm_quotacheck.
+ */ + error = formatter(mp, tp, ino, ubufp, bno, dip, + &fmterror); + if (fmterror == BULKSTAT_RV_NOTHING) + continue; + if (fmterror == BULKSTAT_RV_GIVEUP) { + ubleft = 0; + ASSERT(error); + rval = error; + break; + } + if (ubufp) + ubufp += statstruct_size; + ubleft--; + lastino = ino; + } + } + + if (bp) + xfs_trans_brelse(tp, bp); + + /* + * Set up for the next loop iteration. + */ + if (ubleft > 0) { + if (end_of_ag) { + agno++; + agino = 0; + } else + agino = XFS_INO_TO_AGINO(mp, lastino); + } else + break; + } + /* + * Done, we're either out of filesystem or space to put the data. + */ + kmem_free(irbuf, NBPC); +#if defined(HAVE_USERACC) + if (ubuffer) + unuseracc(ubuffer, ubcount * statstruct_size, (B_READ|B_PHYS)); +#endif + *ubcountp = ubcount - ubleft; + if (agno >= mp->m_sb.sb_agcount) { + /* + * If we ran out of filesystem, mark lastino as off + * the end of the filesystem, so the next call + * will return immediately. + */ + *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0); + *done = 1; + } else + *lastinop = (xfs_ino_t)lastino; + + return rval; +} + +/* + * Return stat information in bulk (by-inode) for the filesystem. + * Special case for non-sequential one inode bulkstat. + */ +int /* error status */ +xfs_bulkstat_single( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_ino_t *lastinop, /* inode to return */ + xfs_caddr_t buffer, /* buffer with inode stats */ + int *done) /* 1 if there're more stats to get */ +{ + xfs_bstat_t bstat; /* one bulkstat result structure */ + int count; /* count value for bulkstat call */ + int error; /* return value */ + xfs_ino_t ino; /* filesystem inode number */ + int res; /* result from bs1 */ + + /* + * note that requesting valid inode numbers which are not allocated + * to inodes will most likely cause xfs_itobp to generate warning + * messages about bad magic numbers. This is ok. The fact that + * the inode isn't actually an inode is handled by the + * error check below. Done this way to make the usual case faster + * at the expense of the error case. + */ + + ino = (xfs_ino_t)*lastinop; + error = xfs_bulkstat_one(mp, NULL, ino, &bstat, 0, 0, &res); + if (error) { + /* + * Special case way failed, do it the "long" way + * to see if that works. + */ + (*lastinop)--; + count = 1; + if (xfs_bulkstat(mp, NULL, lastinop, &count, xfs_bulkstat_one, + sizeof(bstat), buffer, BULKSTAT_FG_IGET, done)) + return error; + if (count == 0 || (xfs_ino_t)*lastinop != ino) + return error == EFSCORRUPTED ? + XFS_ERROR(EINVAL) : error; + else + return 0; + } + *done = 0; + if (copy_to_user(buffer, &bstat, sizeof(bstat))) + return XFS_ERROR(EFAULT); + return 0; +} + +/* + * Return inode number table for the filesystem. 
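+ *
+ * [Editorial aside, not from the original patch: each xfs_inogrp_t entry
+ * describes one 64-inode chunk.  As a hypothetical example, a chunk
+ * starting at inode 128 with only inodes 128 and 130 free would be
+ * returned as:
+ *
+ *	xi_startino   = 128
+ *	xi_alloccount = 62
+ *	xi_allocmask  = ~gfree, i.e. all bits set except bits 0 and 2
+ *
+ * matching the assignments from the btree record in the loop below.]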
+ */
+int					/* error status */
+xfs_inumbers(
+	xfs_mount_t	*mp,	/* mount point for filesystem */
+	xfs_trans_t	*tp,	/* transaction pointer */
+	xfs_ino_t	*lastino, /* last inode returned */
+	int		*count,	/* size of buffer/count returned */
+	xfs_caddr_t	ubuffer) /* buffer with inode descriptions */
+{
+	xfs_buf_t	*agbp;
+	xfs_agino_t	agino;
+	xfs_agnumber_t	agno;
+	int		bcount;
+	xfs_inogrp_t	*buffer;
+	int		bufidx;
+	xfs_btree_cur_t	*cur;
+	int		error;
+	__int32_t	gcnt;
+	xfs_inofree_t	gfree;
+	xfs_agino_t	gino;
+	int		i;
+	xfs_ino_t	ino;
+	int		left;
+	int		tmp;
+
+	ino = (xfs_ino_t)*lastino;
+	agno = XFS_INO_TO_AGNO(mp, ino);
+	agino = XFS_INO_TO_AGINO(mp, ino);
+	left = *count;
+	*count = 0;
+	bcount = MIN(left, (int)(NBPP / sizeof(*buffer)));
+	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+	error = bufidx = 0;
+	cur = NULL;
+	agbp = NULL;
+	while (left > 0 && agno < mp->m_sb.sb_agcount) {
+		if (agbp == NULL) {
+			down_read(&mp->m_peraglock);
+			error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+			up_read(&mp->m_peraglock);
+			if (error) {
+				/*
+				 * If we can't read the AGI of this ag,
+				 * then just skip to the next one.
+				 */
+				ASSERT(cur == NULL);
+				agbp = NULL;
+				agno++;
+				agino = 0;
+				continue;
+			}
+			cur = xfs_btree_init_cursor(mp, tp, agbp, agno,
+				XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
+			error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
+			if (error) {
+				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+				cur = NULL;
+				xfs_trans_brelse(tp, agbp);
+				agbp = NULL;
+				/*
+				 * Move up to the last inode in the current
+				 * chunk.  The lookup_ge will always get
+				 * us the first inode in the next chunk.
+				 */
+				agino += XFS_INODES_PER_CHUNK - 1;
+				continue;
+			}
+		}
+		if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree,
+			&i, ARCH_NOCONVERT)) ||
+		    i == 0) {
+			xfs_trans_brelse(tp, agbp);
+			agbp = NULL;
+			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+			cur = NULL;
+			agno++;
+			agino = 0;
+			continue;
+		}
+		agino = gino + XFS_INODES_PER_CHUNK - 1;
+		buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);
+		buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt;
+		buffer[bufidx].xi_allocmask = ~gfree;
+		bufidx++;
+		left--;
+		if (bufidx == bcount) {
+			if (copy_to_user(ubuffer, buffer,
+					bufidx * sizeof(*buffer))) {
+				error = XFS_ERROR(EFAULT);
+				break;
+			}
+			ubuffer += bufidx * sizeof(*buffer);
+			*count += bufidx;
+			bufidx = 0;
+		}
+		if (left) {
+			error = xfs_inobt_increment(cur, 0, &tmp);
+			if (error) {
+				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+				cur = NULL;
+				xfs_trans_brelse(tp, agbp);
+				agbp = NULL;
+				/*
+				 * The agino value has already been bumped.
+				 * Just try to skip up to it.
+				 */
+				agino += XFS_INODES_PER_CHUNK;
+				continue;
+			}
+		}
+	}
+	if (!error) {
+		if (bufidx) {
+			if (copy_to_user(ubuffer, buffer,
+					bufidx * sizeof(*buffer)))
+				error = XFS_ERROR(EFAULT);
+			else
+				*count += bufidx;
+		}
+		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
+	}
+	kmem_free(buffer, bcount * sizeof(*buffer));
+	if (cur)
+		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
+					   XFS_BTREE_NOERROR));
+	if (agbp)
+		xfs_trans_brelse(tp, agbp);
+	return error;
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_itable.h linux.21rc5-ac2/fs/xfs/xfs_itable.h
--- linux.21rc5/fs/xfs/xfs_itable.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_itable.h	2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2000-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ITABLE_H__ +#define __XFS_ITABLE_H__ + +/* + * xfs_bulkstat() is used to fill in xfs_bstat structures as well as dm_stat + * structures (by the dmi library). This is a pointer to a formatter function + * that will iget the inode and fill in the appropriate structure. + * see xfs_bulkstat_one() and dm_bulkstat_one() in dmi_xfs.c + */ +typedef int (*bulkstat_one_pf)(struct xfs_mount *mp, + struct xfs_trans *tp, + xfs_ino_t ino, + void *buffer, + xfs_daddr_t bno, + void *dip, + int *stat); +/* + * Values for stat return value. + */ +#define BULKSTAT_RV_NOTHING 0 +#define BULKSTAT_RV_DIDONE 1 +#define BULKSTAT_RV_GIVEUP 2 + +/* + * Values for bulkstat flag argument. + */ +#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */ +#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */ +#define BULKSTAT_FG_VFSLOCKED 0x4 /* Already have vfs lock */ + +/* + * Return stat information in bulk (by-inode) for the filesystem. 
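+ *
+ * [Editorial aside, not from the original patch: a minimal sketch of the
+ * calling convention, assuming a caller-supplied buffer "ubuf" with room
+ * for "nbatch" xfs_bstat_t entries (both hypothetical names):
+ *
+ *	xfs_ino_t lastino = 0;
+ *	int count, done = 0, error = 0;
+ *
+ *	while (!error && !done) {
+ *		count = nbatch;
+ *		error = xfs_bulkstat(mp, NULL, &lastino, &count,
+ *				xfs_bulkstat_one, sizeof(xfs_bstat_t),
+ *				ubuf, BULKSTAT_FG_IGET, &done);
+ *	}
+ *
+ * On return, count holds the number of entries filled in; each call
+ * resumes after lastino, and *done is set once the whole filesystem
+ * has been scanned.]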
+ */ +int /* error status */ +xfs_bulkstat( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t *lastino, /* last inode returned */ + int *count, /* size of buffer/count returned */ + bulkstat_one_pf formatter, /* func that'd fill a single buf */ + size_t statstruct_size,/* sizeof struct that we're filling */ + xfs_caddr_t ubuffer, /* buffer with inode stats */ + int flags, /* flag to control access method */ + int *done); /* 1 if there're more stats to get */ + +int +xfs_bulkstat_single( + xfs_mount_t *mp, + xfs_ino_t *lastinop, + xfs_caddr_t buffer, + int *done); + +int +xfs_bulkstat_one( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + void *buffer, + xfs_daddr_t bno, + void *dibuff, + int *stat); + +int /* error status */ +xfs_inumbers( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t *last, /* last inode returned */ + int *count, /* size of buffer/count returned */ + xfs_caddr_t buffer);/* buffer with inode descriptions */ + +#endif /* __XFS_ITABLE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_log.c linux.21rc5-ac2/fs/xfs/xfs_log.c --- linux.21rc5/fs/xfs/xfs_log.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_log.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,3628 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * High level interface routines for log manager + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_ag.h" +#include "xfs_sb.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_buf_item.h" +#include "xfs_alloc_btree.h" +#include "xfs_log_recover.h" +#include "xfs_bit.h" +#include "xfs_rw.h" +#include "xfs_trans_priv.h" + + +#define xlog_write_adv_cnt(ptr, len, off, bytes) \ + { (ptr) += (bytes); \ + (len) -= (bytes); \ + (off) += (bytes);} + +/* Local miscellaneous function prototypes */ +STATIC int xlog_bdstrat_cb(struct xfs_buf *); +STATIC int xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket, + xlog_in_core_t **, xfs_lsn_t *); +STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, + dev_t log_dev, + xfs_daddr_t blk_offset, + int num_bblks); +STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes); +STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); +STATIC void xlog_unalloc_log(xlog_t *log); +STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[], + int nentries, xfs_log_ticket_t tic, + xfs_lsn_t *start_lsn, + xlog_in_core_t **commit_iclog, + uint flags); + +/* local state machine functions */ +STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int); +STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog); +STATIC int xlog_state_get_iclog_space(xlog_t *log, + int len, + xlog_in_core_t **iclog, + xlog_ticket_t *ticket, + int *continued_write, + int *logoffsetp); +STATIC void xlog_state_put_ticket(xlog_t *log, + xlog_ticket_t *tic); +STATIC int xlog_state_release_iclog(xlog_t *log, + xlog_in_core_t *iclog); +STATIC void xlog_state_switch_iclogs(xlog_t *log, + xlog_in_core_t *iclog, + int eventual_size); +STATIC int xlog_state_sync(xlog_t *log, xfs_lsn_t lsn, uint flags); +STATIC int xlog_state_sync_all(xlog_t *log, uint flags); +STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); + +/* local functions to manipulate grant head */ +STATIC int xlog_grant_log_space(xlog_t *log, + xlog_ticket_t *xtic); +STATIC void xlog_grant_push_ail(xfs_mount_t *mp, + int need_bytes); +STATIC void xlog_regrant_reserve_log_space(xlog_t *log, + xlog_ticket_t *ticket); +STATIC int xlog_regrant_write_log_space(xlog_t *log, + xlog_ticket_t *ticket); +STATIC void xlog_ungrant_log_space(xlog_t *log, + xlog_ticket_t *ticket); + + +/* local ticket functions */ +STATIC void xlog_state_ticket_alloc(xlog_t *log); +STATIC xlog_ticket_t *xlog_ticket_get(xlog_t *log, + int unit_bytes, + int count, + char clientid, + uint flags); +STATIC void xlog_ticket_put(xlog_t *log, xlog_ticket_t *ticket); + +/* local debug functions */ +#if defined(DEBUG) && !defined(XLOG_NOLOG) +STATIC void xlog_verify_dest_ptr(xlog_t *log, __psint_t ptr); +#ifdef XFSDEBUG +STATIC void xlog_verify_disk_cycle_no(xlog_t *log, xlog_in_core_t *iclog); +#endif +STATIC void xlog_verify_grant_head(xlog_t *log, int equals); +STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, + int count, boolean_t syncing); +STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, + xfs_lsn_t tail_lsn); +#else 
+#define xlog_verify_dest_ptr(a,b) +#define xlog_verify_disk_cycle_no(a,b) +#define xlog_verify_grant_head(a,b) +#define xlog_verify_iclog(a,b,c,d) +#define xlog_verify_tail_lsn(a,b,c) +#endif + +int xlog_iclogs_empty(xlog_t *log); + +#ifdef DEBUG +int xlog_do_error = 0; +int xlog_req_num = 0; +int xlog_error_mod = 33; +#endif + +#define XLOG_FORCED_SHUTDOWN(log) (log->l_flags & XLOG_IO_ERROR) + +/* + * 0 => disable log manager + * 1 => enable log manager + * 2 => enable log manager and log debugging + */ +#if defined(XLOG_NOLOG) || defined(DEBUG) +int xlog_debug = 1; +dev_t xlog_devt = 0; +#endif + +#if defined(XFS_LOG_TRACE) +void +xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string) +{ + if (! log->l_grant_trace) + log->l_grant_trace = ktrace_alloc(1024, KM_SLEEP); + + ktrace_enter(log->l_grant_trace, + (void *)tic, + (void *)log->l_reserve_headq, + (void *)log->l_write_headq, + (void *)((unsigned long)log->l_grant_reserve_cycle), + (void *)((unsigned long)log->l_grant_reserve_bytes), + (void *)((unsigned long)log->l_grant_write_cycle), + (void *)((unsigned long)log->l_grant_write_bytes), + (void *)((unsigned long)log->l_curr_cycle), + (void *)((unsigned long)log->l_curr_block), + (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn, ARCH_NOCONVERT)), + (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn, ARCH_NOCONVERT)), + (void *)string, + (void *)((unsigned long)13), + (void *)((unsigned long)14), + (void *)((unsigned long)15), + (void *)((unsigned long)16)); +} + +void +xlog_trace_tic(xlog_t *log, xlog_ticket_t *tic) +{ + if (! log->l_trace) + log->l_trace = ktrace_alloc(256, KM_SLEEP); + + ktrace_enter(log->l_trace, + (void *)tic, + (void *)((unsigned long)tic->t_curr_res), + (void *)((unsigned long)tic->t_unit_res), + (void *)((unsigned long)tic->t_ocnt), + (void *)((unsigned long)tic->t_cnt), + (void *)((unsigned long)tic->t_flags), + (void *)((unsigned long)7), + (void *)((unsigned long)8), + (void *)((unsigned long)9), + (void *)((unsigned long)10), + (void *)((unsigned long)11), + (void *)((unsigned long)12), + (void *)((unsigned long)13), + (void *)((unsigned long)14), + (void *)((unsigned long)15), + (void *)((unsigned long)16)); +} + +void +xlog_trace_iclog(xlog_in_core_t *iclog, uint state) +{ + pid_t pid; + + pid = current_pid(); + + if (!iclog->ic_trace) + iclog->ic_trace = ktrace_alloc(256, KM_SLEEP); + ktrace_enter(iclog->ic_trace, + (void *)((unsigned long)state), + (void *)((unsigned long)pid), + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0); +} + +#else +#define xlog_trace_loggrant(log,tic,string) +#define xlog_trace_iclog(iclog,state) +#endif /* XFS_LOG_TRACE */ + +/* + * NOTES: + * + * 1. currblock field gets updated at startup and after in-core logs + * marked as with WANT_SYNC. + */ + +/* + * This routine is called when a user of a log manager ticket is done with + * the reservation. If the ticket was ever used, then a commit record for + * the associated transaction is written out as a log operation header with + * no data. The flag XLOG_TIC_INITED is set when the first write occurs with + * a given ticket. If the ticket was one with a permanent reservation, then + * a few operations are done differently. Permanent reservation tickets by + * default don't release the reservation. They just commit the current + * transaction with the belief that the reservation is still needed. 
A flag + * must be passed in before permanent reservations are actually released. + * When these type of tickets are not released, they need to be set into + * the inited state again. By doing this, a start record will be written + * out when the next write occurs. + */ +xfs_lsn_t +xfs_log_done(xfs_mount_t *mp, + xfs_log_ticket_t xtic, + void **iclog, + uint flags) +{ + xlog_t *log = mp->m_log; + xlog_ticket_t *ticket = (xfs_log_ticket_t) xtic; + xfs_lsn_t lsn = 0; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + + if (XLOG_FORCED_SHUTDOWN(log) || + /* + * If nothing was ever written, don't write out commit record. + * If we get an error, just continue and give back the log ticket. + */ + (((ticket->t_flags & XLOG_TIC_INITED) == 0) && + (xlog_commit_record(mp, ticket, + (xlog_in_core_t **)iclog, &lsn)))) { + lsn = (xfs_lsn_t) -1; + if (ticket->t_flags & XLOG_TIC_PERM_RESERV) { + flags |= XFS_LOG_REL_PERM_RESERV; + } + } + + + if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 || + (flags & XFS_LOG_REL_PERM_RESERV)) { + /* + * Release ticket if not permanent reservation or a specifc + * request has been made to release a permanent reservation. + */ + xlog_ungrant_log_space(log, ticket); + xlog_state_put_ticket(log, ticket); + } else { + xlog_regrant_reserve_log_space(log, ticket); + } + + /* If this ticket was a permanent reservation and we aren't + * trying to release it, reset the inited flags; so next time + * we write, a start record will be written out. + */ + if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) && + (flags & XFS_LOG_REL_PERM_RESERV) == 0) + ticket->t_flags |= XLOG_TIC_INITED; + + return lsn; +} /* xfs_log_done */ + + +/* + * Force the in-core log to disk. If flags == XFS_LOG_SYNC, + * the force is done synchronously. + * + * Asynchronous forces are implemented by setting the WANT_SYNC + * bit in the appropriate in-core log and then returning. + * + * Synchronous forces are implemented with a semaphore. All callers + * to force a given lsn to disk will wait on a semaphore attached to the + * specific in-core log. When given in-core log finally completes its + * write to disk, that thread will wake up all threads waiting on the + * semaphore. + */ +int +xfs_log_force(xfs_mount_t *mp, + xfs_lsn_t lsn, + uint flags) +{ + int rval; + xlog_t *log = mp->m_log; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + + ASSERT(flags & XFS_LOG_FORCE); + + XFS_STATS_INC(xfsstats.xs_log_force); + + if ((log->l_flags & XLOG_IO_ERROR) == 0) { + if (lsn == 0) + rval = xlog_state_sync_all(log, flags); + else + rval = xlog_state_sync(log, lsn, flags); + } else { + rval = XFS_ERROR(EIO); + } + + return rval; + +} /* xfs_log_force */ + + +/* + * This function will take a log sequence number and check to see if that + * lsn has been flushed to disk. If it has, then the callback function is + * called with the callback argument. If the relevant in-core log has not + * been synced to disk, we add the callback to the callback list of the + * in-core log. + */ +int +xfs_log_notify(xfs_mount_t *mp, /* mount of partition */ + void *iclog_hndl, /* iclog to hang callback off */ + xfs_log_callback_t *cb) +{ + xlog_t *log = mp->m_log; + xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl; + int abortflg, spl; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! 
xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + cb->cb_next = 0; + spl = LOG_LOCK(log); + abortflg = (iclog->ic_state & XLOG_STATE_IOERROR); + if (!abortflg) { + ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) || + (iclog->ic_state == XLOG_STATE_WANT_SYNC)); + cb->cb_next = 0; + *(iclog->ic_callback_tail) = cb; + iclog->ic_callback_tail = &(cb->cb_next); + } + LOG_UNLOCK(log, spl); + if (abortflg) { + cb->cb_func(cb->cb_arg, abortflg); + } + return 0; +} /* xfs_log_notify */ + +int +xfs_log_release_iclog(xfs_mount_t *mp, + void *iclog_hndl) +{ + xlog_t *log = mp->m_log; + xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl; + + if (xlog_state_release_iclog(log, iclog)) { + xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); + return(EIO); + } + + return 0; +} + +/* + * Initialize log manager data. This routine is intended to be called when + * a system boots up. It is not a per filesystem initialization. + * + * As you can see, we currently do nothing. + */ +int +xfs_log_init(void) +{ + return( 0 ); +} + + +/* + * 1. Reserve an amount of on-disk log space and return a ticket corresponding + * to the reservation. + * 2. Potentially, push buffers at tail of log to disk. + * + * Each reservation is going to reserve extra space for a log record header. + * When writes happen to the on-disk log, we don't subtract the length of the + * log record header from any reservation. By wasting space in each + * reservation, we prevent over allocation problems. + */ +int +xfs_log_reserve(xfs_mount_t *mp, + int unit_bytes, + int cnt, + xfs_log_ticket_t *ticket, + __uint8_t client, + uint flags) +{ + xlog_t *log = mp->m_log; + xlog_ticket_t *internal_ticket; + int retval; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + retval = 0; + ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); + ASSERT((flags & XFS_LOG_NOSLEEP) == 0); + + if (XLOG_FORCED_SHUTDOWN(log)) + return XFS_ERROR(EIO); + + XFS_STATS_INC(xfsstats.xs_try_logspace); + + if (*ticket != NULL) { + ASSERT(flags & XFS_LOG_PERM_RESERV); + internal_ticket = (xlog_ticket_t *)*ticket; + xlog_grant_push_ail(mp, internal_ticket->t_unit_res); + retval = xlog_regrant_write_log_space(log, internal_ticket); + } else { + /* may sleep if need to allocate more tickets */ + internal_ticket = xlog_ticket_get(log, unit_bytes, cnt, + client, flags); + *ticket = internal_ticket; + xlog_grant_push_ail(mp, + (internal_ticket->t_unit_res * + internal_ticket->t_cnt)); + retval = xlog_grant_log_space(log, internal_ticket); + } + + return retval; +} /* xfs_log_reserve */ + + +/* + * Mount a log filesystem + * + * mp - ubiquitous xfs mount point structure + * log_dev - device number of on-disk log device + * blk_offset - Start block # where block size is 512 bytes (BBSIZE) + * num_bblocks - Number of BBSIZE blocks in on-disk log + * + * Return error or zero. + */ +int +xfs_log_mount(xfs_mount_t *mp, + dev_t log_dev, + xfs_daddr_t blk_offset, + int num_bblks) +{ + xlog_t *log; + + if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) + cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname); + else { + cmn_err(CE_NOTE, + "!Mounting filesystem \"%s\" in no-recovery mode. Filesystem will be inconsistent.", + mp->m_fsname); + ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY); + } + + mp->m_log = log = xlog_alloc_log(mp, log_dev, blk_offset, num_bblks); + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! 
xlog_debug) { + cmn_err(CE_NOTE, "log dev: 0x%x", log_dev); + return 0; + } +#endif + /* + * skip log recovery on a norecovery mount. pretend it all + * just worked. + */ + if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) { + int error; + vfs_t *vfsp = XFS_MTOVFS(mp); + int readonly = (vfsp->vfs_flag & VFS_RDONLY); + + if (readonly) + vfsp->vfs_flag &= ~VFS_RDONLY; + + error = xlog_recover(log, readonly); + + if (readonly) + vfsp->vfs_flag |= VFS_RDONLY; + if (error) { + cmn_err(CE_WARN, "XFS: log mount/recovery failed"); + xlog_unalloc_log(log); + return error; + } + } + + /* Normal transactions can now occur */ + log->l_flags &= ~XLOG_ACTIVE_RECOVERY; + + /* End mounting message in xfs_log_mount_finish */ + return 0; +} /* xfs_log_mount */ + +/* + * Finish the recovery of the file system. This is separate from + * the xfs_log_mount() call, because it depends on the code in + * xfs_mountfs() to read in the root and real-time bitmap inodes + * between calling xfs_log_mount() and here. + * + * mp - ubiquitous xfs mount point structure + */ +int +xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags) +{ + int error; + + if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) + error = xlog_recover_finish(mp->m_log, mfsi_flags); + else { + error = 0; + ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY); + } + + return error; +} + +/* + * Unmount processing for the log. + */ +int +xfs_log_unmount(xfs_mount_t *mp) +{ + int error; + + error = xfs_log_unmount_write(mp); + xfs_log_unmount_dealloc(mp); + return (error); +} + +/* + * Final log writes as part of unmount. + * + * Mark the filesystem clean as unmount happens. Note that during relocation + * this routine needs to be executed as part of source-bag while the + * deallocation must not be done until source-end. + */ + +/* + * Unmount record used to have a string "Unmount filesystem--" in the + * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE). + * We just write the magic number now since that particular field isn't + * currently architecture converted and "nUmount" is a bit foo. + * As far as I know, there weren't any dependencies on the old behaviour. + */ + +int +xfs_log_unmount_write(xfs_mount_t *mp) +{ + xlog_t *log = mp->m_log; + xlog_in_core_t *iclog; +#ifdef DEBUG + xlog_in_core_t *first_iclog; +#endif + xfs_log_iovec_t reg[1]; + xfs_log_ticket_t tic = 0; + xfs_lsn_t lsn; + int error; + SPLDECL(s); + + /* the data section must be 32 bit size aligned */ + struct { + __uint16_t magic; + __uint16_t pad1; + __uint32_t pad2; /* may as well make it 64 bits */ + } magic = { XLOG_UNMOUNT_TYPE, 0, 0 }; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + + /* + * Don't write out unmount record on read-only mounts. + * Or, if we are doing a forced umount (typically because of IO errors). + */ + if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY) + return 0; + + xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC); + +#ifdef DEBUG + first_iclog = iclog = log->l_iclog; + do { + if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { + ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE); + ASSERT(iclog->ic_offset == 0); + } + iclog = iclog->ic_next; + } while (iclog != first_iclog); +#endif + if (! 
(XLOG_FORCED_SHUTDOWN(log))) { + reg[0].i_addr = (void*)&magic; + reg[0].i_len = sizeof(magic); + + error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0); + if (!error) { + /* remove inited flag */ + ((xlog_ticket_t *)tic)->t_flags = 0; + error = xlog_write(mp, reg, 1, tic, &lsn, + NULL, XLOG_UNMOUNT_TRANS); + /* + * At this point, we're umounting anyway, + * so there's no point in transitioning log state + * to IOERROR. Just continue... + */ + } + + if (error) { + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_log_unmount: unmount record failed"); + } + + + s = LOG_LOCK(log); + iclog = log->l_iclog; + iclog->ic_refcnt++; + LOG_UNLOCK(log, s); + xlog_state_want_sync(log, iclog); + (void) xlog_state_release_iclog(log, iclog); + + s = LOG_LOCK(log); + if (!(iclog->ic_state == XLOG_STATE_ACTIVE || + iclog->ic_state == XLOG_STATE_DIRTY)) { + if (!XLOG_FORCED_SHUTDOWN(log)) { + sv_wait(&iclog->ic_forcesema, PMEM, + &log->l_icloglock, s); + } else { + LOG_UNLOCK(log, s); + } + } else { + LOG_UNLOCK(log, s); + } + if (tic) + xlog_state_put_ticket(log, tic); + } else { + /* + * We're already in forced_shutdown mode, couldn't + * even attempt to write out the unmount transaction. + * + * Go through the motions of sync'ing and releasing + * the iclog, even though no I/O will actually happen, + * we need to wait for other log I/O's that may already + * be in progress. Do this as a separate section of + * code so we'll know if we ever get stuck here that + * we're in this odd situation of trying to unmount + * a file system that went into forced_shutdown as + * the result of an unmount.. + */ + s = LOG_LOCK(log); + iclog = log->l_iclog; + iclog->ic_refcnt++; + LOG_UNLOCK(log, s); + + xlog_state_want_sync(log, iclog); + (void) xlog_state_release_iclog(log, iclog); + + s = LOG_LOCK(log); + + if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE + || iclog->ic_state == XLOG_STATE_DIRTY + || iclog->ic_state == XLOG_STATE_IOERROR) ) { + + sv_wait(&iclog->ic_forcesema, PMEM, + &log->l_icloglock, s); + } else { + LOG_UNLOCK(log, s); + } + } + + return 0; +} /* xfs_log_unmount_write */ + +/* + * Deallocate log structures for unmount/relocation. + */ +void +xfs_log_unmount_dealloc(xfs_mount_t *mp) +{ + xlog_unalloc_log(mp->m_log); +} + +/* + * Write region vectors to log. The write happens using the space reservation + * of the ticket (tic). It is not a requirement that all writes for a given + * transaction occur with one call to xfs_log_write(). + */ +int +xfs_log_write(xfs_mount_t * mp, + xfs_log_iovec_t reg[], + int nentries, + xfs_log_ticket_t tic, + xfs_lsn_t *start_lsn) +{ + int error; + xlog_t *log = mp->m_log; +#if defined(DEBUG) || defined(XLOG_NOLOG) + + if (! 
xlog_debug && xlog_devt == log->l_dev) { + *start_lsn = 0; + return 0; + } +#endif + if (XLOG_FORCED_SHUTDOWN(log)) + return XFS_ERROR(EIO); + + if ((error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0))) { + xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); + } + return (error); +} /* xfs_log_write */ + + +void +xfs_log_move_tail(xfs_mount_t *mp, + xfs_lsn_t tail_lsn) +{ + xlog_ticket_t *tic; + xlog_t *log = mp->m_log; + int need_bytes, free_bytes, cycle, bytes; + SPLDECL(s); + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (!xlog_debug && xlog_devt == log->l_dev) + return; +#endif + /* XXXsup tmp */ + if (XLOG_FORCED_SHUTDOWN(log)) + return; + ASSERT(!XFS_FORCED_SHUTDOWN(mp)); + + if (tail_lsn == 0) { + /* needed since sync_lsn is 64 bits */ + s = LOG_LOCK(log); + tail_lsn = log->l_last_sync_lsn; + LOG_UNLOCK(log, s); + } + + s = GRANT_LOCK(log); + + /* Also an illegal lsn. 1 implies that we aren't passing in a legal + * tail_lsn. + */ + if (tail_lsn != 1) + log->l_tail_lsn = tail_lsn; + + if ((tic = log->l_write_headq)) { +#ifdef DEBUG + if (log->l_flags & XLOG_ACTIVE_RECOVERY) + panic("Recovery problem"); +#endif + cycle = log->l_grant_write_cycle; + bytes = log->l_grant_write_bytes; + free_bytes = xlog_space_left(log, cycle, bytes); + do { + ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); + + if (free_bytes < tic->t_unit_res) + break; + free_bytes -= tic->t_unit_res; + sv_signal(&tic->t_sema); + tic = tic->t_next; + } while (tic != log->l_write_headq); + } + if ((tic = log->l_reserve_headq)) { +#ifdef DEBUG + if (log->l_flags & XLOG_ACTIVE_RECOVERY) + panic("Recovery problem"); +#endif + cycle = log->l_grant_reserve_cycle; + bytes = log->l_grant_reserve_bytes; + free_bytes = xlog_space_left(log, cycle, bytes); + do { + if (tic->t_flags & XLOG_TIC_PERM_RESERV) + need_bytes = tic->t_unit_res*tic->t_cnt; + else + need_bytes = tic->t_unit_res; + if (free_bytes < need_bytes) + break; + free_bytes -= need_bytes; + sv_signal(&tic->t_sema); + tic = tic->t_next; + } while (tic != log->l_reserve_headq); + } + GRANT_UNLOCK(log, s); +} /* xfs_log_move_tail */ + +/* + * Determine if we have a transaction that has gone to disk + * that needs to be covered. Log activity needs to be idle (no AIL and + * nothing in the iclogs). And, we need to be in the right state indicating + * something has gone out. + */ +int +xfs_log_need_covered(xfs_mount_t *mp) +{ + SPLDECL(s); + int needed = 0, gen; + xlog_t *log = mp->m_log; + + if (mp->m_frozen || XFS_FORCED_SHUTDOWN(mp)) + return 0; + + s = LOG_LOCK(log); + if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || + (log->l_covered_state == XLOG_STATE_COVER_NEED2)) + && !xfs_trans_first_ail(mp, &gen) + && xlog_iclogs_empty(log)) { + if (log->l_covered_state == XLOG_STATE_COVER_NEED) + log->l_covered_state = XLOG_STATE_COVER_DONE; + else { + ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2); + log->l_covered_state = XLOG_STATE_COVER_DONE2; + } + needed = 1; + } + LOG_UNLOCK(log, s); + return(needed); +} + +/****************************************************************************** + * + * local routines + * + ****************************************************************************** + */ + +/* xfs_trans_tail_ail returns 0 when there is nothing in the list. + * The log manager must keep track of the last LR which was committed + * to disk. The lsn of this LR will become the new tail_lsn whenever + * xfs_trans_tail_ail returns 0. 
If we don't do this, we run into + * the situation where stuff could be written into the log but nothing + * was ever in the AIL when asked. Eventually, we panic since the + * tail hits the head. + * + * We may be holding the log iclog lock upon entering this routine. + */ +xfs_lsn_t +xlog_assign_tail_lsn(xfs_mount_t *mp) +{ + xfs_lsn_t tail_lsn; + SPLDECL(s); + xlog_t *log = mp->m_log; + + tail_lsn = xfs_trans_tail_ail(mp); + s = GRANT_LOCK(log); + if (tail_lsn != 0) + log->l_tail_lsn = tail_lsn; + else + tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn; + GRANT_UNLOCK(log, s); + + return tail_lsn; +} /* xlog_assign_tail_lsn */ + + +/* + * Return the space in the log between the tail and the head. The head + * is passed in the cycle/bytes formal parms. In the special case where + * the reserve head has wrapped passed the tail, this calculation is no + * longer valid. In this case, just return 0 which means there is no space + * in the log. This works for all places where this function is called + * with the reserve head. Of course, if the write head were to ever + * wrap the tail, we should blow up. Rather than catch this case here, + * we depend on other ASSERTions in other parts of the code. XXXmiken + * + * This code also handles the case where the reservation head is behind + * the tail. The details of this case are described below, but the end + * result is that we return the size of the log as the amount of space left. + */ +int +xlog_space_left(xlog_t *log, int cycle, int bytes) +{ + int free_bytes; + int tail_bytes; + int tail_cycle; + + tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn, ARCH_NOCONVERT)); + tail_cycle = CYCLE_LSN(log->l_tail_lsn, ARCH_NOCONVERT); + if ((tail_cycle == cycle) && (bytes >= tail_bytes)) { + free_bytes = log->l_logsize - (bytes - tail_bytes); + } else if ((tail_cycle + 1) < cycle) { + return 0; + } else if (tail_cycle < cycle) { + ASSERT(tail_cycle == (cycle - 1)); + free_bytes = tail_bytes - bytes; + } else { + /* + * The reservation head is behind the tail. + * This can only happen when the AIL is empty so the tail + * is equal to the head and the l_roundoff value in the + * log structure is taking up the difference between the + * reservation head and the tail. The bytes accounted for + * by the l_roundoff field are temporarily 'lost' to the + * reservation mechanism, but they are cleaned up when the + * log buffers that created them are reused. These lost + * bytes are what allow the reservation head to fall behind + * the tail in the case that the log is 'empty'. + * In this case we just want to return the size of the + * log as the amount of space left. + */ +/* This assert does not take into account padding from striped log writes * + ASSERT((tail_cycle == (cycle + 1)) || + ((bytes + log->l_roundoff) >= tail_bytes)); +*/ + free_bytes = log->l_logsize; + } + return free_bytes; +} /* xlog_space_left */ + + +/* + * Log function which is called when an io completes. + * + * The log manager needs its own routine, in order to control what + * happens with the buffer after the write completes. + */ +void +xlog_iodone(xfs_buf_t *bp) +{ + xlog_in_core_t *iclog; + int aborted; + + iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); + ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long) 2); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); + aborted = 0; + + /* + * Race to shutdown the filesystem if we see an error. 
+ */ + if (XFS_BUF_GETERROR(bp)) { + /* Some versions of cpp barf on the recursive definition of + * ic_log -> hic_fields.ic_log and expand ic_log twice when + * it is passed through two macros. Workaround for broken cpp + */ + struct log *l; + xfs_ioerror_alert("xlog_iodone", + iclog->ic_log->l_mp, bp, XFS_BUF_ADDR(bp)); + XFS_BUF_STALE(bp); + l = iclog->ic_log; + xfs_force_shutdown(l->l_mp, XFS_LOG_IO_ERROR); + /* + * This flag will be propagated to the trans-committed + * callback routines to let them know that the log-commit + * didn't succeed. + */ + aborted = XFS_LI_ABORTED; + } else if (iclog->ic_state & XLOG_STATE_IOERROR) { + aborted = XFS_LI_ABORTED; + } + xlog_state_done_syncing(iclog, aborted); + if (!(XFS_BUF_ISASYNC(bp))) { + /* + * Corresponding psema() will be done in bwrite(). If we don't + * vsema() here, panic. + */ + XFS_BUF_V_IODONESEMA(bp); + } +} /* xlog_iodone */ + +/* + * The bdstrat callback function for log bufs. This gives us a central + * place to trap bufs in case we get hit by a log I/O error and need to + * shutdown. Actually, in practice, even when we didn't get a log error, + * we transition the iclogs to IOERROR state *after* flushing all existing + * iclogs to disk. This is because we don't want anymore new transactions to be + * started or completed afterwards. + */ +STATIC int +xlog_bdstrat_cb(struct xfs_buf *bp) +{ + xlog_in_core_t *iclog; + + iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); + + if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) { + /* note for irix bstrat will need struct bdevsw passed + * Fix the following macro if the code ever is merged + */ + XFS_bdstrat(bp); + return 0; + } + + xfs_buftrace("XLOG__BDSTRAT IOERROR", bp); + XFS_BUF_ERROR(bp, EIO); + XFS_BUF_STALE(bp); + xfs_biodone(bp); + return (XFS_ERROR(EIO)); + + +} + +/* + * Return size of each in-core log record buffer. + * + * Low memory machines only get 2 16KB buffers. We don't want to waste + * memory here. However, all other machines get at least 2 32KB buffers. + * The number is hard coded because we don't care about the minimum + * memory size, just 32MB systems. + * + * If the filesystem blocksize is too large, we may need to choose a + * larger size since the directory code currently logs entire blocks. + * XXXmiken XXXcurtis + */ + +STATIC void +xlog_get_iclog_buffer_size(xfs_mount_t *mp, + xlog_t *log) +{ + int size; + int xhdrs; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + /* + * When logbufs == 0, someone has disabled the log from the FSTAB + * file. This is not a documented feature. We need to set xlog_debug + * to zero (this deactivates the log) and set xlog_devt to the + * appropriate dev_t. Only one filesystem may be affected as such + * since this is just a performance hack to test what we might be able + * to get if the log were not present. + */ + if (mp->m_logbufs == 0) { + xlog_debug = 0; + xlog_devt = log->l_dev; + log->l_iclog_bufs = XLOG_NUM_ICLOGS; + } else +#endif + { + /* + * This is the normal path. If m_logbufs == -1, then the + * admin has chosen to use the system defaults for logbuffers. + */ + if (mp->m_logbufs == -1) + log->l_iclog_bufs = XLOG_NUM_ICLOGS; + else + log->l_iclog_bufs = mp->m_logbufs; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + /* We are reactivating a filesystem after it was active */ + if (log->l_dev == xlog_devt) { + xlog_devt = 1; + xlog_debug = 1; + } +#endif + } + + /* + * Buffer size passed in from mount system call. 
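+ * + * (Illustrative numbers, not from this patch: a mount option such as + * logbsize=65536 arrives here as mp->m_logbsize = 65536; the shift loop + * below then yields l_iclog_size_log = 16, and on a version-2 log + * xhdrs = 65536 / 32768 = 2.)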
+ */ + if (mp->m_logbsize != -1) { + size = log->l_iclog_size = mp->m_logbsize; + log->l_iclog_size_log = 0; + while (size != 1) { + log->l_iclog_size_log++; + size >>= 1; + } + + if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) { + /* # headers = size / 32K + * one header holds cycles from 32K of data + */ + + xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE; + if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE) + xhdrs++; + log->l_iclog_hsize = xhdrs << BBSHIFT; + log->l_iclog_heads = xhdrs; + } else { + ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE); + log->l_iclog_hsize = BBSIZE; + log->l_iclog_heads = 1; + } + return; + } + + /* + * Special case machines that have less than 32MB of memory. + * All machines with more memory use 32KB buffers. + */ + if (xfs_physmem <= btoc(32*1024*1024)) { + /* Don't change; min configuration */ + log->l_iclog_size = XLOG_RECORD_BSIZE; /* 16k */ + log->l_iclog_size_log = XLOG_RECORD_BSHIFT; + } else { + log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; /* 32k */ + log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; + } + + /* the default log size is 16k or 32k which is one header sector */ + log->l_iclog_hsize = BBSIZE; + log->l_iclog_heads = 1; + + /* + * For 16KB, we use 3 32KB buffers. For 32KB block sizes, we use + * 4 32KB buffers. For 64KB block sizes, we use 8 32KB buffers. + */ + if (mp->m_sb.sb_blocksize >= 16*1024) { + log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; + log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; + if (mp->m_logbufs == -1) { + switch (mp->m_sb.sb_blocksize) { + case 16*1024: /* 16 KB */ + log->l_iclog_bufs = 3; + break; + case 32*1024: /* 32 KB */ + log->l_iclog_bufs = 4; + break; + case 64*1024: /* 64 KB */ + log->l_iclog_bufs = 8; + break; + default: + xlog_panic("XFS: Illegal blocksize"); + break; + } + } + } +} /* xlog_get_iclog_buffer_size */ + + +/* + * This routine initializes some of the log structure for a given mount point. + * Its primary purpose is to fill in enough, so recovery can occur. However, + * some other stuff may be filled in too. 
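+ * + * (One structural note: the iclogs allocated below are linked into a + * ring; l_iclog points at the current in-core log, ic_next steps around + * the ring, and the first iclog's ic_prev is patched to point at the + * last one after the loop completes.)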
+ */ +STATIC xlog_t * +xlog_alloc_log(xfs_mount_t *mp, + dev_t log_dev, + xfs_daddr_t blk_offset, + int num_bblks) +{ + xlog_t *log; + xlog_rec_header_t *head; + xlog_in_core_t **iclogp; + xlog_in_core_t *iclog, *prev_iclog=NULL; + xfs_buf_t *bp; + int i; + int iclogsize; + + log = (void *)kmem_zalloc(sizeof(xlog_t), KM_SLEEP); + + log->l_mp = mp; + log->l_dev = log_dev; + log->l_logsize = BBTOB(num_bblks); + log->l_logBBstart = blk_offset; + log->l_logBBsize = num_bblks; + log->l_roundoff = 0; + log->l_covered_state = XLOG_STATE_COVER_IDLE; + log->l_flags |= XLOG_ACTIVE_RECOVERY; + + log->l_prev_block = -1; + ASSIGN_ANY_LSN(log->l_tail_lsn, 1, 0, ARCH_NOCONVERT); + /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ + log->l_last_sync_lsn = log->l_tail_lsn; + log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ + log->l_curr_block = 0; /* filled in by xlog_recover */ + log->l_grant_reserve_bytes = 0; + log->l_grant_reserve_cycle = 1; + log->l_grant_write_bytes = 0; + log->l_grant_write_cycle = 1; + log->l_quotaoffs_flag = 0; /* XFS_LI_QUOTAOFF logitems */ + + xlog_get_iclog_buffer_size(mp, log); + + bp = log->l_xbuf = XFS_getrbuf(0,mp); /* get my locked buffer */ /* mp needed for pagebuf/linux only */ + + XFS_BUF_SET_TARGET(bp, mp->m_logdev_targp); + XFS_BUF_SET_SIZE(bp, log->l_iclog_size); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); + XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); + ASSERT(XFS_BUF_ISBUSY(log->l_xbuf)); + ASSERT(XFS_BUF_VALUSEMA(log->l_xbuf) <= 0); + spinlock_init(&log->l_icloglock, "iclog"); + spinlock_init(&log->l_grant_lock, "grhead_iclog"); + initnsema(&log->l_flushsema, 0, "ic-flush"); + xlog_state_ticket_alloc(log); /* wait until after icloglock inited */ + + /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ + ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0); + + iclogp = &log->l_iclog; + /* + * The amount of memory to allocate for the iclog structure is + * rather funky due to the way the structure is defined. It is + * done this way so that we can use different sizes for machines + * with different amounts of memory. See the definition of + * xlog_in_core_t in xfs_log_priv.h for details. + */ + iclogsize = log->l_iclog_size; + ASSERT(log->l_iclog_size >= 4096); + for (i=0; i < log->l_iclog_bufs; i++) { + *iclogp = (xlog_in_core_t *) + kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP); + iclog = *iclogp; + iclog->hic_data = (xlog_in_core_2_t *) + kmem_alloc(iclogsize, KM_SLEEP); + + iclog->ic_prev = prev_iclog; + prev_iclog = iclog; + log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header); + + head = &iclog->ic_header; + memset(head, 0, sizeof(xlog_rec_header_t)); + INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); + INT_SET(head->h_version, ARCH_CONVERT, + XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 
2 : 1); + INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size); + /* new fields */ + INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT); + memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); + + bp = iclog->ic_bp = XFS_getrbuf(0,mp); /* my locked buffer */ /* mp need for pagebuf/linux only */ + XFS_BUF_SET_TARGET(bp, mp->m_logdev_targp); + XFS_BUF_SET_SIZE(bp, log->l_iclog_size); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); + XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); + + iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize; + iclog->ic_state = XLOG_STATE_ACTIVE; + iclog->ic_log = log; + iclog->ic_callback_tail = &(iclog->ic_callback); + iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize; + + ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); + ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0); + sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force"); + sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write"); + + iclogp = &iclog->ic_next; + } + *iclogp = log->l_iclog; /* complete ring */ + log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ + + return log; +} /* xlog_alloc_log */ + + +/* + * Write out the commit record of a transaction associated with the given + * ticket. Return the lsn of the commit record. + */ +STATIC int +xlog_commit_record(xfs_mount_t *mp, + xlog_ticket_t *ticket, + xlog_in_core_t **iclog, + xfs_lsn_t *commitlsnp) +{ + int error; + xfs_log_iovec_t reg[1]; + + reg[0].i_addr = 0; + reg[0].i_len = 0; + + ASSERT_ALWAYS(iclog); + if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp, + iclog, XLOG_COMMIT_TRANS))) { + xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); + } + return (error); +} /* xlog_commit_record */ + + +/* + * Push on the buffer cache code if we ever use more than 75% of the on-disk + * log space. This code pushes on the lsn which would supposedly free up + * the 25% which we want to leave free. We may need to adopt a policy which + * pushes on an lsn which is further along in the log once we reach the high + * water mark. In this manner, we would be creating a low water mark. + */ +void +xlog_grant_push_ail(xfs_mount_t *mp, + int need_bytes) +{ + xlog_t *log = mp->m_log; /* pointer to the log */ + xfs_lsn_t tail_lsn; /* lsn of the log tail */ + xfs_lsn_t threshold_lsn = 0; /* lsn we'd like to be at */ + int free_blocks; /* free blocks left to write to */ + int free_bytes; /* free bytes left to write to */ + int threshold_block; /* block in lsn we'd like to be at */ + int threshold_cycle; /* lsn cycle we'd like to be at */ + int free_threshold; + SPLDECL(s); + + ASSERT(BTOBB(need_bytes) < log->l_logBBsize); + + s = GRANT_LOCK(log); + free_bytes = xlog_space_left(log, + log->l_grant_reserve_cycle, + log->l_grant_reserve_bytes); + tail_lsn = log->l_tail_lsn; + free_blocks = BTOBBT(free_bytes); + + /* + * Set the threshold for the minimum number of free blocks in the + * log to the maximum of what the caller needs, one quarter of the + * log, and 256 blocks. 
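+ * + * (Worked example, illustrative numbers only: for l_logBBsize = 10000 + * and need_bytes = 32768, BTOBB(32768) = 64 blocks and 10000 >> 2 = 2500 + * blocks, so free_threshold = MAX(64, 2500, 256) = 2500.)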
+ */ + free_threshold = BTOBB(need_bytes); + free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2)); + free_threshold = MAX(free_threshold, 256); + if (free_blocks < free_threshold) { + threshold_block = BLOCK_LSN(tail_lsn, ARCH_NOCONVERT) + free_threshold; + threshold_cycle = CYCLE_LSN(tail_lsn, ARCH_NOCONVERT); + if (threshold_block >= log->l_logBBsize) { + threshold_block -= log->l_logBBsize; + threshold_cycle += 1; + } + ASSIGN_ANY_LSN(threshold_lsn, threshold_cycle, + threshold_block, ARCH_NOCONVERT); + + /* Don't pass in an lsn greater than the lsn of the last + * log record known to be on disk. + */ + if (XFS_LSN_CMP_ARCH(threshold_lsn, log->l_last_sync_lsn, ARCH_NOCONVERT) > 0) + threshold_lsn = log->l_last_sync_lsn; + } + GRANT_UNLOCK(log, s); + + /* + * Get the transaction layer to kick the dirty buffers out to + * disk asynchronously. No point in trying to do this if + * the filesystem is shutting down. + */ + if (threshold_lsn && + !XLOG_FORCED_SHUTDOWN(log)) + xfs_trans_push_ail(mp, threshold_lsn); +} /* xlog_grant_push_ail */ + + +/* + * Flush out the in-core log (iclog) to the on-disk log in a synchronous or + * asynchronous fashion. Previously, we should have moved the current iclog + * ptr in the log to point to the next available iclog. This allows further + * write to continue while this code syncs out an iclog ready to go. + * Before an in-core log can be written out, the data section must be scanned + * to save away the 1st word of each BBSIZE block into the header. We replace + * it with the current cycle count. Each BBSIZE block is tagged with the + * cycle count because there in an implicit assumption that drives will + * guarantee that entire 512 byte blocks get written at once. In other words, + * we can't have part of a 512 byte block written and part not written. By + * tagging each block, we will know which blocks are valid when recovering + * after an unclean shutdown. + * + * This routine is single threaded on the iclog. No other thread can be in + * this routine with the same iclog. Changing contents of iclog can there- + * fore be done without grabbing the state machine lock. Updating the global + * log will require grabbing the lock though. + * + * The entire log manager uses a logical block numbering scheme. Only + * log_sync (and then only bwrite()) know about the fact that the log may + * not start with block zero on a given device. The log block start offset + * is added immediately before calling bwrite(). + */ + +int +xlog_sync(xlog_t *log, + xlog_in_core_t *iclog) +{ + xfs_caddr_t dptr; /* pointer to byte sized element */ + xfs_buf_t *bp; + int i, ops; + uint roundup; + uint count; /* byte count of bwrite */ + int split = 0; /* split write into two regions */ + int error; + + XFS_STATS_INC(xfsstats.xs_log_writes); + ASSERT(iclog->ic_refcnt == 0); + + /* Round out the log write size */ + if (iclog->ic_offset & BBMASK) { + /* count of 0 is already accounted for up in + * xlog_state_sync_all(). Once in this routine, + * operations on the iclog are single threaded. 
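+ * + * (E.g., with an illustrative ic_offset of 1000: 1000 & BBMASK = 488, + * so ic_roundoff grows by 512 - 488 = 24 bytes and the write is padded + * out to the next BBSIZE (512 byte) boundary.)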
+ * + * Difference between rounded up size and size + */ + count = iclog->ic_offset & BBMASK; + iclog->ic_roundoff += BBSIZE - count; + } + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + unsigned sunit = BTOBB(log->l_mp->m_sb.sb_logsunit); + if (!sunit) + sunit = 1; + + count = BTOBB(log->l_iclog_hsize + iclog->ic_offset); + if (count & (sunit - 1)) { + roundup = sunit - (count & (sunit - 1)); + } else { + roundup = 0; + } + iclog->ic_offset += BBTOB(roundup); + } + + log->l_roundoff += iclog->ic_roundoff; + + xlog_pack_data(log, iclog); /* put cycle number in every block */ + + /* real byte length */ + INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset); + /* put ops count in correct order */ + ops = iclog->ic_header.h_num_logops; + INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); + + bp = iclog->ic_bp; + ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); + XFS_BUF_SET_ADDR(bp, BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT)); + + /* Count is already rounded up to a BBSIZE above */ + count = iclog->ic_offset + iclog->ic_roundoff; + ASSERT((count & BBMASK) == 0); + + /* Add for LR header */ + count += log->l_iclog_hsize; + XFS_STATS_ADD(xfsstats.xs_log_blocks, BTOBB(count)); + + /* Do we need to split this write into 2 parts? */ + if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) { + split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp))); + count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)); + iclog->ic_bwritecnt = 2; /* split into 2 writes */ + } else { + iclog->ic_bwritecnt = 1; + } + XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); + XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ + XFS_BUF_BUSY(bp); + XFS_BUF_ASYNC(bp); + /* + * Do a disk write cache flush for the log block. + * This is a bit of a sledgehammer, it would be better + * to use a tag barrier here that just prevents reordering. + * It may not be needed to flush the first split block in the log wrap + * case, but do it anyways to be safe -AK + */ + if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH)) + XFS_BUF_FLUSH(bp); + + ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); + ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); + + xlog_verify_iclog(log, iclog, count, B_TRUE); + + /* account for log which doesn't start at block #0 */ + XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); + /* + * Don't call xfs_bwrite here. We do log-syncs even when the filesystem + * is shutting down. + */ + XFS_BUF_WRITE(bp); + + if ((error = XFS_bwrite(bp))) { + xfs_ioerror_alert("xlog_sync", log->l_mp, bp, + XFS_BUF_ADDR(bp)); + return (error); + } + if (split) { + bp = iclog->ic_log->l_xbuf; + ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == + (unsigned long)1); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); + XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ + XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ + (__psint_t)count), split); + XFS_BUF_SET_FSPRIVATE(bp, iclog); + XFS_BUF_BUSY(bp); + XFS_BUF_ASYNC(bp); + if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH)) + XFS_BUF_FLUSH(bp); + dptr = XFS_BUF_PTR(bp); + /* + * Bump the cycle numbers at the start of each block + * since this part of the buffer is at the start of + * a new cycle. Watch out for the header magic number + * case, though. 
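+ * (If an incremented first word would collide with + * XLOG_HEADER_MAGIC_NUM it is bumped a second time, so a data block can + * never masquerade as the start of a record header.)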
+ */ + for (i=0; i<split; i += BBSIZE) { + INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1); + if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) + INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1); + dptr += BBSIZE; + } + + ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); + ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); + + /* account for internal log which doesn't start at block #0 */ + XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); + XFS_BUF_WRITE(bp); + if ((error = XFS_bwrite(bp))) { + xfs_ioerror_alert("xlog_sync (split)", log->l_mp, + bp, XFS_BUF_ADDR(bp)); + return (error); + } + } + return (0); +} /* xlog_sync */ + + +/* + * Unallocate a log structure + */ +void +xlog_unalloc_log(xlog_t *log) +{ + xlog_in_core_t *iclog, *next_iclog; + xlog_ticket_t *tic, *next_tic; + int i; + + + iclog = log->l_iclog; + for (i=0; i<log->l_iclog_bufs; i++) { + sv_destroy(&iclog->ic_forcesema); + sv_destroy(&iclog->ic_writesema); + XFS_freerbuf(iclog->ic_bp); +#ifdef DEBUG + if (iclog->ic_trace != NULL) { + ktrace_free(iclog->ic_trace); + } +#endif + next_iclog = iclog->ic_next; + kmem_free(iclog->hic_data, log->l_iclog_size); + kmem_free(iclog, sizeof(xlog_in_core_t)); + iclog = next_iclog; + } + freesema(&log->l_flushsema); + spinlock_destroy(&log->l_icloglock); + spinlock_destroy(&log->l_grant_lock); + + /* XXXsup take a look at this again. */ + if ((log->l_ticket_cnt != log->l_ticket_tcnt) && + !XLOG_FORCED_SHUTDOWN(log)) { + xfs_fs_cmn_err(CE_WARN, log->l_mp, + "xlog_unalloc_log: (cnt: %d, total: %d)", + log->l_ticket_cnt, log->l_ticket_tcnt); + /* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */ + + } else { + tic = log->l_unmount_free; + while (tic) { + next_tic = tic->t_next; + kmem_free(tic, NBPP); + tic = next_tic; + } + } + XFS_freerbuf(log->l_xbuf); +#ifdef DEBUG + if (log->l_trace != NULL) { + ktrace_free(log->l_trace); + } + if (log->l_grant_trace != NULL) { + ktrace_free(log->l_grant_trace); + } +#endif + log->l_mp->m_log = NULL; + kmem_free(log, sizeof(xlog_t)); +} /* xlog_unalloc_log */ + +/* + * Update counters atomically now that memcpy is done. + */ +/* ARGSUSED */ +static inline void +xlog_state_finish_copy(xlog_t *log, + xlog_in_core_t *iclog, + int record_cnt, + int copy_bytes) +{ + SPLDECL(s); + + s = LOG_LOCK(log); + + iclog->ic_header.h_num_logops += record_cnt; + iclog->ic_offset += copy_bytes; + + LOG_UNLOCK(log, s); +} /* xlog_state_finish_copy */ + + + + +/* + * Write some region out to in-core log + * + * This will be called when writing externally provided regions or when + * writing out a commit record for a given transaction. + * + * General algorithm: + * 1. Find total length of this write. This may include adding to the + * lengths passed in. + * 2. Check whether we violate the ticket's reservation. + * 3. While writing to this iclog + * A. Reserve as much space in this iclog as we can get + * B. If this is first write, save away start lsn + * C. While writing this region: + * 1. If first write of transaction, write start record + * 2. Write log operation header (header per region) + * 3. Find out if we can fit entire region into this iclog + * 4. Potentially, verify destination memcpy ptr + * 5. Memcpy (partial) region + * 6. If partial copy, release iclog; otherwise, continue + * copying more regions into current iclog + * 4. Mark want sync bit (in simulation mode) + * 5. Release iclog for potential flush to on-disk log. + * + * ERRORS: + * 1. Panic if reservation is overrun. This should never happen since + * reservation amounts are generated internal to the filesystem. + * NOTES: + * 1. Tickets are single threaded data structures. + * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the + * syncing routine.
When a single log_write region needs to span + * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set + * on all log operation writes which don't contain the end of the + * region. The XLOG_END_TRANS bit is used for the in-core log + * operation which contains the end of the continued log_write region. + * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, + * we don't really know exactly how much space will be used. As a result, + * we don't update ic_offset until the end when we know exactly how many + * bytes have been written out. + */ +int +xlog_write(xfs_mount_t * mp, + xfs_log_iovec_t reg[], + int nentries, + xfs_log_ticket_t tic, + xfs_lsn_t *start_lsn, + xlog_in_core_t **commit_iclog, + uint flags) +{ + xlog_t *log = mp->m_log; + xlog_ticket_t *ticket = (xlog_ticket_t *)tic; + xlog_op_header_t *logop_head; /* ptr to log operation header */ + xlog_in_core_t *iclog; /* ptr to current in-core log */ + __psint_t ptr; /* copy address into data region */ + int len; /* # xlog_write() bytes 2 still copy */ + int index; /* region index currently copying */ + int log_offset; /* offset (from 0) into data region */ + int start_rec_copy; /* # bytes to copy for start record */ + int partial_copy; /* did we split a region? */ + int partial_copy_len;/* # bytes copied if split region */ + int need_copy; /* # bytes need to memcpy this region */ + int copy_len; /* # bytes actually memcpy'ing */ + int copy_off; /* # bytes from entry start */ + int contwr; /* continued write of in-core log? */ + int firstwr = 0; /* first write of transaction */ + int error; + int record_cnt = 0, data_cnt = 0; + + partial_copy_len = partial_copy = 0; + + /* Calculate potential maximum space. Each region gets its own + * xlog_op_header_t and may need to be double word aligned. + */ + len = 0; + if (ticket->t_flags & XLOG_TIC_INITED) /* acct for start rec of xact */ + len += sizeof(xlog_op_header_t); + + for (index = 0; index < nentries; index++) { + len += sizeof(xlog_op_header_t); /* each region gets >= 1 */ + len += reg[index].i_len; + } + contwr = *start_lsn = 0; + + if (ticket->t_curr_res < len) { +#ifdef DEBUG + xlog_panic( + "xfs_log_write: reservation ran out. Need to up reservation"); +#else + /* Customer configurable panic */ + xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp, + "xfs_log_write: reservation ran out. Need to up reservation"); + /* If we did not panic, shutdown the filesystem */ + xfs_force_shutdown(mp, XFS_CORRUPT_INCORE); +#endif + } else + ticket->t_curr_res -= len; + + for (index = 0; index < nentries; ) { + if ((error = xlog_state_get_iclog_space(log, len, &iclog, ticket, + &contwr, &log_offset))) + return (error); + + ASSERT(log_offset <= iclog->ic_size - 1); + ptr = (__psint_t) ((char *)iclog->ic_datap+log_offset); + + /* start_lsn is the first lsn written to. That's all we need. */ + if (! *start_lsn) + *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); + + /* This loop writes out as many regions as can fit in the amount + * of space which was allocated by xlog_state_get_iclog_space(). + */ + while (index < nentries) { + ASSERT(reg[index].i_len % sizeof(__int32_t) == 0); + ASSERT((__psint_t)ptr % sizeof(__int32_t) == 0); + start_rec_copy = 0; + + /* If first write for transaction, insert start record. + * We can't be trying to commit if we are inited. We can't + * have any "partial_copy" if we are inited. 
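+ * + * (Sketch of the resulting layout for a transaction's first write: + * [start op hdr][op hdr][region 0][op hdr][region 1]...; the start + * record is just an xlog_op_header_t with zero length and + * XLOG_START_TRANS set in oh_flags.)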
+ */ + if (ticket->t_flags & XLOG_TIC_INITED) { + logop_head = (xlog_op_header_t *)ptr; + INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid); + logop_head->oh_clientid = ticket->t_clientid; + INT_ZERO(logop_head->oh_len, ARCH_CONVERT); + logop_head->oh_flags = XLOG_START_TRANS; + INT_ZERO(logop_head->oh_res2, ARCH_CONVERT); + ticket->t_flags &= ~XLOG_TIC_INITED; /* clear bit */ + firstwr = 1; /* increment log ops below */ + record_cnt++; + + start_rec_copy = sizeof(xlog_op_header_t); + xlog_write_adv_cnt(ptr, len, log_offset, start_rec_copy); + } + + /* Copy log operation header directly into data section */ + logop_head = (xlog_op_header_t *)ptr; + INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid); + logop_head->oh_clientid = ticket->t_clientid; + INT_ZERO(logop_head->oh_res2, ARCH_CONVERT); + + /* header copied directly */ + xlog_write_adv_cnt(ptr, len, log_offset, sizeof(xlog_op_header_t)); + + /* are we copying a commit or unmount record? */ + logop_head->oh_flags = flags; + + /* + * We've seen logs corrupted with bad transaction client + * ids. This makes sure that XFS doesn't generate them on. + * Turn this into an EIO and shut down the filesystem. + */ + switch (logop_head->oh_clientid) { + case XFS_TRANSACTION: + case XFS_VOLUME: + case XFS_LOG: + break; + default: + xfs_fs_cmn_err(CE_WARN, mp, + "Bad XFS transaction clientid 0x%x in ticket 0x%p", + logop_head->oh_clientid, tic); + return XFS_ERROR(EIO); + } + + /* Partial write last time? => (partial_copy != 0) + * need_copy is the amount we'd like to copy if everything could + * fit in the current memcpy. + */ + need_copy = reg[index].i_len - partial_copy_len; + + copy_off = partial_copy_len; + if (need_copy <= iclog->ic_size - log_offset) { /*complete write */ + INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len = need_copy); + if (partial_copy) + logop_head->oh_flags|= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); + partial_copy_len = partial_copy = 0; + } else { /* partial write */ + copy_len = iclog->ic_size - log_offset; + INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len); + logop_head->oh_flags |= XLOG_CONTINUE_TRANS; + if (partial_copy) + logop_head->oh_flags |= XLOG_WAS_CONT_TRANS; + partial_copy_len += copy_len; + partial_copy++; + len += sizeof(xlog_op_header_t); /* from splitting of region */ + /* account for new log op header */ + ticket->t_curr_res -= sizeof(xlog_op_header_t); + } + xlog_verify_dest_ptr(log, ptr); + + /* copy region */ + ASSERT(copy_len >= 0); + memcpy((xfs_caddr_t)ptr, reg[index].i_addr + copy_off, copy_len); + xlog_write_adv_cnt(ptr, len, log_offset, copy_len); + + /* make copy_len total bytes copied, including headers */ + copy_len += start_rec_copy + sizeof(xlog_op_header_t); + record_cnt++; + data_cnt += contwr ? 
copy_len : 0; + firstwr = 0; + if (partial_copy) { /* copied partial region */ + /* already marked WANT_SYNC by xlog_state_get_iclog_space */ + xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); + record_cnt = data_cnt = 0; + if ((error = xlog_state_release_iclog(log, iclog))) + return (error); + break; /* don't increment index */ + } else { /* copied entire region */ + index++; + partial_copy_len = partial_copy = 0; + + if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { + xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); + record_cnt = data_cnt = 0; + xlog_state_want_sync(log, iclog); + if (commit_iclog) { + ASSERT(flags & XLOG_COMMIT_TRANS); + *commit_iclog = iclog; + } else if ((error = xlog_state_release_iclog(log, iclog))) + return (error); + if (index == nentries) + return 0; /* we are done */ + else + break; + } + } /* if (partial_copy) */ + } /* while (index < nentries) */ + } /* for (index = 0; index < nentries; ) */ + ASSERT(len == 0); + + xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); + if (commit_iclog) { + ASSERT(flags & XLOG_COMMIT_TRANS); + *commit_iclog = iclog; + return 0; + } + return (xlog_state_release_iclog(log, iclog)); +} /* xlog_write */ + + +/***************************************************************************** + * + * State Machine functions + * + ***************************************************************************** + */ + +/* Clean iclogs starting from the head. This ordering must be + * maintained, so an iclog doesn't become ACTIVE beyond one that + * is SYNCING. This is also required to maintain the notion that we use + * a counting semaphore to hold off would-be writers to the log when every + * iclog is trying to sync to disk. + * + * State Change: DIRTY -> ACTIVE + */ +void +xlog_state_clean_log(xlog_t *log) +{ + xlog_in_core_t *iclog; + int changed = 0; + + iclog = log->l_iclog; + do { + if (iclog->ic_state == XLOG_STATE_DIRTY) { + iclog->ic_state = XLOG_STATE_ACTIVE; + iclog->ic_offset = 0; + iclog->ic_callback = 0; /* don't need to free */ + /* + * If the number of ops in this iclog indicates it just + * contains the dummy transaction, we can + * change state into IDLE (the second time around). + * Otherwise we should change the state into + * NEED, i.e. a dummy record is needed. + * We don't need to cover the dummy. + */ + if (!changed && + (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) { + changed = 1; + } else { + /* + * We have two dirty iclogs so start over. + * This could also be because the number of ops + * indicates this is not the dummy going out. + */ + changed = 2; + } + INT_ZERO(iclog->ic_header.h_num_logops, ARCH_CONVERT); + memset(iclog->ic_header.h_cycle_data, 0, + sizeof(iclog->ic_header.h_cycle_data)); + INT_ZERO(iclog->ic_header.h_lsn, ARCH_CONVERT); + } else if (iclog->ic_state == XLOG_STATE_ACTIVE) + /* do nothing */; + else + break; /* stop cleaning */ + iclog = iclog->ic_next; + } while (iclog != log->l_iclog); + + /* log is locked when we are called */ + /* + * Change state for the dummy log recording. + * We usually go to NEED. But we go to NEED2 if 'changed' indicates + * we are done writing the dummy record. + * If we are done with the second dummy record (DONE2), then + * we go to IDLE.
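+ * + * Summarizing the switch below: when changed == 1 (the dummy went out), + * DONE -> NEED2 and DONE2 -> IDLE; when changed == 2 (real work went + * out), DONE and DONE2 fall back to NEED; IDLE, NEED and NEED2 always + * move to NEED.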
+ */
+ if (changed) {
+ switch (log->l_covered_state) {
+ case XLOG_STATE_COVER_IDLE:
+ case XLOG_STATE_COVER_NEED:
+ case XLOG_STATE_COVER_NEED2:
+ log->l_covered_state = XLOG_STATE_COVER_NEED;
+ break;
+
+ case XLOG_STATE_COVER_DONE:
+ if (changed == 1)
+ log->l_covered_state = XLOG_STATE_COVER_NEED2;
+ else
+ log->l_covered_state = XLOG_STATE_COVER_NEED;
+ break;
+
+ case XLOG_STATE_COVER_DONE2:
+ if (changed == 1)
+ log->l_covered_state = XLOG_STATE_COVER_IDLE;
+ else
+ log->l_covered_state = XLOG_STATE_COVER_NEED;
+ break;
+
+ default:
+ ASSERT(0);
+ }
+ }
+} /* xlog_state_clean_log */
+
+STATIC xfs_lsn_t
+xlog_get_lowest_lsn(
+ xlog_t *log)
+{
+ xlog_in_core_t *lsn_log;
+ xfs_lsn_t lowest_lsn, lsn;
+
+ lsn_log = log->l_iclog;
+ lowest_lsn = 0;
+ do {
+ if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
+ lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT);
+ if ((lsn && !lowest_lsn) ||
+ (XFS_LSN_CMP_ARCH(lsn, lowest_lsn, ARCH_NOCONVERT) < 0)) {
+ lowest_lsn = lsn;
+ }
+ }
+ lsn_log = lsn_log->ic_next;
+ } while (lsn_log != log->l_iclog);
+ return(lowest_lsn);
+}
+
+
+STATIC void
+xlog_state_do_callback(
+ xlog_t *log,
+ int aborted,
+ xlog_in_core_t *ciclog)
+{
+ xlog_in_core_t *iclog;
+ xlog_in_core_t *first_iclog; /* used to know when we've
+ * processed all iclogs once */
+ xfs_log_callback_t *cb, *cb_next;
+ int flushcnt = 0;
+ xfs_lsn_t lowest_lsn;
+ int ioerrors; /* counter: iclogs with errors */
+ int loopdidcallbacks; /* flag: inner loop did callbacks */
+ int funcdidcallbacks; /* flag: function did callbacks */
+ int repeats; /* for issuing console warnings if
+ * looping too many times */
+ SPLDECL(s);
+
+ s = LOG_LOCK(log);
+ first_iclog = iclog = log->l_iclog;
+ ioerrors = 0;
+ funcdidcallbacks = 0;
+ repeats = 0;
+
+ do {
+ /*
+ * Scan all iclogs starting with the one pointed to by the
+ * log. Reset this starting point each time the log is
+ * unlocked (during callbacks).
+ *
+ * Keep looping through iclogs until one full pass is made
+ * without running any callbacks.
+ */
+ first_iclog = log->l_iclog;
+ iclog = log->l_iclog;
+ loopdidcallbacks = 0;
+ repeats++;
+
+ do {
+
+ /* skip all iclogs in the ACTIVE & DIRTY states */
+ if (iclog->ic_state &
+ (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
+ iclog = iclog->ic_next;
+ continue;
+ }
+
+ /*
+ * Between marking a filesystem SHUTDOWN and stopping
+ * the log, we do flush all iclogs to disk (if there
+ * wasn't a log I/O error). So, we do want things to
+ * go smoothly in case of just a SHUTDOWN w/o a
+ * LOG_IO_ERROR.
+ */
+ if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
+ /*
+ * Can only perform callbacks in order. Since
+ * this iclog is not in the DONE_SYNC/
+ * DO_CALLBACK state, we skip the rest and
+ * just try to clean up. If we set our iclog
+ * to DO_CALLBACK, we will not process it when
+ * we retry since a previous iclog is in the
+ * CALLBACK and the state cannot change since
+ * we are holding the LOG_LOCK.
+ */
+ if (!(iclog->ic_state &
+ (XLOG_STATE_DONE_SYNC |
+ XLOG_STATE_DO_CALLBACK))) {
+ if (ciclog && (ciclog->ic_state ==
+ XLOG_STATE_DONE_SYNC)) {
+ ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
+ }
+ break;
+ }
+ /*
+ * We now have an iclog that is in either the
+ * DO_CALLBACK or DONE_SYNC states. The other
+ * states (WANT_SYNC, SYNCING, or CALLBACK)
+ * were caught by the above if and are going
+ * to be cleaned up (i.e. we aren't doing
+ * their callbacks); see the above if.
+ */
+
+ /*
+ * We will do one more check here to see if we
+ * have chased our tail around.
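+ *
+ * For example (hypothetical LSNs): if this iclog holds
+ * h_lsn 120 but another iclog that is still syncing holds
+ * the lowest outstanding lsn, 100, we must not run our
+ * callbacks yet. xlog_get_lowest_lsn() returns 100, the
+ * compare below comes out negative, and we move on to
+ * ic_next, leaving this iclog for another pass.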
+ */ + + lowest_lsn = xlog_get_lowest_lsn(log); + if (lowest_lsn && ( + XFS_LSN_CMP_ARCH( + lowest_lsn, + INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT), + ARCH_NOCONVERT + )<0)) { + iclog = iclog->ic_next; + continue; /* Leave this iclog for + * another thread */ + } + + iclog->ic_state = XLOG_STATE_CALLBACK; + + LOG_UNLOCK(log, s); + + /* l_last_sync_lsn field protected by + * GRANT_LOCK. Don't worry about iclog's lsn. + * No one else can be here except us. + */ + s = GRANT_LOCK(log); + ASSERT(XFS_LSN_CMP_ARCH( + log->l_last_sync_lsn, + INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT), + ARCH_NOCONVERT + )<=0); + log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); + GRANT_UNLOCK(log, s); + + /* + * Keep processing entries in the callback list + * until we come around and it is empty. We + * need to atomically see that the list is + * empty and change the state to DIRTY so that + * we don't miss any more callbacks being added. + */ + s = LOG_LOCK(log); + } else { + ioerrors++; + } + cb = iclog->ic_callback; + + while (cb != 0) { + iclog->ic_callback_tail = &(iclog->ic_callback); + iclog->ic_callback = 0; + LOG_UNLOCK(log, s); + + /* perform callbacks in the order given */ + for (; cb != 0; cb = cb_next) { + cb_next = cb->cb_next; + cb->cb_func(cb->cb_arg, aborted); + } + s = LOG_LOCK(log); + cb = iclog->ic_callback; + } + + loopdidcallbacks++; + funcdidcallbacks++; + + ASSERT(iclog->ic_callback == 0); + if (!(iclog->ic_state & XLOG_STATE_IOERROR)) + iclog->ic_state = XLOG_STATE_DIRTY; + + /* + * Transition from DIRTY to ACTIVE if applicable. + * NOP if STATE_IOERROR. + */ + xlog_state_clean_log(log); + + /* wake up threads waiting in xfs_log_force() */ + sv_broadcast(&iclog->ic_forcesema); + + iclog = iclog->ic_next; + } while (first_iclog != iclog); + if (repeats && (repeats % 10) == 0) { + xfs_fs_cmn_err(CE_WARN, log->l_mp, + "xlog_state_do_callback: looping %d", repeats); + } + } while (!ioerrors && loopdidcallbacks); + + /* + * make one last gasp attempt to see if iclogs are being left in + * limbo.. + */ +#ifdef DEBUG + if (funcdidcallbacks) { + first_iclog = iclog = log->l_iclog; + do { + ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK); + /* + * Terminate the loop if iclogs are found in states + * which will cause other threads to clean up iclogs. + * + * SYNCING - i/o completion will go through logs + * DONE_SYNC - interrupt thread should be waiting for + * LOG_LOCK + * IOERROR - give up hope all ye who enter here + */ + if (iclog->ic_state == XLOG_STATE_SYNCING || + iclog->ic_state == XLOG_STATE_DONE_SYNC || + iclog->ic_state == XLOG_STATE_IOERROR ) + break; + iclog = iclog->ic_next; + } while (first_iclog != iclog); + } +#endif + + if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) { + flushcnt = log->l_flushcnt; + log->l_flushcnt = 0; + } + LOG_UNLOCK(log, s); + while (flushcnt--) + vsema(&log->l_flushsema); +} /* xlog_state_do_callback */ + + +/* + * Finish transitioning this iclog to the dirty state. + * + * Make sure that we completely execute this routine only when this is + * the last call to the iclog. There is a good chance that iclog flushes, + * when we reach the end of the physical log, get turned into 2 separate + * calls to bwrite. Hence, one iclog flush could generate two calls to this + * routine. By using the reference count bwritecnt, we guarantee that only + * the second completion goes through. + * + * Callbacks could take time, so they are done outside the scope of the + * global state machine log lock. 
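+ * For instance (an illustrative sequence): when an iclog write wraps
+ * the physical end of the log, xlog_sync() issues two bwrites, so the
+ * first completion here merely decrements ic_bwritecnt from 2 to 1 and
+ * returns; only the second completion moves the iclog to DONE_SYNC and
+ * lets the callbacks run.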
Assume that the calls to cvsema won't + * take a long time. At least we know it won't sleep. + */ +void +xlog_state_done_syncing( + xlog_in_core_t *iclog, + int aborted) +{ + xlog_t *log = iclog->ic_log; + SPLDECL(s); + + s = LOG_LOCK(log); + + ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || + iclog->ic_state == XLOG_STATE_IOERROR); + ASSERT(iclog->ic_refcnt == 0); + ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2); + + + /* + * If we got an error, either on the first buffer, or in the case of + * split log writes, on the second, we mark ALL iclogs STATE_IOERROR, + * and none should ever be attempted to be written to disk + * again. + */ + if (iclog->ic_state != XLOG_STATE_IOERROR) { + if (--iclog->ic_bwritecnt == 1) { + LOG_UNLOCK(log, s); + return; + } + iclog->ic_state = XLOG_STATE_DONE_SYNC; + } + + /* + * Someone could be sleeping prior to writing out the next + * iclog buffer, we wake them all, one will get to do the + * I/O, the others get to wait for the result. + */ + sv_broadcast(&iclog->ic_writesema); + LOG_UNLOCK(log, s); + xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ +} /* xlog_state_done_syncing */ + + +/* + * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must + * sleep. The flush semaphore is set to the number of in-core buffers and + * decremented around disk syncing. Therefore, if all buffers are syncing, + * this semaphore will cause new writes to sleep until a sync completes. + * Otherwise, this code just does p() followed by v(). This approximates + * a sleep/wakeup except we can't race. + * + * The in-core logs are used in a circular fashion. They are not used + * out-of-order even when an iclog past the head is free. + * + * return: + * * log_offset where xlog_write() can start writing into the in-core + * log's data space. + * * in-core log pointer to which xlog_write() should write. + * * boolean indicating this is a continued write to an in-core log. + * If this is the last write, then the in-core log's offset field + * needs to be incremented, depending on the amount of data which + * is copied. + */ +int +xlog_state_get_iclog_space(xlog_t *log, + int len, + xlog_in_core_t **iclogp, + xlog_ticket_t *ticket, + int *continued_write, + int *logoffsetp) +{ + SPLDECL(s); + int log_offset; + xlog_rec_header_t *head; + xlog_in_core_t *iclog; + int error; + +restart: + s = LOG_LOCK(log); + if (XLOG_FORCED_SHUTDOWN(log)) { + LOG_UNLOCK(log, s); + return XFS_ERROR(EIO); + } + + iclog = log->l_iclog; + if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) { + log->l_flushcnt++; + LOG_UNLOCK(log, s); + xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH); + XFS_STATS_INC(xfsstats.xs_log_noiclogs); + /* Ensure that log writes happen */ + psema(&log->l_flushsema, PINOD); + goto restart; + } + ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); + head = &iclog->ic_header; + + iclog->ic_refcnt++; /* prevents sync */ + log_offset = iclog->ic_offset; + + /* On the 1st write to an iclog, figure out lsn. This works + * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are + * committing to. If the offset is set, that's how many blocks + * must be written. 
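+ *
+ * As a worked illustration (numbers are made up): on the first
+ * write into an iclog (log_offset == 0) the code below charges
+ * the ticket l_iclog_hsize bytes for the record header, plus
+ * whatever roundoff was left over from the previous iclog, before
+ * any region data is accounted for.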
+ */ + if (log_offset == 0) { + ticket->t_curr_res -= log->l_iclog_hsize; + INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle); + ASSIGN_LSN(head->h_lsn, log, ARCH_CONVERT); + ASSERT(log->l_curr_block >= 0); + + /* round off error from last write with this iclog */ + ticket->t_curr_res -= iclog->ic_roundoff; + log->l_roundoff -= iclog->ic_roundoff; + iclog->ic_roundoff = 0; + } + + /* If there is enough room to write everything, then do it. Otherwise, + * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC + * bit is on, so this will get flushed out. Don't update ic_offset + * until you know exactly how many bytes get copied. Therefore, wait + * until later to update ic_offset. + * + * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's + * can fit into remaining data section. + */ + if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { + xlog_state_switch_iclogs(log, iclog, iclog->ic_size); + + /* If I'm the only one writing to this iclog, sync it to disk */ + if (iclog->ic_refcnt == 1) { + LOG_UNLOCK(log, s); + if ((error = xlog_state_release_iclog(log, iclog))) + return (error); + } else { + iclog->ic_refcnt--; + LOG_UNLOCK(log, s); + } + goto restart; + } + + /* Do we have enough room to write the full amount in the remainder + * of this iclog? Or must we continue a write on the next iclog and + * mark this iclog as completely taken? In the case where we switch + * iclogs (to mark it taken), this particular iclog will release/sync + * to disk in xlog_write(). + */ + if (len <= iclog->ic_size - iclog->ic_offset) { + *continued_write = 0; + iclog->ic_offset += len; + } else { + *continued_write = 1; + xlog_state_switch_iclogs(log, iclog, iclog->ic_size); + } + *iclogp = iclog; + + ASSERT(iclog->ic_offset <= iclog->ic_size); + LOG_UNLOCK(log, s); + + *logoffsetp = log_offset; + return 0; +} /* xlog_state_get_iclog_space */ + +/* + * Atomically get the log space required for a log ticket. + * + * Once a ticket gets put onto the reserveq, it will only return after + * the needed reservation is satisfied. + */ +STATIC int +xlog_grant_log_space(xlog_t *log, + xlog_ticket_t *tic) +{ + int free_bytes; + int need_bytes; + SPLDECL(s); +#ifdef DEBUG + xfs_lsn_t tail_lsn; +#endif + + +#ifdef DEBUG + if (log->l_flags & XLOG_ACTIVE_RECOVERY) + panic("grant Recovery problem"); +#endif + + /* Is there space or do we need to sleep? */ + s = GRANT_LOCK(log); + xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter"); + + /* something is already sleeping; insert new transaction at end */ + if (log->l_reserve_headq) { + XLOG_INS_TICKETQ(log->l_reserve_headq, tic); + xlog_trace_loggrant(log, tic, + "xlog_grant_log_space: sleep 1"); + /* + * Gotta check this before going to sleep, while we're + * holding the grant lock. + */ + if (XLOG_FORCED_SHUTDOWN(log)) + goto error_return; + + XFS_STATS_INC(xfsstats.xs_sleep_logspace); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); + /* + * If we got an error, and the filesystem is shutting down, + * we'll catch it down below. So just continue... 
+ */
+ xlog_trace_loggrant(log, tic,
+ "xlog_grant_log_space: wake 1");
+ s = GRANT_LOCK(log);
+ }
+ if (tic->t_flags & XFS_LOG_PERM_RESERV)
+ need_bytes = tic->t_unit_res*tic->t_ocnt;
+ else
+ need_bytes = tic->t_unit_res;
+
+redo:
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto error_return;
+
+ free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
+ log->l_grant_reserve_bytes);
+ if (free_bytes < need_bytes) {
+ if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
+ XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
+ xlog_trace_loggrant(log, tic,
+ "xlog_grant_log_space: sleep 2");
+ XFS_STATS_INC(xfsstats.xs_sleep_logspace);
+ sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+
+ if (XLOG_FORCED_SHUTDOWN(log)) {
+ s = GRANT_LOCK(log);
+ goto error_return;
+ }
+
+ xlog_trace_loggrant(log, tic,
+ "xlog_grant_log_space: wake 2");
+ xlog_grant_push_ail(log->l_mp, need_bytes);
+ s = GRANT_LOCK(log);
+ goto redo;
+ } else if (tic->t_flags & XLOG_TIC_IN_Q)
+ XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+
+ /* we've got enough space */
+ XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w');
+ XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r');
+#ifdef DEBUG
+ tail_lsn = log->l_tail_lsn;
+ /*
+ * Check to make sure the grant write head didn't just overlap
+ * the tail. If the cycles are the same, we can't be overlapping.
+ * Otherwise, make sure that the cycles differ by exactly one and
+ * check the byte count.
+ */
+ if (CYCLE_LSN(tail_lsn, ARCH_NOCONVERT) != log->l_grant_write_cycle) {
+ ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn, ARCH_NOCONVERT));
+ ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn, ARCH_NOCONVERT)));
+ }
+#endif
+ xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit");
+ xlog_verify_grant_head(log, 1);
+ GRANT_UNLOCK(log, s);
+ return 0;
+
+ error_return:
+ if (tic->t_flags & XLOG_TIC_IN_Q)
+ XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+ xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
+ /*
+ * If we are failing, make sure the ticket doesn't have any
+ * current reservations. We don't want to add this back when
+ * the ticket/transaction gets cancelled.
+ */
+ tic->t_curr_res = 0;
+ tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
+ GRANT_UNLOCK(log, s);
+ return XFS_ERROR(EIO);
+} /* xlog_grant_log_space */
+
+
+/*
+ * Replenish the byte reservation required by moving the grant write head.
+ */
+STATIC int
+xlog_regrant_write_log_space(xlog_t *log,
+ xlog_ticket_t *tic)
+{
+ SPLDECL(s);
+ int free_bytes, need_bytes;
+ xlog_ticket_t *ntic;
+#ifdef DEBUG
+ xfs_lsn_t tail_lsn;
+#endif
+
+ tic->t_curr_res = tic->t_unit_res;
+
+ if (tic->t_cnt > 0)
+ return (0);
+
+#ifdef DEBUG
+ if (log->l_flags & XLOG_ACTIVE_RECOVERY)
+ panic("regrant Recovery problem");
+#endif
+
+ s = GRANT_LOCK(log);
+ xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter");
+
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto error_return;
+
+ /* If there are other waiters on the queue then give them a
+ * chance at logspace before us. Wake up the first waiters;
+ * if we do not wake up all the waiters, then go to sleep waiting
+ * for more free space, otherwise try to get some space for
+ * this transaction.
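+ *
+ * Illustration (made-up numbers): with 10000 bytes free and
+ * three queued waiters needing 4000 each, only the first two
+ * get signalled; since we could not wake them all, we queue
+ * ourselves behind them and sleep rather than jumping the line.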
+ */ + + if ((ntic = log->l_write_headq)) { + free_bytes = xlog_space_left(log, log->l_grant_write_cycle, + log->l_grant_write_bytes); + do { + ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV); + + if (free_bytes < ntic->t_unit_res) + break; + free_bytes -= ntic->t_unit_res; + sv_signal(&ntic->t_sema); + ntic = ntic->t_next; + } while (ntic != log->l_write_headq); + + if (ntic != log->l_write_headq) { + if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) + XLOG_INS_TICKETQ(log->l_write_headq, tic); + + xlog_trace_loggrant(log, tic, + "xlog_regrant_write_log_space: sleep 1"); + XFS_STATS_INC(xfsstats.xs_sleep_logspace); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, + &log->l_grant_lock, s); + + /* If we're shutting down, this tic is already + * off the queue */ + if (XLOG_FORCED_SHUTDOWN(log)) { + s = GRANT_LOCK(log); + goto error_return; + } + + xlog_trace_loggrant(log, tic, + "xlog_regrant_write_log_space: wake 1"); + xlog_grant_push_ail(log->l_mp, tic->t_unit_res); + s = GRANT_LOCK(log); + } + } + + need_bytes = tic->t_unit_res; + +redo: + if (XLOG_FORCED_SHUTDOWN(log)) + goto error_return; + + free_bytes = xlog_space_left(log, log->l_grant_write_cycle, + log->l_grant_write_bytes); + if (free_bytes < need_bytes) { + if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) + XLOG_INS_TICKETQ(log->l_write_headq, tic); + XFS_STATS_INC(xfsstats.xs_sleep_logspace); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); + + /* If we're shutting down, this tic is already off the queue */ + if (XLOG_FORCED_SHUTDOWN(log)) { + s = GRANT_LOCK(log); + goto error_return; + } + + xlog_trace_loggrant(log, tic, + "xlog_regrant_write_log_space: wake 2"); + xlog_grant_push_ail(log->l_mp, need_bytes); + s = GRANT_LOCK(log); + goto redo; + } else if (tic->t_flags & XLOG_TIC_IN_Q) + XLOG_DEL_TICKETQ(log->l_write_headq, tic); + + XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); /* we've got enough space */ +#ifdef DEBUG + tail_lsn = log->l_tail_lsn; + if (CYCLE_LSN(tail_lsn, ARCH_NOCONVERT) != log->l_grant_write_cycle) { + ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn, ARCH_NOCONVERT)); + ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn, ARCH_NOCONVERT))); + } +#endif + + xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); + xlog_verify_grant_head(log, 1); + GRANT_UNLOCK(log, s); + return (0); + + + error_return: + if (tic->t_flags & XLOG_TIC_IN_Q) + XLOG_DEL_TICKETQ(log->l_reserve_headq, tic); + xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret"); + /* + * If we are failing, make sure the ticket doesn't have any + * current reservations. We don't want to add this back when + * the ticket/transaction gets cancelled. + */ + tic->t_curr_res = 0; + tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ + GRANT_UNLOCK(log, s); + return XFS_ERROR(EIO); +} /* xlog_regrant_write_log_space */ + + +/* The first cnt-1 times through here we don't need to + * move the grant write head because the permanent + * reservation has reserved cnt times the unit amount. + * Release part of current permanent unit reservation and + * reset current reservation to be one units worth. Also + * move grant reservation head forward. 
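+ *
+ * Worked example (illustrative numbers): a permanent ticket with
+ * t_ocnt == 3 and t_unit_res == 2000 reserved 3 * 2000 = 6000
+ * bytes up front, so the first two commits can refill t_curr_res
+ * from the pre-reserved space simply by decrementing t_cnt; only
+ * once t_cnt is exhausted must the reserve head move forward
+ * again for each further unit.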
+ */ +STATIC void +xlog_regrant_reserve_log_space(xlog_t *log, + xlog_ticket_t *ticket) +{ + SPLDECL(s); + + xlog_trace_loggrant(log, ticket, + "xlog_regrant_reserve_log_space: enter"); + if (ticket->t_cnt > 0) + ticket->t_cnt--; + + s = GRANT_LOCK(log); + XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w'); + XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r'); + ticket->t_curr_res = ticket->t_unit_res; + xlog_trace_loggrant(log, ticket, + "xlog_regrant_reserve_log_space: sub current res"); + xlog_verify_grant_head(log, 1); + + /* just return if we still have some of the pre-reserved space */ + if (ticket->t_cnt > 0) { + GRANT_UNLOCK(log, s); + return; + } + + XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r'); + xlog_trace_loggrant(log, ticket, + "xlog_regrant_reserve_log_space: exit"); + xlog_verify_grant_head(log, 0); + GRANT_UNLOCK(log, s); + ticket->t_curr_res = ticket->t_unit_res; +} /* xlog_regrant_reserve_log_space */ + + +/* + * Give back the space left from a reservation. + * + * All the information we need to make a correct determination of space left + * is present. For non-permanent reservations, things are quite easy. The + * count should have been decremented to zero. We only need to deal with the + * space remaining in the current reservation part of the ticket. If the + * ticket contains a permanent reservation, there may be left over space which + * needs to be released. A count of N means that N-1 refills of the current + * reservation can be done before we need to ask for more space. The first + * one goes to fill up the first current reservation. Once we run out of + * space, the count will stay at zero and the only space remaining will be + * in the current reservation field. + */ +STATIC void +xlog_ungrant_log_space(xlog_t *log, + xlog_ticket_t *ticket) +{ + SPLDECL(s); + + if (ticket->t_cnt > 0) + ticket->t_cnt--; + + s = GRANT_LOCK(log); + xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); + + XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w'); + XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r'); + + xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current"); + + /* If this is a permanent reservation ticket, we may be able to free + * up more space based on the remaining count. + */ + if (ticket->t_cnt > 0) { + ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); + XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'w'); + XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'r'); + } + + xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit"); + xlog_verify_grant_head(log, 1); + GRANT_UNLOCK(log, s); + xfs_log_move_tail(log->l_mp, 1); +} /* xlog_ungrant_log_space */ + + +/* + * Atomically put back used ticket. + */ +void +xlog_state_put_ticket(xlog_t *log, + xlog_ticket_t *tic) +{ + unsigned long s; + + s = LOG_LOCK(log); + xlog_ticket_put(log, tic); + LOG_UNLOCK(log, s); +} /* xlog_state_put_ticket */ + +/* + * Flush iclog to disk if this is the last reference to the given iclog and + * the WANT_SYNC bit is set. + * + * When this function is entered, the iclog is not necessarily in the + * WANT_SYNC state. It may be sitting around waiting to get filled. + * + * + */ +int +xlog_state_release_iclog(xlog_t *log, + xlog_in_core_t *iclog) +{ + SPLDECL(s); + int sync = 0; /* do we sync? 
*/
+
+ xlog_assign_tail_lsn(log->l_mp);
+
+ s = LOG_LOCK(log);
+
+ if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ LOG_UNLOCK(log, s);
+ return XFS_ERROR(EIO);
+ }
+
+ ASSERT(iclog->ic_refcnt > 0);
+ ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
+ iclog->ic_state == XLOG_STATE_WANT_SYNC);
+
+ if (--iclog->ic_refcnt == 0 &&
+ iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+ sync++;
+ iclog->ic_state = XLOG_STATE_SYNCING;
+ INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn);
+ xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+ /* cycle incremented when incrementing curr_block */
+ }
+
+ LOG_UNLOCK(log, s);
+
+ /*
+ * We let the log lock go, so it's possible that we hit a log I/O
+ * error or some other SHUTDOWN condition that marks the iclog
+ * as XLOG_STATE_IOERROR before the bwrite. However, we know that
+ * this iclog has consistent data, so we ignore IOERROR
+ * flags after this point.
+ */
+ if (sync) {
+ return xlog_sync(log, iclog);
+ }
+ return (0);
+
+} /* xlog_state_release_iclog */
+
+
+/*
+ * This routine will mark the current iclog in the ring as WANT_SYNC
+ * and move the current iclog pointer to the next iclog in the ring.
+ * When this routine is called from xlog_state_get_iclog_space(), the
+ * exact size of the iclog has not yet been determined. All we know
+ * is that we have run out of space in this log record.
+ */
+STATIC void
+xlog_state_switch_iclogs(xlog_t *log,
+ xlog_in_core_t *iclog,
+ int eventual_size)
+{
+ uint roundup;
+
+ ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+ if (!eventual_size)
+ eventual_size = iclog->ic_offset;
+ iclog->ic_state = XLOG_STATE_WANT_SYNC;
+ INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block);
+ log->l_prev_block = log->l_curr_block;
+ log->l_prev_cycle = log->l_curr_cycle;
+
+ /* roll log?: ic_offset changed later */
+ log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
+
+ /* Round up to next log-sunit */
+ if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
+ if (log->l_curr_block & (log->l_mp->m_lstripemask - 1)) {
+ roundup = log->l_mp->m_lstripemask -
+ (log->l_curr_block &
+ (log->l_mp->m_lstripemask - 1));
+ } else {
+ roundup = 0;
+ }
+ log->l_curr_block += roundup;
+ }
+
+ if (log->l_curr_block >= log->l_logBBsize) {
+ log->l_curr_cycle++;
+ if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
+ log->l_curr_cycle++;
+ log->l_curr_block -= log->l_logBBsize;
+ ASSERT(log->l_curr_block >= 0);
+ }
+ ASSERT(iclog == log->l_iclog);
+ log->l_iclog = iclog->ic_next;
+} /* xlog_state_switch_iclogs */
+
+
+/*
+ * Write out all data in the in-core log as of this exact moment in time.
+ *
+ * Data may be written to the in-core log during this call. However,
+ * we don't guarantee this data will be written out. A change from past
+ * implementation means this routine will *not* write out zero length LRs.
+ *
+ * Basically, we try and perform an intelligent scan of the in-core logs.
+ * If we determine there is no flushable data, we just return. There is no
+ * flushable data if:
+ *
+ * 1. the current iclog is active and has no data; the previous iclog
+ * is in the active or dirty state.
+ * 2. the current iclog is dirty, and the previous iclog is in the
+ * active or dirty state.
+ *
+ * We may sleep (call psema) if:
+ *
+ * 1. the current iclog is not in the active nor dirty state.
+ * 2. the current iclog is dirty, and the previous iclog is not in the
+ * active nor dirty state.
+ * 3. the current iclog is active, and there is another thread writing
+ * to this particular iclog.
+ * 4. a) the current iclog is active and has no other writers
+ * b) when we return from flushing out this iclog, it is still
+ * not in the active nor dirty state.
+ */
+STATIC int
+xlog_state_sync_all(xlog_t *log, uint flags)
+{
+ xlog_in_core_t *iclog;
+ xfs_lsn_t lsn;
+ SPLDECL(s);
+
+ s = LOG_LOCK(log);
+
+ iclog = log->l_iclog;
+ if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ LOG_UNLOCK(log, s);
+ return XFS_ERROR(EIO);
+ }
+
+ /* If the head iclog is not active nor dirty, we just attach
+ * ourselves to the head and go to sleep.
+ */
+ if (iclog->ic_state == XLOG_STATE_ACTIVE ||
+ iclog->ic_state == XLOG_STATE_DIRTY) {
+ /*
+ * If the head is dirty or (active and empty), then
+ * we need to look at the previous iclog. If the previous
+ * iclog is active or dirty we are done. There is nothing
+ * to sync out. Otherwise, we attach ourselves to the
+ * previous iclog and go to sleep.
+ */
+ if (iclog->ic_state == XLOG_STATE_DIRTY ||
+ (iclog->ic_refcnt == 0 && iclog->ic_offset == 0)) {
+ iclog = iclog->ic_prev;
+ if (iclog->ic_state == XLOG_STATE_ACTIVE ||
+ iclog->ic_state == XLOG_STATE_DIRTY)
+ goto no_sleep;
+ else
+ goto maybe_sleep;
+ } else {
+ if (iclog->ic_refcnt == 0) {
+ /* We are the only one with access to this
+ * iclog. Flush it out now. There should
+ * be a roundoff of zero to show that someone
+ * has already taken care of the roundoff from
+ * the previous sync.
+ */
+ ASSERT(iclog->ic_roundoff == 0);
+ iclog->ic_refcnt++;
+ lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+ xlog_state_switch_iclogs(log, iclog, 0);
+ LOG_UNLOCK(log, s);
+
+ if (xlog_state_release_iclog(log, iclog))
+ return XFS_ERROR(EIO);
+ s = LOG_LOCK(log);
+ if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
+ iclog->ic_state != XLOG_STATE_DIRTY)
+ goto maybe_sleep;
+ else
+ goto no_sleep;
+ } else {
+ /* Someone else is writing to this iclog.
+ * Use its call to flush out the data. However,
+ * the other thread may not force out this LR,
+ * so we mark it WANT_SYNC.
+ */
+ xlog_state_switch_iclogs(log, iclog, 0);
+ goto maybe_sleep;
+ }
+ }
+ }
+
+ /* By the time we come around again, the iclog could've been filled
+ * which would give it another lsn. If we have a new lsn, just
+ * return because the relevant data has been flushed.
+ */
+maybe_sleep:
+ if (flags & XFS_LOG_SYNC) {
+ /*
+ * We must check if we're shutting down here, before
+ * we wait, while we're holding the LOG_LOCK.
+ * Then we check again after waking up, in case our
+ * sleep was disturbed by bad news.
+ */
+ if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ LOG_UNLOCK(log, s);
+ return XFS_ERROR(EIO);
+ }
+ XFS_STATS_INC(xfsstats.xs_log_force_sleep);
+ sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s);
+ /*
+ * No need to grab the log lock here since we're
+ * only deciding whether or not to return EIO
+ * and the memory read should be atomic.
+ */
+ if (iclog->ic_state & XLOG_STATE_IOERROR)
+ return XFS_ERROR(EIO);
+
+ } else {
+
+no_sleep:
+ LOG_UNLOCK(log, s);
+ }
+ return 0;
+} /* xlog_state_sync_all */
+
+
+/*
+ * Used by code which implements synchronous log forces.
+ *
+ * Find in-core log with lsn.
+ * If it is in the DIRTY state, just return.
+ * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
+ * state and go to sleep or return.
+ * If it is in any other state, go to sleep or return.
+ *
+ * If filesystem activity goes to zero, the iclog will get flushed only by
+ * bdflush().
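+ *
+ * E.g. (hypothetical lsn): a caller forcing lsn cycle 2, block
+ * 0x1500 walks the ring until an iclog's h_lsn matches. If that
+ * iclog is still ACTIVE while its predecessor is being synced,
+ * we sleep once on the predecessor's write semaphore (the
+ * already_slept flag below) so a few more transactions can batch
+ * into this iclog before we force it out ourselves.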
+ */ +int +xlog_state_sync(xlog_t *log, + xfs_lsn_t lsn, + uint flags) +{ + xlog_in_core_t *iclog; + int already_slept = 0; + SPLDECL(s); + + +try_again: + s = LOG_LOCK(log); + iclog = log->l_iclog; + + if (iclog->ic_state & XLOG_STATE_IOERROR) { + LOG_UNLOCK(log, s); + return XFS_ERROR(EIO); + } + + do { + if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) { + iclog = iclog->ic_next; + continue; + } + + if (iclog->ic_state == XLOG_STATE_DIRTY) { + LOG_UNLOCK(log, s); + return 0; + } + + if (iclog->ic_state == XLOG_STATE_ACTIVE) { + /* + * We sleep here if we haven't already slept (e.g. + * this is the first time we've looked at the correct + * iclog buf) and the buffer before us is going to + * be sync'ed. The reason for this is that if we + * are doing sync transactions here, by waiting for + * the previous I/O to complete, we can allow a few + * more transactions into this iclog before we close + * it down. + * + * Otherwise, we mark the buffer WANT_SYNC, and bump + * up the refcnt so we can release the log (which drops + * the ref count). The state switch keeps new transaction + * commits from using this buffer. When the current commits + * finish writing into the buffer, the refcount will drop to + * zero and the buffer will go out then. + */ + if (!already_slept && + (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC | + XLOG_STATE_SYNCING))) { + ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); + XFS_STATS_INC(xfsstats.xs_log_force_sleep); + sv_wait(&iclog->ic_prev->ic_writesema, PSWP, + &log->l_icloglock, s); + already_slept = 1; + goto try_again; + } else { + iclog->ic_refcnt++; + xlog_state_switch_iclogs(log, iclog, 0); + LOG_UNLOCK(log, s); + if (xlog_state_release_iclog(log, iclog)) + return XFS_ERROR(EIO); + s = LOG_LOCK(log); + } + } + + if ((flags & XFS_LOG_SYNC) && /* sleep */ + !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { + + /* + * Don't wait on the forcesema if we know that we've + * gotten a log write error. + */ + if (iclog->ic_state & XLOG_STATE_IOERROR) { + LOG_UNLOCK(log, s); + return XFS_ERROR(EIO); + } + XFS_STATS_INC(xfsstats.xs_log_force_sleep); + sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s); + /* + * No need to grab the log lock here since we're + * only deciding whether or not to return EIO + * and the memory read should be atomic. + */ + if (iclog->ic_state & XLOG_STATE_IOERROR) + return XFS_ERROR(EIO); + } else { /* just return */ + LOG_UNLOCK(log, s); + } + return 0; + + } while (iclog != log->l_iclog); + + LOG_UNLOCK(log, s); + return (0); +} /* xlog_state_sync */ + + +/* + * Called when we want to mark the current iclog as being ready to sync to + * disk. + */ +void +xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) +{ + SPLDECL(s); + + s = LOG_LOCK(log); + + if (iclog->ic_state == XLOG_STATE_ACTIVE) { + xlog_state_switch_iclogs(log, iclog, 0); + } else { + ASSERT(iclog->ic_state & + (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); + } + + LOG_UNLOCK(log, s); +} /* xlog_state_want_sync */ + + + +/***************************************************************************** + * + * TICKET functions + * + ***************************************************************************** + */ + +/* + * Algorithm doesn't take into account page size. ;-( + */ +STATIC void +xlog_state_ticket_alloc(xlog_t *log) +{ + xlog_ticket_t *t_list; + xlog_ticket_t *next; + xfs_caddr_t buf; + uint i = (NBPP / sizeof(xlog_ticket_t)) - 2; + SPLDECL(s); + + /* + * The kmem_zalloc may sleep, so we shouldn't be holding the + * global lock. 
XXXmiken: may want to use zone allocator.
+ */
+ buf = (xfs_caddr_t) kmem_zalloc(NBPP, 0);
+
+ s = LOG_LOCK(log);
+
+ /* Attach 1st ticket to Q, so we can keep track of allocated memory */
+ t_list = (xlog_ticket_t *)buf;
+ t_list->t_next = log->l_unmount_free;
+ log->l_unmount_free = t_list++;
+ log->l_ticket_cnt++;
+ log->l_ticket_tcnt++;
+
+ /* Next ticket becomes first ticket attached to ticket free list */
+ if (log->l_freelist != NULL) {
+ ASSERT(log->l_tail != NULL);
+ log->l_tail->t_next = t_list;
+ } else {
+ log->l_freelist = t_list;
+ }
+ log->l_ticket_cnt++;
+ log->l_ticket_tcnt++;
+
+ /* Cycle through rest of alloc'ed memory, building up free Q */
+ for ( ; i > 0; i--) {
+ next = t_list + 1;
+ t_list->t_next = next;
+ t_list = next;
+ log->l_ticket_cnt++;
+ log->l_ticket_tcnt++;
+ }
+ t_list->t_next = 0;
+ log->l_tail = t_list;
+ LOG_UNLOCK(log, s);
+} /* xlog_state_ticket_alloc */
+
+
+/*
+ * Put ticket into free list
+ *
+ * Assumption: log lock is held around this call.
+ */
+STATIC void
+xlog_ticket_put(xlog_t *log,
+ xlog_ticket_t *ticket)
+{
+ sv_destroy(&ticket->t_sema);
+
+ /*
+ * Don't think caching will make that much difference. It's
+ * more important to make debugging easier.
+ */
+#if 0
+ /* real code will want to use LIFO for caching */
+ ticket->t_next = log->l_freelist;
+ log->l_freelist = ticket;
+ /* no need to clear fields */
+#else
+ /* When we debug, it is easier if tickets are cycled */
+ ticket->t_next = 0;
+ if (log->l_tail != 0) {
+ log->l_tail->t_next = ticket;
+ } else {
+ ASSERT(log->l_freelist == 0);
+ log->l_freelist = ticket;
+ }
+ log->l_tail = ticket;
+#endif
+ log->l_ticket_cnt++;
+} /* xlog_ticket_put */
+
+
+/*
+ * Grab ticket off freelist or allocate some more
+ */
+xlog_ticket_t *
+xlog_ticket_get(xlog_t *log,
+ int unit_bytes,
+ int cnt,
+ char client,
+ uint xflags)
+{
+ xlog_ticket_t *tic;
+ SPLDECL(s);
+
+ alloc:
+ if (log->l_freelist == NULL)
+ xlog_state_ticket_alloc(log); /* potentially sleep */
+
+ s = LOG_LOCK(log);
+ if (log->l_freelist == NULL) {
+ LOG_UNLOCK(log, s);
+ goto alloc;
+ }
+ tic = log->l_freelist;
+ log->l_freelist = tic->t_next;
+ if (log->l_freelist == NULL)
+ log->l_tail = NULL;
+ log->l_ticket_cnt--;
+ LOG_UNLOCK(log, s);
+
+ /*
+ * Permanent reservations have up to 'cnt'-1 active log operations
+ * in the log. A unit in this case is the amount of space for one
+ * of these log operations. Normal reservations have a cnt of 1
+ * and their unit amount is the total amount of space required.
+ * The following line of code adds one log record header length
+ * for each part of an operation which may fall on a different
+ * log record.
+ *
+ * One more XLOG_HEADER_SIZE is added to account for possible
+ * round off errors when syncing a LR to disk. The bytes are
+ * subtracted if the thread using this ticket is the first writer
+ * to a new LR.
+ *
+ * We add an extra log header for the possibility that the commit
+ * record is the first data written to a new log record. In this
+ * case it is separate from the rest of the transaction data and
+ * will be charged for the log record header.
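+ *
+ * Sample arithmetic (all numbers hypothetical): asking for
+ * unit_bytes == 40000 with 16k log records would give
+ * XLOG_BTOLRBB(40000) == 3, so with a 512-byte l_iclog_hsize the
+ * line below would add 512 * (3 + 2) = 2560 bytes of header
+ * overhead to the unit reservation.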
+ */ + unit_bytes += log->l_iclog_hsize * (XLOG_BTOLRBB(unit_bytes) + 2); + + tic->t_unit_res = unit_bytes; + tic->t_curr_res = unit_bytes; + tic->t_cnt = cnt; + tic->t_ocnt = cnt; + tic->t_tid = (xlog_tid_t)((__psint_t)tic & 0xffffffff); + tic->t_clientid = client; + tic->t_flags = XLOG_TIC_INITED; + if (xflags & XFS_LOG_PERM_RESERV) + tic->t_flags |= XLOG_TIC_PERM_RESERV; + sv_init(&(tic->t_sema), SV_DEFAULT, "logtick"); + + return tic; +} /* xlog_ticket_get */ + + +/****************************************************************************** + * + * Log debug routines + * + ****************************************************************************** + */ +#if defined(DEBUG) && !defined(XLOG_NOLOG) +/* + * Make sure that the destination ptr is within the valid data region of + * one of the iclogs. This uses backup pointers stored in a different + * part of the log in case we trash the log structure. + */ +void +xlog_verify_dest_ptr(xlog_t *log, + __psint_t ptr) +{ + int i; + int good_ptr = 0; + + for (i=0; i < log->l_iclog_bufs; i++) { + if (ptr >= (__psint_t)log->l_iclog_bak[i] && + ptr <= (__psint_t)log->l_iclog_bak[i]+log->l_iclog_size) + good_ptr++; + } + if (! good_ptr) + xlog_panic("xlog_verify_dest_ptr: invalid ptr"); +} /* xlog_verify_dest_ptr */ + + +#ifdef XFSDEBUG +/* check split LR write */ +STATIC void +xlog_verify_disk_cycle_no(xlog_t *log, + xlog_in_core_t *iclog) +{ + xfs_buf_t *bp; + uint cycle_no; + xfs_daddr_t i; + + if (BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT) < 10) { + cycle_no = CYCLE_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT); + bp = xlog_get_bp(1, log->l_mp); + ASSERT(bp); + for (i = 0; i < BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT); i++) { + xlog_bread(log, i, 1, bp); + if (GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT) != cycle_no) + xlog_warn("XFS: xlog_verify_disk_cycle_no: bad cycle no"); + } + xlog_put_bp(bp); + } +} /* xlog_verify_disk_cycle_no */ +#endif + +STATIC void +xlog_verify_grant_head(xlog_t *log, int equals) +{ + if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) { + if (equals) + ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes); + else + ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes); + } else { + ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle); + ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes); + } +} /* xlog_verify_grant_head */ + +/* check if it will fit */ +STATIC void +xlog_verify_tail_lsn(xlog_t *log, + xlog_in_core_t *iclog, + xfs_lsn_t tail_lsn) +{ + int blocks; + + if (CYCLE_LSN(tail_lsn, ARCH_NOCONVERT) == log->l_prev_cycle) { + blocks = + log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn, ARCH_NOCONVERT)); + if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) + xlog_panic("xlog_verify_tail_lsn: ran out of log space"); + } else { + ASSERT(CYCLE_LSN(tail_lsn, ARCH_NOCONVERT)+1 == log->l_prev_cycle); + + if (BLOCK_LSN(tail_lsn, ARCH_NOCONVERT) == log->l_prev_block) + xlog_panic("xlog_verify_tail_lsn: tail wrapped"); + + blocks = BLOCK_LSN(tail_lsn, ARCH_NOCONVERT) - log->l_prev_block; + if (blocks < BTOBB(iclog->ic_offset) + 1) + xlog_panic("xlog_verify_tail_lsn: ran out of log space"); + } +} /* xlog_verify_tail_lsn */ + +/* + * Perform a number of checks on the iclog before writing to disk. + * + * 1. Make sure the iclogs are still circular + * 2. Make sure we have a good magic number + * 3. Make sure we don't have magic numbers in the data + * 4. Check fields of each log operation header for: + * A. Valid client identifier + * B. 
tid ptr value falls in valid ptr space (user space code) + * C. Length in log record header is correct according to the + * individual operation headers within record. + * 5. When a bwrite will occur within 5 blocks of the front of the physical + * log, check the preceding blocks of the physical log to make sure all + * the cycle numbers agree with the current cycle number. + */ +STATIC void +xlog_verify_iclog(xlog_t *log, + xlog_in_core_t *iclog, + int count, + boolean_t syncing) +{ + xlog_op_header_t *ophead; + xlog_in_core_t *icptr; + xfs_caddr_t ptr; + xfs_caddr_t base_ptr; + __psint_t field_offset; + __uint8_t clientid; + int len, i, j, k, op_len; + int idx; + SPLDECL(s); + + union ich { + xlog_rec_ext_header_t hic_xheader; + char hic_sector[XLOG_HEADER_SIZE]; + }*xhdr; + + /* check validity of iclog pointers */ + s = LOG_LOCK(log); + icptr = log->l_iclog; + for (i=0; i < log->l_iclog_bufs; i++) { + if (icptr == 0) + xlog_panic("xlog_verify_iclog: illegal ptr"); + icptr = icptr->ic_next; + } + if (icptr != log->l_iclog) + xlog_panic("xlog_verify_iclog: corrupt iclog ring"); + LOG_UNLOCK(log, s); + + /* check log magic numbers */ + ptr = (xfs_caddr_t) &(iclog->ic_header); + if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) + xlog_panic("xlog_verify_iclog: illegal magic num"); + + for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count; + ptr += BBSIZE) { + if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) + xlog_panic("xlog_verify_iclog: unexpected magic num"); + } + + /* check fields */ + len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT); + ptr = iclog->ic_datap; + base_ptr = ptr; + ophead = (xlog_op_header_t *)ptr; + xhdr = (union ich*)&iclog->ic_header; + for (i = 0; i < len; i++) { + ophead = (xlog_op_header_t *)ptr; + + /* clientid is only 1 byte */ + field_offset = (__psint_t) + ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr); + if (syncing == B_FALSE || (field_offset & 0x1ff)) { + clientid = ophead->oh_clientid; + } else { + idx = BTOBB((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap); + if (idx > (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { + j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + clientid = GET_CLIENT_ID(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT); + } else { + clientid = GET_CLIENT_ID(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT); + } + } + if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) + cmn_err(CE_WARN, "xlog_verify_iclog: illegal clientid %d op 0x%p offset 0x%x", clientid, ophead, field_offset); + + /* check length */ + field_offset = (__psint_t) + ((xfs_caddr_t)&(ophead->oh_len) - base_ptr); + if (syncing == B_FALSE || (field_offset & 0x1ff)) { + op_len = INT_GET(ophead->oh_len, ARCH_CONVERT); + } else { + idx = BTOBB((__psint_t)&ophead->oh_len - + (__psint_t)iclog->ic_datap); + if (idx > (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { + j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT); + } else { + op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT); + } + } + ptr += sizeof(xlog_op_header_t) + op_len; + } +} /* xlog_verify_iclog */ +#endif /* DEBUG && !XLOG_NOLOG */ + +/* + * Mark all iclogs IOERROR. LOG_LOCK is held by the caller. + */ +STATIC int +xlog_state_ioerror( + xlog_t *log) +{ + xlog_in_core_t *iclog, *ic; + + iclog = log->l_iclog; + if (! 
(iclog->ic_state & XLOG_STATE_IOERROR)) { + /* + * Mark all the incore logs IOERROR. + * From now on, no log flushes will result. + */ + ic = iclog; + do { + ic->ic_state = XLOG_STATE_IOERROR; + ic = ic->ic_next; + } while (ic != iclog); + return (0); + } + /* + * Return non-zero, if state transition has already happened. + */ + return (1); +} + +/* + * This is called from xfs_force_shutdown, when we're forcibly + * shutting down the filesystem, typically because of an IO error. + * Our main objectives here are to make sure that: + * a. the filesystem gets marked 'SHUTDOWN' for all interested + * parties to find out, 'atomically'. + * b. those who're sleeping on log reservations, pinned objects and + * other resources get woken up, and be told the bad news. + * c. nothing new gets queued up after (a) and (b) are done. + * d. if !logerror, flush the iclogs to disk, then seal them off + * for business. + */ +int +xfs_log_force_umount( + struct xfs_mount *mp, + int logerror) +{ + xlog_ticket_t *tic; + xlog_t *log; + int retval; + SPLDECL(s); + SPLDECL(s2); + + log = mp->m_log; + + /* + * If this happens during log recovery, don't worry about + * locking; the log isn't open for business yet. + */ + if (!log || + log->l_flags & XLOG_ACTIVE_RECOVERY) { + mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; + XFS_BUF_DONE(mp->m_sb_bp); + return (0); + } + + /* + * Somebody could've already done the hard work for us. + * No need to get locks for this. + */ + if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { + ASSERT(XLOG_FORCED_SHUTDOWN(log)); + return (1); + } + retval = 0; + /* + * We must hold both the GRANT lock and the LOG lock, + * before we mark the filesystem SHUTDOWN and wake + * everybody up to tell the bad news. + */ + s = GRANT_LOCK(log); + s2 = LOG_LOCK(log); + mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; + XFS_BUF_DONE(mp->m_sb_bp); + /* + * This flag is sort of redundant because of the mount flag, but + * it's good to maintain the separation between the log and the rest + * of XFS. + */ + log->l_flags |= XLOG_IO_ERROR; + + /* + * If we hit a log error, we want to mark all the iclogs IOERROR + * while we're still holding the loglock. + */ + if (logerror) + retval = xlog_state_ioerror(log); + LOG_UNLOCK(log, s2); + + /* + * We don't want anybody waiting for log reservations + * after this. That means we have to wake up everybody + * queued up on reserve_headq as well as write_headq. + * In addition, we make sure in xlog_{re}grant_log_space + * that we don't enqueue anything once the SHUTDOWN flag + * is set, and this action is protected by the GRANTLOCK. + */ + if ((tic = log->l_reserve_headq)) { + do { + sv_signal(&tic->t_sema); + tic = tic->t_next; + } while (tic != log->l_reserve_headq); + } + + if ((tic = log->l_write_headq)) { + do { + sv_signal(&tic->t_sema); + tic = tic->t_next; + } while (tic != log->l_write_headq); + } + GRANT_UNLOCK(log, s); + + if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { + ASSERT(!logerror); + /* + * Force the incore logs to disk before shutting the + * log down completely. + */ + xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC); + s2 = LOG_LOCK(log); + retval = xlog_state_ioerror(log); + LOG_UNLOCK(log, s2); + } + /* + * Wake up everybody waiting on xfs_log_force. + * Callback all log item committed functions as if the + * log writes were completed. 
+ */ + xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); + +#ifdef XFSERRORDEBUG + { + xlog_in_core_t *iclog; + + s = LOG_LOCK(log); + iclog = log->l_iclog; + do { + ASSERT(iclog->ic_callback == 0); + iclog = iclog->ic_next; + } while (iclog != log->l_iclog); + LOG_UNLOCK(log, s); + } +#endif + /* return non-zero if log IOERROR transition had already happened */ + return (retval); +} + +int +xlog_iclogs_empty(xlog_t *log) +{ + xlog_in_core_t *iclog; + + iclog = log->l_iclog; + do { + /* endianness does not matter here, zero is zero in + * any language. + */ + if (iclog->ic_header.h_num_logops) + return(0); + iclog = iclog->ic_next; + } while (iclog != log->l_iclog); + return(1); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_log.h linux.21rc5-ac2/fs/xfs/xfs_log.h --- linux.21rc5/fs/xfs/xfs_log.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_log.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_LOG_H__
+#define __XFS_LOG_H__
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define LSN_FIELD_CYCLE(arch) (((arch)==ARCH_NOCONVERT)?1:0)
+#define LSN_FIELD_BLOCK(arch) (((arch)==ARCH_NOCONVERT)?0:1)
+#else
+#define LSN_FIELD_CYCLE(arch) (0)
+#define LSN_FIELD_BLOCK(arch) (1)
+#endif
+
+/* get lsn fields */
+
+#define CYCLE_LSN(lsn,arch) (INT_GET(((uint *)&(lsn))[LSN_FIELD_CYCLE(arch)], arch))
+#define BLOCK_LSN(lsn,arch) (INT_GET(((uint *)&(lsn))[LSN_FIELD_BLOCK(arch)], arch))
+/* this is used in a spot where we might otherwise double-endian-flip */
+#define CYCLE_LSN_NOCONV(lsn,arch) (((uint *)&(lsn))[LSN_FIELD_CYCLE(arch)])
+
+#ifdef __KERNEL__
+/*
+ * By comparing each component, we don't have to worry about extra
+ * endian issues in treating two 32 bit numbers as one 64 bit number
+ */
+static
+#if defined(__GNUC__) && (__GNUC__ == 2) && (__GNUC_MINOR__ == 95)
+__attribute__((unused)) /* gcc 2.95 miscompiles this when inlined */
+#else
+__inline__
+#endif
+xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2, xfs_arch_t arch)
+{
+ if (CYCLE_LSN(lsn1, arch) != CYCLE_LSN(lsn2, arch))
+ return (CYCLE_LSN(lsn1, arch) < CYCLE_LSN(lsn2, arch)) ? -999 : 999;
+
+ if (BLOCK_LSN(lsn1, arch) != BLOCK_LSN(lsn2, arch))
+ return (BLOCK_LSN(lsn1, arch) < BLOCK_LSN(lsn2, arch)) ? -999 : 999;
+
+ return 0;
+}
+#endif
+
+#define XLOG_HEADER_SIZE 512
+
+#define XLOG_TOTAL_REC_SHIFT(log) \
+ BTOBB(XLOG_MAX_ICLOGS << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \
+ XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
+
+/*
+ * set lsns
+ */
+
+#define ASSIGN_LSN_CYCLE(lsn,cycle,arch) \
+ INT_SET(((uint *)&(lsn))[LSN_FIELD_CYCLE(arch)], arch, (cycle));
+#define ASSIGN_LSN_BLOCK(lsn,block,arch) \
+ INT_SET(((uint *)&(lsn))[LSN_FIELD_BLOCK(arch)], arch, (block));
+#define ASSIGN_ANY_LSN(lsn,cycle,block,arch) \
+ { \
+ ASSIGN_LSN_CYCLE(lsn,cycle,arch); \
+ ASSIGN_LSN_BLOCK(lsn,block,arch); \
+ }
+#define ASSIGN_LSN(lsn,log,arch) \
+ ASSIGN_ANY_LSN(lsn,(log)->l_curr_cycle,(log)->l_curr_block,arch);
+
+#define XLOG_SET(f,b) (((f) & (b)) == (b))
+
+#define GET_CYCLE(ptr, arch) \
+ (INT_GET(*(uint *)(ptr), arch) == XLOG_HEADER_MAGIC_NUM ? \
+ INT_GET(*((uint *)(ptr)+1), arch) : \
+ INT_GET(*(uint *)(ptr), arch) \
+ )
+
+#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
+
+
+#ifdef __KERNEL__
+/*
+ * get client id from packed copy.
+ *
+ * this hack is here because the xlog_pack code copies four bytes
+ * of xlog_op_header containing the fields oh_clientid, oh_flags
+ * and oh_res2 into the packed copy.
+ *
+ * later on this four byte chunk is treated as an int and the
+ * client id is pulled out.
+ *
+ * this has endian issues, of course.
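+ *
+ * illustration (made-up values): with oh_clientid 0x69 and
+ * oh_flags 0x02, the packed bytes are 69 02 00 00. read back as
+ * an int, that is 0x00000269 on a little-endian host (so we mask
+ * with 0xff) but 0x69020000 on a big-endian one (so we shift
+ * down by 24); hence the two GET_CLIENT_ID variants below.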
+ */ + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define GET_CLIENT_ID(i,arch) \ + ((i) & 0xff) +#else +#define GET_CLIENT_ID(i,arch) \ + ((i) >> 24) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XLOG_GRANT_SUB_SPACE) +void xlog_grant_sub_space(struct log *log, int bytes, int type); +#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \ + xlog_grant_sub_space(log,bytes,type) +#else +#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \ + { \ + if (type == 'w') { \ + (log)->l_grant_write_bytes -= (bytes); \ + if ((log)->l_grant_write_bytes < 0) { \ + (log)->l_grant_write_bytes += (log)->l_logsize; \ + (log)->l_grant_write_cycle--; \ + } \ + } else { \ + (log)->l_grant_reserve_bytes -= (bytes); \ + if ((log)->l_grant_reserve_bytes < 0) { \ + (log)->l_grant_reserve_bytes += (log)->l_logsize;\ + (log)->l_grant_reserve_cycle--; \ + } \ + } \ + } +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XLOG_GRANT_ADD_SPACE) +void xlog_grant_add_space(struct log *log, int bytes, int type); +#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \ + xlog_grant_add_space(log,bytes,type) +#else +#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \ + { \ + if (type == 'w') { \ + (log)->l_grant_write_bytes += (bytes); \ + if ((log)->l_grant_write_bytes > (log)->l_logsize) { \ + (log)->l_grant_write_bytes -= (log)->l_logsize; \ + (log)->l_grant_write_cycle++; \ + } \ + } else { \ + (log)->l_grant_reserve_bytes += (bytes); \ + if ((log)->l_grant_reserve_bytes > (log)->l_logsize) { \ + (log)->l_grant_reserve_bytes -= (log)->l_logsize;\ + (log)->l_grant_reserve_cycle++; \ + } \ + } \ + } +#endif +#define XLOG_INS_TICKETQ(q,tic) \ + { \ + if (q) { \ + (tic)->t_next = (q); \ + (tic)->t_prev = (q)->t_prev; \ + (q)->t_prev->t_next = (tic); \ + (q)->t_prev = (tic); \ + } else { \ + (tic)->t_prev = (tic)->t_next = (tic); \ + (q) = (tic); \ + } \ + (tic)->t_flags |= XLOG_TIC_IN_Q; \ + } +#define XLOG_DEL_TICKETQ(q,tic) \ + { \ + if ((tic) == (tic)->t_next) { \ + (q) = NULL; \ + } else { \ + (q) = (tic)->t_next; \ + (tic)->t_next->t_prev = (tic)->t_prev; \ + (tic)->t_prev->t_next = (tic)->t_next; \ + } \ + (tic)->t_next = (tic)->t_prev = NULL; \ + (tic)->t_flags &= ~XLOG_TIC_IN_Q; \ + } + + +#define GRANT_LOCK(log) mutex_spinlock(&(log)->l_grant_lock) +#define GRANT_UNLOCK(log, s) mutex_spinunlock(&(log)->l_grant_lock, s) +#define LOG_LOCK(log) mutex_spinlock(&(log)->l_icloglock) +#define LOG_UNLOCK(log, s) mutex_spinunlock(&(log)->l_icloglock, s) + +#define xlog_panic(s) {cmn_err(CE_PANIC, s); } +#define xlog_exit(s) {cmn_err(CE_PANIC, s); } +#define xlog_warn(s) {cmn_err(CE_WARN, s); } + +/* + * In core log state + */ +#define XLOG_STATE_ACTIVE 0x0001 /* Current IC log being written to */ +#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */ +#define XLOG_STATE_SYNCING 0x0004 /* This IC log is syncing */ +#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */ +#define XLOG_STATE_DO_CALLBACK \ + 0x0010 /* Process callback functions */ +#define XLOG_STATE_CALLBACK 0x0020 /* Callback functions now */ +#define XLOG_STATE_DIRTY 0x0040 /* Dirty IC log, not ready for ACTIVE status*/ +#define XLOG_STATE_IOERROR 0x0080 /* IO error happened in sync'ing log */ +#define XLOG_STATE_ALL 0x7FFF /* All possible valid flags */ +#define XLOG_STATE_NOTUSED 0x8000 /* This IC log not being used */ +#endif /* __KERNEL__ */ + +/* + * Flags to log operation header + * + * The first write of a new transaction will be preceded with a start + * record, XLOG_START_TRANS. 
Once a transaction is committed, a commit + * record is written, XLOG_COMMIT_TRANS. If a single region can not fit into + * the remainder of the current active in-core log, it is split up into + * multiple regions. Each partial region will be marked with a + * XLOG_CONTINUE_TRANS until the last one, which gets marked with XLOG_END_TRANS. + * + */ +#define XLOG_START_TRANS 0x01 /* Start a new transaction */ +#define XLOG_COMMIT_TRANS 0x02 /* Commit this transaction */ +#define XLOG_CONTINUE_TRANS 0x04 /* Cont this trans into new region */ +#define XLOG_WAS_CONT_TRANS 0x08 /* Cont this trans into new region */ +#define XLOG_END_TRANS 0x10 /* End a continued transaction */ +#define XLOG_UNMOUNT_TRANS 0x20 /* Unmount a filesystem transaction */ +#define XLOG_SKIP_TRANS (XLOG_COMMIT_TRANS | XLOG_CONTINUE_TRANS | \ + XLOG_WAS_CONT_TRANS | XLOG_END_TRANS | \ + XLOG_UNMOUNT_TRANS) + +#ifdef __KERNEL__ +/* + * Flags to log ticket + */ +#define XLOG_TIC_INITED 0x1 /* has been initialized */ +#define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */ +#define XLOG_TIC_IN_Q 0x4 +#endif /* __KERNEL__ */ + +#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */ + +/* + * Flags for log structure + */ +#define XLOG_CHKSUM_MISMATCH 0x1 /* used only during recovery */ +#define XLOG_ACTIVE_RECOVERY 0x2 /* in the middle of recovery */ +#define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */ +#define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being + shutdown */ +typedef __uint32_t xlog_tid_t; + + +#ifdef __KERNEL__ +/* + * Below are states for covering allocation transactions. + * By covering, we mean changing the h_tail_lsn in the last on-disk + * log write such that no allocation transactions will be re-done during + * recovery after a system crash. Recovery starts at the last on-disk + * log write. + * + * These states are used to insert dummy log entries to cover + * space allocation transactions which can undo non-transactional changes + * after a crash. Writes to a file with space + * already allocated do not result in any transactions. Allocations + * might include space beyond the EOF. So if we just push the EOF a + * little, the last transaction for the file could contain the wrong + * size. If there is no file system activity, after an allocation + * transaction, and the system crashes, the allocation transaction + * will get replayed and the file will be truncated. This could + * be hours/days/... after the allocation occurred. + * + * The fix for this is to do two dummy transactions when the + * system is idle. We need two dummy transaction because the h_tail_lsn + * in the log record header needs to point beyond the last possible + * non-dummy transaction. The first dummy changes the h_tail_lsn to + * the first transaction before the dummy. The second dummy causes + * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn. + * + * These dummy transactions get committed when everything + * is idle (after there has been some activity). + * + * There are 5 states used to control this. + * + * IDLE -- no logging has been done on the file system or + * we are done covering previous transactions. + * NEED -- logging has occurred and we need a dummy transaction + * when the log becomes idle. + * DONE -- we were in the NEED state and have committed a dummy + * transaction. + * NEED2 -- we detected that a dummy transaction has gone to the + * on disk log with no other transactions. + * DONE2 -- we committed a dummy transaction when in the NEED2 state. 
+ * + * There are two places where we switch states: + * + * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2. + * We commit the dummy transaction and switch to DONE or DONE2, + * respectively. In all other states, we don't do anything. + * + * 2.) When we finish writing the on-disk log (xlog_state_clean_log). + * + * No matter what state we are in, if this isn't the dummy + * transaction going out, the next state is NEED. + * So, if we aren't in the DONE or DONE2 states, the next state + * is NEED. We can't be finishing a write of the dummy record + * unless it was committed and the state switched to DONE or DONE2. + * + * If we are in the DONE state and this was a write of the + * dummy transaction, we move to NEED2. + * + * If we are in the DONE2 state and this was a write of the + * dummy transaction, we move to IDLE. + * + * + * Writing only one dummy transaction can get appended to + * one file space allocation. When this happens, the log recovery + * code replays the space allocation and a file could be truncated. + * This is why we have the NEED2 and DONE2 states before going idle. + */ + +#define XLOG_STATE_COVER_IDLE 0 +#define XLOG_STATE_COVER_NEED 1 +#define XLOG_STATE_COVER_DONE 2 +#define XLOG_STATE_COVER_NEED2 3 +#define XLOG_STATE_COVER_DONE2 4 + +#define XLOG_COVER_OPS 5 + +typedef struct xlog_ticket { + sv_t t_sema; /* sleep on this semaphore :20 */ + struct xlog_ticket *t_next; /* : 4 */ + struct xlog_ticket *t_prev; /* : 4 */ + xlog_tid_t t_tid; /* transaction identifier : 4 */ + int t_curr_res; /* current reservation in bytes : 4 */ + int t_unit_res; /* unit reservation in bytes : 4 */ + __uint8_t t_ocnt; /* original count : 1 */ + __uint8_t t_cnt; /* current count : 1 */ + __uint8_t t_clientid; /* who does this belong to; : 1 */ + __uint8_t t_flags; /* properties of reservation : 1 */ +} xlog_ticket_t; +#endif + + +typedef struct xlog_op_header { + xlog_tid_t oh_tid; /* transaction id of operation : 4 b */ + int oh_len; /* bytes in data region : 4 b */ + __uint8_t oh_clientid; /* who sent me this : 1 b */ + __uint8_t oh_flags; /* : 1 b */ + ushort oh_res2; /* 32 bit align : 2 b */ +} xlog_op_header_t; + + +/* valid values for h_fmt */ +#define XLOG_FMT_UNKNOWN 0 +#define XLOG_FMT_LINUX_LE 1 +#define XLOG_FMT_LINUX_BE 2 +#define XLOG_FMT_IRIX_BE 3 + +/* our fmt */ +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define XLOG_FMT XLOG_FMT_LINUX_LE +#else +#if __BYTE_ORDER == __BIG_ENDIAN +#define XLOG_FMT XLOG_FMT_LINUX_BE +#else +#error unknown byte order +#endif +#endif + +typedef struct xlog_rec_header { + uint h_magicno; /* log record (LR) identifier : 4 */ + uint h_cycle; /* write cycle of log : 4 */ + int h_version; /* LR version : 4 */ + int h_len; /* len in bytes; should be 64-bit aligned: 4 */ + xfs_lsn_t h_lsn; /* lsn of this LR : 8 */ + xfs_lsn_t h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */ + uint h_chksum; /* may not be used; non-zero if used : 4 */ + int h_prev_block; /* block number to previous LR : 4 */ + int h_num_logops; /* number of log operations in this LR : 4 */ + uint h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; + /* new fields */ + int h_fmt; /* format of log record : 4 */ + uuid_t h_fs_uuid; /* uuid of FS : 16 */ + int h_size; /* iclog size : 4 */ +} xlog_rec_header_t; + +typedef struct xlog_rec_ext_header { + uint xh_cycle; /* write cycle of log : 4 */ + uint xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */ +} xlog_rec_ext_header_t; +#ifdef __KERNEL__ +/* + * - A log record header is 512 bytes. 
There is plenty of room to grow the + * xlog_rec_header_t into the reserved space. + * - ic_data follows, so a write to disk can start at the beginning of + * the iclog. + * - ic_forcesema is used to implement synchronous forcing of the iclog to disk. + * - ic_next is the pointer to the next iclog in the ring. + * - ic_bp is a pointer to the buffer used to write this incore log to disk. + * - ic_log is a pointer back to the global log structure. + * - ic_callback is a linked list of callback function/argument pairs to be + * called after an iclog finishes writing. + * - ic_size is the full size of the header plus data. + * - ic_offset is the current number of bytes written to in this iclog. + * - ic_refcnt is bumped when someone is writing to the log. + * - ic_state is the state of the iclog. + */ +typedef struct xlog_iclog_fields { + sv_t ic_forcesema; + sv_t ic_writesema; + struct xlog_in_core *ic_next; + struct xlog_in_core *ic_prev; + struct xfs_buf *ic_bp; + struct log *ic_log; + xfs_log_callback_t *ic_callback; + xfs_log_callback_t **ic_callback_tail; +#ifdef DEBUG + struct ktrace *ic_trace; +#endif + int ic_size; + int ic_offset; + int ic_refcnt; + int ic_roundoff; + int ic_bwritecnt; + ushort_t ic_state; + char *ic_datap; /* pointer to iclog data */ +} xlog_iclog_fields_t; + +typedef struct xlog_in_core2 { + union { + xlog_rec_header_t hic_header; + xlog_rec_ext_header_t hic_xheader; + char hic_sector[XLOG_HEADER_SIZE]; + } ic_h; +} xlog_in_core_2_t; + +typedef struct xlog_in_core { + xlog_iclog_fields_t hic_fields; + xlog_in_core_2_t *hic_data; +} xlog_in_core_t; + +/* + * Defines to save our code from this glop. + */ +#define ic_forcesema hic_fields.ic_forcesema +#define ic_writesema hic_fields.ic_writesema +#define ic_next hic_fields.ic_next +#define ic_prev hic_fields.ic_prev +#define ic_bp hic_fields.ic_bp +#define ic_log hic_fields.ic_log +#define ic_callback hic_fields.ic_callback +#define ic_callback_tail hic_fields.ic_callback_tail +#define ic_trace hic_fields.ic_trace +#define ic_size hic_fields.ic_size +#define ic_offset hic_fields.ic_offset +#define ic_refcnt hic_fields.ic_refcnt +#define ic_roundoff hic_fields.ic_roundoff +#define ic_bwritecnt hic_fields.ic_bwritecnt +#define ic_state hic_fields.ic_state +#define ic_datap hic_fields.ic_datap +#define ic_header hic_data->ic_h.hic_header + +/* + * The reservation head lsn is not made up of a cycle number and block number. + * Instead, it uses a cycle number and byte number. Logs don't expect to + * overflow 31 bits worth of byte offset, so using a byte number will mean + * that round off problems won't occur when releasing partial reservations. 
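+ *
+ * As a rough illustration (plain ints standing in for the real fields),
+ * moving a grant head forward by "delta" bytes works out to
+ *
+ *	bytes += delta;
+ *	if (bytes > logsize) {		-- wrapped past the physical end
+ *		bytes -= logsize;
+ *		cycle++;
+ *	}
+ *
+ * which is essentially what XLOG_GRANT_ADD_SPACE above expands to, with
+ * XLOG_GRANT_SUB_SPACE as its mirror image for releasing space.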
+ */
+typedef struct log {
+    /* The following block of fields are changed while holding icloglock */
+    sema_t		l_flushsema;    /* iclog flushing semaphore */
+    int			l_flushcnt;	/* # of procs waiting on this sema */
+    int			l_ticket_cnt;	/* free ticket count */
+    int			l_ticket_tcnt;	/* total ticket count */
+    int			l_covered_state;/* state of "covering disk log entries" */
+    xlog_ticket_t	*l_freelist;    /* free list of tickets */
+    xlog_ticket_t	*l_unmount_free;/* kmem_free these addresses */
+    xlog_ticket_t	*l_tail;        /* tail of the ticket free list */
+    xlog_in_core_t	*l_iclog;       /* head log queue */
+    lock_t		l_icloglock;    /* grab to change iclog state */
+    xfs_lsn_t		l_tail_lsn;     /* lsn of 1st LR w/ unflushed buffers */
+    xfs_lsn_t		l_last_sync_lsn;/* lsn of last LR on disk */
+    struct xfs_mount	*l_mp;	        /* mount point */
+    struct xfs_buf	*l_xbuf;        /* extra buffer for log wrapping */
+    dev_t		l_dev;	        /* dev_t of log */
+    xfs_daddr_t		l_logBBstart;   /* start block of log */
+    int			l_logsize;      /* size of log in bytes */
+    int			l_logBBsize;    /* size of log in 512 byte chunks */
+    int			l_roundoff;	/* round off error of all iclogs */
+    int			l_curr_cycle;   /* Cycle number of log writes */
+    int			l_prev_cycle;   /* Cycle # b4 last block increment */
+    int			l_curr_block;   /* current logical block of log */
+    int			l_prev_block;   /* previous logical block of log */
+    int			l_iclog_size;	/* size of iclog in bytes */
+    int			l_iclog_size_log;/* log2 of iclog size */
+    int			l_iclog_bufs;	/* number of iclog buffers */
+
+    /* The following fields are used for debugging; need to hold icloglock */
+    char		*l_iclog_bak[XLOG_MAX_ICLOGS];
+
+    /* The following block of fields are changed while holding grant_lock */
+    lock_t		l_grant_lock;		/* protects below fields */
+    xlog_ticket_t	*l_reserve_headq;	/* */
+    xlog_ticket_t	*l_write_headq;		/* */
+    int			l_grant_reserve_cycle;	/* */
+    int			l_grant_reserve_bytes;	/* */
+    int			l_grant_write_cycle;	/* */
+    int			l_grant_write_bytes;	/* */
+
+    /* The following fields don't need locking */
+#ifdef DEBUG
+    struct ktrace	*l_trace;
+    struct ktrace	*l_grant_trace;
+#endif
+    uint		l_flags;
+    uint		l_quotaoffs_flag;	/* XFS_DQ_*, if QUOTAOFFs found */
+    struct xfs_buf_cancel **l_buf_cancel_table;
+    int			l_iclog_hsize;  /* size of iclog header */
+    int			l_iclog_heads;  /* number of iclog header sectors */
+} xlog_t;
+
+
+/* common routines */
+extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
+extern int	 xlog_find_head(xlog_t *log, xfs_daddr_t *head_blk);
+extern int	 xlog_find_tail(xlog_t *log,
+				xfs_daddr_t *head_blk,
+				xfs_daddr_t *tail_blk,
+				int readonly);
+extern int	 xlog_print_find_oldest(xlog_t *log, xfs_daddr_t *last_blk);
+extern int	 xlog_recover(xlog_t *log, int readonly);
+extern int	 xlog_recover_finish(xlog_t *log, int mfsi_flags);
+extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog);
+extern struct xfs_buf *xlog_get_bp(int, xfs_mount_t *);
+extern void	 xlog_put_bp(struct xfs_buf *);
+extern int	 xlog_bread(xlog_t *, xfs_daddr_t blkno, int bblks, struct xfs_buf *bp);
+extern void	 xlog_recover_process_iunlinks(xlog_t *log);
+
+#define XLOG_TRACE_GRAB_FLUSH  1
+#define XLOG_TRACE_REL_FLUSH   2
+#define XLOG_TRACE_SLEEP_FLUSH 3
+#define XLOG_TRACE_WAKE_FLUSH  4
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __XFS_LOG_PRIV_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_log_recover.c linux.21rc5-ac2/fs/xfs/xfs_log_recover.c
--- linux.21rc5/fs/xfs/xfs_log_recover.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_log_recover.c	2003-05-07 15:41:10.000000000 +0100
@@ 
-0,0 +1,3882 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_ag.h" +#include "xfs_sb.h" +#include "xfs_trans.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_error.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_imap.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_ialloc_btree.h" +#include "xfs_ialloc.h" +#include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_buf_item.h" +#include "xfs_alloc_btree.h" +#include "xfs_log_recover.h" +#include "xfs_extfree_item.h" +#include "xfs_trans_priv.h" +#include "xfs_bit.h" +#include "xfs_quota.h" +#include "xfs_rw.h" + +STATIC int xlog_find_zeroed(struct log *log, xfs_daddr_t *blk_no); + +STATIC int xlog_clear_stale_blocks(xlog_t *log, xfs_lsn_t tail_lsn); +STATIC void xlog_recover_insert_item_backq(xlog_recover_item_t **q, + xlog_recover_item_t *item); + +#if defined(DEBUG) +STATIC void xlog_recover_check_summary(xlog_t *log); +STATIC void xlog_recover_check_ail(xfs_mount_t *mp, xfs_log_item_t *lip, + int gen); +#else +#define xlog_recover_check_summary(log) +#define xlog_recover_check_ail(mp, lip, gen) +#endif /* DEBUG */ + + +xfs_buf_t * +xlog_get_bp(int num_bblks, xfs_mount_t *mp) +{ + xfs_buf_t *bp; + + ASSERT(num_bblks > 0); + + bp = XFS_ngetrbuf(BBTOB(num_bblks),mp); + return bp; +} /* xlog_get_bp */ + + +void +xlog_put_bp(xfs_buf_t *bp) +{ + XFS_nfreerbuf(bp); +} /* xlog_put_bp */ + + +/* + * nbblks should be uint, but oh well. Just want to catch that 32-bit length. 
+ */ +int +xlog_bread(xlog_t *log, + xfs_daddr_t blk_no, + int nbblks, + xfs_buf_t *bp) +{ + int error; + + ASSERT(log); + ASSERT(nbblks > 0); + ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp)); + ASSERT(bp); + + XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); + XFS_BUF_READ(bp); + XFS_BUF_BUSY(bp); + XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); + XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); + + xfsbdstrat(log->l_mp, bp); + if ((error = xfs_iowait(bp))) { + xfs_ioerror_alert("xlog_bread", log->l_mp, + bp, XFS_BUF_ADDR(bp)); + return (error); + } + return error; +} /* xlog_bread */ + + +/* + * Write out the buffer at the given block for the given number of blocks. + * The buffer is kept locked across the write and is returned locked. + * This can only be used for synchronous log writes. + */ +int +xlog_bwrite( + xlog_t *log, + int blk_no, + int nbblks, + xfs_buf_t *bp) +{ + int error; + + ASSERT(nbblks > 0); + ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp)); + + XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); + XFS_BUF_ZEROFLAGS(bp); + XFS_BUF_BUSY(bp); + XFS_BUF_HOLD(bp); + XFS_BUF_PSEMA(bp, PRIBIO); + XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); + XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); + + if ((error = xfs_bwrite(log->l_mp, bp))) + xfs_ioerror_alert("xlog_bwrite", log->l_mp, + bp, XFS_BUF_ADDR(bp)); + + return (error); +} /* xlog_bwrite */ + +#ifdef DEBUG +/* + * check log record header for recovery + */ +static void +xlog_header_check_dump(xfs_mount_t *mp, xlog_rec_header_t *head) +{ + int b; + + printk("%s: SB : uuid = ", __FUNCTION__); + for (b=0;b<16;b++) printk("%02x",((unsigned char *)&mp->m_sb.sb_uuid)[b]); + printk(", fmt = %d\n",XLOG_FMT); + printk(" log : uuid = "); + for (b=0;b<16;b++) printk("%02x",((unsigned char *)&head->h_fs_uuid)[b]); + printk(", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT)); +} +#endif + +/* + * check log record header for recovery + */ + +STATIC int +xlog_header_check_recover(xfs_mount_t *mp, xlog_rec_header_t *head) +{ + ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + + /* + * IRIX doesn't write the h_fmt field and leaves it zeroed + * (XLOG_FMT_UNKNOWN). This stops us from trying to recover + * a dirty log created in IRIX. + */ + + if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) { + xlog_warn("XFS: dirty log written in incompatible format - can't recover"); +#ifdef DEBUG + xlog_header_check_dump(mp, head); +#endif + XFS_ERROR_REPORT("xlog_header_check_recover(1)", + XFS_ERRLEVEL_HIGH, mp); + return XFS_ERROR(EFSCORRUPTED); + } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { + xlog_warn("XFS: dirty log entry has mismatched uuid - can't recover"); +#ifdef DEBUG + xlog_header_check_dump(mp, head); +#endif + XFS_ERROR_REPORT("xlog_header_check_recover(2)", + XFS_ERRLEVEL_HIGH, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + return 0; +} + +/* + * read the head block of the log and check the header + */ + +STATIC int +xlog_header_check_mount(xfs_mount_t *mp, xlog_rec_header_t *head) +{ + ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + + if (uuid_is_nil(&head->h_fs_uuid)) { + + /* + * IRIX doesn't write the h_fs_uuid or h_fmt fields. If + * h_fs_uuid is nil, we assume this log was last mounted + * by IRIX and continue. 
+ */ + + xlog_warn("XFS: nil uuid in log - IRIX style log"); + + } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { + xlog_warn("XFS: log has mismatched uuid - can't recover"); +#ifdef DEBUG + xlog_header_check_dump(mp, head); +#endif + XFS_ERROR_REPORT("xlog_header_check_mount", + XFS_ERRLEVEL_HIGH, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + return 0; +} + +STATIC void +xlog_recover_iodone( + struct xfs_buf *bp) +{ + xfs_mount_t *mp; + ASSERT(XFS_BUF_FSPRIVATE(bp, void *)); + + if (XFS_BUF_GETERROR(bp)) { + /* + * We're not going to bother about retrying + * this during recovery. One strike! + */ + mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *); + xfs_ioerror_alert("xlog_recover_iodone", + mp, bp, XFS_BUF_ADDR(bp)); + xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR); + } + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + xfs_biodone(bp); +} + +/* + * This routine finds (to an approximation) the first block in the physical + * log which contains the given cycle. It uses a binary search algorithm. + * Note that the algorithm can not be perfect because the disk will not + * necessarily be perfect. + */ +int +xlog_find_cycle_start(xlog_t *log, + xfs_buf_t *bp, + xfs_daddr_t first_blk, + xfs_daddr_t *last_blk, + uint cycle) +{ + xfs_daddr_t mid_blk; + uint mid_cycle; + int error; + + mid_blk = BLK_AVG(first_blk, *last_blk); + while (mid_blk != first_blk && mid_blk != *last_blk) { + if ((error = xlog_bread(log, mid_blk, 1, bp))) + return error; + mid_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT); + if (mid_cycle == cycle) { + *last_blk = mid_blk; + /* last_half_cycle == mid_cycle */ + } else { + first_blk = mid_blk; + /* first_half_cycle == mid_cycle */ + } + mid_blk = BLK_AVG(first_blk, *last_blk); + } + ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) || + (mid_blk == *last_blk && mid_blk-1 == first_blk)); + + return 0; +} /* xlog_find_cycle_start */ + + +/* + * Check that the range of blocks does not contain the cycle number + * given. The scan needs to occur from front to back and the ptr into the + * region must be updated since a later routine will need to perform another + * test. If the region is completely good, we end up returning the same + * last block number. + * + * Set blkno to -1 if we encounter no errors. This is an invalid block number + * since we don't ever expect logs to get this large. + */ + +STATIC int +xlog_find_verify_cycle( xlog_t *log, + xfs_daddr_t start_blk, + int nbblks, + uint stop_on_cycle_no, + xfs_daddr_t *new_blk) +{ + xfs_daddr_t i, j; + uint cycle; + xfs_buf_t *bp; + char *buf = NULL; + int error = 0; + xfs_daddr_t bufblks; + + bufblks = 1 << ffs(nbblks); + + while (!(bp = xlog_get_bp(bufblks, log->l_mp))) { + /* can't get enough memory to do everything in one big buffer */ + bufblks >>= 1; + if (!bufblks) + return ENOMEM; + } + + for (i = start_blk; i < start_blk + nbblks; i += bufblks) { + int bcount; + + bcount = min(bufblks, (start_blk + nbblks - i)); + + if ((error = xlog_bread(log, i, bcount, bp))) + goto out; + + buf = XFS_BUF_PTR(bp); + for (j = 0; j < bcount; j++) { + cycle = GET_CYCLE(buf, ARCH_CONVERT); + if (cycle == stop_on_cycle_no) { + *new_blk = i+j; + goto out; + } + + buf += BBSIZE; + } + } + + *new_blk = -1; + +out: + xlog_put_bp(bp); + + return error; +} /* xlog_find_verify_cycle */ + + +/* + * Potentially backup over partial log record write. + * + * In the typical case, last_blk is the number of the block directly after + * a good log record. 
Therefore, we subtract one to get the block number + * of the last block in the given buffer. extra_bblks contains the number + * of blocks we would have read on a previous read. This happens when the + * last log record is split over the end of the physical log. + * + * extra_bblks is the number of blocks potentially verified on a previous + * call to this routine. + */ + +STATIC int +xlog_find_verify_log_record(xlog_t *log, + xfs_daddr_t start_blk, + xfs_daddr_t *last_blk, + int extra_bblks) +{ + xfs_daddr_t i; + xfs_buf_t *bp; + char *buf = NULL; + xlog_rec_header_t *head = NULL; + int error = 0; + int smallmem = 0; + int num_blks = *last_blk - start_blk; + int xhdrs; + + ASSERT(start_blk != 0 || *last_blk != start_blk); + + if (!(bp = xlog_get_bp(num_blks, log->l_mp))) { + if (!(bp = xlog_get_bp(1, log->l_mp))) + return ENOMEM; + smallmem = 1; + buf = XFS_BUF_PTR(bp); + } else { + if ((error = xlog_bread(log, start_blk, num_blks, bp))) + goto out; + buf = XFS_BUF_PTR(bp) + ((num_blks - 1) << BBSHIFT); + } + + for (i = (*last_blk) - 1; i >= 0; i--) { + if (i < start_blk) { + /* legal log record not found */ + xlog_warn("XFS: Log inconsistent (didn't find previous header)"); + ASSERT(0); + error = XFS_ERROR(EIO); + goto out; + } + + if (smallmem && (error = xlog_bread(log, i, 1, bp))) + goto out; + head = (xlog_rec_header_t*)buf; + + if (INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) + break; + + if (!smallmem) + buf -= BBSIZE; + } + + /* + * We hit the beginning of the physical log & still no header. Return + * to caller. If caller can handle a return of -1, then this routine + * will be called again for the end of the physical log. + */ + if (i == -1) { + error = -1; + goto out; + } + + /* we have the final block of the good log (the first block + * of the log record _before_ the head. So we check the uuid. + */ + + if ((error = xlog_header_check_mount(log->l_mp, head))) + goto out; + + /* + * We may have found a log record header before we expected one. + * last_blk will be the 1st block # with a given cycle #. We may end + * up reading an entire log record. In this case, we don't want to + * reset last_blk. Only when last_blk points in the middle of a log + * record do we update last_blk. + */ + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + uint h_size = INT_GET(head->h_size, ARCH_CONVERT); + + xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE; + if (h_size % XLOG_HEADER_CYCLE_SIZE) + xhdrs++; + } else { + xhdrs = 1; + } + + if (*last_blk - i + extra_bblks + != BTOBB(INT_GET(head->h_len, ARCH_CONVERT))+xhdrs) + *last_blk = i; + +out: + xlog_put_bp(bp); + + return error; +} /* xlog_find_verify_log_record */ + +/* + * Head is defined to be the point of the log where the next log write + * write could go. This means that incomplete LR writes at the end are + * eliminated when calculating the head. We aren't guaranteed that previous + * LR have complete transactions. We only know that a cycle number of + * current cycle number -1 won't be present in the log if we start writing + * from our current block number. + * + * last_blk contains the block number of the first block with a given + * cycle number. + * + * Also called from xfs_log_print.c + * + * Return: zero if normal, non-zero if error. 
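+ *
+ * In outline (an informal sketch, not the exact code below):
+ *
+ *	read the cycle numbers of block 0 and of the last block;
+ *	if they are equal
+ *		scan for a hole still holding cycle - 1; its start,
+ *		if any, is the head;
+ *	else
+ *		binary search for the first block stamped with the
+ *		last cycle, then re-verify a window behind it, since
+ *		concurrent log writes can complete out of order;
+ *	finally back up over any partial log record at the head.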
+ */
+int
+xlog_find_head(xlog_t  *log,
+	       xfs_daddr_t *return_head_blk)
+{
+	xfs_buf_t   *bp;
+	xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
+	int	    num_scan_bblks;
+	uint	    first_half_cycle, last_half_cycle;
+	uint	    stop_on_cycle;
+	int	    error, log_bbnum = log->l_logBBsize;
+
+	/* Is the end of the log device zeroed? */
+	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
+		*return_head_blk = first_blk;
+
+		/* is the whole lot zeroed? */
+		if (!first_blk) {
+			/* Linux XFS shouldn't generate totally zeroed logs -
+			 * mkfs etc write a dummy unmount record to a fresh
+			 * log so we can store the uuid in there
+			 */
+			xlog_warn("XFS: totally zeroed log");
+		}
+
+		return 0;
+	} else if (error) {
+		xlog_warn("XFS: empty log check failed");
+		return error;
+	}
+
+	first_blk = 0;			/* get cycle # of 1st block */
+	bp = xlog_get_bp(1,log->l_mp);
+	if (!bp)
+		return ENOMEM;
+	if ((error = xlog_bread(log, 0, 1, bp)))
+		goto bp_err;
+	first_half_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT);
+
+	last_blk = head_blk = log_bbnum-1;	/* get cycle # of last block */
+	if ((error = xlog_bread(log, last_blk, 1, bp)))
+		goto bp_err;
+	last_half_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT);
+	ASSERT(last_half_cycle != 0);
+
+	/*
+	 * If the 1st half cycle number is equal to the last half cycle number,
+	 * then the entire log is stamped with the same cycle number.  In this
+	 * case, head_blk can't be set to zero (which makes sense).  The below
+	 * math doesn't work out properly with head_blk equal to zero.  Instead,
+	 * we set it to log_bbnum which is an illegal block number, but this
+	 * value makes the math correct.  If head_blk doesn't change through
+	 * all the tests below, *head_blk is set to zero at the very end rather
+	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
+	 * in a circular file.
+	 */
+	if (first_half_cycle == last_half_cycle) {
+		/*
+		 * In this case we believe that the entire log should have cycle
+		 * number last_half_cycle.  We need to scan backwards from the
+		 * end verifying that there are no holes still containing
+		 * last_half_cycle - 1.  If we find such a hole, then the start
+		 * of that hole will be the new head.  The simple case looks like
+		 *        x | x ... | x - 1 | x
+		 * Another case that fits this picture would be
+		 *        x | x + 1 | x ... | x
+		 * In this case the head really is somewhere at the end of the
+		 * log, as one of the latest writes at the beginning was incomplete.
+		 * One more case is
+		 *        x | x + 1 | x ... | x - 1 | x
+		 * This is really the combination of the above two cases, and the
+		 * head has to end up at the start of the x-1 hole at the end of
+		 * the log.
+		 *
+		 * In the 256k log case, we will read from the beginning to the
+		 * end of the log and search for cycle numbers equal to x-1.  We
+		 * don't worry about the x+1 blocks that we encounter, because
+		 * we know that they cannot be the head since the log started with
+		 * x.
+		 */
+		head_blk = log_bbnum;
+		stop_on_cycle = last_half_cycle - 1;
+	} else {
+		/*
+		 * In this case we want to find the first block with cycle number
+		 * matching last_half_cycle.  We expect the log to be some
+		 * variation on
+		 *        x + 1 ... | x ...
+		 * The first block with cycle number x (last_half_cycle) will be
+		 * where the new head belongs.  First we do a binary search for
+		 * the first occurrence of last_half_cycle.  The binary search
+		 * may not be totally accurate, so then we scan back from there
+		 * looking for occurrences of last_half_cycle before us. 
If + * that backwards scan wraps around the beginning of the log, + * then we look for occurrences of last_half_cycle - 1 at the + * end of the log. The cases we're looking for look like + * x + 1 ... | x | x + 1 | x ... + * ^ binary search stopped here + * or + * x + 1 ... | x ... | x - 1 | x + * <---------> less than scan distance + */ + stop_on_cycle = last_half_cycle; + if ((error = xlog_find_cycle_start(log, bp, first_blk, + &head_blk, last_half_cycle))) + goto bp_err; + } + + /* + * Now validate the answer. Scan back some number of maximum possible + * blocks and make sure each one has the expected cycle number. The + * maximum is determined by the total possible amount of buffering + * in the in-core log. The following number can be made tighter if + * we actually look at the block size of the filesystem. + */ + num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); + if (head_blk >= num_scan_bblks) { + /* + * We are guaranteed that the entire check can be performed + * in one buffer. + */ + start_blk = head_blk - num_scan_bblks; + if ((error = xlog_find_verify_cycle(log, start_blk, num_scan_bblks, + stop_on_cycle, &new_blk))) + goto bp_err; + if (new_blk != -1) + head_blk = new_blk; + } else { /* need to read 2 parts of log */ + /* + * We are going to scan backwards in the log in two parts. First + * we scan the physical end of the log. In this part of the log, + * we are looking for blocks with cycle number last_half_cycle - 1. + * If we find one, then we know that the log starts there, as we've + * found a hole that didn't get written in going around the end + * of the physical log. The simple case for this is + * x + 1 ... | x ... | x - 1 | x + * <---------> less than scan distance + * If all of the blocks at the end of the log have cycle number + * last_half_cycle, then we check the blocks at the start of the + * log looking for occurrences of last_half_cycle. If we find one, + * then our current estimate for the location of the first + * occurrence of last_half_cycle is wrong and we move back to the + * hole we've found. This case looks like + * x + 1 ... | x | x + 1 | x ... + * ^ binary search stopped here + * Another case we need to handle that only occurs in 256k logs is + * x + 1 ... | x ... | x+1 | x ... + * ^ binary search stops here + * In a 256k log, the scan at the end of the log will see the x+1 + * blocks. We need to skip past those since that is certainly not + * the head of the log. By searching for last_half_cycle-1 we + * accomplish that. + */ + start_blk = log_bbnum - num_scan_bblks + head_blk; + ASSERT(head_blk <= INT_MAX && (xfs_daddr_t) num_scan_bblks-head_blk >= 0); + if ((error = xlog_find_verify_cycle(log, start_blk, + num_scan_bblks-(int)head_blk, (stop_on_cycle - 1), + &new_blk))) + goto bp_err; + if (new_blk != -1) { + head_blk = new_blk; + goto bad_blk; + } + + /* + * Scan beginning of log now. The last part of the physical log + * is good. This scan needs to verify that it doesn't find the + * last_half_cycle. + */ + start_blk = 0; + ASSERT(head_blk <= INT_MAX); + if ((error = xlog_find_verify_cycle(log, start_blk, (int) head_blk, + stop_on_cycle, &new_blk))) + goto bp_err; + if (new_blk != -1) + head_blk = new_blk; + } + +bad_blk: + /* + * Now we need to make sure head_blk is not pointing to a block in + * the middle of a log record. 
+ */ + num_scan_bblks = BTOBB(XLOG_MAX_RECORD_BSIZE); + if (head_blk >= num_scan_bblks) { + start_blk = head_blk - num_scan_bblks; /* don't read head_blk */ + + /* start ptr at last block ptr before head_blk */ + if ((error = xlog_find_verify_log_record(log, + start_blk, + &head_blk, + 0)) == -1) { + error = XFS_ERROR(EIO); + goto bp_err; + } else if (error) + goto bp_err; + } else { + start_blk = 0; + ASSERT(head_blk <= INT_MAX); + if ((error = xlog_find_verify_log_record(log, + start_blk, + &head_blk, + 0)) == -1) { + /* We hit the beginning of the log during our search */ + start_blk = log_bbnum - num_scan_bblks + head_blk; + new_blk = log_bbnum; + ASSERT(start_blk <= INT_MAX && (xfs_daddr_t) log_bbnum-start_blk >= 0); + ASSERT(head_blk <= INT_MAX); + if ((error = xlog_find_verify_log_record(log, + start_blk, + &new_blk, + (int)head_blk)) == -1) { + error = XFS_ERROR(EIO); + goto bp_err; + } else if (error) + goto bp_err; + if (new_blk != log_bbnum) + head_blk = new_blk; + } else if (error) + goto bp_err; + } + + xlog_put_bp(bp); + if (head_blk == log_bbnum) + *return_head_blk = 0; + else + *return_head_blk = head_blk; + /* + * When returning here, we have a good block number. Bad block + * means that during a previous crash, we didn't have a clean break + * from cycle number N to cycle number N-1. In this case, we need + * to find the first block with cycle number N-1. + */ + return 0; + +bp_err: + xlog_put_bp(bp); + + if (error) + xlog_warn("XFS: failed to find log head"); + + return error; +} /* xlog_find_head */ + +/* + * Find the sync block number or the tail of the log. + * + * This will be the block number of the last record to have its + * associated buffers synced to disk. Every log record header has + * a sync lsn embedded in it. LSNs hold block numbers, so it is easy + * to get a sync block number. The only concern is to figure out which + * log record header to believe. + * + * The following algorithm uses the log record header with the largest + * lsn. The entire log record does not need to be valid. We only care + * that the header is valid. + * + * We could speed up search by using current head_blk buffer, but it is not + * available. + */ +int +xlog_find_tail(xlog_t *log, + xfs_daddr_t *head_blk, + xfs_daddr_t *tail_blk, + int readonly) +{ + xlog_rec_header_t *rhead; + xlog_op_header_t *op_head; + xfs_buf_t *bp; + int error, i, found; + xfs_daddr_t umount_data_blk; + xfs_daddr_t after_umount_blk; + xfs_lsn_t tail_lsn; + int hblks; + + found = 0; + + /* + * Find previous log record + */ + if ((error = xlog_find_head(log, head_blk))) + return error; + + bp = xlog_get_bp(1,log->l_mp); + if (!bp) + return ENOMEM; + if (*head_blk == 0) { /* special case */ + if ((error = xlog_bread(log, 0, 1, bp))) + goto bread_err; + if (GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT) == 0) { + *tail_blk = 0; + /* leave all other log inited values alone */ + goto exit; + } + } + + /* + * Search backwards looking for log record header block + */ + ASSERT(*head_blk < INT_MAX); + for (i = (int)(*head_blk) - 1; i >= 0; i--) { + if ((error = xlog_bread(log, i, 1, bp))) + goto bread_err; + if (XLOG_HEADER_MAGIC_NUM == + INT_GET(*(uint *)(XFS_BUF_PTR(bp)), ARCH_CONVERT)) { + found = 1; + break; + } + } + /* + * If we haven't found the log record header block, start looking + * again from the end of the physical log. XXXmiken: There should be + * a check here to make sure we didn't search more than N blocks in + * the previous code. 
+ */ + if (!found) { + for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) { + if ((error = xlog_bread(log, i, 1, bp))) + goto bread_err; + if (XLOG_HEADER_MAGIC_NUM == + INT_GET(*(uint*)(XFS_BUF_PTR(bp)), ARCH_CONVERT)) { + found = 2; + break; + } + } + } + if (!found) { + xlog_warn("XFS: xlog_find_tail: couldn't find sync record"); + ASSERT(0); + return XFS_ERROR(EIO); + } + + /* find blk_no of tail of log */ + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(bp); + *tail_blk = BLOCK_LSN(rhead->h_tail_lsn, ARCH_CONVERT); + + /* + * Reset log values according to the state of the log when we + * crashed. In the case where head_blk == 0, we bump curr_cycle + * one because the next write starts a new cycle rather than + * continuing the cycle of the last good log record. At this + * point we have guaranteed that all partial log records have been + * accounted for. Therefore, we know that the last good log record + * written was complete and ended exactly on the end boundary + * of the physical log. + */ + log->l_prev_block = i; + log->l_curr_block = (int)*head_blk; + log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT); + if (found == 2) + log->l_curr_cycle++; + log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT); + log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT); + log->l_grant_reserve_cycle = log->l_curr_cycle; + log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); + log->l_grant_write_cycle = log->l_curr_cycle; + log->l_grant_write_bytes = BBTOB(log->l_curr_block); + + /* + * Look for unmount record. If we find it, then we know there + * was a clean unmount. Since 'i' could be the last block in + * the physical log, we convert to a log block before comparing + * to the head_blk. + * + * Save the current tail lsn to use to pass to + * xlog_clear_stale_blocks() below. We won't want to clear the + * unmount record if there is one, so we pass the lsn of the + * unmount record rather than the block after it. + */ + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + int h_size = INT_GET(rhead->h_size, ARCH_CONVERT); + int h_version = INT_GET(rhead->h_version, ARCH_CONVERT); + + if ((h_version & XLOG_VERSION_2) && + (h_size > XLOG_HEADER_CYCLE_SIZE)) { + hblks = h_size / XLOG_HEADER_CYCLE_SIZE; + if (h_size % XLOG_HEADER_CYCLE_SIZE) + hblks++; + } else { + hblks = 1; + } + } else { + hblks = 1; + } + after_umount_blk = (i + hblks + (int) + BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize; + tail_lsn = log->l_tail_lsn; + if (*head_blk == after_umount_blk && + INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) { + umount_data_blk = (i + hblks) % log->l_logBBsize; + if ((error = xlog_bread(log, umount_data_blk, 1, bp))) { + goto bread_err; + } + op_head = (xlog_op_header_t *)XFS_BUF_PTR(bp); + if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) { + /* + * Set tail and last sync so that newly written + * log records will point recovery to after the + * current unmount record. + */ + ASSIGN_ANY_LSN(log->l_tail_lsn, log->l_curr_cycle, + after_umount_blk, ARCH_NOCONVERT); + ASSIGN_ANY_LSN(log->l_last_sync_lsn, log->l_curr_cycle, + after_umount_blk, ARCH_NOCONVERT); + *tail_blk = after_umount_blk; + } + } + +#ifdef __KERNEL__ + /* + * Make sure that there are no blocks in front of the head + * with the same cycle number as the head. This can happen + * because we allow multiple outstanding log writes concurrently, + * and the later writes might make it out before earlier ones. 
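+ *
+ * Pictorially -- an illustration, not a real trace -- two overlapping
+ * writes can leave the area in front of the head looking like
+ *
+ *	... | n | n | n - 1 | n | <- head
+ *
+ * where the n - 1 block belongs to an earlier write that never made it
+ * out, even though a later write beyond it did.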
+ * + * We use the lsn from before modifying it so that we'll never + * overwrite the unmount record after a clean unmount. + * + * Do this only if we are going to recover the filesystem + * + * NOTE: This used to say "if (!readonly)" + * However on Linux, we can & do recover a read-only filesystem. + * We only skip recovery if NORECOVERY is specified on mount, + * in which case we would not be here. + * + * But... if the -device- itself is readonly, just skip this. + * We can't recover this device anyway, so it won't matter. + */ + + if (!is_read_only(log->l_mp->m_logdev_targp->pbr_kdev)) { + error = xlog_clear_stale_blocks(log, tail_lsn); + } +#endif + +bread_err: +exit: + xlog_put_bp(bp); + + if (error) + xlog_warn("XFS: failed to locate log tail"); + + return error; +} /* xlog_find_tail */ + + +/* + * Is the log zeroed at all? + * + * The last binary search should be changed to perform an X block read + * once X becomes small enough. You can then search linearly through + * the X blocks. This will cut down on the number of reads we need to do. + * + * If the log is partially zeroed, this routine will pass back the blkno + * of the first block with cycle number 0. It won't have a complete LR + * preceding it. + * + * Return: + * 0 => the log is completely written to + * -1 => use *blk_no as the first block of the log + * >0 => error has occurred + */ +int +xlog_find_zeroed(struct log *log, + xfs_daddr_t *blk_no) +{ + xfs_buf_t *bp; + uint first_cycle, last_cycle; + xfs_daddr_t new_blk, last_blk, start_blk; + xfs_daddr_t num_scan_bblks; + int error, log_bbnum = log->l_logBBsize; + + /* check totally zeroed log */ + bp = xlog_get_bp(1,log->l_mp); + if (!bp) + return ENOMEM; + if ((error = xlog_bread(log, 0, 1, bp))) + goto bp_err; + first_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT); + if (first_cycle == 0) { /* completely zeroed log */ + *blk_no = 0; + xlog_put_bp(bp); + return -1; + } + + /* check partially zeroed log */ + if ((error = xlog_bread(log, log_bbnum-1, 1, bp))) + goto bp_err; + last_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT); + if (last_cycle != 0) { /* log completely written to */ + xlog_put_bp(bp); + return 0; + } else if (first_cycle != 1) { + /* + * If the cycle of the last block is zero, the cycle of + * the first block must be 1. If it's not, maybe we're + * not looking at a log... Bail out. + */ + xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)"); + return XFS_ERROR(EINVAL); + } + + /* we have a partially zeroed log */ + last_blk = log_bbnum-1; + if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0))) + goto bp_err; + + /* + * Validate the answer. Because there is no way to guarantee that + * the entire log is made up of log records which are the same size, + * we scan over the defined maximum blocks. At this point, the maximum + * is not chosen to mean anything special. XXXmiken + */ + num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); + ASSERT(num_scan_bblks <= INT_MAX); + + if (last_blk < num_scan_bblks) + num_scan_bblks = last_blk; + start_blk = last_blk - num_scan_bblks; + + /* + * We search for any instances of cycle number 0 that occur before + * our current estimate of the head. What we're trying to detect is + * 1 ... | 0 | 1 | 0... + * ^ binary search ends here + */ + if ((error = xlog_find_verify_cycle(log, start_blk, + (int)num_scan_bblks, 0, &new_blk))) + goto bp_err; + if (new_blk != -1) + last_blk = new_blk; + + /* + * Potentially backup over partial log record write. 
We don't need + * to search the end of the log because we know it is zero. + */ + if ((error = xlog_find_verify_log_record(log, start_blk, + &last_blk, 0)) == -1) { + error = XFS_ERROR(EIO); + goto bp_err; + } else if (error) + goto bp_err; + + *blk_no = last_blk; +bp_err: + xlog_put_bp(bp); + if (error) + return error; + return -1; +} /* xlog_find_zeroed */ + +/* + * This is simply a subroutine used by xlog_clear_stale_blocks() below + * to initialize a buffer full of empty log record headers and write + * them into the log. + */ +STATIC int +xlog_write_log_records( + xlog_t *log, + int cycle, + int start_block, + int blocks, + int tail_cycle, + int tail_block) +{ + xlog_rec_header_t *recp; + int i, j; + int end_block = start_block + blocks; + int error = 0; + xfs_buf_t *bp; + char *buf; + int bufblks; + + bufblks = 1 << ffs(blocks); + while (!(bp = xlog_get_bp(bufblks, log->l_mp))) { + bufblks >>= 1; + if (!bufblks) + return ENOMEM; + } + + buf = XFS_BUF_PTR(bp); + recp = (xlog_rec_header_t*)buf; + + memset(buf, 0, BBSIZE); + INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); + INT_SET(recp->h_cycle, ARCH_CONVERT, cycle); + INT_SET(recp->h_version, ARCH_CONVERT, + XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); + ASSIGN_ANY_LSN(recp->h_tail_lsn, tail_cycle, tail_block, ARCH_CONVERT); + + for (i = start_block; i < end_block; i += bufblks) { + int bcount = min(bufblks, end_block - start_block); + /* with plenty of memory, we duplicate the block + * right through the buffer and modify each entry + */ + ASSIGN_ANY_LSN(recp->h_lsn, cycle, i, ARCH_CONVERT); + for (j = 1; j < bcount; j++) { + buf += BBSIZE; + recp = (xlog_rec_header_t*)buf; + memcpy(buf, XFS_BUF_PTR(bp), BBSIZE); + ASSIGN_ANY_LSN(recp->h_lsn, cycle, i+j, ARCH_CONVERT); + } + /* then write the whole lot out at once */ + error = xlog_bwrite(log, start_block, bcount, bp); + start_block += bcount; + buf = XFS_BUF_PTR(bp); + recp = (xlog_rec_header_t*)buf; + } + xlog_put_bp(bp); + + return error; +} + +/* + * This routine is called to blow away any incomplete log writes out + * in front of the log head. We do this so that we won't become confused + * if we come up, write only a little bit more, and then crash again. + * If we leave the partial log records out there, this situation could + * cause us to think those partial writes are valid blocks since they + * have the current cycle number. We get rid of them by overwriting them + * with empty log records with the old cycle number rather than the + * current one. + * + * The tail lsn is passed in rather than taken from + * the log so that we will not write over the unmount record after a + * clean unmount in a 512 block log. Doing so would leave the log without + * any valid log records in it until a new one was written. If we crashed + * during that time we would not be able to recover. + */ +STATIC int +xlog_clear_stale_blocks( + xlog_t *log, + xfs_lsn_t tail_lsn) +{ + int tail_cycle, head_cycle; + int tail_block, head_block; + int tail_distance, max_distance; + int distance; + int error; + + tail_cycle = CYCLE_LSN(tail_lsn, ARCH_NOCONVERT); + tail_block = BLOCK_LSN(tail_lsn, ARCH_NOCONVERT); + head_cycle = log->l_curr_cycle; + head_block = log->l_curr_block; + + /* + * Figure out the distance between the new head of the log + * and the tail. We want to write over any blocks beyond the + * head that we may have written just before the crash, but + * we don't want to overwrite the tail of the log. 
+ */ + if (head_cycle == tail_cycle) { + /* + * The tail is behind the head in the physical log, + * so the distance from the head to the tail is the + * distance from the head to the end of the log plus + * the distance from the beginning of the log to the + * tail. + */ + if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) { + XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)", + XFS_ERRLEVEL_LOW, log->l_mp); + return XFS_ERROR(EFSCORRUPTED); + } + tail_distance = tail_block + (log->l_logBBsize - head_block); + } else { + /* + * The head is behind the tail in the physical log, + * so the distance from the head to the tail is just + * the tail block minus the head block. + */ + if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){ + XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)", + XFS_ERRLEVEL_LOW, log->l_mp); + return XFS_ERROR(EFSCORRUPTED); + } + tail_distance = tail_block - head_block; + } + + /* + * If the head is right up against the tail, we can't clear + * anything. + */ + if (tail_distance <= 0) { + ASSERT(tail_distance == 0); + return 0; + } + + max_distance = XLOG_TOTAL_REC_SHIFT(log); + /* + * Take the smaller of the maximum amount of outstanding I/O + * we could have and the distance to the tail to clear out. + * We take the smaller so that we don't overwrite the tail and + * we don't waste all day writing from the head to the tail + * for no reason. + */ + max_distance = MIN(max_distance, tail_distance); + + if ((head_block + max_distance) <= log->l_logBBsize) { + /* + * We can stomp all the blocks we need to without + * wrapping around the end of the log. Just do it + * in a single write. Use the cycle number of the + * current cycle minus one so that the log will look like: + * n ... | n - 1 ... + */ + error = xlog_write_log_records(log, (head_cycle - 1), + head_block, max_distance, tail_cycle, + tail_block); + if (error) + return error; + } else { + /* + * We need to wrap around the end of the physical log in + * order to clear all the blocks. Do it in two separate + * I/Os. The first write should be from the head to the + * end of the physical log, and it should use the current + * cycle number minus one just like above. + */ + distance = log->l_logBBsize - head_block; + error = xlog_write_log_records(log, (head_cycle - 1), + head_block, distance, tail_cycle, + tail_block); + + if (error) + return error; + + /* + * Now write the blocks at the start of the physical log. + * This writes the remainder of the blocks we want to clear. + * It uses the current cycle number since we're now on the + * same cycle as the head so that we get: + * n ... n ... | n - 1 ... 
+ * ^^^^^ blocks we're writing + */ + distance = max_distance - (log->l_logBBsize - head_block); + error = xlog_write_log_records(log, head_cycle, 0, distance, + tail_cycle, tail_block); + if (error) + return error; + } + + return 0; +} + +/****************************************************************************** + * + * Log recover routines + * + ****************************************************************************** + */ + +STATIC xlog_recover_t * +xlog_recover_find_tid(xlog_recover_t *q, + xlog_tid_t tid) +{ + xlog_recover_t *p = q; + + while (p != NULL) { + if (p->r_log_tid == tid) + break; + p = p->r_next; + } + return p; +} /* xlog_recover_find_tid */ + + +STATIC void +xlog_recover_put_hashq(xlog_recover_t **q, + xlog_recover_t *trans) +{ + trans->r_next = *q; + *q = trans; +} /* xlog_recover_put_hashq */ + + +STATIC void +xlog_recover_add_item(xlog_recover_item_t **itemq) +{ + xlog_recover_item_t *item; + + item = kmem_zalloc(sizeof(xlog_recover_item_t), 0); + xlog_recover_insert_item_backq(itemq, item); +} /* xlog_recover_add_item */ + + +STATIC int +xlog_recover_add_to_cont_trans(xlog_recover_t *trans, + xfs_caddr_t dp, + int len) +{ + xlog_recover_item_t *item; + xfs_caddr_t ptr, old_ptr; + int old_len; + + item = trans->r_itemq; + if (item == 0) { + /* finish copying rest of trans header */ + xlog_recover_add_item(&trans->r_itemq); + ptr = (xfs_caddr_t)&trans->r_theader+sizeof(xfs_trans_header_t)-len; + memcpy(ptr, dp, len); /* d, s, l */ + return 0; + } + item = item->ri_prev; + + old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; + old_len = item->ri_buf[item->ri_cnt-1].i_len; + + ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0); + memcpy(&ptr[old_len], dp, len); /* d, s, l */ + item->ri_buf[item->ri_cnt-1].i_len += len; + item->ri_buf[item->ri_cnt-1].i_addr = ptr; + return 0; +} /* xlog_recover_add_to_cont_trans */ + + +/* The next region to add is the start of a new region. It could be + * a whole region or it could be the first part of a new region. Because + * of this, the assumption here is that the type and size fields of all + * format structures fit into the first 32 bits of the structure. + * + * This works because all regions must be 32 bit aligned. Therefore, we + * either have both fields or we have neither field. In the case we have + * neither field, the data part of the region is zero length. We only have + * a log_op_header and can throw away the header since a new one will appear + * later. If we have at least 4 bytes, then we can determine how many regions + * will appear in the current log item. 
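+ *
+ * Illustration, mirroring the code below: any format structure will do
+ * for the size peek, e.g.
+ *
+ *	in_f = (xfs_inode_log_format_t *)ptr;
+ *	item->ri_total = in_f->ilf_size;	-- region count for the item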
+ */ +STATIC int +xlog_recover_add_to_trans(xlog_recover_t *trans, + xfs_caddr_t dp, + int len) +{ + xfs_inode_log_format_t *in_f; /* any will do */ + xlog_recover_item_t *item; + xfs_caddr_t ptr; + + if (!len) + return 0; + item = trans->r_itemq; + if (item == 0) { + ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC); + if (len == sizeof(xfs_trans_header_t)) + xlog_recover_add_item(&trans->r_itemq); + memcpy(&trans->r_theader, dp, len); /* d, s, l */ + return 0; + } + + ptr = kmem_alloc(len, 0); + memcpy(ptr, dp, len); + in_f = (xfs_inode_log_format_t *)ptr; + + if (item->ri_prev->ri_total != 0 && + item->ri_prev->ri_total == item->ri_prev->ri_cnt) { + xlog_recover_add_item(&trans->r_itemq); + } + item = trans->r_itemq; + item = item->ri_prev; + + if (item->ri_total == 0) { /* first region to be added */ + item->ri_total = in_f->ilf_size; + ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM); + item->ri_buf = kmem_zalloc((item->ri_total * + sizeof(xfs_log_iovec_t)), 0); + } + ASSERT(item->ri_total > item->ri_cnt); + /* Description region is ri_buf[0] */ + item->ri_buf[item->ri_cnt].i_addr = ptr; + item->ri_buf[item->ri_cnt].i_len = len; + item->ri_cnt++; + return 0; +} /* xlog_recover_add_to_trans */ + + +STATIC void +xlog_recover_new_tid(xlog_recover_t **q, + xlog_tid_t tid, + xfs_lsn_t lsn) +{ + xlog_recover_t *trans; + + trans = kmem_zalloc(sizeof(xlog_recover_t), 0); + trans->r_log_tid = tid; + trans->r_lsn = lsn; + xlog_recover_put_hashq(q, trans); +} /* xlog_recover_new_tid */ + + +STATIC int +xlog_recover_unlink_tid(xlog_recover_t **q, + xlog_recover_t *trans) +{ + xlog_recover_t *tp; + int found = 0; + + ASSERT(trans != 0); + if (trans == *q) { + *q = (*q)->r_next; + } else { + tp = *q; + while (tp != 0) { + if (tp->r_next == trans) { + found = 1; + break; + } + tp = tp->r_next; + } + if (!found) { + xlog_warn( + "XFS: xlog_recover_unlink_tid: trans not found"); + ASSERT(0); + return XFS_ERROR(EIO); + } + tp->r_next = tp->r_next->r_next; + } + return 0; +} /* xlog_recover_unlink_tid */ + +STATIC void +xlog_recover_insert_item_backq(xlog_recover_item_t **q, + xlog_recover_item_t *item) +{ + if (*q == 0) { + item->ri_prev = item->ri_next = item; + *q = item; + } else { + item->ri_next = *q; + item->ri_prev = (*q)->ri_prev; + (*q)->ri_prev = item; + item->ri_prev->ri_next = item; + } +} /* xlog_recover_insert_item_backq */ + +STATIC void +xlog_recover_insert_item_frontq(xlog_recover_item_t **q, + xlog_recover_item_t *item) +{ + xlog_recover_insert_item_backq(q, item); + *q = item; +} /* xlog_recover_insert_item_frontq */ + +STATIC int +xlog_recover_reorder_trans(xlog_t *log, + xlog_recover_t *trans) +{ + xlog_recover_item_t *first_item, *itemq, *itemq_next; + + first_item = itemq = trans->r_itemq; + trans->r_itemq = NULL; + do { + itemq_next = itemq->ri_next; + switch (ITEM_TYPE(itemq)) { + case XFS_LI_BUF: + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: { + xlog_recover_insert_item_frontq(&trans->r_itemq, itemq); + break; + } + case XFS_LI_INODE: + case XFS_LI_6_1_INODE: + case XFS_LI_5_3_INODE: + case XFS_LI_DQUOT: + case XFS_LI_QUOTAOFF: + case XFS_LI_EFD: + case XFS_LI_EFI: { + xlog_recover_insert_item_backq(&trans->r_itemq, itemq); + break; + } + default: { + xlog_warn( + "XFS: xlog_recover_reorder_trans: unrecognized type of log operation"); + ASSERT(0); + return XFS_ERROR(EIO); + } + } + itemq = itemq_next; + } while (first_item != itemq); + return 0; +} /* xlog_recover_reorder_trans */ + + +/* + * Build up the table of buf cancel records so that we don't replay + * cancelled data in 
the second pass.  For buffer records that are
+ * not cancel records, there is nothing to do here so we just return.
+ *
+ * If we get a cancel record which is already in the table, this indicates
+ * that the buffer was cancelled multiple times.  In order to ensure
+ * that during pass 2 we keep the record in the table until we reach its
+ * last occurrence in the log, we keep a reference count in the cancel
+ * record in the table to tell us how many times we expect to see this
+ * record during the second pass.
+ */
+STATIC void
+xlog_recover_do_buffer_pass1(xlog_t		*log,
+			     xfs_buf_log_format_t	*buf_f)
+{
+	xfs_buf_cancel_t	*bcp;
+	xfs_buf_cancel_t	*nextp;
+	xfs_buf_cancel_t	*prevp;
+	xfs_buf_cancel_t	**bucket;
+	xfs_buf_log_format_v1_t	*obuf_f;
+	xfs_daddr_t		blkno=0;
+	uint			len=0;
+	ushort			flags=0;
+
+	switch (buf_f->blf_type) {
+	case XFS_LI_BUF:
+		blkno = buf_f->blf_blkno;
+		len = buf_f->blf_len;
+		flags = buf_f->blf_flags;
+		break;
+	case XFS_LI_6_1_BUF:
+	case XFS_LI_5_3_BUF:
+		obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
+		blkno = (xfs_daddr_t) obuf_f->blf_blkno;
+		len = obuf_f->blf_len;
+		flags = obuf_f->blf_flags;
+		break;
+	}
+
+	/*
+	 * If this isn't a cancel buffer item, then just return.
+	 */
+	if (!(flags & XFS_BLI_CANCEL)) {
+		return;
+	}
+
+	/*
+	 * Insert an xfs_buf_cancel record into the hash table of
+	 * them.  If there is already an identical record, bump
+	 * its reference count.
+	 */
+	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
+					  XLOG_BC_TABLE_SIZE];
+	/*
+	 * If the hash bucket is empty then just insert a new record into
+	 * the bucket.
+	 */
+	if (*bucket == NULL) {
+		bcp = (xfs_buf_cancel_t*)kmem_alloc(sizeof(xfs_buf_cancel_t),
+						    KM_SLEEP);
+		bcp->bc_blkno = blkno;
+		bcp->bc_len = len;
+		bcp->bc_refcount = 1;
+		bcp->bc_next = NULL;
+		*bucket = bcp;
+		return;
+	}
+
+	/*
+	 * The hash bucket is not empty, so search for duplicates of our
+	 * record.  If we find one then just bump its refcount.  If not
+	 * then add us at the end of the list.
+	 */
+	prevp = NULL;
+	nextp = *bucket;
+	while (nextp != NULL) {
+		if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
+			nextp->bc_refcount++;
+			return;
+		}
+		prevp = nextp;
+		nextp = nextp->bc_next;
+	}
+	ASSERT(prevp != NULL);
+	bcp = (xfs_buf_cancel_t*)kmem_alloc(sizeof(xfs_buf_cancel_t),
+					    KM_SLEEP);
+	bcp->bc_blkno = blkno;
+	bcp->bc_len = len;
+	bcp->bc_refcount = 1;
+	bcp->bc_next = NULL;
+	prevp->bc_next = bcp;
+}
+
+/*
+ * Check to see whether the buffer being recovered has a corresponding
+ * entry in the buffer cancel record table.  If it does then return 1
+ * so that it will be cancelled, otherwise return 0.  If the buffer is
+ * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
+ * the refcount on the entry in the table and remove it from the table
+ * if this is the last reference.
+ *
+ * We remove the cancel record from the table when we encounter its
+ * last occurrence in the log so that if the same buffer is re-used
+ * again after its last cancellation we actually replay the changes
+ * made at that point.
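+ *
+ * Life cycle of an entry, schematically: a block cancelled twice in the
+ * log leaves pass 1 with bc_refcount == 2, and each XFS_BLI_CANCEL
+ * record met again in pass 2 then does, in effect,
+ *
+ *	if (--bcp->bc_refcount == 0)
+ *		unlink bcp from its bucket and kmem_free() it;
+ *
+ * so that a later re-use of the same block replays normally.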
+ */
+STATIC int
+xlog_recover_do_buffer_pass2(xlog_t		*log,
+			     xfs_buf_log_format_t	*buf_f)
+{
+	xfs_buf_cancel_t	*bcp;
+	xfs_buf_cancel_t	*prevp;
+	xfs_buf_cancel_t	**bucket;
+	xfs_buf_log_format_v1_t	*obuf_f;
+	xfs_daddr_t		blkno=0;
+	ushort			flags=0;
+	uint			len=0;
+
+
+	switch (buf_f->blf_type) {
+	case XFS_LI_BUF:
+		blkno = buf_f->blf_blkno;
+		flags = buf_f->blf_flags;
+		len = buf_f->blf_len;
+		break;
+	case XFS_LI_6_1_BUF:
+	case XFS_LI_5_3_BUF:
+		obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
+		blkno = (xfs_daddr_t) obuf_f->blf_blkno;
+		flags = obuf_f->blf_flags;
+		len = (xfs_daddr_t) obuf_f->blf_len;
+		break;
+	}
+	if (log->l_buf_cancel_table == NULL) {
+		/*
+		 * There is nothing in the table built in pass one,
+		 * so this buffer must not be cancelled.
+		 */
+		ASSERT(!(flags & XFS_BLI_CANCEL));
+		return 0;
+	}
+
+	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
+					  XLOG_BC_TABLE_SIZE];
+	bcp = *bucket;
+	if (bcp == NULL) {
+		/*
+		 * There is no corresponding entry in the table built
+		 * in pass one, so this buffer has not been cancelled.
+		 */
+		ASSERT(!(flags & XFS_BLI_CANCEL));
+		return 0;
+	}
+
+	/*
+	 * Search for an entry in the buffer cancel table that
+	 * matches our buffer.
+	 */
+	prevp = NULL;
+	while (bcp != NULL) {
+		if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
+			/*
+			 * We've got a match, so return 1 so that the
+			 * recovery of this buffer is cancelled.
+			 * If this buffer is actually a buffer cancel
+			 * log item, then decrement the refcount on the
+			 * one in the table and remove it if this is the
+			 * last reference.
+			 */
+			if (flags & XFS_BLI_CANCEL) {
+				bcp->bc_refcount--;
+				if (bcp->bc_refcount == 0) {
+					if (prevp == NULL) {
+						*bucket = bcp->bc_next;
+					} else {
+						prevp->bc_next = bcp->bc_next;
+					}
+					kmem_free(bcp,
+						  sizeof(xfs_buf_cancel_t));
+				}
+			}
+			return 1;
+		}
+		prevp = bcp;
+		bcp = bcp->bc_next;
+	}
+	/*
+	 * We didn't find a corresponding entry in the table, so
+	 * return 0 so that the buffer is NOT cancelled.
+	 */
+	ASSERT(!(flags & XFS_BLI_CANCEL));
+	return 0;
+}
+
+
+/*
+ * Perform recovery for a buffer full of inodes.  In these buffers,
+ * the only data which should be recovered is that which corresponds
+ * to the di_next_unlinked pointers in the on disk inode structures.
+ * The rest of the data for the inodes is always logged through the
+ * inodes themselves rather than the inode buffer and is recovered
+ * in xlog_recover_do_inode_trans().
+ *
+ * The only time when buffers full of inodes are fully recovered is
+ * when the buffer is full of newly allocated inodes.  In this case
+ * the buffer will not be marked as an inode buffer and so will be
+ * sent to xlog_recover_do_reg_buffer() below during recovery.
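+ *
+ * Schematically -- the real loop below must also walk the logged
+ * regions -- the bytes recovered for inode i are just
+ *
+ *	off = i * sb_inodesize + offsetof(xfs_dinode_t, di_next_unlinked);
+ *	copy sizeof(xfs_agino_t) bytes of logged data to offset off;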
+ */ +STATIC int +xlog_recover_do_inode_buffer(xfs_mount_t *mp, + xlog_recover_item_t *item, + xfs_buf_t *bp, + xfs_buf_log_format_t *buf_f) +{ + int i; + int item_index; + int bit; + int nbits; + int reg_buf_offset; + int reg_buf_bytes; + int next_unlinked_offset; + int inodes_per_buf; + xfs_agino_t *logged_nextp; + xfs_agino_t *buffer_nextp; + xfs_buf_log_format_v1_t *obuf_f; + unsigned int *data_map=NULL; + unsigned int map_size=0; + + switch (buf_f->blf_type) { + case XFS_LI_BUF: + data_map = buf_f->blf_data_map; + map_size = buf_f->blf_map_size; + break; + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: + obuf_f = (xfs_buf_log_format_v1_t*)buf_f; + data_map = obuf_f->blf_data_map; + map_size = obuf_f->blf_map_size; + break; + } + /* + * Set the variables corresponding to the current region to + * 0 so that we'll initialize them on the first pass through + * the loop. + */ + reg_buf_offset = 0; + reg_buf_bytes = 0; + bit = 0; + nbits = 0; + item_index = 0; + inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog; + for (i = 0; i < inodes_per_buf; i++) { + next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + + offsetof(xfs_dinode_t, di_next_unlinked); + + while (next_unlinked_offset >= + (reg_buf_offset + reg_buf_bytes)) { + /* + * The next di_next_unlinked field is beyond + * the current logged region. Find the next + * logged region that contains or is beyond + * the current di_next_unlinked field. + */ + bit += nbits; + bit = xfs_next_bit(data_map, map_size, bit); + + /* + * If there are no more logged regions in the + * buffer, then we're done. + */ + if (bit == -1) { + return 0; + } + + nbits = xfs_contig_bits(data_map, map_size, + bit); + reg_buf_offset = bit << XFS_BLI_SHIFT; + reg_buf_bytes = nbits << XFS_BLI_SHIFT; + item_index++; + } + + /* + * If the current logged region starts after the current + * di_next_unlinked field, then move on to the next + * di_next_unlinked field. + */ + if (next_unlinked_offset < reg_buf_offset) { + continue; + } + + ASSERT(item->ri_buf[item_index].i_addr != NULL); + ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0); + ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp)); + + /* + * The current logged region contains a copy of the + * current di_next_unlinked field. Extract its value + * and copy it to the buffer copy. + */ + logged_nextp = (xfs_agino_t *) + ((char *)(item->ri_buf[item_index].i_addr) + + (next_unlinked_offset - reg_buf_offset)); + if (unlikely(*logged_nextp == 0)) { + xfs_fs_cmn_err(CE_ALERT, mp, + "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field", + item, bp); + XFS_ERROR_REPORT("xlog_recover_do_inode_buf", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, + next_unlinked_offset); + INT_SET(*buffer_nextp, ARCH_CONVERT, *logged_nextp); + } + + return 0; +} /* xlog_recover_do_inode_buffer */ + +/* + * Perform a 'normal' buffer recovery. Each logged region of the + * buffer should be copied over the corresponding region in the + * given buffer. The bitmap in the buf log format structure indicates + * where to place the logged data. 
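[Editor's note: both the inode-buffer and regular-buffer replay paths below walk a dirty-region bitmap, one bit per XFS_BLI_CHUNK bytes of buffer, using xfs_next_bit() and xfs_contig_bits(). A stand-alone sketch of those two primitives, assuming a plain unsigned-int bitmap; the real helpers live elsewhere in XFS and are only mirrored in spirit here.]

    #include <limits.h>

    #define BITS_PER_WORD ((int)(sizeof(unsigned int) * CHAR_BIT))

    /* Return the index of the next set bit at or after start_bit, or -1. */
    static int next_set_bit(const unsigned int *map, int size_in_words,
                            int start_bit)
    {
        int bit;

        for (bit = start_bit; bit < size_in_words * BITS_PER_WORD; bit++) {
            if (map[bit / BITS_PER_WORD] & (1U << (bit % BITS_PER_WORD)))
                return bit;
        }
        return -1;
    }

    /* Count how many consecutive bits are set starting at start_bit. */
    static int contig_bits(const unsigned int *map, int size_in_words,
                           int start_bit)
    {
        int bit = start_bit;

        while (bit < size_in_words * BITS_PER_WORD &&
               (map[bit / BITS_PER_WORD] & (1U << (bit % BITS_PER_WORD))))
            bit++;
        return bit - start_bit;
    }

[Each (bit, nbits) pair found this way names the byte range from bit << XFS_BLI_SHIFT up to (bit + nbits) << XFS_BLI_SHIFT, which is how the replay code pairs logged regions with offsets in the target buffer.]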
+ */ +/*ARGSUSED*/ +STATIC void +xlog_recover_do_reg_buffer(xfs_mount_t *mp, + xlog_recover_item_t *item, + xfs_buf_t *bp, + xfs_buf_log_format_t *buf_f) +{ + int i; + int bit; + int nbits; + xfs_buf_log_format_v1_t *obuf_f; + unsigned int *data_map=NULL; + unsigned int map_size=0; + int error; + + switch (buf_f->blf_type) { + case XFS_LI_BUF: + data_map = buf_f->blf_data_map; + map_size = buf_f->blf_map_size; + break; + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: + obuf_f = (xfs_buf_log_format_v1_t*)buf_f; + data_map = obuf_f->blf_data_map; + map_size = obuf_f->blf_map_size; + break; + } + bit = 0; + i = 1; /* 0 is the buf format structure */ + while (1) { + bit = xfs_next_bit(data_map, map_size, bit); + if (bit == -1) + break; + nbits = xfs_contig_bits(data_map, map_size, bit); + ASSERT(item->ri_buf[i].i_addr != 0); + ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0); + ASSERT(XFS_BUF_COUNT(bp) >= + ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT)); + + /* + * Do a sanity check if this is a dquot buffer. Just checking + * the first dquot in the buffer should do. XXXThis is + * probably a good thing to do for other buf types also. + */ + error = 0; + if (buf_f->blf_flags & (XFS_BLI_UDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { + error = xfs_qm_dqcheck((xfs_disk_dquot_t *) + item->ri_buf[i].i_addr, + -1, 0, XFS_QMOPT_DOWARN, + "dquot_buf_recover"); + } + if (!error) + memcpy(xfs_buf_offset(bp, + (uint)bit << XFS_BLI_SHIFT), /* dest */ + item->ri_buf[i].i_addr, /* source */ + nbits<<XFS_BLI_SHIFT); /* length */ + i++; + bit += nbits; + } + + /* Shouldn't be any more regions */ + ASSERT(i == item->ri_total); +} /* xlog_recover_do_reg_buffer */ + +/* + * Do some primitive error checking on ondisk dquot data structures. + */ +int +xfs_qm_dqcheck( + xfs_disk_dquot_t *ddq, + xfs_dqid_t id, + uint type, /* used only when IO_dorepair is true */ + uint flags, + char *str) +{ + xfs_dqblk_t *d = (xfs_dqblk_t *)ddq; + int errs = 0; + + /* + * We can encounter an uninitialized dquot buffer for 2 reasons: + * 1. If we crash while deleting the quotainode(s), and those blks got + * used for user data. This is because we take the path of regular + * file deletion; however, the size field of quotainodes is never + * updated, so all the tricks that we play in itruncate_finish + * don't quite matter. + * + * 2. We don't play the quota buffers when there's a quotaoff logitem. + * But the allocation will be replayed so we'll end up with an + * uninitialized quota block. + * + * This is all fine; things are still consistent, and we haven't lost + * any quota information. Just don't complain about bad dquot blks. + */ + if (INT_GET(ddq->d_magic, ARCH_CONVERT) != XFS_DQUOT_MAGIC) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x", + str, id, + INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_MAGIC); + errs++; + } + if (INT_GET(ddq->d_version, ARCH_CONVERT) != XFS_DQUOT_VERSION) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x", + str, id, + INT_GET(ddq->d_version, ARCH_CONVERT), XFS_DQUOT_VERSION); + errs++; + } + + if (INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_USER && + INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_GROUP) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : XFS dquot ID 0x%x, unknown flags 0x%x", + str, id, INT_GET(ddq->d_flags, ARCH_CONVERT)); + errs++; + } + + if (id != -1 && id != INT_GET(ddq->d_id, ARCH_CONVERT)) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : ondisk-dquot 0x%x, ID mismatch: " + "0x%x expected, found id 0x%x", + str, ddq, id, INT_GET(ddq->d_id, ARCH_CONVERT)); + errs++; + } + + if (!
errs) { + if (INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT) && + INT_GET(ddq->d_bcount, ARCH_CONVERT) >= + INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT)) { + if (INT_ISZERO(ddq->d_btimer, ARCH_CONVERT) && + !INT_ISZERO(ddq->d_id, ARCH_CONVERT)) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : Dquot ID 0x%x (0x%x) " + "BLK TIMER NOT STARTED", + str, (int) + INT_GET(ddq->d_id, ARCH_CONVERT), ddq); + errs++; + } + } + if (INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT) && + INT_GET(ddq->d_icount, ARCH_CONVERT) >= + INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT)) { + if (INT_ISZERO(ddq->d_itimer, ARCH_CONVERT) && + !INT_ISZERO(ddq->d_id, ARCH_CONVERT)) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : Dquot ID 0x%x (0x%x) " + "INODE TIMER NOT STARTED", + str, (int) + INT_GET(ddq->d_id, ARCH_CONVERT), ddq); + errs++; + } + } + } + + if (!errs || !(flags & XFS_QMOPT_DQREPAIR)) + return errs; + + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id); + + /* + * Typically, a repair is only requested by quotacheck. + */ + ASSERT(id != -1); + ASSERT(flags & XFS_QMOPT_DQREPAIR); + memset(d, 0, sizeof(xfs_dqblk_t)); + INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC); + INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION); + INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id); + INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type); + + return errs; +} + +/* + * Perform a dquot buffer recovery. + * Simple algorithm: if we have found a QUOTAOFF logitem of the same type + * (i.e. USR or GRP), then just toss this buffer away; don't recover it. + * Else, treat it as a regular buffer and do recovery. + */ +STATIC void +xlog_recover_do_dquot_buffer( + xfs_mount_t *mp, + xlog_t *log, + xlog_recover_item_t *item, + xfs_buf_t *bp, + xfs_buf_log_format_t *buf_f) +{ + uint type; + + /* + * Filesystems are required to send in quota flags at mount time. + */ + if (mp->m_qflags == 0) { + return; + } + + type = 0; + if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF) + type |= XFS_DQ_USER; + if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF) + type |= XFS_DQ_GROUP; + /* + * This type of quota was turned off, so ignore this buffer + */ + if (log->l_quotaoffs_flag & type) + return; + + xlog_recover_do_reg_buffer(mp, item, bp, buf_f); +} + +/* + * This routine replays a modification made to a buffer at runtime. + * There are actually two types of buffer, regular and inode, which + * are handled differently. Inode buffers are handled differently + * in that we only recover a specific set of data from them, namely + * the inode di_next_unlinked fields. This is because all other inode + * data is actually logged via inode records and any data we replay + * here which overlaps that may be stale. + * + * When meta-data buffers are freed at run time we log a buffer item + * with the XFS_BLI_CANCEL bit set to indicate that previous copies + * of the buffer in the log should not be replayed at recovery time. + * This is so that if the blocks covered by the buffer are reused for + * file data before we crash we don't end up replaying old, freed + * meta-data into a user's file. + * + * To handle the cancellation of buffer log items, we make two passes + * over the log during recovery. During the first we build a table of + * those buffers which have been cancelled, and during the second we + * only replay those buffers which do not have corresponding cancel + * records in the table.
See xlog_recover_do_buffer_pass[1,2] above + * for more details on the implementation of the table of cancel records. + */ +STATIC int +xlog_recover_do_buffer_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_buf_log_format_t *buf_f; + xfs_buf_log_format_v1_t *obuf_f; + xfs_mount_t *mp; + xfs_buf_t *bp; + int error; + int cancel; + xfs_daddr_t blkno; + int len; + ushort flags; + + buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr; + + if (pass == XLOG_RECOVER_PASS1) { + /* + * In this pass we're only looking for buf items + * with the XFS_BLI_CANCEL bit set. + */ + xlog_recover_do_buffer_pass1(log, buf_f); + return 0; + } else { + /* + * In this pass we want to recover all the buffers + * which have not been cancelled and are not + * cancellation buffers themselves. The routine + * we call here will tell us whether or not to + * continue with the replay of this buffer. + */ + cancel = xlog_recover_do_buffer_pass2(log, buf_f); + if (cancel) { + return 0; + } + } + switch (buf_f->blf_type) { + case XFS_LI_BUF: + blkno = buf_f->blf_blkno; + len = buf_f->blf_len; + flags = buf_f->blf_flags; + break; + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: + obuf_f = (xfs_buf_log_format_v1_t*)buf_f; + blkno = obuf_f->blf_blkno; + len = obuf_f->blf_len; + flags = obuf_f->blf_flags; + break; + default: + xfs_fs_cmn_err(CE_ALERT, log->l_mp, + "xfs_log_recover: unknown buffer type 0x%x, dev 0x%x", + buf_f->blf_type, log->l_dev); + XFS_ERROR_REPORT("xlog_recover_do_buffer_trans", + XFS_ERRLEVEL_LOW, log->l_mp); + return XFS_ERROR(EFSCORRUPTED); + } + + mp = log->l_mp; + if (flags & XFS_BLI_INODE_BUF) { + bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len, + XFS_BUF_LOCK); + } else { + bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0); + } + if (XFS_BUF_ISERROR(bp)) { + xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp, + bp, blkno); + error = XFS_BUF_GETERROR(bp); + xfs_buf_relse(bp); + return error; + } + + error = 0; + if (flags & XFS_BLI_INODE_BUF) { + error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); + } else if (flags & (XFS_BLI_UDQUOT_BUF | XFS_BLI_GDQUOT_BUF)) { + xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); + } else { + xlog_recover_do_reg_buffer(mp, item, bp, buf_f); + } + if (error) + return XFS_ERROR(error); + + /* + * Perform delayed write on the buffer. Asynchronous writes will be + * slower when taking into account all the buffers to be flushed. + * + * Also make sure that only inode buffers with good sizes stay in + * the buffer cache. The kernel moves inodes in buffers of 1 block + * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode + * buffers in the log can be a different size if the log was generated + * by an older kernel using unclustered inode buffers or a newer kernel + * running with a different inode cluster size. Regardless, if the + * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE) + * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep + * the buffer out of the buffer cache so that the buffer won't + * overlap with future reads of those inodes.
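[Editor's note: the keep-or-stale decision just described reduces to a single size comparison. A minimal sketch with illustrative parameter names; the kernel compares XFS_BUF_COUNT(bp) against MAX(sb_blocksize, XFS_INODE_CLUSTER_SIZE) as the comment above says.]

    #include <stdint.h>

    /* Return nonzero if a just-recovered inode buffer has the size that this
     * kernel would itself use for inode clusters and may stay cached;
     * otherwise the caller writes it out synchronously and stales it. */
    static int keep_inode_buffer(uint32_t buf_bytes, uint32_t blocksize,
                                 uint32_t cluster_size)
    {
        uint32_t expected = blocksize > cluster_size ? blocksize : cluster_size;

        return buf_bytes == expected;
    }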
+ */ + if (XFS_DINODE_MAGIC == + INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) && + (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize, + (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { + XFS_BUF_STALE(bp); + error = xfs_bwrite(mp, bp); + } else { + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL || + XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp); + XFS_BUF_SET_FSPRIVATE(bp, mp); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + xfs_bdwrite(mp, bp); + } + + return (error); +} /* xlog_recover_do_buffer_trans */ + +STATIC int +xlog_recover_do_inode_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_inode_log_format_t *in_f; + xfs_mount_t *mp; + xfs_buf_t *bp; + xfs_imap_t imap; + xfs_dinode_t *dip; + xfs_ino_t ino; + int len; + xfs_caddr_t src; + xfs_caddr_t dest; + int error; + int attr_index; + uint fields; + xfs_dinode_core_t *dicp; + + if (pass == XLOG_RECOVER_PASS1) { + return 0; + } + + in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr; + ino = in_f->ilf_ino; + mp = log->l_mp; + if (ITEM_TYPE(item) == XFS_LI_INODE) { + imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno; + imap.im_len = in_f->ilf_len; + imap.im_boffset = in_f->ilf_boffset; + } else { + /* + * It's an old inode format record. We don't know where + * its cluster is located on disk, and we can't allow + * xfs_imap() to figure it out because the inode btrees + * are not ready to be used. Therefore do not pass the + * XFS_IMAP_LOOKUP flag to xfs_imap(). This will give + * us only the single block in which the inode lives + * rather than its cluster, so we must make sure to + * invalidate the buffer when we write it out below. + */ + imap.im_blkno = 0; + xfs_imap(log->l_mp, 0, ino, &imap, 0); + } + bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len, + XFS_BUF_LOCK); + if (XFS_BUF_ISERROR(bp)) { + xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, + bp, imap.im_blkno); + error = XFS_BUF_GETERROR(bp); + xfs_buf_relse(bp); + return error; + } + error = 0; + ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); + dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); + + /* + * Make sure the place we're flushing out to really looks + * like an inode! 
+ */ + if (unlikely(INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC)) { + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld", + dip, bp, ino); + XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + dicp = (xfs_dinode_core_t*)(item->ri_buf[1].i_addr); + if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld", + item, ino); + XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + if (unlikely((dicp->di_mode & IFMT) == IFREG)) { + if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && + (dicp->di_format != XFS_DINODE_FMT_BTREE)) { + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", + item, dip, bp, ino); + return XFS_ERROR(EFSCORRUPTED); + } + } else if (unlikely((dicp->di_mode & IFMT) == IFDIR)) { + if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && + (dicp->di_format != XFS_DINODE_FMT_BTREE) && + (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", + item, dip, bp, ino); + return XFS_ERROR(EFSCORRUPTED); + } + } + if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){ + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", + item, dip, bp, ino, + dicp->di_nextents + dicp->di_anextents, + dicp->di_nblocks); + return XFS_ERROR(EFSCORRUPTED); + } + if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x", + item, dip, bp, ino, dicp->di_forkoff); + return XFS_ERROR(EFSCORRUPTED); + } + if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) { + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p", + item->ri_buf[1].i_len, item); + return XFS_ERROR(EFSCORRUPTED); + } + + /* The core is in in-core format */ + xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core, + (xfs_dinode_core_t*)item->ri_buf[1].i_addr, + -1, ARCH_CONVERT); + /* the rest is in on-disk format */ + if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) { + memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t), + item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t), + item->ri_buf[1].i_len - sizeof(xfs_dinode_core_t)); + } + + fields = in_f->ilf_fields; + switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) { + case XFS_ILOG_DEV: + INT_SET(dip->di_u.di_dev, ARCH_CONVERT, in_f->ilf_u.ilfu_rdev); + + break; + case XFS_ILOG_UUID: + dip->di_u.di_muuid = 
in_f->ilf_u.ilfu_uuid; + break; + } + + if (in_f->ilf_size == 2) + goto write_inode_buffer; + len = item->ri_buf[2].i_len; + src = item->ri_buf[2].i_addr; + ASSERT(in_f->ilf_size <= 4); + ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); + ASSERT(!(fields & XFS_ILOG_DFORK) || + (len == in_f->ilf_dsize)); + + switch (fields & XFS_ILOG_DFORK) { + case XFS_ILOG_DDATA: + case XFS_ILOG_DEXT: + memcpy(&dip->di_u, src, len); + break; + + case XFS_ILOG_DBROOT: + xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len, + &(dip->di_u.di_bmbt), + XFS_DFORK_DSIZE(dip, mp)); + break; + + default: + /* + * There are no data fork flags set. + */ + ASSERT((fields & XFS_ILOG_DFORK) == 0); + break; + } + + /* + * If we logged any attribute data, recover it. There may or + * may not have been any other non-core data logged in this + * transaction. + */ + if (in_f->ilf_fields & XFS_ILOG_AFORK) { + if (in_f->ilf_fields & XFS_ILOG_DFORK) { + attr_index = 3; + } else { + attr_index = 2; + } + len = item->ri_buf[attr_index].i_len; + src = item->ri_buf[attr_index].i_addr; + ASSERT(len == in_f->ilf_asize); + + switch (in_f->ilf_fields & XFS_ILOG_AFORK) { + case XFS_ILOG_ADATA: + case XFS_ILOG_AEXT: + dest = XFS_DFORK_APTR(dip); + ASSERT(len <= XFS_DFORK_ASIZE(dip, mp)); + memcpy(dest, src, len); + break; + + case XFS_ILOG_ABROOT: + dest = XFS_DFORK_APTR(dip); + xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len, + (xfs_bmdr_block_t*)dest, + XFS_DFORK_ASIZE(dip, mp)); + break; + + default: + xlog_warn("XFS: xlog_recover_do_inode_trans: Illegal flag"); + ASSERT(0); + xfs_buf_relse(bp); + return XFS_ERROR(EIO); + } + } + + +write_inode_buffer: + if (ITEM_TYPE(item) == XFS_LI_INODE) { + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL || + XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp); + XFS_BUF_SET_FSPRIVATE(bp, mp); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + xfs_bdwrite(mp, bp); + } else { + XFS_BUF_STALE(bp); + error = xfs_bwrite(mp, bp); + } + + return (error); +} /* xlog_recover_do_inode_trans */ + + +/* + * Recover QUOTAOFF records. We simply make a note of it in the xlog_t + * structure, so that we know not to do any dquot item or dquot buffer recovery + * of that type. + */ +STATIC int +xlog_recover_do_quotaoff_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_qoff_logformat_t *qoff_f; + + if (pass == XLOG_RECOVER_PASS2) { + return (0); + } + + qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr; + ASSERT(qoff_f); + + /* + * The logitem format's flag tells us if this was user quotaoff, + * group quotaoff or both. + */ + if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) + log->l_quotaoffs_flag |= XFS_DQ_USER; + if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) + log->l_quotaoffs_flag |= XFS_DQ_GROUP; + + return (0); +} + + +/* + * Recover a dquot record + */ +STATIC int +xlog_recover_do_dquot_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_mount_t *mp; + xfs_buf_t *bp; + struct xfs_disk_dquot *ddq, *recddq; + int error; + xfs_dq_logformat_t *dq_f; + uint type; + + if (pass == XLOG_RECOVER_PASS1) { + return 0; + } + mp = log->l_mp; + + /* + * Filesystems are required to send in quota flags at mount time. + */ + if (mp->m_qflags == 0) + return (0); + + recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr; + ASSERT(recddq); + /* + * This type of quota was turned off, so ignore this record.
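[Editor's note: the quotaoff gating used by both xlog_recover_do_quotaoff_trans() above and the dquot replay below is just a flags word accumulated in pass 1 and consulted in pass 2. A reduced sketch; the flag values and function names here are illustrative, not the kernel's.]

    /* Bits accumulated while scanning the log in pass 1. */
    #define DQ_USER  0x1
    #define DQ_GROUP 0x2

    static unsigned int quotaoffs_flag;     /* plays the role of l_quotaoffs_flag */

    /* Called for every QUOTAOFF record found during pass 1. */
    static void saw_quotaoff_record(int user, int group)
    {
        if (user)
            quotaoffs_flag |= DQ_USER;
        if (group)
            quotaoffs_flag |= DQ_GROUP;
    }

    /* Pass 2: nonzero means a dquot record of this type must be skipped,
     * because quota of that type was turned off later in the log. */
    static int dquot_record_gated(unsigned int dquot_type)
    {
        return (quotaoffs_flag & dquot_type) != 0;
    }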
+ */ + type = INT_GET(recddq->d_flags, ARCH_CONVERT) & + (XFS_DQ_USER | XFS_DQ_GROUP); + ASSERT(type); + if (log->l_quotaoffs_flag & type) + return (0); + + /* + * At this point we know that quota was _not_ turned off. + * Since the mount flags are not indicating to us otherwise, this + * must mean that quota is on, and the dquot needs to be replayed. + * Remember that we may not have fully recovered the superblock yet, + * so we can't do the usual trick of looking at the SB quota bits. + * + * The other possibility, of course, is that the quota subsystem was + * removed since the last mount - ENOSYS. + */ + dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr; + ASSERT(dq_f); + if ((error = xfs_qm_dqcheck(recddq, + dq_f->qlf_id, + 0, XFS_QMOPT_DOWARN, + "xlog_recover_do_dquot_trans (log copy)"))) { + return XFS_ERROR(EIO); + } + ASSERT(dq_f->qlf_len == 1); + + error = xfs_read_buf(mp, mp->m_ddev_targp, + dq_f->qlf_blkno, + XFS_FSB_TO_BB(mp, dq_f->qlf_len), + 0, &bp); + if (error) { + xfs_ioerror_alert("xlog_recover_do..(read#3)", mp, + bp, dq_f->qlf_blkno); + return error; + } + ASSERT(bp); + ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset); + + /* + * At least the magic num portion should be on disk because this + * was among a chunk of dquots created earlier, and we did some + * minimal initialization then. + */ + if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, + "xlog_recover_do_dquot_trans")) { + xfs_buf_relse(bp); + return XFS_ERROR(EIO); + } + + memcpy(ddq, recddq, item->ri_buf[1].i_len); + + ASSERT(dq_f->qlf_size == 2); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL || + XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp); + XFS_BUF_SET_FSPRIVATE(bp, mp); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + xfs_bdwrite(mp, bp); + + return (0); +} /* xlog_recover_do_dquot_trans */ + +/* + * This routine is called to create an in-core extent free intent + * item from the efi format structure which was logged on disk. + * It allocates an in-core efi, copies the extents from the format + * structure into it, and adds the efi to the AIL with the given + * LSN. + */ +STATIC void +xlog_recover_do_efi_trans(xlog_t *log, + xlog_recover_item_t *item, + xfs_lsn_t lsn, + int pass) +{ + xfs_mount_t *mp; + xfs_efi_log_item_t *efip; + xfs_efi_log_format_t *efi_formatp; + SPLDECL(s); + + if (pass == XLOG_RECOVER_PASS1) { + return; + } + + efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr; + ASSERT(item->ri_buf[0].i_len == + (sizeof(xfs_efi_log_format_t) + + ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t)))); + + mp = log->l_mp; + efip = xfs_efi_init(mp, efi_formatp->efi_nextents); + memcpy((char *)&(efip->efi_format), (char *)efi_formatp, + sizeof(xfs_efi_log_format_t) + + ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t))); + efip->efi_next_extent = efi_formatp->efi_nextents; + efip->efi_flags |= XFS_EFI_COMMITTED; + + AIL_LOCK(mp,s); + /* + * xfs_trans_update_ail() drops the AIL lock. + */ + xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s); +} /* xlog_recover_do_efi_trans */ + + +/* + * This routine is called when an efd format structure is found in + * a committed transaction in the log. Its purpose is to cancel + * the corresponding efi if it was still in the log. To do this + * it searches the AIL for the efi with an id equal to that in the + * efd format structure. If we find it, we remove the efi from the + * AIL and free it.
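[Editor's note: the intent/done pairing just described is a general pattern: a "done" record cancels the pending intent that carries the same id. A minimal user-space sketch with a plain singly linked list standing in for the AIL; all names are illustrative.]

    #include <stdlib.h>

    struct intent {
        unsigned long long id;              /* matches efi_id/efd_efi_id in spirit */
        struct intent *next;
    };

    static struct intent *pending;          /* list of intents not yet satisfied */

    /* Called when a "done" record with the given id is found in the log. */
    static void intent_done(unsigned long long id)
    {
        struct intent **pp, *ip;

        for (pp = &pending; (ip = *pp) != NULL; pp = &ip->next) {
            if (ip->id == id) {
                *pp = ip->next;             /* unlink: the intent is satisfied */
                free(ip);
                return;
            }
        }
        /* Not found: the intent was overwritten in the log.  Oh well. */
    }

[Whatever is still on the pending list when the scan finishes is exactly the set of operations that must be redone, which is what xlog_recover_process_efis() does later for EFIs.]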
+ */ +STATIC void +xlog_recover_do_efd_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_mount_t *mp; + xfs_efd_log_format_t *efd_formatp; + xfs_efi_log_item_t *efip=NULL; + xfs_log_item_t *lip; + int gen; + int nexts; + __uint64_t efi_id; + SPLDECL(s); + + if (pass == XLOG_RECOVER_PASS1) { + return; + } + + efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr; + ASSERT(item->ri_buf[0].i_len == + (sizeof(xfs_efd_log_format_t) + + ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_t)))); + efi_id = efd_formatp->efd_efi_id; + + /* + * Search for the efi with the id in the efd format structure + * in the AIL. + */ + mp = log->l_mp; + AIL_LOCK(mp,s); + lip = xfs_trans_first_ail(mp, &gen); + while (lip != NULL) { + if (lip->li_type == XFS_LI_EFI) { + efip = (xfs_efi_log_item_t *)lip; + if (efip->efi_format.efi_id == efi_id) { + /* + * xfs_trans_delete_ail() drops the + * AIL lock. + */ + xfs_trans_delete_ail(mp, lip, s); + break; + } + } + lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + } + if (lip == NULL) { + AIL_UNLOCK(mp, s); + } + + /* + * If we found it, then free it up. If it wasn't there, it + * must have been overwritten in the log. Oh well. + */ + if (lip != NULL) { + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + kmem_free(lip, sizeof(xfs_efi_log_item_t) + + ((nexts - 1) * sizeof(xfs_extent_t))); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } +} /* xlog_recover_do_efd_trans */ + +/* + * Perform the transaction + * + * If the transaction modifies a buffer or inode, do it now. Otherwise, + * EFIs and EFDs get queued up by adding entries into the AIL for them. + */ +STATIC int +xlog_recover_do_trans(xlog_t *log, + xlog_recover_t *trans, + int pass) +{ + int error = 0; + xlog_recover_item_t *item, *first_item; + + if ((error = xlog_recover_reorder_trans(log, trans))) + return error; + first_item = item = trans->r_itemq; + do { + /* + * we don't need to worry about the block number being + * truncated in > 1 TB buffers because in user-land, + * we're now n32 or 64-bit so xfs_daddr_t is 64-bits so + * the blkno's will get through the user-mode buffer + * cache properly. The only bad case is o32 kernels + * where xfs_daddr_t is 32-bits but mount will warn us + * off a > 1 TB filesystem before we get here. + */ + if ((ITEM_TYPE(item) == XFS_LI_BUF) || + (ITEM_TYPE(item) == XFS_LI_6_1_BUF) || + (ITEM_TYPE(item) == XFS_LI_5_3_BUF)) { + if ((error = xlog_recover_do_buffer_trans(log, item, + pass))) + break; + } else if ((ITEM_TYPE(item) == XFS_LI_INODE) || + (ITEM_TYPE(item) == XFS_LI_6_1_INODE) || + (ITEM_TYPE(item) == XFS_LI_5_3_INODE)) { + if ((error = xlog_recover_do_inode_trans(log, item, + pass))) + break; + } else if (ITEM_TYPE(item) == XFS_LI_EFI) { + xlog_recover_do_efi_trans(log, item, trans->r_lsn, + pass); + } else if (ITEM_TYPE(item) == XFS_LI_EFD) { + xlog_recover_do_efd_trans(log, item, pass); + } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) { + if ((error = xlog_recover_do_dquot_trans(log, item, + pass))) + break; + } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) { + if ((error = xlog_recover_do_quotaoff_trans(log, item, + pass))) + break; + } else { + xlog_warn("XFS: xlog_recover_do_trans"); + ASSERT(0); + error = XFS_ERROR(EIO); + break; + } + item = item->ri_next; + } while (first_item != item); + + return error; +} /* xlog_recover_do_trans */ + + +/* + * Free up any resources allocated by the transaction + * + * Remember that EFIs, EFDs, and IUNLINKs are handled later. 
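[Editor's note: the item queue that xlog_recover_free_trans() below tears down is circular, so the usual "stop at NULL" idiom does not apply; iteration stops when the walk comes back around to the first element. A stand-alone sketch of that traversal pattern, with illustrative names:]

    #include <stdlib.h>

    struct node {
        struct node *next;                  /* last node points back to the first */
    };

    static void free_ring(struct node *first)
    {
        struct node *cur = first;
        struct node *victim;

        if (first == NULL)
            return;
        do {
            victim = cur;
            cur = cur->next;                /* advance before freeing the node */
            free(victim);
        } while (cur != first);
    }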
+ */ +STATIC void +xlog_recover_free_trans(xlog_recover_t *trans) +{ + xlog_recover_item_t *first_item, *item, *free_item; + int i; + + item = first_item = trans->r_itemq; + do { + free_item = item; + item = item->ri_next; + /* Free the regions in the item. */ + for (i = 0; i < free_item->ri_cnt; i++) { + kmem_free(free_item->ri_buf[i].i_addr, + free_item->ri_buf[i].i_len); + } + /* Free the item itself */ + kmem_free(free_item->ri_buf, + (free_item->ri_total * sizeof(xfs_log_iovec_t))); + kmem_free(free_item, sizeof(xlog_recover_item_t)); + } while (first_item != item); + /* Free the transaction recover structure */ + kmem_free(trans, sizeof(xlog_recover_t)); +} /* xlog_recover_free_trans */ + + +STATIC int +xlog_recover_commit_trans(xlog_t *log, + xlog_recover_t **q, + xlog_recover_t *trans, + int pass) +{ + int error; + + if ((error = xlog_recover_unlink_tid(q, trans))) + return error; + if ((error = xlog_recover_do_trans(log, trans, pass))) + return error; + xlog_recover_free_trans(trans); /* no error */ + return 0; +} /* xlog_recover_commit_trans */ + + +/*ARGSUSED*/ +STATIC int +xlog_recover_unmount_trans(xlog_recover_t *trans) +{ + /* Do nothing now */ + xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR"); + return( 0 ); +} /* xlog_recover_unmount_trans */ + + +/* + * There are two valid states of the r_state field. 0 indicates that the + * transaction structure is in a normal state. We have either seen the + * start of the transaction or the last operation we added was not a partial + * operation. If the last operation we added to the transaction was a + * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS. + * + * NOTE: skip LRs with 0 data length. + */ +STATIC int +xlog_recover_process_data(xlog_t *log, + xlog_recover_t *rhash[], + xlog_rec_header_t *rhead, + xfs_caddr_t dp, + int pass) +{ + xfs_caddr_t lp = dp+INT_GET(rhead->h_len, ARCH_CONVERT); + int num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT); + xlog_op_header_t *ohead; + xlog_recover_t *trans; + xlog_tid_t tid; + int error; + unsigned long hash; + uint flags; + + /* check the log format matches our own - else we can't recover */ + if (xlog_header_check_recover(log->l_mp, rhead)) + return (XFS_ERROR(EIO)); + + while ((dp < lp) && num_logops) { + ASSERT(dp + sizeof(xlog_op_header_t) <= lp); + ohead = (xlog_op_header_t *)dp; + dp += sizeof(xlog_op_header_t); + if (ohead->oh_clientid != XFS_TRANSACTION && + ohead->oh_clientid != XFS_LOG) { + xlog_warn("XFS: xlog_recover_process_data: bad clientid"); + ASSERT(0); + return (XFS_ERROR(EIO)); + } + tid = INT_GET(ohead->oh_tid, ARCH_CONVERT); + hash = XLOG_RHASH(tid); + trans = xlog_recover_find_tid(rhash[hash], tid); + if (trans == NULL) { /* not found; add new tid */ + if (ohead->oh_flags & XLOG_START_TRANS) + xlog_recover_new_tid(&rhash[hash], tid, INT_GET(rhead->h_lsn, ARCH_CONVERT)); + } else { + ASSERT(dp+INT_GET(ohead->oh_len, ARCH_CONVERT) <= lp); + flags = ohead->oh_flags & ~XLOG_END_TRANS; + if (flags & XLOG_WAS_CONT_TRANS) + flags &= ~XLOG_CONTINUE_TRANS; + switch (flags) { + case XLOG_COMMIT_TRANS: { + error = xlog_recover_commit_trans(log, &rhash[hash], + trans, pass); + break; + } + case XLOG_UNMOUNT_TRANS: { + error = xlog_recover_unmount_trans(trans); + break; + } + case XLOG_WAS_CONT_TRANS: { + error = xlog_recover_add_to_cont_trans(trans, dp, + INT_GET(ohead->oh_len, ARCH_CONVERT)); + break; + } + case XLOG_START_TRANS : { + xlog_warn("XFS: xlog_recover_process_data: bad transaction"); + ASSERT(0); + error = XFS_ERROR(EIO); + break; 
+ } + case 0: + case XLOG_CONTINUE_TRANS: { + error = xlog_recover_add_to_trans(trans, dp, + INT_GET(ohead->oh_len, ARCH_CONVERT)); + break; + } + default: { + xlog_warn("XFS: xlog_recover_process_data: bad flag"); + ASSERT(0); + error = XFS_ERROR(EIO); + break; + } + } /* switch */ + if (error) + return error; + } /* if */ + dp += INT_GET(ohead->oh_len, ARCH_CONVERT); + num_logops--; + } + return( 0 ); +} /* xlog_recover_process_data */ + + +/* + * Process an extent free intent item that was recovered from + * the log. We need to free the extents that it describes. + */ +STATIC void +xlog_recover_process_efi(xfs_mount_t *mp, + xfs_efi_log_item_t *efip) +{ + xfs_efd_log_item_t *efdp; + xfs_trans_t *tp; + int i; + xfs_extent_t *extp; + xfs_fsblock_t startblock_fsb; + + ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED)); + + /* + * First check the validity of the extents described by the + * EFI. If any are bad, then assume that all are bad and + * just toss the EFI. + */ + for (i = 0; i < efip->efi_format.efi_nextents; i++) { + extp = &(efip->efi_format.efi_extents[i]); + startblock_fsb = XFS_BB_TO_FSB(mp, + XFS_FSB_TO_DADDR(mp, extp->ext_start)); + if ((startblock_fsb == 0) || + (extp->ext_len == 0) || + (startblock_fsb >= mp->m_sb.sb_dblocks) || + (extp->ext_len >= mp->m_sb.sb_agblocks)) { + /* + * This will pull the EFI from the AIL and + * free the memory associated with it. + */ + xfs_efi_release(efip, efip->efi_format.efi_nextents); + return; + } + } + + tp = xfs_trans_alloc(mp, 0); + xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0); + efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); + + for (i = 0; i < efip->efi_format.efi_nextents; i++) { + extp = &(efip->efi_format.efi_extents[i]); + xfs_free_extent(tp, extp->ext_start, extp->ext_len); + xfs_trans_log_efd_extent(tp, efdp, extp->ext_start, + extp->ext_len); + } + + efip->efi_flags |= XFS_EFI_RECOVERED; + xfs_trans_commit(tp, 0, NULL); +} /* xlog_recover_process_efi */ + + +/* + * Verify that once we've encountered something other than an EFI + * in the AIL that there are no more EFIs in the AIL. + */ +#if defined(DEBUG) +STATIC void +xlog_recover_check_ail(xfs_mount_t *mp, + xfs_log_item_t *lip, + int gen) +{ + int orig_gen; + + orig_gen = gen; + do { + ASSERT(lip->li_type != XFS_LI_EFI); + lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + /* + * The check will be bogus if we restart from the + * beginning of the AIL, so ASSERT that we don't. + * We never should since we're holding the AIL lock + * the entire time. + */ + ASSERT(gen == orig_gen); + } while (lip != NULL); +} +#endif /* DEBUG */ + + +/* + * When this is called, all of the EFIs which did not have + * corresponding EFDs should be in the AIL. What we do now + * is free the extents associated with each one. + * + * Since we process the EFIs in normal transactions, they + * will be removed at some point after the commit. This prevents + * us from just walking down the list processing each one. + * We'll use a flag in the EFI to skip those that we've already + * processed and use the AIL iteration mechanism's generation + * count to try to speed this up at least a bit. + * + * When we start, we know that the EFIs are the only things in + * the AIL. As we process them, however, other items are added + * to the AIL. Since everything added to the AIL must come after + * everything already in the AIL, we stop processing as soon as + * we see something other than an EFI in the AIL. 
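[Editor's note: xlog_recover_process_efi() above deliberately validates every extent in an intent before freeing any of them; one bad extent discredits the whole EFI. The shape of that all-or-nothing check, with illustrative bounds in place of the superblock fields sb_dblocks and sb_agblocks:]

    struct extent {
        unsigned long long start;           /* first filesystem block */
        unsigned int       len;             /* length in blocks */
    };

    /* Return 1 only if every extent looks sane; 0 means toss the whole EFI. */
    static int all_extents_valid(const struct extent *ext, int n,
                                 unsigned long long fs_blocks,
                                 unsigned int ag_blocks)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (ext[i].start == 0 || ext[i].len == 0 ||
                ext[i].start >= fs_blocks || ext[i].len >= ag_blocks)
                return 0;
        }
        return 1;                           /* safe to free all extents */
    }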
+ */ +STATIC void +xlog_recover_process_efis(xlog_t *log) +{ + xfs_log_item_t *lip; + xfs_efi_log_item_t *efip; + int gen; + xfs_mount_t *mp; + SPLDECL(s); + + mp = log->l_mp; + AIL_LOCK(mp,s); + + lip = xfs_trans_first_ail(mp, &gen); + while (lip != NULL) { + /* + * We're done when we see something other than an EFI. + */ + if (lip->li_type != XFS_LI_EFI) { + xlog_recover_check_ail(mp, lip, gen); + break; + } + + /* + * Skip EFIs that we've already processed. + */ + efip = (xfs_efi_log_item_t *)lip; + if (efip->efi_flags & XFS_EFI_RECOVERED) { + lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + continue; + } + + AIL_UNLOCK(mp, s); + xlog_recover_process_efi(mp, efip); + AIL_LOCK(mp,s); + lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + } + AIL_UNLOCK(mp, s); +} /* xlog_recover_process_efis */ + + +/* + * This routine performs a transaction to null out a bad inode pointer + * in an agi unlinked inode hash bucket. + */ +STATIC void +xlog_recover_clear_agi_bucket( + xfs_mount_t *mp, + xfs_agnumber_t agno, + int bucket) +{ + xfs_trans_t *tp; + xfs_agi_t *agi; + xfs_buf_t *agibp; + int offset; + int error; + + tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET); + xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0); + + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &agibp); + if (error) { + xfs_trans_cancel(tp, XFS_TRANS_ABORT); + return; + } + + agi = XFS_BUF_TO_AGI(agibp); + if (INT_GET(agi->agi_magicnum, ARCH_CONVERT) != XFS_AGI_MAGIC) { + xfs_trans_cancel(tp, XFS_TRANS_ABORT); + return; + } + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + + INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, NULLAGINO); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + + (void) xfs_trans_commit(tp, 0, NULL); +} /* xlog_recover_clear_agi_bucket */ + + +/* + * xlog_iunlink_recover + * + * This is called during recovery to process any inodes which + * we unlinked but not freed when the system crashed. These + * inodes will be on the lists in the AGI blocks. What we do + * here is scan all the AGIs and fully truncate and free any + * inodes found on the lists. Each inode is removed from the + * lists when it has been fully truncated and is freed. The + * freeing of the inode and its removal from the list must be + * atomic. + */ +void +xlog_recover_process_iunlinks(xlog_t *log) +{ + xfs_mount_t *mp; + xfs_agnumber_t agno; + xfs_agi_t *agi; + xfs_buf_t *agibp; + xfs_buf_t *ibp; + xfs_dinode_t *dip; + xfs_inode_t *ip; + xfs_agino_t agino; + xfs_ino_t ino; + int bucket; + int error; + uint mp_dmevmask; + + mp = log->l_mp; + + /* + * Prevent any DMAPI event from being sent while in this function. + */ + mp_dmevmask = mp->m_dmevmask; + mp->m_dmevmask = 0; + + for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { + /* + * Find the agi for this ag. 
+ */ + agibp = xfs_buf_read(mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0); + if (XFS_BUF_ISERROR(agibp)) { + xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)", + log->l_mp, agibp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp))); + } + agi = XFS_BUF_TO_AGI(agibp); + ASSERT(XFS_AGI_MAGIC == + INT_GET(agi->agi_magicnum, ARCH_CONVERT)); + + for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { + + agino = INT_GET(agi->agi_unlinked[bucket], ARCH_CONVERT); + while (agino != NULLAGINO) { + + /* + * Release the agi buffer so that it can + * be acquired in the normal course of the + * transaction to truncate and free the inode. + */ + xfs_buf_relse(agibp); + + ino = XFS_AGINO_TO_INO(mp, agno, agino); + error = xfs_iget(mp, NULL, ino, 0, &ip, 0); + ASSERT(error || (ip != NULL)); + + if (!error) { + /* + * Get the on disk inode to find the + * next inode in the bucket. + */ + error = xfs_itobp(mp, NULL, ip, &dip, + &ibp, 0); + ASSERT(error || (dip != NULL)); + } + + if (!error) { + ASSERT(ip->i_d.di_nlink == 0); + + /* setup for the next pass */ + agino = INT_GET(dip->di_next_unlinked, + ARCH_CONVERT); + xfs_buf_relse(ibp); + /* + * Prevent any DMAPI event from + * being sent when the + * reference on the inode is + * dropped. + */ + ip->i_d.di_dmevmask = 0; + + /* + * If this is a new inode, handle + * it specially. Otherwise, + * just drop our reference to the + * inode. If there are no + * other references, this will + * send the inode to + * xfs_inactive() which will + * truncate the file and free + * the inode. + */ + if (ip->i_d.di_mode == 0) + xfs_iput_new(ip, 0); + else + VN_RELE(XFS_ITOV(ip)); + } else { + /* + * We can't read in the inode + * this bucket points to, or + * this inode is messed up. Just + * ditch this bucket of inodes. We + * will lose some inodes and space, + * but at least we won't hang. Call + * xlog_recover_clear_agi_bucket() + * to perform a transaction to clear + * the inode pointer in the bucket. + */ + xlog_recover_clear_agi_bucket(mp, agno, + bucket); + + agino = NULLAGINO; + } + + /* + * Reacquire the agibuffer and continue around + * the loop. + */ + agibp = xfs_buf_read(mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, + XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0); + if (XFS_BUF_ISERROR(agibp)) { + xfs_ioerror_alert( + "xlog_recover_process_iunlinks(#2)", + log->l_mp, agibp, + XFS_AG_DADDR(mp, agno, + XFS_AGI_DADDR(mp))); + } + agi = XFS_BUF_TO_AGI(agibp); + ASSERT(XFS_AGI_MAGIC == INT_GET( + agi->agi_magicnum, ARCH_CONVERT)); + } + } + + /* + * Release the buffer for the current agi so we can + * go on to the next one. 
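[Editor's note: each AGI bucket processed above heads an on-disk chain of unlinked inodes threaded through di_next_unlinked, and a bad link causes the whole remainder of the bucket to be abandoned rather than hang recovery. A schematic sketch of that walk, with callbacks standing in for the iget/itobp/clear-bucket machinery; everything here is illustrative:]

    #define NULLINO (~0ULL)                 /* end-of-chain marker, like NULLAGINO */

    struct bucket_ops {
        /* Read ino's next-unlinked pointer; nonzero return means failure. */
        int  (*read_next)(unsigned long long ino, unsigned long long *next);
        void (*dispose)(unsigned long long ino);   /* truncate + free the inode */
        void (*clear_bucket)(void);                /* null out the AGI pointer */
    };

    static void drain_bucket(unsigned long long head, const struct bucket_ops *ops)
    {
        unsigned long long ino = head, next;

        while (ino != NULLINO) {
            if (ops->read_next(ino, &next) != 0) {
                ops->clear_bucket();        /* bad link: ditch the rest, trading
                                             * lost space for forward progress */
                return;
            }
            ops->dispose(ino);
            ino = next;
        }
    }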
+ */ + xfs_buf_relse(agibp); + } + + mp->m_dmevmask = mp_dmevmask; + +} /* xlog_recover_process_iunlinks */ + + +/* + * Stamp cycle number in every block + * + * This routine is also called in xfs_log.c + */ +/*ARGSUSED*/ +void +xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog) +{ + int i, j, k; + int size = iclog->ic_offset + iclog->ic_roundoff; + xfs_caddr_t dp; + union ich { + xlog_rec_ext_header_t hic_xheader; + char hic_sector[XLOG_HEADER_SIZE]; + } *xhdr; + uint cycle_lsn; + +#ifdef DEBUG + uint *up; + uint chksum = 0; + + up = (uint *)iclog->ic_datap; + /* divide length by 4 to get # words */ + for (i=0; i<size >> 2; i++) { + chksum ^= INT_GET(*up, ARCH_CONVERT); + up++; + } + INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum); +#endif /* DEBUG */ + + cycle_lsn = CYCLE_LSN_NOCONV(iclog->ic_header.h_lsn, ARCH_CONVERT); + + dp = iclog->ic_datap; + for (i = 0; i < BTOBB(size) && + i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { + iclog->ic_header.h_cycle_data[i] = *(uint *)dp; + *(uint *)dp = cycle_lsn; + dp += BBSIZE; + } + + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + xhdr = (union ich*)&iclog->ic_header; + for ( ; i < BTOBB(size); i++) { + j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp; + *(uint *)dp = cycle_lsn; + dp += BBSIZE; + } + + for (i = 1; i < log->l_iclog_heads; i++) { + xhdr[i].hic_xheader.xh_cycle = cycle_lsn; + } + } + +} /* xlog_pack_data */ + + +/*ARGSUSED*/ +STATIC void +xlog_unpack_data(xlog_rec_header_t *rhead, + xfs_caddr_t dp, + xlog_t *log) +{ + int i, j, k; + union ich { + xlog_rec_header_t hic_header; + xlog_rec_ext_header_t hic_xheader; + char hic_sector[XLOG_HEADER_SIZE]; + } *xhdr; + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + uint *up = (uint *)dp; + uint chksum = 0; +#endif + + for (i=0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) && + i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { + *(uint *)dp = *(uint *)&rhead->h_cycle_data[i]; + dp += BBSIZE; + } + + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + xhdr = (union ich*)rhead; + for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) { + j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + *(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; + dp += BBSIZE; + } + } + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + /* divide length by 4 to get # words */ + for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) { + chksum ^= INT_GET(*up, ARCH_CONVERT); + up++; + } + if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) { + if (!INT_ISZERO(rhead->h_chksum, ARCH_CONVERT) || + ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) { + cmn_err(CE_DEBUG, + "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)", + INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum); + cmn_err(CE_DEBUG, +"XFS: Disregard message if filesystem was created with non-DEBUG kernel"); + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + cmn_err(CE_DEBUG, + "XFS: LogR this is a LogV2 filesystem"); + } + log->l_flags |= XLOG_CHKSUM_MISMATCH; + } + } +#endif /* DEBUG && XFS_LOUD_RECOVERY */ +} /* xlog_unpack_data */ + + +/* + * Read the log from tail to head and process the log records found. + * Handle the two cases where the tail and head are in the same cycle + * and where the active portion of the log wraps around the end of + * the physical log separately. The pass parameter is passed through + * to the routines called to process the data and is not looked at + * here.
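[Editor's note: the xlog_pack_data()/xlog_unpack_data() pair above uses the classic log trick of stamping the cycle number into the first word of every 512-byte basic block (so a torn write shows up as a block whose first word carries the wrong cycle) while squirreling the displaced words away in the record header. Reduced to one flat buffer and plain types, with the endianness conversion elided; names here are illustrative:]

    #include <string.h>

    #define BBSIZE 512                      /* log basic block size */

    /* Stamp 'cycle' into the first 32 bits of every basic block of buf,
     * saving the overwritten words into saved[].  buf_len must be a
     * multiple of BBSIZE and saved[] must hold buf_len/BBSIZE entries. */
    static void pack_cycle(unsigned char *buf, int buf_len,
                           unsigned int cycle, unsigned int *saved)
    {
        int i;

        for (i = 0; i < buf_len / BBSIZE; i++) {
            memcpy(&saved[i], buf + i * BBSIZE, sizeof(unsigned int));
            memcpy(buf + i * BBSIZE, &cycle, sizeof(unsigned int));
        }
    }

    /* Recovery-side inverse: put the saved words back before the record
     * body is handed to the replay code. */
    static void unpack_cycle(unsigned char *buf, int buf_len,
                             const unsigned int *saved)
    {
        int i;

        for (i = 0; i < buf_len / BBSIZE; i++)
            memcpy(buf + i * BBSIZE, &saved[i], sizeof(unsigned int));
    }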
+ */ +STATIC int +xlog_do_recovery_pass(xlog_t *log, + xfs_daddr_t head_blk, + xfs_daddr_t tail_blk, + int pass) +{ + xlog_rec_header_t *rhead; + xfs_daddr_t blk_no; + xfs_caddr_t bufaddr; + xfs_buf_t *hbp, *dbp; + int error, h_size; + int bblks, split_bblks; + int hblks, split_hblks, wrapped_hblks; + xlog_recover_t *rhash[XLOG_RHASH_SIZE]; + + error = 0; + + + /* + * Read the header of the tail block and get the iclog buffer size from + * h_size. Use this to tell how many sectors make up the log header. + */ + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + /* + * When using variable length iclogs, read first sector of iclog + * header and extract the header size from it. Get a new hbp that + * is the correct size. + */ + hbp = xlog_get_bp(1, log->l_mp); + if (!hbp) + return ENOMEM; + if ((error = xlog_bread(log, tail_blk, 1, hbp))) + goto bread_err1; + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(hbp); + ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) == + XLOG_HEADER_MAGIC_NUM); + if ((INT_GET(rhead->h_version, ARCH_CONVERT) & (~XLOG_VERSION_OKBITS)) != 0) { + xlog_warn("XFS: xlog_do_recovery_pass: unrecognised log version number."); + error = XFS_ERROR(EIO); + goto bread_err1; + } + h_size = INT_GET(rhead->h_size, ARCH_CONVERT); + + if ((INT_GET(rhead->h_version, ARCH_CONVERT) & XLOG_VERSION_2) && + (h_size > XLOG_HEADER_CYCLE_SIZE)) { + hblks = h_size / XLOG_HEADER_CYCLE_SIZE; + if (h_size % XLOG_HEADER_CYCLE_SIZE) + hblks++; + xlog_put_bp(hbp); + hbp = xlog_get_bp(hblks, log->l_mp); + } else { + hblks=1; + } + } else { + hblks=1; + hbp = xlog_get_bp(1, log->l_mp); + h_size = XLOG_BIG_RECORD_BSIZE; + } + + if (!hbp) + return ENOMEM; + dbp = xlog_get_bp(BTOBB(h_size),log->l_mp); + if (!dbp) { + xlog_put_bp(hbp); + return ENOMEM; + } + + memset(rhash, 0, sizeof(rhash)); + if (tail_blk <= head_blk) { + for (blk_no = tail_blk; blk_no < head_blk; ) { + if ((error = xlog_bread(log, blk_no, hblks, hbp))) + goto bread_err2; + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(hbp); + ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + ASSERT(BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) <= INT_MAX)); + bblks = (int) BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); /* blocks in data section */ + + if (unlikely((INT_GET(rhead->h_magicno, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) || + (BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) > INT_MAX)) || + (bblks <= 0) || + (blk_no > log->l_logBBsize))) { + XFS_ERROR_REPORT("xlog_do_recovery_pass(1)", + XFS_ERRLEVEL_LOW, log->l_mp); + error = EFSCORRUPTED; + goto bread_err2; + } + + if ((INT_GET(rhead->h_version, ARCH_CONVERT) & (~XLOG_VERSION_OKBITS)) != 0) { + xlog_warn("XFS: xlog_do_recovery_pass: unrecognised log version number."); + error = XFS_ERROR(EIO); + goto bread_err2; + } + bblks = (int) BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); /* blocks in data section */ + if (bblks > 0) { + if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) + goto bread_err2; + xlog_unpack_data(rhead, XFS_BUF_PTR(dbp), log); + if ((error = xlog_recover_process_data(log, rhash, + rhead, XFS_BUF_PTR(dbp), + pass))) + goto bread_err2; + } + blk_no += (bblks+hblks); + } + } else { + /* + * Perform recovery around the end of the physical log. When the head + * is not on the same cycle number as the tail, we can't do a sequential + * recovery as above. 
+ */ + blk_no = tail_blk; + while (blk_no < log->l_logBBsize) { + /* + * Check for header wrapping around physical end-of-log + */ + wrapped_hblks = 0; + if (blk_no+hblks <= log->l_logBBsize) { + /* Read header in one read */ + if ((error = xlog_bread(log, blk_no, hblks, hbp))) + goto bread_err2; + } else { + /* This log record is split across physical end of log */ + split_hblks = 0; + if (blk_no != log->l_logBBsize) { + /* some data is before physical end of log */ + ASSERT(blk_no <= INT_MAX); + split_hblks = log->l_logBBsize - (int)blk_no; + ASSERT(split_hblks > 0); + if ((error = xlog_bread(log, blk_no, split_hblks, hbp))) + goto bread_err2; + } + bufaddr = XFS_BUF_PTR(hbp); + XFS_BUF_SET_PTR(hbp, bufaddr + BBTOB(split_hblks), + BBTOB(hblks - split_hblks)); + wrapped_hblks = hblks - split_hblks; + if ((error = xlog_bread(log, 0, wrapped_hblks, hbp))) + goto bread_err2; + XFS_BUF_SET_PTR(hbp, bufaddr, hblks); + } + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(hbp); + ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + ASSERT(BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) <= INT_MAX)); + bblks = (int) BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); + + /* LR body must have data or it wouldn't have been written */ + ASSERT(bblks > 0); + blk_no += hblks; /* successfully read header */ + + if (unlikely((INT_GET(rhead->h_magicno, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) || + (BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) > INT_MAX)) || + (bblks <= 0))) { + XFS_ERROR_REPORT("xlog_do_recovery_pass(2)", + XFS_ERRLEVEL_LOW, log->l_mp); + error = EFSCORRUPTED; + goto bread_err2; + } + + /* Read in data for log record */ + if (blk_no+bblks <= log->l_logBBsize) { + if ((error = xlog_bread(log, blk_no, bblks, dbp))) + goto bread_err2; + } else { + /* This log record is split across physical end of log */ + split_bblks = 0; + if (blk_no != log->l_logBBsize) { + + /* some data is before physical end of log */ + ASSERT(blk_no <= INT_MAX); + split_bblks = log->l_logBBsize - (int)blk_no; + ASSERT(split_bblks > 0); + if ((error = xlog_bread(log, blk_no, split_bblks, dbp))) + goto bread_err2; + } + bufaddr = XFS_BUF_PTR(dbp); + XFS_BUF_SET_PTR(dbp, bufaddr + BBTOB(split_bblks), + BBTOB(bblks - split_bblks)); + if ((error = xlog_bread(log, wrapped_hblks, + bblks - split_bblks, dbp))) + goto bread_err2; + XFS_BUF_SET_PTR(dbp, bufaddr, XLOG_BIG_RECORD_BSIZE); + } + xlog_unpack_data(rhead, XFS_BUF_PTR(dbp), log); + if ((error = xlog_recover_process_data(log, rhash, + rhead, XFS_BUF_PTR(dbp), + pass))) + goto bread_err2; + blk_no += bblks; + } + + ASSERT(blk_no >= log->l_logBBsize); + blk_no -= log->l_logBBsize; + + /* read first part of physical log */ + while (blk_no < head_blk) { + if ((error = xlog_bread(log, blk_no, hblks, hbp))) + goto bread_err2; + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(hbp); + ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + ASSERT(BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) <= INT_MAX)); + bblks = (int) BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); + ASSERT(bblks > 0); + if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) + goto bread_err2; + xlog_unpack_data(rhead, XFS_BUF_PTR(dbp), log); + if ((error = xlog_recover_process_data(log, rhash, + rhead, XFS_BUF_PTR(dbp), + pass))) + goto bread_err2; + blk_no += (bblks+hblks); + } + } + +bread_err2: + xlog_put_bp(dbp); +bread_err1: + xlog_put_bp(hbp); + + return error; +} + +/* + * Do the recovery of the log. We actually do this in two phases. 
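[Editor's note: the wrap handling in xlog_do_recovery_pass() above always decomposes a record that straddles the physical end of the log into at most two contiguous reads into one buffer. A sketch of that split, with a memcpy over a flat in-memory array standing in for xlog_bread and a block size of one byte to keep the arithmetic obvious; log_size and log[] are assumptions of the sketch:]

    #include <string.h>

    /* Read 'len' blocks starting at 'blk' from a circular log of 'log_size'
     * blocks into dst, splitting the read at the physical end of the log. */
    static void circular_read(const unsigned char *log, int log_size,
                              int blk, int len, unsigned char *dst)
    {
        int first = len;

        if (blk + len > log_size)
            first = log_size - blk;         /* blocks before the wrap point */
        memcpy(dst, log + blk, first);
        if (first < len)                    /* wrapped part starts at block 0 */
            memcpy(dst + first, log, len - first);
    }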
+ * The two passes are necessary in order to implement the function + * of cancelling a record written into the log. The first pass + * determines those things which have been cancelled, and the + * second pass replays log items normally except for those which + * have been cancelled. The handling of the replay and cancellations + * takes place in the log item type specific routines. + * + * The table of items which have cancel records in the log is allocated + * and freed at this level, since only here do we know when all of + * the log recovery has been completed. + */ +STATIC int +xlog_do_log_recovery(xlog_t *log, + xfs_daddr_t head_blk, + xfs_daddr_t tail_blk) +{ + int error; +#ifdef DEBUG + int i; +#endif + + /* + * First do a pass to find all of the cancelled buf log items. + * Store them in the buf_cancel_table for use in the second pass. + */ + log->l_buf_cancel_table = + (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE * + sizeof(xfs_buf_cancel_t*), + KM_SLEEP); + error = xlog_do_recovery_pass(log, head_blk, tail_blk, + XLOG_RECOVER_PASS1); + if (error != 0) { + kmem_free(log->l_buf_cancel_table, + XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*)); + log->l_buf_cancel_table = NULL; + return error; + } + /* + * Then do a second pass to actually recover the items in the log. + * When it is complete free the table of buf cancel items. + */ + error = xlog_do_recovery_pass(log, head_blk, tail_blk, + XLOG_RECOVER_PASS2); +#ifdef DEBUG + for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) { + ASSERT(log->l_buf_cancel_table[i] == NULL); + } +#endif /* DEBUG */ + kmem_free(log->l_buf_cancel_table, + XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*)); + log->l_buf_cancel_table = NULL; + + return error; +} + +/* + * Do the actual recovery + */ +STATIC int +xlog_do_recover(xlog_t *log, + xfs_daddr_t head_blk, + xfs_daddr_t tail_blk) +{ + int error; + xfs_buf_t *bp; + xfs_sb_t *sbp; + + /* + * First replay the images in the log. + */ + error = xlog_do_log_recovery(log, head_blk, tail_blk); + if (error) { + return error; + } + + XFS_bflush(log->l_mp->m_ddev_targp); + + /* + * If IO errors happened during recovery, bail out. + */ + if (XFS_FORCED_SHUTDOWN(log->l_mp)) { + return (EIO); + } + + /* + * We now update the tail_lsn since much of the recovery has completed + * and there may be space available to use. If there were no extent + * or iunlinks, we can free up the entire log and set the tail_lsn to + * be the last_sync_lsn. This was set in xlog_find_tail to be the + * lsn of the last known good LR on disk. If there are extent frees + * or iunlinks they will have some entries in the AIL; so we look at + * the AIL to determine how to set the tail_lsn. + */ + xlog_assign_tail_lsn(log->l_mp); + + /* + * Now that we've finished replaying all buffer and inode + * updates, re-read in the superblock. 
+ */ + bp = xfs_getsb(log->l_mp, 0); + XFS_BUF_UNDONE(bp); + XFS_BUF_READ(bp); + xfsbdstrat(log->l_mp, bp); + if ((error = xfs_iowait(bp))) { + xfs_ioerror_alert("xlog_do_recover", + log->l_mp, bp, XFS_BUF_ADDR(bp)); + ASSERT(0); + xfs_buf_relse(bp); + return error; + } + + /* Convert superblock from on-disk format */ + sbp = &log->l_mp->m_sb; + xfs_xlatesb(XFS_BUF_TO_SBP(bp), sbp, 1, ARCH_CONVERT, XFS_SB_ALL_BITS); + ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC); + ASSERT(XFS_SB_GOOD_VERSION(sbp)); + xfs_buf_relse(bp); + + xlog_recover_check_summary(log); + + /* Normal transactions can now occur */ + log->l_flags &= ~XLOG_ACTIVE_RECOVERY; + return 0; +} /* xlog_do_recover */ + +/* + * Perform recovery and re-initialize some log variables in xlog_find_tail. + * + * Return error or zero. + */ +int +xlog_recover(xlog_t *log, int readonly) +{ + xfs_daddr_t head_blk, tail_blk; + int error; + + /* find the tail of the log */ + + if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly))) + return error; + + if (tail_blk != head_blk) { +#ifndef __KERNEL__ + extern xfs_daddr_t HEAD_BLK, TAIL_BLK; + head_blk = HEAD_BLK; + tail_blk = TAIL_BLK; +#endif + /* There used to be a comment here: + * + * disallow recovery on read-only mounts. note -- mount + * checks for ENOSPC and turns it into an intelligent + * error message. + * ...but this is no longer true. Now, unless you specify + * NORECOVERY (in which case this function would never be + * called), we just go ahead and recover. We do this all + * under the vfs layer, so we can get away with it unless + * the device itself is read-only, in which case we fail. + */ +#ifdef __KERNEL__ + if ((error = xfs_dev_is_read_only(log->l_mp, + "recovery required"))) { + return error; + } +#else + if (readonly) { + return ENOSPC; + } +#endif + +#ifdef __KERNEL__ +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + cmn_err(CE_NOTE, + "Starting XFS recovery on filesystem: %s (dev: %d/%d)", + log->l_mp->m_fsname, MAJOR(log->l_dev), + MINOR(log->l_dev)); +#else + cmn_err(CE_NOTE, + "!Starting XFS recovery on filesystem: %s (dev: %d/%d)", + log->l_mp->m_fsname, MAJOR(log->l_dev), + MINOR(log->l_dev)); +#endif +#endif + error = xlog_do_recover(log, head_blk, tail_blk); + log->l_flags |= XLOG_RECOVERY_NEEDED; + } + return error; +} /* xlog_recover */ + + +/* + * In the first part of recovery we replay inodes and buffers and build + * up the list of extent free items which need to be processed. Here + * we process the extent free items and clean up the on disk unlinked + * inode lists. This is separated from the first part of recovery so + * that the root and real-time bitmap inodes can be read in from disk in + * between the two stages. This is necessary so that we can free space + * in the real-time portion of the file system. + */ +int +xlog_recover_finish(xlog_t *log, int mfsi_flags) +{ + /* + * Now we're ready to do the transactions needed for the + * rest of recovery. Start with completing all the extent + * free intent records and then process the unlinked inode + * lists. At this point, we essentially run in normal mode + * except that we're still performing recovery actions + * rather than accepting new requests. + */ + if (log->l_flags & XLOG_RECOVERY_NEEDED) { + xlog_recover_process_efis(log); + /* + * Sync the log to get all the EFIs out of the AIL. + * This isn't absolutely necessary, but it helps in + * case the unlink transactions would have problems + * pushing the EFIs out of the way. 
+ */ + xfs_log_force(log->l_mp, (xfs_lsn_t)0, + (XFS_LOG_FORCE | XFS_LOG_SYNC)); + + if ( (mfsi_flags & XFS_MFSI_NOUNLINK) == 0 ) { + + xlog_recover_process_iunlinks(log); + } + + xlog_recover_check_summary(log); + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + cmn_err(CE_NOTE, + "Ending XFS recovery on filesystem: %s (dev: %d/%d)", + log->l_mp->m_fsname, MAJOR(log->l_dev), + MINOR(log->l_dev)); +#else + cmn_err(CE_NOTE, + "!Ending XFS recovery on filesystem: %s (dev: %d/%d)", + log->l_mp->m_fsname, MAJOR(log->l_dev), + MINOR(log->l_dev)); +#endif + log->l_flags &= ~XLOG_RECOVERY_NEEDED; + } else { + cmn_err(CE_DEBUG, + "!Ending clean XFS mount for filesystem: %s", + log->l_mp->m_fsname); + } + return 0; +} /* xlog_recover_finish */ + + +#if defined(DEBUG) +/* + * Read all of the agf and agi counters and check that they + * are consistent with the superblock counters. + */ +void +xlog_recover_check_summary(xlog_t *log) +{ + xfs_mount_t *mp; + xfs_agf_t *agfp; + xfs_agi_t *agip; + xfs_buf_t *agfbp; + xfs_buf_t *agibp; + xfs_daddr_t agfdaddr; + xfs_daddr_t agidaddr; + xfs_buf_t *sbbp; +#ifdef XFS_LOUD_RECOVERY + xfs_sb_t *sbp; +#endif + xfs_agnumber_t agno; + __uint64_t freeblks; + __uint64_t itotal; + __uint64_t ifree; + + mp = log->l_mp; + + freeblks = 0LL; + itotal = 0LL; + ifree = 0LL; + for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { + agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)); + agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr, + XFS_FSS_TO_BB(mp, 1), 0); + if (XFS_BUF_ISERROR(agfbp)) { + xfs_ioerror_alert("xlog_recover_check_summary(agf)", + mp, agfbp, agfdaddr); + } + agfp = XFS_BUF_TO_AGF(agfbp); + ASSERT(XFS_AGF_MAGIC == + INT_GET(agfp->agf_magicnum, ARCH_CONVERT)); + ASSERT(XFS_AGF_GOOD_VERSION( + INT_GET(agfp->agf_versionnum, ARCH_CONVERT))); + ASSERT(INT_GET(agfp->agf_seqno, ARCH_CONVERT) == agno); + + freeblks += INT_GET(agfp->agf_freeblks, ARCH_CONVERT) + + INT_GET(agfp->agf_flcount, ARCH_CONVERT); + xfs_buf_relse(agfbp); + + agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); + agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr, + XFS_FSS_TO_BB(mp, 1), 0); + if (XFS_BUF_ISERROR(agibp)) { + xfs_ioerror_alert("xlog_recover_check_summary(agi)", + log->l_mp, agibp, agidaddr); + } + agip = XFS_BUF_TO_AGI(agibp); + ASSERT(XFS_AGI_MAGIC == + INT_GET(agip->agi_magicnum, ARCH_CONVERT)); + ASSERT(XFS_AGI_GOOD_VERSION( + INT_GET(agip->agi_versionnum, ARCH_CONVERT))); + ASSERT(INT_GET(agip->agi_seqno, ARCH_CONVERT) == agno); + + itotal += INT_GET(agip->agi_count, ARCH_CONVERT); + ifree += INT_GET(agip->agi_freecount, ARCH_CONVERT); + xfs_buf_relse(agibp); + } + + sbbp = xfs_getsb(mp, 0); +#ifdef XFS_LOUD_RECOVERY + sbp = XFS_BUF_TO_SBP(sbbp); + cmn_err(CE_NOTE, + "xlog_recover_check_summary: sb_icount %Lu itotal %Lu", + sbp->sb_icount, itotal); + cmn_err(CE_NOTE, + "xlog_recover_check_summary: sb_ifree %Lu itotal %Lu", + sbp->sb_ifree, ifree); + cmn_err(CE_NOTE, + "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu", + sbp->sb_fdblocks, freeblks); +#if 0 + /* + * This is turned off until I account for the allocation + * btree blocks which live in free space. 
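+ *
+ * Spelled out, the intended (currently disabled) invariants are
+ *
+ *	sb_icount   == sum over all AGs of agi_count
+ *	sb_ifree    == sum over all AGs of agi_freecount
+ *	sb_fdblocks == sum over all AGs of (agf_freeblks + agf_flcount)
+ *
+ * which is exactly what the loop above accumulates into itotal,
+ * ifree and freeblks.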
+ */ + ASSERT(sbp->sb_icount == itotal); + ASSERT(sbp->sb_ifree == ifree); + ASSERT(sbp->sb_fdblocks == freeblks); +#endif +#endif + xfs_buf_relse(sbbp); +} +#endif /* DEBUG */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_log_recover.h linux.21rc5-ac2/fs/xfs/xfs_log_recover.h --- linux.21rc5/fs/xfs/xfs_log_recover.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_log_recover.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_LOG_RECOVER_H__ +#define __XFS_LOG_RECOVER_H__ + +/* + * Macros, structures, prototypes for internal log manager use. + */ + +#define XLOG_RHASH_BITS 4 +#define XLOG_RHASH_SIZE 16 +#define XLOG_RHASH_SHIFT 2 +#define XLOG_RHASH(tid) \ + ((((__uint32_t)tid)>>XLOG_RHASH_SHIFT) & (XLOG_RHASH_SIZE-1)) + +#define XLOG_MAX_REGIONS_IN_ITEM (XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK / 2 + 1) + + +/* + * item headers are in ri_buf[0]. Additional buffers follow. + */ +typedef struct xlog_recover_item { + struct xlog_recover_item *ri_next; + struct xlog_recover_item *ri_prev; + int ri_type; + int ri_cnt; /* count of regions found */ + int ri_total; /* total regions */ + xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */ +} xlog_recover_item_t; + +struct xlog_tid; +typedef struct xlog_recover { + struct xlog_recover *r_next; + xlog_tid_t r_log_tid; /* log's transaction id */ + xfs_trans_header_t r_theader; /* trans header for partial */ + int r_state; /* not needed */ + xfs_lsn_t r_lsn; /* xact lsn */ + xlog_recover_item_t *r_itemq; /* q for items */ +} xlog_recover_t; + +#define ITEM_TYPE(i) (*(ushort *)(i)->ri_buf[0].i_addr) + +/* + * This is the number of entries in the l_buf_cancel_table used during + * recovery. 
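+ *
+ * For reference, xlog_do_log_recovery() sizes the table as
+ *
+ *	log->l_buf_cancel_table =
+ *		(xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
+ *						 sizeof(xfs_buf_cancel_t*),
+ *						 KM_SLEEP);
+ *
+ * i.e. an array of 64 bucket heads, zero-filled so that empty buckets
+ * read as NULL; the DEBUG loop there asserts they are all NULL again
+ * once pass 2 completes.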
+ */ +#define XLOG_BC_TABLE_SIZE 64 + +#define XLOG_RECOVER_PASS1 1 +#define XLOG_RECOVER_PASS2 2 + +#endif /* __XFS_LOG_RECOVER_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_mac.c linux.21rc5-ac2/fs/xfs/xfs_mac.c --- linux.21rc5/fs/xfs/xfs_mac.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_mac.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +static xfs_mac_label_t *mac_low_high_lp; +static xfs_mac_label_t *mac_high_low_lp; +static xfs_mac_label_t *mac_admin_high_lp; +static xfs_mac_label_t *mac_equal_equal_lp; + +/* + * Test for the existence of a MAC label as efficiently as possible. + */ +int +xfs_mac_vhaslabel( + vnode_t *vp) +{ + int error; + int len = sizeof(xfs_mac_label_t); + int flags = ATTR_KERNOVAL|ATTR_ROOT; + + VOP_ATTR_GET(vp, SGI_MAC_FILE, NULL, &len, flags, sys_cred, error); + return (error == 0); +} + +int +xfs_mac_iaccess(xfs_inode_t *ip, mode_t mode, struct cred *cr) +{ + xfs_mac_label_t mac; + xfs_mac_label_t *mp = mac_high_low_lp; + + if (cr == NULL || sys_cred == NULL ) { + return EACCES; + } + + if (xfs_attr_fetch(ip, SGI_MAC_FILE, (char *)&mac, sizeof(mac)) == 0) { + if ((mp = mac_add_label(&mac)) == NULL) { + return mac_access(mac_high_low_lp, cr, mode); + } + } + + return mac_access(mp, cr, mode); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_mac.h linux.21rc5-ac2/fs/xfs/xfs_mac.h --- linux.21rc5/fs/xfs/xfs_mac.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_mac.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_MAC_H__ +#define __XFS_MAC_H__ + +/* + * Mandatory Access Control + * + * Layout of a composite MAC label: + * ml_list contains the list of categories (MSEN) followed by the list of + * divisions (MINT). This is actually a header for the data structure which + * will have an ml_list with more than one element. + * + * ------------------------------- + * | ml_msen_type | ml_mint_type | + * ------------------------------- + * | ml_level | ml_grade | + * ------------------------------- + * | ml_catcount | + * ------------------------------- + * | ml_divcount | + * ------------------------------- + * | category 1 | + * | . . . | + * | category N | (where N = ml_catcount) + * ------------------------------- + * | division 1 | + * | . . . | + * | division M | (where M = ml_divcount) + * ------------------------------- + */ +#define XFS_MAC_MAX_SETS 250 +typedef struct xfs_mac_label { + __uint8_t ml_msen_type; /* MSEN label type */ + __uint8_t ml_mint_type; /* MINT label type */ + __uint8_t ml_level; /* Hierarchical level */ + __uint8_t ml_grade; /* Hierarchical grade */ + __uint16_t ml_catcount; /* Category count */ + __uint16_t ml_divcount; /* Division count */ + /* Category set, then Division set */ + __uint16_t ml_list[XFS_MAC_MAX_SETS]; +} xfs_mac_label_t; + +/* MSEN label type names. Choose an upper case ASCII character. */ +#define XFS_MSEN_ADMIN_LABEL 'A' /* Admin: lowm_ail_lock, "xfs_ail"); + spinlock_init(&mp->m_sb_lock, "xfs_sb"); + mutex_init(&mp->m_ilock, MUTEX_DEFAULT, "xfs_ilock"); + initnsema(&mp->m_growlock, 1, "xfs_grow"); + /* + * Initialize the AIL. + */ + xfs_trans_ail_init(mp); + + /* Init freeze sync structures */ + spinlock_init(&mp->m_freeze_lock, "xfs_freeze"); + init_sv(&mp->m_wait_unfreeze, SV_DEFAULT, "xfs_freeze", 0); + atomic_set(&mp->m_active_trans, 0); + + return mp; +} + +/* + * Free up the resources associated with a mount structure. Assume that + * the structure was initially zeroed, so we can tell which fields got + * initialized. 
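+ *
+ * A small illustration of the idiom, mirroring the body below:
+ *
+ *	if (mp->m_ihash)		(set only by xfs_ihash_init)
+ *		xfs_ihash_free(mp);
+ *
+ * so the teardown is safe to call from partially-failed setup paths
+ * as well as from a normal unmount.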
+ */ +void +xfs_mount_free( + xfs_mount_t *mp, + int remove_bhv) +{ + if (mp->m_ihash) + xfs_ihash_free(mp); + if (mp->m_chash) + xfs_chash_free(mp); + + if (mp->m_perag) { + int agno; + + for (agno = 0; agno < mp->m_maxagi; agno++) + if (mp->m_perag[agno].pagb_list) + kmem_free(mp->m_perag[agno].pagb_list, + sizeof(xfs_perag_busy_t) * + XFS_PAGB_NUM_SLOTS); + kmem_free(mp->m_perag, + sizeof(xfs_perag_t) * mp->m_sb.sb_agcount); + } + + AIL_LOCK_DESTROY(&mp->m_ail_lock); + spinlock_destroy(&mp->m_sb_lock); + mutex_destroy(&mp->m_ilock); + freesema(&mp->m_growlock); + if (mp->m_quotainfo) + XFS_QM_DONE(mp); + + if (mp->m_fsname != NULL) + kmem_free(mp->m_fsname, mp->m_fsname_len); + + if (remove_bhv) { + struct vfs *vfsp = XFS_MTOVFS(mp); + + bhv_remove_all_vfsops(vfsp, 0); + VFS_REMOVEBHV(vfsp, &mp->m_bhv); + } + + spinlock_destroy(&mp->m_freeze_lock); + sv_destroy(&mp->m_wait_unfreeze); + kmem_free(mp, sizeof(xfs_mount_t)); +} + + +/* + * Check the validity of the SB found. + */ +STATIC int +xfs_mount_validate_sb( + xfs_mount_t *mp, + xfs_sb_t *sbp) +{ + /* + * If the log device and data device have the + * same device number, the log is internal. + * Consequently, the sb_logstart should be non-zero. If + * we have a zero sb_logstart in this case, we may be trying to mount + * a volume filesystem in a non-volume manner. + */ + if (sbp->sb_magicnum != XFS_SB_MAGIC) { + cmn_err(CE_WARN, "XFS: bad magic number"); + return XFS_ERROR(EWRONGFS); + } + + if (!XFS_SB_GOOD_VERSION(sbp)) { + cmn_err(CE_WARN, "XFS: bad version"); + return XFS_ERROR(EWRONGFS); + } + + if (unlikely(sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) { + cmn_err(CE_WARN, "XFS: filesystem is marked as having an external log; specify logdev on the\nmount command line."); + XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(1)", + XFS_ERRLEVEL_HIGH, mp, sbp); + return XFS_ERROR(EFSCORRUPTED); + } + + if (unlikely(sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) { + cmn_err(CE_WARN, "XFS: filesystem is marked as having an internal log; don't specify logdev on\nthe mount command line."); + XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(2)", + XFS_ERRLEVEL_HIGH, mp, sbp); + return XFS_ERROR(EFSCORRUPTED); + } + + /* + * More sanity checking. These were stolen directly from + * xfs_repair. 
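+ *
+ * Every check below takes the same shape, roughly
+ *
+ *	if (unlikely( <implausible superblock field> )) {
+ *		cmn_err(CE_WARN, "XFS: SB sanity check N failed");
+ *		return XFS_ERROR(EFSCORRUPTED);
+ *	}
+ *
+ * presumably so that a damaged superblock is rejected here, before
+ * any of its fields are used to size allocations later in the mount.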
+ */ + if (unlikely( + sbp->sb_agcount <= 0 || + sbp->sb_sectsize < XFS_MIN_SECTORSIZE || + sbp->sb_sectsize > XFS_MAX_SECTORSIZE || + sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG || + sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG || + sbp->sb_blocksize < XFS_MIN_BLOCKSIZE || + sbp->sb_blocksize > XFS_MAX_BLOCKSIZE || + sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || + sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || + sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || + (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || + (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || + sbp->sb_imax_pct > 100)) { + cmn_err(CE_WARN, "XFS: SB sanity check 1 failed"); + XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)", + XFS_ERRLEVEL_LOW, mp, sbp); + return XFS_ERROR(EFSCORRUPTED); + } + + /* + * Sanity check AG count, size fields against data size field + */ + if (unlikely( + sbp->sb_dblocks == 0 || + sbp->sb_dblocks > + (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks || + sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) * + sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) { + cmn_err(CE_WARN, "XFS: SB sanity check 2 failed"); + XFS_ERROR_REPORT("xfs_mount_validate_sb(4)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + +#if !XFS_BIG_FILESYSTEMS + if (sbp->sb_dblocks > INT_MAX || sbp->sb_rblocks > INT_MAX) { + cmn_err(CE_WARN, +"XFS: File systems greater than 1TB not supported on this system."); + return XFS_ERROR(E2BIG); + } +#endif + + if (unlikely(sbp->sb_inprogress)) { + cmn_err(CE_WARN, "XFS: file system busy"); + XFS_ERROR_REPORT("xfs_mount_validate_sb(5)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + /* + * Until this is fixed only page-sized or smaller data blocks work. + */ + if (sbp->sb_blocksize > PAGE_SIZE) { + cmn_err(CE_WARN, + "XFS: Attempted to mount file system with blocksize %d bytes", + sbp->sb_blocksize); + cmn_err(CE_WARN, + "XFS: Only page-sized (%d) or less blocksizes currently work.", + PAGE_SIZE); + return XFS_ERROR(ENOSYS); + } + + return 0; +} + +void +xfs_initialize_perag(xfs_mount_t *mp, int agcount) +{ + int index, max_metadata; + xfs_perag_t *pag; + xfs_agino_t agino; + xfs_ino_t ino; + xfs_sb_t *sbp = &mp->m_sb; + xfs_ino_t max_inum = XFS_MAXINUMBER_32; + + /* Check to see if the filesystem can overflow 32 bit inodes */ + agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0); + ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); + + /* Clear the mount flag if no inode can overflow 32 bits + * on this filesystem. + */ + if (ino <= max_inum) { + mp->m_flags &= ~XFS_MOUNT_32BITINODES; + } + + /* If we can overflow then setup the ag headers accordingly */ + if (mp->m_flags & XFS_MOUNT_32BITINODES) { + /* Calculate how much should be reserved for inodes to + * meet the max inode percentage. 
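+ *
+ * (Aside: do_div() is the kernel's in-place 64-by-32-bit division
+ * helper; do_div(icount, 100) replaces icount with icount / 100 and
+ * evaluates to the remainder, which is why the calculation below is
+ * a statement sequence rather than plain C division on a 64-bit
+ * value.  As a worked example, sb_dblocks = 1000000 and
+ * sb_imax_pct = 5 leave icount = 50000 after the first two steps.)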
+ */ + if (mp->m_maxicount) { + __uint64_t icount; + + icount = sbp->sb_dblocks * sbp->sb_imax_pct; + do_div(icount, 100); + icount += sbp->sb_agblocks - 1; + do_div(icount, mp->m_ialloc_blks); + max_metadata = icount; + } else { + max_metadata = agcount; + } + for (index = 0; index < agcount; index++) { + ino = XFS_AGINO_TO_INO(mp, index, agino); + if (ino > max_inum) { + index++; + break; + } + + /* This ag is prefered for inodes */ + pag = &mp->m_perag[index]; + pag->pagi_inodeok = 1; + if (index < max_metadata) + pag->pagf_metadata = 1; + } + } else { + /* Setup default behavior for smaller filesystems */ + for (index = 0; index < agcount; index++) { + pag = &mp->m_perag[index]; + pag->pagi_inodeok = 1; + } + } + mp->m_maxagi = index; +} + +/* + * xfs_xlatesb + * + * data - on disk version of sb + * sb - a superblock + * dir - conversion direction: <0 - convert sb to buf + * >0 - convert buf to sb + * arch - architecture to read/write from/to buf + * fields - which fields to copy (bitmask) + */ +void +xfs_xlatesb( + void *data, + xfs_sb_t *sb, + int dir, + xfs_arch_t arch, + __int64_t fields) +{ + xfs_caddr_t buf_ptr; + xfs_caddr_t mem_ptr; + xfs_sb_field_t f; + int first; + int size; + + ASSERT(dir); + ASSERT(fields); + + if (!fields) + return; + + buf_ptr = (xfs_caddr_t)data; + mem_ptr = (xfs_caddr_t)sb; + + while (fields) { + f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); + first = xfs_sb_info[f].offset; + size = xfs_sb_info[f + 1].offset - first; + + ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1); + + if (arch == ARCH_NOCONVERT || + size == 1 || + xfs_sb_info[f].type == 1) { + if (dir > 0) { + memcpy(mem_ptr + first, buf_ptr + first, size); + } else { + memcpy(buf_ptr + first, mem_ptr + first, size); + } + } else { + switch (size) { + case 2: + INT_XLATE(*(__uint16_t*)(buf_ptr+first), + *(__uint16_t*)(mem_ptr+first), + dir, arch); + break; + case 4: + INT_XLATE(*(__uint32_t*)(buf_ptr+first), + *(__uint32_t*)(mem_ptr+first), + dir, arch); + break; + case 8: + INT_XLATE(*(__uint64_t*)(buf_ptr+first), + *(__uint64_t*)(mem_ptr+first), dir, arch); + break; + default: + ASSERT(0); + } + } + + fields &= ~(1LL << f); + } +} + +/* + * xfs_readsb + * + * Does the initial read of the superblock. + */ +int +xfs_readsb(xfs_mount_t *mp) +{ + unsigned int sector_size; + unsigned int extra_flags; + xfs_buf_t *bp; + xfs_sb_t *sbp; + int error; + + ASSERT(mp->m_sb_bp == NULL); + ASSERT(mp->m_ddev_targp != NULL); + + /* + * Allocate a (locked) buffer to hold the superblock. + * This will be kept around at all times to optimize + * access to the superblock. + */ + sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); + extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED; + + bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, + BTOBB(sector_size), extra_flags); + ASSERT(bp); + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + + /* + * Initialize the mount structure from the superblock. + * But first do some basic consistency checking. + */ + sbp = XFS_BUF_TO_SBP(bp); + xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, + ARCH_CONVERT, XFS_SB_ALL_BITS); + + error = xfs_mount_validate_sb(mp, &(mp->m_sb)); + if (error) { + cmn_err(CE_WARN, "XFS: SB validate failed"); + XFS_BUF_UNMANAGE(bp); + xfs_buf_relse(bp); + return error; + } + + /* + * We must be able to do sector-sized and sector-aligned IO. 
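+ *
+ * Two cases follow.  If the sector size reported for the device is
+ * larger than sb_sectsize, the mount fails (the ENOSYS return below),
+ * since the superblock could never be written back at its own
+ * granularity.  If it is smaller, the buffer is simply re-read at
+ * sb_sectsize so the cached superblock buffer matches the
+ * filesystem's sector size.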
+ */ + if (sector_size > mp->m_sb.sb_sectsize) { + cmn_err(CE_WARN, + "XFS: device supports only %u byte sectors (not %u)", + sector_size, mp->m_sb.sb_sectsize); + XFS_BUF_UNMANAGE(bp); + xfs_buf_relse(bp); + return XFS_ERROR(ENOSYS); + } + + /* + * If device sector size is smaller than the superblock size, + * re-read the superblock so the buffer is correctly sized. + */ + if (sector_size < mp->m_sb.sb_sectsize) { + XFS_BUF_UNMANAGE(bp); + xfs_buf_relse(bp); + sector_size = mp->m_sb.sb_sectsize; + bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, + BTOBB(sector_size), extra_flags); + ASSERT(bp); + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + } + + mp->m_sb_bp = bp; + xfs_buf_relse(bp); + ASSERT(XFS_BUF_VALUSEMA(bp) > 0); + return 0; +} + + +/* + * xfs_mount_common + * + * Mount initialization code establishing various mount + * fields from the superblock associated with the given + * mount structure + */ +void +xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) +{ + int i; + + mp->m_agfrotor = mp->m_agirotor = 0; + spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock"); + mp->m_maxagi = mp->m_sb.sb_agcount; + mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; + mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; + mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT; + mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1; + mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog; + mp->m_litino = sbp->sb_inodesize - + ((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t)); + mp->m_blockmask = sbp->sb_blocksize - 1; + mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG; + mp->m_blockwmask = mp->m_blockwsize - 1; + INIT_LIST_HEAD(&mp->m_del_inodes); + + + if (XFS_SB_VERSION_HASLOGV2(sbp)) { + if (sbp->sb_logsunit <= 1) { + mp->m_lstripemask = 1; + } else { + mp->m_lstripemask = + 1 << xfs_highbit32(sbp->sb_logsunit >> BBSHIFT); + } + } + + /* + * Setup for attributes, in case they get created. + * This value is for inodes getting attributes for the first time, + * the per-inode value is for old attribute values. 
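+ *
+ * Summarizing the switch below: 256-byte inodes place the attribute
+ * fork at XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(2), i.e. measured
+ * back from the end of the inode literal area, while the larger
+ * inode sizes all use a fixed XFS_BMDR_SPACE_CALC(12) offset.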
+ */ + ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048); + switch (sbp->sb_inodesize) { + case 256: + mp->m_attroffset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(2); + break; + case 512: + case 1024: + case 2048: + mp->m_attroffset = XFS_BMDR_SPACE_CALC(12); + break; + default: + ASSERT(0); + } + ASSERT(mp->m_attroffset < XFS_LITINO(mp)); + + for (i = 0; i < 2; i++) { + mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize, + xfs_alloc, i == 0); + mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize, + xfs_alloc, i == 0); + } + for (i = 0; i < 2; i++) { + mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize, + xfs_bmbt, i == 0); + mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize, + xfs_bmbt, i == 0); + } + for (i = 0; i < 2; i++) { + mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize, + xfs_inobt, i == 0); + mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize, + xfs_inobt, i == 0); + } + + mp->m_bsize = XFS_FSB_TO_BB(mp, 1); + mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK, + sbp->sb_inopblock); + mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog; +} + +/* + * xfs_mountfs + * + * This function does the following on an initial mount of a file system: + * - reads the superblock from disk and init the mount struct + * - if we're a 32-bit kernel, do a size check on the superblock + * so we don't mount terabyte filesystems + * - init mount struct realtime fields + * - allocate inode hash table for fs + * - init directory manager + * - perform recovery and init the log manager + */ +int +xfs_mountfs( + vfs_t *vfsp, + xfs_mount_t *mp, + dev_t dev, + int mfsi_flags) +{ + xfs_buf_t *bp; + xfs_sb_t *sbp = &(mp->m_sb); + xfs_inode_t *rip; + vnode_t *rvp = 0; + int readio_log, writeio_log; + vmap_t vmap; + xfs_daddr_t d; + __uint64_t ret64; + __int64_t update_flags; + uint quotamount, quotaflags; + int agno, noio; + int uuid_mounted = 0; + int error = 0; + + noio = dev == 0 && mp->m_sb_bp != NULL; + if (mp->m_sb_bp == NULL) { + if ((error = xfs_readsb(mp))) { + return (error); + } + } + xfs_mount_common(mp, sbp); + + /* + * Check if sb_agblocks is aligned at stripe boundary + * If sb_agblocks is NOT aligned turn off m_dalign since + * allocator alignment is within an ag, therefore ag has + * to be aligned at stripe boundary. + */ + update_flags = 0LL; + if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) { + /* + * If stripe unit and stripe width are not multiples + * of the fs blocksize turn off alignment. + */ + if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || + (BBTOB(mp->m_swidth) & mp->m_blockmask)) { + if (mp->m_flags & XFS_MOUNT_RETERR) { + cmn_err(CE_WARN, + "XFS: alignment check 1 failed"); + error = XFS_ERROR(EINVAL); + goto error1; + } + } else { + /* + * Convert the stripe unit and width to FSBs. 
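+ *
+ * (Hedged note: m_dalign and m_swidth are assumed here to arrive
+ * from the mount options in 512-byte basic blocks; XFS_BB_TO_FSBT()
+ * truncates them to whole filesystem blocks, and the branches below
+ * treat a zero or AG-misaligned result as unusable, either failing
+ * the mount under XFS_MOUNT_RETERR or zeroing both fields.)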
+ */ + mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); + if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { + if (mp->m_flags & XFS_MOUNT_RETERR) { + error = XFS_ERROR(EINVAL); + goto error1; + } + mp->m_dalign = 0; + mp->m_swidth = 0; + } else if (mp->m_dalign) { + mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); + } else { + if (mp->m_flags & XFS_MOUNT_RETERR) { + cmn_err(CE_WARN, + "XFS: alignment check 3 failed"); + error = XFS_ERROR(EINVAL); + goto error1; + } + mp->m_swidth = 0; + } + } + + /* + * Update superblock with new values + * and log changes + */ + if (XFS_SB_VERSION_HASDALIGN(sbp)) { + if (sbp->sb_unit != mp->m_dalign) { + sbp->sb_unit = mp->m_dalign; + update_flags |= XFS_SB_UNIT; + } + if (sbp->sb_width != mp->m_swidth) { + sbp->sb_width = mp->m_swidth; + update_flags |= XFS_SB_WIDTH; + } + } + } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && + XFS_SB_VERSION_HASDALIGN(&mp->m_sb)) { + mp->m_dalign = sbp->sb_unit; + mp->m_swidth = sbp->sb_width; + } + + xfs_alloc_compute_maxlevels(mp); + xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); + xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); + xfs_ialloc_compute_maxlevels(mp); + + if (sbp->sb_imax_pct) { + __uint64_t icount; + + /* Make sure the maximum inode count is a multiple of the + * units we allocate inodes in. + */ + + icount = sbp->sb_dblocks * sbp->sb_imax_pct; + do_div(icount, 100); + do_div(icount, mp->m_ialloc_blks); + mp->m_maxicount = (icount * mp->m_ialloc_blks) << + sbp->sb_inopblog; + } else + mp->m_maxicount = 0; + + /* + * XFS uses the uuid from the superblock as the unique + * identifier for fsid. We can not use the uuid from the volume + * since a single partition filesystem is identical to a single + * partition volume/filesystem. + */ + if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && + (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { + if (xfs_uuid_mount(mp)) { + error = XFS_ERROR(EINVAL); + goto error1; + } + uuid_mounted=1; + ret64 = uuid_hash64(&sbp->sb_uuid); + memcpy(&vfsp->vfs_fsid, &ret64, sizeof(ret64)); + } + + /* + * Set the default minimum read and write sizes unless + * already specified in a mount option. + * We use smaller I/O sizes when the file system + * is being used for NFS service (wsync mount option). + */ + if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { + if (mp->m_flags & XFS_MOUNT_WSYNC) { + readio_log = XFS_WSYNC_READIO_LOG; + writeio_log = XFS_WSYNC_WRITEIO_LOG; + } else { + readio_log = XFS_READIO_LOG_LARGE; + writeio_log = XFS_WRITEIO_LOG_LARGE; + } + } else { + readio_log = mp->m_readio_log; + writeio_log = mp->m_writeio_log; + } + + /* + * Set the number of readahead buffers to use based on + * physical memory size. + */ + if (xfs_physmem <= 4096) /* <= 16MB */ + mp->m_nreadaheads = XFS_RW_NREADAHEAD_16MB; + else if (xfs_physmem <= 8192) /* <= 32MB */ + mp->m_nreadaheads = XFS_RW_NREADAHEAD_32MB; + else + mp->m_nreadaheads = XFS_RW_NREADAHEAD_K32; + if (sbp->sb_blocklog > readio_log) { + mp->m_readio_log = sbp->sb_blocklog; + } else { + mp->m_readio_log = readio_log; + } + mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog); + if (sbp->sb_blocklog > writeio_log) { + mp->m_writeio_log = sbp->sb_blocklog; + } else { + mp->m_writeio_log = writeio_log; + } + mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog); + + /* + * Set the inode cluster size based on the physical memory + * size. This may still be overridden by the file system + * block size if it is larger than the chosen cluster size. 
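+ *
+ * (Note: btoc() converts bytes to pages, so the test below compares
+ * xfs_physmem, kept in pages, against 32MB.  This is consistent with
+ * the raw page-count thresholds used for m_nreadaheads above, where
+ * 4096 pages correspond to 16MB with 4K pages.)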
+ */ + if (xfs_physmem <= btoc(32 * 1024 * 1024)) { /* <= 32 MB */ + mp->m_inode_cluster_size = XFS_INODE_SMALL_CLUSTER_SIZE; + } else { + mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; + } + /* + * Set whether we're using inode alignment. + */ + if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) && + mp->m_sb.sb_inoalignmt >= + XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) + mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1; + else + mp->m_inoalign_mask = 0; + /* + * If we are using stripe alignment, check whether + * the stripe unit is a multiple of the inode alignment + */ + if (mp->m_dalign && mp->m_inoalign_mask && + !(mp->m_dalign & mp->m_inoalign_mask)) + mp->m_sinoalign = mp->m_dalign; + else + mp->m_sinoalign = 0; + /* + * Check that the data (and log if separate) are an ok size. + */ + d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); + if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { + cmn_err(CE_WARN, "XFS: size check 1 failed"); + error = XFS_ERROR(E2BIG); + goto error1; + } + if (!noio) { + error = xfs_read_buf(mp, mp->m_ddev_targp, + d - XFS_FSS_TO_BB(mp, 1), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (!error) { + xfs_buf_relse(bp); + } else { + cmn_err(CE_WARN, "XFS: size check 2 failed"); + if (error == ENOSPC) { + error = XFS_ERROR(E2BIG); + } + goto error1; + } + } + + if (!noio && ((mfsi_flags & XFS_MFSI_CLIENT) == 0) && + mp->m_logdev_targp != mp->m_ddev_targp) { + d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); + if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { + cmn_err(CE_WARN, "XFS: size check 3 failed"); + error = XFS_ERROR(E2BIG); + goto error1; + } + error = xfs_read_buf(mp, mp->m_logdev_targp, + d - XFS_FSB_TO_BB(mp, 1), + XFS_FSB_TO_BB(mp, 1), 0, &bp); + if (!error) { + xfs_buf_relse(bp); + } else { + cmn_err(CE_WARN, "XFS: size check 3 failed"); + if (error == ENOSPC) { + error = XFS_ERROR(E2BIG); + } + goto error1; + } + } + + /* + * Initialize realtime fields in the mount structure + */ + if ((error = xfs_rtmount_init(mp))) { + cmn_err(CE_WARN, "XFS: RT mount failed"); + goto error1; + } + + /* + * For client case we are done now + */ + if (mfsi_flags & XFS_MFSI_CLIENT) { + return(0); + } + + /* + * Copies the low order bits of the timestamp and the randomly + * set "sequence" number out of a UUID. + */ + uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid); + + /* + * The vfs structure needs to have a file system independent + * way of checking for the invariant file system ID. Since it + * can't look at mount structures it has a pointer to the data + * in the mount structure. + * + * File systems that don't support user level file handles (i.e. + * all of them except for XFS) will leave vfs_altfsid as NULL. + */ + vfsp->vfs_altfsid = (fsid_t *)mp->m_fixedfsid; + mp->m_dmevmask = 0; /* not persistent; set after each mount */ + + /* + * Select the right directory manager. + */ + mp->m_dirops = + XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ? + xfsv2_dirops : + xfsv1_dirops; + + /* + * Initialize directory manager's entries. + */ + XFS_DIR_MOUNT(mp); + + /* + * Initialize the attribute manager's entries. + */ + mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100; + + /* + * Initialize the precomputed transaction reservations values. + */ + xfs_trans_init(mp); + if (noio) { + ASSERT((mfsi_flags & XFS_MFSI_CLIENT) == 0); + return 0; + } + + /* + * Allocate and initialize the inode hash table for this + * file system. + */ + xfs_ihash_init(mp); + xfs_chash_init(mp); + + /* + * Allocate and initialize the per-ag data. 
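+ *
+ * In outline, matching the code below:
+ *
+ *	init_rwsem(&mp->m_peraglock);
+ *	mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
+ *				  KM_SLEEP);
+ *	xfs_initialize_perag(mp, sbp->sb_agcount);
+ *
+ * where xfs_initialize_perag(), earlier in this file, decides which
+ * AGs are allowed to hold inodes (pagi_inodeok).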
+ */ + init_rwsem(&mp->m_peraglock); + mp->m_perag = + kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP); + + xfs_initialize_perag(mp, sbp->sb_agcount); + + /* + * log's mount-time initialization. Perform 1st part recovery if needed + */ + if (likely(sbp->sb_logblocks > 0)) { /* check for volume case */ + error = xfs_log_mount(mp, mp->m_logdev_targp->pbr_dev, + XFS_FSB_TO_DADDR(mp, sbp->sb_logstart), + XFS_FSB_TO_BB(mp, sbp->sb_logblocks)); + if (error) { + cmn_err(CE_WARN, "XFS: log mount failed"); + goto error2; + } + } else { /* No log has been defined */ + cmn_err(CE_WARN, "XFS: no log defined"); + XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto error2; + } + + /* + * Get and sanity-check the root inode. + * Save the pointer to it in the mount structure. + */ + error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_ILOCK_EXCL, &rip, 0); + if (error) { + cmn_err(CE_WARN, "XFS: failed to read root inode"); + goto error3; + } + + ASSERT(rip != NULL); + rvp = XFS_ITOV(rip); + VMAP(rvp, vmap); + + if (unlikely((rip->i_d.di_mode & IFMT) != IFDIR)) { + cmn_err(CE_WARN, "XFS: corrupted root inode"); + prdev("Root inode %llu is not a directory", + mp->m_dev, (unsigned long long)rip->i_ino); + xfs_iunlock(rip, XFS_ILOCK_EXCL); + XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, + mp); + error = XFS_ERROR(EFSCORRUPTED); + goto error4; + } + mp->m_rootip = rip; /* save it */ + + xfs_iunlock(rip, XFS_ILOCK_EXCL); + + /* + * Initialize realtime inode pointers in the mount structure + */ + if ((error = xfs_rtmount_inodes(mp))) { + /* + * Free up the root inode. + */ + cmn_err(CE_WARN, "XFS: failed to read RT inodes"); + goto error4; + } + + /* + * If fs is not mounted readonly, then update the superblock + * unit and width changes. + */ + if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY)) + xfs_mount_log_sbunit(mp, update_flags); + + /* + * Initialise the XFS quota management subsystem for this mount + */ + if ((error = XFS_QM_INIT(mp, "amount, "aflags))) + goto error4; + + /* + * Finish recovering the file system. This part needed to be + * delayed until after the root and real-time bitmap inodes + * were consistently read in. + */ + error = xfs_log_mount_finish(mp, mfsi_flags); + if (error) { + cmn_err(CE_WARN, "XFS: log mount finish failed"); + goto error4; + } + + /* + * Complete the quota initialisation, post-log-replay component. + */ + if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags))) + goto error4; + + return 0; + + error4: + /* + * Free up the root inode. + */ + VN_RELE(rvp); + vn_purge(rvp, &vmap); + error3: + xfs_log_unmount_dealloc(mp); + error2: + xfs_ihash_free(mp); + xfs_chash_free(mp); + for (agno = 0; agno < sbp->sb_agcount; agno++) + if (mp->m_perag[agno].pagb_list) + kmem_free(mp->m_perag[agno].pagb_list, + sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS); + kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t)); + mp->m_perag = NULL; + /* FALLTHROUGH */ + error1: + if (uuid_mounted) + xfs_uuid_unmount(mp); + xfs_freesb(mp); + return error; +} + +/* + * xfs_unmountfs + * + * This flushes out the inodes,dquots and the superblock, unmounts the + * log and makes sure that incore structures are freed. 
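+ *
+ * The ordering below matters: inodes are flushed and dquots purged
+ * first, the log is forced synchronously so that nothing remains
+ * pinned, cached buffers on all targets are invalidated, the
+ * superblock is written back, and only then is the log unmounted
+ * and the in-core state torn down.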
+ */ +int +xfs_unmountfs(xfs_mount_t *mp, struct cred *cr) +{ + struct vfs *vfsp = XFS_MTOVFS(mp); +#if defined(DEBUG) || defined(INDUCE_IO_ERROR) + int64_t fsid; +#endif + + xfs_iflush_all(mp, XFS_FLUSH_ALL); + + XFS_QM_DQPURGEALL(mp, + XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING); + + /* + * Flush out the log synchronously so that we know for sure + * that nothing is pinned. This is important because bflush() + * will skip pinned buffers. + */ + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); + + xfs_binval(mp->m_ddev_targp); + if (mp->m_rtdev_targp) { + xfs_binval(mp->m_rtdev_targp); + } + + xfs_unmountfs_writesb(mp); + + xfs_log_unmount(mp); /* Done! No more fs ops. */ + + xfs_freesb(mp); + + /* + * All inodes from this mount point should be freed. + */ + ASSERT(mp->m_inodes == NULL); + + /* + * We may have bufs that are in the process of getting written still. + * We must wait for the I/O completion of those. The sync flag here + * does a two pass iteration thru the bufcache. + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + xfs_incore_relse(mp->m_ddev_targp, 0, 1); /* synchronous */ + } + + xfs_unmountfs_close(mp, cr); + if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) + xfs_uuid_unmount(mp); + +#if defined(DEBUG) || defined(INDUCE_IO_ERROR) + /* + * clear all error tags on this filesystem + */ + memcpy(&fsid, &vfsp->vfs_fsid, sizeof(int64_t)); + xfs_errortag_clearall_umount(fsid, mp->m_fsname, 0); +#endif + XFS_IODONE(vfsp); + xfs_mount_free(mp, 1); + return 0; +} + +void +xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr) +{ + int have_logdev = (mp->m_logdev_targp != mp->m_ddev_targp); + + if (mp->m_ddev_targp) { + xfs_free_buftarg(mp->m_ddev_targp); + mp->m_ddev_targp = NULL; + } + if (mp->m_rtdev_targp) { + xfs_blkdev_put(mp->m_rtdev_targp->pbr_bdev); + xfs_free_buftarg(mp->m_rtdev_targp); + mp->m_rtdev_targp = NULL; + } + if (mp->m_logdev_targp && have_logdev) { + xfs_blkdev_put(mp->m_logdev_targp->pbr_bdev); + xfs_free_buftarg(mp->m_logdev_targp); + mp->m_logdev_targp = NULL; + } +} + +int +xfs_unmountfs_writesb(xfs_mount_t *mp) +{ + xfs_buf_t *sbp; + xfs_sb_t *sb; + int error = 0; + + /* + * skip superblock write if fs is read-only, or + * if we are doing a forced umount. + */ + sbp = xfs_getsb(mp, 0); + if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY || + XFS_FORCED_SHUTDOWN(mp))) { + /* + * mark shared-readonly if desired + */ + sb = XFS_BUF_TO_SBP(sbp); + if (mp->m_mk_sharedro) { + if (!(sb->sb_flags & XFS_SBF_READONLY)) + sb->sb_flags |= XFS_SBF_READONLY; + if (!XFS_SB_VERSION_HASSHARED(sb)) + XFS_SB_VERSION_ADDSHARED(sb); + xfs_fs_cmn_err(CE_NOTE, mp, + "Unmounting, marking shared read-only"); + } + XFS_BUF_UNDONE(sbp); + XFS_BUF_UNREAD(sbp); + XFS_BUF_UNDELAYWRITE(sbp); + XFS_BUF_WRITE(sbp); + XFS_BUF_UNASYNC(sbp); + ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); + xfsbdstrat(mp, sbp); + /* Nevermind errors we might get here. */ + error = xfs_iowait(sbp); + if (error) + xfs_ioerror_alert("xfs_unmountfs_writesb", + mp, sbp, XFS_BUF_ADDR(sbp)); + if (error && mp->m_mk_sharedro) + xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly"); + } + xfs_buf_relse(sbp); + return (error); +} + +/* + * xfs_mod_sb() can be used to copy arbitrary changes to the + * in-core superblock into the superblock buffer to be logged. + * It does not provide the higher level of locking that is + * needed to protect the in-core superblock from concurrent + * access. 
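+ *
+ * A minimal usage sketch, taken from how xfs_mount_log_sbunit()
+ * further down drives it:
+ *
+ *	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
+ *	... xfs_trans_reserve(...) ...
+ *	xfs_mod_sb(tp, XFS_SB_UNIT | XFS_SB_WIDTH);
+ *	xfs_trans_commit(tp, 0, NULL);
+ *
+ * after the caller has already updated the in-core mp->m_sb fields
+ * under its own locking.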
+ */
+void
+xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
+{
+	xfs_buf_t	*bp;
+	int		first;
+	int		last;
+	xfs_mount_t	*mp;
+	xfs_sb_t	*sbp;
+	xfs_sb_field_t	f;
+
+	ASSERT(fields);
+	if (!fields)
+		return;
+	mp = tp->t_mountp;
+	bp = xfs_trans_getsb(tp, mp, 0);
+	sbp = XFS_BUF_TO_SBP(bp);
+	first = sizeof(xfs_sb_t);
+	last = 0;
+
+	/* translate/copy */
+
+	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), -1, ARCH_CONVERT, fields);
+
+	/* find modified range */
+
+	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
+	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+	first = xfs_sb_info[f].offset;
+
+	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
+	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+	last = xfs_sb_info[f + 1].offset - 1;
+
+	xfs_trans_log_buf(tp, bp, first, last);
+}
+
+/*
+ * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
+ * a delta to a specified field in the in-core superblock.  Simply
+ * switch on the field indicated and apply the delta to that field.
+ * Fields are not allowed to dip below zero, so if the delta would
+ * do this do not apply it and return EINVAL.
+ *
+ * The SB_LOCK must be held when this routine is called.
+ */
+STATIC int
+xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
+		int delta, int rsvd)
+{
+	int		scounter;	/* short counter for 32 bit fields */
+	long long	lcounter;	/* long counter for 64 bit fields */
+	long long	res_used, rem;
+
+	/*
+	 * With the in-core superblock spin lock held, switch
+	 * on the indicated field.  Apply the delta to the
+	 * proper field.  If the field's value would dip below
+	 * 0, then do not apply the delta and return EINVAL.
+	 */
+	switch (field) {
+	case XFS_SBS_ICOUNT:
+		lcounter = (long long)mp->m_sb.sb_icount;
+		lcounter += delta;
+		if (lcounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_icount = lcounter;
+		return (0);
+	case XFS_SBS_IFREE:
+		lcounter = (long long)mp->m_sb.sb_ifree;
+		lcounter += delta;
+		if (lcounter < 0) {
+			ASSERT(0);
+			return (XFS_ERROR(EINVAL));
+		}
+		mp->m_sb.sb_ifree = lcounter;
+		return (0);
+	case XFS_SBS_FDBLOCKS:
+
+		lcounter = (long long)mp->m_sb.sb_fdblocks;
+		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
+
+		if (delta > 0) {		/* Putting blocks back */
+			if (res_used > delta) {
+				mp->m_resblks_avail += delta;
+			} else {
+				rem = delta - res_used;
+				mp->m_resblks_avail = mp->m_resblks;
+				lcounter += rem;
+			}
+		} else {			/* Taking blocks away */
+
+			lcounter += delta;
+
+		/*
+		 * If we're out of blocks, use any available reserved blocks if
+		 * we're allowed to.
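+		 *
+		 * (Hedged reading: m_resblks_avail is the reserved
+		 * pool set aside at mount time; only callers passing
+		 * rsvd may dip into it, and once the pool itself
+		 * would go negative the request fails with ENOSPC
+		 * rather than EINVAL, since running out of space is
+		 * a normal condition rather than corruption.)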
+ */ + + if (lcounter < 0) { + if (rsvd) { + lcounter = (long long)mp->m_resblks_avail + delta; + if (lcounter < 0) { + return (XFS_ERROR(ENOSPC)); + } + mp->m_resblks_avail = lcounter; + return (0); + } else { /* not reserved */ + return (XFS_ERROR(ENOSPC)); + } + } + } + + mp->m_sb.sb_fdblocks = lcounter; + return (0); + case XFS_SBS_FREXTENTS: + lcounter = (long long)mp->m_sb.sb_frextents; + lcounter += delta; + if (lcounter < 0) { + return (XFS_ERROR(ENOSPC)); + } + mp->m_sb.sb_frextents = lcounter; + return (0); + case XFS_SBS_DBLOCKS: + lcounter = (long long)mp->m_sb.sb_dblocks; + lcounter += delta; + if (lcounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_dblocks = lcounter; + return (0); + case XFS_SBS_AGCOUNT: + scounter = mp->m_sb.sb_agcount; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_agcount = scounter; + return (0); + case XFS_SBS_IMAX_PCT: + scounter = mp->m_sb.sb_imax_pct; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_imax_pct = scounter; + return (0); + case XFS_SBS_REXTSIZE: + scounter = mp->m_sb.sb_rextsize; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rextsize = scounter; + return (0); + case XFS_SBS_RBMBLOCKS: + scounter = mp->m_sb.sb_rbmblocks; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rbmblocks = scounter; + return (0); + case XFS_SBS_RBLOCKS: + lcounter = (long long)mp->m_sb.sb_rblocks; + lcounter += delta; + if (lcounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rblocks = lcounter; + return (0); + case XFS_SBS_REXTENTS: + lcounter = (long long)mp->m_sb.sb_rextents; + lcounter += delta; + if (lcounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rextents = lcounter; + return (0); + case XFS_SBS_REXTSLOG: + scounter = mp->m_sb.sb_rextslog; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rextslog = scounter; + return (0); + default: + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } +} + +/* + * xfs_mod_incore_sb() is used to change a field in the in-core + * superblock structure by the specified delta. This modification + * is protected by the SB_LOCK. Just use the xfs_mod_incore_sb_unlocked() + * routine to do the work. + */ +int +xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd) +{ + unsigned long s; + int status; + + s = XFS_SB_LOCK(mp); + status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); + XFS_SB_UNLOCK(mp, s); + return (status); +} + +/* + * xfs_mod_incore_sb_batch() is used to change more than one field + * in the in-core superblock structure at a time. This modification + * is protected by a lock internal to this module. The fields and + * changes to those fields are specified in the array of xfs_mod_sb + * structures passed in. + * + * Either all of the specified deltas will be applied or none of + * them will. If any modified field dips below 0, then all modifications + * will be backed out and EINVAL will be returned. + */ +int +xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) +{ + unsigned long s; + int status=0; + xfs_mod_sb_t *msbp; + + /* + * Loop through the array of mod structures and apply each + * individually. If any fail, then back out all those + * which have already been applied. 
Do all of this within + * the scope of the SB_LOCK so that all of the changes will + * be atomic. + */ + s = XFS_SB_LOCK(mp); + msbp = &msb[0]; + for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) { + /* + * Apply the delta at index n. If it fails, break + * from the loop so we'll fall into the undo loop + * below. + */ + status = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, + msbp->msb_delta, rsvd); + if (status != 0) { + break; + } + } + + /* + * If we didn't complete the loop above, then back out + * any changes made to the superblock. If you add code + * between the loop above and here, make sure that you + * preserve the value of status. Loop back until + * we step below the beginning of the array. Make sure + * we don't touch anything back there. + */ + if (status != 0) { + msbp--; + while (msbp >= msb) { + status = xfs_mod_incore_sb_unlocked(mp, + msbp->msb_field, -(msbp->msb_delta), rsvd); + ASSERT(status == 0); + msbp--; + } + } + XFS_SB_UNLOCK(mp, s); + return (status); +} + +/* + * xfs_getsb() is called to obtain the buffer for the superblock. + * The buffer is returned locked and read in from disk. + * The buffer should be released with a call to xfs_brelse(). + * + * If the flags parameter is BUF_TRYLOCK, then we'll only return + * the superblock buffer if it can be locked without sleeping. + * If it can't then we'll return NULL. + */ +xfs_buf_t * +xfs_getsb( + xfs_mount_t *mp, + int flags) +{ + xfs_buf_t *bp; + + ASSERT(mp->m_sb_bp != NULL); + bp = mp->m_sb_bp; + if (flags & XFS_BUF_TRYLOCK) { + if (!XFS_BUF_CPSEMA(bp)) { + return NULL; + } + } else { + XFS_BUF_PSEMA(bp, PRIBIO); + } + XFS_BUF_HOLD(bp); + ASSERT(XFS_BUF_ISDONE(bp)); + return (bp); +} + +/* + * Used to free the superblock along various error paths. + */ +void +xfs_freesb( + xfs_mount_t *mp) +{ + xfs_buf_t *bp; + + /* + * Use xfs_getsb() so that the buffer will be locked + * when we call xfs_buf_relse(). + */ + bp = xfs_getsb(mp, 0); + XFS_BUF_UNMANAGE(bp); + xfs_buf_relse(bp); + mp->m_sb_bp = NULL; +} + +/* + * See if the UUID is unique among mounted XFS filesystems. + * Mount fails if UUID is nil or a FS with the same UUID is already mounted. + */ +STATIC int +xfs_uuid_mount( + xfs_mount_t *mp) +{ + if (uuid_is_nil(&mp->m_sb.sb_uuid)) { + cmn_err(CE_WARN, + "XFS: Filesystem %s has nil UUID - can't mount", + mp->m_fsname); + return -1; + } + if (!uuid_table_insert(&mp->m_sb.sb_uuid)) { + cmn_err(CE_WARN, + "XFS: Filesystem %s has duplicate UUID - can't mount", + mp->m_fsname); + return -1; + } + return 0; +} + +/* + * Remove filesystem from the UUID table. + */ +STATIC void +xfs_uuid_unmount( + xfs_mount_t *mp) +{ + uuid_table_remove(&mp->m_sb.sb_uuid); +} + +/* + * Used to log changes to the superblock unit and width fields which could + * be altered by the mount options. Only the first superblock is updated. + */ +STATIC void +xfs_mount_log_sbunit( + xfs_mount_t *mp, + __int64_t fields) +{ + xfs_trans_t *tp; + + ASSERT(fields & (XFS_SB_UNIT|XFS_SB_WIDTH|XFS_SB_UUID)); + + tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); + if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, + XFS_DEFAULT_LOG_COUNT)) { + xfs_trans_cancel(tp, 0); + return; + } + xfs_mod_sb(tp, fields); + xfs_trans_commit(tp, 0, NULL); +} + +/* Functions to lock access out of the filesystem for forced + * shutdown or snapshot. 
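+ *
+ * A minimal sketch of the protocol, from the freezer's side:
+ *
+ *	xfs_start_freeze(mp, XFS_FREEZE_TRANS);	(blocks new trans)
+ *	... do the snapshot or shutdown work ...
+ *	xfs_finish_freeze(mp);			(wakes all waiters)
+ *
+ * Transaction paths call xfs_check_frozen() on entry and sleep on
+ * m_wait_unfreeze while frozen; m_active_trans counts transactions
+ * in flight so that xfs_start_freeze() can wait for them to drain.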
+ */ + +void +xfs_start_freeze( + xfs_mount_t *mp, + int level) +{ + unsigned long s = mutex_spinlock(&mp->m_freeze_lock); + + mp->m_frozen = level; + mutex_spinunlock(&mp->m_freeze_lock, s); + + if (level == XFS_FREEZE_TRANS) { + while (atomic_read(&mp->m_active_trans) > 0) + delay(100); + } +} + +void +xfs_finish_freeze( + xfs_mount_t *mp) +{ + unsigned long s = mutex_spinlock(&mp->m_freeze_lock); + + if (mp->m_frozen) { + mp->m_frozen = 0; + sv_broadcast(&mp->m_wait_unfreeze); + } + + mutex_spinunlock(&mp->m_freeze_lock, s); +} + +void +xfs_check_frozen( + xfs_mount_t *mp, + bhv_desc_t *bdp, + int level) +{ + unsigned long s; + + if (mp->m_frozen) { + s = mutex_spinlock(&mp->m_freeze_lock); + + if (mp->m_frozen < level) { + mutex_spinunlock(&mp->m_freeze_lock, s); + } else { + sv_wait(&mp->m_wait_unfreeze, 0, &mp->m_freeze_lock, s); + } + } + + if (level == XFS_FREEZE_TRANS) + atomic_inc(&mp->m_active_trans); +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_mount.h linux.21rc5-ac2/fs/xfs/xfs_mount.h --- linux.21rc5/fs/xfs/xfs_mount.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_mount.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,582 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_MOUNT_H__ +#define __XFS_MOUNT_H__ + + +typedef struct xfs_trans_reservations { + uint tr_write; /* extent alloc trans */ + uint tr_itruncate; /* truncate trans */ + uint tr_rename; /* rename trans */ + uint tr_link; /* link trans */ + uint tr_remove; /* unlink trans */ + uint tr_symlink; /* symlink trans */ + uint tr_create; /* create trans */ + uint tr_mkdir; /* mkdir trans */ + uint tr_ifree; /* inode free trans */ + uint tr_ichange; /* inode update trans */ + uint tr_growdata; /* fs data section grow trans */ + uint tr_swrite; /* sync write inode trans */ + uint tr_addafork; /* cvt inode to attributed trans */ + uint tr_writeid; /* write setuid/setgid file */ + uint tr_attrinval; /* attr fork buffer invalidation */ + uint tr_attrset; /* set/create an attribute */ + uint tr_attrrm; /* remove an attribute */ + uint tr_clearagi; /* clear bad agi unlinked ino bucket */ + uint tr_growrtalloc; /* grow realtime allocations */ + uint tr_growrtzero; /* grow realtime zeroing */ + uint tr_growrtfree; /* grow realtime freeing */ +} xfs_trans_reservations_t; + + +#ifndef __KERNEL__ +/* + * Moved here from xfs_ag.h to avoid reordering header files + */ +#define XFS_DADDR_TO_AGNO(mp,d) \ + ((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks)) +#define XFS_DADDR_TO_AGBNO(mp,d) \ + ((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks)) +#else +struct cred; +struct vfs; +struct vnode; +struct xfs_mount_args; +struct xfs_ihash; +struct xfs_chash; +struct xfs_inode; +struct xfs_perag; +struct xfs_iocore; +struct xfs_bmbt_irec; +struct xfs_bmap_free; + +#define SPLDECL(s) unsigned long s +#define AIL_LOCK_T lock_t +#define AIL_LOCKINIT(x,y) spinlock_init(x,y) +#define AIL_LOCK_DESTROY(x) spinlock_destroy(x) +#define AIL_LOCK(mp,s) s=mutex_spinlock(&(mp)->m_ail_lock) +#define AIL_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_ail_lock, s) + + +/* + * Prototypes and functions for the Data Migration subsystem. + */ + +typedef int (*xfs_send_data_t)(int, struct bhv_desc *, + xfs_off_t, size_t, int, vrwlock_t *); +typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint); +typedef int (*xfs_send_destroy_t)(struct bhv_desc *, dm_right_t); +typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct bhv_desc *, + dm_right_t, struct bhv_desc *, dm_right_t, + char *, char *, mode_t, int, int); +typedef void (*xfs_send_unmount_t)(struct vfs *, struct vnode *, + dm_right_t, mode_t, int, int); + +typedef struct xfs_dmops { + xfs_send_data_t xfs_send_data; + xfs_send_mmap_t xfs_send_mmap; + xfs_send_destroy_t xfs_send_destroy; + xfs_send_namesp_t xfs_send_namesp; + xfs_send_unmount_t xfs_send_unmount; +} xfs_dmops_t; + +#define XFS_SEND_DATA(mp, ev,bdp,off,len,fl,lock) \ + (*(mp)->m_dm_ops.xfs_send_data)(ev,bdp,off,len,fl,lock) +#define XFS_SEND_MMAP(mp, vma,fl) \ + (*(mp)->m_dm_ops.xfs_send_mmap)(vma,fl) +#define XFS_SEND_DESTROY(mp, bdp,right) \ + (*(mp)->m_dm_ops.xfs_send_destroy)(bdp,right) +#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \ + (*(mp)->m_dm_ops.xfs_send_namesp)(ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) +#define XFS_SEND_UNMOUNT(mp, vfsp,vp,right,mode,rval,fl) \ + (*(mp)->m_dm_ops.xfs_send_unmount)(vfsp,vp,right,mode,rval,fl) + + +/* + * Prototypes and functions for the Quota Management subsystem. 
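+ *
+ * (Note: every quota entry point goes through the m_qm_ops vector,
+ * e.g.
+ *
+ *	XFS_QM_DQPURGEALL(mp, fl)
+ *	    expands to (*(mp)->m_qm_ops.xfs_dqpurgeall)(mp, fl)
+ *
+ * presumably so that a build without the quota subsystem can install
+ * stub operations instead of scattering #ifdefs over the callers.)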
+ */ + +struct xfs_dquot; +struct xfs_dqtrxops; +struct xfs_quotainfo; + +typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *); +typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint); +typedef int (*xfs_qmunmount_t)(struct xfs_mount *); +typedef void (*xfs_qmdone_t)(struct xfs_mount *); +typedef void (*xfs_dqrele_t)(struct xfs_dquot *); +typedef int (*xfs_dqattach_t)(struct xfs_inode *, uint); +typedef void (*xfs_dqdetach_t)(struct xfs_inode *); +typedef int (*xfs_dqpurgeall_t)(struct xfs_mount *, uint); +typedef int (*xfs_dqvopalloc_t)(struct xfs_mount *, + struct xfs_inode *, uid_t, gid_t, uint, + struct xfs_dquot **, struct xfs_dquot **); +typedef void (*xfs_dqvopcreate_t)(struct xfs_trans *, struct xfs_inode *, + struct xfs_dquot *, struct xfs_dquot *); +typedef int (*xfs_dqvoprename_t)(struct xfs_inode **); +typedef struct xfs_dquot * (*xfs_dqvopchown_t)( + struct xfs_trans *, struct xfs_inode *, + struct xfs_dquot **, struct xfs_dquot *); +typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *, + struct xfs_dquot *, struct xfs_dquot *, uint); + +typedef struct xfs_qmops { + xfs_qminit_t xfs_qminit; + xfs_qmdone_t xfs_qmdone; + xfs_qmmount_t xfs_qmmount; + xfs_qmunmount_t xfs_qmunmount; + xfs_dqrele_t xfs_dqrele; + xfs_dqattach_t xfs_dqattach; + xfs_dqdetach_t xfs_dqdetach; + xfs_dqpurgeall_t xfs_dqpurgeall; + xfs_dqvopalloc_t xfs_dqvopalloc; + xfs_dqvopcreate_t xfs_dqvopcreate; + xfs_dqvoprename_t xfs_dqvoprename; + xfs_dqvopchown_t xfs_dqvopchown; + xfs_dqvopchownresv_t xfs_dqvopchownresv; + struct xfs_dqtrxops *xfs_dqtrxops; +} xfs_qmops_t; + +#define XFS_QM_INIT(mp, mnt, fl) \ + (*(mp)->m_qm_ops.xfs_qminit)(mp, mnt, fl) +#define XFS_QM_MOUNT(mp, mnt, fl) \ + (*(mp)->m_qm_ops.xfs_qmmount)(mp, mnt, fl) +#define XFS_QM_UNMOUNT(mp) \ + (*(mp)->m_qm_ops.xfs_qmunmount)(mp) +#define XFS_QM_DONE(mp) \ + (*(mp)->m_qm_ops.xfs_qmdone)(mp) +#define XFS_QM_DQRELE(mp, dq) \ + (*(mp)->m_qm_ops.xfs_dqrele)(dq) +#define XFS_QM_DQATTACH(mp, ip, fl) \ + (*(mp)->m_qm_ops.xfs_dqattach)(ip, fl) +#define XFS_QM_DQDETACH(mp, ip) \ + (*(mp)->m_qm_ops.xfs_dqdetach)(ip) +#define XFS_QM_DQPURGEALL(mp, fl) \ + (*(mp)->m_qm_ops.xfs_dqpurgeall)(mp, fl) +#define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, fl, dq1, dq2) \ + (*(mp)->m_qm_ops.xfs_dqvopalloc)(mp, ip, uid, gid, fl, dq1, dq2) +#define XFS_QM_DQVOPCREATE(mp, tp, ip, dq1, dq2) \ + (*(mp)->m_qm_ops.xfs_dqvopcreate)(tp, ip, dq1, dq2) +#define XFS_QM_DQVOPRENAME(mp, ip) \ + (*(mp)->m_qm_ops.xfs_dqvoprename)(ip) +#define XFS_QM_DQVOPCHOWN(mp, tp, ip, dqp, dq) \ + (*(mp)->m_qm_ops.xfs_dqvopchown)(tp, ip, dqp, dq) +#define XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, dq1, dq2, fl) \ + (*(mp)->m_qm_ops.xfs_dqvopchownresv)(tp, ip, dq1, dq2, fl) + + +/* + * Prototypes and functions for I/O core modularization. 
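+ *
+ * (Note: this is the same ops-vector pattern as the DM and quota
+ * subsystems above; XFS_BMAPI(), XFS_ILOCK() and friends expand to
+ * indirect calls through mp->m_io_ops, which is presumably what
+ * allows the XFS_MFSI_CLIENT mount case seen in xfs_mountfs() to
+ * substitute a different I/O core.)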
+ */ + +typedef int (*xfs_ioinit_t)(struct vfs *, + struct xfs_mount_args *, int); +typedef int (*xfs_bmapi_t)(struct xfs_trans *, void *, + xfs_fileoff_t, xfs_filblks_t, int, + xfs_fsblock_t *, xfs_extlen_t, + struct xfs_bmbt_irec *, int *, + struct xfs_bmap_free *); +typedef int (*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *); +typedef int (*xfs_iomap_write_direct_t)( + void *, loff_t, size_t, int, + struct xfs_bmbt_irec *, int *, int); +typedef int (*xfs_iomap_write_delay_t)( + void *, loff_t, size_t, int, + struct xfs_bmbt_irec *, int *); +typedef int (*xfs_iomap_write_allocate_t)( + void *, struct xfs_bmbt_irec *, int *); +typedef int (*xfs_iomap_write_unwritten_t)( + void *, loff_t, size_t); +typedef uint (*xfs_lck_map_shared_t)(void *); +typedef void (*xfs_lock_t)(void *, uint); +typedef void (*xfs_lock_demote_t)(void *, uint); +typedef int (*xfs_lock_nowait_t)(void *, uint); +typedef void (*xfs_unlk_t)(void *, unsigned int); +typedef xfs_fsize_t (*xfs_size_t)(void *); +typedef xfs_fsize_t (*xfs_iodone_t)(struct vfs *); + +typedef struct xfs_ioops { + xfs_ioinit_t xfs_ioinit; + xfs_bmapi_t xfs_bmapi_func; + xfs_bmap_eof_t xfs_bmap_eof_func; + xfs_iomap_write_direct_t xfs_iomap_write_direct; + xfs_iomap_write_delay_t xfs_iomap_write_delay; + xfs_iomap_write_allocate_t xfs_iomap_write_allocate; + xfs_iomap_write_unwritten_t xfs_iomap_write_unwritten; + xfs_lock_t xfs_ilock; + xfs_lck_map_shared_t xfs_lck_map_shared; + xfs_lock_demote_t xfs_ilock_demote; + xfs_lock_nowait_t xfs_ilock_nowait; + xfs_unlk_t xfs_unlock; + xfs_size_t xfs_size_func; + xfs_iodone_t xfs_iodone; +} xfs_ioops_t; + +#define XFS_IOINIT(vfsp, args, flags) \ + (*(mp)->m_io_ops.xfs_ioinit)(vfsp, args, flags) +#define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist) \ + (*(mp)->m_io_ops.xfs_bmapi_func) \ + (trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist) +#define XFS_BMAP_EOF(mp, io, endoff, whichfork, eof) \ + (*(mp)->m_io_ops.xfs_bmap_eof_func) \ + ((io)->io_obj, endoff, whichfork, eof) +#define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\ + (*(mp)->m_io_ops.xfs_iomap_write_direct) \ + ((io)->io_obj, offset, count, flags, mval, nmap, found) +#define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \ + (*(mp)->m_io_ops.xfs_iomap_write_delay) \ + ((io)->io_obj, offset, count, flags, mval, nmap) +#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \ + (*(mp)->m_io_ops.xfs_iomap_write_allocate) \ + ((io)->io_obj, mval, nmap) +#define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \ + (*(mp)->m_io_ops.xfs_iomap_write_unwritten) \ + ((io)->io_obj, offset, count) +#define XFS_LCK_MAP_SHARED(mp, io) \ + (*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj) +#define XFS_ILOCK(mp, io, mode) \ + (*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode) +#define XFS_ILOCK_NOWAIT(mp, io, mode) \ + (*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode) +#define XFS_IUNLOCK(mp, io, mode) \ + (*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode) +#define XFS_ILOCK_DEMOTE(mp, io, mode) \ + (*(mp)->m_io_ops.xfs_ilock_demote)((io)->io_obj, mode) +#define XFS_SIZE(mp, io) \ + (*(mp)->m_io_ops.xfs_size_func)((io)->io_obj) +#define XFS_IODONE(vfsp) \ + (*(mp)->m_io_ops.xfs_iodone)(vfsp) + + +typedef struct xfs_mount { + bhv_desc_t m_bhv; /* vfs xfs behavior */ + xfs_tid_t m_tid; /* next unused tid for fs */ + AIL_LOCK_T m_ail_lock; /* fs AIL mutex */ + xfs_ail_entry_t m_ail; /* fs active log item list */ + uint m_ail_gen; /* fs AIL generation count */ + xfs_sb_t m_sb; /* copy of fs 
superblock */ + lock_t m_sb_lock; /* sb counter mutex */ + struct xfs_buf *m_sb_bp; /* buffer for superblock */ + char *m_fsname; /* filesystem name */ + int m_fsname_len; /* strlen of fs name */ + int m_bsize; /* fs logical block size */ + xfs_agnumber_t m_agfrotor; /* last ag where space found */ + xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ + lock_t m_agirotor_lock;/* .. and lock protecting it */ + xfs_agnumber_t m_maxagi; /* highest inode alloc group */ + int m_ihsize; /* size of next field */ + struct xfs_ihash *m_ihash; /* fs private inode hash table*/ + struct xfs_inode *m_inodes; /* active inode list */ + struct list_head m_del_inodes; /* inodes to reclaim */ + mutex_t m_ilock; /* inode list mutex */ + uint m_ireclaims; /* count of calls to reclaim*/ + uint m_readio_log; /* min read size log bytes */ + uint m_readio_blocks; /* min read size blocks */ + uint m_writeio_log; /* min write size log bytes */ + uint m_writeio_blocks; /* min write size blocks */ + void *m_log; /* log specific stuff */ + int m_logbufs; /* number of log buffers */ + int m_logbsize; /* size of each log buffer */ + uint m_rsumlevels; /* rt summary levels */ + uint m_rsumsize; /* size of rt summary, bytes */ + struct xfs_inode *m_rbmip; /* pointer to bitmap inode */ + struct xfs_inode *m_rsumip; /* pointer to summary inode */ + struct xfs_inode *m_rootip; /* pointer to root directory */ + struct xfs_quotainfo *m_quotainfo; /* disk quota information */ + xfs_buftarg_t *m_ddev_targp; /* saves taking the address */ + xfs_buftarg_t *m_logdev_targp;/* ptr to log device */ + xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */ +#define m_dev m_ddev_targp->pbr_dev + __uint8_t m_dircook_elog; /* log d-cookie entry bits */ + __uint8_t m_blkbit_log; /* blocklog + NBBY */ + __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ + __uint8_t m_agno_log; /* log #ag's */ + __uint8_t m_agino_log; /* #bits for agino in inum */ + __uint8_t m_nreadaheads; /* #readahead buffers */ + __uint16_t m_inode_cluster_size;/* min inode buf size */ + uint m_blockmask; /* sb_blocksize-1 */ + uint m_blockwsize; /* sb_blocksize in words */ + uint m_blockwmask; /* blockwsize-1 */ + uint m_alloc_mxr[2]; /* XFS_ALLOC_BLOCK_MAXRECS */ + uint m_alloc_mnr[2]; /* XFS_ALLOC_BLOCK_MINRECS */ + uint m_bmap_dmxr[2]; /* XFS_BMAP_BLOCK_DMAXRECS */ + uint m_bmap_dmnr[2]; /* XFS_BMAP_BLOCK_DMINRECS */ + uint m_inobt_mxr[2]; /* XFS_INOBT_BLOCK_MAXRECS */ + uint m_inobt_mnr[2]; /* XFS_INOBT_BLOCK_MINRECS */ + uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */ + uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */ + uint m_in_maxlevels; /* XFS_IN_MAXLEVELS */ + struct xfs_perag *m_perag; /* per-ag accounting info */ + struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */ + sema_t m_growlock; /* growfs mutex */ + int m_fixedfsid[2]; /* unchanged for life of FS */ + uint m_dmevmask; /* DMI events for this FS */ + uint m_flags; /* global mount flags */ + uint m_attroffset; /* inode attribute offset */ + uint m_dir_node_ents; /* #entries in a dir danode */ + uint m_attr_node_ents; /* #entries in attr danode */ + int m_ialloc_inos; /* inodes in inode allocation */ + int m_ialloc_blks; /* blocks in inode allocation */ + int m_litino; /* size of inode union area */ + int m_inoalign_mask;/* mask sb_inoalignmt if used */ + uint m_qflags; /* quota status flags */ + xfs_trans_reservations_t m_reservations;/* precomputed res values */ + __uint64_t m_maxicount; /* maximum inode count */ + __uint64_t m_resblks; /* total reserved blocks */ + __uint64_t 
m_resblks_avail;/* available reserved blocks */ +#if XFS_BIG_FILESYSTEMS + xfs_ino_t m_inoadd; /* add value for ino64_offset */ +#endif + int m_dalign; /* stripe unit */ + int m_swidth; /* stripe width */ + int m_lstripemask; /* log stripe mask */ + int m_sinoalign; /* stripe unit inode alignmnt */ + int m_attr_magicpct;/* 37% of the blocksize */ + int m_dir_magicpct; /* 37% of the dir blocksize */ + __uint8_t m_mk_sharedro; /* mark shared ro on unmount */ + __uint8_t m_inode_quiesce;/* call quiesce on new inodes. + field governed by m_ilock */ + __uint8_t m_sectbb_log; /* sectlog - BBSHIFT */ + __uint8_t m_dirversion; /* 1 or 2 */ + xfs_dirops_t m_dirops; /* table of dir funcs */ + int m_dirblksize; /* directory block sz--bytes */ + int m_dirblkfsbs; /* directory block sz--fsbs */ + xfs_dablk_t m_dirdatablk; /* blockno of dir data v2 */ + xfs_dablk_t m_dirleafblk; /* blockno of dir non-data v2 */ + xfs_dablk_t m_dirfreeblk; /* blockno of dirfreeindex v2 */ + int m_chsize; /* size of next field */ + struct xfs_chash *m_chash; /* fs private inode per-cluster + * hash table */ + struct xfs_dmops m_dm_ops; /* vector of DMI ops */ + struct xfs_qmops m_qm_ops; /* vector of XQM ops */ + struct xfs_ioops m_io_ops; /* vector of I/O ops */ + lock_t m_freeze_lock; /* Lock for m_frozen */ + uint m_frozen; /* FS frozen for shutdown or + * snapshot */ + sv_t m_wait_unfreeze;/* waiting to unfreeze */ + atomic_t m_active_trans; /* number trans frozen */ +} xfs_mount_t; + +/* + * Flags for m_flags. + */ +#define XFS_MOUNT_WSYNC 0x00000001 /* for nfs - all metadata ops + must be synchronous except + for space allocations */ +#if XFS_BIG_FILESYSTEMS +#define XFS_MOUNT_INO64 0x00000002 +#endif + /* 0x00000004 -- currently unused */ + /* 0x00000008 -- currently unused */ +#define XFS_MOUNT_FS_SHUTDOWN 0x00000010 /* atomic stop of all filesystem + operations, typically for + disk errors in metadata */ +#define XFS_MOUNT_NOATIME 0x00000020 /* don't modify inode access + times on reads */ +#define XFS_MOUNT_RETERR 0x00000040 /* return alignment errors to + user */ +#define XFS_MOUNT_NOALIGN 0x00000080 /* turn off stripe alignment + allocations */ + /* 0x00000100 -- currently unused */ + /* 0x00000200 -- currently unused */ +#define XFS_MOUNT_NORECOVERY 0x00000400 /* no recovery - dirty fs */ +#define XFS_MOUNT_SHARED 0x00000800 /* shared mount */ +#define XFS_MOUNT_DFLT_IOSIZE 0x00001000 /* set default i/o size */ +#define XFS_MOUNT_OSYNCISOSYNC 0x00002000 /* o_sync is REALLY o_sync */ + /* osyncisdsync is now default*/ +#define XFS_MOUNT_NOUUID 0x00004000 /* ignore uuid during mount */ +#define XFS_MOUNT_32BITINODES 0x00008000 /* do not create inodes above + * 32 bits in size */ +#define XFS_MOUNT_NOLOGFLUSH 0x00010000 + +#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN) + +/* + * Default minimum read and write sizes. + */ +#define XFS_READIO_LOG_LARGE 16 +#define XFS_WRITEIO_LOG_LARGE 16 +/* + * Default allocation size + */ +#define XFS_WRITE_IO_LOG 16 + +/* + * Max and min values for UIO and mount-option defined I/O sizes; + * min value can't be less than a page. Currently unused. + */ +#define XFS_MAX_IO_LOG 16 /* 64K */ +#define XFS_MIN_IO_LOG PAGE_SHIFT + +/* + * Synchronous read and write sizes. This should be + * better for NFSv2 wsync filesystems. + */ +#define XFS_WSYNC_READIO_LOG 15 /* 32K */ +#define XFS_WSYNC_WRITEIO_LOG 14 /* 16K */ + +#define xfs_force_shutdown(m,f) \ + VFS_FORCE_SHUTDOWN((XFS_MTOVFS(m)), f, __FILE__, __LINE__) + +/* + * Flags sent to xfs_force_shutdown. 
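+ *
+ * These bits may be OR'ed together. A hypothetical caller that hit a
+ * failed metadata write would go through the wrapper macro defined
+ * above:
+ *
+ *     xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);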
+ */ +#define XFS_METADATA_IO_ERROR 0x1 +#define XFS_LOG_IO_ERROR 0x2 +#define XFS_FORCE_UMOUNT 0x4 +#define XFS_CORRUPT_INCORE 0x8 /* Corrupt in-memory data structures */ +#define XFS_SHUTDOWN_REMOTE_REQ 0x10 /* Shutdown came from remote cell */ + +/* + * xflags for xfs_syncsub + */ +#define XFS_XSYNC_RELOC 0x01 + +/* + * Flags for xfs_mountfs + */ +#define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */ +#define XFS_MFSI_CLIENT 0x02 /* Is a client -- skip lots of stuff */ +#define XFS_MFSI_NOUNLINK 0x08 /* Skip unlinked inode processing in */ + /* log recovery */ + +/* + * Macros for getting from mount to vfs and back. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MTOVFS) +struct vfs *xfs_mtovfs(xfs_mount_t *mp); +#define XFS_MTOVFS(mp) xfs_mtovfs(mp) +#else +#define XFS_MTOVFS(mp) (bhvtovfs(&(mp)->m_bhv)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BHVTOM) +xfs_mount_t *xfs_bhvtom(bhv_desc_t *bdp); +#define XFS_BHVTOM(bdp) xfs_bhvtom(bdp) +#else +#define XFS_BHVTOM(bdp) ((xfs_mount_t *)BHV_PDATA(bdp)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_VFSTOM) +xfs_mount_t *xfs_vfstom(vfs_t *vfs); +#define XFS_VFSTOM(vfs) xfs_vfstom(vfs) +#else +#define XFS_VFSTOM(vfs) \ + (XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfs), &xfs_vfsops))) +#endif + + +/* + * Moved here from xfs_ag.h to avoid reordering header files + */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DADDR_TO_AGNO) +xfs_agnumber_t xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d); +#define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d) +#else + +static inline xfs_agnumber_t XFS_DADDR_TO_AGNO(xfs_mount_t *mp, xfs_daddr_t d) +{ + d = XFS_BB_TO_FSBT(mp, d); + do_div(d, mp->m_sb.sb_agblocks); + return (xfs_agnumber_t) d; +} + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DADDR_TO_AGBNO) +xfs_agblock_t xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d); +#define XFS_DADDR_TO_AGBNO(mp,d) xfs_daddr_to_agbno(mp,d) +#else + +static inline xfs_agblock_t XFS_DADDR_TO_AGBNO(xfs_mount_t *mp, xfs_daddr_t d) +{ + d = XFS_BB_TO_FSBT(mp, d); + return (xfs_agblock_t) do_div(d, mp->m_sb.sb_agblocks); +} + +#endif + +/* + * This structure is for use by the xfs_mod_incore_sb_batch() routine. 
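+ *
+ * A hypothetical batch of two counter updates, applied under a single
+ * superblock lock by xfs_mod_incore_sb_batch() (declared below; the
+ * XFS_SBS_* field names come from xfs_sb.h, and nblks is illustrative):
+ *
+ *     xfs_mod_sb_t msb[2] = {
+ *             { XFS_SBS_FDBLOCKS, -(int)nblks },
+ *             { XFS_SBS_ICOUNT, 1 },
+ *     };
+ *     error = xfs_mod_incore_sb_batch(mp, msb, 2, 0);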
+ */ +typedef struct xfs_mod_sb { + xfs_sb_field_t msb_field; /* Field to modify, see below */ + int msb_delta; /* Change to make to specified field */ +} xfs_mod_sb_t; + +#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock), PINOD) +#define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock)) +#define XFS_SB_LOCK(mp) mutex_spinlock(&(mp)->m_sb_lock) +#define XFS_SB_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_sb_lock,(s)) + +extern xfs_mount_t *xfs_mount_init(void); +extern void xfs_mod_sb(xfs_trans_t *, __int64_t); +extern void xfs_mount_free(xfs_mount_t *mp, int remove_bhv); +extern int xfs_mountfs(struct vfs *, xfs_mount_t *mp, dev_t, int); + +extern int xfs_unmountfs(xfs_mount_t *, struct cred *); +extern void xfs_unmountfs_close(xfs_mount_t *, struct cred *); +extern int xfs_unmountfs_writesb(xfs_mount_t *); +extern int xfs_unmount_flush(xfs_mount_t *, int); +extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int, int); +extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, + uint, int); +extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); +extern int xfs_readsb(xfs_mount_t *mp); +extern void xfs_freesb(xfs_mount_t *); +extern void xfs_do_force_shutdown(bhv_desc_t *, int, char *, int); +extern int xfs_syncsub(xfs_mount_t *, int, int, int *); +extern void xfs_initialize_perag(xfs_mount_t *, int); +extern void xfs_xlatesb(void *, struct xfs_sb *, int, xfs_arch_t, + __int64_t); + +/* + * Flags for freeze operations. + */ +#define XFS_FREEZE_WRITE 1 +#define XFS_FREEZE_TRANS 2 + +extern void xfs_start_freeze(xfs_mount_t *, int); +extern void xfs_finish_freeze(xfs_mount_t *); +extern void xfs_check_frozen(xfs_mount_t *, bhv_desc_t *, int); + +extern struct vfsops xfs_vfsops; +extern struct vnodeops xfs_vnodeops; + +extern struct xfs_dmops xfs_dmcore_xfs; +extern struct xfs_qmops xfs_qmcore_xfs; +extern struct xfs_ioops xfs_iocore_xfs; + +extern int xfs_init(void); +extern void xfs_cleanup(void); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_MOUNT_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_qmops.c linux.21rc5-ac2/fs/xfs/xfs_qmops.c --- linux.21rc5/fs/xfs/xfs_qmops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_qmops.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" + + +#ifndef CONFIG_XFS_QUOTA +STATIC struct xfs_dquot * +xfs_dqvopchown_default( + struct xfs_trans *tp, + struct xfs_inode *ip, + struct xfs_dquot **dqp, + struct xfs_dquot *dq) +{ + return NULL; +} + +xfs_qmops_t xfs_qmcore_xfs = { + .xfs_qminit = (xfs_qminit_t) fs_noerr, + .xfs_qmdone = (xfs_qmdone_t) fs_noerr, + .xfs_qmmount = (xfs_qmmount_t) fs_noerr, + .xfs_qmunmount = (xfs_qmunmount_t) fs_noerr, + .xfs_dqrele = (xfs_dqrele_t) fs_noerr, + .xfs_dqattach = (xfs_dqattach_t) fs_noerr, + .xfs_dqdetach = (xfs_dqdetach_t) fs_noerr, + .xfs_dqpurgeall = (xfs_dqpurgeall_t) fs_noerr, + .xfs_dqvopalloc = (xfs_dqvopalloc_t) fs_noerr, + .xfs_dqvopcreate = (xfs_dqvopcreate_t) fs_noerr, + .xfs_dqvoprename = (xfs_dqvoprename_t) fs_noerr, + .xfs_dqvopchown = xfs_dqvopchown_default, + .xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr, +}; +#endif /* CONFIG_XFS_QUOTA */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_quota.h linux.21rc5-ac2/fs/xfs/xfs_quota.h --- linux.21rc5/fs/xfs/xfs_quota.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_quota.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_QUOTA_H__ +#define __XFS_QUOTA_H__ + +/* + * The ondisk form of a dquot structure. + */ +#define XFS_DQUOT_MAGIC 0x4451 /* 'DQ' */ +#define XFS_DQUOT_VERSION (u_int8_t)0x01 /* latest version number */ + +/* + * uid_t and gid_t are hard-coded to 32 bits in the inode. + * Hence, an 'id' in a dquot is 32 bits.. + */ +typedef __int32_t xfs_dqid_t; + +/* + * Eventhough users may not have quota limits occupying all 64-bits, + * they may need 64-bit accounting. 
Hence, 64-bit quota-counters, + * and quota-limits. This is a waste in the common case, but hey ... + */ +typedef __uint64_t xfs_qcnt_t; +typedef __uint16_t xfs_qwarncnt_t; + +/* + * This is the main portion of the on-disk representation of quota + * information for a user. This is the q_core of the xfs_dquot_t that + * is kept in kernel memory. We pad this with some more expansion room + * to construct the on disk structure. + */ +typedef struct xfs_disk_dquot { +/*16*/ u_int16_t d_magic; /* dquot magic = XFS_DQUOT_MAGIC */ +/*8 */ u_int8_t d_version; /* dquot version */ +/*8 */ u_int8_t d_flags; /* XFS_DQ_USER/PROJ/GROUP */ +/*32*/ xfs_dqid_t d_id; /* user,project,group id */ +/*64*/ xfs_qcnt_t d_blk_hardlimit;/* absolute limit on disk blks */ +/*64*/ xfs_qcnt_t d_blk_softlimit;/* preferred limit on disk blks */ +/*64*/ xfs_qcnt_t d_ino_hardlimit;/* maximum # allocated inodes */ +/*64*/ xfs_qcnt_t d_ino_softlimit;/* preferred inode limit */ +/*64*/ xfs_qcnt_t d_bcount; /* disk blocks owned by the user */ +/*64*/ xfs_qcnt_t d_icount; /* inodes owned by the user */ +/*32*/ __int32_t d_itimer; /* zero if within inode limits if not, + this is when we refuse service */ +/*32*/ __int32_t d_btimer; /* similar to above; for disk blocks */ +/*16*/ xfs_qwarncnt_t d_iwarns; /* warnings issued wrt num inodes */ +/*16*/ xfs_qwarncnt_t d_bwarns; /* warnings issued wrt disk blocks */ +/*32*/ __int32_t d_pad0; /* 64 bit align */ +/*64*/ xfs_qcnt_t d_rtb_hardlimit;/* absolute limit on realtime blks */ +/*64*/ xfs_qcnt_t d_rtb_softlimit;/* preferred limit on RT disk blks */ +/*64*/ xfs_qcnt_t d_rtbcount; /* realtime blocks owned */ +/*32*/ __int32_t d_rtbtimer; /* similar to above; for RT disk blocks */ +/*16*/ xfs_qwarncnt_t d_rtbwarns; /* warnings issued wrt RT disk blocks */ +/*16*/ __uint16_t d_pad; +} xfs_disk_dquot_t; + +/* + * This is what goes on disk. This is separated from the xfs_disk_dquot because + * carrying the unnecessary padding would be a waste of memory. + */ +typedef struct xfs_dqblk { + xfs_disk_dquot_t dd_diskdq; /* portion that lives incore as well */ + char dd_fill[32]; /* filling for posterity */ +} xfs_dqblk_t; + +/* + * flags for q_flags field in the dquot. + */ +#define XFS_DQ_USER 0x0001 /* a user quota */ +/* #define XFS_DQ_PROJ 0x0002 -- project quota (IRIX) */ +#define XFS_DQ_GROUP 0x0004 /* a group quota */ +#define XFS_DQ_FLOCKED 0x0008 /* flush lock taken */ +#define XFS_DQ_DIRTY 0x0010 /* dquot is dirty */ +#define XFS_DQ_WANT 0x0020 /* for lookup/reclaim race */ +#define XFS_DQ_INACTIVE 0x0040 /* dq off mplist & hashlist */ +#define XFS_DQ_MARKER 0x0080 /* sentinel */ + +/* + * In the worst case, when both user and group quotas are on, + * we can have a max of three dquots changing in a single transaction. + */ +#define XFS_DQUOT_LOGRES(mp) (sizeof(xfs_disk_dquot_t) * 3) + + +/* + * These are the structures used to lay out dquots and quotaoff + * records on the log. Quite similar to those of inodes. + */ + +/* + * log format struct for dquots. + * The first two fields must be the type and size fitting into + * 32 bits : log_recovery code assumes that. + */ +typedef struct xfs_dq_logformat { + __uint16_t qlf_type; /* dquot log item type */ + __uint16_t qlf_size; /* size of this item */ + xfs_dqid_t qlf_id; /* usr/grp id number : 32 bits */ + __int64_t qlf_blkno; /* blkno of dquot buffer */ + __int32_t qlf_len; /* len of dquot buffer */ + __uint32_t qlf_boffset; /* off of dquot in buffer */ +} xfs_dq_logformat_t; + +/* + * log format struct for QUOTAOFF records. 
+ * The first two fields must be the type and size fitting into
+ * 32 bits: log_recovery code assumes that.
+ * We write two LI_QUOTAOFF logitems per quotaoff, the last one keeps a pointer
+ * to the first and ensures that the first logitem is taken out of the AIL
+ * only when the last one is securely committed.
+ */
+typedef struct xfs_qoff_logformat {
+ unsigned short qf_type; /* quotaoff log item type */
+ unsigned short qf_size; /* size of this item */
+ unsigned int qf_flags; /* USR and/or GRP */
+ char qf_pad[12]; /* padding for future */
+} xfs_qoff_logformat_t;
+
+
+/*
+ * Disk quota status in m_qflags, and also sb_qflags. 16 bits.
+ */
+#define XFS_UQUOTA_ACCT 0x0001 /* user quota accounting ON */
+#define XFS_UQUOTA_ENFD 0x0002 /* user quota limits enforced */
+#define XFS_UQUOTA_CHKD 0x0004 /* quotacheck run on usr quotas */
+#define XFS_PQUOTA_ACCT 0x0008 /* (IRIX) project quota accounting ON */
+#define XFS_GQUOTA_ENFD 0x0010 /* group quota limits enforced */
+#define XFS_GQUOTA_CHKD 0x0020 /* quotacheck run on grp quotas */
+#define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */
+
+/*
+ * Incore-only flags for quotaoff - these bits get cleared when quota(s)
+ * are in the process of getting turned off. These flags are in m_qflags but
+ * never in sb_qflags.
+ */
+#define XFS_UQUOTA_ACTIVE 0x0080 /* uquotas are being turned off */
+#define XFS_GQUOTA_ACTIVE 0x0100 /* gquotas are being turned off */
+
+/*
+ * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees
+ * quota will not be switched off as long as that inode lock is held.
+ */
+#define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \
+ XFS_GQUOTA_ACTIVE))
+#define XFS_IS_UQUOTA_ON(mp) ((mp)->m_qflags & XFS_UQUOTA_ACTIVE)
+#define XFS_IS_GQUOTA_ON(mp) ((mp)->m_qflags & XFS_GQUOTA_ACTIVE)
+
+/*
+ * Flags to tell various functions what to do. Not all of these are meaningful
+ * to a single function. None of these XFS_QMOPT_* flags are meant to have
+ * persistent values (i.e. their values can and will change between versions).
+ */
+#define XFS_QMOPT_DQLOCK 0x0000001 /* dqlock */
+#define XFS_QMOPT_DQALLOC 0x0000002 /* alloc dquot ondisk if needed */
+#define XFS_QMOPT_UQUOTA 0x0000004 /* user dquot requested */
+#define XFS_QMOPT_GQUOTA 0x0000008 /* group dquot requested */
+#define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */
+#define XFS_QMOPT_DQSUSER 0x0000020 /* don't cache superuser's dquot */
+#define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */
+#define XFS_QMOPT_QUOTAOFF 0x0000080 /* quotas are being turned off */
+#define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */
+#define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */
+#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if necessary */
+#define XFS_QMOPT_ILOCKED 0x0000800 /* inode is already locked (excl) */
+#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot, if damaged. */
+
+/*
+ * flags to xfs_trans_mod_dquot to indicate which field needs to be
+ * modified.
+ */
+#define XFS_QMOPT_RES_REGBLKS 0x0010000
+#define XFS_QMOPT_RES_RTBLKS 0x0020000
+#define XFS_QMOPT_BCOUNT 0x0040000
+#define XFS_QMOPT_ICOUNT 0x0080000
+#define XFS_QMOPT_RTBCOUNT 0x0100000
+#define XFS_QMOPT_DELBCOUNT 0x0200000
+#define XFS_QMOPT_DELRTBCOUNT 0x0400000
+#define XFS_QMOPT_RES_INOS 0x0800000
+
+/*
+ * flags for dqflush and dqflush_all.
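+ *
+ * These select how a dirty dquot is written back; e.g. an unmount-time
+ * flush that must wait for the I/O would pass XFS_QMOPT_SYNC
+ * (hypothetical call; xfs_qm_dqflush lives in the quota module):
+ *
+ *     error = xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);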
+ */
+#define XFS_QMOPT_SYNC 0x1000000
+#define XFS_QMOPT_ASYNC 0x2000000
+#define XFS_QMOPT_DELWRI 0x4000000
+
+/*
+ * flags for dqalloc.
+ */
+#define XFS_QMOPT_INHERIT 0x8000000
+
+/*
+ * flags to xfs_trans_mod_dquot.
+ */
+#define XFS_TRANS_DQ_RES_BLKS XFS_QMOPT_RES_REGBLKS
+#define XFS_TRANS_DQ_RES_RTBLKS XFS_QMOPT_RES_RTBLKS
+#define XFS_TRANS_DQ_RES_INOS XFS_QMOPT_RES_INOS
+#define XFS_TRANS_DQ_BCOUNT XFS_QMOPT_BCOUNT
+#define XFS_TRANS_DQ_DELBCOUNT XFS_QMOPT_DELBCOUNT
+#define XFS_TRANS_DQ_ICOUNT XFS_QMOPT_ICOUNT
+#define XFS_TRANS_DQ_RTBCOUNT XFS_QMOPT_RTBCOUNT
+#define XFS_TRANS_DQ_DELRTBCOUNT XFS_QMOPT_DELRTBCOUNT
+
+
+#define XFS_QMOPT_QUOTALL (XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA)
+#define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
+
+#ifdef __KERNEL__
+/*
+ * This check is typically done without holding the inode lock;
+ * that may seem racy, but it is harmless in the context in which it is used.
+ * The inode cannot go inactive as long as a reference is kept, and
+ * therefore if dquot(s) were attached, they'll stay consistent.
+ * If, for example, the ownership of the inode changes while
+ * we didn't have the inode locked, the appropriate dquot(s) will be
+ * attached atomically.
+ */
+#define XFS_NOT_DQATTACHED(mp, ip) ((XFS_IS_UQUOTA_ON(mp) &&\
+ (ip)->i_udquot == NULL) || \
+ (XFS_IS_GQUOTA_ON(mp) && \
+ (ip)->i_gdquot == NULL))
+
+#define XFS_QM_NEED_QUOTACHECK(mp) ((XFS_IS_UQUOTA_ON(mp) && \
+ (mp->m_sb.sb_qflags & \
+ XFS_UQUOTA_CHKD) == 0) || \
+ (XFS_IS_GQUOTA_ON(mp) && \
+ (mp->m_sb.sb_qflags & \
+ XFS_GQUOTA_CHKD) == 0))
+
+#define XFS_MOUNT_QUOTA_ALL (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
+ XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\
+ XFS_GQUOTA_ENFD|XFS_GQUOTA_CHKD)
+#define XFS_MOUNT_QUOTA_MASK (XFS_MOUNT_QUOTA_ALL | XFS_UQUOTA_ACTIVE | \
+ XFS_GQUOTA_ACTIVE)
+
+
+/*
+ * This structure, kept inside the xfs_trans_t, keeps track of dquot
+ * changes within a transaction so they can be applied later.
+ */
+typedef struct xfs_dqtrx {
+ struct xfs_dquot *qt_dquot; /* the dquot this refers to */
+ ulong qt_blk_res; /* blks reserved on a dquot */
+ ulong qt_blk_res_used; /* blks used from the reservation */
+ ulong qt_ino_res; /* inode reserved on a dquot */
+ ulong qt_ino_res_used; /* inodes used from the reservation */
+ long qt_bcount_delta; /* dquot blk count changes */
+ long qt_delbcnt_delta; /* delayed dquot blk count changes */
+ long qt_icount_delta; /* dquot inode count changes */
+ ulong qt_rtblk_res; /* # blks reserved on a dquot */
+ ulong qt_rtblk_res_used;/* # blks used from reservation */
+ long qt_rtbcount_delta;/* dquot realtime blk changes */
+ long qt_delrtb_delta; /* delayed RT blk count changes */
+} xfs_dqtrx_t;
+
+/*
+ * Dquot transaction functions, used if quota is enabled.
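+ *
+ * Core code never calls these directly; it goes through the
+ * XFS_TRANS_* wrappers below, which collapse to 0 when xfs_dqtrxops
+ * is NULL (quota compiled out). Illustrative use:
+ *
+ *     XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, nblks);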
+ */ +typedef void (*qo_dup_dqinfo_t)(struct xfs_trans *, struct xfs_trans *); +typedef void (*qo_mod_dquot_byino_t)(struct xfs_trans *, + struct xfs_inode *, uint, long); +typedef void (*qo_free_dqinfo_t)(struct xfs_trans *); +typedef void (*qo_apply_dquot_deltas_t)(struct xfs_trans *); +typedef void (*qo_unreserve_and_mod_dquots_t)(struct xfs_trans *); +typedef int (*qo_reserve_quota_nblks_t)( + struct xfs_trans *, struct xfs_mount *, + struct xfs_inode *, long, long, uint); +typedef int (*qo_reserve_quota_bydquots_t)( + struct xfs_trans *, struct xfs_mount *, + struct xfs_dquot *, struct xfs_dquot *, + long, long, uint); +typedef struct xfs_dqtrxops { + qo_dup_dqinfo_t qo_dup_dqinfo; + qo_free_dqinfo_t qo_free_dqinfo; + qo_mod_dquot_byino_t qo_mod_dquot_byino; + qo_apply_dquot_deltas_t qo_apply_dquot_deltas; + qo_reserve_quota_nblks_t qo_reserve_quota_nblks; + qo_reserve_quota_bydquots_t qo_reserve_quota_bydquots; + qo_unreserve_and_mod_dquots_t qo_unreserve_and_mod_dquots; +} xfs_dqtrxops_t; + +#define XFS_DQTRXOP(mp, tp, op, args...) \ + ((mp)->m_qm_ops.xfs_dqtrxops ? \ + ((mp)->m_qm_ops.xfs_dqtrxops->op)(tp, ## args) : 0) + +#define XFS_TRANS_DUP_DQINFO(mp, otp, ntp) \ + XFS_DQTRXOP(mp, otp, qo_dup_dqinfo, ntp) +#define XFS_TRANS_FREE_DQINFO(mp, tp) \ + XFS_DQTRXOP(mp, tp, qo_free_dqinfo) +#define XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, field, delta) \ + XFS_DQTRXOP(mp, tp, qo_mod_dquot_byino, ip, field, delta) +#define XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp) \ + XFS_DQTRXOP(mp, tp, qo_apply_dquot_deltas) +#define XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, fl) \ + XFS_DQTRXOP(mp, tp, qo_reserve_quota_nblks, mp, ip, nblks, ninos, fl) +#define XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, fl) \ + XFS_DQTRXOP(mp, tp, qo_reserve_quota_bydquots, mp, ud, gd, nb, ni, fl) +#define XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp) \ + XFS_DQTRXOP(mp, tp, qo_unreserve_and_mod_dquots) + +#define XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, nblks) \ + XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \ + XFS_QMOPT_RES_REGBLKS) +#define XFS_TRANS_RESERVE_BLKQUOTA_FORCE(mp, tp, ip, nblks) \ + XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \ + XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES) +#define XFS_TRANS_UNRESERVE_BLKQUOTA(mp, tp, ip, nblks) \ + XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, -(nblks), 0, \ + XFS_QMOPT_RES_REGBLKS) +#define XFS_TRANS_RESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \ + XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, \ + f | XFS_QMOPT_RES_REGBLKS) +#define XFS_TRANS_UNRESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \ + XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, -(nb), -(ni), \ + f | XFS_QMOPT_RES_REGBLKS) + +extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *); + +extern struct bhv_vfsops xfs_qmops; + +extern void xfs_qm_init(void); +extern void xfs_qm_exit(void); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_QUOTA_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_rename.c linux.21rc5-ac2/fs/xfs/xfs_rename.c --- linux.21rc5/fs/xfs/xfs_rename.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_rename.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,675 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_error.h" +#include "xfs_quota.h" +#include "xfs_rw.h" +#include "xfs_utils.h" +#include "xfs_trans_space.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_dmapi.h" + + +/* + * Given an array of up to 4 inode pointers, unlock the pointed to inodes. + * If there are fewer than 4 entries in the array, the empty entries will + * be at the end and will have NULL pointers in them. + */ +STATIC void +xfs_rename_unlock4( + xfs_inode_t **i_tab, + uint lock_mode) +{ + int i; + + xfs_iunlock(i_tab[0], lock_mode); + for (i = 1; i < 4; i++) { + if (i_tab[i] == NULL) { + break; + } + /* + * Watch out for duplicate entries in the table. + */ + if (i_tab[i] != i_tab[i-1]) { + xfs_iunlock(i_tab[i], lock_mode); + } + } +} + +#ifdef DEBUG +int xfs_rename_skip, xfs_rename_nskip; +#endif + +/* + * The following routine will acquire the locks required for a rename + * operation. The code understands the semantics of renames and will + * validate that name1 exists under dp1 & that name2 may or may not + * exist under dp2. + * + * We are renaming dp1/name1 to dp2/name2. + * + * Return ENOENT if dp1 does not exist, other lookup errors, or 0 for success. + */ +STATIC int +xfs_lock_for_rename( + xfs_inode_t *dp1, /* old (source) directory inode */ + xfs_inode_t *dp2, /* new (target) directory inode */ + vname_t *vname1,/* old entry name */ + vname_t *vname2,/* new entry name */ + xfs_inode_t **ipp1, /* inode of old entry */ + xfs_inode_t **ipp2, /* inode of new entry, if it + already exists, NULL otherwise. */ + xfs_inode_t **i_tab,/* array of inode returned, sorted */ + int *num_inodes) /* number of inodes in array */ +{ + xfs_inode_t *ip1, *ip2, *temp; + xfs_ino_t inum1, inum2; + int error; + int i, j; + uint lock_mode; + int diff_dirs = (dp1 != dp2); + + ip2 = NULL; + + /* + * First, find out the current inums of the entries so that we + * can determine the initial locking order. 
We'll have to + * sanity check stuff after all the locks have been acquired + * to see if we still have the right inodes, directories, etc. + */ + lock_mode = xfs_ilock_map_shared(dp1); + error = xfs_get_dir_entry(vname1, &ip1); + if (error) { + xfs_iunlock_map_shared(dp1, lock_mode); + return error; + } + + inum1 = ip1->i_ino; + + ASSERT(ip1); + ITRACE(ip1); + + /* + * Unlock dp1 and lock dp2 if they are different. + */ + + if (diff_dirs) { + xfs_iunlock_map_shared(dp1, lock_mode); + lock_mode = xfs_ilock_map_shared(dp2); + } + + error = xfs_dir_lookup_int(XFS_ITOBHV(dp2), lock_mode, + vname2, &inum2, &ip2); + if (error == ENOENT) { /* target does not need to exist. */ + inum2 = 0; + } else if (error) { + /* + * If dp2 and dp1 are the same, the next line unlocks dp1. + * Got it? + */ + xfs_iunlock_map_shared(dp2, lock_mode); + IRELE (ip1); + return error; + } else { + ITRACE(ip2); + } + + /* + * i_tab contains a list of pointers to inodes. We initialize + * the table here & we'll sort it. We will then use it to + * order the acquisition of the inode locks. + * + * Note that the table may contain duplicates. e.g., dp1 == dp2. + */ + i_tab[0] = dp1; + i_tab[1] = dp2; + i_tab[2] = ip1; + if (inum2 == 0) { + *num_inodes = 3; + i_tab[3] = NULL; + } else { + *num_inodes = 4; + i_tab[3] = ip2; + } + + /* + * Sort the elements via bubble sort. (Remember, there are at + * most 4 elements to sort, so this is adequate.) + */ + for (i=0; i < *num_inodes; i++) { + for (j=1; j < *num_inodes; j++) { + if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { + temp = i_tab[j]; + i_tab[j] = i_tab[j-1]; + i_tab[j-1] = temp; + } + } + } + + /* + * We have dp2 locked. If it isn't first, unlock it. + * If it is first, tell xfs_lock_inodes so it can skip it + * when locking. if dp1 == dp2, xfs_lock_inodes will skip both + * since they are equal. xfs_lock_inodes needs all these inodes + * so that it can unlock and retry if there might be a dead-lock + * potential with the log. + */ + + if (i_tab[0] == dp2 && lock_mode == XFS_ILOCK_SHARED) { +#ifdef DEBUG + xfs_rename_skip++; +#endif + xfs_lock_inodes(i_tab, *num_inodes, 1, XFS_ILOCK_SHARED); + } else { +#ifdef DEBUG + xfs_rename_nskip++; +#endif + xfs_iunlock_map_shared(dp2, lock_mode); + xfs_lock_inodes(i_tab, *num_inodes, 0, XFS_ILOCK_SHARED); + } + + /* + * Set the return value. Null out any unused entries in i_tab. + */ + *ipp1 = *ipp2 = NULL; + for (i=0; i < *num_inodes; i++) { + if (i_tab[i]->i_ino == inum1) { + *ipp1 = i_tab[i]; + } + if (i_tab[i]->i_ino == inum2) { + *ipp2 = i_tab[i]; + } + } + for (;i < 4; i++) { + i_tab[i] = NULL; + } + return 0; +} + + +int rename_which_error_return = 0; + +/* + * xfs_rename + */ +int +xfs_rename( + bhv_desc_t *src_dir_bdp, + vname_t *src_vname, + vnode_t *target_dir_vp, + vname_t *target_vname, + cred_t *credp) +{ + xfs_trans_t *tp; + xfs_inode_t *src_dp, *target_dp, *src_ip, *target_ip; + xfs_mount_t *mp; + int new_parent; /* moving to a new dir */ + int src_is_directory; /* src_name is a directory */ + int error; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + int cancel_flags; + int committed; + xfs_inode_t *inodes[4]; + int target_ip_dropped = 0; /* dropped target_ip link? 
*/ + vnode_t *src_dir_vp; + bhv_desc_t *target_dir_bdp; + int spaceres; + int target_link_zero = 0; + int num_inodes; + char *src_name = VNAME(src_vname); + char *target_name = VNAME(target_vname); + int src_namelen = VNAMELEN(src_vname); + int target_namelen = VNAMELEN(target_vname); + + src_dir_vp = BHV_TO_VNODE(src_dir_bdp); + vn_trace_entry(src_dir_vp, "xfs_rename", (inst_t *)__return_address); + vn_trace_entry(target_dir_vp, "xfs_rename", (inst_t *)__return_address); + + /* + * Find the XFS behavior descriptor for the target directory + * vnode since it was not handed to us. + */ + target_dir_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(target_dir_vp), + &xfs_vnodeops); + if (target_dir_bdp == NULL) { + return XFS_ERROR(EXDEV); + } + + src_dp = XFS_BHVTOI(src_dir_bdp); + target_dp = XFS_BHVTOI(target_dir_bdp); + mp = src_dp->i_mount; + + if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) || + DM_EVENT_ENABLED(target_dir_vp->v_vfsp, + target_dp, DM_EVENT_RENAME)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME, + src_dir_bdp, DM_RIGHT_NULL, + target_dir_bdp, DM_RIGHT_NULL, + src_name, target_name, + 0, 0, 0); + if (error) { + return error; + } + } + /* Return through std_return after this point. */ + + /* + * Lock all the participating inodes. Depending upon whether + * the target_name exists in the target directory, and + * whether the target directory is the same as the source + * directory, we can lock from 2 to 4 inodes. + * xfs_lock_for_rename() will return ENOENT if src_name + * does not exist in the source directory. + */ + tp = NULL; + error = xfs_lock_for_rename(src_dp, target_dp, src_vname, + target_vname, &src_ip, &target_ip, inodes, + &num_inodes); + + if (error) { + rename_which_error_return = __LINE__; + /* + * We have nothing locked, no inode references, and + * no transaction, so just get out. + */ + goto std_return; + } + + ASSERT(src_ip != NULL); + + if ((src_ip->i_d.di_mode & IFMT) == IFDIR) { + /* + * Check for link count overflow on target_dp + */ + if (target_ip == NULL && (src_dp != target_dp) && + target_dp->i_d.di_nlink >= XFS_MAXLINK) { + rename_which_error_return = __LINE__; + error = XFS_ERROR(EMLINK); + xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); + goto rele_return; + } + } + + new_parent = (src_dp != target_dp); + src_is_directory = ((src_ip->i_d.di_mode & IFMT) == IFDIR); + + /* + * Drop the locks on our inodes so that we can do the ancestor + * check if necessary and start the transaction. + */ + xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); + + XFS_BMAP_INIT(&free_list, &first_block); + tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen); + error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT); + if (error == ENOSPC) { + spaceres = 0; + error = xfs_trans_reserve(tp, 0, XFS_RENAME_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT); + } + if (error) { + rename_which_error_return = __LINE__; + xfs_trans_cancel(tp, 0); + goto rele_return; + } + + /* + * Attach the dquots to the inodes + */ + if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) { + xfs_trans_cancel(tp, cancel_flags); + rename_which_error_return = __LINE__; + goto rele_return; + } + + /* + * Reacquire the inode locks we dropped above. + */ + xfs_lock_inodes(inodes, num_inodes, 0, XFS_ILOCK_EXCL); + + /* + * Join all the inodes to the transaction. 
From this point on, + * we can rely on either trans_commit or trans_cancel to unlock + * them. Note that we need to add a vnode reference to the + * directories since trans_commit & trans_cancel will decrement + * them when they unlock the inodes. Also, we need to be careful + * not to add an inode to the transaction more than once. + */ + VN_HOLD(src_dir_vp); + xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); + if (new_parent) { + VN_HOLD(target_dir_vp); + xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); + } + if ((src_ip != src_dp) && (src_ip != target_dp)) { + xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); + } + if ((target_ip != NULL) && + (target_ip != src_ip) && + (target_ip != src_dp) && + (target_ip != target_dp)) { + xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); + } + + /* + * Set up the target. + */ + if (target_ip == NULL) { + /* + * If there's no space reservation, check the entry will + * fit before actually inserting it. + */ + if (spaceres == 0 && + (error = XFS_DIR_CANENTER(mp, tp, target_dp, target_name, + target_namelen))) { + rename_which_error_return = __LINE__; + goto error_return; + } + /* + * If target does not exist and the rename crosses + * directories, adjust the target directory link count + * to account for the ".." reference from the new entry. + */ + error = XFS_DIR_CREATENAME(mp, tp, target_dp, target_name, + target_namelen, src_ip->i_ino, + &first_block, &free_list, spaceres); + if (error == ENOSPC) { + rename_which_error_return = __LINE__; + goto error_return; + } + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + if (new_parent && src_is_directory) { + error = xfs_bumplink(tp, target_dp); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + } + } else { /* target_ip != NULL */ + + /* + * If target exists and it's a directory, check that both + * target and source are directories and that target can be + * destroyed, or that neither is a directory. + */ + if ((target_ip->i_d.di_mode & IFMT) == IFDIR) { + /* + * Make sure target dir is empty. + */ + if (!(XFS_DIR_ISEMPTY(target_ip->i_mount, target_ip)) || + (target_ip->i_d.di_nlink > 2)) { + error = XFS_ERROR(EEXIST); + rename_which_error_return = __LINE__; + goto error_return; + } + } + + /* + * Link the source inode under the target name. + * If the source inode is a directory and we are moving + * it across directories, its ".." entry will be + * inconsistent until we replace that down below. + * + * In case there is already an entry with the same + * name at the destination directory, remove it first. + */ + error = XFS_DIR_REPLACE(mp, tp, target_dp, target_name, + target_namelen, src_ip->i_ino, &first_block, + &free_list, spaceres); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + /* + * Decrement the link count on the target since the target + * dir no longer points to it. + */ + error = xfs_droplink(tp, target_ip); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return;; + } + target_ip_dropped = 1; + + if (src_is_directory) { + /* + * Drop the link from the old "." entry. + */ + error = xfs_droplink(tp, target_ip); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + } + + /* Do this test while we still hold the locks */ + target_link_zero = (target_ip)->i_d.di_nlink==0; + + } /* target_ip != NULL */ + + /* + * Remove the source. 
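+ *
+ * Up to three things need touching: the ".." entry of a moved
+ * directory, the ctime on the source inode, and the link count on
+ * src_dp; only then is the old directory entry itself removed.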
+ */ + if (new_parent && src_is_directory) { + + /* + * Rewrite the ".." entry to point to the new + * directory. + */ + error = XFS_DIR_REPLACE(mp, tp, src_ip, "..", 2, + target_dp->i_ino, &first_block, + &free_list, spaceres); + ASSERT(error != EEXIST); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + xfs_ichgtime(src_ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + } else { + /* + * We always want to hit the ctime on the source inode. + * We do it in the if clause above for the 'new_parent && + * src_is_directory' case, and here we get all the other + * cases. This isn't strictly required by the standards + * since the source inode isn't really being changed, + * but old unix file systems did it and some incremental + * backup programs won't work without it. + */ + xfs_ichgtime(src_ip, XFS_ICHGTIME_CHG); + } + + /* + * Adjust the link count on src_dp. This is necessary when + * renaming a directory, either within one parent when + * the target existed, or across two parent directories. + */ + if (src_is_directory && (new_parent || target_ip != NULL)) { + + /* + * Decrement link count on src_directory since the + * entry that's moved no longer points to it. + */ + error = xfs_droplink(tp, src_dp); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + } + + error = XFS_DIR_REMOVENAME(mp, tp, src_dp, src_name, src_namelen, + src_ip->i_ino, &first_block, &free_list, spaceres); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + /* + * Update the generation counts on all the directory inodes + * that we're modifying. + */ + src_dp->i_gen++; + xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); + + if (new_parent) { + target_dp->i_gen++; + xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); + } + + /* + * If there was a target inode, take an extra reference on + * it here so that it doesn't go to xfs_inactive() from + * within the commit. + */ + if (target_ip != NULL) { + IHOLD(target_ip); + } + + /* + * If this is a synchronous mount, make sure that the + * rename transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + /* + * Take refs. for vop_link_removed calls below. No need to worry + * about directory refs. because the caller holds them. + * + * Do holds before the xfs_bmap_finish since it might rele them down + * to zero. + */ + + if (target_ip_dropped) + IHOLD(target_ip); + IHOLD(src_ip); + + error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); + if (error) { + xfs_bmap_cancel(&free_list); + xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT)); + if (target_ip != NULL) { + IRELE(target_ip); + } + if (target_ip_dropped) { + IRELE(target_ip); + } + IRELE(src_ip); + goto std_return; + } + + /* + * trans_commit will unlock src_ip, target_ip & decrement + * the vnode references. + */ + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (target_ip != NULL) { + xfs_refcache_purge_ip(target_ip); + IRELE(target_ip); + } + /* + * Let interposed file systems know about removed links. 
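+ * The extra reference taken on target_ip before the commit is what
+ * keeps it valid for this VOP_LINK_REMOVED() call.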
+ */ + if (target_ip_dropped) { + VOP_LINK_REMOVED(XFS_ITOV(target_ip), target_dir_vp, + target_link_zero); + IRELE(target_ip); + } + + FSC_NOTIFY_NAME_CHANGED(XFS_ITOV(src_ip)); + + IRELE(src_ip); + + /* Fall through to std_return with error = 0 or errno from + * xfs_trans_commit */ +std_return: + if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_POSTRENAME) || + DM_EVENT_ENABLED(target_dir_vp->v_vfsp, + target_dp, DM_EVENT_POSTRENAME)) { + (void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME, + src_dir_bdp, DM_RIGHT_NULL, + target_dir_bdp, DM_RIGHT_NULL, + src_name, target_name, + 0, error, 0); + } + return error; + + abort_return: + cancel_flags |= XFS_TRANS_ABORT; + /* FALLTHROUGH */ + error_return: + xfs_bmap_cancel(&free_list); + xfs_trans_cancel(tp, cancel_flags); + goto std_return; + + rele_return: + IRELE(src_ip); + if (target_ip != NULL) { + IRELE(target_ip); + } + goto std_return; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_rtalloc.c linux.21rc5-ac2/fs/xfs/xfs_rtalloc.c --- linux.21rc5/fs/xfs/xfs_rtalloc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_rtalloc.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,2471 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Free realtime space allocation for XFS. + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_fsops.h" +#include "xfs_error.h" +#include "xfs_rw.h" +#include "xfs_inode_item.h" +#include "xfs_trans_space.h" + + +/* + * Prototypes for internal functions. 
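+ *
+ * Most of these thread a cached summary-file buffer through the
+ * rbpp/rsb arguments so repeated lookups during one allocation do
+ * not have to re-read the same summary block.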
+ */ + + +STATIC int xfs_rtallocate_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_extlen_t, xfs_buf_t **, xfs_fsblock_t *); +STATIC int xfs_rtany_summary(xfs_mount_t *, xfs_trans_t *, int, int, + xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, int *); +STATIC int xfs_rtcheck_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_extlen_t, int, xfs_rtblock_t *, int *); +STATIC int xfs_rtfind_back(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_rtblock_t, xfs_rtblock_t *); +STATIC int xfs_rtfind_forw(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_rtblock_t, xfs_rtblock_t *); +STATIC int xfs_rtget_summary( xfs_mount_t *, xfs_trans_t *, int, + xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, xfs_suminfo_t *); +STATIC int xfs_rtmodify_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_extlen_t, int); +STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int, + xfs_rtblock_t, int, xfs_buf_t **, xfs_fsblock_t *); + +/* + * Internal functions. + */ + +/* + * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set. + */ +STATIC int +xfs_lowbit32( + __uint32_t v) +{ + return ffs(v)-1; +} + +/* + * Allocate space to the bitmap or summary file, and zero it, for growfs. + */ +STATIC int /* error */ +xfs_growfs_rt_alloc( + xfs_mount_t *mp, /* file system mount point */ + xfs_extlen_t oblocks, /* old count of blocks */ + xfs_extlen_t nblocks, /* new count of blocks */ + xfs_ino_t ino) /* inode number (bitmap/summary) */ +{ + xfs_fileoff_t bno; /* block number in file */ + xfs_buf_t *bp; /* temporary buffer for zeroing */ + int cancelflags; /* flags for xfs_trans_cancel */ + int committed; /* transaction committed flag */ + xfs_daddr_t d; /* disk block address */ + int error; /* error return value */ + xfs_fsblock_t firstblock; /* first block allocated in xaction */ + xfs_bmap_free_t flist; /* list of freed blocks */ + xfs_fsblock_t fsbno; /* filesystem block for bno */ + xfs_inode_t *ip; /* pointer to incore inode */ + xfs_bmbt_irec_t map; /* block map output */ + int nmap; /* number of block maps */ + int resblks; /* space reservation */ + xfs_trans_t *tp; /* transaction pointer */ + + /* + * Allocate space to the file, as necessary. + */ + while (oblocks < nblocks) { + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC); + resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks); + cancelflags = 0; + /* + * Reserve space & log for one extent added to the file. + */ + if ((error = xfs_trans_reserve(tp, resblks, + XFS_GROWRTALLOC_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_DEFAULT_PERM_LOG_COUNT))) + goto error_exit; + cancelflags = XFS_TRANS_RELEASE_LOG_RES; + /* + * Lock the inode. + */ + if ((error = xfs_trans_iget(mp, tp, ino, XFS_ILOCK_EXCL, &ip))) + goto error_exit; + XFS_BMAP_INIT(&flist, &firstblock); + /* + * Allocate blocks to the bitmap file. + */ + nmap = 1; + cancelflags |= XFS_TRANS_ABORT; + error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks, + XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock, + resblks, &map, &nmap, &flist); + if (!error && nmap < 1) + error = XFS_ERROR(ENOSPC); + if (error) + goto error_exit; + /* + * Free any blocks freed up in the transaction, then commit. + */ + error = xfs_bmap_finish(&tp, &flist, firstblock, &committed); + if (error) + goto error_exit; + xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + /* + * Now we need to clear the allocated blocks. + * Do this one block per transaction, to keep it simple. 
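+ * One block per transaction keeps each log reservation small
+ * (XFS_GROWRTZERO_LOG_RES covers a single block) at the price of
+ * extra commits, which is fine for a rare growfs operation.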
+ */ + cancelflags = 0; + for (bno = map.br_startoff, fsbno = map.br_startblock; + bno < map.br_startoff + map.br_blockcount; + bno++, fsbno++) { + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO); + /* + * Reserve log for one block zeroing. + */ + if ((error = xfs_trans_reserve(tp, 0, + XFS_GROWRTZERO_LOG_RES(mp), 0, 0, 0))) + goto error_exit; + /* + * Lock the bitmap inode. + */ + if ((error = xfs_trans_iget(mp, tp, ino, XFS_ILOCK_EXCL, + &ip))) + goto error_exit; + /* + * Get a buffer for the block. + */ + d = XFS_FSB_TO_DADDR(mp, fsbno); + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, + mp->m_bsize, 0); + if (bp == NULL) { + error = XFS_ERROR(EIO); + goto error_exit; + } + memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize); + xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); + /* + * Commit the transaction. + */ + xfs_trans_commit(tp, 0, NULL); + } + /* + * Go on to the next extent, if any. + */ + oblocks = map.br_startoff + map.br_blockcount; + } + return 0; +error_exit: + xfs_trans_cancel(tp, cancelflags); + return error; +} + +/* + * Attempt to allocate an extent minlen<=len<=maxlen starting from + * bitmap block bbno. If we don't get maxlen then use prod to trim + * the length, if given. Returns error; returns starting block in *rtblock. + * The lengths are all in rtextents. + */ +STATIC int /* error */ +xfs_rtallocate_extent_block( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bbno, /* bitmap block number */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_rtblock_t *nextp, /* out: next block to try */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock) /* out: start block allocated */ +{ + xfs_rtblock_t besti; /* best rtblock found so far */ + xfs_rtblock_t bestlen; /* best length found so far */ + xfs_rtblock_t end; /* last rtblock in chunk */ + int error; /* error value */ + xfs_rtblock_t i; /* current rtblock trying */ + xfs_rtblock_t next; /* next rtblock to try */ + int stat; /* status from internal calls */ + + /* + * Loop over all the extents starting in this bitmap block, + * looking for one that's long enough. + */ + for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0, + end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1; + i <= end; + i++) { + /* + * See if there's a free extent of maxlen starting at i. + * If it's not so then next will contain the first non-free. + */ + error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat); + if (error) { + return error; + } + if (stat) { + /* + * i for maxlen is all free, allocate and return that. + */ + error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp, + rsb); + if (error) { + return error; + } + *len = maxlen; + *rtblock = i; + return 0; + } + /* + * In the case where we have a variable-sized allocation + * request, figure out how big this free piece is, + * and if it's big enough for the minimum, and the best + * so far, remember it. + */ + if (minlen < maxlen) { + xfs_rtblock_t thislen; /* this extent size */ + + thislen = next - i; + if (thislen >= minlen && thislen > bestlen) { + besti = i; + bestlen = thislen; + } + } + /* + * If not done yet, find the start of the next free space. 
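+ * Skipping ahead here avoids re-testing, bit by bit, the allocated
+ * stretch that xfs_rtcheck_range() already rejected.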
+ */ + if (next < end) { + error = xfs_rtfind_forw(mp, tp, next, end, &i); + if (error) { + return error; + } + } else + break; + } + /* + * Searched the whole thing & didn't find a maxlen free extent. + */ + if (minlen < maxlen && besti != -1) { + xfs_extlen_t p; /* amount to trim length by */ + + /* + * If size should be a multiple of prod, make that so. + */ + if (prod > 1 && (p = do_mod(bestlen, prod))) + bestlen -= p; + /* + * Allocate besti for bestlen & return that. + */ + error = xfs_rtallocate_range(mp, tp, besti, bestlen, rbpp, rsb); + if (error) { + return error; + } + *len = bestlen; + *rtblock = besti; + return 0; + } + /* + * Allocation failed. Set *nextp to the next block to try. + */ + *nextp = next; + *rtblock = NULLRTBLOCK; + return 0; +} + +/* + * Allocate an extent of length minlen<=len<=maxlen, starting at block + * bno. If we don't get maxlen then use prod to trim the length, if given. + * Returns error; returns starting block in *rtblock. + * The lengths are all in rtextents. + */ +STATIC int /* error */ +xfs_rtallocate_extent_exact( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to allocate */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock) /* out: start block allocated */ +{ + int error; /* error value */ + xfs_extlen_t i; /* extent length trimmed due to prod */ + int isfree; /* extent is free */ + xfs_rtblock_t next; /* next block to try (dummy) */ + + ASSERT(minlen % prod == 0 && maxlen % prod == 0); + /* + * Check if the range in question (for maxlen) is free. + */ + error = xfs_rtcheck_range(mp, tp, bno, maxlen, 1, &next, &isfree); + if (error) { + return error; + } + if (isfree) { + /* + * If it is, allocate it and return success. + */ + error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb); + if (error) { + return error; + } + *len = maxlen; + *rtblock = bno; + return 0; + } + /* + * If not, allocate what there is, if it's at least minlen. + */ + maxlen = next - bno; + if (maxlen < minlen) { + /* + * Failed, return failure status. + */ + *rtblock = NULLRTBLOCK; + return 0; + } + /* + * Trim off tail of extent, if prod is specified. + */ + if (prod > 1 && (i = maxlen % prod)) { + maxlen -= i; + if (maxlen < minlen) { + /* + * Now we can't do it, return failure status. + */ + *rtblock = NULLRTBLOCK; + return 0; + } + } + /* + * Allocate what we can and return it. + */ + error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb); + if (error) { + return error; + } + *len = maxlen; + *rtblock = bno; + return 0; +} + +/* + * Allocate an extent of length minlen<=len<=maxlen, starting as near + * to bno as possible. If we don't get maxlen then use prod to trim + * the length, if given. The lengths are all in rtextents. 
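+ * (Search order, derived from the loop control below: after the
+ * exact attempt at bno fails, bitmap blocks are probed outward
+ * from bbno as bbno, bbno+1, bbno-1, bbno+2, bbno-2, ... until
+ * both directions run off the ends of the bitmap.)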
+ */
+STATIC int /* error */
+xfs_rtallocate_extent_near(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t bno, /* starting block number to allocate */
+ xfs_extlen_t minlen, /* minimum length to allocate */
+ xfs_extlen_t maxlen, /* maximum length to allocate */
+ xfs_extlen_t *len, /* out: actual length allocated */
+ xfs_buf_t **rbpp, /* in/out: summary block buffer */
+ xfs_fsblock_t *rsb, /* in/out: summary block number */
+ xfs_extlen_t prod, /* extent product factor */
+ xfs_rtblock_t *rtblock) /* out: start block allocated */
+{
+ int any; /* any useful extents from summary */
+ xfs_rtblock_t bbno; /* bitmap block number */
+ int error; /* error value */
+ int i; /* bitmap block offset (loop control) */
+ int j; /* secondary loop control */
+ int log2len; /* log2 of minlen */
+ xfs_rtblock_t n; /* next block to try */
+ xfs_rtblock_t r; /* result block */
+
+ ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+ /*
+ * If the block number given is off the end, silently set it to
+ * the last block.
+ */
+ if (bno >= mp->m_sb.sb_rextents)
+ bno = mp->m_sb.sb_rextents - 1;
+ /*
+ * Try the exact allocation first.
+ */
+ error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen, len,
+ rbpp, rsb, prod, &r);
+ if (error) {
+ return error;
+ }
+ /*
+ * If the exact allocation worked, return that.
+ */
+ if (r != NULLRTBLOCK) {
+ *rtblock = r;
+ return 0;
+ }
+ bbno = XFS_BITTOBLOCK(mp, bno);
+ i = 0;
+ log2len = xfs_highbit32(minlen);
+ /*
+ * Loop over all bitmap blocks (bbno + i is current block).
+ */
+ for (;;) {
+ /*
+ * Get summary information of extents of all useful levels
+ * starting in this bitmap block.
+ */
+ error = xfs_rtany_summary(mp, tp, log2len, mp->m_rsumlevels - 1,
+ bbno + i, rbpp, rsb, &any);
+ if (error) {
+ return error;
+ }
+ /*
+ * If there are any useful extents starting here, try
+ * allocating one.
+ */
+ if (any) {
+ /*
+ * On the positive side of the starting location.
+ */
+ if (i >= 0) {
+ /*
+ * Try to allocate an extent starting in
+ * this block.
+ */
+ error = xfs_rtallocate_extent_block(mp, tp,
+ bbno + i, minlen, maxlen, len, &n, rbpp,
+ rsb, prod, &r);
+ if (error) {
+ return error;
+ }
+ /*
+ * If it worked, return it.
+ */
+ if (r != NULLRTBLOCK) {
+ *rtblock = r;
+ return 0;
+ }
+ }
+ /*
+ * On the negative side of the starting location.
+ */
+ else { /* i < 0 */
+ /*
+ * Loop backwards through the bitmap blocks from
+ * the starting point-1 up to where we are now.
+ * There should be an extent which ends in this
+ * bitmap block and is long enough.
+ */
+ for (j = -1; j > i; j--) {
+ /*
+ * Grab the summary information for
+ * this bitmap block.
+ */
+ error = xfs_rtany_summary(mp, tp,
+ log2len, mp->m_rsumlevels - 1,
+ bbno + j, rbpp, rsb, &any);
+ if (error) {
+ return error;
+ }
+ /*
+ * If there's no extent given in the
+ * summary that means the extent we
+ * found must carry over from an
+ * earlier block. If there is an
+ * extent given, we've already tried
+ * that allocation, don't do it again.
+ */
+ if (any)
+ continue;
+ error = xfs_rtallocate_extent_block(mp,
+ tp, bbno + j, minlen, maxlen,
+ len, &n, rbpp, rsb, prod, &r);
+ if (error) {
+ return error;
+ }
+ /*
+ * If it works, return the extent.
+ */
+ if (r != NULLRTBLOCK) {
+ *rtblock = r;
+ return 0;
+ }
+ }
+ /*
+ * There weren't intervening bitmap blocks
+ * with a long enough extent, or the
+ * allocation didn't work for some reason
+ * (i.e. it's a little too short).
+ * Try to allocate from the summary block + * that we found. + */ + error = xfs_rtallocate_extent_block(mp, tp, + bbno + i, minlen, maxlen, len, &n, rbpp, + rsb, prod, &r); + if (error) { + return error; + } + /* + * If it works, return the extent. + */ + if (r != NULLRTBLOCK) { + *rtblock = r; + return 0; + } + } + } + /* + * Loop control. If we were on the positive side, and there's + * still more blocks on the negative side, go there. + */ + if (i > 0 && (int)bbno - i >= 0) + i = -i; + /* + * If positive, and no more negative, but there are more + * positive, go there. + */ + else if (i > 0 && (int)bbno + i < mp->m_sb.sb_rbmblocks - 1) + i++; + /* + * If negative or 0 (just started), and there are positive + * blocks to go, go there. The 0 case moves to block 1. + */ + else if (i <= 0 && (int)bbno - i < mp->m_sb.sb_rbmblocks - 1) + i = 1 - i; + /* + * If negative or 0 and there are more negative blocks, + * go there. + */ + else if (i <= 0 && (int)bbno + i > 0) + i--; + /* + * Must be done. Return failure. + */ + else + break; + } + *rtblock = NULLRTBLOCK; + return 0; +} + +/* + * Allocate an extent of length minlen<=len<=maxlen, with no position + * specified. If we don't get maxlen then use prod to trim + * the length, if given. The lengths are all in rtextents. + */ +STATIC int /* error */ +xfs_rtallocate_extent_size( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock) /* out: start block allocated */ +{ + int error; /* error value */ + int i; /* bitmap block number */ + int l; /* level number (loop control) */ + xfs_rtblock_t n; /* next block to be tried */ + xfs_rtblock_t r; /* result block number */ + xfs_suminfo_t sum; /* summary information for extents */ + + ASSERT(minlen % prod == 0 && maxlen % prod == 0); + /* + * Loop over all the levels starting with maxlen. + * At each level, look at all the bitmap blocks, to see if there + * are extents starting there that are long enough (>= maxlen). + * Note, only on the initial level can the allocation fail if + * the summary says there's an extent. + */ + for (l = xfs_highbit32(maxlen); l < mp->m_rsumlevels; l++) { + /* + * Loop over all the bitmap blocks. + */ + for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) { + /* + * Get the summary for this level/block. + */ + error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, + &sum); + if (error) { + return error; + } + /* + * Nothing there, on to the next block. + */ + if (!sum) + continue; + /* + * Try allocating the extent. + */ + error = xfs_rtallocate_extent_block(mp, tp, i, maxlen, + maxlen, len, &n, rbpp, rsb, prod, &r); + if (error) { + return error; + } + /* + * If it worked, return that. + */ + if (r != NULLRTBLOCK) { + *rtblock = r; + return 0; + } + /* + * If the "next block to try" returned from the + * allocator is beyond the next bitmap block, + * skip to that bitmap block. + */ + if (XFS_BITTOBLOCK(mp, n) > i + 1) + i = XFS_BITTOBLOCK(mp, n) - 1; + } + } + /* + * Didn't find any maxlen blocks. Try smaller ones, unless + * we're asking for a fixed size extent. + */ + if (minlen > --maxlen) { + *rtblock = NULLRTBLOCK; + return 0; + } + /* + * Loop over sizes, from maxlen down to minlen. 
+ * This time, when we do the allocations, allow smaller ones + * to succeed. + */ + for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) { + /* + * Loop over all the bitmap blocks, try an allocation + * starting in that block. + */ + for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) { + /* + * Get the summary information for this level/block. + */ + error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, + &sum); + if (error) { + return error; + } + /* + * If nothing there, go on to next. + */ + if (!sum) + continue; + /* + * Try the allocation. Make sure the specified + * minlen/maxlen are in the possible range for + * this summary level. + */ + error = xfs_rtallocate_extent_block(mp, tp, i, + XFS_RTMAX(minlen, 1 << l), + XFS_RTMIN(maxlen, (1 << (l + 1)) - 1), + len, &n, rbpp, rsb, prod, &r); + if (error) { + return error; + } + /* + * If it worked, return that extent. + */ + if (r != NULLRTBLOCK) { + *rtblock = r; + return 0; + } + /* + * If the "next block to try" returned from the + * allocator is beyond the next bitmap block, + * skip to that bitmap block. + */ + if (XFS_BITTOBLOCK(mp, n) > i + 1) + i = XFS_BITTOBLOCK(mp, n) - 1; + } + } + /* + * Got nothing, return failure. + */ + *rtblock = NULLRTBLOCK; + return 0; +} + +/* + * Mark an extent specified by start and len allocated. + * Updates all the summary information as well as the bitmap. + */ +STATIC int /* error */ +xfs_rtallocate_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* start block to allocate */ + xfs_extlen_t len, /* length to allocate */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb) /* in/out: summary block number */ +{ + xfs_rtblock_t end; /* end of the allocated extent */ + int error; /* error value */ + xfs_rtblock_t postblock; /* first block allocated > end */ + xfs_rtblock_t preblock; /* first block allocated < start */ + + end = start + len - 1; + /* + * Assume we're allocating out of the middle of a free extent. + * We need to find the beginning and end of the extent so we can + * properly update the summary. + */ + error = xfs_rtfind_back(mp, tp, start, 0, &preblock); + if (error) { + return error; + } + /* + * Find the next allocated block (end of free extent). + */ + error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1, + &postblock); + if (error) { + return error; + } + /* + * Decrement the summary information corresponding to the entire + * (old) free extent. + */ + error = xfs_rtmodify_summary(mp, tp, + XFS_RTBLOCKLOG(postblock + 1 - preblock), + XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb); + if (error) { + return error; + } + /* + * If there are blocks not being allocated at the front of the + * old extent, add summary data for them to be free. + */ + if (preblock < start) { + error = xfs_rtmodify_summary(mp, tp, + XFS_RTBLOCKLOG(start - preblock), + XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb); + if (error) { + return error; + } + } + /* + * If there are blocks not being allocated at the end of the + * old extent, add summary data for them to be free. + */ + if (postblock > end) { + error = xfs_rtmodify_summary(mp, tp, + XFS_RTBLOCKLOG(postblock - end), + XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb); + if (error) { + return error; + } + } + /* + * Modify the bitmap to mark this extent allocated. 
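+ * (Worked example, numbers hypothetical: allocating [12,15] out
+ * of a free extent [8,23] decrements the summary count for the
+ * old 16-extent run at level XFS_RTBLOCKLOG(16) above, and the
+ * two conditional increments above re-add the leftover free
+ * pieces [8,11] and [16,23] at levels XFS_RTBLOCKLOG(4) and
+ * XFS_RTBLOCKLOG(8).)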
+ */ + error = xfs_rtmodify_range(mp, tp, start, len, 0); + return error; +} + +/* + * Return whether there are any free extents in the size range given + * by low and high, for the bitmap block bbno. + */ +STATIC int /* error */ +xfs_rtany_summary( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + int low, /* low log2 extent size */ + int high, /* high log2 extent size */ + xfs_rtblock_t bbno, /* bitmap block number */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + int *stat) /* out: any good extents here? */ +{ + int error; /* error value */ + int log; /* loop counter, log2 of ext. size */ + xfs_suminfo_t sum; /* summary data */ + + /* + * Loop over logs of extent sizes. Order is irrelevant. + */ + for (log = low; log <= high; log++) { + /* + * Get one summary datum. + */ + error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum); + if (error) { + return error; + } + /* + * If there are any, return success. + */ + if (sum) { + *stat = 1; + return 0; + } + } + /* + * Found nothing, return failure. + */ + *stat = 0; + return 0; +} + +/* + * Get a buffer for the bitmap or summary file block specified. + * The buffer is returned read and locked. + */ +STATIC int /* error */ +xfs_rtbuf_get( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t block, /* block number in bitmap or summary */ + int issum, /* is summary not bitmap */ + xfs_buf_t **bpp) /* output: buffer for the block */ +{ + xfs_buf_t *bp; /* block buffer, result */ + xfs_daddr_t d; /* disk addr of block */ + int error; /* error value */ + xfs_fsblock_t fsb; /* fs block number for block */ + xfs_inode_t *ip; /* bitmap or summary inode */ + + ip = issum ? mp->m_rsumip : mp->m_rbmip; + /* + * Map from the file offset (block) and inode number to the + * file system block. + */ + error = xfs_bmapi_single(tp, ip, XFS_DATA_FORK, &fsb, block); + if (error) { + return error; + } + ASSERT(fsb != NULLFSBLOCK); + /* + * Convert to disk address for buffer cache. + */ + d = XFS_FSB_TO_DADDR(mp, fsb); + /* + * Read the buffer. + */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, + mp->m_bsize, 0, &bp); + if (error) { + return error; + } + ASSERT(bp && !XFS_BUF_GETERROR(bp)); + *bpp = bp; + return 0; +} + +#ifdef DEBUG +/* + * Check that the given extent (block range) is allocated already. + */ +STATIC int /* error */ +xfs_rtcheck_alloc_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* out: 1 for allocated, 0 for not */ +{ + xfs_rtblock_t new; /* dummy for xfs_rtcheck_range */ + + return xfs_rtcheck_range(mp, tp, bno, len, 0, &new, stat); +} +#endif + +#ifdef DEBUG +/* + * Check whether the given block in the bitmap has the given value. 
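+ * (How the test below works: -val is all one-bits when val is 1
+ * and all zero-bits when val is 0, so wval ^ -val has a zero at
+ * the tested position exactly when the bit already holds the
+ * wanted value; masking with 1 << bit and inverting the result
+ * gives the match indication.)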
+ */ +STATIC int /* 1 for matches, 0 for not */ +xfs_rtcheck_bit( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* bit (block) to check */ + int val) /* 1 for free, 0 for allocated */ +{ + int bit; /* bit number in the word */ + xfs_rtblock_t block; /* bitmap block number */ + xfs_buf_t *bp; /* buf for the block */ + xfs_rtword_t *bufp; /* pointer into the buffer */ + /* REFERENCED */ + int error; /* error value */ + xfs_rtword_t wdiff; /* difference between bit & expected */ + int word; /* word number in the buffer */ + xfs_rtword_t wval; /* word value from buffer */ + + block = XFS_BITTOBLOCK(mp, start); + error = xfs_rtbuf_get(mp, tp, block, 0, &bp); + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = XFS_BITTOWORD(mp, start); + bit = (int)(start & (XFS_NBWORD - 1)); + wval = bufp[word]; + xfs_trans_brelse(tp, bp); + wdiff = (wval ^ -val) & ((xfs_rtword_t)1 << bit); + return !wdiff; +} +#endif /* DEBUG */ + +#if 0 +/* + * Check that the given extent (block range) is free already. + */ +STATIC int /* error */ +xfs_rtcheck_free_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* out: 1 for free, 0 for not */ +{ + xfs_rtblock_t new; /* dummy for xfs_rtcheck_range */ + + return xfs_rtcheck_range(mp, tp, bno, len, 1, &new, stat); +} +#endif + +/* + * Check that the given range is either all allocated (val = 0) or + * all free (val = 1). + */ +STATIC int /* error */ +xfs_rtcheck_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block number of extent */ + xfs_extlen_t len, /* length of extent */ + int val, /* 1 for free, 0 for allocated */ + xfs_rtblock_t *new, /* out: first block not matching */ + int *stat) /* out: 1 for matches, 0 for not */ +{ + xfs_rtword_t *b; /* current word in buffer */ + int bit; /* bit number in the word */ + xfs_rtblock_t block; /* bitmap block number */ + xfs_buf_t *bp; /* buf for the block */ + xfs_rtword_t *bufp; /* starting word in buffer */ + int error; /* error value */ + xfs_rtblock_t i; /* current bit number rel. to start */ + xfs_rtblock_t lastbit; /* last useful bit in word */ + xfs_rtword_t mask; /* mask of relevant bits for value */ + xfs_rtword_t wdiff; /* difference from wanted value */ + int word; /* word number in the buffer */ + + /* + * Compute starting bitmap block number + */ + block = XFS_BITTOBLOCK(mp, start); + /* + * Read the bitmap block. + */ + error = xfs_rtbuf_get(mp, tp, block, 0, &bp); + if (error) { + return error; + } + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + /* + * Compute the starting word's address, and starting bit. + */ + word = XFS_BITTOWORD(mp, start); + b = &bufp[word]; + bit = (int)(start & (XFS_NBWORD - 1)); + /* + * 0 (allocated) => all zero's; 1 (free) => all one's. + */ + val = -val; + /* + * If not starting on a word boundary, deal with the first + * (partial) word. + */ + if (bit) { + /* + * Compute first bit not examined. + */ + lastbit = XFS_RTMIN(bit + len, XFS_NBWORD); + /* + * Mask of relevant bits. + */ + mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit; + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = (*b ^ val) & mask)) { + /* + * Different, compute first wrong bit and return. 
+ */ + xfs_trans_brelse(tp, bp); + i = XFS_RTLOBIT(wdiff) - bit; + *new = start + i; + *stat = 0; + return 0; + } + i = lastbit - bit; + /* + * Go on to next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * If done with this block, get the next one. + */ + xfs_trans_brelse(tp, bp); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the next word in the buffer. + */ + b++; + } + } else { + /* + * Starting on a word boundary, no partial word. + */ + i = 0; + } + /* + * Loop over whole words in buffers. When we use up one buffer + * we move on to the next one. + */ + while (len - i >= XFS_NBWORD) { + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = *b ^ val)) { + /* + * Different, compute first wrong bit and return. + */ + xfs_trans_brelse(tp, bp); + i += XFS_RTLOBIT(wdiff); + *new = start + i; + *stat = 0; + return 0; + } + i += XFS_NBWORD; + /* + * Go on to next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * If done with this block, get the next one. + */ + xfs_trans_brelse(tp, bp); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the next word in the buffer. + */ + b++; + } + } + /* + * If not ending on a word boundary, deal with the last + * (partial) word. + */ + if ((lastbit = len - i)) { + /* + * Mask of relevant bits. + */ + mask = ((xfs_rtword_t)1 << lastbit) - 1; + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = (*b ^ val) & mask)) { + /* + * Different, compute first wrong bit and return. + */ + xfs_trans_brelse(tp, bp); + i += XFS_RTLOBIT(wdiff); + *new = start + i; + *stat = 0; + return 0; + } else + i = len; + } + /* + * Successful, return. + */ + xfs_trans_brelse(tp, bp); + *new = start + i; + *stat = 1; + return 0; +} + +/* + * Copy and transform the summary file, given the old and new + * parameters in the mount structures. + */ +STATIC int /* error */ +xfs_rtcopy_summary( + xfs_mount_t *omp, /* old file system mount point */ + xfs_mount_t *nmp, /* new file system mount point */ + xfs_trans_t *tp) /* transaction pointer */ +{ + xfs_rtblock_t bbno; /* bitmap block number */ + xfs_buf_t *bp; /* summary buffer */ + int error; /* error return value */ + int log; /* summary level number (log length) */ + xfs_suminfo_t sum; /* summary data */ + xfs_fsblock_t sumbno; /* summary block number */ + + bp = NULL; + for (log = omp->m_rsumlevels - 1; log >= 0; log--) { + for (bbno = omp->m_sb.sb_rbmblocks - 1; + (xfs_srtblock_t)bbno >= 0; + bbno--) { + error = xfs_rtget_summary(omp, tp, log, bbno, &bp, + &sumbno, &sum); + if (error) + return error; + if (sum == 0) + continue; + error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum, + &bp, &sumbno); + if (error) + return error; + error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum, + &bp, &sumbno); + if (error) + return error; + ASSERT(sum > 0); + } + } + return 0; +} + +/* + * Searching backward from start to limit, find the first block whose + * allocated/free state is different from start's. 
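+ * (Example, values hypothetical: if blocks 7-10 are free and
+ * block 6 is allocated, a backward search from start 10 with
+ * limit 0 returns 7 in *rtblock, the low end of the run that
+ * contains the starting block.)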
+ */ +STATIC int /* error */ +xfs_rtfind_back( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block to look at */ + xfs_rtblock_t limit, /* last block to look at */ + xfs_rtblock_t *rtblock) /* out: start block found */ +{ + xfs_rtword_t *b; /* current word in buffer */ + int bit; /* bit number in the word */ + xfs_rtblock_t block; /* bitmap block number */ + xfs_buf_t *bp; /* buf for the block */ + xfs_rtword_t *bufp; /* starting word in buffer */ + int error; /* error value */ + xfs_rtblock_t firstbit; /* first useful bit in the word */ + xfs_rtblock_t i; /* current bit number rel. to start */ + xfs_rtblock_t len; /* length of inspected area */ + xfs_rtword_t mask; /* mask of relevant bits for value */ + xfs_rtword_t want; /* mask for "good" values */ + xfs_rtword_t wdiff; /* difference from wanted value */ + int word; /* word number in the buffer */ + + /* + * Compute and read in starting bitmap block for starting block. + */ + block = XFS_BITTOBLOCK(mp, start); + error = xfs_rtbuf_get(mp, tp, block, 0, &bp); + if (error) { + return error; + } + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + /* + * Get the first word's index & point to it. + */ + word = XFS_BITTOWORD(mp, start); + b = &bufp[word]; + bit = (int)(start & (XFS_NBWORD - 1)); + len = start - limit + 1; + /* + * Compute match value, based on the bit at start: if 1 (free) + * then all-ones, else all-zeroes. + */ + want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0; + /* + * If the starting position is not word-aligned, deal with the + * partial word. + */ + if (bit < XFS_NBWORD - 1) { + /* + * Calculate first (leftmost) bit number to look at, + * and mask for all the relevant bits in this word. + */ + firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0); + mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) << + firstbit; + /* + * Calculate the difference between the value there + * and what we're looking for. + */ + if ((wdiff = (*b ^ want) & mask)) { + /* + * Different. Mark where we are and return. + */ + xfs_trans_brelse(tp, bp); + i = bit - XFS_RTHIBIT(wdiff); + *rtblock = start - i + 1; + return 0; + } + i = bit - firstbit + 1; + /* + * Go on to previous block if that's where the previous word is + * and we need the previous word. + */ + if (--word == -1 && i < len) { + /* + * If done with this block, get the previous one. + */ + xfs_trans_brelse(tp, bp); + error = xfs_rtbuf_get(mp, tp, --block, 0, &bp); + if (error) { + return error; + } + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = XFS_BLOCKWMASK(mp); + b = &bufp[word]; + } else { + /* + * Go on to the previous word in the buffer. + */ + b--; + } + } else { + /* + * Starting on a word boundary, no partial word. + */ + i = 0; + } + /* + * Loop over whole words in buffers. When we use up one buffer + * we move on to the previous one. + */ + while (len - i >= XFS_NBWORD) { + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = *b ^ want)) { + /* + * Different, mark where we are and return. + */ + xfs_trans_brelse(tp, bp); + i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff); + *rtblock = start - i + 1; + return 0; + } + i += XFS_NBWORD; + /* + * Go on to previous block if that's where the previous word is + * and we need the previous word. + */ + if (--word == -1 && i < len) { + /* + * If done with this block, get the previous one. 
+ */
+ xfs_trans_brelse(tp, bp);
+ error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+ word = XFS_BLOCKWMASK(mp);
+ b = &bufp[word];
+ } else {
+ /*
+ * Go on to the previous word in the buffer.
+ */
+ b--;
+ }
+ }
+ /*
+ * If not ending on a word boundary, deal with the last
+ * (partial) word.
+ */
+ if (len - i) {
+ /*
+ * Calculate first (leftmost) bit number to look at,
+ * and mask for all the relevant bits in this word.
+ */
+ firstbit = XFS_NBWORD - (len - i);
+ mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = (*b ^ want) & mask)) {
+ /*
+ * Different, mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+ *rtblock = start - i + 1;
+ return 0;
+ } else
+ i = len;
+ }
+ /*
+ * No match, return that we scanned the whole area.
+ */
+ xfs_trans_brelse(tp, bp);
+ *rtblock = start - i + 1;
+ return 0;
+}
+
+/*
+ * Searching forward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+STATIC int /* error */
+xfs_rtfind_forw(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t start, /* starting block to look at */
+ xfs_rtblock_t limit, /* last block to look at */
+ xfs_rtblock_t *rtblock) /* out: start block found */
+{
+ xfs_rtword_t *b; /* current word in buffer */
+ int bit; /* bit number in the word */
+ xfs_rtblock_t block; /* bitmap block number */
+ xfs_buf_t *bp; /* buf for the block */
+ xfs_rtword_t *bufp; /* starting word in buffer */
+ int error; /* error value */
+ xfs_rtblock_t i; /* current bit number rel. to start */
+ xfs_rtblock_t lastbit; /* last useful bit in the word */
+ xfs_rtblock_t len; /* length of inspected area */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ xfs_rtword_t want; /* mask for "good" values */
+ xfs_rtword_t wdiff; /* difference from wanted value */
+ int word; /* word number in the buffer */
+
+ /*
+ * Compute and read in starting bitmap block for starting block.
+ */
+ block = XFS_BITTOBLOCK(mp, start);
+ error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+ /*
+ * Get the first word's index & point to it.
+ */
+ word = XFS_BITTOWORD(mp, start);
+ b = &bufp[word];
+ bit = (int)(start & (XFS_NBWORD - 1));
+ len = limit - start + 1;
+ /*
+ * Compute match value, based on the bit at start: if 1 (free)
+ * then all-ones, else all-zeroes.
+ */
+ want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+ /*
+ * If the starting position is not word-aligned, deal with the
+ * partial word.
+ */
+ if (bit) {
+ /*
+ * Calculate last (rightmost) bit number to look at,
+ * and mask for all the relevant bits in this word.
+ */
+ lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+ mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+ /*
+ * Calculate the difference between the value there
+ * and what we're looking for.
+ */
+ if ((wdiff = (*b ^ want) & mask)) {
+ /*
+ * Different. Mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i = XFS_RTLOBIT(wdiff) - bit;
+ *rtblock = start + i - 1;
+ return 0;
+ }
+ i = lastbit - bit;
+ /*
+ * Go on to next block if that's where the next word is
+ * and we need the next word.
+ */
+ if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ /*
+ * If done with this block, get the next one.
+ */
+ xfs_trans_brelse(tp, bp);
+ error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+ word = 0;
+ } else {
+ /*
+ * Go on to the next word in the buffer.
+ */
+ b++;
+ }
+ } else {
+ /*
+ * Starting on a word boundary, no partial word.
+ */
+ i = 0;
+ }
+ /*
+ * Loop over whole words in buffers. When we use up one buffer
+ * we move on to the next one.
+ */
+ while (len - i >= XFS_NBWORD) {
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = *b ^ want)) {
+ /*
+ * Different, mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i += XFS_RTLOBIT(wdiff);
+ *rtblock = start + i - 1;
+ return 0;
+ }
+ i += XFS_NBWORD;
+ /*
+ * Go on to next block if that's where the next word is
+ * and we need the next word.
+ */
+ if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ /*
+ * If done with this block, get the next one.
+ */
+ xfs_trans_brelse(tp, bp);
+ error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+ word = 0;
+ } else {
+ /*
+ * Go on to the next word in the buffer.
+ */
+ b++;
+ }
+ }
+ /*
+ * If not ending on a word boundary, deal with the last
+ * (partial) word.
+ */
+ if ((lastbit = len - i)) {
+ /*
+ * Calculate mask for all the relevant bits in this word.
+ */
+ mask = ((xfs_rtword_t)1 << lastbit) - 1;
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = (*b ^ want) & mask)) {
+ /*
+ * Different, mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i += XFS_RTLOBIT(wdiff);
+ *rtblock = start + i - 1;
+ return 0;
+ } else
+ i = len;
+ }
+ /*
+ * No match, return that we scanned the whole area.
+ */
+ xfs_trans_brelse(tp, bp);
+ *rtblock = start + i - 1;
+ return 0;
+}
+
+/*
+ * Mark an extent specified by start and len freed.
+ * Updates all the summary information as well as the bitmap.
+ */
+STATIC int /* error */
+xfs_rtfree_range(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t start, /* starting block to free */
+ xfs_extlen_t len, /* length to free */
+ xfs_buf_t **rbpp, /* in/out: summary block buffer */
+ xfs_fsblock_t *rsb) /* in/out: summary block number */
+{
+ xfs_rtblock_t end; /* end of the freed extent */
+ int error; /* error value */
+ xfs_rtblock_t postblock; /* first block freed > end */
+ xfs_rtblock_t preblock; /* first block freed < start */
+
+ end = start + len - 1;
+ /*
+ * Modify the bitmap to mark this extent freed.
+ */
+ error = xfs_rtmodify_range(mp, tp, start, len, 1);
+ if (error) {
+ return error;
+ }
+ /*
+ * Assume we're freeing out of the middle of an allocated extent.
+ * We need to find the beginning and end of the extent so we can
+ * properly update the summary.
+ */
+ error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+ if (error) {
+ return error;
+ }
+ /*
+ * Find the next allocated block (end of allocated extent).
+ */
+ error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+ &postblock);
+ if (error) {
+ return error;
+ }
+ /*
+ * If there are blocks not being freed at the front of the
+ * old extent, add summary data for them to be allocated.
+ */
+ if (preblock < start) {
+ error = xfs_rtmodify_summary(mp, tp,
+ XFS_RTBLOCKLOG(start - preblock),
+ XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+ if (error) {
+ return error;
+ }
+ }
+ /*
+ * If there are blocks not being freed at the end of the
+ * old extent, add summary data for them to be allocated.
+ */
+ if (postblock > end) {
+ error = xfs_rtmodify_summary(mp, tp,
+ XFS_RTBLOCKLOG(postblock - end),
+ XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
+ if (error) {
+ return error;
+ }
+ }
+ /*
+ * Increment the summary information corresponding to the entire
+ * (new) free extent.
+ */
+ error = xfs_rtmodify_summary(mp, tp,
+ XFS_RTBLOCKLOG(postblock + 1 - preblock),
+ XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+ return error;
+}
+
+/*
+ * Read and return the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
+ */
+STATIC int /* error */
+xfs_rtget_summary(
+ xfs_mount_t *mp, /* file system mount structure */
+ xfs_trans_t *tp, /* transaction pointer */
+ int log, /* log2 of extent size */
+ xfs_rtblock_t bbno, /* bitmap block number */
+ xfs_buf_t **rbpp, /* in/out: summary block buffer */
+ xfs_fsblock_t *rsb, /* in/out: summary block number */
+ xfs_suminfo_t *sum) /* out: summary info for this block */
+{
+ xfs_buf_t *bp; /* buffer for summary block */
+ int error; /* error value */
+ xfs_fsblock_t sb; /* summary fsblock */
+ int so; /* index into the summary file */
+ xfs_suminfo_t *sp; /* pointer to returned data */
+
+ /*
+ * Compute entry number in the summary file.
+ */
+ so = XFS_SUMOFFS(mp, log, bbno);
+ /*
+ * Compute the block number in the summary file.
+ */
+ sb = XFS_SUMOFFSTOBLOCK(mp, so);
+ /*
+ * If we have an old buffer, and the block number matches, use that.
+ */
+ if (rbpp && *rbpp && *rsb == sb)
+ bp = *rbpp;
+ /*
+ * Otherwise we have to get the buffer.
+ */
+ else {
+ /*
+ * If there was an old one, get rid of it first.
+ */
+ if (rbpp && *rbpp)
+ xfs_trans_brelse(tp, *rbpp);
+ error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+ if (error) {
+ return error;
+ }
+ /*
+ * Remember this buffer and block for the next call.
+ */
+ if (rbpp) {
+ *rbpp = bp;
+ *rsb = sb;
+ }
+ }
+ /*
+ * Point to the summary information & copy it out.
+ */
+ sp = XFS_SUMPTR(mp, bp, so);
+ *sum = *sp;
+ /*
+ * Drop the buffer if we're not asked to remember it.
+ */
+ if (!rbpp)
+ xfs_trans_brelse(tp, bp);
+ return 0;
+}
+
+/*
+ * Set the given range of bitmap bits to the given value.
+ * Do whatever I/O and logging is required.
+ */
+STATIC int /* error */
+xfs_rtmodify_range(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t start, /* starting block to modify */
+ xfs_extlen_t len, /* length of extent to modify */
+ int val) /* 1 for free, 0 for allocated */
+{
+ xfs_rtword_t *b; /* current word in buffer */
+ int bit; /* bit number in the word */
+ xfs_rtblock_t block; /* bitmap block number */
+ xfs_buf_t *bp; /* buf for the block */
+ xfs_rtword_t *bufp; /* starting word in buffer */
+ int error; /* error value */
+ xfs_rtword_t *first; /* first used word in the buffer */
+ int i; /* current bit number rel. to start */
+ int lastbit; /* last useful bit in word */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ int word; /* word number in the buffer */
+
+ /*
+ * Compute starting bitmap block number.
+ */
+ block = XFS_BITTOBLOCK(mp, start);
+ /*
+ * Read the bitmap block, and point to its data.
+ */
+ error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
+ /*
+ * Compute the starting word's address, and starting bit.
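+ * (Illustration, assuming the 32-bit bitmap words used here:
+ * start = 100 gives word = (100 >> 5) & XFS_BLOCKWMASK(mp) = 3
+ * and bit = 100 & 31 = 4, i.e. bit 4 of the fourth word in this
+ * bitmap block.)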
+ */ + word = XFS_BITTOWORD(mp, start); + first = b = &bufp[word]; + bit = (int)(start & (XFS_NBWORD - 1)); + /* + * 0 (allocated) => all zeroes; 1 (free) => all ones. + */ + val = -val; + /* + * If not starting on a word boundary, deal with the first + * (partial) word. + */ + if (bit) { + /* + * Compute first bit not changed and mask of relevant bits. + */ + lastbit = XFS_RTMIN(bit + len, XFS_NBWORD); + mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit; + /* + * Set/clear the active bits. + */ + if (val) + *b |= mask; + else + *b &= ~mask; + i = lastbit - bit; + /* + * Go on to the next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * Log the changed part of this block. + * Get the next one. + */ + xfs_trans_log_buf(tp, bp, + (uint)((char *)first - (char *)bufp), + (uint)((char *)b - (char *)bufp)); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the next word in the buffer + */ + b++; + } + } else { + /* + * Starting on a word boundary, no partial word. + */ + i = 0; + } + /* + * Loop over whole words in buffers. When we use up one buffer + * we move on to the next one. + */ + while (len - i >= XFS_NBWORD) { + /* + * Set the word value correctly. + */ + *b = val; + i += XFS_NBWORD; + /* + * Go on to the next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * Log the changed part of this block. + * Get the next one. + */ + xfs_trans_log_buf(tp, bp, + (uint)((char *)first - (char *)bufp), + (uint)((char *)b - (char *)bufp)); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the next word in the buffer + */ + b++; + } + } + /* + * If not ending on a word boundary, deal with the last + * (partial) word. + */ + if ((lastbit = len - i)) { + /* + * Compute a mask of relevant bits. + */ + bit = 0; + mask = ((xfs_rtword_t)1 << lastbit) - 1; + /* + * Set/clear the active bits. + */ + if (val) + *b |= mask; + else + *b &= ~mask; + b++; + } + /* + * Log any remaining changed bytes. + */ + if (b > first) + xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp), + (uint)((char *)b - (char *)bufp - 1)); + return 0; +} + +/* + * Read and modify the summary information for a given extent size, + * bitmap block combination. + * Keeps track of a current summary block, so we don't keep reading + * it from the buffer cache. + */ +STATIC int /* error */ +xfs_rtmodify_summary( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + int log, /* log2 of extent size */ + xfs_rtblock_t bbno, /* bitmap block number */ + int delta, /* change to make to summary info */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb) /* in/out: summary block number */ +{ + xfs_buf_t *bp; /* buffer for the summary block */ + int error; /* error value */ + xfs_fsblock_t sb; /* summary fsblock */ + int so; /* index into the summary file */ + xfs_suminfo_t *sp; /* pointer to returned data */ + + /* + * Compute entry number in the summary file. + */ + so = XFS_SUMOFFS(mp, log, bbno); + /* + * Compute the block number in the summary file. 
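+ * (The summary file is a flat array indexed by
+ * log * sb_rbmblocks + bbno; with 4-byte xfs_suminfo_t entries
+ * and, say, 4096-byte blocks, each summary block holds 1024
+ * entries, which is what the byte-to-block conversion below
+ * computes.)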
+ */
+ sb = XFS_SUMOFFSTOBLOCK(mp, so);
+ /*
+ * If we have an old buffer, and the block number matches, use that.
+ */
+ if (rbpp && *rbpp && *rsb == sb)
+ bp = *rbpp;
+ /*
+ * Otherwise we have to get the buffer.
+ */
+ else {
+ /*
+ * If there was an old one, get rid of it first.
+ */
+ if (rbpp && *rbpp)
+ xfs_trans_brelse(tp, *rbpp);
+ error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+ if (error) {
+ return error;
+ }
+ /*
+ * Remember this buffer and block for the next call.
+ */
+ if (rbpp) {
+ *rbpp = bp;
+ *rsb = sb;
+ }
+ }
+ /*
+ * Point to the summary information, modify and log it.
+ */
+ sp = XFS_SUMPTR(mp, bp, so);
+ *sp += delta;
+ xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)XFS_BUF_PTR(bp)),
+ (uint)((char *)sp - (char *)XFS_BUF_PTR(bp) + sizeof(*sp) - 1));
+ return 0;
+}
+
+/*
+ * Visible (exported) functions.
+ */
+
+/*
+ * Grow the realtime area of the filesystem.
+ */
+int
+xfs_growfs_rt(
+ xfs_mount_t *mp, /* mount point for filesystem */
+ xfs_growfs_rt_t *in) /* growfs rt input struct */
+{
+ xfs_rtblock_t bmbno; /* bitmap block number */
+ xfs_buf_t *bp; /* temporary buffer */
+ int cancelflags; /* flags for xfs_trans_cancel */
+ int error; /* error return value */
+ xfs_inode_t *ip; /* bitmap inode, used as lock */
+ xfs_mount_t *nmp; /* new (fake) mount structure */
+ xfs_drfsbno_t nrblocks; /* new number of realtime blocks */
+ xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */
+ xfs_drtbno_t nrextents; /* new number of realtime extents */
+ uint8_t nrextslog; /* new log2 of sb_rextents */
+ xfs_extlen_t nrsumblocks; /* new number of summary blocks */
+ uint nrsumlevels; /* new rt summary levels */
+ uint nrsumsize; /* new size of rt summary, bytes */
+ xfs_sb_t *nsbp; /* new superblock */
+ xfs_extlen_t rbmblocks; /* current number of rt bitmap blocks */
+ xfs_extlen_t rsumblocks; /* current number of rt summary blks */
+ xfs_sb_t *sbp; /* old superblock */
+ xfs_fsblock_t sumbno; /* summary block number */
+ xfs_trans_t *tp; /* transaction pointer */
+
+ sbp = &mp->m_sb;
+ /*
+ * Initial error checking.
+ */
+ if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL ||
+ (nrblocks = in->newblocks) <= sbp->sb_rblocks ||
+ (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
+ return XFS_ERROR(EINVAL);
+ /*
+ * Read in the last block of the device, make sure it exists.
+ */
+ error = xfs_read_buf(mp, mp->m_rtdev_targp,
+ XFS_FSB_TO_BB(mp, in->newblocks - 1),
+ XFS_FSB_TO_BB(mp, 1), 0, &bp);
+ if (error)
+ return error;
+ ASSERT(bp);
+ xfs_buf_relse(bp);
+ /*
+ * Calculate new parameters. These are the final values to be reached.
+ */
+ nrextents = do_div(nrblocks, in->extsize);
+ nrbmblocks = howmany_64(nrextents, NBBY * sbp->sb_blocksize);
+ nrextslog = xfs_highbit32(nrextents);
+ nrsumlevels = nrextslog + 1;
+ nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks;
+ nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize);
+ nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
+ /*
+ * New summary size can't be more than half the size of
+ * the log. This prevents us from getting a log overflow,
+ * since we'll log basically the whole summary file at once.
+ */
+ if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1))
+ return XFS_ERROR(EINVAL);
+ /*
+ * Get the old block counts for bitmap and summary inodes.
+ * These can't change since other growfs callers are locked out.
+ */
+ rbmblocks = XFS_B_TO_FSB(mp, mp->m_rbmip->i_d.di_size);
+ rsumblocks = XFS_B_TO_FSB(mp, mp->m_rsumip->i_d.di_size);
+ /*
+ * Allocate space to the bitmap and summary files, as necessary.
+ */ + if ((error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks, + mp->m_sb.sb_rbmino))) + return error; + if ((error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, + mp->m_sb.sb_rsumino))) + return error; + nmp = NULL; + /* + * Loop over the bitmap blocks. + * We will do everything one bitmap block at a time. + * Skip the current block if it is exactly full. + * This also deals with the case where there were no rtextents before. + */ + for (bmbno = sbp->sb_rbmblocks - + ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0); + bmbno < nrbmblocks; + bmbno++) { + /* + * Allocate a new (fake) mount/sb. + */ + nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP); + *nmp = *mp; + nsbp = &nmp->m_sb; + /* + * Calculate new sb and mount fields for this round. + */ + nsbp->sb_rextsize = in->extsize; + nsbp->sb_rbmblocks = bmbno + 1; + nsbp->sb_rblocks = + XFS_RTMIN(nrblocks, + nsbp->sb_rbmblocks * NBBY * + nsbp->sb_blocksize * nsbp->sb_rextsize); + nsbp->sb_rextents = do_div(nsbp->sb_rblocks, nsbp->sb_rextsize); + nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); + nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; + nrsumsize = + (uint)sizeof(xfs_suminfo_t) * nrsumlevels * + nsbp->sb_rbmblocks; + nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize); + nmp->m_rsumsize = nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks); + /* + * Start a transaction, get the log reservation. + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_FREE); + cancelflags = 0; + if ((error = xfs_trans_reserve(tp, 0, + XFS_GROWRTFREE_LOG_RES(nmp), 0, 0, 0))) + goto error_exit; + /* + * Lock out other callers by grabbing the bitmap inode lock. + */ + if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, + XFS_ILOCK_EXCL, &ip))) + goto error_exit; + ASSERT(ip == mp->m_rbmip); + /* + * Update the bitmap inode's size. + */ + mp->m_rbmip->i_d.di_size = + nsbp->sb_rbmblocks * nsbp->sb_blocksize; + xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE); + cancelflags |= XFS_TRANS_ABORT; + /* + * Get the summary inode into the transaction. + */ + if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, + XFS_ILOCK_EXCL, &ip))) + goto error_exit; + ASSERT(ip == mp->m_rsumip); + /* + * Update the summary inode's size. + */ + mp->m_rsumip->i_d.di_size = nmp->m_rsumsize; + xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE); + /* + * Copy summary data from old to new sizes. + * Do this when the real size (not block-aligned) changes. + */ + if (sbp->sb_rbmblocks != nsbp->sb_rbmblocks || + mp->m_rsumlevels != nmp->m_rsumlevels) { + error = xfs_rtcopy_summary(mp, nmp, tp); + if (error) + goto error_exit; + } + /* + * Update superblock fields. + */ + if (nsbp->sb_rextsize != sbp->sb_rextsize) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSIZE, + nsbp->sb_rextsize - sbp->sb_rextsize); + if (nsbp->sb_rbmblocks != sbp->sb_rbmblocks) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBMBLOCKS, + nsbp->sb_rbmblocks - sbp->sb_rbmblocks); + if (nsbp->sb_rblocks != sbp->sb_rblocks) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBLOCKS, + nsbp->sb_rblocks - sbp->sb_rblocks); + if (nsbp->sb_rextents != sbp->sb_rextents) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTENTS, + nsbp->sb_rextents - sbp->sb_rextents); + if (nsbp->sb_rextslog != sbp->sb_rextslog) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSLOG, + nsbp->sb_rextslog - sbp->sb_rextslog); + /* + * Free new extent. + */ + bp = NULL; + error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents, + nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno); + if (error) + goto error_exit; + /* + * Mark more blocks free in the superblock. 
+ */ + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, + nsbp->sb_rextents - sbp->sb_rextents); + /* + * Free the fake mp structure. + */ + kmem_free(nmp, sizeof(*nmp)); + nmp = NULL; + /* + * Update mp values into the real mp structure. + */ + mp->m_rsumlevels = nrsumlevels; + mp->m_rsumsize = nrsumsize; + /* + * Commit the transaction. + */ + xfs_trans_commit(tp, 0, NULL); + } + return 0; + + /* + * Error paths come here. + */ +error_exit: + if (nmp) + kmem_free(nmp, sizeof(*nmp)); + xfs_trans_cancel(tp, cancelflags); + return error; +} + +/* + * Allocate an extent in the realtime subvolume, with the usual allocation + * parameters. The length units are all in realtime extents, as is the + * result block number. + */ +int /* error */ +xfs_rtallocate_extent( + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to allocate */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_alloctype_t type, /* allocation type XFS_ALLOCTYPE... */ + int wasdel, /* was a delayed allocation extent */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock) /* out: start block allocated */ +{ + int error; /* error value */ + xfs_inode_t *ip; /* inode for bitmap file */ + xfs_mount_t *mp; /* file system mount structure */ + xfs_rtblock_t r; /* result allocated block */ + xfs_fsblock_t sb; /* summary file block number */ + xfs_buf_t *sumbp; /* summary file block buffer */ + + ASSERT(minlen > 0 && minlen <= maxlen); + mp = tp->t_mountp; + /* + * If prod is set then figure out what to do to minlen and maxlen. + */ + if (prod > 1) { + xfs_extlen_t i; + + if ((i = maxlen % prod)) + maxlen -= i; + if ((i = minlen % prod)) + minlen += prod - i; + if (maxlen < minlen) { + *rtblock = NULLRTBLOCK; + return 0; + } + } + /* + * Lock out other callers by grabbing the bitmap inode lock. + */ + error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, XFS_ILOCK_EXCL, &ip); + if (error) { + return error; + } + sumbp = NULL; + /* + * Allocate by size, or near another block, or exactly at some block. + */ + switch (type) { + case XFS_ALLOCTYPE_ANY_AG: + error = xfs_rtallocate_extent_size(mp, tp, minlen, maxlen, len, + &sumbp, &sb, prod, &r); + break; + case XFS_ALLOCTYPE_NEAR_BNO: + error = xfs_rtallocate_extent_near(mp, tp, bno, minlen, maxlen, + len, &sumbp, &sb, prod, &r); + break; + case XFS_ALLOCTYPE_THIS_BNO: + error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen, + len, &sumbp, &sb, prod, &r); + break; + default: + ASSERT(0); + } + if (error) { + return error; + } + /* + * If it worked, update the superblock. + */ + if (r != NULLRTBLOCK) { + long slen = (long)*len; + + ASSERT(*len >= minlen && *len <= maxlen); + if (wasdel) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FREXTENTS, -slen); + else + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, -slen); + } + *rtblock = r; + return 0; +} + +/* + * Free an extent in the realtime subvolume. Length is expressed in + * realtime extents, as is the block number. 
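+ * (Hypothetical usage sketch, not taken from this patch: a caller
+ * that obtained an extent with
+ *
+ *	error = xfs_rtallocate_extent(tp, 0, len, len, &alen,
+ *			XFS_ALLOCTYPE_ANY_AG, 0, 1, &bno);
+ *
+ * hands it back with xfs_rtfree_extent(tp, bno, alen); both
+ * lengths and block numbers are in units of realtime extents.)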
+ */ +int /* error */ +xfs_rtfree_extent( + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to free */ + xfs_extlen_t len) /* length of extent freed */ +{ + int error; /* error value */ + xfs_inode_t *ip; /* bitmap file inode */ + xfs_mount_t *mp; /* file system mount structure */ + xfs_fsblock_t sb; /* summary file block number */ + xfs_buf_t *sumbp; /* summary file block buffer */ + + mp = tp->t_mountp; + /* + * Synchronize by locking the bitmap inode. + */ + error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, XFS_ILOCK_EXCL, &ip); + if (error) { + return error; + } +#if defined(__KERNEL__) && defined(DEBUG) + /* + * Check to see that this whole range is currently allocated. + */ + { + int stat; /* result from checking range */ + + error = xfs_rtcheck_alloc_range(mp, tp, bno, len, &stat); + if (error) { + return error; + } + ASSERT(stat); + } +#endif + sumbp = NULL; + /* + * Free the range of realtime blocks. + */ + error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb); + if (error) { + return error; + } + /* + * Mark more blocks free in the superblock. + */ + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len); + /* + * If we've now freed all the blocks, reset the file sequence + * number to 0. + */ + if (tp->t_frextents_delta + mp->m_sb.sb_frextents == + mp->m_sb.sb_rextents) { + if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) + ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM; + *(__uint64_t *)&ip->i_d.di_atime = 0; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + } + return 0; +} + +/* + * Initialize realtime fields in the mount structure. + */ +int /* error */ +xfs_rtmount_init( + xfs_mount_t *mp) /* file system mount structure */ +{ + xfs_buf_t *bp; /* buffer for last block of subvolume */ + xfs_daddr_t d; /* address of last block of subvolume */ + int error; /* error return value */ + xfs_sb_t *sbp; /* filesystem superblock copy in mount */ + + sbp = &mp->m_sb; + if (sbp->sb_rblocks == 0) + return 0; + if (mp->m_rtdev_targp == NULL) { + cmn_err(CE_WARN, + "XFS: This filesystem has a realtime volume, use rtdev=device option"); + return XFS_ERROR(ENODEV); + } + mp->m_rsumlevels = sbp->sb_rextslog + 1; + mp->m_rsumsize = + (uint)sizeof(xfs_suminfo_t) * mp->m_rsumlevels * + sbp->sb_rbmblocks; + mp->m_rsumsize = roundup(mp->m_rsumsize, sbp->sb_blocksize); + mp->m_rbmip = mp->m_rsumip = NULL; + /* + * Check that the realtime section is an ok size. + */ + d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks); + if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) { + cmn_err(CE_WARN, "XFS: realtime mount -- %llu != %llu", + (unsigned long long) XFS_BB_TO_FSB(mp, d), + (unsigned long long) mp->m_sb.sb_rblocks); + return XFS_ERROR(E2BIG); + } + error = xfs_read_buf(mp, mp->m_rtdev_targp, + d - XFS_FSB_TO_BB(mp, 1), + XFS_FSB_TO_BB(mp, 1), 0, &bp); + if (error) { + cmn_err(CE_WARN, + "XFS: realtime mount -- xfs_read_buf failed, returned %d", error); + if (error == ENOSPC) + return XFS_ERROR(E2BIG); + return error; + } + xfs_buf_relse(bp); + return 0; +} + +/* + * Get the bitmap and summary inodes into the mount structure + * at mount time. 
+ */ +int /* error */ +xfs_rtmount_inodes( + xfs_mount_t *mp) /* file system mount structure */ +{ + int error; /* error return value */ + xfs_sb_t *sbp; + + sbp = &mp->m_sb; + if (sbp->sb_rbmino == NULLFSINO) + return 0; + error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, &mp->m_rbmip, 0); + if (error) + return error; + ASSERT(mp->m_rbmip != NULL); + ASSERT(sbp->sb_rsumino != NULLFSINO); + error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, &mp->m_rsumip, 0); + if (error) { + vnode_t *rbmvp; /* vnode for bitmap file */ + vmap_t vmap; /* vmap to delete vnode */ + + rbmvp = XFS_ITOV(mp->m_rbmip); + VMAP(rbmvp, vmap); + VN_RELE(rbmvp); + vn_purge(rbmvp, &vmap); + return error; + } + ASSERT(mp->m_rsumip != NULL); + return 0; +} + +/* + * Pick an extent for allocation at the start of a new realtime file. + * Use the sequence number stored in the atime field of the bitmap inode. + * Translate this to a fraction of the rtextents, and return the product + * of rtextents and the fraction. + * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ... + */ +int /* error */ +xfs_rtpick_extent( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_extlen_t len, /* allocation length (rtextents) */ + xfs_rtblock_t *pick) /* result rt extent */ +{ + xfs_rtblock_t b; /* result block */ + int error; /* error return value */ + xfs_inode_t *ip; /* bitmap incore inode */ + int log2; /* log of sequence number */ + __uint64_t resid; /* residual after log removed */ + __uint64_t seq; /* sequence number of file creation */ + __uint64_t *seqp; /* pointer to seqno in inode */ + + error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, XFS_ILOCK_EXCL, &ip); + if (error) + return error; + ASSERT(ip == mp->m_rbmip); + seqp = (__uint64_t *)&ip->i_d.di_atime; + if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) { + ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM; + *seqp = 0; + } + seq = *seqp; + if ((log2 = xfs_highbit64(seq)) == -1) + b = 0; + else { + resid = seq - (1ULL << log2); + b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >> + (log2 + 1); + if (b >= mp->m_sb.sb_rextents) + b = do_mod(b, mp->m_sb.sb_rextents); + if (b + len > mp->m_sb.sb_rextents) + b = mp->m_sb.sb_rextents - len; + } + *seqp = seq + 1; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + *pick = b; + return 0; +} + +#ifdef DEBUG +/* + * Debug code: print out the value of a range in the bitmap. + */ +void +xfs_rtprint_range( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block to print */ + xfs_extlen_t len) /* length to print */ +{ + xfs_extlen_t i; /* block number in the extent */ + + printk("%Ld: ", (long long)start); + for (i = 0; i < len; i++) + printk("%d", xfs_rtcheck_bit(mp, tp, start + i, 1)); + printk("\n"); +} + +/* + * Debug code: print the summary file. 
+ */ +void +xfs_rtprint_summary( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp) /* transaction pointer */ +{ + xfs_suminfo_t c; /* summary data */ + xfs_rtblock_t i; /* bitmap block number */ + int l; /* summary information level */ + int p; /* flag for printed anything */ + xfs_fsblock_t sb; /* summary block number */ + xfs_buf_t *sumbp; /* summary block buffer */ + + sumbp = NULL; + for (l = 0; l < mp->m_rsumlevels; l++) { + for (p = 0, i = 0; i < mp->m_sb.sb_rbmblocks; i++) { + (void)xfs_rtget_summary(mp, tp, l, i, &sumbp, &sb, &c); + if (c) { + if (!p) { + printk("%Ld-%Ld:", 1LL << l, + XFS_RTMIN((1LL << l) + + ((1LL << l) - 1LL), + mp->m_sb.sb_rextents)); + p = 1; + } + printk(" %Ld:%d", (long long)i, c); + } + } + if (p) + printk("\n"); + } + if (sumbp) + xfs_trans_brelse(tp, sumbp); +} +#endif /* DEBUG */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_rtalloc.h linux.21rc5-ac2/fs/xfs/xfs_rtalloc.h --- linux.21rc5/fs/xfs/xfs_rtalloc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_rtalloc.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_RTALLOC_H__ +#define __XFS_RTALLOC_H__ + +struct xfs_mount; +struct xfs_trans; + +#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) + +/* Min and max rt extent sizes, specified in bytes */ +#define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */ +#define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64KB */ +#define XFS_MIN_RTEXTSIZE (4 * 1024) /* 4KB */ + +/* + * Constants for bit manipulations. + */ +#define XFS_NBBYLOG 3 /* log2(NBBY) */ +#define XFS_WORDLOG 2 /* log2(sizeof(xfs_rtword_t)) */ +#define XFS_NBWORDLOG (XFS_NBBYLOG + XFS_WORDLOG) +#define XFS_NBWORD (1 << XFS_NBWORDLOG) +#define XFS_WORDMASK ((1 << XFS_WORDLOG) - 1) + +#define XFS_BLOCKSIZE(mp) ((mp)->m_sb.sb_blocksize) +#define XFS_BLOCKMASK(mp) ((mp)->m_blockmask) +#define XFS_BLOCKWSIZE(mp) ((mp)->m_blockwsize) +#define XFS_BLOCKWMASK(mp) ((mp)->m_blockwmask) + +/* + * Summary and bit manipulation macros. 
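+ * XFS_SUMOFFS() linearizes a (level, bitmap block) pair into an
+ * index into the summary file, level * sb_rbmblocks + block, and
+ * XFS_SUMOFFSTOBLOCK()/XFS_SUMPTR() turn that index into the
+ * summary-file block number and the xfs_suminfo_t pointer in it.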
+ */ +#define XFS_SUMOFFS(mp,ls,bb) ((int)((ls) * (mp)->m_sb.sb_rbmblocks + (bb))) +#define XFS_SUMOFFSTOBLOCK(mp,s) \ + (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog) +#define XFS_SUMPTR(mp,bp,so) \ + ((xfs_suminfo_t *)((char *)XFS_BUF_PTR(bp) + \ + (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp)))) + +#define XFS_BITTOBLOCK(mp,bi) ((bi) >> (mp)->m_blkbit_log) +#define XFS_BLOCKTOBIT(mp,bb) ((bb) << (mp)->m_blkbit_log) +#define XFS_BITTOWORD(mp,bi) \ + ((int)(((bi) >> XFS_NBWORDLOG) & XFS_BLOCKWMASK(mp))) + +#define XFS_RTMIN(a,b) ((a) < (b) ? (a) : (b)) +#define XFS_RTMAX(a,b) ((a) > (b) ? (a) : (b)) + +#define XFS_RTLOBIT(w) xfs_lowbit32(w) +#define XFS_RTHIBIT(w) xfs_highbit32(w) + +#if XFS_BIG_FILESYSTEMS +#define XFS_RTBLOCKLOG(b) xfs_highbit64(b) +#else +#define XFS_RTBLOCKLOG(b) xfs_highbit32(b) +#endif + + +#ifdef __KERNEL__ + +#ifdef CONFIG_XFS_RT +/* + * Function prototypes for exported functions. + */ + +/* + * Allocate an extent in the realtime subvolume, with the usual allocation + * parameters. The length units are all in realtime extents, as is the + * result block number. + */ +int /* error */ +xfs_rtallocate_extent( + struct xfs_trans *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to allocate */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_alloctype_t type, /* allocation type XFS_ALLOCTYPE... */ + int wasdel, /* was a delayed allocation extent */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock); /* out: start block allocated */ + +/* + * Free an extent in the realtime subvolume. Length is expressed in + * realtime extents, as is the block number. + */ +int /* error */ +xfs_rtfree_extent( + struct xfs_trans *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to free */ + xfs_extlen_t len); /* length of extent freed */ + +/* + * Initialize realtime fields in the mount structure. + */ +int /* error */ +xfs_rtmount_init( + struct xfs_mount *mp); /* file system mount structure */ + +/* + * Get the bitmap and summary inodes into the mount structure + * at mount time. + */ +int /* error */ +xfs_rtmount_inodes( + struct xfs_mount *mp); /* file system mount structure */ + +/* + * Pick an extent for allocation at the start of a new realtime file. + * Use the sequence number stored in the atime field of the bitmap inode. + * Translate this to a fraction of the rtextents, and return the product + * of rtextents and the fraction. + * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ... + */ +int /* error */ +xfs_rtpick_extent( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_extlen_t len, /* allocation length (rtextents) */ + xfs_rtblock_t *pick); /* result rt extent */ + +/* + * Debug code: print out the value of a range in the bitmap. + */ +void +xfs_rtprint_range( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block to print */ + xfs_extlen_t len); /* length to print */ + +/* + * Debug code: print the summary file. + */ +void +xfs_rtprint_summary( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp); /* transaction pointer */ + +/* + * Grow the realtime area of the filesystem. 
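+ * A minimal sketch of a caller, assuming the usual newblocks and
+ * extsize fields of xfs_growfs_rt_t from xfs_fs.h:
+ *
+ *	xfs_growfs_rt_t in;
+ *
+ *	in.newblocks = nblocks;			(new rt size, blocks)
+ *	in.extsize = mp->m_sb.sb_rextsize;	(keep the extent size)
+ *	error = xfs_growfs_rt(mp, &in);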
+ */
+int
+xfs_growfs_rt(
+ struct xfs_mount *mp, /* file system mount structure */
+ xfs_growfs_rt_t *in); /* user supplied growfs struct */
+
+#else
+# define xfs_rtallocate_extent(t,b,min,max,l,a,f,p,rb) (ENOSYS)
+# define xfs_rtfree_extent(t,b,l) (ENOSYS)
+# define xfs_rtpick_extent(m,t,l,rb) (ENOSYS)
+# define xfs_growfs_rt(mp,in) (ENOSYS)
+# define xfs_rtmount_init(mp) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
+# define xfs_rtmount_inodes(mp) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
+#endif /* CONFIG_XFS_RT */
+
+#endif /* __KERNEL__ */
+
+#endif /* __XFS_RTALLOC_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_rw.c linux.21rc5-ac2/fs/xfs/xfs_rw.c
--- linux.21rc5/fs/xfs/xfs_rw.c 1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_rw.c 2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,761 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_itable.h"
+#include "xfs_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode_item.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_acl.h"
+#include "xfs_mac.h"
+#include "xfs_error.h"
+#include "xfs_buf_item.h"
+#include "xfs_rw.h"
+
+/*
+ * This is a subroutine for xfs_write() and other writers (xfs_ioctl)
+ * which clears the setuid and setgid bits when a file is written.
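+ * Clearing these bits on write is the usual defence against a
+ * setuid executable being rewritten with new contents and then
+ * run with its original privileges.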
+ */ +int +xfs_write_clear_setuid( + xfs_inode_t *ip) +{ + xfs_mount_t *mp; + xfs_trans_t *tp; + int error; + + mp = ip->i_mount; + tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID); + if ((error = xfs_trans_reserve(tp, 0, + XFS_WRITEID_LOG_RES(mp), + 0, 0, 0))) { + xfs_trans_cancel(tp, 0); + return error; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + ip->i_d.di_mode &= ~ISUID; + + /* + * Note that we don't have to worry about mandatory + * file locking being disabled here because we only + * clear the ISGID bit if the Group execute bit is + * on, but if it was on then mandatory locking wouldn't + * have been enabled. + */ + if (ip->i_d.di_mode & (IEXEC >> 3)) { + ip->i_d.di_mode &= ~ISGID; + } + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return 0; +} + +/* + * Force a shutdown of the filesystem instantly while keeping + * the filesystem consistent. We don't do an unmount here; just shutdown + * the shop, make sure that absolutely nothing persistent happens to + * this filesystem after this point. + */ + +void +xfs_do_force_shutdown( + bhv_desc_t *bdp, + int flags, + char *fname, + int lnnum) +{ + int logerror; + xfs_mount_t *mp; + + mp = XFS_BHVTOM(bdp); + logerror = flags & XFS_LOG_IO_ERROR; + + if (!(flags & XFS_FORCE_UMOUNT)) { + cmn_err(CE_NOTE, + "xfs_force_shutdown(%s,0x%x) called from line %d of file %s. Return address = 0x%p", + mp->m_fsname,flags,lnnum,fname,__return_address); + } + /* + * No need to duplicate efforts. + */ + if (XFS_FORCED_SHUTDOWN(mp) && !logerror) + return; + + /* + * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't + * queue up anybody new on the log reservations, and wakes up + * everybody who's sleeping on log reservations and tells + * them the bad news. + */ + if (xfs_log_force_umount(mp, logerror)) + return; + + if (flags & XFS_CORRUPT_INCORE) { + xfs_cmn_err(XFS_PTAG_SHUTDOWN_CORRUPT, CE_ALERT, mp, + "Corruption of in-memory data detected. Shutting down filesystem: %s", + mp->m_fsname); + } else if (!(flags & XFS_FORCE_UMOUNT)) { + if (logerror) { + xfs_cmn_err(XFS_PTAG_SHUTDOWN_LOGERROR, CE_ALERT, mp, + "Log I/O Error Detected. Shutting down filesystem: %s", + mp->m_fsname); + } else if (!(flags & XFS_SHUTDOWN_REMOTE_REQ)) { + xfs_cmn_err(XFS_PTAG_SHUTDOWN_IOERROR, CE_ALERT, mp, + "I/O Error Detected. Shutting down filesystem: %s", + mp->m_fsname); + } + } + if (!(flags & XFS_FORCE_UMOUNT)) { + cmn_err(CE_ALERT, + "Please umount the filesystem, and rectify the problem(s)"); + } +} + + +/* + * Called when we want to stop a buffer from getting written or read. + * We attach the EIO error, muck with its flags, and call biodone + * so that the proper iodone callbacks get called. + */ +int +xfs_bioerror( + xfs_buf_t *bp) +{ + +#ifdef XFSERRORDEBUG + ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone); +#endif + + /* + * No need to wait until the buffer is unpinned. + * We aren't flushing it. + */ + xfs_buftrace("XFS IOERROR", bp); + XFS_BUF_ERROR(bp, EIO); + /* + * We're calling biodone, so delete B_DONE flag. Either way + * we have to call the iodone callback, and calling biodone + * probably is the best way since it takes care of + * GRIO as well. 
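+ * (GRIO is the guaranteed-rate I/O mechanism inherited from IRIX;
+ * it too learns of completion through the iodone callbacks.)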
+ */ + XFS_BUF_UNREAD(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_UNDONE(bp); + XFS_BUF_STALE(bp); + + XFS_BUF_CLR_BDSTRAT_FUNC(bp); + xfs_biodone(bp); + + return (EIO); +} + +/* + * Same as xfs_bioerror, except that we are releasing the buffer + * here ourselves, and avoiding the biodone call. + * This is meant for userdata errors; metadata bufs come with + * iodone functions attached, so that we can track down errors. + */ +int +xfs_bioerror_relse( + xfs_buf_t *bp) +{ + int64_t fl; + + ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks); + ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone); + + xfs_buftrace("XFS IOERRELSE", bp); + fl = XFS_BUF_BFLAGS(bp); + /* + * No need to wait until the buffer is unpinned. + * We aren't flushing it. + * + * chunkhold expects B_DONE to be set, whether + * we actually finish the I/O or not. We don't want to + * change that interface. + */ + XFS_BUF_UNREAD(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_DONE(bp); + XFS_BUF_STALE(bp); + XFS_BUF_CLR_IODONE_FUNC(bp); + XFS_BUF_CLR_BDSTRAT_FUNC(bp); + if (!(fl & XFS_B_ASYNC)) { + /* + * Mark b_error and B_ERROR _both_. + * Lot's of chunkcache code assumes that. + * There's no reason to mark error for + * ASYNC buffers. + */ + XFS_BUF_ERROR(bp, EIO); + XFS_BUF_V_IODONESEMA(bp); + } else { + xfs_buf_relse(bp); + } + return (EIO); +} +/* + * Prints out an ALERT message about I/O error. + */ +void +xfs_ioerror_alert( + char *func, + struct xfs_mount *mp, + xfs_buf_t *bp, + xfs_daddr_t blkno) +{ + cmn_err(CE_ALERT, + "I/O error in filesystem (\"%s\") meta-data dev 0x%x block 0x%llx" + " (\"%s\") error %d buf count %u", + (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname, + XFS_BUF_TARGET_DEV(bp), + (__uint64_t)blkno, + func, + XFS_BUF_GETERROR(bp), + XFS_BUF_COUNT(bp)); +} + +/* + * This isn't an absolute requirement, but it is + * just a good idea to call xfs_read_buf instead of + * directly doing a read_buf call. For one, we shouldn't + * be doing this disk read if we are in SHUTDOWN state anyway, + * so this stops that from happening. Secondly, this does all + * the error checking stuff and the brelse if appropriate for + * the caller, so the code can be a little leaner. + */ + +int +xfs_read_buf( + struct xfs_mount *mp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len, + uint flags, + xfs_buf_t **bpp) +{ + xfs_buf_t *bp; + int error; + + if (flags) + bp = xfs_buf_read_flags(target, blkno, len, flags); + else + bp = xfs_buf_read(target, blkno, len, flags); + if (!bp) + return XFS_ERROR(EIO); + error = XFS_BUF_GETERROR(bp); + if (bp && !error && !XFS_FORCED_SHUTDOWN(mp)) { + *bpp = bp; + } else { + *bpp = NULL; + if (error) { + xfs_ioerror_alert("xfs_read_buf", mp, bp, XFS_BUF_ADDR(bp)); + } else { + error = XFS_ERROR(EIO); + } + if (bp) { + XFS_BUF_UNDONE(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_STALE(bp); + /* + * brelse clears B_ERROR and b_error + */ + xfs_buf_relse(bp); + } + } + return (error); +} + +/* + * Wrapper around bwrite() so that we can trap + * write errors, and act accordingly. + */ +int +xfs_bwrite( + struct xfs_mount *mp, + struct xfs_buf *bp) +{ + int error; + + /* + * XXXsup how does this work for quotas. + */ + XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb); + XFS_BUF_SET_FSPRIVATE3(bp, mp); + XFS_BUF_WRITE(bp); + + if ((error = XFS_bwrite(bp))) { + ASSERT(mp); + /* + * Cannot put a buftrace here since if the buffer is not + * B_HOLD then we will brelse() the buffer before returning + * from bwrite and we could be tracing a buffer that has + * been reused. 
+ */ + xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR); + } + return (error); +} + +/* + * xfs_inval_cached_pages() + * This routine is responsible for keeping direct I/O and buffered I/O + * somewhat coherent. From here we make sure that we're at least + * temporarily holding the inode I/O lock exclusively and then call + * the page cache to flush and invalidate any cached pages. If there + * are no cached pages this routine will be very quick. + */ +void +xfs_inval_cached_pages( + vnode_t *vp, + xfs_iocore_t *io, + xfs_off_t offset, + int write, + int relock) +{ + xfs_mount_t *mp; + + if (!VN_CACHED(vp)) { + return; + } + + mp = io->io_mount; + + /* + * We need to get the I/O lock exclusively in order + * to safely invalidate pages and mappings. + */ + if (relock) { + XFS_IUNLOCK(mp, io, XFS_IOLOCK_SHARED); + XFS_ILOCK(mp, io, XFS_IOLOCK_EXCL); + } + + /* Writing beyond EOF creates a hole that must be zeroed */ + if (write && (offset > XFS_SIZE(mp, io))) { + xfs_fsize_t isize; + + XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + isize = XFS_SIZE(mp, io); + if (offset > isize) { + xfs_zero_eof(vp, io, offset, isize, offset); + } + XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + } + + VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1, FI_REMAPF_LOCKED); + if (relock) { + XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL); + } +} + + + +spinlock_t xfs_refcache_lock = SPIN_LOCK_UNLOCKED; +xfs_inode_t **xfs_refcache; +int xfs_refcache_size; +int xfs_refcache_index; +int xfs_refcache_busy; +int xfs_refcache_count; + +/* + * Insert the given inode into the reference cache. + */ +void +xfs_refcache_insert( + xfs_inode_t *ip) +{ + vnode_t *vp; + xfs_inode_t *release_ip; + xfs_inode_t **refcache; + + ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE)); + + /* + * If an unmount is busy blowing entries out of the cache, + * then don't bother. + */ + if (xfs_refcache_busy) { + return; + } + + /* + * If we tuned the refcache down to zero, don't do anything. + */ + if (!xfs_refcache_size) { + return; + } + + /* + * The inode is already in the refcache, so don't bother + * with it. + */ + if (ip->i_refcache != NULL) { + return; + } + + vp = XFS_ITOV(ip); + /* ASSERT(vp->v_count > 0); */ + VN_HOLD(vp); + + /* + * We allocate the reference cache on use so that we don't + * waste the memory on systems not being used as NFS servers. + */ + if (xfs_refcache == NULL) { + refcache = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX * + sizeof(xfs_inode_t *), + KM_SLEEP); + } else { + refcache = NULL; + } + + spin_lock(&xfs_refcache_lock); + + /* + * If we allocated memory for the refcache above and it still + * needs it, then use the memory we allocated. Otherwise we'll + * free the memory below. + */ + if (refcache != NULL) { + if (xfs_refcache == NULL) { + xfs_refcache = refcache; + refcache = NULL; + } + } + + /* + * If an unmount is busy clearing out the cache, don't add new + * entries to it. + */ + if (xfs_refcache_busy) { + spin_unlock(&xfs_refcache_lock); + VN_RELE(vp); + /* + * If we allocated memory for the refcache above but someone + * else beat us to using it, then free the memory now. 
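+ * (The KM_SLEEP allocation is deliberately done before taking
+ * xfs_refcache_lock, since we may not sleep while holding the
+ * spinlock; whichever path loses the race just frees its copy.)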
+ */ + if (refcache != NULL) { + kmem_free(refcache, + XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *)); + } + return; + } + release_ip = xfs_refcache[xfs_refcache_index]; + if (release_ip != NULL) { + release_ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + } + xfs_refcache[xfs_refcache_index] = ip; + ASSERT(ip->i_refcache == NULL); + ip->i_refcache = &(xfs_refcache[xfs_refcache_index]); + xfs_refcache_count++; + ASSERT(xfs_refcache_count <= xfs_refcache_size); + xfs_refcache_index++; + if (xfs_refcache_index == xfs_refcache_size) { + xfs_refcache_index = 0; + } + spin_unlock(&xfs_refcache_lock); + + /* + * Save the pointer to the inode to be released so that we can + * VN_RELE it once we've dropped our inode locks in xfs_rwunlock(). + * The pointer may be NULL, but that's OK. + */ + ip->i_release = release_ip; + + /* + * If we allocated memory for the refcache above but someone + * else beat us to using it, then free the memory now. + */ + if (refcache != NULL) { + kmem_free(refcache, + XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *)); + } + return; +} + + +/* + * If the given inode is in the reference cache, purge its entry and + * release the reference on the vnode. + */ +void +xfs_refcache_purge_ip( + xfs_inode_t *ip) +{ + vnode_t *vp; + int error; + + /* + * If we're not pointing to our entry in the cache, then + * we must not be in the cache. + */ + if (ip->i_refcache == NULL) { + return; + } + + spin_lock(&xfs_refcache_lock); + if (ip->i_refcache == NULL) { + spin_unlock(&xfs_refcache_lock); + return; + } + + /* + * Clear both our pointer to the cache entry and its pointer + * back to us. + */ + ASSERT(*(ip->i_refcache) == ip); + *(ip->i_refcache) = NULL; + ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + spin_unlock(&xfs_refcache_lock); + + vp = XFS_ITOV(ip); + /* ASSERT(vp->v_count > 1); */ + VOP_RELEASE(vp, error); + VN_RELE(vp); + + return; +} + + +/* + * This is called from the XFS unmount code to purge all entries for the + * given mount from the cache. It uses the refcache busy counter to + * make sure that new entries are not added to the cache as we purge them. + */ +void +xfs_refcache_purge_mp( + xfs_mount_t *mp) +{ + vnode_t *vp; + int error, i; + xfs_inode_t *ip; + + if (xfs_refcache == NULL) { + return; + } + + spin_lock(&xfs_refcache_lock); + /* + * Bumping the busy counter keeps new entries from being added + * to the cache. We use a counter since multiple unmounts could + * be in here simultaneously. + */ + xfs_refcache_busy++; + + for (i = 0; i < xfs_refcache_size; i++) { + ip = xfs_refcache[i]; + if ((ip != NULL) && (ip->i_mount == mp)) { + xfs_refcache[i] = NULL; + ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + spin_unlock(&xfs_refcache_lock); + vp = XFS_ITOV(ip); + VOP_RELEASE(vp, error); + VN_RELE(vp); + spin_lock(&xfs_refcache_lock); + } + } + + xfs_refcache_busy--; + ASSERT(xfs_refcache_busy >= 0); + spin_unlock(&xfs_refcache_lock); +} + + +/* + * This is called from the XFS sync code to ensure that the refcache + * is emptied out over time. We purge a small number of entries with + * each call. 
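+ * The batch size is the tunable xfs_params.refcache_purge, so the
+ * rate at which an idle system drains the cache can be adjusted.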
+ */ +void +xfs_refcache_purge_some(xfs_mount_t *mp) +{ + int error, i; + xfs_inode_t *ip; + int iplist_index; + xfs_inode_t **iplist; + int purge_count; + + if ((xfs_refcache == NULL) || (xfs_refcache_count == 0)) { + return; + } + + iplist_index = 0; + purge_count = xfs_params.refcache_purge; + iplist = (xfs_inode_t **)kmem_zalloc(purge_count * + sizeof(xfs_inode_t *), KM_SLEEP); + + spin_lock(&xfs_refcache_lock); + + /* + * Store any inodes we find in the next several entries + * into the iplist array to be released after dropping + * the spinlock. We always start looking from the currently + * oldest place in the cache. We move the refcache index + * forward as we go so that we are sure to eventually clear + * out the entire cache when the system goes idle. + */ + for (i = 0; i < purge_count; i++) { + ip = xfs_refcache[xfs_refcache_index]; + if (ip != NULL) { + xfs_refcache[xfs_refcache_index] = NULL; + ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + iplist[iplist_index] = ip; + iplist_index++; + } + xfs_refcache_index++; + if (xfs_refcache_index == xfs_refcache_size) { + xfs_refcache_index = 0; + } + } + + spin_unlock(&xfs_refcache_lock); + + /* + * Now drop the inodes we collected. + */ + for (i = 0; i < iplist_index; i++) { + VOP_RELEASE(XFS_ITOV(iplist[i]), error); + VN_RELE(XFS_ITOV(iplist[i])); + } + + kmem_free(iplist, purge_count * + sizeof(xfs_inode_t *)); +} + +/* + * This is called when the refcache is dynamically resized + * via a sysctl. + * + * If the new size is smaller than the old size, purge all + * entries in slots greater than the new size, and move + * the index if necessary. + * + * If the refcache hasn't even been allocated yet, or the + * new size is larger than the old size, just set the value + * of xfs_refcache_size. + */ + +void +xfs_refcache_resize(int xfs_refcache_new_size) +{ + int i; + xfs_inode_t *ip; + int iplist_index = 0; + xfs_inode_t **iplist; + int error; + + /* + * If the new size is smaller than the current size, + * purge entries to create smaller cache, and + * reposition index if necessary. + * Don't bother if no refcache yet. + */ + if (xfs_refcache && (xfs_refcache_new_size < xfs_refcache_size)) { + + iplist = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX * + sizeof(xfs_inode_t *), KM_SLEEP); + + spin_lock(&xfs_refcache_lock); + + for (i = xfs_refcache_new_size; i < xfs_refcache_size; i++) { + ip = xfs_refcache[i]; + if (ip != NULL) { + xfs_refcache[i] = NULL; + ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + iplist[iplist_index] = ip; + iplist_index++; + } + } + + xfs_refcache_size = xfs_refcache_new_size; + + /* + * Move index to beginning of cache if it's now past the end + */ + if (xfs_refcache_index >= xfs_refcache_new_size) + xfs_refcache_index = 0; + + spin_unlock(&xfs_refcache_lock); + + /* + * Now drop the inodes we collected. 
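+ * (As in the purge paths, VOP_RELEASE and VN_RELE may sleep, so
+ * the inodes are released only after xfs_refcache_lock is dropped.)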
+ */ + for (i = 0; i < iplist_index; i++) { + VOP_RELEASE(XFS_ITOV(iplist[i]), error); + VN_RELE(XFS_ITOV(iplist[i])); + } + + kmem_free(iplist, XFS_REFCACHE_SIZE_MAX * + sizeof(xfs_inode_t *)); + } else { + spin_lock(&xfs_refcache_lock); + xfs_refcache_size = xfs_refcache_new_size; + spin_unlock(&xfs_refcache_lock); + } +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_rw.h linux.21rc5-ac2/fs/xfs/xfs_rw.h --- linux.21rc5/fs/xfs/xfs_rw.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_rw.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_RW_H__ +#define __XFS_RW_H__ + +struct bhv_desc; +struct bmapval; +struct xfs_buf; +struct cred; +struct uio; +struct vnode; +struct xfs_inode; +struct xfs_iocore; +struct xfs_mount; +struct xfs_trans; + +/* + * Maximum count of bmaps used by read and write paths. + */ +#define XFS_MAX_RW_NBMAPS 4 + +/* + * Counts of readahead buffers to use based on physical memory size. + * None of these should be more than XFS_MAX_RW_NBMAPS. + */ +#define XFS_RW_NREADAHEAD_16MB 2 +#define XFS_RW_NREADAHEAD_32MB 3 +#define XFS_RW_NREADAHEAD_K32 4 +#define XFS_RW_NREADAHEAD_K64 4 + +/* + * Maximum size of a buffer that we\'ll map. Making this + * too big will degrade performance due to the number of + * pages which need to be gathered. Making it too small + * will prevent us from doing large I/O\'s to hardware that + * needs it. + * + * This is currently set to 512 KB. + */ +#define XFS_MAX_BMAP_LEN_BB 1024 +#define XFS_MAX_BMAP_LEN_BYTES 524288 + +/* + * Maximum size (in inodes) for the nfs refcache + */ +#define XFS_REFCACHE_SIZE_MAX 512 + + +/* + * Convert the given file system block to a disk block. + * We have to treat it differently based on whether the + * file is a real time file or not, because the bmap code + * does. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_DB) +xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb); +#define XFS_FSB_TO_DB(ip,fsb) xfs_fsb_to_db(ip,fsb) +#else +#define XFS_FSB_TO_DB(ip,fsb) \ + (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 
\ + (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \ + XFS_FSB_TO_DADDR((ip)->i_mount, (fsb))) +#endif + +#define XFS_FSB_TO_DB_IO(io,fsb) \ + (((io)->io_flags & XFS_IOCORE_RT) ? \ + XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \ + XFS_FSB_TO_DADDR((io)->io_mount, (fsb))) + +/* + * Defines for the trace mechanisms in xfs_rw.c. + */ +#define XFS_RW_KTRACE_SIZE 64 +#define XFS_STRAT_KTRACE_SIZE 64 +#define XFS_STRAT_GTRACE_SIZE 512 + +#define XFS_READ_ENTER 1 +#define XFS_WRITE_ENTER 2 +#define XFS_IOMAP_READ_ENTER 3 +#define XFS_IOMAP_WRITE_ENTER 4 +#define XFS_IOMAP_READ_MAP 5 +#define XFS_IOMAP_WRITE_MAP 6 +#define XFS_IOMAP_WRITE_NOSPACE 7 +#define XFS_ITRUNC_START 8 +#define XFS_ITRUNC_FINISH1 9 +#define XFS_ITRUNC_FINISH2 10 +#define XFS_CTRUNC1 11 +#define XFS_CTRUNC2 12 +#define XFS_CTRUNC3 13 +#define XFS_CTRUNC4 14 +#define XFS_CTRUNC5 15 +#define XFS_CTRUNC6 16 +#define XFS_BUNMAPI 17 +#define XFS_INVAL_CACHED 18 +#define XFS_DIORD_ENTER 19 +#define XFS_DIOWR_ENTER 20 + +#if defined(XFS_ALL_TRACE) +#define XFS_RW_TRACE +#define XFS_STRAT_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_RW_TRACE +#undef XFS_STRAT_TRACE +#endif + +/* + * Prototypes for functions in xfs_rw.c. + */ + +int +xfs_write_clear_setuid( + struct xfs_inode *ip); + +int +xfs_bwrite( + struct xfs_mount *mp, + struct xfs_buf *bp); + +void +xfs_inval_cached_pages( + struct vnode *vp, + struct xfs_iocore *io, + xfs_off_t offset, + int write, + int relock); + +void +xfs_refcache_insert( + struct xfs_inode *ip); + +void +xfs_refcache_purge_ip( + struct xfs_inode *ip); + +void +xfs_refcache_purge_mp( + struct xfs_mount *mp); + +void +xfs_refcache_purge_some( + struct xfs_mount *mp); + +void +xfs_refcache_resize( + int xfs_refcache_new_size); + +int +xfs_bioerror( + struct xfs_buf *b); + +int +xfs_bioerror_relse( + struct xfs_buf *b); + +int +xfs_read_buf( + struct xfs_mount *mp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len, + uint flags, + struct xfs_buf **bpp); + +void +xfs_ioerror_alert( + char *func, + struct xfs_mount *mp, + xfs_buf_t *bp, + xfs_daddr_t blkno); + + +/* + * Prototypes for functions in xfs_vnodeops.c. + */ + +int +xfs_rwlock( + bhv_desc_t *bdp, + vrwlock_t write_lock); + +void +xfs_rwunlock( + bhv_desc_t *bdp, + vrwlock_t write_lock); + +int +xfs_change_file_space( + bhv_desc_t *bdp, + int cmd, + xfs_flock64_t *bf, + xfs_off_t offset, + cred_t *credp, + int flags); + +int +xfs_set_dmattrs( + bhv_desc_t *bdp, + u_int evmask, + u_int16_t state, + cred_t *credp); + +#endif /* __XFS_RW_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_sb.h linux.21rc5-ac2/fs/xfs/xfs_sb.h --- linux.21rc5/fs/xfs/xfs_sb.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_sb.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,529 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SB_H__ +#define __XFS_SB_H__ + +/* + * Super block + * Fits into a sector-sized buffer at address 0 of each allocation group. + * Only the first of these is ever updated except during growfs. + */ + +struct xfs_buf; +struct xfs_mount; + +#define XFS_SB_MAGIC 0x58465342 /* 'XFSB' */ +#define XFS_SB_VERSION_1 1 /* 5.3, 6.0.1, 6.1 */ +#define XFS_SB_VERSION_2 2 /* 6.2 - attributes */ +#define XFS_SB_VERSION_3 3 /* 6.2 - new inode version */ +#define XFS_SB_VERSION_4 4 /* 6.2+ - bitmask version */ +#define XFS_SB_VERSION_NUMBITS 0x000f +#define XFS_SB_VERSION_ALLFBITS 0xfff0 +#define XFS_SB_VERSION_SASHFBITS 0xf000 +#define XFS_SB_VERSION_REALFBITS 0x0ff0 +#define XFS_SB_VERSION_ATTRBIT 0x0010 +#define XFS_SB_VERSION_NLINKBIT 0x0020 +#define XFS_SB_VERSION_QUOTABIT 0x0040 +#define XFS_SB_VERSION_ALIGNBIT 0x0080 +#define XFS_SB_VERSION_DALIGNBIT 0x0100 +#define XFS_SB_VERSION_SHAREDBIT 0x0200 +#define XFS_SB_VERSION_LOGV2BIT 0x0400 +#define XFS_SB_VERSION_SECTORBIT 0x0800 +#define XFS_SB_VERSION_EXTFLGBIT 0x1000 +#define XFS_SB_VERSION_DIRV2BIT 0x2000 +#define XFS_SB_VERSION_OKSASHFBITS \ + (XFS_SB_VERSION_EXTFLGBIT | \ + XFS_SB_VERSION_DIRV2BIT) +#define XFS_SB_VERSION_OKREALFBITS \ + (XFS_SB_VERSION_ATTRBIT | \ + XFS_SB_VERSION_NLINKBIT | \ + XFS_SB_VERSION_QUOTABIT | \ + XFS_SB_VERSION_ALIGNBIT | \ + XFS_SB_VERSION_DALIGNBIT | \ + XFS_SB_VERSION_SHAREDBIT | \ + XFS_SB_VERSION_LOGV2BIT | \ + XFS_SB_VERSION_SECTORBIT) +#define XFS_SB_VERSION_OKSASHBITS \ + (XFS_SB_VERSION_NUMBITS | \ + XFS_SB_VERSION_REALFBITS | \ + XFS_SB_VERSION_OKSASHFBITS) +#define XFS_SB_VERSION_OKREALBITS \ + (XFS_SB_VERSION_NUMBITS | \ + XFS_SB_VERSION_OKREALFBITS | \ + XFS_SB_VERSION_OKSASHFBITS) +#define XFS_SB_VERSION_MKFS(ia,dia,extflag,dirv2,na,sflag) \ + (((ia) || (dia) || (extflag) || (dirv2) || (na)) ? \ + (XFS_SB_VERSION_4 | \ + ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \ + ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \ + ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \ + ((dirv2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \ + ((na) ? XFS_SB_VERSION_LOGV2BIT : 0) | \ + ((sflag) ? 
XFS_SB_VERSION_SECTORBIT : 0)) : \ + XFS_SB_VERSION_1) + +typedef struct xfs_sb +{ + __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */ + __uint32_t sb_blocksize; /* logical block size, bytes */ + xfs_drfsbno_t sb_dblocks; /* number of data blocks */ + xfs_drfsbno_t sb_rblocks; /* number of realtime blocks */ + xfs_drtbno_t sb_rextents; /* number of realtime extents */ + uuid_t sb_uuid; /* file system unique id */ + xfs_dfsbno_t sb_logstart; /* starting block of log if internal */ + xfs_ino_t sb_rootino; /* root inode number */ + xfs_ino_t sb_rbmino; /* bitmap inode for realtime extents */ + xfs_ino_t sb_rsumino; /* summary inode for rt bitmap */ + xfs_agblock_t sb_rextsize; /* realtime extent size, blocks */ + xfs_agblock_t sb_agblocks; /* size of an allocation group */ + xfs_agnumber_t sb_agcount; /* number of allocation groups */ + xfs_extlen_t sb_rbmblocks; /* number of rt bitmap blocks */ + xfs_extlen_t sb_logblocks; /* number of log blocks */ + __uint16_t sb_versionnum; /* header version == XFS_SB_VERSION */ + __uint16_t sb_sectsize; /* volume sector size, bytes */ + __uint16_t sb_inodesize; /* inode size, bytes */ + __uint16_t sb_inopblock; /* inodes per block */ + char sb_fname[12]; /* file system name */ + __uint8_t sb_blocklog; /* log2 of sb_blocksize */ + __uint8_t sb_sectlog; /* log2 of sb_sectsize */ + __uint8_t sb_inodelog; /* log2 of sb_inodesize */ + __uint8_t sb_inopblog; /* log2 of sb_inopblock */ + __uint8_t sb_agblklog; /* log2 of sb_agblocks (rounded up) */ + __uint8_t sb_rextslog; /* log2 of sb_rextents */ + __uint8_t sb_inprogress; /* mkfs is in progress, don't mount */ + __uint8_t sb_imax_pct; /* max % of fs for inode space */ + /* statistics */ + /* + * These fields must remain contiguous. If you really + * want to change their layout, make sure you fix the + * code in xfs_trans_apply_sb_deltas(). + */ + __uint64_t sb_icount; /* allocated inodes */ + __uint64_t sb_ifree; /* free inodes */ + __uint64_t sb_fdblocks; /* free data blocks */ + __uint64_t sb_frextents; /* free realtime extents */ + /* + * End contiguous fields. + */ + xfs_ino_t sb_uquotino; /* user quota inode */ + xfs_ino_t sb_gquotino; /* group quota inode */ + __uint16_t sb_qflags; /* quota flags */ + __uint8_t sb_flags; /* misc. flags */ + __uint8_t sb_shared_vn; /* shared version number */ + xfs_extlen_t sb_inoalignmt; /* inode chunk alignment, fsblocks */ + __uint32_t sb_unit; /* stripe or raid unit */ + __uint32_t sb_width; /* stripe or raid width */ + __uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */ + __uint8_t sb_logsectlog; /* log2 of the log sector size */ + __uint16_t sb_logsectsize; /* sector size for the log, bytes */ + __uint32_t sb_logsunit; /* stripe unit size for the log */ +} xfs_sb_t; + +/* + * Sequence number values for the fields. 
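+ * The enum order must match the field order in xfs_sb_t above,
+ * since XFS_SB_MVAL() builds a field's mask as 1LL shifted left by
+ * its enum value.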
+ */ +typedef enum { + XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS, + XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO, + XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS, + XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS, + XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE, + XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG, + XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG, + XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT, + XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO, + XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN, + XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG, + XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT, + XFS_SBS_FIELDCOUNT +} xfs_sb_field_t; + +/* + * Mask values, defined based on the xfs_sb_field_t values. + * Only define the ones we're using. + */ +#define XFS_SB_MVAL(x) (1LL << XFS_SBS_ ## x) +#define XFS_SB_UUID XFS_SB_MVAL(UUID) +#define XFS_SB_FNAME XFS_SB_MVAL(FNAME) +#define XFS_SB_ROOTINO XFS_SB_MVAL(ROOTINO) +#define XFS_SB_RBMINO XFS_SB_MVAL(RBMINO) +#define XFS_SB_RSUMINO XFS_SB_MVAL(RSUMINO) +#define XFS_SB_VERSIONNUM XFS_SB_MVAL(VERSIONNUM) +#define XFS_SB_UQUOTINO XFS_SB_MVAL(UQUOTINO) +#define XFS_SB_GQUOTINO XFS_SB_MVAL(GQUOTINO) +#define XFS_SB_QFLAGS XFS_SB_MVAL(QFLAGS) +#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN) +#define XFS_SB_UNIT XFS_SB_MVAL(UNIT) +#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH) +#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT) +#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1) +#define XFS_SB_MOD_BITS \ + (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \ + XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \ + XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH) + +/* + * Misc. Flags - warning - these will be cleared by xfs_repair unless + * a feature bit is set when the flag is used. + */ +#define XFS_SBF_NOFLAGS 0x00 /* no flags set */ +#define XFS_SBF_READONLY 0x01 /* only read-only mounts allowed */ + +/* + * define max. shared version we can interoperate with + */ +#define XFS_SB_MAX_SHARED_VN 0 + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_NUM) +int xfs_sb_version_num(xfs_sb_t *sbp); +#define XFS_SB_VERSION_NUM(sbp) xfs_sb_version_num(sbp) +#else +#define XFS_SB_VERSION_NUM(sbp) ((sbp)->sb_versionnum & XFS_SB_VERSION_NUMBITS) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_GOOD_VERSION) +int xfs_sb_good_version(xfs_sb_t *sbp); +#define XFS_SB_GOOD_VERSION(sbp) xfs_sb_good_version(sbp) +#else +#define XFS_SB_GOOD_VERSION_INT(sbp) \ + ((((sbp)->sb_versionnum >= XFS_SB_VERSION_1) && \ + ((sbp)->sb_versionnum <= XFS_SB_VERSION_3)) || \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + !((sbp)->sb_versionnum & ~XFS_SB_VERSION_OKREALBITS) +#ifdef __KERNEL__ +#define XFS_SB_GOOD_VERSION(sbp) \ + (XFS_SB_GOOD_VERSION_INT(sbp) && \ + (sbp)->sb_shared_vn <= XFS_SB_MAX_SHARED_VN) )) +#else +/* + * extra 2 paren's here (( to unconfuse paren-matching editors + * like vi because XFS_SB_GOOD_VERSION_INT is a partial expression + * and the two XFS_SB_GOOD_VERSION's each 2 more close paren's to + * complete the expression. 
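+ * Concretely: XFS_SB_GOOD_VERSION_INT above is two closing
+ * parentheses short of a complete expression, and each
+ * XFS_SB_GOOD_VERSION definition below supplies the missing "))"
+ * at its end.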
+ */ +#define XFS_SB_GOOD_VERSION(sbp) \ + (XFS_SB_GOOD_VERSION_INT(sbp) && \ + (!((sbp)->sb_versionnum & XFS_SB_VERSION_SHAREDBIT) || \ + (sbp)->sb_shared_vn <= XFS_SB_MAX_SHARED_VN)) )) +#endif /* __KERNEL__ */ +#endif + +#define XFS_SB_GOOD_SASH_VERSION(sbp) \ + ((((sbp)->sb_versionnum >= XFS_SB_VERSION_1) && \ + ((sbp)->sb_versionnum <= XFS_SB_VERSION_3)) || \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + !((sbp)->sb_versionnum & ~XFS_SB_VERSION_OKSASHBITS))) + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_TONEW) +unsigned xfs_sb_version_tonew(unsigned v); +#define XFS_SB_VERSION_TONEW(v) xfs_sb_version_tonew(v) +#else +#define XFS_SB_VERSION_TONEW(v) \ + ((((v) == XFS_SB_VERSION_1) ? \ + 0 : \ + (((v) == XFS_SB_VERSION_2) ? \ + XFS_SB_VERSION_ATTRBIT : \ + (XFS_SB_VERSION_ATTRBIT | XFS_SB_VERSION_NLINKBIT))) | \ + XFS_SB_VERSION_4) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_TOOLD) +unsigned xfs_sb_version_toold(unsigned v); +#define XFS_SB_VERSION_TOOLD(v) xfs_sb_version_toold(v) +#else +#define XFS_SB_VERSION_TOOLD(v) \ + (((v) & (XFS_SB_VERSION_QUOTABIT | XFS_SB_VERSION_ALIGNBIT)) ? \ + 0 : \ + (((v) & XFS_SB_VERSION_NLINKBIT) ? \ + XFS_SB_VERSION_3 : \ + (((v) & XFS_SB_VERSION_ATTRBIT) ? \ + XFS_SB_VERSION_2 : \ + XFS_SB_VERSION_1))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASATTR) +int xfs_sb_version_hasattr(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASATTR(sbp) xfs_sb_version_hasattr(sbp) +#else +#define XFS_SB_VERSION_HASATTR(sbp) \ + (((sbp)->sb_versionnum == XFS_SB_VERSION_2) || \ + ((sbp)->sb_versionnum == XFS_SB_VERSION_3) || \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_ATTRBIT))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDATTR) +void xfs_sb_version_addattr(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDATTR(sbp) xfs_sb_version_addattr(sbp) +#else +#define XFS_SB_VERSION_ADDATTR(sbp) \ + ((sbp)->sb_versionnum = \ + (((sbp)->sb_versionnum == XFS_SB_VERSION_1) ? \ + XFS_SB_VERSION_2 : \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) ? \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_ATTRBIT) : \ + (XFS_SB_VERSION_4 | XFS_SB_VERSION_ATTRBIT)))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASNLINK) +int xfs_sb_version_hasnlink(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASNLINK(sbp) xfs_sb_version_hasnlink(sbp) +#else +#define XFS_SB_VERSION_HASNLINK(sbp) \ + (((sbp)->sb_versionnum == XFS_SB_VERSION_3) || \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_NLINKBIT))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDNLINK) +void xfs_sb_version_addnlink(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDNLINK(sbp) xfs_sb_version_addnlink(sbp) +#else +#define XFS_SB_VERSION_ADDNLINK(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum <= XFS_SB_VERSION_2 ? 
\ + XFS_SB_VERSION_3 : \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_NLINKBIT))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASQUOTA) +int xfs_sb_version_hasquota(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASQUOTA(sbp) xfs_sb_version_hasquota(sbp) +#else +#define XFS_SB_VERSION_HASQUOTA(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_QUOTABIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDQUOTA) +void xfs_sb_version_addquota(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDQUOTA(sbp) xfs_sb_version_addquota(sbp) +#else +#define XFS_SB_VERSION_ADDQUOTA(sbp) \ + ((sbp)->sb_versionnum = \ + (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 ? \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_QUOTABIT) : \ + (XFS_SB_VERSION_TONEW((sbp)->sb_versionnum) | \ + XFS_SB_VERSION_QUOTABIT))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASALIGN) +int xfs_sb_version_hasalign(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASALIGN(sbp) xfs_sb_version_hasalign(sbp) +#else +#define XFS_SB_VERSION_HASALIGN(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_ALIGNBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_SUBALIGN) +void xfs_sb_version_subalign(xfs_sb_t *sbp); +#define XFS_SB_VERSION_SUBALIGN(sbp) xfs_sb_version_subalign(sbp) +#else +#define XFS_SB_VERSION_SUBALIGN(sbp) \ + ((sbp)->sb_versionnum = \ + XFS_SB_VERSION_TOOLD((sbp)->sb_versionnum & ~XFS_SB_VERSION_ALIGNBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASDALIGN) +int xfs_sb_version_hasdalign(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASDALIGN(sbp) xfs_sb_version_hasdalign(sbp) +#else +#define XFS_SB_VERSION_HASDALIGN(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_DALIGNBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDDALIGN) +int xfs_sb_version_adddalign(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDDALIGN(sbp) xfs_sb_version_adddalign(sbp) +#else +#define XFS_SB_VERSION_ADDDALIGN(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_DALIGNBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASSHARED) +int xfs_sb_version_hasshared(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASSHARED(sbp) xfs_sb_version_hasshared(sbp) +#else +#define XFS_SB_VERSION_HASSHARED(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_SHAREDBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDSHARED) +int xfs_sb_version_addshared(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDSHARED(sbp) xfs_sb_version_addshared(sbp) +#else +#define XFS_SB_VERSION_ADDSHARED(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_SHAREDBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_SUBSHARED) +int xfs_sb_version_subshared(xfs_sb_t *sbp); +#define XFS_SB_VERSION_SUBSHARED(sbp) xfs_sb_version_subshared(sbp) +#else +#define XFS_SB_VERSION_SUBSHARED(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum & ~XFS_SB_VERSION_SHAREDBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASDIRV2) +int xfs_sb_version_hasdirv2(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASDIRV2(sbp) xfs_sb_version_hasdirv2(sbp) +#else +#define XFS_SB_VERSION_HASDIRV2(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == 
XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_DIRV2BIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASLOGV2) +int xfs_sb_version_haslogv2(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASLOGV2(sbp) xfs_sb_version_haslogv2(sbp) +#else +#define XFS_SB_VERSION_HASLOGV2(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_LOGV2BIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASEXTFLGBIT) +int xfs_sb_version_hasextflgbit(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASEXTFLGBIT(sbp) xfs_sb_version_hasextflgbit(sbp) +#else +#define XFS_SB_VERSION_HASEXTFLGBIT(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDEXTFLGBIT) +int xfs_sb_version_addextflgbit(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDEXTFLGBIT(sbp) xfs_sb_version_addextflgbit(sbp) +#else +#define XFS_SB_VERSION_ADDEXTFLGBIT(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_EXTFLGBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_SUBEXTFLGBIT) +int xfs_sb_version_subextflgbit(xfs_sb_t *sbp); +#define XFS_SB_VERSION_SUBEXTFLGBIT(sbp) xfs_sb_version_subextflgbit(sbp) +#else +#define XFS_SB_VERSION_SUBEXTFLGBIT(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum & ~XFS_SB_VERSION_EXTFLGBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASSECTOR) +int xfs_sb_version_hassector(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASSECTOR(sbp) xfs_sb_version_hassector(sbp) +#else +#define XFS_SB_VERSION_HASSECTOR(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_SECTORBIT)) +#endif + +/* + * end of superblock version macros + */ + +#define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_BLOCK) +xfs_agblock_t xfs_sb_block(struct xfs_mount *mp); +#define XFS_SB_BLOCK(mp) xfs_sb_block(mp) +#else +#define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_HDR_BLOCK) +xfs_agblock_t xfs_hdr_block(struct xfs_mount *mp, xfs_daddr_t d); +#define XFS_HDR_BLOCK(mp,d) xfs_hdr_block(mp,d) +#else +#define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)(XFS_BB_TO_FSBT(mp,d))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DADDR_TO_FSB) +xfs_fsblock_t xfs_daddr_to_fsb(struct xfs_mount *mp, xfs_daddr_t d); +#define XFS_DADDR_TO_FSB(mp,d) xfs_daddr_to_fsb(mp,d) +#else +#define XFS_DADDR_TO_FSB(mp,d) \ + XFS_AGB_TO_FSB(mp, XFS_DADDR_TO_AGNO(mp,d), XFS_DADDR_TO_AGBNO(mp,d)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_DADDR) +xfs_daddr_t xfs_fsb_to_daddr(struct xfs_mount *mp, xfs_fsblock_t fsbno); +#define XFS_FSB_TO_DADDR(mp,fsbno) xfs_fsb_to_daddr(mp,fsbno) +#else +#define XFS_FSB_TO_DADDR(mp,fsbno) \ + XFS_AGB_TO_DADDR(mp, XFS_FSB_TO_AGNO(mp,fsbno), \ + XFS_FSB_TO_AGBNO(mp,fsbno)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBP) +xfs_sb_t *xfs_buf_to_sbp(struct xfs_buf *bp); +#define XFS_BUF_TO_SBP(bp) xfs_buf_to_sbp(bp) +#else +#define XFS_BUF_TO_SBP(bp) ((xfs_sb_t *)XFS_BUF_PTR(bp)) +#endif + +/* + * File system sector to basic block conversions. 
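+ * A basic block is 512 bytes, so with 512-byte sectors m_sectbb_log
+ * is 0 and these are identities. As a worked example, hypothetical
+ * 4096-byte sectors give m_sectbb_log = 3, so XFS_FSS_TO_BB(mp, 2)
+ * is 16 and XFS_BB_TO_FSS() rounds 17 basic blocks up to 3 sectors.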
+ */ +#define XFS_FSS_TO_BB(mp,sec) ((sec) << (mp)->m_sectbb_log) +#define XFS_BB_TO_FSS(mp,bb) \ + (((bb) + (XFS_FSS_TO_BB(mp,1) - 1)) >> (mp)->m_sectbb_log) +#define XFS_BB_TO_FSST(mp,bb) ((bb) >> (mp)->m_sectbb_log) + +/* + * File system sector to byte conversions. + */ +#define XFS_FSS_TO_B(mp,sectno) ((xfs_fsize_t)(sectno) << (mp)->m_sb.sb_sectlog) +#define XFS_B_TO_FSST(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_sectlog) + +/* + * File system block to basic block conversions. + */ +#define XFS_FSB_TO_BB(mp,fsbno) ((fsbno) << (mp)->m_blkbb_log) +#define XFS_BB_TO_FSB(mp,bb) \ + (((bb) + (XFS_FSB_TO_BB(mp,1) - 1)) >> (mp)->m_blkbb_log) +#define XFS_BB_TO_FSBT(mp,bb) ((bb) >> (mp)->m_blkbb_log) +#define XFS_BB_FSB_OFFSET(mp,bb) ((bb) & ((mp)->m_bsize - 1)) + +/* + * File system block to byte conversions. + */ +#define XFS_FSB_TO_B(mp,fsbno) ((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog) +#define XFS_B_TO_FSB(mp,b) \ + ((((__uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog) +#define XFS_B_TO_FSBT(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog) +#define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask) + +#endif /* __XFS_SB_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_trans_ail.c linux.21rc5-ac2/fs/xfs/xfs_trans_ail.c --- linux.21rc5/fs/xfs/xfs_trans_ail.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_trans_ail.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,597 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_macros.h"
+#include "xfs_types.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_trans_priv.h"
+#include "xfs_error.h"
+
+STATIC void xfs_ail_insert(xfs_ail_entry_t *, xfs_log_item_t *);
+STATIC xfs_log_item_t * xfs_ail_delete(xfs_ail_entry_t *, xfs_log_item_t *);
+STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_entry_t *);
+STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *);
+
+#ifdef XFSDEBUG
+STATIC void xfs_ail_check(xfs_ail_entry_t *);
+#else
+#define xfs_ail_check(a)
+#endif /* XFSDEBUG */
+
+
+/*
+ * This is called by the log manager code to determine the LSN
+ * of the tail of the log. This is exactly the LSN of the first
+ * item in the AIL. If the AIL is empty, then this function
+ * returns 0.
+ *
+ * We need the AIL lock in order to get a coherent read of the
+ * lsn of the first item in the AIL.
+ */
+xfs_lsn_t
+xfs_trans_tail_ail(
+ xfs_mount_t *mp)
+{
+ xfs_lsn_t lsn;
+ xfs_log_item_t *lip;
+ SPLDECL(s);
+
+ AIL_LOCK(mp,s);
+ lip = xfs_ail_min(&(mp->m_ail));
+ if (lip == NULL) {
+ lsn = (xfs_lsn_t)0;
+ } else {
+ lsn = lip->li_lsn;
+ }
+ AIL_UNLOCK(mp, s);
+
+ return lsn;
+}
+
+/*
+ * xfs_trans_push_ail
+ *
+ * This routine is called to move the tail of the AIL
+ * forward. It does this by trying to flush items in the AIL
+ * whose lsns are below the given threshold_lsn.
+ *
+ * The routine returns the lsn of the tail of the log.
+ */
+xfs_lsn_t
+xfs_trans_push_ail(
+ xfs_mount_t *mp,
+ xfs_lsn_t threshold_lsn)
+{
+ xfs_lsn_t lsn;
+ xfs_log_item_t *lip;
+ int gen;
+ int restarts;
+ int lock_result;
+ int flush_log;
+ SPLDECL(s);
+
+#define XFS_TRANS_PUSH_AIL_RESTARTS 10
+
+ AIL_LOCK(mp,s);
+ lip = xfs_trans_first_ail(mp, &gen);
+ if (lip == NULL || XFS_FORCED_SHUTDOWN(mp)) {
+ /*
+ * Just return if the AIL is empty.
+ */
+ AIL_UNLOCK(mp, s);
+ return (xfs_lsn_t)0;
+ }
+
+ XFS_STATS_INC(xfsstats.xs_push_ail);
+
+ /*
+ * While the item we are looking at is below the given threshold
+ * try to flush it out. Make sure to limit the number of times
+ * we allow xfs_trans_next_ail() to restart scanning from the
+ * beginning of the list. We'd like not to stop until we've at least
+ * tried to push on everything in the AIL with an LSN less than
+ * the given threshold. However, we may give up before that if
+ * we realize that we've been holding the AIL_LOCK for 'too long',
+ * blocking interrupts. Currently, 'too long' is roughly 500us.
+ */
+ flush_log = 0;
+ restarts = 0;
+ while (((restarts < XFS_TRANS_PUSH_AIL_RESTARTS) &&
+ (XFS_LSN_CMP(lip->li_lsn, threshold_lsn) < 0))) {
+ /*
+ * If we can lock the item without sleeping, unlock
+ * the AIL lock and flush the item. Then re-grab the
+ * AIL lock so we can look for the next item on the
+ * AIL. Since we unlock the AIL while we flush the
+ * item, the next routine may start over again at the
+ * beginning of the list if anything has changed.
+ * That is what the generation count is for.
+ *
+ * If we can't lock the item, either its holder will flush
+ * it or it is already being flushed or it is being relogged.
+ * In any of these cases it is being taken care of and we
+ * can just skip to the next item in the list.
+ */
+ lock_result = IOP_TRYLOCK(lip);
+ switch (lock_result) {
+ case XFS_ITEM_SUCCESS:
+ AIL_UNLOCK(mp, s);
+ XFS_STATS_INC(xfsstats.xs_push_ail_success);
+ IOP_PUSH(lip);
+ AIL_LOCK(mp,s);
+ break;
+
+ case XFS_ITEM_PUSHBUF:
+ AIL_UNLOCK(mp, s);
+ XFS_STATS_INC(xfsstats.xs_push_ail_pushbuf);
+#ifdef XFSRACEDEBUG
+ delay_for_intr();
+ delay(300);
+#endif
+ ASSERT(lip);
+ ASSERT(lip->li_ops->iop_pushbuf);
+ IOP_PUSHBUF(lip);
+ AIL_LOCK(mp,s);
+ break;
+
+ case XFS_ITEM_PINNED:
+ XFS_STATS_INC(xfsstats.xs_push_ail_pinned);
+ flush_log = 1;
+ break;
+
+ case XFS_ITEM_LOCKED:
+ XFS_STATS_INC(xfsstats.xs_push_ail_locked);
+ break;
+
+ case XFS_ITEM_FLUSHING:
+ XFS_STATS_INC(xfsstats.xs_push_ail_flushing);
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
+ if (lip == NULL) {
+ break;
+ }
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ /*
+ * Just return if we shut down during the last try.
+ */
+ AIL_UNLOCK(mp, s);
+ return (xfs_lsn_t)0;
+ }
+
+ }
+
+ if (flush_log) {
+ /*
+ * If something we need to push out was pinned, then
+ * push out the log so it will become unpinned and
+ * move forward in the AIL.
+ */
+ AIL_UNLOCK(mp, s);
+ XFS_STATS_INC(xfsstats.xs_push_ail_flush);
+ xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+ AIL_LOCK(mp, s);
+ }
+
+ lip = xfs_ail_min(&(mp->m_ail));
+ if (lip == NULL) {
+ lsn = (xfs_lsn_t)0;
+ } else {
+ lsn = lip->li_lsn;
+ }
+
+ AIL_UNLOCK(mp, s);
+ return lsn;
+} /* xfs_trans_push_ail */
+
+
+/*
+ * This is to be called when an item is unlocked that may have
+ * been in the AIL. It will wake up the first member of the AIL
+ * wait list if this item's unlocking might allow it to progress.
+ * If the item is in the AIL, then we need to get the AIL lock
+ * while doing our checking so we don't race with someone going
+ * to sleep waiting for this event in xfs_trans_push_ail().
+ */
+void
+xfs_trans_unlocked_item(
+ xfs_mount_t *mp,
+ xfs_log_item_t *lip)
+{
+ xfs_log_item_t *min_lip;
+
+ /*
+ * If we're forcibly shutting down, we may have
+ * unlocked log items arbitrarily. The last thing
+ * we want to do is to move the tail of the log
+ * over some potentially valid data.
+ */
+ if (!(lip->li_flags & XFS_LI_IN_AIL) ||
+ XFS_FORCED_SHUTDOWN(mp)) {
+ return;
+ }
+
+ /*
+ * This is the one case where we can call into xfs_ail_min()
+ * without holding the AIL lock because we only care about the
+ * case where we are at the tail of the AIL. If the object isn't
+ * at the tail, it doesn't matter what result we get back. This
+ * is slightly racy because, since we were just unlocked, we could
+ * go to sleep between the call to xfs_ail_min and the call to
+ * xfs_log_move_tail, have someone else lock us, commit us to disk,
+ * move us out of the tail of the AIL, and then we wake up. However,
+ * the call to xfs_log_move_tail() doesn't do anything if there's
+ * not enough free space to wake people up so we're safe calling it.
+ */
+ min_lip = xfs_ail_min(&mp->m_ail);
+
+ if (min_lip == lip)
+ xfs_log_move_tail(mp, 1);
+} /* xfs_trans_unlocked_item */
+
+
+/*
+ * Update the position of the item in the AIL with the new
+ * lsn. If it is not yet in the AIL, add it. Otherwise, move
+ * it to its new position by removing it and re-adding it.
+ *
+ * Wake up anyone with an lsn less than the item's lsn. If the item
+ * we move in the AIL is the minimum one, update the tail lsn in the
+ * log manager.
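+ *
+ * A minimal caller sketch (illustrative only -- lip and lsn are
+ * placeholders; SPLDECL/AIL_LOCK are the locking macros used
+ * throughout this file):
+ *
+ *	SPLDECL(s);
+ *
+ *	AIL_LOCK(mp, s);
+ *	xfs_trans_update_ail(mp, lip, lsn, s);
+ *	(no AIL_UNLOCK here: the routine drops the AIL lock itself)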
+ * + * Increment the AIL's generation count to indicate that the tree + * has changed. + * + * This function must be called with the AIL lock held. The lock + * is dropped before returning, so the caller must pass in the + * cookie returned by AIL_LOCK. + */ +void +xfs_trans_update_ail( + xfs_mount_t *mp, + xfs_log_item_t *lip, + xfs_lsn_t lsn, + unsigned long s) +{ + xfs_ail_entry_t *ailp; + xfs_log_item_t *dlip=NULL; + xfs_log_item_t *mlip; /* ptr to minimum lip */ + + ailp = &(mp->m_ail); + mlip = xfs_ail_min(ailp); + + if (lip->li_flags & XFS_LI_IN_AIL) { + dlip = xfs_ail_delete(ailp, lip); + ASSERT(dlip == lip); + } else { + lip->li_flags |= XFS_LI_IN_AIL; + } + + lip->li_lsn = lsn; + + xfs_ail_insert(ailp, lip); + mp->m_ail_gen++; + + if (mlip == dlip) { + mlip = xfs_ail_min(&(mp->m_ail)); + AIL_UNLOCK(mp, s); + xfs_log_move_tail(mp, mlip->li_lsn); + } else { + AIL_UNLOCK(mp, s); + } + + +} /* xfs_trans_update_ail */ + +/* + * Delete the given item from the AIL. It must already be in + * the AIL. + * + * Wakeup anyone with an lsn less than item's lsn. If the item + * we delete in the AIL is the minimum one, update the tail lsn in the + * log manager. + * + * Clear the IN_AIL flag from the item, reset its lsn to 0, and + * bump the AIL's generation count to indicate that the tree + * has changed. + * + * This function must be called with the AIL lock held. The lock + * is dropped before returning, so the caller must pass in the + * cookie returned by AIL_LOCK. + */ +void +xfs_trans_delete_ail( + xfs_mount_t *mp, + xfs_log_item_t *lip, + unsigned long s) +{ + xfs_ail_entry_t *ailp; + xfs_log_item_t *dlip; + xfs_log_item_t *mlip; + + if (lip->li_flags & XFS_LI_IN_AIL) { + ailp = &(mp->m_ail); + mlip = xfs_ail_min(ailp); + dlip = xfs_ail_delete(ailp, lip); + ASSERT(dlip == lip); + + + lip->li_flags &= ~XFS_LI_IN_AIL; + lip->li_lsn = 0; + mp->m_ail_gen++; + + if (mlip == dlip) { + mlip = xfs_ail_min(&(mp->m_ail)); + AIL_UNLOCK(mp, s); + xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0)); + } else { + AIL_UNLOCK(mp, s); + } + } + else { + /* + * If the file system is not being shutdown, we are in + * serious trouble if we get to this stage. + */ + if (XFS_FORCED_SHUTDOWN(mp)) + AIL_UNLOCK(mp, s); + else { + xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, + "xfs_trans_delete_ail: attempting to delete a log item that is not in the AIL"); + xfs_force_shutdown(mp, XFS_CORRUPT_INCORE); + AIL_UNLOCK(mp, s); + } + } +} + + + +/* + * Return the item in the AIL with the smallest lsn. + * Return the current tree generation number for use + * in calls to xfs_trans_next_ail(). + */ +xfs_log_item_t * +xfs_trans_first_ail( + xfs_mount_t *mp, + int *gen) +{ + xfs_log_item_t *lip; + + lip = xfs_ail_min(&(mp->m_ail)); + *gen = (int)mp->m_ail_gen; + + return (lip); +} + +/* + * If the generation count of the tree has not changed since the + * caller last took something from the AIL, then return the elmt + * in the tree which follows the one given. If the count has changed, + * then return the minimum elmt of the AIL and bump the restarts counter + * if one is given. 
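+ *
+ * A minimal traversal sketch (illustrative only; the real pattern,
+ * including the restart limit, is in xfs_trans_push_ail() above):
+ *
+ *	AIL_LOCK(mp, s);
+ *	lip = xfs_trans_first_ail(mp, &gen);
+ *	while (lip != NULL) {
+ *		(examine or push lip here)
+ *		lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
+ *	}
+ *	AIL_UNLOCK(mp, s);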
+ */ +xfs_log_item_t * +xfs_trans_next_ail( + xfs_mount_t *mp, + xfs_log_item_t *lip, + int *gen, + int *restarts) +{ + xfs_log_item_t *nlip; + + ASSERT(mp && lip && gen); + if (mp->m_ail_gen == *gen) { + nlip = xfs_ail_next(&(mp->m_ail), lip); + } else { + nlip = xfs_ail_min(&(mp->m_ail)); + *gen = (int)mp->m_ail_gen; + if (restarts != NULL) { + XFS_STATS_INC(xfsstats.xs_push_ail_restarts); + (*restarts)++; + } + } + + return (nlip); +} + + +/* + * The active item list (AIL) is a doubly linked list of log + * items sorted by ascending lsn. The base of the list is + * a forw/back pointer pair embedded in the xfs mount structure. + * The base is initialized with both pointers pointing to the + * base. This case always needs to be distinguished, because + * the base has no lsn to look at. We almost always insert + * at the end of the list, so on inserts we search from the + * end of the list to find where the new item belongs. + */ + +/* + * Initialize the doubly linked list to point only to itself. + */ +void +xfs_trans_ail_init( + xfs_mount_t *mp) +{ + mp->m_ail.ail_forw = (xfs_log_item_t*)&(mp->m_ail); + mp->m_ail.ail_back = (xfs_log_item_t*)&(mp->m_ail); +} + +/* + * Insert the given log item into the AIL. + * We almost always insert at the end of the list, so on inserts + * we search from the end of the list to find where the + * new item belongs. + */ +STATIC void +xfs_ail_insert( + xfs_ail_entry_t *base, + xfs_log_item_t *lip) +/* ARGSUSED */ +{ + xfs_log_item_t *next_lip; + + /* + * If the list is empty, just insert the item. + */ + if (base->ail_back == (xfs_log_item_t*)base) { + base->ail_forw = lip; + base->ail_back = lip; + lip->li_ail.ail_forw = (xfs_log_item_t*)base; + lip->li_ail.ail_back = (xfs_log_item_t*)base; + return; + } + + next_lip = base->ail_back; + while ((next_lip != (xfs_log_item_t*)base) && + (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) > 0)) { + next_lip = next_lip->li_ail.ail_back; + } + ASSERT((next_lip == (xfs_log_item_t*)base) || + (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)); + lip->li_ail.ail_forw = next_lip->li_ail.ail_forw; + lip->li_ail.ail_back = next_lip; + next_lip->li_ail.ail_forw = lip; + lip->li_ail.ail_forw->li_ail.ail_back = lip; + + xfs_ail_check(base); + return; +} + +/* + * Delete the given item from the AIL. Return a pointer to the item. + */ +/*ARGSUSED*/ +STATIC xfs_log_item_t * +xfs_ail_delete( + xfs_ail_entry_t *base, + xfs_log_item_t *lip) +/* ARGSUSED */ +{ + lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back; + lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw; + lip->li_ail.ail_forw = NULL; + lip->li_ail.ail_back = NULL; + + xfs_ail_check(base); + return lip; +} + +/* + * Return a pointer to the first item in the AIL. + * If the AIL is empty, then return NULL. + */ +STATIC xfs_log_item_t * +xfs_ail_min( + xfs_ail_entry_t *base) +/* ARGSUSED */ +{ + register xfs_log_item_t *forw = base->ail_forw; + if (forw == (xfs_log_item_t*)base) { + return NULL; + } + return forw; +} + +/* + * Return a pointer to the item which follows + * the given item in the AIL. If the given item + * is the last item in the list, then return NULL. + */ +STATIC xfs_log_item_t * +xfs_ail_next( + xfs_ail_entry_t *base, + xfs_log_item_t *lip) +/* ARGSUSED */ +{ + if (lip->li_ail.ail_forw == (xfs_log_item_t*)base) { + return NULL; + } + return lip->li_ail.ail_forw; + +} + +#ifdef XFSDEBUG +/* + * Check that the list is sorted as it should be. 
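+ * Concretely, the invariants verified below are that the forward and
+ * back pointers are mutually consistent, that lsns appear in
+ * non-decreasing order, and that every entry has XFS_LI_IN_AIL set.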
+ */ +STATIC void +xfs_ail_check( + xfs_ail_entry_t *base) +{ + xfs_log_item_t *lip; + xfs_log_item_t *prev_lip; + + lip = base->ail_forw; + if (lip == (xfs_log_item_t*)base) { + /* + * Make sure the pointers are correct when the list + * is empty. + */ + ASSERT(base->ail_back == (xfs_log_item_t*)base); + return; + } + + /* + * Walk the list checking forward and backward pointers, + * lsn ordering, and that every entry has the XFS_LI_IN_AIL + * flag set. + */ + prev_lip = (xfs_log_item_t*)base; + while (lip != (xfs_log_item_t*)base) { + if (prev_lip != (xfs_log_item_t*)base) { + ASSERT(prev_lip->li_ail.ail_forw == lip); + ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); + } + ASSERT(lip->li_ail.ail_back == prev_lip); + ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); + prev_lip = lip; + lip = lip->li_ail.ail_forw; + } + ASSERT(lip == (xfs_log_item_t*)base); + ASSERT(base->ail_back == prev_lip); +} +#endif /* XFSDEBUG */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_trans_buf.c linux.21rc5-ac2/fs/xfs/xfs_trans_buf.c --- linux.21rc5/fs/xfs/xfs_trans_buf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_trans_buf.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1100 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_error.h" +#include "xfs_rw.h" + + +STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *, + xfs_daddr_t, int); +STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *, + xfs_daddr_t, int); + + +/* + * Get and lock the buffer for the caller if it is not already + * locked within the given transaction. If it is already locked + * within the transaction, just increment its lock recursion count + * and return a pointer to it. 
+ * + * Use the fast path function xfs_trans_buf_item_match() or the buffer + * cache routine incore_match() to find the buffer + * if it is already owned by this transaction. + * + * If we don't already own the buffer, use get_buf() to get it. + * If it doesn't yet have an associated xfs_buf_log_item structure, + * then allocate one and add the item to this transaction. + * + * If the transaction pointer is NULL, make this just a normal + * get_buf() call. + */ +xfs_buf_t * +xfs_trans_get_buf(xfs_trans_t *tp, + xfs_buftarg_t *target_dev, + xfs_daddr_t blkno, + int len, + uint flags) +{ + xfs_buf_t *bp; + xfs_buf_log_item_t *bip; + + if (flags == 0) + flags = XFS_BUF_LOCK | XFS_BUF_MAPPED; + + /* + * Default to a normal get_buf() call if the tp is NULL. + */ + if (tp == NULL) { + bp = xfs_buf_get_flags(target_dev, blkno, len, + flags | BUF_BUSY); + return(bp); + } + + /* + * If we find the buffer in the cache with this transaction + * pointer in its b_fsprivate2 field, then we know we already + * have it locked. In this case we just increment the lock + * recursion count and return the buffer to the caller. + */ + if (tp->t_items.lic_next == NULL) { + bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len); + } else { + bp = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len); + } + if (bp != NULL) { + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) { + xfs_buftrace("TRANS GET RECUR SHUT", bp); + XFS_BUF_SUPER_STALE(bp); + } + /* + * If the buffer is stale then it was binval'ed + * since last read. This doesn't matter since the + * caller isn't allowed to use the data anyway. + */ + else if (XFS_BUF_ISSTALE(bp)) { + xfs_buftrace("TRANS GET RECUR STALE", bp); + ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); + } + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + bip->bli_recur++; + xfs_buftrace("TRANS GET RECUR", bp); + xfs_buf_item_trace("GET RECUR", bip); + return (bp); + } + + /* + * We always specify the BUF_BUSY flag within a transaction so + * that get_buf does not try to push out a delayed write buffer + * which might cause another transaction to take place (if the + * buffer was delayed alloc). Such recursive transactions can + * easily deadlock with our current transaction as well as cause + * us to run out of stack space. + */ + bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY); + if (bp == NULL) { + return NULL; + } + + ASSERT(!XFS_BUF_GETERROR(bp)); + + /* + * The xfs_buf_log_item pointer is stored in b_fsprivate. If + * it doesn't have one yet, then allocate one and initialize it. + * The checks to see if one is there are in xfs_buf_item_init(). + */ + xfs_buf_item_init(bp, tp->t_mountp); + + /* + * Set the recursion count for the buffer within this transaction + * to 0. + */ + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + bip->bli_recur = 0; + + /* + * Take a reference for this transaction on the buf item. + */ + atomic_inc(&bip->bli_refcount); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip); + + /* + * Initialize b_fsprivate2 so we can find it with incore_match() + * above. 
+ */ + XFS_BUF_SET_FSPRIVATE2(bp, tp); + + xfs_buftrace("TRANS GET", bp); + xfs_buf_item_trace("GET", bip); + return (bp); +} + +/* + * Get and lock the superblock buffer of this file system for the + * given transaction. + * + * We don't need to use incore_match() here, because the superblock + * buffer is a private buffer which we keep a pointer to in the + * mount structure. + */ +xfs_buf_t * +xfs_trans_getsb(xfs_trans_t *tp, + struct xfs_mount *mp, + int flags) +{ + xfs_buf_t *bp; + xfs_buf_log_item_t *bip; + + /* + * Default to just trying to lock the superblock buffer + * if tp is NULL. + */ + if (tp == NULL) { + return (xfs_getsb(mp, flags)); + } + + /* + * If the superblock buffer already has this transaction + * pointer in its b_fsprivate2 field, then we know we already + * have it locked. In this case we just increment the lock + * recursion count and return the buffer to the caller. + */ + bp = mp->m_sb_bp; + if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) { + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + ASSERT(bip != NULL); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + bip->bli_recur++; + xfs_buf_item_trace("GETSB RECUR", bip); + return (bp); + } + + bp = xfs_getsb(mp, flags); + if (bp == NULL) { + return NULL; + } + + /* + * The xfs_buf_log_item pointer is stored in b_fsprivate. If + * it doesn't have one yet, then allocate one and initialize it. + * The checks to see if one is there are in xfs_buf_item_init(). + */ + xfs_buf_item_init(bp, mp); + + /* + * Set the recursion count for the buffer within this transaction + * to 0. + */ + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + bip->bli_recur = 0; + + /* + * Take a reference for this transaction on the buf item. + */ + atomic_inc(&bip->bli_refcount); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip); + + /* + * Initialize b_fsprivate2 so we can find it with incore_match() + * above. + */ + XFS_BUF_SET_FSPRIVATE2(bp, tp); + + xfs_buf_item_trace("GETSB", bip); + return (bp); +} + +#ifdef DEBUG +dev_t xfs_error_dev = 0; +int xfs_do_error; +int xfs_req_num; +int xfs_error_mod = 33; +#endif + +/* + * Get and lock the buffer for the caller if it is not already + * locked within the given transaction. If it has not yet been + * read in, read it from disk. If it is already locked + * within the transaction and already read in, just increment its + * lock recursion count and return a pointer to it. + * + * Use the fast path function xfs_trans_buf_item_match() or the buffer + * cache routine incore_match() to find the buffer + * if it is already owned by this transaction. + * + * If we don't already own the buffer, use read_buf() to get it. + * If it doesn't yet have an associated xfs_buf_log_item structure, + * then allocate one and add the item to this transaction. + * + * If the transaction pointer is NULL, make this just a normal + * read_buf() call. + */ +int +xfs_trans_read_buf( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len, + uint flags, + xfs_buf_t **bpp) +{ + xfs_buf_t *bp; + xfs_buf_log_item_t *bip; + int error; + + if (flags == 0) + flags = XFS_BUF_LOCK | XFS_BUF_MAPPED; + + /* + * Default to a normal get_buf() call if the tp is NULL. 
+ */ + if (tp == NULL) { + bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY); + if (!bp) + return XFS_ERROR(ENOMEM); + + if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) { + xfs_ioerror_alert("xfs_trans_read_buf", mp, + bp, blkno); + error = XFS_BUF_GETERROR(bp); + xfs_buf_relse(bp); + return error; + } +#ifdef DEBUG + if (xfs_do_error && (bp != NULL)) { + if (xfs_error_dev == target->pbr_dev) { + if (((xfs_req_num++) % xfs_error_mod) == 0) { + xfs_buf_relse(bp); + printk("Returning error!\n"); + return XFS_ERROR(EIO); + } + } + } +#endif + if (XFS_FORCED_SHUTDOWN(mp)) + goto shutdown_abort; + *bpp = bp; + return 0; + } + + /* + * If we find the buffer in the cache with this transaction + * pointer in its b_fsprivate2 field, then we know we already + * have it locked. If it is already read in we just increment + * the lock recursion count and return the buffer to the caller. + * If the buffer is not yet read in, then we read it in, increment + * the lock recursion count, and return it to the caller. + */ + if (tp->t_items.lic_next == NULL) { + bp = xfs_trans_buf_item_match(tp, target, blkno, len); + } else { + bp = xfs_trans_buf_item_match_all(tp, target, blkno, len); + } + if (bp != NULL) { + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT((XFS_BUF_ISERROR(bp)) == 0); + if (!(XFS_BUF_ISDONE(bp))) { + xfs_buftrace("READ_BUF_INCORE !DONE", bp); + ASSERT(!XFS_BUF_ISASYNC(bp)); + XFS_BUF_READ(bp); + xfsbdstrat(tp->t_mountp, bp); + xfs_iowait(bp); + if (XFS_BUF_GETERROR(bp) != 0) { + xfs_ioerror_alert("xfs_trans_read_buf", mp, + bp, blkno); + error = XFS_BUF_GETERROR(bp); + xfs_buf_relse(bp); + /* + * We can gracefully recover from most + * read errors. Ones we can't are those + * that happen after the transaction's + * already dirty. + */ + if (tp->t_flags & XFS_TRANS_DIRTY) + xfs_force_shutdown(tp->t_mountp, + XFS_METADATA_IO_ERROR); + return error; + } + } + /* + * We never locked this buf ourselves, so we shouldn't + * brelse it either. Just get out. + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp); + *bpp = NULL; + return XFS_ERROR(EIO); + } + + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + bip->bli_recur++; + + ASSERT(atomic_read(&bip->bli_refcount) > 0); + xfs_buf_item_trace("READ RECUR", bip); + *bpp = bp; + return 0; + } + + /* + * We always specify the BUF_BUSY flag within a transaction so + * that get_buf does not try to push out a delayed write buffer + * which might cause another transaction to take place (if the + * buffer was delayed alloc). Such recursive transactions can + * easily deadlock with our current transaction as well as cause + * us to run out of stack space. 
+ */ + bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY); + if (bp == NULL) { + *bpp = NULL; + return 0; + } + if (XFS_BUF_GETERROR(bp) != 0) { + XFS_BUF_SUPER_STALE(bp); + xfs_buftrace("READ ERROR", bp); + error = XFS_BUF_GETERROR(bp); + + xfs_ioerror_alert("xfs_trans_read_buf", mp, + bp, blkno); + if (tp->t_flags & XFS_TRANS_DIRTY) + xfs_force_shutdown(tp->t_mountp, XFS_METADATA_IO_ERROR); + xfs_buf_relse(bp); + return error; + } +#ifdef DEBUG + if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) { + if (xfs_error_dev == target->pbr_dev) { + if (((xfs_req_num++) % xfs_error_mod) == 0) { + xfs_force_shutdown(tp->t_mountp, + XFS_METADATA_IO_ERROR); + xfs_buf_relse(bp); + printk("Returning error in trans!\n"); + return XFS_ERROR(EIO); + } + } + } +#endif + if (XFS_FORCED_SHUTDOWN(mp)) + goto shutdown_abort; + + /* + * The xfs_buf_log_item pointer is stored in b_fsprivate. If + * it doesn't have one yet, then allocate one and initialize it. + * The checks to see if one is there are in xfs_buf_item_init(). + */ + xfs_buf_item_init(bp, tp->t_mountp); + + /* + * Set the recursion count for the buffer within this transaction + * to 0. + */ + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + bip->bli_recur = 0; + + /* + * Take a reference for this transaction on the buf item. + */ + atomic_inc(&bip->bli_refcount); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip); + + /* + * Initialize b_fsprivate2 so we can find it with incore_match() + * above. + */ + XFS_BUF_SET_FSPRIVATE2(bp, tp); + + xfs_buftrace("TRANS READ", bp); + xfs_buf_item_trace("READ", bip); + *bpp = bp; + return 0; + +shutdown_abort: + /* + * the theory here is that buffer is good but we're + * bailing out because the filesystem is being forcibly + * shut down. So we should leave the b_flags alone since + * the buffer's not staled and just get out. + */ +#if defined(DEBUG) + if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp)) + cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp); +#endif + ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) != + (XFS_B_STALE|XFS_B_DELWRI)); + + xfs_buftrace("READ_BUF XFSSHUTDN", bp); + xfs_buf_relse(bp); + *bpp = NULL; + return XFS_ERROR(EIO); +} + + +/* + * Release the buffer bp which was previously acquired with one of the + * xfs_trans_... buffer allocation routines if the buffer has not + * been modified within this transaction. If the buffer is modified + * within this transaction, do decrement the recursion count but do + * not release the buffer even if the count goes to 0. If the buffer is not + * modified within the transaction, decrement the recursion count and + * release the buffer if the recursion count goes to 0. + * + * If the buffer is to be released and it was not modified before + * this transaction began, then free the buf_log_item associated with it. + * + * If the transaction pointer is NULL, make this just a normal + * brelse() call. + */ +void +xfs_trans_brelse(xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + xfs_log_item_t *lip; + xfs_log_item_desc_t *lidp; + + /* + * Default to a normal brelse() call if the tp is NULL. + */ + if (tp == NULL) { + ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL); + /* + * If there's a buf log item attached to the buffer, + * then let the AIL know that the buffer is being + * unlocked. 
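+ * That matters when this buffer was the item pinning the
+ * tail of the log: xfs_trans_unlocked_item() will then let
+ * the log manager wake anyone waiting for log space.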
+ */ + if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { + lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + if (lip->li_type == XFS_LI_BUF) { + bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); + xfs_trans_unlocked_item( + bip->bli_item.li_mountp, + lip); + } + } + xfs_buf_relse(bp); + return; + } + + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip->bli_item.li_type == XFS_LI_BUF); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + + /* + * Find the item descriptor pointing to this buffer's + * log item. It must be there. + */ + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); + ASSERT(lidp != NULL); + + /* + * If the release is just for a recursive lock, + * then decrement the count and return. + */ + if (bip->bli_recur > 0) { + bip->bli_recur--; + xfs_buf_item_trace("RELSE RECUR", bip); + return; + } + + /* + * If the buffer is dirty within this transaction, we can't + * release it until we commit. + */ + if (lidp->lid_flags & XFS_LID_DIRTY) { + xfs_buf_item_trace("RELSE DIRTY", bip); + return; + } + + /* + * If the buffer has been invalidated, then we can't release + * it until the transaction commits to disk unless it is re-dirtied + * as part of this transaction. This prevents us from pulling + * the item from the AIL before we should. + */ + if (bip->bli_flags & XFS_BLI_STALE) { + xfs_buf_item_trace("RELSE STALE", bip); + return; + } + + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + xfs_buf_item_trace("RELSE", bip); + + /* + * Free up the log item descriptor tracking the released item. + */ + xfs_trans_free_item(tp, lidp); + + /* + * Clear the hold flag in the buf log item if it is set. + * We wouldn't want the next user of the buffer to + * get confused. + */ + if (bip->bli_flags & XFS_BLI_HOLD) { + bip->bli_flags &= ~XFS_BLI_HOLD; + } + + /* + * Drop our reference to the buf log item. + */ + atomic_dec(&bip->bli_refcount); + + /* + * If the buf item is not tracking data in the log, then + * we must free it before releasing the buffer back to the + * free pool. Before releasing the buffer to the free pool, + * clear the transaction pointer in b_fsprivate2 to dissolve + * its relation to this transaction. + */ + if (!xfs_buf_item_dirty(bip)) { +/*** + ASSERT(bp->b_pincount == 0); +***/ + ASSERT(atomic_read(&bip->bli_refcount) == 0); + ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL)); + ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF)); + xfs_buf_item_relse(bp); + bip = NULL; + } + XFS_BUF_SET_FSPRIVATE2(bp, NULL); + + /* + * If we've still got a buf log item on the buffer, then + * tell the AIL that the buffer is being unlocked. + */ + if (bip != NULL) { + xfs_trans_unlocked_item(bip->bli_item.li_mountp, + (xfs_log_item_t*)bip); + } + + xfs_buf_relse(bp); + return; +} + +/* + * Add the locked buffer to the transaction. + * The buffer must be locked, and it cannot be associated with any + * transaction. + * + * If the buffer does not yet have a buf log item associated with it, + * then allocate one for it. Then add the buf item to the transaction. + */ +void +xfs_trans_bjoin(xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL); + + /* + * The xfs_buf_log_item pointer is stored in b_fsprivate. If + * it doesn't have one yet, then allocate one and initialize it. + * The checks to see if one is there are in xfs_buf_item_init(). 
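+ * (Typical use, as a sketch: the caller obtained bp outside
+ * any transaction, e.g. via xfs_buf_get_flags(), and attaches
+ * it here with
+ *
+ *	xfs_trans_bjoin(tp, bp);
+ *
+ * after which the buffer is tracked just as if it had been
+ * acquired with xfs_trans_get_buf().)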
+ */ + xfs_buf_item_init(bp, tp->t_mountp); + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + + /* + * Take a reference for this transaction on the buf item. + */ + atomic_inc(&bip->bli_refcount); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip); + + /* + * Initialize b_fsprivate2 so we can find it with incore_match() + * in xfs_trans_get_buf() and friends above. + */ + XFS_BUF_SET_FSPRIVATE2(bp, tp); + + xfs_buf_item_trace("BJOIN", bip); +} + +/* + * Mark the buffer as not needing to be unlocked when the buf item's + * IOP_UNLOCK() routine is called. The buffer must already be locked + * and associated with the given transaction. + */ +/* ARGSUSED */ +void +xfs_trans_bhold(xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + bip->bli_flags |= XFS_BLI_HOLD; + xfs_buf_item_trace("BHOLD", bip); +} + +/* + * This function is used to indicate that the buffer should not be + * unlocked until the transaction is committed to disk. Since we + * are going to keep the lock held, make the transaction synchronous + * so that the lock is not held too long. + * + * It uses the log item descriptor flag XFS_LID_SYNC_UNLOCK to + * delay the buf items's unlock call until the transaction is + * committed to disk or aborted. + */ +void +xfs_trans_bhold_until_committed(xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_log_item_desc_t *lidp; + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); + ASSERT(lidp != NULL); + + lidp->lid_flags |= XFS_LID_SYNC_UNLOCK; + xfs_buf_item_trace("BHOLD UNTIL COMMIT", bip); + + xfs_trans_set_sync(tp); +} + +/* + * This is called to mark bytes first through last inclusive of the given + * buffer as needing to be logged when the transaction is committed. + * The buffer must already be associated with the given transaction. + * + * First and last are numbers relative to the beginning of this buffer, + * so the first byte in the buffer is numbered 0 regardless of the + * value of b_blkno. + */ +void +xfs_trans_log_buf(xfs_trans_t *tp, + xfs_buf_t *bp, + uint first, + uint last) +{ + xfs_buf_log_item_t *bip; + xfs_log_item_desc_t *lidp; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); + ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) || + (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks)); + + /* + * Mark the buffer as needing to be written out eventually, + * and set its iodone function to remove the buffer's buf log + * item from the AIL and free it when the buffer is flushed + * to disk. 
See xfs_buf_attach_iodone() for more details + * on li_cb and xfs_buf_iodone_callbacks(). + * If we end up aborting this transaction, we trap this buffer + * inside the b_bdstrat callback so that this won't get written to + * disk. + */ + XFS_BUF_DELAYWRITE(bp); + XFS_BUF_DONE(bp); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); + bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone; + + /* + * If we invalidated the buffer within this transaction, then + * cancel the invalidation now that we're dirtying the buffer + * again. There are no races with the code in xfs_buf_item_unpin(), + * because we have a reference to the buffer this entire time. + */ + if (bip->bli_flags & XFS_BLI_STALE) { + xfs_buf_item_trace("BLOG UNSTALE", bip); + bip->bli_flags &= ~XFS_BLI_STALE; + /* note this will have to change for page_buf interface... unstale isn't really an option RMC */ + ASSERT(XFS_BUF_ISSTALE(bp)); + XFS_BUF_UNSTALE(bp); + bip->bli_format.blf_flags &= ~XFS_BLI_CANCEL; + } + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; + lidp->lid_flags &= ~XFS_LID_BUF_STALE; + bip->bli_flags |= XFS_BLI_LOGGED; + xfs_buf_item_log(bip, first, last); + xfs_buf_item_trace("BLOG", bip); +} + + +/* + * This called to invalidate a buffer that is being used within + * a transaction. Typically this is because the blocks in the + * buffer are being freed, so we need to prevent it from being + * written out when we're done. Allowing it to be written again + * might overwrite data in the free blocks if they are reallocated + * to a file. + * + * We prevent the buffer from being written out by clearing the + * B_DELWRI flag. We can't always + * get rid of the buf log item at this point, though, because + * the buffer may still be pinned by another transaction. If that + * is the case, then we'll wait until the buffer is committed to + * disk for the last time (we can tell by the ref count) and + * free it in xfs_buf_item_unpin(). Until it is cleaned up we + * will keep the buffer locked so that the buffer and buf log item + * are not reused. + */ +void +xfs_trans_binval( + xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_log_item_desc_t *lidp; + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); + ASSERT(lidp != NULL); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + + if (bip->bli_flags & XFS_BLI_STALE) { + /* + * If the buffer is already invalidated, then + * just return. + */ + ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); + ASSERT(XFS_BUF_ISSTALE(bp)); + ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY))); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_INODE_BUF)); + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + ASSERT(lidp->lid_flags & XFS_LID_DIRTY); + ASSERT(tp->t_flags & XFS_TRANS_DIRTY); + xfs_buftrace("XFS_BINVAL RECUR", bp); + xfs_buf_item_trace("BINVAL RECUR", bip); + return; + } + + /* + * Clear the dirty bit in the buffer and set the STALE flag + * in the buf log item. The STALE flag will be used in + * xfs_buf_item_unpin() to determine if it should clean up + * when the last reference to the buf item is given up. 
+ * We set the XFS_BLI_CANCEL flag in the buf log format structure + * and log the buf item. This will be used at recovery time + * to determine that copies of the buffer in the log before + * this should not be replayed. + * We mark the item descriptor and the transaction dirty so + * that we'll hold the buffer until after the commit. + * + * Since we're invalidating the buffer, we also clear the state + * about which parts of the buffer have been logged. We also + * clear the flag indicating that this is an inode buffer since + * the data in the buffer will no longer be valid. + * + * We set the stale bit in the buffer as well since we're getting + * rid of it. + */ + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_STALE(bp); + bip->bli_flags |= XFS_BLI_STALE; + bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY); + bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF; + bip->bli_format.blf_flags |= XFS_BLI_CANCEL; + memset((char *)(bip->bli_format.blf_data_map), 0, + (bip->bli_format.blf_map_size * sizeof(uint))); + lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE; + tp->t_flags |= XFS_TRANS_DIRTY; + xfs_buftrace("XFS_BINVAL", bp); + xfs_buf_item_trace("BINVAL", bip); +} + +/* + * This call is used to indicate that the buffer contains on-disk + * inodes which must be handled specially during recovery. They + * require special handling because only the di_next_unlinked from + * the inodes in the buffer should be recovered. The rest of the + * data in the buffer is logged via the inodes themselves. + * + * All we do is set the XFS_BLI_INODE_BUF flag in the buffer's log + * format structure so that we'll know what to do at recovery time. + */ +/* ARGSUSED */ +void +xfs_trans_inode_buf( + xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + + bip->bli_format.blf_flags |= XFS_BLI_INODE_BUF; +} + + +/* + * Mark the buffer as being one which contains newly allocated + * inodes. We need to make sure that even if this buffer is + * relogged as an 'inode buf' we still recover all of the inode + * images in the face of a crash. This works in coordination with + * xfs_buf_item_committed() to ensure that the buffer remains in the + * AIL at its original location even after it has been relogged. + */ +/* ARGSUSED */ +void +xfs_trans_inode_alloc_buf( + xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF)); + + bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; +} + + +/* + * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of + * dquots. However, unlike in inode buffer recovery, dquot buffers get + * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag). + * The only thing that makes dquot buffers different from regular + * buffers is that we must not replay dquot bufs when recovering + * if a _corresponding_ quotaoff has happened. We also have to distinguish + * between usr dquot bufs and grp dquot bufs, because usr and grp quotas + * can be turned off independently. 
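+ *
+ * Caller sketch (illustrative; these are the only two type values
+ * the ASSERT below accepts):
+ *
+ *	xfs_trans_dquot_buf(tp, bp, XFS_BLI_UDQUOT_BUF);
+ * or
+ *	xfs_trans_dquot_buf(tp, bp, XFS_BLI_GDQUOT_BUF);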
+ */ +/* ARGSUSED */ +void +xfs_trans_dquot_buf( + xfs_trans_t *tp, + xfs_buf_t *bp, + uint type) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(type == XFS_BLI_UDQUOT_BUF || + type == XFS_BLI_GDQUOT_BUF); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + + bip->bli_format.blf_flags |= type; +} + +/* + * Check to see if a buffer matching the given parameters is already + * a part of the given transaction. Only check the first, embedded + * chunk, since we don't want to spend all day scanning large transactions. + */ +STATIC xfs_buf_t * +xfs_trans_buf_item_match( + xfs_trans_t *tp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_desc_t *lidp; + xfs_buf_log_item_t *blip; + xfs_buf_t *bp; + int i; + + bp = NULL; + len = BBTOB(len); + licp = &tp->t_items; + if (!XFS_LIC_ARE_ALL_FREE(licp)) { + for (i = 0; i < licp->lic_unused; i++) { + /* + * Skip unoccupied slots. + */ + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lidp = XFS_LIC_SLOT(licp, i); + blip = (xfs_buf_log_item_t *)lidp->lid_item; + if (blip->bli_item.li_type != XFS_LI_BUF) { + continue; + } + + bp = blip->bli_buf; + if ((XFS_BUF_TARGET(bp) == target) && + (XFS_BUF_ADDR(bp) == blkno) && + (XFS_BUF_COUNT(bp) == len)) { + /* + * We found it. Break out and + * return the pointer to the buffer. + */ + break; + } else { + bp = NULL; + } + } + } + return bp; +} + +/* + * Check to see if a buffer matching the given parameters is already + * a part of the given transaction. Check all the chunks, we + * want to be thorough. + */ +STATIC xfs_buf_t * +xfs_trans_buf_item_match_all( + xfs_trans_t *tp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_desc_t *lidp; + xfs_buf_log_item_t *blip; + xfs_buf_t *bp; + int i; + + bp = NULL; + len = BBTOB(len); + for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { + ASSERT(licp == &tp->t_items); + ASSERT(licp->lic_next == NULL); + return NULL; + } + for (i = 0; i < licp->lic_unused; i++) { + /* + * Skip unoccupied slots. + */ + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lidp = XFS_LIC_SLOT(licp, i); + blip = (xfs_buf_log_item_t *)lidp->lid_item; + if (blip->bli_item.li_type != XFS_LI_BUF) { + continue; + } + + bp = blip->bli_buf; + if ((XFS_BUF_TARGET(bp) == target) && + (XFS_BUF_ADDR(bp) == blkno) && + (XFS_BUF_COUNT(bp) == len)) { + /* + * We found it. Break out and + * return the pointer to the buffer. + */ + return bp; + } + } + } + return NULL; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_trans.c linux.21rc5-ac2/fs/xfs/xfs_trans.c --- linux.21rc5/fs/xfs/xfs_trans.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_trans.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1307 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_error.h" +#include "xfs_trans_priv.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_quota.h" +#include "xfs_trans_space.h" + + +STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *); +STATIC uint xfs_trans_count_vecs(xfs_trans_t *); +STATIC void xfs_trans_fill_vecs(xfs_trans_t *, xfs_log_iovec_t *); +STATIC void xfs_trans_uncommit(xfs_trans_t *, uint); +STATIC void xfs_trans_committed(xfs_trans_t *, int); +STATIC void xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int); +STATIC void xfs_trans_free(xfs_trans_t *); + +kmem_zone_t *xfs_trans_zone; + + +/* + * Initialize the precomputed transaction reservation values + * in the mount structure. 
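+ * Computing these once at mount time avoids redoing the XFS_CALC_*
+ * arithmetic for every transaction; assuming the usual xfs_trans.h
+ * definitions, callers read them back through macros such as
+ * XFS_WRITE_LOG_RES(mp), which expands to mp->m_reservations.tr_write.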
+ */
+void
+xfs_trans_init(
+ xfs_mount_t *mp)
+{
+ xfs_trans_reservations_t *resp;
+
+ resp = &(mp->m_reservations);
+ resp->tr_write =
+ (uint)(XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_itruncate =
+ (uint)(XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_rename =
+ (uint)(XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_link = (uint)XFS_CALC_LINK_LOG_RES(mp);
+ resp->tr_remove =
+ (uint)(XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_symlink =
+ (uint)(XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_create =
+ (uint)(XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_mkdir =
+ (uint)(XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_ifree =
+ (uint)(XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_ichange =
+ (uint)(XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_growdata = (uint)XFS_CALC_GROWDATA_LOG_RES(mp);
+ resp->tr_swrite = (uint)XFS_CALC_SWRITE_LOG_RES(mp);
+ resp->tr_writeid = (uint)XFS_CALC_WRITEID_LOG_RES(mp);
+ resp->tr_addafork =
+ (uint)(XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_attrinval = (uint)XFS_CALC_ATTRINVAL_LOG_RES(mp);
+ resp->tr_attrset =
+ (uint)(XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_attrrm =
+ (uint)(XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
+ resp->tr_clearagi = (uint)XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
+ resp->tr_growrtalloc = (uint)XFS_CALC_GROWRTALLOC_LOG_RES(mp);
+ resp->tr_growrtzero = (uint)XFS_CALC_GROWRTZERO_LOG_RES(mp);
+ resp->tr_growrtfree = (uint)XFS_CALC_GROWRTFREE_LOG_RES(mp);
+}
+
+/*
+ * This routine is called to allocate a transaction structure.
+ * The type parameter indicates the type of the transaction. These
+ * are enumerated in xfs_trans.h.
+ *
+ * Dynamically allocate the transaction structure from the transaction
+ * zone, initialize it, and return it to the caller.
+ */
+xfs_trans_t *
+xfs_trans_alloc(
+ xfs_mount_t *mp,
+ uint type)
+{
+ xfs_check_frozen(mp, NULL, XFS_FREEZE_TRANS);
+ return (_xfs_trans_alloc(mp, type));
+}
+
+xfs_trans_t *
+_xfs_trans_alloc(
+ xfs_mount_t *mp,
+ uint type)
+{
+ xfs_trans_t *tp;
+
+ ASSERT(xfs_trans_zone != NULL);
+ tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+ tp->t_dqinfo = NULL;
+
+ /*
+ * Initialize the transaction structure.
+ */
+ tp->t_magic = XFS_TRANS_MAGIC;
+ tp->t_type = type;
+ tp->t_mountp = mp;
+ tp->t_items_free = XFS_LIC_NUM_SLOTS;
+ tp->t_busy_free = XFS_LBC_NUM_SLOTS;
+ XFS_LIC_INIT(&(tp->t_items));
+ XFS_LBC_INIT(&(tp->t_busy));
+
+ return (tp);
+}
+
+/*
+ * This is called to create a new transaction which will share the
+ * permanent log reservation of the given transaction. The remaining
+ * unused block and rt extent reservations are also inherited. This
+ * implies that the original transaction is no longer allowed to allocate
+ * blocks. Locks and log items, however, are not inherited. They must
+ * be added to the new transaction explicitly.
+ */
+xfs_trans_t *
+xfs_trans_dup(
+ xfs_trans_t *tp)
+{
+ xfs_trans_t *ntp;
+
+ ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+
+ /*
+ * Initialize the new transaction structure.
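+ * (For context, a sketch of the usual caller, which "rolls" a
+ * permanent-reservation transaction -- log_res and log_count are
+ * placeholders, not code from this patch:
+ *
+ *	ntp = xfs_trans_dup(tp);
+ *	error = xfs_trans_commit(tp, 0, NULL);
+ *	if (!error)
+ *		error = xfs_trans_reserve(ntp, 0, log_res, 0,
+ *				XFS_TRANS_PERM_LOG_RES, log_count);
+ * )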
+ */ + ntp->t_magic = XFS_TRANS_MAGIC; + ntp->t_type = tp->t_type; + ntp->t_mountp = tp->t_mountp; + ntp->t_items_free = XFS_LIC_NUM_SLOTS; + ntp->t_busy_free = XFS_LBC_NUM_SLOTS; + XFS_LIC_INIT(&(ntp->t_items)); + XFS_LBC_INIT(&(ntp->t_busy)); + + ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); + +#if defined(XLOG_NOLOG) || defined(DEBUG) + ASSERT(!xlog_debug || tp->t_ticket != NULL); +#else + ASSERT(tp->t_ticket != NULL); +#endif + ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE); + ntp->t_ticket = tp->t_ticket; + ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used; + tp->t_blk_res = tp->t_blk_res_used; + ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used; + tp->t_rtx_res = tp->t_rtx_res_used; + + XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp); + + atomic_inc(&tp->t_mountp->m_active_trans); + return ntp; +} + +/* + * This is called to reserve free disk blocks and log space for the + * given transaction. This must be done before allocating any resources + * within the transaction. + * + * This will return ENOSPC if there are not enough blocks available. + * It will sleep waiting for available log space. + * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which + * is used by long running transactions. If any one of the reservations + * fails then they will all be backed out. + * + * This does not do quota reservations. That typically is done by the + * caller afterwards. + */ +int +xfs_trans_reserve( + xfs_trans_t *tp, + uint blocks, + uint logspace, + uint rtextents, + uint flags, + uint logcount) +{ + int log_flags; + int error; + int rsvd; + + error = 0; + rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; + + /* Mark this thread as being in a transaction */ + current->flags |= PF_FSTRANS; + + /* + * Attempt to reserve the needed disk blocks by decrementing + * the number needed from the number available. This will + * fail if the count would go below zero. + */ + if (blocks > 0) { + error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS, + -blocks, rsvd); + if (error != 0) { + current->flags &= ~PF_FSTRANS; + return (XFS_ERROR(ENOSPC)); + } + tp->t_blk_res += blocks; + } + + /* + * Reserve the log space needed for this transaction. + */ + if (logspace > 0) { + ASSERT((tp->t_log_res == 0) || (tp->t_log_res == logspace)); + ASSERT((tp->t_log_count == 0) || + (tp->t_log_count == logcount)); + if (flags & XFS_TRANS_PERM_LOG_RES) { + log_flags = XFS_LOG_PERM_RESERV; + tp->t_flags |= XFS_TRANS_PERM_LOG_RES; + } else { + ASSERT(tp->t_ticket == NULL); + ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES)); + log_flags = 0; + } + + error = xfs_log_reserve(tp->t_mountp, logspace, logcount, + &tp->t_ticket, + XFS_TRANSACTION, log_flags); + if (error) { + goto undo_blocks; + } + tp->t_log_res = logspace; + tp->t_log_count = logcount; + } + + /* + * Attempt to reserve the needed realtime extents by decrementing + * the number needed from the number available. This will + * fail if the count would go below zero. + */ + if (rtextents > 0) { + error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS, + -rtextents, rsvd); + if (error) { + error = XFS_ERROR(ENOSPC); + goto undo_log; + } + tp->t_rtx_res += rtextents; + } + + return 0; + + /* + * Error cases jump to one of these labels to undo any + * reservations which have already been performed. 
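+ *
+ * (For reference, a caller sketch -- illustrative only;
+ * XFS_TRANS_WRITE, XFS_WRITE_LOG_RES and XFS_WRITE_LOG_COUNT are
+ * assumed from elsewhere in this patch:
+ *
+ *	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE);
+ *	error = xfs_trans_reserve(tp, nblks, XFS_WRITE_LOG_RES(mp), 0,
+ *			XFS_TRANS_PERM_LOG_RES, XFS_WRITE_LOG_COUNT);
+ *	if (error)
+ *		xfs_trans_cancel(tp, 0);
+ * )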
+ */
+undo_log:
+ if (logspace > 0) {
+ if (flags & XFS_TRANS_PERM_LOG_RES) {
+ log_flags = XFS_LOG_REL_PERM_RESERV;
+ } else {
+ log_flags = 0;
+ }
+ xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
+ tp->t_ticket = NULL;
+ tp->t_log_res = 0;
+ tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
+ }
+
+undo_blocks:
+ if (blocks > 0) {
+ (void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
+ blocks, rsvd);
+ tp->t_blk_res = 0;
+ }
+
+ current->flags &= ~PF_FSTRANS;
+
+ return (error);
+}
+
+
+/*
+ * This is called to set a callback to be called when the given
+ * transaction is committed to disk. The transaction pointer and the
+ * argument pointer will be passed to the callback routine.
+ *
+ * Only one callback can be associated with any single transaction.
+ */
+void
+xfs_trans_callback(
+ xfs_trans_t *tp,
+ xfs_trans_callback_t callback,
+ void *arg)
+{
+ ASSERT(tp->t_callback == NULL);
+ tp->t_callback = callback;
+ tp->t_callarg = arg;
+}
+
+
+/*
+ * Record the indicated change to the given field for application
+ * to the file system's superblock when the transaction commits.
+ * For now, just store the change in the transaction structure.
+ *
+ * Mark the transaction structure to indicate that the superblock
+ * needs to be updated before committing.
+ */
+void
+xfs_trans_mod_sb(
+ xfs_trans_t *tp,
+ uint field,
+ long delta)
+{
+
+ switch (field) {
+ case XFS_TRANS_SB_ICOUNT:
+ ASSERT(delta > 0);
+ tp->t_icount_delta += delta;
+ break;
+ case XFS_TRANS_SB_IFREE:
+ tp->t_ifree_delta += delta;
+ break;
+ case XFS_TRANS_SB_FDBLOCKS:
+ /*
+ * Track the number of blocks allocated in the
+ * transaction. Make sure it does not exceed the
+ * number reserved.
+ */
+ if (delta < 0) {
+ tp->t_blk_res_used += (uint)-delta;
+ ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
+ }
+ tp->t_fdblocks_delta += delta;
+ break;
+ case XFS_TRANS_SB_RES_FDBLOCKS:
+ /*
+ * The allocation has already been applied to the
+ * in-core superblock's counter. This should only
+ * be applied to the on-disk superblock.
+ */
+ ASSERT(delta < 0);
+ tp->t_res_fdblocks_delta += delta;
+ break;
+ case XFS_TRANS_SB_FREXTENTS:
+ /*
+ * Track the number of blocks allocated in the
+ * transaction. Make sure it does not exceed the
+ * number reserved.
+ */
+ if (delta < 0) {
+ tp->t_rtx_res_used += (uint)-delta;
+ ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
+ }
+ tp->t_frextents_delta += delta;
+ break;
+ case XFS_TRANS_SB_RES_FREXTENTS:
+ /*
+ * The allocation has already been applied to the
+ * in-core superblock's counter. This should only
+ * be applied to the on-disk superblock.
+ */ + ASSERT(delta < 0); + tp->t_res_frextents_delta += delta; + break; + case XFS_TRANS_SB_DBLOCKS: + ASSERT(delta > 0); + tp->t_dblocks_delta += delta; + break; + case XFS_TRANS_SB_AGCOUNT: + ASSERT(delta > 0); + tp->t_agcount_delta += delta; + break; + case XFS_TRANS_SB_IMAXPCT: + tp->t_imaxpct_delta += delta; + break; + case XFS_TRANS_SB_REXTSIZE: + tp->t_rextsize_delta += delta; + break; + case XFS_TRANS_SB_RBMBLOCKS: + tp->t_rbmblocks_delta += delta; + break; + case XFS_TRANS_SB_RBLOCKS: + tp->t_rblocks_delta += delta; + break; + case XFS_TRANS_SB_REXTENTS: + tp->t_rextents_delta += delta; + break; + case XFS_TRANS_SB_REXTSLOG: + tp->t_rextslog_delta += delta; + break; + default: + ASSERT(0); + return; + } + + tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY); +} + +/* + * xfs_trans_apply_sb_deltas() is called from the commit code + * to bring the superblock buffer into the current transaction + * and modify it as requested by earlier calls to xfs_trans_mod_sb(). + * + * For now we just look at each field allowed to change and change + * it if necessary. + */ +STATIC void +xfs_trans_apply_sb_deltas( + xfs_trans_t *tp) +{ + xfs_sb_t *sbp; + xfs_buf_t *bp; + int whole = 0; + + bp = xfs_trans_getsb(tp, tp->t_mountp, 0); + sbp = XFS_BUF_TO_SBP(bp); + + /* + * Check that superblock mods match the mods made to AGF counters. + */ + ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) == + (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta + + tp->t_ag_btree_delta)); + + if (tp->t_icount_delta != 0) { + INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta); + } + if (tp->t_ifree_delta != 0) { + INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta); + } + + if (tp->t_fdblocks_delta != 0) { + INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta); + } + if (tp->t_res_fdblocks_delta != 0) { + INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta); + } + + if (tp->t_frextents_delta != 0) { + INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_frextents_delta); + } + if (tp->t_dblocks_delta != 0) { + INT_MOD(sbp->sb_dblocks, ARCH_CONVERT, tp->t_dblocks_delta); + whole = 1; + } + if (tp->t_agcount_delta != 0) { + INT_MOD(sbp->sb_agcount, ARCH_CONVERT, tp->t_agcount_delta); + whole = 1; + } + if (tp->t_imaxpct_delta != 0) { + INT_MOD(sbp->sb_imax_pct, ARCH_CONVERT, tp->t_imaxpct_delta); + whole = 1; + } + if (tp->t_rextsize_delta != 0) { + INT_MOD(sbp->sb_rextsize, ARCH_CONVERT, tp->t_rextsize_delta); + whole = 1; + } + if (tp->t_rbmblocks_delta != 0) { + INT_MOD(sbp->sb_rbmblocks, ARCH_CONVERT, tp->t_rbmblocks_delta); + whole = 1; + } + if (tp->t_rblocks_delta != 0) { + INT_MOD(sbp->sb_rblocks, ARCH_CONVERT, tp->t_rblocks_delta); + whole = 1; + } + if (tp->t_rextents_delta != 0) { + INT_MOD(sbp->sb_rextents, ARCH_CONVERT, tp->t_rextents_delta); + whole = 1; + } + if (tp->t_rextslog_delta != 0) { + INT_MOD(sbp->sb_rextslog, ARCH_CONVERT, tp->t_rextslog_delta); + whole = 1; + } + + if (whole) + /* + * Log the whole thing, the fields are discontiguous. + */ + xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1); + else + /* + * Since all the modifiable fields are contiguous, we + * can get away with this. + */ + xfs_trans_log_buf(tp, bp, offsetof(xfs_sb_t, sb_icount), + offsetof(xfs_sb_t, sb_frextents) + + sizeof(sbp->sb_frextents) - 1); + + XFS_MTOVFS(tp->t_mountp)->vfs_super->s_dirt = 1; +} + +/* + * xfs_trans_unreserve_and_mod_sb() is called to release unused + * reservations and apply superblock counter changes to the in-core + * superblock. 
+ *
+ * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
+ */
+void
+xfs_trans_unreserve_and_mod_sb(
+	xfs_trans_t	*tp)
+{
+	xfs_mod_sb_t	msb[14];	/* If you add cases, add entries */
+	xfs_mod_sb_t	*msbp;
+	/* REFERENCED */
+	int		error;
+	int		rsvd;
+
+	msbp = msb;
+	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+
+	/*
+	 * Release any reserved blocks. Any that were allocated
+	 * will be taken back again by fdblocks_delta below.
+	 */
+	if (tp->t_blk_res > 0) {
+		msbp->msb_field = XFS_SBS_FDBLOCKS;
+		msbp->msb_delta = tp->t_blk_res;
+		msbp++;
+	}
+
+	/*
+	 * Release any reserved real time extents. Any that were
+	 * allocated will be taken back again by frextents_delta below.
+	 */
+	if (tp->t_rtx_res > 0) {
+		msbp->msb_field = XFS_SBS_FREXTENTS;
+		msbp->msb_delta = tp->t_rtx_res;
+		msbp++;
+	}
+
+	/*
+	 * Apply any superblock modifications to the in-core version.
+	 * The t_res_fdblocks_delta and t_res_frextents_delta fields are
+	 * explicitly NOT applied to the in-core superblock.
+	 * The idea is that this has already been done.
+	 */
+	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
+		if (tp->t_icount_delta != 0) {
+			msbp->msb_field = XFS_SBS_ICOUNT;
+			msbp->msb_delta = (int)tp->t_icount_delta;
+			msbp++;
+		}
+		if (tp->t_ifree_delta != 0) {
+			msbp->msb_field = XFS_SBS_IFREE;
+			msbp->msb_delta = (int)tp->t_ifree_delta;
+			msbp++;
+		}
+		if (tp->t_fdblocks_delta != 0) {
+			msbp->msb_field = XFS_SBS_FDBLOCKS;
+			msbp->msb_delta = (int)tp->t_fdblocks_delta;
+			msbp++;
+		}
+		if (tp->t_frextents_delta != 0) {
+			msbp->msb_field = XFS_SBS_FREXTENTS;
+			msbp->msb_delta = (int)tp->t_frextents_delta;
+			msbp++;
+		}
+		if (tp->t_dblocks_delta != 0) {
+			msbp->msb_field = XFS_SBS_DBLOCKS;
+			msbp->msb_delta = (int)tp->t_dblocks_delta;
+			msbp++;
+		}
+		if (tp->t_agcount_delta != 0) {
+			msbp->msb_field = XFS_SBS_AGCOUNT;
+			msbp->msb_delta = (int)tp->t_agcount_delta;
+			msbp++;
+		}
+		if (tp->t_imaxpct_delta != 0) {
+			msbp->msb_field = XFS_SBS_IMAX_PCT;
+			msbp->msb_delta = (int)tp->t_imaxpct_delta;
+			msbp++;
+		}
+		if (tp->t_rextsize_delta != 0) {
+			msbp->msb_field = XFS_SBS_REXTSIZE;
+			msbp->msb_delta = (int)tp->t_rextsize_delta;
+			msbp++;
+		}
+		if (tp->t_rbmblocks_delta != 0) {
+			msbp->msb_field = XFS_SBS_RBMBLOCKS;
+			msbp->msb_delta = (int)tp->t_rbmblocks_delta;
+			msbp++;
+		}
+		if (tp->t_rblocks_delta != 0) {
+			msbp->msb_field = XFS_SBS_RBLOCKS;
+			msbp->msb_delta = (int)tp->t_rblocks_delta;
+			msbp++;
+		}
+		if (tp->t_rextents_delta != 0) {
+			msbp->msb_field = XFS_SBS_REXTENTS;
+			msbp->msb_delta = (int)tp->t_rextents_delta;
+			msbp++;
+		}
+		if (tp->t_rextslog_delta != 0) {
+			msbp->msb_field = XFS_SBS_REXTSLOG;
+			msbp->msb_delta = (int)tp->t_rextslog_delta;
+			msbp++;
+		}
+	}
+
+	/*
+	 * If we need to change anything, do it.
+	 */
+	if (msbp > msb) {
+		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
+			(uint)(msbp - msb), rsvd);
+		ASSERT(error == 0);
+	}
+}
+
+
+/*
+ * xfs_trans_commit
+ *
+ * Commit the given transaction to the log a/synchronously.
+ *
+ * XFS disk error handling mechanism is not based on a typical
+ * transaction abort mechanism. Logically after the filesystem
+ * gets marked 'SHUTDOWN', we can't let any new transactions
+ * be durable - ie. committed to disk - because some metadata might
+ * be inconsistent. In such cases, this returns an error, and the
+ * caller may assume that all locked objects joined to the transaction
+ * have already been unlocked as if the commit had succeeded.
+ * It's illegal to reference the transaction structure after this call.
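+ *
+ * The expected calling sequence, as a rough sketch with error
+ * handling abbreviated (the transaction type and reservation are
+ * chosen for illustration only):
+ *
+ *	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
+ *	error = xfs_trans_reserve(tp, 0, XFS_SWRITE_LOG_RES(mp),
+ *				  0, 0, 0);
+ *	if (error) {
+ *		xfs_trans_cancel(tp, 0);
+ *		return error;
+ *	}
+ *	... join, modify and log items ...
+ *	error = xfs_trans_commit(tp, 0, NULL);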
+ */ + /*ARGSUSED*/ +int +xfs_trans_commit( + xfs_trans_t *tp, + uint flags, + xfs_lsn_t *commit_lsn_p) +{ + xfs_log_iovec_t *log_vector; + int nvec; + xfs_mount_t *mp; + xfs_lsn_t commit_lsn; + /* REFERENCED */ + int error; + int log_flags; + int sync; +#define XFS_TRANS_LOGVEC_COUNT 16 + xfs_log_iovec_t log_vector_fast[XFS_TRANS_LOGVEC_COUNT]; +#if defined(XLOG_NOLOG) || defined(DEBUG) + static xfs_lsn_t trans_lsn = 1; +#endif + void *commit_iclog; + int shutdown; + + commit_lsn = -1; + + /* + * Determine whether this commit is releasing a permanent + * log reservation or not. + */ + if (flags & XFS_TRANS_RELEASE_LOG_RES) { + ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); + log_flags = XFS_LOG_REL_PERM_RESERV; + } else { + log_flags = 0; + } + mp = tp->t_mountp; + + /* + * If there is nothing to be logged by the transaction, + * then unlock all of the items associated with the + * transaction and free the transaction structure. + * Also make sure to return any reserved blocks to + * the free pool. + */ +shut_us_down: + shutdown = XFS_FORCED_SHUTDOWN(mp) ? EIO : 0; + if (!(tp->t_flags & XFS_TRANS_DIRTY) || shutdown) { + xfs_trans_unreserve_and_mod_sb(tp); + /* + * It is indeed possible for the transaction to be + * not dirty but the dqinfo portion to be. All that + * means is that we have some (non-persistent) quota + * reservations that need to be unreserved. + */ + XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp); + if (tp->t_ticket) { + commit_lsn = xfs_log_done(mp, tp->t_ticket, + NULL, log_flags); + if (commit_lsn == -1 && !shutdown) + shutdown = XFS_ERROR(EIO); + } + xfs_trans_free_items(tp, shutdown? XFS_TRANS_ABORT : 0); + xfs_trans_free_busy(tp); + xfs_trans_free(tp); + XFS_STATS_INC(xfsstats.xs_trans_empty); + if (commit_lsn_p) + *commit_lsn_p = commit_lsn; + current->flags &= ~PF_FSTRANS; + return (shutdown); + } +#if defined(XLOG_NOLOG) || defined(DEBUG) + ASSERT(!xlog_debug || tp->t_ticket != NULL); +#else + ASSERT(tp->t_ticket != NULL); +#endif + + /* + * If we need to update the superblock, then do it now. + */ + if (tp->t_flags & XFS_TRANS_SB_DIRTY) { + xfs_trans_apply_sb_deltas(tp); + } + XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp); + + /* + * Ask each log item how many log_vector entries it will + * need so we can figure out how many to allocate. + * Try to avoid the kmem_alloc() call in the common case + * by using a vector from the stack when it fits. + */ + nvec = xfs_trans_count_vecs(tp); + + if (nvec == 0) { + xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); + goto shut_us_down; + } + + + if (nvec <= XFS_TRANS_LOGVEC_COUNT) { + log_vector = log_vector_fast; + } else { + log_vector = (xfs_log_iovec_t *)kmem_alloc(nvec * + sizeof(xfs_log_iovec_t), + KM_SLEEP); + } + + /* + * Fill in the log_vector and pin the logged items, and + * then write the transaction to the log. + */ + xfs_trans_fill_vecs(tp, log_vector); + + /* + * Ignore errors here. xfs_log_done would do the right thing. + * We need to put the ticket, etc. away. + */ + error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, + &(tp->t_lsn)); + +#if defined(XLOG_NOLOG) || defined(DEBUG) + if (xlog_debug) { + commit_lsn = xfs_log_done(mp, tp->t_ticket, + &commit_iclog, log_flags); + } else { + commit_lsn = 0; + tp->t_lsn = trans_lsn++; + } +#else + /* + * This is the regular case. At this point (after the call finishes), + * the transaction is committed incore and could go out to disk at + * any time. However, all the items associated with the transaction + * are still locked and pinned in memory. 
+ */
+	commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
+#endif
+
+	tp->t_commit_lsn = commit_lsn;
+	if (nvec > XFS_TRANS_LOGVEC_COUNT) {
+		kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t));
+	}
+
+	if (commit_lsn_p)
+		*commit_lsn_p = commit_lsn;
+
+	/*
+	 * If we got a log write error, unpin the log items that we had
+	 * pinned, clean up, free the trans structure, and return the error.
+	 */
+	if (error || commit_lsn == -1) {
+		xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
+		current->flags &= ~PF_FSTRANS;
+		return XFS_ERROR(EIO);
+	}
+
+	/*
+	 * Once the transaction has committed, unused
+	 * reservations need to be released and changes to
+	 * the superblock need to be reflected in the in-core
+	 * version. Do that now.
+	 */
+	xfs_trans_unreserve_and_mod_sb(tp);
+
+	sync = tp->t_flags & XFS_TRANS_SYNC;
+
+	/*
+	 * Tell the LM to call the transaction completion routine
+	 * when the log write with LSN commit_lsn completes (e.g.
+	 * when the transaction commit really hits the on-disk log).
+	 * After this call we cannot reference tp, because the call
+	 * can happen at any time and the call will free the transaction
+	 * structure pointed to by tp. The only case where we call
+	 * the completion routine (xfs_trans_committed) directly is
+	 * if the log is turned off on a debug kernel or we're
+	 * running in simulation mode (the log is explicitly turned
+	 * off).
+	 */
+#if defined(XLOG_NOLOG) || defined(DEBUG)
+	if (xlog_debug) {
+		tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed;
+		tp->t_logcb.cb_arg = tp;
+		error = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb));
+	} else {
+		xfs_trans_committed(tp, 0);
+	}
+#else
+	tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed;
+	tp->t_logcb.cb_arg = tp;
+
+	/* We need to pass the iclog buffer which was used for the
+	 * transaction commit record into this function, and attach
+	 * the callback to it. The callback must be attached before
+	 * the items are unlocked to avoid racing with other threads
+	 * waiting for an item to unlock.
+	 */
+	error = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb));
+#endif
+
+	/*
+	 * Once all the items of the transaction have been copied
+	 * to the in core log and the callback is attached, the
+	 * items can be unlocked.
+	 *
+	 * This will free descriptors pointing to items which were
+	 * not logged since there is nothing more to do with them.
+	 * For items which were logged, we will keep pointers to them
+	 * so they can be unpinned after the transaction commits to disk.
+	 * This will also stamp each modified meta-data item with
+	 * the commit lsn of this transaction for dependency tracking
+	 * purposes.
+	 */
+	xfs_trans_unlock_items(tp, commit_lsn);
+
+	/*
+	 * Now that the xfs_trans_committed callback has been attached,
+	 * and the items are released we can finally allow the iclog to
+	 * go to disk.
+	 */
+	error = xfs_log_release_iclog(mp, commit_iclog);
+
+	/*
+	 * If the transaction needs to be synchronous, then force the
+	 * log out now and wait for it.
+	 */
+	if (sync) {
+		if (!error)
+			error = xfs_log_force(mp, commit_lsn,
+				      XFS_LOG_FORCE | XFS_LOG_SYNC);
+		XFS_STATS_INC(xfsstats.xs_trans_sync);
+	} else {
+		XFS_STATS_INC(xfsstats.xs_trans_async);
+	}
+
+	/* mark this thread as no longer being in a transaction */
+	current->flags &= ~PF_FSTRANS;
+
+	return (error);
+}
+
+
+/*
+ * Total up the number of log iovecs needed to commit this
+ * transaction. The transaction itself needs one for the
+ * transaction header. Ask each dirty item in turn how many
+ * it needs to get the total.
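+ *
+ * For example, a transaction whose only dirty items are an inode
+ * item needing 2 vectors and a buffer item needing 3 would total
+ * 1 + 2 + 3 == 6 iovecs. (The per-item counts are illustrative;
+ * each item type computes its own via IOP_SIZE().)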
+ */ +STATIC uint +xfs_trans_count_vecs( + xfs_trans_t *tp) +{ + int nvecs; + xfs_log_item_desc_t *lidp; + + nvecs = 1; + lidp = xfs_trans_first_item(tp); + ASSERT(lidp != NULL); + + /* In the non-debug case we need to start bailing out if we + * didn't find a log_item here, return zero and let trans_commit + * deal with it. + */ + if (lidp == NULL) + return 0; + + while (lidp != NULL) { + /* + * Skip items which aren't dirty in this transaction. + */ + if (!(lidp->lid_flags & XFS_LID_DIRTY)) { + lidp = xfs_trans_next_item(tp, lidp); + continue; + } + lidp->lid_size = IOP_SIZE(lidp->lid_item); + nvecs += lidp->lid_size; + lidp = xfs_trans_next_item(tp, lidp); + } + + return nvecs; +} + +/* + * Called from the trans_commit code when we notice that + * the filesystem is in the middle of a forced shutdown. + */ +STATIC void +xfs_trans_uncommit( + xfs_trans_t *tp, + uint flags) +{ + xfs_log_item_desc_t *lidp; + + for (lidp = xfs_trans_first_item(tp); + lidp != NULL; + lidp = xfs_trans_next_item(tp, lidp)) { + /* + * Unpin all but those that aren't dirty. + */ + if (lidp->lid_flags & XFS_LID_DIRTY) + IOP_UNPIN_REMOVE(lidp->lid_item, tp); + } + + xfs_trans_unreserve_and_mod_sb(tp); + XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp); + + xfs_trans_free_items(tp, flags); + xfs_trans_free_busy(tp); + xfs_trans_free(tp); +} + +/* + * Fill in the vector with pointers to data to be logged + * by this transaction. The transaction header takes + * the first vector, and then each dirty item takes the + * number of vectors it indicated it needed in xfs_trans_count_vecs(). + * + * As each item fills in the entries it needs, also pin the item + * so that it cannot be flushed out until the log write completes. + */ +STATIC void +xfs_trans_fill_vecs( + xfs_trans_t *tp, + xfs_log_iovec_t *log_vector) +{ + xfs_log_item_desc_t *lidp; + xfs_log_iovec_t *vecp; + uint nitems; + + /* + * Skip over the entry for the transaction header, we'll + * fill that in at the end. + */ + vecp = log_vector + 1; /* pointer arithmetic */ + + nitems = 0; + lidp = xfs_trans_first_item(tp); + ASSERT(lidp != NULL); + while (lidp != NULL) { + /* + * Skip items which aren't dirty in this transaction. + */ + if (!(lidp->lid_flags & XFS_LID_DIRTY)) { + lidp = xfs_trans_next_item(tp, lidp); + continue; + } + /* + * The item may be marked dirty but not log anything. + * This can be used to get called when a transaction + * is committed. + */ + if (lidp->lid_size) { + nitems++; + } + IOP_FORMAT(lidp->lid_item, vecp); + vecp += lidp->lid_size; /* pointer arithmetic */ + IOP_PIN(lidp->lid_item); + lidp = xfs_trans_next_item(tp, lidp); + } + + /* + * Now that we've counted the number of items in this + * transaction, fill in the transaction header. + */ + tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC; + tp->t_header.th_type = tp->t_type; + tp->t_header.th_num_items = nitems; + log_vector->i_addr = (xfs_caddr_t)&tp->t_header; + log_vector->i_len = sizeof(xfs_trans_header_t); +} + + +/* + * Unlock all of the transaction's items and free the transaction. + * The transaction must not have modified any of its items, because + * there is no way to restore them to their previous state. + * + * If the transaction has made a log reservation, make sure to release + * it as well. 
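+ *
+ * A typical error path in a caller holding a permanent log
+ * reservation looks roughly like this (sketch):
+ *
+ *	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
+ *			     XFS_TRANS_ABORT);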
+ */ +void +xfs_trans_cancel( + xfs_trans_t *tp, + int flags) +{ + int log_flags; +#ifdef DEBUG + xfs_log_item_chunk_t *licp; + xfs_log_item_desc_t *lidp; + xfs_log_item_t *lip; + int i; +#endif + + /* + * See if the caller is being too lazy to figure out if + * the transaction really needs an abort. + */ + if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY)) + flags &= ~XFS_TRANS_ABORT; + /* + * See if the caller is relying on us to shut down the + * filesystem. This happens in paths where we detect + * corruption and decide to give up. + */ + if ((tp->t_flags & XFS_TRANS_DIRTY) && + !XFS_FORCED_SHUTDOWN(tp->t_mountp)) + xfs_force_shutdown(tp->t_mountp, XFS_CORRUPT_INCORE); +#ifdef DEBUG + if (!(flags & XFS_TRANS_ABORT)) { + licp = &(tp->t_items); + while (licp != NULL) { + lidp = licp->lic_descs; + for (i = 0; i < licp->lic_unused; i++, lidp++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lip = lidp->lid_item; + if (!XFS_FORCED_SHUTDOWN(tp->t_mountp)) + ASSERT(!(lip->li_type == XFS_LI_EFD)); + } + licp = licp->lic_next; + } + } +#endif + xfs_trans_unreserve_and_mod_sb(tp); + XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp); + + if (tp->t_ticket) { + if (flags & XFS_TRANS_RELEASE_LOG_RES) { + ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); + log_flags = XFS_LOG_REL_PERM_RESERV; + } else { + log_flags = 0; + } + xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags); + } + xfs_trans_free_items(tp, flags); + xfs_trans_free_busy(tp); + xfs_trans_free(tp); + + /* mark this thread as no longer being in a transaction */ + current->flags &= ~PF_FSTRANS; +} + + +/* + * Free the transaction structure. If there is more clean up + * to do when the structure is freed, add it here. + */ +STATIC void +xfs_trans_free( + xfs_trans_t *tp) +{ + atomic_dec(&tp->t_mountp->m_active_trans); + XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp); + kmem_zone_free(xfs_trans_zone, tp); +} + + +/* + * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item(). + * + * This is typically called by the LM when a transaction has been fully + * committed to disk. It needs to unpin the items which have + * been logged by the transaction and update their positions + * in the AIL if necessary. + * This also gets called when the transactions didn't get written out + * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then. + * + * Call xfs_trans_chunk_committed() to process the items in + * each chunk. + */ +STATIC void +xfs_trans_committed( + xfs_trans_t *tp, + int abortflag) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_chunk_t *next_licp; + xfs_log_busy_chunk_t *lbcp; + xfs_log_busy_slot_t *lbsp; + int i; + + /* + * Call the transaction's completion callback if there + * is one. + */ + if (tp->t_callback != NULL) { + tp->t_callback(tp, tp->t_callarg); + } + + /* + * Special case the chunk embedded in the transaction. + */ + licp = &(tp->t_items); + if (!(XFS_LIC_ARE_ALL_FREE(licp))) { + xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); + } + + /* + * Process the items in each chunk in turn. 
+ */ + licp = licp->lic_next; + while (licp != NULL) { + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); + xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); + next_licp = licp->lic_next; + kmem_free(licp, sizeof(xfs_log_item_chunk_t)); + licp = next_licp; + } + + /* + * Clear all the per-AG busy list items listed in this transaction + */ + lbcp = &tp->t_busy; + while (lbcp != NULL) { + for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused; i++, lbsp++) { + if (!XFS_LBC_ISFREE(lbcp, i)) { + xfs_alloc_clear_busy(tp, lbsp->lbc_ag, + lbsp->lbc_idx); + } + } + lbcp = lbcp->lbc_next; + } + xfs_trans_free_busy(tp); + + /* + * That's it for the transaction structure. Free it. + */ + xfs_trans_free(tp); +} + +/* + * This is called to perform the commit processing for each + * item described by the given chunk. + * + * The commit processing consists of unlocking items which were + * held locked with the SYNC_UNLOCK attribute, calling the committed + * routine of each logged item, updating the item's position in the AIL + * if necessary, and unpinning each item. If the committed routine + * returns -1, then do nothing further with the item because it + * may have been freed. + * + * Since items are unlocked when they are copied to the incore + * log, it is possible for two transactions to be completing + * and manipulating the same item simultaneously. The AIL lock + * will protect the lsn field of each item. The value of this + * field can never go backwards. + * + * We unpin the items after repositioning them in the AIL, because + * otherwise they could be immediately flushed and we'd have to race + * with the flusher trying to pull the item from the AIL as we add it. + */ +STATIC void +xfs_trans_chunk_committed( + xfs_log_item_chunk_t *licp, + xfs_lsn_t lsn, + int aborted) +{ + xfs_log_item_desc_t *lidp; + xfs_log_item_t *lip; + xfs_lsn_t item_lsn; + struct xfs_mount *mp; + int i; + SPLDECL(s); + + lidp = licp->lic_descs; + for (i = 0; i < licp->lic_unused; i++, lidp++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lip = lidp->lid_item; + if (aborted) + lip->li_flags |= XFS_LI_ABORTED; + + if (lidp->lid_flags & XFS_LID_SYNC_UNLOCK) { + IOP_UNLOCK(lip); + } + + /* + * Send in the ABORTED flag to the COMMITTED routine + * so that it knows whether the transaction was aborted + * or not. + */ + item_lsn = IOP_COMMITTED(lip, lsn); + + /* + * If the committed routine returns -1, make + * no more references to the item. + */ + if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) { + continue; + } + + /* + * If the returned lsn is greater than what it + * contained before, update the location of the + * item in the AIL. If it is not, then do nothing. + * Items can never move backwards in the AIL. + * + * While the new lsn should usually be greater, it + * is possible that a later transaction completing + * simultaneously with an earlier one using the + * same item could complete first with a higher lsn. + * This would cause the earlier transaction to fail + * the test below. + */ + mp = lip->li_mountp; + AIL_LOCK(mp,s); + if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) { + /* + * This will set the item's lsn to item_lsn + * and update the position of the item in + * the AIL. + * + * xfs_trans_update_ail() drops the AIL lock. + */ + xfs_trans_update_ail(mp, lip, item_lsn, s); + } else { + AIL_UNLOCK(mp, s); + } + + /* + * Now that we've repositioned the item in the AIL, + * unpin it so it can be flushed. 
Pass information + * about buffer stale state down from the log item + * flags, if anyone else stales the buffer we do not + * want to pay any attention to it. + */ + IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE); + } +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_trans_extfree.c linux.21rc5-ac2/fs/xfs/xfs_trans_extfree.c --- linux.21rc5/fs/xfs/xfs_trans_extfree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_trans_extfree.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_extfree_item.h" + +/* + * This routine is called to allocate an "extent free intention" + * log item that will hold nextents worth of extents. The + * caller must use all nextents extents, because we are not + * flexible about this at all. + */ +xfs_efi_log_item_t * +xfs_trans_get_efi(xfs_trans_t *tp, + uint nextents) +{ + xfs_efi_log_item_t *efip; + + ASSERT(tp != NULL); + ASSERT(nextents > 0); + + efip = xfs_efi_init(tp->t_mountp, nextents); + ASSERT(efip != NULL); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efip); + + return (efip); +} + +/* + * This routine is called to indicate that the described + * extent is to be logged as needing to be freed. It should + * be called once for each extent to be freed. 
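+ *
+ * A sketch of the intent/done pairing across the two transactions
+ * involved (the block numbers and lengths here are illustrative
+ * placeholders):
+ *
+ *	efip = xfs_trans_get_efi(tp, 2);
+ *	xfs_trans_log_efi_extent(tp, efip, startblock1, len1);
+ *	xfs_trans_log_efi_extent(tp, efip, startblock2, len2);
+ *	... commit tp, then in the follow-up transaction ...
+ *	efdp = xfs_trans_get_efd(tp2, efip, 2);
+ *	xfs_trans_log_efd_extent(tp2, efdp, startblock1, len1);
+ *	xfs_trans_log_efd_extent(tp2, efdp, startblock2, len2);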
+ */ +void +xfs_trans_log_efi_extent(xfs_trans_t *tp, + xfs_efi_log_item_t *efip, + xfs_fsblock_t start_block, + xfs_extlen_t ext_len) +{ + xfs_log_item_desc_t *lidp; + uint next_extent; + xfs_extent_t *extp; + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efip); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; + + next_extent = efip->efi_next_extent; + ASSERT(next_extent < efip->efi_format.efi_nextents); + extp = &(efip->efi_format.efi_extents[next_extent]); + extp->ext_start = start_block; + extp->ext_len = ext_len; + efip->efi_next_extent++; +} + + +/* + * This routine is called to allocate an "extent free done" + * log item that will hold nextents worth of extents. The + * caller must use all nextents extents, because we are not + * flexible about this at all. + */ +xfs_efd_log_item_t * +xfs_trans_get_efd(xfs_trans_t *tp, + xfs_efi_log_item_t *efip, + uint nextents) +{ + xfs_efd_log_item_t *efdp; + + ASSERT(tp != NULL); + ASSERT(nextents > 0); + + efdp = xfs_efd_init(tp->t_mountp, efip, nextents); + ASSERT(efdp != NULL); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efdp); + + return (efdp); +} + +/* + * This routine is called to indicate that the described + * extent is to be logged as having been freed. It should + * be called once for each extent freed. + */ +void +xfs_trans_log_efd_extent(xfs_trans_t *tp, + xfs_efd_log_item_t *efdp, + xfs_fsblock_t start_block, + xfs_extlen_t ext_len) +{ + xfs_log_item_desc_t *lidp; + uint next_extent; + xfs_extent_t *extp; + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efdp); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; + + next_extent = efdp->efd_next_extent; + ASSERT(next_extent < efdp->efd_format.efd_nextents); + extp = &(efdp->efd_format.efd_extents[next_extent]); + extp->ext_start = start_block; + extp->ext_len = ext_len; + efdp->efd_next_extent++; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_trans.h linux.21rc5-ac2/fs/xfs/xfs_trans.h --- linux.21rc5/fs/xfs/xfs_trans.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_trans.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1029 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_TRANS_H__ +#define __XFS_TRANS_H__ + +/* + * This is the structure written in the log at the head of + * every transaction. It identifies the type and id of the + * transaction, and contains the number of items logged by + * the transaction so we know how many to expect during recovery. + * + * Do not change the below structure without redoing the code in + * xlog_recover_add_to_trans() and xlog_recover_add_to_cont_trans(). + */ +typedef struct xfs_trans_header { + uint th_magic; /* magic number */ + uint th_type; /* transaction type */ + __int32_t th_tid; /* transaction id (unused) */ + uint th_num_items; /* num items logged by trans */ +} xfs_trans_header_t; + +#define XFS_TRANS_HEADER_MAGIC 0x5452414e /* TRAN */ + +/* + * Log item types. + */ +#define XFS_LI_5_3_BUF 0x1234 /* v1 bufs, 1-block inode buffers */ +#define XFS_LI_5_3_INODE 0x1235 /* 1-block inode buffers */ +#define XFS_LI_EFI 0x1236 +#define XFS_LI_EFD 0x1237 +#define XFS_LI_IUNLINK 0x1238 +#define XFS_LI_6_1_INODE 0x1239 /* 4K non-aligned inode bufs */ +#define XFS_LI_6_1_BUF 0x123a /* v1, 4K inode buffers */ +#define XFS_LI_INODE 0x123b /* aligned ino chunks, var-size ibufs */ +#define XFS_LI_BUF 0x123c /* v2 bufs, variable sized inode bufs */ +#define XFS_LI_DQUOT 0x123d +#define XFS_LI_QUOTAOFF 0x123e + +/* + * Transaction types. Used to distinguish types of buffers. + */ +#define XFS_TRANS_SETATTR_NOT_SIZE 1 +#define XFS_TRANS_SETATTR_SIZE 2 +#define XFS_TRANS_INACTIVE 3 +#define XFS_TRANS_CREATE 4 +#define XFS_TRANS_CREATE_TRUNC 5 +#define XFS_TRANS_TRUNCATE_FILE 6 +#define XFS_TRANS_REMOVE 7 +#define XFS_TRANS_LINK 8 +#define XFS_TRANS_RENAME 9 +#define XFS_TRANS_MKDIR 10 +#define XFS_TRANS_RMDIR 11 +#define XFS_TRANS_SYMLINK 12 +#define XFS_TRANS_SET_DMATTRS 13 +#define XFS_TRANS_GROWFS 14 +#define XFS_TRANS_STRAT_WRITE 15 +#define XFS_TRANS_DIOSTRAT 16 +#define XFS_TRANS_WRITE_SYNC 17 +#define XFS_TRANS_WRITEID 18 +#define XFS_TRANS_ADDAFORK 19 +#define XFS_TRANS_ATTRINVAL 20 +#define XFS_TRANS_ATRUNCATE 21 +#define XFS_TRANS_ATTR_SET 22 +#define XFS_TRANS_ATTR_RM 23 +#define XFS_TRANS_ATTR_FLAG 24 +#define XFS_TRANS_CLEAR_AGI_BUCKET 25 +#define XFS_TRANS_QM_SBCHANGE 26 +/* + * Dummy entries since we use the transaction type to index into the + * trans_type[] in xlog_recover_print_trans_head() + */ +#define XFS_TRANS_DUMMY1 27 +#define XFS_TRANS_DUMMY2 28 +#define XFS_TRANS_QM_QUOTAOFF 29 +#define XFS_TRANS_QM_DQALLOC 30 +#define XFS_TRANS_QM_SETQLIM 31 +#define XFS_TRANS_QM_DQCLUSTER 32 +#define XFS_TRANS_QM_QINOCREATE 33 +#define XFS_TRANS_QM_QUOTAOFF_END 34 +#define XFS_TRANS_SB_UNIT 35 +#define XFS_TRANS_FSYNC_TS 36 +#define XFS_TRANS_GROWFSRT_ALLOC 37 +#define XFS_TRANS_GROWFSRT_ZERO 38 +#define XFS_TRANS_GROWFSRT_FREE 39 +#define XFS_TRANS_SWAPEXT 40 +/* new transaction types need to be reflected in xfs_logprint(8) */ + + +#ifdef __KERNEL__ +struct xfs_buf; +struct xfs_buftarg; +struct xfs_efd_log_item; +struct xfs_efi_log_item; +struct xfs_inode; +struct xfs_item_ops; +struct xfs_log_iovec; +struct xfs_log_item; +struct xfs_log_item_desc; +struct xfs_mount; +struct xfs_trans; +struct xfs_dquot_acct; + +typedef struct xfs_ail_entry { + struct xfs_log_item *ail_forw; /* AIL forw pointer */ + struct xfs_log_item *ail_back; /* AIL back 
pointer */
+} xfs_ail_entry_t;
+
+/*
+ * This structure is passed as a parameter to xfs_trans_push_ail()
+ * and is used to track the LSN that the waiting processes are
+ * waiting on to become unused.
+ */
+typedef struct xfs_ail_ticket {
+	xfs_lsn_t		at_lsn;		/* lsn waiting for */
+	struct xfs_ail_ticket	*at_forw;	/* wait list ptr */
+	struct xfs_ail_ticket	*at_back;	/* wait list ptr */
+	sv_t			at_sema;	/* wait sema */
+} xfs_ail_ticket_t;
+
+
+typedef struct xfs_log_item {
+	xfs_ail_entry_t		li_ail;		/* AIL pointers */
+	xfs_lsn_t		li_lsn;		/* last on-disk lsn */
+	struct xfs_log_item_desc	*li_desc;	/* ptr to current desc*/
+	struct xfs_mount	*li_mountp;	/* ptr to fs mount */
+	uint			li_type;	/* item type */
+	uint			li_flags;	/* misc flags */
+	struct xfs_log_item	*li_bio_list;	/* buffer item list */
+	void			(*li_cb)(struct xfs_buf *,
+					 struct xfs_log_item *);
+					/* buffer item iodone */
+					/* callback func */
+	struct xfs_item_ops	*li_ops;	/* function list */
+} xfs_log_item_t;
+
+#define XFS_LI_IN_AIL	0x1
+#define XFS_LI_ABORTED	0x2
+
+typedef struct xfs_item_ops {
+	uint (*iop_size)(xfs_log_item_t *);
+	void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
+	void (*iop_pin)(xfs_log_item_t *);
+	void (*iop_unpin)(xfs_log_item_t *, int);
+	void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *);
+	uint (*iop_trylock)(xfs_log_item_t *);
+	void (*iop_unlock)(xfs_log_item_t *);
+	xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
+	void (*iop_push)(xfs_log_item_t *);
+	void (*iop_abort)(xfs_log_item_t *);
+	void (*iop_pushbuf)(xfs_log_item_t *);
+	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
+} xfs_item_ops_t;
+
+#define IOP_SIZE(ip)		(*(ip)->li_ops->iop_size)(ip)
+#define IOP_FORMAT(ip,vp)	(*(ip)->li_ops->iop_format)(ip, vp)
+#define IOP_PIN(ip)		(*(ip)->li_ops->iop_pin)(ip)
+#define IOP_UNPIN(ip, flags)	(*(ip)->li_ops->iop_unpin)(ip, flags)
+#define IOP_UNPIN_REMOVE(ip,tp)	(*(ip)->li_ops->iop_unpin_remove)(ip, tp)
+#define IOP_TRYLOCK(ip)		(*(ip)->li_ops->iop_trylock)(ip)
+#define IOP_UNLOCK(ip)		(*(ip)->li_ops->iop_unlock)(ip)
+#define IOP_COMMITTED(ip, lsn)	(*(ip)->li_ops->iop_committed)(ip, lsn)
+#define IOP_PUSH(ip)		(*(ip)->li_ops->iop_push)(ip)
+#define IOP_ABORT(ip)		(*(ip)->li_ops->iop_abort)(ip)
+#define IOP_PUSHBUF(ip)		(*(ip)->li_ops->iop_pushbuf)(ip)
+#define IOP_COMMITTING(ip, lsn)	(*(ip)->li_ops->iop_committing)(ip, lsn)
+
+/*
+ * Return values for the IOP_TRYLOCK() routines.
+ */
+#define XFS_ITEM_SUCCESS	0
+#define XFS_ITEM_PINNED		1
+#define XFS_ITEM_LOCKED		2
+#define XFS_ITEM_FLUSHING	3
+#define XFS_ITEM_PUSHBUF	4
+
+#endif	/* __KERNEL__ */
+
+/*
+ * This structure is used to track log items associated with
+ * a transaction. It points to the log item and keeps some
+ * flags to track the state of the log item. It also tracks
+ * the amount of space needed to log the item it describes
+ * once we get to commit processing (see xfs_trans_commit()).
+ */
+typedef struct xfs_log_item_desc {
+	xfs_log_item_t	*lid_item;
+	ushort		lid_size;
+	unsigned char	lid_flags;
+	unsigned char	lid_index;
+} xfs_log_item_desc_t;
+
+#define XFS_LID_DIRTY		0x1
+#define XFS_LID_PINNED		0x2
+#define XFS_LID_SYNC_UNLOCK	0x4
+#define XFS_LID_BUF_STALE	0x8
+
+/*
+ * This structure is used to maintain a chunk list of log_item_desc
+ * structures. The free field is a bitmask indicating which descriptors
+ * in this chunk's array are free. The unused field is the first value
+ * not used since this chunk was allocated.
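+ *
+ * As a worked example: a freshly initialized chunk has lic_free
+ * == XFS_LIC_FREEMASK (all 15 slot bits set) and lic_unused == 0;
+ * after XFS_LIC_CLAIM(cp, 0) the low bit is clear, so
+ * XFS_LIC_ISFREE(cp, 0) is false while slots 1-14 remain free.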
+ */ +#define XFS_LIC_NUM_SLOTS 15 +typedef struct xfs_log_item_chunk { + struct xfs_log_item_chunk *lic_next; + ushort lic_free; + ushort lic_unused; + xfs_log_item_desc_t lic_descs[XFS_LIC_NUM_SLOTS]; +} xfs_log_item_chunk_t; + +#define XFS_LIC_MAX_SLOT (XFS_LIC_NUM_SLOTS - 1) +#define XFS_LIC_FREEMASK ((1 << XFS_LIC_NUM_SLOTS) - 1) + + +/* + * Initialize the given chunk. Set the chunk's free descriptor mask + * to indicate that all descriptors are free. The caller gets to set + * lic_unused to the right value (0 matches all free). The + * lic_descs.lid_index values are set up as each desc is allocated. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_INIT) +void xfs_lic_init(xfs_log_item_chunk_t *cp); +#define XFS_LIC_INIT(cp) xfs_lic_init(cp) +#else +#define XFS_LIC_INIT(cp) ((cp)->lic_free = XFS_LIC_FREEMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_INIT_SLOT) +void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_INIT_SLOT(cp,slot) xfs_lic_init_slot(cp, slot) +#else +#define XFS_LIC_INIT_SLOT(cp,slot) \ + ((cp)->lic_descs[slot].lid_index = (unsigned char)(slot)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_VACANCY) +int xfs_lic_vacancy(xfs_log_item_chunk_t *cp); +#define XFS_LIC_VACANCY(cp) xfs_lic_vacancy(cp) +#else +#define XFS_LIC_VACANCY(cp) (((cp)->lic_free) & XFS_LIC_FREEMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_ALL_FREE) +void xfs_lic_all_free(xfs_log_item_chunk_t *cp); +#define XFS_LIC_ALL_FREE(cp) xfs_lic_all_free(cp) +#else +#define XFS_LIC_ALL_FREE(cp) ((cp)->lic_free = XFS_LIC_FREEMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_ARE_ALL_FREE) +int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp); +#define XFS_LIC_ARE_ALL_FREE(cp) xfs_lic_are_all_free(cp) +#else +#define XFS_LIC_ARE_ALL_FREE(cp) (((cp)->lic_free & XFS_LIC_FREEMASK) ==\ + XFS_LIC_FREEMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_ISFREE) +int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_ISFREE(cp,slot) xfs_lic_isfree(cp,slot) +#else +#define XFS_LIC_ISFREE(cp,slot) ((cp)->lic_free & (1 << (slot))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_CLAIM) +void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_CLAIM(cp,slot) xfs_lic_claim(cp,slot) +#else +#define XFS_LIC_CLAIM(cp,slot) ((cp)->lic_free &= ~(1 << (slot))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_RELSE) +void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_RELSE(cp,slot) xfs_lic_relse(cp,slot) +#else +#define XFS_LIC_RELSE(cp,slot) ((cp)->lic_free |= 1 << (slot)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_SLOT) +xfs_log_item_desc_t *xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_SLOT(cp,slot) xfs_lic_slot(cp,slot) +#else +#define XFS_LIC_SLOT(cp,slot) (&((cp)->lic_descs[slot])) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_DESC_TO_SLOT) +int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp); +#define XFS_LIC_DESC_TO_SLOT(dp) xfs_lic_desc_to_slot(dp) +#else +#define XFS_LIC_DESC_TO_SLOT(dp) ((uint)((dp)->lid_index)) +#endif +/* + * Calculate the address of a chunk given a descriptor pointer: + * dp - dp->lid_index give the address of the start of the lic_descs array. + * From this we subtract the offset of the lic_descs field in a chunk. + * All of this yields the address of the chunk, which is + * cast to a chunk pointer. 
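+ *
+ * E.g. for a descriptor with lid_index == 3, (dp - 3) points at
+ * lic_descs[0], and subtracting the byte offset of lic_descs
+ * within the chunk structure then yields the chunk itself.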
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_DESC_TO_CHUNK) +xfs_log_item_chunk_t *xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp); +#define XFS_LIC_DESC_TO_CHUNK(dp) xfs_lic_desc_to_chunk(dp) +#else +#define XFS_LIC_DESC_TO_CHUNK(dp) ((xfs_log_item_chunk_t*) \ + (((xfs_caddr_t)((dp) - (dp)->lid_index)) -\ + (xfs_caddr_t)(((xfs_log_item_chunk_t*) \ + 0)->lic_descs))) +#endif + +#ifdef __KERNEL__ +/* + * This structure is used to maintain a list of block ranges that have been + * freed in the transaction. The ranges are listed in the perag[] busy list + * between when they're freed and the transaction is committed to disk. + */ + +typedef struct xfs_log_busy_slot { + xfs_agnumber_t lbc_ag; + ushort lbc_idx; /* index in perag.busy[] */ +} xfs_log_busy_slot_t; + +#define XFS_LBC_NUM_SLOTS 31 +typedef struct xfs_log_busy_chunk { + struct xfs_log_busy_chunk *lbc_next; + uint lbc_free; /* bitmask of free slots */ + ushort lbc_unused; /* first unused */ + xfs_log_busy_slot_t lbc_busy[XFS_LBC_NUM_SLOTS]; +} xfs_log_busy_chunk_t; + +#define XFS_LBC_MAX_SLOT (XFS_LBC_NUM_SLOTS - 1) +#define XFS_LBC_FREEMASK ((1U << XFS_LBC_NUM_SLOTS) - 1) + +#define XFS_LBC_INIT(cp) ((cp)->lbc_free = XFS_LBC_FREEMASK) +#define XFS_LBC_CLAIM(cp, slot) ((cp)->lbc_free &= ~(1 << (slot))) +#define XFS_LBC_SLOT(cp, slot) (&((cp)->lbc_busy[(slot)])) +#define XFS_LBC_VACANCY(cp) (((cp)->lbc_free) & XFS_LBC_FREEMASK) +#define XFS_LBC_ISFREE(cp, slot) ((cp)->lbc_free & (1 << (slot))) + +/* + * This is the type of function which can be given to xfs_trans_callback() + * to be called upon the transaction's commit to disk. + */ +typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *); + +/* + * This is the structure maintained for every active transaction. + */ +typedef struct xfs_trans { + unsigned int t_magic; /* magic number */ + xfs_log_callback_t t_logcb; /* log callback struct */ + struct xfs_trans *t_forw; /* async list pointers */ + struct xfs_trans *t_back; /* async list pointers */ + unsigned int t_type; /* transaction type */ + unsigned int t_log_res; /* amt of log space resvd */ + unsigned int t_log_count; /* count for perm log res */ + unsigned int t_blk_res; /* # of blocks resvd */ + unsigned int t_blk_res_used; /* # of resvd blocks used */ + unsigned int t_rtx_res; /* # of rt extents resvd */ + unsigned int t_rtx_res_used; /* # of resvd rt extents used */ + xfs_log_ticket_t t_ticket; /* log mgr ticket */ + sema_t t_sema; /* sema for commit completion */ + xfs_lsn_t t_lsn; /* log seq num of start of + * transaction. */ + xfs_lsn_t t_commit_lsn; /* log seq num of end of + * transaction. 
*/ + struct xfs_mount *t_mountp; /* ptr to fs mount struct */ + struct xfs_dquot_acct *t_dqinfo; /* accting info for dquots */ + xfs_trans_callback_t t_callback; /* transaction callback */ + void *t_callarg; /* callback arg */ + unsigned int t_flags; /* misc flags */ + long t_icount_delta; /* superblock icount change */ + long t_ifree_delta; /* superblock ifree change */ + long t_fdblocks_delta; /* superblock fdblocks chg */ + long t_res_fdblocks_delta; /* on-disk only chg */ + long t_frextents_delta;/* superblock freextents chg*/ + long t_res_frextents_delta; /* on-disk only chg */ + long t_ag_freeblks_delta; /* debugging counter */ + long t_ag_flist_delta; /* debugging counter */ + long t_ag_btree_delta; /* debugging counter */ + long t_dblocks_delta;/* superblock dblocks change */ + long t_agcount_delta;/* superblock agcount change */ + long t_imaxpct_delta;/* superblock imaxpct change */ + long t_rextsize_delta;/* superblock rextsize chg */ + long t_rbmblocks_delta;/* superblock rbmblocks chg */ + long t_rblocks_delta;/* superblock rblocks change */ + long t_rextents_delta;/* superblocks rextents chg */ + long t_rextslog_delta;/* superblocks rextslog chg */ + unsigned int t_items_free; /* log item descs free */ + xfs_log_item_chunk_t t_items; /* first log item desc chunk */ + xfs_trans_header_t t_header; /* header for in-log trans */ + unsigned int t_busy_free; /* busy descs free */ + xfs_log_busy_chunk_t t_busy; /* busy/async free blocks */ +} xfs_trans_t; + +#endif /* __KERNEL__ */ + + +#define XFS_TRANS_MAGIC 0x5452414E /* 'TRAN' */ +/* + * Values for t_flags. + */ +#define XFS_TRANS_DIRTY 0x01 /* something needs to be logged */ +#define XFS_TRANS_SB_DIRTY 0x02 /* superblock is modified */ +#define XFS_TRANS_PERM_LOG_RES 0x04 /* xact took a permanent log res */ +#define XFS_TRANS_SYNC 0x08 /* make commit synchronous */ +#define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */ +#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */ + +/* + * Values for call flags parameter. + */ +#define XFS_TRANS_NOSLEEP 0x1 +#define XFS_TRANS_WAIT 0x2 +#define XFS_TRANS_RELEASE_LOG_RES 0x4 +#define XFS_TRANS_ABORT 0x8 + +/* + * Field values for xfs_trans_mod_sb. + */ +#define XFS_TRANS_SB_ICOUNT 0x00000001 +#define XFS_TRANS_SB_IFREE 0x00000002 +#define XFS_TRANS_SB_FDBLOCKS 0x00000004 +#define XFS_TRANS_SB_RES_FDBLOCKS 0x00000008 +#define XFS_TRANS_SB_FREXTENTS 0x00000010 +#define XFS_TRANS_SB_RES_FREXTENTS 0x00000020 +#define XFS_TRANS_SB_DBLOCKS 0x00000040 +#define XFS_TRANS_SB_AGCOUNT 0x00000080 +#define XFS_TRANS_SB_IMAXPCT 0x00000100 +#define XFS_TRANS_SB_REXTSIZE 0x00000200 +#define XFS_TRANS_SB_RBMBLOCKS 0x00000400 +#define XFS_TRANS_SB_RBLOCKS 0x00000800 +#define XFS_TRANS_SB_REXTENTS 0x00001000 +#define XFS_TRANS_SB_REXTSLOG 0x00002000 + + +/* + * Various log reservation values. + * These are based on the size of the file system block + * because that is what most transactions manipulate. + * Each adds in an additional 128 bytes per item logged to + * try to account for the overhead of the transaction mechanism. + * + * Note: + * Most of the reservations underestimate the number of allocation + * groups into which they could free extents in the xfs_bmap_finish() + * call. This is because the number in the worst case is quite high + * and quite unusual. In order to fix this we need to change + * xfs_bmap_finish() to free extents in only a single AG at a time. 
+ * This will require changes to the EFI code as well, however, so that + * the EFI for the extents not freed is logged again in each transaction. + * See bug 261917. + */ + +/* + * Per-extent log reservation for the allocation btree changes + * involved in freeing or allocating an extent. + * 2 trees * (2 blocks/level * max depth - 1) * block size + */ +#define XFS_ALLOCFREE_LOG_RES(mp,nx) \ + ((nx) * (2 * XFS_FSB_TO_B((mp), 2 * XFS_AG_MAXLEVELS(mp) - 1))) +#define XFS_ALLOCFREE_LOG_COUNT(mp,nx) \ + ((nx) * (2 * (2 * XFS_AG_MAXLEVELS(mp) - 1))) + +/* + * Per-directory log reservation for any directory change. + * dir blocks: (1 btree block per level + data block + free block) * dblock size + * bmap btree: (levels + 2) * max depth * block size + * v2 directory blocks can be fragmented below the dirblksize down to the fsb + * size, so account for that in the DAENTER macros. + */ +#define XFS_DIROP_LOG_RES(mp) \ + (XFS_FSB_TO_B(mp, XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK)) + \ + (XFS_FSB_TO_B(mp, XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1))) +#define XFS_DIROP_LOG_COUNT(mp) \ + (XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \ + XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1) + +/* + * In a write transaction we can allocate a maximum of 2 + * extents. This gives: + * the inode getting the new extents: inode size + * the inode\'s bmap btree: max depth * block size + * the agfs of the ags from which the extents are allocated: 2 * sector + * the superblock free block counter: sector size + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size + * And the bmap_finish transaction can free bmap blocks in a join: + * the agfs of the ags containing the blocks: 2 * sector size + * the agfls of the ags containing the blocks: 2 * sector size + * the super block free block counter: sector size + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_WRITE_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \ + (2 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 2) + \ + (128 * (4 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))),\ + ((2 * (mp)->m_sb.sb_sectsize) + \ + (2 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 2) + \ + (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))))) + +#define XFS_WRITE_LOG_RES(mp) ((mp)->m_reservations.tr_write) + +/* + * In truncating a file we free up to two extents at once. 
We can modify:
+ * the inode being truncated: inode size
+ * the inode\'s bmap btree: (max depth + 1) * block size
+ * And the bmap_finish transaction can free the blocks and bmap blocks:
+ * the agf for each of the ags: 4 * sector size
+ * the agfl for each of the ags: 4 * sector size
+ * the super block to reflect the freed blocks: sector size
+ * worst case split in allocation btrees per extent assuming 4 extents:
+ *	4 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+#define XFS_CALC_ITRUNCATE_LOG_RES(mp) \
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1) + \
+	  (128 * (2 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \
+	 ((4 * (mp)->m_sb.sb_sectsize) + \
+	  (4 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 4) + \
+	  (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))))))
+
+#define XFS_ITRUNCATE_LOG_RES(mp)	((mp)->m_reservations.tr_itruncate)
+
+/*
+ * In renaming files we can modify:
+ * the four inodes involved: 4 * inode size
+ * the two directory btrees: 2 * (max depth + v2) * dir block size
+ * the two directory bmap btrees: 2 * max depth * block size
+ * And the bmap_finish transaction can free dir and bmap blocks (two sets
+ *	of bmap blocks) giving:
+ * the agf for the ags in which the blocks live: 3 * sector size
+ * the agfl for the ags in which the blocks live: 3 * sector size
+ * the superblock for the free block count: sector size
+ * the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+#define XFS_CALC_RENAME_LOG_RES(mp) \
+	(MAX( \
+	 ((4 * (mp)->m_sb.sb_inodesize) + \
+	  (2 * XFS_DIROP_LOG_RES(mp)) + \
+	  (128 * (4 + 2 * XFS_DIROP_LOG_COUNT(mp)))), \
+	 ((3 * (mp)->m_sb.sb_sectsize) + \
+	  (3 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 3) + \
+	  (128 * (7 + XFS_ALLOCFREE_LOG_COUNT(mp, 3))))))
+
+#define XFS_RENAME_LOG_RES(mp)	((mp)->m_reservations.tr_rename)
+
+/*
+ * For creating a link to an inode:
+ * the parent directory inode: inode size
+ * the linked inode: inode size
+ * the directory btree could split: (max depth + v2) * dir block size
+ * the directory bmap btree could join or split: (max depth + v2) * blocksize
+ * And the bmap_finish transaction can free some bmap blocks giving:
+ * the agf for the ag in which the blocks live: sector size
+ * the agfl for the ag in which the blocks live: sector size
+ * the superblock for the free block count: sector size
+ * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
+ */
+#define XFS_CALC_LINK_LOG_RES(mp) \
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  (mp)->m_sb.sb_inodesize + \
+	  XFS_DIROP_LOG_RES(mp) + \
+	  (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \
+	 ((mp)->m_sb.sb_sectsize + \
+	  (mp)->m_sb.sb_sectsize + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	  (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
+
+#define XFS_LINK_LOG_RES(mp)	((mp)->m_reservations.tr_link)
+
+/*
+ * For removing a directory entry we can modify:
+ * the parent directory inode: inode size
+ * the removed inode: inode size
+ * the directory btree could join: (max depth + v2) * dir block size
+ * the directory bmap btree could join or split: (max depth + v2) * blocksize
+ * And the bmap_finish transaction can free the dir and bmap blocks giving:
+ * the agf for the ag in which the blocks live: 2 * sector size
+ * the agfl for the ag in which the blocks live: 2 * sector size
+ * the superblock for the free block count: sector size
+ * the allocation btrees: 2 exts *
2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_REMOVE_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_inodesize + \ + XFS_DIROP_LOG_RES(mp) + \ + (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \ + ((2 * (mp)->m_sb.sb_sectsize) + \ + (2 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 2) + \ + (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))))) + +#define XFS_REMOVE_LOG_RES(mp) ((mp)->m_reservations.tr_remove) + +/* + * For symlink we can modify: + * the parent directory inode: inode size + * the new inode: inode size + * the inode btree entry: 1 block + * the directory btree: (max depth + v2) * dir block size + * the directory inode\'s bmap btree: (max depth + v2) * block size + * the blocks for the symlink: 1 KB + * Or in the first xact we allocate some inodes giving: + * the agi and agf of the ag getting the new inodes: 2 * sectorsize + * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize + * the inode btree: max depth * blocksize + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_SYMLINK_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_inodesize + \ + XFS_FSB_TO_B(mp, 1) + \ + XFS_DIROP_LOG_RES(mp) + \ + 1024 + \ + (128 * (4 + XFS_DIROP_LOG_COUNT(mp)))), \ + (2 * (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \ + XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \ + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) + +#define XFS_SYMLINK_LOG_RES(mp) ((mp)->m_reservations.tr_symlink) + +/* + * For create we can modify: + * the parent directory inode: inode size + * the new inode: inode size + * the inode btree entry: block size + * the superblock for the nlink flag: sector size + * the directory btree: (max depth + v2) * dir block size + * the directory inode\'s bmap btree: (max depth + v2) * block size + * Or in the first xact we allocate some inodes giving: + * the agi and agf of the ag getting the new inodes: 2 * sectorsize + * the superblock for the nlink flag: sector size + * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize + * the inode btree: max depth * blocksize + * the allocation btrees: 2 trees * (max depth - 1) * block size + */ +#define XFS_CALC_CREATE_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B(mp, 1) + \ + XFS_DIROP_LOG_RES(mp) + \ + (128 * (3 + XFS_DIROP_LOG_COUNT(mp)))), \ + (3 * (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \ + XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \ + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) + +#define XFS_CREATE_LOG_RES(mp) ((mp)->m_reservations.tr_create) + +/* + * Making a new directory is the same as creating a new file. 
+ */ +#define XFS_CALC_MKDIR_LOG_RES(mp) XFS_CALC_CREATE_LOG_RES(mp) + +#define XFS_MKDIR_LOG_RES(mp) ((mp)->m_reservations.tr_mkdir) + +/* + * In freeing an inode we can modify: + * the inode being freed: inode size + * the super block free inode counter: sector size + * the agi hash list and counters: sector size + * the inode btree entry: block size + * the on disk inode before ours in the agi hash list: inode cluster size + */ +#define XFS_CALC_IFREE_LOG_RES(mp) \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_sectsize + \ + (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B((mp), 1) + \ + MAX((__uint16_t)XFS_FSB_TO_B((mp), 1), XFS_INODE_CLUSTER_SIZE(mp)) + \ + (128 * 5)) + +#define XFS_IFREE_LOG_RES(mp) ((mp)->m_reservations.tr_ifree) + +/* + * When only changing the inode we log the inode and possibly the superblock + * We also add a bit of slop for the transaction stuff. + */ +#define XFS_CALC_ICHANGE_LOG_RES(mp) ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_sectsize + 512) + +#define XFS_ICHANGE_LOG_RES(mp) ((mp)->m_reservations.tr_ichange) + +/* + * Growing the data section of the filesystem. + * superblock + * agi and agf + * allocation btrees + */ +#define XFS_CALC_GROWDATA_LOG_RES(mp) \ + ((mp)->m_sb.sb_sectsize * 3 + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) + +#define XFS_GROWDATA_LOG_RES(mp) ((mp)->m_reservations.tr_growdata) + +/* + * Growing the rt section of the filesystem. + * In the first set of transactions (ALLOC) we allocate space to the + * bitmap or summary files. + * superblock: sector size + * agf of the ag from which the extent is allocated: sector size + * bmap btree for bitmap/summary inode: max depth * blocksize + * bitmap/summary inode: inode size + * allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize + */ +#define XFS_CALC_GROWRTALLOC_LOG_RES(mp) \ + (2 * (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \ + (mp)->m_sb.sb_inodesize + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * \ + (3 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + \ + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) + +#define XFS_GROWRTALLOC_LOG_RES(mp) ((mp)->m_reservations.tr_growrtalloc) + +/* + * Growing the rt section of the filesystem. + * In the second set of transactions (ZERO) we zero the new metadata blocks. + * one bitmap/summary block: blocksize + */ +#define XFS_CALC_GROWRTZERO_LOG_RES(mp) \ + ((mp)->m_sb.sb_blocksize + 128) + +#define XFS_GROWRTZERO_LOG_RES(mp) ((mp)->m_reservations.tr_growrtzero) + +/* + * Growing the rt section of the filesystem. + * In the third set of transactions (FREE) we update metadata without + * allocating any new blocks. + * superblock: sector size + * bitmap inode: inode size + * summary inode: inode size + * one bitmap block: blocksize + * summary blocks: new summary size + */ +#define XFS_CALC_GROWRTFREE_LOG_RES(mp) \ + ((mp)->m_sb.sb_sectsize + \ + 2 * (mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_blocksize + \ + (mp)->m_rsumsize + \ + (128 * 5)) + +#define XFS_GROWRTFREE_LOG_RES(mp) ((mp)->m_reservations.tr_growrtfree) + +/* + * Logging the inode modification timestamp on a synchronous write. 
+ *	inode
+ */
+#define	XFS_CALC_SWRITE_LOG_RES(mp) \
+	((mp)->m_sb.sb_inodesize + 128)
+
+#define	XFS_SWRITE_LOG_RES(mp)	((mp)->m_reservations.tr_swrite)
+
+/*
+ * Logging the inode timestamps on an fsync -- same as SWRITE
+ * as long as SWRITE logs the entire inode core
+ */
+#define	XFS_FSYNC_TS_LOG_RES(mp)	((mp)->m_reservations.tr_swrite)
+
+/*
+ * Logging the inode mode bits when writing a setuid/setgid file
+ *	inode
+ */
+#define	XFS_CALC_WRITEID_LOG_RES(mp) \
+	((mp)->m_sb.sb_inodesize + 128)
+
+#define	XFS_WRITEID_LOG_RES(mp)	((mp)->m_reservations.tr_swrite)
+
+/*
+ * Converting the inode from non-attributed to attributed.
+ *	the inode being converted: inode size
+ *	agf block and superblock (for block allocation)
+ *	the new block (directory sized)
+ *	bmap blocks for the new directory block
+ *	allocation btrees
+ */
+#define	XFS_CALC_ADDAFORK_LOG_RES(mp)	\
+	((mp)->m_sb.sb_inodesize + \
+	 (mp)->m_sb.sb_sectsize * 2 + \
+	 (mp)->m_dirblksize + \
+	 (XFS_DIR_IS_V1(mp) ? 0 : \
+		XFS_FSB_TO_B(mp, (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1))) + \
+	 XFS_ALLOCFREE_LOG_RES(mp, 1) + \
+	 (128 * (4 + \
+		 (XFS_DIR_IS_V1(mp) ? 0 : \
+			XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) + \
+		 XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
+
+#define	XFS_ADDAFORK_LOG_RES(mp)	((mp)->m_reservations.tr_addafork)
+
+/*
+ * Removing the attribute fork of a file
+ *	the inode being truncated: inode size
+ *	the inode's bmap btree: max depth * block size
+ * And the bmap_finish transaction can free the blocks and bmap blocks:
+ *	the agf for each of the ags: 4 * sector size
+ *	the agfl for each of the ags: 4 * sector size
+ *	the super block to reflect the freed blocks: sector size
+ *	worst case split in allocation btrees per extent assuming 4 extents:
+ *		4 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+#define	XFS_CALC_ATTRINVAL_LOG_RES(mp)	\
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \
+	  (128 * (1 + XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))), \
+	 ((4 * (mp)->m_sb.sb_sectsize) + \
+	  (4 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 4) + \
+	  (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))))))
+
+#define	XFS_ATTRINVAL_LOG_RES(mp)	((mp)->m_reservations.tr_attrinval)
+
+/*
+ * Setting an attribute.
+ *	the inode getting the attribute
+ *	the superblock for allocations
+ *	the agfs extents are allocated from
+ *	the attribute btree * max depth
+ *	the inode allocation btree
+ * Since attribute transaction space is dependent on the size of the attribute,
+ * the calculation is done partially at mount time and partially at runtime.
+ */
+#define	XFS_CALC_ATTRSET_LOG_RES(mp)	\
+	((mp)->m_sb.sb_inodesize + \
+	 (mp)->m_sb.sb_sectsize + \
+	 XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \
+	 (128 * (2 + XFS_DA_NODE_MAXDEPTH)))
+
+#define	XFS_ATTRSET_LOG_RES(mp, ext)	\
+	((mp)->m_reservations.tr_attrset + \
+	 (ext * (mp)->m_sb.sb_sectsize) + \
+	 (ext * XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))) + \
+	 (128 * (ext + (ext * XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))))
+
+/*
+ * Removing an attribute.
+ *	the inode: inode size
+ *	the attribute btree could join: max depth * block size
+ *	the inode bmap btree could join or split: max depth * block size
+ * And the bmap_finish transaction can free the attr blocks freed giving:
+ *	the agf for the ag in which the blocks live: 2 * sector size
+ *	the agfl for the ag in which the blocks live: 2 * sector size
+ *	the superblock for the free block count: sector size
+ *	the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+#define	XFS_CALC_ATTRRM_LOG_RES(mp)	\
+	(MAX( \
+	 ((mp)->m_sb.sb_inodesize + \
+	  XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \
+	  XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \
+	  (128 * (1 + XFS_DA_NODE_MAXDEPTH + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \
+	 ((2 * (mp)->m_sb.sb_sectsize) + \
+	  (2 * (mp)->m_sb.sb_sectsize) + \
+	  (mp)->m_sb.sb_sectsize + \
+	  XFS_ALLOCFREE_LOG_RES(mp, 2) + \
+	  (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))))))
+
+#define	XFS_ATTRRM_LOG_RES(mp)	((mp)->m_reservations.tr_attrrm)
+
+/*
+ * Clearing a bad agino number in an agi hash bucket.
+ */
+#define	XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp) \
+	((mp)->m_sb.sb_sectsize + 128)
+
+#define	XFS_CLEAR_AGI_BUCKET_LOG_RES(mp)	((mp)->m_reservations.tr_clearagi)
+
+
+/*
+ * Various log count values.
+ */
+#define	XFS_DEFAULT_LOG_COUNT		1
+#define	XFS_DEFAULT_PERM_LOG_COUNT	2
+#define	XFS_ITRUNCATE_LOG_COUNT		2
+#define	XFS_CREATE_LOG_COUNT		2
+#define	XFS_MKDIR_LOG_COUNT		3
+#define	XFS_SYMLINK_LOG_COUNT		3
+#define	XFS_REMOVE_LOG_COUNT		2
+#define	XFS_LINK_LOG_COUNT		2
+#define	XFS_RENAME_LOG_COUNT		2
+#define	XFS_WRITE_LOG_COUNT		2
+#define	XFS_ADDAFORK_LOG_COUNT		2
+#define	XFS_ATTRINVAL_LOG_COUNT		1
+#define	XFS_ATTRSET_LOG_COUNT		3
+#define	XFS_ATTRRM_LOG_COUNT		3
+
+/*
+ * Here we centralize the specification of XFS meta-data buffer
+ * reference count values.  This determines how hard the buffer
+ * cache tries to hold onto the buffer.
+ */
+#define	XFS_AGF_REF		4
+#define	XFS_AGI_REF		4
+#define	XFS_AGFL_REF		3
+#define	XFS_INO_BTREE_REF	3
+#define	XFS_ALLOC_BTREE_REF	2
+#define	XFS_BMAP_BTREE_REF	2
+#define	XFS_DIR_BTREE_REF	2
+#define	XFS_ATTR_BTREE_REF	1
+#define	XFS_INO_REF		1
+#define	XFS_DQUOT_REF		1
+
+#ifdef __KERNEL__
+/*
+ * XFS transaction mechanism exported interfaces that are
+ * actually macros.
+ */
+#define	xfs_trans_get_log_res(tp)	((tp)->t_log_res)
+#define	xfs_trans_get_log_count(tp)	((tp)->t_log_count)
+#define	xfs_trans_get_block_res(tp)	((tp)->t_blk_res)
+#define	xfs_trans_set_sync(tp)		((tp)->t_flags |= XFS_TRANS_SYNC)
+
+#ifdef DEBUG
+#define	xfs_trans_agblocks_delta(tp, d)	((tp)->t_ag_freeblks_delta += (long)d)
+#define	xfs_trans_agflist_delta(tp, d)	((tp)->t_ag_flist_delta += (long)d)
+#define	xfs_trans_agbtree_delta(tp, d)	((tp)->t_ag_btree_delta += (long)d)
+#else
+#define	xfs_trans_agblocks_delta(tp, d)
+#define	xfs_trans_agflist_delta(tp, d)
+#define	xfs_trans_agbtree_delta(tp, d)
+#endif
+
+/*
+ * XFS transaction mechanism exported interfaces.
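+ *
+ * A typical call sequence, distilled from xfs_truncate_file() later in
+ * this patch (error handling elided; a sketch, not an additional export):
+ *
+ *	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
+ *	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+ *				  XFS_TRANS_PERM_LOG_RES,
+ *				  XFS_ITRUNCATE_LOG_COUNT);
+ *	...join and log items...
+ *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);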
+ */ +void xfs_trans_init(struct xfs_mount *); +xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint); +xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint); +xfs_trans_t *xfs_trans_dup(xfs_trans_t *); +int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint, + uint, uint); +void xfs_trans_callback(xfs_trans_t *, + void (*)(xfs_trans_t *, void *), void *); +void xfs_trans_mod_sb(xfs_trans_t *, uint, long); +struct xfs_buf *xfs_trans_get_buf(xfs_trans_t *, struct xfs_buftarg *, xfs_daddr_t, + int, uint); +int xfs_trans_read_buf(struct xfs_mount *, xfs_trans_t *, + struct xfs_buftarg *, xfs_daddr_t, int, uint, + struct xfs_buf **); +struct xfs_buf *xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int); + +void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_bhold_until_committed(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); +void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); +int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *, + xfs_ino_t , uint, struct xfs_inode **); +void xfs_trans_iput(xfs_trans_t *, struct xfs_inode *, uint); +void xfs_trans_ijoin(xfs_trans_t *, struct xfs_inode *, uint); +void xfs_trans_ihold(xfs_trans_t *, struct xfs_inode *); +void xfs_trans_ihold_release(xfs_trans_t *, struct xfs_inode *); +void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint); +void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint); +struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint); +void xfs_efi_release(struct xfs_efi_log_item *, uint); +void xfs_trans_log_efi_extent(xfs_trans_t *, + struct xfs_efi_log_item *, + xfs_fsblock_t, + xfs_extlen_t); +struct xfs_efd_log_item *xfs_trans_get_efd(xfs_trans_t *, + struct xfs_efi_log_item *, + uint); +void xfs_trans_log_efd_extent(xfs_trans_t *, + struct xfs_efd_log_item *, + xfs_fsblock_t, + xfs_extlen_t); +int xfs_trans_commit(xfs_trans_t *, uint flags, xfs_lsn_t *); +void xfs_trans_cancel(xfs_trans_t *, int); +void xfs_trans_ail_init(struct xfs_mount *); +xfs_lsn_t xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); +xfs_lsn_t xfs_trans_tail_ail(struct xfs_mount *); +void xfs_trans_unlocked_item(struct xfs_mount *, + xfs_log_item_t *); +xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, + xfs_agnumber_t ag, + xfs_extlen_t idx); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_TRANS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_trans_inode.c linux.21rc5-ac2/fs/xfs/xfs_trans_inode.c --- linux.21rc5/fs/xfs/xfs_trans_inode.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_trans_inode.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" + +#ifdef XFS_TRANS_DEBUG +STATIC void +xfs_trans_inode_broot_debug( + xfs_inode_t *ip); +#else +#define xfs_trans_inode_broot_debug(ip) +#endif + + +/* + * Get and lock the inode for the caller if it is not already + * locked within the given transaction. If it is already locked + * within the transaction, just increment its lock recursion count + * and return a pointer to it. + * + * For an inode to be locked in a transaction, the inode lock, as + * opposed to the io lock, must be taken exclusively. This ensures + * that the inode can be involved in only 1 transaction at a time. + * Lock recursion is handled on the io lock, but only for lock modes + * of equal or lesser strength. That is, you can recur on the io lock + * held EXCL with a SHARED request but not vice versa. Also, if + * the inode is already a part of the transaction then you cannot + * go from not holding the io lock to having it EXCL or SHARED. + * + * Use the inode cache routine xfs_inode_incore() to find the inode + * if it is already owned by this transaction. + * + * If we don't already own the inode, use xfs_iget() to get it. + * Since the inode log item structure is embedded in the incore + * inode structure and is initialized when the inode is brought + * into memory, there is nothing to do with it here. + * + * If the given transaction pointer is NULL, just call xfs_iget(). + * This simplifies code which must handle both cases. + */ +int +xfs_trans_iget( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + uint lock_flags, + xfs_inode_t **ipp) +{ + int error; + xfs_inode_t *ip; + xfs_inode_log_item_t *iip; + + /* + * If the transaction pointer is NULL, just call the normal + * xfs_iget(). + */ + if (tp == NULL) { + return (xfs_iget(mp, NULL, ino, lock_flags, ipp, 0)); + } + + /* + * If we find the inode in core with this transaction + * pointer in its i_transp field, then we know we already + * have it locked. In this case we just increment the lock + * recursion count and return the inode to the caller. 
+ * Assert that the inode is already locked in the mode requested
+ * by the caller.  We cannot do lock promotions yet, so
+ * die if someone gets this wrong.
+ */
+	if ((ip = xfs_inode_incore(tp->t_mountp, ino, tp)) != NULL) {
+		/*
+		 * Make sure that the inode lock is held EXCL and
+		 * that the io lock is never upgraded when the inode
+		 * is already a part of the transaction.
+		 */
+		ASSERT(ip->i_itemp != NULL);
+		ASSERT(lock_flags & XFS_ILOCK_EXCL);
+		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
+		       ismrlocked(&ip->i_iolock, MR_UPDATE));
+		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
+		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL));
+		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
+		       ismrlocked(&ip->i_iolock, (MR_UPDATE | MR_ACCESS)));
+		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
+		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY));
+
+		if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
+			ip->i_itemp->ili_iolock_recur++;
+		}
+		if (lock_flags & XFS_ILOCK_EXCL) {
+			ip->i_itemp->ili_ilock_recur++;
+		}
+		*ipp = ip;
+		return 0;
+	}
+
+	ASSERT(lock_flags & XFS_ILOCK_EXCL);
+	error = xfs_iget(tp->t_mountp, tp, ino, lock_flags, &ip, 0);
+	if (error) {
+		return error;
+	}
+	ASSERT(ip != NULL);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	if (ip->i_itemp == NULL)
+		xfs_inode_item_init(ip, mp);
+	iip = ip->i_itemp;
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)(iip));
+
+	xfs_trans_inode_broot_debug(ip);
+
+	/*
+	 * If the IO lock has been acquired, mark that in
+	 * the inode log item so we'll know to unlock it
+	 * when the transaction commits.
+	 */
+	ASSERT(iip->ili_flags == 0);
+	if (lock_flags & XFS_IOLOCK_EXCL) {
+		iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
+	} else if (lock_flags & XFS_IOLOCK_SHARED) {
+		iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
+	}
+
+	/*
+	 * Initialize i_transp so we can find it with xfs_inode_incore()
+	 * above.
+	 */
+	ip->i_transp = tp;
+
+	*ipp = ip;
+	return 0;
+}
+
+
+/*
+ * Release the inode ip which was previously acquired with xfs_trans_iget()
+ * or added with xfs_trans_ijoin().  This will decrement the lock
+ * recursion count of the inode item.  If the count goes to less than 0,
+ * the inode will be unlocked and disassociated from the transaction.
+ *
+ * If the inode has been modified within the transaction, it will not be
+ * unlocked until the transaction commits.
+ */
+void
+xfs_trans_iput(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	uint		lock_flags)
+{
+	xfs_inode_log_item_t	*iip;
+	xfs_log_item_desc_t	*lidp;
+
+	/*
+	 * If the transaction pointer is NULL, just call xfs_iput().
+	 */
+	if (tp == NULL) {
+		xfs_iput(ip, lock_flags);
+		return;
+	}
+
+	ASSERT(ip->i_transp == tp);
+	iip = ip->i_itemp;
+	ASSERT(iip != NULL);
+
+	/*
+	 * Find the item descriptor pointing to this inode's
+	 * log item.  It must be there.
+	 */
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)iip);
+	ASSERT(lidp != NULL);
+	ASSERT(lidp->lid_item == (xfs_log_item_t*)iip);
+
+	/*
+	 * Be consistent about the bookkeeping for the inode's
+	 * io lock, but it doesn't mean much really.
+ */ + ASSERT((iip->ili_flags & XFS_ILI_IOLOCKED_ANY) != XFS_ILI_IOLOCKED_ANY); + if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) { + ASSERT(iip->ili_flags & XFS_ILI_IOLOCKED_ANY); + ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) || + (iip->ili_flags & XFS_ILI_IOLOCKED_EXCL)); + ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) || + (iip->ili_flags & + (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED))); + if (iip->ili_iolock_recur > 0) { + iip->ili_iolock_recur--; + } + } + + /* + * If the release is just for a recursive lock on the inode lock, + * then decrement the count and return. We can assert that + * the caller is dropping an EXCL lock on the inode, because + * inode must be locked EXCL within transactions. + */ + ASSERT(lock_flags & XFS_ILOCK_EXCL); + if (iip->ili_ilock_recur > 0) { + iip->ili_ilock_recur--; + return; + } + ASSERT(iip->ili_iolock_recur == 0); + + /* + * If the inode was dirtied within this transaction, it cannot + * be released until the transaction commits. + */ + if (lidp->lid_flags & XFS_LID_DIRTY) { + return; + } + + xfs_trans_free_item(tp, lidp); + + /* + * Clear the hold and iolocked flags in the inode log item. + * We wouldn't want the next user of the inode to + * get confused. Assert that if the iolocked flag is set + * in the item then we are unlocking it in the call to xfs_iput() + * below. + */ + ASSERT((!(iip->ili_flags & XFS_ILI_IOLOCKED_ANY)) || + (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED))); + if (iip->ili_flags & (XFS_ILI_HOLD | XFS_ILI_IOLOCKED_ANY)) { + iip->ili_flags &= ~(XFS_ILI_HOLD | XFS_ILI_IOLOCKED_ANY); + } + + /* + * Unlike xfs_brelse() the inode log item cannot be + * freed, because it is embedded within the inode. + * All we have to do is release the inode. + */ + xfs_iput(ip, lock_flags); + return; +} + + +/* + * Add the locked inode to the transaction. + * The inode must be locked, and it cannot be associated with any + * transaction. The caller must specify the locks already held + * on the inode. + */ +void +xfs_trans_ijoin( + xfs_trans_t *tp, + xfs_inode_t *ip, + uint lock_flags) +{ + xfs_inode_log_item_t *iip; + + ASSERT(ip->i_transp == NULL); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(lock_flags & XFS_ILOCK_EXCL); + if (ip->i_itemp == NULL) + xfs_inode_item_init(ip, ip->i_mount); + iip = ip->i_itemp; + ASSERT(iip->ili_flags == 0); + ASSERT(iip->ili_ilock_recur == 0); + ASSERT(iip->ili_iolock_recur == 0); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(iip)); + + xfs_trans_inode_broot_debug(ip); + + /* + * If the IO lock is already held, mark that in the inode log item. + */ + if (lock_flags & XFS_IOLOCK_EXCL) { + iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL; + } else if (lock_flags & XFS_IOLOCK_SHARED) { + iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED; + } + + /* + * Initialize i_transp so we can find it with xfs_inode_incore() + * in xfs_trans_iget() above. + */ + ip->i_transp = tp; +} + + + +/* + * Mark the inode as not needing to be unlocked when the inode item's + * IOP_UNLOCK() routine is called. The inode must already be locked + * and associated with the given transaction. + */ +/*ARGSUSED*/ +void +xfs_trans_ihold( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + ASSERT(ip->i_transp == tp); + ASSERT(ip->i_itemp != NULL); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + + ip->i_itemp->ili_flags |= XFS_ILI_HOLD; +} + +/* + * Cancel the previous inode hold request made on this inode + * for this transaction. 
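+ *
+ * For illustration, a hold/release pair in a transaction might look
+ * like the following (hypothetical caller; the hold half matches
+ * xfs_truncate_file() later in this patch):
+ *
+ *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ *	xfs_trans_ihold(tp, ip);
+ *	...
+ *	xfs_trans_ihold_release(tp, ip);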
+ */
+/*ARGSUSED*/
+void
+xfs_trans_ihold_release(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip)
+{
+	ASSERT(ip->i_transp == tp);
+	ASSERT(ip->i_itemp != NULL);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
+
+	ip->i_itemp->ili_flags &= ~XFS_ILI_HOLD;
+}
+
+
+/*
+ * This is called to mark the fields indicated in fieldmask as needing
+ * to be logged when the transaction is committed.  The inode must
+ * already be associated with the given transaction.
+ *
+ * The values for fieldmask are defined in xfs_inode_item.h.  We always
+ * log all of the core inode if any of it has changed, and we always log
+ * all of the inline data/extents/b-tree root if any of them has changed.
+ */
+void
+xfs_trans_log_inode(
+	xfs_trans_t	*tp,
+	xfs_inode_t	*ip,
+	uint		flags)
+{
+	xfs_log_item_desc_t	*lidp;
+
+	ASSERT(ip->i_transp == tp);
+	ASSERT(ip->i_itemp != NULL);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+
+	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp));
+	ASSERT(lidp != NULL);
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	lidp->lid_flags |= XFS_LID_DIRTY;
+
+	/*
+	 * Always OR in the bits from the ili_last_fields field.
+	 * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
+	 * routines in the eventual clearing of the ilf_fields bits.
+	 * See the big comment in xfs_iflush() for an explanation of
+	 * this coordination mechanism.
+	 */
+	flags |= ip->i_itemp->ili_last_fields;
+	ip->i_itemp->ili_format.ilf_fields |= flags;
+}
+
+#ifdef XFS_TRANS_DEBUG
+/*
+ * Keep track of the state of the inode btree root to make sure we
+ * log it properly.
+ */
+STATIC void
+xfs_trans_inode_broot_debug(
+	xfs_inode_t	*ip)
+{
+	xfs_inode_log_item_t	*iip;
+
+	ASSERT(ip->i_itemp != NULL);
+	iip = ip->i_itemp;
+	if (iip->ili_root_size != 0) {
+		ASSERT(iip->ili_orig_root != NULL);
+		kmem_free(iip->ili_orig_root, iip->ili_root_size);
+		iip->ili_root_size = 0;
+		iip->ili_orig_root = NULL;
+	}
+	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+		ASSERT((ip->i_df.if_broot != NULL) &&
+		       (ip->i_df.if_broot_bytes > 0));
+		iip->ili_root_size = ip->i_df.if_broot_bytes;
+		iip->ili_orig_root =
+			(char*)kmem_alloc(iip->ili_root_size, KM_SLEEP);
+		memcpy(iip->ili_orig_root, (char*)(ip->i_df.if_broot),
+		       iip->ili_root_size);
+	}
+}
+#endif
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_trans_item.c linux.21rc5-ac2/fs/xfs/xfs_trans_item.c
--- linux.21rc5/fs/xfs/xfs_trans_item.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_trans_item.c	2003-05-07 15:41:10.000000000 +0100
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" + +STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *, + int, int, xfs_lsn_t); + +/* + * This is called to add the given log item to the transaction's + * list of log items. It must find a free log item descriptor + * or allocate a new one and add the item to that descriptor. + * The function returns a pointer to item descriptor used to point + * to the new item. The log item will now point to its new descriptor + * with its li_desc field. + */ +xfs_log_item_desc_t * +xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) +{ + xfs_log_item_desc_t *lidp; + xfs_log_item_chunk_t *licp; + int i=0; + + /* + * If there are no free descriptors, allocate a new chunk + * of them and put it at the front of the chunk list. + */ + if (tp->t_items_free == 0) { + licp = (xfs_log_item_chunk_t*) + kmem_alloc(sizeof(xfs_log_item_chunk_t), KM_SLEEP); + ASSERT(licp != NULL); + /* + * Initialize the chunk, and then + * claim the first slot in the newly allocated chunk. + */ + XFS_LIC_INIT(licp); + XFS_LIC_CLAIM(licp, 0); + licp->lic_unused = 1; + XFS_LIC_INIT_SLOT(licp, 0); + lidp = XFS_LIC_SLOT(licp, 0); + + /* + * Link in the new chunk and update the free count. + */ + licp->lic_next = tp->t_items.lic_next; + tp->t_items.lic_next = licp; + tp->t_items_free = XFS_LIC_NUM_SLOTS - 1; + + /* + * Initialize the descriptor and the generic portion + * of the log item. + * + * Point the new slot at this item and return it. + * Also point the log item at its currently active + * descriptor and set the item's mount pointer. + */ + lidp->lid_item = lip; + lidp->lid_flags = 0; + lidp->lid_size = 0; + lip->li_desc = lidp; + lip->li_mountp = tp->t_mountp; + return (lidp); + } + + /* + * Find the free descriptor. It is somewhere in the chunklist + * of descriptors. + */ + licp = &tp->t_items; + while (licp != NULL) { + if (XFS_LIC_VACANCY(licp)) { + if (licp->lic_unused <= XFS_LIC_MAX_SLOT) { + i = licp->lic_unused; + ASSERT(XFS_LIC_ISFREE(licp, i)); + break; + } + for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) { + if (XFS_LIC_ISFREE(licp, i)) + break; + } + ASSERT(i <= XFS_LIC_MAX_SLOT); + break; + } + licp = licp->lic_next; + } + ASSERT(licp != NULL); + /* + * If we find a free descriptor, claim it, + * initialize it, and return it. + */ + XFS_LIC_CLAIM(licp, i); + if (licp->lic_unused <= i) { + licp->lic_unused = i + 1; + XFS_LIC_INIT_SLOT(licp, i); + } + lidp = XFS_LIC_SLOT(licp, i); + tp->t_items_free--; + lidp->lid_item = lip; + lidp->lid_flags = 0; + lidp->lid_size = 0; + lip->li_desc = lidp; + lip->li_mountp = tp->t_mountp; + return (lidp); +} + +/* + * Free the given descriptor. + * + * This requires setting the bit in the chunk's free mask corresponding + * to the given slot. 
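+ *
+ * (Conceptually the release below reduces to setting one bit in the
+ * chunk's free mask -- roughly licp->lic_free |= (1 << slot) -- assuming
+ * the usual definition of the XFS_LIC_RELSE() macro in xfs_trans.h;
+ * this note is a sketch, not a new interface.)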
+ */ +void +xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) +{ + uint slot; + xfs_log_item_chunk_t *licp; + xfs_log_item_chunk_t **licpp; + + slot = XFS_LIC_DESC_TO_SLOT(lidp); + licp = XFS_LIC_DESC_TO_CHUNK(lidp); + XFS_LIC_RELSE(licp, slot); + lidp->lid_item->li_desc = NULL; + tp->t_items_free++; + + /* + * If there are no more used items in the chunk and this is not + * the chunk embedded in the transaction structure, then free + * the chunk. First pull it from the chunk list and then + * free it back to the heap. We didn't bother with a doubly + * linked list here because the lists should be very short + * and this is not a performance path. It's better to save + * the memory of the extra pointer. + * + * Also decrement the transaction structure's count of free items + * by the number in a chunk since we are freeing an empty chunk. + */ + if (XFS_LIC_ARE_ALL_FREE(licp) && (licp != &(tp->t_items))) { + licpp = &(tp->t_items.lic_next); + while (*licpp != licp) { + ASSERT(*licpp != NULL); + licpp = &((*licpp)->lic_next); + } + *licpp = licp->lic_next; + kmem_free(licp, sizeof(xfs_log_item_chunk_t)); + tp->t_items_free -= XFS_LIC_NUM_SLOTS; + } +} + +/* + * This is called to find the descriptor corresponding to the given + * log item. It returns a pointer to the descriptor. + * The log item MUST have a corresponding descriptor in the given + * transaction. This routine does not return NULL, it panics. + * + * The descriptor pointer is kept in the log item's li_desc field. + * Just return it. + */ +/*ARGSUSED*/ +xfs_log_item_desc_t * +xfs_trans_find_item(xfs_trans_t *tp, xfs_log_item_t *lip) +{ + ASSERT(lip->li_desc != NULL); + + return (lip->li_desc); +} + + +/* + * Return a pointer to the first descriptor in the chunk list. + * This does not return NULL if there are none, it panics. + * + * The first descriptor must be in either the first or second chunk. + * This is because the only chunk allowed to be empty is the first. + * All others are freed when they become empty. + * + * At some point this and xfs_trans_next_item() should be optimized + * to quickly look at the mask to determine if there is anything to + * look at. + */ +xfs_log_item_desc_t * +xfs_trans_first_item(xfs_trans_t *tp) +{ + xfs_log_item_chunk_t *licp; + int i; + + licp = &tp->t_items; + /* + * If it's not in the first chunk, skip to the second. + */ + if (XFS_LIC_ARE_ALL_FREE(licp)) { + licp = licp->lic_next; + } + + /* + * Return the first non-free descriptor in the chunk. + */ + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); + for (i = 0; i < licp->lic_unused; i++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + return (XFS_LIC_SLOT(licp, i)); + } + cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item"); + return(NULL); +} + + +/* + * Given a descriptor, return the next descriptor in the chunk list. + * This returns NULL if there are no more used descriptors in the list. + * + * We do this by first locating the chunk in which the descriptor resides, + * and then scanning forward in the chunk and the list for the next + * used descriptor. + */ +/*ARGSUSED*/ +xfs_log_item_desc_t * +xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) +{ + xfs_log_item_chunk_t *licp; + int i; + + licp = XFS_LIC_DESC_TO_CHUNK(lidp); + + /* + * First search the rest of the chunk. The for loop keeps us + * from referencing things beyond the end of the chunk. 
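+ * (lic_unused is a high-water mark rather than a population count:
+ * slots at or beyond it were never initialized, so the scans below
+ * stop there instead of at XFS_LIC_MAX_SLOT.)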
+ */
+	for (i = (int)XFS_LIC_DESC_TO_SLOT(lidp) + 1; i < licp->lic_unused; i++) {
+		if (XFS_LIC_ISFREE(licp, i)) {
+			continue;
+		}
+
+		return (XFS_LIC_SLOT(licp, i));
+	}
+
+	/*
+	 * Now search the next chunk.  It must be there, because the
+	 * next chunk would have been freed if it were empty.
+	 * If there is no next chunk, return NULL.
+	 */
+	if (licp->lic_next == NULL) {
+		return (NULL);
+	}
+
+	licp = licp->lic_next;
+	ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+	for (i = 0; i < licp->lic_unused; i++) {
+		if (XFS_LIC_ISFREE(licp, i)) {
+			continue;
+		}
+
+		return (XFS_LIC_SLOT(licp, i));
+	}
+	ASSERT(0);
+	/* NOTREACHED */
+	return 0; /* keep gcc quiet */
+}
+
+/*
+ * This is called to unlock all of the items of a transaction and to free
+ * all the descriptors of that transaction.
+ *
+ * It walks the list of descriptors and unlocks each item.  It frees
+ * each chunk except that embedded in the transaction as it goes along.
+ */
+void
+xfs_trans_free_items(
+	xfs_trans_t	*tp,
+	int		flags)
+{
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_chunk_t	*next_licp;
+	int			abort;
+
+	abort = flags & XFS_TRANS_ABORT;
+	licp = &tp->t_items;
+	/*
+	 * Special case the embedded chunk so we don't free it below.
+	 */
+	if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+		(void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
+		XFS_LIC_ALL_FREE(licp);
+		licp->lic_unused = 0;
+	}
+	licp = licp->lic_next;
+
+	/*
+	 * Unlock each item in each chunk and free the chunks.
+	 */
+	while (licp != NULL) {
+		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+		(void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
+		next_licp = licp->lic_next;
+		kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+		licp = next_licp;
+	}
+
+	/*
+	 * Reset the transaction structure's free item count.
+	 */
+	tp->t_items_free = XFS_LIC_NUM_SLOTS;
+	tp->t_items.lic_next = NULL;
+}
+
+
+
+/*
+ * This is called to unlock the items associated with a transaction.
+ * Items which were not logged should be freed.
+ * Those which were logged must still be tracked so they can be unpinned
+ * when the transaction commits.
+ */
+void
+xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
+{
+	xfs_log_item_chunk_t	*licp;
+	xfs_log_item_chunk_t	*next_licp;
+	xfs_log_item_chunk_t	**licpp;
+	int			freed;
+
+	freed = 0;
+	licp = &tp->t_items;
+
+	/*
+	 * Special case the embedded chunk so we don't free.
+	 */
+	if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+		freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
+	}
+	licpp = &(tp->t_items.lic_next);
+	licp = licp->lic_next;
+
+	/*
+	 * Unlock each item in each chunk, free non-dirty descriptors,
+	 * and free empty chunks.
+	 */
+	while (licp != NULL) {
+		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+		freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
+		next_licp = licp->lic_next;
+		if (XFS_LIC_ARE_ALL_FREE(licp)) {
+			*licpp = next_licp;
+			kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+			freed -= XFS_LIC_NUM_SLOTS;
+		} else {
+			licpp = &(licp->lic_next);
+		}
+		ASSERT(*licpp == next_licp);
+		licp = next_licp;
+	}
+
+	/*
+	 * Fix the free descriptor count in the transaction.
+	 */
+	tp->t_items_free += freed;
+}
+
+/*
+ * Unlock each item pointed to by a descriptor in the given chunk.
+ * Stamp the commit lsn into each item if necessary.
+ * Free descriptors pointing to items which are not dirty if freeing_chunk
+ * is zero.  If freeing_chunk is non-zero, then we need to unlock all
+ * items in the chunk including those with XFS_LID_SYNC_UNLOCK set.
+ * Return the number of descriptors freed.
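+ * (A commit_lsn of NULLCOMMITLSN suppresses the IOP_COMMITTING() call;
+ * xfs_trans_free_items() above passes it for exactly that reason.)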
+ */ +STATIC int +xfs_trans_unlock_chunk( + xfs_log_item_chunk_t *licp, + int freeing_chunk, + int abort, + xfs_lsn_t commit_lsn) +{ + xfs_log_item_desc_t *lidp; + xfs_log_item_t *lip; + int i; + int freed; + + freed = 0; + lidp = licp->lic_descs; + for (i = 0; i < licp->lic_unused; i++, lidp++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + lip = lidp->lid_item; + lip->li_desc = NULL; + + if (commit_lsn != NULLCOMMITLSN) + IOP_COMMITTING(lip, commit_lsn); + + /* XXXsup */ + if (abort) + lip->li_flags |= XFS_LI_ABORTED; + + /* if (abort) { + IOP_ABORT(lip); + } else */ + if (!(lidp->lid_flags & XFS_LID_SYNC_UNLOCK) || + freeing_chunk || abort) { + IOP_UNLOCK(lip); + } + + /* + * Free the descriptor if the item is not dirty + * within this transaction and the caller is not + * going to just free the entire thing regardless. + */ + if (!(freeing_chunk) && + (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) { + XFS_LIC_RELSE(licp, i); + freed++; + } + } + + return (freed); +} + + +/* + * This is called to add the given busy item to the transaction's + * list of busy items. It must find a free busy item descriptor + * or allocate a new one and add the item to that descriptor. + * The function returns a pointer to busy descriptor used to point + * to the new busy entry. The log busy entry will now point to its new + * descriptor with its ???? field. + */ +xfs_log_busy_slot_t * +xfs_trans_add_busy(xfs_trans_t *tp, xfs_agnumber_t ag, xfs_extlen_t idx) +{ + xfs_log_busy_chunk_t *lbcp; + xfs_log_busy_slot_t *lbsp; + int i=0; + + /* + * If there are no free descriptors, allocate a new chunk + * of them and put it at the front of the chunk list. + */ + if (tp->t_busy_free == 0) { + lbcp = (xfs_log_busy_chunk_t*) + kmem_alloc(sizeof(xfs_log_busy_chunk_t), KM_SLEEP); + ASSERT(lbcp != NULL); + /* + * Initialize the chunk, and then + * claim the first slot in the newly allocated chunk. + */ + XFS_LBC_INIT(lbcp); + XFS_LBC_CLAIM(lbcp, 0); + lbcp->lbc_unused = 1; + lbsp = XFS_LBC_SLOT(lbcp, 0); + + /* + * Link in the new chunk and update the free count. + */ + lbcp->lbc_next = tp->t_busy.lbc_next; + tp->t_busy.lbc_next = lbcp; + tp->t_busy_free = XFS_LIC_NUM_SLOTS - 1; + + /* + * Initialize the descriptor and the generic portion + * of the log item. + * + * Point the new slot at this item and return it. + * Also point the log item at its currently active + * descriptor and set the item's mount pointer. + */ + lbsp->lbc_ag = ag; + lbsp->lbc_idx = idx; + return (lbsp); + } + + /* + * Find the free descriptor. It is somewhere in the chunklist + * of descriptors. + */ + lbcp = &tp->t_busy; + while (lbcp != NULL) { + if (XFS_LBC_VACANCY(lbcp)) { + if (lbcp->lbc_unused <= XFS_LBC_MAX_SLOT) { + i = lbcp->lbc_unused; + break; + } else { + /* out-of-order vacancy */ + printk("OOO vacancy lbcp 0x%p\n", lbcp); + ASSERT(0); + } + } + lbcp = lbcp->lbc_next; + } + ASSERT(lbcp != NULL); + /* + * If we find a free descriptor, claim it, + * initialize it, and return it. 
+ */ + XFS_LBC_CLAIM(lbcp, i); + if (lbcp->lbc_unused <= i) { + lbcp->lbc_unused = i + 1; + } + lbsp = XFS_LBC_SLOT(lbcp, i); + tp->t_busy_free--; + lbsp->lbc_ag = ag; + lbsp->lbc_idx = idx; + return (lbsp); +} + + +/* + * xfs_trans_free_busy + * Free all of the busy lists from a transaction + */ +void +xfs_trans_free_busy(xfs_trans_t *tp) +{ + xfs_log_busy_chunk_t *lbcp; + xfs_log_busy_chunk_t *lbcq; + + lbcp = tp->t_busy.lbc_next; + while (lbcp != NULL) { + lbcq = lbcp->lbc_next; + kmem_free(lbcp, sizeof(xfs_log_busy_chunk_t)); + lbcp = lbcq; + } + + XFS_LBC_INIT(&tp->t_busy); + tp->t_busy.lbc_unused = 0; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_trans_priv.h linux.21rc5-ac2/fs/xfs/xfs_trans_priv.h --- linux.21rc5/fs/xfs/xfs_trans_priv.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_trans_priv.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_TRANS_PRIV_H__ +#define __XFS_TRANS_PRIV_H__ + +struct xfs_log_item; +struct xfs_log_item_desc; +struct xfs_mount; +struct xfs_trans; + +/* + * From xfs_trans_item.c + */ +struct xfs_log_item_desc *xfs_trans_add_item(struct xfs_trans *, + struct xfs_log_item *); +void xfs_trans_free_item(struct xfs_trans *, + struct xfs_log_item_desc *); +struct xfs_log_item_desc *xfs_trans_find_item(struct xfs_trans *, + struct xfs_log_item *); +struct xfs_log_item_desc *xfs_trans_first_item(struct xfs_trans *); +struct xfs_log_item_desc *xfs_trans_next_item(struct xfs_trans *, + struct xfs_log_item_desc *); +void xfs_trans_free_items(struct xfs_trans *, int); +void xfs_trans_unlock_items(struct xfs_trans *, + xfs_lsn_t); +void xfs_trans_free_busy(xfs_trans_t *tp); +xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, + xfs_agnumber_t ag, + xfs_extlen_t idx); + +/* + * From xfs_trans_ail.c + */ +void xfs_trans_update_ail(struct xfs_mount *, + struct xfs_log_item *, xfs_lsn_t, + unsigned long); +void xfs_trans_delete_ail(struct xfs_mount *, + struct xfs_log_item *, unsigned long); +struct xfs_log_item *xfs_trans_first_ail(struct xfs_mount *, int *); +struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *, + struct xfs_log_item *, int *, int *); + + +#endif /* __XFS_TRANS_PRIV_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_trans_space.h linux.21rc5-ac2/fs/xfs/xfs_trans_space.h --- linux.21rc5/fs/xfs/xfs_trans_space.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_trans_space.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_TRANS_SPACE_H__ +#define __XFS_TRANS_SPACE_H__ + +/* + * Components of space reservations. 
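+ *
+ * A worked example with illustrative numbers (not values from this
+ * code): if XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) is 5, then
+ * XFS_EXTENTADD_SPACE_RES() reserves 5 - 1 = 4 blocks per extent
+ * added, one for a possible split at each btree level above the
+ * leaves; if at most 125 further extents fit in one btree block,
+ * inserting 300 extents reserves ((300 + 124) / 125) * 4 = 12 blocks
+ * via XFS_NEXTENTADD_SPACE_RES().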
+ */ +#define XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) \ + (((mp)->m_alloc_mxr[0]) - ((mp)->m_alloc_mnr[0])) +#define XFS_EXTENTADD_SPACE_RES(mp,w) (XFS_BM_MAXLEVELS(mp,w) - 1) +#define XFS_NEXTENTADD_SPACE_RES(mp,b,w)\ + (((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \ + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \ + XFS_EXTENTADD_SPACE_RES(mp,w)) +#define XFS_DAENTER_1B(mp,w) ((w) == XFS_DATA_FORK ? (mp)->m_dirblkfsbs : 1) +#define XFS_DAENTER_DBS(mp,w) \ + (XFS_DA_NODE_MAXDEPTH + \ + ((XFS_DIR_IS_V2(mp) && (w) == XFS_DATA_FORK) ? 2 : 0)) +#define XFS_DAENTER_BLOCKS(mp,w) \ + (XFS_DAENTER_1B(mp,w) * XFS_DAENTER_DBS(mp,w)) +#define XFS_DAENTER_BMAP1B(mp,w) \ + XFS_NEXTENTADD_SPACE_RES(mp, XFS_DAENTER_1B(mp, w), w) +#define XFS_DAENTER_BMAPS(mp,w) \ + (XFS_DAENTER_DBS(mp,w) * XFS_DAENTER_BMAP1B(mp,w)) +#define XFS_DAENTER_SPACE_RES(mp,w) \ + (XFS_DAENTER_BLOCKS(mp,w) + XFS_DAENTER_BMAPS(mp,w)) +#define XFS_DAREMOVE_SPACE_RES(mp,w) XFS_DAENTER_BMAPS(mp,w) +#define XFS_DIRENTER_MAX_SPLIT(mp,nl) \ + (((mp)->m_sb.sb_blocksize == 512 && \ + XFS_DIR_IS_V1(mp) && \ + (nl) >= XFS_DIR_LEAF_CAN_DOUBLE_SPLIT_LEN) ? 2 : 1) +#define XFS_DIRENTER_SPACE_RES(mp,nl) \ + (XFS_DAENTER_SPACE_RES(mp, XFS_DATA_FORK) * \ + XFS_DIRENTER_MAX_SPLIT(mp,nl)) +#define XFS_DIRREMOVE_SPACE_RES(mp) \ + XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) +#define XFS_IALLOC_SPACE_RES(mp) \ + (XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp)-1) + +/* + * Space reservation values for various transactions. + */ +#define XFS_ADDAFORK_SPACE_RES(mp) \ + ((mp)->m_dirblkfsbs + \ + (XFS_DIR_IS_V1(mp) ? 0 : XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK))) +#define XFS_ATTRRM_SPACE_RES(mp) \ + XFS_DAREMOVE_SPACE_RES(mp, XFS_ATTR_FORK) +/* This macro is not used - see inline code in xfs_attr_set */ +#define XFS_ATTRSET_SPACE_RES(mp, v) \ + (XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK) + XFS_B_TO_FSB(mp, v)) +#define XFS_CREATE_SPACE_RES(mp,nl) \ + (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl)) +#define XFS_DIOSTRAT_SPACE_RES(mp, v) \ + (XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK) + (v)) +#define XFS_GROWFS_SPACE_RES(mp) \ + (2 * XFS_AG_MAXLEVELS(mp)) +#define XFS_GROWFSRT_SPACE_RES(mp,b) \ + ((b) + XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK)) +#define XFS_LINK_SPACE_RES(mp,nl) \ + XFS_DIRENTER_SPACE_RES(mp,nl) +#define XFS_MKDIR_SPACE_RES(mp,nl) \ + (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl)) +#define XFS_QM_DQALLOC_SPACE_RES(mp) \ + (XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK) + \ + XFS_DQUOT_CLUSTER_SIZE_FSB) +#define XFS_QM_QINOCREATE_SPACE_RES(mp) \ + XFS_IALLOC_SPACE_RES(mp) +#define XFS_REMOVE_SPACE_RES(mp) \ + XFS_DIRREMOVE_SPACE_RES(mp) +#define XFS_RENAME_SPACE_RES(mp,nl) \ + (XFS_DIRREMOVE_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl)) +#define XFS_SYMLINK_SPACE_RES(mp,nl,b) \ + (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl) + (b)) + +#endif /* __XFS_TRANS_SPACE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_types.h linux.21rc5-ac2/fs/xfs/xfs_types.h --- linux.21rc5/fs/xfs/xfs_types.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_types.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_TYPES_H__
+#define __XFS_TYPES_H__
+
+#ifdef __KERNEL__
+
+/*
+ * POSIX Extensions
+ */
+typedef unsigned char		uchar_t;
+typedef unsigned short		ushort_t;
+typedef unsigned int		uint_t;
+typedef unsigned long		ulong_t;
+
+/*
+ * Additional type declarations for XFS
+ */
+typedef signed char		__int8_t;
+typedef unsigned char		__uint8_t;
+typedef signed short int	__int16_t;
+typedef unsigned short int	__uint16_t;
+typedef signed int		__int32_t;
+typedef unsigned int		__uint32_t;
+typedef signed long long int	__int64_t;
+typedef unsigned long long int	__uint64_t;
+
+typedef enum { B_FALSE,B_TRUE }	boolean_t;
+typedef __int64_t	prid_t;		/* project ID */
+typedef __uint32_t	inst_t;		/* an instruction */
+
+typedef __u64		xfs_off_t;
+typedef __u64		xfs_ino_t;	/* type */
+typedef __s64		xfs_daddr_t;	/* type */
+typedef char *		xfs_caddr_t;	/* type */
+typedef __u32		xfs_dev_t;
+
+/* __psint_t is the same size as a pointer */
+#if (BITS_PER_LONG == 32)
+typedef __int32_t __psint_t;
+typedef __uint32_t __psunsigned_t;
+#elif (BITS_PER_LONG == 64)
+typedef __int64_t __psint_t;
+typedef __uint64_t __psunsigned_t;
+#else
+#error BITS_PER_LONG must be 32 or 64
+#endif
+
+#endif	/* __KERNEL__ */
+
+/*
+ * Some types are conditional based on the selected configuration.
+ * Set XFS_BIG_FILESYSTEMS=1 or 0 depending on the desired configuration.
+ * XFS_BIG_FILESYSTEMS needs daddr_t to be 64 bits
+ *
+ * On linux right now we are limited to 2^32 512 byte blocks in a
+ * filesystem.  Once this limit is changed, setting this to 1
+ * will allow XFS to go larger.  With BIG_FILESYSTEMS set to 0
+ * a 4K block filesystem could still theoretically be 16Tbytes
+ * long, so on an ia32 box the 32 bit page index will then be
+ * the limiting factor.
+ */
+
+#if defined(CONFIG_LBD) || (defined(HAVE_SECTOR_T) && (BITS_PER_LONG == 64))
+# ifndef XFS_BIG_FILESYSTEMS
+# define XFS_BIG_FILESYSTEMS	1
+# endif
+#else
+# ifndef XFS_BIG_FILESYSTEMS
+# define XFS_BIG_FILESYSTEMS	0
+# endif
+#endif
+
+typedef __uint32_t	xfs_agblock_t;	/* blockno in alloc.
group */ +typedef __uint32_t xfs_extlen_t; /* extent length in blocks */ +typedef __uint32_t xfs_agnumber_t; /* allocation group number */ +typedef __int32_t xfs_extnum_t; /* # of extents in a file */ +typedef __int16_t xfs_aextnum_t; /* # extents in an attribute fork */ +typedef __int64_t xfs_fsize_t; /* bytes in a file */ +typedef __uint64_t xfs_ufsize_t; /* unsigned bytes in a file */ + +typedef __int32_t xfs_suminfo_t; /* type of bitmap summary info */ +typedef __int32_t xfs_rtword_t; /* word type for bitmap manipulations */ + +typedef __int64_t xfs_lsn_t; /* log sequence number */ +typedef __int32_t xfs_tid_t; /* transaction identifier */ + +typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */ +typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */ + +typedef __uint16_t xfs_prid_t; /* prid_t truncated to 16bits in XFS */ + +/* + * These types are 64 bits on disk but are either 32 or 64 bits in memory. + * Disk based types: + */ +typedef __uint64_t xfs_dfsbno_t; /* blockno in filesystem (agno|agbno) */ +typedef __uint64_t xfs_drfsbno_t; /* blockno in filesystem (raw) */ +typedef __uint64_t xfs_drtbno_t; /* extent (block) in realtime area */ +typedef __uint64_t xfs_dfiloff_t; /* block number in a file */ +typedef __uint64_t xfs_dfilblks_t; /* number of blocks in a file */ + +/* + * Memory based types are conditional. + */ +#if XFS_BIG_FILESYSTEMS +typedef __uint64_t xfs_fsblock_t; /* blockno in filesystem (agno|agbno) */ +typedef __uint64_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ +typedef __uint64_t xfs_rtblock_t; /* extent (block) in realtime area */ +typedef __int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ +#else +typedef __uint32_t xfs_fsblock_t; /* blockno in filesystem (agno|agbno) */ +typedef __uint32_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ +typedef __uint32_t xfs_rtblock_t; /* extent (block) in realtime area */ +typedef __int32_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ +#endif +typedef __uint64_t xfs_fileoff_t; /* block number in a file */ +typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */ +typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */ + +typedef __uint8_t xfs_arch_t; /* architecture of an xfs fs */ + +/* + * Null values for the types. + */ +#define NULLDFSBNO ((xfs_dfsbno_t)-1) +#define NULLDRFSBNO ((xfs_drfsbno_t)-1) +#define NULLDRTBNO ((xfs_drtbno_t)-1) +#define NULLDFILOFF ((xfs_dfiloff_t)-1) + +#define NULLFSBLOCK ((xfs_fsblock_t)-1) +#define NULLRFSBLOCK ((xfs_rfsblock_t)-1) +#define NULLRTBLOCK ((xfs_rtblock_t)-1) +#define NULLFILEOFF ((xfs_fileoff_t)-1) + +#define NULLAGBLOCK ((xfs_agblock_t)-1) +#define NULLAGNUMBER ((xfs_agnumber_t)-1) +#define NULLEXTNUM ((xfs_extnum_t)-1) + +#define NULLCOMMITLSN ((xfs_lsn_t)-1) + +/* + * Max values for extlen, extnum, aextnum. + */ +#define MAXEXTLEN ((xfs_extlen_t)0x001fffff) /* 21 bits */ +#define MAXEXTNUM ((xfs_extnum_t)0x7fffffff) /* signed int */ +#define MAXAEXTNUM ((xfs_aextnum_t)0x7fff) /* signed short */ + +/* + * MAXNAMELEN is the length (including the terminating null) of + * the longest permissible file (component) name. 
+ */
+#define MAXNAMELEN	256
+
+typedef struct xfs_dirent {		/* data from readdir() */
+	xfs_ino_t	d_ino;		/* inode number of entry */
+	xfs_off_t	d_off;		/* offset of disk directory entry */
+	unsigned short	d_reclen;	/* length of this record */
+	char		d_name[1];	/* name of file */
+} xfs_dirent_t;
+
+#define DIRENTBASESIZE		(((xfs_dirent_t *)0)->d_name - (char *)0)
+#define DIRENTSIZE(namelen)	\
+	((DIRENTBASESIZE + (namelen) + \
+		sizeof(xfs_off_t)) & ~(sizeof(xfs_off_t) - 1))
+
+typedef enum {
+	XFS_LOOKUP_EQi, XFS_LOOKUP_LEi, XFS_LOOKUP_GEi
+} xfs_lookup_t;
+
+typedef enum {
+	XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_BMAPi, XFS_BTNUM_INOi,
+	XFS_BTNUM_MAX
+} xfs_btnum_t;
+
+
+/*
+ * Juggle IRIX device numbers - still used in ondisk structures
+ */
+#define XFS_DEV_BITSMAJOR	14
+#define XFS_DEV_BITSMINOR	18
+#define XFS_DEV_MAXMAJ		0x1ff
+#define XFS_DEV_MAXMIN		0x3ffff
+#define XFS_DEV_MAJOR(dev)	((int)(((unsigned)(dev)>>XFS_DEV_BITSMINOR) \
+				& XFS_DEV_MAXMAJ))
+#define XFS_DEV_MINOR(dev)	((int)((dev)&XFS_DEV_MAXMIN))
+#define XFS_MKDEV(major,minor)	((xfs_dev_t)(((major)<<XFS_DEV_BITSMINOR) \
+				| (minor&XFS_DEV_MAXMIN)))
+
+#endif	/* __XFS_TYPES_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_utils.c linux.21rc5-ac2/fs/xfs/xfs_utils.c
--- linux.21rc5/fs/xfs/xfs_utils.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/fs/xfs/xfs_utils.c	2003-05-07 15:41:10.000000000 +0100
+int
+xfs_dir_lookup_int(
+	bhv_desc_t	*dir_bdp,
+	uint		lock_mode,
+	vname_t		*dentry,
+	xfs_ino_t	*inum,
+	xfs_inode_t	**ipp)
+{
+	xfs_inode_t	*dp;
+	int		error;
+
+	dp = XFS_BHVTOI(dir_bdp);
+	error = XFS_DIR_LOOKUP(dp->i_mount, NULL, dp,
+				VNAME(dentry), VNAMELEN(dentry), inum);
+	if (!error) {
+		/*
+		 * Unlock the directory.  We do this because we can't
+		 * hold the directory lock while doing the vn_get()
+		 * in xfs_iget().  Doing so could cause us to hold
+		 * a lock while waiting for the inode to finish
+		 * being inactive while it's waiting for a log
+		 * reservation in the inactive routine.
+		 */
+		xfs_iunlock(dp, lock_mode);
+		error = xfs_iget(dp->i_mount, NULL, *inum, 0, ipp, 0);
+		xfs_ilock(dp, lock_mode);
+
+		if (error) {
+			*ipp = NULL;
+		} else if ((*ipp)->i_d.di_mode == 0) {
+			/*
+			 * The inode has been freed.  Something is
+			 * wrong so just get out of here.
+			 */
+			xfs_iunlock(dp, lock_mode);
+			xfs_iput_new(*ipp, 0);
+			*ipp = NULL;
+			xfs_ilock(dp, lock_mode);
+			error = XFS_ERROR(ENOENT);
+		}
+	}
+	return error;
+}
+
+/*
+ * Allocates a new inode from disk and returns a pointer to the
+ * incore copy.  This routine will internally commit the current
+ * transaction and allocate a new one if the Space Manager needed
+ * to do an allocation to replenish the inode free-list.
+ *
+ * This routine is designed to be called from xfs_create and
+ * xfs_create_dir.
+ *
+ */
+int
+xfs_dir_ialloc(
+	xfs_trans_t	**tpp,		/* input: current transaction;
+					   output: may be a new transaction. */
+	xfs_inode_t	*dp,		/* directory within which to
+					   allocate the inode. */
+	mode_t		mode,
+	nlink_t		nlink,
+	xfs_dev_t	rdev,
+	cred_t		*credp,
+	prid_t		prid,		/* project id */
+	int		okalloc,	/* ok to allocate new space */
+	xfs_inode_t	**ipp,		/* pointer to inode; it will be
+					   locked. */
+	int		*committed)
+
+{
+	xfs_trans_t	*tp;
+	xfs_trans_t	*ntp;
+	xfs_inode_t	*ip;
+	xfs_buf_t	*ialloc_context = NULL;
+	boolean_t	call_again = B_FALSE;
+	int		code;
+	uint		log_res;
+	uint		log_count;
+	void		*dqinfo;
+	uint		tflags;
+
+	tp = *tpp;
+	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+
+	/*
+	 * xfs_ialloc will return a pointer to an incore inode if
+	 * the Space Manager has an available inode on the free
+	 * list.  Otherwise, it will do an allocation and replenish
+	 * the freelist.  Since we can only do one allocation per
+	 * transaction without deadlocks, we will need to commit the
+	 * current transaction and start a new one.  We will then
+	 * need to call xfs_ialloc again to get the inode.
+	 *
+	 * If xfs_ialloc did an allocation to replenish the freelist,
+	 * it returns the bp containing the head of the freelist as
+	 * ialloc_context.
We will hold a lock on it across the + * transaction commit so that no other process can steal + * the inode(s) that we've just allocated. + */ + code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, okalloc, + &ialloc_context, &call_again, &ip); + + /* + * Return an error if we were unable to allocate a new inode. + * This should only happen if we run out of space on disk or + * encounter a disk error. + */ + if (code) { + *ipp = NULL; + return code; + } + if (!call_again && (ip == NULL)) { + *ipp = NULL; + return XFS_ERROR(ENOSPC); + } + + /* + * If call_again is set, then we were unable to get an + * inode in one operation. We need to commit the current + * transaction and call xfs_ialloc() again. It is guaranteed + * to succeed the second time. + */ + if (call_again) { + + /* + * Normally, xfs_trans_commit releases all the locks. + * We call bhold to hang on to the ialloc_context across + * the commit. Holding this buffer prevents any other + * processes from doing any allocations in this + * allocation group. + */ + xfs_trans_bhold(tp, ialloc_context); + /* + * Save the log reservation so we can use + * them in the next transaction. + */ + log_res = xfs_trans_get_log_res(tp); + log_count = xfs_trans_get_log_count(tp); + + /* + * We want the quota changes to be associated with the next + * transaction, NOT this one. So, detach the dqinfo from this + * and attach it to the next transaction. + */ + dqinfo = NULL; + tflags = 0; + if (tp->t_dqinfo) { + dqinfo = (void *)tp->t_dqinfo; + tp->t_dqinfo = NULL; + tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY; + tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY); + } + + ntp = xfs_trans_dup(tp); + code = xfs_trans_commit(tp, 0, NULL); + tp = ntp; + if (committed != NULL) { + *committed = 1; + } + /* + * If we get an error during the commit processing, + * release the buffer that is still held and return + * to the caller. + */ + if (code) { + xfs_buf_relse(ialloc_context); + if (dqinfo) { + tp->t_dqinfo = dqinfo; + XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp); + } + *tpp = ntp; + *ipp = NULL; + return code; + } + code = xfs_trans_reserve(tp, 0, log_res, 0, + XFS_TRANS_PERM_LOG_RES, log_count); + /* + * Re-attach the quota info that we detached from prev trx. + */ + if (dqinfo) { + tp->t_dqinfo = dqinfo; + tp->t_flags |= tflags; + } + + if (code) { + xfs_buf_relse(ialloc_context); + *tpp = ntp; + *ipp = NULL; + return code; + } + xfs_trans_bjoin(tp, ialloc_context); + + /* + * Call ialloc again. Since we've locked out all + * other allocations in this allocation group, + * this call should always succeed. + */ + code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, + okalloc, &ialloc_context, &call_again, &ip); + + /* + * If we get an error at this point, return to the caller + * so that the current transaction can be aborted. + */ + if (code) { + *tpp = tp; + *ipp = NULL; + return code; + } + ASSERT ((!call_again) && (ip != NULL)); + + } else { + if (committed != NULL) { + *committed = 0; + } + } + + *ipp = ip; + *tpp = tp; + + return 0; +} + +/* + * Decrement the link count on an inode & log the change. + * If this causes the link count to go to zero, initiate the + * logging activity required to truncate a file. + */ +int /* error */ +xfs_droplink( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + int error; + + xfs_ichgtime(ip, XFS_ICHGTIME_CHG); + + ASSERT (ip->i_d.di_nlink > 0); + ip->i_d.di_nlink--; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + error = 0; + if (ip->i_d.di_nlink == 0) { + /* + * We're dropping the last link to this file. 
+ * Move the on-disk inode to the AGI unlinked list. + * From xfs_inactive() we will pull the inode from + * the list and free it. + */ + error = xfs_iunlink(tp, ip); + } + return error; +} + +/* + * This gets called when the inode's version needs to be changed from 1 to 2. + * Currently this happens when the nlink field overflows the old 16-bit value + * or when chproj is called to change the project for the first time. + * As a side effect the superblock version will also get rev'd + * to contain the NLINK bit. + */ +void +xfs_bump_ino_vers2( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + xfs_mount_t *mp; + unsigned long s; + + ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); + ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1); + + ip->i_d.di_version = XFS_DINODE_VERSION_2; + ip->i_d.di_onlink = 0; + memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); + mp = tp->t_mountp; + if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { + s = XFS_SB_LOCK(mp); + if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { + XFS_SB_VERSION_ADDNLINK(&mp->m_sb); + XFS_SB_UNLOCK(mp, s); + xfs_mod_sb(tp, XFS_SB_VERSIONNUM); + } else { + XFS_SB_UNLOCK(mp, s); + } + } + /* Caller must log the inode */ +} + +/* + * Increment the link count on an inode & log the change. + */ +int +xfs_bumplink( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + if (ip->i_d.di_nlink >= XFS_MAXLINK) + return XFS_ERROR(EMLINK); + xfs_ichgtime(ip, XFS_ICHGTIME_CHG); + + ASSERT(ip->i_d.di_nlink > 0); + ip->i_d.di_nlink++; + if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && + (ip->i_d.di_nlink > XFS_MAXLINK_1)) { + /* + * The inode has increased its number of links beyond + * what can fit in an old format inode. It now needs + * to be converted to a version 2 inode with a 32 bit + * link count. If this is the first inode in the file + * system to do this, then we need to bump the superblock + * version number as well. + */ + xfs_bump_ino_vers2(tp, ip); + } + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + return 0; +} + +/* + * Try to truncate the given file to 0 length. Currently called + * only out of xfs_remove when it has to truncate a file to free + * up space for the remove to proceed. + */ +int +xfs_truncate_file( + xfs_mount_t *mp, + xfs_inode_t *ip) +{ + xfs_trans_t *tp; + int error; + +#ifdef QUOTADEBUG + /* + * This is called to truncate the quotainodes too. + */ + if (XFS_IS_UQUOTA_ON(mp)) { + if (ip->i_ino != mp->m_sb.sb_uquotino) + ASSERT(ip->i_udquot); + } + if (XFS_IS_GQUOTA_ON(mp)) { + if (ip->i_ino != mp->m_sb.sb_gquotino) + ASSERT(ip->i_gdquot); + } +#endif + /* + * Make the call to xfs_itruncate_start before starting the + * transaction, because we cannot make the call while we're + * in a transaction. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, (xfs_fsize_t)0); + + tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); + if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; + } + + /* + * Follow the normal truncate locking protocol. Since we + * hold the inode in the transaction, we know that it's number + * of references will stay constant. + */ + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(tp, ip); + /* + * Signal a sync xaction. The only case where that isn't + * the case is if we're truncating an already unlinked file + * on a wsync fs. 
In that case, we know the blocks can't + * reappear in the file because the links to file are + * permanently toast. Currently, we're always going to + * want a sync transaction because this code is being + * called from places where nlink is guaranteed to be 1 + * but I'm leaving the tests in to protect against future + * changes -- rcc. + */ + error = xfs_itruncate_finish(&tp, ip, (xfs_fsize_t)0, + XFS_DATA_FORK, + ((ip->i_d.di_nlink != 0 || + !(mp->m_flags & XFS_MOUNT_WSYNC)) + ? 1 : 0)); + if (error) { + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT); + } else { + xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, + NULL); + } + xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + + return error; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_utils.h linux.21rc5-ac2/fs/xfs/xfs_utils.h --- linux.21rc5/fs/xfs/xfs_utils.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_utils.h 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_UTILS_H__ +#define __XFS_UTILS_H__ + +#define IRELE(ip) VN_RELE(XFS_ITOV(ip)) +#define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) +#define ITRACE(ip) vn_trace_ref(XFS_ITOV(ip), __FILE__, __LINE__, \ + (inst_t *)__return_address) + +extern int xfs_rename (bhv_desc_t *, vname_t *, vnode_t *, vname_t *, cred_t *); +extern int xfs_get_dir_entry (vname_t *, xfs_inode_t **); +extern int xfs_dir_lookup_int (bhv_desc_t *, uint, vname_t *, xfs_ino_t *, + xfs_inode_t **); +extern int xfs_truncate_file (xfs_mount_t *, xfs_inode_t *); +extern int xfs_dir_ialloc (xfs_trans_t **, xfs_inode_t *, mode_t, nlink_t, + xfs_dev_t, cred_t *, prid_t, int, + xfs_inode_t **, int *); +extern int xfs_droplink (xfs_trans_t *, xfs_inode_t *); +extern int xfs_bumplink (xfs_trans_t *, xfs_inode_t *); +extern void xfs_bump_ino_vers2 (xfs_trans_t *, xfs_inode_t *); + +#endif /* __XFS_UTILS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_vfsops.c linux.21rc5-ac2/fs/xfs/xfs_vfsops.c --- linux.21rc5/fs/xfs/xfs_vfsops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_vfsops.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,1857 @@ +/* + * XFS filesystem operations. + * + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_btree.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_ag.h" +#include "xfs_error.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_rw.h" +#include "xfs_buf_item.h" +#include "xfs_extfree_item.h" +#include "xfs_quota.h" +#include "xfs_dmapi.h" +#include "xfs_dir2_trace.h" +#include "xfs_acl.h" +#include "xfs_attr.h" +#include "xfs_clnt.h" +#include "xfs_log_priv.h" + +STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); + +int +xfs_init(void) +{ + extern kmem_zone_t *xfs_da_state_zone; + extern kmem_zone_t *xfs_bmap_free_item_zone; + extern kmem_zone_t *xfs_btree_cur_zone; + extern kmem_zone_t *xfs_inode_zone; + extern kmem_zone_t *xfs_chashlist_zone; + extern kmem_zone_t *xfs_trans_zone; + extern kmem_zone_t *xfs_buf_item_zone; + extern kmem_zone_t *xfs_efd_zone; + extern kmem_zone_t *xfs_efi_zone; + extern kmem_zone_t *xfs_dabuf_zone; +#ifdef DEBUG_NOT + extern ktrace_t *xfs_alloc_trace_buf; + extern ktrace_t *xfs_bmap_trace_buf; + extern ktrace_t *xfs_bmbt_trace_buf; + extern ktrace_t *xfs_dir_trace_buf; + extern ktrace_t *xfs_attr_trace_buf; + extern ktrace_t *xfs_dir2_trace_buf; +#endif /* DEBUG */ +#ifdef XFS_DABUF_DEBUG + extern lock_t xfs_dabuf_global_lock; +#endif + extern int xfs_refcache_size; + +#ifdef XFS_DABUF_DEBUG + spinlock_init(&xfs_dabuf_global_lock, "xfsda"); +#endif + + /* + * Initialize all of the zone allocators we use. + */ + xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t), + "xfs_bmap_free_item"); + xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), + "xfs_btree_cur"); + xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode"); + xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); + xfs_da_state_zone = + kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state"); + xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); + + /* + * The size of the zone allocated buf log item is the maximum + * size possible under XFS. This wastes a little bit of memory, + * but it is much faster. 
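 *
 * (Worked example of the sizing below, under the usual constants --
 * an XFS_BLI_CHUNK of 128 bytes and a 32-bit NBWORD are assumptions
 * here:
 *
 *	chunks = XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK;	64k/128 = 512
 *	words  = chunks / NBWORD;			512/32 = 16
 *	size   = sizeof(xfs_buf_log_item_t) + words * sizeof(int);
 *
 * i.e. sixteen extra bitmap words buy a dirty-chunk map big enough for
 * any buffer, so no item ever needs a second allocation.)
 *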
+ */ + xfs_buf_item_zone = + kmem_zone_init((sizeof(xfs_buf_log_item_t) + + (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) / + NBWORD) * sizeof(int))), + "xfs_buf_item"); + xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + + ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), + "xfs_efd_item"); + xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) + + ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), + "xfs_efi_item"); + xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); + xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili"); + xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t), + "xfs_chashlist"); + _ACL_ZONE_INIT(xfs_acl_zone, "xfs_acl"); + +#ifdef CONFIG_XFS_VNODE_TRACING + ktrace_init(VNODE_TRACE_SIZE); +#else +#ifdef DEBUG + ktrace_init(64); +#endif +#endif + + /* + * Allocate global trace buffers. + */ +#ifdef XFS_ALLOC_TRACE + xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_BMAP_TRACE + xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_BMBT_TRACE + xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_DIR_TRACE + xfs_dir_trace_buf = ktrace_alloc(XFS_DIR_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_ATTR_TRACE + xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_DIR2_TRACE + xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_SLEEP); +#endif + + xfs_dir_startup(); + +#if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) + xfs_error_test_init(); +#endif /* DEBUG || INDUCE_IO_ERROR */ + + xfs_init_procfs(); + xfs_sysctl_register(); + + xfs_refcache_size = xfs_params.refcache_size; + + /* + * The inode hash table is created on a per mounted + * file system basis. + */ + + return 0; +} + +void +xfs_cleanup(void) +{ + extern kmem_zone_t *xfs_bmap_free_item_zone; + extern kmem_zone_t *xfs_btree_cur_zone; + extern kmem_zone_t *xfs_inode_zone; + extern kmem_zone_t *xfs_trans_zone; + extern kmem_zone_t *xfs_da_state_zone; + extern kmem_zone_t *xfs_dabuf_zone; + extern kmem_zone_t *xfs_efd_zone; + extern kmem_zone_t *xfs_efi_zone; + extern kmem_zone_t *xfs_buf_item_zone; + extern kmem_zone_t *xfs_chashlist_zone; + extern xfs_inode_t **xfs_refcache; + + xfs_cleanup_procfs(); + xfs_sysctl_unregister(); + if (xfs_refcache) { + kmem_free(xfs_refcache, + XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *)); + } + + kmem_cache_destroy(xfs_bmap_free_item_zone); + kmem_cache_destroy(xfs_btree_cur_zone); + kmem_cache_destroy(xfs_inode_zone); + kmem_cache_destroy(xfs_trans_zone); + kmem_cache_destroy(xfs_da_state_zone); + kmem_cache_destroy(xfs_dabuf_zone); + kmem_cache_destroy(xfs_buf_item_zone); + kmem_cache_destroy(xfs_efd_zone); + kmem_cache_destroy(xfs_efi_zone); + kmem_cache_destroy(xfs_ifork_zone); + kmem_cache_destroy(xfs_ili_zone); + kmem_cache_destroy(xfs_chashlist_zone); + _ACL_ZONE_DESTROY(xfs_acl_zone); +#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING)) + ktrace_uninit(); +#endif +} + +/* + * xfs_start_flags + * + * This function fills in xfs_mount_t fields based on mount args. + * Note: the superblock has _not_ yet been read in. + */ +STATIC int +xfs_start_flags( + struct xfs_mount_args *ap, + struct xfs_mount *mp, + int ronly) +{ + /* Values are in BBs */ + if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { + /* + * At this point the superblock has not been read + * in, therefore we do not know the block size. + * Before the mount call ends, we will convert + * these to FSBs.
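 *
 * (Units sketch: sunit/swidth arrive from mount(2) in 512-byte basic
 * blocks; once sb_blocksize is known, the conversion is effectively
 *
 *	mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
 *	mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
 *
 * done later in the mount path -- shown here only to make the
 * BB-versus-FSB distinction concrete.)
 *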
+ */ + mp->m_dalign = ap->sunit; + mp->m_swidth = ap->swidth; + } + + if (ap->logbufs != 0 && ap->logbufs != -1 && + (ap->logbufs < XLOG_NUM_ICLOGS || + ap->logbufs > XLOG_MAX_ICLOGS)) { + cmn_err(CE_WARN, + "XFS: invalid logbufs value: %d [not %d-%d]", + ap->logbufs, XLOG_NUM_ICLOGS, XLOG_MAX_ICLOGS); + return XFS_ERROR(EINVAL); + } + mp->m_logbufs = ap->logbufs; + if (ap->logbufsize != -1 && + ap->logbufsize != 16 * 1024 && + ap->logbufsize != 32 * 1024 && + ap->logbufsize != 64 * 1024 && + ap->logbufsize != 128 * 1024 && + ap->logbufsize != 256 * 1024) { + cmn_err(CE_WARN, + "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", + ap->logbufsize); + return XFS_ERROR(EINVAL); + } + mp->m_logbsize = ap->logbufsize; + mp->m_fsname_len = strlen(ap->fsname) + 1; + mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP); + strcpy(mp->m_fsname, ap->fsname); + + /* + * Pull in the 'wsync' and 'ino64' mount options before we do the real + * work of mounting and recovery. The arg pointer will + * be NULL when we are being called from the root mount code. + */ + if (ap->flags & XFSMNT_WSYNC) + mp->m_flags |= XFS_MOUNT_WSYNC; +#if XFS_BIG_FILESYSTEMS + if (ap->flags & XFSMNT_INO64) { + mp->m_flags |= XFS_MOUNT_INO64; + mp->m_inoadd = XFS_INO64_OFFSET; + } +#endif + if (ap->flags & XFSMNT_NOATIME) + mp->m_flags |= XFS_MOUNT_NOATIME; + + if (ap->flags & XFSMNT_RETERR) + mp->m_flags |= XFS_MOUNT_RETERR; + + if (ap->flags & XFSMNT_NOALIGN) + mp->m_flags |= XFS_MOUNT_NOALIGN; + + if (ap->flags & XFSMNT_OSYNCISOSYNC) + mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC; + + if (ap->flags & XFSMNT_32BITINODES) + mp->m_flags |= XFS_MOUNT_32BITINODES; + + if (ap->flags & XFSMNT_IOSIZE) { + if (ap->iosizelog > XFS_MAX_IO_LOG || + ap->iosizelog < XFS_MIN_IO_LOG) { + cmn_err(CE_WARN, + "XFS: invalid log iosize: %d [not %d-%d]", + ap->iosizelog, XFS_MIN_IO_LOG, + XFS_MAX_IO_LOG); + return XFS_ERROR(EINVAL); + } + + mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; + mp->m_readio_log = mp->m_writeio_log = ap->iosizelog; + } + + /* + * no recovery flag requires a read-only mount + */ + if (ap->flags & XFSMNT_NORECOVERY) { + if (!ronly) { + cmn_err(CE_WARN, + "XFS: tried to mount a FS read-write without recovery!"); + return XFS_ERROR(EINVAL); + } + mp->m_flags |= XFS_MOUNT_NORECOVERY; + } + + if (ap->flags & XFSMNT_NOUUID) + mp->m_flags |= XFS_MOUNT_NOUUID; + if (ap->flags & XFSMNT_NOLOGFLUSH) + mp->m_flags |= XFS_MOUNT_NOLOGFLUSH; + + return 0; +} + +/* + * This function fills in xfs_mount_t fields based on mount args. + * Note: the superblock _has_ now been read in. 
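 *
 * (Aside on the logbufsize whitelist in xfs_start_flags() above: an
 * equivalent, more compact check -- purely illustrative, and assuming
 * a helper such as is_power_of_2() which this kernel does not
 * provide -- would be
 *
 *	if (ap->logbufsize != -1 &&
 *	    (ap->logbufsize < 16*1024 || ap->logbufsize > 256*1024 ||
 *	     !is_power_of_2(ap->logbufsize)))
 *		return XFS_ERROR(EINVAL);
 *
 * the explicit list in the code keeps the accepted set obvious.)
 *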
+ */ +STATIC int +xfs_finish_flags( + struct xfs_mount_args *ap, + struct xfs_mount *mp, + int ronly) +{ + /* Fail a mount where the logbuf is smaller then the log stripe */ + if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) { + if (((ap->logbufsize == -1) && + (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) || + (ap->logbufsize < mp->m_sb.sb_logsunit)) { + cmn_err(CE_WARN, + "XFS: logbuf size must be greater than or equal to log stripe size"); + return XFS_ERROR(EINVAL); + } + } else { + /* Fail a mount if the logbuf is larger than 32K */ + if (ap->logbufsize > XLOG_BIG_RECORD_BSIZE) { + cmn_err(CE_WARN, + "XFS: logbuf size for version 1 logs must be 16K or 32K"); + return XFS_ERROR(EINVAL); + } + } + + /* + * prohibit r/w mounts of read-only filesystems + */ + if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { + cmn_err(CE_WARN, + "XFS: cannot mount a read-only filesystem as read-write"); + return XFS_ERROR(EROFS); + } + + /* + * disallow mount attempts with (IRIX) project quota enabled + */ + if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) && + (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT)) { + cmn_err(CE_WARN, + "XFS: cannot mount a filesystem with IRIX project quota enabled"); + return XFS_ERROR(ENOSYS); + } + + /* + * check for shared mount. + */ + if (ap->flags & XFSMNT_SHARED) { + if (!XFS_SB_VERSION_HASSHARED(&mp->m_sb)) + return XFS_ERROR(EINVAL); + + /* + * For IRIX 6.5, shared mounts must have the shared + * version bit set, have the persistent readonly + * field set, must be version 0 and can only be mounted + * read-only. + */ + if (!ronly || !(mp->m_sb.sb_flags & XFS_SBF_READONLY) || + (mp->m_sb.sb_shared_vn != 0)) + return XFS_ERROR(EINVAL); + + mp->m_flags |= XFS_MOUNT_SHARED; + + /* + * Shared XFS V0 can't deal with DMI. Return EINVAL. + */ + if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI)) + return XFS_ERROR(EINVAL); + } + + return 0; +} + +/* + * xfs_mount + * + * The file system configurations are: + * (1) device (partition) with data and internal log + * (2) logical volume with data and log subvolumes. + * (3) logical volume with data, log, and realtime subvolumes. + * + * We only have to handle opening the log and realtime volumes here if + * they are present. The data subvolume has already been opened by + * get_sb_bdev() and is stored in vfsp->vfs_super->s_bdev. + */ +STATIC int +xfs_mount( + struct bhv_desc *bhvp, + struct xfs_mount_args *args, + cred_t *credp) +{ + struct vfs *vfsp = bhvtovfs(bhvp); + struct bhv_desc *p; + struct xfs_mount *mp = XFS_BHVTOM(bhvp); + struct block_device *ddev, *logdev, *rtdev; + int ronly = (vfsp->vfs_flag & VFS_RDONLY); + int flags = 0, error; + + ddev = vfsp->vfs_super->s_bdev; + logdev = rtdev = NULL; + + /* + * Open real time and log devices - order is important. + */ + if (args->logname[0]) { + error = xfs_blkdev_get(mp, args->logname, &logdev); + if (error) + return error; + } + if (args->rtname[0]) { + error = xfs_blkdev_get(mp, args->rtname, &rtdev); + if (error) { + xfs_blkdev_put(logdev); + return error; + } + + if (rtdev == ddev || rtdev == logdev) { + cmn_err(CE_WARN, + "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev."); + xfs_blkdev_put(logdev); + xfs_blkdev_put(rtdev); + return EINVAL; + } + } + + /* + * Setup xfs_mount function vectors from available behaviors + */ + p = vfs_bhv_lookup(vfsp, VFS_POSITION_DM); + mp->m_dm_ops = p ? *(xfs_dmops_t *) vfs_bhv_custom(p) : xfs_dmcore_xfs; + p = vfs_bhv_lookup(vfsp, VFS_POSITION_QM); + mp->m_qm_ops = p ? 
*(xfs_qmops_t *) vfs_bhv_custom(p) : xfs_qmcore_xfs; + p = vfs_bhv_lookup(vfsp, VFS_POSITION_IO); + mp->m_io_ops = p ? *(xfs_ioops_t *) vfs_bhv_custom(p) : xfs_iocore_xfs; + + /* + * Setup xfs_mount buffer target pointers + */ + mp->m_ddev_targp = xfs_alloc_buftarg(ddev); + if (rtdev) + mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev); + mp->m_logdev_targp = (logdev && logdev != ddev) ? + xfs_alloc_buftarg(logdev) : mp->m_ddev_targp; + + /* + * Setup flags based on mount(2) options and then the superblock + */ + error = xfs_start_flags(args, mp, ronly); + if (error) + goto error; + error = xfs_readsb(mp); + if (error) + goto error; + error = xfs_finish_flags(args, mp, ronly); + if (error) { + xfs_freesb(mp); + goto error; + } + + /* + * Setup xfs_mount buffer target pointers based on superblock + */ + xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize, + mp->m_sb.sb_sectsize); + if (logdev && logdev != ddev) { + unsigned int log_sector_size = BBSIZE; + + if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb)) + log_sector_size = mp->m_sb.sb_logsectsize; + xfs_setsize_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize, + log_sector_size); + } + if (rtdev) + xfs_setsize_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize, + mp->m_sb.sb_sectsize); + + if (!(error = XFS_IOINIT(vfsp, args, flags))) + return 0; + + error: + xfs_binval(mp->m_ddev_targp); + if (logdev != NULL && logdev != ddev) { + xfs_binval(mp->m_logdev_targp); + } + if (rtdev != NULL) { + xfs_binval(mp->m_rtdev_targp); + } + xfs_unmountfs_close(mp, NULL); + return error; +} + +STATIC int +xfs_unmount( + bhv_desc_t *bdp, + int flags, + cred_t *credp) +{ + struct vfs *vfsp = bhvtovfs(bdp); + xfs_mount_t *mp = XFS_BHVTOM(bdp); + xfs_inode_t *rip; + vnode_t *rvp; + int unmount_event_wanted = 0; + int unmount_event_flags = 0; + int xfs_unmountfs_needed = 0; + int error; + + rip = mp->m_rootip; + rvp = XFS_ITOV(rip); + + if (vfsp->vfs_flag & VFS_DMI) { + bhv_desc_t *rbdp; + + rbdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(rvp), &xfs_vnodeops); + error = XFS_SEND_NAMESP(mp, DM_EVENT_PREUNMOUNT, + rbdp, DM_RIGHT_NULL, rbdp, DM_RIGHT_NULL, + NULL, NULL, 0, 0, + (mp->m_dmevmask & (1 << DM_EVENT_PREUNMOUNT)) ? + 0 : DM_FLAGS_UNWANTED); + if (error) + return XFS_ERROR(error); + unmount_event_wanted = 1; + unmount_event_flags = (mp->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? + 0 : DM_FLAGS_UNWANTED; + } + + /* + * First blow any referenced inode from this file system + * out of the reference cache, and delete the timer. + */ + xfs_refcache_purge_mp(mp); + + XFS_bflush(mp->m_ddev_targp); + error = xfs_unmount_flush(mp, 0); + if (error) + goto out; + + ASSERT(vn_count(rvp) == 1); + + /* + * Drop the reference count + */ + VN_RELE(rvp); + + /* + * If we're forcing a shutdown, typically because of a media error, + * we want to make sure we invalidate dirty pages that belong to + * referenced vnodes as well. + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + error = xfs_sync(&mp->m_bhv, + (SYNC_WAIT | SYNC_CLOSE), credp); + ASSERT(error != EFSCORRUPTED); + } + xfs_unmountfs_needed = 1; + +out: + /* Send DMAPI event, if required. + * Then do xfs_unmountfs() if needed. + * Then return error (or zero). + */ + if (unmount_event_wanted) { + /* Note: mp structure must still exist for + * XFS_SEND_UNMOUNT() call. + */ + XFS_SEND_UNMOUNT(mp, vfsp, error == 0 ? rvp : NULL, + DM_RIGHT_NULL, 0, error, unmount_event_flags); + } + if (xfs_unmountfs_needed) { + /* + * Call common unmount function to flush to disk + * and free the super block buffer & mount structures.
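 *
 * (Teardown order, condensed for reference; every call below appears
 * in the function above:
 *
 *	xfs_refcache_purge_mp(mp);	drop cached inode references
 *	XFS_bflush(mp->m_ddev_targp);	push dirty metadata buffers
 *	xfs_unmount_flush(mp, 0);	flush root/realtime/quota inodes
 *	VN_RELE(rvp);			drop the root vnode
 *	XFS_SEND_UNMOUNT(mp, ...);	DMAPI post-event, mp still live
 *	xfs_unmountfs(mp, credp);	free sb buffer and mount struct
 *
 * The DMAPI event has to precede xfs_unmountfs() because it takes mp.)
 *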
+ */ + xfs_unmountfs(mp, credp); + } + + return XFS_ERROR(error); +} + +STATIC int +xfs_mntupdate( + bhv_desc_t *bdp, + int *flags, + struct xfs_mount_args *args) +{ + struct vfs *vfsp = bhvtovfs(bdp); + xfs_mount_t *mp = XFS_BHVTOM(bdp); + int pincount, error; + + if (args->flags & XFSMNT_NOATIME) + mp->m_flags |= XFS_MOUNT_NOATIME; + else + mp->m_flags &= ~XFS_MOUNT_NOATIME; + + if (!(vfsp->vfs_flag & VFS_RDONLY)) { + VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error); + } + + if (*flags & MS_RDONLY) { + xfs_refcache_purge_mp(mp); + pagebuf_delwri_flush(mp->m_ddev_targp, 0, NULL); + xfs_finish_reclaim_all(mp, 0); + + do { + VFS_SYNC(vfsp, SYNC_ATTR|SYNC_WAIT, NULL, error); + pagebuf_delwri_flush(mp->m_ddev_targp, PBDF_WAIT, + &pincount); + } while (pincount); + + /* Ok now write out an unmount record */ + xfs_log_unmount_write(mp); + xfs_unmountfs_writesb(mp); + vfsp->vfs_flag |= VFS_RDONLY; + } else { + vfsp->vfs_flag &= ~VFS_RDONLY; + } + + return 0; +} + +/* + * xfs_unmount_flush implements a set of flush operation on special + * inodes, which are needed as a separate set of operations so that + * they can be called as part of relocation process. + */ +int +xfs_unmount_flush( + xfs_mount_t *mp, /* Mount structure we are getting + rid of. */ + int relocation) /* Called from vfs relocation. */ +{ + xfs_inode_t *rip = mp->m_rootip; + xfs_inode_t *rbmip; + xfs_inode_t *rsumip = NULL; + vnode_t *rvp = XFS_ITOV(rip); + int error; + + xfs_ilock(rip, XFS_ILOCK_EXCL); + xfs_iflock(rip); + + /* + * Flush out the real time inodes. + */ + if ((rbmip = mp->m_rbmip) != NULL) { + xfs_ilock(rbmip, XFS_ILOCK_EXCL); + xfs_iflock(rbmip); + error = xfs_iflush(rbmip, XFS_IFLUSH_SYNC); + xfs_iunlock(rbmip, XFS_ILOCK_EXCL); + + if (error == EFSCORRUPTED) + goto fscorrupt_out; + + ASSERT(vn_count(XFS_ITOV(rbmip)) == 1); + + rsumip = mp->m_rsumip; + xfs_ilock(rsumip, XFS_ILOCK_EXCL); + xfs_iflock(rsumip); + error = xfs_iflush(rsumip, XFS_IFLUSH_SYNC); + xfs_iunlock(rsumip, XFS_ILOCK_EXCL); + + if (error == EFSCORRUPTED) + goto fscorrupt_out; + + ASSERT(vn_count(XFS_ITOV(rsumip)) == 1); + } + + /* + * Synchronously flush root inode to disk + */ + error = xfs_iflush(rip, XFS_IFLUSH_SYNC); + if (error == EFSCORRUPTED) + goto fscorrupt_out2; + + if (vn_count(rvp) != 1 && !relocation) { + xfs_iunlock(rip, XFS_ILOCK_EXCL); + return XFS_ERROR(EBUSY); + } + + /* + * Release dquot that rootinode, rbmino and rsumino might be holding, + * flush and purge the quota inodes. + */ + error = XFS_QM_UNMOUNT(mp); + if (error == EFSCORRUPTED) + goto fscorrupt_out2; + + if (rbmip) { + VN_RELE(XFS_ITOV(rbmip)); + VN_RELE(XFS_ITOV(rsumip)); + } + + xfs_iunlock(rip, XFS_ILOCK_EXCL); + return 0; + +fscorrupt_out: + xfs_ifunlock(rip); + +fscorrupt_out2: + xfs_iunlock(rip, XFS_ILOCK_EXCL); + + return XFS_ERROR(EFSCORRUPTED); +} + +/* + * xfs_root extracts the root vnode from a vfs. + * + * vfsp -- the vfs struct for the desired file system + * vpp -- address of the caller's vnode pointer which should be + * set to the desired fs root vnode + */ +STATIC int +xfs_root( + bhv_desc_t *bdp, + vnode_t **vpp) +{ + vnode_t *vp; + + vp = XFS_ITOV((XFS_BHVTOM(bdp))->m_rootip); + VN_HOLD(vp); + *vpp = vp; + return 0; +} + +/* + * xfs_statvfs + * + * Fill in the statvfs structure for the given file system. We use + * the superblock lock in the mount structure to ensure a consistent + * snapshot of the counters returned. 
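 *
 * (Worked example of the inode estimate below: since XFS allocates
 * inodes on demand, every free block could still be turned into
 * inodes, so free inodes are projected from free space --
 *
 *	fakeinos = statp->f_bfree << sbp->sb_inopblog;
 *	statp->f_files = MIN(sbp->sb_icount + fakeinos,
 *			     (__uint64_t)XFS_MAXINUMBER);
 *
 * e.g. with 4k blocks and 256-byte inodes, sb_inopblog is 4, so each
 * free block counts as 16 potential inodes.)
 *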
+ */ +STATIC int +xfs_statvfs( + bhv_desc_t *bdp, + struct statfs *statp, + vnode_t *vp) +{ + __uint64_t fakeinos; + xfs_extlen_t lsize; + xfs_mount_t *mp; + xfs_sb_t *sbp; + unsigned long s; + + mp = XFS_BHVTOM(bdp); + sbp = &(mp->m_sb); + + statp->f_type = XFS_SB_MAGIC; + + s = XFS_SB_LOCK(mp); + statp->f_bsize = sbp->sb_blocksize; + lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; + statp->f_blocks = sbp->sb_dblocks - lsize; + statp->f_bfree = statp->f_bavail = sbp->sb_fdblocks; + fakeinos = statp->f_bfree << sbp->sb_inopblog; +#if XFS_BIG_FILESYSTEMS + fakeinos += mp->m_inoadd; +#endif + statp->f_files = + MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); + if (mp->m_maxicount) +#if XFS_BIG_FILESYSTEMS + if (!mp->m_inoadd) +#endif + statp->f_files = + MIN(statp->f_files, (long)mp->m_maxicount); + statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); + XFS_SB_UNLOCK(mp, s); + + statp->f_fsid.val[0] = mp->m_dev; + statp->f_fsid.val[1] = 0; + statp->f_namelen = MAXNAMELEN - 1; + + return 0; +} + + +/* + * xfs_sync flushes any pending I/O to file system vfsp. + * + * This routine is called by vfs_sync() to make sure that things make it + * out to disk eventually, on sync() system calls to flush out everything, + * and when the file system is unmounted. For the vfs_sync() case, all + * we really need to do is sync out the log to make all of our meta-data + * updates permanent (except for timestamps). For calls from pflushd(), + * dirty pages are kept moving by calling pdflush() on the inodes + * containing them. We also flush the inodes that we can lock without + * sleeping and the superblock if we can lock it without sleeping from + * vfs_sync() so that items at the tail of the log are always moving out. + * + * Flags: + * SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want + * to sleep if we can help it. All we really need + * to do is ensure that the log is synced at least + * periodically. We also push the inodes and + * superblock if we can lock them without sleeping + * and they are not pinned. + * SYNC_ATTR - We need to flush the inodes. If SYNC_BDFLUSH is not + * set, then we really want to lock each inode and flush + * it. + * SYNC_WAIT - All the flushes that take place in this call should + * be synchronous. + * SYNC_DELWRI - This tells us to push dirty pages associated with + * inodes. SYNC_WAIT and SYNC_BDFLUSH are used to + * determine if they should be flushed sync, async, or + * delwri. + * SYNC_CLOSE - This flag is passed when the system is being + * unmounted. We should sync and invalidate everything. + * SYNC_FSDATA - This indicates that the caller would like to make + * sure the superblock is safe on disk. We can ensure + * this by simply making sure the log gets flushed + * if SYNC_BDFLUSH is set, and by actually writing it + * out otherwise. + * + */ +/*ARGSUSED*/ +STATIC int +xfs_sync( + bhv_desc_t *bdp, + int flags, + cred_t *credp) +{ + xfs_mount_t *mp; + + mp = XFS_BHVTOM(bdp); + return (xfs_syncsub(mp, flags, 0, NULL)); +} + +/* + * xfs sync routine for internal use + * + * This routine supports all of the flags defined for the generic VFS_SYNC + * interface as explained above under xfs_sync. In the interests of not + * changing interfaces within the 6.5 family, additional internally- + * required functions are specified within a separate xflags parameter, + * only available by calling this routine.
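 *
 * (Flag precedence used by xfs_sync_inodes() below, in table form:
 *
 *	SYNC_WAIT set		-> fflag = 0		sync writes
 *	else SYNC_BDFLUSH set	-> fflag = XFS_B_DELWRI	delayed writes
 *	else			-> fflag = XFS_B_ASYNC	async writes
 *
 * i.e. SYNC_WAIT overrides SYNC_BDFLUSH when both are passed.)
 *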
+ * + */ +STATIC int +xfs_sync_inodes( + xfs_mount_t *mp, + int flags, + int xflags, + int *bypassed) +{ + xfs_inode_t *ip = NULL; + xfs_inode_t *ip_next; + xfs_buf_t *bp; + vnode_t *vp = NULL; + vmap_t vmap; + int error; + int last_error; + uint64_t fflag; + uint lock_flags; + uint base_lock_flags; + boolean_t mount_locked; + boolean_t vnode_refed; + int preempt; + xfs_dinode_t *dip; + xfs_iptr_t *ipointer; +#ifdef DEBUG + boolean_t ipointer_in = B_FALSE; + +#define IPOINTER_SET ipointer_in = B_TRUE +#define IPOINTER_CLR ipointer_in = B_FALSE +#else +#define IPOINTER_SET +#define IPOINTER_CLR +#endif + + +/* Insert a marker record into the inode list after inode ip. The list + * must be locked when this is called. After the call the list will no + * longer be locked. + */ +#define IPOINTER_INSERT(ip, mp) { \ + ASSERT(ipointer_in == B_FALSE); \ + ipointer->ip_mnext = ip->i_mnext; \ + ipointer->ip_mprev = ip; \ + ip->i_mnext = (xfs_inode_t *)ipointer; \ + ipointer->ip_mnext->i_mprev = (xfs_inode_t *)ipointer; \ + preempt = 0; \ + XFS_MOUNT_IUNLOCK(mp); \ + mount_locked = B_FALSE; \ + IPOINTER_SET; \ + } + +/* Remove the marker from the inode list. If the marker was the only item + * in the list then there are no remaining inodes and we should zero out + * the whole list. If we are the current head of the list then move the head + * past us. + */ +#define IPOINTER_REMOVE(ip, mp) { \ + ASSERT(ipointer_in == B_TRUE); \ + if (ipointer->ip_mnext != (xfs_inode_t *)ipointer) { \ + ip = ipointer->ip_mnext; \ + ip->i_mprev = ipointer->ip_mprev; \ + ipointer->ip_mprev->i_mnext = ip; \ + if (mp->m_inodes == (xfs_inode_t *)ipointer) { \ + mp->m_inodes = ip; \ + } \ + } else { \ + ASSERT(mp->m_inodes == (xfs_inode_t *)ipointer); \ + mp->m_inodes = NULL; \ + ip = NULL; \ + } \ + IPOINTER_CLR; \ + } + +#define XFS_PREEMPT_MASK 0x7f + + if (bypassed) + *bypassed = 0; + if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY) + return 0; + error = 0; + last_error = 0; + preempt = 0; + + /* Allocate a reference marker */ + ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP); + + fflag = XFS_B_ASYNC; /* default is don't wait */ + if (flags & SYNC_BDFLUSH) + fflag = XFS_B_DELWRI; + if (flags & SYNC_WAIT) + fflag = 0; /* synchronous overrides all */ + + base_lock_flags = XFS_ILOCK_SHARED; + if (flags & (SYNC_DELWRI | SYNC_CLOSE)) { + /* + * We need the I/O lock if we're going to call any of + * the flush/inval routines. + */ + base_lock_flags |= XFS_IOLOCK_SHARED; + } + + XFS_MOUNT_ILOCK(mp); + + ip = mp->m_inodes; + + mount_locked = B_TRUE; + vnode_refed = B_FALSE; + + IPOINTER_CLR; + + do { + ASSERT(ipointer_in == B_FALSE); + ASSERT(vnode_refed == B_FALSE); + + lock_flags = base_lock_flags; + + /* + * There were no inodes in the list, just break out + * of the loop. + */ + if (ip == NULL) { + break; + } + + /* + * We found another sync thread marker - skip it + */ + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + + vp = XFS_ITOV_NULL(ip); + + /* + * If the vnode is gone then this is being torn down, + * call reclaim if it is flushed, else let regular flush + * code deal with it later in the loop. 
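 *
 * (Shape of the marker walk that the IPOINTER_* macros implement,
 * sketched with the real names:
 *
 *	IPOINTER_INSERT(ip, mp);	marker after ip, drop mount lock
 *	...blocking work on ip...
 *	XFS_MOUNT_ILOCK(mp);
 *	IPOINTER_REMOVE(ip, mp);	ip = entry after the marker
 *
 * A marker is distinguishable from a real inode because its i_mount is
 * NULL -- which is exactly what the "skip it" test above checks.)
 *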
+ */ + + if (vp == NULL) { + /* Skip ones already in reclaim */ + if (ip->i_flags & XFS_IRECLAIM) { + ip = ip->i_mnext; + continue; + } + if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { + ip = ip->i_mnext; + } else if ((xfs_ipincount(ip) == 0) && + xfs_iflock_nowait(ip)) { + IPOINTER_INSERT(ip, mp); + + xfs_finish_reclaim(ip, 1, + XFS_IFLUSH_DELWRI_ELSE_ASYNC); + + XFS_MOUNT_ILOCK(mp); + mount_locked = B_TRUE; + IPOINTER_REMOVE(ip, mp); + } else { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + ip = ip->i_mnext; + } + continue; + } + + if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) { + XFS_MOUNT_IUNLOCK(mp); + kmem_free(ipointer, sizeof(xfs_iptr_t)); + return 0; + } + + /* + * If this is just vfs_sync() or pflushd() calling + * then we can skip inodes for which it looks like + * there is nothing to do. Since we don't have the + * inode locked this is racey, but these are periodic + * calls so it doesn't matter. For the others we want + * to know for sure, so we at least try to lock them. + */ + if (flags & SYNC_BDFLUSH) { + if (((ip->i_itemp == NULL) || + !(ip->i_itemp->ili_format.ilf_fields & + XFS_ILOG_ALL)) && + (ip->i_update_core == 0)) { + ip = ip->i_mnext; + continue; + } + } + + /* + * Try to lock without sleeping. We're out of order with + * the inode list lock here, so if we fail we need to drop + * the mount lock and try again. If we're called from + * bdflush() here, then don't bother. + * + * The inode lock here actually coordinates with the + * almost spurious inode lock in xfs_ireclaim() to prevent + * the vnode we handle here without a reference from + * being freed while we reference it. If we lock the inode + * while it's on the mount list here, then the spurious inode + * lock in xfs_ireclaim() after the inode is pulled from + * the mount list will sleep until we release it here. + * This keeps the vnode from being freed while we reference + * it. It is also cheaper and simpler than actually doing + * a vn_get() for every inode we touch here. + */ + if (xfs_ilock_nowait(ip, lock_flags) == 0) { + + if ((flags & SYNC_BDFLUSH) || (vp == NULL)) { + ip = ip->i_mnext; + continue; + } + + /* + * We need to unlock the inode list lock in order + * to lock the inode. Insert a marker record into + * the inode list to remember our position, dropping + * the lock is now done inside the IPOINTER_INSERT + * macro. + * + * We also use the inode list lock to protect us + * in taking a snapshot of the vnode version number + * for use in calling vn_get(). + */ + VMAP(vp, vmap); + IPOINTER_INSERT(ip, mp); + + vp = vn_get(vp, &vmap); + if (vp == NULL) { + /* + * The vnode was reclaimed once we let go + * of the inode list lock. Skip to the + * next list entry. Remove the marker. + */ + + XFS_MOUNT_ILOCK(mp); + + mount_locked = B_TRUE; + vnode_refed = B_FALSE; + + IPOINTER_REMOVE(ip, mp); + + continue; + } + + xfs_ilock(ip, lock_flags); + + ASSERT(vp == XFS_ITOV(ip)); + ASSERT(ip->i_mount == mp); + + vnode_refed = B_TRUE; + } + + /* From here on in the loop we may have a marker record + * in the inode list. + */ + + if ((flags & SYNC_CLOSE) && (vp != NULL)) { + /* + * This is the shutdown case. We just need to + * flush and invalidate all the pages associated + * with the inode. Drop the inode lock since + * we can't hold it across calls to the buffer + * cache. + * + * We don't set the VREMAPPING bit in the vnode + * here, because we don't hold the vnode lock + * exclusively. It doesn't really matter, though, + * because we only come here when we're shutting + * down anyway. 
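 *
 * (The vmap/vn_get() step used earlier in this loop, reduced to its
 * skeleton:
 *
 *	VMAP(vp, vmap);			snapshot the vnode's identity
 *	IPOINTER_INSERT(ip, mp);	park marker, drop mount lock
 *	vp = vn_get(vp, &vmap);		reference only if still the same
 *	if (vp == NULL)
 *		IPOINTER_REMOVE(ip, mp);	reclaimed; skip inode
 *
 * vn_get() refuses to resurrect a vnode whose identity no longer
 * matches the snapshot, so a recycled vnode is never touched here.)
 *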
+ */ + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + if (XFS_FORCED_SHUTDOWN(mp)) { + VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF); + } else { + VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_REMAPF); + } + + xfs_ilock(ip, XFS_ILOCK_SHARED); + + } else if ((flags & SYNC_DELWRI) && (vp != NULL)) { + if (VN_DIRTY(vp)) { + /* We need to have dropped the lock here, + * so insert a marker if we have not already + * done so. + */ + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + + /* + * Drop the inode lock since we can't hold it + * across calls to the buffer cache. + */ + xfs_iunlock(ip, XFS_ILOCK_SHARED); + VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, + fflag, FI_NONE, error); + xfs_ilock(ip, XFS_ILOCK_SHARED); + } + + } + + if (flags & SYNC_BDFLUSH) { + if ((flags & SYNC_ATTR) && + ((ip->i_update_core) || + ((ip->i_itemp != NULL) && + (ip->i_itemp->ili_format.ilf_fields != 0)))) { + + /* Insert marker and drop lock if not already + * done. + */ + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + + /* + * We don't want the periodic flushing of the + * inodes by vfs_sync() to interfere with + * I/O to the file, especially read I/O + * where it is only the access time stamp + * that is being flushed out. To prevent + * long periods where we have both inode + * locks held shared here while reading the + * inode's buffer in from disk, we drop the + * inode lock while reading in the inode + * buffer. We have to release the buffer + * and reacquire the inode lock so that they + * are acquired in the proper order (inode + * locks first). The buffer will go at the + * end of the lru chain, though, so we can + * expect it to still be there when we go + * for it again in xfs_iflush(). + */ + if ((xfs_ipincount(ip) == 0) && + xfs_iflock_nowait(ip)) { + + xfs_ifunlock(ip); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + error = xfs_itobp(mp, NULL, ip, + &dip, &bp, 0); + if (!error) { + xfs_buf_relse(bp); + } else { + /* Bailing out, remove the + * marker and free it. + */ + XFS_MOUNT_ILOCK(mp); + + IPOINTER_REMOVE(ip, mp); + + XFS_MOUNT_IUNLOCK(mp); + + ASSERT(!(lock_flags & + XFS_IOLOCK_SHARED)); + + kmem_free(ipointer, + sizeof(xfs_iptr_t)); + return (0); + } + + /* + * Since we dropped the inode lock, + * the inode may have been reclaimed. + * Therefore, we reacquire the mount + * lock and check to see if we were the + * inode reclaimed. If this happened + * then the ipointer marker will no + * longer point back at us. In this + * case, move ip along to the inode + * after the marker, remove the marker + * and continue. + */ + XFS_MOUNT_ILOCK(mp); + mount_locked = B_TRUE; + + if (ip != ipointer->ip_mprev) { + IPOINTER_REMOVE(ip, mp); + + ASSERT(!vnode_refed); + ASSERT(!(lock_flags & + XFS_IOLOCK_SHARED)); + continue; + } + + ASSERT(ip->i_mount == mp); + + if (xfs_ilock_nowait(ip, + XFS_ILOCK_SHARED) == 0) { + ASSERT(ip->i_mount == mp); + /* + * We failed to reacquire + * the inode lock without + * sleeping, so just skip + * the inode for now. We + * clear the ILOCK bit from + * the lock_flags so that we + * won't try to drop a lock + * we don't hold below. + */ + lock_flags &= ~XFS_ILOCK_SHARED; + IPOINTER_REMOVE(ip_next, mp); + } else if ((xfs_ipincount(ip) == 0) && + xfs_iflock_nowait(ip)) { + ASSERT(ip->i_mount == mp); + /* + * Since this is vfs_sync() + * calling we only flush the + * inode out if we can lock + * it without sleeping and + * it is not pinned. Drop + * the mount lock here so + * that we don't hold it for + * too long. We already have + * a marker in the list here. 
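 *
 * (Decision rule at this point, condensed: only flush an inode that is
 * unpinned and whose flush lock is free --
 *
 *	if (xfs_ipincount(ip) == 0 && xfs_iflock_nowait(ip))
 *		error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
 *	else
 *		skip it;	a later periodic sync will retry
 *
 * flushing a pinned inode would first force the log, and the periodic
 * path prefers to come back later instead.)
 *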
+ */ + XFS_MOUNT_IUNLOCK(mp); + mount_locked = B_FALSE; + error = xfs_iflush(ip, + XFS_IFLUSH_DELWRI); + } else { + ASSERT(ip->i_mount == mp); + IPOINTER_REMOVE(ip_next, mp); + } + } + + } + + } else { + if ((flags & SYNC_ATTR) && + ((ip->i_update_core) || + ((ip->i_itemp != NULL) && + (ip->i_itemp->ili_format.ilf_fields != 0)))) { + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + + if (flags & SYNC_WAIT) { + xfs_iflock(ip); + error = xfs_iflush(ip, + XFS_IFLUSH_SYNC); + } else { + /* + * If we can't acquire the flush + * lock, then the inode is already + * being flushed so don't bother + * waiting. If we can lock it then + * do a delwri flush so we can + * combine multiple inode flushes + * in each disk write. + */ + if (xfs_iflock_nowait(ip)) { + error = xfs_iflush(ip, + XFS_IFLUSH_DELWRI); + } + else if (bypassed) + (*bypassed)++; + } + } + } + + if (lock_flags != 0) { + xfs_iunlock(ip, lock_flags); + } + + if (vnode_refed) { + /* + * If we had to take a reference on the vnode + * above, then wait until after we've unlocked + * the inode to release the reference. This is + * because we can be already holding the inode + * lock when VN_RELE() calls xfs_inactive(). + * + * Make sure to drop the mount lock before calling + * VN_RELE() so that we don't trip over ourselves if + * we have to go for the mount lock again in the + * inactive code. + */ + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + + VN_RELE(vp); + + vnode_refed = B_FALSE; + } + + if (error) { + last_error = error; + } + + /* + * bail out if the filesystem is corrupted. + */ + if (error == EFSCORRUPTED) { + if (!mount_locked) { + XFS_MOUNT_ILOCK(mp); + IPOINTER_REMOVE(ip, mp); + } + XFS_MOUNT_IUNLOCK(mp); + ASSERT(ipointer_in == B_FALSE); + kmem_free(ipointer, sizeof(xfs_iptr_t)); + return XFS_ERROR(error); + } + + /* Let other threads have a chance at the mount lock + * if we have looped many times without dropping the + * lock. + */ + if ((++preempt & XFS_PREEMPT_MASK) == 0) { + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + } + + if (mount_locked == B_FALSE) { + XFS_MOUNT_ILOCK(mp); + mount_locked = B_TRUE; + IPOINTER_REMOVE(ip, mp); + continue; + } + + ASSERT(ipointer_in == B_FALSE); + ip = ip->i_mnext; + + } while (ip != mp->m_inodes); + + XFS_MOUNT_IUNLOCK(mp); + + ASSERT(ipointer_in == B_FALSE); + + kmem_free(ipointer, sizeof(xfs_iptr_t)); + return XFS_ERROR(last_error); +} + +/* + * xfs sync routine for internal use + * + * This routine supports all of the flags defined for the generic VFS_SYNC + * interface as explained above under xfs_sync. In the interests of not + * changing interfaces within the 6.5 family, additional internally- + * required functions are specified within a separate xflags parameter, + * only available by calling this routine. + * + */ +int +xfs_syncsub( + xfs_mount_t *mp, + int flags, + int xflags, + int *bypassed) +{ + int error = 0; + int last_error = 0; + uint log_flags = XFS_LOG_FORCE; + xfs_buf_t *bp; + xfs_buf_log_item_t *bip; + + /* + * Sync out the log. This ensures that the log is periodically + * flushed even if there is not enough activity to fill it up. + */ + if (flags & SYNC_WAIT) + log_flags |= XFS_LOG_SYNC; + + xfs_log_force(mp, (xfs_lsn_t)0, log_flags); + + if (flags & (SYNC_ATTR|SYNC_DELWRI)) { + if (flags & SYNC_BDFLUSH) + xfs_finish_reclaim_all(mp, 1); + else + error = xfs_sync_inodes(mp, flags, xflags, bypassed); + } + + /* + * Flushing out dirty data above probably generated more + * log activity, so if this isn't vfs_sync() then flush + * the log again.
+ */ + if (flags & SYNC_DELWRI) { + xfs_log_force(mp, (xfs_lsn_t)0, log_flags); + } + + if (flags & SYNC_FSDATA) { + /* + * If this is vfs_sync() then only sync the superblock + * if we can lock it without sleeping and it is not pinned. + */ + if (flags & SYNC_BDFLUSH) { + bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); + if (bp != NULL) { + bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); + if ((bip != NULL) && + xfs_buf_item_dirty(bip)) { + if (!(XFS_BUF_ISPINNED(bp))) { + XFS_BUF_ASYNC(bp); + error = xfs_bwrite(mp, bp); + } else { + xfs_buf_relse(bp); + } + } else { + xfs_buf_relse(bp); + } + } + } else { + bp = xfs_getsb(mp, 0); + /* + * If the buffer is pinned then push on the log so + * we won't get stuck waiting in the write for + * someone, maybe ourselves, to flush the log. + * Even though we just pushed the log above, we + * did not have the superblock buffer locked at + * that point so it can become pinned in between + * there and here. + */ + if (XFS_BUF_ISPINNED(bp)) + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + if (!(flags & SYNC_WAIT)) + XFS_BUF_BFLAGS(bp) |= XFS_B_ASYNC; + error = xfs_bwrite(mp, bp); + } + if (error) { + last_error = error; + } + } + + /* + * If this is the periodic sync, then kick some entries out of + * the reference cache. This ensures that idle entries are + * eventually kicked out of the cache. + */ + if (flags & SYNC_REFCACHE) { + xfs_refcache_purge_some(mp); + } + + /* + * Now check to see if the log needs a "dummy" transaction. + */ + + if (xfs_log_need_covered(mp)) { + xfs_trans_t *tp; + xfs_inode_t *ip; + + /* + * Put a dummy transaction in the log to tell + * recovery that all others are OK. + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); + if ((error = xfs_trans_reserve(tp, 0, + XFS_ICHANGE_LOG_RES(mp), + 0, 0, 0))) { + xfs_trans_cancel(tp, 0); + return error; + } + + ip = mp->m_rootip; + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + error = xfs_trans_commit(tp, 0, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + } + + /* + * When shutting down, we need to insure that the AIL is pushed + * to disk or the filesystem can appear corrupt from the PROM. + */ + if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) { + XFS_bflush(mp->m_ddev_targp); + if (mp->m_rtdev_targp) { + XFS_bflush(mp->m_rtdev_targp); + } + } + + return XFS_ERROR(last_error); +} + +/* + * xfs_vget - called by DMAPI to get vnode from file handle + */ +STATIC int +xfs_vget( + bhv_desc_t *bdp, + vnode_t **vpp, + fid_t *fidp) +{ + xfs_fid_t *xfid; + xfs_inode_t *ip; + int error; + xfs_ino_t ino; + unsigned int igen; + xfs_mount_t *mp; + + xfid = (struct xfs_fid *)fidp; + if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) { + ino = xfid->xfs_fid_ino; + igen = xfid->xfs_fid_gen; + } else { + /* + * Invalid. Since handles can be created in user space + * and passed in via gethandle(), this is not cause for + * a panic. 
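 *
 * (Handle validation pattern used just below: a handle carries an
 * (inode number, generation) pair, so a stale handle whose inode slot
 * has since been reused fails the generation test --
 *
 *	if (ip->i_d.di_mode == 0 || (igen && ip->i_d.di_gen != igen))
 *		return XFS_ERROR(ENOENT);	freed or reallocated
 *
 * di_gen is bumped whenever the on-disk inode is reallocated.)
 *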
+ */ + return XFS_ERROR(EINVAL); + } + mp = XFS_BHVTOM(bdp); + error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0); + if (error) { + *vpp = NULL; + return error; + } + if (ip == NULL) { + *vpp = NULL; + return XFS_ERROR(EIO); + } + + if (ip->i_d.di_mode == 0 || (igen && (ip->i_d.di_gen != igen))) { + xfs_iput_new(ip, XFS_ILOCK_SHARED); + *vpp = NULL; + return XFS_ERROR(ENOENT); + } + + *vpp = XFS_ITOV(ip); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; +} + + +#define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ +#define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ +#define MNTOPT_LOGDEV "logdev" /* log device */ +#define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ +#define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ +#define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ +#define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */ +#define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ +#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ +#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ +#define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ +#define MNTOPT_MTPT "mtpt" /* filesystem mount point */ +#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ +#define MNTOPT_NOLOGFLUSH "nologflush" /* don't hard flush on log writes */ +#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ + + +int +xfs_parseargs( + struct bhv_desc *bhv, + char *options, + struct xfs_mount_args *args, + int update) +{ + struct vfs *vfsp = bhvtovfs(bhv); + char *this_char, *value, *eov; + int dsunit, dswidth, vol_dsunit, vol_dswidth; + int iosize; + + if (!options) + return 0; + + iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0; + + while ((this_char = strsep(&options, ",")) != NULL) { + if (!*this_char) + continue; + if ((value = strchr(this_char, '=')) != NULL) + *value++ = 0; + + if (!strcmp(this_char, MNTOPT_LOGBUFS)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_LOGBUFS); + return -EINVAL; + } + args->logbufs = simple_strtoul(value, &eov, 10); + } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { + int last, in_kilobytes = 0; + + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_LOGBSIZE); + return -EINVAL; + } + last = strlen(value) - 1; + if (value[last] == 'K' || value[last] == 'k') { + in_kilobytes = 1; + value[last] = '\0'; + } + args->logbufsize = simple_strtoul(value, &eov, 10); + if (in_kilobytes) + args->logbufsize <<= 10; + } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_LOGDEV); + return -EINVAL; + } + strncpy(args->logname, value, MAXNAMELEN); + } else if (!strcmp(this_char, MNTOPT_MTPT)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_MTPT); + return -EINVAL; + } + strncpy(args->mtpt, value, MAXNAMELEN); + } else if (!strcmp(this_char, MNTOPT_RTDEV)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_RTDEV); + return -EINVAL; + } + strncpy(args->rtname, value, MAXNAMELEN); + } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_BIOSIZE); + return -EINVAL; + } + iosize = simple_strtoul(value, &eov, 10); + args->flags |= XFSMNT_IOSIZE; + args->iosizelog = (uint8_t) iosize; + } else if (!strcmp(this_char, MNTOPT_WSYNC)) { + args->flags |= 
XFSMNT_WSYNC; + } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { + args->flags |= XFSMNT_OSYNCISOSYNC; + } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { + args->flags |= XFSMNT_NORECOVERY; + } else if (!strcmp(this_char, MNTOPT_INO64)) { + args->flags |= XFSMNT_INO64; +#ifndef XFS_BIG_FILESYSTEMS + printk("XFS: %s option not allowed on this system\n", + MNTOPT_INO64); + return -EINVAL; +#endif + } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { + args->flags |= XFSMNT_NOALIGN; + } else if (!strcmp(this_char, MNTOPT_SUNIT)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_SUNIT); + return -EINVAL; + } + dsunit = simple_strtoul(value, &eov, 10); + } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_SWIDTH); + return -EINVAL; + } + dswidth = simple_strtoul(value, &eov, 10); + } else if (!strcmp(this_char, MNTOPT_NOUUID)) { + args->flags |= XFSMNT_NOUUID; + } else if (!strcmp(this_char, MNTOPT_NOLOGFLUSH)) { + args->flags |= XFSMNT_NOLOGFLUSH; + } else if (!strcmp(this_char, "osyncisdsync")) { + /* no-op, this is now the default */ +printk("XFS: osyncisdsync is now the default, option is deprecated.\n"); + } else if (!strcmp(this_char, "irixsgid")) { +printk("XFS: irixsgid is now a sysctl(2) variable, option is deprecated.\n"); + } else { + printk("XFS: unknown mount option [%s].\n", this_char); + return -EINVAL; + } + } + + if (args->flags & XFSMNT_NORECOVERY) { + if ((vfsp->vfs_flag & VFS_RDONLY) == 0) { + printk("XFS: no-recovery mounts must be read-only.\n"); + return -EINVAL; + } + } + + if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) { + printk( + "XFS: sunit and swidth options incompatible with the noalign option\n"); + return -EINVAL; + } + + if ((dsunit && !dswidth) || (!dsunit && dswidth)) { + printk("XFS: sunit and swidth must be specified together\n"); + return -EINVAL; + } + + if (dsunit && (dswidth % dsunit != 0)) { + printk( + "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)\n", + dswidth, dsunit); + return -EINVAL; + } + + if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { + if (dsunit) { + args->sunit = dsunit; + args->flags |= XFSMNT_RETERR; + } else { + args->sunit = vol_dsunit; + } + dswidth ? 
(args->swidth = dswidth) : + (args->swidth = vol_dswidth); + } else { + args->sunit = args->swidth = 0; + } + + return 0; +} + +int +xfs_showargs( + struct bhv_desc *bhv, + struct seq_file *m) +{ + static struct proc_xfs_info { + int flag; + char *str; + } xfs_info[] = { + /* the few simple ones we can get from the mount struct */ + { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, + { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, + { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, + { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, + { 0, NULL } + }; + struct proc_xfs_info *xfs_infop; + struct xfs_mount *mp = XFS_BHVTOM(bhv); + + for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) { + if (mp->m_flags & xfs_infop->flag) + seq_puts(m, xfs_infop->str); + } + + if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) + seq_printf(m, "," MNTOPT_BIOSIZE "=%d", mp->m_writeio_log); + + if (mp->m_logbufs > 0) + seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); + + if (mp->m_logbsize > 0) + seq_printf(m, "," MNTOPT_LOGBSIZE "=%d", mp->m_logbsize); + + if (mp->m_ddev_targp->pbr_dev != mp->m_logdev_targp->pbr_dev) + seq_printf(m, "," MNTOPT_LOGDEV "=%s", + bdevname(mp->m_logdev_targp->pbr_dev)); + + if (mp->m_rtdev_targp && + mp->m_ddev_targp->pbr_dev != mp->m_rtdev_targp->pbr_dev) + seq_printf(m, "," MNTOPT_RTDEV "=%s", + bdevname(mp->m_rtdev_targp->pbr_dev)); + + if (mp->m_dalign > 0) + seq_printf(m, "," MNTOPT_SUNIT "=%d", + (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); + + if (mp->m_swidth > 0) + seq_printf(m, "," MNTOPT_SWIDTH "=%d", + (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); + + return 0; +} + + +vfsops_t xfs_vfsops = { + BHV_IDENTITY_INIT(VFS_BHV_XFS,VFS_POSITION_XFS), + .vfs_parseargs = xfs_parseargs, + .vfs_showargs = xfs_showargs, + .vfs_mount = xfs_mount, + .vfs_unmount = xfs_unmount, + .vfs_mntupdate = xfs_mntupdate, + .vfs_root = xfs_root, + .vfs_statvfs = xfs_statvfs, + .vfs_sync = xfs_sync, + .vfs_vget = xfs_vget, + .vfs_dmapiops = (vfs_dmapiops_t)fs_nosys, + .vfs_quotactl = (vfs_quotactl_t)fs_nosys, + .vfs_get_inode = xfs_get_inode, + .vfs_init_vnode = xfs_initialize_vnode, + .vfs_force_shutdown = xfs_do_force_shutdown, +}; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/fs/xfs/xfs_vnodeops.c linux.21rc5-ac2/fs/xfs/xfs_vnodeops.c --- linux.21rc5/fs/xfs/xfs_vnodeops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/fs/xfs/xfs_vnodeops.c 2003-05-07 15:41:10.000000000 +0100 @@ -0,0 +1,4789 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_itable.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_rw.h" +#include "xfs_error.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_quota.h" +#include "xfs_utils.h" +#include "xfs_trans_space.h" +#include "xfs_dir_leaf.h" +#include "xfs_dmapi.h" +#include "xfs_mac.h" +#include "xfs_log_priv.h" + + +/* + * The maximum pathlen is 1024 bytes. Since the minimum file system + * blocksize is 512 bytes, we can get a max of 2 extents back from + * bmapi. + */ +#define SYMLINK_MAPS 2 + +extern int xfs_ioctl(bhv_desc_t *, struct inode *, struct file *, + unsigned int, unsigned long); + + +/* + * For xfs, we check that the file isn't too big to be opened by this kernel. + * No other open action is required for regular files. Devices are handled + * through the specfs file system, pipes through fifofs. Device and + * fifo vnodes are "wrapped" by specfs and fifofs vnodes, respectively, + * when a new vnode is first looked up or created. + */ +STATIC int +xfs_open( + bhv_desc_t *bdp, + cred_t *credp) +{ + int mode; + vnode_t *vp; + xfs_inode_t *ip; + + vp = BHV_TO_VNODE(bdp); + ip = XFS_BHVTOI(bdp); + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return XFS_ERROR(EIO); + + /* + * If it's a directory with any blocks, read-ahead block 0 + * as we're almost certain to have the next operation be a read there. 
+ */ + if (vp->v_type == VDIR && ip->i_d.di_nextents > 0) { + mode = xfs_ilock_map_shared(ip); + if (ip->i_d.di_nextents > 0) + (void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); + xfs_iunlock(ip, mode); + } + return 0; +} + + +/* + * xfs_getattr + */ +STATIC int +xfs_getattr( + bhv_desc_t *bdp, + vattr_t *vap, + int flags, + cred_t *credp) +{ + xfs_inode_t *ip; + xfs_mount_t *mp; + vnode_t *vp; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + if (!(flags & ATTR_LAZY)) + xfs_ilock(ip, XFS_ILOCK_SHARED); + + vap->va_size = ip->i_d.di_size; + if (vap->va_mask == XFS_AT_SIZE) { + if (!(flags & ATTR_LAZY)) + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; + } + vap->va_nblocks = + XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); + vap->va_fsid = mp->m_dev; +#if XFS_BIG_FILESYSTEMS + vap->va_nodeid = ip->i_ino + mp->m_inoadd; +#else + vap->va_nodeid = ip->i_ino; +#endif + vap->va_nlink = ip->i_d.di_nlink; + + /* + * Quick exit for non-stat callers + */ + if ((vap->va_mask & + ~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID| + XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0) { + if (!(flags & ATTR_LAZY)) + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; + } + + /* + * Copy from in-core inode. + */ + vap->va_type = vp->v_type; + vap->va_mode = ip->i_d.di_mode & MODEMASK; + vap->va_uid = ip->i_d.di_uid; + vap->va_gid = ip->i_d.di_gid; + vap->va_projid = ip->i_d.di_projid; + + /* + * Check vnode type block/char vs. everything else. + * Do it with bitmask because that's faster than looking + * for multiple values individually. + */ + if (((1 << vp->v_type) & ((1 << VBLK) | (1 << VCHR))) == 0) { + vap->va_rdev = 0; + + if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { + +#if 0 + /* Large block sizes confuse various + * user space programs, so letting the + * stripe size through is not a good + * idea for now. + */ + vap->va_blksize = mp->m_swidth ? + /* + * If the underlying volume is a stripe, then + * return the stripe width in bytes as the + * recommended I/O size. + */ + (mp->m_swidth << mp->m_sb.sb_blocklog) : + /* + * Return the largest of the preferred buffer + * sizes since doing small I/Os into larger + * buffers causes buffers to be decommissioned. + * The value returned is in bytes. + */ + (1 << (int)MAX(mp->m_readio_log, + mp->m_writeio_log)); + +#else + vap->va_blksize = + /* + * Return the largest of the preferred buffer + * sizes since doing small I/Os into larger + * buffers causes buffers to be decommissioned. + * The value returned is in bytes. + */ + 1 << (int)MAX(mp->m_readio_log, + mp->m_writeio_log); +#endif + } else { + + /* + * If the file blocks are being allocated from a + * realtime partition, then return the inode's + * realtime extent size or the realtime volume's + * extent size. + */ + vap->va_blksize = ip->i_d.di_extsize ? + (ip->i_d.di_extsize << mp->m_sb.sb_blocklog) : + (mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog); + } + } else { + vap->va_rdev = ip->i_df.if_u2.if_rdev; + vap->va_blksize = BLKDEV_IOSIZE; + } + + vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec; + vap->va_atime.tv_nsec = ip->i_d.di_atime.t_nsec; + vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec; + vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; + vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec; + vap->va_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; + + /* + * Exit for stat callers. See if any of the rest of the fields + * to be filled in are needed.
+ */ + if ((vap->va_mask & + (XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS| + XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0) { + if (!(flags & ATTR_LAZY)) + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; + } + /* + * convert di_flags to xflags + */ + vap->va_xflags = + ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? + XFS_XFLAG_REALTIME : 0) | + ((ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) ? + XFS_XFLAG_PREALLOC : 0) | + (XFS_IFORK_Q(ip) ? + XFS_XFLAG_HASATTR : 0); + vap->va_extsize = ip->i_d.di_extsize << mp->m_sb.sb_blocklog; + vap->va_nextents = + (ip->i_df.if_flags & XFS_IFEXTENTS) ? + ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) : + ip->i_d.di_nextents; + if (ip->i_afp != NULL) + vap->va_anextents = + (ip->i_afp->if_flags & XFS_IFEXTENTS) ? + ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) : + ip->i_d.di_anextents; + else + vap->va_anextents = 0; + vap->va_gencount = ip->i_d.di_gen; + vap->va_vcode = 0L; + + if (!(flags & ATTR_LAZY)) + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; +} + + +/* + * xfs_setattr + */ +STATIC int +xfs_setattr( + bhv_desc_t *bdp, + vattr_t *vap, + int flags, + cred_t *credp) +{ + xfs_inode_t *ip; + xfs_trans_t *tp; + xfs_mount_t *mp; + int mask; + int code; + uint lock_flags; + uint commit_flags=0; + uid_t uid=0, iuid=0; + gid_t gid=0, igid=0; + int timeflags = 0; + vnode_t *vp; + xfs_prid_t projid=0, iprojid=0; + int mandlock_before, mandlock_after; + struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; + int file_owner; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + /* + * Cannot set certain attributes. + */ + mask = vap->va_mask; + if (mask & XFS_AT_NOSET) { + return XFS_ERROR(EINVAL); + } + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + /* + * Timestamps do not need to be logged and hence do not + * need to be done within a transaction. + */ + if (mask & XFS_AT_UPDTIMES) { + ASSERT((mask & ~XFS_AT_UPDTIMES) == 0); + timeflags = ((mask & XFS_AT_UPDATIME) ? XFS_ICHGTIME_ACC : 0) | + ((mask & XFS_AT_UPDCTIME) ? XFS_ICHGTIME_CHG : 0) | + ((mask & XFS_AT_UPDMTIME) ? XFS_ICHGTIME_MOD : 0); + xfs_ichgtime(ip, timeflags); + return 0; + } + + olddquot1 = olddquot2 = NULL; + udqp = gdqp = NULL; + + /* + * If disk quotas is on, we make sure that the dquots do exist on disk, + * before we start any other transactions. Trying to do this later + * is messy. We don't care to take a readlock to look at the ids + * in inode here, because we can't hold it across the trans_reserve. + * If the IDs do change before we take the ilock, we're covered + * because the i_*dquot fields will get updated anyway. + */ + if (XFS_IS_QUOTA_ON(mp) && (mask & (XFS_AT_UID|XFS_AT_GID))) { + uint qflags = 0; + + if (mask & XFS_AT_UID) { + uid = vap->va_uid; + qflags |= XFS_QMOPT_UQUOTA; + } else { + uid = ip->i_d.di_uid; + } + if (mask & XFS_AT_GID) { + gid = vap->va_gid; + qflags |= XFS_QMOPT_GQUOTA; + } else { + gid = ip->i_d.di_gid; + } + /* + * We take a reference when we initialize udqp and gdqp, + * so it is important that we never blindly double trip on + * the same variable. See xfs_create() for an example. + */ + ASSERT(udqp == NULL); + ASSERT(gdqp == NULL); + code = XFS_QM_DQVOPALLOC(mp, ip, uid,gid, qflags, &udqp, &gdqp); + if (code) + return (code); + } + + /* + * For the other attributes, we acquire the inode lock and + * first do an error checking pass. 
+ */ + tp = NULL; + lock_flags = XFS_ILOCK_EXCL; + if (!(mask & XFS_AT_SIZE)) { + if ((mask != (XFS_AT_CTIME|XFS_AT_ATIME|XFS_AT_MTIME)) || + (mp->m_flags & XFS_MOUNT_WSYNC)) { + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); + commit_flags = 0; + if ((code = xfs_trans_reserve(tp, 0, + XFS_ICHANGE_LOG_RES(mp), 0, + 0, 0))) { + lock_flags = 0; + goto error_return; + } + } + } else { + if (DM_EVENT_ENABLED (vp->v_vfsp, ip, DM_EVENT_TRUNCATE) && + !(flags & ATTR_DMI)) { + code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, bdp, + vap->va_size, 0, AT_DELAY_FLAG(flags), NULL); + if (code) { + lock_flags = 0; + goto error_return; + } + } + lock_flags |= XFS_IOLOCK_EXCL; + } + + xfs_ilock(ip, lock_flags); + + if (_MAC_XFS_IACCESS(ip, MACWRITE, credp)) { + code = XFS_ERROR(EACCES); + goto error_return; + } + + /* boolean: are we the file owner? */ + file_owner = (current->fsuid == ip->i_d.di_uid); + + /* + * Change various properties of a file. + * Only the owner or users with CAP_FOWNER + * capability may do these things. + */ + if (mask & + (XFS_AT_MODE|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_UID| + XFS_AT_GID|XFS_AT_PROJID)) { + /* + * CAP_FOWNER overrides the following restrictions: + * + * The user ID of the calling process must be equal + * to the file owner ID, except in cases where the + * CAP_FSETID capability is applicable. + */ + if (!file_owner && !capable(CAP_FOWNER)) { + code = XFS_ERROR(EPERM); + goto error_return; + } + + /* + * CAP_FSETID overrides the following restrictions: + * + * The effective user ID of the calling process shall match + * the file owner when setting the set-user-ID and + * set-group-ID bits on that file. + * + * The effective group ID or one of the supplementary group + * IDs of the calling process shall match the group owner of + * the file when setting the set-group-ID bit on that file + */ + if (mask & XFS_AT_MODE) { + mode_t m = 0; + + if ((vap->va_mode & ISUID) && !file_owner) + m |= ISUID; + if ((vap->va_mode & ISGID) && + !in_group_p((gid_t)ip->i_d.di_gid)) + m |= ISGID; +#if 0 + /* Linux allows this, Irix doesn't. */ + if ((vap->va_mode & ISVTX) && vp->v_type != VDIR) + m |= ISVTX; +#endif + if (m && !capable(CAP_FSETID)) + vap->va_mode &= ~m; + } + } + + /* + * Change file ownership. Must be the owner or privileged. + * If the system was configured with the "restricted_chown" + * option, the owner is not permitted to give away the file, + * and can change the group id only to a group of which he + * or she is a member. + */ + if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) { + /* + * These IDs could have changed since we last looked at them. + * But, we're assured that if the ownership did change + * while we didn't have the inode locked, inode's dquot(s) + * would have changed also. + */ + iuid = ip->i_d.di_uid; + iprojid = ip->i_d.di_projid; + igid = ip->i_d.di_gid; + gid = (mask & XFS_AT_GID) ? vap->va_gid : igid; + uid = (mask & XFS_AT_UID) ? vap->va_uid : iuid; + projid = (mask & XFS_AT_PROJID) ? (xfs_prid_t)vap->va_projid : + iprojid; + + /* + * CAP_CHOWN overrides the following restrictions: + * + * If _POSIX_CHOWN_RESTRICTED is defined, this capability + * shall override the restriction that a process cannot + * change the user ID of a file it owns and the restriction + * that the group ID supplied to the chown() function + * shall be equal to either the group ID or one of the + * supplementary group IDs of the calling process. + * + * XXX: How does restricted_chown affect projid? 
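 *
 * [Editorial aside, not part of the original patch: the restricted_chown
 * test that follows reads more easily as a single predicate. A
 * standalone restatement, with plain ints standing in for capable() and
 * in_group_p():]
 */

static int demo_chown_denied(int restricted_chown,
			     unsigned int iuid, unsigned int uid,
			     unsigned int igid, unsigned int gid,
			     int in_new_group, int cap_chown)
{
	/* the owner may not give the file away, and may only switch to
	 * a group he or she belongs to -- unless CAP_CHOWN is held */
	return restricted_chown &&
	       (iuid != uid || (igid != gid && !in_new_group)) &&
	       !cap_chown;
}

/*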
+ */ + if (restricted_chown && + (iuid != uid || (igid != gid && + !in_group_p((gid_t)gid))) && + !capable(CAP_CHOWN)) { + code = XFS_ERROR(EPERM); + goto error_return; + } + /* + * Do a quota reservation only if uid or gid is actually + * going to change. + */ + if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || + (XFS_IS_GQUOTA_ON(mp) && igid != gid)) { + ASSERT(tp); + code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp, + capable(CAP_FOWNER) ? + XFS_QMOPT_FORCE_RES : 0); + if (code) /* out of quota */ + goto error_return; + } + } + + /* + * Truncate file. Must have write permission and not be a directory. + */ + if (mask & XFS_AT_SIZE) { + /* Short circuit the truncate case for zero length files */ + if ((vap->va_size == 0) && + (ip->i_d.di_size == 0) && (ip->i_d.di_nextents == 0)) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + lock_flags &= ~XFS_ILOCK_EXCL; + if (mask & XFS_AT_CTIME) + xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + code = 0; + goto error_return; + } + + if (vp->v_type == VDIR) { + code = XFS_ERROR(EISDIR); + goto error_return; + } else if (vp->v_type != VREG) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + /* + * Make sure that the dquots are attached to the inode. + */ + if ((code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED))) + goto error_return; + } + + /* + * Change file access or modified times. + */ + if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) { + if (!file_owner) { + if ((flags & ATTR_UTIME) && + !capable(CAP_FOWNER)) { + code = XFS_ERROR(EPERM); + goto error_return; + } + } + } + + /* + * Change extent size or realtime flag. + */ + if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) { + /* + * Can't change extent size if any extents are allocated. + */ + if (ip->i_d.di_nextents && (mask & XFS_AT_EXTSIZE) && + ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != + vap->va_extsize) ) { + code = XFS_ERROR(EINVAL); /* EFBIG? */ + goto error_return; + } + + /* + * Can't set extent size unless the file is marked, or + * about to be marked as a realtime file. + * + * This check will be removed when fixed size extents + * with buffered data writes is implemented. + * + */ + if ((mask & XFS_AT_EXTSIZE) && + ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != + vap->va_extsize) && + (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) || + ((mask & XFS_AT_XFLAGS) && + (vap->va_xflags & XFS_XFLAG_REALTIME))))) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + + /* + * Can't change realtime flag if any extents are allocated. + */ + if (ip->i_d.di_nextents && (mask & XFS_AT_XFLAGS) && + (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != + (vap->va_xflags & XFS_XFLAG_REALTIME)) { + code = XFS_ERROR(EINVAL); /* EFBIG? */ + goto error_return; + } + /* + * Extent size must be a multiple of the appropriate block + * size, if set at all. + */ + if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) { + xfs_extlen_t size; + + if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) || + ((mask & XFS_AT_XFLAGS) && + (vap->va_xflags & XFS_XFLAG_REALTIME))) { + size = mp->m_sb.sb_rextsize << + mp->m_sb.sb_blocklog; + } else { + size = mp->m_sb.sb_blocksize; + } + if (vap->va_extsize % size) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + } + /* + * If realtime flag is set then must have realtime data. + */ + if ((mask & XFS_AT_XFLAGS) && + (vap->va_xflags & XFS_XFLAG_REALTIME)) { + if ((mp->m_sb.sb_rblocks == 0) || + (mp->m_sb.sb_rextsize == 0) || + (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + } + } + + /* + * Now we can make the changes. 
Before we join the inode + * to the transaction, if XFS_AT_SIZE is set then take care of + * the part of the truncation that must be done without the + * inode lock. This needs to be done before joining the inode + * to the transaction, because the inode cannot be unlocked + * once it is a part of the transaction. + */ + if (mask & XFS_AT_SIZE) { + if (vap->va_size > ip->i_d.di_size) { + code = xfs_igrow_start(ip, vap->va_size, credp); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + } else if (vap->va_size < ip->i_d.di_size) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, + (xfs_fsize_t)vap->va_size); + code = 0; + } else { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + code = 0; + } + if (code) { + ASSERT(tp == NULL); + lock_flags &= ~XFS_ILOCK_EXCL; + ASSERT(lock_flags == XFS_IOLOCK_EXCL); + goto error_return; + } + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); + if ((code = xfs_trans_reserve(tp, 0, + XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return code; + } + commit_flags = XFS_TRANS_RELEASE_LOG_RES; + xfs_ilock(ip, XFS_ILOCK_EXCL); + } + + if (tp) { + xfs_trans_ijoin(tp, ip, lock_flags); + xfs_trans_ihold(tp, ip); + } + + /* determine whether mandatory locking mode changes */ + mandlock_before = MANDLOCK(vp, ip->i_d.di_mode); + + /* + * Truncate file. Must have write permission and not be a directory. + */ + if (mask & XFS_AT_SIZE) { + if (vap->va_size > ip->i_d.di_size) { + xfs_igrow_finish(tp, ip, vap->va_size, + !(flags & ATTR_DMI)); + } else if ((vap->va_size < ip->i_d.di_size) || + ((vap->va_size == 0) && ip->i_d.di_nextents)) { + /* + * signal a sync transaction unless + * we're truncating an already unlinked + * file on a wsync filesystem + */ + code = xfs_itruncate_finish(&tp, ip, + (xfs_fsize_t)vap->va_size, + XFS_DATA_FORK, + ((ip->i_d.di_nlink != 0 || + !(mp->m_flags & XFS_MOUNT_WSYNC)) + ? 1 : 0)); + if (code) { + goto abort_return; + } + } + /* + * Have to do this even if the file's size doesn't change. + */ + timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; + } + + /* + * Change file access modes. + */ + if (mask & XFS_AT_MODE) { + ip->i_d.di_mode &= IFMT; + ip->i_d.di_mode |= vap->va_mode & ~IFMT; + + xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); + timeflags |= XFS_ICHGTIME_CHG; + } + + /* + * Change file ownership. Must be the owner or privileged. + * If the system was configured with the "restricted_chown" + * option, the owner is not permitted to give away the file, + * and can change the group id only to a group of which he + * or she is a member. + */ + if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) { + /* + * CAP_FSETID overrides the following restrictions: + * + * The set-user-ID and set-group-ID bits of a file will be + * cleared upon successful return from chown() + */ + if ((ip->i_d.di_mode & (ISUID|ISGID)) && + !capable(CAP_FSETID)) { + ip->i_d.di_mode &= ~(ISUID|ISGID); + } + + /* + * Change the ownerships and register quota modifications + * in the transaction. 
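 *
 * [Editorial aside, not part of the original patch: the XFS_QM_DQVOPCHOWN
 * calls below swap the new dquot into the inode and hand back the
 * displaced one, which is only released after the commit (see the
 * XFS_QM_DQRELE calls on olddquot1/olddquot2 further down). A generic
 * sketch of that swap-then-release-later shape, with invented names:]
 */

struct demo_ref { int count; };

static struct demo_ref *
demo_swap_ref(struct demo_ref **slot, struct demo_ref *new_ref)
{
	struct demo_ref *old = *slot;

	*slot = new_ref;	/* takes effect with the transaction */
	return old;		/* caller must release this after commit */
}

/*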
+ */
+ if (iuid != uid) {
+ if (XFS_IS_UQUOTA_ON(mp)) {
+ ASSERT(mask & XFS_AT_UID);
+ ASSERT(udqp);
+ olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
+ &ip->i_udquot, udqp);
+ }
+ ip->i_d.di_uid = uid;
+ }
+ if (igid != gid) {
+ if (XFS_IS_GQUOTA_ON(mp)) {
+ ASSERT(mask & XFS_AT_GID);
+ ASSERT(gdqp);
+ olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
+ &ip->i_gdquot, gdqp);
+ }
+ ip->i_d.di_gid = gid;
+ }
+ if (iprojid != projid) {
+ ip->i_d.di_projid = projid;
+ /*
+ * We may have to rev the inode as well as
+ * the superblock version number since projids didn't
+ * exist before DINODE_VERSION_2 and SB_VERSION_NLINK.
+ */
+ if (ip->i_d.di_version == XFS_DINODE_VERSION_1)
+ xfs_bump_ino_vers2(tp, ip);
+ }
+
+ xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+ timeflags |= XFS_ICHGTIME_CHG;
+ }
+
+
+ /*
+ * Change file access or modified times.
+ */
+ if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) {
+ if (mask & XFS_AT_ATIME) {
+ ip->i_d.di_atime.t_sec = vap->va_atime.tv_sec;
+ ip->i_d.di_atime.t_nsec = vap->va_atime.tv_nsec;
+ ip->i_update_core = 1;
+ timeflags &= ~XFS_ICHGTIME_ACC;
+ }
+ if (mask & XFS_AT_MTIME) {
+ ip->i_d.di_mtime.t_sec = vap->va_mtime.tv_sec;
+ ip->i_d.di_mtime.t_nsec = vap->va_mtime.tv_nsec;
+ timeflags &= ~XFS_ICHGTIME_MOD;
+ timeflags |= XFS_ICHGTIME_CHG;
+ }
+ if (tp && (flags & ATTR_UTIME))
+ xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+ }
+
+ /*
+ * Change XFS-added attributes.
+ */
+ if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) {
+ if (mask & XFS_AT_EXTSIZE) {
+ /*
+ * Converting bytes to fs blocks.
+ */
+ ip->i_d.di_extsize = vap->va_extsize >>
+ mp->m_sb.sb_blocklog;
+ }
+ if (mask & XFS_AT_XFLAGS) {
+ ip->i_d.di_flags = 0;
+ if (vap->va_xflags & XFS_XFLAG_REALTIME) {
+ ip->i_d.di_flags |= XFS_DIFLAG_REALTIME;
+ ip->i_iocore.io_flags |= XFS_IOCORE_RT;
+ }
+ /* can't set PREALLOC this way, just ignore it */
+ }
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ timeflags |= XFS_ICHGTIME_CHG;
+ }
+
+ /*
+ * Change file inode change time only if XFS_AT_CTIME set
+ * AND we have been called by a DMI function.
+ */
+
+ if ( (flags & ATTR_DMI) && (mask & XFS_AT_CTIME) ) {
+ ip->i_d.di_ctime.t_sec = vap->va_ctime.tv_sec;
+ ip->i_d.di_ctime.t_nsec = vap->va_ctime.tv_nsec;
+ ip->i_update_core = 1;
+ timeflags &= ~XFS_ICHGTIME_CHG;
+ }
+
+ /*
+ * Send out timestamp changes that need to be set to the
+ * current time. Not done when called by a DMI function.
+ */
+ if (timeflags && !(flags & ATTR_DMI))
+ xfs_ichgtime(ip, timeflags);
+
+ XFS_STATS_INC(xfsstats.xs_ig_attrchg);
+
+ /*
+ * If this is a synchronous mount, make sure that the
+ * transaction goes to disk before returning to the user.
+ * This is slightly sub-optimal in that truncates require
+ * two sync transactions instead of one for wsync filesystems.
+ * One for the truncate and one for the timestamps since we
+ * don't want to change the timestamps unless we're sure the
+ * truncate worked. Truncates are less than 1% of the laddis
+ * mix so this probably isn't worth the trouble to optimize.
+ */
+ code = 0;
+ if (tp) {
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(tp);
+
+ code = xfs_trans_commit(tp, commit_flags, NULL);
+ }
+
+ /*
+ * If the (regular) file's mandatory locking mode changed, then
+ * notify the vnode. We do this under the inode lock to prevent
+ * racing calls to vop_vnode_change.
+ */ + mandlock_after = MANDLOCK(vp, ip->i_d.di_mode); + if (mandlock_before != mandlock_after) { + VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_ENF_LOCKING, + mandlock_after); + } + + xfs_iunlock(ip, lock_flags); + + /* + * Release any dquot(s) the inode had kept before chown. + */ + XFS_QM_DQRELE(mp, olddquot1); + XFS_QM_DQRELE(mp, olddquot2); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + if (code) { + return code; + } + + if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_ATTRIBUTE) && + !(flags & ATTR_DMI)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, bdp, DM_RIGHT_NULL, + NULL, DM_RIGHT_NULL, NULL, NULL, + 0, 0, AT_DELAY_FLAG(flags)); + } + return 0; + + abort_return: + commit_flags |= XFS_TRANS_ABORT; + /* FALLTHROUGH */ + error_return: + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + if (tp) { + xfs_trans_cancel(tp, commit_flags); + } + if (lock_flags != 0) { + xfs_iunlock(ip, lock_flags); + } + return code; +} + + +/* + * xfs_access + * Null conversion from vnode mode bits to inode mode bits, as in efs. + */ +STATIC int +xfs_access( + bhv_desc_t *bdp, + int mode, + cred_t *credp) +{ + xfs_inode_t *ip; + int error; + + vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__, + (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + xfs_ilock(ip, XFS_ILOCK_SHARED); + error = xfs_iaccess(ip, mode, credp); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return error; +} + + +/* + * xfs_readlink + * + */ +STATIC int +xfs_readlink( + bhv_desc_t *bdp, + uio_t *uiop, + cred_t *credp) +{ + xfs_inode_t *ip; + int count; + xfs_off_t offset; + int pathlen; + vnode_t *vp; + int error = 0; + xfs_mount_t *mp; + int nmaps; + xfs_bmbt_irec_t mval[SYMLINK_MAPS]; + xfs_daddr_t d; + int byte_cnt; + int n; + xfs_buf_t *bp; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + xfs_ilock(ip, XFS_ILOCK_SHARED); + + ASSERT((ip->i_d.di_mode & IFMT) == IFLNK); + + offset = uiop->uio_offset; + count = uiop->uio_resid; + + if (offset < 0) { + error = XFS_ERROR(EINVAL); + goto error_return; + } + if (count <= 0) { + error = 0; + goto error_return; + } + + if (!(uiop->uio_fmode & FINVIS)) { + xfs_ichgtime(ip, XFS_ICHGTIME_ACC); + } + + /* + * See if the symlink is stored inline. + */ + pathlen = (int)ip->i_d.di_size; + + if (ip->i_df.if_flags & XFS_IFINLINE) { + error = uiomove(ip->i_df.if_u1.if_data, pathlen, UIO_READ, uiop); + } + else { + /* + * Symlink not inline. Call bmap to get it in. + */ + nmaps = SYMLINK_MAPS; + + error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), + 0, NULL, 0, mval, &nmaps, NULL); + + if (error) { + goto error_return; + } + + for (n = 0; n < nmaps; n++) { + d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); + byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); + bp = xfs_buf_read(mp->m_ddev_targp, d, + BTOBB(byte_cnt), 0); + error = XFS_BUF_GETERROR(bp); + if (error) { + xfs_ioerror_alert("xfs_readlink", + ip->i_mount, bp, XFS_BUF_ADDR(bp)); + xfs_buf_relse(bp); + goto error_return; + } + if (pathlen < byte_cnt) + byte_cnt = pathlen; + pathlen -= byte_cnt; + + error = uiomove(XFS_BUF_PTR(bp), byte_cnt, + UIO_READ, uiop); + xfs_buf_relse (bp); + } + + } + + +error_return: + + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + return error; +} + + +/* + * xfs_fsync + * + * This is called to sync the inode and its data out to disk. + * We need to hold the I/O lock while flushing the data, and + * the inode lock while flushing the inode. 
The inode lock CANNOT + * be held while flushing the data, so acquire after we're done + * with that. + */ +STATIC int +xfs_fsync( + bhv_desc_t *bdp, + int flag, + cred_t *credp, + xfs_off_t start, + xfs_off_t stop) +{ + xfs_inode_t *ip; + int error; + int error2; + int syncall; + vnode_t *vp; + xfs_trans_t *tp; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + + ASSERT(start >= 0 && stop >= -1); + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return XFS_ERROR(EIO); + + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + syncall = error = error2 = 0; + + if (stop == -1) { + ASSERT(start >= 0); + if (start == 0) + syncall = 1; + stop = xfs_file_last_byte(ip); + } + + /* + * If we're invalidating, always flush since we want to + * tear things down. Otherwise, don't flush anything if + * we're not dirty. + */ + if (flag & FSYNC_INVAL) { + if (ip->i_df.if_flags & XFS_IFEXTENTS && + ip->i_df.if_bytes > 0) { + VOP_FLUSHINVAL_PAGES(vp, start, -1, FI_REMAPF_LOCKED); + } + ASSERT(syncall == 0 || (VN_CACHED(vp) == 0)); + } else { + /* + * In the non-invalidating case, calls to fsync() do not + * flush all the dirty mmap'd pages. That requires a + * call to msync(). + */ + VOP_FLUSH_PAGES(vp, start, -1, + (flag & FSYNC_WAIT) ? 0 : XFS_B_ASYNC, + FI_NONE, error2); + } + + if (error2) { + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return XFS_ERROR(error2); + } + + /* + * We always need to make sure that the required inode state + * is safe on disk. The vnode might be clean but because + * of committed transactions that haven't hit the disk yet. + * Likewise, there could be unflushed non-transactional + * changes to the inode core that have to go to disk. + * + * The following code depends on one assumption: that + * any transaction that changes an inode logs the core + * because it has to change some field in the inode core + * (typically nextents or nblocks). That assumption + * implies that any transactions against an inode will + * catch any non-transactional updates. If inode-altering + * transactions exist that violate this assumption, the + * code breaks. Right now, it figures that if the involved + * update_* field is clear and the inode is unpinned, the + * inode is clean. Either it's been flushed or it's been + * committed and the commit has hit the disk unpinning the inode. + * (Note that xfs_inode_item_format() called at commit clears + * the update_* fields.) + */ + xfs_ilock(ip, XFS_ILOCK_SHARED); + + /* If we are flushing data then we care about update_size + * being set, otherwise we care about update_core + */ + if ((flag & FSYNC_DATA) ? + (ip->i_update_size == 0) : + (ip->i_update_core == 0)) { + /* + * Timestamps/size haven't changed since last inode + * flush or inode transaction commit. That means + * either nothing got written or a transaction + * committed which caught the updates. If the + * latter happened and the transaction hasn't + * hit the disk yet, the inode will be still + * be pinned. If it is, force the log. + */ + + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED); + + if (xfs_ipincount(ip)) { + xfs_log_force(ip->i_mount, (xfs_lsn_t)0, + XFS_LOG_FORCE | + ((flag & FSYNC_WAIT) + ? XFS_LOG_SYNC : 0)); + } + error = 0; + } else { + /* + * Kick off a transaction to log the inode + * core to get the updates. Make it + * sync if FSYNC_WAIT is passed in (which + * is done by everybody but specfs). The + * sync transaction will also force the log. 
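 *
 * [Editorial aside, not part of the original patch: the branch above
 * boils down to a small decision table -- a clean inode may still be
 * pinned by a committed-but-unwritten transaction, in which case only a
 * log force is needed, while a dirty one needs a fresh transaction. A
 * sketch with invented names:]
 */

enum demo_fsync_action { DEMO_NOTHING, DEMO_LOG_FORCE, DEMO_LOG_INODE };

static enum demo_fsync_action
demo_fsync_action(int update_flag, int pincount)
{
	if (update_flag)
		return DEMO_LOG_INODE;	 /* log the core in a transaction */
	return pincount ? DEMO_LOG_FORCE /* push committed changes to disk */
			: DEMO_NOTHING;	 /* already safe on disk */
}

/*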
+ */ + xfs_iunlock(ip, XFS_ILOCK_SHARED); + tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS); + if ((error = xfs_trans_reserve(tp, 0, + XFS_FSYNC_TS_LOG_RES(ip->i_mount), + 0, 0, 0))) { + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + + /* + * Note - it's possible that we might have pushed + * ourselves out of the way during trans_reserve + * which would flush the inode. But there's no + * guarantee that the inode buffer has actually + * gone out yet (it's delwri). Plus the buffer + * could be pinned anyway if it's part of an + * inode in another recent transaction. So we + * play it safe and fire off the transaction anyway. + */ + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + if (flag & FSYNC_WAIT) + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0, NULL); + + xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL); + } + return error; +} + + +#if 0 +/* + * This is a utility routine for xfs_inactive. It is called when a + * transaction attempting to free up the disk space for a file encounters + * an error. It cancels the old transaction and starts up a new one + * to be used to free up the inode. It also sets the inode size and extent + * counts to 0 and frees up any memory being used to store inline data, + * extents, or btree roots. + */ +STATIC void +xfs_itruncate_cleanup( + xfs_trans_t **tpp, + xfs_inode_t *ip, + int commit_flags, + int fork) +{ + xfs_mount_t *mp; + /* REFERENCED */ + int error; + + mp = ip->i_mount; + if (*tpp) { + xfs_trans_cancel(*tpp, commit_flags | XFS_TRANS_ABORT); + } + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + *tpp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); + error = xfs_trans_reserve(*tpp, 0, XFS_IFREE_LOG_RES(mp), 0, 0, + XFS_DEFAULT_LOG_COUNT); + if (error) { + return; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ijoin(*tpp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(*tpp, ip); + + xfs_idestroy_fork(ip, fork); + + if (fork == XFS_DATA_FORK) { + ip->i_d.di_nblocks = 0; + ip->i_d.di_nextents = 0; + ip->i_d.di_size = 0; + } else { + ip->i_d.di_anextents = 0; + } + xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE); +} +#endif + +/* + * This is called by xfs_inactive to free any blocks beyond eof, + * when the link count isn't zero. + */ +STATIC int +xfs_inactive_free_eofblocks( + xfs_mount_t *mp, + xfs_inode_t *ip) +{ + xfs_trans_t *tp; + int error; + xfs_fileoff_t end_fsb; + xfs_fileoff_t last_fsb; + xfs_filblks_t map_len; + int nimaps; + xfs_bmbt_irec_t imap; + + /* + * Figure out if there are any blocks beyond the end + * of the file. If not, then there is nothing to do. + */ + end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_d.di_size)); + last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAX_FILE_OFFSET); + map_len = last_fsb - end_fsb; + if (map_len <= 0) + return (0); + + nimaps = 1; + xfs_ilock(ip, XFS_ILOCK_SHARED); + error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0, + NULL, 0, &imap, &nimaps, NULL); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + if (!error && (nimaps != 0) && + (imap.br_startblock != HOLESTARTBLOCK)) { + /* + * Attach the dquots to the inode up front. + */ + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return (error); + + /* + * There are blocks after the end of file. + * Free them up now by truncating the file to + * its current size. 
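 *
 * [Editorial aside, not part of the original patch: the probe above is
 * plain block arithmetic -- round the file size up to filesystem blocks
 * and see whether anything is mapped between there and the maximum file
 * offset. A standalone version of that rounding, assuming a block-size
 * log such as 12 (4096-byte blocks):]
 */

typedef unsigned long long demo_u64;

static demo_u64 demo_b_to_fsb(demo_u64 bytes, int blocklog)
{
	/* round up, as XFS_B_TO_FSB does */
	return (bytes + (1ULL << blocklog) - 1) >> blocklog;
}
/* end_fsb = demo_b_to_fsb(di_size, 12); there is nothing to free when
 * last_fsb - end_fsb <= 0. */

/*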
+ */ + tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); + + /* + * Do the xfs_itruncate_start() call before + * reserving any log space because + * itruncate_start will call into the buffer + * cache and we can't + * do that within a transaction. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, + ip->i_d.di_size); + + error = xfs_trans_reserve(tp, 0, + XFS_ITRUNCATE_LOG_RES(mp), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + if (error) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return (error); + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, + XFS_IOLOCK_EXCL | + XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + error = xfs_itruncate_finish(&tp, ip, + ip->i_d.di_size, + XFS_DATA_FORK, + 0); + /* + * If we get an error at this point we + * simply don't bother truncating the file. + */ + if (error) { + xfs_trans_cancel(tp, + (XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT)); + } else { + error = xfs_trans_commit(tp, + XFS_TRANS_RELEASE_LOG_RES, + NULL); + } + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + } + return (error); +} + +/* + * Free a symlink that has blocks associated with it. + */ +STATIC int +xfs_inactive_symlink_rmt( + xfs_inode_t *ip, + xfs_trans_t **tpp) +{ + xfs_buf_t *bp; + int committed; + int done; + int error; + xfs_fsblock_t first_block; + xfs_bmap_free_t free_list; + int i; + xfs_mount_t *mp; + xfs_bmbt_irec_t mval[SYMLINK_MAPS]; + int nmaps; + xfs_trans_t *ntp; + int size; + xfs_trans_t *tp; + + tp = *tpp; + mp = ip->i_mount; + ASSERT(ip->i_d.di_size > XFS_IFORK_DSIZE(ip)); + /* + * We're freeing a symlink that has some + * blocks allocated to it. Free the + * blocks here. We know that we've got + * either 1 or 2 extents and that we can + * free them all in one bunmapi call. + */ + ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2); + if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + *tpp = NULL; + return error; + } + /* + * Lock the inode, fix the size, and join it to the transaction. + * Hold it so in the normal path, we still have it locked for + * the second transaction. In the error paths we need it + * held so the cancel won't rele it, see below. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + size = (int)ip->i_d.di_size; + ip->i_d.di_size = 0; + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + /* + * Find the block(s) so we can inval and unmap them. + */ + done = 0; + XFS_BMAP_INIT(&free_list, &first_block); + nmaps = sizeof(mval) / sizeof(mval[0]); + if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size), + XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps, + &free_list))) + goto error0; + /* + * Invalidate the block(s). + */ + for (i = 0; i < nmaps; i++) { + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, mval[i].br_startblock), + XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0); + xfs_trans_binval(tp, bp); + } + /* + * Unmap the dead block(s) to the free_list. + */ + if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps, + &first_block, &free_list, &done))) + goto error1; + ASSERT(done); + /* + * Commit the first transaction. This logs the EFI and the inode. 
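 *
 * [Editorial aside, not part of the original patch: what follows is the
 * classic "roll" of a permanent-log-reservation transaction -- re-join
 * the inode, duplicate the transaction, commit the original, then
 * re-reserve on the duplicate, so the inode never comes unlocked between
 * the two commits. Schematically, using the calls visible below:
 *
 *	xfs_trans_ijoin(tp, ip, ...);          -- keep ip across the commit
 *	ntp = xfs_trans_dup(tp);               -- inherits the reservation
 *	error = xfs_trans_commit(tp, 0, NULL); -- commit phase one (the EFI)
 *	tp = ntp;
 *	error = xfs_trans_reserve(tp, ...);    -- top the duplicate back up
 *
 * which is how the EFI commit and the EFD commit end up strictly ordered
 * in the log while the inode stays locked throughout.]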
+ */ + if ((error = xfs_bmap_finish(&tp, &free_list, first_block, &committed))) + goto error1; + /* + * The transaction must have been committed, since there were + * actually extents freed by xfs_bunmapi. See xfs_bmap_finish. + * The new tp has the extent freeing and EFDs. + */ + ASSERT(committed); + /* + * The first xact was committed, so add the inode to the new one. + * Mark it dirty so it will be logged and moved forward in the log as + * part of every commit. + */ + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + /* + * Get a new, empty transaction to return to our caller. + */ + ntp = xfs_trans_dup(tp); + /* + * Commit the transaction containing extent freeing and EFD's. + * If we get an error on the commit here or on the reserve below, + * we need to unlock the inode since the new transaction doesn't + * have the inode attached. + */ + error = xfs_trans_commit(tp, 0, NULL); + tp = ntp; + if (error) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + goto error0; + } + /* + * Remove the memory for extent descriptions (just bookkeeping). + */ + if (ip->i_df.if_bytes) + xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK); + ASSERT(ip->i_df.if_bytes == 0); + /* + * Put an itruncate log reservation in the new transaction + * for our caller. + */ + if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + goto error0; + } + /* + * Return with the inode locked but not joined to the transaction. + */ + *tpp = tp; + return 0; + + error1: + xfs_bmap_cancel(&free_list); + error0: + /* + * Have to come here with the inode locked and either + * (held and in the transaction) or (not in the transaction). + * If the inode isn't held then cancel would iput it, but + * that's wrong since this is inactive and the vnode ref + * count is 0 already. + * Cancel won't do anything to the inode if held, but it still + * needs to be locked until the cancel is done, if it was + * joined to the transaction. + */ + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + *tpp = NULL; + return error; + +} + +STATIC int +xfs_inactive_symlink_local( + xfs_inode_t *ip, + xfs_trans_t **tpp) +{ + int error; + + ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip)); + /* + * We're freeing a symlink which fit into + * the inode. Just free the memory used + * to hold the old symlink. + */ + error = xfs_trans_reserve(*tpp, 0, + XFS_ITRUNCATE_LOG_RES(ip->i_mount), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + + if (error) { + xfs_trans_cancel(*tpp, 0); + *tpp = NULL; + return (error); + } + xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + + /* + * Zero length symlinks _can_ exist. 
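 *
 * [Editorial aside, not part of the original patch: whether a symlink
 * lives inline is a pure size test against the data-fork space in the
 * inode -- xfs_inactive below dispatches on di_size > XFS_IFORK_DSIZE(ip).
 * A standalone model with an invented capacity constant:]
 */

#define DEMO_IFORK_DSIZE 156ULL	/* hypothetical inline capacity, bytes */

static int demo_symlink_is_inline(unsigned long long di_size)
{
	/* zero-length symlinks fall out as the trivial inline case */
	return di_size <= DEMO_IFORK_DSIZE;
}

/*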
+ */ + if (ip->i_df.if_bytes > 0) { + xfs_idata_realloc(ip, + -(ip->i_df.if_bytes), + XFS_DATA_FORK); + ASSERT(ip->i_df.if_bytes == 0); + } + return (0); +} + +/* + * + */ +STATIC int +xfs_inactive_attrs( + xfs_inode_t *ip, + xfs_trans_t **tpp, + int *commitflags) +{ + xfs_trans_t *tp; + int error; + xfs_mount_t *mp; + + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); + tp = *tpp; + mp = ip->i_mount; + ASSERT(ip->i_d.di_forkoff != 0); + xfs_trans_commit(tp, *commitflags, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + *commitflags = 0; + + error = xfs_attr_inactive(ip); + if (error) { + *tpp = NULL; + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return (error); /* goto out*/ + } + + tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); + error = xfs_trans_reserve(tp, 0, + XFS_IFREE_LOG_RES(mp), + 0, 0, + XFS_DEFAULT_LOG_COUNT); + if (error) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + *tpp = NULL; + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return (error); + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_idestroy_fork(ip, XFS_ATTR_FORK); + + ASSERT(ip->i_d.di_anextents == 0); + + *tpp = tp; + return (0); +} + +STATIC int +xfs_release( + bhv_desc_t *bdp) +{ + xfs_inode_t *ip; + vnode_t *vp; + xfs_mount_t *mp; + int error; + + vp = BHV_TO_VNODE(bdp); + ip = XFS_BHVTOI(bdp); + + if ((vp->v_type != VREG) || (ip->i_d.di_mode == 0)) { + return 0; + } + + /* If this is a read-only mount, don't do this (would generate I/O) */ + if (vp->v_vfsp->vfs_flag & VFS_RDONLY) + return 0; + + /* If we are in the NFS reference cache then don't do this now */ + if (ip->i_refcache) + return 0; + + mp = ip->i_mount; + + if (ip->i_d.di_nlink != 0) { + if ((((ip->i_d.di_mode & IFMT) == IFREG) && + ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && + (ip->i_df.if_flags & XFS_IFEXTENTS)) && + (!(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC))) { + if ((error = xfs_inactive_free_eofblocks(mp, ip))) + return (error); + /* Update linux inode block count after free above */ + LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, + ip->i_d.di_nblocks + ip->i_delayed_blks); + } + } + + return 0; +} + +/* + * xfs_inactive + * + * This is called when the vnode reference count for the vnode + * goes to zero. If the file has been unlinked, then it must + * now be truncated. Also, we clear all of the read-ahead state + * kept for the inode here since the file is now closed. + */ +STATIC int +xfs_inactive( + bhv_desc_t *bdp, + cred_t *credp) +{ + xfs_inode_t *ip; + vnode_t *vp; + xfs_trans_t *tp; + xfs_mount_t *mp; + int error; + int commit_flags; + int truncate; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + + /* + * If the inode is already free, then there can be nothing + * to clean up here. + */ + if (ip->i_d.di_mode == 0) { + ASSERT(ip->i_df.if_real_bytes == 0); + ASSERT(ip->i_df.if_broot_bytes == 0); + return VN_INACTIVE_CACHE; + } + + /* + * Only do a truncate if it's a regular file with + * some actual space in it. It's OK to look at the + * inode's fields without the lock because we're the + * only one with a reference to the inode. 
+ */ + truncate = ((ip->i_d.di_nlink == 0) && + ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0)) && + ((ip->i_d.di_mode & IFMT) == IFREG)); + + mp = ip->i_mount; + + if (ip->i_d.di_nlink == 0 && + DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_DESTROY)) { + (void) XFS_SEND_DESTROY(mp, bdp, DM_RIGHT_NULL); + } + + error = 0; + + /* If this is a read-only mount, don't do this (would generate I/O) */ + if (vp->v_vfsp->vfs_flag & VFS_RDONLY) + goto out; + + if (ip->i_d.di_nlink != 0) { + if ((((ip->i_d.di_mode & IFMT) == IFREG) && + ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && + (ip->i_df.if_flags & XFS_IFEXTENTS)) && + (!(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) || + (ip->i_delayed_blks != 0))) { + if ((error = xfs_inactive_free_eofblocks(mp, ip))) + return (VN_INACTIVE_CACHE); + /* Update linux inode block count after free above */ + LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, + ip->i_d.di_nblocks + ip->i_delayed_blks); + } + goto out; + } + + ASSERT(ip->i_d.di_nlink == 0); + + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return (VN_INACTIVE_CACHE); + + tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); + if (truncate) { + /* + * Do the xfs_itruncate_start() call before + * reserving any log space because itruncate_start + * will call into the buffer cache and we can't + * do that within a transaction. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0); + + error = xfs_trans_reserve(tp, 0, + XFS_ITRUNCATE_LOG_RES(mp), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + if (error) { + /* Don't call itruncate_cleanup */ + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return (VN_INACTIVE_CACHE); + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + /* + * normally, we have to run xfs_itruncate_finish sync. + * But if filesystem is wsync and we're in the inactive + * path, then we know that nlink == 0, and that the + * xaction that made nlink == 0 is permanently committed + * since xfs_remove runs as a synchronous transaction. + */ + error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, + (!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0)); + commit_flags = XFS_TRANS_RELEASE_LOG_RES; + + if (error) { + xfs_trans_cancel(tp, commit_flags | XFS_TRANS_ABORT); + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + return (VN_INACTIVE_CACHE); + } + } else if ((ip->i_d.di_mode & IFMT) == IFLNK) { + + /* + * If we get an error while cleaning up a + * symlink we bail out. + */ + error = (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) ? + xfs_inactive_symlink_rmt(ip, &tp) : + xfs_inactive_symlink_local(ip, &tp); + + if (error) { + ASSERT(tp == NULL); + return (VN_INACTIVE_CACHE); + } + + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + commit_flags = XFS_TRANS_RELEASE_LOG_RES; + + } else { + error = xfs_trans_reserve(tp, 0, + XFS_IFREE_LOG_RES(mp), + 0, 0, + XFS_DEFAULT_LOG_COUNT); + if (error) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + return (VN_INACTIVE_CACHE); + } + + xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + commit_flags = 0; + } + + /* + * If there are attributes associated with the file + * then blow them away now. The code calls a routine + * that recursively deconstructs the attribute fork. 
+ * We need to just commit the current transaction + * because we can't use it for xfs_attr_inactive(). + */ + if (ip->i_d.di_anextents > 0) { + error = xfs_inactive_attrs(ip, &tp, &commit_flags); + /* + * If we got an error, the transaction is already + * cancelled, and the inode is unlocked. Just get out. + */ + if (error) + return (VN_INACTIVE_CACHE); + } else if (ip->i_afp) { + xfs_idestroy_fork(ip, XFS_ATTR_FORK); + } + + /* + * Free the inode. + */ + error = xfs_ifree(tp, ip); + if (error) { + /* + * If we fail to free the inode, shut down. The cancel + * might do that, we need to make sure. Otherwise the + * inode might be lost for a long time or forever. + */ + if (!XFS_FORCED_SHUTDOWN(mp)) { + cmn_err(CE_NOTE, + "xfs_inactive: xfs_ifree() returned an error = %d on %s", + error, mp->m_fsname); + xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR); + } + xfs_trans_cancel(tp, commit_flags | XFS_TRANS_ABORT); + } else { + /* + * Credit the quota account(s). The inode is gone. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1); + + /* + * Just ignore errors at this point. There is + * nothing we can do except to try to keep going. + */ + (void) xfs_trans_commit(tp, commit_flags, NULL); + } + /* + * Release the dquots held by inode, if any. + */ + XFS_QM_DQDETACH(mp, ip); + + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + + out: + return VN_INACTIVE_CACHE; +} + + +/* + * xfs_lookup + */ +STATIC int +xfs_lookup( + bhv_desc_t *dir_bdp, + vname_t *dentry, + vnode_t **vpp, + int flags, + vnode_t *rdir, + cred_t *credp) +{ + xfs_inode_t *dp, *ip; + xfs_ino_t e_inum; + int error; + uint lock_mode; + vnode_t *dir_vp; + + dir_vp = BHV_TO_VNODE(dir_bdp); + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + dp = XFS_BHVTOI(dir_bdp); + + if (XFS_FORCED_SHUTDOWN(dp->i_mount)) + return XFS_ERROR(EIO); + + lock_mode = xfs_ilock_map_shared(dp); + error = xfs_dir_lookup_int(dir_bdp, lock_mode, dentry, &e_inum, &ip); + if (!error) { + *vpp = XFS_ITOV(ip); + ITRACE(ip); + } + xfs_iunlock_map_shared(dp, lock_mode); + return error; +} + + +#define XFS_CREATE_NEW_MAXTRIES 10000 + +/* + * xfs_create (create a new file). + */ +STATIC int +xfs_create( + bhv_desc_t *dir_bdp, + vname_t *dentry, + vattr_t *vap, + vnode_t **vpp, + cred_t *credp) +{ + char *name = VNAME(dentry); + vnode_t *dir_vp; + xfs_inode_t *dp, *ip; + vnode_t *vp=NULL; + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_dev_t rdev; + int error; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + boolean_t dp_joined_to_trans; + int dm_event_sent = 0; + uint cancel_flags; + int committed; + xfs_prid_t prid; + struct xfs_dquot *udqp, *gdqp; + uint resblks; + int dm_di_mode; + int namelen; + + ASSERT(!*vpp); + dir_vp = BHV_TO_VNODE(dir_bdp); + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + dp = XFS_BHVTOI(dir_bdp); + mp = dp->i_mount; + + dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); + namelen = VNAMELEN(dentry); + + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, + dir_bdp, DM_RIGHT_NULL, NULL, + DM_RIGHT_NULL, name, NULL, + dm_di_mode, 0, 0); + + if (error) + return error; + dm_event_sent = 1; + } + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + /* Return through std_return after this point. */ + + udqp = gdqp = NULL; + if (vap->va_mask & XFS_AT_PROJID) + prid = (xfs_prid_t)vap->va_projid; + else + prid = (xfs_prid_t)dfltprid; + + /* + * Make sure that we have allocated dquot(s) on disk. 
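 *
 * [Editorial aside, not part of the original patch: the log reservation
 * made just below is attempted optimistically with a full block count and
 * retried with zero blocks on ENOSPC -- the same fallback recurs in
 * xfs_remove and xfs_link. A generic standalone shape, with the reserve
 * step passed in as a callback and an invented errno constant:]
 */

#define DEMO_ENOSPC 28

static int demo_reserve_with_fallback(int (*reserve)(int blocks),
				      int want, int *got)
{
	int error;

	*got = want;
	error = reserve(want);		/* optimistic: full reservation */
	if (error == DEMO_ENOSPC) {
		*got = 0;
		error = reserve(0);	/* degraded, allocation-free path */
	}
	return error;
}

/*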
+ */
+ error = XFS_QM_DQVOPALLOC(mp, dp, current->fsuid, current->fsgid,
+ XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp);
+ if (error)
+ goto std_return;
+
+ ip = NULL;
+ dp_joined_to_trans = B_FALSE;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
+ cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+ resblks = XFS_CREATE_SPACE_RES(mp, namelen);
+ /*
+ * Initially assume that the file does not exist and
+ * reserve the resources for that case. If that is not
+ * the case we'll drop the one we have and get a more
+ * appropriate transaction later.
+ */
+ error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0,
+ XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT);
+ if (error == ENOSPC) {
+ resblks = 0;
+ error = xfs_trans_reserve(tp, 0, XFS_CREATE_LOG_RES(mp), 0,
+ XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT);
+ }
+ if (error) {
+ cancel_flags = 0;
+ dp = NULL;
+ goto error_return;
+ }
+
+ xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+ XFS_BMAP_INIT(&free_list, &first_block);
+
+ ASSERT(ip == NULL);
+
+ /*
+ * Reserve disk quota and the inode.
+ */
+ error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
+ if (error)
+ goto error_return;
+
+ if (resblks == 0 &&
+ (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen)))
+ goto error_return;
+ rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
+ error = xfs_dir_ialloc(&tp, dp,
+ MAKEIMODE(vap->va_type,vap->va_mode), 1,
+ rdev, credp, prid, resblks > 0,
+ &ip, &committed);
+ if (error) {
+ if (error == ENOSPC)
+ goto error_return;
+ goto abort_return;
+ }
+ ITRACE(ip);
+
+ /*
+ * At this point, we've gotten a newly allocated inode.
+ * It is locked (and joined to the transaction).
+ */
+
+ ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE));
+
+ /*
+ * Now we join the directory inode to the transaction.
+ * We do not do it earlier because xfs_dir_ialloc
+ * might commit the previous transaction (and release
+ * all the locks).
+ */
+
+ VN_HOLD(dir_vp);
+ xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+ dp_joined_to_trans = B_TRUE;
+
+ error = XFS_DIR_CREATENAME(mp, tp, dp, name, namelen, ip->i_ino,
+ &first_block, &free_list,
+ resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
+ if (error) {
+ ASSERT(error != ENOSPC);
+ goto abort_return;
+ }
+ xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+
+ /*
+ * If this is a synchronous mount, make sure that the
+ * create transaction goes to disk before returning to
+ * the user.
+ */
+ if (mp->m_flags & XFS_MOUNT_WSYNC) {
+ xfs_trans_set_sync(tp);
+ }
+
+ dp->i_gen++;
+
+ /*
+ * Attach the dquot(s) to the inodes and modify them incore.
+ * These ids of the inode couldn't have changed since the new
+ * inode has been locked ever since it was created.
+ */
+ XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp);
+
+ /*
+ * xfs_trans_commit normally decrements the vnode ref count
+ * when it unlocks the inode. Since we want to return the
+ * vnode to the caller, we bump the vnode ref count now.
+ */
+ IHOLD(ip);
+ vp = XFS_ITOV(ip);
+
+ error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+ if (error) {
+ xfs_bmap_cancel(&free_list);
+ goto abort_rele;
+ }
+
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ if (error) {
+ IRELE(ip);
+ tp = NULL;
+ goto error_return;
+ }
+
+ XFS_QM_DQRELE(mp, udqp);
+ XFS_QM_DQRELE(mp, gdqp);
+
+ /*
+ * Propagate the fact that the vnode changed after the
+ * xfs_inode locks have been released.
+ */
+ VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3);
+
+ *vpp = vp;
+
+ /* Fallthrough to std_return with error = 0 */
+
+std_return:
+ if ( (*vpp || (error != 0 && dm_event_sent != 0)) &&
+ DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
+ DM_EVENT_POSTCREATE)) {
+ (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
+ dir_bdp, DM_RIGHT_NULL,
+ *vpp ? vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops):NULL,
+ DM_RIGHT_NULL, name, NULL,
+ dm_di_mode, error, 0);
+ }
+ return error;
+
+ abort_return:
+ cancel_flags |= XFS_TRANS_ABORT;
+ /* FALLTHROUGH */
+ error_return:
+
+ if (tp != NULL)
+ xfs_trans_cancel(tp, cancel_flags);
+
+ if (!dp_joined_to_trans && (dp != NULL))
+ xfs_iunlock(dp, XFS_ILOCK_EXCL);
+ XFS_QM_DQRELE(mp, udqp);
+ XFS_QM_DQRELE(mp, gdqp);
+
+ goto std_return;
+
+ abort_rele:
+ /*
+ * Wait until after the current transaction is aborted to
+ * release the inode. This prevents recursive transactions
+ * and deadlocks from xfs_inactive.
+ */
+ cancel_flags |= XFS_TRANS_ABORT;
+ xfs_trans_cancel(tp, cancel_flags);
+ IRELE(ip);
+
+ XFS_QM_DQRELE(mp, udqp);
+ XFS_QM_DQRELE(mp, gdqp);
+
+ goto std_return;
+}
+
+#ifdef DEBUG
+/*
+ * Some counters to see if (and how often) we are hitting some deadlock
+ * prevention code paths.
+ */
+
+int xfs_rm_locks;
+int xfs_rm_lock_delays;
+int xfs_rm_attempts;
+#endif
+
+/*
+ * The following routine will lock the inodes associated with the
+ * directory and the named entry in the directory. The locks are
+ * acquired in increasing inode number.
+ *
+ * If the entry is "..", then only the directory is locked. The
+ * vnode ref count will still include that from the .. entry in
+ * this case.
+ *
+ * There is a deadlock we need to worry about. If the locked directory is
+ * in the AIL, it might be blocking up the log. The next inode we lock
+ * could be already locked by another thread waiting for log space (e.g.
+ * a permanent log reservation with a long running transaction (see
+ * xfs_itruncate_finish)). To solve this, we must check if the directory
+ * is in the ail and use lock_nowait. If we can't lock, we need to
+ * drop the inode lock on the directory and try again. xfs_iunlock will
+ * potentially push the tail if we were holding up the log.
+ */
+STATIC int
+xfs_lock_dir_and_entry(
+ xfs_inode_t *dp,
+ vname_t *dentry,
+ xfs_inode_t *ip) /* inode of entry 'name' */
+{
+ int attempts;
+ xfs_ino_t e_inum;
+ xfs_inode_t *ips[2];
+ xfs_log_item_t *lp;
+
+#ifdef DEBUG
+ xfs_rm_locks++;
+#endif
+ attempts = 0;
+
+again:
+ xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+ e_inum = ip->i_ino;
+
+ ITRACE(ip);
+
+ /*
+ * We want to lock in increasing inum. Since we've already
+ * acquired the lock on the directory, we may need to release
+ * it if the inum of the entry turns out to be less.
+ */
+ if (e_inum > dp->i_ino) {
+ /*
+ * We are already in the right order, so just
+ * lock on the inode of the entry.
+ * We need to use nowait if dp is in the AIL.
+ */
+
+ lp = (xfs_log_item_t *)dp->i_itemp;
+ if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
+ if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
+ attempts++;
+#ifdef DEBUG
+ xfs_rm_attempts++;
+#endif
+
+ /*
+ * Unlock dp and try again.
+ * xfs_iunlock will try to push the tail
+ * if the inode is in the AIL.
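 *
 * [Editorial aside, not part of the original patch: the retry loop here
 * (and the n-inode version in xfs_lock_inodes below) is an ordinary
 * trylock back-off. A self-contained pthreads model -- the mutex calls
 * are stand-ins for xfs_ilock/xfs_ilock_nowait:]
 */

#include <pthread.h>
#include <unistd.h>

static void demo_lock_pair(pthread_mutex_t *first, pthread_mutex_t *second)
{
	int attempts = 0;

	for (;;) {
		pthread_mutex_lock(first);
		if (pthread_mutex_trylock(second) == 0)
			return;			/* both locks held */
		pthread_mutex_unlock(first);	/* back off: avoid deadlock */
		if ((++attempts % 5) == 0)
			usleep(1000);		/* don't just spin the CPU */
	}
}

/*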
+ */ + + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + if ((attempts % 5) == 0) { + delay(1); /* Don't just spin the CPU */ +#ifdef DEBUG + xfs_rm_lock_delays++; +#endif + } + goto again; + } + } else { + xfs_ilock(ip, XFS_ILOCK_EXCL); + } + } else if (e_inum < dp->i_ino) { + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + ips[0] = ip; + ips[1] = dp; + xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); + } + /* else e_inum == dp->i_ino */ + /* This can happen if we're asked to lock /x/.. + * the entry is "..", which is also the parent directory. + */ + + return 0; +} + +#ifdef DEBUG +int xfs_locked_n; +int xfs_small_retries; +int xfs_middle_retries; +int xfs_lots_retries; +int xfs_lock_delays; +#endif + +/* + * The following routine will lock n inodes in exclusive mode. + * We assume the caller calls us with the inodes in i_ino order. + * + * We need to detect deadlock where an inode that we lock + * is in the AIL and we start waiting for another inode that is locked + * by a thread in a long running transaction (such as truncate). This can + * result in deadlock since the long running trans might need to wait + * for the inode we just locked in order to push the tail and free space + * in the log. + */ +void +xfs_lock_inodes( + xfs_inode_t **ips, + int inodes, + int first_locked, + uint lock_mode) +{ + int attempts = 0, i, j, try_lock; + xfs_log_item_t *lp; + + ASSERT(ips && (inodes >= 2)); /* we need at least two */ + + if (first_locked) { + try_lock = 1; + i = 1; + } else { + try_lock = 0; + i = 0; + } + +again: + for (; i < inodes; i++) { + ASSERT(ips[i]); + + if (i && (ips[i] == ips[i-1])) /* Already locked */ + continue; + + /* + * If try_lock is not set yet, make sure all locked inodes + * are not in the AIL. + * If any are, set try_lock to be used later. + */ + + if (!try_lock) { + for (j = (i - 1); j >= 0 && !try_lock; j--) { + lp = (xfs_log_item_t *)ips[j]->i_itemp; + if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { + try_lock++; + } + } + } + + /* + * If any of the previous locks we have locked is in the AIL, + * we must TRY to get the second and subsequent locks. If + * we can't get any, we must release all we have + * and try again. + */ + + if (try_lock) { + /* try_lock must be 0 if i is 0. */ + /* + * try_lock means we have an inode locked + * that is in the AIL. + */ + ASSERT(i != 0); + if (!xfs_ilock_nowait(ips[i], lock_mode)) { + attempts++; + + /* + * Unlock all previous guys and try again. + * xfs_iunlock will try to push the tail + * if the inode is in the AIL. + */ + + for(j = i - 1; j >= 0; j--) { + + /* + * Check to see if we've already + * unlocked this one. + * Not the first one going back, + * and the inode ptr is the same. + */ + if ((j != (i - 1)) && ips[j] == + ips[j+1]) + continue; + + xfs_iunlock(ips[j], lock_mode); + } + + if ((attempts % 5) == 0) { + delay(1); /* Don't just spin the CPU */ +#ifdef DEBUG + xfs_lock_delays++; +#endif + } + i = 0; + try_lock = 0; + goto again; + } + } else { + xfs_ilock(ips[i], lock_mode); + } + } + +#ifdef DEBUG + if (attempts) { + if (attempts < 5) xfs_small_retries++; + else if (attempts < 100) xfs_middle_retries++; + else xfs_lots_retries++; + } else { + xfs_locked_n++; + } +#endif +} + +#ifdef DEBUG +#define REMOVE_DEBUG_TRACE(x) {remove_which_error_return = (x);} +int remove_which_error_return = 0; +#else /* ! DEBUG */ +#define REMOVE_DEBUG_TRACE(x) +#endif /* ! 
DEBUG */
+
+
+/*
+ * xfs_remove
+ *
+ */
+STATIC int
+xfs_remove(
+ bhv_desc_t *dir_bdp,
+ vname_t *dentry,
+ cred_t *credp)
+{
+ vnode_t *dir_vp;
+ char *name = VNAME(dentry);
+ xfs_inode_t *dp, *ip;
+ xfs_trans_t *tp = NULL;
+ xfs_mount_t *mp;
+ int error = 0;
+ xfs_bmap_free_t free_list;
+ xfs_fsblock_t first_block;
+ int cancel_flags;
+ int committed;
+ int dm_di_mode = 0;
+ int link_zero;
+ uint resblks;
+ int namelen;
+
+ dir_vp = BHV_TO_VNODE(dir_bdp);
+ vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+
+ dp = XFS_BHVTOI(dir_bdp);
+ mp = dp->i_mount;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return XFS_ERROR(EIO);
+
+ namelen = VNAMELEN(dentry);
+
+ if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
+ error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_bdp,
+ DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
+ name, NULL, 0, 0, 0);
+ if (error)
+ return error;
+ }
+
+ /* From this point on, return through std_return */
+ ip = NULL;
+
+ /*
+ * We need to get a reference to ip before we get our log
+ * reservation. The reason for this is that we cannot call
+ * xfs_iget for an inode for which we do not have a reference
+ * once we've acquired a log reservation. This is because the
+ * inode we are trying to get might be in xfs_inactive going
+ * for a log reservation. Since we'll have to wait for the
+ * inactive code to complete before returning from xfs_iget,
+ * we need to make sure that we don't have log space reserved
+ * when we call xfs_iget. Instead we get an unlocked reference
+ * to the inode before getting our log reservation.
+ */
+ error = xfs_get_dir_entry(dentry, &ip);
+ if (error) {
+ REMOVE_DEBUG_TRACE(__LINE__);
+ goto std_return;
+ }
+
+ dm_di_mode = ip->i_d.di_mode;
+
+ vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
+
+ ITRACE(ip);
+
+ error = XFS_QM_DQATTACH(mp, dp, 0);
+ if (!error && dp != ip)
+ error = XFS_QM_DQATTACH(mp, ip, 0);
+ if (error) {
+ REMOVE_DEBUG_TRACE(__LINE__);
+ IRELE(ip);
+ goto std_return;
+ }
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
+ cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+ /*
+ * We try to get the real space reservation first,
+ * allowing for directory btree deletion(s) implying
+ * possible bmap insert(s). If we can't get the space
+ * reservation then we use 0 instead, and avoid the bmap
+ * btree insert(s) in the directory code by, if the bmap
+ * insert tries to happen, instead trimming the LAST
+ * block from the directory.
+ */
+ resblks = XFS_REMOVE_SPACE_RES(mp);
+ error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
+ XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT);
+ if (error == ENOSPC) {
+ resblks = 0;
+ error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
+ XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT);
+ }
+ if (error) {
+ ASSERT(error != ENOSPC);
+ REMOVE_DEBUG_TRACE(__LINE__);
+ xfs_trans_cancel(tp, 0);
+ IRELE(ip);
+ return error;
+ }
+
+ error = xfs_lock_dir_and_entry(dp, dentry, ip);
+ if (error) {
+ REMOVE_DEBUG_TRACE(__LINE__);
+ xfs_trans_cancel(tp, cancel_flags);
+ IRELE(ip);
+ goto std_return;
+ }
+
+ /*
+ * At this point, we've gotten both the directory and the entry
+ * inodes locked.
+ */
+ xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+ if (dp != ip) {
+ /*
+ * Increment vnode ref count only in this case since
+ * there's an extra vnode reference in the case where
+ * dp == ip.
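 *
 * [Editorial aside, not part of the original patch: the asymmetric IHOLD
 * is reference arithmetic -- commit/cancel drop one reference per joined
 * inode, and ip already carries the reference taken at lookup time, so
 * only dp needs an extra hold, and only when it is a distinct inode. A
 * toy restatement with an invented refcount type:]
 */

struct demo_obj { int refs; };

static void demo_join_both(struct demo_obj *dp, struct demo_obj *ip)
{
	/* ip was referenced by the earlier lookup; the commit consumes it */
	if (dp != ip)
		dp->refs++;	/* the directory's join needs its own hold */
}

/*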
+ */ + IHOLD(dp); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + } + + if ((error = _MAC_XFS_IACCESS(ip, MACWRITE, credp))) { + REMOVE_DEBUG_TRACE(__LINE__); + goto error_return; + } + + /* + * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. + */ + XFS_BMAP_INIT(&free_list, &first_block); + error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, ip->i_ino, + &first_block, &free_list, 0); + if (error) { + ASSERT(error != ENOENT); + REMOVE_DEBUG_TRACE(__LINE__); + goto error1; + } + xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + dp->i_gen++; + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + + error = xfs_droplink(tp, ip); + if (error) { + REMOVE_DEBUG_TRACE(__LINE__); + goto error1; + } + + /* Determine if this is the last link while + * we are in the transaction. + */ + link_zero = (ip)->i_d.di_nlink==0; + + /* + * Take an extra ref on the inode so that it doesn't + * go to xfs_inactive() from within the commit. + */ + IHOLD(ip); + + /* + * If this is a synchronous mount, make sure that the + * remove transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); + if (error) { + REMOVE_DEBUG_TRACE(__LINE__); + goto error_rele; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (error) { + IRELE(ip); + goto std_return; + } + + /* + * Before we drop our extra reference to the inode, purge it + * from the refcache if it is there. By waiting until afterwards + * to do the IRELE, we ensure that we won't go inactive in the + * xfs_refcache_purge_ip routine (although that would be OK). + */ + xfs_refcache_purge_ip(ip); + + vn_trace_exit(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); + + /* + * Let interposed file systems know about removed links. + */ + VOP_LINK_REMOVED(XFS_ITOV(ip), dir_vp, link_zero); + + IRELE(ip); + +/* Fall through to std_return with error = 0 */ + std_return: + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, + DM_EVENT_POSTREMOVE)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, + dir_bdp, DM_RIGHT_NULL, + NULL, DM_RIGHT_NULL, + name, NULL, dm_di_mode, error, 0); + } + return error; + + error1: + xfs_bmap_cancel(&free_list); + cancel_flags |= XFS_TRANS_ABORT; + + error_return: + xfs_trans_cancel(tp, cancel_flags); + goto std_return; + + error_rele: + /* + * In this case make sure to not release the inode until after + * the current transaction is aborted. Releasing it beforehand + * can cause us to go to xfs_inactive and start a recursive + * transaction which can easily deadlock with the current one. + */ + xfs_bmap_cancel(&free_list); + cancel_flags |= XFS_TRANS_ABORT; + xfs_trans_cancel(tp, cancel_flags); + + /* + * Before we drop our extra reference to the inode, purge it + * from the refcache if it is there. By waiting until afterwards + * to do the IRELE, we ensure that we won't go inactive in the + * xfs_refcache_purge_ip routine (although that would be OK). 
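/*
 * The reservation pattern in xfs_remove above (repeated in xfs_rmdir
 * below) -- ask for the full block count first, and on ENOSPC retry
 * with zero blocks so the directory code trims a block instead of
 * splitting one -- reduces to a small helper. This is an illustrative
 * sketch with invented names, not an XFS interface.
 */
#include <errno.h>

struct txn;                             /* opaque, hypothetical */
int txn_reserve(struct txn *tp, unsigned int blocks, unsigned int log_res);

int reserve_with_fallback(struct txn *tp, unsigned int full_blocks,
                          unsigned int log_res, unsigned int *resblks)
{
        int error;

        *resblks = full_blocks;
        error = txn_reserve(tp, full_blocks, log_res);
        if (error == ENOSPC) {
                /* Degraded mode: callers must test *resblks == 0 and
                 * avoid any operation that would need new blocks. */
                *resblks = 0;
                error = txn_reserve(tp, 0, log_res);
        }
        return error;
}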
+ */ + xfs_refcache_purge_ip(ip); + + IRELE(ip); + + goto std_return; +} + + +/* + * xfs_link + * + */ +STATIC int +xfs_link( + bhv_desc_t *target_dir_bdp, + vnode_t *src_vp, + vname_t *dentry, + cred_t *credp) +{ + xfs_inode_t *tdp, *sip; + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_inode_t *ips[2]; + int error; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + int cancel_flags; + int committed; + vnode_t *target_dir_vp; + bhv_desc_t *src_bdp; + int resblks; + char *target_name = VNAME(dentry); + int target_namelen; + + target_dir_vp = BHV_TO_VNODE(target_dir_bdp); + vn_trace_entry(target_dir_vp, __FUNCTION__, (inst_t *)__return_address); + vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address); + + target_namelen = VNAMELEN(dentry); + if (src_vp->v_type == VDIR) + return XFS_ERROR(EPERM); + + /* + * For now, manually find the XFS behavior descriptor for + * the source vnode. If it doesn't exist then something + * is wrong and we should just return an error. + * Eventually we need to figure out how link is going to + * work in the face of stacked vnodes. + */ + src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops); + if (src_bdp == NULL) { + return XFS_ERROR(EXDEV); + } + sip = XFS_BHVTOI(src_bdp); + tdp = XFS_BHVTOI(target_dir_bdp); + mp = tdp->i_mount; + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + if (DM_EVENT_ENABLED(src_vp->v_vfsp, tdp, DM_EVENT_LINK)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK, + target_dir_bdp, DM_RIGHT_NULL, + src_bdp, DM_RIGHT_NULL, + target_name, NULL, 0, 0, 0); + if (error) + return error; + } + + /* Return through std_return after this point. */ + + error = XFS_QM_DQATTACH(mp, sip, 0); + if (!error && sip != tdp) + error = XFS_QM_DQATTACH(mp, tdp, 0); + if (error) + goto std_return; + + tp = xfs_trans_alloc(mp, XFS_TRANS_LINK); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + resblks = XFS_LINK_SPACE_RES(mp, target_namelen); + error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT); + if (error == ENOSPC) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_LINK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT); + } + if (error) { + cancel_flags = 0; + goto error_return; + } + + if (sip->i_ino < tdp->i_ino) { + ips[0] = sip; + ips[1] = tdp; + } else { + ips[0] = tdp; + ips[1] = sip; + } + + xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); + + /* + * Increment vnode ref counts since xfs_trans_commit & + * xfs_trans_cancel will both unlock the inodes and + * decrement the associated ref counts. + */ + VN_HOLD(src_vp); + VN_HOLD(target_dir_vp); + xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL); + + /* + * If the source has too many links, we can't make any more to it. + */ + if (sip->i_d.di_nlink >= XFS_MAXLINK) { + error = XFS_ERROR(EMLINK); + goto error_return; + } + + if (resblks == 0 && + (error = XFS_DIR_CANENTER(mp, tp, tdp, target_name, + target_namelen))) + goto error_return; + + XFS_BMAP_INIT(&free_list, &first_block); + + error = XFS_DIR_CREATENAME(mp, tp, tdp, target_name, target_namelen, + sip->i_ino, &first_block, &free_list, + resblks); + if (error) + goto abort_return; + xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + tdp->i_gen++; + xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); + + error = xfs_bumplink(tp, sip); + if (error) { + goto abort_return; + } + + /* + * If this is a synchronous mount, make sure that the + * link transaction goes to disk before returning to + * the user. 
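/*
 * The i_ino comparison in xfs_link above is the two-lock special case
 * of the same ordering rule: always take the lock with the smaller id
 * first. A hypothetical helper, reusing the lobj model from the
 * earlier sketch:
 */
void lock_pair(struct lobj *a, struct lobj *b)
{
        struct lobj *first = (a->id < b->id) ? a : b;
        struct lobj *second = (a->id < b->id) ? b : a;

        pthread_mutex_lock(&first->lock);
        if (second != first)            /* the two may alias, like dp == ip */
                pthread_mutex_lock(&second->lock);
}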
+ */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_bmap_finish (&tp, &free_list, first_block, &committed); + if (error) { + xfs_bmap_cancel(&free_list); + goto abort_return; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (error) { + goto std_return; + } + + /* Fall through to std_return with error = 0. */ +std_return: + if (DM_EVENT_ENABLED(src_vp->v_vfsp, sip, + DM_EVENT_POSTLINK)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK, + target_dir_bdp, DM_RIGHT_NULL, + src_bdp, DM_RIGHT_NULL, + target_name, NULL, 0, error, 0); + } + return error; + + abort_return: + cancel_flags |= XFS_TRANS_ABORT; + /* FALLTHROUGH */ + error_return: + xfs_trans_cancel(tp, cancel_flags); + + goto std_return; +} +/* + * xfs_mkdir + * + */ +STATIC int +xfs_mkdir( + bhv_desc_t *dir_bdp, + vname_t *dentry, + vattr_t *vap, + vnode_t **vpp, + cred_t *credp) +{ + char *dir_name = VNAME(dentry); + xfs_inode_t *dp; + xfs_inode_t *cdp; /* inode of created dir */ + vnode_t *cvp; /* vnode of created dir */ + xfs_trans_t *tp; + xfs_dev_t rdev; + xfs_mount_t *mp; + int cancel_flags; + int error; + int committed; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + vnode_t *dir_vp; + boolean_t dp_joined_to_trans; + boolean_t created = B_FALSE; + int dm_event_sent = 0; + xfs_prid_t prid; + struct xfs_dquot *udqp, *gdqp; + uint resblks; + int dm_di_mode; + int dir_namelen; + + dir_vp = BHV_TO_VNODE(dir_bdp); + dp = XFS_BHVTOI(dir_bdp); + mp = dp->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + dir_namelen = VNAMELEN(dentry); + + tp = NULL; + dp_joined_to_trans = B_FALSE; + dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); + + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, + dir_bdp, DM_RIGHT_NULL, NULL, + DM_RIGHT_NULL, dir_name, NULL, + dm_di_mode, 0, 0); + if (error) + return error; + dm_event_sent = 1; + } + + /* Return through std_return after this point. */ + + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + mp = dp->i_mount; + udqp = gdqp = NULL; + if (vap->va_mask & XFS_AT_PROJID) + prid = (xfs_prid_t)vap->va_projid; + else + prid = (xfs_prid_t)dfltprid; + + /* + * Make sure that we have allocated dquot(s) on disk. + */ + error = XFS_QM_DQVOPALLOC(mp, dp, current->fsuid, current->fsgid, + XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); + if (error) + goto std_return; + + tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + resblks = XFS_MKDIR_SPACE_RES(mp, dir_namelen); + error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT); + if (error == ENOSPC) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_MKDIR_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_MKDIR_LOG_COUNT); + } + if (error) { + cancel_flags = 0; + dp = NULL; + goto error_return; + } + + xfs_ilock(dp, XFS_ILOCK_EXCL); + + /* + * Check for directory link count overflow. + */ + if (dp->i_d.di_nlink >= XFS_MAXLINK) { + error = XFS_ERROR(EMLINK); + goto error_return; + } + + /* + * Reserve disk quota and the inode. + */ + error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); + if (error) + goto error_return; + + if (resblks == 0 && + (error = XFS_DIR_CANENTER(mp, tp, dp, dir_name, dir_namelen))) + goto error_return; + /* + * create the directory inode. + */ + rdev = (vap->va_mask & XFS_AT_RDEV) ? 
vap->va_rdev : 0; + error = xfs_dir_ialloc(&tp, dp, + MAKEIMODE(vap->va_type,vap->va_mode), 2, + rdev, credp, prid, resblks > 0, + &cdp, NULL); + if (error) { + if (error == ENOSPC) + goto error_return; + goto abort_return; + } + ITRACE(cdp); + + /* + * Now we add the directory inode to the transaction. + * We waited until now since xfs_dir_ialloc might start + * a new transaction. Had we joined the transaction + * earlier, the locks might have gotten released. + */ + VN_HOLD(dir_vp); + xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); + dp_joined_to_trans = B_TRUE; + + XFS_BMAP_INIT(&free_list, &first_block); + + error = XFS_DIR_CREATENAME(mp, tp, dp, dir_name, dir_namelen, + cdp->i_ino, &first_block, &free_list, + resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0); + if (error) { + ASSERT(error != ENOSPC); + goto error1; + } + xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + /* + * Bump the in memory version number of the parent directory + * so that other processes accessing it will recognize that + * the directory has changed. + */ + dp->i_gen++; + + error = XFS_DIR_INIT(mp, tp, cdp, dp); + if (error) { + goto error2; + } + + cdp->i_gen = 1; + error = xfs_bumplink(tp, dp); + if (error) { + goto error2; + } + + cvp = XFS_ITOV(cdp); + + created = B_TRUE; + + *vpp = cvp; + IHOLD(cdp); + + /* + * Attach the dquots to the new inode and modify the icount incore. + */ + XFS_QM_DQVOPCREATE(mp, tp, cdp, udqp, gdqp); + + /* + * If this is a synchronous mount, make sure that the + * mkdir transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); + if (error) { + IRELE(cdp); + goto error2; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + if (error) { + IRELE(cdp); + } + + /* Fall through to std_return with error = 0 or errno from + * xfs_trans_commit. */ + +std_return: + if ( (created || (error != 0 && dm_event_sent != 0)) && + DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp), + DM_EVENT_POSTCREATE)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, + dir_bdp, DM_RIGHT_NULL, + created ? 
XFS_ITOBHV(cdp):NULL, + DM_RIGHT_NULL, + dir_name, NULL, + dm_di_mode, error, 0); + } + return error; + + error2: + error1: + xfs_bmap_cancel(&free_list); + abort_return: + cancel_flags |= XFS_TRANS_ABORT; + error_return: + xfs_trans_cancel(tp, cancel_flags); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + if (!dp_joined_to_trans && (dp != NULL)) { + xfs_iunlock(dp, XFS_ILOCK_EXCL); + } + + goto std_return; +} + + +/* + * xfs_rmdir + * + */ +STATIC int +xfs_rmdir( + bhv_desc_t *dir_bdp, + vname_t *dentry, + cred_t *credp) +{ + char *name = VNAME(dentry); + xfs_inode_t *dp; + xfs_inode_t *cdp; /* child directory */ + xfs_trans_t *tp; + xfs_mount_t *mp; + int error; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + int cancel_flags; + int committed; + vnode_t *dir_vp; + int dm_di_mode = 0; + int last_cdp_link; + int namelen; + uint resblks; + + dir_vp = BHV_TO_VNODE(dir_bdp); + dp = XFS_BHVTOI(dir_bdp); + mp = dp->i_mount; + + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + if (XFS_FORCED_SHUTDOWN(XFS_BHVTOI(dir_bdp)->i_mount)) + return XFS_ERROR(EIO); + namelen = VNAMELEN(dentry); + + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, + dir_bdp, DM_RIGHT_NULL, + NULL, DM_RIGHT_NULL, + name, NULL, 0, 0, 0); + if (error) + return XFS_ERROR(error); + } + + /* Return through std_return after this point. */ + + cdp = NULL; + + /* + * We need to get a reference to cdp before we get our log + * reservation. The reason for this is that we cannot call + * xfs_iget for an inode for which we do not have a reference + * once we've acquired a log reservation. This is because the + * inode we are trying to get might be in xfs_inactive going + * for a log reservation. Since we'll have to wait for the + * inactive code to complete before returning from xfs_iget, + * we need to make sure that we don't have log space reserved + * when we call xfs_iget. Instead we get an unlocked referece + * to the inode before getting our log reservation. + */ + error = xfs_get_dir_entry(dentry, &cdp); + if (error) { + REMOVE_DEBUG_TRACE(__LINE__); + goto std_return; + } + mp = dp->i_mount; + dm_di_mode = cdp->i_d.di_mode; + + /* + * Get the dquots for the inodes. + */ + error = XFS_QM_DQATTACH(mp, dp, 0); + if (!error && dp != cdp) + error = XFS_QM_DQATTACH(mp, cdp, 0); + if (error) { + IRELE(cdp); + REMOVE_DEBUG_TRACE(__LINE__); + goto std_return; + } + + tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + /* + * We try to get the real space reservation first, + * allowing for directory btree deletion(s) implying + * possible bmap insert(s). If we can't get the space + * reservation then we use 0 instead, and avoid the bmap + * btree insert(s) in the directory code by, if the bmap + * insert tries to happen, instead trimming the LAST + * block from the directory. + */ + resblks = XFS_REMOVE_SPACE_RES(mp); + error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT); + if (error == ENOSPC) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT); + } + if (error) { + ASSERT(error != ENOSPC); + cancel_flags = 0; + IRELE(cdp); + goto error_return; + } + XFS_BMAP_INIT(&free_list, &first_block); + + /* + * Now lock the child directory inode and the parent directory + * inode in the proper order. 
This will take care of validating + * that the directory entry for the child directory inode has + * not changed while we were obtaining a log reservation. + */ + error = xfs_lock_dir_and_entry(dp, dentry, cdp); + if (error) { + xfs_trans_cancel(tp, cancel_flags); + IRELE(cdp); + goto std_return; + } + + xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); + if (dp != cdp) { + /* + * Only increment the parent directory vnode count if + * we didn't bump it in looking up cdp. The only time + * we don't bump it is when we're looking up ".". + */ + VN_HOLD(dir_vp); + } + + ITRACE(cdp); + xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL); + + if ((error = _MAC_XFS_IACCESS(cdp, MACWRITE, credp))) { + goto error_return; + } + + ASSERT(cdp->i_d.di_nlink >= 2); + if (cdp->i_d.di_nlink != 2) { + error = XFS_ERROR(ENOTEMPTY); + goto error_return; + } + if (!XFS_DIR_ISEMPTY(mp, cdp)) { + error = XFS_ERROR(ENOTEMPTY); + goto error_return; + } + + error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, cdp->i_ino, + &first_block, &free_list, resblks); + if (error) { + goto error1; + } + + xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + /* + * Bump the in memory generation count on the parent + * directory so that other can know that it has changed. + */ + dp->i_gen++; + + /* + * Drop the link from cdp's "..". + */ + error = xfs_droplink(tp, dp); + if (error) { + goto error1; + } + + /* + * Drop the link from dp to cdp. + */ + error = xfs_droplink(tp, cdp); + if (error) { + goto error1; + } + + /* + * Drop the "." link from cdp to self. + */ + error = xfs_droplink(tp, cdp); + if (error) { + goto error1; + } + + /* Determine these before committing transaction */ + last_cdp_link = (cdp)->i_d.di_nlink==0; + + /* + * Take an extra ref on the child vnode so that it + * does not go to xfs_inactive() from within the commit. + */ + IHOLD(cdp); + + /* + * If this is a synchronous mount, make sure that the + * rmdir transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_bmap_finish (&tp, &free_list, first_block, &committed); + if (error) { + xfs_bmap_cancel(&free_list); + xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT)); + IRELE(cdp); + goto std_return; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (error) { + IRELE(cdp); + goto std_return; + } + + + /* + * Let interposed file systems know about removed links. + */ + VOP_LINK_REMOVED(XFS_ITOV(cdp), dir_vp, last_cdp_link); + + IRELE(cdp); + + /* Fall through to std_return with error = 0 or the errno + * from xfs_trans_commit. */ +std_return: + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, + dir_bdp, DM_RIGHT_NULL, + NULL, DM_RIGHT_NULL, + name, NULL, dm_di_mode, + error, 0); + } + return error; + + error1: + xfs_bmap_cancel(&free_list); + cancel_flags |= XFS_TRANS_ABORT; + error_return: + xfs_trans_cancel(tp, cancel_flags); + goto std_return; +} + + +/* + * xfs_readdir + * + * Read dp's entries starting at uiop->uio_offset and translate them into + * bufsize bytes worth of struct dirents starting at bufbase. 
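/*
 * The arithmetic behind the three xfs_droplink() calls in xfs_rmdir
 * above: an empty directory carries nlink == 2 ("." plus its entry in
 * the parent), and the parent carries one extra link for the child's
 * "..". A self-contained model of that accounting (names invented):
 */
#include <assert.h>

struct dnode { int nlink; };

void mkdir_model(struct dnode *parent, struct dnode *child)
{
        child->nlink = 2;       /* "." + the entry in the parent */
        parent->nlink++;        /* the child's ".." points back here */
}

int rmdir_model(struct dnode *parent, struct dnode *child)
{
        if (child->nlink != 2)
                return -1;      /* extra links: ENOTEMPTY in the real code */
        parent->nlink--;        /* drop the link from the child's ".." */
        child->nlink -= 2;      /* drop the parent's entry and "." itself */
        return 0;
}

int main(void)
{
        struct dnode root = { 2 }, sub;

        mkdir_model(&root, &sub);
        assert(root.nlink == 3 && sub.nlink == 2);
        assert(rmdir_model(&root, &sub) == 0);
        assert(root.nlink == 2 && sub.nlink == 0);
        return 0;
}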
+ */ +STATIC int +xfs_readdir( + bhv_desc_t *dir_bdp, + uio_t *uiop, + cred_t *credp, + int *eofp) +{ + xfs_inode_t *dp; + xfs_trans_t *tp = NULL; + int error = 0; + uint lock_mode; + xfs_off_t start_offset; + + vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__, + (inst_t *)__return_address); + dp = XFS_BHVTOI(dir_bdp); + + if (XFS_FORCED_SHUTDOWN(dp->i_mount)) { + return XFS_ERROR(EIO); + } + + lock_mode = xfs_ilock_map_shared(dp); + start_offset = uiop->uio_offset; + error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp); + if (start_offset != uiop->uio_offset) { + xfs_ichgtime(dp, XFS_ICHGTIME_ACC); + } + xfs_iunlock_map_shared(dp, lock_mode); + return error; +} + + +/* + * xfs_symlink + * + */ +STATIC int +xfs_symlink( + bhv_desc_t *dir_bdp, + vname_t *dentry, + vattr_t *vap, + char *target_path, + vnode_t **vpp, + cred_t *credp) +{ + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_inode_t *dp; + xfs_inode_t *ip; + int error; + int pathlen; + xfs_dev_t rdev; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + boolean_t dp_joined_to_trans; + vnode_t *dir_vp; + uint cancel_flags; + int committed; + xfs_fileoff_t first_fsb; + xfs_filblks_t fs_blocks; + int nmaps; + xfs_bmbt_irec_t mval[SYMLINK_MAPS]; + xfs_daddr_t d; + char *cur_chunk; + int byte_cnt; + int n; + xfs_buf_t *bp; + xfs_prid_t prid; + struct xfs_dquot *udqp, *gdqp; + uint resblks; + char *link_name = VNAME(dentry); + int link_namelen; + + *vpp = NULL; + dir_vp = BHV_TO_VNODE(dir_bdp); + dp = XFS_BHVTOI(dir_bdp); + dp_joined_to_trans = B_FALSE; + error = 0; + ip = NULL; + tp = NULL; + + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + mp = dp->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + link_namelen = VNAMELEN(dentry); + + /* + * Check component lengths of the target path name. + */ + pathlen = strlen(target_path); + if (pathlen >= MAXPATHLEN) /* total string too long */ + return XFS_ERROR(ENAMETOOLONG); + if (pathlen >= MAXNAMELEN) { /* is any component too long? */ + int len, total; + char *path; + + for(total = 0, path = target_path; total < pathlen;) { + /* + * Skip any slashes. + */ + while(*path == '/') { + total++; + path++; + } + + /* + * Count up to the next slash or end of path. + * Error out if the component is bigger than MAXNAMELEN. + */ + for(len = 0; *path != '/' && total < pathlen;total++, path++) { + if (++len >= MAXNAMELEN) { + error = ENAMETOOLONG; + return error; + } + } + } + } + + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_SYMLINK)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dir_bdp, + DM_RIGHT_NULL, NULL, DM_RIGHT_NULL, + link_name, target_path, 0, 0, 0); + if (error) + return error; + } + + /* Return through std_return after this point. */ + + udqp = gdqp = NULL; + if (vap->va_mask & XFS_AT_PROJID) + prid = (xfs_prid_t)vap->va_projid; + else + prid = (xfs_prid_t)dfltprid; + + /* + * Make sure that we have allocated dquot(s) on disk. + */ + error = XFS_QM_DQVOPALLOC(mp, dp, current->fsuid, current->fsgid, + XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); + if (error) + goto std_return; + + tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + /* + * The symlink will fit into the inode data fork? + * There can't be any attributes so we get the whole variable part. 
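/*
 * The component-length walk in xfs_symlink above rejects any
 * '/'-separated name longer than MAXNAMELEN before log space is even
 * reserved. The same check as a standalone function; MY_MAXNAMELEN is
 * a stand-in value, and the explicit slash-skipping loop is folded
 * into a single pass:
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MY_MAXNAMELEN 256

int check_components(const char *path)
{
        size_t len = 0;

        for (; *path != '\0'; path++) {
                if (*path == '/') {
                        len = 0;        /* a new component starts */
                        continue;
                }
                if (++len >= MY_MAXNAMELEN)
                        return ENAMETOOLONG;
        }
        return 0;
}

int main(void)
{
        char big[MY_MAXNAMELEN + 8];

        memset(big, 'a', sizeof(big) - 1);
        big[sizeof(big) - 1] = '\0';
        printf("%d %d\n", check_components("/tmp/x"), check_components(big));
        return 0;
}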
+ */ + if (pathlen <= XFS_LITINO(mp)) + fs_blocks = 0; + else + fs_blocks = XFS_B_TO_FSB(mp, pathlen); + resblks = XFS_SYMLINK_SPACE_RES(mp, link_namelen, fs_blocks); + error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); + if (error == ENOSPC && fs_blocks == 0) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_SYMLINK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); + } + if (error) { + cancel_flags = 0; + dp = NULL; + goto error_return; + } + + xfs_ilock(dp, XFS_ILOCK_EXCL); + + /* + * Reserve disk quota : blocks and inode. + */ + error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); + if (error) + goto error_return; + + /* + * Check for ability to enter directory entry, if no space reserved. + */ + if (resblks == 0 && + (error = XFS_DIR_CANENTER(mp, tp, dp, link_name, link_namelen))) + goto error_return; + /* + * Initialize the bmap freelist prior to calling either + * bmapi or the directory create code. + */ + XFS_BMAP_INIT(&free_list, &first_block); + + /* + * Allocate an inode for the symlink. + */ + rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0; + + error = xfs_dir_ialloc(&tp, dp, IFLNK | (vap->va_mode&~IFMT), + 1, rdev, credp, prid, resblks > 0, &ip, NULL); + if (error) { + if (error == ENOSPC) + goto error_return; + goto error1; + } + ITRACE(ip); + + VN_HOLD(dir_vp); + xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); + dp_joined_to_trans = B_TRUE; + + /* + * Also attach the dquot(s) to it, if applicable. + */ + XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp); + + if (resblks) + resblks -= XFS_IALLOC_SPACE_RES(mp); + /* + * If the symlink will fit into the inode, write it inline. + */ + if (pathlen <= XFS_IFORK_DSIZE(ip)) { + xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK); + memcpy(ip->i_df.if_u1.if_data, target_path, pathlen); + ip->i_d.di_size = pathlen; + + /* + * The inode was initially created in extent format. + */ + ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT); + ip->i_df.if_flags |= XFS_IFINLINE; + + ip->i_d.di_format = XFS_DINODE_FMT_LOCAL; + xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE); + + } else { + first_fsb = 0; + nmaps = SYMLINK_MAPS; + + error = xfs_bmapi(tp, ip, first_fsb, fs_blocks, + XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, + &first_block, resblks, mval, &nmaps, + &free_list); + if (error) { + goto error1; + } + + if (resblks) + resblks -= fs_blocks; + ip->i_d.di_size = pathlen; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + cur_chunk = target_path; + for (n = 0; n < nmaps; n++) { + d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); + byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, + BTOBB(byte_cnt), 0); + ASSERT(bp && !XFS_BUF_GETERROR(bp)); + if (pathlen < byte_cnt) { + byte_cnt = pathlen; + } + pathlen -= byte_cnt; + + memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt); + cur_chunk += byte_cnt; + + xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1); + } + } + + /* + * Create the directory entry for the symlink. + */ + error = XFS_DIR_CREATENAME(mp, tp, dp, link_name, link_namelen, + ip->i_ino, &first_block, &free_list, resblks); + if (error) { + goto error1; + } + xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + + /* + * Bump the in memory version number of the parent directory + * so that other processes accessing it will recognize that + * the directory has changed. 
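/*
 * The storage decision above -- a short symlink target lives inline in
 * the inode's data fork, a long one gets blocks mapped by xfs_bmapi
 * and is copied out chunk by chunk -- in miniature. The capacities and
 * the single malloc standing in for the mapped extent list are
 * invented:
 */
#include <stdlib.h>
#include <string.h>

#define INLINE_MAX 60
#define BLKSZ      512

struct toy_inode {
        size_t size;
        char inline_data[INLINE_MAX];
        char *blocks;           /* NULL when the target is inline */
};

int store_symlink(struct toy_inode *ip, const char *target)
{
        size_t pathlen = strlen(target), off;

        ip->size = pathlen;
        if (pathlen <= INLINE_MAX) {
                memcpy(ip->inline_data, target, pathlen);
                ip->blocks = NULL;
                return 0;
        }
        ip->blocks = malloc((pathlen + BLKSZ - 1) / BLKSZ * BLKSZ);
        if (ip->blocks == NULL)
                return -1;
        /* Mirrors walking the mval[] mappings: copy one block-sized
         * chunk per iteration until the target is exhausted. */
        for (off = 0; off < pathlen; off += BLKSZ) {
                size_t n = pathlen - off < BLKSZ ? pathlen - off : BLKSZ;
                memcpy(ip->blocks + off, target + off, n);
        }
        return 0;
}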
+ */ + dp->i_gen++; + + /* + * If this is a synchronous mount, make sure that the + * symlink transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + /* + * xfs_trans_commit normally decrements the vnode ref count + * when it unlocks the inode. Since we want to return the + * vnode to the caller, we bump the vnode ref count now. + */ + IHOLD(ip); + + error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); + if (error) { + goto error2; + } + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + /* Fall through to std_return with error = 0 or errno from + * xfs_trans_commit */ +std_return: + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp), + DM_EVENT_POSTSYMLINK)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK, + dir_bdp, DM_RIGHT_NULL, + error ? NULL : XFS_ITOBHV(ip), + DM_RIGHT_NULL, link_name, target_path, + 0, error, 0); + } + + if (!error) { + vnode_t *vp; + + ASSERT(ip); + vp = XFS_ITOV(ip); + *vpp = vp; + } + return error; + + error2: + IRELE(ip); + error1: + xfs_bmap_cancel(&free_list); + cancel_flags |= XFS_TRANS_ABORT; + error_return: + xfs_trans_cancel(tp, cancel_flags); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + if (!dp_joined_to_trans && (dp != NULL)) { + xfs_iunlock(dp, XFS_ILOCK_EXCL); + } + + goto std_return; +} + + +/* + * xfs_fid2 + * + * A fid routine that takes a pointer to a previously allocated + * fid structure (like xfs_fast_fid) but uses a 64 bit inode number. + */ +STATIC int +xfs_fid2( + bhv_desc_t *bdp, + fid_t *fidp) +{ + xfs_inode_t *ip; + xfs_fid2_t *xfid; + + vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__, + (inst_t *)__return_address); + ASSERT(sizeof(fid_t) >= sizeof(xfs_fid2_t)); + + xfid = (xfs_fid2_t *)fidp; + ip = XFS_BHVTOI(bdp); + xfid->fid_len = sizeof(xfs_fid2_t) - sizeof(xfid->fid_len); + xfid->fid_pad = 0; + /* + * use memcpy because the inode is a long long and there's no + * assurance that xfid->fid_ino is properly aligned. + */ + memcpy(&xfid->fid_ino, &ip->i_ino, sizeof(xfid->fid_ino)); + xfid->fid_gen = ip->i_d.di_gen; + + return 0; +} + + +/* + * xfs_rwlock + */ +int +xfs_rwlock( + bhv_desc_t *bdp, + vrwlock_t locktype) +{ + xfs_inode_t *ip; + vnode_t *vp; + + vp = BHV_TO_VNODE(bdp); + if (vp->v_type == VDIR) + return 1; + ip = XFS_BHVTOI(bdp); + if (locktype == VRWLOCK_WRITE) { + xfs_ilock(ip, XFS_IOLOCK_EXCL); + } else if (locktype == VRWLOCK_TRY_READ) { + return (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)); + } else if (locktype == VRWLOCK_TRY_WRITE) { + return (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)); + } else { + ASSERT((locktype == VRWLOCK_READ) || + (locktype == VRWLOCK_WRITE_DIRECT)); + xfs_ilock(ip, XFS_IOLOCK_SHARED); + } + + return 1; +} + + +/* + * xfs_rwunlock + */ +void +xfs_rwunlock( + bhv_desc_t *bdp, + vrwlock_t locktype) +{ + xfs_inode_t *ip; + xfs_inode_t *release_ip; + vnode_t *vp; + int error; + + vp = BHV_TO_VNODE(bdp); + if (vp->v_type == VDIR) + return; + ip = XFS_BHVTOI(bdp); + if (locktype == VRWLOCK_WRITE) { + /* + * In the write case, we may have added a new entry to + * the reference cache. This might store a pointer to + * an inode to be released in this inode. If it is there, + * clear the pointer and release the inode after unlocking + * this one. 
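/*
 * Why xfs_fid2 above fills fid_ino with memcpy: inside a packed file
 * handle there is no assurance the 64-bit inode number is suitably
 * aligned, and a direct store through a misaligned pointer is
 * undefined (and traps on some machines). A small model of the same
 * packing; the layout is invented, not xfs_fid2_t's:
 */
#include <stdint.h>
#include <string.h>

struct fid_like {
        uint16_t fid_len;
        uint16_t fid_pad;
        uint32_t fid_gen;
        unsigned char fid_ino[8];       /* bytes, so no alignment implied */
};

void pack_fid(struct fid_like *f, uint64_t ino, uint32_t gen)
{
        /* As above, fid_len counts everything but itself. */
        f->fid_len = sizeof(*f) - sizeof(f->fid_len);
        f->fid_pad = 0;
        f->fid_gen = gen;
        memcpy(f->fid_ino, &ino, sizeof(ino));  /* assumes nothing aligned */
}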
+ */ + release_ip = ip->i_release; + ip->i_release = NULL; + xfs_iunlock (ip, XFS_IOLOCK_EXCL); + + if (release_ip != NULL) { + VOP_RELEASE(XFS_ITOV(release_ip), error); + VN_RELE(XFS_ITOV(release_ip)); + } + } else { + ASSERT((locktype == VRWLOCK_READ) || + (locktype == VRWLOCK_WRITE_DIRECT)); + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + } + return; +} + +STATIC int +xfs_inode_flush( + bhv_desc_t *bdp, + int flags) +{ + xfs_inode_t *ip; + xfs_dinode_t *dip; + xfs_mount_t *mp; + xfs_buf_t *bp; + int error = 0; + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + /* Bypass inodes which have already been cleaned by + * the inode flush clustering code inside xfs_iflush + */ + if ((ip->i_update_core == 0) && + ((ip->i_itemp == NULL) || + !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL))) + return 0; + + if (flags & FLUSH_LOG) { + xfs_inode_log_item_t *iip = ip->i_itemp; + + if (iip && iip->ili_last_lsn) { + xlog_t *log = mp->m_log; + xfs_lsn_t sync_lsn; + int s, log_flags = XFS_LOG_FORCE; + + s = GRANT_LOCK(log); + sync_lsn = log->l_last_sync_lsn; + GRANT_UNLOCK(log, s); + + if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0)) + return 0; + + if (flags & FLUSH_SYNC) + log_flags |= XFS_LOG_SYNC; + return xfs_log_force(mp, iip->ili_last_lsn, + log_flags); + } + } + + /* We make this non-blocking if the inode is contended, + * return EAGAIN to indicate to the caller that they + * did not succeed. This prevents the flush path from + * blocking on inodes inside another operation right + * now, they get caught later by xfs_sync. + */ + if (flags & FLUSH_INODE) { + if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { + if ((xfs_ipincount(ip) == 0) && xfs_iflock_nowait(ip)) { + int flush_flags; + +#if 0 + /* not turning this on until some + * performance analysis is done + */ + if (flags & FLUSH_SYNC) + flush_flags = XFS_IFLUSH_SYNC; + else +#endif + flush_flags = XFS_IFLUSH_DELWRI_ELSE_ASYNC; + + xfs_ifunlock(ip); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0); + if (error) + return error; + xfs_buf_relse(bp); + + if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED) == 0) + return EAGAIN; + + if (xfs_ipincount(ip) || + !xfs_iflock_nowait(ip)) { + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return EAGAIN; + } + + error = xfs_iflush(ip, flush_flags); + } else { + error = EAGAIN; + } + xfs_iunlock(ip, XFS_ILOCK_SHARED); + } else { + error = EAGAIN; + } + } + + return error; +} + + +int +xfs_set_dmattrs ( + bhv_desc_t *bdp, + u_int evmask, + u_int16_t state, + cred_t *credp) +{ + xfs_inode_t *ip; + xfs_trans_t *tp; + xfs_mount_t *mp; + int error; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS); + error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES (mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + + ip->i_iocore.io_dmevmask = ip->i_d.di_dmevmask = evmask; + ip->i_iocore.io_dmstate = ip->i_d.di_dmstate = state; + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + IHOLD(ip); + error = xfs_trans_commit(tp, 0, NULL); + + return error; +} + + +/* + * xfs_reclaim + */ +STATIC int +xfs_reclaim( + bhv_desc_t *bdp) +{ + xfs_inode_t *ip; + vnode_t *vp; + + vp = BHV_TO_VNODE(bdp); + + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ASSERT(!VN_MAPPED(vp)); + ip = 
XFS_BHVTOI(bdp); + + if ((ip->i_d.di_mode & IFMT) == IFREG) { + if (ip->i_d.di_size > 0) { + /* + * Flush and invalidate any data left around that is + * a part of this file. + * + * Get the inode's i/o lock so that buffers are pushed + * out while holding the proper lock. We can't hold + * the inode lock here since flushing out buffers may + * cause us to try to get the lock in xfs_strategy(). + * + * We don't have to call remapf() here, because there + * cannot be any mapped file references to this vnode + * since it is being reclaimed. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + /* + * If we hit an IO error, we need to make sure that the + * buffer and page caches of file data for + * the file are tossed away. We don't want to use + * VOP_FLUSHINVAL_PAGES here because we don't want dirty + * pages to stay attached to the vnode, but be + * marked P_BAD. pdflush/vnode_pagebad + * hates that. + */ + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { + VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_NONE); + } else { + VOP_TOSS_PAGES(vp, 0, -1, FI_NONE); + } + + ASSERT(VN_CACHED(vp) == 0); + ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || + ip->i_delayed_blks == 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + } else if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { + /* + * di_size field may not be quite accurate if we're + * shutting down. + */ + VOP_TOSS_PAGES(vp, 0, -1, FI_NONE); + ASSERT(VN_CACHED(vp) == 0); + } + } + + /* If we have nothing to flush with this inode then complete the + * teardown now, otherwise break the link between the xfs inode + * and the linux inode and clean up the xfs inode later. This + * avoids flushing the inode to disk during the delete operation + * itself. + */ + if (!ip->i_update_core && (ip->i_itemp == NULL)) { + xfs_ilock(ip, XFS_ILOCK_EXCL); + return xfs_finish_reclaim(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC); + } else { + xfs_mount_t *mp = ip->i_mount; + + /* Protect sync from us */ + XFS_MOUNT_ILOCK(mp); + vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip)); + list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); + + XFS_MOUNT_IUNLOCK(mp); + } + return 0; +} + +int +xfs_finish_reclaim( + xfs_inode_t *ip, + int locked, + int sync_mode) +{ + xfs_ihash_t *ih = ip->i_hash; + int error; + + if (!locked) + xfs_ilock(ip, XFS_ILOCK_EXCL); + + /* The hash lock here protects a thread in xfs_iget_core from + * racing with us on linking the inode back with a vnode. + * Once we have the XFS_IRECLAIM flag set it will not touch + * us. + */ + write_lock(&ih->ih_lock); + if (ip->i_flags & XFS_IRECLAIM || (!locked && XFS_ITOV_NULL(ip))) { + write_unlock(&ih->ih_lock); + if (!locked) + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return(1); + } + ip->i_flags |= XFS_IRECLAIM; + write_unlock(&ih->ih_lock); + + /* + * If the inode is still dirty, then flush it out. If the inode + * is not in the AIL, then it will be OK to flush it delwri as + * long as xfs_iflush() does not keep any references to the inode. + * We leave that decision up to xfs_iflush() since it has the + * knowledge of whether it's OK to simply do a delwri flush of + * the inode or whether we need to wait until the inode is + * pulled from the AIL. + * We get the flush lock regardless, though, just to make sure + * we don't free it while it is being flushed. 
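/*
 * The split in xfs_reclaim above -- tear a clean inode down on the
 * spot, but unhook a dirty one and queue it on m_del_inodes for
 * xfs_finish_reclaim to handle later -- as a generic deferred-teardown
 * queue. Everything here (node, list, locking) is invented for
 * illustration:
 */
#include <pthread.h>
#include <stddef.h>

struct node {
        int dirty;
        struct node *next;
};

static struct node *deferred;
static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;

void teardown(struct node *n);          /* the expensive flush + free */

void reclaim(struct node *n)
{
        if (!n->dirty) {
                teardown(n);            /* nothing to flush: finish now */
                return;
        }
        /* Dirty: defer, so the delete path itself never blocks writing
         * this object back. A sweeper drains the list later. */
        pthread_mutex_lock(&deferred_lock);
        n->next = deferred;
        deferred = n;
        pthread_mutex_unlock(&deferred_lock);
}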
+ */ + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { + if (!locked) { + xfs_iflock(ip); + } + + if (ip->i_update_core || + ((ip->i_itemp != NULL) && + (ip->i_itemp->ili_format.ilf_fields != 0))) { + error = xfs_iflush(ip, sync_mode); + /* + * If we hit an error, typically because of filesystem + * shutdown, we don't need to let vn_reclaim to know + * because we're gonna reclaim the inode anyway. + */ + if (error) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_ireclaim(ip); + return (0); + } + xfs_iflock(ip); /* synchronize with xfs_iflush_done */ + } + + ASSERT(ip->i_update_core == 0); + ASSERT(ip->i_itemp == NULL || + ip->i_itemp->ili_format.ilf_fields == 0); + } + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + xfs_ireclaim(ip); + return 0; +} + +int +xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock) +{ + int purged; + struct list_head *curr, *next; + xfs_inode_t *ip; + int done = 0; + + while (!done) { + purged = 0; + XFS_MOUNT_ILOCK(mp); + list_for_each_safe(curr, next, &mp->m_del_inodes) { + ip = list_entry(curr, xfs_inode_t, i_reclaim); + if (noblock) { + if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) + continue; + if (xfs_ipincount(ip) || + !xfs_iflock_nowait(ip)) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + continue; + } + } + XFS_MOUNT_IUNLOCK(mp); + xfs_finish_reclaim(ip, noblock, + XFS_IFLUSH_DELWRI_ELSE_ASYNC); + purged = 1; + break; + } + + done = !purged; + } + + XFS_MOUNT_IUNLOCK(mp); + return 0; +} + +/* + * xfs_alloc_file_space() + * This routine allocates disk space for the given file. + * + * If alloc_type == 0, this request is for an ALLOCSP type + * request which will change the file size. In this case, no + * DMAPI event will be generated by the call. A TRUNCATE event + * will be generated later by xfs_setattr. + * + * If alloc_type != 0, this request is for a RESVSP type + * request, and a DMAPI DM_EVENT_WRITE will be generated if the + * lower block boundary byte address is less than the file's + * length. + * + * RETURNS: + * 0 on success + * errno on error + * + */ +int +xfs_alloc_file_space( + xfs_inode_t *ip, + xfs_off_t offset, + xfs_off_t len, + int alloc_type, + int attr_flags) +{ + xfs_filblks_t allocated_fsb; + xfs_filblks_t allocatesize_fsb; + int committed; + xfs_off_t count; + xfs_filblks_t datablocks; + int error; + xfs_fsblock_t firstfsb; + xfs_bmap_free_t free_list; + xfs_bmbt_irec_t *imapp; + xfs_bmbt_irec_t imaps[1]; + xfs_mount_t *mp; + int numrtextents; + int reccount; + uint resblks; + int rt; + int rtextsize; + xfs_fileoff_t startoffset_fsb; + xfs_trans_t *tp; + int xfs_bmapi_flags; + + vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + /* + * determine if this is a realtime file + */ + if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) { + if (ip->i_d.di_extsize) + rtextsize = ip->i_d.di_extsize; + else + rtextsize = mp->m_sb.sb_rextsize; + } else + rtextsize = 0; + + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return error; + + if (len <= 0) + return XFS_ERROR(EINVAL); + + count = len; + error = 0; + imapp = &imaps[0]; + reccount = 1; + xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0); + startoffset_fsb = XFS_B_TO_FSBT(mp, offset); + allocatesize_fsb = XFS_B_TO_FSB(mp, count); + + /* Generate a DMAPI event if needed. 
*/ + if (alloc_type != 0 && offset < ip->i_d.di_size && + (attr_flags&ATTR_DMI) == 0 && + DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) { + xfs_off_t end_dmi_offset; + + end_dmi_offset = offset+len; + if (end_dmi_offset > ip->i_d.di_size) + end_dmi_offset = ip->i_d.di_size; + error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip), + offset, end_dmi_offset - offset, + 0, NULL); + if (error) + return(error); + } + + /* + * allocate file space until done or until there is an error + */ +retry: + while (allocatesize_fsb && !error) { + /* + * determine if reserving space on + * the data or realtime partition. + */ + if (rt) { + xfs_fileoff_t s, e; + + s = startoffset_fsb; + do_div(s, rtextsize); + s *= rtextsize; + e = roundup_64(startoffset_fsb + allocatesize_fsb, + rtextsize); + numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize; + datablocks = 0; + } else { + datablocks = allocatesize_fsb; + numrtextents = 0; + } + + /* + * allocate and setup the transaction + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); + resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks); + error = xfs_trans_reserve(tp, + resblks, + XFS_WRITE_LOG_RES(mp), + numrtextents, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + + /* + * check for running out of space + */ + if (error) { + /* + * Free the transaction structure. + */ + ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + break; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + error = XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, + ip->i_udquot, ip->i_gdquot, resblks, 0, rt ? + XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); + if (error) + goto error1; + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + /* + * issue the bmapi() call to allocate the blocks + */ + XFS_BMAP_INIT(&free_list, &firstfsb); + error = xfs_bmapi(tp, ip, startoffset_fsb, + allocatesize_fsb, xfs_bmapi_flags, + &firstfsb, 0, imapp, &reccount, + &free_list); + if (error) { + goto error0; + } + + /* + * complete the transaction + */ + error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); + if (error) { + goto error0; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + if (error) { + break; + } + + allocated_fsb = imapp->br_blockcount; + + if (reccount == 0) { + error = XFS_ERROR(ENOSPC); + break; + } + + startoffset_fsb += allocated_fsb; + allocatesize_fsb -= allocated_fsb; + } +dmapi_enospc_check: + if (error == ENOSPC && (attr_flags&ATTR_DMI) == 0 && + DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_NOSPACE)) { + + error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE, + XFS_ITOBHV(ip), DM_RIGHT_NULL, + XFS_ITOBHV(ip), DM_RIGHT_NULL, + NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ + if (error == 0) + goto retry; /* Maybe DMAPI app. has made space */ + /* else fall through with error from XFS_SEND_DATA */ + } + + return error; + + error0: + xfs_bmap_cancel(&free_list); + error1: + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + goto dmapi_enospc_check; +} + +/* + * Zero file bytes between startoff and endoff inclusive. + * The iolock is held exclusive and no blocks are buffered. 
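/*
 * The realtime branch above rounds the start of the request down and
 * the end up to whole realtime extents before reserving (s and e,
 * computed with do_div and roundup_64). The same arithmetic, runnable
 * standalone:
 */
#include <stdio.h>

typedef unsigned long long u64;

static u64 rounddown_ext(u64 fsb, u64 extsize)
{
        return fsb / extsize * extsize;
}

static u64 roundup_ext(u64 fsb, u64 extsize)
{
        return (fsb + extsize - 1) / extsize * extsize;
}

int main(void)
{
        u64 start = 10, count = 5, extsize = 4;
        u64 s = rounddown_ext(start, extsize);
        u64 e = roundup_ext(start + count, extsize);

        /* Blocks 10..14 with 4-block extents need the span 8..16,
         * i.e. two whole extents reserved. */
        printf("s=%llu e=%llu extents=%llu\n", s, e, (e - s) / extsize);
        return 0;
}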
+ */ +STATIC int +xfs_zero_remaining_bytes( + xfs_inode_t *ip, + xfs_off_t startoff, + xfs_off_t endoff) +{ + xfs_buf_t *bp; + int error=0; + xfs_bmbt_irec_t imap; + xfs_off_t lastoffset; + xfs_mount_t *mp; + int nimap; + xfs_off_t offset; + xfs_fileoff_t offset_fsb; + + mp = ip->i_mount; + bp = XFS_ngetrbuf(mp->m_sb.sb_blocksize,mp); + ASSERT(!XFS_BUF_GETERROR(bp)); + + if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) { + XFS_BUF_SET_TARGET(bp, mp->m_rtdev_targp); + } else { + XFS_BUF_SET_TARGET(bp, mp->m_ddev_targp); + } + + for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { + offset_fsb = XFS_B_TO_FSBT(mp, offset); + nimap = 1; + error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, NULL, 0, &imap, + &nimap, NULL); + if (error || nimap < 1) + break; + ASSERT(imap.br_blockcount >= 1); + ASSERT(imap.br_startoff == offset_fsb); + lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1; + if (lastoffset > endoff) + lastoffset = endoff; + if (imap.br_startblock == HOLESTARTBLOCK) + continue; + ASSERT(imap.br_startblock != DELAYSTARTBLOCK); + if (imap.br_state == XFS_EXT_UNWRITTEN) + continue; + XFS_BUF_UNDONE(bp); + XFS_BUF_UNWRITE(bp); + XFS_BUF_READ(bp); + XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock)); + xfsbdstrat(mp, bp); + if ((error = xfs_iowait(bp))) { + xfs_ioerror_alert("xfs_zero_remaining_bytes(read)", + mp, bp, XFS_BUF_ADDR(bp)); + break; + } + memset(XFS_BUF_PTR(bp) + + (offset - XFS_FSB_TO_B(mp, imap.br_startoff)), + 0, lastoffset - offset + 1); + XFS_BUF_UNDONE(bp); + XFS_BUF_UNREAD(bp); + XFS_BUF_WRITE(bp); + xfsbdstrat(mp, bp); + if ((error = xfs_iowait(bp))) { + xfs_ioerror_alert("xfs_zero_remaining_bytes(write)", + mp, bp, XFS_BUF_ADDR(bp)); + break; + } + } + XFS_nfreerbuf(bp); + return error; +} + +/* + * xfs_free_file_space() + * This routine frees disk space for the given file. + * + * This routine is only called by xfs_change_file_space + * for an UNRESVSP type call. 
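/*
 * xfs_zero_remaining_bytes above zeroes the ragged edges of a range by
 * reading each block, clearing the affected bytes, and writing the
 * block back. A userspace analogue with pread/pwrite; it assumes the
 * blocks already exist on disk and that blocksize fits the buffer:
 */
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

int zero_byte_range(int fd, off_t start, off_t end, size_t blocksize)
{
        char buf[4096];
        off_t off = start;

        if (blocksize > sizeof(buf))
                return -1;
        while (off <= end) {
                off_t bstart = off - (off % blocksize);
                off_t bend = bstart + (off_t)blocksize - 1;
                off_t last = end < bend ? end : bend;

                /* Read-modify-write: keep the bytes we aren't zeroing. */
                if (pread(fd, buf, blocksize, bstart) != (ssize_t)blocksize)
                        return -1;
                memset(buf + (off - bstart), 0, (size_t)(last - off + 1));
                if (pwrite(fd, buf, blocksize, bstart) != (ssize_t)blocksize)
                        return -1;
                off = last + 1;
        }
        return 0;
}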
+ * + * RETURNS: + * 0 on success + * errno on error + * + */ +STATIC int +xfs_free_file_space( + xfs_inode_t *ip, + xfs_off_t offset, + xfs_off_t len, + int attr_flags) +{ + int committed; + int done; + xfs_off_t end_dmi_offset; + xfs_fileoff_t endoffset_fsb; + int error; + xfs_fsblock_t firstfsb; + xfs_bmap_free_t free_list; + xfs_off_t ilen; + xfs_bmbt_irec_t imap; + xfs_off_t ioffset; + xfs_extlen_t mod=0; + xfs_mount_t *mp; + int nimap; + uint resblks; + int rounding; + int rt; + xfs_fileoff_t startoffset_fsb; + xfs_trans_t *tp; + + vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); + mp = ip->i_mount; + + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return error; + + error = 0; + if (len <= 0) /* if nothing being freed */ + return error; + rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME); + startoffset_fsb = XFS_B_TO_FSB(mp, offset); + end_dmi_offset = offset + len; + endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset); + + if (offset < ip->i_d.di_size && + (attr_flags & ATTR_DMI) == 0 && + DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) { + if (end_dmi_offset > ip->i_d.di_size) + end_dmi_offset = ip->i_d.di_size; + error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip), + offset, end_dmi_offset - offset, + AT_DELAY_FLAG(attr_flags), NULL); + if (error) + return(error); + } + + xfs_ilock(ip, XFS_IOLOCK_EXCL); + rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog), + (__uint8_t)NBPP); + ilen = len + (offset & (rounding - 1)); + ioffset = offset & ~(rounding - 1); + if (ilen & (rounding - 1)) + ilen = (ilen + rounding) & ~(rounding - 1); + xfs_inval_cached_pages(XFS_ITOV(ip), &(ip->i_iocore), ioffset, 0, 0); + /* + * Need to zero the stuff we're not freeing, on disk. + * If its a realtime file & can't use unwritten extents then we + * actually need to zero the extent edges. Otherwise xfs_bunmapi + * will take care of it for us. 
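/*
 * The cache-invalidation prologue above widens the byte range with
 * mask arithmetic -- ioffset = offset & ~(rounding - 1), ilen padded
 * up likewise -- which is valid because rounding (the larger of block
 * size and page size) is a power of two. Isolated and checked:
 */
#include <assert.h>

static void align_range(unsigned long offset, unsigned long len,
                        unsigned long rounding, /* must be a power of two */
                        unsigned long *aoff, unsigned long *alen)
{
        *alen = len + (offset & (rounding - 1));
        *aoff = offset & ~(rounding - 1);
        if (*alen & (rounding - 1))
                *alen = (*alen + rounding) & ~(rounding - 1);
}

int main(void)
{
        unsigned long aoff, alen;

        align_range(5000, 100, 4096, &aoff, &alen);
        assert(aoff == 4096 && alen == 4096);   /* 5000..5099 is one page */
        return 0;
}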
+ */ + if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { + nimap = 1; + error = xfs_bmapi(NULL, ip, startoffset_fsb, 1, 0, NULL, 0, + &imap, &nimap, NULL); + if (error) + return error; + ASSERT(nimap == 0 || nimap == 1); + if (nimap && imap.br_startblock != HOLESTARTBLOCK) { + xfs_daddr_t block; + + ASSERT(imap.br_startblock != DELAYSTARTBLOCK); + block = imap.br_startblock; + mod = do_div(block, mp->m_sb.sb_rextsize); + if (mod) + startoffset_fsb += mp->m_sb.sb_rextsize - mod; + } + nimap = 1; + error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, 1, 0, NULL, 0, + &imap, &nimap, NULL); + if (error) + return error; + ASSERT(nimap == 0 || nimap == 1); + if (nimap && imap.br_startblock != HOLESTARTBLOCK) { + ASSERT(imap.br_startblock != DELAYSTARTBLOCK); + mod++; + if (mod && (mod != mp->m_sb.sb_rextsize)) + endoffset_fsb -= mod; + } + } + if ((done = (endoffset_fsb <= startoffset_fsb))) + /* + * One contiguous piece to clear + */ + error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1); + else { + /* + * Some full blocks, possibly two pieces to clear + */ + if (offset < XFS_FSB_TO_B(mp, startoffset_fsb)) + error = xfs_zero_remaining_bytes(ip, offset, + XFS_FSB_TO_B(mp, startoffset_fsb) - 1); + if (!error && + XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len) + error = xfs_zero_remaining_bytes(ip, + XFS_FSB_TO_B(mp, endoffset_fsb), + offset + len - 1); + } + + /* + * free file space until done or until there is an error + */ + resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); + while (!error && !done) { + + /* + * allocate and setup the transaction + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); + error = xfs_trans_reserve(tp, + resblks, + XFS_WRITE_LOG_RES(mp), + 0, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + + /* + * check for running out of space + */ + if (error) { + /* + * Free the transaction structure. + */ + ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + break; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + error = XFS_TRANS_RESERVE_QUOTA(mp, tp, + ip->i_udquot, ip->i_gdquot, resblks, 0, rt ? + XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); + if (error) + goto error1; + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + /* + * issue the bunmapi() call to free the blocks + */ + XFS_BMAP_INIT(&free_list, &firstfsb); + error = xfs_bunmapi(tp, ip, startoffset_fsb, + endoffset_fsb - startoffset_fsb, + 0, 2, &firstfsb, &free_list, &done); + if (error) { + goto error0; + } + + /* + * complete the transaction + */ + error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); + if (error) { + goto error0; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + } + + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; + + error0: + xfs_bmap_cancel(&free_list); + error1: + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); + xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + return error; +} + +/* + * xfs_change_file_space() + * This routine allocates or frees disk space for the given file. + * The user specified parameters are checked for alignment and size + * limitations. 
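/*
 * The case split above: if no whole block falls inside the freed
 * range, one contiguous piece is zeroed; otherwise only the ragged
 * head and tail around the block-aligned interior need it. The same
 * decision as a pure function, where sb_bytes/eb_bytes are the byte
 * addresses of startoffset_fsb/endoffset_fsb:
 */
struct piece { long long start, end; };         /* inclusive byte range */

int edge_pieces(long long offset, long long len,
                long long sb_bytes, long long eb_bytes, struct piece out[2])
{
        int n = 0;

        if (eb_bytes <= sb_bytes) {
                out[n].start = offset;  /* one contiguous piece to clear */
                out[n++].end = offset + len - 1;
                return n;
        }
        if (offset < sb_bytes) {        /* ragged head before the interior */
                out[n].start = offset;
                out[n++].end = sb_bytes - 1;
        }
        if (eb_bytes < offset + len) {  /* ragged tail after it */
                out[n].start = eb_bytes;
                out[n++].end = offset + len - 1;
        }
        return n;
}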
+ * + * RETURNS: + * 0 on success + * errno on error + * + */ +int +xfs_change_file_space( + bhv_desc_t *bdp, + int cmd, + xfs_flock64_t *bf, + xfs_off_t offset, + cred_t *credp, + int attr_flags) +{ + int clrprealloc; + int error; + xfs_fsize_t fsize; + xfs_inode_t *ip; + xfs_mount_t *mp; + int setprealloc; + xfs_off_t startoffset; + xfs_off_t llen; + xfs_trans_t *tp; + vattr_t va; + vnode_t *vp; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + /* + * must be a regular file and have write permission + */ + if (vp->v_type != VREG) + return XFS_ERROR(EINVAL); + + xfs_ilock(ip, XFS_ILOCK_SHARED); + + if ((error = xfs_iaccess(ip, IWRITE, credp))) { + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return error; + } + + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + switch (bf->l_whence) { + case 0: /*SEEK_SET*/ + break; + case 1: /*SEEK_CUR*/ + bf->l_start += offset; + break; + case 2: /*SEEK_END*/ + bf->l_start += ip->i_d.di_size; + break; + default: + return XFS_ERROR(EINVAL); + } + + llen = bf->l_len > 0 ? bf->l_len - 1 : bf->l_len; + + if ( (bf->l_start < 0) + || (bf->l_start > XFS_MAX_FILE_OFFSET) + || (bf->l_start + llen < 0) + || (bf->l_start + llen > XFS_MAX_FILE_OFFSET)) + return XFS_ERROR(EINVAL); + + bf->l_whence = 0; + + startoffset = bf->l_start; + fsize = ip->i_d.di_size; + + /* + * XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve + * file space. + * These calls do NOT zero the data space allocated to the file, + * nor do they change the file size. + * + * XFS_IOC_ALLOCSP and XFS_IOC_FREESP will allocate and free file + * space. + * These calls cause the new file data to be zeroed and the file + * size to be changed. + */ + setprealloc = clrprealloc = 0; + + switch (cmd) { + case XFS_IOC_RESVSP: + case XFS_IOC_RESVSP64: + error = xfs_alloc_file_space(ip, startoffset, bf->l_len, + 1, attr_flags); + if (error) + return error; + setprealloc = 1; + break; + + case XFS_IOC_UNRESVSP: + case XFS_IOC_UNRESVSP64: + if ((error = xfs_free_file_space(ip, startoffset, bf->l_len, + attr_flags))) + return error; + break; + + case XFS_IOC_ALLOCSP: + case XFS_IOC_ALLOCSP64: + case XFS_IOC_FREESP: + case XFS_IOC_FREESP64: + if (startoffset > fsize) { + error = xfs_alloc_file_space(ip, fsize, + startoffset - fsize, 0, attr_flags); + if (error) + break; + } + + va.va_mask = XFS_AT_SIZE; + va.va_size = startoffset; + + error = xfs_setattr(bdp, &va, attr_flags, credp); + + if (error) + return error; + + clrprealloc = 1; + break; + + default: + ASSERT(0); + return XFS_ERROR(EINVAL); + } + + /* + * update the inode timestamp, mode, and prealloc flag bits + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID); + + if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp), + 0, 0, 0))) { + /* ASSERT(0); */ + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + ip->i_d.di_mode &= ~ISUID; + + /* + * Note that we don't have to worry about mandatory + * file locking being disabled here because we only + * clear the ISGID bit if the Group execute bit is + * on, but if it was on then mandatory locking wouldn't + * have been enabled. 
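/*
 * The l_whence switch above folds SEEK_CUR and SEEK_END into an
 * absolute l_start, then bounds-checks both ends of the range. In
 * isolation, with an invented limit in place of XFS_MAX_FILE_OFFSET:
 */
#include <errno.h>

#define MAX_OFF ((long long)1 << 62)    /* stand-in limit */

struct frange { int whence; long long start, len; };

int normalize_range(struct frange *r, long long cur_off, long long fsize)
{
        long long llen;

        switch (r->whence) {
        case 0:                         /* SEEK_SET: already absolute */
                break;
        case 1:                         /* SEEK_CUR */
                r->start += cur_off;
                break;
        case 2:                         /* SEEK_END */
                r->start += fsize;
                break;
        default:
                return EINVAL;
        }
        r->whence = 0;

        /* llen is the offset of the last byte, or l_len itself when
         * l_len <= 0, exactly as computed above. */
        llen = r->len > 0 ? r->len - 1 : r->len;
        if (r->start < 0 || r->start > MAX_OFF ||
            r->start + llen < 0 || r->start + llen > MAX_OFF)
                return EINVAL;
        return 0;
}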
+ */ + if (ip->i_d.di_mode & (IEXEC >> 3)) + ip->i_d.di_mode &= ~ISGID; + + xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + if (setprealloc) + ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; + else if (clrprealloc) + ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC; + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + xfs_trans_set_sync(tp); + + error = xfs_trans_commit(tp, 0, NULL); + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + return error; +} + +vnodeops_t xfs_vnodeops = { + BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS), + .vop_open = xfs_open, + .vop_read = xfs_read, + .vop_write = xfs_write, + .vop_ioctl = xfs_ioctl, + .vop_getattr = xfs_getattr, + .vop_setattr = xfs_setattr, + .vop_access = xfs_access, + .vop_lookup = xfs_lookup, + .vop_create = xfs_create, + .vop_remove = xfs_remove, + .vop_link = xfs_link, + .vop_rename = xfs_rename, + .vop_mkdir = xfs_mkdir, + .vop_rmdir = xfs_rmdir, + .vop_readdir = xfs_readdir, + .vop_symlink = xfs_symlink, + .vop_readlink = xfs_readlink, + .vop_fsync = xfs_fsync, + .vop_inactive = xfs_inactive, + .vop_fid2 = xfs_fid2, + .vop_rwlock = xfs_rwlock, + .vop_rwunlock = xfs_rwunlock, + .vop_bmap = xfs_bmap, + .vop_reclaim = xfs_reclaim, + .vop_attr_get = xfs_attr_get, + .vop_attr_set = xfs_attr_set, + .vop_attr_remove = xfs_attr_remove, + .vop_attr_list = xfs_attr_list, + .vop_link_removed = (vop_link_removed_t)fs_noval, + .vop_vnode_change = (vop_vnode_change_t)fs_noval, + .vop_tosspages = fs_tosspages, + .vop_flushinval_pages = fs_flushinval_pages, + .vop_flush_pages = fs_flush_pages, + .vop_release = xfs_release, + .vop_iflush = xfs_inode_flush, +}; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acconfig.h linux.21rc5-ac2/include/acpi/acconfig.h --- linux.21rc5/include/acpi/acconfig.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acconfig.h 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,197 @@ +/****************************************************************************** + * + * Name: acconfig.h - Global configuration constants + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef _ACCONFIG_H +#define _ACCONFIG_H + + +/****************************************************************************** + * + * Configuration options + * + *****************************************************************************/ + +/* + * ACPI_DEBUG_OUTPUT - This switch enables all the debug facilities of the + * ACPI subsystem. This includes the DEBUG_PRINT output + * statements. When disabled, all DEBUG_PRINT + * statements are compiled out. + * + * ACPI_APPLICATION - Use this switch if the subsystem is going to be run + * at the application level. + * + */ + +/* Version string */ + +#define ACPI_CA_VERSION 0x20030509 + +/* Maximum objects in the various object caches */ + +#define ACPI_MAX_STATE_CACHE_DEPTH 64 /* State objects for stacks */ +#define ACPI_MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */ +#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 64 /* Parse tree objects */ +#define ACPI_MAX_OBJECT_CACHE_DEPTH 64 /* Interpreter operand objects */ +#define ACPI_MAX_WALK_CACHE_DEPTH 4 /* Objects for parse tree walks */ + +/* + * Should the subsystem abort the loading of an ACPI table if the + * table checksum is incorrect? + */ +#define ACPI_CHECKSUM_ABORT FALSE + + +/****************************************************************************** + * + * Subsystem Constants + * + *****************************************************************************/ + +/* Version of ACPI supported */ + +#define ACPI_CA_SUPPORT_LEVEL 2 + +/* String size constants */ + +#define ACPI_MAX_STRING_LENGTH 512 +#define ACPI_PATHNAME_MAX 256 /* A full namespace pathname */ + +/* Maximum count for a semaphore object */ + +#define ACPI_MAX_SEMAPHORE_COUNT 256 + +/* Max reference count (for debug only) */ + +#define ACPI_MAX_REFERENCE_COUNT 0x400 + +/* Size of cached memory mapping for system memory operation region */ + +#define ACPI_SYSMEM_REGION_WINDOW_SIZE 4096 + + +/****************************************************************************** + * + * ACPI Specification constants (Do not change unless the specification changes) + * + *****************************************************************************/ + +/* Number of distinct GPE register blocks and register width */ + +#define ACPI_MAX_GPE_BLOCKS 2 +#define ACPI_GPE_REGISTER_WIDTH 8 + +/* + * Method info (in WALK_STATE), containing local variables and arguments + */ +#define ACPI_METHOD_NUM_LOCALS 8 +#define ACPI_METHOD_MAX_LOCAL 7 + +#define ACPI_METHOD_NUM_ARGS 7 +#define ACPI_METHOD_MAX_ARG 6 + +/* Maximum length of resulting string when converting from a buffer */ + +#define ACPI_MAX_STRING_CONVERSION 200 + +/* + * Operand Stack (in WALK_STATE), must be large enough to contain METHOD_MAX_ARG + */ +#define ACPI_OBJ_NUM_OPERANDS 8 +#define ACPI_OBJ_MAX_OPERAND 7 + +/* Names within the namespace are 4 bytes long */ + +#define ACPI_NAME_SIZE 4 +#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ +#define ACPI_PATH_SEPARATOR '.' 
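/*
 * With ACPI_NAME_SIZE fixed at 4 and '.' separating segments, a
 * namepath can be sliced without any per-segment length field. A
 * quick illustration of those constants -- not ACPICA's own parser:
 */
#include <stdio.h>
#include <string.h>

#define NAME_SIZE 4     /* ACPI_NAME_SIZE */
#define PATH_SEP  '.'   /* ACPI_PATH_SEPARATOR */

void print_segments(const char *path)
{
        size_t i, len = strlen(path);

        /* Each segment is NAME_SIZE chars; a separator follows every
         * segment but the last (ACPI_PATH_SEGMENT_LENGTH == 5). */
        for (i = 0; i + NAME_SIZE <= len; i += NAME_SIZE + 1) {
                printf("segment: %.4s\n", path + i);
                if (path[i + NAME_SIZE] != PATH_SEP)
                        break;          /* end of path (or bad input) */
        }
}

int main(void)
{
        print_segments("_SB_.PCI0.ISA_");
        return 0;
}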
+ +/* Constants used in searching for the RSDP in low memory */ + +#define ACPI_LO_RSDP_WINDOW_BASE 0 /* Physical Address */ +#define ACPI_HI_RSDP_WINDOW_BASE 0xE0000 /* Physical Address */ +#define ACPI_LO_RSDP_WINDOW_SIZE 0x400 +#define ACPI_HI_RSDP_WINDOW_SIZE 0x20000 +#define ACPI_RSDP_SCAN_STEP 16 + +/* Operation regions */ + +#define ACPI_NUM_PREDEFINED_REGIONS 8 +#define ACPI_USER_REGION_BEGIN 0x80 + +/* Maximum space_ids for Operation Regions */ + +#define ACPI_MAX_ADDRESS_SPACE 255 + +/* Array sizes. Used for range checking also */ + +#define ACPI_NUM_ACCESS_TYPES 6 +#define ACPI_NUM_UPDATE_RULES 3 +#define ACPI_NUM_LOCK_RULES 2 +#define ACPI_NUM_MATCH_OPS 6 +#define ACPI_NUM_OPCODES 256 +#define ACPI_NUM_FIELD_NAMES 2 + +/* RSDP checksums */ + +#define ACPI_RSDP_CHECKSUM_LENGTH 20 +#define ACPI_RSDP_XCHECKSUM_LENGTH 36 + +/* SMBus bidirectional buffer size */ + +#define ACPI_SMBUS_BUFFER_SIZE 34 + + +/****************************************************************************** + * + * ACPI AML Debugger + * + *****************************************************************************/ + +#define ACPI_DEBUGGER_MAX_ARGS 8 /* Must be max method args + 1 */ + +#define ACPI_DEBUGGER_COMMAND_PROMPT '-' +#define ACPI_DEBUGGER_EXECUTE_PROMPT '%' + + +#endif /* _ACCONFIG_H */ + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acdebug.h linux.21rc5-ac2/include/acpi/acdebug.h --- linux.21rc5/include/acpi/acdebug.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acdebug.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,478 @@ +/****************************************************************************** + * + * Name: acdebug.h - ACPI/AML debugger + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACDEBUG_H__ +#define __ACDEBUG_H__ + + +#define ACPI_DEBUG_BUFFER_SIZE 4196 + +struct command_info +{ + char *name; /* Command Name */ + u8 min_args; /* Minimum arguments required */ +}; + + +struct argument_info +{ + char *name; /* Argument Name */ +}; + + +#define PARAM_LIST(pl) pl + +#define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose) + +#define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\ + acpi_os_printf PARAM_LIST(fp);} + +#define EX_NO_SINGLE_STEP 1 +#define EX_SINGLE_STEP 2 + + +/* Prototypes */ + + +/* + * dbxface - external debugger interfaces + */ + +acpi_status +acpi_db_initialize ( + void); + +void +acpi_db_terminate ( + void); + +acpi_status +acpi_db_single_step ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + u32 op_type); + +acpi_status +acpi_db_start_command ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + +void +acpi_db_method_end ( + struct acpi_walk_state *walk_state); + + +/* + * dbcmds - debug commands and output routines + */ + +void +acpi_db_display_table_info ( + char *table_arg); + +void +acpi_db_unload_acpi_table ( + char *table_arg, + char *instance_arg); + +void +acpi_db_set_method_breakpoint ( + char *location, + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + +void +acpi_db_set_method_call_breakpoint ( + union acpi_parse_object *op); + +void +acpi_db_disassemble_aml ( + char *statements, + union acpi_parse_object *op); + +void +acpi_db_dump_namespace ( + char *start_arg, + char *depth_arg); + +void +acpi_db_dump_namespace_by_owner ( + char *owner_arg, + char *depth_arg); + +void +acpi_db_send_notify ( + char *name, + u32 value); + +void +acpi_db_set_method_data ( + char *type_arg, + char *index_arg, + char *value_arg); + +acpi_status +acpi_db_display_objects ( + char *obj_type_arg, + char *display_count_arg); + +acpi_status +acpi_db_find_name_in_namespace ( + char *name_arg); + +void +acpi_db_set_scope ( + char *name); + +void +acpi_db_find_references ( + char *object_arg); + +void +acpi_db_display_locks (void); + + +void +acpi_db_display_resources ( + char *object_arg); + +void +acpi_db_display_gpes (void); + +void +acpi_db_check_integrity ( + void); + +acpi_status +acpi_db_integrity_walk ( + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value); + +acpi_status +acpi_db_walk_and_match_name ( + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value); + +acpi_status +acpi_db_walk_for_references ( + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value); + +acpi_status +acpi_db_walk_for_specific_objects ( + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value); + +void +acpi_db_generate_gpe ( + char *gpe_arg, + char *block_arg); + +/* + * dbdisply - debug display commands + */ + +void +acpi_db_display_method_info ( + union acpi_parse_object *op); + +void +acpi_db_decode_and_display_object ( + char *target, + char *output_type); + +void 
+acpi_db_decode_node ( + struct acpi_namespace_node *node); + +void +acpi_db_display_result_object ( + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_db_display_all_methods ( + char *display_count_arg); + +void +acpi_db_display_internal_object ( + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state); + +void +acpi_db_display_arguments ( + void); + +void +acpi_db_display_locals ( + void); + +void +acpi_db_display_results ( + void); + +void +acpi_db_display_calling_tree ( + void); + +void +acpi_db_display_argument_object ( + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state); + +void +acpi_db_dump_parser_descriptor ( + union acpi_parse_object *op); + +void * +acpi_db_get_pointer ( + void *target); + +void +acpi_db_decode_internal_object ( + union acpi_operand_object *obj_desc); + + +/* + * dbexec - debugger control method execution + */ + +void +acpi_db_execute ( + char *name, + char **args, + u32 flags); + +void +acpi_db_create_execution_threads ( + char *num_threads_arg, + char *num_loops_arg, + char *method_name_arg); + +acpi_status +acpi_db_execute_method ( + struct acpi_db_method_info *info, + struct acpi_buffer *return_obj); + +void +acpi_db_execute_setup ( + struct acpi_db_method_info *info); + +u32 +acpi_db_get_outstanding_allocations ( + void); + +void ACPI_SYSTEM_XFACE +acpi_db_method_thread ( + void *context); + +acpi_status +acpi_db_execution_walk ( + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value); + + +/* + * dbfileio - Debugger file I/O commands + */ + +acpi_object_type +acpi_db_match_argument ( + char *user_argument, + struct argument_info *arguments); + +acpi_status +ae_local_load_table ( + struct acpi_table_header *table_ptr); + +void +acpi_db_close_debug_file ( + void); + +void +acpi_db_open_debug_file ( + char *name); + +acpi_status +acpi_db_load_acpi_table ( + char *filename); + +acpi_status +acpi_db_get_table_from_file ( + char *filename, + struct acpi_table_header **table); + +acpi_status +acpi_db_read_table_from_file ( + char *filename, + struct acpi_table_header **table); + +/* + * dbhistry - debugger HISTORY command + */ + +void +acpi_db_add_to_history ( + char *command_line); + +void +acpi_db_display_history (void); + +char * +acpi_db_get_from_history ( + char *command_num_arg); + + +/* + * dbinput - user front-end to the AML debugger + */ + +acpi_status +acpi_db_command_dispatch ( + char *input_buffer, + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + +void ACPI_SYSTEM_XFACE +acpi_db_execute_thread ( + void *context); + +acpi_status +acpi_db_user_commands ( + char prompt, + union acpi_parse_object *op); + +void +acpi_db_display_help ( + char *help_type); + +char * +acpi_db_get_next_token ( + char *string, + char **next); + +u32 +acpi_db_get_line ( + char *input_buffer); + +u32 +acpi_db_match_command ( + char *user_command); + +void +acpi_db_single_thread ( + void); + + +/* + * dbstats - Generation and display of ACPI table statistics + */ + +void +acpi_db_generate_statistics ( + union acpi_parse_object *root, + u8 is_method); + + +acpi_status +acpi_db_display_statistics ( + char *type_arg); + +acpi_status +acpi_db_classify_one_object ( + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value); + +void +acpi_db_count_namespace_objects ( + void); + +void +acpi_db_enumerate_object ( + union acpi_operand_object *obj_desc); + + +/* + * dbutils - AML debugger utilities + */ + +void 
+acpi_db_set_output_destination ( + u32 where); + +void +acpi_db_dump_buffer ( + u32 address); + +void +acpi_db_dump_object ( + union acpi_object *obj_desc, + u32 level); + +void +acpi_db_prep_namestring ( + char *name); + + +acpi_status +acpi_db_second_pass_parse ( + union acpi_parse_object *root); + +struct acpi_namespace_node * +acpi_db_local_ns_lookup ( + char *name); + + +#endif /* __ACDEBUG_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acdispat.h linux.21rc5-ac2/include/acpi/acdispat.h --- linux.21rc5/include/acpi/acdispat.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acdispat.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,513 @@ +/****************************************************************************** + * + * Name: acdispat.h - dispatcher (parser to interpreter interface) + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + + +#ifndef _ACDISPAT_H_ +#define _ACDISPAT_H_ + + +#define NAMEOF_LOCAL_NTE "__L0" +#define NAMEOF_ARG_NTE "__A0" + + +/* Common interfaces */ + +acpi_status +acpi_ds_obj_stack_push ( + void *object, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_obj_stack_pop ( + u32 pop_count, + struct acpi_walk_state *walk_state); + +void * +acpi_ds_obj_stack_get_value ( + u32 index, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_obj_stack_pop_object ( + union acpi_operand_object **object, + struct acpi_walk_state *walk_state); + + +/* dsopcode - support for late evaluation */ + +acpi_status +acpi_ds_execute_arguments ( + struct acpi_namespace_node *node, + struct acpi_namespace_node *scope_node, + u32 aml_length, + u8 *aml_start); + +acpi_status +acpi_ds_get_buffer_field_arguments ( + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ds_get_region_arguments ( + union acpi_operand_object *rgn_desc); + +acpi_status +acpi_ds_get_buffer_arguments ( + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ds_get_package_arguments ( + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ds_init_buffer_field ( + u16 aml_opcode, + union acpi_operand_object *obj_desc, + union acpi_operand_object *buffer_desc, + union acpi_operand_object *offset_desc, + union acpi_operand_object *length_desc, + union acpi_operand_object *result_desc); + +acpi_status +acpi_ds_eval_buffer_field_operands ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + +acpi_status +acpi_ds_eval_region_operands ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + +acpi_status +acpi_ds_eval_data_object_operands ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ds_initialize_region ( + acpi_handle obj_handle); + + +/* dsctrl - Parser/Interpreter interface, control stack routines */ + + +acpi_status +acpi_ds_exec_begin_control_op ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + +acpi_status +acpi_ds_exec_end_control_op ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + + +/* dsexec - Parser/Interpreter interface, method execution callbacks */ + + +acpi_status +acpi_ds_get_predicate_value ( + struct acpi_walk_state *walk_state, + union acpi_operand_object *result_obj); + +acpi_status +acpi_ds_exec_begin_op ( + struct acpi_walk_state *walk_state, + union acpi_parse_object **out_op); + +acpi_status +acpi_ds_exec_end_op ( + struct acpi_walk_state *state); + + +/* dsfield - Parser/Interpreter interface for AML fields */ + +acpi_status +acpi_ds_get_field_names ( + struct acpi_create_field_info *info, + struct acpi_walk_state *walk_state, + union acpi_parse_object *arg); + +acpi_status +acpi_ds_create_field ( + union acpi_parse_object *op, + struct acpi_namespace_node *region_node, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_create_bank_field ( + union acpi_parse_object *op, + struct acpi_namespace_node *region_node, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_create_index_field ( + union acpi_parse_object *op, + struct acpi_namespace_node *region_node, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_create_buffer_field ( + union acpi_parse_object *op, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_init_field_objects ( + union acpi_parse_object *op, + struct acpi_walk_state *walk_state); + + +/* dsload - Parser/Interpreter interface, namespace load callbacks */ 
+ +acpi_status +acpi_ds_load1_begin_op ( + struct acpi_walk_state *walk_state, + union acpi_parse_object **out_op); + +acpi_status +acpi_ds_load1_end_op ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_load2_begin_op ( + struct acpi_walk_state *walk_state, + union acpi_parse_object **out_op); + +acpi_status +acpi_ds_load2_end_op ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_init_callbacks ( + struct acpi_walk_state *walk_state, + u32 pass_number); + + +/* dsmthdat - method data (locals/args) */ + + +acpi_status +acpi_ds_store_object_to_local ( + u16 opcode, + u32 index, + union acpi_operand_object *src_desc, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_method_data_get_entry ( + u16 opcode, + u32 index, + struct acpi_walk_state *walk_state, + union acpi_operand_object ***node); + +void +acpi_ds_method_data_delete_all ( + struct acpi_walk_state *walk_state); + +u8 +acpi_ds_is_method_value ( + union acpi_operand_object *obj_desc); + +acpi_object_type +acpi_ds_method_data_get_type ( + u16 opcode, + u32 index, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_method_data_get_value ( + u16 opcode, + u32 index, + struct acpi_walk_state *walk_state, + union acpi_operand_object **dest_desc); + +void +acpi_ds_method_data_delete_value ( + u16 opcode, + u32 index, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_method_data_init_args ( + union acpi_operand_object **params, + u32 max_param_count, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_method_data_get_node ( + u16 opcode, + u32 index, + struct acpi_walk_state *walk_state, + struct acpi_namespace_node **node); + +void +acpi_ds_method_data_init ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_method_data_set_value ( + u16 opcode, + u32 index, + union acpi_operand_object *object, + struct acpi_walk_state *walk_state); + + +/* dsmethod - Parser/Interpreter interface - control method parsing */ + +acpi_status +acpi_ds_parse_method ( + acpi_handle obj_handle); + +acpi_status +acpi_ds_call_control_method ( + struct acpi_thread_state *thread, + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + +acpi_status +acpi_ds_restart_control_method ( + struct acpi_walk_state *walk_state, + union acpi_operand_object *return_desc); + +acpi_status +acpi_ds_terminate_control_method ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_begin_method_execution ( + struct acpi_namespace_node *method_node, + union acpi_operand_object *obj_desc, + struct acpi_namespace_node *calling_method_node); + + +/* dsobj - Parser/Interpreter interface - object initialization and conversion */ + +acpi_status +acpi_ds_init_one_object ( + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value); + +acpi_status +acpi_ds_initialize_objects ( + struct acpi_table_desc *table_desc, + struct acpi_namespace_node *start_node); + +acpi_status +acpi_ds_build_internal_buffer_obj ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + u32 buffer_length, + union acpi_operand_object **obj_desc_ptr); + +acpi_status +acpi_ds_build_internal_package_obj ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + u32 package_length, + union acpi_operand_object **obj_desc); + +acpi_status +acpi_ds_build_internal_object ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + union acpi_operand_object **obj_desc_ptr); + +acpi_status +acpi_ds_init_object_from_op ( + struct acpi_walk_state *walk_state, + 
union acpi_parse_object *op, + u16 opcode, + union acpi_operand_object **obj_desc); + +acpi_status +acpi_ds_create_node ( + struct acpi_walk_state *walk_state, + struct acpi_namespace_node *node, + union acpi_parse_object *op); + + +/* dsutils - Parser/Interpreter interface utility routines */ + +u8 +acpi_ds_is_result_used ( + union acpi_parse_object *op, + struct acpi_walk_state *walk_state); + +void +acpi_ds_delete_result_if_not_used ( + union acpi_parse_object *op, + union acpi_operand_object *result_obj, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_create_operand ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *arg, + u32 args_remaining); + +acpi_status +acpi_ds_create_operands ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *first_arg); + +acpi_status +acpi_ds_resolve_operands ( + struct acpi_walk_state *walk_state); + +void +acpi_ds_clear_operands ( + struct acpi_walk_state *walk_state); + + +/* + * dswscope - Scope Stack manipulation + */ + +acpi_status +acpi_ds_scope_stack_push ( + struct acpi_namespace_node *node, + acpi_object_type type, + struct acpi_walk_state *walk_state); + + +acpi_status +acpi_ds_scope_stack_pop ( + struct acpi_walk_state *walk_state); + +void +acpi_ds_scope_stack_clear ( + struct acpi_walk_state *walk_state); + + +/* dswstate - parser WALK_STATE management routines */ + +struct acpi_walk_state * +acpi_ds_create_walk_state ( + acpi_owner_id owner_id, + union acpi_parse_object *origin, + union acpi_operand_object *mth_desc, + struct acpi_thread_state *thread); + +acpi_status +acpi_ds_init_aml_walk ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + struct acpi_namespace_node *method_node, + u8 *aml_start, + u32 aml_length, + union acpi_operand_object **params, + union acpi_operand_object **return_obj_desc, + u32 pass_number); + +acpi_status +acpi_ds_obj_stack_delete_all ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_obj_stack_pop_and_delete ( + u32 pop_count, + struct acpi_walk_state *walk_state); + +void +acpi_ds_delete_walk_state ( + struct acpi_walk_state *walk_state); + +struct acpi_walk_state * +acpi_ds_pop_walk_state ( + struct acpi_thread_state *thread); + +void +acpi_ds_push_walk_state ( + struct acpi_walk_state *walk_state, + struct acpi_thread_state *thread); + +acpi_status +acpi_ds_result_stack_pop ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_result_stack_push ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_result_stack_clear ( + struct acpi_walk_state *walk_state); + +struct acpi_walk_state * +acpi_ds_get_current_walk_state ( + struct acpi_thread_state *thread); + +void +acpi_ds_delete_walk_state_cache ( + void); + +acpi_status +acpi_ds_result_insert ( + void *object, + u32 index, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_result_remove ( + union acpi_operand_object **object, + u32 index, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_result_pop ( + union acpi_operand_object **object, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_result_push ( + union acpi_operand_object *object, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ds_result_pop_from_bottom ( + union acpi_operand_object **object, + struct acpi_walk_state *walk_state); + +#endif /* _ACDISPAT_H_ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acevents.h linux.21rc5-ac2/include/acpi/acevents.h --- linux.21rc5/include/acpi/acevents.h 1970-01-01 
01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acevents.h 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,263 @@ +/****************************************************************************** + * + * Name: acevents.h - Event subcomponent prototypes and defines + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef __ACEVENTS_H__ +#define __ACEVENTS_H__ + + +acpi_status +acpi_ev_initialize ( + void); + +acpi_status +acpi_ev_handler_initialize ( + void); + + +/* + * Evfixed - Fixed event handling + */ + +acpi_status +acpi_ev_fixed_event_initialize ( + void); + +u32 +acpi_ev_fixed_event_detect ( + void); + +u32 +acpi_ev_fixed_event_dispatch ( + u32 event); + + +/* + * Evmisc + */ + +u8 +acpi_ev_is_notify_object ( + struct acpi_namespace_node *node); + +acpi_status +acpi_ev_acquire_global_lock( + u16 timeout); + +acpi_status +acpi_ev_release_global_lock( + void); + +acpi_status +acpi_ev_init_global_lock_handler ( + void); + +u32 +acpi_ev_get_gpe_number_index ( + u32 gpe_number); + +acpi_status +acpi_ev_queue_notify_request ( + struct acpi_namespace_node *node, + u32 notify_value); + +void ACPI_SYSTEM_XFACE +acpi_ev_notify_dispatch ( + void *context); + + +/* + * Evgpe - GPE handling and dispatch + */ + +acpi_status +acpi_ev_walk_gpe_list ( + ACPI_GPE_CALLBACK gpe_walk_callback); + +u8 +acpi_ev_valid_gpe_event ( + struct acpi_gpe_event_info *gpe_event_info); + +struct acpi_gpe_event_info * +acpi_ev_get_gpe_event_info ( + acpi_handle gpe_device, + u32 gpe_number); + +acpi_status +acpi_ev_gpe_initialize ( + void); + +acpi_status +acpi_ev_create_gpe_block ( + struct acpi_namespace_node *gpe_device, + struct acpi_generic_address *gpe_block_address, + u32 register_count, + u8 gpe_block_base_number, + u32 interrupt_level, + struct acpi_gpe_block_info **return_gpe_block); + +acpi_status +acpi_ev_delete_gpe_block ( + struct acpi_gpe_block_info *gpe_block); + +u32 +acpi_ev_gpe_dispatch ( + struct acpi_gpe_event_info *gpe_event_info, + u32 gpe_number); + +u32 +acpi_ev_gpe_detect ( + struct acpi_gpe_xrupt_info *gpe_xrupt_list); + +/* + * Evregion - Address Space handling + */ + +acpi_status +acpi_ev_init_address_spaces ( + void); + +acpi_status +acpi_ev_address_space_dispatch ( + union acpi_operand_object *region_obj, + u32 function, + acpi_physical_address address, + u32 bit_width, + void *value); + +acpi_status +acpi_ev_install_handler ( + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value); + +acpi_status +acpi_ev_attach_region ( + union acpi_operand_object *handler_obj, + union acpi_operand_object *region_obj, + u8 acpi_ns_is_locked); + +void +acpi_ev_detach_region ( + union acpi_operand_object *region_obj, + u8 acpi_ns_is_locked); + + +/* + * Evregini - Region initialization and setup + */ + +acpi_status +acpi_ev_system_memory_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context); + +acpi_status +acpi_ev_io_space_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context); + +acpi_status +acpi_ev_pci_config_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context); + +acpi_status +acpi_ev_cmos_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context); + +acpi_status +acpi_ev_pci_bar_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context); + +acpi_status +acpi_ev_default_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context); + +acpi_status +acpi_ev_initialize_region ( + union acpi_operand_object *region_obj, + u8 acpi_ns_locked); + + +/* + * Evsci - SCI (System Control Interrupt) handling/dispatch + */ + +u32 ACPI_SYSTEM_XFACE +acpi_ev_gpe_xrupt_handler ( + void *context); + +u32 
+acpi_ev_install_sci_handler ( + void); + +acpi_status +acpi_ev_remove_sci_handler ( + void); + +u32 +acpi_ev_initialize_sCI ( + u32 program_sCI); + +void +acpi_ev_terminate ( + void); + + +#endif /* __ACEVENTS_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acexcep.h linux.21rc5-ac2/include/acpi/acexcep.h --- linux.21rc5/include/acpi/acexcep.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acexcep.h 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,304 @@ +/****************************************************************************** + * + * Name: acexcep.h - Exception codes returned by the ACPI subsystem + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef __ACEXCEP_H__ +#define __ACEXCEP_H__ + + +/* + * Exceptions returned by external ACPI interfaces + */ + +#define AE_CODE_ENVIRONMENTAL 0x0000 +#define AE_CODE_PROGRAMMER 0x1000 +#define AE_CODE_ACPI_TABLES 0x2000 +#define AE_CODE_AML 0x3000 +#define AE_CODE_CONTROL 0x4000 +#define AE_CODE_MASK 0xF000 + + +#define ACPI_SUCCESS(a) (!(a)) +#define ACPI_FAILURE(a) (a) + + +#define AE_OK (acpi_status) 0x0000 + +/* + * Environmental exceptions + */ +#define AE_ERROR (acpi_status) (0x0001 | AE_CODE_ENVIRONMENTAL) +#define AE_NO_ACPI_TABLES (acpi_status) (0x0002 | AE_CODE_ENVIRONMENTAL) +#define AE_NO_NAMESPACE (acpi_status) (0x0003 | AE_CODE_ENVIRONMENTAL) +#define AE_NO_MEMORY (acpi_status) (0x0004 | AE_CODE_ENVIRONMENTAL) +#define AE_NOT_FOUND (acpi_status) (0x0005 | AE_CODE_ENVIRONMENTAL) +#define AE_NOT_EXIST (acpi_status) (0x0006 | AE_CODE_ENVIRONMENTAL) +#define AE_ALREADY_EXISTS (acpi_status) (0x0007 | AE_CODE_ENVIRONMENTAL) +#define AE_TYPE (acpi_status) (0x0008 | AE_CODE_ENVIRONMENTAL) +#define AE_NULL_OBJECT (acpi_status) (0x0009 | AE_CODE_ENVIRONMENTAL) +#define AE_NULL_ENTRY (acpi_status) (0x000A | AE_CODE_ENVIRONMENTAL) +#define AE_BUFFER_OVERFLOW (acpi_status) (0x000B | AE_CODE_ENVIRONMENTAL) +#define AE_STACK_OVERFLOW (acpi_status) (0x000C | AE_CODE_ENVIRONMENTAL) +#define AE_STACK_UNDERFLOW (acpi_status) (0x000D | AE_CODE_ENVIRONMENTAL) +#define AE_NOT_IMPLEMENTED (acpi_status) (0x000E | AE_CODE_ENVIRONMENTAL) +#define AE_VERSION_MISMATCH (acpi_status) (0x000F | AE_CODE_ENVIRONMENTAL) +#define AE_SUPPORT (acpi_status) (0x0010 | AE_CODE_ENVIRONMENTAL) +#define AE_SHARE (acpi_status) (0x0011 | AE_CODE_ENVIRONMENTAL) +#define AE_LIMIT (acpi_status) (0x0012 | AE_CODE_ENVIRONMENTAL) +#define AE_TIME (acpi_status) (0x0013 | AE_CODE_ENVIRONMENTAL) +#define AE_UNKNOWN_STATUS (acpi_status) (0x0014 | AE_CODE_ENVIRONMENTAL) +#define AE_ACQUIRE_DEADLOCK (acpi_status) (0x0015 | AE_CODE_ENVIRONMENTAL) +#define AE_RELEASE_DEADLOCK (acpi_status) (0x0016 | AE_CODE_ENVIRONMENTAL) +#define AE_NOT_ACQUIRED (acpi_status) (0x0017 | AE_CODE_ENVIRONMENTAL) +#define AE_ALREADY_ACQUIRED (acpi_status) (0x0018 | AE_CODE_ENVIRONMENTAL) +#define AE_NO_HARDWARE_RESPONSE (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL) +#define AE_NO_GLOBAL_LOCK (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL) +#define AE_LOGICAL_ADDRESS (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL) +#define AE_ABORT_METHOD (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL) +#define AE_SAME_HANDLER (acpi_status) (0x001D | AE_CODE_ENVIRONMENTAL) + +#define AE_CODE_ENV_MAX 0x001D + +/* + * Programmer exceptions + */ +#define AE_BAD_PARAMETER (acpi_status) (0x0001 | AE_CODE_PROGRAMMER) +#define AE_BAD_CHARACTER (acpi_status) (0x0002 | AE_CODE_PROGRAMMER) +#define AE_BAD_PATHNAME (acpi_status) (0x0003 | AE_CODE_PROGRAMMER) +#define AE_BAD_DATA (acpi_status) (0x0004 | AE_CODE_PROGRAMMER) +#define AE_BAD_ADDRESS (acpi_status) (0x0005 | AE_CODE_PROGRAMMER) +#define AE_ALIGNMENT (acpi_status) (0x0006 | AE_CODE_PROGRAMMER) +#define AE_BAD_HEX_CONSTANT (acpi_status) (0x0007 | AE_CODE_PROGRAMMER) +#define AE_BAD_OCTAL_CONSTANT (acpi_status) (0x0008 | AE_CODE_PROGRAMMER) +#define AE_BAD_DECIMAL_CONSTANT (acpi_status) (0x0009 | AE_CODE_PROGRAMMER) + +#define AE_CODE_PGM_MAX 0x0009 + + +/* + * Acpi table exceptions + */ +#define AE_BAD_SIGNATURE (acpi_status) (0x0001 | AE_CODE_ACPI_TABLES) +#define AE_BAD_HEADER (acpi_status) (0x0002 | AE_CODE_ACPI_TABLES) +#define AE_BAD_CHECKSUM (acpi_status) (0x0003 | AE_CODE_ACPI_TABLES) +#define AE_BAD_VALUE 
(acpi_status) (0x0004 | AE_CODE_ACPI_TABLES) +#define AE_TABLE_NOT_SUPPORTED (acpi_status) (0x0005 | AE_CODE_ACPI_TABLES) +#define AE_INVALID_TABLE_LENGTH (acpi_status) (0x0006 | AE_CODE_ACPI_TABLES) + +#define AE_CODE_TBL_MAX 0x0006 + + +/* + * AML exceptions. These are caused by problems with + * the actual AML byte stream + */ +#define AE_AML_ERROR (acpi_status) (0x0001 | AE_CODE_AML) +#define AE_AML_PARSE (acpi_status) (0x0002 | AE_CODE_AML) +#define AE_AML_BAD_OPCODE (acpi_status) (0x0003 | AE_CODE_AML) +#define AE_AML_NO_OPERAND (acpi_status) (0x0004 | AE_CODE_AML) +#define AE_AML_OPERAND_TYPE (acpi_status) (0x0005 | AE_CODE_AML) +#define AE_AML_OPERAND_VALUE (acpi_status) (0x0006 | AE_CODE_AML) +#define AE_AML_UNINITIALIZED_LOCAL (acpi_status) (0x0007 | AE_CODE_AML) +#define AE_AML_UNINITIALIZED_ARG (acpi_status) (0x0008 | AE_CODE_AML) +#define AE_AML_UNINITIALIZED_ELEMENT (acpi_status) (0x0009 | AE_CODE_AML) +#define AE_AML_NUMERIC_OVERFLOW (acpi_status) (0x000A | AE_CODE_AML) +#define AE_AML_REGION_LIMIT (acpi_status) (0x000B | AE_CODE_AML) +#define AE_AML_BUFFER_LIMIT (acpi_status) (0x000C | AE_CODE_AML) +#define AE_AML_PACKAGE_LIMIT (acpi_status) (0x000D | AE_CODE_AML) +#define AE_AML_DIVIDE_BY_ZERO (acpi_status) (0x000E | AE_CODE_AML) +#define AE_AML_BAD_NAME (acpi_status) (0x000F | AE_CODE_AML) +#define AE_AML_NAME_NOT_FOUND (acpi_status) (0x0010 | AE_CODE_AML) +#define AE_AML_INTERNAL (acpi_status) (0x0011 | AE_CODE_AML) +#define AE_AML_INVALID_SPACE_ID (acpi_status) (0x0012 | AE_CODE_AML) +#define AE_AML_STRING_LIMIT (acpi_status) (0x0013 | AE_CODE_AML) +#define AE_AML_NO_RETURN_VALUE (acpi_status) (0x0014 | AE_CODE_AML) +#define AE_AML_METHOD_LIMIT (acpi_status) (0x0015 | AE_CODE_AML) +#define AE_AML_NOT_OWNER (acpi_status) (0x0016 | AE_CODE_AML) +#define AE_AML_MUTEX_ORDER (acpi_status) (0x0017 | AE_CODE_AML) +#define AE_AML_MUTEX_NOT_ACQUIRED (acpi_status) (0x0018 | AE_CODE_AML) +#define AE_AML_INVALID_RESOURCE_TYPE (acpi_status) (0x0019 | AE_CODE_AML) +#define AE_AML_INVALID_INDEX (acpi_status) (0x001A | AE_CODE_AML) +#define AE_AML_REGISTER_LIMIT (acpi_status) (0x001B | AE_CODE_AML) +#define AE_AML_NO_WHILE (acpi_status) (0x001C | AE_CODE_AML) +#define AE_AML_ALIGNMENT (acpi_status) (0x001D | AE_CODE_AML) +#define AE_AML_NO_RESOURCE_END_TAG (acpi_status) (0x001E | AE_CODE_AML) +#define AE_AML_BAD_RESOURCE_VALUE (acpi_status) (0x001F | AE_CODE_AML) +#define AE_AML_CIRCULAR_REFERENCE (acpi_status) (0x0020 | AE_CODE_AML) + +#define AE_CODE_AML_MAX 0x0020 + +/* + * Internal exceptions used for control + */ +#define AE_CTRL_RETURN_VALUE (acpi_status) (0x0001 | AE_CODE_CONTROL) +#define AE_CTRL_PENDING (acpi_status) (0x0002 | AE_CODE_CONTROL) +#define AE_CTRL_TERMINATE (acpi_status) (0x0003 | AE_CODE_CONTROL) +#define AE_CTRL_TRUE (acpi_status) (0x0004 | AE_CODE_CONTROL) +#define AE_CTRL_FALSE (acpi_status) (0x0005 | AE_CODE_CONTROL) +#define AE_CTRL_DEPTH (acpi_status) (0x0006 | AE_CODE_CONTROL) +#define AE_CTRL_END (acpi_status) (0x0007 | AE_CODE_CONTROL) +#define AE_CTRL_TRANSFER (acpi_status) (0x0008 | AE_CODE_CONTROL) +#define AE_CTRL_BREAK (acpi_status) (0x0009 | AE_CODE_CONTROL) +#define AE_CTRL_CONTINUE (acpi_status) (0x000A | AE_CODE_CONTROL) +#define AE_CTRL_SKIP (acpi_status) (0x000B | AE_CODE_CONTROL) + +#define AE_CODE_CTRL_MAX 0x000B + + +#ifdef DEFINE_ACPI_GLOBALS + +/* + * String versions of the exception codes above + * These strings must match the corresponding defines exactly + */ +char const *acpi_gbl_exception_names_env[] = +{ + "AE_OK", + "AE_ERROR", + 
"AE_NO_ACPI_TABLES", + "AE_NO_NAMESPACE", + "AE_NO_MEMORY", + "AE_NOT_FOUND", + "AE_NOT_EXIST", + "AE_ALREADY_EXISTS", + "AE_TYPE", + "AE_NULL_OBJECT", + "AE_NULL_ENTRY", + "AE_BUFFER_OVERFLOW", + "AE_STACK_OVERFLOW", + "AE_STACK_UNDERFLOW", + "AE_NOT_IMPLEMENTED", + "AE_VERSION_MISMATCH", + "AE_SUPPORT", + "AE_SHARE", + "AE_LIMIT", + "AE_TIME", + "AE_UNKNOWN_STATUS", + "AE_ACQUIRE_DEADLOCK", + "AE_RELEASE_DEADLOCK", + "AE_NOT_ACQUIRED", + "AE_ALREADY_ACQUIRED", + "AE_NO_HARDWARE_RESPONSE", + "AE_NO_GLOBAL_LOCK", + "AE_LOGICAL_ADDRESS", + "AE_ABORT_METHOD", + "AE_SAME_HANDLER" +}; + +char const *acpi_gbl_exception_names_pgm[] = +{ + "AE_BAD_PARAMETER", + "AE_BAD_CHARACTER", + "AE_BAD_PATHNAME", + "AE_BAD_DATA", + "AE_BAD_ADDRESS", + "AE_ALIGNMENT", + "AE_BAD_HEX_CONSTANT", + "AE_BAD_OCTAL_CONSTANT", + "AE_BAD_DECIMAL_CONSTANT" +}; + +char const *acpi_gbl_exception_names_tbl[] = +{ + "AE_BAD_SIGNATURE", + "AE_BAD_HEADER", + "AE_BAD_CHECKSUM", + "AE_BAD_VALUE", + "AE_TABLE_NOT_SUPPORTED", + "AE_INVALID_TABLE_LENGTH" +}; + +char const *acpi_gbl_exception_names_aml[] = +{ + "AE_AML_ERROR", + "AE_AML_PARSE", + "AE_AML_BAD_OPCODE", + "AE_AML_NO_OPERAND", + "AE_AML_OPERAND_TYPE", + "AE_AML_OPERAND_VALUE", + "AE_AML_UNINITIALIZED_LOCAL", + "AE_AML_UNINITIALIZED_ARG", + "AE_AML_UNINITIALIZED_ELEMENT", + "AE_AML_NUMERIC_OVERFLOW", + "AE_AML_REGION_LIMIT", + "AE_AML_BUFFER_LIMIT", + "AE_AML_PACKAGE_LIMIT", + "AE_AML_DIVIDE_BY_ZERO", + "AE_AML_BAD_NAME", + "AE_AML_NAME_NOT_FOUND", + "AE_AML_INTERNAL", + "AE_AML_INVALID_SPACE_ID", + "AE_AML_STRING_LIMIT", + "AE_AML_NO_RETURN_VALUE", + "AE_AML_METHOD_LIMIT", + "AE_AML_NOT_OWNER", + "AE_AML_MUTEX_ORDER", + "AE_AML_MUTEX_NOT_ACQUIRED", + "AE_AML_INVALID_RESOURCE_TYPE", + "AE_AML_INVALID_INDEX", + "AE_AML_REGISTER_LIMIT", + "AE_AML_NO_WHILE", + "AE_AML_ALIGNMENT", + "AE_AML_NO_RESOURCE_END_TAG", + "AE_AML_BAD_RESOURCE_VALUE", + "AE_AML_CIRCULAR_REFERENCE" +}; + +char const *acpi_gbl_exception_names_ctrl[] = +{ + "AE_CTRL_RETURN_VALUE", + "AE_CTRL_PENDING", + "AE_CTRL_TERMINATE", + "AE_CTRL_TRUE", + "AE_CTRL_FALSE", + "AE_CTRL_DEPTH", + "AE_CTRL_END", + "AE_CTRL_TRANSFER", + "AE_CTRL_BREAK", + "AE_CTRL_CONTINUE", + "AE_CTRL_SKIP" +}; + +#endif /* ACPI GLOBALS */ + + +#endif /* __ACEXCEP_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acglobal.h linux.21rc5-ac2/include/acpi/acglobal.h --- linux.21rc5/include/acpi/acglobal.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acglobal.h 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,306 @@ +/****************************************************************************** + * + * Name: acglobal.h - Declarations for global variables + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACGLOBAL_H__
+#define __ACGLOBAL_H__
+
+
+/*
+ * Ensure that the globals are actually defined only once.
+ *
+ * The use of these defines allows a single list of globals (here) in order
+ * to simplify maintenance of the code.
+ */
+#ifdef DEFINE_ACPI_GLOBALS
+#define ACPI_EXTERN
+#else
+#define ACPI_EXTERN extern
+#endif
+
+
+/*****************************************************************************
+ *
+ * Debug support
+ *
+ ****************************************************************************/
+
+/* Runtime configuration of debug print levels */
+
+extern u32 acpi_dbg_level;
+extern u32 acpi_dbg_layer;
+
+/* Procedure nesting level for debug output */
+
+extern u32 acpi_gbl_nesting_level;
+
+
+/*****************************************************************************
+ *
+ * ACPI Table globals
+ *
+ ****************************************************************************/
+
+/*
+ * Table pointers.
+ * Although these pointers are somewhat redundant with the global acpi_table,
+ * they are convenient because they are typed pointers.
+ *
+ * These tables are single-table only, meaning that there can be at most one
+ * of each in the system. Each global points to the actual table.
+ *
+ */
+ACPI_EXTERN u32 acpi_gbl_table_flags;
+ACPI_EXTERN u32 acpi_gbl_rsdt_table_count;
+ACPI_EXTERN struct rsdp_descriptor *acpi_gbl_RSDP;
+ACPI_EXTERN XSDT_DESCRIPTOR *acpi_gbl_XSDT;
+ACPI_EXTERN FADT_DESCRIPTOR *acpi_gbl_FADT;
+ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
+ACPI_EXTERN FACS_DESCRIPTOR *acpi_gbl_FACS;
+ACPI_EXTERN struct acpi_common_facs acpi_gbl_common_fACS;
+
+/*
+ * Handle both ACPI 1.0 and ACPI 2.0 Integer widths:
+ * if we are running a method that exists in a 32-bit ACPI table,
+ * use only 32 bits of the Integer for conversion.
+ */
+ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
+ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
+ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
+ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+
+/*
+ * Since there may be multiple SSDTs and PSDTs, a single pointer is not
+ * sufficient; therefore, there isn't one!
+ */ + + +/* + * ACPI Table info arrays + */ +extern struct acpi_table_list acpi_gbl_table_lists[NUM_ACPI_TABLE_TYPES]; +extern struct acpi_table_support acpi_gbl_table_data[NUM_ACPI_TABLE_TYPES]; + +/* + * Predefined mutex objects. This array contains the + * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. + * (The table maps local handles to the real OS handles) + */ +ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[NUM_MUTEX]; + + +/***************************************************************************** + * + * Miscellaneous globals + * + ****************************************************************************/ + + +ACPI_EXTERN struct acpi_memory_list acpi_gbl_memory_lists[ACPI_NUM_MEM_LISTS]; +ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify; +ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify; +ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler; +ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; +ACPI_EXTERN acpi_handle acpi_gbl_global_lock_semaphore; + +ACPI_EXTERN u32 acpi_gbl_global_lock_thread_count; +ACPI_EXTERN u32 acpi_gbl_original_mode; +ACPI_EXTERN u32 acpi_gbl_rsdp_original_location; +ACPI_EXTERN u32 acpi_gbl_ns_lookup_count; +ACPI_EXTERN u32 acpi_gbl_ps_find_count; +ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save; +ACPI_EXTERN u16 acpi_gbl_next_table_owner_id; +ACPI_EXTERN u16 acpi_gbl_next_method_owner_id; +ACPI_EXTERN u16 acpi_gbl_global_lock_handle; +ACPI_EXTERN u8 acpi_gbl_debugger_configuration; +ACPI_EXTERN u8 acpi_gbl_global_lock_acquired; +ACPI_EXTERN u8 acpi_gbl_step_to_next_call; +ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present; +ACPI_EXTERN u8 acpi_gbl_global_lock_present; +ACPI_EXTERN u8 acpi_gbl_events_initialized; + +extern u8 acpi_gbl_shutdown; +extern u32 acpi_gbl_startup_flags; +extern const u8 acpi_gbl_decode_to8bit[8]; +extern const char *acpi_gbl_db_sleep_states[ACPI_S_STATE_COUNT]; +extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES]; +extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS]; + + +/***************************************************************************** + * + * Namespace globals + * + ****************************************************************************/ + +#define NUM_NS_TYPES ACPI_TYPE_INVALID+1 + +#if defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY) +#define NUM_PREDEFINED_NAMES 10 +#else +#define NUM_PREDEFINED_NAMES 9 +#endif + +ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct; +ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node; + +extern const u8 acpi_gbl_ns_properties[NUM_NS_TYPES]; +extern const struct acpi_predefined_names acpi_gbl_pre_defined_names [NUM_PREDEFINED_NAMES]; + +#ifdef ACPI_DEBUG_OUTPUT +ACPI_EXTERN u32 acpi_gbl_current_node_count; +ACPI_EXTERN u32 acpi_gbl_current_node_size; +ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count; +ACPI_EXTERN acpi_size acpi_gbl_entry_stack_pointer; +ACPI_EXTERN acpi_size acpi_gbl_lowest_stack_pointer; +ACPI_EXTERN u32 acpi_gbl_deepest_nesting; +#endif + +/***************************************************************************** + * + * Interpreter globals + * + ****************************************************************************/ + + +ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list; + +/* Control method single step flag */ + +ACPI_EXTERN u8 acpi_gbl_cm_single_step; + + +/***************************************************************************** + * + * Parser globals + * + 
****************************************************************************/ + +ACPI_EXTERN union acpi_parse_object *acpi_gbl_parsed_namespace_root; + +/***************************************************************************** + * + * Hardware globals + * + ****************************************************************************/ + +extern struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG]; +ACPI_EXTERN u8 acpi_gbl_sleep_type_a; +ACPI_EXTERN u8 acpi_gbl_sleep_type_b; + + +/***************************************************************************** + * + * Event and GPE globals + * + ****************************************************************************/ + +extern struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS]; +ACPI_EXTERN struct acpi_fixed_event_handler acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]; +ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; +ACPI_EXTERN struct acpi_gpe_block_info *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; +ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock; + + +/***************************************************************************** + * + * Debugger globals + * + ****************************************************************************/ + + +ACPI_EXTERN u8 acpi_gbl_db_output_flags; + +#ifdef ACPI_DISASSEMBLER + +ACPI_EXTERN u8 acpi_gbl_db_opt_disasm; +ACPI_EXTERN u8 acpi_gbl_db_opt_verbose; +#endif + + +#ifdef ACPI_DEBUGGER + +extern u8 acpi_gbl_method_executing; +extern u8 acpi_gbl_abort_method; +extern u8 acpi_gbl_db_terminate_threads; + +ACPI_EXTERN int optind; +ACPI_EXTERN char *optarg; + +ACPI_EXTERN u8 acpi_gbl_db_opt_tables; +ACPI_EXTERN u8 acpi_gbl_db_opt_stats; +ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods; + + +ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]; +ACPI_EXTERN char acpi_gbl_db_line_buf[80]; +ACPI_EXTERN char acpi_gbl_db_parsed_buf[80]; +ACPI_EXTERN char acpi_gbl_db_scope_buf[40]; +ACPI_EXTERN char acpi_gbl_db_debug_filename[40]; +ACPI_EXTERN u8 acpi_gbl_db_output_to_file; +ACPI_EXTERN char *acpi_gbl_db_buffer; +ACPI_EXTERN char *acpi_gbl_db_filename; +ACPI_EXTERN u32 acpi_gbl_db_debug_level; +ACPI_EXTERN u32 acpi_gbl_db_console_debug_level; +ACPI_EXTERN struct acpi_table_header *acpi_gbl_db_table_ptr; +ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node; + +/* + * Statistic globals + */ +ACPI_EXTERN u16 acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX+1]; +ACPI_EXTERN u16 acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX+1]; +ACPI_EXTERN u16 acpi_gbl_obj_type_count_misc; +ACPI_EXTERN u16 acpi_gbl_node_type_count_misc; +ACPI_EXTERN u32 acpi_gbl_num_nodes; +ACPI_EXTERN u32 acpi_gbl_num_objects; + + +ACPI_EXTERN u32 acpi_gbl_size_of_parse_tree; +ACPI_EXTERN u32 acpi_gbl_size_of_method_trees; +ACPI_EXTERN u32 acpi_gbl_size_of_node_entries; +ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects; + +#endif /* ACPI_DEBUGGER */ + + +#endif /* __ACGLOBAL_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/achware.h linux.21rc5-ac2/include/acpi/achware.h --- linux.21rc5/include/acpi/achware.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/achware.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,177 @@ +/****************************************************************************** + * + * Name: achware.h -- hardware specific interfaces + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. 
Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef __ACHWARE_H__ +#define __ACHWARE_H__ + + +/* PM Timer ticks per second (HZ) */ +#define PM_TIMER_FREQUENCY 3579545 + + +/* Prototypes */ + + +acpi_status +acpi_hw_initialize ( + void); + +acpi_status +acpi_hw_shutdown ( + void); + +acpi_status +acpi_hw_initialize_system_info ( + void); + +acpi_status +acpi_hw_set_mode ( + u32 mode); + +u32 +acpi_hw_get_mode ( + void); + +u32 +acpi_hw_get_mode_capabilities ( + void); + +/* Register I/O Prototypes */ + +struct acpi_bit_register_info * +acpi_hw_get_bit_register_info ( + u32 register_id); + +acpi_status +acpi_hw_register_read ( + u8 use_lock, + u32 register_id, + u32 *return_value); + +acpi_status +acpi_hw_register_write ( + u8 use_lock, + u32 register_id, + u32 value); + +acpi_status +acpi_hw_low_level_read ( + u32 width, + u32 *value, + struct acpi_generic_address *reg); + +acpi_status +acpi_hw_low_level_write ( + u32 width, + u32 value, + struct acpi_generic_address *reg); + +acpi_status +acpi_hw_clear_acpi_status ( + void); + + +/* GPE support */ + +acpi_status +acpi_hw_enable_gpe ( + struct acpi_gpe_event_info *gpe_event_info); + +void +acpi_hw_enable_gpe_for_wakeup ( + struct acpi_gpe_event_info *gpe_event_info); + +acpi_status +acpi_hw_disable_gpe ( + struct acpi_gpe_event_info *gpe_event_info); + +acpi_status +acpi_hw_disable_gpe_block ( + struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block); + +void +acpi_hw_disable_gpe_for_wakeup ( + struct acpi_gpe_event_info *gpe_event_info); + +acpi_status +acpi_hw_clear_gpe ( + struct acpi_gpe_event_info *gpe_event_info); + +acpi_status +acpi_hw_clear_gpe_block ( + struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block); + +acpi_status +acpi_hw_get_gpe_status ( + struct acpi_gpe_event_info *gpe_event_info, + acpi_event_status *event_status); + +acpi_status +acpi_hw_disable_non_wakeup_gpes ( + void); + +acpi_status +acpi_hw_enable_non_wakeup_gpes ( + void); + + +/* ACPI Timer prototypes */ + +acpi_status +acpi_get_timer_resolution ( + u32 *resolution); + +acpi_status +acpi_get_timer ( + u32 *ticks); + +acpi_status +acpi_get_timer_duration ( + u32 start_ticks, + u32 end_ticks, + u32 *time_elapsed); + + +#endif /* __ACHWARE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acinterp.h linux.21rc5-ac2/include/acpi/acinterp.h --- linux.21rc5/include/acpi/acinterp.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acinterp.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,738 @@ +/****************************************************************************** + * + * Name: acinterp.h - Interpreter subcomponent prototypes and defines + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACINTERP_H__ +#define __ACINTERP_H__ + + +#define ACPI_WALK_OPERANDS (&(walk_state->operands [walk_state->num_operands -1])) + + +acpi_status +acpi_ex_resolve_operands ( + u16 opcode, + union acpi_operand_object **stack_ptr, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_check_object_type ( + acpi_object_type type_needed, + acpi_object_type this_type, + void *object); + +/* + * exxface - External interpreter interfaces + */ + +acpi_status +acpi_ex_load_table ( + acpi_table_type table_id); + +acpi_status +acpi_ex_execute_method ( + struct acpi_namespace_node *method_node, + union acpi_operand_object **params, + union acpi_operand_object **return_obj_desc); + + +/* + * exconvrt - object conversion + */ + +acpi_status +acpi_ex_convert_to_integer ( + union acpi_operand_object *obj_desc, + union acpi_operand_object **result_desc, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_convert_to_buffer ( + union acpi_operand_object *obj_desc, + union acpi_operand_object **result_desc, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_convert_to_string ( + union acpi_operand_object *obj_desc, + union acpi_operand_object **result_desc, + u32 base, + u32 max_length, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_convert_to_target_type ( + acpi_object_type destination_type, + union acpi_operand_object *source_desc, + union acpi_operand_object **result_desc, + struct acpi_walk_state *walk_state); + +u32 +acpi_ex_convert_to_ascii ( + acpi_integer integer, + u32 base, + u8 *string, + u8 max_length); + +/* + * exfield - ACPI AML (p-code) execution - field manipulation + */ + +acpi_status +acpi_ex_extract_from_field ( + union acpi_operand_object *obj_desc, + void *buffer, + u32 buffer_length); + +acpi_status +acpi_ex_insert_into_field ( + union acpi_operand_object *obj_desc, + void *buffer, + u32 buffer_length); + +acpi_status +acpi_ex_setup_region ( + union acpi_operand_object *obj_desc, + u32 field_datum_byte_offset); + +acpi_status +acpi_ex_access_region ( + union acpi_operand_object *obj_desc, + u32 field_datum_byte_offset, + acpi_integer *value, + u32 read_write); + +u8 +acpi_ex_register_overflow ( + union acpi_operand_object *obj_desc, + acpi_integer value); + +acpi_status +acpi_ex_field_datum_io ( + union acpi_operand_object *obj_desc, + u32 field_datum_byte_offset, + 
acpi_integer *value, + u32 read_write); + +acpi_status +acpi_ex_write_with_update_rule ( + union acpi_operand_object *obj_desc, + acpi_integer mask, + acpi_integer field_value, + u32 field_datum_byte_offset); + +void +acpi_ex_get_buffer_datum( + acpi_integer *datum, + void *buffer, + u32 buffer_length, + u32 byte_granularity, + u32 buffer_offset); + +void +acpi_ex_set_buffer_datum ( + acpi_integer merged_datum, + void *buffer, + u32 buffer_length, + u32 byte_granularity, + u32 buffer_offset); + +acpi_status +acpi_ex_read_data_from_field ( + struct acpi_walk_state *walk_state, + union acpi_operand_object *obj_desc, + union acpi_operand_object **ret_buffer_desc); + +acpi_status +acpi_ex_write_data_to_field ( + union acpi_operand_object *source_desc, + union acpi_operand_object *obj_desc, + union acpi_operand_object **result_desc); + +/* + * exmisc - ACPI AML (p-code) execution - specific opcodes + */ + +acpi_status +acpi_ex_opcode_3A_0T_0R ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_opcode_3A_1T_1R ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_opcode_6A_0T_1R ( + struct acpi_walk_state *walk_state); + +u8 +acpi_ex_do_match ( + u32 match_op, + acpi_integer package_value, + acpi_integer match_value); + +acpi_status +acpi_ex_get_object_reference ( + union acpi_operand_object *obj_desc, + union acpi_operand_object **return_desc, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_resolve_multiple ( + struct acpi_walk_state *walk_state, + union acpi_operand_object *operand, + acpi_object_type *return_type, + union acpi_operand_object **return_desc); + +acpi_status +acpi_ex_concat_template ( + union acpi_operand_object *obj_desc, + union acpi_operand_object *obj_desc2, + union acpi_operand_object **actual_return_desc, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_do_concatenate ( + union acpi_operand_object *obj_desc, + union acpi_operand_object *obj_desc2, + union acpi_operand_object **actual_return_desc, + struct acpi_walk_state *walk_state); + +u8 +acpi_ex_do_logical_op ( + u16 opcode, + acpi_integer operand0, + acpi_integer operand1); + +acpi_integer +acpi_ex_do_math_op ( + u16 opcode, + acpi_integer operand0, + acpi_integer operand1); + +acpi_status +acpi_ex_create_mutex ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_create_processor ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_create_power_resource ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_create_region ( + u8 *aml_start, + u32 aml_length, + u8 region_space, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_create_table_region ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_create_event ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_create_alias ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_create_method ( + u8 *aml_start, + u32 aml_length, + struct acpi_walk_state *walk_state); + + +/* + * exconfig - dynamic table load/unload + */ + +acpi_status +acpi_ex_add_table ( + struct acpi_table_header *table, + struct acpi_namespace_node *parent_node, + union acpi_operand_object **ddb_handle); + +acpi_status +acpi_ex_load_op ( + union acpi_operand_object *obj_desc, + union acpi_operand_object *target, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_load_table_op ( + struct acpi_walk_state *walk_state, + union acpi_operand_object **return_desc); + +acpi_status +acpi_ex_unload_table ( + union acpi_operand_object *ddb_handle); + + +/* + * 
exmutex - mutex support + */ + +acpi_status +acpi_ex_acquire_mutex ( + union acpi_operand_object *time_desc, + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_release_mutex ( + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state); + +void +acpi_ex_release_all_mutexes ( + struct acpi_thread_state *thread); + +void +acpi_ex_unlink_mutex ( + union acpi_operand_object *obj_desc); + +void +acpi_ex_link_mutex ( + union acpi_operand_object *obj_desc, + struct acpi_thread_state *thread); + +/* + * exprep - ACPI AML (p-code) execution - prep utilities + */ + +acpi_status +acpi_ex_prep_common_field_object ( + union acpi_operand_object *obj_desc, + u8 field_flags, + u8 field_attribute, + u32 field_bit_position, + u32 field_bit_length); + +acpi_status +acpi_ex_prep_field_value ( + struct acpi_create_field_info *info); + +/* + * exsystem - Interface to OS services + */ + +acpi_status +acpi_ex_system_do_notify_op ( + union acpi_operand_object *value, + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ex_system_do_suspend( + u32 time); + +acpi_status +acpi_ex_system_do_stall ( + u32 time); + +acpi_status +acpi_ex_system_acquire_mutex( + union acpi_operand_object *time, + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ex_system_release_mutex( + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ex_system_signal_event( + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ex_system_wait_event( + union acpi_operand_object *time, + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ex_system_reset_event( + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ex_system_wait_semaphore ( + acpi_handle semaphore, + u16 timeout); + + +/* + * exmonadic - ACPI AML (p-code) execution, monadic operators + */ + +acpi_status +acpi_ex_opcode_1A_0T_0R ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_opcode_1A_0T_1R ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_opcode_1A_1T_1R ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_opcode_1A_1T_0R ( + struct acpi_walk_state *walk_state); + +/* + * exdyadic - ACPI AML (p-code) execution, dyadic operators + */ + +acpi_status +acpi_ex_opcode_2A_0T_0R ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_opcode_2A_0T_1R ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_opcode_2A_1T_1R ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_opcode_2A_2T_1R ( + struct acpi_walk_state *walk_state); + + +/* + * exresolv - Object resolution and get value functions + */ + +acpi_status +acpi_ex_resolve_to_value ( + union acpi_operand_object **stack_ptr, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_resolve_node_to_value ( + struct acpi_namespace_node **stack_ptr, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_resolve_object_to_value ( + union acpi_operand_object **stack_ptr, + struct acpi_walk_state *walk_state); + + +/* + * exdump - Scanner debug output routines + */ + +void +acpi_ex_dump_operand ( + union acpi_operand_object *entry_desc); + +void +acpi_ex_dump_operands ( + union acpi_operand_object **operands, + acpi_interpreter_mode interpreter_mode, + char *ident, + u32 num_levels, + char *note, + char *module_name, + u32 line_number); + +void +acpi_ex_dump_object_descriptor ( + union acpi_operand_object *object, + u32 flags); + +void +acpi_ex_dump_node ( + struct acpi_namespace_node *node, + u32 flags); + +void +acpi_ex_out_string ( + char 
*title, + char *value); + +void +acpi_ex_out_pointer ( + char *title, + void *value); + +void +acpi_ex_out_integer ( + char *title, + u32 value); + +void +acpi_ex_out_address ( + char *title, + acpi_physical_address value); + + +/* + * exnames - interpreter/scanner name load/execute + */ + +char * +acpi_ex_allocate_name_string ( + u32 prefix_count, + u32 num_name_segs); + +u32 +acpi_ex_good_char ( + u32 character); + +acpi_status +acpi_ex_name_segment ( + u8 **in_aml_address, + char *name_string); + +acpi_status +acpi_ex_get_name_string ( + acpi_object_type data_type, + u8 *in_aml_address, + char **out_name_string, + u32 *out_name_length); + +acpi_status +acpi_ex_do_name ( + acpi_object_type data_type, + acpi_interpreter_mode load_exec_mode); + + +/* + * exstore - Object store support + */ + +acpi_status +acpi_ex_store ( + union acpi_operand_object *val_desc, + union acpi_operand_object *dest_desc, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_store_object_to_index ( + union acpi_operand_object *val_desc, + union acpi_operand_object *dest_desc, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_store_object_to_node ( + union acpi_operand_object *source_desc, + struct acpi_namespace_node *node, + struct acpi_walk_state *walk_state); + + +/* + * exstoren + */ + +acpi_status +acpi_ex_resolve_object ( + union acpi_operand_object **source_desc_ptr, + acpi_object_type target_type, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ex_store_object_to_object ( + union acpi_operand_object *source_desc, + union acpi_operand_object *dest_desc, + union acpi_operand_object **new_desc, + struct acpi_walk_state *walk_state); + + +/* + * excopy - object copy + */ + +acpi_status +acpi_ex_store_buffer_to_buffer ( + union acpi_operand_object *source_desc, + union acpi_operand_object *target_desc); + +acpi_status +acpi_ex_store_string_to_string ( + union acpi_operand_object *source_desc, + union acpi_operand_object *target_desc); + +acpi_status +acpi_ex_copy_integer_to_index_field ( + union acpi_operand_object *source_desc, + union acpi_operand_object *target_desc); + +acpi_status +acpi_ex_copy_integer_to_bank_field ( + union acpi_operand_object *source_desc, + union acpi_operand_object *target_desc); + +acpi_status +acpi_ex_copy_data_to_named_field ( + union acpi_operand_object *source_desc, + struct acpi_namespace_node *node); + +acpi_status +acpi_ex_copy_integer_to_buffer_field ( + union acpi_operand_object *source_desc, + union acpi_operand_object *target_desc); + +/* + * exutils - interpreter/scanner utilities + */ + +acpi_status +acpi_ex_enter_interpreter ( + void); + +void +acpi_ex_exit_interpreter ( + void); + +void +acpi_ex_truncate_for32bit_table ( + union acpi_operand_object *obj_desc); + +u8 +acpi_ex_acquire_global_lock ( + u32 rule); + +void +acpi_ex_release_global_lock ( + u8 locked); + +u32 +acpi_ex_digits_needed ( + acpi_integer value, + u32 base); + +void +acpi_ex_eisa_id_to_string ( + u32 numeric_id, + char *out_string); + +void +acpi_ex_unsigned_integer_to_string ( + acpi_integer value, + char *out_string); + + +/* + * exregion - default op_region handlers + */ + +acpi_status +acpi_ex_system_memory_space_handler ( + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context); + +acpi_status +acpi_ex_system_io_space_handler ( + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context); + +acpi_status 
+acpi_ex_pci_config_space_handler (
+ u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer *value,
+ void *handler_context,
+ void *region_context);
+
+acpi_status
+acpi_ex_cmos_space_handler (
+ u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer *value,
+ void *handler_context,
+ void *region_context);
+
+acpi_status
+acpi_ex_pci_bar_space_handler (
+ u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer *value,
+ void *handler_context,
+ void *region_context);
+
+acpi_status
+acpi_ex_embedded_controller_space_handler (
+ u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer *value,
+ void *handler_context,
+ void *region_context);
+
+acpi_status
+acpi_ex_sm_bus_space_handler (
+ u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer *value,
+ void *handler_context,
+ void *region_context);
+
+
+acpi_status
+acpi_ex_data_table_space_handler (
+ u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer *value,
+ void *handler_context,
+ void *region_context);
+
+#endif /* __ACINTERP_H__ */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/aclocal.h linux.21rc5-ac2/include/acpi/aclocal.h
--- linux.21rc5/include/acpi/aclocal.h 1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/include/acpi/aclocal.h 2003-05-29 01:02:16.000000000 +0100
@@ -0,0 +1,982 @@
+/******************************************************************************
+ *
+ * Name: aclocal.h - Internal data types used across the ACPI subsystem
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2003, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACLOCAL_H__ +#define __ACLOCAL_H__ + + +#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ + +typedef void * acpi_mutex; +typedef u32 acpi_mutex_handle; + + +/* Total number of aml opcodes defined */ + +#define AML_NUM_OPCODES 0x7E + + +/***************************************************************************** + * + * Mutex typedefs and structs + * + ****************************************************************************/ + + +/* + * Predefined handles for the mutex objects used within the subsystem + * All mutex objects are automatically created by acpi_ut_mutex_initialize. + * + * The acquire/release ordering protocol is implied via this list. Mutexes + * with a lower value must be acquired before mutexes with a higher value. + * + * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names table also! + */ + +#define ACPI_MTX_EXECUTE 0 +#define ACPI_MTX_INTERPRETER 1 +#define ACPI_MTX_PARSER 2 +#define ACPI_MTX_DISPATCHER 3 +#define ACPI_MTX_TABLES 4 +#define ACPI_MTX_OP_REGIONS 5 +#define ACPI_MTX_NAMESPACE 6 +#define ACPI_MTX_EVENTS 7 +#define ACPI_MTX_HARDWARE 8 +#define ACPI_MTX_CACHES 9 +#define ACPI_MTX_MEMORY 10 +#define ACPI_MTX_DEBUG_CMD_COMPLETE 11 +#define ACPI_MTX_DEBUG_CMD_READY 12 + +#define MAX_MUTEX 12 +#define NUM_MUTEX MAX_MUTEX+1 + + +#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) +#ifdef DEFINE_ACPI_GLOBALS + +/* Names for the mutexes used in the subsystem */ + +static char *acpi_gbl_mutex_names[] = +{ + "ACPI_MTX_Execute", + "ACPI_MTX_Interpreter", + "ACPI_MTX_Parser", + "ACPI_MTX_Dispatcher", + "ACPI_MTX_Tables", + "ACPI_MTX_op_regions", + "ACPI_MTX_Namespace", + "ACPI_MTX_Events", + "ACPI_MTX_Hardware", + "ACPI_MTX_Caches", + "ACPI_MTX_Memory", + "ACPI_MTX_debug_cmd_complete", + "ACPI_MTX_debug_cmd_ready", +}; + +#endif +#endif + + +/* Table for the global mutexes */ + +struct acpi_mutex_info +{ + acpi_mutex mutex; + u32 use_count; + u32 owner_id; +}; + +/* This owner ID means that the mutex is not in use (unlocked) */ + +#define ACPI_MUTEX_NOT_ACQUIRED (u32) (-1) + + +/* Lock flag parameter for various interfaces */ + +#define ACPI_MTX_DO_NOT_LOCK 0 +#define ACPI_MTX_LOCK 1 + + +typedef u16 acpi_owner_id; +#define ACPI_OWNER_TYPE_TABLE 0x0 +#define ACPI_OWNER_TYPE_METHOD 0x1 +#define ACPI_FIRST_METHOD_ID 0x0001 +#define ACPI_FIRST_TABLE_ID 0xF000 + + +/* Field access granularities */ + +#define ACPI_FIELD_BYTE_GRANULARITY 1 +#define ACPI_FIELD_WORD_GRANULARITY 2 +#define ACPI_FIELD_DWORD_GRANULARITY 4 +#define ACPI_FIELD_QWORD_GRANULARITY 8 + +/***************************************************************************** + * + * Namespace typedefs and structs + * + ****************************************************************************/ + + +/* Operational modes of the AML interpreter/scanner */ + +typedef enum +{ + ACPI_IMODE_LOAD_PASS1 = 0x01, + ACPI_IMODE_LOAD_PASS2 = 0x02, + ACPI_IMODE_EXECUTE = 0x0E + +} acpi_interpreter_mode; + + +/* + * The Node describes a named object that appears in the AML 
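+ * (for example a Device, Method, op_region, or Processor declaration).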
+ * An acpi_node is used to store Nodes. + * + * data_type is used to differentiate between internal descriptors, and MUST + * be the first byte in this structure. + */ + +union acpi_name_union +{ + u32 integer; + char ascii[4]; +}; + +struct acpi_namespace_node +{ + u8 descriptor; /* Used to differentiate object descriptor types */ + u8 type; /* Type associated with this name */ + u16 owner_id; + union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */ + + + union acpi_operand_object *object; /* Pointer to attached ACPI object (optional) */ + struct acpi_namespace_node *child; /* First child */ + struct acpi_namespace_node *peer; /* Next peer*/ + u16 reference_count; /* Current count of references and children */ + u8 flags; +}; + + +#define ACPI_ENTRY_NOT_FOUND NULL + + +/* Node flags */ + +#define ANOBJ_RESERVED 0x01 +#define ANOBJ_END_OF_PEER_LIST 0x02 +#define ANOBJ_DATA_WIDTH_32 0x04 /* Parent table is 64-bits */ +#define ANOBJ_METHOD_ARG 0x08 +#define ANOBJ_METHOD_LOCAL 0x10 +#define ANOBJ_METHOD_NO_RETVAL 0x20 +#define ANOBJ_METHOD_SOME_NO_RETVAL 0x40 + +#define ANOBJ_IS_BIT_OFFSET 0x80 + + +/* + * ACPI Table Descriptor. One per ACPI table + */ +struct acpi_table_desc +{ + struct acpi_table_desc *prev; + struct acpi_table_desc *next; + struct acpi_table_desc *installed_desc; + struct acpi_table_header *pointer; + u8 *aml_start; + u64 physical_address; + u32 aml_length; + acpi_size length; + acpi_owner_id table_id; + u8 type; + u8 allocation; + u8 loaded_into_namespace; +}; + +struct acpi_table_list +{ + struct acpi_table_desc *next; + u32 count; +}; + + +struct acpi_find_context +{ + char *search_for; + acpi_handle *list; + u32 *count; +}; + + +struct acpi_ns_search_data +{ + struct acpi_namespace_node *node; +}; + + +/* + * Predefined Namespace items + */ +struct acpi_predefined_names +{ + char *name; + u8 type; + char *val; +}; + + +/* Object types used during package copies */ + + +#define ACPI_COPY_TYPE_SIMPLE 0 +#define ACPI_COPY_TYPE_PACKAGE 1 + +/* Info structure used to convert external<->internal namestrings */ + +struct acpi_namestring_info +{ + char *external_name; + char *next_external_char; + char *internal_name; + u32 length; + u32 num_segments; + u32 num_carats; + u8 fully_qualified; +}; + + +/* Field creation info */ + +struct acpi_create_field_info +{ + struct acpi_namespace_node *region_node; + struct acpi_namespace_node *field_node; + struct acpi_namespace_node *register_node; + struct acpi_namespace_node *data_register_node; + u32 bank_value; + u32 field_bit_position; + u32 field_bit_length; + u8 field_flags; + u8 attribute; + u8 field_type; +}; + + +/***************************************************************************** + * + * Event typedefs and structs + * + ****************************************************************************/ + +/* Information about a GPE, one per each GPE in an array */ + +struct acpi_gpe_event_info +{ + struct acpi_namespace_node *method_node; /* Method node for this GPE level */ + acpi_gpe_handler handler; /* Address of handler, if any */ + void *context; /* Context to be passed to handler */ + struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ + u8 flags; /* Level or Edge */ + u8 bit_mask; /* This GPE within the register */ +}; + +/* Information about a GPE register pair, one per each status/enable pair in an array */ + +struct acpi_gpe_register_info +{ + struct acpi_generic_address status_address; /* Address of status reg */ + struct acpi_generic_address enable_address; /* 
Address of enable reg */ + u8 status; /* Current value of status reg */ + u8 enable; /* Current value of enable reg */ + u8 wake_enable; /* Mask of bits to keep enabled when sleeping */ + u8 base_gpe_number; /* Base GPE number for this register */ +}; + +/* + * Information about a GPE register block, one per each installed block -- + * GPE0, GPE1, and one per each installed GPE Block Device. + */ +struct acpi_gpe_block_info +{ + struct acpi_gpe_block_info *previous; + struct acpi_gpe_block_info *next; + struct acpi_gpe_xrupt_info *xrupt_block; /* Backpointer to interrupt block */ + struct acpi_gpe_register_info *register_info; /* One per GPE register pair */ + struct acpi_gpe_event_info *event_info; /* One for each GPE */ + struct acpi_generic_address block_address; /* Base address of the block */ + u32 register_count; /* Number of register pairs in block */ + u8 block_base_number;/* Base GPE number for this block */ +}; + +/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ + +struct acpi_gpe_xrupt_info +{ + struct acpi_gpe_xrupt_info *previous; + struct acpi_gpe_xrupt_info *next; + struct acpi_gpe_block_info *gpe_block_list_head; /* List of GPE blocks for this xrupt */ + u32 interrupt_level; /* System interrupt level */ +}; + + +typedef acpi_status (*ACPI_GPE_CALLBACK) ( + struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block); + + +/* Information about each particular fixed event */ + +struct acpi_fixed_event_handler +{ + acpi_event_handler handler; /* Address of handler. */ + void *context; /* Context to be passed to handler */ +}; + +struct acpi_fixed_event_info +{ + u8 status_register_id; + u8 enable_register_id; + u16 status_bit_mask; + u16 enable_bit_mask; +}; + +/* Information used during field processing */ + +struct acpi_field_info +{ + u8 skip_field; + u8 field_flag; + u32 pkg_length; +}; + + +/***************************************************************************** + * + * Generic "state" object for stacks + * + ****************************************************************************/ + + +#define ACPI_CONTROL_NORMAL 0xC0 +#define ACPI_CONTROL_CONDITIONAL_EXECUTING 0xC1 +#define ACPI_CONTROL_PREDICATE_EXECUTING 0xC2 +#define ACPI_CONTROL_PREDICATE_FALSE 0xC3 +#define ACPI_CONTROL_PREDICATE_TRUE 0xC4 + + +/* Forward declarations */ +struct acpi_walk_state ; +struct acpi_obj_mutex; +union acpi_parse_object ; + + +#define ACPI_STATE_COMMON /* Two 32-bit fields and a pointer */\ + u8 data_type; /* To differentiate various internal objs */\ + u8 flags; \ + u16 value; \ + u16 state; \ + u16 reserved; \ + void *next; \ + +struct acpi_common_state +{ + ACPI_STATE_COMMON +}; + + +/* + * Update state - used to traverse complex objects such as packages + */ +struct acpi_update_state +{ + ACPI_STATE_COMMON + union acpi_operand_object *object; +}; + + +/* + * Pkg state - used to traverse nested package structures + */ +struct acpi_pkg_state +{ + ACPI_STATE_COMMON + union acpi_operand_object *source_object; + union acpi_operand_object *dest_object; + struct acpi_walk_state *walk_state; + void *this_target_obj; + u32 num_packages; + u16 index; +}; + + +/* + * Control state - one per if/else and while constructs. 
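+ * The opcode field records which construct a given state represents, and
+ * package_end marks where that construct's AML body ends.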
+ * Allows nesting of these constructs + */ +struct acpi_control_state +{ + ACPI_STATE_COMMON + union acpi_parse_object *predicate_op; + u8 *aml_predicate_start; /* Start of if/while predicate */ + u8 *package_end; /* End of if/while block */ + u16 opcode; +}; + + +/* + * Scope state - current scope during namespace lookups + */ +struct acpi_scope_state +{ + ACPI_STATE_COMMON + struct acpi_namespace_node *node; +}; + + +struct acpi_pscope_state +{ + ACPI_STATE_COMMON + union acpi_parse_object *op; /* Current op being parsed */ + u8 *arg_end; /* Current argument end */ + u8 *pkg_end; /* Current package end */ + u32 arg_list; /* Next argument to parse */ + u32 arg_count; /* Number of fixed arguments */ +}; + + +/* + * Thread state - one per thread across multiple walk states. Multiple walk + * states are created when there are nested control methods executing. + */ +struct acpi_thread_state +{ + ACPI_STATE_COMMON + struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */ + union acpi_operand_object *acquired_mutex_list; /* List of all currently acquired mutexes */ + u32 thread_id; /* Running thread ID */ + u16 current_sync_level; /* Mutex Sync (nested acquire) level */ +}; + + +/* + * Result values - used to accumulate the results of nested + * AML arguments + */ +struct acpi_result_values +{ + ACPI_STATE_COMMON + union acpi_operand_object *obj_desc [ACPI_OBJ_NUM_OPERANDS]; + u8 num_results; + u8 last_insert; +}; + + +typedef +acpi_status (*acpi_parse_downwards) ( + struct acpi_walk_state *walk_state, + union acpi_parse_object **out_op); + +typedef +acpi_status (*acpi_parse_upwards) ( + struct acpi_walk_state *walk_state); + + +/* + * Notify info - used to pass info to the deferred notify + * handler/dispatcher. + */ +struct acpi_notify_info +{ + ACPI_STATE_COMMON + struct acpi_namespace_node *node; + union acpi_operand_object *handler_obj; +}; + + +/* Generic state is union of structs above */ + +union acpi_generic_state +{ + struct acpi_common_state common; + struct acpi_control_state control; + struct acpi_update_state update; + struct acpi_scope_state scope; + struct acpi_pscope_state parse_scope; + struct acpi_pkg_state pkg; + struct acpi_thread_state thread; + struct acpi_result_values results; + struct acpi_notify_info notify; +}; + + +/***************************************************************************** + * + * Interpreter typedefs and structs + * + ****************************************************************************/ + +typedef +acpi_status (*ACPI_EXECUTE_OP) ( + struct acpi_walk_state *walk_state); + + +/***************************************************************************** + * + * Parser typedefs and structs + * + ****************************************************************************/ + +/* + * AML opcode, name, and argument layout + */ +struct acpi_opcode_info +{ +#if defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUG_OUTPUT) + char *name; /* Opcode name (disassembler/debug only) */ +#endif + u32 parse_args; /* Grammar/Parse time arguments */ + u32 runtime_args; /* Interpret time arguments */ + u32 flags; /* Misc flags */ + u8 object_type; /* Corresponding internal object type */ + u8 class; /* Opcode class */ + u8 type; /* Opcode type */ +}; + + +union acpi_parse_value +{ + acpi_integer integer; /* Integer constant (Up to 64 bits) */ + struct uint64_struct integer64; /* Structure overlay for 2 32-bit Dwords */ + u32 size; /* bytelist or field size */ + char *string; /* NULL terminated string */ + u8 *buffer; /* buffer 
or string */ + char *name; /* NULL terminated string */ + union acpi_parse_object *arg; /* arguments and contained ops */ +}; + + +#define ACPI_PARSE_COMMON \ + u8 data_type; /* To differentiate various internal objs */\ + u8 flags; /* Type of Op */\ + u16 aml_opcode; /* AML opcode */\ + u32 aml_offset; /* Offset of declaration in AML */\ + union acpi_parse_object *parent; /* Parent op */\ + union acpi_parse_object *next; /* Next op */\ + ACPI_DISASM_ONLY_MEMBERS (\ + u8 disasm_flags; /* Used during AML disassembly */\ + u8 disasm_opcode; /* Subtype used for disassembly */\ + char aml_op_name[16]) /* Op name (debug only) */\ + /* NON-DEBUG members below: */\ + struct acpi_namespace_node *node; /* For use by interpreter */\ + union acpi_parse_value value; /* Value or args associated with the opcode */\ + + +#define ACPI_DASM_BUFFER 0x00 +#define ACPI_DASM_RESOURCE 0x01 +#define ACPI_DASM_STRING 0x02 +#define ACPI_DASM_UNICODE 0x03 +#define ACPI_DASM_EISAID 0x04 +#define ACPI_DASM_MATCHOP 0x05 + +/* + * generic operation (for example: If, While, Store) + */ +struct acpi_parse_obj_common +{ + ACPI_PARSE_COMMON +}; + + +/* + * Extended Op for named ops (Scope, Method, etc.), deferred ops (Methods and op_regions), + * and bytelists. + */ +struct acpi_parse_obj_named +{ + ACPI_PARSE_COMMON + u8 *path; + u8 *data; /* AML body or bytelist data */ + u32 length; /* AML length */ + u32 name; /* 4-byte name or zero if no name */ +}; + + +/* The parse node is the fundamental element of the parse tree */ + +struct acpi_parse_obj_asl +{ + ACPI_PARSE_COMMON + union acpi_parse_object *child; + union acpi_parse_object *parent_method; + char *filename; + char *external_name; + char *namepath; + char name_seg[4]; + u32 extra_value; + u32 column; + u32 line_number; + u32 logical_line_number; + u32 logical_byte_offset; + u32 end_line; + u32 end_logical_line; + u32 acpi_btype; + u32 aml_length; + u32 aml_subtree_length; + u32 final_aml_length; + u32 final_aml_offset; + u32 compile_flags; + u16 parse_opcode; + u8 aml_opcode_length; + u8 aml_pkg_len_bytes; + u8 extra; + char parse_op_name[12]; +}; + + +union acpi_parse_object +{ + struct acpi_parse_obj_common common; + struct acpi_parse_obj_named named; + struct acpi_parse_obj_asl asl; +}; + + +/* + * Parse state - one state per parser invocation and each control + * method. 
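+ * The aml member is the parse cursor: it starts at aml_start and advances
+ * toward aml_end as opcodes are consumed, so ACPI_PTR_DIFF (aml_end, aml)
+ * gives the number of AML bytes still to be parsed.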
+ */ +struct acpi_parse_state +{ + u32 aml_size; + u8 *aml_start; /* First AML byte */ + u8 *aml; /* Next AML byte */ + u8 *aml_end; /* (last + 1) AML byte */ + u8 *pkg_start; /* Current package begin */ + u8 *pkg_end; /* Current package end */ + union acpi_parse_object *start_op; /* Root of parse tree */ + struct acpi_namespace_node *start_node; + union acpi_generic_state *scope; /* Current scope */ + union acpi_parse_object *start_scope; +}; + + +/* Parse object flags */ + +#define ACPI_PARSEOP_GENERIC 0x01 +#define ACPI_PARSEOP_NAMED 0x02 +#define ACPI_PARSEOP_DEFERRED 0x04 +#define ACPI_PARSEOP_BYTELIST 0x08 +#define ACPI_PARSEOP_IN_CACHE 0x80 + +/* Parse object disasm_flags */ + +#define ACPI_PARSEOP_IGNORE 0x01 +#define ACPI_PARSEOP_PARAMLIST 0x02 +#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04 +#define ACPI_PARSEOP_SPECIAL 0x10 + + +/***************************************************************************** + * + * Hardware (ACPI registers) and PNP + * + ****************************************************************************/ + +#define PCI_ROOT_HID_STRING "PNP0A03" + +struct acpi_bit_register_info +{ + u8 parent_register; + u8 bit_position; + u16 access_bit_mask; +}; + + +/* + * Register IDs + * These are the full ACPI registers + */ +#define ACPI_REGISTER_PM1_STATUS 0x01 +#define ACPI_REGISTER_PM1_ENABLE 0x02 +#define ACPI_REGISTER_PM1_CONTROL 0x03 +#define ACPI_REGISTER_PM1A_CONTROL 0x04 +#define ACPI_REGISTER_PM1B_CONTROL 0x05 +#define ACPI_REGISTER_PM2_CONTROL 0x06 +#define ACPI_REGISTER_PM_TIMER 0x07 +#define ACPI_REGISTER_PROCESSOR_BLOCK 0x08 +#define ACPI_REGISTER_SMI_COMMAND_BLOCK 0x09 + + +/* Masks used to access the bit_registers */ + +#define ACPI_BITMASK_TIMER_STATUS 0x0001 +#define ACPI_BITMASK_BUS_MASTER_STATUS 0x0010 +#define ACPI_BITMASK_GLOBAL_LOCK_STATUS 0x0020 +#define ACPI_BITMASK_POWER_BUTTON_STATUS 0x0100 +#define ACPI_BITMASK_SLEEP_BUTTON_STATUS 0x0200 +#define ACPI_BITMASK_RT_CLOCK_STATUS 0x0400 +#define ACPI_BITMASK_WAKE_STATUS 0x8000 + +#define ACPI_BITMASK_ALL_FIXED_STATUS (ACPI_BITMASK_TIMER_STATUS | \ + ACPI_BITMASK_BUS_MASTER_STATUS | \ + ACPI_BITMASK_GLOBAL_LOCK_STATUS | \ + ACPI_BITMASK_POWER_BUTTON_STATUS | \ + ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ + ACPI_BITMASK_RT_CLOCK_STATUS | \ + ACPI_BITMASK_WAKE_STATUS) + +#define ACPI_BITMASK_TIMER_ENABLE 0x0001 +#define ACPI_BITMASK_GLOBAL_LOCK_ENABLE 0x0020 +#define ACPI_BITMASK_POWER_BUTTON_ENABLE 0x0100 +#define ACPI_BITMASK_SLEEP_BUTTON_ENABLE 0x0200 +#define ACPI_BITMASK_RT_CLOCK_ENABLE 0x0400 + +#define ACPI_BITMASK_SCI_ENABLE 0x0001 +#define ACPI_BITMASK_BUS_MASTER_RLD 0x0002 +#define ACPI_BITMASK_GLOBAL_LOCK_RELEASE 0x0004 +#define ACPI_BITMASK_SLEEP_TYPE_X 0x1C00 +#define ACPI_BITMASK_SLEEP_ENABLE 0x2000 + +#define ACPI_BITMASK_ARB_DISABLE 0x0001 + + +/* Raw bit position of each bit_register */ + +#define ACPI_BITPOSITION_TIMER_STATUS 0x00 +#define ACPI_BITPOSITION_BUS_MASTER_STATUS 0x04 +#define ACPI_BITPOSITION_GLOBAL_LOCK_STATUS 0x05 +#define ACPI_BITPOSITION_POWER_BUTTON_STATUS 0x08 +#define ACPI_BITPOSITION_SLEEP_BUTTON_STATUS 0x09 +#define ACPI_BITPOSITION_RT_CLOCK_STATUS 0x0A +#define ACPI_BITPOSITION_WAKE_STATUS 0x0F + +#define ACPI_BITPOSITION_TIMER_ENABLE 0x00 +#define ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE 0x05 +#define ACPI_BITPOSITION_POWER_BUTTON_ENABLE 0x08 +#define ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE 0x09 +#define ACPI_BITPOSITION_RT_CLOCK_ENABLE 0x0A + +#define ACPI_BITPOSITION_SCI_ENABLE 0x00 +#define ACPI_BITPOSITION_BUS_MASTER_RLD 0x01 +#define 
ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE 0x02 +#define ACPI_BITPOSITION_SLEEP_TYPE_X 0x0A +#define ACPI_BITPOSITION_SLEEP_ENABLE 0x0D + +#define ACPI_BITPOSITION_ARB_DISABLE 0x00 + + +/***************************************************************************** + * + * Resource descriptors + * + ****************************************************************************/ + + +/* resource_type values */ + +#define ACPI_RESOURCE_TYPE_MEMORY_RANGE 0 +#define ACPI_RESOURCE_TYPE_IO_RANGE 1 +#define ACPI_RESOURCE_TYPE_BUS_NUMBER_RANGE 2 + +/* Resource descriptor types and masks */ + +#define ACPI_RDESC_TYPE_LARGE 0x80 +#define ACPI_RDESC_TYPE_SMALL 0x00 + +#define ACPI_RDESC_TYPE_MASK 0x80 +#define ACPI_RDESC_SMALL_MASK 0x78 /* Only bits 6:3 contain the type */ + + +/* + * Small resource descriptor types + * Note: The 3 length bits (2:0) must be zero + */ +#define ACPI_RDESC_TYPE_IRQ_FORMAT 0x20 +#define ACPI_RDESC_TYPE_DMA_FORMAT 0x28 +#define ACPI_RDESC_TYPE_START_DEPENDENT 0x30 +#define ACPI_RDESC_TYPE_END_DEPENDENT 0x38 +#define ACPI_RDESC_TYPE_IO_PORT 0x40 +#define ACPI_RDESC_TYPE_FIXED_IO_PORT 0x48 +#define ACPI_RDESC_TYPE_SMALL_VENDOR 0x70 +#define ACPI_RDESC_TYPE_END_TAG 0x78 + +/* + * Large resource descriptor types + */ + +#define ACPI_RDESC_TYPE_MEMORY_24 0x81 +#define ACPI_RDESC_TYPE_GENERAL_REGISTER 0x82 +#define ACPI_RDESC_TYPE_LARGE_VENDOR 0x84 +#define ACPI_RDESC_TYPE_MEMORY_32 0x85 +#define ACPI_RDESC_TYPE_FIXED_MEMORY_32 0x86 +#define ACPI_RDESC_TYPE_DWORD_ADDRESS_SPACE 0x87 +#define ACPI_RDESC_TYPE_WORD_ADDRESS_SPACE 0x88 +#define ACPI_RDESC_TYPE_EXTENDED_XRUPT 0x89 +#define ACPI_RDESC_TYPE_QWORD_ADDRESS_SPACE 0x8A + + +/* String version of device HIDs and UIDs */ + +#define ACPI_DEVICE_ID_LENGTH 0x09 + +struct acpi_device_id +{ + char buffer[ACPI_DEVICE_ID_LENGTH]; +}; + + +/***************************************************************************** + * + * Miscellaneous + * + ****************************************************************************/ + +#define ACPI_ASCII_ZERO 0x30 + + +/***************************************************************************** + * + * Debugger + * + ****************************************************************************/ + +struct acpi_db_method_info +{ + acpi_handle thread_gate; + char *name; + char **args; + u32 flags; + u32 num_loops; + char pathname[128]; +}; + +struct acpi_integrity_info +{ + u32 nodes; + u32 objects; +}; + + +#define ACPI_DB_REDIRECTABLE_OUTPUT 0x01 +#define ACPI_DB_CONSOLE_OUTPUT 0x02 +#define ACPI_DB_DUPLICATE_OUTPUT 0x03 + + +/***************************************************************************** + * + * Debug + * + ****************************************************************************/ + +struct acpi_debug_print_info +{ + u32 component_id; + char *proc_name; + char *module_name; +}; + + +/* Entry for a memory allocation (debug only) */ + +#define ACPI_MEM_MALLOC 0 +#define ACPI_MEM_CALLOC 1 +#define ACPI_MAX_MODULE_NAME 16 + +#define ACPI_COMMON_DEBUG_MEM_HEADER \ + struct acpi_debug_mem_block *previous; \ + struct acpi_debug_mem_block *next; \ + u32 size; \ + u32 component; \ + u32 line; \ + char module[ACPI_MAX_MODULE_NAME]; \ + u8 alloc_type; + +struct acpi_debug_mem_header +{ + ACPI_COMMON_DEBUG_MEM_HEADER +}; + +struct acpi_debug_mem_block +{ + ACPI_COMMON_DEBUG_MEM_HEADER + u64 user_space; +}; + + +#define ACPI_MEM_LIST_GLOBAL 0 +#define ACPI_MEM_LIST_NSNODE 1 + +#define ACPI_MEM_LIST_FIRST_CACHE_LIST 2 +#define ACPI_MEM_LIST_STATE 2 +#define ACPI_MEM_LIST_PSNODE 3 +#define 
ACPI_MEM_LIST_PSNODE_EXT 4 +#define ACPI_MEM_LIST_OPERAND 5 +#define ACPI_MEM_LIST_WALK 6 +#define ACPI_MEM_LIST_MAX 6 +#define ACPI_NUM_MEM_LISTS 7 + + +struct acpi_memory_list +{ + void *list_head; + u16 link_offset; + u16 max_cache_depth; + u16 cache_depth; + u16 object_size; + +#ifdef ACPI_DBG_TRACK_ALLOCATIONS + + /* Statistics for debug memory tracking only */ + + u32 total_allocated; + u32 total_freed; + u32 current_total_size; + u32 cache_requests; + u32 cache_hits; + char *list_name; +#endif +}; + + +#endif /* __ACLOCAL_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acmacros.h linux.21rc5-ac2/include/acpi/acmacros.h --- linux.21rc5/include/acpi/acmacros.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acmacros.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,690 @@ +/****************************************************************************** + * + * Name: acmacros.h - C macros for the entire subsystem. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACMACROS_H__ +#define __ACMACROS_H__ + + +/* + * Data manipulation macros + */ + +#define ACPI_LOWORD(l) ((u16)(u32)(l)) +#define ACPI_HIWORD(l) ((u16)((((u32)(l)) >> 16) & 0xFFFF)) +#define ACPI_LOBYTE(l) ((u8)(u16)(l)) +#define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF)) + + +#if ACPI_MACHINE_WIDTH == 16 + +/* + * For 16-bit addresses, we have to assume that the upper 32 bits + * are zero. 
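+ * ACPI_HIDWORD therefore evaluates to a constant zero below, and
+ * ACPI_STORE_ADDRESS simply discards anything above the low 32 bits.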
+ */
+#define ACPI_LODWORD(l) ((u32)(l))
+#define ACPI_HIDWORD(l) ((u32)(0))
+
+#define ACPI_GET_ADDRESS(a) ((a).lo)
+#define ACPI_STORE_ADDRESS(a,b) {(a).hi=0;(a).lo=(u32)(b);}
+#define ACPI_VALID_ADDRESS(a) ((a).hi | (a).lo)
+
+#else
+#ifdef ACPI_NO_INTEGER64_SUPPORT
+/*
+ * acpi_integer is 32-bits, no 64-bit support on this platform
+ */
+#define ACPI_LODWORD(l) ((u32)(l))
+#define ACPI_HIDWORD(l) ((u32)(0))
+
+#define ACPI_GET_ADDRESS(a) (a)
+#define ACPI_STORE_ADDRESS(a,b) ((a)=(b))
+#define ACPI_VALID_ADDRESS(a) (a)
+
+#else
+
+/*
+ * Full 64-bit address/integer on both 32-bit and 64-bit platforms
+ */
+#define ACPI_LODWORD(l) ((u32)(u64)(l))
+#define ACPI_HIDWORD(l) ((u32)(((*(struct uint64_struct *)(void *)(&l))).hi))
+
+#define ACPI_GET_ADDRESS(a) (a)
+#define ACPI_STORE_ADDRESS(a,b) ((a)=(acpi_physical_address)(b))
+#define ACPI_VALID_ADDRESS(a) (a)
+#endif
+#endif
+
+ /*
+ * Extract a byte of data using a pointer. Any more than a byte and we
+ * get into potential alignment issues -- see the STORE macros below
+ */
+#define ACPI_GET8(addr) (*(u8*)(addr))
+
+/* Pointer arithmetic */
+
+#define ACPI_PTR_ADD(t,a,b) (t *) (void *)((char *)(a) + (acpi_native_uint)(b))
+#define ACPI_PTR_DIFF(a,b) (acpi_native_uint) ((char *)(a) - (char *)(b))
+
+/* Pointer/Integer type conversions */
+
+#define ACPI_TO_POINTER(i) ACPI_PTR_ADD (void, (void *) NULL,(acpi_native_uint)i)
+#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p,(void *) NULL)
+#define ACPI_OFFSET(d,f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL)
+#define ACPI_FADT_OFFSET(f) ACPI_OFFSET (FADT_DESCRIPTOR, f)
+
+#define ACPI_CAST_PTR(t, p) ((t *)(void *)(p))
+#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **)(void *)(p))
+
+#if ACPI_MACHINE_WIDTH == 16
+#define ACPI_STORE_POINTER(d,s) ACPI_MOVE_32_TO_32(d,s)
+#define ACPI_PHYSADDR_TO_PTR(i) (void *)(i)
+#define ACPI_PTR_TO_PHYSADDR(i) (u32) (char *)(i)
+#else
+#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
+#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
+#endif
+
+/*
+ * Macros for moving data around to/from buffers that are possibly unaligned.
+ * If the hardware supports the transfer of unaligned data, just do the store.
+ * Otherwise, we have to move one byte at a time.
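+ *
+ * The macro names encode the widths involved; for example,
+ * ACPI_MOVE_32_TO_64 (&dest64, &src32) zero-extends a possibly
+ * unaligned 32-bit source into a 64-bit destination, one byte
+ * at a time when misaligned transfers are not supported.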
+ */ + +#ifdef ACPI_BIG_ENDIAN +/* + * Macros for big-endian machines + */ + +/* This macro sets a buffer index, starting from the end of the buffer */ + +#define ACPI_BUFFER_INDEX(buf_len,buf_offset,byte_gran) ((buf_len) - (((buf_offset)+1) * (byte_gran))) + +/* These macros reverse the bytes during the move, converting little-endian to big endian */ + + /* Big Endian <== Little Endian */ + /* Hi...Lo Lo...Hi */ +/* 16-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_16_TO_16(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\ + (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];} + +#define ACPI_MOVE_16_TO_32(d,s) {(*(u32 *)(void *)(d))=0;\ + ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ + ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} + +#define ACPI_MOVE_16_TO_64(d,s) {(*(u64 *)(void *)(d))=0;\ + ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\ + ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];} + +/* 32-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ + +#define ACPI_MOVE_32_TO_32(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\ + (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\ + (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ + (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} + +#define ACPI_MOVE_32_TO_64(d,s) {(*(u64 *)(void *)(d))=0;\ + ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\ + ((u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\ + ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\ + ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];} + +/* 64-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ + +#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */ + +#define ACPI_MOVE_64_TO_64(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[7];\ + (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[6];\ + (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[5];\ + (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[4];\ + (( u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\ + (( u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\ + (( u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\ + (( u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];} +#else +/* + * Macros for little-endian machines + */ + +/* This macro sets a buffer index, starting from the beginning of the buffer */ + +#define ACPI_BUFFER_INDEX(buf_len,buf_offset,byte_gran) (buf_offset) + +#ifdef ACPI_MISALIGNED_TRANSFERS + +/* The hardware supports unaligned transfers, just do the little-endian move */ + +#if ACPI_MACHINE_WIDTH == 16 + +/* No 64-bit integers */ +/* 16-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s) +#define ACPI_MOVE_16_TO_32(d,s) *(u32 *)(void *)(d) = *(u16 *)(void *)(s) +#define ACPI_MOVE_16_TO_64(d,s) ACPI_MOVE_16_TO_32(d,s) + +/* 32-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ +#define ACPI_MOVE_32_TO_32(d,s) *(u32 *)(void *)(d) = *(u32 *)(void *)(s) +#define ACPI_MOVE_32_TO_64(d,s) ACPI_MOVE_32_TO_32(d,s) + +/* 64-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ +#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */ +#define ACPI_MOVE_64_TO_64(d,s) ACPI_MOVE_32_TO_32(d,s) + +#else +/* 16-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s) +#define ACPI_MOVE_16_TO_32(d,s) 
*(u32 *)(void *)(d) = *(u16 *)(void *)(s) +#define ACPI_MOVE_16_TO_64(d,s) *(u64 *)(void *)(d) = *(u16 *)(void *)(s) + +/* 32-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ +#define ACPI_MOVE_32_TO_32(d,s) *(u32 *)(void *)(d) = *(u32 *)(void *)(s) +#define ACPI_MOVE_32_TO_64(d,s) *(u64 *)(void *)(d) = *(u32 *)(void *)(s) + +/* 64-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ +#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */ +#define ACPI_MOVE_64_TO_64(d,s) *(u64 *)(void *)(d) = *(u64 *)(void *)(s) +#endif + +#else +/* + * The hardware does not support unaligned transfers. We must move the + * data one byte at a time. These macros work whether the source or + * the destination (or both) is/are unaligned. (Little-endian move) + */ + +/* 16-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_16_TO_16(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\ + (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];} + +#define ACPI_MOVE_16_TO_32(d,s) {(*(u32 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);} +#define ACPI_MOVE_16_TO_64(d,s) {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);} + +/* 32-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ + +#define ACPI_MOVE_32_TO_32(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\ + (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\ + (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\ + (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];} + +#define ACPI_MOVE_32_TO_64(d,s) {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_32_TO_32(d,s);} + +/* 64-bit source, 16/32/64 destination */ + +#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ +#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */ +#define ACPI_MOVE_64_TO_64(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\ + (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\ + (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\ + (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];\ + (( u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[4];\ + (( u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[5];\ + (( u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[6];\ + (( u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[7];} +#endif +#endif + +/* Macros based on machine integer width */ + +#if ACPI_MACHINE_WIDTH == 16 +#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) + +#elif ACPI_MACHINE_WIDTH == 32 +#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_32_TO_16(d,s) + +#elif ACPI_MACHINE_WIDTH == 64 +#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_64_TO_16(d,s) + +#else +#error unknown ACPI_MACHINE_WIDTH +#endif + + +/* + * Fast power-of-two math macros for non-optimized compilers + */ + +#define _ACPI_DIV(value,power_of2) ((u32) ((value) >> (power_of2))) +#define _ACPI_MUL(value,power_of2) ((u32) ((value) << (power_of2))) +#define _ACPI_MOD(value,divisor) ((u32) ((value) & ((divisor) -1))) + +#define ACPI_DIV_2(a) _ACPI_DIV(a,1) +#define ACPI_MUL_2(a) _ACPI_MUL(a,1) +#define ACPI_MOD_2(a) _ACPI_MOD(a,2) + +#define ACPI_DIV_4(a) _ACPI_DIV(a,2) +#define ACPI_MUL_4(a) _ACPI_MUL(a,2) +#define ACPI_MOD_4(a) _ACPI_MOD(a,4) + +#define ACPI_DIV_8(a) _ACPI_DIV(a,3) +#define ACPI_MUL_8(a) _ACPI_MUL(a,3) +#define ACPI_MOD_8(a) _ACPI_MOD(a,8) + +#define ACPI_DIV_16(a) _ACPI_DIV(a,4) +#define ACPI_MUL_16(a) _ACPI_MUL(a,4) +#define ACPI_MOD_16(a) _ACPI_MOD(a,16) + + +/* + * Rounding 
macros (Power of two boundaries only)
+ */
+#define ACPI_ROUND_DOWN(value,boundary) (((acpi_native_uint)(value)) & (~(((acpi_native_uint) boundary)-1)))
+#define ACPI_ROUND_UP(value,boundary) ((((acpi_native_uint)(value)) + (((acpi_native_uint) boundary)-1)) & (~(((acpi_native_uint) boundary)-1)))
+
+#define ACPI_ROUND_DOWN_TO_32_BITS(a) ACPI_ROUND_DOWN(a,4)
+#define ACPI_ROUND_DOWN_TO_64_BITS(a) ACPI_ROUND_DOWN(a,8)
+#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a) ACPI_ROUND_DOWN(a,ALIGNED_ADDRESS_BOUNDARY)
+
+#define ACPI_ROUND_UP_to_32_bITS(a) ACPI_ROUND_UP(a,4)
+#define ACPI_ROUND_UP_to_64_bITS(a) ACPI_ROUND_UP(a,8)
+#define ACPI_ROUND_UP_TO_NATIVE_WORD(a) ACPI_ROUND_UP(a,ALIGNED_ADDRESS_BOUNDARY)
+
+
+#define ACPI_ROUND_BITS_UP_TO_BYTES(a) ACPI_DIV_8((a) + 7)
+#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a) ACPI_DIV_8((a))
+
+#define ACPI_ROUND_UP_TO_1K(a) (((a) + 1023) >> 10)
+
+/* Generic (non-power-of-two) rounding */
+
+#define ACPI_ROUND_UP_TO(value,boundary) (((value) + ((boundary)-1)) / (boundary))
+
+/*
+ * Bitmask creation
+ * Bit positions start at zero.
+ * MASK_BITS_ABOVE creates a mask starting AT the position and above
+ * MASK_BITS_BELOW creates a mask starting one bit BELOW the position
+ */
+#define ACPI_MASK_BITS_ABOVE(position) (~((ACPI_INTEGER_MAX) << ((u32) (position))))
+#define ACPI_MASK_BITS_BELOW(position) ((ACPI_INTEGER_MAX) << ((u32) (position)))
+
+#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
+
+/* Macros for GAS addressing */
+
+#if ACPI_MACHINE_WIDTH != 16
+
+#define ACPI_PCI_DEVICE(a) (u16) ((ACPI_HIDWORD ((a))) & 0x0000FFFF)
+#define ACPI_PCI_FUNCTION(a) (u16) ((ACPI_LODWORD ((a))) >> 16)
+#define ACPI_PCI_REGISTER(a) (u16) ((ACPI_LODWORD ((a))) & 0x0000FFFF)
+
+#else
+
+/* No support for GAS and PCI IDs in 16-bit mode */
+
+#define ACPI_PCI_FUNCTION(a) (u16) ((a) & 0xFFFF0000)
+#define ACPI_PCI_DEVICE(a) (u16) ((a) & 0x0000FFFF)
+#define ACPI_PCI_REGISTER(a) (u16) ((a) & 0x0000FFFF)
+
+#endif
+
+
+/* Bitfields within ACPI registers */
+
+#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) ((val << pos) & mask)
+#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)
+
+/*
+ * A struct acpi_namespace_node * can appear in some contexts,
+ * where a pointer to a union acpi_operand_object can also
+ * appear. These macros are used to distinguish them.
+ *
+ * The "Descriptor" field is the first field in both structures.
+ */
+#define ACPI_GET_DESCRIPTOR_TYPE(d) (((union acpi_descriptor *)(void *)(d))->descriptor_id)
+#define ACPI_SET_DESCRIPTOR_TYPE(d,t) (((union acpi_descriptor *)(void *)(d))->descriptor_id = t)
+
+
+/* Macro to test the object type */
+
+#define ACPI_GET_OBJECT_TYPE(d) (((union acpi_operand_object *)(void *)(d))->common.type)
+
+/* Macro to check whether the table flags allow SINGLE or MULTIPLE instances of a table */
+
+#define ACPI_IS_SINGLE_TABLE(x) (((x) & 0x01) == ACPI_TABLE_SINGLE ?
1 : 0) + +/* + * Macros for the master AML opcode table + */ +#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT) +#define ACPI_OP(name,Pargs,Iargs,obj_type,class,type,flags) {name,(u32)(Pargs),(u32)(Iargs),(u32)(flags),obj_type,class,type} +#else +#define ACPI_OP(name,Pargs,Iargs,obj_type,class,type,flags) {(u32)(Pargs),(u32)(Iargs),(u32)(flags),obj_type,class,type} +#endif + +#ifdef ACPI_DISASSEMBLER +#define ACPI_DISASM_ONLY_MEMBERS(a) a; +#else +#define ACPI_DISASM_ONLY_MEMBERS(a) +#endif + +#define ARG_TYPE_WIDTH 5 +#define ARG_1(x) ((u32)(x)) +#define ARG_2(x) ((u32)(x) << (1 * ARG_TYPE_WIDTH)) +#define ARG_3(x) ((u32)(x) << (2 * ARG_TYPE_WIDTH)) +#define ARG_4(x) ((u32)(x) << (3 * ARG_TYPE_WIDTH)) +#define ARG_5(x) ((u32)(x) << (4 * ARG_TYPE_WIDTH)) +#define ARG_6(x) ((u32)(x) << (5 * ARG_TYPE_WIDTH)) + +#define ARGI_LIST1(a) (ARG_1(a)) +#define ARGI_LIST2(a,b) (ARG_1(b)|ARG_2(a)) +#define ARGI_LIST3(a,b,c) (ARG_1(c)|ARG_2(b)|ARG_3(a)) +#define ARGI_LIST4(a,b,c,d) (ARG_1(d)|ARG_2(c)|ARG_3(b)|ARG_4(a)) +#define ARGI_LIST5(a,b,c,d,e) (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a)) +#define ARGI_LIST6(a,b,c,d,e,f) (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a)) + +#define ARGP_LIST1(a) (ARG_1(a)) +#define ARGP_LIST2(a,b) (ARG_1(a)|ARG_2(b)) +#define ARGP_LIST3(a,b,c) (ARG_1(a)|ARG_2(b)|ARG_3(c)) +#define ARGP_LIST4(a,b,c,d) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)) +#define ARGP_LIST5(a,b,c,d,e) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)) +#define ARGP_LIST6(a,b,c,d,e,f) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f)) + +#define GET_CURRENT_ARG_TYPE(list) (list & ((u32) 0x1F)) +#define INCREMENT_ARG_LIST(list) (list >>= ((u32) ARG_TYPE_WIDTH)) + + +/* + * Reporting macros that are never compiled out + */ + +#define ACPI_PARAM_LIST(pl) pl + +/* + * Error reporting. These versions add callers module and line#. Since + * _THIS_MODULE gets compiled out when ACPI_DEBUG_OUTPUT isn't defined, only + * use it in debug mode. + */ + +#ifdef ACPI_DEBUG_OUTPUT + +#define ACPI_REPORT_INFO(fp) {acpi_ut_report_info(_THIS_MODULE,__LINE__,_COMPONENT); \ + acpi_os_printf ACPI_PARAM_LIST(fp);} +#define ACPI_REPORT_ERROR(fp) {acpi_ut_report_error(_THIS_MODULE,__LINE__,_COMPONENT); \ + acpi_os_printf ACPI_PARAM_LIST(fp);} +#define ACPI_REPORT_WARNING(fp) {acpi_ut_report_warning(_THIS_MODULE,__LINE__,_COMPONENT); \ + acpi_os_printf ACPI_PARAM_LIST(fp);} +#define ACPI_REPORT_NSERROR(s,e) acpi_ns_report_error(_THIS_MODULE,__LINE__,_COMPONENT, s, e); + +#define ACPI_REPORT_METHOD_ERROR(s,n,p,e) acpi_ns_report_method_error(_THIS_MODULE,__LINE__,_COMPONENT, s, n, p, e); + +#else + +#define ACPI_REPORT_INFO(fp) {acpi_ut_report_info("ACPI",__LINE__,_COMPONENT); \ + acpi_os_printf ACPI_PARAM_LIST(fp);} +#define ACPI_REPORT_ERROR(fp) {acpi_ut_report_error("ACPI",__LINE__,_COMPONENT); \ + acpi_os_printf ACPI_PARAM_LIST(fp);} +#define ACPI_REPORT_WARNING(fp) {acpi_ut_report_warning("ACPI",__LINE__,_COMPONENT); \ + acpi_os_printf ACPI_PARAM_LIST(fp);} +#define ACPI_REPORT_NSERROR(s,e) acpi_ns_report_error("ACPI",__LINE__,_COMPONENT, s, e); + +#define ACPI_REPORT_METHOD_ERROR(s,n,p,e) acpi_ns_report_method_error("ACPI",__LINE__,_COMPONENT, s, n, p, e); + +#endif + +/* Error reporting. 
These versions pass thru the module and line# */ + +#define _ACPI_REPORT_INFO(a,b,c,fp) {acpi_ut_report_info(a,b,c); \ + acpi_os_printf ACPI_PARAM_LIST(fp);} +#define _ACPI_REPORT_ERROR(a,b,c,fp) {acpi_ut_report_error(a,b,c); \ + acpi_os_printf ACPI_PARAM_LIST(fp);} +#define _ACPI_REPORT_WARNING(a,b,c,fp) {acpi_ut_report_warning(a,b,c); \ + acpi_os_printf ACPI_PARAM_LIST(fp);} + +/* + * Debug macros that are conditionally compiled + */ + +#ifdef ACPI_DEBUG_OUTPUT + +#define ACPI_MODULE_NAME(name) static char ACPI_UNUSED_VAR *_THIS_MODULE = name; + +/* + * Function entry tracing. + * The first parameter should be the procedure name as a quoted string. This is declared + * as a local string ("_proc_name) so that it can be also used by the function exit macros below. + */ + +#define ACPI_FUNCTION_NAME(a) struct acpi_debug_print_info _dbg; \ + _dbg.component_id = _COMPONENT; \ + _dbg.proc_name = a; \ + _dbg.module_name = _THIS_MODULE; + +#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ + acpi_ut_trace(__LINE__,&_dbg) +#define ACPI_FUNCTION_TRACE_PTR(a,b) ACPI_FUNCTION_NAME(a) \ + acpi_ut_trace_ptr(__LINE__,&_dbg,(void *)b) +#define ACPI_FUNCTION_TRACE_U32(a,b) ACPI_FUNCTION_NAME(a) \ + acpi_ut_trace_u32(__LINE__,&_dbg,(u32)b) +#define ACPI_FUNCTION_TRACE_STR(a,b) ACPI_FUNCTION_NAME(a) \ + acpi_ut_trace_str(__LINE__,&_dbg,(char *)b) + +#define ACPI_FUNCTION_ENTRY() acpi_ut_track_stack_ptr() + +/* + * Function exit tracing. + * WARNING: These macros include a return statement. This is usually considered + * bad form, but having a separate exit macro is very ugly and difficult to maintain. + * One of the FUNCTION_TRACE macros above must be used in conjunction with these macros + * so that "_proc_name" is defined. + */ +#ifdef ACPI_USE_DO_WHILE_0 +#define ACPI_DO_WHILE0(a) do a while(0) +#else +#define ACPI_DO_WHILE0(a) a +#endif + +#define return_VOID ACPI_DO_WHILE0 ({acpi_ut_exit(__LINE__,&_dbg);return;}) +#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({acpi_ut_status_exit(__LINE__,&_dbg,(s));return((s));}) +#define return_VALUE(s) ACPI_DO_WHILE0 ({acpi_ut_value_exit(__LINE__,&_dbg,(acpi_integer)(s));return((s));}) +#define return_PTR(s) ACPI_DO_WHILE0 ({acpi_ut_ptr_exit(__LINE__,&_dbg,(u8 *)(s));return((s));}) + +/* Conditional execution */ + +#define ACPI_DEBUG_EXEC(a) a +#define ACPI_NORMAL_EXEC(a) + +#define ACPI_DEBUG_DEFINE(a) a; +#define ACPI_DEBUG_ONLY_MEMBERS(a) a; +#define _VERBOSE_STRUCTURES + + +/* Stack and buffer dumping */ + +#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand(a) +#define ACPI_DUMP_OPERANDS(a,b,c,d,e) acpi_ex_dump_operands(a,b,c,d,e,_THIS_MODULE,__LINE__) + + +#define ACPI_DUMP_ENTRY(a,b) acpi_ns_dump_entry (a,b) +#define ACPI_DUMP_TABLES(a,b) acpi_ns_dump_tables(a,b) +#define ACPI_DUMP_PATHNAME(a,b,c,d) acpi_ns_dump_pathname(a,b,c,d) +#define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a) +#define ACPI_DUMP_BUFFER(a,b) acpi_ut_dump_buffer((u8 *)a,b,DB_BYTE_DISPLAY,_COMPONENT) +#define ACPI_BREAK_MSG(a) acpi_os_signal (ACPI_SIGNAL_BREAKPOINT,(a)) + + +/* + * Generate INT3 on ACPI_ERROR (Debug only!) 
+ */ + +#define ACPI_ERROR_BREAK +#ifdef ACPI_ERROR_BREAK +#define ACPI_BREAK_ON_ERROR(lvl) if ((lvl)&ACPI_ERROR) \ + acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,"Fatal error encountered\n") +#else +#define ACPI_BREAK_ON_ERROR(lvl) +#endif + +/* + * Master debug print macros + * Print iff: + * 1) Debug print for the current component is enabled + * 2) Debug error level or trace level for the print statement is enabled + */ + +#define ACPI_DEBUG_PRINT(pl) acpi_ut_debug_print ACPI_PARAM_LIST(pl) +#define ACPI_DEBUG_PRINT_RAW(pl) acpi_ut_debug_print_raw ACPI_PARAM_LIST(pl) + + +#else +/* + * This is the non-debug case -- make everything go away, + * leaving no executable debug code! + */ + +#define ACPI_MODULE_NAME(name) +#define _THIS_MODULE "" + +#define ACPI_DEBUG_EXEC(a) +#define ACPI_NORMAL_EXEC(a) a; + +#define ACPI_DEBUG_DEFINE(a) +#define ACPI_DEBUG_ONLY_MEMBERS(a) +#define ACPI_FUNCTION_NAME(a) +#define ACPI_FUNCTION_TRACE(a) +#define ACPI_FUNCTION_TRACE_PTR(a,b) +#define ACPI_FUNCTION_TRACE_U32(a,b) +#define ACPI_FUNCTION_TRACE_STR(a,b) +#define ACPI_FUNCTION_EXIT +#define ACPI_FUNCTION_STATUS_EXIT(s) +#define ACPI_FUNCTION_VALUE_EXIT(s) +#define ACPI_FUNCTION_ENTRY() +#define ACPI_DUMP_STACK_ENTRY(a) +#define ACPI_DUMP_OPERANDS(a,b,c,d,e) +#define ACPI_DUMP_ENTRY(a,b) +#define ACPI_DUMP_TABLES(a,b) +#define ACPI_DUMP_PATHNAME(a,b,c,d) +#define ACPI_DUMP_RESOURCE_LIST(a) +#define ACPI_DUMP_BUFFER(a,b) +#define ACPI_DEBUG_PRINT(pl) +#define ACPI_DEBUG_PRINT_RAW(pl) +#define ACPI_BREAK_MSG(a) + +#define return_VOID return +#define return_ACPI_STATUS(s) return(s) +#define return_VALUE(s) return(s) +#define return_PTR(s) return(s) + +#endif + +/* + * Some code only gets executed when the debugger is built in. + * Note that this is entirely independent of whether the + * DEBUG_PRINT stuff (set by ACPI_DEBUG_OUTPUT) is on, or not. 
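+ *
+ * For example, a debugger-only call such as
+ *
+ *	ACPI_DEBUGGER_EXEC (status = acpi_db_initialize ());
+ *
+ * compiles away completely when ACPI_DEBUGGER is not defined.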
+ */ +#ifdef ACPI_DEBUGGER +#define ACPI_DEBUGGER_EXEC(a) a +#else +#define ACPI_DEBUGGER_EXEC(a) +#endif + + +/* + * For 16-bit code, we want to shrink some things even though + * we are using ACPI_DEBUG_OUTPUT to get the debug output + */ +#if ACPI_MACHINE_WIDTH == 16 +#undef ACPI_DEBUG_ONLY_MEMBERS +#undef _VERBOSE_STRUCTURES +#define ACPI_DEBUG_ONLY_MEMBERS(a) +#endif + + +#ifdef ACPI_DEBUG_OUTPUT +/* + * 1) Set name to blanks + * 2) Copy the object name + */ +#define ACPI_ADD_OBJECT_NAME(a,b) ACPI_MEMSET (a->common.name, ' ', sizeof (a->common.name));\ + ACPI_STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name)) +#else + +#define ACPI_ADD_OBJECT_NAME(a,b) +#endif + + +/* + * Memory allocation tracking (DEBUG ONLY) + */ + +#ifndef ACPI_DBG_TRACK_ALLOCATIONS + +/* Memory allocation */ + +#define ACPI_MEM_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_THIS_MODULE,__LINE__) +#define ACPI_MEM_CALLOCATE(a) acpi_ut_callocate((acpi_size)(a), _COMPONENT,_THIS_MODULE,__LINE__) +#define ACPI_MEM_FREE(a) acpi_os_free(a) +#define ACPI_MEM_TRACKING(a) + + +#else + +/* Memory allocation */ + +#define ACPI_MEM_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a),_COMPONENT,_THIS_MODULE,__LINE__) +#define ACPI_MEM_CALLOCATE(a) acpi_ut_callocate_and_track((acpi_size)(a), _COMPONENT,_THIS_MODULE,__LINE__) +#define ACPI_MEM_FREE(a) acpi_ut_free_and_track(a,_COMPONENT,_THIS_MODULE,__LINE__) +#define ACPI_MEM_TRACKING(a) a + +#endif /* ACPI_DBG_TRACK_ALLOCATIONS */ + + +#define ACPI_GET_STACK_POINTER _asm {mov eax, ebx} + +#endif /* ACMACROS_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acnamesp.h linux.21rc5-ac2/include/acpi/acnamesp.h --- linux.21rc5/include/acpi/acnamesp.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acnamesp.h 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,513 @@ +/****************************************************************************** + * + * Name: acnamesp.h - Namespace subcomponent prototypes and defines + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACNAMESP_H__ +#define __ACNAMESP_H__ + + +/* To search the entire name space, pass this as search_base */ + +#define ACPI_NS_ALL ((acpi_handle)0) + +/* + * Elements of acpi_ns_properties are bit significant + * and should be one-to-one with values of acpi_object_type + */ +#define ACPI_NS_NORMAL 0 +#define ACPI_NS_NEWSCOPE 1 /* a definition of this type opens a name scope */ +#define ACPI_NS_LOCAL 2 /* suppress search of enclosing scopes */ + + +/* Definitions of the predefined namespace names */ + +#define ACPI_UNKNOWN_NAME (u32) 0x3F3F3F3F /* Unknown name is "????" */ +#define ACPI_ROOT_NAME (u32) 0x5F5F5F5C /* Root name is "\___" */ +#define ACPI_SYS_BUS_NAME (u32) 0x5F53425F /* Sys bus name is "_SB_" */ + +#define ACPI_NS_ROOT_PATH "\\" +#define ACPI_NS_SYSTEM_BUS "_SB_" + + +/* Flags for acpi_ns_lookup, acpi_ns_search_and_enter */ + +#define ACPI_NS_NO_UPSEARCH 0 +#define ACPI_NS_SEARCH_PARENT 0x01 +#define ACPI_NS_DONT_OPEN_SCOPE 0x02 +#define ACPI_NS_NO_PEER_SEARCH 0x04 +#define ACPI_NS_ERROR_IF_FOUND 0x08 + +#define ACPI_NS_WALK_UNLOCK TRUE +#define ACPI_NS_WALK_NO_UNLOCK FALSE + + +acpi_status +acpi_ns_load_namespace ( + void); + +acpi_status +acpi_ns_initialize_objects ( + void); + +acpi_status +acpi_ns_initialize_devices ( + void); + + +/* Namespace init - nsxfinit */ + +acpi_status +acpi_ns_init_one_device ( + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value); + +acpi_status +acpi_ns_init_one_object ( + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value); + + +acpi_status +acpi_ns_walk_namespace ( + acpi_object_type type, + acpi_handle start_object, + u32 max_depth, + u8 unlock_before_callback, + acpi_walk_callback user_function, + void *context, + void **return_value); + +struct acpi_namespace_node * +acpi_ns_get_next_node ( + acpi_object_type type, + struct acpi_namespace_node *parent, + struct acpi_namespace_node *child); + +void +acpi_ns_delete_namespace_by_owner ( + u16 table_id); + + +/* Namespace loading - nsload */ + +acpi_status +acpi_ns_one_complete_parse ( + u32 pass_number, + struct acpi_table_desc *table_desc); + +acpi_status +acpi_ns_parse_table ( + struct acpi_table_desc *table_desc, + struct acpi_namespace_node *scope); + +acpi_status +acpi_ns_load_table ( + struct acpi_table_desc *table_desc, + struct acpi_namespace_node *node); + +acpi_status +acpi_ns_load_table_by_type ( + acpi_table_type table_type); + + +/* + * Top-level namespace access - nsaccess + */ + +acpi_status +acpi_ns_root_initialize ( + void); + +acpi_status +acpi_ns_lookup ( + union acpi_generic_state *scope_info, + char *name, + acpi_object_type type, + acpi_interpreter_mode interpreter_mode, + u32 flags, + struct acpi_walk_state *walk_state, + struct acpi_namespace_node **ret_node); + + +/* + * Named object allocation/deallocation - nsalloc + */ + +struct acpi_namespace_node * +acpi_ns_create_node ( + u32 name); + +void +acpi_ns_delete_node ( + struct acpi_namespace_node *node); + +void 
+acpi_ns_delete_namespace_subtree (
+ struct acpi_namespace_node *parent_handle);
+
+void
+acpi_ns_detach_object (
+ struct acpi_namespace_node *node);
+
+void
+acpi_ns_delete_children (
+ struct acpi_namespace_node *parent);
+
+int
+acpi_ns_compare_names (
+ char *name1,
+ char *name2);
+
+void
+acpi_ns_remove_reference (
+ struct acpi_namespace_node *node);
+
+
+/*
+ * Namespace modification - nsmodify
+ */
+
+acpi_status
+acpi_ns_unload_namespace (
+ acpi_handle handle);
+
+acpi_status
+acpi_ns_delete_subtree (
+ acpi_handle start_handle);
+
+
+/*
+ * Namespace dump/print utilities - nsdump
+ */
+
+void
+acpi_ns_dump_tables (
+ acpi_handle search_base,
+ u32 max_depth);
+
+void
+acpi_ns_dump_entry (
+ acpi_handle handle,
+ u32 debug_level);
+
+void
+acpi_ns_dump_pathname (
+ acpi_handle handle,
+ char *msg,
+ u32 level,
+ u32 component);
+
+void
+acpi_ns_print_pathname (
+ u32 num_segments,
+ char *pathname);
+
+acpi_status
+acpi_ns_dump_one_device (
+ acpi_handle obj_handle,
+ u32 level,
+ void *context,
+ void **return_value);
+
+void
+acpi_ns_dump_root_devices (
+ void);
+
+acpi_status
+acpi_ns_dump_one_object (
+ acpi_handle obj_handle,
+ u32 level,
+ void *context,
+ void **return_value);
+
+void
+acpi_ns_dump_objects (
+ acpi_object_type type,
+ u8 display_type,
+ u32 max_depth,
+ u32 owner_id,
+ acpi_handle start_handle);
+
+
+/*
+ * Namespace evaluation functions - nseval
+ */
+
+acpi_status
+acpi_ns_evaluate_by_handle (
+ struct acpi_namespace_node *prefix_node,
+ union acpi_operand_object **params,
+ union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_evaluate_by_name (
+ char *pathname,
+ union acpi_operand_object **params,
+ union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_evaluate_relative (
+ struct acpi_namespace_node *prefix_node,
+ char *pathname,
+ union acpi_operand_object **params,
+ union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_execute_control_method (
+ struct acpi_namespace_node *method_node,
+ union acpi_operand_object **params,
+ union acpi_operand_object **return_obj_desc);
+
+acpi_status
+acpi_ns_get_object_value (
+ struct acpi_namespace_node *object_node,
+ union acpi_operand_object **return_obj_desc);
+
+
+/*
+ * Parent/Child/Peer utility functions
+ */
+
+acpi_name
+acpi_ns_find_parent_name (
+ struct acpi_namespace_node *node_to_search);
+
+
+/*
+ * Name and Scope manipulation - nsnames
+ */
+
+u32
+acpi_ns_opens_scope (
+ acpi_object_type type);
+
+void
+acpi_ns_build_external_path (
+ struct acpi_namespace_node *node,
+ acpi_size size,
+ char *name_buffer);
+
+char *
+acpi_ns_get_external_pathname (
+ struct acpi_namespace_node *node);
+
+char *
+acpi_ns_name_of_current_scope (
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ns_handle_to_pathname (
+ acpi_handle target_handle,
+ struct acpi_buffer *buffer);
+
+u8
+acpi_ns_pattern_match (
+ struct acpi_namespace_node *obj_node,
+ char *search_for);
+
+acpi_status
+acpi_ns_get_node_by_path (
+ char *external_pathname,
+ struct acpi_namespace_node *in_prefix_node,
+ u32 flags,
+ struct acpi_namespace_node **out_node);
+
+acpi_size
+acpi_ns_get_pathname_length (
+ struct acpi_namespace_node *node);
+
+
+/*
+ * Object management for namespace nodes - nsobject
+ */
+
+acpi_status
+acpi_ns_attach_object (
+ struct acpi_namespace_node *node,
+ union acpi_operand_object *object,
+ acpi_object_type type);
+
+union acpi_operand_object *
+acpi_ns_get_attached_object (
+ struct acpi_namespace_node *node);
+
+union acpi_operand_object *
+acpi_ns_get_secondary_object ( + union acpi_operand_object *obj_desc); + +acpi_status +acpi_ns_attach_data ( + struct acpi_namespace_node *node, + acpi_object_handler handler, + void *data); + +acpi_status +acpi_ns_detach_data ( + struct acpi_namespace_node *node, + acpi_object_handler handler); + +acpi_status +acpi_ns_get_attached_data ( + struct acpi_namespace_node *node, + acpi_object_handler handler, + void **data); + + +/* + * Namespace searching and entry - nssearch + */ + +acpi_status +acpi_ns_search_and_enter ( + u32 entry_name, + struct acpi_walk_state *walk_state, + struct acpi_namespace_node *node, + acpi_interpreter_mode interpreter_mode, + acpi_object_type type, + u32 flags, + struct acpi_namespace_node **ret_node); + +acpi_status +acpi_ns_search_node ( + u32 entry_name, + struct acpi_namespace_node *node, + acpi_object_type type, + struct acpi_namespace_node **ret_node); + +void +acpi_ns_install_node ( + struct acpi_walk_state *walk_state, + struct acpi_namespace_node *parent_node, + struct acpi_namespace_node *node, + acpi_object_type type); + + +/* + * Utility functions - nsutils + */ + +u8 +acpi_ns_valid_root_prefix ( + char prefix); + +u8 +acpi_ns_valid_path_separator ( + char sep); + +acpi_object_type +acpi_ns_get_type ( + struct acpi_namespace_node *node); + +u32 +acpi_ns_local ( + acpi_object_type type); + +void +acpi_ns_report_error ( + char *module_name, + u32 line_number, + u32 component_id, + char *internal_name, + acpi_status lookup_status); + +void +acpi_ns_report_method_error ( + char *module_name, + u32 line_number, + u32 component_id, + char *message, + struct acpi_namespace_node *node, + char *path, + acpi_status lookup_status); + +void +acpi_ns_print_node_pathname ( + struct acpi_namespace_node *node, + char *msg); + +acpi_status +acpi_ns_build_internal_name ( + struct acpi_namestring_info *info); + +void +acpi_ns_get_internal_name_length ( + struct acpi_namestring_info *info); + +acpi_status +acpi_ns_internalize_name ( + char *dotted_name, + char **converted_name); + +acpi_status +acpi_ns_externalize_name ( + u32 internal_name_length, + char *internal_name, + u32 *converted_name_length, + char **converted_name); + +struct acpi_namespace_node * +acpi_ns_map_handle_to_node ( + acpi_handle handle); + +acpi_handle +acpi_ns_convert_entry_to_handle( + struct acpi_namespace_node *node); + +void +acpi_ns_terminate ( + void); + +struct acpi_namespace_node * +acpi_ns_get_parent_node ( + struct acpi_namespace_node *node); + + +struct acpi_namespace_node * +acpi_ns_get_next_valid_node ( + struct acpi_namespace_node *node); + + +#endif /* __ACNAMESP_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acobject.h linux.21rc5-ac2/include/acpi/acobject.h --- linux.21rc5/include/acpi/acobject.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acobject.h 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,493 @@ + +/****************************************************************************** + * + * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef _ACOBJECT_H +#define _ACOBJECT_H + + +/* + * The union acpi_operand_object is used to pass AML operands from the dispatcher + * to the interpreter, and to keep track of the various handlers such as + * address space handlers and notify handlers. The object is a constant + * size in order to allow it to be cached and reused. + */ + +/******************************************************************************* + * + * Common Descriptors + * + ******************************************************************************/ + +/* + * Common area for all objects. + * + * data_type is used to differentiate between internal descriptors, and MUST + * be the first byte in this structure. 
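+ *
+ * (In this structure that byte is the "descriptor" member defined just
+ * below.) Every object type simply places this common header first;
+ * for example, the Integer object later in this file is just:
+ *
+ *	struct acpi_object_integer
+ *	{
+ *		ACPI_OBJECT_COMMON_HEADER
+ *		acpi_integer value;
+ *	};
+ *
+ * so any union acpi_operand_object can be identified from its common
+ * fields alone.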
+ */ +#define ACPI_OBJECT_COMMON_HEADER /* SIZE/ALIGNMENT: 32 bits, one ptr plus trailing 8-bit flag */\ + u8 descriptor; /* To differentiate various internal objs */\ + u8 type; /* acpi_object_type */\ + u16 reference_count; /* For object deletion management */\ + union acpi_operand_object *next_object; /* Objects linked to parent NS node */\ + u8 flags; \ + +/* Values for flag byte above */ + +#define AOPOBJ_AML_CONSTANT 0x01 +#define AOPOBJ_STATIC_POINTER 0x02 +#define AOPOBJ_DATA_VALID 0x04 +#define AOPOBJ_OBJECT_INITIALIZED 0x08 +#define AOPOBJ_SETUP_COMPLETE 0x10 +#define AOPOBJ_SINGLE_DATUM 0x20 + + +/* + * Common bitfield for the field objects + * "Field Datum" -- a datum from the actual field object + * "Buffer Datum" -- a datum from a user buffer, read from or to be written to the field + */ +#define ACPI_COMMON_FIELD_INFO /* SIZE/ALIGNMENT: 24 bits + three 32-bit values */\ + u8 field_flags; /* Access, update, and lock bits */\ + u8 attribute; /* From access_as keyword */\ + u8 access_byte_width; /* Read/Write size in bytes */\ + u32 bit_length; /* Length of field in bits */\ + u32 base_byte_offset; /* Byte offset within containing object */\ + u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\ + u8 datum_valid_bits; /* Valid bit in first "Field datum" */\ + u8 end_field_valid_bits; /* Valid bits in the last "field datum" */\ + u8 end_buffer_valid_bits; /* Valid bits in the last "buffer datum" */\ + u32 value; /* Value to store into the Bank or Index register */\ + struct acpi_namespace_node *node; /* Link back to parent node */ + + +/* + * Fields common to both Strings and Buffers + */ +#define ACPI_COMMON_BUFFER_INFO \ + u32 length; + + +/* + * Common fields for objects that support ASL notifications + */ +#define ACPI_COMMON_NOTIFY_INFO \ + union acpi_operand_object *system_notify; /* Handler for system notifies */\ + union acpi_operand_object *device_notify; /* Handler for driver notifies */\ + union acpi_operand_object *address_space; /* Handler for Address space */ + + +/****************************************************************************** + * + * Basic data types + * + *****************************************************************************/ + +struct acpi_object_common +{ + ACPI_OBJECT_COMMON_HEADER +}; + + +struct acpi_object_integer +{ + ACPI_OBJECT_COMMON_HEADER + acpi_integer value; +}; + + +struct acpi_object_string /* Null terminated, ASCII characters only */ +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_BUFFER_INFO + char *pointer; /* String in AML stream or allocated string */ +}; + + +struct acpi_object_buffer +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_BUFFER_INFO + u8 *pointer; /* Buffer in AML stream or allocated buffer */ + struct acpi_namespace_node *node; /* Link back to parent node */ + u8 *aml_start; + u32 aml_length; +}; + + +struct acpi_object_package +{ + ACPI_OBJECT_COMMON_HEADER + + u32 count; /* # of elements in package */ + u32 aml_length; + u8 *aml_start; + struct acpi_namespace_node *node; /* Link back to parent node */ + union acpi_operand_object **elements; /* Array of pointers to acpi_objects */ +}; + + +/****************************************************************************** + * + * Complex data types + * + *****************************************************************************/ + +struct acpi_object_event +{ + ACPI_OBJECT_COMMON_HEADER + void *semaphore; +}; + + +#define INFINITE_CONCURRENCY 0xFF + +struct acpi_object_method +{ + ACPI_OBJECT_COMMON_HEADER + u8 method_flags; + u8 
param_count; + u32 aml_length; + void *semaphore; + u8 *aml_start; + u8 concurrency; + u8 thread_count; + acpi_owner_id owning_id; +}; + + +struct acpi_object_mutex +{ + ACPI_OBJECT_COMMON_HEADER + u16 sync_level; + u16 acquisition_depth; + struct acpi_thread_state *owner_thread; + void *semaphore; + union acpi_operand_object *prev; /* Link for list of acquired mutexes */ + union acpi_operand_object *next; /* Link for list of acquired mutexes */ + struct acpi_namespace_node *node; /* containing object */ +}; + + +struct acpi_object_region +{ + ACPI_OBJECT_COMMON_HEADER + + u8 space_id; + union acpi_operand_object *address_space; /* Handler for region access */ + struct acpi_namespace_node *node; /* containing object */ + union acpi_operand_object *next; + u32 length; + acpi_physical_address address; +}; + + +/****************************************************************************** + * + * Objects that can be notified. All share a common notify_info area. + * + *****************************************************************************/ + +struct acpi_object_notify_common /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */ +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_NOTIFY_INFO +}; + + +struct acpi_object_device +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_NOTIFY_INFO + struct acpi_gpe_block_info *gpe_block; +}; + + +struct acpi_object_power_resource +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_NOTIFY_INFO + u32 system_level; + u32 resource_order; +}; + + +struct acpi_object_processor +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_NOTIFY_INFO + u32 proc_id; + u32 length; + acpi_io_address address; +}; + + +struct acpi_object_thermal_zone +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_NOTIFY_INFO +}; + + +/****************************************************************************** + * + * Fields. All share a common header/info field. + * + *****************************************************************************/ + +struct acpi_object_field_common /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */ +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_FIELD_INFO + union acpi_operand_object *region_obj; /* Containing Operation Region object */ + /* (REGION/BANK fields only) */ +}; + + +struct acpi_object_region_field +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_FIELD_INFO + union acpi_operand_object *region_obj; /* Containing op_region object */ +}; + + +struct acpi_object_bank_field +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_FIELD_INFO + union acpi_operand_object *region_obj; /* Containing op_region object */ + union acpi_operand_object *bank_obj; /* bank_select Register object */ +}; + + +struct acpi_object_index_field +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_FIELD_INFO + + /* + * No "region_obj" pointer needed since the Index and Data registers + * are each field definitions unto themselves. 
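+ *
+ * A datum is accessed in two steps: the field offset is first written
+ * through the Index register (index_obj below), and the datum is then
+ * read or written through the Data register (data_obj).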
+ */ + union acpi_operand_object *index_obj; /* Index register */ + union acpi_operand_object *data_obj; /* Data register */ +}; + + +/* The buffer_field is different in that it is part of a Buffer, not an op_region */ + +struct acpi_object_buffer_field +{ + ACPI_OBJECT_COMMON_HEADER + ACPI_COMMON_FIELD_INFO + union acpi_operand_object *buffer_obj; /* Containing Buffer object */ +}; + + +/****************************************************************************** + * + * Objects for handlers + * + *****************************************************************************/ + +struct acpi_object_notify_handler +{ + ACPI_OBJECT_COMMON_HEADER + struct acpi_namespace_node *node; /* Parent device */ + acpi_notify_handler handler; + void *context; +}; + + +/* Flags for address handler */ + +#define ACPI_ADDR_HANDLER_DEFAULT_INSTALLED 0x1 + + +struct acpi_object_addr_handler +{ + ACPI_OBJECT_COMMON_HEADER + u8 space_id; + u16 hflags; + acpi_adr_space_handler handler; + struct acpi_namespace_node *node; /* Parent device */ + void *context; + acpi_adr_space_setup setup; + union acpi_operand_object *region_list; /* regions using this handler */ + union acpi_operand_object *next; +}; + + +/****************************************************************************** + * + * Special internal objects + * + *****************************************************************************/ + +/* + * The Reference object type is used for these opcodes: + * Arg[0-6], Local[0-7], index_op, name_op, zero_op, one_op, ones_op, debug_op + */ +struct acpi_object_reference +{ + ACPI_OBJECT_COMMON_HEADER + u8 target_type; /* Used for index_op */ + u16 opcode; + u32 offset; /* Used for arg_op, local_op, and index_op */ + void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */ + struct acpi_namespace_node *node; + union acpi_operand_object **where; +}; + + +/* + * Extra object is used as additional storage for types that + * have AML code in their declarations (term_args) that must be + * evaluated at run time. 
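+ * For example, an op_region whose address or length is given by a
+ * term_arg (say, the value of another named object) cannot be resolved
+ * until the enclosing AML is actually executed.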
+ * + * Currently: Region and field_unit types + */ +struct acpi_object_extra +{ + ACPI_OBJECT_COMMON_HEADER + u8 byte_fill1; + u16 word_fill1; + u32 aml_length; + u8 *aml_start; + struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */ + void *region_context; /* Region-specific data */ +}; + + +/* Additional data that can be attached to namespace nodes */ + +struct acpi_object_data +{ + ACPI_OBJECT_COMMON_HEADER + acpi_object_handler handler; + void *pointer; +}; + + +/* Structure used when objects are cached for reuse */ + +struct acpi_object_cache_list +{ + ACPI_OBJECT_COMMON_HEADER + union acpi_operand_object *next; /* Link for object cache and internal lists*/ +}; + + +/****************************************************************************** + * + * union acpi_operand_object Descriptor - a giant union of all of the above + * + *****************************************************************************/ + +union acpi_operand_object +{ + struct acpi_object_common common; + struct acpi_object_integer integer; + struct acpi_object_string string; + struct acpi_object_buffer buffer; + struct acpi_object_package package; + struct acpi_object_event event; + struct acpi_object_method method; + struct acpi_object_mutex mutex; + struct acpi_object_region region; + struct acpi_object_notify_common common_notify; + struct acpi_object_device device; + struct acpi_object_power_resource power_resource; + struct acpi_object_processor processor; + struct acpi_object_thermal_zone thermal_zone; + struct acpi_object_field_common common_field; + struct acpi_object_region_field field; + struct acpi_object_buffer_field buffer_field; + struct acpi_object_bank_field bank_field; + struct acpi_object_index_field index_field; + struct acpi_object_notify_handler notify; + struct acpi_object_addr_handler address_space; + struct acpi_object_reference reference; + struct acpi_object_extra extra; + struct acpi_object_data data; + struct acpi_object_cache_list cache; +}; + + +/****************************************************************************** + * + * union acpi_descriptor - objects that share a common descriptor identifier + * + *****************************************************************************/ + + +/* Object descriptor types */ + +#define ACPI_DESC_TYPE_CACHED 0x11 /* Used only when object is cached */ +#define ACPI_DESC_TYPE_STATE 0x20 +#define ACPI_DESC_TYPE_STATE_UPDATE 0x21 +#define ACPI_DESC_TYPE_STATE_PACKAGE 0x22 +#define ACPI_DESC_TYPE_STATE_CONTROL 0x23 +#define ACPI_DESC_TYPE_STATE_RPSCOPE 0x24 +#define ACPI_DESC_TYPE_STATE_PSCOPE 0x25 +#define ACPI_DESC_TYPE_STATE_WSCOPE 0x26 +#define ACPI_DESC_TYPE_STATE_RESULT 0x27 +#define ACPI_DESC_TYPE_STATE_NOTIFY 0x28 +#define ACPI_DESC_TYPE_STATE_THREAD 0x29 +#define ACPI_DESC_TYPE_WALK 0x44 +#define ACPI_DESC_TYPE_PARSER 0x66 +#define ACPI_DESC_TYPE_OPERAND 0x88 +#define ACPI_DESC_TYPE_NAMED 0xAA + + +union acpi_descriptor +{ + u8 descriptor_id; /* To differentiate various internal objs */\ + union acpi_operand_object object; + struct acpi_namespace_node node; + union acpi_parse_object op; +}; + + +#endif /* _ACOBJECT_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acoutput.h linux.21rc5-ac2/include/acpi/acoutput.h --- linux.21rc5/include/acpi/acoutput.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acoutput.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,185 @@ 
+/****************************************************************************** + * + * Name: acoutput.h -- debug output + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACOUTPUT_H__ +#define __ACOUTPUT_H__ + +/* + * Debug levels and component IDs. These are used to control the + * granularity of the output of the DEBUG_PRINT macro -- on a per- + * component basis and a per-exception-type basis. 
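+ *
+ * For example, to see only namespace traces plus all exceptions, the
+ * two globals would be set along these lines:
+ *
+ *	acpi_dbg_layer = ACPI_NAMESPACE;
+ *	acpi_dbg_level = ACPI_LV_NAMES | ACPI_LV_ALL_EXCEPTIONS;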
+ */ + +/* Component IDs are used in the global "debug_layer" */ + +#define ACPI_UTILITIES 0x00000001 +#define ACPI_HARDWARE 0x00000002 +#define ACPI_EVENTS 0x00000004 +#define ACPI_TABLES 0x00000008 +#define ACPI_NAMESPACE 0x00000010 +#define ACPI_PARSER 0x00000020 +#define ACPI_DISPATCHER 0x00000040 +#define ACPI_EXECUTER 0x00000080 +#define ACPI_RESOURCES 0x00000100 +#define ACPI_CA_DEBUGGER 0x00000200 +#define ACPI_OS_SERVICES 0x00000400 +#define ACPI_CA_DISASSEMBLER 0x00000800 + +/* Component IDs for ACPI tools and utilities */ + +#define ACPI_COMPILER 0x00001000 +#define ACPI_TOOLS 0x00002000 + +#define ACPI_ALL_COMPONENTS 0x00003FFF +#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS) + + +/* Component IDs reserved for ACPI drivers */ + +#define ACPI_ALL_DRIVERS 0xFFFF0000 + + +/* + * Raw debug output levels, do not use these in the DEBUG_PRINT macros + */ +#define ACPI_LV_ERROR 0x00000001 +#define ACPI_LV_WARN 0x00000002 +#define ACPI_LV_INIT 0x00000004 +#define ACPI_LV_DEBUG_OBJECT 0x00000008 +#define ACPI_LV_INFO 0x00000010 +#define ACPI_LV_ALL_EXCEPTIONS 0x0000001F + +/* Trace verbosity level 1 [Standard Trace Level] */ + +#define ACPI_LV_INIT_NAMES 0x00000020 +#define ACPI_LV_PARSE 0x00000040 +#define ACPI_LV_LOAD 0x00000080 +#define ACPI_LV_DISPATCH 0x00000100 +#define ACPI_LV_EXEC 0x00000200 +#define ACPI_LV_NAMES 0x00000400 +#define ACPI_LV_OPREGION 0x00000800 +#define ACPI_LV_BFIELD 0x00001000 +#define ACPI_LV_TABLES 0x00002000 +#define ACPI_LV_VALUES 0x00004000 +#define ACPI_LV_OBJECTS 0x00008000 +#define ACPI_LV_RESOURCES 0x00010000 +#define ACPI_LV_USER_REQUESTS 0x00020000 +#define ACPI_LV_PACKAGE 0x00040000 +#define ACPI_LV_VERBOSITY1 0x0007FF40 | ACPI_LV_ALL_EXCEPTIONS + +/* Trace verbosity level 2 [Function tracing and memory allocation] */ + +#define ACPI_LV_ALLOCATIONS 0x00100000 +#define ACPI_LV_FUNCTIONS 0x00200000 +#define ACPI_LV_OPTIMIZATIONS 0x00400000 +#define ACPI_LV_VERBOSITY2 0x00700000 | ACPI_LV_VERBOSITY1 +#define ACPI_LV_ALL ACPI_LV_VERBOSITY2 + +/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ + +#define ACPI_LV_MUTEX 0x01000000 +#define ACPI_LV_THREADS 0x02000000 +#define ACPI_LV_IO 0x04000000 +#define ACPI_LV_INTERRUPTS 0x08000000 +#define ACPI_LV_VERBOSITY3 0x0F000000 | ACPI_LV_VERBOSITY2 + +/* Exceptionally verbose output -- also used in the global "debug_level" */ + +#define ACPI_LV_AML_DISASSEMBLE 0x10000000 +#define ACPI_LV_VERBOSE_INFO 0x20000000 +#define ACPI_LV_FULL_TABLES 0x40000000 +#define ACPI_LV_EVENTS 0x80000000 + +#define ACPI_LV_VERBOSE 0xF0000000 + + +/* + * Debug level macros that are used in the DEBUG_PRINT macros + */ +#define ACPI_DEBUG_LEVEL(dl) (u32) dl,__LINE__,&_dbg + +/* Exception level -- used in the global "debug_level" */ + +#define ACPI_DB_ERROR ACPI_DEBUG_LEVEL (ACPI_LV_ERROR) +#define ACPI_DB_WARN ACPI_DEBUG_LEVEL (ACPI_LV_WARN) +#define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT) +#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT) +#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO) +#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS) + + +/* Trace level -- also used in the global "debug_level" */ + +#define ACPI_DB_INIT_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_INIT_NAMES) +#define ACPI_DB_THREADS ACPI_DEBUG_LEVEL (ACPI_LV_THREADS) +#define ACPI_DB_PARSE ACPI_DEBUG_LEVEL (ACPI_LV_PARSE) +#define ACPI_DB_DISPATCH ACPI_DEBUG_LEVEL (ACPI_LV_DISPATCH) +#define ACPI_DB_LOAD ACPI_DEBUG_LEVEL (ACPI_LV_LOAD) +#define ACPI_DB_EXEC ACPI_DEBUG_LEVEL (ACPI_LV_EXEC) +#define 
ACPI_DB_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_NAMES) +#define ACPI_DB_OPREGION ACPI_DEBUG_LEVEL (ACPI_LV_OPREGION) +#define ACPI_DB_BFIELD ACPI_DEBUG_LEVEL (ACPI_LV_BFIELD) +#define ACPI_DB_TABLES ACPI_DEBUG_LEVEL (ACPI_LV_TABLES) +#define ACPI_DB_FUNCTIONS ACPI_DEBUG_LEVEL (ACPI_LV_FUNCTIONS) +#define ACPI_DB_OPTIMIZATIONS ACPI_DEBUG_LEVEL (ACPI_LV_OPTIMIZATIONS) +#define ACPI_DB_VALUES ACPI_DEBUG_LEVEL (ACPI_LV_VALUES) +#define ACPI_DB_OBJECTS ACPI_DEBUG_LEVEL (ACPI_LV_OBJECTS) +#define ACPI_DB_ALLOCATIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALLOCATIONS) +#define ACPI_DB_RESOURCES ACPI_DEBUG_LEVEL (ACPI_LV_RESOURCES) +#define ACPI_DB_IO ACPI_DEBUG_LEVEL (ACPI_LV_IO) +#define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS) +#define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS) +#define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE) +#define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX) + +#define ACPI_DB_ALL ACPI_DEBUG_LEVEL (ACPI_LV_ALL) + + +/* Defaults for debug_level, debug and normal */ + +#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_WARN | ACPI_LV_ERROR | ACPI_LV_DEBUG_OBJECT) +#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_WARN | ACPI_LV_ERROR | ACPI_LV_DEBUG_OBJECT) +#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) + + +#endif /* __ACOUTPUT_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acparser.h linux.21rc5-ac2/include/acpi/acparser.h --- linux.21rc5/include/acpi/acparser.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acparser.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,347 @@ +/****************************************************************************** + * + * Module Name: acparser.h - AML Parser subcomponent prototypes and defines + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + + +#ifndef __ACPARSER_H__ +#define __ACPARSER_H__ + + +#define OP_HAS_RETURN_VALUE 1 + +/* variable # arguments */ + +#define ACPI_VAR_ARGS ACPI_UINT32_MAX + + +#define ACPI_PARSE_DELETE_TREE 0x0001 +#define ACPI_PARSE_NO_TREE_DELETE 0x0000 +#define ACPI_PARSE_TREE_MASK 0x0001 + +#define ACPI_PARSE_LOAD_PASS1 0x0010 +#define ACPI_PARSE_LOAD_PASS2 0x0020 +#define ACPI_PARSE_EXECUTE 0x0030 +#define ACPI_PARSE_MODE_MASK 0x0030 + +#define ACPI_PARSE_DEFERRED_OP 0x0100 + +/* Parser external interfaces */ + +acpi_status +acpi_psx_load_table ( + u8 *pcode_addr, + u32 pcode_length); + +acpi_status +acpi_psx_execute ( + struct acpi_namespace_node *method_node, + union acpi_operand_object **params, + union acpi_operand_object **return_obj_desc); + + +/****************************************************************************** + * + * Parser interfaces + * + *****************************************************************************/ + + +/* psargs - Parse AML opcode arguments */ + +u8 * +acpi_ps_get_next_package_end ( + struct acpi_parse_state *parser_state); + +u32 +acpi_ps_get_next_package_length ( + struct acpi_parse_state *parser_state); + +char * +acpi_ps_get_next_namestring ( + struct acpi_parse_state *parser_state); + +void +acpi_ps_get_next_simple_arg ( + struct acpi_parse_state *parser_state, + u32 arg_type, + union acpi_parse_object *arg); + +acpi_status +acpi_ps_get_next_namepath ( + struct acpi_walk_state *walk_state, + struct acpi_parse_state *parser_state, + union acpi_parse_object *arg, + u8 method_call); + +union acpi_parse_object * +acpi_ps_get_next_field ( + struct acpi_parse_state *parser_state); + +acpi_status +acpi_ps_get_next_arg ( + struct acpi_walk_state *walk_state, + struct acpi_parse_state *parser_state, + u32 arg_type, + union acpi_parse_object **return_arg); + + +/* psfind */ + +union acpi_parse_object * +acpi_ps_find_name ( + union acpi_parse_object *scope, + u32 name, + u32 opcode); + +union acpi_parse_object* +acpi_ps_get_parent ( + union acpi_parse_object *op); + + +/* psopcode - AML Opcode information */ + +const struct acpi_opcode_info * +acpi_ps_get_opcode_info ( + u16 opcode); + +char * +acpi_ps_get_opcode_name ( + u16 opcode); + + +/* psparse - top level parsing routines */ + +u32 +acpi_ps_get_opcode_size ( + u32 opcode); + +void +acpi_ps_complete_this_op ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + +acpi_status +acpi_ps_next_parse_state ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + acpi_status callback_status); + +acpi_status +acpi_ps_find_object ( + struct acpi_walk_state *walk_state, + union acpi_parse_object **out_op); + +void +acpi_ps_delete_parse_tree ( + union acpi_parse_object *root); + +acpi_status +acpi_ps_parse_loop ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ps_parse_aml ( + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ps_parse_table ( + u8 *aml, + u32 aml_size, + acpi_parse_downwards descending_callback, + acpi_parse_upwards ascending_callback, + 
union acpi_parse_object **root_object); + +u16 +acpi_ps_peek_opcode ( + struct acpi_parse_state *state); + + +/* psscope - Scope stack management routines */ + + +acpi_status +acpi_ps_init_scope ( + struct acpi_parse_state *parser_state, + union acpi_parse_object *root); + +union acpi_parse_object * +acpi_ps_get_parent_scope ( + struct acpi_parse_state *state); + +u8 +acpi_ps_has_completed_scope ( + struct acpi_parse_state *parser_state); + +void +acpi_ps_pop_scope ( + struct acpi_parse_state *parser_state, + union acpi_parse_object **op, + u32 *arg_list, + u32 *arg_count); + +acpi_status +acpi_ps_push_scope ( + struct acpi_parse_state *parser_state, + union acpi_parse_object *op, + u32 remaining_args, + u32 arg_count); + +void +acpi_ps_cleanup_scope ( + struct acpi_parse_state *state); + + +/* pstree - parse tree manipulation routines */ + +void +acpi_ps_append_arg( + union acpi_parse_object *op, + union acpi_parse_object *arg); + +union acpi_parse_object* +acpi_ps_find ( + union acpi_parse_object *scope, + char *path, + u16 opcode, + u32 create); + +union acpi_parse_object * +acpi_ps_get_arg( + union acpi_parse_object *op, + u32 argn); + +union acpi_parse_object * +acpi_ps_get_child ( + union acpi_parse_object *op); + +union acpi_parse_object * +acpi_ps_get_depth_next ( + union acpi_parse_object *origin, + union acpi_parse_object *op); + + +/* pswalk - parse tree walk routines */ + +acpi_status +acpi_ps_walk_parsed_aml ( + union acpi_parse_object *start_op, + union acpi_parse_object *end_op, + union acpi_operand_object *mth_desc, + struct acpi_namespace_node *start_node, + union acpi_operand_object **params, + union acpi_operand_object **caller_return_desc, + acpi_owner_id owner_id, + acpi_parse_downwards descending_callback, + acpi_parse_upwards ascending_callback); + +acpi_status +acpi_ps_get_next_walk_op ( + struct acpi_walk_state *walk_state, + union acpi_parse_object *op, + acpi_parse_upwards ascending_callback); + +acpi_status +acpi_ps_delete_completed_op ( + struct acpi_walk_state *walk_state); + + +/* psutils - parser utilities */ + +union acpi_parse_object * +acpi_ps_create_scope_op ( + void); + +void +acpi_ps_init_op ( + union acpi_parse_object *op, + u16 opcode); + +union acpi_parse_object * +acpi_ps_alloc_op ( + u16 opcode); + +void +acpi_ps_free_op ( + union acpi_parse_object *op); + +void +acpi_ps_delete_parse_cache ( + void); + +u8 +acpi_ps_is_leading_char ( + u32 c); + +u8 +acpi_ps_is_prefix_char ( + u32 c); + +u32 +acpi_ps_get_name( + union acpi_parse_object *op); + +void +acpi_ps_set_name( + union acpi_parse_object *op, + u32 name); + + +/* psdump - display parser tree */ + +u32 +acpi_ps_sprint_path ( + char *buffer_start, + u32 buffer_size, + union acpi_parse_object *op); + +u32 +acpi_ps_sprint_op ( + char *buffer_start, + u32 buffer_size, + union acpi_parse_object *op); + +void +acpi_ps_show ( + union acpi_parse_object *op); + + +#endif /* __ACPARSER_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acpi_bus.h linux.21rc5-ac2/include/acpi/acpi_bus.h --- linux.21rc5/include/acpi/acpi_bus.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acpi_bus.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,314 @@ +/* + * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * 
it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef __ACPI_BUS_H__ +#define __ACPI_BUS_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,4)) +#include +#define CONFIG_LDM +#endif + +#include + + +/* TBD: Make dynamic */ +#define ACPI_MAX_HANDLES 10 +struct acpi_handle_list { + u32 count; + acpi_handle handles[ACPI_MAX_HANDLES]; +}; + + +/* acpi_utils.h */ +acpi_status +acpi_extract_package ( + union acpi_object *package, + struct acpi_buffer *format, + struct acpi_buffer *buffer); +acpi_status +acpi_evaluate_integer ( + acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *arguments, + unsigned long *data); +acpi_status +acpi_evaluate_reference ( + acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *arguments, + struct acpi_handle_list *list); + + +#ifdef CONFIG_ACPI_BUS + +#include + +#define ACPI_BUS_FILE_ROOT "acpi" +extern struct proc_dir_entry *acpi_root_dir; +extern FADT_DESCRIPTOR acpi_fadt; + +enum acpi_bus_removal_type { + ACPI_BUS_REMOVAL_NORMAL = 0, + ACPI_BUS_REMOVAL_EJECT, + ACPI_BUS_REMOVAL_SUPRISE, + ACPI_BUS_REMOVAL_TYPE_COUNT +}; + +enum acpi_bus_device_type { + ACPI_BUS_TYPE_DEVICE = 0, + ACPI_BUS_TYPE_POWER, + ACPI_BUS_TYPE_PROCESSOR, + ACPI_BUS_TYPE_THERMAL, + ACPI_BUS_TYPE_SYSTEM, + ACPI_BUS_TYPE_POWER_BUTTON, + ACPI_BUS_TYPE_SLEEP_BUTTON, + ACPI_BUS_DEVICE_TYPE_COUNT +}; + +struct acpi_driver; +struct acpi_device; + + +/* + * ACPI Driver + * ----------- + */ + +typedef int (*acpi_op_add) (struct acpi_device *device); +typedef int (*acpi_op_remove) (struct acpi_device *device, int type); +typedef int (*acpi_op_lock) (struct acpi_device *device, int type); +typedef int (*acpi_op_start) (struct acpi_device *device); +typedef int (*acpi_op_stop) (struct acpi_device *device, int type); +typedef int (*acpi_op_suspend) (struct acpi_device *device, int state); +typedef int (*acpi_op_resume) (struct acpi_device *device, int state); +typedef int (*acpi_op_scan) (struct acpi_device *device); +typedef int (*acpi_op_bind) (struct acpi_device *device); + +struct acpi_device_ops { + acpi_op_add add; + acpi_op_remove remove; + acpi_op_lock lock; + acpi_op_start start; + acpi_op_stop stop; + acpi_op_suspend suspend; + acpi_op_resume resume; + acpi_op_scan scan; + acpi_op_bind bind; +}; + +struct acpi_driver { + struct list_head node; + char name[80]; + char class[80]; + int references; + char *ids; /* Supported Hardware IDs */ + struct acpi_device_ops ops; +}; + +/* + * ACPI Device + * ----------- + */ + +/* Status (_STA) */ + +struct acpi_device_status { + u32 present:1; + u32 enabled:1; + u32 show_in_ui:1; + u32 functional:1; + u32 battery_present:1; + u32 reserved:27; +}; + + +/* Flags */ + +struct acpi_device_flags { + u32 dynamic_status:1; + u32 hardware_id:1; + u32 compatible_ids:1; + u32 bus_address:1; + u32 unique_id:1; + u32 removable:1; + u32 
ejectable:1; + u32 lockable:1; + u32 suprise_removal_ok:1; + u32 power_manageable:1; + u32 performance_manageable:1; + u32 reserved:21; +}; + + +/* File System */ + +struct acpi_device_dir { + struct proc_dir_entry *entry; +}; + +#define acpi_device_dir(d) ((d)->dir.entry) + + +/* Plug and Play */ + +typedef char acpi_bus_id[5]; +typedef unsigned long acpi_bus_address; +typedef char acpi_hardware_id[9]; +typedef char acpi_unique_id[9]; +typedef char acpi_device_name[40]; +typedef char acpi_device_class[20]; + +struct acpi_device_pnp { + acpi_bus_id bus_id; /* Object name */ + acpi_bus_address bus_address; /* _ADR */ + acpi_hardware_id hardware_id; /* _HID */ + acpi_unique_id unique_id; /* _UID */ + acpi_device_name device_name; /* Driver-determined */ + acpi_device_class device_class; /* " */ +}; + +#define acpi_device_bid(d) ((d)->pnp.bus_id) +#define acpi_device_adr(d) ((d)->pnp.bus_address) +#define acpi_device_hid(d) ((d)->pnp.hardware_id) +#define acpi_device_uid(d) ((d)->pnp.unique_id) +#define acpi_device_name(d) ((d)->pnp.device_name) +#define acpi_device_class(d) ((d)->pnp.device_class) + + +/* Power Management */ + +struct acpi_device_power_flags { + u32 explicit_get:1; /* _PSC present? */ + u32 power_resources:1; /* Power resources */ + u32 inrush_current:1; /* Serialize Dx->D0 */ + u32 wake_capable:1; /* Wakeup supported? */ + u32 wake_enabled:1; /* Enabled for wakeup */ + u32 power_removed:1; /* Optimize Dx->D0 */ + u32 reserved:26; +}; + +struct acpi_device_power_state { + struct { + u8 valid:1; + u8 explicit_set:1; /* _PSx present? */ + u8 reserved:6; + } flags; + int power; /* % Power (compared to D0) */ + int latency; /* Dx->D0 time (microseconds) */ + struct acpi_handle_list resources; /* Power resources referenced */ +}; + +struct acpi_device_power { + int state; /* Current state */ + struct acpi_device_power_flags flags; + struct acpi_device_power_state states[4]; /* Power states (D0-D3) */ +}; + + +/* Performance Management */ + +struct acpi_device_perf_flags { + u8 reserved:8; +}; + +struct acpi_device_perf_state { + struct { + u8 valid:1; + u8 reserved:7; + } flags; + u8 power; /* % Power (compared to P0) */ + u8 performance; /* % Performance ( " ) */ + int latency; /* Px->P0 time (microseconds) */ +}; + +struct acpi_device_perf { + int state; + struct acpi_device_perf_flags flags; + int state_count; + struct acpi_device_perf_state *states; +}; + + +/* Device */ + +struct acpi_device { + acpi_handle handle; + struct acpi_device *parent; + struct list_head children; + struct list_head node; + struct acpi_device_status status; + struct acpi_device_flags flags; + struct acpi_device_pnp pnp; + struct acpi_device_power power; + struct acpi_device_perf performance; + struct acpi_device_dir dir; + struct acpi_device_ops ops; + struct acpi_driver *driver; + void *driver_data; +#ifdef CONFIG_LDM + struct device dev; +#endif +}; + +#define acpi_driver_data(d) ((d)->driver_data) + + +/* + * Events + * ------ + */ + +struct acpi_bus_event { + struct list_head node; + acpi_device_class device_class; + acpi_bus_id bus_id; + u32 type; + u32 data; +}; + + +/* + * External Functions + */ + +int acpi_bus_get_device(acpi_handle, struct acpi_device **device); +int acpi_bus_get_status (struct acpi_device *device); +int acpi_bus_get_power (acpi_handle handle, int *state); +int acpi_bus_set_power (acpi_handle handle, int state); +int acpi_bus_generate_event (struct acpi_device *device, u8 type, int data); +int acpi_bus_receive_event (struct acpi_bus_event *event); +int 
acpi_bus_register_driver (struct acpi_driver *driver); +int acpi_bus_unregister_driver (struct acpi_driver *driver); +int acpi_bus_scan (struct acpi_device *device); +int acpi_init (void); +void acpi_exit (void); + + +#endif /*CONFIG_ACPI_BUS*/ + +#endif /*__ACPI_BUS_H__*/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acpi_drivers.h linux.21rc5-ac2/include/acpi/acpi_drivers.h --- linux.21rc5/include/acpi/acpi_drivers.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acpi_drivers.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,348 @@ +/* + * acpi_drivers.h ($Revision: 32 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef __ACPI_DRIVERS_H__ +#define __ACPI_DRIVERS_H__ + +#include +#include "acpi_bus.h" + + +#define ACPI_MAX_STRING 80 + + +/* -------------------------------------------------------------------------- + ACPI Bus + -------------------------------------------------------------------------- */ + +#define ACPI_BUS_COMPONENT 0x00010000 +#define ACPI_BUS_CLASS "system_bus" +#define ACPI_BUS_HID "ACPI_BUS" +#define ACPI_BUS_DRIVER_NAME "ACPI Bus Driver" +#define ACPI_BUS_DEVICE_NAME "System Bus" + + +/* -------------------------------------------------------------------------- + AC Adapter + -------------------------------------------------------------------------- */ + +#define ACPI_AC_COMPONENT 0x00020000 +#define ACPI_AC_CLASS "ac_adapter" +#define ACPI_AC_HID "ACPI0003" +#define ACPI_AC_DRIVER_NAME "ACPI AC Adapter Driver" +#define ACPI_AC_DEVICE_NAME "AC Adapter" +#define ACPI_AC_FILE_STATE "state" +#define ACPI_AC_NOTIFY_STATUS 0x80 +#define ACPI_AC_STATUS_OFFLINE 0x00 +#define ACPI_AC_STATUS_ONLINE 0x01 +#define ACPI_AC_STATUS_UNKNOWN 0xFF + + +/* -------------------------------------------------------------------------- + Battery + -------------------------------------------------------------------------- */ + +#define ACPI_BATTERY_COMPONENT 0x00040000 +#define ACPI_BATTERY_CLASS "battery" +#define ACPI_BATTERY_HID "PNP0C0A" +#define ACPI_BATTERY_DRIVER_NAME "ACPI Battery Driver" +#define ACPI_BATTERY_DEVICE_NAME "Battery" +#define ACPI_BATTERY_FILE_INFO "info" +#define ACPI_BATTERY_FILE_STATUS "state" +#define ACPI_BATTERY_FILE_ALARM "alarm" +#define ACPI_BATTERY_NOTIFY_STATUS 0x80 +#define ACPI_BATTERY_NOTIFY_INFO 0x81 +#define ACPI_BATTERY_UNITS_WATTS "mW" +#define ACPI_BATTERY_UNITS_AMPS "mA" + + +/* -------------------------------------------------------------------------- + Button + -------------------------------------------------------------------------- */ + +#define 
ACPI_BUTTON_COMPONENT 0x00080000 +#define ACPI_BUTTON_DRIVER_NAME "ACPI Button Driver" +#define ACPI_BUTTON_CLASS "button" +#define ACPI_BUTTON_FILE_INFO "info" +#define ACPI_BUTTON_FILE_STATE "state" +#define ACPI_BUTTON_TYPE_UNKNOWN 0x00 +#define ACPI_BUTTON_NOTIFY_STATUS 0x80 + +#define ACPI_BUTTON_SUBCLASS_POWER "power" +#define ACPI_BUTTON_HID_POWER "PNP0C0C" +#define ACPI_BUTTON_HID_POWERF "ACPI_FPB" +#define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button (CM)" +#define ACPI_BUTTON_DEVICE_NAME_POWERF "Power Button (FF)" +#define ACPI_BUTTON_TYPE_POWER 0x01 +#define ACPI_BUTTON_TYPE_POWERF 0x02 + +#define ACPI_BUTTON_SUBCLASS_SLEEP "sleep" +#define ACPI_BUTTON_HID_SLEEP "PNP0C0E" +#define ACPI_BUTTON_HID_SLEEPF "ACPI_FSB" +#define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button (CM)" +#define ACPI_BUTTON_DEVICE_NAME_SLEEPF "Sleep Button (FF)" +#define ACPI_BUTTON_TYPE_SLEEP 0x03 +#define ACPI_BUTTON_TYPE_SLEEPF 0x04 + +#define ACPI_BUTTON_SUBCLASS_LID "lid" +#define ACPI_BUTTON_HID_LID "PNP0C0D" +#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch" +#define ACPI_BUTTON_TYPE_LID 0x05 + + +/* -------------------------------------------------------------------------- + Embedded Controller + -------------------------------------------------------------------------- */ + +#define ACPI_EC_COMPONENT 0x00100000 +#define ACPI_EC_CLASS "embedded_controller" +#define ACPI_EC_HID "PNP0C09" +#define ACPI_EC_DRIVER_NAME "ACPI Embedded Controller Driver" +#define ACPI_EC_DEVICE_NAME "Embedded Controller" +#define ACPI_EC_FILE_INFO "info" + +#ifdef CONFIG_ACPI_EC + +int acpi_ec_ecdt_probe (void); +int acpi_ec_init (void); +void acpi_ec_exit (void); + +#endif + + +/* -------------------------------------------------------------------------- + Fan + -------------------------------------------------------------------------- */ + +#define ACPI_FAN_COMPONENT 0x00200000 +#define ACPI_FAN_CLASS "fan" +#define ACPI_FAN_HID "PNP0C0B" +#define ACPI_FAN_DRIVER_NAME "ACPI Fan Driver" +#define ACPI_FAN_DEVICE_NAME "Fan" +#define ACPI_FAN_FILE_STATE "state" +#define ACPI_FAN_NOTIFY_STATUS 0x80 + + +/* -------------------------------------------------------------------------- + PCI + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_PCI + +#define ACPI_PCI_COMPONENT 0x00400000 + +/* ACPI PCI Root Bridge (pci_root.c) */ + +#define ACPI_PCI_ROOT_CLASS "pci_bridge" +#define ACPI_PCI_ROOT_HID "PNP0A03" +#define ACPI_PCI_ROOT_DRIVER_NAME "ACPI PCI Root Bridge Driver" +#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge" + +int acpi_pci_root_init (void); +void acpi_pci_root_exit (void); +void acpi_pci_get_translations (struct acpi_pci_id* id, u64* mem_tra, u64* io_tra); + +/* ACPI PCI Interrupt Link (pci_link.c) */ + +#define ACPI_PCI_LINK_CLASS "pci_irq_routing" +#define ACPI_PCI_LINK_HID "PNP0C0F" +#define ACPI_PCI_LINK_DRIVER_NAME "ACPI PCI Interrupt Link Driver" +#define ACPI_PCI_LINK_DEVICE_NAME "PCI Interrupt Link" +#define ACPI_PCI_LINK_FILE_INFO "info" +#define ACPI_PCI_LINK_FILE_STATUS "state" + +int acpi_pci_link_check (void); +int acpi_pci_link_get_irq (acpi_handle handle, int index); +int acpi_pci_link_init (void); +void acpi_pci_link_exit (void); + +/* ACPI PCI Interrupt Routing (pci_irq.c) */ + +int acpi_pci_irq_add_prt (acpi_handle handle, int segment, int bus); + +/* ACPI PCI Device Binding (pci_bind.c) */ + +struct pci_bus; + +int acpi_pci_bind (struct acpi_device *device); +int acpi_pci_bind_root (struct acpi_device *device, struct acpi_pci_id *id, struct pci_bus 
*bus); + +#endif /*CONFIG_ACPI_PCI*/ + + +/* -------------------------------------------------------------------------- + Power Resource + -------------------------------------------------------------------------- */ + +#define ACPI_POWER_COMPONENT 0x00800000 +#define ACPI_POWER_CLASS "power_resource" +#define ACPI_POWER_HID "ACPI_PWR" +#define ACPI_POWER_DRIVER_NAME "ACPI Power Resource Driver" +#define ACPI_POWER_DEVICE_NAME "Power Resource" +#define ACPI_POWER_FILE_INFO "info" +#define ACPI_POWER_FILE_STATUS "state" +#define ACPI_POWER_RESOURCE_STATE_OFF 0x00 +#define ACPI_POWER_RESOURCE_STATE_ON 0x01 +#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF + +#ifdef CONFIG_ACPI_POWER + +int acpi_power_get_inferred_state (struct acpi_device *device); +int acpi_power_transition (struct acpi_device *device, int state); +int acpi_power_init (void); +void acpi_power_exit (void); + +#endif + + +/* -------------------------------------------------------------------------- + Processor + -------------------------------------------------------------------------- */ + +#define ACPI_PROCESSOR_COMPONENT 0x01000000 +#define ACPI_PROCESSOR_CLASS "processor" +#define ACPI_PROCESSOR_HID "ACPI_CPU" +#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" +#define ACPI_PROCESSOR_DEVICE_NAME "Processor" +#define ACPI_PROCESSOR_FILE_INFO "info" +#define ACPI_PROCESSOR_FILE_POWER "power" +#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" +#define ACPI_PROCESSOR_FILE_THROTTLING "throttling" +#define ACPI_PROCESSOR_FILE_LIMIT "limit" +#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80 +#define ACPI_PROCESSOR_NOTIFY_POWER 0x81 +#define ACPI_PROCESSOR_LIMIT_NONE 0x00 +#define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01 +#define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02 + +int acpi_processor_set_thermal_limit(acpi_handle handle, int type); + + +/* -------------------------------------------------------------------------- + System + -------------------------------------------------------------------------- */ + +#define ACPI_SYSTEM_COMPONENT 0x02000000 +#define ACPI_SYSTEM_CLASS "system" +#define ACPI_SYSTEM_HID "ACPI_SYS" +#define ACPI_SYSTEM_DRIVER_NAME "ACPI System Driver" +#define ACPI_SYSTEM_DEVICE_NAME "System" +#define ACPI_SYSTEM_FILE_INFO "info" +#define ACPI_SYSTEM_FILE_EVENT "event" +#define ACPI_SYSTEM_FILE_ALARM "alarm" +#define ACPI_SYSTEM_FILE_DSDT "dsdt" +#define ACPI_SYSTEM_FILE_FADT "fadt" +#define ACPI_SYSTEM_FILE_SLEEP "sleep" +#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer" +#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level" + +#ifdef CONFIG_ACPI_SYSTEM + +int acpi_system_init (void); +void acpi_system_exit (void); + +#endif + + +/* -------------------------------------------------------------------------- + Thermal Zone + -------------------------------------------------------------------------- */ + +#define ACPI_THERMAL_COMPONENT 0x04000000 +#define ACPI_THERMAL_CLASS "thermal_zone" +#define ACPI_THERMAL_HID "ACPI_THM" +#define ACPI_THERMAL_DRIVER_NAME "ACPI Thermal Zone Driver" +#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone" +#define ACPI_THERMAL_FILE_STATE "state" +#define ACPI_THERMAL_FILE_TEMPERATURE "temperature" +#define ACPI_THERMAL_FILE_TRIP_POINTS "trip_points" +#define ACPI_THERMAL_FILE_COOLING_MODE "cooling_mode" +#define ACPI_THERMAL_FILE_POLLING_FREQ "polling_frequency" +#define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80 +#define ACPI_THERMAL_NOTIFY_THRESHOLDS 0x81 +#define ACPI_THERMAL_NOTIFY_DEVICES 0x82 +#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0 +#define ACPI_THERMAL_NOTIFY_HOT 0xF1 +#define 
ACPI_THERMAL_MODE_ACTIVE 0x00 +#define ACPI_THERMAL_MODE_PASSIVE 0x01 +#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff" + + +/* -------------------------------------------------------------------------- + Debug Support + -------------------------------------------------------------------------- */ + +#define ACPI_DEBUG_RESTORE 0 +#define ACPI_DEBUG_LOW 1 +#define ACPI_DEBUG_MEDIUM 2 +#define ACPI_DEBUG_HIGH 3 +#define ACPI_DEBUG_DRIVERS 4 + +extern u32 acpi_dbg_level; +extern u32 acpi_dbg_layer; + +static inline void +acpi_set_debug ( + u32 flag) +{ + static u32 layer_save; + static u32 level_save; + + switch (flag) { + case ACPI_DEBUG_RESTORE: + acpi_dbg_layer = layer_save; + acpi_dbg_level = level_save; + break; + case ACPI_DEBUG_LOW: + case ACPI_DEBUG_MEDIUM: + case ACPI_DEBUG_HIGH: + case ACPI_DEBUG_DRIVERS: + layer_save = acpi_dbg_layer; + level_save = acpi_dbg_level; + break; + } + + switch (flag) { + case ACPI_DEBUG_LOW: + acpi_dbg_layer = ACPI_COMPONENT_DEFAULT | ACPI_ALL_DRIVERS; + acpi_dbg_level = ACPI_DEBUG_DEFAULT; + break; + case ACPI_DEBUG_MEDIUM: + acpi_dbg_layer = ACPI_COMPONENT_DEFAULT | ACPI_ALL_DRIVERS; + acpi_dbg_level = ACPI_LV_FUNCTIONS | ACPI_LV_ALL_EXCEPTIONS; + break; + case ACPI_DEBUG_HIGH: + acpi_dbg_layer = 0xFFFFFFFF; + acpi_dbg_level = 0xFFFFFFFF; + break; + case ACPI_DEBUG_DRIVERS: + acpi_dbg_layer = ACPI_ALL_DRIVERS; + acpi_dbg_level = 0xFFFFFFFF; + break; + } +} + + +#endif /*__ACPI_DRIVERS_H__*/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acpi.h linux.21rc5-ac2/include/acpi/acpi.h --- linux.21rc5/include/acpi/acpi.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acpi.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,69 @@ +/****************************************************************************** + * + * Name: acpi.h - Master include file, Publics and external data. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACPI_H__ +#define __ACPI_H__ + +/* + * Common includes for all ACPI driver files + * We put them here because we don't want to duplicate them + * in the rest of the source code again and again. + */ +#include "acconfig.h" /* Configuration constants */ +#include "platform/acenv.h" /* Target environment specific items */ +#include "actypes.h" /* Fundamental common data types */ +#include "acexcep.h" /* ACPI exception codes */ +#include "acmacros.h" /* C macros */ +#include "actbl.h" /* ACPI table definitions */ +#include "aclocal.h" /* Internal data types */ +#include "acoutput.h" /* Error output and Debug macros */ +#include "acpiosxf.h" /* Interfaces to the ACPI-to-OS layer*/ +#include "acpixf.h" /* ACPI core subsystem external interfaces */ +#include "acobject.h" /* ACPI internal object */ +#include "acstruct.h" /* Common structures */ +#include "acglobal.h" /* All global variables */ +#include "achware.h" /* Hardware defines and interfaces */ +#include "acutils.h" /* Utility interfaces */ + + +#endif /* __ACPI_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acpiosxf.h linux.21rc5-ac2/include/acpi/acpiosxf.h --- linux.21rc5/include/acpi/acpiosxf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acpiosxf.h 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,373 @@ + +/****************************************************************************** + * + * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These + * interfaces must be implemented by OSL to interface the + * ACPI components to the host operating system. + * + *****************************************************************************/ + + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACPIOSXF_H__ +#define __ACPIOSXF_H__ + +#include "platform/acenv.h" +#include "actypes.h" + + +/* Priorities for acpi_os_queue_for_execution */ + +#define OSD_PRIORITY_GPE 1 +#define OSD_PRIORITY_HIGH 2 +#define OSD_PRIORITY_MED 3 +#define OSD_PRIORITY_LO 4 + +#define ACPI_NO_UNIT_LIMIT ((u32) -1) +#define ACPI_MUTEX_SEM 1 + + +/* Functions for acpi_os_signal */ + +#define ACPI_SIGNAL_FATAL 0 +#define ACPI_SIGNAL_BREAKPOINT 1 + +struct acpi_signal_fatal_info +{ + u32 type; + u32 code; + u32 argument; +}; + + +/* + * OSL Initialization and shutdown primitives + */ + +acpi_status +acpi_os_initialize ( + void); + +acpi_status +acpi_os_terminate ( + void); + + +/* + * ACPI Table interfaces + */ + +acpi_status +acpi_os_get_root_pointer ( + u32 flags, + struct acpi_pointer *address); + +acpi_status +acpi_os_predefined_override ( + const struct acpi_predefined_names *init_val, + acpi_string *new_val); + +acpi_status +acpi_os_table_override ( + struct acpi_table_header *existing_table, + struct acpi_table_header **new_table); + + +/* + * Synchronization primitives + */ + +acpi_status +acpi_os_create_semaphore ( + u32 max_units, + u32 initial_units, + acpi_handle *out_handle); + +acpi_status +acpi_os_delete_semaphore ( + acpi_handle handle); + +acpi_status +acpi_os_wait_semaphore ( + acpi_handle handle, + u32 units, + u16 timeout); + +acpi_status +acpi_os_signal_semaphore ( + acpi_handle handle, + u32 units); + +acpi_status +acpi_os_create_lock ( + acpi_handle *out_handle); + +void +acpi_os_delete_lock ( + acpi_handle handle); + +void +acpi_os_acquire_lock ( + acpi_handle handle, + u32 flags); + +void +acpi_os_release_lock ( + acpi_handle handle, + u32 flags); + + +/* + * Memory allocation and mapping + */ + +void * +acpi_os_allocate ( + acpi_size size); + +void +acpi_os_free ( + void * memory); + +acpi_status +acpi_os_map_memory ( + acpi_physical_address physical_address, + acpi_size size, + void **logical_address); + +void +acpi_os_unmap_memory ( + void *logical_address, + acpi_size size); + +acpi_status +acpi_os_get_physical_address ( + void *logical_address, + acpi_physical_address *physical_address); + + +/* + * Interrupt handlers + */ + +acpi_status +acpi_os_install_interrupt_handler ( + u32 interrupt_number, + OSD_HANDLER service_routine, + void *context); + +acpi_status +acpi_os_remove_interrupt_handler ( + u32 interrupt_number, + OSD_HANDLER service_routine); + + +/* + * Threads and Scheduling + */ + +u32 +acpi_os_get_thread_id ( + void); + +acpi_status +acpi_os_queue_for_execution ( + u32 priority, + OSD_EXECUTION_CALLBACK function, + void *context); + +void +acpi_os_sleep ( + u32 seconds, + u32 milliseconds); + +void +acpi_os_stall ( + u32 microseconds); + + +/* + * Platform and 
hardware-independent I/O interfaces + */ + +acpi_status +acpi_os_read_port ( + acpi_io_address address, + u32 *value, + u32 width); + +acpi_status +acpi_os_write_port ( + acpi_io_address address, + u32 value, + u32 width); + + +/* + * Platform and hardware-independent physical memory interfaces + */ + +acpi_status +acpi_os_read_memory ( + acpi_physical_address address, + u32 *value, + u32 width); + +acpi_status +acpi_os_write_memory ( + acpi_physical_address address, + u32 value, + u32 width); + + +/* + * Platform and hardware-independent PCI configuration space access + */ + +acpi_status +acpi_os_read_pci_configuration ( + struct acpi_pci_id *pci_id, + u32 register, + void *value, + u32 width); + +acpi_status +acpi_os_write_pci_configuration ( + struct acpi_pci_id *pci_id, + u32 register, + acpi_integer value, + u32 width); + +/* + * Interim function needed for PCI IRQ routing + */ +void +acpi_os_derive_pci_id( + acpi_handle rhandle, + acpi_handle chandle, + struct acpi_pci_id **pci_id); + +/* + * Miscellaneous + */ + +u8 +acpi_os_readable ( + void *pointer, + acpi_size length); + +u8 +acpi_os_writable ( + void *pointer, + acpi_size length); + +u32 +acpi_os_get_timer ( + void); + +acpi_status +acpi_os_signal ( + u32 function, + void *info); + +/* + * Debug print routines + */ + +void ACPI_INTERNAL_VAR_XFACE +acpi_os_printf ( + const char *format, + ...); + +void +acpi_os_vprintf ( + const char *format, + va_list args); + +void +acpi_os_redirect_output ( + void *destination); + + +/* + * Debug input + */ + +u32 +acpi_os_get_line ( + char *buffer); + + +/* + * Directory manipulation + */ + +void * +acpi_os_open_directory ( + char *pathname, + char *wildcard_spec, + char requested_file_type); + +/* requeste_file_type values */ + +#define REQUEST_FILE_ONLY 0 +#define REQUEST_DIR_ONLY 1 + + +char * +acpi_os_get_next_filename ( + void *dir_handle); + +void +acpi_os_close_directory ( + void *dir_handle); + +/* + * Debug + */ + +void +acpi_os_dbg_assert( + void *failed_assertion, + void *file_name, + u32 line_number, + char *message); + + +#endif /* __ACPIOSXF_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acpixf.h linux.21rc5-ac2/include/acpi/acpixf.h --- linux.21rc5/include/acpi/acpixf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acpixf.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,462 @@ + +/****************************************************************************** + * + * Name: acpixf.h - External interfaces to the ACPI subsystem + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + + +#ifndef __ACXFACE_H__ +#define __ACXFACE_H__ + +#include "actypes.h" +#include "actbl.h" + + + /* + * Global interfaces + */ + +acpi_status +acpi_initialize_subsystem ( + void); + +acpi_status +acpi_enable_subsystem ( + u32 flags); + +acpi_status +acpi_initialize_objects ( + u32 flags); + +acpi_status +acpi_terminate ( + void); + +acpi_status +acpi_subsystem_status ( + void); + +acpi_status +acpi_enable ( + void); + +acpi_status +acpi_disable ( + void); + +acpi_status +acpi_get_system_info ( + struct acpi_buffer *ret_buffer); + +const char * +acpi_format_exception ( + acpi_status exception); + +acpi_status +acpi_purge_cached_objects ( + void); + +acpi_status +acpi_install_initialization_handler ( + acpi_init_handler handler, + u32 function); + +/* + * ACPI Memory manager + */ + +void * +acpi_allocate ( + u32 size); + +void * +acpi_callocate ( + u32 size); + +void +acpi_free ( + void *address); + + +/* + * ACPI table manipulation interfaces + */ + +acpi_status +acpi_find_root_pointer ( + u32 flags, + struct acpi_pointer *rsdp_address); + +acpi_status +acpi_load_tables ( + void); + +acpi_status +acpi_load_table ( + struct acpi_table_header *table_ptr); + +acpi_status +acpi_unload_table ( + acpi_table_type table_type); + +acpi_status +acpi_get_table_header ( + acpi_table_type table_type, + u32 instance, + struct acpi_table_header *out_table_header); + +acpi_status +acpi_get_table ( + acpi_table_type table_type, + u32 instance, + struct acpi_buffer *ret_buffer); + +acpi_status +acpi_get_firmware_table ( + acpi_string signature, + u32 instance, + u32 flags, + struct acpi_table_header **table_pointer); + + +/* + * Namespace and name interfaces + */ + +acpi_status +acpi_walk_namespace ( + acpi_object_type type, + acpi_handle start_object, + u32 max_depth, + acpi_walk_callback user_function, + void *context, + void **return_value); + +acpi_status +acpi_get_devices ( + char *HID, + acpi_walk_callback user_function, + void *context, + void **return_value); + +acpi_status +acpi_get_name ( + acpi_handle handle, + u32 name_type, + struct acpi_buffer *ret_path_ptr); + +acpi_status +acpi_get_handle ( + acpi_handle parent, + acpi_string pathname, + acpi_handle *ret_handle); + +acpi_status +acpi_attach_data ( + acpi_handle obj_handle, + acpi_object_handler handler, + void *data); + +acpi_status +acpi_detach_data ( + acpi_handle obj_handle, + 
acpi_object_handler handler); + +acpi_status +acpi_get_data ( + acpi_handle obj_handle, + acpi_object_handler handler, + void **data); + + +/* + * Object manipulation and enumeration + */ + +acpi_status +acpi_evaluate_object ( + acpi_handle object, + acpi_string pathname, + struct acpi_object_list *parameter_objects, + struct acpi_buffer *return_object_buffer); + +acpi_status +acpi_evaluate_object_typed ( + acpi_handle object, + acpi_string pathname, + struct acpi_object_list *external_params, + struct acpi_buffer *return_buffer, + acpi_object_type return_type); + +acpi_status +acpi_get_object_info ( + acpi_handle device, + struct acpi_device_info *info); + +acpi_status +acpi_get_next_object ( + acpi_object_type type, + acpi_handle parent, + acpi_handle child, + acpi_handle *out_handle); + +acpi_status +acpi_get_type ( + acpi_handle object, + acpi_object_type *out_type); + +acpi_status +acpi_get_parent ( + acpi_handle object, + acpi_handle *out_handle); + + +/* + * Event handler interfaces + */ + +acpi_status +acpi_install_fixed_event_handler ( + u32 acpi_event, + acpi_event_handler handler, + void *context); + +acpi_status +acpi_remove_fixed_event_handler ( + u32 acpi_event, + acpi_event_handler handler); + +acpi_status +acpi_install_notify_handler ( + acpi_handle device, + u32 handler_type, + acpi_notify_handler handler, + void *context); + +acpi_status +acpi_remove_notify_handler ( + acpi_handle device, + u32 handler_type, + acpi_notify_handler handler); + +acpi_status +acpi_install_address_space_handler ( + acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, + void *context); + +acpi_status +acpi_remove_address_space_handler ( + acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler); + +acpi_status +acpi_install_gpe_handler ( + acpi_handle gpe_device, + u32 gpe_number, + u32 type, + acpi_gpe_handler handler, + void *context); + +acpi_status +acpi_acquire_global_lock ( + u16 timeout, + u32 *handle); + +acpi_status +acpi_release_global_lock ( + u32 handle); + +acpi_status +acpi_remove_gpe_handler ( + acpi_handle gpe_device, + u32 gpe_number, + acpi_gpe_handler handler); + +acpi_status +acpi_enable_event ( + u32 event, + u32 flags); + +acpi_status +acpi_disable_event ( + u32 event, + u32 flags); + +acpi_status +acpi_clear_event ( + u32 event); + +acpi_status +acpi_get_event_status ( + u32 event, + acpi_event_status *event_status); + +acpi_status +acpi_enable_gpe ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags); + +acpi_status +acpi_disable_gpe ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags); + +acpi_status +acpi_clear_gpe ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags); + +acpi_status +acpi_get_gpe_status ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags, + acpi_event_status *event_status); + +acpi_status +acpi_install_gpe_block ( + acpi_handle gpe_device, + struct acpi_generic_address *gpe_block_address, + u32 register_count, + u32 interrupt_level); + +acpi_status +acpi_remove_gpe_block ( + acpi_handle gpe_device); + + +/* + * Resource interfaces + */ + +typedef +acpi_status (*ACPI_WALK_RESOURCE_CALLBACK) ( + struct acpi_resource *resource, + void *context); + + +acpi_status +acpi_get_current_resources( + acpi_handle device_handle, + struct acpi_buffer *ret_buffer); + +acpi_status +acpi_get_possible_resources( + acpi_handle device_handle, + struct acpi_buffer *ret_buffer); + +acpi_status +acpi_walk_resources ( + acpi_handle device_handle, + char *path, + 
ACPI_WALK_RESOURCE_CALLBACK user_function, + void *context); + +acpi_status +acpi_set_current_resources ( + acpi_handle device_handle, + struct acpi_buffer *in_buffer); + +acpi_status +acpi_get_irq_routing_table ( + acpi_handle bus_device_handle, + struct acpi_buffer *ret_buffer); + +acpi_status +acpi_resource_to_address64 ( + struct acpi_resource *resource, + struct acpi_resource_address64 *out); + +/* + * Hardware (ACPI device) interfaces + */ + +acpi_status +acpi_get_register ( + u32 register_id, + u32 *return_value, + u32 flags); + +acpi_status +acpi_set_register ( + u32 register_id, + u32 value, + u32 flags); + +acpi_status +acpi_set_firmware_waking_vector ( + acpi_physical_address physical_address); + +acpi_status +acpi_get_firmware_waking_vector ( + acpi_physical_address *physical_address); + +acpi_status +acpi_get_sleep_type_data ( + u8 sleep_state, + u8 *slp_typ_a, + u8 *slp_typ_b); + +acpi_status +acpi_enter_sleep_state_prep ( + u8 sleep_state); + +acpi_status +acpi_enter_sleep_state ( + u8 sleep_state); + +acpi_status +acpi_enter_sleep_state_s4bios ( + void); + +acpi_status +acpi_leave_sleep_state ( + u8 sleep_state); + + +#endif /* __ACXFACE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acresrc.h linux.21rc5-ac2/include/acpi/acresrc.h --- linux.21rc5/include/acpi/acresrc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acresrc.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,391 @@ +/****************************************************************************** + * + * Name: acresrc.h - Resource Manager function prototypes + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACRESRC_H__ +#define __ACRESRC_H__ + + +/* + * Function prototypes called from Acpi* APIs + */ + +acpi_status +acpi_rs_get_prt_method_data ( + acpi_handle handle, + struct acpi_buffer *ret_buffer); + + +acpi_status +acpi_rs_get_crs_method_data ( + acpi_handle handle, + struct acpi_buffer *ret_buffer); + +acpi_status +acpi_rs_get_prs_method_data ( + acpi_handle handle, + struct acpi_buffer *ret_buffer); + +acpi_status +acpi_rs_get_method_data ( + acpi_handle handle, + char *path, + struct acpi_buffer *ret_buffer); + +acpi_status +acpi_rs_set_srs_method_data ( + acpi_handle handle, + struct acpi_buffer *ret_buffer); + +acpi_status +acpi_rs_create_resource_list ( + union acpi_operand_object *byte_stream_buffer, + struct acpi_buffer *output_buffer); + +acpi_status +acpi_rs_create_byte_stream ( + struct acpi_resource *linked_list_buffer, + struct acpi_buffer *output_buffer); + +acpi_status +acpi_rs_create_pci_routing_table ( + union acpi_operand_object *package_object, + struct acpi_buffer *output_buffer); + + +/* + * Function prototypes called from acpi_rs_create* + */ +void +acpi_rs_dump_irq ( + union acpi_resource_data *data); + +void +acpi_rs_dump_address16 ( + union acpi_resource_data *data); + +void +acpi_rs_dump_address32 ( + union acpi_resource_data *data); + +void +acpi_rs_dump_address64 ( + union acpi_resource_data *data); + +void +acpi_rs_dump_dma ( + union acpi_resource_data *data); + +void +acpi_rs_dump_io ( + union acpi_resource_data *data); + +void +acpi_rs_dump_extended_irq ( + union acpi_resource_data *data); + +void +acpi_rs_dump_fixed_io ( + union acpi_resource_data *data); + +void +acpi_rs_dump_fixed_memory32 ( + union acpi_resource_data *data); + +void +acpi_rs_dump_memory24 ( + union acpi_resource_data *data); + +void +acpi_rs_dump_memory32 ( + union acpi_resource_data *data); + +void +acpi_rs_dump_start_depend_fns ( + union acpi_resource_data *data); + +void +acpi_rs_dump_vendor_specific ( + union acpi_resource_data *data); + +void +acpi_rs_dump_resource_list ( + struct acpi_resource *resource); + +void +acpi_rs_dump_irq_list ( + u8 *route_table); + +acpi_status +acpi_rs_get_byte_stream_start ( + u8 *byte_stream_buffer, + u8 **byte_stream_start, + u32 *size); + +acpi_status +acpi_rs_get_list_length ( + u8 *byte_stream_buffer, + u32 byte_stream_buffer_length, + acpi_size *size_needed); + +acpi_status +acpi_rs_get_byte_stream_length ( + struct acpi_resource *linked_list_buffer, + acpi_size *size_needed); + +acpi_status +acpi_rs_get_pci_routing_table_length ( + union acpi_operand_object *package_object, + acpi_size *buffer_size_needed); + +acpi_status +acpi_rs_byte_stream_to_list ( + u8 *byte_stream_buffer, + u32 byte_stream_buffer_length, + u8 *output_buffer); + +acpi_status +acpi_rs_list_to_byte_stream ( + struct acpi_resource *linked_list, + acpi_size byte_stream_size_needed, + u8 *output_buffer); + +acpi_status +acpi_rs_io_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + 
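
[Illustrative aside, not part of the patch.] The acpi_rs_*_resource / acpi_rs_*_stream prototypes here convert individual AML resource descriptors (IRQ, DMA, I/O, and so on) to and from struct acpi_resource records. A driver normally reaches this machinery through acpi_walk_resources() and the ACPI_WALK_RESOURCE_CALLBACK typedef declared in acpixf.h earlier in this patch. A minimal sketch, assuming the ACPI_RSTYPE_IRQ constant and the id/data.irq layout of struct acpi_resource match this ACPICA snapshot's actypes.h, with printk (linux/kernel.h) standing in for real handling:

    /*
     * Hypothetical example: walk a device's current resources (_CRS)
     * and report the first interrupt of any IRQ descriptor found.
     */
    static acpi_status
    example_resource_callback (
            struct acpi_resource    *resource,
            void                    *context)
    {
            if (resource->id == ACPI_RSTYPE_IRQ) {
                    struct acpi_resource_irq *irq = &resource->data.irq;
                    if (irq->number_of_interrupts > 0)
                            printk(KERN_INFO "example: IRQ %u\n",
                                   irq->interrupts[0]);
            }
            return AE_OK;   /* AE_CTRL_TERMINATE would stop the walk early */
    }

    static acpi_status
    example_walk (acpi_handle handle)
    {
            /* handle: the acpi_handle of the device of interest */
            return acpi_walk_resources(handle, "_CRS",
                                       example_resource_callback, NULL);
    }

Returning AE_OK from the callback lets the walk continue over every descriptor in the buffer; the core invokes the per-type converters prototyped in this header on the caller's behalf.
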
+acpi_status +acpi_rs_fixed_io_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_io_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_fixed_io_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_irq_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_irq_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_dma_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_dma_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_address16_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_address16_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_address32_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_address32_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_address64_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_address64_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_start_depend_fns_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_end_depend_fns_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_start_depend_fns_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_end_depend_fns_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_memory24_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_memory24_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_memory32_range_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_fixed_memory32_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_memory32_range_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_fixed_memory32_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_extended_irq_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_extended_irq_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + 
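
[Illustrative aside, not part of the patch.] Going the other direction, acpi_get_current_resources() (declared in acpixf.h above) runs the byte-stream-to-list converters prototyped here and returns a packed array of struct acpi_resource records. A hedged sketch of consuming that buffer, assuming the era's ACPI_ALLOCATE_BUFFER convention and the id/length record framing terminated by ACPI_RSTYPE_END_TAG (check actypes.h of this snapshot before relying on either):

    /*
     * Hypothetical example: fetch _CRS into an ACPICA-allocated buffer
     * and dump the type and size of each converted resource record.
     */
    static acpi_status
    example_dump_crs (acpi_handle handle)
    {
            struct acpi_buffer      buffer = {ACPI_ALLOCATE_BUFFER, NULL};
            struct acpi_resource    *res;
            acpi_status             status;

            status = acpi_get_current_resources(handle, &buffer);
            if (ACPI_FAILURE(status))
                    return status;

            /* Records are packed back to back; 'length' (which includes
             * any padding) advances to the next record. */
            for (res = (struct acpi_resource *) buffer.pointer;
                 res->id != ACPI_RSTYPE_END_TAG;
                 res = (struct acpi_resource *)((u8 *) res + res->length)) {
                    printk(KERN_INFO "example: resource type %u, %u bytes\n",
                           res->id, res->length);
            }

            /* Buffer came from the OSL allocator (acpiosxf.h above). */
            acpi_os_free(buffer.pointer);
            return AE_OK;
    }

Passing ACPI_ALLOCATE_BUFFER in buffer.length asks the core to size and allocate the output itself, which is why the caller, not the core, releases buffer.pointer afterwards.
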
+acpi_status +acpi_rs_end_tag_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_end_tag_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +acpi_status +acpi_rs_vendor_resource ( + u8 *byte_stream_buffer, + acpi_size *bytes_consumed, + u8 **output_buffer, + acpi_size *structure_size); + +acpi_status +acpi_rs_vendor_stream ( + struct acpi_resource *linked_list, + u8 **output_buffer, + acpi_size *bytes_consumed); + +u8 +acpi_rs_get_resource_type ( + u8 resource_start_byte); + +#endif /* __ACRESRC_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acstruct.h linux.21rc5-ac2/include/acpi/acstruct.h --- linux.21rc5/include/acpi/acstruct.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acstruct.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,202 @@ +/****************************************************************************** + * + * Name: acstruct.h - Internal structs + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACSTRUCT_H__ +#define __ACSTRUCT_H__ + + +/***************************************************************************** + * + * Tree walking typedefs and structs + * + ****************************************************************************/ + + +/* + * Walk state - current state of a parse tree walk. Used for both a leisurely stroll through + * the tree (for whatever reason), and for control method execution. 
+ */ + +#define ACPI_NEXT_OP_DOWNWARD 1 +#define ACPI_NEXT_OP_UPWARD 2 + +#define ACPI_WALK_NON_METHOD 0 +#define ACPI_WALK_METHOD 1 +#define ACPI_WALK_METHOD_RESTART 2 +#define ACPI_WALK_CONST_REQUIRED 3 +#define ACPI_WALK_CONST_OPTIONAL 4 + +struct acpi_walk_state +{ + u8 data_type; /* To differentiate various internal objs MUST BE FIRST!*/\ + acpi_owner_id owner_id; /* Owner of objects created during the walk */ + u8 last_predicate; /* Result of last predicate */ + u8 current_result; /* */ + u8 next_op_info; /* Info about next_op */ + u8 num_operands; /* Stack pointer for Operands[] array */ + u8 return_used; + u8 walk_type; + u16 opcode; /* Current AML opcode */ + u8 scope_depth; + u8 reserved1; + u32 arg_count; /* push for fixed or var args */ + u32 aml_offset; + u32 arg_types; + u32 method_breakpoint; /* For single stepping */ + u32 user_breakpoint; /* User AML breakpoint */ + u32 parse_flags; + u32 prev_arg_types; + + u8 *aml_last_while; + struct acpi_namespace_node arguments[ACPI_METHOD_NUM_ARGS]; /* Control method arguments */ + union acpi_operand_object **caller_return_desc; + union acpi_generic_state *control_state; /* List of control states (nested IFs) */ + struct acpi_namespace_node local_variables[ACPI_METHOD_NUM_LOCALS]; /* Control method locals */ + struct acpi_namespace_node *method_call_node; /* Called method Node*/ + union acpi_parse_object *method_call_op; /* method_call Op if running a method */ + union acpi_operand_object *method_desc; /* Method descriptor if running a method */ + struct acpi_namespace_node *method_node; /* Method Node if running a method */ + union acpi_parse_object *op; /* Current parser op */ + union acpi_operand_object *operands[ACPI_OBJ_NUM_OPERANDS+1]; /* Operands passed to the interpreter (+1 for NULL terminator) */ + const struct acpi_opcode_info *op_info; /* Info on current opcode */ + union acpi_parse_object *origin; /* Start of walk [Obsolete] */ + union acpi_operand_object **params; + struct acpi_parse_state parser_state; /* Current state of parser */ + union acpi_operand_object *result_obj; + union acpi_generic_state *results; /* Stack of accumulated results */ + union acpi_operand_object *return_desc; /* Return object, if any */ + union acpi_generic_state *scope_info; /* Stack of nested scopes */ + + union acpi_parse_object *prev_op; /* Last op that was processed */ + union acpi_parse_object *next_op; /* next op to be processed */ + acpi_parse_downwards descending_callback; + acpi_parse_upwards ascending_callback; + struct acpi_thread_state *thread; + struct acpi_walk_state *next; /* Next walk_state in list */ +}; + + +/* Info used by acpi_ps_init_objects */ + +struct acpi_init_walk_info +{ + u16 method_count; + u16 device_count; + u16 op_region_count; + u16 field_count; + u16 buffer_count; + u16 package_count; + u16 op_region_init; + u16 field_init; + u16 buffer_init; + u16 package_init; + u16 object_count; + struct acpi_table_desc *table_desc; +}; + + +/* Info used by acpi_ns_initialize_devices */ + +struct acpi_device_walk_info +{ + u16 device_count; + u16 num_STA; + u16 num_INI; + struct acpi_table_desc *table_desc; +}; + + +/* TBD: [Restructure] Merge with struct above */ + +struct acpi_walk_info +{ + u32 debug_level; + u32 owner_id; + u8 display_type; +}; + +/* Display Types */ + +#define ACPI_DISPLAY_SUMMARY 0 +#define ACPI_DISPLAY_OBJECTS 1 + +struct acpi_get_devices_info +{ + acpi_walk_callback user_function; + void *context; + char *hid; +}; + + +union acpi_aml_operands +{ + union acpi_operand_object *operands[7]; + + struct + 
{ + struct acpi_object_integer *type; + struct acpi_object_integer *code; + struct acpi_object_integer *argument; + + } fatal; + + struct + { + union acpi_operand_object *source; + struct acpi_object_integer *index; + union acpi_operand_object *target; + + } index; + + struct + { + union acpi_operand_object *source; + struct acpi_object_integer *index; + struct acpi_object_integer *length; + union acpi_operand_object *target; + + } mid; +}; + + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/actables.h linux.21rc5-ac2/include/acpi/actables.h --- linux.21rc5/include/acpi/actables.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/actables.h 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,233 @@ +/****************************************************************************** + * + * Name: actables.h - ACPI table management + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef __ACTABLES_H__ +#define __ACTABLES_H__ + + +/* Used in acpi_tb_map_acpi_table for size parameter if table header is to be used */ + +#define SIZE_IN_HEADER 0 + + +acpi_status +acpi_tb_handle_to_object ( + u16 table_id, + struct acpi_table_desc **table_desc); + +/* + * tbconvrt - Table conversion routines + */ + +acpi_status +acpi_tb_convert_to_xsdt ( + struct acpi_table_desc *table_info); + +acpi_status +acpi_tb_convert_table_fadt ( + void); + +acpi_status +acpi_tb_build_common_facs ( + struct acpi_table_desc *table_info); + +u32 +acpi_tb_get_table_count ( + struct rsdp_descriptor *RSDP, + struct acpi_table_header *RSDT); + +/* + * tbget - Table "get" routines + */ + +acpi_status +acpi_tb_get_table ( + struct acpi_pointer *address, + struct acpi_table_desc *table_info); + +acpi_status +acpi_tb_get_table_header ( + struct acpi_pointer *address, + struct acpi_table_header *return_header); + +acpi_status +acpi_tb_get_table_body ( + struct acpi_pointer *address, + struct acpi_table_header *header, + struct acpi_table_desc *table_info); + +acpi_status +acpi_tb_get_this_table ( + struct acpi_pointer *address, + struct acpi_table_header *header, + struct acpi_table_desc *table_info); + +acpi_status +acpi_tb_table_override ( + struct acpi_table_header *header, + struct acpi_table_desc *table_info); + +acpi_status +acpi_tb_get_table_ptr ( + acpi_table_type table_type, + u32 instance, + struct acpi_table_header **table_ptr_loc); + +acpi_status +acpi_tb_verify_rsdp ( + struct acpi_pointer *address); + +void +acpi_tb_get_rsdt_address ( + struct acpi_pointer *out_address); + +acpi_status +acpi_tb_validate_rsdt ( + struct acpi_table_header *table_ptr); + +acpi_status +acpi_tb_get_required_tables ( + void); + +acpi_status +acpi_tb_get_primary_table ( + struct acpi_pointer *address, + struct acpi_table_desc *table_info); + +acpi_status +acpi_tb_get_secondary_table ( + struct acpi_pointer *address, + acpi_string signature, + struct acpi_table_desc *table_info); + +/* + * tbinstall - Table installation + */ + +acpi_status +acpi_tb_install_table ( + struct acpi_table_desc *table_info); + +acpi_status +acpi_tb_match_signature ( + char *signature, + struct acpi_table_desc *table_info, + u8 search_type); + +acpi_status +acpi_tb_recognize_table ( + struct acpi_table_desc *table_info, + u8 search_type); + +acpi_status +acpi_tb_init_table_descriptor ( + acpi_table_type table_type, + struct acpi_table_desc *table_info); + + +/* + * tbremove - Table removal and deletion + */ + +void +acpi_tb_delete_all_tables ( + void); + +void +acpi_tb_delete_tables_by_type ( + acpi_table_type type); + +void +acpi_tb_delete_single_table ( + struct acpi_table_desc *table_desc); + +struct acpi_table_desc * +acpi_tb_uninstall_table ( + struct acpi_table_desc *table_desc); + + +/* + * tbrsd - RSDP, RSDT utilities + */ + +acpi_status +acpi_tb_get_table_rsdt ( + void); + +u8 * +acpi_tb_scan_memory_for_rsdp ( + u8 *start_address, + u32 length); + +acpi_status +acpi_tb_find_rsdp ( + struct acpi_table_desc *table_info, + u32 flags); + + +/* + * tbutils - common table utilities + */ + +acpi_status +acpi_tb_find_table ( + char *signature, + char *oem_id, + char *oem_table_id, + struct acpi_table_header **table_ptr); + +acpi_status +acpi_tb_verify_table_checksum ( + struct acpi_table_header *table_header); + +u8 +acpi_tb_checksum ( + void *buffer, + u32 length); + +acpi_status +acpi_tb_validate_table_header ( + struct acpi_table_header *table_header); + + +#endif /* __ACTABLES_H__ */ diff --exclude-from /usr/src/exclude -u 
--new-file --recursive linux.21rc5/include/acpi/actbl1.h linux.21rc5-ac2/include/acpi/actbl1.h --- linux.21rc5/include/acpi/actbl1.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/actbl1.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,136 @@ +/****************************************************************************** + * + * Name: actbl1.h - ACPI 1.0 tables + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef __ACTBL1_H__ +#define __ACTBL1_H__ + +#pragma pack(1) + +/* + * ACPI 1.0 Root System Description Table (RSDT) + */ +struct rsdt_descriptor_rev1 +{ + struct acpi_table_header header; /* ACPI Table header */ + u32 table_offset_entry [1]; /* Array of pointers to other */ + /* ACPI tables */ +}; + + +/* + * ACPI 1.0 Firmware ACPI Control Structure (FACS) + */ +struct facs_descriptor_rev1 +{ + char signature[4]; /* ACPI Signature */ + u32 length; /* Length of structure, in bytes */ + u32 hardware_signature; /* Hardware configuration signature */ + u32 firmware_waking_vector; /* ACPI OS waking vector */ + u32 global_lock; /* Global Lock */ + u32 S4bios_f : 1; /* Indicates if S4BIOS support is present */ + u32 reserved1 : 31; /* Must be 0 */ + u8 resverved3 [40]; /* Reserved - must be zero */ +}; + + +/* + * ACPI 1.0 Fixed ACPI Description Table (FADT) + */ +struct fadt_descriptor_rev1 +{ + struct acpi_table_header header; /* ACPI Table header */ + u32 firmware_ctrl; /* Physical address of FACS */ + u32 dsdt; /* Physical address of DSDT */ + u8 model; /* System Interrupt Model */ + u8 reserved1; /* Reserved */ + u16 sci_int; /* System vector of SCI interrupt */ + u32 smi_cmd; /* Port address of SMI command port */ + u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ + u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ + u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ + u8 reserved2; /* Reserved - must be zero */ + u32 pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */ + u32 pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */ + u32 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ + u32 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ + u32 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ + u32 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ + u32 gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ + u32 gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ + u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */ + u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */ + u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ + u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ + u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ + u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ + u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ + u8 reserved3; /* Reserved */ + u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ + u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ + u16 flush_size; /* Size of area read to flush caches */ + u16 flush_stride; /* Stride used in flushing caches */ + u8 duty_offset; /* Bit location of duty cycle field in p_cnt reg */ + u8 duty_width; /* Bit width of duty cycle field in p_cnt reg */ + u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ + u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ + u8 century; /* Index to century in RTC CMOS RAM */ + u8 reserved4; /* Reserved */ + u8 reserved4a; /* Reserved */ + u8 reserved4b; /* Reserved */ + u32 wb_invd : 1; /* The wbinvd instruction works properly */ + u32 wb_invd_flush : 1; /* The wbinvd flushes but does not invalidate */ + u32 proc_c1 : 1; /* All processors support C1 state */ + u32 plvl2_up : 1; /* C2 state works on MP system */ + u32 pwr_button : 1; /* Power button is handled as a generic feature */ + u32 sleep_button : 1; /* Sleep button is handled 
as a generic feature, or not present */ + u32 fixed_rTC : 1; /* RTC wakeup stat not in fixed register space */ + u32 rtcs4 : 1; /* RTC wakeup stat not possible from S4 */ + u32 tmr_val_ext : 1; /* The tmr_val width is 32 bits (0 = 24 bits) */ + u32 reserved5 : 23; /* Reserved - must be zero */ +}; + +#pragma pack() + +#endif /* __ACTBL1_H__ */ + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/actbl2.h linux.21rc5-ac2/include/acpi/actbl2.h --- linux.21rc5/include/acpi/actbl2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/actbl2.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,201 @@ +/****************************************************************************** + * + * Name: actbl2.h - ACPI Specification Revision 2.0 Tables + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
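
/*
 * Editor's note, not part of the patch: actbl1.h above declares the
 * RSDT's table_offset_entry array with a single element, so a consumer
 * must derive the real entry count from header.length, which covers the
 * whole table including the common header. A minimal sketch; the helper
 * name is hypothetical and only structures added by this patch are used.
 */
static inline u32 rsdt_rev1_entry_count(struct rsdt_descriptor_rev1 *rsdt)
{
        /* bytes past the common header, one 32-bit table pointer each */
        return (rsdt->header.length - sizeof(struct acpi_table_header)) /
                sizeof(u32);
}
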
+ */
+
+#ifndef __ACTBL2_H__
+#define __ACTBL2_H__
+
+/*
+ * Preferred Power Management Profiles
+ */
+#define PM_UNSPECIFIED 0
+#define PM_DESKTOP 1
+#define PM_MOBILE 2
+#define PM_WORKSTATION 3
+#define PM_ENTERPRISE_SERVER 4
+#define PM_SOHO_SERVER 5
+#define PM_APPLIANCE_PC 6
+
+/*
+ * ACPI Boot Arch Flags
+ */
+#define BAF_LEGACY_DEVICES 0x0001
+#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
+
+#define FADT2_REVISION_ID 3
+
+
+#pragma pack(1)
+
+/*
+ * ACPI 2.0 Root System Description Table (RSDT)
+ */
+struct rsdt_descriptor_rev2
+{
+ struct acpi_table_header header; /* ACPI table header */
+ u32 table_offset_entry [1]; /* Array of pointers to */
+ /* ACPI table headers */
+};
+
+
+/*
+ * ACPI 2.0 Extended System Description Table (XSDT)
+ */
+struct xsdt_descriptor_rev2
+{
+ struct acpi_table_header header; /* ACPI table header */
+ u64 table_offset_entry [1]; /* Array of pointers to */
+ /* ACPI table headers */
+};
+
+
+/*
+ * ACPI 2.0 Firmware ACPI Control Structure (FACS)
+ */
+struct facs_descriptor_rev2
+{
+ char signature[4]; /* ACPI signature */
+ u32 length; /* Length of structure, in bytes */
+ u32 hardware_signature; /* Hardware configuration signature */
+ u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector. */
+ u32 global_lock; /* Global Lock used to synchronize access to shared hardware resources */
+ u32 S4bios_f : 1; /* S4Bios_f - Indicates if S4BIOS support is present */
+ u32 reserved1 : 31; /* Must be 0 */
+ u64 xfirmware_waking_vector; /* 64-bit physical address of the Firmware Waking Vector. */
+ u8 version; /* Version of this table */
+ u8 reserved3 [31]; /* Reserved - must be zero */
+};
+
+
+/*
+ * ACPI 2.0 Generic Address Structure (GAS)
+ */
+struct acpi_generic_address
+{
+ u8 address_space_id; /* Address space where struct or register exists. */
+ u8 register_bit_width; /* Size in bits of given register */
+ u8 register_bit_offset; /* Bit offset within the register */
+ u8 reserved; /* Must be 0 */
+ u64 address; /* 64-bit address of struct or register */
+};
+
+
+/*
+ * ACPI 2.0 Fixed ACPI Description Table (FADT)
+ */
+struct fadt_descriptor_rev2
+{
+ struct acpi_table_header header; /* ACPI table header */
+ u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */
+ u32 V1_dsdt; /* 32-bit physical address of DSDT */
+ u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0 */
+ u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM.
*/ + u16 sci_int; /* System vector of SCI interrupt */ + u32 smi_cmd; /* Port address of SMI command port */ + u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ + u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ + u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ + u8 pstate_cnt; /* Processor performance state control*/ + u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */ + u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */ + u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ + u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ + u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ + u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ + u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ + u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ + u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */ + u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */ + u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ + u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ + u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ + u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ + u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ + u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ + u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ + u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ + u16 flush_size; /* Number of flush strides that need to be read */ + u16 flush_stride; /* Processor's memory cache line width, in bytes */ + u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ + u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/ + u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ + u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ + u8 century; /* Index to century in RTC CMOS RAM */ + u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/ + u8 reserved2; /* Reserved */ + u32 wb_invd : 1; /* The wbinvd instruction works properly */ + u32 wb_invd_flush : 1; /* The wbinvd flushes but does not invalidate */ + u32 proc_c1 : 1; /* All processors support C1 state */ + u32 plvl2_up : 1; /* C2 state works on MP system */ + u32 pwr_button : 1; /* Power button is handled as a generic feature */ + u32 sleep_button : 1; /* Sleep button is handled as a generic feature, or not present */ + u32 fixed_rTC : 1; /* RTC wakeup stat not in fixed register space */ + u32 rtcs4 : 1; /* RTC wakeup stat not possible from S4 */ + u32 tmr_val_ext : 1; /* Indicates tmr_val is 32 bits 0=24-bits*/ + u32 dock_cap : 1; /* Supports Docking */ + u32 reset_reg_sup : 1; /* Indicates system supports system reset via the FADT RESET_REG*/ + u32 sealed_case : 1; /* Indicates system has no internal expansion capabilities and case is sealed. */ + u32 headless : 1; /* Indicates system does not have local video capabilities or local input devices.*/ + u32 cpu_sw_sleep : 1; /* Indicates to OSPM that a processor native instruction */ + /* Must be executed after writing the SLP_TYPx register. */ + u32 reserved6 : 18; /* Reserved - must be zero */ + + struct acpi_generic_address reset_register; /* Reset register address in GAS format */ + u8 reset_value; /* Value to write to the reset_register port to reset the system. 
*/ + u8 reserved7[3]; /* These three bytes must be zero */ + u64 xfirmware_ctrl; /* 64-bit physical address of FACS */ + u64 Xdsdt; /* 64-bit physical address of DSDT */ + struct acpi_generic_address xpm1a_evt_blk; /* Extended Power Mgt 1a acpi_event Reg Blk address */ + struct acpi_generic_address xpm1b_evt_blk; /* Extended Power Mgt 1b acpi_event Reg Blk address */ + struct acpi_generic_address xpm1a_cnt_blk; /* Extended Power Mgt 1a Control Reg Blk address */ + struct acpi_generic_address xpm1b_cnt_blk; /* Extended Power Mgt 1b Control Reg Blk address */ + struct acpi_generic_address xpm2_cnt_blk; /* Extended Power Mgt 2 Control Reg Blk address */ + struct acpi_generic_address xpm_tmr_blk; /* Extended Power Mgt Timer Ctrl Reg Blk address */ + struct acpi_generic_address xgpe0_blk; /* Extended General Purpose acpi_event 0 Reg Blk address */ + struct acpi_generic_address xgpe1_blk; /* Extended General Purpose acpi_event 1 Reg Blk address */ +}; + + +#pragma pack() + +#endif /* __ACTBL2_H__ */ + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/actbl71.h linux.21rc5-ac2/include/acpi/actbl71.h --- linux.21rc5/include/acpi/actbl71.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/actbl71.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,144 @@ +/****************************************************************************** + * + * Name: actbl71.h - IA-64 Extensions to the ACPI Spec Rev. 0.71 + * This file includes tables specific to this + * specification revision. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
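
/*
 * Editor's note, not part of the patch: struct acpi_generic_address
 * (actbl2.h above) names a register by address space plus a 64-bit
 * address, so readers must dispatch on address_space_id before touching
 * it. A minimal sketch under stated assumptions: the helper name is
 * hypothetical, the 0/1 IDs match ACPI_ADR_SPACE_SYSTEM_MEMORY and
 * ACPI_ADR_SPACE_SYSTEM_IO defined in actypes.h later in this patch,
 * and for memory space the caller passes an ioremap()'d view of
 * gas->address.
 */
static u32 acpi_sketch_read_gas32(struct acpi_generic_address *gas, void *mapped)
{
        if (gas->address_space_id == 1)         /* system I/O space */
                return inl((u16) gas->address);
        return readl(mapped);                   /* system memory space */
}
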
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ACTBL71_H__
+#define __ACTBL71_H__
+
+
+/* 0.71 FADT address_space data item bitmasks defines */
+/* If the associated bit is zero then it is in memory space else in io space */
+
+#define SMI_CMD_ADDRESS_SPACE 0x01
+#define PM1_BLK_ADDRESS_SPACE 0x02
+#define PM2_CNT_BLK_ADDRESS_SPACE 0x04
+#define PM_TMR_BLK_ADDRESS_SPACE 0x08
+#define GPE0_BLK_ADDRESS_SPACE 0x10
+#define GPE1_BLK_ADDRESS_SPACE 0x20
+
+/* Only for clarity in declarations */
+
+typedef u64 IO_ADDRESS;
+
+
+#pragma pack(1)
+struct rsdp_descriptor_rev071 /* Root System Descriptor Pointer */
+{
+ NATIVE_CHAR signature [8]; /* contains "RSD PTR " */
+ u8 checksum; /* to make sum of struct == 0 */
+ NATIVE_CHAR oem_id [6]; /* OEM identification */
+ u8 reserved; /* Must be 0 for 1.0, 2 for 2.0 */
+ u64 rsdt_physical_address; /* 64-bit physical address of RSDT */
+};
+
+
+/*****************************************/
+/* IA64 Extensions to ACPI Spec Rev 0.71 */
+/* for the Root System Description Table */
+/*****************************************/
+struct rsdt_descriptor_rev071
+{
+ struct acpi_table_header header; /* Table header */
+ u32 reserved_pad; /* IA64 alignment, must be 0 */
+ u64 table_offset_entry [1]; /* Array of pointers to other */
+ /* tables' headers */
+};
+
+
+/*******************************************/
+/* IA64 Extensions to ACPI Spec Rev 0.71 */
+/* for the Firmware ACPI Control Structure */
+/*******************************************/
+struct facs_descriptor_rev071
+{
+ NATIVE_CHAR signature[4]; /* signature "FACS" */
+ u32 length; /* length of structure, in bytes */
+ u32 hardware_signature; /* hardware configuration signature */
+ u32 reserved4; /* must be 0 */
+ u64 firmware_waking_vector; /* ACPI OS waking vector */
+ u64 global_lock; /* Global Lock */
+ u32 S4bios_f : 1; /* Indicates if S4BIOS support is present */
+ u32 reserved1 : 31; /* must be 0 */
+ u8 reserved3 [28]; /* reserved - must be zero */
+};
+
+
+/******************************************/
+/* IA64 Extensions to ACPI Spec Rev 0.71 */
+/* for the Fixed ACPI Description Table */
+/******************************************/
+struct fadt_descriptor_rev071
+{
+ struct acpi_table_header header; /* table header */
+ u32 reserved_pad; /* IA64 alignment, must be 0 */
+ u64 firmware_ctrl; /* 64-bit Physical address of FACS */
+ u64 dsdt; /* 64-bit Physical address of DSDT */
+ u8 model; /* System Interrupt Model */
+ u8 address_space; /* Address Space Bitmask */
+ u16 sci_int; /* System vector of SCI interrupt */
+ u8 acpi_enable; /* value to write to smi_cmd to enable ACPI */
+ u8 acpi_disable; /* value to write to smi_cmd to disable ACPI */
+ u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
+ u8 reserved2; /* reserved - must be zero */
+ u64 smi_cmd; /* Port address of SMI command port */
+ u64 pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */
+ u64 pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */
+ u64 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
+ u64 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
+ u64 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
+ u64 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
+ u64 gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */
+ u64 gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */
+ u8 pm1_evt_len; /* Byte length
of ports at pm1_x_evt_blk */
+ u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */
+ u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
+ u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */
+ u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */
+ u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
+ u8 gpe1_base; /* offset in gpe model where gpe1 events start */
+ u8 reserved3; /* reserved */
+ u16 plvl2_lat; /* worst case HW latency to enter/exit C2 state */
+ u16 plvl3_lat; /* worst case HW latency to enter/exit C3 state */
+ u8 day_alrm; /* index to day-of-month alarm in RTC CMOS RAM */
+ u8 mon_alrm; /* index to month-of-year alarm in RTC CMOS RAM */
+ u8 century; /* index to century in RTC CMOS RAM */
+ u8 reserved4; /* reserved */
+ u32 flush_cache : 1; /* PAL_FLUSH_CACHE is correctly supported */
+ u32 reserved5 : 1; /* reserved - must be zero */
+ u32 proc_c1 : 1; /* all processors support C1 state */
+ u32 plvl2_up : 1; /* C2 state works on MP system */
+ u32 pwr_button : 1; /* Power button is handled as a generic feature */
+ u32 sleep_button : 1; /* Sleep button is handled as a generic feature, or not present */
+ u32 fixed_rTC : 1; /* RTC wakeup stat not in fixed register space */
+ u32 rtcs4 : 1; /* RTC wakeup stat not possible from S4 */
+ u32 tmr_val_ext : 1; /* tmr_val is 32 bits */
+ u32 dock_cap : 1; /* Supports Docking */
+ u32 reserved6 : 22; /* reserved - must be zero */
+};
+
+#pragma pack()
+
+#endif /* __ACTBL71_H__ */
+
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/actbl.h linux.21rc5-ac2/include/acpi/actbl.h
--- linux.21rc5/include/acpi/actbl.h 1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/include/acpi/actbl.h 2003-04-28 17:59:26.000000000 +0100
@@ -0,0 +1,235 @@
+/******************************************************************************
+ *
+ * Name: actbl.h - Table data structures defined in ACPI specification
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2003, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
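
/*
 * Editor's note, not part of the patch: struct acpi_table_header below
 * carries a checksum byte chosen so that all bytes of the table sum to
 * zero modulo 256. A minimal verification sketch (hypothetical helper
 * name, assuming the header definition added by this patch):
 */
static int acpi_sketch_table_checksum_ok(struct acpi_table_header *header)
{
        u8 sum = 0;
        u8 *p = (u8 *) header;
        u32 i;

        for (i = 0; i < header->length; i++)
                sum += p[i];

        return sum == 0;        /* any non-zero sum marks the table corrupt */
}
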
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACTBL_H__ +#define __ACTBL_H__ + + +/* + * Values for description table header signatures + */ +#define RSDP_NAME "RSDP" +#define RSDP_SIG "RSD PTR " /* RSDT Pointer signature */ +#define APIC_SIG "APIC" /* Multiple APIC Description Table */ +#define DSDT_SIG "DSDT" /* Differentiated System Description Table */ +#define FADT_SIG "FACP" /* Fixed ACPI Description Table */ +#define FACS_SIG "FACS" /* Firmware ACPI Control Structure */ +#define PSDT_SIG "PSDT" /* Persistent System Description Table */ +#define RSDT_SIG "RSDT" /* Root System Description Table */ +#define XSDT_SIG "XSDT" /* Extended System Description Table */ +#define SSDT_SIG "SSDT" /* Secondary System Description Table */ +#define SBST_SIG "SBST" /* Smart Battery Specification Table */ +#define SPIC_SIG "SPIC" /* IOSAPIC table */ +#define BOOT_SIG "BOOT" /* Boot table */ + + +#define GL_OWNED 0x02 /* Ownership of global lock is bit 1 */ + +/* values of Mapic.Model */ + +#define DUAL_PIC 0 +#define MULTIPLE_APIC 1 + +/* values of Type in struct apic_header */ + +#define APIC_PROC 0 +#define APIC_IO 1 + + +/* + * Common table types. The base code can remain + * constant if the underlying tables are changed + */ +#define RSDT_DESCRIPTOR struct rsdt_descriptor_rev2 +#define XSDT_DESCRIPTOR struct xsdt_descriptor_rev2 +#define FACS_DESCRIPTOR struct facs_descriptor_rev2 +#define FADT_DESCRIPTOR struct fadt_descriptor_rev2 + + +#pragma pack(1) + +/* + * Architecture-independent tables + * The architecture dependent tables are in separate files + */ +struct rsdp_descriptor /* Root System Descriptor Pointer */ +{ + char signature [8]; /* ACPI signature, contains "RSD PTR " */ + u8 checksum; /* To make sum of struct == 0 */ + char oem_id [6]; /* OEM identification */ + u8 revision; /* Must be 0 for 1.0, 2 for 2.0 */ + u32 rsdt_physical_address; /* 32-bit physical address of RSDT */ + u32 length; /* XSDT Length in bytes including hdr */ + u64 xsdt_physical_address; /* 64-bit physical address of XSDT */ + u8 extended_checksum; /* Checksum of entire table */ + char reserved [3]; /* Reserved field must be 0 */ +}; + + +struct acpi_table_header /* ACPI common table header */ +{ + char signature [4]; /* ACPI signature (4 ASCII characters) */ + u32 length; /* Length of table, in bytes, including header */ + u8 revision; /* ACPI Specification minor version # */ + u8 checksum; /* To make sum of entire table == 0 */ + char oem_id [6]; /* OEM identification */ + char oem_table_id [8]; /* OEM table identification */ + u32 oem_revision; /* OEM revision number */ + char asl_compiler_id [4]; /* ASL compiler vendor ID */ + u32 asl_compiler_revision; /* ASL compiler revision number */ +}; + + +struct acpi_common_facs /* Common FACS for internal use */ +{ + u32 *global_lock; + u64 *firmware_waking_vector; + u8 vector_width; +}; + + +struct apic_table +{ + struct acpi_table_header header; /* ACPI table header */ + u32 local_apic_address; /* Physical address for accessing local APICs */ + u32 PCATcompat : 1; /* a one indicates system 
also has dual 8259s */ + u32 reserved1 : 31; +}; + + +struct apic_header +{ + u8 type; /* APIC type. Either APIC_PROC or APIC_IO */ + u8 length; /* Length of APIC structure */ +}; + + +struct processor_apic +{ + struct apic_header header; + u8 processor_apic_id; /* ACPI processor id */ + u8 local_apic_id; /* Processor's local APIC id */ + u32 processor_enabled: 1; /* Processor is usable if set */ + u32 reserved1 : 31; +}; + + +struct io_apic +{ + struct apic_header header; + u8 io_apic_id; /* I/O APIC ID */ + u8 reserved; /* Reserved - must be zero */ + u32 io_apic_address; /* APIC's physical address */ + u32 vector; /* Interrupt vector index where INTI + * lines start */ +}; + + +/* + * IA64 TBD: Add SAPIC Tables + */ + +/* + * IA64 TBD: Modify Smart Battery Description to comply with ACPI IA64 + * extensions. + */ +struct smart_battery_description_table +{ + struct acpi_table_header header; + u32 warning_level; + u32 low_level; + u32 critical_level; +}; + +struct hpet_description_table +{ + struct acpi_table_header header; + u32 hardware_id; + u32 base_address[3]; + u8 hpet_number; + u16 clock_tick; + u8 attributes; +}; +#pragma pack() + + +/* + * ACPI Table information. We save the table address, length, + * and type of memory allocation (mapped or allocated) for each + * table for 1) when we exit, and 2) if a new table is installed + */ +#define ACPI_MEM_NOT_ALLOCATED 0 +#define ACPI_MEM_ALLOCATED 1 +#define ACPI_MEM_MAPPED 2 + +/* Definitions for the Flags bitfield member of struct acpi_table_support */ + +#define ACPI_TABLE_SINGLE 0x00 +#define ACPI_TABLE_MULTIPLE 0x01 +#define ACPI_TABLE_EXECUTABLE 0x02 + +#define ACPI_TABLE_ROOT 0x00 +#define ACPI_TABLE_PRIMARY 0x10 +#define ACPI_TABLE_SECONDARY 0x20 +#define ACPI_TABLE_ALL 0x30 +#define ACPI_TABLE_TYPE_MASK 0x30 + +/* Data about each known table type */ + +struct acpi_table_support +{ + char *name; + char *signature; + void **global_ptr; + u8 sig_length; + u8 flags; +}; + + +/* + * Get the architecture-specific tables + */ +#include "actbl1.h" /* Acpi 1.0 table definitions */ +#include "actbl2.h" /* Acpi 2.0 table definitions */ + +#endif /* __ACTBL_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/actypes.h linux.21rc5-ac2/include/acpi/actypes.h --- linux.21rc5/include/acpi/actypes.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/actypes.h 2003-05-29 01:02:16.000000000 +0100 @@ -0,0 +1,1222 @@ +/****************************************************************************** + * + * Name: actypes.h - Common data types for the entire ACPI subsystem + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACTYPES_H__ +#define __ACTYPES_H__ + +/*! [Begin] no source code translation (keep the typedefs) */ + + + +/* + * Data type ranges + */ +#define ACPI_UINT8_MAX (UINT8) 0xFF +#define ACPI_UINT16_MAX (UINT16) 0xFFFF +#define ACPI_UINT32_MAX (UINT32) 0xFFFFFFFF +#define ACPI_UINT64_MAX (UINT64) 0xFFFFFFFFFFFFFFFF +#define ACPI_ASCII_MAX 0x7F + + +#ifdef DEFINE_ALTERNATE_TYPES +/* + * Types used only in translated source, defined here to enable + * cross-platform compilation only. + */ +typedef int s32; +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef COMPILER_DEPENDENT_UINT64 u64; + +#endif + + +/* + * Data types - Fixed across all compilation models (16/32/64) + * + * BOOLEAN Logical Boolean. + * INT8 8-bit (1 byte) signed value + * UINT8 8-bit (1 byte) unsigned value + * INT16 16-bit (2 byte) signed value + * UINT16 16-bit (2 byte) unsigned value + * INT32 32-bit (4 byte) signed value + * UINT32 32-bit (4 byte) unsigned value + * INT64 64-bit (8 byte) signed value + * UINT64 64-bit (8 byte) unsigned value + * ACPI_NATIVE_INT 32-bit on IA-32, 64-bit on IA-64 signed value + * ACPI_NATIVE_UINT 32-bit on IA-32, 64-bit on IA-64 unsigned value + */ + +#ifndef ACPI_MACHINE_WIDTH +#error ACPI_MACHINE_WIDTH not defined +#endif + +#if ACPI_MACHINE_WIDTH == 64 + +/*! [Begin] no source code translation (keep the typedefs) */ + +/* + * 64-bit type definitions + */ +typedef unsigned char UINT8; +typedef unsigned char BOOLEAN; +typedef unsigned short UINT16; +typedef int INT32; +typedef unsigned int UINT32; +typedef COMPILER_DEPENDENT_INT64 INT64; +typedef COMPILER_DEPENDENT_UINT64 UINT64; + +/*! [End] no source code translation !*/ + +typedef s64 acpi_native_int; +typedef u64 acpi_native_uint; + +typedef u64 acpi_table_ptr; +typedef u64 acpi_io_address; +typedef u64 acpi_physical_address; +typedef u64 acpi_size; + +#define ALIGNED_ADDRESS_BOUNDARY 0x00000008 /* No hardware alignment support in IA64 */ +#define ACPI_USE_NATIVE_DIVIDE /* Native 64-bit integer support */ +#define ACPI_MAX_PTR ACPI_UINT64_MAX +#define ACPI_SIZE_MAX ACPI_UINT64_MAX + + +#elif ACPI_MACHINE_WIDTH == 16 + +/*! 
[Begin] no source code translation (keep the typedefs) */
+
+/*
+ * 16-bit type definitions
+ */
+typedef unsigned char UINT8;
+typedef unsigned char BOOLEAN;
+typedef unsigned int UINT16;
+typedef long INT32;
+typedef int INT16;
+typedef unsigned long UINT32;
+
+typedef struct
+{
+ UINT32 Lo;
+ UINT32 Hi;
+} UINT64;
+
+/*! [End] no source code translation !*/
+
+typedef u16 acpi_native_uint;
+typedef s16 acpi_native_int;
+
+typedef u32 acpi_table_ptr;
+typedef u32 acpi_io_address;
+typedef char *acpi_physical_address;
+typedef u16 acpi_size;
+
+#define ALIGNED_ADDRESS_BOUNDARY 0x00000002
+#define ACPI_MISALIGNED_TRANSFERS
+#define ACPI_USE_NATIVE_DIVIDE /* No 64-bit integers, ok to use native divide */
+#define ACPI_MAX_PTR ACPI_UINT16_MAX
+#define ACPI_SIZE_MAX ACPI_UINT16_MAX
+
+/*
+ * (16-bit only) internal integers must be 32-bits, so
+ * 64-bit integers cannot be supported
+ */
+#define ACPI_NO_INTEGER64_SUPPORT
+
+
+#elif ACPI_MACHINE_WIDTH == 32
+
+/*! [Begin] no source code translation (keep the typedefs) */
+
+/*
+ * 32-bit type definitions (default)
+ */
+typedef unsigned char UINT8;
+typedef unsigned char BOOLEAN;
+typedef unsigned short UINT16;
+typedef int INT32;
+typedef unsigned int UINT32;
+typedef COMPILER_DEPENDENT_INT64 INT64;
+typedef COMPILER_DEPENDENT_UINT64 UINT64;
+
+/*! [End] no source code translation !*/
+
+typedef s32 acpi_native_int;
+typedef u32 acpi_native_uint;
+
+typedef u64 acpi_table_ptr;
+typedef u32 acpi_io_address;
+typedef u64 acpi_physical_address;
+typedef u32 acpi_size;
+
+#define ALIGNED_ADDRESS_BOUNDARY 0x00000004
+#define ACPI_MISALIGNED_TRANSFERS
+#define ACPI_MAX_PTR ACPI_UINT32_MAX
+#define ACPI_SIZE_MAX ACPI_UINT32_MAX
+
+#else
+#error unknown ACPI_MACHINE_WIDTH
+#endif
+
+
+/*
+ * Miscellaneous common types
+ */
+typedef u32 UINT32_BIT;
+typedef acpi_native_uint ACPI_PTRDIFF;
+
+/*
+ * Pointer overlays to avoid lots of typecasting for
+ * code that accepts both physical and logical pointers.
+ */
+union acpi_pointers
+{
+ acpi_physical_address physical;
+ void *logical;
+ acpi_table_ptr value;
+};
+
+struct acpi_pointer
+{
+ u32 pointer_type;
+ union acpi_pointers pointer;
+};
+
+/* pointer_types for above */
+
+#define ACPI_PHYSICAL_POINTER 0x01
+#define ACPI_LOGICAL_POINTER 0x02
+
+/* Processor mode */
+
+#define ACPI_PHYSICAL_ADDRESSING 0x04
+#define ACPI_LOGICAL_ADDRESSING 0x08
+#define ACPI_MEMORY_MODE 0x0C
+
+#define ACPI_PHYSMODE_PHYSPTR (ACPI_PHYSICAL_ADDRESSING | ACPI_PHYSICAL_POINTER)
+#define ACPI_LOGMODE_PHYSPTR (ACPI_LOGICAL_ADDRESSING | ACPI_PHYSICAL_POINTER)
+#define ACPI_LOGMODE_LOGPTR (ACPI_LOGICAL_ADDRESSING | ACPI_LOGICAL_POINTER)
+
+
+/*
+ * Useful defines
+ */
+#ifdef FALSE
+#undef FALSE
+#endif
+#define FALSE (1 == 0)
+
+#ifdef TRUE
+#undef TRUE
+#endif
+#define TRUE (1 == 1)
+
+#ifndef NULL
+#define NULL (void *) 0
+#endif
+
+
+/*
+ * Local datatypes
+ */
+typedef u32 acpi_status; /* All ACPI Exceptions */
+typedef u32 acpi_name; /* 4-byte ACPI name */
+typedef char * acpi_string; /* Null terminated ASCII string */
+typedef void * acpi_handle; /* Actually a ptr to a Node */
+
+struct uint64_struct
+{
+ u32 lo;
+ u32 hi;
+};
+
+union uint64_overlay
+{
+ u64 full;
+ struct uint64_struct part;
+};
+
+struct uint32_struct
+{
+ u32 lo;
+ u32 hi;
+};
+
+
+/*
+ * Acpi integer width. In ACPI version 1, integers are
+ * 32 bits. In ACPI version 2, integers are 64 bits.
+ * Note that this pertains to the ACPI integer type only, not
+ * other integers used in the implementation of the ACPI CA
+ * subsystem.
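
/*
 * Editor's note, not part of the patch: union uint64_overlay above is
 * the portable way to split a 64-bit value into its 32-bit halves on
 * hosts without native 64-bit arithmetic. A minimal sketch
 * (hypothetical helper name):
 */
static void acpi_sketch_split_u64(u64 value, u32 *hi, u32 *lo)
{
        union uint64_overlay overlay;

        overlay.full = value;           /* write as one 64-bit quantity */
        *hi = overlay.part.hi;          /* read back the upper 32 bits */
        *lo = overlay.part.lo;          /* and the lower 32 bits */
}
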
+ */ +#ifdef ACPI_NO_INTEGER64_SUPPORT + +/* 32-bit integers only, no 64-bit support */ + +typedef u32 acpi_integer; +#define ACPI_INTEGER_MAX ACPI_UINT32_MAX +#define ACPI_INTEGER_BIT_SIZE 32 +#define ACPI_MAX_BCD_VALUE 99999999 +#define ACPI_MAX_BCD_DIGITS 8 +#define ACPI_MAX_DECIMAL_DIGITS 10 + +#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 32-bit divide */ + + +#else + +/* 64-bit integers */ + +typedef u64 acpi_integer; +#define ACPI_INTEGER_MAX ACPI_UINT64_MAX +#define ACPI_INTEGER_BIT_SIZE 64 +#define ACPI_MAX_BCD_VALUE 9999999999999999 +#define ACPI_MAX_BCD_DIGITS 16 +#define ACPI_MAX_DECIMAL_DIGITS 19 + +#if ACPI_MACHINE_WIDTH == 64 +#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */ +#endif +#endif + + +/* + * Constants with special meanings + */ +#define ACPI_ROOT_OBJECT (acpi_handle) ACPI_PTR_ADD (char, NULL, ACPI_MAX_PTR) + + +/* + * Initialization sequence + */ +#define ACPI_FULL_INITIALIZATION 0x00 +#define ACPI_NO_ADDRESS_SPACE_INIT 0x01 +#define ACPI_NO_HARDWARE_INIT 0x02 +#define ACPI_NO_EVENT_INIT 0x04 +#define ACPI_NO_HANDLER_INIT 0x08 +#define ACPI_NO_ACPI_ENABLE 0x10 +#define ACPI_NO_DEVICE_INIT 0x20 +#define ACPI_NO_OBJECT_INIT 0x40 + +/* + * Initialization state + */ +#define ACPI_INITIALIZED_OK 0x01 + +/* + * Power state values + */ + +#define ACPI_STATE_UNKNOWN (u8) 0xFF + +#define ACPI_STATE_S0 (u8) 0 +#define ACPI_STATE_S1 (u8) 1 +#define ACPI_STATE_S2 (u8) 2 +#define ACPI_STATE_S3 (u8) 3 +#define ACPI_STATE_S4 (u8) 4 +#define ACPI_STATE_S5 (u8) 5 +#define ACPI_S_STATES_MAX ACPI_STATE_S5 +#define ACPI_S_STATE_COUNT 6 + +#define ACPI_STATE_D0 (u8) 0 +#define ACPI_STATE_D1 (u8) 1 +#define ACPI_STATE_D2 (u8) 2 +#define ACPI_STATE_D3 (u8) 3 +#define ACPI_D_STATES_MAX ACPI_STATE_D3 +#define ACPI_D_STATE_COUNT 4 + +#define ACPI_STATE_C0 (u8) 0 +#define ACPI_STATE_C1 (u8) 1 +#define ACPI_STATE_C2 (u8) 2 +#define ACPI_STATE_C3 (u8) 3 +#define ACPI_C_STATES_MAX ACPI_STATE_C3 +#define ACPI_C_STATE_COUNT 4 + +/* + * Sleep type invalid value + */ +#define ACPI_SLEEP_TYPE_MAX 0x7 +#define ACPI_SLEEP_TYPE_INVALID 0xFF + +/* + * Standard notify values + */ +#define ACPI_NOTIFY_BUS_CHECK (u8) 0 +#define ACPI_NOTIFY_DEVICE_CHECK (u8) 1 +#define ACPI_NOTIFY_DEVICE_WAKE (u8) 2 +#define ACPI_NOTIFY_EJECT_REQUEST (u8) 3 +#define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 4 +#define ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 5 +#define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 6 +#define ACPI_NOTIFY_POWER_FAULT (u8) 7 + + +/* + * Table types. These values are passed to the table related APIs + */ +typedef u32 acpi_table_type; + +#define ACPI_TABLE_RSDP (acpi_table_type) 0 +#define ACPI_TABLE_DSDT (acpi_table_type) 1 +#define ACPI_TABLE_FADT (acpi_table_type) 2 +#define ACPI_TABLE_FACS (acpi_table_type) 3 +#define ACPI_TABLE_PSDT (acpi_table_type) 4 +#define ACPI_TABLE_SSDT (acpi_table_type) 5 +#define ACPI_TABLE_XSDT (acpi_table_type) 6 +#define ACPI_TABLE_MAX 6 +#define NUM_ACPI_TABLE_TYPES (ACPI_TABLE_MAX+1) + + +/* + * Types associated with ACPI names and objects. The first group of + * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition + * of the ACPI object_type() operator (See the ACPI Spec). Therefore, + * only add to the first group if the spec changes. + * + * Types must be kept in sync with the global acpi_ns_properties + * and acpi_ns_type_names arrays. 
+ */ +typedef u32 acpi_object_type; + +#define ACPI_TYPE_ANY 0x00 +#define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */ +#define ACPI_TYPE_STRING 0x02 +#define ACPI_TYPE_BUFFER 0x03 +#define ACPI_TYPE_PACKAGE 0x04 /* byte_const, multiple data_term/Constant/super_name */ +#define ACPI_TYPE_FIELD_UNIT 0x05 +#define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */ +#define ACPI_TYPE_EVENT 0x07 +#define ACPI_TYPE_METHOD 0x08 /* Name, byte_const, multiple Code */ +#define ACPI_TYPE_MUTEX 0x09 +#define ACPI_TYPE_REGION 0x0A +#define ACPI_TYPE_POWER 0x0B /* Name,byte_const,word_const,multi Node */ +#define ACPI_TYPE_PROCESSOR 0x0C /* Name,byte_const,Dword_const,byte_const,multi nm_o */ +#define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */ +#define ACPI_TYPE_BUFFER_FIELD 0x0E +#define ACPI_TYPE_DDB_HANDLE 0x0F +#define ACPI_TYPE_DEBUG_OBJECT 0x10 + +#define ACPI_TYPE_EXTERNAL_MAX 0x10 + +/* + * These are object types that do not map directly to the ACPI + * object_type() operator. They are used for various internal purposes only. + * If new predefined ACPI_TYPEs are added (via the ACPI specification), these + * internal types must move upwards. (There is code that depends on these + * values being contiguous with the external types above.) + */ +#define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 +#define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 +#define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 +#define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */ +#define ACPI_TYPE_LOCAL_ALIAS 0x15 +#define ACPI_TYPE_LOCAL_NOTIFY 0x16 +#define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x17 +#define ACPI_TYPE_LOCAL_RESOURCE 0x18 +#define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x19 +#define ACPI_TYPE_LOCAL_SCOPE 0x1A /* 1 Name, multiple object_list Nodes */ + +#define ACPI_TYPE_NS_NODE_MAX 0x1A /* Last typecode used within a NS Node */ + +/* + * These are special object types that never appear in + * a Namespace node, only in an union acpi_operand_object + */ +#define ACPI_TYPE_LOCAL_EXTRA 0x1B +#define ACPI_TYPE_LOCAL_DATA 0x1C + +#define ACPI_TYPE_LOCAL_MAX 0x1C + +/* All types above here are invalid */ + +#define ACPI_TYPE_INVALID 0x1D +#define ACPI_TYPE_NOT_FOUND 0xFF + + +/* + * Bitmapped ACPI types. 
Used internally only + */ +#define ACPI_BTYPE_ANY 0x00000000 +#define ACPI_BTYPE_INTEGER 0x00000001 +#define ACPI_BTYPE_STRING 0x00000002 +#define ACPI_BTYPE_BUFFER 0x00000004 +#define ACPI_BTYPE_PACKAGE 0x00000008 +#define ACPI_BTYPE_FIELD_UNIT 0x00000010 +#define ACPI_BTYPE_DEVICE 0x00000020 +#define ACPI_BTYPE_EVENT 0x00000040 +#define ACPI_BTYPE_METHOD 0x00000080 +#define ACPI_BTYPE_MUTEX 0x00000100 +#define ACPI_BTYPE_REGION 0x00000200 +#define ACPI_BTYPE_POWER 0x00000400 +#define ACPI_BTYPE_PROCESSOR 0x00000800 +#define ACPI_BTYPE_THERMAL 0x00001000 +#define ACPI_BTYPE_BUFFER_FIELD 0x00002000 +#define ACPI_BTYPE_DDB_HANDLE 0x00004000 +#define ACPI_BTYPE_DEBUG_OBJECT 0x00008000 +#define ACPI_BTYPE_REFERENCE 0x00010000 +#define ACPI_BTYPE_RESOURCE 0x00020000 + +#define ACPI_BTYPE_COMPUTE_DATA (ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING | ACPI_BTYPE_BUFFER) + +#define ACPI_BTYPE_DATA (ACPI_BTYPE_COMPUTE_DATA | ACPI_BTYPE_PACKAGE) +#define ACPI_BTYPE_DATA_REFERENCE (ACPI_BTYPE_DATA | ACPI_BTYPE_REFERENCE | ACPI_BTYPE_DDB_HANDLE) +#define ACPI_BTYPE_DEVICE_OBJECTS (ACPI_BTYPE_DEVICE | ACPI_BTYPE_THERMAL | ACPI_BTYPE_PROCESSOR) +#define ACPI_BTYPE_OBJECTS_AND_REFS 0x0001FFFF /* ARG or LOCAL */ +#define ACPI_BTYPE_ALL_OBJECTS 0x0000FFFF + +/* + * All I/O + */ +#define ACPI_READ 0 +#define ACPI_WRITE 1 +#define ACPI_IO_MASK 1 + + +/* + * Acpi Event Types: Fixed & General Purpose + */ +typedef u32 acpi_event_type; + +/* + * Fixed events + */ +#define ACPI_EVENT_PMTIMER 0 +#define ACPI_EVENT_GLOBAL 1 +#define ACPI_EVENT_POWER_BUTTON 2 +#define ACPI_EVENT_SLEEP_BUTTON 3 +#define ACPI_EVENT_RTC 4 +#define ACPI_EVENT_MAX 4 +#define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 + +#define ACPI_GPE_INVALID 0xFF +#define ACPI_GPE_MAX 0xFF +#define ACPI_NUM_GPE 256 + +#define ACPI_EVENT_LEVEL_TRIGGERED 1 +#define ACPI_EVENT_EDGE_TRIGGERED 2 + +/* + * Flags for GPE and Lock interfaces + */ +#define ACPI_EVENT_WAKE_ENABLE 0x2 +#define ACPI_EVENT_WAKE_DISABLE 0x2 + +#define ACPI_NOT_ISR 0x1 +#define ACPI_ISR 0x0 + + +/* + * acpi_event Status: + * ------------- + * The encoding of acpi_event_status is illustrated below. + * Note that a set bit (1) indicates the property is TRUE + * (e.g. if bit 0 is set then the event is enabled). + * +-------------+-+-+-+ + * | Bits 31:3 |2|1|0| + * +-------------+-+-+-+ + * | | | | + * | | | +- Enabled? + * | | +--- Enabled for wake? + * | +----- Set? 
+ * +----------- + */ +typedef u32 acpi_event_status; + +#define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00 +#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 +#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 +#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 + + +/* Notify types */ + +#define ACPI_SYSTEM_NOTIFY 0 +#define ACPI_DEVICE_NOTIFY 1 +#define ACPI_MAX_NOTIFY_HANDLER_TYPE 1 + +#define ACPI_MAX_SYS_NOTIFY 0x7f + + +/* Address Space (Operation Region) Types */ + +typedef u8 acpi_adr_space_type; + +#define ACPI_ADR_SPACE_SYSTEM_MEMORY (acpi_adr_space_type) 0 +#define ACPI_ADR_SPACE_SYSTEM_IO (acpi_adr_space_type) 1 +#define ACPI_ADR_SPACE_PCI_CONFIG (acpi_adr_space_type) 2 +#define ACPI_ADR_SPACE_EC (acpi_adr_space_type) 3 +#define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 +#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 +#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 +#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 7 +#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127 + + +/* + * bit_register IDs + * These are bitfields defined within the full ACPI registers + */ +#define ACPI_BITREG_TIMER_STATUS 0x00 +#define ACPI_BITREG_BUS_MASTER_STATUS 0x01 +#define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02 +#define ACPI_BITREG_POWER_BUTTON_STATUS 0x03 +#define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04 +#define ACPI_BITREG_RT_CLOCK_STATUS 0x05 +#define ACPI_BITREG_WAKE_STATUS 0x06 + +#define ACPI_BITREG_TIMER_ENABLE 0x07 +#define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x08 +#define ACPI_BITREG_POWER_BUTTON_ENABLE 0x09 +#define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0A +#define ACPI_BITREG_RT_CLOCK_ENABLE 0x0B +#define ACPI_BITREG_WAKE_ENABLE 0x0C + +#define ACPI_BITREG_SCI_ENABLE 0x0D +#define ACPI_BITREG_BUS_MASTER_RLD 0x0E +#define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x0F +#define ACPI_BITREG_SLEEP_TYPE_A 0x10 +#define ACPI_BITREG_SLEEP_TYPE_B 0x11 +#define ACPI_BITREG_SLEEP_ENABLE 0x12 + +#define ACPI_BITREG_ARB_DISABLE 0x13 + +#define ACPI_BITREG_MAX 0x13 +#define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 + + +/* + * External ACPI object definition + */ +union acpi_object +{ + acpi_object_type type; /* See definition of acpi_ns_type for values */ + struct + { + acpi_object_type type; + acpi_integer value; /* The actual number */ + } integer; + + struct + { + acpi_object_type type; + u32 length; /* # of bytes in string, excluding trailing null */ + char *pointer; /* points to the string value */ + } string; + + struct + { + acpi_object_type type; + u32 length; /* # of bytes in buffer */ + u8 *pointer; /* points to the buffer */ + } buffer; + + struct + { + acpi_object_type type; + u32 fill1; + acpi_handle handle; /* object reference */ + } reference; + + struct + { + acpi_object_type type; + u32 count; /* # of elements in package */ + union acpi_object *elements; /* Pointer to an array of ACPI_OBJECTs */ + } package; + + struct + { + acpi_object_type type; + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + } processor; + + struct + { + acpi_object_type type; + u32 system_level; + u32 resource_order; + } power_resource; +}; + + +/* + * List of objects, used as a parameter list for control method evaluation + */ +struct acpi_object_list +{ + u32 count; + union acpi_object *pointer; +}; + + +/* + * Miscellaneous common Data Structures used by the interfaces + */ +#define ACPI_NO_BUFFER 0 +#define ACPI_ALLOCATE_BUFFER (acpi_size) (-1) +#define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (-2) + +struct acpi_buffer +{ + acpi_size length; /* Length 
in bytes of the buffer */ + void *pointer; /* pointer to buffer */ +}; + + +/* + * name_type for acpi_get_name + */ +#define ACPI_FULL_PATHNAME 0 +#define ACPI_SINGLE_NAME 1 +#define ACPI_NAME_TYPE_MAX 1 + + +/* + * Structure and flags for acpi_get_system_info + */ +#define ACPI_SYS_MODE_UNKNOWN 0x0000 +#define ACPI_SYS_MODE_ACPI 0x0001 +#define ACPI_SYS_MODE_LEGACY 0x0002 +#define ACPI_SYS_MODES_MASK 0x0003 + + +/* + * ACPI Table Info. One per ACPI table _type_ + */ +struct acpi_table_info +{ + u32 count; +}; + + +/* + * System info returned by acpi_get_system_info() + */ +struct acpi_system_info +{ + u32 acpi_ca_version; + u32 flags; + u32 timer_resolution; + u32 reserved1; + u32 reserved2; + u32 debug_level; + u32 debug_layer; + u32 num_table_types; + struct acpi_table_info table_info [NUM_ACPI_TABLE_TYPES]; +}; + + +/* + * Types specific to the OS service interfaces + */ + +typedef u32 +(ACPI_SYSTEM_XFACE *OSD_HANDLER) ( + void *context); + +typedef void +(ACPI_SYSTEM_XFACE *OSD_EXECUTION_CALLBACK) ( + void *context); + +/* + * Various handlers and callback procedures + */ +typedef +u32 (*acpi_event_handler) ( + void *context); + +typedef +void (*acpi_gpe_handler) ( + void *context); + +typedef +void (*acpi_notify_handler) ( + acpi_handle device, + u32 value, + void *context); + +typedef +void (*acpi_object_handler) ( + acpi_handle object, + u32 function, + void *data); + +typedef +acpi_status (*acpi_init_handler) ( + acpi_handle object, + u32 function); + +#define ACPI_INIT_DEVICE_INI 1 + + +/* Address Spaces (Operation Regions */ + +typedef +acpi_status (*acpi_adr_space_handler) ( + u32 function, + acpi_physical_address address, + u32 bit_width, + acpi_integer *value, + void *handler_context, + void *region_context); + +#define ACPI_DEFAULT_HANDLER NULL + + +typedef +acpi_status (*acpi_adr_space_setup) ( + acpi_handle region_handle, + u32 function, + void *handler_context, + void **region_context); + +#define ACPI_REGION_ACTIVATE 0 +#define ACPI_REGION_DEACTIVATE 1 + +typedef +acpi_status (*acpi_walk_callback) ( + acpi_handle obj_handle, + u32 nesting_level, + void *context, + void **return_value); + + +/* Interrupt handler return values */ + +#define ACPI_INTERRUPT_NOT_HANDLED 0x00 +#define ACPI_INTERRUPT_HANDLED 0x01 + + +/* Structure and flags for acpi_get_device_info */ + +#define ACPI_VALID_HID 0x1 +#define ACPI_VALID_UID 0x2 +#define ACPI_VALID_ADR 0x4 +#define ACPI_VALID_STA 0x8 + + +#define ACPI_COMMON_OBJ_INFO \ + acpi_object_type type; /* ACPI object type */ \ + acpi_name name /* ACPI object Name */ + + +struct acpi_obj_info_header +{ + ACPI_COMMON_OBJ_INFO; +}; + + +struct acpi_device_info +{ + ACPI_COMMON_OBJ_INFO; + + u32 valid; /* Are the next bits legit? 
*/ + char hardware_id[9]; /* _HID value if any */ + char unique_id[9]; /* _UID value if any */ + acpi_integer address; /* _ADR value if any */ + u32 current_status; /* _STA value */ +}; + + +/* Context structs for address space handlers */ + +struct acpi_pci_id +{ + u16 segment; + u16 bus; + u16 device; + u16 function; +}; + + +struct acpi_mem_space_context +{ + u32 length; + acpi_physical_address address; + acpi_physical_address mapped_physical_address; + u8 *mapped_logical_address; + acpi_size mapped_length; +}; + + +/* + * Definitions for Resource Attributes + */ + +/* + * Memory Attributes + */ +#define ACPI_READ_ONLY_MEMORY (u8) 0x00 +#define ACPI_READ_WRITE_MEMORY (u8) 0x01 + +#define ACPI_NON_CACHEABLE_MEMORY (u8) 0x00 +#define ACPI_CACHABLE_MEMORY (u8) 0x01 +#define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02 +#define ACPI_PREFETCHABLE_MEMORY (u8) 0x03 + +/* + * IO Attributes + * The ISA Io ranges are: n000-n0_ffh, n400-n4_ffh, n800-n8_ffh, n_c00-n_cFFh. + * The non-ISA Io ranges are: n100-n3_ffh, n500-n7_ffh, n900-n_bFfh, n_cd0-n_fFFh. + */ +#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01 +#define ACPI_ISA_ONLY_RANGES (u8) 0x02 +#define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES) + +#define ACPI_SPARSE_TRANSLATION (u8) 0x03 + +/* + * IO Port Descriptor Decode + */ +#define ACPI_DECODE_10 (u8) 0x00 /* 10-bit IO address decode */ +#define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */ + +/* + * IRQ Attributes + */ +#define ACPI_EDGE_SENSITIVE (u8) 0x00 +#define ACPI_LEVEL_SENSITIVE (u8) 0x01 + +#define ACPI_ACTIVE_HIGH (u8) 0x00 +#define ACPI_ACTIVE_LOW (u8) 0x01 + +#define ACPI_EXCLUSIVE (u8) 0x00 +#define ACPI_SHARED (u8) 0x01 + +/* + * DMA Attributes + */ +#define ACPI_COMPATIBILITY (u8) 0x00 +#define ACPI_TYPE_A (u8) 0x01 +#define ACPI_TYPE_B (u8) 0x02 +#define ACPI_TYPE_F (u8) 0x03 + +#define ACPI_NOT_BUS_MASTER (u8) 0x00 +#define ACPI_BUS_MASTER (u8) 0x01 + +#define ACPI_TRANSFER_8 (u8) 0x00 +#define ACPI_TRANSFER_8_16 (u8) 0x01 +#define ACPI_TRANSFER_16 (u8) 0x02 + +/* + * Start Dependent Functions Priority definitions + */ +#define ACPI_GOOD_CONFIGURATION (u8) 0x00 +#define ACPI_ACCEPTABLE_CONFIGURATION (u8) 0x01 +#define ACPI_SUB_OPTIMAL_CONFIGURATION (u8) 0x02 + +/* + * 16, 32 and 64-bit Address Descriptor resource types + */ +#define ACPI_MEMORY_RANGE (u8) 0x00 +#define ACPI_IO_RANGE (u8) 0x01 +#define ACPI_BUS_NUMBER_RANGE (u8) 0x02 + +#define ACPI_ADDRESS_NOT_FIXED (u8) 0x00 +#define ACPI_ADDRESS_FIXED (u8) 0x01 + +#define ACPI_POS_DECODE (u8) 0x00 +#define ACPI_SUB_DECODE (u8) 0x01 + +#define ACPI_PRODUCER (u8) 0x00 +#define ACPI_CONSUMER (u8) 0x01 + + +/* + * Structures used to describe device resources + */ +struct acpi_resource_irq +{ + u32 edge_level; + u32 active_high_low; + u32 shared_exclusive; + u32 number_of_interrupts; + u32 interrupts[1]; +}; + +struct acpi_resource_dma +{ + u32 type; + u32 bus_master; + u32 transfer; + u32 number_of_channels; + u32 channels[1]; +}; + +struct acpi_resource_start_dpf +{ + u32 compatibility_priority; + u32 performance_robustness; +}; + +/* + * END_DEPENDENT_FUNCTIONS_RESOURCE struct is not + * needed because it has no fields + */ + +struct acpi_resource_io +{ + u32 io_decode; + u32 min_base_address; + u32 max_base_address; + u32 alignment; + u32 range_length; +}; + +struct acpi_resource_fixed_io +{ + u32 base_address; + u32 range_length; +}; + +struct acpi_resource_vendor +{ + u32 length; + u8 reserved[1]; +}; + +struct acpi_resource_end_tag +{ + u8 checksum; +}; + +struct acpi_resource_mem24 +{ + u32 
read_write_attribute; + u32 min_base_address; + u32 max_base_address; + u32 alignment; + u32 range_length; +}; + +struct acpi_resource_mem32 +{ + u32 read_write_attribute; + u32 min_base_address; + u32 max_base_address; + u32 alignment; + u32 range_length; +}; + +struct acpi_resource_fixed_mem32 +{ + u32 read_write_attribute; + u32 range_base_address; + u32 range_length; +}; + +struct acpi_memory_attribute +{ + u16 cache_attribute; + u16 read_write_attribute; +}; + +struct acpi_io_attribute +{ + u16 range_attribute; + u16 translation_attribute; +}; + +struct acpi_bus_attribute +{ + u16 reserved1; + u16 reserved2; +}; + +union acpi_resource_attribute +{ + struct acpi_memory_attribute memory; + struct acpi_io_attribute io; + struct acpi_bus_attribute bus; +}; + +struct acpi_resource_source +{ + u32 index; + u32 string_length; + char *string_ptr; +}; + +struct acpi_resource_address16 +{ + u32 resource_type; + u32 producer_consumer; + u32 decode; + u32 min_address_fixed; + u32 max_address_fixed; + union acpi_resource_attribute attribute; + u32 granularity; + u32 min_address_range; + u32 max_address_range; + u32 address_translation_offset; + u32 address_length; + struct acpi_resource_source resource_source; +}; + +struct acpi_resource_address32 +{ + u32 resource_type; + u32 producer_consumer; + u32 decode; + u32 min_address_fixed; + u32 max_address_fixed; + union acpi_resource_attribute attribute; + u32 granularity; + u32 min_address_range; + u32 max_address_range; + u32 address_translation_offset; + u32 address_length; + struct acpi_resource_source resource_source; +}; + +struct acpi_resource_address64 +{ + u32 resource_type; + u32 producer_consumer; + u32 decode; + u32 min_address_fixed; + u32 max_address_fixed; + union acpi_resource_attribute attribute; + u64 granularity; + u64 min_address_range; + u64 max_address_range; + u64 address_translation_offset; + u64 address_length; + struct acpi_resource_source resource_source; +}; + +struct acpi_resource_ext_irq +{ + u32 producer_consumer; + u32 edge_level; + u32 active_high_low; + u32 shared_exclusive; + u32 number_of_interrupts; + struct acpi_resource_source resource_source; + u32 interrupts[1]; +}; + + +/* ACPI_RESOURCE_TYPEs */ + +#define ACPI_RSTYPE_IRQ 0 +#define ACPI_RSTYPE_DMA 1 +#define ACPI_RSTYPE_START_DPF 2 +#define ACPI_RSTYPE_END_DPF 3 +#define ACPI_RSTYPE_IO 4 +#define ACPI_RSTYPE_FIXED_IO 5 +#define ACPI_RSTYPE_VENDOR 6 +#define ACPI_RSTYPE_END_TAG 7 +#define ACPI_RSTYPE_MEM24 8 +#define ACPI_RSTYPE_MEM32 9 +#define ACPI_RSTYPE_FIXED_MEM32 10 +#define ACPI_RSTYPE_ADDRESS16 11 +#define ACPI_RSTYPE_ADDRESS32 12 +#define ACPI_RSTYPE_ADDRESS64 13 +#define ACPI_RSTYPE_EXT_IRQ 14 + +typedef u32 acpi_resource_type; + +union acpi_resource_data +{ + struct acpi_resource_irq irq; + struct acpi_resource_dma dma; + struct acpi_resource_start_dpf start_dpf; + struct acpi_resource_io io; + struct acpi_resource_fixed_io fixed_io; + struct acpi_resource_vendor vendor_specific; + struct acpi_resource_end_tag end_tag; + struct acpi_resource_mem24 memory24; + struct acpi_resource_mem32 memory32; + struct acpi_resource_fixed_mem32 fixed_memory32; + struct acpi_resource_address16 address16; + struct acpi_resource_address32 address32; + struct acpi_resource_address64 address64; + struct acpi_resource_ext_irq extended_irq; +}; + +struct acpi_resource +{ + acpi_resource_type id; + u32 length; + union acpi_resource_data data; +}; + +#define ACPI_RESOURCE_LENGTH 12 +#define ACPI_RESOURCE_LENGTH_NO_DATA 8 /* Id + Length fields */ + +#define 
ACPI_SIZEOF_RESOURCE(type) (ACPI_RESOURCE_LENGTH_NO_DATA + sizeof (type)) + +#define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length) + +#ifdef ACPI_MISALIGNED_TRANSFERS +#define ACPI_ALIGN_RESOURCE_SIZE(length) (length) +#else +#define ACPI_ALIGN_RESOURCE_SIZE(length) ACPI_ROUND_UP_TO_NATIVE_WORD(length) +#endif + +/* + * END: of definitions for Resource Attributes + */ + + +struct acpi_pci_routing_table +{ + u32 length; + u32 pin; + acpi_integer address; /* here for 64-bit alignment */ + u32 source_index; + char source[4]; /* pad to 64 bits so sizeof() works in all cases */ +}; + +/* + * END: of definitions for PCI Routing tables + */ + + +#endif /* __ACTYPES_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/acutils.h linux.21rc5-ac2/include/acpi/acutils.h --- linux.21rc5/include/acpi/acutils.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/acutils.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,836 @@ +/****************************************************************************** + * + * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
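
/*
 * Editor's note, not part of the patch: a resource buffer is a sequence
 * of variable-length struct acpi_resource records closed by an
 * ACPI_RSTYPE_END_TAG record, and ACPI_NEXT_RESOURCE (defined in
 * actypes.h above) advances by each record's length field. A minimal
 * sketch (hypothetical helper name, assuming a well-formed list):
 */
static u32 acpi_sketch_count_resources(struct acpi_resource *res)
{
        u32 count = 0;

        while (res->id != ACPI_RSTYPE_END_TAG) {
                count++;
                res = ACPI_NEXT_RESOURCE(res);
        }
        return count;
}
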
+ */ + +#ifndef _ACUTILS_H +#define _ACUTILS_H + + +typedef +acpi_status (*acpi_pkg_callback) ( + u8 object_type, + union acpi_operand_object *source_object, + union acpi_generic_state *state, + void *context); + + +acpi_status +acpi_ut_walk_package_tree ( + union acpi_operand_object *source_object, + void *target_object, + acpi_pkg_callback walk_callback, + void *context); + + +struct acpi_pkg_info +{ + u8 *free_space; + acpi_size length; + u32 object_space; + u32 num_packages; +}; + +#define REF_INCREMENT (u16) 0 +#define REF_DECREMENT (u16) 1 +#define REF_FORCE_DELETE (u16) 2 + +/* acpi_ut_dump_buffer */ + +#define DB_BYTE_DISPLAY 1 +#define DB_WORD_DISPLAY 2 +#define DB_DWORD_DISPLAY 4 +#define DB_QWORD_DISPLAY 8 + + +/* Global initialization interfaces */ + +void +acpi_ut_init_globals ( + void); + +void +acpi_ut_terminate ( + void); + + +/* + * ut_init - miscellaneous initialization and shutdown + */ + +acpi_status +acpi_ut_hardware_initialize ( + void); + +void +acpi_ut_subsystem_shutdown ( + void); + +acpi_status +acpi_ut_validate_fadt ( + void); + +/* + * ut_global - Global data structures and procedures + */ + +#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) + +char * +acpi_ut_get_mutex_name ( + u32 mutex_id); + +#endif + +char * +acpi_ut_get_type_name ( + acpi_object_type type); + +char * +acpi_ut_get_object_type_name ( + union acpi_operand_object *obj_desc); + +char * +acpi_ut_get_region_name ( + u8 space_id); + +char * +acpi_ut_get_event_name ( + u32 event_id); + +char +acpi_ut_hex_to_ascii_char ( + acpi_integer integer, + u32 position); + +u8 +acpi_ut_valid_object_type ( + acpi_object_type type); + +acpi_owner_id +acpi_ut_allocate_owner_id ( + u32 id_type); + + +/* + * ut_clib - Local implementations of C library functions + */ + +#ifndef ACPI_USE_SYSTEM_CLIBRARY + +acpi_size +acpi_ut_strlen ( + const char *string); + +char * +acpi_ut_strcpy ( + char *dst_string, + const char *src_string); + +char * +acpi_ut_strncpy ( + char *dst_string, + const char *src_string, + acpi_size count); + +int +acpi_ut_strncmp ( + const char *string1, + const char *string2, + acpi_size count); + +int +acpi_ut_strcmp ( + const char *string1, + const char *string2); + +char * +acpi_ut_strcat ( + char *dst_string, + const char *src_string); + +char * +acpi_ut_strncat ( + char *dst_string, + const char *src_string, + acpi_size count); + +u32 +acpi_ut_strtoul ( + const char *string, + char **terminator, + u32 base); + +char * +acpi_ut_strstr ( + char *string1, + char *string2); + +void * +acpi_ut_memcpy ( + void *dest, + const void *src, + acpi_size count); + +void * +acpi_ut_memset ( + void *dest, + acpi_native_uint value, + acpi_size count); + +int +acpi_ut_to_upper ( + int c); + +int +acpi_ut_to_lower ( + int c); + +extern const u8 _acpi_ctype[]; + +#define _ACPI_XA 0x00 /* extra alphabetic - not supported */ +#define _ACPI_XS 0x40 /* extra space */ +#define _ACPI_BB 0x00 /* BEL, BS, etc. 
- not supported */ +#define _ACPI_CN 0x20 /* CR, FF, HT, NL, VT */ +#define _ACPI_DI 0x04 /* '0'-'9' */ +#define _ACPI_LO 0x02 /* 'a'-'z' */ +#define _ACPI_PU 0x10 /* punctuation */ +#define _ACPI_SP 0x08 /* space */ +#define _ACPI_UP 0x01 /* 'A'-'Z' */ +#define _ACPI_XD 0x80 /* '0'-'9', 'A'-'F', 'a'-'f' */ + +#define ACPI_IS_DIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_DI)) +#define ACPI_IS_SPACE(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_SP)) +#define ACPI_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD)) +#define ACPI_IS_UPPER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_UP)) +#define ACPI_IS_LOWER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO)) +#define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU)) +#define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP)) +#define ACPI_IS_ASCII(c) ((c) < 0x80) + +#endif /* ACPI_USE_SYSTEM_CLIBRARY */ + +/* + * ut_copy - Object construction and conversion interfaces + */ + +acpi_status +acpi_ut_build_simple_object( + union acpi_operand_object *obj, + union acpi_object *user_obj, + u8 *data_space, + u32 *buffer_space_used); + +acpi_status +acpi_ut_build_package_object ( + union acpi_operand_object *obj, + u8 *buffer, + u32 *space_used); + +acpi_status +acpi_ut_copy_ielement_to_eelement ( + u8 object_type, + union acpi_operand_object *source_object, + union acpi_generic_state *state, + void *context); + +acpi_status +acpi_ut_copy_ielement_to_ielement ( + u8 object_type, + union acpi_operand_object *source_object, + union acpi_generic_state *state, + void *context); + +acpi_status +acpi_ut_copy_iobject_to_eobject ( + union acpi_operand_object *obj, + struct acpi_buffer *ret_buffer); + +acpi_status +acpi_ut_copy_esimple_to_isimple( + union acpi_object *user_obj, + union acpi_operand_object **return_obj); + +acpi_status +acpi_ut_copy_eobject_to_iobject ( + union acpi_object *obj, + union acpi_operand_object **internal_obj); + +acpi_status +acpi_ut_copy_isimple_to_isimple ( + union acpi_operand_object *source_obj, + union acpi_operand_object *dest_obj); + +acpi_status +acpi_ut_copy_ipackage_to_ipackage ( + union acpi_operand_object *source_obj, + union acpi_operand_object *dest_obj, + struct acpi_walk_state *walk_state); + +acpi_status +acpi_ut_copy_simple_object ( + union acpi_operand_object *source_desc, + union acpi_operand_object *dest_desc); + +acpi_status +acpi_ut_copy_iobject_to_iobject ( + union acpi_operand_object *source_desc, + union acpi_operand_object **dest_desc, + struct acpi_walk_state *walk_state); + + +/* + * ut_create - Object creation + */ + +acpi_status +acpi_ut_update_object_reference ( + union acpi_operand_object *object, + u16 action); + + +/* + * ut_debug - Debug interfaces + */ + +void +acpi_ut_init_stack_ptr_trace ( + void); + +void +acpi_ut_track_stack_ptr ( + void); + +void +acpi_ut_trace ( + u32 line_number, + struct acpi_debug_print_info *dbg_info); + +void +acpi_ut_trace_ptr ( + u32 line_number, + struct acpi_debug_print_info *dbg_info, + void *pointer); + +void +acpi_ut_trace_u32 ( + u32 line_number, + struct acpi_debug_print_info *dbg_info, + u32 integer); + +void +acpi_ut_trace_str ( + u32 line_number, + struct acpi_debug_print_info *dbg_info, + char *string); + +void +acpi_ut_exit ( + u32 line_number, + struct acpi_debug_print_info *dbg_info); + +void +acpi_ut_status_exit ( + u32 line_number, + struct acpi_debug_print_info *dbg_info, + acpi_status status); + +void +acpi_ut_value_exit ( + u32 line_number, + struct 
acpi_debug_print_info *dbg_info, + acpi_integer value); + +void +acpi_ut_ptr_exit ( + u32 line_number, + struct acpi_debug_print_info *dbg_info, + u8 *ptr); + +void +acpi_ut_report_info ( + char *module_name, + u32 line_number, + u32 component_id); + +void +acpi_ut_report_error ( + char *module_name, + u32 line_number, + u32 component_id); + +void +acpi_ut_report_warning ( + char *module_name, + u32 line_number, + u32 component_id); + +void +acpi_ut_dump_buffer ( + u8 *buffer, + u32 count, + u32 display, + u32 component_id); + +void ACPI_INTERNAL_VAR_XFACE +acpi_ut_debug_print ( + u32 requested_debug_level, + u32 line_number, + struct acpi_debug_print_info *dbg_info, + char *format, + ...) ACPI_PRINTF_LIKE_FUNC; + +void ACPI_INTERNAL_VAR_XFACE +acpi_ut_debug_print_raw ( + u32 requested_debug_level, + u32 line_number, + struct acpi_debug_print_info *dbg_info, + char *format, + ...) ACPI_PRINTF_LIKE_FUNC; + + +/* + * ut_delete - Object deletion + */ + +void +acpi_ut_delete_internal_obj ( + union acpi_operand_object *object); + +void +acpi_ut_delete_internal_package_object ( + union acpi_operand_object *object); + +void +acpi_ut_delete_internal_simple_object ( + union acpi_operand_object *object); + +void +acpi_ut_delete_internal_object_list ( + union acpi_operand_object **obj_list); + + +/* + * ut_eval - object evaluation + */ + +/* Method name strings */ + +#define METHOD_NAME__HID "_HID" +#define METHOD_NAME__CID "_CID" +#define METHOD_NAME__UID "_UID" +#define METHOD_NAME__ADR "_ADR" +#define METHOD_NAME__STA "_STA" +#define METHOD_NAME__REG "_REG" +#define METHOD_NAME__SEG "_SEG" +#define METHOD_NAME__BBN "_BBN" +#define METHOD_NAME__PRT "_PRT" +#define METHOD_NAME__CRS "_CRS" +#define METHOD_NAME__PRS "_PRS" + + +acpi_status +acpi_ut_evaluate_object ( + struct acpi_namespace_node *prefix_node, + char *path, + u32 expected_return_btypes, + union acpi_operand_object **return_desc); + +acpi_status +acpi_ut_evaluate_numeric_object ( + char *object_name, + struct acpi_namespace_node *device_node, + acpi_integer *address); + +acpi_status +acpi_ut_execute_HID ( + struct acpi_namespace_node *device_node, + struct acpi_device_id *hid); + +acpi_status +acpi_ut_execute_CID ( + struct acpi_namespace_node *device_node, + struct acpi_device_id *cid); + +acpi_status +acpi_ut_execute_STA ( + struct acpi_namespace_node *device_node, + u32 *status_flags); + +acpi_status +acpi_ut_execute_UID ( + struct acpi_namespace_node *device_node, + struct acpi_device_id *uid); + + +/* + * ut_mutex - mutual exclusion interfaces + */ + +acpi_status +acpi_ut_mutex_initialize ( + void); + +void +acpi_ut_mutex_terminate ( + void); + +acpi_status +acpi_ut_create_mutex ( + acpi_mutex_handle mutex_id); + +acpi_status +acpi_ut_delete_mutex ( + acpi_mutex_handle mutex_id); + +acpi_status +acpi_ut_acquire_mutex ( + acpi_mutex_handle mutex_id); + +acpi_status +acpi_ut_release_mutex ( + acpi_mutex_handle mutex_id); + + +/* + * ut_object - internal object create/delete/cache routines + */ + +union acpi_operand_object * +acpi_ut_create_internal_object_dbg ( + char *module_name, + u32 line_number, + u32 component_id, + acpi_object_type type); + +void * +acpi_ut_allocate_object_desc_dbg ( + char *module_name, + u32 line_number, + u32 component_id); + +#define acpi_ut_create_internal_object(t) acpi_ut_create_internal_object_dbg (_THIS_MODULE,__LINE__,_COMPONENT,t) +#define acpi_ut_allocate_object_desc() acpi_ut_allocate_object_desc_dbg (_THIS_MODULE,__LINE__,_COMPONENT) + +void +acpi_ut_delete_object_desc ( + union 
acpi_operand_object *object); + +u8 +acpi_ut_valid_internal_object ( + void *object); + +union acpi_operand_object * +acpi_ut_create_buffer_object ( + acpi_size buffer_size); + + +/* + * ut_ref_cnt - Object reference count management + */ + +void +acpi_ut_add_reference ( + union acpi_operand_object *object); + +void +acpi_ut_remove_reference ( + union acpi_operand_object *object); + +/* + * ut_size - Object size routines + */ + +acpi_status +acpi_ut_get_simple_object_size ( + union acpi_operand_object *obj, + acpi_size *obj_length); + +acpi_status +acpi_ut_get_package_object_size ( + union acpi_operand_object *obj, + acpi_size *obj_length); + +acpi_status +acpi_ut_get_object_size( + union acpi_operand_object *obj, + acpi_size *obj_length); + +acpi_status +acpi_ut_get_element_length ( + u8 object_type, + union acpi_operand_object *source_object, + union acpi_generic_state *state, + void *context); + + +/* + * ut_state - Generic state creation/cache routines + */ + +void +acpi_ut_push_generic_state ( + union acpi_generic_state **list_head, + union acpi_generic_state *state); + +union acpi_generic_state * +acpi_ut_pop_generic_state ( + union acpi_generic_state **list_head); + + +union acpi_generic_state * +acpi_ut_create_generic_state ( + void); + +struct acpi_thread_state * +acpi_ut_create_thread_state ( + void); + +union acpi_generic_state * +acpi_ut_create_update_state ( + union acpi_operand_object *object, + u16 action); + +union acpi_generic_state * +acpi_ut_create_pkg_state ( + void *internal_object, + void *external_object, + u16 index); + +acpi_status +acpi_ut_create_update_state_and_push ( + union acpi_operand_object *object, + u16 action, + union acpi_generic_state **state_list); + +acpi_status +acpi_ut_create_pkg_state_and_push ( + void *internal_object, + void *external_object, + u16 index, + union acpi_generic_state **state_list); + +union acpi_generic_state * +acpi_ut_create_control_state ( + void); + +void +acpi_ut_delete_generic_state ( + union acpi_generic_state *state); + +void +acpi_ut_delete_generic_state_cache ( + void); + +void +acpi_ut_delete_object_cache ( + void); + +/* + * utmisc + */ + +void +acpi_ut_print_string ( + char *string, + u8 max_length); + +acpi_status +acpi_ut_divide ( + acpi_integer *in_dividend, + acpi_integer *in_divisor, + acpi_integer *out_quotient, + acpi_integer *out_remainder); + +acpi_status +acpi_ut_short_divide ( + acpi_integer *in_dividend, + u32 divisor, + acpi_integer *out_quotient, + u32 *out_remainder); + +u8 +acpi_ut_valid_acpi_name ( + u32 name); + +u8 +acpi_ut_valid_acpi_character ( + char character); + +acpi_status +acpi_ut_strtoul64 ( + char *string, + u32 base, + acpi_integer *ret_integer); + +char * +acpi_ut_strupr ( + char *src_string); + +u8 * +acpi_ut_get_resource_end_tag ( + union acpi_operand_object *obj_desc); + +u8 +acpi_ut_generate_checksum ( + u8 *buffer, + u32 length); + +u32 +acpi_ut_dword_byte_swap ( + u32 value); + +void +acpi_ut_set_integer_width ( + u8 revision); + +#ifdef ACPI_DEBUG_OUTPUT +void +acpi_ut_display_init_pathname ( + u8 type, + struct acpi_namespace_node *obj_handle, + char *path); + +#endif + + +/* + * Utalloc - memory allocation and object caching + */ + +void * +acpi_ut_acquire_from_cache ( + u32 list_id); + +void +acpi_ut_release_to_cache ( + u32 list_id, + void *object); + +void +acpi_ut_delete_generic_cache ( + u32 list_id); + +acpi_status +acpi_ut_validate_buffer ( + struct acpi_buffer *buffer); + +acpi_status +acpi_ut_initialize_buffer ( + struct acpi_buffer *buffer, + acpi_size 
required_length); + + +/* Memory allocation functions */ + +void * +acpi_ut_allocate ( + acpi_size size, + u32 component, + char *module, + u32 line); + +void * +acpi_ut_callocate ( + acpi_size size, + u32 component, + char *module, + u32 line); + + +#ifdef ACPI_DBG_TRACK_ALLOCATIONS + +void * +acpi_ut_allocate_and_track ( + acpi_size size, + u32 component, + char *module, + u32 line); + +void * +acpi_ut_callocate_and_track ( + acpi_size size, + u32 component, + char *module, + u32 line); + +void +acpi_ut_free_and_track ( + void *address, + u32 component, + char *module, + u32 line); + +struct acpi_debug_mem_block * +acpi_ut_find_allocation ( + u32 list_id, + void *allocation); + +acpi_status +acpi_ut_track_allocation ( + u32 list_id, + struct acpi_debug_mem_block *address, + acpi_size size, + u8 alloc_type, + u32 component, + char *module, + u32 line); + +acpi_status +acpi_ut_remove_allocation ( + u32 list_id, + struct acpi_debug_mem_block *address, + u32 component, + char *module, + u32 line); + +void +acpi_ut_dump_allocation_info ( + void); + +void +acpi_ut_dump_allocations ( + u32 component, + char *module); +#endif + + +#endif /* _ACUTILS_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/amlcode.h linux.21rc5-ac2/include/acpi/amlcode.h --- linux.21rc5/include/acpi/amlcode.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/amlcode.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,506 @@ +/****************************************************************************** + * + * Name: amlcode.h - Definitions for AML, as included in "definition blocks" + * Declarations and definitions contained herein are derived + * directly from the ACPI specification. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __AMLCODE_H__ +#define __AMLCODE_H__ + +/* primary opcodes */ + +#define AML_NULL_CHAR (u16) 0x00 + +#define AML_ZERO_OP (u16) 0x00 +#define AML_ONE_OP (u16) 0x01 +#define AML_UNASSIGNED (u16) 0x02 +#define AML_ALIAS_OP (u16) 0x06 +#define AML_NAME_OP (u16) 0x08 +#define AML_BYTE_OP (u16) 0x0a +#define AML_WORD_OP (u16) 0x0b +#define AML_DWORD_OP (u16) 0x0c +#define AML_STRING_OP (u16) 0x0d +#define AML_QWORD_OP (u16) 0x0e /* ACPI 2.0 */ +#define AML_SCOPE_OP (u16) 0x10 +#define AML_BUFFER_OP (u16) 0x11 +#define AML_PACKAGE_OP (u16) 0x12 +#define AML_VAR_PACKAGE_OP (u16) 0x13 /* ACPI 2.0 */ +#define AML_METHOD_OP (u16) 0x14 +#define AML_DUAL_NAME_PREFIX (u16) 0x2e +#define AML_MULTI_NAME_PREFIX_OP (u16) 0x2f +#define AML_NAME_CHAR_SUBSEQ (u16) 0x30 +#define AML_NAME_CHAR_FIRST (u16) 0x41 +#define AML_OP_PREFIX (u16) 0x5b +#define AML_ROOT_PREFIX (u16) 0x5c +#define AML_PARENT_PREFIX (u16) 0x5e +#define AML_LOCAL_OP (u16) 0x60 +#define AML_LOCAL0 (u16) 0x60 +#define AML_LOCAL1 (u16) 0x61 +#define AML_LOCAL2 (u16) 0x62 +#define AML_LOCAL3 (u16) 0x63 +#define AML_LOCAL4 (u16) 0x64 +#define AML_LOCAL5 (u16) 0x65 +#define AML_LOCAL6 (u16) 0x66 +#define AML_LOCAL7 (u16) 0x67 +#define AML_ARG_OP (u16) 0x68 +#define AML_ARG0 (u16) 0x68 +#define AML_ARG1 (u16) 0x69 +#define AML_ARG2 (u16) 0x6a +#define AML_ARG3 (u16) 0x6b +#define AML_ARG4 (u16) 0x6c +#define AML_ARG5 (u16) 0x6d +#define AML_ARG6 (u16) 0x6e +#define AML_STORE_OP (u16) 0x70 +#define AML_REF_OF_OP (u16) 0x71 +#define AML_ADD_OP (u16) 0x72 +#define AML_CONCAT_OP (u16) 0x73 +#define AML_SUBTRACT_OP (u16) 0x74 +#define AML_INCREMENT_OP (u16) 0x75 +#define AML_DECREMENT_OP (u16) 0x76 +#define AML_MULTIPLY_OP (u16) 0x77 +#define AML_DIVIDE_OP (u16) 0x78 +#define AML_SHIFT_LEFT_OP (u16) 0x79 +#define AML_SHIFT_RIGHT_OP (u16) 0x7a +#define AML_BIT_AND_OP (u16) 0x7b +#define AML_BIT_NAND_OP (u16) 0x7c +#define AML_BIT_OR_OP (u16) 0x7d +#define AML_BIT_NOR_OP (u16) 0x7e +#define AML_BIT_XOR_OP (u16) 0x7f +#define AML_BIT_NOT_OP (u16) 0x80 +#define AML_FIND_SET_LEFT_BIT_OP (u16) 0x81 +#define AML_FIND_SET_RIGHT_BIT_OP (u16) 0x82 +#define AML_DEREF_OF_OP (u16) 0x83 +#define AML_CONCAT_RES_OP (u16) 0x84 /* ACPI 2.0 */ +#define AML_MOD_OP (u16) 0x85 /* ACPI 2.0 */ +#define AML_NOTIFY_OP (u16) 0x86 +#define AML_SIZE_OF_OP (u16) 0x87 +#define AML_INDEX_OP (u16) 0x88 +#define AML_MATCH_OP (u16) 0x89 +#define AML_CREATE_DWORD_FIELD_OP (u16) 0x8a +#define AML_CREATE_WORD_FIELD_OP (u16) 0x8b +#define AML_CREATE_BYTE_FIELD_OP (u16) 0x8c +#define AML_CREATE_BIT_FIELD_OP (u16) 0x8d +#define AML_TYPE_OP (u16) 0x8e +#define AML_CREATE_QWORD_FIELD_OP (u16) 0x8f /* ACPI 2.0 */ +#define AML_LAND_OP (u16) 0x90 +#define AML_LOR_OP (u16) 0x91 +#define AML_LNOT_OP (u16) 0x92 +#define AML_LEQUAL_OP (u16) 0x93 +#define AML_LGREATER_OP (u16) 0x94 +#define AML_LLESS_OP (u16) 0x95 +#define AML_TO_BUFFER_OP (u16) 0x96 /* ACPI 2.0 */ +#define AML_TO_DECSTRING_OP (u16) 0x97 /* ACPI 2.0 */ +#define AML_TO_HEXSTRING_OP (u16) 0x98 /* ACPI 2.0 */ +#define 
AML_TO_INTEGER_OP (u16) 0x99 /* ACPI 2.0 */ +#define AML_TO_STRING_OP (u16) 0x9c /* ACPI 2.0 */ +#define AML_COPY_OP (u16) 0x9d /* ACPI 2.0 */ +#define AML_MID_OP (u16) 0x9e /* ACPI 2.0 */ +#define AML_CONTINUE_OP (u16) 0x9f /* ACPI 2.0 */ +#define AML_IF_OP (u16) 0xa0 +#define AML_ELSE_OP (u16) 0xa1 +#define AML_WHILE_OP (u16) 0xa2 +#define AML_NOOP_OP (u16) 0xa3 +#define AML_RETURN_OP (u16) 0xa4 +#define AML_BREAK_OP (u16) 0xa5 +#define AML_BREAK_POINT_OP (u16) 0xcc +#define AML_ONES_OP (u16) 0xff + +/* prefixed opcodes */ + +#define AML_EXTOP (u16) 0x005b + + +#define AML_MUTEX_OP (u16) 0x5b01 +#define AML_EVENT_OP (u16) 0x5b02 +#define AML_SHIFT_RIGHT_BIT_OP (u16) 0x5b10 +#define AML_SHIFT_LEFT_BIT_OP (u16) 0x5b11 +#define AML_COND_REF_OF_OP (u16) 0x5b12 +#define AML_CREATE_FIELD_OP (u16) 0x5b13 +#define AML_LOAD_TABLE_OP (u16) 0x5b1f /* ACPI 2.0 */ +#define AML_LOAD_OP (u16) 0x5b20 +#define AML_STALL_OP (u16) 0x5b21 +#define AML_SLEEP_OP (u16) 0x5b22 +#define AML_ACQUIRE_OP (u16) 0x5b23 +#define AML_SIGNAL_OP (u16) 0x5b24 +#define AML_WAIT_OP (u16) 0x5b25 +#define AML_RESET_OP (u16) 0x5b26 +#define AML_RELEASE_OP (u16) 0x5b27 +#define AML_FROM_BCD_OP (u16) 0x5b28 +#define AML_TO_BCD_OP (u16) 0x5b29 +#define AML_UNLOAD_OP (u16) 0x5b2a +#define AML_REVISION_OP (u16) 0x5b30 +#define AML_DEBUG_OP (u16) 0x5b31 +#define AML_FATAL_OP (u16) 0x5b32 +#define AML_REGION_OP (u16) 0x5b80 +#define AML_FIELD_OP (u16) 0x5b81 +#define AML_DEVICE_OP (u16) 0x5b82 +#define AML_PROCESSOR_OP (u16) 0x5b83 +#define AML_POWER_RES_OP (u16) 0x5b84 +#define AML_THERMAL_ZONE_OP (u16) 0x5b85 +#define AML_INDEX_FIELD_OP (u16) 0x5b86 +#define AML_BANK_FIELD_OP (u16) 0x5b87 +#define AML_DATA_REGION_OP (u16) 0x5b88 /* ACPI 2.0 */ + + +/* Bogus opcodes (they are actually two separate opcodes) */ + +#define AML_LGREATEREQUAL_OP (u16) 0x9295 +#define AML_LLESSEQUAL_OP (u16) 0x9294 +#define AML_LNOTEQUAL_OP (u16) 0x9293 + + +/* + * Internal opcodes + * Use only "Unknown" AML opcodes, don't attempt to use + * any valid ACPI ASCII values (A-Z, 0-9, '-') + */ + +#define AML_INT_NAMEPATH_OP (u16) 0x002d +#define AML_INT_NAMEDFIELD_OP (u16) 0x0030 +#define AML_INT_RESERVEDFIELD_OP (u16) 0x0031 +#define AML_INT_ACCESSFIELD_OP (u16) 0x0032 +#define AML_INT_BYTELIST_OP (u16) 0x0033 +#define AML_INT_STATICSTRING_OP (u16) 0x0034 +#define AML_INT_METHODCALL_OP (u16) 0x0035 +#define AML_INT_RETURN_VALUE_OP (u16) 0x0036 +#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037 + + +#define ARG_NONE 0x0 + +/* + * Argument types for the AML Parser + * Each field in the arg_types u32 is 5 bits, allowing for a maximum of 6 arguments. + * There can be up to 31 unique argument types + * Zero is reserved as end-of-list indicator + */ + +#define ARGP_BYTEDATA 0x01 +#define ARGP_BYTELIST 0x02 +#define ARGP_CHARLIST 0x03 +#define ARGP_DATAOBJ 0x04 +#define ARGP_DATAOBJLIST 0x05 +#define ARGP_DWORDDATA 0x06 +#define ARGP_FIELDLIST 0x07 +#define ARGP_NAME 0x08 +#define ARGP_NAMESTRING 0x09 +#define ARGP_OBJLIST 0x0A +#define ARGP_PKGLENGTH 0x0B +#define ARGP_SUPERNAME 0x0C +#define ARGP_TARGET 0x0D +#define ARGP_TERMARG 0x0E +#define ARGP_TERMLIST 0x0F +#define ARGP_WORDDATA 0x10 +#define ARGP_QWORDDATA 0x11 +#define ARGP_SIMPLENAME 0x12 + +/* + * Resolved argument types for the AML Interpreter + * Each field in the arg_types u32 is 5 bits, allowing for a maximum of 6 arguments. 
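Packing and unpacking that five-bit encoding is a pair of shift/mask operations. A minimal sketch (these helper names are hypothetical mirrors, not the parser's own symbols):

#include <stdint.h>

#define ARG_TYPE_WIDTH  5
#define ARG_TYPE_MASK   0x1F                    /* five bits per argument */

/* Pack up to three argument types, first argument in the low bits;
 * a zero field terminates the list. */
#define ARG_LIST3(a, b, c) \
        ((uint32_t)(a) | ((uint32_t)(b) << ARG_TYPE_WIDTH) | \
         ((uint32_t)(c) << (2 * ARG_TYPE_WIDTH)))

static uint32_t current_arg_type(uint32_t list)
{
        return list & ARG_TYPE_MASK;            /* lowest unconsumed field */
}

static uint32_t advance_arg_list(uint32_t list)
{
        return list >> ARG_TYPE_WIDTH;          /* drop the consumed field */
}

/* e.g. a Name op's argument list: ARG_LIST3(ARGP_NAME, ARGP_DATAOBJ, 0) */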
+ * There can be up to 31 unique argument types (0 is end-of-arg-list indicator) + * + * Note1: These values are completely independent from the ACPI_TYPEs + * i.e., ARGI_INTEGER != ACPI_TYPE_INTEGER + * + * Note2: If and when 5 bits becomes insufficient, it would probably be best + * to convert to a 6-byte array of argument types, allowing 8 bits per argument. + */ + +/* Single, simple types */ + +#define ARGI_ANYTYPE 0x01 /* Don't care */ +#define ARGI_PACKAGE 0x02 +#define ARGI_EVENT 0x03 +#define ARGI_MUTEX 0x04 +#define ARGI_DDBHANDLE 0x05 + +/* Interchangeable types (via implicit conversion) */ + +#define ARGI_INTEGER 0x06 +#define ARGI_STRING 0x07 +#define ARGI_BUFFER 0x08 +#define ARGI_BUFFER_OR_STRING 0x09 /* Used by MID op only */ +#define ARGI_COMPUTEDATA 0x0A /* Buffer, String, or Integer */ + +/* Reference objects */ + +#define ARGI_INTEGER_REF 0x0B +#define ARGI_OBJECT_REF 0x0C +#define ARGI_DEVICE_REF 0x0D +#define ARGI_REFERENCE 0x0E +#define ARGI_TARGETREF 0x0F /* Target, subject to implicit conversion */ +#define ARGI_FIXED_TARGET 0x10 /* Target, no implicit conversion */ +#define ARGI_SIMPLE_TARGET 0x11 /* Name, Local, Arg -- no implicit conversion */ + +/* Multiple/complex types */ + +#define ARGI_DATAOBJECT 0x12 /* Buffer, String, package or reference to a Node - Used only by size_of operator*/ +#define ARGI_COMPLEXOBJ 0x13 /* Buffer, String, or package (Used by INDEX op only) */ +#define ARGI_REF_OR_STRING 0x14 /* Reference or String (Used by DEREFOF op only) */ +#define ARGI_REGION_OR_FIELD 0x15 /* Used by LOAD op only */ + +/* Note: types above can expand to 0x1F maximum */ + +#define ARGI_INVALID_OPCODE 0xFFFFFFFF + + +/* + * hash offsets + */ +#define AML_EXTOP_HASH_OFFSET 22 +#define AML_LNOT_HASH_OFFSET 19 + + +/* + * opcode groups and types + */ + +#define OPGRP_NAMED 0x01 +#define OPGRP_FIELD 0x02 +#define OPGRP_BYTELIST 0x04 + + +/* + * Opcode information + */ + +/* Opcode flags */ + +#define AML_HAS_ARGS 0x0800 +#define AML_HAS_TARGET 0x0400 +#define AML_HAS_RETVAL 0x0200 +#define AML_NSOBJECT 0x0100 +#define AML_NSOPCODE 0x0080 +#define AML_NSNODE 0x0040 +#define AML_NAMED 0x0020 +#define AML_DEFER 0x0010 +#define AML_FIELD 0x0008 +#define AML_CREATE 0x0004 +#define AML_MATH 0x0002 +#define AML_LOGICAL 0x0001 +#define AML_CONSTANT 0x1000 + +/* Convenient flag groupings */ + +#define AML_FLAGS_EXEC_1A_0T_0R AML_HAS_ARGS /* Monadic1 */ +#define AML_FLAGS_EXEC_1A_0T_1R AML_HAS_ARGS | AML_HAS_RETVAL /* Monadic2 */ +#define AML_FLAGS_EXEC_1A_1T_0R AML_HAS_ARGS | AML_HAS_TARGET +#define AML_FLAGS_EXEC_1A_1T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL /* monadic2_r */ +#define AML_FLAGS_EXEC_2A_0T_0R AML_HAS_ARGS /* Dyadic1 */ +#define AML_FLAGS_EXEC_2A_0T_1R AML_HAS_ARGS | AML_HAS_RETVAL /* Dyadic2 */ +#define AML_FLAGS_EXEC_2A_1T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL /* dyadic2_r */ +#define AML_FLAGS_EXEC_2A_2T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL +#define AML_FLAGS_EXEC_3A_0T_0R AML_HAS_ARGS +#define AML_FLAGS_EXEC_3A_1T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL +#define AML_FLAGS_EXEC_6A_0T_1R AML_HAS_ARGS | AML_HAS_RETVAL + + +/* + * The opcode Type is used in a dispatch table, do not change + * without updating the table. 
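The reason the numbering below is frozen: the interpreter indexes a handler table directly with these type values, so renumbering an entry silently redirects dispatch. A sketch of the pattern, with illustrative handler names rather than the actual ACPICA functions:

typedef int (*aml_exec_handler)(void *walk_state);

extern int exec_1a_0t_0r(void *ws);     /* illustrative handlers */
extern int exec_2a_0t_1r(void *ws);

static aml_exec_handler aml_dispatch[] = {
        [0x00] = exec_1a_0t_0r,         /* AML_TYPE_EXEC_1A_0T_0R */
        [0x05] = exec_2a_0t_1r,         /* AML_TYPE_EXEC_2A_0T_1R */
        /* ... one slot for each AML_TYPE_EXEC_* value ... */
};

/* status = aml_dispatch[op_info->type](walk_state); */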
+ */ +#define AML_TYPE_EXEC_1A_0T_0R 0x00 /* Monadic1 */ +#define AML_TYPE_EXEC_1A_0T_1R 0x01 /* Monadic2 */ +#define AML_TYPE_EXEC_1A_1T_0R 0x02 +#define AML_TYPE_EXEC_1A_1T_1R 0x03 /* monadic2_r */ +#define AML_TYPE_EXEC_2A_0T_0R 0x04 /* Dyadic1 */ +#define AML_TYPE_EXEC_2A_0T_1R 0x05 /* Dyadic2 */ +#define AML_TYPE_EXEC_2A_1T_1R 0x06 /* dyadic2_r */ +#define AML_TYPE_EXEC_2A_2T_1R 0x07 +#define AML_TYPE_EXEC_3A_0T_0R 0x08 +#define AML_TYPE_EXEC_3A_1T_1R 0x09 +#define AML_TYPE_EXEC_6A_0T_1R 0x0A +/* End of types used in dispatch table */ + +#define AML_TYPE_LITERAL 0x0B +#define AML_TYPE_CONSTANT 0x0C +#define AML_TYPE_METHOD_ARGUMENT 0x0D +#define AML_TYPE_LOCAL_VARIABLE 0x0E +#define AML_TYPE_DATA_TERM 0x0F + +/* Generic for an op that returns a value */ + +#define AML_TYPE_METHOD_CALL 0x10 + +/* Misc */ + +#define AML_TYPE_CREATE_FIELD 0x11 +#define AML_TYPE_CREATE_OBJECT 0x12 +#define AML_TYPE_CONTROL 0x13 +#define AML_TYPE_NAMED_NO_OBJ 0x14 +#define AML_TYPE_NAMED_FIELD 0x15 +#define AML_TYPE_NAMED_SIMPLE 0x16 +#define AML_TYPE_NAMED_COMPLEX 0x17 +#define AML_TYPE_RETURN 0x18 + +#define AML_TYPE_UNDEFINED 0x19 +#define AML_TYPE_BOGUS 0x1A + + +/* + * Opcode classes + */ +#define AML_CLASS_EXECUTE 0x00 +#define AML_CLASS_CREATE 0x01 +#define AML_CLASS_ARGUMENT 0x02 +#define AML_CLASS_NAMED_OBJECT 0x03 +#define AML_CLASS_CONTROL 0x04 +#define AML_CLASS_ASCII 0x05 +#define AML_CLASS_PREFIX 0x06 +#define AML_CLASS_INTERNAL 0x07 +#define AML_CLASS_RETURN_VALUE 0x08 +#define AML_CLASS_METHOD_CALL 0x09 +#define AML_CLASS_UNKNOWN 0x0A + + +/* Predefined Operation Region space_iDs */ + +typedef enum +{ + REGION_MEMORY = 0, + REGION_IO, + REGION_PCI_CONFIG, + REGION_EC, + REGION_SMBUS, + REGION_CMOS, + REGION_PCI_BAR, + REGION_DATA_TABLE, /* Internal use only */ + REGION_FIXED_HW = 0x7F + +} AML_REGION_TYPES; + + +/* Comparison operation codes for match_op operator */ + +typedef enum +{ + MATCH_MTR = 0, + MATCH_MEQ = 1, + MATCH_MLE = 2, + MATCH_MLT = 3, + MATCH_MGE = 4, + MATCH_MGT = 5 + +} AML_MATCH_OPERATOR; + +#define MAX_MATCH_OPERATOR 5 + + +/* + * field_flags + * + * This byte is extracted from the AML and includes three separate + * pieces of information about the field: + * 1) The field access type + * 2) The field update rule + * 3) The lock rule for the field + * + * Bits 00 - 03 : access_type (any_acc, byte_acc, etc.) + * 04 : lock_rule (1 == Lock) + * 05 - 06 : update_rule + */ +#define AML_FIELD_ACCESS_TYPE_MASK 0x0F +#define AML_FIELD_LOCK_RULE_MASK 0x10 +#define AML_FIELD_UPDATE_RULE_MASK 0x60 + + +/* 1) Field Access Types */ + +typedef enum +{ + AML_FIELD_ACCESS_ANY = 0x00, + AML_FIELD_ACCESS_BYTE = 0x01, + AML_FIELD_ACCESS_WORD = 0x02, + AML_FIELD_ACCESS_DWORD = 0x03, + AML_FIELD_ACCESS_QWORD = 0x04, /* ACPI 2.0 */ + AML_FIELD_ACCESS_BUFFER = 0x05 /* ACPI 2.0 */ + +} AML_ACCESS_TYPE; + + +/* 2) Field Lock Rules */ + +typedef enum +{ + AML_FIELD_LOCK_NEVER = 0x00, + AML_FIELD_LOCK_ALWAYS = 0x10 + +} AML_LOCK_RULE; + + +/* 3) Field Update Rules */ + +typedef enum +{ + AML_FIELD_UPDATE_PRESERVE = 0x00, + AML_FIELD_UPDATE_WRITE_AS_ONES = 0x20, + AML_FIELD_UPDATE_WRITE_AS_ZEROS = 0x40 + +} AML_UPDATE_RULE; + + +/* + * Field Access Attributes. 
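Stepping back to the field_flags byte above: the masks and enum values are laid out so that a simple AND yields directly comparable constants, with no shifting required. A minimal decode sketch under that assumption:

#include <stdint.h>

/* Split the AML field_flags byte with the masks defined above; the
 * enum values (e.g. AML_FIELD_LOCK_ALWAYS = 0x10) are pre-shifted, so
 * masking alone produces values that compare directly against
 * AML_FIELD_ACCESS_*, AML_FIELD_LOCK_* and AML_FIELD_UPDATE_*. */
struct field_info {
        uint8_t access;         /* e.g. AML_FIELD_ACCESS_BYTE (0x01) */
        uint8_t lock;           /* e.g. AML_FIELD_LOCK_ALWAYS (0x10) */
        uint8_t update;         /* e.g. AML_FIELD_UPDATE_PRESERVE (0x00) */
};

static struct field_info decode_field_flags(uint8_t flags)
{
        struct field_info fi;

        fi.access = flags & 0x0F;       /* AML_FIELD_ACCESS_TYPE_MASK */
        fi.lock   = flags & 0x10;       /* AML_FIELD_LOCK_RULE_MASK */
        fi.update = flags & 0x60;       /* AML_FIELD_UPDATE_RULE_MASK */
        return fi;
}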
+ * This byte is extracted from the AML via the + * access_as keyword + */ +typedef enum +{ + AML_FIELD_ATTRIB_SMB_QUICK = 0x02, + AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04, + AML_FIELD_ATTRIB_SMB_BYTE = 0x06, + AML_FIELD_ATTRIB_SMB_WORD = 0x08, + AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A, + AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C, + AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D + +} AML_ACCESS_ATTRIBUTE; + + +/* bit fields in method_flags byte */ + +#define METHOD_FLAGS_ARG_COUNT 0x07 +#define METHOD_FLAGS_SERIALIZED 0x08 +#define METHOD_FLAGS_SYNCH_LEVEL 0xF0 + + +#endif /* __AMLCODE_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/amlresrc.h linux.21rc5-ac2/include/acpi/amlresrc.h --- linux.21rc5/include/acpi/amlresrc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/amlresrc.h 2003-04-28 17:59:26.000000000 +0100 @@ -0,0 +1,310 @@ + +/****************************************************************************** + * + * Module Name: amlresrc.h - AML resource descriptors + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#ifndef __AMLRESRC_H +#define __AMLRESRC_H + + +#define ASL_RESNAME_ADDRESS "_ADR" +#define ASL_RESNAME_ALIGNMENT "_ALN" +#define ASL_RESNAME_ADDRESSSPACE "_ASI" +#define ASL_RESNAME_BASEADDRESS "_BAS" +#define ASL_RESNAME_BUSMASTER "_BM_" /* Master(1), Slave(0) */ +#define ASL_RESNAME_DECODE "_DEC" +#define ASL_RESNAME_DMA "_DMA" +#define ASL_RESNAME_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */ +#define ASL_RESNAME_GRANULARITY "_GRA" +#define ASL_RESNAME_INTERRUPT "_INT" +#define ASL_RESNAME_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */ +#define ASL_RESNAME_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */ +#define ASL_RESNAME_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */ +#define ASL_RESNAME_LENGTH "_LEN" +#define ASL_RESNAME_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */ +#define ASL_RESNAME_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */ +#define ASL_RESNAME_MAXADDR "_MAX" +#define ASL_RESNAME_MINADDR "_MIN" +#define ASL_RESNAME_MAXTYPE "_MAF" +#define ASL_RESNAME_MINTYPE "_MIF" +#define ASL_RESNAME_REGISTERBITOFFSET "_RBO" +#define ASL_RESNAME_REGISTERBITWIDTH "_RBW" +#define ASL_RESNAME_RANGETYPE "_RNG" +#define ASL_RESNAME_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */ +#define ASL_RESNAME_TRANSLATION "_TRA" +#define ASL_RESNAME_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */ +#define ASL_RESNAME_TYPE "_TTP" /* Translation(1), Static (0) */ +#define ASL_RESNAME_XFERTYPE "_SIz" /* 8(0), 8_and16(1), 16(2) */ + + +/* Default sizes for "small" resource descriptors */ + +#define ASL_RDESC_IRQ_SIZE 0x02 +#define ASL_RDESC_DMA_SIZE 0x02 
+#define ASL_RDESC_ST_DEPEND_SIZE 0x00 +#define ASL_RDESC_END_DEPEND_SIZE 0x00 +#define ASL_RDESC_IO_SIZE 0x07 +#define ASL_RDESC_FIXED_IO_SIZE 0x03 +#define ASL_RDESC_END_TAG_SIZE 0x01 + + +struct asl_resource_node +{ + u32 buffer_length; + void *buffer; + struct asl_resource_node *next; +}; + + +/* + * Resource descriptors defined in the ACPI specification. + * + * Alignment must be BYTE because these descriptors + * are used to overlay the AML byte stream. + */ +#pragma pack(1) + +struct asl_irq_format_desc +{ + u8 descriptor_type; + u16 irq_mask; + u8 flags; +}; + + +struct asl_irq_noflags_desc +{ + u8 descriptor_type; + u16 irq_mask; +}; + + +struct asl_dma_format_desc +{ + u8 descriptor_type; + u8 dma_channel_mask; + u8 flags; +}; + + +struct asl_start_dependent_desc +{ + u8 descriptor_type; + u8 flags; +}; + + +struct asl_start_dependent_noprio_desc +{ + u8 descriptor_type; +}; + + +struct asl_end_dependent_desc +{ + u8 descriptor_type; +}; + + +struct asl_io_port_desc +{ + u8 descriptor_type; + u8 information; + u16 address_min; + u16 address_max; + u8 alignment; + u8 length; +}; + + +struct asl_fixed_io_port_desc +{ + u8 descriptor_type; + u16 base_address; + u8 length; +}; + + +struct asl_small_vendor_desc +{ + u8 descriptor_type; + u8 vendor_defined[7]; +}; + + +struct asl_end_tag_desc +{ + u8 descriptor_type; + u8 checksum; +}; + + +/* LARGE descriptors */ + +struct asl_memory_24_desc +{ + u8 descriptor_type; + u16 length; + u8 information; + u16 address_min; + u16 address_max; + u16 alignment; + u16 range_length; +}; + + +struct asl_large_vendor_desc +{ + u8 descriptor_type; + u16 length; + u8 vendor_defined[1]; +}; + + +struct asl_memory_32_desc +{ + u8 descriptor_type; + u16 length; + u8 information; + u32 address_min; + u32 address_max; + u32 alignment; + u32 range_length; +}; + + +struct asl_fixed_memory_32_desc +{ + u8 descriptor_type; + u16 length; + u8 information; + u32 base_address; + u32 range_length; +}; + + +struct asl_qword_address_desc +{ + u8 descriptor_type; + u16 length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u64 granularity; + u64 address_min; + u64 address_max; + u64 translation_offset; + u64 address_length; + u8 optional_fields[2]; +}; + + +struct asl_dword_address_desc +{ + u8 descriptor_type; + u16 length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u32 granularity; + u32 address_min; + u32 address_max; + u32 translation_offset; + u32 address_length; + u8 optional_fields[2]; +}; + + +struct asl_word_address_desc +{ + u8 descriptor_type; + u16 length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u16 granularity; + u16 address_min; + u16 address_max; + u16 translation_offset; + u16 address_length; + u8 optional_fields[2]; +}; + + +struct asl_extended_xrupt_desc +{ + u8 descriptor_type; + u16 length; + u8 flags; + u8 table_length; + u32 interrupt_number[1]; + /* res_source_index, res_source optional fields follow */ +}; + + +struct asl_general_register_desc +{ + u8 descriptor_type; + u16 length; + u8 address_space_id; + u8 bit_width; + u8 bit_offset; + u8 reserved; + u64 address; +}; + +/* restore default alignment */ + +#pragma pack() + +/* Union of all resource descriptors, so we can allocate the worst case */ + +union asl_resource_desc +{ + struct asl_irq_format_desc irq; + struct asl_dma_format_desc dma; + struct asl_start_dependent_desc std; + struct asl_end_dependent_desc end; + struct asl_io_port_desc iop; + struct asl_fixed_io_port_desc fio; + struct asl_small_vendor_desc smv; + struct asl_end_tag_desc et; + + 
struct asl_memory_24_desc M24; + struct asl_large_vendor_desc lgv; + struct asl_memory_32_desc M32; + struct asl_fixed_memory_32_desc F32; + struct asl_qword_address_desc qas; + struct asl_dword_address_desc das; + struct asl_word_address_desc was; + struct asl_extended_xrupt_desc exx; + struct asl_general_register_desc grg; + u32 u32_item; + u16 u16_item; + u8 U8item; +}; + + +#endif + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/platform/acenv.h linux.21rc5-ac2/include/acpi/platform/acenv.h --- linux.21rc5/include/acpi/platform/acenv.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/platform/acenv.h 2003-04-28 18:00:22.000000000 +0100 @@ -0,0 +1,363 @@ +/****************************************************************************** + * + * Name: acenv.h - Generation environment specific items + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef __ACENV_H__ +#define __ACENV_H__ + + +/* + * Configuration for ACPI tools and utilities + */ + +#ifdef _ACPI_DUMP_APP +#ifndef MSDOS +#define ACPI_DEBUG_OUTPUT +#endif +#define ACPI_APPLICATION +#define ACPI_DISASSEMBLER +#define ACPI_NO_METHOD_EXECUTION +#define ACPI_USE_SYSTEM_CLIBRARY +#endif + +#ifdef _ACPI_EXEC_APP +#undef DEBUGGER_THREADING +#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED +#define ACPI_DEBUG_OUTPUT +#define ACPI_APPLICATION +#define ACPI_DEBUGGER +#define ACPI_DISASSEMBLER +#define ACPI_USE_SYSTEM_CLIBRARY +#endif + +#ifdef _ACPI_ASL_COMPILER +#define ACPI_DEBUG_OUTPUT +#define ACPI_APPLICATION +#define ACPI_DISASSEMBLER +#define ACPI_CONSTANT_EVAL_ONLY +#define ACPI_USE_SYSTEM_CLIBRARY +#endif + +/* + * Environment configuration. The purpose of this file is to interface to the + * local generation environment. + * + * 1) ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library. + * Otherwise, local versions of string/memory functions will be used. + * 2) ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and + * the standard header files may be used. + * + * The ACPI subsystem only uses low level C library functions that do not call + * operating system services and may therefore be inlined in the code. + * + * It may be necessary to tailor these include files to the target + * generation environment. + * + * + * Functions and constants used from each header: + * + * string.h: memcpy + * memset + * strcat + * strcmp + * strcpy + * strlen + * strncmp + * strncat + * strncpy + * + * stdlib.h: strtoul + * + * stdarg.h: va_list + * va_arg + * va_start + * va_end + * + */ + +/*! [Begin] no source code translation */ + +#if defined(__linux__) +#include "aclinux.h" + +#elif defined(_AED_EFI) +#include "acefi.h" + +#elif defined(WIN32) +#include "acwin.h" + +#elif defined(WIN64) +#include "acwin64.h" + +#elif defined(MSDOS) /* Must appear after WIN32 and WIN64 check */ +#include "acdos16.h" + +#elif defined(__FreeBSD__) +#include "acfreebsd.h" + +#elif defined(MODESTO) +#include "acmodesto.h" + +#elif defined(NETWARE) +#include "acnetware.h" + +#else + +/* All other environments */ + +#define ACPI_USE_STANDARD_HEADERS + +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long + + +/* Name of host operating system (returned by the _OS_ namespace object) */ + +#define ACPI_OS_NAME "Intel ACPI/CA Core Subsystem" + +/* This macro is used to tag functions as "printf-like" because + * some compilers can catch printf format string problems. MSVC + * doesn't, so this is preprocessed away. + */ +#define ACPI_PRINTF_LIKE_FUNC + +#endif + +/* + * Memory allocation tracking. Used only if + * 1) This is the debug version + * 2) This is NOT a 16-bit version of the code (not enough real-mode memory) + */ +#ifdef ACPI_DEBUG_OUTPUT +#if ACPI_MACHINE_WIDTH != 16 +#define ACPI_DBG_TRACK_ALLOCATIONS +#endif +#endif + +/*! [End] no source code translation !*/ + + +/* + * Debugger threading model + * Use single threaded if the entire subsystem is contained in an application + * Use multi-threaded when the subsystem is running in the kernel. + * + * By default the model is single threaded if ACPI_APPLICATION is set, + * multi-threaded if ACPI_APPLICATION is not set. 
+ */ +#define DEBUGGER_SINGLE_THREADED 0 +#define DEBUGGER_MULTI_THREADED 1 + +#ifdef ACPI_APPLICATION +#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED + +#else +#define DEBUGGER_THREADING DEBUGGER_MULTI_THREADED +#endif + + +/****************************************************************************** + * + * C library configuration + * + *****************************************************************************/ + +#ifdef ACPI_USE_SYSTEM_CLIBRARY +/* + * Use the standard C library headers. + * We want to keep these to a minimum. + */ + +#ifdef ACPI_USE_STANDARD_HEADERS +/* + * Use the standard headers from the standard locations + */ +#include <stdarg.h> +#include <stdlib.h> +#include <string.h> +#include <ctype.h> + +#endif /* ACPI_USE_STANDARD_HEADERS */ + +/* + * We will be linking to the standard Clib functions + */ + +#define ACPI_STRSTR(s1,s2) strstr((s1), (s2)) +#define ACPI_STRUPR(s) (void) acpi_ut_strupr ((s)) +#define ACPI_STRLEN(s) (acpi_size) strlen((s)) +#define ACPI_STRCPY(d,s) (void) strcpy((d), (s)) +#define ACPI_STRNCPY(d,s,n) (void) strncpy((d), (s), (acpi_size)(n)) +#define ACPI_STRNCMP(d,s,n) strncmp((d), (s), (acpi_size)(n)) +#define ACPI_STRCMP(d,s) strcmp((d), (s)) +#define ACPI_STRCAT(d,s) (void) strcat((d), (s)) +#define ACPI_STRNCAT(d,s,n) strncat((d), (s), (acpi_size)(n)) +#define ACPI_STRTOUL(d,s,n) strtoul((d), (s), (acpi_size)(n)) +#define ACPI_MEMCPY(d,s,n) (void) memcpy((d), (s), (acpi_size)(n)) +#define ACPI_MEMSET(d,s,n) (void) memset((d), (s), (acpi_size)(n)) + +#define ACPI_TOUPPER toupper +#define ACPI_TOLOWER tolower +#define ACPI_IS_XDIGIT isxdigit +#define ACPI_IS_DIGIT isdigit +#define ACPI_IS_SPACE isspace +#define ACPI_IS_UPPER isupper +#define ACPI_IS_PRINT isprint +#define ACPI_IS_ALPHA isalpha +#define ACPI_IS_ASCII isascii + +/****************************************************************************** + * + * Not using native C library, use local implementations + * + *****************************************************************************/ +#else + +/* + * Use local definitions of C library macros and functions + * NOTE: The function implementations may not be as efficient + * as an inline or assembly code implementation provided by a + * native C library. 
+ */ + +#ifndef va_arg + +#ifndef _VALIST +#define _VALIST +typedef char *va_list; +#endif /* _VALIST */ + +/* + * Storage alignment properties + */ + +#define _AUPBND (sizeof (acpi_native_int) - 1) +#define _ADNBND (sizeof (acpi_native_int) - 1) + +/* + * Variable argument list macro definitions + */ + +#define _bnd(X, bnd) (((sizeof (X)) + (bnd)) & (~(bnd))) +#define va_arg(ap, T) (*(T *)(((ap) += (_bnd (T, _AUPBND))) - (_bnd (T,_ADNBND)))) +#define va_end(ap) (void) 0 +#define va_start(ap, A) (void) ((ap) = (((char *) &(A)) + (_bnd (A,_AUPBND)))) + +#endif /* va_arg */ + + +#define ACPI_STRSTR(s1,s2) acpi_ut_strstr ((s1), (s2)) +#define ACPI_STRUPR(s) (void) acpi_ut_strupr ((s)) +#define ACPI_STRLEN(s) (acpi_size) acpi_ut_strlen ((s)) +#define ACPI_STRCPY(d,s) (void) acpi_ut_strcpy ((d), (s)) +#define ACPI_STRNCPY(d,s,n) (void) acpi_ut_strncpy ((d), (s), (acpi_size)(n)) +#define ACPI_STRNCMP(d,s,n) acpi_ut_strncmp ((d), (s), (acpi_size)(n)) +#define ACPI_STRCMP(d,s) acpi_ut_strcmp ((d), (s)) +#define ACPI_STRCAT(d,s) (void) acpi_ut_strcat ((d), (s)) +#define ACPI_STRNCAT(d,s,n) acpi_ut_strncat ((d), (s), (acpi_size)(n)) +#define ACPI_STRTOUL(d,s,n) acpi_ut_strtoul ((d), (s), (acpi_size)(n)) +#define ACPI_MEMCPY(d,s,n) (void) acpi_ut_memcpy ((d), (s), (acpi_size)(n)) +#define ACPI_MEMSET(d,v,n) (void) acpi_ut_memset ((d), (v), (acpi_size)(n)) +#define ACPI_TOUPPER acpi_ut_to_upper +#define ACPI_TOLOWER acpi_ut_to_lower + +#endif /* ACPI_USE_SYSTEM_CLIBRARY */ + + +/****************************************************************************** + * + * Assembly code macros + * + *****************************************************************************/ + +/* + * Handle platform- and compiler-specific assembly language differences. + * These should already have been defined by the platform includes above. + * + * Notes: + * 1) Interrupt 3 is used to break into a debugger + * 2) Interrupts are turned off during ACPI register setup + */ + +/* Unrecognized compiler, use defaults */ + +#ifndef ACPI_ASM_MACROS + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +#define ACPI_ASM_MACROS +#define BREAKPOINT3 +#define ACPI_DISABLE_IRQS() +#define ACPI_ENABLE_IRQS() +#define ACPI_ACQUIRE_GLOBAL_LOCK(Glptr, acq) +#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, acq) + +#endif /* ACPI_ASM_MACROS */ + + +#ifdef ACPI_APPLICATION + +/* Don't want software interrupts within a ring3 application */ + +#undef BREAKPOINT3 +#define BREAKPOINT3 +#endif + + +/****************************************************************************** + * + * Compiler-specific information is contained in the compiler-specific + * headers. 
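One detail of the local va_list machinery above deserves a worked example: _bnd rounds each argument's storage up to the native word, which is what keeps a small argument from misaligning the ones behind it. A standalone mirror of the arithmetic, assuming a 4-byte acpi_native_int (so the bound is sizeof minus 1, i.e. 3); unlike the real _bnd, this mirror takes the size directly rather than applying sizeof to an expression:

#include <stdio.h>

/* Round size up to the next multiple of (bnd + 1). */
#define BND(size, bnd) (((size) + (bnd)) & ~(unsigned int)(bnd))

int main(void)
{
        printf("%u\n", BND(1u, 3u));    /* char:      1 -> 4 */
        printf("%u\n", BND(4u, 3u));    /* int:       4 -> 4 */
        printf("%u\n", BND(8u, 3u));    /* long long: 8 -> 8 */
        return 0;
}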
+ */ +#endif /* __ACENV_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/platform/acgcc.h linux.21rc5-ac2/include/acpi/platform/acgcc.h --- linux.21rc5/include/acpi/platform/acgcc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/platform/acgcc.h 2003-04-28 18:00:22.000000000 +0100 @@ -0,0 +1,59 @@ +/****************************************************************************** + * + * Name: acgcc.h - GCC specific defines, etc. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACGCC_H__ +#define __ACGCC_H__ + +/* This macro is used to tag functions as "printf-like" because + * some compilers (like GCC) can catch printf format string problems. + */ +#define ACPI_PRINTF_LIKE_FUNC __attribute__ ((__format__ (__printf__, 4, 5))) + +/* Some compilers complain about unused variables. Sometimes we don't want to + * use all the variables (most specifically for _THIS_MODULE). This allows us + * to tell the compiler, in a per-variable manner, that a variable + * is unused. 
+ */ +#define ACPI_UNUSED_VAR __attribute__ ((unused)) + +#endif /* __ACGCC_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/acpi/platform/aclinux.h linux.21rc5-ac2/include/acpi/platform/aclinux.h --- linux.21rc5/include/acpi/platform/aclinux.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/acpi/platform/aclinux.h 2003-04-28 18:00:22.000000000 +0100 @@ -0,0 +1,92 @@ +/****************************************************************************** + * + * Name: aclinux.h - OS specific defines, etc. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2003, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef __ACLINUX_H__ +#define __ACLINUX_H__ + +#define ACPI_OS_NAME "Linux" + +#define ACPI_USE_SYSTEM_CLIBRARY +#define ACPI_USE_DO_WHILE_0 + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define strtoul simple_strtoul + +#define ACPI_MACHINE_WIDTH BITS_PER_LONG + +#else /* !__KERNEL__ */ + +#include +#include +#include +#include +#include + +#if defined(__ia64__) || defined(__x86_64__) +#define ACPI_MACHINE_WIDTH 64 +#define COMPILER_DEPENDENT_INT64 long +#define COMPILER_DEPENDENT_UINT64 unsigned long +#else +#define ACPI_MACHINE_WIDTH 32 +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long +#define ACPI_USE_NATIVE_DIVIDE +#endif + +#endif /* __KERNEL__ */ + +/* Linux uses GCC */ + +#include "acgcc.h" + +#endif /* __ACLINUX_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-alpha/bitops.h linux.21rc5-ac2/include/asm-alpha/bitops.h --- linux.21rc5/include/asm-alpha/bitops.h 2003-05-28 15:07:38.000000000 +0100 +++ linux.21rc5-ac2/include/asm-alpha/bitops.h 2003-04-22 16:44:38.000000000 +0100 @@ -3,6 +3,7 @@ #include #include +#include /* * Copyright 1994, Linus Torvalds. @@ -74,6 +75,17 @@ * WARNING: non atomic version. */ static __inline__ void +__clear_bit(unsigned long nr, volatile void * addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m &= ~(1 << (nr & 31)); +} + +/* + * WARNING: non atomic version. + */ +static __inline__ void __change_bit(unsigned long nr, volatile void * addr) { int *m = ((int *) addr) + (nr >> 5); @@ -264,6 +276,28 @@ #endif } +/* + * __ffs = Find First set bit in word. Undefined if no set bit exists. + */ +static inline unsigned long __ffs(unsigned long word) +{ +#if defined(__alpha_cix__) && defined(__alpha_fix__) + /* Whee. EV67 can calculate it directly. */ + unsigned long result; + __asm__("cttz %1,%0" : "=r"(result) : "r"(word)); + return result; +#else + unsigned long bits, qofs, bofs; + + __asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word)); + qofs = ffz_b(bits); + bits = __kernel_extbl(word, qofs); + bofs = ffz_b(~bits); + + return qofs*8 + bofs; +#endif +} + #ifdef __KERNEL__ /* @@ -278,6 +312,20 @@ return word ? result+1 : 0; } +/* + * fls: find last bit set. + */ +#if defined(__alpha_cix__) && defined(__alpha_fix__) +static inline int fls(int word) +{ + long result; + __asm__("ctlz %1,%0" : "=r"(result) : "r"(word & 0xffffffff)); + return 64 - result; +} +#else +#define fls generic_fls +#endif + /* Compute powers of two for the given integer. */ static inline int floor_log2(unsigned long word) { @@ -365,13 +413,77 @@ } /* - * The optimizer actually does good code for this case.. + * Find next one bit in a bitmap reasonably efficiently. 
+ */ +static inline unsigned long +find_next_bit(void * addr, unsigned long size, unsigned long offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp &= ~0UL << offset; + if (size < 64) + goto found_first; + if (tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if ((tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; +found_first: + tmp &= ~0UL >> (64 - size); + if (!tmp) + return result + size; +found_middle: + return result + __ffs(tmp); +} + +/* + * The optimizer actually does good code for this case. */ #define find_first_zero_bit(addr, size) \ find_next_zero_bit((addr), (size), 0) +#define find_first_bit(addr, size) \ + find_next_bit((addr), (size), 0) #ifdef __KERNEL__ +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is set. + */ +static inline unsigned long +_sched_find_first_bit(unsigned long *b) +{ + unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; + unsigned long ofs; + + ofs = (b1 ? 64 : 128); + b1 = (b1 ? b1 : b2); + ofs = (b0 ? 0 : ofs); + b0 = (b0 ? b0 : b1); + + return __ffs(b0) + ofs; +} + + #define ext2_set_bit __test_and_set_bit #define ext2_clear_bit __test_and_clear_bit #define ext2_test_bit test_bit diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-alpha/ioctls.h linux.21rc5-ac2/include/asm-alpha/ioctls.h --- linux.21rc5/include/asm-alpha/ioctls.h 2003-05-28 15:07:38.000000000 +0100 +++ linux.21rc5-ac2/include/asm-alpha/ioctls.h 2003-04-22 16:44:38.000000000 +0100 @@ -9,6 +9,7 @@ #define FIONBIO _IOW('f', 126, int) #define FIONREAD _IOR('f', 127, int) #define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) #define TIOCGETP _IOR('t', 8, struct sgttyb) #define TIOCSETP _IOW('t', 9, struct sgttyb) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-alpha/smp.h linux.21rc5-ac2/include/asm-alpha/smp.h --- linux.21rc5/include/asm-alpha/smp.h 2003-05-28 15:07:38.000000000 +0100 +++ linux.21rc5-ac2/include/asm-alpha/smp.h 2003-04-22 16:44:38.000000000 +0100 @@ -55,7 +55,7 @@ #define cpu_logical_map(cpu) __cpu_logical_map[cpu] #define hard_smp_processor_id() __hard_smp_processor_id() -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) extern unsigned long cpu_present_mask; #define cpu_online_map cpu_present_mask diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-alpha/system.h linux.21rc5-ac2/include/asm-alpha/system.h --- linux.21rc5/include/asm-alpha/system.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-alpha/system.h 2003-04-22 16:44:38.000000000 +0100 @@ -310,11 +310,11 @@ #define __sti() do { barrier(); setipl(IPL_MIN); } while(0) #define __save_flags(flags) ((flags) = rdps()) #define __save_and_cli(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) -#define __save_and_sti(flags) do { barrier(); (flags) = swpipl(IPL_MIN); } while(0) +#define __save_and_sti(flags) do { (flags) = swpipl(IPL_MIN); barrier(); } while(0) #define __restore_flags(flags) do { barrier(); setipl(flags); barrier(); } while(0) #define local_irq_save(flags) 
__save_and_cli(flags) -#define local_irq_set(flags) __save_and_sti(flags) +#define local_irq_set(flags) __save_and_sti(flags) #define local_irq_restore(flags) __restore_flags(flags) #define local_irq_disable() __cli() #define local_irq_enable() __sti() @@ -323,8 +323,6 @@ extern int global_irq_holder; -#define save_and_cli(flags) (save_flags(flags), cli()) - extern void __global_cli(void); extern void __global_sti(void); extern unsigned long __global_save_flags(void); @@ -334,6 +332,8 @@ #define sti() __global_sti() #define save_flags(flags) ((flags) = __global_save_flags()) #define restore_flags(flags) __global_restore_flags(flags) +#define save_and_cli(flags) (save_flags(flags), cli()) +#define save_and_sti(flags) (save_flags(flags), sti()) #else /* CONFIG_SMP */ @@ -341,6 +341,7 @@ #define sti() __sti() #define save_flags(flags) __save_flags(flags) #define save_and_cli(flags) __save_and_cli(flags) +#define save_and_sti(flags) __save_and_sti(flags) #define restore_flags(flags) __restore_flags(flags) #endif /* CONFIG_SMP */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-arm/page.h linux.21rc5-ac2/include/asm-arm/page.h --- linux.21rc5/include/asm-arm/page.h 2003-05-28 15:07:40.000000000 +0100 +++ linux.21rc5-ac2/include/asm-arm/page.h 2003-04-22 16:44:38.000000000 +0100 @@ -106,6 +106,9 @@ #define VALID_PAGE(page) ((page - mem_map) < max_mapnr) #endif +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + #endif #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-cris/ioctls.h linux.21rc5-ac2/include/asm-cris/ioctls.h --- linux.21rc5/include/asm-cris/ioctls.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-cris/ioctls.h 2003-04-22 16:44:38.000000000 +0100 @@ -69,6 +69,7 @@ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define FIOQSIZE 0x5460 #define TIOCSERSETRS485 0x5460 /* enable rs-485 */ #define TIOCSERWRRS485 0x5461 /* write rs-485 */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-generic/bitops.h linux.21rc5-ac2/include/asm-generic/bitops.h --- linux.21rc5/include/asm-generic/bitops.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-generic/bitops.h 2003-04-22 16:44:38.000000000 +0100 @@ -51,6 +51,12 @@ return ((mask & *addr) != 0); } +/* + * fls: find last bit set. + */ + +#define fls(x) generic_fls(x) + #ifdef __KERNEL__ /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/acpi.h linux.21rc5-ac2/include/asm-i386/acpi.h --- linux.21rc5/include/asm-i386/acpi.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/acpi.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,168 @@ +/* + * asm-i386/acpi.h + * + * Copyright (C) 2001 Paul Diefenbaugh + * Copyright (C) 2001 Patrick Mochel + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef _ASM_ACPI_H +#define _ASM_ACPI_H + +#ifdef __KERNEL__ + +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ + +#define ACPI_ASM_MACROS +#define BREAKPOINT3 +#define ACPI_DISABLE_IRQS() __cli() +#define ACPI_ENABLE_IRQS() __sti() +#define ACPI_FLUSH_CPU_CACHE() wbinvd() + +/* + * A brief explanation as GNU inline assembly is a bit hairy + * %0 is the output parameter in EAX ("=a") + * %1 and %2 are the input parameters in ECX ("c") + * and an immediate value ("i") respectively + * All actual register references are preceded with "%%" as in "%%edx" + * Immediate values in the assembly are preceded by "$" as in "$0x1" + * The final asm parameter are the operation altered non-output registers. + */ +#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ + do { \ + int dummy; \ + asm("1: movl (%1),%%eax;" \ + "movl %%eax,%%edx;" \ + "andl %2,%%edx;" \ + "btsl $0x1,%%edx;" \ + "adcl $0x0,%%edx;" \ + "lock; cmpxchgl %%edx,(%1);" \ + "jnz 1b;" \ + "cmpb $0x3,%%dl;" \ + "sbbl %%eax,%%eax" \ + :"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~1L):"dx"); \ + } while(0) + +#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ + do { \ + int dummy; \ + asm("1: movl (%1),%%eax;" \ + "movl %%eax,%%edx;" \ + "andl %2,%%edx;" \ + "lock; cmpxchgl %%edx,(%1);" \ + "jnz 1b;" \ + "andl $0x1,%%eax" \ + :"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~3L):"dx"); \ + } while(0) + + +/* + * Math helper asm macros + */ +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ + asm("divl %2;" \ + :"=a"(q32), "=d"(r32) \ + :"r"(d32), \ + "0"(n_lo), "1"(n_hi)) + + +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ + asm("shrl $1,%2;" \ + "rcrl $1,%3;" \ + :"=r"(n_hi), "=r"(n_lo) \ + :"0"(n_hi), "1"(n_lo)) + + +#ifndef CONFIG_ACPI_BOOT +#define acpi_lapic 0 +#define acpi_ioapic 0 +#else +#ifdef CONFIG_X86_LOCAL_APIC +extern int acpi_lapic; +#else +#define acpi_lapic 0 +#endif +#ifdef CONFIG_X86_IO_APIC +extern int acpi_ioapic; +#else +#define acpi_ioapic 0 +#endif + +/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ +#define FIX_ACPI_PAGES 4 + +#endif /*CONFIG_ACPI_BOOT*/ + +#ifdef CONFIG_ACPI_SLEEP + +extern unsigned long saved_eip; +extern unsigned long saved_esp; +extern unsigned long saved_ebp; +extern unsigned long saved_ebx; +extern unsigned long saved_esi; +extern unsigned long saved_edi; + +static inline void acpi_save_register_state(unsigned long return_point) +{ + saved_eip = return_point; + asm volatile ("movl %%esp,(%0)" : "=m" (saved_esp)); + asm volatile ("movl %%ebp,(%0)" : "=m" (saved_ebp)); + asm volatile ("movl 
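/*
 * Editor's aside: a C rendering (ours, for illustration only -- the kernel
 * uses the lock;cmpxchgl sequences above) of the ACPI Global Lock protocol
 * those macros implement.  Bit 0 is the pending flag and bit 1 the owned
 * flag; acquire either takes ownership or marks itself pending, and the
 * asm encodes the outcome as Acq = -1/0 via the final sbbl.
 */
#include <stdint.h>

#define GL_PENDING	(1u << 0)
#define GL_OWNED	(1u << 1)

static int acquire_global_lock_sketch(volatile uint32_t *gl)
{
	uint32_t old, new;

	do {
		old = *gl;
		new = (old & ~GL_PENDING) | GL_OWNED;	/* try to own it */
		if (old & GL_OWNED)
			new |= GL_PENDING;		/* busy: queue up */
	} while (__sync_val_compare_and_swap(gl, old, new) != old);

	return !(old & GL_OWNED);	/* nonzero = acquired, 0 = must wait
					 * for the owner's release event */
}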
%%ebx,(%0)" : "=m" (saved_ebx)); + asm volatile ("movl %%edi,(%0)" : "=m" (saved_edi)); + asm volatile ("movl %%esi,(%0)" : "=m" (saved_esi)); +} + +#define acpi_restore_register_state() do {} while (0) + + +/* routines for saving/restoring kernel state */ +extern int acpi_save_state_mem(void); +extern int acpi_save_state_disk(void); +extern void acpi_restore_state_mem(void); + +extern unsigned long acpi_wakeup_address; + +extern void do_suspend_lowlevel_s4bios(int resume); + +/* early initialization routine */ +extern void acpi_reserve_bootmem(void); + +#endif /*CONFIG_ACPI_SLEEP*/ + + +#endif /*__KERNEL__*/ + +#endif /*_ASM_ACPI_H*/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/apicdef.h linux.21rc5-ac2/include/asm-i386/apicdef.h --- linux.21rc5/include/asm-i386/apicdef.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/apicdef.h 2003-04-25 13:33:12.000000000 +0100 @@ -18,6 +18,7 @@ #define GET_APIC_VERSION(x) ((x)&0xFF) #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) #define APIC_INTEGRATED(x) ((x)&0xF0) +#define APIC_XAPIC_SUPPORT(x) ((x)>=0x14) #define APIC_TASKPRI 0x80 #define APIC_TPRI_MASK 0xFF #define APIC_ARBPRI 0x90 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/bitops.h linux.21rc5-ac2/include/asm-i386/bitops.h --- linux.21rc5/include/asm-i386/bitops.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/bitops.h 2003-05-28 21:48:47.000000000 +0100 @@ -6,6 +6,7 @@ */ #include +#include /* * These have to be done with inline assembly: that way the bit-setting @@ -75,6 +76,14 @@ :"=m" (ADDR) :"Ir" (nr)); } + +static __inline__ void __clear_bit(int nr, volatile void * addr) +{ + __asm__ __volatile__( + "btrl %1,%0" + :"=m" (ADDR) + :"Ir" (nr)); +} #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() @@ -284,6 +293,34 @@ } /** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first set bit, not the number of the byte + * containing a bit. + */ +static __inline__ int find_first_bit(void * addr, unsigned size) +{ + int d0, d1; + int res; + + /* This looks at memory. Mark it volatile to tell gcc not to move it around */ + __asm__ __volatile__( + "xorl %%eax,%%eax\n\t" + "repe; scasl\n\t" + "jz 1f\n\t" + "leal -4(%%edi),%%edi\n\t" + "bsfl (%%edi),%%eax\n" + "1:\tsubl %%ebx,%%edi\n\t" + "shll $3,%%edi\n\t" + "addl %%edi,%%eax" + :"=a" (res), "=&c" (d0), "=&D" (d1) + :"1" ((size + 31) >> 5), "2" (addr), "b" (addr)); + return res; +} + +/** * find_next_zero_bit - find the first zero bit in a memory region * @addr: The address to base the search on * @offset: The bitnumber to start searching at @@ -296,7 +333,7 @@ if (bit) { /* - * Look for zero in first byte + * Look for zero in the first 32 bits. 
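/*
 * Editor's aside: a portable reference (ours) for the repe;scasl / bsfl
 * sequence above -- scan 32-bit words until one is nonzero, then bit-scan
 * within it.  uint32_t stands in for i386's 32-bit long; like the asm,
 * "not found" is signalled by a return value >= size.
 */
#include <assert.h>
#include <stdint.h>

static unsigned find_first_bit_ref(const uint32_t *addr, unsigned size)
{
	unsigned i;

	for (i = 0; i < size; i++)
		if (addr[i >> 5] & (1u << (i & 31)))
			return i;
	return size;
}

int main(void)
{
	uint32_t map[2] = { 0, 0x10 };	/* only bit 36 set */

	assert(find_first_bit_ref(map, 64) == 36);
	return 0;
}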
*/ __asm__("bsfl %1,%0\n\t" "jne 1f\n\t" @@ -317,6 +354,39 @@ } /** + * find_next_bit - find the first set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +static __inline__ int find_next_bit (void * addr, int size, int offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 5); + int set = 0, bit = offset & 31, res; + + if (bit) { + /* + * Look for nonzero in the first 32 bits: + */ + __asm__("bsfl %1,%0\n\t" + "jne 1f\n\t" + "movl $32, %0\n" + "1:" + : "=r" (set) + : "r" (*p >> bit)); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr)); + return (offset + set + res); +} + +/** * ffz - find first zero in word. * @word: The word to search * @@ -330,8 +400,47 @@ return word; } +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static __inline__ unsigned long __ffs(unsigned long word) +{ + __asm__("bsfl %1,%0" + :"=r" (word) + :"rm" (word)); + return word; +} + +/* + * fls: find last bit set. + */ + +#define fls(x) generic_fls(x) + #ifdef __KERNEL__ +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is cleared. + */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 32; + if (unlikely(b[2])) + return __ffs(b[2]) + 64; + if (b[3]) + return __ffs(b[3]) + 96; + return __ffs(b[4]) + 128; +} + /** * ffs - find first bit set * @x: the word to search diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/desc.h linux.21rc5-ac2/include/asm-i386/desc.h --- linux.21rc5/include/asm-i386/desc.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/desc.h 2003-04-22 16:44:38.000000000 +0100 @@ -18,23 +18,31 @@ * 9 - APM BIOS support * 10 - APM BIOS support * 11 - APM BIOS support + * 12 - PNPBIOS support + * 13 - PNPBIOS support + * 14 - PNPBIOS support + * 15 - PNPBIOS support + * 16 - PNPBIOS support + * 17 - not used + * 18 - not used + * 19 - not used * * The TSS+LDT descriptors are spread out a bit so that every CPU * has an exclusive cacheline for the per-CPU TSS and LDT: * - * 12 - CPU#0 TSS <-- new cacheline - * 13 - CPU#0 LDT - * 14 - not used - * 15 - not used - * 16 - CPU#1 TSS <-- new cacheline - * 17 - CPU#1 LDT - * 18 - not used - * 19 - not used + * 20 - CPU#0 TSS <-- new cacheline + * 21 - CPU#0 LDT + * 22 - not used + * 23 - not used + * 24 - CPU#1 TSS <-- new cacheline + * 25 - CPU#1 LDT + * 26 - not used + * 27 - not used * ... NR_CPUS per-CPU TSS+LDT's if on SMP * * Entry into gdt where to find first TSS. 
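/*
 * Editor's aside: the PNPBIOS descriptors push the per-CPU TSS/LDT block
 * from gdt slot 12 up to slot 20.  A small sketch (ours) of the arithmetic
 * behind __TSS()/__LDT(), assuming the usual GDT selector encoding
 * sel = index << 3:
 */
#include <stdio.h>

#define FIRST_TSS_ENTRY	20		/* was 12 before this patch */
#define TSS_ENTRY(n)	(((n) << 2) + FIRST_TSS_ENTRY)
#define LDT_ENTRY(n)	(TSS_ENTRY(n) + 1)

int main(void)
{
	int cpu;

	/* prints gdt[20]/gdt[21] for CPU#0 and gdt[24]/gdt[25] for CPU#1,
	 * matching the layout comment above */
	for (cpu = 0; cpu < 2; cpu++)
		printf("CPU#%d: TSS gdt[%d] sel 0x%02x, LDT gdt[%d] sel 0x%02x\n",
		       cpu, TSS_ENTRY(cpu), TSS_ENTRY(cpu) << 3,
		       LDT_ENTRY(cpu), LDT_ENTRY(cpu) << 3);
	return 0;
}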
*/ -#define __FIRST_TSS_ENTRY 12 +#define __FIRST_TSS_ENTRY 20 #define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY+1) #define __TSS(n) (((n)<<2) + __FIRST_TSS_ENTRY) @@ -79,13 +87,13 @@ /* * load one particular LDT into the current CPU */ -static inline void load_LDT (struct mm_struct *mm) +static inline void load_LDT (mm_context_t *pc) { int cpu = smp_processor_id(); - void *segments = mm->context.segments; - int count = LDT_ENTRIES; + void *segments = pc->ldt; + int count = pc->size; - if (!segments) { + if (!count) { segments = &default_ldt[0]; count = 5; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/edd.h linux.21rc5-ac2/include/asm-i386/edd.h --- linux.21rc5/include/asm-i386/edd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/edd.h 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,172 @@ +/* + * linux/include/asm-i386/edd.h + * Copyright (C) 2002 Dell Computer Corporation + * by Matt Domsch + * + * structures and definitions for the int 13h, ax={41,48}h + * BIOS Enhanced Disk Drive Services + * This is based on the T13 group document D1572 Revision 0 (August 14 2002) + * available at http://www.t13.org/docs2002/d1572r0.pdf. It is + * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf + * + * In a nutshell, arch/i386/boot/setup.S populates a scratch table + * in the empty_zero_block that contains a list of BIOS-enumerated + * boot devices. + * In arch/i386/kernel/setup.c, this information is + * transferred into the edd structure, and in arch/i386/kernel/edd.c, that + * information is used to identify BIOS boot disk. The code in setup.S + * is very sensitive to the size of these structures. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _ASM_I386_EDD_H +#define _ASM_I386_EDD_H + +#define EDDNR 0x1e9 /* addr of number of edd_info structs at EDDBUF + in empty_zero_block - treat this as 1 byte */ +#define EDDBUF 0x600 /* addr of edd_info structs in empty_zero_block */ +#define EDDMAXNR 6 /* number of edd_info structs starting at EDDBUF */ +#define EDDEXTSIZE 4 /* change these if you muck with the structures */ +#define EDDPARMSIZE 74 +#define CHECKEXTENSIONSPRESENT 0x41 +#define GETDEVICEPARAMETERS 0x48 +#define EDDMAGIC1 0x55AA +#define EDDMAGIC2 0xAA55 + +#ifndef __ASSEMBLY__ + +#define EDD_EXT_FIXED_DISK_ACCESS (1 << 0) +#define EDD_EXT_DEVICE_LOCKING_AND_EJECTING (1 << 1) +#define EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT (1 << 2) +#define EDD_EXT_64BIT_EXTENSIONS (1 << 3) + +#define EDD_INFO_DMA_BOUNDRY_ERROR_TRANSPARENT (1 << 0) +#define EDD_INFO_GEOMETRY_VALID (1 << 1) +#define EDD_INFO_REMOVABLE (1 << 2) +#define EDD_INFO_WRITE_VERIFY (1 << 3) +#define EDD_INFO_MEDIA_CHANGE_NOTIFICATION (1 << 4) +#define EDD_INFO_LOCKABLE (1 << 5) +#define EDD_INFO_NO_MEDIA_PRESENT (1 << 6) +#define EDD_INFO_USE_INT13_FN50 (1 << 7) + +struct edd_device_params { + u16 length; + u16 info_flags; + u32 num_default_cylinders; + u32 num_default_heads; + u32 sectors_per_track; + u64 number_of_sectors; + u16 bytes_per_sector; + u32 dpte_ptr; /* 0xFFFFFFFF for our purposes */ + u16 key; /* = 0xBEDD */ + u8 device_path_info_length; /* = 44 */ + u8 reserved2; + u16 reserved3; + u8 host_bus_type[4]; + u8 interface_type[8]; + union { + struct { + u16 base_address; + u16 reserved1; + u32 reserved2; + } __attribute__ ((packed)) isa; + struct { + u8 bus; + u8 slot; + u8 function; + u8 channel; + u32 reserved; + } __attribute__ ((packed)) pci; + /* pcix is same as pci */ + struct { + u64 reserved; + } __attribute__ ((packed)) ibnd; + struct { + u64 reserved; + } __attribute__ ((packed)) xprs; + struct { + u64 reserved; + } __attribute__ ((packed)) htpt; + struct { + u64 reserved; + } __attribute__ ((packed)) unknown; + } interface_path; + union { + struct { + u8 device; + u8 reserved1; + u16 reserved2; + u32 reserved3; + u64 reserved4; + } __attribute__ ((packed)) ata; + struct { + u8 device; + u8 lun; + u8 reserved1; + u8 reserved2; + u32 reserved3; + u64 reserved4; + } __attribute__ ((packed)) atapi; + struct { + u16 id; + u64 lun; + u16 reserved1; + u32 reserved2; + } __attribute__ ((packed)) scsi; + struct { + u64 serial_number; + u64 reserved; + } __attribute__ ((packed)) usb; + struct { + u64 eui; + u64 reserved; + } __attribute__ ((packed)) i1394; + struct { + u64 wwid; + u64 lun; + } __attribute__ ((packed)) fibre; + struct { + u64 identity_tag; + u64 reserved; + } __attribute__ ((packed)) i2o; + struct { + u32 array_number; + u32 reserved1; + u64 reserved2; + } __attribute((packed)) raid; + struct { + u8 device; + u8 reserved1; + u16 reserved2; + u32 reserved3; + u64 reserved4; + } __attribute__ ((packed)) sata; + struct { + u64 reserved1; + u64 reserved2; + } __attribute__ ((packed)) unknown; + } device_path; + u8 reserved4; + u8 checksum; +} __attribute__ ((packed)); + +struct edd_info { + u8 device; + u8 version; + u16 interface_support; + struct edd_device_params params; +} __attribute__ ((packed)); + +extern struct edd_info edd[EDDMAXNR]; +extern unsigned char eddnr; +#endif /*!__ASSEMBLY__ */ + +#endif /* _ASM_I386_EDD_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/fixmap.h linux.21rc5-ac2/include/asm-i386/fixmap.h --- linux.21rc5/include/asm-i386/fixmap.h 2003-05-28 
15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/fixmap.h 2003-05-28 21:48:47.000000000 +0100 @@ -15,6 +15,7 @@ #include #include +#include #include #include #ifdef CONFIG_HIGHMEM @@ -57,7 +58,7 @@ #endif #ifdef CONFIG_X86_VISWS_APIC FIX_CO_CPU, /* Cobalt timer */ - FIX_CO_APIC, /* Cobalt APIC Redirection Table */ + FIX_CO_APIC, /* Cobalt APIC Redirection Table */ FIX_LI_PCIA, /* Lithium PCI Bridge A */ FIX_LI_PCIB, /* Lithium PCI Bridge B */ #endif @@ -71,6 +72,10 @@ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #endif +#ifdef CONFIG_ACPI_BOOT + FIX_ACPI_BEGIN, + FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, +#endif __end_of_permanent_fixed_addresses, /* temporary boot-time mappings, used before ioremap() is functional */ #define NR_FIX_BTMAPS 16 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/hardirq.h linux.21rc5-ac2/include/asm-i386/hardirq.h --- linux.21rc5/include/asm-i386/hardirq.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/hardirq.h 2003-05-29 01:44:06.000000000 +0100 @@ -12,7 +12,11 @@ unsigned int __local_bh_count; unsigned int __syscall_count; struct task_struct * __ksoftirqd_task; /* waitqueue is too large */ + unsigned long idle_timestamp; unsigned int __nmi_count; /* arch dependent */ +#if CONFIG_X86_LOCAL_APIC + unsigned int apic_timer_irqs; /* arch dependent */ +#endif } ____cacheline_aligned irq_cpustat_t; #include /* Standard mappings for irq_cpustat_t above */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/highmem.h linux.21rc5-ac2/include/asm-i386/highmem.h --- linux.21rc5/include/asm-i386/highmem.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/highmem.h 2003-05-29 01:44:06.000000000 +0100 @@ -46,7 +46,7 @@ * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. */ -#define PKMAP_BASE (0xfe000000UL) +#define PKMAP_BASE (0xff800000UL) #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/hw_irq.h linux.21rc5-ac2/include/asm-i386/hw_irq.h --- linux.21rc5/include/asm-i386/hw_irq.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/hw_irq.h 2003-05-29 01:44:06.000000000 +0100 @@ -13,8 +13,10 @@ */ #include +#include #include #include +#include /* * IDT vectors usable for external interrupt sources start @@ -213,7 +215,7 @@ atomic_inc((atomic_t *)&prof_buffer[eip]); } -#ifdef CONFIG_SMP /*more of this file should probably be ifdefed SMP */ +#if defined(CONFIG_X86_IO_APIC) static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { if (IO_APIC_IRQ(i)) send_IPI_self(IO_APIC_VECTOR(i)); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/io_apic.h linux.21rc5-ac2/include/asm-i386/io_apic.h --- linux.21rc5/include/asm-i386/io_apic.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/io_apic.h 2003-05-29 00:38:18.000000000 +0100 @@ -118,15 +118,6 @@ } /* - * Re-write a value: to be used for read-modify-write - * cycles where the read already set up the index register. 
- */ -static inline void io_apic_modify(unsigned int apic, unsigned int value) -{ - *(IO_APIC_BASE(apic)+4) = value; -} - -/* * Synchronize the IO-APIC and the CPU by doing * a dummy read from the IO-APIC */ @@ -144,6 +135,13 @@ */ #define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup) +#ifdef CONFIG_ACPI_BOOT +extern int io_apic_get_unique_id (int ioapic, int apic_id); +extern int io_apic_get_version (int ioapic); +extern int io_apic_get_redir_entries (int ioapic); +extern int io_apic_set_pci_routing (int ioapic, int pin, int irq); +#endif + #else /* !CONFIG_X86_IO_APIC */ #define io_apic_assign_pci_irqs 0 #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/ioctls.h linux.21rc5-ac2/include/asm-i386/ioctls.h --- linux.21rc5/include/asm-i386/ioctls.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/ioctls.h 2003-04-22 16:44:38.000000000 +0100 @@ -67,6 +67,7 @@ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define FIOQSIZE 0x5460 /* Used for packet mode */ #define TIOCPKT_DATA 0 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/ipc.h linux.21rc5-ac2/include/asm-i386/ipc.h --- linux.21rc5/include/asm-i386/ipc.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/ipc.h 2003-04-22 16:44:38.000000000 +0100 @@ -14,6 +14,7 @@ #define SEMOP 1 #define SEMGET 2 #define SEMCTL 3 +#define SEMTIMEDOP 4 #define MSGSND 11 #define MSGRCV 12 #define MSGGET 13 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/mmu_context.h linux.21rc5-ac2/include/asm-i386/mmu_context.h --- linux.21rc5/include/asm-i386/mmu_context.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/mmu_context.h 2003-05-29 01:44:07.000000000 +0100 @@ -7,10 +7,12 @@ #include /* - * possibly do the LDT unload here? + * hooks to add arch specific data into the mm struct. + * Note that destroy_context is called even if init_new_context + * fails. */ -#define destroy_context(mm) do { } while(0) -#define init_new_context(tsk,mm) 0 +int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +void destroy_context(struct mm_struct *mm); #ifdef CONFIG_SMP @@ -27,22 +29,21 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu) { - if (prev != next) { + if (likely(prev != next)) { /* stop flush ipis for the previous mm */ clear_bit(cpu, &prev->cpu_vm_mask); - /* - * Re-load LDT if necessary - */ - if (prev->context.segments != next->context.segments) - load_LDT(next); #ifdef CONFIG_SMP cpu_tlbstate[cpu].state = TLBSTATE_OK; cpu_tlbstate[cpu].active_mm = next; #endif set_bit(cpu, &next->cpu_vm_mask); - set_bit(cpu, &next->context.cpuvalid); /* Re-load page tables */ load_cr3(next->pgd); + /* load_LDT, if either the previous or next thread + * has a non-default LDT. + */ + if (unlikely(next->context.size+prev->context.size)) + load_LDT(&next->context); } #ifdef CONFIG_SMP else { @@ -54,9 +55,8 @@ * tlb flush IPI delivery. We must reload %cr3. 
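/*
 * Editor's aside: in the prev != next path above, load_LDT is now skipped
 * whenever both mms use the default LDT (context.size == 0 on both sides),
 * the overwhelmingly common case.  A minimal sketch (ours) of that
 * predicate; the kernel tests the sum because both sizes are non-negative:
 */
#include <assert.h>
#include <stdbool.h>

struct ctx_sketch { int size; };	/* stand-in for mm_context_t */

static bool need_ldt_load(const struct ctx_sketch *prev,
			  const struct ctx_sketch *next)
{
	return (next->size + prev->size) != 0;
}

int main(void)
{
	struct ctx_sketch none = { 0 }, priv = { 8192 };

	assert(!need_ldt_load(&none, &none));	/* common case: no lldt */
	assert(need_ldt_load(&priv, &none));	/* leaving a private LDT */
	assert(need_ldt_load(&none, &priv));	/* entering one */
	return 0;
}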
*/ load_cr3(next->pgd); + load_LDT(&next->context); } - if (!test_and_set_bit(cpu, &next->context.cpuvalid)) - load_LDT(next); } #endif } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/mmu.h linux.21rc5-ac2/include/asm-i386/mmu.h --- linux.21rc5/include/asm-i386/mmu.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/mmu.h 2003-04-22 16:44:38.000000000 +0100 @@ -4,10 +4,13 @@ /* * The i386 doesn't have a mmu context, but * we put the segment information here. + * + * cpu_vm_mask is used to optimize ldt flushing. */ typedef struct { - void *segments; - unsigned long cpuvalid; + int size; + struct semaphore sem; + void * ldt; } mm_context_t; #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/mpspec.h linux.21rc5-ac2/include/asm-i386/mpspec.h --- linux.21rc5/include/asm-i386/mpspec.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/mpspec.h 2003-05-28 21:48:47.000000000 +0100 @@ -219,5 +219,19 @@ extern int pic_mode; extern int using_apic_timer; +#ifdef CONFIG_ACPI_BOOT +extern void mp_register_lapic (u8 id, u8 enabled); +extern void mp_register_lapic_address (u64 address); + +#ifdef CONFIG_X86_IO_APIC +extern void mp_register_ioapic (u8 id, u32 address, u32 irq_base); +extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 global_irq); +extern void mp_config_acpi_legacy_irqs (void); +extern void mp_config_ioapic_for_sci(int irq); +extern void mp_parse_prt (void); +#endif /*CONFIG_X86_IO_APIC*/ + +#endif /*CONFIG_ACPI_BOOT*/ + #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/pci.h linux.21rc5-ac2/include/asm-i386/pci.h --- linux.21rc5/include/asm-i386/pci.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/pci.h 2003-05-29 01:44:06.000000000 +0100 @@ -19,6 +19,11 @@ #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM (pci_mem_start) +void pcibios_config_init(void); +struct pci_bus * pcibios_scan_root(int bus); +extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value); +extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value); + void pcibios_set_master(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq); struct irq_routing_table *pcibios_get_irq_routing_table(void); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/pgalloc.h linux.21rc5-ac2/include/asm-i386/pgalloc.h --- linux.21rc5/include/asm-i386/pgalloc.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/pgalloc.h 2003-05-29 01:44:06.000000000 +0100 @@ -224,7 +224,7 @@ { struct mm_struct *active_mm; int state; -}; +} ____cacheline_aligned; extern struct tlb_state cpu_tlbstate[NR_CPUS]; #endif /* CONFIG_SMP */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/processor.h linux.21rc5-ac2/include/asm-i386/processor.h --- linux.21rc5/include/asm-i386/processor.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/processor.h 2003-05-28 21:48:47.000000000 +0100 @@ -437,9 +437,12 @@ */ extern int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); -/* Copy and release all segment info associated with a VM */ -extern void copy_segments(struct task_struct *p, struct mm_struct * mm); -extern void release_segments(struct mm_struct * mm); +/* Copy and release all segment info associated 
with a VM + * Unusable due to lack of error handling, use {init_new,destroy}_context + * instead. + */ +static inline void copy_segments(struct task_struct *p, struct mm_struct * mm) { } +static inline void release_segments(struct mm_struct * mm) { } /* * Return saved PC of a blocked thread. diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/save_state.h linux.21rc5-ac2/include/asm-i386/save_state.h --- linux.21rc5/include/asm-i386/save_state.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/save_state.h 2003-05-29 01:44:07.000000000 +0100 @@ -0,0 +1,212 @@ +#ifndef __ASM_I386_SAVE_STATE_H +#define __ASM_I386_SAVE_STATE_H + +/* + * Copyright 2001-2002 Pavel Machek + * Based on code + * Copyright 2001 Patrick Mochel + */ +#include +#include + +/* image of the saved processor state */ +struct saved_context { + u32 eax, ebx, ecx, edx; + u32 esp, ebp, esi, edi; + u16 es, fs, gs, ss; + u32 cr0, cr2, cr3, cr4; + u16 gdt_pad; + u16 gdt_limit; + u32 gdt_base; + u16 idt_pad; + u16 idt_limit; + u32 idt_base; + u16 ldt; + u16 tss; + u32 tr; + u32 safety; + u32 return_address; + u32 eflags; +} __attribute__((packed)); + +static struct saved_context saved_context; + +#define loaddebug(thread,register) \ + __asm__("movl %0,%%db" #register \ + : /* no output */ \ + :"r" ((thread)->debugreg[register])) + + +/* + * save_processor_context + * + * Save the state of the processor before we go to sleep. + * + * return_stack is the value of the stack pointer (%esp) as the caller sees it. + * A good way could not be found to obtain it from here (don't want to make _too_ + * many assumptions about the layout of the stack this far down.) Also, the + * handy little __builtin_frame_pointer(level) where level > 0, is blatantly + * buggy - it returns the value of the stack at the proper location, not the + * location, like it should (as of gcc 2.91.66) + * + * Note that the context and timing of this function is pretty critical. + * With a minimal amount of things going on in the caller and in here, gcc + * does a good job of being just a dumb compiler. Watch the assembly output + * if anything changes, though, and make sure everything is going in the right + * place. + */ +static inline void save_processor_context (void) +{ + kernel_fpu_begin(); + + /* + * descriptor tables + */ + asm volatile ("sgdt (%0)" : "=m" (saved_context.gdt_limit)); + asm volatile ("sidt (%0)" : "=m" (saved_context.idt_limit)); + asm volatile ("sldt (%0)" : "=m" (saved_context.ldt)); + asm volatile ("str (%0)" : "=m" (saved_context.tr)); + + /* + * save the general registers. + * note that gcc has constructs to specify output of certain registers, + * but they're not used here, because it assumes that you want to modify + * those registers, so it tries to be smart and save them beforehand. + * It's really not necessary, and kinda fishy (check the assembly output), + * so it's avoided. 
+ */ + asm volatile ("movl %%esp, (%0)" : "=m" (saved_context.esp)); + asm volatile ("movl %%eax, (%0)" : "=m" (saved_context.eax)); + asm volatile ("movl %%ebx, (%0)" : "=m" (saved_context.ebx)); + asm volatile ("movl %%ecx, (%0)" : "=m" (saved_context.ecx)); + asm volatile ("movl %%edx, (%0)" : "=m" (saved_context.edx)); + asm volatile ("movl %%ebp, (%0)" : "=m" (saved_context.ebp)); + asm volatile ("movl %%esi, (%0)" : "=m" (saved_context.esi)); + asm volatile ("movl %%edi, (%0)" : "=m" (saved_context.edi)); + + /* + * segment registers + */ + asm volatile ("movw %%es, %0" : "=r" (saved_context.es)); + asm volatile ("movw %%fs, %0" : "=r" (saved_context.fs)); + asm volatile ("movw %%gs, %0" : "=r" (saved_context.gs)); + asm volatile ("movw %%ss, %0" : "=r" (saved_context.ss)); + + /* + * control registers + */ + asm volatile ("movl %%cr0, %0" : "=r" (saved_context.cr0)); + asm volatile ("movl %%cr2, %0" : "=r" (saved_context.cr2)); + asm volatile ("movl %%cr3, %0" : "=r" (saved_context.cr3)); + asm volatile ("movl %%cr4, %0" : "=r" (saved_context.cr4)); + + /* + * eflags + */ + asm volatile ("pushfl ; popl (%0)" : "=m" (saved_context.eflags)); +} + +static void fix_processor_context(void) +{ + int nr = smp_processor_id(); + struct tss_struct * t = &init_tss[nr]; + + set_tss_desc(nr,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */ + gdt_table[__TSS(nr)].b &= 0xfffffdff; + + load_TR(nr); /* This does ltr */ + __load_LDT(nr); /* This does lldt */ + + /* + * Now maybe reload the debug registers + */ + if (current->thread.debugreg[7]){ + loaddebug(&current->thread, 0); + loaddebug(&current->thread, 1); + loaddebug(&current->thread, 2); + loaddebug(&current->thread, 3); + /* no 4 and 5 */ + loaddebug(&current->thread, 6); + loaddebug(&current->thread, 7); + } + +} + +static void +do_fpu_end(void) +{ + /* restore FPU regs if necessary */ + /* Do it out of line so that gcc does not move cr0 load to some stupid place */ + kernel_fpu_end(); +} + +/* + * restore_processor_context + * + * Restore the processor context as it was before we went to sleep + * - descriptor tables + * - control registers + * - segment registers + * - flags + * + * Note that it is critical that this function is declared inline. + * It was separated out from restore_state to make that function + * a little clearer, but it needs to be inlined because we won't have a + * stack when we get here (so we can't push a return address). + */ +static inline void restore_processor_context (void) +{ + /* + * first restore %ds, so we can access our data properly + */ + asm volatile (".align 4"); + asm volatile ("movw %0, %%ds" :: "r" ((u16)__KERNEL_DS)); + + + /* + * control registers + */ + asm volatile ("movl %0, %%cr4" :: "r" (saved_context.cr4)); + asm volatile ("movl %0, %%cr3" :: "r" (saved_context.cr3)); + asm volatile ("movl %0, %%cr2" :: "r" (saved_context.cr2)); + asm volatile ("movl %0, %%cr0" :: "r" (saved_context.cr0)); + + /* + * segment registers + */ + asm volatile ("movw %0, %%es" :: "r" (saved_context.es)); + asm volatile ("movw %0, %%fs" :: "r" (saved_context.fs)); + asm volatile ("movw %0, %%gs" :: "r" (saved_context.gs)); + asm volatile ("movw %0, %%ss" :: "r" (saved_context.ss)); + + /* + * the other general registers + * + * note that even though gcc has constructs to specify memory + * input into certain registers, it will try to be too smart + * and save them at the beginning of the function. This is esp.
+ * bad since we don't have a stack set up when we enter, and we + * want to preserve the values on exit. So, we set them manually. + */ + asm volatile ("movl %0, %%esp" :: "m" (saved_context.esp)); + asm volatile ("movl %0, %%ebp" :: "m" (saved_context.ebp)); + asm volatile ("movl %0, %%eax" :: "m" (saved_context.eax)); + asm volatile ("movl %0, %%ebx" :: "m" (saved_context.ebx)); + asm volatile ("movl %0, %%ecx" :: "m" (saved_context.ecx)); + asm volatile ("movl %0, %%edx" :: "m" (saved_context.edx)); + asm volatile ("movl %0, %%esi" :: "m" (saved_context.esi)); + asm volatile ("movl %0, %%edi" :: "m" (saved_context.edi)); + + /* + * now restore the descriptor tables to their proper values + */ + asm volatile ("lgdt (%0)" :: "m" (saved_context.gdt_limit)); + asm volatile ("lidt (%0)" :: "m" (saved_context.idt_limit)); + asm volatile ("lldt (%0)" :: "m" (saved_context.ldt)); + + fix_processor_context(); + do_fpu_end(); +} + +#endif + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/smpboot.h linux.21rc5-ac2/include/asm-i386/smpboot.h --- linux.21rc5/include/asm-i386/smpboot.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/smpboot.h 2003-04-22 19:06:32.000000000 +0100 @@ -21,7 +21,19 @@ /* * Can't recognize Summit xAPICs at present, so use the OEM ID. */ - if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(prod, "VIGIL SMP", 9)){ + if (!strncmp(oem, "IBM ENSW", 8) && + (!strncmp(prod, "VIGIL SMP", 9) || + !strncmp(prod, "EXA", 3) || + !strncmp(prod, "RUTHLESS", 8))){ + clustered_apic_mode = CLUSTERED_APIC_XAPIC; + apic_broadcast_id = APIC_BROADCAST_ID_XAPIC; + int_dest_addr_mode = APIC_DEST_PHYSICAL; + int_delivery_mode = dest_Fixed; + esr_disable = 1; + /*Start cyclone clock*/ + cyclone_setup(0); + } + else if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(prod, "RUTHLESS SMP", 9)){ clustered_apic_mode = CLUSTERED_APIC_XAPIC; apic_broadcast_id = APIC_BROADCAST_ID_XAPIC; int_dest_addr_mode = APIC_DEST_PHYSICAL; @@ -116,6 +128,6 @@ return cpu_online_map; } #else -#define target_cpus() (cpu_online_map) +#define target_cpus() (0xFF) #endif #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/smp.h linux.21rc5-ac2/include/asm-i386/smp.h --- linux.21rc5/include/asm-i386/smp.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/smp.h 2003-05-29 01:44:06.000000000 +0100 @@ -40,6 +40,7 @@ extern void smp_flush_tlb(void); extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); extern void smp_send_reschedule(int cpu); +extern void smp_send_reschedule_all(void); extern void smp_invalidate_rcv(void); /* Process an NMI */ extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); @@ -58,6 +59,8 @@ return cpu; } +#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) + /* * Some lowlevel functions might want to know about * the real APIC ID <-> CPU # mapping. @@ -81,7 +84,7 @@ * so this is correct in the x86 case. */ -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) static __inline int hard_smp_processor_id(void) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-i386/system.h linux.21rc5-ac2/include/asm-i386/system.h --- linux.21rc5/include/asm-i386/system.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-i386/system.h 2003-05-28 21:48:47.000000000 +0100 @@ -12,25 +12,22 @@ struct task_struct; /* one of the stranger aspects of C forward declarations.. 
*/ extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); -#define prepare_to_switch() do { } while(0) #define switch_to(prev,next,last) do { \ asm volatile("pushl %%esi\n\t" \ "pushl %%edi\n\t" \ "pushl %%ebp\n\t" \ "movl %%esp,%0\n\t" /* save ESP */ \ - "movl %3,%%esp\n\t" /* restore ESP */ \ + "movl %2,%%esp\n\t" /* restore ESP */ \ "movl $1f,%1\n\t" /* save EIP */ \ - "pushl %4\n\t" /* restore EIP */ \ + "pushl %3\n\t" /* restore EIP */ \ "jmp __switch_to\n" \ "1:\t" \ "popl %%ebp\n\t" \ "popl %%edi\n\t" \ "popl %%esi\n\t" \ - :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ - "=b" (last) \ + :"=m" (prev->thread.esp),"=m" (prev->thread.eip) \ :"m" (next->thread.esp),"m" (next->thread.eip), \ - "a" (prev), "d" (next), \ - "b" (prev)); \ + "a" (prev), "d" (next)); \ } while (0) #define _set_base(addr,base) do { unsigned long __pr; \ @@ -323,18 +320,19 @@ /* used in the idle loop; sti takes one instruction cycle to complete */ #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") -#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0); -#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0); +#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0); +#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0); /* For spinlocks etc */ #if 0 #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory") -#define local_irq_set(x) __asm__ __volatile__("pushfl ; popl %0 ; sti":"=g" (x): /* no input */ :"memory") -#else -#define local_irq_save(x) __save_and_cli(x) -#define local_irq_set(x) __save_and_sti(x) +#define local_irq_set(x) __asm__ __volatile__("pushfl ; popl %0 ; sti":"=g" (x): /* no input */ :"memory") +#else +#define local_irq_save(x) __save_and_cli(x) +#define local_irq_set(x) __save_and_sti(x) #endif + #define local_irq_restore(x) __restore_flags(x) #define local_irq_disable() __cli() #define local_irq_enable() __sti() @@ -349,8 +347,8 @@ #define sti() __global_sti() #define save_flags(x) ((x)=__global_save_flags()) #define restore_flags(x) __global_restore_flags(x) -#define save_and_cli(x) do { save_flags(x); cli(); } while(0); -#define save_and_sti(x) do { save_flags(x); sti(); } while(0); +#define save_and_cli(x) do { save_flags(x); cli(); } while(0) +#define save_and_sti(x) do { save_flags(x); sti(); } while(0) #else @@ -375,5 +373,6 @@ #define BROKEN_ACPI_Sx 0x0001 #define BROKEN_INIT_AFTER_S1 0x0002 +#define BROKEN_PNP_BIOS 0x0004 #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-ia64/ioctls.h linux.21rc5-ac2/include/asm-ia64/ioctls.h --- linux.21rc5/include/asm-ia64/ioctls.h 2003-05-28 15:07:41.000000000 +0100 +++ linux.21rc5-ac2/include/asm-ia64/ioctls.h 2003-04-22 16:44:38.000000000 +0100 @@ -72,6 +72,7 @@ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define FIOQSIZE 0x5460 /* Used for packet mode */ #define TIOCPKT_DATA 0 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-ia64/unistd.h linux.21rc5-ac2/include/asm-ia64/unistd.h --- linux.21rc5/include/asm-ia64/unistd.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-ia64/unistd.h 2003-04-22 16:44:38.000000000 +0100 @@ -223,6 +223,7 @@ #define __NR_security 1233 /* 1234-1235: reserved for {alloc,free}_hugepages */ /* 1238-1242: reserved 
for io_{setup,destroy,getevents,submit,cancel} */ +#define __NR_semtimedop 1247 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-m68k/ioctls.h linux.21rc5-ac2/include/asm-m68k/ioctls.h --- linux.21rc5/include/asm-m68k/ioctls.h 2003-05-28 15:07:38.000000000 +0100 +++ linux.21rc5-ac2/include/asm-m68k/ioctls.h 2003-04-22 16:44:38.000000000 +0100 @@ -65,6 +65,7 @@ #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define FIOQSIZE 0x545E /* Used for packet mode */ #define TIOCPKT_DATA 0 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-mips/sibyte/swarm_ide.h linux.21rc5-ac2/include/asm-mips/sibyte/swarm_ide.h --- linux.21rc5/include/asm-mips/sibyte/swarm_ide.h 2003-05-28 15:07:38.000000000 +0100 +++ linux.21rc5-ac2/include/asm-mips/sibyte/swarm_ide.h 2003-04-22 16:44:38.000000000 +0100 @@ -25,8 +25,23 @@ #define SWARM_IDE_REG(pcaddr) (SWARM_IDE_BASE + ((pcaddr)<<5)) #define SWARM_IDE_INT (K_INT_GPIO_4) -extern ide_ideproc_t swarm_ideproc; +#ifdef __IDE_SWARM_C +static inline void swarm_outb (u8 val, unsigned long port) +{ + *(volatile u8 *)(mips_io_port_base + (port)) = val; +} + +static inline void swarm_outw (u16 val, unsigned long port) +{ + *(volatile u16 *)(mips_io_port_base + (port)) = val; +} + +static inline void swarm_outl (u32 val, unsigned long port) +{ + *(volatile u32 *)(mips_io_port_base + (port)) = val; +} +#else /* !__IDE_SWARM_C */ #define swarm_outb(val,port) \ do { \ *(volatile u8 *)(mips_io_port_base + (port)) = val; \ @@ -41,6 +56,7 @@ do { \ *(volatile u32 *)(mips_io_port_base + (port)) = val;\ } while(0) +#endif /* __IDE_SWARM_C */ static inline unsigned char swarm_inb(unsigned long port) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-mips64/sibyte/swarm_ide.h linux.21rc5-ac2/include/asm-mips64/sibyte/swarm_ide.h --- linux.21rc5/include/asm-mips64/sibyte/swarm_ide.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-mips64/sibyte/swarm_ide.h 2003-04-22 16:44:38.000000000 +0100 @@ -25,8 +25,22 @@ #define SWARM_IDE_REG(pcaddr) (SWARM_IDE_BASE + ((pcaddr)<<5)) #define SWARM_IDE_INT (K_INT_GPIO_4) -extern ide_ideproc_t swarm_ideproc; +#ifdef __IDE_SWARM_C +static inline void swarm_outb (u8 val, unsigned long port) +{ + *(volatile u8 *)(mips_io_port_base + (port)) = val; +} + +static inline void swarm_outw (u16 val, unsigned long port) +{ + *(volatile u16 *)(mips_io_port_base + (port)) = val; +} +static inline void swarm_outl (u32 val, unsigned long port) +{ + *(volatile u32 *)(mips_io_port_base + (port)) = val; +} +#else /* !__IDE_SWARM_C */ #define swarm_outb(val,port) \ do { \ *(volatile u8 *)(mips_io_port_base + (port)) = val; \ @@ -41,6 +55,7 @@ do { \ *(volatile u32 *)(mips_io_port_base + (port)) = val;\ } while(0) +#endif /* __IDE_SWARM_C */ static inline unsigned char swarm_inb(unsigned long port) { @@ -57,7 +72,6 @@ return (*(volatile u32 *)(mips_io_port_base + port)); } - static inline void swarm_outsb(unsigned long port, void *addr, unsigned int count) { while (count--) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-ppc/bitops.h linux.21rc5-ac2/include/asm-ppc/bitops.h --- linux.21rc5/include/asm-ppc/bitops.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-ppc/bitops.h 2003-04-22 16:44:38.000000000 +0100 @@ -7,6 +7,7 @@ 
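/*
 * Editor's aside: with __IDE_SWARM_C defined, the swarm write helpers above
 * become static inline functions instead of macros.  Since the hunk also
 * drops the swarm_ideproc hook, our reading is that real functions are
 * wanted because, unlike macros, they type-check their arguments and have
 * addresses that can be stored in a driver method table.  A reduced
 * illustration with a hypothetical out8 helper:
 */
#include <stdint.h>

static volatile uint8_t mmio_sketch[0x10000];	/* stand-in for I/O space */

#define OUT8_MACRO(val, port) \
	do { mmio_sketch[(port)] = (val); } while (0)

static inline void out8_inline(uint8_t val, unsigned long port)
{
	mmio_sketch[port] = val;
}

/* a function pointer can only be taken from the inline-function form;
 * there is no address to take from OUT8_MACRO */
static void (*out8_hook)(uint8_t, unsigned long) = out8_inline;

int main(void)
{
	out8_hook(0xec, 0x1f0);		/* dispatch through the pointer */
	return mmio_sketch[0x1f0] == 0xec ? 0 : 1;
}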
#define _PPC_BITOPS_H #include +#include #include #include @@ -241,6 +242,13 @@ return __ilog2(x & -x); } +#ifdef __KERNEL__ + +static inline int __ffs(unsigned long x) +{ + return __ilog2(x & -x); +} + /* * ffs: find first bit set. This is defined the same way as * the libc and compiler builtin ffs routines, therefore @@ -252,6 +260,18 @@ } /* + * fls: find last (most-significant) bit set. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static __inline__ int fls(unsigned int x) +{ + int lz; + + asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); + return 32 - lz; +} + +/* * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ @@ -260,6 +280,25 @@ #define hweight16(x) generic_hweight16(x) #define hweight8(x) generic_hweight8(x) +#endif /* __KERNEL__ */ + +/* + * Find the first bit set in a 140-bit bitmap. + * The first 100 bits are unlikely to be set. + */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 32; + if (unlikely(b[2])) + return __ffs(b[2]) + 64; + if (b[3]) + return __ffs(b[3]) + 96; + return __ffs(b[4]) + 128; +} + /* * This implementation of find_{first,next}_zero_bit was stolen from * Linus' asm-alpha/bitops.h. @@ -306,6 +345,8 @@ } +#ifdef __KERNEL__ + #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, addr) #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, addr) @@ -369,5 +410,7 @@ #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) +#endif /* __KERNEL__ */ + #endif /* _PPC_BITOPS_H */ #endif /* __KERNEL__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-ppc/delay.h linux.21rc5-ac2/include/asm-ppc/delay.h --- linux.21rc5/include/asm-ppc/delay.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-ppc/delay.h 2003-05-27 14:16:15.000000000 +0100 @@ -15,52 +15,44 @@ extern unsigned long loops_per_jiffy; -extern void __delay(unsigned int loops); +/* maximum permitted argument to udelay */ +#define __MAX_UDELAY 1000000 -/* - * Note that 19 * 226 == 4294 ==~ 2^32 / 10^6, so - * loops = (4294 * usecs * loops_per_jiffy * HZ) / 2^32. - * - * The mulhwu instruction gives us loops = (a * b) / 2^32. - * We choose a = usecs * 19 * HZ and b = loops_per_jiffy * 226 - * because this lets us support a wide range of HZ and - * loops_per_jiffy values without either a or b overflowing 2^32. - * Thus we need usecs * HZ <= (2^32 - 1) / 19 = 226050910 and - * loops_per_jiffy <= (2^32 - 1) / 226 = 19004280 - * (which corresponds to ~3800 bogomips at HZ = 100). - * -- paulus - */ -#define __MAX_UDELAY (226050910UL/HZ) /* maximum udelay argument */ -#define __MAX_NDELAY (4294967295UL/HZ) /* maximum ndelay argument */ +extern void __delay(unsigned int loops); -extern __inline__ void __udelay(unsigned int x) +/* N.B. the `secs' parameter here is a fixed-point number with + the binary point to the left of the most-significant bit. */ +extern __inline__ void __const_udelay(unsigned int secs) { unsigned int loops; __asm__("mulhwu %0,%1,%2" : "=r" (loops) : - "r" (x), "r" (loops_per_jiffy * 226)); - __delay(loops); + "r" (secs), "r" (loops_per_jiffy)); + __delay(loops * HZ); } -extern __inline__ void __ndelay(unsigned int x) +/* + * note that 4294 == 2^32 / 10^6, multiplying by 4294 converts from + * microseconds to a 32-bit fixed-point number of seconds. 
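/*
 * Editor's aside: a quick host-side check (ours) of the fixed-point scheme
 * above.  usecs * 4294 approximates usecs * 2^32 / 10^6, i.e. seconds with
 * the binary point left of the most-significant bit; mulhwu then takes the
 * high 32 bits of its product with loops_per_jiffy, and multiplying by HZ
 * yields delay loops.  lpj and hz below are assumed calibration values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t usecs = 1000;			/* delay 1 ms */
	uint32_t lpj = 500000, hz = 100;	/* assumed values */
	uint32_t secs_fp = usecs * 4294u;	/* fixed-point seconds */
	uint32_t loops = (uint32_t)(((uint64_t)secs_fp * lpj) >> 32);

	/* the two figures agree to within rounding of the 4294 constant */
	printf("loops = %u\n", loops * hz);
	printf("ideal = %u\n",
	       (uint32_t)((uint64_t)lpj * hz * usecs / 1000000));
	return 0;
}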
+ */ +extern __inline__ void __udelay(unsigned int usecs) { - unsigned int loops; - - __asm__("mulhwu %0,%1,%2" : "=r" (loops) : - "r" (x), "r" (loops_per_jiffy * 5)); - __delay(loops); + __const_udelay(usecs * 4294); +} +extern __inline__ void __ndelay(unsigned int usecs) +{ + __const_udelay(usecs * 5); } extern void __bad_udelay(void); /* deliberately undefined */ -extern void __bad_ndelay(void); /* deliberately undefined */ +extern void __bad_ndelay(void); #define udelay(n) (__builtin_constant_p(n)? \ - ((n) > __MAX_UDELAY? __bad_udelay(): __udelay((n) * (19 * HZ))) : \ - __udelay((n) * (19 * HZ))) - -#define ndelay(n) (__builtin_constant_p(n)? \ - ((n) > __MAX_NDELAY? __bad_ndelay(): __ndelay((n) * HZ)) : \ - __ndelay((n) * HZ)) + ((n) > __MAX_UDELAY? __bad_udelay(): __const_udelay((n) * 4294u)) : \ + __udelay(n)) +#define ndelay(n) (__builtin_constant_p(n) ? \ + ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ + __ndelay(n)) #endif /* defined(_PPC_DELAY_H) */ #endif /* __KERNEL__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-ppc/io.h linux.21rc5-ac2/include/asm-ppc/io.h --- linux.21rc5/include/asm-ppc/io.h 2003-05-28 15:09:04.000000000 +0100 +++ linux.21rc5-ac2/include/asm-ppc/io.h 2003-04-22 17:07:49.000000000 +0100 @@ -75,6 +75,24 @@ #define insl(port, buf, nl) _insl_ns((u32 *)((port)+_IO_BASE), (buf), (nl)) #define outsl(port, buf, nl) _outsl_ns((u32 *)((port)+_IO_BASE), (buf), (nl)) +/* + * io_flush(unsigned int value) + * + * Similar to AIX function, it makes the CPU think the value + * has actually been _used_. Call it after an IO read to + * make sure the read was actually done (especially useful + * for reads used for flushing posted writes) + */ +static inline void __io_flush(unsigned int value) +{ + __asm__ __volatile__( + " twi 0,%0,0\n" + " isync\n" + " nop\n" /* Is that one necessary ? */ + : : "r" (value)); +} +#define io_flush(val) __io_flush((unsigned int)(val)) + #ifdef CONFIG_ALL_PPC /* * On powermacs, we will get a machine check exception if we diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/bitops.h linux.21rc5-ac2/include/asm-s390/bitops.h --- linux.21rc5/include/asm-s390/bitops.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/bitops.h 2003-04-25 13:47:51.000000000 +0100 @@ -47,6 +47,7 @@ extern const char _oi_bitmap[]; extern const char _ni_bitmap[]; extern const char _zb_findmap[]; +extern const char _sb_findmap[]; #ifdef CONFIG_SMP /* @@ -642,6 +643,45 @@ return (res < size) ? res : size; } +static inline int +find_first_bit(unsigned long * addr, unsigned size) +{ + unsigned long cmp, count; + int res; + + if (!size) + return 0; + __asm__(" slr %1,%1\n" + " lr %2,%3\n" + " slr %0,%0\n" + " ahi %2,31\n" + " srl %2,5\n" + "0: c %1,0(%0,%4)\n" + " jne 1f\n" + " ahi %0,4\n" + " brct %2,0b\n" + " lr %0,%3\n" + " j 4f\n" + "1: l %2,0(%0,%4)\n" + " sll %0,3\n" + " lhi %1,0xff\n" + " tml %2,0xffff\n" + " jnz 2f\n" + " ahi %0,16\n" + " srl %2,16\n" + "2: tml %2,0x00ff\n" + " jnz 3f\n" + " ahi %0,8\n" + " srl %2,8\n" + "3: nr %2,%1\n" + " ic %2,0(%2,%5)\n" + " alr %0,%2\n" + "4:" + : "=&a" (res), "=&d" (cmp), "=&a" (count) + : "a" (size), "a" (addr), "a" (&_sb_findmap) : "cc" ); + return (res < size) ? 
res : size; +} + static __inline__ int find_next_zero_bit (void * addr, int size, int offset) { unsigned long * p = ((unsigned long *) addr) + (offset >> 5); @@ -680,6 +720,45 @@ return (offset + res); } +static inline int +find_next_bit (unsigned long * addr, int size, int offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 5); + unsigned long bitvec, reg; + int set, bit = offset & 31, res; + + if (bit) { + /* + * Look for set bit in first word + */ + bitvec = (*p) >> bit; + __asm__(" slr %0,%0\n" + " lhi %2,0xff\n" + " tml %1,0xffff\n" + " jnz 0f\n" + " ahi %0,16\n" + " srl %1,16\n" + "0: tml %1,0x00ff\n" + " jnz 1f\n" + " ahi %0,8\n" + " srl %1,8\n" + "1: nr %1,%2\n" + " ic %1,0(%1,%3)\n" + " alr %0,%1" + : "=&d" (set), "+a" (bitvec), "=&d" (reg) + : "a" (&_sb_findmap) : "cc" ); + if (set < (32 - bit)) + return set + offset; + offset += 32 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr)); + return (offset + res); +} + /* * ffz = Find First Zero in word. Undefined if no zero exists, * so code should check against ~0UL first.. @@ -708,6 +787,43 @@ } /* + * __ffs = find first bit in word. Undefined if no bit exists, + * so code should check against 0UL first.. + */ +static inline unsigned long __ffs (unsigned long word) +{ + unsigned long reg, result; + + __asm__(" slr %0,%0\n" + " lhi %2,0xff\n" + " tml %1,0xffff\n" + " jnz 0f\n" + " ahi %0,16\n" + " srl %1,16\n" + "0: tml %1,0x00ff\n" + " jnz 1f\n" + " ahi %0,8\n" + " srl %1,8\n" + "1: nr %1,%2\n" + " ic %1,0(%1,%3)\n" + " alr %0,%1" + : "=&d" (result), "+a" (word), "=&d" (reg) + : "a" (&_sb_findmap) : "cc" ); + return result; +} + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is cleared. + */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + return find_first_bit(b, 140); +} + +/* * ffs: find first bit set. This is defined the same way as * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). @@ -745,6 +861,39 @@ } /* + * fls: find last bit set. + */ +extern __inline__ int fls(int x) +{ + int r = 32; + + if (x == 0) + return 0; + __asm__(" tmh %1,0xffff\n" + " jz 0f\n" + " sll %1,16\n" + " ahi %0,-16\n" + "0: tmh %1,0xff00\n" + " jz 1f\n" + " sll %1,8\n" + " ahi %0,-8\n" + "1: tmh %1,0xf000\n" + " jz 2f\n" + " sll %1,4\n" + " ahi %0,-4\n" + "2: tmh %1,0xc000\n" + " jz 3f\n" + " sll %1,2\n" + " ahi %0,-2\n" + "3: tmh %1,0x8000\n" + " jz 4f\n" + " ahi %0,-1\n" + "4:" + : "+d" (r), "+d" (x) : : "cc" ); + return r; +} + +/* * hweightN: returns the hamming weight (i.e. 
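/*
 * Editor's aside: every fls() added by this patch (generic, alpha, ppc,
 * s390) follows the same convention: fls(0) == 0 and otherwise fls(x) is
 * one more than the index of the highest set bit, so fls(1) == 1 and
 * fls(0x80000000) == 32.  A portable reference (ours) to test any of them
 * against:
 */
#include <assert.h>

static int fls_ref(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	assert(fls_ref(0) == 0);
	assert(fls_ref(1) == 1);
	assert(fls_ref(0x80000000u) == 32);
	return 0;
}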
the number * of bits set) of a N-bit word */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/ccwcache.h linux.21rc5-ac2/include/asm-s390/ccwcache.h --- linux.21rc5/include/asm-s390/ccwcache.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/ccwcache.h 2003-04-25 13:49:34.000000000 +0100 @@ -1,8 +1,11 @@ /* - * File...........: linux/include/asm-s390/ccwcache.h - * Author(s)......: Holger Smolinski - * Bugreports.to..: - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000 + * File...........: linux/include/asm-s390/ccwcache.h + * Author(s)......: Holger Smolinski + * Bugreports.to..: + * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000 + * + * $Revision: 1.8 $ + * */ #ifndef CCWCACHE_H #define CCWCACHE_H @@ -38,7 +41,7 @@ struct ccw_req_t *refers; /* Does this request refer to another one? */ void *function; /* refers to the originating ERP action */ ; - unsigned long long expires; /* expiratioj period */ + unsigned long long expires; /* expiration period */ /* these are for profiling purposes */ unsigned long long buildclk; /* TOD-clock of request generation */ unsigned long long startclk; /* TOD-clock of request start */ @@ -48,6 +51,8 @@ /* these are for internal use */ int cplength; /* length of the channel program in CCWs */ int datasize; /* amount of additional data in bytes */ + void *lowmem_idal; /* lowmem page for idals (if in use) */ + void *lowmem_idal_ptr; /* ptr to the actual idal word in the idals page */ kmem_cache_t *cache; /* the cache this data comes from */ } __attribute__ ((aligned(4))) ccw_req_t; @@ -55,16 +60,19 @@ /* * ccw_req_t -> status can be: */ -#define CQR_STATUS_EMPTY 0x00 /* request is empty */ -#define CQR_STATUS_FILLED 0x01 /* request is ready to be preocessed */ -#define CQR_STATUS_QUEUED 0x02 /* request is queued to be processed */ -#define CQR_STATUS_IN_IO 0x03 /* request is currently in IO */ -#define CQR_STATUS_DONE 0x04 /* request is completed successfully */ -#define CQR_STATUS_ERROR 0x05 /* request is completed with error */ -#define CQR_STATUS_FAILED 0x06 /* request is finally failed */ -#define CQR_STATUS_PENDING 0x07 /* request is waiting for interrupt - ERP only */ - -#define CQR_FLAGS_CHAINED 0x01 /* request is chained by another (last CCW is TIC) */ +#define CQR_STATUS_EMPTY 0x00 /* cqr is empty */ +#define CQR_STATUS_FILLED 0x01 /* cqr is ready to be processed */ +#define CQR_STATUS_QUEUED 0x02 /* cqr is queued to be processed */ +#define CQR_STATUS_IN_IO 0x03 /* cqr is currently in IO */ +#define CQR_STATUS_DONE 0x04 /* cqr is completed successfully */ +#define CQR_STATUS_ERROR 0x05 /* cqr is completed with error */ +#define CQR_STATUS_FAILED 0x06 /* cqr is finally failed */ +#define CQR_STATUS_PENDING 0x07 /* cqr is waiting for interrupt - ERP only */ + +#define CQR_FLAGS_CHAINED 0x01 /* cqr is chained by another (last CCW is TIC) */ +#define CQR_FLAGS_FINALIZED 0x02 +#define CQR_FLAGS_LM_CQR 0x04 /* cqr uses page from lowmem_pool */ +#define CQR_FLAGS_LM_IDAL 0x08 /* IDALs uses page from lowmem_pool */ #ifdef __KERNEL__ #define SMALLEST_SLAB (sizeof(struct ccw_req_t) <= 128 ?
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/ctrlchar.h linux.21rc5-ac2/include/asm-s390/ctrlchar.h --- linux.21rc5/include/asm-s390/ctrlchar.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/ctrlchar.h 2003-05-28 15:40:29.000000000 +0100 @@ -0,0 +1,20 @@ +/* + * Implemented in drivers/s390/char/ctrlchar.c + * Unified handling of special chars. + * + * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Fritz Elfert + * + */ + +struct tty_struct; + +extern unsigned int ctrlchar_handle(const unsigned char *buf, int len, + struct tty_struct *tty, int is_console); +extern void ctrlchar_init(void); + +#define CTRLCHAR_CTRL (0 << 8) +#define CTRLCHAR_NONE (1 << 8) +#define CTRLCHAR_SYSRQ (2 << 8) + +#define CTRLCHAR_MASK (~0xffu) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/dasd.h linux.21rc5-ac2/include/asm-s390/dasd.h --- linux.21rc5/include/asm-s390/dasd.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/dasd.h 2003-04-25 13:49:34.000000000 +0100 @@ -8,8 +8,15 @@ * any future changes wrt the API will result in a change of the APIVERSION reported * to userspace by the DASDAPIVER-ioctl * + * $Revision: 1.51 $ + * * History of changes (starts July 2000) * 05/04/01 created by moving the kernel interface to drivers/s390/block/dasd_int.h + * 12/06/01 DASD_API_VERSION 2 - binary compatible to 0 (new BIODASDINFO2) + * 01/23/02 DASD_API_VERSION 3 - added BIODASDPSRD IOCTL + * 02/15/02 DASD_API_VERSION 4 - added BIODASDSATTR IOCTL + * 06/07/02 DASD_API_VERSION 5 - added 'boxed DASD' support + * */ #ifndef DASD_H @@ -18,11 +25,126 @@ #define DASD_IOCTL_LETTER 'D' -#if (DASD_API_VERSION == 0) +#define DASD_API_VERSION 5 + +/* + * struct dasd_information2_t + * represents any data about the device, which is visible to userspace, + * including format and features. + */ +typedef struct dasd_information2_t { + unsigned int devno; /* S/390 devno */ + unsigned int real_devno; /* for aliases */ + unsigned int schid; /* S/390 subchannel identifier */ + unsigned int cu_type : 16; /* from SenseID */ + unsigned int cu_model : 8; /* from SenseID */ + unsigned int dev_type : 16; /* from SenseID */ + unsigned int dev_model : 8; /* from SenseID */ + unsigned int open_count; + unsigned int req_queue_len; + unsigned int chanq_len; /* length of chanq */ + char type[4]; /* from discipline.name, 'none' for unknown */ + unsigned int status; /* current device level */ + unsigned int label_block; /* where to find the VOLSER */ + unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ + unsigned int characteristics_size; + unsigned int confdata_size; + char characteristics[64]; /* from read_device_characteristics */ + char configuration_data[256]; /* from read_configuration_data */ + unsigned int format; /* format info like formatted/cdl/ldl/... */ + unsigned int features; /* dasd features like 'ro',... */ + unsigned int reserved0; /* reserved for further use ,... */ + unsigned int reserved1; /* reserved for further use ,... */ + unsigned int reserved2; /* reserved for further use ,... */ + unsigned int reserved3; /* reserved for further use ,... */ + unsigned int reserved4; /* reserved for further use ,... */ + unsigned int reserved5; /* reserved for further use ,... */ + unsigned int reserved6; /* reserved for further use ,... */ + unsigned int reserved7; /* reserved for further use ,... */ +} dasd_information2_t;
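A hedged user-space sketch of how this enhanced info block is meant to be consumed, via the BIODASDINFO2 ioctl defined further down in this header; it builds only against the patched s390 headers, and the device node and output format are illustrative only:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/dasd.h>		/* dasd_information2_t, BIODASDINFO2 */

int main(int argc, char **argv)
{
	dasd_information2_t info;
	int fd = open(argc > 1 ? argv[1] : "/dev/dasda", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, BIODASDINFO2, &info) == 0)
		printf("devno %04x type %.4s format %u %s\n",
		       info.devno, info.type, info.format,
		       (info.features & DASD_FEATURE_READONLY) ? "ro" : "rw");
	close(fd);
	return 0;
}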
+ +/* + * values to be used for dasd_information_t.format + * 0x00: NOT formatted + * 0x01: Linux disc layout + * 0x02: Common disc layout + */ +#define DASD_FORMAT_NONE 0 +#define DASD_FORMAT_LDL 1 +#define DASD_FORMAT_CDL 2 +/* + * values to be used for dasd_information_t.features + * 0x00: default features + * 0x01: readonly (ro) + */ +#define DASD_FEATURE_DEFAULT 0 +#define DASD_FEATURE_READONLY 1 #define DASD_PARTN_BITS 2 /* + * struct dasd_information_t + * represents any data about the device, which is visible to userspace + */ +typedef struct dasd_information_t { + unsigned int devno; /* S/390 devno */ + unsigned int real_devno; /* for aliases */ + unsigned int schid; /* S/390 subchannel identifier */ + unsigned int cu_type : 16; /* from SenseID */ + unsigned int cu_model : 8; /* from SenseID */ + unsigned int dev_type : 16; /* from SenseID */ + unsigned int dev_model : 8; /* from SenseID */ + unsigned int open_count; + unsigned int req_queue_len; + unsigned int chanq_len; /* length of chanq */ + char type[4]; /* from discipline.name, 'none' for unknown */ + unsigned int status; /* current device level */ + unsigned int label_block; /* where to find the VOLSER */ + unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ + unsigned int characteristics_size; + unsigned int confdata_size; + char characteristics[64]; /* from read_device_characteristics */ + char configuration_data[256]; /* from read_configuration_data */ +} dasd_information_t; + +/* + * Read Subsystem Data - Performance Statistics + */ +typedef struct dasd_rssd_perf_stats_t { + unsigned char invalid:1; + unsigned char format:3; + unsigned char data_format:4; + unsigned char unit_address; + unsigned short device_status; + unsigned int nr_read_normal; + unsigned int nr_read_normal_hits; + unsigned int nr_write_normal; + unsigned int nr_write_fast_normal_hits; + unsigned int nr_read_seq; + unsigned int nr_read_seq_hits; + unsigned int nr_write_seq; + unsigned int nr_write_fast_seq_hits; + unsigned int nr_read_cache; + unsigned int nr_read_cache_hits; + unsigned int nr_write_cache; + unsigned int nr_write_fast_cache_hits; + unsigned int nr_inhibit_cache; + unsigned int nr_bybass_cache; + unsigned int nr_seq_dasd_to_cache; + unsigned int nr_dasd_to_cache; + unsigned int nr_cache_to_dasd; + unsigned int nr_delayed_fast_write; + unsigned int nr_normal_fast_write; + unsigned int nr_seq_fast_write; + unsigned int nr_cache_miss; + unsigned char status2; + unsigned int nr_quick_write_promotes; + unsigned char reserved; + unsigned short ssid; + unsigned char reseved2[96]; +} __attribute__((packed)) dasd_rssd_perf_stats_t; + +/* * struct profile_info_t * holds the profinling information */ @@ -62,30 +184,36 @@ #define DASD_FMT_INT_INVAL 4 /* invalidate tracks */ #define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */ + /* - * struct dasd_information_t - * represents any data about the data, which is visible to userspace + * struct attrib_data_t + * represents the operation (cache) bits for the device. + * Used in DE to influence caching of the DASD.
*/ -typedef struct dasd_information_t { - unsigned int devno; /* S/390 devno */ - unsigned int real_devno; /* for aliases */ - unsigned int schid; /* S/390 subchannel identifier */ - unsigned int cu_type : 16; /* from SenseID */ - unsigned int cu_model : 8; /* from SenseID */ - unsigned int dev_type : 16; /* from SenseID */ - unsigned int dev_model : 8; /* from SenseID */ - unsigned int open_count; - unsigned int req_queue_len; - unsigned int chanq_len; - char type[4]; /* from discipline.name, 'none' for unknown */ - unsigned int status; /* current device level */ - unsigned int label_block; /* where to find the VOLSER */ - unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ - unsigned int characteristics_size; - unsigned int confdata_size; - char characteristics[64]; /* from read_device_characteristics */ - char configuration_data[256]; /* from read_configuration_data */ -} dasd_information_t; +typedef struct attrib_data_t { + unsigned char operation:3; /* cache operation mode */ + unsigned char reserved:5; /* reserved */ + unsigned short nr_cyl; /* no of cylinders for read ahead */ + unsigned char reserved2[29]; /* for future use */ +} __attribute__ ((packed)) attrib_data_t; + +/* definition of operation (cache) bits within attributes of DE */ +#define DASD_NORMAL_CACHE 0x0 +#define DASD_BYPASS_CACHE 0x1 +#define DASD_INHIBIT_LOAD 0x2 +#define DASD_SEQ_ACCESS 0x3 +#define DASD_SEQ_PRESTAGE 0x4 +#define DASD_REC_ACCESS 0x5 + + +/******************************************************************************** + * SECTION: Definition of IOCTLs + * + * Here is how the ioctl-nr should be used: + * 0 - 31 DASD driver itself + * 32 - 239 still open + * 240 - 255 reserved for EMC + *******************************************************************************/ /* Disable the volume (for Linux) */ #define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0) @@ -97,15 +225,26 @@ #define BIODASDSLCK _IO(DASD_IOCTL_LETTER,4) /* steal lock */ /* reset profiling information of a device */ #define BIODASDPRRST _IO(DASD_IOCTL_LETTER,5) + + /* retrieve API version number */ #define DASDAPIVER _IOR(DASD_IOCTL_LETTER,0,int) /* Get information on a dasd device */ #define BIODASDINFO _IOR(DASD_IOCTL_LETTER,1,dasd_information_t) /* retrieve profiling information of a device */ #define BIODASDPRRD _IOR(DASD_IOCTL_LETTER,2,dasd_profile_info_t) +/* Get information on a dasd device (enhanced) */ +#define BIODASDINFO2 _IOR(DASD_IOCTL_LETTER,3,dasd_information2_t) +/* Performance Statistics Read */ +#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t) + + /* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */ #define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) -#endif /* DASD_API_VERSION */ +/* Set Attributes (cache operations) */ +#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t) + + #endif /* DASD_H */ /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/idals.h linux.21rc5-ac2/include/asm-s390/idals.h --- linux.21rc5/include/asm-s390/idals.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/idals.h 2003-04-25 13:47:51.000000000 +0100 @@ -217,7 +217,7 @@ * Copy count bytes from an idal buffer to user memory */ static inline size_t -idal_buffer_to_user(struct idal_buffer *ib, void *to, size_t count) +idal_buffer_to_user(const struct idal_buffer *ib, void *to, size_t count) { size_t left; int i; @@ -238,7 +238,7 @@ * Copy count bytes from user memory to an idal buffer */ static inline
size_t -idal_buffer_from_user(struct idal_buffer *ib, void *from, size_t count) +idal_buffer_from_user(struct idal_buffer *ib, const void *from, size_t count) { size_t left; int i; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/irq.h linux.21rc5-ac2/include/asm-s390/irq.h --- linux.21rc5/include/asm-s390/irq.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/irq.h 2003-04-25 13:48:57.000000000 +0100 @@ -497,6 +497,8 @@ #define DEVSTAT_SUSPENDED 0x00000400 #define DEVSTAT_UNKNOWN_DEV 0x00000800 #define DEVSTAT_UNFRIENDLY_DEV 0x00001000 +#define DEVSTAT_NOT_ACC 0x00002000 +#define DEVSTAT_NOT_ACC_ERR 0x00004000 #define DEVSTAT_FINAL_STATUS 0x80000000 #define DEVINFO_NOT_OPER DEVSTAT_NOT_OPER @@ -581,6 +583,7 @@ #define DOIO_TIMEOUT 0x0080 /* 3 secs. timeout for sync. I/O */ #define DOIO_DONT_CALL_INTHDLR 0x0100 /* don't call interrupt handler */ #define DOIO_CANCEL_ON_TIMEOUT 0x0200 /* cancel I/O if it timed out */ +#define DOIO_USE_DIAG98 0x0400 /* use DIAG98 instead of SSCH */ /* * do_IO() @@ -785,6 +788,25 @@ return ccode; } +extern __inline__ int diag98(int irq, volatile orb_t *addr) +{ + int ccode; + + __asm__ __volatile__( + " lr 1,%1\n" + " lr 0,%2\n" /* orb in 0 */ + " lhi 2,12\n" /* function code 0x0c */ + " diag 2,0,152\n" /* diag98 instead of ssch, + result in gpr 1 */ + " ipm %0\n" /* usual cc evaluation. cc=3 will be + reported as not operational */ + " srl %0,28" + : "=d" (ccode) + : "d" (irq | 0x10000L), "a" (addr) + : "cc", "0", "1", "2"); + return ccode; +} + extern __inline__ int rsch(int irq) { int ccode; @@ -888,25 +910,8 @@ void VM_virtual_device_info( __u16 devno, /* device number */ senseid_t *ps ); /* ptr to senseID data */ -extern __inline__ int diag210( diag210_t * addr) -{ - int ccode; +extern int diag210( diag210_t * addr); - __asm__ __volatile__( -#ifdef CONFIG_ARCH_S390X - " sam31\n" - " diag %1,0,0x210\n" - " sam64\n" -#else - " diag %1,0,0x210\n" -#endif - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "a" (addr) - : "cc" ); - return ccode; -} extern __inline__ int chsc( chsc_area_t * chsc_area) { int cc; @@ -999,6 +1004,8 @@ #include +#define get_irq_lock(irq) &ioinfo[irq]->irq_lock + #define s390irq_spin_lock(irq) \ spin_lock(&(ioinfo[irq]->irq_lock)) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/page.h linux.21rc5-ac2/include/asm-s390/page.h --- linux.21rc5/include/asm-s390/page.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/page.h 2003-05-28 15:13:38.000000000 +0100 @@ -9,7 +9,6 @@ #ifndef _S390_PAGE_H #define _S390_PAGE_H -#include #include #include @@ -18,9 +17,6 @@ #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) -#define STORAGE_ACC_KEY 6 -#define STORAGE_ACC_KEY_MASK (STORAGE_ACC_KEY<<4) - #ifdef __KERNEL__ #ifndef __ASSEMBLY__ @@ -124,62 +120,13 @@ #define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT)) #define VALID_PAGE(page) ((page - mem_map) < max_mapnr) -/* the pfix table of all mem addresses */ -extern void* *pfix_table; -#define pfix_get_page_addr(ptr) (pfix_table[(unsigned long)ptr>>PAGE_SHIFT]) -#define pfix_get_addr(ptr) (pfix_get_page_addr(ptr)+ \ - ((unsigned long)ptr&PAGE_MASK)) - -/* - * These functions allow to lock and unlock pages in VM. They need - * the absolute address of the page to be locked or unlocked. This will - * only work if the diag98 option in the user directory is enabled for - * the guest. 
- */ - -/* if result is 0, addr will become the host absolute address */ -extern __inline__ int pfix_lock_page(void *addr) -{ - int ret = -ENOMEM; - - if (!MACHINE_HAS_PFIX) - return -ENOSYS; - - __asm__ __volatile__( - " l 0,%1\n" /* parameter in gpr 0 */ - " lhi 2,0\n" /* function code is 0 */ - " diag 2,0,0x98\n" /* result in gpr 1 */ - " jnz 0f\n" - " lhi %0,0\n" - "0: l %1,1\n" /* gpr1 contains host absol. addr */ - : "+d" (ret), "+d" ((unsigned long)addr) : - : "0", "1", "2", "cc" ); - return ret; -} - -extern __inline__ int pfix_unlock_page(unsigned long addr) -{ - int ret = -EINVAL; - - if (!MACHINE_HAS_PFIX) - return -ENOSYS; - - __asm__ __volatile__( - " lr 0,%1\n" /* parameter in gpr 0 */ - " lhi 2,4\n" /* function code is 4 */ - " diag 2,0,0x98\n" /* result in gpr 1 */ - " jnz 0f\n" - " lhi %0,0\n" - "0:\n" - : "+d" (ret) - : "a" (addr) - : "0", "1", "2", "cc" ); - return ret; -} - #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +unsigned int get_storage_key(void); +unsigned long pfix_get_page_addr(void *); +unsigned long pfix_get_addr(void *); + #endif /* __KERNEL__ */ #endif /* _S390_PAGE_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/pgtable.h linux.21rc5-ac2/include/asm-s390/pgtable.h --- linux.21rc5/include/asm-s390/pgtable.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/pgtable.h 2003-05-28 15:13:38.000000000 +0100 @@ -236,22 +236,12 @@ pte_val(pteval) &= ~_PAGE_MKCLEAN; asm volatile ("sske %0,%1" - : : "d" (STORAGE_ACC_KEY_MASK), - "a" (pte_val(pteval))); + : : "d" (get_storage_key()), "a" (pte_val(pteval))); } *pteptr = pteval; } -extern inline void set_access_key(pte_t pteval) -{ - asm volatile (" iske 0,%1\n" - " or 0,%0\n" - " sske 0,%1\n" - : : "d" (STORAGE_ACC_KEY_MASK), - "a" (pte_val(pteval)) : "0", "cc"); -} - #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* @@ -404,7 +394,7 @@ /* We can't clear the changed bit atomically. For now we * clear (!) the page referenced bit. 
*/ asm volatile ("sske %0,%1" - : : "d" (STORAGE_ACC_KEY_MASK), "a" (*ptep)); + : : "d" (get_storage_key()), "a" (*ptep)); return 1; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/s390io.h linux.21rc5-ac2/include/asm-s390/s390io.h --- linux.21rc5/include/asm-s390/s390io.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/s390io.h 2003-04-25 13:48:57.000000000 +0100 @@ -8,6 +8,7 @@ #ifndef __s390io_h #define __s390io_h +#include /* * IRQ data structure used by I/O subroutines @@ -57,7 +58,9 @@ unsigned int dval : 1; /* device number valid */ unsigned int unknown : 1; /* unknown device - if SenseID failed */ unsigned int unfriendly: 1; /* device is locked by someone else */ - unsigned int unused : (sizeof(unsigned int)*8 - 25); /* unused */ + unsigned int killio : 1; /* currently killing pending io */ + unsigned int noio : 1; /* don't let drivers start io */ + unsigned int unused : (sizeof(unsigned int)*8 - 27); /* unused */ } __attribute__ ((packed)) flags; } ui; @@ -80,8 +83,9 @@ unsigned long qflag; /* queued flags */ __u8 qlpm; /* queued logical path mask */ ssd_info_t ssd_info; /* subchannel description */ - - } __attribute__ ((aligned(8))) ioinfo_t; + struct tq_struct pver_bh; /* path verification bottom half task */ + atomic_t pver_pending; /* != 0 if path verification for sch is pending */ +} __attribute__ ((aligned(8))) ioinfo_t; #define IOINFO_FLAGS_BUSY 0x80000000 #define IOINFO_FLAGS_OPER 0x40000000 @@ -101,5 +105,9 @@ #define CHSC_SEI_ACC_LINKADDR 2 #define CHSC_SEI_ACC_FULLLINKADDR 3 +#define CIO_PATHGONE_WAIT4INT 0x01 +#define CIO_PATHGONE_IOERR 0x02 +#define CIO_PATHGONE_DEVGONE 0x04 + #endif /* __s390io_h */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/setup.h linux.21rc5-ac2/include/asm-s390/setup.h --- linux.21rc5/include/asm-s390/setup.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/setup.h 2003-04-25 13:53:31.000000000 +0100 @@ -25,14 +25,16 @@ */ extern unsigned long machine_flags; -#define MACHINE_IS_VM (machine_flags & 1) -#define MACHINE_HAS_IEEE (machine_flags & 2) -#define MACHINE_IS_P390 (machine_flags & 4) -#define MACHINE_HAS_CSP (machine_flags & 8) -#define MACHINE_HAS_MVPG (machine_flags & 16) -#define MACHINE_HAS_PFIX (machine_flags & 128) +#define MACHINE_IS_VM (machine_flags & 1) +#define MACHINE_HAS_IEEE (machine_flags & 2) +#define MACHINE_IS_P390 (machine_flags & 4) +#define MACHINE_HAS_CSP (machine_flags & 8) +#define MACHINE_HAS_MVPG (machine_flags & 16) +/* 32 is MACHINE_HAS_DIAG44 on s390x */ +#define MACHINE_NEW_STIDP (machine_flags & 64) +#define MACHINE_HAS_PFIX (machine_flags & 128) -#define MACHINE_HAS_HWC (!MACHINE_IS_P390) +#define MACHINE_HAS_SCLP (!MACHINE_IS_P390) /* * Console mode. 
Override with conmode= @@ -41,10 +43,10 @@ extern unsigned int console_device; #define CONSOLE_IS_UNDEFINED (console_mode == 0) -#define CONSOLE_IS_HWC (console_mode == 1) +#define CONSOLE_IS_SCLP (console_mode == 1) #define CONSOLE_IS_3215 (console_mode == 2) #define CONSOLE_IS_3270 (console_mode == 3) -#define SET_CONSOLE_HWC do { console_mode = 1; } while (0) +#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0) #define SET_CONSOLE_3215 do { console_mode = 2; } while (0) #define SET_CONSOLE_3270 do { console_mode = 3; } while (0) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/smp.h linux.21rc5-ac2/include/asm-s390/smp.h --- linux.21rc5/include/asm-s390/smp.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/smp.h 2003-04-25 13:47:51.000000000 +0100 @@ -42,7 +42,7 @@ #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) extern __inline__ int cpu_logical_map(int cpu) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/tape390.h linux.21rc5-ac2/include/asm-s390/tape390.h --- linux.21rc5/include/asm-s390/tape390.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/tape390.h 2003-04-25 13:53:58.000000000 +0100 @@ -0,0 +1,39 @@ +/************************************************************************* + * + * tape390.h + * enables user programs to display messages on the tape device + * + * S390 and zSeries version + * Copyright (C) 2001 IBM Corporation + * Author(s): Despina Papadopoulou + * + *************************************************************************/ + +#ifndef _TAPE390_H +#define _TAPE390_H + +#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct) + +/* + * The TAPE390_DISPLAY ioctl calls the Load Display command + * which transfers 17 bytes of data from the channel to the subsystem: + * - 1 format control byte, and + * - two 8-byte messages + * + * Format control byte: + * 0-2: New Message Overlay + * 3: Alternate Messages + * 4: Blink Message + * 5: Display Low/High Message + * 6: Reserved + * 7: Automatic Load Request + * + */ + +typedef struct display_struct { + char cntrl; + char message1[8]; + char message2[8]; +} display_struct; + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390/vtoc.h linux.21rc5-ac2/include/asm-s390/vtoc.h --- linux.21rc5/include/asm-s390/vtoc.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390/vtoc.h 2003-04-25 13:49:34.000000000 +0100 @@ -18,7 +18,6 @@ #include #endif -#define DASD_API_VERSION 0 #define LINE_LENGTH 80 #define VTOC_START_CC 0x0 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/bitops.h linux.21rc5-ac2/include/asm-s390x/bitops.h --- linux.21rc5/include/asm-s390x/bitops.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/bitops.h 2003-04-25 13:47:51.000000000 +0100 @@ -51,6 +51,7 @@ extern const char _oi_bitmap[]; extern const char _ni_bitmap[]; extern const char _zb_findmap[]; +extern const char _sb_findmap[]; #ifdef CONFIG_SMP /* @@ -653,6 +654,48 @@ return (res < size) ? 
res : size; } +static inline unsigned long +find_first_bit(unsigned long * addr, unsigned long size) +{ + unsigned long res, cmp, count; + + if (!size) + return 0; + __asm__(" slgr %1,%1\n" + " lgr %2,%3\n" + " slgr %0,%0\n" + " aghi %2,63\n" + " srlg %2,%2,6\n" + "0: cg %1,0(%0,%4)\n" + " jne 1f\n" + " aghi %0,8\n" + " brct %2,0b\n" + " lgr %0,%3\n" + " j 5f\n" + "1: lg %2,0(%0,%4)\n" + " sllg %0,%0,3\n" + " clr %2,%1\n" + " jne 2f\n" + " aghi %0,32\n" + " srlg %2,%2,32\n" + "2: lghi %1,0xff\n" + " tmll %2,0xffff\n" + " jnz 3f\n" + " aghi %0,16\n" + " srl %2,16\n" + "3: tmll %2,0x00ff\n" + " jnz 4f\n" + " aghi %0,8\n" + " srl %2,8\n" + "4: ngr %2,%1\n" + " ic %2,0(%2,%5)\n" + " algr %0,%2\n" + "5:" + : "=&a" (res), "=&d" (cmp), "=&a" (count) + : "a" (size), "a" (addr), "a" (&_sb_findmap) : "cc" ); + return (res < size) ? res : size; +} + static __inline__ unsigned long find_next_zero_bit (void * addr, unsigned long size, unsigned long offset) { @@ -697,6 +740,49 @@ return (offset + res); } +static inline unsigned long +find_next_bit (unsigned long * addr, unsigned long size, unsigned long offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 6); + unsigned long bitvec, reg; + unsigned long set, bit = offset & 63, res; + + if (bit) { + /* + * Look for set bit in first word + */ + bitvec = (*p) >> bit; + __asm__(" slgr %0,%0\n" + " ltr %1,%1\n" + " jnz 0f\n" + " aghi %0,32\n" + " srlg %1,%1,32\n" + "0: lghi %2,0xff\n" + " tmll %1,0xffff\n" + " jnz 1f\n" + " aghi %0,16\n" + " srlg %1,%1,16\n" + "1: tmll %1,0x00ff\n" + " jnz 2f\n" + " aghi %0,8\n" + " srlg %1,%1,8\n" + "2: ngr %1,%2\n" + " ic %1,0(%1,%3)\n" + " algr %0,%1" + : "=&d" (set), "+a" (bitvec), "=&d" (reg) + : "a" (&_sb_findmap) : "cc" ); + if (set < (64 - bit)) + return set + offset; + offset += 64 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 64 * (p - (unsigned long *) addr)); + return (offset + res); +} + /* * ffz = Find First Zero in word. Undefined if no zero exists, * so code should check against ~0UL first.. @@ -730,6 +816,47 @@ } /* + * __ffs = find first bit in word. Undefined if no bit exists, + * so code should check against 0UL first.. + */ +static inline unsigned long __ffs (unsigned long word) +{ + unsigned long reg, result; + + __asm__(" slgr %0,%0\n" + " ltr %1,%1\n" + " jnz 0f\n" + " aghi %0,32\n" + " srlg %1,%1,32\n" + "0: lghi %2,0xff\n" + " tmll %1,0xffff\n" + " jnz 1f\n" + " aghi %0,16\n" + " srlg %1,%1,16\n" + "1: tmll %1,0x00ff\n" + " jnz 2f\n" + " aghi %0,8\n" + " srlg %1,%1,8\n" + "2: ngr %1,%2\n" + " ic %1,0(%1,%3)\n" + " algr %0,%1" + : "=&d" (result), "+a" (word), "=&d" (reg) + : "a" (&_sb_findmap) : "cc" ); + return result; +} + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is cleared. + */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + return find_first_bit(b, 140); +} + +/* * ffs: find first bit set. This is defined the same way as * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). @@ -767,6 +894,39 @@ }
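The 140-bit figure in the _sched_find_first_bit() comment above is the O(1) scheduler's per-runqueue priority bitmap: one bit per priority level, set when that level has a runnable task, so finding the first set bit yields the highest-priority task to run next. A portable C sketch of the same search, standing in for the assembler versions above; the word-width handling and the sample priority are illustrative only:

#include <stdio.h>

#define MAX_PRIO 140
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITMAP_LONGS ((MAX_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* portable equivalent of the find_first_bit() assembler above */
static int sketch_find_first_bit(const unsigned long *b, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (b[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;	/* no bit set */
}

int main(void)
{
	unsigned long bitmap[BITMAP_LONGS] = { 0 };

	/* pretend priority 120 (the nice-0 level) has a runnable task */
	bitmap[120 / BITS_PER_LONG] |= 1UL << (120 % BITS_PER_LONG);
	printf("next task runs at priority %d\n",
	       sketch_find_first_bit(bitmap, MAX_PRIO));
	return 0;
}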
/* * fls: find last bit set. + */ +extern __inline__ int fls(int x) +{ + int r = 32; + + if (x == 0) + return 0; + __asm__(" tmh %1,0xffff\n" + " jz 0f\n" + " sll %1,16\n" + " ahi %0,-16\n" + "0: tmh %1,0xff00\n" + " jz 1f\n" + " sll %1,8\n" + " ahi %0,-8\n" + "1: tmh %1,0xf000\n" + " jz 2f\n" + " sll %1,4\n" + " ahi %0,-4\n" + "2: tmh %1,0xc000\n" + " jz 3f\n" + " sll %1,2\n" + " ahi %0,-2\n" + "3: tmh %1,0x8000\n" + " jz 4f\n" + " ahi %0,-1\n" + "4:" + : "+d" (r), "+d" (x) : : "cc" ); + return r; +} + +/* * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/ccwcache.h linux.21rc5-ac2/include/asm-s390x/ccwcache.h --- linux.21rc5/include/asm-s390x/ccwcache.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/ccwcache.h 2003-04-25 13:49:34.000000000 +0100 @@ -1,8 +1,11 @@ /* - * File...........: linux/include/asm-s390/ccwcache.h - * Author(s)......: Holger Smolinski - * Bugreports.to..: - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000 + * File...........: linux/include/asm-s390/ccwcache.h + * Author(s)......: Holger Smolinski + * Bugreports.to..: + * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000 + * + * $Revision: 1.10 $ + * */ #ifndef CCWCACHE_H #define CCWCACHE_H @@ -38,7 +41,7 @@ struct ccw_req_t *refers; /* Does this request refer to another one? */ void *function; /* refers to the originating ERP action */ ; - unsigned long long expires; /* expiratioj period */ + unsigned long long expires; /* expiration period */ /* these are for profiling purposes */ unsigned long long buildclk; /* TOD-clock of request generation */ unsigned long long startclk; /* TOD-clock of request start */ @@ -48,6 +51,8 @@ /* these are for internal use */ int cplength; /* length of the channel program in CCWs */ int datasize; /* amount of additional data in bytes */ + void *lowmem_idal; /* lowmem page for idals (if in use) */ + void *lowmem_idal_ptr; /* ptr to the actual idal word in the idals page */ kmem_cache_t *cache; /* the cache this data comes from */ } __attribute__ ((aligned(4))) ccw_req_t; @@ -55,16 +60,19 @@ /* * ccw_req_t -> status can be: */ -#define CQR_STATUS_EMPTY 0x00 /* request is empty */ -#define CQR_STATUS_FILLED 0x01 /* request is ready to be preocessed */ -#define CQR_STATUS_QUEUED 0x02 /* request is queued to be processed */ -#define CQR_STATUS_IN_IO 0x03 /* request is currently in IO */ -#define CQR_STATUS_DONE 0x04 /* request is completed successfully */ -#define CQR_STATUS_ERROR 0x05 /* request is completed with error */ -#define CQR_STATUS_FAILED 0x06 /* request is finally failed */ -#define CQR_STATUS_PENDING 0x07 /* request is waiting for interrupt - ERP only */ - -#define CQR_FLAGS_CHAINED 0x01 /* request is chained by another (last CCW is TIC) */ +#define CQR_STATUS_EMPTY 0x00 /* cqr is empty */ +#define CQR_STATUS_FILLED 0x01 /* cqr is ready to be processed */ +#define CQR_STATUS_QUEUED 0x02 /* cqr is queued to be processed */ +#define CQR_STATUS_IN_IO 0x03 /* cqr is currently in IO */ +#define CQR_STATUS_DONE 0x04 /* cqr is completed successfully */ +#define CQR_STATUS_ERROR 0x05 /* cqr is completed with error */ +#define CQR_STATUS_FAILED 0x06 /* cqr is finally failed */ +#define CQR_STATUS_PENDING 0x07 /* cqr is waiting for interrupt - ERP only */ + +#define CQR_FLAGS_CHAINED 0x01 /* cqr is chained by another (last CCW is TIC) */ +#define CQR_FLAGS_FINALIZED 0x02 +#define CQR_FLAGS_LM_CQR 0x04 /* cqr uses page from
lowmem_pool */ +#define CQR_FLAGS_LM_IDAL 0x08 /* IDALs use page from lowmem_pool */ #ifdef __KERNEL__ #define SMALLEST_SLAB (sizeof(struct ccw_req_t) <= 128 ? 128 :\ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/ctrlchar.h linux.21rc5-ac2/include/asm-s390x/ctrlchar.h --- linux.21rc5/include/asm-s390x/ctrlchar.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/ctrlchar.h 2003-05-28 15:40:29.000000000 +0100 @@ -0,0 +1,20 @@ +/* + * Implemented in drivers/s390/char/ctrlchar.c + * Unified handling of special chars. + * + * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Fritz Elfert + * + */ + +struct tty_struct; + +extern unsigned int ctrlchar_handle(const unsigned char *buf, int len, + struct tty_struct *tty, int is_console); +extern void ctrlchar_init(void); + +#define CTRLCHAR_CTRL (0 << 8) +#define CTRLCHAR_NONE (1 << 8) +#define CTRLCHAR_SYSRQ (2 << 8) + +#define CTRLCHAR_MASK (~0xffu) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/dasd.h linux.21rc5-ac2/include/asm-s390x/dasd.h --- linux.21rc5/include/asm-s390x/dasd.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/dasd.h 2003-04-25 13:49:34.000000000 +0100 @@ -8,8 +8,15 @@ * any future changes wrt the API will result in a change of the APIVERSION reported * to userspace by the DASDAPIVER-ioctl * + * $Revision: 1.51 $ + * * History of changes (starts July 2000) * 05/04/01 created by moving the kernel interface to drivers/s390/block/dasd_int.h + * 12/06/01 DASD_API_VERSION 2 - binary compatible to 0 (new BIODASDINFO2) + * 01/23/02 DASD_API_VERSION 3 - added BIODASDPSRD IOCTL + * 02/15/02 DASD_API_VERSION 4 - added BIODASDSATTR IOCTL + * 06/07/02 DASD_API_VERSION 5 - added 'boxed DASD' support + * */ #ifndef DASD_H @@ -18,11 +25,126 @@ #define DASD_IOCTL_LETTER 'D' -#if (DASD_API_VERSION == 0) +#define DASD_API_VERSION 5 + +/* + * struct dasd_information2_t + * represents any data about the device, which is visible to userspace, + * including format and features. + */ +typedef struct dasd_information2_t { + unsigned int devno; /* S/390 devno */ + unsigned int real_devno; /* for aliases */ + unsigned int schid; /* S/390 subchannel identifier */ + unsigned int cu_type : 16; /* from SenseID */ + unsigned int cu_model : 8; /* from SenseID */ + unsigned int dev_type : 16; /* from SenseID */ + unsigned int dev_model : 8; /* from SenseID */ + unsigned int open_count; + unsigned int req_queue_len; + unsigned int chanq_len; /* length of chanq */ + char type[4]; /* from discipline.name, 'none' for unknown */ + unsigned int status; /* current device level */ + unsigned int label_block; /* where to find the VOLSER */ + unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ + unsigned int characteristics_size; + unsigned int confdata_size; + char characteristics[64]; /* from read_device_characteristics */ + char configuration_data[256]; /* from read_configuration_data */ + unsigned int format; /* format info like formatted/cdl/ldl/... */ + unsigned int features; /* dasd features like 'ro',... */ + unsigned int reserved0; /* reserved for further use ,... */ + unsigned int reserved1; /* reserved for further use ,... */ + unsigned int reserved2; /* reserved for further use ,... */ + unsigned int reserved3; /* reserved for further use ,... */ + unsigned int reserved4; /* reserved for further use ,... */ + unsigned int reserved5; /* reserved for further use ,...
*/ + unsigned int reserved6; /* reserved for further use ,... */ + unsigned int reserved7; /* reserved for further use ,... */ +} dasd_information2_t; + +/* + * values to be used for dasd_information_t.format + * 0x00: NOT formatted + * 0x01: Linux disc layout + * 0x02: Common disc layout + */ +#define DASD_FORMAT_NONE 0 +#define DASD_FORMAT_LDL 1 +#define DASD_FORMAT_CDL 2 +/* + * values to be used for dasd_information_t.features + * 0x00: default features + * 0x01: readonly (ro) + */ +#define DASD_FEATURE_DEFAULT 0 +#define DASD_FEATURE_READONLY 1 #define DASD_PARTN_BITS 2 /* + * struct dasd_information_t + * represents any data about the device, which is visible to userspace + */ +typedef struct dasd_information_t { + unsigned int devno; /* S/390 devno */ + unsigned int real_devno; /* for aliases */ + unsigned int schid; /* S/390 subchannel identifier */ + unsigned int cu_type : 16; /* from SenseID */ + unsigned int cu_model : 8; /* from SenseID */ + unsigned int dev_type : 16; /* from SenseID */ + unsigned int dev_model : 8; /* from SenseID */ + unsigned int open_count; + unsigned int req_queue_len; + unsigned int chanq_len; /* length of chanq */ + char type[4]; /* from discipline.name, 'none' for unknown */ + unsigned int status; /* current device level */ + unsigned int label_block; /* where to find the VOLSER */ + unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ + unsigned int characteristics_size; + unsigned int confdata_size; + char characteristics[64]; /* from read_device_characteristics */ + char configuration_data[256]; /* from read_configuration_data */ +} dasd_information_t; + +/* + * Read Subsystem Data - Performance Statistics + */ +typedef struct dasd_rssd_perf_stats_t { + unsigned char invalid:1; + unsigned char format:3; + unsigned char data_format:4; + unsigned char unit_address; + unsigned short device_status; + unsigned int nr_read_normal; + unsigned int nr_read_normal_hits; + unsigned int nr_write_normal; + unsigned int nr_write_fast_normal_hits; + unsigned int nr_read_seq; + unsigned int nr_read_seq_hits; + unsigned int nr_write_seq; + unsigned int nr_write_fast_seq_hits; + unsigned int nr_read_cache; + unsigned int nr_read_cache_hits; + unsigned int nr_write_cache; + unsigned int nr_write_fast_cache_hits; + unsigned int nr_inhibit_cache; + unsigned int nr_bybass_cache; + unsigned int nr_seq_dasd_to_cache; + unsigned int nr_dasd_to_cache; + unsigned int nr_cache_to_dasd; + unsigned int nr_delayed_fast_write; + unsigned int nr_normal_fast_write; + unsigned int nr_seq_fast_write; + unsigned int nr_cache_miss; + unsigned char status2; + unsigned int nr_quick_write_promotes; + unsigned char reserved; + unsigned short ssid; + unsigned char reseved2[96]; +} __attribute__((packed)) dasd_rssd_perf_stats_t; + +/* * struct profile_info_t * holds the profinling information */ @@ -62,30 +184,36 @@ #define DASD_FMT_INT_INVAL 4 /* invalidate tracks */ #define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */ + /* - * struct dasd_information_t - * represents any data about the data, which is visible to userspace + * struct attrib_data_t + * represents the operation (cache) bits for the device. + * Used in DE to influence caching of the DASD.
*/ -typedef struct dasd_information_t { - unsigned int devno; /* S/390 devno */ - unsigned int real_devno; /* for aliases */ - unsigned int schid; /* S/390 subchannel identifier */ - unsigned int cu_type : 16; /* from SenseID */ - unsigned int cu_model : 8; /* from SenseID */ - unsigned int dev_type : 16; /* from SenseID */ - unsigned int dev_model : 8; /* from SenseID */ - unsigned int open_count; - unsigned int req_queue_len; - unsigned int chanq_len; - char type[4]; /* from discipline.name, 'none' for unknown */ - unsigned int status; /* current device level */ - unsigned int label_block; /* where to find the VOLSER */ - unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ - unsigned int characteristics_size; - unsigned int confdata_size; - char characteristics[64]; /* from read_device_characteristics */ - char configuration_data[256]; /* from read_configuration_data */ -} dasd_information_t; +typedef struct attrib_data_t { + unsigned char operation:3; /* cache operation mode */ + unsigned char reserved:5; /* reserved */ + unsigned short nr_cyl; /* no of cylinders for read ahead */ + unsigned char reserved2[29]; /* for future use */ +} __attribute__ ((packed)) attrib_data_t; + +/* definition of operation (cache) bits within attributes of DE */ +#define DASD_NORMAL_CACHE 0x0 +#define DASD_BYPASS_CACHE 0x1 +#define DASD_INHIBIT_LOAD 0x2 +#define DASD_SEQ_ACCESS 0x3 +#define DASD_SEQ_PRESTAGE 0x4 +#define DASD_REC_ACCESS 0x5 + + +/******************************************************************************** + * SECTION: Definition of IOCTLs + * + * Here is how the ioctl-nr should be used: + * 0 - 31 DASD driver itself + * 32 - 239 still open + * 240 - 255 reserved for EMC + *******************************************************************************/ /* Disable the volume (for Linux) */ #define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0) @@ -97,15 +225,26 @@ #define BIODASDSLCK _IO(DASD_IOCTL_LETTER,4) /* steal lock */ /* reset profiling information of a device */ #define BIODASDPRRST _IO(DASD_IOCTL_LETTER,5) + + /* retrieve API version number */ #define DASDAPIVER _IOR(DASD_IOCTL_LETTER,0,int) /* Get information on a dasd device */ #define BIODASDINFO _IOR(DASD_IOCTL_LETTER,1,dasd_information_t) /* retrieve profiling information of a device */ #define BIODASDPRRD _IOR(DASD_IOCTL_LETTER,2,dasd_profile_info_t) +/* Get information on a dasd device (enhanced) */ +#define BIODASDINFO2 _IOR(DASD_IOCTL_LETTER,3,dasd_information2_t) +/* Performance Statistics Read */ +#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t) + + /* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */ #define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) -#endif /* DASD_API_VERSION */ +/* Set Attributes (cache operations) */ +#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t) + + #endif /* DASD_H */ /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/idals.h linux.21rc5-ac2/include/asm-s390x/idals.h --- linux.21rc5/include/asm-s390x/idals.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/idals.h 2003-04-25 13:47:51.000000000 +0100 @@ -217,7 +217,7 @@ * Copy count bytes from an idal buffer to user memory */ static inline size_t -idal_buffer_to_user(struct idal_buffer *ib, void *to, size_t count) +idal_buffer_to_user(const struct idal_buffer *ib, void *to, size_t count) { size_t left; int i; @@ -238,7 +238,7 @@ * Copy count bytes from user memory to an idal buffer */ static
inline size_t -idal_buffer_from_user(struct idal_buffer *ib, void *from, size_t count) +idal_buffer_from_user(struct idal_buffer *ib, const void *from, size_t count) { size_t left; int i; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/irq.h linux.21rc5-ac2/include/asm-s390x/irq.h --- linux.21rc5/include/asm-s390x/irq.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/irq.h 2003-04-25 13:48:57.000000000 +0100 @@ -498,6 +498,8 @@ #define DEVSTAT_SUSPENDED 0x00000400 #define DEVSTAT_UNKNOWN_DEV 0x00000800 #define DEVSTAT_UNFRIENDLY_DEV 0x00001000 +#define DEVSTAT_NOT_ACC 0x00002000 +#define DEVSTAT_NOT_ACC_ERR 0x00004000 #define DEVSTAT_FINAL_STATUS 0x80000000 #define DEVINFO_NOT_OPER DEVSTAT_NOT_OPER @@ -582,6 +584,7 @@ #define DOIO_TIMEOUT 0x0080 /* 3 secs. timeout for sync. I/O */ #define DOIO_DONT_CALL_INTHDLR 0x0100 /* don't call interrupt handler */ #define DOIO_CANCEL_ON_TIMEOUT 0x0200 /* cancel I/O if it timed out */ +#define DOIO_USE_DIAG98 0x0400 /* use DIAG98 instead of SSCH */ /* * do_IO() @@ -786,6 +789,25 @@ return ccode; } +extern __inline__ int diag98(int irq, volatile orb_t *addr) +{ + int ccode; + + __asm__ __volatile__( + " lr 1,%1\n" + " lgr 0,%2\n" /* orb in 0 */ + " lghi 2,12\n" /* function code 0x0c */ + " diag 2,0,152\n" /* diag98 instead of ssch, + result in gpr 1 */ + " ipm %0\n" /* usual cc evaluation. cc=3 will be + reported as not operational */ + " srl %0,28" + : "=d" (ccode) + : "d" (irq | 0x10000L), "a" (addr) + : "cc", "0", "1", "2"); + return ccode; +} + extern __inline__ int rsch(int irq) { int ccode; @@ -889,25 +911,7 @@ void VM_virtual_device_info( __u16 devno, /* device number */ senseid_t *ps ); /* ptr to senseID data */ -extern __inline__ int diag210( diag210_t * addr) -{ - int ccode; - - __asm__ __volatile__( -#ifdef CONFIG_ARCH_S390X - " sam31\n" - " diag %1,0,0x210\n" - " sam64\n" -#else - " diag %1,0,0x210\n" -#endif - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "a" (addr) - : "cc" ); - return ccode; -} +extern int diag210( diag210_t * addr); extern __inline__ int chsc( chsc_area_t * chsc_area) { @@ -1001,6 +1005,8 @@ #include +#define get_irq_lock(irq) &ioinfo[irq]->irq_lock + #define s390irq_spin_lock(irq) \ spin_lock(&(ioinfo[irq]->irq_lock)) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/page.h linux.21rc5-ac2/include/asm-s390x/page.h --- linux.21rc5/include/asm-s390x/page.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/page.h 2003-05-28 15:13:38.000000000 +0100 @@ -9,7 +9,6 @@ #ifndef _S390_PAGE_H #define _S390_PAGE_H -#include #include /* PAGE_SHIFT determines the page size */ @@ -17,9 +16,6 @@ #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) -#define STORAGE_ACC_KEY 6 -#define STORAGE_ACC_KEY_MASK (STORAGE_ACC_KEY<<4) - #ifdef __KERNEL__ #ifndef __ASSEMBLY__ @@ -130,62 +126,13 @@ #define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT)) #define VALID_PAGE(page) ((page - mem_map) < max_mapnr) -/* the pfix table of all mem addresses */ -extern void* *pfix_table; -#define pfix_get_page_addr(ptr) (pfix_table[(unsigned long)ptr>>PAGE_SHIFT]) -#define pfix_get_addr(ptr) (pfix_get_page_addr(ptr)+ \ - ((unsigned long)ptr&PAGE_MASK)) - -/* - * These two functions allow to lock and unlock pages in VM. They need - * the absolute address of the page to be locked or unlocked. This will - * only work if the diag98 option in the user directory is enabled for - * the guest. 
- */ - -/* if result is 0, addr will become the host absolute address */ -extern __inline__ int pfix_lock_page(void *addr) -{ - int ret = -ENOMEM; - - if (!MACHINE_HAS_PFIX) - return -ENOSYS; - - __asm__ __volatile__( - " lg 0,%1\n" /* parameter in gpr 0 */ - " lghi 2,0\n" /* function code is 0 */ - " diag 2,0,0x98\n" /* result in gpr 1 */ - " jnz 0f\n" - " lghi %0,0\n" - "0: lg %1,1\n" /* gpr1 contains host absol. addr */ - : "+d" (ret), "+d" ((unsigned long)addr) : - : "0", "1", "2", "cc" ); - return ret; -} - -extern __inline__ int pfix_unlock_page(unsigned long addr) -{ - int ret = -EINVAL; - - if (!MACHINE_HAS_PFIX) - return -ENOSYS; - - __asm__ __volatile__( - " lgr 0,%1\n" /* parameter in gpr 0 */ - " lghi 2,4\n" /* function code is 4 */ - " diag 2,0,0x98\n" /* result in gpr 1 */ - " jnz 0f\n" - " lghi %0,0\n" - "0:\n" - : "+d" (ret) - : "a" (addr) - : "0", "1", "2", "cc" ); - return ret; -} - #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define get_storage_key() 0 +#define pfix_get_page_addr(addr) (unsigned long)addr +#define pfix_get_addr(addr) (unsigned long)addr + #endif /* __KERNEL__ */ #endif /* _S390_PAGE_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/pgtable.h linux.21rc5-ac2/include/asm-s390x/pgtable.h --- linux.21rc5/include/asm-s390x/pgtable.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/pgtable.h 2003-05-28 15:13:38.000000000 +0100 @@ -233,22 +233,12 @@ pte_val(pteval) &= ~_PAGE_MKCLEAN; asm volatile ("sske %0,%1" - : : "d" (STORAGE_ACC_KEY_MASK), - "a" (pte_val(pteval))); + : : "d" (0), "a" (pte_val(pteval))); } *pteptr = pteval; } -extern inline void set_access_key(pte_t pteval) -{ - asm volatile (" iske 0,%1\n" - " or 0,%0\n" - " sske 0,%1\n" - : : "d" (STORAGE_ACC_KEY_MASK), - "a" (pte_val(pteval)) : "0", "cc"); -} - #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* @@ -462,7 +452,7 @@ /* We can't clear the changed bit atomically. For now we * clear (!) the page referenced bit. 
*/ asm volatile ("sske %0,%1" - : : "d" (STORAGE_ACC_KEY_MASK), "a" (*ptep)); + : : "d" (0), "a" (*ptep)); return 1; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/s390io.h linux.21rc5-ac2/include/asm-s390x/s390io.h --- linux.21rc5/include/asm-s390x/s390io.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/s390io.h 2003-04-25 13:48:57.000000000 +0100 @@ -8,6 +8,7 @@ #ifndef __s390io_h #define __s390io_h +#include /* * IRQ data structure used by I/O subroutines @@ -23,7 +24,7 @@ struct _ioinfo *prev; struct _ioinfo *next; - __u8 st; /* subchannel type */ + __u8 st; /* subchannel type */ union { unsigned int info; @@ -57,7 +58,9 @@ unsigned int dval : 1; /* device number valid */ unsigned int unknown : 1; /* unknown device - if SenseID failed */ unsigned int unfriendly: 1; /* device is locked by someone else */ - unsigned int unused : (sizeof(unsigned int)*8 - 25); /* unused */ + unsigned int killio : 1; /* currently killing pending io */ + unsigned int noio : 1; /* don't let drivers start io */ + unsigned int unused : (sizeof(unsigned int)*8 - 27); /* unused */ } __attribute__ ((packed)) flags; } ui; @@ -80,8 +83,9 @@ unsigned long qflag; /* queued flags */ __u8 qlpm; /* queued logical path mask */ ssd_info_t ssd_info; /* subchannel description */ - - } __attribute__ ((aligned(8))) ioinfo_t; + struct tq_struct pver_bh; /* path verification bottom half task */ + atomic_t pver_pending; /* != 0 if path verification for sch is pending */ +} __attribute__ ((aligned(8))) ioinfo_t; #define IOINFO_FLAGS_BUSY 0x80000000 #define IOINFO_FLAGS_OPER 0x40000000 @@ -101,5 +105,9 @@ #define CHSC_SEI_ACC_LINKADDR 2 #define CHSC_SEI_ACC_FULLLINKADDR 3 +#define CIO_PATHGONE_WAIT4INT 0x01 +#define CIO_PATHGONE_IOERR 0x02 +#define CIO_PATHGONE_DEVGONE 0x04 + #endif /* __s390io_h */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/setup.h linux.21rc5-ac2/include/asm-s390x/setup.h --- linux.21rc5/include/asm-s390x/setup.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/setup.h 2003-05-28 15:13:38.000000000 +0100 @@ -29,9 +29,10 @@ #define MACHINE_IS_P390 (machine_flags & 4) #define MACHINE_HAS_MVPG (machine_flags & 16) #define MACHINE_HAS_DIAG44 (machine_flags & 32) -#define MACHINE_HAS_PFIX (machine_flags & 128) +#define MACHINE_NEW_STIDP (machine_flags & 64) +#define MACHINE_HAS_PFIX (0) -#define MACHINE_HAS_HWC (!MACHINE_IS_P390) +#define MACHINE_HAS_SCLP (!MACHINE_IS_P390) /* * Console mode. 
Override with conmode= @@ -40,10 +41,10 @@ extern unsigned int console_device; #define CONSOLE_IS_UNDEFINED (console_mode == 0) -#define CONSOLE_IS_HWC (console_mode == 1) +#define CONSOLE_IS_SCLP (console_mode == 1) #define CONSOLE_IS_3215 (console_mode == 2) #define CONSOLE_IS_3270 (console_mode == 3) -#define SET_CONSOLE_HWC do { console_mode = 1; } while (0) +#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0) #define SET_CONSOLE_3215 do { console_mode = 2; } while (0) #define SET_CONSOLE_3270 do { console_mode = 3; } while (0) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/smp.h linux.21rc5-ac2/include/asm-s390x/smp.h --- linux.21rc5/include/asm-s390x/smp.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/smp.h 2003-04-25 13:47:51.000000000 +0100 @@ -42,7 +42,7 @@ #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) extern __inline__ int cpu_logical_map(int cpu) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/tape390.h linux.21rc5-ac2/include/asm-s390x/tape390.h --- linux.21rc5/include/asm-s390x/tape390.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/tape390.h 2003-04-25 13:53:58.000000000 +0100 @@ -0,0 +1,39 @@ +/************************************************************************* + * + * tape390.h + * enables user programs to display messages on the tape device + * + * S390 and zSeries version + * Copyright (C) 2001 IBM Corporation + * Author(s): Despina Papadopoulou + * + *************************************************************************/ + +#ifndef _TAPE390_H +#define _TAPE390_H + +#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct) + +/* + * The TAPE390_DISPLAY ioctl calls the Load Display command + * which transfers 17 bytes of data from the channel to the subsystem: + * - 1 format control byte, and + * - two 8-byte messages + * + * Format control byte: + * 0-2: New Message Overlay + * 3: Alternate Messages + * 4: Blink Message + * 5: Display Low/High Message + * 6: Reserved + * 7: Automatic Load Request + * + */ + +typedef struct display_struct { + char cntrl; + char message1[8]; + char message2[8]; +} display_struct; + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-s390x/vtoc.h linux.21rc5-ac2/include/asm-s390x/vtoc.h --- linux.21rc5/include/asm-s390x/vtoc.h 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/include/asm-s390x/vtoc.h 2003-04-25 13:49:34.000000000 +0100 @@ -18,7 +18,6 @@ #include #endif -#define DASD_API_VERSION 0 #define LINE_LENGTH 80 #define VTOC_START_CC 0x0 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-sparc/ioctls.h linux.21rc5-ac2/include/asm-sparc/ioctls.h --- linux.21rc5/include/asm-sparc/ioctls.h 2003-05-28 15:07:38.000000000 +0100 +++ linux.21rc5-ac2/include/asm-sparc/ioctls.h 2003-04-22 16:44:38.000000000 +0100 @@ -86,6 +86,7 @@ #define FIONBIO _IOW('f', 126, int) #define FIONREAD _IOR('f', 127, int) #define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) /* SCARY Rutgers local SunOS kernel hackery, perhaps I will support it * someday. This is completely bogus, I know... 
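The sparc hunk above and the sparc64 hunk below (matching FIOQSIZE additions appear for other architectures elsewhere in this patch) let user space ask for the logical size of the object behind a file descriptor without a separate stat() call. A hedged user-space sketch, assuming the patched asm/ioctls.h is what <sys/ioctl.h> pulls in; the long long stands in for loff_t:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>		/* FIOQSIZE, from the patched asm/ioctls.h */

int main(int argc, char **argv)
{
	long long size;		/* loff_t */
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	if (ioctl(fd, FIOQSIZE, &size) == 0)
		printf("%s: %lld bytes\n", argv[1], size);
	else
		perror("FIOQSIZE");
	close(fd);
	return 0;
}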
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-sparc64/ioctls.h linux.21rc5-ac2/include/asm-sparc64/ioctls.h --- linux.21rc5/include/asm-sparc64/ioctls.h 2003-05-28 15:07:40.000000000 +0100 +++ linux.21rc5-ac2/include/asm-sparc64/ioctls.h 2003-04-22 16:44:38.000000000 +0100 @@ -87,6 +87,7 @@ #define FIONBIO _IOW('f', 126, int) #define FIONREAD _IOR('f', 127, int) #define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) /* SCARY Rutgers local SunOS kernel hackery, perhaps I will support it * someday. This is completely bogus, I know... diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/asm-x86_64/system.h linux.21rc5-ac2/include/asm-x86_64/system.h --- linux.21rc5/include/asm-x86_64/system.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/asm-x86_64/system.h 2003-05-28 15:12:29.000000000 +0100 @@ -284,8 +284,8 @@ #define sti() __global_sti() #define save_flags(x) ((x)=__global_save_flags()) #define restore_flags(x) __global_restore_flags(x) -#define save_and_cli(x) do { save_flags(x); cli(); } while(0); -#define save_and_sti(x) do { save_flags(x); sti(); } while(0); +#define save_and_cli(x) do { save_flags(x); cli(); } while(0) +#define save_and_sti(x) do { save_flags(x); sti(); } while(0) #else diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/acct.h linux.21rc5-ac2/include/linux/acct.h --- linux.21rc5/include/linux/acct.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/acct.h 2003-05-28 21:48:47.000000000 +0100 @@ -56,7 +56,9 @@ comp_t ac_swaps; /* Accounting Number of Swaps */ __u32 ac_exitcode; /* Accounting Exitcode */ char ac_comm[ACCT_COMM + 1]; /* Accounting Command Name */ - char ac_pad[10]; /* Accounting Padding Bytes */ + char ac_pad[2]; /* Accounting Padding Bytes */ + __u32 ac_uid32; /* 32 bit UID */ + __u32 ac_gid32; /* 32 bit GID */ }; /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/acpi.h linux.21rc5-ac2/include/linux/acpi.h --- linux.21rc5/include/linux/acpi.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/acpi.h 2003-05-28 21:48:48.000000000 +0100 @@ -1,180 +1,414 @@ /* - * acpi.h - ACPI driver interface + * acpi.h - ACPI Interface * - * Copyright (C) 1999 Andrew Henroid + * Copyright (C) 2001 Paul Diefenbaugh * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef _LINUX_ACPI_H #define _LINUX_ACPI_H -#include -#include -#ifdef __KERNEL__ -#include -#include -#endif /* __KERNEL__ */ +#ifndef _LINUX +#define _LINUX +#endif -/* - * Device states - */ -typedef enum { - ACPI_D0, /* fully-on */ - ACPI_D1, /* partial-on */ - ACPI_D2, /* partial-on */ - ACPI_D3, /* fully-off */ -} acpi_dstate_t; - -typedef enum { - ACPI_S0, /* working state */ - ACPI_S1, /* power-on suspend */ - ACPI_S2, /* suspend to ram, with devices */ - ACPI_S3, /* suspend to ram */ - ACPI_S4, /* suspend to disk */ - ACPI_S5, /* soft-off */ -} acpi_sstate_t; - -/* RSDP location */ -#define ACPI_BIOS_ROM_BASE (0x0e0000) -#define ACPI_BIOS_ROM_END (0x100000) - -/* Table signatures */ -#define ACPI_RSDP1_SIG 0x20445352 /* 'RSD ' */ -#define ACPI_RSDP2_SIG 0x20525450 /* 'PTR ' */ -#define ACPI_RSDT_SIG 0x54445352 /* 'RSDT' */ -#define ACPI_FADT_SIG 0x50434146 /* 'FACP' */ -#define ACPI_DSDT_SIG 0x54445344 /* 'DSDT' */ -#define ACPI_FACS_SIG 0x53434146 /* 'FACS' */ - -#define ACPI_SIG_LEN 4 -#define ACPI_FADT_SIGNATURE "FACP" - -/* PM1_STS/EN flags */ -#define ACPI_TMR 0x0001 -#define ACPI_BM 0x0010 -#define ACPI_GBL 0x0020 -#define ACPI_PWRBTN 0x0100 -#define ACPI_SLPBTN 0x0200 -#define ACPI_RTC 0x0400 -#define ACPI_WAK 0x8000 - -/* PM1_CNT flags */ -#define ACPI_SCI_EN 0x0001 -#define ACPI_BM_RLD 0x0002 -#define ACPI_GBL_RLS 0x0004 -#define ACPI_SLP_TYP0 0x0400 -#define ACPI_SLP_TYP1 0x0800 -#define ACPI_SLP_TYP2 0x1000 -#define ACPI_SLP_EN 0x2000 - -#define ACPI_SLP_TYP_MASK 0x1c00 -#define ACPI_SLP_TYP_SHIFT 10 - -/* PM_TMR masks */ -#define ACPI_TMR_VAL_EXT 0x00000100 -#define ACPI_TMR_MASK 0x00ffffff -#define ACPI_TMR_HZ 3579545 /* 3.58 MHz */ -#define ACPI_TMR_KHZ (ACPI_TMR_HZ / 1000) - -#define ACPI_MICROSEC_TO_TMR_TICKS(val) \ - (((val) * (ACPI_TMR_KHZ)) / 1000) - -/* PM2_CNT flags */ -#define ACPI_ARB_DIS 0x01 - -/* FADT flags */ -#define ACPI_WBINVD 0x00000001 -#define ACPI_WBINVD_FLUSH 0x00000002 -#define ACPI_PROC_C1 0x00000004 -#define ACPI_P_LVL2_UP 0x00000008 -#define ACPI_PWR_BUTTON 0x00000010 -#define ACPI_SLP_BUTTON 0x00000020 -#define ACPI_FIX_RTC 0x00000040 -#define ACPI_RTC_64 0x00000080 -#define ACPI_TMR_VAL_EXT 0x00000100 -#define ACPI_DCK_CAP 0x00000200 - -/* FADT BOOT_ARCH flags */ -#define FADT_BOOT_ARCH_LEGACY_DEVICES 0x0001 -#define FADT_BOOT_ARCH_KBD_CONTROLLER 0x0002 - -/* FACS flags */ -#define ACPI_S4BIOS 0x00000001 - -/* processor block offsets */ -#define ACPI_P_CNT 0x00000000 -#define ACPI_P_LVL2 0x00000004 -#define ACPI_P_LVL3 0x00000005 - -/* C-state latencies (microseconds) */ -#define ACPI_MAX_P_LVL2_LAT 100 -#define ACPI_MAX_P_LVL3_LAT 1000 -#define ACPI_INFINITE_LAT (~0UL) +#include + +#include +#include +#include +#include + + +#ifdef CONFIG_ACPI_BOOT + +enum acpi_irq_model_id { + ACPI_IRQ_MODEL_PIC = 0, + ACPI_IRQ_MODEL_IOAPIC, + ACPI_IRQ_MODEL_IOSAPIC, + ACPI_IRQ_MODEL_COUNT +}; + +extern enum acpi_irq_model_id acpi_irq_model; + + +/* Root System Description Pointer (RSDP) */ + +struct 
acpi_table_rsdp { + char signature[8]; + u8 checksum; + char oem_id[6]; + u8 revision; + u32 rsdt_address; +} __attribute__ ((packed)); + +struct acpi20_table_rsdp { + char signature[8]; + u8 checksum; + char oem_id[6]; + u8 revision; + u32 rsdt_address; + u32 length; + u64 xsdt_address; + u8 ext_checksum; + u8 reserved[3]; +} __attribute__ ((packed)); + +typedef struct { + u8 type; + u8 length; +} __attribute__ ((packed)) acpi_table_entry_header; + +/* Root System Description Table (RSDT) */ + +struct acpi_table_rsdt { + struct acpi_table_header header; + u32 entry[1]; +} __attribute__ ((packed)); + +/* Extended System Description Table (XSDT) */ + +struct acpi_table_xsdt { + struct acpi_table_header header; + u64 entry[1]; +} __attribute__ ((packed)); + +/* Fixed ACPI Description Table (FADT) */ + +struct acpi_table_fadt { + struct acpi_table_header header; + u32 facs_addr; + u32 dsdt_addr; + /* ... */ +} __attribute__ ((packed)); + +/* Multiple APIC Description Table (MADT) */ + +struct acpi_table_madt { + struct acpi_table_header header; + u32 lapic_address; + struct { + u32 pcat_compat:1; + u32 reserved:31; + } flags; +} __attribute__ ((packed)); + +enum acpi_madt_entry_id { + ACPI_MADT_LAPIC = 0, + ACPI_MADT_IOAPIC, + ACPI_MADT_INT_SRC_OVR, + ACPI_MADT_NMI_SRC, + ACPI_MADT_LAPIC_NMI, + ACPI_MADT_LAPIC_ADDR_OVR, + ACPI_MADT_IOSAPIC, + ACPI_MADT_LSAPIC, + ACPI_MADT_PLAT_INT_SRC, + ACPI_MADT_ENTRY_COUNT +}; + +typedef struct { + u16 polarity:2; + u16 trigger:2; + u16 reserved:12; +} __attribute__ ((packed)) acpi_interrupt_flags; + +struct acpi_table_lapic { + acpi_table_entry_header header; + u8 acpi_id; + u8 id; + struct { + u32 enabled:1; + u32 reserved:31; + } flags; +} __attribute__ ((packed)); + +struct acpi_table_ioapic { + acpi_table_entry_header header; + u8 id; + u8 reserved; + u32 address; + u32 global_irq_base; +} __attribute__ ((packed)); + +struct acpi_table_int_src_ovr { + acpi_table_entry_header header; + u8 bus; + u8 bus_irq; + u32 global_irq; + acpi_interrupt_flags flags; +} __attribute__ ((packed)); + +struct acpi_table_nmi_src { + acpi_table_entry_header header; + acpi_interrupt_flags flags; + u32 global_irq; +} __attribute__ ((packed)); + +struct acpi_table_lapic_nmi { + acpi_table_entry_header header; + u8 acpi_id; + acpi_interrupt_flags flags; + u8 lint; +} __attribute__ ((packed)); + +struct acpi_table_lapic_addr_ovr { + acpi_table_entry_header header; + u8 reserved[2]; + u64 address; +} __attribute__ ((packed)); + +struct acpi_table_iosapic { + acpi_table_entry_header header; + u8 id; + u8 reserved; + u32 global_irq_base; + u64 address; +} __attribute__ ((packed)); + +struct acpi_table_lsapic { + acpi_table_entry_header header; + u8 acpi_id; + u8 id; + u8 eid; + u8 reserved[3]; + struct { + u32 enabled:1; + u32 reserved:31; + } flags; +} __attribute__ ((packed)); + +struct acpi_table_plat_int_src { + acpi_table_entry_header header; + acpi_interrupt_flags flags; + u8 type; /* See acpi_interrupt_type */ + u8 id; + u8 eid; + u8 iosapic_vector; + u32 global_irq; + u32 reserved; +} __attribute__ ((packed)); + +enum acpi_interrupt_id { + ACPI_INTERRUPT_PMI = 1, + ACPI_INTERRUPT_INIT, + ACPI_INTERRUPT_CPEI, + ACPI_INTERRUPT_COUNT +}; + +#define ACPI_SPACE_MEM 0 + +struct acpi_gen_regaddr { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 resv; + u32 addrl; + u32 addrh; +} __attribute__ ((packed)); + +struct acpi_table_hpet { + struct acpi_table_header header; + u32 id; + struct acpi_gen_regaddr addr; + u8 number; + u16 min_tick; + u8 page_protect; +} __attribute__ 
((packed)); /* - * Sysctl declarations + * System Resource Affinity Table (SRAT) + * see http://www.microsoft.com/hwdev/design/srat.htm */ -enum -{ - CTL_ACPI = 10 +struct acpi_table_srat { + struct acpi_table_header header; + u32 table_revision; + u64 reserved; +} __attribute__ ((packed)); + +enum acpi_srat_entry_id { + ACPI_SRAT_PROCESSOR_AFFINITY = 0, + ACPI_SRAT_MEMORY_AFFINITY, + ACPI_SRAT_ENTRY_COUNT }; -enum -{ - ACPI_FADT = 1, +struct acpi_table_processor_affinity { + acpi_table_entry_header header; + u8 proximity_domain; + u8 apic_id; + struct { + u32 enabled:1; + u32 reserved:31; + } flags; + u8 lsapic_eid; + u8 reserved[7]; +} __attribute__ ((packed)); + +struct acpi_table_memory_affinity { + acpi_table_entry_header header; + u8 proximity_domain; + u8 reserved1[5]; + u32 base_addr_lo; + u32 base_addr_hi; + u32 length_lo; + u32 length_hi; + u32 memory_type; /* See acpi_address_range_id */ + struct { + u32 enabled:1; + u32 hot_pluggable:1; + u32 reserved:30; + } flags; + u64 reserved2; +} __attribute__ ((packed)); + +enum acpi_address_range_id { + ACPI_ADDRESS_RANGE_MEMORY = 1, + ACPI_ADDRESS_RANGE_RESERVED = 2, + ACPI_ADDRESS_RANGE_ACPI = 3, + ACPI_ADDRESS_RANGE_NVS = 4, + ACPI_ADDRESS_RANGE_COUNT +}; + +/* + * System Locality Information Table (SLIT) + * see http://devresource.hp.com/devresource/docs/techpapers/ia64/slit.pdf + */ + +struct acpi_table_slit { + struct acpi_table_header header; + u64 localities; + u8 entry[1]; /* real size = localities^2 */ +} __attribute__ ((packed)); + +/* Smart Battery Description Table (SBST) */ + +struct acpi_table_sbst { + struct acpi_table_header header; + u32 warning; /* Warn user */ + u32 low; /* Critical sleep */ + u32 critical; /* Critical shutdown */ +} __attribute__ ((packed)); + +/* Embedded Controller Boot Resources Table (ECDT) */ + +struct acpi_table_ecdt { + struct acpi_table_header header; + struct acpi_generic_address ec_control; + struct acpi_generic_address ec_data; + u32 uid; + u8 gpe_bit; + char ec_id[0]; +} __attribute__ ((packed)); + +/* Table Handlers */ + +enum acpi_table_id { + ACPI_TABLE_UNKNOWN = 0, + ACPI_APIC, + ACPI_BOOT, + ACPI_DBGP, ACPI_DSDT, - ACPI_PM1_ENABLE, - ACPI_GPE_ENABLE, - ACPI_GPE_LEVEL, - ACPI_EVENT, - ACPI_P_BLK, - ACPI_ENTER_LVL2_LAT, - ACPI_ENTER_LVL3_LAT, - ACPI_P_LVL2_LAT, - ACPI_P_LVL3_LAT, - ACPI_C1_TIME, - ACPI_C2_TIME, - ACPI_C3_TIME, - ACPI_C1_COUNT, - ACPI_C2_COUNT, - ACPI_C3_COUNT, - ACPI_S0_SLP_TYP, - ACPI_S1_SLP_TYP, - ACPI_S5_SLP_TYP, - ACPI_SLEEP, + ACPI_ECDT, + ACPI_ETDT, + ACPI_FADT, ACPI_FACS, - ACPI_XSDT, - ACPI_PMTIMER, - ACPI_BATT, + ACPI_OEMX, + ACPI_PSDT, + ACPI_SBST, + ACPI_SLIT, + ACPI_SPCR, + ACPI_SRAT, + ACPI_SSDT, + ACPI_SPMI, + ACPI_HPET, + ACPI_TABLE_COUNT +}; + +typedef int (*acpi_table_handler) (unsigned long phys_addr, unsigned long size); + +extern acpi_table_handler acpi_table_ops[ACPI_TABLE_COUNT]; + +typedef int (*acpi_madt_entry_handler) (acpi_table_entry_header *header); + +char * __acpi_map_table (unsigned long phys_addr, unsigned long size); +unsigned long acpi_find_rsdp (void); +int acpi_boot_init (void); +int acpi_numa_init (void); + +int acpi_table_init (void); +int acpi_table_parse (enum acpi_table_id id, acpi_table_handler handler); +int acpi_get_table_header_early (enum acpi_table_id id, struct acpi_table_header **header); +int acpi_table_parse_madt (enum acpi_madt_entry_id id, acpi_madt_entry_handler handler); +int acpi_table_parse_srat (enum acpi_srat_entry_id id, acpi_madt_entry_handler handler); +void acpi_table_print (struct acpi_table_header 
*header, unsigned long phys_addr); +void acpi_table_print_madt_entry (acpi_table_entry_header *madt); +void acpi_table_print_srat_entry (acpi_table_entry_header *srat); + +/* the following four functions are architecture-dependent */ +void acpi_numa_slit_init (struct acpi_table_slit *slit); +void acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa); +void acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma); +void acpi_numa_arch_fixup(void); + +#endif /*CONFIG_ACPI_BOOT*/ + + +#ifdef CONFIG_ACPI_PCI + +struct acpi_prt_entry { + struct list_head node; + struct acpi_pci_id id; + u8 pin; + struct { + acpi_handle handle; + u32 index; + } link; + u32 irq; +}; + +struct acpi_prt_list { + int count; + struct list_head entries; }; -#define ACPI_SLP_TYP_DISABLED (~0UL) +extern struct acpi_prt_list acpi_prt; -#ifdef __KERNEL__ +struct pci_dev; -/* routines for saving/restoring kernel state */ -FASTCALL(extern unsigned long acpi_save_state_mem(unsigned long return_point)); -FASTCALL(extern int acpi_save_state_disk(unsigned long return_point)); -extern void acpi_restore_state(void); +int acpi_pci_irq_enable (struct pci_dev *dev); +int acpi_pci_irq_init (void); -extern unsigned long acpi_wakeup_address; +#endif /*CONFIG_ACPI_PCI*/ -#endif /* __KERNEL__ */ +#ifdef CONFIG_ACPI_EC + +int ec_read(u8 addr, u8 *val); +int ec_write(u8 addr, u8 val); + +#endif /*CONFIG_ACPI_EC*/ + +#ifdef CONFIG_ACPI int acpi_init(void); +int acpi_blacklisted(void); + +#endif /*CONFIG_ACPI*/ -#endif /* _LINUX_ACPI_H */ +#endif /*_LINUX_ACPI_H*/ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/agp_backend.h linux.21rc5-ac2/include/linux/agp_backend.h --- linux.21rc5/include/linux/agp_backend.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/agp_backend.h 2003-05-28 15:31:54.000000000 +0100 @@ -52,15 +52,18 @@ INTEL_I840, INTEL_I845, INTEL_I850, + INTEL_I855_PM, INTEL_I860, + INTEL_I865_G, VIA_GENERIC, VIA_VP3, VIA_MVP3, VIA_MVP4, + VIA_APOLLO_PLE133, VIA_APOLLO_PRO, VIA_APOLLO_KX133, VIA_APOLLO_KT133, - VIA_APOLLO_P4X400, + VIA_APOLLO_KM266, VIA_APOLLO_KT400, SIS_GENERIC, AMD_GENERIC, @@ -81,6 +84,9 @@ SVWRKS_HE, SVWRKS_LE, SVWRKS_GENERIC, + NVIDIA_NFORCE, + NVIDIA_NFORCE2, + NVIDIA_GENERIC, HP_ZX1, }; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/binfmts.h linux.21rc5-ac2/include/linux/binfmts.h --- linux.21rc5/include/linux/binfmts.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/binfmts.h 2003-05-28 21:48:48.000000000 +0100 @@ -16,6 +16,8 @@ #ifdef __KERNEL__ +struct file; + /* * This structure is used to hold the arguments that are used when loading binaries. */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/bitops.h linux.21rc5-ac2/include/linux/bitops.h --- linux.21rc5/include/linux/bitops.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/bitops.h 2003-05-28 21:48:47.000000000 +0100 @@ -1,6 +1,6 @@ #ifndef _LINUX_BITOPS_H #define _LINUX_BITOPS_H - +#include /* * ffs: find first bit set. This is defined the same way as @@ -38,6 +38,47 @@ } /* + * fls: find last bit set. 
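Returning to the linux/acpi.h interface above: a MADT handler is a callback invoked once per matching table entry. A sketch of counting enabled local APICs with acpi_table_parse_madt(); the demo_ names are invented and the negative-on-error return convention is an assumption:

#include <linux/acpi.h>

static int demo_lapic_count;

/* acpi_madt_entry_handler: invoked once per ACPI_MADT_LAPIC entry. */
static int demo_parse_lapic(acpi_table_entry_header *header)
{
	struct acpi_table_lapic *lapic = (struct acpi_table_lapic *)header;

	acpi_table_print_madt_entry(header);
	if (lapic->flags.enabled)
		demo_lapic_count++;
	return 0;
}

/* Call after acpi_table_init() during early boot. */
static int demo_count_lapics(void)
{
	demo_lapic_count = 0;
	if (acpi_table_parse_madt(ACPI_MADT_LAPIC, demo_parse_lapic) < 0)
		return -1;	/* assumed: negative means parse failure */
	return demo_lapic_count;
}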
+ */ + +extern __inline__ int generic_fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000)) { + x <<= 1; + r -= 1; + } + return r; +} + +extern __inline__ int get_bitmask_order(unsigned int count) +{ + int order; + + order = fls(count); + return order; /* We could be slightly more clever with -1 here... */ +} + +/* * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/blkdev.h linux.21rc5-ac2/include/linux/blkdev.h --- linux.21rc5/include/linux/blkdev.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/blkdev.h 2003-05-29 01:44:06.000000000 +0100 @@ -138,6 +138,8 @@ * Tasks wait here for free read and write requests */ wait_queue_head_t wait_for_requests[2]; + + struct request *last_request; }; #define blk_queue_plugged(q) (q)->plugged diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/blk.h linux.21rc5-ac2/include/linux/blk.h --- linux.21rc5/include/linux/blk.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/blk.h 2003-05-29 01:44:06.000000000 +0100 @@ -86,7 +86,15 @@ static inline void blkdev_dequeue_request(struct request * req) { - list_del(&req->queue); + request_queue_t *q = req->q; + if (q) { + if (q->last_request == req || + (q->head_active && + q->last_request == blkdev_next_request(req))) + q->last_request = NULL; + } + + list_del(&req->queue); } int end_that_request_first(struct request *req, int uptodate, char *name); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/cpufreq.h linux.21rc5-ac2/include/linux/cpufreq.h --- linux.21rc5/include/linux/cpufreq.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/cpufreq.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,267 @@ +/* + * linux/include/linux/cpufreq.h + * + * Copyright (C) 2001 Russell King + * (C) 2002 Dominik Brodowski + * + * + * $Id: cpufreq.h,v 1.32 2003/01/02 22:16:27 db Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_CPUFREQ_H +#define _LINUX_CPUFREQ_H + +#include +#include +#include + + +/********************************************************************* + * CPUFREQ NOTIFIER INTERFACE * + *********************************************************************/ + +int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); +int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); + +#define CPUFREQ_TRANSITION_NOTIFIER (0) +#define CPUFREQ_POLICY_NOTIFIER (1) + +#define CPUFREQ_ALL_CPUS ((NR_CPUS)) + + +/********************** cpufreq policy notifiers *********************/ + +#define CPUFREQ_POLICY_POWERSAVE (1) +#define CPUFREQ_POLICY_PERFORMANCE (2) + +/* Frequency values here are CPU kHz so that hardware which doesn't run + * with some frequencies can complain without having to guess what per + * cent / per mille means. + * Maximum transition latency is in nanoseconds - if it's unknown, + * CPUFREQ_ETERNAL shall be used. 
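generic_fls() above locates the most significant set bit by repeatedly halving the search window; get_bitmask_order() uses it to round a table size up to a power-of-two order. A userspace check of the values it produces (the body is copied from the hunk, minus the kernel-only extern __inline__):

#include <stdio.h>

/* Same binary-search body as generic_fls() above. */
static int fls_demo(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xf0000000)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xc0000000)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000)) { x <<= 1;  r -= 1;  }
	return r;
}

int main(void)
{
	/* fls(0) = 0, fls(1) = 1, fls(1000) = 10 (a table for 1000
	 * entries gets 2^10 = 1024 slots), fls(0x80000000) = 32. */
	printf("%d %d %d %d\n", fls_demo(0), fls_demo(1),
	       fls_demo(1000), fls_demo(0x80000000));
	return 0;
}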
+ */ + +#define CPUFREQ_ETERNAL (-1) +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + unsigned int transition_latency; +}; + +struct cpufreq_policy { + unsigned int cpu; /* cpu nr or CPUFREQ_ALL_CPUS */ + unsigned int min; /* in kHz */ + unsigned int max; /* in kHz */ + unsigned int policy; /* see above */ + struct cpufreq_cpuinfo cpuinfo; /* see above */ +}; + +#define CPUFREQ_ADJUST (0) +#define CPUFREQ_INCOMPATIBLE (1) +#define CPUFREQ_NOTIFY (2) + + +/******************** cpufreq transition notifiers *******************/ + +#define CPUFREQ_PRECHANGE (0) +#define CPUFREQ_POSTCHANGE (1) + +struct cpufreq_freqs { + unsigned int cpu; /* cpu nr or CPUFREQ_ALL_CPUS */ + unsigned int old; + unsigned int new; +}; + + +/** + * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) + * @old: old value + * @div: divisor + * @mult: multiplier + * + * Needed for loops_per_jiffy and similar calculations. We do it + * this way to avoid math overflow on 32-bit machines. This will + * become architecture dependent once high-resolution-timer is + * merged (or any other thing that introduces sc_math.h). + * + * new = old * mult / div + */ +static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) +{ + unsigned long val, carry; + + mult /= 100; + div /= 100; + val = (old / div) * mult; + carry = old % div; + carry = carry * mult / div; + + return carry + val; +}; + + +/********************************************************************* + * CPUFREQ DRIVER INTERFACE * + *********************************************************************/ + +struct cpufreq_driver { + /* needed by all drivers */ + int (*verify) (struct cpufreq_policy *policy); + int (*setpolicy) (struct cpufreq_policy *policy); + struct cpufreq_policy *policy; + /* 2.4. compatible API */ +#ifdef CONFIG_CPU_FREQ_24_API + unsigned int cpu_cur_freq[NR_CPUS]; +#endif +}; + +int cpufreq_register(struct cpufreq_driver *driver_data); +int cpufreq_unregister(void); + +void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); + + +static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) +{ + if (policy->min < min) + policy->min = min; + if (policy->max < min) + policy->max = min; + if (policy->min > max) + policy->min = max; + if (policy->max > max) + policy->max = max; + if (policy->min > policy->max) + policy->min = policy->max; + return; +} + +/********************************************************************* + * CPUFREQ 2.6. INTERFACE * + *********************************************************************/ +int cpufreq_set_policy(struct cpufreq_policy *policy); +int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); + +#ifdef CONFIG_PM +int cpufreq_restore(void); +#endif + + +#ifdef CONFIG_CPU_FREQ_24_API +/********************************************************************* + * CPUFREQ 2.4. 
INTERFACE * + *********************************************************************/ +int cpufreq_setmax(unsigned int cpu); +int cpufreq_set(unsigned int kHz, unsigned int cpu); +unsigned int cpufreq_get(unsigned int cpu); + +/* /proc/sys/cpu */ +enum { + CPU_NR = 1, /* compatibilty reasons */ + CPU_NR_0 = 1, + CPU_NR_1 = 2, + CPU_NR_2 = 3, + CPU_NR_3 = 4, + CPU_NR_4 = 5, + CPU_NR_5 = 6, + CPU_NR_6 = 7, + CPU_NR_7 = 8, + CPU_NR_8 = 9, + CPU_NR_9 = 10, + CPU_NR_10 = 11, + CPU_NR_11 = 12, + CPU_NR_12 = 13, + CPU_NR_13 = 14, + CPU_NR_14 = 15, + CPU_NR_15 = 16, + CPU_NR_16 = 17, + CPU_NR_17 = 18, + CPU_NR_18 = 19, + CPU_NR_19 = 20, + CPU_NR_20 = 21, + CPU_NR_21 = 22, + CPU_NR_22 = 23, + CPU_NR_23 = 24, + CPU_NR_24 = 25, + CPU_NR_25 = 26, + CPU_NR_26 = 27, + CPU_NR_27 = 28, + CPU_NR_28 = 29, + CPU_NR_29 = 30, + CPU_NR_30 = 31, + CPU_NR_31 = 32, +}; + +/* /proc/sys/cpu/{0,1,...,(NR_CPUS-1)} */ +enum { + CPU_NR_FREQ_MAX = 1, + CPU_NR_FREQ_MIN = 2, + CPU_NR_FREQ = 3, +}; + +#define CTL_CPU_VARS_SPEED_MAX(cpunr) { \ + .ctl_name = CPU_NR_FREQ_MAX, \ + .data = &cpu_max_freq[cpunr], \ + .procname = "speed-max", \ + .maxlen = sizeof(cpu_max_freq[cpunr]),\ + .mode = 0444, \ + .proc_handler = proc_dointvec, } + +#define CTL_CPU_VARS_SPEED_MIN(cpunr) { \ + .ctl_name = CPU_NR_FREQ_MIN, \ + .data = &cpu_min_freq[cpunr], \ + .procname = "speed-min", \ + .maxlen = sizeof(cpu_min_freq[cpunr]),\ + .mode = 0444, \ + .proc_handler = proc_dointvec, } + +#define CTL_CPU_VARS_SPEED(cpunr) { \ + .ctl_name = CPU_NR_FREQ, \ + .procname = "speed", \ + .mode = 0644, \ + .proc_handler = cpufreq_procctl, \ + .strategy = cpufreq_sysctl, \ + .extra1 = (void*) (cpunr), } + +#define CTL_TABLE_CPU_VARS(cpunr) static ctl_table ctl_cpu_vars_##cpunr[] = {\ + CTL_CPU_VARS_SPEED_MAX(cpunr), \ + CTL_CPU_VARS_SPEED_MIN(cpunr), \ + CTL_CPU_VARS_SPEED(cpunr), \ + { .ctl_name = 0, }, } + +/* the ctl_table entry for each CPU */ +#define CPU_ENUM(s) { \ + .ctl_name = (CPU_NR + s), \ + .procname = #s, \ + .mode = 0555, \ + .child = ctl_cpu_vars_##s } + +#endif /* CONFIG_CPU_FREQ_24_API */ + +/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ + +#define CPUFREQ_ENTRY_INVALID ~0 +#define CPUFREQ_TABLE_END ~1 + +struct cpufreq_frequency_table { + unsigned int index; /* any */ + unsigned int frequency; /* kHz - doesn't need to be in ascending + * order */ +}; + +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); + +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); + +int cpufreq_frequency_table_setpolicy(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int *index); + +#endif /* _LINUX_CPUFREQ_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/crc32.h linux.21rc5-ac2/include/linux/crc32.h --- linux.21rc5/include/linux/crc32.h 2003-05-28 15:07:37.000000000 +0100 +++ linux.21rc5-ac2/include/linux/crc32.h 2003-05-28 21:48:47.000000000 +0100 @@ -1,49 +1,27 @@ /* - * crc32.h for early Linux 2.4.19pre kernel inclusion - * This defines ether_crc_le() and ether_crc() as inline functions - * This is slated to change to using the library crc32 functions - * as kernel 2.5.2 included at some future date. 
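On the driver side, the verify() hook's job is to clamp a requested policy into what the hardware can actually do, and cpufreq_verify_within_limits() above implements exactly that clamping. A minimal sketch; the 300/600 MHz bounds and the demo_ name are invented for illustration:

#include <linux/cpufreq.h>
#include <linux/errno.h>

#define DEMO_MIN_KHZ	300000	/* hypothetical hardware floor */
#define DEMO_MAX_KHZ	600000	/* hypothetical hardware ceiling */

static int demo_verify(struct cpufreq_policy *policy)
{
	if (!policy)
		return -EINVAL;

	/* Pull min/max into the supported range; the helper also
	 * guarantees min <= max on the way out. */
	cpufreq_verify_within_limits(policy, DEMO_MIN_KHZ, DEMO_MAX_KHZ);
	return 0;
}

/* After a real transition the driver would rescale delay loops with
 * loops_per_jiffy = cpufreq_scale(loops_per_jiffy, old_khz, new_khz); */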
+ * crc32.h + * See linux/lib/crc32.c for license and changes */ #ifndef _LINUX_CRC32_H #define _LINUX_CRC32_H #include -/* The little-endian AUTODIN II ethernet CRC calculation. - N.B. Do not use for bulk data, use a table-based routine instead. - This is common code and should be moved to net/core/crc.c */ -static unsigned const ethernet_polynomial_le = 0xedb88320U; -static inline unsigned ether_crc_le(int length, unsigned char *data) -{ - unsigned int crc = 0xffffffff; /* Initial value. */ - while(--length >= 0) { - unsigned char current_octet = *data++; - int bit; - for (bit = 8; --bit >= 0; current_octet >>= 1) { - if ((crc ^ current_octet) & 1) { - crc >>= 1; - crc ^= ethernet_polynomial_le; - } else - crc >>= 1; - } - } - return crc; -} +extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len); +extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len); +extern u32 bitreverse(u32 in); -static unsigned const ethernet_polynomial = 0x04c11db7U; -static inline u32 ether_crc(int length, unsigned char *data) -{ - int crc = -1; - while (--length >= 0) { - unsigned char current_octet = *data++; - int bit; - for (bit = 0; bit < 8; bit++, current_octet >>= 1) { - crc = (crc << 1) ^ - ((crc < 0) ^ (current_octet & 1) ? - ethernet_polynomial : 0); - } - } - return crc; -} +#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)data, length) + +/* + * Helpers for hash table generation of ethernet nics: + * + * Ethernet sends the least significant bit of a byte first, thus crc32_le + * is used. The output of crc32_le is bit reversed [most significant bit + * is in bit nr 0], thus it must be reversed before use. Except for + * nics that bit swap the result internally... + */ +#define ether_crc(length, data) bitreverse(crc32_le(~0, data, length)) +#define ether_crc_le(length, data) crc32_le(~0, data, length) #endif /* _LINUX_CRC32_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/dcache.h linux.21rc5-ac2/include/linux/dcache.h --- linux.21rc5/include/linux/dcache.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/dcache.h 2003-05-28 21:48:47.000000000 +0100 @@ -62,7 +62,8 @@ return end_name_hash(hash); } -#define DNAME_INLINE_LEN 16 +/* XXX: check good value for 64bit */ +#define DNAME_INLINE_LEN 32 struct dentry { atomic_t d_count; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/device-mapper.h linux.21rc5-ac2/include/linux/device-mapper.h --- linux.21rc5/include/linux/device-mapper.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/device-mapper.h 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the LGPL. 
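With the table-driven library in place, ether_crc() reduces to bitreverse(crc32_le(...)) as the comment explains; the typical consumer is a NIC driver picking a multicast filter bin from the top CRC bits. A sketch (the 64-bin, top-6-bits layout is one common convention, not universal):

#include <linux/crc32.h>
#include <linux/if_ether.h>	/* ETH_ALEN */

/* Map a multicast MAC address onto one of 64 hash-filter bins. */
static unsigned int demo_mcast_bin(const unsigned char *addr)
{
	/* ether_crc() already bit-reverses the little-endian CRC,
	 * so the interesting bits land at the top of the word. */
	return ether_crc(ETH_ALEN, addr) >> 26;
}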
+ */ + +#ifndef _LINUX_DEVICE_MAPPER_H +#define _LINUX_DEVICE_MAPPER_H + +#define DM_DIR "mapper" /* Slashes not supported */ +#define DM_MAX_TYPE_NAME 16 +#define DM_NAME_LEN 128 +#define DM_UUID_LEN 129 + +#ifdef __KERNEL__ + +struct dm_table; +struct dm_dev; +typedef unsigned long offset_t; + +typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; + +/* + * Prototypes for functions for a target + */ +typedef int (*dm_ctr_fn) (struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context); +typedef void (*dm_dtr_fn) (struct dm_table *t, void *c); +typedef int (*dm_map_fn) (struct buffer_head *bh, int rw, void *context); +typedef int (*dm_err_fn) (struct buffer_head *bh, int rw, void *context); +typedef int (*dm_status_fn) (status_type_t status_type, char *result, + int maxlen, void *context); + +void dm_error(const char *message); + +/* + * Constructors should call these functions to ensure destination devices + * are opened/closed correctly + */ +int dm_table_get_device(struct dm_table *t, const char *path, + offset_t start, offset_t len, + int mode, struct dm_dev **result); +void dm_table_put_device(struct dm_table *table, struct dm_dev *d); + +/* + * Information about a target type + */ +struct target_type { + const char *name; + struct module *module; + dm_ctr_fn ctr; + dm_dtr_fn dtr; + dm_map_fn map; + dm_err_fn err; + dm_status_fn status; +}; + +int dm_register_target(struct target_type *t); +int dm_unregister_target(struct target_type *t); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_DEVICE_MAPPER_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/dm-ioctl.h linux.21rc5-ac2/include/linux/dm-ioctl.h --- linux.21rc5/include/linux/dm-ioctl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/dm-ioctl.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the LGPL. + */ + +#ifndef _LINUX_DM_IOCTL_H +#define _LINUX_DM_IOCTL_H + +#include +#include + +/* + * Implements a traditional ioctl interface to the device mapper. + */ + +/* + * All ioctl arguments consist of a single chunk of memory, with + * this structure at the start. If a uuid is specified any + * lookup (eg. for a DM_INFO) will be done on that, *not* the + * name. + */ +struct dm_ioctl { + /* + * The version number is made up of three parts: + * major - no backward or forward compatibility, + * minor - only backwards compatible, + * patch - both backwards and forwards compatible. + * + * All clients of the ioctl interface should fill in the + * version number of the interface that they were + * compiled with. + * + * All recognised ioctl commands (ie. those that don't + * return -ENOTTY) fill out this field, even if the + * command failed. + */ + uint32_t version[3]; /* in/out */ + uint32_t data_size; /* total size of data passed in + * including this struct */ + + uint32_t data_start; /* offset to start of data + * relative to start of this struct */ + + uint32_t target_count; /* in/out */ + uint32_t open_count; /* out */ + uint32_t flags; /* in/out */ + + __kernel_dev_t dev; /* in/out */ + + char name[DM_NAME_LEN]; /* device name */ + char uuid[DM_UUID_LEN]; /* unique identifier for + * the block device */ +}; + +/* + * Used to specify tables. These structures appear after the + * dm_ioctl. 
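Tying the hooks together: the shape of a minimal pass-through target built on this interface. The "demo" name is invented, the open mode is an assumption, and the buffer_head remapping is elided because struct dm_dev is opaque to this header; a sketch, not a tested target:

#include <linux/device-mapper.h>
#include <linux/fs.h>		/* struct buffer_head, FMODE_* */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_c {
	struct dm_dev *dev;	/* the one device we forward to */
};

static int demo_ctr(struct dm_table *t, offset_t b, offset_t l,
		    int argc, char **argv, void **context)
{
	struct demo_c *dc;

	if (argc != 1) {
		dm_error("dm-demo: exactly one device argument required");
		return -EINVAL;
	}

	dc = kmalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	/* FMODE_READ | FMODE_WRITE is assumed here; real targets
	 * derive the mode from the table they are being built into. */
	if (dm_table_get_device(t, argv[0], b, l,
				FMODE_READ | FMODE_WRITE, &dc->dev)) {
		kfree(dc);
		return -ENXIO;
	}

	*context = dc;
	return 0;
}

static void demo_dtr(struct dm_table *t, void *context)
{
	struct demo_c *dc = context;

	dm_table_put_device(t, dc->dev);
	kfree(dc);
}

static int demo_map(struct buffer_head *bh, int rw, void *context)
{
	/* A real target rewrites bh->b_rdev/b_rsector to point into
	 * dc->dev here and returns 1 so the buffer is resubmitted;
	 * the remap itself is elided since dm_dev is opaque. */
	return 1;
}

static struct target_type demo_target = {
	name:	"demo",
	module:	THIS_MODULE,
	ctr:	demo_ctr,
	dtr:	demo_dtr,
	map:	demo_map,
};

/* module init/exit would call dm_register_target(&demo_target) and
 * dm_unregister_target(&demo_target) respectively. */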
+ */ +struct dm_target_spec { + int32_t status; /* used when reading from kernel only */ + uint64_t sector_start; + uint32_t length; + + /* + * Offset in bytes (from the start of this struct) to + * next target_spec. + */ + uint32_t next; + + char target_type[DM_MAX_TYPE_NAME]; + + /* + * Parameter string starts immediately after this object. + * Be careful to add padding after string to ensure correct + * alignment of subsequent dm_target_spec. + */ +}; + +/* + * Used to retrieve the target dependencies. + */ +struct dm_target_deps { + uint32_t count; + + __kernel_dev_t dev[0]; /* out */ +}; + +/* + * If you change this make sure you make the corresponding change + * to dm-ioctl.c:lookup_ioctl() + */ +enum { + /* Top level cmds */ + DM_VERSION_CMD = 0, + DM_REMOVE_ALL_CMD, + + /* device level cmds */ + DM_DEV_CREATE_CMD, + DM_DEV_REMOVE_CMD, + DM_DEV_RELOAD_CMD, + DM_DEV_RENAME_CMD, + DM_DEV_SUSPEND_CMD, + DM_DEV_DEPS_CMD, + DM_DEV_STATUS_CMD, + + /* target level cmds */ + DM_TARGET_STATUS_CMD, + DM_TARGET_WAIT_CMD +}; + +#define DM_IOCTL 0xfd + +#define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl) +#define DM_REMOVE_ALL _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl) + +#define DM_DEV_CREATE _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, struct dm_ioctl) +#define DM_DEV_REMOVE _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, struct dm_ioctl) +#define DM_DEV_RELOAD _IOWR(DM_IOCTL, DM_DEV_RELOAD_CMD, struct dm_ioctl) +#define DM_DEV_SUSPEND _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl) +#define DM_DEV_RENAME _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, struct dm_ioctl) +#define DM_DEV_DEPS _IOWR(DM_IOCTL, DM_DEV_DEPS_CMD, struct dm_ioctl) +#define DM_DEV_STATUS _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl) + +#define DM_TARGET_STATUS _IOWR(DM_IOCTL, DM_TARGET_STATUS_CMD, struct dm_ioctl) +#define DM_TARGET_WAIT _IOWR(DM_IOCTL, DM_TARGET_WAIT_CMD, struct dm_ioctl) + +#define DM_VERSION_MAJOR 1 +#define DM_VERSION_MINOR 0 +#define DM_VERSION_PATCHLEVEL 3 +#define DM_VERSION_EXTRA "-ioctl-bk (2002-08-5)" + +/* Status bits */ +#define DM_READONLY_FLAG 0x00000001 +#define DM_SUSPEND_FLAG 0x00000002 +#define DM_EXISTS_FLAG 0x00000004 +#define DM_PERSISTENT_DEV_FLAG 0x00000008 + +/* + * Flag passed into ioctl STATUS command to get table information + * rather than current status. 
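Seen from userspace, every command has the same shape: fill in the version triple and data_size, then issue the ioctl against the device-mapper control node. A sketch; the /dev/mapper/control path is an assumption about how that node has been set up:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dm-ioctl.h>

int main(void)
{
	struct dm_ioctl dmi;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&dmi, 0, sizeof(dmi));
	/* Clients state which interface version they were compiled
	 * against; the kernel fills in its own version on return. */
	dmi.version[0] = DM_VERSION_MAJOR;
	dmi.version[1] = DM_VERSION_MINOR;
	dmi.version[2] = DM_VERSION_PATCHLEVEL;
	dmi.data_size = sizeof(dmi);

	if (ioctl(fd, DM_VERSION, &dmi) == 0)
		printf("dm ioctl interface %u.%u.%u\n",
		       dmi.version[0], dmi.version[1], dmi.version[2]);
	close(fd);
	return 0;
}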
+ */ +#define DM_STATUS_TABLE_FLAG 0x00000010 + +#endif /* _LINUX_DM_IOCTL_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/dqblk_v1.h linux.21rc5-ac2/include/linux/dqblk_v1.h --- linux.21rc5/include/linux/dqblk_v1.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/dqblk_v1.h 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,18 @@ +/* + * File with in-memory structures of old quota format + */ + +#ifndef _LINUX_DQBLK_V1_H +#define _LINUX_DQBLK_V1_H + +/* Id of quota format */ +#define QFMT_VFS_OLD 1 + +/* Root squash turned on */ +#define V1_DQF_RSQUASH 1 + +/* Special information about quotafile */ +struct v1_mem_dqinfo { +}; + +#endif /* _LINUX_DQBLK_V1_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/dqblk_v2.h linux.21rc5-ac2/include/linux/dqblk_v2.h --- linux.21rc5/include/linux/dqblk_v2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/dqblk_v2.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,20 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_DQBLK_V2_H +#define _LINUX_DQBLK_V2_H + +#include + +/* id numbers of quota format */ +#define QFMT_VFS_V0 2 + +/* Inmemory copy of version specific information */ +struct v2_mem_dqinfo { + unsigned int dqi_blocks; + unsigned int dqi_free_blk; + unsigned int dqi_free_entry; +}; + +#endif /* _LINUX_DQBLK_V2_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/dqblk_xfs.h linux.21rc5-ac2/include/linux/dqblk_xfs.h --- linux.21rc5/include/linux/dqblk_xfs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/dqblk_xfs.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,159 @@ +/* + * Copyright (c) 1995-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, + * USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef _LINUX_DQBLK_XFS_H +#define _LINUX_DQBLK_XFS_H + +#include + +/* + * Disk quota - quotactl(2) commands for the XFS Quota Manager (XQM). 
+ */ + +#define XQM_CMD(x) (('X'<<8)+(x)) /* note: forms first QCMD argument */ +#define Q_XQUOTAON XQM_CMD(0x1) /* enable accounting/enforcement */ +#define Q_XQUOTAOFF XQM_CMD(0x2) /* disable accounting/enforcement */ +#define Q_XGETQUOTA XQM_CMD(0x3) /* get disk limits and usage */ +#define Q_XSETQLIM XQM_CMD(0x4) /* set disk limits */ +#define Q_XGETQSTAT XQM_CMD(0x5) /* get quota subsystem status */ +#define Q_XQUOTARM XQM_CMD(0x6) /* free disk space used by dquots */ + +/* + * fs_disk_quota structure: + * + * This contains the current quota information regarding a user/proj/group. + * It is 64-bit aligned, and all the blk units are in BBs (Basic Blocks) of + * 512 bytes. + */ +#define FS_DQUOT_VERSION 1 /* fs_disk_quota.d_version */ +typedef struct fs_disk_quota { + __s8 d_version; /* version of this structure */ + __s8 d_flags; /* XFS_{USER,PROJ,GROUP}_QUOTA */ + __u16 d_fieldmask; /* field specifier */ + __u32 d_id; /* user, project, or group ID */ + __u64 d_blk_hardlimit;/* absolute limit on disk blks */ + __u64 d_blk_softlimit;/* preferred limit on disk blks */ + __u64 d_ino_hardlimit;/* maximum # allocated inodes */ + __u64 d_ino_softlimit;/* preferred inode limit */ + __u64 d_bcount; /* # disk blocks owned by the user */ + __u64 d_icount; /* # inodes owned by the user */ + __s32 d_itimer; /* zero if within inode limits */ + /* if not, we refuse service */ + __s32 d_btimer; /* similar to above; for disk blocks */ + __u16 d_iwarns; /* # warnings issued wrt num inodes */ + __u16 d_bwarns; /* # warnings issued wrt disk blocks */ + __s32 d_padding2; /* padding2 - for future use */ + __u64 d_rtb_hardlimit;/* absolute limit on realtime blks */ + __u64 d_rtb_softlimit;/* preferred limit on RT disk blks */ + __u64 d_rtbcount; /* # realtime blocks owned */ + __s32 d_rtbtimer; /* similar to above; for RT disk blks */ + __u16 d_rtbwarns; /* # warnings issued wrt RT disk blks */ + __s16 d_padding3; /* padding3 - for future use */ + char d_padding4[8]; /* yet more padding */ +} fs_disk_quota_t; + +/* + * These fields are sent to Q_XSETQLIM to specify fields that need to change. + */ +#define FS_DQ_ISOFT (1<<0) +#define FS_DQ_IHARD (1<<1) +#define FS_DQ_BSOFT (1<<2) +#define FS_DQ_BHARD (1<<3) +#define FS_DQ_RTBSOFT (1<<4) +#define FS_DQ_RTBHARD (1<<5) +#define FS_DQ_LIMIT_MASK (FS_DQ_ISOFT | FS_DQ_IHARD | FS_DQ_BSOFT | \ + FS_DQ_BHARD | FS_DQ_RTBSOFT | FS_DQ_RTBHARD) +/* + * These timers can only be set in super user's dquot. For others, timers are + * automatically started and stopped. Superusers timer values set the limits + * for the rest. In case these values are zero, the DQ_{F,B}TIMELIMIT values + * defined below are used. + * These values also apply only to the d_fieldmask field for Q_XSETQLIM. + */ +#define FS_DQ_BTIMER (1<<6) +#define FS_DQ_ITIMER (1<<7) +#define FS_DQ_RTBTIMER (1<<8) +#define FS_DQ_TIMER_MASK (FS_DQ_BTIMER | FS_DQ_ITIMER | FS_DQ_RTBTIMER) + +/* + * The following constants define the default amount of time given a user + * before the soft limits are treated as hard limits (usually resulting + * in an allocation failure). These may be modified by the quotactl(2) + * system call with the Q_XSETQLIM command. + */ +#define DQ_FTIMELIMIT (7 * 24*60*60) /* 1 week */ +#define DQ_BTIMELIMIT (7 * 24*60*60) /* 1 week */ + +/* + * Various flags related to quotactl(2). Only relevant to XFS filesystems. 
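The XQM commands plug into the regular quotactl(2) multiplexor, with fs_disk_quota as the payload. A hedged userspace sketch; the device path and uid are placeholders, and it assumes a libc exposing quotactl() with the (cmd, special, id, addr) signature:

#include <stdio.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XGETQUOTA, fs_disk_quota_t */

/* Print block usage and limits for one user on an XFS filesystem.
 * "/dev/sda1" and uid 500 are placeholders. */
int main(void)
{
	fs_disk_quota_t d;

	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), "/dev/sda1",
		     500, (caddr_t)&d) != 0) {
		perror("quotactl");
		return 1;
	}

	/* All block counts are in 512-byte basic blocks (BBs). */
	printf("used %llu BBs, soft %llu, hard %llu\n",
	       (unsigned long long)d.d_bcount,
	       (unsigned long long)d.d_blk_softlimit,
	       (unsigned long long)d.d_blk_hardlimit);
	return 0;
}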
+ */ +#define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ +#define XFS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */ +#define XFS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ +#define XFS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ + +#define XFS_USER_QUOTA (1<<0) /* user quota type */ +#define XFS_PROJ_QUOTA (1<<1) /* (IRIX) project quota type */ +#define XFS_GROUP_QUOTA (1<<2) /* group quota type */ + +/* + * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system. + * Provides a centralized way to get meta infomation about the quota subsystem. + * eg. space taken up for user and group quotas, number of dquots currently + * incore. + */ +#define FS_QSTAT_VERSION 1 /* fs_quota_stat.qs_version */ + +/* + * Some basic infomation about 'quota files'. + */ +typedef struct fs_qfilestat { + __u64 qfs_ino; /* inode number */ + __u64 qfs_nblks; /* number of BBs 512-byte-blks */ + __u32 qfs_nextents; /* number of extents */ +} fs_qfilestat_t; + +typedef struct fs_quota_stat { + __s8 qs_version; /* version number for future changes */ + __u16 qs_flags; /* XFS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */ + __s8 qs_pad; /* unused */ + fs_qfilestat_t qs_uquota; /* user quota storage information */ + fs_qfilestat_t qs_gquota; /* group quota storage information */ + __u32 qs_incoredqs; /* number of dquots incore */ + __s32 qs_btimelimit; /* limit for blks timer */ + __s32 qs_itimelimit; /* limit for inodes timer */ + __s32 qs_rtbtimelimit;/* limit for rt blks timer */ + __u16 qs_bwarnlimit; /* limit for num warnings */ + __u16 qs_iwarnlimit; /* limit for num warnings */ +} fs_quota_stat_t; + +#endif /* _LINUX_DQBLK_XFS_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/fs.h linux.21rc5-ac2/include/linux/fs.h --- linux.21rc5/include/linux/fs.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/fs.h 2003-05-28 21:48:48.000000000 +0100 @@ -221,6 +221,7 @@ BH_Launder, /* 1 if we can throttle on this buffer */ BH_Attached, /* 1 if b_inode_buffers is linked into a list */ BH_JBD, /* 1 if it has an attached journal_head */ + BH_Delay, /* 1 if the buffer is delayed allocate */ BH_PrivateStart,/* not a state bit, but the first bit available * for private allocation by other entities @@ -263,7 +264,7 @@ struct page *b_page; /* the page this bh is mapped to */ void (*b_end_io)(struct buffer_head *bh, int uptodate); /* I/O completion */ void *b_private; /* reserved for b_end_io */ - + void *b_journal_head; /* ext3 journal_heads */ unsigned long b_rsector; /* Real buffer location on disk */ wait_queue_head_t b_wait; @@ -283,6 +284,7 @@ #define buffer_new(bh) __buffer_state(bh,New) #define buffer_async(bh) __buffer_state(bh,Async) #define buffer_launder(bh) __buffer_state(bh,Launder) +#define buffer_delay(bh) __buffer_state(bh,Delay) #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) @@ -395,7 +397,7 @@ int (*flushpage) (struct page *, unsigned long); int (*releasepage) (struct page *, int); #define KERNEL_HAS_O_DIRECT /* this is for modules out of the kernel */ - int (*direct_IO)(int, struct inode *, struct kiobuf *, unsigned long, int); + int (*direct_IO)(int, struct file *, struct kiobuf *, unsigned long, int); void (*removepage)(struct page *); /* called when page gets removed from the inode */ }; @@ -455,6 +457,7 @@ unsigned long i_blksize; unsigned long i_blocks; unsigned long i_version; + unsigned short i_bytes; struct semaphore i_sem; struct semaphore i_zombie; struct inode_operations 
*i_op; @@ -515,6 +518,39 @@ } u; }; +static inline void inode_add_bytes(struct inode *inode, loff_t bytes) +{ + inode->i_blocks += bytes >> 9; + bytes &= 511; + inode->i_bytes += bytes; + if (inode->i_bytes >= 512) { + inode->i_blocks++; + inode->i_bytes -= 512; + } +} + +static inline void inode_sub_bytes(struct inode *inode, loff_t bytes) +{ + inode->i_blocks -= bytes >> 9; + bytes &= 511; + if (inode->i_bytes < bytes) { + inode->i_blocks--; + inode->i_bytes += 512; + } + inode->i_bytes -= bytes; +} + +static inline loff_t inode_get_bytes(struct inode *inode) +{ + return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes; +} + +static inline void inode_set_bytes(struct inode *inode, loff_t bytes) +{ + inode->i_blocks = bytes >> 9; + inode->i_bytes = bytes & 511; +} + struct fown_struct { int pid; /* pid or -pgrp where SIGIO should be sent */ uid_t uid, euid; /* uid/euid of process setting the owner */ @@ -660,20 +696,6 @@ int last_type; }; -#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */ -#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */ - -struct quota_mount_options -{ - unsigned int flags; /* Flags for diskquotas on this device */ - struct semaphore dqio_sem; /* lock device while I/O in progress */ - struct semaphore dqoff_sem; /* serialize quota_off() and quota_on() on device */ - struct file *files[MAXQUOTAS]; /* fp's to quotafiles */ - time_t inode_expire[MAXQUOTAS]; /* expiretime for inode-quota */ - time_t block_expire[MAXQUOTAS]; /* expiretime for block-quota */ - char rsquash[MAXQUOTAS]; /* for quotas threat root as any other user */ -}; - /* * Umount options */ @@ -721,6 +743,7 @@ struct file_system_type *s_type; struct super_operations *s_op; struct dquot_operations *dq_op; + struct quotactl_ops *s_qcop; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; @@ -735,7 +758,7 @@ struct block_device *s_bdev; struct list_head s_instances; - struct quota_mount_options s_dquot; /* Diskquota specific options */ + struct quota_info s_dquot; /* Diskquota specific options */ union { struct minix_sb_info minix_sb; @@ -940,6 +963,7 @@ #define I_LOCK 8 #define I_FREEING 16 #define I_CLEAR 32 +#define I_NEW 64 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) @@ -959,16 +983,6 @@ __mark_inode_dirty(inode, I_DIRTY_PAGES); } -struct dquot_operations { - void (*initialize) (struct inode *, short); - void (*drop) (struct inode *); - int (*alloc_block) (struct inode *, unsigned long, char); - int (*alloc_inode) (const struct inode *, unsigned long); - void (*free_block) (struct inode *, unsigned long); - void (*free_inode) (const struct inode *, unsigned long); - int (*transfer) (struct inode *, struct iattr *); -}; - struct file_system_type { const char *name; int fs_flags; @@ -1128,9 +1142,8 @@ extern void refile_buffer(struct buffer_head * buf); extern void create_empty_buffers(struct page *, kdev_t, unsigned long); extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate); - -/* reiserfs_writepage needs this */ -extern void set_buffer_async_io(struct buffer_head *bh) ; +extern void end_buffer_io_async(struct buffer_head *bh, int uptodate); +extern void set_buffer_async_io(struct buffer_head *bh); #define BUF_CLEAN 0 #define BUF_LOCKED 1 /* Buffers scheduled for write */ @@ -1377,12 +1390,47 @@ extern void force_delete(struct inode *); extern struct inode * igrab(struct inode *); extern ino_t iunique(struct super_block *, ino_t); +extern void unlock_new_inode(struct inode *); typedef int (*find_inode_t)(struct inode *, unsigned 
long, void *); -extern struct inode * iget4(struct super_block *, unsigned long, find_inode_t, void *); + +extern struct inode * iget4_locked(struct super_block *, unsigned long, + find_inode_t, void *); + +static inline struct inode *iget4(struct super_block *sb, unsigned long ino, + find_inode_t find_actor, void *opaque) +{ + struct inode *inode = iget4_locked(sb, ino, find_actor, opaque); + + if (inode && (inode->i_state & I_NEW)) { + /* + * reiserfs-specific kludge that is expected to go away ASAP. + */ + if (sb->s_op->read_inode2) + sb->s_op->read_inode2(inode, opaque); + else + sb->s_op->read_inode(inode); + unlock_new_inode(inode); + } + + return inode; +} + static inline struct inode *iget(struct super_block *sb, unsigned long ino) { - return iget4(sb, ino, NULL, NULL); + struct inode *inode = iget4_locked(sb, ino, NULL, NULL); + + if (inode && (inode->i_state & I_NEW)) { + sb->s_op->read_inode(inode); + unlock_new_inode(inode); + } + + return inode; +} + +static inline struct inode *iget_locked(struct super_block *sb, unsigned long ino) +{ + return iget4_locked(sb, ino, NULL, NULL); } extern void clear_inode(struct inode *); @@ -1460,6 +1508,7 @@ extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *); extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *); +extern ssize_t generic_file_write_nolock(struct file *, const char *, size_t, loff_t *); extern void do_generic_file_read(struct file *, loff_t *, read_descriptor_t *, read_actor_t); extern loff_t no_llseek(struct file *file, loff_t offset, int origin); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/fs_struct.h linux.21rc5-ac2/include/linux/fs_struct.h --- linux.21rc5/include/linux/fs_struct.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/fs_struct.h 2003-05-28 21:48:47.000000000 +0100 @@ -2,6 +2,9 @@ #define _LINUX_FS_STRUCT_H #ifdef __KERNEL__ +#include /* for RW_LOCK_* */ +#include /* for atomic_t */ + struct fs_struct { atomic_t count; rwlock_t lock; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/hdlc/ioctl.h linux.21rc5-ac2/include/linux/hdlc/ioctl.h --- linux.21rc5/include/linux/hdlc/ioctl.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/hdlc/ioctl.h 2003-04-22 16:44:38.000000000 +0100 @@ -34,22 +34,15 @@ } fr_proto_pvc; /* for creating/deleting FR PVCs */ typedef struct { + unsigned int dlci; + char master[IFNAMSIZ]; /* Name of master FRAD device */ +}fr_proto_pvc_info; /* for returning PVC information only */ + +typedef struct { unsigned int interval; unsigned int timeout; } cisco_proto; /* PPP doesn't need any info now - supply length = 0 to ioctl */ -union hdlc_settings { - raw_hdlc_proto raw_hdlc; - cisco_proto cisco; - fr_proto fr; - fr_proto_pvc fr_pvc; -}; - -union line_settings { - sync_serial_settings sync; - te1_settings te1; -}; - #endif /* __HDLC_IOCTL_H__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/hdlc.h linux.21rc5-ac2/include/linux/hdlc.h --- linux.21rc5/include/linux/hdlc.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/hdlc.h 2003-05-29 01:44:06.000000000 +0100 @@ -1,12 +1,11 @@ /* * Generic HDLC support routines for Linux * - * Copyright (C) 1999-2002 Krzysztof Halasa 
+ * Copyright (C) 1999-2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. */ #ifndef __HDLC_H @@ -52,7 +51,7 @@ #include #define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */ -#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10) /* max 10 bytes for FR */ +#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */ #define MAXLEN_LMISTAT 20 /* max size of status enquiry frame */ @@ -145,17 +144,20 @@ typedef struct pvc_device_struct { - struct net_device netdev; /* PVC net device - must be first */ - struct net_device_stats stats; struct hdlc_device_struct *master; - struct pvc_device_struct *next; + struct net_device *main; + struct net_device *ether; /* bridged Ethernet interface */ + struct pvc_device_struct *next; /* Sorted in ascending DLCI order */ + int dlci; + int open_count; struct { - int active; - int new; - int deleted; - int fecn; - int becn; + unsigned int new: 1; + unsigned int active: 1; + unsigned int exist: 1; + unsigned int deleted: 1; + unsigned int fecn: 1; + unsigned int becn: 1; }state; }pvc_device; @@ -180,18 +182,20 @@ void (*stop)(struct hdlc_device_struct *hdlc); void (*proto_detach)(struct hdlc_device_struct *hdlc); void (*netif_rx)(struct sk_buff *skb); + unsigned short (*type_trans)(struct sk_buff *skb, + struct net_device *dev); int proto; /* IF_PROTO_HDLC/CISCO/FR/etc. */ union { struct { fr_proto settings; pvc_device *first_pvc; - int pvc_count; + int dce_pvc_count; struct timer_list timer; int last_poll; int reliable; - int changed; + int dce_changed; int request; int fullrep_sent; u32 last_errors; /* last errors bit list */ @@ -226,6 +230,7 @@ int hdlc_raw_ioctl(hdlc_device *hdlc, struct ifreq *ifr); +int hdlc_raw_eth_ioctl(hdlc_device *hdlc, struct ifreq *ifr); int hdlc_cisco_ioctl(hdlc_device *hdlc, struct ifreq *ifr); int hdlc_ppp_ioctl(hdlc_device *hdlc, struct ifreq *ifr); int hdlc_fr_ioctl(hdlc_device *hdlc, struct ifreq *ifr); @@ -254,15 +259,9 @@ } -static __inline__ struct net_device* pvc_to_dev(pvc_device *pvc) -{ - return &pvc->netdev; -} - - static __inline__ pvc_device* dev_to_pvc(struct net_device *dev) { - return (pvc_device*)dev; + return (pvc_device*)dev->priv; } @@ -272,19 +271,6 @@ } -static __inline__ const char *pvc_to_name(pvc_device *pvc) -{ - return pvc_to_dev(pvc)->name; -} - - -static __inline__ u16 netdev_dlci(struct net_device *dev) -{ - return ntohs(*(u16*)dev->dev_addr); -} - - - static __inline__ u16 q922_to_dlci(u8 *hdr) { return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4); @@ -345,5 +331,15 @@ } +static __inline__ unsigned short hdlc_type_trans(struct sk_buff *skb, + struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(skb->dev); + if (hdlc->type_trans) + return hdlc->type_trans(skb, dev); + else + return __constant_htons(ETH_P_HDLC); +} + #endif /* __KERNEL */ #endif /* __HDLC_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/if_ether.h linux.21rc5-ac2/include/linux/if_ether.h --- linux.21rc5/include/linux/if_ether.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/if_ether.h 2003-04-30 16:12:37.000000000 +0100 @@ -65,6 +65,7 @@ #define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport * over Ethernet */ 
+#define ETH_P_EDP2 0x88A2 /* Coraid EDP2 */ /* * Non DIX types. Won't clash for 1500 types. diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/if.h linux.21rc5-ac2/include/linux/if.h --- linux.21rc5/include/linux/if.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/if.h 2003-05-28 21:48:47.000000000 +0100 @@ -21,6 +21,8 @@ #include /* for "__kernel_caddr_t" et al */ #include /* for "struct sockaddr" et al */ + +#define IFNAMSIZ 16 #include /* Standard interface flags (netdevice->flags). */ @@ -69,7 +71,11 @@ #define IF_PROTO_FR_ADD_PVC 0x2004 /* Create FR PVC */ #define IF_PROTO_FR_DEL_PVC 0x2005 /* Delete FR PVC */ #define IF_PROTO_X25 0x2006 /* X.25 */ - +#define IF_PROTO_HDLC_ETH 0x2007 /* raw HDLC, Ethernet emulation */ +#define IF_PROTO_FR_ADD_ETH_PVC 0x2008 /* Create FR Ethernet-bridged PVC */ +#define IF_PROTO_FR_DEL_ETH_PVC 0x2009 /* Delete FR Ethernet-bridged PVC */ +#define IF_PROTO_FR_PVC 0x200A /* for reading PVC status */ +#define IF_PROTO_FR_ETH_PVC 0x200B /* @@ -103,6 +109,7 @@ cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; /* interface settings */ sync_serial_settings *sync; @@ -120,7 +127,6 @@ struct ifreq { #define IFHWADDRLEN 6 -#define IFNAMSIZ 16 union { char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/interrupt.h linux.21rc5-ac2/include/linux/interrupt.h --- linux.21rc5/include/linux/interrupt.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/interrupt.h 2003-05-29 01:44:06.000000000 +0100 @@ -40,7 +40,8 @@ CM206_BH, JS_BH, MACSERIAL_BH, - ISICOM_BH + ISICOM_BH, + SIOLX_BH }; #include diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/iobuf.h linux.21rc5-ac2/include/linux/iobuf.h --- linux.21rc5/include/linux/iobuf.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/iobuf.h 2003-05-29 01:44:07.000000000 +0100 @@ -38,7 +38,8 @@ int offset; /* Offset to start of valid data */ int length; /* Number of valid bytes of data */ - unsigned int locked : 1; /* If set, pages has been locked */ + unsigned int locked : 1, /* If set, pages has been locked */ + initialized:1; /* If set, done initialize */ struct page ** maplist; struct buffer_head ** bh; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/jbd.h linux.21rc5-ac2/include/linux/jbd.h --- linux.21rc5/include/linux/jbd.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/jbd.h 2003-05-29 01:44:07.000000000 +0100 @@ -311,7 +311,7 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) { - return bh->b_private; + return bh->b_journal_head; } #define HAVE_JOURNAL_CALLBACK_STATUS diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/lockd/lockd.h linux.21rc5-ac2/include/linux/lockd/lockd.h --- linux.21rc5/include/linux/lockd/lockd.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/lockd/lockd.h 2003-05-28 21:48:48.000000000 +0100 @@ -89,8 +89,11 @@ /* * This is a server block (i.e. a lock requested by some client which * couldn't be granted because of a conflicting lock). + * + * XXX: Beware of signedness errors. b_when is passed as a signed long + * into time_before_eq et al. 
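The lockd change just below is a signedness fix: b_when values get fed to time_before_eq(), which subtracts as signed long, and (long)~0UL is -1, so the old NLM_NEVER looked like a timestamp in the distant past rather than the far future. A userspace demonstration, with time_before_eq() reproduced from its kernel definition:

#include <stdio.h>

/* Same arithmetic as the kernel's time_before_eq(). */
#define time_before_eq(a, b)	((long)(a) - (long)(b) <= 0)

int main(void)
{
	unsigned long jiffies = 100000;
	unsigned long never_old = ~(unsigned long)0;	/* old NLM_NEVER */
	unsigned long never_new = 0x7FFFFFF;		/* new NLM_NEVER */

	/* (long)~0UL is -1, so the old value compares as already
	 * expired; the new value stays a large positive deadline. */
	printf("old expired: %d\n", time_before_eq(never_old, jiffies));
	printf("new expired: %d\n", time_before_eq(never_new, jiffies));
	return 0;
}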
--okir */ -#define NLM_NEVER (~(unsigned long) 0) +#define NLM_NEVER (0x7FFFFFF) struct nlm_block { struct nlm_block * b_next; /* linked list (all blocks) */ struct nlm_block * b_fnext; /* linked list (per file) */ @@ -161,6 +164,7 @@ u32 nlmsvc_testlock(struct nlm_file *, struct nlm_lock *, struct nlm_lock *); u32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *); +void nlmsvc_grant_reply(struct nlm_cookie *, u32); unsigned long nlmsvc_retry_blocked(void); int nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, int action); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/malloc.h linux.21rc5-ac2/include/linux/malloc.h --- linux.21rc5/include/linux/malloc.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/malloc.h 2003-05-29 01:44:06.000000000 +0100 @@ -1,7 +1,7 @@ #ifndef _LINUX_MALLOC_H #define _LINUX_MALLOC_H -#warning linux/malloc.h is deprecated, use linux/slab.h instead. +#error linux/malloc.h is deprecated, use linux/slab.h instead. #include #endif /* _LINUX_MALLOC_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/mempool.h linux.21rc5-ac2/include/linux/mempool.h --- linux.21rc5/include/linux/mempool.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/mempool.h 2003-05-28 21:48:48.000000000 +0100 @@ -0,0 +1,31 @@ +/* + * memory buffer pool support + */ +#ifndef _LINUX_MEMPOOL_H +#define _LINUX_MEMPOOL_H + +#include +#include + +struct mempool_s; +typedef struct mempool_s mempool_t; + +typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data); +typedef void (mempool_free_t)(void *element, void *pool_data); + +extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); +extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask); +extern void mempool_destroy(mempool_t *pool); +extern void * mempool_alloc(mempool_t *pool, int gfp_mask); +extern void mempool_free(void *element, mempool_t *pool); + +/* + * A mempool_alloc_t and mempool_free_t that get the memory from + * a slab that is passed in through pool_data. 
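The point of a mempool is the reserve: mempool_alloc() can always fall back on min_nr preallocated elements, so writeout paths cannot deadlock waiting on the very allocator they are trying to relieve. A sketch wiring a pool to a slab cache via the slab adaptors named in the comment above (names are illustrative):

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_io { int dummy; };	/* stand-in for a per-request struct */

static kmem_cache_t *io_cachep;
static mempool_t *io_pool;

/* Reserve 16 objects so allocation can make progress even when the
 * slab allocator itself is out of memory. */
int demo_pool_init(void)
{
	io_cachep = kmem_cache_create("demo_io", sizeof(struct demo_io),
				      0, 0, NULL, NULL);
	if (!io_cachep)
		return -ENOMEM;

	io_pool = mempool_create(16, mempool_alloc_slab,
				 mempool_free_slab, io_cachep);
	if (!io_pool) {
		kmem_cache_destroy(io_cachep);
		return -ENOMEM;
	}
	return 0;
}

/* In the I/O path: may sleep, but never fails permanently. */
struct demo_io *demo_get(void)
{
	return mempool_alloc(io_pool, GFP_NOIO);
}

void demo_put(struct demo_io *io)
{
	mempool_free(io, io_pool);
}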
+ */ +void *mempool_alloc_slab(int gfp_mask, void *pool_data); +void mempool_free_slab(void *element, void *pool_data); + + +#endif /* _LINUX_MEMPOOL_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/mman.h linux.21rc5-ac2/include/linux/mman.h --- linux.21rc5/include/linux/mman.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/mman.h 2003-04-22 16:44:38.000000000 +0100 @@ -6,4 +6,8 @@ #define MREMAP_MAYMOVE 1 #define MREMAP_FIXED 2 +extern int vm_enough_memory(long pages); +extern void vm_unacct_memory(long pages); +extern void vm_validate_enough(char *x); + #endif /* _LINUX_MMAN_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/mm.h linux.21rc5-ac2/include/linux/mm.h --- linux.21rc5/include/linux/mm.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/mm.h 2003-05-29 01:44:06.000000000 +0100 @@ -104,7 +104,13 @@ #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ #define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */ -#define VM_STACK_FLAGS 0x00000177 +#define VM_ACCOUNT 0x00100000 /* Memory is a vm accounted object */ + +#ifdef ARCH_STACK_GROWSUP +#define VM_STACK_FLAGS (VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT) +#else +#define VM_STACK_FLAGS (VM_DATA_DEFAULT_FLAGS|VM_GROWSDOWN|VM_ACCOUNT) +#endif #define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ) #define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK @@ -557,7 +563,7 @@ return ret; } -extern int do_munmap(struct mm_struct *, unsigned long, size_t); +extern int do_munmap(struct mm_struct *, unsigned long, size_t, int acct); extern unsigned long do_brk(unsigned long, unsigned long); @@ -624,34 +630,9 @@ return gfp_mask; } - -/* vma is the first one with address < vma->vm_end, - * and even address < vma->vm_start. Have to extend vma. */ -static inline int expand_stack(struct vm_area_struct * vma, unsigned long address) -{ - unsigned long grow; - /* - * vma->vm_start/vm_end cannot change under us because the caller is required - * to hold the mmap_sem in write mode. We need to get the spinlock only - * before relocating the vma range ourself. - */ - address &= PAGE_MASK; - spin_lock(&vma->vm_mm->page_table_lock); - grow = (vma->vm_start - address) >> PAGE_SHIFT; - if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur || - ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) { - spin_unlock(&vma->vm_mm->page_table_lock); - return -ENOMEM; - } - vma->vm_start = address; - vma->vm_pgoff -= grow; - vma->vm_mm->total_vm += grow; - if (vma->vm_flags & VM_LOCKED) - vma->vm_mm->locked_vm += grow; - spin_unlock(&vma->vm_mm->page_table_lock); - return 0; -} +/* Do stack extension */ +extern int expand_stack(struct vm_area_struct * vma, unsigned long address); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/msdos_fs.h linux.21rc5-ac2/include/linux/msdos_fs.h --- linux.21rc5/include/linux/msdos_fs.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/msdos_fs.h 2003-05-28 21:48:48.000000000 +0100 @@ -145,8 +145,7 @@ __u32 reserved1[120]; /* Nothing as far as I can tell */ __u32 signature2; /* 0x61417272L */ __u32 free_clusters; /* Free cluster count. -1 if unknown */ - __u32 next_cluster; /* Most recently allocated cluster. 
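The mman.h additions expose the overcommit accounting used by VM_ACCOUNT vmas: charge pages up front with vm_enough_memory(), and give the charge back with vm_unacct_memory() on any failure path. A sketch of the charge/rollback pattern; demo_build_vma() is a hypothetical stand-in for the real mapping setup:

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/errno.h>

/* Hypothetical placeholder for building and inserting the vma. */
static int demo_build_vma(unsigned long len)
{
	return 0;
}

static int demo_map_anon(unsigned long len)
{
	long pages = len >> PAGE_SHIFT;

	if (!vm_enough_memory(pages))
		return -ENOMEM;	/* would overcommit beyond policy */

	if (demo_build_vma(len) < 0) {
		vm_unacct_memory(pages);	/* roll the charge back */
		return -ENOMEM;
	}
	return 0;
}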
- * Unused under Linux. */ + __u32 next_cluster; /* Most recently allocated cluster */ __u32 reserved2[4]; }; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/nfs_fs.h linux.21rc5-ac2/include/linux/nfs_fs.h --- linux.21rc5/include/linux/nfs_fs.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/nfs_fs.h 2003-05-29 01:44:07.000000000 +0100 @@ -99,11 +99,19 @@ #define NFS_FLAGS(inode) ((inode)->u.nfs_i.flags) #define NFS_REVALIDATING(inode) (NFS_FLAGS(inode) & NFS_INO_REVALIDATING) #define NFS_STALE(inode) (NFS_FLAGS(inode) & NFS_INO_STALE) +#define NFS_FLUSH(inode) (NFS_FLAGS(inode) & NFS_INO_FLUSH) #define NFS_FILEID(inode) ((inode)->u.nfs_i.fileid) -/* Inode Flags */ -#define NFS_USE_READDIRPLUS(inode) ((NFS_FLAGS(inode) & NFS_INO_ADVISE_RDPLUS) ? 1 : 0) +static inline int nfs_server_capable(struct inode *inode, int cap) +{ + return NFS_SERVER(inode)->caps & cap; +} + +static inline int NFS_USE_READDIRPLUS(struct inode *inode) +{ + return NFS_FLAGS(inode) & NFS_INO_ADVISE_RDPLUS; +} /* * These are the default flags for swap requests */ @@ -274,6 +282,11 @@ #define NFS_TestClearPageSync(page) test_and_clear_bit(PG_fs_1, &(page)->flags) /* + * linux/fs/nfs/direct.c + */ +extern int nfs_direct_IO(int, struct file *, struct kiobuf *, unsigned long, int); + +/* * linux/fs/mount_clnt.c * (Used only by nfsroot module) */ @@ -302,6 +315,23 @@ return __nfs_refresh_inode(inode,fattr); } +/* + * This function will be used to simulate weak cache consistency + * under NFSv2 when the NFSv3 attribute patch is included. + * For the moment, we just call nfs_refresh_inode(). + */ +static __inline__ int +nfs_write_attributes(struct inode *inode, struct nfs_fattr *fattr) +{ + if ((fattr->valid & NFS_ATTR_FATTR) && !(fattr->valid & NFS_ATTR_WCC)) { + fattr->pre_size = NFS_CACHE_ISIZE(inode); + fattr->pre_mtime = NFS_CACHE_MTIME(inode); + fattr->pre_ctime = NFS_CACHE_CTIME(inode); + fattr->valid |= NFS_ATTR_WCC; + } + return nfs_refresh_inode(inode, fattr); +} + static inline loff_t nfs_size_to_loff_t(__u64 size) { diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/nfs_fs_i.h linux.21rc5-ac2/include/linux/nfs_fs_i.h --- linux.21rc5/include/linux/nfs_fs_i.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/nfs_fs_i.h 2003-05-28 21:48:47.000000000 +0100 @@ -6,6 +6,16 @@ #include /* + * NFSv3 Access mode cache + */ +struct nfs_access_cache { + unsigned long jiffies; + struct rpc_cred * cred; + int mask; + int err; +}; + +/* * nfs fs inode data in memory */ struct nfs_inode_info { @@ -54,6 +64,8 @@ */ unsigned long cache_mtime_jiffies; + struct nfs_access_cache cache_access; + /* * This is the cookie verifier used for NFSv3 readdir * operations */ @@ -75,6 +87,9 @@ /* Credentials for shared mmap */ struct rpc_cred *mm_cred; + + /* The number of threads flushing out dirty mmapped pages */ + atomic_t flushers; }; /* diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/nfs_fs_sb.h linux.21rc5-ac2/include/linux/nfs_fs_sb.h --- linux.21rc5/include/linux/nfs_fs_sb.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/nfs_fs_sb.h 2003-05-28 21:48:47.000000000 +0100 @@ -10,6 +10,7 @@ struct rpc_clnt * client; /* RPC client handle */ struct nfs_rpc_ops * rpc_ops; /* NFS protocol vector */ int flags; /* various flags */ + unsigned int caps; /* server capabilities */ unsigned int rsize; /* read size */ unsigned int rpages; /* read size (in pages) */ unsigned int 
wsize; /* write size */ @@ -36,4 +37,8 @@ struct nfs_server s_server; }; +/* Server capabilities */ +#define NFS_CAP_READDIRPLUS 1 + + #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/nfs_xdr.h linux.21rc5-ac2/include/linux/nfs_xdr.h --- linux.21rc5/include/linux/nfs_xdr.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/nfs_xdr.h 2003-05-01 14:16:14.000000000 +0100 @@ -27,6 +27,7 @@ __u64 atime; __u64 mtime; __u64 ctime; + unsigned long timestamp; }; #define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */ @@ -37,6 +38,7 @@ * Info on the file system */ struct nfs_fsinfo { + struct nfs_fattr *fattr; __u32 rtmax; /* max. read transfer size */ __u32 rtpref; /* pref. read transfer size */ __u32 rtmult; /* reads should be multiple of this */ @@ -45,21 +47,43 @@ __u32 wtmult; /* writes should be multiple of this */ __u32 dtpref; /* pref. readdir transfer size */ __u64 maxfilesize; - __u64 bsize; /* block size */ + __u64 time_delta; + __u32 properties; +}; + +struct nfs_fsstat { + struct nfs_fattr *fattr; __u64 tbytes; /* total size in bytes */ __u64 fbytes; /* # of free bytes */ __u64 abytes; /* # of bytes available to user */ __u64 tfiles; /* # of files */ __u64 ffiles; /* # of free files */ __u64 afiles; /* # of files available to user */ + __u32 invarsec; +}; + +struct nfs_pathconf { + struct nfs_fattr *fattr; /* Post-op attributes */ __u32 linkmax;/* max # of hard links */ - __u32 namelen;/* max name length */ + __u32 name_max;/* max name length */ + int no_trunc : 1, + chown_restricted : 1, + case_insensitive : 1, + case_preserving : 1; +}; + +struct nfs2_statfs { + __u32 tsize; /* Server transfer size */ + __u32 bsize; /* Filesystem block size */ + __u32 blocks; /* No. of "bsize" blocks on filesystem */ + __u32 bfree; /* No. of free "bsize" blocks */ + __u32 bavail; /* No. of available "bsize" blocks */ }; /* Arguments to the read call. * Note that NFS_READ_MAXIOV must be <= (MAX_IOVEC-2) from sunrpc/xprt.h */ -#define NFS_READ_MAXIOV 8 +#define NFS_READ_MAXIOV (9) struct nfs_readargs { struct nfs_fh * fh; @@ -78,7 +102,7 @@ /* Arguments to the write call. 
* Note that NFS_WRITE_MAXIOV must be <= (MAX_IOVEC-2) from sunrpc/xprt.h */ -#define NFS_WRITE_MAXIOV 8 +#define NFS_WRITE_MAXIOV (9) struct nfs_writeargs { struct nfs_fh * fh; __u64 offset; @@ -109,8 +133,8 @@ const char * name; unsigned int len; int eof; - struct nfs_fh fh; - struct nfs_fattr fattr; + struct nfs_fh *fh; + struct nfs_fattr *fattr; }; /* @@ -300,7 +324,7 @@ struct iattr *); int (*lookup) (struct inode *, struct qstr *, struct nfs_fh *, struct nfs_fattr *); - int (*access) (struct inode *, int , int); + int (*access) (struct inode *, struct rpc_cred *, int); int (*readlink)(struct inode *, struct page *); int (*read) (struct inode *, struct rpc_cred *, struct nfs_fattr *, @@ -332,7 +356,11 @@ int (*mknod) (struct inode *, struct qstr *, struct iattr *, dev_t, struct nfs_fh *, struct nfs_fattr *); int (*statfs) (struct nfs_server *, struct nfs_fh *, + struct nfs_fsstat *); + int (*fsinfo) (struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); + int (*pathconf) (struct nfs_server *, struct nfs_fh *, + struct nfs_pathconf *); u32 * (*decode_dirent)(u32 *, struct nfs_entry *, int plus); }; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/pci_ids.h linux.21rc5-ac2/include/linux/pci_ids.h --- linux.21rc5/include/linux/pci_ids.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/pci_ids.h 2003-05-28 15:32:28.000000000 +0100 @@ -501,6 +501,7 @@ #define PCI_DEVICE_ID_SI_650 0x0650 #define PCI_DEVICE_ID_SI_651 0x0651 #define PCI_DEVICE_ID_SI_652 0x0652 +#define PCI_DEVICE_ID_SI_655 0x0655 #define PCI_DEVICE_ID_SI_730 0x0730 #define PCI_DEVICE_ID_SI_630_VGA 0x6300 #define PCI_DEVICE_ID_SI_730_VGA 0x7300 @@ -923,7 +924,9 @@ #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA 0x0152 #define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO 0x0153 #define PCI_DEVICE_ID_NVIDIA_IGEFORCE2 0x01a0 +#define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4 #define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc +#define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2 0x0202 @@ -1025,11 +1028,13 @@ #define PCI_DEVICE_ID_VIA_8622 0x3102 #define PCI_DEVICE_ID_VIA_8233C_0 0x3109 #define PCI_DEVICE_ID_VIA_8361 0x3112 +#define PCI_DEVICE_ID_VIA_8375 0x3116 #define PCI_DEVICE_ID_VIA_8233A 0x3147 -#define PCI_DEVICE_ID_VIA_P4X333 0x3168 -#define PCI_DEVICE_ID_VIA_8235 0x3177 -#define PCI_DEVICE_ID_VIA_8377_0 0x3189 +#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149 +#define PCI_DEVICE_ID_VIA_P4X333 0x3168 +#define PCI_DEVICE_ID_VIA_8235 0x3177 +#define PCI_DEVICE_ID_VIA_8377_0 0x3189 +#define PCI_DEVICE_ID_VIA_8237 0x3227 #define PCI_DEVICE_ID_VIA_86C100A 0x6100 #define PCI_DEVICE_ID_VIA_8231 0x8231 #define PCI_DEVICE_ID_VIA_8231_4 0x8235 diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/pc_keyb.h linux.21rc5-ac2/include/linux/pc_keyb.h --- linux.21rc5/include/linux/pc_keyb.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/pc_keyb.h 2003-04-22 16:44:38.000000000 +0100 @@ -128,3 +128,6 @@ struct fasync_struct *fasync; unsigned char buf[AUX_BUF_SIZE]; }; + +extern void pckbd_blink (char); + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/pnpbios.h linux.21rc5-ac2/include/linux/pnpbios.h --- linux.21rc5/include/linux/pnpbios.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/pnpbios.h 2003-05-29 01:44:07.000000000 +0100 @@ -0,0 +1,212 @@ +/* + * Include file 
for the interface to a PnP BIOS + * + * Original BIOS code (C) 1998 Christian Schmidt (chr.schmidt@tu-bs.de) + * PnP handler parts (c) 1998 Tom Lees + * Minor reorganizations by David Hinds + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_PNPBIOS_H +#define _LINUX_PNPBIOS_H + +#ifdef __KERNEL__ + +#include +#include + +/* + * Status codes (warnings and errors) + */ +#define PNP_SUCCESS 0x00 +#define PNP_NOT_SET_STATICALLY 0x7f +#define PNP_UNKNOWN_FUNCTION 0x81 +#define PNP_FUNCTION_NOT_SUPPORTED 0x82 +#define PNP_INVALID_HANDLE 0x83 +#define PNP_BAD_PARAMETER 0x84 +#define PNP_SET_FAILED 0x85 +#define PNP_EVENTS_NOT_PENDING 0x86 +#define PNP_SYSTEM_NOT_DOCKED 0x87 +#define PNP_NO_ISA_PNP_CARDS 0x88 +#define PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES 0x89 +#define PNP_CONFIG_CHANGE_FAILED_NO_BATTERY 0x8a +#define PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT 0x8b +#define PNP_BUFFER_TOO_SMALL 0x8c +#define PNP_USE_ESCD_SUPPORT 0x8d +#define PNP_MESSAGE_NOT_SUPPORTED 0x8e +#define PNP_HARDWARE_ERROR 0x8f + +#define ESCD_SUCCESS 0x00 +#define ESCD_IO_ERROR_READING 0x55 +#define ESCD_INVALID 0x56 +#define ESCD_BUFFER_TOO_SMALL 0x59 +#define ESCD_NVRAM_TOO_SMALL 0x5a +#define ESCD_FUNCTION_NOT_SUPPORTED 0x81 + +/* + * Events that can be received by "get event" + */ +#define PNPEV_ABOUT_TO_CHANGE_CONFIG 0x0001 +#define PNPEV_DOCK_CHANGED 0x0002 +#define PNPEV_SYSTEM_DEVICE_CHANGED 0x0003 +#define PNPEV_CONFIG_CHANGED_FAILED 0x0004 +#define PNPEV_UNKNOWN_SYSTEM_EVENT 0xffff +/* 0x8000 through 0xfffe are OEM defined */ + +/* + * Messages that should be sent through "send message" + */ +#define PNPMSG_OK 0x00 +#define PNPMSG_ABORT 0x01 +#define PNPMSG_UNDOCK_DEFAULT_ACTION 0x40 +#define PNPMSG_POWER_OFF 0x41 +#define PNPMSG_PNP_OS_ACTIVE 0x42 +#define PNPMSG_PNP_OS_INACTIVE 0x43 +/* 0x8000 through 0xffff are OEM defined */ + +#pragma pack(1) +struct pnp_dev_node_info { + __u16 no_nodes; + __u16 max_node_size; +}; +struct pnp_docking_station_info { + __u32 location_id; + __u32 serial; + __u16 capabilities; +}; +struct pnp_isa_config_struc { + __u8 revision; + __u8 no_csns; + __u16 isa_rd_data_port; + __u16 reserved; +}; +struct escd_info_struc { + __u16 min_escd_write_size; + __u16 escd_size; + __u32 nv_storage_base; +}; +struct pnp_bios_node { + __u16 size; + __u8 handle; + __u32 eisa_id; + __u8 type_code[3]; + __u16 flags; + __u8 data[0]; +}; +#pragma pack() + +struct pnpbios_device_id +{ + char id[8]; + unsigned long driver_data; +}; + +struct pnpbios_driver { + struct list_head node; + char *name; + const struct pnpbios_device_id *id_table; /* NULL if wants all devices */ + int (*probe) (struct pci_dev *dev, const struct pnpbios_device_id *id); /* New device inserted */ + void (*remove) (struct pci_dev *dev); /* Device removed, either due to hotplug remove or module remove */ +};
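struct pnpbios_driver deliberately mirrors the pci_driver model, down to the struct pci_dev parameters. A sketch of a client driver, using the 2.4-style GNU initializers; the "PNP0501" EISA id and the empty-string table terminator are illustrative assumptions, not defined by this header:

	static const struct pnpbios_device_id example_ids[] = {
		{ "PNP0501", 0 },		/* hypothetical: 16550A-compatible UART node */
		{ "", 0 }			/* assumed terminator */
	};

	static int example_probe(struct pci_dev *dev, const struct pnpbios_device_id *id)
	{
		/* claim the node's resources here; return 0 on success */
		return 0;
	}

	static void example_remove(struct pci_dev *dev)
	{
		/* release whatever probe() claimed */
	}

	static struct pnpbios_driver example_driver = {
		name:		"example",
		id_table:	example_ids,
		probe:		example_probe,
		remove:		example_remove,
	};

	static int __init example_init(void)
	{
		/* pnpbios_module_init(), defined further down in this header,
		 * picks the right register/unregister behaviour for the
		 * hotplug and module cases */
		return pnpbios_module_init(&example_driver);
	}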
+ +#ifdef CONFIG_PNPBIOS + +/* exported */ +extern int pnpbios_register_driver(struct pnpbios_driver *drv); +extern void pnpbios_unregister_driver(struct pnpbios_driver *drv); + +/* non-exported */ +#define pnpbios_for_each_dev(dev) \ + for(dev = pnpbios_dev_g(pnpbios_devices.next); dev != pnpbios_dev_g(&pnpbios_devices); dev = pnpbios_dev_g(dev->global_list.next)) + + +#define pnpbios_dev_g(n) list_entry(n, struct pci_dev, global_list) + +static __inline struct pnpbios_driver *pnpbios_dev_driver(const struct pci_dev *dev) +{ + return (struct pnpbios_driver *)dev->driver; +} + +extern int pnpbios_dont_use_current_config; +extern void *pnpbios_kmalloc(size_t size, int f); +extern int pnpbios_init (void); +extern int pnpbios_proc_init (void); +extern void pnpbios_proc_exit (void); + +extern int pnp_bios_dev_node_info (struct pnp_dev_node_info *data); +extern int pnp_bios_get_dev_node (u8 *nodenum, char config, struct pnp_bios_node *data); +extern int pnp_bios_set_dev_node (u8 nodenum, char config, struct pnp_bios_node *data); +extern int pnp_bios_get_stat_res (char *info); +extern int pnp_bios_isapnp_config (struct pnp_isa_config_struc *data); +extern int pnp_bios_escd_info (struct escd_info_struc *data); +extern int pnp_bios_read_escd (char *data, u32 nvram_base); +#if needed +extern int pnp_bios_get_event (u16 *message); +extern int pnp_bios_send_message (u16 message); +extern int pnp_bios_set_stat_res (char *info); +extern int pnp_bios_apm_id_table (char *table, u16 *size); +extern int pnp_bios_write_escd (char *data, u32 nvram_base); +#endif + +/* + * a helper function which helps ensure correct pnpbios_driver + * setup and cleanup for commonly-encountered hotplug/modular cases + * + * This MUST stay in a header, as it checks for -DMODULE + */ + +static inline int pnpbios_module_init(struct pnpbios_driver *drv) +{ + int rc = pnpbios_register_driver (drv); + + if (rc > 0) + return 0; + + /* iff CONFIG_HOTPLUG and built into kernel, we should + * leave the driver around for future hotplug events. + * For the module case, a hotplug daemon of some sort + * should load a module in response to an insert event. 
*/ +#if defined(CONFIG_HOTPLUG) && !defined(MODULE) + if (rc == 0) + return 0; +#else + if (rc == 0) + rc = -ENODEV; +#endif + + /* if we get here, we need to clean up pci driver instance + * and return some sort of error */ + pnpbios_unregister_driver (drv); + + return rc; +} + +#else /* CONFIG_PNPBIOS */ + +static __inline__ int pnpbios_register_driver(struct pnpbios_driver *drv) +{ + return 0; +} + +static __inline__ void pnpbios_unregister_driver(struct pnpbios_driver *drv) +{ + return; +} + +#endif /* CONFIG_PNPBIOS */ +#endif /* __KERNEL__ */ + +#endif /* _LINUX_PNPBIOS_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/quotacompat.h linux.21rc5-ac2/include/linux/quotacompat.h --- linux.21rc5/include/linux/quotacompat.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/quotacompat.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,86 @@ +/* + * Definition of symbols used for backward compatible interface + */ + +#ifndef _LINUX_QUOTACOMPAT_ +#define _LINUX_QUOTACOMPAT_ + +#include +#include + +struct v1c_mem_dqblk { + __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ + __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ + __u32 dqb_curblocks; /* current block count */ + __u32 dqb_ihardlimit; /* maximum # allocated inodes */ + __u32 dqb_isoftlimit; /* preferred inode limit */ + __u32 dqb_curinodes; /* current # allocated inodes */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive files */ +}; + +struct v1c_dqstats { + __u32 lookups; + __u32 drops; + __u32 reads; + __u32 writes; + __u32 cache_hits; + __u32 allocated_dquots; + __u32 free_dquots; + __u32 syncs; +}; + +struct v2c_mem_dqblk { + unsigned int dqb_ihardlimit; + unsigned int dqb_isoftlimit; + unsigned int dqb_curinodes; + unsigned int dqb_bhardlimit; + unsigned int dqb_bsoftlimit; + qsize_t dqb_curspace; + __kernel_time_t dqb_btime; + __kernel_time_t dqb_itime; +}; + +struct v2c_mem_dqinfo { + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + unsigned int dqi_flags; + unsigned int dqi_blocks; + unsigned int dqi_free_blk; + unsigned int dqi_free_entry; +}; + +struct v2c_dqstats { + __u32 lookups; + __u32 drops; + __u32 reads; + __u32 writes; + __u32 cache_hits; + __u32 allocated_dquots; + __u32 free_dquots; + __u32 syncs; + __u32 version; +}; + +#define Q_COMP_QUOTAON 0x0100 /* enable quotas */ +#define Q_COMP_QUOTAOFF 0x0200 /* disable quotas */ +#define Q_COMP_SYNC 0x0600 /* sync disk copy of a filesystems quotas */ + +#define Q_V1_GETQUOTA 0x0300 /* get limits and usage */ +#define Q_V1_SETQUOTA 0x0400 /* set limits and usage */ +#define Q_V1_SETUSE 0x0500 /* set usage */ +#define Q_V1_SETQLIM 0x0700 /* set limits */ +#define Q_V1_GETSTATS 0x0800 /* get collected stats */ +#define Q_V1_RSQUASH 0x1000 /* set root_squash option */ + +#define Q_V2_SETQLIM 0x0700 /* set limits */ +#define Q_V2_GETINFO 0x0900 /* get info about quotas - graces, flags... 
*/ +#define Q_V2_SETINFO 0x0A00 /* set info about quotas */ +#define Q_V2_SETGRACE 0x0B00 /* set inode and block grace */ +#define Q_V2_SETFLAGS 0x0C00 /* set flags for quota */ +#define Q_V2_GETQUOTA 0x0D00 /* get limits and usage */ +#define Q_V2_SETQUOTA 0x0E00 /* set limits and usage */ +#define Q_V2_SETUSE 0x0F00 /* set usage */ +#define Q_V2_GETSTATS 0x1100 /* get collected stats */ + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/quota.h linux.21rc5-ac2/include/linux/quota.h --- linux.21rc5/include/linux/quota.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/quota.h 2003-05-28 21:48:47.000000000 +0100 @@ -40,30 +40,22 @@ #define _LINUX_QUOTA_ #include +#include -/* - * Convert diskblocks to blocks and the other way around. - */ -#define dbtob(num) (num << BLOCK_SIZE_BITS) -#define btodb(num) (num >> BLOCK_SIZE_BITS) +#define __DQUOT_VERSION__ "dquot_6.5.1" +#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 -/* - * Convert count of filesystem blocks to diskquota blocks, meant - * for filesystems where i_blksize != BLOCK_SIZE - */ -#define fs_to_dq_blocks(num, blksize) (((num) * (blksize)) / BLOCK_SIZE) +typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ +typedef __u64 qsize_t; /* Type in which we store sizes */ -/* - * Definitions for disk quotas imposed on the average user - * (big brother finally hits Linux). - * - * The following constants define the amount of time given a user - * before the soft limits are treated as hard limits (usually resulting - * in an allocation failure). The timer is started when the user crosses - * their soft limit, it is reset when they go below their soft limit. - */ -#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ -#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ +/* Size of blocks in which are counted size limits */ +#define QUOTABLOCK_BITS 10 +#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) + +/* Conversion routines from and to quota blocks */ +#define qb2kb(x) ((x) << (QUOTABLOCK_BITS-10)) +#define kb2qb(x) ((x) >> (QUOTABLOCK_BITS-10)) +#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) #define MAXQUOTAS 2 #define USRQUOTA 0 /* element used for user quotas */ @@ -78,9 +70,6 @@ "undefined", \ }; -#define QUOTAFILENAME "quota" -#define QUOTAGROUP "staff" - /* * Command definitions for the 'quotactl' system call. * The commands are broken into a main command defined below @@ -91,61 +80,122 @@ #define SUBCMDSHIFT 8 #define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK)) -#define Q_QUOTAON 0x0100 /* enable quotas */ -#define Q_QUOTAOFF 0x0200 /* disable quotas */ -#define Q_GETQUOTA 0x0300 /* get limits and usage */ -#define Q_SETQUOTA 0x0400 /* set limits and usage */ -#define Q_SETUSE 0x0500 /* set usage */ -#define Q_SYNC 0x0600 /* sync disk copy of a filesystems quotas */ -#define Q_SETQLIM 0x0700 /* set limits */ -#define Q_GETSTATS 0x0800 /* get collected stats */ -#define Q_RSQUASH 0x1000 /* set root_squash option */ - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is an array of these structures - * indexed by user or group number. 
+#define Q_SYNC 0x800001 /* sync disk copy of a filesystems quotas */ +#define Q_QUOTAON 0x800002 /* turn quotas on */ +#define Q_QUOTAOFF 0x800003 /* turn quotas off */ +#define Q_GETFMT 0x800004 /* get quota format used on given filesystem */ +#define Q_GETINFO 0x800005 /* get information about quota files */ +#define Q_SETINFO 0x800006 /* set information about quota files */ +#define Q_GETQUOTA 0x800007 /* get user quota structure */ +#define Q_SETQUOTA 0x800008 /* set user quota structure */ + +/* + * Quota structure used for communication with userspace via quotactl + * Following flags are used to specify which fields are valid + */ +#define QIF_BLIMITS 1 +#define QIF_SPACE 2 +#define QIF_ILIMITS 4 +#define QIF_INODES 8 +#define QIF_BTIME 16 +#define QIF_ITIME 32 +#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS) +#define QIF_USAGE (QIF_SPACE | QIF_INODES) +#define QIF_TIMES (QIF_BTIME | QIF_ITIME) +#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES) + +struct if_dqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; +}; + +/* + * Structure used for setting quota information about file via quotactl + * Following flags are used to specify which fields are valid */ -struct dqblk { +#define IIF_BGRACE 1 +#define IIF_IGRACE 2 +#define IIF_FLAGS 4 +#define IIF_ALL (IIF_BGRACE | IIF_IGRACE | IIF_FLAGS) + +struct if_dqinfo { + __u64 dqi_bgrace; + __u64 dqi_igrace; + __u32 dqi_flags; + __u32 dqi_valid; +}; + +#ifdef __KERNEL__ + +#include +#include +#include + +/* + * Data for one user/group kept in memory + */ +struct mem_dqblk { __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ - __u32 dqb_curblocks; /* current block count */ + qsize_t dqb_curspace; /* current used space */ __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ __u32 dqb_isoftlimit; /* preferred inode limit */ __u32 dqb_curinodes; /* current # allocated inodes */ - time_t dqb_btime; /* time limit for excessive disk use */ - time_t dqb_itime; /* time limit for excessive inode use */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive inode use */ }; /* - * Shorthand notation. + * Data for one quotafile kept in memory */ -#define dq_bhardlimit dq_dqb.dqb_bhardlimit -#define dq_bsoftlimit dq_dqb.dqb_bsoftlimit -#define dq_curblocks dq_dqb.dqb_curblocks -#define dq_ihardlimit dq_dqb.dqb_ihardlimit -#define dq_isoftlimit dq_dqb.dqb_isoftlimit -#define dq_curinodes dq_dqb.dqb_curinodes -#define dq_btime dq_dqb.dqb_btime -#define dq_itime dq_dqb.dqb_itime +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + union { + struct v1_mem_dqinfo v1_i; + struct v2_mem_dqinfo v2_i; + } u; +}; -#define dqoff(UID) ((loff_t)((UID) * sizeof (struct dqblk))) +#define DQF_MASK 0xffff /* Mask for format specific flags */ +#define DQF_INFO_DIRTY 0x10000 /* Is info dirty? */ +#define DQF_ANY_DQUOT_DIRTY 0x20000 /* Is any dquot dirty? 
*/ + +extern inline void mark_info_dirty(struct mem_dqinfo *info) +{ + info->dqi_flags |= DQF_INFO_DIRTY; +} + +#define info_dirty(info) ((info)->dqi_flags & DQF_INFO_DIRTY) + +#define info_any_dirty(info) ((info)->dqi_flags & DQF_INFO_DIRTY ||\ + (info)->dqi_flags & DQF_ANY_DQUOT_DIRTY) + +#define sb_dqopt(sb) (&(sb)->s_dquot) struct dqstats { - __u32 lookups; - __u32 drops; - __u32 reads; - __u32 writes; - __u32 cache_hits; - __u32 allocated_dquots; - __u32 free_dquots; - __u32 syncs; + int lookups; + int drops; + int reads; + int writes; + int cache_hits; + int allocated_dquots; + int free_dquots; + int syncs; }; -#ifdef __KERNEL__ - -extern int nr_dquots, nr_free_dquots; -extern int dquot_root_squash; +extern struct dqstats dqstats; #define NR_DQHASH 43 /* Just an arbitrary number */ @@ -162,37 +212,115 @@ struct list_head dq_free; /* Free list element */ wait_queue_head_t dq_wait_lock; /* Pointer to waitqueue on dquot lock */ wait_queue_head_t dq_wait_free; /* Pointer to waitqueue for quota to be unused */ - int dq_count; /* Reference count */ + int dq_count; /* Use count */ + int dq_dup_ref; /* Number of duplicated references */ /* fields after this point are cleared when invalidating */ struct super_block *dq_sb; /* superblock this applies to */ unsigned int dq_id; /* ID this applies to (uid, gid) */ kdev_t dq_dev; /* Device this applies to */ + loff_t dq_off; /* Offset of dquot on disk */ short dq_type; /* Type of quota */ short dq_flags; /* See DQ_* */ unsigned long dq_referenced; /* Number of times this dquot was referenced during its lifetime */ - struct dqblk dq_dqb; /* Diskquota usage */ + struct mem_dqblk dq_dqb; /* Diskquota usage */ }; #define NODQUOT (struct dquot *)NULL -/* - * Flags used for set_dqblk. - */ -#define SET_QUOTA 0x02 -#define SET_USE 0x04 -#define SET_QLIMIT 0x08 - #define QUOTA_OK 0 #define NO_QUOTA 1 +/* Operations which must be implemented by each quota format */ +struct quota_format_ops { + int (*check_quota_file)(struct super_block *sb, int type); /* Detect whether file is in our format */ + int (*read_file_info)(struct super_block *sb, int type); /* Read main info about file - called on quotaon() */ + int (*write_file_info)(struct super_block *sb, int type); /* Write main info about file */ + int (*free_file_info)(struct super_block *sb, int type); /* Called on quotaoff() */ + int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */ + int (*commit_dqblk)(struct dquot *dquot); /* Write (or delete) structure for one user */ +}; + +/* Operations working with dquots */ +struct dquot_operations { + void (*initialize) (struct inode *, int); + void (*drop) (struct inode *); + int (*alloc_space) (struct inode *, qsize_t, int); + int (*alloc_inode) (const struct inode *, unsigned long); + void (*free_space) (struct inode *, qsize_t); + void (*free_inode) (const struct inode *, unsigned long); + int (*transfer) (struct inode *, struct iattr *); + int (*sync_dquot) (struct dquot *); +}; + +/* Operations handling requests from userspace */ +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, char *); + int (*quota_off)(struct super_block *, int); + int (*quota_sync)(struct super_block *, int); + int (*get_info)(struct super_block *, int, struct if_dqinfo *); + int (*set_info)(struct super_block *, int, struct if_dqinfo *); + int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *); + int (*set_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *); + int (*get_xstate)(struct super_block *, struct fs_quota_stat 
*); + int (*set_xstate)(struct super_block *, unsigned int, int); + int (*get_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *); + int (*set_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *); +}; + +struct quota_format_type { + int qf_fmt_id; /* Quota format id */ + struct quota_format_ops *qf_ops; /* Operations of format */ + struct module *qf_owner; /* Module implementing quota format */ + struct quota_format_type *qf_next; +}; + +#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */ +#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */ + +struct quota_info { + unsigned int flags; /* Flags for diskquotas on this device */ + struct semaphore dqio_sem; /* lock device while I/O in progress */ + struct semaphore dqoff_sem; /* serialize quota_off() and quota_on() on device */ + struct file *files[MAXQUOTAS]; /* fp's to quotafiles */ + struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ + struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ +}; + +/* Inline would be better but we need to dereference super_block which is not defined yet */ +#define mark_dquot_dirty(dquot) do {\ + dquot->dq_flags |= DQ_MOD;\ + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_flags |= DQF_ANY_DQUOT_DIRTY;\ +} while (0) + +#define dquot_dirty(dquot) ((dquot)->dq_flags & DQ_MOD) + +static inline int is_enabled(struct quota_info *dqopt, int type) +{ + switch (type) { + case USRQUOTA: + return dqopt->flags & DQUOT_USR_ENABLED; + case GRPQUOTA: + return dqopt->flags & DQUOT_GRP_ENABLED; + } + return 0; +} + +#define sb_any_quota_enabled(sb) (is_enabled(sb_dqopt(sb), USRQUOTA) | is_enabled(sb_dqopt(sb), GRPQUOTA)) + +#define sb_has_quota_enabled(sb, type) (is_enabled(sb_dqopt(sb), type)) + +int register_quota_format(struct quota_format_type *fmt); +void unregister_quota_format(struct quota_format_type *fmt); +void init_dquot_operations(struct dquot_operations *fsdqops); + #else # /* nodep */ include __BEGIN_DECLS -long quotactl __P ((int, const char *, int, caddr_t)); +long quotactl __P ((unsigned int, const char *, int, caddr_t)); __END_DECLS #endif /* __KERNEL__ */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/quotaio_v1.h linux.21rc5-ac2/include/linux/quotaio_v1.h --- linux.21rc5/include/linux/quotaio_v1.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/quotaio_v1.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,33 @@ +#ifndef _LINUX_QUOTAIO_V1_H +#define _LINUX_QUOTAIO_V1_H + +#include + +/* + * The following constants define the amount of time given a user + * before the soft limits are treated as hard limits (usually resulting + * in an allocation failure). The timer is started when the user crosses + * their soft limit, it is reset when they go below their soft limit. + */ +#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ +#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is an array of these structures + * indexed by user or group number. 
+ */ +struct v1_disk_dqblk { + __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ + __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ + __u32 dqb_curblocks; /* current block count */ + __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __u32 dqb_isoftlimit; /* preferred inode limit */ + __u32 dqb_curinodes; /* current # allocated inodes */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive inode use */ +}; + +#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) + +#endif /* _LINUX_QUOTAIO_V1_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/quotaio_v2.h linux.21rc5-ac2/include/linux/quotaio_v2.h --- linux.21rc5/include/linux/quotaio_v2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/quotaio_v2.h 2003-05-28 21:48:47.000000000 +0100 @@ -0,0 +1,79 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_QUOTAIO_V2_H +#define _LINUX_QUOTAIO_V2_H + +#include +#include + +/* + * Definitions of magics and versions of current quota files + */ +#define V2_INITQMAGICS {\ + 0xd9c01f11, /* USRQUOTA */\ + 0xd9c01927 /* GRPQUOTA */\ +} + +#define V2_INITQVERSIONS {\ + 0, /* USRQUOTA */\ + 0 /* GRPQUOTA */\ +} + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is a radix tree whose leaves point + * to blocks of these structures. + */ +struct v2_disk_dqblk { + __u32 dqb_id; /* id this quota applies to */ + __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __u32 dqb_isoftlimit; /* preferred inode limit */ + __u32 dqb_curinodes; /* current # allocated inodes */ + __u32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ + __u32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ + __u64 dqb_curspace; /* current space occupied (in bytes) */ + __u64 dqb_btime; /* time limit for excessive disk use */ + __u64 dqb_itime; /* time limit for excessive inode use */ +}; + +/* + * Here are header structures as written on disk and their in-memory copies + */ +/* First generic header */ +struct v2_disk_dqheader { + __u32 dqh_magic; /* Magic number identifying file */ + __u32 dqh_version; /* File version */ +}; + +/* Header with type and version specific information */ +struct v2_disk_dqinfo { + __u32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ + __u32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ + __u32 dqi_flags; /* Flags for quotafile (DQF_*) */ + __u32 dqi_blocks; /* Number of blocks in file */ + __u32 dqi_free_blk; /* Number of first free block in the list */ + __u32 dqi_free_entry; /* Number of block with at least one free entry */ +}; + +/* + * Structure of header of block with quota structures. 
It is padded to 16 bytes so + * there will be space for exactly 18 quota-entries in a block + */ +struct v2_disk_dqdbheader { + __u32 dqdh_next_free; /* Number of next block with free entry */ + __u32 dqdh_prev_free; /* Number of previous block with free entry */ + __u16 dqdh_entries; /* Number of valid entries in block */ + __u16 dqdh_pad1; + __u32 dqdh_pad2; +}; + +#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ +#define V2_DQBLKSIZE_BITS 10 +#define V2_DQBLKSIZE (1 << V2_DQBLKSIZE_BITS) /* Size of block with quota structures */ +#define V2_DQTREEOFF 1 /* Offset of tree in file in blocks */ +#define V2_DQTREEDEPTH 4 /* Depth of quota tree */ +#define V2_DQSTRINBLK ((V2_DQBLKSIZE - sizeof(struct v2_disk_dqdbheader)) / sizeof(struct v2_disk_dqblk)) /* Number of entries in one block */ + +#endif /* _LINUX_QUOTAIO_V2_H */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/quotaops.h linux.21rc5-ac2/include/linux/quotaops.h --- linux.21rc5/include/linux/quotaops.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/quotaops.h 2003-05-29 01:44:07.000000000 +0100 @@ -20,15 +20,16 @@ /* * declaration of quota_function calls in kernel. */ -extern void dquot_initialize(struct inode *inode, short type); +extern void sync_dquots_dev(kdev_t dev, int type); +extern void sync_dquots_sb(struct super_block *sb, int type); + +extern void dquot_initialize(struct inode *inode, int type); extern void dquot_drop(struct inode *inode); -extern int quota_off(struct super_block *sb, short type); -extern int sync_dquots(kdev_t dev, short type); -extern int dquot_alloc_block(struct inode *inode, unsigned long number, char prealloc); +extern int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); extern int dquot_alloc_inode(const struct inode *inode, unsigned long number); -extern void dquot_free_block(struct inode *inode, unsigned long number); +extern void dquot_free_space(struct inode *inode, qsize_t number); extern void dquot_free_inode(const struct inode *inode, unsigned long number); extern int dquot_transfer(struct inode *inode, struct iattr *iattr); @@ -36,11 +37,15 @@ /* * Operations supported for diskquotas. 
*/ -#define sb_any_quota_enabled(sb) ((sb)->s_dquot.flags & (DQUOT_USR_ENABLED | DQUOT_GRP_ENABLED)) +extern struct dquot_operations dquot_operations; +extern struct quotactl_ops vfs_quotactl_ops; + +#define sb_dquot_ops (&dquot_operations) +#define sb_quotactl_ops (&vfs_quotactl_ops) static __inline__ void DQUOT_INIT(struct inode *inode) { - if (!inode->i_sb) + if (!inode->i_sb) out_of_line_bug(); lock_kernel(); if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) @@ -59,50 +64,50 @@ unlock_kernel(); } -static __inline__ int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, int nr) +static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); if (sb_any_quota_enabled(inode->i_sb)) { - /* Number of used blocks is updated in alloc_block() */ - if (inode->i_sb->dq_op->alloc_block(inode, fs_to_dq_blocks(nr, inode->i_sb->s_blocksize), 1) == NO_QUOTA) { + /* Used space is updated in alloc_space() */ + if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA) { unlock_kernel(); return 1; } } else - inode->i_blocks += nr << (inode->i_sb->s_blocksize_bits - 9); + inode_add_bytes(inode, nr); unlock_kernel(); return 0; } -static __inline__ int DQUOT_PREALLOC_BLOCK(struct inode *inode, int nr) +static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) { int ret; - if (!(ret = DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr))) + if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr))) mark_inode_dirty(inode); return ret; } -static __inline__ int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, int nr) +static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); if (sb_any_quota_enabled(inode->i_sb)) { - /* Number of used blocks is updated in alloc_block() */ - if (inode->i_sb->dq_op->alloc_block(inode, fs_to_dq_blocks(nr, inode->i_sb->s_blocksize), 0) == NO_QUOTA) { + /* Used space is updated in alloc_space() */ + if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA) { unlock_kernel(); return 1; } } else - inode->i_blocks += nr << (inode->i_sb->s_blocksize_bits - 9); + inode_add_bytes(inode, nr); unlock_kernel(); return 0; } -static __inline__ int DQUOT_ALLOC_BLOCK(struct inode *inode, int nr) +static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) { int ret; - if (!(ret = DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr))) + if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr))) mark_inode_dirty(inode); return ret; } @@ -121,19 +126,19 @@ return 0; } -static __inline__ void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, int nr) +static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); if (sb_any_quota_enabled(inode->i_sb)) - inode->i_sb->dq_op->free_block(inode, fs_to_dq_blocks(nr, inode->i_sb->s_blocksize)); + inode->i_sb->dq_op->free_space(inode, nr); else - inode->i_blocks -= nr << (inode->i_sb->s_blocksize_bits - 9); + inode_sub_bytes(inode, nr); unlock_kernel(); } -static __inline__ void DQUOT_FREE_BLOCK(struct inode *inode, int nr) +static __inline__ void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) { - DQUOT_FREE_BLOCK_NODIRTY(inode, nr); + DQUOT_FREE_SPACE_NODIRTY(inode, nr); mark_inode_dirty(inode); } @@ -159,63 +164,86 @@ return 0; } -#define DQUOT_SYNC(dev) sync_dquots(dev, -1) -#define DQUOT_OFF(sb) quota_off(sb, -1) +#define DQUOT_SYNC_DEV(dev) sync_dquots_dev(dev, -1) +#define DQUOT_SYNC_SB(sb) sync_dquots_sb(sb, -1) + +static __inline__ int DQUOT_OFF(struct super_block *sb) +{ + int ret = -ENOSYS; + + lock_kernel(); + if (sb->s_qcop && 
sb->s_qcop->quota_off) + ret = sb->s_qcop->quota_off(sb, -1); + unlock_kernel(); + return ret; +} #else /* * NO-OP when quota not configured. */ +#define sb_dquot_ops (NULL) +#define sb_quotactl_ops (NULL) #define DQUOT_INIT(inode) do { } while(0) #define DQUOT_DROP(inode) do { } while(0) #define DQUOT_ALLOC_INODE(inode) (0) #define DQUOT_FREE_INODE(inode) do { } while(0) -#define DQUOT_SYNC(dev) do { } while(0) +#define DQUOT_SYNC_DEV(dev) do { } while(0) +#define DQUOT_SYNC_SB(sb) do { } while(0) #define DQUOT_OFF(sb) do { } while(0) #define DQUOT_TRANSFER(inode, iattr) (0) -extern __inline__ int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, int nr) +#define sync_dquots_dev(dev, type) do { } while(0) +extern __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); - inode->i_blocks += nr << (inode->i_sb->s_blocksize_bits - 9); + inode_add_bytes(inode, nr); unlock_kernel(); return 0; } -extern __inline__ int DQUOT_PREALLOC_BLOCK(struct inode *inode, int nr) +extern __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) { - DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr); + DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr); mark_inode_dirty(inode); return 0; } -extern __inline__ int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, int nr) +extern __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); - inode->i_blocks += nr << (inode->i_sb->s_blocksize_bits - 9); + inode_add_bytes(inode, nr); unlock_kernel(); return 0; } -extern __inline__ int DQUOT_ALLOC_BLOCK(struct inode *inode, int nr) +extern __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) { - DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr); + DQUOT_ALLOC_SPACE_NODIRTY(inode, nr); mark_inode_dirty(inode); return 0; } -extern __inline__ void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, int nr) +extern __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); - inode->i_blocks -= nr << (inode->i_sb->s_blocksize_bits - 9); + inode_sub_bytes(inode, nr); unlock_kernel(); } -extern __inline__ void DQUOT_FREE_BLOCK(struct inode *inode, int nr) +extern __inline__ void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) { - DQUOT_FREE_BLOCK_NODIRTY(inode, nr); + DQUOT_FREE_SPACE_NODIRTY(inode, nr); mark_inode_dirty(inode); } #endif /* CONFIG_QUOTA */ + +#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_PREALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +#define DQUOT_PREALLOC_BLOCK(inode, nr) DQUOT_PREALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_ALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +#define DQUOT_ALLOC_BLOCK(inode, nr) DQUOT_ALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) DQUOT_FREE_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +#define DQUOT_FREE_BLOCK(inode, nr) DQUOT_FREE_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) + #endif /* _LINUX_QUOTAOPS_ */
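The BLOCK macros at the end of quotaops.h are now thin conversions onto the byte-granular SPACE calls, so the calling convention for a filesystem block allocator is unchanged. A sketch, with example_new_block() standing in for a hypothetical on-disk allocator:

	static int example_alloc_block(struct inode *inode)
	{
		int block;

		/* charge one fs block to the owner's quota first; the wrapper
		 * scales it to bytes via s_blocksize_bits and returns nonzero
		 * if that would put the user over quota */
		if (DQUOT_ALLOC_BLOCK(inode, 1))
			return -EDQUOT;

		block = example_new_block(inode);	/* hypothetical */
		if (block < 0) {
			/* disk allocation failed: return the charge */
			DQUOT_FREE_BLOCK(inode, 1);
			return block;
		}
		return block;
	}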
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/sched.h linux.21rc5-ac2/include/linux/sched.h --- linux.21rc5/include/linux/sched.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/sched.h 2003-05-29 01:44:06.000000000 +0100 @@ -73,16 +73,16 @@ #define CT_TO_SECS(x) ((x) / HZ) #define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ) -extern int nr_running, nr_threads; +extern int nr_threads; extern int last_pid; +extern unsigned long nr_running(void); +extern unsigned long nr_uninterruptible(void); #include #include #include #include -#ifdef __KERNEL__ #include -#endif #include @@ -109,12 +109,6 @@ #define SCHED_FIFO 1 #define SCHED_RR 2 -/* - * This is an additional bit set when we want to - * yield the CPU for one re-schedule.. - */ -#define SCHED_YIELD 0x10 - struct sched_param { int sched_priority; }; @@ -135,14 +129,21 @@ extern spinlock_t runqueue_lock; extern spinlock_t mmlist_lock; +typedef struct task_struct task_t; + extern void sched_init(void); -extern void init_idle(void); +extern void init_idle(task_t *idle, int cpu); +extern int idle_cpu(int cpu); extern void show_state(void); extern void cpu_init (void); extern void trap_init(void); extern void update_process_times(int user); -extern void update_one_process(struct task_struct *p, unsigned long user, +extern void update_one_process(task_t *p, unsigned long user, unsigned long system, int cpu); +extern void scheduler_tick(int user_tick, int system); +extern void migration_init(void); +extern unsigned long cache_decay_ticks; +extern int set_user(uid_t new_ruid, int dumpclear); #define MAX_SCHEDULE_TIMEOUT LONG_MAX extern signed long FASTCALL(schedule_timeout(signed long timeout)); @@ -153,10 +154,34 @@ extern int start_context_thread(void); extern int current_is_keventd(void); -#if CONFIG_SMP -extern void set_cpus_allowed(struct task_struct *p, unsigned long new_mask); +/* + * Priority of a process goes from 0..MAX_PRIO-1, valid RT + * priority is 0..MAX_RT_PRIO-1, and SCHED_OTHER tasks are + * in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values + * are inverted: lower p->prio value means higher priority. + * + * The MAX_USER_RT_PRIO value allows the actual maximum + * RT priority to be separate from the value exported to + * user-space. This allows kernel threads to set their + * priority to a value higher than any user task. Note: + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. + */ + +#define MAX_USER_RT_PRIO 100 +#define MAX_RT_PRIO MAX_USER_RT_PRIO + +#define MAX_PRIO (MAX_RT_PRIO + 40) + +/* + * The maximum RT priority is configurable. If the resulting + * bitmap is 160 bits, we can use a hand-coded routine which + * is optimal. Otherwise, we fall back on a generic routine for + * finding the first set bit from an arbitrarily-sized bitmap. + */ +#if MAX_PRIO < 160 && MAX_PRIO > 127 +#define sched_find_first_bit(map) _sched_find_first_bit(map) #else -# define set_cpus_allowed(p, new_mask) do { } while (0) +#define sched_find_first_bit(map) find_first_bit(map, MAX_PRIO) #endif /* @@ -280,6 +305,8 @@ extern struct user_struct root_user; #define INIT_USER (&root_user) +typedef struct prio_array prio_array_t; + struct task_struct { /* * offsets of these are hardcoded elsewhere - touch with care @@ -297,34 +324,25 @@ int lock_depth; /* Lock depth */ -/* - * offset 32 begins here on 32-bit platforms. We keep - * all fields in a single cacheline that are needed for - * the goodness() loop in schedule(). - */ - long counter; - long nice; - unsigned long policy; - struct mm_struct *mm; - int processor; - /* - * cpus_runnable is ~0 if the process is not running on any - * CPU. It's (1 << cpu) if it's running on a CPU. This mask - * is updated under the runqueue lock. - * - * To determine whether a process might run on a CPU, this - * mask is AND-ed with cpus_allowed. 
- */ - unsigned long cpus_runnable, cpus_allowed; /* - * (only the 'next' pointer fits into the cacheline, but - * that's just fine.) + * offset 32 begins here on 32-bit platforms. */ + unsigned int cpu; + int prio, static_prio; struct list_head run_list; - unsigned long sleep_time; + prio_array_t *array; + + unsigned long sleep_avg; + unsigned long sleep_timestamp; + + unsigned long policy; + unsigned long cpus_allowed; + unsigned int time_slice; + + task_t *next_task, *prev_task; + + struct mm_struct *mm, *active_mm; - struct task_struct *next_task, *prev_task; - struct mm_struct *active_mm; struct list_head local_pages; unsigned int allocation_order, nr_local_pages; @@ -348,12 +366,12 @@ * older sibling, respectively. (p->father can be replaced with * p->p_pptr->pid) */ - struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr; + task_t *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr; struct list_head thread_group; /* PID hash table linkage. */ - struct task_struct *pidhash_next; - struct task_struct **pidhash_pprev; + task_t *pidhash_next; + task_t **pidhash_pprev; wait_queue_head_t wait_chldexit; /* for wait4() */ struct completion *vfork_done; /* for vfork() */ @@ -432,8 +450,8 @@ #define PF_MEMDIE 0x00001000 /* Killed for out-of-memory */ #define PF_FREE_PAGES 0x00002000 /* per process page freeing */ #define PF_NOIO 0x00004000 /* avoid generating further I/O */ - #define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */ +#define PF_FSTRANS 0x00200000 /* inside a filesystem transaction */ /* * Ptrace flags @@ -453,10 +471,16 @@ */ #define _STK_LIM (8*1024*1024) -#define DEF_COUNTER (10*HZ/100) /* 100 ms time slice */ -#define MAX_COUNTER (20*HZ/100) -#define DEF_NICE (0) +#if CONFIG_SMP +extern void set_cpus_allowed(task_t *p, unsigned long new_mask); +#else +#define set_cpus_allowed(p, new_mask) do { } while (0) +#endif +extern void set_user_nice(task_t *p, long nice); +extern int task_prio(task_t *p); +extern int task_nice(task_t *p); +extern int idle_cpu(int cpu); extern void yield(void); /* @@ -476,14 +500,14 @@ addr_limit: KERNEL_DS, \ exec_domain: &default_exec_domain, \ lock_depth: -1, \ - counter: DEF_COUNTER, \ - nice: DEF_NICE, \ + prio: MAX_PRIO-20, \ + static_prio: MAX_PRIO-20, \ policy: SCHED_OTHER, \ + cpus_allowed: ~0UL, \ mm: NULL, \ active_mm: &init_mm, \ - cpus_runnable: ~0UL, \ - cpus_allowed: ~0UL, \ run_list: LIST_HEAD_INIT(tsk.run_list), \ + time_slice: HZ, \ next_task: &tsk, \ prev_task: &tsk, \ p_opptr: &tsk, \ @@ -517,24 +541,24 @@ #endif union task_union { - struct task_struct task; + task_t task; unsigned long stack[INIT_TASK_SIZE/sizeof(long)]; }; extern union task_union init_task_union; extern struct mm_struct init_mm; -extern struct task_struct *init_tasks[NR_CPUS]; +extern task_t *init_tasks[NR_CPUS]; /* PID hashing. (shouldnt this be dynamic?) 
*/ #define PIDHASH_SZ (4096 >> 2) -extern struct task_struct *pidhash[PIDHASH_SZ]; +extern task_t *pidhash[PIDHASH_SZ]; #define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1)) -static inline void hash_pid(struct task_struct *p) +static inline void hash_pid(task_t *p) { - struct task_struct **htable = &pidhash[pid_hashfn(p->pid)]; + task_t **htable = &pidhash[pid_hashfn(p->pid)]; if((p->pidhash_next = *htable) != NULL) (*htable)->pidhash_pprev = &p->pidhash_next; @@ -542,16 +566,16 @@ p->pidhash_pprev = htable; } -static inline void unhash_pid(struct task_struct *p) +static inline void unhash_pid(task_t *p) { if(p->pidhash_next) p->pidhash_next->pidhash_pprev = p->pidhash_pprev; *p->pidhash_pprev = p->pidhash_next; } -static inline struct task_struct *find_task_by_pid(int pid) +static inline task_t *find_task_by_pid(int pid) { - struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)]; + task_t *p, **htable = &pidhash[pid_hashfn(pid)]; for(p = *htable; p && p->pid != pid; p = p->pidhash_next) ; @@ -559,19 +583,6 @@ return p; } -#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL) - -static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu) -{ - tsk->processor = cpu; - tsk->cpus_runnable = 1UL << cpu; -} - -static inline void task_release_cpu(struct task_struct *tsk) -{ - tsk->cpus_runnable = ~0UL; -} - /* per-UID process charging. */ extern struct user_struct * alloc_uid(uid_t); extern void free_uid(struct user_struct *); @@ -598,47 +609,51 @@ extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q)); extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout)); -extern int FASTCALL(wake_up_process(struct task_struct * tsk)); +extern int FASTCALL(wake_up_process(task_t * tsk)); +extern void FASTCALL(wake_up_forked_process(task_t * tsk)); +extern void FASTCALL(sched_exit(task_t * p)); #define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) #define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) #define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0) -#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) -#define wake_up_sync_nr(x, nr) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) #define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) #define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr) #define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0) -#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1) -#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr) +#ifdef CONFIG_SMP +#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1) +#else +#define wake_up_interruptible_sync(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) +#endif + asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru); extern int in_group_p(gid_t); extern int in_egroup_p(gid_t); extern void proc_caches_init(void); -extern void flush_signals(struct task_struct *); -extern void flush_signal_handlers(struct task_struct *); +extern void flush_signals(task_t *); +extern void flush_signal_handlers(task_t *); extern void sig_exit(int, int, struct siginfo *); extern int dequeue_signal(sigset_t *, siginfo_t *); extern void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask); extern void unblock_all_signals(void); -extern int send_sig_info(int, 
struct siginfo *, struct task_struct *); -extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int send_sig_info(int, struct siginfo *, task_t *); +extern int force_sig_info(int, struct siginfo *, task_t *); extern int kill_pg_info(int, struct siginfo *, pid_t); extern int kill_sl_info(int, struct siginfo *, pid_t); extern int kill_proc_info(int, struct siginfo *, pid_t); -extern void notify_parent(struct task_struct *, int); -extern void do_notify_parent(struct task_struct *, int); -extern void force_sig(int, struct task_struct *); -extern int send_sig(int, struct task_struct *, int); +extern void notify_parent(task_t *, int); +extern void do_notify_parent(task_t *, int); +extern void force_sig(int, task_t *); +extern int send_sig(int, task_t *, int); extern int kill_pg(pid_t, int, int); extern int kill_sl(pid_t, int, int); extern int kill_proc(pid_t, int, int); extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *); extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long); -static inline int signal_pending(struct task_struct *p) +static inline int signal_pending(task_t *p) { return (p->sigpending != 0); } @@ -677,7 +692,7 @@ This is required every time the blocked sigset_t changes. All callers should have t->sigmask_lock. */ -static inline void recalc_sigpending(struct task_struct *t) +static inline void recalc_sigpending(task_t *t) { t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked); } @@ -784,16 +799,17 @@ extern int expand_fdset(struct files_struct *, int nr); extern void free_fdset(fd_set *, int); -extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); +extern int copy_thread(int, unsigned long, unsigned long, unsigned long, task_t *, struct pt_regs *); extern void flush_thread(void); extern void exit_thread(void); -extern void exit_mm(struct task_struct *); -extern void exit_files(struct task_struct *); -extern void exit_sighand(struct task_struct *); +extern void exit_mm(task_t *); +extern void exit_files(task_t *); +extern void exit_sighand(task_t *); extern void reparent_to_init(void); extern void daemonize(void); +extern task_t *child_reaper; extern int do_execve(char *, char **, char **, struct pt_regs *); extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long); @@ -802,6 +818,9 @@ extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); +extern void wait_task_inactive(task_t * p); +extern void kick_if_running(task_t * p); + extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); #define __wait_event(wq, condition) \ @@ -885,27 +904,12 @@ for (task = next_thread(current) ; task != current ; task = next_thread(task)) #define next_thread(p) \ - list_entry((p)->thread_group.next, struct task_struct, thread_group) + list_entry((p)->thread_group.next, task_t, thread_group) #define thread_group_leader(p) (p->pid == p->tgid) -static inline void del_from_runqueue(struct task_struct * p) -{ - nr_running--; - p->sleep_time = jiffies; - list_del(&p->run_list); - p->run_list.next = NULL; -} - -static inline int task_on_runqueue(struct task_struct *p) +static inline void unhash_process(task_t *p) { - return (p->run_list.next != NULL); -} - -static inline void unhash_process(struct task_struct *p) -{ - if (task_on_runqueue(p)) - out_of_line_bug(); write_lock_irq(&tasklist_lock); 
nr_threads--; unhash_pid(p); @@ -915,12 +919,12 @@ } /* Protects ->fs, ->files, ->mm, and synchronises with wait4(). Nests inside tasklist_lock */ -static inline void task_lock(struct task_struct *p) +static inline void task_lock(task_t *p) { spin_lock(&p->alloc_lock); } -static inline void task_unlock(struct task_struct *p) +static inline void task_unlock(task_t *p) { spin_unlock(&p->alloc_lock); } @@ -944,9 +948,29 @@ return res; } +static inline void set_need_resched(void) +{ + current->need_resched = 1; +} + +static inline void clear_need_resched(void) +{ + current->need_resched = 0; +} + +static inline void set_tsk_need_resched(struct task_struct *tsk) +{ + tsk->need_resched = 1; +} + +static inline void clear_tsk_need_resched(struct task_struct *tsk) +{ + tsk->need_resched = 0; +} + static inline int need_resched(void) { - return (unlikely(current->need_resched)); + return unlikely(current->need_resched); } extern void __cond_resched(void); @@ -956,5 +980,34 @@ __cond_resched(); } +/* + * Wrappers for p->cpu access. No-op on UP. + */ +#ifdef CONFIG_SMP + +static inline unsigned int task_cpu(struct task_struct *p) +{ + return p->cpu; +} + +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) +{ + p->cpu = cpu; +} + +#else + +static inline unsigned int task_cpu(struct task_struct *p) +{ + return 0; +} + +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) +{ +} + +#endif /* CONFIG_SMP */ + #endif /* __KERNEL__ */ + #endif
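The sched.h rework above replaces the 2.4 counter/nice/processor fields with the O(1) scheduler's prio/time_slice/cpu, and CPU placement now goes through set_cpus_allowed() and the task_cpu() accessors. A sketch of a kernel thread pinning itself to one CPU under the new interface; daemonize(), set_current_state() and schedule_timeout() are pre-existing 2.4 primitives, and the thread body is illustrative:

	static int pinned_thread(void *arg)
	{
		int cpu = (int)(long)arg;

		daemonize();
		sprintf(current->comm, "pinned/%d", cpu);

		/* migrate to, and stay on, the chosen logical CPU;
		 * this compiles away on !CONFIG_SMP */
		set_cpus_allowed(current, 1UL << cpu);

		for (;;) {
			/* from here on, task_cpu(current) == cpu */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
			if (signal_pending(current))
				break;
		}
		return 0;
	}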
__attribute__ ((format (printf,2,3))); +int single_open(struct file *, int (*)(struct seq_file *, void *), void *); +int single_release(struct inode *, struct file *); #endif #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/serialP.h linux.21rc5-ac2/include/linux/serialP.h --- linux.21rc5/include/linux/serialP.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/serialP.h 2003-05-28 21:48:48.000000000 +0100 @@ -83,6 +83,7 @@ long pgrp; /* pgrp of opening process */ struct circ_buf xmit; spinlock_t xmit_lock; + spinlock_t irq_spinlock; u8 *iomem_base; u16 iomem_reg_shift; int io_type; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/smp_balance.h linux.21rc5-ac2/include/linux/smp_balance.h --- linux.21rc5/include/linux/smp_balance.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/linux/smp_balance.h 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,10 @@ +#ifndef _LINUX_SMP_BALANCE_H +#define _LINUX_SMP_BALANCE_H + +#ifdef ARCH_HAS_SMP_BALANCE +#include +#else +#define arch_load_balance(x,y) 0 +#endif + +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/smp.h linux.21rc5-ac2/include/linux/smp.h --- linux.21rc5/include/linux/smp.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/smp.h 2003-05-29 01:44:06.000000000 +0100 @@ -84,8 +84,16 @@ #define kernel_lock() #define cpu_logical_map(cpu) 0 #define cpu_number_map(cpu) 0 +#define cpu_online(cpu) ({ BUG_ON((cpu) != 0); 1; }) #define smp_call_function(func,info,retry,wait) ({ 0; }) #define cpu_online_map 1 - +static inline void smp_send_reschedule(int cpu) { } +static inline void smp_send_reschedule_all(void) { } #endif + +/* + * Common definitions: + */ +#define cpu() smp_processor_id() + #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/sysctl.h linux.21rc5-ac2/include/linux/sysctl.h --- linux.21rc5/include/linux/sysctl.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/sysctl.h 2003-05-28 21:48:48.000000000 +0100 @@ -125,6 +125,7 @@ KERN_TAINTED=53, /* int: various kernel tainted flags */ KERN_CADPID=54, /* int: PID of the process to notify on CAD */ KERN_CORE_PATTERN=56, /* string: pattern for core-files */ + KERN_CORE_SETUID=58, /* int: set to allow core dumps of setuid apps */ }; @@ -144,6 +145,7 @@ VM_MAX_MAP_COUNT=11, /* int: Maximum number of active map areas */ VM_MIN_READAHEAD=12, /* Min file readahead */ VM_MAX_READAHEAD=13, /* Max file readahead */ + VM_PAGEBUF=22, /* struct: Control pagebuf parameters */ }; @@ -547,7 +549,7 @@ FS_STATINODE=2, FS_MAXINODE=3, /* int:maximum number of inodes that can be allocated */ FS_NRDQUOT=4, /* int:current number of allocated dquots */ - FS_MAXDQUOT=5, /* int:maximum number of dquots that can be allocated */ + /* was FS_MAXDQUOT */ FS_NRFILE=6, /* int:current number of allocated filedescriptors */ FS_MAXFILE=7, /* int:maximum number of filedescriptors that can be allocated */ FS_DENTRY=8, @@ -558,6 +560,20 @@ FS_LEASES=13, /* int: leases enabled */ FS_DIR_NOTIFY=14, /* int: directory notification enabled */ FS_LEASE_TIME=15, /* int: maximum time to wait for a lease break */ + FS_DQSTATS=16, /* dir: disc quota usage statistics */ + FS_XFS=17, /* struct: control xfs parameters */ +}; + +/* /proc/sys/fs/quota/ */ +enum { + FS_DQ_LOOKUPS = 1, + FS_DQ_DROPS = 2, + FS_DQ_READS = 3, + FS_DQ_WRITES = 4, + FS_DQ_CACHE_HITS = 5, + FS_DQ_ALLOCATED = 6, + 
FS_DQ_FREE = 7, + FS_DQ_SYNCS = 8, }; /* CTL_DEBUG names: */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/tty.h linux.21rc5-ac2/include/linux/tty.h --- linux.21rc5/include/linux/tty.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/tty.h 2003-05-28 21:48:48.000000000 +0100 @@ -265,7 +265,7 @@ int session; kdev_t device; unsigned long flags; - int count; + atomic_t count; struct winsize winsize; unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1; unsigned char low_latency:1, warned:1; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/usbdevice_fs.h linux.21rc5-ac2/include/linux/usbdevice_fs.h --- linux.21rc5/include/linux/usbdevice_fs.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/usbdevice_fs.h 2003-05-28 21:48:48.000000000 +0100 @@ -142,6 +142,8 @@ #define USBDEVFS_HUB_PORTINFO _IOR('U', 19, struct usbdevfs_hub_portinfo) #define USBDEVFS_RESET _IO('U', 20) #define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int) +#define USBDEVFS_DISCONNECT _IO('U', 22) +#define USBDEVFS_CONNECT _IO('U', 23) /* --------------------------------------------------------------------- */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/usb.h linux.21rc5-ac2/include/linux/usb.h --- linux.21rc5/include/linux/usb.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/include/linux/usb.h 2003-05-29 01:44:07.000000000 +0100 @@ -865,6 +865,7 @@ struct usb_device *children[USB_MAXCHILDREN]; }; +extern int usb_ifnum_to_ifpos(struct usb_device *dev, unsigned ifnum); extern struct usb_interface *usb_ifnum_to_if(struct usb_device *dev, unsigned ifnum); extern struct usb_endpoint_descriptor *usb_epnum_to_ep_desc(struct usb_device *dev, unsigned epnum); @@ -873,6 +874,7 @@ extern void usb_scan_devices(void); /* used these for multi-interface device registration */ +extern int usb_find_interface_driver_for_ifnum(struct usb_device *dev, unsigned ifnum); extern void usb_driver_claim_interface(struct usb_driver *driver, struct usb_interface *iface, void* priv); extern int usb_interface_claimed(struct usb_interface *iface); extern void usb_driver_release_interface(struct usb_driver *driver, struct usb_interface *iface); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/vmalloc.h linux.21rc5-ac2/include/linux/vmalloc.h --- linux.21rc5/include/linux/vmalloc.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/vmalloc.h 2003-05-29 01:44:06.000000000 +0100 @@ -21,11 +21,15 @@ extern struct vm_struct * get_vm_area (unsigned long size, unsigned long flags); extern void vfree(void * addr); +#define vunmap(addr) vfree(addr) +extern void * vmap(struct page **pages, int count, + unsigned long flags, pgprot_t prot); extern void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot); extern long vread(char *buf, char *addr, unsigned long count); extern void vmfree_area_pages(unsigned long address, unsigned long size); extern int vmalloc_area_pages(unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot); +extern void *vcalloc(unsigned long nmemb, unsigned long elem_size); /* * Allocate any pages diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/linux/wait.h linux.21rc5-ac2/include/linux/wait.h --- linux.21rc5/include/linux/wait.h 2003-05-28 15:07:36.000000000 +0100 +++ linux.21rc5-ac2/include/linux/wait.h 2003-05-28 21:48:47.000000000 +0100 @@ 
-58,6 +58,7 @@ # define wq_read_unlock read_unlock # define wq_write_lock_irq write_lock_irq # define wq_write_lock_irqsave write_lock_irqsave +# define wq_write_unlock_irq write_unlock_irq # define wq_write_unlock_irqrestore write_unlock_irqrestore # define wq_write_unlock write_unlock #else @@ -70,6 +71,7 @@ # define wq_read_unlock_irqrestore spin_unlock_irqrestore # define wq_write_lock_irq spin_lock_irq # define wq_write_lock_irqsave spin_lock_irqsave +# define wq_write_unlock_irq spin_unlock_irq # define wq_write_unlock_irqrestore spin_unlock_irqrestore # define wq_write_unlock spin_unlock #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/include/net/edp2.h linux.21rc5-ac2/include/net/edp2.h --- linux.21rc5/include/net/edp2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/include/net/edp2.h 2003-05-02 17:51:16.000000000 +0100 @@ -0,0 +1,131 @@ +/* + * The Coraid EDPv2 protocol definitions + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +struct edp2 +{ + /* This structure follows the DIX ethernet header */ + u8 flag_err; +#define EDP_F_RESPONSE 0x80 +#define EDP_F_ERROR 0x40 +#define EDP_F_ERRMASK 0x0F + u8 function; + u32 tag __attribute((packed)); +}; + +struct edp2_ata_fid0 +{ + u8 flag_ver; +#define EDP_ATA_WRITE 0x80 /* I/O from host */ +#define EDP_ATA_LBA48 0x40 /* Command is LBA48 */ +#define EDP_ATA_DBIT 0x10 /* Dbit state in LBA48 */ + u8 err_feature; + u8 sector; + u8 cmd_status; + u8 lba0; + u8 lba1; + u8 lba2; + u8 lba3; + u8 lba4; + u8 lba5; + u8 res0; + u8 res1; + u32 data[0]; +}; + +#define EDPT2_ATA_BADPARAM 0 +#define EDPT2_ATA_DISKFAIL 1 + +struct edp2_ata_fid1 +{ + u32 count; /* Outstanding buffer limit */ + u32 firmware; /* Firmware revision */ + u16 aoe; /* Protocol supported */ + u8 shad; /* Shelf ident */ + u8 slad; /* Slot ident */ + u32 data[0]; /* Config string */ +}; + +struct edp2_ata_fid2 +{ + u32 tx; + u32 rx; + u32 tx_error; + u32 rx_error; + u32 rx_trunc; + u32 rx_over; + u32 rx_crc; + u32 rx_short; + u32 rx_align; + u32 rx_violation; + u32 tx_carrier; + u32 tx_under; + u32 tx_retrans; + u32 tx_late; + u32 tx_heartbeat; + u32 tx_defer; + u32 tx_retry; +}; + +struct edp2_ata_fid3 +{ + u8 cmd; +#define EDP_ATA_FID3_CMD_CMP 0 +#define EDP_ATA_FID3_CMD_NCMP 1 +#define EDP_ATA_FID3_CLAIM 2 +#define EDP_ATA_FID3_FORCE 3 + + u8 res1, res2, res3; + u32 data[0]; +}; + + +#define MAX_QUEUED 128 + +/* + * Used to hold together the edp2 device database + */ + +struct edp2_device +{ + struct edp2_device *next; + + /* For upper layer */ + void *edp2_upper; + int users; + + /* Location */ + struct net_device *dev; + u8 shelf; + u8 slot; + u8 mac[6]; + + /* Properties */ + u32 queue; + u32 revision; + u8 protocol; + int dead:1; + + /* Time info */ + unsigned long last_ident; + + /* Protocol queue */ + int count; + u16 tag; /* bits 23-8 of the tag */ + struct sk_buff *skb[MAX_QUEUED]; + struct timer_list timer; +}; + +struct edp2_cb /* Holds our data on the queue copy of the skb */ +{ + int (*completion)(struct edp2_device *, struct edp2 *edp, unsigned long data, struct sk_buff *skb); + unsigned long data; + unsigned long timeout; + int count; +}; + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/init/do_mounts.c linux.21rc5-ac2/init/do_mounts.c --- linux.21rc5/init/do_mounts.c 2003-05-28 
15:09:05.000000000 +0100 +++ linux.21rc5-ac2/init/do_mounts.c 2003-05-09 18:32:58.000000000 +0100 @@ -15,6 +15,7 @@ #include #include #include +#include #define BUILD_CRAMDISK @@ -474,6 +475,7 @@ * minix * ext2 * romfs + * cramfs * gzip */ static int __init @@ -483,6 +485,7 @@ struct minix_super_block *minixsb; struct ext2_super_block *ext2sb; struct romfs_super_block *romfsb; + struct cramfs_super *cramfsb; int nblocks = -1; unsigned char *buf; @@ -493,6 +496,7 @@ minixsb = (struct minix_super_block *) buf; ext2sb = (struct ext2_super_block *) buf; romfsb = (struct romfs_super_block *) buf; + cramfsb = (struct cramfs_super *) buf; memset(buf, 0xe5, size); /* @@ -522,6 +526,14 @@ goto done; } + if (cramfsb->magic == CRAMFS_MAGIC) { + printk(KERN_NOTICE + "RAMDISK: cramfs filesystem found at block %d\n", + start_block); + nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS; + goto done; + } + /* * Read block 1 to test for minix and ext2 superblock */ @@ -908,6 +920,8 @@ mount_devfs_fs (); } +#ifdef CONFIG_BLK_DEV_RAM + #if defined(BUILD_CRAMDISK) && defined(CONFIG_BLK_DEV_RAM) /* @@ -1054,3 +1068,4 @@ } #endif /* BUILD_CRAMDISK && CONFIG_BLK_DEV_RAM */ +#endif /* CONFIG_BLK_DEV_RAM */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/init/main.c linux.21rc5-ac2/init/main.c --- linux.21rc5/init/main.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/init/main.c 2003-04-28 18:00:34.000000000 +0100 @@ -36,6 +36,10 @@ #include #endif +#ifdef CONFIG_ACPI +#include +#endif + #ifdef CONFIG_PCI #include #endif @@ -60,6 +64,10 @@ #include #endif +#ifdef CONFIG_PNPBIOS +#include +#endif + #ifdef CONFIG_IRDA extern int irda_proto_init(void); extern int irda_device_init(void); @@ -106,6 +114,10 @@ #if defined(CONFIG_SYSVIPC) extern void ipc_init(void); #endif + +#ifdef CONFIG_PARISC +extern void parisc_init(void); +#endif /* * Boot command-line arguments @@ -288,8 +300,6 @@ extern void setup_arch(char **); extern void cpu_idle(void); -unsigned long wait_init_idle; - #ifndef CONFIG_SMP #ifdef CONFIG_X86_LOCAL_APIC @@ -298,34 +308,24 @@ APIC_init_uniprocessor(); } #else -#define smp_init() do { } while (0) +#define smp_init() do { } while (0) #endif #else - /* Called by boot processor to activate the rest. */ static void __init smp_init(void) { /* Get other processors into their bootup holding patterns. */ smp_boot_cpus(); - wait_init_idle = cpu_online_map; - clear_bit(current->processor, &wait_init_idle); /* Don't wait on me! */ smp_threads_ready=1; smp_commence(); - - /* Wait for the other cpus to set up their idle processes */ - printk("Waiting on wait_init_idle (map = 0x%lx)\n", wait_init_idle); - while (wait_init_idle) { - cpu_relax(); - barrier(); - } - printk("All processors have done init_idle\n"); } #endif + /* * We need to finalize in a non-__init function or else race conditions * between the root thread and the init thread may cause start_kernel to @@ -337,9 +337,8 @@ { kernel_thread(init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL); unlock_kernel(); - current->need_resched = 1; - cpu_idle(); -} + cpu_idle(); +} /* * Activate the first processor. @@ -420,18 +419,25 @@ #ifdef CONFIG_PROC_FS proc_root_init(); #endif +#ifdef CONFIG_PARISC + parisc_init(); +#endif #if defined(CONFIG_SYSVIPC) ipc_init(); #endif check_bugs(); + printk("POSIX conformance testing by UNIFIX\n"); - /* - * We count on the initial thread going ok - * Like idlers init is an unlocked kernel thread, which will - * make syscalls (and thus be locked). 
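Before the secondaries are started, the boot CPU now installs itself as its runqueue's idle task; this is why the wait_init_idle handshake could be dropped from smp_init() above. The new ordering, using only calls visible in this hunk, is roughly:

	init_idle(current, smp_processor_id());	/* swapper becomes the boot CPU's idle task */
	smp_init();				/* secondaries come up with their own idle tasks */
	rest_init();				/* fork init, then fall into cpu_idle() */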
+ init_idle(current, smp_processor_id()); + /* + * We count on the initial thread going ok + * Like idlers init is an unlocked kernel thread, which will + * make syscalls (and thus be locked). */ smp_init(); + + /* Do the rest non-__init'ed, we're now alive */ rest_init(); } @@ -460,6 +466,10 @@ */ static void __init do_basic_setup(void) { + /* Start the per-CPU migration threads */ +#if CONFIG_SMP + migration_init(); +#endif /* * Tell the world that we're going to be the grim @@ -491,7 +501,9 @@ #if defined(CONFIG_ARCH_S390) s390_init_machine_check(); #endif - +#ifdef CONFIG_ACPI_INTERPRETER + acpi_init(); +#endif #ifdef CONFIG_PCI pci_init(); #endif @@ -519,6 +531,9 @@ #ifdef CONFIG_ISAPNP isapnp_init(); #endif +#ifdef CONFIG_PNPBIOS + pnpbios_init(); +#endif #ifdef CONFIG_TC tc_init(); #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/ipc/sem.c linux.21rc5-ac2/ipc/sem.c --- linux.21rc5/ipc/sem.c 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/ipc/sem.c 2003-04-22 16:44:38.000000000 +0100 @@ -62,6 +62,7 @@ #include #include #include +#include #include #include "util.h" @@ -839,6 +840,12 @@ asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops) { + return sys_semtimedop(semid, tsops, nsops, NULL); +} + +asmlinkage long sys_semtimedop (int semid, struct sembuf *tsops, + unsigned nsops, const struct timespec *timeout) +{ int error = -EINVAL; struct sem_array *sma; struct sembuf fast_sops[SEMOPM_FAST]; @@ -846,6 +853,7 @@ struct sem_undo *un; int undos = 0, decrease = 0, alter = 0; struct sem_queue queue; + unsigned long jiffies_left = 0; if (nsops < 1 || semid < 0) return -EINVAL; @@ -860,6 +868,19 @@ error=-EFAULT; goto out_free; } + if (timeout) { + struct timespec _timeout; + if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) { + error = -EFAULT; + goto out_free; + } + if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 || + _timeout.tv_nsec >= 1000000000L) { + error = -EINVAL; + goto out_free; + } + jiffies_left = timespec_to_jiffies(&_timeout); + } sma = sem_lock(semid); error=-EINVAL; if(sma==NULL) @@ -932,7 +953,10 @@ current->state = TASK_INTERRUPTIBLE; sem_unlock(semid); - schedule(); + if (timeout) + jiffies_left = schedule_timeout(jiffies_left); + else + schedule(); tmp = sem_lock(semid); if(tmp==NULL) { @@ -957,6 +981,8 @@ break; } else { error = queue.status; + if (error == -EINTR && timeout && jiffies_left == 0) + error = -EAGAIN; if (queue.prev) /* got Interrupt */ break; /* Everything done by update_queue */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/ipc/shm.c linux.21rc5-ac2/ipc/shm.c --- linux.21rc5/ipc/shm.c 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/ipc/shm.c 2003-04-22 16:44:38.000000000 +0100 @@ -680,7 +680,7 @@ shmdnext = shmd->vm_next; if (shmd->vm_ops == &shm_vm_ops && shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) { - do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start); + do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start, 1); retval = 0; } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/ipc/util.c linux.21rc5-ac2/ipc/util.c --- linux.21rc5/ipc/util.c 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/ipc/util.c 2003-04-22 16:44:38.000000000 +0100 @@ -359,6 +359,12 @@ return -ENOSYS; } +asmlinkage long sys_semtimedop(int semid, struct sembuf *sops, unsigned nsops, + const struct timespec *timeout) +{ + return -ENOSYS; +} + asmlinkage long sys_semctl (int semid, int semnum, int cmd, 
union semun arg) { return -ENOSYS; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/acct.c linux.21rc5-ac2/kernel/acct.c --- linux.21rc5/kernel/acct.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/kernel/acct.c 2003-04-22 16:44:38.000000000 +0100 @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -299,8 +300,10 @@ ac.ac_etime = encode_comp_t(jiffies - current->start_time); ac.ac_utime = encode_comp_t(current->times.tms_utime); ac.ac_stime = encode_comp_t(current->times.tms_stime); - ac.ac_uid = current->uid; - ac.ac_gid = current->gid; + ac.ac_uid = fs_high2lowuid(current->uid); + ac.ac_gid = fs_high2lowgid(current->gid); + ac.ac_uid32 = current->uid; + ac.ac_gid32 = current->gid; ac.ac_tty = (current->tty) ? kdev_t_to_nr(current->tty->device) : 0; ac.ac_flag = 0; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/capability.c linux.21rc5-ac2/kernel/capability.c --- linux.21rc5/kernel/capability.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/kernel/capability.c 2003-04-22 16:44:38.000000000 +0100 @@ -8,6 +8,8 @@ #include #include +unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */ + kernel_cap_t cap_bset = CAP_INIT_EFF_SET; /* Note: never hold tasklist_lock while spinning for this one */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/cpufreq.c linux.21rc5-ac2/kernel/cpufreq.c --- linux.21rc5/kernel/cpufreq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/kernel/cpufreq.c 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,1253 @@ +/* + * linux/kernel/cpufreq.c + * + * Copyright (C) 2001 Russell King + * (C) 2002 Dominik Brodowski + * + * $Id: cpufreq.c,v 1.55 2003/01/10 08:39:18 db Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_CPU_FREQ_24_API +#include +#endif + + +/** + * The "cpufreq driver" - the arch- or hardware-dependend low + * level driver of CPUFreq support, and its locking mutex. + * cpu_max_freq is in kHz. + */ +static struct cpufreq_driver *cpufreq_driver; +static DECLARE_MUTEX (cpufreq_driver_sem); + + +/** + * Two notifier lists: the "policy" list is involved in the + * validation process for a new CPU frequency policy; the + * "transition" list for kernel code that needs to handle + * changes to devices when the CPU clock speed changes. + * The mutex locks both lists. If both cpufreq_driver_sem + * and cpufreq_notifier_sem need to be hold, get cpufreq_driver_sem + * first. + */ +static struct notifier_block *cpufreq_policy_notifier_list; +static struct notifier_block *cpufreq_transition_notifier_list; +static DECLARE_MUTEX (cpufreq_notifier_sem); + + +/** + * The cpufreq default policy. Can be set by a "cpufreq=..." command + * line option. + */ +static struct cpufreq_policy default_policy = { + .cpu = CPUFREQ_ALL_CPUS, + .min = 0, + .max = 0, + .policy = 0, +}; + + +#ifdef CONFIG_CPU_FREQ_24_API +/** + * A few values needed by the 2.4.-compatible API + */ +static unsigned int cpu_max_freq[NR_CPUS]; +static unsigned int cpu_min_freq[NR_CPUS]; +static unsigned int cpu_cur_freq[NR_CPUS]; +#endif + + + +/********************************************************************* + * 2.6. 
API * + *********************************************************************/ + +/** + * cpufreq_parse_policy - parse a policy string + * @input_string: the string to parse. + * @policy: the policy written inside input_string + * + * This function parses a "policy string" - something the user echo'es into + * /proc/cpufreq or gives as boot parameter - into a struct cpufreq_policy. + * If there are invalid/missing entries, they are replaced with current + * cpufreq policy. + */ +static int cpufreq_parse_policy(char input_string[42], struct cpufreq_policy *policy) +{ + unsigned int min = 0; + unsigned int max = 0; + unsigned int cpu = 0; + char policy_string[42] = {'\0'}; + struct cpufreq_policy current_policy; + unsigned int result = -EFAULT; + unsigned int i = 0; + + if (!policy) + return -EINVAL; + + policy->min = 0; + policy->max = 0; + policy->policy = 0; + policy->cpu = CPUFREQ_ALL_CPUS; + + if (sscanf(input_string, "%d:%d:%d:%s", &cpu, &min, &max, policy_string) == 4) + { + policy->min = min; + policy->max = max; + policy->cpu = cpu; + result = 0; + goto scan_policy; + } + if (sscanf(input_string, "%d%%%d%%%d%%%s", &cpu, &min, &max, policy_string) == 4) + { + if (!cpufreq_get_policy(¤t_policy, cpu)) { + policy->min = (min * current_policy.cpuinfo.max_freq) / 100; + policy->max = (max * current_policy.cpuinfo.max_freq) / 100; + policy->cpu = cpu; + result = 0; + goto scan_policy; + } + } + + if (sscanf(input_string, "%d:%d:%s", &min, &max, policy_string) == 3) + { + policy->min = min; + policy->max = max; + result = 0; + goto scan_policy; + } + + if (sscanf(input_string, "%d%%%d%%%s", &min, &max, policy_string) == 3) + { + if (!cpufreq_get_policy(¤t_policy, cpu)) { + policy->min = (min * current_policy.cpuinfo.max_freq) / 100; + policy->max = (max * current_policy.cpuinfo.max_freq) / 100; + result = 0; + goto scan_policy; + } + } + + return -EINVAL; + +scan_policy: + + for (i=0;ipolicy = CPUFREQ_POLICY_POWERSAVE; + } + else if (!strncmp(policy_string, "performance",6) || + !strncmp(policy_string, "high",4) || + !strncmp(policy_string, "full",4)) + { + result = 0; + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + } + else if (!cpufreq_get_policy(¤t_policy, policy->cpu)) + { + policy->policy = current_policy.policy; + } + else + { + policy->policy = 0; + } + + return result; +} + + +/* + * cpufreq command line parameter. Must be hard values (kHz) + * cpufreq=1000000:2000000:PERFORMANCE + * to set the default CPUFreq policy. + */ +static int __init cpufreq_setup(char *str) +{ + cpufreq_parse_policy(str, &default_policy); + default_policy.cpu = CPUFREQ_ALL_CPUS; + return 1; +} +__setup("cpufreq=", cpufreq_setup); + + +#ifdef CONFIG_PROC_FS + +/** + * cpufreq_proc_read - read /proc/cpufreq + * + * This function prints out the current cpufreq policy. + */ +static int cpufreq_proc_read ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + char *p = page; + int len = 0; + struct cpufreq_policy policy; + unsigned int min_pctg = 0; + unsigned int max_pctg = 0; + unsigned int i = 0; + + if (off != 0) + goto end; + + p += sprintf(p, " minimum CPU frequency - maximum CPU frequency - policy\n"); + for (i=0;icount) + len = count; + if (len<0) + len = 0; + + return len; +} + + +/** + * cpufreq_proc_write - handles writing into /proc/cpufreq + * + * This function calls the parsing script and then sets the policy + * accordingly. 
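+ *
+ * For example (illustrative values only), writing
+ *	0:500000:1000000:performance
+ * requests a 500000..1000000 kHz range with the performance policy on
+ * CPU 0, and the percentage form
+ *	0%50%75%powersave
+ * takes min/max as percentages of cpuinfo.max_freq instead.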
+ */ +static int cpufreq_proc_write ( + struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + int result = 0; + char proc_string[42] = {'\0'}; + struct cpufreq_policy policy; + unsigned int i = 0; + + + if ((count > sizeof(proc_string) - 1)) + return -EINVAL; + + if (copy_from_user(proc_string, buffer, count)) + return -EFAULT; + + proc_string[count] = '\0'; + + result = cpufreq_parse_policy(proc_string, &policy); + if (result) + return -EFAULT; + + if (policy.cpu == CPUFREQ_ALL_CPUS) + { + for (i=0; iread_proc = cpufreq_proc_read; + entry->write_proc = cpufreq_proc_write; + } + + return 0; +} + + +/** + * cpufreq_proc_exit - removes "cpufreq" from the /proc root directory. + * + * This function removes "cpufreq" from the /proc root directory. + */ +static void cpufreq_proc_exit (void) +{ + remove_proc_entry("cpufreq", &proc_root); + return; +} +#endif /* CONFIG_PROC_FS */ + + + +/********************************************************************* + * 2.4. COMPATIBLE API * + *********************************************************************/ + +#ifdef CONFIG_CPU_FREQ_24_API +/** + * cpufreq_set - set the CPU frequency + * @freq: target frequency in kHz + * @cpu: CPU for which the frequency is to be set + * + * Sets the CPU frequency to freq. + */ +int cpufreq_set(unsigned int freq, unsigned int cpu) +{ + struct cpufreq_policy policy; + down(&cpufreq_driver_sem); + if (!cpufreq_driver || !freq || (cpu > NR_CPUS)) { + up(&cpufreq_driver_sem); + return -EINVAL; + } + + policy.min = freq; + policy.max = freq; + policy.policy = CPUFREQ_POLICY_POWERSAVE; + policy.cpu = cpu; + + up(&cpufreq_driver_sem); + + if (policy.cpu == CPUFREQ_ALL_CPUS) + { + unsigned int i; + unsigned int ret = 0; + for (i=0; iextra1; + int len, left = *lenp; + + if (!left || (filp->f_pos && !write) || !cpu_online(cpu)) { + *lenp = 0; + return 0; + } + + if (write) { + unsigned int freq; + + len = left; + if (left > sizeof(buf)) + left = sizeof(buf); + if (copy_from_user(buf, buffer, left)) + return -EFAULT; + buf[sizeof(buf) - 1] = '\0'; + + freq = simple_strtoul(buf, &p, 0); + cpufreq_set(freq, cpu); + } else { + len = sprintf(buf, "%d\n", cpufreq_get(cpu)); + if (len > left) + len = left; + if (copy_to_user(buffer, buf, len)) + return -EFAULT; + } + + *lenp = len; + filp->f_pos += len; + return 0; +} + +static int +cpufreq_sysctl(ctl_table *table, int *name, int nlen, + void *oldval, size_t *oldlenp, + void *newval, size_t newlen, void **context) +{ + int cpu = (int) table->extra1; + + if (!cpu_online(cpu)) + return -EINVAL; + + if (oldval && oldlenp) { + size_t oldlen; + + if (get_user(oldlen, oldlenp)) + return -EFAULT; + + if (oldlen != sizeof(unsigned int)) + return -EINVAL; + + if (put_user(cpufreq_get(cpu), (unsigned int *)oldval) || + put_user(sizeof(unsigned int), oldlenp)) + return -EFAULT; + } + if (newval && newlen) { + unsigned int freq; + + if (newlen != sizeof(unsigned int)) + return -EINVAL; + + if (get_user(freq, (unsigned int *)newval)) + return -EFAULT; + + cpufreq_set(freq, cpu); + } + return 1; +} + +/* ctl_table ctl_cpu_vars_{0,1,...,(NR_CPUS-1)} */ +/* due to NR_CPUS tweaking, a lot of if/endifs are required, sorry */ + CTL_TABLE_CPU_VARS(0); +#if NR_CPUS > 1 + CTL_TABLE_CPU_VARS(1); +#endif +#if NR_CPUS > 2 + CTL_TABLE_CPU_VARS(2); +#endif +#if NR_CPUS > 3 + CTL_TABLE_CPU_VARS(3); +#endif +#if NR_CPUS > 4 + CTL_TABLE_CPU_VARS(4); +#endif +#if NR_CPUS > 5 + CTL_TABLE_CPU_VARS(5); +#endif +#if NR_CPUS > 6 + CTL_TABLE_CPU_VARS(6); +#endif +#if NR_CPUS > 7 + 
CTL_TABLE_CPU_VARS(7); +#endif +#if NR_CPUS > 8 + CTL_TABLE_CPU_VARS(8); +#endif +#if NR_CPUS > 9 + CTL_TABLE_CPU_VARS(9); +#endif +#if NR_CPUS > 10 + CTL_TABLE_CPU_VARS(10); +#endif +#if NR_CPUS > 11 + CTL_TABLE_CPU_VARS(11); +#endif +#if NR_CPUS > 12 + CTL_TABLE_CPU_VARS(12); +#endif +#if NR_CPUS > 13 + CTL_TABLE_CPU_VARS(13); +#endif +#if NR_CPUS > 14 + CTL_TABLE_CPU_VARS(14); +#endif +#if NR_CPUS > 15 + CTL_TABLE_CPU_VARS(15); +#endif +#if NR_CPUS > 16 + CTL_TABLE_CPU_VARS(16); +#endif +#if NR_CPUS > 17 + CTL_TABLE_CPU_VARS(17); +#endif +#if NR_CPUS > 18 + CTL_TABLE_CPU_VARS(18); +#endif +#if NR_CPUS > 19 + CTL_TABLE_CPU_VARS(19); +#endif +#if NR_CPUS > 20 + CTL_TABLE_CPU_VARS(20); +#endif +#if NR_CPUS > 21 + CTL_TABLE_CPU_VARS(21); +#endif +#if NR_CPUS > 22 + CTL_TABLE_CPU_VARS(22); +#endif +#if NR_CPUS > 23 + CTL_TABLE_CPU_VARS(23); +#endif +#if NR_CPUS > 24 + CTL_TABLE_CPU_VARS(24); +#endif +#if NR_CPUS > 25 + CTL_TABLE_CPU_VARS(25); +#endif +#if NR_CPUS > 26 + CTL_TABLE_CPU_VARS(26); +#endif +#if NR_CPUS > 27 + CTL_TABLE_CPU_VARS(27); +#endif +#if NR_CPUS > 28 + CTL_TABLE_CPU_VARS(28); +#endif +#if NR_CPUS > 29 + CTL_TABLE_CPU_VARS(29); +#endif +#if NR_CPUS > 30 + CTL_TABLE_CPU_VARS(30); +#endif +#if NR_CPUS > 31 + CTL_TABLE_CPU_VARS(31); +#endif +#if NR_CPUS > 32 +#error please extend CPU enumeration +#endif + +/* due to NR_CPUS tweaking, a lot of if/endifs are required, sorry */ +static ctl_table ctl_cpu_table[NR_CPUS + 1] = { + CPU_ENUM(0), +#if NR_CPUS > 1 + CPU_ENUM(1), +#endif +#if NR_CPUS > 2 + CPU_ENUM(2), +#endif +#if NR_CPUS > 3 + CPU_ENUM(3), +#endif +#if NR_CPUS > 4 + CPU_ENUM(4), +#endif +#if NR_CPUS > 5 + CPU_ENUM(5), +#endif +#if NR_CPUS > 6 + CPU_ENUM(6), +#endif +#if NR_CPUS > 7 + CPU_ENUM(7), +#endif +#if NR_CPUS > 8 + CPU_ENUM(8), +#endif +#if NR_CPUS > 9 + CPU_ENUM(9), +#endif +#if NR_CPUS > 10 + CPU_ENUM(10), +#endif +#if NR_CPUS > 11 + CPU_ENUM(11), +#endif +#if NR_CPUS > 12 + CPU_ENUM(12), +#endif +#if NR_CPUS > 13 + CPU_ENUM(13), +#endif +#if NR_CPUS > 14 + CPU_ENUM(14), +#endif +#if NR_CPUS > 15 + CPU_ENUM(15), +#endif +#if NR_CPUS > 16 + CPU_ENUM(16), +#endif +#if NR_CPUS > 17 + CPU_ENUM(17), +#endif +#if NR_CPUS > 18 + CPU_ENUM(18), +#endif +#if NR_CPUS > 19 + CPU_ENUM(19), +#endif +#if NR_CPUS > 20 + CPU_ENUM(20), +#endif +#if NR_CPUS > 21 + CPU_ENUM(21), +#endif +#if NR_CPUS > 22 + CPU_ENUM(22), +#endif +#if NR_CPUS > 23 + CPU_ENUM(23), +#endif +#if NR_CPUS > 24 + CPU_ENUM(24), +#endif +#if NR_CPUS > 25 + CPU_ENUM(25), +#endif +#if NR_CPUS > 26 + CPU_ENUM(26), +#endif +#if NR_CPUS > 27 + CPU_ENUM(27), +#endif +#if NR_CPUS > 28 + CPU_ENUM(28), +#endif +#if NR_CPUS > 29 + CPU_ENUM(29), +#endif +#if NR_CPUS > 30 + CPU_ENUM(30), +#endif +#if NR_CPUS > 31 + CPU_ENUM(31), +#endif +#if NR_CPUS > 32 +#error please extend CPU enumeration +#endif + { + .ctl_name = 0, + } +}; + +static ctl_table ctl_cpu[2] = { + { + .ctl_name = CTL_CPU, + .procname = "cpu", + .mode = 0555, + .child = ctl_cpu_table, + }, + { + .ctl_name = 0, + } +}; + +struct ctl_table_header *cpufreq_sysctl_table; + +static inline void cpufreq_sysctl_init(void) +{ + cpufreq_sysctl_table = register_sysctl_table(ctl_cpu, 0); +} + +static inline void cpufreq_sysctl_exit(void) +{ + unregister_sysctl_table(cpufreq_sysctl_table); +} + +#else +#define cpufreq_sysctl_init() do {} while(0) +#define cpufreq_sysctl_exit() do {} while(0) +#endif /* CONFIG_SYSCTL */ +#endif /* CONFIG_CPU_FREQ_24_API */ + + + +/********************************************************************* + * NOTIFIER LISTS INTERFACE * 
+ *********************************************************************/ + +/** + * cpufreq_register_notifier - register a driver with cpufreq + * @nb: notifier function to register + * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER + * + * Add a driver to one of two lists: either a list of drivers that + * are notified about clock rate changes (once before and once after + * the transition), or a list of drivers that are notified about + * changes in cpufreq policy. + * + * This function may sleep, and has the same return conditions as + * notifier_chain_register. + */ +int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) +{ + int ret; + + down(&cpufreq_notifier_sem); + switch (list) { + case CPUFREQ_TRANSITION_NOTIFIER: + ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb); + break; + case CPUFREQ_POLICY_NOTIFIER: + ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb); + break; + default: + ret = -EINVAL; + } + up(&cpufreq_notifier_sem); + + return ret; +} +EXPORT_SYMBOL(cpufreq_register_notifier); + + +/** + * cpufreq_unregister_notifier - unregister a driver with cpufreq + * @nb: notifier block to be unregistered + * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER + * + * Remove a driver from the CPU frequency notifier list. + * + * This function may sleep, and has the same return conditions as + * notifier_chain_unregister. + */ +int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) +{ + int ret; + + down(&cpufreq_notifier_sem); + switch (list) { + case CPUFREQ_TRANSITION_NOTIFIER: + ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb); + break; + case CPUFREQ_POLICY_NOTIFIER: + ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb); + break; + default: + ret = -EINVAL; + } + up(&cpufreq_notifier_sem); + + return ret; +} +EXPORT_SYMBOL(cpufreq_unregister_notifier); + + + +/********************************************************************* + * POLICY INTERFACE * + *********************************************************************/ + +/** + * cpufreq_get_policy - get the current cpufreq_policy + * @policy: struct cpufreq_policy into which the current cpufreq_policy is written + * + * Reads the current cpufreq policy. + */ +int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) +{ + down(&cpufreq_driver_sem); + if (!cpufreq_driver || !policy || + (cpu >= NR_CPUS) || (!cpu_online(cpu))) { + up(&cpufreq_driver_sem); + return -EINVAL; + } + + policy->min = cpufreq_driver->policy[cpu].min; + policy->max = cpufreq_driver->policy[cpu].max; + policy->policy = cpufreq_driver->policy[cpu].policy; + policy->cpuinfo.max_freq = cpufreq_driver->policy[cpu].cpuinfo.max_freq; + policy->cpuinfo.min_freq = cpufreq_driver->policy[cpu].cpuinfo.min_freq; + policy->cpuinfo.transition_latency = cpufreq_driver->policy[cpu].cpuinfo.transition_latency; + policy->cpu = cpu; + + up(&cpufreq_driver_sem); + + return 0; +} +EXPORT_SYMBOL(cpufreq_get_policy); + + +/** + * cpufreq_set_policy - set a new CPUFreq policy + * @policy: policy to be set. + * + * Sets a new CPU frequency and voltage scaling policy. 
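+ *
+ * A minimal caller sketch (illustrative, not part of this patch):
+ *
+ *	struct cpufreq_policy pol;
+ *
+ *	if (!cpufreq_get_policy(&pol, 0)) {
+ *		pol.policy = CPUFREQ_POLICY_POWERSAVE;
+ *		cpufreq_set_policy(&pol);
+ *	}
+ *
+ * cpufreq_get_policy() fills in the cpu field and the cpuinfo limits,
+ * so a caller only needs to change the fields it cares about.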
+ */ +int cpufreq_set_policy(struct cpufreq_policy *policy) +{ + int ret; + + down(&cpufreq_driver_sem); + if (!cpufreq_driver || !cpufreq_driver->verify || + !cpufreq_driver->setpolicy || !policy || + (policy->cpu >= NR_CPUS) || (!cpu_online(policy->cpu))) { + up(&cpufreq_driver_sem); + return -EINVAL; + } + + policy->cpuinfo.max_freq = cpufreq_driver->policy[policy->cpu].cpuinfo.max_freq; + policy->cpuinfo.min_freq = cpufreq_driver->policy[policy->cpu].cpuinfo.min_freq; + policy->cpuinfo.transition_latency = cpufreq_driver->policy[policy->cpu].cpuinfo.transition_latency; + + /* verify the cpu speed can be set within this limit */ + ret = cpufreq_driver->verify(policy); + if (ret) { + up(&cpufreq_driver_sem); + return ret; + } + + down(&cpufreq_notifier_sem); + + /* adjust if neccessary - all reasons */ + notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST, + policy); + + /* adjust if neccessary - hardware incompatibility*/ + notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE, + policy); + + /* verify the cpu speed can be set within this limit, + which might be different to the first one */ + ret = cpufreq_driver->verify(policy); + if (ret) { + up(&cpufreq_notifier_sem); + up(&cpufreq_driver_sem); + return ret; + } + + /* notification of the new policy */ + notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY, + policy); + + up(&cpufreq_notifier_sem); + + cpufreq_driver->policy[policy->cpu].min = policy->min; + cpufreq_driver->policy[policy->cpu].max = policy->max; + cpufreq_driver->policy[policy->cpu].policy = policy->policy; + +#ifdef CONFIG_CPU_FREQ_24_API + cpu_cur_freq[policy->cpu] = policy->max; +#endif + + ret = cpufreq_driver->setpolicy(policy); + + up(&cpufreq_driver_sem); + + return ret; +} +EXPORT_SYMBOL(cpufreq_set_policy); + + + +/********************************************************************* + * EXTERNALLY AFFECTING FREQUENCY CHANGES * + *********************************************************************/ + +/** + * adjust_jiffies - adjust the system "loops_per_jiffy" + * + * This function alters the system "loops_per_jiffy" for the clock + * speed change. Note that loops_per_jiffy cannot be updated on SMP + * systems as each CPU might be scaled differently. So, use the arch + * per-CPU loops_per_jiffy value wherever possible. + */ +#ifndef CONFIG_SMP +static unsigned long l_p_j_ref = 0; +static unsigned int l_p_j_ref_freq = 0; + +static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) +{ + if (!l_p_j_ref_freq) { + l_p_j_ref = loops_per_jiffy; + l_p_j_ref_freq = ci->old; + } + if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || + (val == CPUFREQ_POSTCHANGE && ci->old > ci->new)) + loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); +} +#else +#define adjust_jiffies(x...) do {} while (0) +#endif + + +/** + * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition + * + * This function calls the transition notifiers and the "adjust_jiffies" function. It is called + * twice on all CPU frequency changes that have external effects. 
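+ *
+ * The expected calling pattern in a scaling driver is (the kHz values
+ * are made up):
+ *
+ *	struct cpufreq_freqs freqs = { .cpu = 0, .old = 500000, .new = 1000000 };
+ *
+ *	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ *	... reprogram the hardware ...
+ *	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);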
+ */ +void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) +{ + down(&cpufreq_notifier_sem); + switch (state) { + case CPUFREQ_PRECHANGE: + notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs); + adjust_jiffies(CPUFREQ_PRECHANGE, freqs); + break; + case CPUFREQ_POSTCHANGE: + adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); + notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); +#ifdef CONFIG_CPU_FREQ_24_API + cpu_cur_freq[freqs->cpu] = freqs->new; +#endif + break; + } + up(&cpufreq_notifier_sem); +} +EXPORT_SYMBOL_GPL(cpufreq_notify_transition); + + + +/********************************************************************* + * REGISTER / UNREGISTER CPUFREQ DRIVER * + *********************************************************************/ + +/** + * cpufreq_register - register a CPU Frequency driver + * @driver_data: A struct cpufreq_driver containing the values submitted by the CPU Frequency driver. + * + * Registers a CPU Frequency driver to this core code. This code + * returns zero on success, -EBUSY when another driver got here first + * (and isn't unregistered in the meantime). + * + */ +int cpufreq_register(struct cpufreq_driver *driver_data) +{ + unsigned int ret; + unsigned int i; + struct cpufreq_policy policy; + + if (cpufreq_driver) + return -EBUSY; + + if (!driver_data || !driver_data->verify || + !driver_data->setpolicy) + return -EINVAL; + + down(&cpufreq_driver_sem); + cpufreq_driver = driver_data; + + /* check for a default policy - if it exists, use it on _all_ CPUs*/ + for (i=0; ipolicy[i].policy = default_policy.policy; + if (default_policy.min) + cpufreq_driver->policy[i].min = default_policy.min; + if (default_policy.max) + cpufreq_driver->policy[i].max = default_policy.max; + } + + /* set default policy on all CPUs. Must be called per-CPU and not + * with CPUFREQ_ALL_CPUs as there might be no common policy for all + * CPUs (UltraSPARC etc.) + */ + for (i=0; ipolicy[i].policy; + policy.min = cpufreq_driver->policy[i].min; + policy.max = cpufreq_driver->policy[i].max; + policy.cpu = i; + up(&cpufreq_driver_sem); + ret = cpufreq_set_policy(&policy); + down(&cpufreq_driver_sem); + if (ret) { + cpufreq_driver = NULL; + up(&cpufreq_driver_sem); + return ret; + } + } + + up(&cpufreq_driver_sem); + + cpufreq_proc_init(); + +#ifdef CONFIG_CPU_FREQ_24_API + down(&cpufreq_driver_sem); + for (i=0; ipolicy[i].cpuinfo.min_freq; + cpu_max_freq[i] = driver_data->policy[i].cpuinfo.max_freq; + cpu_cur_freq[i] = driver_data->cpu_cur_freq[i]; + } + up(&cpufreq_driver_sem); + + cpufreq_sysctl_init(); +#endif + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_register); + + +/** + * cpufreq_unregister - unregister the current CPUFreq driver + * + * Unregister the current CPUFreq driver. Only call this if you have + * the right to do so, i.e. if you have succeeded in initialising before! + * Returns zero if successful, and -EINVAL if the cpufreq_driver is + * currently not initialised. 
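+ *
+ * Typically this is called from the scaling driver's module exit path,
+ * mirroring the cpufreq_register() call done at initialisation time.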
+ */ +int cpufreq_unregister(void) +{ + down(&cpufreq_driver_sem); + + if (!cpufreq_driver) { + up(&cpufreq_driver_sem); + return -EINVAL; + } + + cpufreq_driver = NULL; + + up(&cpufreq_driver_sem); + + cpufreq_proc_exit(); + +#ifdef CONFIG_CPU_FREQ_24_API + cpufreq_sysctl_exit(); +#endif + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_unregister); + + +#ifdef CONFIG_PM +/** + * cpufreq_restore - restore the CPU clock frequency after resume + * + * Restore the CPU clock frequency so that our idea of the current + * frequency reflects the actual hardware. + */ +int cpufreq_restore(void) +{ + struct cpufreq_policy policy; + unsigned int i; + unsigned int ret = 0; + + if (in_interrupt()) + panic("cpufreq_restore() called from interrupt context!"); + + for (i=0;ipolicy[i].min; + policy.max = cpufreq_driver->policy[i].max; + policy.policy = cpufreq_driver->policy[i].policy; + policy.cpu = i; + up(&cpufreq_driver_sem); + + ret += cpufreq_set_policy(&policy); + } + + return ret; +} +EXPORT_SYMBOL_GPL(cpufreq_restore); +#else +#define cpufreq_restore() do {} while (0) +#endif /* CONFIG_PM */ + + +/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ + +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table) +{ + unsigned int min_freq = ~0; + unsigned int max_freq = 0; + unsigned int i = 0; + + for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + unsigned int freq = table[i].frequency; + if (freq == CPUFREQ_ENTRY_INVALID) + continue; + if (freq < min_freq) + min_freq = freq; + if (freq > max_freq) + max_freq = freq; + } + + policy->min = policy->cpuinfo.min_freq = min_freq; + policy->max = policy->cpuinfo.max_freq = max_freq; + + if (policy->min == ~0) + return -EINVAL; + else + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); + + +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table) +{ + unsigned int next_larger = ~0; + unsigned int i = 0; + unsigned int count = 0; + + if (!cpu_online(policy->cpu)) + return -EINVAL; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + unsigned int freq = table[i].frequency; + if (freq == CPUFREQ_ENTRY_INVALID) + continue; + if ((freq >= policy->min) && (freq <= policy->max)) + count++; + else if ((next_larger > freq) && (freq > policy->max)) + next_larger = freq; + } + + if (!count) + policy->max = next_larger; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); + + +int cpufreq_frequency_table_setpolicy(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int *index) +{ + struct cpufreq_frequency_table optimal = { .index = ~0, }; + unsigned int i; + + switch (policy->policy) { + case CPUFREQ_POLICY_PERFORMANCE: + optimal.frequency = 0; + break; + case CPUFREQ_POLICY_POWERSAVE: + optimal.frequency = ~0; + break; + } + + if (!cpu_online(policy->cpu)) + return -EINVAL; + + for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + unsigned int freq = table[i].frequency; + if (freq == CPUFREQ_ENTRY_INVALID) + continue; + if ((freq < policy->min) || (freq > policy->max)) + continue; + switch(policy->policy) { + case CPUFREQ_POLICY_PERFORMANCE: + if (optimal.frequency <= 
freq) { + optimal.frequency = freq; + optimal.index = i; + } + break; + case CPUFREQ_POLICY_POWERSAVE: + if (optimal.frequency >= freq) { + optimal.frequency = freq; + optimal.index = i; + } + break; + } + } + if (optimal.index > i) + return -EINVAL; + + *index = optimal.index; + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_setpolicy); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/exit.c linux.21rc5-ac2/kernel/exit.c --- linux.21rc5/kernel/exit.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/kernel/exit.c 2003-04-22 16:44:38.000000000 +0100 @@ -28,49 +28,22 @@ static void release_task(struct task_struct * p) { - if (p != current) { + if (p == current) + BUG(); #ifdef CONFIG_SMP - /* - * Wait to make sure the process isn't on the - * runqueue (active on some other CPU still) - */ - for (;;) { - task_lock(p); - if (!task_has_cpu(p)) - break; - task_unlock(p); - do { - cpu_relax(); - barrier(); - } while (task_has_cpu(p)); - } - task_unlock(p); + wait_task_inactive(p); #endif - atomic_dec(&p->user->processes); - free_uid(p->user); - unhash_process(p); - - release_thread(p); - current->cmin_flt += p->min_flt + p->cmin_flt; - current->cmaj_flt += p->maj_flt + p->cmaj_flt; - current->cnswap += p->nswap + p->cnswap; - /* - * Potentially available timeslices are retrieved - * here - this way the parent does not get penalized - * for creating too many processes. - * - * (this cannot be used to artificially 'generate' - * timeslices, because any timeslice recovered here - * was given away by the parent in the first place.) - */ - current->counter += p->counter; - if (current->counter >= MAX_COUNTER) - current->counter = MAX_COUNTER; - p->pid = 0; - free_task_struct(p); - } else { - printk("task releasing itself\n"); - } + atomic_dec(&p->user->processes); + free_uid(p->user); + unhash_process(p); + + release_thread(p); + current->cmin_flt += p->min_flt + p->cmin_flt; + current->cmaj_flt += p->maj_flt + p->cmaj_flt; + current->cnswap += p->nswap + p->cnswap; + sched_exit(p); + p->pid = 0; + free_task_struct(p); } /* @@ -150,6 +123,79 @@ return retval; } +/** + * reparent_to_init() - Reparent the calling kernel thread to the init task. + * + * If a kernel thread is launched as a result of a system call, or if + * it ever exits, it should generally reparent itself to init so that + * it is correctly cleaned up on exit. + * + * The various task state such as scheduling policy and priority may have + * been inherited from a user process, so we reset them to sane values here. + * + * NOTE that reparent_to_init() gives the caller full capabilities. + */ +void reparent_to_init(void) +{ + write_lock_irq(&tasklist_lock); + + /* Reparent to init */ + REMOVE_LINKS(current); + current->p_pptr = child_reaper; + current->p_opptr = child_reaper; + SET_LINKS(current); + + /* Set the exit signal to SIGCHLD so we signal init on exit */ + current->exit_signal = SIGCHLD; + + current->ptrace = 0; + if ((current->policy == SCHED_OTHER) && (task_nice(current) < 0)) + set_user_nice(current, 0); + /* cpus_allowed? */ + /* rt_priority? */ + /* signals? 
*/ + current->cap_effective = CAP_INIT_EFF_SET; + current->cap_inheritable = CAP_INIT_INH_SET; + current->cap_permitted = CAP_FULL_SET; + current->keep_capabilities = 0; + memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim))); + current->user = INIT_USER; + + write_unlock_irq(&tasklist_lock); +} + +/* + * Put all the gunge required to become a kernel thread without + * attached user resources in one place where it belongs. + */ + +void daemonize(void) +{ + struct fs_struct *fs; + + + /* + * If we were started as result of loading a module, close all of the + * user space pages. We don't need them, and if we didn't close them + * they would be locked into memory. + */ + exit_mm(current); + + current->session = 1; + current->pgrp = 1; + current->tty = NULL; + + /* Become as one with the init task */ + + exit_fs(current); /* current->fs->count--; */ + fs = init_task.fs; + current->fs = fs; + atomic_inc(&fs->count); + exit_files(current); + current->files = init_task.files; + atomic_inc(¤t->files->count); +} + /* * When we die, we re-parent all our children. * Try to give them to another thread in our thread diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/fork.c linux.21rc5-ac2/kernel/fork.c --- linux.21rc5/kernel/fork.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/kernel/fork.c 2003-04-22 19:09:49.000000000 +0100 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -31,7 +32,6 @@ /* The idle threads do not count.. */ int nr_threads; -int nr_running; int max_threads; unsigned long total_forks; /* Handle normal Linux uptimes. */ @@ -39,6 +39,8 @@ struct task_struct *pidhash[PIDHASH_SZ]; +rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */ + void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) { unsigned long flags; @@ -114,8 +116,10 @@ last_pid = 300; next_safe = PID_MAX; } - if(unlikely(last_pid == beginpid)) + if(unlikely(last_pid == beginpid)) { + next_safe = 0; goto nomorepids; + } goto repeat; } if(p->pid > last_pid && next_safe > p->pid) @@ -144,6 +148,7 @@ { struct vm_area_struct * mpnt, *tmp, **pprev; int retval; + unsigned long charge = 0; flush_cache_mm(current->mm); mm->locked_vm = 0; @@ -172,6 +177,12 @@ retval = -ENOMEM; if(mpnt->vm_flags & VM_DONTCOPY) continue; + if(mpnt->vm_flags & VM_ACCOUNT) { + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; + if(!vm_enough_memory(len)) + goto fail_nomem; + charge += len; + } tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!tmp) goto fail_nomem; @@ -215,10 +226,12 @@ } retval = 0; build_mmap_rb(mm); - -fail_nomem: +out: flush_tlb_mm(current->mm); return retval; +fail_nomem: + vm_unacct_memory(charge); + goto out; } spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; @@ -604,6 +617,7 @@ struct pt_regs *regs, unsigned long stack_size) { int retval; + unsigned long flags; struct task_struct *p; struct completion vfork; @@ -664,8 +678,7 @@ if (p->pid == 0 && current->pid != 0) goto bad_fork_cleanup; - p->run_list.next = NULL; - p->run_list.prev = NULL; + INIT_LIST_HEAD(&p->run_list); p->p_cptr = NULL; init_waitqueue_head(&p->wait_chldexit); @@ -691,14 +704,15 @@ #ifdef CONFIG_SMP { int i; - p->cpus_runnable = ~0UL; - p->processor = current->processor; + /* ?? should we just memset this ?? 
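   (the loop below clears the per-CPU time statistics through
    cpu_logical_map(), so only the slots of CPUs that are actually
    online get written)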
*/ for(i = 0; i < smp_num_cpus; i++) - p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0; + p->per_cpu_utime[cpu_logical_map(i)] = + p->per_cpu_stime[cpu_logical_map(i)] = 0; spin_lock_init(&p->sigmask_lock); } #endif + p->array = NULL; p->lock_depth = -1; /* -1 = no lock */ p->start_time = jiffies; @@ -732,15 +746,27 @@ p->pdeath_signal = 0; /* - * "share" dynamic priority between parent and child, thus the - * total amount of dynamic priorities in the system doesn't change, - * more scheduling fairness. This is only important in the first - * timeslice, on the long run the scheduling behaviour is unchanged. - */ - p->counter = (current->counter + 1) >> 1; - current->counter >>= 1; - if (!current->counter) - current->need_resched = 1; + * Share the timeslice between parent and child, thus the + * total amount of pending timeslices in the system doesnt change, + * resulting in more scheduling fairness. + */ + __save_flags(flags); + __cli(); + if (!current->time_slice) + BUG(); + p->time_slice = (current->time_slice + 1) >> 1; + current->time_slice >>= 1; + if (!current->time_slice) { + /* + * This case is rare, it happens when the parent has only + * a single jiffy left from its timeslice. Taking the + * runqueue lock is not a problem. + */ + current->time_slice = 1; + scheduler_tick(0,0); + } + p->sleep_timestamp = jiffies; + __restore_flags(flags); /* * Ok, add it to the run-queues and make it @@ -776,11 +802,16 @@ if (p->ptrace & PT_PTRACED) send_sig(SIGSTOP, p, 1); - - wake_up_process(p); /* do this last */ + wake_up_forked_process(p); /* do this last */ ++total_forks; if (clone_flags & CLONE_VFORK) wait_for_completion(&vfork); + else + /* + * Let the child process run first, to avoid most of the + * COW overhead when the child exec()s afterwards. + */ + current->need_resched = 1; fork_out: return retval; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/kmod.c linux.21rc5-ac2/kernel/kmod.c --- linux.21rc5/kernel/kmod.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/kernel/kmod.c 2003-04-22 16:44:38.000000000 +0100 @@ -119,15 +119,8 @@ if (curtask->files->fd[i]) close(i); } - /* Drop the "current user" thing */ - { - struct user_struct *user = curtask->user; - curtask->user = INIT_USER; - atomic_inc(&INIT_USER->__count); - atomic_inc(&INIT_USER->processes); - atomic_dec(&user->processes); - free_uid(user); - } + /* Become root */ + set_user(0, 0); /* Give kmod all effective privileges.. 
*/ curtask->euid = curtask->fsuid = 0; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/ksyms.c linux.21rc5-ac2/kernel/ksyms.c --- linux.21rc5/kernel/ksyms.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/kernel/ksyms.c 2003-05-28 15:20:53.000000000 +0100 @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -48,6 +49,7 @@ #include #include #include +#include #include #if defined(CONFIG_PROC_FS) @@ -67,6 +69,7 @@ extern spinlock_t dma_spin_lock; extern int panic_timeout; + #ifdef CONFIG_MODVERSIONS const struct module_symbol __export_Using_Versions __attribute__((section("__ksymtab"))) = { @@ -111,6 +114,8 @@ EXPORT_SYMBOL(kfree); EXPORT_SYMBOL(vfree); EXPORT_SYMBOL(__vmalloc); +EXPORT_SYMBOL(vcalloc); +EXPORT_SYMBOL(vmap); EXPORT_SYMBOL(vmalloc_to_page); EXPORT_SYMBOL(mem_map); EXPORT_SYMBOL(remap_page_range); @@ -141,13 +146,15 @@ EXPORT_SYMBOL(fget); EXPORT_SYMBOL(igrab); EXPORT_SYMBOL(iunique); -EXPORT_SYMBOL(iget4); +EXPORT_SYMBOL(iget4_locked); +EXPORT_SYMBOL(unlock_new_inode); EXPORT_SYMBOL(iput); EXPORT_SYMBOL(inode_init_once); EXPORT_SYMBOL(force_delete); EXPORT_SYMBOL(follow_up); EXPORT_SYMBOL(follow_down); EXPORT_SYMBOL(lookup_mnt); +EXPORT_SYMBOL(path_lookup); EXPORT_SYMBOL(path_init); EXPORT_SYMBOL(path_walk); EXPORT_SYMBOL(path_release); @@ -223,6 +230,7 @@ EXPORT_SYMBOL(generic_file_read); EXPORT_SYMBOL(do_generic_file_read); EXPORT_SYMBOL(generic_file_write); +EXPORT_SYMBOL(generic_file_write_nolock); EXPORT_SYMBOL(generic_file_mmap); EXPORT_SYMBOL(generic_ro_fops); EXPORT_SYMBOL(generic_buffer_fdatasync); @@ -247,6 +255,7 @@ EXPORT_SYMBOL(find_inode_number); EXPORT_SYMBOL(is_subdir); EXPORT_SYMBOL(get_unused_fd); +EXPORT_SYMBOL(put_unused_fd); EXPORT_SYMBOL(vfs_create); EXPORT_SYMBOL(vfs_mkdir); EXPORT_SYMBOL(vfs_mknod); @@ -445,7 +454,9 @@ /* process management */ EXPORT_SYMBOL(complete_and_exit); EXPORT_SYMBOL(__wake_up); -EXPORT_SYMBOL(__wake_up_sync); +#if CONFIG_SMP +EXPORT_SYMBOL_GPL(__wake_up_sync); /* internal use only */ +#endif EXPORT_SYMBOL(wake_up_process); EXPORT_SYMBOL(sleep_on); EXPORT_SYMBOL(sleep_on_timeout); @@ -453,11 +464,14 @@ EXPORT_SYMBOL(interruptible_sleep_on_timeout); EXPORT_SYMBOL(schedule); EXPORT_SYMBOL(schedule_timeout); -#if CONFIG_SMP -EXPORT_SYMBOL(set_cpus_allowed); -#endif EXPORT_SYMBOL(yield); EXPORT_SYMBOL(__cond_resched); +EXPORT_SYMBOL(set_user_nice); +EXPORT_SYMBOL(task_nice); +EXPORT_SYMBOL_GPL(idle_cpu); +#ifdef CONFIG_SMP +EXPORT_SYMBOL_GPL(set_cpus_allowed); +#endif EXPORT_SYMBOL(jiffies); EXPORT_SYMBOL(xtime); EXPORT_SYMBOL(do_gettimeofday); @@ -468,7 +482,7 @@ #endif EXPORT_SYMBOL(kstat); -EXPORT_SYMBOL(nr_running); +EXPORT_SYMBOL(nr_context_switches); /* misc */ EXPORT_SYMBOL(panic); @@ -509,6 +523,8 @@ EXPORT_SYMBOL(seq_release); EXPORT_SYMBOL(seq_read); EXPORT_SYMBOL(seq_lseek); +EXPORT_SYMBOL(single_open); +EXPORT_SYMBOL(single_release); /* Program loader interfaces */ EXPORT_SYMBOL(setup_arg_pages); @@ -561,6 +577,12 @@ EXPORT_SYMBOL(strspn); EXPORT_SYMBOL(strsep); +#ifdef CONFIG_CRC32 +EXPORT_SYMBOL(crc32_le); +EXPORT_SYMBOL(crc32_be); +EXPORT_SYMBOL(bitreverse); +#endif + /* software interrupts */ EXPORT_SYMBOL(tasklet_hi_vec); EXPORT_SYMBOL(tasklet_vec); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/Makefile linux.21rc5-ac2/kernel/Makefile --- linux.21rc5/kernel/Makefile 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/kernel/Makefile 2003-04-22 16:44:38.000000000 +0100 @@ -9,7 +9,7 @@ O_TARGET := kernel.o 
-export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o printk.o +export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o printk.o cpufreq.o obj-y = sched.o dma.o fork.o exec_domain.o panic.o printk.o \ module.o exit.o itimer.o info.o time.o softirq.o resource.o \ @@ -19,6 +19,8 @@ obj-$(CONFIG_UID16) += uid16.o obj-$(CONFIG_MODULES) += ksyms.o obj-$(CONFIG_PM) += pm.o +obj-$(CONFIG_CPU_FREQ) += cpufreq.o +obj-$(CONFIG_IKCONFIG) += configs.o ifneq ($(CONFIG_IA64),y) # According to Alan Modra , the -fno-omit-frame-pointer is @@ -26,7 +28,21 @@ # me. I suspect most platforms don't need this, but until we know that for sure # I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k # to get a correct value for the wait-channel (WCHAN in ps). --davidm +# +# Some gcc versions miscompile sched.o so that the O(1) scheduler triple faults +# if we build with -O2. (Turns out to be a CPU issue. Update your microcode if you hit it) +# CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer endif include $(TOPDIR)/Rules.make + +configs.o: $(TOPDIR)/scripts/mkconfigs configs.c + $(CC) $(CFLAGS) $(CFLAGS_KERNEL) -DEXPORT_SYMTAB -c -o configs.o configs.c + +$(TOPDIR)/scripts/mkconfigs: $(TOPDIR)/scripts/mkconfigs.c + $(HOSTCC) $(HOSTCFLAGS) -o $(TOPDIR)/scripts/mkconfigs $(TOPDIR)/scripts/mkconfigs.c + +configs.c: $(TOPDIR)/.config $(TOPDIR)/scripts/mkconfigs + $(TOPDIR)/scripts/mkconfigs $(TOPDIR)/.config configs.c + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/module.c linux.21rc5-ac2/kernel/module.c --- linux.21rc5/kernel/module.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/kernel/module.c 2003-05-28 15:19:25.000000000 +0100 @@ -345,10 +345,10 @@ asmlinkage long sys_init_module(const char *name_user, struct module *mod_user) { - struct module mod_tmp, *mod; + struct module mod_tmp, *mod, *mod2 = NULL; char *name, *n_name, *name_tmp = NULL; long namelen, n_namelen, i, error; - unsigned long mod_user_size; + unsigned long mod_user_size, flags; struct module_ref *dep; if (!capable(CAP_SYS_MODULE)) @@ -387,11 +387,23 @@ } strcpy(name_tmp, mod->name); - error = copy_from_user(mod, mod_user, mod_user_size); + /* Copying mod_user directly over mod breaks the module_list chain and + * races against search_exception_table. copy_from_user may sleep, so it + * cannot run under modlist_lock; do the copy in two stages. + */ + if (!(mod2 = vmalloc(mod_user_size))) { + error = -ENOMEM; + goto err2; + } + error = copy_from_user(mod2, mod_user, mod_user_size); if (error) { error = -EFAULT; goto err2; } + spin_lock_irqsave(&modlist_lock, flags); + memcpy(mod, mod2, mod_user_size); + mod->next = mod_tmp.next; + spin_unlock_irqrestore(&modlist_lock, flags); /* Sanity check the size of the module. */ error = -EINVAL; @@ -505,7 +517,6 @@ to make the I and D caches consistent. 
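The two-stage copy added to sys_init_module() above is a reusable pattern: a sleeping copy_from_user() must not run under a spinlock, and a bulk copy over a live list node would momentarily clobber its ->next linkage for concurrent readers such as search_exception_table(). A generic sketch of the same pattern, with invented names (struct obj, obj_list_lock):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>

struct obj {
	struct obj *next;	/* list linkage, must survive the update */
	/* ... payload ... */
};

static spinlock_t obj_list_lock = SPIN_LOCK_UNLOCKED;

static int update_listed_obj(struct obj *o, struct obj *o_user,
			     unsigned long size)
{
	struct obj *staging = vmalloc(size);
	struct obj *saved_next = o->next;
	unsigned long flags;

	if (!staging)
		return -ENOMEM;
	/* May sleep: must happen outside the spinlock. */
	if (copy_from_user(staging, o_user, size)) {
		vfree(staging);
		return -EFAULT;
	}
	spin_lock_irqsave(&obj_list_lock, flags);
	memcpy(o, staging, size);
	o->next = saved_next;	/* re-link what memcpy() clobbered */
	spin_unlock_irqrestore(&obj_list_lock, flags);
	vfree(staging);
	return 0;
}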
*/ flush_icache_range((unsigned long)mod, (unsigned long)mod + mod->size); - mod->next = mod_tmp.next; mod->refs = NULL; /* Sanity check the module's dependents */ @@ -566,6 +577,8 @@ err3: put_mod_name(n_name); err2: + if (mod2) + vfree(mod2); *mod = mod_tmp; strcpy((char *)mod->name, name_tmp); /* We know there is room for this */ err1: diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/panic.c linux.21rc5-ac2/kernel/panic.c --- linux.21rc5/kernel/panic.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/kernel/panic.c 2003-04-22 16:44:38.000000000 +0100 @@ -16,6 +16,8 @@ #include #include #include +#include +#include asmlinkage void sys_sync(void); /* it's really int */ @@ -28,9 +30,155 @@ panic_timeout = simple_strtoul(str, NULL, 0); return 1; } - __setup("panic=", panic_setup); + +#if (defined(CONFIG_X86) && defined(CONFIG_VT)) || defined(CONFIG_PC_KEYB) +#define do_blink(x) pckbd_blink(x) +#else +#define do_blink(x) 0 +#endif + +#ifdef CONFIG_PANIC_MORSE + +static int blink_setting = 1; + +static const unsigned char morsetable[] = { + 0122, 0, 0310, 0, 0, 0163, /* "#$%&' */ + 055, 0155, 0, 0, 0163, 0141, 0152, 0051, /* ()*+,-./ */ + 077, 076, 074, 070, 060, 040, 041, 043, 047, 057, /* 0-9 */ + 0107, 0125, 0, 0061, 0, 0114, 0, /* :;<=>?@ */ + 006, 021, 025, 011, 002, 024, 013, 020, 004, /* A-I */ + 036, 015, 022, 007, 005, 017, 026, 033, 012, /* J-R */ + 010, 003, 014, 030, 016, 031, 035, 023, /* S-Z */ + 0, 0, 0, 0, 0154 /* [\]^_ */ +}; + +__inline__ unsigned char tomorse(char c) { + if (c >= 'a' && c <= 'z') + c = c - 'a' + 'A'; + if (c >= '"' && c <= '_') { + return morsetable[c - '"']; + } else + return 0; +} + + +#define DITLEN (HZ / 5) +#define DAHLEN (3 * DITLEN) +#define SPACELEN (7 * DITLEN) + +#define FREQ 844 + +/* Tell the user who may be running in X and not see the console that we have + panicked. This is to distinguish panics from "real" lockups. + Could in theory send the panic message as morse, but that is left as an + exercise for the reader. + And now it's done! LED and speaker morse code by Andrew Rodland + , with improvements based on suggestions from + linux@horizon.com and a host of others. +*/ + +void panic_blink(char *buf) +{ + static unsigned long next_jiffie = 0; + static char * bufpos = 0; + static unsigned char morse = 0; + static char state = 1; + + if (!blink_setting) + return; + + if (!buf) + buf="Panic lost?"; + + + if (bufpos && time_after (next_jiffie, jiffies)) { + return; /* Waiting for something. */ + } + + if (state) { /* Coming off of a blink. */ + if (blink_setting & 0x01) + do_blink(0); + + state = 0; + + if(morse > 1) { /* Not done yet, just a one-dit pause. */ + next_jiffie = jiffies + DITLEN; + } else { /* Get a new char, and figure out how much space. */ + + if(!bufpos) + bufpos = (char *)buf; /* First time through */ + + if(!*bufpos) { + bufpos = (char *)buf; /* Repeating */ + next_jiffie = jiffies + SPACELEN; + } else { + /* Inter-letter space */ + next_jiffie = jiffies + DAHLEN; + } + + if (! (morse = tomorse(*bufpos) )) { + next_jiffie = jiffies + SPACELEN; + state = 1; /* And get us back here */ + } + bufpos ++; + } + } else { /* Starting a new blink. We have valid code in morse. */ + int len; + + len = (morse & 001) ? 
DAHLEN : DITLEN; + + if (blink_setting & 0x02) + kd_mksound(FREQ, len); + + next_jiffie = jiffies + len; + + if (blink_setting & 0x01) + do_blink(1); + state = 1; + morse >>= 1; + } +} + +#else /* CONFIG_PANIC_MORSE */ + +static int blink_setting = HZ / 2; /* Over here, it's jiffies between blinks. */ + +/* This is the "original" 2.4-ac panic_blink, rewritten to use my + * sorta-arch-independent do_blink stuff. + */ +void panic_blink(char *buf) { + static char state = 0; + static unsigned long next_jiffie = 0; + + if (!blink_setting) + return; + + if (jiffies >= next_jiffie) { + state ^= 1; + do_blink(state); + next_jiffie = jiffies + blink_setting; + } + + return; +} + +#endif /* CONFIG_PANIC_MORSE */ + +static int __init panicblink_setup(char *str) +{ + int par; + if (get_option(&str,&par)) + blink_setting = par; + return 1; +} + +/* panicblink=0 disables the blinking as it caused problems with some console + switches. */ +__setup("panicblink=", panicblink_setup); + + /** * panic - halt the system * @fmt: The text string to print @@ -96,10 +244,7 @@ #endif sti(); for(;;) { -#if defined(CONFIG_X86) && defined(CONFIG_VT) - extern void panic_blink(void); - panic_blink(); -#endif + panic_blink(buf); CHECK_EMERGENCY_SYNC } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/printk.c linux.21rc5-ac2/kernel/printk.c --- linux.21rc5/kernel/printk.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/kernel/printk.c 2003-04-22 16:44:38.000000000 +0100 @@ -26,6 +26,7 @@ #include #include /* For in_interrupt() */ #include +#include #include @@ -529,6 +530,7 @@ if (must_wake_klogd && !oops_in_progress) wake_up_interruptible(&log_wait); } +EXPORT_SYMBOL(release_console_sem); /** console_conditional_schedule - yield the CPU if required * @@ -556,7 +558,14 @@ { struct console *c; - acquire_console_sem(); + /* + * Try to get the console semaphore. If someone else owns it + * we have to return without unblanking because console_unblank + * may be called in interrupt context. + */ + if (down_trylock(&console_sem) != 0) + return; + console_may_schedule = 0; for (c = console_drivers; c != NULL; c = c->next) if ((c->flags & CON_ENABLED) && c->unblank) c->unblank(); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/ptrace.c linux.21rc5-ac2/kernel/ptrace.c --- linux.21rc5/kernel/ptrace.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/kernel/ptrace.c 2003-05-09 18:33:05.000000000 +0100 @@ -32,20 +32,7 @@ if (child->state != TASK_STOPPED) return -ESRCH; #ifdef CONFIG_SMP - /* Make sure the child gets off its CPU.. 
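An aside on the encoding packed into morsetable[] above: each character is stored LSB-first, a 0 bit is a dit, a 1 bit is a dah, and the highest set bit is a terminator sentinel rather than part of the code, which is why panic_blink() keeps going only while morse > 1. A small user-space decoder, purely to illustrate the format:

#include <stdio.h>

/* Decode one packed morse character: bits are consumed LSB-first,
 * 0 = dit ('.'), 1 = dah ('-'); the most-significant set bit is a
 * sentinel marking the end of the code. */
static void print_morse(unsigned char code)
{
	while (code > 1) {		/* stop at the sentinel bit */
		putchar((code & 1) ? '-' : '.');
		code >>= 1;
	}
	putchar('\n');
}

int main(void)
{
	print_morse(006);	/* 'A' (binary 110):   .-   */
	print_morse(002);	/* 'E' (binary 10):    .    */
	print_morse(030);	/* 'V' (binary 11000): ...- */
	return 0;
}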
*/ - for (;;) { - task_lock(child); - if (!task_has_cpu(child)) - break; - task_unlock(child); - do { - if (child->state != TASK_STOPPED) - return -ESRCH; - barrier(); - cpu_relax(); - } while (task_has_cpu(child)); - } - task_unlock(child); + wait_task_inactive(child); #endif } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/sched.c linux.21rc5-ac2/kernel/sched.c --- linux.21rc5/kernel/sched.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/kernel/sched.c 2003-04-22 16:44:38.000000000 +0100 @@ -1,760 +1,1052 @@ /* - * linux/kernel/sched.c + * kernel/sched.c * * Kernel scheduler and related syscalls * - * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1991-2002 Linus Torvalds * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and * make semaphores SMP safe * 1998-11-19 Implemented schedule_timeout() and related stuff * by Andrea Arcangeli - * 1998-12-28 Implemented better SMP scheduling by Ingo Molnar + * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: + * hybrid priority-list and round-robin design with + * an array-switch method of distributing timeslices + * and per-CPU runqueues. Additional code by Davide + * Libenzi, Robert Love, and Rusty Russell. */ -/* - * 'sched.c' is the main kernel file. It contains scheduling primitives - * (sleep_on, wakeup, schedule etc) as well as a number of simple system - * call functions (type getpid()), which just extract a field from - * current-task - */ - -#include #include +#include #include +#include +#include #include -#include +#include #include -#include #include -#include -#include +#include -#include -#include +/* + * Convert user-nice values [ -20 ... 0 ... 19 ] + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], + * and back. + */ +#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) +#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) +#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) -extern void timer_bh(void); -extern void tqueue_bh(void); -extern void immediate_bh(void); +/* + * 'User priority' is the nice value converted to something we + * can work with better when scaling various scheduler parameters, + * it's a [ 0 ... 39 ] range. + */ +#define USER_PRIO(p) ((p)-MAX_RT_PRIO) +#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) +#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) /* - * scheduler variables - */ + * These are the 'tuning knobs' of the scheduler: + * + * Minimum timeslice is 10 msecs, default timeslice is 150 msecs, + * maximum timeslice is 300 msecs. Timeslices get refilled after + * they expire. + */ +#define MIN_TIMESLICE ( 10 * HZ / 1000) +#define MAX_TIMESLICE (300 * HZ / 1000) +#define CHILD_PENALTY 95 +#define PARENT_PENALTY 100 +#define EXIT_WEIGHT 3 +#define PRIO_BONUS_RATIO 25 +#define INTERACTIVE_DELTA 2 +#define MAX_SLEEP_AVG (2*HZ) +#define STARVATION_LIMIT (2*HZ) -unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */ +/* + * If a task is 'interactive' then we reinsert it in the active + * array after it has expired its current timeslice. (it will not + * continue to run immediately, it will still roundrobin with + * other interactive tasks.) + * + * This part scales the interactivity limit depending on niceness. + * + * We scale it linearly, offset by the INTERACTIVE_DELTA delta. 
+ * Here are a few examples of different nice levels: + * + * TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0] + * TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0] + * TASK_INTERACTIVE( 0): [1,1,1,1,0,0,0,0,0,0,0] + * TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0] + * TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0] + * + * (the X axis represents the possible -5 ... 0 ... +5 dynamic + * priority range a task can explore, a value of '1' means the + * task is rated interactive.) + * + * I.e. nice +19 tasks can never get 'interactive' enough to be + * reinserted into the active array, and only heavily CPU-hogging nice -20 + * tasks will be expired. Default nice 0 tasks are somewhere in between; + * it takes some effort for them to get interactive, but it's not + * too hard. + */ + +#define SCALE(v1,v1_max,v2_max) \ + (v1) * (v2_max) / (v1_max) + +#define DELTA(p) \ + (SCALE(TASK_NICE(p), 40, MAX_USER_PRIO*PRIO_BONUS_RATIO/100) + \ + INTERACTIVE_DELTA) -extern void mem_use(void); +#define TASK_INTERACTIVE(p) \ + ((p)->prio <= (p)->static_prio - DELTA(p)) /* - * Scheduling quanta. + * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ] + * to time slice values. * - * NOTE! The unix "nice" value influences how long a process - * gets. The nice value ranges from -20 to +19, where a -20 - * is a "high-priority" task, and a "+10" is a low-priority - * task. + * The higher a thread's priority, the bigger timeslices + * it gets during one round of execution. But even the lowest + * priority thread gets MIN_TIMESLICE worth of execution time. * - * We want the time-slice to be around 50ms or so, so this - * calculation depends on the value of HZ. + * task_timeslice() is the interface that is used by the scheduler. */ -#if HZ < 200 -#define TICK_SCALE(x) ((x) >> 2) -#elif HZ < 400 -#define TICK_SCALE(x) ((x) >> 1) -#elif HZ < 800 -#define TICK_SCALE(x) (x) -#elif HZ < 1600 -#define TICK_SCALE(x) ((x) << 1) -#else -#define TICK_SCALE(x) ((x) << 2) -#endif - -#define NICE_TO_TICKS(nice) (TICK_SCALE(20-(nice))+1) +#define BASE_TIMESLICE(p) (MIN_TIMESLICE + \ + ((MAX_TIMESLICE - MIN_TIMESLICE) * \ + (MAX_PRIO-1-(p)->static_prio)/(MAX_USER_PRIO - 1))) +static inline unsigned int task_timeslice(task_t *p) +{ + return BASE_TIMESLICE(p); +} /* - * Init task must be ok at boot for the ix86 as we will check its signals - * via the SMP irq return path. + * These are the runqueue data structures: */ - -struct task_struct * init_tasks[NR_CPUS] = {&init_task, }; + +#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long)) + +typedef struct runqueue runqueue_t; + +struct prio_array { + int nr_active; + unsigned long bitmap[BITMAP_SIZE]; + struct list_head queue[MAX_PRIO]; +}; /* - * The tasklist_lock protects the linked list of processes. - * - * The runqueue_lock locks the parts that actually access - * and change the run-queues, and have to be interrupt-safe. - * - * If both locks are to be concurrently held, the runqueue_lock - * nests inside the tasklist_lock. + * This is the main, per-CPU runqueue data structure. * - * task->alloc_lock nests inside tasklist_lock. + * Locking rule: code that locks multiple runqueues (such as the + * load balancing or the process migration code) must acquire the + * locks in ascending &runqueue order. 
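To make the BASE_TIMESLICE() mapping above concrete, here is a small user-space computation of it. MIN_TIMESLICE, MAX_TIMESLICE, NICE_TO_PRIO and MAX_USER_PRIO are taken from the hunks above; HZ = 100 and MAX_RT_PRIO = 100 are assumptions (the latter matches the mainline O(1) scheduler, this tree's value may differ):

#include <stdio.h>

#define HZ		100	/* assumed */
#define MIN_TIMESLICE	( 10 * HZ / 1000)
#define MAX_TIMESLICE	(300 * HZ / 1000)
#define MAX_RT_PRIO	100	/* assumed, mainline value */
#define MAX_PRIO	(MAX_RT_PRIO + 40)
#define MAX_USER_PRIO	40
#define NICE_TO_PRIO(n)	(MAX_RT_PRIO + (n) + 20)

static int base_timeslice(int static_prio)
{
	return MIN_TIMESLICE + (MAX_TIMESLICE - MIN_TIMESLICE) *
	       (MAX_PRIO - 1 - static_prio) / (MAX_USER_PRIO - 1);
}

int main(void)
{
	/* matches the comment above: 300ms max, 150ms default, 10ms min */
	printf("nice -20: %d ticks\n", base_timeslice(NICE_TO_PRIO(-20))); /* 30 */
	printf("nice   0: %d ticks\n", base_timeslice(NICE_TO_PRIO(0)));   /* 15 */
	printf("nice +19: %d ticks\n", base_timeslice(NICE_TO_PRIO(19)));  /*  1 */
	return 0;
}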
+ */ struct runqueue { + spinlock_t lock; + unsigned long nr_running, nr_switches, expired_timestamp, + nr_uninterruptible; + task_t *curr, *idle; + prio_array_t *active, *expired, arrays[2]; + int prev_nr_running[NR_CPUS]; + task_t *migration_thread; + struct list_head migration_queue; +} ____cacheline_aligned; + +static struct runqueue runqueues[NR_CPUS] __cacheline_aligned; + +#define cpu_rq(cpu) (runqueues + (cpu)) +#define this_rq() cpu_rq(smp_processor_id()) +#define task_rq(p) cpu_rq(task_cpu(p)) +#define cpu_curr(cpu) (cpu_rq(cpu)->curr) +#define rt_task(p) ((p)->prio < MAX_RT_PRIO) + +/* + * Default context-switch locking: */ -spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */ -rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */ +#ifndef prepare_arch_schedule +# define prepare_arch_schedule(prev) do { } while(0) +# define finish_arch_schedule(prev) do { } while(0) +# define prepare_arch_switch(rq) do { } while(0) +# define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock) +#endif -static LIST_HEAD(runqueue_head); /* - * We align per-CPU scheduling data on cacheline boundaries, - * to prevent cacheline ping-pong. + * task_rq_lock - lock the runqueue a given task resides on and disable + * interrupts. Note the ordering: we can safely look up the task_rq without + * explicitly disabling preemption. */ -static union { - struct schedule_data { - struct task_struct * curr; - cycles_t last_schedule; - } schedule_data; - char __pad [SMP_CACHE_BYTES]; -} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}}; +static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) +{ + struct runqueue *rq; -#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr -#define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule +repeat_lock_task: + local_irq_save(*flags); + rq = task_rq(p); + spin_lock(&rq->lock); + if (unlikely(rq != task_rq(p))) { + spin_unlock_irqrestore(&rq->lock, *flags); + goto repeat_lock_task; + } + return rq; +} -struct kernel_stat kstat; -extern struct task_struct *child_reaper; +static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) +{ + spin_unlock_irqrestore(&rq->lock, *flags); +} -#ifdef CONFIG_SMP +/* + * this_rq_lock - lock this CPU's runqueue and disable interrupts. + */ +static inline runqueue_t *this_rq_lock(void) +{ + runqueue_t *rq; -#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)]) -#define can_schedule(p,cpu) \ - ((p)->cpus_runnable & (p)->cpus_allowed & (1UL << cpu)) + local_irq_disable(); + rq = this_rq(); + spin_lock(&rq->lock); -#else + return rq; +} -#define idle_task(cpu) (&init_task) -#define can_schedule(p,cpu) (1) +static inline void rq_unlock(runqueue_t *rq) +{ + spin_unlock_irq(&rq->lock); +} -#endif +/* + * Adding/removing a task to/from a priority array: + */ +static inline void dequeue_task(struct task_struct *p, prio_array_t *array) +{ + array->nr_active--; + list_del(&p->run_list); + if (list_empty(array->queue + p->prio)) + __clear_bit(p->prio, array->bitmap); +} -void scheduling_functions_start_here(void) { } +static inline void enqueue_task(struct task_struct *p, prio_array_t *array) +{ + list_add_tail(&p->run_list, array->queue + p->prio); + __set_bit(p->prio, array->bitmap); + array->nr_active++; + p->array = array; +} /* - * This is the function that decides how desirable a process is.. 
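A user-space sketch of the central O(1) trick in prio_array and enqueue_task()/dequeue_task() above: one bit per priority level records whether that level's queue is non-empty, so picking the next task is a find-first-bit plus a list-head lookup, independent of how many tasks are runnable. sched_find_first_bit() itself is an arch-optimized primitive; __builtin_ctzl stands in for it here:

#include <stdio.h>

#define MAX_PRIO	140
#define BITS_PER_LONG	(8 * sizeof(long))
#define BITMAP_LONGS	((MAX_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long bitmap[BITMAP_LONGS];

static void mark_runnable(int prio)
{
	bitmap[prio / BITS_PER_LONG] |= 1UL << (prio % BITS_PER_LONG);
}

static void mark_empty(int prio)
{
	bitmap[prio / BITS_PER_LONG] &= ~(1UL << (prio % BITS_PER_LONG));
}

/* stand-in for sched_find_first_bit() */
static int find_first_set(void)
{
	unsigned int i;

	for (i = 0; i < BITMAP_LONGS; i++)
		if (bitmap[i])
			return i * BITS_PER_LONG + __builtin_ctzl(bitmap[i]);
	return MAX_PRIO;	/* empty: caller falls back to the idle task */
}

int main(void)
{
	mark_runnable(120);
	mark_runnable(100);
	printf("next prio: %d\n", find_first_set());	/* 100 */
	mark_empty(100);
	printf("next prio: %d\n", find_first_set());	/* 120 */
	return 0;
}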
- * You can weigh different processes against each other depending - * on what CPU they've run on lately etc to try to handle cache - * and TLB miss penalties. + * effective_prio - return the priority that is based on the static + * priority but is modified by bonuses/penalties. + * + * We scale the actual sleep average [0 .... MAX_SLEEP_AVG] + * into the -5 ... 0 ... +5 bonus/penalty range. + * + * We use 25% of the full 0...39 priority range so that: * - * Return values: - * -1000: never select this - * 0: out of time, recalculate counters (but it might still be - * selected) - * +ve: "goodness" value (the larger, the better) - * +1000: realtime process, select this. + * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs. + * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks. + * + * Both properties are important to certain workloads. */ - -static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm) +static inline int effective_prio(task_t *p) { - int weight; + int bonus, prio; - /* - * select the current process after every other - * runnable process, but before the idle thread. - * Also, dont trigger a counter recalculation. - */ - weight = -1; - if (p->policy & SCHED_YIELD) - goto out; + bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*p->sleep_avg/MAX_SLEEP_AVG/100 - + MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2; - /* - * Non-RT process - normal case first. - */ - if (p->policy == SCHED_OTHER) { + prio = p->static_prio - bonus; + if (prio < MAX_RT_PRIO) + prio = MAX_RT_PRIO; + if (prio > MAX_PRIO-1) + prio = MAX_PRIO-1; + return prio; +} + +/* + * activate_task - move a task to the runqueue. + * + * Also update all the scheduling statistics stuff. (sleep average + * calculation, priority modifiers, etc.) + */ +static inline void activate_task(task_t *p, runqueue_t *rq) +{ + unsigned long sleep_time = jiffies - p->sleep_timestamp; + prio_array_t *array = rq->active; + + if (!rt_task(p) && sleep_time) { /* - * Give the process a first-approximation goodness value - * according to the number of clock-ticks it has left. - * - * Don't do any other calculations if the time slice is - * over.. + * This code gives a bonus to interactive tasks. We update + * an 'average sleep time' value here, based on + * sleep_timestamp. The more time a task spends sleeping, + * the higher the average gets - and the higher the priority + * boost gets as well. */ - weight = p->counter; - if (!weight) - goto out; - -#ifdef CONFIG_SMP - /* Give a largish advantage to the same processor... */ - /* (this is equivalent to penalizing other processors) */ - if (p->processor == this_cpu) - weight += PROC_CHANGE_PENALTY; -#endif - - /* .. and a slight advantage to the current MM */ - if (p->mm == this_mm || !p->mm) - weight += 1; - weight += 20 - p->nice; - goto out; + p->sleep_avg += sleep_time; + if (p->sleep_avg > MAX_SLEEP_AVG) + p->sleep_avg = MAX_SLEEP_AVG; + p->prio = effective_prio(p); } - - /* - * Realtime process, select the first one on the - * runqueue (taking priorities within processes - * into account). - */ - weight = 1000 + p->rt_priority; -out: - return weight; + enqueue_task(p, array); + rq->nr_running++; } /* - * the 'goodness value' of replacing a process on a given CPU. - * positive value means 'replace', zero or negative means 'dont'. + * deactivate_task - remove a task from the runqueue. 
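Plugging numbers into the effective_prio() bonus above makes the -5 ... +5 window visible. A user-space rendering of the same integer arithmetic, with HZ = 100 assumed and MAX_RT_PRIO = 100 assumed to match the mainline value:

#include <stdio.h>

#define HZ			100	/* assumed */
#define MAX_RT_PRIO		100	/* assumed, mainline value */
#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define MAX_USER_PRIO		40
#define PRIO_BONUS_RATIO	25
#define MAX_SLEEP_AVG		(2*HZ)

static int effective_prio(int static_prio, int sleep_avg)
{
	int bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*sleep_avg/MAX_SLEEP_AVG/100 -
		    MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2;
	int prio = static_prio - bonus;

	if (prio < MAX_RT_PRIO)
		prio = MAX_RT_PRIO;
	if (prio > MAX_PRIO - 1)
		prio = MAX_PRIO - 1;
	return prio;
}

int main(void)
{
	/* nice 0 => static_prio 120; bonus spans -5 (CPU hog) .. +5 (sleeper) */
	printf("%d\n", effective_prio(120, 0));			/* 125 */
	printf("%d\n", effective_prio(120, MAX_SLEEP_AVG));	/* 115 */
	return 0;
}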
*/ -static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu) +static inline void deactivate_task(struct task_struct *p, runqueue_t *rq) { - return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm); + rq->nr_running--; + if (p->state == TASK_UNINTERRUPTIBLE) + rq->nr_uninterruptible++; + dequeue_task(p, p->array); + p->array = NULL; } /* - * This is ugly, but reschedule_idle() is very timing-critical. - * We are called with the runqueue spinlock held and we must - * not claim the tasklist_lock. + * resched_task - mark a task 'to be rescheduled now'. + * + * On UP this means the setting of the need_resched flag, on SMP it + * might also involve a cross-CPU call to trigger the scheduler on + * the target CPU. */ -static FASTCALL(void reschedule_idle(struct task_struct * p)); - -static void reschedule_idle(struct task_struct * p) +static inline void resched_task(task_t *p) { #ifdef CONFIG_SMP - int this_cpu = smp_processor_id(); - struct task_struct *tsk, *target_tsk; - int cpu, best_cpu, i, max_prio; - cycles_t oldest_idle; - - /* - * shortcut if the woken up task's last CPU is - * idle now. - */ - best_cpu = p->processor; - if (can_schedule(p, best_cpu)) { - tsk = idle_task(best_cpu); - if (cpu_curr(best_cpu) == tsk) { - int need_resched; -send_now_idle: - /* - * If need_resched == -1 then we can skip sending - * the IPI altogether, tsk->need_resched is - * actively watched by the idle thread. - */ - need_resched = tsk->need_resched; - tsk->need_resched = 1; - if ((best_cpu != this_cpu) && !need_resched) - smp_send_reschedule(best_cpu); - return; - } - } - - /* - * We know that the preferred CPU has a cache-affine current - * process, lets try to find a new idle CPU for the woken-up - * process. Select the least recently active idle CPU. (that - * one will have the least active cache context.) Also find - * the executing process which has the least priority. - */ - oldest_idle = (cycles_t) -1; - target_tsk = NULL; - max_prio = 0; + int need_resched; - for (i = 0; i < smp_num_cpus; i++) { - cpu = cpu_logical_map(i); - if (!can_schedule(p, cpu)) - continue; - tsk = cpu_curr(cpu); - /* - * We use the first available idle CPU. This creates - * a priority list between idle CPUs, but this is not - * a problem. - */ - if (tsk == idle_task(cpu)) { -#if defined(__i386__) && defined(CONFIG_SMP) - /* - * Check if two siblings are idle in the same - * physical package. Use them if found. 
- */ - if (smp_num_siblings == 2) { - if (cpu_curr(cpu_sibling_map[cpu]) == - idle_task(cpu_sibling_map[cpu])) { - oldest_idle = last_schedule(cpu); - target_tsk = tsk; - break; - } - - } -#endif - if (last_schedule(cpu) < oldest_idle) { - oldest_idle = last_schedule(cpu); - target_tsk = tsk; - } - } else { - if (oldest_idle == -1ULL) { - int prio = preemption_goodness(tsk, p, cpu); - - if (prio > max_prio) { - max_prio = prio; - target_tsk = tsk; - } - } - } - } - tsk = target_tsk; - if (tsk) { - if (oldest_idle != -1ULL) { - best_cpu = tsk->processor; - goto send_now_idle; - } - tsk->need_resched = 1; - if (tsk->processor != this_cpu) - smp_send_reschedule(tsk->processor); - } - return; - - -#else /* UP */ - int this_cpu = smp_processor_id(); - struct task_struct *tsk; - - tsk = cpu_curr(this_cpu); - if (preemption_goodness(tsk, p, this_cpu) > 0) - tsk->need_resched = 1; + need_resched = p->need_resched; + wmb(); + set_tsk_need_resched(p); + if (!need_resched && (task_cpu(p) != smp_processor_id())) + smp_send_reschedule(task_cpu(p)); +#else + set_tsk_need_resched(p); #endif } +#ifdef CONFIG_SMP + /* - * Careful! + * wait_task_inactive - wait for a thread to unschedule. * - * This has to add the process to the _end_ of the - * run-queue, not the beginning. The goodness value will - * determine whether this process will run next. This is - * important to get SCHED_FIFO and SCHED_RR right, where - * a process that is either pre-empted or its time slice - * has expired, should be moved to the tail of the run - * queue for its priority - Bhavesh Davda + * The caller must ensure that the task *will* unschedule sometime soon, + * else this function might spin for a *long* time. */ -static inline void add_to_runqueue(struct task_struct * p) +void wait_task_inactive(task_t * p) { - list_add_tail(&p->run_list, &runqueue_head); - nr_running++; + unsigned long flags; + runqueue_t *rq; + +repeat: + rq = task_rq(p); + if (unlikely(rq->curr == p)) { + cpu_relax(); + goto repeat; + } + rq = task_rq_lock(p, &flags); + if (unlikely(rq->curr == p)) { + task_rq_unlock(rq, &flags); + goto repeat; + } + task_rq_unlock(rq, &flags); } -static inline void move_last_runqueue(struct task_struct * p) +/* + * kick_if_running - kick the remote CPU if the task is running currently. + * + * This code is used by the signal code to signal tasks + * which are in user-mode, as quickly as possible. + * + * (Note that we do this lockless - if the task does anything + * while the message is in flight then it will notice the + * sigpending condition anyway.) + */ +void kick_if_running(task_t * p) { - list_del(&p->run_list); - list_add_tail(&p->run_list, &runqueue_head); + if (p == task_rq(p)->curr) + resched_task(p); } +#endif -/* - * Wake up a process. Put it on the run-queue if it's not - * already there. The "current" process is always on the - * run-queue (except when the actual re-schedule is in - * progress), and as such you're allowed to do the simpler - * "current->state = TASK_RUNNING" to mark yourself runnable - * without the overhead of this. +/*** + * try_to_wake_up - wake up a thread + * @p: the to-be-woken-up thread + * @sync: do a synchronous wakeup? + * + * Put it on the run-queue if it's not already there. The "current" + * thread is always on the run-queue (except when the actual + * re-schedule is in progress), and as such you're allowed to do + * the simpler "current->state = TASK_RUNNING" to mark yourself + * runnable without the overhead of this. + * + * returns failure only if the task is already active. 
*/ -static inline int try_to_wake_up(struct task_struct * p, int synchronous) +static int try_to_wake_up(task_t * p, int sync) { unsigned long flags; int success = 0; + long old_state; + runqueue_t *rq; - /* - * We want the common case fall through straight, thus the goto. - */ - spin_lock_irqsave(&runqueue_lock, flags); +repeat_lock_task: + rq = task_rq_lock(p, &flags); + old_state = p->state; + if (!p->array) { + /* + * Fast-migrate the task if it's not running or runnable + * currently. Do not violate hard affinity. + */ + if (unlikely(sync && (rq->curr != p) && + (task_cpu(p) != smp_processor_id()) && + (p->cpus_allowed & (1UL << smp_processor_id())))) { + + set_task_cpu(p, smp_processor_id()); + task_rq_unlock(rq, &flags); + goto repeat_lock_task; + } + if (old_state == TASK_UNINTERRUPTIBLE) + rq->nr_uninterruptible--; + activate_task(p, rq); + if (p->prio < rq->curr->prio) + resched_task(rq->curr); + success = 1; + } p->state = TASK_RUNNING; - if (task_on_runqueue(p)) - goto out; - add_to_runqueue(p); - if (!synchronous || !(p->cpus_allowed & (1UL << smp_processor_id()))) - reschedule_idle(p); - success = 1; -out: - spin_unlock_irqrestore(&runqueue_lock, flags); + task_rq_unlock(rq, &flags); + return success; } -inline int wake_up_process(struct task_struct * p) +int wake_up_process(task_t * p) { return try_to_wake_up(p, 0); } -static void process_timeout(unsigned long __data) +/* + * wake_up_forked_process - wake up a freshly forked process. + * + * This function will do some initial scheduler statistics housekeeping + * that must be done for every newly created process. + */ +void wake_up_forked_process(task_t * p) { - struct task_struct * p = (struct task_struct *) __data; + runqueue_t *rq = this_rq_lock(); - wake_up_process(p); + p->state = TASK_RUNNING; + if (!rt_task(p)) { + /* + * We decrease the sleep average of forking parents + * and children as well, to keep max-interactive tasks + * from forking tasks that are max-interactive. + */ + current->sleep_avg = current->sleep_avg * PARENT_PENALTY / 100; + p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100; + p->prio = effective_prio(p); + } + set_task_cpu(p, smp_processor_id()); + activate_task(p, rq); + + rq_unlock(rq); +} + +/* + * Potentially available exiting-child timeslices are + * retrieved here - this way the parent does not get + * penalized for creating too many threads. + * + * (this cannot be used to 'generate' timeslices + * artificially, because any timeslice recovered here + * was given away by the parent in the first place.) + */ +void sched_exit(task_t * p) +{ + __cli(); + current->time_slice += p->time_slice; + if (unlikely(current->time_slice > MAX_TIMESLICE)) + current->time_slice = MAX_TIMESLICE; + __sti(); + /* + * If the child was a (relative-) CPU hog then decrease + * the sleep_avg of the parent as well. + */ + if (p->sleep_avg < current->sleep_avg) + current->sleep_avg = (current->sleep_avg * EXIT_WEIGHT + + p->sleep_avg) / (EXIT_WEIGHT + 1); } /** - * schedule_timeout - sleep until timeout - * @timeout: timeout value in jiffies - * - * Make the current task sleep until @timeout jiffies have - * elapsed. The routine will return immediately unless - * the current task state has been set (see set_current_state()). - * - * You can set the task state as follows - - * - * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to - * pass before the routine returns. The routine will return 0 + * schedule_tail - first thing a freshly forked thread must call. 
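The time_slice arithmetic in the fork.c hunk earlier and sched_exit() above are two halves of one invariant: timeslices are conserved across fork() and exit(), so spawning threads cannot manufacture CPU time. A toy model of the round trip, ignoring sched_exit()'s MAX_TIMESLICE clamp:

#include <stdio.h>

struct toy_task { int time_slice; };

/* fork: child gets the rounded-up half, parent keeps the rest */
static void toy_fork(struct toy_task *parent, struct toy_task *child)
{
	child->time_slice = (parent->time_slice + 1) >> 1;
	parent->time_slice >>= 1;
}

/* exit: the child's leftover flows back, as in sched_exit() */
static void toy_exit(struct toy_task *child, struct toy_task *parent)
{
	parent->time_slice += child->time_slice;
}

int main(void)
{
	struct toy_task p = { 15 }, c;

	toy_fork(&p, &c);
	printf("parent %d, child %d\n", p.time_slice, c.time_slice); /* 7, 8 */
	toy_exit(&c, &p);
	printf("parent %d\n", p.time_slice);                         /* 15 */
	return 0;
}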
* - * %TASK_INTERRUPTIBLE - the routine may return early if a signal is - * delivered to the current task. In this case the remaining time - * in jiffies will be returned, or 0 if the timer expired in time - * - * The current task state is guaranteed to be TASK_RUNNING when this - * routine returns. - * - * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule - * the CPU away without a bound on the timeout. In this case the return - * value will be %MAX_SCHEDULE_TIMEOUT. - * - * In all cases the return value is guaranteed to be non-negative. + * @prev: the thread we just switched away from. */ -signed long schedule_timeout(signed long timeout) +#if CONFIG_SMP +asmlinkage void schedule_tail(task_t *prev) { - struct timer_list timer; - unsigned long expire; - - switch (timeout) - { - case MAX_SCHEDULE_TIMEOUT: - /* - * These two special cases are useful to be comfortable - * in the caller. Nothing more. We could take - * MAX_SCHEDULE_TIMEOUT from one of the negative value - * but I' d like to return a valid offset (>=0) to allow - * the caller to do everything it want with the retval. - */ - schedule(); - goto out; - default: - /* - * Another bit of PARANOID. Note that the retval will be - * 0 since no piece of kernel is supposed to do a check - * for a negative retval of schedule_timeout() (since it - * should never happens anyway). You just have the printk() - * that will tell you if something is gone wrong and where. - */ - if (timeout < 0) - { - printk(KERN_ERR "schedule_timeout: wrong timeout " - "value %lx from %p\n", timeout, - __builtin_return_address(0)); - current->state = TASK_RUNNING; - goto out; - } - } + finish_arch_switch(this_rq()); + finish_arch_schedule(prev); +} +#endif - expire = timeout + jiffies; +/* + * context_switch - switch to the new MM and the new + * thread's register state. + */ +static inline task_t * context_switch(task_t *prev, task_t *next) +{ + struct mm_struct *mm = next->mm; + struct mm_struct *oldmm = prev->active_mm; - init_timer(&timer); - timer.expires = expire; - timer.data = (unsigned long) current; - timer.function = process_timeout; + if (unlikely(!mm)) { + next->active_mm = oldmm; + atomic_inc(&oldmm->mm_count); + enter_lazy_tlb(oldmm, next, smp_processor_id()); + } else + switch_mm(oldmm, mm, next, smp_processor_id()); - add_timer(&timer); - schedule(); - del_timer_sync(&timer); + if (unlikely(!prev->mm)) { + prev->active_mm = NULL; + mmdrop(oldmm); + } - timeout = expire - jiffies; + /* Here we just switch the register state and the stack. */ + switch_to(prev, next, prev); - out: - return timeout < 0 ? 0 : timeout; + return prev; } /* - * schedule_tail() is getting called from the fork return path. This - * cleans up all remaining scheduler things, without impacting the - * common case. + * nr_running, nr_uninterruptible and nr_context_switches: + * + * externally visible scheduler statistics: current number of runnable + * threads, current number of uninterruptible-sleeping threads, total + * number of context switches performed since bootup. */ -static inline void __schedule_tail(struct task_struct *prev) +unsigned long nr_running(void) { -#ifdef CONFIG_SMP - int policy; + unsigned long i, sum = 0; - /* - * prev->policy can be written from here only before `prev' - * can be scheduled (before setting prev->cpus_runnable to ~0UL). - * Of course it must also be read before allowing prev - * to be rescheduled, but since the write depends on the read - * to complete, wmb() is enough. 
(the spin_lock() acquired - * before setting cpus_runnable is not enough because the spin_lock() - * common code semantics allows code outside the critical section - * to enter inside the critical section) - */ - policy = prev->policy; - prev->policy = policy & ~SCHED_YIELD; - wmb(); + for (i = 0; i < smp_num_cpus; i++) + sum += cpu_rq(cpu_logical_map(i))->nr_running; - /* - * fast path falls through. We have to clear cpus_runnable before - * checking prev->state to avoid a wakeup race. Protect against - * the task exiting early. - */ - task_lock(prev); - task_release_cpu(prev); - mb(); - if (prev->state == TASK_RUNNING) - goto needs_resched; - -out_unlock: - task_unlock(prev); /* Synchronise here with release_task() if prev is TASK_ZOMBIE */ - return; + return sum; +} - /* - * Slow path - we 'push' the previous process and - * reschedule_idle() will attempt to find a new - * processor for it. (but it might preempt the - * current process as well.) We must take the runqueue - * lock and re-check prev->state to be correct. It might - * still happen that this process has a preemption - * 'in progress' already - but this is not a problem and - * might happen in other circumstances as well. - */ -needs_resched: - { - unsigned long flags; +unsigned long nr_uninterruptible(void) +{ + unsigned long i, sum = 0; - /* - * Avoid taking the runqueue lock in cases where - * no preemption-check is necessery: - */ - if ((prev == idle_task(smp_processor_id())) || - (policy & SCHED_YIELD)) - goto out_unlock; + for (i = 0; i < smp_num_cpus; i++) + sum += cpu_rq(cpu_logical_map(i))->nr_uninterruptible; - spin_lock_irqsave(&runqueue_lock, flags); - if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev)) - reschedule_idle(prev); - spin_unlock_irqrestore(&runqueue_lock, flags); - goto out_unlock; - } -#else - prev->policy &= ~SCHED_YIELD; -#endif /* CONFIG_SMP */ + return sum; } -asmlinkage void schedule_tail(struct task_struct *prev) +unsigned long nr_context_switches(void) { - __schedule_tail(prev); + unsigned long i, sum = 0; + + for (i = 0; i < smp_num_cpus; i++) + sum += cpu_rq(cpu_logical_map(i))->nr_switches; + + return sum; } /* - * 'schedule()' is the scheduler function. It's a very simple and nice - * scheduler: it's not perfect, but certainly works for most things. + * double_rq_lock - safely lock two runqueues * - * The goto is "interesting". - * - * NOTE!! Task 0 is the 'idle' task, which gets called when no other - * tasks can run. It can not be killed, and it cannot sleep. The 'state' - * information in task[0] is never used. + * Note this does not disable interrupts like task_rq_lock, + * you need to do so manually before calling. */ -asmlinkage void schedule(void) +static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) { - struct schedule_data * sched_data; - struct task_struct *prev, *next, *p; - struct list_head *tmp; - int this_cpu, c; - - - spin_lock_prefetch(&runqueue_lock); + if (rq1 == rq2) + spin_lock(&rq1->lock); + else { + if (rq1 < rq2) { + spin_lock(&rq1->lock); + spin_lock(&rq2->lock); + } else { + spin_lock(&rq2->lock); + spin_lock(&rq1->lock); + } + } +} - BUG_ON(!current->active_mm); -need_resched_back: - prev = current; - this_cpu = prev->processor; +/* + * double_rq_unlock - safely unlock two runqueues + * + * Note this does not restore interrupts like task_rq_unlock, + * you need to do so manually after calling. 
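double_rq_lock() above encodes the locking rule stated earlier: take the locks in ascending &runqueue order. The same idea in a stand-alone form; this is an illustration of the ordering argument rather than real kernel code:

#include <linux/spinlock.h>

/* Two CPUs calling lock_pair(a, b) and lock_pair(b, a) acquire the
 * locks in the same (address) order, so neither can hold one lock
 * while waiting forever for the other: no ABBA deadlock. */
static void lock_pair(spinlock_t *a, spinlock_t *b)
{
	if (a == b)
		spin_lock(a);
	else if (a < b) {
		spin_lock(a);
		spin_lock(b);
	} else {
		spin_lock(b);
		spin_lock(a);
	}
}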
+ */ +static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) +{ + spin_unlock(&rq1->lock); + if (rq1 != rq2) + spin_unlock(&rq2->lock); +} - if (unlikely(in_interrupt())) { - printk("Scheduling in interrupt\n"); - BUG(); +#if CONFIG_SMP +/* + * double_lock_balance - lock the busiest runqueue + * + * this_rq is locked already. Recalculate nr_running if we have to + * drop the runqueue lock. + */ +static inline unsigned int double_lock_balance(runqueue_t *this_rq, + runqueue_t *busiest, int this_cpu, int idle, unsigned int nr_running) +{ + if (unlikely(!spin_trylock(&busiest->lock))) { + if (busiest < this_rq) { + spin_unlock(&this_rq->lock); + spin_lock(&busiest->lock); + spin_lock(&this_rq->lock); + /* Need to recalculate nr_running */ + if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu])) + nr_running = this_rq->nr_running; + else + nr_running = this_rq->prev_nr_running[this_cpu]; + } else + spin_lock(&busiest->lock); } + return nr_running; +} + +#include - release_kernel_lock(prev, this_cpu); +/* + * Current runqueue is empty, or rebalance tick: if there is an + * imbalance (current runqueue is too short) then pull from + * busiest runqueue(s). + * + * We call this with the current runqueue locked, + * irqs disabled. + */ +static void load_balance(runqueue_t *this_rq, int idle) +{ + int imbalance, nr_running, load, max_load, + idx, i, this_cpu = smp_processor_id(); + task_t *next = this_rq->idle, *tmp; + runqueue_t *busiest, *rq_src; + prio_array_t *array; + struct list_head *head, *curr; /* - * 'sched_data' is protected by the fact that we can run - * only one process per CPU. + * Handle platform specific balancing operations, such as + * hyperthreading. + */ + + if (arch_load_balance(this_cpu, idle)) + return; + + /* + * We search all runqueues to find the most busy one. + * We do this lockless to reduce cache-bouncing overhead, + * we re-check the 'best' source CPU later on again, with + * the lock held. + * + * We fend off statistical fluctuations in runqueue lengths by + * saving the runqueue length during the previous load-balancing + * operation and using the smaller of the current and saved lengths. + * If a runqueue stays long for a long enough time then + * we recognize it and pull tasks from it. + * + * The 'current runqueue length' is a statistical maximum variable, + * for that one we take the longer one - to avoid fluctuations in + * the other direction. So for a load-balance to happen it needs + * stable long runqueue on the target CPU and stable short runqueue + * on the local runqueue. + * + * We make an exception if this CPU is about to become idle - in + * that case we are less picky about moving a task across CPUs and + * take what can be taken. */ - sched_data = & aligned_data[this_cpu].schedule_data; + if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu])) + nr_running = this_rq->nr_running; + else + nr_running = this_rq->prev_nr_running[this_cpu]; - spin_lock_irq(&runqueue_lock); + busiest = NULL; + max_load = 1; + for (i = 0; i < smp_num_cpus; i++) { + int logical = cpu_logical_map(i); - /* move an exhausted RR process to be last.. 
*/ - if (unlikely(prev->policy == SCHED_RR)) - if (!prev->counter) { - prev->counter = NICE_TO_TICKS(prev->nice); - move_last_runqueue(prev); + rq_src = cpu_rq(logical); + if (idle || (rq_src->nr_running < this_rq->prev_nr_running[logical])) + load = rq_src->nr_running; + else + load = this_rq->prev_nr_running[logical]; + this_rq->prev_nr_running[logical] = rq_src->nr_running; + + if ((load > max_load) && (rq_src != this_rq)) { + busiest = rq_src; + max_load = load; } - - switch (prev->state) { - case TASK_INTERRUPTIBLE: - if (signal_pending(prev)) { - prev->state = TASK_RUNNING; - break; - } - default: - del_from_runqueue(prev); - case TASK_RUNNING:; } - prev->need_resched = 0; + if (likely(!busiest)) + return; + + imbalance = (max_load - nr_running) / 2; + + /* It needs an at least ~25% imbalance to trigger balancing. */ + if (!idle && (imbalance < (max_load + 3)/4)) + return; + + nr_running = double_lock_balance(this_rq, busiest, this_cpu, idle, nr_running); /* - * this is the scheduler proper: + * Make sure nothing changed since we checked the + * runqueue length. */ + if (busiest->nr_running <= nr_running + 1) + goto out_unlock; -repeat_schedule: /* - * Default process to select.. + * We first consider expired tasks. Those will likely not be + * executed in the near future, and they are most likely to + * be cache-cold, thus switching CPUs has the least effect + * on them. */ - next = idle_task(this_cpu); - c = -1000; - list_for_each(tmp, &runqueue_head) { - p = list_entry(tmp, struct task_struct, run_list); - if (can_schedule(p, this_cpu)) { - int weight = goodness(p, this_cpu, prev->active_mm); - if (weight > c) - c = weight, next = p; + if (busiest->expired->nr_active) + array = busiest->expired; + else + array = busiest->active; + +new_array: + /* Start searching at priority 0: */ + idx = 0; +skip_bitmap: + if (!idx) + idx = sched_find_first_bit(array->bitmap); + else + idx = find_next_bit(array->bitmap, MAX_PRIO, idx); + if (idx == MAX_PRIO) { + if (array == busiest->expired) { + array = busiest->active; + goto new_array; } + goto out_unlock; } - /* Do we need to re-calculate counters? */ - if (unlikely(!c)) { - struct task_struct *p; - - spin_unlock_irq(&runqueue_lock); - read_lock(&tasklist_lock); - for_each_task(p) - p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice); - read_unlock(&tasklist_lock); - spin_lock_irq(&runqueue_lock); - goto repeat_schedule; + head = array->queue + idx; + curr = head->prev; +skip_queue: + tmp = list_entry(curr, task_t, run_list); + + /* + * We do not migrate tasks that are: + * 1) running (obviously), or + * 2) cannot be migrated to this CPU due to cpus_allowed, or + * 3) are cache-hot on their current CPU. 
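Worked numbers for the imbalance test in load_balance() above (the !idle case; an idle CPU skips the threshold and takes what it can):

#include <stdio.h>

/* The two lines from load_balance(): halve the raw gap, then require
 * it to reach roughly a quarter of the busiest queue's length. */
static int should_balance(int max_load, int nr_running)
{
	int imbalance = (max_load - nr_running) / 2;

	return imbalance >= (max_load + 3) / 4;
}

int main(void)
{
	printf("%d\n", should_balance(8, 5)); /* 0: gap of 3 is noise   */
	printf("%d\n", should_balance(8, 2)); /* 1: pull up to 3 tasks  */
	return 0;
}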
+ */ + +#define CAN_MIGRATE_TASK(p,rq,this_cpu) \ + ((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \ + ((p) != (rq)->curr) && \ + ((p)->cpus_allowed & (1UL << (this_cpu)))) + + if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) { + curr = curr->next; + if (curr != head) + goto skip_queue; + idx++; + goto skip_bitmap; + } + next = tmp; + /* + * take the task out of the other runqueue and + * put it into this one: + */ + dequeue_task(next, array); + busiest->nr_running--; + set_task_cpu(next, this_cpu); + this_rq->nr_running++; + enqueue_task(next, this_rq->active); + if (next->prio < current->prio) + set_need_resched(); + if (!idle && --imbalance) { + if (array == busiest->expired) { + array = busiest->active; + goto new_array; + } + } +out_unlock: + spin_unlock(&busiest->lock); +} - /* - * from this point on nothing can prevent us from - * switching to the next task, save this fact in - * sched_data. - */ - sched_data->curr = next; - task_set_cpu(next, this_cpu); - spin_unlock_irq(&runqueue_lock); +/* + * The idle-rebalance and busy-rebalance actions get triggered from the + * timer tick, on every CPU. Our balancing action frequency and + * balancing aggressiveness depend on whether the CPU is + * idle or not. + * + * busy-rebalance every 250 msecs. idle-rebalance every 1 msec. (or on + * systems with HZ=100, every 10 msecs.) + */ +#define BUSY_REBALANCE_TICK (HZ/4 ?: 1) +#define IDLE_REBALANCE_TICK (HZ/1000 ?: 1) + +static inline void idle_tick(void) +{ + if (jiffies % IDLE_REBALANCE_TICK) + return; + spin_lock(&this_rq()->lock); + load_balance(this_rq(), 1); + spin_unlock(&this_rq()->lock); +} - if (unlikely(prev == next)) { - /* We won't go through the normal tail, so do this by hand */ - prev->policy &= ~SCHED_YIELD; - goto same_process; - } +#endif -#ifdef CONFIG_SMP - /* - * maintain the per-process 'last schedule' value. - * (this has to be recalculated even if we reschedule to - * the same process) Currently this is only used on SMP, - * and it's approximate, so we do not have to maintain - * it while holding the runqueue spinlock. - */ - sched_data->last_schedule = get_cycles(); +/* + * We place interactive tasks back into the active array, if possible. + * + * To guarantee that this does not starve expired tasks we ignore the + * interactivity of a task if the first expired task had to wait more + * than a 'reasonable' amount of time. This deadline timeout is + * load-dependent, as the frequency of array switches decreases with + * increasing number of running tasks: + */ +#define EXPIRED_STARVING(rq) \ + ((rq)->expired_timestamp && \ + (jiffies - (rq)->expired_timestamp >= \ + STARVATION_LIMIT * ((rq)->nr_running) + 1)) - /* - * We drop the scheduler lock early (it's a global spinlock), - * thus we have to lock the previous process from getting - * rescheduled during switch_to(). - */ +/* + * This function gets called by the timer code, with HZ frequency. + * We call it with interrupts disabled. 
+ */ +void scheduler_tick(int user_tick, int system) +{ + int cpu = smp_processor_id(); + runqueue_t *rq = this_rq(); + task_t *p = current; -#endif /* CONFIG_SMP */ + if (p == rq->idle) { + if (local_bh_count(cpu) || local_irq_count(cpu) > 1) + kstat.per_cpu_system[cpu] += system; +#if CONFIG_SMP + idle_tick(); +#endif + return; + } + if (TASK_NICE(p) > 0) + kstat.per_cpu_nice[cpu] += user_tick; + else + kstat.per_cpu_user[cpu] += user_tick; + kstat.per_cpu_system[cpu] += system; - kstat.context_swtch++; - /* - * there are 3 processes which are affected by a context switch: - * - * prev == .... ==> (last => next) - * - * It's the 'much more previous' 'prev' that is on next's stack, - * but prev is set to (the just run) 'last' process by switch_to(). - * This might sound slightly confusing but makes tons of sense. - */ - prepare_to_switch(); - { - struct mm_struct *mm = next->mm; - struct mm_struct *oldmm = prev->active_mm; - if (!mm) { - BUG_ON(next->active_mm); - next->active_mm = oldmm; - atomic_inc(&oldmm->mm_count); - enter_lazy_tlb(oldmm, next, this_cpu); - } else { - BUG_ON(next->active_mm != mm); - switch_mm(oldmm, mm, next, this_cpu); + /* Task might have expired already, but not scheduled off yet */ + if (p->array != rq->active) { + set_tsk_need_resched(p); + return; + } + spin_lock(&rq->lock); + if (unlikely(rt_task(p))) { + /* + * RR tasks need a special form of timeslice management. + * FIFO tasks have no timeslices. + */ + if ((p->policy == SCHED_RR) && !--p->time_slice) { + p->time_slice = task_timeslice(p); + set_tsk_need_resched(p); + + /* put it at the end of the queue: */ + dequeue_task(p, rq->active); + enqueue_task(p, rq->active); } + goto out; + } + /* + * The task was running during this tick - update the + * time slice counter and the sleep average. Note: we + * do not update a process's priority until it either + * goes to sleep or uses up its timeslice. This makes + * it possible for interactive tasks to use up their + * timeslices at their highest priority levels. + */ + if (p->sleep_avg) + p->sleep_avg--; + if (!--p->time_slice) { + dequeue_task(p, rq->active); + set_tsk_need_resched(p); + p->prio = effective_prio(p); + p->time_slice = task_timeslice(p); + + if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) { + if (!rq->expired_timestamp) + rq->expired_timestamp = jiffies; + enqueue_task(p, rq->expired); + } else + enqueue_task(p, rq->active); + } +out: +#if CONFIG_SMP + if (!(jiffies % BUSY_REBALANCE_TICK)) + load_balance(rq, 0); +#endif + spin_unlock(&rq->lock); +} + +void scheduling_functions_start_here(void) { } + +/* + * schedule() is the main scheduler function. 
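How the EXPIRED_STARVING() deadline above plays out numerically, assuming HZ = 100 (so STARVATION_LIMIT = 2*HZ = 200 jiffies): once the first expired task has waited past the load-scaled deadline, scheduler_tick() stops reinserting interactive tasks into the active array, which lets schedule() (next hunk) reach the array switch:

#include <stdio.h>

#define HZ			100	/* assumed */
#define STARVATION_LIMIT	(2*HZ)

int main(void)
{
	int nr;

	/* deadline from EXPIRED_STARVING(): STARVATION_LIMIT * nr_running + 1 */
	for (nr = 1; nr <= 4; nr++)
		printf("nr_running=%d: interactivity suspended after %d jiffies\n",
		       nr, STARVATION_LIMIT * nr + 1);
	return 0;
}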
+ */ +asmlinkage void schedule(void) +{ + task_t *prev, *next; + runqueue_t *rq; + prio_array_t *array; + struct list_head *queue; + int idx; + + if (unlikely(in_interrupt())) + BUG(); + +need_resched: + prev = current; + rq = this_rq(); + + release_kernel_lock(prev, smp_processor_id()); + prepare_arch_schedule(prev); + prev->sleep_timestamp = jiffies; + spin_lock_irq(&rq->lock); - if (!prev->mm) { - prev->active_mm = NULL; - mmdrop(oldmm); + switch (prev->state) { + case TASK_INTERRUPTIBLE: + if (unlikely(signal_pending(prev))) { + prev->state = TASK_RUNNING; + break; } + default: + deactivate_task(prev, rq); + case TASK_RUNNING: + ; + } +#if CONFIG_SMP +pick_next_task: +#endif + if (unlikely(!rq->nr_running)) { +#if CONFIG_SMP + load_balance(rq, 1); + if (rq->nr_running) + goto pick_next_task; +#endif + next = rq->idle; + rq->expired_timestamp = 0; + goto switch_tasks; } - /* - * This just switches the register state and the - * stack. - */ - switch_to(prev, next, prev); - __schedule_tail(prev); + array = rq->active; + if (unlikely(!array->nr_active)) { + /* + * Switch the active and expired arrays. + */ + rq->active = rq->expired; + rq->expired = array; + array = rq->active; + rq->expired_timestamp = 0; + } + + idx = sched_find_first_bit(array->bitmap); + queue = array->queue + idx; + next = list_entry(queue->next, task_t, run_list); + +switch_tasks: + prefetch(next); + clear_tsk_need_resched(prev); + + if (likely(prev != next)) { + rq->nr_switches++; + rq->curr = next; + + prepare_arch_switch(rq); + prev = context_switch(prev, next); + barrier(); + rq = this_rq(); + finish_arch_switch(rq); + } else + spin_unlock_irq(&rq->lock); + finish_arch_schedule(prev); -same_process: reacquire_kernel_lock(current); - if (current->need_resched) - goto need_resched_back; - return; + if (need_resched()) + goto need_resched; } /* - * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything - * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the - * non-exclusive tasks and one exclusive task. + * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just + * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve + * number) then we wake all the non-exclusive tasks and one exclusive task. * * There are circumstances in which we can try to wake a task which has already - * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns zero - * in this (rare) case, and we handle it by contonuing to scan the queue. + * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns + * zero in this (rare) case, and we handle it by continuing to scan the queue. 
*/ -static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode, - int nr_exclusive, const int sync) +static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync) { struct list_head *tmp; - struct task_struct *p; - - CHECK_MAGIC_WQHEAD(q); - WQ_CHECK_LIST_HEAD(&q->task_list); - - list_for_each(tmp,&q->task_list) { - unsigned int state; - wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); + unsigned int state; + wait_queue_t *curr; + task_t *p; - CHECK_MAGIC(curr->__magic); + list_for_each(tmp, &q->task_list) { + curr = list_entry(tmp, wait_queue_t, task_list); p = curr->task; state = p->state; - if (state & mode) { - WQ_NOTE_WAKER(curr); - if (try_to_wake_up(p, sync) && (curr->flags&WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) + if ((state & mode) && try_to_wake_up(p, sync) && + ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)) break; - } } } -void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr) +/** + * __wake_up - wake up threads blocked on a waitqueue. + * @q: the waitqueue + * @mode: which threads + * @nr_exclusive: how many wake-one or wake-many threads to wake up + */ +void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { - if (q) { - unsigned long flags; - wq_read_lock_irqsave(&q->lock, flags); - __wake_up_common(q, mode, nr, 0); - wq_read_unlock_irqrestore(&q->lock, flags); - } + unsigned long flags; + + if (unlikely(!q)) + return; + + spin_lock_irqsave(&q->lock, flags); + __wake_up_common(q, mode, nr_exclusive, 0); + spin_unlock_irqrestore(&q->lock, flags); } -void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr) +#if CONFIG_SMP + +/** + * __wake_up_sync - wake up threads blocked on a waitqueue. + * @q: the waitqueue + * @mode: which threads + * @nr_exclusive: how many wake-one or wake-many threads to wake up + * + * The sync wakeup differs in that the waker knows that it will schedule + * away soon, so while the target thread will be woken up, it will not + * be migrated to another CPU - ie. the two threads are 'synchronized' + * with each other. This can prevent needless bouncing between CPUs. 
+ */ +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { - if (q) { - unsigned long flags; - wq_read_lock_irqsave(&q->lock, flags); - __wake_up_common(q, mode, nr, 1); - wq_read_unlock_irqrestore(&q->lock, flags); - } + unsigned long flags; + + if (unlikely(!q)) + return; + + spin_lock_irqsave(&q->lock, flags); + if (likely(nr_exclusive)) + __wake_up_common(q, mode, nr_exclusive, 1); + else + __wake_up_common(q, mode, nr_exclusive, 0); + spin_unlock_irqrestore(&q->lock, flags); } +#endif + void complete(struct completion *x) { unsigned long flags; @@ -790,15 +1082,15 @@ wait_queue_t wait; \ init_waitqueue_entry(&wait, current); -#define SLEEP_ON_HEAD \ - wq_write_lock_irqsave(&q->lock,flags); \ +#define SLEEP_ON_HEAD \ + spin_lock_irqsave(&q->lock,flags); \ __add_wait_queue(q, &wait); \ - wq_write_unlock(&q->lock); + spin_unlock(&q->lock); #define SLEEP_ON_TAIL \ - wq_write_lock_irq(&q->lock); \ + spin_lock_irq(&q->lock); \ __remove_wait_queue(q, &wait); \ - wq_write_unlock_irqrestore(&q->lock,flags); + spin_unlock_irqrestore(&q->lock, flags); void interruptible_sleep_on(wait_queue_head_t *q) { @@ -850,55 +1142,53 @@ void scheduling_functions_end_here(void) { } -#if CONFIG_SMP -/** - * set_cpus_allowed() - change a given task's processor affinity - * @p: task to bind - * @new_mask: bitmask of allowed processors - * - * Upon return, the task is running on a legal processor. Note the caller - * must have a valid reference to the task: it must not exit() prematurely. - * This call can sleep; do not hold locks on call. - */ -void set_cpus_allowed(struct task_struct *p, unsigned long new_mask) +void set_user_nice(task_t *p, long nice) { - new_mask &= cpu_online_map; - BUG_ON(!new_mask); - - p->cpus_allowed = new_mask; + unsigned long flags; + prio_array_t *array; + runqueue_t *rq; + if (TASK_NICE(p) == nice || nice < -20 || nice > 19) + return; /* - * If the task is on a no-longer-allowed processor, we need to move - * it. If the task is not current, then set need_resched and send - * its processor an IPI to reschedule. - */ - if (!(p->cpus_runnable & p->cpus_allowed)) { - if (p != current) { - p->need_resched = 1; - smp_send_reschedule(p->processor); - } + * We have to be careful, if called from sys_setpriority(), + * the task might be in the middle of scheduling on another CPU. + */ + rq = task_rq_lock(p, &flags); + if (rt_task(p)) { + p->static_prio = NICE_TO_PRIO(nice); + goto out_unlock; + } + array = p->array; + if (array) + dequeue_task(p, array); + p->static_prio = NICE_TO_PRIO(nice); + p->prio = NICE_TO_PRIO(nice); + if (array) { + enqueue_task(p, array); /* - * Wait until we are on a legal processor. If the task is - * current, then we should be on a legal processor the next - * time we reschedule. Otherwise, we need to wait for the IPI. + * If the task is running and lowered its priority, + * or increased its priority then reschedule its CPU: */ - while (!(p->cpus_runnable & p->cpus_allowed)) - schedule(); + if ((NICE_TO_PRIO(nice) < p->static_prio) || (p == rq->curr)) + resched_task(rq->curr); } +out_unlock: + task_rq_unlock(rq, &flags); } -#endif /* CONFIG_SMP */ #ifndef __alpha__ -/* - * This has been replaced by sys_setpriority. Maybe it should be - * moved into the arch dependent tree for those ports that require - * it for backward compatibility? +/** + * sys_nice - change the priority of the current process. + * @increment: priority increment + * + * sys_setpriority is a more generic, but much slower function that + * does similar things. 
  */
-
 asmlinkage long sys_nice(int increment)
 {
-	long newprio;
+	long nice;
 
 	/*
 	 *	Setpriority might change our priority at the same moment.
@@ -914,34 +1204,69 @@
 	if (increment > 40)
 		increment = 40;
 
-	newprio = current->nice + increment;
-	if (newprio < -20)
-		newprio = -20;
-	if (newprio > 19)
-		newprio = 19;
-	current->nice = newprio;
+	nice = PRIO_TO_NICE(current->static_prio) + increment;
+	if (nice < -20)
+		nice = -20;
+	if (nice > 19)
+		nice = 19;
+	set_user_nice(current, nice);
 	return 0;
 }
 
 #endif
 
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+/**
+ * task_prio - return the priority value of a given task.
+ * @p: the task in question.
+ *
+ * This is the priority value as seen by users in /proc.
+ * RT tasks are offset by -200. Normal tasks are centered
+ * around 0, value goes from -16 to +15.
+ */
+int task_prio(task_t *p)
+{
+	return p->prio - MAX_USER_RT_PRIO;
+}
+
+/**
+ * task_nice - return the nice value of a given task.
+ * @p: the task in question.
+ */
+int task_nice(task_t *p)
+{
+	return TASK_NICE(p);
+}
+
+/**
+ * idle_cpu - is a given cpu idle currently?
+ * @cpu: the processor in question.
+ */
+inline int idle_cpu(int cpu)
 {
-	struct task_struct *tsk = current;
+	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
+}
 
-	if (pid)
-		tsk = find_task_by_pid(pid);
-	return tsk;
+/**
+ * find_process_by_pid - find a process with a matching PID value.
+ * @pid: the pid in question.
+ */
+static inline task_t *find_process_by_pid(pid_t pid)
+{
+	return pid ? find_task_by_pid(pid) : current;
 }
 
-static int setscheduler(pid_t pid, int policy,
-			struct sched_param *param)
+/*
+ * setscheduler - change the scheduling policy and/or RT priority of a thread.
+ */
+static int setscheduler(pid_t pid, int policy, struct sched_param *param)
 {
 	struct sched_param lp;
-	struct task_struct *p;
-	int retval;
+	int retval = -EINVAL;
+	prio_array_t *array;
+	unsigned long flags;
+	runqueue_t *rq;
+	task_t *p;
 
-	retval = -EINVAL;
 	if (!param || pid < 0)
 		goto out_nounlock;
 
@@ -953,14 +1278,19 @@
 	 * We play safe to avoid deadlocks.
 	 */
 	read_lock_irq(&tasklist_lock);
-	spin_lock(&runqueue_lock);
 
 	p = find_process_by_pid(pid);
 
 	retval = -ESRCH;
 	if (!p)
-		goto out_unlock;
-
+		goto out_unlock_tasklist;
+
+	/*
+	 * To be able to change p->policy safely, the appropriate
+	 * runqueue lock must be held.
+	 */
+	rq = task_rq_lock(p, &flags);
+
 	if (policy < 0)
 		policy = p->policy;
 	else {
@@ -969,56 +1299,78 @@
 				policy != SCHED_OTHER)
 			goto out_unlock;
 	}
-
+
 	/*
-	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
-	 * priority for SCHED_OTHER is 0.
+	 * Valid priorities for SCHED_FIFO and SCHED_RR are
+	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_OTHER is 0.
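+ *
+ * (Editor's sketch, not part of the original patch:) from userspace
+ * this corresponds to e.g.
+ *	struct sched_param sp = { .sched_priority = 50 };
+ *	sched_setscheduler(pid, SCHED_FIFO, &sp);
+ * while SCHED_OTHER callers must pass sched_priority == 0.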
*/ retval = -EINVAL; - if (lp.sched_priority < 0 || lp.sched_priority > 99) + if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1) goto out_unlock; if ((policy == SCHED_OTHER) != (lp.sched_priority == 0)) goto out_unlock; retval = -EPERM; - if ((policy == SCHED_FIFO || policy == SCHED_RR) && + if ((policy == SCHED_FIFO || policy == SCHED_RR) && !capable(CAP_SYS_NICE)) goto out_unlock; if ((current->euid != p->euid) && (current->euid != p->uid) && !capable(CAP_SYS_NICE)) goto out_unlock; + array = p->array; + if (array) + deactivate_task(p, task_rq(p)); retval = 0; p->policy = policy; p->rt_priority = lp.sched_priority; - - current->need_resched = 1; + if (policy != SCHED_OTHER) + p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority; + else + p->prio = p->static_prio; + if (array) + activate_task(p, task_rq(p)); out_unlock: - spin_unlock(&runqueue_lock); + task_rq_unlock(rq, &flags); +out_unlock_tasklist: read_unlock_irq(&tasklist_lock); out_nounlock: return retval; } -asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, +/** + * sys_sched_setscheduler - set/change the scheduler policy and RT priority + * @pid: the pid in question. + * @policy: new policy + * @param: structure containing the new RT priority. + */ +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, struct sched_param *param) { return setscheduler(pid, policy, param); } +/** + * sys_sched_setparam - set/change the RT priority of a thread + * @pid: the pid in question. + * @param: structure containing the new RT priority. + */ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param) { return setscheduler(pid, -1, param); } +/** + * sys_sched_getscheduler - get the policy (scheduling class) of a thread + * @pid: the pid in question. + */ asmlinkage long sys_sched_getscheduler(pid_t pid) { - struct task_struct *p; - int retval; + int retval = -EINVAL; + task_t *p; - retval = -EINVAL; if (pid < 0) goto out_nounlock; @@ -1026,20 +1378,24 @@ read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (p) - retval = p->policy & ~SCHED_YIELD; + retval = p->policy; read_unlock(&tasklist_lock); out_nounlock: return retval; } +/** + * sys_sched_getparam - get the RT priority of a thread + * @pid: the pid in question. + * @param: sched_param structure containing the RT priority. + */ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param) { - struct task_struct *p; struct sched_param lp; - int retval; + int retval = -EINVAL; + task_t *p; - retval = -EINVAL; if (!param || pid < 0) goto out_nounlock; @@ -1064,44 +1420,125 @@ return retval; } -asmlinkage long sys_sched_yield(void) +/** + * sys_sched_setaffinity - set the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to the new cpu mask + */ +asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len, + unsigned long *user_mask_ptr) { + unsigned long new_mask; + int retval; + task_t *p; + + if (len < sizeof(new_mask)) + return -EINVAL; + + if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) + return -EFAULT; + + new_mask &= cpu_online_map; + if (!new_mask) + return -EINVAL; + + read_lock(&tasklist_lock); + + p = find_process_by_pid(pid); + if (!p) { + read_unlock(&tasklist_lock); + return -ESRCH; + } + /* - * Trick. sched_yield() first counts the number of truly - * 'pending' runnable processes, then returns if it's - * only the current processes. (This test does not have - * to be atomic.) 
In threaded applications this optimization - * gets triggered quite often. + * It is not safe to call set_cpus_allowed with the + * tasklist_lock held. We will bump the task_struct's + * usage count and then drop tasklist_lock. */ + get_task_struct(p); + read_unlock(&tasklist_lock); - int nr_pending = nr_running; + retval = -EPERM; + if ((current->euid != p->euid) && (current->euid != p->uid) && + !capable(CAP_SYS_NICE)) + goto out_unlock; -#if CONFIG_SMP - int i; + retval = 0; + set_cpus_allowed(p, new_mask); - // Subtract non-idle processes running on other CPUs. - for (i = 0; i < smp_num_cpus; i++) { - int cpu = cpu_logical_map(i); - if (aligned_data[cpu].schedule_data.curr != idle_task(cpu)) - nr_pending--; - } -#else - // on UP this process is on the runqueue as well - nr_pending--; -#endif - if (nr_pending) { - /* - * This process can only be rescheduled by us, - * so this is safe without any locking. - */ - if (current->policy == SCHED_OTHER) - current->policy |= SCHED_YIELD; - current->need_resched = 1; - - spin_lock_irq(&runqueue_lock); - move_last_runqueue(current); - spin_unlock_irq(&runqueue_lock); +out_unlock: + free_task_struct(p); + return retval; +} + +/** + * sys_sched_getaffinity - get the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to hold the current cpu mask + */ +asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long *user_mask_ptr) +{ + unsigned int real_len; + unsigned long mask; + int retval; + task_t *p; + + real_len = sizeof(mask); + if (len < real_len) + return -EINVAL; + + read_lock(&tasklist_lock); + + retval = -ESRCH; + p = find_process_by_pid(pid); + if (!p) + goto out_unlock; + + retval = 0; + mask = p->cpus_allowed & cpu_online_map; + +out_unlock: + read_unlock(&tasklist_lock); + if (retval) + return retval; + if (copy_to_user(user_mask_ptr, &mask, real_len)) + return -EFAULT; + return real_len; +} + +/** + * sys_sched_yield - yield the current processor to other threads. + * + * this function yields the current CPU by moving the calling thread + * to the expired array. If there are no other threads running on this + * CPU then this function will return. + */ +asmlinkage long sys_sched_yield(void) +{ + runqueue_t *rq = this_rq_lock(); + prio_array_t *array = current->array; + + /* + * We implement yielding by moving the task into the expired + * queue. + * + * (special rule: RT tasks will just roundrobin in the active + * array.) + */ + if (likely(!rt_task(current))) { + dequeue_task(current, array); + enqueue_task(current, rq->expired); + } else { + list_del(¤t->run_list); + list_add_tail(¤t->run_list, array->queue + current->prio); } + spin_unlock(&rq->lock); + + schedule(); + return 0; } @@ -1115,15 +1552,29 @@ { set_current_state(TASK_RUNNING); sys_sched_yield(); - schedule(); } +/** + * _cond_resched - conditionally reschedule + * + * Helper function called if cond_resched inline decides we have + * exceeded the timeslice at this point. We give up the processor + * having made sure we will get it back + */ + void __cond_resched(void) { set_current_state(TASK_RUNNING); schedule(); } +/** + * sys_sched_get_priority_max - return maximum RT priority. + * @policy: scheduling class. + * + * this syscall returns the maximum rt_priority that can be used + * by a given scheduling class. 
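+ *
+ * (Editor's example; the concrete numbers assume the scheduler's
+ * default MAX_USER_RT_PRIO of 100:) sched_get_priority_max(SCHED_FIFO)
+ * and sched_get_priority_max(SCHED_RR) would both return 99, while
+ * sched_get_priority_max(SCHED_OTHER) returns 0.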
+ */ asmlinkage long sys_sched_get_priority_max(int policy) { int ret = -EINVAL; @@ -1131,7 +1582,7 @@ switch (policy) { case SCHED_FIFO: case SCHED_RR: - ret = 99; + ret = MAX_USER_RT_PRIO-1; break; case SCHED_OTHER: ret = 0; @@ -1140,6 +1591,13 @@ return ret; } +/** + * sys_sched_get_priority_min - return minimum RT priority. + * @policy: scheduling class. + * + * this syscall returns the minimum rt_priority that can be used + * by a given scheduling class. + */ asmlinkage long sys_sched_get_priority_min(int policy) { int ret = -EINVAL; @@ -1155,11 +1613,19 @@ return ret; } +/** + * sys_sched_rr_get_interval - return the default timeslice of a process. + * @pid: pid of the process. + * @interval: userspace pointer to the timeslice value. + * + * this syscall writes the default timeslice value of a given process + * into the user-space timespec buffer. A value of '0' means infinity. + */ asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval) { - struct timespec t; - struct task_struct *p; int retval = -EINVAL; + struct timespec t; + task_t *p; if (pid < 0) goto out_nounlock; @@ -1168,8 +1634,8 @@ read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (p) - jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : NICE_TO_TICKS(p->nice), - &t); + jiffies_to_timespec(p->policy & SCHED_FIFO ? + 0 : task_timeslice(p), &t); read_unlock(&tasklist_lock); if (p) retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; @@ -1177,14 +1643,14 @@ return retval; } -static void show_task(struct task_struct * p) +static void show_task(task_t * p) { unsigned long free = 0; int state; static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" }; printk("%-13.13s ", p->comm); - state = p->state ? ffz(~p->state) + 1 : 0; + state = p->state ? __ffs(p->state) + 1 : 0; if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *)) printk(stat_nam[state]); else @@ -1225,7 +1691,7 @@ printk(" (NOTLB)\n"); { - extern void show_trace_task(struct task_struct *tsk); + extern void show_trace_task(task_t *tsk); show_trace_task(p); } } @@ -1247,7 +1713,7 @@ void show_state(void) { - struct task_struct *p; + task_t *p; #if (BITS_PER_LONG == 32) printk("\n" @@ -1270,128 +1736,241 @@ read_unlock(&tasklist_lock); } -/** - * reparent_to_init() - Reparent the calling kernel thread to the init task. - * - * If a kernel thread is launched as a result of a system call, or if - * it ever exits, it should generally reparent itself to init so that - * it is correctly cleaned up on exit. - * - * The various task state such as scheduling policy and priority may have - * been inherited fro a user process, so we reset them to sane values here. - * - * NOTE that reparent_to_init() gives the caller full capabilities. 
- */ -void reparent_to_init(void) +void __init init_idle(task_t *idle, int cpu) { - struct task_struct *this_task = current; + runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle)); + unsigned long flags; + + __save_flags(flags); + __cli(); + double_rq_lock(idle_rq, rq); + + idle_rq->curr = idle_rq->idle = idle; + deactivate_task(idle, rq); + idle->array = NULL; + idle->prio = MAX_PRIO; + idle->state = TASK_RUNNING; + set_task_cpu(idle, cpu); + double_rq_unlock(idle_rq, rq); + set_tsk_need_resched(idle); + __restore_flags(flags); +} - write_lock_irq(&tasklist_lock); +extern void init_timervecs(void); +extern void timer_bh(void); +extern void tqueue_bh(void); +extern void immediate_bh(void); - /* Reparent to init */ - REMOVE_LINKS(this_task); - this_task->p_pptr = child_reaper; - this_task->p_opptr = child_reaper; - SET_LINKS(this_task); +void __init sched_init(void) +{ + runqueue_t *rq; + int i, j, k; - /* Set the exit signal to SIGCHLD so we signal init on exit */ - this_task->exit_signal = SIGCHLD; + for (i = 0; i < NR_CPUS; i++) { + prio_array_t *array; - /* We also take the runqueue_lock while altering task fields - * which affect scheduling decisions */ - spin_lock(&runqueue_lock); + rq = cpu_rq(i); + rq->active = rq->arrays; + rq->expired = rq->arrays + 1; + spin_lock_init(&rq->lock); + INIT_LIST_HEAD(&rq->migration_queue); + + for (j = 0; j < 2; j++) { + array = rq->arrays + j; + for (k = 0; k < MAX_PRIO; k++) { + INIT_LIST_HEAD(array->queue + k); + __clear_bit(k, array->bitmap); + } + // delimiter for bitsearch + __set_bit(MAX_PRIO, array->bitmap); + } + } + /* + * We have to do a little magic to get the first + * process right in SMP mode. + */ + rq = this_rq(); + rq->curr = current; + rq->idle = current; + wake_up_process(current); - this_task->ptrace = 0; - this_task->nice = DEF_NICE; - this_task->policy = SCHED_OTHER; - /* cpus_allowed? */ - /* rt_priority? */ - /* signals? */ - this_task->cap_effective = CAP_INIT_EFF_SET; - this_task->cap_inheritable = CAP_INIT_INH_SET; - this_task->cap_permitted = CAP_FULL_SET; - this_task->keep_capabilities = 0; - memcpy(this_task->rlim, init_task.rlim, sizeof(*(this_task->rlim))); - this_task->user = INIT_USER; + init_timervecs(); + init_bh(TIMER_BH, timer_bh); + init_bh(TQUEUE_BH, tqueue_bh); + init_bh(IMMEDIATE_BH, immediate_bh); - spin_unlock(&runqueue_lock); - write_unlock_irq(&tasklist_lock); + /* + * The boot idle thread does lazy MMU switching as well: + */ + atomic_inc(&init_mm.mm_count); + enter_lazy_tlb(&init_mm, current, smp_processor_id()); } +#if CONFIG_SMP + /* - * Put all the gunge required to become a kernel thread without - * attached user resources in one place where it belongs. - */ + * This is how migration works: + * + * 1) we queue a migration_req_t structure in the source CPU's + * runqueue and wake up that CPU's migration thread. + * 2) we down() the locked semaphore => thread blocks. + * 3) migration thread wakes up (implicitly it forces the migrated + * thread off the CPU) + * 4) it gets the migration request and checks whether the migrated + * task is still in the wrong runqueue. + * 5) if it's in the wrong runqueue then the migration thread removes + * it and puts it into the right queue. + * 6) migration thread up()s the semaphore. + * 7) we wake up and the migration is done. + */ + +typedef struct { + struct list_head list; + task_t *task; + struct semaphore sem; +} migration_req_t; -void daemonize(void) +/* + * Change a given task's CPU affinity. 
Migrate the process to a + * proper CPU and schedule it away if the CPU it's executing on + * is removed from the allowed bitmask. + * + * NOTE: the caller must have a valid reference to the task, the + * task must not exit() & deallocate itself prematurely. The + * call is not atomic; no spinlocks may be held. + */ +void set_cpus_allowed(task_t *p, unsigned long new_mask) { - struct fs_struct *fs; + unsigned long flags; + migration_req_t req; + runqueue_t *rq; + new_mask &= cpu_online_map; + if (!new_mask) + BUG(); + rq = task_rq_lock(p, &flags); + p->cpus_allowed = new_mask; /* - * If we were started as result of loading a module, close all of the - * user space pages. We don't need them, and if we didn't close them - * they would be locked into memory. + * Can the task run on the task's current CPU? If not then + * migrate the process off to a proper CPU. */ - exit_mm(current); - - current->session = 1; - current->pgrp = 1; - current->tty = NULL; - - /* Become as one with the init task */ + if (new_mask & (1UL << task_cpu(p))) { + task_rq_unlock(rq, &flags); + goto out; + } + /* + * If the task is not on a runqueue (and not running), then + * it is sufficient to simply update the task's cpu field. + */ + if (!p->array && (p != rq->curr)) { + set_task_cpu(p, __ffs(p->cpus_allowed)); + task_rq_unlock(rq, &flags); + goto out; + } + init_MUTEX_LOCKED(&req.sem); + req.task = p; + list_add(&req.list, &rq->migration_queue); + task_rq_unlock(rq, &flags); + wake_up_process(rq->migration_thread); - exit_fs(current); /* current->fs->count--; */ - fs = init_task.fs; - current->fs = fs; - atomic_inc(&fs->count); - exit_files(current); - current->files = init_task.files; - atomic_inc(¤t->files->count); + down(&req.sem); +out: + ; } -extern unsigned long wait_init_idle; - -void __init init_idle(void) +/* + * migration_thread - this is a highprio system thread that performs + * thread migration by 'pulling' threads into the target runqueue. + */ +static int migration_thread(void * bind_cpu) { - struct schedule_data * sched_data; - sched_data = &aligned_data[smp_processor_id()].schedule_data; + struct sched_param param = { sched_priority: MAX_RT_PRIO-1 }; + int cpu = cpu_logical_map((int) (long) bind_cpu); + runqueue_t *rq; + int ret; - if (current != &init_task && task_on_runqueue(current)) { - printk("UGH! (%d:%d) was on the runqueue, removing.\n", - smp_processor_id(), current->pid); - del_from_runqueue(current); + daemonize(); + sigfillset(¤t->blocked); + set_fs(KERNEL_DS); + /* + * The first migration thread is started on CPU #0. This one can + * migrate the other migration threads to their destination CPUs. + */ + if (cpu != 0) { + while (!cpu_rq(cpu_logical_map(0))->migration_thread) + yield(); + set_cpus_allowed(current, 1UL << cpu); } - sched_data->curr = current; - sched_data->last_schedule = get_cycles(); - clear_bit(current->processor, &wait_init_idle); -} + printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id()); + ret = setscheduler(0, SCHED_FIFO, ¶m); -extern void init_timervecs (void); + rq = this_rq(); + rq->migration_thread = current; -void __init sched_init(void) -{ - /* - * We have to do a little magic to get the first - * process right in SMP mode. 
- */ - int cpu = smp_processor_id(); - int nr; + sprintf(current->comm, "migration_CPU%d", smp_processor_id()); - init_task.processor = cpu; + for (;;) { + runqueue_t *rq_src, *rq_dest; + struct list_head *head; + int cpu_src, cpu_dest; + migration_req_t *req; + unsigned long flags; + task_t *p; + + spin_lock_irqsave(&rq->lock, flags); + head = &rq->migration_queue; + current->state = TASK_INTERRUPTIBLE; + if (list_empty(head)) { + spin_unlock_irqrestore(&rq->lock, flags); + schedule(); + continue; + } + req = list_entry(head->next, migration_req_t, list); + list_del_init(head->next); + spin_unlock_irqrestore(&rq->lock, flags); + + p = req->task; + cpu_dest = __ffs(p->cpus_allowed); + rq_dest = cpu_rq(cpu_dest); +repeat: + cpu_src = task_cpu(p); + rq_src = cpu_rq(cpu_src); + + local_irq_save(flags); + double_rq_lock(rq_src, rq_dest); + if (task_cpu(p) != cpu_src) { + double_rq_unlock(rq_src, rq_dest); + local_irq_restore(flags); + goto repeat; + } + if (rq_src == rq) { + set_task_cpu(p, cpu_dest); + if (p->array) { + deactivate_task(p, rq_src); + activate_task(p, rq_dest); + } + } + double_rq_unlock(rq_src, rq_dest); + local_irq_restore(flags); - for(nr = 0; nr < PIDHASH_SZ; nr++) - pidhash[nr] = NULL; + up(&req->sem); + } +} - init_timervecs(); +void __init migration_init(void) +{ + int cpu; - init_bh(TIMER_BH, timer_bh); - init_bh(TQUEUE_BH, tqueue_bh); - init_bh(IMMEDIATE_BH, immediate_bh); + current->cpus_allowed = 1UL << cpu_logical_map(0); + for (cpu = 0; cpu < smp_num_cpus; cpu++) + if (kernel_thread(migration_thread, (void *) (long) cpu, + CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0) + BUG(); + current->cpus_allowed = -1L; - /* - * The boot idle thread does lazy MMU switching as well: - */ - atomic_inc(&init_mm.mm_count); - enter_lazy_tlb(&init_mm, current, cpu); + for (cpu = 0; cpu < smp_num_cpus; cpu++) + while (!cpu_rq(cpu_logical_map(cpu))->migration_thread) + schedule_timeout(2); } +#endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/signal.c linux.21rc5-ac2/kernel/signal.c --- linux.21rc5/kernel/signal.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/kernel/signal.c 2003-04-22 16:44:38.000000000 +0100 @@ -492,7 +492,7 @@ * No need to set need_resched since signal event passing * goes through ->blocked */ -static inline void signal_wake_up(struct task_struct *t) +inline void signal_wake_up(struct task_struct *t) { t->sigpending = 1; @@ -507,12 +507,9 @@ * process of changing - but no harm is done by that * other than doing an extra (lightweight) IPI interrupt. 
*/ - spin_lock(&runqueue_lock); - if (task_has_cpu(t) && t->processor != smp_processor_id()) - smp_send_reschedule(t->processor); - spin_unlock(&runqueue_lock); -#endif /* CONFIG_SMP */ - + if ((t->state == TASK_RUNNING) && (t->cpu != cpu())) + kick_if_running(t); +#endif if (t->state & TASK_INTERRUPTIBLE) { wake_up_process(t); return; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/softirq.c linux.21rc5-ac2/kernel/softirq.c --- linux.21rc5/kernel/softirq.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/kernel/softirq.c 2003-05-20 20:28:36.000000000 +0100 @@ -364,15 +364,15 @@ int cpu = cpu_logical_map(bind_cpu); daemonize(); - current->nice = 19; + set_user_nice(current, 19); sigfillset(¤t->blocked); /* Migrate to the right CPU */ - current->cpus_allowed = 1UL << cpu; - while (smp_processor_id() != cpu) - schedule(); + set_cpus_allowed(current, 1UL << cpu); + if (cpu() != cpu) + BUG(); - sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu); + sprintf(current->comm, "ksoftirqd/%d", bind_cpu); __set_current_state(TASK_INTERRUPTIBLE); mb(); @@ -395,7 +395,7 @@ } } -static __init int spawn_ksoftirqd(void) +__init int spawn_ksoftirqd(void) { int cpu; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/sys.c linux.21rc5-ac2/kernel/sys.c --- linux.21rc5/kernel/sys.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/kernel/sys.c 2003-05-09 18:33:05.000000000 +0100 @@ -4,6 +4,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ +#include #include #include #include @@ -239,10 +240,10 @@ } if (error == -ESRCH) error = 0; - if (niceval < p->nice && !capable(CAP_SYS_NICE)) + if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) error = -EACCES; else - p->nice = niceval; + set_user_nice(p, niceval); } read_unlock(&tasklist_lock); @@ -268,7 +269,7 @@ long niceval; if (!proc_sel(p, which, who)) continue; - niceval = 20 - p->nice; + niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; } @@ -509,9 +510,10 @@ } } -static int set_user(uid_t new_ruid, int dumpclear) +int set_user(uid_t new_ruid, int dumpclear) { struct user_struct *new_user, *old_user; + struct task_struct *this_task = current; /* What if a process setreuid()'s and this brings the * new uid over his NPROC rlimit? 
We can check this now @@ -521,17 +523,16 @@ new_user = alloc_uid(new_ruid); if (!new_user) return -EAGAIN; - old_user = current->user; - atomic_dec(&old_user->processes); + old_user = this_task->user; atomic_inc(&new_user->processes); + atomic_dec(&old_user->processes); - if(dumpclear) - { - current->mm->dumpable = 0; + if (dumpclear && this_task->mm) { + this_task->mm->dumpable = 0; wmb(); } - current->uid = new_ruid; - current->user = new_user; + this_task->uid = new_ruid; + this_task->user = new_user; free_uid(old_user); return 0; } @@ -1045,6 +1046,7 @@ asmlinkage long sys_sethostname(char *name, int len) { int errno; + char tmp[__NEW_UTS_LEN]; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1052,7 +1054,8 @@ return -EINVAL; down_write(&uts_sem); errno = -EFAULT; - if (!copy_from_user(system_utsname.nodename, name, len)) { + if (!copy_from_user(tmp, name, len)) { + memcpy(system_utsname.nodename, tmp, len); system_utsname.nodename[len] = 0; errno = 0; } @@ -1084,6 +1087,7 @@ asmlinkage long sys_setdomainname(char *name, int len) { int errno; + char tmp[__NEW_UTS_LEN]; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1092,9 +1096,10 @@ down_write(&uts_sem); errno = -EFAULT; - if (!copy_from_user(system_utsname.domainname, name, len)) { - errno = 0; + if (!copy_from_user(tmp, name, len)) { + memcpy(system_utsname.domainname, tmp, len); system_utsname.domainname[len] = 0; + errno = 0; } up_write(&uts_sem); return errno; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/sysctl.c linux.21rc5-ac2/kernel/sysctl.c --- linux.21rc5/kernel/sysctl.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/kernel/sysctl.c 2003-05-22 21:13:04.000000000 +0100 @@ -49,6 +49,7 @@ extern int max_queued_signals; extern int sysrq_enabled; extern int core_uses_pid; +extern int core_setuid_ok; extern char core_pattern[]; extern int cad_pid; @@ -80,6 +81,11 @@ extern int stop_a_enabled; #endif +#ifdef __hppa__ +extern int pwrsw_enabled; +extern int unaligned_enabled; +#endif + #ifdef CONFIG_ARCH_S390 #ifdef CONFIG_MATHEMU extern int sysctl_ieee_emulation_warnings; @@ -172,6 +178,8 @@ 0644, NULL, &proc_dointvec}, {KERN_CORE_USES_PID, "core_uses_pid", &core_uses_pid, sizeof(int), 0644, NULL, &proc_dointvec}, + {KERN_CORE_USES_PID, "core_setuid_ok", &core_setuid_ok, sizeof(int), + 0644, NULL, &proc_dointvec}, {KERN_CORE_PATTERN, "core_pattern", core_pattern, 64, 0644, NULL, &proc_dostring, &sysctl_string}, {KERN_TAINTED, "tainted", &tainted, sizeof(int), @@ -188,6 +196,12 @@ {KERN_SPARC_STOP_A, "stop-a", &stop_a_enabled, sizeof (int), 0644, NULL, &proc_dointvec}, #endif +#ifdef __hppa__ + {KERN_HPPA_PWRSW, "soft-power", &pwrsw_enabled, sizeof (int), + 0644, NULL, &proc_dointvec}, + {KERN_HPPA_UNALIGNED, "unaligned-trap", &unaligned_enabled, sizeof (int), + 0644, NULL, &proc_dointvec}, +#endif #ifdef CONFIG_PPC32 {KERN_PPC_ZEROPAGED, "zero-paged", &zero_paged_on, sizeof(int), 0644, NULL, &proc_dointvec}, @@ -296,8 +310,6 @@ 0444, NULL, &proc_dointvec}, {FS_MAXFILE, "file-max", &files_stat.max_files, sizeof(int), 0644, NULL, &proc_dointvec}, - {FS_NRDQUOT, "dquot-nr", &nr_dquots, 2*sizeof(int), - 0444, NULL, &proc_dointvec}, {FS_DENTRY, "dentry-state", &dentry_stat, 6*sizeof(int), 0444, NULL, &proc_dointvec}, {FS_OVERFLOWUID, "overflowuid", &fs_overflowuid, sizeof(int), 0644, NULL, diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/kernel/timer.c linux.21rc5-ac2/kernel/timer.c --- linux.21rc5/kernel/timer.c 2003-05-28 15:07:35.000000000 +0100 +++ 
linux.21rc5-ac2/kernel/timer.c	2003-04-22 16:44:38.000000000 +0100
@@ -25,6 +25,8 @@
 #include 
 
+struct kernel_stat kstat;
+
 /*
  * Timekeeping variables
  */
@@ -598,25 +600,7 @@
 	int cpu = smp_processor_id(), system = user_tick ^ 1;
 
 	update_one_process(p, user_tick, system, cpu);
-	if (p->pid) {
-		if (--p->counter <= 0) {
-			p->counter = 0;
-			/*
-			 * SCHED_FIFO is priority preemption, so this is
-			 * not the place to decide whether to reschedule a
-			 * SCHED_FIFO task or not - Bhavesh Davda
-			 */
-			if (p->policy != SCHED_FIFO) {
-				p->need_resched = 1;
-			}
-		}
-		if (p->nice > 0)
-			kstat.per_cpu_nice[cpu] += user_tick;
-		else
-			kstat.per_cpu_user[cpu] += user_tick;
-		kstat.per_cpu_system[cpu] += system;
-	} else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
-		kstat.per_cpu_system[cpu] += system;
+	scheduler_tick(user_tick, system);
 }
 
 /*
@@ -624,17 +608,7 @@
  */
 static unsigned long count_active_tasks(void)
 {
-	struct task_struct *p;
-	unsigned long nr = 0;
-
-	read_lock(&tasklist_lock);
-	for_each_task(p) {
-		if ((p->state == TASK_RUNNING ||
-		     (p->state & TASK_UNINTERRUPTIBLE)))
-			nr += FIXED_1;
-	}
-	read_unlock(&tasklist_lock);
-	return nr;
+	return (nr_running() + nr_uninterruptible()) * FIXED_1;
 }
 
 /*
@@ -827,6 +801,89 @@
 
 #endif
 
+static void process_timeout(unsigned long __data)
+{
+	wake_up_process((task_t *)__data);
+}
+
+/**
+ * schedule_timeout - sleep until timeout
+ * @timeout: timeout value in jiffies
+ *
+ * Make the current task sleep until @timeout jiffies have
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
+ * pass before the routine returns. The routine will return 0
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * in jiffies will be returned, or 0 if the timer expired in time
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+ * the CPU away without a bound on the timeout. In this case the return
+ * value will be %MAX_SCHEDULE_TIMEOUT.
+ *
+ * In all cases the return value is guaranteed to be non-negative.
+ */
+signed long schedule_timeout(signed long timeout)
+{
+	struct timer_list timer;
+	unsigned long expire;
+
+	switch (timeout)
+	{
+	case MAX_SCHEDULE_TIMEOUT:
+		/*
+		 * These two special cases are useful to be comfortable
+		 * in the caller. Nothing more. We could take
+		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
+		 * but I'd like to return a valid offset (>=0) to allow
+		 * the caller to do everything it wants with the retval.
+		 */
+		schedule();
+		goto out;
+	default:
+		/*
+		 * Another bit of PARANOID. Note that the retval will be
+		 * 0 since no piece of kernel is supposed to do a check
+		 * for a negative retval of schedule_timeout() (since it
+		 * should never happen anyway). You just have the printk()
+		 * that will tell you if something has gone wrong and where.
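+		 *
+		 * (Editor's usage sketch, not part of the original
+		 * patch:) a typical caller sleeps for about a second
+		 * via
+		 *	set_current_state(TASK_INTERRUPTIBLE);
+		 *	remaining = schedule_timeout(HZ);
+		 * where a non-zero 'remaining' means a signal ended
+		 * the sleep early.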
+		 */
+		if (timeout < 0)
+		{
+			printk(KERN_ERR "schedule_timeout: wrong timeout "
+			       "value %lx from %p\n", timeout,
+			       __builtin_return_address(0));
+			current->state = TASK_RUNNING;
+			goto out;
+		}
+	}
+
+	expire = timeout + jiffies;
+
+	init_timer(&timer);
+	timer.expires = expire;
+	timer.data = (unsigned long) current;
+	timer.function = process_timeout;
+
+	add_timer(&timer);
+	schedule();
+	del_timer_sync(&timer);
+
+	timeout = expire - jiffies;
+
+ out:
+	return timeout < 0 ? 0 : timeout;
+}
+
 /* Thread ID - the internal kernel "pid" */
 asmlinkage long sys_gettid(void)
 {
@@ -873,4 +930,3 @@
 	}
 	return 0;
 }
-
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/lib/Config.in linux.21rc5-ac2/lib/Config.in
--- linux.21rc5/lib/Config.in	2003-05-28 15:07:35.000000000 +0100
+++ linux.21rc5-ac2/lib/Config.in	2003-05-20 20:21:28.000000000 +0100
@@ -4,6 +4,8 @@
 mainmenu_option next_comment
 comment 'Library routines'
 
+tristate 'CRC32 functions' CONFIG_CRC32
+
 #
 # Do we need the compression support?
 #
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/lib/crc32.c linux.21rc5-ac2/lib/crc32.c
--- linux.21rc5/lib/crc32.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/lib/crc32.c	2003-05-28 22:28:20.000000000 +0100
@@ -0,0 +1,539 @@
+/*
+ * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
+ *
+ * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Same crc32 function was used in 5 other places in the kernel.
+ * I made one version, and deleted the others.
+ * There are various incantations of crc32(). Some use a seed of 0 or ~0.
+ * Some xor at the end with ~0. The generic crc32() function takes
+ * seed as an argument, and doesn't xor at the end. Then individual
+ * users can do whatever they need.
+ * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
+ * fs/jffs2 uses seed 0, doesn't xor with ~0.
+ * fs/partitions/efi.c uses seed ~0, xor's with ~0.
+ *
+ */
+
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include "crc32defs.h"
+#if CRC_LE_BITS == 8
+#define tole(x) __constant_cpu_to_le32(x)
+#define tobe(x) __constant_cpu_to_be32(x)
+#else
+#define tole(x) (x)
+#define tobe(x) (x)
+#endif
+#include "crc32table.h"
+
+#if __GNUC__ >= 3	/* 2.x has "attribute", but only 3.0 has "pure" */
+#define attribute(x) __attribute__(x)
+#else
+#define attribute(x)
+#endif
+
+/*
+ * This code is in the public domain; copyright abandoned.
+ * Liability for non-performance of this code is limited to the amount
+ * you paid for it. Since it is distributed for free, your refund will
+ * be very very small. If it breaks, you get to keep both pieces.
+ *
+ * The version included in the kernel is covered by the GPL.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
+MODULE_DESCRIPTION("Ethernet CRC32 calculations");
+MODULE_LICENSE("GPL and additional rights");
+
+#if CRC_LE_BITS == 1
+/*
+ * In fact, the table-based code will work in this case, but it can be
+ * simplified by inlining the table in ?: form.
+ */
+
+/**
+ * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
+ * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
+ *        other uses, or the previous crc32 value if computing incrementally.
+ * @p - pointer to buffer over which CRC is run + * @len - length of buffer @p + * + */ +u32 attribute((pure)) crc32_le(u32 crc, unsigned char const *p, size_t len) +{ + int i; + while (len--) { + crc ^= *p++; + for (i = 0; i < 8; i++) + crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); + } + return crc; +} +#else /* Table-based approach */ + +/** + * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 + * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for + * other uses, or the previous crc32 value if computing incrementally. + * @p - pointer to buffer over which CRC is run + * @len - length of buffer @p + * + */ +u32 attribute((pure)) crc32_le(u32 crc, unsigned char const *p, size_t len) +{ +# if CRC_LE_BITS == 8 + const u32 *b =(u32 *)p; + const u32 *tab = crc32table_le; + +# ifdef __LITTLE_ENDIAN +# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) +# else +# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) +# endif + + crc = __cpu_to_le32(crc); + /* Align it */ + if(unlikely(((long)b)&3 && len)){ + do { + DO_CRC(*((u8 *)b)++); + } while ((--len) && ((long)b)&3 ); + } + if(likely(len >= 4)){ + /* load data 32 bits wide, xor data 32 bits wide. */ + size_t save_len = len & 3; + len = len >> 2; + --b; /* use pre increment below(*++b) for speed */ + do { + crc ^= *++b; + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + } while (--len); + b++; /* point to next byte(s) */ + len = save_len; + } + /* And the last few bytes */ + if(len){ + do { + DO_CRC(*((u8 *)b)++); + } while (--len); + } + + return __le32_to_cpu(crc); +#undef ENDIAN_SHIFT +#undef DO_CRC + +# elif CRC_LE_BITS == 4 + while (len--) { + crc ^= *p++; + crc = (crc >> 4) ^ crc32table_le[crc & 15]; + crc = (crc >> 4) ^ crc32table_le[crc & 15]; + } + return crc; +# elif CRC_LE_BITS == 2 + while (len--) { + crc ^= *p++; + crc = (crc >> 2) ^ crc32table_le[crc & 3]; + crc = (crc >> 2) ^ crc32table_le[crc & 3]; + crc = (crc >> 2) ^ crc32table_le[crc & 3]; + crc = (crc >> 2) ^ crc32table_le[crc & 3]; + } + return crc; +# endif +} +#endif + +#if CRC_BE_BITS == 1 +/* + * In fact, the table-based code will work in this case, but it can be + * simplified by inlining the table in ?: form. + */ + +/** + * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 + * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for + * other uses, or the previous crc32 value if computing incrementally. + * @p - pointer to buffer over which CRC is run + * @len - length of buffer @p + * + */ +u32 attribute((pure)) crc32_be(u32 crc, unsigned char const *p, size_t len) +{ + int i; + while (len--) { + crc ^= *p++ << 24; + for (i = 0; i < 8; i++) + crc = + (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : + 0); + } + return crc; +} + +#else /* Table-based approach */ +/** + * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 + * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for + * other uses, or the previous crc32 value if computing incrementally. 
+ * @p - pointer to buffer over which CRC is run + * @len - length of buffer @p + * + */ +u32 attribute((pure)) crc32_be(u32 crc, unsigned char const *p, size_t len) +{ +# if CRC_BE_BITS == 8 + const u32 *b =(u32 *)p; + const u32 *tab = crc32table_be; + +# ifdef __LITTLE_ENDIAN +# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) +# else +# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) +# endif + + crc = __cpu_to_be32(crc); + /* Align it */ + if(unlikely(((long)b)&3 && len)){ + do { + DO_CRC(*((u8 *)b)++); + } while ((--len) && ((long)b)&3 ); + } + if(likely(len >= 4)){ + /* load data 32 bits wide, xor data 32 bits wide. */ + size_t save_len = len & 3; + len = len >> 2; + --b; /* use pre increment below(*++b) for speed */ + do { + crc ^= *++b; + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + } while (--len); + b++; /* point to next byte(s) */ + len = save_len; + } + /* And the last few bytes */ + if(len){ + do { + DO_CRC(*((u8 *)b)++); + } while (--len); + } + return __be32_to_cpu(crc); +#undef ENDIAN_SHIFT +#undef DO_CRC + +# elif CRC_BE_BITS == 4 + while (len--) { + crc ^= *p++ << 24; + crc = (crc << 4) ^ crc32table_be[crc >> 28]; + crc = (crc << 4) ^ crc32table_be[crc >> 28]; + } + return crc; +# elif CRC_BE_BITS == 2 + while (len--) { + crc ^= *p++ << 24; + crc = (crc << 2) ^ crc32table_be[crc >> 30]; + crc = (crc << 2) ^ crc32table_be[crc >> 30]; + crc = (crc << 2) ^ crc32table_be[crc >> 30]; + crc = (crc << 2) ^ crc32table_be[crc >> 30]; + } + return crc; +# endif +} +#endif + +u32 bitreverse(u32 x) +{ + x = (x >> 16) | (x << 16); + x = (x >> 8 & 0x00ff00ff) | (x << 8 & 0xff00ff00); + x = (x >> 4 & 0x0f0f0f0f) | (x << 4 & 0xf0f0f0f0); + x = (x >> 2 & 0x33333333) | (x << 2 & 0xcccccccc); + x = (x >> 1 & 0x55555555) | (x << 1 & 0xaaaaaaaa); + return x; +} + +#if defined(CONFIG_CRC32_MODULE) /* These are exported from kernel/ksyms.c in the non-module + case, to ensure that this file is pulled in from lib/lib.a */ +EXPORT_SYMBOL(crc32_le); +EXPORT_SYMBOL(crc32_be); +EXPORT_SYMBOL(bitreverse); +#endif + +/* + * A brief CRC tutorial. + * + * A CRC is a long-division remainder. You add the CRC to the message, + * and the whole thing (message+CRC) is a multiple of the given + * CRC polynomial. To check the CRC, you can either check that the + * CRC matches the recomputed value, *or* you can check that the + * remainder computed on the message+CRC is 0. This latter approach + * is used by a lot of hardware implementations, and is why so many + * protocols put the end-of-frame flag after the CRC. + * + * It's actually the same long division you learned in school, except that + * - We're working in binary, so the digits are only 0 and 1, and + * - When dividing polynomials, there are no carries. Rather than add and + * subtract, we just xor. Thus, we tend to get a bit sloppy about + * the difference between adding and subtracting. + * + * A 32-bit CRC polynomial is actually 33 bits long. But since it's + * 33 bits long, bit 32 is always going to be set, so usually the CRC + * is written in hex with the most significant bit omitted. (If you're + * familiar with the IEEE 754 floating-point format, it's the same idea.) + * + * Note that a CRC is computed over a string of *bits*, so you have + * to decide on the endianness of the bits within each byte. To get + * the best error-detecting properties, this should correspond to the + * order they're actually sent. 
For example, standard RS-232 serial is
+ * little-endian; the most significant bit (sometimes used for parity)
+ * is sent last. And when appending a CRC word to a message, you should
+ * do it in the right order, matching the endianness.
+ *
+ * Just like with ordinary division, the remainder is always smaller than
+ * the divisor (the CRC polynomial) you're dividing by. Each step of the
+ * division, you take one more digit (bit) of the dividend and append it
+ * to the current remainder. Then you figure out the appropriate multiple
+ * of the divisor to subtract to bring the remainder back into range.
+ * In binary, it's easy - it has to be either 0 or 1, and to make the
+ * XOR cancel, it's just a copy of bit 32 of the remainder.
+ *
+ * When computing a CRC, we don't care about the quotient, so we can
+ * throw the quotient bit away, but subtract the appropriate multiple of
+ * the polynomial from the remainder and we're back to where we started,
+ * ready to process the next bit.
+ *
+ * A big-endian CRC written this way would be coded like:
+ * for (i = 0; i < input_bits; i++) {
+ * 	multiple = remainder & 0x80000000 ? CRCPOLY : 0;
+ * 	remainder = (remainder << 1 | next_input_bit()) ^ multiple;
+ * }
+ * Notice how, to get at bit 32 of the shifted remainder, we look
+ * at bit 31 of the remainder *before* shifting it.
+ *
+ * But also notice how the next_input_bit() bits we're shifting into
+ * the remainder don't actually affect any decision-making until
+ * 32 bits later. Thus, the first 32 cycles of this are pretty boring.
+ * Also, to add the CRC to a message, we need a 32-bit-long hole for it at
+ * the end, so we have to add 32 extra cycles shifting in zeros at the
+ * end of every message.
+ *
+ * So the standard trick is to rearrange merging in the next_input_bit()
+ * until the moment it's needed. Then the first 32 cycles can be precomputed,
+ * and merging in the final 32 zero bits to make room for the CRC can be
+ * skipped entirely.
+ * This changes the code to:
+ * for (i = 0; i < input_bits; i++) {
+ * 	remainder ^= next_input_bit() << 31;
+ * 	multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ * 	remainder = (remainder << 1) ^ multiple;
+ * }
+ * With this optimization, the little-endian code is simpler:
+ * for (i = 0; i < input_bits; i++) {
+ * 	remainder ^= next_input_bit();
+ * 	multiple = (remainder & 1) ? CRCPOLY : 0;
+ * 	remainder = (remainder >> 1) ^ multiple;
+ * }
+ *
+ * Note that the other details of endianness have been hidden in CRCPOLY
+ * (which must be bit-reversed) and next_input_bit().
+ *
+ * However, as long as next_input_bit is returning the bits in a sensible
+ * order, we can actually do the merging 8 or more bits at a time rather
+ * than one bit at a time:
+ * for (i = 0; i < input_bytes; i++) {
+ * 	remainder ^= next_input_byte() << 24;
+ * 	for (j = 0; j < 8; j++) {
+ * 		multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ * 		remainder = (remainder << 1) ^ multiple;
+ * 	}
+ * }
+ * Or in little-endian:
+ * for (i = 0; i < input_bytes; i++) {
+ * 	remainder ^= next_input_byte();
+ * 	for (j = 0; j < 8; j++) {
+ * 		multiple = (remainder & 1) ? CRCPOLY : 0;
+ * 		remainder = (remainder >> 1) ^ multiple;
+ * 	}
+ * }
+ * If the input is a multiple of 32 bits, you can even XOR in a 32-bit
+ * word at a time and increase the inner loop count to 32.
+ *
+ * You can also mix and match the two loop styles, for example doing the
+ * bulk of a message byte-at-a-time and adding bit-at-a-time processing
+ * for any fractional bytes at the end.
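+ *
+ * (Editor's sketch of such a mix, not in the original comment;
+ * next_input_word() is a hypothetical 32-bit fetch:)
+ * for (i = 0; i + 4 <= input_bytes; i += 4) {
+ * 	remainder ^= next_input_word();
+ * 	for (j = 0; j < 32; j++) {
+ * 		multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ * 		remainder = (remainder << 1) ^ multiple;
+ * 	}
+ * }
+ * followed by the byte-at-a-time loop above for the remaining
+ * input_bytes % 4 bytes.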
+ *
+ * The only remaining optimization is to the byte-at-a-time table method.
+ * Here, rather than just shifting one bit of the remainder to decide
+ * on the correct multiple to subtract, we can shift a byte at a time.
+ * This produces a 40-bit (rather than a 33-bit) intermediate remainder,
+ * but again the multiple of the polynomial to subtract depends only on
+ * the high bits, the high 8 bits in this case.
+ *
+ * The multiple we need in that case is the low 32 bits of a 40-bit
+ * value whose high 8 bits are given, and which is a multiple of the
+ * generator polynomial. This is simply the CRC-32 of the given
+ * one-byte message.
+ *
+ * Two more details: normally, appending zero bits to a message which
+ * is already a multiple of a polynomial produces a larger multiple of that
+ * polynomial. To enable a CRC to detect this condition, it's common to
+ * invert the CRC before appending it. This makes the remainder of the
+ * message+crc come out not as zero, but some fixed non-zero value.
+ *
+ * The same problem applies to zero bits prepended to the message, and
+ * a similar solution is used. Instead of starting with a remainder of
+ * 0, an initial remainder of all ones is used. As long as you start
+ * the same way on decoding, it doesn't make a difference.
+ */
+
+#if UNITTEST
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#if 0 /* Not used at present */
+static void
+buf_dump(char const *prefix, unsigned char const *buf, size_t len)
+{
+	fputs(prefix, stdout);
+	while (len--)
+		printf(" %02x", *buf++);
+	putchar('\n');
+
+}
+#endif
+
+static void bytereverse(unsigned char *buf, size_t len)
+{
+	while (len--) {
+		unsigned char x = *buf;
+		x = (x >> 4) | (x << 4);
+		x = (x >> 2 & 0x33) | (x << 2 & 0xcc);
+		x = (x >> 1 & 0x55) | (x << 1 & 0xaa);
+		*buf++ = x;
+	}
+}
+
+static void random_garbage(unsigned char *buf, size_t len)
+{
+	while (len--)
+		*buf++ = (unsigned char) random();
+}
+
+#if 0 /* Not used at present */
+static void store_le(u32 x, unsigned char *buf)
+{
+	buf[0] = (unsigned char) x;
+	buf[1] = (unsigned char) (x >> 8);
+	buf[2] = (unsigned char) (x >> 16);
+	buf[3] = (unsigned char) (x >> 24);
+}
+#endif
+
+static void store_be(u32 x, unsigned char *buf)
+{
+	buf[0] = (unsigned char) (x >> 24);
+	buf[1] = (unsigned char) (x >> 16);
+	buf[2] = (unsigned char) (x >> 8);
+	buf[3] = (unsigned char) x;
+}
+
+/*
+ * This checks that CRC(buf + CRC(buf)) = 0, and that
+ * CRC commutes with bit-reversal. This has the side effect
+ * of bytewise bit-reversing the input buffer, and returns
+ * the CRC of the reversed buffer.
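+ *
+ * (Editor's restatement of the checks below, for clarity:) besides the
+ * CRC(msg + CRC(msg)) == 0 residue test, it verifies that
+ *	crc32_le(bitreverse(init), bytewise-reversed buf, len)
+ * equals bitreverse(crc32_be(init, buf, len)).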
+ */
+static u32 test_step(u32 init, unsigned char *buf, size_t len)
+{
+	u32 crc1, crc2;
+	size_t i;
+
+	crc1 = crc32_be(init, buf, len);
+	store_be(crc1, buf + len);
+	crc2 = crc32_be(init, buf, len + 4);
+	if (crc2)
+		printf("\nCRC cancellation fail: 0x%08x should be 0\n",
+		       crc2);
+
+	for (i = 0; i <= len + 4; i++) {
+		crc2 = crc32_be(init, buf, i);
+		crc2 = crc32_be(crc2, buf + i, len + 4 - i);
+		if (crc2)
+			printf("\nCRC split fail: 0x%08x\n", crc2);
+	}
+
+	/* Now swap it around for the other test */
+
+	bytereverse(buf, len + 4);
+	init = bitreverse(init);
+	crc2 = bitreverse(crc1);
+	if (crc1 != bitreverse(crc2))
+		printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n",
+		       crc1, crc2, bitreverse(crc2));
+	crc1 = crc32_le(init, buf, len);
+	if (crc1 != crc2)
+		printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1,
+		       crc2);
+	crc2 = crc32_le(init, buf, len + 4);
+	if (crc2)
+		printf("\nCRC cancellation fail: 0x%08x should be 0\n",
+		       crc2);
+
+	for (i = 0; i <= len + 4; i++) {
+		crc2 = crc32_le(init, buf, i);
+		crc2 = crc32_le(crc2, buf + i, len + 4 - i);
+		if (crc2)
+			printf("\nCRC split fail: 0x%08x\n", crc2);
+	}
+
+	return crc1;
+}
+
+#define SIZE 64
+#define INIT1 0
+#define INIT2 0
+
+int main(void)
+{
+	unsigned char buf1[SIZE + 4];
+	unsigned char buf2[SIZE + 4];
+	unsigned char buf3[SIZE + 4];
+	int i, j;
+	u32 crc1, crc2, crc3;
+
+	for (i = 0; i <= SIZE; i++) {
+		printf("\rTesting length %d...", i);
+		fflush(stdout);
+		random_garbage(buf1, i);
+		random_garbage(buf2, i);
+		for (j = 0; j < i; j++)
+			buf3[j] = buf1[j] ^ buf2[j];
+
+		crc1 = test_step(INIT1, buf1, i);
+		crc2 = test_step(INIT2, buf2, i);
+		/* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */
+		crc3 = test_step(INIT1 ^ INIT2, buf3, i);
+		if (crc3 != (crc1 ^ crc2))
+			printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n",
+			       crc3, crc1, crc2);
+	}
+	printf("\nAll tests complete. No failures expected.\n");
+	return 0;
+}
+
+#endif /* UNITTEST */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/lib/crc32defs.h linux.21rc5-ac2/lib/crc32defs.h
--- linux.21rc5/lib/crc32defs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/lib/crc32defs.h	2003-05-28 15:20:53.000000000 +0100
@@ -0,0 +1,32 @@
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRCPOLY_LE 0xedb88320
+#define CRCPOLY_BE 0x04c11db7
+
+/* How many bits at a time to use. Requires a table of 4<<CRC_xx_BITS bytes. */
+/* For less performance-sensitive uses, use 4 */
+#ifndef CRC_LE_BITS
+# define CRC_LE_BITS 8
+#endif
+#ifndef CRC_BE_BITS
+# define CRC_BE_BITS 8
+#endif
+
+/*
+ * Little-endian CRC computation. Used with serial bit streams sent
+ * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
+ */
+#if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1
+# error CRC_LE_BITS must be a power of 2 between 1 and 8
+#endif
+
+/*
+ * Big-endian CRC computation. Used with serial bit streams sent
+ * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
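+ *
+ * (Editor's sketch, not part of the original header; the seed/xor
+ * convention is the Ethernet-style one described in crc32.c:)
+ *	u32 crc = ~crc32_be(~0, buf, len);
+ *	*(u32 *)(buf + len) = cpu_to_be32(crc);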
+ */
+#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1
+# error CRC_BE_BITS must be a power of 2 between 1 and 8
+#endif
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/lib/gen_crc32table.c linux.21rc5-ac2/lib/gen_crc32table.c
--- linux.21rc5/lib/gen_crc32table.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/lib/gen_crc32table.c	2003-05-20 20:21:28.000000000 +0100
@@ -0,0 +1,82 @@
+#include <stdio.h>
+#include "crc32defs.h"
+#include <inttypes.h>
+
+#define ENTRIES_PER_LINE 4
+
+#define LE_TABLE_SIZE (1 << CRC_LE_BITS)
+#define BE_TABLE_SIZE (1 << CRC_BE_BITS)
+
+static u_int32_t crc32table_le[LE_TABLE_SIZE];
+static u_int32_t crc32table_be[BE_TABLE_SIZE];
+
+/**
+ * crc32init_le() - allocate and initialize LE table data
+ *
+ * crc is the crc of the byte i; other entries are filled in based on the
+ * fact that crctable[i^j] = crctable[i] ^ crctable[j].
+ *
+ */
+static void crc32init_le(void)
+{
+	unsigned i, j;
+	u_int32_t crc = 1;
+
+	crc32table_le[0] = 0;
+
+	for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) {
+		crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
+		for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
+			crc32table_le[i + j] = crc ^ crc32table_le[j];
+	}
+}
+
+/**
+ * crc32init_be() - allocate and initialize BE table data
+ */
+static void crc32init_be(void)
+{
+	unsigned i, j;
+	u_int32_t crc = 0x80000000;
+
+	crc32table_be[0] = 0;
+
+	for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
+		crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
+		for (j = 0; j < i; j++)
+			crc32table_be[i + j] = crc ^ crc32table_be[j];
+	}
+}
+
+static void output_table(u_int32_t table[], int len, char *trans)
+{
+	int i;
+
+	for (i = 0; i < len - 1; i++) {
+		if (i % ENTRIES_PER_LINE == 0)
+			printf("\n");
+		printf("%s(0x%8.8xL), ", trans, table[i]);
+	}
+	printf("%s(0x%8.8xL)\n", trans, table[len - 1]);
+}
+
+int main(int argc, char** argv)
+{
+	printf("/* this file is generated - do not edit */\n\n");
+
+	if (CRC_LE_BITS > 1) {
+		crc32init_le();
+		printf("static const u32 crc32table_le[] = {");
+		output_table(crc32table_le, LE_TABLE_SIZE, "tole");
+		printf("};\n");
+	}
+
+	if (CRC_BE_BITS > 1) {
+		crc32init_be();
+		printf("static const u32 crc32table_be[] = {");
+		output_table(crc32table_be, BE_TABLE_SIZE, "tobe");
+		printf("};\n");
+	}
+
+	return 0;
+}
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/lib/Makefile linux.21rc5-ac2/lib/Makefile
--- linux.21rc5/lib/Makefile	2003-05-28 15:07:35.000000000 +0100
+++ linux.21rc5-ac2/lib/Makefile	2003-05-28 15:20:53.000000000 +0100
@@ -1,4 +1,4 @@
-#
+ #
 #
 # Makefile for some libs needed in the kernel.
 #
 # Note! Dependencies are done automagically by 'make dep', which also
@@ -8,7 +8,8 @@
 
 L_TARGET := lib.a
 
-export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o rbtree.o
+export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o \
+	       rbtree.o crc32.o
 
 obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o \
	 bust_spinlocks.o rbtree.o dump_stack.o
@@ -20,10 +21,26 @@
 obj-y += dec_and_lock.o
 endif
 
+obj-$(CONFIG_CRC32) += crc32.o
+
 subdir-$(CONFIG_ZLIB_INFLATE) += zlib_inflate
 subdir-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate
 
+include $(TOPDIR)/drivers/net/Makefile.lib
+include $(TOPDIR)/drivers/usb/Makefile.lib
+include $(TOPDIR)/fs/Makefile.lib
+include $(TOPDIR)/net/bluetooth/bnep/Makefile.lib
+
 # Include the subdirs, if necessary.
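+# (Editorial note, not part of the original patch: the crc32 rules at
+# the bottom of this Makefile generate the lookup table on the host,
+# roughly
+#	$(HOSTCC) $(HOSTCCFLAGS) -o gen_crc32table gen_crc32table.c
+#	./gen_crc32table > crc32table.h
+# so that crc32.o is rebuilt whenever crc32table.h changes.)
+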
obj-y += $(join $(subdir-y),$(subdir-y:%=/%.o)) + include $(TOPDIR)/Rules.make + +crc32.o: crc32table.h + +gen_crc32table: gen_crc32table.c + $(HOSTCC) $(HOSTCCFLAGS) -o $@ $< + +crc32table.h: gen_crc32table + ./$< > $@ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/lib/vsprintf.c linux.21rc5-ac2/lib/vsprintf.c --- linux.21rc5/lib/vsprintf.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/lib/vsprintf.c 2003-05-09 18:33:12.000000000 +0100 @@ -616,8 +616,9 @@ case 'X': base = 16; break; - case 'd': case 'i': + base = 0; + case 'd': is_sign = 1; case 'u': break; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/linux-2.4.20rc2ac2-acpi-20030513.patch linux.21rc5-ac2/linux-2.4.20rc2ac2-acpi-20030513.patch --- linux.21rc5/linux-2.4.20rc2ac2-acpi-20030513.patch 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/linux-2.4.20rc2ac2-acpi-20030513.patch 2003-05-13 02:10:13.000000000 +0100 @@ -0,0 +1,2929 @@ +diff -urN linux-2.4.20/drivers/acpi/dispatcher/dsinit.c linux-2.4.20-acpi/drivers/acpi/dispatcher/dsinit.c +--- linux-2.4.20/drivers/acpi/dispatcher/dsinit.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/dispatcher/dsinit.c 2003-05-13 02:53:24.000000000 +0200 +@@ -222,8 +222,8 @@ + } + + ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT, +- "\nTable [%4.4s] - %hd Objects with %hd Devices %hd Methods %hd Regions\n", +- table_desc->pointer->signature, info.object_count, ++ "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n", ++ table_desc->pointer->signature, table_desc->table_id, info.object_count, + info.device_count, info.method_count, info.op_region_count)); + + ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH, +diff -urN linux-2.4.20/drivers/acpi/dispatcher/dsopcode.c linux-2.4.20-acpi/drivers/acpi/dispatcher/dsopcode.c +--- linux-2.4.20/drivers/acpi/dispatcher/dsopcode.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/dispatcher/dsopcode.c 2003-05-13 02:53:25.000000000 +0200 +@@ -82,7 +82,7 @@ + union acpi_parse_object *arg; + + +- ACPI_FUNCTION_TRACE ("acpi_ds_execute_arguments"); ++ ACPI_FUNCTION_TRACE ("ds_execute_arguments"); + + + /* +@@ -99,7 +99,7 @@ + + /* Create and initialize a new parser state */ + +- walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, NULL, NULL, NULL); ++ walk_state = acpi_ds_create_walk_state (0, NULL, NULL, NULL); + if (!walk_state) { + return_ACPI_STATUS (AE_NO_MEMORY); + } +@@ -139,7 +139,7 @@ + + /* Create and initialize a new parser state */ + +- walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, NULL, NULL, NULL); ++ walk_state = acpi_ds_create_walk_state (0, NULL, NULL, NULL); + if (!walk_state) { + return_ACPI_STATUS (AE_NO_MEMORY); + } +diff -urN linux-2.4.20/drivers/acpi/events/evmisc.c linux-2.4.20-acpi/drivers/acpi/events/evmisc.c +--- linux-2.4.20/drivers/acpi/events/evmisc.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/events/evmisc.c 2003-05-13 02:53:25.000000000 +0200 +@@ -156,10 +156,10 @@ + case ACPI_TYPE_POWER: + + if (notify_value <= ACPI_MAX_SYS_NOTIFY) { +- handler_obj = obj_desc->common_notify.sys_handler; ++ handler_obj = obj_desc->common_notify.system_notify; + } + else { +- handler_obj = obj_desc->common_notify.drv_handler; ++ handler_obj = obj_desc->common_notify.device_notify; + } + break; + +@@ -171,8 +171,8 @@ + + /* If there is any handler to run, schedule the dispatcher */ + +- if ((acpi_gbl_sys_notify.handler && (notify_value <= ACPI_MAX_SYS_NOTIFY)) || +- 
(acpi_gbl_drv_notify.handler && (notify_value > ACPI_MAX_SYS_NOTIFY)) || ++ if ((acpi_gbl_system_notify.handler && (notify_value <= ACPI_MAX_SYS_NOTIFY)) || ++ (acpi_gbl_device_notify.handler && (notify_value > ACPI_MAX_SYS_NOTIFY)) || + handler_obj) { + notify_info = acpi_ut_create_generic_state (); + if (!notify_info) { +@@ -235,17 +235,17 @@ + if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) { + /* Global system notification handler */ + +- if (acpi_gbl_sys_notify.handler) { +- global_handler = acpi_gbl_sys_notify.handler; +- global_context = acpi_gbl_sys_notify.context; ++ if (acpi_gbl_system_notify.handler) { ++ global_handler = acpi_gbl_system_notify.handler; ++ global_context = acpi_gbl_system_notify.context; + } + } + else { + /* Global driver notification handler */ + +- if (acpi_gbl_drv_notify.handler) { +- global_handler = acpi_gbl_drv_notify.handler; +- global_context = acpi_gbl_drv_notify.context; ++ if (acpi_gbl_device_notify.handler) { ++ global_handler = acpi_gbl_device_notify.handler; ++ global_context = acpi_gbl_device_notify.context; + } + } + +@@ -259,8 +259,8 @@ + + handler_obj = notify_info->notify.handler_obj; + if (handler_obj) { +- handler_obj->notify_handler.handler (notify_info->notify.node, notify_info->notify.value, +- handler_obj->notify_handler.context); ++ handler_obj->notify.handler (notify_info->notify.node, notify_info->notify.value, ++ handler_obj->notify.context); + } + + /* All done with the info object */ +diff -urN linux-2.4.20/drivers/acpi/events/evregion.c linux-2.4.20-acpi/drivers/acpi/events/evregion.c +--- linux-2.4.20/drivers/acpi/events/evregion.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/events/evregion.c 2003-05-13 02:53:25.000000000 +0200 +@@ -161,10 +161,10 @@ + } + + /* +- * _REG method has two arguments +- * Arg0: Integer: Operation region space ID ++ * _REG method has two arguments ++ * Arg0: Integer: Operation region space ID + * Same value as region_obj->Region.space_id +- * Arg1: Integer: connection status ++ * Arg1: Integer: connection status + * 1 for connecting the handler, + * 0 for disconnecting the handler + * Passed as a parameter +@@ -180,16 +180,14 @@ + goto cleanup; + } + +- /* +- * Set up the parameter objects +- */ ++ /* Set up the parameter objects */ ++ + params[0]->integer.value = region_obj->region.space_id; + params[1]->integer.value = function; + params[2] = NULL; + +- /* +- * Execute the method, no return value +- */ ++ /* Execute the method, no return value */ ++ + ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, region_obj2->extra.method_REG, NULL)); + status = acpi_ns_evaluate_by_handle (region_obj2->extra.method_REG, params, NULL); + +@@ -245,10 +243,9 @@ + return_ACPI_STATUS (AE_NOT_EXIST); + } + +- /* +- * Ensure that there is a handler associated with this region +- */ +- handler_desc = region_obj->region.addr_handler; ++ /* Ensure that there is a handler associated with this region */ ++ ++ handler_desc = region_obj->region.address_space; + if (!handler_desc) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "no handler for region(%p) [%s]\n", + region_obj, acpi_ut_get_region_name (region_obj->region.space_id))); +@@ -264,24 +261,23 @@ + /* + * This region has not been initialized yet, do it + */ +- region_setup = handler_desc->addr_handler.setup; ++ region_setup = handler_desc->address_space.setup; + if (!region_setup) { +- /* +- * Bad news, no init routine and not init'd +- */ ++ /* No initialization routine, exit with error */ ++ + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, 
"No init routine for region(%p) [%s]\n", + region_obj, acpi_ut_get_region_name (region_obj->region.space_id))); +- return_ACPI_STATUS (AE_UNKNOWN_STATUS); ++ return_ACPI_STATUS (AE_NOT_EXIST); + } + + /* + * We must exit the interpreter because the region setup will potentially +- * execute control methods ++ * execute control methods (e.g., _REG method for this region) + */ + acpi_ex_exit_interpreter (); + + status = region_setup (region_obj, ACPI_REGION_ACTIVATE, +- handler_desc->addr_handler.context, ®ion_context); ++ handler_desc->address_space.context, ®ion_context); + + /* Re-enter the interpreter */ + +@@ -290,9 +286,8 @@ + return_ACPI_STATUS (status2); + } + +- /* +- * Init routine may fail +- */ ++ /* Check for failure of the Region Setup */ ++ + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region Init: %s [%s]\n", + acpi_format_exception (status), +@@ -300,40 +295,50 @@ + return_ACPI_STATUS (status); + } + +- region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; +- + /* +- * Save the returned context for use in all accesses to +- * this particular region. ++ * Region initialization may have been completed by region_setup + */ +- region_obj2->extra.region_context = region_context; ++ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { ++ region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; ++ ++ if (region_obj2->extra.region_context) { ++ /* The handler for this region was already installed */ ++ ++ ACPI_MEM_FREE (region_context); ++ } ++ else { ++ /* ++ * Save the returned context for use in all accesses to ++ * this particular region ++ */ ++ region_obj2->extra.region_context = region_context; ++ } ++ } + } + +- /* +- * We have everything we need, begin the process +- */ +- handler = handler_desc->addr_handler.handler; ++ /* We have everything we need, we can invoke the address space handler */ ++ ++ handler = handler_desc->address_space.handler; + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", +- ®ion_obj->region.addr_handler->addr_handler, handler, ++ ®ion_obj->region.address_space->address_space, handler, + ACPI_HIDWORD (address), ACPI_LODWORD (address), + acpi_ut_get_region_name (region_obj->region.space_id))); + +- if (!(handler_desc->addr_handler.flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { ++ if (!(handler_desc->address_space.flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { + /* +- * For handlers other than the default (supplied) handlers, we must +- * exit the interpreter because the handler *might* block -- we don't +- * know what it will do, so we can't hold the lock on the intepreter. ++ * For handlers other than the default (supplied) handlers, we must ++ * exit the interpreter because the handler *might* block -- we don't ++ * know what it will do, so we can't hold the lock on the intepreter. + */ + acpi_ex_exit_interpreter(); + } + +- /* +- * Invoke the handler. 
+- */
++ /* Call the handler */
++
+ status = handler (function, address, bit_width, value,
+- handler_desc->addr_handler.context,
++ handler_desc->address_space.context,
+ region_obj2->extra.region_context);
+
+ if (ACPI_FAILURE (status)) {
+@@ -342,7 +347,7 @@
+ acpi_format_exception (status)));
+ }
+
+- if (!(handler_desc->addr_handler.flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
++ if (!(handler_desc->address_space.flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ /*
+ * We just returned from a non-default handler, we must re-enter the
+ * interpreter
+@@ -393,35 +398,30 @@
+ }
+ region_context = region_obj2->extra.region_context;
+
+- /*
+- * Get the address handler from the region object
+- */
+- handler_obj = region_obj->region.addr_handler;
++ /* Get the address handler from the region object */
++
++ handler_obj = region_obj->region.address_space;
+ if (!handler_obj) {
+- /*
+- * This region has no handler, all done
+- */
++ /* This region has no handler, all done */
++
+ return_VOID;
+ }
+
++ /* Find this region in the handler's list */
+
+- /*
+- * Find this region in the handler's list
+- */
+- obj_desc = handler_obj->addr_handler.region_list;
+- last_obj_ptr = &handler_obj->addr_handler.region_list;
++ obj_desc = handler_obj->address_space.region_list;
++ last_obj_ptr = &handler_obj->address_space.region_list;
+
+ while (obj_desc) {
+- /*
+- * See if this is the one
+- */
++ /* Is this the correct Region? */
++
+ if (obj_desc == region_obj) {
+ ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION,
+ "Removing Region %p from address handler %p\n",
+ region_obj, handler_obj));
+- /*
+- * This is it, remove it from the handler's list
+- */
++
++ /* This is it, remove it from the handler's list */
++
+ *last_obj_ptr = obj_desc->region.next;
+ obj_desc->region.next = NULL; /* Must clear field */
+
+@@ -432,9 +432,8 @@
+ }
+ }
+
+- /*
+- * Now stop region accesses by executing the _REG method
+- */
++ /* Now stop region accesses by executing the _REG method */
++
+ status = acpi_ev_execute_reg_method (region_obj, 0);
+ if (ACPI_FAILURE (status)) {
+ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s from region _REG, [%s]\n",
+@@ -449,16 +448,14 @@
+ }
+ }
+
+- /*
+- * Call the setup handler with the deactivate notification
+- */
+- region_setup = handler_obj->addr_handler.setup;
++ /* Call the setup handler with the deactivate notification */
++
++ region_setup = handler_obj->address_space.setup;
+ status = region_setup (region_obj, ACPI_REGION_DEACTIVATE,
+- handler_obj->addr_handler.context, &region_context);
++ handler_obj->address_space.context, &region_context);
++
++ /* Init routine may fail, Just ignore errors */
+
+- /*
+- * Init routine may fail, Just ignore errors
+- */
+ if (ACPI_FAILURE (status)) {
+ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s from region init, [%s]\n",
+ acpi_format_exception (status),
+@@ -468,31 +465,29 @@
+ region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+
+ /*
+- * Remove handler reference in the region
++ * Remove handler reference in the region
+ *
+- * NOTE: this doesn't mean that the region goes away
+- * The region is just inaccessible as indicated to
+- * the _REG method
++ * NOTE: this doesn't mean that the region goes away
++ * The region is just inaccessible as indicated to
++ * the _REG method
+ *
+- * If the region is on the handler's list
+- * this better be the region's handler
++ * If the region is on the handler's list
++ * this better be the region's handler
+ */
+- region_obj->region.addr_handler = NULL;
++ region_obj->region.address_space = NULL;
++ 
acpi_ut_remove_reference (handler_obj);
+
+ return_VOID;
++ }
+
+- } /* found the right handler */
++ /* Walk the linked list of handlers */
+
+- /*
+- * Move through the linked list of handlers
+- */
+ last_obj_ptr = &obj_desc->region.next;
+ obj_desc = obj_desc->region.next;
+ }
+
+- /*
+- * If we get here, the region was not in the handler's region list
+- */
++ /* If we get here, the region was not in the handler's region list */
++
+ ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION,
+ "Cannot remove region %p from address handler %p\n",
+ region_obj, handler_obj));
+@@ -534,16 +529,19 @@
+ region_obj, handler_obj, acpi_ut_get_region_name (region_obj->region.space_id)));
+
+
+- /*
+- * Link this region to the front of the handler's list
+- */
+- region_obj->region.next = handler_obj->addr_handler.region_list;
+- handler_obj->addr_handler.region_list = region_obj;
++ /* Link this region to the front of the handler's list */
+
+- /*
+- * Set the region's handler
+- */
+- region_obj->region.addr_handler = handler_obj;
++ region_obj->region.next = handler_obj->address_space.region_list;
++ handler_obj->address_space.region_list = region_obj;
++
++ /* Install the region's handler */
++
++ if (region_obj->region.address_space) {
++ return_ACPI_STATUS (AE_ALREADY_EXISTS);
++ }
++
++ region_obj->region.address_space = handler_obj;
++ acpi_ut_add_reference (handler_obj);
+
+ /*
+ * Tell all users that this region is usable by running the _REG
+@@ -571,14 +569,14 @@
+
+ /*******************************************************************************
+ *
+- * FUNCTION: acpi_ev_addr_handler_helper
++ * FUNCTION: acpi_ev_install_handler
+ *
+ * PARAMETERS: Handle - Node to be dumped
+ * Level - Nesting level of the handle
+ * Context - Passed into acpi_ns_walk_namespace
+ *
+ * DESCRIPTION: This routine installs an address handler into objects that are
+- * of type Region.
++ * of type Region or Device.
+ *
+ * If the Object is a Device, and the device has a handler of
+ * the same type then the search is terminated in that branch.
+@@ -589,20 +587,20 @@
+ ******************************************************************************/
+
+ acpi_status
+-acpi_ev_addr_handler_helper (
++acpi_ev_install_handler (
+ acpi_handle obj_handle,
+ u32 level,
+ void *context,
+ void **return_value)
+ {
+ union acpi_operand_object *handler_obj;
+- union acpi_operand_object *tmp_obj;
++ union acpi_operand_object *next_handler_obj;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+
+- ACPI_FUNCTION_NAME ("ev_addr_handler_helper");
++ ACPI_FUNCTION_NAME ("ev_install_handler");
+
+
+ handler_obj = (union acpi_operand_object *) context;
+@@ -621,8 +619,8 @@
+ }
+
+ /*
+- * We only care about regions.and objects
+- * that can have address handlers
++ * We only care about regions and objects
++ * that are allowed to have address space handlers
+ */
+ if ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_REGION) &&
+@@ -634,81 +632,70 @@
+
+ obj_desc = acpi_ns_get_attached_object (node);
+ if (!obj_desc) {
+- /*
+- * The object DNE, we don't care about it
+- */
++ /* No object, just exit */
++
+ return (AE_OK);
+ }
+
+- /*
+- * Devices are handled different than regions
+- */
++ /* Devices are handled differently than regions */
++
+ if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_DEVICE) {
+- /*
+- * See if this guy has any handlers
+- */
+- tmp_obj = obj_desc->device.addr_handler;
+- while (tmp_obj) {
+- /*
+- * Now let's see if it's for the same address space. 
+- */ +- if (tmp_obj->addr_handler.space_id == handler_obj->addr_handler.space_id) { +- /* +- * It's for the same address space +- */ ++ /* Check if this Device already has a handler for this address space */ ++ ++ next_handler_obj = obj_desc->device.address_space; ++ while (next_handler_obj) { ++ /* Found a handler, is it for the same address space? */ ++ ++ if (next_handler_obj->address_space.space_id == handler_obj->address_space.space_id) { + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Found handler for region [%s] in device %p(%p) handler %p\n", +- acpi_ut_get_region_name (handler_obj->addr_handler.space_id), +- obj_desc, tmp_obj, handler_obj)); ++ acpi_ut_get_region_name (handler_obj->address_space.space_id), ++ obj_desc, next_handler_obj, handler_obj)); + + /* +- * Since the object we found it on was a device, then it +- * means that someone has already installed a handler for +- * the branch of the namespace from this device on. Just +- * bail out telling the walk routine to not traverse this +- * branch. This preserves the scoping rule for handlers. ++ * Since the object we found it on was a device, then it ++ * means that someone has already installed a handler for ++ * the branch of the namespace from this device on. Just ++ * bail out telling the walk routine to not traverse this ++ * branch. This preserves the scoping rule for handlers. + */ + return (AE_CTRL_DEPTH); + } + +- /* +- * Move through the linked list of handlers +- */ +- tmp_obj = tmp_obj->addr_handler.next; ++ /* Walk the linked list of handlers attached to this device */ ++ ++ next_handler_obj = next_handler_obj->address_space.next; + } + + /* +- * As long as the device didn't have a handler for this +- * space we don't care about it. We just ignore it and +- * proceed. ++ * As long as the device didn't have a handler for this ++ * space we don't care about it. We just ignore it and ++ * proceed. + */ + return (AE_OK); + } + +- /* +- * Only here if it was a region +- */ +- if (obj_desc->region.space_id != handler_obj->addr_handler.space_id) { ++ /* Object is a Region */ ++ ++ if (obj_desc->region.space_id != handler_obj->address_space.space_id) { + /* +- * This region is for a different address space +- * ignore it ++ * This region is for a different address space ++ * -- just ignore it + */ + return (AE_OK); + } + + /* +- * Now we have a region and it is for the handler's address +- * space type. ++ * Now we have a region and it is for the handler's address ++ * space type. 
+ * +- * First disconnect region for any previous handler (if any) ++ * First disconnect region for any previous handler (if any) + */ + acpi_ev_detach_region (obj_desc, FALSE); + +- /* +- * Then connect the region to the new handler +- */ +- status = acpi_ev_attach_region (handler_obj, obj_desc, FALSE); ++ /* Connect the region to the new handler */ + ++ status = acpi_ev_attach_region (handler_obj, obj_desc, FALSE); + return (status); + } + +diff -urN linux-2.4.20/drivers/acpi/events/evrgnini.c linux-2.4.20-acpi/drivers/acpi/events/evrgnini.c +--- linux-2.4.20/drivers/acpi/events/evrgnini.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/events/evrgnini.c 2003-05-13 02:53:25.000000000 +0200 +@@ -165,10 +165,11 @@ + void **region_context) + { + acpi_status status = AE_OK; +- acpi_integer temp; ++ acpi_integer pci_value; + struct acpi_pci_id *pci_id = *region_context; + union acpi_operand_object *handler_obj; +- struct acpi_namespace_node *node; ++ struct acpi_namespace_node *parent_node; ++ struct acpi_namespace_node *pci_root_node; + union acpi_operand_object *region_obj = (union acpi_operand_object *) handle; + struct acpi_device_id object_hID; + +@@ -176,7 +177,7 @@ + ACPI_FUNCTION_TRACE ("ev_pci_config_region_setup"); + + +- handler_obj = region_obj->region.addr_handler; ++ handler_obj = region_obj->region.address_space; + if (!handler_obj) { + /* + * No installed handler. This shouldn't happen because the dispatch +@@ -187,45 +188,15 @@ + return_ACPI_STATUS (AE_NOT_EXIST); + } + ++ *region_context = NULL; + if (function == ACPI_REGION_DEACTIVATE) { + if (pci_id) { + ACPI_MEM_FREE (pci_id); +- *region_context = NULL; + } +- + return_ACPI_STATUS (status); + } + +- /* Create a new context */ +- +- pci_id = ACPI_MEM_CALLOCATE (sizeof (struct acpi_pci_id)); +- if (!pci_id) { +- return_ACPI_STATUS (AE_NO_MEMORY); +- } +- +- /* +- * For PCI Config space access, we have to pass the segment, bus, +- * device and function numbers. This routine must acquire those. +- */ +- +- /* +- * First get device and function numbers from the _ADR object +- * in the parent's scope. +- */ +- node = acpi_ns_get_parent_node (region_obj->region.node); +- +- /* Evaluate the _ADR object */ +- +- status = acpi_ut_evaluate_numeric_object (METHOD_NAME__ADR, node, &temp); +- +- /* +- * The default is zero, and since the allocation above zeroed +- * the data, just do nothing on failure. +- */ +- if (ACPI_SUCCESS (status)) { +- pci_id->device = ACPI_HIWORD (ACPI_LODWORD (temp)); +- pci_id->function = ACPI_LOWORD (ACPI_LODWORD (temp)); +- } ++ parent_node = acpi_ns_get_parent_node (region_obj->region.node); + + /* + * Get the _SEG and _BBN values from the device upon which the handler +@@ -236,16 +207,16 @@ + */ + + /* +- * If the addr_handler.Node is still pointing to the root, we need ++ * If the address_space.Node is still pointing to the root, we need + * to scan upward for a PCI Root bridge and re-associate the op_region + * handlers with that device. 
+ */ +- if (handler_obj->addr_handler.node == acpi_gbl_root_node) { +- /* +- * Node is currently the parent object +- */ +- while (node != acpi_gbl_root_node) { +- status = acpi_ut_execute_HID (node, &object_hID); ++ if (handler_obj->address_space.node == acpi_gbl_root_node) { ++ /* Start search from the parent object */ ++ ++ pci_root_node = parent_node; ++ while (pci_root_node != acpi_gbl_root_node) { ++ status = acpi_ut_execute_HID (pci_root_node, &object_hID); + if (ACPI_SUCCESS (status)) { + /* Got a valid _HID, check if this is a PCI root */ + +@@ -253,44 +224,89 @@ + sizeof (PCI_ROOT_HID_STRING)))) { + /* Install a handler for this PCI root bridge */ + +- status = acpi_install_address_space_handler ((acpi_handle) node, ++ status = acpi_install_address_space_handler ((acpi_handle) pci_root_node, + ACPI_ADR_SPACE_PCI_CONFIG, + ACPI_DEFAULT_HANDLER, NULL, NULL); + if (ACPI_FAILURE (status)) { +- ACPI_REPORT_ERROR (("Could not install pci_config handler for %4.4s, %s\n", +- node->name.ascii, acpi_format_exception (status))); ++ if (status == AE_SAME_HANDLER) { ++ /* ++ * It is OK if the handler is already installed on the root ++ * bridge. Still need to return a context object for the ++ * new PCI_Config operation region, however. ++ */ ++ status = AE_OK; ++ } ++ else { ++ ACPI_REPORT_ERROR (( ++ "Could not install pci_config handler for Root Bridge %4.4s, %s\n", ++ pci_root_node->name.ascii, acpi_format_exception (status))); ++ } + } + break; + } + } + +- node = acpi_ns_get_parent_node (node); ++ pci_root_node = acpi_ns_get_parent_node (pci_root_node); + } ++ ++ /* PCI root bridge not found, use namespace root node */ + } + else { +- node = handler_obj->addr_handler.node; ++ pci_root_node = handler_obj->address_space.node; + } + + /* +- * The PCI segment number comes from the _SEG method ++ * If this region is now initialized, we are done. ++ * (install_address_space_handler could have initialized it) + */ +- status = acpi_ut_evaluate_numeric_object (METHOD_NAME__SEG, node, &temp); +- if (ACPI_SUCCESS (status)) { +- pci_id->segment = ACPI_LOWORD (temp); ++ if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) { ++ return_ACPI_STATUS (AE_OK); ++ } ++ ++ /* Region is still not initialized. Create a new context */ ++ ++ pci_id = ACPI_MEM_CALLOCATE (sizeof (struct acpi_pci_id)); ++ if (!pci_id) { ++ return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* +- * The PCI bus number comes from the _BBN method ++ * For PCI_Config space access, we need the segment, bus, ++ * device and function numbers. Acquire them here. + */ +- status = acpi_ut_evaluate_numeric_object (METHOD_NAME__BBN, node, &temp); +- if (ACPI_SUCCESS (status)) { +- pci_id->bus = ACPI_LOWORD (temp); +- } + + /* +- * Complete this device's pci_id ++ * Get the PCI device and function numbers from the _ADR object ++ * contained in the parent's scope. ++ */ ++ status = acpi_ut_evaluate_numeric_object (METHOD_NAME__ADR, parent_node, &pci_value); ++ ++ /* ++ * The default is zero, and since the allocation above zeroed ++ * the data, just do nothing on failure. 
+ */ +- acpi_os_derive_pci_id (node, region_obj->region.node, &pci_id); ++ if (ACPI_SUCCESS (status)) { ++ pci_id->device = ACPI_HIWORD (ACPI_LODWORD (pci_value)); ++ pci_id->function = ACPI_LOWORD (ACPI_LODWORD (pci_value)); ++ } ++ ++ /* The PCI segment number comes from the _SEG method */ ++ ++ status = acpi_ut_evaluate_numeric_object (METHOD_NAME__SEG, pci_root_node, &pci_value); ++ if (ACPI_SUCCESS (status)) { ++ pci_id->segment = ACPI_LOWORD (pci_value); ++ } ++ ++ /* The PCI bus number comes from the _BBN method */ ++ ++ status = acpi_ut_evaluate_numeric_object (METHOD_NAME__BBN, pci_root_node, &pci_value); ++ if (ACPI_SUCCESS (status)) { ++ pci_id->bus = ACPI_LOWORD (pci_value); ++ } ++ ++ /* Complete this device's pci_id */ ++ ++ acpi_os_derive_pci_id (pci_root_node, region_obj->region.node, &pci_id); + + *region_context = pci_id; + return_ACPI_STATUS (AE_OK); +@@ -451,14 +467,15 @@ + node = acpi_ns_get_parent_node (region_obj->region.node); + space_id = region_obj->region.space_id; + +- region_obj->region.addr_handler = NULL; ++ /* Setup defaults */ ++ ++ region_obj->region.address_space = NULL; + region_obj2->extra.method_REG = NULL; + region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE); + region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED; + +- /* +- * Find any "_REG" method associated with this region definition +- */ ++ /* Find any "_REG" method associated with this region definition */ ++ + status = acpi_ns_search_node (*reg_name_ptr, node, + ACPI_TYPE_METHOD, &method_node); + if (ACPI_SUCCESS (status)) { +@@ -475,29 +492,27 @@ + * ie: acpi_gbl_root_node->parent_entry being set to NULL + */ + while (node) { +- /* +- * Check to see if a handler exists +- */ ++ /* Check to see if a handler exists */ ++ + handler_obj = NULL; + obj_desc = acpi_ns_get_attached_object (node); + if (obj_desc) { +- /* +- * Can only be a handler if the object exists +- */ ++ /* Can only be a handler if the object exists */ ++ + switch (node->type) { + case ACPI_TYPE_DEVICE: + +- handler_obj = obj_desc->device.addr_handler; ++ handler_obj = obj_desc->device.address_space; + break; + + case ACPI_TYPE_PROCESSOR: + +- handler_obj = obj_desc->processor.addr_handler; ++ handler_obj = obj_desc->processor.address_space; + break; + + case ACPI_TYPE_THERMAL: + +- handler_obj = obj_desc->thermal_zone.addr_handler; ++ handler_obj = obj_desc->thermal_zone.address_space; + break; + + default: +@@ -508,7 +523,7 @@ + while (handler_obj) { + /* Is this handler of the correct type? 
*/ + +- if (handler_obj->addr_handler.space_id == space_id) { ++ if (handler_obj->address_space.space_id == space_id) { + /* Found correct handler */ + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, +@@ -523,7 +538,7 @@ + + /* Try next handler in the list */ + +- handler_obj = handler_obj->addr_handler.next; ++ handler_obj = handler_obj->address_space.next; + } + } + +@@ -534,9 +549,8 @@ + node = acpi_ns_get_parent_node (node); + } + +- /* +- * If we get here, there is no handler for this region +- */ ++ /* If we get here, there is no handler for this region */ ++ + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "No handler for region_type %s(%X) (region_obj %p)\n", + acpi_ut_get_region_name (space_id), space_id, region_obj)); +diff -urN linux-2.4.20/drivers/acpi/events/evxface.c linux-2.4.20-acpi/drivers/acpi/events/evxface.c +--- linux-2.4.20/drivers/acpi/events/evxface.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/events/evxface.c 2003-05-13 02:53:25.000000000 +0200 +@@ -244,22 +244,22 @@ + /* Make sure the handler is not already installed */ + + if (((handler_type == ACPI_SYSTEM_NOTIFY) && +- acpi_gbl_sys_notify.handler) || ++ acpi_gbl_system_notify.handler) || + ((handler_type == ACPI_DEVICE_NOTIFY) && +- acpi_gbl_drv_notify.handler)) { ++ acpi_gbl_device_notify.handler)) { + status = AE_ALREADY_EXISTS; + goto unlock_and_exit; + } + + if (handler_type == ACPI_SYSTEM_NOTIFY) { +- acpi_gbl_sys_notify.node = node; +- acpi_gbl_sys_notify.handler = handler; +- acpi_gbl_sys_notify.context = context; ++ acpi_gbl_system_notify.node = node; ++ acpi_gbl_system_notify.handler = handler; ++ acpi_gbl_system_notify.context = context; + } + else /* ACPI_DEVICE_NOTIFY */ { +- acpi_gbl_drv_notify.node = node; +- acpi_gbl_drv_notify.handler = handler; +- acpi_gbl_drv_notify.context = context; ++ acpi_gbl_device_notify.node = node; ++ acpi_gbl_device_notify.handler = handler; ++ acpi_gbl_device_notify.context = context; + } + + /* Global notify handler installed */ +@@ -282,13 +282,12 @@ + + obj_desc = acpi_ns_get_attached_object (node); + if (obj_desc) { +- + /* Object exists - make sure there's no handler */ + + if (((handler_type == ACPI_SYSTEM_NOTIFY) && +- obj_desc->common_notify.sys_handler) || ++ obj_desc->common_notify.system_notify) || + ((handler_type == ACPI_DEVICE_NOTIFY) && +- obj_desc->common_notify.drv_handler)) { ++ obj_desc->common_notify.device_notify)) { + status = AE_ALREADY_EXISTS; + goto unlock_and_exit; + } +@@ -305,6 +304,11 @@ + /* Attach new object to the Node */ + + status = acpi_ns_attach_object (device, obj_desc, node->type); ++ ++ /* Remove local reference to the object */ ++ ++ acpi_ut_remove_reference (obj_desc); ++ + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } +@@ -318,15 +322,15 @@ + goto unlock_and_exit; + } + +- notify_obj->notify_handler.node = node; +- notify_obj->notify_handler.handler = handler; +- notify_obj->notify_handler.context = context; ++ notify_obj->notify.node = node; ++ notify_obj->notify.handler = handler; ++ notify_obj->notify.context = context; + + if (handler_type == ACPI_SYSTEM_NOTIFY) { +- obj_desc->common_notify.sys_handler = notify_obj; ++ obj_desc->common_notify.system_notify = notify_obj; + } + else /* ACPI_DEVICE_NOTIFY */ { +- obj_desc->common_notify.drv_handler = notify_obj; ++ obj_desc->common_notify.device_notify = notify_obj; + } + } + +@@ -395,22 +399,22 @@ + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing notify handler for ROOT object.\n")); + + if (((handler_type == ACPI_SYSTEM_NOTIFY) && +- 
!acpi_gbl_sys_notify.handler) || ++ !acpi_gbl_system_notify.handler) || + ((handler_type == ACPI_DEVICE_NOTIFY) && +- !acpi_gbl_drv_notify.handler)) { ++ !acpi_gbl_device_notify.handler)) { + status = AE_NOT_EXIST; + goto unlock_and_exit; + } + + if (handler_type == ACPI_SYSTEM_NOTIFY) { +- acpi_gbl_sys_notify.node = NULL; +- acpi_gbl_sys_notify.handler = NULL; +- acpi_gbl_sys_notify.context = NULL; ++ acpi_gbl_system_notify.node = NULL; ++ acpi_gbl_system_notify.handler = NULL; ++ acpi_gbl_system_notify.context = NULL; + } + else { +- acpi_gbl_drv_notify.node = NULL; +- acpi_gbl_drv_notify.handler = NULL; +- acpi_gbl_drv_notify.context = NULL; ++ acpi_gbl_device_notify.node = NULL; ++ acpi_gbl_device_notify.handler = NULL; ++ acpi_gbl_device_notify.context = NULL; + } + } + +@@ -436,14 +440,14 @@ + /* Object exists - make sure there's an existing handler */ + + if (handler_type == ACPI_SYSTEM_NOTIFY) { +- notify_obj = obj_desc->common_notify.sys_handler; ++ notify_obj = obj_desc->common_notify.system_notify; + } + else { +- notify_obj = obj_desc->common_notify.drv_handler; ++ notify_obj = obj_desc->common_notify.device_notify; + } + + if ((!notify_obj) || +- (notify_obj->notify_handler.handler != handler)) { ++ (notify_obj->notify.handler != handler)) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } +@@ -451,10 +455,10 @@ + /* Remove the handler */ + + if (handler_type == ACPI_SYSTEM_NOTIFY) { +- obj_desc->common_notify.sys_handler = NULL; ++ obj_desc->common_notify.system_notify = NULL; + } + else { +- obj_desc->common_notify.drv_handler = NULL; ++ obj_desc->common_notify.device_notify = NULL; + } + + acpi_ut_remove_reference (notify_obj); +diff -urN linux-2.4.20/drivers/acpi/events/evxfevnt.c linux-2.4.20-acpi/drivers/acpi/events/evxfevnt.c +--- linux-2.4.20/drivers/acpi/events/evxfevnt.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/events/evxfevnt.c 2003-05-13 02:53:25.000000000 +0200 +@@ -654,7 +654,11 @@ + } + + status = acpi_ns_attach_object (node, obj_desc, ACPI_TYPE_DEVICE); ++ ++ /* Remove local reference to the object */ ++ + acpi_ut_remove_reference (obj_desc); ++ + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } +diff -urN linux-2.4.20/drivers/acpi/events/evxfregn.c linux-2.4.20-acpi/drivers/acpi/events/evxfregn.c +--- linux-2.4.20/drivers/acpi/events/evxfregn.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/events/evxfregn.c 2003-05-13 02:53:25.000000000 +0200 +@@ -154,7 +154,7 @@ + break; + + default: +- status = AE_NOT_EXIST; ++ status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + } +@@ -170,26 +170,36 @@ + obj_desc = acpi_ns_get_attached_object (node); + if (obj_desc) { + /* +- * The object exists. ++ * The attached device object already exists. + * Make sure the handler is not already installed. + */ ++ handler_obj = obj_desc->device.address_space; + +- /* check the address handler the user requested */ ++ /* Walk the handler list for this device */ + +- handler_obj = obj_desc->device.addr_handler; + while (handler_obj) { +- /* +- * Found an Address handler, see if user requested this +- * address space. +- */ +- if(handler_obj->addr_handler.space_id == space_id) { +- status = AE_ALREADY_EXISTS; ++ /* Same space_id indicates a handler already installed */ ++ ++ if(handler_obj->address_space.space_id == space_id) { ++ if (handler_obj->address_space.handler == handler) { ++ /* ++ * It is (relatively) OK to attempt to install the SAME ++ * handler twice. This can easily happen with PCI_Config space. 
++ */ ++ status = AE_SAME_HANDLER; ++ goto unlock_and_exit; ++ } ++ else { ++ /* A handler is already installed */ ++ ++ status = AE_ALREADY_EXISTS; ++ } + goto unlock_and_exit; + } + + /* Walk the linked list of handlers */ + +- handler_obj = handler_obj->addr_handler.next; ++ handler_obj = handler_obj->address_space.next; + } + } + else { +@@ -218,8 +228,12 @@ + /* Attach the new object to the Node */ + + status = acpi_ns_attach_object (node, obj_desc, type); ++ ++ /* Remove local reference to the object */ ++ ++ acpi_ut_remove_reference (obj_desc); ++ + if (ACPI_FAILURE (status)) { +- acpi_ut_remove_reference (obj_desc); + goto unlock_and_exit; + } + } +@@ -241,14 +255,25 @@ + goto unlock_and_exit; + } + +- handler_obj->addr_handler.space_id = (u8) space_id; +- handler_obj->addr_handler.hflags = flags; +- handler_obj->addr_handler.next = obj_desc->device.addr_handler; +- handler_obj->addr_handler.region_list = NULL; +- handler_obj->addr_handler.node = node; +- handler_obj->addr_handler.handler = handler; +- handler_obj->addr_handler.context = context; +- handler_obj->addr_handler.setup = setup; ++ /* Init handler obj */ ++ ++ handler_obj->address_space.space_id = (u8) space_id; ++ handler_obj->address_space.hflags = flags; ++ handler_obj->address_space.region_list = NULL; ++ handler_obj->address_space.node = node; ++ handler_obj->address_space.handler = handler; ++ handler_obj->address_space.context = context; ++ handler_obj->address_space.setup = setup; ++ ++ /* Install at head of Device.address_space list */ ++ ++ handler_obj->address_space.next = obj_desc->device.address_space; ++ ++ /* ++ * The Device object is the first reference on the handler_obj. ++ * Each region that uses the handler adds a reference. ++ */ ++ obj_desc->device.address_space = handler_obj; + + /* + * Walk the namespace finding all of the regions this +@@ -262,18 +287,9 @@ + * In either case, back up and search down the remainder + * of the branch + */ +- status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, device, +- ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, +- acpi_ev_addr_handler_helper, +- handler_obj, NULL); +- +- /* Place this handler 1st on the list */ +- +- handler_obj->common.reference_count = +- (u16) (handler_obj->common.reference_count + +- obj_desc->common.reference_count - 1); +- obj_desc->device.addr_handler = handler_obj; +- ++ status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, device, ACPI_UINT32_MAX, ++ ACPI_NS_WALK_UNLOCK, acpi_ev_install_handler, ++ handler_obj, NULL); + + unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); +@@ -341,12 +357,12 @@ + + /* Find the address handler the user requested */ + +- handler_obj = obj_desc->device.addr_handler; +- last_obj_ptr = &obj_desc->device.addr_handler; ++ handler_obj = obj_desc->device.address_space; ++ last_obj_ptr = &obj_desc->device.address_space; + while (handler_obj) { + /* We have a handler, see if user requested this one */ + +- if (handler_obj->addr_handler.space_id == space_id) { ++ if (handler_obj->address_space.space_id == space_id) { + /* Matched space_id, first dereference this in the Regions */ + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, +@@ -354,7 +370,7 @@ + handler_obj, handler, acpi_ut_get_region_name (space_id), + node, obj_desc)); + +- region_obj = handler_obj->addr_handler.region_list; ++ region_obj = handler_obj->address_space.region_list; + + /* Walk the handler's region list */ + +@@ -372,13 +388,13 @@ + * Walk the list: Just grab the head because the + * detach_region removed the previous head. 
+ */ +- region_obj = handler_obj->addr_handler.region_list; ++ region_obj = handler_obj->address_space.region_list; + + } + + /* Remove this Handler object from the list */ + +- *last_obj_ptr = handler_obj->addr_handler.next; ++ *last_obj_ptr = handler_obj->address_space.next; + + /* Now we can delete the handler object */ + +@@ -388,8 +404,8 @@ + + /* Walk the linked list of handlers */ + +- last_obj_ptr = &handler_obj->addr_handler.next; +- handler_obj = handler_obj->addr_handler.next; ++ last_obj_ptr = &handler_obj->address_space.next; ++ handler_obj = handler_obj->address_space.next; + } + + /* The handler does not exist */ +diff -urN linux-2.4.20/drivers/acpi/executer/exconfig.c linux-2.4.20-acpi/drivers/acpi/executer/exconfig.c +--- linux-2.4.20/drivers/acpi/executer/exconfig.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/executer/exconfig.c 2003-05-13 02:53:25.000000000 +0200 +@@ -360,11 +360,11 @@ + /* The table must be either an SSDT or a PSDT */ + + if ((!ACPI_STRNCMP (table_ptr->signature, +- acpi_gbl_acpi_table_data[ACPI_TABLE_PSDT].signature, +- acpi_gbl_acpi_table_data[ACPI_TABLE_PSDT].sig_length)) && ++ acpi_gbl_table_data[ACPI_TABLE_PSDT].signature, ++ acpi_gbl_table_data[ACPI_TABLE_PSDT].sig_length)) && + (!ACPI_STRNCMP (table_ptr->signature, +- acpi_gbl_acpi_table_data[ACPI_TABLE_SSDT].signature, +- acpi_gbl_acpi_table_data[ACPI_TABLE_SSDT].sig_length))) { ++ acpi_gbl_table_data[ACPI_TABLE_SSDT].signature, ++ acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Table has invalid signature [%4.4s], must be SSDT or PSDT\n", + table_ptr->signature)); +diff -urN linux-2.4.20/drivers/acpi/executer/exdump.c linux-2.4.20-acpi/drivers/acpi/executer/exdump.c +--- linux-2.4.20/drivers/acpi/executer/exdump.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/executer/exdump.c 2003-05-13 02:53:25.000000000 +0200 +@@ -635,9 +635,9 @@ + + case ACPI_TYPE_DEVICE: + +- acpi_ex_out_pointer ("addr_handler", obj_desc->device.addr_handler); +- acpi_ex_out_pointer ("sys_handler", obj_desc->device.sys_handler); +- acpi_ex_out_pointer ("drv_handler", obj_desc->device.drv_handler); ++ acpi_ex_out_pointer ("address_space", obj_desc->device.address_space); ++ acpi_ex_out_pointer ("system_notify", obj_desc->device.system_notify); ++ acpi_ex_out_pointer ("device_notify", obj_desc->device.device_notify); + break; + + +@@ -673,7 +673,7 @@ + acpi_ex_out_integer ("Flags", obj_desc->region.flags); + acpi_ex_out_address ("Address", obj_desc->region.address); + acpi_ex_out_integer ("Length", obj_desc->region.length); +- acpi_ex_out_pointer ("addr_handler", obj_desc->region.addr_handler); ++ acpi_ex_out_pointer ("address_space", obj_desc->region.address_space); + acpi_ex_out_pointer ("Next", obj_desc->region.next); + break; + +@@ -682,8 +682,8 @@ + + acpi_ex_out_integer ("system_level", obj_desc->power_resource.system_level); + acpi_ex_out_integer ("resource_order", obj_desc->power_resource.resource_order); +- acpi_ex_out_pointer ("sys_handler", obj_desc->power_resource.sys_handler); +- acpi_ex_out_pointer ("drv_handler", obj_desc->power_resource.drv_handler); ++ acpi_ex_out_pointer ("system_notify", obj_desc->power_resource.system_notify); ++ acpi_ex_out_pointer ("device_notify", obj_desc->power_resource.device_notify); + break; + + +@@ -692,17 +692,17 @@ + acpi_ex_out_integer ("Processor ID", obj_desc->processor.proc_id); + acpi_ex_out_integer ("Length", obj_desc->processor.length); + acpi_ex_out_address ("Address", 
(acpi_physical_address) obj_desc->processor.address); +- acpi_ex_out_pointer ("sys_handler", obj_desc->processor.sys_handler); +- acpi_ex_out_pointer ("drv_handler", obj_desc->processor.drv_handler); +- acpi_ex_out_pointer ("addr_handler", obj_desc->processor.addr_handler); ++ acpi_ex_out_pointer ("system_notify", obj_desc->processor.system_notify); ++ acpi_ex_out_pointer ("device_notify", obj_desc->processor.device_notify); ++ acpi_ex_out_pointer ("address_space", obj_desc->processor.address_space); + break; + + + case ACPI_TYPE_THERMAL: + +- acpi_ex_out_pointer ("sys_handler", obj_desc->thermal_zone.sys_handler); +- acpi_ex_out_pointer ("drv_handler", obj_desc->thermal_zone.drv_handler); +- acpi_ex_out_pointer ("addr_handler", obj_desc->thermal_zone.addr_handler); ++ acpi_ex_out_pointer ("system_notify", obj_desc->thermal_zone.system_notify); ++ acpi_ex_out_pointer ("device_notify", obj_desc->thermal_zone.device_notify); ++ acpi_ex_out_pointer ("address_space", obj_desc->thermal_zone.address_space); + break; + + +@@ -762,18 +762,18 @@ + + case ACPI_TYPE_LOCAL_ADDRESS_HANDLER: + +- acpi_ex_out_integer ("space_id", obj_desc->addr_handler.space_id); +- acpi_ex_out_pointer ("Next", obj_desc->addr_handler.next); +- acpi_ex_out_pointer ("region_list", obj_desc->addr_handler.region_list); +- acpi_ex_out_pointer ("Node", obj_desc->addr_handler.node); +- acpi_ex_out_pointer ("Context", obj_desc->addr_handler.context); ++ acpi_ex_out_integer ("space_id", obj_desc->address_space.space_id); ++ acpi_ex_out_pointer ("Next", obj_desc->address_space.next); ++ acpi_ex_out_pointer ("region_list", obj_desc->address_space.region_list); ++ acpi_ex_out_pointer ("Node", obj_desc->address_space.node); ++ acpi_ex_out_pointer ("Context", obj_desc->address_space.context); + break; + + + case ACPI_TYPE_LOCAL_NOTIFY: + +- acpi_ex_out_pointer ("Node", obj_desc->notify_handler.node); +- acpi_ex_out_pointer ("Context", obj_desc->notify_handler.context); ++ acpi_ex_out_pointer ("Node", obj_desc->notify.node); ++ acpi_ex_out_pointer ("Context", obj_desc->notify.context); + break; + + +diff -urN linux-2.4.20/drivers/acpi/namespace/nsalloc.c linux-2.4.20-acpi/drivers/acpi/namespace/nsalloc.c +--- linux-2.4.20/drivers/acpi/namespace/nsalloc.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/namespace/nsalloc.c 2003-05-13 02:53:25.000000000 +0200 +@@ -116,19 +116,33 @@ + prev_node = NULL; + next_node = parent_node->child; + ++ /* Find the node that is the previous peer in the parent's child list */ ++ + while (next_node != node) { + prev_node = next_node; + next_node = prev_node->peer; + } + + if (prev_node) { ++ /* Node is not first child, unlink it */ ++ + prev_node->peer = next_node->peer; + if (next_node->flags & ANOBJ_END_OF_PEER_LIST) { + prev_node->flags |= ANOBJ_END_OF_PEER_LIST; + } + } + else { +- parent_node->child = next_node->peer; ++ /* Node is first child (has no previous peer) */ ++ ++ if (next_node->flags & ANOBJ_END_OF_PEER_LIST) { ++ /* No peers at all */ ++ ++ parent_node->child = NULL; ++ } ++ else { /* Link peer list to parent */ ++ ++ parent_node->child = next_node->peer; ++ } + } + + +@@ -222,7 +236,7 @@ + struct acpi_namespace_node *node, /* New Child*/ + acpi_object_type type) + { +- u16 owner_id = TABLE_ID_DSDT; ++ u16 owner_id = 0; + struct acpi_namespace_node *child_node; + #ifdef ACPI_ALPHABETIC_NAMESPACE + +@@ -355,6 +369,7 @@ + { + struct acpi_namespace_node *child_node; + struct acpi_namespace_node *next_node; ++ struct acpi_namespace_node *node; + u8 flags; + + +@@ 
-399,6 +414,25 @@ + * Detach an object if there is one, then free the child node + */ + acpi_ns_detach_object (child_node); ++ ++ /* ++ * Decrement the reference count(s) of all parents up to ++ * the root! (counts were incremented when the node was created) ++ */ ++ node = child_node; ++ while ((node = acpi_ns_get_parent_node (node)) != NULL) { ++ node->reference_count--; ++ } ++ ++ /* There should be only one reference remaining on this node */ ++ ++ if (child_node->reference_count != 1) { ++ ACPI_REPORT_WARNING (("Existing references (%d) on node being deleted (%p)\n", ++ child_node->reference_count, child_node)); ++ } ++ ++ /* Now we can delete the node */ ++ + ACPI_MEM_FREE (child_node); + + /* And move on to the next child in the list */ +@@ -512,7 +546,7 @@ + * + ******************************************************************************/ + +-static void ++void + acpi_ns_remove_reference ( + struct acpi_namespace_node *node) + { +diff -urN linux-2.4.20/drivers/acpi/namespace/nsload.c linux-2.4.20-acpi/drivers/acpi/namespace/nsload.c +--- linux-2.4.20/drivers/acpi/namespace/nsload.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/namespace/nsload.c 2003-05-13 02:53:25.000000000 +0200 +@@ -79,7 +79,7 @@ + + /* Check if table contains valid AML (must be DSDT, PSDT, SSDT, etc.) */ + +- if (!(acpi_gbl_acpi_table_data[table_desc->type].flags & ACPI_TABLE_EXECUTABLE)) { ++ if (!(acpi_gbl_table_data[table_desc->type].flags & ACPI_TABLE_EXECUTABLE)) { + /* Just ignore this table */ + + return_ACPI_STATUS (AE_OK); +@@ -182,7 +182,7 @@ + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Loading DSDT\n")); + +- table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_DSDT]; ++ table_desc = acpi_gbl_table_lists[ACPI_TABLE_DSDT].next; + + /* If table already loaded into namespace, just return */ + +@@ -190,8 +190,6 @@ + goto unlock_and_exit; + } + +- table_desc->table_id = TABLE_ID_DSDT; +- + /* Now load the single DSDT */ + + status = acpi_ns_load_table (table_desc, acpi_gbl_root_node); +@@ -205,13 +203,13 @@ + case ACPI_TABLE_SSDT: + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Loading %d SSDTs\n", +- acpi_gbl_acpi_tables[ACPI_TABLE_SSDT].count)); ++ acpi_gbl_table_lists[ACPI_TABLE_SSDT].count)); + + /* + * Traverse list of SSDT tables + */ +- table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_SSDT]; +- for (i = 0; i < acpi_gbl_acpi_tables[ACPI_TABLE_SSDT].count; i++) { ++ table_desc = acpi_gbl_table_lists[ACPI_TABLE_SSDT].next; ++ for (i = 0; i < acpi_gbl_table_lists[ACPI_TABLE_SSDT].count; i++) { + /* + * Only attempt to load table if it is not + * already loaded! +@@ -233,14 +231,14 @@ + case ACPI_TABLE_PSDT: + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Loading %d PSDTs\n", +- acpi_gbl_acpi_tables[ACPI_TABLE_PSDT].count)); ++ acpi_gbl_table_lists[ACPI_TABLE_PSDT].count)); + + /* + * Traverse list of PSDT tables + */ +- table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_PSDT]; ++ table_desc = acpi_gbl_table_lists[ACPI_TABLE_PSDT].next; + +- for (i = 0; i < acpi_gbl_acpi_tables[ACPI_TABLE_PSDT].count; i++) { ++ for (i = 0; i < acpi_gbl_table_lists[ACPI_TABLE_PSDT].count; i++) { + /* Only attempt to load table if it is not already loaded! 
*/
+
+ if (!table_desc->loaded_into_namespace) {
+diff -urN linux-2.4.20/drivers/acpi/namespace/nsparse.c linux-2.4.20-acpi/drivers/acpi/namespace/nsparse.c
+--- linux-2.4.20/drivers/acpi/namespace/nsparse.c 2003-05-13 02:53:06.000000000 +0200
++++ linux-2.4.20-acpi/drivers/acpi/namespace/nsparse.c 2003-05-13 02:53:25.000000000 +0200
+@@ -85,10 +85,9 @@
+ return_ACPI_STATUS (AE_NO_MEMORY);
+ }
+
+-
+ /* Create and initialize a new walk state */
+
+- walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT,
++ walk_state = acpi_ds_create_walk_state (table_desc->table_id,
+ NULL, NULL, NULL);
+ if (!walk_state) {
+ acpi_ps_free_op (parse_root);
+diff -urN linux-2.4.20/drivers/acpi/namespace/nsutils.c linux-2.4.20-acpi/drivers/acpi/namespace/nsutils.c
+--- linux-2.4.20/drivers/acpi/namespace/nsutils.c 2003-05-13 02:53:06.000000000 +0200
++++ linux-2.4.20-acpi/drivers/acpi/namespace/nsutils.c 2003-05-13 02:53:25.000000000 +0200
+@@ -799,38 +799,31 @@
+ acpi_ns_terminate (void)
+ {
+ union acpi_operand_object *obj_desc;
+- struct acpi_namespace_node *this_node;
+
+
+ ACPI_FUNCTION_TRACE ("ns_terminate");
+
+
+- this_node = acpi_gbl_root_node;
+-
+ /*
+- * 1) Free the entire namespace -- all objects, tables, and stacks
++ * 1) Free the entire namespace -- all nodes and objects
+ *
+- * Delete all objects linked to the root
+- * (additional table descriptors)
++ * Delete all object descriptors attached to namespace nodes
+ */
+- acpi_ns_delete_namespace_subtree (this_node);
++ acpi_ns_delete_namespace_subtree (acpi_gbl_root_node);
+
+- /* Detach any object(s) attached to the root */
++ /* Detach any objects attached to the root */
+
+- obj_desc = acpi_ns_get_attached_object (this_node);
++ obj_desc = acpi_ns_get_attached_object (acpi_gbl_root_node);
+ if (obj_desc) {
+- acpi_ns_detach_object (this_node);
+- acpi_ut_remove_reference (obj_desc);
++ acpi_ns_detach_object (acpi_gbl_root_node);
+ }
+
+- acpi_ns_delete_children (this_node);
+ ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Namespace freed\n"));
+
+-
+ /*
+ * 2) Now we can delete the ACPI tables
+ */
+- acpi_tb_delete_acpi_tables ();
++ acpi_tb_delete_all_tables ();
+ ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "ACPI Tables freed\n"));
+
+ return_VOID;
+diff -urN linux-2.4.20/drivers/acpi/osl.c linux-2.4.20-acpi/drivers/acpi/osl.c
+--- linux-2.4.20/drivers/acpi/osl.c 2003-05-13 02:53:06.000000000 +0200
++++ linux-2.4.20-acpi/drivers/acpi/osl.c 2003-05-13 02:53:25.000000000 +0200
+@@ -997,7 +997,7 @@
+ return 0;
+
+ for (; count-- && str && *str; str++) {
+- if (isalnum(*str) || *str == ' ')
++ if (isalnum(*str) || *str == ' ' || *str == ':')
+ *p++ = *str;
+ else if (*str == '\'' || *str == '"')
+ continue;
+diff -urN linux-2.4.20/drivers/acpi/parser/pswalk.c linux-2.4.20-acpi/drivers/acpi/parser/pswalk.c
+--- linux-2.4.20/drivers/acpi/parser/pswalk.c 2003-05-13 02:53:06.000000000 +0200
++++ linux-2.4.20-acpi/drivers/acpi/parser/pswalk.c 2003-05-13 02:53:25.000000000 +0200
+@@ -270,7 +270,7 @@
+ return_VOID;
+ }
+
+- walk_state = acpi_ds_create_walk_state (TABLE_ID_DSDT, NULL, NULL, thread);
++ walk_state = acpi_ds_create_walk_state (0, NULL, NULL, thread);
+ if (!walk_state) {
+ return_VOID;
+ }
+diff -urN linux-2.4.20/drivers/acpi/parser/psxface.c linux-2.4.20-acpi/drivers/acpi/parser/psxface.c
+--- linux-2.4.20/drivers/acpi/parser/psxface.c 2003-05-13 02:53:06.000000000 +0200
++++ linux-2.4.20-acpi/drivers/acpi/parser/psxface.c 2003-05-13 02:53:25.000000000 +0200
+@@ -178,7 +178,7 @@
+
+ /* Create and initialize a new walk state */
+
+- walk_state = 
acpi_ds_create_walk_state (TABLE_ID_DSDT, NULL, NULL, NULL); ++ walk_state = acpi_ds_create_walk_state (0, NULL, NULL, NULL); + if (!walk_state) { + return_ACPI_STATUS (AE_NO_MEMORY); + } +diff -urN linux-2.4.20/drivers/acpi/tables/tbconvrt.c linux-2.4.20-acpi/drivers/acpi/tables/tbconvrt.c +--- linux-2.4.20/drivers/acpi/tables/tbconvrt.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/tables/tbconvrt.c 2003-05-13 02:53:25.000000000 +0200 +@@ -460,7 +460,7 @@ + + /* Free the original table */ + +- table_desc = &acpi_gbl_acpi_tables[ACPI_TABLE_FADT]; ++ table_desc = acpi_gbl_table_lists[ACPI_TABLE_FADT].next; + acpi_tb_delete_single_table (table_desc); + + /* Install the new table */ +diff -urN linux-2.4.20/drivers/acpi/tables/tbgetall.c linux-2.4.20-acpi/drivers/acpi/tables/tbgetall.c +--- linux-2.4.20/drivers/acpi/tables/tbgetall.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/tables/tbgetall.c 2003-05-13 02:53:25.000000000 +0200 +@@ -307,7 +307,7 @@ + + /* Always delete the RSDP mapping, we are done with it */ + +- acpi_tb_delete_acpi_table (ACPI_TABLE_RSDP); ++ acpi_tb_delete_tables_by_type (ACPI_TABLE_RSDP); + return_ACPI_STATUS (status); + } + +diff -urN linux-2.4.20/drivers/acpi/tables/tbget.c linux-2.4.20-acpi/drivers/acpi/tables/tbget.c +--- linux-2.4.20/drivers/acpi/tables/tbget.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/tables/tbget.c 2003-05-13 02:53:25.000000000 +0200 +@@ -456,18 +456,18 @@ + * instance is always in the list head. + */ + if (instance == 1) { +- /* +- * Just pluck the pointer out of the global table! +- * Will be null if no table is present +- */ +- *table_ptr_loc = acpi_gbl_acpi_tables[table_type].pointer; ++ /* Get the first */ ++ ++ if (acpi_gbl_table_lists[table_type].next) { ++ *table_ptr_loc = acpi_gbl_table_lists[table_type].next->pointer; ++ } + return_ACPI_STATUS (AE_OK); + } + + /* + * Check for instance out of range + */ +- if (instance > acpi_gbl_acpi_tables[table_type].count) { ++ if (instance > acpi_gbl_table_lists[table_type].count) { + return_ACPI_STATUS (AE_NOT_EXIST); + } + +@@ -478,7 +478,7 @@ + * need to walk from the 2nd table until we reach the Instance + * that the user is looking for and return its table pointer. 
+ */ +- table_desc = acpi_gbl_acpi_tables[table_type].next; ++ table_desc = acpi_gbl_table_lists[table_type].next; + for (i = 2; i < instance; i++) { + table_desc = table_desc->next; + } +diff -urN linux-2.4.20/drivers/acpi/tables/tbinstal.c linux-2.4.20-acpi/drivers/acpi/tables/tbinstal.c +--- linux-2.4.20/drivers/acpi/tables/tbinstal.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/tables/tbinstal.c 2003-05-13 02:53:25.000000000 +0200 +@@ -79,13 +79,13 @@ + /* + * Search for a signature match among the known table types + */ +- for (i = 0; i < NUM_ACPI_TABLES; i++) { +- if (!(acpi_gbl_acpi_table_data[i].flags & search_type)) { ++ for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) { ++ if (!(acpi_gbl_table_data[i].flags & search_type)) { + continue; + } + +- if (!ACPI_STRNCMP (signature, acpi_gbl_acpi_table_data[i].signature, +- acpi_gbl_acpi_table_data[i].sig_length)) { ++ if (!ACPI_STRNCMP (signature, acpi_gbl_table_data[i].signature, ++ acpi_gbl_table_data[i].sig_length)) { + /* Found a signature match, return index if requested */ + + if (table_info) { +@@ -94,7 +94,7 @@ + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "Table [%4.4s] is an ACPI table consumed by the core subsystem\n", +- (char *) acpi_gbl_acpi_table_data[i].signature)); ++ (char *) acpi_gbl_table_data[i].signature)); + + return_ACPI_STATUS (AE_OK); + } +@@ -149,7 +149,7 @@ + } + + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "%s located at %p\n", +- acpi_gbl_acpi_table_data[table_info->type].name, table_info->pointer)); ++ acpi_gbl_table_data[table_info->type].name, table_info->pointer)); + + (void) acpi_ut_release_mutex (ACPI_MTX_TABLES); + return_ACPI_STATUS (status); +@@ -239,70 +239,58 @@ + acpi_table_type table_type, + struct acpi_table_desc *table_info) + { +- struct acpi_table_desc *list_head; ++ struct acpi_table_list *list_head; + struct acpi_table_desc *table_desc; + + + ACPI_FUNCTION_TRACE_U32 ("tb_init_table_descriptor", table_type); + ++ ++ /* Allocate a descriptor for this table */ ++ ++ table_desc = ACPI_MEM_CALLOCATE (sizeof (struct acpi_table_desc)); ++ if (!table_desc) { ++ return_ACPI_STATUS (AE_NO_MEMORY); ++ } ++ + /* + * Install the table into the global data structure + */ +- list_head = &acpi_gbl_acpi_tables[table_type]; +- table_desc = list_head; ++ list_head = &acpi_gbl_table_lists[table_type]; + + /* + * Two major types of tables: 1) Only one instance is allowed. This + * includes most ACPI tables such as the DSDT. 2) Multiple instances of + * the table are allowed. This includes SSDT and PSDTs. + */ +- if (ACPI_IS_SINGLE_TABLE (acpi_gbl_acpi_table_data[table_type].flags)) { ++ if (ACPI_IS_SINGLE_TABLE (acpi_gbl_table_data[table_type].flags)) { + /* + * Only one table allowed, and a table has alread been installed + * at this location, so return an error. + */ +- if (list_head->pointer) { ++ if (list_head->next) { + return_ACPI_STATUS (AE_ALREADY_EXISTS); + } +- +- table_desc->count = 1; +- table_desc->prev = NULL; +- table_desc->next = NULL; + } +- else { +- /* +- * Multiple tables allowed for this table type, we must link +- * the new table in to the list of tables of this type. 
+- */
+- if (list_head->pointer) {
+- table_desc = ACPI_MEM_CALLOCATE (sizeof (struct acpi_table_desc));
+- if (!table_desc) {
+- return_ACPI_STATUS (AE_NO_MEMORY);
+- }
+-
+- list_head->count++;
+-
+- /* Update the original previous */
+-
+- list_head->prev->next = table_desc;
+-
+- /* Update new entry */
+
+- table_desc->prev = list_head->prev;
+- table_desc->next = list_head;
+-
+- /* Update list head */
++ /*
++ * Link the new table into the list of tables of this type.
++ * Just insert at the start of the list, order unimportant.
++ *
++ * table_desc->Prev is already NULL from calloc()
++ */
++ table_desc->next = list_head->next;
++ list_head->next = table_desc;
+
+- list_head->prev = table_desc;
+- }
+- else {
+- table_desc->count = 1;
+- }
++ if (table_desc->next) {
++ table_desc->next->prev = table_desc;
+ }
+
+- /* Common initialization of the table descriptor */
++ list_head->count++;
+
+- table_desc->type = table_info->type;
++ /* Finish initialization of the table descriptor */
++
++ table_desc->type = (u8) table_type;
+ table_desc->pointer = table_info->pointer;
+ table_desc->length = table_info->length;
+ table_desc->allocation = table_info->allocation;
+@@ -316,8 +304,8 @@
+ * Set the appropriate global pointer (if there is one) to point to the
+ * newly installed table
+ */
+- if (acpi_gbl_acpi_table_data[table_type].global_ptr) {
+- *(acpi_gbl_acpi_table_data[table_type].global_ptr) = table_info->pointer;
++ if (acpi_gbl_table_data[table_type].global_ptr) {
++ *(acpi_gbl_table_data[table_type].global_ptr) = table_info->pointer;
+ }
+
+ /* Return Data */
+@@ -331,7 +319,7 @@
+
+ /*******************************************************************************
+ *
+- * FUNCTION: acpi_tb_delete_acpi_tables
++ * FUNCTION: acpi_tb_delete_all_tables
+ *
+ * PARAMETERS: None.
+ *
+@@ -342,7 +330,7 @@
+ ******************************************************************************/
+
+ void
+-acpi_tb_delete_acpi_tables (void)
++acpi_tb_delete_all_tables (void)
+ {
+ acpi_table_type type;
+
+@@ -351,15 +339,15 @@
+ * Free memory allocated for ACPI tables
+ * Memory can either be mapped or allocated
+ */
+- for (type = 0; type < NUM_ACPI_TABLES; type++) {
+- acpi_tb_delete_acpi_table (type);
++ for (type = 0; type < NUM_ACPI_TABLE_TYPES; type++) {
++ acpi_tb_delete_tables_by_type (type);
+ }
+ }
+
+
+ /*******************************************************************************
+ *
+- * FUNCTION: acpi_tb_delete_acpi_table
++ * FUNCTION: acpi_tb_delete_tables_by_type
+ *
+ * PARAMETERS: Type - The table type to be deleted
+ *
+@@ -371,11 +359,15 @@
+ ******************************************************************************/
+
+ void
+-acpi_tb_delete_acpi_table (
++acpi_tb_delete_tables_by_type (
+ acpi_table_type type)
+ {
++ struct acpi_table_desc *table_desc;
++ u32 count;
++ u32 i;
+
+- ACPI_FUNCTION_TRACE_U32 ("tb_delete_acpi_table", type);
++
++ ACPI_FUNCTION_TRACE_U32 ("tb_delete_tables_by_type", type);
+
+
+ if (type > ACPI_TABLE_MAX) {
+@@ -416,43 +408,10 @@
+ }
+
+ /* Free the table */
+-
+- acpi_tb_free_acpi_tables_of_type (&acpi_gbl_acpi_tables[type]);
+-
+- (void) acpi_ut_release_mutex (ACPI_MTX_TABLES);
+- return_VOID;
+-}
+-
+-
+-/*******************************************************************************
+- *
+- * FUNCTION: acpi_tb_free_acpi_tables_of_type
+- *
+- * PARAMETERS: table_info - A table info struct
+- *
+- * RETURN: None.
+- *
+- * DESCRIPTION: Free the memory associated with an internal ACPI table
+- * Table mutex should be locked. 
+- * +- ******************************************************************************/ +- +-void +-acpi_tb_free_acpi_tables_of_type ( +- struct acpi_table_desc *list_head) +-{ +- struct acpi_table_desc *table_desc; +- u32 count; +- u32 i; +- +- +- ACPI_FUNCTION_TRACE_PTR ("tb_free_acpi_tables_of_type", list_head); +- +- + /* Get the head of the list */ + +- table_desc = list_head; +- count = list_head->count; ++ table_desc = acpi_gbl_table_lists[type].next; ++ count = acpi_gbl_table_lists[type].count; + + /* + * Walk the entire list, deleting both the allocated tables +@@ -462,6 +421,7 @@ + table_desc = acpi_tb_uninstall_table (table_desc); + } + ++ (void) acpi_ut_release_mutex (ACPI_MTX_TABLES); + return_VOID; + } + +@@ -484,30 +444,31 @@ + struct acpi_table_desc *table_desc) + { + +- if (!table_desc) { ++ /* Must have a valid table descriptor and pointer */ ++ ++ if ((!table_desc) || ++ (!table_desc->pointer)) { + return; + } + +- if (table_desc->pointer) { +- /* Valid table, determine type of memory allocation */ ++ /* Valid table, determine type of memory allocation */ + +- switch (table_desc->allocation) { +- case ACPI_MEM_NOT_ALLOCATED: +- break; ++ switch (table_desc->allocation) { ++ case ACPI_MEM_NOT_ALLOCATED: ++ break; + +- case ACPI_MEM_ALLOCATED: ++ case ACPI_MEM_ALLOCATED: + +- ACPI_MEM_FREE (table_desc->pointer); +- break; ++ ACPI_MEM_FREE (table_desc->pointer); ++ break; + +- case ACPI_MEM_MAPPED: ++ case ACPI_MEM_MAPPED: + +- acpi_os_unmap_memory (table_desc->pointer, table_desc->length); +- break; ++ acpi_os_unmap_memory (table_desc->pointer, table_desc->length); ++ break; + +- default: +- break; +- } ++ default: ++ break; + } + } + +@@ -533,18 +494,23 @@ + struct acpi_table_desc *next_desc; + + +- ACPI_FUNCTION_TRACE_PTR ("acpi_tb_uninstall_table", table_desc); ++ ACPI_FUNCTION_TRACE_PTR ("tb_uninstall_table", table_desc); + + + if (!table_desc) { + return_PTR (NULL); + } + +- /* Unlink the descriptor */ ++ /* Unlink the descriptor from the doubly linked list */ + + if (table_desc->prev) { + table_desc->prev->next = table_desc->next; + } ++ else { ++ /* Is first on list, update list head */ ++ ++ acpi_gbl_table_lists[table_desc->type].next = table_desc->next; ++ } + + if (table_desc->next) { + table_desc->next->prev = table_desc->prev; +@@ -554,23 +520,12 @@ + + acpi_tb_delete_single_table (table_desc); + +- /* Free the table descriptor (Don't delete the list head, tho) */ +- +- if ((table_desc->prev) == (table_desc->next)) { +- next_desc = NULL; ++ /* Free the table descriptor */ + +- /* Clear the list head */ ++ next_desc = table_desc->next; ++ ACPI_MEM_FREE (table_desc); + +- table_desc->pointer = NULL; +- table_desc->length = 0; +- table_desc->count = 0; +- } +- else { +- /* Free the table descriptor */ +- +- next_desc = table_desc->next; +- ACPI_MEM_FREE (table_desc); +- } ++ /* Return pointer to the next descriptor */ + + return_PTR (next_desc); + } +diff -urN linux-2.4.20/drivers/acpi/tables/tbutils.c linux-2.4.20-acpi/drivers/acpi/tables/tbutils.c +--- linux-2.4.20/drivers/acpi/tables/tbutils.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/tables/tbutils.c 2003-05-13 02:53:25.000000000 +0200 +@@ -66,26 +66,25 @@ + acpi_status + acpi_tb_handle_to_object ( + u16 table_id, +- struct acpi_table_desc **table_desc) ++ struct acpi_table_desc **return_table_desc) + { + u32 i; +- struct acpi_table_desc *list_head; ++ struct acpi_table_desc *table_desc; + + + ACPI_FUNCTION_NAME ("tb_handle_to_object"); + + + for (i = 0; i < ACPI_TABLE_MAX; i++) { 
+- list_head = &acpi_gbl_acpi_tables[i];
+- do {
+- if (list_head->table_id == table_id) {
+- *table_desc = list_head;
++ table_desc = acpi_gbl_table_lists[i].next;
++ while (table_desc) {
++ if (table_desc->table_id == table_id) {
++ *return_table_desc = table_desc;
+ return (AE_OK);
+ }
+
+- list_head = list_head->next;
+-
+- } while (list_head != &acpi_gbl_acpi_tables[i]);
++ table_desc = table_desc->next;
++ }
+ }
+
+ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "table_id=%X does not exist\n", table_id));
+diff -urN linux-2.4.20/drivers/acpi/tables/tbxface.c linux-2.4.20-acpi/drivers/acpi/tables/tbxface.c
+--- linux-2.4.20/drivers/acpi/tables/tbxface.c 2003-05-13 02:53:06.000000000 +0200
++++ linux-2.4.20-acpi/drivers/acpi/tables/tbxface.c 2003-05-13 02:53:25.000000000 +0200
+@@ -235,7 +235,7 @@
+ acpi_unload_table (
+ acpi_table_type table_type)
+ {
+- struct acpi_table_desc *list_head;
++ struct acpi_table_desc *table_desc;
+
+
+ ACPI_FUNCTION_TRACE ("acpi_unload_table");
+
+@@ -250,22 +250,22 @@
+
+ /* Find all tables of the requested type */
+
+- list_head = &acpi_gbl_acpi_tables[table_type];
+- do {
++ table_desc = acpi_gbl_table_lists[table_type].next;
++ while (table_desc) {
+ /*
+ * Delete all namespace entries owned by this table. Note that these
+ * entries can appear anywhere in the namespace by virtue of the AML
+ * "Scope" operator. Thus, we need to track ownership by an ID, not
+ * simply a position within the hierarchy
+ */
+- acpi_ns_delete_namespace_by_owner (list_head->table_id);
++ acpi_ns_delete_namespace_by_owner (table_desc->table_id);
+
+- /* Delete (or unmap) the actual table */
+-
+- acpi_tb_delete_acpi_table (table_type);
++ table_desc = table_desc->next;
++ }
+
+- } while (list_head != &acpi_gbl_acpi_tables[table_type]);
++ /* Delete (or unmap) all tables of this type */
+
++ acpi_tb_delete_tables_by_type (table_type);
+ return_ACPI_STATUS (AE_OK);
+ }
+
+@@ -313,7 +313,7 @@
+ /* Check the table type and instance */
+
+ if ((table_type > ACPI_TABLE_MAX) ||
+- (ACPI_IS_SINGLE_TABLE (acpi_gbl_acpi_table_data[table_type].flags) &&
++ (ACPI_IS_SINGLE_TABLE (acpi_gbl_table_data[table_type].flags) &&
+ instance > 1)) {
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+ }
+@@ -394,7 +394,7 @@
+ /* Check the table type and instance */
+
+ if ((table_type > ACPI_TABLE_MAX) ||
+- (ACPI_IS_SINGLE_TABLE (acpi_gbl_acpi_table_data[table_type].flags) &&
++ (ACPI_IS_SINGLE_TABLE (acpi_gbl_table_data[table_type].flags) &&
+ instance > 1)) {
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+ }
+diff -urN linux-2.4.20/drivers/acpi/utilities/utdelete.c linux-2.4.20-acpi/drivers/acpi/utilities/utdelete.c
+--- linux-2.4.20/drivers/acpi/utilities/utdelete.c 2003-05-13 02:53:06.000000000 +0200
++++ linux-2.4.20-acpi/drivers/acpi/utilities/utdelete.c 2003-05-13 02:53:25.000000000 +0200
+@@ -71,6 +71,7 @@
+ void *obj_pointer = NULL;
+ union acpi_operand_object *handler_desc;
+ union acpi_operand_object *second_desc;
++ union acpi_operand_object *next_desc;
+
+
+ ACPI_FUNCTION_TRACE_PTR ("ut_delete_internal_obj", object);
+
+@@ -136,6 +137,15 @@
+ if (object->device.gpe_block) {
+ (void) acpi_ev_delete_gpe_block (object->device.gpe_block);
+ }
++
++ /* Walk the handler list for this device */
++
++ handler_desc = object->device.address_space;
++ while (handler_desc) {
++ next_desc = handler_desc->address_space.next;
++ acpi_ut_remove_reference (handler_desc);
++ handler_desc = next_desc;
++ }
+ break;
+
+
+@@ -183,10 +193,13 @@
+ * default handlers -- and therefore, we created the context object
+ * locally, it was
not created by an external caller. + */ +- handler_desc = object->region.addr_handler; +- if ((handler_desc) && +- (handler_desc->addr_handler.hflags == ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { +- obj_pointer = second_desc->extra.region_context; ++ handler_desc = object->region.address_space; ++ if (handler_desc) { ++ if (handler_desc->address_space.hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) { ++ obj_pointer = second_desc->extra.region_context; ++ } ++ ++ acpi_ut_remove_reference (handler_desc); + } + + /* Now we can free the Extra object */ +@@ -211,7 +224,6 @@ + break; + } + +- + /* Free any allocated memory (pointer within the object) found above */ + + if (obj_pointer) { +@@ -299,7 +311,7 @@ + new_count = count; + + /* +- * Reference count action (increment, decrement, or force delete) ++ * Perform the reference count action (increment, decrement, or force delete) + */ + switch (action) { + +@@ -402,8 +414,6 @@ + { + acpi_status status; + u32 i; +- union acpi_operand_object *next; +- union acpi_operand_object *new; + union acpi_generic_state *state_list = NULL; + union acpi_generic_state *state; + +@@ -417,9 +427,8 @@ + return_ACPI_STATUS (AE_OK); + } + +- /* +- * Make sure that this isn't a namespace handle +- */ ++ /* Make sure that this isn't a namespace handle */ ++ + if (ACPI_GET_DESCRIPTOR_TYPE (object) == ACPI_DESC_TYPE_NAMED) { + ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Object %p is NS handle\n", object)); + return_ACPI_STATUS (AE_OK); +@@ -439,28 +448,8 @@ + switch (ACPI_GET_OBJECT_TYPE (object)) { + case ACPI_TYPE_DEVICE: + +- status = acpi_ut_create_update_state_and_push (object->device.addr_handler, +- action, &state_list); +- if (ACPI_FAILURE (status)) { +- goto error_exit; +- } +- +- acpi_ut_update_ref_count (object->device.sys_handler, action); +- acpi_ut_update_ref_count (object->device.drv_handler, action); +- break; +- +- +- case ACPI_TYPE_LOCAL_ADDRESS_HANDLER: +- +- /* Must walk list of address handlers */ +- +- next = object->addr_handler.next; +- while (next) { +- new = next->addr_handler.next; +- acpi_ut_update_ref_count (next, action); +- +- next = new; +- } ++ acpi_ut_update_ref_count (object->device.system_notify, action); ++ acpi_ut_update_ref_count (object->device.device_notify, action); + break; + + +@@ -590,16 +579,14 @@ + ACPI_FUNCTION_TRACE_PTR ("ut_add_reference", object); + + +- /* +- * Ensure that we have a valid object +- */ ++ /* Ensure that we have a valid object */ ++ + if (!acpi_ut_valid_internal_object (object)) { + return_VOID; + } + +- /* +- * We have a valid ACPI internal object, now increment the reference count +- */ ++ /* Increment the reference count */ ++ + (void) acpi_ut_update_object_reference (object, REF_INCREMENT); + return_VOID; + } +@@ -624,6 +611,7 @@ + + ACPI_FUNCTION_TRACE_PTR ("ut_remove_reference", object); + ++ + /* + * Allow a NULL pointer to be passed in, just ignore it. This saves + * each caller from having to check. Also, ignore NS nodes. 
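
(The device handler-list walk added to utdelete.c above follows the usual
save-then-release idiom: the next pointer must be read before the reference
is dropped, since dropping the last reference may free the node. A minimal
sketch of the idiom, with generic names -- node, put_node -- standing in for
the ACPI types:

	struct node { struct node *next; int refs; };

	void put_node(struct node *n);	/* may free n when refs reaches 0 */

	void release_list(struct node *head)
	{
		struct node *next;

		while (head) {
			next = head->next;	/* save: put_node() may free head */
			put_node(head);
			head = next;
		}
	}

The NULL-tolerant convention described in the comment above keeps callers
of such release paths free of per-call NULL checks.)
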
+@@ -634,9 +622,8 @@ + return_VOID; + } + +- /* +- * Ensure that we have a valid object +- */ ++ /* Ensure that we have a valid object */ ++ + if (!acpi_ut_valid_internal_object (object)) { + return_VOID; + } +diff -urN linux-2.4.20/drivers/acpi/utilities/utglobal.c linux-2.4.20-acpi/drivers/acpi/utilities/utglobal.c +--- linux-2.4.20/drivers/acpi/utilities/utglobal.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/utilities/utglobal.c 2003-05-13 02:53:25.000000000 +0200 +@@ -299,10 +299,10 @@ + ******************************************************************************/ + + +-struct acpi_table_desc acpi_gbl_acpi_tables[NUM_ACPI_TABLES]; ++struct acpi_table_list acpi_gbl_table_lists[NUM_ACPI_TABLE_TYPES]; + + +-struct acpi_table_support acpi_gbl_acpi_table_data[NUM_ACPI_TABLES] = ++struct acpi_table_support acpi_gbl_table_data[NUM_ACPI_TABLE_TYPES] = + { + /*********** Name, Signature, Global typed pointer Signature size, Type How many allowed?, Contains valid AML? */ + +@@ -535,12 +535,10 @@ + + + #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) +- + /* + * Strings and procedures used for debug only + */ + +- + /***************************************************************************** + * + * FUNCTION: acpi_ut_get_mutex_name +@@ -558,7 +556,7 @@ + u32 mutex_id) + { + +- if (mutex_id > MAX_MTX) ++ if (mutex_id > MAX_MUTEX) + { + return ("Invalid Mutex ID"); + } +@@ -566,7 +564,6 @@ + return (acpi_gbl_mutex_names[mutex_id]); + } + +- + #endif + + +@@ -630,9 +627,12 @@ + owner_id = acpi_gbl_next_table_owner_id; + acpi_gbl_next_table_owner_id++; + ++ /* Check for wraparound */ ++ + if (acpi_gbl_next_table_owner_id == ACPI_FIRST_METHOD_ID) + { + acpi_gbl_next_table_owner_id = ACPI_FIRST_TABLE_ID; ++ ACPI_REPORT_WARNING (("Table owner ID wraparound\n")); + } + break; + +@@ -644,6 +644,8 @@ + + if (acpi_gbl_next_method_owner_id == ACPI_FIRST_TABLE_ID) + { ++ /* Check for wraparound */ ++ + acpi_gbl_next_method_owner_id = ACPI_FIRST_METHOD_ID; + } + break; +@@ -710,23 +712,19 @@ + + /* ACPI table structure */ + +- for (i = 0; i < NUM_ACPI_TABLES; i++) ++ for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) + { +- acpi_gbl_acpi_tables[i].prev = &acpi_gbl_acpi_tables[i]; +- acpi_gbl_acpi_tables[i].next = &acpi_gbl_acpi_tables[i]; +- acpi_gbl_acpi_tables[i].pointer = NULL; +- acpi_gbl_acpi_tables[i].length = 0; +- acpi_gbl_acpi_tables[i].allocation = ACPI_MEM_NOT_ALLOCATED; +- acpi_gbl_acpi_tables[i].count = 0; ++ acpi_gbl_table_lists[i].next = NULL; ++ acpi_gbl_table_lists[i].count = 0; + } + + /* Mutex locked flags */ + +- for (i = 0; i < NUM_MTX; i++) ++ for (i = 0; i < NUM_MUTEX; i++) + { +- acpi_gbl_acpi_mutex_info[i].mutex = NULL; +- acpi_gbl_acpi_mutex_info[i].owner_id = ACPI_MUTEX_NOT_ACQUIRED; +- acpi_gbl_acpi_mutex_info[i].use_count = 0; ++ acpi_gbl_mutex_info[i].mutex = NULL; ++ acpi_gbl_mutex_info[i].owner_id = ACPI_MUTEX_NOT_ACQUIRED; ++ acpi_gbl_mutex_info[i].use_count = 0; + } + + /* GPE support */ +@@ -737,8 +735,8 @@ + + /* Global notify handlers */ + +- acpi_gbl_sys_notify.handler = NULL; +- acpi_gbl_drv_notify.handler = NULL; ++ acpi_gbl_system_notify.handler = NULL; ++ acpi_gbl_device_notify.handler = NULL; + acpi_gbl_init_handler = NULL; + + /* Global "typed" ACPI table pointers */ +diff -urN linux-2.4.20/drivers/acpi/utilities/utmisc.c linux-2.4.20-acpi/drivers/acpi/utilities/utmisc.c +--- linux-2.4.20/drivers/acpi/utilities/utmisc.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/utilities/utmisc.c 2003-05-13 
02:53:25.000000000 +0200 +@@ -549,7 +549,7 @@ + /* + * Create each of the predefined mutex objects + */ +- for (i = 0; i < NUM_MTX; i++) { ++ for (i = 0; i < NUM_MUTEX; i++) { + status = acpi_ut_create_mutex (i); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); +@@ -588,7 +588,7 @@ + /* + * Delete each predefined mutex object + */ +- for (i = 0; i < NUM_MTX; i++) { ++ for (i = 0; i < NUM_MUTEX; i++) { + (void) acpi_ut_delete_mutex (i); + } + +@@ -619,15 +619,15 @@ + ACPI_FUNCTION_TRACE_U32 ("ut_create_mutex", mutex_id); + + +- if (mutex_id > MAX_MTX) { ++ if (mutex_id > MAX_MUTEX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + +- if (!acpi_gbl_acpi_mutex_info[mutex_id].mutex) { ++ if (!acpi_gbl_mutex_info[mutex_id].mutex) { + status = acpi_os_create_semaphore (1, 1, +- &acpi_gbl_acpi_mutex_info[mutex_id].mutex); +- acpi_gbl_acpi_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; +- acpi_gbl_acpi_mutex_info[mutex_id].use_count = 0; ++ &acpi_gbl_mutex_info[mutex_id].mutex); ++ acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; ++ acpi_gbl_mutex_info[mutex_id].use_count = 0; + } + + return_ACPI_STATUS (status); +@@ -656,14 +656,14 @@ + ACPI_FUNCTION_TRACE_U32 ("ut_delete_mutex", mutex_id); + + +- if (mutex_id > MAX_MTX) { ++ if (mutex_id > MAX_MUTEX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + +- status = acpi_os_delete_semaphore (acpi_gbl_acpi_mutex_info[mutex_id].mutex); ++ status = acpi_os_delete_semaphore (acpi_gbl_mutex_info[mutex_id].mutex); + +- acpi_gbl_acpi_mutex_info[mutex_id].mutex = NULL; +- acpi_gbl_acpi_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; ++ acpi_gbl_mutex_info[mutex_id].mutex = NULL; ++ acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; + + return_ACPI_STATUS (status); + } +@@ -693,7 +693,7 @@ + ACPI_FUNCTION_NAME ("ut_acquire_mutex"); + + +- if (mutex_id > MAX_MTX) { ++ if (mutex_id > MAX_MUTEX) { + return (AE_BAD_PARAMETER); + } + +@@ -705,8 +705,8 @@ + * the mutex ordering rule. This indicates a coding error somewhere in + * the ACPI subsystem code. + */ +- for (i = mutex_id; i < MAX_MTX; i++) { +- if (acpi_gbl_acpi_mutex_info[i].owner_id == this_thread_id) { ++ for (i = mutex_id; i < MAX_MUTEX; i++) { ++ if (acpi_gbl_mutex_info[i].owner_id == this_thread_id) { + if (i == mutex_id) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Mutex [%s] already acquired by this thread [%X]\n", +@@ -728,14 +728,14 @@ + "Thread %X attempting to acquire Mutex [%s]\n", + this_thread_id, acpi_ut_get_mutex_name (mutex_id))); + +- status = acpi_os_wait_semaphore (acpi_gbl_acpi_mutex_info[mutex_id].mutex, ++ status = acpi_os_wait_semaphore (acpi_gbl_mutex_info[mutex_id].mutex, + 1, ACPI_WAIT_FOREVER); + if (ACPI_SUCCESS (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Thread %X acquired Mutex [%s]\n", + this_thread_id, acpi_ut_get_mutex_name (mutex_id))); + +- acpi_gbl_acpi_mutex_info[mutex_id].use_count++; +- acpi_gbl_acpi_mutex_info[mutex_id].owner_id = this_thread_id; ++ acpi_gbl_mutex_info[mutex_id].use_count++; ++ acpi_gbl_mutex_info[mutex_id].owner_id = this_thread_id; + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Thread %X could not acquire Mutex [%s] %s\n", +@@ -776,14 +776,14 @@ + "Thread %X releasing Mutex [%s]\n", this_thread_id, + acpi_ut_get_mutex_name (mutex_id))); + +- if (mutex_id > MAX_MTX) { ++ if (mutex_id > MAX_MUTEX) { + return (AE_BAD_PARAMETER); + } + + /* + * Mutex must be acquired in order to release it! 
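+ * (Illustrative sequence; id_low and id_high stand for any two mutex
+ * handles from aclocal.h with id_low < id_high:
+ *
+ * acpi_ut_acquire_mutex (id_low);
+ * acpi_ut_acquire_mutex (id_high);
+ * acpi_ut_release_mutex (id_high);
+ * acpi_ut_release_mutex (id_low);
+ *
+ * Releasing id_low while id_high is still held trips the ordering
+ * check below, just as acquiring out of ascending-ID order trips the
+ * corresponding check in acpi_ut_acquire_mutex.)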
+ */ +- if (acpi_gbl_acpi_mutex_info[mutex_id].owner_id == ACPI_MUTEX_NOT_ACQUIRED) { ++ if (acpi_gbl_mutex_info[mutex_id].owner_id == ACPI_MUTEX_NOT_ACQUIRED) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Mutex [%s] is not acquired, cannot release\n", + acpi_ut_get_mutex_name (mutex_id))); +@@ -797,8 +797,8 @@ + * ordering rule. This indicates a coding error somewhere in + * the ACPI subsystem code. + */ +- for (i = mutex_id; i < MAX_MTX; i++) { +- if (acpi_gbl_acpi_mutex_info[i].owner_id == this_thread_id) { ++ for (i = mutex_id; i < MAX_MUTEX; i++) { ++ if (acpi_gbl_mutex_info[i].owner_id == this_thread_id) { + if (i == mutex_id) { + continue; + } +@@ -813,9 +813,9 @@ + + /* Mark unlocked FIRST */ + +- acpi_gbl_acpi_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; ++ acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED; + +- status = acpi_os_signal_semaphore (acpi_gbl_acpi_mutex_info[mutex_id].mutex, 1); ++ status = acpi_os_signal_semaphore (acpi_gbl_mutex_info[mutex_id].mutex, 1); + + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Thread %X could not release Mutex [%s] %s\n", +diff -urN linux-2.4.20/drivers/acpi/utilities/utxface.c linux-2.4.20-acpi/drivers/acpi/utilities/utxface.c +--- linux-2.4.20/drivers/acpi/utilities/utxface.c 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/drivers/acpi/utilities/utxface.c 2003-05-13 02:53:25.000000000 +0200 +@@ -145,22 +145,8 @@ + + + /* +- * Install the default op_region handlers. These are installed unless +- * other handlers have already been installed via the +- * install_address_space_handler interface +- */ +- if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { +- ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing default address space handlers\n")); +- +- status = acpi_ev_init_address_spaces (); +- if (ACPI_FAILURE (status)) { +- return_ACPI_STATUS (status); +- } +- } +- +- /* + * We must initialize the hardware before we can enable ACPI. +- * FADT values are validated here. ++ * The values from the FADT are validated here. + */ + if (!(flags & ACPI_NO_HARDWARE_INIT)) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI hardware\n")); +@@ -172,7 +158,7 @@ + } + + /* +- * Enable ACPI on this platform ++ * Enable ACPI mode + */ + if (!(flags & ACPI_NO_ACPI_ENABLE)) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Going into ACPI mode\n")); +@@ -187,8 +173,9 @@ + } + + /* +- * Note: +- * We must have the hardware AND events initialized before we can execute ++ * Initialize ACPI Event handling ++ * ++ * NOTE: We must have the hardware AND events initialized before we can execute + * ANY control methods SAFELY. Any control method can require ACPI hardware + * support, so the hardware MUST be initialized before execution! + */ +@@ -201,7 +188,7 @@ + } + } + +- /* Install SCI handler, Global Lock handler, GPE handlers */ ++ /* Install the SCI handler, Global Lock handler, and GPE handlers */ + + if (!(flags & ACPI_NO_HANDLER_INIT)) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing SCI/GL/GPE handlers\n")); +@@ -237,14 +224,20 @@ + + ACPI_FUNCTION_TRACE ("acpi_initialize_objects"); + ++ + /* +- * Initialize all device objects in the namespace +- * This runs the _STA and _INI methods. ++ * Install the default op_region handlers. These are installed unless ++ * other handlers have already been installed via the ++ * install_address_space_handler interface. ++ * ++ * NOTE: This will cause _REG methods to be run. 
Any objects accessed ++ * by the _REG methods will be automatically initialized, even if they ++ * contain executable AML (see call to acpi_ns_initialize_objects below). + */ +- if (!(flags & ACPI_NO_DEVICE_INIT)) { +- ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI Devices\n")); ++ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { ++ ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing default address space handlers\n")); + +- status = acpi_ns_initialize_devices (); ++ status = acpi_ev_init_address_spaces (); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } +@@ -252,8 +245,8 @@ + + /* + * Initialize the objects that remain uninitialized. This +- * runs the executable AML that is part of the declaration of op_regions +- * and Fields. ++ * runs the executable AML that may be part of the declaration of these ++ * objects: operation_regions, buffer_fields, Buffers, and Packages. + */ + if (!(flags & ACPI_NO_OBJECT_INIT)) { + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI Objects\n")); +@@ -265,6 +258,19 @@ + } + + /* ++ * Initialize all device objects in the namespace ++ * This runs the _STA and _INI methods. ++ */ ++ if (!(flags & ACPI_NO_DEVICE_INIT)) { ++ ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI Devices\n")); ++ ++ status = acpi_ns_initialize_devices (); ++ if (ACPI_FAILURE (status)) { ++ return_ACPI_STATUS (status); ++ } ++ } ++ ++ /* + * Empty the caches (delete the cached objects) on the assumption that + * the table load filled them up more than they will be at runtime -- + * thus wasting non-paged memory. +@@ -431,9 +437,9 @@ + + /* Current status of the ACPI tables, per table type */ + +- info_ptr->num_table_types = NUM_ACPI_TABLES; +- for (i = 0; i < NUM_ACPI_TABLES; i++) { +- info_ptr->table_info[i].count = acpi_gbl_acpi_tables[i].count; ++ info_ptr->num_table_types = NUM_ACPI_TABLE_TYPES; ++ for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) { ++ info_ptr->table_info[i].count = acpi_gbl_table_lists[i].count; + } + + return_ACPI_STATUS (AE_OK); +diff -urN linux-2.4.20/include/acpi/acconfig.h linux-2.4.20-acpi/include/acpi/acconfig.h +--- linux-2.4.20/include/acpi/acconfig.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/acconfig.h 2003-05-13 02:53:25.000000000 +0200 +@@ -64,7 +64,7 @@ + + /* Version string */ + +-#define ACPI_CA_VERSION 0x20030424 ++#define ACPI_CA_VERSION 0x20030509 + + /* Maximum objects in the various object caches */ + +diff -urN linux-2.4.20/include/acpi/acevents.h linux-2.4.20-acpi/include/acpi/acevents.h +--- linux-2.4.20/include/acpi/acevents.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/acevents.h 2003-05-13 02:53:25.000000000 +0200 +@@ -165,7 +165,7 @@ + void *value); + + acpi_status +-acpi_ev_addr_handler_helper ( ++acpi_ev_install_handler ( + acpi_handle obj_handle, + u32 level, + void *context, +diff -urN linux-2.4.20/include/acpi/acexcep.h linux-2.4.20-acpi/include/acpi/acexcep.h +--- linux-2.4.20/include/acpi/acexcep.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/acexcep.h 2003-05-13 02:53:25.000000000 +0200 +@@ -94,8 +94,9 @@ + #define AE_NO_GLOBAL_LOCK (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL) + #define AE_LOGICAL_ADDRESS (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL) + #define AE_ABORT_METHOD (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL) ++#define AE_SAME_HANDLER (acpi_status) (0x001D | AE_CODE_ENVIRONMENTAL) + +-#define AE_CODE_ENV_MAX 0x001C ++#define AE_CODE_ENV_MAX 0x001D + + /* + * Programmer exceptions +@@ -219,7 
+220,8 @@ + "AE_NO_HARDWARE_RESPONSE", + "AE_NO_GLOBAL_LOCK", + "AE_LOGICAL_ADDRESS", +- "AE_ABORT_METHOD" ++ "AE_ABORT_METHOD", ++ "AE_SAME_HANDLER" + }; + + char const *acpi_gbl_exception_names_pgm[] = +diff -urN linux-2.4.20/include/acpi/acglobal.h linux-2.4.20-acpi/include/acpi/acglobal.h +--- linux-2.4.20/include/acpi/acglobal.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/acglobal.h 2003-05-13 02:53:25.000000000 +0200 +@@ -117,15 +117,15 @@ + /* + * ACPI Table info arrays + */ +-extern struct acpi_table_desc acpi_gbl_acpi_tables[NUM_ACPI_TABLES]; +-extern struct acpi_table_support acpi_gbl_acpi_table_data[NUM_ACPI_TABLES]; ++extern struct acpi_table_list acpi_gbl_table_lists[NUM_ACPI_TABLE_TYPES]; ++extern struct acpi_table_support acpi_gbl_table_data[NUM_ACPI_TABLE_TYPES]; + + /* + * Predefined mutex objects. This array contains the + * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. + * (The table maps local handles to the real OS handles) + */ +-ACPI_EXTERN struct acpi_mutex_info acpi_gbl_acpi_mutex_info [NUM_MTX]; ++ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[NUM_MUTEX]; + + + /***************************************************************************** +@@ -136,8 +136,8 @@ + + + ACPI_EXTERN struct acpi_memory_list acpi_gbl_memory_lists[ACPI_NUM_MEM_LISTS]; +-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_drv_notify; +-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_sys_notify; ++ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify; ++ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify; + ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler; + ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; + ACPI_EXTERN acpi_handle acpi_gbl_global_lock_semaphore; +@@ -202,7 +202,7 @@ + ****************************************************************************/ + + +-ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list; ++ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list; + + /* Control method single step flag */ + +@@ -215,7 +215,7 @@ + * + ****************************************************************************/ + +-ACPI_EXTERN union acpi_parse_object *acpi_gbl_parsed_namespace_root; ++ACPI_EXTERN union acpi_parse_object *acpi_gbl_parsed_namespace_root; + + /***************************************************************************** + * +@@ -236,8 +236,8 @@ + + extern struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS]; + ACPI_EXTERN struct acpi_fixed_event_handler acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]; +-ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; +-ACPI_EXTERN struct acpi_gpe_block_info *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; ++ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; ++ACPI_EXTERN struct acpi_gpe_block_info *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; + ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock; + + +diff -urN linux-2.4.20/include/acpi/aclocal.h linux-2.4.20-acpi/include/acpi/aclocal.h +--- linux-2.4.20/include/acpi/aclocal.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/aclocal.h 2003-05-13 02:53:25.000000000 +0200 +@@ -87,8 +87,8 @@ + #define ACPI_MTX_DEBUG_CMD_COMPLETE 11 + #define ACPI_MTX_DEBUG_CMD_READY 12 + +-#define MAX_MTX 12 +-#define NUM_MTX MAX_MTX+1 ++#define MAX_MUTEX 12 ++#define NUM_MUTEX MAX_MUTEX+1 + + + #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) +@@ -140,12 +140,8 @@ + typedef 
u16 acpi_owner_id; + #define ACPI_OWNER_TYPE_TABLE 0x0 + #define ACPI_OWNER_TYPE_METHOD 0x1 +-#define ACPI_FIRST_METHOD_ID 0x0000 +-#define ACPI_FIRST_TABLE_ID 0x8000 +- +-/* TBD: [Restructure] get rid of the need for this! */ +- +-#define TABLE_ID_DSDT (acpi_owner_id) 0x8000 ++#define ACPI_FIRST_METHOD_ID 0x0001 ++#define ACPI_FIRST_TABLE_ID 0xF000 + + + /* Field access granularities */ +@@ -232,13 +228,18 @@ + u64 physical_address; + u32 aml_length; + acpi_size length; +- u32 count; + acpi_owner_id table_id; + u8 type; + u8 allocation; + u8 loaded_into_namespace; + }; + ++struct acpi_table_list ++{ ++ struct acpi_table_desc *next; ++ u32 count; ++}; ++ + + struct acpi_find_context + { +diff -urN linux-2.4.20/include/acpi/acnamesp.h linux-2.4.20-acpi/include/acpi/acnamesp.h +--- linux-2.4.20/include/acpi/acnamesp.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/acnamesp.h 2003-05-13 02:53:25.000000000 +0200 +@@ -201,6 +201,11 @@ + char *name1, + char *name2); + ++void ++acpi_ns_remove_reference ( ++ struct acpi_namespace_node *node); ++ ++ + /* + * Namespace modification - nsmodify + */ +diff -urN linux-2.4.20/include/acpi/acobject.h linux-2.4.20-acpi/include/acpi/acobject.h +--- linux-2.4.20/include/acpi/acobject.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/acobject.h 2003-05-13 02:53:25.000000000 +0200 +@@ -112,9 +112,9 @@ + * Common fields for objects that support ASL notifications + */ + #define ACPI_COMMON_NOTIFY_INFO \ +- union acpi_operand_object *sys_handler; /* Handler for system notifies */\ +- union acpi_operand_object *drv_handler; /* Handler for driver notifies */\ +- union acpi_operand_object *addr_handler; /* Handler for Address space */ ++ union acpi_operand_object *system_notify; /* Handler for system notifies */\ ++ union acpi_operand_object *device_notify; /* Handler for driver notifies */\ ++ union acpi_operand_object *address_space; /* Handler for Address space */ + + + /****************************************************************************** +@@ -214,7 +214,7 @@ + ACPI_OBJECT_COMMON_HEADER + + u8 space_id; +- union acpi_operand_object *addr_handler; /* Handler for system notifies */ ++ union acpi_operand_object *address_space; /* Handler for region access */ + struct acpi_namespace_node *node; /* containing object */ + union acpi_operand_object *next; + u32 length; +@@ -446,8 +446,8 @@ + struct acpi_object_buffer_field buffer_field; + struct acpi_object_bank_field bank_field; + struct acpi_object_index_field index_field; +- struct acpi_object_notify_handler notify_handler; +- struct acpi_object_addr_handler addr_handler; ++ struct acpi_object_notify_handler notify; ++ struct acpi_object_addr_handler address_space; + struct acpi_object_reference reference; + struct acpi_object_extra extra; + struct acpi_object_data data; +diff -urN linux-2.4.20/include/acpi/acpiosxf.h linux-2.4.20-acpi/include/acpi/acpiosxf.h +--- linux-2.4.20/include/acpi/acpiosxf.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/acpiosxf.h 2003-05-13 02:53:25.000000000 +0200 +@@ -290,12 +290,12 @@ + u8 + acpi_os_readable ( + void *pointer, +- u32 length); ++ acpi_size length); + + u8 + acpi_os_writable ( + void *pointer, +- u32 length); ++ acpi_size length); + + u32 + acpi_os_get_timer ( +diff -urN linux-2.4.20/include/acpi/actables.h linux-2.4.20-acpi/include/acpi/actables.h +--- linux-2.4.20/include/acpi/actables.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/actables.h 2003-05-13 
02:53:25.000000000 +0200 +@@ -157,7 +157,7 @@ + acpi_status + acpi_tb_recognize_table ( + struct acpi_table_desc *table_info, +- u8 search_type); ++ u8 search_type); + + acpi_status + acpi_tb_init_table_descriptor ( +@@ -170,11 +170,11 @@ + */ + + void +-acpi_tb_delete_acpi_tables ( ++acpi_tb_delete_all_tables ( + void); + + void +-acpi_tb_delete_acpi_table ( ++acpi_tb_delete_tables_by_type ( + acpi_table_type type); + + void +@@ -185,10 +185,6 @@ + acpi_tb_uninstall_table ( + struct acpi_table_desc *table_desc); + +-void +-acpi_tb_free_acpi_tables_of_type ( +- struct acpi_table_desc *table_info); +- + + /* + * tbrsd - RSDP, RSDT utilities +diff -urN linux-2.4.20/include/acpi/actypes.h linux-2.4.20-acpi/include/acpi/actypes.h +--- linux-2.4.20/include/acpi/actypes.h 2003-05-13 02:53:06.000000000 +0200 ++++ linux-2.4.20-acpi/include/acpi/actypes.h 2003-05-13 02:53:25.000000000 +0200 +@@ -407,7 +407,7 @@ + #define ACPI_TABLE_SSDT (acpi_table_type) 5 + #define ACPI_TABLE_XSDT (acpi_table_type) 6 + #define ACPI_TABLE_MAX 6 +-#define NUM_ACPI_TABLES (ACPI_TABLE_MAX+1) ++#define NUM_ACPI_TABLE_TYPES (ACPI_TABLE_MAX+1) + + + /* +@@ -747,7 +747,7 @@ + u32 debug_level; + u32 debug_layer; + u32 num_table_types; +- struct acpi_table_info table_info [NUM_ACPI_TABLES]; ++ struct acpi_table_info table_info [NUM_ACPI_TABLE_TYPES]; + }; + + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/MAINTAINERS linux.21rc5-ac2/MAINTAINERS --- linux.21rc5/MAINTAINERS 2003-05-28 15:08:49.000000000 +0100 +++ linux.21rc5-ac2/MAINTAINERS 2003-05-09 18:33:12.000000000 +0100 @@ -1,4 +1,4 @@ - + List of maintainers and how to submit kernel changes Please try to follow the guidelines below. This will make things @@ -476,6 +476,13 @@ W: http://www.debian.org/~dz/i8k/ S: Maintained +DEVICE-MAPPER +P: Joe Thornber +M: thornber@sistina.com +L: dm@uk.sistina.com +W: http://www.sistina.com/lvm +S: Maintained + DEVICE NUMBER REGISTRY P: H. 
Peter Anvin M: hpa@zytor.com @@ -1706,6 +1713,12 @@ W: http://www.linuxtr.net S: Maintained +TOSHIBA ACPI EXTRAS DRIVER +P: John Belmonte +M: toshiba_acpi@memebeam.org +W: http://memebeam.org/toys/ToshibaAcpiDriver +S: Maintained + TOSHIBA SMM DRIVER P: Jonathan Buzzard M: jonathan@buzzard.org.uk @@ -2008,6 +2021,14 @@ L: linux-x25@vger.kernel.org S: Maintained +XFS FILESYSTEM +P: Silicon Graphics Inc +M: owner-xfs@oss.sgi.com +M: lord@sgi.com +L: linux-xfs@oss.sgi.com +W: http://oss.sgi.com/projects/xfs +S: Supported + X86 3-LEVEL PAGING (PAE) SUPPORT P: Ingo Molnar M: mingo@redhat.com diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Makefile linux.21rc5-ac2/Makefile --- linux.21rc5/Makefile 2003-05-28 15:08:49.000000000 +0100 +++ linux.21rc5-ac2/Makefile 2003-05-28 15:12:29.000000000 +0100 @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 4 SUBLEVEL = 21 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc5-ac1 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) @@ -40,10 +40,13 @@ MODFLAGS = -DMODULE CFLAGS_KERNEL = PERL = perl +AWK = awk +RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \ + else echo rpm; fi) export VERSION PATCHLEVEL SUBLEVEL EXTRAVERSION KERNELRELEASE ARCH \ CONFIG_SHELL TOPDIR HPATH HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \ - CPP AR NM STRIP OBJCOPY OBJDUMP MAKE MAKEFILES GENKSYMS MODFLAGS PERL + CPP AR NM STRIP OBJCOPY OBJDUMP MAKE MAKEFILES GENKSYMS MODFLAGS PERL AWK all: do-it-all @@ -138,6 +141,7 @@ drivers/block/block.o \ drivers/misc/misc.o \ drivers/net/net.o + DRIVERS-$(CONFIG_AGP) += drivers/char/agp/agp.o DRIVERS-$(CONFIG_DRM_NEW) += drivers/char/drm/drm.o DRIVERS-$(CONFIG_DRM_OLD) += drivers/char/drm-4.0/drm.o @@ -171,7 +175,7 @@ DRIVERS-$(CONFIG_FC4) += drivers/fc4/fc4.a DRIVERS-$(CONFIG_PPC32) += drivers/macintosh/macintosh.o DRIVERS-$(CONFIG_MAC) += drivers/macintosh/macintosh.o -DRIVERS-$(CONFIG_ISAPNP) += drivers/pnp/pnp.o +DRIVERS-$(CONFIG_PNP) += drivers/pnp/pnp.o DRIVERS-$(CONFIG_SGI_IP22) += drivers/sgi/sgi.a DRIVERS-$(CONFIG_VT) += drivers/video/video.o DRIVERS-$(CONFIG_PARIDE) += drivers/block/paride/paride.a @@ -199,6 +203,7 @@ kernel/ksyms.lst include/linux/compile.h \ vmlinux System.map \ .tmp* \ + scripts/mkconfigs kernel/configs.c kernel/configs.o \ drivers/char/consolemap_deftbl.c drivers/video/promcon_tbl.c \ drivers/char/conmakehash \ drivers/char/drm/*-mod.c \ @@ -226,6 +231,7 @@ # files removed with 'make mrproper' MRPROPER_FILES = \ include/linux/autoconf.h include/linux/version.h \ + tmp* \ drivers/net/hamradio/soundmodem/sm_tbl_{afsk1200,afsk2666,fsk9600}.h \ drivers/net/hamradio/soundmodem/sm_tbl_{hapn4800,psk4800}.h \ drivers/net/hamradio/soundmodem/sm_tbl_{afsk2400_7,afsk2400_8}.h \ @@ -243,6 +249,7 @@ include/asm \ .hdepend scripts/mkdep scripts/split-include scripts/docproc \ $(TOPDIR)/include/linux/modversions.h \ + scripts/mkconfigs kernel/configs.c kernel/configs.o \ kernel.spec # directories removed with 'make mrproper' @@ -316,7 +323,7 @@ linuxsubdirs: $(patsubst %, _dir_%, $(SUBDIRS)) -$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/version.h include/config/MARKER +$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/version.h tmp_include_depends $(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" -C $(patsubst _dir_%, %, $@) $(TOPDIR)/include/linux/version.h: include/linux/version.h @@ -356,13 +363,13 @@ comma := , -init/version.o: init/version.c include/linux/compile.h include/config/MARKER +init/version.o: init/version.c include/linux/compile.h tmp_include_depends $(CC) 
$(CFLAGS) $(CFLAGS_KERNEL) -DUTS_MACHINE='"$(ARCH)"' -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o init/version.o init/version.c -init/main.o: init/main.c include/config/MARKER +init/main.o: init/main.c tmp_include_depends $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $< -init/do_mounts.o: init/do_mounts.c include/config/MARKER +init/do_mounts.o: init/do_mounts.c tmp_include_depends $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $< fs lib mm ipc kernel drivers net: dummy @@ -389,7 +396,7 @@ modules: $(patsubst %, _mod_%, $(SUBDIRS)) .PHONY: $(patsubst %, _mod_%, $(SUBDIRS)) -$(patsubst %, _mod_%, $(SUBDIRS)) : include/linux/version.h include/config/MARKER +$(patsubst %, _mod_%, $(SUBDIRS)) : include/linux/version.h tmp_include_depends $(MAKE) -C $(patsubst _mod_%, %, $@) CFLAGS="$(CFLAGS) $(MODFLAGS)" MAKING_MODULES=1 modules .PHONY: modules_install @@ -495,6 +502,13 @@ endif scripts/mkdep -- `find $(FINDHPATH) \( -name SCCS -o -name .svn \) -prune -o -follow -name \*.h ! -name modversions.h -print` > .hdepend scripts/mkdep -- init/*.c > .depend + (find $(TOPDIR) \( -name .depend -o -name .hdepend \) -print | xargs $(AWK) -f scripts/include_deps) > tmp_include_depends + sed -ne 's/^\([^ ].*\):.*/ \1 \\/p' tmp_include_depends > tmp_include_depends_1 + (echo ""; echo "all: \\"; cat tmp_include_depends_1; echo "") >> tmp_include_depends + rm tmp_include_depends_1 + +tmp_include_depends: include/config/MARKER dummy + $(MAKE) -r -f tmp_include_depends all ifdef CONFIG_MODVERSIONS MODVERFILE := $(TOPDIR)/include/linux/modversions.h @@ -570,5 +584,5 @@ rm $(KERNELPATH) ; \ cd $(TOPDIR) ; \ . scripts/mkversion > .version ; \ - rpm -ta $(TOPDIR)/../$(KERNELPATH).tar.gz ; \ + $(RPM) -ta $(TOPDIR)/../$(KERNELPATH).tar.gz ; \ rm $(TOPDIR)/../$(KERNELPATH).tar.gz diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/filemap.c linux.21rc5-ac2/mm/filemap.c --- linux.21rc5/mm/filemap.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/mm/filemap.c 2003-05-27 14:17:48.000000000 +0100 @@ -950,6 +950,7 @@ spin_unlock(&pagecache_lock); return page; } +EXPORT_SYMBOL_GPL(find_trylock_page); /* * Must be called with the pagecache lock held, @@ -1326,6 +1327,8 @@ SetPageReferenced(page); } +EXPORT_SYMBOL(mark_page_accessed); + /* * This is a generic file read routine, and uses the * inode->i_op->readpage() function for the actual low-level @@ -1603,7 +1606,7 @@ if (retval) break; - retval = mapping->a_ops->direct_IO(rw, inode, iobuf, (offset+progress) >> blocksize_bits, blocksize); + retval = mapping->a_ops->direct_IO(rw, filp, iobuf, (offset+progress) >> blocksize_bits, blocksize); if (rw == READ && retval > 0) mark_dirty_kiobuf(iobuf, retval); @@ -2958,8 +2961,8 @@ * file system has to do this all by itself, unfortunately. * okir@monad.swb.de */ -ssize_t -generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos) +ssize_t generic_file_write_nolock(struct file * file, const char *buf, + size_t count, loff_t *ppos) { struct address_space *mapping = file->f_dentry->d_inode->i_mapping; struct inode *inode = mapping->host; @@ -2979,8 +2982,6 @@ cached_page = NULL; - down(&inode->i_sem); - pos = *ppos; err = -EINVAL; if (pos < 0) @@ -3002,13 +3003,16 @@ * Check whether we've reached the file size limit. 
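 * (Worked example, illustrative numbers only: with RLIMIT_FSIZE set to
 * 1 MB, a write starting at pos == 1 MB draws SIGXFSZ and fails, while
 * a 1 MB write starting at pos == 512 KB is silently clipped to the
 * remaining 512 KB; positions beyond 32 bits clip the count to 0 until
 * a 64-bit rlimit exists.)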
*/ err = -EFBIG; - + if (!S_ISBLK(inode->i_mode) && limit != RLIM_INFINITY) { if (pos >= limit) { send_sig(SIGXFSZ, current, 0); goto out; } - if (pos > 0xFFFFFFFFULL || count > limit - (u32)pos) { + /* Fix this up when we got to rlimit64 */ + if (pos > 0xFFFFFFFFULL) + count = 0; + else if(count > limit - (u32)pos) { /* send_sig(SIGXFSZ, current, 0); */ count = limit - (u32)pos; } @@ -3159,7 +3163,6 @@ err = written ? written : status; out: - up(&inode->i_sem); return err; fail_write: status = -EFAULT; @@ -3197,6 +3200,19 @@ goto out_status; } +ssize_t generic_file_write(struct file *file, const char *buf, + size_t count, loff_t *ppos) +{ + struct inode *inode = file->f_dentry->d_inode->i_mapping->host; + int err; + + down(&inode->i_sem); + err = generic_file_write_nolock(file, buf, count, ppos); + up(&inode->i_sem); + + return err; +} + void __init page_cache_init(unsigned long mempages) { unsigned long htable_size, order; diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/highmem.c linux.21rc5-ac2/mm/highmem.c --- linux.21rc5/mm/highmem.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/mm/highmem.c 2003-04-22 16:44:38.000000000 +0100 @@ -362,6 +362,7 @@ /* we need to wait I/O completion */ run_task_queue(&tq_disk); + __set_current_state(TASK_RUNNING); yield(); goto repeat_alloc; } @@ -398,6 +399,7 @@ /* we need to wait I/O completion */ run_task_queue(&tq_disk); + __set_current_state(TASK_RUNNING); yield(); goto repeat_alloc; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/Makefile linux.21rc5-ac2/mm/Makefile --- linux.21rc5/mm/Makefile 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/mm/Makefile 2003-04-22 16:44:38.000000000 +0100 @@ -9,12 +9,12 @@ O_TARGET := mm.o -export-objs := shmem.o filemap.o memory.o page_alloc.o +export-objs := shmem.o filemap.o memory.o page_alloc.o mempool.o obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \ vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \ page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \ - shmem.o + shmem.o mempool.o obj-$(CONFIG_HIGHMEM) += highmem.o diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/mempool.c linux.21rc5-ac2/mm/mempool.c --- linux.21rc5/mm/mempool.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/mm/mempool.c 2003-04-22 16:44:38.000000000 +0100 @@ -0,0 +1,299 @@ +/* + * linux/mm/mempool.c + * + * memory buffer pool support. Such pools are mostly used + * for guaranteed, deadlock-free memory allocations during + * extreme VM load. + * + * started by Ingo Molnar, Copyright (C) 2001 + */ + +#include +#include +#include +#include + +struct mempool_s { + spinlock_t lock; + int min_nr; /* nr of elements at *elements */ + int curr_nr; /* Current nr of elements at *elements */ + void **elements; + + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +static void add_element(mempool_t *pool, void *element) +{ + BUG_ON(pool->curr_nr >= pool->min_nr); + pool->elements[pool->curr_nr++] = element; +} + +static void *remove_element(mempool_t *pool) +{ + BUG_ON(pool->curr_nr <= 0); + return pool->elements[--pool->curr_nr]; +} + +static void free_pool(mempool_t *pool) +{ + while (pool->curr_nr) { + void *element = remove_element(pool); + pool->free(element, pool->pool_data); + } + kfree(pool->elements); + kfree(pool); +} + +/** + * mempool_create - create a memory pool + * @min_nr: the minimum number of elements guaranteed to be + * allocated for this pool. 
+ * @alloc_fn: user-defined element-allocation function. + * @free_fn: user-defined element-freeing function. + * @pool_data: optional private data available to the user-defined functions. + * + * this function creates and allocates a guaranteed size, preallocated + * memory pool. The pool can be used from the mempool_alloc and mempool_free + * functions. This function might sleep. Both the alloc_fn() and the free_fn() + * functions might sleep - as long as the mempool_alloc function is not called + * from IRQ contexts. + */ +mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data) +{ + mempool_t *pool; + + pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return NULL; + memset(pool, 0, sizeof(*pool)); + pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL); + if (!pool->elements) { + kfree(pool); + return NULL; + } + spin_lock_init(&pool->lock); + pool->min_nr = min_nr; + pool->pool_data = pool_data; + init_waitqueue_head(&pool->wait); + pool->alloc = alloc_fn; + pool->free = free_fn; + + /* + * First pre-allocate the guaranteed number of buffers. + */ + while (pool->curr_nr < pool->min_nr) { + void *element; + + element = pool->alloc(GFP_KERNEL, pool->pool_data); + if (unlikely(!element)) { + free_pool(pool); + return NULL; + } + add_element(pool, element); + } + return pool; +} + +/** + * mempool_resize - resize an existing memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * @new_min_nr: the new minimum number of elements guaranteed to be + * allocated for this pool. + * @gfp_mask: the usual allocation bitmask. + * + * This function shrinks/grows the pool. In the case of growing, + * it cannot be guaranteed that the pool will be grown to the new + * size immediately, but new mempool_free() calls will refill it. + * + * Note, the caller must guarantee that no mempool_destroy is called + * while this function is running. mempool_alloc() & mempool_free() + * might be called (eg. from IRQ contexts) while this function executes. + */ +int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask) +{ + void *element; + void **new_elements; + unsigned long flags; + + BUG_ON(new_min_nr <= 0); + + spin_lock_irqsave(&pool->lock, flags); + if (new_min_nr < pool->min_nr) { + while (pool->curr_nr > new_min_nr) { + element = remove_element(pool); + spin_unlock_irqrestore(&pool->lock, flags); + pool->free(element, pool->pool_data); + spin_lock_irqsave(&pool->lock, flags); + } + pool->min_nr = new_min_nr; + goto out_unlock; + } + spin_unlock_irqrestore(&pool->lock, flags); + + /* Grow the pool */ + new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask); + if (!new_elements) + return -ENOMEM; + + spin_lock_irqsave(&pool->lock, flags); + memcpy(new_elements, pool->elements, + pool->curr_nr * sizeof(*new_elements)); + kfree(pool->elements); + pool->elements = new_elements; + pool->min_nr = new_min_nr; + + while (pool->curr_nr < pool->min_nr) { + spin_unlock_irqrestore(&pool->lock, flags); + element = pool->alloc(gfp_mask, pool->pool_data); + if (!element) + goto out; + spin_lock_irqsave(&pool->lock, flags); + if (pool->curr_nr < pool->min_nr) + add_element(pool, element); + else + kfree(element); /* Raced */ + } +out_unlock: + spin_unlock_irqrestore(&pool->lock, flags); +out: + return 0; +} + +/** + * mempool_destroy - deallocate a memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). 
+ * + * this function only sleeps if the free_fn() function sleeps. The caller + * has to guarantee that all elements have been returned to the pool (ie: + * freed) prior to calling mempool_destroy(). + */ +void mempool_destroy(mempool_t *pool) +{ + if (pool->curr_nr != pool->min_nr) + BUG(); /* There were outstanding elements */ + free_pool(pool); +} + +/** + * mempool_alloc - allocate an element from a specific memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * @gfp_mask: the usual allocation bitmask. + * + * this function only sleeps if the alloc_fn function sleeps or + * returns NULL. Note that due to preallocation, this function + * *never* fails when called from process contexts. (it might + * fail if called from an IRQ context.) + */ +void * mempool_alloc(mempool_t *pool, int gfp_mask) +{ + void *element; + unsigned long flags; + int curr_nr; + DECLARE_WAITQUEUE(wait, current); + int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO); + +repeat_alloc: + element = pool->alloc(gfp_nowait, pool->pool_data); + if (likely(element != NULL)) + return element; + + /* + * If the pool is less than 50% full then try harder + * to allocate an element: + */ + if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) { + element = pool->alloc(gfp_mask, pool->pool_data); + if (likely(element != NULL)) + return element; + } + + /* + * Kick the VM at this point. + */ + wakeup_bdflush(); + + spin_lock_irqsave(&pool->lock, flags); + if (likely(pool->curr_nr)) { + element = remove_element(pool); + spin_unlock_irqrestore(&pool->lock, flags); + return element; + } + spin_unlock_irqrestore(&pool->lock, flags); + + /* We must not sleep in the GFP_ATOMIC case */ + if (gfp_mask == gfp_nowait) + return NULL; + + run_task_queue(&tq_disk); + + add_wait_queue_exclusive(&pool->wait, &wait); + set_task_state(current, TASK_UNINTERRUPTIBLE); + + spin_lock_irqsave(&pool->lock, flags); + curr_nr = pool->curr_nr; + spin_unlock_irqrestore(&pool->lock, flags); + + if (!curr_nr) + schedule(); + + current->state = TASK_RUNNING; + remove_wait_queue(&pool->wait, &wait); + + goto repeat_alloc; +} + +/** + * mempool_free - return an element to the pool. + * @element: pool element pointer. + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * + * this function only sleeps if the free_fn() function sleeps. + */ +void mempool_free(void *element, mempool_t *pool) +{ + unsigned long flags; + + if (pool->curr_nr < pool->min_nr) { + spin_lock_irqsave(&pool->lock, flags); + if (pool->curr_nr < pool->min_nr) { + add_element(pool, element); + spin_unlock_irqrestore(&pool->lock, flags); + wake_up(&pool->wait); + return; + } + spin_unlock_irqrestore(&pool->lock, flags); + } + pool->free(element, pool->pool_data); +} + +/* + * A commonly used alloc and free fn. 
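+ *
+ * (Usage sketch, names illustrative: given any kmem_cache_t "my_cache",
+ *
+ *	pool = mempool_create(16, mempool_alloc_slab,
+ *				mempool_free_slab, my_cache);
+ *	obj = mempool_alloc(pool, GFP_NOIO);
+ *	...
+ *	mempool_free(obj, pool);
+ *	mempool_destroy(pool);
+ *
+ * the 16 preallocated elements guarantee that mempool_alloc() cannot
+ * fail when called from process context.)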
+ */ +void *mempool_alloc_slab(int gfp_mask, void *pool_data) +{ + kmem_cache_t *mem = (kmem_cache_t *) pool_data; + return kmem_cache_alloc(mem, gfp_mask); +} + +void mempool_free_slab(void *element, void *pool_data) +{ + kmem_cache_t *mem = (kmem_cache_t *) pool_data; + kmem_cache_free(mem, element); +} + + +EXPORT_SYMBOL(mempool_create); +EXPORT_SYMBOL(mempool_resize); +EXPORT_SYMBOL(mempool_destroy); +EXPORT_SYMBOL(mempool_alloc); +EXPORT_SYMBOL(mempool_free); +EXPORT_SYMBOL(mempool_alloc_slab); +EXPORT_SYMBOL(mempool_free_slab); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/mlock.c linux.21rc5-ac2/mm/mlock.c --- linux.21rc5/mm/mlock.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/mm/mlock.c 2003-04-22 16:44:38.000000000 +0100 @@ -198,6 +198,7 @@ unsigned long lock_limit; int error = -ENOMEM; + vm_validate_enough("entering sys_mlock"); down_write(¤t->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; @@ -220,6 +221,7 @@ error = do_mlock(start, len, 1); out: up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_mlock"); return error; } @@ -227,11 +229,13 @@ { int ret; + vm_validate_enough("entering sys_munlock"); down_write(¤t->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; ret = do_mlock(start, len, 0); up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_munlock"); return ret; } @@ -268,6 +272,8 @@ unsigned long lock_limit; int ret = -EINVAL; + vm_validate_enough("entering sys_mlockall"); + down_write(¤t->mm->mmap_sem); if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE))) goto out; @@ -287,15 +293,18 @@ ret = do_mlockall(flags); out: up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_mlockall"); return ret; } asmlinkage long sys_munlockall(void) { int ret; + vm_validate_enough("entering sys_munlockall"); down_write(¤t->mm->mmap_sem); ret = do_mlockall(0); up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_munlockall"); return ret; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/mmap.c linux.21rc5-ac2/mm/mmap.c --- linux.21rc5/mm/mmap.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/mm/mmap.c 2003-05-28 15:16:11.000000000 +0100 @@ -1,8 +1,25 @@ /* * linux/mm/mmap.c - * * Written by obz. + * + * Address space accounting code + * (c) Copyright 2002 Red Hat Inc, All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + #include #include #include @@ -46,6 +63,7 @@ int sysctl_overcommit_memory; int max_map_count = DEFAULT_MAX_MAP_COUNT; +atomic_t vm_committed_space = ATOMIC_INIT(0); /* Check that a process has enough memory to allocate a * new virtual mapping. @@ -55,42 +73,109 @@ /* Stupid algorithm to decide if we have enough memory: while * simple, it hopefully works in most obvious cases.. 
Easy to * fool it, but this should catch most mistakes. - */ - /* 23/11/98 NJC: Somewhat less stupid version of algorithm, + * + * 23/11/98 NJC: Somewhat less stupid version of algorithm, * which tries to do "TheRightThing". Instead of using half of * (buffers+cache), use the minimum values. Allow an extra 2% * of num_physpages for safety margin. + * + * 2002/02/26 Alan Cox: Added two new modes that do real accounting */ + unsigned long free, allowed; + struct sysinfo i; - unsigned long free; + atomic_add(pages, &vm_committed_space); /* Sometimes we want to use more memory than we have. */ - if (sysctl_overcommit_memory) - return 1; + if (sysctl_overcommit_memory == 1) + return 1; + + if (sysctl_overcommit_memory == 0) + { + /* The page cache contains buffer pages these days.. */ + free = atomic_read(&page_cache_size); + free += nr_free_pages(); + free += nr_swap_pages; + + /* + * This double-counts: the nrpages are both in the page-cache + * and in the swapper space. At the same time, this compensates + * for the swap-space over-allocation (ie "nr_swap_pages" being + * too small. + */ + free += swapper_space.nrpages; + + /* + * The code below doesn't account for free space in the inode + * and dentry slab cache, slab cache fragmentation, inodes and + * dentries which will become freeable under VM load, etc. + * Lets just hope all these (complex) factors balance out... + */ + free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT; + free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT; + + if(free > pages) + return 1; + atomic_sub(pages, &vm_committed_space); + return 0; + } + allowed = total_swap_pages; + + if(sysctl_overcommit_memory == 2) + { + /* FIXME - need to add arch hooks to get the bits we need + without the higher overhead crap */ + si_meminfo(&i); + allowed += i.totalram >> 1; + } + if(atomic_read(&vm_committed_space) < allowed) + return 1; + atomic_sub(pages, &vm_committed_space); + return 0; + +} - /* The page cache contains buffer pages these days.. */ - free = atomic_read(&page_cache_size); - free += nr_free_pages(); - free += nr_swap_pages; +void vm_unacct_memory(long pages) +{ + atomic_sub(pages, &vm_committed_space); +} - /* - * This double-counts: the nrpages are both in the page-cache - * and in the swapper space. At the same time, this compensates - * for the swap-space over-allocation (ie "nr_swap_pages" being - * too small. - */ - free += swapper_space.nrpages; +/* + * Don't even bother telling me the locking is wrong - its a test + * routine and uniprocessor is quite sufficient.. + * + * To enable this debugging you must tweak the #if below, and build + * with no SYS5 shared memory (thats not validated yet) and non SMP + */ - /* - * The code below doesn't account for free space in the inode - * and dentry slab cache, slab cache fragmentation, inodes and - * dentries which will become freeable under VM load, etc. - * Lets just hope all these (complex) factors balance out... 
- */ - free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT; - free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT; +void vm_validate_enough(char *x) +{ +#if 0 + unsigned long count = 0UL; + struct mm_struct *mm; + struct vm_area_struct *vma; + struct list_head *mmp; + unsigned long flags; + + spin_lock_irqsave(&mmlist_lock, flags); - return free > pages; + list_for_each(mmp, &init_mm.mmlist) + { + mm = list_entry(mmp, struct mm_struct, mmlist); + for(vma = mm->mmap; vma!=NULL; vma=vma->vm_next) + { + if(vma->vm_flags & VM_ACCOUNT) + count += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + } + } + if(count != atomic_read(&vm_committed_space)) + { + printk("MM crappo accounting %s: %lu %ld.\n", + x, count, atomic_read(&vm_committed_space)); + atomic_set(&vm_committed_space, count); + } + spin_unlock_irqrestore(&mmlist_lock, flags); +#endif } /* Remove one vm structure from the inode's i_mapping address space. */ @@ -161,12 +246,13 @@ /* Always allow shrinking brk. */ if (brk <= mm->brk) { - if (!do_munmap(mm, newbrk, oldbrk-newbrk)) + if (!do_munmap(mm, newbrk, oldbrk-newbrk, 1)) goto set_brk; goto out; } /* Check against rlimit.. */ + /* FIXME: - this seems to be checked in do_brk.. */ rlim = current->rlim[RLIMIT_DATA].rlim_cur; if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) goto out; @@ -175,10 +261,6 @@ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) goto out; - /* Check if we have enough memory.. */ - if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) - goto out; - /* Ok, looks good - let it rip. */ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) goto out; @@ -399,7 +481,9 @@ int correct_wcount = 0; int error; rb_node_t ** rb_link, * rb_parent; + unsigned long charged = 0; + vm_validate_enough("entering do_mmap_pgoff"); if (file && (!file->f_op || !file->f_op->mmap)) return -ENODEV; @@ -484,7 +568,7 @@ munmap_back: vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); if (vma && vma->vm_start < addr + len) { - if (do_munmap(mm, addr, len)) + if (do_munmap(mm, addr, len, 1)) return -ENOMEM; goto munmap_back; } @@ -494,11 +578,15 @@ > current->rlim[RLIMIT_AS].rlim_cur) return -ENOMEM; - /* Private writable mapping? Check memory availability.. */ - if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE && - !(flags & MAP_NORESERVE) && - !vm_enough_memory(len >> PAGE_SHIFT)) - return -ENOMEM; + if (!(flags & MAP_NORESERVE) || sysctl_overcommit_memory > 1) { + if ((vm_flags & (VM_SHARED|VM_WRITE)) == VM_WRITE) { + /* Private writable mapping: check memory availability */ + charged = len >> PAGE_SHIFT; + if (!vm_enough_memory(charged)) + return -ENOMEM; + vm_flags |= VM_ACCOUNT; + } + } /* Can we just expand an old anonymous mapping? */ if (!file && !(vm_flags & VM_SHARED) && rb_parent) @@ -510,8 +598,9 @@ * not unmapped, but the maps are removed from the list. */ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + error = -ENOMEM; if (!vma) - return -ENOMEM; + goto unacct_error; vma->vm_mm = mm; vma->vm_start = addr; @@ -563,8 +652,7 @@ * to update the tree pointers. */ addr = vma->vm_start; - stale_vma = find_vma_prepare(mm, addr, &prev, - &rb_link, &rb_parent); + stale_vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); /* * Make sure the lowlevel driver did its job right. 
 */
@@ -585,6 +673,7 @@
 mm->locked_vm += len >> PAGE_SHIFT;
 make_pages_present(addr, addr + len);
 }
+ vm_validate_enough("out from do_mmap_pgoff");
 return addr;

unmap_and_free_vma:
@@ -597,6 +686,10 @@
 zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
free_vma:
 kmem_cache_free(vm_area_cachep, vma);
+unacct_error:
+ if(charged)
+ vm_unacct_memory(charged);
+ vm_validate_enough("error path from do_mmap_pgoff");
 return error;
}
@@ -739,6 +832,97 @@
 return NULL;
}
+/* vma is the first one with address < vma->vm_end,
+ * and even address < vma->vm_start. Have to extend vma. */
+
+#ifdef ARCH_STACK_GROWSUP
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
+ vm_validate_enough("entering expand_stack");
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller is required
+ * to hold the mmap_sem in write mode. We need to get the spinlock only
+ * before relocating the vma range ourself.
+ */
+ spin_lock(&vma->vm_mm->page_table_lock);
+
+ address += 4 + PAGE_SIZE - 1;
+ address &= PAGE_MASK;
+ grow = (address - vma->vm_end) >> PAGE_SHIFT;
+
+ /* Overcommit.. */
+ if(!vm_enough_memory(grow)) {
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return -ENOMEM;
+ }
+
+ if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
+ ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur)
+ {
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ vm_unacct_memory(grow);
+ vm_validate_enough("exiting expand_stack - FAIL");
+ return -ENOMEM;
+ }
+ vma->vm_end = address;
+ vma->vm_mm->total_vm += grow;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow;
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ vm_validate_enough("exiting expand_stack");
+ return 0;
+}
+#else
+
+int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return -EFAULT;
+
+ vm_validate_enough("entering expand_stack");
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller is required
+ * to hold the mmap_sem in write mode. We need to get the spinlock only
+ * before relocating the vma range ourself.
+ */
+ address &= PAGE_MASK;
+ spin_lock(&vma->vm_mm->page_table_lock);
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+ /* Overcommit..
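+ * e.g., illustrative figures only: in strict accounting mode
+ * (sysctl_overcommit_memory == 2) the allowance is total_swap_pages
+ * plus half of RAM, so with 512 MB of swap and 256 MB of RAM in
+ * 4 KB pages the stack may keep growing only while vm_committed_space
+ * stays below 131072 + 32768 pages.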
*/ + if(!vm_enough_memory(grow)) { + spin_unlock(&vma->vm_mm->page_table_lock); + return -ENOMEM; + } + + if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur || + ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) { + spin_unlock(&vma->vm_mm->page_table_lock); + vm_unacct_memory(grow); + vm_validate_enough("exiting expand_stack - FAIL"); + return -ENOMEM; + } + vma->vm_start = address; + vma->vm_pgoff -= grow; + vma->vm_mm->total_vm += grow; + if (vma->vm_flags & VM_LOCKED) + vma->vm_mm->locked_vm += grow; + spin_unlock(&vma->vm_mm->page_table_lock); + vm_validate_enough("exiting expand_stack"); + return 0; +} + +#endif + struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr) { struct vm_area_struct * vma; @@ -786,7 +969,7 @@ */ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm, struct vm_area_struct *area, unsigned long addr, size_t len, - struct vm_area_struct *extra) + struct vm_area_struct *extra, int acct) { struct vm_area_struct *mpnt; unsigned long end = addr + len; @@ -794,6 +977,8 @@ area->vm_mm->total_vm -= len >> PAGE_SHIFT; if (area->vm_flags & VM_LOCKED) area->vm_mm->locked_vm -= len >> PAGE_SHIFT; + if (acct && (area->vm_flags & VM_ACCOUNT)) + vm_unacct_memory(len >> PAGE_SHIFT); /* Unmapping the whole area. */ if (addr == area->vm_start && end == area->vm_end) { @@ -921,10 +1106,12 @@ * work. This now handles partial unmappings. * Jeremy Fitzhardine */ -int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) +int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct) { struct vm_area_struct *mpnt, *prev, **npp, *free, *extra; + if(acct) vm_validate_enough("entering do_munmap"); + if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr) return -EINVAL; @@ -991,6 +1178,7 @@ (file = mpnt->vm_file) != NULL) { atomic_dec(&file->f_dentry->d_inode->i_writecount); } + remove_shared_vm_struct(mpnt); mm->map_count--; @@ -999,7 +1187,7 @@ /* * Fix the mapping, and free the old area if it wasn't reused. */ - extra = unmap_fixup(mm, mpnt, st, size, extra); + extra = unmap_fixup(mm, mpnt, st, size, extra, acct); if (file) atomic_inc(&file->f_dentry->d_inode->i_writecount); } @@ -1010,6 +1198,7 @@ kmem_cache_free(vm_area_cachep, extra); free_pgtables(mm, prev, addr, addr+len); + if(acct) vm_validate_enough("exit -ok- do_munmap"); return 0; } @@ -1020,7 +1209,7 @@ struct mm_struct *mm = current->mm; down_write(&mm->mmap_sem); - ret = do_munmap(mm, addr, len); + ret = do_munmap(mm, addr, len, 1); up_write(&mm->mmap_sem); return ret; } @@ -1037,6 +1226,9 @@ unsigned long flags; rb_node_t ** rb_link, * rb_parent; + vm_validate_enough("entering do_brk"); + + len = PAGE_ALIGN(len); if (!len) return addr; @@ -1057,7 +1249,7 @@ munmap_back: vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); if (vma && vma->vm_start < addr + len) { - if (do_munmap(mm, addr, len)) + if (do_munmap(mm, addr, len, 1)) return -ENOMEM; goto munmap_back; } @@ -1073,7 +1265,7 @@ if (!vm_enough_memory(len >> PAGE_SHIFT)) return -ENOMEM; - flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags; + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; /* Can we just expand an old anonymous mapping? 
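 * (If the preceding anonymous vma ends exactly at addr with matching
 * flags, vma_merge() simply advances its vm_end and no new
 * vm_area_struct is allocated; the charge was already taken by the
 * vm_enough_memory() check above and VM_ACCOUNT is set in flags, so
 * the merged pages remain accounted either way.)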
*/ if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags)) @@ -1084,8 +1276,11 @@ */ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!vma) + { + /* We accounted this address space - undo it */ + vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; - + } vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; @@ -1104,6 +1299,9 @@ mm->locked_vm += len >> PAGE_SHIFT; make_pages_present(addr, addr + len); } + + vm_validate_enough("exiting do_brk"); + return addr; } @@ -1145,6 +1343,10 @@ unsigned long end = mpnt->vm_end; unsigned long size = end - start; + /* If the VMA has been charged for, account for its removal */ + if (mpnt->vm_flags & VM_ACCOUNT) + vm_unacct_memory(size >> PAGE_SHIFT); + if (mpnt->vm_ops) { if (mpnt->vm_ops->close) mpnt->vm_ops->close(mpnt); @@ -1163,8 +1365,9 @@ BUG(); clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD); - flush_tlb_mm(mm); + vm_validate_enough("exiting exit_mmap"); + } /* Insert vm structure into process list sorted by address diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/mprotect.c linux.21rc5-ac2/mm/mprotect.c --- linux.21rc5/mm/mprotect.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/mm/mprotect.c 2003-04-22 16:44:38.000000000 +0100 @@ -2,6 +2,23 @@ * linux/mm/mprotect.c * * (C) Copyright 1994 Linus Torvalds + * + * Address space accounting code + * (c) Copyright 2002 Red Hat Inc, All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include @@ -241,11 +258,28 @@ { pgprot_t newprot; int error; + unsigned long charged = 0; if (newflags == vma->vm_flags) { *pprev = vma; return 0; } + + /* + * If we make a private mapping writable we increase our commit; + * but (without finer accounting) cannot reduce our commit if we + * make it unwritable again. + * + * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting + * a MAP_NORESERVE private mapping to writable will now reserve. 
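+ *
+ * Concretely: mprotect() from PROT_READ to PROT_READ|PROT_WRITE on a
+ * private, previously unaccounted mapping charges (end - start)
+ * pages; shared mappings, already-writable mappings and mappings
+ * already carrying VM_ACCOUNT are all skipped by the test below.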
+ */ + if ((newflags & VM_WRITE) && + !(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) { + charged = (end - start) >> PAGE_SHIFT; + if (!vm_enough_memory(charged)) + return -ENOMEM; + newflags |= VM_ACCOUNT; + } newprot = protection_map[newflags & 0xf]; if (start == vma->vm_start) { if (end == vma->vm_end) @@ -256,10 +290,10 @@ error = mprotect_fixup_end(vma, pprev, start, newflags, newprot); else error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot); - - if (error) + if (error) { + vm_unacct_memory(charged); return error; - + } change_protection(start, end, newprot); return 0; } @@ -270,6 +304,8 @@ struct vm_area_struct * vma, * next, * prev; int error = -EINVAL; + vm_validate_enough("entering mprotect"); + if (start & ~PAGE_MASK) return -EINVAL; len = PAGE_ALIGN(len); @@ -333,5 +369,6 @@ } out: up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting mprotect"); return error; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/mremap.c linux.21rc5-ac2/mm/mremap.c --- linux.21rc5/mm/mremap.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/mm/mremap.c 2003-04-22 16:44:38.000000000 +0100 @@ -2,6 +2,23 @@ * linux/mm/remap.c * * (C) Copyright 1996 Linus Torvalds + * + * Address space accounting code + * (c) Copyright 2002 Red Hat Inc, All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include @@ -13,8 +30,6 @@ #include #include -extern int vm_enough_memory(long pages); - static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr) { pgd_t * pgd; @@ -130,6 +145,7 @@ struct vm_area_struct * new_vma, * next, * prev; int allocated_vma; + new_vma = NULL; next = find_vma_prev(mm, new_addr, &prev); if (next) { @@ -189,7 +205,8 @@ new_vma->vm_ops->open(new_vma); insert_vm_struct(current->mm, new_vma); } - do_munmap(current->mm, addr, old_len); + /* The old VMA has been accounted for, don't double account */ + do_munmap(current->mm, addr, old_len, 0); current->mm->total_vm += new_len >> PAGE_SHIFT; if (new_vma->vm_flags & VM_LOCKED) { current->mm->locked_vm += new_len >> PAGE_SHIFT; @@ -217,6 +234,7 @@ { struct vm_area_struct *vma; unsigned long ret = -EINVAL; + unsigned long charged = 0; if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) goto out; @@ -246,16 +264,17 @@ if ((addr <= new_addr) && (addr+old_len) > new_addr) goto out; - do_munmap(current->mm, new_addr, new_len); + do_munmap(current->mm, new_addr, new_len, 1); } /* * Always allow a shrinking remap: that just unmaps * the unnecessary pages.. 
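+ * (e.g. shrinking a 16-page accounted area to 4 pages unmaps the
+ * trailing 12 pages with acct == 1, handing their commit charge
+ * straight back to the pool.)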
+ * do_munmap does all the needed commit accounting */ ret = addr; if (old_len >= new_len) { - do_munmap(current->mm, addr+new_len, old_len - new_len); + do_munmap(current->mm, addr+new_len, old_len - new_len, 1); if (!(flags & MREMAP_FIXED) || (new_addr == addr)) goto out; } @@ -285,11 +304,12 @@ if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len) > current->rlim[RLIMIT_AS].rlim_cur) goto out; - /* Private writable mapping? Check memory availability.. */ - if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE && - !(flags & MAP_NORESERVE) && - !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT)) - goto out; + + if (vma->vm_flags & VM_ACCOUNT) { + charged = (new_len - old_len) >> PAGE_SHIFT; + if(!vm_enough_memory(charged)) + goto out_nc; + } /* old_len exactly to the end of the area.. * And we're not relocating the area. @@ -313,6 +333,7 @@ addr + new_len); } ret = addr; + vm_validate_enough("mremap path1"); goto out; } } @@ -336,6 +357,12 @@ ret = move_vma(vma, addr, old_len, new_len, new_addr); } out: + if(ret & ~PAGE_MASK) + { + vm_unacct_memory(charged); + vm_validate_enough("mremap error path"); + } +out_nc: return ret; } @@ -345,8 +372,10 @@ { unsigned long ret; + vm_validate_enough("entry to mremap"); down_write(¤t->mm->mmap_sem); ret = do_mremap(addr, old_len, new_len, flags, new_addr); up_write(¤t->mm->mmap_sem); + vm_validate_enough("exit from mremap"); return ret; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/oom_kill.c linux.21rc5-ac2/mm/oom_kill.c --- linux.21rc5/mm/oom_kill.c 2003-05-28 15:07:35.000000000 +0100 +++ linux.21rc5-ac2/mm/oom_kill.c 2003-04-22 16:44:38.000000000 +0100 @@ -82,7 +82,7 @@ * Niced processes are most likely less important, so double * their badness points. */ - if (p->nice > 0) + if (task_nice(p) > 0) points *= 2; /* @@ -146,7 +146,7 @@ * all the memory it needs. That way it should be able to * exit() and clear out its resources quickly... */ - p->counter = 5 * HZ; + p->time_slice = HZ; p->flags |= PF_MEMALLOC | PF_MEMDIE; /* This process has hardware access, be more careful. */ diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/shmem.c linux.21rc5-ac2/mm/shmem.c --- linux.21rc5/mm/shmem.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/mm/shmem.c 2003-04-22 16:44:38.000000000 +0100 @@ -5,8 +5,25 @@ * 2000 Transmeta Corp. * 2000-2001 Christoph Rohland * 2000-2001 SAP AG + * 2002 Red Hat Inc. + * TODO: + * Accounting needs verification + * We need to verify all the security fixes from generic_file_write are + * now there * - * This file is released under the GPL. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* @@ -21,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -54,43 +72,25 @@ LIST_HEAD (shmem_inodes); static spinlock_t shmem_ilock = SPIN_LOCK_UNLOCKED; -atomic_t shmem_nrpages = ATOMIC_INIT(0); /* Not used right now */ +atomic_t shmem_nrpages = ATOMIC_INIT(0); static struct page *shmem_getpage_locked(struct shmem_inode_info *, struct inode *, unsigned long); -/* - * shmem_recalc_inode - recalculate the size of an inode - * - * @inode: inode to recalc - * @swap: additional swap pages freed externally - * - * We have to calculate the free blocks since the mm can drop pages - * behind our back - * - * But we know that normally - * inodes->i_blocks/BLOCKS_PER_PAGE == - * inode->i_mapping->nrpages + info->swapped - * - * So the mm freed - * inodes->i_blocks/BLOCKS_PER_PAGE - - * (inode->i_mapping->nrpages + info->swapped) - * - * It has to be called with the spinlock held. - */ - -static void shmem_recalc_inode(struct inode * inode) +static void shmem_removepage(struct page *page) { - unsigned long freed; + struct inode * inode; + struct shmem_sb_info * sbinfo; - freed = (inode->i_blocks/BLOCKS_PER_PAGE) - - (inode->i_mapping->nrpages + SHMEM_I(inode)->swapped); - if (freed){ - struct shmem_sb_info * sbinfo = SHMEM_SB(inode->i_sb); - inode->i_blocks -= freed*BLOCKS_PER_PAGE; - spin_lock (&sbinfo->stat_lock); - sbinfo->free_blocks += freed; - spin_unlock (&sbinfo->stat_lock); - } + atomic_dec(&shmem_nrpages); + if (PageLaunder(page)) + return; + + inode = page->mapping->host; + sbinfo = SHMEM_SB(inode->i_sb); + inode->i_blocks -= BLOCKS_PER_PAGE; + spin_lock (&sbinfo->stat_lock); + sbinfo->free_blocks++; + spin_unlock (&sbinfo->stat_lock); } /* @@ -320,6 +320,7 @@ unsigned long partial; unsigned long freed = 0; struct shmem_inode_info * info = SHMEM_I(inode); + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); down(&info->sem); inode->i_ctime = inode->i_mtime = CURRENT_TIME; @@ -352,11 +353,41 @@ freed += shmem_truncate_indirect(info, index); info->swapped -= freed; - shmem_recalc_inode(inode); + inode->i_blocks -= freed*BLOCKS_PER_PAGE; spin_unlock (&info->lock); + spin_lock (&sbinfo->stat_lock); + sbinfo->free_blocks += freed; + spin_unlock (&sbinfo->stat_lock); up(&info->sem); } +static int shmem_notify_change(struct dentry * dentry, struct iattr *attr) +{ + struct inode *inode = dentry->d_inode; + long change = 0; + int error; + + if ((attr->ia_valid & ATTR_SIZE) && (attr->ia_size <= SHMEM_MAX_BYTES)) { + /* + * Account swap file usage based on new file size, + * but just let vmtruncate fail on out-of-range sizes. 
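+ *
+ * Worked example (sizes assumed): growing a 2MB tmpfs file to 8MB via
+ * ftruncate() charges VM_ACCT(8MB) - VM_ACCT(2MB) before the change
+ * is applied; shrinking back to 2MB takes the else branch and gives
+ * the difference back through vm_unacct_memory() immediately.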
+ */ + change = VM_ACCT(attr->ia_size) - VM_ACCT(inode->i_size); + if (change > 0) { + if (!vm_enough_memory(change)) + return -ENOMEM; + } else + vm_unacct_memory(-change); + + } + error = inode_change_ok(inode, attr); + if (!error) + error = inode_setattr(inode, attr); + if (error && change) + vm_unacct_memory(change); + return error; +} + static void shmem_delete_inode(struct inode * inode) { struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); @@ -365,6 +396,7 @@ spin_lock (&shmem_ilock); list_del (&SHMEM_I(inode)->list); spin_unlock (&shmem_ilock); + vm_unacct_memory(VM_ACCT(inode->i_size)); inode->i_size = 0; shmem_truncate (inode); } @@ -419,6 +451,7 @@ swap_free(entry); ptr[offset] = (swp_entry_t) {0}; delete_from_swap_cache(page); + atomic_inc(&shmem_nrpages); add_to_page_cache(page, info->inode->i_mapping, offset + idx); SetPageDirty(page); SetPageUptodate(page); @@ -484,7 +517,6 @@ entry = shmem_swp_entry(info, index, 0); if (IS_ERR(entry)) /* this had been allocated on page allocation */ BUG(); - shmem_recalc_inode(inode); if (entry->val) BUG(); @@ -498,6 +530,7 @@ * Raced with "speculative" read_swap_cache_async. * Add page back to page cache, unref swap, try again. */ + atomic_inc(&shmem_nrpages); add_to_page_cache_locked(page, mapping, index); spin_unlock(&info->lock); swap_free(swap); @@ -555,7 +588,6 @@ return page; } - shmem_recalc_inode(inode); if (entry->val) { unsigned long flags; @@ -622,6 +654,7 @@ /* We have the page */ SetPageUptodate(page); + atomic_inc(&shmem_nrpages); return page; no_space: spin_unlock (&sbinfo->stat_lock); @@ -781,6 +814,13 @@ static struct inode_operations shmem_symlink_inode_operations; static struct inode_operations shmem_symlink_inline_operations; +/* + * This is a copy of generic_file_write slightly modified. It would + * help no end if it were kept remotely up to date with the + * generic_file_write changes. I don't alas see a good way to merge + * it back and use the generic one -- Alan + */ + static ssize_t shmem_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos) { @@ -792,6 +832,7 @@ unsigned long written; long status; int err; + loff_t maxpos; if ((ssize_t) count < 0) return -EINVAL; @@ -804,12 +845,12 @@ pos = *ppos; err = -EINVAL; if (pos < 0) - goto out; + goto out_nc; err = file->f_error; if (err) { file->f_error = 0; - goto out; + goto out_nc; } written = 0; @@ -817,6 +858,18 @@ if (file->f_flags & O_APPEND) pos = inode->i_size; + maxpos = inode->i_size; + + if (pos + count > inode->i_size) { + maxpos = pos + count; + if (maxpos > SHMEM_MAX_BYTES) + maxpos = SHMEM_MAX_BYTES; + if (!vm_enough_memory(VM_ACCT(maxpos) - VM_ACCT(inode->i_size))) { + err = -ENOMEM; + goto out_nc; + } + } + /* * Check whether we've reached the file size limit. */ @@ -826,21 +879,77 @@ send_sig(SIGXFSZ, current, 0); goto out; } - if (count > limit - pos) { + if (pos > 0xFFFFFFFFULL || count > limit - (u32)pos) { + /* send_sig(SIGXFSZ, current, 0); */ + count = limit - (u32)pos; + } + } + + /* + * LFS rule + */ + if ( pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) { + if (pos >= MAX_NON_LFS) { send_sig(SIGXFSZ, current, 0); - count = limit - pos; + goto out; + } + if (count > MAX_NON_LFS - (u32)pos) { + /* send_sig(SIGXFSZ, current, 0); */ + count = MAX_NON_LFS - (u32)pos; } } - status = 0; - if (count) { - remove_suid(inode); - inode->i_ctime = inode->i_mtime = CURRENT_TIME; + /* + * Are we about to exceed the fs block limit ? 
+ * + * If we have written data it becomes a short write + * If we have exceeded without writing data we send + * a signal and give them an EFBIG. + * + * Linus frestrict idea will clean these up nicely.. + */ + + if (!S_ISBLK(inode->i_mode)) { + if (pos >= inode->i_sb->s_maxbytes) + { + if (count || pos > inode->i_sb->s_maxbytes) { + send_sig(SIGXFSZ, current, 0); + err = -EFBIG; + goto out; + } + /* zero-length writes at ->s_maxbytes are OK */ + } + + if (pos + count > inode->i_sb->s_maxbytes) + count = inode->i_sb->s_maxbytes - pos; + } else { + if (is_read_only(inode->i_rdev)) { + err = -EPERM; + goto out; + } + if (pos >= inode->i_size) { + if (count || pos > inode->i_size) { + err = -ENOSPC; + goto out; + } + } + + if (pos + count > inode->i_size) + count = inode->i_size - pos; } + err = 0; + if (count == 0) + goto out; + status = 0; + + remove_suid(inode); + inode->i_ctime = inode->i_mtime = CURRENT_TIME; + while (count) { unsigned long bytes, index, offset; char *kaddr; + int deactivate = 1; /* * Try to find the page in the cache. If it isn't there, @@ -851,6 +960,7 @@ bytes = PAGE_CACHE_SIZE - offset; if (bytes > count) { bytes = count; + deactivate = 0; } /* @@ -879,7 +989,7 @@ } kaddr = kmap(page); - status = copy_from_user(kaddr+offset, buf, bytes); + status = __copy_from_user(kaddr+offset, buf, bytes); kunmap(page); if (status) goto fail_write; @@ -896,7 +1006,14 @@ } unlock: /* Mark it unlocked again and drop the page.. */ + SetPageReferenced(page); UnlockPage(page); +#if USING_RMAP + if (deactivate) + deactivate_page(page); + else + mark_page_accessed(page); +#endif page_cache_release(page); if (status < 0) @@ -906,6 +1023,10 @@ err = written ? written : status; out: + /* Short writes give back address space */ + if (inode->i_size != maxpos) + vm_unacct_memory(VM_ACCT(maxpos) - VM_ACCT(inode->i_size)); +out_nc: up(&inode->i_sem); return err; fail_write: @@ -919,13 +1040,14 @@ struct inode *inode = filp->f_dentry->d_inode; struct address_space *mapping = inode->i_mapping; unsigned long index, offset; + int nr = 1; index = *ppos >> PAGE_CACHE_SHIFT; offset = *ppos & ~PAGE_CACHE_MASK; - for (;;) { + while (nr && desc->count) { struct page *page; - unsigned long end_index, nr, ret; + unsigned long end_index, nr; end_index = inode->i_size >> PAGE_CACHE_SHIFT; if (index > end_index) @@ -955,14 +1077,12 @@ * "pos" here (the actor routine has to update the user buffer * pointers and the remaining count). 
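 * (file_read_actor() reports how many bytes it actually copied; the
 * rewritten loop keeps that return value as the next nr, so a short
 * copy -- say a fault on the user buffer -- simply fails the
 * while (nr && desc->count) test instead of needing an explicit break.)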
*/ - ret = file_read_actor(desc, page, offset, nr); - offset += ret; + nr = file_read_actor(desc, page, offset, nr); + offset += nr; index += offset >> PAGE_CACHE_SHIFT; offset &= ~PAGE_CACHE_MASK; page_cache_release(page); - if (ret != nr || !desc->count) - break; } *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; @@ -1179,10 +1299,15 @@ memcpy(info, symname, len); inode->i_op = &shmem_symlink_inline_operations; } else { + if (!vm_enough_memory(VM_ACCT(1))) { + iput(inode); + return -ENOMEM; + } down(&info->sem); page = shmem_getpage_locked(info, inode, 0); if (IS_ERR(page)) { up(&info->sem); + vm_unacct_memory(VM_ACCT(1)); iput(inode); return PTR_ERR(page); } @@ -1385,6 +1510,7 @@ static struct address_space_operations shmem_aops = { + removepage: shmem_removepage, writepage: shmem_writepage, }; @@ -1399,6 +1525,7 @@ static struct inode_operations shmem_inode_operations = { truncate: shmem_truncate, + setattr: shmem_notify_change, }; static struct inode_operations shmem_dir_inode_operations = { @@ -1494,7 +1621,6 @@ struct inode * inode; struct dentry *dentry, *root; struct qstr this; - int vm_enough_memory(long pages); if (size > SHMEM_MAX_BYTES) return ERR_PTR(-EINVAL); @@ -1502,13 +1628,14 @@ if (!vm_enough_memory(VM_ACCT(size))) return ERR_PTR(-ENOMEM); + error = -ENOMEM; this.name = name; this.len = strlen(name); this.hash = 0; /* will go */ root = shm_mnt->mnt_root; dentry = d_alloc(root, &this); if (!dentry) - return ERR_PTR(-ENOMEM); + goto put_memory; error = -ENFILE; file = get_empty_filp(); @@ -1533,6 +1660,8 @@ put_filp(file); put_dentry: dput (dentry); +put_memory: + vm_unacct_memory(VM_ACCT(size)); return ERR_PTR(error); } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/slab.c linux.21rc5-ac2/mm/slab.c --- linux.21rc5/mm/slab.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/mm/slab.c 2003-04-23 14:46:43.000000000 +0100 @@ -297,8 +297,9 @@ #define RED_MAGIC2 0x170FC2A5UL /* when obj is inactive */ /* ...and for poisoning */ -#define POISON_BYTE 0x5a /* byte value for poisoning */ -#define POISON_END 0xa5 /* end-byte of poisoning */ +#define POISON_BEFORE 0x5a /* for use-uninitialised poisoning */ +#define POISON_AFTER 0x6b /* for use-after-free poisoning */ +#define POISON_END 0xa5 /* end-byte of poisoning */ #endif @@ -522,14 +523,15 @@ } #if DEBUG -static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr) +static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr, + unsigned char val) { int size = cachep->objsize; if (cachep->flags & SLAB_RED_ZONE) { addr += BYTES_PER_WORD; size -= 2*BYTES_PER_WORD; } - memset(addr, POISON_BYTE, size); + memset(addr, val, size); *(unsigned char *)(addr+size-1) = POISON_END; } @@ -1083,7 +1085,7 @@ objp -= BYTES_PER_WORD; if (cachep->flags & SLAB_POISON) /* need to poison the objs */ - kmem_poison_obj(cachep, objp); + kmem_poison_obj(cachep, objp, POISON_BEFORE); if (cachep->flags & SLAB_RED_ZONE) { if (*((unsigned long*)(objp)) != RED_MAGIC1) BUG(); @@ -1443,7 +1445,7 @@ BUG(); } if (cachep->flags & SLAB_POISON) - kmem_poison_obj(cachep, objp); + kmem_poison_obj(cachep, objp, POISON_AFTER); if (kmem_extra_free_checks(cachep, slabp, objp)) return; #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/swapfile.c linux.21rc5-ac2/mm/swapfile.c --- linux.21rc5/mm/swapfile.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/mm/swapfile.c 2003-04-22 16:44:38.000000000 +0100 @@ -709,23 +709,32 @@ struct swap_info_struct * p = NULL; unsigned 
short *swap_map; struct nameidata nd; + struct file *victim; + struct address_space *mapping; int i, type, prev; int err; + char *tmp; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - err = user_path_walk(specialfile, &nd); - if (err) + tmp = getname(specialfile); + if(IS_ERR(tmp)) + return PTR_ERR(tmp); + + victim = filp_open(tmp, O_RDWR, 0); + err = PTR_ERR(victim); + if (IS_ERR(victim)) goto out; + mapping = victim->f_dentry->d_inode->i_mapping; lock_kernel(); prev = -1; swap_list_lock(); for (type = swap_list.head; type >= 0; type = swap_info[type].next) { p = swap_info + type; if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) { - if (p->swap_file == nd.dentry) + if (p->swap_file->d_inode->i_mapping==mapping) break; } prev = type; @@ -733,7 +742,7 @@ err = -EINVAL; if (type < 0) { swap_list_unlock(); - goto out_dput; + goto out_dput_no_nd; } if (prev < 0) { @@ -771,7 +780,6 @@ } if (p->swap_device) blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP); - path_release(&nd); swap_list_lock(); swap_device_lock(p); @@ -790,9 +798,12 @@ err = 0; out_dput: - unlock_kernel(); path_release(&nd); +out_dput_no_nd: + unlock_kernel(); + filp_close(victim, NULL); out: + putname(tmp); return err; } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/mm/vmalloc.c linux.21rc5-ac2/mm/vmalloc.c --- linux.21rc5/mm/vmalloc.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/mm/vmalloc.c 2003-05-07 15:42:12.000000000 +0100 @@ -93,7 +93,8 @@ } static inline int alloc_area_pte (pte_t * pte, unsigned long address, - unsigned long size, int gfp_mask, pgprot_t prot) + unsigned long size, int gfp_mask, + pgprot_t prot, struct page ***pages) { unsigned long end; @@ -103,9 +104,20 @@ end = PMD_SIZE; do { struct page * page; - spin_unlock(&init_mm.page_table_lock); - page = alloc_page(gfp_mask); - spin_lock(&init_mm.page_table_lock); + + if (!pages) { + spin_unlock(&init_mm.page_table_lock); + page = alloc_page(gfp_mask); + spin_lock(&init_mm.page_table_lock); + } else { + page = (**pages); + (*pages)++; + + /* Add a reference to the page so we can free later */ + if (page) + atomic_inc(&page->count); + + } if (!pte_none(*pte)) printk(KERN_ERR "alloc_area_pte: page already exists\n"); if (!page) @@ -117,7 +129,9 @@ return 0; } -static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot) +static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, + unsigned long size, int gfp_mask, + pgprot_t prot, struct page ***pages) { unsigned long end; @@ -129,7 +143,8 @@ pte_t * pte = pte_alloc(&init_mm, pmd, address); if (!pte) return -ENOMEM; - if (alloc_area_pte(pte, address, end - address, gfp_mask, prot)) + if (alloc_area_pte(pte, address, end - address, + gfp_mask, prot, pages)) return -ENOMEM; address = (address + PMD_SIZE) & PMD_MASK; pmd++; @@ -137,8 +152,11 @@ return 0; } -inline int vmalloc_area_pages (unsigned long address, unsigned long size, - int gfp_mask, pgprot_t prot) +static inline int __vmalloc_area_pages (unsigned long address, + unsigned long size, + int gfp_mask, + pgprot_t prot, + struct page ***pages) { pgd_t * dir; unsigned long end = address + size; @@ -155,7 +173,7 @@ break; ret = -ENOMEM; - if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot)) + if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages)) break; address = (address + PGDIR_SIZE) & PGDIR_MASK; @@ -168,6 +186,12 @@ return ret; } +int vmalloc_area_pages(unsigned long address, unsigned long size, + int gfp_mask, pgprot_t prot) 
+{ + return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL); +} + struct vm_struct * get_vm_area(unsigned long size, unsigned long flags) { unsigned long addr, next; @@ -246,7 +270,30 @@ if (!area) return NULL; addr = area->addr; - if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, prot)) { + if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, + prot, NULL)) { + vfree(addr); + return NULL; + } + return addr; +} + +void * vmap(struct page **pages, int count, + unsigned long flags, pgprot_t prot) +{ + void * addr; + struct vm_struct *area; + unsigned long size = count << PAGE_SHIFT; + + if (!size || size > (max_mapnr << PAGE_SHIFT)) + return NULL; + area = get_vm_area(size, flags); + if (!area) { + return NULL; + } + addr = area->addr; + if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0, + prot, &pages)) { vfree(addr); return NULL; } @@ -327,3 +374,22 @@ read_unlock(&vmlist_lock); return buf - buf_start; } + +void *vcalloc(unsigned long nmemb, unsigned long elem_size) +{ + unsigned long size; + void *addr; + + /* + * Check that we're not going to overflow. + */ + if (nmemb > (ULONG_MAX / elem_size)) + return NULL; + + size = nmemb * elem_size; + addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + + return addr; +} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/bluetooth/bnep/bnep.h linux.21rc5-ac2/net/bluetooth/bnep/bnep.h --- linux.21rc5/net/bluetooth/bnep/bnep.h 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/net/bluetooth/bnep/bnep.h 2003-05-29 01:44:06.000000000 +0100 @@ -26,7 +26,7 @@ #include #include -#include "crc32.h" +#include // Limits #define BNEP_MAX_PROTO_FILTERS 5 @@ -179,7 +179,7 @@ static inline int bnep_mc_hash(__u8 *addr) { - return (bnep_crc32(~0, addr, ETH_ALEN) >> 26); + return (crc32_be(~0, addr, ETH_ALEN) >> 26); } #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/bluetooth/bnep/core.c linux.21rc5-ac2/net/bluetooth/bnep/core.c --- linux.21rc5/net/bluetooth/bnep/core.c 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/net/bluetooth/bnep/core.c 2003-05-20 20:21:28.000000000 +0100 @@ -460,8 +460,6 @@ sigfillset(¤t->blocked); flush_signals(current); - current->nice = -15; - set_fs(KERNEL_DS); init_waitqueue_entry(&wait, current); @@ -680,7 +678,6 @@ static int __init bnep_init_module(void) { - bnep_crc32_init(); bnep_sock_init(); BT_INFO("BlueZ BNEP ver %s", VERSION); @@ -695,7 +692,6 @@ static void __exit bnep_cleanup_module(void) { bnep_sock_cleanup(); - bnep_crc32_cleanup(); } module_init(bnep_init_module); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/bluetooth/bnep/crc32.c linux.21rc5-ac2/net/bluetooth/bnep/crc32.c --- linux.21rc5/net/bluetooth/bnep/crc32.c 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/net/bluetooth/bnep/crc32.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,59 +0,0 @@ -/* - * Based on linux-2.5/lib/crc32 by Matt Domsch - * - * FIXME: Remove in 2.5 - */ - -#include -#include -#include -#include -#include -#include - -#include "crc32.h" - -#define CRCPOLY_BE 0x04c11db7 -#define CRC_BE_BITS 8 - -static u32 *bnep_crc32_table; - -/* - * This code is in the public domain; copyright abandoned. - * Liability for non-performance of this code is limited to the amount - * you paid for it. Since it is distributed for free, your refund will - * be very very small. If it breaks, you get to keep both pieces. 
- */ -u32 bnep_crc32(u32 crc, unsigned char const *p, size_t len) -{ - while (len--) - crc = (crc << 8) ^ bnep_crc32_table[(crc >> 24) ^ *p++]; - - return crc; -} - -int __init bnep_crc32_init(void) -{ - unsigned i, j; - u32 crc = 0x80000000; - - bnep_crc32_table = kmalloc((1 << CRC_BE_BITS) * sizeof(u32), GFP_KERNEL); - if (!bnep_crc32_table) - return -ENOMEM; - - bnep_crc32_table[0] = 0; - - for (i = 1; i < 1 << CRC_BE_BITS; i <<= 1) { - crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0); - for (j = 0; j < i; j++) - bnep_crc32_table[i + j] = crc ^ bnep_crc32_table[j]; - } - return 0; -} - -void __exit bnep_crc32_cleanup(void) -{ - if (bnep_crc32_table) - kfree(bnep_crc32_table); - bnep_crc32_table = NULL; -} diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/bluetooth/bnep/crc32.h linux.21rc5-ac2/net/bluetooth/bnep/crc32.h --- linux.21rc5/net/bluetooth/bnep/crc32.h 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/net/bluetooth/bnep/crc32.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,10 +0,0 @@ -/* - * crc32.h - * See crc32.c for license and changes - * - * FIXME: Remove in 2.5 - */ - -int bnep_crc32_init(void); -void bnep_crc32_cleanup(void); -u32 bnep_crc32(u32 crc, unsigned char const *p, size_t len); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/bluetooth/bnep/Makefile linux.21rc5-ac2/net/bluetooth/bnep/Makefile --- linux.21rc5/net/bluetooth/bnep/Makefile 2003-05-28 15:09:05.000000000 +0100 +++ linux.21rc5-ac2/net/bluetooth/bnep/Makefile 2003-05-20 20:21:28.000000000 +0100 @@ -4,7 +4,7 @@ O_TARGET := bnep.o -obj-y := core.o sock.o netdev.o crc32.o +obj-y := core.o sock.o netdev.o obj-m += $(O_TARGET) include $(TOPDIR)/Rules.make diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/bluetooth/bnep/Makefile.lib linux.21rc5-ac2/net/bluetooth/bnep/Makefile.lib --- linux.21rc5/net/bluetooth/bnep/Makefile.lib 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/net/bluetooth/bnep/Makefile.lib 2003-05-20 20:21:28.000000000 +0100 @@ -0,0 +1 @@ +obj-$(CONFIG_BLUEZ_BNEP) += crc32.o diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/Config.in linux.21rc5-ac2/net/Config.in --- linux.21rc5/net/Config.in 2003-05-28 15:07:43.000000000 +0100 +++ linux.21rc5-ac2/net/Config.in 2003-04-30 15:10:28.000000000 +0100 @@ -67,6 +67,7 @@ dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25 + tristate 'EDP2 ATA Packet Layer (EXPERIMENTAL)' CONFIG_EDP2 tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC bool 'Frame Diverter (EXPERIMENTAL)' CONFIG_NET_DIVERT diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/decnet/dn_table.c linux.21rc5-ac2/net/decnet/dn_table.c --- linux.21rc5/net/decnet/dn_table.c 2003-05-28 15:07:43.000000000 +0100 +++ linux.21rc5-ac2/net/decnet/dn_table.c 2003-05-28 15:37:27.000000000 +0100 @@ -836,8 +836,7 @@ return NULL; if (in_interrupt() && net_ratelimit()) { - printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table -from interrupt\n"); + printk(KERN_DEBUG "DECnet: BUG! 
Attempt to create routing table from interrupt\n"); return NULL; } if ((t = kmalloc(sizeof(struct dn_fib_table), GFP_KERNEL)) == NULL) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/edp2/edp2.c linux.21rc5-ac2/net/edp2/edp2.c --- linux.21rc5/net/edp2/edp2.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/net/edp2/edp2.c 2003-05-02 17:56:56.000000000 +0100 @@ -0,0 +1,598 @@ +/* + * Copyright 2003 Red Hat Inc. All Rights Reserved. + * + * Initial experimental implementation of the EDP2 protocol layer + * + * The contents of this file are subject to the Open Software License + * version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is + * included herein by reference. + * + * Alternatively, the contents of this file may be used under the + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void edp2_dev_timeout(unsigned long data); +static struct edp2_device *edp_devices; +spinlock_t edp2_device_database = SPIN_LOCK_UNLOCKED; + +EXPORT_SYMBOL_GPL(edp2_device_database); + +/** + * edp2_dev_find - find EDP2 device + * @mac: MAC address + * + * Each EDP2 device has a unique MAC address. We key on this + * for searches. We don't key on interface because we want to + * handle multipathing eventually + * + * The caller must hold the edp2_device_database lock. + * + * Returns: + * device pointer, or NULL - not found + */ + +struct edp2_device *edp2_dev_find(u8 *mac) +{ + struct edp2_device *dev = edp_devices; + + while(dev != NULL) + { + if(memcmp(dev->mac, mac, 6) == 0) + return dev; + dev = dev->next; + } + return NULL; +} + +EXPORT_SYMBOL_GPL(edp2_dev_find); + +/** + * edp2_dev_alloc - Allocate EDP2 device + * @mac: MAC address + * + * Add a new EDP2 device to the database. The basic fields are + * filled in and the device is added. The caller must hold the + * edp2_device_database_lock + * + * Returns: + * device pointer or NULL - out of memory + */ + +struct edp2_device *edp2_dev_alloc(u8 *mac) +{ + struct edp2_device *dev = kmalloc(sizeof(struct edp2_device), GFP_ATOMIC); + if(dev == NULL) + return NULL; + memset(dev, 0, sizeof(struct edp2_device)); + memcpy(dev->mac, mac, 6); + init_timer(&dev->timer); + dev->timer.data = (unsigned long)dev; + dev->timer.function = edp2_dev_timeout; + dev->users = 0; + dev->next = edp_devices; + edp_devices = dev; + return dev; +} + +EXPORT_SYMBOL_GPL(edp2_dev_alloc); + +/** + * edp2_dev_free - Free an EDP2 device + * @dev: device to free + * + * Release an unused EDP2 device. This layer does not + * check usage counts, the caller must handle that. + * The caller must hold the edp2_device_database_lock + */ + +void edp2_dev_free(struct edp2_device *dev) +{ + struct edp2_device **p = &edp_devices; + + while(*p != NULL) + { + if(*p == dev) + { + *p = dev->next; + if(dev->dev) + dev_put(dev->dev); + kfree(dev); + return; + } + p = &((*p)->next); + } + BUG(); +} + +EXPORT_SYMBOL_GPL(edp2_dev_free); + +/** + * edp2_dev_recover - hunt for a lost device + * @dev: The EDP2 device which vanished + * + * Called when an EDP2 device that is in use vanishes. 
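+ * (The recovery path is a stub at this stage: edp2_dev_recover()
+ * below has an empty body.)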
We + * will eventually use this to do discovery for it on other + * links in case it is multipathed + */ + +static void edp2_dev_recover(struct edp2_device *dev) +{ +} + +/** + * edp2_dev_flush - flush down devices + * @dev: network device that is down + * + * Mark all the devices that are unreachable as dead + * for the moment. They aren't neccessarily defunct + * as they may be multipathed and if so they will ident + * for us later. Unused devices are freed. + */ + +static void edp2_dev_flush(struct net_device *dev) +{ + struct edp2_device **p = &edp_devices; + + while(*p != NULL) + { + if((*p)->dev == dev) + { + struct edp2_device *d = *p; + d->dead = 1; + dev_put(d->dev); + d->dev = NULL; + if(d->users == 0) + { + *p = d->next; + kfree(d); + continue; + } + else /* Live disk vanished, hunt for it */ + edp2_dev_recover(d); + } + p = &((*p)->next); + } +} + +/** + * edp2_device_recover_complete - device returns + * @edp: edp device + * + * An EDP device marked as dead has been heard from. At the + * moment we do minimal housekeeping here but more is likely + * to be added + */ + +static void edp2_device_recover_complete(struct edp2_device *edp) +{ + edp->dead = 0; +} + +/* + * EDP2 protocol handlers + */ + +/** + * edp2_ident_reply - EDP ident protocol handler + * @edp: EDP2 header + * @skb: Buffer + * + * An EDP2 ident message has been sent to us. These occur when + * we probe and also when a device is activated. We use the ident + * replies to keep the EDP2 device database current. + */ + +static void edp2_ident_reply(struct edp2_device *dev, struct edp2 *edp, struct sk_buff *skb) +{ + /* Ident indicates volume found or maybe a new volume */ + struct edp2_ata_fid1 *ident; + + ident = (struct edp2_ata_fid1 *)skb_pull(skb, sizeof(struct edp2_ata_fid1)); + if(ident == NULL) + return; + + if(dev) + { + /* FIXME: can't handle count change on active device */ + dev->queue = ntohl(ident->count); + dev->revision = ntohl(ident->firmware); + dev->protocol = ntohs(ident->aoe)&15; + dev->shelf = ntohs(ident->shad); + dev->slot = ntohs(ident->slad); + dev->last_ident = jiffies; + } + else + { + dev = edp2_dev_alloc(skb->mac.raw+6); + if(dev != NULL) + { + dev_hold(skb->dev); + dev->dev = skb->dev; + dev->last_ident = jiffies; + dev->queue = ntohl(ident->count); + if(dev->queue > MAX_QUEUED) + dev->queue = MAX_QUEUED; + dev->revision = ntohl(ident->firmware); + dev->protocol = ntohs(ident->aoe)&15; + dev->shelf = ntohs(ident->shad); + dev->slot = ntohs(ident->slad); + /* Report the new device to the disk layer */ + /* edp2_disk_new(dev, skb); */ + } + } +} + +/** + * edp2_callback - EDP2 disk functions + * @dev: EDP2 device + * @edp: EDP2 header + * @skb: buffer + * + * An EDP2 response has arrived. There are several kinds + * of request and reply frames for our drivers. We use the + * tag to find the matching transmit and then the callback + * to tidy up. 
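+ * (Tag layout, per edp2_alloc_skb() below: kernel tags are built as
+ * 0:devtag:slot, so edp->tag & (MAX_QUEUED-1) recovers the transmit
+ * slot directly, and the full tag comparison against the saved header
+ * rejects replies to frames that have already been retired.)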
+ */ + +static void edp2_callback(struct edp2_device *dev, struct edp2 *edp, struct sk_buff *skb) +{ + struct sk_buff *txskb; + struct edp2 *txedp; + struct edp2_cb *cb; + int slot = edp->tag & (MAX_QUEUED-1); + + /* Use the tag to find the tx frame */ + txskb = dev->skb[slot]; + if(txskb == NULL) + return; + /* Check the tag */ + txedp = (struct edp2 *)(skb->data+14); + if(txedp->tag != edp->tag) + return; + /* Clean up before callback in case we free the last slot */ + dev->skb[slot] = NULL; + dev->count--; + /* Ok it is our reply */ + cb = (struct edp2_cb *)txskb->cb; + cb->completion(dev, edp, cb->data, skb); + kfree_skb(txskb); + if(dev->count == 0) + del_timer(&dev->timer); +} + +/** + * edp2_rcv - EDP2 packet handler + * @skb: buffer + * @dev: device received on + * @pt: protocol + * + * Called whenever the networking layer receives an EDP2 type frame. + * We perform the basic validation for an EDP initiator and then + * hand the frame on for processing. + */ + +static int edp2_rcv(struct sk_buff *skb, struct net_device *netdev, struct packet_type *pt) +{ + struct edp2 *edp; + struct edp2_device *dev; + + if(skb->pkt_type == PACKET_OTHERHOST) + goto drop; + + edp = (struct edp2 *)skb_pull(skb, 6); + if(edp == NULL) + goto drop; + + /* We are an initiator */ + if(!(edp->flag_err & EDP_F_RESPONSE)) + goto drop; + + + spin_lock(&edp2_device_database); + dev = edp2_dev_find(skb->mac.raw + 6); + if(dev != NULL) + { + if(dev->dead) + edp2_device_recover_complete(dev); + if(dev->dev != skb->dev) + { + /* A volume moved interface */ + dev_put(dev->dev); + dev_hold(skb->dev); + dev->dev = skb->dev; + } + } + switch(edp->function) + { + case 0: /* EDP ATA */ + case 3: /* Claim reply */ + edp2_callback(dev, edp, skb); + break; + case 1: /* EDP ident reply or beacon */ + edp2_ident_reply(dev, edp, skb); + break; + case 2: /* Statistics reply */ + default:; + /* Unknown type */ + } + spin_unlock(&edp2_device_database); +drop: + kfree_skb(skb); + return NET_RX_SUCCESS; +} + +/** + * edp2_resend_frame - edp2 retry helper + * @dev: device to talk to + * @slot: slot to resend + * + * Retransmit an EDP packet. This is also called to kick off + * the initial transmission event. Caller must hold the edp2 + * protocol lock. + */ + +static void edp2_resend_frame(struct edp2_device *dev, int slot) +{ + struct sk_buff *skb; + if(dev->dead) + return; + skb = skb_clone(dev->skb[slot], GFP_ATOMIC); + if(skb == NULL) + return; + /* Fill in sending interface */ + memcpy(skb->data, dev->dev->dev_addr, 6); + /* Fire at will */ + skb->dev = dev->dev; + skb->nh.raw = skb->data + 14; + skb->mac.raw = skb->data; + skb->protocol = htons(ETH_P_EDP2); + dev_queue_xmit(skb); +} + +/** + * edp2_resend_timeout - EDP2 failed + * @dev: Device that timed out + * @slot: Slot ID of timed out frame + * + * Clean up after a timeout failure. We call the callback wth + * NULL replies to indicate a failure. Possibly we should have two + * callback functions ? + */ + +static void edp2_resend_timeout(struct edp2_device *dev, int slot) +{ + struct sk_buff *skb = dev->skb[slot]; + struct edp2_cb *cb; + dev->skb[slot] = NULL; + cb = (struct edp2_cb *)skb->cb; + cb->completion(dev, NULL, cb->data, NULL); + kfree_skb(skb); +} + +/** + * edp2_dev_timeout - EDP2 timer event + * @data: The EDP2 device that timed out + * + * Walk the EDP2 list for this device and handle retransmits. + * The current slot array is just get it going, we need a real + * linked chain to walk for this and other purposes. 
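+ * (Retry policy, read from the code below with the shift assumed to
+ * be (1 << cb->count) * HZ: each retransmit backs the deadline off
+ * exponentially, and the tenth expiry gives up and completes the
+ * request with NULL replies via edp2_resend_timeout().)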
+ * + * FIXME: locking + */ + +static void edp2_dev_timeout(unsigned long data) +{ + struct edp2_device *dev = (struct edp2_device *)data; + struct edp2_cb *cb; + unsigned long next = jiffies + 1000*HZ; + int found = 0; + int i; + + for(i=0; i < MAX_QUEUED; i++) + { + if(dev->skb[i]) + { + cb = (struct edp2_cb *)(dev->skb[i]->cb); + if(time_before(dev->skb[i], cb->timeout)) + { + /* Need to do better adaption ! */ + cb->timeout = jiffies + (1<count)*HZ; + cb->count++; + if(cb->count == 10) + edp2_resend_timeout(dev, i); + else + { + found=1; + edp2_resend_frame(dev, i); + } + } + else + { + found = 1; + if(time_before(cb->timeout, next)) + next = cb->timeout; + } + } + } + if(!found) + return; + mod_timer(&dev->timer, next); +} + +/** + * edp2_queue_xmit - send an EDP2 frame + * @dev: device to talk to + * @edp: edp header in packet + * @skb: packet to send + * + * Send a prepared EDP frame out to a device. The EDP frame + * has its timers set and is queued for retransmit in case + * of problems. We also use the low tag bits ourselves. + * + * The caller has completed the edp2_cb control block and + * must hold the edp2 protocol lock. + */ + +int edp2_queue_xmit(struct edp2_device *dev, struct edp2 *edp, struct sk_buff *skb) +{ + int next ; + edp->tag &= ~0xFF; /* Low byte is reserved for protocol muncher */ + + /* FIXME: better algorithm given the fact completion is + generally ordered */ + for(next = 0; next < MAX_QUEUED; next++) + if(dev->skb[next] == NULL) + break; + if(dev->skb[next]) + BUG(); + dev->skb[next] = skb; + edp->tag |= next; /* So we can find the skb fast */ + dev->count++; + /* FIXME: need to know if the timer is live before adjusting + but must do this race free of the expire path */ + mod_timer(&dev->timer, jiffies + 2 * HZ); + edp2_resend_frame(dev, next); + return next; +} + +EXPORT_SYMBOL_GPL(edp2_queue_xmit); + +/** + * edp2_alloc_skb - alloc an EDP2 frame + * @dev: Device we will talk to + * @len: length of frame (in full) + * @function: EDP function + * @callback: Callback handler on completion/error + * @data: data for callback + * @timeout: retransmit timeout + * + * Allocate and build the basis of an EDP2 frame + * We hand up a frame with everything built below the + * application layer. + * + * The caller must hold the edp2 protocol lock + */ + +struct sk_buff *edp2_alloc_skb(struct edp2_device *dev, int len, int function, + int (*callback)(struct edp2_device *, struct edp2 *, unsigned long, struct sk_buff *), + unsigned long data, unsigned long timeout) +{ + struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC); + struct edp2_cb *cb = (struct edp2_cb *)skb->cb; + struct edp2 *edp; + u8 *hdr; + if(skb == NULL) + return NULL; + + /* MAC header */ + hdr = skb_put(skb, 14); + memcpy(hdr+6, dev->mac, 6); + hdr[12] = 0x88; + hdr[13] = 0xA2; + + edp = (struct edp2 *)skb_put(skb, sizeof(struct edp2)); + edp->flag_err = 0; + edp->function = function; + edp->tag = dev->tag << 8; /* Kernel tags are 0:devtag:slot */ + dev->tag++; /* Wraps at 65536 */ + + /* So we can handle the completion path nicely */ + cb->completion = callback; + cb->data = data; + cb->timeout = timeout; + cb->count = 0; + + return skb; +} + +EXPORT_SYMBOL_GPL(edp2_alloc_skb); + +/** + * edp2_device_event - device change notifier + * @this: event block + * @event: event id + * @ptr: data for event + * + * Listen to the network layer events and watch for devices going + * down. We need to release any devices that vanish so that the + * refcount drops and the device can be flushed. 
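+ * (Only NETDEV_DOWN is acted on at present: edp2_dev_flush() drops
+ * the net_device reference of every EDP2 device bound to the downed
+ * interface, frees the unused ones, and marks the in-use ones dead
+ * until an ident frame finds them again, possibly on another link.)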
+ */ + +static int edp2_device_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + unsigned long flags; + if(event == NETDEV_DOWN) + { + spin_lock_irqsave(&edp2_device_database, flags); + edp2_dev_flush(ptr); + spin_unlock_irqrestore(&edp2_device_database, flags); + } + return NOTIFY_DONE; +} + +static struct notifier_block edp2_notifier = { + notifier_call: edp2_device_event +}; + +static struct packet_type edp2_dix_packet_type = { + type: __constant_htons(ETH_P_EDP2), + func: edp2_rcv, + data: (void *) 1, /* Shared skb is ok */ +}; + +/** + * edp2_proto_init - set up the EDP2 protocol handlers + * + * This function sets up the protocol layer for EDP2. It assumes + * that the caller is ready to receive packets and events so + * requires the EDP block layer data structures are initialised. + */ + +static __init int edp2_proto_init(void) +{ + printk(KERN_INFO "Coraid EDPv2 Protocol Layer v0.01\n"); + printk(KERN_INFO " (c) Copyright 2003 Red Hat.\n"); + dev_add_pack(&edp2_dix_packet_type); + register_netdevice_notifier(&edp2_notifier); + return 0; +} + +/** + * edp2_proto_exit - cease listening to networking for EDP2 + * + * Disconnect EDP2 from the network layer and cease listening to + * any device events + */ + +static void __exit edp2_proto_exit(void) +{ + /* FIXME: module locking, device flushing */ + unregister_netdevice_notifier(&edp2_notifier); + dev_remove_pack(&edp2_dix_packet_type); +} + +module_init(edp2_proto_init); +module_exit(edp2_proto_exit); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/edp2/Makefile linux.21rc5-ac2/net/edp2/Makefile --- linux.21rc5/net/edp2/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.21rc5-ac2/net/edp2/Makefile 2003-04-30 15:09:05.000000000 +0100 @@ -0,0 +1,15 @@ +# +# Makefile for the Linux EDP2 layer. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definition is now in the main makefile... 
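+# (Standard 2.4 kbuild idiom rather than anything EDP2-specific:
+# export-objs flags edp2.o as a file that exports symbols so the
+# symbol-version pass processes it, and obj-$(CONFIG_EDP2) expands to
+# obj-y or obj-m according to how the tristate option was set.)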
+ +export-objs = edp2.o + +obj-$(CONFIG_EDP2) := edp2.o + +include $(TOPDIR)/Rules.make + diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/ipv4/netfilter/ip_nat_helper.c linux.21rc5-ac2/net/ipv4/netfilter/ip_nat_helper.c --- linux.21rc5/net/ipv4/netfilter/ip_nat_helper.c 2003-05-28 15:09:06.000000000 +0100 +++ linux.21rc5-ac2/net/ipv4/netfilter/ip_nat_helper.c 2003-04-22 17:10:07.000000000 +0100 @@ -84,7 +84,6 @@ iph = (*skb)->nh.iph; if (iph->protocol == IPPROTO_TCP) { struct tcphdr *tcph = (void *)iph + iph->ihl*4; - void *data = (void *)tcph + tcph->doff*4; DEBUGP("ip_nat_resize_packet: Seq_offset before: "); DUMP_OFFSET(this_way); @@ -488,9 +487,9 @@ const char *tmp = me->me->name; if (strlen(tmp) + 6 > MODULE_MAX_NAMELEN) { - printk(__FUNCTION__ ": unable to " + printk("%s: unable to " "compute conntrack helper name " - "from %s\n", tmp); + "from %s\n", __FUNCTION__, tmp); return -EBUSY; } tmp += 6; @@ -573,7 +572,8 @@ && ct_helper->me) { __MOD_DEC_USE_COUNT(ct_helper->me); } else - printk(__FUNCTION__ ": unable to decrement usage count" - " of conntrack helper %s\n", me->me->name); + printk("%s: unable to decrement usage count" + " of conntrack helper %s\n", + __FUNCTION__, me->me->name); } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/irda/ircomm/ircomm_tty.c linux.21rc5-ac2/net/irda/ircomm/ircomm_tty.c --- linux.21rc5/net/irda/ircomm/ircomm_tty.c 2003-05-28 15:09:09.000000000 +0100 +++ linux.21rc5-ac2/net/irda/ircomm/ircomm_tty.c 2003-04-22 16:44:38.000000000 +0100 @@ -526,7 +526,7 @@ return; } - if ((tty->count == 1) && (self->open_count != 1)) { + if ((atomic_read(&tty->count) == 1) && (self->open_count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/Makefile linux.21rc5-ac2/net/Makefile --- linux.21rc5/net/Makefile 2003-05-28 15:07:42.000000000 +0100 +++ linux.21rc5-ac2/net/Makefile 2003-04-30 15:08:41.000000000 +0100 @@ -7,7 +7,7 @@ O_TARGET := network.o -mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched core +mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched core edp2 export-objs := netsyms.o subdir-y := core ethernet @@ -44,8 +44,8 @@ subdir-$(CONFIG_ATM) += atm subdir-$(CONFIG_DECNET) += decnet subdir-$(CONFIG_ECONET) += econet -subdir-$(CONFIG_VLAN_8021Q) += 8021q - +subdir-$(CONFIG_VLAN_8021Q) += 8021q +subdir-$(CONFIG_EDP2) += edp2 obj-y := socket.o $(join $(subdir-y), $(patsubst %,/%.o,$(notdir $(subdir-y)))) ifeq ($(CONFIG_NET),y) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/netsyms.c linux.21rc5-ac2/net/netsyms.c --- linux.21rc5/net/netsyms.c 2003-05-28 15:09:09.000000000 +0100 +++ linux.21rc5-ac2/net/netsyms.c 2003-04-22 17:10:07.000000000 +0100 @@ -82,7 +82,7 @@ extern void destroy_8023_client(struct datalink_proto *); #endif -#ifdef CONFIG_ATALK_MODULE +#if defined(CONFIG_ATALK_MODULE) || defined(CONFIG_FILTER) #include #endif @@ -93,7 +93,6 @@ /* Skbuff symbols. 
*/ EXPORT_SYMBOL(skb_over_panic); EXPORT_SYMBOL(skb_under_panic); -EXPORT_SYMBOL(skb_pad); /* Socket layer registration */ EXPORT_SYMBOL(sock_register); @@ -466,6 +465,7 @@ #endif /* CONFIG_INET */ #ifdef CONFIG_TR +EXPORT_SYMBOL(tr_source_route); EXPORT_SYMBOL(tr_type_trans); #endif @@ -486,6 +486,7 @@ EXPORT_SYMBOL(__dev_get_by_index); EXPORT_SYMBOL(dev_get_by_name); EXPORT_SYMBOL(__dev_get_by_name); +EXPORT_SYMBOL(dev_getbyhwaddr); EXPORT_SYMBOL(netdev_finish_unregister); EXPORT_SYMBOL(netdev_set_master); EXPORT_SYMBOL(eth_type_trans); @@ -499,6 +500,7 @@ EXPORT_SYMBOL(__kfree_skb); EXPORT_SYMBOL(skb_clone); EXPORT_SYMBOL(skb_copy); +EXPORT_SYMBOL(skb_pad); EXPORT_SYMBOL(netif_rx); EXPORT_SYMBOL(netif_receive_skb); EXPORT_SYMBOL(dev_add_pack); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/sched/sch_cbq.c linux.21rc5-ac2/net/sched/sch_cbq.c --- linux.21rc5/net/sched/sch_cbq.c 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/net/sched/sch_cbq.c 2003-05-03 18:45:38.000000000 +0100 @@ -1056,11 +1056,9 @@ sch->stats.overlimits++; if (q->wd_expires && !netif_queue_stopped(sch->dev)) { long delay = PSCHED_US2JIFFIE(q->wd_expires); - del_timer(&q->wd_timer); if (delay <= 0) delay = 1; - q->wd_timer.expires = jiffies + delay; - add_timer(&q->wd_timer); + mod_timer(&q->wd_timer, jiffies + delay); sch->flags |= TCQ_F_THROTTLED; } } diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/sched/sch_csz.c linux.21rc5-ac2/net/sched/sch_csz.c --- linux.21rc5/net/sched/sch_csz.c 2003-05-28 15:09:09.000000000 +0100 +++ linux.21rc5-ac2/net/sched/sch_csz.c 2003-05-03 18:42:39.000000000 +0100 @@ -708,11 +708,9 @@ */ if (q->wd_expires) { unsigned long delay = PSCHED_US2JIFFIE(q->wd_expires); - del_timer(&q->wd_timer); if (delay == 0) delay = 1; - q->wd_timer.expires = jiffies + delay; - add_timer(&q->wd_timer); + mod_timer(&q->wd_timer, jiffies + delay); sch->stats.overlimits++; } #endif diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/sched/sch_htb.c linux.21rc5-ac2/net/sched/sch_htb.c --- linux.21rc5/net/sched/sch_htb.c 2003-05-28 15:09:09.000000000 +0100 +++ linux.21rc5-ac2/net/sched/sch_htb.c 2003-05-03 18:43:25.000000000 +0100 @@ -986,9 +986,7 @@ printk(KERN_INFO "HTB delay %ld > 5sec\n", delay); delay = 5*HZ; } - del_timer(&q->timer); - q->timer.expires = jiffies + delay; - add_timer(&q->timer); + mod_timer(&q->timer, jiffies + delay); sch->flags |= TCQ_F_THROTTLED; sch->stats.overlimits++; HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay); diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/sunrpc/timer.c linux.21rc5-ac2/net/sunrpc/timer.c --- linux.21rc5/net/sunrpc/timer.c 2003-05-28 15:07:45.000000000 +0100 +++ linux.21rc5-ac2/net/sunrpc/timer.c 2003-05-12 16:38:07.000000000 +0100 @@ -8,7 +8,7 @@ #define RPC_RTO_MAX (60*HZ) #define RPC_RTO_INIT (HZ/5) -#define RPC_RTO_MIN (2) +#define RPC_RTO_MIN (HZ/30) void rpc_init_rtt(struct rpc_rtt *rt, long timeo) diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/sunrpc/xprt.c linux.21rc5-ac2/net/sunrpc/xprt.c --- linux.21rc5/net/sunrpc/xprt.c 2003-05-28 15:09:09.000000000 +0100 +++ linux.21rc5-ac2/net/sunrpc/xprt.c 2003-04-22 16:44:38.000000000 +0100 @@ -383,6 +383,7 @@ if (!sk) return; + write_lock_bh(&sk->callback_lock); xprt->inet = NULL; xprt->sock = NULL; @@ -390,6 +391,7 @@ sk->data_ready = xprt->old_data_ready; sk->state_change = xprt->old_state_change; sk->write_space = xprt->old_write_space; + 
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/net/sunrpc/xprt.c linux.21rc5-ac2/net/sunrpc/xprt.c
--- linux.21rc5/net/sunrpc/xprt.c	2003-05-28 15:09:09.000000000 +0100
+++ linux.21rc5-ac2/net/sunrpc/xprt.c	2003-04-22 16:44:38.000000000 +0100
@@ -383,6 +383,7 @@
 	if (!sk)
 		return;
 
+	write_lock_bh(&sk->callback_lock);
 	xprt->inet = NULL;
 	xprt->sock = NULL;
 
@@ -390,6 +391,7 @@
 	sk->data_ready = xprt->old_data_ready;
 	sk->state_change = xprt->old_state_change;
 	sk->write_space = xprt->old_write_space;
+	write_unlock_bh(&sk->callback_lock);
 
 	xprt_disconnect(xprt);
 	sk->no_check = 0;
@@ -657,8 +659,9 @@
 	struct sk_buff *skb;
 	int err, repsize, copied;
 
+	read_lock(&sk->callback_lock);
 	dprintk("RPC: udp_data_ready...\n");
-	if (!(xprt = xprt_from_sock(sk))) {
+	if (sk->dead || !(xprt = xprt_from_sock(sk))) {
 		printk("RPC: udp_data_ready request not found!\n");
 		goto out;
 	}
@@ -707,6 +710,7 @@
  out:
 	if (sk->sleep && waitqueue_active(sk->sleep))
 		wake_up_interruptible(sk->sleep);
+	read_unlock(&sk->callback_lock);
 }
 
 /*
@@ -905,18 +909,21 @@
 	struct rpc_xprt *xprt;
 	read_descriptor_t rd_desc;
 
+	read_lock(&sk->callback_lock);
 	dprintk("RPC: tcp_data_ready...\n");
 	if (!(xprt = xprt_from_sock(sk))) {
 		printk("RPC: tcp_data_ready socket info not found!\n");
-		return;
+		goto out;
 	}
 	if (xprt->shutdown)
-		return;
+		goto out;
 
 	/* We use rd_desc to pass struct xprt to tcp_data_recv */
 	rd_desc.buf = (char *)xprt;
 	rd_desc.count = 65536;
 	tcp_read_sock(sk, &rd_desc, tcp_data_recv);
+out:
+	read_unlock(&sk->callback_lock);
 }
 
 static void
@@ -924,6 +931,7 @@
 {
 	struct rpc_xprt *xprt;
 
+	read_lock(&sk->callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	dprintk("RPC: tcp_state_change client %p...\n", xprt);
@@ -957,6 +965,7 @@
  out:
 	if (sk->sleep && waitqueue_active(sk->sleep))
 		wake_up_interruptible_all(sk->sleep);
+	read_unlock(&sk->callback_lock);
 }
 
 /*
@@ -971,24 +980,25 @@
 	struct rpc_xprt *xprt;
 	struct socket *sock;
 
+	read_lock(&sk->callback_lock);
 	if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->socket))
-		return;
+		goto out;
 	if (xprt->shutdown)
-		return;
+		goto out;
 
 	/* Wait until we have enough socket memory */
 	if (xprt->stream) {
 		/* from net/ipv4/tcp.c:tcp_write_space */
 		if (tcp_wspace(sk) < tcp_min_write_space(sk))
-			return;
+			goto out;
 	} else {
 		/* from net/core/sock.c:sock_def_write_space */
 		if (!sock_writeable(sk))
-			return;
+			goto out;
 	}
 
 	if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
-		return;
+		goto out;
 
 	spin_lock_bh(&xprt->sock_lock);
 	if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->pending)
@@ -996,6 +1006,8 @@
 	spin_unlock_bh(&xprt->sock_lock);
 	if (sk->sleep && waitqueue_active(sk->sleep))
 		wake_up_interruptible(sk->sleep);
+out:
+	read_unlock(&sk->callback_lock);
 }
 
 /*
@@ -1433,6 +1445,7 @@
 	if (xprt->inet)
 		return -EBUSY;
 
+	write_lock_bh(&sk->callback_lock);
 	sk->user_data = xprt;
 	xprt->old_data_ready = sk->data_ready;
 	xprt->old_state_change = sk->state_change;
@@ -1453,6 +1466,7 @@
 	/* Reset to new socket */
 	xprt->sock = sock;
 	xprt->inet = sk;
+	write_unlock_bh(&sk->callback_lock);
 	/*
 	 *	TCP requires the rpc I/O daemon is present
 	 */
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/Rules.make linux.21rc5-ac2/Rules.make
--- linux.21rc5/Rules.make	2003-05-28 15:07:42.000000000 +0100
+++ linux.21rc5-ac2/Rules.make	2003-04-22 16:44:38.000000000 +0100
@@ -291,10 +291,6 @@
 include .depend
 endif
 
-ifneq ($(wildcard $(TOPDIR)/.hdepend),)
-include $(TOPDIR)/.hdepend
-endif
-
 #
 # Find files whose flags have changed and force recompilation.
 # For safety, this works in the converse direction:
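[All of the xprt.c changes above implement one locking pattern: code that installs or restores a socket's callbacks takes write_lock_bh(&sk->callback_lock), and every callback holds read_lock(&sk->callback_lock) for its whole body, so a callback can no longer run against a transport that is concurrently being torn down. The shape of a callback under that rule, as a sketch only, with the real RPC bookkeeping elided and the function name invented.]

#include <net/sock.h>

static void example_data_ready(struct sock *sk, int bytes)
{
	void *xprt;

	read_lock(&sk->callback_lock);	/* excludes the close path resetting sk */
	xprt = sk->user_data;
	if (!xprt)			/* transport already detached */
		goto out;
	/* ... normal receive processing would run here ... */
out:
	read_unlock(&sk->callback_lock);
}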
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/scripts/extract-ikconfig linux.21rc5-ac2/scripts/extract-ikconfig
--- linux.21rc5/scripts/extract-ikconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/scripts/extract-ikconfig	2003-04-22 16:44:38.000000000 +0100
@@ -0,0 +1,17 @@
+#! /bin/bash -x
+# extracts .config info from a [b]zImage file
+# uses: binoffset (new), dd, zcat, strings, grep
+# $arg1 is [b]zImage filename
+
+HDR=`binoffset $1 0x1f 0x8b 0x08 0x0`
+PID=$$
+TMPFILE="$1.vmlin.$PID"
+
+# dd if=$1 bs=1 skip=$HDR | zcat - | strings /dev/stdin \
+#	| grep "[A-Za-z_0-9]=[ynm]$" | sed "s/^/CONFIG_/" > $1.oldconfig.$PID
+# exit
+
+dd if=$1 bs=1 skip=$HDR | zcat - > $TMPFILE
+strings $TMPFILE | grep "^[\#[:blank:]]*CONFIG_[A-Za-z_0-9]*" > $1.oldconfig.$PID
+wc $1.oldconfig.$PID
+rm $TMPFILE
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/scripts/include_deps linux.21rc5-ac2/scripts/include_deps
--- linux.21rc5/scripts/include_deps	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/scripts/include_deps	2003-04-22 16:44:38.000000000 +0100
@@ -0,0 +1,15 @@
+# Read the .depend files, extract the dependencies for .h targets, convert
+# relative names to absolute and write the result to stdout.  It is part of
+# building the global .h dependency graph for kbuild 2.4.  KAO
+
+/^[^ ]/		{ copy = 0; fn = "/error/"; }
+/^[^ ][^ ]*\.h:/ { copy = 1; fn = FILENAME; sub(/\.depend/, "", fn); }
+!copy		{ next; }
+		{
+		  indent = $0; sub(/[^ ].*/, "", indent);
+		  if ($1 != "" && $1 !~ /^[@$\/\\]/) { $1 = fn $1 };
+		  if ($2 != "" && $2 !~ /^[@$\/\\]/) { $2 = fn $2 };
+		  $1 = $1;	# ensure $0 is rebuilt
+		  $0 = indent $0;
+		  print;
+		}
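[extract-ikconfig above depends on binoffset to locate the compressed kernel inside the bzImage: a gzip member begins with the magic bytes 0x1f 0x8b followed by the deflate method byte 0x08, which is the pattern passed on its command line. A stand-alone C sketch of that search, on the assumption that binoffset does essentially this and prints the offset of the first match.]

#include <stdio.h>

/* Print the offset of the first 0x1f 0x8b 0x08 sequence in a file. */
int main(int argc, char **argv)
{
	static const int magic[3] = { 0x1f, 0x8b, 0x08 };
	long off = 0;
	int state = 0, c;
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb"))) {
		fprintf(stderr, "usage: gzoff <image>\n");
		return 1;
	}
	while ((c = getc(f)) != EOF) {
		/* simple state machine over the 3-byte magic */
		state = (c == magic[state]) ? state + 1 : (c == magic[0]);
		if (state == 3) {
			printf("%ld\n", off - 2);	/* start of the magic */
			return 0;
		}
		off++;
	}
	return 1;
}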
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/scripts/mkconfigs.c linux.21rc5-ac2/scripts/mkconfigs.c
--- linux.21rc5/scripts/mkconfigs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.21rc5-ac2/scripts/mkconfigs.c	2003-04-22 16:44:38.000000000 +0100
@@ -0,0 +1,181 @@
+/***************************************************************************
+ * mkconfigs.c
+ * (C) 2002 Randy Dunlap
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+# Rules for scripts/mkconfigs.c input.config output.c
+# to generate configs.c from linux/.config:
+# - drop lines that begin with '#'
+# - drop blank lines
+# - lines that use double-quotes must \\-escape-quote them
+
+################## skeleton configs.c file: ####################
+
+#include
+#include
+
+static char *configs[] __initdata =
+
+  ,
+
+;
+
+################### end configs.c file ######################
+
+ * Changelog for ver. 0.2, 2002-02-15, rddunlap@osdl.org:
+   - strip leading "CONFIG_" from config option strings;
+   - use "static" and "__attribute__((unused))";
+   - don't use EXPORT_SYMBOL();
+   - separate each config line with \newline instead of space;
+
+ * Changelog for ver. 0.3, 2002-02-18, rddunlap@osdl.org:
+   - keep all "not set" comment lines from .config so that 'make *config'
+     will be happy, but don't keep other comments;
+   - keep leading "CONFIG_" on each line;
+
+****************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#define VERSION "0.2"
+#define LINE_SIZE 1000
+
+int include_all_lines = 1;	// whether to include "=n" lines in the output
+
+void usage (const char *progname)
+{
+	fprintf (stderr, "%s ver. %s\n", progname, VERSION);
+	fprintf (stderr, "usage: %s input.config.name path/configs.c\n",
+		progname);
+	exit (1);
+}
+
+void make_intro (FILE *sourcefile)
+{
+	fprintf (sourcefile, "#include \n");
+/////	fprintf (sourcefile, "#include \n");
+	fprintf (sourcefile, "\n");
+/////	fprintf (sourcefile, "char *configs[] __initdata = {\n");
+	fprintf (sourcefile, "static char __attribute__ ((unused)) *configs[] __initdata = {\n");
+	fprintf (sourcefile, "  \"CONFIG_BEGIN=n\\n\" ,\n");
+}
+
+void make_ending (FILE *sourcefile)
+{
+	fprintf (sourcefile, "  \"CONFIG_END=n\\n\"\n");
+	fprintf (sourcefile, "};\n");
+/////	fprintf (sourcefile, "EXPORT_SYMBOL (configs);\n");
+}
+
+void make_lines (FILE *configfile, FILE *sourcefile)
+{
+	char cfgline[LINE_SIZE];
+	char *ch;
+
+	while (fgets (cfgline, LINE_SIZE, configfile)) {
+		/* kill the trailing newline in cfgline */
+		cfgline[strlen (cfgline) - 1] = '\0';
+
+		/* don't keep #-only line or an empty/blank line */
+		if ((cfgline[0] == '#' && cfgline[1] == '\0') ||
+		    cfgline[0] == '\0')
+			continue;
+
+		if (!include_all_lines &&
+		    cfgline[0] == '#')	// strip out all comment lines
+			continue;
+
+		/* really only want to keep lines that begin with
+		 * "CONFIG_" or "# CONFIG_" */
+		if (strncmp (cfgline, "CONFIG_", 7) &&
+		    strncmp (cfgline, "# CONFIG_", 9))
+			continue;
+
+		/*
+		 * use strchr() to check for "-quote in cfgline;
+		 * if not found, output the line, quoted;
+		 * if found, output a char at a time, with \\-quote
+		 * preceding double-quote chars
+		 */
+		if (!strchr (cfgline, '"')) {
+			fprintf (sourcefile, "  \"%s\\n\" ,\n", cfgline);
+			continue;
+		}
+
+		/* go to char-at-a-time mode for this config and
+		 * precede any double-quote with a backslash */
+		fprintf (sourcefile, "  \"");	/* lead-in */
+		for (ch = cfgline; *ch; ch++) {
+			if (*ch == '"')
+				fputc ('\\', sourcefile);
+			fputc (*ch, sourcefile);
+		}
+		fprintf (sourcefile, "\\n\" ,\n");
+	}
+}
+
+void make_configs (FILE *configfile, FILE *sourcefile)
+{
+	make_intro (sourcefile);
+	make_lines (configfile, sourcefile);
+	make_ending (sourcefile);
+}
+
+int main (int argc, char *argv[])
+{
+	char *progname = argv[0];
+	char *configname, *sourcename;
+	FILE *configfile, *sourcefile;
+
+	if (argc != 3)
+		usage (progname);
+
+	configname = argv[1];
+	sourcename = argv[2];
+
+	configfile = fopen (configname, "r");
+	if (!configfile) {
+		fprintf (stderr, "%s: cannot open '%s'\n",
+			progname, configname);
+		exit (2);
+	}
+	sourcefile = fopen (sourcename, "w");
+	if (!sourcefile) {
+		fprintf (stderr, "%s: cannot open '%s'\n",
+			progname, sourcename);
+		exit (2);
+	}
+
+	make_configs (configfile, sourcefile);
+
+	if (fclose (sourcefile)) {
+		fprintf (stderr, "%s: error %d closing '%s'\n",
+			progname, errno, sourcename);
+		exit (3);
+	}
+	if (fclose (configfile)) {
+		fprintf (stderr, "%s: error %d closing '%s'\n",
+			progname, errno, configname);
+		exit (3);
+	}
+
+	exit (0);
+}
+
+/* end mkconfigs.c */
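[For reference, the generator above turns a .config fragment such as CONFIG_NET=y plus "# CONFIG_EDP2 is not set" into a configs.c of the following shape. The output is reconstructed from make_intro/make_lines/make_ending; the two CONFIG lines are examples only, and the include emitted by make_intro is elided here because its target is not preserved in this copy of the patch.]

static char __attribute__ ((unused)) *configs[] __initdata = {
  "CONFIG_BEGIN=n\n" ,
  "CONFIG_NET=y\n" ,
  "# CONFIG_EDP2 is not set\n" ,
  "CONFIG_END=n\n"
};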
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/scripts/mkdep.c linux.21rc5-ac2/scripts/mkdep.c
--- linux.21rc5/scripts/mkdep.c	2003-05-28 15:08:18.000000000 +0100
+++ linux.21rc5-ac2/scripts/mkdep.c	2003-04-22 16:44:38.000000000 +0100
@@ -48,6 +48,8 @@
 char __depname[512] = "\n\t@touch ";
 #define depname (__depname+9)
 int hasdep;
+char cwd[PATH_MAX];
+int lcwd;
 
 struct path_struct {
 	int len;
@@ -202,8 +204,28 @@
 		memcpy(path->buffer+path->len, name, len);
 		path->buffer[path->len+len] = '\0';
 		if (access(path->buffer, F_OK) == 0) {
+			int l = lcwd + strlen(path->buffer);
+			int need_wildcard = 0;
+			char name2[l+2], *p;
+			if (path->buffer[0] == '/') {
+				memcpy(name2, path->buffer, l+1);
+			}
+			else {
+				need_wildcard = 1;
+				memcpy(name2, cwd, lcwd);
+				name2[lcwd] = '/';
+				memcpy(name2+lcwd+1, path->buffer, path->len+len+1);
+			}
+			while ((p = strstr(name2, "/../"))) {
+				*p = '\0';
+				strcpy(strrchr(name2, '/'), p+3);
+			}
 			do_depname();
-			printf(" \\\n   %s", path->buffer);
+			if (need_wildcard) {
+				printf(" \\\n   $(wildcard %s)", name2);
+			} else {
+				printf(" \\\n   %s", name2);
+			}
 			return;
 		}
 	}
@@ -585,6 +607,12 @@
 		return 1;
 	}
 
+	if (!getcwd(cwd, sizeof(cwd))) {
+		fprintf(stderr, "mkdep: getcwd() failed %m\n");
+		return 1;
+	}
+	lcwd = strlen(cwd);
+
 	add_path(".");	/* for #include "..." */
 
 	while (++argv, --argc > 0) {
diff --exclude-from /usr/src/exclude -u --new-file --recursive linux.21rc5/scripts/tkgen.c linux.21rc5-ac2/scripts/tkgen.c
--- linux.21rc5/scripts/tkgen.c	2003-05-28 15:08:18.000000000 +0100
+++ linux.21rc5-ac2/scripts/tkgen.c	2003-04-22 16:44:38.000000000 +0100
@@ -625,6 +625,7 @@
 	    if ( ! vartable[i].global_written )
 	    {
 		global( vartable[i].name );
+		vartable[i].global_written = 1;
 	    }
 	    printf( "\t" );
 	}
@@ -699,6 +700,19 @@
     }
 
     /*
+     * Generate global declarations for the dependency chain (e.g. CONSTANT_M).
+     */
+    for ( tmp = cfg->depend; tmp; tmp = tmp->next )
+    {
+	int i = get_varnum( tmp->name );
+	if ( ! vartable[i].global_written )
+	{
+	    global( vartable[i].name );
+	    vartable[i].global_written = 1;
+	}
+    }
+
+    /*
      * Generate indentation.
      */
     printf( "\t" );
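[The mkdep.c hunk above canonicalises include paths before emitting them as dependencies: relative names are prefixed with the current directory and each "/../" segment is folded away with the strstr()/strrchr() loop shown in the patch. The same loop in isolation, runnable in user space; the sample path is hypothetical, and memmove() is used for the overlapping copy where the patch itself uses strcpy().]

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical path, as mkdep might build it from cwd + name */
	char name2[] = "/usr/src/linux/net/core/../../include/net/sock.h";
	char *p;

	while ((p = strstr(name2, "/../"))) {
		*p = '\0';	/* cut the string just before "/../" */
		/* splice the remainder over the last path component */
		memmove(strrchr(name2, '/'), p + 3, strlen(p + 3) + 1);
	}
	printf("%s\n", name2);	/* -> /usr/src/linux/include/net/sock.h */
	return 0;
}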